repo_name
stringlengths 6
97
| path
stringlengths 3
341
| text
stringlengths 8
1.02M
|
|---|---|---|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/core/generic-loader.c
|
<reponame>pmp-tool/PMP<filename>src/qemu/src-pmp/hw/core/generic-loader.c
/*
* Generic Loader
*
* Copyright (C) 2014 <NAME>
* Copyright (C) 2016 Xilinx Inc.
* Written by <NAME> <<EMAIL>>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*/
/*
* Internally inside QEMU this is a device. It is a strange device that
* provides no hardware interface but allows QEMU to monkey patch memory
* specified when it is created. To be able to do this it has a reset
* callback that does the memory operations.
* This device allows the user to monkey patch memory. To be able to do
* this it needs a backend to manage the datas, the same as other
* memory-related devices. In this case as the backend is so trivial we
* have merged it with the frontend instead of creating and maintaining a
* separate backend.
*/
#include "qemu/osdep.h"
#include "qom/cpu.h"
#include "hw/sysbus.h"
#include "sysemu/dma.h"
#include "hw/loader.h"
#include "qapi/error.h"
#include "hw/core/generic-loader.h"
#define CPU_NONE 0xFFFFFFFF
/*
 * Reset callback: (re)apply the loader's effects.  Optionally resets the
 * target CPU and sets its PC, then writes the user-supplied data word into
 * guest memory at s->addr via the CPU's DMA address space.
 */
static void generic_loader_reset(void *opaque)
{
    GenericLoaderState *s = GENERIC_LOADER(opaque);

    if (s->set_pc) {
        CPUClass *cc = CPU_GET_CLASS(s->cpu);
        cpu_reset(s->cpu);
        if (cc) {
            cc->set_pc(s->cpu, s->addr);
        }
    }

    if (s->data_len) {
        /*
         * realize() accepts data_len values up to and including 8 bytes,
         * which equals sizeof(s->data), so the bound here must be
         * inclusive.  The previous strict '<' aborted for a perfectly
         * valid 8-byte load.
         */
        assert(s->data_len <= sizeof(s->data));
        dma_memory_write(s->cpu->as, s->addr, &s->data, s->data_len);
    }
}
/*
 * Realize: validate the user's property combination, pick the CPU whose
 * address space will be used, load the image/data, and decide whether the
 * reset handler should also set the PC.
 *
 * Three mutually exclusive modes, checked in priority order:
 *   1. data/data-len/data-be set  -> write a raw value to memory at reset
 *   2. file/force-raw set         -> load an image (ELF/uImage/hex/raw)
 *   3. addr alone                 -> just set the program counter
 */
static void generic_loader_realize(DeviceState *dev, Error **errp)
{
    GenericLoaderState *s = GENERIC_LOADER(dev);
    hwaddr entry;
    int big_endian;
    int size = 0;

    s->set_pc = false;

    /* Perform some error checking on the user's options */
    if (s->data || s->data_len || s->data_be) {
        /* User is loading memory values */
        if (s->file) {
            error_setg(errp, "Specifying a file is not supported when loading "
                       "memory values");
            return;
        } else if (s->force_raw) {
            error_setg(errp, "Specifying force-raw is not supported when "
                       "loading memory values");
            return;
        } else if (!s->data_len) {
            /* We can't check for !data here as a value of 0 is still valid. */
            error_setg(errp, "Both data and data-len must be specified");
            return;
        } else if (s->data_len > 8) {
            /* Data is stored in a uint64_t, so at most 8 bytes */
            error_setg(errp, "data-len cannot be greater then 8 bytes");
            return;
        }
    } else if (s->file || s->force_raw) {
        /* User is loading an image */
        /*
         * NOTE(review): this inner data check is unreachable -- we only get
         * here when the first branch's identical condition was false.
         */
        if (s->data || s->data_len || s->data_be) {
            error_setg(errp, "data can not be specified when loading an "
                       "image");
            return;
        }
        /* The user specified a file, only set the PC if they also specified
         * a CPU to use.
         */
        if (s->cpu_num != CPU_NONE) {
            s->set_pc = true;
        }
    } else if (s->addr) {
        /* User is setting the PC */
        /*
         * NOTE(review): unreachable for the same reason as the check in the
         * image-loading branch above.
         */
        if (s->data || s->data_len || s->data_be) {
            error_setg(errp, "data can not be specified when setting a "
                       "program counter");
            return;
        } else if (s->cpu_num == CPU_NONE) {
            error_setg(errp, "cpu_num must be specified when setting a "
                       "program counter");
            return;
        }
        s->set_pc = true;
    } else {
        /* Did the user specify anything? */
        error_setg(errp, "please include valid arguments");
        return;
    }

    /* Registered here (not in class_init) so device_add works; see the
     * comment in generic_loader_class_init().
     */
    qemu_register_reset(generic_loader_reset, dev);

    if (s->cpu_num != CPU_NONE) {
        s->cpu = qemu_get_cpu(s->cpu_num);
        if (!s->cpu) {
            error_setg(errp, "Specified boot CPU#%d is nonexistent",
                       s->cpu_num);
            return;
        }
    } else {
        s->cpu = first_cpu;
    }

    big_endian = target_words_bigendian();

    if (s->file) {
        AddressSpace *as = s->cpu ? s->cpu->as : NULL;

        /* Try ELF, then uImage, then Intel HEX unless raw was forced */
        if (!s->force_raw) {
            size = load_elf_as(s->file, NULL, NULL, NULL, &entry, NULL, NULL,
                               big_endian, 0, 0, 0, as);

            if (size < 0) {
                size = load_uimage_as(s->file, &entry, NULL, NULL, NULL, NULL,
                                      as);
            }

            if (size < 0) {
                size = load_targphys_hex_as(s->file, &entry, as);
            }
        }

        if (size < 0 || s->force_raw) {
            /* Default to the maximum size being the machine's ram size */
            size = load_image_targphys_as(s->file, s->addr, ram_size, as);
        } else {
            /* A structured format supplied the entry point; use it */
            s->addr = entry;
        }

        if (size < 0) {
            error_setg(errp, "Cannot load specified image %s", s->file);
            return;
        }
    }

    /* Convert the data endiannes */
    if (s->data_be) {
        s->data = cpu_to_be64(s->data);
    } else {
        s->data = cpu_to_le64(s->data);
    }
}
/* Unrealize: undo the reset-handler registration done in realize() */
static void generic_loader_unrealize(DeviceState *dev, Error **errp)
{
    qemu_unregister_reset(generic_loader_reset, dev);
}
/* User-visible -device properties; see generic_loader_realize() for the
 * allowed combinations.  cpu-num defaults to CPU_NONE (no CPU selected).
 */
static Property generic_loader_props[] = {
    DEFINE_PROP_UINT64("addr", GenericLoaderState, addr, 0),
    DEFINE_PROP_UINT64("data", GenericLoaderState, data, 0),
    DEFINE_PROP_UINT8("data-len", GenericLoaderState, data_len, 0),
    DEFINE_PROP_BOOL("data-be", GenericLoaderState, data_be, false),
    DEFINE_PROP_UINT32("cpu-num", GenericLoaderState, cpu_num, CPU_NONE),
    DEFINE_PROP_BOOL("force-raw", GenericLoaderState, force_raw, false),
    DEFINE_PROP_STRING("file", GenericLoaderState, file),
    DEFINE_PROP_END_OF_LIST(),
};
/* QOM class initializer: hook up realize/unrealize, properties and
 * description for the generic-loader device.
 */
static void generic_loader_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *device_class = DEVICE_CLASS(klass);

    device_class->desc = "Generic Loader";
    device_class->props = generic_loader_props;
    device_class->realize = generic_loader_realize;
    device_class->unrealize = generic_loader_unrealize;
    set_bit(DEVICE_CATEGORY_MISC, device_class->categories);

    /* The reset function is not registered here and is instead registered in
     * the realize function to allow this device to be added via the device_add
     * command in the QEMU monitor.
     * TODO: Improve the device_add functionality to allow resets to be
     * connected
     */
}
/* Type registration record: a plain TYPE_DEVICE subclass */
static TypeInfo generic_loader_info = {
    .name = TYPE_GENERIC_LOADER,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(GenericLoaderState),
    .class_init = generic_loader_class_init,
};
/* Register the type with QOM at module-init time */
static void generic_loader_register_type(void)
{
    type_register_static(&generic_loader_info);
}

type_init(generic_loader_register_type)
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/ppc/pnv_lpc.c
|
<gh_stars>1-10
/*
* QEMU PowerPC PowerNV LPC controller
*
* Copyright (c) 2016, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "target/ppc/cpu.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "hw/isa/isa.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_lpc.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/fdt.h"
#include <libfdt.h>
/* XSCOM-visible ECCB register indices (64-bit registers, addr >> 3) */
enum {
    ECCB_CTL = 0,
    ECCB_RESET = 1,
    ECCB_STAT = 2,
    ECCB_DATA = 3,
};
/* OPB Master LS registers */
#define OPB_MASTER_LS_ROUTE0 0x8
#define OPB_MASTER_LS_ROUTE1 0xC
#define OPB_MASTER_LS_IRQ_STAT 0x50
#define OPB_MASTER_IRQ_LPC 0x00000800
#define OPB_MASTER_LS_IRQ_MASK 0x54
#define OPB_MASTER_LS_IRQ_POL 0x58
#define OPB_MASTER_LS_IRQ_INPUT 0x5c
/* LPC HC registers */
#define LPC_HC_FW_SEG_IDSEL 0x24
#define LPC_HC_FW_RD_ACC_SIZE 0x28
#define LPC_HC_FW_RD_1B 0x00000000
#define LPC_HC_FW_RD_2B 0x01000000
#define LPC_HC_FW_RD_4B 0x02000000
#define LPC_HC_FW_RD_16B 0x04000000
#define LPC_HC_FW_RD_128B 0x07000000
#define LPC_HC_IRQSER_CTRL 0x30
#define LPC_HC_IRQSER_EN 0x80000000
#define LPC_HC_IRQSER_QMODE 0x40000000
#define LPC_HC_IRQSER_START_MASK 0x03000000
#define LPC_HC_IRQSER_START_4CLK 0x00000000
#define LPC_HC_IRQSER_START_6CLK 0x01000000
#define LPC_HC_IRQSER_START_8CLK 0x02000000
#define LPC_HC_IRQMASK 0x34 /* same bit defs as LPC_HC_IRQSTAT */
#define LPC_HC_IRQSTAT 0x38
#define LPC_HC_IRQ_SERIRQ0 0x80000000 /* all bits down to ... */
#define LPC_HC_IRQ_SERIRQ16 0x00008000 /* IRQ16=IOCHK#, IRQ2=SMI# */
#define LPC_HC_IRQ_SERIRQ_ALL 0xffff8000
#define LPC_HC_IRQ_LRESET 0x00000400
#define LPC_HC_IRQ_SYNC_ABNORM_ERR 0x00000080
#define LPC_HC_IRQ_SYNC_NORESP_ERR 0x00000040
#define LPC_HC_IRQ_SYNC_NORM_ERR 0x00000020
#define LPC_HC_IRQ_SYNC_TIMEOUT_ERR 0x00000010
#define LPC_HC_IRQ_SYNC_TARG_TAR_ERR 0x00000008
#define LPC_HC_IRQ_SYNC_BM_TAR_ERR 0x00000004
#define LPC_HC_IRQ_SYNC_BM0_REQ 0x00000002
#define LPC_HC_IRQ_SYNC_BM1_REQ 0x00000001
#define LPC_HC_ERROR_ADDRESS 0x40
#define LPC_OPB_SIZE 0x100000000ull
#define ISA_IO_SIZE 0x00010000
#define ISA_MEM_SIZE 0x10000000
#define ISA_FW_SIZE 0x10000000
#define LPC_IO_OPB_ADDR 0xd0010000
#define LPC_IO_OPB_SIZE 0x00010000
#define LPC_MEM_OPB_ADDR 0xe0010000
#define LPC_MEM_OPB_SIZE 0x10000000
#define LPC_FW_OPB_ADDR 0xf0000000
#define LPC_FW_OPB_SIZE 0x10000000
#define LPC_OPB_REGS_OPB_ADDR 0xc0010000
#define LPC_OPB_REGS_OPB_SIZE 0x00000060
#define LPC_OPB_REGS_OPBA_ADDR 0xc0011000
#define LPC_OPB_REGS_OPBA_SIZE 0x00000008
#define LPC_HC_REGS_OPB_ADDR 0xc0012000
#define LPC_HC_REGS_OPB_SIZE 0x00000100
/*
 * POWER8: add the "isa@..." node describing the LPC controller under the
 * chip's XSCOM node in the device tree.  Returns 0; fdt errors abort via
 * the _FDT() macro.
 */
static int pnv_lpc_dt_xscom(PnvXScomInterface *dev, void *fdt, int xscom_offset)
{
    const char compat[] = "ibm,power8-lpc\0ibm,lpc";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV_XSCOM_LPC_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV_XSCOM_LPC_SIZE)
    };

    name = g_strdup_printf("isa@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 2)));
    _FDT((fdt_setprop_cell(fdt, offset, "#size-cells", 1)));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat, sizeof(compat))));
    return 0;
}
/* POWER9 only */
/* POWER9 only */
/*
 * Build the device-tree description of the P9 LPC Master (LPCM) space:
 * an "lpcm-opb" simple-bus node containing the OPB master, OPB arbiter
 * and LPC host-controller register nodes, plus an "lpc@0" child bus node.
 * Returns 0; fdt errors abort via _FDT().
 */
int pnv_dt_lpc(PnvChip *chip, void *fdt, int root_offset)
{
    const char compat[] = "ibm,power9-lpcm-opb\0simple-bus";
    const char lpc_compat[] = "ibm,power9-lpc\0ibm,lpc";
    char *name;
    int offset, lpcm_offset;
    uint64_t lpcm_addr = PNV9_LPCM_BASE(chip);
    /* Two ranges entries: both halves of the LPCM window, 1:1 mapped */
    uint32_t opb_ranges[8] = { 0,
                               cpu_to_be32(lpcm_addr >> 32),
                               cpu_to_be32((uint32_t)lpcm_addr),
                               cpu_to_be32(PNV9_LPCM_SIZE / 2),
                               cpu_to_be32(PNV9_LPCM_SIZE / 2),
                               cpu_to_be32(lpcm_addr >> 32),
                               cpu_to_be32(PNV9_LPCM_SIZE / 2),
                               cpu_to_be32(PNV9_LPCM_SIZE / 2),
    };
    uint32_t opb_reg[4] = { cpu_to_be32(lpcm_addr >> 32),
                            cpu_to_be32((uint32_t)lpcm_addr),
                            cpu_to_be32(PNV9_LPCM_SIZE >> 32),
                            cpu_to_be32((uint32_t)PNV9_LPCM_SIZE),
    };
    uint32_t reg[2];

    /*
     * OPB bus
     */
    name = g_strdup_printf("lpcm-opb@%"PRIx64, lpcm_addr);
    lpcm_offset = fdt_add_subnode(fdt, root_offset, name);
    _FDT(lpcm_offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, lpcm_offset, "reg", opb_reg, sizeof(opb_reg))));
    _FDT((fdt_setprop_cell(fdt, lpcm_offset, "#address-cells", 1)));
    _FDT((fdt_setprop_cell(fdt, lpcm_offset, "#size-cells", 1)));
    _FDT((fdt_setprop(fdt, lpcm_offset, "compatible", compat, sizeof(compat))));
    _FDT((fdt_setprop_cell(fdt, lpcm_offset, "ibm,chip-id", chip->chip_id)));
    _FDT((fdt_setprop(fdt, lpcm_offset, "ranges", opb_ranges,
                      sizeof(opb_ranges))));

    /*
     * OPB Master registers
     */
    name = g_strdup_printf("opb-master@%x", LPC_OPB_REGS_OPB_ADDR);
    offset = fdt_add_subnode(fdt, lpcm_offset, name);
    _FDT(offset);
    g_free(name);

    reg[0] = cpu_to_be32(LPC_OPB_REGS_OPB_ADDR);
    reg[1] = cpu_to_be32(LPC_OPB_REGS_OPB_SIZE);
    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop_string(fdt, offset, "compatible",
                             "ibm,power9-lpcm-opb-master")));

    /*
     * OPB arbitrer registers
     */
    name = g_strdup_printf("opb-arbitrer@%x", LPC_OPB_REGS_OPBA_ADDR);
    offset = fdt_add_subnode(fdt, lpcm_offset, name);
    _FDT(offset);
    g_free(name);

    reg[0] = cpu_to_be32(LPC_OPB_REGS_OPBA_ADDR);
    reg[1] = cpu_to_be32(LPC_OPB_REGS_OPBA_SIZE);
    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop_string(fdt, offset, "compatible",
                             "ibm,power9-lpcm-opb-arbiter")));

    /*
     * LPC Host Controller registers
     */
    name = g_strdup_printf("lpc-controller@%x", LPC_HC_REGS_OPB_ADDR);
    offset = fdt_add_subnode(fdt, lpcm_offset, name);
    _FDT(offset);
    g_free(name);

    reg[0] = cpu_to_be32(LPC_HC_REGS_OPB_ADDR);
    reg[1] = cpu_to_be32(LPC_HC_REGS_OPB_SIZE);
    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop_string(fdt, offset, "compatible",
                             "ibm,power9-lpc-controller")));

    /* NOTE(review): literal name, g_strdup("lpc@0") would suffice here */
    name = g_strdup_printf("lpc@0");
    offset = fdt_add_subnode(fdt, lpcm_offset, name);
    _FDT(offset);
    g_free(name);
    _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 2)));
    _FDT((fdt_setprop_cell(fdt, offset, "#size-cells", 1)));
    _FDT((fdt_setprop(fdt, offset, "compatible", lpc_compat,
                      sizeof(lpc_compat))));

    return 0;
}
/*
* These read/write handlers of the OPB address space should be common
* with the P9 LPC Controller which uses direct MMIOs.
*
* TODO: rework to use address_space_stq() and address_space_ldq()
* instead.
*/
/* Read sz bytes from the OPB address space into data.
 * Returns true on success, false on a bus error.
 */
static bool opb_read(PnvLpcController *lpc, uint32_t addr, uint8_t *data,
                     int sz)
{
    /* XXX Handle access size limits and FW read caching here */
    MemTxResult res = address_space_rw(&lpc->opb_as, addr,
                                       MEMTXATTRS_UNSPECIFIED, data, sz,
                                       false);
    return res == MEMTX_OK;
}
/* Write sz bytes from data into the OPB address space.
 * Returns true on success, false on a bus error.
 */
static bool opb_write(PnvLpcController *lpc, uint32_t addr, uint8_t *data,
                      int sz)
{
    /* XXX Handle access size limits here */
    MemTxResult res = address_space_rw(&lpc->opb_as, addr,
                                       MEMTXATTRS_UNSPECIFIED, data, sz,
                                       true);
    return res == MEMTX_OK;
}
/* ECCB_CTL command word layout and ECCB_STAT result layout */
#define ECCB_CTL_READ PPC_BIT(15)
#define ECCB_CTL_SZ_LSH (63 - 7)
#define ECCB_CTL_SZ_MASK PPC_BITMASK(4, 7)
#define ECCB_CTL_ADDR_MASK PPC_BITMASK(32, 63)
#define ECCB_STAT_OP_DONE PPC_BIT(52)
/* NOTE(review): OP_ERR is defined identically to OP_DONE (PPC_BIT(52));
 * a distinct error bit was presumably intended -- confirm against the
 * P8 LPC documentation.  OP_ERR is currently unused in this file.
 */
#define ECCB_STAT_OP_ERR PPC_BIT(52)
#define ECCB_STAT_RD_DATA_LSH (63 - 37)
/* NOTE(review): 0xffffffff is a 32-bit constant; shifting it left by 26
 * truncates the mask -- confirm intent (this macro is unused here).
 */
#define ECCB_STAT_RD_DATA_MASK (0xffffffff << ECCB_STAT_RD_DATA_LSH)
/*
 * Execute one ECCB indirect access: decode the size and OPB address from
 * the command word, then either read from the OPB into eccb_stat_reg
 * (32-bit data field, big-endian byte order) or write the 32-bit
 * eccb_data_reg out to the OPB.
 */
static void pnv_lpc_do_eccb(PnvLpcController *lpc, uint64_t cmd)
{
    /* XXX Check for magic bits at the top, addr size etc... */
    unsigned int sz = (cmd & ECCB_CTL_SZ_MASK) >> ECCB_CTL_SZ_LSH;
    uint32_t opb_addr = cmd & ECCB_CTL_ADDR_MASK;
    uint8_t data[8];
    bool success;

    if (sz > sizeof(data)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ECCB: invalid operation at @0x%08x size %d\n", opb_addr, sz);
        return;
    }

    if (cmd & ECCB_CTL_READ) {
        success = opb_read(lpc, opb_addr, data, sz);
        if (success) {
            /* Pack up to 4 bytes MSB-first into the status data field */
            lpc->eccb_stat_reg = ECCB_STAT_OP_DONE |
                    (((uint64_t)data[0]) << 24 |
                     ((uint64_t)data[1]) << 16 |
                     ((uint64_t)data[2]) << 8 |
                     ((uint64_t)data[3])) << ECCB_STAT_RD_DATA_LSH;
        } else {
            /* Failed reads return all-ones data */
            lpc->eccb_stat_reg = ECCB_STAT_OP_DONE |
                    (0xffffffffull << ECCB_STAT_RD_DATA_LSH);
        }
    } else {
        /* Unpack the data register MSB-first for the OPB write */
        data[0] = lpc->eccb_data_reg >> 24;
        data[1] = lpc->eccb_data_reg >> 16;
        data[2] = lpc->eccb_data_reg >> 8;
        data[3] = lpc->eccb_data_reg;
        success = opb_write(lpc, opb_addr, data, sz);
        lpc->eccb_stat_reg = ECCB_STAT_OP_DONE;
    }
    /* XXX Which error bit (if any) to signal OPB error ? */
}
/* XSCOM read of the P8 ECCB register set (registers are 8 bytes apart) */
static uint64_t pnv_lpc_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvLpcController *lpc = PNV_LPC(opaque);
    uint64_t ret;

    switch ((addr >> 3) & 3) {
    case ECCB_STAT:
        /* Status is clear-on-read */
        ret = lpc->eccb_stat_reg;
        lpc->eccb_stat_reg = 0;
        break;
    case ECCB_DATA:
        ret = ((uint64_t)lpc->eccb_data_reg) << 32;
        break;
    case ECCB_CTL:
    case ECCB_RESET:
    default:
        /* Control and reset read back as zero */
        ret = 0;
        break;
    }
    return ret;
}
/* XSCOM write to the P8 ECCB register set: CTL triggers an access,
 * DATA latches the value used by subsequent ECCB writes.
 */
static void pnv_lpc_xscom_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    PnvLpcController *lpc = PNV_LPC(opaque);
    uint32_t reg = (addr >> 3) & 3;

    if (reg == ECCB_CTL) {
        pnv_lpc_do_eccb(lpc, val);
    } else if (reg == ECCB_DATA) {
        /* Data lives in the top 32 bits of the register */
        lpc->eccb_data_reg = val >> 32;
    } else {
        /* ECCB_RESET: XXXX not modelled; ECCB_STAT: read-only */
    }
}
/* XSCOM accesses are always 8 bytes wide, big-endian */
static const MemoryRegionOps pnv_lpc_xscom_ops = {
    .read = pnv_lpc_xscom_read,
    .write = pnv_lpc_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
/*
 * P9 direct-MMIO read into the OPB address space.  Only 1- and 4-byte
 * accesses are implemented; other sizes and bus errors are logged and
 * return 0 / garbage respectively.
 */
static uint64_t pnv_lpc_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvLpcController *lpc = PNV_LPC(opaque);
    uint64_t val = 0;
    uint32_t opb_addr = addr & ECCB_CTL_ADDR_MASK;
    MemTxResult result;

    switch (size) {
    case 4:
        val = address_space_ldl(&lpc->opb_as, opb_addr, MEMTXATTRS_UNSPECIFIED,
                                &result);
        break;
    case 1:
        val = address_space_ldub(&lpc->opb_as, opb_addr, MEMTXATTRS_UNSPECIFIED,
                                 &result);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "OPB read failed at @0x%"
                      HWADDR_PRIx " invalid size %d\n", addr, size);
        return 0;
    }

    if (result != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR, "OPB read failed at @0x%"
                      HWADDR_PRIx "\n", addr);
    }
    return val;
}
/*
 * P9 direct-MMIO write into the OPB address space.  Only 1- and 4-byte
 * accesses are implemented; other sizes and bus errors are logged.
 */
static void pnv_lpc_mmio_write(void *opaque, hwaddr addr,
                               uint64_t val, unsigned size)
{
    PnvLpcController *lpc = PNV_LPC(opaque);
    uint32_t opb_addr = addr & ECCB_CTL_ADDR_MASK;
    MemTxResult result;

    switch (size) {
    case 4:
        address_space_stl(&lpc->opb_as, opb_addr, val, MEMTXATTRS_UNSPECIFIED,
                          &result);
        break;
    case 1:
        address_space_stb(&lpc->opb_as, opb_addr, val, MEMTXATTRS_UNSPECIFIED,
                          &result);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "OPB write failed at @0x%"
                      HWADDR_PRIx " invalid size %d\n", addr, size);
        return;
    }

    if (result != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR, "OPB write failed at @0x%"
                      HWADDR_PRIx "\n", addr);
    }
}
/* P9 LPCM MMIO window: byte and word accesses, big-endian */
static const MemoryRegionOps pnv_lpc_mmio_ops = {
    .read = pnv_lpc_mmio_read,
    .write = pnv_lpc_mmio_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_BIG_ENDIAN,
};
/*
 * Recompute the interrupt chain: LPC HC irqstat/irqmask -> OPB master
 * input line -> OPB latched status -> PSI interrupt.  Called after any
 * write that can change one of those registers.
 */
static void pnv_lpc_eval_irqs(PnvLpcController *lpc)
{
    bool lpc_to_opb_irq = false;
    PnvLpcClass *plc = PNV_LPC_GET_CLASS(lpc);

    /* Update LPC controller to OPB line */
    if (lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_EN) {
        uint32_t irqs;

        irqs = lpc->lpc_hc_irqstat & lpc->lpc_hc_irqmask;
        lpc_to_opb_irq = (irqs != 0);
    }

    /* We don't honor the polarity register, it's pointless and unused
     * anyway
     */
    if (lpc_to_opb_irq) {
        lpc->opb_irq_input |= OPB_MASTER_IRQ_LPC;
    } else {
        lpc->opb_irq_input &= ~OPB_MASTER_IRQ_LPC;
    }

    /* Update OPB internal latch: sticky, cleared by writing 1s to
     * OPB_MASTER_LS_IRQ_STAT
     */
    lpc->opb_irq_stat |= lpc->opb_irq_input & lpc->opb_irq_mask;

    /* Reflect the interrupt */
    pnv_psi_irq_set(lpc->psi, plc->psi_irq, lpc->opb_irq_stat != 0);
}
/* Read one LPC Host Controller register; unimplemented offsets are
 * logged and return all-ones.
 */
static uint64_t lpc_hc_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvLpcController *lpc = opaque;
    uint64_t ret = 0xfffffffffffffffful;

    if (addr == LPC_HC_FW_SEG_IDSEL) {
        ret = lpc->lpc_hc_fw_seg_idsel;
    } else if (addr == LPC_HC_FW_RD_ACC_SIZE) {
        ret = lpc->lpc_hc_fw_rd_acc_size;
    } else if (addr == LPC_HC_IRQSER_CTRL) {
        ret = lpc->lpc_hc_irqser_ctrl;
    } else if (addr == LPC_HC_IRQMASK) {
        ret = lpc->lpc_hc_irqmask;
    } else if (addr == LPC_HC_IRQSTAT) {
        ret = lpc->lpc_hc_irqstat;
    } else if (addr == LPC_HC_ERROR_ADDRESS) {
        ret = lpc->lpc_hc_error_addr;
    } else {
        qemu_log_mask(LOG_UNIMP, "LPC HC Unimplemented register: 0x%"
                      HWADDR_PRIx "\n", addr);
    }
    return ret;
}
/*
 * Write one LPC Host Controller register.  IRQ-related registers trigger
 * a re-evaluation of the interrupt chain; IRQSTAT is write-one-to-clear.
 */
static void lpc_hc_write(void *opaque, hwaddr addr, uint64_t val,
                         unsigned size)
{
    PnvLpcController *lpc = opaque;

    /* XXX Filter out reserved bits */

    switch (addr) {
    case LPC_HC_FW_SEG_IDSEL:
        /* XXX Actually figure out how that works as this impact
         * memory regions/aliases
         */
        lpc->lpc_hc_fw_seg_idsel = val;
        break;
    case LPC_HC_FW_RD_ACC_SIZE:
        lpc->lpc_hc_fw_rd_acc_size = val;
        break;
    case LPC_HC_IRQSER_CTRL:
        lpc->lpc_hc_irqser_ctrl = val;
        pnv_lpc_eval_irqs(lpc);
        break;
    case LPC_HC_IRQMASK:
        lpc->lpc_hc_irqmask = val;
        pnv_lpc_eval_irqs(lpc);
        break;
    case LPC_HC_IRQSTAT:
        /* Write-one-to-clear semantics */
        lpc->lpc_hc_irqstat &= ~val;
        pnv_lpc_eval_irqs(lpc);
        break;
    case LPC_HC_ERROR_ADDRESS:
        /* Read-only */
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "LPC HC Unimplemented register: 0x%"
                      HWADDR_PRIx "\n", addr);
    }
}
/* LPC HC registers: 4-byte accesses only, big-endian */
static const MemoryRegionOps lpc_hc_ops = {
    .read = lpc_hc_read,
    .write = lpc_hc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
/* Read one OPB Master LS register; unimplemented offsets are logged and
 * return all-ones.
 */
static uint64_t opb_master_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvLpcController *lpc = opaque;
    uint64_t val = 0xfffffffffffffffful;

    switch (addr) {
    case OPB_MASTER_LS_ROUTE0: /* TODO */
        val = lpc->opb_irq_route0;
        break;
    case OPB_MASTER_LS_ROUTE1: /* TODO */
        val = lpc->opb_irq_route1;
        break;
    case OPB_MASTER_LS_IRQ_STAT:
        val = lpc->opb_irq_stat;
        break;
    case OPB_MASTER_LS_IRQ_MASK:
        val = lpc->opb_irq_mask;
        break;
    case OPB_MASTER_LS_IRQ_POL:
        val = lpc->opb_irq_pol;
        break;
    case OPB_MASTER_LS_IRQ_INPUT:
        val = lpc->opb_irq_input;
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "OPBM: read on unimplemented register: 0x%"
                      HWADDR_PRIx "\n", addr);
    }

    return val;
}
/*
 * Write one OPB Master LS register.  IRQ_STAT is write-one-to-clear,
 * IRQ_INPUT is read-only, and mask/polarity changes re-evaluate the
 * interrupt chain.
 */
static void opb_master_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    PnvLpcController *lpc = opaque;

    switch (addr) {
    case OPB_MASTER_LS_ROUTE0: /* TODO */
        lpc->opb_irq_route0 = val;
        break;
    case OPB_MASTER_LS_ROUTE1: /* TODO */
        lpc->opb_irq_route1 = val;
        break;
    case OPB_MASTER_LS_IRQ_STAT:
        /* Write-one-to-clear semantics */
        lpc->opb_irq_stat &= ~val;
        pnv_lpc_eval_irqs(lpc);
        break;
    case OPB_MASTER_LS_IRQ_MASK:
        lpc->opb_irq_mask = val;
        pnv_lpc_eval_irqs(lpc);
        break;
    case OPB_MASTER_LS_IRQ_POL:
        lpc->opb_irq_pol = val;
        pnv_lpc_eval_irqs(lpc);
        break;
    case OPB_MASTER_LS_IRQ_INPUT:
        /* Read only */
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "OPBM: write on unimplemented register: 0x%"
                      HWADDR_PRIx " val=0x%08"PRIx64"\n", addr, val);
    }
}
/* OPB Master LS registers: 4-byte accesses only, big-endian */
static const MemoryRegionOps opb_master_ops = {
    .read = opb_master_read,
    .write = opb_master_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
/* P8 realize: chain to the base class, then expose the ECCB registers
 * through an XSCOM region.
 */
static void pnv_lpc_power8_realize(DeviceState *dev, Error **errp)
{
    PnvLpcController *lpc = PNV_LPC(dev);
    PnvLpcClass *plc = PNV_LPC_GET_CLASS(dev);
    Error *local_err = NULL;

    plc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* P8 uses a XSCOM region for LPC registers */
    pnv_xscom_region_init(&lpc->xscom_regs, OBJECT(lpc),
                          &pnv_lpc_xscom_ops, lpc, "xscom-lpc",
                          PNV_XSCOM_LPC_SIZE);
}
/* P8 class init: XSCOM dt hook, P8 PSI irq number, wrapped realize */
static void pnv_lpc_power8_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    PnvLpcClass *plc = PNV_LPC_CLASS(klass);

    dc->desc = "PowerNV LPC Controller POWER8";

    xdc->dt_xscom = pnv_lpc_dt_xscom;

    plc->psi_irq = PSIHB_IRQ_LPC_I2C;

    device_class_set_parent_realize(dc, pnv_lpc_power8_realize,
                                    &plc->parent_realize);
}
/* P8 variant: also implements the XSCOM interface for dt population */
static const TypeInfo pnv_lpc_power8_info = {
    .name = TYPE_PNV8_LPC,
    .parent = TYPE_PNV_LPC,
    .instance_size = sizeof(PnvLpcController),
    .class_init = pnv_lpc_power8_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};
/* P9 realize: chain to the base class, then expose the LPCM space as a
 * plain MMIO region (no XSCOM indirection on P9).
 */
static void pnv_lpc_power9_realize(DeviceState *dev, Error **errp)
{
    PnvLpcController *lpc = PNV_LPC(dev);
    PnvLpcClass *plc = PNV_LPC_GET_CLASS(dev);
    Error *local_err = NULL;

    plc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* P9 uses a MMIO region */
    memory_region_init_io(&lpc->xscom_regs, OBJECT(lpc), &pnv_lpc_mmio_ops,
                          lpc, "lpcm", PNV9_LPCM_SIZE);
}
/* P9 class init: P9 PSI irq number and wrapped realize */
static void pnv_lpc_power9_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvLpcClass *plc = PNV_LPC_CLASS(klass);

    dc->desc = "PowerNV LPC Controller POWER9";

    plc->psi_irq = PSIHB9_IRQ_LPCHC;

    device_class_set_parent_realize(dc, pnv_lpc_power9_realize,
                                    &plc->parent_realize);
}
/* P9 variant: dt handled by pnv_dt_lpc(), no XSCOM interface needed */
static const TypeInfo pnv_lpc_power9_info = {
    .name = TYPE_PNV9_LPC,
    .parent = TYPE_PNV_LPC,
    .instance_size = sizeof(PnvLpcController),
    .class_init = pnv_lpc_power9_class_init,
};
/*
 * Base-class realize: resolve the PSI link, then build the OPB address
 * space and its subregions -- ISA IO/MEM/FW windows plus the OPB master
 * and LPC HC register banks.  The chip-specific subclasses add their own
 * outward-facing region on top (XSCOM on P8, MMIO on P9).
 */
static void pnv_lpc_realize(DeviceState *dev, Error **errp)
{
    PnvLpcController *lpc = PNV_LPC(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "psi", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'psi' not found: ");
        return;
    }
    /* The LPC controller needs PSI to generate interrupts */
    lpc->psi = PNV_PSI(obj);

    /* Reg inits */
    lpc->lpc_hc_fw_rd_acc_size = LPC_HC_FW_RD_4B;

    /* Create address space and backing MR for the OPB bus */
    memory_region_init(&lpc->opb_mr, OBJECT(dev), "lpc-opb", 0x100000000ull);
    address_space_init(&lpc->opb_as, &lpc->opb_mr, "lpc-opb");

    /* Create ISA IO and Mem space regions which are the root of
     * the ISA bus (ie, ISA address spaces). We don't create a
     * separate one for FW which we alias to memory.
     */
    memory_region_init(&lpc->isa_io, OBJECT(dev), "isa-io", ISA_IO_SIZE);
    memory_region_init(&lpc->isa_mem, OBJECT(dev), "isa-mem", ISA_MEM_SIZE);
    memory_region_init(&lpc->isa_fw, OBJECT(dev),  "isa-fw", ISA_FW_SIZE);

    /* Create windows from the OPB space to the ISA space */
    memory_region_init_alias(&lpc->opb_isa_io, OBJECT(dev), "lpc-isa-io",
                             &lpc->isa_io, 0, LPC_IO_OPB_SIZE);
    memory_region_add_subregion(&lpc->opb_mr, LPC_IO_OPB_ADDR,
                                &lpc->opb_isa_io);
    memory_region_init_alias(&lpc->opb_isa_mem, OBJECT(dev), "lpc-isa-mem",
                             &lpc->isa_mem, 0, LPC_MEM_OPB_SIZE);
    memory_region_add_subregion(&lpc->opb_mr, LPC_MEM_OPB_ADDR,
                                &lpc->opb_isa_mem);
    memory_region_init_alias(&lpc->opb_isa_fw, OBJECT(dev), "lpc-isa-fw",
                             &lpc->isa_fw, 0, LPC_FW_OPB_SIZE);
    memory_region_add_subregion(&lpc->opb_mr, LPC_FW_OPB_ADDR,
                                &lpc->opb_isa_fw);

    /* Create MMIO regions for LPC HC and OPB registers */
    memory_region_init_io(&lpc->opb_master_regs, OBJECT(dev), &opb_master_ops,
                          lpc, "lpc-opb-master", LPC_OPB_REGS_OPB_SIZE);
    memory_region_add_subregion(&lpc->opb_mr, LPC_OPB_REGS_OPB_ADDR,
                                &lpc->opb_master_regs);
    memory_region_init_io(&lpc->lpc_hc_regs, OBJECT(dev), &lpc_hc_ops, lpc,
                          "lpc-hc", LPC_HC_REGS_OPB_SIZE);
    memory_region_add_subregion(&lpc->opb_mr, LPC_HC_REGS_OPB_ADDR,
                                &lpc->lpc_hc_regs);
}
/* Abstract base class: shared realize and description */
static void pnv_lpc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = pnv_lpc_realize;
    dc->desc = "PowerNV LPC Controller";
}
/* Abstract base type; instantiate TYPE_PNV8_LPC or TYPE_PNV9_LPC */
static const TypeInfo pnv_lpc_info = {
    .name = TYPE_PNV_LPC,
    .parent = TYPE_DEVICE,
    .class_init = pnv_lpc_class_init,
    .class_size = sizeof(PnvLpcClass),
    .abstract = true,
};
/* Register the base type and both chip-specific variants */
static void pnv_lpc_register_types(void)
{
    type_register_static(&pnv_lpc_info);
    type_register_static(&pnv_lpc_power8_info);
    type_register_static(&pnv_lpc_power9_info);
}

type_init(pnv_lpc_register_types)
/* If we don't use the built-in LPC interrupt deserializer, we need
* to provide a set of qirqs for the ISA bus or things will go bad.
*
* Most machines using pre-Naples chips (without said deserializer)
* have a CPLD that will collect the SerIRQ and shoot them as a
* single level interrupt to the P8 chip. So let's setup a hook
* for doing just that.
*/
/*
 * CPLD-style ISA irq handler (pre-Naples chips): collect all SerIRQ
 * levels in a machine-global bitmap and raise a single PSI external
 * interrupt whenever any of them is asserted.
 */
static void pnv_lpc_isa_irq_handler_cpld(void *opaque, int n, int level)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    uint32_t old_state = pnv->cpld_irqstate;
    PnvLpcController *lpc = PNV_LPC(opaque);

    if (level) {
        pnv->cpld_irqstate |= 1u << n;
    } else {
        pnv->cpld_irqstate &= ~(1u << n);
    }

    /* Only poke PSI when the aggregate state actually changed */
    if (pnv->cpld_irqstate != old_state) {
        pnv_psi_irq_set(lpc->psi, PSIHB_IRQ_EXTERNAL, pnv->cpld_irqstate != 0);
    }
}
/*
 * Native SerIRQ handler: latch the asserted level into lpc_hc_irqstat
 * (bit SERIRQ0 >> n) and re-evaluate the interrupt chain.  Deassertion
 * is ignored; software clears the latch via LPC_HC_IRQSTAT.
 */
static void pnv_lpc_isa_irq_handler(void *opaque, int n, int level)
{
    PnvLpcController *lpc = PNV_LPC(opaque);

    /* The Naples HW latches the 1 levels, clearing is done by SW */
    if (level) {
        lpc->lpc_hc_irqstat |= LPC_HC_IRQ_SERIRQ0 >> n;
        pnv_lpc_eval_irqs(lpc);
    }
}
/*
 * Create the ISA bus rooted on this LPC controller's IO/MEM regions and
 * wire its 16 IRQ lines to either the CPLD-style or native handler.
 * Returns NULL (with *errp set) on failure.
 */
ISABus *pnv_lpc_isa_create(PnvLpcController *lpc, bool use_cpld, Error **errp)
{
    Error *local_err = NULL;
    ISABus *isa_bus;
    qemu_irq *irqs;
    qemu_irq_handler handler;

    /* let isa_bus_new() create its own bridge on SysBus otherwise
     * devices speficied on the command line won't find the bus and
     * will fail to create.
     */
    isa_bus = isa_bus_new(NULL, &lpc->isa_mem, &lpc->isa_io, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return NULL;
    }

    /* Not all variants have a working serial irq decoder. If not,
     * handling of LPC interrupts becomes a platform issue (some
     * platforms have a CPLD to do it).
     */
    if (use_cpld) {
        handler = pnv_lpc_isa_irq_handler_cpld;
    } else {
        handler = pnv_lpc_isa_irq_handler;
    }

    irqs = qemu_allocate_irqs(handler, lpc, ISA_NUM_IRQS);

    isa_bus_irqs(isa_bus, irqs);
    return isa_bus;
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/ppc/spapr_rtas_ddw.c
|
<reponame>pmp-tool/PMP
/*
* QEMU sPAPR Dynamic DMA windows support
*
* Copyright (c) 2015 <NAME>, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "hw/ppc/spapr.h"
#include "hw/pci-host/spapr.h"
#include "trace.h"
/* object_child_foreach() callback: bump the counter in opaque for every
 * enabled (non-empty) TCE table child.  Always returns 0 to keep iterating.
 */
static int spapr_phb_get_active_win_num_cb(Object *child, void *opaque)
{
    SpaprTceTable *tcet =
        (SpaprTceTable *) object_dynamic_cast(child, TYPE_SPAPR_TCE_TABLE);

    if (tcet && tcet->nb_table) {
        (*(unsigned *)opaque)++;
    }
    return 0;
}
/* Count the PHB's currently-enabled DMA windows (non-empty TCE tables) */
static unsigned spapr_phb_get_active_win_num(SpaprPhbState *sphb)
{
    unsigned ret = 0;

    object_child_foreach(OBJECT(sphb), spapr_phb_get_active_win_num_cb, &ret);

    return ret;
}
/* object_child_foreach() callback: record the LIOBN of the first disabled
 * (empty) TCE table and stop iteration by returning non-zero.
 */
static int spapr_phb_get_free_liobn_cb(Object *child, void *opaque)
{
    SpaprTceTable *tcet;

    tcet = (SpaprTceTable *) object_dynamic_cast(child, TYPE_SPAPR_TCE_TABLE);
    if (tcet && !tcet->nb_table) {
        *(uint32_t *)opaque = tcet->liobn;
        return 1;
    }
    return 0;
}
/* Find an unused LIOBN on the PHB; returns 0 if none is free */
static unsigned spapr_phb_get_free_liobn(SpaprPhbState *sphb)
{
    uint32_t liobn = 0;

    object_child_foreach(OBJECT(sphb), spapr_phb_get_free_liobn_cb, &liobn);

    return liobn;
}
/* Translate a page-size bitmask (bit per page shift) into the LoPAPR
 * RTAS_DDW_PGSIZE_* query mask returned by ibm,query-pe-dma-window.
 * Unsupported shifts are simply ignored.
 */
static uint32_t spapr_page_mask_to_query_mask(uint64_t page_mask)
{
    /* Supported page shifts and their LoPAPR query-mask equivalents */
    const struct { int shift; uint32_t mask; } shift_to_mask[] = {
        { 12, RTAS_DDW_PGSIZE_4K },
        { 16, RTAS_DDW_PGSIZE_64K },
        { 24, RTAS_DDW_PGSIZE_16M },
        { 25, RTAS_DDW_PGSIZE_32M },
        { 26, RTAS_DDW_PGSIZE_64M },
        { 27, RTAS_DDW_PGSIZE_128M },
        { 28, RTAS_DDW_PGSIZE_256M },
        { 34, RTAS_DDW_PGSIZE_16G },
    };
    uint32_t query_mask = 0;
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(shift_to_mask); idx++) {
        if (page_mask & (1ULL << shift_to_mask[idx].shift)) {
            query_mask |= shift_to_mask[idx].mask;
        }
    }

    return query_mask;
}
/*
 * RTAS ibm,query-pe-dma-window: report how many additional DMA windows
 * the PHB can create, the largest possible window size, and the page
 * sizes supported (in LoPAPR query-mask format).
 */
static void rtas_ibm_query_pe_dma_window(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         uint32_t token, uint32_t nargs,
                                         target_ulong args,
                                         uint32_t nret, target_ulong rets)
{
    SpaprPhbState *sphb;
    uint64_t buid;
    uint32_t avail, addr, pgmask = 0;

    if ((nargs != 3) || (nret != 5)) {
        goto param_error_exit;
    }

    /* args: config addr, BUID hi, BUID lo */
    buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
    addr = rtas_ld(args, 0);
    sphb = spapr_pci_find_phb(spapr, buid);
    if (!sphb || !sphb->ddw_enabled) {
        goto param_error_exit;
    }

    /* Translate page mask to LoPAPR format */
    pgmask = spapr_page_mask_to_query_mask(sphb->page_size_mask);

    avail = SPAPR_PCI_DMA_MAX_WINDOWS - spapr_phb_get_active_win_num(sphb);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, avail);
    rtas_st(rets, 2, 0x80000000); /* The largest window we can possibly have */
    rtas_st(rets, 3, pgmask);
    rtas_st(rets, 4, 0); /* DMA migration mask, not supported */

    trace_spapr_iommu_ddw_query(buid, addr, avail, 0x80000000, pgmask);
    return;

param_error_exit:
    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
/*
 * RTAS ibm,create-pe-dma-window: enable a spare TCE table on the PHB as a
 * new DMA window with the requested page size and window size, returning
 * its LIOBN and bus offset to the guest.
 */
static void rtas_ibm_create_pe_dma_window(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          uint32_t token, uint32_t nargs,
                                          target_ulong args,
                                          uint32_t nret, target_ulong rets)
{
    SpaprPhbState *sphb;
    SpaprTceTable *tcet = NULL;
    uint32_t addr, page_shift, window_shift, liobn;
    uint64_t buid, win_addr;
    int windows;

    if ((nargs != 5) || (nret != 4)) {
        goto param_error_exit;
    }

    /* args: config addr, BUID hi, BUID lo, page shift, window shift */
    buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
    addr = rtas_ld(args, 0);
    sphb = spapr_pci_find_phb(spapr, buid);
    if (!sphb || !sphb->ddw_enabled) {
        goto param_error_exit;
    }

    page_shift = rtas_ld(args, 3);
    window_shift = rtas_ld(args, 4);
    liobn = spapr_phb_get_free_liobn(sphb);
    windows = spapr_phb_get_active_win_num(sphb);

    /* Requested page size must be supported and fit inside the window */
    if (!(sphb->page_size_mask & (1ULL << page_shift)) ||
        (window_shift < page_shift)) {
        goto param_error_exit;
    }

    if (!liobn || !sphb->ddw_enabled || windows == SPAPR_PCI_DMA_MAX_WINDOWS) {
        goto hw_error_exit;
    }

    tcet = spapr_tce_find_by_liobn(liobn);
    if (!tcet) {
        goto hw_error_exit;
    }

    /* First window goes at the 32-bit base, second at the 64-bit base */
    win_addr = (windows == 0) ? sphb->dma_win_addr : sphb->dma64_win_addr;
    /*
     * We have just created a window, we know for the fact that it is empty,
     * use a hack to avoid iterating over the table as it is quite possible
     * to have billions of TCEs, all empty.
     * Note that we cannot delay this to the first H_PUT_TCE as this hcall is
     * mostly likely to be handled in KVM so QEMU just does not know if it
     * happened.
     */
    tcet->skipping_replay = true;
    spapr_tce_table_enable(tcet, page_shift, win_addr,
                           1ULL << (window_shift - page_shift));
    tcet->skipping_replay = false;
    if (!tcet->nb_table) {
        goto hw_error_exit;
    }

    trace_spapr_iommu_ddw_create(buid, addr, 1ULL << page_shift,
                                 1ULL << window_shift, tcet->bus_offset, liobn);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, liobn);
    rtas_st(rets, 2, tcet->bus_offset >> 32);
    rtas_st(rets, 3, tcet->bus_offset & ((uint32_t) -1));

    return;

hw_error_exit:
    rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
    return;

param_error_exit:
    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
/*
 * ibm,remove-pe-dma-window RTAS call: tear down the DMA window
 * identified by the LIOBN passed in arg 0.
 */
static void rtas_ibm_remove_pe_dma_window(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          uint32_t token, uint32_t nargs,
                                          target_ulong args,
                                          uint32_t nret, target_ulong rets)
{
    SpaprPhbState *sphb;
    SpaprTceTable *tcet;
    uint32_t liobn;

    if ((nargs != 1) || (nret != 1)) {
        goto param_error_exit;
    }

    liobn = rtas_ld(args, 0);
    tcet = spapr_tce_find_by_liobn(liobn);
    if (!tcet) {
        goto param_error_exit;
    }

    /* The TCE table's QOM parent is the owning PHB */
    sphb = SPAPR_PCI_HOST_BRIDGE(OBJECT(tcet)->parent);
    /* Reject when DDW is disabled or the window is not currently enabled */
    if (!sphb || !sphb->ddw_enabled || !tcet->nb_table) {
        goto param_error_exit;
    }

    spapr_tce_table_disable(tcet);
    trace_spapr_iommu_ddw_remove(liobn);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    return;

param_error_exit:
    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
/*
 * ibm,reset-pe-dma-window RTAS call: restore the PHB's DMA setup to
 * its default state.  Takes the config address (arg 0) and the BUID
 * (args 1/2); returns a single status word.
 */
static void rtas_ibm_reset_pe_dma_window(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         uint32_t token, uint32_t nargs,
                                         target_ulong args,
                                         uint32_t nret, target_ulong rets)
{
    SpaprPhbState *phb;
    uint64_t phb_buid;
    uint32_t config_addr;

    if (nargs != 3 || nret != 1) {
        goto param_error_exit;
    }

    phb_buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
    config_addr = rtas_ld(args, 0);
    phb = spapr_pci_find_phb(spapr, phb_buid);
    if (!phb || !phb->ddw_enabled) {
        goto param_error_exit;
    }

    spapr_phb_dma_reset(phb);
    trace_spapr_iommu_ddw_reset(phb_buid, config_addr);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    return;

param_error_exit:
    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
/* Register the four LoPAPR dynamic DMA window (DDW) RTAS calls. */
static void spapr_rtas_ddw_init(void)
{
    spapr_rtas_register(RTAS_IBM_QUERY_PE_DMA_WINDOW,
                        "ibm,query-pe-dma-window",
                        rtas_ibm_query_pe_dma_window);
    spapr_rtas_register(RTAS_IBM_CREATE_PE_DMA_WINDOW,
                        "ibm,create-pe-dma-window",
                        rtas_ibm_create_pe_dma_window);
    spapr_rtas_register(RTAS_IBM_REMOVE_PE_DMA_WINDOW,
                        "ibm,remove-pe-dma-window",
                        rtas_ibm_remove_pe_dma_window);
    spapr_rtas_register(RTAS_IBM_RESET_PE_DMA_WINDOW,
                        "ibm,reset-pe-dma-window",
                        rtas_ibm_reset_pe_dma_window);
}
type_init(spapr_rtas_ddw_init)
|
pmp-tool/PMP
|
src/qemu/src-pmp/block/qcow2.h
|
/*
* Block driver for the QCOW version 2 format
*
* Copyright (c) 2004-2006 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef BLOCK_QCOW2_H
#define BLOCK_QCOW2_H
#include "crypto/block.h"
#include "qemu/coroutine.h"
#include "qemu/units.h"
//#define DEBUG_ALLOC
//#define DEBUG_ALLOC2
//#define DEBUG_EXT
#define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
#define QCOW_CRYPT_NONE 0
#define QCOW_CRYPT_AES 1
#define QCOW_CRYPT_LUKS 2
#define QCOW_MAX_CRYPT_CLUSTERS 32
#define QCOW_MAX_SNAPSHOTS 65536
/* Field widths in qcow2 mean normal cluster offsets cannot reach
* 64PB; depending on cluster size, compressed clusters can have a
* smaller limit (64PB for up to 16k clusters, then ramps down to
* 512TB for 2M clusters). */
#define QCOW_MAX_CLUSTER_OFFSET ((1ULL << 56) - 1)
/* 8 MB refcount table is enough for 2 PB images at 64k cluster size
* (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
#define QCOW_MAX_REFTABLE_SIZE (8 * MiB)
/* 32 MB L1 table is enough for 2 PB images at 64k cluster size
* (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
#define QCOW_MAX_L1_SIZE (32 * MiB)
/* Allow for an average of 1k per snapshot table entry, should be plenty of
* space for snapshot names and IDs */
#define QCOW_MAX_SNAPSHOTS_SIZE (1024 * QCOW_MAX_SNAPSHOTS)
/* Bitmap header extension constraints */
#define QCOW2_MAX_BITMAPS 65535
#define QCOW2_MAX_BITMAP_DIRECTORY_SIZE (1024 * QCOW2_MAX_BITMAPS)
/* indicate that the refcount of the referenced cluster is exactly one. */
#define QCOW_OFLAG_COPIED (1ULL << 63)
/* indicate that the cluster is compressed (they never have the copied flag) */
#define QCOW_OFLAG_COMPRESSED (1ULL << 62)
/* The cluster reads as all zeros */
#define QCOW_OFLAG_ZERO (1ULL << 0)
#define MIN_CLUSTER_BITS 9
#define MAX_CLUSTER_BITS 21
/* Must be at least 2 to cover COW */
#define MIN_L2_CACHE_SIZE 2 /* cache entries */
/* Must be at least 4 to cover all cases of refcount table growth */
#define MIN_REFCOUNT_CACHE_SIZE 4 /* clusters */
#ifdef CONFIG_LINUX
#define DEFAULT_L2_CACHE_MAX_SIZE (32 * MiB)
#define DEFAULT_CACHE_CLEAN_INTERVAL 600 /* seconds */
#else
#define DEFAULT_L2_CACHE_MAX_SIZE (8 * MiB)
/* Cache clean interval is currently available only on Linux, so must be 0 */
#define DEFAULT_CACHE_CLEAN_INTERVAL 0
#endif
#define DEFAULT_CLUSTER_SIZE 65536
#define QCOW2_OPT_DATA_FILE "data-file"
#define QCOW2_OPT_LAZY_REFCOUNTS "lazy-refcounts"
#define QCOW2_OPT_DISCARD_REQUEST "pass-discard-request"
#define QCOW2_OPT_DISCARD_SNAPSHOT "pass-discard-snapshot"
#define QCOW2_OPT_DISCARD_OTHER "pass-discard-other"
#define QCOW2_OPT_OVERLAP "overlap-check"
#define QCOW2_OPT_OVERLAP_TEMPLATE "overlap-check.template"
#define QCOW2_OPT_OVERLAP_MAIN_HEADER "overlap-check.main-header"
#define QCOW2_OPT_OVERLAP_ACTIVE_L1 "overlap-check.active-l1"
#define QCOW2_OPT_OVERLAP_ACTIVE_L2 "overlap-check.active-l2"
#define QCOW2_OPT_OVERLAP_REFCOUNT_TABLE "overlap-check.refcount-table"
#define QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK "overlap-check.refcount-block"
#define QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE "overlap-check.snapshot-table"
#define QCOW2_OPT_OVERLAP_INACTIVE_L1 "overlap-check.inactive-l1"
#define QCOW2_OPT_OVERLAP_INACTIVE_L2 "overlap-check.inactive-l2"
#define QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY "overlap-check.bitmap-directory"
#define QCOW2_OPT_CACHE_SIZE "cache-size"
#define QCOW2_OPT_L2_CACHE_SIZE "l2-cache-size"
#define QCOW2_OPT_L2_CACHE_ENTRY_SIZE "l2-cache-entry-size"
#define QCOW2_OPT_REFCOUNT_CACHE_SIZE "refcount-cache-size"
#define QCOW2_OPT_CACHE_CLEAN_INTERVAL "cache-clean-interval"
/*
 * qcow2 image header layout as stored in the file.
 * NOTE(review): multi-byte fields are presumably big-endian on disk per
 * the qcow2 spec — confirm against the byteswapping in block/qcow2.c.
 */
typedef struct QCowHeader {
    uint32_t magic;                  /* QCOW_MAGIC */
    uint32_t version;
    uint64_t backing_file_offset;
    uint32_t backing_file_size;
    uint32_t cluster_bits;
    uint64_t size; /* in bytes */
    uint32_t crypt_method;           /* QCOW_CRYPT_NONE / _AES / _LUKS */
    uint32_t l1_size; /* XXX: save number of clusters instead ? */
    uint64_t l1_table_offset;
    uint64_t refcount_table_offset;
    uint32_t refcount_table_clusters;
    uint32_t nb_snapshots;
    uint64_t snapshots_offset;
    /* The following fields are only valid for version >= 3 */
    uint64_t incompatible_features;  /* QCOW2_INCOMPAT_* bits */
    uint64_t compatible_features;    /* QCOW2_COMPAT_* bits */
    uint64_t autoclear_features;     /* QCOW2_AUTOCLEAR_* bits */
    uint32_t refcount_order;
    uint32_t header_length;
} QEMU_PACKED QCowHeader;
/* On-disk snapshot table entry; extra data, id and name follow it. */
typedef struct QEMU_PACKED QCowSnapshotHeader {
    /* header is 8 byte aligned */
    uint64_t l1_table_offset;
    uint32_t l1_size;
    uint16_t id_str_size;
    uint16_t name_size;
    uint32_t date_sec;
    uint32_t date_nsec;
    uint64_t vm_clock_nsec;
    uint32_t vm_state_size;
    uint32_t extra_data_size; /* for extension */
    /* extra data follows */
    /* id_str follows */
    /* name follows */
} QCowSnapshotHeader;
/* Optional extra data following QCowSnapshotHeader (see extra_data_size). */
typedef struct QEMU_PACKED QCowSnapshotExtraData {
    uint64_t vm_state_size_large; /* presumably supersedes the 32-bit
                                     vm_state_size field — confirm */
    uint64_t disk_size;
} QCowSnapshotExtraData;
/* In-memory representation of one snapshot. */
typedef struct QCowSnapshot {
    uint64_t l1_table_offset;
    uint32_t l1_size;
    char *id_str;   /* heap-allocated snapshot ID string */
    char *name;     /* heap-allocated snapshot name */
    uint64_t disk_size;
    uint64_t vm_state_size;
    uint32_t date_sec;
    uint32_t date_nsec;
    uint64_t vm_clock_nsec;
} QCowSnapshot;
struct Qcow2Cache;
typedef struct Qcow2Cache Qcow2Cache;
typedef struct Qcow2CryptoHeaderExtension {
uint64_t offset;
uint64_t length;
} QEMU_PACKED Qcow2CryptoHeaderExtension;
typedef struct Qcow2UnknownHeaderExtension {
uint32_t magic;
uint32_t len;
QLIST_ENTRY(Qcow2UnknownHeaderExtension) next;
uint8_t data[];
} Qcow2UnknownHeaderExtension;
enum {
QCOW2_FEAT_TYPE_INCOMPATIBLE = 0,
QCOW2_FEAT_TYPE_COMPATIBLE = 1,
QCOW2_FEAT_TYPE_AUTOCLEAR = 2,
};
/* Incompatible feature bits */
enum {
QCOW2_INCOMPAT_DIRTY_BITNR = 0,
QCOW2_INCOMPAT_CORRUPT_BITNR = 1,
QCOW2_INCOMPAT_DATA_FILE_BITNR = 2,
QCOW2_INCOMPAT_DIRTY = 1 << QCOW2_INCOMPAT_DIRTY_BITNR,
QCOW2_INCOMPAT_CORRUPT = 1 << QCOW2_INCOMPAT_CORRUPT_BITNR,
QCOW2_INCOMPAT_DATA_FILE = 1 << QCOW2_INCOMPAT_DATA_FILE_BITNR,
QCOW2_INCOMPAT_MASK = QCOW2_INCOMPAT_DIRTY
| QCOW2_INCOMPAT_CORRUPT
| QCOW2_INCOMPAT_DATA_FILE,
};
/* Compatible feature bits */
enum {
QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR = 0,
QCOW2_COMPAT_LAZY_REFCOUNTS = 1 << QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR,
QCOW2_COMPAT_FEAT_MASK = QCOW2_COMPAT_LAZY_REFCOUNTS,
};
/* Autoclear feature bits */
enum {
QCOW2_AUTOCLEAR_BITMAPS_BITNR = 0,
QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR = 1,
QCOW2_AUTOCLEAR_BITMAPS = 1 << QCOW2_AUTOCLEAR_BITMAPS_BITNR,
QCOW2_AUTOCLEAR_DATA_FILE_RAW = 1 << QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR,
QCOW2_AUTOCLEAR_MASK = QCOW2_AUTOCLEAR_BITMAPS
| QCOW2_AUTOCLEAR_DATA_FILE_RAW,
};
enum qcow2_discard_type {
QCOW2_DISCARD_NEVER = 0,
QCOW2_DISCARD_ALWAYS,
QCOW2_DISCARD_REQUEST,
QCOW2_DISCARD_SNAPSHOT,
QCOW2_DISCARD_OTHER,
QCOW2_DISCARD_MAX
};
typedef struct Qcow2Feature {
uint8_t type;
uint8_t bit;
char name[46];
} QEMU_PACKED Qcow2Feature;
typedef struct Qcow2DiscardRegion {
BlockDriverState *bs;
uint64_t offset;
uint64_t bytes;
QTAILQ_ENTRY(Qcow2DiscardRegion) next;
} Qcow2DiscardRegion;
typedef uint64_t Qcow2GetRefcountFunc(const void *refcount_array,
uint64_t index);
typedef void Qcow2SetRefcountFunc(void *refcount_array,
uint64_t index, uint64_t value);
typedef struct Qcow2BitmapHeaderExt {
uint32_t nb_bitmaps;
uint32_t reserved32;
uint64_t bitmap_directory_size;
uint64_t bitmap_directory_offset;
} QEMU_PACKED Qcow2BitmapHeaderExt;
/* Runtime state of an open qcow2 image (BlockDriverState.opaque). */
typedef struct BDRVQcow2State {
    /* Cluster / L1 / L2 / refcount geometry derived from the header */
    int cluster_bits;
    int cluster_size;
    int cluster_sectors;
    int l2_slice_size;
    int l2_bits;
    int l2_size;
    int l1_size;
    int l1_vm_state_index;
    int refcount_block_bits;
    int refcount_block_size;
    int csize_shift;
    int csize_mask;
    uint64_t cluster_offset_mask;
    /* Active L1 table and its in-memory copy */
    uint64_t l1_table_offset;
    uint64_t *l1_table;
    /* Metadata caches and the timer that drops unused entries */
    Qcow2Cache* l2_table_cache;
    Qcow2Cache* refcount_block_cache;
    QEMUTimer *cache_clean_timer;
    unsigned cache_clean_interval;
    /* Compressed cluster read buffers and the offset currently cached */
    uint8_t *cluster_cache;
    uint8_t *cluster_data;
    uint64_t cluster_cache_offset;
    /* In-flight cluster allocations (QCowL2Meta) */
    QLIST_HEAD(, QCowL2Meta) cluster_allocs;
    /* Refcount table and allocation cursors */
    uint64_t *refcount_table;
    uint64_t refcount_table_offset;
    uint32_t refcount_table_size;
    uint32_t max_refcount_table_index; /* Last used entry in refcount_table */
    uint64_t free_cluster_index;
    uint64_t free_byte_offset;
    CoMutex lock;
    /* Encryption state */
    Qcow2CryptoHeaderExtension crypto_header; /* QCow2 header extension */
    QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */
    QCryptoBlock *crypto; /* Disk encryption format driver */
    bool crypt_physical_offset; /* Whether to use virtual or physical offset
                                   for encryption initialization vector tweak */
    uint32_t crypt_method_header;
    /* Snapshot table location and in-memory copy */
    uint64_t snapshots_offset;
    int snapshots_size;
    unsigned int nb_snapshots;
    QCowSnapshot *snapshots;
    /* Persistent dirty bitmap directory location */
    uint32_t nb_bitmaps;
    uint64_t bitmap_directory_size;
    uint64_t bitmap_directory_offset;
    int flags;
    int qcow_version;
    bool use_lazy_refcounts;
    int refcount_order;
    int refcount_bits;
    uint64_t refcount_max;
    /* Accessors for reading/writing refcount array entries */
    Qcow2GetRefcountFunc *get_refcount;
    Qcow2SetRefcountFunc *set_refcount;
    bool discard_passthrough[QCOW2_DISCARD_MAX];
    int overlap_check; /* bitmask of Qcow2MetadataOverlap values */
    bool signaled_corruption;
    /* Feature bitmaps from the image header */
    uint64_t incompatible_features;
    uint64_t compatible_features;
    uint64_t autoclear_features;
    /* Uninterpreted header fields/extensions, preserved across rewrites */
    size_t unknown_header_fields_size;
    void* unknown_header_fields;
    QLIST_HEAD(, Qcow2UnknownHeaderExtension) unknown_header_ext;
    /* Queued discard requests (see qcow2_process_discards()) */
    QTAILQ_HEAD (, Qcow2DiscardRegion) discards;
    bool cache_discards;
    /* Backing file path and format as stored in the image (this is not the
     * effective path/format, which may be the result of a runtime option
     * override) */
    char *image_backing_file;
    char *image_backing_format;
    char *image_data_file;
    /* Compressed-write coroutine throttling — TODO confirm exact semantics */
    CoQueue compress_wait_queue;
    int nb_compress_threads;
    /* External data file child (see has_data_file()) */
    BdrvChild *data_file;
} BDRVQcow2State;
typedef struct Qcow2COWRegion {
/**
* Offset of the COW region in bytes from the start of the first cluster
* touched by the request.
*/
unsigned offset;
/** Number of bytes to copy */
unsigned nb_bytes;
} Qcow2COWRegion;
/**
* Describes an in-flight (part of a) write request that writes to clusters
* that are not referenced in their L2 table yet.
*/
typedef struct QCowL2Meta
{
/** Guest offset of the first newly allocated cluster */
uint64_t offset;
/** Host offset of the first newly allocated cluster */
uint64_t alloc_offset;
/** Number of newly allocated clusters */
int nb_clusters;
/** Do not free the old clusters */
bool keep_old_clusters;
/**
* Requests that overlap with this allocation and wait to be restarted
* when the allocating request has completed.
*/
CoQueue dependent_requests;
/**
* The COW Region between the start of the first allocated cluster and the
* area the guest actually writes to.
*/
Qcow2COWRegion cow_start;
/**
* The COW Region between the area the guest actually writes to and the
* end of the last allocated cluster.
*/
Qcow2COWRegion cow_end;
/**
* The I/O vector with the data from the actual guest write request.
* If non-NULL, this is meant to be merged together with the data
* from @cow_start and @cow_end into one single write operation.
*/
QEMUIOVector *data_qiov;
/** Pointer to next L2Meta of the same write request */
struct QCowL2Meta *next;
QLIST_ENTRY(QCowL2Meta) next_in_flight;
} QCowL2Meta;
typedef enum QCow2ClusterType {
QCOW2_CLUSTER_UNALLOCATED,
QCOW2_CLUSTER_ZERO_PLAIN,
QCOW2_CLUSTER_ZERO_ALLOC,
QCOW2_CLUSTER_NORMAL,
QCOW2_CLUSTER_COMPRESSED,
} QCow2ClusterType;
typedef enum QCow2MetadataOverlap {
QCOW2_OL_MAIN_HEADER_BITNR = 0,
QCOW2_OL_ACTIVE_L1_BITNR = 1,
QCOW2_OL_ACTIVE_L2_BITNR = 2,
QCOW2_OL_REFCOUNT_TABLE_BITNR = 3,
QCOW2_OL_REFCOUNT_BLOCK_BITNR = 4,
QCOW2_OL_SNAPSHOT_TABLE_BITNR = 5,
QCOW2_OL_INACTIVE_L1_BITNR = 6,
QCOW2_OL_INACTIVE_L2_BITNR = 7,
QCOW2_OL_BITMAP_DIRECTORY_BITNR = 8,
QCOW2_OL_MAX_BITNR = 9,
QCOW2_OL_NONE = 0,
QCOW2_OL_MAIN_HEADER = (1 << QCOW2_OL_MAIN_HEADER_BITNR),
QCOW2_OL_ACTIVE_L1 = (1 << QCOW2_OL_ACTIVE_L1_BITNR),
QCOW2_OL_ACTIVE_L2 = (1 << QCOW2_OL_ACTIVE_L2_BITNR),
QCOW2_OL_REFCOUNT_TABLE = (1 << QCOW2_OL_REFCOUNT_TABLE_BITNR),
QCOW2_OL_REFCOUNT_BLOCK = (1 << QCOW2_OL_REFCOUNT_BLOCK_BITNR),
QCOW2_OL_SNAPSHOT_TABLE = (1 << QCOW2_OL_SNAPSHOT_TABLE_BITNR),
QCOW2_OL_INACTIVE_L1 = (1 << QCOW2_OL_INACTIVE_L1_BITNR),
/* NOTE: Checking overlaps with inactive L2 tables will result in bdrv
* reads. */
QCOW2_OL_INACTIVE_L2 = (1 << QCOW2_OL_INACTIVE_L2_BITNR),
QCOW2_OL_BITMAP_DIRECTORY = (1 << QCOW2_OL_BITMAP_DIRECTORY_BITNR),
} QCow2MetadataOverlap;
/* Perform all overlap checks which can be done in constant time */
#define QCOW2_OL_CONSTANT \
(QCOW2_OL_MAIN_HEADER | QCOW2_OL_ACTIVE_L1 | QCOW2_OL_REFCOUNT_TABLE | \
QCOW2_OL_SNAPSHOT_TABLE | QCOW2_OL_BITMAP_DIRECTORY)
/* Perform all overlap checks which don't require disk access */
#define QCOW2_OL_CACHED \
(QCOW2_OL_CONSTANT | QCOW2_OL_ACTIVE_L2 | QCOW2_OL_REFCOUNT_BLOCK | \
QCOW2_OL_INACTIVE_L1)
/* Perform all overlap checks */
#define QCOW2_OL_ALL \
(QCOW2_OL_CACHED | QCOW2_OL_INACTIVE_L2)
#define L1E_OFFSET_MASK 0x00fffffffffffe00ULL
#define L2E_OFFSET_MASK 0x00fffffffffffe00ULL
#define L2E_COMPRESSED_OFFSET_SIZE_MASK 0x3fffffffffffffffULL
#define REFT_OFFSET_MASK 0xfffffffffffffe00ULL
#define INV_OFFSET (-1ULL)
/* True when guest data lives in an external data file, not bs->file. */
static inline bool has_data_file(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    return s->data_file != bs->file;
}
/* True when the external data file holds raw data (autoclear bit set). */
static inline bool data_file_is_raw(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    return (s->autoclear_features & QCOW2_AUTOCLEAR_DATA_FILE_RAW) != 0;
}
/* Round @offset down to the start of its cluster. */
static inline int64_t start_of_cluster(BDRVQcow2State *s, int64_t offset)
{
    int64_t within_cluster = offset & (s->cluster_size - 1);

    return offset - within_cluster;
}
/* Byte offset of @offset within its cluster. */
static inline int64_t offset_into_cluster(BDRVQcow2State *s, int64_t offset)
{
    int64_t mask = s->cluster_size - 1;

    return offset & mask;
}
/* Number of clusters needed to cover @size bytes (rounding up). */
static inline uint64_t size_to_clusters(BDRVQcow2State *s, uint64_t size)
{
    uint64_t rounded_up = size + s->cluster_size - 1;

    return rounded_up >> s->cluster_bits;
}
/* Number of L1 entries needed to cover @size bytes (rounding up). */
static inline int64_t size_to_l1(BDRVQcow2State *s, int64_t size)
{
    int shift = s->cluster_bits + s->l2_bits;
    uint64_t bytes_per_l1_entry = 1ULL << shift;

    return (size + bytes_per_l1_entry - 1) >> shift;
}
/* Index into the L1 table for guest offset @offset. */
static inline int offset_to_l1_index(BDRVQcow2State *s, uint64_t offset)
{
    int shift = s->l2_bits + s->cluster_bits;

    return offset >> shift;
}
/* Index into a full L2 table for guest offset @offset. */
static inline int offset_to_l2_index(BDRVQcow2State *s, int64_t offset)
{
    int64_t cluster_nr = offset >> s->cluster_bits;

    return cluster_nr & (s->l2_size - 1);
}
/* Index into an L2 slice (cached sub-table) for guest offset @offset. */
static inline int offset_to_l2_slice_index(BDRVQcow2State *s, int64_t offset)
{
    int64_t cluster_nr = offset >> s->cluster_bits;

    return cluster_nr & (s->l2_slice_size - 1);
}
/* Virtual offset at which the VM state area begins. */
static inline int64_t qcow2_vm_state_offset(BDRVQcow2State *s)
{
    int shift = s->cluster_bits + s->l2_bits;

    return (int64_t)s->l1_vm_state_index << shift;
}
/*
 * Classify an L2 table entry.  The checks are ordered: compressed
 * beats zero, zero beats normal/unallocated.
 */
static inline QCow2ClusterType qcow2_get_cluster_type(BlockDriverState *bs,
                                                      uint64_t l2_entry)
{
    if (l2_entry & QCOW_OFLAG_COMPRESSED) {
        return QCOW2_CLUSTER_COMPRESSED;
    }
    if (l2_entry & QCOW_OFLAG_ZERO) {
        return (l2_entry & L2E_OFFSET_MASK) ? QCOW2_CLUSTER_ZERO_ALLOC
                                            : QCOW2_CLUSTER_ZERO_PLAIN;
    }
    if (l2_entry & L2E_OFFSET_MASK) {
        return QCOW2_CLUSTER_NORMAL;
    }
    /* Offset 0 generally means unallocated, but it is ambiguous with
     * external data files because 0 is a valid offset there. However, all
     * clusters in external data files always have refcount 1, so we can
     * rely on QCOW_OFLAG_COPIED to disambiguate. */
    if (has_data_file(bs) && (l2_entry & QCOW_OFLAG_COPIED)) {
        return QCOW2_CLUSTER_NORMAL;
    }
    return QCOW2_CLUSTER_UNALLOCATED;
}
/* Check whether refcounts are eager or lazy */
static inline bool qcow2_need_accurate_refcounts(BDRVQcow2State *s)
{
    bool dirty = s->incompatible_features & QCOW2_INCOMPAT_DIRTY;

    return !dirty;
}
/* Guest offset where the head COW region of allocation @m begins. */
static inline uint64_t l2meta_cow_start(QCowL2Meta *m)
{
    uint64_t start = m->offset;

    return start + m->cow_start.offset;
}
/* Guest offset just past the tail COW region of allocation @m. */
static inline uint64_t l2meta_cow_end(QCowL2Meta *m)
{
    uint64_t tail_start = m->offset + m->cow_end.offset;

    return tail_start + m->cow_end.nb_bytes;
}
/* Absolute difference of two refcounts. */
static inline uint64_t refcount_diff(uint64_t r1, uint64_t r2)
{
    if (r1 > r2) {
        return r1 - r2;
    }
    return r2 - r1;
}
/* Index into the refcount table for host offset @offset. */
static inline
uint32_t offset_to_reftable_index(BDRVQcow2State *s, uint64_t offset)
{
    int shift = s->refcount_block_bits + s->cluster_bits;

    return offset >> shift;
}
/* qcow2.c functions */
int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
int refcount_order, bool generous_increase,
uint64_t *refblock_count);
int qcow2_mark_dirty(BlockDriverState *bs);
int qcow2_mark_corrupt(BlockDriverState *bs);
int qcow2_mark_consistent(BlockDriverState *bs);
int qcow2_update_header(BlockDriverState *bs);
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
int64_t size, const char *message_format, ...)
GCC_FMT_ATTR(5, 6);
int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
uint64_t entries, size_t entry_len,
int64_t max_size_bytes, const char *table_name,
Error **errp);
/* qcow2-refcount.c functions */
int qcow2_refcount_init(BlockDriverState *bs);
void qcow2_refcount_close(BlockDriverState *bs);
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
uint64_t *refcount);
int qcow2_update_cluster_refcount(BlockDriverState *bs, int64_t cluster_index,
uint64_t addend, bool decrease,
enum qcow2_discard_type type);
int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t offset,
uint64_t additional_clusters, bool exact_size,
int new_refblock_index,
uint64_t new_refblock_offset);
int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
int64_t qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
int64_t nb_clusters);
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size);
void qcow2_free_clusters(BlockDriverState *bs,
int64_t offset, int64_t size,
enum qcow2_discard_type type);
void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
int nb_clusters, enum qcow2_discard_type type);
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
int64_t l1_table_offset, int l1_size, int addend);
int coroutine_fn qcow2_flush_caches(BlockDriverState *bs);
int coroutine_fn qcow2_write_caches(BlockDriverState *bs);
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix);
void qcow2_process_discards(BlockDriverState *bs, int ret);
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
int64_t size);
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
int64_t size, bool data_file);
int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size,
int64_t offset, int64_t size);
int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque, Error **errp);
int qcow2_shrink_reftable(BlockDriverState *bs);
int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size);
/* qcow2-cluster.c functions */
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
bool exact_size);
int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t max_size);
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index);
int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
uint8_t *buf, int nb_sectors, bool enc, Error **errp);
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *cluster_offset);
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *host_offset,
QCowL2Meta **m);
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,
uint64_t *host_offset);
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m);
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, enum qcow2_discard_type type,
bool full_discard);
int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, int flags);
int qcow2_expand_zero_clusters(BlockDriverState *bs,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque);
/* qcow2-snapshot.c functions */
int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id);
int qcow2_snapshot_delete(BlockDriverState *bs,
const char *snapshot_id,
const char *name,
Error **errp);
int qcow2_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab);
int qcow2_snapshot_load_tmp(BlockDriverState *bs,
const char *snapshot_id,
const char *name,
Error **errp);
void qcow2_free_snapshots(BlockDriverState *bs);
int qcow2_read_snapshots(BlockDriverState *bs);
/* qcow2-cache.c functions */
Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables,
unsigned table_size);
int qcow2_cache_destroy(Qcow2Cache *c);
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table);
int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c);
int qcow2_cache_write(BlockDriverState *bs, Qcow2Cache *c);
int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
Qcow2Cache *dependency);
void qcow2_cache_depends_on_flush(Qcow2Cache *c);
void qcow2_cache_clean_unused(Qcow2Cache *c);
int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c);
int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
void **table);
int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
void **table);
void qcow2_cache_put(Qcow2Cache *c, void **table);
void *qcow2_cache_is_table_offset(Qcow2Cache *c, uint64_t offset);
void qcow2_cache_discard(Qcow2Cache *c, void *table);
/* qcow2-bitmap.c functions */
int qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size);
bool qcow2_load_dirty_bitmaps(BlockDriverState *bs, Error **errp);
Qcow2BitmapInfoList *qcow2_get_bitmap_info_list(BlockDriverState *bs,
Error **errp);
int qcow2_reopen_bitmaps_rw_hint(BlockDriverState *bs, bool *header_updated,
Error **errp);
int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp);
int qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp);
void qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs, Error **errp);
int qcow2_reopen_bitmaps_ro(BlockDriverState *bs, Error **errp);
bool qcow2_can_store_new_dirty_bitmap(BlockDriverState *bs,
const char *name,
uint32_t granularity,
Error **errp);
void qcow2_remove_persistent_dirty_bitmap(BlockDriverState *bs,
const char *name,
Error **errp);
#endif
|
pmp-tool/PMP
|
src/qemu/src-pmp/include/authz/listfile.h
|
<filename>src/qemu/src-pmp/include/authz/listfile.h
/*
* QEMU list file authorization driver
*
* Copyright (c) 2018 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*/
#ifndef QAUTHZ_LIST_FILE_H__
#define QAUTHZ_LIST_FILE_H__
#include "authz/list.h"
#include "qapi/qapi-types-authz.h"
#include "qemu/filemonitor.h"
#define TYPE_QAUTHZ_LIST_FILE "authz-list-file"
#define QAUTHZ_LIST_FILE_CLASS(klass) \
OBJECT_CLASS_CHECK(QAuthZListFileClass, (klass), \
TYPE_QAUTHZ_LIST_FILE)
#define QAUTHZ_LIST_FILE_GET_CLASS(obj) \
OBJECT_GET_CLASS(QAuthZListFileClass, (obj), \
TYPE_QAUTHZ_LIST_FILE)
#define QAUTHZ_LIST_FILE(obj) \
OBJECT_CHECK(QAuthZListFile, (obj), \
TYPE_QAUTHZ_LIST_FILE)
typedef struct QAuthZListFile QAuthZListFile;
typedef struct QAuthZListFileClass QAuthZListFileClass;
/**
* QAuthZListFile:
*
* This authorization driver provides a file mechanism
* for granting access by matching user names against a
* file of globs. Each match rule has an associated policy
* and a catch all policy applies if no rule matches
*
* To create an instance of this class via QMP:
*
* {
* "execute": "object-add",
* "arguments": {
* "qom-type": "authz-list-file",
* "id": "authz0",
* "props": {
* "filename": "/etc/qemu/myvm-vnc.acl",
* "refresh": true
* }
* }
* }
*
* If 'refresh' is 'yes', inotify is used to monitor for changes
* to the file and auto-reload the rules.
*
* The myvm-vnc.acl file should contain the parameters for
* the QAuthZList object in JSON format:
*
* {
* "rules": [
* { "match": "fred", "policy": "allow", "format": "exact" },
* { "match": "bob", "policy": "allow", "format": "exact" },
* { "match": "danb", "policy": "deny", "format": "exact" },
* { "match": "dan*", "policy": "allow", "format": "glob" }
* ],
* "policy": "deny"
* }
*
* The object can be created on the command line using
*
* -object authz-list-file,id=authz0,\
* filename=/etc/qemu/myvm-vnc.acl,refresh=yes
*
*/
struct QAuthZListFile {
    QAuthZ parent_obj;
    QAuthZ *list;               /* rule list built from the file contents */
    char *filename;             /* path to the JSON access control list */
    bool refresh;               /* watch the file and auto-reload if true */
    QFileMonitor *file_monitor; /* inotify-based monitor used for refresh */
    int64_t file_watch;         /* watch handle; presumably negative when
                                   inactive — TODO confirm */
};
struct QAuthZListFileClass {
    QAuthZClass parent_class;   /* no additional class members */
};
QAuthZListFile *qauthz_list_file_new(const char *id,
const char *filename,
bool refresh,
Error **errp);
#endif /* QAUTHZ_LIST_FILE_H__ */
|
pmp-tool/PMP
|
src/qemu/src-pmp/tests/tcg/i386/system/memory.c
|
/*
* Memory Test
*
* This is intended to test the softmmu code and ensure we properly
* behave across normal and unaligned accesses across several pages.
* We are not replicating memory tests for stuck bits and other
* hardware level failures but looking for issues with different size
* accesses when:
*
*/
#include <inttypes.h>
#include <minilib.h>
#define TEST_SIZE (4096 * 4) /* 4 pages */
static uint8_t test_data[TEST_SIZE];
/* Emit a progress dot every 128th call to show liveness. */
static void pdot(int count)
{
    if (count % 128 != 0) {
        return;
    }
    ml_printf(".");
}
/*
* Fill the data with ascending value bytes. As x86 is a LE machine we
* write in ascending order and then read and high byte should either
* be zero or higher than the lower bytes.
*/
/*
 * Fill the whole test area with an ascending (wrapping) byte pattern
 * using byte-sized stores.
 */
static void init_test_data_u8(void)
{
    uint8_t *dst = &test_data[0];
    uint8_t val = 0;
    int i;

    ml_printf("Filling test area with u8:");
    for (i = 0; i < TEST_SIZE; i++) {
        *dst++ = val++;
        pdot(i);
    }
    ml_printf("done\n");
}
/*
 * Fill the test area with an ascending pattern using 16-bit stores,
 * starting @offset bytes in.  The leading @offset bytes are zeroed.
 */
static void init_test_data_u16(int offset)
{
    uint8_t count = 0;
    uint16_t word, *ptr = (uint16_t *) &test_data[offset];
    const int max = (TEST_SIZE - offset) / sizeof(word);
    int i;

    ml_printf("Filling test area with u16 (offset %d):", offset);

    /*
     * Zero the first @offset bytes byte-by-byte so stale data from a
     * previous pattern cannot trip the readers' monotonicity checks.
     * The previous loop stored through an un-advanced uint16_t pointer,
     * leaving bytes [2, offset) untouched.
     */
    for (i = 0; i < offset; i++) {
        test_data[i] = 0;
    }

    for (i = 0; i < max; i++) {
        uint8_t high, low;
        low = count++;
        high = count++;
        word = (high << 8) | low;   /* little-endian store: low byte first */
        *ptr++ = word;
        pdot(i);
    }
    ml_printf("done\n");
}
/*
 * Fill the test area with an ascending pattern using 32-bit stores,
 * starting @offset bytes in.  The leading @offset bytes are zeroed.
 */
static void init_test_data_u32(int offset)
{
    uint8_t count = 0;
    uint32_t word, *ptr = (uint32_t *) &test_data[offset];
    const int max = (TEST_SIZE - offset) / sizeof(word);
    int i;

    ml_printf("Filling test area with u32 (offset %d):", offset);

    /*
     * Zero the first @offset bytes byte-by-byte so stale data from a
     * previous pattern cannot trip the readers' monotonicity checks.
     * The previous loop stored through an un-advanced uint32_t pointer,
     * leaving bytes [4, offset) untouched.
     */
    for (i = 0; i < offset; i++) {
        test_data[i] = 0;
    }

    for (i = 0; i < max; i++) {
        uint8_t b1, b2, b3, b4;
        b4 = count++;
        b3 = count++;
        b2 = count++;
        b1 = count++;
        /* cast avoids shifting into the sign bit of a promoted int */
        word = ((uint32_t)b1 << 24) | (b2 << 16) | (b3 << 8) | b4;
        *ptr++ = word;
        pdot(i);
    }
    ml_printf("done\n");
}
/*
 * Read the area back as halfwords from @offset and check that within
 * each halfword the high byte is >= the low byte (or has wrapped to 0).
 * Returns 0 on success, 1 on the first mismatch.
 */
static int read_test_data_u16(int offset)
{
    const int max = (TEST_SIZE - offset) / sizeof(uint16_t);
    uint16_t *ptr = (uint16_t *)&test_data[offset];
    int i;

    ml_printf("Reading u16 from %#lx (offset %d):", ptr, offset);

    for (i = 0; i < max; i++) {
        uint16_t word = *ptr++;
        uint8_t high = (word >> 8) & 0xff;
        uint8_t low = word & 0xff;

        if (high < low && high != 0) {
            ml_printf("Error %d < %d\n", high, low);
            return 1;
        }
        pdot(i);
    }
    ml_printf("done\n");
    return 0;
}
/*
 * Read the area back as words from @offset and check that bytes within
 * each word descend (allowing for wrap to 0).  Returns 0 on success,
 * 2 on the first mismatch.
 */
static int read_test_data_u32(int offset)
{
    const int max = (TEST_SIZE - offset) / sizeof(uint32_t);
    uint32_t *ptr = (uint32_t *)&test_data[offset];
    int i;

    ml_printf("Reading u32 from %#lx (offset %d):", ptr, offset);

    for (i = 0; i < max; i++) {
        uint32_t word = *ptr++;
        uint8_t b1 = (word >> 24) & 0xff;
        uint8_t b2 = (word >> 16) & 0xff;
        uint8_t b3 = (word >> 8) & 0xff;
        uint8_t b4 = word & 0xff;

        if ((b1 < b2 && b1 != 0) ||
            (b2 < b3 && b2 != 0) ||
            (b3 < b4 && b3 != 0)) {
            ml_printf("Error %d, %d, %d, %d", b1, b2, b3, b4);
            return 2;
        }
        pdot(i);
    }
    ml_printf("done\n");
    return 0;
}
/*
 * Verify the u64 view of the test area at the given byte offset.
 * Each byte must be >= the next lower-order byte unless it has
 * wrapped to zero.  Returns 0 on success, 2 on mismatch.
 */
static int read_test_data_u64(int offset)
{
    uint64_t word, *ptr = (uint64_t *)&test_data[offset];
    int i;
    const int max = (TEST_SIZE - offset) / sizeof(word);
    /* Cast: passing a pointer for %lx directly is undefined behavior */
    ml_printf("Reading u64 from %#lx (offset %d):", (unsigned long)ptr, offset);
    for (i = 0; i < max; i++) {
        uint8_t b1, b2, b3, b4, b5, b6, b7, b8;
        word = *ptr++;
        /* 'word' is already uint64_t, so no casts are needed before
         * the shifts (the original casts on b1..b3 were redundant). */
        b1 = (word >> 56) & 0xff;
        b2 = (word >> 48) & 0xff;
        b3 = (word >> 40) & 0xff;
        b4 = (word >> 32) & 0xff;
        b5 = (word >> 24) & 0xff;
        b6 = (word >> 16) & 0xff;
        b7 = (word >> 8) & 0xff;
        b8 = (word >> 0) & 0xff;
        if ((b1 < b2 && b1 != 0) ||
            (b2 < b3 && b2 != 0) ||
            (b3 < b4 && b3 != 0) ||
            (b4 < b5 && b4 != 0) ||
            (b5 < b6 && b5 != 0) ||
            (b6 < b7 && b6 != 0) ||
            (b7 < b8 && b7 != 0)) {
            ml_printf("Error %d, %d, %d, %d, %d, %d, %d, %d",
                      b1, b2, b3, b4, b5, b6, b7, b8);
            return 2;
        } else {
            pdot(i);
        }
    }
    ml_printf("done\n");
    return 0;
}
/*
 * Verify the test data at every alignment offset 0..7.  Stops after
 * the first offset that reports an error; the error codes from all
 * three widths at that offset are OR-ed together.
 */
int do_reads(void)
{
    int result = 0;
    int off;
    for (off = 0; off < 8 && result == 0; off++) {
        result = read_test_data_u16(off);
        result |= read_test_data_u32(off);
        result |= read_test_data_u64(off);
    }
    return result;
}
/*
 * Walk the test area through each fill pattern (u8, then u16 and u32
 * at every byte offset) and verify it after each fill.  Returns the
 * first non-zero error code, or 0 when everything passed.
 */
int main(void)
{
    int offset, status;
    init_test_data_u8();
    status = do_reads();
    if (status) {
        return status;
    }
    for (offset = 0; offset < 8; offset++) {
        init_test_data_u16(offset);
        status = do_reads();
        if (status) {
            return status;
        }
    }
    for (offset = 0; offset < 8; offset++) {
        init_test_data_u32(offset);
        status = do_reads();
        if (status) {
            return status;
        }
    }
    ml_printf("Test complete: %s\n", status == 0 ? "PASSED" : "FAILED");
    return status;
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/tests/test-hmp.c
|
/*
* Test HMP commands.
*
* Copyright (c) 2017 Red Hat Inc.
*
* Author:
* <NAME> <<EMAIL>>
*
* This work is licensed under the terms of the GNU GPL, version 2
* or later. See the COPYING file in the top-level directory.
*
* This test calls some HMP commands for all machines that the current
* QEMU binary provides, to check whether they terminate successfully
* (i.e. do not crash QEMU).
*/
#include "qemu/osdep.h"
#include "libqtest.h"
/* Non-zero when $V >= 2 in the environment; enables per-command tracing */
static int verbose;
/*
 * HMP commands exercised against every machine type.  Each entry is
 * passed verbatim to the monitor; the test only checks that QEMU does
 * not crash, not that the command succeeds.  NULL-terminated.
 */
static const char *hmp_cmds[] = {
    "announce_self",
    "boot_set ndc",
    "chardev-add null,id=testchardev1",
    "chardev-send-break testchardev1",
    "chardev-change testchardev1 ringbuf",
    "chardev-remove testchardev1",
    "commit all",
    "cpu-add 1",
    "cpu 0",
    "device_add ?",
    "device_add usb-mouse,id=mouse1",
    "mouse_button 7",
    "mouse_move 10 10",
    "mouse_button 0",
    "device_del mouse1",
    "dump-guest-memory /dev/null 0 4096",
    "dump-guest-memory /dev/null",
    "gdbserver",
    "hostfwd_add tcp::43210-:43210",
    "hostfwd_remove tcp::43210-:43210",
    "i /w 0",
    "log all",
    "log none",
    "memsave 0 4096 \"/dev/null\"",
    "migrate_set_cache_size 1",
    "migrate_set_downtime 1",
    "migrate_set_speed 1",
    "netdev_add user,id=net1",
    "set_link net1 off",
    "set_link net1 on",
    "netdev_del net1",
    "nmi",
    "o /w 0 0x1234",
    "object_add memory-backend-ram,id=mem1,size=256M",
    "object_del mem1",
    "pmemsave 0 4096 \"/dev/null\"",
    "p $pc + 8",
    "qom-list /",
    "qom-set /machine initrd test",
    "screendump /dev/null",
    "sendkey x",
    "singlestep on",
    "wavcapture /dev/null",
    "stopcapture 0",
    "sum 0 512",
    "x /8i 0x100",
    "xp /16x 0",
    NULL
};
/* Execute each entry of the fixed hmp_cmds[] list in turn */
static void test_commands(void)
{
    int idx;
    for (idx = 0; hmp_cmds[idx] != NULL; idx++) {
        char *reply = hmp("%s", hmp_cmds[idx]);
        if (verbose) {
            fprintf(stderr,
                    "\texecute HMP command: %s\n"
                    "\tresult : %s\n",
                    hmp_cmds[idx], reply);
        }
        g_free(reply);
    }
}
/*
 * Parse the output of "help info" and blindly run every listed
 * "info ..." subcommand without arguments.
 */
static void test_info_commands(void)
{
    char *reply, *cursor, *buffer, *space;
    buffer = cursor = hmp("help info");
    while (*cursor) {
        /* Each help line starts with "info <name> ..."; cut after the name */
        g_assert(strncmp(cursor, "info ", 5) == 0);
        space = strchr(&cursor[5], ' ');
        g_assert(space != NULL);
        *space = '\0';
        /* Run the truncated "info <name>" command */
        if (verbose) {
            fprintf(stderr, "\t%s\n", cursor);
        }
        reply = hmp("%s", cursor);
        g_free(reply);
        /* Advance to the start of the next help line */
        cursor = strchr(space + 1, '\n');
        if (!cursor) {
            break;
        }
        cursor += 1;
    }
    g_free(buffer);
}
/*
 * Start QEMU paused (-S) with the given machine type, run the HMP
 * test suites against it, then shut it down.  'data' is a heap string
 * owned by this function and freed on exit.
 */
static void test_machine(gconstpointer data)
{
    const char *machine = data;
    char *cli = g_strdup_printf("-S -M %s", machine);
    qtest_start(cli);
    test_info_commands();
    test_commands();
    qtest_end();
    g_free(cli);
    g_free((void *)data);
}
/*
 * Register one "hmp/<machine>" test case, skipping machines on the
 * known-problem blacklist.
 */
static void add_machine_test_case(const char *mname)
{
    char *case_path;
    /* Ignore blacklisted machines that have known problems */
    if (!strcmp("xenfv", mname) || !strcmp("xenpv", mname)) {
        return;
    }
    case_path = g_strdup_printf("hmp/%s", mname);
    qtest_add_data_func(case_path, g_strdup(mname), test_machine);
    g_free(case_path);
}
/* Register an HMP smoke test for every machine type and run them */
int main(int argc, char **argv)
{
    const char *v_env = getenv("V");
    /* "V=2" (or higher) in the environment turns on command tracing */
    if (v_env && *v_env >= '2') {
        verbose = true;
    }
    g_test_init(&argc, &argv, NULL);
    qtest_cb_for_every_machine(add_machine_test_case, g_test_quick());
    /* as none machine has no memory by default, add a test case with memory */
    qtest_add_data_func("hmp/none+2MB", g_strdup("none -m 2"), test_machine);
    return g_test_run();
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/bit-count/test_msa_pcnt_w.c
|
/*
* Test program for MSA instruction PCNT.W
*
* Copyright (C) 2019 Wave Computing, Inc.
* Copyright (C) 2019 <NAME> <<EMAIL>>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
#include <sys/time.h>
#include <stdint.h>
#include "../../../../include/wrappers_msa.h"
#include "../../../../include/test_inputs_128.h"
#include "../../../../include/test_utils_128.h"
#define TEST_COUNT_TOTAL (PATTERN_INPUTS_COUNT + RANDOM_INPUTS_COUNT)
/*
 * Run PCNT.W (per-32-bit-element population count) over the shared
 * pattern and random 128-bit input sets, time the run, and compare
 * the results against the precomputed expected table below.
 * Returns the (non-zero on mismatch) status from check_results().
 */
int32_t main(void)
{
    char *instruction_name = "PCNT.W";
    int32_t ret;
    uint32_t i;
    struct timeval start, end;
    double elapsed_time;
    uint64_t b128_result[TEST_COUNT_TOTAL][2];
    /* Expected outputs, one 128-bit value (two u64 halves) per input */
    uint64_t b128_expect[TEST_COUNT_TOTAL][2] = {
        { 0x0000002000000020ULL, 0x0000002000000020ULL, },    /*   0  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000001000000010ULL, 0x0000001000000010ULL, },
        { 0x0000001000000010ULL, 0x0000001000000010ULL, },
        { 0x0000001000000010ULL, 0x0000001000000010ULL, },
        { 0x0000001000000010ULL, 0x0000001000000010ULL, },
        { 0x0000001100000010ULL, 0x0000000f00000011ULL, },
        { 0x0000000f00000010ULL, 0x000000110000000fULL, },
        { 0x0000001000000010ULL, 0x0000001000000010ULL, },    /*   8  */
        { 0x0000001000000010ULL, 0x0000001000000010ULL, },
        { 0x0000001100000011ULL, 0x000000100000000fULL, },
        { 0x0000000f0000000fULL, 0x0000001000000011ULL, },
        { 0x0000001200000010ULL, 0x0000000e00000012ULL, },
        { 0x0000000e00000010ULL, 0x000000120000000eULL, },
        { 0x0000001200000011ULL, 0x0000000e00000010ULL, },
        { 0x0000000e0000000fULL, 0x0000001200000010ULL, },
        { 0x0000001000000010ULL, 0x0000001000000010ULL, },    /*  16  */
        { 0x0000001000000010ULL, 0x0000001000000010ULL, },
        { 0x0000001200000012ULL, 0x0000000f0000000eULL, },
        { 0x0000000e0000000eULL, 0x0000001100000012ULL, },
        { 0x000000140000000eULL, 0x0000001000000012ULL, },
        { 0x0000000c00000012ULL, 0x000000100000000eULL, },
        { 0x000000150000000cULL, 0x000000130000000eULL, },
        { 0x0000000b00000014ULL, 0x0000000d00000012ULL, },
        { 0x0000001400000010ULL, 0x0000000c00000014ULL, },    /*  24  */
        { 0x0000000c00000010ULL, 0x000000140000000cULL, },
        { 0x0000001300000013ULL, 0x0000000e0000000dULL, },
        { 0x0000000d0000000dULL, 0x0000001200000013ULL, },
        { 0x0000001200000012ULL, 0x0000001200000010ULL, },
        { 0x0000000e0000000eULL, 0x0000000e00000010ULL, },
        { 0x0000001100000011ULL, 0x0000001100000011ULL, },
        { 0x0000000f0000000fULL, 0x0000000f0000000fULL, },
        { 0x0000001000000010ULL, 0x0000001000000010ULL, },    /*  32  */
        { 0x0000001000000010ULL, 0x0000001000000010ULL, },
        { 0x0000001100000011ULL, 0x0000001100000011ULL, },
        { 0x0000000f0000000fULL, 0x0000000f0000000fULL, },
        { 0x0000001200000012ULL, 0x0000001200000012ULL, },
        { 0x0000000e0000000eULL, 0x0000000e0000000eULL, },
        { 0x0000001300000013ULL, 0x000000130000000eULL, },
        { 0x0000000d0000000dULL, 0x0000000d00000012ULL, },
        { 0x0000001400000014ULL, 0x000000100000000cULL, },    /*  40  */
        { 0x0000000c0000000cULL, 0x0000001000000014ULL, },
        { 0x0000001500000015ULL, 0x0000000c0000000bULL, },
        { 0x0000000b0000000bULL, 0x0000001400000015ULL, },
        { 0x0000001600000014ULL, 0x0000000a0000000eULL, },
        { 0x0000000a0000000cULL, 0x0000001600000012ULL, },
        { 0x0000001700000012ULL, 0x0000000900000013ULL, },
        { 0x000000090000000eULL, 0x000000170000000dULL, },
        { 0x0000001800000010ULL, 0x0000000800000018ULL, },    /*  48  */
        { 0x0000000800000010ULL, 0x0000001800000008ULL, },
        { 0x000000190000000eULL, 0x0000000b00000019ULL, },
        { 0x0000000700000012ULL, 0x0000001500000007ULL, },
        { 0x0000001a0000000cULL, 0x0000000e00000018ULL, },
        { 0x0000000600000014ULL, 0x0000001200000008ULL, },
        { 0x0000001b0000000aULL, 0x0000001100000014ULL, },
        { 0x0000000500000016ULL, 0x0000000f0000000cULL, },
        { 0x0000001c00000008ULL, 0x0000001400000010ULL, },    /*  56  */
        { 0x0000000400000018ULL, 0x0000000c00000010ULL, },
        { 0x0000001d00000006ULL, 0x000000170000000cULL, },
        { 0x000000030000001aULL, 0x0000000900000014ULL, },
        { 0x0000001e00000004ULL, 0x0000001a00000008ULL, },
        { 0x000000020000001cULL, 0x0000000600000018ULL, },
        { 0x0000001f00000002ULL, 0x0000001d00000004ULL, },
        { 0x000000010000001eULL, 0x000000030000001cULL, },
        { 0x0000000f0000000aULL, 0x0000001100000012ULL, },    /*  64  */
        { 0x000000110000000eULL, 0x0000001200000012ULL, },
        { 0x0000001100000010ULL, 0x000000140000000eULL, },
        { 0x0000000f00000010ULL, 0x0000000f0000000cULL, },
        { 0x0000001200000011ULL, 0x0000000f00000013ULL, },
        { 0x0000001000000014ULL, 0x0000000b00000011ULL, },
        { 0x0000000e00000010ULL, 0x0000000900000013ULL, },
        { 0x0000001200000010ULL, 0x000000140000000eULL, },
        { 0x0000000c00000011ULL, 0x000000130000000fULL, },    /*  72  */
        { 0x0000001100000014ULL, 0x0000000e00000010ULL, },
        { 0x0000001300000011ULL, 0x0000000e0000000fULL, },
        { 0x000000110000000eULL, 0x0000001100000013ULL, },
        { 0x0000000f0000000eULL, 0x0000000e00000010ULL, },
        { 0x000000110000000cULL, 0x0000001100000010ULL, },
        { 0x0000000b00000013ULL, 0x000000140000000eULL, },
        { 0x0000001000000014ULL, 0x0000001200000010ULL, },
    };
    /* Time the whole batch: pattern inputs first, then random inputs */
    gettimeofday(&start, NULL);
    for (i = 0; i < TEST_COUNT_TOTAL; i++) {
        if (i < PATTERN_INPUTS_COUNT) {
            do_msa_PCNT_W(b128_pattern[i], b128_result[i]);
        } else {
            do_msa_PCNT_W(b128_random[i - PATTERN_INPUTS_COUNT],
                          b128_result[i]);
        }
    }
    gettimeofday(&end, NULL);
    /* Elapsed wall-clock time in milliseconds */
    elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0;
    elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0;
    ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time,
                        &b128_result[0][0], &b128_expect[0][0]);
    return ret;
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/target/ppc/machine.c
|
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
#include "kvm_ppc.h"
#include "exec/helper-proto.h"
/*
 * Load CPU state written by QEMU versions that predate the VMState
 * conversion (handled via vmstate_ppc_cpu.load_state_old).  The field
 * order mirrors the historic cpu_save() layout exactly and must not
 * be changed, or old migration streams become unreadable.
 */
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    unsigned int i, j;
    target_ulong sdr1;
    uint32_t fpscr, vscr;
#if defined(TARGET_PPC64)
    int32_t slb_nr;
#endif
    target_ulong xer;
    /* General-purpose registers (plus upper halves on 32-bit targets) */
    for (i = 0; i < 32; i++)
        qemu_get_betls(f, &env->gpr[i]);
#if !defined(TARGET_PPC64)
    for (i = 0; i < 32; i++)
        qemu_get_betls(f, &env->gprh[i]);
#endif
    qemu_get_betls(f, &env->lr);
    qemu_get_betls(f, &env->ctr);
    for (i = 0; i < 8; i++)
        qemu_get_be32s(f, &env->crf[i]);
    /* XER is stored expanded internally; go through the accessor */
    qemu_get_betls(f, &xer);
    cpu_write_xer(env, xer);
    qemu_get_betls(f, &env->reserve_addr);
    qemu_get_betls(f, &env->msr);
    for (i = 0; i < 4; i++)
        qemu_get_betls(f, &env->tgpr[i]);
    /* FP registers travel as raw 64-bit images */
    for (i = 0; i < 32; i++) {
        union {
            float64 d;
            uint64_t l;
        } u;
        u.l = qemu_get_be64(f);
        *cpu_fpr_ptr(env, i) = u.d;
    }
    qemu_get_be32s(f, &fpscr);
    env->fpscr = fpscr;
    qemu_get_sbe32s(f, &env->access_type);
#if defined(TARGET_PPC64)
    qemu_get_betls(f, &env->spr[SPR_ASR]);
    /* slb_nr is consumed from the stream but intentionally discarded */
    qemu_get_sbe32s(f, &slb_nr);
#endif
    /* SDR1 is applied below, only when no virtual hypervisor is set */
    qemu_get_betls(f, &sdr1);
    for (i = 0; i < 32; i++)
        qemu_get_betls(f, &env->sr[i]);
    for (i = 0; i < 2; i++)
        for (j = 0; j < 8; j++)
            qemu_get_betls(f, &env->DBAT[i][j]);
    for (i = 0; i < 2; i++)
        for (j = 0; j < 8; j++)
            qemu_get_betls(f, &env->IBAT[i][j]);
    qemu_get_sbe32s(f, &env->nb_tlb);
    qemu_get_sbe32s(f, &env->tlb_per_way);
    qemu_get_sbe32s(f, &env->nb_ways);
    qemu_get_sbe32s(f, &env->last_way);
    qemu_get_sbe32s(f, &env->id_tlbs);
    qemu_get_sbe32s(f, &env->nb_pids);
    if (env->tlb.tlb6) {
        // XXX assumes 6xx
        for (i = 0; i < env->nb_tlb; i++) {
            qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
            qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
            qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
        }
    }
    for (i = 0; i < 4; i++)
        qemu_get_betls(f, &env->pb[i]);
    for (i = 0; i < 1024; i++)
        qemu_get_betls(f, &env->spr[i]);
    if (!cpu->vhyp) {
        ppc_store_sdr1(env, sdr1);
    }
    /* VSCR is stored expanded internally; go through the helper */
    qemu_get_be32s(f, &vscr);
    helper_mtvscr(env, vscr);
    qemu_get_be64s(f, &env->spe_acc);
    qemu_get_be32s(f, &env->spe_fscr);
    qemu_get_betls(f, &env->msr_mask);
    qemu_get_be32s(f, &env->flags);
    qemu_get_sbe32s(f, &env->error_code);
    qemu_get_be32s(f, &env->pending_interrupts);
    qemu_get_be32s(f, &env->irq_input_state);
    for (i = 0; i < POWERPC_EXCP_NB; i++)
        qemu_get_betls(f, &env->excp_vectors[i]);
    qemu_get_betls(f, &env->excp_prefix);
    qemu_get_betls(f, &env->ivor_mask);
    qemu_get_betls(f, &env->ivpr_mask);
    qemu_get_betls(f, &env->hreset_vector);
    qemu_get_betls(f, &env->nip);
    qemu_get_betls(f, &env->hflags);
    qemu_get_betls(f, &env->hflags_nmsr);
    qemu_get_sbe32(f); /* Discard unused mmu_idx */
    qemu_get_sbe32(f); /* Discard unused power_mode */
    /* Recompute mmu indices */
    hreg_compute_mem_idx(env);
    return 0;
}
/* Read one 128-bit Altivec register: two big-endian 64-bit halves */
static int get_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_avr_t *v = pv;
    v->u64[0] = qemu_get_be64(f);
    v->u64[1] = qemu_get_be64(f);
    return 0;
}
/* Write one 128-bit Altivec register: two big-endian 64-bit halves */
static int put_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_avr_t *v = pv;
    qemu_put_be64(f, v->u64[0]);
    qemu_put_be64(f, v->u64[1]);
    return 0;
}
/* VMState glue so AVR arrays use the accessors above */
static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get = get_avr,
    .put = put_avr,
};
/* AVRs occupy entries 32..63 of env.vsr, hence the start offset of 32 */
#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)
#define VMSTATE_AVR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)
/* Read one FP register: only doubleword 0 of the VSR is migrated */
static int get_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;
    v->VsrD(0) = qemu_get_be64(f);
    return 0;
}
/* Write one FP register (doubleword 0 of the VSR) */
static int put_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_vsr_t *v = pv;
    qemu_put_be64(f, v->VsrD(0));
    return 0;
}
/* VMState glue so FPR arrays use the accessors above */
static const VMStateInfo vmstate_info_fpr = {
    .name = "fpr",
    .get = get_fpr,
    .put = put_fpr,
};
/* FPRs occupy entries 0..31 of env.vsr, hence the start offset of 0 */
#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)
#define VMSTATE_FPR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)
/*
 * Read one VSX register half: doubleword 1 only.  Doubleword 0 of the
 * same register travels in the FPR array (see get_fpr/put_fpr above).
 */
static int get_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;
    v->VsrD(1) = qemu_get_be64(f);
    return 0;
}
/* Write one VSX register half (doubleword 1) */
static int put_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_vsr_t *v = pv;
    qemu_put_be64(f, v->VsrD(1));
    return 0;
}
/* VMState glue so VSR arrays use the accessors above */
static const VMStateInfo vmstate_info_vsr = {
    .name = "vsr",
    .get = get_vsr,
    .put = put_vsr,
};
#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_vsr, ppc_vsr_t)
#define VMSTATE_VSR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)
/* True when migrating to a pre-2.8 QEMU: emit the legacy compat fields */
static bool cpu_pre_2_8_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    return cpu->pre_2_8_migration;
}
#if defined(TARGET_PPC64)
/* True when migrating to a pre-3.0 QEMU (64-bit targets only) */
static bool cpu_pre_3_0_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    return cpu->pre_3_0_migration;
}
#endif
/*
 * Copy internal register mirrors (LR, CTR, XER, BATs, ...) into the
 * SPR array before it is serialized, and compute the compatibility
 * fields expected by older QEMU versions.
 */
static int cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    /* Instruction-flag bits that pre-2.8 QEMU knows about; newer bits
     * must be masked out of the migrated copies below. */
    uint64_t insns_compat_mask =
        PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
        | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
        | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
        | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
        | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
        | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
        | PPC_64B | PPC_64BX | PPC_ALTIVEC
        | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
    uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
        | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
        | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
        | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
        | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
        | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;
    /* Mirror the dedicated register fields into the SPR array */
    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;
    /* BATs 0..3 and (when present) 4..7 each as upper/lower SPR pairs */
    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2*i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2*i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2*i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2*i + 1] = env->IBAT[1][i];
    }
    for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2*i] = env->DBAT[0][i+4];
        env->spr[SPR_DBAT4U + 2*i + 1] = env->DBAT[1][i+4];
        env->spr[SPR_IBAT4U + 2*i] = env->IBAT[0][i+4];
        env->spr[SPR_IBAT4U + 2*i + 1] = env->IBAT[1][i+4];
    }
    /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
    if (cpu->pre_2_8_migration) {
        /* Mask out bits that got added to msr_mask since the versions
         * which stupidly included it in the migration stream. */
        target_ulong metamask = 0
#if defined(TARGET_PPC64)
            | (1ULL << MSR_TS0)
            | (1ULL << MSR_TS1)
#endif
            ;
        cpu->mig_msr_mask = env->msr_mask & ~metamask;
        cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
        /* CPU models supported by old machines all have PPC_MEM_TLBIE,
         * so we set it unconditionally to allow backward migration from
         * a POWER9 host to a POWER8 host.
         */
        cpu->mig_insns_flags |= PPC_MEM_TLBIE;
        cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
        cpu->mig_nb_BATs = env->nb_BATs;
    }
    if (cpu->pre_3_0_migration) {
        if (cpu->hash64_opts) {
            cpu->mig_slb_nr = cpu->hash64_opts->slb_size;
        }
    }
    return 0;
}
/*
* Determine if a given PVR is a "close enough" match to the CPU
* object. For TCG and KVM PR it would probably be sufficient to
* require an exact PVR match. However for KVM HV the user is
* restricted to a PVR exactly matching the host CPU. The correct way
* to handle this is to put the guest into an architected
* compatibility mode. However, to allow a more forgiving transition
* and migration from before this was widely done, we allow migration
* between sufficiently similar PVRs, as determined by the CPU class's
* pvr_match() hook.
*/
/* Accept an exact PVR match, else defer to the class's fuzzy matcher */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    return pvr == pcc->pvr ? true : pcc->pvr_match(pcc, pvr);
}
/*
 * Validate the incoming CPU model and push the migrated SPR array
 * back into the internal register mirrors.  Returns -1 (failing the
 * migration) on a CPU model mismatch.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong msr;
    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatiblity mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */
#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;
        /* Reset first so ppc_set_compat() re-applies the mode cleanly */
        cpu->compat_pvr = 0;
        ppc_set_compat(cpu, compat_pvr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -1;
        }
    }
    /*
     * If we're running with KVM HV, there is a chance that the guest
     * is running with KVM HV and its kernel does not have the
     * capability of dealing with a different PVR other than this
     * exact host PVR in KVM_SET_SREGS. If that happens, the
     * guest freezes after migration.
     *
     * The function kvmppc_pvr_workaround_required does this verification
     * by first checking if the kernel has the cap, returning true immediately
     * if that is the case. Otherwise, it checks if we're running in KVM PR.
     * If the guest kernel does not have the cap and we're not running KVM-PR
     * (so, it is running KVM-HV), we need to ensure that KVM_SET_SREGS will
     * receive the PVR it expects as a workaround.
     *
     */
#if defined(CONFIG_KVM)
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }
#endif
    /* Restore the dedicated register fields from the SPR array */
    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];
    /* Rebuild the BAT arrays from their SPR-pair images */
    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2*i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2*i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2*i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2*i + 1];
    }
    for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
        env->DBAT[0][i+4] = env->spr[SPR_DBAT4U + 2*i];
        env->DBAT[1][i+4] = env->spr[SPR_DBAT4U + 2*i + 1];
        env->IBAT[0][i+4] = env->spr[SPR_IBAT4U + 2*i];
        env->IBAT[1][i+4] = env->spr[SPR_IBAT4U + 2*i + 1];
    }
    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }
    /* Invalidate all supported msr bits except MSR_TGPR/MSR_HVB before restoring */
    msr = env->msr;
    env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);
    hreg_compute_mem_idx(env);
    return 0;
}
/* Subsection present only when the CPU model implements floating point */
static bool fpu_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    return (cpu->env.insns_flags & PPC_FLOAT);
}
/* FPRs (doubleword 0 of each VSR) plus FPSCR */
static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
/* Subsection present only when the CPU model implements Altivec */
static bool altivec_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    return (cpu->env.insns_flags & PPC_ALTIVEC);
}
/* Read the architected VSCR and expand it into internal state */
static int get_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    PowerPCCPU *cpu = opaque;
    helper_mtvscr(&cpu->env, qemu_get_be32(f));
    return 0;
}
/* Collapse internal state back to the architected VSCR and write it */
static int put_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    PowerPCCPU *cpu = opaque;
    qemu_put_be32(f, helper_mfvscr(&cpu->env));
    return 0;
}
/* VMState glue for the hand-rolled VSCR field in vmstate_altivec */
static const VMStateInfo vmstate_vscr = {
    .name = "cpu/altivec/vscr",
    .get = get_vscr,
    .put = put_vscr,
};
/* Altivec registers (entries 32..63 of env.vsr) plus the VSCR */
static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
        /*
         * Save the architecture value of the vscr, not the internally
         * expanded version. Since this architecture value does not
         * exist in memory to be stored, this requires a bit of hoop
         * jumping. We want OFFSET=0 so that we effectively pass CPU
         * to the helper functions.
         */
        {
            .name = "vscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_vscr,
            .flags = VMS_SINGLE,
            .offset = 0
        },
        VMSTATE_END_OF_LIST()
    },
};
/* Subsection present only when the CPU model implements VSX */
static bool vsx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    return (cpu->env.insns_flags2 & PPC2_VSX);
}
/* Doubleword 1 of VSRs 0..31 (doubleword 0 travels with the FPRs) */
static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};
#ifdef TARGET_PPC64
/* Transactional memory state */
static bool tm_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    /* env is referenced implicitly by the msr_ts accessor macro */
    CPUPPCState *env = &cpu->env;
    return msr_ts;
}
/* Checkpointed register state while a transaction is active */
static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .needed = tm_needed,
    .fields = (VMStateField []) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif
/* Segment registers: only for MMUs that are not 64-bit hash MMUs */
static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;
    return !(cpu->env.mmu_model & POWERPC_MMU_64);
#else
    return true;
#endif
}
static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};
#ifdef TARGET_PPC64
/* Read one SLB entry: big-endian esid then vsid */
static int get_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field)
{
    ppc_slb_t *v = pv;
    v->esid = qemu_get_be64(f);
    v->vsid = qemu_get_be64(f);
    return 0;
}
/* Write one SLB entry: big-endian esid then vsid */
static int put_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    ppc_slb_t *v = pv;
    qemu_put_be64(f, v->esid);
    qemu_put_be64(f, v->vsid);
    return 0;
}
/* VMState glue so SLB arrays use the accessors above */
static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get = get_slbe,
    .put = put_slbe,
};
#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)
#define VMSTATE_SLB_ARRAY(_f, _s, _n)                                   \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)
/* SLB subsection is present only for 64-bit hash-MMU CPU models */
static bool slb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    /* We don't support any of the old segment table based 64-bit CPUs */
    return (cpu->env.mmu_model & POWERPC_MMU_64);
}
/* Re-validate and re-install every migrated SLB entry */
static int slb_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    /* We've pulled in the raw esid and vsid values from the migration
     * stream, but we need to recompute the page size pointers */
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
            /* Migration source had bad values in its SLB */
            return -1;
        }
    }
    return 0;
}
/* The SLB array plus, for pre-3.0 destinations, the legacy slb_nr field */
static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* TARGET_PPC64 */
/* One software-managed 6xx TLB entry: two PTE words plus the EPN */
static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};
/* Subsection present only for CPUs with a 6xx-style software TLB */
static bool tlb6xx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    return env->nb_tlb && (env->tlb_type == TLB_6XX);
}
/* 6xx TLB array (size-checked against the destination) plus TGPRs */
static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};
/* One embedded-PowerPC TLB entry */
static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};
/* Subsection present only for CPUs with an embedded-style TLB */
static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    return env->nb_tlb && (env->tlb_type == TLB_EMB);
}
/* 403 protection registers exist only on PVR family 0x0020xxxx parts */
static bool pbr403_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    uint32_t pvr = cpu->env.spr[SPR_PVR];
    return (pvr & 0xffff0000) == 0x00200000;
}
static const VMStateDescription vmstate_pbr403 = {
    .name = "cpu/pbr403",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pbr403_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_tlbemb = {
    /*
     * NOTE(review): the name says "tlb6xx" although this section
     * describes the embedded TLB.  Presumably kept for migration-stream
     * compatibility — renaming it would break cross-version migration,
     * so do not "fix" it without a compat plan.
     */
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        /* 403 protection registers */
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_pbr403,
        NULL
    }
};
/* One BookE MAS-style TLB entry */
static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};
/* Subsection present only for CPUs with a MAS-style TLB */
static bool tlbmas_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    return env->nb_tlb && (env->tlb_type == TLB_MAS);
}
/* MAS TLB array, size-checked against the destination's nb_tlb */
static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * compat_pvr travels in its own subsection; only sent when a compat
 * mode is active and the destination is new enough to understand it.
 */
static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    /* A compat mode without a virtual hypervisor would be inconsistent */
    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
}
static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Top-level PowerPC CPU migration description.  Streams older than
 * version 5 are handled by cpu_load_old(); optional state travels in
 * the subsections listed at the bottom.
 */
const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .minimum_version_id_old = 4,
    .load_state_old = cpu_load_old,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */
        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),
        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),
        /* Reservation */
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),
        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),
        /* Internal state */
        VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU),
        /* FIXME: access_type? */
        /* Sanity checking */
        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
                            cpu_pre_2_8_migration),
        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};
|
pmp-tool/PMP
|
src/qemu/src-pmp/target/riscv/op_helper.c
|
<filename>src/qemu/src-pmp/target/riscv/op_helper.c
/*
* RISC-V Emulation Helpers for QEMU.
*
* Copyright (c) 2016-2017 <NAME>, <EMAIL>
* Copyright (c) 2017-2018 SiFive, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
/* Exceptions processing helpers */
/*
 * Raise a guest exception and longjmp back to the execution loop.
 * 'pc' is the host return address used to restore guest state from the
 * faulting translation block (pass 0 if no unwinding is needed).
 */
void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env,
                                         uint32_t exception, uintptr_t pc)
{
    RISCVCPU *cpu = riscv_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception);
    cs->exception_index = exception;
    cpu_loop_exit_restore(cs, pc);
}
/* TCG helper: raise an exception with no host-pc unwinding (pc = 0). */
void helper_raise_exception(CPURISCVState *env, uint32_t exception)
{
    riscv_raise_exception(env, exception, 0);
}
/* CSRRW/CSRRWI: write 'src' to the CSR and return its previous value.
 * An illegal or inaccessible CSR raises an illegal-instruction trap. */
target_ulong helper_csrrw(CPURISCVState *env, target_ulong src,
                          target_ulong csr)
{
    target_ulong old_val = 0;
    int ret = riscv_csrrw(env, csr, &old_val, src, -1);

    if (ret < 0) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
    return old_val;
}
/* CSRRS/CSRRSI: set the bits of 'src' in the CSR and return the old
 * value. rs1_pass is zero when rs1 is x0, in which case no write mask
 * is applied at all (read-only access). */
target_ulong helper_csrrs(CPURISCVState *env, target_ulong src,
                          target_ulong csr, target_ulong rs1_pass)
{
    target_ulong old_val = 0;
    target_ulong write_mask = rs1_pass ? src : 0;

    if (riscv_csrrw(env, csr, &old_val, -1, write_mask) < 0) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
    return old_val;
}
/* CSRRC/CSRRCI: clear the bits of 'src' in the CSR and return the old
 * value. rs1_pass is zero when rs1 is x0 (read-only access). */
target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
                          target_ulong csr, target_ulong rs1_pass)
{
    target_ulong old_val = 0;
    target_ulong write_mask = rs1_pass ? src : 0;

    if (riscv_csrrw(env, csr, &old_val, 0, write_mask) < 0) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
    return old_val;
}
#ifndef CONFIG_USER_ONLY
/*
 * Handle the SRET instruction: return from a supervisor-mode trap.
 *
 * Returns the pc to resume at (sepc). Restores the previous privilege
 * level from mstatus.SPP and the interrupt-enable bit from mstatus.SPIE.
 */
target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
    /* SRET is illegal below S-mode */
    if (!(env->priv >= PRV_S)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong retpc = env->sepc;
    /* Without the C extension the return target must be 4-byte aligned */
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    /* mstatus.TSR (priv >= 1.10) traps SRET executed in S-mode */
    if (env->priv_ver >= PRIV_VERSION_1_10_0 &&
        get_field(env->mstatus, MSTATUS_TSR)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong mstatus = env->mstatus;
    target_ulong prev_priv = get_field(mstatus, MSTATUS_SPP);
    /* SIE <- SPIE (pre-1.10: the per-privilege xIE bit instead) */
    mstatus = set_field(mstatus,
        env->priv_ver >= PRIV_VERSION_1_10_0 ?
        MSTATUS_SIE : MSTATUS_UIE << prev_priv,
        get_field(mstatus, MSTATUS_SPIE));
    /*
     * Fix: the privileged spec (v1.10 sec. 3.1.6.1) requires xPIE to be
     * set to 1 after an xRET; this previously wrote 0.
     */
    mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
    mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
    riscv_cpu_set_mode(env, prev_priv);
    env->mstatus = mstatus;

    return retpc;
}
/*
 * Handle the MRET instruction: return from a machine-mode trap.
 *
 * Returns the pc to resume at (mepc). Restores the previous privilege
 * level from mstatus.MPP and the interrupt-enable bit from mstatus.MPIE.
 */
target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
    /* MRET is illegal below M-mode */
    if (!(env->priv >= PRV_M)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong retpc = env->mepc;
    /* Without the C extension the return target must be 4-byte aligned */
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    target_ulong mstatus = env->mstatus;
    target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
    /* MIE <- MPIE (pre-1.10: the per-privilege xIE bit instead) */
    mstatus = set_field(mstatus,
        env->priv_ver >= PRIV_VERSION_1_10_0 ?
        MSTATUS_MIE : MSTATUS_UIE << prev_priv,
        get_field(mstatus, MSTATUS_MPIE));
    /*
     * Fix: the privileged spec (v1.10 sec. 3.1.6.1) requires xPIE to be
     * set to 1 after an xRET; this previously wrote 0.
     */
    mstatus = set_field(mstatus, MSTATUS_MPIE, 1);
    mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U);
    riscv_cpu_set_mode(env, prev_priv);
    env->mstatus = mstatus;

    return retpc;
}
/* WFI: halt the hart until an interrupt arrives. With mstatus.TW set
 * (priv >= 1.10), WFI in S-mode traps as an illegal instruction. */
void helper_wfi(CPURISCVState *env)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));
    bool trap_wfi = env->priv == PRV_S &&
                    env->priv_ver >= PRIV_VERSION_1_10_0 &&
                    get_field(env->mstatus, MSTATUS_TW);

    if (trap_wfi) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}
/* SFENCE.VMA helper: flush the TLB. With mstatus.TVM set (priv >= 1.10),
 * executing it in S-mode traps as an illegal instruction. */
void helper_tlb_flush(CPURISCVState *env)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));
    bool trap_sfence = env->priv == PRV_S &&
                       env->priv_ver >= PRIV_VERSION_1_10_0 &&
                       get_field(env->mstatus, MSTATUS_TVM);

    if (trap_sfence) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    } else {
        tlb_flush(cs);
    }
}
#endif /* !CONFIG_USER_ONLY */
|
pmp-tool/PMP
|
src/qemu/src-pmp/target/ppc/helper_regs.h
|
<gh_stars>1-10
/*
* PowerPC emulation special registers manipulation helpers for qemu.
*
* Copyright (c) 2003-2007 <NAME>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef HELPER_REGS_H
#define HELPER_REGS_H
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
/* Swap temporary saved registers (TGPR0..TGPR3) with GPR0..GPR3.
 * The TGPR facility shadows exactly these four registers, so a loop
 * replaces the previous four hand-unrolled swaps. */
static inline void hreg_swap_gpr_tgpr(CPUPPCState *env)
{
    int i;

    for (i = 0; i < 4; i++) {
        target_ulong tmp = env->gpr[i];
        env->gpr[i] = env->tgpr[i];
        env->tgpr[i] = tmp;
    }
}
/* Derive the instruction/data MMU indexes (env->immu_idx / env->dmmu_idx)
 * from the current MSR bits, using the encodings described below. */
static inline void hreg_compute_mem_idx(CPUPPCState *env)
{
    /* This is our encoding for server processors. The architecture
     * specifies that there is no such thing as userspace with
     * translation off, however it appears that MacOS does it and
     * some 32-bit CPUs support it. Weird...
     *
     *   0 = Guest User space virtual mode
     *   1 = Guest Kernel space virtual mode
     *   2 = Guest User space real mode
     *   3 = Guest Kernel space real mode
     *   4 = HV User space virtual mode
     *   5 = HV Kernel space virtual mode
     *   6 = HV User space real mode
     *   7 = HV Kernel space real mode
     *
     * For BookE, we need 8 MMU modes as follow:
     *
     *  0 = AS 0 HV User space
     *  1 = AS 0 HV Kernel space
     *  2 = AS 1 HV User space
     *  3 = AS 1 HV Kernel space
     *  4 = AS 0 Guest User space
     *  5 = AS 0 Guest Kernel space
     *  6 = AS 1 Guest User space
     *  7 = AS 1 Guest Kernel space
     */
    if (env->mmu_model & POWERPC_MMU_BOOKE) {
        /* BookE: address space from MSR.IS/DS, guest/HV from MSR.GS */
        env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1;
        env->immu_idx += msr_is ? 2 : 0;
        env->dmmu_idx += msr_ds ? 2 : 0;
        env->immu_idx += msr_gs ? 4 : 0;
        env->dmmu_idx += msr_gs ? 4 : 0;
    } else {
        /* Server: real mode from MSR.IR/DR, hypervisor from MSR.HV */
        env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1;
        env->immu_idx += msr_ir ? 0 : 2;
        env->dmmu_idx += msr_dr ? 0 : 2;
        env->immu_idx += msr_hv ? 4 : 0;
        env->dmmu_idx += msr_hv ? 4 : 0;
    }
}
/* Recompute env->hflags (the TB flags) from the MSR, and refresh the
 * MMU indexes as a side effect. */
static inline void hreg_compute_hflags(CPUPPCState *env)
{
    /* We 'forget' FE0 & FE1: we'll never generate imprecise exceptions */
    target_ulong hflags_mask =
        (1 << MSR_VR) | (1 << MSR_AP) | (1 << MSR_SA) |
        (1 << MSR_PR) | (1 << MSR_FP) | (1 << MSR_SE) | (1 << MSR_BE) |
        (1 << MSR_LE) | (1 << MSR_VSX) | (1 << MSR_IR) | (1 << MSR_DR) |
        (1ULL << MSR_CM) | (1ULL << MSR_SF) | MSR_HVB;

    hreg_compute_mem_idx(env);
    /* MSR-derived flags, merged with hflags coming from other registers */
    env->hflags = (env->msr & hflags_mask) | env->hflags_nmsr;
}
/* Request a CPU_INTERRUPT_EXITTB, taking the iothread lock only when
 * the caller does not already hold it (cpu_interrupt needs the BQL). */
static inline void cpu_interrupt_exittb(CPUState *cs)
{
    bool need_lock = !qemu_mutex_iothread_locked();

    if (need_lock) {
        qemu_mutex_lock_iothread();
    }
    cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
    if (need_lock) {
        qemu_mutex_unlock_iothread();
    }
}
/*
 * Store a new MSR value, applying every architectural side effect of an
 * MSR write: TB invalidation when translation modes change, TGPR swaps,
 * the 601 exception-prefix update, and power-down handling.
 * 'alter_hv' permits the write to change the HV bit.
 * Returns 0, or EXCP_HALTED when the write put the CPU to sleep.
 */
static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
                                 int alter_hv)
{
    int excp;
#if !defined(CONFIG_USER_ONLY)
    CPUState *cs = CPU(ppc_env_get_cpu(env));
#endif

    excp = 0;
    value &= env->msr_mask;
#if !defined(CONFIG_USER_ONLY)
    /* Neither mtmsr nor guest state can alter HV */
    if (!alter_hv || !(env->msr & MSR_HVB)) {
        value &= ~MSR_HVB;
        value |= env->msr & MSR_HVB;
    }
    /* Changing translation mode invalidates cached translations */
    if (((value >> MSR_IR) & 1) != msr_ir ||
        ((value >> MSR_DR) & 1) != msr_dr) {
        cpu_interrupt_exittb(cs);
    }
    if ((env->mmu_model & POWERPC_MMU_BOOKE) &&
        ((value >> MSR_GS) & 1) != msr_gs) {
        cpu_interrupt_exittb(cs);
    }
    if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
                 ((value ^ env->msr) & (1 << MSR_TGPR)))) {
        /* Swap temporary saved registers with GPRs */
        hreg_swap_gpr_tgpr(env);
    }
    /* NOTE(review): unlikely() wraps only the extracted bit, not the
     * comparison with msr_ep; the result happens to be the same because
     * both sides are 0/1, but the parenthesisation looks unintended —
     * confirm against upstream. */
    if (unlikely((value >> MSR_EP) & 1) != msr_ep) {
        /* Change the exception prefix on PowerPC 601 */
        env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
    }
    /* If PR=1 then EE, IR and DR must be 1
     *
     * Note: We only enforce this on 64-bit server processors.
     * It appears that:
     * - 32-bit implementations supports PR=1 and EE/DR/IR=0 and MacOS
     *   exploits it.
     * - 64-bit embedded implementations do not need any operation to be
     *   performed when PR is set.
     */
    if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) {
        value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR);
    }
#endif
    env->msr = value;
    hreg_compute_hflags(env);
#if !defined(CONFIG_USER_ONLY)
    /* POW set with no pending interrupts: the CPU goes to sleep */
    if (unlikely(msr_pow == 1)) {
        if (!env->pending_interrupts && (*env->check_pow)(env)) {
            cs->halted = 1;
            excp = EXCP_HALTED;
        }
    }
#endif
    return excp;
}
#if !defined(CONFIG_USER_ONLY)
/*
 * Perform any TLB flushes deferred in env->tlb_need_flush.
 * A pending global flush is honoured only when 'global' is set, and it
 * also covers any pending local flush.
 */
static inline void check_tlb_flush(CPUPPCState *env, bool global)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
        env->tlb_need_flush &= ~(TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH);
        tlb_flush_all_cpus_synced(cs);
    } else if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
        env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
        tlb_flush(cs);
    }
}
#else
static inline void check_tlb_flush(CPUPPCState *env, bool global) { }
#endif
#endif /* HELPER_REGS_H */
|
pmp-tool/PMP
|
src/qemu/src-pmp/tests/tcg/mips/user/isa/mips64r6/shift/test_mips64r6_srlv.c
|
<filename>src/qemu/src-pmp/tests/tcg/mips/user/isa/mips64r6/shift/test_mips64r6_srlv.c
/*
* Test program for MIPS64R6 instruction SRLV
*
* Copyright (C) 2019 Wave Computing, Inc.
* Copyright (C) 2019 <NAME> <<EMAIL>>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
#include <sys/time.h>
#include <stdint.h>
#include "../../../../include/wrappers_mips64r6.h"
#include "../../../../include/test_inputs_64.h"
#include "../../../../include/test_utils_64.h"
#define TEST_COUNT_TOTAL (PATTERN_INPUTS_64_COUNT + RANDOM_INPUTS_64_COUNT)
/* Run SRLV over the pattern and random input sets and compare against
 * the precomputed expected values, reporting elapsed wall-clock time. */
int32_t main(void)
{
    char *instruction_name = "SRLV";
    int32_t ret;
    uint32_t i, j;
    struct timeval start, end;
    double elapsed_time;

    uint64_t b64_result[TEST_COUNT_TOTAL];
    /* Expected results, one entry per (i, j) input pair below.
     * assumes PATTERN_INPUTS_64_SHORT_COUNT^2 + RANDOM_INPUTS_64_SHORT_COUNT^2
     * == TEST_COUNT_TOTAL — confirm in test_inputs_64.h */
    uint64_t b64_expect[TEST_COUNT_TOTAL] = {
        0x0000000000000001ULL,                    /*   0  */
        0xffffffffffffffffULL,
        0x00000000003fffffULL,
        0x00000000000007ffULL,
        0x00000000000fffffULL,
        0x0000000000001fffULL,
        0x000000000003ffffULL,
        0x0000000000007fffULL,
        0x0000000000000000ULL,                    /*   8  */
        0x0000000000000000ULL,
        0x0000000000000000ULL,
        0x0000000000000000ULL,
        0x0000000000000000ULL,
        0x0000000000000000ULL,
        0x0000000000000000ULL,
        0x0000000000000000ULL,
        0x0000000000000001ULL,                    /*  16  */
        0xffffffffaaaaaaaaULL,
        0x00000000002aaaaaULL,
        0x0000000000000555ULL,
        0x00000000000aaaaaULL,
        0x0000000000001555ULL,
        0x000000000002aaaaULL,
        0x0000000000005555ULL,
        0x0000000000000000ULL,                    /*  24  */
        0x0000000055555555ULL,
        0x0000000000155555ULL,
        0x00000000000002aaULL,
        0x0000000000055555ULL,
        0x0000000000000aaaULL,
        0x0000000000015555ULL,
        0x0000000000002aaaULL,
        0x0000000000000001ULL,                    /*  32  */
        0xffffffffccccccccULL,
        0x0000000000333333ULL,
        0x0000000000000666ULL,
        0x00000000000cccccULL,
        0x0000000000001999ULL,
        0x0000000000033333ULL,
        0x0000000000006666ULL,
        0x0000000000000000ULL,                    /*  40  */
        0x0000000033333333ULL,
        0x00000000000cccccULL,
        0x0000000000000199ULL,
        0x0000000000033333ULL,
        0x0000000000000666ULL,
        0x000000000000ccccULL,
        0x0000000000001999ULL,
        0x0000000000000001ULL,                    /*  48  */
        0xffffffff8e38e38eULL,
        0x0000000000238e38ULL,
        0x0000000000000471ULL,
        0x000000000008e38eULL,
        0x00000000000011c7ULL,
        0x00000000000238e3ULL,
        0x000000000000471cULL,
        0x0000000000000000ULL,                    /*  56  */
        0x0000000071c71c71ULL,
        0x00000000001c71c7ULL,
        0x000000000000038eULL,
        0x0000000000071c71ULL,
        0x0000000000000e38ULL,
        0x000000000001c71cULL,
        0x00000000000038e3ULL,
        0x0000000028625540ULL,                    /*  64  */
        0x0000000000286255ULL,
        0x0000000028625540ULL,
        0x000000000000a189ULL,
        0x000000004d93c708ULL,
        0x00000000004d93c7ULL,
        0x000000004d93c708ULL,
        0x000000000001364fULL,
        0xffffffffb9cf8b80ULL,                    /*  72  */
        0x0000000000b9cf8bULL,
        0xffffffffb9cf8b80ULL,
        0x000000000002e73eULL,
        0x000000005e31e24eULL,
        0x00000000005e31e2ULL,
        0x000000005e31e24eULL,
        0x00000000000178c7ULL,
    };

    gettimeofday(&start, NULL);

    /* Pattern inputs: all (i, j) pairs of the short pattern set */
    for (i = 0; i < PATTERN_INPUTS_64_SHORT_COUNT; i++) {
        for (j = 0; j < PATTERN_INPUTS_64_SHORT_COUNT; j++) {
            do_mips64r6_SRLV(b64_pattern + i, b64_pattern + j,
                b64_result + (PATTERN_INPUTS_64_SHORT_COUNT * i + j));
        }
    }

    /* Random inputs: results stored after the pattern block */
    for (i = 0; i < RANDOM_INPUTS_64_SHORT_COUNT; i++) {
        for (j = 0; j < RANDOM_INPUTS_64_SHORT_COUNT; j++) {
            do_mips64r6_SRLV(b64_random + i, b64_random + j,
                b64_result + (((PATTERN_INPUTS_64_SHORT_COUNT) *
                               (PATTERN_INPUTS_64_SHORT_COUNT)) +
                              RANDOM_INPUTS_64_SHORT_COUNT * i + j));
        }
    }

    gettimeofday(&end, NULL);

    elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0;
    elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0;

    ret = check_results_64(instruction_name, TEST_COUNT_TOTAL, elapsed_time,
                           b64_result, b64_expect);

    return ret;
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/misc/armsse-mhu.c
|
/*
* ARM SSE-200 Message Handling Unit (MHU)
*
* Copyright (c) 2019 Linaro Limited
* Written by <NAME>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 or
* (at your option) any later version.
*/
/*
* This is a model of the Message Handling Unit (MHU) which is part of the
* Arm SSE-200 and documented in
* http://infocenter.arm.com/help/topic/com.arm.doc.101104_0100_00_en/corelink_sse200_subsystem_for_embedded_technical_reference_manual_101104_0100_00_en.pdf
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "hw/registerfields.h"
#include "hw/misc/armsse-mhu.h"
REG32(CPU0INTR_STAT, 0x0)
REG32(CPU0INTR_SET, 0x4)
REG32(CPU0INTR_CLR, 0x8)
REG32(CPU1INTR_STAT, 0x10)
REG32(CPU1INTR_SET, 0x14)
REG32(CPU1INTR_CLR, 0x18)
REG32(PID4, 0xfd0)
REG32(PID5, 0xfd4)
REG32(PID6, 0xfd8)
REG32(PID7, 0xfdc)
REG32(PID0, 0xfe0)
REG32(PID1, 0xfe4)
REG32(PID2, 0xfe8)
REG32(PID3, 0xfec)
REG32(CID0, 0xff0)
REG32(CID1, 0xff4)
REG32(CID2, 0xff8)
REG32(CID3, 0xffc)
/* Valid bits in the interrupt registers. If any are set the IRQ is raised */
#define INTR_MASK 0xf
/* PID/CID values */
/* ID register contents, read back one byte per word through the
 * A_PID4..A_CID3 register window. */
static const int armsse_mhu_id[] = {
    0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */
    0x56, 0xb8, 0x0b, 0x00, /* PID0..PID3 */
    0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */
};
/* Each CPU's IRQ line is asserted while any of its interrupt bits are set */
static void armsse_mhu_update(ARMSSEMHU *s)
{
    qemu_set_irq(s->cpu0irq, s->cpu0intr ? 1 : 0);
    qemu_set_irq(s->cpu1irq, s->cpu1intr ? 1 : 0);
}
/* MMIO read handler: status and ID registers are readable; the SET/CLR
 * registers are write-only and read as zero with a guest-error log. */
static uint64_t armsse_mhu_read(void *opaque, hwaddr offset, unsigned size)
{
    ARMSSEMHU *s = ARMSSE_MHU(opaque);
    uint64_t value = 0;

    switch (offset) {
    case A_CPU0INTR_STAT:
        value = s->cpu0intr;
        break;
    case A_CPU1INTR_STAT:
        value = s->cpu1intr;
        break;
    case A_PID4 ... A_CID3:
        value = armsse_mhu_id[(offset - A_PID4) / 4];
        break;
    case A_CPU0INTR_SET:
    case A_CPU0INTR_CLR:
    case A_CPU1INTR_SET:
    case A_CPU1INTR_CLR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SSE MHU: read of write-only register at offset 0x%x\n",
                      (int)offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SSE MHU read: bad offset 0x%x\n", (int)offset);
        break;
    }

    trace_armsse_mhu_read(offset, value, size);
    return value;
}
/* MMIO write handler: SET/CLR registers modify the interrupt bits
 * (masked to the valid bits) and the IRQ lines are re-evaluated. */
static void armsse_mhu_write(void *opaque, hwaddr offset,
                             uint64_t value, unsigned size)
{
    ARMSSEMHU *s = ARMSSE_MHU(opaque);
    uint32_t bits = value & INTR_MASK;

    trace_armsse_mhu_write(offset, value, size);

    switch (offset) {
    case A_CPU0INTR_SET:
        s->cpu0intr |= bits;
        break;
    case A_CPU0INTR_CLR:
        s->cpu0intr &= ~bits;
        break;
    case A_CPU1INTR_SET:
        s->cpu1intr |= bits;
        break;
    case A_CPU1INTR_CLR:
        s->cpu1intr &= ~bits;
        break;
    case A_CPU0INTR_STAT:
    case A_CPU1INTR_STAT:
    case A_PID4 ... A_CID3:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SSE MHU: write to read-only register at offset 0x%x\n",
                      (int)offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SSE MHU write: bad offset 0x%x\n", (int)offset);
        break;
    }

    armsse_mhu_update(s);
}
/* All registers are 32-bit only; smaller/larger accesses are rejected */
static const MemoryRegionOps armsse_mhu_ops = {
    .read = armsse_mhu_read,
    .write = armsse_mhu_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
};
/* Device reset: both mailboxes start with no interrupts pending */
static void armsse_mhu_reset(DeviceState *dev)
{
    ARMSSEMHU *s = ARMSSE_MHU(dev);

    s->cpu0intr = s->cpu1intr = 0;
}
/* Migration state: the two interrupt registers fully capture the device */
static const VMStateDescription armsse_mhu_vmstate = {
    .name = "armsse-mhu",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(cpu0intr, ARMSSEMHU),
        VMSTATE_UINT32(cpu1intr, ARMSSEMHU),
        VMSTATE_END_OF_LIST()
    },
};
/* Instance init: one 4KiB MMIO region plus one combined IRQ per CPU */
static void armsse_mhu_init(Object *obj)
{
    ARMSSEMHU *mhu = ARMSSE_MHU(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&mhu->iomem, obj, &armsse_mhu_ops,
                          mhu, "armsse-mhu", 0x1000);
    sysbus_init_mmio(sbd, &mhu->iomem);
    sysbus_init_irq(sbd, &mhu->cpu0irq);
    sysbus_init_irq(sbd, &mhu->cpu1irq);
}
/* Class init: wire up reset and migration state */
static void armsse_mhu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &armsse_mhu_vmstate;
    dc->reset = armsse_mhu_reset;
}
/* QOM type registration record for the SSE-200 MHU sysbus device */
static const TypeInfo armsse_mhu_info = {
    .name = TYPE_ARMSSE_MHU,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(ARMSSEMHU),
    .instance_init = armsse_mhu_init,
    .class_init = armsse_mhu_class_init,
};
/* Register the MHU type with QOM at module init time */
static void armsse_mhu_register_types(void)
{
    type_register_static(&armsse_mhu_info);
}

type_init(armsse_mhu_register_types);
|
pmp-tool/PMP
|
src/qemu/src-pmp/roms/openbios/drivers/virtio.c
|
/*
* OpenBIOS virtio-1.0 virtio-blk driver
*
* Copyright (c) 2013 <NAME> <<EMAIL>>
* Copyright (c) 2018 <NAME> <<EMAIL>>
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
* your option) any later version. See the COPYING file in the top-level
* directory.
*/
#include "config.h"
#include "libc/byteorder.h"
#include "libc/vsprintf.h"
#include "libopenbios/bindings.h"
#include "libopenbios/ofmem.h"
#include "kernel/kernel.h"
#include "drivers/drivers.h"
#include "virtio.h"
#define VRING_WAIT_REPLY_TIMEOUT 10000
/* Config-space accessors: cfg_addr is the mapped region base, addr the
 * byte offset within it. */
static uint8_t virtio_cfg_read8(uint64_t cfg_addr, int addr)
{
    uintptr_t reg = (uintptr_t)(cfg_addr + addr);

    return in_8((uint8_t *)reg);
}
/* Write one byte into config space at offset addr */
static void virtio_cfg_write8(uint64_t cfg_addr, int addr, uint8_t value)
{
    uintptr_t reg = (uintptr_t)(cfg_addr + addr);

    out_8((uint8_t *)reg, value);
}
/* Read a little-endian 16-bit config field at offset addr */
static uint16_t virtio_cfg_read16(uint64_t cfg_addr, int addr)
{
    uintptr_t reg = (uintptr_t)(cfg_addr + addr);

    return in_le16((uint16_t *)reg);
}
/* Write a little-endian 16-bit config field at offset addr */
static void virtio_cfg_write16(uint64_t cfg_addr, int addr, uint16_t value)
{
    uintptr_t reg = (uintptr_t)(cfg_addr + addr);

    out_le16((uint16_t *)reg, value);
}
/* Read a little-endian 32-bit config field at offset addr */
static uint32_t virtio_cfg_read32(uint64_t cfg_addr, int addr)
{
    uintptr_t reg = (uintptr_t)(cfg_addr + addr);

    return in_le32((uint32_t *)reg);
}
/* Write a little-endian 32-bit config field at offset addr */
static void virtio_cfg_write32(uint64_t cfg_addr, int addr, uint32_t value)
{
    uintptr_t reg = (uintptr_t)(cfg_addr + addr);

    out_le32((uint32_t *)reg, value);
}
/*
 * Read a 64-bit little-endian config field as two 32-bit halves.
 * The high half lives at offset addr + 4; the previous code added the 4
 * to cfg_addr instead — numerically identical (base and offset are
 * summed inside the accessor) but inconsistent with virtio_cfg_write64
 * and misleading to readers.
 */
static uint64_t virtio_cfg_read64(uint64_t cfg_addr, int addr)
{
    uint64_t q = ((uint64_t)virtio_cfg_read32(cfg_addr, addr + 4) << 32);

    q |= virtio_cfg_read32(cfg_addr, addr);
    return q;
}
/* Write a 64-bit little-endian config field, low half first */
static void virtio_cfg_write64(uint64_t cfg_addr, int addr, uint64_t value)
{
    uint32_t lo = value & 0xffffffff;
    uint32_t hi = (value >> 32) & 0xffffffff;

    virtio_cfg_write32(cfg_addr, addr, lo);
    virtio_cfg_write32(cfg_addr, addr + 4, hi);
}
/*
 * Ring the doorbell for queue vq_idx: the queue's notify offset (Q_NOFF)
 * is scaled by notify_mult per the virtio-1.0 notification capability.
 * NOTE(review): Q_NOFF is read for the queue currently selected via
 * Q_SELECT — assumes the selection from setup is still valid (true for
 * the single queue used here); confirm if nr_vqs ever grows.
 * 'cookie' is unused; always returns 0.
 */
static long virtio_notify(VDev *vdev, int vq_idx, long cookie)
{
    uint16_t notify_offset = virtio_cfg_read16(vdev->common_cfg, VIRTIO_PCI_COMMON_Q_NOFF);

    virtio_cfg_write16(vdev->notify_base, notify_offset +
                       vq_idx * vdev->notify_mult, vq_idx);
    return 0;
}
/***********************************************
* Virtio functions *
***********************************************/
/*
 * Lay out a vring inside the queue memory described by info:
 * descriptor table first, then the avail ring, then the used ring
 * rounded up to info->align.
 */
static void vring_init(VRing *vr, VqInfo *info)
{
    void *p = (void *) (uintptr_t)info->queue;

    vr->id = info->index;
    vr->num = info->num;
    vr->desc = p;
    vr->avail = (void *)((uintptr_t)p + info->num * sizeof(VRingDesc));
    vr->used = (void *)(((unsigned long)&vr->avail->ring[info->num]
               + info->align - 1) & ~(info->align - 1));

    /* Zero out all relevant field */
    vr->avail->flags = __cpu_to_le16(0);
    vr->avail->idx = __cpu_to_le16(0);

    /* We're running with interrupts off anyways, so don't bother */
    vr->used->flags = __cpu_to_le16(VRING_USED_F_NO_NOTIFY);
    vr->used->idx = __cpu_to_le16(0);
    /* Driver-side bookkeeping: last seen used index, next free slot */
    vr->used_idx = 0;
    vr->next_idx = 0;
    vr->cookie = 0;
}
/* Notify the host about this ring; thin wrapper around virtio_notify() */
static int vring_notify(VDev *vdev, VRing *vr)
{
    return virtio_notify(vdev, vr->id, vr->cookie);
}
/*
 * Queue one buffer in the descriptor ring. p/len describe the buffer;
 * flags are VRING_DESC_F_* plus the driver-internal
 * VRING_HIDDEN_IS_CHAIN marker, which keeps follow-up descriptors of a
 * chain out of the avail ring.
 */
static void vring_send_buf(VRing *vr, uint64_t p, int len, int flags)
{
    /* For follow-up chains we need to keep the first entry point */
    if (!(flags & VRING_HIDDEN_IS_CHAIN)) {
        vr->avail->ring[__le16_to_cpu(vr->avail->idx) % vr->num] =
            __cpu_to_le16(vr->next_idx);
    }

    vr->desc[vr->next_idx].addr = __cpu_to_le64(p);
    vr->desc[vr->next_idx].len = __cpu_to_le32(len);
    vr->desc[vr->next_idx].flags = __cpu_to_le16(flags & ~VRING_HIDDEN_IS_CHAIN);
    /*
     * Link to the following descriptor slot. The original stored
     * next_idx and then incremented it through a second store with an
     * LE round-trip; a single store of next_idx + 1 is equivalent.
     */
    vr->desc[vr->next_idx].next = __cpu_to_le16(vr->next_idx + 1);
    vr->next_idx++;

    /* Chains only have a single ID */
    if (!(flags & VRING_DESC_F_NEXT)) {
        vr->avail->idx = __cpu_to_le16(__le16_to_cpu(vr->avail->idx) + 1);
    }
}
/*
 * Poll one ring for completion. Returns 1 and resets the ring for reuse
 * once the host has consumed our buffers; otherwise re-notifies the
 * host and returns 0.
 */
static int vr_poll(VDev *vdev, VRing *vr)
{
    if (__le16_to_cpu(vr->used->idx) == vr->used_idx) {
        /* No progress yet: kick the host again */
        vring_notify(vdev, vr);
        return 0;
    }

    vr->used_idx = __le16_to_cpu(vr->used->idx);
    vr->next_idx = 0;
    vr->desc[0].len = __cpu_to_le32(0);
    vr->desc[0].flags = __cpu_to_le16(0);

    return 1; /* vr has been updated */
}
/*
 * Wait for the host to reply.
 *
 * timeout is in msecs if > 0.
 *
 * Returns 0 on success, 1 on timeout.
 */
static int vring_wait_reply(VDev *vdev)
{
    ucell target_ms, get_ms;

    /* "get-msecs" leaves the current millisecond tick on the Forth stack */
    fword("get-msecs");
    target_ms = POP();
    target_ms += vdev->wait_reply_timeout;

    /* Wait for any queue to be updated by the host */
    do {
        int i, r = 0;

        for (i = 0; i < vdev->nr_vqs; i++) {
            r += vr_poll(vdev, &vdev->vrings[i]);
        }
        if (r) {
            return 0;
        }
        fword("get-msecs");
        get_ms = POP();
        /* A timeout of 0 means "wait forever" */
    } while (!vdev->wait_reply_timeout || (get_ms < target_ms));

    return 1;
}
/* Translate a virtual pointer to the IOVA the device must use.
 * 'vdev' is unused but kept for interface symmetry with the callers. */
static uint64_t vring_addr_translate(VDev *vdev, void *p)
{
    ucell mode;

    return ofmem_translate(pointer2cell(p), &mode);
}
/***********************************************
* Virtio block *
***********************************************/
/*
 * Read 'len' bytes at byte offset 'offset' from a virtio-block device
 * into load_addr using one descriptor chain on the command queue.
 * Byte-granular access is achieved by reading whole sectors and
 * discarding the unwanted head/tail into a scratch buffer.
 * Returns the status byte written by the device (0 = OK).
 * NOTE(review): head/tail math assumes the block size is a power of
 * two no larger than VIRTIO_SECTOR_SIZE, and 'status' is returned
 * uninitialised if the request times out — confirm.
 */
static int virtio_blk_read_many(VDev *vdev,
                                uint64_t offset, void *load_addr, int len)
{
    VirtioBlkOuthdr out_hdr;
    u8 status;
    VRing *vr = &vdev->vrings[vdev->cmd_vr_idx];
    uint8_t discard[VIRTIO_SECTOR_SIZE];
    uint64_t start_sector = offset / virtio_get_block_size(vdev);
    int head_len = offset & (virtio_get_block_size(vdev) - 1);
    uint64_t end_sector = (offset + len + virtio_get_block_size(vdev) - 1) /
                          virtio_get_block_size(vdev);
    int tail_len = end_sector * virtio_get_block_size(vdev) - (offset + len);

    /* Tell the host we want to read */
    out_hdr.type = __cpu_to_le32(VIRTIO_BLK_T_IN);
    out_hdr.ioprio = __cpu_to_le32(99);
    out_hdr.sector = __cpu_to_le64(virtio_sector_adjust(vdev, start_sector));

    vring_send_buf(vr, vring_addr_translate(vdev, &out_hdr), sizeof(out_hdr),
                   VRING_DESC_F_NEXT);

    /* Discarded head */
    if (head_len) {
        vring_send_buf(vr, vring_addr_translate(vdev, &discard), head_len,
                       VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN |
                       VRING_DESC_F_NEXT);
    }

    /* This is where we want to receive data */
    vring_send_buf(vr, vring_addr_translate(vdev, load_addr), len,
                   VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN |
                   VRING_DESC_F_NEXT);

    /* Discarded tail */
    if (tail_len) {
        vring_send_buf(vr, vring_addr_translate(vdev, &discard), tail_len,
                       VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN |
                       VRING_DESC_F_NEXT);
    }

    /* status field */
    vring_send_buf(vr, vring_addr_translate(vdev, &status), sizeof(u8),
                   VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN);

    /* Now we can tell the host to read */
    vring_wait_reply(vdev);

    return status;
}
/* Dispatch a multi-sector read by device type; only virtio-block is
 * supported. Returns -1 for any other device. */
int virtio_read_many(VDev *vdev, uint64_t offset, void *load_addr, int len)
{
    if (vdev->senseid == VIRTIO_ID_BLOCK) {
        return virtio_blk_read_many(vdev, offset, load_addr, len);
    }
    return -1;
}
/* Convenience wrapper: read len bytes at byte offset into load_addr */
static int virtio_read(VDev *vdev, uint64_t offset, void *load_addr, int len)
{
    return virtio_read_many(vdev, offset, load_addr, len);
}
/* Effective block size in bytes (logical size scaled by the physical
 * block exponent); 0 for unsupported device types. */
int virtio_get_block_size(VDev *vdev)
{
    if (vdev->senseid == VIRTIO_ID_BLOCK) {
        return vdev->config.blk.blk_size << vdev->config.blk.physical_block_exp;
    }
    return 0;
}
/*
 * Bring the device to DRIVER_OK following the virtio-1.0 init sequence:
 * ACKNOWLEDGE/DRIVER, feature negotiation (only VIRTIO_F_VERSION_1 is
 * accepted), FEATURES_OK, per-queue setup, then DRIVER_OK.
 * Only virtio-block with a single command queue is configured.
 */
static void
ob_virtio_configure_device(VDev *vdev)
{
    uint32_t feature;
    uint8_t status;
    int i;

    /* Indicate we recognise the device */
    status = virtio_cfg_read8(vdev->common_cfg, VIRTIO_PCI_COMMON_STATUS);
    status |= VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
    virtio_cfg_write8(vdev->common_cfg, VIRTIO_PCI_COMMON_STATUS, status);

    /* Negotiate features: acknowledge VIRTIO_F_VERSION_1 for 1.0 specification
       little-endian access */
    /* DFSELECT/GFSELECT = 1 selects feature bits 32..63 */
    virtio_cfg_write32(vdev->common_cfg, VIRTIO_PCI_COMMON_DFSELECT, 0x1);
    virtio_cfg_write32(vdev->common_cfg, VIRTIO_PCI_COMMON_GFSELECT, 0x1);
    feature = virtio_cfg_read32(vdev->common_cfg, VIRTIO_PCI_COMMON_DF);
    feature &= (1ULL << (VIRTIO_F_VERSION_1 - 32));
    virtio_cfg_write32(vdev->common_cfg, VIRTIO_PCI_COMMON_GF, feature);

    status = virtio_cfg_read8(vdev->common_cfg, VIRTIO_PCI_COMMON_STATUS);
    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    virtio_cfg_write8(vdev->common_cfg, VIRTIO_PCI_COMMON_STATUS, status);

    /* Driver-side defaults for a single-queue virtio-block device */
    vdev->senseid = VIRTIO_ID_BLOCK;
    vdev->nr_vqs = 1;
    vdev->cmd_vr_idx = 0;
    vdev->wait_reply_timeout = VRING_WAIT_REPLY_TIMEOUT;
    vdev->scsi_block_size = VIRTIO_SCSI_BLOCK_SIZE;
    vdev->blk_factor = 1;

    for (i = 0; i < vdev->nr_vqs; i++) {
        VqInfo info = {
            .queue = (uintptr_t) vdev->ring_area + (i * VIRTIO_RING_SIZE),
            .align = VIRTIO_PCI_VRING_ALIGN,
            .index = i,
            .num = 0,
        };

        /* Select the queue to learn its size, then lay out the vring */
        virtio_cfg_write16(vdev->common_cfg, VIRTIO_PCI_COMMON_Q_SELECT, i);
        info.num = virtio_cfg_read16(vdev->common_cfg, VIRTIO_PCI_COMMON_Q_SIZE);

        vring_init(&vdev->vrings[i], &info);

        /* Set block information */
        vdev->guessed_disk_nature = VIRTIO_GDN_NONE;
        vdev->config.blk.blk_size = VIRTIO_SECTOR_SIZE;
        vdev->config.blk.physical_block_exp = 0;

        /* Read sectors */
        vdev->config.blk.capacity = virtio_cfg_read64(vdev->device_cfg, 0);

        /* Set queue addresses */
        virtio_cfg_write64(vdev->common_cfg, VIRTIO_PCI_COMMON_Q_DESCLO,
                           vring_addr_translate(vdev, &vdev->vrings[i].desc[0]));

        virtio_cfg_write64(vdev->common_cfg, VIRTIO_PCI_COMMON_Q_AVAILLO,
                           vring_addr_translate(vdev, &vdev->vrings[i].avail[0]));

        virtio_cfg_write64(vdev->common_cfg, VIRTIO_PCI_COMMON_Q_USEDLO,
                           vring_addr_translate(vdev, &vdev->vrings[i].used[0]));

        /* Enable queue */
        virtio_cfg_write16(vdev->common_cfg, VIRTIO_PCI_COMMON_Q_ENABLE, 1);
    }

    /* Initialisation complete */
    status |= VIRTIO_CONFIG_S_DRIVER_OK;
    virtio_cfg_write8(vdev->common_cfg, VIRTIO_PCI_COMMON_STATUS, status);

    vdev->configured = 1;
}
/*
 * "open" method of the disk node: configure the device on first open,
 * then interpose the disk-label package to parse partition arguments.
 */
static void
ob_virtio_disk_open(VDev **_vdev)
{
    VDev *vdev = *_vdev;
    phandle_t ph;

    vdev->pos = 0;

    if (!vdev->configured) {
        ob_virtio_configure_device(vdev);
    }

    /* interpose disk-label */
    ph = find_dev("/packages/disk-label");
    fword("my-args");
    PUSH_ph( ph );
    fword("interpose");

    /* Report success to the caller */
    RET(-1);
}
/* "close" method: nothing to release — the device stays configured */
static void
ob_virtio_disk_close(VDev **_vdev)
{
    return;
}
/* ( pos.d -- status ) */
static void
ob_virtio_disk_seek(VDev **_vdev)
{
    VDev *vdev = *_vdev;
    uint64_t pos, capacity_bytes;

    /* Pop the 64-bit position as two cells, high word first */
    pos = ((uint64_t)POP()) << 32;
    pos |= POP();

    /* Make sure we are within the physical limits */
    capacity_bytes = vdev->config.blk.capacity * virtio_get_block_size(vdev);
    if (pos < capacity_bytes) {
        vdev->pos = pos;
        PUSH(0);   /* success */
    } else {
        PUSH(1);   /* out of range */
    }
}
/* ( addr len -- actual ) */
static void
ob_virtio_disk_read(VDev **_vdev)
{
    VDev *vdev = *_vdev;
    ucell len = POP();
    uint8_t *addr = (uint8_t *)POP();

    /* NOTE(review): the return value of virtio_read is ignored, so a
     * failed read is still reported to Forth as a full-length read —
     * confirm whether callers rely on 'actual' for error detection. */
    virtio_read(vdev, vdev->pos, addr, len);

    vdev->pos += len;

    PUSH(len);
}
/* Publish a "virtioN" alias for the device at 'path' under /aliases.
 * NOTE(review): the 9-byte buffer fits indexes up to 99; larger indexes
 * are silently truncated by snprintf — confirm the expected range. */
static void set_virtio_alias(const char *path, int idx)
{
    char alias[9];
    phandle_t aliases = find_dev("/aliases");

    snprintf(alias, sizeof(alias), "virtio%d", idx);
    set_property(aliases, alias, path, strlen(path) + 1);
}
/*
 * Node instance init: recover the VDev pointer stashed as the "_vdev"
 * integer property by ob_virtio_init(), then delete the property so it
 * does not leak into the device tree.
 */
static void
ob_virtio_disk_initialize(VDev **_vdev)
{
    phandle_t ph = get_cur_dev();
    VDev *vdev;
    int len;

    vdev = cell2pointer(get_int_property(ph, "_vdev", &len));

    push_str("_vdev");
    feval("delete-property");

    *_vdev = vdev;
}
/* Forth method table for the "disk" child node; the NULL-named entry is
 * the per-instance initializer. Instance data is a single VDev pointer. */
DECLARE_UNNAMED_NODE(ob_virtio_disk, 0, sizeof(VDev *));

NODE_METHODS(ob_virtio_disk) = {
    { NULL,    ob_virtio_disk_initialize },
    { "open",  ob_virtio_disk_open },
    { "close", ob_virtio_disk_close },
    { "seek",  ob_virtio_disk_seek },
    { "read",  ob_virtio_disk_read },
};
/* "open" method of the controller node: always succeeds */
static void
ob_virtio_open(VDev **_vdev)
{
    PUSH(-1);
}

/* "close" method of the controller node: nothing to do */
static void
ob_virtio_close(VDev **_vdev)
{
    return;
}

/* "vdev" method: push the address of the instance's VDev pointer slot,
 * so ob_virtio_init() can fill it in after opening the node */
static void
ob_virtio_vdev(VDev **_vdev)
{
    PUSH(pointer2cell(_vdev));
}

/* DMA methods simply forward to the parent bus implementation */
static void
ob_virtio_dma_alloc(__attribute__((unused)) VDev **_vdev)
{
    call_parent_method("dma-alloc");
}

static void
ob_virtio_dma_free(__attribute__((unused)) VDev **_vdev)
{
    call_parent_method("dma-free");
}

static void
ob_virtio_dma_map_in(__attribute__((unused)) VDev **_vdev)
{
    call_parent_method("dma-map-in");
}

static void
ob_virtio_dma_map_out(__attribute__((unused)) VDev **_vdev)
{
    call_parent_method("dma-map-out");
}

static void
ob_virtio_dma_sync(__attribute__((unused)) VDev **_vdev)
{
    call_parent_method("dma-sync");
}
/* Forth method table for the virtio controller node itself */
DECLARE_UNNAMED_NODE(ob_virtio, 0, sizeof(VDev *));

NODE_METHODS(ob_virtio) = {
    { "open",          ob_virtio_open },
    { "close",         ob_virtio_close },
    { "vdev",          ob_virtio_vdev },
    { "dma-alloc",     ob_virtio_dma_alloc },
    { "dma-free",      ob_virtio_dma_free },
    { "dma-map-in",    ob_virtio_dma_map_in },
    { "dma-map-out",   ob_virtio_dma_map_out },
    { "dma-sync",      ob_virtio_dma_sync },
};
/*
 * Register a virtio device at 'path': create the controller node,
 * allocate DMA memory for the vrings and ring area, attach a "disk"
 * child node and publish a "virtioN" alias for it. The VDev pointer is
 * handed to the child via a transient "_vdev" int property (consumed by
 * ob_virtio_disk_initialize).
 * NOTE(review): the malloc() and dma-alloc results are not checked for
 * failure — confirm whether OpenBIOS policy is abort-on-OOM here.
 */
void ob_virtio_init(const char *path, const char *dev_name, uint64_t common_cfg,
                    uint64_t device_cfg, uint64_t notify_base, uint32_t notify_mult,
                    int idx)
{
    char buf[256];
    phandle_t ph;
    ucell addr;
    VDev *vdev, **_vdev;

    REGISTER_NODE_METHODS(ob_virtio, path);

    /* Open ob_virtio */
    fword("my-self");
    push_str(path);
    feval("open-dev to my-self");

    /* Fetch the instance's VDev pointer slot via the "vdev" method */
    ph = find_ih_method("vdev", my_self());
    PUSH(ph);
    fword("execute");
    _vdev = cell2pointer(POP());

    vdev = malloc(sizeof(VDev));
    vdev->common_cfg = common_cfg;
    vdev->device_cfg = device_cfg;
    vdev->notify_base = notify_base;
    vdev->notify_mult = notify_mult;

    vdev->configured = 0;

    /* DMA-visible memory for the VRing bookkeeping and the rings proper */
    PUSH(sizeof(VRing) * VIRTIO_MAX_VQS);
    feval("dma-alloc");
    addr = POP();
    vdev->vrings = cell2pointer(addr);

    PUSH(VIRTIO_RING_SIZE * VIRTIO_MAX_VQS);
    feval("dma-alloc");
    addr = POP();
    vdev->ring_area = cell2pointer(addr);

    *_vdev = vdev;
    feval("to my-self");

    /* Create the "disk" child node carrying the VDev pointer */
    fword("new-device");
    push_str("disk");
    fword("device-name");
    push_str("block");
    fword("device-type");
    PUSH(pointer2cell(vdev));
    fword("encode-int");
    push_str("_vdev");
    fword("property");
    fword("finish-device");

    snprintf(buf, sizeof(buf), "%s/disk", path);
    REGISTER_NODE_METHODS(ob_virtio_disk, buf);

    set_virtio_alias(buf, idx);
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/target/riscv/cpu.h
|
/*
* QEMU RISC-V CPU
*
* Copyright (c) 2016-2017 <NAME>, <EMAIL>
* Copyright (c) 2017-2018 SiFive, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef RISCV_CPU_H
#define RISCV_CPU_H
/* QEMU addressing/paging config */
#define TARGET_PAGE_BITS 12 /* 4 KiB Pages */
#if defined(TARGET_RISCV64)
#define TARGET_LONG_BITS 64
#define TARGET_PHYS_ADDR_SPACE_BITS 56 /* 44-bit PPN */
#define TARGET_VIRT_ADDR_SPACE_BITS 48 /* sv48 */
#elif defined(TARGET_RISCV32)
#define TARGET_LONG_BITS 32
#define TARGET_PHYS_ADDR_SPACE_BITS 34 /* 22-bit PPN */
#define TARGET_VIRT_ADDR_SPACE_BITS 32 /* sv32 */
#endif
#define TCG_GUEST_DEFAULT_MO 0
#define CPUArchState struct CPURISCVState
#include "qemu-common.h"
#include "qom/cpu.h"
#include "exec/cpu-defs.h"
#include "fpu/softfloat.h"
#define TYPE_RISCV_CPU "riscv-cpu"
#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
#define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any")
#define TYPE_RISCV_CPU_RV32GCSU_V1_09_1 RISCV_CPU_TYPE_NAME("rv32gcsu-v1.9.1")
#define TYPE_RISCV_CPU_RV32GCSU_V1_10_0 RISCV_CPU_TYPE_NAME("rv32gcsu-v1.10.0")
#define TYPE_RISCV_CPU_RV32IMACU_NOMMU RISCV_CPU_TYPE_NAME("rv32imacu-nommu")
#define TYPE_RISCV_CPU_RV64GCSU_V1_09_1 RISCV_CPU_TYPE_NAME("rv64gcsu-v1.9.1")
#define TYPE_RISCV_CPU_RV64GCSU_V1_10_0 RISCV_CPU_TYPE_NAME("rv64gcsu-v1.10.0")
#define TYPE_RISCV_CPU_RV64IMACU_NOMMU RISCV_CPU_TYPE_NAME("rv64imacu-nommu")
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
#define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51")
#define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34")
#define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54")
#define RV32 ((target_ulong)1 << (TARGET_LONG_BITS - 2))
#define RV64 ((target_ulong)2 << (TARGET_LONG_BITS - 2))
#if defined(TARGET_RISCV32)
#define RVXLEN RV32
#elif defined(TARGET_RISCV64)
#define RVXLEN RV64
#endif
#define RV(x) ((target_ulong)1 << (x - 'A'))
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
/* S extension denotes that Supervisor mode exists, however it is possible
to have a core that support S mode but does not have an MMU and there
is currently no bit in misa to indicate whether an MMU exists or not
so a cpu features bitfield is required, likewise for optional PMP support */
enum {
RISCV_FEATURE_MMU,
RISCV_FEATURE_PMP,
RISCV_FEATURE_MISA
};
#define USER_VERSION_2_02_0 0x00020200
#define PRIV_VERSION_1_09_1 0x00010901
#define PRIV_VERSION_1_10_0 0x00011000
#define TRANSLATE_FAIL 1
#define TRANSLATE_SUCCESS 0
#define NB_MMU_MODES 4
#define MMU_USER_IDX 3
#define MAX_RISCV_PMPS (16)
typedef struct CPURISCVState CPURISCVState;
#include "pmp.h"
/*
 * Architectural and emulation state of one RISC-V hart.  Everything up
 * to CPU_COMMON is reset state; fields after it survive CPU reset (see
 * the comment below).
 */
struct CPURISCVState {
    target_ulong gpr[32];
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;
    target_ulong frm;
    target_ulong badaddr;
    target_ulong user_ver;
    target_ulong priv_ver;
    target_ulong misa;
    target_ulong misa_mask;
    uint32_t features;          /* RISCV_FEATURE_* bit mask */
#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif
#ifndef CONFIG_USER_ONLY
    target_ulong priv;          /* current privilege level */
    target_ulong resetvec;
    target_ulong mhartid;
    target_ulong mstatus;
    /*
     * CAUTION! Unlike the rest of this struct, mip is accessed asynchronously
     * by I/O threads. It should be read with atomic_read. It should be updated
     * using riscv_cpu_update_mip with the iothread mutex held. The iothread
     * mutex must be held because mip must be consistent with the CPU interrupt
     * state. riscv_cpu_update_mip calls cpu_interrupt or cpu_reset_interrupt
     * with the invariant that CPU_INTERRUPT_HARD is set iff mip is non-zero.
     * mip is 32-bits to allow atomic_read on 32-bit hosts.
     */
    uint32_t mip;
    uint32_t miclaim;
    target_ulong mie;
    target_ulong mideleg;
    target_ulong sptbr;  /* until: priv-1.9.1 */
    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong sbadaddr;
    target_ulong mbadaddr;
    target_ulong medeleg;
    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;
    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */
    target_ulong scounteren;
    target_ulong mcounteren;
    target_ulong sscratch;
    target_ulong mscratch;
    /* temporary htif regs */
    uint64_t mfromhost;
    uint64_t mtohost;
    uint64_t timecmp;
    /* physical memory protection */
    pmp_table_t pmp_state;
    /* True if in debugger mode.  */
    bool debugger;
#endif
    float_status fp_status;
    /* QEMU */
    CPU_COMMON
    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *timer; /* Internal timer */
};
#define RISCV_CPU_CLASS(klass) \
OBJECT_CLASS_CHECK(RISCVCPUClass, (klass), TYPE_RISCV_CPU)
#define RISCV_CPU(obj) \
OBJECT_CHECK(RISCVCPU, (obj), TYPE_RISCV_CPU)
#define RISCV_CPU_GET_CLASS(obj) \
OBJECT_GET_CLASS(RISCVCPUClass, (obj), TYPE_RISCV_CPU)
/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_reset: The parent class' reset handler.
 *
 * A RISCV CPU model.  Subclass hooks save the parent implementations so
 * they can chain to them.
 */
typedef struct RISCVCPUClass {
    /*< private >*/
    CPUClass parent_class;
    /*< public >*/
    DeviceRealize parent_realize;
    void (*parent_reset)(CPUState *cpu);
} RISCVCPUClass;

/**
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISCV CPU.  @env is embedded so container_of can recover the CPU
 * from a CPURISCVState pointer (see riscv_env_get_cpu).
 */
typedef struct RISCVCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/
    CPURISCVState env;
} RISCVCPU;
/* Recover the owning RISCVCPU from its embedded @env member. */
static inline RISCVCPU *riscv_env_get_cpu(CPURISCVState *env)
{
    RISCVCPU *cpu = container_of(env, RISCVCPU, env);

    return cpu;
}
/* Non-zero iff any of the extension bits in @ext are set in misa. */
static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return !!(env->misa & ext);
}
/* Test one RISCV_FEATURE_* bit in the per-CPU feature mask. */
static inline bool riscv_feature(CPURISCVState *env, int feature)
{
    uint64_t mask = 1ULL << feature;

    return (env->features & mask) != 0;
}
#include "cpu_user.h"
#include "cpu_bits.h"
extern const char * const riscv_int_regnames[];
extern const char * const riscv_fpr_regnames[];
extern const char * const riscv_excp_names[];
extern const char * const riscv_intr_names[];
#define ENV_GET_CPU(e) CPU(riscv_env_get_cpu(e))
#define ENV_OFFSET offsetof(RISCVCPU, env)
void riscv_cpu_do_interrupt(CPUState *cpu);
int riscv_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type, int mmu_idx,
uintptr_t retaddr);
int riscv_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
int rw, int mmu_idx);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(FILE *f, fprintf_function cpu_fprintf);
#define cpu_signal_handler riscv_cpu_signal_handler
#define cpu_list riscv_cpu_list
#define cpu_mmu_index riscv_cpu_mmu_index
#ifndef CONFIG_USER_ONLY
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts);
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
#endif
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);
void riscv_translate_init(void);
int riscv_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env,
uint32_t exception, uintptr_t pc);
target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
#define TB_FLAGS_MMU_MASK 3
#define TB_FLAGS_MSTATUS_FS MSTATUS_FS
/*
 * Produce the (pc, cs_base, flags) triple that keys translation-block
 * lookup.  cs_base is unused on RISC-V and always 0.  flags encode the
 * current MMU index plus the mstatus.FS (FP dirty/enable) field; in
 * user-only builds FP is unconditionally reported enabled.
 */
static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *flags)
{
    *pc = env->pc;
    *cs_base = 0;
#ifdef CONFIG_USER_ONLY
    *flags = TB_FLAGS_MSTATUS_FS;
#else
    *flags = cpu_mmu_index(env, 0) | (env->mstatus & MSTATUS_FS);
#endif
}
int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value,
target_ulong new_value, target_ulong write_mask);
int riscv_csrrw_debug(CPURISCVState *env, int csrno, target_ulong *ret_value,
target_ulong new_value, target_ulong write_mask);
/* Overwrite every bit of a CSR, discarding the previous value. */
static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    target_ulong all_bits = MAKE_64BIT_MASK(0, TARGET_LONG_BITS);

    riscv_csrrw(env, csrno, NULL, val, all_bits);
}
/* Read a CSR without modifying it (zero write mask). */
static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong ret = 0;

    riscv_csrrw(env, csrno, &ret, 0, 0);

    return ret;
}
/* Per-CSR callback signatures used by the CSR dispatch table. */
typedef int (*riscv_csr_predicate_fn)(CPURISCVState *env, int csrno);
typedef int (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value);
typedef int (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                  target_ulong new_value);
typedef int (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                               target_ulong *ret_value, target_ulong new_value,
                               target_ulong write_mask);

typedef struct {
    riscv_csr_predicate_fn predicate; /* is this CSR accessible right now? */
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;               /* combined read/modify/write handler */
} riscv_csr_operations;
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
#include "exec/cpu-all.h"
#endif /* RISCV_CPU_H */
|
pmp-tool/PMP
|
src/qemu/src-pmp/slirp/src/qtailq.h
|
/* SPDX-License-Identifier: BSD-3-Clause */
/* $NetBSD: queue.h,v 1.52 2009/04/20 09:56:08 mschuett Exp $ */
/*
* slirp version: Copy from QEMU, removed all but tail queues.
*/
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
*/
#ifndef QTAILQ_H
#define QTAILQ_H
/*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*/
typedef struct QTailQLink {
    void *tql_next;              /* next element (type-erased) */
    struct QTailQLink *tql_prev; /* previous link of the circular backwards list */
} QTailQLink;
/*
* Tail queue definitions. The union acts as a poor man template, as if
* it were QTailQLink<type>.
*/
#define QTAILQ_HEAD(name, type) \
union name { \
struct type *tqh_first; /* first element */ \
QTailQLink tqh_circ; /* link for circular backwards list */ \
}
#define QTAILQ_HEAD_INITIALIZER(head) \
{ .tqh_circ = { NULL, &(head).tqh_circ } }
#define QTAILQ_ENTRY(type) \
union { \
struct type *tqe_next; /* next element */ \
QTailQLink tqe_circ; /* link for circular backwards list */ \
}
#define QTAILQ_INIT(head) do { \
(head)->tqh_first = NULL; \
(head)->tqh_circ.tql_prev = &(head)->tqh_circ; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
(head)->tqh_first->field.tqe_circ.tql_prev = \
&(elm)->field.tqe_circ; \
else \
(head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
(head)->tqh_first = (elm); \
(elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.tqe_next = NULL; \
(elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev; \
(head)->tqh_circ.tql_prev->tql_next = (elm); \
(head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
(elm)->field.tqe_next->field.tqe_circ.tql_prev = \
&(elm)->field.tqe_circ; \
else \
(head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \
(listelm)->field.tqe_next = (elm); \
(elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)
#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev; \
(elm)->field.tqe_next = (listelm); \
(listelm)->field.tqe_circ.tql_prev->tql_next = (elm); \
(listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)
#define QTAILQ_REMOVE(head, elm, field) do { \
if (((elm)->field.tqe_next) != NULL) \
(elm)->field.tqe_next->field.tqe_circ.tql_prev = \
(elm)->field.tqe_circ.tql_prev; \
else \
(head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \
(elm)->field.tqe_circ.tql_prev->tql_next = (elm)->field.tqe_next; \
(elm)->field.tqe_circ.tql_prev = NULL; \
} while (/*CONSTCOND*/0)
#define QTAILQ_FOREACH(var, head, field) \
for ((var) = ((head)->tqh_first); \
(var); \
(var) = ((var)->field.tqe_next))
#define QTAILQ_FOREACH_SAFE(var, head, field, next_var) \
for ((var) = ((head)->tqh_first); \
(var) && ((next_var) = ((var)->field.tqe_next), 1); \
(var) = (next_var))
#define QTAILQ_FOREACH_REVERSE(var, head, field) \
for ((var) = QTAILQ_LAST(head); \
(var); \
(var) = QTAILQ_PREV(var, field))
#define QTAILQ_FOREACH_REVERSE_SAFE(var, head, field, prev_var) \
for ((var) = QTAILQ_LAST(head); \
(var) && ((prev_var) = QTAILQ_PREV(var, field)); \
(var) = (prev_var))
/*
* Tail queue access methods.
*/
#define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define QTAILQ_FIRST(head) ((head)->tqh_first)
#define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define QTAILQ_IN_USE(elm, field) ((elm)->field.tqe_circ.tql_prev != NULL)
#define QTAILQ_LINK_PREV(link) \
((link).tql_prev->tql_prev->tql_next)
#define QTAILQ_LAST(head) \
((typeof((head)->tqh_first)) QTAILQ_LINK_PREV((head)->tqh_circ))
#define QTAILQ_PREV(elm, field) \
((typeof((elm)->field.tqe_next)) QTAILQ_LINK_PREV((elm)->field.tqe_circ))
#define field_at_offset(base, offset, type) \
((type *) (((char *) (base)) + (offset)))
/*
* Raw access of elements of a tail queue head. Offsets are all zero
* because it's a union.
*/
#define QTAILQ_RAW_FIRST(head) \
field_at_offset(head, 0, void *)
#define QTAILQ_RAW_TQH_CIRC(head) \
field_at_offset(head, 0, QTailQLink)
/*
* Raw access of elements of a tail entry
*/
#define QTAILQ_RAW_NEXT(elm, entry) \
field_at_offset(elm, entry, void *)
#define QTAILQ_RAW_TQE_CIRC(elm, entry) \
field_at_offset(elm, entry, QTailQLink)
/*
* Tail queue traversal using pointer arithmetic.
*/
#define QTAILQ_RAW_FOREACH(elm, head, entry) \
for ((elm) = *QTAILQ_RAW_FIRST(head); \
(elm); \
(elm) = *QTAILQ_RAW_NEXT(elm, entry))
/*
* Tail queue insertion using pointer arithmetic.
*/
#define QTAILQ_RAW_INSERT_TAIL(head, elm, entry) do { \
*QTAILQ_RAW_NEXT(elm, entry) = NULL; \
QTAILQ_RAW_TQE_CIRC(elm, entry)->tql_prev = QTAILQ_RAW_TQH_CIRC(head)->tql_prev; \
QTAILQ_RAW_TQH_CIRC(head)->tql_prev->tql_next = (elm); \
QTAILQ_RAW_TQH_CIRC(head)->tql_prev = QTAILQ_RAW_TQE_CIRC(elm, entry); \
} while (/*CONSTCOND*/0)
#endif /* QTAILQ_H */
|
pmp-tool/PMP
|
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/int-subtract/test_msa_subsus_u_h.c
|
/*
* Test program for MSA instruction SUBSUS_U.H
*
* Copyright (C) 2018 Wave Computing, Inc.
* Copyright (C) 2018 <NAME> <<EMAIL>>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
#include <sys/time.h>
#include <stdint.h>
#include "../../../../include/wrappers_msa.h"
#include "../../../../include/test_inputs.h"
#include "../../../../include/test_utils.h"
#define TEST_COUNT_TOTAL ( \
(PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT) + \
(RANDOM_INPUTS_SHORT_COUNT) * (RANDOM_INPUTS_SHORT_COUNT))
/*
 * Run SUBSUS_U.H over every pairing of the pattern inputs and every
 * pairing of the random inputs, then compare all results against the
 * precomputed expectation table and report pass/fail with timing.
 */
int32_t main(void)
{
    char *instruction_name = "SUBSUS_U.H";
    int32_t ret;
    uint32_t i, j;
    struct timeval start, end;
    double elapsed_time;

    /* One 128-bit result per input pair, as two 64-bit halves */
    uint64_t b128_result[TEST_COUNT_TOTAL][2];
    /* Precomputed expected values; indices annotated every 8 rows */
    uint64_t b128_expect[TEST_COUNT_TOTAL][2] = {
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },    /*   0  */
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0xaaaaaaaaaaaaaaaaULL, 0xaaaaaaaaaaaaaaaaULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0xccccccccccccccccULL, 0xccccccccccccccccULL, },
        { 0xffffc71cffffffffULL, 0xc71cffffffffc71cULL, },
        { 0xe38effff8e38e38eULL, 0xffff8e38e38effffULL, },
        { 0x0001000100010001ULL, 0x0001000100010001ULL, },    /*   8  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x5556555655565556ULL, 0x5556555655565556ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x3334333433343334ULL, 0x3334333433343334ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x1c72000071c81c72ULL, 0x000071c81c720000ULL, },
        { 0x000038e400000000ULL, 0x38e40000000038e4ULL, },
        { 0xaaabaaabaaabaaabULL, 0xaaabaaabaaabaaabULL, },    /*  16  */
        { 0xaaaaaaaaaaaaaaaaULL, 0xaaaaaaaaaaaaaaaaULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0x5555555555555555ULL, 0x5555555555555555ULL, },
        { 0xdddedddedddedddeULL, 0xdddedddedddedddeULL, },
        { 0x7777777777777777ULL, 0x7777777777777777ULL, },
        { 0xc71c71c7ffffc71cULL, 0x71c7ffffc71c71c7ULL, },
        { 0x8e39e38e38e38e39ULL, 0xe38e38e38e39e38eULL, },
        { 0x5556555655565556ULL, 0x5556555655565556ULL, },    /*  24  */
        { 0x5555555555555555ULL, 0x5555555555555555ULL, },
        { 0xaaabaaabaaabaaabULL, 0xaaabaaabaaabaaabULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x8889888988898889ULL, 0x8889888988898889ULL, },
        { 0x2222222222222222ULL, 0x2222222222222222ULL, },
        { 0x71c71c72c71d71c7ULL, 0x1c72c71d71c71c72ULL, },
        { 0x38e48e39000038e4ULL, 0x8e39000038e48e39ULL, },
        { 0xcccdcccdcccdcccdULL, 0xcccdcccdcccdcccdULL, },    /*  32  */
        { 0xccccccccccccccccULL, 0xccccccccccccccccULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0x7777777777777777ULL, 0x7777777777777777ULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0x9999999999999999ULL, 0x9999999999999999ULL, },
        { 0xe93e93e9ffffe93eULL, 0x93e9ffffe93e93e9ULL, },
        { 0xb05bffff5b05b05bULL, 0xffff5b05b05bffffULL, },
        { 0x3334333433343334ULL, 0x3334333433343334ULL, },    /*  40  */
        { 0x3333333333333333ULL, 0x3333333333333333ULL, },
        { 0x8889888988898889ULL, 0x8889888988898889ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x6667666766676667ULL, 0x6667666766676667ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x4fa50000a4fb4fa5ULL, 0x0000a4fb4fa50000ULL, },
        { 0x16c26c17000016c2ULL, 0x6c17000016c26c17ULL, },
        { 0xe38f38e48e39e38fULL, 0x38e48e39e38f38e4ULL, },    /*  48  */
        { 0xe38e38e38e38e38eULL, 0x38e38e38e38e38e3ULL, },
        { 0xffff8e39e38effffULL, 0x8e39e38effff8e39ULL, },
        { 0x8e39000038e38e39ULL, 0x000038e38e390000ULL, },
        { 0xffff6c17c16cffffULL, 0x6c17c16cffff6c17ULL, },
        { 0xb05b05b05b05b05bULL, 0x05b05b05b05b05b0ULL, },
        { 0xffff0000ffffffffULL, 0x0000ffffffff0000ULL, },
        { 0xc71d71c71c71c71dULL, 0x71c71c71c71d71c7ULL, },
        { 0x1c72c71d71c81c72ULL, 0xc71d71c81c72c71dULL, },    /*  56  */
        { 0x1c71c71c71c71c71ULL, 0xc71c71c71c71c71cULL, },
        { 0x71c7ffffc71d71c7ULL, 0xffffc71d71c7ffffULL, },
        { 0x000071c71c720000ULL, 0x71c71c72000071c7ULL, },
        { 0x4fa5fa50a4fb4fa5ULL, 0xfa50a4fb4fa5fa50ULL, },
        { 0x000093e93e940000ULL, 0x93e93e94000093e9ULL, },
        { 0x38e38e39e38f38e3ULL, 0x8e39e38f38e38e39ULL, },
        { 0x0000ffff00000000ULL, 0xffff00000000ffffULL, },
        { 0xffffffff00000000ULL, 0x00000000ffffffffULL, },    /*  64  */
        { 0x8cace66900008e38ULL, 0x38705044e93c5d10ULL, },
        { 0xdc10ffff6e93c9c0ULL, 0x238f445fffff8af8ULL, },
        { 0x181bd07f000072f2ULL, 0xbd768286ffffcd6cULL, },
        { 0xffff1997253171c8ULL, 0x0000afbc16c4a2f0ULL, },
        { 0xffff00000000ffffULL, 0x0000ffff00000000ULL, },
        { 0xffff51b993c4ffffULL, 0x0000f41b6a142de8ULL, },
        { 0x8b6f00000000e4baULL, 0x8506ffff6bfd705cULL, },
        { 0xffffc7de916d3640ULL, 0x0000bba1acb07508ULL, },    /*  72  */
        { 0xb09cae476c3cc478ULL, 0x14e1ffff95ec0000ULL, },
        { 0xffffffffffffffffULL, 0x0000ffffffff0000ULL, },
        { 0x3c0b985d5b9ea932ULL, 0x99e7ffffffff4274ULL, },
        { 0xe7e52f8135cf8d0eULL, 0x428a7d7aaac7ffffULL, },
        { 0x749115ea109effffULL, 0x7afacdbe94038fa4ULL, },
    };

    gettimeofday(&start, NULL);

    /* Pattern x pattern pairings fill the first part of the results */
    for (i = 0; i < PATTERN_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < PATTERN_INPUTS_SHORT_COUNT; j++) {
            do_msa_SUBSUS_U_H(b128_pattern[i], b128_pattern[j],
                              b128_result[PATTERN_INPUTS_SHORT_COUNT * i + j]);
        }
    }
    /* Random x random pairings fill the remainder */
    for (i = 0; i < RANDOM_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < RANDOM_INPUTS_SHORT_COUNT; j++) {
            do_msa_SUBSUS_U_H(b128_random[i], b128_random[j],
                              b128_result[((PATTERN_INPUTS_SHORT_COUNT) *
                                           (PATTERN_INPUTS_SHORT_COUNT)) +
                                          RANDOM_INPUTS_SHORT_COUNT * i + j]);
        }
    }

    gettimeofday(&end, NULL);

    /* Wall-clock duration of the whole run, in milliseconds */
    elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0;
    elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0;

    ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time,
                        &b128_result[0][0], &b128_expect[0][0]);

    return ret;
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/virtio/vhost-backend.c
|
<reponame>pmp-tool/PMP<gh_stars>1-10
/*
* vhost-backend
*
* Copyright (c) 2013 Virtual Open Systems Sarl.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "qemu/error-report.h"
#include "standard-headers/linux/vhost_types.h"
#ifdef CONFIG_VHOST_KERNEL
#include <linux/vhost.h>
#include <sys/ioctl.h>
/* Issue a vhost ioctl on the /dev/vhost-* fd stashed in dev->opaque. */
static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                             void *arg)
{
    int fd = (uintptr_t) dev->opaque;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    return ioctl(fd, request, arg);
}
/* Backend init: @opaque is the already-open vhost fd (cast to void *). */
static int vhost_kernel_init(struct vhost_dev *dev, void *opaque)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    dev->opaque = opaque;

    return 0;
}

/* Backend teardown: close the fd that was handed to vhost_kernel_init. */
static int vhost_kernel_cleanup(struct vhost_dev *dev)
{
    int fd = (uintptr_t) dev->opaque;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    return close(fd);
}
/*
 * Return the maximum number of memory slots the kernel vhost module
 * supports.  Reads /sys/module/vhost/parameters/max_mem_regions and
 * falls back to the historical default of 64 when the file is missing
 * or contains an unparseable value.
 */
static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
{
    int limit = 64;
    /* NULL-initialize so the g_free() on the failure path is always safe */
    char *s = NULL;

    if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
                            &s, NULL, NULL)) {
        uint64_t val;

        /*
         * g_ascii_strtoull only *sets* errno on over/underflow; clear any
         * stale value first so a legitimate 0 or G_MAXUINT64 parse is not
         * misclassified as an error (CERT ERR30-C).
         */
        errno = 0;
        val = g_ascii_strtoull(s, NULL, 10);
        if (!((val == G_MAXUINT64 || !val) && errno)) {
            g_free(s);
            return val;
        }
        error_report("ignoring invalid max_mem_regions value in vhost module:"
                     " %s", s);
    }
    g_free(s);
    return limit;
}
/*
 * Thin wrappers mapping the generic VhostOps callbacks onto the
 * corresponding vhost kernel ioctls (see <linux/vhost.h>).
 */
static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
                                        struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_NET_SET_BACKEND, file);
}

static int vhost_kernel_scsi_set_endpoint(struct vhost_dev *dev,
                                          struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_SET_ENDPOINT, target);
}

static int vhost_kernel_scsi_clear_endpoint(struct vhost_dev *dev,
                                            struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_CLEAR_ENDPOINT, target);
}

static int vhost_kernel_scsi_get_abi_version(struct vhost_dev *dev, int *version)
{
    return vhost_kernel_call(dev, VHOST_SCSI_GET_ABI_VERSION, version);
}

/* @log is unused here: the kernel backend only needs the base address,
 * the vhost_log structure stays owned by the caller. */
static int vhost_kernel_set_log_base(struct vhost_dev *dev, uint64_t base,
                                     struct vhost_log *log)
{
    return vhost_kernel_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_kernel_set_mem_table(struct vhost_dev *dev,
                                      struct vhost_memory *mem)
{
    return vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem);
}

static int vhost_kernel_set_vring_addr(struct vhost_dev *dev,
                                       struct vhost_vring_addr *addr)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_kernel_set_vring_endian(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ENDIAN, ring);
}

static int vhost_kernel_set_vring_num(struct vhost_dev *dev,
                                      struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_kernel_set_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_kernel_get_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_GET_VRING_BASE, ring);
}

static int vhost_kernel_set_vring_kick(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_kernel_set_vring_call(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
                                                   struct vhost_vring_state *s)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
}

static int vhost_kernel_set_features(struct vhost_dev *dev,
                                     uint64_t features)
{
    return vhost_kernel_call(dev, VHOST_SET_FEATURES, &features);
}

static int vhost_kernel_get_features(struct vhost_dev *dev,
                                     uint64_t *features)
{
    return vhost_kernel_call(dev, VHOST_GET_FEATURES, features);
}

static int vhost_kernel_set_owner(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_kernel_reset_device(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL);
}

/* Translate a device-global virtqueue index into this dev's local index. */
static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx - dev->vq_index;
}

#ifdef CONFIG_VHOST_VSOCK
static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev *dev,
                                            uint64_t guest_cid)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);
}

static int vhost_kernel_vsock_set_running(struct vhost_dev *dev, int start)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_RUNNING, &start);
}
#endif /* CONFIG_VHOST_VSOCK */
/*
 * fd-readable handler: drain IOTLB messages the kernel queued on the
 * vhost fd and dispatch each to the generic handler.  Stops on a short
 * read or an unexpected message type.
 */
static void vhost_kernel_iotlb_read(void *opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_msg msg;
    ssize_t len;

    while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
        if (len < sizeof msg) {
            error_report("Wrong vhost message len: %d", (int)len);
            break;
        }
        if (msg.type != VHOST_IOTLB_MSG) {
            error_report("Unknown vhost iotlb message type");
            break;
        }
        vhost_backend_handle_iotlb_msg(dev, &msg.iotlb);
    }
}
/*
 * Push one IOTLB message (update/invalidate reply) to the kernel by
 * writing it to the vhost fd.  Returns 0 on success, -EFAULT if the
 * write was short or failed.
 */
static int vhost_kernel_send_device_iotlb_msg(struct vhost_dev *dev,
                                              struct vhost_iotlb_msg *imsg)
{
    struct vhost_msg msg;

    /*
     * Zero the whole message first: the struct has padding and fields we
     * never set, and the full sizeof msg is written to the kernel -- do
     * not leak uninitialized stack bytes across the user/kernel boundary.
     */
    memset(&msg, 0, sizeof(msg));
    msg.type = VHOST_IOTLB_MSG;
    msg.iotlb = *imsg;

    if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
        error_report("Fail to update device iotlb");
        return -EFAULT;
    }

    return 0;
}
/*
 * Enable or disable the IOTLB miss notification path: when enabled, the
 * vhost fd becomes readable whenever the kernel queues an IOTLB message
 * and vhost_kernel_iotlb_read() drains it.
 */
static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
                                            int enabled)
{
    int fd = (uintptr_t)dev->opaque;

    if (enabled) {
        qemu_set_fd_handler(fd, vhost_kernel_iotlb_read, NULL, dev);
    } else {
        qemu_set_fd_handler(fd, NULL, NULL, NULL);
    }
}
/*
 * VhostOps vtable for the in-kernel vhost backend.  dev->opaque holds
 * the /dev/vhost-* file descriptor (stored as an integer).
 */
static const VhostOps kernel_ops = {
        .backend_type = VHOST_BACKEND_TYPE_KERNEL,
        .vhost_backend_init = vhost_kernel_init,
        .vhost_backend_cleanup = vhost_kernel_cleanup,
        .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
        .vhost_net_set_backend = vhost_kernel_net_set_backend,
        .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
        .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
        .vhost_scsi_get_abi_version = vhost_kernel_scsi_get_abi_version,
        .vhost_set_log_base = vhost_kernel_set_log_base,
        .vhost_set_mem_table = vhost_kernel_set_mem_table,
        .vhost_set_vring_addr = vhost_kernel_set_vring_addr,
        .vhost_set_vring_endian = vhost_kernel_set_vring_endian,
        .vhost_set_vring_num = vhost_kernel_set_vring_num,
        .vhost_set_vring_base = vhost_kernel_set_vring_base,
        .vhost_get_vring_base = vhost_kernel_get_vring_base,
        .vhost_set_vring_kick = vhost_kernel_set_vring_kick,
        .vhost_set_vring_call = vhost_kernel_set_vring_call,
        .vhost_set_vring_busyloop_timeout =
                                vhost_kernel_set_vring_busyloop_timeout,
        .vhost_set_features = vhost_kernel_set_features,
        .vhost_get_features = vhost_kernel_get_features,
        .vhost_set_owner = vhost_kernel_set_owner,
        .vhost_reset_device = vhost_kernel_reset_device,
        .vhost_get_vq_index = vhost_kernel_get_vq_index,
#ifdef CONFIG_VHOST_VSOCK
        .vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
        .vhost_vsock_set_running = vhost_kernel_vsock_set_running,
#endif /* CONFIG_VHOST_VSOCK */
        .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
};
#endif
/*
 * Bind @dev to the backend implementation selected by @backend_type.
 * Returns 0 on success, -1 when the requested backend is unknown or was
 * compiled out of this build.
 */
int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
{
    int r = 0;

    switch (backend_type) {
#ifdef CONFIG_VHOST_KERNEL
    case VHOST_BACKEND_TYPE_KERNEL:
        dev->vhost_ops = &kernel_ops;
        break;
#endif
#ifdef CONFIG_VHOST_USER
    case VHOST_BACKEND_TYPE_USER:
        dev->vhost_ops = &user_ops;
        break;
#endif
    default:
        error_report("Unknown vhost backend type");
        r = -1;
    }

    return r;
}
/*
 * Send a VHOST_IOTLB_UPDATE mapping (iova -> uaddr, len bytes, @perm
 * access) to the backend.  Returns the backend's result, -EINVAL for an
 * unsupported permission, or -ENODEV if the backend cannot take IOTLB
 * messages.
 */
int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
                                      uint64_t iova, uint64_t uaddr,
                                      uint64_t len,
                                      IOMMUAccessFlags perm)
{
    struct vhost_iotlb_msg imsg;

    /* Validate the permission up front before filling in the message. */
    switch (perm) {
    case IOMMU_RO:
        imsg.perm = VHOST_ACCESS_RO;
        break;
    case IOMMU_WO:
        imsg.perm = VHOST_ACCESS_WO;
        break;
    case IOMMU_RW:
        imsg.perm = VHOST_ACCESS_RW;
        break;
    default:
        return -EINVAL;
    }

    imsg.type = VHOST_IOTLB_UPDATE;
    imsg.iova = iova;
    imsg.uaddr = uaddr;
    imsg.size = len;

    if (!dev->vhost_ops || !dev->vhost_ops->vhost_send_device_iotlb_msg) {
        return -ENODEV;
    }

    return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);
}
/*
 * Send a VHOST_IOTLB_INVALIDATE for [iova, iova + len) to the backend.
 * Returns the backend's result, or -ENODEV if the backend cannot take
 * IOTLB messages.
 */
int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev,
                                          uint64_t iova, uint64_t len)
{
    struct vhost_iotlb_msg imsg;

    imsg.type = VHOST_IOTLB_INVALIDATE;
    imsg.iova = iova;
    imsg.size = len;

    if (!dev->vhost_ops || !dev->vhost_ops->vhost_send_device_iotlb_msg) {
        return -ENODEV;
    }

    return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);
}
/*
 * Dispatch one IOTLB message received from the backend.  Only
 * VHOST_IOTLB_MISS is actionable here; every other type is either
 * unsupported (ACCESS_FAIL) or host-to-device only and therefore
 * unexpected in this direction.
 */
int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev,
                                   struct vhost_iotlb_msg *imsg)
{
    if (imsg->type == VHOST_IOTLB_MISS) {
        /* Device touched an unmapped iova: resolve it and reply. */
        return vhost_device_iotlb_miss(dev, imsg->iova,
                                       imsg->perm != VHOST_ACCESS_RO);
    }

    if (imsg->type == VHOST_IOTLB_ACCESS_FAIL) {
        /* FIXME: report device iotlb error */
        error_report("Access failure IOTLB message type not supported");
        return -ENOTSUP;
    }

    /* VHOST_IOTLB_UPDATE / VHOST_IOTLB_INVALIDATE and anything else. */
    error_report("Unexpected IOTLB message type");
    return -EINVAL;
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/tcg/mips/tcg-target.h
|
/*
* Tiny Code Generator for QEMU
*
* Copyright (c) 2008-2009 <NAME> <<EMAIL>>
* Copyright (c) 2009 <NAME> <<EMAIL>>
* Based on i386/tcg-target.c - Copyright (c) 2008 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIPS_TCG_TARGET_H
#define MIPS_TCG_TARGET_H
#if _MIPS_SIM == _ABIO32
# define TCG_TARGET_REG_BITS 32
#elif _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64
# define TCG_TARGET_REG_BITS 64
#else
# error "Unknown ABI"
#endif
#define TCG_TARGET_INSN_UNIT_SIZE 4
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16
#define TCG_TARGET_NB_REGS 32
/*
 * TCG register enumeration.  Values track the hardware register numbers
 * ($0-$31) so the enum can be emitted directly into instruction encodings.
 * Role comments follow the standard MIPS o32/n32/n64 conventions.
 */
typedef enum {
    TCG_REG_ZERO = 0,  /* $0: always reads as zero */
    TCG_REG_AT,        /* $1: assembler temporary */
    TCG_REG_V0,        /* $2-$3: function result registers */
    TCG_REG_V1,
    TCG_REG_A0,        /* $4-$7: argument registers */
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_T0,        /* $8-$15: caller-saved temporaries */
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,
    TCG_REG_T7,
    TCG_REG_S0,        /* $16-$23: callee-saved registers */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_T8,        /* $24-$25: additional temporaries */
    TCG_REG_T9,
    TCG_REG_K0,        /* $26-$27: reserved for kernel use */
    TCG_REG_K1,
    TCG_REG_GP,        /* $28: global pointer */
    TCG_REG_SP,        /* $29: stack pointer */
    TCG_REG_S8,        /* $30: frame pointer / s8 */
    TCG_REG_RA,        /* $31: return address */
    /* Aliases consumed by common TCG code. */
    TCG_REG_CALL_STACK = TCG_REG_SP,
    /* env pointer lives in a callee-saved register across calls. */
    TCG_AREG0 = TCG_REG_S0,
} TCGReg;
/* used for function call generation */
#define TCG_TARGET_STACK_ALIGN 16
#if _MIPS_SIM == _ABIO32
# define TCG_TARGET_CALL_STACK_OFFSET 16
#else
# define TCG_TARGET_CALL_STACK_OFFSET 0
#endif
#define TCG_TARGET_CALL_ALIGN_ARGS 1
/* MOVN/MOVZ instructions detection */
#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \
defined(_MIPS_ARCH_LOONGSON2E) || defined(_MIPS_ARCH_LOONGSON2F) || \
defined(_MIPS_ARCH_MIPS4)
#define use_movnz_instructions 1
#else
extern bool use_movnz_instructions;
#endif
/* MIPS32 instruction set detection */
#if defined(__mips_isa_rev) && (__mips_isa_rev >= 1)
#define use_mips32_instructions 1
#else
extern bool use_mips32_instructions;
#endif
/* MIPS32R2 instruction set detection */
#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
#define use_mips32r2_instructions 1
#else
extern bool use_mips32r2_instructions;
#endif
/* MIPS32R6 instruction set detection */
#if defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
#define use_mips32r6_instructions 1
#else
#define use_mips32r6_instructions 0
#endif
/* optional instructions */
#define TCG_TARGET_HAS_div_i32 1
#define TCG_TARGET_HAS_rem_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_nor_i32 1
#define TCG_TARGET_HAS_andc_i32 0
#define TCG_TARGET_HAS_orc_i32 0
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
#define TCG_TARGET_HAS_muluh_i32 1
#define TCG_TARGET_HAS_mulsh_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_goto_ptr 1
#define TCG_TARGET_HAS_direct_jump 1
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_sub2_i32 0
#define TCG_TARGET_HAS_extrl_i64_i32 1
#define TCG_TARGET_HAS_extrh_i64_i32 1
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 1
#define TCG_TARGET_HAS_not_i64 1
#define TCG_TARGET_HAS_nor_i64 1
#define TCG_TARGET_HAS_andc_i64 0
#define TCG_TARGET_HAS_orc_i64 0
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions)
#define TCG_TARGET_HAS_muls2_i64 (!use_mips32r6_instructions)
#define TCG_TARGET_HAS_muluh_i64 1
#define TCG_TARGET_HAS_mulsh_i64 1
#define TCG_TARGET_HAS_ext32s_i64 1
#define TCG_TARGET_HAS_ext32u_i64 1
#endif
/* optional instructions detected at runtime */
#define TCG_TARGET_HAS_movcond_i32 use_movnz_instructions
#define TCG_TARGET_HAS_bswap16_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_extract_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_sextract_i32 0
#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_clz_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_ctz_i32 0
#define TCG_TARGET_HAS_ctpop_i32 0
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_movcond_i64 use_movnz_instructions
#define TCG_TARGET_HAS_bswap16_i64 use_mips32r2_instructions
#define TCG_TARGET_HAS_bswap32_i64 use_mips32r2_instructions
#define TCG_TARGET_HAS_bswap64_i64 use_mips32r2_instructions
#define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions
#define TCG_TARGET_HAS_extract_i64 use_mips32r2_instructions
#define TCG_TARGET_HAS_sextract_i64 0
#define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions
#define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions
#define TCG_TARGET_HAS_rot_i64 use_mips32r2_instructions
#define TCG_TARGET_HAS_clz_i64 use_mips32r2_instructions
#define TCG_TARGET_HAS_ctz_i64 0
#define TCG_TARGET_HAS_ctpop_i64 0
#endif
/* optional instructions automatically implemented */
#define TCG_TARGET_HAS_neg_i32 0 /* sub rd, zero, rt */
#define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */
#define TCG_TARGET_HAS_ext16u_i32 0 /* andi rt, rs, 0xffff */
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_neg_i64 0 /* sub rd, zero, rt */
#define TCG_TARGET_HAS_ext8u_i64 0 /* andi rt, rs, 0xff */
#define TCG_TARGET_HAS_ext16u_i64 0 /* andi rt, rs, 0xffff */
#endif
#ifdef __OpenBSD__
#include <machine/sysarch.h>
#else
#include <sys/cachectl.h>
#endif
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
/*
 * Flush the instruction cache for [start, stop) so freshly generated
 * code becomes visible to instruction fetch.  Uses the platform
 * cacheflush() facility (sys/cachectl.h, or machine/sysarch.h on
 * OpenBSD) with the ICACHE selector.
 */
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
    cacheflush ((void *)start, stop-start, ICACHE);
}
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
#endif
#endif
|
pmp-tool/PMP
|
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/int-multiply/test_msa_mulv_h.c
|
<reponame>pmp-tool/PMP
/*
* Test program for MSA instruction MULV.H
*
* Copyright (C) 2018 Wave Computing, Inc.
* Copyright (C) 2018 <NAME> <<EMAIL>>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
#include <sys/time.h>
#include <stdint.h>
#include "../../../../include/wrappers_msa.h"
#include "../../../../include/test_inputs.h"
#include "../../../../include/test_utils.h"
#define TEST_COUNT_TOTAL ( \
(PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT) + \
(RANDOM_INPUTS_SHORT_COUNT) * (RANDOM_INPUTS_SHORT_COUNT))
int32_t main(void)
{
    /*
     * Exhaustively exercise MULV.H (vector halfword multiply) over every
     * pair of pattern inputs and every pair of random inputs, then compare
     * against the precomputed expectations below.
     */
    char *instruction_name = "MULV.H";
    int32_t ret;
    uint32_t i, j;
    struct timeval start, end;
    double elapsed_time;
    uint64_t b128_result[TEST_COUNT_TOTAL][2];
    /*
     * Expected 128-bit results, two 64-bit halves per test case.
     * NOTE(review): fewer initializer rows are present than
     * TEST_COUNT_TOTAL entries; the remainder are zero-filled by C
     * aggregate initialization — verify against upstream that no rows
     * were lost.
     */
    uint64_t b128_expect[TEST_COUNT_TOTAL][2] = {
        { 0x0001000100010001ULL, 0x0001000100010001ULL, },    /*   0  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x5556555655565556ULL, 0x5556555655565556ULL, },
        { 0xaaabaaabaaabaaabULL, 0xaaabaaabaaabaaabULL, },
        { 0x3334333433343334ULL, 0x3334333433343334ULL, },
        { 0xcccdcccdcccdcccdULL, 0xcccdcccdcccdcccdULL, },
        { 0x1c72c71d71c81c72ULL, 0xc71d71c81c72c71dULL, },
        { 0xe38f38e48e39e38fULL, 0x38e48e39e38f38e4ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },    /*   8  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x5556555655565556ULL, 0x5556555655565556ULL, },    /*  16  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x38e438e438e438e4ULL, 0x38e438e438e438e4ULL, },
        { 0x1c721c721c721c72ULL, 0x1c721c721c721c72ULL, },
        { 0x7778777877787778ULL, 0x7778777877787778ULL, },
        { 0xdddedddedddedddeULL, 0xdddedddedddedddeULL, },
        { 0x684c84bea130684cULL, 0x84bea130684c84beULL, },
        { 0xed0ad098b426ed0aULL, 0xd098b426ed0ad098ULL, },
        { 0xaaabaaabaaabaaabULL, 0xaaabaaabaaabaaabULL, },    /*  24  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x1c721c721c721c72ULL, 0x1c721c721c721c72ULL, },
        { 0x8e398e398e398e39ULL, 0x8e398e398e398e39ULL, },
        { 0xbbbcbbbcbbbcbbbcULL, 0xbbbcbbbcbbbcbbbcULL, },
        { 0xeeefeeefeeefeeefULL, 0xeeefeeefeeefeeefULL, },
        { 0xb426425fd098b426ULL, 0x425fd098b426425fULL, },
        { 0xf685684cda13f685ULL, 0x684cda13f685684cULL, },
        { 0x3334333433343334ULL, 0x3334333433343334ULL, },    /*  32  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x7778777877787778ULL, 0x7778777877787778ULL, },
        { 0xbbbcbbbcbbbcbbbcULL, 0xbbbcbbbcbbbcbbbcULL, },
        { 0xc290c290c290c290ULL, 0xc290c290c290c290ULL, },
        { 0x70a470a470a470a4ULL, 0x70a470a470a470a4ULL, },
        { 0x7d2838e4f4a07d28ULL, 0x38e4f4a07d2838e4ULL, },
        { 0xb60cfa503e94b60cULL, 0xfa503e94b60cfa50ULL, },
        { 0xcccdcccdcccdcccdULL, 0xcccdcccdcccdcccdULL, },    /*  40  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0xdddedddedddedddeULL, 0xdddedddedddedddeULL, },
        { 0xeeefeeefeeefeeefULL, 0xeeefeeefeeefeeefULL, },
        { 0x70a470a470a470a4ULL, 0x70a470a470a470a4ULL, },
        { 0x5c295c295c295c29ULL, 0x5c295c295c295c29ULL, },
        { 0x9f4a8e397d289f4aULL, 0x8e397d289f4a8e39ULL, },
        { 0x2d833e944fa52d83ULL, 0x3e944fa52d833e94ULL, },
        { 0x1c72c71d71c81c72ULL, 0xc71d71c81c72c71dULL, },    /*  48  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x684c84bea130684cULL, 0x84bea130684c84beULL, },
        { 0xb426425fd098b426ULL, 0x425fd098b426425fULL, },
        { 0x7d2838e4f4a07d28ULL, 0x38e4f4a07d2838e4ULL, },
        { 0x9f4a8e397d289f4aULL, 0x8e397d289f4a8e39ULL, },
        { 0x22c419492c4022c4ULL, 0x19492c4022c41949ULL, },
        { 0xf9aeadd44588f9aeULL, 0xadd44588f9aeadd4ULL, },
        { 0xe38f38e48e39e38fULL, 0x38e48e39e38f38e4ULL, },    /*  56  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0xed0ad098b426ed0aULL, 0xd098b426ed0ad098ULL, },
        { 0xf685684cda13f685ULL, 0x684cda13f685684cULL, },
        { 0xb60cfa503e94b60cULL, 0xfa503e94b60cfa50ULL, },
        { 0x2d833e944fa52d83ULL, 0x3e944fa52d833e94ULL, },
        { 0xf9aeadd44588f9aeULL, 0xadd44588f9aeadd4ULL, },
        { 0xe9e18b1048b1e9e1ULL, 0x8b1048b1e9e18b10ULL, },
        { 0xcbe43290c5849000ULL, 0x837136844f198090ULL, },    /*  64  */
        { 0x2cac40e4aa466a00ULL, 0xfe61d18cb74523d0ULL, },
        { 0x2d44eb78793e6000ULL, 0x4fe806a2e7a97cf0ULL, },
        { 0x78b6f35cb6c27980ULL, 0xb6f78750ceb69f80ULL, },
        { 0x2cac40e4aa466a00ULL, 0xfe61d18cb74523d0ULL, },
        { 0x21042649c2697040ULL, 0xaa51fea465816810ULL, },
        { 0x28cc8bbef4dddc00ULL, 0xa1687ae6a695e7b0ULL, },
        { 0xcfa29fc7d323b470ULL, 0xe587adf0113e5580ULL, },
        { 0x2d44eb78793e6000ULL, 0x4fe806a2e7a97cf0ULL, },    /*  72  */
        { 0x28cc8bbef4dddc00ULL, 0xa1687ae6a695e7b0ULL, },
        { 0x0fa488e4d5614000ULL, 0x864072017939c990ULL, },
        { 0x8fc62522929f8100ULL, 0x7a585f288416d480ULL, },
        { 0x78b6f35cb6c27980ULL, 0xb6f78750ceb69f80ULL, },
        { 0xcfa29fc7d323b470ULL, 0xe587adf0113e5580ULL, },
    };
    gettimeofday(&start, NULL);
    /* Pattern x pattern section fills the first PATTERN^2 result slots. */
    for (i = 0; i < PATTERN_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < PATTERN_INPUTS_SHORT_COUNT; j++) {
            do_msa_MULV_H(b128_pattern[i], b128_pattern[j],
                          b128_result[PATTERN_INPUTS_SHORT_COUNT * i + j]);
        }
    }
    /* Random x random section is appended after the pattern results. */
    for (i = 0; i < RANDOM_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < RANDOM_INPUTS_SHORT_COUNT; j++) {
            do_msa_MULV_H(b128_random[i], b128_random[j],
                          b128_result[((PATTERN_INPUTS_SHORT_COUNT) *
                                       (PATTERN_INPUTS_SHORT_COUNT)) +
                                      RANDOM_INPUTS_SHORT_COUNT * i + j]);
        }
    }
    gettimeofday(&end, NULL);
    /* Elapsed wall-clock time in milliseconds, reported by check_results. */
    elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0;
    elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0;
    ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time,
                        &b128_result[0][0], &b128_expect[0][0]);
    return ret;
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/interleave/test_msa_ilvod_b.c
|
/*
* Test program for MSA instruction ILVOD.B
*
* Copyright (C) 2019 Wave Computing, Inc.
* Copyright (C) 2019 <NAME> <<EMAIL>>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
#include <sys/time.h>
#include <stdint.h>
#include "../../../../include/wrappers_msa.h"
#include "../../../../include/test_inputs_128.h"
#include "../../../../include/test_utils_128.h"
#define TEST_COUNT_TOTAL ( \
(PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT) + \
(RANDOM_INPUTS_SHORT_COUNT) * (RANDOM_INPUTS_SHORT_COUNT))
int32_t main(void)
{
    /*
     * Exhaustively exercise ILVOD.B (interleave odd bytes) over every
     * pair of pattern inputs and every pair of random inputs, then compare
     * against the precomputed expectations below.
     */
    char *instruction_name = "ILVOD.B";
    int32_t ret;
    uint32_t i, j;
    struct timeval start, end;
    double elapsed_time;
    uint64_t b128_result[TEST_COUNT_TOTAL][2];
    /* Expected 128-bit results, two 64-bit halves per test case. */
    uint64_t b128_expect[TEST_COUNT_TOTAL][2] = {
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },    /*   0  */
        { 0xff00ff00ff00ff00ULL, 0xff00ff00ff00ff00ULL, },
        { 0xffaaffaaffaaffaaULL, 0xffaaffaaffaaffaaULL, },
        { 0xff55ff55ff55ff55ULL, 0xff55ff55ff55ff55ULL, },
        { 0xffccffccffccffccULL, 0xffccffccffccffccULL, },
        { 0xff33ff33ff33ff33ULL, 0xff33ff33ff33ff33ULL, },
        { 0xffe3ff38ff8effe3ULL, 0xff38ff8effe3ff38ULL, },
        { 0xff1cffc7ff71ff1cULL, 0xffc7ff71ff1cffc7ULL, },
        { 0x00ff00ff00ff00ffULL, 0x00ff00ff00ff00ffULL, },    /*   8  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x00aa00aa00aa00aaULL, 0x00aa00aa00aa00aaULL, },
        { 0x0055005500550055ULL, 0x0055005500550055ULL, },
        { 0x00cc00cc00cc00ccULL, 0x00cc00cc00cc00ccULL, },
        { 0x0033003300330033ULL, 0x0033003300330033ULL, },
        { 0x00e30038008e00e3ULL, 0x0038008e00e30038ULL, },
        { 0x001c00c70071001cULL, 0x00c70071001c00c7ULL, },
        { 0xaaffaaffaaffaaffULL, 0xaaffaaffaaffaaffULL, },    /*  16  */
        { 0xaa00aa00aa00aa00ULL, 0xaa00aa00aa00aa00ULL, },
        { 0xaaaaaaaaaaaaaaaaULL, 0xaaaaaaaaaaaaaaaaULL, },
        { 0xaa55aa55aa55aa55ULL, 0xaa55aa55aa55aa55ULL, },
        { 0xaaccaaccaaccaaccULL, 0xaaccaaccaaccaaccULL, },
        { 0xaa33aa33aa33aa33ULL, 0xaa33aa33aa33aa33ULL, },
        { 0xaae3aa38aa8eaae3ULL, 0xaa38aa8eaae3aa38ULL, },
        { 0xaa1caac7aa71aa1cULL, 0xaac7aa71aa1caac7ULL, },
        { 0x55ff55ff55ff55ffULL, 0x55ff55ff55ff55ffULL, },    /*  24  */
        { 0x5500550055005500ULL, 0x5500550055005500ULL, },
        { 0x55aa55aa55aa55aaULL, 0x55aa55aa55aa55aaULL, },
        { 0x5555555555555555ULL, 0x5555555555555555ULL, },
        { 0x55cc55cc55cc55ccULL, 0x55cc55cc55cc55ccULL, },
        { 0x5533553355335533ULL, 0x5533553355335533ULL, },
        { 0x55e35538558e55e3ULL, 0x5538558e55e35538ULL, },
        { 0x551c55c75571551cULL, 0x55c75571551c55c7ULL, },
        { 0xccffccffccffccffULL, 0xccffccffccffccffULL, },    /*  32  */
        { 0xcc00cc00cc00cc00ULL, 0xcc00cc00cc00cc00ULL, },
        { 0xccaaccaaccaaccaaULL, 0xccaaccaaccaaccaaULL, },
        { 0xcc55cc55cc55cc55ULL, 0xcc55cc55cc55cc55ULL, },
        { 0xccccccccccccccccULL, 0xccccccccccccccccULL, },
        { 0xcc33cc33cc33cc33ULL, 0xcc33cc33cc33cc33ULL, },
        { 0xcce3cc38cc8ecce3ULL, 0xcc38cc8ecce3cc38ULL, },
        { 0xcc1cccc7cc71cc1cULL, 0xccc7cc71cc1cccc7ULL, },
        { 0x33ff33ff33ff33ffULL, 0x33ff33ff33ff33ffULL, },    /*  40  */
        { 0x3300330033003300ULL, 0x3300330033003300ULL, },
        { 0x33aa33aa33aa33aaULL, 0x33aa33aa33aa33aaULL, },
        { 0x3355335533553355ULL, 0x3355335533553355ULL, },
        { 0x33cc33cc33cc33ccULL, 0x33cc33cc33cc33ccULL, },
        { 0x3333333333333333ULL, 0x3333333333333333ULL, },
        { 0x33e33338338e33e3ULL, 0x3338338e33e33338ULL, },
        { 0x331c33c73371331cULL, 0x33c73371331c33c7ULL, },
        { 0xe3ff38ff8effe3ffULL, 0x38ff8effe3ff38ffULL, },    /*  48  */
        { 0xe30038008e00e300ULL, 0x38008e00e3003800ULL, },
        { 0xe3aa38aa8eaae3aaULL, 0x38aa8eaae3aa38aaULL, },
        { 0xe35538558e55e355ULL, 0x38558e55e3553855ULL, },
        { 0xe3cc38cc8ecce3ccULL, 0x38cc8ecce3cc38ccULL, },
        { 0xe33338338e33e333ULL, 0x38338e33e3333833ULL, },
        { 0xe3e338388e8ee3e3ULL, 0x38388e8ee3e33838ULL, },
        { 0xe31c38c78e71e31cULL, 0x38c78e71e31c38c7ULL, },
        { 0x1cffc7ff71ff1cffULL, 0xc7ff71ff1cffc7ffULL, },    /*  56  */
        { 0x1c00c70071001c00ULL, 0xc70071001c00c700ULL, },
        { 0x1caac7aa71aa1caaULL, 0xc7aa71aa1caac7aaULL, },
        { 0x1c55c75571551c55ULL, 0xc75571551c55c755ULL, },
        { 0x1cccc7cc71cc1cccULL, 0xc7cc71cc1cccc7ccULL, },
        { 0x1c33c73371331c33ULL, 0xc73371331c33c733ULL, },
        { 0x1ce3c738718e1ce3ULL, 0xc738718e1ce3c738ULL, },
        { 0x1c1cc7c771711c1cULL, 0xc7c771711c1cc7c7ULL, },
        { 0x8888e6e628285555ULL, 0x4b4b0b0bfefeb0b0ULL, },    /*  64  */
        { 0x88fbe600284d55c7ULL, 0x4b120bbbfe15b052ULL, },
        { 0x88ace6ae28b9558bULL, 0x4b270bc6feabb025ULL, },
        { 0x8870e616285e55e2ULL, 0x4b8d0b88fea9b0e2ULL, },
        { 0xfb8800e64d28c755ULL, 0x124bbb0b15fe52b0ULL, },
        { 0xfbfb00004d4dc7c7ULL, 0x1212bbbb15155252ULL, },
        { 0xfbac00ae4db9c78bULL, 0x1227bbc615ab5225ULL, },
        { 0xfb7000164d5ec7e2ULL, 0x128dbb8815a952e2ULL, },
        { 0xac88aee6b9288b55ULL, 0x274bc60babfe25b0ULL, },    /*  72  */
        { 0xacfbae00b94d8bc7ULL, 0x2712c6bbab152552ULL, },
        { 0xacacaeaeb9b98b8bULL, 0x2727c6c6abab2525ULL, },
        { 0xac70ae16b95e8be2ULL, 0x278dc688aba925e2ULL, },
        { 0x708816e65e28e255ULL, 0x8d4b880ba9fee2b0ULL, },
        { 0x70fb16005e4de2c7ULL, 0x8d1288bba915e252ULL, },
        { 0x70ac16ae5eb9e28bULL, 0x8d2788c6a9abe225ULL, },
        { 0x707016165e5ee2e2ULL, 0x8d8d8888a9a9e2e2ULL, },
    };
    gettimeofday(&start, NULL);
    /* Pattern x pattern section fills the first PATTERN^2 result slots. */
    for (i = 0; i < PATTERN_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < PATTERN_INPUTS_SHORT_COUNT; j++) {
            do_msa_ILVOD_B(b128_pattern[i], b128_pattern[j],
                           b128_result[PATTERN_INPUTS_SHORT_COUNT * i + j]);
        }
    }
    /* Random x random section is appended after the pattern results. */
    for (i = 0; i < RANDOM_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < RANDOM_INPUTS_SHORT_COUNT; j++) {
            do_msa_ILVOD_B(b128_random[i], b128_random[j],
                           b128_result[((PATTERN_INPUTS_SHORT_COUNT) *
                                        (PATTERN_INPUTS_SHORT_COUNT)) +
                                       RANDOM_INPUTS_SHORT_COUNT * i + j]);
        }
    }
    gettimeofday(&end, NULL);
    /* Elapsed wall-clock time in milliseconds, reported by check_results. */
    elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0;
    elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0;
    ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time,
                        &b128_result[0][0], &b128_expect[0][0]);
    return ret;
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/include/migration/register.h
|
/*
* QEMU migration vmstate registration
*
* Copyright IBM, Corp. 2008
*
* Authors:
* <NAME> <<EMAIL>>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#ifndef MIGRATION_REGISTER_H
#define MIGRATION_REGISTER_H
/*
 * Callback table registered by devices/subsystems that participate in
 * live migration via register_savevm_live().  Unless noted otherwise,
 * callbacks returning int use 0 for success and negative for error.
 */
typedef struct SaveVMHandlers {
    /* This runs inside the iothread lock. */
    SaveStateHandler *save_state;
    /* Tear down save-side resources allocated by save_setup(). */
    void (*save_cleanup)(void *opaque);
    /* Final flush of state at the end of postcopy migration. */
    int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
    /* Final flush of state at the end of precopy migration. */
    int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
    /* This runs both outside and inside the iothread lock. */
    bool (*is_active)(void *opaque);
    bool (*has_postcopy)(void *opaque);
    /* is_active_iterate
     * If it is not NULL then qemu_savevm_state_iterate will skip iteration if
     * it returns false. For example, it is needed for only-postcopy-states,
     * which needs to be handled by qemu_savevm_state_setup and
     * qemu_savevm_state_pending, but do not need iterations until not in
     * postcopy stage.
     */
    bool (*is_active_iterate)(void *opaque);
    /* This runs outside the iothread lock in the migration case, and
     * within the lock in the savevm case. The callback had better only
     * use data that is local to the migration thread or protected
     * by other locks.
     */
    int (*save_live_iterate)(QEMUFile *f, void *opaque);
    /* This runs outside the iothread lock! */
    int (*save_setup)(QEMUFile *f, void *opaque);
    void (*save_live_pending)(QEMUFile *f, void *opaque,
                              uint64_t threshold_size,
                              uint64_t *res_precopy_only,
                              uint64_t *res_compatible,
                              uint64_t *res_postcopy_only);
    /* Note for save_live_pending:
     * - res_precopy_only is for data which must be migrated in precopy phase
     *     or in stopped state, in other words - before target vm start
     * - res_compatible is for data which may be migrated in any phase
     * - res_postcopy_only is for data which must be migrated in postcopy phase
     *     or in stopped state, in other words - after source vm stop
     *
     * Sum of res_precopy_only, res_compatible and res_postcopy_only is the
     * whole amount of pending data.
     */
    LoadStateHandler *load_state;
    /* Prepare load-side resources; mirrored by load_cleanup(). */
    int (*load_setup)(QEMUFile *f, void *opaque);
    int (*load_cleanup)(void *opaque);
    /* Called when postcopy migration wants to resume from failure */
    int (*resume_prepare)(MigrationState *s, void *opaque);
} SaveVMHandlers;
/*
 * Register a set of live-migration handlers for @dev under @idstr.
 * @instance_id of -1 auto-allocates; @opaque is passed to every callback.
 */
int register_savevm_live(DeviceState *dev,
                         const char *idstr,
                         int instance_id,
                         int version_id,
                         const SaveVMHandlers *ops,
                         void *opaque);
/* Remove a registration previously made with register_savevm_live(). */
void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque);
#endif
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/usb/bus.c
|
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/usb.h"
#include "hw/qdev.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#include "trace.h"
#include "qemu/cutils.h"
static void usb_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent);
static char *usb_get_dev_path(DeviceState *dev);
static char *usb_get_fw_dev_path(DeviceState *qdev);
static void usb_qdev_unrealize(DeviceState *qdev, Error **errp);
/* qdev properties common to every USB device. */
static Property usb_props[] = {
    /* Explicit port path (e.g. "1.2") to attach to; NULL = first free. */
    DEFINE_PROP_STRING("port", USBDevice, port_path),
    DEFINE_PROP_STRING("serial", USBDevice, serial),
    /* Report the full hub chain in device paths rather than one level. */
    DEFINE_PROP_BIT("full-path", USBDevice, flags,
                    USB_DEV_FLAG_FULL_PATH, true),
    /* Enable Microsoft OS descriptor support. */
    DEFINE_PROP_BIT("msos-desc", USBDevice, flags,
                    USB_DEV_FLAG_MSOS_DESC_ENABLE, true),
    DEFINE_PROP_END_OF_LIST()
};
/* Wire up bus-level monitor/device-path hooks and hotplug handling. */
static void usb_bus_class_init(ObjectClass *klass, void *data)
{
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
    BusClass *bc = BUS_CLASS(klass);

    bc->print_dev = usb_bus_dev_print;
    bc->get_dev_path = usb_get_dev_path;
    bc->get_fw_dev_path = usb_get_fw_dev_path;
    /* No controller-specific work is needed to unplug a USB device. */
    hc->unplug = qdev_simple_device_unplug_cb;
}
/* QOM registration for TYPE_USB_BUS; the bus acts as its own hotplug handler. */
static const TypeInfo usb_bus_info = {
    .name = TYPE_USB_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(USBBus),
    .class_init = usb_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
static int next_usb_bus = 0;
static QTAILQ_HEAD(, USBBus) busses = QTAILQ_HEAD_INITIALIZER(busses);
/* Migration post-load hook: derive the attached flag from the device state. */
static int usb_device_post_load(void *opaque, int version_id)
{
    USBDevice *dev = opaque;

    dev->attached = (dev->state != USB_STATE_NOTATTACHED);
    return 0;
}
/*
 * Migration state for the fields common to all USB devices.  post_load
 * reconstructs dev->attached from the migrated state value.
 */
const VMStateDescription vmstate_usb_device = {
    .name = "USBDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = usb_device_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(addr, USBDevice),
        VMSTATE_INT32(state, USBDevice),
        VMSTATE_INT32(remote_wakeup, USBDevice),
        VMSTATE_INT32(setup_state, USBDevice),
        VMSTATE_INT32(setup_len, USBDevice),
        VMSTATE_INT32(setup_index, USBDevice),
        VMSTATE_UINT8_ARRAY(setup_buf, USBDevice, 8),
        VMSTATE_END_OF_LIST(),
    }
};
/*
 * Initialize @bus in place on behalf of host controller @host and add it
 * to the global list of USB busses.  Bus numbers are handed out
 * sequentially.
 */
void usb_bus_new(USBBus *bus, size_t bus_size,
                 USBBusOps *ops, DeviceState *host)
{
    qbus_create_inplace(bus, bus_size, TYPE_USB_BUS, host, NULL);
    /* The bus itself implements the hotplug-handler interface. */
    qbus_set_bus_hotplug_handler(BUS(bus), &error_abort);
    bus->ops = ops;
    bus->busnr = next_usb_bus++;
    QTAILQ_INIT(&bus->free);
    QTAILQ_INIT(&bus->used);
    QTAILQ_INSERT_TAIL(&busses, bus, next);
}
/* Remove @bus from the global bus list (inverse of usb_bus_new). */
void usb_bus_release(USBBus *bus)
{
    /* Sanity: at least one bus must have been created. */
    assert(next_usb_bus > 0);
    QTAILQ_REMOVE(&busses, bus, next);
}
/* Look up a bus by number; busnr == -1 means "any", i.e. the first bus. */
USBBus *usb_bus_find(int busnr)
{
    USBBus *iter;

    if (busnr == -1) {
        return QTAILQ_FIRST(&busses);
    }
    QTAILQ_FOREACH(iter, &busses, next) {
        if (iter->busnr == busnr) {
            return iter;
        }
    }
    return NULL;
}
/* Invoke the device class's optional realize hook. */
static void usb_device_realize(USBDevice *dev, Error **errp)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    if (!uc->realize) {
        return;
    }
    uc->realize(dev, errp);
}
/* Delegate address lookup to the class hook when one is provided. */
USBDevice *usb_device_find_device(USBDevice *dev, uint8_t addr)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    return uc->find_device ? uc->find_device(dev, addr) : NULL;
}
/* Invoke the device class's optional unrealize hook. */
static void usb_device_unrealize(USBDevice *dev, Error **errp)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    if (!uc->unrealize) {
        return;
    }
    uc->unrealize(dev, errp);
}
/* Ask the device to cancel an in-flight packet, if it supports doing so. */
void usb_device_cancel_packet(USBDevice *dev, USBPacket *p)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    if (!uc->cancel_packet) {
        return;
    }
    uc->cancel_packet(dev, p);
}
/* Notify the device that it has been attached to a port. */
void usb_device_handle_attach(USBDevice *dev)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    if (!uc->handle_attach) {
        return;
    }
    uc->handle_attach(dev);
}
/* Forward a bus reset notification to the device, if it cares. */
void usb_device_handle_reset(USBDevice *dev)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    if (!uc->handle_reset) {
        return;
    }
    uc->handle_reset(dev);
}
/* Forward a control transfer to the device's handle_control hook. */
void usb_device_handle_control(USBDevice *dev, USBPacket *p, int request,
                               int value, int index, int length, uint8_t *data)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    if (!uc->handle_control) {
        return;
    }
    uc->handle_control(dev, p, request, value, index, length, data);
}
/* Forward a data transfer to the device's handle_data hook. */
void usb_device_handle_data(USBDevice *dev, USBPacket *p)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    if (!uc->handle_data) {
        return;
    }
    uc->handle_data(dev, p);
}
/* The default product description lives on the class, not the instance. */
const char *usb_device_get_product_desc(USBDevice *dev)
{
    return USB_DEVICE_GET_CLASS(dev)->product_desc;
}
/* A per-instance descriptor set, when present, overrides the class default. */
const USBDesc *usb_device_get_usb_desc(USBDevice *dev)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    return dev->usb_desc ? dev->usb_desc : uc->usb_desc;
}
/* Notify the device of an interface alternate-setting change. */
void usb_device_set_interface(USBDevice *dev, int interface,
                              int alt_old, int alt_new)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    if (!uc->set_interface) {
        return;
    }
    uc->set_interface(dev, interface, alt_old, alt_new);
}
/* Ask the device to flush any queued packets on the given endpoint. */
void usb_device_flush_ep_queue(USBDevice *dev, USBEndpoint *ep)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    if (!uc->flush_ep_queue) {
        return;
    }
    uc->flush_ep_queue(dev, ep);
}
/* Notify the device that an endpoint has been stopped. */
void usb_device_ep_stopped(USBDevice *dev, USBEndpoint *ep)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    if (!uc->ep_stopped) {
        return;
    }
    uc->ep_stopped(dev, ep);
}
/*
 * Allocate stream resources on a set of endpoints.  Devices without an
 * alloc_streams hook report success (nothing to set up).
 */
int usb_device_alloc_streams(USBDevice *dev, USBEndpoint **eps, int nr_eps,
                             int streams)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    return uc->alloc_streams
        ? uc->alloc_streams(dev, eps, nr_eps, streams) : 0;
}
/* Release stream resources previously set up via usb_device_alloc_streams. */
void usb_device_free_streams(USBDevice *dev, USBEndpoint **eps, int nr_eps)
{
    USBDeviceClass *uc = USB_DEVICE_GET_CLASS(dev);

    if (!uc->free_streams) {
        return;
    }
    uc->free_streams(dev, eps, nr_eps);
}
/*
 * qdev realize hook for USB devices: claim a port, realize the device
 * class, and (optionally) attach it.  Each step unwinds the previous
 * ones on failure.
 */
static void usb_qdev_realize(DeviceState *qdev, Error **errp)
{
    USBDevice *dev = USB_DEVICE(qdev);
    Error *local_err = NULL;
    /* Seed the runtime product description from the class default. */
    pstrcpy(dev->product_desc, sizeof(dev->product_desc),
            usb_device_get_product_desc(dev));
    dev->auto_attach = 1;
    QLIST_INIT(&dev->strings);
    usb_ep_init(dev);
    /* A port must be claimed before the device itself is realized. */
    usb_claim_port(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    usb_device_realize(dev, &local_err);
    if (local_err) {
        /* Device realize failed: hand the port back before reporting. */
        usb_release_port(dev);
        error_propagate(errp, local_err);
        return;
    }
    /* NOTE(review): realize hooks may clear auto_attach — TODO confirm. */
    if (dev->auto_attach) {
        usb_device_attach(dev, &local_err);
        if (local_err) {
            /* Attach failed: a full unrealize unwinds realize and port. */
            usb_qdev_unrealize(qdev, NULL);
            error_propagate(errp, local_err);
            return;
        }
    }
}
/*
 * qdev unrealize hook: free string descriptors, detach from the port,
 * run the class unrealize hook, and release the port.
 */
static void usb_qdev_unrealize(DeviceState *qdev, Error **errp)
{
    USBDevice *dev = USB_DEVICE(qdev);
    USBDescString *s, *next;
    /* Free all dynamically registered string descriptors. */
    QLIST_FOREACH_SAFE(s, &dev->strings, next, next) {
        QLIST_REMOVE(s, next);
        g_free(s->str);
        g_free(s);
    }
    if (dev->attached) {
        usb_device_detach(dev);
    }
    usb_device_unrealize(dev, errp);
    /* dev->port may already be NULL if realize failed part-way. */
    if (dev->port) {
        usb_release_port(dev);
    }
}
/* Mapping from a legacy -usbdevice name to its QOM type and init helper. */
typedef struct LegacyUSBFactory
{
    const char *name;           /* QOM type name */
    const char *usbdevice_name; /* name used on the -usbdevice command line */
    USBDevice *(*usbdevice_init)(USBBus *bus, const char *params);
} LegacyUSBFactory;
/* Registered factories; searched when -usbdevice is parsed. */
static GSList *legacy_usb_factory;
/*
 * Register a legacy -usbdevice factory for @typename.  A NULL
 * @usbdevice_name means the type has no legacy alias and nothing is
 * recorded.
 */
void usb_legacy_register(const char *typename, const char *usbdevice_name,
                         USBDevice *(*usbdevice_init)(USBBus *bus,
                                                      const char *params))
{
    LegacyUSBFactory *factory;

    if (!usbdevice_name) {
        return;
    }
    factory = g_malloc0(sizeof(*factory));
    factory->name = typename;
    factory->usbdevice_name = usbdevice_name;
    factory->usbdevice_init = usbdevice_init;
    legacy_usb_factory = g_slist_append(legacy_usb_factory, factory);
}
/* Allocate a USB qdev of type @name on @bus; the caller realizes it later. */
USBDevice *usb_create(USBBus *bus, const char *name)
{
    return USB_DEVICE(qdev_create(&bus->qbus, name));
}
/*
 * Create and realize a USB device of type @name on @bus, reporting
 * failure through @errp instead of aborting.  Returns NULL on failure.
 */
static USBDevice *usb_try_create_simple(USBBus *bus, const char *name,
                                        Error **errp)
{
    Error *err = NULL;
    USBDevice *dev;
    dev = USB_DEVICE(qdev_try_create(&bus->qbus, name));
    if (!dev) {
        error_setg(errp, "Failed to create USB device '%s'", name);
        return NULL;
    }
    /* Setting "realized" runs usb_qdev_realize (port claim + attach). */
    object_property_set_bool(OBJECT(dev), true, "realized", &err);
    if (err) {
        error_propagate_prepend(errp, err,
                                "Failed to initialize USB device '%s': ",
                                name);
        return NULL;
    }
    return dev;
}
/* Like usb_try_create_simple(), but any failure aborts QEMU. */
USBDevice *usb_create_simple(USBBus *bus, const char *name)
{
    return usb_try_create_simple(bus, name, &error_abort);
}
/* Populate the common fields of a freshly registered port. */
static void usb_fill_port(USBPort *port, void *opaque, int index,
                          USBPortOps *ops, int speedmask)
{
    port->ops = ops;
    port->opaque = opaque;
    port->index = index;
    port->speedmask = speedmask;
    /* Root ports have no upstream hub; names are 1-based, indices 0-based. */
    usb_port_location(port, NULL, index + 1);
}
/* Register a new port on @bus; fresh ports start out on the free list. */
void usb_register_port(USBBus *bus, USBPort *port, void *opaque, int index,
                       USBPortOps *ops, int speedmask)
{
    usb_fill_port(port, opaque, index, ops, speedmask);
    bus->nfree++;
    QTAILQ_INSERT_TAIL(&bus->free, port, next);
}
/*
 * Register @portcount companion-controller ports against the master bus
 * named @masterbus, starting at @firstport.  Fails if the bus does not
 * exist or does not support companion controllers.
 */
void usb_register_companion(const char *masterbus, USBPort *ports[],
                            uint32_t portcount, uint32_t firstport,
                            void *opaque, USBPortOps *ops, int speedmask,
                            Error **errp)
{
    USBBus *bus;
    int i;
    /* QTAILQ_FOREACH leaves bus == NULL when no name matches. */
    QTAILQ_FOREACH(bus, &busses, next) {
        if (strcmp(bus->qbus.name, masterbus) == 0) {
            break;
        }
    }
    if (!bus) {
        error_setg(errp, "USB bus '%s' not found", masterbus);
        return;
    }
    if (!bus->ops->register_companion) {
        error_setg(errp, "Can't use USB bus '%s' as masterbus,"
                   " it doesn't support companion controllers",
                   masterbus);
        return;
    }
    for (i = 0; i < portcount; i++) {
        usb_fill_port(ports[i], opaque, i, ops, speedmask);
    }
    /* The master bus takes ownership of the ports from here on. */
    bus->ops->register_companion(bus, ports, portcount, firstport, errp);
}
/*
 * Assign @downstream its dotted path ("a.b.c") and hub depth.  A NULL
 * @upstream marks a root port whose path is just the port number.
 */
void usb_port_location(USBPort *downstream, USBPort *upstream, int portnr)
{
    if (upstream) {
        int l = snprintf(downstream->path, sizeof(downstream->path), "%s.%d",
                         upstream->path, portnr);
        /* Max string is nn.nn.nn.nn.nn, which fits in 16 bytes */
        assert(l < sizeof(downstream->path));
        downstream->hubcount = upstream->hubcount + 1;
    } else {
        snprintf(downstream->path, sizeof(downstream->path), "%d", portnr);
        downstream->hubcount = 0;
    }
}
/*
 * Remove a port from @bus.  Any device still plugged into the port is
 * destroyed first, which returns the port to the free list.
 */
void usb_unregister_port(USBBus *bus, USBPort *port)
{
    if (port->dev) {
        object_unparent(OBJECT(port->dev));
    }
    bus->nfree--;
    QTAILQ_REMOVE(&bus->free, port, next);
}
/*
 * Move a port from the bus's free list to its used list and bind it to
 * @dev.  With an explicit port_path property the named port must be
 * free; otherwise the first free port is taken, auto-creating a hub
 * when only one free port remains (unless @dev itself is a hub).
 */
void usb_claim_port(USBDevice *dev, Error **errp)
{
    USBBus *bus = usb_bus_from_device(dev);
    USBPort *port;
    assert(dev->port == NULL);
    if (dev->port_path) {
        QTAILQ_FOREACH(port, &bus->free, next) {
            if (strcmp(port->path, dev->port_path) == 0) {
                break;
            }
        }
        if (port == NULL) {
            error_setg(errp, "usb port %s (bus %s) not found (in use?)",
                       dev->port_path, bus->qbus.name);
            return;
        }
    } else {
        if (bus->nfree == 1 && strcmp(object_get_typename(OBJECT(dev)), "usb-hub") != 0) {
            /* Create a new hub and chain it on */
            usb_try_create_simple(bus, "usb-hub", NULL);
        }
        if (bus->nfree == 0) {
            error_setg(errp, "tried to attach usb device %s to a bus "
                       "with no free ports", dev->product_desc);
            return;
        }
        port = QTAILQ_FIRST(&bus->free);
    }
    trace_usb_port_claim(bus->busnr, port->path);
    /* Link device and port and move the port onto the used list. */
    QTAILQ_REMOVE(&bus->free, port, next);
    bus->nfree--;
    dev->port = port;
    port->dev = dev;
    QTAILQ_INSERT_TAIL(&bus->used, port, next);
    bus->nused++;
}
/* Inverse of usb_claim_port: unbind @dev and return its port to the free list. */
void usb_release_port(USBDevice *dev)
{
    USBPort *port = dev->port;
    USBBus *bus = usb_bus_from_device(dev);

    assert(port != NULL);
    trace_usb_port_release(bus->busnr, port->path);
    /* Unlink device and port from each other... */
    port->dev = NULL;
    dev->port = NULL;
    /* ...and move the port from the used list back to the free list. */
    QTAILQ_REMOVE(&bus->used, port, next);
    bus->nused--;
    QTAILQ_INSERT_TAIL(&bus->free, port, next);
    bus->nfree++;
}
/*
 * Render a USB speed mask as a human-readable '+'-separated string,
 * e.g. "full+high".  An empty mask yields "unknown".
 */
static void usb_mask_to_str(char *dest, size_t size,
                            unsigned int speedmask)
{
    static const struct {
        unsigned int mask;
        const char *name;
    } speeds[] = {
        { .mask = USB_SPEED_MASK_FULL,  .name = "full"  },
        { .mask = USB_SPEED_MASK_HIGH,  .name = "high"  },
        { .mask = USB_SPEED_MASK_SUPER, .name = "super" },
    };
    int i, pos = 0;
    for (i = 0; i < ARRAY_SIZE(speeds); i++) {
        if (speeds[i].mask & speedmask) {
            /* pos accumulates so later names append after a '+'. */
            pos += snprintf(dest + pos, size - pos, "%s%s",
                            pos ? "+" : "",
                            speeds[i].name);
        }
    }
    if (pos == 0) {
        snprintf(dest, size, "unknown");
    }
}
/*
 * Validate that @dev can be attached to the port it has claimed.
 * Currently the only check is that the device and port share at least
 * one supported speed; on mismatch @errp is set and the caller must
 * not proceed with the attach.
 */
void usb_check_attach(USBDevice *dev, Error **errp)
{
    USBBus *bus = usb_bus_from_device(dev);
    USBPort *port = dev->port;
    char devspeed[32], portspeed[32];
    assert(port != NULL);
    assert(!dev->attached);
    /* Human-readable speed lists for tracing and the error message. */
    usb_mask_to_str(devspeed, sizeof(devspeed), dev->speedmask);
    usb_mask_to_str(portspeed, sizeof(portspeed), port->speedmask);
    trace_usb_port_attach(bus->busnr, port->path,
                          devspeed, portspeed);
    if (!(port->speedmask & dev->speedmask)) {
        /* No common speed: report and bail out (message text is ABI-visible). */
        error_setg(errp, "Warning: speed mismatch trying to attach"
                   " usb device \"%s\" (%s speed)"
                   " to bus \"%s\", port \"%s\" (%s speed)",
                   dev->product_desc, devspeed,
                   bus->qbus.name, port->path, portspeed);
        return;
    }
}
/* Attach @dev to its claimed port, after passing the compatibility check. */
void usb_device_attach(USBDevice *dev, Error **errp)
{
    Error *err = NULL;

    usb_check_attach(dev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    dev->attached = true;
    usb_attach(dev->port);
}
/* Detach @dev from its port; always succeeds (returns 0). */
int usb_device_detach(USBDevice *dev)
{
    USBPort *port = dev->port;
    USBBus *bus = usb_bus_from_device(dev);

    assert(port != NULL);
    assert(dev->attached);

    trace_usb_port_detach(bus->busnr, port->path);
    usb_detach(port);
    dev->attached = false;
    return 0;
}
/* Map a USB_SPEED_* value to its Mb/s string, "?" for unknown values. */
static const char *usb_speed(unsigned int speed)
{
    static const char *txt[] = {
        [ USB_SPEED_LOW ]   = "1.5",
        [ USB_SPEED_FULL ]  = "12",
        [ USB_SPEED_HIGH ]  = "480",
        [ USB_SPEED_SUPER ] = "5000",
    };

    return speed < ARRAY_SIZE(txt) ? txt[speed] : "?";
}
/* qdev "info qtree" pretty-printer for USB devices. */
static void usb_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent)
{
    USBDevice *dev = USB_DEVICE(qdev);
    USBBus *bus = usb_bus_from_device(dev);
    const char *port_path = dev->port ? dev->port->path : "-";
    const char *attached = dev->attached ? ", attached" : "";

    monitor_printf(mon, "%*saddr %d.%d, port %s, speed %s, name %s%s\n",
                   indent, "", bus->busnr, dev->addr, port_path,
                   usb_speed(dev->speed), dev->product_desc, attached);
}
/*
 * Build the qdev path of a USB device.  With USB_DEV_FLAG_FULL_PATH the
 * host controller's path is prepended; otherwise (or when the HCD has no
 * path) only the port path is returned.  Caller frees the result.
 */
static char *usb_get_dev_path(DeviceState *qdev)
{
    USBDevice *dev = USB_DEVICE(qdev);

    if (dev->flags & (1 << USB_DEV_FLAG_FULL_PATH)) {
        char *hcd_path = qdev_get_dev_path(qdev->parent_bus->parent);
        if (hcd_path) {
            char *full = g_strdup_printf("%s/%s", hcd_path, dev->port->path);
            g_free(hcd_path);
            return full;
        }
    }
    return g_strdup(dev->port->path);
}
/*
 * Build the OpenFirmware device path for a USB device from its dotted
 * port path (e.g. "1.2.3"): every intermediate component becomes a
 * "hub@N/" element and the final component becomes "<fw-name>@N".
 * Caller frees the returned string.
 */
static char *usb_get_fw_dev_path(DeviceState *qdev)
{
    USBDevice *dev = USB_DEVICE(qdev);
    char *fw_path, *in;
    ssize_t pos = 0, fw_len;
    long nr;
    /* Worst case: every digit expands to "hub@N/" — 6 bytes per input char. */
    fw_len = 32 + strlen(dev->port->path) * 6;
    fw_path = g_malloc(fw_len);
    in = dev->port->path;
    while (fw_len - pos > 0) {
        /* strtol advances 'in' past the number it parsed. */
        nr = strtol(in, &in, 10);
        if (in[0] == '.') {
            /* some hub between root port and device */
            pos += snprintf(fw_path + pos, fw_len - pos, "hub@%lx/", nr);
            in++;
        } else {
            /* the device itself */
            pos += snprintf(fw_path + pos, fw_len - pos, "%s@%lx",
                            qdev_fw_name(qdev), nr);
            break;
        }
    }
    return fw_path;
}
/*
 * HMP "info usb" command: list every attached device on every USB bus.
 * @qdict is unused (standard HMP handler signature).
 */
void hmp_info_usb(Monitor *mon, const QDict *qdict)
{
    USBBus *bus;
    USBDevice *dev;
    USBPort *port;
    if (QTAILQ_EMPTY(&busses)) {
        monitor_printf(mon, "USB support not enabled\n");
        return;
    }
    QTAILQ_FOREACH(bus, &busses, next) {
        /* Only used ports can carry a device. */
        QTAILQ_FOREACH(port, &bus->used, next) {
            dev = port->dev;
            if (!dev)
                continue;
            monitor_printf(mon, "  Device %d.%d, Port %s, Speed %s Mb/s, "
                           "Product %s%s%s\n",
                           bus->busnr, dev->addr, port->path,
                           usb_speed(dev->speed), dev->product_desc,
                           dev->qdev.id ? ", ID: " : "",
                           dev->qdev.id ?: "");
        }
    }
}
/*
 * Handle the legacy "-usbdevice driver:params" command line option.
 * Splits the driver name from its parameters, looks the driver up in the
 * registered LegacyUSBFactory list, creates and realizes the device on
 * the first available USB bus.  Returns NULL on any failure (an unknown
 * driver name fails silently — see the #if 0 below).
 */
USBDevice *usbdevice_create(const char *cmdline)
{
    USBBus *bus = usb_bus_find(-1 /* any */);
    LegacyUSBFactory *f = NULL;
    Error *err = NULL;
    GSList *i;
    char driver[32];
    const char *params;
    int len;
    USBDevice *dev;
    params = strchr(cmdline,':');
    if (params) {
        params++;
        /* len counts the driver name plus the ':'; pstrcpy copies len-1
         * chars and NUL-terminates, so the ':' is dropped. */
        len = params - cmdline;
        if (len > sizeof(driver))
            len = sizeof(driver);
        pstrcpy(driver, len, cmdline);
    } else {
        params = "";
        pstrcpy(driver, sizeof(driver), cmdline);
    }
    for (i = legacy_usb_factory; i; i = i->next) {
        f = i->data;
        if (strcmp(f->usbdevice_name, driver) == 0) {
            break;
        }
    }
    if (i == NULL) {
#if 0
        /* no error because some drivers are not converted (yet) */
        error_report("usbdevice %s not found", driver);
#endif
        return NULL;
    }
    if (!bus) {
        error_report("Error: no usb bus to attach usbdevice %s, "
                     "please try -machine usb=on and check that "
                     "the machine model supports USB", driver);
        return NULL;
    }
    /* Factories with a legacy init hook parse params themselves;
     * plain qdev factories accept no params at all. */
    if (f->usbdevice_init) {
        dev = f->usbdevice_init(bus, params);
    } else {
        if (*params) {
            error_report("usbdevice %s accepts no params", driver);
            return NULL;
        }
        dev = usb_create(bus, f->name);
    }
    if (!dev) {
        error_report("Failed to create USB device '%s'", f->name);
        return NULL;
    }
    object_property_set_bool(OBJECT(dev), true, "realized", &err);
    if (err) {
        error_reportf_err(err, "Failed to initialize USB device '%s': ",
                          f->name);
        object_unparent(OBJECT(dev));
        return NULL;
    }
    return dev;
}
/* QOM "attached" property getter. */
static bool usb_get_attached(Object *obj, Error **errp)
{
    return USB_DEVICE(obj)->attached;
}
/* QOM "attached" property setter: attach/detach the device on edge. */
static void usb_set_attached(Object *obj, bool value, Error **errp)
{
    USBDevice *dev = USB_DEVICE(obj);

    if (dev->attached == value) {
        return; /* already in the requested state */
    }
    if (value) {
        Error *local_err = NULL;
        usb_device_attach(dev, &local_err);
        error_propagate(errp, local_err);
    } else {
        usb_device_detach(dev);
    }
}
/*
 * Per-instance init: expose the "attached" property.  The setter is only
 * wired up for device classes that declare attached_settable, making the
 * property read-only otherwise.
 */
static void usb_device_instance_init(Object *obj)
{
    USBDevice *dev = USB_DEVICE(obj);
    USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev);

    object_property_add_bool(obj, "attached",
                             usb_get_attached,
                             klass->attached_settable ? usb_set_attached
                                                      : NULL,
                             NULL);
}
/* Class init for TYPE_USB_DEVICE: hook qdev realize/unrealize and props. */
static void usb_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->bus_type  = TYPE_USB_BUS;
    dc->realize   = usb_qdev_realize;
    dc->unrealize = usb_qdev_unrealize;
    dc->props     = usb_props;
}
/* Abstract base type for all USB devices; concrete devices subclass this. */
static const TypeInfo usb_device_type_info = {
    .name = TYPE_USB_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(USBDevice),
    .instance_init = usb_device_instance_init,
    .abstract = true,
    .class_size = sizeof(USBDeviceClass),
    .class_init = usb_device_class_init,
};
/* Register the USB bus and device base types with QOM at startup. */
static void usb_register_types(void)
{
    type_register_static(&usb_bus_info);
    type_register_static(&usb_device_type_info);
}
type_init(usb_register_types)
|
pmp-tool/PMP
|
src/qemu/src-pmp/include/sysemu/arch_init.h
|
<gh_stars>1-10
#ifndef QEMU_ARCH_INIT_H
#define QEMU_ARCH_INIT_H
#include "qapi/qapi-types-misc.h"
/*
 * Target-architecture bit flags, used to tag devices/features with the
 * architectures they apply to (QEMU_ARCH_ALL matches everything).
 */
enum {
    QEMU_ARCH_ALL = -1,
    QEMU_ARCH_ALPHA = (1 << 0),
    QEMU_ARCH_ARM = (1 << 1),
    QEMU_ARCH_CRIS = (1 << 2),
    QEMU_ARCH_I386 = (1 << 3),
    QEMU_ARCH_M68K = (1 << 4),
    QEMU_ARCH_LM32 = (1 << 5),
    QEMU_ARCH_MICROBLAZE = (1 << 6),
    QEMU_ARCH_MIPS = (1 << 7),
    QEMU_ARCH_PPC = (1 << 8),
    QEMU_ARCH_S390X = (1 << 9),
    QEMU_ARCH_SH4 = (1 << 10),
    QEMU_ARCH_SPARC = (1 << 11),
    QEMU_ARCH_XTENSA = (1 << 12),
    QEMU_ARCH_OPENRISC = (1 << 13),
    QEMU_ARCH_UNICORE32 = (1 << 14),
    QEMU_ARCH_MOXIE = (1 << 15),
    QEMU_ARCH_TRICORE = (1 << 16),
    QEMU_ARCH_NIOS2 = (1 << 17),
    QEMU_ARCH_HPPA = (1 << 18),
    QEMU_ARCH_RISCV = (1 << 19),
};
extern const uint32_t arch_type;
int kvm_available(void);
int xen_available(void);
#endif
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/riscv/virt.c
|
/*
* QEMU RISC-V VirtIO Board
*
* Copyright (c) 2017 SiFive, Inc.
*
* RISC-V machine with 16550a UART and VirtIO MMIO
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/sysbus.h"
#include "hw/char/serial.h"
#include "target/riscv/cpu.h"
#include "hw/riscv/riscv_htif.h"
#include "hw/riscv/riscv_hart.h"
#include "hw/riscv/sifive_plic.h"
#include "hw/riscv/sifive_clint.h"
#include "hw/riscv/sifive_test.h"
#include "hw/riscv/virt.h"
#include "chardev/char.h"
#include "sysemu/arch_init.h"
#include "sysemu/device_tree.h"
#include "exec/address-spaces.h"
#include "hw/pci/pci.h"
#include "hw/pci-host/gpex.h"
#include "elf.h"
#include <libfdt.h>
/*
 * Physical memory map of the RISC-V "virt" machine: base address and size
 * of each device region.  VIRT_DRAM's size is 0 here because actual RAM
 * size comes from the -m option at board init time.
 */
static const struct MemmapEntry {
    hwaddr base;
    hwaddr size;
} virt_memmap[] = {
    [VIRT_DEBUG] = { 0x0, 0x100 },
    [VIRT_MROM] = { 0x1000, 0x11000 },
    [VIRT_TEST] = { 0x100000, 0x1000 },
    [VIRT_CLINT] = { 0x2000000, 0x10000 },
    [VIRT_PLIC] = { 0xc000000, 0x4000000 },
    [VIRT_UART0] = { 0x10000000, 0x100 },
    [VIRT_VIRTIO] = { 0x10001000, 0x1000 },
    [VIRT_DRAM] = { 0x80000000, 0x0 },
    [VIRT_PCIE_MMIO] = { 0x40000000, 0x40000000 },
    [VIRT_PCIE_PIO] = { 0x03000000, 0x00010000 },
    [VIRT_PCIE_ECAM] = { 0x30000000, 0x10000000 },
};
/*
 * Load a RISC-V ELF kernel into guest memory and return its entry point.
 * Exits the process on load failure (fatal for board init).
 */
static target_ulong load_kernel(const char *kernel_filename)
{
    uint64_t kernel_entry, kernel_high;
    if (load_elf(kernel_filename, NULL, NULL, NULL,
                 &kernel_entry, NULL, &kernel_high,
                 0, EM_RISCV, 1, 0) < 0) {
        error_report("could not load kernel '%s'", kernel_filename);
        exit(1);
    }
    return kernel_entry;
}
/*
 * Load an initrd (as ramdisk image or flat binary) above the kernel.
 * Writes the chosen load address to *start and returns the end address.
 * Exits the process if the file cannot be loaded.
 */
static hwaddr load_initrd(const char *filename, uint64_t mem_size,
                          uint64_t kernel_entry, hwaddr *start)
{
    int size;
    /* We want to put the initrd far enough into RAM that when the
     * kernel is uncompressed it will not clobber the initrd. However
     * on boards without much RAM we must ensure that we still leave
     * enough room for a decent sized initrd, and on boards with large
     * amounts of RAM we must avoid the initrd being so far up in RAM
     * that it is outside lowmem and inaccessible to the kernel.
     * So for boards with less than 256MB of RAM we put the initrd
     * halfway into RAM, and for boards with 256MB of RAM or more we put
     * the initrd at 128MB.
     */
    /* NOTE(review): *start is an absolute guest address (kernel_entry is
     * up in DRAM at 0x8xxxxxxx) while mem_size is only the RAM *size*, so
     * "mem_size - *start" below underflows to a huge unsigned bound rather
     * than the remaining RAM.  Upstream later fixed this by passing the
     * DRAM end address instead — confirm before relying on the bound. */
    *start = kernel_entry + MIN(mem_size / 2, 128 * MiB);
    size = load_ramdisk(filename, *start, mem_size - *start);
    if (size == -1) {
        /* Not a ramdisk image: fall back to loading it as a raw binary. */
        size = load_image_targphys(filename, *start, mem_size - *start);
        if (size == -1) {
            error_report("could not load ramdisk '%s'", filename);
            exit(1);
        }
    }
    return *start + size;
}
/*
 * Populate the "interrupt-map" / "interrupt-map-mask" properties of the
 * PCIe host node so the guest can route legacy INTx pins to PLIC inputs.
 * Each map entry is (PCI address cells, PCI int cells, PLIC phandle,
 * PLIC address cells, PLIC irq number) — FDT_INT_MAP_WIDTH cells total.
 */
static void create_pcie_irq_map(void *fdt, char *nodename,
                                uint32_t plic_phandle)
{
    int pin, dev;
    uint32_t
        full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * FDT_INT_MAP_WIDTH] = {};
    uint32_t *irq_map = full_irq_map;
    /* This code creates a standard swizzle of interrupts such that
     * each device's first interrupt is based on it's PCI_SLOT number.
     * (See pci_swizzle_map_irq_fn())
     *
     * We only need one entry per interrupt in the table (not one per
     * possible slot) seeing the interrupt-map-mask will allow the table
     * to wrap to any number of devices.
     */
    for (dev = 0; dev < GPEX_NUM_IRQS; dev++) {
        int devfn = dev * 0x8;
        for (pin = 0; pin < GPEX_NUM_IRQS; pin++) {
            /* Swizzle: rotate the pin by the slot number. */
            int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS);
            int i = 0;
            /* Fill one map entry; unwritten cells stay zero-initialized. */
            irq_map[i] = cpu_to_be32(devfn << 8);
            i += FDT_PCI_ADDR_CELLS;
            irq_map[i] = cpu_to_be32(pin + 1);
            i += FDT_PCI_INT_CELLS;
            irq_map[i++] = cpu_to_be32(plic_phandle);
            i += FDT_PLIC_ADDR_CELLS;
            irq_map[i] = cpu_to_be32(irq_nr);
            irq_map += FDT_INT_MAP_WIDTH;
        }
    }
    qemu_fdt_setprop(fdt, nodename, "interrupt-map",
                     full_irq_map, sizeof(full_irq_map));
    /* Mask matches devfn (0x1800 = slot bits) and the 3 pin bits. */
    qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask",
                           0x1800, 0, 0, 0x7);
}
/*
 * Build the flattened device tree describing the whole "virt" machine:
 * memory, CPUs and their interrupt controllers, CLINT, PLIC, virtio-mmio
 * transports, the PCIe host, the sifive test device, the UART and the
 * /chosen node (stdout-path and kernel command line).
 * The FDT is stored in s->fdt/s->fdt_size and also returned.
 */
static void *create_fdt(RISCVVirtState *s, const struct MemmapEntry *memmap,
    uint64_t mem_size, const char *cmdline)
{
    void *fdt;
    int cpu;
    uint32_t *cells;
    char *nodename;
    uint32_t plic_phandle, phandle = 1;
    int i;
    fdt = s->fdt = create_device_tree(&s->fdt_size);
    if (!fdt) {
        error_report("create_device_tree() failed");
        exit(1);
    }
    /* Root node and /soc container. */
    qemu_fdt_setprop_string(fdt, "/", "model", "riscv-virtio,qemu");
    qemu_fdt_setprop_string(fdt, "/", "compatible", "riscv-virtio");
    qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2);
    qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2);
    qemu_fdt_add_subnode(fdt, "/soc");
    qemu_fdt_setprop(fdt, "/soc", "ranges", NULL, 0);
    qemu_fdt_setprop_string(fdt, "/soc", "compatible", "simple-bus");
    qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x2);
    qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x2);
    /* Main memory node ("reg" is a 64-bit base/size pair split in cells). */
    nodename = g_strdup_printf("/memory@%lx",
        (long)memmap[VIRT_DRAM].base);
    qemu_fdt_add_subnode(fdt, nodename);
    qemu_fdt_setprop_cells(fdt, nodename, "reg",
        memmap[VIRT_DRAM].base >> 32, memmap[VIRT_DRAM].base,
        mem_size >> 32, mem_size);
    qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory");
    g_free(nodename);
    /* One /cpus/cpu@N node (plus nested interrupt-controller) per hart. */
    qemu_fdt_add_subnode(fdt, "/cpus");
    qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency",
                          SIFIVE_CLINT_TIMEBASE_FREQ);
    qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0);
    qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1);
    for (cpu = s->soc.num_harts - 1; cpu >= 0; cpu--) {
        int cpu_phandle = phandle++;
        nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
        char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
        char *isa = riscv_isa_string(&s->soc.harts[cpu]);
        qemu_fdt_add_subnode(fdt, nodename);
        qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency",
                              VIRT_CLOCK_FREQ);
        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48");
        qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa);
        qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv");
        qemu_fdt_setprop_string(fdt, nodename, "status", "okay");
        qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu);
        qemu_fdt_setprop_string(fdt, nodename, "device_type", "cpu");
        qemu_fdt_add_subnode(fdt, intc);
        qemu_fdt_setprop_cell(fdt, intc, "phandle", cpu_phandle);
        qemu_fdt_setprop_cell(fdt, intc, "linux,phandle", cpu_phandle);
        qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc");
        qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0);
        qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1);
        g_free(isa);
        g_free(intc);
        g_free(nodename);
    }
    /* CLINT: wire machine software + timer irqs of every hart. */
    cells =  g_new0(uint32_t, s->soc.num_harts * 4);
    for (cpu = 0; cpu < s->soc.num_harts; cpu++) {
        nodename =
            g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
        uint32_t intc_phandle = qemu_fdt_get_phandle(fdt, nodename);
        cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
        cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
        cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
        cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
        g_free(nodename);
    }
    nodename = g_strdup_printf("/soc/clint@%lx",
        (long)memmap[VIRT_CLINT].base);
    qemu_fdt_add_subnode(fdt, nodename);
    qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv,clint0");
    qemu_fdt_setprop_cells(fdt, nodename, "reg",
        0x0, memmap[VIRT_CLINT].base,
        0x0, memmap[VIRT_CLINT].size);
    qemu_fdt_setprop(fdt, nodename, "interrupts-extended",
        cells, s->soc.num_harts * sizeof(uint32_t) * 4);
    g_free(cells);
    g_free(nodename);
    /* PLIC: wire machine + supervisor external irqs of every hart. */
    plic_phandle = phandle++;
    cells =  g_new0(uint32_t, s->soc.num_harts * 4);
    for (cpu = 0; cpu < s->soc.num_harts; cpu++) {
        nodename =
            g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
        uint32_t intc_phandle = qemu_fdt_get_phandle(fdt, nodename);
        cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
        cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT);
        cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
        cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT);
        g_free(nodename);
    }
    nodename = g_strdup_printf("/soc/interrupt-controller@%lx",
        (long)memmap[VIRT_PLIC].base);
    qemu_fdt_add_subnode(fdt, nodename);
    qemu_fdt_setprop_cells(fdt, nodename, "#address-cells",
                           FDT_PLIC_ADDR_CELLS);
    qemu_fdt_setprop_cell(fdt, nodename, "#interrupt-cells",
                          FDT_PLIC_INT_CELLS);
    qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv,plic0");
    qemu_fdt_setprop(fdt, nodename, "interrupt-controller", NULL, 0);
    qemu_fdt_setprop(fdt, nodename, "interrupts-extended",
        cells, s->soc.num_harts * sizeof(uint32_t) * 4);
    qemu_fdt_setprop_cells(fdt, nodename, "reg",
        0x0, memmap[VIRT_PLIC].base,
        0x0, memmap[VIRT_PLIC].size);
    qemu_fdt_setprop_string(fdt, nodename, "reg-names", "control");
    qemu_fdt_setprop_cell(fdt, nodename, "riscv,max-priority", 7);
    qemu_fdt_setprop_cell(fdt, nodename, "riscv,ndev", VIRTIO_NDEV);
    qemu_fdt_setprop_cells(fdt, nodename, "phandle", plic_phandle);
    qemu_fdt_setprop_cells(fdt, nodename, "linux,phandle", plic_phandle);
    plic_phandle = qemu_fdt_get_phandle(fdt, nodename);
    g_free(cells);
    g_free(nodename);
    /* virtio-mmio transports, one node per slot, consecutive PLIC irqs. */
    for (i = 0; i < VIRTIO_COUNT; i++) {
        nodename = g_strdup_printf("/virtio_mmio@%lx",
            (long)(memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size));
        qemu_fdt_add_subnode(fdt, nodename);
        qemu_fdt_setprop_string(fdt, nodename, "compatible", "virtio,mmio");
        qemu_fdt_setprop_cells(fdt, nodename, "reg",
            0x0, memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
            0x0, memmap[VIRT_VIRTIO].size);
        qemu_fdt_setprop_cells(fdt, nodename, "interrupt-parent", plic_phandle);
        qemu_fdt_setprop_cells(fdt, nodename, "interrupts", VIRTIO_IRQ + i);
        g_free(nodename);
    }
    /* Generic ECAM PCIe host bridge. */
    nodename = g_strdup_printf("/soc/pci@%lx",
                               (long) memmap[VIRT_PCIE_ECAM].base);
    qemu_fdt_add_subnode(fdt, nodename);
    qemu_fdt_setprop_cells(fdt, nodename, "#address-cells",
                           FDT_PCI_ADDR_CELLS);
    qemu_fdt_setprop_cells(fdt, nodename, "#interrupt-cells",
                           FDT_PCI_INT_CELLS);
    qemu_fdt_setprop_cells(fdt, nodename, "#size-cells", 0x2);
    qemu_fdt_setprop_string(fdt, nodename, "compatible",
                            "pci-host-ecam-generic");
    qemu_fdt_setprop_string(fdt, nodename, "device_type", "pci");
    qemu_fdt_setprop_cell(fdt, nodename, "linux,pci-domain", 0);
    qemu_fdt_setprop_cells(fdt, nodename, "bus-range", 0,
                           memmap[VIRT_PCIE_ECAM].base /
                               PCIE_MMCFG_SIZE_MIN - 1);
    qemu_fdt_setprop(fdt, nodename, "dma-coherent", NULL, 0);
    qemu_fdt_setprop_cells(fdt, nodename, "reg", 0, memmap[VIRT_PCIE_ECAM].base,
                           0, memmap[VIRT_PCIE_ECAM].size);
    qemu_fdt_setprop_sized_cells(fdt, nodename, "ranges",
                                 1, FDT_PCI_RANGE_IOPORT, 2, 0,
                                 2, memmap[VIRT_PCIE_PIO].base, 2, memmap[VIRT_PCIE_PIO].size,
                                 1, FDT_PCI_RANGE_MMIO,
                                 2, memmap[VIRT_PCIE_MMIO].base,
                                 2, memmap[VIRT_PCIE_MMIO].base, 2, memmap[VIRT_PCIE_MMIO].size);
    create_pcie_irq_map(fdt, nodename, plic_phandle);
    g_free(nodename);
    /* SiFive test device (used for poweroff/reset from the guest). */
    nodename = g_strdup_printf("/test@%lx",
        (long)memmap[VIRT_TEST].base);
    qemu_fdt_add_subnode(fdt, nodename);
    qemu_fdt_setprop_string(fdt, nodename, "compatible", "sifive,test0");
    qemu_fdt_setprop_cells(fdt, nodename, "reg",
        0x0, memmap[VIRT_TEST].base,
        0x0, memmap[VIRT_TEST].size);
    g_free(nodename);
    /* 16550a UART; also becomes the /chosen stdout-path. */
    nodename = g_strdup_printf("/uart@%lx",
        (long)memmap[VIRT_UART0].base);
    qemu_fdt_add_subnode(fdt, nodename);
    qemu_fdt_setprop_string(fdt, nodename, "compatible", "ns16550a");
    qemu_fdt_setprop_cells(fdt, nodename, "reg",
        0x0, memmap[VIRT_UART0].base,
        0x0, memmap[VIRT_UART0].size);
    qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", 3686400);
    qemu_fdt_setprop_cells(fdt, nodename, "interrupt-parent", plic_phandle);
    qemu_fdt_setprop_cells(fdt, nodename, "interrupts", UART0_IRQ);
    qemu_fdt_add_subnode(fdt, "/chosen");
    qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", nodename);
    if (cmdline) {
        qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline);
    }
    g_free(nodename);
    return fdt;
}
/*
 * Instantiate the generic PCIe host (TYPE_GPEX_HOST), map its ECAM and
 * MMIO windows into system memory via aliases, map the PIO window, and
 * connect its legacy interrupt lines to consecutive PLIC inputs.
 * NOTE(review): the sys_mem and link_up parameters are unused here —
 * the body calls get_system_memory() directly; confirm before removing.
 */
static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
                                          hwaddr ecam_base, hwaddr ecam_size,
                                          hwaddr mmio_base, hwaddr mmio_size,
                                          hwaddr pio_base,
                                          DeviceState *plic, bool link_up)
{
    DeviceState *dev;
    MemoryRegion *ecam_alias, *ecam_reg;
    MemoryRegion *mmio_alias, *mmio_reg;
    qemu_irq irq;
    int i;
    dev = qdev_create(NULL, TYPE_GPEX_HOST);
    qdev_init_nofail(dev);
    /* ECAM config space window (alias of the host bridge's region 0). */
    ecam_alias = g_new0(MemoryRegion, 1);
    ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
    memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam",
                             ecam_reg, 0, ecam_size);
    memory_region_add_subregion(get_system_memory(), ecam_base, ecam_alias);
    /* MMIO window (alias of region 1, offset to start at mmio_base). */
    mmio_alias = g_new0(MemoryRegion, 1);
    mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
    memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
                             mmio_reg, mmio_base, mmio_size);
    memory_region_add_subregion(get_system_memory(), mmio_base, mmio_alias);
    /* PIO window (region 2). */
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base);
    /* Wire legacy INTx lines to PLIC inputs PCIE_IRQ .. PCIE_IRQ+3. */
    for (i = 0; i < GPEX_NUM_IRQS; i++) {
        irq = qdev_get_gpio_in(plic, PCIE_IRQ + i);
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq);
        gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i);
    }
    return dev;
}
/*
 * Top-level board init for the RISC-V "virt" machine: creates the hart
 * array, RAM and boot ROM, builds the device tree, loads kernel/initrd,
 * installs the reset vector + FDT in the mask ROM, and instantiates the
 * PLIC, CLINT, test device, virtio-mmio transports, PCIe host and UART.
 */
static void riscv_virt_board_init(MachineState *machine)
{
    const struct MemmapEntry *memmap = virt_memmap;
    RISCVVirtState *s = g_new0(RISCVVirtState, 1);
    MemoryRegion *system_memory = get_system_memory();
    MemoryRegion *main_mem = g_new(MemoryRegion, 1);
    MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
    char *plic_hart_config;
    size_t plic_hart_config_len;
    int i;
    void *fdt;
    /* Initialize SOC */
    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
                            TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
    object_property_set_str(OBJECT(&s->soc), VIRT_CPU, "cpu-type",
                            &error_abort);
    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
                            &error_abort);
    object_property_set_bool(OBJECT(&s->soc), true, "realized",
                            &error_abort);
    /* register system main memory (actual RAM) */
    memory_region_init_ram(main_mem, NULL, "riscv_virt_board.ram",
                           machine->ram_size, &error_fatal);
    memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base,
        main_mem);
    /* create device tree */
    fdt = create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline);
    /* boot rom */
    memory_region_init_rom(mask_rom, NULL, "riscv_virt_board.mrom",
                           memmap[VIRT_MROM].size, &error_fatal);
    memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base,
                                mask_rom);
    if (machine->kernel_filename) {
        uint64_t kernel_entry = load_kernel(machine->kernel_filename);
        if (machine->initrd_filename) {
            hwaddr start;
            hwaddr end = load_initrd(machine->initrd_filename,
                                     machine->ram_size, kernel_entry,
                                     &start);
            /* Tell the kernel where the initrd lives via /chosen. */
            qemu_fdt_setprop_cell(fdt, "/chosen",
                                  "linux,initrd-start", start);
            qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end",
                                  end);
        }
    }
    /* reset vector: jumps to DRAM base with a0 = mhartid, a1 = &dtb */
    uint32_t reset_vec[8] = {
        0x00000297,                  /* 1:  auipc  t0, %pcrel_hi(dtb) */
        0x02028593,                  /*     addi   a1, t0, %pcrel_lo(1b) */
        0xf1402573,                  /*     csrr   a0, mhartid  */
#if defined(TARGET_RISCV32)
        0x0182a283,                  /*     lw     t0, 24(t0) */
#elif defined(TARGET_RISCV64)
        0x0182b283,                  /*     ld     t0, 24(t0) */
#endif
        0x00028067,                  /*     jr     t0 */
        0x00000000,
        memmap[VIRT_DRAM].base,      /* start: .dword memmap[VIRT_DRAM].base */
        0x00000000,
                                     /* dtb: */
    };
    /* copy in the reset vector in little_endian byte order */
    for (i = 0; i < sizeof(reset_vec) >> 2; i++) {
        reset_vec[i] = cpu_to_le32(reset_vec[i]);
    }
    rom_add_blob_fixed_as("mrom.reset", reset_vec, sizeof(reset_vec),
                          memmap[VIRT_MROM].base, &address_space_memory);
    /* copy in the device tree */
    if (fdt_pack(s->fdt) || fdt_totalsize(s->fdt) >
            memmap[VIRT_MROM].size - sizeof(reset_vec)) {
        error_report("not enough space to store device-tree");
        exit(1);
    }
    qemu_fdt_dumpdtb(s->fdt, fdt_totalsize(s->fdt));
    rom_add_blob_fixed_as("mrom.fdt", s->fdt, fdt_totalsize(s->fdt),
                          memmap[VIRT_MROM].base + sizeof(reset_vec),
                          &address_space_memory);
    /* create PLIC hart topology configuration string */
    /* NOTE(review): the strncat bound is the (shrinking) remaining length,
     * not the destination size; the allocation above is sized exactly, so
     * any change to the config string format must re-check this math. */
    plic_hart_config_len = (strlen(VIRT_PLIC_HART_CONFIG) + 1) * smp_cpus;
    plic_hart_config = g_malloc0(plic_hart_config_len);
    for (i = 0; i < smp_cpus; i++) {
        if (i != 0) {
            strncat(plic_hart_config, ",", plic_hart_config_len);
        }
        strncat(plic_hart_config, VIRT_PLIC_HART_CONFIG, plic_hart_config_len);
        plic_hart_config_len -= (strlen(VIRT_PLIC_HART_CONFIG) + 1);
    }
    /* MMIO */
    s->plic = sifive_plic_create(memmap[VIRT_PLIC].base,
        plic_hart_config,
        VIRT_PLIC_NUM_SOURCES,
        VIRT_PLIC_NUM_PRIORITIES,
        VIRT_PLIC_PRIORITY_BASE,
        VIRT_PLIC_PENDING_BASE,
        VIRT_PLIC_ENABLE_BASE,
        VIRT_PLIC_ENABLE_STRIDE,
        VIRT_PLIC_CONTEXT_BASE,
        VIRT_PLIC_CONTEXT_STRIDE,
        memmap[VIRT_PLIC].size);
    sifive_clint_create(memmap[VIRT_CLINT].base,
        memmap[VIRT_CLINT].size, smp_cpus,
        SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE);
    sifive_test_create(memmap[VIRT_TEST].base);
    for (i = 0; i < VIRTIO_COUNT; i++) {
        sysbus_create_simple("virtio-mmio",
            memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
            qdev_get_gpio_in(DEVICE(s->plic), VIRTIO_IRQ + i));
    }
    gpex_pcie_init(system_memory,
                   memmap[VIRT_PCIE_ECAM].base,
                   memmap[VIRT_PCIE_ECAM].size,
                   memmap[VIRT_PCIE_MMIO].base,
                   memmap[VIRT_PCIE_MMIO].size,
                   memmap[VIRT_PCIE_PIO].base,
                   DEVICE(s->plic), true);
    /* NOTE(review): the UART input clock here (399193) differs from the
     * "clock-frequency" 3686400 advertised in the FDT — confirm intended. */
    serial_mm_init(system_memory, memmap[VIRT_UART0].base,
        0, qdev_get_gpio_in(DEVICE(s->plic), UART0_IRQ), 399193,
        serial_hd(0), DEVICE_LITTLE_ENDIAN);
    g_free(plic_hart_config);
}
/* MachineClass setup for the RISC-V "virt" board. */
static void riscv_virt_board_machine_init(MachineClass *mc)
{
    mc->init = riscv_virt_board_init;
    mc->desc = "RISC-V VirtIO Board (Privileged ISA v1.10)";
    mc->max_cpus = 8; /* hardcoded limit in BBL */
}
DEFINE_MACHINE("virt", riscv_virt_board_machine_init)
|
pmp-tool/PMP
|
src/qemu/src-pmp/include/chardev/spice.h
|
<filename>src/qemu/src-pmp/include/chardev/spice.h<gh_stars>1-10
#ifndef CHARDEV_SPICE_H_
#define CHARDEV_SPICE_H_
#include <spice.h>
#include "chardev/char-fe.h"
/* Chardev backend that forwards data over a SPICE char device channel. */
typedef struct SpiceChardev {
    Chardev               parent;          /* base chardev object */
    SpiceCharDeviceInstance sin;           /* libspice char device instance */
    bool                  active;          /* channel is connected */
    bool                  blocked;         /* guest read side is throttled */
    const uint8_t         *datapos;        /* next byte of pending write data */
    int                   datalen;         /* bytes remaining at datapos */
    QLIST_ENTRY(SpiceChardev) next;        /* link in the global chardev list */
} SpiceChardev;
#define TYPE_CHARDEV_SPICE "chardev-spice"
#define TYPE_CHARDEV_SPICEVMC "chardev-spicevmc"
#define TYPE_CHARDEV_SPICEPORT "chardev-spiceport"
#define SPICE_CHARDEV(obj) OBJECT_CHECK(SpiceChardev, (obj), TYPE_CHARDEV_SPICE)
void qemu_chr_open_spice_port(Chardev *chr, ChardevBackend *backend,
bool *be_opened, Error **errp);
#endif
|
pmp-tool/PMP
|
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/int-add/test_msa_hadd_s_w.c
|
/*
* Test program for MSA instruction HADD_S.W
*
* Copyright (C) 2019 RT-RK Computer Based Systems LLC
* Copyright (C) 2019 <NAME> <<EMAIL>>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
#include <sys/time.h>
#include <stdint.h>
#include "../../../../include/wrappers_msa.h"
#include "../../../../include/test_inputs_128.h"
#include "../../../../include/test_utils_128.h"
#define TEST_COUNT_TOTAL ( \
(PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT) + \
(RANDOM_INPUTS_SHORT_COUNT) * (RANDOM_INPUTS_SHORT_COUNT))
/*
 * Run HADD_S.W over all pairs of pattern inputs and all pairs of random
 * inputs, timing the whole run, and compare the 128-bit results against
 * the precomputed expected table below.  Returns the check_results()
 * status (0 on success).
 */
int32_t main(void)
{
    char *instruction_name = "HADD_S.W";
    int32_t ret;
    uint32_t i, j;
    struct timeval start, end;
    double elapsed_time;
    uint64_t b128_result[TEST_COUNT_TOTAL][2];
    /* Expected outputs, one {lo, hi} pair per (i, j) input combination. */
    uint64_t b128_expect[TEST_COUNT_TOTAL][2] = {
        { 0xfffffffefffffffeULL, 0xfffffffefffffffeULL, },    /*   0  */
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0xffffaaa9ffffaaa9ULL, 0xffffaaa9ffffaaa9ULL, },
        { 0x0000555400005554ULL, 0x0000555400005554ULL, },
        { 0xffffcccbffffcccbULL, 0xffffcccbffffcccbULL, },
        { 0x0000333200003332ULL, 0x0000333200003332ULL, },
        { 0x000038e2ffffe38dULL, 0xffff8e37000038e2ULL, },
        { 0xffffc71b00001c70ULL, 0x000071c6ffffc71bULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },    /*   8  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0xffffaaaaffffaaaaULL, 0xffffaaaaffffaaaaULL, },
        { 0x0000555500005555ULL, 0x0000555500005555ULL, },
        { 0xffffccccffffccccULL, 0xffffccccffffccccULL, },
        { 0x0000333300003333ULL, 0x0000333300003333ULL, },
        { 0x000038e3ffffe38eULL, 0xffff8e38000038e3ULL, },
        { 0xffffc71c00001c71ULL, 0x000071c7ffffc71cULL, },
        { 0xffffaaa9ffffaaa9ULL, 0xffffaaa9ffffaaa9ULL, },    /*  16  */
        { 0xffffaaaaffffaaaaULL, 0xffffaaaaffffaaaaULL, },
        { 0xffff5554ffff5554ULL, 0xffff5554ffff5554ULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0xffff7776ffff7776ULL, 0xffff7776ffff7776ULL, },
        { 0xffffddddffffddddULL, 0xffffddddffffddddULL, },
        { 0xffffe38dffff8e38ULL, 0xffff38e2ffffe38dULL, },
        { 0xffff71c6ffffc71bULL, 0x00001c71ffff71c6ULL, },
        { 0x0000555400005554ULL, 0x0000555400005554ULL, },    /*  24  */
        { 0x0000555500005555ULL, 0x0000555500005555ULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0x0000aaaa0000aaaaULL, 0x0000aaaa0000aaaaULL, },
        { 0x0000222100002221ULL, 0x0000222100002221ULL, },
        { 0x0000888800008888ULL, 0x0000888800008888ULL, },
        { 0x00008e38000038e3ULL, 0xffffe38d00008e38ULL, },
        { 0x00001c71000071c6ULL, 0x0000c71c00001c71ULL, },
        { 0xffffcccbffffcccbULL, 0xffffcccbffffcccbULL, },    /*  32  */
        { 0xffffccccffffccccULL, 0xffffccccffffccccULL, },
        { 0xffff7776ffff7776ULL, 0xffff7776ffff7776ULL, },
        { 0x0000222100002221ULL, 0x0000222100002221ULL, },
        { 0xffff9998ffff9998ULL, 0xffff9998ffff9998ULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0x000005afffffb05aULL, 0xffff5b04000005afULL, },
        { 0xffff93e8ffffe93dULL, 0x00003e93ffff93e8ULL, },
        { 0x0000333200003332ULL, 0x0000333200003332ULL, },    /*  40  */
        { 0x0000333300003333ULL, 0x0000333300003333ULL, },
        { 0xffffddddffffddddULL, 0xffffddddffffddddULL, },
        { 0x0000888800008888ULL, 0x0000888800008888ULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0x0000666600006666ULL, 0x0000666600006666ULL, },
        { 0x00006c16000016c1ULL, 0xffffc16b00006c16ULL, },
        { 0xfffffa4f00004fa4ULL, 0x0000a4fafffffa4fULL, },
        { 0xffffe38dffff8e37ULL, 0x000038e2ffffe38dULL, },    /*  48  */
        { 0xffffe38effff8e38ULL, 0x000038e3ffffe38eULL, },
        { 0xffff8e38ffff38e2ULL, 0xffffe38dffff8e38ULL, },
        { 0x000038e3ffffe38dULL, 0x00008e38000038e3ULL, },
        { 0xffffb05affff5b04ULL, 0x000005afffffb05aULL, },
        { 0x000016c1ffffc16bULL, 0x00006c16000016c1ULL, },
        { 0x00001c71ffff71c6ULL, 0xffffc71b00001c71ULL, },
        { 0xffffaaaaffffaaa9ULL, 0x0000aaaaffffaaaaULL, },
        { 0x00001c70000071c6ULL, 0xffffc71b00001c70ULL, },    /*  56  */
        { 0x00001c71000071c7ULL, 0xffffc71c00001c71ULL, },
        { 0xffffc71b00001c71ULL, 0xffff71c6ffffc71bULL, },
        { 0x000071c60000c71cULL, 0x00001c71000071c6ULL, },
        { 0xffffe93d00003e93ULL, 0xffff93e8ffffe93dULL, },
        { 0x00004fa40000a4faULL, 0xfffffa4f00004fa4ULL, },
        { 0x0000555400005555ULL, 0xffff555400005554ULL, },
        { 0xffffe38d00008e38ULL, 0x000038e3ffffe38dULL, },
        { 0xffff6f3600007da2ULL, 0x000056c5ffffae87ULL, },    /*  64  */
        { 0xffff88cdffffef6aULL, 0x0000068100005177ULL, },
        { 0xffff3714ffffb3e2ULL, 0x000012660000238fULL, },
        { 0xffff9eb700000ab0ULL, 0xffffd43fffffe11bULL, },
        { 0xffffe28a0000a2d3ULL, 0x00001e55ffffc54bULL, },
        { 0xfffffc210000149bULL, 0xffffce110000683bULL, },
        { 0xffffaa68ffffd913ULL, 0xffffd9f600003a53ULL, },
        { 0x0000120b00002fe1ULL, 0xffff9bcffffff7dfULL, },
        { 0xffff932600000f0fULL, 0x00003336ffff5b37ULL, },    /*  72  */
        { 0xffffacbdffff80d7ULL, 0xffffe2f2fffffe27ULL, },
        { 0xffff5b04ffff454fULL, 0xffffeed7ffffd03fULL, },
        { 0xffffc2a7ffff9c1dULL, 0xffffb0b0ffff8dcbULL, },
        { 0x0000571b0000b371ULL, 0xffff994fffff594eULL, },
        { 0x000070b200002539ULL, 0xffff490bfffffc3eULL, },
    };
    gettimeofday(&start, NULL);
    /* All pattern x pattern combinations first ... */
    for (i = 0; i < PATTERN_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < PATTERN_INPUTS_SHORT_COUNT; j++) {
            do_msa_HADD_S_W(b128_pattern[i], b128_pattern[j],
                            b128_result[PATTERN_INPUTS_SHORT_COUNT * i + j]);
        }
    }
    /* ... then all random x random combinations, appended after them. */
    for (i = 0; i < RANDOM_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < RANDOM_INPUTS_SHORT_COUNT; j++) {
            do_msa_HADD_S_W(b128_random[i], b128_random[j],
                            b128_result[((PATTERN_INPUTS_SHORT_COUNT) *
                                         (PATTERN_INPUTS_SHORT_COUNT)) +
                                        RANDOM_INPUTS_SHORT_COUNT * i + j]);
        }
    }
    gettimeofday(&end, NULL);
    /* Wall-clock time in milliseconds, reported by check_results(). */
    elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0;
    elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0;
    ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time,
                        &b128_result[0][0], &b128_expect[0][0]);
    return ret;
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/include/hw/ppc/spapr_xive.h
|
/*
* QEMU PowerPC sPAPR XIVE interrupt controller model
*
* Copyright (c) 2017-2018, IBM Corporation.
*
* This code is licensed under the GPL version 2 or later. See the
* COPYING file in the top-level directory.
*/
#ifndef PPC_SPAPR_XIVE_H
#define PPC_SPAPR_XIVE_H
#include "hw/ppc/xive.h"
#define TYPE_SPAPR_XIVE "spapr-xive"
#define SPAPR_XIVE(obj) OBJECT_CHECK(SpaprXive, (obj), TYPE_SPAPR_XIVE)
/*
 * sPAPR XIVE interrupt controller state.
 *
 * Extends XiveRouter with the sPAPR machine specifics: an internal
 * interrupt source, the END ESB MMIO region, the routing tables
 * (EAS/END) and the TIMA mapping.
 */
typedef struct SpaprXive {
    XiveRouter parent;
    /* Internal interrupt source for IPIs and virtual devices */
    XiveSource source;
    hwaddr vc_base;          /* base address for the source ESB MMIOs */
    /* END ESB MMIOs */
    XiveENDSource end_source;
    hwaddr end_base;
    /* DT */
    gchar *nodename;         /* device tree node name */
    /* Routing table */
    XiveEAS *eat;            /* Event Assignment table, nr_irqs entries */
    uint32_t nr_irqs;
    XiveEND *endt;           /* Event Notification Descriptor table */
    uint32_t nr_ends;
    /* TIMA mapping address */
    hwaddr tm_base;
    MemoryRegion tm_mmio;
} SpaprXive;
bool spapr_xive_irq_claim(SpaprXive *xive, uint32_t lisn, bool lsi);
bool spapr_xive_irq_free(SpaprXive *xive, uint32_t lisn);
void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon);
void spapr_xive_hcall_init(SpaprMachineState *spapr);
void spapr_dt_xive(SpaprMachineState *spapr, uint32_t nr_servers, void *fdt,
uint32_t phandle);
void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx);
void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable);
#endif /* PPC_SPAPR_XIVE_H */
|
pmp-tool/PMP
|
src/qemu/src-pmp/target/ppc/mmu-book3s-v3.h
|
<reponame>pmp-tool/PMP<gh_stars>1-10
/*
* PowerPC ISAV3 BookS emulation generic mmu definitions for qemu.
*
* Copyright (c) 2017 <NAME>, IBM Corporation
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef MMU_BOOOK3S_V3_H
#define MMU_BOOOK3S_V3_H
#include "mmu-hash64.h"
#ifndef CONFIG_USER_ONLY
/*
* Partition table definitions
*/
#define PTCR_PATB 0x0FFFFFFFFFFFF000ULL /* Partition Table Base */
#define PTCR_PATS 0x000000000000001FULL /* Partition Table Size */
/* Partition Table Entry Fields */
#define PATE0_HR 0x8000000000000000
/*
* WARNING: This field doesn't actually exist in the final version of
* the architecture and is unused by hardware. However, qemu uses it
* as an indication of a radix guest in the pseudo-PATB entry that it
* maintains for SPAPR guests and in the migration stream, so we need
* to keep it around
*/
#define PATE1_GR 0x8000000000000000
/* Process Table Entry */
/* Process Table Entry: two 64-bit doublewords, as laid out by ISA v3.0 */
struct prtb_entry {
    uint64_t prtbe0, prtbe1;
};
#ifdef TARGET_PPC64
/* True when LPCR[UPRT] is set, i.e. translation uses the process table. */
static inline bool ppc64_use_proc_tbl(PowerPCCPU *cpu)
{
    return (cpu->env.spr[SPR_LPCR] & LPCR_UPRT) != 0;
}
bool ppc64_v3_get_pate(PowerPCCPU *cpu, target_ulong lpid,
ppc_v3_pate_t *entry);
/*
* The LPCR:HR bit is a shortcut that avoids having to
* dig out the partition table in the fast path. This is
* also how the HW uses it.
*/
/* True when LPCR[HR] indicates the partition uses radix translation. */
static inline bool ppc64_v3_radix(PowerPCCPU *cpu)
{
    return (cpu->env.spr[SPR_LPCR] & LPCR_HR) != 0;
}
hwaddr ppc64_v3_get_phys_page_debug(PowerPCCPU *cpu, vaddr eaddr);
int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
int mmu_idx);
/*
 * Return the guest real base address of the hashed page table.
 *
 * Returns 0 when the CPU runs under a virtual hypervisor or when the
 * partition table entry cannot be fetched.  On a v3.00 MMU (POWER9)
 * the base comes from partition table entry dw0, on earlier MMUs from
 * the SDR1 register.
 */
static inline hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu)
{
    uint64_t base;
    if (cpu->vhyp) {
        return 0;
    }
    if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
        ppc_v3_pate_t pate;
        if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
            return 0;
        }
        base = pate.dw0;
    } else {
        base = cpu->env.spr[SPR_SDR1];
    }
    /* Keep only the HTABORG (table origin) bits */
    return base & SDR_64_HTABORG;
}
/*
 * Return the offset mask for the hashed page table, i.e. table size - 1.
 *
 * Under a virtual hypervisor the mask is delegated to the vhyp class
 * hook.  Otherwise the HTABSIZE field is taken from the partition
 * table entry (v3.00 MMU) or from SDR1, and expanded into a byte mask;
 * the "+ 18 - 7" exponent adjustment presumably converts the encoded
 * size to bytes of PTEG storage -- NOTE(review): confirm against the
 * ISA definition of HTABSIZE.
 */
static inline hwaddr ppc_hash64_hpt_mask(PowerPCCPU *cpu)
{
    uint64_t base;
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        return vhc->hpt_mask(cpu->vhyp);
    }
    if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
        ppc_v3_pate_t pate;
        if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
            return 0;
        }
        base = pate.dw0;
    } else {
        base = cpu->env.spr[SPR_SDR1];
    }
    return (1ULL << ((base & SDR_64_HTABSIZE) + 18 - 7)) - 1;
}
#endif /* TARGET_PPC64 */
#endif /* CONFIG_USER_ONLY */
#endif /* MMU_BOOOK3S_V3_H */
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/xen/xen-bus-helper.c
|
/*
* Copyright (c) 2018 Citrix Systems Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/sysbus.h"
#include "hw/xen/xen.h"
#include "hw/xen/xen-bus.h"
#include "hw/xen/xen-bus-helper.h"
#include "qapi/error.h"
#include <glib/gprintf.h>
/* Pairs a xenbus_state value with its printable name for tracing/logs */
struct xs_state {
    enum xenbus_state statenum;
    const char *statestr;
};
/* Stringify the enumerator so the table stays in sync with the enum */
#define XS_STATE(state) { state, #state }
static struct xs_state xs_state[] = {
    XS_STATE(XenbusStateUnknown),
    XS_STATE(XenbusStateInitialising),
    XS_STATE(XenbusStateInitWait),
    XS_STATE(XenbusStateInitialised),
    XS_STATE(XenbusStateConnected),
    XS_STATE(XenbusStateClosing),
    XS_STATE(XenbusStateClosed),
    XS_STATE(XenbusStateReconfiguring),
    XS_STATE(XenbusStateReconfigured),
};
#undef XS_STATE
/*
 * Return the printable name of @state, or "INVALID" if the value is
 * not a known xenbus_state.
 */
const char *xs_strstate(enum xenbus_state state)
{
    unsigned int idx;

    for (idx = 0; idx < ARRAY_SIZE(xs_state); idx++) {
        if (state == xs_state[idx].statenum) {
            return xs_state[idx].statestr;
        }
    }

    return "INVALID";
}
/*
 * Create xenstore node @node with empty contents and apply @perms.
 * Sets @errp on failure; if setting permissions fails, the node has
 * already been created and is left in place with default permissions.
 */
void xs_node_create(struct xs_handle *xsh, xs_transaction_t tid,
                    const char *node, struct xs_permissions perms[],
                    unsigned int nr_perms, Error **errp)
{
    trace_xs_node_create(node);
    if (!xs_write(xsh, tid, node, "", 0)) {
        error_setg_errno(errp, errno, "failed to create node '%s'", node);
        return;
    }
    if (!xs_set_permissions(xsh, tid, node, perms, nr_perms)) {
        error_setg_errno(errp, errno, "failed to set node '%s' permissions",
                         node);
    }
}
/* Remove xenstore node @node; sets @errp on failure. */
void xs_node_destroy(struct xs_handle *xsh, xs_transaction_t tid,
                     const char *node, Error **errp)
{
    trace_xs_node_destroy(node);

    if (xs_rm(xsh, tid, node)) {
        return;
    }

    error_setg_errno(errp, errno, "failed to destroy node '%s'", node);
}
/*
 * Write a printf-formatted value to xenstore key @node/@key (or just
 * @key if @node is empty).  Sets @errp if the write fails.
 */
void xs_node_vprintf(struct xs_handle *xsh, xs_transaction_t tid,
                     const char *node, const char *key, Error **errp,
                     const char *fmt, va_list ap)
{
    char *path, *value;
    int len;
    /* An empty @node means @key is already an absolute path */
    path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) :
        g_strdup(key);
    len = g_vasprintf(&value, fmt, ap);
    trace_xs_node_vprintf(path, value);
    if (!xs_write(xsh, tid, path, value, len)) {
        error_setg_errno(errp, errno, "failed to write '%s' to '%s'",
                         value, path);
    }
    g_free(value);
    g_free(path);
}
/* Variadic convenience wrapper around xs_node_vprintf(). */
void xs_node_printf(struct xs_handle *xsh,  xs_transaction_t tid,
                    const char *node, const char *key, Error **errp,
                    const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    xs_node_vprintf(xsh, tid, node, key, errp, fmt, ap);
    va_end(ap);
}
/*
 * Read xenstore key @node/@key (or just @key if @node is empty) and
 * parse it with a scanf-style format.
 *
 * Returns the number of conversions performed, or EOF if the key
 * could not be read (in which case @errp is also set).
 */
int xs_node_vscanf(struct xs_handle *xsh,  xs_transaction_t tid,
                   const char *node, const char *key, Error **errp,
                   const char *fmt, va_list ap)
{
    char *path, *value;
    int rc;
    path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) :
        g_strdup(key);
    value = xs_read(xsh, tid, path, NULL);
    trace_xs_node_vscanf(path, value);
    if (value) {
        rc = vsscanf(value, fmt, ap);
    } else {
        error_setg_errno(errp, errno, "failed to read from '%s'",
                         path);
        rc = EOF;
    }
    /* xs_read() buffers come from libxenstore's malloc, hence free(),
     * not g_free() */
    free(value);
    g_free(path);
    return rc;
}
/* Variadic convenience wrapper around xs_node_vscanf(). */
int xs_node_scanf(struct xs_handle *xsh,  xs_transaction_t tid,
                  const char *node, const char *key, Error **errp,
                  const char *fmt, ...)
{
    va_list ap;
    int rc;
    va_start(ap, fmt);
    rc = xs_node_vscanf(xsh, tid, node, key, errp, fmt, ap);
    va_end(ap);
    return rc;
}
/*
 * Register a xenstore watch on @node/@key (or just @key if @node is
 * empty) using @token; sets @errp on failure.
 */
void xs_node_watch(struct xs_handle *xsh, const char *node, const char *key,
                   char *token, Error **errp)
{
    char *watch_path = (strlen(node) != 0)
        ? g_strdup_printf("%s/%s", node, key)
        : g_strdup(key);

    trace_xs_node_watch(watch_path);

    if (!xs_watch(xsh, watch_path, token)) {
        error_setg_errno(errp, errno, "failed to watch node '%s'", watch_path);
    }

    g_free(watch_path);
}
/* Remove the xenstore watch previously set up by xs_node_watch(). */
void xs_node_unwatch(struct xs_handle *xsh, const char *node,
                     const char *key, const char *token, Error **errp)
{
    char *watch_path = (strlen(node) != 0)
        ? g_strdup_printf("%s/%s", node, key)
        : g_strdup(key);

    trace_xs_node_unwatch(watch_path);

    if (!xs_unwatch(xsh, watch_path, token)) {
        error_setg_errno(errp, errno, "failed to unwatch node '%s'",
                         watch_path);
    }

    g_free(watch_path);
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/int-subtract/test_msa_subs_s_h.c
|
/*
* Test program for MSA instruction SUBS_S.H
*
* Copyright (C) 2018 Wave Computing, Inc.
* Copyright (C) 2018 <NAME> <<EMAIL>>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
#include <sys/time.h>
#include <stdint.h>
#include "../../../../include/wrappers_msa.h"
#include "../../../../include/test_inputs.h"
#include "../../../../include/test_utils.h"
#define TEST_COUNT_TOTAL ( \
(PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT) + \
(RANDOM_INPUTS_SHORT_COUNT) * (RANDOM_INPUTS_SHORT_COUNT))
/*
 * Exercise SUBS_S.H (saturated signed halfword subtract) over all
 * pattern x pattern and random x random operand pairs, then compare
 * against the precomputed expected vectors and report timing.
 */
int32_t main(void)
{
    char *instruction_name = "SUBS_S.H";
    int32_t ret;
    uint32_t i, j;
    struct timeval start, end;
    double elapsed_time;
    uint64_t b128_result[TEST_COUNT_TOTAL][2];
    /* Expected 128-bit results, one row per (left, right) operand pair;
     * the index comments mark the start of each left-operand group. */
    uint64_t b128_expect[TEST_COUNT_TOTAL][2] = {
        { 0x0000000000000000ULL, 0x0000000000000000ULL, }, /* 0 */
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0x5555555555555555ULL, 0x5555555555555555ULL, },
        { 0xaaaaaaaaaaaaaaaaULL, 0xaaaaaaaaaaaaaaaaULL, },
        { 0x3333333333333333ULL, 0x3333333333333333ULL, },
        { 0xccccccccccccccccULL, 0xccccccccccccccccULL, },
        { 0x1c71c71c71c71c71ULL, 0xc71c71c71c71c71cULL, },
        { 0xe38e38e38e38e38eULL, 0x38e38e38e38e38e3ULL, },
        { 0x0001000100010001ULL, 0x0001000100010001ULL, }, /* 8 */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x5556555655565556ULL, 0x5556555655565556ULL, },
        { 0xaaabaaabaaabaaabULL, 0xaaabaaabaaabaaabULL, },
        { 0x3334333433343334ULL, 0x3334333433343334ULL, },
        { 0xcccdcccdcccdcccdULL, 0xcccdcccdcccdcccdULL, },
        { 0x1c72c71d71c81c72ULL, 0xc71d71c81c72c71dULL, },
        { 0xe38f38e48e39e38fULL, 0x38e48e39e38f38e4ULL, },
        { 0xaaabaaabaaabaaabULL, 0xaaabaaabaaabaaabULL, }, /* 16 */
        { 0xaaaaaaaaaaaaaaaaULL, 0xaaaaaaaaaaaaaaaaULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x8000800080008000ULL, 0x8000800080008000ULL, },
        { 0xdddedddedddedddeULL, 0xdddedddedddedddeULL, },
        { 0x8000800080008000ULL, 0x8000800080008000ULL, },
        { 0xc71c80001c72c71cULL, 0x80001c72c71c8000ULL, },
        { 0x8e39e38e80008e39ULL, 0xe38e80008e39e38eULL, },
        { 0x5556555655565556ULL, 0x5556555655565556ULL, }, /* 24 */
        { 0x5555555555555555ULL, 0x5555555555555555ULL, },
        { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, },
        { 0x2222222222222222ULL, 0x2222222222222222ULL, },
        { 0x71c71c727fff71c7ULL, 0x1c727fff71c71c72ULL, },
        { 0x38e47fffe38e38e4ULL, 0x7fffe38e38e47fffULL, },
        { 0xcccdcccdcccdcccdULL, 0xcccdcccdcccdcccdULL, }, /* 32 */
        { 0xccccccccccccccccULL, 0xccccccccccccccccULL, },
        { 0x2222222222222222ULL, 0x2222222222222222ULL, },
        { 0x8000800080008000ULL, 0x8000800080008000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x9999999999999999ULL, 0x9999999999999999ULL, },
        { 0xe93e93e93e94e93eULL, 0x93e93e94e93e93e9ULL, },
        { 0xb05b05b08000b05bULL, 0x05b08000b05b05b0ULL, },
        { 0x3334333433343334ULL, 0x3334333433343334ULL, }, /* 40 */
        { 0x3333333333333333ULL, 0x3333333333333333ULL, },
        { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, },
        { 0xdddedddedddedddeULL, 0xdddedddedddedddeULL, },
        { 0x6667666766676667ULL, 0x6667666766676667ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x4fa5fa507fff4fa5ULL, 0xfa507fff4fa5fa50ULL, },
        { 0x16c26c17c16c16c2ULL, 0x6c17c16c16c26c17ULL, },
        { 0xe38f38e48e39e38fULL, 0x38e48e39e38f38e4ULL, }, /* 48 */
        { 0xe38e38e38e38e38eULL, 0x38e38e38e38e38e3ULL, },
        { 0x38e47fffe38e38e4ULL, 0x7fffe38e38e47fffULL, },
        { 0x8e39e38e80008e39ULL, 0xe38e80008e39e38eULL, },
        { 0x16c26c17c16c16c2ULL, 0x6c17c16c16c26c17ULL, },
        { 0xb05b05b08000b05bULL, 0x05b08000b05b05b0ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0xc71d71c78000c71dULL, 0x71c78000c71d71c7ULL, },
        { 0x1c72c71d71c81c72ULL, 0xc71d71c81c72c71dULL, }, /* 56 */
        { 0x1c71c71c71c71c71ULL, 0xc71c71c71c71c71cULL, },
        { 0x71c71c727fff71c7ULL, 0x1c727fff71c71c72ULL, },
        { 0xc71c80001c72c71cULL, 0x80001c72c71c8000ULL, },
        { 0x4fa5fa507fff4fa5ULL, 0xfa507fff4fa5fa50ULL, },
        { 0xe93e93e93e94e93eULL, 0x93e93e94e93e93e9ULL, },
        { 0x38e38e397fff38e3ULL, 0x8e397fff38e38e39ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, }, /* 64 */
        { 0x8cace669dacf7fffULL, 0x38705044e93c8000ULL, },
        { 0xdc1038226e937fffULL, 0x238f445f53508af8ULL, },
        { 0x8000d07fca3172f2ULL, 0x7fff7fff5539cd6cULL, },
        { 0x7354199725318000ULL, 0xc790afbc16c47fffULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x4f6451b97fff3b88ULL, 0xeb1ff41b6a142de8ULL, },
        { 0x8b6fea16ef62e4baULL, 0x7fff32426bfd705cULL, },
        { 0x23f0c7de916d8000ULL, 0xdc71bba1acb07508ULL, }, /* 72 */
        { 0xb09cae478000c478ULL, 0x14e10be595ecd218ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x8000985d8000a932ULL, 0x7fff3e2701e94274ULL, },
        { 0x7fff2f8135cf8d0eULL, 0x80008000aac73294ULL, },
        { 0x749115ea109e1b46ULL, 0x8000cdbe94038fa4ULL, },
    };
    gettimeofday(&start, NULL);
    /* Pattern x pattern operand pairs */
    for (i = 0; i < PATTERN_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < PATTERN_INPUTS_SHORT_COUNT; j++) {
            do_msa_SUBS_S_H(b128_pattern[i], b128_pattern[j],
                            b128_result[PATTERN_INPUTS_SHORT_COUNT * i + j]);
        }
    }
    /* Random x random operand pairs, stored after the pattern results */
    for (i = 0; i < RANDOM_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < RANDOM_INPUTS_SHORT_COUNT; j++) {
            do_msa_SUBS_S_H(b128_random[i], b128_random[j],
                            b128_result[((PATTERN_INPUTS_SHORT_COUNT) *
                                         (PATTERN_INPUTS_SHORT_COUNT)) +
                                        RANDOM_INPUTS_SHORT_COUNT * i + j]);
        }
    }
    gettimeofday(&end, NULL);
    /* Elapsed wall-clock time in milliseconds */
    elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0;
    elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0;
    ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time,
                        &b128_result[0][0], &b128_expect[0][0]);
    return ret;
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/arm/digic_boards.c
|
<reponame>pmp-tool/PMP<gh_stars>1-10
/*
* QEMU model of the Canon DIGIC boards (cameras indeed :).
*
* Copyright (C) 2013 <NAME> <<EMAIL>>
*
* This model is based on reverse engineering efforts
* made by CHDK (http://chdk.wikia.com) and
* Magic Lantern (http://www.magiclantern.fm) projects
* contributors.
*
* See docs here:
* http://magiclantern.wikia.com/wiki/Register_Map
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/boards.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/arm/digic.h"
#include "hw/block/flash.h"
#include "hw/loader.h"
#include "sysemu/sysemu.h"
#include "sysemu/qtest.h"
#define DIGIC4_ROM0_BASE 0xf0000000
#define DIGIC4_ROM1_BASE 0xf8000000
#define DIGIC4_ROM_MAX_SIZE 0x08000000
/* Per-machine runtime state: the DIGIC SoC and the board RAM region */
typedef struct DigicBoardState {
    DigicState *digic;
    MemoryRegion ram;
} DigicBoardState;
/*
 * Static board description: RAM size plus optional hooks (and default
 * firmware file names) to populate each of the two ROM windows.
 */
typedef struct DigicBoard {
    hwaddr ram_size;
    void (*add_rom0)(DigicBoardState *, hwaddr, const char *);
    const char *rom0_def_filename;
    void (*add_rom1)(DigicBoardState *, hwaddr, const char *);
    const char *rom1_def_filename;
} DigicBoard;
/* Allocate the board RAM and map it at guest physical address 0. */
static void digic4_board_setup_ram(DigicBoardState *s, hwaddr ram_size)
{
    memory_region_allocate_system_memory(&s->ram, NULL, "ram", ram_size);
    memory_region_add_subregion(get_system_memory(), 0, &s->ram);
}
/*
 * Instantiate and realize the DIGIC SoC, set up RAM and invoke the
 * board-specific ROM hooks described by @board.  Exits on realize
 * failure.
 */
static void digic4_board_init(DigicBoard *board)
{
    Error *err = NULL;
    DigicBoardState *s = g_new(DigicBoardState, 1);
    s->digic = DIGIC(object_new(TYPE_DIGIC));
    object_property_set_bool(OBJECT(s->digic), true, "realized", &err);
    if (err != NULL) {
        error_reportf_err(err, "Couldn't realize DIGIC SoC: ");
        exit(1);
    }
    digic4_board_setup_ram(s, board->ram_size);
    /* ROM hooks are optional; a board may populate either window */
    if (board->add_rom0) {
        board->add_rom0(s, DIGIC4_ROM0_BASE, board->rom0_def_filename);
    }
    if (board->add_rom1) {
        board->add_rom1(s, DIGIC4_ROM1_BASE, board->rom1_def_filename);
    }
}
/*
 * Load a firmware image at guest physical @addr, capped at @max_size.
 *
 * The image comes from -bios if given, otherwise from @def_filename
 * (which may be NULL, in which case nothing is loaded).  Exits on a
 * missing or unloadable image.  Skipped entirely under qtest.
 */
static void digic_load_rom(DigicBoardState *s, hwaddr addr,
                           hwaddr max_size, const char *def_filename)
{
    target_long rom_size;
    const char *filename;
    if (qtest_enabled()) {
        /* qtest runs no code so don't attempt a ROM load which
         * could fail and result in a spurious test failure.
         */
        return;
    }
    if (bios_name) {
        filename = bios_name;
    } else {
        filename = def_filename;
    }
    if (filename) {
        char *fn = qemu_find_file(QEMU_FILE_TYPE_BIOS, filename);
        if (!fn) {
            error_report("Couldn't find rom image '%s'.", filename);
            exit(1);
        }
        rom_size = load_image_targphys(fn, addr, max_size);
        /* load_image_targphys() returns a negative value on failure */
        if (rom_size < 0 || rom_size > max_size) {
            error_report("Couldn't load rom image '%s'.", filename);
            exit(1);
        }
        g_free(fn);
    }
}
/*
* Samsung K8P3215UQB
* 64M Bit (4Mx16) Page Mode / Multi-Bank NOR Flash Memory
*/
/*
 * Register a CFI02 pflash device modelling the Samsung K8P3215UQB NOR
 * flash and load the firmware image into it via digic_load_rom().
 * The device ID bytes (0x00EC/0x007E/...) are the Samsung codes.
 */
static void digic4_add_k8p3215uqb_rom(DigicBoardState *s, hwaddr addr,
                                      const char *def_filename)
{
#define FLASH_K8P3215UQB_SIZE (4 * 1024 * 1024)
#define FLASH_K8P3215UQB_SECTOR_SIZE (64 * 1024)
    pflash_cfi02_register(addr, "pflash", FLASH_K8P3215UQB_SIZE,
                          NULL, FLASH_K8P3215UQB_SECTOR_SIZE,
                          DIGIC4_ROM_MAX_SIZE / FLASH_K8P3215UQB_SIZE,
                          4,
                          0x00EC, 0x007E, 0x0003, 0x0001,
                          0x0555, 0x2aa, 0);
    digic_load_rom(s, addr, FLASH_K8P3215UQB_SIZE, def_filename);
}
/* Canon PowerShot A1100 IS: 64 MiB RAM, firmware in the ROM1 window */
static DigicBoard digic4_board_canon_a1100 = {
    .ram_size = 64 * 1024 * 1024,
    .add_rom1 = digic4_add_k8p3215uqb_rom,
    .rom1_def_filename = "canon-a1100-rom1.bin",
};
/* MachineClass init hook: build the Canon A1100 board */
static void canon_a1100_init(MachineState *machine)
{
    digic4_board_init(&digic4_board_canon_a1100);
}
/* Describe the "canon-a1100" machine type. */
static void canon_a1100_machine_init(MachineClass *mc)
{
    mc->init = canon_a1100_init;
    mc->desc = "Canon PowerShot A1100 IS";
    mc->ignore_memory_transaction_failures = true;
}
DEFINE_MACHINE("canon-a1100", canon_a1100_machine_init)
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/sd/sd.c
|
/*
* SD Memory Card emulation as defined in the "SD Memory Card Physical
* layer specification, Version 2.00."
*
* Copyright (c) 2006 <NAME> <<EMAIL>>
* Copyright (c) 2007 CodeSourcery
* Copyright (c) 2018 <NAME> <<EMAIL>>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "hw/qdev.h"
#include "hw/hw.h"
#include "hw/registerfields.h"
#include "sysemu/block-backend.h"
#include "hw/sd/sd.h"
#include "qapi/error.h"
#include "qemu/bitmap.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/log.h"
#include "sdmmc-internal.h"
#include "trace.h"
//#define DEBUG_SD 1
/*
 * SD response formats.  Negative values are internal sentinels:
 * sd_r1b behaves like R1 for formatting, sd_illegal marks a rejected
 * command.
 */
typedef enum {
    sd_r0 = 0, /* no response */
    sd_r1, /* normal response command */
    sd_r2_i, /* CID register */
    sd_r2_s, /* CSD register */
    sd_r3, /* OCR register */
    sd_r6 = 6, /* Published RCA response */
    sd_r7, /* Operating voltage */
    sd_r1b = -1,
    sd_illegal = -2,
} sd_rsp_type_t;
/* Top-level operating modes from the SD physical layer spec */
enum SDCardModes {
    sd_inactive,
    sd_card_identification_mode,
    sd_data_transfer_mode,
};
/* Card state machine states; -1 (inactive) is outside the spec encoding */
enum SDCardStates {
    sd_inactive_state = -1,
    sd_idle_state = 0,
    sd_ready_state,
    sd_identification_state,
    sd_standby_state,
    sd_transfer_state,
    sd_sendingdata_state,
    sd_receivingdata_state,
    sd_programming_state,
    sd_disconnect_state,
};
/* Complete emulated SD card state */
struct SDState {
    DeviceState parent_obj;
    /* SD Memory Card Registers */
    uint32_t ocr;            /* Operating Conditions Register */
    uint8_t scr[8];          /* SD Configuration Register */
    uint8_t cid[16];         /* Card IDentification */
    uint8_t csd[16];         /* Card Specific Data */
    uint16_t rca;            /* Relative Card Address */
    uint32_t card_status;    /* CSR bits, see FIELD(CSR, ...) below */
    uint8_t sd_status[64];
    /* Configurable properties */
    uint8_t spec_version;
    BlockBackend *blk;       /* backing storage; NULL means no medium */
    bool spi;                /* SPI mode rather than native SD mode */
    uint32_t mode;    /* current card mode, one of SDCardModes */
    int32_t state;    /* current card state, one of SDCardStates */
    uint32_t vhs;            /* operating voltage from CMD8 */
    bool wp_switch;
    unsigned long *wp_groups;    /* one bit per write-protect group */
    int32_t wpgrps_size;
    uint64_t size;           /* card capacity in bytes */
    uint32_t blk_len;
    uint32_t multi_blk_cnt;
    uint32_t erase_start;
    uint32_t erase_end;
    uint8_t pwd[16];         /* card lock password */
    uint32_t pwd_len;
    uint8_t function_group[6];
    uint8_t current_cmd;
    /* True if we will handle the next command as an ACMD. Note that this does
     * *not* track the APP_CMD status bit!
     */
    bool expecting_acmd;
    uint32_t blk_written;
    uint64_t data_start;     /* byte offset of the current transfer */
    uint32_t data_offset;
    uint8_t data[512];       /* sector-sized transfer buffer */
    qemu_irq readonly_cb;
    qemu_irq inserted_cb;
    QEMUTimer *ocr_power_timer;
    const char *proto_name;
    bool enable;
    uint8_t dat_lines;
    bool cmd_line;
};
/*
 * Map an SDCardStates value to a printable name for tracing.
 * sd_inactive_state (-1) is handled separately since a negative value
 * cannot index the table.
 */
static const char *sd_state_name(enum SDCardStates state)
{
    static const char *state_name[] = {
        [sd_idle_state]             = "idle",
        [sd_ready_state]            = "ready",
        [sd_identification_state]   = "identification",
        [sd_standby_state]          = "standby",
        [sd_transfer_state]         = "transfer",
        [sd_sendingdata_state]      = "sendingdata",
        [sd_receivingdata_state]    = "receivingdata",
        [sd_programming_state]      = "programming",
        [sd_disconnect_state]       = "disconnect",
    };
    if (state == sd_inactive_state) {
        return "inactive";
    }
    /* '<', not '<=': state == ARRAY_SIZE would read past the table */
    assert(state < ARRAY_SIZE(state_name));
    return state_name[state];
}
/*
 * Map a response type to a printable name for tracing.  sd_illegal is
 * handled separately (negative), and sd_r1b is folded into sd_r1 since
 * the two formats are identical on the wire.
 */
static const char *sd_response_name(sd_rsp_type_t rsp)
{
    static const char *response_name[] = {
        [sd_r0]     = "RESP#0 (no response)",
        [sd_r1]     = "RESP#1 (normal cmd)",
        [sd_r2_i]   = "RESP#2 (CID reg)",
        [sd_r2_s]   = "RESP#2 (CSD reg)",
        [sd_r3]     = "RESP#3 (OCR reg)",
        [sd_r6]     = "RESP#6 (RCA)",
        [sd_r7]     = "RESP#7 (operating voltage)",
    };
    if (rsp == sd_illegal) {
        return "ILLEGAL RESP";
    }
    if (rsp == sd_r1b) {
        rsp = sd_r1;
    }
    /* '<', not '<=': rsp == ARRAY_SIZE would read past the table */
    assert(rsp < ARRAY_SIZE(response_name));
    return response_name[rsp];
}
/* Report the DAT line levels; all lines read 0 while the card is disabled. */
static uint8_t sd_get_dat_lines(SDState *sd)
{
    if (!sd->enable) {
        return 0;
    }
    return sd->dat_lines;
}
/* Report the CMD line level; reads false while the card is disabled. */
static bool sd_get_cmd_line(SDState *sd)
{
    if (!sd->enable) {
        return false;
    }
    return sd->cmd_line;
}
/*
 * Accept a supply voltage in millivolts; the 3.0V and 3.3V windows are
 * supported, anything else is only logged as a guest error (the card
 * state is not changed either way).
 */
static void sd_set_voltage(SDState *sd, uint16_t millivolts)
{
    trace_sdcard_set_voltage(millivolts);
    switch (millivolts) {
    case 3001 ... 3600: /* SD_VOLTAGE_3_3V */
    case 2001 ... 3000: /* SD_VOLTAGE_3_0V */
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "SD card voltage not supported: %.3fV",
                      millivolts / 1000.f);
    }
}
/*
 * Derive the card operating mode (inactive / identification / data
 * transfer) from the current state, per the SD spec state diagram.
 */
static void sd_set_mode(SDState *sd)
{
    switch (sd->state) {
    case sd_inactive_state:
        sd->mode = sd_inactive;
        break;
    case sd_idle_state:
    case sd_ready_state:
    case sd_identification_state:
        sd->mode = sd_card_identification_mode;
        break;
    case sd_standby_state:
    case sd_transfer_state:
    case sd_sendingdata_state:
    case sd_receivingdata_state:
    case sd_programming_state:
    case sd_disconnect_state:
        sd->mode = sd_data_transfer_mode;
        break;
    }
}
/* Command type (broadcast/addressed/data-transfer) per command index */
static const sd_cmd_type_t sd_cmd_type[SDMMC_CMD_MAX] = {
    sd_bc,   sd_none, sd_bcr,  sd_bcr,  sd_none, sd_none, sd_none, sd_ac,
    sd_bcr,  sd_ac,   sd_ac,   sd_adtc, sd_ac,   sd_ac,   sd_none, sd_ac,
    /* 16 */
    sd_ac,   sd_adtc, sd_adtc, sd_none, sd_none, sd_none, sd_none, sd_none,
    sd_adtc, sd_adtc, sd_adtc, sd_adtc, sd_ac,   sd_ac,   sd_adtc, sd_none,
    /* 32 */
    sd_ac,   sd_ac,   sd_none, sd_none, sd_none, sd_none, sd_ac,   sd_none,
    sd_none, sd_none, sd_bc,   sd_none, sd_none, sd_none, sd_none, sd_none,
    /* 48 */
    sd_none, sd_none, sd_none, sd_none, sd_none, sd_none, sd_none, sd_ac,
    sd_adtc, sd_none, sd_none, sd_none, sd_none, sd_none, sd_none, sd_none,
};
/* Command class number per command index (spec "card command classes") */
static const int sd_cmd_class[SDMMC_CMD_MAX] = {
    0,  0,  0,  0,  0,  9, 10,  0,  0,  0,  0,  1,  0,  0,  0,  0,
    2,  2,  2,  2,  3,  3,  3,  3,  4,  4,  4,  4,  6,  6,  6,  6,
    5,  5, 10, 10, 10, 10,  5,  9,  9,  9,  7,  7,  7,  7,  7,  7,
    7,  7, 10,  7,  9,  9,  9,  8,  8, 10,  8,  8,  8,  8,  8,  8,
};
/*
 * Bit-serial 7-bit CRC over @width bytes of @message, as used by SD
 * commands and the CID/CSD registers.  The XOR constant 0x89
 * corresponds to the CRC-7 polynomial x^7 + x^3 + 1.
 *
 * Returns the CRC in the low 7 bits (bit 7 of the result is always
 * shifted out on the final iteration's mask).
 */
static uint8_t sd_crc7(void *message, size_t width)
{
    size_t i;
    int bit;
    uint8_t shift_reg = 0x00;
    const uint8_t *msg = (const uint8_t *) message;

    for (i = 0; i < width; i++, msg++) {
        for (bit = 7; bit >= 0; bit--) {
            shift_reg <<= 1;
            if ((shift_reg >> 7) ^ ((*msg >> bit) & 1)) {
                shift_reg ^= 0x89;
            }
        }
    }
    return shift_reg;
}
/*
 * Bit-serial 16-bit CRC over @message.
 *
 * NOTE(review): @width is doubled before the loop, so 2 * @width
 * 16-bit words (i.e. 4 * @width bytes) are consumed -- callers must
 * size buffers accordingly; confirm the intended unit of @width.
 * NOTE(review): the XOR constant 0x1011 differs from the CCITT
 * polynomial 0x1021 that the SD spec's CRC16 uses -- TODO confirm.
 */
static uint16_t sd_crc16(void *message, size_t width)
{
    size_t i;
    int bit;
    uint16_t shift_reg = 0x0000;
    const uint16_t *msg = (const uint16_t *) message;
    width <<= 1;

    for (i = 0; i < width; i++, msg++) {
        for (bit = 15; bit >= 0; bit--) {
            shift_reg <<= 1;
            if ((shift_reg >> 15) ^ ((*msg >> bit) & 1)) {
                shift_reg ^= 0x1011;
            }
        }
    }
    return shift_reg;
}
#define OCR_POWER_DELAY_NS 500000 /* 0.5ms */
FIELD(OCR, VDD_VOLTAGE_WINDOW, 0, 24)
FIELD(OCR, VDD_VOLTAGE_WIN_LO, 0, 8)
FIELD(OCR, DUAL_VOLTAGE_CARD, 7, 1)
FIELD(OCR, VDD_VOLTAGE_WIN_HI, 8, 16)
FIELD(OCR, ACCEPT_SWITCH_1V8, 24, 1) /* Only UHS-I */
FIELD(OCR, UHS_II_CARD, 29, 1) /* Only UHS-II */
FIELD(OCR, CARD_CAPACITY, 30, 1) /* 0:SDSC, 1:SDHC/SDXC */
FIELD(OCR, CARD_POWER_UP, 31, 1)
#define ACMD41_ENQUIRY_MASK 0x00ffffff
#define ACMD41_R3_MASK (R_OCR_VDD_VOLTAGE_WIN_HI_MASK \
| R_OCR_ACCEPT_SWITCH_1V8_MASK \
| R_OCR_UHS_II_CARD_MASK \
| R_OCR_CARD_CAPACITY_MASK \
| R_OCR_CARD_POWER_UP_MASK)
/* Initial OCR: report the full high voltage window, power-up not done yet */
static void sd_set_ocr(SDState *sd)
{
    /* All voltages OK */
    sd->ocr = R_OCR_VDD_VOLTAGE_WIN_HI_MASK;
}
/*
 * Timer callback: complete the simulated power-up sequence by setting
 * OCR.CARD_POWER_UP, and advertise high capacity (CCS) for cards
 * larger than 1 GiB.
 */
static void sd_ocr_powerup(void *opaque)
{
    SDState *sd = opaque;
    trace_sdcard_powerup();
    /* Power-up must not already have completed */
    assert(!FIELD_EX32(sd->ocr, OCR, CARD_POWER_UP));
    /* card power-up OK */
    sd->ocr = FIELD_DP32(sd->ocr, OCR, CARD_POWER_UP, 1);
    if (sd->size > 1 * GiB) {
        sd->ocr = FIELD_DP32(sd->ocr, OCR, CARD_CAPACITY, 1);
    }
}
/* Populate the SD Configuration Register according to spec_version. */
static void sd_set_scr(SDState *sd)
{
    sd->scr[0] = 0 << 4;        /* SCR structure version 1.0 */
    if (sd->spec_version == SD_PHY_SPECv1_10_VERS) {
        sd->scr[0] |= 1;        /* Spec Version 1.10 */
    } else {
        sd->scr[0] |= 2;        /* Spec Version 2.00 or Version 3.0X */
    }
    sd->scr[1] = (2 << 4)       /* SDSC Card (Security Version 1.01) */
                 | 0b0101;      /* 1-bit or 4-bit width bus modes */
    sd->scr[2] = 0x00;          /* Extended Security is not supported. */
    if (sd->spec_version >= SD_PHY_SPECv3_01_VERS) {
        sd->scr[2] |= 1 << 7;   /* Spec Version 3.0X */
    }
    sd->scr[3] = 0x00;
    /* reserved for manufacturer usage */
    sd->scr[4] = 0x00;
    sd->scr[5] = 0x00;
    sd->scr[6] = 0x00;
    sd->scr[7] = 0x00;
}
#define MID 0xaa
#define OID "XY"
#define PNM "QEMU!"
#define PRV 0x01
#define MDT_YR 2006
#define MDT_MON 2
/* Populate the Card IDentification register with fixed fake values. */
static void sd_set_cid(SDState *sd)
{
    sd->cid[0] = MID;       /* Fake card manufacturer ID (MID) */
    sd->cid[1] = OID[0];    /* OEM/Application ID (OID) */
    sd->cid[2] = OID[1];
    sd->cid[3] = PNM[0];    /* Fake product name (PNM) */
    sd->cid[4] = PNM[1];
    sd->cid[5] = PNM[2];
    sd->cid[6] = PNM[3];
    sd->cid[7] = PNM[4];
    sd->cid[8] = PRV;       /* Fake product revision (PRV) */
    sd->cid[9] = 0xde;      /* Fake serial number (PSN) */
    sd->cid[10] = 0xad;
    sd->cid[11] = 0xbe;
    sd->cid[12] = 0xef;
    sd->cid[13] = 0x00 |    /* Manufacture date (MDT) */
        ((MDT_YR - 2000) / 10);
    sd->cid[14] = ((MDT_YR % 10) << 4) | MDT_MON;
    /* CRC7 over the first 15 bytes, stored shifted with stop bit set */
    sd->cid[15] = (sd_crc7(sd->cid, 15) << 1) | 1;
}
#define HWBLOCK_SHIFT 9 /* 512 bytes */
#define SECTOR_SHIFT 5 /* 16 kilobytes */
#define WPGROUP_SHIFT 7 /* 2 megs */
#define CMULT_SHIFT 9 /* 512 times HWBLOCK_SIZE */
#define WPGROUP_SIZE (1 << (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT))
static const uint8_t sd_csd_rw_mask[16] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0xfe,
};
/*
 * Populate the Card Specific Data register for a card of @size bytes.
 * Cards up to 1 GiB use CSD structure version 1.0 (SDSC) with an
 * explicit C_SIZE/C_SIZE_MULT geometry; larger cards use CSD version
 * 2.0 (SDHC) where the capacity is encoded in 512 KiB units.
 */
static void sd_set_csd(SDState *sd, uint64_t size)
{
    uint32_t csize = (size >> (CMULT_SHIFT + HWBLOCK_SHIFT)) - 1;
    uint32_t sectsize = (1 << (SECTOR_SHIFT + 1)) - 1;
    uint32_t wpsize = (1 << (WPGROUP_SHIFT + 1)) - 1;
    if (size <= 1 * GiB) { /* Standard Capacity SD */
        sd->csd[0] = 0x00;      /* CSD structure */
        sd->csd[1] = 0x26;      /* Data read access-time-1 */
        sd->csd[2] = 0x00;      /* Data read access-time-2 */
        sd->csd[3] = 0x32;      /* Max. data transfer rate: 25 MHz */
        sd->csd[4] = 0x5f;      /* Card Command Classes */
        sd->csd[5] = 0x50 |     /* Max. read data block length */
            HWBLOCK_SHIFT;
        sd->csd[6] = 0xe0 |     /* Partial block for read allowed */
            ((csize >> 10) & 0x03);
        sd->csd[7] = 0x00 |     /* Device size */
            ((csize >> 2) & 0xff);
        sd->csd[8] = 0x3f |     /* Max. read current */
            ((csize << 6) & 0xc0);
        sd->csd[9] = 0xfc |     /* Max. write current */
            ((CMULT_SHIFT - 2) >> 1);
        sd->csd[10] = 0x40 |    /* Erase sector size */
            (((CMULT_SHIFT - 2) << 7) & 0x80) | (sectsize >> 1);
        sd->csd[11] = 0x00 |    /* Write protect group size */
            ((sectsize << 7) & 0x80) | wpsize;
        sd->csd[12] = 0x90 |    /* Write speed factor */
            (HWBLOCK_SHIFT >> 2);
        sd->csd[13] = 0x20 |    /* Max. write data block length */
            ((HWBLOCK_SHIFT << 6) & 0xc0);
        sd->csd[14] = 0x00;     /* File format group */
    } else {                    /* SDHC */
        /* CSD v2.0: capacity is encoded as (size / 512 KiB) - 1 */
        size /= 512 * KiB;
        size -= 1;
        sd->csd[0] = 0x40;
        sd->csd[1] = 0x0e;
        sd->csd[2] = 0x00;
        sd->csd[3] = 0x32;
        sd->csd[4] = 0x5b;
        sd->csd[5] = 0x59;
        sd->csd[6] = 0x00;
        sd->csd[7] = (size >> 16) & 0xff;
        sd->csd[8] = (size >> 8) & 0xff;
        sd->csd[9] = (size & 0xff);
        sd->csd[10] = 0x7f;
        sd->csd[11] = 0x80;
        sd->csd[12] = 0x0a;
        sd->csd[13] = 0x40;
        sd->csd[14] = 0x00;
    }
    /* CRC7 over the first 15 bytes, stored shifted with stop bit set */
    sd->csd[15] = (sd_crc7(sd->csd, 15) << 1) | 1;
}
/*
 * Publish a new Relative Card Address by advancing the previous one by
 * an arbitrary non-zero step, so repeated identification rounds hand
 * out distinct addresses.
 */
static void sd_set_rca(SDState *sd)
{
    sd->rca += 0x4567;
}
FIELD(CSR, AKE_SEQ_ERROR, 3, 1)
FIELD(CSR, APP_CMD, 5, 1)
FIELD(CSR, FX_EVENT, 6, 1)
FIELD(CSR, READY_FOR_DATA, 8, 1)
FIELD(CSR, CURRENT_STATE, 9, 4)
FIELD(CSR, ERASE_RESET, 13, 1)
FIELD(CSR, CARD_ECC_DISABLED, 14, 1)
FIELD(CSR, WP_ERASE_SKIP, 15, 1)
FIELD(CSR, CSD_OVERWRITE, 16, 1)
FIELD(CSR, DEFERRED_RESPONSE, 17, 1)
FIELD(CSR, ERROR, 19, 1)
FIELD(CSR, CC_ERROR, 20, 1)
FIELD(CSR, CARD_ECC_FAILED, 21, 1)
FIELD(CSR, ILLEGAL_COMMAND, 22, 1)
FIELD(CSR, COM_CRC_ERROR, 23, 1)
FIELD(CSR, LOCK_UNLOCK_FAILED, 24, 1)
FIELD(CSR, CARD_IS_LOCKED, 25, 1)
FIELD(CSR, WP_VIOLATION, 26, 1)
FIELD(CSR, ERASE_PARAM, 27, 1)
FIELD(CSR, ERASE_SEQ_ERROR, 28, 1)
FIELD(CSR, BLOCK_LEN_ERROR, 29, 1)
FIELD(CSR, ADDRESS_ERROR, 30, 1)
FIELD(CSR, OUT_OF_RANGE, 31, 1)
/* Card status bits, split by clear condition:
* A : According to the card current state
* B : Always related to the previous command
* C : Cleared by read
*/
#define CARD_STATUS_A (R_CSR_READY_FOR_DATA_MASK \
| R_CSR_CARD_ECC_DISABLED_MASK \
| R_CSR_CARD_IS_LOCKED_MASK)
#define CARD_STATUS_B (R_CSR_CURRENT_STATE_MASK \
| R_CSR_ILLEGAL_COMMAND_MASK \
| R_CSR_COM_CRC_ERROR_MASK)
#define CARD_STATUS_C (R_CSR_AKE_SEQ_ERROR_MASK \
| R_CSR_APP_CMD_MASK \
| R_CSR_ERASE_RESET_MASK \
| R_CSR_WP_ERASE_SKIP_MASK \
| R_CSR_CSD_OVERWRITE_MASK \
| R_CSR_ERROR_MASK \
| R_CSR_CC_ERROR_MASK \
| R_CSR_CARD_ECC_FAILED_MASK \
| R_CSR_LOCK_UNLOCK_FAILED_MASK \
| R_CSR_WP_VIOLATION_MASK \
| R_CSR_ERASE_PARAM_MASK \
| R_CSR_ERASE_SEQ_ERROR_MASK \
| R_CSR_BLOCK_LEN_ERROR_MASK \
| R_CSR_ADDRESS_ERROR_MASK \
| R_CSR_OUT_OF_RANGE_MASK)
/* Reset the card status register: only READY_FOR_DATA (bit 8) set */
static void sd_set_cardstatus(SDState *sd)
{
    sd->card_status = 0x00000100;
}
/* Clear the 64-byte SD Status block. */
static void sd_set_sdstatus(SDState *sd)
{
    memset(sd->sd_status, 0, sizeof(sd->sd_status));
}
/*
 * Validate the CRC7 of an incoming request.
 *
 * CRC checking is deliberately disabled: the early "return 0" accepts
 * every request, leaving the real comparison below as dead code (see
 * the TODO).  Returns 0 (valid) always.
 */
static int sd_req_crc_validate(SDRequest *req)
{
    uint8_t buffer[5];
    buffer[0] = 0x40 | req->cmd;
    stl_be_p(&buffer[1], req->arg);
    return 0;
    return sd_crc7(buffer, 5) != req->crc; /* TODO */
}
/* Format an R1 response: the 32-bit card status, big-endian. */
static void sd_response_r1_make(SDState *sd, uint8_t *response)
{
    stl_be_p(response, sd->card_status);
    /* Clear the "clear on read" status bits */
    sd->card_status &= ~CARD_STATUS_C;
}
/* Build the R3 response (OCR register, as returned for ACMD41),
 * restricted to the bits a card is allowed to report.
 */
static void sd_response_r3_make(SDState *sd, uint8_t *response)
{
    stl_be_p(response, sd->ocr & ACMD41_R3_MASK);
}
/* Build the R6 response (published RCA): 16-bit RCA followed by a
 * compressed 16-bit status containing CSR bits 23, 22, 19 and 12:0.
 */
static void sd_response_r6_make(SDState *sd, uint8_t *response)
{
    uint16_t status;

    /* Pack COM_CRC_ERROR/ILLEGAL_COMMAND (bits 23:22), ERROR (bit 19)
     * and the low 13 status bits into the 16-bit R6 status field.
     */
    status = ((sd->card_status >> 8) & 0xc000) |
             ((sd->card_status >> 6) & 0x2000) |
              (sd->card_status & 0x1fff);
    /* Only the clear-on-read bits that were actually reported get cleared */
    sd->card_status &= ~(CARD_STATUS_C & 0xc81fff);
    stw_be_p(response + 0, sd->rca);
    stw_be_p(response + 2, status);
}
/* Build the R7 response (CMD8): echo of the accepted VHS/check pattern. */
static void sd_response_r7_make(SDState *sd, uint8_t *response)
{
    stl_be_p(response, sd->vhs);
}
/* Translate a byte address into the index of its write-protect group. */
static inline uint64_t sd_addr_to_wpnum(uint64_t addr)
{
    const unsigned group_shift = HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT;

    return addr >> group_shift;
}
/*
 * Device reset handler: return the card to its power-on state.
 *
 * Re-derives the card size from the backing block device (zero when no
 * drive is attached), regenerates every register (OCR, SCR, CID, CSD,
 * card status, SD status) and re-allocates the write-protect group
 * bitmap to match the new geometry.
 */
static void sd_reset(DeviceState *dev)
{
    SDState *sd = SD_CARD(dev);
    uint64_t size;
    uint64_t sect;
    trace_sdcard_reset();
    if (sd->blk) {
        blk_get_geometry(sd->blk, &sect);
    } else {
        sect = 0;
    }
    size = sect << 9;  /* 512-byte sectors */
    /* From here on, "sect" is reused as the number of WP groups */
    sect = sd_addr_to_wpnum(size) + 1;
    sd->state = sd_idle_state;
    sd->rca = 0x0000;
    sd_set_ocr(sd);
    sd_set_scr(sd);
    sd_set_cid(sd);
    sd_set_csd(sd, size);
    sd_set_cardstatus(sd);
    sd_set_sdstatus(sd);
    g_free(sd->wp_groups);
    sd->wp_switch = sd->blk ? blk_is_read_only(sd->blk) : false;
    sd->wpgrps_size = sect;
    sd->wp_groups = bitmap_new(sd->wpgrps_size);
    memset(sd->function_group, 0, sizeof(sd->function_group));
    sd->erase_start = 0;
    sd->erase_end = 0;
    sd->size = size;
    sd->blk_len = 0x200;  /* default block length: 512 bytes */
    sd->pwd_len = 0;
    sd->expecting_acmd = false;
    sd->dat_lines = 0xf;
    sd->cmd_line = true;
    sd->multi_blk_cnt = 0;
}
/* A card counts as inserted only when a backing block device exists
 * and its medium is present.
 */
static bool sd_get_inserted(SDState *sd)
{
    if (!sd->blk) {
        return false;
    }
    return blk_is_inserted(sd->blk);
}
/* Report the state of the (emulated) mechanical write-protect switch. */
static bool sd_get_readonly(SDState *sd)
{
    return sd->wp_switch;
}
/*
 * Media-change callback, invoked by the block layer when the backing
 * medium is inserted or ejected.  On insertion the card is fully reset;
 * in both cases the controller is notified of the new inserted/readonly
 * state, either through the QOM SDBus API or (for legacy controllers)
 * through the raw IRQ lines.
 */
static void sd_cardchange(void *opaque, bool load, Error **errp)
{
    SDState *sd = opaque;
    DeviceState *dev = DEVICE(sd);
    SDBus *sdbus = SD_BUS(qdev_get_parent_bus(dev));
    bool inserted = sd_get_inserted(sd);
    bool readonly = sd_get_readonly(sd);
    if (inserted) {
        trace_sdcard_inserted(readonly);
        sd_reset(dev);
    } else {
        trace_sdcard_ejected();
    }
    /* The IRQ notification is for legacy non-QOM SD controller devices;
     * QOMified controllers use the SDBus APIs.
     */
    if (sdbus) {
        sdbus_set_inserted(sdbus, inserted);
        if (inserted) {
            sdbus_set_readonly(sdbus, readonly);
        }
    } else {
        qemu_set_irq(sd->inserted_cb, inserted);
        if (inserted) {
            qemu_set_irq(sd->readonly_cb, readonly);
        }
    }
}
/* Block-layer callbacks registered against the backing drive. */
static const BlockDevOps sd_block_ops = {
    .change_media_cb = sd_cardchange,
};
/* The OCR subsection is only migrated while power-up is still pending. */
static bool sd_ocr_vmstate_needed(void *opaque)
{
    SDState *sd = opaque;
    /* Include the OCR state (and timer) if it is not yet powered up */
    return !FIELD_EX32(sd->ocr, OCR, CARD_POWER_UP);
}
/* Migration subsection for the OCR register and its power-up timer. */
static const VMStateDescription sd_ocr_vmstate = {
    .name = "sd-card/ocr-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sd_ocr_vmstate_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ocr, SDState),
        VMSTATE_TIMER_PTR(ocr_power_timer, SDState),
        VMSTATE_END_OF_LIST()
    },
};
/* Migration pre-load hook: establish the default OCR power state. */
static int sd_vmstate_pre_load(void *opaque)
{
    SDState *sd = opaque;
    /* If the OCR state is not included (prior versions, or not
     * needed), then the OCR must be set as powered up. If the OCR state
     * is included, this will be replaced by the state restore.
     */
    sd_ocr_powerup(sd);
    return 0;
}
/* Main migration description for the SD card device.  Field order and
 * the VMSTATE_UNUSED_V placeholder must not change, or migration from
 * older versions breaks.
 */
static const VMStateDescription sd_vmstate = {
    .name = "sd-card",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = sd_vmstate_pre_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mode, SDState),
        VMSTATE_INT32(state, SDState),
        VMSTATE_UINT8_ARRAY(cid, SDState, 16),
        VMSTATE_UINT8_ARRAY(csd, SDState, 16),
        VMSTATE_UINT16(rca, SDState),
        VMSTATE_UINT32(card_status, SDState),
        VMSTATE_PARTIAL_BUFFER(sd_status, SDState, 1),
        VMSTATE_UINT32(vhs, SDState),
        VMSTATE_BITMAP(wp_groups, SDState, 0, wpgrps_size),
        VMSTATE_UINT32(blk_len, SDState),
        VMSTATE_UINT32(multi_blk_cnt, SDState),
        VMSTATE_UINT32(erase_start, SDState),
        VMSTATE_UINT32(erase_end, SDState),
        VMSTATE_UINT8_ARRAY(pwd, SDState, 16),
        VMSTATE_UINT32(pwd_len, SDState),
        VMSTATE_UINT8_ARRAY(function_group, SDState, 6),
        VMSTATE_UINT8(current_cmd, SDState),
        VMSTATE_BOOL(expecting_acmd, SDState),
        VMSTATE_UINT32(blk_written, SDState),
        VMSTATE_UINT64(data_start, SDState),
        VMSTATE_UINT32(data_offset, SDState),
        VMSTATE_UINT8_ARRAY(data, SDState, 512),
        VMSTATE_UNUSED_V(1, 512),  /* was the receive buffer */
        VMSTATE_BOOL(enable, SDState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &sd_ocr_vmstate,
        NULL
    },
};
/* Legacy initialization function for use by non-qdevified callers.
 *
 * Creates and realizes a TYPE_SD_CARD device backed by @blk, in SPI mode
 * when @is_spi is true.  Returns NULL on failure.
 *
 * Fixes over the previous version: the Error object is now freed on the
 * failure paths (error_reportf_err reports and frees it), and the
 * half-constructed device object is unreferenced instead of leaking.
 */
SDState *sd_init(BlockBackend *blk, bool is_spi)
{
    Object *obj;
    DeviceState *dev;
    Error *err = NULL;

    obj = object_new(TYPE_SD_CARD);
    dev = DEVICE(obj);
    qdev_prop_set_drive(dev, "drive", blk, &err);
    if (err) {
        error_reportf_err(err, "sd_init failed: ");
        object_unref(obj);
        return NULL;
    }
    qdev_prop_set_bit(dev, "spi", is_spi);
    object_property_set_bool(obj, true, "realized", &err);
    if (err) {
        error_reportf_err(err, "sd_init failed: ");
        object_unref(obj);
        return NULL;
    }
    return SD_CARD(dev);
}
/* Legacy API: register readonly/insert notification IRQs and fire them
 * immediately with the current state of the backing drive.
 */
void sd_set_cb(SDState *sd, qemu_irq readonly, qemu_irq insert)
{
    sd->readonly_cb = readonly;
    sd->inserted_cb = insert;
    qemu_set_irq(readonly, sd->blk ? blk_is_read_only(sd->blk) : 0);
    qemu_set_irq(insert, sd->blk ? blk_is_inserted(sd->blk) : 0);
}
/*
 * Perform the erase selected by a previous CMD32/CMD33 pair (CMD38).
 * Only the status side effects are modelled: data is not actually
 * erased, but attempts to erase write-protected groups raise
 * WP_ERASE_SKIP.
 *
 * NOTE(review): 0 is used as the "range not set" sentinel, yet 0 is a
 * legal erase start/end address — an erase beginning at block 0 is
 * wrongly rejected with ERASE_SEQ_ERROR.  Confirm against the spec; a
 * dedicated invalid-address sentinel would fix this.
 */
static void sd_erase(SDState *sd)
{
    int i;
    uint64_t erase_start = sd->erase_start;
    uint64_t erase_end = sd->erase_end;
    trace_sdcard_erase();
    if (!sd->erase_start || !sd->erase_end) {
        sd->card_status |= ERASE_SEQ_ERROR;
        return;
    }
    if (FIELD_EX32(sd->ocr, OCR, CARD_CAPACITY)) {
        /* High capacity memory card: erase units are 512 byte blocks */
        erase_start *= 512;
        erase_end *= 512;
    }
    erase_start = sd_addr_to_wpnum(erase_start);
    erase_end = sd_addr_to_wpnum(erase_end);
    sd->erase_start = 0;
    sd->erase_end = 0;
    sd->csd[14] |= 0x40;  /* mark content as erased in the CSD */
    for (i = erase_start; i <= erase_end; i++) {
        if (test_bit(i, sd->wp_groups)) {
            sd->card_status |= WP_ERASE_SKIP;
        }
    }
}
/*
 * Return a 32-bit vector of write-protection flags, one per WP group,
 * starting at the group that contains @addr.  Groups lying beyond the
 * end of the card report 0 (not protected).
 */
static uint32_t sd_wpbits(SDState *sd, uint64_t addr)
{
    uint32_t bit, group;
    uint32_t bits = 0;

    group = sd_addr_to_wpnum(addr);
    for (bit = 0; bit < 32; bit++) {
        if (addr < sd->size && test_bit(group, sd->wp_groups)) {
            bits |= UINT32_C(1) << bit;
        }
        group++;
        addr += WPGROUP_SIZE;
    }
    return bits;
}
/*
 * Handle CMD6 (SWITCH_FUNCTION): build the 512-bit status data block
 * describing the supported function groups, and — in "set" mode — latch
 * the selected function for each group.
 */
static void sd_function_switch(SDState *sd, uint32_t arg)
{
    int i, mode, new_func;
    mode = !!(arg & 0x80000000);  /* bit 31: 0 = check mode, 1 = set mode */
    sd->data[0] = 0x00;		/* Maximum current consumption */
    sd->data[1] = 0x01;
    sd->data[2] = 0x80;		/* Supported group 6 functions */
    sd->data[3] = 0x01;
    sd->data[4] = 0x80;		/* Supported group 5 functions */
    sd->data[5] = 0x01;
    sd->data[6] = 0x80;		/* Supported group 4 functions */
    sd->data[7] = 0x01;
    sd->data[8] = 0x80;		/* Supported group 3 functions */
    sd->data[9] = 0x01;
    sd->data[10] = 0x80;	/* Supported group 2 functions */
    sd->data[11] = 0x43;
    sd->data[12] = 0x80;	/* Supported group 1 functions */
    sd->data[13] = 0x03;
    /* Each nibble of the argument selects a function for one group;
     * 0xf means "keep the current setting".
     */
    for (i = 0; i < 6; i ++) {
        new_func = (arg >> (i * 4)) & 0x0f;
        if (mode && new_func != 0x0f)
            sd->function_group[i] = new_func;
        /* Two selected-function nibbles are packed per status byte */
        sd->data[14 + (i >> 1)] = new_func << ((i * 4) & 4);
    }
    memset(&sd->data[17], 0, 47);
    stw_be_p(sd->data + 64, sd_crc16(sd->data, 64));
}
/* Is the write-protect group containing @addr currently protected? */
static inline bool sd_wp_addr(SDState *sd, uint64_t addr)
{
    return test_bit(sd_addr_to_wpnum(addr), sd->wp_groups);
}
/*
 * Handle the data block of CMD42 (LOCK_UNLOCK).  sd->data holds the
 * lock card data structure: byte 0 carries the ERASE/LOCK/CLR_PWD/
 * SET_PWD flags, byte 1 the password length, and the password bytes
 * follow.  Failures of any validation raise LOCK_UNLOCK_FAILED.
 * The exact checks follow the SD Physical Layer specification and are
 * order-sensitive; do not reorder.
 */
static void sd_lock_command(SDState *sd)
{
    int erase, lock, clr_pwd, set_pwd, pwd_len;
    erase = !!(sd->data[0] & 0x08);
    lock = sd->data[0] & 0x04;
    clr_pwd = sd->data[0] & 0x02;
    set_pwd = sd->data[0] & 0x01;
    if (sd->blk_len > 1)
        pwd_len = sd->data[1];
    else
        pwd_len = 0;
    if (lock) {
        trace_sdcard_lock();
    } else {
        trace_sdcard_unlock();
    }
    if (erase) {
        /* Force-erase: only valid on a locked card, with no other flag,
         * no extra data, no WP switch and no permanent protection.
         */
        if (!(sd->card_status & CARD_IS_LOCKED) || sd->blk_len > 1 ||
                        set_pwd || clr_pwd || lock || sd->wp_switch ||
                        (sd->csd[14] & 0x20)) {
            sd->card_status |= LOCK_UNLOCK_FAILED;
            return;
        }
        bitmap_zero(sd->wp_groups, sd->wpgrps_size);
        sd->csd[14] &= ~0x10;
        sd->card_status &= ~CARD_IS_LOCKED;
        sd->pwd_len = 0;
        /* Erasing the entire card here! */
        fprintf(stderr, "SD: Card force-erased by CMD42\n");
        return;
    }
    /* pwd_len covers the current password plus an optional new one
     * (at most 16 bytes); anything else is malformed.
     */
    if (sd->blk_len < 2 + pwd_len ||
                    pwd_len <= sd->pwd_len ||
                    pwd_len > sd->pwd_len + 16) {
        sd->card_status |= LOCK_UNLOCK_FAILED;
        return;
    }
    if (sd->pwd_len && memcmp(sd->pwd, sd->data + 2, sd->pwd_len)) {
        sd->card_status |= LOCK_UNLOCK_FAILED;
        return;
    }
    pwd_len -= sd->pwd_len;  /* length of the new password, if any */
    if ((pwd_len && !set_pwd) ||
                    (clr_pwd && (set_pwd || lock)) ||
                    (lock && !sd->pwd_len && !set_pwd) ||
                    (!set_pwd && !clr_pwd &&
                     (((sd->card_status & CARD_IS_LOCKED) && lock) ||
                      (!(sd->card_status & CARD_IS_LOCKED) && !lock)))) {
        sd->card_status |= LOCK_UNLOCK_FAILED;
        return;
    }
    if (set_pwd) {
        memcpy(sd->pwd, sd->data + 2 + sd->pwd_len, pwd_len);
        sd->pwd_len = pwd_len;
    }
    if (clr_pwd) {
        sd->pwd_len = 0;
    }
    if (lock)
        sd->card_status |= CARD_IS_LOCKED;
    else
        sd->card_status &= ~CARD_IS_LOCKED;
}
/*
 * Execute a normal (non-application) command and return the response
 * type the caller should build.  This implements the SD state machine
 * from the Physical Layer specification; commands received in a state
 * where they are invalid fall through to the "wrong state" diagnostic.
 *
 * Fix: the APP_CMD illegal-RCA diagnostic previously printed the
 * command index (always 55) where the message promises the RCA; it now
 * prints the offending RCA value.
 */
static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req)
{
    uint32_t rca = 0x0000;
    /* High-capacity cards take a block number; convert to a byte address */
    uint64_t addr = (sd->ocr & (1 << 30)) ? (uint64_t) req.arg << 9 : req.arg;

    /* CMD55 precedes an ACMD, so we are not interested in tracing it.
     * However there is no ACMD55, so we want to trace this particular case.
     */
    if (req.cmd != 55 || sd->expecting_acmd) {
        trace_sdcard_normal_command(sd->proto_name,
                                    sd_cmd_name(req.cmd), req.cmd,
                                    req.arg, sd_state_name(sd->state));
    }

    /* Not interpreting this as an app command */
    sd->card_status &= ~APP_CMD;

    /* Addressed and addressed-data-transfer commands carry the RCA in
     * the upper half of the argument.
     */
    if (sd_cmd_type[req.cmd] == sd_ac
        || sd_cmd_type[req.cmd] == sd_adtc) {
        rca = req.arg >> 16;
    }

    /* CMD23 (set block count) must be immediately followed by CMD18 or CMD25
     * if not, its effects are cancelled */
    if (sd->multi_blk_cnt != 0 && !(req.cmd == 18 || req.cmd == 25)) {
        sd->multi_blk_cnt = 0;
    }

    switch (req.cmd) {
    /* Basic commands (Class 0 and Class 1) */
    case 0:	/* CMD0:   GO_IDLE_STATE */
        switch (sd->state) {
        case sd_inactive_state:
            return sd->spi ? sd_r1 : sd_r0;

        default:
            sd->state = sd_idle_state;
            sd_reset(DEVICE(sd));
            return sd->spi ? sd_r1 : sd_r0;
        }
        break;

    case 1:	/* CMD1:   SEND_OP_CMD */
        if (!sd->spi)
            goto bad_cmd;

        sd->state = sd_transfer_state;
        return sd_r1;

    case 2:	/* CMD2:   ALL_SEND_CID */
        if (sd->spi)
            goto bad_cmd;
        switch (sd->state) {
        case sd_ready_state:
            sd->state = sd_identification_state;
            return sd_r2_i;

        default:
            break;
        }
        break;

    case 3:	/* CMD3:   SEND_RELATIVE_ADDR */
        if (sd->spi)
            goto bad_cmd;
        switch (sd->state) {
        case sd_identification_state:
        case sd_standby_state:
            sd->state = sd_standby_state;
            sd_set_rca(sd);
            return sd_r6;

        default:
            break;
        }
        break;

    case 4:	/* CMD4:   SEND_DSR */
        if (sd->spi)
            goto bad_cmd;
        switch (sd->state) {
        case sd_standby_state:
            break;

        default:
            break;
        }
        break;

    case 5: /* CMD5: reserved for SDIO cards */
        return sd_illegal;

    case 6:	/* CMD6:   SWITCH_FUNCTION */
        switch (sd->mode) {
        case sd_data_transfer_mode:
            sd_function_switch(sd, req.arg);
            sd->state = sd_sendingdata_state;
            sd->data_start = 0;
            sd->data_offset = 0;
            return sd_r1;

        default:
            break;
        }
        break;

    case 7:	/* CMD7:   SELECT/DESELECT_CARD */
        if (sd->spi)
            goto bad_cmd;
        switch (sd->state) {
        case sd_standby_state:
            if (sd->rca != rca)
                return sd_r0;

            sd->state = sd_transfer_state;
            return sd_r1b;

        case sd_transfer_state:
        case sd_sendingdata_state:
            if (sd->rca == rca)
                break;

            sd->state = sd_standby_state;
            return sd_r1b;

        case sd_disconnect_state:
            if (sd->rca != rca)
                return sd_r0;

            sd->state = sd_programming_state;
            return sd_r1b;

        case sd_programming_state:
            if (sd->rca == rca)
                break;

            sd->state = sd_disconnect_state;
            return sd_r1b;

        default:
            break;
        }
        break;

    case 8:	/* CMD8:   SEND_IF_COND */
        if (sd->spec_version < SD_PHY_SPECv2_00_VERS) {
            break;
        }
        if (sd->state != sd_idle_state) {
            break;
        }
        sd->vhs = 0;

        /* No response if not exactly one VHS bit is set. */
        if (!(req.arg >> 8) || (req.arg >> (ctz32(req.arg & ~0xff) + 1))) {
            return sd->spi ? sd_r7 : sd_r0;
        }

        /* Accept. */
        sd->vhs = req.arg;
        return sd_r7;

    case 9:	/* CMD9:   SEND_CSD */
        switch (sd->state) {
        case sd_standby_state:
            if (sd->rca != rca)
                return sd_r0;

            return sd_r2_s;

        case sd_transfer_state:
            if (!sd->spi)
                break;
            sd->state = sd_sendingdata_state;
            memcpy(sd->data, sd->csd, 16);
            sd->data_start = addr;
            sd->data_offset = 0;
            return sd_r1;

        default:
            break;
        }
        break;

    case 10:	/* CMD10:  SEND_CID */
        switch (sd->state) {
        case sd_standby_state:
            if (sd->rca != rca)
                return sd_r0;

            return sd_r2_i;

        case sd_transfer_state:
            if (!sd->spi)
                break;
            sd->state = sd_sendingdata_state;
            memcpy(sd->data, sd->cid, 16);
            sd->data_start = addr;
            sd->data_offset = 0;
            return sd_r1;

        default:
            break;
        }
        break;

    case 12:	/* CMD12:  STOP_TRANSMISSION */
        switch (sd->state) {
        case sd_sendingdata_state:
            sd->state = sd_transfer_state;
            return sd_r1b;

        case sd_receivingdata_state:
            sd->state = sd_programming_state;
            /* Bzzzzzzztt .... Operation complete.  */
            sd->state = sd_transfer_state;
            return sd_r1b;

        default:
            break;
        }
        break;

    case 13:	/* CMD13:  SEND_STATUS */
        switch (sd->mode) {
        case sd_data_transfer_mode:
            if (sd->rca != rca)
                return sd_r0;

            return sd_r1;

        default:
            break;
        }
        break;

    case 15:	/* CMD15:  GO_INACTIVE_STATE */
        if (sd->spi)
            goto bad_cmd;
        switch (sd->mode) {
        case sd_data_transfer_mode:
            if (sd->rca != rca)
                return sd_r0;

            sd->state = sd_inactive_state;
            return sd_r0;

        default:
            break;
        }
        break;

    /* Block read commands (Classs 2) */
    case 16:	/* CMD16:  SET_BLOCKLEN */
        switch (sd->state) {
        case sd_transfer_state:
            if (req.arg > (1 << HWBLOCK_SHIFT)) {
                sd->card_status |= BLOCK_LEN_ERROR;
            } else {
                trace_sdcard_set_blocklen(req.arg);
                sd->blk_len = req.arg;
            }

            return sd_r1;

        default:
            break;
        }
        break;

    case 17:	/* CMD17:  READ_SINGLE_BLOCK */
        switch (sd->state) {
        case sd_transfer_state:
            sd->state = sd_sendingdata_state;
            sd->data_start = addr;
            sd->data_offset = 0;

            if (sd->data_start + sd->blk_len > sd->size)
                sd->card_status |= ADDRESS_ERROR;
            return sd_r1;

        default:
            break;
        }
        break;

    case 18:	/* CMD18:  READ_MULTIPLE_BLOCK */
        switch (sd->state) {
        case sd_transfer_state:
            sd->state = sd_sendingdata_state;
            sd->data_start = addr;
            sd->data_offset = 0;

            if (sd->data_start + sd->blk_len > sd->size)
                sd->card_status |= ADDRESS_ERROR;
            return sd_r1;

        default:
            break;
        }
        break;

    case 19:    /* CMD19: SEND_TUNING_BLOCK (SD) */
        if (sd->spec_version < SD_PHY_SPECv3_01_VERS) {
            break;
        }
        if (sd->state == sd_transfer_state) {
            sd->state = sd_sendingdata_state;
            sd->data_offset = 0;
            return sd_r1;
        }
        break;

    case 23:    /* CMD23: SET_BLOCK_COUNT */
        if (sd->spec_version < SD_PHY_SPECv3_01_VERS) {
            break;
        }
        switch (sd->state) {
        case sd_transfer_state:
            sd->multi_blk_cnt = req.arg;
            return sd_r1;

        default:
            break;
        }
        break;

    /* Block write commands (Class 4) */
    case 24:	/* CMD24:  WRITE_SINGLE_BLOCK */
        switch (sd->state) {
        case sd_transfer_state:
            /* Writing in SPI mode not implemented.  */
            if (sd->spi)
                break;
            sd->state = sd_receivingdata_state;
            sd->data_start = addr;
            sd->data_offset = 0;
            sd->blk_written = 0;

            if (sd->data_start + sd->blk_len > sd->size)
                sd->card_status |= ADDRESS_ERROR;
            if (sd_wp_addr(sd, sd->data_start))
                sd->card_status |= WP_VIOLATION;
            if (sd->csd[14] & 0x30)
                sd->card_status |= WP_VIOLATION;
            return sd_r1;

        default:
            break;
        }
        break;

    case 25:	/* CMD25:  WRITE_MULTIPLE_BLOCK */
        switch (sd->state) {
        case sd_transfer_state:
            /* Writing in SPI mode not implemented.  */
            if (sd->spi)
                break;
            sd->state = sd_receivingdata_state;
            sd->data_start = addr;
            sd->data_offset = 0;
            sd->blk_written = 0;

            if (sd->data_start + sd->blk_len > sd->size)
                sd->card_status |= ADDRESS_ERROR;
            if (sd_wp_addr(sd, sd->data_start))
                sd->card_status |= WP_VIOLATION;
            if (sd->csd[14] & 0x30)
                sd->card_status |= WP_VIOLATION;
            return sd_r1;

        default:
            break;
        }
        break;

    case 26:	/* CMD26:  PROGRAM_CID */
        if (sd->spi)
            goto bad_cmd;
        switch (sd->state) {
        case sd_transfer_state:
            sd->state = sd_receivingdata_state;
            sd->data_start = 0;
            sd->data_offset = 0;
            return sd_r1;

        default:
            break;
        }
        break;

    case 27:	/* CMD27:  PROGRAM_CSD */
        switch (sd->state) {
        case sd_transfer_state:
            sd->state = sd_receivingdata_state;
            sd->data_start = 0;
            sd->data_offset = 0;
            return sd_r1;

        default:
            break;
        }
        break;

    /* Write protection (Class 6) */
    case 28:	/* CMD28:  SET_WRITE_PROT */
        switch (sd->state) {
        case sd_transfer_state:
            if (addr >= sd->size) {
                sd->card_status |= ADDRESS_ERROR;
                return sd_r1b;
            }

            sd->state = sd_programming_state;
            set_bit(sd_addr_to_wpnum(addr), sd->wp_groups);
            /* Bzzzzzzztt .... Operation complete.  */
            sd->state = sd_transfer_state;
            return sd_r1b;

        default:
            break;
        }
        break;

    case 29:	/* CMD29:  CLR_WRITE_PROT */
        switch (sd->state) {
        case sd_transfer_state:
            if (addr >= sd->size) {
                sd->card_status |= ADDRESS_ERROR;
                return sd_r1b;
            }

            sd->state = sd_programming_state;
            clear_bit(sd_addr_to_wpnum(addr), sd->wp_groups);
            /* Bzzzzzzztt .... Operation complete.  */
            sd->state = sd_transfer_state;
            return sd_r1b;

        default:
            break;
        }
        break;

    case 30:	/* CMD30:  SEND_WRITE_PROT */
        switch (sd->state) {
        case sd_transfer_state:
            sd->state = sd_sendingdata_state;
            *(uint32_t *) sd->data = sd_wpbits(sd, req.arg);
            sd->data_start = addr;
            sd->data_offset = 0;
            return sd_r1b;

        default:
            break;
        }
        break;

    /* Erase commands (Class 5) */
    case 32:	/* CMD32:  ERASE_WR_BLK_START */
        switch (sd->state) {
        case sd_transfer_state:
            sd->erase_start = req.arg;
            return sd_r1;

        default:
            break;
        }
        break;

    case 33:	/* CMD33:  ERASE_WR_BLK_END */
        switch (sd->state) {
        case sd_transfer_state:
            sd->erase_end = req.arg;
            return sd_r1;

        default:
            break;
        }
        break;

    case 38:	/* CMD38:  ERASE */
        switch (sd->state) {
        case sd_transfer_state:
            if (sd->csd[14] & 0x30) {
                sd->card_status |= WP_VIOLATION;
                return sd_r1b;
            }

            sd->state = sd_programming_state;
            sd_erase(sd);
            /* Bzzzzzzztt .... Operation complete.  */
            sd->state = sd_transfer_state;
            return sd_r1b;

        default:
            break;
        }
        break;

    /* Lock card commands (Class 7) */
    case 42:	/* CMD42:  LOCK_UNLOCK */
        switch (sd->state) {
        case sd_transfer_state:
            sd->state = sd_receivingdata_state;
            sd->data_start = 0;
            sd->data_offset = 0;
            return sd_r1;

        default:
            break;
        }
        break;

    case 52 ... 54:
        /* CMD52, CMD53, CMD54: reserved for SDIO cards
         * (see the SDIO Simplified Specification V2.0)
         * Handle as illegal command but do not complain
         * on stderr, as some OSes may use these in their
         * probing for presence of an SDIO card.
         */
        return sd_illegal;

    /* Application specific commands (Class 8) */
    case 55:	/* CMD55:  APP_CMD */
        switch (sd->state) {
        case sd_ready_state:
        case sd_identification_state:
        case sd_inactive_state:
            return sd_illegal;
        case sd_idle_state:
            if (rca) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "SD: illegal RCA 0x%04x for APP_CMD\n", rca);
            }
            /* fall through */
        default:
            break;
        }
        if (!sd->spi) {
            if (sd->rca != rca) {
                return sd_r0;
            }
        }
        sd->expecting_acmd = true;
        sd->card_status |= APP_CMD;
        return sd_r1;

    case 56:	/* CMD56:  GEN_CMD */
        switch (sd->state) {
        case sd_transfer_state:
            sd->data_offset = 0;
            if (req.arg & 1)
                sd->state = sd_sendingdata_state;
            else
                sd->state = sd_receivingdata_state;
            return sd_r1;

        default:
            break;
        }
        break;

    case 58:    /* CMD58:   READ_OCR (SPI) */
        if (!sd->spi) {
            goto bad_cmd;
        }
        return sd_r3;

    case 59:    /* CMD59:   CRC_ON_OFF (SPI) */
        if (!sd->spi) {
            goto bad_cmd;
        }
        goto unimplemented_spi_cmd;

    default:
    bad_cmd:
        qemu_log_mask(LOG_GUEST_ERROR, "SD: Unknown CMD%i\n", req.cmd);
        return sd_illegal;

    unimplemented_spi_cmd:
        /* Commands that are recognised but not yet implemented in SPI mode.  */
        qemu_log_mask(LOG_UNIMP, "SD: CMD%i not implemented in SPI mode\n",
                      req.cmd);
        return sd_illegal;
    }

    qemu_log_mask(LOG_GUEST_ERROR, "SD: CMD%i in a wrong state\n", req.cmd);
    return sd_illegal;
}
/*
 * Execute an application-specific command (one prefixed by CMD55).
 * Unrecognised ACMD numbers fall back to the normal command handler,
 * per the SD specification.
 */
static sd_rsp_type_t sd_app_command(SDState *sd,
                                    SDRequest req)
{
    trace_sdcard_app_command(sd->proto_name, sd_acmd_name(req.cmd),
                             req.cmd, req.arg, sd_state_name(sd->state));
    sd->card_status |= APP_CMD;
    switch (req.cmd) {
    case 6:	/* ACMD6:  SET_BUS_WIDTH */
        if (sd->spi) {
            goto unimplemented_spi_cmd;
        }
        switch (sd->state) {
        case sd_transfer_state:
            /* Record the bus width in the top bits of the SD status */
            sd->sd_status[0] &= 0x3f;
            sd->sd_status[0] |= (req.arg & 0x03) << 6;
            return sd_r1;
        default:
            break;
        }
        break;
    case 13:	/* ACMD13: SD_STATUS */
        switch (sd->state) {
        case sd_transfer_state:
            sd->state = sd_sendingdata_state;
            sd->data_start = 0;
            sd->data_offset = 0;
            return sd_r1;
        default:
            break;
        }
        break;
    case 22:	/* ACMD22: SEND_NUM_WR_BLOCKS */
        switch (sd->state) {
        case sd_transfer_state:
            *(uint32_t *) sd->data = sd->blk_written;
            sd->state = sd_sendingdata_state;
            sd->data_start = 0;
            sd->data_offset = 0;
            return sd_r1;
        default:
            break;
        }
        break;
    case 23:	/* ACMD23: SET_WR_BLK_ERASE_COUNT */
        switch (sd->state) {
        case sd_transfer_state:
            /* Pre-erase hint only; nothing to model */
            return sd_r1;
        default:
            break;
        }
        break;
    case 41:	/* ACMD41: SD_APP_OP_COND */
        if (sd->spi) {
            /* SEND_OP_CMD */
            sd->state = sd_transfer_state;
            return sd_r1;
        }
        if (sd->state != sd_idle_state) {
            break;
        }
        /* If it's the first ACMD41 since reset, we need to decide
         * whether to power up. If this is not an enquiry ACMD41,
         * we immediately report power on and proceed below to the
         * ready state, but if it is, we set a timer to model a
         * delay for power up. This works around a bug in EDK2
         * UEFI, which sends an initial enquiry ACMD41, but
         * assumes that the card is in ready state as soon as it
         * sees the power up bit set. */
        if (!FIELD_EX32(sd->ocr, OCR, CARD_POWER_UP)) {
            if ((req.arg & ACMD41_ENQUIRY_MASK) != 0) {
                timer_del(sd->ocr_power_timer);
                sd_ocr_powerup(sd);
            } else {
                trace_sdcard_inquiry_cmd41();
                if (!timer_pending(sd->ocr_power_timer)) {
                    timer_mod_ns(sd->ocr_power_timer,
                                 (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
                                  + OCR_POWER_DELAY_NS));
                }
            }
        }
        if (FIELD_EX32(sd->ocr & req.arg, OCR, VDD_VOLTAGE_WINDOW)) {
            /* We accept any voltage. 10000 V is nothing.
             *
             * Once we're powered up, we advance straight to ready state
             * unless it's an enquiry ACMD41 (bits 23:0 == 0).
             */
            sd->state = sd_ready_state;
        }
        return sd_r3;
    case 42:	/* ACMD42: SET_CLR_CARD_DETECT */
        switch (sd->state) {
        case sd_transfer_state:
            /* Bringing in the 50KOhm pull-up resistor... Done.  */
            return sd_r1;
        default:
            break;
        }
        break;
    case 51:	/* ACMD51: SEND_SCR */
        switch (sd->state) {
        case sd_transfer_state:
            sd->state = sd_sendingdata_state;
            sd->data_start = 0;
            sd->data_offset = 0;
            return sd_r1;
        default:
            break;
        }
        break;
    case 18:    /* Reserved for SD security applications */
    case 25:
    case 26:
    case 38:
    case 43 ... 49:
        /* Refer to the "SD Specifications Part3 Security Specification" for
         * information about the SD Security Features.
         */
        qemu_log_mask(LOG_UNIMP, "SD: CMD%i Security not implemented\n",
                      req.cmd);
        return sd_illegal;
    default:
        /* Fall back to standard commands.  */
        return sd_normal_command(sd, req);
    unimplemented_spi_cmd:
        /* Commands that are recognised but not yet implemented in SPI mode.  */
        qemu_log_mask(LOG_UNIMP, "SD: CMD%i not implemented in SPI mode\n",
                      req.cmd);
        return sd_illegal;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "SD: ACMD%i in a wrong state\n", req.cmd);
    return sd_illegal;
}
/* Decide whether @req may execute while the card is password-locked.
 *
 * Valid commands in locked state:
 *   basic class (0), lock card class (7), CMD16,
 *   implicitly the ACMD prefix CMD55, and ACMD41/ACMD42.
 * Anything else provokes an "illegal command" response.
 */
static int cmd_valid_while_locked(SDState *sd, SDRequest *req)
{
    if (sd->expecting_acmd) {
        return req->cmd == 41 || req->cmd == 42;
    }
    switch (req->cmd) {
    case 16:
    case 55:
        return 1;
    default:
        break;
    }
    switch (sd_cmd_class[req->cmd]) {
    case 0:
    case 7:
        return 1;
    default:
        return 0;
    }
}
/*
 * Top-level command dispatch: validate the request, run it through the
 * normal/app command handlers, and serialize the response into
 * @response.  Returns the response length in bytes (0 when no response
 * is generated, e.g. R0 or an illegal command).
 */
int sd_do_command(SDState *sd, SDRequest *req,
                  uint8_t *response) {
    int last_state;
    sd_rsp_type_t rtype;
    int rsplen;
    if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable) {
        return 0;
    }
    if (sd_req_crc_validate(req)) {
        sd->card_status |= COM_CRC_ERROR;
        rtype = sd_illegal;
        goto send_response;
    }
    if (req->cmd >= SDMMC_CMD_MAX) {
        qemu_log_mask(LOG_GUEST_ERROR, "SD: incorrect command 0x%02x\n",
                      req->cmd);
        /* Clamp so the table lookups below stay in bounds */
        req->cmd &= 0x3f;
    }
    if (sd->card_status & CARD_IS_LOCKED) {
        if (!cmd_valid_while_locked(sd, req)) {
            sd->card_status |= ILLEGAL_COMMAND;
            sd->expecting_acmd = false;
            qemu_log_mask(LOG_GUEST_ERROR, "SD: Card is locked\n");
            rtype = sd_illegal;
            goto send_response;
        }
    }
    last_state = sd->state;
    sd_set_mode(sd);
    if (sd->expecting_acmd) {
        sd->expecting_acmd = false;
        rtype = sd_app_command(sd, *req);
    } else {
        rtype = sd_normal_command(sd, *req);
    }
    if (rtype == sd_illegal) {
        sd->card_status |= ILLEGAL_COMMAND;
    } else {
        /* Valid command, we can update the 'state before command' bits.
         * (Do this now so they appear in r1 responses.)
         */
        sd->current_cmd = req->cmd;
        sd->card_status &= ~CURRENT_STATE;
        sd->card_status |= (last_state << 9);
    }
send_response:
    switch (rtype) {
    case sd_r1:
    case sd_r1b:
        sd_response_r1_make(sd, response);
        rsplen = 4;
        break;
    case sd_r2_i:
        memcpy(response, sd->cid, sizeof(sd->cid));
        rsplen = 16;
        break;
    case sd_r2_s:
        memcpy(response, sd->csd, sizeof(sd->csd));
        rsplen = 16;
        break;
    case sd_r3:
        sd_response_r3_make(sd, response);
        rsplen = 4;
        break;
    case sd_r6:
        sd_response_r6_make(sd, response);
        rsplen = 4;
        break;
    case sd_r7:
        sd_response_r7_make(sd, response);
        rsplen = 4;
        break;
    case sd_r0:
    case sd_illegal:
        rsplen = 0;
        break;
    default:
        g_assert_not_reached();
    }
    trace_sdcard_response(sd_response_name(rtype), rsplen);
    if (rtype != sd_illegal) {
        /* Clear the "clear on valid command" status bits now we've
         * sent any response
         */
        sd->card_status &= ~CARD_STATUS_B;
    }
#ifdef DEBUG_SD
    qemu_hexdump((const char *)response, stderr, "Response", rsplen);
#endif
    return rsplen;
}
/* Read @len bytes at byte offset @addr from the backing device into
 * sd->data.  Host-side read errors are only reported, not propagated.
 */
static void sd_blk_read(SDState *sd, uint64_t addr, uint32_t len)
{
    trace_sdcard_read_block(addr, len);
    if (!sd->blk || blk_pread(sd->blk, addr, sd->data, len) < 0) {
        fprintf(stderr, "sd_blk_read: read error on host side\n");
    }
}
/* Write @len bytes from sd->data to byte offset @addr of the backing
 * device.  Host-side write errors are only reported, not propagated.
 */
static void sd_blk_write(SDState *sd, uint64_t addr, uint32_t len)
{
    trace_sdcard_write_block(addr, len);
    if (!sd->blk || blk_pwrite(sd->blk, addr, sd->data, len, 0) < 0) {
        fprintf(stderr, "sd_blk_write: write error on host side\n");
    }
}
/* Data-path helpers used by sd_read_data()/sd_write_data().  The APP_*
 * variants back CMD56 (GEN_CMD): reads return a 0xec fill pattern and
 * writes are discarded.
 */
#define BLK_READ_BLOCK(a, len)	sd_blk_read(sd, a, len)
#define BLK_WRITE_BLOCK(a, len)	sd_blk_write(sd, a, len)
#define APP_READ_BLOCK(a, len)	memset(sd->data, 0xec, len)
#define APP_WRITE_BLOCK(a, len)
/*
 * Accept one byte of write data from the controller.  Bytes accumulate
 * in sd->data until a full unit (block length, CID or CSD size) has
 * arrived, at which point the pending operation for sd->current_cmd is
 * committed and the state machine advances.
 */
void sd_write_data(SDState *sd, uint8_t value)
{
    int i;
    if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable)
        return;
    if (sd->state != sd_receivingdata_state) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "sd_write_data: not in Receiving-Data state\n");
        return;
    }
    /* Errors raised at command time suppress the transfer entirely */
    if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION))
        return;
    trace_sdcard_write_data(sd->proto_name,
                            sd_acmd_name(sd->current_cmd),
                            sd->current_cmd, value);
    switch (sd->current_cmd) {
    case 24:	/* CMD24:  WRITE_SINGLE_BLOCK */
        sd->data[sd->data_offset ++] = value;
        if (sd->data_offset >= sd->blk_len) {
            /* TODO: Check CRC before committing */
            sd->state = sd_programming_state;
            BLK_WRITE_BLOCK(sd->data_start, sd->data_offset);
            sd->blk_written ++;
            sd->csd[14] |= 0x40;
            /* Bzzzzzzztt .... Operation complete.  */
            sd->state = sd_transfer_state;
        }
        break;
    case 25:	/* CMD25:  WRITE_MULTIPLE_BLOCK */
        if (sd->data_offset == 0) {
            /* Start of the block - let's check the address is valid */
            if (sd->data_start + sd->blk_len > sd->size) {
                sd->card_status |= ADDRESS_ERROR;
                break;
            }
            if (sd_wp_addr(sd, sd->data_start)) {
                sd->card_status |= WP_VIOLATION;
                break;
            }
        }
        sd->data[sd->data_offset++] = value;
        if (sd->data_offset >= sd->blk_len) {
            /* TODO: Check CRC before committing */
            sd->state = sd_programming_state;
            BLK_WRITE_BLOCK(sd->data_start, sd->data_offset);
            sd->blk_written++;
            sd->data_start += sd->blk_len;
            sd->data_offset = 0;
            sd->csd[14] |= 0x40;
            /* Bzzzzzzztt .... Operation complete.  */
            if (sd->multi_blk_cnt != 0) {
                if (--sd->multi_blk_cnt == 0) {
                    /* Stop! */
                    sd->state = sd_transfer_state;
                    break;
                }
            }
            sd->state = sd_receivingdata_state;
        }
        break;
    case 26:	/* CMD26:  PROGRAM_CID */
        sd->data[sd->data_offset ++] = value;
        if (sd->data_offset >= sizeof(sd->cid)) {
            /* TODO: Check CRC before committing */
            sd->state = sd_programming_state;
            /* CID bits may only be programmed from 1 to 0, never back */
            for (i = 0; i < sizeof(sd->cid); i ++)
                if ((sd->cid[i] | 0x00) != sd->data[i])
                    sd->card_status |= CID_CSD_OVERWRITE;
            if (!(sd->card_status & CID_CSD_OVERWRITE))
                for (i = 0; i < sizeof(sd->cid); i ++) {
                    sd->cid[i] |= 0x00;
                    sd->cid[i] &= sd->data[i];
                }
            /* Bzzzzzzztt .... Operation complete.  */
            sd->state = sd_transfer_state;
        }
        break;
    case 27:	/* CMD27:  PROGRAM_CSD */
        sd->data[sd->data_offset ++] = value;
        if (sd->data_offset >= sizeof(sd->csd)) {
            /* TODO: Check CRC before committing */
            sd->state = sd_programming_state;
            /* Only the bits in sd_csd_rw_mask are writable */
            for (i = 0; i < sizeof(sd->csd); i ++)
                if ((sd->csd[i] | sd_csd_rw_mask[i]) !=
                    (sd->data[i] | sd_csd_rw_mask[i]))
                    sd->card_status |= CID_CSD_OVERWRITE;
            /* Copy flag (OTP) & Permanent write protect */
            if (sd->csd[14] & ~sd->data[14] & 0x60)
                sd->card_status |= CID_CSD_OVERWRITE;
            if (!(sd->card_status & CID_CSD_OVERWRITE))
                for (i = 0; i < sizeof(sd->csd); i ++) {
                    sd->csd[i] |= sd_csd_rw_mask[i];
                    sd->csd[i] &= sd->data[i];
                }
            /* Bzzzzzzztt .... Operation complete.  */
            sd->state = sd_transfer_state;
        }
        break;
    case 42:	/* CMD42:  LOCK_UNLOCK */
        sd->data[sd->data_offset ++] = value;
        if (sd->data_offset >= sd->blk_len) {
            /* TODO: Check CRC before committing */
            sd->state = sd_programming_state;
            sd_lock_command(sd);
            /* Bzzzzzzztt .... Operation complete.  */
            sd->state = sd_transfer_state;
        }
        break;
    case 56:	/* CMD56:  GEN_CMD */
        sd->data[sd->data_offset ++] = value;
        if (sd->data_offset >= sd->blk_len) {
            APP_WRITE_BLOCK(sd->data_start, sd->data_offset);
            sd->state = sd_transfer_state;
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "sd_write_data: unknown command\n");
        break;
    }
}
/* Fixed data block returned for CMD19 (SEND_TUNING_BLOCK). */
#define SD_TUNING_BLOCK_SIZE    64
static const uint8_t sd_tuning_block_pattern[SD_TUNING_BLOCK_SIZE] = {
    /* See: Physical Layer Simplified Specification Version 3.01, Table 4-2 */
    0xff, 0x0f, 0xff, 0x00,         0x0f, 0xfc, 0xc3, 0xcc,
    0xc3, 0x3c, 0xcc, 0xff,         0xfe, 0xff, 0xfe, 0xef,
    0xff, 0xdf, 0xff, 0xdd,         0xff, 0xfb, 0xff, 0xfb,
    0xbf, 0xff, 0x7f, 0xff,         0x77, 0xf7, 0xbd, 0xef,
    0xff, 0xf0, 0xff, 0xf0,         0x0f, 0xfc, 0xcc, 0x3c,
    0xcc, 0x33, 0xcc, 0xcf,         0xff, 0xef, 0xff, 0xee,
    0xff, 0xfd, 0xff, 0xfd,         0xdf, 0xff, 0xbf, 0xff,
    0xbb, 0xff, 0xf7, 0xff,         0xf7, 0x7f, 0x7b, 0xde,
};
/*
 * Hand one byte of read data to the controller.  The source depends on
 * sd->current_cmd (register image, SD status, block data, tuning
 * pattern, ...); once a full unit has been consumed the state machine
 * returns to transfer state (or advances to the next block for CMD18).
 * Returns 0x00 when no data can be supplied.
 */
uint8_t sd_read_data(SDState *sd)
{
    /* TODO: Append CRCs */
    uint8_t ret;
    int io_len;
    if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable)
        return 0x00;
    if (sd->state != sd_sendingdata_state) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "sd_read_data: not in Sending-Data state\n");
        return 0x00;
    }
    if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION))
        return 0x00;
    /* High-capacity cards always transfer 512-byte blocks */
    io_len = (sd->ocr & (1 << 30)) ? 512 : sd->blk_len;
    trace_sdcard_read_data(sd->proto_name,
                           sd_acmd_name(sd->current_cmd),
                           sd->current_cmd, io_len);
    switch (sd->current_cmd) {
    case 6:	/* CMD6:   SWITCH_FUNCTION */
        ret = sd->data[sd->data_offset ++];
        if (sd->data_offset >= 64)
            sd->state = sd_transfer_state;
        break;
    case 9:	/* CMD9:   SEND_CSD */
    case 10:	/* CMD10:  SEND_CID */
        ret = sd->data[sd->data_offset ++];
        if (sd->data_offset >= 16)
            sd->state = sd_transfer_state;
        break;
    case 13:	/* ACMD13: SD_STATUS */
        ret = sd->sd_status[sd->data_offset ++];
        if (sd->data_offset >= sizeof(sd->sd_status))
            sd->state = sd_transfer_state;
        break;
    case 17:	/* CMD17:  READ_SINGLE_BLOCK */
        if (sd->data_offset == 0)
            BLK_READ_BLOCK(sd->data_start, io_len);
        ret = sd->data[sd->data_offset ++];
        if (sd->data_offset >= io_len)
            sd->state = sd_transfer_state;
        break;
    case 18:	/* CMD18:  READ_MULTIPLE_BLOCK */
        if (sd->data_offset == 0) {
            if (sd->data_start + io_len > sd->size) {
                sd->card_status |= ADDRESS_ERROR;
                return 0x00;
            }
            BLK_READ_BLOCK(sd->data_start, io_len);
        }
        ret = sd->data[sd->data_offset ++];
        if (sd->data_offset >= io_len) {
            sd->data_start += io_len;
            sd->data_offset = 0;
            if (sd->multi_blk_cnt != 0) {
                if (--sd->multi_blk_cnt == 0) {
                    /* Stop! */
                    sd->state = sd_transfer_state;
                    break;
                }
            }
        }
        break;
    case 19:    /* CMD19:  SEND_TUNING_BLOCK (SD) */
        if (sd->data_offset >= SD_TUNING_BLOCK_SIZE - 1) {
            sd->state = sd_transfer_state;
        }
        ret = sd_tuning_block_pattern[sd->data_offset++];
        break;
    case 22:	/* ACMD22: SEND_NUM_WR_BLOCKS */
        ret = sd->data[sd->data_offset ++];
        if (sd->data_offset >= 4)
            sd->state = sd_transfer_state;
        break;
    case 30:	/* CMD30:  SEND_WRITE_PROT */
        ret = sd->data[sd->data_offset ++];
        if (sd->data_offset >= 4)
            sd->state = sd_transfer_state;
        break;
    case 51:	/* ACMD51: SEND_SCR */
        ret = sd->scr[sd->data_offset ++];
        if (sd->data_offset >= sizeof(sd->scr))
            sd->state = sd_transfer_state;
        break;
    case 56:	/* CMD56:  GEN_CMD */
        if (sd->data_offset == 0)
            APP_READ_BLOCK(sd->data_start, sd->blk_len);
        ret = sd->data[sd->data_offset ++];
        if (sd->data_offset >= sd->blk_len)
            sd->state = sd_transfer_state;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "sd_read_data: unknown command\n");
        return 0x00;
    }
    return ret;
}
/* Does the card currently have read data available for the controller? */
bool sd_data_ready(SDState *sd)
{
    if (sd->state != sd_sendingdata_state) {
        return false;
    }
    return true;
}
/* Enable or disable the card; while disabled all commands and data
 * transfers are ignored.
 */
void sd_enable(SDState *sd, bool enable)
{
    sd->enable = enable;
}
/* QOM instance init: set up per-card state that needs no properties. */
static void sd_instance_init(Object *obj)
{
    SDState *sd = SD_CARD(obj);

    /* Timer driving the simulated OCR power-up sequence. */
    sd->ocr_power_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sd_ocr_powerup, sd);
    /* Cards start out enabled. */
    sd->enable = true;
}
/* QOM instance finalize: release resources created in sd_instance_init(). */
static void sd_instance_finalize(Object *obj)
{
    SDState *sd = SD_CARD(obj);
    /* Detach the timer from the active-timer list before freeing it. */
    timer_del(sd->ocr_power_timer);
    timer_free(sd->ocr_power_timer);
}
/*
 * QOM realize: validate the configured properties and, when a backing
 * drive is attached, claim the block-layer permissions the card needs.
 */
static void sd_realize(DeviceState *dev, Error **errp)
{
    SDState *sd = SD_CARD(dev);
    int ret;

    sd->proto_name = sd->spi ? "SPI" : "SD";

    /* Range check equivalent to the SPECv1_10 ... SPECv3_01 case range. */
    if (sd->spec_version < SD_PHY_SPECv1_10_VERS ||
        sd->spec_version > SD_PHY_SPECv3_01_VERS) {
        error_setg(errp, "Invalid SD card Spec version: %u", sd->spec_version);
        return;
    }

    if (sd->blk) {
        if (blk_is_read_only(sd->blk)) {
            error_setg(errp, "Cannot use read-only drive as SD card");
            return;
        }
        ret = blk_set_perm(sd->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
                           BLK_PERM_ALL, errp);
        if (ret < 0) {
            return;
        }
        blk_set_dev_ops(sd->blk, &sd_block_ops, sd);
    }
}
static Property sd_properties[] = {
    /* SD physical-layer spec version the card reports (default: 2.00). */
    DEFINE_PROP_UINT8("spec_version", SDState,
                      spec_version, SD_PHY_SPECv2_00_VERS),
    /* Optional block backend providing the card contents. */
    DEFINE_PROP_DRIVE("drive", SDState, blk),
    /* We do not model the chip select pin, so allow the board to select
     * whether card should be in SSI or MMC/SD mode. It is also up to the
     * board to ensure that ssi transfers only occur when the chip select
     * is asserted. */
    DEFINE_PROP_BOOL("spi", SDState, spi, false),
    DEFINE_PROP_END_OF_LIST()
};
/* QOM class init: wire up device callbacks and the SD-bus operations. */
static void sd_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SDCardClass *sc = SD_CARD_CLASS(klass);

    /* Generic device behaviour. */
    dc->realize = sd_realize;
    dc->props = sd_properties;
    dc->vmsd = &sd_vmstate;
    dc->reset = sd_reset;
    dc->bus_type = TYPE_SD_BUS;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);

    /* SD-bus operations implemented by this card model. */
    sc->get_dat_lines = sd_get_dat_lines;
    sc->get_cmd_line = sd_get_cmd_line;
    sc->set_voltage = sd_set_voltage;
    sc->do_command = sd_do_command;
    sc->write_data = sd_write_data;
    sc->read_data = sd_read_data;
    sc->data_ready = sd_data_ready;
    sc->enable = sd_enable;
    sc->get_inserted = sd_get_inserted;
    sc->get_readonly = sd_get_readonly;
}
/* QOM type registration data for the TYPE_SD_CARD device. */
static const TypeInfo sd_info = {
    .name = TYPE_SD_CARD,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SDState),
    .class_size = sizeof(SDCardClass),
    .class_init = sd_class_init,
    .instance_init = sd_instance_init,
    .instance_finalize = sd_instance_finalize,
};
/* Register the SD card type with the QOM type system at startup. */
static void sd_register_types(void)
{
    type_register_static(&sd_info);
}
type_init(sd_register_types)
|
pmp-tool/PMP
|
src/qemu/src-pmp/target/s390x/translate_vx.inc.c
|
<filename>src/qemu/src-pmp/target/s390x/translate_vx.inc.c
/*
* QEMU TCG support -- s390x vector instruction translation functions
*
* Copyright (C) 2019 Red Hat Inc
*
* Authors:
* <NAME> <<EMAIL>>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
/*
* For most instructions that use the same element size for reads and
 * writes, we can use real gvec vector expansion, which potentially uses
* real host vector instructions. As they only work up to 64 bit elements,
* 128 bit elements (vector is a single element) have to be handled
* differently. Operations that are too complicated to encode via TCG ops
* are handled via gvec ool (out-of-line) handlers.
*
* As soon as instructions use different element sizes for reads and writes
* or access elements "out of their element scope" we expand them manually
* in fancy loops, as gvec expansion does not deal with actual element
* numbers and does also not support access to other elements.
*
* 128 bit elements:
* As we only have i32/i64, such elements have to be loaded into two
* i64 values and can then be processed e.g. by tcg_gen_add2_i64.
*
* Sizes:
* On s390x, the operand size (oprsz) and the maximum size (maxsz) are
* always 16 (128 bit). What gvec code calls "vece", s390x calls "es",
* a.k.a. "element size". These values nicely map to MO_8 ... MO_64. Only
* 128 bit element size has to be treated in a special way (MO_64 + 1).
* We will use ES_* instead of MO_* for this reason in this file.
*
* CC handling:
* As gvec ool-helpers can currently not return values (besides via
* pointers like vectors or cpu_env), whenever we have to set the CC and
* can't conclude the value from the result vector, we will directly
 * set it in "env->cc_op" and mark it as static via set_cc_static().
* Whenever this is done, the helper writes globals (cc_op).
*/
/*
 * Element-size helpers: an s390x vector register is always 16 bytes wide,
 * so the element count follows directly from the element size "es".
 */
#define NUM_VEC_ELEMENT_BYTES(es) (1 << (es))
#define NUM_VEC_ELEMENTS(es) (16 / NUM_VEC_ELEMENT_BYTES(es))
#define NUM_VEC_ELEMENT_BITS(es) (NUM_VEC_ELEMENT_BYTES(es) * BITS_PER_BYTE)
/* Element sizes map 1:1 onto TCG memory ops; 128 bit gets its own value. */
#define ES_8 MO_8
#define ES_16 MO_16
#define ES_32 MO_32
#define ES_64 MO_64
#define ES_128 4
/* Return true if element number @enr exists for element size @es. */
static inline bool valid_vec_element(uint8_t enr, TCGMemOp es)
{
    return (enr & ~(NUM_VEC_ELEMENTS(es) - 1)) == 0;
}
/*
 * Load element @enr of vector register @reg into @dst, zero- or
 * sign-extending to 64 bit according to @memop (ES_* plus optional MO_SIGN).
 */
static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr,
                                 TCGMemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
    switch (memop) {
    case ES_8:
        tcg_gen_ld8u_i64(dst, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_ld16u_i64(dst, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_ld32u_i64(dst, cpu_env, offs);
        break;
    case ES_8 | MO_SIGN:
        tcg_gen_ld8s_i64(dst, cpu_env, offs);
        break;
    case ES_16 | MO_SIGN:
        tcg_gen_ld16s_i64(dst, cpu_env, offs);
        break;
    case ES_32 | MO_SIGN:
        tcg_gen_ld32s_i64(dst, cpu_env, offs);
        break;
    case ES_64:
    case ES_64 | MO_SIGN:
        /* full doubleword: signed and unsigned loads are identical */
        tcg_gen_ld_i64(dst, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Store the low @memop-sized part of @src into element @enr of vector
 * register @reg.
 */
static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr,
                                  TCGMemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
    switch (memop) {
    case ES_8:
        tcg_gen_st8_i64(src, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_st16_i64(src, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_st32_i64(src, cpu_env, offs);
        break;
    case ES_64:
        tcg_gen_st_i64(src, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Compute, at runtime, a host pointer to element @enr (an i64 value) of
 * vector register @reg for element size @es, storing it into @ptr.
 */
static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
                                    uint8_t es)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    /* mask off invalid parts from the element nr */
    tcg_gen_andi_i64(tmp, enr, NUM_VEC_ELEMENTS(es) - 1);
    /* convert it to an element offset relative to cpu_env (vec_reg_offset() */
    tcg_gen_shli_i64(tmp, tmp, es);
#ifndef HOST_WORDS_BIGENDIAN
    /* on little-endian hosts, elements are swapped within each doubleword */
    tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
#endif
    tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg));
    /* generate the final ptr by adding cpu_env */
    tcg_gen_trunc_i64_ptr(ptr, tmp);
    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_temp_free_i64(tmp);
}
/*
 * Convenience wrappers around the generic gvec expanders: on s390x both
 * the operand size and the maximum size are always 16 bytes.
 */
#define gen_gvec_3_ool(v1, v2, v3, data, fn) \
    tcg_gen_gvec_3_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), 16, 16, data, fn)
#define gen_gvec_3_ptr(v1, v2, v3, ptr, data, fn) \
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), ptr, 16, 16, data, fn)
#define gen_gvec_4(v1, v2, v3, v4, gen) \
    tcg_gen_gvec_4(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                   16, 16, gen)
#define gen_gvec_4_ool(v1, v2, v3, v4, data, fn) \
    tcg_gen_gvec_4_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                       16, 16, data, fn)
#define gen_gvec_dup_i64(es, v1, c) \
    tcg_gen_gvec_dup_i64(es, vec_full_reg_offset(v1), 16, 16, c)
#define gen_gvec_mov(v1, v2) \
    tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \
                     16)
#define gen_gvec_dup64i(v1, c) \
    tcg_gen_gvec_dup64i(vec_full_reg_offset(v1), 16, 16, c)
/* Replicate constant @c across all elements of @reg for element size @es. */
static void gen_gvec_dupi(uint8_t es, uint8_t reg, uint64_t c)
{
    switch (es) {
    case ES_8:
        tcg_gen_gvec_dup8i(vec_full_reg_offset(reg), 16, 16, c);
        break;
    case ES_16:
        tcg_gen_gvec_dup16i(vec_full_reg_offset(reg), 16, 16, c);
        break;
    case ES_32:
        tcg_gen_gvec_dup32i(vec_full_reg_offset(reg), 16, 16, c);
        break;
    case ES_64:
        gen_gvec_dup64i(reg, c);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Clear all 128 bits of vector register @reg. */
static void zero_vec(uint8_t reg)
{
    tcg_gen_gvec_dup8i(vec_full_reg_offset(reg), 16, 16, 0);
}
/*
 * Gather element: add element @enr of v2 to the effective address, load
 * one element from that address and store it into element @enr of v1.
 */
static DisasJumpType op_vge(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s->fields, m3);
    TCGv_i64 tmp;
    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s->fields, v2), enr, es);
    tcg_gen_add_i64(o->addr1, o->addr1, tmp);
    /* re-apply addressing-mode wrapping after the addition */
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);
    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    write_vec_element_i64(tmp, get_field(s->fields, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/*
 * Expand an 8-bit mask into a 64-bit byte mask: bit i of @mask selects
 * whether byte i of the result is 0xff or 0x00.
 */
static uint64_t generate_byte_mask(uint8_t mask)
{
    uint64_t r = 0;
    int bit;

    /* Walk from the most significant mask bit down, shifting bytes in. */
    for (bit = 7; bit >= 0; bit--) {
        r <<= 8;
        if (mask & (1u << bit)) {
            r |= 0xff;
        }
    }
    return r;
}
/*
 * Vector generate byte mask: each of the 16 bits of i2 selects whether
 * the corresponding byte of v1 becomes 0xff or 0x00.
 */
static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o)
{
    const uint16_t i2 = get_field(s->fields, i2);
    if (i2 == (i2 & 0xff) * 0x0101) {
        /*
         * Masks for both 64 bit elements of the vector are the same.
         * Trust tcg to produce a good constant loading.
         */
        gen_gvec_dup64i(get_field(s->fields, v1),
                        generate_byte_mask(i2 & 0xff));
    } else {
        TCGv_i64 t = tcg_temp_new_i64();
        /* upper byte of i2 -> leftmost doubleword, lower byte -> rightmost */
        tcg_gen_movi_i64(t, generate_byte_mask(i2 >> 8));
        write_vec_element_i64(t, get_field(s->fields, v1), 0, ES_64);
        tcg_gen_movi_i64(t, generate_byte_mask(i2));
        write_vec_element_i64(t, get_field(s->fields, v1), 1, ES_64);
        tcg_temp_free_i64(t);
    }
    return DISAS_NEXT;
}
/*
 * Vector generate mask: set the bit range i2..i3 (wrapping around if
 * i2 > i3) within every element of v1.
 */
static DisasJumpType op_vgm(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    const uint8_t bits = NUM_VEC_ELEMENT_BITS(es);
    const uint8_t i2 = get_field(s->fields, i2) & (bits - 1);
    const uint8_t i3 = get_field(s->fields, i3) & (bits - 1);
    uint64_t mask = 0;
    int i;
    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    /* generate the mask - take care of wrapping */
    for (i = i2; ; i = (i + 1) % bits) {
        /* bit 0 is the leftmost (most significant) bit of the element */
        mask |= 1ull << (bits - i - 1);
        if (i == i3) {
            break;
        }
    }
    gen_gvec_dupi(es, get_field(s->fields, v1), mask);
    return DISAS_NEXT;
}
/*
 * Vector load: load a full 128-bit vector register from memory as two
 * big-endian doublewords.
 */
static DisasJumpType op_vl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
    write_vec_element_i64(t0, get_field(s->fields, v1), 0, ES_64);
    write_vec_element_i64(t1, get_field(s->fields, v1), 1, ES_64);
    /*
     * Free the temporaries with the type-specific helper: the generic
     * tcg_temp_free() used previously only happened to work because TCGv
     * is i64 on s390x; every other function in this file uses the _i64
     * variant for TCGv_i64 temporaries.
     */
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
/* Vector load register: copy all 128 bits of v2 into v1. */
static DisasJumpType op_vlr(DisasContext *s, DisasOps *o)
{
    gen_gvec_mov(get_field(s->fields, v1), get_field(s->fields, v2));
    return DISAS_NEXT;
}
/*
 * Vector load and replicate: load a single element from memory and
 * replicate it across all elements of v1.
 */
static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m3);
    TCGv_i64 tmp;
    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    gen_gvec_dup_i64(es, get_field(s->fields, v1), tmp);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/* Vector load element: load one element from memory into element @enr of v1. */
static DisasJumpType op_vle(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s->fields, m3);
    TCGv_i64 tmp;
    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    write_vec_element_i64(tmp, get_field(s->fields, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/*
 * Vector load element immediate: store the sign-extended 16-bit immediate
 * into element @enr of v1.
 */
static DisasJumpType op_vlei(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s->fields, m3);
    TCGv_i64 tmp;
    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    tmp = tcg_const_i64((int16_t)get_field(s->fields, i2));
    write_vec_element_i64(tmp, get_field(s->fields, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/*
 * Load one element of v3 into the output (general) register. The element
 * number is taken from the second-operand address (b2/d2).
 */
static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    TCGv_ptr ptr;
    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    /* fast path if we don't need the register content */
    if (!get_field(s->fields, b2)) {
        /* element number is a translation-time constant */
        uint8_t enr = get_field(s->fields, d2) & (NUM_VEC_ELEMENTS(es) - 1);
        read_vec_element_i64(o->out, get_field(s->fields, v3), enr, es);
        return DISAS_NEXT;
    }
    /* otherwise compute a host pointer to the element at runtime */
    ptr = tcg_temp_new_ptr();
    get_vec_element_ptr_i64(ptr, get_field(s->fields, v3), o->addr1, es);
    switch (es) {
    case ES_8:
        tcg_gen_ld8u_i64(o->out, ptr, 0);
        break;
    case ES_16:
        tcg_gen_ld16u_i64(o->out, ptr, 0);
        break;
    case ES_32:
        tcg_gen_ld32u_i64(o->out, ptr, 0);
        break;
    case ES_64:
        tcg_gen_ld_i64(o->out, ptr, 0);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_ptr(ptr);
    return DISAS_NEXT;
}
/*
 * Load one element from memory into the leftmost doubleword of v1 and
 * zero out all other bits of the vector.
 */
static DisasJumpType op_vllez(DisasContext *s, DisasOps *o)
{
    uint8_t es = get_field(s->fields, m3);
    uint8_t enr;
    TCGv_i64 t;
    switch (es) {
    /* rightmost sub-element of leftmost doubleword */
    case ES_8:
        enr = 7;
        break;
    case ES_16:
        enr = 3;
        break;
    case ES_32:
        enr = 1;
        break;
    case ES_64:
        enr = 0;
        break;
    /* leftmost sub-element of leftmost doubleword */
    case 6:
        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            es = ES_32;
            enr = 0;
            break;
        }
        /* fallthrough: m3 == 6 without the facility is invalid */
    default:
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    t = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
    zero_vec(get_field(s->fields, v1));
    write_vec_element_i64(t, get_field(s->fields, v1), enr, es);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
/*
 * Vector load multiple: load registers v1 through v3 from consecutive
 * 16-byte blocks at the second-operand address.
 */
static DisasJumpType op_vlm(DisasContext *s, DisasOps *o)
{
    const uint8_t v3 = get_field(s->fields, v3);
    uint8_t v1 = get_field(s->fields, v1);
    TCGv_i64 t0, t1;
    /* at most 16 registers, and the range must not wrap */
    if (v3 < v1 || (v3 - v1 + 1) > 16) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    /*
     * Check for possible access exceptions by trying to load the last
     * element. The first element will be checked first next.
     */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8);
    tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ)<br/>;
    for (;; v1++) {
        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
        write_vec_element_i64(t1, v1, 0, ES_64);
        if (v1 == v3) {
            break;
        }
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
        write_vec_element_i64(t1, v1, 1, ES_64);
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    }
    /* Store the last element, loaded first */
    write_vec_element_i64(t0, v1, 1, ES_64);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
/*
 * Vector load to block boundary: load only the bytes from the operand
 * address up to the next block boundary (block size selected by m3).
 */
static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6));
    const int v1_offs = vec_full_reg_offset(get_field(s->fields, v1));
    TCGv_ptr a0;
    TCGv_i64 bytes;
    if (get_field(s->fields, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    bytes = tcg_temp_new_i64();
    a0 = tcg_temp_new_ptr();
    /* calculate the number of bytes until the next block boundary */
    /* i.e. -(addr | -block_size) == block_size - (addr % block_size) */
    tcg_gen_ori_i64(bytes, o->addr1, -block_size);
    tcg_gen_neg_i64(bytes, bytes);
    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vll(cpu_env, a0, o->addr1, bytes);
    tcg_temp_free_i64(bytes);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}
/*
 * Store a general register value into one element of v1. The element
 * number is taken from the second-operand address (b2/d2).
 */
static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    TCGv_ptr ptr;
    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    /* fast path if we don't need the register content */
    if (!get_field(s->fields, b2)) {
        /* element number is a translation-time constant */
        uint8_t enr = get_field(s->fields, d2) & (NUM_VEC_ELEMENTS(es) - 1);
        write_vec_element_i64(o->in2, get_field(s->fields, v1), enr, es);
        return DISAS_NEXT;
    }
    /* otherwise compute a host pointer to the element at runtime */
    ptr = tcg_temp_new_ptr();
    get_vec_element_ptr_i64(ptr, get_field(s->fields, v1), o->addr1, es);
    switch (es) {
    case ES_8:
        tcg_gen_st8_i64(o->in2, ptr, 0);
        break;
    case ES_16:
        tcg_gen_st16_i64(o->in2, ptr, 0);
        break;
    case ES_32:
        tcg_gen_st32_i64(o->in2, ptr, 0);
        break;
    case ES_64:
        tcg_gen_st_i64(o->in2, ptr, 0);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_ptr(ptr);
    return DISAS_NEXT;
}
/*
 * Build a vector from two general registers: in1 becomes the leftmost
 * doubleword of v1, in2 the rightmost.
 */
static DisasJumpType op_vlvgp(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s->fields, v1);

    write_vec_element_i64(o->in2, v1, 1, ES_64);
    write_vec_element_i64(o->in1, v1, 0, ES_64);
    return DISAS_NEXT;
}
/*
 * Vector load with length: load in2 + 1 bytes (highest byte index in in2)
 * into v1 via the vll helper.
 */
static DisasJumpType op_vll(DisasContext *s, DisasOps *o)
{
    const int v1_offs = vec_full_reg_offset(get_field(s->fields, v1));
    TCGv_ptr a0 = tcg_temp_new_ptr();
    /* convert highest index into an actual length */
    tcg_gen_addi_i64(o->in2, o->in2, 1);
    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vll(cpu_env, a0, o->addr1, o->in2);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}
/*
 * Vector merge: interleave elements of v2 and v3 into v1. Even destination
 * elements come from v2, odd ones from v3; op2 selects whether the source
 * indices start at the leftmost (0x61) or the rightmost half of the inputs.
 */
static DisasJumpType op_vmr(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s->fields, v1);
    const uint8_t v2 = get_field(s->fields, v2);
    const uint8_t v3 = get_field(s->fields, v3);
    const uint8_t es = get_field(s->fields, m4);
    int dst_idx, src_idx;
    TCGv_i64 tmp;
    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    tmp = tcg_temp_new_i64();
    if (s->fields->op2 == 0x61) {
        /* iterate backwards to avoid overwriting data we might need later */
        for (dst_idx = NUM_VEC_ELEMENTS(es) - 1; dst_idx >= 0; dst_idx--) {
            src_idx = dst_idx / 2;
            if (dst_idx % 2 == 0) {
                read_vec_element_i64(tmp, v2, src_idx, es);
            } else {
                read_vec_element_i64(tmp, v3, src_idx, es);
            }
            write_vec_element_i64(tmp, v1, dst_idx, es);
        }
    } else {
        /* iterate forward to avoid overwriting data we might need later */
        for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(es); dst_idx++) {
            src_idx = (dst_idx + NUM_VEC_ELEMENTS(es)) / 2;
            if (dst_idx % 2 == 0) {
                read_vec_element_i64(tmp, v2, src_idx, es);
            } else {
                read_vec_element_i64(tmp, v3, src_idx, es);
            }
            write_vec_element_i64(tmp, v1, dst_idx, es);
        }
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/*
 * Vector pack: narrow each element of the v2:v3 pair to the next smaller
 * element size. op2 selects the variant: 0x94 plain truncation (VPK),
 * 0x97 signed saturation (VPKS), 0x95 logical saturation (VPKLS); the
 * saturating forms may additionally set the CC (m5 bit 0).
 */
static DisasJumpType op_vpk(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s->fields, v1);
    const uint8_t v2 = get_field(s->fields, v2);
    const uint8_t v3 = get_field(s->fields, v3);
    const uint8_t es = get_field(s->fields, m4);
    static gen_helper_gvec_3 * const vpk[3] = {
        gen_helper_gvec_vpk16,
        gen_helper_gvec_vpk32,
        gen_helper_gvec_vpk64,
    };
    static gen_helper_gvec_3 * const vpks[3] = {
        gen_helper_gvec_vpks16,
        gen_helper_gvec_vpks32,
        gen_helper_gvec_vpks64,
    };
    static gen_helper_gvec_3_ptr * const vpks_cc[3] = {
        gen_helper_gvec_vpks_cc16,
        gen_helper_gvec_vpks_cc32,
        gen_helper_gvec_vpks_cc64,
    };
    static gen_helper_gvec_3 * const vpkls[3] = {
        gen_helper_gvec_vpkls16,
        gen_helper_gvec_vpkls32,
        gen_helper_gvec_vpkls64,
    };
    static gen_helper_gvec_3_ptr * const vpkls_cc[3] = {
        gen_helper_gvec_vpkls_cc16,
        gen_helper_gvec_vpkls_cc32,
        gen_helper_gvec_vpkls_cc64,
    };
    if (es == ES_8 || es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    switch (s->fields->op2) {
    case 0x97:
        if (get_field(s->fields, m5) & 0x1) {
            gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpks_cc[es - 1]);
            set_cc_static(s);
        } else {
            gen_gvec_3_ool(v1, v2, v3, 0, vpks[es - 1]);
        }
        break;
    case 0x95:
        if (get_field(s->fields, m5) & 0x1) {
            gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpkls_cc[es - 1]);
            set_cc_static(s);
        } else {
            gen_gvec_3_ool(v1, v2, v3, 0, vpkls[es - 1]);
        }
        break;
    case 0x94:
        /* If sources and destination don't overlap -> fast path */
        if (v1 != v2 && v1 != v3) {
            const uint8_t src_es = get_field(s->fields, m4);
            const uint8_t dst_es = src_es - 1;
            TCGv_i64 tmp = tcg_temp_new_i64();
            int dst_idx, src_idx;
            for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
                src_idx = dst_idx;
                if (src_idx < NUM_VEC_ELEMENTS(src_es)) {
                    /* left half of the result comes from v2 ... */
                    read_vec_element_i64(tmp, v2, src_idx, src_es);
                } else {
                    /* ... and the right half from v3 */
                    src_idx -= NUM_VEC_ELEMENTS(src_es);
                    read_vec_element_i64(tmp, v3, src_idx, src_es);
                }
                write_vec_element_i64(tmp, v1, dst_idx, dst_es);
            }
            tcg_temp_free_i64(tmp);
        } else {
            gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return DISAS_NEXT;
}
/* Vector permute: select bytes of v2:v3 into v1 as directed by v4. */
static DisasJumpType op_vperm(DisasContext *s, DisasOps *o)
{
    gen_gvec_4_ool(get_field(s->fields, v1), get_field(s->fields, v2),
                   get_field(s->fields, v3), get_field(s->fields, v4),
                   0, gen_helper_gvec_vperm);
    return DISAS_NEXT;
}
/*
 * Vector permute doubleword immediate: v1 is built from doubleword i2 of
 * v2 (left half) and doubleword i3 of v3 (right half), selected by m4 bits.
 */
static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o)
{
    const uint8_t i2 = extract32(get_field(s->fields, m4), 2, 1);
    const uint8_t i3 = extract32(get_field(s->fields, m4), 0, 1);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    read_vec_element_i64(t0, get_field(s->fields, v2), i2, ES_64);
    read_vec_element_i64(t1, get_field(s->fields, v3), i3, ES_64);
    write_vec_element_i64(t0, get_field(s->fields, v1), 0, ES_64);
    write_vec_element_i64(t1, get_field(s->fields, v1), 1, ES_64);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
/* Vector replicate: broadcast element @enr of v3 into every element of v1. */
static DisasJumpType op_vrep(DisasContext *s, DisasOps *o)
{
    const uint8_t enr = get_field(s->fields, i2);
    const uint8_t es = get_field(s->fields, m4);
    if (es > ES_64 || !valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    tcg_gen_gvec_dup_mem(es, vec_full_reg_offset(get_field(s->fields, v1)),
                         vec_reg_offset(get_field(s->fields, v3), enr, es),
                         16, 16);
    return DISAS_NEXT;
}
/*
 * Vector replicate immediate: broadcast the sign-extended 16-bit immediate
 * into every element of v1.
 */
static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o)
{
    const int64_t data = (int16_t)get_field(s->fields, i2);
    const uint8_t es = get_field(s->fields, m3);
    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_dupi(es, get_field(s->fields, v1), data);
    return DISAS_NEXT;
}
/*
 * Scatter element: add element @enr of v2 to the effective address and
 * store element @enr of v1 to that address.
 */
static DisasJumpType op_vsce(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s->fields, m3);
    TCGv_i64 tmp;
    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s->fields, v2), enr, es);
    tcg_gen_add_i64(o->addr1, o->addr1, tmp);
    /* re-apply addressing-mode wrapping after the addition */
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);
    read_vec_element_i64(tmp, get_field(s->fields, v1), enr, es);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/*
 * Bitwise select on i64: d = (a & c) | (b & ~c). The temporary allows the
 * destination to alias any of the sources.
 */
static void gen_sel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
{
    TCGv_i64 t = tcg_temp_new_i64();
    /* bit in c not set -> copy bit from b */
    tcg_gen_andc_i64(t, b, c);
    /* bit in c set -> copy bit from a */
    tcg_gen_and_i64(d, a, c);
    /* merge the results */
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}
/* Vector variant of gen_sel_i64: d = (a & c) | (b & ~c) per element. */
static void gen_sel_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b,
                        TCGv_vec c)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    tcg_gen_andc_vec(vece, t, b, c);
    tcg_gen_and_vec(vece, d, a, c);
    tcg_gen_or_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
}
/*
 * Vector select: for each bit, v1 takes the bit from v2 where the
 * corresponding bit of v4 is set, otherwise from v3.
 */
static DisasJumpType op_vsel(DisasContext *s, DisasOps *o)
{
    static const GVecGen4 gvec_op = {
        .fni8 = gen_sel_i64,
        .fniv = gen_sel_vec,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    gen_gvec_4(get_field(s->fields, v1), get_field(s->fields, v2),
               get_field(s->fields, v3), get_field(s->fields, v4), &gvec_op);
    return DISAS_NEXT;
}
/*
 * Vector sign extend to doubleword: pick one element from each half of v2
 * and sign-extend it into the corresponding doubleword of v1.
 */
static DisasJumpType op_vseg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m3);
    int idx1, idx2;
    TCGv_i64 tmp;
    switch (es) {
    /* rightmost source element within each doubleword */
    case ES_8:
        idx1 = 7;
        idx2 = 15;
        break;
    case ES_16:
        idx1 = 3;
        idx2 = 7;
        break;
    case ES_32:
        idx1 = 1;
        idx2 = 3;
        break;
    default:
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s->fields, v2), idx1, es | MO_SIGN);
    write_vec_element_i64(tmp, get_field(s->fields, v1), 0, ES_64);
    read_vec_element_i64(tmp, get_field(s->fields, v2), idx2, es | MO_SIGN);
    write_vec_element_i64(tmp, get_field(s->fields, v1), 1, ES_64);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/*
 * Vector store: store all 128 bits of v1 to memory as two big-endian
 * doublewords.
 */
static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_const_i64(16);
    /* Probe write access before actually modifying memory */
    gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
    read_vec_element_i64(tmp,  get_field(s->fields, v1), 0, ES_64);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    read_vec_element_i64(tmp,  get_field(s->fields, v1), 1, ES_64);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/* Vector store element: store element @enr of v1 to memory. */
static DisasJumpType op_vste(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s->fields, m3);
    TCGv_i64 tmp;
    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s->fields, v1), enr, es);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/*
 * Vector store multiple: store registers v1 through v3 to consecutive
 * 16-byte blocks at the second-operand address.
 */
static DisasJumpType op_vstm(DisasContext *s, DisasOps *o)
{
    const uint8_t v3 = get_field(s->fields, v3);
    uint8_t v1 = get_field(s->fields, v1);
    TCGv_i64 tmp;
    /*
     * At most 16 registers may be stored and the range must not wrap.
     * Use a plain "if": the previous "while" always returned on its first
     * iteration, so it was an if-statement in disguise (and inconsistent
     * with the identical check in op_vlm).
     */
    if (v3 < v1 || (v3 - v1 + 1) > 16) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    /* Probe write access before actually modifying memory */
    tmp = tcg_const_i64((v3 - v1 + 1) * 16);
    gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
    for (;; v1++) {
        read_vec_element_i64(tmp, v1, 0, ES_64);
        tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
        read_vec_element_i64(tmp, v1, 1, ES_64);
        tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
        if (v1 == v3) {
            break;
        }
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
/*
 * Vector store with length: store in2 + 1 bytes (highest byte index in
 * in2) of v1 via the vstl helper.
 */
static DisasJumpType op_vstl(DisasContext *s, DisasOps *o)
{
    const int v1_offs = vec_full_reg_offset(get_field(s->fields, v1));
    TCGv_ptr a0 = tcg_temp_new_ptr();
    /* convert highest index into an actual length */
    tcg_gen_addi_i64(o->in2, o->in2, 1);
    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vstl(cpu_env, a0, o->addr1, o->in2);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}
/*
 * Vector unpack: widen each element of one half of v2 to the next larger
 * element size in v1. op2 selects logical (0xd4/0xd5: zero-extend) vs
 * arithmetic (sign-extend), and which half of v2 is the source.
 */
static DisasJumpType op_vup(DisasContext *s, DisasOps *o)
{
    const bool logical = s->fields->op2 == 0xd4 || s->fields->op2 == 0xd5;
    const uint8_t v1 = get_field(s->fields, v1);
    const uint8_t v2 = get_field(s->fields, v2);
    const uint8_t src_es = get_field(s->fields, m3);
    const uint8_t dst_es = src_es + 1;
    int dst_idx, src_idx;
    TCGv_i64 tmp;
    if (src_es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    tmp = tcg_temp_new_i64();
    if (s->fields->op2 == 0xd7 || s->fields->op2 == 0xd5) {
        /* iterate backwards to avoid overwriting data we might need later */
        for (dst_idx = NUM_VEC_ELEMENTS(dst_es) - 1; dst_idx >= 0; dst_idx--) {
            /* source: leftmost half of v2 */
            src_idx = dst_idx;
            read_vec_element_i64(tmp, v2, src_idx,
                                 src_es | (logical ? 0 : MO_SIGN));
            write_vec_element_i64(tmp, v1, dst_idx, dst_es);
        }
    } else {
        /* iterate forward to avoid overwriting data we might need later */
        for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
            /* source: rightmost half of v2 */
            src_idx = dst_idx + NUM_VEC_ELEMENTS(src_es) / 2;
            read_vec_element_i64(tmp, v2, src_idx,
                                 src_es | (logical ? 0 : MO_SIGN));
            write_vec_element_i64(tmp, v1, dst_idx, dst_es);
        }
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/scsi/esp.c
|
/*
* QEMU ESP/NCR53C9x emulation
*
* Copyright (c) 2005-2006 <NAME>
* Copyright (c) 2012 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
/*
* On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
* also produced as NCR89C100. See
* http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
* and
* http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
*/
/* Assert the chip interrupt line, unless an interrupt is already pending. */
static void esp_raise_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        return;
    }
    s->rregs[ESP_RSTAT] |= STAT_INT;
    qemu_irq_raise(s->irq);
    trace_esp_raise_irq();
}
/* Deassert the chip interrupt line, if an interrupt is currently pending. */
static void esp_lower_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        return;
    }
    s->rregs[ESP_RSTAT] &= ~STAT_INT;
    qemu_irq_lower(s->irq);
    trace_esp_lower_irq();
}
/* Track the external DMA-enable signal; running a deferred callback when
 * DMA becomes available. */
void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (!level) {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
        return;
    }
    s->dma_enabled = 1;
    trace_esp_dma_enable();
    /* Run any command handler that was deferred while DMA was disabled. */
    if (s->dma_cb) {
        s->dma_cb(s);
        s->dma_cb = NULL;
    }
}
/* SCSI-layer notification that @req was cancelled: drop our reference if
 * it is the request currently in flight. */
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req != s->current_req) {
        return;
    }
    scsi_req_unref(s->current_req);
    s->current_req = NULL;
    s->current_dev = NULL;
}
/*
 * Fetch a SCSI command into @buf, either via DMA or from the chip's
 * internal FIFO. Returns the number of command bytes, or 0 when the
 * command cannot be fetched or the selected target does not exist.
 */
static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
{
    uint32_t dmalen;
    int target;
    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        /* the 24-bit transfer count is split across three 8-bit registers */
        dmalen = s->rregs[ESP_TCLO];
        dmalen |= s->rregs[ESP_TCMID] << 8;
        dmalen |= s->rregs[ESP_TCHI] << 16;
        if (dmalen > buflen) {
            return 0;
        }
        s->dma_memory_read(s->dma_opaque, buf, dmalen);
    } else {
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        /* NOTE(review): overwrites the first byte with bits of the third;
         * presumably reconstructing the bus-id/LUN byte from CDB data —
         * confirm against the do_cmd()/do_busid_cmd() consumers. */
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);
    /* the FIFO contents have been consumed */
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }
    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive: report disconnect to the guest */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return 0;
    }
    return dmalen;
}
/*
 * Start executing the SCSI command in @buf on the LUN selected by @busid,
 * then signal bus-service/function-complete status to the guest.
 */
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;
    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        /* positive datalen: data moves to the host, negative: to the device */
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
/* Dispatch a fetched command: byte 0 selects the bus id, the rest is the CDB. */
static void do_cmd(ESPState *s, uint8_t *buf)
{
    do_busid_cmd(s, buf + 1, buf[0]);
}
/* Handle "select with ATN": fetch the command and dispatch it. */
static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        /* DMA engine not ready yet: retry once it gets enabled. */
        s->dma_cb = handle_satn;
        return;
    }
    len = get_cmd(s, buf, sizeof(buf));
    if (len != 0) {
        do_cmd(s, buf);
    }
}
/* Handle "select without ATN": fetch the command and run it on LUN 0. */
static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        /* DMA engine not ready yet: retry once it gets enabled. */
        s->dma_cb = handle_s_without_atn;
        return;
    }
    len = get_cmd(s, buf, sizeof(buf));
    if (len != 0) {
        do_busid_cmd(s, buf, 0);
    }
}
/*
 * Handle "select with ATN and stop": latch the command into cmdbuf and
 * flag it (do_cmd) for later execution instead of dispatching directly.
 */
static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        /* DMA engine not ready yet: retry once it gets enabled */
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
/*
 * ICCS command: deliver the status byte plus a zero message byte to the
 * initiator, via DMA when enabled, otherwise through the PIO FIFO.
 */
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;       /* message byte */
    if (s->dma) {
        s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
    } else {
        /* PIO: expose the two bytes through the FIFO register */
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
/*
 * Complete a DMA transfer: set terminal count, zero the counters and
 * FIFO flags, and raise the completion interrupt.
 */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    s->rregs[ESP_TCHI] = 0;
    esp_raise_irq(s);
}
/*
 * Run one step of a DMA transfer: move up to dma_left bytes between the
 * board's DMA engine and the SCSI layer's current async buffer, then
 * decide whether the transfer is complete.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;
    len = s->dma_left;
    if (s->do_cmd) {
        /* Deferred-command mode: DMA the CDB bytes into cmdbuf instead */
        trace_esp_do_dma(s->cmdlen, len);
        assert (s->cmdlen <= sizeof(s->cmdbuf) &&
                len <= sizeof(s->cmdbuf) - s->cmdlen);
        s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    /* negative ti_size denotes a data-out (to device) transfer */
    to_device = (s->ti_size < 0);
    if (to_device) {
        s->dma_memory_read(s->dma_opaque, s->async_buf, len);
    } else {
        s->dma_memory_write(s->dma_opaque, s->async_buf, len);
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    /* ti_size counts toward zero in both directions */
    if (to_device)
        s->ti_size += len;
    else
        s->ti_size -= len;
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /* If there is still data to be read from the device then
           complete the DMA operation immediately.  Otherwise defer
           until the scsi layer has completed. */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }
    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
}
/*
 * Finish the current SCSI request: latch @status, raise the completion
 * interrupt and release the request.
 */
static void esp_report_command_complete(ESPState *s, uint32_t status)
{
    trace_esp_command_complete();
    if (s->ti_size != 0) {
        /* Data was still outstanding; trace it but complete anyway */
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
/*
 * SCSIBusInfo.complete callback.  If the guest has not yet acknowledged
 * the previous interrupt, the completion is deferred and delivered when
 * ESP_RINTR is read (see esp_reg_read).
 */
void esp_command_complete(SCSIRequest *req, uint32_t status,
                          size_t resid)
{
    ESPState *s = req->hba_private;
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        /* Defer handling command complete until the previous
         * interrupt has been handled.
         */
        trace_esp_command_complete_deferred();
        s->deferred_status = status;
        s->deferred_complete = true;
        return;
    }
    esp_report_command_complete(s, status);
}
/*
 * SCSIBusInfo.transfer_data callback: the SCSI layer has a new buffer of
 * @len bytes ready.  Resume any in-flight DMA, or deliver the deferred
 * completion interrupt for the final chunk.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    assert(!s->do_cmd);
    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /* If this was the last part of a DMA transfer then the
           completion interrupt is deferred to here. */
        esp_dma_done(s);
    }
}
/*
 * Transfer Information command: move data (or a previously latched
 * command) between initiator and target, in PIO or DMA mode.
 *
 * Fixes vs. original: single-statement bodies are braced and operator
 * spacing normalized per QEMU coding style; behavior is unchanged.
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    if (s->dma && !s->dma_enabled) {
        /* DMA engine not enabled yet: retry from the enable callback */
        s->dma_cb = handle_ti;
        return;
    }

    /* 24-bit transfer counter; zero means the maximum, 64K */
    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    /* Clamp to the command buffer or the pending transfer size;
     * negative ti_size denotes a data-out (to device) transfer. */
    if (s->do_cmd) {
        minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
    } else if (s->ti_size < 0) {
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    } else {
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    }
    trace_esp_handle_ti(minlen);

    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    }
    if (s->do_cmd) {
        /* A command latched by SELECT-with-ATN-stop executes now */
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
    }
}
/* Power-on reset: clear all register and transfer state. */
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;
    s->rregs[ESP_CFG1] = 7;     /* reset value of CFG1 */
}
/* Chip-reset command: drop the interrupt line, then do a full reset. */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    esp_hard_reset(s);
}
/* Reset GPIO line handler: a non-zero level resets the chip. */
static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}
/*
 * Read one ESP register.  Reading the FIFO pops a byte; reading RINTR
 * acknowledges the interrupt and may deliver a deferred completion.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t old_val;
    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            /* Pop the next FIFO byte into the register */
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            /* FIFO drained: rewind both pointers */
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /* Clear sequence step, interrupt register and all status bits
           except TC */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            /* Deliver the completion that arrived while INT was pending */
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        return old_val;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            return s->chip_id;
        }
        /* fall through */
    default:
        break;
    }
    return s->rregs[saddr];
}
/*
 * Write one ESP register.  Writes to ESP_CMD execute chip commands;
 * the transfer counter and FIFO writes have internal side effects.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Programming the counter clears the terminal-count flag */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            /* Accumulate bytes of a deferred (SATN-stop) command */
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            /* NOTE(review): this also rejects the final slot, so only
             * TI_BUFSZ - 1 bytes are usable -- confirm intended */
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
            s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
        } else {
            s->dma = 0;
        }
        switch(val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            //s->ti_size = 0;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            /* CFG1_RESREPT suppresses the reset-report interrupt */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Config registers read back what was written */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    /* Record the write in the write-register shadow */
    s->wregs[saddr] = val;
}
/*
 * MMIO access filter: byte accesses are always allowed, 32-bit accesses
 * only for writes.
 */
static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    if (size == 1) {
        return true;
    }
    return is_write && size == 4;
}
/* Migration state for ESPState (version 4, accepts version-3 streams). */
const VMStateDescription vmstate_esp = {
    .name ="esp",
    .version_id = 4,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        /* cmdbuf migrated in two chunks for stream compatibility */
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
/* Sysbus MMIO write: registers are spaced 1 << it_shift bytes apart. */
static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;

    esp_reg_write(&sysbus->esp, addr >> sysbus->it_shift, val);
}
/* Sysbus MMIO read: registers are spaced 1 << it_shift bytes apart. */
static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;

    return esp_reg_read(&sysbus->esp, addr >> sysbus->it_shift);
}
/* MMIO ops for the sysbus ESP; size rules enforced by esp_mem_accepts. */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
/* SCSI bus callbacks: no tagged queuing, targets up to ESP_MAX_DEVS,
 * LUNs 0-7. */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
/* GPIO demux: line 0 is the reset line, line 1 the DMA-enable line. */
static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = ESP_STATE(opaque);

    if (irq == 0) {
        parent_esp_reset(&sysbus->esp, irq, level);
    } else if (irq == 1) {
        esp_dma_enable(opaque, irq, level);
    }
}
/* Realize the sysbus ESP: IRQ, MMIO region, GPIO lines and SCSI bus. */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = ESP_STATE(dev);
    ESPState *s = &sysbus->esp;
    sysbus_init_irq(sbd, &s->irq);
    /* it_shift must have been configured by the creating board code */
    assert(sysbus->it_shift != -1);
    s->chip_id = TCHI_FAS100A;
    /* One register every 1 << it_shift bytes */
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    /* GPIO 0: reset, GPIO 1: DMA enable (demuxed below) */
    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);
    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}
/* DeviceClass.reset handler: forward to the common ESP reset. */
static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = ESP_STATE(dev);
    esp_hard_reset(&sysbus->esp);
}
/* Migration wrapper: embeds the common ESPState description. */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
/* QOM class init: wire realize, reset and migration for TYPE_ESP. */
static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
/* QOM type registration info for the sysbus ESP device. */
static const TypeInfo sysbus_esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};
/* Register the ESP device type with QOM at startup. */
static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
}
type_init(esp_register_types)
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/ppc/pnv_psi.c
|
<reponame>pmp-tool/PMP
/*
* QEMU PowerPC PowerNV Processor Service Interface (PSI) model
*
* Copyright (c) 2015-2017, IBM Corporation.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "target/ppc/cpu.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "monitor/monitor.h"
#include "exec/address-spaces.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_psi.h"
#include <libfdt.h>
/*
 * PSI Host Bridge register definitions (POWER8), indices in XSCOM space.
 */
#define PSIHB_XSCOM_FIR_RW 0x00
#define PSIHB_XSCOM_FIR_AND 0x01
#define PSIHB_XSCOM_FIR_OR 0x02
#define PSIHB_XSCOM_FIRMASK_RW 0x03
#define PSIHB_XSCOM_FIRMASK_AND 0x04
#define PSIHB_XSCOM_FIRMASK_OR 0x05
#define PSIHB_XSCOM_FIRACT0 0x06
#define PSIHB_XSCOM_FIRACT1 0x07
/* Host Bridge Base Address Register */
#define PSIHB_XSCOM_BAR 0x0a
#define PSIHB_BAR_EN 0x0000000000000001ull
/* FSP Base Address Register */
#define PSIHB_XSCOM_FSPBAR 0x0b
/* PSI Host Bridge Control/Status Register */
#define PSIHB_XSCOM_CR 0x0e
#define PSIHB_CR_FSP_CMD_ENABLE 0x8000000000000000ull
#define PSIHB_CR_FSP_MMIO_ENABLE 0x4000000000000000ull
#define PSIHB_CR_FSP_IRQ_ENABLE 0x1000000000000000ull
#define PSIHB_CR_FSP_ERR_RSP_ENABLE 0x0800000000000000ull
#define PSIHB_CR_PSI_LINK_ENABLE 0x0400000000000000ull
#define PSIHB_CR_FSP_RESET 0x0200000000000000ull
#define PSIHB_CR_PSIHB_RESET 0x0100000000000000ull
#define PSIHB_CR_PSI_IRQ 0x0000800000000000ull
#define PSIHB_CR_FSP_IRQ 0x0000400000000000ull
#define PSIHB_CR_FSP_LINK_ACTIVE 0x0000200000000000ull
#define PSIHB_CR_IRQ_CMD_EXPECT 0x0000010000000000ull
/* and more ... */
/* PSIHB Status / Error Mask Register */
#define PSIHB_XSCOM_SEMR 0x0f
/* XIVR, to signal interrupts to the CEC firmware. more XIVR below. */
#define PSIHB_XSCOM_XIVR_FSP 0x10
#define PSIHB_XIVR_SERVER_SH 40
#define PSIHB_XIVR_SERVER_MSK (0xffffull << PSIHB_XIVR_SERVER_SH)
#define PSIHB_XIVR_PRIO_SH 32
#define PSIHB_XIVR_PRIO_MSK (0xffull << PSIHB_XIVR_PRIO_SH)
#define PSIHB_XIVR_SRC_SH 29
#define PSIHB_XIVR_SRC_MSK (0x7ull << PSIHB_XIVR_SRC_SH)
#define PSIHB_XIVR_PENDING 0x01000000ull
/* PSI Host Bridge Set Control/ Status Register */
#define PSIHB_XSCOM_SCR 0x12
/* PSI Host Bridge Clear Control/ Status Register */
#define PSIHB_XSCOM_CCR 0x13
/* DMA Upper Address Register */
#define PSIHB_XSCOM_DMA_UPADD 0x14
/* Interrupt Status */
#define PSIHB_XSCOM_IRQ_STAT 0x15
#define PSIHB_IRQ_STAT_OCC 0x0000001000000000ull
#define PSIHB_IRQ_STAT_FSI 0x0000000800000000ull
#define PSIHB_IRQ_STAT_LPCI2C 0x0000000400000000ull
#define PSIHB_IRQ_STAT_LOCERR 0x0000000200000000ull
#define PSIHB_IRQ_STAT_EXT 0x0000000100000000ull
/* remaining XIVR */
#define PSIHB_XSCOM_XIVR_OCC 0x16
#define PSIHB_XSCOM_XIVR_FSI 0x17
#define PSIHB_XSCOM_XIVR_LPCI2C 0x18
#define PSIHB_XSCOM_XIVR_LOCERR 0x19
#define PSIHB_XSCOM_XIVR_EXT 0x1a
/* Interrupt Requester Source Compare Register */
#define PSIHB_XSCOM_IRSN 0x1b
#define PSIHB_IRSN_COMP_SH 45
#define PSIHB_IRSN_COMP_MSK (0x7ffffull << PSIHB_IRSN_COMP_SH)
#define PSIHB_IRSN_IRQ_MUX 0x0000000800000000ull
#define PSIHB_IRSN_IRQ_RESET 0x0000000400000000ull
#define PSIHB_IRSN_DOWNSTREAM_EN 0x0000000200000000ull
#define PSIHB_IRSN_UPSTREAM_EN 0x0000000100000000ull
#define PSIHB_IRSN_COMPMASK_SH 13
#define PSIHB_IRSN_COMPMASK_MSK (0x7ffffull << PSIHB_IRSN_COMPMASK_SH)
/* Valid BAR bits differ between POWER8 and POWER9 */
#define PSIHB_BAR_MASK 0x0003fffffff00000ull
#define PSIHB_FSPBAR_MASK 0x0003ffff00000000ull
#define PSIHB9_BAR_MASK 0x00fffffffff00000ull
#define PSIHB9_FSPBAR_MASK 0x00ffffff00000000ull
/* Map an MMIO byte offset to a register index (registers are 8 bytes) */
#define PSIHB_REG(addr) (((addr) >> 3) + PSIHB_XSCOM_BAR)
/*
 * Program the PSIHB BAR: remap the register MMIO region to the new
 * address.  The region is mapped only while the enable bit is set.
 */
static void pnv_psi_set_bar(PnvPsi *psi, uint64_t bar)
{
    PnvPsiClass *ppc = PNV_PSI_GET_CLASS(psi);
    MemoryRegion *sysmem = get_system_memory();
    uint64_t old = psi->regs[PSIHB_XSCOM_BAR];
    psi->regs[PSIHB_XSCOM_BAR] = bar & (ppc->bar_mask | PSIHB_BAR_EN);
    /* Update MR, always remove it first */
    if (old & PSIHB_BAR_EN) {
        memory_region_del_subregion(sysmem, &psi->regs_mr);
    }
    /* Then add it back if needed */
    if (bar & PSIHB_BAR_EN) {
        uint64_t addr = bar & ppc->bar_mask;
        memory_region_add_subregion(sysmem, addr, &psi->regs_mr);
    }
}
/* Placeholder: called when FSP MMIO enable or the FSP BAR changes. */
static void pnv_psi_update_fsp_mr(PnvPsi *psi)
{
    /* TODO: Update FSP MR if/when we support FSP BAR */
}
/* Update the control register and react to an MMIO-enable flip. */
static void pnv_psi_set_cr(PnvPsi *psi, uint64_t cr)
{
    uint64_t changed = psi->regs[PSIHB_XSCOM_CR] ^ cr;

    psi->regs[PSIHB_XSCOM_CR] = cr;
    if (changed & PSIHB_CR_FSP_MMIO_ENABLE) {
        pnv_psi_update_fsp_mr(psi);
    }
}
/*
 * Interrupt Requester Source Compare Register write: store the masked
 * value and derive the global IRQ offset of the embedded ICS.
 */
static void pnv_psi_set_irsn(PnvPsi *psi, uint64_t val)
{
    ICSState *ics = &PNV8_PSI(psi)->ics;
    /* In this model we ignore the up/down enable bits for now
     * as SW doesn't use them (other than setting them at boot).
     * We ignore IRQ_MUX, its meaning isn't clear and we don't use
     * it and finally we ignore reset (XXX fix that ?)
     */
    psi->regs[PSIHB_XSCOM_IRSN] = val & (PSIHB_IRSN_COMP_MSK |
                                         PSIHB_IRSN_IRQ_MUX |
                                         PSIHB_IRSN_IRQ_RESET |
                                         PSIHB_IRSN_DOWNSTREAM_EN |
                                         PSIHB_IRSN_UPSTREAM_EN);
    /* We ignore the compare mask as well, our ICS emulation is too
     * simplistic to make any use if it, and we extract the offset
     * from the compare value
     */
    ics->offset = (val & PSIHB_IRSN_COMP_MSK) >> PSIHB_IRSN_COMP_SH;
}
/*
 * FSP and PSI interrupts are muxed under the same number.
 */
/* Per-IRQ XIVR register index */
static const uint32_t xivr_regs[] = {
    [PSIHB_IRQ_PSI] = PSIHB_XSCOM_XIVR_FSP,
    [PSIHB_IRQ_FSP] = PSIHB_XSCOM_XIVR_FSP,
    [PSIHB_IRQ_OCC] = PSIHB_XSCOM_XIVR_OCC,
    [PSIHB_IRQ_FSI] = PSIHB_XSCOM_XIVR_FSI,
    [PSIHB_IRQ_LPC_I2C] = PSIHB_XSCOM_XIVR_LPCI2C,
    [PSIHB_IRQ_LOCAL_ERR] = PSIHB_XSCOM_XIVR_LOCERR,
    [PSIHB_IRQ_EXTERNAL] = PSIHB_XSCOM_XIVR_EXT,
};
/* Per-IRQ register holding the level/status bit (CR for PSI/FSP) */
static const uint32_t stat_regs[] = {
    [PSIHB_IRQ_PSI] = PSIHB_XSCOM_CR,
    [PSIHB_IRQ_FSP] = PSIHB_XSCOM_CR,
    [PSIHB_IRQ_OCC] = PSIHB_XSCOM_IRQ_STAT,
    [PSIHB_IRQ_FSI] = PSIHB_XSCOM_IRQ_STAT,
    [PSIHB_IRQ_LPC_I2C] = PSIHB_XSCOM_IRQ_STAT,
    [PSIHB_IRQ_LOCAL_ERR] = PSIHB_XSCOM_IRQ_STAT,
    [PSIHB_IRQ_EXTERNAL] = PSIHB_XSCOM_IRQ_STAT,
};
/* Per-IRQ bit within the corresponding stat_regs[] register */
static const uint64_t stat_bits[] = {
    [PSIHB_IRQ_PSI] = PSIHB_CR_PSI_IRQ,
    [PSIHB_IRQ_FSP] = PSIHB_CR_FSP_IRQ,
    [PSIHB_IRQ_OCC] = PSIHB_IRQ_STAT_OCC,
    [PSIHB_IRQ_FSI] = PSIHB_IRQ_STAT_FSI,
    [PSIHB_IRQ_LPC_I2C] = PSIHB_IRQ_STAT_LPCI2C,
    [PSIHB_IRQ_LOCAL_ERR] = PSIHB_IRQ_STAT_LOCERR,
    [PSIHB_IRQ_EXTERNAL] = PSIHB_IRQ_STAT_EXT,
};
/* Dispatch an IRQ level change to the chip-generation specific handler. */
void pnv_psi_irq_set(PnvPsi *psi, int irq, bool state)
{
    PNV_PSI_GET_CLASS(psi)->irq_set(psi, irq, state);
}
/*
 * POWER8 PSI interrupt delivery: update the status bit for @irq, drive
 * the ICS input selected by the XIVR source field, and maintain the
 * XIVR pending bit.
 */
static void pnv_psi_power8_irq_set(PnvPsi *psi, int irq, bool state)
{
    uint32_t xivr_reg;
    uint32_t stat_reg;
    uint32_t src;
    bool masked;
    if (irq > PSIHB_IRQ_EXTERNAL) {
        qemu_log_mask(LOG_GUEST_ERROR, "PSI: Unsupported irq %d\n", irq);
        return;
    }
    xivr_reg = xivr_regs[irq];
    stat_reg = stat_regs[irq];
    src = (psi->regs[xivr_reg] & PSIHB_XIVR_SRC_MSK) >> PSIHB_XIVR_SRC_SH;
    if (state) {
        psi->regs[stat_reg] |= stat_bits[irq];
        /* TODO: optimization, check mask here. That means
         * re-evaluating when unmasking
         */
        qemu_irq_raise(psi->qirqs[src]);
    } else {
        psi->regs[stat_reg] &= ~stat_bits[irq];
        /* FSP and PSI are muxed so don't lower if either is still set */
        if (stat_reg != PSIHB_XSCOM_CR ||
            !(psi->regs[stat_reg] & (PSIHB_CR_PSI_IRQ | PSIHB_CR_FSP_IRQ))) {
            qemu_irq_lower(psi->qirqs[src]);
        } else {
            state = true;
        }
    }
    /* Note about the emulation of the pending bit: This isn't
     * entirely correct. The pending bit should be cleared when the
     * EOI has been received. However, we don't have callbacks on EOI
     * (especially not under KVM) so no way to emulate that properly,
     * so instead we just set that bit as the logical "output" of the
     * XIVR (ie pending & !masked)
     *
     * CLG: We could define a new ICS object with a custom eoi()
     * handler to clear the pending bit. But I am not sure this would
     * be useful for the software anyhow.
     */
    /* All-ones priority means the source is masked */
    masked = (psi->regs[xivr_reg] & PSIHB_XIVR_PRIO_MSK) == PSIHB_XIVR_PRIO_MSK;
    if (state && !masked) {
        psi->regs[xivr_reg] |= PSIHB_XIVR_PENDING;
    } else {
        psi->regs[xivr_reg] &= ~PSIHB_XIVR_PENDING;
    }
}
/*
 * XIVR write: latch server/priority/source and program the matching
 * ICS entry.  The pending bit is preserved across writes.
 */
static void pnv_psi_set_xivr(PnvPsi *psi, uint32_t reg, uint64_t val)
{
    ICSState *ics = &PNV8_PSI(psi)->ics;
    uint16_t server;
    uint8_t prio;
    uint8_t src;
    psi->regs[reg] = (psi->regs[reg] & PSIHB_XIVR_PENDING) |
            (val & (PSIHB_XIVR_SERVER_MSK |
                    PSIHB_XIVR_PRIO_MSK |
                    PSIHB_XIVR_SRC_MSK));
    val = psi->regs[reg];
    server = (val & PSIHB_XIVR_SERVER_MSK) >> PSIHB_XIVR_SERVER_SH;
    prio = (val & PSIHB_XIVR_PRIO_MSK) >> PSIHB_XIVR_PRIO_SH;
    src = (val & PSIHB_XIVR_SRC_MSK) >> PSIHB_XIVR_SRC_SH;
    if (src >= PSI_NUM_INTERRUPTS) {
        qemu_log_mask(LOG_GUEST_ERROR, "PSI: Unsupported irq %d\n", src);
        return;
    }
    /* Remove pending bit if the IRQ is masked */
    if ((psi->regs[reg] & PSIHB_XIVR_PRIO_MSK) == PSIHB_XIVR_PRIO_MSK) {
        psi->regs[reg] &= ~PSIHB_XIVR_PENDING;
    }
    /* The low order 2 bits are the link pointer (Type II interrupts).
     * Shift back to get a valid IRQ server.
     */
    server >>= 2;
    /* Now because of source remapping, weird things can happen
     * if you change the source number dynamically, our simple ICS
     * doesn't deal with remapping. So we just poke a different
     * ICS entry based on what source number was written. This will
     * do for now but a more accurate implementation would instead
     * use a fixed server/prio and a remapper of the generated irq.
     */
    ics_simple_write_xive(ics, src, server, prio, prio);
}
/*
 * Common P8 register read path for both XSCOM and MMIO accesses.
 * Unknown offsets log and return all-ones.
 */
static uint64_t pnv_psi_reg_read(PnvPsi *psi, uint32_t offset, bool mmio)
{
    uint64_t val = 0xffffffffffffffffull;
    switch (offset) {
    case PSIHB_XSCOM_FIR_RW:
    case PSIHB_XSCOM_FIRACT0:
    case PSIHB_XSCOM_FIRACT1:
    case PSIHB_XSCOM_BAR:
    case PSIHB_XSCOM_FSPBAR:
    case PSIHB_XSCOM_CR:
    case PSIHB_XSCOM_XIVR_FSP:
    case PSIHB_XSCOM_XIVR_OCC:
    case PSIHB_XSCOM_XIVR_FSI:
    case PSIHB_XSCOM_XIVR_LPCI2C:
    case PSIHB_XSCOM_XIVR_LOCERR:
    case PSIHB_XSCOM_XIVR_EXT:
    case PSIHB_XSCOM_IRQ_STAT:
    case PSIHB_XSCOM_SEMR:
    case PSIHB_XSCOM_DMA_UPADD:
    case PSIHB_XSCOM_IRSN:
        val = psi->regs[offset];
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "PSI: read at 0x%" PRIx32 "\n", offset);
    }
    return val;
}
/*
 * Common P8 register write path for both XSCOM and MMIO accesses.
 * @mmio distinguishes the access path (the BAR is XSCOM-only).
 */
static void pnv_psi_reg_write(PnvPsi *psi, uint32_t offset, uint64_t val,
                              bool mmio)
{
    switch (offset) {
    case PSIHB_XSCOM_FIR_RW:
    case PSIHB_XSCOM_FIRACT0:
    case PSIHB_XSCOM_FIRACT1:
    case PSIHB_XSCOM_SEMR:
    case PSIHB_XSCOM_DMA_UPADD:
        psi->regs[offset] = val;
        break;
    case PSIHB_XSCOM_FIR_OR:
        /* FIR_OR/FIR_AND set/clear bits in the FIR proper */
        psi->regs[PSIHB_XSCOM_FIR_RW] |= val;
        break;
    case PSIHB_XSCOM_FIR_AND:
        psi->regs[PSIHB_XSCOM_FIR_RW] &= val;
        break;
    case PSIHB_XSCOM_BAR:
        /* Only XSCOM can write this one */
        if (!mmio) {
            pnv_psi_set_bar(psi, val);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "PSI: invalid write of BAR\n");
        }
        break;
    case PSIHB_XSCOM_FSPBAR:
        psi->regs[PSIHB_XSCOM_FSPBAR] = val & PSIHB_FSPBAR_MASK;
        pnv_psi_update_fsp_mr(psi);
        break;
    case PSIHB_XSCOM_CR:
        pnv_psi_set_cr(psi, val);
        break;
    case PSIHB_XSCOM_SCR:
        /* SCR/CCR set/clear bits in the control register */
        pnv_psi_set_cr(psi, psi->regs[PSIHB_XSCOM_CR] | val);
        break;
    case PSIHB_XSCOM_CCR:
        pnv_psi_set_cr(psi, psi->regs[PSIHB_XSCOM_CR] & ~val);
        break;
    case PSIHB_XSCOM_XIVR_FSP:
    case PSIHB_XSCOM_XIVR_OCC:
    case PSIHB_XSCOM_XIVR_FSI:
    case PSIHB_XSCOM_XIVR_LPCI2C:
    case PSIHB_XSCOM_XIVR_LOCERR:
    case PSIHB_XSCOM_XIVR_EXT:
        pnv_psi_set_xivr(psi, offset, val);
        break;
    case PSIHB_XSCOM_IRQ_STAT:
        /* Read only */
        qemu_log_mask(LOG_GUEST_ERROR, "PSI: invalid write of IRQ_STAT\n");
        break;
    case PSIHB_XSCOM_IRSN:
        pnv_psi_set_irsn(psi, val);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "PSI: write at 0x%" PRIx32 "\n", offset);
    }
}
/*
* The values of the registers when accessed through the MMIO region
* follow the relation : xscom = (mmio + 0x50) >> 3
*/
/* MMIO read: translate the byte offset to a register index first. */
static uint64_t pnv_psi_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvPsi *psi = opaque;

    return pnv_psi_reg_read(psi, PSIHB_REG(addr), true);
}
/* MMIO write: translate the byte offset to a register index first. */
static void pnv_psi_mmio_write(void *opaque, hwaddr addr,
                               uint64_t val, unsigned size)
{
    PnvPsi *psi = opaque;

    pnv_psi_reg_write(psi, PSIHB_REG(addr), val, true);
}
/* MMIO ops for the P8 PSIHB register space: 8-byte big-endian only. */
static const MemoryRegionOps psi_mmio_ops = {
    .read = pnv_psi_mmio_read,
    .write = pnv_psi_mmio_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/* XSCOM read: registers are 8 bytes apart in XSCOM address space. */
static uint64_t pnv_psi_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    return pnv_psi_reg_read(opaque, addr >> 3, false);
}
/* XSCOM write: registers are 8 bytes apart in XSCOM address space. */
static void pnv_psi_xscom_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    PnvPsi *psi = opaque;

    pnv_psi_reg_write(psi, addr >> 3, val, false);
}
/* XSCOM ops for the P8 PSIHB register space: 8-byte big-endian only. */
static const MemoryRegionOps pnv_psi_xscom_ops = {
    .read = pnv_psi_xscom_read,
    .write = pnv_psi_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};
/* Reset handler: clear all registers, restore the BAR from the
 * "bar" device property and keep it enabled. */
static void pnv_psi_reset(void *dev)
{
    PnvPsi *psi = PNV_PSI(dev);
    memset(psi->regs, 0x0, sizeof(psi->regs));
    psi->regs[PSIHB_XSCOM_BAR] = psi->bar | PSIHB_BAR_EN;
}
/* P8 instance init: embed the simple ICS child object. */
static void pnv_psi_power8_instance_init(Object *obj)
{
    Pnv8Psi *psi8 = PNV8_PSI(obj);
    object_initialize_child(obj, "ics-psi", &psi8->ics, sizeof(psi8->ics),
                            TYPE_ICS_SIMPLE, &error_abort, NULL);
}
/* Default XIVR register for each of the PSI_NUM_INTERRUPTS sources */
static const uint8_t irq_to_xivr[] = {
    PSIHB_XSCOM_XIVR_FSP,
    PSIHB_XSCOM_XIVR_OCC,
    PSIHB_XSCOM_XIVR_FSI,
    PSIHB_XSCOM_XIVR_LPCI2C,
    PSIHB_XSCOM_XIVR_LOCERR,
    PSIHB_XSCOM_XIVR_EXT,
};
/*
 * P8 realize: wire the embedded ICS to the chip's XICS, create the
 * XSCOM and MMIO regions, and seed default XIVR values.
 */
static void pnv_psi_power8_realize(DeviceState *dev, Error **errp)
{
    PnvPsi *psi = PNV_PSI(dev);
    ICSState *ics = &PNV8_PSI(psi)->ics;
    Object *obj;
    Error *err = NULL;
    unsigned int i;
    obj = object_property_get_link(OBJECT(dev), "xics", &err);
    if (!obj) {
        error_setg(errp, "%s: required link 'xics' not found: %s",
                   __func__, error_get_pretty(err));
        return;
    }
    /* Create PSI interrupt control source */
    object_property_add_const_link(OBJECT(ics), ICS_PROP_XICS, obj,
                                   &error_abort);
    object_property_set_int(OBJECT(ics), PSI_NUM_INTERRUPTS, "nr-irqs", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    object_property_set_bool(OBJECT(ics), true, "realized",  &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* All PSI sources are level-sensitive */
    for (i = 0; i < ics->nr_irqs; i++) {
        ics_set_irq_type(ics, i, true);
    }
    psi->qirqs = qemu_allocate_irqs(ics_simple_set_irq, ics, ics->nr_irqs);
    /* XSCOM region for PSI registers */
    pnv_xscom_region_init(&psi->xscom_regs, OBJECT(dev), &pnv_psi_xscom_ops,
                psi, "xscom-psi", PNV_XSCOM_PSIHB_SIZE);
    /* Initialize MMIO region */
    memory_region_init_io(&psi->regs_mr, OBJECT(dev), &psi_mmio_ops, psi,
                          "psihb", PNV_PSIHB_SIZE);
    /* Default BAR for MMIO region */
    pnv_psi_set_bar(psi, psi->bar | PSIHB_BAR_EN);
    /* Default sources in XIVR */
    for (i = 0; i < PSI_NUM_INTERRUPTS; i++) {
        uint8_t xivr = irq_to_xivr[i];
        psi->regs[xivr] = PSIHB_XIVR_PRIO_MSK |
            ((uint64_t) i << PSIHB_XIVR_SRC_SH);
    }
    qemu_register_reset(pnv_psi_reset, dev);
}
static const char compat_p8[] = "ibm,power8-psihb-x\0ibm,psihb-x";
static const char compat_p9[] = "ibm,power9-psihb-x\0ibm,psihb-x";
/* Populate the psihb@... node under the chip's XSCOM device-tree node. */
static int pnv_psi_dt_xscom(PnvXScomInterface *dev, void *fdt, int xscom_offset)
{
    PnvPsiClass *ppc = PNV_PSI_GET_CLASS(dev);
    char *name;
    int offset;
    uint32_t reg[] = {
        cpu_to_be32(ppc->xscom_pcba),
        cpu_to_be32(ppc->xscom_size)
    };
    name = g_strdup_printf("psihb@%x", ppc->xscom_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);
    _FDT(fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)));
    _FDT(fdt_setprop_cell(fdt, offset, "#address-cells", 2));
    _FDT(fdt_setprop_cell(fdt, offset, "#size-cells", 1));
    /* Chip generation selects the compatible string */
    if (ppc->chip_type == PNV_CHIP_POWER9) {
        _FDT(fdt_setprop(fdt, offset, "compatible", compat_p9,
                         sizeof(compat_p9)));
    } else {
        _FDT(fdt_setprop(fdt, offset, "compatible", compat_p8,
                         sizeof(compat_p8)));
    }
    return 0;
}
/* Board-configurable BAR defaults for the PSIHB and FSP regions */
static Property pnv_psi_properties[] = {
    DEFINE_PROP_UINT64("bar", PnvPsi, bar, 0),
    DEFINE_PROP_UINT64("fsp-bar", PnvPsi, fsp_bar, 0),
    DEFINE_PROP_END_OF_LIST(),
};
/* P8 class init: realize hook plus chip-specific constants and masks. */
static void pnv_psi_power8_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvPsiClass *ppc = PNV_PSI_CLASS(klass);
    dc->desc    = "PowerNV PSI Controller POWER8";
    dc->realize = pnv_psi_power8_realize;
    ppc->chip_type  = PNV_CHIP_POWER8;
    ppc->xscom_pcba = PNV_XSCOM_PSIHB_BASE;
    ppc->xscom_size = PNV_XSCOM_PSIHB_SIZE;
    ppc->bar_mask   = PSIHB_BAR_MASK;
    ppc->irq_set    = pnv_psi_power8_irq_set;
}
/* QOM type registration info for the POWER8 PSI bridge. */
static const TypeInfo pnv_psi_power8_info = {
    .name          = TYPE_PNV8_PSI,
    .parent        = TYPE_PNV_PSI,
    .instance_size = sizeof(Pnv8Psi),
    .instance_init = pnv_psi_power8_instance_init,
    .class_init    = pnv_psi_power8_class_init,
};
/* POWER9 PSIHB register definitions (MMIO byte offsets) */
/* Common registers */
#define PSIHB9_CR                       0x20
#define PSIHB9_SEMR                     0x28
/* P9 registers */
#define PSIHB9_INTERRUPT_CONTROL        0x58
#define   PSIHB9_IRQ_METHOD             PPC_BIT(0)
#define   PSIHB9_IRQ_RESET              PPC_BIT(1)
#define PSIHB9_ESB_CI_BASE              0x60
#define   PSIHB9_ESB_CI_VALID           1
#define PSIHB9_ESB_NOTIF_ADDR           0x68
#define   PSIHB9_ESB_NOTIF_VALID        1
#define PSIHB9_IVT_OFFSET               0x70
#define   PSIHB9_IVT_OFF_SHIFT          32
#define PSIHB9_IRQ_LEVEL                0x78 /* assertion */
#define   PSIHB9_IRQ_LEVEL_PSI          PPC_BIT(0)
#define   PSIHB9_IRQ_LEVEL_OCC          PPC_BIT(1)
#define   PSIHB9_IRQ_LEVEL_FSI          PPC_BIT(2)
#define   PSIHB9_IRQ_LEVEL_LPCHC        PPC_BIT(3)
#define   PSIHB9_IRQ_LEVEL_LOCAL_ERR    PPC_BIT(4)
#define   PSIHB9_IRQ_LEVEL_GLOBAL_ERR   PPC_BIT(5)
#define   PSIHB9_IRQ_LEVEL_TPM          PPC_BIT(6)
#define   PSIHB9_IRQ_LEVEL_LPC_SIRQ1    PPC_BIT(7)
#define   PSIHB9_IRQ_LEVEL_LPC_SIRQ2    PPC_BIT(8)
#define   PSIHB9_IRQ_LEVEL_LPC_SIRQ3    PPC_BIT(9)
#define   PSIHB9_IRQ_LEVEL_LPC_SIRQ4    PPC_BIT(10)
#define   PSIHB9_IRQ_LEVEL_SBE_I2C      PPC_BIT(11)
#define   PSIHB9_IRQ_LEVEL_DIO          PPC_BIT(12)
#define   PSIHB9_IRQ_LEVEL_PSU          PPC_BIT(13)
#define   PSIHB9_IRQ_LEVEL_I2C_C        PPC_BIT(14)
#define   PSIHB9_IRQ_LEVEL_I2C_D        PPC_BIT(15)
#define   PSIHB9_IRQ_LEVEL_I2C_E        PPC_BIT(16)
#define   PSIHB9_IRQ_LEVEL_SBE          PPC_BIT(19)
#define PSIHB9_IRQ_STAT                 0x80 /* P bit */
#define   PSIHB9_IRQ_STAT_PSI           PPC_BIT(0)
#define   PSIHB9_IRQ_STAT_OCC           PPC_BIT(1)
#define   PSIHB9_IRQ_STAT_FSI           PPC_BIT(2)
#define   PSIHB9_IRQ_STAT_LPCHC         PPC_BIT(3)
#define   PSIHB9_IRQ_STAT_LOCAL_ERR     PPC_BIT(4)
#define   PSIHB9_IRQ_STAT_GLOBAL_ERR    PPC_BIT(5)
#define   PSIHB9_IRQ_STAT_TPM           PPC_BIT(6)
#define   PSIHB9_IRQ_STAT_LPC_SIRQ1     PPC_BIT(7)
#define   PSIHB9_IRQ_STAT_LPC_SIRQ2     PPC_BIT(8)
#define   PSIHB9_IRQ_STAT_LPC_SIRQ3     PPC_BIT(9)
#define   PSIHB9_IRQ_STAT_LPC_SIRQ4     PPC_BIT(10)
#define   PSIHB9_IRQ_STAT_SBE_I2C       PPC_BIT(11)
#define   PSIHB9_IRQ_STAT_DIO           PPC_BIT(12)
#define   PSIHB9_IRQ_STAT_PSU           PPC_BIT(13)
/*
 * XiveNotifier callback: forward a source trigger to the interrupt
 * controller by storing the LISN to the configured notify port.
 */
static void pnv_psi_notify(XiveNotifier *xf, uint32_t srcno)
{
    PnvPsi *psi = PNV_PSI(xf);
    uint64_t notif_port = psi->regs[PSIHB_REG(PSIHB9_ESB_NOTIF_ADDR)];
    bool valid = notif_port & PSIHB9_ESB_NOTIF_VALID;
    uint64_t notify_addr = notif_port & ~PSIHB9_ESB_NOTIF_VALID;
    uint32_t offset =
        (psi->regs[PSIHB_REG(PSIHB9_IVT_OFFSET)] >> PSIHB9_IVT_OFF_SHIFT);
    uint64_t lisn = cpu_to_be64(offset + srcno);    /* big-endian on the wire */
    if (valid) {
        cpu_physical_memory_write(notify_addr, &lisn, sizeof(lisn));
    }
}
/* P9 MMIO read: known registers read back; others log and return -1. */
static uint64_t pnv_psi_p9_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvPsi *psi = PNV_PSI(opaque);
    uint32_t reg = PSIHB_REG(addr);
    uint64_t val = -1;
    switch (addr) {
    case PSIHB9_CR:
    case PSIHB9_SEMR:
        /* FSP stuff */
    case PSIHB9_INTERRUPT_CONTROL:
    case PSIHB9_ESB_CI_BASE:
    case PSIHB9_ESB_NOTIF_ADDR:
    case PSIHB9_IVT_OFFSET:
        val = psi->regs[reg];
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "PSI: read at 0x%" PRIx64 "\n", addr);
    }
    return val;
}
/*
 * P9 MMIO write: the ESB CI BAR maps/unmaps the XIVE source ESB pages;
 * the interrupt-control register can reset the source.
 */
static void pnv_psi_p9_mmio_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvPsi *psi = PNV_PSI(opaque);
    Pnv9Psi *psi9 = PNV9_PSI(psi);
    uint32_t reg = PSIHB_REG(addr);
    MemoryRegion *sysmem = get_system_memory();
    switch (addr) {
    case PSIHB9_CR:
    case PSIHB9_SEMR:
        /* FSP stuff */
        break;
    case PSIHB9_INTERRUPT_CONTROL:
        if (val & PSIHB9_IRQ_RESET) {
            device_reset(DEVICE(&psi9->source));
        }
        psi->regs[reg] = val;
        break;
    case PSIHB9_ESB_CI_BASE:
        /* Remap the ESB pages only on valid-bit transitions */
        if (!(val & PSIHB9_ESB_CI_VALID)) {
            if (psi->regs[reg] & PSIHB9_ESB_CI_VALID) {
                memory_region_del_subregion(sysmem, &psi9->source.esb_mmio);
            }
        } else {
            if (!(psi->regs[reg] & PSIHB9_ESB_CI_VALID)) {
                memory_region_add_subregion(sysmem,
                                        val & ~PSIHB9_ESB_CI_VALID,
                                        &psi9->source.esb_mmio);
            }
        }
        psi->regs[reg] = val;
        break;
    case PSIHB9_ESB_NOTIF_ADDR:
        psi->regs[reg] = val;
        break;
    case PSIHB9_IVT_OFFSET:
        psi->regs[reg] = val;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "PSI: write at 0x%" PRIx64 "\n", addr);
    }
}
/* MMIO ops for the P9 PSIHB register space: 8-byte big-endian only. */
static const MemoryRegionOps pnv_psi_p9_mmio_ops = {
    .read = pnv_psi_p9_mmio_read,
    .write = pnv_psi_p9_mmio_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static uint64_t pnv_psi_p9_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
/* No read are expected */
qemu_log_mask(LOG_GUEST_ERROR, "PSI: xscom read at 0x%" PRIx64 "\n", addr);
return -1;
}
static void pnv_psi_p9_xscom_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
PnvPsi *psi = PNV_PSI(opaque);
/* XSCOM is only used to set the PSIHB MMIO region */
switch (addr >> 3) {
case PSIHB_XSCOM_BAR:
pnv_psi_set_bar(psi, val);
break;
default:
qemu_log_mask(LOG_GUEST_ERROR, "PSI: xscom write at 0x%" PRIx64 "\n",
addr);
}
}
static const MemoryRegionOps pnv_psi_p9_xscom_ops = {
.read = pnv_psi_p9_xscom_read,
.write = pnv_psi_p9_xscom_write,
.endianness = DEVICE_BIG_ENDIAN,
.valid = {
.min_access_size = 8,
.max_access_size = 8,
},
.impl = {
.min_access_size = 8,
.max_access_size = 8,
}
};
/*
 * POWER9 PSI interrupt line update.
 *
 * Records the new level of LSI @irq in the PSIHB9_IRQ_LEVEL register and
 * forwards it to the embedded XIVE source through psi->qirqs.
 */
static void pnv_psi_power9_irq_set(PnvPsi *psi, int irq, bool state)
{
    uint64_t irq_method = psi->regs[PSIHB_REG(PSIHB9_INTERRUPT_CONTROL)];
    /*
     * Valid LSIs are 0 .. PSIHB9_NUM_IRQS - 1: realize allocates
     * psi->qirqs with exactly PSIHB9_NUM_IRQS entries, so the bound
     * check must also reject irq == PSIHB9_NUM_IRQS (the previous '>'
     * test allowed an out-of-bounds qirqs[] access below).  Negative
     * values are rejected for the same reason.
     */
    if (irq < 0 || irq >= PSIHB9_NUM_IRQS) {
        qemu_log_mask(LOG_GUEST_ERROR, "PSI: Unsupported irq %d\n", irq);
        return;
    }
    /* Only the LSI-level (default) method is modeled */
    if (irq_method & PSIHB9_IRQ_METHOD) {
        qemu_log_mask(LOG_GUEST_ERROR, "PSI: LSI IRQ method not supported\n");
        return;
    }
    /* Update LSI levels */
    if (state) {
        psi->regs[PSIHB_REG(PSIHB9_IRQ_LEVEL)] |= PPC_BIT(irq);
    } else {
        psi->regs[PSIHB_REG(PSIHB9_IRQ_LEVEL)] &= ~PPC_BIT(irq);
    }
    qemu_set_irq(psi->qirqs[irq], state);
}
/*
 * Full reset of the POWER9 PSI: resets the common PnvPsi state and
 * unmaps the ESB window if a previous PSIHB9_ESB_CI_BASE write had
 * mapped it.
 */
static void pnv_psi_power9_reset(void *dev)
{
    Pnv9Psi *psi = PNV9_PSI(dev);
    pnv_psi_reset(dev);
    if (memory_region_is_mapped(&psi->source.esb_mmio)) {
        memory_region_del_subregion(get_system_memory(), &psi->source.esb_mmio);
    }
}
/* Instantiate the embedded XIVE interrupt source as a QOM child */
static void pnv_psi_power9_instance_init(Object *obj)
{
    Pnv9Psi *psi = PNV9_PSI(obj);
    object_initialize_child(obj, "source", &psi->source, sizeof(psi->source),
                            TYPE_XIVE_SOURCE, &error_abort, NULL);
}
/*
 * Realize the POWER9 PSI: configure and realize the XIVE source,
 * mark all of its IRQs as level-sensitive (LSI), then create the
 * XSCOM and MMIO register regions and register the reset handler.
 * The property setup must happen before "realized" is set.
 */
static void pnv_psi_power9_realize(DeviceState *dev, Error **errp)
{
    PnvPsi *psi = PNV_PSI(dev);
    XiveSource *xsrc = &PNV9_PSI(psi)->source;
    Error *local_err = NULL;
    int i;
    /* This is the only device with 4k ESB pages */
    object_property_set_int(OBJECT(xsrc), XIVE_ESB_4K, "shift",
                            &error_fatal);
    object_property_set_int(OBJECT(xsrc), PSIHB9_NUM_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(psi),
                                   &error_fatal);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    for (i = 0; i < xsrc->nr_irqs; i++) {
        xive_source_irq_set_lsi(xsrc, i);
    }
    psi->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, xsrc->nr_irqs);
    /* XSCOM region for PSI registers */
    pnv_xscom_region_init(&psi->xscom_regs, OBJECT(dev), &pnv_psi_p9_xscom_ops,
                          psi, "xscom-psi", PNV9_XSCOM_PSIHB_SIZE);
    /* MMIO region for PSI registers */
    memory_region_init_io(&psi->regs_mr, OBJECT(dev), &pnv_psi_p9_mmio_ops, psi,
                          "psihb", PNV9_PSIHB_SIZE);
    pnv_psi_set_bar(psi, psi->bar | PSIHB_BAR_EN);
    qemu_register_reset(pnv_psi_power9_reset, dev);
}
/* Class setup for the POWER9 flavour of the PSI controller */
static void pnv_psi_power9_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvPsiClass *ppc = PNV_PSI_CLASS(klass);
    XiveNotifierClass *xfc = XIVE_NOTIFIER_CLASS(klass);
    dc->desc = "PowerNV PSI Controller POWER9";
    dc->realize = pnv_psi_power9_realize;
    ppc->chip_type = PNV_CHIP_POWER9;
    ppc->xscom_pcba = PNV9_XSCOM_PSIHB_BASE;
    ppc->xscom_size = PNV9_XSCOM_PSIHB_SIZE;
    ppc->bar_mask = PSIHB9_BAR_MASK;
    ppc->irq_set = pnv_psi_power9_irq_set;
    xfc->notify = pnv_psi_notify;
}
/* POWER9 PSI type, acting as a XIVE notifier for its source */
static const TypeInfo pnv_psi_power9_info = {
    .name = TYPE_PNV9_PSI,
    .parent = TYPE_PNV_PSI,
    .instance_size = sizeof(Pnv9Psi),
    .instance_init = pnv_psi_power9_instance_init,
    .class_init = pnv_psi_power9_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { },
    },
};
/* Common (abstract) PSI class: device-tree hook and properties */
static void pnv_psi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    xdc->dt_xscom = pnv_psi_dt_xscom;
    dc->desc = "PowerNV PSI Controller";
    dc->props = pnv_psi_properties;
}
/* Abstract base type shared by the POWER8 and POWER9 models */
static const TypeInfo pnv_psi_info = {
    .name = TYPE_PNV_PSI,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PnvPsi),
    .class_init = pnv_psi_class_init,
    .class_size = sizeof(PnvPsiClass),
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};
static void pnv_psi_register_types(void)
{
    type_register_static(&pnv_psi_info);
    type_register_static(&pnv_psi_power8_info);
    type_register_static(&pnv_psi_power9_info);
}
type_init(pnv_psi_register_types);
/*
 * Monitor helper: print the LISN range served by the PSI's XIVE source
 * (base taken from the IVT offset register) and its per-IRQ state.
 */
void pnv_psi_pic_print_info(Pnv9Psi *psi9, Monitor *mon)
{
    PnvPsi *psi = PNV_PSI(psi9);
    uint32_t offset =
        (psi->regs[PSIHB_REG(PSIHB9_IVT_OFFSET)] >> PSIHB9_IVT_OFF_SHIFT);
    monitor_printf(mon, "PSIHB Source %08x .. %08x\n",
                  offset, offset + psi9->source.nr_irqs - 1);
    xive_source_pic_print_info(&psi9->source, offset, mon);
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/tests/acpi-utils.c
|
/*
* ACPI Utility Functions
*
* Copyright (c) 2013 Red Hat Inc.
* Copyright (c) 2017 Skyport Systems
*
* Authors:
* <NAME> <<EMAIL>>,
* <NAME> <<EMAIL>>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include <glib/gstdio.h>
#include "qemu-common.h"
#include "qemu/bitmap.h"
#include "acpi-utils.h"
#include "boot-sector.h"
/*
 * Sum @len bytes of @data modulo 256.  A well-formed ACPI table
 * checksums to zero over its full length, so callers typically assert
 * that the returned value is 0.
 */
uint8_t acpi_calc_checksum(const uint8_t *data, int len)
{
    unsigned int total = 0;
    int remaining = len;
    while (remaining-- > 0) {
        total += *data++;
    }
    return (uint8_t)total;
}
/*
 * Scan the BIOS area of the guest for the "RSD PTR " signature and
 * return its physical address.  Returns 0x100000 (one past the scanned
 * range) when no RSDP is found.
 */
uint32_t acpi_find_rsdp_address(QTestState *qts)
{
    uint32_t off;
    /* RSDP location can vary across a narrow range */
    for (off = 0xf0000; off < 0x100000; off += 0x10) {
        uint8_t sig[] = "RSD PTR ";
        int i;
        /* Read 8 bytes; sig[8] keeps its NUL so the memcmp below matches */
        for (i = 0; i < sizeof sig - 1; ++i) {
            sig[i] = qtest_readb(qts, off + i);
        }
        if (!memcmp(sig, "RSD PTR ", sizeof sig)) {
            break;
        }
    }
    return off;
}
/*
 * Extract the 64-bit XSDT physical address from an ACPI 2.0+ RSDP.
 * Asserts that the table revision is 2 (the XsdtAddress field only
 * exists from that revision on).
 */
uint64_t acpi_get_xsdt_address(uint8_t *rsdp_table)
{
    uint64_t xsdt_physical_address;
    uint8_t revision = rsdp_table[15 /* Revision offset */];
    /* We must have revision 2 if we're looking for an XSDT pointer */
    g_assert(revision == 2);
    memcpy(&xsdt_physical_address, &rsdp_table[24 /* XsdtAddress offset */], 8);
    return le64_to_cpu(xsdt_physical_address);
}
/*
 * Read the RSDP structure at guest address @addr into @rsdp_table:
 * 20 bytes for an ACPI 1.0 RSDP, 36 for ACPI 2.0+, chosen from the
 * Revision byte.  Asserts the signature is "RSD PTR ".
 */
void acpi_parse_rsdp_table(QTestState *qts, uint32_t addr, uint8_t *rsdp_table)
{
    uint8_t revision;
    /* Read mandatory revision 0 table data (20 bytes) first */
    qtest_memread(qts, addr, rsdp_table, 20);
    revision = rsdp_table[15 /* Revision offset */];
    switch (revision) {
    case 0: /* ACPI 1.0 RSDP */
        break;
    case 2: /* ACPI 2.0+ RSDP */
        /* Read the rest of the RSDP table */
        qtest_memread(qts, addr + 20, rsdp_table + 20, 16);
        break;
    default:
        g_assert_not_reached();
    }
    ACPI_ASSERT_CMP64(*((uint64_t *)(rsdp_table)), "RSD PTR ");
}
/** acpi_fetch_table
 *  load ACPI table at @addr_ptr offset pointer into buffer and return it in
 *  @aml, its length in @aml_len and check that signature/checksum matches
 *  actual one.  The returned buffer is g_malloc0'ed; the caller owns it
 *  and must g_free() it.
 */
void acpi_fetch_table(QTestState *qts, uint8_t **aml, uint32_t *aml_len,
                      const uint8_t *addr_ptr, const char *sig,
                      bool verify_checksum)
{
    uint32_t addr, len;
    /* addr_ptr may be unaligned, hence the memcpy of the LE pointer */
    memcpy(&addr, addr_ptr , sizeof(addr));
    addr = le32_to_cpu(addr);
    qtest_memread(qts, addr + 4, &len, 4); /* Length of ACPI table */
    *aml_len = le32_to_cpu(len);
    *aml = g_malloc0(*aml_len);
    /* get whole table */
    qtest_memread(qts, addr, *aml, *aml_len);
    if (sig) {
        ACPI_ASSERT_CMP(**aml, sig);
    }
    if (verify_checksum) {
        /* A valid ACPI table checksums to zero over its full length */
        g_assert(!acpi_calc_checksum(*aml, *aml_len));
    }
}
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/misc/iotkit-secctl.c
|
<gh_stars>1-10
/*
* Arm IoT Kit security controller
*
* Copyright (c) 2018 Linaro Limited
* Written by <NAME>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 or
* (at your option) any later version.
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/sysbus.h"
#include "hw/registerfields.h"
#include "hw/misc/iotkit-secctl.h"
/* Registers in the secure privilege control block */
REG32(SECRESPCFG, 0x10)
REG32(NSCCFG, 0x14)
REG32(SECMPCINTSTATUS, 0x1c)
REG32(SECPPCINTSTAT, 0x20)
REG32(SECPPCINTCLR, 0x24)
REG32(SECPPCINTEN, 0x28)
REG32(SECMSCINTSTAT, 0x30)
REG32(SECMSCINTCLR, 0x34)
REG32(SECMSCINTEN, 0x38)
REG32(BRGINTSTAT, 0x40)
REG32(BRGINTCLR, 0x44)
REG32(BRGINTEN, 0x48)
REG32(AHBNSPPC0, 0x50)
REG32(AHBNSPPCEXP0, 0x60)
REG32(AHBNSPPCEXP1, 0x64)
REG32(AHBNSPPCEXP2, 0x68)
REG32(AHBNSPPCEXP3, 0x6c)
REG32(APBNSPPC0, 0x70)
REG32(APBNSPPC1, 0x74)
REG32(APBNSPPCEXP0, 0x80)
REG32(APBNSPPCEXP1, 0x84)
REG32(APBNSPPCEXP2, 0x88)
REG32(APBNSPPCEXP3, 0x8c)
REG32(AHBSPPPC0, 0x90)
REG32(AHBSPPPCEXP0, 0xa0)
REG32(AHBSPPPCEXP1, 0xa4)
REG32(AHBSPPPCEXP2, 0xa8)
REG32(AHBSPPPCEXP3, 0xac)
REG32(APBSPPPC0, 0xb0)
REG32(APBSPPPC1, 0xb4)
REG32(APBSPPPCEXP0, 0xc0)
REG32(APBSPPPCEXP1, 0xc4)
REG32(APBSPPPCEXP2, 0xc8)
REG32(APBSPPPCEXP3, 0xcc)
REG32(NSMSCEXP, 0xd0)
REG32(PID4, 0xfd0)
REG32(PID5, 0xfd4)
REG32(PID6, 0xfd8)
REG32(PID7, 0xfdc)
REG32(PID0, 0xfe0)
REG32(PID1, 0xfe4)
REG32(PID2, 0xfe8)
REG32(PID3, 0xfec)
REG32(CID0, 0xff0)
REG32(CID1, 0xff4)
REG32(CID2, 0xff8)
REG32(CID3, 0xffc)
/* Registers in the non-secure privilege control block */
REG32(AHBNSPPPC0, 0x90)
REG32(AHBNSPPPCEXP0, 0xa0)
REG32(AHBNSPPPCEXP1, 0xa4)
REG32(AHBNSPPPCEXP2, 0xa8)
REG32(AHBNSPPPCEXP3, 0xac)
REG32(APBNSPPPC0, 0xb0)
REG32(APBNSPPPC1, 0xb4)
REG32(APBNSPPPCEXP0, 0xc0)
REG32(APBNSPPPCEXP1, 0xc4)
REG32(APBNSPPPCEXP2, 0xc8)
REG32(APBNSPPPCEXP3, 0xcc)
/* PID and CID registers are also present in the NS block */
/*
 * Peripheral/Component ID values, one byte per register in address
 * order starting at PID4 (the read handlers index these arrays as
 * (offset - A_PID4) / 4).
 */
static const uint8_t iotkit_secctl_s_idregs[] = {
    0x04, 0x00, 0x00, 0x00,
    0x52, 0xb8, 0x0b, 0x00,
    0x0d, 0xf0, 0x05, 0xb1,
};
static const uint8_t iotkit_secctl_ns_idregs[] = {
    0x04, 0x00, 0x00, 0x00,
    0x53, 0xb8, 0x0b, 0x00,
    0x0d, 0xf0, 0x05, 0xb1,
};
/* The register sets for the various PPCs (AHB internal, APB internal,
 * AHB expansion, APB expansion) are all laid out in 16-byte-aligned
 * blocks, so offsets 0xN0, 0xN4, 0xN8, 0xNC address PPCs 0, 1, 2, 3
 * of that type.  Convert a register offset into that PPC array index
 * by taking bits [3:2] of the offset.
 */
static inline int offset_to_ppc_idx(uint32_t offset)
{
    return (offset >> 2) & 0x3;
}
/* Callback signature applied to each PPC by foreach_ppc() */
typedef void PerPPCFunction(IoTKitSecCtlPPC *ppc);
/* Apply @fn to every PPC the controller manages: APB internal,
 * APB expansion, then AHB expansion.
 */
static void foreach_ppc(IoTKitSecCtl *s, PerPPCFunction *fn)
{
    int i;
    for (i = 0; i < IOTS_NUM_APB_PPC; i++) {
        fn(&s->apb[i]);
    }
    for (i = 0; i < IOTS_NUM_APB_EXP_PPC; i++) {
        fn(&s->apbexp[i]);
    }
    for (i = 0; i < IOTS_NUM_AHB_EXP_PPC; i++) {
        fn(&s->ahbexp[i]);
    }
}
/*
 * Read handler for the Secure privilege control block.
 *
 * All registers are 32 bits wide; for byte/halfword accesses the
 * containing word is read and the requested bytes are extracted at the
 * end, since none of the registers are access-sensitive.
 */
static MemTxResult iotkit_secctl_s_read(void *opaque, hwaddr addr,
                                        uint64_t *pdata,
                                        unsigned size, MemTxAttrs attrs)
{
    uint64_t r;
    uint32_t offset = addr & ~0x3;
    IoTKitSecCtl *s = IOTKIT_SECCTL(opaque);
    switch (offset) {
    case A_AHBNSPPC0:
    case A_AHBSPPPC0:
        /* No internal AHB PPC is modeled; reads as zero */
        r = 0;
        break;
    case A_SECRESPCFG:
        r = s->secrespcfg;
        break;
    case A_NSCCFG:
        r = s->nsccfg;
        break;
    case A_SECMPCINTSTATUS:
        r = s->mpcintstatus;
        break;
    case A_SECPPCINTSTAT:
        r = s->secppcintstat;
        break;
    case A_SECPPCINTEN:
        r = s->secppcinten;
        break;
    case A_BRGINTSTAT:
        /* QEMU's bus fabric can never report errors as it doesn't buffer
         * writes, so we never report bridge interrupts.
         */
        r = 0;
        break;
    case A_BRGINTEN:
        r = s->brginten;
        break;
    case A_AHBNSPPCEXP0:
    case A_AHBNSPPCEXP1:
    case A_AHBNSPPCEXP2:
    case A_AHBNSPPCEXP3:
        r = s->ahbexp[offset_to_ppc_idx(offset)].ns;
        break;
    case A_APBNSPPC0:
    case A_APBNSPPC1:
        r = s->apb[offset_to_ppc_idx(offset)].ns;
        break;
    case A_APBNSPPCEXP0:
    case A_APBNSPPCEXP1:
    case A_APBNSPPCEXP2:
    case A_APBNSPPCEXP3:
        r = s->apbexp[offset_to_ppc_idx(offset)].ns;
        break;
    case A_AHBSPPPCEXP0:
    case A_AHBSPPPCEXP1:
    case A_AHBSPPPCEXP2:
    case A_AHBSPPPCEXP3:
        /*
         * Bug fix: these are the AHB expansion PPC SP registers, so they
         * must read back s->ahbexp -- the write path for A_AHBSPPPCEXPn
         * updates s->ahbexp, but this read previously returned
         * s->apbexp, i.e. the wrong PPC's state.
         */
        r = s->ahbexp[offset_to_ppc_idx(offset)].sp;
        break;
    case A_APBSPPPC0:
    case A_APBSPPPC1:
        r = s->apb[offset_to_ppc_idx(offset)].sp;
        break;
    case A_APBSPPPCEXP0:
    case A_APBSPPPCEXP1:
    case A_APBSPPPCEXP2:
    case A_APBSPPPCEXP3:
        r = s->apbexp[offset_to_ppc_idx(offset)].sp;
        break;
    case A_SECMSCINTSTAT:
        r = s->secmscintstat;
        break;
    case A_SECMSCINTEN:
        r = s->secmscinten;
        break;
    case A_NSMSCEXP:
        r = s->nsmscexp;
        break;
    case A_PID4:
    case A_PID5:
    case A_PID6:
    case A_PID7:
    case A_PID0:
    case A_PID1:
    case A_PID2:
    case A_PID3:
    case A_CID0:
    case A_CID1:
    case A_CID2:
    case A_CID3:
        /* ID registers: one byte per word, indexed from PID4 */
        r = iotkit_secctl_s_idregs[(offset - A_PID4) / 4];
        break;
    case A_SECPPCINTCLR:
    case A_SECMSCINTCLR:
    case A_BRGINTCLR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "IotKit SecCtl S block read: write-only offset 0x%x\n",
                      offset);
        r = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "IotKit SecCtl S block read: bad offset 0x%x\n", offset);
        r = 0;
        break;
    }
    if (size != 4) {
        /* None of our registers are access-sensitive, so just pull the right
         * byte out of the word read result.
         */
        r = extract32(r, (addr & 3) * 8, size * 8);
    }
    trace_iotkit_secctl_s_read(offset, r, size);
    *pdata = r;
    return MEMTX_OK;
}
/*
 * Drive each of a PPC's "access permitted" output lines: for a port in
 * nonsecure mode the nonsecure-privilege bit applies, otherwise the
 * secure-privilege bit.
 */
static void iotkit_secctl_update_ppc_ap(IoTKitSecCtlPPC *ppc)
{
    int i;
    for (i = 0; i < ppc->numports; i++) {
        bool v;
        if (extract32(ppc->ns, i, 1)) {
            v = extract32(ppc->nsp, i, 1);
        } else {
            v = extract32(ppc->sp, i, 1);
        }
        qemu_set_irq(ppc->ap[i], v);
    }
}
/* Write the NONSEC register: latch the masked value, drive the per-port
 * nonsec outputs, then refresh the ap outputs which depend on ns.
 */
static void iotkit_secctl_ppc_ns_write(IoTKitSecCtlPPC *ppc, uint32_t value)
{
    int i;
    ppc->ns = value & MAKE_64BIT_MASK(0, ppc->numports);
    for (i = 0; i < ppc->numports; i++) {
        qemu_set_irq(ppc->nonsec[i], extract32(ppc->ns, i, 1));
    }
    iotkit_secctl_update_ppc_ap(ppc);
}
/* Write the secure-privilege register and refresh the ap outputs */
static void iotkit_secctl_ppc_sp_write(IoTKitSecCtlPPC *ppc, uint32_t value)
{
    ppc->sp = value & MAKE_64BIT_MASK(0, ppc->numports);
    iotkit_secctl_update_ppc_ap(ppc);
}
/* Write the nonsecure-privilege register and refresh the ap outputs */
static void iotkit_secctl_ppc_nsp_write(IoTKitSecCtlPPC *ppc, uint32_t value)
{
    ppc->nsp = value & MAKE_64BIT_MASK(0, ppc->numports);
    iotkit_secctl_update_ppc_ap(ppc);
}
/* Drive a PPC's irq_clear line from its bit in SECPPCINTSTAT */
static void iotkit_secctl_ppc_update_irq_clear(IoTKitSecCtlPPC *ppc)
{
    uint32_t value = ppc->parent->secppcintstat;
    qemu_set_irq(ppc->irq_clear, extract32(value, ppc->irq_bit_offset, 1));
}
/* Drive a PPC's irq_enable line from its bit in SECPPCINTEN */
static void iotkit_secctl_ppc_update_irq_enable(IoTKitSecCtlPPC *ppc)
{
    uint32_t value = ppc->parent->secppcinten;
    qemu_set_irq(ppc->irq_enable, extract32(value, ppc->irq_bit_offset, 1));
}
/* Drive the per-MSC output lines from bits [31:16] of @value */
static void iotkit_secctl_update_mscexp_irqs(qemu_irq *msc_irqs, uint32_t value)
{
    int i;
    for (i = 0; i < IOTS_NUM_EXP_MSC; i++) {
        qemu_set_irq(msc_irqs[i], extract32(value, i + 16, 1));
    }
}
static void iotkit_secctl_update_msc_irq(IoTKitSecCtl *s)
{
    /* Update the combined MSC IRQ, based on S_MSCEXP_STATUS and S_MSCEXP_EN */
    bool level = s->secmscintstat & s->secmscinten;
    qemu_set_irq(s->msc_irq, level);
}
/*
 * Write handler for the Secure privilege control block.
 *
 * Only full-word (4 byte) writes take effect; narrower accesses are
 * logged and ignored.  Writes to PPC registers fan out to the relevant
 * GPIO output lines via the iotkit_secctl_ppc_* helpers.
 */
static MemTxResult iotkit_secctl_s_write(void *opaque, hwaddr addr,
                                         uint64_t value,
                                         unsigned size, MemTxAttrs attrs)
{
    IoTKitSecCtl *s = IOTKIT_SECCTL(opaque);
    uint32_t offset = addr;
    IoTKitSecCtlPPC *ppc;
    trace_iotkit_secctl_s_write(offset, value, size);
    if (size != 4) {
        /* Byte and halfword writes are ignored */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "IotKit SecCtl S block write: bad size, ignored\n");
        return MEMTX_OK;
    }
    switch (offset) {
    case A_NSCCFG:
        s->nsccfg = value & 3;
        qemu_set_irq(s->nsc_cfg_irq, s->nsccfg);
        break;
    case A_SECRESPCFG:
        value &= 1;
        s->secrespcfg = value;
        qemu_set_irq(s->sec_resp_cfg, s->secrespcfg);
        break;
    case A_SECPPCINTCLR:
        /*
         * NOTE(review): the masked value is not applied to
         * s->secppcintstat before pulsing the irq_clear lines -- the
         * helper reads the unmodified status register.  Looks
         * intentional in that the clear lines mirror current status,
         * but confirm against the SSE-200 TRM / upstream QEMU.
         */
        value &= 0x00f000f3;
        foreach_ppc(s, iotkit_secctl_ppc_update_irq_clear);
        break;
    case A_SECPPCINTEN:
        s->secppcinten = value & 0x00f000f3;
        foreach_ppc(s, iotkit_secctl_ppc_update_irq_enable);
        break;
    case A_BRGINTCLR:
        /* Bridge interrupts are never raised, so there is nothing to clear */
        break;
    case A_BRGINTEN:
        s->brginten = value & 0xffff0000;
        break;
    case A_AHBNSPPCEXP0:
    case A_AHBNSPPCEXP1:
    case A_AHBNSPPCEXP2:
    case A_AHBNSPPCEXP3:
        ppc = &s->ahbexp[offset_to_ppc_idx(offset)];
        iotkit_secctl_ppc_ns_write(ppc, value);
        break;
    case A_APBNSPPC0:
    case A_APBNSPPC1:
        ppc = &s->apb[offset_to_ppc_idx(offset)];
        iotkit_secctl_ppc_ns_write(ppc, value);
        break;
    case A_APBNSPPCEXP0:
    case A_APBNSPPCEXP1:
    case A_APBNSPPCEXP2:
    case A_APBNSPPCEXP3:
        ppc = &s->apbexp[offset_to_ppc_idx(offset)];
        iotkit_secctl_ppc_ns_write(ppc, value);
        break;
    case A_AHBSPPPCEXP0:
    case A_AHBSPPPCEXP1:
    case A_AHBSPPPCEXP2:
    case A_AHBSPPPCEXP3:
        ppc = &s->ahbexp[offset_to_ppc_idx(offset)];
        iotkit_secctl_ppc_sp_write(ppc, value);
        break;
    case A_APBSPPPC0:
    case A_APBSPPPC1:
        ppc = &s->apb[offset_to_ppc_idx(offset)];
        iotkit_secctl_ppc_sp_write(ppc, value);
        break;
    case A_APBSPPPCEXP0:
    case A_APBSPPPCEXP1:
    case A_APBSPPPCEXP2:
    case A_APBSPPPCEXP3:
        ppc = &s->apbexp[offset_to_ppc_idx(offset)];
        iotkit_secctl_ppc_sp_write(ppc, value);
        break;
    case A_SECMSCINTCLR:
        iotkit_secctl_update_mscexp_irqs(s->mscexp_clear, value);
        break;
    case A_SECMSCINTEN:
        s->secmscinten = value;
        iotkit_secctl_update_msc_irq(s);
        break;
    case A_NSMSCEXP:
        s->nsmscexp = value;
        iotkit_secctl_update_mscexp_irqs(s->mscexp_ns, value);
        break;
    case A_SECMPCINTSTATUS:
    case A_SECPPCINTSTAT:
    case A_SECMSCINTSTAT:
    case A_BRGINTSTAT:
    case A_AHBNSPPC0:
    case A_AHBSPPPC0:
    case A_PID4:
    case A_PID5:
    case A_PID6:
    case A_PID7:
    case A_PID0:
    case A_PID1:
    case A_PID2:
    case A_PID3:
    case A_CID0:
    case A_CID1:
    case A_CID2:
    case A_CID3:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "IoTKit SecCtl S block write: "
                      "read-only offset 0x%x\n", offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "IotKit SecCtl S block write: bad offset 0x%x\n",
                      offset);
        break;
    }
    return MEMTX_OK;
}
/*
 * Read handler for the Non-secure privilege control block.
 *
 * All registers are 32 bits wide; byte and halfword reads extract the
 * requested part of the containing word at the end.
 */
static MemTxResult iotkit_secctl_ns_read(void *opaque, hwaddr addr,
                                         uint64_t *pdata,
                                         unsigned size, MemTxAttrs attrs)
{
    IoTKitSecCtl *s = IOTKIT_SECCTL(opaque);
    uint64_t r;
    uint32_t offset = addr & ~0x3;
    switch (offset) {
    case A_AHBNSPPPC0:
        /* No internal AHB PPC is modeled; reads as zero */
        r = 0;
        break;
    case A_AHBNSPPPCEXP0:
    case A_AHBNSPPPCEXP1:
    case A_AHBNSPPPCEXP2:
    case A_AHBNSPPPCEXP3:
        r = s->ahbexp[offset_to_ppc_idx(offset)].nsp;
        break;
    case A_APBNSPPPC0:
    case A_APBNSPPPC1:
        r = s->apb[offset_to_ppc_idx(offset)].nsp;
        break;
    case A_APBNSPPPCEXP0:
    case A_APBNSPPPCEXP1:
    case A_APBNSPPPCEXP2:
    case A_APBNSPPPCEXP3:
        r = s->apbexp[offset_to_ppc_idx(offset)].nsp;
        break;
    case A_PID4:
    case A_PID5:
    case A_PID6:
    case A_PID7:
    case A_PID0:
    case A_PID1:
    case A_PID2:
    case A_PID3:
    case A_CID0:
    case A_CID1:
    case A_CID2:
    case A_CID3:
        /* ID registers: one byte per word, indexed from PID4 */
        r = iotkit_secctl_ns_idregs[(offset - A_PID4) / 4];
        break;
    default:
        /*
         * Bug fix: this is the read handler, but the message previously
         * said "NS block write", which misattributes the guest error.
         */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "IotKit SecCtl NS block read: bad offset 0x%x\n",
                      offset);
        r = 0;
        break;
    }
    if (size != 4) {
        /* None of our registers are access-sensitive, so just pull the right
         * byte out of the word read result.
         */
        r = extract32(r, (addr & 3) * 8, size * 8);
    }
    trace_iotkit_secctl_ns_read(offset, r, size);
    *pdata = r;
    return MEMTX_OK;
}
/*
 * Write handler for the Non-secure privilege control block.
 *
 * Only full-word writes take effect.  The writable registers are the
 * per-PPC nonsecure-privilege (NSP) registers, which fan out through
 * iotkit_secctl_ppc_nsp_write().
 */
static MemTxResult iotkit_secctl_ns_write(void *opaque, hwaddr addr,
                                          uint64_t value,
                                          unsigned size, MemTxAttrs attrs)
{
    IoTKitSecCtl *s = IOTKIT_SECCTL(opaque);
    uint32_t offset = addr;
    IoTKitSecCtlPPC *ppc;
    trace_iotkit_secctl_ns_write(offset, value, size);
    if (size != 4) {
        /* Byte and halfword writes are ignored */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "IotKit SecCtl NS block write: bad size, ignored\n");
        return MEMTX_OK;
    }
    switch (offset) {
    case A_AHBNSPPPCEXP0:
    case A_AHBNSPPPCEXP1:
    case A_AHBNSPPPCEXP2:
    case A_AHBNSPPPCEXP3:
        ppc = &s->ahbexp[offset_to_ppc_idx(offset)];
        iotkit_secctl_ppc_nsp_write(ppc, value);
        break;
    case A_APBNSPPPC0:
    case A_APBNSPPPC1:
        ppc = &s->apb[offset_to_ppc_idx(offset)];
        iotkit_secctl_ppc_nsp_write(ppc, value);
        break;
    case A_APBNSPPPCEXP0:
    case A_APBNSPPPCEXP1:
    case A_APBNSPPPCEXP2:
    case A_APBNSPPPCEXP3:
        ppc = &s->apbexp[offset_to_ppc_idx(offset)];
        iotkit_secctl_ppc_nsp_write(ppc, value);
        break;
    case A_AHBNSPPPC0:
    case A_PID4:
    case A_PID5:
    case A_PID6:
    case A_PID7:
    case A_PID0:
    case A_PID1:
    case A_PID2:
    case A_PID3:
    case A_CID0:
    case A_CID1:
    case A_CID2:
    case A_CID3:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "IoTKit SecCtl NS block write: "
                      "read-only offset 0x%x\n", offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "IotKit SecCtl NS block write: bad offset 0x%x\n",
                      offset);
        break;
    }
    return MEMTX_OK;
}
/* Secure privilege control block: little-endian, 1..4 byte accesses */
static const MemoryRegionOps iotkit_secctl_s_ops = {
    .read_with_attrs = iotkit_secctl_s_read,
    .write_with_attrs = iotkit_secctl_s_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
};
/* Non-secure privilege control block: little-endian, 1..4 byte accesses */
static const MemoryRegionOps iotkit_secctl_ns_ops = {
    .read_with_attrs = iotkit_secctl_ns_read,
    .write_with_attrs = iotkit_secctl_ns_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
};
/*
 * Clear one PPC's security-control state: the nonsecure (ns),
 * secure-privilege (sp) and nonsecure-privilege (nsp) bitmaps all
 * return to zero.  Invoked for every PPC from iotkit_secctl_reset().
 */
static void iotkit_secctl_reset_ppc(IoTKitSecCtlPPC *ppc)
{
    ppc->ns = ppc->sp = ppc->nsp = 0;
}
/*
 * Device reset: clear the latched control/enable registers and every
 * PPC's state.  NOTE(review): mpcintstatus and secmscintstat are not
 * cleared here -- presumably because they mirror the level of external
 * input lines rather than latched guest writes; confirm against the
 * SSE-200 TRM.
 */
static void iotkit_secctl_reset(DeviceState *dev)
{
    IoTKitSecCtl *s = IOTKIT_SECCTL(dev);
    s->secppcintstat = 0;
    s->secppcinten = 0;
    s->secrespcfg = 0;
    s->nsccfg = 0;
    s->brginten = 0;
    foreach_ppc(s, iotkit_secctl_reset_ppc);
}
/* GPIO input: internal MPC interrupt status, bit @n of SECMPCINTSTATUS */
static void iotkit_secctl_mpc_status(void *opaque, int n, int level)
{
    IoTKitSecCtl *s = IOTKIT_SECCTL(opaque);
    s->mpcintstatus = deposit32(s->mpcintstatus, n, 1, !!level);
}
/* GPIO input: expansion MPC interrupt status, bits [31:16] */
static void iotkit_secctl_mpcexp_status(void *opaque, int n, int level)
{
    IoTKitSecCtl *s = IOTKIT_SECCTL(opaque);
    s->mpcintstatus = deposit32(s->mpcintstatus, n + 16, 1, !!level);
}
/* GPIO input: expansion MSC interrupt status; also refreshes the
 * combined MSC IRQ output.
 */
static void iotkit_secctl_mscexp_status(void *opaque, int n, int level)
{
    IoTKitSecCtl *s = IOTKIT_SECCTL(opaque);
    s->secmscintstat = deposit32(s->secmscintstat, n + 16, 1, !!level);
    iotkit_secctl_update_msc_irq(s);
}
/* GPIO input: a PPC's interrupt status line, recorded in SECPPCINTSTAT.
 * NOTE(review): this passes raw 'level' to deposit32 where the handlers
 * above normalize with '!!level' -- harmless for 0/1 levels since
 * deposit32 masks to the field width, but inconsistent; confirm intent.
 */
static void iotkit_secctl_ppc_irqstatus(void *opaque, int n, int level)
{
    IoTKitSecCtlPPC *ppc = opaque;
    IoTKitSecCtl *s = IOTKIT_SECCTL(ppc->parent);
    int irqbit = ppc->irq_bit_offset + n;
    s->secppcintstat = deposit32(s->secppcintstat, irqbit, 1, level);
}
/*
 * Wire up one PPC: record its geometry (@numports ports, interrupt bits
 * starting at @irq_bit_offset) and create its named GPIO lines --
 * per-port "<name>_nonsec" and "<name>_ap" outputs, single
 * "<name>_irq_enable"/"<name>_irq_clear" outputs and a single
 * "<name>_irq_status" input routed to iotkit_secctl_ppc_irqstatus with
 * the PPC itself as opaque.
 */
static void iotkit_secctl_init_ppc(IoTKitSecCtl *s,
                                   IoTKitSecCtlPPC *ppc,
                                   const char *name,
                                   int numports,
                                   int irq_bit_offset)
{
    char *gpioname;
    DeviceState *dev = DEVICE(s);
    ppc->numports = numports;
    ppc->irq_bit_offset = irq_bit_offset;
    ppc->parent = s;
    gpioname = g_strdup_printf("%s_nonsec", name);
    qdev_init_gpio_out_named(dev, ppc->nonsec, gpioname, numports);
    g_free(gpioname);
    gpioname = g_strdup_printf("%s_ap", name);
    qdev_init_gpio_out_named(dev, ppc->ap, gpioname, numports);
    g_free(gpioname);
    gpioname = g_strdup_printf("%s_irq_enable", name);
    qdev_init_gpio_out_named(dev, &ppc->irq_enable, gpioname, 1);
    g_free(gpioname);
    gpioname = g_strdup_printf("%s_irq_clear", name);
    qdev_init_gpio_out_named(dev, &ppc->irq_clear, gpioname, 1);
    g_free(gpioname);
    gpioname = g_strdup_printf("%s_irq_status", name);
    qdev_init_gpio_in_named_with_opaque(dev, iotkit_secctl_ppc_irqstatus,
                                        ppc, gpioname, 1);
    g_free(gpioname);
}
/*
 * Instance init: set up all PPCs (internal APB, APB expansion with irq
 * bits from 4, AHB expansion with irq bits from 20), the device-level
 * GPIO lines, and the two 4KB MMIO register banks (Secure and
 * Non-secure privilege control blocks).
 */
static void iotkit_secctl_init(Object *obj)
{
    IoTKitSecCtl *s = IOTKIT_SECCTL(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    DeviceState *dev = DEVICE(obj);
    int i;
    iotkit_secctl_init_ppc(s, &s->apb[0], "apb_ppc0",
                           IOTS_APB_PPC0_NUM_PORTS, 0);
    iotkit_secctl_init_ppc(s, &s->apb[1], "apb_ppc1",
                           IOTS_APB_PPC1_NUM_PORTS, 1);
    for (i = 0; i < IOTS_NUM_APB_EXP_PPC; i++) {
        IoTKitSecCtlPPC *ppc = &s->apbexp[i];
        char *ppcname = g_strdup_printf("apb_ppcexp%d", i);
        iotkit_secctl_init_ppc(s, ppc, ppcname, IOTS_PPC_NUM_PORTS, 4 + i);
        g_free(ppcname);
    }
    for (i = 0; i < IOTS_NUM_AHB_EXP_PPC; i++) {
        IoTKitSecCtlPPC *ppc = &s->ahbexp[i];
        char *ppcname = g_strdup_printf("ahb_ppcexp%d", i);
        iotkit_secctl_init_ppc(s, ppc, ppcname, IOTS_PPC_NUM_PORTS, 20 + i);
        g_free(ppcname);
    }
    qdev_init_gpio_out_named(dev, &s->sec_resp_cfg, "sec_resp_cfg", 1);
    qdev_init_gpio_out_named(dev, &s->nsc_cfg_irq, "nsc_cfg", 1);
    qdev_init_gpio_in_named(dev, iotkit_secctl_mpc_status, "mpc_status",
                            IOTS_NUM_MPC);
    qdev_init_gpio_in_named(dev, iotkit_secctl_mpcexp_status,
                            "mpcexp_status", IOTS_NUM_EXP_MPC);
    qdev_init_gpio_in_named(dev, iotkit_secctl_mscexp_status,
                            "mscexp_status", IOTS_NUM_EXP_MSC);
    qdev_init_gpio_out_named(dev, s->mscexp_clear, "mscexp_clear",
                             IOTS_NUM_EXP_MSC);
    qdev_init_gpio_out_named(dev, s->mscexp_ns, "mscexp_ns",
                             IOTS_NUM_EXP_MSC);
    qdev_init_gpio_out_named(dev, &s->msc_irq, "msc_irq", 1);
    memory_region_init_io(&s->s_regs, obj, &iotkit_secctl_s_ops,
                          s, "iotkit-secctl-s-regs", 0x1000);
    memory_region_init_io(&s->ns_regs, obj, &iotkit_secctl_ns_ops,
                          s, "iotkit-secctl-ns-regs", 0x1000);
    sysbus_init_mmio(sbd, &s->s_regs);
    sysbus_init_mmio(sbd, &s->ns_regs);
}
/* Migration state for one PPC: the three latched control bitmaps */
static const VMStateDescription iotkit_secctl_ppc_vmstate = {
    .name = "iotkit-secctl-ppc",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ns, IoTKitSecCtlPPC),
        VMSTATE_UINT32(sp, IoTKitSecCtlPPC),
        VMSTATE_UINT32(nsp, IoTKitSecCtlPPC),
        VMSTATE_END_OF_LIST()
    }
};
/* Subsection added later than v1: MPC interrupt status */
static const VMStateDescription iotkit_secctl_mpcintstatus_vmstate = {
    .name = "iotkit-secctl-mpcintstatus",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mpcintstatus, IoTKitSecCtl),
        VMSTATE_END_OF_LIST()
    }
};
/* Subsection .needed hook: always migrate the MSC subsection */
static bool needed_always(void *opaque)
{
    return true;
}
/* Subsection: MSC interrupt status/enable and NS MSC config */
static const VMStateDescription iotkit_secctl_msc_vmstate = {
    .name = "iotkit-secctl/msc",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = needed_always,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(secmscintstat, IoTKitSecCtl),
        VMSTATE_UINT32(secmscinten, IoTKitSecCtl),
        VMSTATE_UINT32(nsmscexp, IoTKitSecCtl),
        VMSTATE_END_OF_LIST()
    }
};
/* Top-level migration state: control registers plus all PPC arrays */
static const VMStateDescription iotkit_secctl_vmstate = {
    .name = "iotkit-secctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(secppcintstat, IoTKitSecCtl),
        VMSTATE_UINT32(secppcinten, IoTKitSecCtl),
        VMSTATE_UINT32(secrespcfg, IoTKitSecCtl),
        VMSTATE_UINT32(nsccfg, IoTKitSecCtl),
        VMSTATE_UINT32(brginten, IoTKitSecCtl),
        VMSTATE_STRUCT_ARRAY(apb, IoTKitSecCtl, IOTS_NUM_APB_PPC, 1,
                             iotkit_secctl_ppc_vmstate, IoTKitSecCtlPPC),
        VMSTATE_STRUCT_ARRAY(apbexp, IoTKitSecCtl, IOTS_NUM_APB_EXP_PPC, 1,
                             iotkit_secctl_ppc_vmstate, IoTKitSecCtlPPC),
        VMSTATE_STRUCT_ARRAY(ahbexp, IoTKitSecCtl, IOTS_NUM_AHB_EXP_PPC, 1,
                             iotkit_secctl_ppc_vmstate, IoTKitSecCtlPPC),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &iotkit_secctl_mpcintstatus_vmstate,
        &iotkit_secctl_msc_vmstate,
        NULL
    },
};
/* Class setup: hook up migration state and reset handler */
static void iotkit_secctl_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->vmsd = &iotkit_secctl_vmstate;
    dc->reset = iotkit_secctl_reset;
}
/* SysBus device type registration for the security controller */
static const TypeInfo iotkit_secctl_info = {
    .name = TYPE_IOTKIT_SECCTL,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IoTKitSecCtl),
    .instance_init = iotkit_secctl_init,
    .class_init = iotkit_secctl_class_init,
};
static void iotkit_secctl_register_types(void)
{
    type_register_static(&iotkit_secctl_info);
}
type_init(iotkit_secctl_register_types);
|
pmp-tool/PMP
|
src/qemu/src-pmp/include/hw/timer/m48t59.h
|
<filename>src/qemu/src-pmp/include/hw/timer/m48t59.h
#ifndef HW_M48T59_H
#define HW_M48T59_H
#include "qemu-common.h"
#include "qom/object.h"
/* QOM interface exposed by M48T59-style NVRAM/RTC devices */
#define TYPE_NVRAM "nvram"
#define NVRAM_CLASS(klass) \
    OBJECT_CLASS_CHECK(NvramClass, (klass), TYPE_NVRAM)
#define NVRAM_GET_CLASS(obj) \
    OBJECT_GET_CLASS(NvramClass, (obj), TYPE_NVRAM)
#define NVRAM(obj) \
    INTERFACE_CHECK(Nvram, (obj), TYPE_NVRAM)
typedef struct Nvram Nvram;
/* Interface vtable: byte-wide NVRAM access plus a lock toggle hook */
typedef struct NvramClass {
    InterfaceClass parent;
    uint32_t (*read)(Nvram *obj, uint32_t addr);
    void (*write)(Nvram *obj, uint32_t addr, uint32_t val);
    void (*toggle_lock)(Nvram *obj, int lock);
} NvramClass;
/* Create an ISA-attached M48T59; @type selects the chip variant */
Nvram *m48t59_init_isa(ISABus *bus, uint32_t io_base, uint16_t size,
                       int base_year, int type);
/* Create a memory/IO-mapped M48T59 wired to @IRQ */
Nvram *m48t59_init(qemu_irq IRQ, hwaddr mem_base,
                   uint32_t io_base, uint16_t size, int base_year,
                   int type);
#endif /* HW_M48T59_H */
|
pmp-tool/PMP
|
src/qemu/src-pmp/audio/audio_int.h
|
/*
* QEMU Audio subsystem header
*
* Copyright (c) 2003-2005 <NAME> (malc)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef QEMU_AUDIO_INT_H
#define QEMU_AUDIO_INT_H
#ifdef CONFIG_AUDIO_COREAUDIO
#define FLOAT_MIXENG
/* #define RECIPROCAL */
#endif
#include "mixeng.h"
struct audio_pcm_ops;
/* Completion callback registered by a frontend voice user */
struct audio_callback {
    void *opaque;
    audio_callback_fn fn;
};
/* Format description shared by hardware and software voices */
struct audio_pcm_info {
    int bits;
    int sign;
    int freq;
    int nchannels;
    int align;
    int shift;
    int bytes_per_second;
    int swap_endianness;
};
typedef struct SWVoiceCap SWVoiceCap;
/* Backend-side state of one hardware output voice; software voices
 * attached to it are mixed into mix_buf before being clipped out.
 */
typedef struct HWVoiceOut {
    int enabled;
    int poll_mode;
    int pending_disable;
    struct audio_pcm_info info;
    f_sample *clip;
    int rpos;
    uint64_t ts_helper;
    struct st_sample *mix_buf;
    int samples;
    QLIST_HEAD (sw_out_listhead, SWVoiceOut) sw_head;
    QLIST_HEAD (sw_cap_listhead, SWVoiceCap) cap_head;
    int ctl_caps;
    struct audio_pcm_ops *pcm_ops;
    QLIST_ENTRY (HWVoiceOut) entries;
} HWVoiceOut;
/* Backend-side state of one hardware input (capture) voice */
typedef struct HWVoiceIn {
    int enabled;
    int poll_mode;
    struct audio_pcm_info info;
    t_sample *conv;
    int wpos;
    int total_samples_captured;
    uint64_t ts_helper;
    struct st_sample *conv_buf;
    int samples;
    QLIST_HEAD (sw_in_listhead, SWVoiceIn) sw_head;
    int ctl_caps;
    struct audio_pcm_ops *pcm_ops;
    QLIST_ENTRY (HWVoiceIn) entries;
} HWVoiceIn;
/* Frontend-side output voice: rate-converted into its hardware voice */
struct SWVoiceOut {
    QEMUSoundCard *card;
    struct audio_pcm_info info;
    t_sample *conv;
    int64_t ratio;
    struct st_sample *buf;
    void *rate;
    int total_hw_samples_mixed;
    int active;
    int empty;
    HWVoiceOut *hw;
    char *name;
    struct mixeng_volume vol;
    struct audio_callback callback;
    QLIST_ENTRY (SWVoiceOut) entries;
};
/* Frontend-side input voice: rate-converted from its hardware voice */
struct SWVoiceIn {
    QEMUSoundCard *card;
    int active;
    struct audio_pcm_info info;
    int64_t ratio;
    void *rate;
    int total_hw_samples_acquired;
    struct st_sample *buf;
    f_sample *clip;
    HWVoiceIn *hw;
    char *name;
    struct mixeng_volume vol;
    struct audio_callback callback;
    QLIST_ENTRY (SWVoiceIn) entries;
};
typedef struct audio_driver audio_driver;
/* Descriptor an audio backend registers with the subsystem */
struct audio_driver {
    const char *name;
    const char *descr;
    void *(*init) (Audiodev *);
    void (*fini) (void *);
    struct audio_pcm_ops *pcm_ops;
    int can_be_default;
    int max_voices_out;
    int max_voices_in;
    int voice_size_out;
    int voice_size_in;
    int ctl_caps;
    QLIST_ENTRY(audio_driver) next;
};
/* Per-driver PCM operations for hardware voices */
struct audio_pcm_ops {
    int (*init_out)(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque);
    void (*fini_out)(HWVoiceOut *hw);
    int (*run_out) (HWVoiceOut *hw, int live);
    int (*write) (SWVoiceOut *sw, void *buf, int size);
    int (*ctl_out) (HWVoiceOut *hw, int cmd, ...);
    int (*init_in) (HWVoiceIn *hw, struct audsettings *as, void *drv_opaque);
    void (*fini_in) (HWVoiceIn *hw);
    int (*run_in) (HWVoiceIn *hw);
    int (*read) (SWVoiceIn *sw, void *buf, int size);
    int (*ctl_in) (HWVoiceIn *hw, int cmd, ...);
};
/* One registered consumer of captured audio */
struct capture_callback {
    struct audio_capture_ops ops;
    void *opaque;
    QLIST_ENTRY (capture_callback) entries;
};
/* A capture voice: a pseudo hardware output that feeds callbacks */
struct CaptureVoiceOut {
    HWVoiceOut hw;
    void *buf;
    QLIST_HEAD (cb_listhead, capture_callback) cb_head;
    QLIST_ENTRY (CaptureVoiceOut) entries;
};
/* Links a software output voice to the capture voice it feeds */
struct SWVoiceCap {
    SWVoiceOut sw;
    CaptureVoiceOut *cap;
    QLIST_ENTRY (SWVoiceCap) entries;
};
/* Global state of the audio subsystem: active driver plus the lists of
 * sound cards, hardware voices and capture voices.
 */
typedef struct AudioState {
    struct audio_driver *drv;
    Audiodev *dev;
    void *drv_opaque;
    QEMUTimer *ts;
    QLIST_HEAD (card_listhead, QEMUSoundCard) card_head;
    QLIST_HEAD (hw_in_listhead, HWVoiceIn) hw_head_in;
    QLIST_HEAD (hw_out_listhead, HWVoiceOut) hw_head_out;
    QLIST_HEAD (cap_listhead, CaptureVoiceOut) cap_head;
    int nb_hw_voices_out;
    int nb_hw_voices_in;
    int vm_running;
    int64_t period_ticks;
} AudioState;
extern const struct mixeng_volume nominal_volume;
extern const char *audio_prio_list[];
/* Driver registry */
void audio_driver_register(audio_driver *drv);
audio_driver *audio_driver_lookup(const char *name);
/* Internal helpers shared between audio.c and the backends */
void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as);
void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len);
int audio_pcm_sw_write (SWVoiceOut *sw, void *buf, int len);
int audio_pcm_hw_get_live_in (HWVoiceIn *hw);
int audio_pcm_sw_read (SWVoiceIn *sw, void *buf, int len);
int audio_pcm_hw_clip_out (HWVoiceOut *hw, void *pcm_buf,
                           int live, int pending);
int audio_bug (const char *funcname, int cond);
void *audio_calloc (const char *funcname, int nmemb, size_t size);
void audio_run (const char *msg);
/* Commands for the ctl_out/ctl_in backend hooks */
#define VOICE_ENABLE 1
#define VOICE_DISABLE 2
#define VOICE_VOLUME 3
#define VOICE_VOLUME_CAP (1 << VOICE_VOLUME)
/*
 * Number of positions from 'src' forward to 'dst' on a circular
 * buffer of 'len' entries.
 */
static inline int audio_ring_dist (int dst, int src, int len)
{
    if (dst >= src) {
        return dst - src;
    }
    return len - src + dst;
}
#define dolog(fmt, ...) AUD_log(AUDIO_CAP, fmt, ## __VA_ARGS__)
#ifdef DEBUG
#define ldebug(fmt, ...) AUD_log(AUDIO_CAP, fmt, ## __VA_ARGS__)
#else
#define ldebug(fmt, ...) (void)0
#endif
#define AUDIO_STRINGIFY_(n) #n
#define AUDIO_STRINGIFY(n) AUDIO_STRINGIFY_(n)
typedef struct AudiodevListEntry {
Audiodev *dev;
QSIMPLEQ_ENTRY(AudiodevListEntry) next;
} AudiodevListEntry;
typedef QSIMPLEQ_HEAD(, AudiodevListEntry) AudiodevListHead;
AudiodevListHead audio_handle_legacy_opts(void);
void audio_free_audiodev_list(AudiodevListHead *head);
void audio_create_pdos(Audiodev *dev);
AudiodevPerDirectionOptions *audio_get_pdo_in(Audiodev *dev);
AudiodevPerDirectionOptions *audio_get_pdo_out(Audiodev *dev);
#endif /* QEMU_AUDIO_INT_H */
|
pmp-tool/PMP
|
src/qemu/src-pmp/hw/intc/xive.c
|
<gh_stars>1-10
/*
* QEMU PowerPC XIVE interrupt controller model
*
* Copyright (c) 2017-2018, IBM Corporation.
*
* This code is licensed under the GPL version 2 or later. See the
* COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "hw/qdev-properties.h"
#include "monitor/monitor.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
/*
* XIVE Thread Interrupt Management context
*/
/*
* Convert a priority number to an Interrupt Pending Buffer (IPB)
* register, which indicates a pending interrupt at the priority
* corresponding to the bit number
*/
/*
 * Map a priority number to its single-bit IPB representation: bit
 * (XIVE_PRIORITY_MAX - priority) is set for in-range priorities;
 * out-of-range values yield an empty IPB.
 */
static uint8_t priority_to_ipb(uint8_t priority)
{
    if (priority > XIVE_PRIORITY_MAX) {
        return 0;
    }
    return 1 << (XIVE_PRIORITY_MAX - priority);
}
/*
* Convert an Interrupt Pending Buffer (IPB) register to a Pending
* Interrupt Priority Register (PIPR), which contains the priority of
* the most favored pending notification.
*/
/*
 * Derive the Pending Interrupt Priority Register value from an IPB:
 * the bit index (from the MSB) of the most favored pending priority,
 * or 0xFF when nothing is pending.
 */
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    if (!ipb) {
        return 0xff;
    }
    return clz32((uint32_t)ipb << 24);
}
/*
 * Record a newly pending 'priority' in the ring's IPB and recompute
 * the PIPR, the priority of the most favored pending notification.
 */
static void ipb_update(uint8_t *regs, uint8_t priority)
{
    regs[TM_IPB] |= priority_to_ipb(priority);
    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
}
/*
 * NSR exception bit(s) used by a given ring. Only the OS and
 * physical HV rings can signal exceptions in this model.
 */
static uint8_t exception_mask(uint8_t ring)
{
    if (ring == TM_QW1_OS) {
        return TM_QW1_NSR_EO;
    }
    if (ring == TM_QW3_HV_PHYS) {
        return TM_QW3_NSR_HE;
    }
    g_assert_not_reached();
}
/*
 * Acknowledge the pending interrupt on 'ring': lower the CPU output
 * line, raise the CPPR to the accepted priority, clear that priority
 * from the IPB, recompute the PIPR and drop the NSR exception bit.
 * Returns the (NSR << 8 | CPPR) value the guest reads from the ACK
 * register.
 */
static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    uint8_t nsr = regs[TM_NSR];
    uint8_t mask = exception_mask(ring);
    qemu_irq_lower(tctx->output);
    if (regs[TM_NSR] & mask) {
        /* CPPR is raised to the priority of the accepted interrupt */
        uint8_t cppr = regs[TM_PIPR];
        regs[TM_CPPR] = cppr;
        /* Reset the pending buffer bit */
        regs[TM_IPB] &= ~priority_to_ipb(cppr);
        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
        /* Drop Exception bit */
        regs[TM_NSR] &= ~mask;
    }
    /* NSR is returned as sampled before the exception bit was dropped */
    return (nsr << 8) | regs[TM_CPPR];
}
/*
 * Signal an exception on 'ring' if a pending interrupt is more
 * favored (numerically lower) than the current CPPR: set the ring's
 * NSR exception bit and raise the CPU output line.
 */
static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    if (regs[TM_PIPR] < regs[TM_CPPR]) {
        switch (ring) {
        case TM_QW1_OS:
            regs[TM_NSR] |= TM_QW1_NSR_EO;
            break;
        case TM_QW3_HV_PHYS:
            /* HE field of the NSR, shifted into place */
            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
            break;
        default:
            g_assert_not_reached();
        }
        qemu_irq_raise(tctx->output);
    }
}
/*
 * Update the CPPR of a ring. Values above XIVE_PRIORITY_MAX are
 * clamped to 0xFF (fully unmasked). Lowering the CPPR may unmask a
 * pending interrupt, so the notification logic is re-run.
 */
static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }
    tctx->regs[ring + TM_CPPR] = cppr;
    /* CPPR has changed, check if we need to raise a pending exception */
    xive_tctx_notify(tctx, ring);
}
/*
* XIVE Thread Interrupt Management Area (TIMA)
*/
/* TIMA store handler: set the CPPR of the physical HV ring */
static void xive_tm_set_hv_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}
/* TIMA load handler: acknowledge the pending HV interrupt */
static uint64_t xive_tm_ack_hv_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
}
/*
 * TIMA load handler: pull (invalidate) the POOL context. Returns the
 * current POOL CAM value and clears it so the thread no longer
 * matches POOL notifications.
 *
 * NOTE(review): the mask is applied to a single byte of WORD2 of the
 * POOL ring -- confirm TM_QW2W2_POOL_CAM is meant to be used at byte
 * granularity here.
 */
static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset,
                                      unsigned size)
{
    uint64_t ret;
    ret = tctx->regs[TM_QW2_HV_POOL + TM_WORD2] & TM_QW2W2_POOL_CAM;
    tctx->regs[TM_QW2_HV_POOL + TM_WORD2] &= ~TM_QW2W2_POOL_CAM;
    return ret;
}
/* TIMA store handler: push the VT byte (WORD2) of the physical ring */
static void xive_tm_vt_push(XiveTCTX *tctx, hwaddr offset,
                            uint64_t value, unsigned size)
{
    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
}
/* TIMA load handler: poll the VT byte (WORD2) of the physical ring */
static uint64_t xive_tm_vt_poll(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
}
/*
* Define an access map for each page of the TIMA that we will use in
* the memory region ops to filter values when doing loads and stores
* of raw registers values
*
* Registers accessibility bits :
*
* 0x0 - no access
* 0x1 - write only
* 0x2 - read only
* 0x3 - read/write
*/
static const uint8_t xive_tm_hw_view[] = {
/* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
/* QW-1 OS */ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0,
/* QW-2 POOL */ 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
/* QW-3 PHYS */ 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 3, 3, 3, 0,
};
static const uint8_t xive_tm_hv_view[] = {
/* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
/* QW-1 OS */ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0,
/* QW-2 POOL */ 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0,
/* QW-3 PHYS */ 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 0, 0, 0, 0,
};
static const uint8_t xive_tm_os_view[] = {
/* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
/* QW-1 OS */ 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,
/* QW-2 POOL */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* QW-3 PHYS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
static const uint8_t xive_tm_user_view[] = {
/* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* QW-1 OS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* QW-2 POOL */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* QW-3 PHYS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
/*
* Overall TIMA access map for the thread interrupt management context
* registers
*/
static const uint8_t *xive_tm_views[] = {
[XIVE_TM_HW_PAGE] = xive_tm_hw_view,
[XIVE_TM_HV_PAGE] = xive_tm_hv_view,
[XIVE_TM_OS_PAGE] = xive_tm_os_view,
[XIVE_TM_USER_PAGE] = xive_tm_user_view,
};
/*
* Computes a register access mask for a given offset in the TIMA
*/
/*
 * Build a byte-granular access mask (MSB first) for a TIMA access of
 * 'size' bytes at 'offset', from the per-page register access maps.
 * A zero mask means the access touches no readable/writable byte.
 */
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
    uint8_t page = (offset >> TM_SHIFT) & 0x3;
    uint8_t reg = offset & 0x3F;
    uint8_t access = write ? 0x1 : 0x2;
    uint64_t mask = 0;
    unsigned i;
    for (i = 0; i < size; i++) {
        if (xive_tm_views[page][reg + i] & access) {
            mask |= (uint64_t)0xff << (8 * (size - i - 1));
        }
    }
    return mask;
}
/*
 * Raw store to the TIMA registers. Only 4 and 8 byte stores outside
 * the User ring are supported; bytes marked reserved in the access
 * map are silently discarded.
 */
static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, true);
    int i;
    /*
     * Only 4 or 8 bytes stores are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return;
    }
    /*
     * Use the register offset for the raw values and filter out
     * reserved values
     */
    for (i = 0; i < size; i++) {
        uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
        if (byte_mask) {
            tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
                byte_mask;
        }
    }
}
/*
 * Raw load from the TIMA registers. Only 4 and 8 byte loads outside
 * the User ring are supported; reserved bytes read as zero and an
 * invalid access returns all-ones.
 */
static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, false);
    uint64_t ret;
    int i;
    /*
     * Only 4 or 8 bytes loads are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return -1;
    }
    /* Use the register offset for the raw values */
    ret = 0;
    for (i = 0; i < size; i++) {
        ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
    }
    /* filter out reserved values */
    return ret & mask;
}
/*
* The TM context is mapped twice within each page. Stores and loads
* to the first mapping below 2K write and read the specified values
* without modification. The second mapping above 2K performs specific
* state changes (side effects) in addition to setting/returning the
* interrupt management area context of the processor thread.
*/
static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
return xive_tctx_accept(tctx, TM_QW1_OS);
}
static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset,
uint64_t value, unsigned size)
{
xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}
/*
* Adjust the IPB to allow a CPU to process event queues of other
* priorities during one physical interrupt cycle.
*/
static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset,
uint64_t value, unsigned size)
{
ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff);
xive_tctx_notify(tctx, TM_QW1_OS);
}
/*
* Define a mapping of "special" operations depending on the TIMA page
* offset and the size of the operation.
*/
typedef struct XiveTmOp {
uint8_t page_offset;
uint32_t op_offset;
unsigned size;
void (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value,
unsigned size);
uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size);
} XiveTmOp;
static const XiveTmOp xive_tm_operations[] = {
/*
* MMIOs below 2K : raw values and special operations without side
* effects
*/
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },
/* MMIOs above 2K : special operations with side effects */
{ XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
{ XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
};
/*
 * Look up the special-operation handler matching a TIMA access.
 * Returns NULL when the access should fall through to raw register
 * handling (or is invalid, for the region above 2K).
 */
static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint32_t op_offset = offset & 0xFFF;
    size_t i;
    for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
        const XiveTmOp *xto = &xive_tm_operations[i];
        bool has_handler = write ? xto->write_handler != NULL
                                 : xto->read_handler != NULL;
        /* An access from a more privileged TIMA page is also allowed */
        if (xto->page_offset >= page_offset &&
            xto->op_offset == op_offset &&
            xto->size == size && has_handler) {
            return xto;
        }
    }
    return NULL;
}
/*
* TIMA MMIO handlers
*/
/*
 * TIMA store dispatch. Stores above 2K must match a special
 * operation; stores below 2K try a special operation first and fall
 * back to a raw register store.
 */
void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                        unsigned size)
{
    const XiveTmOp *xto;
    /*
     * TODO: check V bit in Q[0-3]W2
     */
    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, true);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA"
                          "@%"HWADDR_PRIx"\n", offset);
        } else {
            xto->write_handler(tctx, offset, value, size);
        }
        return;
    }
    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(offset, size, true);
    if (xto) {
        xto->write_handler(tctx, offset, value, size);
        return;
    }
    /*
     * Finish with raw access to the register values
     */
    xive_tm_raw_write(tctx, offset, value, size);
}
/*
 * TIMA load dispatch. Loads above 2K must match a special operation
 * (else return all-ones); loads below 2K try a special operation
 * first and fall back to a raw register load.
 */
uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    const XiveTmOp *xto;
    /*
     * TODO: check V bit in Q[0-3]W2
     */
    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, false);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA"
                          "@%"HWADDR_PRIx"\n", offset);
            return -1;
        }
        return xto->read_handler(tctx, offset, size);
    }
    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(offset, size, false);
    if (xto) {
        return xto->read_handler(tctx, offset, size);
    }
    /*
     * Finish with raw access to the register values
     */
    return xive_tm_raw_read(tctx, offset, size);
}
static void xive_tm_write(void *opaque, hwaddr offset,
uint64_t value, unsigned size)
{
XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);
xive_tctx_tm_write(tctx, offset, value, size);
}
static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);
return xive_tctx_tm_read(tctx, offset, size);
}
const MemoryRegionOps xive_tm_ops = {
.read = xive_tm_read,
.write = xive_tm_write,
.endianness = DEVICE_BIG_ENDIAN,
.valid = {
.min_access_size = 1,
.max_access_size = 8,
},
.impl = {
.min_access_size = 1,
.max_access_size = 8,
},
};
/*
 * Load WORD2 of a TM ring as a raw (big-endian) 32-bit value.
 *
 * Use memcpy() instead of dereferencing a (uint32_t *) cast of the
 * byte array: the cast form violates C strict-aliasing rules and may
 * fault on hosts with strict alignment. Compilers turn the memcpy()
 * into a single load.
 */
static inline uint32_t xive_tctx_word2(uint8_t *ring)
{
    uint32_t w2;

    memcpy(&w2, &ring[TM_WORD2], sizeof(w2));
    return w2;
}
static char *xive_tctx_ring_print(uint8_t *ring)
{
uint32_t w2 = xive_tctx_word2(ring);
return g_strdup_printf("%02x %02x %02x %02x %02x "
"%02x %02x %02x %08x",
ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB],
ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR],
be32_to_cpu(w2));
}
static const char * const xive_tctx_ring_names[] = {
"USER", "OS", "POOL", "PHYS",
};
void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
{
int cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;
int i;
monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
" W2\n", cpu_index);
for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
xive_tctx_ring_names[i], s);
g_free(s);
}
}
/*
 * System reset handler (registered at realize time): clear all TM
 * rings and restore architected defaults for the OS ring.
 */
static void xive_tctx_reset(void *dev)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);
    memset(tctx->regs, 0, sizeof(tctx->regs));
    /* Set some defaults */
    tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;
    /*
     * Initialize PIPR to 0xFF to avoid phantom interrupts when the
     * CPPR is first set.
     */
    tctx->regs[TM_QW1_OS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
    tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}
/*
 * Realize handler: resolve the mandatory "cpu" link, wire the TCTX
 * output to the CPU external interrupt input (POWER9 bus model only)
 * and register the reset handler.
 */
static void xive_tctx_realize(DeviceState *dev, Error **errp)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Object *obj;
    Error *local_err = NULL;
    obj = object_property_get_link(OBJECT(dev), "cpu", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'cpu' not found: ");
        return;
    }
    cpu = POWERPC_CPU(obj);
    tctx->cs = CPU(obj);
    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER9:
        tctx->output = env->irq_inputs[POWER9_INPUT_INT];
        break;
    default:
        error_setg(errp, "XIVE interrupt controller does not support "
                   "this CPU bus model");
        return;
    }
    qemu_register_reset(xive_tctx_reset, dev);
}
/* Unrealize handler: undo the reset registration done at realize */
static void xive_tctx_unrealize(DeviceState *dev, Error **errp)
{
    qemu_unregister_reset(xive_tctx_reset, dev);
}
static const VMStateDescription vmstate_xive_tctx = {
.name = TYPE_XIVE_TCTX,
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_BUFFER(regs, XiveTCTX),
VMSTATE_END_OF_LIST()
},
};
static void xive_tctx_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "XIVE Interrupt Thread Context";
dc->realize = xive_tctx_realize;
dc->unrealize = xive_tctx_unrealize;
dc->vmsd = &vmstate_xive_tctx;
}
static const TypeInfo xive_tctx_info = {
.name = TYPE_XIVE_TCTX,
.parent = TYPE_DEVICE,
.instance_size = sizeof(XiveTCTX),
.class_init = xive_tctx_class_init,
};
/*
 * Create and realize a XIVE thread context as a child of 'cpu'.
 * The child property keeps the reference (hence the early
 * object_unref); on failure the object is unparented and NULL is
 * returned with the error propagated to 'errp'.
 */
Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    obj = object_new(TYPE_XIVE_TCTX);
    object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort);
    object_unref(obj);
    object_property_add_const_link(obj, "cpu", cpu, &error_abort);
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }
    return obj;
error:
    object_unparent(obj);
    error_propagate(errp, local_err);
    return NULL;
}
/*
* XIVE ESB helpers
*/
/*
 * Replace the 2-bit PQ state stored in *pq and return the previous
 * state. Only the low two bits of the stored byte and of 'value' are
 * significant; the upper bits of *pq are preserved.
 */
static uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
{
    uint8_t prev = *pq & 0x3;

    *pq = (*pq & ~0x3) | (value & 0x3);
    return prev;
}
/*
 * Apply the trigger transition to a PQ state. Returns true when the
 * event should be forwarded to the router:
 *
 *   RESET   -> PENDING  forward
 *   PENDING -> QUEUED   no forward
 *   QUEUED  -> QUEUED   no forward
 *   OFF     -> OFF      no forward
 */
static bool xive_esb_trigger(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;
    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_PENDING:
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_QUEUED);
        return false;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}
/*
 * Apply the EOI transition to a PQ state. Returns true when a queued
 * event must be re-forwarded to the router:
 *
 *   RESET   -> RESET    no forward
 *   PENDING -> RESET    no forward
 *   QUEUED  -> PENDING  forward
 *   OFF     -> OFF      no forward
 */
static bool xive_esb_eoi(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;
    switch (old_pq) {
    case XIVE_ESB_RESET:
    case XIVE_ESB_PENDING:
        xive_esb_set(pq, XIVE_ESB_RESET);
        return false;
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}
/*
* XIVE Interrupt Source (or IVSE)
*/
uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
{
assert(srcno < xsrc->nr_irqs);
return xsrc->status[srcno] & 0x3;
}
uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
{
assert(srcno < xsrc->nr_irqs);
return xive_esb_set(&xsrc->status[srcno], pq);
}
/*
* Returns whether the event notification should be forwarded.
*/
static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
{
uint8_t old_pq = xive_source_esb_get(xsrc, srcno);
xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;
switch (old_pq) {
case XIVE_ESB_RESET:
xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
return true;
default:
return false;
}
}
/*
* Returns whether the event notification should be forwarded.
*/
static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
{
bool ret;
assert(srcno < xsrc->nr_irqs);
ret = xive_esb_trigger(&xsrc->status[srcno]);
if (xive_source_irq_is_lsi(xsrc, srcno) &&
xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
qemu_log_mask(LOG_GUEST_ERROR,
"XIVE: queued an event on LSI IRQ %d\n", srcno);
}
return ret;
}
/*
* Returns whether the event notification should be forwarded.
*/
static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
{
bool ret;
assert(srcno < xsrc->nr_irqs);
ret = xive_esb_eoi(&xsrc->status[srcno]);
/*
* LSI sources do not set the Q bit but they can still be
* asserted, in which case we should forward a new event
* notification
*/
if (xive_source_irq_is_lsi(xsrc, srcno) &&
xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
ret = xive_source_lsi_trigger(xsrc, srcno);
}
return ret;
}
/*
* Forward the source event notification to the Router
*/
static void xive_source_notify(XiveSource *xsrc, int srcno)
{
XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);
if (xnc->notify) {
xnc->notify(xsrc->xive, srcno);
}
}
/*
* In a two pages ESB MMIO setting, even page is the trigger page, odd
* page is for management
*/
static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
return !((addr >> shift) & 1);
}
static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
{
return xive_source_esb_has_2page(xsrc) &&
addr_is_even(addr, xsrc->esb_shift - 1);
}
/*
* ESB MMIO loads
* Trigger page Management/EOI page
*
* ESB MMIO setting 2 pages 1 or 2 pages
*
* 0x000 .. 0x3FF -1 EOI and return 0|1
* 0x400 .. 0x7FF -1 EOI and return 0|1
* 0x800 .. 0xBFF -1 return PQ
* 0xC00 .. 0xCFF -1 return PQ and atomically PQ=00
* 0xD00 .. 0xDFF -1 return PQ and atomically PQ=01
 * 0xE00 .. 0xEFF        -1             return PQ and atomically PQ=10
 * 0xF00 .. 0xFFF        -1             return PQ and atomically PQ=11
*/
/*
 * Management/EOI page load handler: decode the 4K page offset into
 * an EOI, PQ read or PQ set operation (see the table above). Loads
 * on the trigger page of a 2-page ESB are invalid and return
 * all-ones.
 */
static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    uint64_t ret = -1;
    /* In a two pages ESB MMIO setting, trigger page should not be read */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid load on IRQ %d trigger page at "
                      "0x%"HWADDR_PRIx"\n", srcno, addr);
        return -1;
    }
    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_source_esb_eoi(xsrc, srcno);
        /* Forward the source event notification for routing */
        if (ret) {
            xive_source_notify(xsrc, srcno);
        }
        break;
    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = xive_source_esb_get(xsrc, srcno);
        break;
    /* The new PQ state is encoded in bits 8-9 of the offset */
    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
                      offset);
    }
    return ret;
}
/*
* ESB MMIO stores
* Trigger page Management/EOI page
*
* ESB MMIO setting 2 pages 1 or 2 pages
*
* 0x000 .. 0x3FF Trigger Trigger
* 0x400 .. 0x7FF Trigger EOI
* 0x800 .. 0xBFF Trigger undefined
* 0xC00 .. 0xCFF Trigger PQ=00
* 0xD00 .. 0xDFF Trigger PQ=01
 * 0xE00 .. 0xEFF         Trigger             PQ=10
 * 0xF00 .. 0xFFF         Trigger             PQ=11
*/
/*
 * ESB store handler: any store to the trigger page of a 2-page ESB
 * triggers the source; otherwise the 4K page offset selects trigger,
 * Store EOI (if enabled on the source) or PQ set (see the table
 * above). A resulting event is forwarded to the router.
 */
static void xive_source_esb_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    bool notify = false;
    /* In a two pages ESB MMIO setting, trigger page only triggers */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        notify = xive_source_esb_trigger(xsrc, srcno);
        goto out;
    }
    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_source_esb_trigger(xsrc, srcno);
        break;
    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
            return;
        }
        notify = xive_source_esb_eoi(xsrc, srcno);
        break;
    /* The new PQ state is encoded in bits 8-9 of the offset */
    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
                      offset);
        return;
    }
out:
    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}
static const MemoryRegionOps xive_source_esb_ops = {
.read = xive_source_esb_read,
.write = xive_source_esb_write,
.endianness = DEVICE_BIG_ENDIAN,
.valid = {
.min_access_size = 8,
.max_access_size = 8,
},
.impl = {
.min_access_size = 8,
.max_access_size = 8,
},
};
/*
 * qemu_irq input handler for the source lines. LSI lines track the
 * input level (asserted flag set/cleared); MSI lines trigger only on
 * a non-zero value. The event is forwarded to the router when the
 * ESB state machine requires it.
 */
void xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    bool notify = false;
    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        if (val) {
            notify = xive_source_lsi_trigger(xsrc, srcno);
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    } else {
        if (val) {
            notify = xive_source_esb_trigger(xsrc, srcno);
        }
    }
    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}
void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
{
int i;
for (i = 0; i < xsrc->nr_irqs; i++) {
uint8_t pq = xive_source_esb_get(xsrc, i);
if (pq == XIVE_ESB_OFF) {
continue;
}
monitor_printf(mon, " %08x %s %c%c%c\n", i + offset,
xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
pq & XIVE_ESB_VAL_P ? 'P' : '-',
pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ');
}
}
static void xive_source_reset(void *dev)
{
XiveSource *xsrc = XIVE_SOURCE(dev);
/* Do not clear the LSI bitmap */
/* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */
memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
}
/*
 * Realize handler for the XIVE interrupt source: resolve the
 * mandatory "xive" notifier link, validate the IRQ count and the ESB
 * page shift, allocate the PQ state array and LSI bitmap, map the
 * ESB MMIO region and register the reset handler.
 */
static void xive_source_realize(DeviceState *dev, Error **errp)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }
    xsrc->xive = XIVE_NOTIFIER(obj);

    if (!xsrc->nr_irqs) {
        /* fixed wording: "interrupt needs" -> "interrupts needs" */
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    /* Only single or double 4K/64K ESB pages are supported */
    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
        xsrc->esb_shift != XIVE_ESB_64K &&
        xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    xsrc->status = g_malloc0(xsrc->nr_irqs);
    xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);

    /* One ESB page (or page pair) per interrupt */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_source_esb_ops, xsrc, "xive.esb",
                          (1ull << xsrc->esb_shift) * xsrc->nr_irqs);

    qemu_register_reset(xive_source_reset, dev);
}
static const VMStateDescription vmstate_xive_source = {
.name = TYPE_XIVE_SOURCE,
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
VMSTATE_END_OF_LIST()
},
};
/*
* The default XIVE interrupt source setting for the ESB MMIOs is two
* 64k pages without Store EOI, to be in sync with KVM.
*/
static Property xive_source_properties[] = {
DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
DEFINE_PROP_END_OF_LIST(),
};
static void xive_source_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "XIVE Interrupt Source";
dc->props = xive_source_properties;
dc->realize = xive_source_realize;
dc->vmsd = &vmstate_xive_source;
}
static const TypeInfo xive_source_info = {
.name = TYPE_XIVE_SOURCE,
.parent = TYPE_DEVICE,
.instance_size = sizeof(XiveSource),
.class_init = xive_source_class_init,
};
/*
* XiveEND helpers
*/
void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
{
uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
| be32_to_cpu(end->w3);
uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
uint32_t qentries = 1 << (qsize + 10);
int i;
/*
* print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
*/
monitor_printf(mon, " [ ");
qindex = (qindex - (width - 1)) & (qentries - 1);
for (i = 0; i < width; i++) {
uint64_t qaddr = qaddr_base + (qindex << 2);
uint32_t qdata = -1;
if (dma_memory_read(&address_space_memory, qaddr, &qdata,
sizeof(qdata))) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
HWADDR_PRIx "\n", qaddr);
return;
}
monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
be32_to_cpu(qdata));
qindex = (qindex + 1) & (qentries - 1);
}
}
void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
{
uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
| be32_to_cpu(end->w3);
uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
uint32_t qentries = 1 << (qsize + 10);
uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
if (!xive_end_is_valid(end)) {
return;
}
monitor_printf(mon, " %08x %c%c%c%c%c prio:%d nvt:%04x eq:@%08"PRIx64
"% 6d/%5d ^%d", end_idx,
xive_end_is_valid(end) ? 'v' : '-',
xive_end_is_enqueue(end) ? 'q' : '-',
xive_end_is_notify(end) ? 'n' : '-',
xive_end_is_backlog(end) ? 'b' : '-',
xive_end_is_escalate(end) ? 'e' : '-',
priority, nvt, qaddr_base, qindex, qentries, qgen);
xive_end_queue_pic_print_info(end, 6, mon);
monitor_printf(mon, "]\n");
}
/*
 * Push an event data word into the END queue in guest memory. The
 * top bit of each 32-bit entry carries the queue generation bit,
 * which flips each time the index wraps around the 2^(qsize+10)
 * entry ring. The queue index and generation are written back to W1.
 *
 * NOTE(review): w2 is masked with 0x0fffffff *before* be32_to_cpu()
 * -- confirm the mask is meant to apply to the big-endian
 * representation on little-endian hosts.
 */
static void xive_end_enqueue(XiveEND *end, uint32_t data)
{
    uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
        | be32_to_cpu(end->w3);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);
    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }
    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
}
/*
* XIVE Router (aka. Virtualization Controller or IVRE)
*/
/* Fetch an EAS from the routing tables via the router class hook */
int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        XiveEAS *eas)
{
    return XIVE_ROUTER_GET_CLASS(xrtr)->get_eas(xrtr, eas_blk, eas_idx, eas);
}
/* Fetch an END from the routing tables via the router class hook */
int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                        XiveEND *end)
{
    return XIVE_ROUTER_GET_CLASS(xrtr)->get_end(xrtr, end_blk, end_idx, end);
}
/* Write back (part of) an END via the router class hook */
int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                          XiveEND *end, uint8_t word_number)
{
    return XIVE_ROUTER_GET_CLASS(xrtr)->write_end(xrtr, end_blk, end_idx,
                                                  end, word_number);
}
/* Fetch an NVT from the routing tables via the router class hook */
int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                        XiveNVT *nvt)
{
    return XIVE_ROUTER_GET_CLASS(xrtr)->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
}
/* Write back (part of) an NVT via the router class hook */
int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                          XiveNVT *nvt, uint8_t word_number)
{
    return XIVE_ROUTER_GET_CLASS(xrtr)->write_nvt(xrtr, nvt_blk, nvt_idx,
                                                  nvt, word_number);
}
/* Resolve the thread context of a CPU via the router class hook */
XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    return XIVE_ROUTER_GET_CLASS(xrtr)->get_tctx(xrtr, cs);
}
/*
* By default on P9, the HW CAM line (23bits) is hardwired to :
*
* 0x000||0b1||4Bit chip number||7Bit Thread number.
*
* When the block grouping is enabled, the CAM line is changed to :
*
* 4Bit chip number||0x001||7Bit Thread number.
*/
/*
 * Compose the hardwired P9 CAM line (no block grouping):
 * 0x000 || 0b1 || 4-bit chip number || 7-bit thread number.
 */
static uint32_t hw_cam_line(uint8_t chip_id, uint8_t tid)
{
    uint32_t cam = 1u << 11;

    cam |= (uint32_t)(chip_id & 0xf) << 7;
    cam |= tid & 0x7f;
    return cam;
}
static bool xive_presenter_tctx_match_hw(XiveTCTX *tctx,
uint8_t nvt_blk, uint32_t nvt_idx)
{
CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
uint32_t pir = env->spr_cb[SPR_PIR].default_value;
return hw_cam_line((pir >> 8) & 0xf, pir & 0x7f) ==
hw_cam_line(nvt_blk, nvt_idx);
}
/*
* The thread context register words are in big-endian format.
*/
/*
 * Compare the CAM lines of a thread context against the notified NVT
 * for both notification formats. Returns the offset of the matching
 * ring (TM_QW0_USER .. TM_QW3_HV_PHYS) or -1 on no match. The ring's
 * valid bit (VT/VP/VO/VU in WORD2) gates each comparison.
 */
static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
                                     uint8_t nvt_blk, uint32_t nvt_idx,
                                     bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);
    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */
    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }
        /* F=0 & i=0: Specific NVT notification */
        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
            xive_presenter_tctx_match_hw(tctx, nvt_blk, nvt_idx)) {
            return TM_QW3_HV_PHYS;
        }
        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }
        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */
        /* USER ring: requires both a valid OS CAM match and a valid
         * user context with a matching logical server number */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}
typedef struct XiveTCTXMatch {
XiveTCTX *tctx;
uint8_t ring;
} XiveTCTXMatch;
/*
 * Scan all CPU thread contexts for one matching the notified NVT.
 *
 * Returns true and fills 'match' when exactly one thread context
 * matches. Returns false when no thread matches (NVT not dispatched)
 * or when more than one does (unsupported by the model).
 */
static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
                                 uint8_t nvt_blk, uint32_t nvt_idx,
                                 bool cam_ignore, uint8_t priority,
                                 uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;
    /*
     * TODO (PowerNV): handle chip_id overwrite of block field for
     * hardwired CAM compares
     */
    CPU_FOREACH(cs) {
        XiveTCTX *tctx = xive_router_get_tctx(xrtr, cs);
        int ring;
        /*
         * HW checks that the CPU is enabled in the Physical Thread
         * Enable Register (PTER).
         */
        /*
         * Check the thread context CAM lines and record matches. We
         * will handle CPU exception delivery later
         */
        ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);
        /*
         * Save the context and follow on to catch duplicates, that we
         * don't support yet.
         */
        if (ring != -1) {
            if (match->tctx) {
                /* Second match found: the model cannot arbitrate, abort */
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return false;
            }
            match->ring = ring;
            match->tctx = tctx;
        }
    }
    if (!match->tctx) {
        qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n",
                      nvt_blk, nvt_idx);
        return false;
    }
    return true;
}
/*
* This is our simple Xive Presenter Engine model. It is merged in the
* Router as it does not require an extra object.
*
* It receives notification requests sent by the IVRE to find one
* matching NVT (or more) dispatched on the processor threads. In case
* of a single NVT notification, the process is abreviated and the
* thread is signaled if a match is found. In case of a logical server
* notification (bits ignored at the end of the NVT identifier), the
* IVPE and IVRE select a winning thread using different filters. This
* involves 2 or 3 exchanges on the PowerBus that the model does not
* support.
*
* The parameters represent what is sent on the PowerBus
*/
static void xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
                                  uint8_t nvt_blk, uint32_t nvt_idx,
                                  bool cam_ignore, uint8_t priority,
                                  uint32_t logic_serv)
{
    XiveNVT nvt;
    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
    bool found;
    /* NVT cache lookup */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }
    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }
    found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
                                 priority, logic_serv, &match);
    if (found) {
        /* Set the IPB bit of the matched ring and signal the thread */
        ipb_update(&match.tctx->regs[match.ring], priority);
        xive_tctx_notify(match.tctx, match.ring);
        return;
    }
    /* Record the IPB in the associated NVT structure */
    ipb_update((uint8_t *) &nvt.w4, priority);
    xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
    /*
     * If no matching NVT is dispatched on a HW thread :
     * - update the NVT structure if backlog is activated
     * - escalate (ESe PQ bits and EAS in w4-5) if escalation is
     *   activated
     */
}
/*
* An END trigger can come from an event trigger (IPI or HW) or from
* another chip. We don't model the PowerBus but the END trigger
* message has the same parameters than in the function below.
*/
/*
 * Process an END trigger: enqueue the event data when requested, then
 * coalesce through the END ESn bits before forwarding the notification
 * to the presenter.
 */
static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
                                   uint32_t end_idx, uint32_t end_data)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;
    /* END cache lookup */
    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }
    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }
    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }
    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVT notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);
    /* The END is masked. F=1 (EBB) carries no priority field. */
    if (format == 0 && priority == 0xff) {
        return;
    }
    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even futher coalescing in the Router
     */
    if (!xive_end_is_notify(&end)) {
        uint8_t pq = xive_get_field32(END_W1_ESn, end.w1);
        bool notify = xive_esb_trigger(&pq);
        /* Write back the ESn PQ bits only when they changed */
        if (pq != xive_get_field32(END_W1_ESn, end.w1)) {
            end.w1 = xive_set_field32(END_W1_ESn, end.w1, pq);
            xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
        }
        /* ESn[Q]=1 : end of notification */
        if (!notify) {
            return;
        }
    }
    /*
     * Follows IVPE notification
     */
    xive_presenter_notify(xrtr, format,
                          xive_get_field32(END_W6_NVT_BLOCK, end.w6),
                          xive_get_field32(END_W6_NVT_INDEX, end.w6),
                          xive_get_field32(END_W7_F0_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));
    /* TODO: Auto EOI. */
}
/*
 * Entry point for source notifications: look up the EAS for the LISN
 * and, when valid and unmasked, turn the event trigger into an END
 * trigger.
 */
void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_SRCNO_BLOCK(lisn);
    uint32_t eas_idx = XIVE_SRCNO_INDEX(lisn);
    XiveEAS eas;
    /* EAS cache lookup */
    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }
    /*
     * The IVRE checks the State Bit Cache at this point. We skip the
     * SBC lookup because the state bits of the sources are modeled
     * internally in QEMU.
     */
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }
    if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }
    /*
     * The event trigger becomes an END trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field64(EAS_END_BLOCK, eas.w),
                           xive_get_field64(EAS_END_INDEX, eas.w),
                           xive_get_field64(EAS_END_DATA,  eas.w));
}
/* QOM class initializer: set the device description and wire the
 * XiveNotifier interface to the router notification handler. */
static void xive_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *device_class = DEVICE_CLASS(klass);
    XiveNotifierClass *notifier_class = XIVE_NOTIFIER_CLASS(klass);

    device_class->desc = "XIVE Router Engine";
    notifier_class->notify = xive_router_notify;
}
/* Abstract device type implementing the XiveNotifier interface;
 * concrete router models subclass it and fill XiveRouterClass. */
static const TypeInfo xive_router_info = {
    .name          = TYPE_XIVE_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .class_size    = sizeof(XiveRouterClass),
    .class_init    = xive_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { }
    }
};
/*
 * Print one EAS entry on the monitor for 'info pic'. Invalid entries
 * are skipped; masked entries are tagged with "M".
 */
void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
{
    uint8_t end_blk;
    uint32_t end_idx;
    uint32_t end_data;

    if (!xive_eas_is_valid(eas)) {
        return;
    }

    end_blk  = (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w);
    end_idx  = (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w);
    end_data = (uint32_t) xive_get_field64(EAS_END_DATA, eas->w);

    monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive_eas_is_masked(eas) ? "M" : " ",
                   end_blk, end_idx, end_data);
}
/*
* END ESB MMIO loads
*/
/*
 * END ESB MMIO loads. Each END owns an even/odd pair of ESB pages:
 * the even page exposes the ESn (notification) bits, the odd page the
 * ESe (escalation) bits. The page offset selects the ESB operation.
 */
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;
    end_blk = xsrc->block_id;
    /* Two ESB pages (even/odd) per END, hence the +1 on the shift */
    end_idx = addr >> (xsrc->esb_shift + 1);
    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }
    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }
    /* Even page -> ESn field, odd page -> ESe field */
    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);
    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);
        /* Forward the source event notification for routing ?? */
        break;
    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;
    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        /* Bits 9:8 of the offset encode the new PQ value */
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }
    /* Write the PQ bits back to the END only when they changed */
    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }
    return ret;
}
/*
* END ESB MMIO stores are invalid
*/
/*
 * END ESB MMIO stores are invalid: the END ESB pages are load-only,
 * so any guest store is logged and discarded.
 */
static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}
/* MMIO ops for the END ESB pages: big-endian, 8-byte accesses only;
 * loads are implemented, stores are rejected. */
static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Realize handler: resolve the mandatory "xive" link to the router,
 * validate the properties and create the END ESB MMIO region.
 */
static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;
    /* The "xive" link to the owning router must be set by the machine */
    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }
    xsrc->xrtr = XIVE_ROUTER(obj);
    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupt needs to be greater than 0");
        return;
    }
    /* Only 4K and 64K ESB pages are supported */
    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }
    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}
/* User-settable properties: owning block id, number of ENDs and the
 * ESB page shift (defaults to 64K pages). */
static Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0),
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_END_OF_LIST(),
};
/* QOM class initializer for the END source device: description,
 * properties and realize handler. */
static void xive_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *device_class = DEVICE_CLASS(klass);

    device_class->desc    = "XIVE END Source";
    device_class->props   = xive_end_source_properties;
    device_class->realize = xive_end_source_realize;
}
/* QOM type registration data for the END source device */
static const TypeInfo xive_end_source_info = {
    .name          = TYPE_XIVE_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init    = xive_end_source_class_init,
};
/*
 * XIVE Notifier
 *
 * Interface type used by sources to forward event notifications to the
 * router (see XiveNotifierClass::notify).
 */
static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};
/* Register all XIVE QOM types with the type system. xive_source_info
 * and xive_tctx_info are defined elsewhere in this file. */
static void xive_register_types(void)
{
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}
type_init(xive_register_types)
|
MickTheMechanic/FLIR-color-palette-arrays
|
RAINBOW.c
|
<reponame>MickTheMechanic/FLIR-color-palette-arrays<filename>RAINBOW.c
/*
 * RAINBOW color palette lookup table. Entries progress from blue
 * through cyan, green, yellow, red to magenta — presumably packed as
 * 0xRRGGBB for FLIR thermal-image false coloring (TODO confirm byte
 * order against the consumer).
 *
 * Fix: the original declaration was missing the '=' before the brace
 * initializer, which is invalid C (brace-init without '=' is C++ only).
 */
uint32_t RAINBOW [] = {
0xFF, 0x2FF, 0x5FF, 0x8FF, 0xBFF, 0xEFF, 0x11FF, 0x14FF, 0x17FF, 0x1AFF, 0x1DFF,
0x20FF, 0x23FF, 0x26FF, 0x29FF, 0x2CFF, 0x2FFF, 0x32FF, 0x35FF, 0x37FF, 0x3AFF, 0x3DFF,
0x40FF, 0x43FF, 0x46FF, 0x49FF, 0x4CFF, 0x4FFF, 0x52FF, 0x55FF, 0x58FF, 0x5BFF, 0x5EFF,
0x61FF, 0x64FF, 0x67FF, 0x6AFF, 0x6CFF, 0x6FFF, 0x72FF, 0x75FF, 0x78FF, 0x7BFF, 0x7EFF,
0x81FF, 0x84FF, 0x87FF, 0x8AFF, 0x8DFF, 0x90FF, 0x93FF, 0x96FF, 0x99FF, 0x9CFF, 0x9FFF,
0xA1FF, 0xA4FF, 0xA7FF, 0xAAFF, 0xADFF, 0xB0FF, 0xB3FF, 0xB6FF, 0xB9FF, 0xBCFF, 0xBFFF,
0xC2FF, 0xC5FF, 0xC8FF, 0xCBFF, 0xCEFF, 0xD1FF, 0xD4FF, 0xD6FF, 0xD9FF, 0xDCFF, 0xDFFF,
0xE2FF, 0xE5FF, 0xE8FF, 0xEBFF, 0xEEFF, 0xF1FF, 0xF4FF, 0xF7FF, 0xFAFF, 0xFDFF, 0xFFFD,
0xFFFA, 0xFFF7, 0xFFF4, 0xFFF2, 0xFFEF, 0xFFEC, 0xFFE9, 0xFFE6, 0xFFE3, 0xFFE0, 0xFFDD,
0xFFDA, 0xFFD7, 0xFFD4, 0xFFD1, 0xFFCE, 0xFFCB, 0xFFC8, 0xFFC5, 0xFFC2, 0xFFBF, 0xFFBD,
0xFFBA, 0xFFB7, 0xFFB4, 0xFFB1, 0xFFAE, 0xFFAB, 0xFFA8, 0xFFA5, 0xFFA2, 0xFF9F, 0xFF9C,
0xFF99, 0xFF96, 0xFF93, 0xFF90, 0xFF8D, 0xFF8A, 0xFF88, 0xFF85, 0xFF82, 0xFF7F, 0xFF7C,
0xFF79, 0xFF76, 0xFF73, 0xFF70, 0xFF6D, 0xFF6A, 0xFF67, 0xFF64, 0xFF61, 0xFF5E, 0xFF5B,
0xFF58, 0xFF55, 0xFF53, 0xFF50, 0xFF4D, 0xFF4A, 0xFF47, 0xFF44, 0xFF41, 0xFF3E, 0xFF3B,
0xFF38, 0xFF35, 0xFF32, 0xFF2F, 0xFF2C, 0xFF29, 0xFF26, 0xFF23, 0xFF20, 0xFF1E, 0xFF1B,
0xFF18, 0xFF15, 0xFF12, 0xFF0F, 0xFF0C, 0xFF09, 0xFF06, 0xFF03, 0xFF00, 0x2FF00, 0x5FF00,
0x8FF00, 0xBFF00, 0xEFF00, 0x11FF00, 0x14FF00, 0x16FF00, 0x19FF00, 0x1CFF00, 0x1FFF00, 0x22FF00, 0x25FF00,
0x28FF00, 0x2BFF00, 0x2EFF00, 0x31FF00, 0x34FF00, 0x37FF00, 0x3AFF00, 0x3DFF00, 0x40FF00, 0x43FF00, 0x46FF00,
0x49FF00, 0x4BFF00, 0x4EFF00, 0x51FF00, 0x54FF00, 0x57FF00, 0x5AFF00, 0x5DFF00, 0x60FF00, 0x63FF00, 0x66FF00,
0x69FF00, 0x6CFF00, 0x6FFF00, 0x72FF00, 0x75FF00, 0x78FF00, 0x7BFF00, 0x7EFF00, 0x80FF00, 0x83FF00, 0x86FF00,
0x89FF00, 0x8CFF00, 0x8FFF00, 0x92FF00, 0x95FF00, 0x98FF00, 0x9BFF00, 0x9EFF00, 0xA1FF00, 0xA4FF00, 0xA7FF00,
0xAAFF00, 0xADFF00, 0xB0FF00, 0xB3FF00, 0xB5FF00, 0xB8FF00, 0xBBFF00, 0xBEFF00, 0xC1FF00, 0xC4FF00, 0xC7FF00,
0xCAFF00, 0xCDFF00, 0xD0FF00, 0xD3FF00, 0xD6FF00, 0xD9FF00, 0xDCFF00, 0xDFFF00, 0xE2FF00, 0xE5FF00, 0xE8FF00,
0xEAFF00, 0xEDFF00, 0xF0FF00, 0xF3FF00, 0xF6FF00, 0xF9FF00, 0xFCFF00, 0xFFFE00, 0xFFFB00, 0xFFF800, 0xFFF500,
0xFFF200, 0xFFEF00, 0xFFEC00, 0xFFE900, 0xFFE600, 0xFFE300, 0xFFE000, 0xFFDE00, 0xFFDB00, 0xFFD800, 0xFFD500,
0xFFD200, 0xFFCF00, 0xFFCC00, 0xFFC900, 0xFFC600, 0xFFC300, 0xFFC000, 0xFFBD00, 0xFFBA00, 0xFFB700, 0xFFB400,
0xFFB100, 0xFFAE00, 0xFFAB00, 0xFFA900, 0xFFA600, 0xFFA300, 0xFFA000, 0xFF9D00, 0xFF9A00, 0xFF9700, 0xFF9400,
0xFF9100, 0xFF8E00, 0xFF8B00, 0xFF8800, 0xFF8500, 0xFF8200, 0xFF7F00, 0xFF7C00, 0xFF7900, 0xFF7600, 0xFF7400,
0xFF7100, 0xFF6E00, 0xFF6B00, 0xFF6800, 0xFF6500, 0xFF6200, 0xFF5F00, 0xFF5C00, 0xFF5900, 0xFF5600, 0xFF5300,
0xFF5000, 0xFF4D00, 0xFF4A00, 0xFF4700, 0xFF4400, 0xFF4100, 0xFF3F00, 0xFF3C00, 0xFF3900, 0xFF3600, 0xFF3300,
0xFF3000, 0xFF2D00, 0xFF2A00, 0xFF2700, 0xFF2400, 0xFF2100, 0xFF1E00, 0xFF1B00, 0xFF1800, 0xFF1500, 0xFF1200,
0xFF0F00, 0xFF0C00, 0xFF0A00, 0xFF0700, 0xFF0400, 0xFF0100, 0xFF0001, 0xFF0004, 0xFF0007, 0xFF0009, 0xFF000C,
0xFF000F, 0xFF0012, 0xFF0015, 0xFF0017, 0xFF001A, 0xFF001D, 0xFF0020, 0xFF0022, 0xFF0025, 0xFF0028, 0xFF002B,
0xFF002E, 0xFF0030, 0xFF0033, 0xFF0036, 0xFF0039, 0xFF003B, 0xFF003E, 0xFF0041, 0xFF0044, 0xFF0046, 0xFF0049,
0xFF004C, 0xFF004F, 0xFF0052, 0xFF0054, 0xFF0057, 0xFF005A, 0xFF005D, 0xFF005F, 0xFF0062, 0xFF0065, 0xFF0068,
0xFF006A, 0xFF006D, 0xFF0070, 0xFF0073, 0xFF0076, 0xFF0078, 0xFF007B, 0xFF007E, 0xFF0081, 0xFF0083, 0xFF0086,
0xFF0089, 0xFF008C, 0xFF008F, 0xFF0091, 0xFF0094, 0xFF0097, 0xFF009A, 0xFF009C, 0xFF009F, 0xFF00A2, 0xFF00A5,
0xFF00A7, 0xFF00AA, 0xFF00AD, 0xFF00B0, 0xFF00B3, 0xFF00B5, 0xFF00B8, 0xFF00BB, 0xFF00BE, 0xFF00C0, 0xFF00C3,
0xFF00C6, 0xFF00C9, 0xFF00CB, 0xFF00CE, 0xFF00D1, 0xFF00D4, 0xFF00D7, 0xFF00D9, 0xFF00DC, 0xFF00DF, 0xFF00E2,
0xFF00E4, 0xFF00E7, 0xFF00EA, 0xFF00ED,
};
|
rahuljaswa/Snapp
|
Pod/Classes/RJUserPickerViewController.h
|
<filename>Pod/Classes/RJUserPickerViewController.h
//
// RJUserPickerViewController.h
// Pods
//
// Created by <NAME> on 4/23/15.
//
//
#import <UIKit/UIKit.h>
@class RJUserPickerViewController;
// Delegate callbacks for the two ways the picker flow can end.
@protocol RJUserPickerViewControllerDelegate <NSObject>
- (void)userPickerViewControllerDidCancel:(RJUserPickerViewController *)userPickerViewController;
- (void)userPickerViewControllerDidFinish:(RJUserPickerViewController *)userPickerViewController;
@end
@class RJManagedObjectUser;
// Table-view controller that lets the user select an RJManagedObjectUser.
@interface RJUserPickerViewController : UITableViewController
@property (nonatomic, weak) id<RJUserPickerViewControllerDelegate> delegate;
// Currently selected user; seeded by the designated initializer below.
@property (nonatomic, strong) RJManagedObjectUser *selectedUser;
- (instancetype)initWithInitiallySelectedUser:(RJManagedObjectUser *)user;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJImageCacheManager.h
|
//
// RJImageCacheManager.h
// Community
//
#import <FastImageCache/FICImageCache.h>
@import Foundation;
// FICImageCacheDelegate implementation for the app's image cache.
@interface RJImageCacheManager : NSObject <FICImageCacheDelegate>
// Image formats to register with FICImageCache — presumably
// FICImageFormat instances; confirm in the implementation.
+ (NSArray *)formats;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJCollectionViewStickyHeaderFlowLayout.h
|
<filename>Pod/Classes/RJCollectionViewStickyHeaderFlowLayout.h
//
// RJCollectionViewStickyHeaderFlowLayout.h
// Community
//
#import <UIKit/UIKit.h>
// Flow layout variant; name suggests section headers stay pinned while
// scrolling — behavior defined in the implementation.
@interface RJCollectionViewStickyHeaderFlowLayout : UICollectionViewFlowLayout
@end
|
rahuljaswa/Snapp
|
Pod/Classes/UIButton+RJAdditions.h
|
<filename>Pod/Classes/UIButton+RJAdditions.h
//
// UIButton+RJAdditions.h
// Community
//
#import <UIKit/UIKit.h>
@interface UIButton (RJAdditions)
// Adjusts the button's image/title layout using the given spacing and
// padding — exact geometry defined in the implementation.
- (void)centerWithSpacing:(CGFloat)spacing padding:(CGFloat)padding;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJNotificationsViewController.h
|
//
// RJNotificationsViewController.h
// Community
//
#import "RJViewControllerDataSourceProtocol.h"
#import <UIKit/UIKit.h>
// Table-view controller listing notifications; adopts the shared
// fetch/reload data-source protocol.
@interface RJNotificationsViewController : UITableViewController <RJViewControllerDataSourceProtocol>
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJCategoryHeaderView.h
|
//
// RJCategoryHeaderView.h
// Community
//
// Collection-view supplementary header for a category, exposing its
// action buttons to the controller for target/action wiring.
@interface RJCategoryHeaderView : UICollectionReusableView
@property (nonatomic, strong, readonly) UIButton *headerButton;
@property (nonatomic, strong, readonly) UIButton *followingButton;
@property (nonatomic, strong, readonly) UIButton *postsButton;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJFlagViewController.h
|
//
// RJFlagViewController.h
// Community
//
#import "RJManagedObjectFlag.h"
#import <UIKit/UIKit.h>
@class RJFlagViewController;
// Delegate callbacks: cancel, or a flag reason was chosen for the post.
@protocol RJFlagViewControllerDelegate <NSObject>
- (void)flagViewControllerDidPressCancelButton:(RJFlagViewController *)flagViewController;
- (void)flagViewController:(RJFlagViewController *)flagViewController didSelectReason:(RJManagedObjectFlagReason)reason;
@end
@class RJManagedObjectPost;
// Table-view controller for picking an RJManagedObjectFlagReason to
// report the given post.
@interface RJFlagViewController : UITableViewController
@property (nonatomic, weak) id<RJFlagViewControllerDelegate> delegate;
// The post being flagged, set via the initializer below.
@property (nonatomic, strong, readonly) RJManagedObjectPost *post;
- (instancetype)initWithPost:(RJManagedObjectPost *)post;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJFeedsViewController.h
|
//
// RJFeedsViewController.h
// Pods
//
// Created by <NAME> on 3/11/15.
//
//
#import "RJViewControllerDataSourceProtocol.h"
#import <UIKit/UIKit.h>
// Container view controller for the feeds screen; adopts the shared
// fetch/reload data-source protocol.
@interface RJFeedsViewController : UIViewController <RJViewControllerDataSourceProtocol>
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJImagePickerViewController.h
|
//
// RJImagePickerViewController.h
// Community
//
#import <UIKit/UIKit.h>
@class RJImagePickerViewController;
// Notifies the delegate whenever the set of selected images changes.
@protocol RJImagePickerViewControllerDelegate <NSObject>
- (void)imagePickerViewControllerSelectedImagesDidChange:(RJImagePickerViewController *)imagePickerViewController;
@end
// Collection-view controller for picking multiple images.
@interface RJImagePickerViewController : UICollectionViewController
// Current selection; seeded by the initializer below.
@property (nonatomic, strong) NSArray *selectedImages;
@property (nonatomic, weak) id<RJImagePickerViewControllerDelegate> pickerDelegate;
- (instancetype)initWithInitiallySelectedImages:(NSArray *)images;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJCollectionCell.h
|
<reponame>rahuljaswa/Snapp<filename>Pod/Classes/RJCollectionCell.h
//
// RJCollectionCell.h
// Community
//
#import <UIKit/UIKit.h>
// Collection-view cell that embeds a nested UICollectionView.
@interface RJCollectionCell : UICollectionViewCell
@property (nonatomic, strong, readonly) UICollectionView *collection;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJThreadsCell.h
|
<filename>Pod/Classes/RJThreadsCell.h
//
// RJThreadsCell.h
// Pods
//
// Created by <NAME> on 3/2/15.
//
//
#import <UIKit/UIKit.h>
// Table-view cell used by the threads list; configuration happens in
// the implementation / at the call site.
@interface RJThreadsCell : UITableViewCell
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJCreateSkeletonUserViewController.h
|
<filename>Pod/Classes/RJCreateSkeletonUserViewController.h
//
// RJCreateSkeletonUserViewController.h
// Pods
//
// Created by <NAME> on 4/23/15.
//
//
#import <UIKit/UIKit.h>
@class RJCreateSkeletonUserViewController;
// Callback fired once the skeleton user has been created.
@protocol RJCreateSkeletonUserViewControllerProtocol <NSObject>
- (void)createSkeletonUserViewControllerDidCreateUser:(RJCreateSkeletonUserViewController *)createSkeletonUserViewController;
@end
// View controller for creating a "skeleton" (placeholder) user.
@interface RJCreateSkeletonUserViewController : UIViewController
@property (nonatomic, weak) id<RJCreateSkeletonUserViewControllerProtocol> delegate;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJDataMarshaller.h
|
<gh_stars>0
//
// RJDataMarshaller.h
// Community
//
#import <Foundation/Foundation.h>
// Identifies which Parse PFRelation the marshalled objects came from,
// so the local store can attach them to the right relationship.
typedef NS_ENUM(NSUInteger, RJDataMarshallerPFRelation) {
    kRJDataMarshallerPFRelationNone,
    kRJDataMarshallerPFRelationUserBlockedUsers,
    kRJDataMarshallerPFRelationUserFollowingUsers,
    kRJDataMarshallerPFRelationUserFollowers,
    kRJDataMarshallerPFRelationUserFollowingCategories,
    kRJDataMarshallerPFRelationCategoryFollowingUsers
};
@class RJRemoteObjectCategory;
@class RJRemoteObjectUser;
// Translates remote Parse objects into local managed objects.
@interface RJDataMarshaller : NSObject
// Updates or creates local objects from 'pfObjects'; 'relation' tells
// which relationship (on the target category/user) they belong to.
+ (void)updateOrCreateObjectsWithPFObjects:(NSArray *)pfObjects relation:(RJDataMarshallerPFRelation)relation targetCategory:(RJRemoteObjectCategory *)targetCategory targetUser:(RJRemoteObjectUser *)targetUser completion:(void (^)(void))completion;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJRemoteObjectThread.h
|
<filename>Pod/Classes/RJRemoteObjectThread.h
//
// RJRemoteObjectThread.h
// Pods
//
// Created by <NAME> on 2/28/15.
//
//
#import <Parse/Parse.h>
@class RJRemoteObjectMessage;
@class RJRemoteObjectPost;
@class RJRemoteObjectUser;
// Parse-backed message thread: a contacter, a post it concerns, its
// messages relation and the last message for list previews.
@interface RJRemoteObjectThread : PFObject <PFSubclassing>
@property (nonatomic, strong) RJRemoteObjectUser *contacter;
// Soft-delete flag; read-only on the client.
@property (nonatomic, assign, readonly) BOOL deleted;
@property (nonatomic, strong) RJRemoteObjectMessage *lastMessage;
@property (nonatomic, strong, readonly) PFRelation *messages;
@property (nonatomic, strong) RJRemoteObjectPost *post;
@property (nonatomic, strong) NSArray *readReceipts;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/UIImage+RJAdditions.h
|
<reponame>rahuljaswa/Snapp<filename>Pod/Classes/UIImage+RJAdditions.h<gh_stars>0
//
// UIImage+RJAdditions.h
// Community
//
#import <UIKit/UIKit.h>
@interface UIImage (RJAdditions)
// Solid-color image helpers (default size vs. explicit size).
+ (instancetype)imageWithColor:(UIColor *)color;
+ (instancetype)imageWithColor:(UIColor *)color size:(CGSize)size;
// Loads a named image prepared for tinting — rendering mode set in the
// implementation.
+ (instancetype)tintableImageNamed:(NSString *)named;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJManagedObjectFlag.h
|
<gh_stars>0
//
// RJManagedObjectFlag.h
// Community
//
#import <CoreData/CoreData.h>
#import <Foundation/Foundation.h>
// Reasons a post can be flagged; stored numerically in the 'reason'
// attribute below.
typedef NS_ENUM(NSUInteger, RJManagedObjectFlagReason) {
    RJManagedObjectFlagReasonSpamScam,
    RJManagedObjectFlagReasonSelfHarm,
    RJManagedObjectFlagReasonHarassment,
    RJManagedObjectFlagReasonPrivacy,
    RJManagedObjectFlagReasonIllegal,
    RJManagedObjectFlagReasonViolent,
    RJManagedObjectFlagReasonPornography,
    RJManagedObjectFlagReasonHateful,
    RJManagedObjectFlagReasonIntellectualProperty
};
@class RJManagedObjectPost;
@class RJManagedObjectUser;
// Core Data entity recording that 'creator' flagged 'post' for 'reason'.
@interface RJManagedObjectFlag : NSManagedObject
@property (nonatomic, retain) NSDate *createdAt;
@property (nonatomic, retain) NSString *objectId;
// Wraps an RJManagedObjectFlagReason value.
@property (nonatomic, retain) NSNumber *reason;
@property (nonatomic, retain) RJManagedObjectUser *creator;
@property (nonatomic, retain) RJManagedObjectPost *post;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJGooglePlacesAPIClient.h
|
//
// RJGooglePlacesAPIClient.h
// Pods
//
// Created by <NAME> on 4/5/15.
//
//
#import "AFHTTPRequestOperationManager.h"
// AFNetworking client for the Google Places API.
@interface RJGooglePlacesAPIClient : AFHTTPRequestOperationManager
// Shared singleton instance.
+ (instancetype)sharedAPIClient;
// City autocomplete lookup; responseObject shape defined by the Places
// API response — see the implementation.
- (void)getCitiesWithSearchString:(NSString *)searchString success:(void (^)(AFHTTPRequestOperation *operation, id responseObject))success failure:(void (^)(AFHTTPRequestOperation *operation, NSError *error))failure;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJStore.h
|
<reponame>rahuljaswa/Snapp
//
// RJStore.h
// Community
//
// Facade over the local store: pairs NSFetchRequest builders (for
// local reads) with refresh methods that pull the corresponding
// objects from the backend and report success asynchronously.
//
#import <Foundation/Foundation.h>
#import <Parse/Parse.h>
@class NSFetchRequest;
@class RJManagedObjectPostCategory;
@class RJManagedObjectPost;
@class RJManagedObjectThread;
@class RJManagedObjectUser;
@class RJRemoteObjectUser;
@interface RJStore : NSObject
// Users
+ (NSFetchRequest *)fetchRequestForAllUsersWithSearchString:(NSString *)searchString;
+ (void)refreshAllUsersWithSearchString:(NSString *)searchString completion:(void (^)(BOOL success))completion;
+ (NSFetchRequest *)fetchRequestForCurrentUser;
+ (void)refreshCurrentUser:(void (^)(BOOL success))completion;
+ (NSFetchRequest *)fetchRequestForAllUsers;
+ (void)refreshAllUsers:(void (^)(BOOL success))completion;
// Categories
+ (NSFetchRequest *)fetchRequestForAllCategories;
+ (void)refreshAllCategoriesWithCompletion:(void (^)(BOOL success))completion;
+ (NSFetchRequest *)fetchRequestForAllCategoriesWithSearchString:(NSString *)searchString;
+ (void)refreshAllCategoriesWithSearchString:(NSString *)searchString completion:(void (^)(BOOL success))completion;
// Posts
+ (void)refreshPost:(RJManagedObjectPost *)post completion:(void (^)(BOOL success))completion;
+ (NSFetchRequest *)fetchRequestForAllPostsWithSearchString:(NSString *)searchString;
+ (void)refreshAllPostsWithSearchString:(NSString *)searchString completion:(void (^)(BOOL success))completion;
+ (NSFetchRequest *)fetchRequestForAllPosts;
+ (void)refreshAllPostsWithCompletion:(void (^)(BOOL success))completion;
+ (NSFetchRequest *)fetchRequestForAllPostsWithinMiles:(CGFloat)miles ofLocation:(CLLocation *)location;
+ (void)refreshAllPostsWithinMiles:(CGFloat)miles ofLocation:(CLLocation *)location completion:(void (^)(BOOL success))completion;
+ (NSFetchRequest *)fetchRequestForAllPostsForCategory:(RJManagedObjectPostCategory *)category;
+ (void)refreshAllPostsForCategory:(RJManagedObjectPostCategory *)category completion:(void (^)(BOOL success))completion;
+ (NSFetchRequest *)fetchRequestForAllPostsForCreator:(RJManagedObjectUser *)creator;
+ (void)refreshAllPostsForCreator:(RJManagedObjectUser *)creator completion:(void (^)(BOOL success))completion;
+ (NSFetchRequest *)fetchRequestForAllPostsForCommenter:(RJManagedObjectUser *)liker;
+ (void)refreshAllPostsForCommenter:(RJManagedObjectUser *)commenter completion:(void (^)(BOOL success))completion;
+ (NSFetchRequest *)fetchRequestForAllPostsForLiker:(RJManagedObjectUser *)liker;
+ (void)refreshAllPostsForLiker:(RJManagedObjectUser *)liker completion:(void (^)(BOOL success))completion;
// Likes, threads, messages
+ (NSFetchRequest *)fetchRequestForAllLikesForUser:(RJManagedObjectUser *)user;
+ (void)refreshAllLikesForUser:(RJManagedObjectUser *)user completion:(void (^)(BOOL success))completion;
+ (NSFetchRequest *)fetchRequestForAllThreadsForUser:(RJManagedObjectUser *)user;
+ (void)refreshAllThreadsForUser:(RJManagedObjectUser *)user completion:(void (^)(BOOL success))completion;
+ (void)refreshAllMessagesForThread:(RJManagedObjectThread *)thread completion:(void (^)(BOOL success))completion;
+ (void)refreshAllLikesAndCommentsForCurrentUserWithCompletion:(void (^)(BOOL success))completion;
// many-to-many user/category relationships
+ (void)refreshAllBlockedUsersForUser:(RJManagedObjectUser *)user completion:(void (^)(BOOL success))completion;
+ (void)refreshAllFollowersForUser:(RJManagedObjectUser *)user completion:(void (^)(BOOL success))completion;
+ (void)refreshAllFollowingCategoriesForUser:(RJManagedObjectUser *)user completion:(void (^)(BOOL success))completion;
+ (void)refreshAllFollowingUsersForUser:(RJManagedObjectUser *)user completion:(void (^)(BOOL success))completion;
+ (void)refreshAllFollowingUsersForCategory:(RJManagedObjectPostCategory *)category completion:(void (^)(BOOL success))completion;
+ (void)prefetchData;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJPostImageCacheEntity.h
|
//
// RJPostImageCacheEntity.h
// Community
//
#import <FastImageCache/FICEntity.h>
@import CoreGraphics;
@import Foundation;
// FastImageCache format family / format names and sizes for post images.
FOUNDATION_EXPORT NSString *const kRJImageFormatFamilyPost;
FOUNDATION_EXPORT NSString *const kRJPostImageFormatCardSquare16BitBGR;
FOUNDATION_EXPORT NSString *const kRJPostImageFormatCard16BitBGR;
FOUNDATION_EXPORT CGSize const kRJPostImageSizeCard;
// FICEntity adapter keying a post's image by URL and object ID.
@interface RJPostImageCacheEntity : NSObject <FICEntity>
- (instancetype)initWithPostImageURL:(NSURL *)imageURL objectID:(NSString *)objectID;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJUserImageCacheEntity.h
|
//
// RJUserImageCacheEntity.h
// Community
//
#import <FastImageCache/FICEntity.h>
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
// FastImageCache format family / format names and sizes for user avatars.
FOUNDATION_EXPORT NSString *const kRJImageFormatFamilyUser;
FOUNDATION_EXPORT NSString *const kRJUserImageFormatCard16BitBGR40x40;
FOUNDATION_EXPORT CGSize const kRJUserImageSize40x40;
FOUNDATION_EXPORT NSString *const kRJUserImageFormatCard16BitBGR80x80;
FOUNDATION_EXPORT CGSize const kRJUserImageSize80x80;
// FICEntity adapter keying a user's image by URL and object ID.
@interface RJUserImageCacheEntity : NSObject <FICEntity>
- (instancetype)initWithUserImageURL:(NSURL *)imageURL objectID:(NSString *)objectID;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJCommentCell.h
|
<reponame>rahuljaswa/Snapp
//
// RJCommentCell.h
// Community
//
#import <UIKit/UIKit.h>
@class ActionLabel;
@class RJManagedObjectComment;
@class RJManagedObjectMessage;
@class RJManagedObjectUser;
// Table-view cell rendering a comment, a message, or a comment-count
// summary row; the 'block' parameters are tap handlers.
@interface RJCommentCell : UITableViewCell
@property (nonatomic, strong, readonly) ActionLabel *commentLabel;
// When YES the label is inset to make room for an image view.
@property (nonatomic, assign) BOOL offsetForImageView;
- (void)updateWithMessage:(RJManagedObjectMessage *)message blockForSender:(void (^)(RJManagedObjectUser *sender))block;
- (void)updateWithComment:(RJManagedObjectComment *)comment blockForCreator:(void (^)(RJManagedObjectUser *creator))block;
- (void)updateWithNumberOfComments:(NSUInteger)number blockForSelection:(void (^)(void))block;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJTemplateManager.h
|
//
// RJTemplateManager.h
// Pods
//
// Created by <NAME> on 4/17/15.
//
//
#import <Foundation/Foundation.h>
// App template flavors selectable at runtime.
typedef NS_ENUM(NSUInteger, RJTemplateManagerType) {
    kRJTemplateManagerTypeCommunity,
    kRJTemplateManagerTypeClassifieds
};
// Singleton holding the active template type for the app.
@interface RJTemplateManager : NSObject
@property (nonatomic, assign) RJTemplateManagerType type;
+ (instancetype)sharedInstance;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJViewControllerDataSourceProtocol.h
|
<filename>Pod/Classes/RJViewControllerDataSourceProtocol.h
//
// RJViewControllerDataSourceProtocol.h
// Community
//
#import <Foundation/Foundation.h>
@protocol RJViewControllerDataSourceProtocol <NSObject>
- (void)fetchData;
- (void)reloadWithCompletion:(void (^)(BOOL success))completion;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJNotificationCell.h
|
//
// RJNotificationCell.h
// Community
//
#import <UIKit/UIKit.h>
@class ActionLabel;
// Table-view cell for a single notification row.
@interface RJNotificationCell : UITableViewCell
@property (nonatomic, strong, readonly) ActionLabel *actionLabel;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/UIImageView+RJAdditions.h
|
<gh_stars>0
//
// UIImageView+RJAdditions.h
// Community
//
#import <FastImageCache/FICEntity.h>
#import <UIKit/UIKit.h>
@interface UIImageView (RJAdditions)
// Loads the image for a FastImageCache entity/format, showing
// 'placeholder' until the cached image is available.
- (void)setImageEntity:(NSObject<FICEntity> *)entity formatName:(NSString *)formatName placeholder:(UIImage *)placeholder;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJProfileHeaderView.h
|
//
// RJProfileHeaderView.h
// Community
//
#import <UIKit/UIKit.h>
// Collection-view supplementary header for a user profile: identity
// labels/image plus the row of stat/action buttons, exposed read-only
// so the controller can wire targets and content.
@interface RJProfileHeaderView : UICollectionReusableView
@property (nonatomic, strong, readonly) UILabel *bio;
@property (nonatomic, strong, readonly) UIImageView *image;
@property (nonatomic, strong, readonly) UILabel *name;
@property (nonatomic, strong, readonly) UIButton *headerButton;
@property (nonatomic, strong, readonly) UIButton *followersButton;
@property (nonatomic, strong, readonly) UIButton *followingButton;
@property (nonatomic, strong, readonly) UIButton *categoriesButton;
@property (nonatomic, strong, readonly) UIButton *likesButton;
@property (nonatomic, strong, readonly) UIButton *postsButton;
@property (nonatomic, strong, readonly) UIButton *commentsButton;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJRemoteObjectFlag.h
|
<reponame>rahuljaswa/Snapp
//
// RJRemoteObjectFlag.h
// Community
//
#import "RJManagedObjectFlag.h"
#import <Parse/Parse.h>
@class RJRemoteObjectPost;
@class RJRemoteObjectUser;
// Parse-backed counterpart of RJManagedObjectFlag: 'creator' flagged
// 'post' for 'reason'.
@interface RJRemoteObjectFlag : PFObject <PFSubclassing>
@property (nonatomic, strong) RJRemoteObjectUser *creator;
// Soft-delete flag.
@property (nonatomic, assign) BOOL deleted;
@property (nonatomic, strong) RJRemoteObjectPost *post;
// Reuses the RJManagedObjectFlagReason enum from the Core Data layer.
@property (nonatomic, assign) RJManagedObjectFlagReason reason;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJParseUtils.h
|
<gh_stars>0
//
// RJParseUtils.h
// Community
//
#import "RJRemoteObjectFlag.h"
#import <Foundation/Foundation.h>
#import <Parse/Parse.h>
@class RJManagedObjectPostCategory;
@class RJManagedObjectLike;
@class RJManagedObjectUser;
@class RJManagedObjectPost;
@class RJManagedObjectThread;
@interface RJParseUtils : NSObject
+ (id)sharedInstance;
- (void)updateUser:(RJManagedObjectUser *)user
withImage:(UIImage *)image
remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)markThreadAsRead:(RJManagedObjectThread *)thread remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)createNewUserWithName:(NSString *)name image:(UIImage *)image remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)createNewThreadForPost:(RJManagedObjectPost *)post initialMessage:(NSString *)initialMessage remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)insertNewMessage:(NSString *)message inThread:(RJManagedObjectThread *)thread remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)followCategory:(RJManagedObjectPostCategory *)category remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)unfollowCategory:(RJManagedObjectPostCategory *)category remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)followUser:(RJManagedObjectUser *)user remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)unfollowUser:(RJManagedObjectUser *)user remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)blockUser:(RJManagedObjectUser *)user remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)unblockUser:(RJManagedObjectUser *)user remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)deletePost:(RJManagedObjectPost *)post remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)deleteLike:(RJManagedObjectLike *)like withPost:(RJManagedObjectPost *)post remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)createFlagWithPost:(RJManagedObjectPost *)post creator:(RJManagedObjectUser *)creator reason:(RJManagedObjectFlagReason)reason remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)createCommentWithPost:(RJManagedObjectPost *)post creator:(RJManagedObjectUser *)creator text:(NSString *)text remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)createLikeWithPost:(RJManagedObjectPost *)post creator:(RJManagedObjectUser *)creator remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)createPostWithName:(NSString *)name longDescription:(NSString *)longDescription images:(NSArray *)images existingCategories:(NSArray *)existingCategories createdCategories:(NSArray *)createdCategories forSale:(BOOL)forSale location:(CLLocation *)location locationDescription:(NSString *)locationDescription creator:(RJManagedObjectUser *)creator remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
- (void)updatePost:(RJManagedObjectPost *)post withName:(NSString *)name longDescription:(NSString *)longDescription images:(NSArray *)images existingCategories:(NSArray *)existingCategories createdCategories:(NSArray *)createdCategories forSale:(BOOL)forSale location:(CLLocation *)location locationDescription:(NSString *)locationDescription creator:(RJManagedObjectUser *)creator remoteSuccess:(void (^)(BOOL succeeded))remoteSuccess;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/UINavigationItem+RJAdditions.h
|
<filename>Pod/Classes/UINavigationItem+RJAdditions.h
//
// UINavigationItem+RJAdditions.h
// Community
//
#import <UIKit/UIKit.h>
@interface UINavigationItem (RJAdditions)
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJDataMarshallerOperation.h
|
//
// RJDataMarshallerOperation.h
// Pods
//
// Created by <NAME> on 3/12/15.
//
//
#import <Foundation/Foundation.h>
@interface RJDataMarshallerOperation : NSOperation
- (instancetype)initWithPFObjects:(NSArray *)pfObjects relation:(RJDataMarshallerPFRelation)relation targetCategory:(RJRemoteObjectCategory *)targetCategory targetUser:(RJRemoteObjectUser *)targetUser;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJCreateViewController.h
|
//
// RJCreateViewController.h
// Community
//
#import <UIKit/UIKit.h>
@class RJCreateViewController;
@protocol RJCreateViewControllerDelegate <NSObject>
- (void)createViewControllerDidCancel:(RJCreateViewController *)createViewController;
- (void)createViewControllerDidFinish:(RJCreateViewController *)createViewController;
@end
@class CLLocation;
@class RJManagedObjectPost;
@class RJManagedObjectUser;
@interface RJCreateViewController : UICollectionViewController
@property (nonatomic, weak) id<RJCreateViewControllerDelegate> delegate;
@property (nonatomic, assign, readonly) BOOL forSale;
@property (nonatomic, strong, readonly) CLLocation *location;
@property (nonatomic, strong, readonly) NSString *locationDescription;
@property (nonatomic, strong, readonly) NSString *name;
@property (nonatomic, strong, readonly) NSString *textDescription;
@property (nonatomic, strong, readonly) NSArray *selectedCreatedTags;
@property (nonatomic, strong, readonly) NSArray *selectedImages;
@property (nonatomic, strong, readonly) NSArray *selectedExistingTags;
@property (nonatomic, strong, readonly) RJManagedObjectUser *creator;
@property (nonatomic, strong, readonly) RJManagedObjectPost *post;
- (instancetype)initWithPost:(RJManagedObjectPost *)post;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJSwitchCell.h
|
//
// RJSwitchCell.h
// Pods
//
// Created by <NAME> on 3/2/15.
//
//
#import <UIKit/UIKit.h>
@interface RJSwitchCell : UICollectionViewCell
@property (nonatomic, strong, readonly) UISwitch *switchControl;
@property (nonatomic, strong, readonly) UILabel *textLabel;
@property (nonatomic, strong, readonly) UIView *bottomBorder;
@property (nonatomic, strong, readonly) UIView *topBorder;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJLocationPickerViewController.h
|
//
// RJLocationPickerViewController.h
// Pods
//
// Created by <NAME> on 4/5/15.
//
//
#import <UIKit/UIKit.h>
@class RJLocationPickerViewController;
@protocol RJLocationPickerViewControllerDelegate <NSObject>
- (void)locationPickerViewControllerSelectedLocationDidChange:(RJLocationPickerViewController *)locationPickerViewController;
@end
@class CLLocation;
@interface RJLocationPickerViewController : UITableViewController
@property (nonatomic, assign) id<RJLocationPickerViewControllerDelegate> delegate;
@property (nonatomic, strong) NSString *selectedLocationString;
@property (nonatomic, strong) CLLocation *selectedLocation;
- (instancetype)initWithInitiallySelectedLocation:(CLLocation *)location;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJLoadingCell.h
|
<reponame>rahuljaswa/Snapp
//
// RJLoadingCell.h
// Pods
//
// Created by <NAME> on 3/14/15.
//
//
#import <UIKit/UIKit.h>
@interface RJLoadingCell : UITableViewCell
@property (nonatomic, strong, readonly) UIActivityIndicatorView *spinner;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJUploadProgressViewController.h
|
//
// RJUploadProgressViewController.h
// Pods
//
// Created by <NAME> on 3/21/15.
//
//
#import <UIKit/UIKit.h>
@interface RJUploadProgressViewController : UITableViewController
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJManagedObjectThread.h
|
<reponame>rahuljaswa/Snapp<gh_stars>0
//
// RJManagedObjectThread.h
// NINEXX
//
// Created by <NAME> on 2/28/15.
// Copyright (c) 2015 <NAME>. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <CoreData/CoreData.h>
@class RJManagedObjectMessage;
@class RJManagedObjectPost;
@class RJManagedObjectUser;
@interface RJManagedObjectThread : NSManagedObject
@property (nonatomic, retain) RJManagedObjectUser *contacter;
@property (nonatomic, retain) NSDate *createdAt;
@property (nonatomic, retain) RJManagedObjectMessage *lastMessage;
@property (nonatomic, retain) NSSet *messages;
@property (nonatomic, retain) NSString *objectId;
@property (nonatomic, retain) RJManagedObjectPost *post;
@property (nonatomic, retain) NSSet *readReceipts;
@property (nonatomic, retain) NSDate *updatedAt;
@end
@interface RJManagedObjectThread (CoreDataGeneratedAccessors)
- (void)addMessagesObject:(RJManagedObjectMessage *)value;
- (void)removeMessagesObject:(RJManagedObjectMessage *)value;
- (void)addMessages:(NSSet *)values;
- (void)removeMessages:(NSSet *)values;
- (void)addReadReceiptsObject:(RJManagedObjectUser *)value;
- (void)removeReadReceiptsObject:(RJManagedObjectUser *)value;
- (void)addReadReceipts:(NSSet *)values;
- (void)removeReadReceipts:(NSSet *)values;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJPostHeaderView.h
|
//
// RJPostHeaderView.h
// Community
//
#import <UIKit/UIKit.h>
@class ActionLabel;
@interface RJPostHeaderView : UICollectionReusableView
@property (nonatomic, strong, readonly) UIView *bottomBorder;
@property (nonatomic, strong, readonly) UIImageView *imageView;
@property (nonatomic, strong, readonly) ActionLabel *userName;
@property (nonatomic, strong, readonly) UILabel *timestamp;
@property (nonatomic, strong, readonly) UILabel *name;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJPostCell.h
|
<filename>Pod/Classes/RJPostCell.h
//
// RJPostCell.h
// Community
//
#import <UIKit/UIKit.h>
@class RJManagedObjectPostCategory;
@class RJRemoteObjectLike;
@class RJRemoteObjectPost;
@class RJPostCell;
@class RJRemoteObjectUser;
@protocol RJPostCellDelegate <NSObject>
- (void)postCell:(RJPostCell *)postCell didPressLocation:(CLLocation *)location locationDescription:(NSString *)locationDescription;
- (void)postCell:(RJPostCell *)postCell didPressCategory:(RJManagedObjectPostCategory *)category;
- (void)postCell:(RJPostCell *)postCell didPressUser:(RJManagedObjectUser *)user;
- (void)postCellDidPressCommentButton:(RJPostCell *)postCell;
- (void)postCellDidPressLikeButton:(RJPostCell *)postCell;
- (void)postCellDidPressMessageButton:(RJPostCell *)postCell;
- (void)postCellDidPressMoreButton:(RJPostCell *)postCell;
@end
@class ActionLabel, RJManagedObjectPost;
@interface RJPostCell : UICollectionViewCell
@property (nonatomic, weak) id<RJPostCellDelegate> delegate;
@property (strong, nonatomic, readonly) RJManagedObjectLike *currentUserLike;
@property (strong, nonatomic) RJManagedObjectPost *post;
@property (strong, nonatomic, readonly) UIButton *commentButton;
@property (strong, nonatomic, readonly) UIButton *likeButton;
@property (strong, nonatomic, readonly) UIButton *messageButton;
@property (strong, nonatomic, readonly) UIButton *moreButton;
@property (strong, nonatomic, readonly) UITableView *detailsTable;
@property (strong, nonatomic, readonly) UICollectionView *imageCV;
- (void)preloadPostImages:(NSArray *)images;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJRemoteObjectPost.h
|
<filename>Pod/Classes/RJRemoteObjectPost.h
//
// RJRemoteObjectPost.h
// Community
//
#import <Parse/Parse.h>
@class RJRemoteObjectUser;
@interface RJRemoteObjectPost : PFObject <PFSubclassing>
@property (nonatomic, strong) NSString *appIdentifier;
@property (nonatomic, strong) NSArray *categories;
@property (nonatomic, strong) NSArray *comments;
@property (nonatomic, strong) RJRemoteObjectUser *creator;
@property (nonatomic, assign) BOOL deleted;
@property (nonatomic, assign) BOOL forSale;
@property (nonatomic, strong) NSArray *images;
@property (nonatomic, strong) NSArray *likes;
@property (nonatomic, strong) PFGeoPoint *location;
@property (nonatomic, strong) NSString *locationDescription;
@property (nonatomic, strong) NSString *longDescription;
@property (nonatomic, strong) NSString *name;
@property (nonatomic, assign) BOOL sold;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJManagedObjectImage.h
|
<gh_stars>0
//
// RJManagedObjectImage.h
// Community
//
#import <CoreData/CoreData.h>
#import <Foundation/Foundation.h>
@class RJManagedObjectPost;
@class RJManagedObjectPostCategory;
@class RJManagedObjectUser;
@interface RJManagedObjectImage : NSManagedObject
@property (nonatomic, retain) NSDate *createdAt;
@property (nonatomic, retain) NSString *imageURL;
@property (nonatomic, retain) NSSet *categories;
@property (nonatomic, retain) NSSet *posts;
@property (nonatomic, retain) NSSet *users;
@end
@interface RJManagedObjectImage (CoreDataGeneratedAccessors)
- (void)addCategoriesObject:(RJManagedObjectPostCategory *)value;
- (void)removeCategoriesObject:(RJManagedObjectPostCategory *)value;
- (void)addCategories:(NSSet *)values;
- (void)removeCategories:(NSSet *)values;
- (void)addPostsObject:(RJManagedObjectPost *)value;
- (void)removePostsObject:(RJManagedObjectPost *)value;
- (void)addPosts:(NSSet *)values;
- (void)removePosts:(NSSet *)values;
- (void)addUsersObject:(RJManagedObjectUser *)value;
- (void)removeUsersObject:(RJManagedObjectUser *)value;
- (void)addUsers:(NSSet *)values;
- (void)removeUsers:(NSSet *)values;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJUserCommentsViewController.h
|
//
// RJUserCommentsViewController.h
// Pods
//
// Created by <NAME> on 2/12/15.
//
//
#import "RJViewControllerDataSourceProtocol.h"
#import "RJGalleryViewController.h"
@class RJManagedObjectUser;
@interface RJUserCommentsViewController : RJGalleryViewController <RJViewControllerDataSourceProtocol>
- (instancetype)initWithCommenter:(RJManagedObjectUser *)commenter;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJAppDelegate.h
|
//
// RJAppDelegate.h
// Community
//
#import <UIKit/UIKit.h>
@class RJStyleManager;
@class RJTemplateManager;
@interface RJAppDelegate : UIResponder <UIApplicationDelegate>
@property (strong, nonatomic) UIWindow *window;
@property (strong, nonatomic, readonly) RJStyleManager *styleManager;
@property (strong, nonatomic, readonly) RJTemplateManager *templateManager;
- (void)authenticateWithCompletion:(void (^)(BOOL))completion;
- (void)requestNotificationsPermissionsWithCompletion:(void (^)(void))completion;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJMessagingViewController.h
|
//
// RJMessagingViewController.h
// Pods
//
// Created by <NAME> on 2/28/15.
//
//
#import "RJViewControllerDataSourceProtocol.h"
#import <ChatViewControllers/RJChatTableViewController.h>
@class RJManagedObjectThread;
@interface RJMessagingViewController : RJChatTableViewController <RJViewControllerDataSourceProtocol>
- (instancetype)initWithThread:(RJManagedObjectThread *)thread;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJGridCell.h
|
<filename>Pod/Classes/RJGridCell.h
//
// RJGridCell.h
// Community
//
#import <UIKit/UIKit.h>
@interface RJGridCell : UICollectionViewCell
@property (nonatomic, strong, readonly) UILabel *title;
@property (nonatomic, strong, readonly) UIImageView *image;
@property (nonatomic, strong, readonly) UIActivityIndicatorView *spinner;
@property (nonatomic, assign, getter = shouldDisableDuringLoading) BOOL disableDuringLoading;
@property (nonatomic, assign, getter = shouldMask) BOOL mask;
@property (nonatomic, strong) UIColor *selectedColor;
- (void)updateWithImage:(id)image formatName:(NSString *)formatName displaysLoadingIndicator:(BOOL)displaysLoadingIndicator;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJCategoryViewController.h
|
<filename>Pod/Classes/RJCategoryViewController.h
//
// RJCategoryViewController.h
// Community
//
#import "RJGalleryViewController.h"
#import "RJViewControllerDataSourceProtocol.h"
@class RJManagedObjectPostCategory;
@interface RJCategoryViewController : RJGalleryViewController <RJViewControllerDataSourceProtocol>
- (instancetype)initWithCategory:(RJManagedObjectPostCategory *)category;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJManagedObjectLike.h
|
<reponame>rahuljaswa/Snapp
//
// RJManagedObjectLike.h
// Community
//
#import <CoreData/CoreData.h>
#import <Foundation/Foundation.h>
@class RJManagedObjectPost;
@class RJManagedObjectUser;
@interface RJManagedObjectLike : NSManagedObject
@property (nonatomic, retain) NSDate *createdAt;
@property (nonatomic, retain) NSString *objectId;
@property (nonatomic, retain) RJManagedObjectUser *creator;
@property (nonatomic, retain) RJManagedObjectPost *post;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJRemoteObjectCategory.h
|
<reponame>rahuljaswa/Snapp<filename>Pod/Classes/RJRemoteObjectCategory.h
//
// RJRemoteObjectCategory.h
// Community
//
#import <Parse/Parse.h>
@interface RJRemoteObjectCategory : PFObject <PFSubclassing>
@property (nonatomic, strong) NSString *appIdentifier;
@property (nonatomic, assign, readonly) BOOL deleted;
@property (nonatomic, strong) NSString *name;
@property (nonatomic, strong) NSString *image;
@property (nonatomic, strong, readonly) PFRelation *followers;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJCollectionHeaderView.h
|
//
// RJCollectionHeaderView.h
// Community
//
#import <UIKit/UIKit.h>
@interface RJCollectionHeaderView : UICollectionReusableView
@property (nonatomic, strong, readonly) UILabel *label;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJMessageUserViewController.h
|
<reponame>rahuljaswa/Snapp<gh_stars>0
//
// RJMessageUserViewController.h
// Pods
//
// Created by <NAME> on 3/1/15.
//
//
#import <UIKit/UIKit.h>
@class RJMessageUserViewController;
@protocol RJMessageUserViewControllerDelegate <NSObject>
- (void)messageUserViewControllerDidPressDoneButton:(RJMessageUserViewController *)messageUserViewController;
@end
@class RJManagedObjectPost;
@interface RJMessageUserViewController : UIViewController
@property (nonatomic, weak) id<RJMessageUserViewControllerDelegate> delegate;
@property (nonatomic, strong, readonly) RJManagedObjectPost *post;
@property (nonatomic, strong, readonly) NSString *text;
- (instancetype)initWithPost:(RJManagedObjectPost *)post;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJFeedViewController.h
|
<reponame>rahuljaswa/Snapp
//
// RJFeedViewController.h
// Community
//
#import "RJViewControllerDataSourceProtocol.h"
#import <UIKit/UIKit.h>
@class CLLocation;
@class RJManagedObjectPost;
@interface RJFeedViewController : UICollectionViewController <RJViewControllerDataSourceProtocol>
@property (nonatomic, strong) NSArray *posts;
- (instancetype)initWithPost:(RJManagedObjectPost *)post;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJRelationsViewController.h
|
//
// RJRelationsViewController.h
// Community
//
#import "RJViewControllerDataSourceProtocol.h"
#import <UIKit/UIKit.h>
typedef NS_ENUM(NSUInteger, RJRelationsViewControllerUserRelationsType) {
kRJRelationsViewControllerUserRelationsTypeBlockedUsers,
kRJRelationsViewControllerUserRelationsTypeFollowers,
kRJRelationsViewControllerUserRelationsTypeFollowingUsers,
kRJRelationsViewControllerUserRelationsTypeFollowingCategories
};
typedef NS_ENUM(NSUInteger, RJRelationsViewControllerCategoryRelationsType) {
kRJRelationsViewControllerCategoryRelationsTypeCategoryFollowers
};
@class RJManagedObjectPostCategory;
@class RJManagedObjectUser;
@interface RJRelationsViewController : UITableViewController <RJViewControllerDataSourceProtocol>
- (instancetype)initWithCategoryRelationsType:(RJRelationsViewControllerCategoryRelationsType)categoryRelationsType category:(RJManagedObjectPostCategory *)category;
- (instancetype)initWithUserRelationsType:(RJRelationsViewControllerUserRelationsType)userRelationsType user:(RJManagedObjectUser *)user;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJStyleManager.h
|
//
// RJStyleManager.h
// Community
//
#import <UIKit/UIKit.h>
@interface RJStyleManager : NSObject
// *** APP OPTIONS ***
@property (nonatomic, assign) BOOL cropsImagesToSquares;
@property (nonatomic, assign) BOOL displaysImageResultsFromWeb;
// *** APPLICATION WIDE TINT COLORS ***
/*
@discussion Tint color used for accessories like borders and icons.
*/
@property (nonatomic, strong) UIColor *accessoryIconColor;
/*
@discussion Tint color used for `UIButton`s.
*/
@property (nonatomic, strong) UIColor *buttonBackgroundColor;
/*
@discussion Background color for text that is being touched by user.
*/
@property (nonatomic, strong) UIColor *highlightedBackgroundColor;
/*
@discussion Tint color used for icons and text on buttons.
*/
@property (nonatomic, strong) UIColor *iconTextColor;
/*
@discussion Tint color used for plain text, usually a shade of gray.
*/
@property (nonatomic, strong) UIColor *plainTextColor;
/*
@discussion Tint color used for themed text like borders and icons.
*/
@property (nonatomic, strong) UIColor *themedTextColor;
/*
@discussion Tint color used for things like text labels.
*/
@property (nonatomic, strong) UIColor *tintBlueColor;
// *** APPLICATION WIDE FONTS ***
/*
@discussion Plain-text font used in all text-based objects.
*/
@property (nonatomic, strong) UIFont *plainTextFont;
/*
@discussion Bold-text font used in all text-based objects.
*/
@property (nonatomic, strong) UIFont *boldTextFont;
/*
@discussion Title font used in all text-based objects.
*/
@property (nonatomic, strong) UIFont *titleFont;
// *** NAVIGATION BAR ***
/*
@discussion Font used with `...` text for more buttons.
*/
@property (nonatomic, strong) UIFont *moreButtonFont;
/*
@discussion Font used for `UINavigationBar`s.
*/
@property (nonatomic, strong) UIFont *navBarFont;
/*
@discussion Color used for `UINavigationBar` backgrounds.
*/
@property (nonatomic, strong) UIColor *themeColor;
/*
@discussion Color used for `UINavigationBar` text and `UIBarButtonItem`s.
*/
@property (nonatomic, strong) UIColor *windowTintColor;
// *** SPECIALIZED VIEWS ***
/*
@discussion Color used for loading state with images.
*/
@property (nonatomic, strong) UIColor *loadingImageBackgroundColor;
// *** ATTRIBUTES - LINKS ***
@property (nonatomic, strong, readonly) NSDictionary *boldLinkTextAttributes;
@property (nonatomic, strong, readonly) NSDictionary *grayLinkTextAttributes;
@property (nonatomic, strong, readonly) NSDictionary *highlightedBoldLinkTextAttributes;
@property (nonatomic, strong, readonly) NSDictionary *highlightedGrayLinkTextAttributes;
@property (nonatomic, strong, readonly) NSDictionary *highlightedLinkTextAttributes;
@property (nonatomic, strong, readonly) NSDictionary *linkTextAttributes;
// *** ATTRIBUTES - REGULAR TEXT ***
@property (nonatomic, strong, readonly) NSDictionary *boldTextAttributes;
@property (nonatomic, strong, readonly) NSDictionary *darkGrayTextAttributes;
@property (nonatomic, strong, readonly) NSDictionary *highlightedBoldTextAttributes;
@property (nonatomic, strong, readonly) NSDictionary *highlightedDarkGrayTextAttributes;
@property (nonatomic, strong, readonly) NSDictionary *plainTextAttributes;
// *** SINGLETON ***
+ (instancetype)sharedInstance;
// *** WINDOW CUSTOMIZATION ***
- (void)applyGlobalStylesToWindow:(UIWindow *)window;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJLabelCell.h
|
<filename>Pod/Classes/RJLabelCell.h
//
// RJLabelCell.h
// Community
//
#import <UIKit/UIKit.h>
#import <SZTextView/SZTextView.h>
typedef NS_ENUM(NSUInteger, RJLabelCellStyle) {
kRJLabelCellStyleTextLabel,
kRJLabelCellStyleTextField,
kRJLabelCellStyleTextView
};
@interface RJLabelCell : UICollectionViewCell
@property (nonatomic, assign) RJLabelCellStyle style;
@property (nonatomic, strong, readonly) UIImageView *accessoryView;
@property (nonatomic, strong, readonly) UITextField *textField;
@property (nonatomic, strong, readonly) UILabel *textLabel;
@property (nonatomic, strong, readonly) SZTextView *textView;
@property (nonatomic, strong, readonly) UIView *bottomBorder;
@property (nonatomic, strong, readonly) UIView *topBorder;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJTagPickerViewController.h
|
//
// RJTagPickerViewController.h
// Community
//
#import "RJViewControllerDataSourceProtocol.h"
#import <UIKit/UIKit.h>
@class RJTagPickerViewController;
@protocol RJTagPickerViewControllerDelegate <NSObject>
- (void)tagPickerViewControllerSelectedTagsDidChange:(RJTagPickerViewController *)tagPickerViewController;
@end
@interface RJTagPickerViewController : UIViewController
@property (nonatomic, strong, readonly) NSArray *selectedCreatedTags;
@property (nonatomic, strong, readonly) NSArray *selectedExistingTags;
@property (nonatomic, weak) id<RJTagPickerViewControllerDelegate> pickerDelegate;
- (instancetype)initWithInitiallySelectedExistingTags:(NSArray *)initiallySelectedExistingTags initiallySelectedCreatedTags:(NSArray *)initiallySelectedCreatedTags;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJRemoteObjectUser.h
|
<filename>Pod/Classes/RJRemoteObjectUser.h
//
// RJRemoteObjectUser.h
// Community
//
#import <Parse/Parse.h>
@interface RJRemoteObjectUser : PFUser
@property (nonatomic, strong, readonly) PFRelation *blockedUsers;
@property (nonatomic, strong, readonly) PFRelation *followingCategories;
@property (nonatomic, strong, readonly) PFRelation *followingUsers;
@property (nonatomic, strong) NSArray *communityMemberships;
@property (nonatomic, assign) BOOL deleted;
@property (nonatomic, strong) PFFile *image;
@property (nonatomic, strong) NSString *name;
@property (nonatomic, strong) NSString *phone;
@property (nonatomic, assign) BOOL skeleton;
@property (nonatomic, strong) NSString *twitterDigitsUserID;
@property (nonatomic, assign) BOOL admin;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJUserLikesViewController.h
|
//
// RJUserLikesViewController.h
// Community
//
#import "RJGalleryViewController.h"
#import "RJViewControllerDataSourceProtocol.h"
@class RJManagedObjectUser;
@interface RJUserLikesViewController : RJGalleryViewController <RJViewControllerDataSourceProtocol>
- (instancetype)initWithLiker:(RJManagedObjectUser *)liker;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJOverflowGridLayout.h
|
//
// RJOverflowGridLayout.h
// Pods
//
// Created by <NAME> on 3/20/15.
//
//
#import <UIKit/UIKit.h>
@interface RJOverflowGridLayout : UICollectionViewLayout
@property (nonatomic, assign) CGFloat sideLength;
@end
|
rahuljaswa/Snapp
|
Pod/Classes/RJProfileViewController.h
|
//
// RJProfileViewController.h
// Community
//
#import "RJGalleryViewController.h"
#import "RJViewControllerDataSourceProtocol.h"
#import <UIKit/UIKit.h>
@class RJManagedObjectUser;
@interface RJProfileViewController : RJGalleryViewController <RJViewControllerDataSourceProtocol>
@property (nonatomic, assign) BOOL showsSettingsButton;
- (instancetype)initWithUser:(RJManagedObjectUser *)user;
@end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.