repo_name
stringlengths 5
122
| path
stringlengths 3
232
| text
stringlengths 6
1.05M
|
|---|---|---|
ic-lab-duth/NoCpad
|
src/include/fifo_queue_oh.h
|
#ifndef __FIFO_QUEUE__
#define __FIFO_QUEUE__
#include "../include/duth_fun.h"
#include "../include/nvhls_assert.h"
template <typename T, unsigned SIZE>
// HLS-friendly FIFO queue with fully one-hot state: pointers and the
// occupancy counter are kept one-hot so that full/empty checks become single
// bit-selects and the read mux is a simple one-hot case mux.
class fifo_queue {
public :
  //typedef sc_uint< clog2<SIZE>::val > wb_depth_t;
  T mem[SIZE];                // storage slots
  sc_uint<SIZE> push_ptr;     // one-hot write pointer: bit i set => next push writes mem[i]
  sc_uint<SIZE> pop_ptr;      // one-hot read pointer : bit i set => next pop reads mem[i]
  sc_uint<SIZE+1> item_count; // one-hot occupancy    : bit k set => k items stored
public :
  fifo_queue(){
    reset();
  };
  // Restore the empty state: both pointers at slot [0], zero items.
  void reset () {
    push_ptr = 1;   // points [0] in one hot
    pop_ptr = 1;    // points [0] in one hot
    item_count = 1; // empty buffer when item_count[0]
                    // full buffer when item_count[SIZE]
  };
  // Non Intrusive status checks (single bit-selects thanks to one-hot encoding)
  inline bool full() const {return (item_count[SIZE]);};
  inline bool ready() const {return !full();};
  inline bool empty() const {return (item_count[0]);};
  inline bool valid() const {return !empty();};
  // Return the head-of-line item without removing it.
  inline T peek() const {
    return mux<T, SIZE>::mux_oh_case(pop_ptr, mem);
  };
  // Write push_val into the slot selected by push_ptr and advance the pointer,
  // WITHOUT touching item_count; the caller accounts for occupancy separately
  // (e.g. via set_count()).
  inline void push_no_count_incr (T &push_val) {
    NVHLS_ASSERT_MSG(!full(), "Pushing on FULL!")
#pragma hls_unroll yes
    for (int i=0; i<SIZE; ++i) {
      bool enable = (push_ptr>>i) & 1;
      if (enable && !full()) mem[i] = push_val;
    }
    inc_push_ptr();
  }
  // Regular push: store the item, advance the write pointer and the occupancy.
  // Delegates to push_no_count_incr() instead of duplicating the unrolled
  // one-hot write loop.
  inline void push(T &push_val) {
    push_no_count_incr(push_val);
    incr_count();
  };
  // Remove and return the head-of-line item.
  inline T pop() {
    NVHLS_ASSERT_MSG(!empty(), "Popping on EMPTY!")
    T mule = mux<T, SIZE>::mux_oh_case(pop_ptr, mem);
    inc_pop_ptr();
    decr_count();
    return mule;
  };
  // Conditional push: writes and advances the pointer only when pushed==true.
  // item_count is NOT updated here; pair with set_count().
  inline void try_push(bool pushed, T &push_val) {
#pragma hls_unroll yes
    for (int i = 0; i < SIZE; ++i) {
      bool enable = ((push_ptr >> i) & 1) && pushed; // ToDo : for some reason catapult uses rshift mod/func instead of statically select the bit.
      if (enable) mem[i] = push_val;
    }
    if (pushed) inc_push_ptr();
  };
  // Net occupancy update for one cycle: a lone push shifts the one-hot count
  // up, a lone pop shifts it down, and a simultaneous push+pop cancels out.
  inline void set_count(bool pushed, bool popped) {
    if ( pushed && !popped) item_count = (item_count << 1);
    else if (!pushed && popped) item_count = (item_count >> 1);
  }
  // Rotate the one-hot pointers left by one slot, wrapping the MSB back to bit 0.
  inline void inc_pop_ptr() { pop_ptr = (pop_ptr <<1) | ((pop_ptr >>(SIZE-1))&1);};
  inline void inc_push_ptr() { push_ptr = (push_ptr<<1) | ((push_ptr>>(SIZE-1))&1);};
  inline void incr_count() {item_count = (item_count << 1);};
  inline void decr_count() {item_count = (item_count >> 1);};
};
#endif // #define __FIFO_QUEUE__
|
ic-lab-duth/NoCpad
|
src/include/dnp_ace_v0.h
|
#ifndef __DNP_ACE_DEF__
#define __DNP_ACE_DEF__
// Definition of Duth Network Protocol for ACE network.
// Interconnect's internal packetization protocol
namespace dnp {
// Widths and bit offsets of the common DNP header fields that every packet
// carries in its first phit.
enum {
  // !!!! THIS MUST BE 24. 20 is temp for router synth!!!
  PHIT_W = 24, // Phit Width
  V_W = 2,     // Virtual Channel
  S_W = 3,     // Source
  D_W = 3,     // Destination
  Q_W = 3,     // QoS
  T_W = 3,     // Type
  // Bit offsets inside phit #0; fields are packed back-to-back in this order.
  V_PTR = 0,
  S_PTR = (V_PTR + V_W),
  D_PTR = (S_PTR + S_W),
  Q_PTR = (D_PTR + D_W),
  T_PTR = (Q_PTR + Q_W),
};
// Packing layout of ACE/AXI channel fields onto DNP phits.
// For each packet type, *_PHIT selects which phit of the packet carries a
// field and *_PTR gives the field's bit offset inside that phit.
class ace {
public:
  // Field widths (bits) of the transported AXI/ACE channel fields.
  enum {
    // AXI RELATED WIDTHS
    ID_W = 4,   // AXI Transaction ID
    BU_W = 2,   // AXI Burst
    SZ_W = 3,   // AXI Size
    LE_W = 8,   // AXI Length
    AL_W = 16,  // Address Low
    AH_W = 16,  // Address High
    AP_W = 8,   // Address part (for alignment)
    W_RE_W = 2, // AXI Write Response
    R_RE_W = 4, // ACE Read response
    B_W = 8,    // Byte Width ...
    E_W = 1,    // Enable width
    LA_W = 1,   // AXI Last
    // ACE RELATED WIDTHS
    SNP_W = 4,
    DOM_W = 2,
    BAR_W = 2,
    UNQ_W = 1,
    C_PROT_W = 3,
    C_RESP_W = 5,
    C_HAS_DATA_W = 1,
  };
  // Read/Write request packet: header+ID/domain/snoop in phit 0,
  // low address + length in phit 1, high address + burst info in phit 2.
  struct req {
    enum {
      // PHIT #0
      ID_PHIT = 0,
      DOM_PHIT = 0,
      SNP_PHIT = 0,
      ID_PTR = T_PTR+T_W, // packed right after the common DNP header
      DOM_PTR = ID_PTR + ID_W,
      SNP_PTR = DOM_PTR + DOM_W,
      // PHIT #1
      AL_PHIT = 1,
      LE_PHIT = 1,
      AL_PTR = 0,
      LE_PTR = AL_PTR+AL_W,
      // PHIT #2
      AH_PHIT = 2,
      SZ_PHIT = 2,
      BU_PHIT = 2,
      BAR_PHIT = 2,
      UNQ_PHIT = 2,
      AH_PTR = 0,
      SZ_PTR = AH_PTR+AH_W,
      BU_PTR = SZ_PTR+SZ_W,
      BAR_PTR = BU_PTR+BU_W,
      UNQ_PTR = BAR_PTR+BAR_W,
    };
  };
  // Write response packet: single info phit.
  struct wresp {
    enum {
      // PHIT #0
      ID_PHIT = 0,
      RESP_PHIT = 0,
      ID_PTR = T_PTR+T_W,
      RESP_PTR = ID_PTR+ID_W,
    };
  };
  // Read response packet: ID/burst in phit 0, size/length/alignment in phit 1.
  struct rresp {
    enum {
      // PHIT #0
      ID_PHIT = 0,
      BU_PHIT = 0,
      ID_PTR = T_PTR+T_W,
      BU_PTR = ID_PTR+ID_W,
      // PHIT #1
      SZ_PHIT = 1,
      LE_PHIT = 1,
      AP_PHIT = 1,
      SZ_PTR = 0,
      LE_PTR = SZ_PTR+SZ_W,
      AP_PTR = LE_PTR+LE_W,
    };
  };
  // Write data beat layout: two data bytes plus last flag and byte enables.
  struct wdata {
    enum {
      B0_PTR = 0,
      B1_PTR = B0_PTR+B_W,
      LA_PTR = B1_PTR+B_W,
      E0_PTR = LA_PTR+LA_W,
      E1_PTR = E0_PTR+E_W,
    };
  };
  // Read data beat layout: two data bytes plus last flag and read response.
  struct rdata {
    enum {
      B0_PTR = 0,
      B1_PTR = B0_PTR+B_W,
      LA_PTR = B1_PTR+B_W,
      RE_PTR = LA_PTR+LA_W,
    };
  };
  // ACE Extension
  // Coherence (snoop) request towards cached masters.
  struct creq {
    enum {
      // PHIT #1
      AL_PHIT = 1,
      SNP_PHIT = 1,
      AL_PTR = 0,
      SNP_PTR = AL_PTR+AL_W,
      // PHIT #2
      AH_PHIT = 2,
      C_PROT_PHIT = 2,
      AH_PTR = 0,
      C_PROT_PTR = AH_PTR+AH_W,
    };
  };
  // Coherence (snoop) response from cached masters.
  struct cresp {
    enum {
      // PHIT #0
      C_RESP_PHIT = 0,
      C_HAS_DATA_PHIT = 0,
      C_RESP_PTR = T_PTR+T_W,
      C_HAS_DATA_PTR = C_RESP_PTR + C_RESP_W,
    };
  };
}; // class ACE
// Packet type codes carried in the DNP header T field (dnp::T_W bits).
// C_* values are the ACE coherence extensions of the plain AXI types.
enum PACK_TYPE {
  PACK_TYPE__WR_REQ = 0,
  PACK_TYPE__WR_RESP = 1,
  PACK_TYPE__RD_REQ = 2,
  PACK_TYPE__RD_RESP = 3,
  PACK_TYPE__C_RD_REQ = 4,
  PACK_TYPE__C_RD_RESP = 5,
  PACK_TYPE__C_WR_REQ = 6,
  PACK_TYPE__C_WR_RESP = 7
  //PACK_TYPE__SNP_REQ = ?
  //PACK_TYPE__SNP_RESP = ?
};
}; // namespace dnp
#endif // __DNP_ACE_DEF__
|
ic-lab-duth/NoCpad
|
examples/nocpad_ACE_4m-2s_1stage/ic_top.h
|
<reponame>ic-lab-duth/NoCpad
#ifndef _ACE_IC_TOP_H_
#define _ACE_IC_TOP_H_
#pragma once
#include "../../src/ace/acelite_master_if.h"
#include "../../src/ace/ace_master_if.h"
#include "../../src/ace/ace_slave_if.h"
#include "../../src/ace/ace_home.h"
#include "../../src/router_wh.h"
#include "systemc.h"
#include "nvhls_connections.h"
#pragma hls_design top
// Bundle of configuration parameters
template <
  unsigned char HOME_NUM_,                                                    // number of home (coherence directory) nodes
  unsigned char FULL_MASTER_NUM_, unsigned char LITE_MASTER_NUM_ , unsigned char SLAVE_NUM_, // endpoint counts
  unsigned char RD_LANES_ , unsigned char WR_LANES_,                          // data lanes (bytes) per read/write beat
  unsigned char RREQ_PHITS_ , unsigned char RRESP_PHITS_,                     // flit sizes (phits) for read req/resp
  unsigned char WREQ_PHITS_ , unsigned char WRESP_PHITS_,                     // flit sizes (phits) for write req/resp
  unsigned char CREQ_PHITS_ , unsigned char CRESP_PHITS_                      // flit sizes (phits) for coherence req/resp
>
struct cfg {
  static const unsigned char HOME_NUM = HOME_NUM_;
  static const unsigned char FULL_MASTER_NUM = FULL_MASTER_NUM_;
  static const unsigned char LITE_MASTER_NUM = LITE_MASTER_NUM_;
  // Convenience total: full ACE masters followed by ACE-Lite masters.
  static const unsigned char ALL_MASTER_NUM = FULL_MASTER_NUM_ + LITE_MASTER_NUM;
  static const unsigned char SLAVE_NUM = SLAVE_NUM_;
  static const unsigned char RD_LANES = RD_LANES_;
  static const unsigned char WR_LANES = WR_LANES_;
  static const unsigned char RREQ_PHITS = RREQ_PHITS_;
  static const unsigned char RRESP_PHITS = RRESP_PHITS_;
  static const unsigned char WREQ_PHITS = WREQ_PHITS_;
  static const unsigned char WRESP_PHITS = WRESP_PHITS_;
  static const unsigned char CREQ_PHITS = CREQ_PHITS_;
  static const unsigned char CRESP_PHITS = CRESP_PHITS_;
};
// The used configuration: 1 Home, 4 Full ACE Masters, 0 ACE-Lite Masters, 2 Slaves.
typedef cfg<1, 4, 0, 2,                                                                                // HOME, FULL_M, LITE_M, SLAVE
  (ace::ace5<axi::cfg::ace>::C_CACHE_WIDTH >> 3),                                                      // RD_LANES:    bits to bytes
  (ace::ace5<axi::cfg::ace>::C_CACHE_WIDTH >> 3),                                                      // WR_LANES:    bits to bytes
  (ace::ace5<axi::cfg::ace>::C_CACHE_WIDTH < 64) ? 4 : (ace::ace5<axi::cfg::ace>::C_CACHE_WIDTH >> 4), // RREQ_PHITS:  bits to phits
  (ace::ace5<axi::cfg::ace>::C_CACHE_WIDTH < 64) ? 4 : (ace::ace5<axi::cfg::ace>::C_CACHE_WIDTH >> 4), // RRESP_PHITS: bits to phits
  (ace::ace5<axi::cfg::ace>::C_CACHE_WIDTH < 64) ? 4 : (ace::ace5<axi::cfg::ace>::C_CACHE_WIDTH >> 4), // WREQ_PHITS:  bits to phits
  1,                                                                                                   // WRESP_PHITS
  3,                                                                                                   // CREQ_PHITS
  (ace::ace5<axi::cfg::ace>::C_CACHE_WIDTH < 64) ? 4 : (ace::ace5<axi::cfg::ace>::C_CACHE_WIDTH >> 4)  // CRESP_PHITS: bits to phits
> smpl_cfg;
// Top-level ACE interconnect: master/slave interfaces and home node(s)
// connected through seven wormhole-router NoCs (rd req/resp, wr req/resp,
// cache req/resp, acks). Node-ID numbering: [0..SLAVE_NUM) slaves,
// then masters, then home nodes (see the route_* LUT setup below).
SC_MODULE(ic_top) {
public:
  // typedef matchlib's axi with the "standard" configuration
  typedef typename ace::ace5<axi::cfg::ace> ace5_;
  // typedef the 4 kind of flits(RD/WR Req/Resp) depending their size
  typedef flit_dnp<smpl_cfg::RREQ_PHITS> rreq_flit_t;
  typedef flit_dnp<smpl_cfg::RRESP_PHITS> rresp_flit_t;
  typedef flit_dnp<smpl_cfg::WREQ_PHITS> wreq_flit_t;
  typedef flit_dnp<smpl_cfg::WRESP_PHITS> wresp_flit_t;
  typedef flit_dnp<smpl_cfg::CREQ_PHITS> creq_flit_t;
  typedef flit_dnp<smpl_cfg::CRESP_PHITS> cresp_flit_t;
  typedef flit_ack ack_flit_t;
  // Total network endpoints (slaves + all masters + homes).
  static const unsigned NODES = smpl_cfg::SLAVE_NUM+smpl_cfg::ALL_MASTER_NUM+smpl_cfg::HOME_NUM;
  sc_in_clk clk;
  sc_in <bool> rst_n;
  // IC's Address map
  sc_in<sc_uint <32> > addr_map[smpl_cfg::SLAVE_NUM][2]; // [SLAVE_NUM][0:begin, 1: End]
  // The Node IDs are passed to IFs as signals
  sc_signal< sc_uint<dnp::D_W> > NODE_IDS[NODES];
  // MASTER Side AXI Channels
  // --- ACE --- //
  Connections::Out<ace5_::AC> ac_out[smpl_cfg::FULL_MASTER_NUM];
  Connections::In<ace5_::CR> cr_in[smpl_cfg::FULL_MASTER_NUM];
  Connections::In<ace5_::CD> cd_in[smpl_cfg::FULL_MASTER_NUM];
  // --- Read --- //
  Connections::In<ace5_::AddrPayload> ar_in[smpl_cfg::ALL_MASTER_NUM];
  Connections::Out<ace5_::ReadPayload> r_out[smpl_cfg::ALL_MASTER_NUM];
  Connections::In<ace5_::RACK> rack_in[smpl_cfg::FULL_MASTER_NUM];
  // --- Write --- //
  Connections::In<ace5_::AddrPayload> aw_in[smpl_cfg::ALL_MASTER_NUM];
  Connections::In<ace5_::WritePayload> w_in[smpl_cfg::ALL_MASTER_NUM];
  Connections::Out<ace5_::WRespPayload> b_out[smpl_cfg::ALL_MASTER_NUM];
  Connections::In<ace5_::WACK> wack_in[smpl_cfg::FULL_MASTER_NUM];
  // SLAVE Side AXI Channels
  Connections::Out<ace5_::AddrPayload> ar_out[smpl_cfg::SLAVE_NUM];
  Connections::In<ace5_::ReadPayload> r_in[smpl_cfg::SLAVE_NUM];
  Connections::Out<ace5_::AddrPayload> aw_out[smpl_cfg::SLAVE_NUM];
  Connections::Out<ace5_::WritePayload> w_out[smpl_cfg::SLAVE_NUM];
  Connections::In<ace5_::WRespPayload> b_in[smpl_cfg::SLAVE_NUM];
  //--- Internals ---//
  // Master/Slave IFs
  ace_master_if < smpl_cfg > *master_if[smpl_cfg::FULL_MASTER_NUM];
  acelite_master_if < smpl_cfg > *master_lite_if[smpl_cfg::LITE_MASTER_NUM];
  ace_slave_if < smpl_cfg > *slave_if[smpl_cfg::SLAVE_NUM];
  ace_home < smpl_cfg > *home[smpl_cfg::HOME_NUM];
  // NoC Channels
  // READ Fwd Req, master+home -> slaves+home
  sc_signal<sc_uint<dnp::D_W> > route_rd_req[NODES];
  router_wh_top< smpl_cfg::ALL_MASTER_NUM+smpl_cfg::HOME_NUM, smpl_cfg::SLAVE_NUM+smpl_cfg::HOME_NUM, rreq_flit_t, 4, 0, NODES> INIT_S1(rtr_rd_req);
  Connections::Combinational<rreq_flit_t> chan_rd_m2r[smpl_cfg::ALL_MASTER_NUM];  // M-IF_to_Rtr
  Connections::Combinational<rreq_flit_t> chan_rd_r2s[smpl_cfg::SLAVE_NUM];       // Rtr_to_S-IF
  Connections::Combinational<rreq_flit_t> chan_rd_req_r2h[smpl_cfg::HOME_NUM];    // Rtr_to_Home
  Connections::Combinational<rreq_flit_t> chan_rd_req_h2r[smpl_cfg::HOME_NUM];    // Home_to_Rtr
  // READ Bck Resp, slaves+home -> home+masters
  sc_signal<sc_uint<dnp::D_W> > route_rd_resp[NODES];
  router_wh_top<smpl_cfg::SLAVE_NUM+smpl_cfg::HOME_NUM, smpl_cfg::ALL_MASTER_NUM+smpl_cfg::HOME_NUM, rresp_flit_t, 4, 0, NODES> INIT_S1(rtr_rd_resp);
  Connections::Combinational<rresp_flit_t> chan_rd_s2r[smpl_cfg::SLAVE_NUM];      // S-IF_to_Rtr
  Connections::Combinational<rresp_flit_t> chan_rd_r2m[smpl_cfg::ALL_MASTER_NUM]; // Rtr_to_M-IF
  Connections::Combinational<rresp_flit_t> chan_rd_resp_r2h[smpl_cfg::HOME_NUM];  // Rtr_to_Home
  Connections::Combinational<rresp_flit_t> chan_rd_resp_h2r[smpl_cfg::HOME_NUM];  // Home_to_Rtr
  // WRITE fwd Req, Router+In/Out Channels
  sc_signal<sc_uint<dnp::D_W> > route_wr_req[NODES];
  router_wh_top< smpl_cfg::ALL_MASTER_NUM+smpl_cfg::HOME_NUM, smpl_cfg::SLAVE_NUM+smpl_cfg::HOME_NUM, wreq_flit_t, 4, 0, NODES> INIT_S1(rtr_wr_req);
  Connections::Combinational<wreq_flit_t> chan_wr_m2r[smpl_cfg::ALL_MASTER_NUM];  // M-IF_to_Rtr
  Connections::Combinational<wreq_flit_t> chan_wr_r2s[smpl_cfg::SLAVE_NUM];       // Rtr_to_S-IF
  Connections::Combinational<wreq_flit_t> chan_wr_req_r2h[smpl_cfg::HOME_NUM];    // Rtr_to_Home
  Connections::Combinational<wreq_flit_t> chan_wr_req_h2r[smpl_cfg::HOME_NUM];    // Home_to_Rtr
  // WRITE Bck Resp, Router+In/Out Channels
  sc_signal<sc_uint<dnp::D_W> > route_wr_resp[NODES];
  router_wh_top<smpl_cfg::SLAVE_NUM+smpl_cfg::HOME_NUM, smpl_cfg::ALL_MASTER_NUM+smpl_cfg::HOME_NUM, wresp_flit_t, 4, 0, NODES> INIT_S1(rtr_wr_resp);
  Connections::Combinational<wresp_flit_t> chan_wr_s2r[smpl_cfg::SLAVE_NUM];      // S-IF_to_Rtr
  Connections::Combinational<wresp_flit_t> chan_wr_r2m[smpl_cfg::ALL_MASTER_NUM]; // Rtr_to_M-IF
  Connections::Combinational<wresp_flit_t> chan_wr_resp_r2h[smpl_cfg::HOME_NUM];  // Rtr_to_Home
  Connections::Combinational<wresp_flit_t> chan_wr_resp_h2r[smpl_cfg::HOME_NUM];  // Home_to_Rtr
  // CACHE fwd Req (snoops), home -> full masters only
  sc_signal<sc_uint<dnp::D_W> > route_cache_req[NODES];
  router_wh_top<smpl_cfg::HOME_NUM, smpl_cfg::FULL_MASTER_NUM, creq_flit_t, 4, 0, NODES> INIT_S1(rtr_cache_req);
  Connections::Combinational<creq_flit_t> chan_creq_h2r[smpl_cfg::HOME_NUM];        // Home_to_Rtr
  Connections::Combinational<creq_flit_t> chan_creq_r2m[smpl_cfg::FULL_MASTER_NUM]; // Rtr_to_M-IF
  // CACHE Bck Resp, full masters -> home
  sc_signal<sc_uint<dnp::D_W> > route_cache_resp[NODES];
  router_wh_top<smpl_cfg::FULL_MASTER_NUM, smpl_cfg::HOME_NUM, cresp_flit_t, 4, 0, NODES> INIT_S1(rtr_cache_resp);
  Connections::Combinational<cresp_flit_t> chan_cresp_m2r[smpl_cfg::FULL_MASTER_NUM]; // M-IF_to_Rtr
  Connections::Combinational<cresp_flit_t> chan_cresp_r2h[smpl_cfg::HOME_NUM];        // Rtr_to_Home
  // Master read+write ACKs back to HOME (2 ack ports per full master: rack, wack)
  sc_signal<sc_uint<dnp::D_W> > route_acks[NODES];
  router_wh_top<smpl_cfg::FULL_MASTER_NUM*2, smpl_cfg::HOME_NUM, ack_flit_t, 4, 0, NODES> INIT_S1(rtr_acks);
  Connections::Combinational<ack_flit_t> chan_acks_m2r[smpl_cfg::FULL_MASTER_NUM*2]; // M-IF_to_Rtr
  Connections::Combinational<ack_flit_t> chan_acks_r2h[smpl_cfg::HOME_NUM];          // Rtr_to_Home
  // Dummy id for the routers' unused x/y coordinate ports (LUT routing is used instead).
  sc_signal< sc_uint<dnp::D_W> > rtr_id_dummmy;
  SC_CTOR(ic_top) {
    rtr_id_dummmy = 0;
    // Node IDs are simply the endpoint index: slaves, then masters, then homes.
    for (unsigned i=0; i<smpl_cfg::HOME_NUM+smpl_cfg::ALL_MASTER_NUM+smpl_cfg::SLAVE_NUM; ++i)
      NODE_IDS[i] = i;
    // ------------------ //
    // --- SLAVE-IFs --- //
    // -------------------//
    for(unsigned char j=0; j<smpl_cfg::SLAVE_NUM; ++j){
      slave_if[j] = new ace_slave_if < smpl_cfg > (sc_gen_unique_name("Slave-if"));
      slave_if[j]->clk(clk);
      slave_if[j]->rst_n(rst_n);
      slave_if[j]->THIS_ID(NODE_IDS[j]);
      slave_if[j]->slave_base_addr(addr_map[j][0]);
      // Read-NoC
      slave_if[j]->rd_flit_in(chan_rd_r2s[j]);
      slave_if[j]->rd_flit_out(chan_rd_s2r[j]);
      // Write-NoC
      slave_if[j]->wr_flit_in(chan_wr_r2s[j]);
      slave_if[j]->wr_flit_out(chan_wr_s2r[j]);
      // Slave-Side
      slave_if[j]->ar_out(ar_out[j]);
      slave_if[j]->r_in(r_in[j]);
      slave_if[j]->aw_out(aw_out[j]);
      slave_if[j]->w_out(w_out[j]);
      slave_if[j]->b_in(b_in[j]);
    }
    // ------------------------------ //
    // --- MASTER-IFs Connectivity--- //
    // ------------------------------ //
    // Connect each Master-IF to the appropriate channels
    for(int i=0; i<smpl_cfg::FULL_MASTER_NUM; ++i){
      master_if[i] = new ace_master_if < smpl_cfg > (sc_gen_unique_name("Master-if"));
      master_if[i]->clk(clk);
      master_if[i]->rst_n(rst_n);
      // Pass the address Map
      for (int n=0; n<smpl_cfg::SLAVE_NUM; ++n)  // Iterate Slaves
        for (int s=0; s<2; ++s)                  // Iterate Begin-End Values
          master_if[i]->addr_map[n][s](addr_map[n][s]);
      master_if[i]->THIS_ID(NODE_IDS[i+smpl_cfg::SLAVE_NUM]); // master IDs follow the slave IDs
      // Master-AXI-Side
      master_if[i]->ac_out(ac_out[i]);
      master_if[i]->cr_in(cr_in[i]);
      master_if[i]->cd_in(cd_in[i]);
      master_if[i]->ar_in(ar_in[i]);
      master_if[i]->r_out(r_out[i]);
      master_if[i]->rack_in(rack_in[i]);
      master_if[i]->aw_in(aw_in[i]);
      master_if[i]->w_in(w_in[i]);
      master_if[i]->b_out(b_out[i]);
      master_if[i]->wack_in(wack_in[i]);
      // Read-NoC
      master_if[i]->rd_flit_out(chan_rd_m2r[i]);
      master_if[i]->rd_flit_in(chan_rd_r2m[i]);
      master_if[i]->rack_flit_out(chan_acks_m2r[i]);
      // Write-NoC (wacks use the upper half of the ack channel array)
      master_if[i]->wr_flit_out(chan_wr_m2r[i]);
      master_if[i]->wr_flit_in(chan_wr_r2m[i]);
      master_if[i]->wack_flit_out(chan_acks_m2r[smpl_cfg::FULL_MASTER_NUM+i]);
      // Cache-NoC
      master_if[i]->cache_flit_in(chan_creq_r2m[i]);
      master_if[i]->cache_flit_out(chan_cresp_m2r[i]);
    }
    // Connect ACE LITE Master-IFs to the appropriate channels
    for(int i=0; i<smpl_cfg::LITE_MASTER_NUM; ++i){
      master_lite_if[i] = new acelite_master_if < smpl_cfg > (sc_gen_unique_name("Master-Lite-if"));
      master_lite_if[i]->clk(clk);
      master_lite_if[i]->rst_n(rst_n);
      // Pass the address Map
      for (int n=0; n<smpl_cfg::SLAVE_NUM; ++n)  // Iterate Slaves
        for (int s=0; s<2; ++s)                  // Iterate Begin-End Values
          master_lite_if[i]->addr_map[n][s](addr_map[n][s]);
      master_lite_if[i]->THIS_ID(NODE_IDS[i+smpl_cfg::SLAVE_NUM+smpl_cfg::FULL_MASTER_NUM]); // lite masters follow the full masters
      // Master-AXI-Side (lite masters share the tail of the ALL_MASTER arrays)
      master_lite_if[i]->ar_in(ar_in[i+smpl_cfg::FULL_MASTER_NUM]);
      master_lite_if[i]->r_out(r_out[i+smpl_cfg::FULL_MASTER_NUM]);
      master_lite_if[i]->aw_in(aw_in[i+smpl_cfg::FULL_MASTER_NUM]);
      master_lite_if[i]->w_in(w_in[i+smpl_cfg::FULL_MASTER_NUM]);
      master_lite_if[i]->b_out(b_out[i+smpl_cfg::FULL_MASTER_NUM]);
      // Read-NoC
      master_lite_if[i]->rd_flit_out(chan_rd_m2r[i+smpl_cfg::FULL_MASTER_NUM]);
      master_lite_if[i]->rd_flit_in(chan_rd_r2m[i+smpl_cfg::FULL_MASTER_NUM]);
      // Write-NoC
      master_lite_if[i]->wr_flit_out(chan_wr_m2r[i+smpl_cfg::FULL_MASTER_NUM]);
      master_lite_if[i]->wr_flit_in(chan_wr_r2m[i+smpl_cfg::FULL_MASTER_NUM]);
    }
    // -------------------- //
    // --- HOME-NODE(s) --- //
    // ---------------------//
    for (unsigned i=0; i<smpl_cfg::HOME_NUM; ++i) {
      home[i] = new ace_home < smpl_cfg > (sc_gen_unique_name("Home-Node"));
      home[i]->clk(clk);
      home[i]->rst_n(rst_n);
      // Pass the address Map
      for (int n=0; n<smpl_cfg::SLAVE_NUM; ++n)  // Iterate Slaves
        for (int s=0; s<2; ++s)                  // Iterate Begin-End Values
          home[i]->addr_map[n][s](addr_map[n][s]);
      home[i]->THIS_ID(NODE_IDS[i+smpl_cfg::SLAVE_NUM+smpl_cfg::ALL_MASTER_NUM]); // homes get the last IDs
      home[i]->cache_req(chan_creq_h2r[i]);
      home[i]->cache_resp(chan_cresp_r2h[i]);
      //home[i]->cache_ack();
      home[i]->rd_from_master(chan_rd_req_r2h[i]);
      home[i]->rd_to_master(chan_rd_resp_h2r[i]);
      home[i]->rd_to_slave(chan_rd_req_h2r[i]);
      home[i]->rd_from_slave(chan_rd_resp_r2h[i]);
      home[i]->wr_from_master(chan_wr_req_r2h[i]);
      home[i]->wr_to_master(chan_wr_resp_h2r[i]);
      home[i]->wr_to_slave(chan_wr_req_h2r[i]);
      home[i]->wr_from_slave(chan_wr_resp_r2h[i]);
      home[i]->ack_from_master(chan_acks_r2h[i]);
    }
    // -o-o-o-o-o-o-o-o-o- //
    // -o-o-o-o-o-o-o-o-o- //
    // --- NoC Connectivity --- //
    // Each route_* LUT maps a destination node ID to an output port of the
    // corresponding router. Entries for node IDs that never appear as a
    // destination on that NoC are set to 0 (unused).
    // Read Req/Fwd Router
    for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i)
      route_rd_req[i] = i; // Slaves
    for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i)
      route_rd_req[i+smpl_cfg::SLAVE_NUM] = 0; // Masters
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)
      route_rd_req[i+smpl_cfg::SLAVE_NUM+smpl_cfg::ALL_MASTER_NUM] = i+smpl_cfg::SLAVE_NUM; // Homes
    rtr_rd_req.clk(clk);
    rtr_rd_req.rst_n(rst_n);
    rtr_rd_req.id_x(rtr_id_dummmy);
    rtr_rd_req.id_y(rtr_id_dummmy);
    for (unsigned i=0; i<NODES; ++i)
      rtr_rd_req.route_lut[i](route_rd_req[i]);
    // In from Masters
    for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i) rtr_rd_req.data_in[i](chan_rd_m2r[i]);
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)       rtr_rd_req.data_in[smpl_cfg::ALL_MASTER_NUM+i](chan_rd_req_h2r[i]);
    // Out to Slave
    for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i) rtr_rd_req.data_out[i](chan_rd_r2s[i]);
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)  rtr_rd_req.data_out[smpl_cfg::SLAVE_NUM+i](chan_rd_req_r2h[i]);
    // Read Resp/Bck Router
    for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i)
      route_rd_resp[i] = 0; // Slaves
    for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i)
      route_rd_resp[i+smpl_cfg::SLAVE_NUM] = i; // Masters
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)
      route_rd_resp[i+smpl_cfg::SLAVE_NUM+smpl_cfg::ALL_MASTER_NUM] = i+smpl_cfg::ALL_MASTER_NUM; // Homes
    rtr_rd_resp.clk(clk);
    rtr_rd_resp.rst_n(rst_n);
    rtr_rd_resp.id_x(rtr_id_dummmy);
    rtr_rd_resp.id_y(rtr_id_dummmy);
    for (unsigned i=0; i<NODES; ++i)
      rtr_rd_resp.route_lut[i](route_rd_resp[i]);
    // In from Slaves
    for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i) rtr_rd_resp.data_in[i](chan_rd_s2r[i]);
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)  rtr_rd_resp.data_in[smpl_cfg::SLAVE_NUM+i](chan_rd_resp_h2r[i]);
    // Out to Master
    for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i) rtr_rd_resp.data_out[i](chan_rd_r2m[i]);
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)       rtr_rd_resp.data_out[smpl_cfg::ALL_MASTER_NUM+i](chan_rd_resp_r2h[i]);
    // Write Req/Fwd Router
    for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i)
      route_wr_req[i] = i; // Slaves
    for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i)
      route_wr_req[i+smpl_cfg::SLAVE_NUM] = 0; // Masters
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)
      route_wr_req[i+smpl_cfg::SLAVE_NUM+smpl_cfg::ALL_MASTER_NUM] = i+smpl_cfg::SLAVE_NUM; // Homes
    rtr_wr_req.clk(clk);
    rtr_wr_req.rst_n(rst_n);
    rtr_wr_req.id_x(rtr_id_dummmy);
    rtr_wr_req.id_y(rtr_id_dummmy);
    for (unsigned i=0; i<NODES; ++i)
      rtr_wr_req.route_lut[i](route_wr_req[i]);
    // In from Masters
    for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i) rtr_wr_req.data_in[i](chan_wr_m2r[i]);
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)       rtr_wr_req.data_in[smpl_cfg::ALL_MASTER_NUM+i](chan_wr_req_h2r[i]);
    // Out to Slaves
    for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i) rtr_wr_req.data_out[i](chan_wr_r2s[i]);
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)  rtr_wr_req.data_out[smpl_cfg::SLAVE_NUM+i](chan_wr_req_r2h[i]);
    // Write Resp/Bck Router
    for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i)
      route_wr_resp[i] = 0; // Slaves
    for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i)
      route_wr_resp[i+smpl_cfg::SLAVE_NUM] = i; // Masters
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)
      route_wr_resp[i+smpl_cfg::SLAVE_NUM+smpl_cfg::ALL_MASTER_NUM] = i+smpl_cfg::ALL_MASTER_NUM; // Homes
    rtr_wr_resp.clk(clk);
    rtr_wr_resp.rst_n(rst_n);
    rtr_wr_resp.id_x(rtr_id_dummmy);
    rtr_wr_resp.id_y(rtr_id_dummmy);
    for (unsigned i=0; i<NODES; ++i)
      rtr_wr_resp.route_lut[i](route_wr_resp[i]);
    // In from Slaves
    for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i) rtr_wr_resp.data_in[i](chan_wr_s2r[i]);
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)  rtr_wr_resp.data_in[smpl_cfg::SLAVE_NUM+i](chan_wr_resp_h2r[i]);
    // Out to Master
    for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i) rtr_wr_resp.data_out[i](chan_wr_r2m[i]);
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)       rtr_wr_resp.data_out[smpl_cfg::ALL_MASTER_NUM+i](chan_wr_resp_r2h[i]);
    // Cache Req/Fwd Router (snoops: home -> full masters)
    for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i)
      route_cache_req[i] = 0; // Slaves
    for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i)
      route_cache_req[i+smpl_cfg::SLAVE_NUM] = i; // Masters
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)
      route_cache_req[i+smpl_cfg::SLAVE_NUM+smpl_cfg::ALL_MASTER_NUM] = 0; // Homes
    rtr_cache_req.clk(clk);
    rtr_cache_req.rst_n(rst_n);
    rtr_cache_req.id_x(rtr_id_dummmy);
    rtr_cache_req.id_y(rtr_id_dummmy);
    for (unsigned i=0; i<NODES; ++i)
      rtr_cache_req.route_lut[i](route_cache_req[i]);
    // In from Home
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i) {
      rtr_cache_req.data_in[i](chan_creq_h2r[i]);
    }
    // Out to Master
    for(int i=0; i<smpl_cfg::FULL_MASTER_NUM; ++i) {
      rtr_cache_req.data_out[i](chan_creq_r2m[i]);
    }
    // Cache Resp/Bck Router (snoop responses: full masters -> home)
    for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i)
      route_cache_resp[i] = 0; // Slaves
    for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i)
      route_cache_resp[i+smpl_cfg::SLAVE_NUM] = 0; // Masters
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)
      route_cache_resp[i+smpl_cfg::SLAVE_NUM+smpl_cfg::ALL_MASTER_NUM] = i; // Homes
    rtr_cache_resp.clk(clk);
    rtr_cache_resp.rst_n(rst_n);
    rtr_cache_resp.id_x(rtr_id_dummmy);
    rtr_cache_resp.id_y(rtr_id_dummmy);
    for (unsigned i=0; i<NODES; ++i)
      rtr_cache_resp.route_lut[i](route_cache_resp[i]);
    // In from Masters
    for(int i=0; i<smpl_cfg::FULL_MASTER_NUM; ++i) {
      rtr_cache_resp.data_in[i](chan_cresp_m2r[i]);
    }
    // Out to Home
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i) {
      rtr_cache_resp.data_out[i](chan_cresp_r2h[i]);
    }
    // ACKS Router (rack/wack: full masters -> home)
    for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i)
      route_acks[i] = 0; // Slaves
    for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i)
      route_acks[i+smpl_cfg::SLAVE_NUM] = 0; // Masters
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i)
      route_acks[i+smpl_cfg::SLAVE_NUM+smpl_cfg::ALL_MASTER_NUM] = i; // Homes
    rtr_acks.clk(clk);
    rtr_acks.rst_n(rst_n);
    rtr_acks.id_x(rtr_id_dummmy);
    rtr_acks.id_y(rtr_id_dummmy);
    for (unsigned i=0; i<NODES; ++i)
      rtr_acks.route_lut[i](route_acks[i]);
    // In from Masters (rack ports first, then wack ports)
    for(int i=0; i<(smpl_cfg::FULL_MASTER_NUM*2); ++i) {
      rtr_acks.data_in[i](chan_acks_m2r[i]);
    }
    // Out to HOME
    for(int i=0; i<smpl_cfg::HOME_NUM; ++i) {
      rtr_acks.data_out[i](chan_acks_r2h[i]);
    }
  }; // End of constructor
private:
}; // End of SC_MODULE
#endif // _ACE_IC_TOP_H_
|
ic-lab-duth/NoCpad
|
src/router_vc.h
|
#ifndef WH_ROUTER_CON_CR_H
#define WH_ROUTER_CON_CR_H
#include <systemc.h>
#include "./include/flit_axi.h"
#include "./include/duth_fun.h"
#include "./include/arbiters.h"
#include "./include/fifo_queue_oh.h"
#include "nvhls_connections.h"
// Select In/Out Ports, the type of flit and the Routing Computation calculation function
// For RC_METHOD: 0-> direct rc 1-> lut, 2-> type, ... , 4-> LUT based routing
// IN_NUM : Number of inputs
// OUT_NUM : Number of inputs
// flit_t : The networks flit type
// DIM_X : X Dimension of a 2-D mesh network. Used in XY routing
// NODES : All possible target nodes of the network. Used in LUT routing
// VCS : Number of Virtual Channels
// BUFF_DEPTH : Input Buffer slots
// RC_METHOD : Routing Computation Algotrithm
// - 0 : Direct RC
// - 1 : Constant RC (for mergers)
// - 2 : Packet type RC (for distinct routing of Writes and reads)
// - 3 : For single stage NoCs
// - 4 : LUT based RC
// - 5 : XY routing with merged RD/WR Req-Resp
// ARB_C : The arbiter type. Eg MATRIX, ROUND_ROBIN
template< unsigned int IN_NUM, unsigned int OUT_NUM, typename flit_t, int DIM_X=0, int NODES=1, unsigned VCS=2, unsigned BUFF_DEPTH=3, unsigned RC_METHOD=3, arb_type arbiter_t=MATRIX >
SC_MODULE(rtr_vc) {
public:
typedef sc_uint< nvhls::log2_ceil<VCS>::val > cr_t;
sc_in_clk clk;
sc_in<bool> rst_n;
sc_in< sc_uint<dnp::D_W> > route_lut[NODES];
sc_in< sc_uint<dnp::D_W> > id_x{"id_x"};
sc_in< sc_uint<dnp::D_W> > id_y{"id_y"};
// input channels
Connections::In<flit_t> data_in[IN_NUM];
Connections::Out<cr_t> cr_out[IN_NUM];
// output channels
Connections::Out<flit_t> data_out[OUT_NUM];
Connections::In<cr_t> cr_in[OUT_NUM];
// Internals
fifo_queue<flit_t, BUFF_DEPTH> fifo[IN_NUM][VCS];
bool out_lock[IN_NUM][VCS];
onehot<OUT_NUM> out_port_locked[IN_NUM][VCS];
onehot<BUFF_DEPTH+1> credits[OUT_NUM][VCS];
onehot<VCS> out_available[OUT_NUM];
arbiter<VCS , arbiter_t> arb_sa1[IN_NUM];
arbiter<IN_NUM, arbiter_t> arb_sa2[OUT_NUM];
// Constructor
// Constructor: registers router_job() as a clocked thread on the rising clock
// edge with asynchronous active-low reset.
SC_HAS_PROCESS(rtr_vc);
rtr_vc(sc_module_name name_ = "rtr_vc")
:
sc_module(name_)
{
  SC_THREAD(router_job);
  sensitive << clk.pos();
  async_reset_signal_is(rst_n, false);
}
void router_job() {
flit_t vc_hol_flit[IN_NUM][VCS];
onehot<VCS> sa1_grants[IN_NUM];
flit_t flit_to_xbar[IN_NUM];
// The request and grants of the Inputs/Outputs
onehot<OUT_NUM> req_sa2_per_i[IN_NUM];
onehot<IN_NUM> req_sa2_per_o[OUT_NUM];
onehot<IN_NUM> gnt_sa2_per_o[OUT_NUM];
onehot<OUT_NUM> gnt_sa2_per_i[IN_NUM];
// Reset per input state
#pragma hls_unroll yes
per_i_rst:for (unsigned char i=0; i<IN_NUM; ++i) {
data_in[i].Reset();
cr_out[i].Reset();
#pragma hls_unroll yes
for(unsigned v=0; v<VCS; ++v) out_lock[i][v] = false;
}
// Reset per output state
#pragma hls_unroll yes
per_o_rst:for(unsigned char j=0; j<OUT_NUM; ++j) {
data_out[j].Reset();
cr_in[j].Reset();
#pragma hls_unroll yes
for(unsigned v=0; v<VCS; ++v) {
out_available[j].val[v] = true;
credits[j][v] = onehot<BUFF_DEPTH+1>(1<<BUFF_DEPTH);
}
}
// Post Reset
#pragma hls_pipeline_init_interval 1
#pragma pipeline_stall_mode flush
while(1) {
wait();
// UpStream Interface
bool data_val_in[IN_NUM];
bool data_rdy_out[IN_NUM];
flit_t data_data_in[IN_NUM];
bool cr_val_out[IN_NUM];
bool cr_rdy_in[IN_NUM];
cr_t cr_data_out[IN_NUM];
// DowStream Interface
bool data_val_out[OUT_NUM];
bool data_rdy_in[OUT_NUM];
flit_t data_data_out[OUT_NUM];
bool cr_val_in[OUT_NUM];
bool cr_rdy_out[OUT_NUM];
cr_t cr_data_in[OUT_NUM];
onehot<VCS> out_ready[OUT_NUM];
// Read all inputs
#pragma hls_unroll yes
for (int i=0; i<IN_NUM; ++i) {
data_val_in[i] = data_in[i].PopNB(data_data_in[i]);
}
#pragma hls_unroll yes
for (int j=0; j<OUT_NUM; ++j) {
cr_val_in[j] = cr_in[j].PopNB(cr_data_in[j]);
// Check Credits
#pragma hls_unroll yes
for (int v = 0; v < VCS; ++v) {
out_ready[j][v] = (credits[j][v].is_ready());
}
}
// Input logic, loops for each input to produce the required requests
#pragma hls_unroll yes
input_prep : for (int i = 0; i < IN_NUM; ++i) {
onehot<VCS> req_sa1;
onehot<OUT_NUM> port_req_oh[VCS];
// prepare requests of each VC, to content in SA1
#pragma hls_unroll yes
vc_prep : for (unsigned v=0; v<VCS; ++v) {
vc_hol_flit[i][v] = fifo[i][v].peek();
// Depending the Flit type the input selects an output port to request.
// The required output gets stored to be used by the rest of the flits.
if (out_lock[i][v]) {
port_req_oh[v].set(out_port_locked[i][v]);
} else {
// Route Computation
unsigned char current_op;
if (RC_METHOD==0) { current_op = do_rc_direct(vc_hol_flit[i][v].get_dst());} // returns the node ID
else if (RC_METHOD==1) { current_op = do_rc_const();} // returns 0. Used for mergers
else if (RC_METHOD==2) { current_op = do_rc_type(vc_hol_flit[i][v].get_type());} // Return 0/1 depending the type. used for splitters
else if (RC_METHOD==3) { current_op = do_rc_common(vc_hol_flit[i][v].get_dst(),vc_hol_flit[i][v].get_type());}
else if (RC_METHOD==4) { current_op = do_rc_lut(vc_hol_flit[i][v].get_dst());}
else if (RC_METHOD==5) { current_op = do_rc_xy_merge(vc_hol_flit[i][v].get_dst(), vc_hol_flit[i][v].get_type());}
else { NVHLS_ASSERT_MSG(0, "Wrong Routing method selected.");}
port_req_oh[v].set(current_op);
out_port_locked[i][v].set(port_req_oh[v]);
}
// The required output port must be also Ready and or available.
onehot<VCS> req_out_ready_vcs = mux<onehot<VCS>, OUT_NUM>::mux_oh_case(port_req_oh[v], out_ready);
onehot<VCS> req_out_avail_vcs = mux<onehot<VCS>, OUT_NUM>::mux_oh_case(port_req_oh[v], out_available);
bool req_out_ready = req_out_ready_vcs[v];
bool req_out_avail = req_out_avail_vcs[v];
req_sa1[v] = (fifo[i][v].valid() && req_out_ready && (out_lock[i][v] || req_out_avail));
}
// Arbitrate amonng the VCs and select the winner to access SA2 and output MUX
bool any_sa1_gnt = arb_sa1[i].arbitrate(req_sa1.val, sa1_grants[i].val);
flit_to_xbar[i] = mux<flit_t, VCS>::mux_oh_case(sa1_grants[i], vc_hol_flit[i]);
req_sa2_per_i[i] = mux<onehot<OUT_NUM>, VCS>::mux_oh_case(sa1_grants[i], port_req_oh).and_mask(any_sa1_gnt);
} // End of set inputs
// Per Output arbitration and multiplexing
#pragma hls_unroll yes
outp_route : for (unsigned char j = 0; j < OUT_NUM; ++j) {
// Swap from per input to per output
#pragma hls_unroll yes
for(int i=0; i<IN_NUM; ++i) {
req_sa2_per_o[j][i] = req_sa2_per_i[i][j];
}
// SA2 arbitration among the inputs to win the output and the required VC
bool any_gnt = arb_sa2[j].arbitrate(req_sa2_per_o[j].val, gnt_sa2_per_o[j].val);
flit_t selected_flit = mux<flit_t, IN_NUM>::mux_oh_case(gnt_sa2_per_o[j], flit_to_xbar);
cr_t selected_vc = selected_flit.get_vc();
data_val_out[j] = any_gnt;
data_data_out[j] = selected_flit;
#pragma hls_unroll yes
for (unsigned v=0; v<VCS; ++v) {
bool cr_upd_this_vc = cr_val_in[j] && (cr_data_in[j]==v);
bool cr_cons_this_vc = any_gnt && (selected_vc ==v);
if ( cr_cons_this_vc && (!cr_upd_this_vc)) credits[j][v].decrease();
else if (!cr_cons_this_vc && ( cr_upd_this_vc)) credits[j][v].increase();
}
if (any_gnt) {
if (selected_flit.is_head()) out_available[j][selected_vc] = false;
if (selected_flit.is_tail()) out_available[j][selected_vc] = true;
}
} // End per output
// Loop through each input and VC to handle the case of actually winning the output
#pragma hls_unroll yes
inp_feedback : for (unsigned char i = 0; i < IN_NUM; ++i) {
#pragma hls_unroll yes
for(int j=0; j<IN_NUM; ++j) {
gnt_sa2_per_i[i][j] = gnt_sa2_per_o[j][i];
}
// Handle Grants and incoming flits
bool sa2_grant = gnt_sa2_per_i[i].or_reduce();
cr_val_out[i] = sa2_grant;
bool got_new_flit = data_val_in[i];
cr_t new_flit_vc = data_data_in[i].get_vc();
if (got_new_flit) fifo[i][new_flit_vc].push_no_count_incr(data_data_in[i]);
// Update the FIFO and VC state
cr_t vc_popped = 0;
#pragma hls_unroll yes
for (unsigned v=0; v<VCS; ++v) {
bool this_vc_popped = sa2_grant && sa1_grants[i][v];
if (this_vc_popped) {
cr_data_out[i] = v;
fifo[i][v].inc_pop_ptr();
if (vc_hol_flit[i][v].is_head()) out_lock[i][v] = true;
else if (vc_hol_flit[i][v].is_tail()) out_lock[i][v] = false;
}
bool this_vc_pushed = got_new_flit && (new_flit_vc==v);
fifo[i][v].set_count(this_vc_pushed, this_vc_popped);
}
}
// Write to outputs
#pragma hls_unroll yes
for (int i=0; i<IN_NUM; ++i) {
if(cr_val_out[i]) {
bool dbg_push_ok = cr_out[i].PushNB(cr_data_out[i]);
NVHLS_ASSERT_MSG(dbg_push_ok, "Push Credit DROP!!!");
}
}
#pragma hls_unroll yes
for (int j=0; j<OUT_NUM; ++j) {
if (data_val_out[j]) {
bool dbg_push_ok = data_out[j].PushNB(data_data_out[j]);
NVHLS_ASSERT_MSG(dbg_push_ok, "Push Data DROP!!!");
}
}
} // End of while(1)
}; //end router_job_credits
// Direct RC : The Dst Node is the Output port
//inline unsigned char do_rc_direct (const unsigned char destination) {return destination;};
inline unsigned char do_rc_direct (sc_uint<dnp::D_W> destination) {return destination.to_uint();};
// TYPE RC : Used for splitter/mergers. Routes RD to Out==0, and WR to Out==1
inline unsigned char do_rc_type (sc_uint<dnp::T_W> type) {return (type==dnp::PACK_TYPE__RD_REQ || type==dnp::PACK_TYPE__RD_RESP) ? 0 : 1;};
// Const RC : Used for mergers to always request port #0
inline unsigned char do_rc_const () {return 0;};
  // Common RC : destination id adjusted by a packet-type dependent offset.
  // NOTE(review): the original comment here was a stale copy of the do_rc_const
  // one. The +/-2 offsets below encode how read/write channels map onto the
  // shared port range -- TODO confirm against the instantiating topology.
  inline unsigned char do_rc_common (sc_uint<dnp::D_W> destination, sc_uint<dnp::T_W> type) {
    if (type==dnp::PACK_TYPE__RD_REQ) return destination.to_uint(); // read requests : port == dst
    else if (type==dnp::PACK_TYPE__RD_RESP) return destination.to_uint()-2; // read responses : shifted down by 2
    else if (type==dnp::PACK_TYPE__WR_REQ) return destination.to_uint()+2; // write requests : shifted up by 2
    else return destination.to_uint(); // write responses (and anything else) : port == dst
  };
  // LUT RC : output port is looked up in the route_lut table, indexed by the
  // destination id (route_lut entries are read via a SystemC .read()).
  inline unsigned char do_rc_lut (sc_uint<dnp::D_W> destination) {
    return route_lut[destination.to_uint()].read();
  };
  // XY dimension-ordered routing for a DIM_X-wide mesh with type-based ejection:
  // resolve the X offset first, then Y, and at the destination tile pick the
  // ejection port from the packet type.
  // NOTE(review): port map inferred from the returns below -- 0:-X, 1:+X,
  // 2:-Y, 3:+Y, 4: local read side, 5: local write side. Confirm against the
  // router's actual port ordering.
  inline unsigned char do_rc_xy_merge (sc_uint<dnp::D_W> destination, sc_uint<dnp::T_W> type) {
    sc_uint<dnp::D_W> this_id_x = id_x.read(); // this node's X coordinate
    sc_uint<dnp::D_W> this_id_y = id_y.read(); // this node's Y coordinate
    sc_uint<dnp::D_W> dst_x = destination % DIM_X; // destination column
    sc_uint<dnp::D_W> dst_y = destination / DIM_X; // destination row
    if (dst_x>this_id_x) {
      return 1;
    } else if (dst_x<this_id_x) {
      return 0;
    } else {
      if (dst_y>this_id_y) {
        return 3;
      } else if (dst_y<this_id_y) {
        return 2;
      } else { // arrived at the destination tile: eject by packet type
        if (type==dnp::PACK_TYPE__RD_REQ) return 4;
        else if (type==dnp::PACK_TYPE__RD_RESP) return 4;
        else if (type==dnp::PACK_TYPE__WR_REQ) return 5;
        else return 5;
      }
    }
  };
}; // End of Module
#endif // WH_ROUTER_CON_CR_H
|
ic-lab-duth/NoCpad
|
src/include/arbiters.h
|
<reponame>ic-lab-duth/NoCpad
#ifndef __ARBITERS_HEADER__
#define __ARBITERS_HEADER__
enum arb_type {FIXED, MATRIX, ROUND_ROBIN, WEIGHTED_RR, DEFICIT_RR, STRATIFIED_RR, PHASE};
// Generic arbiter front-end. Only the partial specializations below are meant
// to be instantiated; S and DOMAINS are extra knobs reserved for specific
// arbiter flavors.
template<unsigned SIZE, arb_type ARB_TYPE, unsigned S=0, unsigned DOMAINS=0>
class arbiter {
public:
  arbiter();
  unsigned arbitrate();
};
/* FUNCTION: Fixed Priority Arbiter
 * INPUT: Array of bools
 * OUTPUT: Unsigned integer pointer to bit position
 * -----------------------------------------
 * Grants the lowest-indexed asserted request.
 */
template<unsigned SIZE>
class arbiter<SIZE, FIXED, 0, 0> {
public:
  arbiter(){
  }
  // Returns the index of the first (lowest) asserted request,
  // or 0 when no request is asserted.
  unsigned arbitrate( bool inp[SIZE] ) {
    unsigned grants = 0; // BUG FIX: was uninitialized -> garbage return when no request was set
    bool found = false;
    #pragma hls_unroll yes
    for (unsigned i=0; i<SIZE; i++) { // unsigned index: avoids signed/unsigned compare with SIZE
      if (inp[i] && !found) {
        grants = i;
        found = true;
      }
    }
    return grants;
  };
};
/* FUNCTION: Round Robin Arbiter
* INPUT: Array of bools
* OUTPUT: Unsigned integer pointer to bit position
* -----------------------------------------
*
*/
template<unsigned SIZE>
class arbiter<SIZE, ROUND_ROBIN, 0, 0> {
private:
  unsigned priority;            // index holding the highest priority (array interface)
  sc_uint<SIZE> priority_therm; // thermometer mask of the high-priority segment (bit-vector interface)
public:
  arbiter() {
    priority = 0;
    priority_therm = 0;
  }
  //#pragma hls_design ccore
  //#pragma hls_ccore_type combinational
  // Round-robin arbitration over a bool request array. Grants the first
  // request at/above the priority point, else the first below it, then moves
  // the priority point just past the winner.
  // NOTE(review): returns 0 when no request is asserted -- callers must
  // qualify the result with their own "any request" signal.
  unsigned arbitrate(bool inp[SIZE]) {
    bool found_hp = false;
    bool found_lp = false;
    unsigned grant_hp = 0;
    unsigned grant_lp = 0;
    unsigned grants = 0;
    #pragma hls_unroll yes
    for (int i = 0; i < SIZE; i++) {
      // split arbitration to keep for-loop bounds constant - HLS requirement
      // if requests belong to the high priority segment
      if (i >= priority) {
        if (inp[i] && !found_hp) {
          grant_hp = i;
          found_hp = true;
        }
      } else { // requests that belong to low priority
        if (inp[i] && !found_lp) {
          grant_lp = i;
          found_lp = true;
        }
      }
    }
    grants = (found_hp) ? grant_hp : grant_lp; // high-priority segment wins when both exist
    if (found_hp || found_lp) {
      priority = ((grants + 1) == SIZE) ? 0 : (grants + 1); // wrap the priority point
    }
    return grants;
  };
  //#pragma hls_design ccore
  //#pragma hls_ccore_type combinational
  //#pragma hls_map_to_operator le_arbiter
  // Round-robin arbitration over a request bit-vector; the grant is one-hot in
  // grants_o. Returns true when at least one request was asserted.
  bool arbitrate(const sc_uint<SIZE> reqs_i, sc_uint<SIZE>& grants_o) {
    sc_uint<SIZE> req_lp = reqs_i & (~priority_therm);  // requests below the priority point
    sc_uint<SIZE> req_hp = reqs_i & priority_therm;     // requests at/above the priority point
    sc_uint<SIZE> grants_lp = ((~req_lp) + 1) & req_lp; // isolate least-significant set bit
    sc_uint<SIZE> grants_hp = ((~req_hp) + 1) & req_hp; // isolate least-significant set bit
    bool anygrant = reqs_i.or_reduce();
    grants_o = grants_hp.or_reduce() ? grants_hp : grants_lp; // prefer the high-priority segment
    // OH to THERM : everything strictly above the winner becomes high priority next round
    if (anygrant) priority_therm = ~((grants_o << 1) - 1);
    return anygrant;
  };
};
/* FUNCTION: Matrix Arbiter
* INPUT: Array of bools
* OUTPUT: Unsigned integer pointer to bit position
* -----------------------------------------
*
*/
template<unsigned SIZE>
class arbiter<SIZE, MATRIX, 0, 0> {
private:
  // Priority matrix for the bool-array interface.
  // Upper triangle (i<j): mat[i][j]==true -> i currently beats j.
  // Lower triangle (i>j): mat[i][j]==true -> j currently beats i.
  bool mat[SIZE][SIZE];
  // Priority matrix for the bit-vector interface:
  // mat_v2[i][j]==true -> j currently beats i.
  bool mat_v2[SIZE][SIZE];
  //sc_uint<SIZE> matrix_reg[SIZE]; // a word reflects a vertical line of the matrix
public:
  arbiter(){
    #pragma hls_unroll yes
    for (int i=0; i<SIZE; i++) {
      #pragma hls_unroll yes
      for (int j=0; j<SIZE; j++) {
        mat_v2[i][j] = (i>j);
        // BUG FIX: initialize the WHOLE matrix. Previously only the upper
        // triangle (i<j) was written, yet arbitrate() also reads the lower
        // triangle (i>j), i.e. it consumed uninitialized storage on the
        // first arbitrations.
        mat[i][j] = (i < j);
      }
    }
  }
  // Matrix arbitration over a bool array: requester i wins iff no other
  // requester currently holds priority over it. The winner then yields
  // priority to everyone else. Returns 0 when nothing is granted.
  unsigned arbitrate( bool inp[SIZE] ) {
    bool found = false;
    unsigned grants = 0;
    #pragma hls_unroll yes
    for (int i=0; i<SIZE; i++) {
      if (inp[i] && !found) {
        found = true;
        #pragma hls_unroll yes
        for (int j=0; j<SIZE; j++) {
          // candidate i is vetoed by any requesting j that beats it
          if ( (i < j) && (!mat[i][j] && inp[j])) {
            found = false;
          } else {
            if ( (i > j) && (mat[i][j] && inp[j])) {
              found = false;
            }
          }
        }
        if (found) {
          grants = i;
        }
      }
    }
    if (found) {
      // A grant was given: the winner moves to lowest priority vs everyone
      #pragma hls_unroll yes
      for (int i=0; i<SIZE; i++) {
        if (i > grants) {
          mat[grants][i] = false;
        } else {
          if (i < grants) {
            mat[i][grants] = true;
          }
        }
      }
    }
    return grants;
  };
  // Matrix arbitration over a request bit-vector; grant is one-hot in grants_o.
  // Returns true when at least one request was asserted.
  bool arbitrate(const sc_uint<SIZE> reqs_i, sc_uint<SIZE>& grants_o) {
    #pragma hls_unroll yes
    for (int i=0; i<SIZE; ++i) { // Horizontal
      sc_uint<SIZE> vert_line;
      #pragma hls_unroll yes
      for (int j=0; j<SIZE; ++j) { // Vertical
        if (i==j) vert_line[j] = false;
        else vert_line[j] = reqs_i[j] && mat_v2[i][j]; // j requests and beats i
      }
      grants_o[i] = !(vert_line.or_reduce()) && reqs_i[i];
    }
    //NVHLS_ASSERT_MSG(dbg_grants<=1, "More than one granted!")
    // Update table : the winner yields priority to every other requester
    #pragma hls_unroll yes
    for (int i=0; i<SIZE; ++i) { // Horizontal
      #pragma hls_unroll yes
      for (int j=0; j<SIZE; ++j) { // Vertical
        if (i!=j) {
          if (grants_o[i]) mat_v2[i][j] = true;
          else if (grants_o[j]) mat_v2[i][j] = false;
        }
      }
    }
    bool anygrant = reqs_i.or_reduce();
    return anygrant;
  };
};
/* FUNCTION: Wighted Round Robin Arbiter
* INPUT: Array of bools
* OUTPUT: Unsigned integer pointer to bit position
* -----------------------------------------
*
*/
template<unsigned SIZE>
class arbiter<SIZE, WEIGHTED_RR, 0, 0> {
private:
  // Weights[i][j]==true grants flow i a 100/2^(j+1) percent bandwidth share
  // in scan-cycle column j (binary-weighted, see setWeights()).
  bool Weights[SIZE][SIZE];
  bool unvisited[SIZE]; // flow not yet served within the current scan cycle
  unsigned scanCycle;   // bit-mask selecting the weight columns examined this cycle
  /* FUNCTION: Priority Enforcer
   * INPUT: Integer Value
   * OUTPUT: Pointer to the least significant
   * non zero bit.
   * -----------------------------------------
   *
   */
  unsigned priorityEnforcer( unsigned inp ) {
    unsigned pbit=0;
    bool found = false;
    #pragma hls_unroll yes
    for (int i=0; i<SIZE; i++) {
      if ( ((inp & 1<<i) >0) && !found ) {
        found = true;
        pbit = i;
      }
    }
    return pbit;
  }
public:
  arbiter(){
    scanCycle = 1;
    #pragma hls_unroll yes
    for (int i=0; i<SIZE; i++) {
      unvisited[i] = true;
      // identity weights by default: flow i owns only column i
      #pragma hls_unroll yes
      for (int j=0; j<SIZE; j++) {
        if (j == i)
          Weights[i][j] = true;
        else
          Weights[i][j] = false;
      }
    }
  }
  // Weighted round robin: grant the first unvisited requesting flow whose
  // weight matches the current scan-cycle column; advance the scan cycle,
  // skipping cycles detected as sterile (no flow can ever win in them).
  // NOTE(review): returns 0 when no grant was possible within SIZE scan steps.
  unsigned arbitrate( bool inp[SIZE] ) {
    bool sterCyles[SIZE];
    bool isSterile;
    bool found = false;
    unsigned pbit;
    unsigned sterile = 0;;
    unsigned grants = 0;
    #pragma hls_unroll yes
    for (int i=0; i<SIZE; i++) {
      sterCyles[i] = false;
    }
    #pragma hls_unroll yes
    for (int k=0;k<SIZE; k++) { // repeat until a non sterile scan cycle
      pbit = priorityEnforcer(scanCycle); // active weight column this step
      #pragma hls_unroll yes
      for (int i=0; i<SIZE; i++) { // check each input flow
        if (!found && inp[i] && unvisited[i] && Weights[i][pbit]) {
          unvisited[i] = false;
          found = true;
          grants = i;
        }
      }
      if (!found) { // if no grant was given
        isSterile = true;
        #pragma hls_unroll yes
        for (int i=0; i<SIZE; i++) { // update unvisited flows
          if ( !unvisited[i] ) {
            isSterile = false;
          }
          unvisited[i] = true;
        }
        if (isSterile) { // update scan cycle avoiding sterile cycles
          sterCyles[pbit] = true;
          #pragma hls_unroll yes
          for (int i=0; i<SIZE; i++) {
            if (sterCyles[i] && ((scanCycle | 1<<i)!=scanCycle) ) {
              sterile = sterile + (1<<i);
            }
          }
        }
        scanCycle = scanCycle + sterile + 1; // advance past known-sterile cycles
        if (scanCycle >= ((1<<SIZE)-1) ) {   // wrap the scan-cycle counter
          scanCycle = sterile + 1;
        }
      }
    }
    return grants;
  };
  /* FUNCTION: WRR Arbiter :: setWeights
   * INPUT: Array of unsigned weight values
   * OUTPUT:
   * -----------------------------------------
   * Input weight for each flow is given as a
   * percentage of the bandwidth is asks for.
   * For example if the value of the weight is 75,
   * this flow asks for the 75% of the bandwidth.
   */
  void setWeights( unsigned inp[SIZE] ) {
    unsigned tmp;
    #pragma hls_unroll yes
    for (int i=0; i<SIZE; i++) {
      tmp = inp[i];
      // greedy binary decomposition: column j is worth 100/2^(j+1) percent
      #pragma hls_unroll yes
      for (int j=0; j<SIZE; j++) {
        if (tmp >= 100/(1<<(j+1))) {
          Weights[i][j] = true;
          tmp = tmp - 100/(1<<(j+1));
        } else {
          Weights[i][j] = false;
        }
      }
    }
  }
};
/* FUNCTION: Deficit Round Robin Arbiter
* INPUT: Array of bools
* OUTPUT: Unsigned integer pointer to bit position
* -----------------------------------------
*
*/
template<unsigned SIZE>
class arbiter<SIZE, DEFICIT_RR, 0, 0> {
private:
  unsigned priority; // requester index currently holding the highest priority
  struct flowQueue {
    unsigned packetSize[5]; // pending packet sizes, head of line at [0]
    unsigned maxIndex;      // number of queued packets (0..5)
  };
  unsigned D[SIZE];      // deficit counter per flow
  unsigned Q[SIZE];      // quantum added each round to active flows
  bool ActiveList[SIZE]; // flow has at least one queued packet
  flowQueue F[SIZE];     // per-flow packet-size queue
public:
  arbiter(){
    priority = 0; // BUG FIX: was left uninitialized
    #pragma hls_unroll yes
    for (int i=0; i<SIZE; i++) {
      ActiveList[i] = false;
      F[i].maxIndex = 0;
      D[i] = 0;
      Q[i] = 30; // maybe a function to set quantum of BW per input
    }
  }
  // Deficit round robin: enqueue newly presented packets, replenish deficits,
  // and grant (round-robin from `priority`) the first active flow whose head
  // packet fits in its deficit.
  // inp[i]      : flow i presents a new packet this call
  // inp_size[i] : size of that packet
  // Returns the granted flow index, or 0 when nothing could be granted.
  unsigned arbitrate( bool inp[SIZE], unsigned inp_size[SIZE] ) {
    bool found_hp = false;
    bool found_lp = false;
    unsigned grant_hp = 0;
    unsigned grant_lp = 0;
    unsigned grants = 0;
    #pragma hls_unroll yes
    for (int i=0; i<SIZE; i++) {
      // Enqueue the newly presented packet (bounded queue of 5 entries)
      if ( inp[i] && F[i].maxIndex<5) {
        ActiveList[i] = true;
        F[i].packetSize[F[i].maxIndex] = inp_size[i];
        F[i].maxIndex++;
      }
      D[i] = ( ActiveList[i] ) ? (D[i] + Q[i]) : 0; // replenish deficit of active flows
      // Split search into hi/lo segments to keep loop bounds constant (HLS requirement)
      if (i >= priority) {
        if (F[i].packetSize[0]<=D[i] && ActiveList[i] && !found_hp) {
          grant_hp = i;
          found_hp = true;
        }
      } else {
        if (F[i].packetSize[0]<=D[i] && ActiveList[i] && !found_lp) {
          grant_lp = i;
          found_lp = true;
        }
      }
    }
    grants = (found_hp) ? grant_hp : grant_lp;
    // BUG FIX: the bookkeeping below used to run unconditionally, so a cycle
    // with no grant still charged flow 0, shifted its queue and underflowed
    // its maxIndex counter.
    if (found_hp || found_lp) {
      // Charge the winner and pop its head-of-line packet
      D[grants] = D[grants] - F[grants].packetSize[0];
      #pragma hls_unroll yes
      for (int i=0; i<4; i++) {
        F[grants].packetSize[i] = F[grants].packetSize[i+1];
      }
      F[grants].maxIndex--;
      if ( F[grants].maxIndex == 0 ) {
        ActiveList[grants] = false;
        D[grants] = 0;
      }
      // BUG FIX: parenthesization. The old `found_hp || found_lp && cond`
      // advanced the priority point whenever found_hp was set, ignoring the
      // deficit condition entirely on the high-priority path.
      if (D[grants] < F[grants].packetSize[0]) {
        priority = ((grants + 1) == SIZE) ? 0 : (grants+1);
      }
    }
    return grants;
  };
};
#endif // __ARBITERS_HEADER__
|
ic-lab-duth/NoCpad
|
src/include/dnp20_axi.h
|
<gh_stars>1-10
#ifndef __DNP20_V0_DEF__
#define __DNP20_V0_DEF__
// Definition of Duth Network Protocol.
// Interconnect's internal packetization protocol
namespace dnp {
  // Global phit geometry and header field widths (bits) / bit offsets.
  enum {
    PHIT_W = 24, // Phit Width
    V_W = 2, // Virtual Channel
    S_W = 4, // Source
    D_W = 4, // Destination
    Q_W = 3, // QoS
    T_W = 2, // Type
    V_PTR = 0,
    S_PTR = (V_PTR + V_W),
    D_PTR = (S_PTR + S_W),
    Q_PTR = (D_PTR + D_W),
    T_PTR = (Q_PTR + Q_W),
    // AXI RELATED WIDTHS
    ID_W = 4, // AXI Transaction ID
    BU_W = 2, // AXI Burst
    SZ_W = 3, // AXI Size
    LE_W = 8, // AXI Length
    AL_W = 16, // Address Low
    AH_W = 16, // Address High
    AP_W = 8, // Address part (for alignment)
    RE_W = 2, // AXI Write Response
    REORD_W = 3, // Ticket for reorder buffer
    B_W = 8, // Byte Width ...
    E_W = 1, // Enable width
    LA_W = 1, // AXI Last
  };
  // Read and Write Request field pointers (bit offsets within a phit;
  // offsets restarting at 0 belong to subsequent phits of the packet)
  struct req {
    enum {
      ID_PTR = T_PTR+T_W,
      REORD_PTR = ID_PTR+ID_W,
      AL_PTR = 0,
      LE_PTR = AL_PTR+AL_W,
      AH_PTR = 0,
      SZ_PTR = AH_PTR+AH_W,
      BU_PTR = SZ_PTR+SZ_W,
    };
  };
  // Write Response field pointers
  struct wresp {
    enum {
      ID_PTR = T_PTR+T_W,
      REORD_PTR = ID_PTR+ID_W,
      RESP_PTR = REORD_PTR+REORD_W,
    };
  };
  // Read Response field pointers
  struct rresp {
    enum {
      ID_PTR = T_PTR+T_W,
      REORD_PTR = ID_PTR+ID_W,
      BU_PTR = REORD_PTR+REORD_W,
      SZ_PTR = 0,
      LE_PTR = SZ_PTR+SZ_W,
      AP_PTR = LE_PTR+LE_W,
    };
  };
  // Write request Data field pointers (two data bytes + enables + last per phit)
  struct wdata {
    enum {
      B0_PTR = 0,
      B1_PTR = B0_PTR+B_W,
      E0_PTR = B1_PTR+B_W,
      E1_PTR = E0_PTR+E_W,
      LA_PTR = E1_PTR+E_W,
    };
  };
  // Read response Data field pointers (two data bytes + resp + last per phit)
  struct rdata {
    enum {
      B0_PTR = 0,
      B1_PTR = B0_PTR+B_W,
      RE_PTR = B1_PTR+B_W,
      LA_PTR = RE_PTR+RE_W,
    };
  };
  // Packet type encoding carried in the T field
  enum PACK_TYPE {
    PACK_TYPE__WR_REQ = 0,
    PACK_TYPE__WR_RESP = 1,
    PACK_TYPE__RD_REQ = 2,
    PACK_TYPE__RD_RESP = 3
  };
}
#endif // __DNP20_V0_DEF__
|
ic-lab-duth/NoCpad
|
tb/tb_ace/harness.h
|
#ifndef ACE_IC_HARNESS_H
#define ACE_IC_HARNESS_H
#include "systemc.h"
#include <mc_scverify.h>
#include "./ic_top.h"
#define NVHLS_VERIFY_BLOCKS (ic_top)
#include "nvhls_verify.h"
#include "stdlib.h"
#include <string>
#include "../../tb/tb_ace/acelite_master.h"
#include "../../tb/tb_ace/ace_master.h"
#include "../../tb/tb_ace/ace_slave.h"
#include "../../tb/tb_ace/ace_coherency_checker.h"
#include <iostream>
#include <fstream>
SC_MODULE(harness) {
typedef typename ace::ace5<axi::cfg::ace> ace5_;
const int CLK_PERIOD = 5;
const int GEN_CYCLES = 2 * 1000;
const int AXI_GEN_RATE_RD[smpl_cfg::ALL_MASTER_NUM] = {20, 20, 20, 20};
const int AXI_GEN_RATE_WR[smpl_cfg::ALL_MASTER_NUM] = {20, 20, 20, 20};
const int ACE_GEN_RATE_CACHE[smpl_cfg::ALL_MASTER_NUM] = {10, 10, 10, 10};
const int AXI_STALL_RATE_RD = 00;
const int AXI_STALL_RATE_WR = 00;
const int DRAIN_CYCLES = GEN_CYCLES/10;
sc_clock clk; //clock signal
sc_signal<bool> rst_n;
sc_signal<bool> stop_gen;
// --- Scoreboards --- //
// Scoreboards refer to the receiver of the queue.
// I.e. the receiver checks what is expected to be received. Thus sender must take care to push Transactions to the appropriate queue
sc_mutex sb_lock;
std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload> > > sb_rd_req_q;
std::vector< std::deque< msg_tb_wrap<ace5_::ReadPayload> > > sb_rd_resp_q;
std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload > > > sb_wr_req_q;
std::vector< std::deque< msg_tb_wrap<ace5_::WritePayload> > > sb_wr_data_q;
std::vector< std::deque< msg_tb_wrap<ace5_::WRespPayload> > > sb_wr_resp_q;
std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload> > > sb_coherent_access_q;
std::vector< std::deque< msg_tb_wrap<ace5_::AC> > > sb_snoop_req_q;
std::vector< std::deque< msg_tb_wrap<ace5_::CR> > > sb_snoop_resp_q;
std::vector< std::deque< msg_tb_wrap<ace5_::CD> > > sb_snoop_data_resp_q;
CCS_DESIGN(ic_top) interconnect;
// ic_top interconnect("interconnect");
ace_master<smpl_cfg::RD_LANES, smpl_cfg::RD_LANES, smpl_cfg::WR_LANES, smpl_cfg::WR_LANES, smpl_cfg::ALL_MASTER_NUM, smpl_cfg::SLAVE_NUM> *master[smpl_cfg::FULL_MASTER_NUM];
acelite_master<smpl_cfg::RD_LANES, smpl_cfg::RD_LANES, smpl_cfg::WR_LANES, smpl_cfg::WR_LANES, smpl_cfg::ALL_MASTER_NUM, smpl_cfg::SLAVE_NUM> *master_lite[smpl_cfg::LITE_MASTER_NUM];
ace_slave<smpl_cfg::RD_LANES, smpl_cfg::RD_LANES, smpl_cfg::WR_LANES, smpl_cfg::WR_LANES, smpl_cfg::ALL_MASTER_NUM, smpl_cfg::SLAVE_NUM> *slave[smpl_cfg::SLAVE_NUM];
ace_coherency_checker<smpl_cfg::RD_LANES, smpl_cfg::RD_LANES, smpl_cfg::WR_LANES, smpl_cfg::WR_LANES, smpl_cfg::FULL_MASTER_NUM, smpl_cfg::LITE_MASTER_NUM, smpl_cfg::SLAVE_NUM> coherency_checker;
sc_signal< sc_uint<32> > addr_map[smpl_cfg::SLAVE_NUM][2];
// Channels connecting Master <--> IC
Connections::Combinational<ace5_::AC> master_snoop[smpl_cfg::FULL_MASTER_NUM];
Connections::Combinational<ace5_::CR> master_snoop_resp[smpl_cfg::FULL_MASTER_NUM];
Connections::Combinational<ace5_::CD> master_snoop_data[smpl_cfg::FULL_MASTER_NUM];
Connections::Combinational<ace5_::AddrPayload> master_rd_req[smpl_cfg::ALL_MASTER_NUM];
Connections::Combinational<ace5_::ReadPayload> master_rd_resp[smpl_cfg::ALL_MASTER_NUM];
Connections::Combinational<ace5_::RACK> master_rd_ack[smpl_cfg::FULL_MASTER_NUM];
Connections::Combinational<ace5_::AddrPayload> master_wr_req[smpl_cfg::ALL_MASTER_NUM];
Connections::Combinational<ace5_::WritePayload> master_wr_data[smpl_cfg::ALL_MASTER_NUM];
Connections::Combinational<ace5_::WRespPayload> master_wr_resp[smpl_cfg::ALL_MASTER_NUM];
Connections::Combinational<ace5_::WACK> master_wr_ack[smpl_cfg::FULL_MASTER_NUM];
// Channels connecting IC <--> Slave
Connections::Combinational<ace5_::AddrPayload> slave_rd_req[smpl_cfg::SLAVE_NUM];
Connections::Combinational<ace5_::ReadPayload> slave_rd_resp[smpl_cfg::SLAVE_NUM];
Connections::Combinational<ace5_::AddrPayload> slave_wr_req[smpl_cfg::SLAVE_NUM];
Connections::Combinational<ace5_::WritePayload> slave_wr_data[smpl_cfg::SLAVE_NUM];
Connections::Combinational<ace5_::WRespPayload> slave_wr_resp[smpl_cfg::SLAVE_NUM];
  // Constructs the testbench: instantiates masters (full-ACE and ACE-Lite),
  // slaves and the coherency checker, wires scoreboard references, the address
  // map, and all Connections channels between the DUT and the BFMs.
  SC_CTOR(harness) :
    // NOTE(review): clk is built with a hard-coded 10 NS period while
    // CLK_PERIOD is 5 -- confirm which one harness_job's waits should track.
    clk("clock",10,SC_NS,0.5,0.0,SC_NS),
    rst_n("rst_n"),
    stop_gen("stop_gen"),
    sb_lock(),
    sb_rd_req_q(smpl_cfg::SLAVE_NUM),
    sb_rd_resp_q(smpl_cfg::ALL_MASTER_NUM),
    sb_wr_req_q(smpl_cfg::SLAVE_NUM),
    sb_wr_data_q(smpl_cfg::SLAVE_NUM),
    sb_wr_resp_q(smpl_cfg::ALL_MASTER_NUM),
    sb_coherent_access_q(smpl_cfg::ALL_MASTER_NUM),
    sb_snoop_req_q(smpl_cfg::FULL_MASTER_NUM),
    sb_snoop_resp_q(smpl_cfg::FULL_MASTER_NUM),
    sb_snoop_data_resp_q(smpl_cfg::FULL_MASTER_NUM),
    coherency_checker("coherency_checker"),
    interconnect("interconnect")
  {
    // Construct Components
    for (int i=0; i<smpl_cfg::FULL_MASTER_NUM; ++i)
      master[i] = new ace_master<smpl_cfg::RD_LANES, smpl_cfg::RD_LANES, smpl_cfg::WR_LANES, smpl_cfg::WR_LANES, smpl_cfg::ALL_MASTER_NUM, smpl_cfg::SLAVE_NUM>(sc_gen_unique_name("master"));
    for (int i=0; i<smpl_cfg::LITE_MASTER_NUM; ++i)
      master_lite[i] = new acelite_master<smpl_cfg::RD_LANES, smpl_cfg::RD_LANES, smpl_cfg::WR_LANES, smpl_cfg::WR_LANES, smpl_cfg::ALL_MASTER_NUM, smpl_cfg::SLAVE_NUM>(sc_gen_unique_name("master-lite"));
    for (int i=0; i<smpl_cfg::SLAVE_NUM; ++i)
      slave[i] = new ace_slave <smpl_cfg::RD_LANES, smpl_cfg::RD_LANES, smpl_cfg::WR_LANES, smpl_cfg::WR_LANES, smpl_cfg::ALL_MASTER_NUM, smpl_cfg::SLAVE_NUM>(sc_gen_unique_name("slave"));
    std::cout << "--- Binding... ---\n";
    std::cout.flush();
    unsigned fu = smpl_cfg::SLAVE_NUM; // NOTE(review): 'fu' is unused
    // Address map: slave 0 serves [0, 0xffff], slave 1 serves [0x10000, 0x2ffff]
    addr_map[0][0] = 0;
    addr_map[0][1] = 0x0ffff;
    addr_map[1][0] = 0x10000;
    addr_map[1][1] = 0x2ffff;
    // BINDING - START
    // COHERENCY CHECKER
    coherency_checker.sb_lock = &sb_lock; // Scoreboard by Ref
    coherency_checker.sb_rd_req_q = &sb_rd_req_q; // Scoreboard by Ref
    coherency_checker.sb_rd_resp_q = &sb_rd_resp_q; // Scoreboard by Ref
    coherency_checker.sb_wr_req_q = &sb_wr_req_q; // Scoreboard by Ref
    coherency_checker.sb_wr_data_q = &sb_wr_data_q; // Scoreboard by Ref
    coherency_checker.sb_wr_resp_q = &sb_wr_resp_q; // Scoreboard by Ref
    coherency_checker.sb_coherent_access_q = &sb_coherent_access_q; // Scoreboard by Ref
    coherency_checker.sb_snoop_req_q = &sb_snoop_req_q; // Scoreboard by Ref
    coherency_checker.sb_snoop_resp_q = &sb_snoop_resp_q; // Scoreboard by Ref
    coherency_checker.sb_snoop_data_resp_q = &sb_snoop_data_resp_q; // Scoreboard by Ref
    coherency_checker.stop_gen(stop_gen);
    coherency_checker.clk(clk);
    coherency_checker.rst_n(rst_n);
    for(int j=0; j<smpl_cfg::SLAVE_NUM; ++j) {
      coherency_checker.addr_map[j][0](addr_map[j][0]);
      coherency_checker.addr_map[j][1](addr_map[j][1]);
    }
    // IC-Clk/Rst
    interconnect.clk(clk);
    interconnect.rst_n(rst_n);
    // FULL ACE MASTER
    for (int i=0; i<smpl_cfg::FULL_MASTER_NUM; ++i) {
      master[i]->sb_lock = &sb_lock; // Scoreboard by Ref
      master[i]->sb_rd_req_q = &sb_rd_req_q; // Scoreboard by Ref
      master[i]->sb_rd_resp_q = &sb_rd_resp_q; // Scoreboard by Ref
      master[i]->sb_wr_req_q = &sb_wr_req_q; // Scoreboard by Ref
      master[i]->sb_wr_data_q = &sb_wr_data_q; // Scoreboard by Ref
      master[i]->sb_wr_resp_q = &sb_wr_resp_q; // Scoreboard by Ref
      master[i]->sb_coherent_access_q = &sb_coherent_access_q; // Scoreboard by Ref
      master[i]->sb_snoop_req_q = &sb_snoop_req_q; // Scoreboard by Ref
      master[i]->sb_snoop_resp_q = &sb_snoop_resp_q; // Scoreboard by Ref
      master[i]->sb_snoop_data_resp_q = &sb_snoop_data_resp_q; // Scoreboard by Ref
      // Node ids: slaves occupy [0, SLAVE_NUM), masters follow after them
      master[i]->MASTER_ID = i+smpl_cfg::SLAVE_NUM;
      master[i]->AXI_GEN_RATE_RD = AXI_GEN_RATE_RD[i];
      master[i]->AXI_GEN_RATE_WR = AXI_GEN_RATE_WR[i];
      master[i]->ACE_GEN_RATE_CACHE = ACE_GEN_RATE_CACHE[i];
      master[i]->stop_gen(stop_gen);
      master[i]->clk(clk);
      master[i]->rst_n(rst_n);
      for(int j=0; j<smpl_cfg::SLAVE_NUM; ++j) {
        master[i]->addr_map[j][0](addr_map[j][0]);
        master[i]->addr_map[j][1](addr_map[j][1]);
      }
      master[i]->ac_in(master_snoop[i]);
      master[i]->cr_out(master_snoop_resp[i]);
      master[i]->cd_out(master_snoop_data[i]);
      master[i]->ar_out(master_rd_req[i]);
      master[i]->r_in(master_rd_resp[i]);
      master[i]->rack_out(master_rd_ack[i]);
      master[i]->aw_out(master_wr_req[i]);
      master[i]->w_out(master_wr_data[i]);
      master[i]->b_in(master_wr_resp[i]);
      master[i]->wack_out(master_wr_ack[i]);
      // Connect masters to IC
      interconnect.ac_out[i](master_snoop[i]);
      interconnect.cr_in[i](master_snoop_resp[i]);
      interconnect.cd_in[i](master_snoop_data[i]);
      interconnect.ar_in[i](master_rd_req[i]);
      interconnect.r_out[i](master_rd_resp[i]);
      interconnect.rack_in[i](master_rd_ack[i]);
      interconnect.aw_in[i](master_wr_req[i]);
      interconnect.w_in[i](master_wr_data[i]);
      interconnect.b_out[i](master_wr_resp[i]);
      interconnect.wack_in[i](master_wr_ack[i]);
    }
    // ACE-LITE MASTER : shares the rd/wr channel arrays with the full-ACE
    // masters, offset by FULL_MASTER_NUM; no snoop channels are bound.
    for (int i=0; i<smpl_cfg::LITE_MASTER_NUM; ++i) {
      master_lite[i]->sb_lock = &sb_lock; // Scoreboard by Ref
      master_lite[i]->sb_rd_req_q = &sb_rd_req_q; // Scoreboard by Ref
      master_lite[i]->sb_rd_resp_q = &sb_rd_resp_q; // Scoreboard by Ref
      master_lite[i]->sb_wr_req_q = &sb_wr_req_q; // Scoreboard by Ref
      master_lite[i]->sb_wr_data_q = &sb_wr_data_q; // Scoreboard by Ref
      master_lite[i]->sb_wr_resp_q = &sb_wr_resp_q; // Scoreboard by Ref
      master_lite[i]->sb_coherent_access_q = &sb_coherent_access_q; // Scoreboard by Ref
      master_lite[i]->sb_snoop_req_q = &sb_snoop_req_q; // Scoreboard by Ref
      master_lite[i]->sb_snoop_resp_q = &sb_snoop_resp_q; // Scoreboard by Ref
      master_lite[i]->sb_snoop_data_resp_q = &sb_snoop_data_resp_q; // Scoreboard by Ref
      master_lite[i]->MASTER_ID = i+smpl_cfg::SLAVE_NUM+smpl_cfg::FULL_MASTER_NUM;
      master_lite[i]->AXI_GEN_RATE_RD = AXI_GEN_RATE_RD[i+smpl_cfg::FULL_MASTER_NUM];
      master_lite[i]->AXI_GEN_RATE_WR = AXI_GEN_RATE_WR[i+smpl_cfg::FULL_MASTER_NUM];
      master_lite[i]->ACE_GEN_RATE_CACHE = ACE_GEN_RATE_CACHE[i+smpl_cfg::FULL_MASTER_NUM];
      master_lite[i]->stop_gen(stop_gen);
      master_lite[i]->clk(clk);
      master_lite[i]->rst_n(rst_n);
      for(int j=0; j<smpl_cfg::SLAVE_NUM; ++j) {
        master_lite[i]->addr_map[j][0](addr_map[j][0]);
        master_lite[i]->addr_map[j][1](addr_map[j][1]);
      }
      master_lite[i]->ar_out(master_rd_req[i+smpl_cfg::FULL_MASTER_NUM]);
      master_lite[i]->r_in(master_rd_resp[i+smpl_cfg::FULL_MASTER_NUM]);
      master_lite[i]->aw_out(master_wr_req[i+smpl_cfg::FULL_MASTER_NUM]);
      master_lite[i]->w_out(master_wr_data[i+smpl_cfg::FULL_MASTER_NUM]);
      master_lite[i]->b_in(master_wr_resp[i+smpl_cfg::FULL_MASTER_NUM]);
      // Connect masters to IC
      interconnect.ar_in[i+smpl_cfg::FULL_MASTER_NUM](master_rd_req[i+smpl_cfg::FULL_MASTER_NUM]);
      interconnect.r_out[i+smpl_cfg::FULL_MASTER_NUM](master_rd_resp[i+smpl_cfg::FULL_MASTER_NUM]);
      interconnect.aw_in[i+smpl_cfg::FULL_MASTER_NUM](master_wr_req[i+smpl_cfg::FULL_MASTER_NUM]);
      interconnect.w_in[i+smpl_cfg::FULL_MASTER_NUM](master_wr_data[i+smpl_cfg::FULL_MASTER_NUM]);
      interconnect.b_out[i+smpl_cfg::FULL_MASTER_NUM](master_wr_resp[i+smpl_cfg::FULL_MASTER_NUM]);
    }
    // ToDO : Decide the LLC Interface
    for (int i=0; i<smpl_cfg::SLAVE_NUM; ++i) {
      // SLAVE
      slave[i]->sb_lock = &sb_lock; // Scoreboard by Ref
      slave[i]->sb_rd_req_q = &sb_rd_req_q; // Scoreboard by Ref
      slave[i]->sb_rd_resp_q = &sb_rd_resp_q; // Scoreboard by Ref
      slave[i]->sb_wr_req_q = &sb_wr_req_q; // Scoreboard by Ref
      slave[i]->sb_wr_data_q = &sb_wr_data_q; // Scoreboard by Ref
      slave[i]->sb_wr_resp_q = &sb_wr_resp_q; // Scoreboard by Ref
      slave[i]->AXI_STALL_RATE_RD = AXI_STALL_RATE_RD;
      slave[i]->AXI_STALL_RATE_WR = AXI_STALL_RATE_WR;
      slave[i]->SLAVE_ID = i;
      slave[i]->stop_gen(stop_gen);
      slave[i]->clk(clk);
      slave[i]->rst_n(rst_n);
      for(int j=0; j<smpl_cfg::SLAVE_NUM; ++j) {
        slave[i]->addr_map[j][0](addr_map[j][0]);
        slave[i]->addr_map[j][1](addr_map[j][1]);
      }
      slave[i]->ar_in(slave_rd_req[i]);
      slave[i]->r_out(slave_rd_resp[i]);
      slave[i]->aw_in(slave_wr_req[i]);
      slave[i]->w_in(slave_wr_data[i]);
      slave[i]->b_out(slave_wr_resp[i]);
      // Slave Side
      interconnect.addr_map[i][0](addr_map[i][0]);
      interconnect.addr_map[i][1](addr_map[i][1]);
      interconnect.ar_out[i](slave_rd_req[i]);
      interconnect.r_in[i] (slave_rd_resp[i]);
      interconnect.aw_out[i](slave_wr_req[i]);
      interconnect.w_out[i](slave_wr_data[i]);
      interconnect.b_in[i](slave_wr_resp[i]);
    }
    // BINDING - END
    std::cout << "--- Binding Succeed ---\n";
    std::cout.flush();
    //sc_object_tracer<sc_clock> trace_clk(clk);
    Connections::set_sim_clk(&clk);
    SC_THREAD(harness_job);
    sensitive << clk.posedge_event();
    //sc_object_tracer<sc_clock> trace_clk(clk);
  } // End of Constructor
void harness_job() {
std::cout << "--- Simulation is Starting @" << sc_time_stamp() << " ---\n";
std::cout.flush();
rst_n.write(false);
stop_gen.write(true);
wait(CLK_PERIOD*2, SC_NS);
rst_n.write(true);
wait(CLK_PERIOD*2, SC_NS);
stop_gen.write(false);
wait(CLK_PERIOD*GEN_CYCLES, SC_NS);
stop_gen.write(true);
std::cout << "--- Transaction Generation Stopped @" << sc_time_stamp() << " ---\n";
std::cout.flush();
// Drain
bool all_drained = false;
do {
int rd_req_remain = 0;
int rd_resp_remain = 0;
for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i) rd_req_remain += sb_rd_req_q[i].size();
for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i) rd_resp_remain += sb_rd_resp_q[i].size();
int wr_req_remain = 0;
int wr_data_remain = 0;
int wr_resp_remain = 0;
for(int i=0; i<smpl_cfg::SLAVE_NUM;++i) wr_req_remain += sb_wr_req_q[i].size();
for(int i=0; i<smpl_cfg::SLAVE_NUM;++i) wr_data_remain += sb_wr_data_q[i].size();
for(int i=0; i<smpl_cfg::ALL_MASTER_NUM;++i) wr_resp_remain += sb_wr_resp_q[i].size();
// Outstanding coherent transactions
int cache_remain = 0;
for (int i=0; i<smpl_cfg::FULL_MASTER_NUM; ++i) {
for (auto it = master[i]->cache_outstanding.begin(); it != master[i]->cache_outstanding.end(); it++) {
cache_remain += it->second;
//std::cout << "Master " << i << " Addr:" << std::hex << it->first << std::dec << " : " << it->second << "\n";
}
}
all_drained = (!rd_req_remain) && (!rd_resp_remain) &&
(!wr_req_remain) && (!wr_data_remain) && (!wr_resp_remain) &&
(!cache_remain);
if(all_drained) {
std::cout << "--- Everything Drained @" << sc_time_stamp() << " ---\n";
} else {
std::cout << "--- Wait to drain";
std::cout << " (RD_Req: " << rd_req_remain << ", RD_Resp: " << rd_resp_remain << ")";
std::cout << " (WR_Req: " << wr_req_remain << ", WR_Data: " << wr_data_remain << ", WR_Resp: "<< wr_resp_remain <<")";
std::cout << " @" << sc_time_stamp() << " ---\n";
/* DEBUG ....
std::cout << "M0 AW_in available: " << (*master_wr_req)[0].num_available() << "\n"; // interconnect.aw_in_0.num_available() << "\n";
std::cout << "M0 avail :";
for (int i=0; i<5; ++i) std::cout << " " << interconnect.master_if_0.wr_reord_avail[i];
// std::cout << "\n";
// std::cout << __VERSION__ << "\n";
*/
wait(CLK_PERIOD*DRAIN_CYCLES, SC_NS);
}
std::cout.flush();
} while(!all_drained);
std::cout << "--- Harness Exits @" << sc_time_stamp() << "\n";
std::cout << "--- Simulation FINISHED @" << sc_time_stamp() << " ---\n";
std::cout.flush();
//--- Check for Errors ---//
int err_sb_rd_req_not_found=0, err_sb_rd_resp_not_found=0;
int err_sb_wr_req_not_found=0, err_sb_wr_data_not_found=0, err_sb_wr_resp_not_found=0;
int rd_req_generated=0, rd_resp_generated=0;
int rd_req_injected=0 , rd_req_ejected=0;
int rd_resp_injected=0, rd_resp_ejected=0;
int wr_req_generated=0, wr_data_generated=0, wr_resp_generated=0;
int wr_req_injected=0 , wr_req_ejected=0;
int wr_data_injected=0 , wr_data_ejected=0;
int wr_resp_injected=0, wr_resp_ejected=0;
for (int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i){
// READS
rd_req_generated += master[i]->rd_trans_generated;
rd_resp_generated += master[i]->rd_data_generated;
rd_req_injected += master[i]->rd_trans_inj;
rd_resp_injected += master[i]->rd_data_generated;
rd_resp_ejected += master[i]->rd_resp_ej;
err_sb_rd_resp_not_found += master[i]->error_sb_rd_resp_not_found;
// WRITES
wr_req_generated += master[i]->wr_trans_generated;
wr_data_generated += master[i]->wr_data_generated;
wr_req_injected += master[i]->wr_trans_inj;
wr_data_injected += master[i]->wr_data_inj;
wr_resp_ejected += master[i]->wr_resp_ej;
err_sb_wr_resp_not_found += master[i]->error_sb_wr_resp_not_found;
}
for (int i=0; i<smpl_cfg::SLAVE_NUM; ++i){
// READS
rd_req_ejected += slave[i]->rd_req_ej;
err_sb_rd_req_not_found += slave[i]->error_sb_rd_req_not_found;
// WRITES
wr_resp_generated += slave[i]->wr_resp_generated;
wr_req_ejected += slave[i]->wr_req_ej;
wr_data_ejected += slave[i]->wr_data_ej;
wr_resp_injected += slave[i]->wr_resp_inj;
err_sb_wr_req_not_found += slave[i]->error_sb_wr_req_not_found;
err_sb_wr_data_not_found += slave[i]->error_sb_wr_data_not_found;
}
bool error = (rd_req_injected - rd_req_ejected) || (rd_resp_injected - rd_resp_ejected) || err_sb_rd_req_not_found || err_sb_rd_resp_not_found;
/*
std::cout << "\n";
if (error) {
std::cout << "!!! --- FAILED --- !!!\n";
std::cout << "READS : " << (rd_req_injected-rd_req_ejected) << " Reqs Dropped\n";
std::cout << " " << (rd_resp_injected-rd_resp_ejected) << " Resps Dropped\n";
std::cout << " " << err_sb_rd_req_not_found << " Reqs Not Found in SB\n";
std::cout << " " << err_sb_rd_resp_not_found << " Resps Not Found in SB\n";
std::cout << " " << " Out of :\n";
std::cout << " " << rd_req_generated << " Reqs Generated\n";
std::cout << " " << rd_resp_generated << " Resps Generated\n";
std::cout << "WRITES : " << (wr_req_injected-wr_req_ejected) << " Reqs Dropped\n";
std::cout << " " << (wr_data_injected-wr_data_ejected) << " Data Dropped\n";
std::cout << " " << (wr_resp_injected-wr_resp_ejected) << " Resps Dropped\n";
std::cout << " " << err_sb_wr_req_not_found << " Reqs Not Found in SB\n";
std::cout << " " << err_sb_wr_data_not_found << " Data Not Found in SB\n";
std::cout << " " << err_sb_wr_resp_not_found << " Resps Not Found in SB\n";
std::cout << " " << " Out of :\n";
std::cout << " " << wr_req_generated << " Reqs Generated\n";
std::cout << " " << wr_data_generated << " Data Generated\n";
std::cout << " " << wr_resp_generated << " Resps Generated\n";
} else {
std::cout << " " << rd_req_generated << " RD_Reqs Generated\n";
std::cout << " " << rd_resp_generated << " RD_Resps Generated\n\n";
std::cout << " " << wr_req_generated << " WR_Reqs Generated\n";
std::cout << " " << wr_data_generated << " WR_Data Generated\n";
std::cout << " " << wr_resp_generated << " WR_Resps Generated\n\n";
std::cout << "PASSED. No Errors.\n";
}
std::cout << "\n";
*/
std::cout.flush();
// Print Masters' Caches
for (int i=0; i<smpl_cfg::FULL_MASTER_NUM; ++i) {
std::cout << "--- [ Master " << i+smpl_cfg::SLAVE_NUM << "] ---\n";
for(auto it = master[i]->cache.begin(); it != master[i]->cache.end(); it++) {
std::cout << "Addr:"<< std::hex << it->first << std::dec << " : " << it->second << "\n";
}
//std::cout << "\n";
}
for (int i=0; i<smpl_cfg::LITE_MASTER_NUM; ++i) {
std::cout << "--- [ Master " << i+smpl_cfg::SLAVE_NUM+smpl_cfg::FULL_MASTER_NUM << "] ---\n";
std::cout << " ACE-Lite " << "\n";
}
// Delay calculation
std::cout << "\n";
unsigned long long int wr_delay_full_sum_glob = 0;
unsigned long long int rd_delay_full_sum_glob = 0;
unsigned long long int wr_trans_sum_glob = 0;
unsigned long long int rd_trans_sum_glob = 0;
unsigned long long int rd_data_count_glob = 0;
unsigned long long int wr_data_count_glob = 0;
unsigned long long int wr_delay_full_sum_p_m[smpl_cfg::ALL_MASTER_NUM];
unsigned long long int rd_delay_full_sum_p_m[smpl_cfg::ALL_MASTER_NUM];
unsigned long long int wr_trans_sum_p_m[smpl_cfg::ALL_MASTER_NUM];
unsigned long long int rd_trans_sum_p_m[smpl_cfg::ALL_MASTER_NUM];
for(int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i) {
wr_delay_full_sum_p_m[i] = 0; rd_delay_full_sum_p_m[i] = 0;
wr_trans_sum_p_m[i] = 0; rd_trans_sum_p_m[i] = 0;
}
for (int i=0; i<smpl_cfg::ALL_MASTER_NUM; ++i) {
rd_delay_full_sum_p_m[i] += master[i]->rd_resp_delay;
rd_trans_sum_p_m[i] += master[i]->rd_resp_count;
rd_delay_full_sum_glob += master[i]->rd_resp_delay;
rd_trans_sum_glob += master[i]->rd_resp_count;
rd_data_count_glob += master[i]->rd_resp_data_count;
wr_delay_full_sum_p_m[i] += master[i]->wr_resp_delay;
wr_trans_sum_p_m[i] += master[i]->wr_resp_count;
wr_delay_full_sum_glob += master[i]->wr_resp_delay;
wr_trans_sum_glob += master[i]->wr_resp_count;
wr_data_count_glob += master[i]->wr_resp_data_count;
}
sc_time this_clk_period = clk.period();
unsigned long long int total_cycles = sc_time_stamp() / this_clk_period;
/*
std::cout << "Delay Per Master Slave(delay, Throughput) :\n";
for (int i=0; i<smpl_cfg::MASTER_NUM; i++) {
std::cout << "M" << i << " RD: " << (rd_trans_sum_p_m[i] ? ((float)rd_delay_full_sum_p_m[i] / (float)rd_trans_sum_p_m[i]) : 0)
<< ", "
<< (rd_trans_sum_p_m[i] ? ((float)master[i]->rd_resp_data_count / (float)master[i]->last_rd_sinked_cycle) : 0)
<< "\n WR: "
<< (wr_trans_sum_p_m[i] ? ((float)wr_delay_full_sum_p_m[i] / (float)wr_trans_sum_p_m[i]) : 0)
<< ", "
<< (wr_trans_sum_p_m[i] ? ((float)master[i]->wr_resp_data_count / (float)total_cycles) : 0)
<< "\n";
}
float rd_delay_full_total = ((float)rd_delay_full_sum_glob / (float)rd_trans_sum_glob);
float wr_delay_full_total = ((float)wr_delay_full_sum_glob / (float)wr_trans_sum_glob);
float rd_throughput_total = ((float)rd_data_count_glob / (float)total_cycles) / (float)smpl_cfg::MASTER_NUM;
float wr_throughput_total = ((float)wr_data_count_glob / (float)total_cycles) / (float)smpl_cfg::MASTER_NUM;
std::cout << " (RD, WR) \n";
std::cout << "Full Avg delay(cycles) : " << rd_delay_full_total << ", "<< wr_delay_full_total << "\n";
std::cout << "Throughput (flits/cycle/node) : " << rd_throughput_total << ", "<< wr_throughput_total << "\n";
*/
std::cout << __VERSION__ << "\n";
std::cout.flush();
std::cout << "\n Simulation Finished! \n";
sc_stop();
}
}; // End of harness
#endif // ACE_IC_HARNESS_H
|
ic-lab-duth/NoCpad
|
src/ace/ace_home.h
|
// --------------------------------------------------------- //
// HOME-NODE - Sorts the coherent transactions //
// --------------------------------------------------------- //
#ifndef _ACE_HOME_H_
#define _ACE_HOME_H_
#include "systemc.h"
#include "nvhls_connections.h"
#include "../include/ace.h"
#include "../include/flit_ace.h"
#include "../include/duth_fun.h"
// --- HOME NODE ---
// All coherent transactions are serialized to a HOME NODE.
// HOME generates the appropriate Snoop requests and gathers their responses
// Regarding the responses of the snooped masters, HOME decides if access to
// a main memory (i.e. Slave) is required for the transaction completion
template <typename cfg>
SC_MODULE(ace_home) {
  typedef typename ace::ace5<axi::cfg::ace> ace5_;
  typedef typename ace::ACE_Encoding        enc_;

  // Flit types of every traffic class that traverses this node.
  typedef flit_dnp<cfg::RREQ_PHITS>  rreq_flit_t;
  typedef flit_dnp<cfg::RRESP_PHITS> rresp_flit_t;
  typedef flit_dnp<cfg::WREQ_PHITS>  wreq_flit_t;
  typedef flit_dnp<cfg::WRESP_PHITS> wresp_flit_t;
  typedef flit_dnp<cfg::CREQ_PHITS>  creq_flit_t;
  typedef flit_dnp<cfg::CRESP_PHITS> cresp_flit_t;
  typedef flit_ack                   ack_flit_t;

  typedef sc_uint< clog2<cfg::RRESP_PHITS>::val > cnt_phit_rresp_t;
  typedef sc_uint< clog2<cfg::WRESP_PHITS>::val > cnt_phit_wresp_t;

  const unsigned char LOG_RD_M_LANES = nvhls::log2_ceil<cfg::RD_LANES>::val;
  const unsigned char LOG_WR_M_LANES = nvhls::log2_ceil<cfg::WR_LANES>::val;

  sc_in_clk   clk;
  sc_in<bool> rst_n;

  // Memory map of the slaves: addr_map[s][0] is the first and
  // addr_map[s][1] the last address served by slave s.
  sc_in< sc_uint<(dnp::ace::AH_W+dnp::ace::AL_W)> > addr_map[cfg::SLAVE_NUM][2];
  sc_in< sc_uint<dnp::S_W> > THIS_ID;

  // NoC Side Channels
  Connections::Out<creq_flit_t>  INIT_S1(cache_req);   // Snoop requests towards the cached masters
  Connections::In<cresp_flit_t>  INIT_S1(cache_resp);  // Snoop responses from the cached masters

  Connections::In<rreq_flit_t>   INIT_S1(rd_from_master);
  Connections::Out<rresp_flit_t> INIT_S1(rd_to_master);
  Connections::Out<rreq_flit_t>  INIT_S1(rd_to_slave);
  Connections::In<rresp_flit_t>  INIT_S1(rd_from_slave);

  Connections::In<wreq_flit_t>   INIT_S1(wr_from_master);
  Connections::Out<wresp_flit_t> INIT_S1(wr_to_master);
  Connections::Out<wreq_flit_t>  INIT_S1(wr_to_slave);
  Connections::In<wresp_flit_t>  INIT_S1(wr_from_slave);

  Connections::In<ack_flit_t>    INIT_S1(ack_from_master);

  // Constructor
  SC_HAS_PROCESS(ace_home);
  ace_home(sc_module_name name_="ace_home")
    :
      sc_module (name_)
  {
    SC_THREAD(req_check);
    sensitive << clk.pos();
    async_reset_signal_is(rst_n, false);
  }

  // Single serving thread. Serializes coherent transactions: pops one RD/WR
  // request, snoops all other FULL masters, gathers their responses, accesses
  // the memory slave when needed and finally responds to the initiator
  // (waiting for its R/W-ACK when the initiator is a FULL master).
  void req_check () {
    cache_req.Reset();
    cache_resp.Reset();

    rd_from_master.Reset();
    rd_to_master.Reset();
    rd_to_slave.Reset();
    rd_from_slave.Reset();

    wr_from_master.Reset();
    wr_to_master.Reset();
    wr_to_slave.Reset();
    wr_from_slave.Reset();

    // BUGFIX: ack_from_master is Pop()-ed at the end of each transaction,
    // thus it must be reset here like every other Connections channel.
    ack_from_master.Reset();
    //-- End of Reset ---//
    while(1) {
      wait();
      // Initial Request
      // Wait until a read or write request is received.
      // NOTE(review): WR requests are popped into a RD-request flit; this
      // relies on the request header fitting both flit types - confirm that
      // RREQ_PHITS matches WREQ_PHITS in every configuration used.
      rreq_flit_t flit_req_rcv;
      bool got_new_req = false;
      do {
        if (!got_new_req) got_new_req = wr_from_master.PopNB(flit_req_rcv);
        if (!got_new_req) got_new_req = rd_from_master.PopNB(flit_req_rcv);
        wait();
      } while (!got_new_req);

      // Check who initiated the transaction and the type of the transaction
      ace5_::AddrPayload cur_req;
      unsigned initiator = flit_req_rcv.get_src();
      bool is_read  = (flit_req_rcv.get_type() == dnp::PACK_TYPE__RD_REQ);
      bool is_write = (flit_req_rcv.get_type() == dnp::PACK_TYPE__WR_REQ);
      NVHLS_ASSERT_MSG(is_read ^ is_write , "ERROR : Home got request of wrong type.");
      flit_req_rcv.get_rd_req(cur_req);

#ifndef __SYNTHESIS__
      if(is_read) std::cout << "[HOME "<< THIS_ID <<"] Got RD from " << initiator << " : " << cur_req << " @" << sc_time_stamp() << "\n";
      else        std::cout << "[HOME "<< THIS_ID <<"] Got WR from " << initiator << " : " << cur_req << " @" << sc_time_stamp() << "\n";
#endif
      NVHLS_ASSERT_MSG(((flit_req_rcv.data[0].to_uint() >> dnp::D_PTR) & ((1<<dnp::D_W)-1)) == (THIS_ID.read().to_uint()), "Flit misrouted!");

      // Build the appropriate Snoop request for the cached FULL ACE Masters,
      // depending on the kind of coherent access.
      ace5_::AC snoop_req;
      snoop_req.addr  = cur_req.addr;
      snoop_req.snoop = is_read ? ace::rd_2_snoop(cur_req.snoop) : ace::wr_2_snoop(cur_req.snoop);
      snoop_req.prot  = initiator; // the initiator ID is piggybacked on AC.prot

      creq_flit_t flit_snoop;
      flit_snoop.type = SINGLE; // Entire request fits in single flits thus SINGLE
      flit_snoop.set_network(THIS_ID, 0, 0, (is_read ? dnp::PACK_TYPE__RD_REQ : dnp::PACK_TYPE__WR_REQ), 0);
      flit_snoop.set_snoop_req(snoop_req);

      // Send the Snoop requests to every FULL master except the initiator.
      // This can exploit multicast capabilities of the routers.
      for (int i=cfg::SLAVE_NUM; i<cfg::SLAVE_NUM+cfg::FULL_MASTER_NUM; ++i) {
        if(i!=initiator) {
          flit_snoop.set_dst(i);
          cache_req.Push(flit_snoop);
          wait();
        }
      }

      // Depending on the initiating master (Lite or Full ACE) a different number
      // of snoop responses is expected (a FULL master does not snoop itself).
      bool     init_is_full = (initiator<(cfg::SLAVE_NUM+cfg::FULL_MASTER_NUM));
      unsigned resp_wait    = init_is_full ? cfg::FULL_MASTER_NUM-1 : cfg::FULL_MASTER_NUM;

      // Gather all snoop responses, keeping at most one data beat
      // (a dirty copy overwrites a previously captured clean one).
      cresp_flit_t    snoop_resp_data;
      ace5_::CR::Resp resp_accum = 0;
      bool got_data  = false;
      bool got_dirty = false;
      while (resp_wait) {
        cresp_flit_t flit_rcv_snoop_resp = cache_resp.Pop();
        NVHLS_ASSERT_MSG((flit_rcv_snoop_resp.type == HEAD || flit_rcv_snoop_resp.type == SINGLE), "Snoop Responce Must be at HEAD/SINGLE flit.");
        // Each response is checked if it contains data, and is accumulated
        // into resp_accum to conclude to an action.
        ace5_::CR::Resp cur_snoop_resp;
        cur_snoop_resp = (flit_rcv_snoop_resp.data[0] >> dnp::ace::cresp::C_RESP_PTR) & ((1<<dnp::ace::C_RESP_W)-1);

        bool has_data  = cur_snoop_resp & 0x1; // CRRESP DataTransfer bit
        bool has_dirty = cur_snoop_resp & 0x4; // CRRESP PassDirty bit
        if (has_data) {
          if (has_dirty || !got_data) {
            snoop_resp_data = cache_resp.Pop(); // Get data
            NVHLS_ASSERT_MSG((snoop_resp_data.type == TAIL), "Currently a single flit cache line is supported!");
          } else {
            cresp_flit_t drop_data = cache_resp.Pop(); // Already got Data, thus drop any other
            NVHLS_ASSERT_MSG((drop_data.type == TAIL), "Currently a single flit cache line is supported!" );
          }
        }
        resp_accum |= cur_snoop_resp;
        got_dirty  |= cur_snoop_resp & 0x4;
        got_data   |= cur_snoop_resp & 0x1;
        resp_wait--;
      }

      // If the initiator demands a clean line, update Mem in case of a dirty line.
      bool update_mem = req_denies_dirty(cur_req.snoop, is_read);
      if (got_dirty && update_mem) {
        unsigned mem_to_write = addr_lut(cur_req.addr);
        wreq_flit_t mem_upd_flit;
        mem_upd_flit.type = HEAD;
        // Reuse the received request header (assumes a 3-phit header).
        mem_upd_flit.data[0] = flit_req_rcv.data[0];
        mem_upd_flit.data[1] = flit_req_rcv.data[1];
        mem_upd_flit.data[2] = flit_req_rcv.data[2];
        mem_upd_flit.set_network(THIS_ID, mem_to_write, 0, dnp::PACK_TYPE__C_WR_REQ, 0);
        wr_to_slave.Push(mem_upd_flit);
        #pragma hls_unroll yes
        for (int i=0; i<cfg::WREQ_PHITS; ++i) {
          // Forward the dirty data with all byte enables set.
          mem_upd_flit.data[i] = snoop_resp_data.data[i] | (((sc_uint<dnp::PHIT_W>)3) << dnp::ace::wdata::E0_PTR);
        }
        mem_upd_flit.type = TAIL;
        wr_to_slave.Push(mem_upd_flit);
        wr_from_slave.Pop(); // ToDo : Maybe error handling
        resp_accum = resp_accum & 0x1B; // Drop Pass Dirty bit as it got written in Mem
      }

      if (is_read) {
        bool data_are_expected = req_expects_data(cur_req.snoop, is_read);
        // After responses are gathered, either respond to the initiating
        // master or ask Main-mem/LLC.
        if (data_are_expected) {
          if (!got_data) {
            // Didn't get a data response, thus ask memory
            unsigned mem_to_req = addr_lut(cur_req.addr);
            flit_req_rcv.set_network(THIS_ID, mem_to_req, 0, dnp::PACK_TYPE__C_RD_REQ, 0);
            rd_to_slave.Push(flit_req_rcv);

            rd_from_slave.Pop(); // Drop the header
            snoop_resp_data = rd_from_slave.Pop();
            resp_accum = ((snoop_resp_data.data[0] >> dnp::ace::rdata::RE_PTR) & 0x3) | (resp_accum & 0xC);
          } else {
            resp_accum = resp_accum & 0xE; // MASK WasUnique and HasData. Easily creating the R resp from CR resp
          }
          #pragma hls_unroll yes
          for (int i=0; i<cfg::RRESP_PHITS; ++i) {
            // FIX: was "data[i] |= data[i] | resp" -- the self-OR was redundant.
            snoop_resp_data.data[i] |= (((sc_uint<dnp::PHIT_W>)resp_accum) << dnp::ace::rdata::RE_PTR);
          }
        } else {
          // Master does not expect Data, thus build an empty data+response;
          // any dirty data received was already written to Mem above.
          resp_accum = resp_accum & 0xE; // MASK WasUnique and HasData. Easily creating the R resp from CR resp
          #pragma hls_unroll yes
          for (int i=0; i<cfg::RRESP_PHITS; ++i) {
            snoop_resp_data.data[i] = (((sc_uint<dnp::PHIT_W>) resp_accum) << dnp::ace::rdata::RE_PTR);
          }
          snoop_resp_data.type = TAIL;
        }

        // Build and send the response packet to the initiator.
        rresp_flit_t flit_resp_to_init;
        flit_resp_to_init.type = HEAD;
        flit_resp_to_init.set_network(THIS_ID, initiator, 0, dnp::PACK_TYPE__C_RD_RESP, 0);
        flit_resp_to_init.set_rd_resp(cur_req);

        rd_to_master.Push(flit_resp_to_init); // Send Header flit
        rd_to_master.Push(snoop_resp_data);   // Send Data. IsSHared and IsDirty are expected to be 0
      } else {
        // Init transaction is a Write, thus resolve the Mem to write
        // and forward the Write transaction.
        unsigned mem_to_write = addr_lut(cur_req.addr);
        flit_req_rcv.set_network(THIS_ID, mem_to_write, 0, dnp::PACK_TYPE__C_WR_REQ, 0);
        wr_to_slave.Push(flit_req_rcv); // Send Head

        wreq_flit_t data_to_update;
        while(!wr_from_master.PopNB(data_to_update)) {
          wait();
        }
        wr_to_slave.Push(data_to_update);

        // Transfer the response to the init Master.
        mv_wr_resp_to_initiator();
      }

      // Wait for the final ack from the Init Master, which signifies that its
      // cache has completed the transaction. Only a Full Master sends an Ack.
      if (init_is_full) {
        ack_flit_t rcv_ack = ack_from_master.Pop();
        NVHLS_ASSERT_MSG( (rcv_ack.is_rack()&&is_read) || (rcv_ack.is_wack()&&(!is_read)), "ACK does not match the responce (i.e. RD/WR)");
#ifndef __SYNTHESIS__
        if(is_read) std::cout << "[HOME "<< THIS_ID <<"] Got RD ACK from " << rcv_ack.get_src() << " : " << cur_req << " @" << sc_time_stamp() << "\n";
        else        std::cout << "[HOME "<< THIS_ID <<"] Got WR AK from " << rcv_ack.get_src() << " : " << cur_req << " @" << sc_time_stamp() << "\n";
#endif
      }
    } // End of while(1)
  }; // End of HOME Node

  // Helper: forward the slave's write response to the initiating master,
  // rewriting the network source/destination fields.
  inline void mv_wr_resp_to_initiator() {
    wresp_flit_t mv_wr_resp = wr_from_slave.Pop();
    mv_wr_resp.set_src(THIS_ID);   // HOME becomes the source of the response
    mv_wr_resp.set_dst(initiator_of_wr_resp);
    wr_to_master.Push(mv_wr_resp);
  };

  // Transactions that do not accept Dirty data; for these the interconnect
  // is responsible to write a dirty line back to memory itself.
  inline bool req_denies_dirty(NVUINTW(enc_::ARSNOOP::_WIDTH) &request_in, bool is_read ) {
    if (is_read) {
      if ((request_in == enc_::ARSNOOP::RD_ONCE)             ||
          (request_in == enc_::ARSNOOP::RD_CLEAN)            ||
          (request_in == enc_::ARSNOOP::RD_NOT_SHARED_DIRTY) ||
          (request_in == enc_::ARSNOOP::CLEAN_UNIQUE)        ||
          (request_in == enc_::ARSNOOP::MAKE_UNIQUE)         ||
          (request_in == enc_::ARSNOOP::CLEAN_SHARED)        ||
          (request_in == enc_::ARSNOOP::CLEAN_INVALID)       ||
          (request_in == enc_::ARSNOOP::MAKE_INVALID) )
      {
        return true;
      }
    } else { // request is a write, thus any dirty line must be sent to Mem
      return true;
    }
    return false;
  };

  // When data are required, HOME must access Mem if all snoops missed.
  inline bool req_expects_data(NVUINTW(enc_::ARSNOOP::_WIDTH) &request_in, bool is_read ) {
    if (is_read) {
      if ((request_in == enc_::ARSNOOP::RD_ONCE)             ||
          (request_in == enc_::ARSNOOP::RD_CLEAN)            ||
          (request_in == enc_::ARSNOOP::RD_NOT_SHARED_DIRTY) ||
          (request_in == enc_::ARSNOOP::RD_SHARED)           ||
          (request_in == enc_::ARSNOOP::RD_UNIQUE) )
      {
        return true;
      }
    }
    return false;
  };

  // Memory map resolving : returns the slave that serves the given address.
  inline unsigned char addr_lut(const ace5_::Addr addr) {
    // BUGFIX: iterate over all SLAVE_NUM entries of the address map instead
    // of the previously hard-coded 2 (which was correct only for SLAVE_NUM==2).
    for (int i=0; i<cfg::SLAVE_NUM; ++i) {
      if (addr>=addr_map[i][0].read() && addr <= addr_map[i][1].read()) return i;
    }
    return 0; // Or send 404
  };
}; // End of Home module
#endif // _ACE_HOME_H_
|
ic-lab-duth/NoCpad
|
src/include/axi_for_ace.h
|
<gh_stars>1-10
/*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Modifications Copyright (c) 2019-2020 Integrated Circuits Lab, Democritus University of Thrace, Greece.
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _AXI_FOR_ACE_H_
#define _AXI_FOR_ACE_H_
#include <systemc>
#include <nvhls_connections.h>
#include <nvhls_assert.h>
#include <nvhls_message.h>
#include <nvhls_module.h>
#include <UIntOrEmpty.h>
#include <axi/axi4_encoding.h>
#include <axi/axi4_configs.h>
/**
* \brief The ace namespace contains classes and definitions related to the AXI standard.
* \ingroup AXI
*/
namespace ace {
template <typename Cfg>
class axi4 {
public:
typedef axi::AXI4_Encoding Enc;
enum {
DATA_WIDTH = Cfg::dataWidth,
ADDR_WIDTH = Cfg::addrWidth,
ID_WIDTH = Cfg::idWidth,
BID_WIDTH = (Cfg::useWriteResponses == 0 ? 0 : Cfg::idWidth),
ALEN_WIDTH =
(Cfg::useBurst != 0 ? nvhls::log2_ceil<Cfg::maxBurstSize>::val : 0),
ASIZE_WIDTH = (Cfg::useVariableBeatSize != 0 ? 3 : 0),
LAST_WIDTH = (Cfg::useLast != 0 ? 1 : 0),
CACHE_WIDTH = (Cfg::useCache != 0 ? Enc::ARCACHE::_WIDTH : 0),
BURST_WIDTH = ((Cfg::useBurst != 0 &&
(Cfg::useFixedBurst != 0 || Cfg::useWrapBurst != 0))
? Enc::AXBURST::_WIDTH
: 0),
WSTRB_WIDTH = (Cfg::useWriteStrobes != 0 ? (DATA_WIDTH >> 3) : 0),
RESP_WIDTH = Cfg::useACE ? Enc::XRESP::_WIDTH+2 : Enc::XRESP::_WIDTH,
// TODO - The B channel ought to disappear entirely if useWriteResponses is
// 0, but that will require substantial refactoring. For now we leave
// RESP_WIDTH so there is a data stub in the ready-valid interface.
AUSER_WIDTH = Cfg::aUserWidth,
WUSER_WIDTH = Cfg::wUserWidth,
BUSER_WIDTH = (Cfg::useWriteResponses == 0 ? 0 : Cfg::bUserWidth),
RUSER_WIDTH = Cfg::rUserWidth,
C_SNOOP_WIDTH = Cfg::useACE ? 4 : 0,
C_DOMAIN_WIDTH = Cfg::useACE ? 2 : 0,
C_BARRIER_WIDTH = Cfg::useACE ? 2 : 0,
C_UNIQUE_WIDTH = Cfg::useACE ? 1 : 0, // The AWUNIQUE signal is only required by a component that supports the WriteEvict transaction.
};
typedef NVUINTW(ADDR_WIDTH) Addr;
typedef NVUINTW(DATA_WIDTH) Data;
typedef typename nvhls::UIntOrEmpty<ID_WIDTH>::T Id;
typedef typename nvhls::UIntOrEmpty<BID_WIDTH>::T BId;
typedef typename nvhls::UIntOrEmpty<ALEN_WIDTH>::T BeatNum;
typedef typename nvhls::UIntOrEmpty<ASIZE_WIDTH>::T BeatSize;
typedef typename nvhls::UIntOrEmpty<LAST_WIDTH>::T Last;
typedef typename nvhls::UIntOrEmpty<WSTRB_WIDTH>::T Wstrb;
typedef typename nvhls::UIntOrEmpty<CACHE_WIDTH>::T Cache;
typedef typename nvhls::UIntOrEmpty<BURST_WIDTH>::T Burst;
typedef NVUINTW(RESP_WIDTH) Resp;
typedef typename nvhls::UIntOrEmpty<AUSER_WIDTH>::T AUser;
typedef typename nvhls::UIntOrEmpty<WUSER_WIDTH>::T WUser;
typedef typename nvhls::UIntOrEmpty<BUSER_WIDTH>::T BUser;
typedef typename nvhls::UIntOrEmpty<RUSER_WIDTH>::T RUser;
// ACE extensions on AW - AR - R
typedef typename nvhls::UIntOrEmpty<C_SNOOP_WIDTH>::T Snoop;
typedef typename nvhls::UIntOrEmpty<C_DOMAIN_WIDTH>::T Domain;
typedef typename nvhls::UIntOrEmpty<C_BARRIER_WIDTH>::T Barrier;
typedef typename nvhls::UIntOrEmpty<C_UNIQUE_WIDTH>::T Unique;
  /**
   * \brief A struct composed of the signals associated with AXI read and write requests.
   *
   * Carries the AR/AW channel fields plus the ACE snoop extensions. Fields
   * whose configured width is zero degrade to an EmptyField type, hence the
   * width guards in the default constructor.
   */
  struct AddrPayload : public nvhls_message {
    Id id;
    Addr addr;
    Burst burst;
    BeatNum len;    // A*LEN
    BeatSize size;  // A*SIZE
    Cache cache;
    AUser auser;
    //ACE extension
    Snoop snoop;
    Domain domain;
    Barrier barrier;
    Unique unique; // Only used by AW. Consider splitting AddrPayload into separate classes

    // Total marshalled width in bits; zero-width fields contribute nothing.
    static const unsigned int width = ADDR_WIDTH + ID_WIDTH + ALEN_WIDTH +
                                      ASIZE_WIDTH + BURST_WIDTH + CACHE_WIDTH +
                                      AUSER_WIDTH +
                                      C_SNOOP_WIDTH + C_DOMAIN_WIDTH + C_BARRIER_WIDTH + C_UNIQUE_WIDTH;

    AddrPayload() {
      // Zero-width (EmptyField) members cannot be assigned, hence the guards.
      if(ID_WIDTH > 0)
        id = 0;
      addr = 0; // NVUINT, ADDR_WIDTH always > 0
      if(ALEN_WIDTH > 0)
        len = 0;
      if(ASIZE_WIDTH > 0)
        size = 0;
      if(BURST_WIDTH > 0)
        burst = 0;
      if(CACHE_WIDTH > 0)
        cache = 0;
      if(AUSER_WIDTH > 0)
        auser = 0;
      if(C_SNOOP_WIDTH > 0)
        snoop = 0;
      if(C_DOMAIN_WIDTH > 0)
        domain = 0;
      if(C_BARRIER_WIDTH > 0)
        barrier = 0;
      if(C_UNIQUE_WIDTH > 0)
        unique = 0;
    }

    // NOTE: the marshalling order defines the wire format - do not reorder.
    template <unsigned int Size>
    void Marshall(Marshaller<Size> &m) {
      m &id;
      m &addr;
      m &len;
      m &size;
      m &burst;
      m &cache;
      m &auser;
      m &snoop;
      m &domain;
      m &barrier;
      m &unique;
    }

#ifdef CONNECTIONS_SIM_ONLY
    inline friend void sc_trace(sc_trace_file *tf, const AddrPayload& v, const std::string& NAME ) {
      sc_trace(tf,v.id,   NAME + ".id");
      sc_trace(tf,v.addr, NAME + ".addr");
      sc_trace(tf,v.len,  NAME + ".len");
      if (Cfg::useACE) {
        sc_trace(tf,v.snoop,   NAME + ".snoop");
        sc_trace(tf,v.domain,  NAME + ".domain");
        sc_trace(tf,v.barrier, NAME + ".barrier");
        sc_trace(tf,v.unique,  NAME + ".unique");
      }
    }

    // With LOG_MSG_WIDTHS defined, prints the field widths; otherwise the
    // field values (in hex).
    inline friend std::ostream& operator<<(ostream& os, const AddrPayload& rhs)
    {
#ifdef LOG_MSG_WIDTHS
      os << std::dec;
      os << "id:"      << rhs.id.width << " ";
      os << "addr:"    << rhs.addr.width << " ";
      os << "len:"     << rhs.len.width << " ";
      os << "size:"    << rhs.size.width << " ";
      os << "burst:"   << rhs.burst.width << " ";
      os << "cache:"   << rhs.cache.width << " ";
      os << "auser:"   << rhs.auser.width << " ";
      if (Cfg::useACE) {
        os << "snoop:"   << rhs.snoop.width << " ";
        os << "domain:"  << rhs.domain.width << " ";
        os << "barrier:" << rhs.barrier.width << " ";
        os << "unique:"  << rhs.unique.width << " ";
      }
#else
      os << std::hex;
      os << "Id:"   << rhs.id << " ";
      os << "Addr:" << rhs.addr << " ";
      os << "Len:"  << rhs.len << " ";
      os << "Sz:"   << rhs.size << " ";
      os << "Bu:"   << rhs.burst << " ";
      if (CACHE_WIDTH)
        os << "csh:" << rhs.cache << " ";
      if (AUSER_WIDTH)
        os << "Us:" << rhs.auser << " ";
      if (Cfg::useACE) {
        os << "--ACE-- ";
        os << "Snp:" << rhs.snoop << " ";
        os << "Dom:" << rhs.domain << " ";
        os << "Bar:" << rhs.barrier << " ";
        os << "Unq:" << rhs.unique << " ";
      }
      os << std::dec;
#endif
      return os;
    }
#endif
  };
  /**
   * \brief A struct composed of the signals associated with an AXI read
   * response.
   */
  struct ReadPayload : public nvhls_message {
    Id id;
    Data data;
    Resp resp;
    Last last;
    RUser ruser;

    // Total marshalled width in bits; zero-width fields contribute nothing.
    static const unsigned int width =
        DATA_WIDTH + RESP_WIDTH + ID_WIDTH + LAST_WIDTH + RUSER_WIDTH;

    ReadPayload() {
      // Zero-width (EmptyField) members cannot be assigned, hence the guards.
      if(ID_WIDTH > 0)
        id = 0;
      data = 0; // NVUINT, DATA_WIDTH always > 0
      resp = 0; // NVUINT, RESP_WIDTH always > 0
      if(LAST_WIDTH > 0)
        last = 0;
      if(RUSER_WIDTH > 0)
        ruser = 0;
    }

    // NOTE: the marshalling order defines the wire format - do not reorder.
    template <unsigned int Size>
    void Marshall(Marshaller<Size> &m) {
      m &id;
      m &data;
      m &resp;
      m &last;
      m &ruser;
    }

#ifdef CONNECTIONS_SIM_ONLY
    inline friend void sc_trace(sc_trace_file *tf, const ReadPayload& v, const std::string& NAME ) {
      sc_trace(tf,v.id,   NAME + ".id");
      sc_trace(tf,v.data, NAME + ".data");
      sc_trace(tf,v.last, NAME + ".last");
    }

    // With LOG_MSG_WIDTHS defined, prints the field widths; otherwise the
    // field values (in hex).
    inline friend std::ostream& operator<<(ostream& os, const ReadPayload& rhs)
    {
#ifdef LOG_MSG_WIDTHS
      os << std::dec;
      os << "id:"    << rhs.id.width << " ";
      os << "data:"  << rhs.data.width << " ";
      os << "resp:"  << rhs.resp.width << " ";
      os << "last:"  << rhs.last.width << " ";
      os << "ruser:" << rhs.ruser.width << " ";
#else
      os << std::hex;
      os << "Id:"   << rhs.id << " ";
      os << "Data:" << rhs.data << " ";
      os << "Resp:" << rhs.resp << " ";
      os << "Last:" << rhs.last << " ";
      os << "Usr:"  << rhs.ruser << " ";
      os << std::dec;
#endif
      return os;
    }
#endif
  };
  /**
   * \brief A struct composed of the signals associated with an AXI write
   * response.
   */
  struct WRespPayload : public nvhls_message {
    BId id;
    Resp resp;
    BUser buser;

    // Total marshalled width in bits; zero-width fields contribute nothing.
    static const unsigned int width = RESP_WIDTH + BID_WIDTH + BUSER_WIDTH;

    WRespPayload() {
      // Zero-width (EmptyField) members cannot be assigned, hence the guards.
      if(ID_WIDTH > 0)
        id = 0;
      resp = 0; // NVUINT, RESP_WIDTH always > 0
      if(BUSER_WIDTH > 0)
        buser = 0;
    }

    // NOTE: the marshalling order defines the wire format - do not reorder.
    template <unsigned int Size>
    void Marshall(Marshaller<Size> &m) {
      m &id;
      m &resp;
      m &buser;
    }

#ifdef CONNECTIONS_SIM_ONLY
    inline friend void sc_trace(sc_trace_file *tf, const WRespPayload& v, const std::string& NAME ) {
      sc_trace(tf,v.id,   NAME + ".id");
      sc_trace(tf,v.resp, NAME + ".resp");
    }

    // With LOG_MSG_WIDTHS defined, prints the field widths; otherwise the
    // field values (in hex).
    inline friend std::ostream& operator<<(ostream& os, const WRespPayload& rhs)
    {
#ifdef LOG_MSG_WIDTHS
      os << std::dec;
      os << "id:"    << rhs.id.width << " ";
      os << "resp:"  << rhs.resp.width << " ";
      os << "buser:" << rhs.buser.width << " ";
#else
      os << std::hex;
      os << "Id:"   << rhs.id << " ";
      os << "Resp:" << rhs.resp << " ";
      os << "Usr:"  << rhs.buser << " ";
#endif
      return os;
    }
#endif
  };
  /**
   * \brief A struct composed of the signals associated with AXI write data.
   */
  struct WritePayload : public nvhls_message {
    // no id here!
    Data data;
    Last last;
    Wstrb wstrb;
    WUser wuser;

    WritePayload() {
      // Zero-width (EmptyField) members cannot be assigned, hence the guards.
      data = 0; // NVUINT, DATA_WIDTH always > 0
      if(LAST_WIDTH > 0)
        last = 0;
      if(WSTRB_WIDTH > 0)
        wstrb = ~0; // all byte lanes enabled by default
      if(WUSER_WIDTH > 0)
        wuser = 0;
    }

    // Total marshalled width in bits; zero-width fields contribute nothing.
    static const unsigned int width =
        DATA_WIDTH + LAST_WIDTH + WSTRB_WIDTH + WUSER_WIDTH;

    // NOTE: the marshalling order defines the wire format - do not reorder.
    template <unsigned int Size>
    void Marshall(Marshaller<Size> &m) {
      m &data;
      m &last;
      m &wstrb;
      m &wuser;
    }

#ifdef CONNECTIONS_SIM_ONLY
    inline friend void sc_trace(sc_trace_file *tf, const WritePayload& v, const std::string& NAME ) {
      sc_trace(tf,v.data,  NAME + ".data");
      sc_trace(tf,v.last,  NAME + ".last");
      sc_trace(tf,v.wstrb, NAME + ".wstrb");
    }

    // With LOG_MSG_WIDTHS defined, prints the field widths; otherwise the
    // field values (in hex).
    inline friend std::ostream& operator<<(ostream& os, const WritePayload& rhs)
    {
#ifdef LOG_MSG_WIDTHS
      os << std::dec;
      os << "data:"  << rhs.data.width << " ";
      os << "last:"  << rhs.last.width << " ";
      os << "wstrb:" << rhs.wstrb.width << " ";
      os << "wuser:" << rhs.wuser.width << " ";
#else
      os << std::hex;
      os << "Data:" << rhs.data << " ";
      os << "Last:" << rhs.last << " ";
      os << "Strb:" << rhs.wstrb << " ";
      os << "Usr:"  << rhs.wuser << " ";
      os << std::dec;
#endif
      return os;
    }
#endif
  };
  /**
   * \brief The AXI read class.
   *
   * Each Connections implementation contains two ready-valid interfaces, AR for
   * read requests and R for read responses.
   */
  class read {
   public:
    /**
     * \brief The AXI read channel, used for connecting an AXI master and AXI slave.
     */
    template <Connections::connections_port_t PortType = AUTO_PORT>
    class chan {
     public:
      typedef Connections::Combinational<AddrPayload, PortType> ARChan;
      typedef Connections::Combinational<ReadPayload, PortType> RChan;

      ARChan ar; // master to slave
      RChan r;   // slave to master

      // Channel names are derived from the given base name ("_ar"/"_r").
      chan(const char *name)
          : ar(nvhls_concat(name, "_ar")), r(nvhls_concat(name, "_r")){};

      // TODO: Implement AXI protocol checker
    }; // read::chan

    /**
     * \brief The AXI read master port. This port has an AR request channel as output and an R response channel as input.
     */
    template <Connections::connections_port_t PortType = AUTO_PORT>
    class master {
     public:
      typedef Connections::Out<AddrPayload, PortType> ARPort;
      typedef Connections::In<ReadPayload, PortType>  RPort;

      ARPort ar;
      RPort r;

      master(const char *name)
          : ar(nvhls_concat(name, "_ar")), r(nvhls_concat(name, "_r")) {}

      // Reset both ports; must be called before any blocking access.
      void reset() {
        ar.Reset();
        r.Reset();
      }

      // Blocking read: push the request, then wait for a single response beat.
      ReadPayload query(const AddrPayload &addr) {
        // TODO: add nb version with state
        ar.Push(addr);
        return r.Pop();
      }

      // Bind this port pair to a read::chan (or compatible channel bundle).
      template <class C>
      void operator()(C &c) {
        ar(c.ar);
        r(c.r);
      }
    }; // read::master

    /**
     * \brief The AXI read slave port. This port has an AR request channel as input and an R response channel as output.
     */
    template <Connections::connections_port_t PortType = AUTO_PORT>
    class slave {
     public:
      typedef Connections::In<AddrPayload, PortType>  ARPort;
      typedef Connections::Out<ReadPayload, PortType> RPort;

      ARPort ar;
      RPort r;

      slave(const char *name)
          : ar(nvhls_concat(name, "_ar")), r(nvhls_concat(name, "_r")) {}

      // Reset both ports; must be called before any blocking access.
      void reset() {
        ar.Reset();
        r.Reset();
      }

      // Blocking / non-blocking accessors for the request and response sides.
      AddrPayload aread() { return ar.Pop(); }
      bool nb_aread(AddrPayload &addr) { return ar.PopNB(addr); }
      void rwrite(const ReadPayload &data) { r.Push(data); }
      bool nb_rwrite(const ReadPayload &data) { return r.PushNB(data); }

      // Bind this port pair to a read::chan (or compatible channel bundle).
      template <class C>
      void operator()(C &c) {
        ar(c.ar);
        r(c.r);
      }
    }; // read::slave
  }; // read
  /**
   * \brief The AXI write class.
   *
   * Each Connections implementation contains three ready-valid interfaces: AW
   * for write requests, W for write data, and B for write responses.
   */
  class write {
   public:
    /**
     * \brief The AXI write channel, used for connecting an AXI master and AXI slave.
     */
    template <Connections::connections_port_t PortType = AUTO_PORT>
    class chan {
     public:
      typedef Connections::Combinational<AddrPayload, PortType>  AWChan;
      typedef Connections::Combinational<WritePayload, PortType> WChan;
      typedef Connections::Combinational<WRespPayload, PortType> BChan;

      AWChan aw; // master to slave
      WChan w;   // master to slave
      BChan b;   // slave to master

      // Channel names are derived from the given base name ("_aw"/"_w"/"_b").
      chan(const char *name)
          : aw(nvhls_concat(name, "_aw")),
            w(nvhls_concat(name, "_w")),
            b(nvhls_concat(name, "_b")){};

      // TODO: Implement AXI protocol checker
    }; // write::chan

    /**
     * \brief The AXI write master port. This port has AW and W request channels as outputs and a B response channel as input.
     */
    template <Connections::connections_port_t PortType = AUTO_PORT>
    class master {
     public:
      typedef Connections::Out<AddrPayload, PortType>  AWPort;
      typedef Connections::Out<WritePayload, PortType> WPort;
      typedef Connections::In<WRespPayload, PortType>  BPort;

      AWPort aw;
      WPort w;
      BPort b;

      master(const char *name)
          : aw(nvhls_concat(name, "_aw")),
            w(nvhls_concat(name, "_w")),
            b(nvhls_concat(name, "_b")) {}

      // Reset all three ports; must be called before any blocking access.
      void reset() {
        aw.Reset();
        w.Reset();
        b.Reset();
      }

      // Blocking single-beat write: push address and data, wait for response.
      WRespPayload write(const AddrPayload &addr, const WritePayload &data) {
        // TODO: add nb version with state
        aw.Push(addr);
        w.Push(data);
        return b.Pop();
      }

      // Bind this port bundle to a write::chan (or compatible channel bundle).
      template <class C>
      void operator()(C &c) {
        aw(c.aw);
        w(c.w);
        b(c.b);
      }
    }; // write::master

    /**
     * \brief The AXI write slave port. This port has AW and W request channels as inputs and a B response channel as output.
     */
    template <Connections::connections_port_t PortType = AUTO_PORT>
    class slave {
     public:
      typedef Connections::In<AddrPayload, PortType>   AWPort;
      typedef Connections::In<WritePayload, PortType>  WPort;
      typedef Connections::Out<WRespPayload, PortType> BPort;

      AWPort aw;
      WPort w;
      BPort b;

      // State for nb_wread(): holds an accepted address while waiting for
      // the matching data beat.
      bool got_waddr;
      AddrPayload stored_waddr;

      slave(const char *name)
          : aw(nvhls_concat(name, "_aw")),
            w(nvhls_concat(name, "_w")),
            b(nvhls_concat(name, "_b")),
            got_waddr(false) {}

      // Reset all three ports; must be called before any blocking access.
      void reset() {
        aw.Reset();
        w.Reset();
        b.Reset();
      }

      // Blocking read of one address + one data beat.
      void wread(AddrPayload &addr, WritePayload &data) {
        addr = aw.Pop();
        data = w.Pop();
      }

      // Non-blocking read of an address/data pair. The address, once popped,
      // is latched in stored_waddr until the data beat arrives, so callers
      // may invoke this repeatedly; returns true only when both are available.
      bool nb_wread(AddrPayload &addr, WritePayload &data) {
        if (!got_waddr) {
          if (!aw.PopNB(addr)) {
            return false;
          } else {
            got_waddr = true;
            stored_waddr = addr;
          }
        } else {
          addr = stored_waddr;
        }

        if (w.PopNB(data)) {
          got_waddr = false;
          return true;
        } else {
          return false;
        }
      }

      // Blocking / non-blocking write of the B response.
      void bwrite(const WRespPayload &resp) { b.Push(resp); }
      bool nb_bwrite(const WRespPayload &resp) { return b.PushNB(resp); }

      // Bind this port bundle to a write::chan (or compatible channel bundle).
      template <class C>
      void operator()(C &c) {
        aw(c.aw);
        w(c.w);
        b(c.b);
      }
    }; // write::slave
  }; // write
}; // axi_for_ace
}; // ace
#endif // _AXI_FOR_ACE_H_
|
ic-lab-duth/NoCpad
|
src/axi_master_if.h
|
// --------------------------------------------------------- //
// MASTER-IF Is where the MASTER CONNECTS!!!!! //
// //
// Aka. Master <-> Master-IF <-> NoC <-> Slave-IF <-> Slave //
// --------------------------------------------------------- //
#ifndef AXI4_MASTER_IF_CON_H
#define AXI4_MASTER_IF_CON_H
#include "systemc.h"
#include "nvhls_connections.h"
#include "./include/flit_axi.h"
#include <axi/axi4.h>
#include "./include/axi4_configs_extra.h"
#include "./include/duth_fun.h"
#define LOG_MAX_OUTS 8
// --- Helping Data structures --- //
// Per-transaction-ID bookkeeping used by the packetizers to decide whether a
// new request may depart without risking out-of-order responses.
struct outs_table_entry {
  sc_uint<dnp::D_W>     dst_last; // destination of the last request sent with this ID
  sc_uint<LOG_MAX_OUTS> sent;     // number of requests still outstanding for this ID
  bool                  reorder;  // NOTE(review): appears to flag a reorder hazard - confirm against pack logic
};
// Info passed between packetizer and depacketizer to inform about new and finished transactions.
struct order_info {
  sc_uint<dnp::ID_W> tid; // AXI transaction ID
  sc_uint<dnp::D_W>  dst; // destination node of the transaction

  inline friend std::ostream& operator << ( std::ostream& os, const order_info& info ) {
    os <<"TID: "<< info.tid <<", Dst: "<< info.dst /*<<", Ticket: "<< info.ticket*/;
#ifdef SYSTEMC_INCLUDED
    os << std::dec << " @" << sc_time_stamp();
#else
    os << std::dec << " @" << "no-timed";
#endif
    return os;
  }

#ifdef SYSTEMC_INCLUDED
  // Only for SystemC
  inline friend void sc_trace(sc_trace_file* tf, const order_info& info, const std::string& name) {
    sc_trace(tf, info.tid, name + ".tid");
    sc_trace(tf, info.dst, name + ".dst");
    //sc_trace(tf, info.ticket, name + ".ticket");
  }
#endif
};
// --- Master IF --- //
// The AXI Master connects its independent AXI RD and WR channels to the interface.
// The interface receives the Requests and independently packetizes and sends them into the network.
// The Responses are depacketized in a separate thread and are fed back to the MASTER.
// Thus the Master interface comprises 4 distinct/parallel blocks: WR/RD pack and WR/RD depack.
template <typename cfg>
SC_MODULE(axi_master_if) {
  typedef typename axi::axi4<axi::cfg::standard_duth> axi4_;
  typedef typename axi::AXI4_Encoding                 enc_;

  typedef flit_dnp<cfg::RREQ_PHITS>  rreq_flit_t;
  typedef flit_dnp<cfg::RRESP_PHITS> rresp_flit_t;
  typedef flit_dnp<cfg::WREQ_PHITS>  wreq_flit_t;
  typedef flit_dnp<cfg::WRESP_PHITS> wresp_flit_t;

  typedef sc_uint< nvhls::log2_ceil<cfg::RRESP_PHITS>::val > cnt_phit_rresp_t;
  typedef sc_uint< nvhls::log2_ceil<cfg::WREQ_PHITS>::val >  cnt_phit_wreq_t;

  const unsigned char LOG_RD_M_LANES = nvhls::log2_ceil<cfg::RD_LANES>::val;
  const unsigned char LOG_WR_M_LANES = nvhls::log2_ceil<cfg::WR_LANES>::val;

  sc_in_clk    clk;
  sc_in<bool>  rst_n;

  // Memory map: addr_map[slave][0] holds the low bound and addr_map[slave][1]
  // the high bound of each slave's address range.
  sc_in< sc_uint<(dnp::AH_W+dnp::AL_W)> > addr_map[cfg::SLAVE_NUM][2];
  // Network ID of this node; stamped into the source field of outgoing packets.
  sc_in< sc_uint<dnp::S_W> > THIS_ID;

  // AXI MASTER Side Channels
  // --- READ --- //
  Connections::In<axi4_::AddrPayload>   ar_in{"ar_in"};
  Connections::Out<axi4_::ReadPayload>  r_out{"r_out"};
  // --- WRITE --- //
  Connections::In<axi4_::AddrPayload>   aw_in{"aw_in"};
  Connections::In<axi4_::WritePayload>  w_in{"w_in"};
  Connections::Out<axi4_::WRespPayload> b_out{"b_out"};

  // NoC Side Channels
  Connections::Out<rreq_flit_t> rd_flit_out{"rd_flit_out"};
  Connections::In<rresp_flit_t> rd_flit_in{"rd_flit_in"};
  Connections::Out<wreq_flit_t> wr_flit_out{"wr_flit_out"};
  Connections::In<wresp_flit_t> wr_flit_in{"wr_flit_in"};

  // --- READ Internals --- //
  // FIFOs that pass initiation and finish notifications between Pack-Depack.
  // NOTE(review): rd_trans_init is declared but not used by the threads below;
  // kept for interface stability — confirm before removing.
  sc_fifo<order_info>         rd_trans_init{"rd_trans_init"};
  sc_fifo<sc_uint<dnp::ID_W>> rd_trans_fin{"rd_trans_fin"};
  outs_table_entry rd_out_table[1<<dnp::ID_W];

  // --- WRITE Internals --- //
  sc_fifo<sc_uint<dnp::ID_W>> wr_trans_fin{"wr_trans_fin"};
  outs_table_entry wr_out_table[1<<dnp::ID_W];

  // Constructor
  SC_HAS_PROCESS(axi_master_if);
  axi_master_if(sc_module_name name_="axi_master_if")
    :
    sc_module (name_),
    rd_trans_fin (2),
    wr_trans_fin (2)
  {
    SC_THREAD(rd_req_pack_job);
    sensitive << clk.pos();
    async_reset_signal_is(rst_n, false);

    SC_THREAD(rd_resp_depack_job);
    sensitive << clk.pos();
    async_reset_signal_is(rst_n, false);

    SC_THREAD(wr_req_pack_job);
    sensitive << clk.pos();
    async_reset_signal_is(rst_n, false);

    SC_THREAD(wr_resp_depack_job);
    sensitive << clk.pos();
    async_reset_signal_is(rst_n, false);
  }

  //-------------------------------//
  //--- READ REQuest Packetizer ---//
  //-------------------------------//
  // Pop a request, check reordering requirements, and pass it to the NoC.
  void rd_req_pack_job () {
    //-- Start of Reset ---//
    // rd_out_table contains outstanding info to decide if reordering is possible.
    #pragma hls_unroll yes
    for (int i=0; i<1<<dnp::ID_W; ++i) {
      rd_out_table[i].dst_last = 0;
      rd_out_table[i].sent     = 0;
      rd_out_table[i].reorder  = false;
    }
    // For REORD_SCHEME = 0
    sc_uint<LOG_MAX_OUTS> outstanding = 0;
    sc_uint<dnp::D_W>     out_dst     = 0;

    ar_in.Reset();
    rd_flit_out.Reset();
    axi4_::AddrPayload this_req;
    //-- End of Reset ---//
    #pragma hls_pipeline_init_interval 1
    #pragma pipeline_stall_mode flush
    while(1) {
      wait();
      if(ar_in.PopNB(this_req)) {
        // A new request must stall until it is eligible to depart.
        // Depending on the reordering scheme:
        // 0 : all in-flight transactions must be to the same destination
        // 1 : all in-flight transactions of the SAME ID must be to the same destination
        sc_uint<dnp::D_W> this_dst = addr_lut_rd(this_req.addr);
        if (cfg::ORD_SCHEME==0) {
          // Poll for Finished transactions until reordering is not possible.
          #pragma hls_pipeline_init_interval 1
          #pragma pipeline_stall_mode flush
          while((outstanding>0) && (out_dst != this_dst)) {
            sc_uint<dnp::ID_W> tid_fin;
            if(rd_trans_fin.nb_read(tid_fin)) outstanding--;
            wait();
          }; // End of while reorder
          outstanding++;
          out_dst = this_dst;
        } else {
          // Get info about the outstanding transactions of the received request's TID.
          // (this_dst was already computed above; the previous shadowing
          //  re-computation of it here was removed.)
          outs_table_entry sel_entry = rd_out_table[this_req.id.to_uint()];
          bool may_reorder = (sel_entry.sent>0) && (sel_entry.dst_last != this_dst);
          sc_uint<LOG_MAX_OUTS> wait_for = sel_entry.sent;
          // Poll for Finished transactions until reordering is not possible.
          #pragma hls_pipeline_init_interval 1
          #pragma pipeline_stall_mode flush
          while(may_reorder || rd_flit_out.Full()) {
            sc_uint<dnp::ID_W> tid_fin;
            if(rd_trans_fin.nb_read(tid_fin)) {
              rd_out_table[tid_fin].sent--;                  // update outstanding table
              if(tid_fin==this_req.id.to_uint()) wait_for--; // update local wait value
            }
            may_reorder = (wait_for>0);
            wait();
          }; // End of while
          rd_out_table[this_req.id.to_uint()].sent++;
          rd_out_table[this_req.id.to_uint()].dst_last = this_dst;
        }

        // --- Start Packetization --- //
        // Packetize request into a flit. The fields are described in DNP20.
        rreq_flit_t tmp_flit;
        tmp_flit.type = SINGLE; // Entire request fits in a single flit, thus SINGLE
        tmp_flit.data[0] = ((sc_uint<dnp::PHIT_W>)0                      << dnp::req::REORD_PTR) |
                           ((sc_uint<dnp::PHIT_W>)this_req.id            << dnp::req::ID_PTR   ) |
                           ((sc_uint<dnp::PHIT_W>)dnp::PACK_TYPE__RD_REQ << dnp::T_PTR         ) |
                           ((sc_uint<dnp::PHIT_W>) 0                     << dnp::Q_PTR         ) |
                           ((sc_uint<dnp::PHIT_W>)this_dst               << dnp::D_PTR         ) |
                           ((sc_uint<dnp::PHIT_W>)THIS_ID                << dnp::S_PTR         ) |
                           ((sc_uint<dnp::PHIT_W>)0                      << dnp::V_PTR         ) ;
        tmp_flit.data[1] = ((sc_uint<dnp::PHIT_W>)this_req.len             << dnp::req::LE_PTR) |
                           ((sc_uint<dnp::PHIT_W>)(this_req.addr & 0xffff) << dnp::req::AL_PTR) ;
        tmp_flit.data[2] = ((sc_uint<dnp::PHIT_W>)this_req.burst                << dnp::req::BU_PTR ) |
                           ((sc_uint<dnp::PHIT_W>)this_req.size                 << dnp::req::SZ_PTR ) |
                           ((sc_uint<dnp::PHIT_W>)(this_req.addr >> dnp::AL_W)  << dnp::req::AH_PTR ) ;

        rd_flit_out.Push(tmp_flit);
      } else {
        // No RD Req from Master, simply check for finished Outstanding trans
        sc_uint<dnp::ID_W> tid_fin;
        if(rd_trans_fin.nb_read(tid_fin)) {
          if (cfg::ORD_SCHEME==0) outstanding--;
          else                    rd_out_table[tid_fin].sent--; // update outstanding table
        }
      }
    } // End of while(1)
  }; // End of Read Request Packetizer

  //-----------------------------------//
  //--- READ RESPonse DE-Packetizer ---//
  //-----------------------------------//
  void rd_resp_depack_job () {
    r_out.Reset();
    rd_flit_in.Reset();
    while(1) {
      // Get the response flits, depacketize them to form AXI Master's response and
      // inform the packetizer for the transaction completion
      rresp_flit_t flit_rcv;
      flit_rcv = rd_flit_in.Pop();

      // Construct the transaction's attributes to build the response accordingly.
      axi4_::AddrPayload active_trans;
      active_trans.id    = (flit_rcv.data[0] >> dnp::rresp::ID_PTR) & ((1 << dnp::ID_W) - 1);
      active_trans.burst = (flit_rcv.data[0] >> dnp::rresp::BU_PTR) & ((1 << dnp::BU_W) - 1);
      active_trans.size  = (flit_rcv.data[1] >> dnp::rresp::SZ_PTR) & ((1 << dnp::SZ_W) - 1);
      active_trans.len   = (flit_rcv.data[1] >> dnp::rresp::LE_PTR) & ((1 << dnp::LE_W) - 1);

      sc_uint<dnp::SZ_W> final_size = (unsigned) active_trans.size;
      // Partial lower 8-bit part of address to calculate the initial axi pointer in case of a non-aligned address
      sc_uint<dnp::AP_W> addr_part         = (flit_rcv.data[1] >> dnp::rresp::AP_PTR) & ((1<<dnp::AP_W) - 1);
      sc_uint<dnp::AP_W> addr_init_aligned = ((addr_part & (cfg::RD_LANES-1)) & ~((1<<final_size)-1));

      // Data Depacketization happens in a loop. Each iteration pops a flit and constructs a beat.
      // Each iteration transfers data bytes from the flit to the AXI beat.
      // bytes_per_iter bytes may be transferred, which is limited by two factors
      // depending on the AXI beat size and the bytes in the flit.
      // 1) The available data bytes in the flit is less than the required for the beat
      // 2) The remaining byte lanes are less than the available in the flit
      // For case (1) the flit is emptied and the next flit is popped at the next iteration
      // For case (2) the beat is pushed to Master and the next beat starts in the next iteration

      // For the data Depacketization loop, we keep 2 pointers.
      //   axi_lane_ptr  -> to keep track of the axi byte lanes to place the data
      //   flit_phit_ptr -> to point at the data of the flit
      sc_uint<8>       axi_lane_ptr  = addr_init_aligned; // Bytes MOD axi size
      cnt_phit_rresp_t flit_phit_ptr = 0;                 // Bytes MOD phits in flit
      // Also we keep track of the processed and total data.
      sc_uint<16> bytes_total    = ((active_trans.len.to_uint()+1)<<final_size);
      sc_uint<16> bytes_depacked = 0; // Number of DE-packetized bytes

      unsigned char resp_build_tmp[cfg::RD_LANES];
      #pragma hls_unroll yes
      for(int i=0; i<cfg::RD_LANES; ++i) resp_build_tmp[i] = 0;

      #pragma hls_pipeline_init_interval 1
      #pragma pipeline_stall_mode flush
      gather_wr_beats : while (1) {
        // Each iteration moves data from the flit to the appropriate place on the AXI RD response.
        // The two flit and axi pointers orchestrate the operation, until completion.
        sc_uint<8> bytes_axi_left  = ((1<<final_size) - (axi_lane_ptr & ((1<<final_size)-1)));
        sc_uint<8> bytes_flit_left = ((cfg::RRESP_PHITS<<1) - (flit_phit_ptr<<1));
        sc_uint<8> bytes_per_iter  = (bytes_axi_left<bytes_flit_left) ? bytes_axi_left : bytes_flit_left;

        if(flit_phit_ptr==0)
          flit_rcv = rd_flit_in.Pop();

        #pragma hls_unroll yes
        build_resp: for (int i = 0; i < (cfg::RD_LANES >> 1); ++i) { // i counts AXI Byte Lanes IN PHITS (i.e. Lanes/bytes_in_phit)
          if (i>=(axi_lane_ptr>>1) && i<((axi_lane_ptr+bytes_per_iter)>>1)) {
            cnt_phit_rresp_t loc_flit_ptr = flit_phit_ptr + (i-(axi_lane_ptr>>1));
            resp_build_tmp[(i << 1) + 1] = (flit_rcv.data[loc_flit_ptr] >> dnp::rdata::B1_PTR) & ((1 << dnp::B_W) - 1); // MSB
            resp_build_tmp[(i << 1)    ] = (flit_rcv.data[loc_flit_ptr] >> dnp::rdata::B0_PTR) & ((1 << dnp::B_W) - 1); // LSB
          }
        }

        bool done_job  = ((bytes_depacked+bytes_per_iter)==bytes_total);               // All bytes are processed
        bool done_flit = (flit_phit_ptr+(bytes_per_iter>>1)==cfg::RRESP_PHITS);        // Flit got empty
        bool done_axi  = (((bytes_depacked+bytes_per_iter)&((1<<final_size)-1))==0);   // Beat got full

        // Push the response to MASTER, when either this Beat got the needed bytes or all bytes are transferred
        if( done_job || done_axi ) {
          axi4_::ReadPayload builder_resp;
          builder_resp.id   = active_trans.id;
          builder_resp.resp = (flit_rcv.data[flit_phit_ptr] >> dnp::rdata::RE_PTR) & ((1 << dnp::RE_W) - 1);
          builder_resp.last = ((bytes_depacked+bytes_per_iter)==bytes_total);
          duth_fun<axi4_::Data, cfg::RD_LANES>::assign_char2ac(builder_resp.data, resp_build_tmp);
          r_out.Push(builder_resp);
          #pragma hls_unroll yes
          for(int i=0; i<cfg::RD_LANES; ++i) resp_build_tmp[i] = 0;
        }

        // Check to either finish transaction or update the pointers for the next iteration
        if (done_job) { // End of transaction
          rd_trans_fin.write(active_trans.id.to_uint());
          break;
        } else {
          bytes_depacked += bytes_per_iter;
          flit_phit_ptr   = (done_flit) ? 0 : (flit_phit_ptr +(bytes_per_iter>>1));
          axi_lane_ptr    = (active_trans.burst==enc_::AXBURST::FIXED) ? ((axi_lane_ptr+bytes_per_iter) & ((1<<final_size)-1)) + addr_init_aligned :
                                                                         ((axi_lane_ptr+bytes_per_iter) & (cfg::RD_LANES-1))                       ;
        }
      } // End of flit gathering loop
    } // End of while(1)
  }; // End of Read Response De-Packetizer

  //--------------------------------//
  //--- WRITE REQuest Packetizer ---//
  //--------------------------------//
  void wr_req_pack_job () {
    wr_flit_out.Reset();
    aw_in.Reset();
    w_in.Reset();
    for (int i=0; i<1<<dnp::ID_W; ++i) {
      wr_out_table[i].dst_last = 0;
      wr_out_table[i].sent     = 0;
      wr_out_table[i].reorder  = false;
    }
    sc_uint<LOG_MAX_OUTS> outstanding = 0;
    sc_uint<dnp::D_W>     out_dst     = 0;

    axi4_::AddrPayload this_req;
    wait();
    while(1) {
      if(aw_in.PopNB(this_req)) { // New Request
        // A new request must stall until it is eligible to depart.
        // Depending on the reordering scheme:
        // 0 : all in-flight transactions must be to the same destination
        // 1 : all in-flight transactions of the SAME ID must be to the same destination
        sc_uint<dnp::D_W> this_dst = addr_lut_wr(this_req.addr);
        if (cfg::ORD_SCHEME==0) {
          // Poll for Finished transactions until reordering is not possible.
          #pragma hls_pipeline_init_interval 1
          #pragma pipeline_stall_mode flush
          while((outstanding>0) && (out_dst != this_dst)) {
            sc_uint<dnp::ID_W> tid_fin;
            if(wr_trans_fin.nb_read(tid_fin)) outstanding--;
            wait();
          }; // End of while reorder
          outstanding++;
          out_dst = this_dst;
        } else {
          outs_table_entry sel_entry = wr_out_table[this_req.id.to_uint()];
          bool may_reorder = (sel_entry.sent>0) && (sel_entry.dst_last != this_dst);
          sc_uint<LOG_MAX_OUTS> wait_for = sel_entry.sent; // Counts outstanding transactions to wait for
          // Poll for Finished transactions until reordering is not possible.
          while(may_reorder || wr_flit_out.Full()) {
            sc_uint<dnp::ID_W> tid_fin;
            if(wr_trans_fin.nb_read(tid_fin)) {
              wr_out_table[tid_fin].sent--;
              if(tid_fin==this_req.id.to_uint()) wait_for--;
            }
            may_reorder = (wait_for>0);
            wait();
          }; // End of while reorder
          wr_out_table[this_req.id.to_uint()].sent++;
          wr_out_table[this_req.id.to_uint()].dst_last = this_dst;
        }

        // --- Start HEADER Packetization --- //
        // Packetize request according to DNP20, and send.
        // (An unused rreq_flit_t temporary that shadowed the read-path flit was removed.)
        wreq_flit_t tmp_mule_flit;
        tmp_mule_flit.type = HEAD;
        tmp_mule_flit.data[0] = ((sc_uint<dnp::PHIT_W>)0                      << dnp::req::REORD_PTR) |
                                ((sc_uint<dnp::PHIT_W>)this_req.id            << dnp::req::ID_PTR)    |
                                ((sc_uint<dnp::PHIT_W>)dnp::PACK_TYPE__WR_REQ << dnp::T_PTR)          |
                                ((sc_uint<dnp::PHIT_W>)0                      << dnp::Q_PTR)          |
                                ((sc_uint<dnp::PHIT_W>)this_dst               << dnp::D_PTR)          |
                                ((sc_uint<dnp::PHIT_W>)THIS_ID                << dnp::S_PTR)          |
                                ((sc_uint<dnp::PHIT_W>)0                      << dnp::V_PTR)          ;
        tmp_mule_flit.data[1] = ((sc_uint<dnp::PHIT_W>) this_req.len            << dnp::req::LE_PTR) |
                                ((sc_uint<dnp::PHIT_W>)(this_req.addr & 0xffff) << dnp::req::AL_PTR) ;
        tmp_mule_flit.data[2] = ((sc_uint<dnp::PHIT_W>)this_req.burst               << dnp::req::BU_PTR) |
                                ((sc_uint<dnp::PHIT_W>)this_req.size                << dnp::req::SZ_PTR) |
                                ((sc_uint<dnp::PHIT_W>)(this_req.addr >> dnp::AL_W) << dnp::req::AH_PTR) ;

        // push header flit to NoC
        #pragma hls_pipeline_init_interval 1
        #pragma pipeline_stall_mode flush
        while (!wr_flit_out.PushNB(tmp_mule_flit)) {
          sc_uint<dnp::ID_W> tid_fin;
          if(wr_trans_fin.nb_read(tid_fin)) {
            if (cfg::ORD_SCHEME==0) outstanding--;
            else                    wr_out_table[tid_fin].sent--; // update outstanding table
          }
          wait();
        }

        // --- Start DATA Packetization --- //
        // Data packetization happens in a loop. Each iteration pops a beat and fills a flit.
        // Multiple iterations may be needed to either consume the incoming data or fill a flit,
        // which depends on the AXI and flit size.
        // The processed bytes per iteration are limited by two factors
        // depending on the AXI beat size and the bytes in the flit.
        // 1) The available data bytes in the flit is less than the required for the beat
        // 2) The remaining byte lanes are less than the available in the flit
        // For case (1) the flit is emptied and the next flit is popped at the next iteration
        // For case (2) the beat is pushed to the NoC and the next beat starts in the next iteration
        sc_uint<8> addr_init_aligned = (this_req.addr.to_uint() & (cfg::WR_LANES-1)) & ~((1<<this_req.size.to_uint())-1);

        // For data packetization we keep 2 pointers.
        //  - One to keep track of the axi byte lanes holding the data (axi_lane_ptr)
        //  - One to point at the data of the flit (flit_phit_ptr)
        sc_uint<8>      axi_lane_ptr  = addr_init_aligned; // Bytes MOD size
        cnt_phit_wreq_t flit_phit_ptr = 0;                 // Bytes MOD phits in flit
        sc_uint<16> bytes_total  = ((this_req.len.to_uint()+1)<<this_req.size.to_uint());
        sc_uint<16> bytes_packed = 0;

        unsigned char data_build_tmp[cfg::WR_LANES];
        bool          wstrb_tmp[cfg::WR_LANES];
        sc_uint<1>    last_tmp;
        //#pragma hls_pipeline_init_interval 1
        //#pragma pipeline_stall_mode flush
        gather_wr_beats : while (1) {
          // Calculate the bytes transferred in this iteration, depending on the available flit bytes and the remaining bytes of the beat
          sc_uint<8> bytes_axi_left  = ((1<<this_req.size.to_uint()) - (axi_lane_ptr & ((1<<this_req.size.to_uint())-1)));
          sc_uint<8> bytes_flit_left = ((cfg::WREQ_PHITS<<1) - (flit_phit_ptr<<1));
          sc_uint<8> bytes_per_iter  = (bytes_axi_left<bytes_flit_left) ? bytes_axi_left : bytes_flit_left;

          // If the current beat has been packed, get the next one
          if((bytes_packed & ((1<<this_req.size.to_uint())-1))==0) {
            axi4_::WritePayload this_wr;
            this_wr  = w_in.Pop();
            last_tmp = this_wr.last;
            duth_fun<axi4_::Data , cfg::WR_LANES>::assign_ac2char(data_build_tmp , this_wr.data);
            duth_fun<axi4_::Wstrb, cfg::WR_LANES>::assign_ac2bool(wstrb_tmp      , this_wr.wstrb);
          }

          // Convert AXI Beats to flits.
          #pragma hls_unroll yes
          for (int i=0; i<cfg::WREQ_PHITS; ++i){ // i counts phits on the flit
            if(i>=flit_phit_ptr && i<(flit_phit_ptr+(bytes_per_iter>>1))) {
              sc_uint<8> loc_axi_ptr = (axi_lane_ptr + ((i-flit_phit_ptr)<<1));
              tmp_mule_flit.data[i] = ((sc_uint<dnp::PHIT_W>)last_tmp                      << dnp::wdata::LA_PTR ) | // MSB
                                      ((sc_uint<dnp::PHIT_W>)wstrb_tmp[loc_axi_ptr+1]      << dnp::wdata::E1_PTR ) |
                                      ((sc_uint<dnp::PHIT_W>)wstrb_tmp[loc_axi_ptr  ]      << dnp::wdata::E0_PTR ) |
                                      ((sc_uint<dnp::PHIT_W>)data_build_tmp[loc_axi_ptr+1] << dnp::wdata::B1_PTR ) | // (i*2) % 4
                                      ((sc_uint<dnp::PHIT_W>)data_build_tmp[loc_axi_ptr  ] << dnp::wdata::B0_PTR ) ;
            }
          }

          // transaction event flags
          bool done_job  = ((bytes_packed+bytes_per_iter)==bytes_total);                             // All bytes are processed
          bool done_flit = (flit_phit_ptr+(bytes_per_iter>>1)==cfg::WREQ_PHITS);                     // Flit got full
          bool done_axi  = (((bytes_packed+bytes_per_iter)&((1<<(this_req.size.to_uint()))-1))==0);  // Beat got consumed

          if(done_job || done_flit) {
            tmp_mule_flit.type = (bytes_packed+bytes_per_iter==bytes_total) ? TAIL : BODY;
            #pragma hls_pipeline_init_interval 1
            #pragma pipeline_stall_mode flush
            while (!wr_flit_out.PushNB(tmp_mule_flit)) {
              sc_uint<dnp::ID_W> tid_fin;
              if(wr_trans_fin.nb_read(tid_fin)) {
                if (cfg::ORD_SCHEME==0) outstanding--;
                else                    wr_out_table[tid_fin].sent--; // update outstanding table
              }
              wait();
            }
          }

          // Check to either finish transaction or update the pointers for the next iteration
          if (done_job) {
            break;
          } else { // Move to next iteration
            bytes_packed  = bytes_packed+bytes_per_iter;
            flit_phit_ptr = (done_flit) ? 0 : (flit_phit_ptr +(bytes_per_iter>>1));
            axi_lane_ptr  = ((unsigned)this_req.burst==enc_::AXBURST::FIXED) ? ((axi_lane_ptr+bytes_per_iter) & ((1<<this_req.size.to_uint())-1)) + addr_init_aligned :
                                                                               ((axi_lane_ptr+bytes_per_iter) & (cfg::WR_LANES-1))                                    ;
          }
        } // End of gather_beats. End of transaction loop
      } else {
        // When no request, Check for finished transactions
        sc_uint<dnp::ID_W> tid_fin;
        if(wr_trans_fin.nb_read(tid_fin)) {
          if (cfg::ORD_SCHEME==0) outstanding--;
          else                    wr_out_table[tid_fin].sent--;
        }
        wait();
      }
    } // End of While(1)
  }; // End of Write Request Packetizer

  //------------------------------------//
  //--- WRITE RESPonse DE-Packetizer ---//
  //------------------------------------//
  void wr_resp_depack_job(){
    wr_flit_in.Reset();
    b_out.Reset();
    wait();
    #pragma hls_pipeline_init_interval 1
    #pragma pipeline_stall_mode flush
    while(1) {
      // Blocking read from NoC to start depacketizing the response
      wresp_flit_t flit_rcv;
      flit_rcv = wr_flit_in.Pop();

      // Construct the trans Header to create the response
      axi4_::WRespPayload this_resp;
      sc_uint<dnp::ID_W> this_tid = (flit_rcv.data[0] >> dnp::wresp::ID_PTR) & ((1 << dnp::ID_W) - 1);
      this_resp.id   = this_tid.to_uint();
      this_resp.resp = (flit_rcv.data[0] >> dnp::wresp::RESP_PTR) & ((1 << dnp::RE_W) - 1);

      b_out.Push(this_resp);        // Send the response to MASTER
      wr_trans_fin.write(this_tid); // Inform Packetizer for finished transaction
    } // End of While(1)
  }; // End of Write Resp De-pack

  // Memory map resolving: return the slave index whose [low, high] address
  // range contains addr; defaults to slave 0 when no range matches.
  // FIX: iterate over all cfg::SLAVE_NUM entries (was hard-coded to 2,
  // which mis-routed accesses for systems with more than two slaves).
  inline unsigned char addr_lut_rd(const axi4_::Addr addr) {
    for (int i=0; i<cfg::SLAVE_NUM; ++i) {
      if (addr>=addr_map[i][0].read() && addr <= addr_map[i][1].read()) return i;
    }
    return 0;
  };

  inline unsigned char addr_lut_wr(const axi4_::Addr addr) {
    for (int i=0; i<cfg::SLAVE_NUM; ++i) {
      if (addr>=addr_map[i][0].read() && addr <= addr_map[i][1].read()) return i;
    }
    return 0;
  };
}; // End of Master-IF module
#endif // AXI4_MASTER_IF_CON_H
|
ic-lab-duth/NoCpad
|
tb/tb_ace/ace_master.h
|
#ifndef _ACE_MASTER_H_
#define _ACE_MASTER_H_
#include "systemc.h"
#include "../helper_non_synth.h"
#include "../../src/include/dnp_ace_v0.h"
#include "../tb_wrap.h"
#include <deque>
#include <queue>
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#define AXI4_MAX_LEN 4 // FIXED, WRAP bursts have a protocol maximum of 16 beats; capped to 4 here
#define AXI4_MAX_INCR_LEN 4 // AXI4 extends INCR bursts upto 256 beats
#define AXI_TID_NUM 4
#define AXI_BURST_NUM 3
#define ACE_CACHE_LINES 8
// Non-synthesizable ACE master model for the testbench. It generates randomized
// read / write / cache transactions, injects them through Connections ports,
// answers snoop requests, and checks received responses against shared
// scoreboard queues protected by sb_lock.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
SC_MODULE(ace_master) {
typedef typename ace::ace5<axi::cfg::ace> ace5_;
typedef typename ace::ACE_Encoding enc_;
sc_in_clk clk;
sc_in <bool> rst_n;
// When asserted, the generator stops creating new transactions (draining mode).
sc_in<bool> stop_gen;
// Memory map: addr_map[slave][0]/[1] = low/high address bound of each slave.
sc_in< sc_uint<32> > addr_map[SLAVE_NUM][2];
// Master ACE Ports
Connections::In<ace5_::AC> ac_in;
Connections::Out<ace5_::CR> cr_out;
Connections::Out<ace5_::CD> cd_out;
Connections::Out<ace5_::AddrPayload> ar_out;
Connections::In<ace5_::ReadPayload> r_in;
Connections::Out<ace5_::RACK> rack_out;
Connections::Out<ace5_::AddrPayload> aw_out;
Connections::Out<ace5_::WritePayload> w_out;
Connections::In<ace5_::WRespPayload> b_in;
Connections::Out<ace5_::WACK> wack_out;
// Scoreboard
// Shared queues (owned by the testbench top) holding expected requests and
// responses, indexed by slave or master; guarded by sb_lock.
sc_mutex *sb_lock;
std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload> > > *sb_rd_req_q;
std::vector< std::deque< msg_tb_wrap<ace5_::ReadPayload> > > *sb_rd_resp_q;
std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload > > > *sb_wr_req_q;
std::vector< std::deque< msg_tb_wrap<ace5_::WritePayload> > > *sb_wr_data_q;
std::vector< std::deque< msg_tb_wrap<ace5_::WRespPayload> > > *sb_wr_resp_q;
std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload> > > *sb_coherent_access_q;
std::vector< std::deque< msg_tb_wrap<ace5_::AC> > > *sb_snoop_req_q;
std::vector< std::deque< msg_tb_wrap<ace5_::CR> > > *sb_snoop_resp_q;
std::vector< std::deque< msg_tb_wrap<ace5_::CD> > > *sb_snoop_data_resp_q;
// Queues to store generated transactions
// (staging queues: generated items wait here until the port accepts them)
std::queue<ace5_::AddrPayload > stored_rd_trans;
std::queue<ace5_::AddrPayload > stored_wr_trans;
std::queue<ace5_::WritePayload> stored_wr_data;
std::queue<ace5_::CR> stored_cache_resp;
std::queue<ace5_::CD> stored_cache_data;
std::queue<ace5_::RACK> stored_rd_ack;
std::queue<ace5_::WACK> stored_wr_ack;
std::deque<ace5_::AddrPayload> sb_rd_order_q; // queue to check ordering
std::deque<ace5_::AddrPayload> sb_wr_order_q; // queue to check ordering
// Cache state keeping
// A single cache line with its data and a 5-state ACE coherence state.
class cache_line {
public:
enum State {
INV = 0, // INVALID
UC = 1, // UNIQUE_CLEAN
UD = 2, // UNIQUE_DIRTY
SC = 3, // SHARED_CLEAN
SD = 4, // SHARED_DIRTY
};
ace5_::CD::Data data;
State state;
cache_line () {
data = 0;
state = INV;
}
cache_line (ace5_::CD::Data data_, State state_) {
data = data_;
state = state_;
}
inline bool is_inv() {return (state == INV);};
inline bool is_uc() {return (state == UC);};
inline bool is_ud() {return (state == UD);};
inline bool is_sc() {return (state == SC);};
inline bool is_sd() {return (state == SD);};
// Debug printer: hex data followed by a two-letter state mnemonic.
inline friend std::ostream& operator<<(ostream& os, const cache_line& rhs)
{
os << std::hex;
os << "Data:" << rhs.data << " ";
os << std::dec;
if (rhs.state == INV) os << "State: I";
else if(rhs.state == UC) os << "State: UC";
else if(rhs.state == UD) os << "State: UD";
else if(rhs.state == SC) os << "State: SC";
else if(rhs.state == SD) os << "State: SD";
else os << "State: ??";
return os;
}
};
// Sparse model of this master's cache, keyed by line address.
std::map<ace5_::Addr, cache_line> cache;
// Per-address counters of in-flight coherent accesses (all, and writes only),
// used to serialize conflicting coherent transactions to the same line.
std::map<ace5_::Addr, int> cache_outstanding;
std::map<ace5_::Addr, int> cache_outstanding_writes;
int MASTER_ID = -1;
// Injection rates in percent per cycle (0-100) for reads, writes and cache ops.
unsigned int AXI_GEN_RATE_RD;
unsigned int AXI_GEN_RATE_WR;
unsigned int ACE_GEN_RATE_CACHE;
// int FLOW_CTRL; // 0: READY-VALID
// // 1: CREDITS
// // 2: FIFO
// // 3: Credits fifo based
// delays
long total_cycles;
sc_time clk_period;
// Latency/throughput bookkeeping for reads and writes.
unsigned long long int rd_resp_delay = 0;
unsigned long long int rd_resp_count = 0;
unsigned long long int wr_resp_delay = 0;
unsigned long long int wr_resp_count = 0;
unsigned long long int last_rd_sinked_cycle = 0;
unsigned long long int last_wr_sinked_cycle = 0;
unsigned long long int rd_resp_data_count = 0;
unsigned long long int wr_resp_data_count = 0;
bool stop_at_tail, has_stopped_gen;
// Read Addr Generator
// Counters tracking generated vs injected vs ejected transactions.
int cache_trans_generated;
int rd_trans_generated;
int rd_data_generated;
int wr_trans_generated;
int wr_data_generated;
int rd_trans_inj;
int wr_trans_inj;
int wr_data_inj;
unsigned int gen_rd_addr;
unsigned int gen_wr_addr;
unsigned int resp_val_expect;
// Read Responce Sink
int rd_resp_ej;
int wr_resp_ej;
// Errors
int error_sb_rd_resp_not_found;
int error_sb_wr_resp_not_found;
// Functions
void do_cycle();
void gen_new_rd_trans();
void gen_new_wr_trans();
void gen_new_cache_trans();
void gen_snoop_resp(ace5_::AC &rcv_snoop_req);
void upd_cache_read(ace5_::AddrPayload req, ace5_::ReadPayload);
void upd_cache_write(ace5_::AddrPayload req, ace5_::WRespPayload);
void verify_rd_resp(ace5_::ReadPayload &rcv_rd_resp);
void verify_wr_resp(ace5_::WRespPayload &rcv_wr_resp);
bool eq_rd_data(ace5_::ReadPayload &rcv_rd_data, ace5_::ReadPayload &sb_rd_data);
bool eq_wr_resp(ace5_::WRespPayload &rcv_wr_resp, ace5_::WRespPayload &sb_wr_resp);
unsigned mem_map_resolve(ace5_::Addr &addr);
// Constructor
SC_HAS_PROCESS(ace_master);
ace_master(sc_module_name name_) : sc_module(name_)
{
// Defaults: generation disabled for AXI traffic; 5% cache-op rate.
// The testbench is expected to overwrite MASTER_ID and the rates.
MASTER_ID = -1;
AXI_GEN_RATE_RD = 0;
AXI_GEN_RATE_WR = 0;
ACE_GEN_RATE_CACHE = 5;
SC_THREAD(do_cycle);
sensitive << clk.pos();
reset_signal_is(rst_n, false);
}
};
// --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- //
// --- --- --- --- --- --- IMPLEMENTATION --- --- --- --- --- --- --- --- //
// --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- //
// Main per-cycle thread of the ACE master model. Every clock edge it:
//  1. randomly generates new RD/WR/cache transactions (while stop_gen is low),
//  2. non-blockingly injects queued requests/data/snoop-responses/acks,
//  3. non-blockingly ejects and verifies read/write responses and snoops.
// All Push/Pop calls are the non-blocking (NB) variants so one stalled channel
// never blocks the others within a cycle.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void ace_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::do_cycle () {
total_cycles = 0;
cache_trans_generated = 0;
rd_trans_generated = 0;
rd_data_generated = 0;
wr_trans_generated = 0;
wr_data_generated = 0;
rd_trans_inj = 0;
wr_trans_inj = 0;
wr_data_inj = 0;
gen_rd_addr = 0x10100;
gen_wr_addr = 0;
resp_val_expect = 0;
rd_resp_ej = 0;
wr_resp_ej = 0;
// Clear errors
error_sb_rd_resp_not_found = 0;
error_sb_wr_resp_not_found = 0;
// Query the clock period from the bound sc_clock (testbench-only construct).
clk_period = (dynamic_cast<sc_clock *>(clk.get_interface()))->period();
ac_in.Reset();
cr_out.Reset();
cd_out.Reset();
ar_out.Reset();
r_in.Reset();
rack_out.Reset();
aw_out.Reset();
w_out.Reset();
b_in.Reset();
wack_out.Reset();
//if (MASTER_ID == 2) cache[8] = cache_line(0xFF00FF00FF00FF00, cache_line::State::UC);
//if (MASTER_ID == 3) cache[8] = cache_line(0xAA00AA00AA00AA00, cache_line::State::SD);
//if (MASTER_ID == 4) cache[8] = cache_line(0xFF00FF00FF00FF00, cache_line::State::SC);
while(1) {
wait();
total_cycles++;
// Transaction Generator
// Each generator fires with probability <rate>% per cycle.
if (!stop_gen.read()) {
unsigned int rnd_val_rd = rand()%100;
if (rnd_val_rd < AXI_GEN_RATE_RD) {
gen_new_rd_trans();
}
unsigned int rnd_val_wr = rand()%100;
if (rnd_val_wr < AXI_GEN_RATE_WR) {
gen_new_wr_trans();
}
unsigned int rnd_val_cache = rand()%100;
if (rnd_val_cache < ACE_GEN_RATE_CACHE) {
gen_new_cache_trans();
}
}
// Read Request Injection
// Coherent reads to a line with an in-flight coherent WRITE are held back
// to serialize conflicting accesses to the same address.
if (!stored_rd_trans.empty()) {
ace5_::AddrPayload tmp_ar = stored_rd_trans.front();
bool is_coherent = (tmp_ar.snoop || tmp_ar.domain.xor_reduce());
int coherent_writes_outstanding = cache_outstanding_writes[tmp_ar.addr];
if (!(is_coherent && coherent_writes_outstanding)) {
if (ar_out.PushNB(tmp_ar)) {
stored_rd_trans.pop();
std::cout << "[Master " << MASTER_ID << "] : PUSHED AR:" << tmp_ar << " @" << sc_time_stamp() << std::endl;
rd_trans_inj++;
if(is_coherent) {
cache_outstanding[tmp_ar.addr]++;
// Push it to ACE Checker
sb_lock->lock();
msg_tb_wrap<ace5_::AddrPayload> temp_rd_coherent_req_tb;
temp_rd_coherent_req_tb.dut_msg = tmp_ar;
temp_rd_coherent_req_tb.is_read = true;
temp_rd_coherent_req_tb.time_gen = sc_time_stamp();
(*sb_coherent_access_q)[MASTER_ID - SLAVE_NUM].push_back(temp_rd_coherent_req_tb);
sb_lock->unlock();
}
}
}
}
// Write Request Injection
// Coherent writes stall while ANY coherent access to the line is in flight.
if (!stored_wr_trans.empty()) {
ace5_::AddrPayload tmp_aw = stored_wr_trans.front();
bool is_coherent = (tmp_aw.snoop || tmp_aw.domain.xor_reduce());
int coherent_outstanding = cache_outstanding[tmp_aw.addr];
if (!(is_coherent && coherent_outstanding)) {
if (aw_out.PushNB(tmp_aw)) {
stored_wr_trans.pop();
std::cout << "[Master " << MASTER_ID << "] : PUSHED AW: " << tmp_aw << " @" << sc_time_stamp() << std::endl;
wr_trans_inj++;
if(is_coherent) {
cache_outstanding[tmp_aw.addr]++;
cache_outstanding_writes[tmp_aw.addr]++;
sb_lock->lock();
msg_tb_wrap<ace5_::AddrPayload> temp_wr_coherent_req_tb;
temp_wr_coherent_req_tb.dut_msg = tmp_aw;
temp_wr_coherent_req_tb.is_read = false;
temp_wr_coherent_req_tb.time_gen = sc_time_stamp();
(*sb_coherent_access_q)[MASTER_ID - SLAVE_NUM].push_back(temp_wr_coherent_req_tb);
sb_lock->unlock();
}
}
}
}
// Write Data Injection
if (!stored_wr_data.empty()) {
ace5_::WritePayload tmp_w = stored_wr_data.front();
if (w_out.PushNB(tmp_w)) {
stored_wr_data.pop();
std::cout << "[Master " << MASTER_ID << "] : PUSHED W: " << tmp_w << " @" << sc_time_stamp() << std::endl;
wr_data_inj++;
}
}
// Read Response Ejection
ace5_::ReadPayload rcv_rd_resp;
bool got_ar_resp = r_in.PopNB(rcv_rd_resp); // Lacks backpressure
if(got_ar_resp){
verify_rd_resp(rcv_rd_resp);
}
// Write Response Ejection
ace5_::WRespPayload rcv_wr_resp;
bool got_b_resp = b_in.PopNB(rcv_wr_resp); // Lacks backpressure
if(got_b_resp){
verify_wr_resp(rcv_wr_resp);
}
// --- ACE --- //
// Snoop Request Ejection
ace5_::AC rcv_snoop_req;
bool got_snoop_req = ac_in.PopNB(rcv_snoop_req); // Lacks backpressure
if(got_snoop_req){
// ToDo : Implement ACE verification
//verify_snoop_req(got_snoop_req);
gen_snoop_resp(rcv_snoop_req);
}
// Snoop Response Injection
if (!stored_cache_resp.empty()) {
ace5_::CR tmp_cr = stored_cache_resp.front();
if (cr_out.PushNB(tmp_cr)) {
stored_cache_resp.pop();
std::cout << "[Master " << MASTER_ID << "] : PUSHED SNOOP Resp: " << tmp_cr << " @" << sc_time_stamp() << std::endl;
//cache_resp_inj++;
}
}
// Snoop Data Response Injection
if (!stored_cache_data.empty()) {
ace5_::CD tmp_cd = stored_cache_data.front();
if (cd_out.PushNB(tmp_cd)) {
stored_cache_data.pop();
std::cout << "[Master " << MASTER_ID << "] : PUSHED SNOOP Data: " << tmp_cd << " @" << sc_time_stamp() << std::endl;
//cache_resp_data_inj++;
}
}
// READ Ack Injection
if (!stored_rd_ack.empty()) {
ace5_::RACK tmp_rack = stored_rd_ack.front();
if (rack_out.PushNB(tmp_rack)) {
stored_rd_ack.pop();
std::cout << "[Master " << MASTER_ID << "] : PUSHED READ Ack: " << tmp_rack << " @" << sc_time_stamp() << std::endl;
}
}
// WRITE Ack Injection
if (!stored_wr_ack.empty()) {
ace5_::WACK tmp_wack = stored_wr_ack.front();
if (wack_out.PushNB(tmp_wack)) {
stored_wr_ack.pop();
std::cout << "[Master " << MASTER_ID << "] : PUSHED WRITE Ack: " << tmp_wack << " @" << sc_time_stamp() << std::endl;
}
}
}; // End of while(1)
}; // End of do_cycle
// --------------------------- //
// --- GENERATOR Functions --- //
// --------------------------- //
// Generate one randomized AXI read transaction:
//  - queue the master-side request for injection (stored_rd_trans),
//  - push the (possibly downsized) slave-side request to the scoreboard,
//  - pre-compute the expected read-response beats and queue them for checking.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void ace_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::gen_new_rd_trans() {
ace5_::AddrPayload rd_req_m;//(SINGLE, -1, -1, -1);
// Random TID, non-zero size, and burst type; len is constrained per burst type.
rd_req_m.id = (rand()%AXI_TID_NUM); // (rand()% 2)+2;
rd_req_m.size = ((rand()%my_log2c(RD_M_LANES))+1) & ((1<<my_log2c(RD_M_LANES))-1);
rd_req_m.burst = (rand()%AXI_BURST_NUM);
rd_req_m.len = (rd_req_m.burst==enc_::AXBURST::WRAP) ? ((1<<(rand()%my_log2c(AXI4_MAX_LEN+1)))-1) :
(rd_req_m.burst==enc_::AXBURST::FIXED) ? (rand()%AXI4_MAX_LEN) :
(RD_M_LANES>RD_S_LANES) ? (rand()%(AXI4_MAX_INCR_LEN/(RD_M_LANES/RD_S_LANES))) // INCR With Downsize // Cap the maximum len in case of transactions downsize (which increases len)
: (rand()%AXI4_MAX_INCR_LEN) ; // INCR WithOut Downsize
// Increasing address to keep track of the transactions
// NOTE(review): the offset term evaluates to either 0 or (1<<size), so the
// address stays size-aligned — confirm this distribution is intended.
rd_req_m.addr = gen_rd_addr + ((rand()%RD_M_LANES) & (1<<rd_req_m.size));
gen_rd_addr = (gen_rd_addr + RD_M_LANES) % (addr_map[SLAVE_NUM-1][1].read()+1);
// Push it to injection queue
stored_rd_trans.push(rd_req_m);
// Push it to Scoreboard
sb_lock->lock();
// Consider resizing
// When the master is wider than the slave, the slave sees a narrower size
// and a proportionally longer len.
ace5_::AddrPayload rd_req_s;
rd_req_s.id = rd_req_m.id;
rd_req_s.size = ((1<<rd_req_m.size)>RD_S_LANES) ? my_log2c(RD_S_LANES) : rd_req_m.size.to_uint();
rd_req_s.len = ((1<<rd_req_m.size)>RD_S_LANES) ? (((rd_req_m.len.to_uint()+1)<<(rd_req_m.size.to_uint()-my_log2c(RD_S_LANES)))-1) : rd_req_m.len.to_uint();
rd_req_s.burst = rd_req_m.burst;
rd_req_s.addr = rd_req_m.addr;
msg_tb_wrap<ace5_::AddrPayload> temp_rd_req_tb;
temp_rd_req_tb.dut_msg = rd_req_s;
temp_rd_req_tb.time_gen = sc_time_stamp();
unsigned dst = mem_map_resolve(rd_req_s.addr);
(*sb_rd_req_q)[dst].push_back(temp_rd_req_tb);
sb_lock->unlock();
// Push into order queue - Reorder check extension
sb_rd_order_q.push_back(rd_req_m);
rd_trans_generated++;
// --- --- --- --- --- --- --- --- //
// Generate the expected Responce
// --- --- --- --- --- --- --- --- //
sb_lock->lock();
ace5_::ReadPayload beat_expected;
beat_expected.id = rd_req_m.id;
// Create Expected Response
// Walk the byte lanes exactly as the DUT would and emit one expected beat
// whenever a beat boundary (or the end of the burst) is reached.
unsigned long int bytes_total = ((rd_req_m.len+1)<<rd_req_m.size);
unsigned long int byte_count = 0;
unsigned char m_init_ptr = rd_req_m.addr % RD_M_LANES;
unsigned char m_ptr = m_init_ptr;
unsigned char m_size = rd_req_m.size;
//beat_expected.reset_data();
beat_expected.data = 0;
while(byte_count<bytes_total) {
// Expected data pattern: each byte carries its own index (mod 256).
beat_expected.data |= ( ((ace5_::Data)(byte_count & 0xFF)) << ((ace5_::Data)(m_ptr*8)));
byte_count++;
// NOTE(review): unqualified FIXED here vs enc_::AXBURST::FIXED elsewhere —
// confirm both resolve to the same constant.
m_ptr = (rd_req_m.burst==FIXED) ? ((m_ptr+1)%(1<<m_size)) + m_init_ptr
: (m_ptr+1)%RD_M_LANES ;
if(((m_ptr%(1<<m_size))==0) || (byte_count == bytes_total)) {
// resp field is (ab)used to carry the expected destination slave id.
beat_expected.resp = mem_map_resolve(rd_req_m.addr);
beat_expected.last = (byte_count == bytes_total);
msg_tb_wrap< ace5_::ReadPayload > temp_rd_resp_tb;
temp_rd_resp_tb.dut_msg = beat_expected;
temp_rd_resp_tb.time_gen = sc_time_stamp();
(*sb_rd_resp_q)[MASTER_ID-SLAVE_NUM].push_back(temp_rd_resp_tb);
beat_expected.data = 0;
rd_data_generated++;
}
}
sb_lock->unlock();
}; // End of Read generator
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
// Generate one random AXI write transaction, queue the request and its data
// beats for injection at this master, and pre-compute the exact request/data
// beats the target slave is expected to observe (accounting for downsizing
// when WR_M_LANES > WR_S_LANES), pushing those expectations to the scoreboard.
// Fix vs. original: removed a stray empty statement (double semicolon) after
// the gen_wr_addr update.
void ace_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::gen_new_wr_trans() {
  sb_lock->lock();
  ace5_::AddrPayload m_wr_req;//(SINGLE, -1, -1, -1);
  m_wr_req.id = (rand()%AXI_TID_NUM) | (MASTER_ID << 2); // (rand()%4)+2;
  m_wr_req.size = ((rand()%my_log2c(WR_M_LANES))+1) & ((1<<my_log2c(WR_M_LANES))-1); // 0 size is NOT supported
  m_wr_req.burst = (rand()%AXI_BURST_NUM);
  // WRAP bursts need a power-of-2 length; FIXED is capped at AXI4_MAX_LEN;
  // INCR with downsize caps len so the widened slave-side burst still fits.
  m_wr_req.len = (m_wr_req.burst==enc_::AXBURST::WRAP) ? ((1<<(rand()%my_log2c(AXI4_MAX_LEN+1)))-1) :
                 (m_wr_req.burst==enc_::AXBURST::FIXED) ? (rand()%AXI4_MAX_LEN) :
                 (WR_M_LANES>WR_S_LANES) ? (rand()%(AXI4_MAX_INCR_LEN/(WR_M_LANES/WR_S_LANES))) // INCR With Downsize // Cap the maximum len in case of transactions downsize (which increases len)
                                         : (rand()%AXI4_MAX_INCR_LEN) ; // INCR WithOut Downsize
  // Increasing address to keep track of the transactions
  // Aligned on size transactions (Although non-aligned should be an easy addition)
  m_wr_req.addr = gen_wr_addr + ((rand()%WR_M_LANES) & (1<<m_wr_req.size));
  gen_wr_addr = (gen_wr_addr + WR_M_LANES) % (addr_map[SLAVE_NUM-1][1].read()+1);
  // Push it to injection queue
  stored_wr_trans.push(m_wr_req);
  // Push into order queue - Reorder check extension
  sb_wr_order_q.push_back(m_wr_req);
  // Create dummy write data
  ace5_::WritePayload cur_beat;      // The beat that will be injected at MASTER
  ace5_::WritePayload beat_at_slave; // The expected beat ejected at SLAVE
  unsigned long int bytes_total = ((m_wr_req.len+1)<<m_wr_req.size);
  unsigned long int byte_count  = 0;
  // Byte-lane pointers at master and slave widths (address modulo lane count).
  unsigned char m_init_ptr = m_wr_req.addr % WR_M_LANES;
  unsigned char s_init_ptr = m_wr_req.addr % WR_S_LANES;
  unsigned char m_ptr  = m_init_ptr;
  unsigned char s_ptr  = s_init_ptr;
  unsigned char m_size = m_wr_req.size;
  // Downsized transactions are capped at the slave's lane width; len expands accordingly.
  unsigned char s_size = ((1<<m_size)>WR_S_LANES) ? my_log2c(WR_S_LANES) : m_size;
  unsigned char m_len  = m_wr_req.len;
  unsigned char s_len  = ((1<<m_size)>WR_S_LANES) ? (((m_len+1)<<(m_size-s_size))-1) : m_len;
  // Push it to Scoreboard: the request as the slave should see it after resizing.
  ace5_::AddrPayload s_wr_req;
  s_wr_req.id = m_wr_req.id;
  s_wr_req.addr = m_wr_req.addr;
  s_wr_req.size = s_size;
  s_wr_req.len = s_len;
  s_wr_req.burst = m_wr_req.burst;
  msg_tb_wrap<ace5_::AddrPayload> temp_wr_req_tb;
  temp_wr_req_tb.dut_msg = s_wr_req;
  unsigned dst = mem_map_resolve(s_wr_req.addr);
  (*sb_wr_req_q)[dst].push_back(temp_wr_req_tb);
  cur_beat.data = 0;
  cur_beat.wstrb = 0;
  beat_at_slave.data = 0;
  beat_at_slave.wstrb = 0;
  // Walk the burst byte-by-byte, packing bytes into master-side and slave-side
  // beats simultaneously; a beat is emitted whenever its lane pointer wraps a
  // size-aligned boundary (or the burst ends).
  while(byte_count<bytes_total) {
    // Last byte of the burst carries MASTER_ID so the sink can identify the source.
    unsigned byte_to_write = (byte_count==bytes_total-1) ? MASTER_ID : byte_count;
    cur_beat.data |= (((ace5_::Data)(byte_to_write & 0xFF)) << ((ace5_::Data)(m_ptr*8)));
    cur_beat.wstrb |= (((ace5_::Data)1) << ((ace5_::Data)m_ptr));
    beat_at_slave.data |= (((ace5_::Data)(byte_to_write & 0xFF)) << ((ace5_::Data)(s_ptr*8)));
    beat_at_slave.wstrb |= (((ace5_::Data)1) << ((ace5_::Data)s_ptr));
    byte_count++;
    // FIXED bursts rewind within the same size-aligned window; INCR/WRAP advance
    // modulo the full bus width.
    m_ptr = (m_wr_req.burst==FIXED) ? ((m_ptr+1)%(1<<m_size)) + m_init_ptr :
                                       (m_ptr+1)%WR_M_LANES ;
    s_ptr = (s_wr_req.burst==FIXED) ? ((s_ptr+1)%(1<<s_size)) + s_init_ptr :
                                       (s_ptr+1)%WR_S_LANES ;
    if(((m_ptr%(1<<m_size))==0) || (byte_count == bytes_total)) {
      wr_data_generated++;
      cur_beat.last = (byte_count == bytes_total);
      stored_wr_data.push(cur_beat);
      cur_beat.data = 0;
      cur_beat.wstrb = 0;
    }
    if(((s_ptr%(1<<s_size))==0) || (byte_count == bytes_total)) {
      beat_at_slave.last = (byte_count == bytes_total);
      msg_tb_wrap< ace5_::WritePayload > temp_wr_data_tb;
      temp_wr_data_tb.dut_msg = beat_at_slave;
      wr_resp_data_count++;
      last_wr_sinked_cycle = (sc_time_stamp() / clk_period);
      (*sb_wr_data_q)[dst].push_back(temp_wr_data_tb);
      beat_at_slave.data = 0;
      beat_at_slave.wstrb = 0;
    }
  }
  sb_lock->unlock();
  wr_trans_generated++;
}; // End of Write generator
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
// Generate one random ACE coherent transaction for a random cache line.
// The legal snoop opcodes are chosen according to the line's current state
// (INV/UC/UD/SC/SD); the request is then queued for injection and pushed to
// the order queue (and, for writes, to the scoreboard).
// Fix vs. original: the SD branch contained unreachable statements after its
// early `return;` (a dead sel_req draw and snoop assignment) — removed; the
// branch now mirrors the UD branch, which also just invalidates and returns.
void ace_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::gen_new_cache_trans() {
  ace5_::AddrPayload cache_req;
  cache_req.id = MASTER_ID;// (rand() % AXI_TID_NUM); // (rand()% 2)+2;
  cache_req.size = nvhls::log2_ceil<RD_M_LANES>::val;
  cache_req.burst = enc_::AXBURST::INCR;
  cache_req.len = 0; // Single-beat: one full cache line per request
  // Pick a random cache-line-aligned address (line 0 excluded).
  cache_req.addr = ((rand()%ACE_CACHE_LINES)+1) * (ace5_::C_CACHE_WIDTH>>3);//0x8;
  cache_line & this_line = cache[cache_req.addr];
  cache_req.domain = enc_::AxDOMAIN::OUTER_SHARE;
  // RD_ONCE, RD_SHARED, RD_CLEAN, RD_NOT_SHARED_DIRTY, RD_UNIQUE, CLEAN_UNIQUE, MAKE_UNIQUE, CLEAN_SHARED, CLEAN_INVALID, MAKE_INVALID
  // WR_UNIQUE, WR_LINE_UNIQUE
  bool is_read = false;
  if (this_line.is_inv()) {
    // Invalid line: any read/clean/make or coherent write is legal.
    unsigned sel_req = rand() % 11;
    if      (sel_req == 0) cache_req.snoop = enc_::ARSNOOP::RD_ONCE;
    else if (sel_req == 1) cache_req.snoop = enc_::ARSNOOP::RD_CLEAN;
    else if (sel_req == 2) cache_req.snoop = enc_::ARSNOOP::RD_NOT_SHARED_DIRTY;
    else if (sel_req == 3) cache_req.snoop = enc_::ARSNOOP::RD_SHARED;
    else if (sel_req == 4) cache_req.snoop = enc_::ARSNOOP::RD_UNIQUE;
    else if (sel_req == 5) cache_req.snoop = enc_::ARSNOOP::CLEAN_SHARED;
    else if (sel_req == 6) cache_req.snoop = enc_::ARSNOOP::CLEAN_INVALID;
    else if (sel_req == 7) cache_req.snoop = enc_::ARSNOOP::MAKE_UNIQUE;
    else if (sel_req == 8) {
      this_line.state = cache_line::State::INV;
      cache_req.snoop = enc_::ARSNOOP::MAKE_INVALID;
    }
    else if (sel_req == 9)  cache_req.snoop = enc_::AWSNOOP::WR_UNIQUE;
    else if (sel_req == 10) cache_req.snoop = enc_::AWSNOOP::WR_LINE_UNIQUE;
    is_read = (sel_req < 9); // 9,10 travel on the write channel
  } else if (this_line.is_uc()) {
    // Unique-Clean: only CLEAN_SHARED (read channel) or coherent writes.
    unsigned sel_req = rand() % 3;
    if      (sel_req == 0) cache_req.snoop = enc_::ARSNOOP::CLEAN_SHARED;
    else if (sel_req == 1) cache_req.snoop = enc_::AWSNOOP::WR_UNIQUE;
    else if (sel_req == 2) cache_req.snoop = enc_::AWSNOOP::WR_LINE_UNIQUE;
    is_read = (sel_req < 1);
  } else if (this_line.is_ud()) {
    // Unique-Dirty: no request generated; drop the line (phony invalidate,
    // stand-in until a full cache-controller model handles write-back).
    this_line.state = cache_line::State::INV;
    return;
  } else if (this_line.is_sc()) {
    unsigned sel_req = rand() % 5;
    // RD_ONCE, RD_SHARED, RD_CLEAN, RD_NOT_SHARED_DIRTY, RD_UNIQUE, CLEAN_UNIQUE, MAKE_UNIQUE, CLEAN_SHARED, CLEAN_INVALID, MAKE_INVALID
    if      (sel_req == 0) cache_req.snoop = enc_::ARSNOOP::CLEAN_UNIQUE;
    else if (sel_req == 1) cache_req.snoop = enc_::ARSNOOP::CLEAN_SHARED;
    else if (sel_req == 2) cache_req.snoop = enc_::ARSNOOP::MAKE_UNIQUE;
    else if (sel_req == 3) cache_req.snoop = enc_::AWSNOOP::WR_UNIQUE;
    else if (sel_req == 4) cache_req.snoop = enc_::AWSNOOP::WR_LINE_UNIQUE;
    is_read = (sel_req<3);
  } else if (this_line.is_sd()) {
    // Shared-Dirty: like UD, drop the line and generate nothing.
    this_line.state = cache_line::State::INV;
    return;
  } else {
    std::cout << "You should not reach this...\n";
    NVHLS_ASSERT(0);
  }
  if (true /*total_cycles > 14 && total_cycles <16*/) {
    if (is_read) {
      // Push it to injection queue
      stored_rd_trans.push(cache_req);
      // Pushing to ACE Coherency checker happens during injection
      // Push into order queue - Reorder check extension
      sb_rd_order_q.push_back(cache_req);
      rd_trans_generated++;
    } else { // It's a WR request
      // Single full-line data beat; top byte carries MASTER_ID as a signature.
      ace5_::WritePayload data_beat;
      if (ace5_::C_CACHE_WIDTH<64) data_beat.data = (((ace5_::Data) MASTER_ID) << (ace5_::C_CACHE_WIDTH - 8)) | ((ace5_::Data) 0x000000000000BEEF);
      else                         data_beat.data = (((ace5_::Data) MASTER_ID) << (ace5_::C_CACHE_WIDTH - 8)) | ((ace5_::Data) 0x0000BEEFDEADBEEF);
      data_beat.wstrb = -1; // all byte lanes enabled
      data_beat.last  = 1;
      // Push it to injection queue
      stored_wr_trans.push(cache_req);
      stored_wr_data.push(data_beat);
      // Push it to Scoreboard (snoop/domain/barrier/unique fields are cleared,
      // since the plain-AXI slave side never sees them).
      sb_lock->lock();
      unsigned target_mem = mem_map_resolve(cache_req.addr);
      msg_tb_wrap<ace5_::AddrPayload> temp_wr_coherent_req_tb;
      temp_wr_coherent_req_tb.is_read = false;
      temp_wr_coherent_req_tb.dut_msg = cache_req;
      temp_wr_coherent_req_tb.dut_msg.snoop   = 0;
      temp_wr_coherent_req_tb.dut_msg.domain  = 0;
      temp_wr_coherent_req_tb.dut_msg.barrier = 0;
      temp_wr_coherent_req_tb.dut_msg.unique  = 0;
      temp_wr_coherent_req_tb.time_gen = sc_time_stamp();
      (*sb_wr_req_q)[target_mem].push_back(temp_wr_coherent_req_tb);
      msg_tb_wrap<ace5_::WritePayload> temp_wr_data_tb;
      temp_wr_data_tb.dut_msg = data_beat;
      temp_wr_data_tb.is_read = false;
      temp_wr_data_tb.time_gen = sc_time_stamp();
      (*sb_wr_data_q)[target_mem].push_back(temp_wr_data_tb);
      sb_lock->unlock();
      // Push into order queue - Reorder check extension
      sb_wr_order_q.push_back(cache_req);
      wr_trans_generated++;
    }
    cache_trans_generated++;
  }
}; // End of Cache generator
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
// Build this master's snoop response (CR, and CD data when DataTransfer is
// set) for an incoming snoop request, update the local cache-line state
// accordingly, and push the req/resp/data set to the scoreboard's coherency
// checker queues.
void ace_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::gen_snoop_resp(ace5_::AC &rcv_snoop_req) {
  typename std::map<ace5_::Addr, cache_line>::iterator cur_line_iter;
  // CRRESP bit encoding (used by all the hex constants below):
  // CRRESP[0] : DataTransfer
  // CRRESP[1] : Error
  // CRRESP[2] : PassDirty
  // CRRESP[3] : IsShared
  // CRRESP[4] : WasUnique
  cur_line_iter = cache.find(rcv_snoop_req.addr);
  ace5_::CR cur_resp;
  ace5_::CD cur_data; // only meaningful (and only pushed) when DataTransfer is set
  bool has_data = false;
  if (cur_line_iter == cache.end() || cur_line_iter->second.is_inv()) {
    // Snoop miss: respond with all-zero CRRESP, no data.
    std::cout << "[Master " << MASTER_ID << "] SNOOP Miss " << rcv_snoop_req << "@" << sc_time_stamp() << "\n";
    cur_resp.resp = 0;
    has_data = false;
  } else {
    std::cout << "[Master " << MASTER_ID << "] SNOOP Hit @" << sc_time_stamp() << " " << rcv_snoop_req;
    std::cout << " --- Addr:"<< std::hex << cur_line_iter->first << std::dec << " : " << cur_line_iter->second << "\n";
    cur_data.data = cur_line_iter->second.data;
    cur_data.last = 1;
    has_data = true;
    // CRRESP[0] : DataTransfer
    // CRRESP[1] : Error
    // CRRESP[2] : PassDirty
    // CRRESP[3] : IsShared
    // CRRESP[4] : WasUnique
    if (rcv_snoop_req.snoop == enc_::ACSNOOP::RD_ONCE) {
      // RD_ONCE: provide data, keep the line; state unchanged.
      if (cur_line_iter->second.is_uc()) {
        cur_resp.resp = 0x19; // WasUnique, IsShared, DataTransfer
      } else if (cur_line_iter->second.is_ud()) {
        cur_resp.resp = 0x19; // WasUnique, IsShared, DataTransfer
      } else if (cur_line_iter->second.is_sc()) {
        cur_resp.resp = 0x9; // IsShared, DataTransfer
      } else if (cur_line_iter->second.is_sd()) {
        cur_resp.resp = 0x9; // IsShared, DataTransfer
      } else {
        std::cout << "You should not reach this...\n";
        NVHLS_ASSERT(0);
      }
    } else if ( (rcv_snoop_req.snoop == enc_::ACSNOOP::RD_CLEAN) ||
                (rcv_snoop_req.snoop == enc_::ACSNOOP::RD_SHARED) ||
                (rcv_snoop_req.snoop == enc_::ACSNOOP::RD_NOT_SHARED_DIRTY) )
    {
      // Shared-type reads: provide data and demote unique states to shared.
      if (cur_line_iter->second.is_uc()) {
        cur_resp.resp = 0x19; // WasUnique, IsShared, DataTransfer
        cur_line_iter->second.state = cache_line::State::SC;
      } else if (cur_line_iter->second.is_ud()) {
        cur_resp.resp = 0x19; // WasUnique, IsShared, DataTransfer (dirty is retained locally: UD -> SD)
        cur_line_iter->second.state = cache_line::State::SD;
      } else if (cur_line_iter->second.is_sc()) {
        cur_resp.resp = 0x9; // IsShared, DataTransfer
      } else if (cur_line_iter->second.is_sd()) {
        cur_resp.resp = 0x9; // IsShared, DataTransfer (dirty kept here, not passed)
        //cur_resp.resp = 0x5; // PassDirty // <---- THIS IS CHANGED
        //cur_line_iter->second.state = cache_line::State::INV;
      } else {
        std::cout << "You should not reach this...\n";
        NVHLS_ASSERT(0);
      }
    } else if ((rcv_snoop_req.snoop == enc_::ACSNOOP::RD_UNIQUE)    ||
               (rcv_snoop_req.snoop == enc_::ACSNOOP::CLEAN_INVALID) ||
               (rcv_snoop_req.snoop == enc_::ACSNOOP::MAKE_INVALID) )
    {
      // Invalidating snoops: report dirty/unique status, then drop the line.
      if (cur_line_iter->second.is_uc())      cur_resp.resp = 0x10; // WasUnique
      else if (cur_line_iter->second.is_ud()) cur_resp.resp = 0x15; // WasUnique, PassDirty, DataTransfer
      else if (cur_line_iter->second.is_sc()) cur_resp.resp = 0x00; //
      else if (cur_line_iter->second.is_sd()) cur_resp.resp = 0x05; // PassDirty, DataTransfer
      else {
        std::cout << "You should not reach this...\n";
        NVHLS_ASSERT(0);
      }
      cur_line_iter->second.state = cache_line::State::INV;
    } else if (rcv_snoop_req.snoop == enc_::ACSNOOP::CLEAN_SHARED) {
      // CLEAN_SHARED: dirty lines pass their dirty data; all survivors end clean.
      if (cur_line_iter->second.is_uc()) {
        cur_resp.resp = 0x19; // WasUnique, IsShared, DataTransfer
        cur_line_iter->second.state = cache_line::State::UC;
      } else if (cur_line_iter->second.is_ud()) {
        cur_resp.resp = 0x1D; // WasUnique, IsShared, PassDirty, DataTransfer
        cur_line_iter->second.state = cache_line::State::SC;
      } else if (cur_line_iter->second.is_sc()) {
        cur_resp.resp = 0x9; // IsShared, DataTransfer
      } else if (cur_line_iter->second.is_sd()) {
        cur_resp.resp = 0xD; // IsShared, PassDirty, DataTransfer
        cur_line_iter->second.state = cache_line::State::SC;
      } else {
        std::cout << "You should not reach this...\n";
        NVHLS_ASSERT(0);
      }
    }
  }
  // DataTransfer (CRRESP[0]) is the single source of truth for whether a CD
  // beat accompanies this response — it overrides the earlier has_data value.
  has_data = cur_resp.resp & 1;
  stored_cache_resp.push(cur_resp);
  if(has_data) stored_cache_data.push(cur_data);
  // PUSH TO SCOREBOARD / COHERENCY CHECKER
  msg_tb_wrap<ace5_::AC> temp_snoop_req_tb;
  msg_tb_wrap<ace5_::CR> temp_snoop_resp_tb;
  msg_tb_wrap<ace5_::CD> temp_snoop_data_tb;
  sc_time now_time = sc_time_stamp();
  temp_snoop_req_tb.time_gen = now_time;
  temp_snoop_req_tb.dut_msg = rcv_snoop_req;
  temp_snoop_resp_tb.time_gen = now_time;
  temp_snoop_resp_tb.dut_msg = cur_resp;
  temp_snoop_data_tb.time_gen = now_time;
  temp_snoop_data_tb.dut_msg = cur_data;
  sb_lock->lock();
  (*sb_snoop_req_q)[MASTER_ID-SLAVE_NUM].push_back(temp_snoop_req_tb);
  if (has_data) (*sb_snoop_data_resp_q)[MASTER_ID-SLAVE_NUM].push_back(temp_snoop_data_tb);
  (*sb_snoop_resp_q)[MASTER_ID-SLAVE_NUM].push_back(temp_snoop_resp_tb); // Keep pushing to resp last, just to be ULTRA sure for read access sequence. (coherency checker checks resp to read the req-resp-data set)
  sb_lock->unlock();
}
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
// Update the local cache model after a read-channel ACE transaction completes.
// Validates the returned RRESP coherency bits against the line's current state
// and moves the line to its next MOESI-like state (INV/UC/UD/SC/SD).
// shared_dirty = resp.resp >> 2, i.e. bit0 = PassDirty, bit1 = IsShared
// (so 0 = unique clean, 1 = unique dirty, 2 = shared clean, 3 = shared dirty).
// Branches marked "Expected start state" are the states the generator normally
// issues the request from; the WARN branches are permitted-but-unexpected.
void ace_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::upd_cache_read(ace5_::AddrPayload req, ace5_::ReadPayload resp) {
  cache_line & this_line = cache[req.addr];
  unsigned shared_dirty = (resp.resp) >> 2;
  // --- READ Operations --- //
  // RD_ONCE is only coherent when issued to a shareable domain, hence the
  // domain.xor_reduce() qualifier (domain == 01 or 10).
  if (req.snoop == enc_::ARSNOOP::RD_ONCE && req.domain.xor_reduce()) {
    if (this_line.is_inv()) { // Expected start state
      if ( (shared_dirty != 0) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::INV; // RD_ONCE does not allocate
      this_line.data = resp.data;
    } else if (this_line.is_uc()) {
      std::cout << "WARN : Cache in UC not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UC;
      this_line.data = resp.data;
    } else if (this_line.is_ud()) {
      std::cout << "WARN : Cache in UD not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD;
      this_line.data = resp.data;
    } else if (this_line.is_sc()) {
      std::cout << "WARN : Cache in SC not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UC : cache_line::State::SC;
      this_line.data = resp.data;
    } else if (this_line.is_sd()) {
      std::cout << "WARN : Cache in SD not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UD : cache_line::State::SD;
      this_line.data = resp.data;
    } else {
      std::cout << "Impossible to reach this. \n";
      NVHLS_ASSERT(0);
    }
  } else if (req.snoop == enc_::ARSNOOP::RD_CLEAN) {
    // RD_CLEAN: line must arrive clean (PassDirty never accepted from INV).
    if (this_line.is_inv()) { // Expected start state
      if ( (shared_dirty != 0) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UC : cache_line::State::SC;
      this_line.data = resp.data;
    } else if (this_line.is_uc()) {
      std::cout << "WARN : Cache in UC not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UC;
      this_line.data = resp.data;
    } else if (this_line.is_ud()) {
      std::cout << "WARN : Cache in UD not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD;
      this_line.data = resp.data;
    } else if (this_line.is_sc()) {
      std::cout << "WARN : Cache in SC not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UC : cache_line::State::SC;
      this_line.data = resp.data;
    } else if (this_line.is_sd()) {
      std::cout << "WARN : Cache in SD not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UD : cache_line::State::SD;
      this_line.data = resp.data;
    } else {
      std::cout << "Impossible to reach this. \n";
      NVHLS_ASSERT(0);
    }
  } else if (req.snoop == enc_::ARSNOOP::RD_NOT_SHARED_DIRTY) {
    // RD_NOT_SHARED_DIRTY: any response except shared-dirty (3) is legal.
    if (this_line.is_inv()) { // Expected start state
      if ( (shared_dirty != 0) &&
           (shared_dirty != 1) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UC :
                        (shared_dirty==1) ? cache_line::State::UD
                                          : cache_line::State::SC;
      this_line.data = resp.data;
    } else if (this_line.is_uc()) {
      std::cout << "WARN : Cache in UC not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UC;
      this_line.data = resp.data;
    } else if (this_line.is_ud()) {
      std::cout << "WARN : Cache in UD not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD;
      this_line.data = resp.data;
    } else if (this_line.is_sc()) {
      std::cout << "WARN : Cache in SC not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) &&
           (shared_dirty != 1) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UC :
                        (shared_dirty==1) ? cache_line::State::UD
                                          : cache_line::State::SC;
      this_line.data = resp.data;
    } else if (this_line.is_sd()) {
      std::cout << "WARN : Cache in SD not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UD : cache_line::State::SD;
      this_line.data = resp.data;
    } else {
      std::cout << "Impossible to reach this. \n";
      NVHLS_ASSERT(0);
    }
  } else if (req.snoop == enc_::ARSNOOP::RD_SHARED) {
    // RD_SHARED: all four IsShared/PassDirty combinations are legal.
    if (this_line.is_inv()) { // Expected start state
      if ( (shared_dirty != 0) &&
           (shared_dirty != 1) &&
           (shared_dirty != 2) &&
           (shared_dirty != 3) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UC :
                        (shared_dirty==1) ? cache_line::State::UD :
                        (shared_dirty==2) ? cache_line::State::SC
                                          : cache_line::State::SD;
      this_line.data = resp.data;
    } else if (this_line.is_uc()) {
      std::cout << "WARN : Cache in UC not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UC;
      this_line.data = resp.data;
    } else if (this_line.is_ud()) {
      std::cout << "WARN : Cache in UD not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD;
      this_line.data = resp.data;
    } else if (this_line.is_sc()) {
      std::cout << "WARN : Cache in SC not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) &&
           (shared_dirty != 1) &&
           (shared_dirty != 2) &&
           (shared_dirty != 3) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UC :
                        (shared_dirty==1) ? cache_line::State::UD :
                        (shared_dirty==2) ? cache_line::State::SC
                                          : cache_line::State::SD;
      this_line.data = resp.data;
    } else if (this_line.is_sd()) {
      std::cout << "WARN : Cache in SD not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UD : cache_line::State::SD;
      this_line.data = resp.data;
    } else {
      std::cout << "Impossible to reach this. \n";
      NVHLS_ASSERT(0);
    }
  } else if (req.snoop == enc_::ARSNOOP::RD_UNIQUE) {
    // RD_UNIQUE: line must come back unique (IsShared never accepted).
    if (this_line.is_inv()) { // Expected start state
      if ( (shared_dirty != 0) &&
           (shared_dirty != 1) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UC : cache_line::State::UD;
      this_line.data = resp.data;
    } else if (this_line.is_uc()) {
      std::cout << "WARN : Cache in UC not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UC;
      this_line.data = resp.data;
    } else if (this_line.is_ud()) {
      std::cout << "WARN : Cache in UD not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD;
      this_line.data = resp.data;
    } else if (this_line.is_sc()) {
      std::cout << "WARN : Cache in SC not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) &&
           (shared_dirty != 1) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UC : cache_line::State::UD;
      this_line.data = resp.data;
    } else if (this_line.is_sd()) {
      std::cout << "WARN : Cache in SD not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD; // was dirty here; stays dirty, now unique
      this_line.data = resp.data;
    } else {
      std::cout << "Impossible to reach this. \n";
      NVHLS_ASSERT(0);
    }
  // --- CLEAN Operations --- //
  } else if (req.snoop == enc_::ARSNOOP::CLEAN_UNIQUE) {
    // CLEAN_UNIQUE: no data returned; shared states are promoted to unique.
    if (this_line.is_inv()) {
      std::cout << "WARN : Cache in INV not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::INV;
    } else if (this_line.is_uc()) {
      std::cout << "WARN : Cache in UC not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UC;
    } else if (this_line.is_ud()) {
      std::cout << "WARN : Cache in UD not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD;
    } else if (this_line.is_sc()) { // Expected start state
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UC;
    } else if (this_line.is_sd()) { // Expected start state
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD;
    } else {
      std::cout << "Impossible to reach this. \n";
      NVHLS_ASSERT(0);
    }
  } else if (req.snoop == enc_::ARSNOOP::CLEAN_SHARED) {
    if (this_line.is_inv()) { // Expected start state
      if ( (shared_dirty != 0) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::INV;
    } else if (this_line.is_uc()) { // Expected start state
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UC;
    } else if (this_line.is_ud()) {
      // Dirty lines should have been written back before CLEAN_SHARED;
      // tolerated as a warning until the cache-controller model exists.
      std::cout << "WARN : Cache is not in expected state for this Request.\n";
      //NVHLS_ASSERT(0);
    } else if (this_line.is_sc()) {
      if ( (shared_dirty != 0) &&
           (shared_dirty != 2) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = (shared_dirty==0) ? cache_line::State::UC : cache_line::State::SC;
    } else if (this_line.is_sd()) {
      std::cout << "CHACHE MODEL ERR : Cache is not in expected state for this Request.\n";
      //NVHLS_ASSERT(0);
    } else {
      std::cout << "Impossible to reach this. \n";
      NVHLS_ASSERT(0);
    }
  } else if (req.snoop == enc_::ARSNOOP::CLEAN_INVALID) {
    // ToDo this is a phony state INVALIDATION. Because It's cache's responsibility to INV line before a CLEAN_INVALID.
    // Workaround until a valid cache controller model is available
    // NOTE(review): forcing INV here makes the non-INV branches below
    // unreachable; they are kept for when the workaround is removed.
    this_line.state = cache_line::State::INV;
    if (this_line.is_inv()) { // Expected start state
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::INV;
    } else if (this_line.is_uc()) {
      std::cout << "ERR : Cache is not in expected state for this Request.\n";
      NVHLS_ASSERT(0);
    } else if (this_line.is_ud()) {
      std::cout << "ERR : Cache is not in expected state for this Request.\n";
      NVHLS_ASSERT(0);
    } else if (this_line.is_sc()) {
      std::cout << "ERR : Cache is not in expected state for this Request.\n";
      NVHLS_ASSERT(0);
    } else if (this_line.is_sd()) {
      std::cout << "ERR : Cache is not in expected state for this Request.\n";
      NVHLS_ASSERT(0);
    } else {
      std::cout << "Impossible to reach this. \n";
      NVHLS_ASSERT(0);
    }
  } else if (req.snoop == enc_::ARSNOOP::MAKE_UNIQUE) {
    // MAKE_UNIQUE: gain unique ownership without data; line becomes UD
    // (the requester intends to write the whole line).
    if (this_line.is_inv()) { // Expected start state
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD;
    } else if (this_line.is_uc()) {
      std::cout << "WARN : Cache in UC not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD;
    } else if (this_line.is_ud()) {
      std::cout << "WARN : Cache in UD not expected (though permitted) for Req: " << req << "\n";
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD;
    } else if (this_line.is_sc()) { // Expected start state
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD;
    } else if (this_line.is_sd()) { // Expected start state
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::UD;
    } else {
      std::cout << "Impossible to reach this. \n";
      NVHLS_ASSERT(0);
    }
  } else if (req.snoop == enc_::ARSNOOP::MAKE_INVALID) {
    // ToDo this is a phony state INVALIDATION. Because It's cache's responsibility to INV line before a MAKE_INVALID.
    // Workaround until a valid cache controller model is available
    // NOTE(review): same as CLEAN_INVALID above — the non-INV branches
    // below are dead while the forced invalidation is in place.
    this_line.state = cache_line::State::INV;
    if (this_line.is_inv()) { // Expected start state
      if ( (shared_dirty != 0) )
        { NVHLS_ASSERT_MSG(0, "Invalid response.");}
      this_line.state = cache_line::State::INV;
    } else if (this_line.is_uc()) {
      std::cout << "ERR : Cache is not in expected state for this Request.\n";
      NVHLS_ASSERT(0);
    } else if (this_line.is_ud()) {
      std::cout << "ERR : Cache is not in expected state for this Request.\n";
      NVHLS_ASSERT(0);
    } else if (this_line.is_sc()) {
      std::cout << "ERR : Cache is not in expected state for this Request.\n";
      NVHLS_ASSERT(0);
    } else if (this_line.is_sd()) {
      std::cout << "ERR : Cache is not in expected state for this Request.\n";
      NVHLS_ASSERT(0);
    } else {
      std::cout << "Impossible to reach this. \n";
      NVHLS_ASSERT(0);
    }
  } else {
    std::cout << " Req : " << req <<". \n";
    std::cout << " Resp: " << resp <<". \n";
    std::cout << " Did not handled correctly. \n";
    NVHLS_ASSERT(0);
  }
  // One less pending coherent transaction for this address.
  cache_outstanding[req.addr]--;
  // when end-up in a Dirty state write The master ID
  if (this_line.is_ud() || this_line.is_sd()) {
    if (ace5_::C_CACHE_WIDTH<64) this_line.data = (((ace5_::Data) 0xFF) << (ace5_::C_CACHE_WIDTH - 8)) | ((ace5_::Data) 0x000000000000BEEF);
    else                         this_line.data = (((ace5_::Data) 0xFF) << (ace5_::C_CACHE_WIDTH - 8)) | ((ace5_::Data) 0x0000BEEFDEADBEEF);
    //this_line.data = ( ((ace5_::Data) MASTER_ID) << (ace5_::C_CACHE_WIDTH-8) ) | 0x0000BEEFDEADBEEF;
    //this_line.data = (this_line.data & ( (~((ace5_::Data) 0xFF) << (ace5_::C_CACHE_WIDTH-8)) ) ) | ( ((ace5_::Data) MASTER_ID) << (ace5_::C_CACHE_WIDTH-8) );
  }
}
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void ace_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::upd_cache_write(ace5_::AddrPayload req, ace5_::WRespPayload resp) {
// Update the local cache-line model once a coherent WRITE transaction completes.
// The next line state is selected by the request's snoop opcode and the line's
// current state (INV/UC/UD/SC/SD). Combinations the generator should never
// produce print an error and stop the simulation via NVHLS_ASSERT; a WARN is
// printed (without aborting) for states that can legally race with snoops.
// FIX(review): a second, unreachable `else if (req.snoop == enc_::AWSNOOP::WR_CLEAN)`
// branch (behaviorally identical to the WR_EVICT branch) was removed — the first
// WR_CLEAN test always won, so it was dead code.
cache_line &this_line = cache[req.addr];
if ((req.snoop == enc_::AWSNOOP::WR_UNIQUE && req.domain.xor_reduce()) ||
req.snoop == enc_::AWSNOOP::WR_LINE_UNIQUE)
{
if (this_line.is_inv()) {
this_line.state = cache_line::State::INV;
} else if (this_line.is_uc()) {
// Line becomes Shared-Clean; stamp a recognizable data pattern whose top
// byte is 0xFF (the "dirty marker" byte used throughout this model).
this_line.state = cache_line::State::SC;
if (ace5_::C_CACHE_WIDTH<64) this_line.data = (((ace5_::Data) 0xFF) << (ace5_::C_CACHE_WIDTH - 8)) | ((ace5_::Data) 0x000000000000BEEF);
else this_line.data = (((ace5_::Data) 0xFF) << (ace5_::C_CACHE_WIDTH - 8)) | ((ace5_::Data) 0x0000BEEFDEADBEEF);
} else if (this_line.is_ud()) {
std::cout << "WARN : Cache is not in expected state for this Request.\n";
//NVHLS_ASSERT(0);
} else if (this_line.is_sc()) {
this_line.state = cache_line::State::SC;
if (ace5_::C_CACHE_WIDTH<64) this_line.data = (((ace5_::Data) 0xFF) << (ace5_::C_CACHE_WIDTH - 8)) | ((ace5_::Data) 0x000000000000BEEF);
else this_line.data = (((ace5_::Data) 0xFF) << (ace5_::C_CACHE_WIDTH - 8)) | ((ace5_::Data) 0x0000BEEFDEADBEEF);
} else if (this_line.is_sd()) {
std::cout << "WARN : Cache is not in expected state for this Request.\n";
//NVHLS_ASSERT(0);
} else {
std::cout << "Impossible to reach this. \n";
NVHLS_ASSERT(0);
}
} else if (req.snoop == enc_::AWSNOOP::WR_BACK) {
// WriteBack: only Dirty lines (UD/SD) may be written back; they invalidate.
if (this_line.is_inv()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else if (this_line.is_uc()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else if (this_line.is_ud()) {
this_line.state = cache_line::State::INV;
} else if (this_line.is_sc()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else if (this_line.is_sd()) {
this_line.state = cache_line::State::INV;
} else {
std::cout << "Impossible to reach this. \n";
NVHLS_ASSERT(0);
}
} else if (req.snoop == enc_::AWSNOOP::WR_CLEAN) {
// WriteClean: like WriteBack in this model, only Dirty lines are legal.
if (this_line.is_inv()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else if (this_line.is_uc()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else if (this_line.is_ud()) {
this_line.state = cache_line::State::INV;
} else if (this_line.is_sc()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else if (this_line.is_sd()) {
this_line.state = cache_line::State::INV;
} else {
std::cout << "Impossible to reach this. \n";
NVHLS_ASSERT(0);
}
} else if (req.snoop == enc_::AWSNOOP::WR_EVICT) {
// WriteEvict: only a Unique-Clean line may be (write-)evicted.
if (this_line.is_inv()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else if (this_line.is_uc()) {
this_line.state = cache_line::State::INV;
} else if (this_line.is_ud()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else if (this_line.is_sc()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else if (this_line.is_sd()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else {
std::cout << "Impossible to reach this. \n";
NVHLS_ASSERT(0);
}
} else if (req.snoop == enc_::AWSNOOP::EVICT) {
// Evict: clean lines (UC/SC) are silently dropped.
if (this_line.is_inv()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else if (this_line.is_uc()) {
this_line.state = cache_line::State::INV;
} else if (this_line.is_ud()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else if (this_line.is_sc()) {
this_line.state = cache_line::State::INV;
} else if (this_line.is_sd()) {
std::cout << "ERR : Cache is not in expected state for this Request.\n";
NVHLS_ASSERT(0);
} else {
std::cout << "Impossible to reach this. \n";
NVHLS_ASSERT(0);
}
} else {
// Unknown snoop opcode — dump the offending transaction and abort.
std::cout << " Req : " << req <<". \n";
std::cout << " Resp: " << resp <<". \n";
std::cout << " Did not handled correctly. \n";
NVHLS_ASSERT(0);
}
// One fewer outstanding (write) transaction on this address.
cache_outstanding_writes[req.addr]--;
cache_outstanding[req.addr]--;
}
// ------------------------ //
// --- VERIFY Functions --- //
// ------------------------ //
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void ace_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::verify_rd_resp(ace5_::ReadPayload &rcv_rd_resp){
// Verify Response
// Checks a received READ response beat against the scoreboard:
//  1) find the oldest outstanding request with the same id (reorder check),
//  2) find the matching expected beat in this master's response queue,
//  3) queue an RACK on the last beat and, for coherent transactions,
//     update the local cache model.
// Any mismatch aborts the simulation via sc_assert.
sb_lock->lock();
bool is_coherent = false;
// --- Reorder Check --- //
unsigned reorder=2; // 2 : Req not found, 1 : Request reordered, 0 : everything is fine
unsigned j=0;
ace5_::AddrPayload sb_ord_req;
while (j<sb_rd_order_q.size()){
sb_ord_req = sb_rd_order_q[j];
if(sb_ord_req.id == rcv_rd_resp.id) {
// Slave must sneak its ID to the resp field.
// Coherent transactions may be answered locally, so the resp/dst match
// is only enforced for plain (non-coherent) reads.
unsigned dst = mem_map_resolve(sb_ord_req.addr);
is_coherent = (sb_ord_req.snoop || sb_ord_req.domain.xor_reduce());
reorder = (dst == rcv_rd_resp.resp) || is_coherent ? 0 : 1;
// The order entry is retired only when the burst's last beat arrives.
if(rcv_rd_resp.last) sb_rd_order_q.erase(sb_rd_order_q.begin()+j);
break;
}
j++;
}
// --------------------- //
// Linear search of the expected-beat queue; first equal beat is consumed.
bool found = false;
j=0;
//if (sb_ord_req.snoop == 0) {
while (j<(*sb_rd_resp_q)[MASTER_ID-SLAVE_NUM].size()){
msg_tb_wrap< ace5_::ReadPayload > sb_resp = (*sb_rd_resp_q)[MASTER_ID-SLAVE_NUM][j];
if (eq_rd_data(rcv_rd_resp, sb_resp.dut_msg)){
// Latency statistics are accounted once per transaction (on last beat).
if (sb_resp.dut_msg.last) {
rd_resp_delay += ((sc_time_stamp() - sb_resp.time_gen) / clk_period) - 1;
rd_resp_count++;
}
rd_resp_data_count++;
last_rd_sinked_cycle = (sc_time_stamp() / clk_period);
(*sb_rd_resp_q)[MASTER_ID-SLAVE_NUM].erase((*sb_rd_resp_q)[MASTER_ID-SLAVE_NUM].begin()+j);
found = true;
break;
}
j++;
}
//} else {
//  found = true; // Ignore the check if it's a Snoop access
//}
// ACE requires an RACK after the last read beat; it is queued here and
// injected by the main thread.
if (rcv_rd_resp.last) {
ace5_::RACK tmp_rack;
tmp_rack.rack = 1;
stored_rd_ack.push(tmp_rack);
}
// --- DEPRECATED --- This checks absolute order among all TIDs
//AXI4_R sb_val = (*sb_rd_resp_q)[MASTER_ID].front();
//bool found = (rcv_rd_resp == sb_val);
//if(found) (*sb_rd_resp_q)[MASTER_ID].erase((*sb_rd_resp_q)[MASTER_ID].begin());
if(!found){
std::cout<< "\n\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "RD-Resp : "<< rcv_rd_resp << " . NOT FOUND! @" << sc_time_stamp() << "\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "-SB_front - "<< (*sb_rd_resp_q)[MASTER_ID-SLAVE_NUM].front() << "\n";
error_sb_rd_resp_not_found++;
sc_assert(0);
// sc_stop();
}else if(reorder==2) {
std::cout<< "\n\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "RD-Resp : "<< rcv_rd_resp << " . Respective Request wasn't found!!! @" << sc_time_stamp() << "\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "-REQ_front - "<< sb_rd_order_q.front() << "\n";
sc_assert(0);
}else if(reorder==1) {
std::cout<< "\n\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "RD-Resp : "<< rcv_rd_resp << " . Got Reordered !!! @" << sc_time_stamp() << "\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "REQ-Ordered - "<< sb_ord_req << "\n";
sc_assert(0);
}else{
std::cout<< "[Master " << MASTER_ID <<"] " << "RD-Resp OK : << " << rcv_rd_resp << " @" << sc_time_stamp() << "\n";
if (is_coherent){
// Coherent read completed — advance the cache-line state machine.
upd_cache_read(sb_ord_req, rcv_rd_resp);
} else {
unsigned dbg_non_coh = 0; // debug breakpoint anchor; intentionally unused
}
rd_resp_ej++;
}
std::cout.flush();
sb_lock->unlock();
}; // End of READ Response Verify
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void ace_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::verify_wr_resp(ace5_::WRespPayload &rcv_wr_resp){
// Checks a received WRITE response (B beat) against the scoreboard:
//  1) find the oldest outstanding write request with the same id (reorder check),
//  2) find the matching expected response in this master's queue,
//  3) queue a WACK and, for coherent transactions, update the cache model.
// Any mismatch aborts the simulation via sc_assert.
sb_lock->lock();
bool is_coherent = false;
// --- Reorder Check --- //
int reorder = 2; // 2 : Req not found, 1 : Request reordered, 0 : everything is fine
unsigned int j = 0;
ace5_::AddrPayload sb_ord_req;
while (j<sb_wr_order_q.size()) {
sb_ord_req = sb_wr_order_q[j];
if(sb_ord_req.id == rcv_wr_resp.id) {
// Slave must sneak its ID into the first data byte of every beat (aka data[0]).
// NOTE(review): unlike verify_rd_resp, the reorder test here does not
// exempt coherent transactions (no "|| is_coherent") — confirm whether
// locally-answered coherent writes can trip this check.
unsigned dst = mem_map_resolve(sb_ord_req.addr);
is_coherent = (sb_ord_req.snoop || sb_ord_req.domain.xor_reduce());
reorder = (dst == rcv_wr_resp.resp) ? 0 : 1;
sb_wr_order_q.erase(sb_wr_order_q.begin()+j);
break;
}
j++;
}
// --------------------- //
// Verify Responce
// Linear search of the expected-response queue; first equal entry is consumed.
bool found = false;
j = 0;
while (j<(*sb_wr_resp_q)[MASTER_ID-SLAVE_NUM].size()){
msg_tb_wrap< ace5_::WRespPayload > sb_resp = (*sb_wr_resp_q)[MASTER_ID-SLAVE_NUM][j];
if (eq_wr_resp(sb_resp.dut_msg, rcv_wr_resp)){
wr_resp_delay += ((sc_time_stamp() - sb_resp.time_gen) / clk_period) - 1;
wr_resp_count++;
(*sb_wr_resp_q)[MASTER_ID-SLAVE_NUM].erase((*sb_wr_resp_q)[MASTER_ID-SLAVE_NUM].begin()+j);
found = true;
break;
}
j++;
}
// ACE requires a WACK after the write response; queued here, injected by
// the main thread. (B has a single beat, hence no "last" qualification.)
//if (rcv_wr_resp.last) {
ace5_::WACK tmp_wack;
tmp_wack.wack = 1;
stored_wr_ack.push(tmp_wack);
//}
if(!found){
std::cout<< "\n\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "WR-Resp : "<< rcv_wr_resp << " . NOT FOUND! @" << sc_time_stamp() << "\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "-SB_front - "<< (*sb_wr_resp_q)[MASTER_ID-SLAVE_NUM].front() << "\n";
error_sb_wr_resp_not_found++;
sc_assert(0);
// sc_stop();
}else if(reorder==2) {
std::cout<< "\n\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "WR-Resp : "<< rcv_wr_resp << " . Respective Request wasn't found!!! @" << sc_time_stamp() << "\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "-REQ_front - "<< sb_wr_order_q.front() << "\n";
sc_assert(0);
}else if(reorder==1) {
std::cout<< "\n\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "WR-Resp : "<< rcv_wr_resp << " . Got Reordered !!! @" << sc_time_stamp() << "\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "REQ-Ordered - "<< sb_ord_req << "\n";
sc_assert(0);
}else{
std::cout<< "[Master " << MASTER_ID <<"] " << "WR-Resp OK : << " << rcv_wr_resp << "\n";
if (is_coherent) {
// Coherent write completed — advance the cache-line state machine.
upd_cache_write(sb_ord_req, rcv_wr_resp);
} else {
unsigned dbg_non_coh = 0; // debug breakpoint anchor; intentionally unused
}
wr_resp_ej++;
}
std::cout.flush();
sb_lock->unlock();
}; // End of WRITE Response Verify
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
bool ace_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::eq_rd_data (ace5_::ReadPayload &rcv_rd_data, ace5_::ReadPayload &sb_rd_data) {
// Two READ beats match when id (masked down to the network's ID width),
// data, resp and last all agree. ruser is intentionally not compared.
const unsigned id_mask = (1u << dnp::ace::ID_W) - 1;
if ((rcv_rd_data.id & id_mask) != (sb_rd_data.id & id_mask)) return false;
if (!(rcv_rd_data.data == sb_rd_data.data)) return false;
if (!(rcv_rd_data.resp == sb_rd_data.resp)) return false;
return (rcv_rd_data.last == sb_rd_data.last);
};
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
bool ace_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::eq_wr_resp (ace5_::WRespPayload &rcv_wr_resp, ace5_::WRespPayload &sb_wr_resp) {
// Two WRITE responses match when id (masked down to the network's ID width)
// and resp agree. buser is intentionally not compared.
const unsigned id_mask = (1u << dnp::ace::ID_W) - 1;
const bool ids_match = ((rcv_wr_resp.id & id_mask) == (sb_wr_resp.id & id_mask));
return ids_match && (rcv_wr_resp.resp == sb_wr_resp.resp);
};
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
unsigned ace_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::mem_map_resolve(ace5_::Addr &addr){
// Map a global address to the index of the slave owning it, by scanning the
// inclusive [base, bound] ranges of addr_map. Asserts if no range matches.
// Loop index is unsigned to match SLAVE_NUM (avoids signed/unsigned compare).
for (unsigned int i=0; i<SLAVE_NUM; ++i) {
if (addr>=addr_map[i][0].read() && addr <= addr_map[i][1].read()) return i;
}
NVHLS_ASSERT_MSG(0, "Target Addr not found!");
return 0; // Unreachable with a well-formed memory map; keeps all paths returning.
}
#endif // _ACE_MASTER_H_
|
ic-lab-duth/NoCpad
|
tb/tb_ace/ace_slave.h
|
<gh_stars>1-10
#ifndef AXI_SLAVE_H
#define AXI_SLAVE_H
#include "systemc.h"
#include "../../src/include/dnp_ace_v0.h"
#include <deque>
#include <queue>
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#define AXI_TID_NUM 5
#define AXI_ADDR_MAX 0xffffffff
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
// Behavioral ACE slave for the testbench: sinks AR/AW/W beats, verifies them
// against the shared scoreboard, and generates R/B responses with random stalls.
SC_MODULE(ace_slave) {
typedef typename ace::ace5<axi::cfg::ace> ace5_;
typedef typename axi::AXI4_Encoding enc_;
sc_in_clk clk;
sc_in <bool> rst_n;
sc_in<bool> stop_gen; // Not Used
// Per-slave inclusive address ranges: addr_map[i][0]=base, addr_map[i][1]=bound.
sc_in< sc_uint<32> > addr_map[SLAVE_NUM][2];
Connections::In<ace5_::AddrPayload> ar_in;
Connections::Out<ace5_::ReadPayload> r_out;
Connections::In<ace5_::AddrPayload> aw_in;
Connections::In<ace5_::WritePayload> w_in;
Connections::Out<ace5_::WRespPayload> b_out;
// Scoreboard
// Shared (pointer-injected) queues of expected traffic, guarded by sb_lock.
sc_mutex *sb_lock;
std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload> > > *sb_rd_req_q;
std::vector< std::deque< msg_tb_wrap<ace5_::ReadPayload> > > *sb_rd_resp_q;
std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload> > > *sb_wr_req_q;
std::vector< std::deque< msg_tb_wrap<ace5_::WritePayload> > > *sb_wr_data_q;
std::vector< std::deque< msg_tb_wrap<ace5_::WRespPayload> > > *sb_wr_resp_q;
int SLAVE_ID = -1; // set by the testbench before simulation starts
// Percentage (0-99) chance of stalling a channel on a given cycle.
unsigned int AXI_STALL_RATE_RD;
unsigned int AXI_STALL_RATE_WR;
// int FLOW_CTRL; // 0: READY-VALID
// // 1: CREDITS
// // 2: FIFO
// // 3: Credits fifo based
unsigned int total_cycles;
sc_time clk_period;
// Responses generated but not yet pushed towards the NoC.
std::queue<ace5_::ReadPayload > stored_rd_resp;
std::queue<ace5_::WRespPayload> stored_wr_resp;
// AW requests whose B response is still pending (FIFO order).
std::deque<ace5_::AddrPayload> wr_to_get_resp;
// Read Response Generator
// Statistics counters (reset in do_cycle).
int rd_resp_val;
int rd_resp_generated;
int rd_resp_inj;
int wr_resp_generated;
int wr_resp_inj;
// Read Req Sink
int rd_req_ej;
int wr_req_ej;
int wr_data_ej;
// Error
// Scoreboard-miss counters; non-zero means a verification failure.
int error_sb_rd_req_not_found;
int error_sb_wr_req_not_found;
int error_sb_wr_data_not_found;
// Functions
void do_cycle();
void gen_rd_resp(ace5_::AddrPayload &rcv_rd_req );
void gen_wr_resp(unsigned wr_initiator);
bool verify_rd_req(ace5_::AddrPayload &rcv_rd_req);
bool verify_wr_req(ace5_::AddrPayload &rcv_wr_req);
bool verify_wr_data(ace5_::WritePayload &rcv_wr_data, unsigned &wr_initiator);
bool eq_rd_req (ace5_::AddrPayload &rcv_rd_req , ace5_::AddrPayload &sb_rd_req);
bool eq_wr_req (ace5_::AddrPayload &rcv_wr_req , ace5_::AddrPayload &sb_wr_req);
bool eq_wr_data(ace5_::WritePayload &rcv_wr_data, ace5_::WritePayload &sb_wr_data);
// Constructor
SC_HAS_PROCESS(ace_slave);
ace_slave(sc_module_name name_) : sc_module(name_)
{
SLAVE_ID = -1;
AXI_STALL_RATE_RD = 0;
AXI_STALL_RATE_WR = 0;
// Single clocked thread drives all channel activity.
SC_THREAD(do_cycle);
sensitive << clk.pos();
reset_signal_is(rst_n, false);
}
};
// --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- //
// --- --- --- --- --- --- IMPLEMENTATION --- --- --- --- --- --- --- --- //
// --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- //
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void ace_slave<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::do_cycle () {
// Main slave thread. After reset it loops once per clock edge and, subject to
// the random stall rates, (a) sinks+verifies AR/AW/W beats, (b) generates the
// corresponding R/B responses, and (c) injects buffered responses to the NoC.
// FIX(review): removed an unused local `sc_time this_gen_time;`.
total_cycles = 0;
rd_resp_generated = 0;
rd_resp_inj = 0;
rd_req_ej = 0;
wr_resp_generated = 0;
wr_resp_inj = 0;
wr_req_ej = 0;
wr_data_ej = 0;
rd_resp_val = 1;
clk_period = (dynamic_cast<sc_clock *>(clk.get_interface()))->period();
// Error
error_sb_rd_req_not_found = 0;
error_sb_wr_req_not_found = 0;
error_sb_wr_data_not_found = 0;
ar_in.Reset();
r_out.Reset();
aw_in.Reset();
w_in.Reset();
b_out.Reset();
while(1) {
wait();
// READ REQUESTS
// Sink/Verify Read Request + Create the appropriate response
unsigned int rnd_val_sink = rand()%100;
if (rnd_val_sink >= AXI_STALL_RATE_RD) {
ace5_::AddrPayload rcv_rd_req;
if (ar_in.PopNB(rcv_rd_req)) {
// Snoop-initiated reads are not in the scoreboard; skip verification.
if (rcv_rd_req.snoop==0) verify_rd_req(rcv_rd_req);
rd_req_ej++;
gen_rd_resp(rcv_rd_req);
}
}
// WRITE REQUESTS (one stall roll shared by the AW and W channels)
rnd_val_sink = rand()%100;
if (rnd_val_sink >= AXI_STALL_RATE_WR) {
ace5_::AddrPayload rcv_wr_req;
if (aw_in.PopNB(rcv_wr_req)) {
verify_wr_req(rcv_wr_req);
wr_to_get_resp.push_back(rcv_wr_req); // B response owed for this AW
wr_req_ej++;
}
}
if (rnd_val_sink >= AXI_STALL_RATE_WR) {
ace5_::WritePayload rcv_wr_data;
if (w_in.PopNB(rcv_wr_data)) {
unsigned wr_initiator = -1;
verify_wr_data(rcv_wr_data, wr_initiator);
wr_data_ej++;
// A complete write burst earns a B response.
if (rcv_wr_data.last) gen_wr_resp(wr_initiator);
}
}
// RESPONSES
// Inject READ Responses
unsigned int rnd_val_inj = rand()%100;
if (!stored_rd_resp.empty() && (rnd_val_inj>=AXI_STALL_RATE_RD)) {
ace5_::ReadPayload temp_resp = stored_rd_resp.front();
if (r_out.PushNB(temp_resp)) {
stored_rd_resp.pop();
std::cout << "[Slave " << SLAVE_ID << "] : PUSHED RD-Resp " << temp_resp << " @" << sc_time_stamp()
<< std::endl;
rd_resp_inj++;
}
}
// Inject WRITE Responses
rnd_val_inj = rand()%100;
if (!stored_wr_resp.empty() && (rnd_val_inj>=AXI_STALL_RATE_WR)) {
ace5_::WRespPayload temp_resp = stored_wr_resp.front();
if (b_out.PushNB(temp_resp)) {
stored_wr_resp.pop();
std::cout << "[Slave " << SLAVE_ID << "] : PUSHED WR-Resp " << temp_resp << " @" << sc_time_stamp()
<< std::endl;
wr_resp_inj++;
}
}
} // End of while(1)
}; // End of do_cycle
// --------------------------- //
// --- GENERATOR Functions --- //
// --------------------------- //
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void ace_slave<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::gen_rd_resp(ace5_::AddrPayload &rcv_rd_req) {
// Build the full R burst for a read request and queue it in stored_rd_resp.
// Each data byte carries its byte index within the transaction (mod 256), so
// the master can check payload integrity; resp carries this SLAVE_ID so the
// master can detect misrouting/reordering.
sb_lock->lock();
ace5_::ReadPayload cur_beat;
ace5_::ReadPayload beat_at_master; // assembled but unused here — TODO confirm if still needed
cur_beat.id = rcv_rd_req.id;
beat_at_master.id = rcv_rd_req.id;
// Create Response
// Total bytes = (len+1) beats of 2^size bytes each (AXI semantics).
unsigned long int bytes_total = ((rcv_rd_req.len+1)<<rcv_rd_req.size);
unsigned long int byte_count = 0;
// Lane pointer starts at the (unaligned) address offset within the data bus.
unsigned char s_init_ptr = rcv_rd_req.addr % RD_S_LANES;
unsigned char s_ptr = s_init_ptr;
unsigned char s_size = rcv_rd_req.size;
cur_beat.data = 0;
beat_at_master.data = 0;
while(byte_count<bytes_total) {
// Place the running byte index into the current lane of the beat.
cur_beat.data |= ( ((ace5_::Data)(byte_count & 0xFF)) << ((ace5_::Data)(s_ptr*8)));
byte_count++;
// FIXED bursts wrap within the beat's 2^size window; INCR/WRAP walk lanes.
s_ptr = (rcv_rd_req.burst==FIXED) ? ((s_ptr+1)%(1<<s_size)) + s_init_ptr
: (s_ptr+1)%RD_S_LANES ;
// Emit the beat when the size window is filled or the burst is exhausted.
if(((s_ptr%(1<<s_size))==0) || (byte_count == bytes_total)) {
cur_beat.resp = SLAVE_ID;
cur_beat.last = (byte_count == bytes_total);
stored_rd_resp.push(cur_beat);
cur_beat.data = 0;
}
}
sb_lock->unlock();
}; // End of Read generator
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void ace_slave<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::gen_wr_resp(unsigned wr_initiator) {
// Build the B response for the oldest pending AW and queue it towards the
// DUT; mirror it into the scoreboard of the master that initiated the write
// (wr_initiator was recovered from the last W beat by verify_wr_data).
sb_lock->lock();
// Create Response
ace5_::AddrPayload rcv_wr_req = wr_to_get_resp.front();
wr_to_get_resp.pop_front();
ace5_::WRespPayload temp_wr_resp;
temp_wr_resp.id = rcv_wr_req.id;
temp_wr_resp.resp = SLAVE_ID; // sneak this slave's ID for reorder checking
stored_wr_resp.push(temp_wr_resp); // Send WR-Resp to DUT
// If the WR request comes from HOME node, the response will be consumed internally
// (i.e. only initiators inside the master ID range get a scoreboard entry).
if (wr_initiator < MASTER_NUM+SLAVE_NUM) {
msg_tb_wrap<ace5_::WRespPayload> temp_wr_resp_tb;
temp_wr_resp_tb.dut_msg = temp_wr_resp;
temp_wr_resp_tb.time_gen = sc_time_stamp();
(*sb_wr_resp_q)[wr_initiator-SLAVE_NUM].push_back(temp_wr_resp_tb); // Send Beat to ScoreBoard.
wr_resp_generated++;
}
sb_lock->unlock();
}; // End of Write response generator
// ------------------------ //
// --- VERIFY Functions --- //
// ------------------------ //
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
bool ace_slave<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::verify_rd_req (ace5_::AddrPayload &rcv_rd_req) {
// Search this slave's scoreboard queue for a read request equal to the one
// just received; consume it on a hit, abort the simulation on a miss.
sb_lock->lock();
bool matched = false;
for (unsigned int idx = 0; idx < (*sb_rd_req_q)[SLAVE_ID].size(); ++idx) {
msg_tb_wrap< ace5_::AddrPayload > expected = (*sb_rd_req_q)[SLAVE_ID][idx];
if (eq_rd_req(rcv_rd_req, expected.dut_msg)) {
(*sb_rd_req_q)[SLAVE_ID].erase((*sb_rd_req_q)[SLAVE_ID].begin()+idx);
matched = true;
break;
}
}
if (matched) {
std::cout<< "[Slave " << SLAVE_ID <<"] " << "RD Req OK : << " << rcv_rd_req << " @" << sc_time_stamp() << "\n";
} else {
std::cout << "ERR : [Slave " << SLAVE_ID <<"] " << "RD Request : "<< rcv_rd_req << " . NOT FOUND! @" << sc_time_stamp() << "\n";
std::cout << "ERR : [Slave " << SLAVE_ID <<"] " << "-SB_front - "<< (*sb_rd_req_q)[SLAVE_ID].front() << "\n";
error_sb_rd_req_not_found++;
sc_assert(0);
// sc_stop();
}
std::cout.flush();
sb_lock->unlock();
return matched;
}; // End of READ Req Verify
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
bool ace_slave<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::verify_wr_req (ace5_::AddrPayload &rcv_wr_req) {
// Search this slave's scoreboard queue for a write request equal to the one
// just received; consume it on a hit, abort the simulation on a miss.
sb_lock->lock();
bool matched = false;
for (unsigned int idx = 0; idx < (*sb_wr_req_q)[SLAVE_ID].size(); ++idx) {
ace5_::AddrPayload expected = ((*sb_wr_req_q)[SLAVE_ID][idx]).dut_msg;
if (eq_wr_req(rcv_wr_req, expected)) {
(*sb_wr_req_q)[SLAVE_ID].erase((*sb_wr_req_q)[SLAVE_ID].begin()+idx);
matched = true;
break;
}
}
if (matched) {
std::cout<< "[Slave " << SLAVE_ID <<"] " << "WR Req OK : << " << rcv_wr_req << "\n";
} else {
std::cout << "\n";
std::cout << "ERR : [Slave " << SLAVE_ID <<"] " << "WR Request : "<< rcv_wr_req << " . NOT FOUND! @" << sc_time_stamp() << "\n";
std::cout << "ERR : [Slave " << SLAVE_ID <<"] " << "-SB_front - "<< (*sb_wr_req_q)[SLAVE_ID].front() << "\n";
error_sb_wr_req_not_found++;
sc_assert(0);
// sc_stop();
}
std::cout.flush();
sb_lock->unlock();
return matched;
}; // End of WRITE Req Verify
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
bool ace_slave<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::verify_wr_data (ace5_::WritePayload &rcv_wr_data, unsigned &wr_initiator) {
// Search the scoreboard for a W beat equal to the received one; consume it on
// a hit, abort on a miss. On the last beat, recover the initiating master's
// ID from the highest strobed byte of the data (out-param wr_initiator).
bool verified = true;
sb_lock->lock();
bool found=false;
unsigned int j=0;
while (j<(*sb_wr_data_q)[SLAVE_ID].size()){
ace5_::WritePayload sb_value = ((*sb_wr_data_q)[SLAVE_ID][j]).dut_msg;
if (eq_wr_data(rcv_wr_data, sb_value)){
// The last byte of the last beat signals the initiator
if (rcv_wr_data.last.to_uint()) {
// Scan lanes from the top down for the highest enabled strobe bit.
for (int i=WR_S_LANES-1; i>=0;--i) {
if ( (rcv_wr_data.wstrb.to_uint() >> i) & 1 ) {
wr_initiator = ((rcv_wr_data.data >> (i*8)) & 0xFF).to_uint();
break;
}
}
}
(*sb_wr_data_q)[SLAVE_ID].erase((*sb_wr_data_q)[SLAVE_ID].begin()+j);
found = true;
break;
}
j++;
}
if(!found){
std::cout << "ERR : [Slave " << SLAVE_ID <<"] " << "WR Data : "<< rcv_wr_data << " . NOT FOUND! @" << sc_time_stamp() << "\n";
std::cout << "ERR : [Slave " << SLAVE_ID <<"] " << "-SB_front - "<< (*sb_wr_data_q)[SLAVE_ID].front() << "\n";
error_sb_wr_data_not_found++;
sc_assert(0);
// sc_stop();
verified = false;
} else {
std::cout<< "[Slave " << SLAVE_ID <<"] " << "WR Data OK : << " << rcv_wr_data << "\n";
}
std::cout.flush();
sb_lock->unlock();
return verified;
}; // End of WRITE Data Verify
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
bool ace_slave<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::eq_rd_req (ace5_::AddrPayload &rcv_rd_req, ace5_::AddrPayload &sb_rd_req) {
// Two READ requests match on masked id, slave-relative address, and all
// burst/coherence attributes. cache/auser are intentionally not compared.
const unsigned id_mask = (1u << dnp::ace::ID_W) - 1;
// The received address is relative to this slave's base address.
return ((rcv_rd_req.id & id_mask) == (sb_rd_req.id & id_mask)) &&
       (rcv_rd_req.addr == (sb_rd_req.addr - addr_map[SLAVE_ID][0].read())) &&
       (rcv_rd_req.burst == sb_rd_req.burst) &&
       (rcv_rd_req.len == sb_rd_req.len) &&
       (rcv_rd_req.size == sb_rd_req.size) &&
       (rcv_rd_req.snoop == sb_rd_req.snoop) &&
       (rcv_rd_req.domain == sb_rd_req.domain) &&
       (rcv_rd_req.barrier == sb_rd_req.barrier);
};
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
bool ace_slave<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::eq_wr_req (ace5_::AddrPayload &rcv_wr_req, ace5_::AddrPayload &sb_wr_req) {
// Two WRITE requests match on masked id, slave-relative address, and all
// burst/coherence attributes. cache/auser are intentionally not compared.
const unsigned id_mask = (1u << dnp::ace::ID_W) - 1;
if ((rcv_wr_req.id & id_mask) != (sb_wr_req.id & id_mask)) return false;
// The received address is relative to this slave's base address.
if (!(rcv_wr_req.addr == (sb_wr_req.addr - addr_map[SLAVE_ID][0].read()))) return false;
if (!(rcv_wr_req.burst == sb_wr_req.burst)) return false;
if (!(rcv_wr_req.len == sb_wr_req.len)) return false;
if (!(rcv_wr_req.size == sb_wr_req.size)) return false;
if (!(rcv_wr_req.snoop == sb_wr_req.snoop)) return false;
if (!(rcv_wr_req.domain == sb_wr_req.domain)) return false;
return (rcv_wr_req.barrier == sb_wr_req.barrier);
};
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
bool ace_slave<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::eq_wr_data (ace5_::WritePayload &rcv_wr_data, ace5_::WritePayload &sb_wr_data) {
// Two W beats match when their strobes agree lane-for-lane, every strobed
// lane carries the same data byte, and last agrees. wuser is not compared.
bool same = true;
for (unsigned int lane = 0; lane < WR_S_LANES; ++lane) {
const bool rcv_en = ((rcv_wr_data.wstrb >> lane) & 1) != 0;
const bool sb_en  = ((sb_wr_data.wstrb >> lane) & 1) != 0;
same = same && (rcv_en == sb_en);
if (rcv_en) {
same = same && (((rcv_wr_data.data >> (8 * lane)) & 0xFF) == ((sb_wr_data.data >> (8 * lane)) & 0xFF));
}
}
same = same && (rcv_wr_data.last == sb_wr_data.last);
return same;
};
#endif // AXI_SLAVE_H
|
ic-lab-duth/NoCpad
|
src/axi_master_if_reord.h
|
// --------------------------------------------------------- //
// MASTER-IF Is where the MASTER CONNECTS!!!!! //
// //
// Aka. Master <-> Master-IF <-> NoC <-> Slave-IF <-> Slave //
// --------------------------------------------------------- //
#ifndef AXI4_MASTER_IF_CON_H
#define AXI4_MASTER_IF_CON_H
#include "systemc.h"
#include "nvhls_connections.h"
#include "./include/flit_axi.h"
#include <axi/axi4.h>
#include "./include/axi4_configs_extra.h"
#include "./include/duth_fun.h"
#define LOG_MAX_OUTS 8
// --- Helping Data structures --- //
// Per-TID outstanding-transaction tracking at the master interface.
struct outs_table_entry {
  sc_uint<dnp::D_W> dst_last; // destination of the last request sent with this TID
  sc_uint<LOG_MAX_OUTS> sent; // number of requests of this TID still in flight
  bool reorder; // true while responses of this TID may arrive out of order
};
// Info passed between packetizer and depacketizer to inform about new and finished transactions.
struct order_info {
  sc_uint<dnp::ID_W> tid; // transaction ID the notification refers to
  sc_uint<dnp::D_W> dst; // destination node of the transaction
  bool reord; // true when the transaction goes through the reorder buffer
  unsigned char ticket; // reorder-buffer slot reserved for it (if reord)
  // Debug pretty-printer (also used by the testbench logs).
  inline friend std::ostream& operator << ( std::ostream& os, const order_info& info ) {
    os <<"TID: "<< info.tid <<", Dst: "<< info.dst <<", Ticket: "<< info.ticket;
#ifdef SYSTEMC_INCLUDED
    os << std::dec << " @" << sc_time_stamp();
#else
    os << std::dec << " @" << "no-timed";
#endif
    return os;
  }
#ifdef SYSTEMC_INCLUDED
  // Only for SystemC
  inline friend void sc_trace(sc_trace_file* tf, const order_info& info, const std::string& name) {
    sc_trace(tf, info.tid, name + ".tid");
    sc_trace(tf, info.dst, name + ".dst");
    sc_trace(tf, info.ticket, name + ".ticket");
  }
#endif
};
// Entries of data storage placed to form a linked list and keep order among TIDs.
// One slot of the reorder buffer: a stored flit plus linked-list metadata.
template <class FLIT_T>
struct reorder_buff_entry {
  FLIT_T flit; // buffered response flit
  unsigned char nxt_flit; // index of the next slot in this TID's chain
  bool valid; // slot holds a flit waiting to be drained
};
// Each TID has a head/tail pointer at the storage buffer, to service each TID independently.
struct reorder_book_entry {
  unsigned char head_flit; // first buffered flit of this TID's chain
  unsigned char tail_flit; // last buffered flit of this TID's chain
  unsigned char hol_expect; // ticket expected at the head-of-line next
};
#define WR_REORD_SLOTS 3
#define RD_REORD_SLOTS 3
// --- Master IF --- //
// AXI Master connects the independent AXI RD and WR cahnnels to the interface
// The interface gets the Requests and independently packetize and send them into the network
// The Responses are getting depacketized into a seperate thread and are fed back to the MASTER
// Thus Master interface comprises of 4 distinct/parallel blocks WR/RD pack and WR/RD depack
template <typename cfg>
SC_MODULE(axi_master_if) {
typedef typename axi::axi4<axi::cfg::standard_duth> axi4_;
typedef typename axi::AXI4_Encoding enc_;
typedef flit_dnp<cfg::RREQ_PHITS> rreq_flit_t;
typedef flit_dnp<cfg::RRESP_PHITS> rresp_flit_t;
typedef flit_dnp<cfg::WREQ_PHITS> wreq_flit_t;
typedef flit_dnp<cfg::WRESP_PHITS> wresp_flit_t;
typedef sc_uint< nvhls::log2_ceil<cfg::RRESP_PHITS>::val > cnt_phit_rresp_t;
typedef sc_uint< nvhls::log2_ceil<cfg::WREQ_PHITS>::val > cnt_phit_wreq_t;
const unsigned char LOG_RD_M_LANES = nvhls::log2_ceil<cfg::RD_LANES>::val;
const unsigned char LOG_WR_M_LANES = nvhls::log2_ceil<cfg::WR_LANES>::val;
sc_in_clk clk;
sc_in <bool> rst_n;
sc_in < sc_uint<(dnp::AH_W+dnp::AL_W)> > addr_map[cfg::SLAVE_NUM][2];
sc_in< sc_uint<dnp::S_W> > THIS_ID;
// AXI MASTER Side Channels
// --- READ --- //
Connections::In<axi4_::AddrPayload> ar_in{"ar_in"};
Connections::Out<axi4_::ReadPayload> r_out{"r_out"};
// --- WRITE --- //
Connections::In<axi4_::AddrPayload> aw_in{"aw_in"};
Connections::In<axi4_::WritePayload> w_in{"w_in"};
Connections::Out<axi4_::WRespPayload> b_out{"b_out"};
// NoC Side Channels
Connections::Out<rreq_flit_t> rd_flit_out{"rd_flit_out"};
Connections::In<rresp_flit_t> rd_flit_in{"rd_flit_in"};
Connections::Out<wreq_flit_t> wr_flit_out{"wr_flit_out"};
Connections::In<wresp_flit_t> wr_flit_in{"wr_flit_in"};
// --- READ Internals --- //
// FIFOs that pass initiation and finish transactions between Pack-Depack
sc_fifo<order_info> rd_trans_init{"rd_trans_init"};
sc_fifo<order_info> rd_trans_fin{"rd_trans_fin"};
outs_table_entry rd_out_table[(1<<dnp::ID_W)];
bool rd_reord_avail[RD_REORD_SLOTS]; // Available slots of Reorder Buffer
reorder_buff_entry<rresp_flit_t> rd_reord_buff[RD_REORD_SLOTS]; // The Reorder buffer storage, plus link list metadata
reorder_book_entry rd_reord_book[(1<<dnp::ID_W)]; // Bookeeping information of the linked list
// --- WRITE Reordering --- //
sc_fifo<order_info> wr_trans_init{"wr_trans_init"};
sc_fifo<order_info> wr_trans_fin{"wr_trans_fin"};
outs_table_entry wr_out_table[(1<<dnp::ID_W)]; // Holds the OutStanding Transactions | Hint : TID_W=4 => 16 slots x 16bits (could be less)
bool wr_reord_avail[WR_REORD_SLOTS]; // Available slots of Reorder Buffer
reorder_buff_entry<wresp_flit_t> wr_reord_buff[WR_REORD_SLOTS]; // The Reorder buffer storage, plus link list metadata
reorder_book_entry wr_reord_book[(1<<dnp::ID_W)]; // Bookeeping information of the linked list
// Constructor
SC_HAS_PROCESS(axi_master_if);
// Constructor: size the pack<->depack notification FIFOs (depth 3) and spawn
// the four independent threads (RD/WR packetizers and depacketizers).
axi_master_if(sc_module_name name_="axi_master_if")
:
sc_module (name_),
rd_trans_init (3),
rd_trans_fin (3),
wr_trans_init (3),
wr_trans_fin (3)
{
SC_THREAD(rd_req_pack_job);
sensitive << clk.pos();
async_reset_signal_is(rst_n, false);
SC_THREAD(rd_resp_depack_job);
sensitive << clk.pos();
async_reset_signal_is(rst_n, false);
SC_THREAD(wr_req_pack_job);
sensitive << clk.pos();
async_reset_signal_is(rst_n, false);
SC_THREAD(wr_resp_depack_job);
sensitive << clk.pos();
async_reset_signal_is(rst_n, false);
}
//-------------------------------//
//--- READ REQuest Packetizer ---//
//-------------------------------//
  // READ REQuest Packetizer.
  // Pops read requests (AR) from the Master, decides whether the matching
  // response could arrive out of order (same ID already outstanding towards a
  // different destination), reserves reorder-buffer tickets for that case via
  // rd_trans_init, and packetizes the request into a SINGLE flit pushed to
  // the network. Completed transactions are continuously drained from
  // rd_trans_fin to release tickets and outstanding counters.
  void rd_req_pack_job () {
    //-- Start of Reset ---//
    for (int i=0; i<(1<<dnp::ID_W); ++i) {
      rd_out_table[i].dst_last = 0;
      rd_out_table[i].sent = 0;
      rd_out_table[i].reorder = false;
    }
    for (int i=0; i<RD_REORD_SLOTS; ++i) rd_reord_avail[i] = true;
    unsigned char rd_avail_reord_slots = RD_REORD_SLOTS;
    unsigned char this_dst = -1;       // -1 wraps to max: "invalid" sentinel
    unsigned char this_ticket = -1;
    unsigned char head_ticket = -1;
    ar_in.Reset();
    rd_flit_out.Reset();
    unsigned char total_rd_flits_sent = 0;
    for (int i=0; i<(1<<dnp::ID_W); ++i) rd_out_table[i].dst_last=0;
    axi4_::AddrPayload this_req;
    //-- End of Reset ---//
    while(1) {
      wait();
      if(ar_in.PopNB(this_req)) {
        // A new request must stall until it is eligible to depart.
        // Reordering of responses of the same IDs are allowed and handled by the depacketizer in the reorder buffer
        // The response that might get reordered must be able to fit in the buffer
        outs_table_entry sel_entry = rd_out_table[this_req.id.to_uint()];
        this_dst = addr_lut_rd(this_req.addr);
        // Check if reorder may occur: same TID in flight towards a different slave
        bool may_reorder = (sel_entry.sent>0) && (sel_entry.dst_last != this_dst);
        bool through_reord = may_reorder || sel_entry.reorder;
        unsigned char wait_for = sel_entry.sent;
        // Calculate the size of the transaction to check if its able to fit in the reorder buffer
        unsigned int bytes_total = ((this_req.len+1)<<this_req.size.to_uint());
        unsigned int phits_total = (bytes_total>>1) + 4; // each phit stores 2 bytes PLUS 2 header phits.
        unsigned int flits_total = (phits_total & 0x3) ? (phits_total>>2)+1 : (phits_total>>2); // ceil(phits/4)
        // All needed slots must available beforehand, thus wait until space has been freed or its no longer possible to be reordered
        // (total_rd_flits_sent>9 additionally throttles the number of in-flight read flits)
        while((through_reord && (rd_avail_reord_slots < flits_total)) || (total_rd_flits_sent>9)) {
          order_info rcv_fin;
          if(rd_trans_fin.nb_read(rcv_fin)) {
            if(rd_out_table[rcv_fin.tid].sent==1) rd_out_table[rcv_fin.tid].reorder = false;
            rd_out_table[rcv_fin.tid].sent = rd_out_table[rcv_fin.tid].sent - 1;
            total_rd_flits_sent--;
            if (rcv_fin.ticket<RD_REORD_SLOTS) {  // ticket>=SLOTS means bypass (no slot held)
              rd_reord_avail[rcv_fin.ticket] = true;
              rd_avail_reord_slots++;
            }
          }
          wait();
          // Re-evaluate eligibility: finished transactions may have cleared the hazard
          sel_entry = rd_out_table[this_req.id.to_uint()];
          may_reorder = (sel_entry.sent>0) && (sel_entry.dst_last != this_dst);
          through_reord = may_reorder || sel_entry.reorder;
        }; // End of stall-for-resources loop
        // Required space is guaranteed. Get tickets for the reorder buffer and
        // inform the depacketizer to set the reorder buffer
        order_info trans_expect;
        trans_expect.tid = this_req.id.to_uint();
        trans_expect.dst = this_dst;
        // get the reorder tickets
        head_ticket = -1;
        if (through_reord && RD_REORD_SLOTS) { // loop to get tickets and inform Depack
          for(unsigned int tct=0; tct<flits_total; tct++) {  // one ticket per expected response flit
            rd_out_table[this_req.id.to_uint()].reorder = true;
            rd_avail_reord_slots--;
            //#pragma hls_unroll yes
            for (int i = 0; i < RD_REORD_SLOTS; ++i) {  // priority-pick first free slot
              if (rd_reord_avail[i]) {
                this_ticket = i;
                rd_reord_avail[i] = false;
                break;
              }
            }
            if (head_ticket >= RD_REORD_SLOTS) head_ticket = this_ticket; // remember first ticket for the flit header
            trans_expect.reord = true;
            trans_expect.ticket = this_ticket;
            // Send the allocated slot (ticket) to depacketizer, and update local info
            rd_out_table[this_req.id.to_uint()].dst_last = this_dst;
            rd_out_table[this_req.id.to_uint()].sent = rd_out_table[this_req.id.to_uint()].sent + 1;
            total_rd_flits_sent++;
            rd_trans_init.write(trans_expect);
            // Keep draining finished transactions while allocating, to avoid deadlock
            order_info rcv_fin;
            if (rd_trans_fin.nb_read(rcv_fin)) {
              if (rd_out_table[rcv_fin.tid].sent == 1) rd_out_table[rcv_fin.tid].reorder = false;
              rd_out_table[rcv_fin.tid].sent = rd_out_table[rcv_fin.tid].sent - 1;
              total_rd_flits_sent--;
              if (rcv_fin.ticket < RD_REORD_SLOTS) {
                rd_reord_avail[rcv_fin.ticket] = true;
                rd_avail_reord_slots++;
              }
            } // end of fin queue checking
            wait();
          }
        } else {
          // No reorder is possible - no tickets are needed
          trans_expect.reord = false;
          trans_expect.ticket = flits_total;  // ticket field carries the expected flit count instead
          rd_out_table[this_req.id.to_uint()].dst_last = this_dst;
          rd_out_table[this_req.id.to_uint()].sent = rd_out_table[this_req.id.to_uint()].sent + flits_total;
          total_rd_flits_sent += flits_total;
          rd_trans_init.write(trans_expect);
        }; // End of ticket allocation (reorder vs. bypass)
      } else {
        // If no initiating RD Req from Master, check for finished Outstanding trans
        order_info rcv_fin;
        if(rd_trans_fin.nb_read(rcv_fin)) {
          if(rd_out_table[rcv_fin.tid].sent==1) rd_out_table[rcv_fin.tid].reorder = false;
          rd_out_table[rcv_fin.tid].sent = rd_out_table[rcv_fin.tid].sent - 1;
          total_rd_flits_sent--;
          if (rcv_fin.ticket<RD_REORD_SLOTS) {
            rd_reord_avail[rcv_fin.ticket] = true;
            rd_avail_reord_slots++;
          }
        }
        continue;
      }
      // The required conditions are met so...
      // --- Start Packetization --- //
      // Packetize request into a flit. The fields are described in DNP20
      rreq_flit_t tmp_flit;
      tmp_flit.type = SINGLE; // all request fits in at single flits thus SINGLE
      tmp_flit.data[0] = ((sc_uint<dnp::PHIT_W>)head_ticket << dnp::req::REORD_PTR)|
                         ((sc_uint<dnp::PHIT_W>)this_req.id << dnp::req::ID_PTR) |
                         ((sc_uint<dnp::PHIT_W>)dnp::PACK_TYPE__RD_REQ << dnp::T_PTR) |
                         ((sc_uint<dnp::PHIT_W>)0 << dnp::Q_PTR) |
                         ((sc_uint<dnp::PHIT_W>)this_dst << dnp::D_PTR) |
                         ((sc_uint<dnp::PHIT_W>)THIS_ID.read() << dnp::S_PTR) |
                         ((sc_uint<dnp::PHIT_W>)0 << dnp::V_PTR) ;
      tmp_flit.data[1] = ((sc_uint<dnp::PHIT_W>)this_req.len << dnp::req::LE_PTR) |
                         ((sc_uint<dnp::PHIT_W>) this_req.addr & 0xffff) << dnp::req::AL_PTR ;
      tmp_flit.data[2] = ((sc_uint<dnp::PHIT_W>)this_req.burst << dnp::req::BU_PTR) |
                         ((sc_uint<dnp::PHIT_W>)this_req.size << dnp::req::SZ_PTR) |
                         ((sc_uint<dnp::PHIT_W>)(this_req.addr >> dnp::AL_W) << dnp::req::AH_PTR ) ;
      // Try to push to Network, but continue reading for incoming finished transactions
      while(!rd_flit_out.PushNB(tmp_flit)) {
        order_info rcv_fin;
        if(rd_trans_fin.nb_read(rcv_fin)) {
          if(rd_out_table[rcv_fin.tid].sent==1) rd_out_table[rcv_fin.tid].reorder = false;
          rd_out_table[rcv_fin.tid].sent = rd_out_table[rcv_fin.tid].sent - 1;
          total_rd_flits_sent--;
          if (rcv_fin.ticket<RD_REORD_SLOTS) {
            rd_reord_avail[rcv_fin.ticket] = true;
            rd_avail_reord_slots++;
          }
        }
        wait();
      }
    } // End of while(1)
  }; // End of Read Request Packetizer
  //-----------------------------------//
  //--- READ RESPonse DE-Packetizer ---//
  //-----------------------------------//
  // READ RESPonse DE-Packetizer.
  // Receives read-response flits from the network and reconstructs AXI R
  // beats for the Master. Flits of transactions that may be reordered are
  // first parked in the per-ticket reorder buffer (rd_reord_buff), chained
  // in per-TID linked lists (rd_reord_book); all others take the bypass
  // path. Each consumed flit is reported back to the packetizer through
  // rd_trans_fin so its ticket/credit can be released.
  void rd_resp_depack_job () {
    //--- Start of Reset ---//
    const unsigned int MAX_RD_FLITS = 1024;
    for (int i=0; i<(1<<dnp::ID_W); ++i){
      rd_reord_book[i].tail_flit = -1;   // -1 wraps to >=RD_REORD_SLOTS: "empty list" sentinel
      rd_reord_book[i].head_flit = -1;
      rd_reord_book[i].hol_expect = 0;   // head-of-line flits expected via the bypass path
    }
    for (int i=0; i<RD_REORD_SLOTS; ++i){
      rd_reord_buff[i].nxt_flit = -1;
      rd_reord_buff[i].valid = false;
    }
    bool has_active_trans = false;
    axi4_::AddrPayload active_trans;
    unsigned char active_trans_dst = -1;
    bool bypass_valid = false;     // a bypass flit is buffered and not yet consumed
    bool bypass_active = false;    // the active transaction flows through bypass
    rresp_flit_t bypass_flit;
    bool reord_active = false;     // the active transaction flows through the reorder buffer
    unsigned char reord_slot_active = -1;
    unsigned char rcv_ticket_nxt = -1;
    unsigned int phits_data_count = 0;
    rresp_flit_t cur_flit;
    r_out.Reset();
    rd_flit_in.Reset();
    sc_uint<dnp::SZ_W> final_size;
    sc_uint<dnp::AP_W> addr_part;
    sc_uint<dnp::AP_W> addr_init_aligned;
    sc_uint<8> axi_lane_ptr;
    cnt_phit_rresp_t flit_phit_ptr;
    sc_uint<16> bytes_total;
    sc_uint<16> bytes_depacked;
    unsigned char resp_build_tmp[cfg::RD_LANES];
    axi4_::ReadPayload builder_resp;
    //--- End of Reset ---//
    #pragma hls_pipeline_init_interval 1
    #pragma pipeline_stall_mode flush
    while(1) {
      wait();
      // Set the reorder buffer's state to expect the in-flight responses according the Init transactions
      order_info trans_expect;
      if(rd_trans_init.nb_read(trans_expect)) {
        if(trans_expect.reord) {
          // PUSH new item to linked list
          if(rd_reord_book[trans_expect.tid].head_flit>=RD_REORD_SLOTS) {
            rd_reord_book[trans_expect.tid].head_flit = trans_expect.ticket;
          }
          // Change old tail's nxt_flit to point to new ticket.
          if(rd_reord_book[trans_expect.tid].tail_flit<RD_REORD_SLOTS) {
            rd_reord_buff[rd_reord_book[trans_expect.tid].tail_flit].nxt_flit = trans_expect.ticket;
          }
          // Update the TID's tail.
          rd_reord_book[trans_expect.tid].tail_flit = trans_expect.ticket;
        } else {
          // When no reorder, ticket gives the number of expected flits
          rd_reord_book[trans_expect.tid].hol_expect += trans_expect.ticket;
        }
      } // End of new trans sink
      // Handle incoming flits.
      // Either a packet that bypasses the reorder buffer,
      // or Store it in the reorder buffer
      if (!bypass_valid) {
        rresp_flit_t flit_rcv;
        if (rd_flit_in.PopNB(flit_rcv)) {
          unsigned char rcv_ticket;
          if (flit_rcv.is_head() || flit_rcv.is_single())
            rcv_ticket = ((flit_rcv.data[0] >> (dnp::rresp::REORD_PTR)) & ((1<<dnp::REORD_W)-1));
          else
            rcv_ticket = rcv_ticket_nxt;  // body/tail flits follow the list of the head's ticket
          // Through reorder when the ticket belongs to its slots
          if(rcv_ticket<RD_REORD_SLOTS && RD_REORD_SLOTS) {
            rd_reord_buff[rcv_ticket].flit = flit_rcv;
            rd_reord_buff[rcv_ticket].valid = true;
            rcv_ticket_nxt = rd_reord_buff[rcv_ticket].nxt_flit;
          } else {
            // Otherwise its a bypass flit
            bypass_flit = flit_rcv;
            bypass_valid = true;
            rcv_ticket_nxt = -1;
          }
        }
      }
      // When the depacketizer does not actively reconstructs a response
      // Check if there are conditions to initiate either from reorder buffer, or bypass path
      if (!(bypass_active || reord_active)) {
        if(bypass_valid) {
          bypass_active = true;
          cur_flit = bypass_flit;
        } else if (RD_REORD_SLOTS) {
          // Check ROB for new transactions to initiate.
          // A TID is eligible only when no bypass flits are pending for it
          // (hol_expect==0) and its list head holds a valid flit.
          rresp_flit_t flit_reord;
          #pragma hls_unroll yes
          for (int i=0; i<(1<<dnp::ID_W); ++i) {
            if (rd_reord_book[i].hol_expect==0) {
              if (rd_reord_book[i].head_flit < RD_REORD_SLOTS) {
                if (rd_reord_buff[rd_reord_book[i].head_flit].valid) {
                  cur_flit = rd_reord_buff[rd_reord_book[i].head_flit].flit;
                  reord_slot_active = rd_reord_book[i].head_flit;
                  reord_active = true;
                  break;
                }
              }
            }
          } // End of REORDER CHECK
        }
        // If either bypass/reorder has a valid response, get the transaction info
        if (bypass_active || reord_active) {
          // Decode the head flit's transaction attributes (DNP header fields)
          active_trans.id = (cur_flit.data[0] >> dnp::rresp::ID_PTR) & ((1 << dnp::ID_W) - 1);
          active_trans.burst = (cur_flit.data[0] >> dnp::rresp::BU_PTR) & ((1 << dnp::BU_W) - 1);
          active_trans.size = (cur_flit.data[1] >> dnp::rresp::SZ_PTR) & ((1 << dnp::SZ_W) - 1);
          active_trans.len = (cur_flit.data[1] >> dnp::rresp::LE_PTR) & ((1 << dnp::LE_W) - 1);
          final_size = (unsigned) active_trans.size;
          // Partial lower 8-bit part of address to calculate the initial axi pointer in case of a non-aligned address
          addr_part = (cur_flit.data[1] >> dnp::rresp::AP_PTR) & ((1<<dnp::AP_W) - 1);
          addr_init_aligned = ((addr_part & (cfg::RD_LANES-1)) & ~((1<<final_size)-1));
          // Data Depacketization happens in a loop. Each iteration pops a flit and constructs a beat.
          // Each iteration transfers data bytes from the flit to the AXI beat.
          // bytes_per_iter bytes may be transfered, which is limited by two factors
          // depending the AXI beat size and the bytes in the flit.
          // 1) The available data bytes in the flit is less than the required for the beat
          // 2) The remaining byte lanes are less than the available in the flit
          // For case (1) the flit is emptied and the next flit is popped at the next iteration
          // For case (2) the beat is pushed to Master and the next beat starts in the next iteration
          // For data Depacketization loop, we keep 2 pointers.
          //   axi_lane_ptr  -> to keep track axi byte lanes to place to data
          //   flit_phit_ptr -> to point at the data of the flit
          axi_lane_ptr = addr_init_aligned; // Bytes MOD axi size
          flit_phit_ptr = 0; // Bytes MOD phits in flit
          bytes_total = ((active_trans.len.to_uint()+1)<<final_size);
          bytes_depacked = 0; // Number of DE-packetized bytes
          active_trans_dst = (cur_flit.data[0] >> dnp::S_PTR) & ((1<<dnp::S_W)-1);
          // Drop the head Flit and update the info
          unsigned char this_ticket;
          if(bypass_active) {
            rd_reord_book[active_trans.id.to_uint()].hol_expect--;
            bypass_valid = false;
            this_ticket = -1;  // no reorder slot held by a bypass flit
          } else if (RD_REORD_SLOTS) {
            this_ticket = rd_reord_book[active_trans.id.to_uint()].head_flit;
            unsigned char this_nxt_flit = rd_reord_buff[rd_reord_book[active_trans.id.to_uint()].head_flit].nxt_flit;
            reord_slot_active = this_nxt_flit;
            // List Update
            rd_reord_buff[rd_reord_book[active_trans.id.to_uint()].head_flit].valid = false;
            rd_reord_buff[rd_reord_book[active_trans.id.to_uint()].head_flit].nxt_flit = -1; // Probably not needed
            if (rd_reord_book[active_trans.id.to_uint()].head_flit == rd_reord_book[active_trans.id.to_uint()].tail_flit) {
              rd_reord_book[active_trans.id.to_uint()].tail_flit = -1;
            }
            rd_reord_book[active_trans.id.to_uint()].head_flit = this_nxt_flit;
          }
          // Report the consumed flit back to the packetizer (releases ticket/credit)
          order_info fin_trans;
          fin_trans.tid = active_trans.id.to_uint();
          fin_trans.ticket = this_ticket;
          fin_trans.dst = active_trans_dst;
          rd_trans_fin.write(fin_trans);
        }
      } else if ((bypass_active && bypass_valid) || (reord_active && rd_reord_buff[reord_slot_active].valid)) {
        // After the initiation of transaction, handle the rest of the flits
        if(reord_active) cur_flit = rd_reord_buff[rd_reord_book[active_trans.id.to_uint()].head_flit].flit;
        else cur_flit = bypass_flit;
        // Calculate the bytes to transfer in this iteration
        sc_uint<8> bytes_axi_left = ((1<<final_size) - (axi_lane_ptr & ((1<<final_size)-1)));
        sc_uint<8> bytes_flit_left = ((cfg::RRESP_PHITS<<1) - (flit_phit_ptr<<1));
        sc_uint<8> bytes_per_iter = (bytes_axi_left<bytes_flit_left) ? bytes_axi_left : bytes_flit_left;
        #pragma hls_unroll yes
        build_resp: for (int i = 0; i < (cfg::RD_LANES >> 1); ++i) { // i counts AXI Byte Lanes IN PHITS (i.e. Lanes/bytes_in_phit)
          if (i >= (axi_lane_ptr >> 1) && i < ((axi_lane_ptr + bytes_per_iter) >> 1)) {
            cnt_phit_rresp_t loc_flit_ptr = flit_phit_ptr + (i - (axi_lane_ptr >> 1));
            resp_build_tmp[(i << 1) + 1] =
                (cur_flit.data[loc_flit_ptr] >> dnp::rdata::B1_PTR) & ((1 << dnp::B_W) - 1); // MSB
            resp_build_tmp[(i << 1)] = (cur_flit.data[loc_flit_ptr] >> dnp::rdata::B0_PTR) & ((1 << dnp::B_W) - 1); // LSB
          }
        }
        // transaction event flags
        bool done_job = ((bytes_depacked+bytes_per_iter)==bytes_total); // All bytes are processed
        bool done_flit = (flit_phit_ptr+(bytes_per_iter>>1)==cfg::RRESP_PHITS); // Flit got empty
        bool done_axi = (((bytes_depacked+bytes_per_iter)&((1<<final_size)-1))==0); // Beat got full
        // Drop the flit when Flit is full or all data have been consumed, and inform packetizer
        if( done_job || done_flit ) {
          unsigned char this_ticket;
          if(bypass_active) {
            rd_reord_book[active_trans.id.to_uint()].hol_expect--;
            bypass_valid = false;
            this_ticket = -1;
          } else if (RD_REORD_SLOTS) {
            this_ticket = rd_reord_book[active_trans.id.to_uint()].head_flit;
            unsigned char this_nxt_flit = rd_reord_buff[rd_reord_book[active_trans.id.to_uint()].head_flit].nxt_flit;
            reord_slot_active = this_nxt_flit;
            // List Update
            rd_reord_buff[rd_reord_book[active_trans.id.to_uint()].head_flit].valid = false;
            rd_reord_buff[rd_reord_book[active_trans.id.to_uint()].head_flit].nxt_flit = -1; // Probably not needed
            if (rd_reord_book[active_trans.id.to_uint()].head_flit == rd_reord_book[active_trans.id.to_uint()].tail_flit) {
              rd_reord_book[active_trans.id.to_uint()].tail_flit = -1;
            }
            rd_reord_book[active_trans.id.to_uint()].head_flit = this_nxt_flit;
          }
          order_info fin_trans;
          fin_trans.tid = active_trans.id.to_uint();
          fin_trans.ticket = this_ticket;
          fin_trans.dst = active_trans_dst;
          rd_trans_fin.write(fin_trans);
        }
        // Push the response when its gets the required data
        if( done_job || done_axi ) {
          builder_resp.id = active_trans.id;
          builder_resp.resp = (cur_flit.data[flit_phit_ptr] >> dnp::rdata::RE_PTR) & ((1 << dnp::RE_W) - 1);
          builder_resp.last = ((bytes_depacked+bytes_per_iter)==bytes_total);
          duth_fun<axi4_::Data, cfg::RD_LANES>::assign_char2ac(builder_resp.data, resp_build_tmp);
          r_out.Push(builder_resp);
          #pragma hls_unroll yes
          for(int i=0; i<cfg::RD_LANES; ++i) resp_build_tmp[i] = 0;
        }
        if(done_job) { // End of transaction
          bypass_active = false;
          reord_active = false;
          bytes_depacked = 0;
        } else {
          // Response continues, update pointers.
          bytes_depacked += bytes_per_iter;
          flit_phit_ptr = (done_flit) ? 0 : (flit_phit_ptr +(bytes_per_iter>>1));
          // FIXED bursts rewind to the initial aligned lane; INCR/WRAP advance modulo the bus width
          axi_lane_ptr = (active_trans.burst==enc_::AXBURST::FIXED) ? ((axi_lane_ptr+bytes_per_iter) & ((1<<final_size)-1)) + addr_init_aligned :
                                                                      ((axi_lane_ptr+bytes_per_iter) & (cfg::RD_LANES-1)) ;
        }
      } // End of transaction handling
    } // End of while(1)
  }; // End of Read Response De-Packetizer
//--------------------------------//
//--- WRITE REQuest Packetizer ---//
//--------------------------------//
  // WRITE REQuest Packetizer.
  // Pops write requests (AW) from the Master, allocates a single reorder
  // ticket when the B response could be reordered, packetizes the request
  // header into a HEAD flit, then gathers W beats and packetizes them into
  // BODY/TAIL flits. Finished transactions are drained from wr_trans_fin to
  // release tickets and outstanding counters.
  void wr_req_pack_job () {
    wr_flit_out.Reset();
    aw_in.Reset();
    w_in.Reset();
    for (int i=0; i<(1<<dnp::ID_W); ++i) {
      wr_out_table[i].dst_last = 0;
      wr_out_table[i].sent = 0;
      wr_out_table[i].reorder = false;
    }
    for (int i=0; i<WR_REORD_SLOTS; ++i) wr_reord_avail[i] = true;
    unsigned char wr_avail_reord_slots = WR_REORD_SLOTS;
    unsigned char this_ticket = -1;  // -1 wraps to >=WR_REORD_SLOTS: "no ticket" sentinel
    unsigned char this_dst = -1;
    while(1) {
      wait();
      // Always check for finished transactions
      order_info rcv_fin;
      if(wr_trans_fin.nb_read(rcv_fin)) {
        if(wr_out_table[rcv_fin.tid].sent==1) wr_out_table[rcv_fin.tid].reorder = false;
        wr_out_table[rcv_fin.tid].sent = wr_out_table[rcv_fin.tid].sent - 1;
        if (rcv_fin.ticket<WR_REORD_SLOTS) {  // ticket>=SLOTS means the response bypassed the ROB
          wr_reord_avail[rcv_fin.ticket] = true;
          wr_avail_reord_slots++;
        }
      }
      axi4_::AddrPayload this_req;
      if(aw_in.PopNB(this_req)) {
        // A new request must stall until it is eligible to depart.
        // Reordering of responses of the same IDs are allowed and handled by the depacketizer in the reorder buffer
        // The response that might get reordered must be able to fit in the buffer
        outs_table_entry sel_entry = wr_out_table[this_req.id.to_uint()];
        this_dst = addr_lut_wr(this_req.addr);
        bool may_reorder = (sel_entry.sent>0) && (sel_entry.dst_last != this_dst);
        bool through_reord = may_reorder || sel_entry.reorder;
        unsigned char wait_for = sel_entry.sent;
        // Stall until the necessary resources are available
        // (a write response occupies a single ROB slot)
        while(through_reord && (wr_avail_reord_slots<1)) {
          order_info rcv_fin;
          if(wr_trans_fin.nb_read(rcv_fin)) {
            if(wr_out_table[rcv_fin.tid].sent==1) wr_out_table[rcv_fin.tid].reorder = false;
            wr_out_table[rcv_fin.tid].sent = wr_out_table[rcv_fin.tid].sent - 1;
            if (rcv_fin.ticket<WR_REORD_SLOTS && WR_REORD_SLOTS) {
              wr_reord_avail[rcv_fin.ticket] = true;
              wr_avail_reord_slots++;
            }
          }
          wait();
        }; // End of stall-for-resources loop
        // Get ticket
        this_ticket = -1;
        if (through_reord) {
          wr_out_table[this_req.id.to_uint()].reorder = true;
          wr_avail_reord_slots--;
          for (int i=0; i<WR_REORD_SLOTS; ++i) {  // priority-pick first free slot
            if(wr_reord_avail[i]) {
              this_ticket = i;
              wr_reord_avail[i] = false;
              break;
            }
          }
        }
      } else {
        // No available response
        continue;
      }
      // --- Start HEADER Packetization --- //
      wreq_flit_t tmp_flit;
      wreq_flit_t tmp_mule_flit;
      tmp_mule_flit.type = HEAD;
      tmp_mule_flit.data[0] = ((sc_uint<dnp::PHIT_W>)this_ticket << dnp::req::REORD_PTR) |
                              ((sc_uint<dnp::PHIT_W>)this_req.id << dnp::req::ID_PTR) |
                              ((sc_uint<dnp::PHIT_W>)dnp::PACK_TYPE__WR_REQ << dnp::T_PTR) |
                              ((sc_uint<dnp::PHIT_W>)0 << dnp::Q_PTR) |
                              ((sc_uint<dnp::PHIT_W>)this_dst << dnp::D_PTR) |
                              ((sc_uint<dnp::PHIT_W>)THIS_ID.read() << dnp::S_PTR) |
                              ((sc_uint<dnp::PHIT_W>)0 << dnp::V_PTR) ;
      tmp_mule_flit.data[1] = ((sc_uint<dnp::PHIT_W>)this_req.len << dnp::req::LE_PTR) |
                              ((sc_uint<dnp::PHIT_W>)this_req.addr & 0xffff);
      tmp_mule_flit.data[2] = ((sc_uint<dnp::PHIT_W>)this_req.burst << dnp::req::BU_PTR) |
                              ((sc_uint<dnp::PHIT_W>)this_req.size << dnp::req::SZ_PTR) |
                              ((sc_uint<dnp::PHIT_W>)(this_req.addr >> dnp::AL_W) << dnp::req::AH_PTR) ;
      wr_out_table[this_req.id.to_uint()].sent++;
      wr_out_table[this_req.id.to_uint()].dst_last = this_dst;
      // Inform the de-packetizer about the expected response
      order_info trans_expect;
      trans_expect.tid = this_req.id.to_uint();
      trans_expect.dst = this_dst;
      trans_expect.ticket = this_ticket;
      wr_trans_init.write(trans_expect);
      // If network is not ready, continue to poll for finished transactions
      #pragma hls_pipeline_init_interval 1
      #pragma pipeline_stall_mode flush
      while(!wr_flit_out.PushNB(tmp_mule_flit)) {
        order_info rcv_fin;
        if(wr_trans_fin.nb_read(rcv_fin)) {
          if(wr_out_table[rcv_fin.tid].sent==1) wr_out_table[rcv_fin.tid].reorder = false;
          wr_out_table[rcv_fin.tid].sent = wr_out_table[rcv_fin.tid].sent - 1;
          if (rcv_fin.ticket<WR_REORD_SLOTS) {
            wr_reord_avail[rcv_fin.ticket] = true;
            wr_avail_reord_slots++;
          }
        }
        wait();
      };
      // --- Start DATA Packetization --- //
      // Data Packetization happens in a loop. Each iteration pops a beat and fills a flit.
      // Multiple iterations may be needed either the consume incoming data or fill a flit,
      // which depends on the AXI and flit size.
      // Each iteration transfers data bytes from the AXI beat to the flit.
      // The processed bytes per iteration is limited by two factors
      // depending the AXI beat size and the bytes in the flit.
      // 1) The available data bytes in the flit is less than the required for the beat
      // 2) The remaining byte lanes are less than the available in the flit
      // For case (1) the flit is emptied and the next flit is popped at the next iteration
      // For case (2) the beat is pushed to Master and the next beat starts in the next iteration
      sc_uint<8> addr_init_aligned = (this_req.addr.to_uint() & (cfg::WR_LANES-1)) & ~((1<<this_req.size.to_uint())-1);
      // For data Packetization we keep 2 pointers.
      //   - One to keep track axi byte lanes to place to data (axi_lane_ptr)
      //   - One to point at the data of the flit (flit_phit_ptr)
      sc_uint<8> axi_lane_ptr = addr_init_aligned; // Bytes MOD size
      cnt_phit_wreq_t flit_phit_ptr = 0; // Bytes MOD phits in flit
      sc_uint<16> bytes_total = ((this_req.len.to_uint()+1)<<this_req.size.to_uint());
      sc_uint<16> bytes_packed = 0;
      unsigned char data_build_tmp[cfg::WR_LANES];
      bool wstrb_tmp[cfg::WR_LANES];
      sc_uint<1> last_tmp;
      //#pragma hls_pipeline_init_interval 1
      //#pragma pipeline_stall_mode flush
      gather_wr_beats : while (1) {
        wait();
        // Calculate the bytes transferred in this iteration, depending the available flit bytes and the remaining to the beat
        sc_uint<8> bytes_axi_left = ((1<<this_req.size.to_uint()) - (axi_lane_ptr & ((1<<this_req.size.to_uint())-1)));
        sc_uint<8> bytes_flit_left = ((cfg::WREQ_PHITS<<1) - (flit_phit_ptr<<1));
        sc_uint<8> bytes_per_iter = (bytes_axi_left<bytes_flit_left) ? bytes_axi_left : bytes_flit_left;
        // If current beat has been packed, pop next
        if((bytes_packed & ((1<<this_req.size.to_uint())-1))==0) {
          axi4_::WritePayload this_wr;
          this_wr = w_in.Pop();
          last_tmp = this_wr.last;
          duth_fun<axi4_::Data , cfg::WR_LANES>::assign_ac2char(data_build_tmp , this_wr.data);
          duth_fun<axi4_::Wstrb, cfg::WR_LANES>::assign_ac2bool(wstrb_tmp , this_wr.wstrb);
        }
        // Convert AXI Beats to flits.
        #pragma hls_unroll yes
        for (int i=0; i<cfg::WREQ_PHITS; ++i){ // i counts phits on the flit
          if(i>=flit_phit_ptr && i<(flit_phit_ptr+(bytes_per_iter>>1))) {
            sc_uint<8> loc_axi_ptr = (axi_lane_ptr + ((i-flit_phit_ptr)<<1));
            tmp_mule_flit.data[i] = ((sc_uint<dnp::PHIT_W>)last_tmp << dnp::wdata::LA_PTR ) | // MSB
                                    ((sc_uint<dnp::PHIT_W>)wstrb_tmp[loc_axi_ptr+1] << dnp::wdata::E1_PTR ) |
                                    ((sc_uint<dnp::PHIT_W>)wstrb_tmp[loc_axi_ptr ] << dnp::wdata::E0_PTR ) |
                                    ((sc_uint<dnp::PHIT_W>)data_build_tmp[loc_axi_ptr+1] << dnp::wdata::B1_PTR ) | // (i*2) % 4
                                    ((sc_uint<dnp::PHIT_W>)data_build_tmp[loc_axi_ptr ] << dnp::wdata::B0_PTR ) ;
          }
        }
        // transaction event flags
        bool done_job = ((bytes_packed+bytes_per_iter)==bytes_total); // All bytes are processed
        bool done_flit = (flit_phit_ptr+(bytes_per_iter>>1)==cfg::WREQ_PHITS); // Flit got empty
        bool done_axi = (((bytes_packed+bytes_per_iter)&((1<<(this_req.size.to_uint()))-1))==0); // Beat got full
        // If network is not ready, continue to poll for finished transactions
        if(done_job || done_flit) {
          tmp_mule_flit.type = (bytes_packed+bytes_per_iter==bytes_total) ? TAIL : BODY;
          while (!wr_flit_out.PushNB(tmp_mule_flit)) {
            order_info rcv_fin;
            if(wr_trans_fin.nb_read(rcv_fin)) {
              if(wr_out_table[rcv_fin.tid].sent==1) wr_out_table[rcv_fin.tid].reorder = false;
              wr_out_table[rcv_fin.tid].sent = wr_out_table[rcv_fin.tid].sent - 1;
              if (rcv_fin.ticket<WR_REORD_SLOTS) {
                wr_reord_avail[rcv_fin.ticket] = true;
                wr_avail_reord_slots++;
              }
            }
            wait();
          }
        }
        // Check to either finish transaction or update the pointers for the next iteration
        if (done_job) {
          break;
        } else { // Move to next iteration
          bytes_packed = bytes_packed+bytes_per_iter;
          flit_phit_ptr = (done_flit) ? 0 : (flit_phit_ptr +(bytes_per_iter>>1));
          // FIXED bursts rewind to the initial aligned lane; INCR/WRAP advance modulo the bus width
          axi_lane_ptr = ((unsigned)this_req.burst==enc_::AXBURST::FIXED) ? ((axi_lane_ptr+bytes_per_iter) & ((1<<this_req.size.to_uint())-1)) + addr_init_aligned :
                                                                            ((axi_lane_ptr+bytes_per_iter) & (cfg::WR_LANES-1)) ;
        }
      } // End of gather beats
    } // End of While(1)
  }; // End of Write Request Packetizer
  //------------------------------------//
  //--- WRITE RESPonse DE-Packetizer ---//
  //------------------------------------//
void wr_resp_depack_job(){
wr_flit_in.Reset();
b_out.Reset();
for (int i=0; i<(1<<dnp::ID_W); ++i){
wr_reord_book[i].tail_flit = -1;
wr_reord_book[i].head_flit = -1;
wr_reord_book[i].hol_expect = 0;
}
for (int i=0; i<WR_REORD_SLOTS; ++i){
wr_reord_buff[i].nxt_flit = -1;
wr_reord_buff[i].valid = false;
}
while(1) {
wait();
// Always check for finished transactions
order_info trans_expect;
if(wr_trans_init.nb_read(trans_expect)) {
if(trans_expect.ticket<WR_REORD_SLOTS && WR_REORD_SLOTS) {
// PUSH new item to linked list
if(wr_reord_book[trans_expect.tid].head_flit>=WR_REORD_SLOTS) {
wr_reord_book[trans_expect.tid].head_flit = trans_expect.ticket;
}
// Change old tail's nxt_flit to point to new ticket.
if(wr_reord_book[trans_expect.tid].tail_flit<WR_REORD_SLOTS) {
wr_reord_buff[wr_reord_book[trans_expect.tid].tail_flit].nxt_flit = trans_expect.ticket;
}
// Update the TID's tail.
wr_reord_book[trans_expect.tid].tail_flit = trans_expect.ticket;
} else {
wr_reord_book[trans_expect.tid].hol_expect++;
}
} // End of new trans sink
// Handle incoming flits.
// Either a packet that bypasses the reorder buffer,
// or Store it in the reorder buffer
bool bypass = false;
wresp_flit_t flit_rcv;
if(wr_flit_in.PopNB(flit_rcv)) {
unsigned char rcv_ticket = ((flit_rcv.data[0] >> (dnp::wresp::REORD_PTR)) & ((1<<dnp::REORD_W)-1));
if(rcv_ticket<WR_REORD_SLOTS && WR_REORD_SLOTS) {
wr_reord_buff[rcv_ticket].flit = flit_rcv;
wr_reord_buff[rcv_ticket].valid = true;
} else {
bypass = true;
}
}
// Send response to Master either from bypass or reorder buffer
axi4_::WRespPayload this_resp;
if(bypass) {
unsigned char this_tid = (flit_rcv.data[0] >> dnp::wresp::ID_PTR) & ((1<<dnp::ID_W)-1);
this_resp.id = this_tid;
this_resp.resp = (flit_rcv.data[0] >> dnp::wresp::RESP_PTR) & ((1<<dnp::RE_W)-1);
order_info fin_trans;
fin_trans.tid = this_tid;
fin_trans.ticket = (flit_rcv.data[0] >> dnp::wresp::REORD_PTR) & ((1<<dnp::REORD_W)-1);
fin_trans.dst = (flit_rcv.data[0] >> dnp::S_PTR) & ((1<<dnp::S_W)-1);
wr_reord_book[this_tid].hol_expect--;
b_out.Push(this_resp);
wr_trans_fin.write(fin_trans);
} else if (WR_REORD_SLOTS) {
// Check ROB for valid responses to send to Master
wresp_flit_t flit_reord;
bool reord_valid = false;
#pragma hls_unroll yes
for (int i=0; i<(1<<dnp::ID_W); ++i) {
if (wr_reord_book[i].hol_expect==0) {
if (wr_reord_book[i].head_flit < WR_REORD_SLOTS) {
if (wr_reord_buff[wr_reord_book[i].head_flit].valid) {
flit_reord = wr_reord_buff[wr_reord_book[i].head_flit].flit;
unsigned char this_nxt_flit = wr_reord_buff[wr_reord_book[i].head_flit].nxt_flit;
// List Update
wr_reord_buff[wr_reord_book[i].head_flit].valid = false;
wr_reord_buff[wr_reord_book[i].head_flit].nxt_flit = -1; // Probably not needed
if (wr_reord_book[i].head_flit == wr_reord_book[i].tail_flit) {
wr_reord_book[i].tail_flit = -1;
}
wr_reord_book[i].head_flit = this_nxt_flit;
// End of - List Update
reord_valid = true;
break;
}
}
}
}
if(reord_valid) {
this_resp.id = (flit_reord.data[0] >> dnp::wresp::ID_PTR) & ((1<<dnp::ID_W)-1);
this_resp.resp = (flit_reord.data[0] >> dnp::wresp::RESP_PTR) & ((1<<dnp::RE_W)-1);
order_info fin_trans;
fin_trans.tid = (flit_reord.data[0] >> dnp::wresp::ID_PTR) & ((1<<dnp::ID_W)-1);
fin_trans.ticket = (flit_reord.data[0] >> dnp::wresp::REORD_PTR) & ((1<<dnp::REORD_W)-1);
b_out.Push(this_resp);
wr_trans_fin.write(fin_trans);
}
} // End of send flit from ROB
} // End of While(1)
}; // End of Write Resp Packetizer
// Memory map resolving
inline unsigned char addr_lut_rd(const axi4_::Addr addr) {
for (int i=0; i<cfg::SLAVE_NUM; ++i) {
if (addr>=addr_map[i][0].read() && addr <= addr_map[i][1].read()) return i;
}
NVHLS_ASSERT_MSG(0, "RD address not resolved!");
return 0;
};
inline unsigned char addr_lut_wr(const axi4_::Addr addr) {
for (int i=0; i<cfg::SLAVE_NUM; ++i) {
if (addr>=addr_map[i][0].read() && addr <= addr_map[i][1].read()) return i;
}
NVHLS_ASSERT_MSG(0, "WR address not resolved!");
return 0;
};
}; // End of Master-IF module
#endif // AXI4_MASTER_IF_CON_H
|
ic-lab-duth/NoCpad
|
tb/tb_axi_con/axi_master.h
|
<gh_stars>1-10
#ifndef AXI_MASTER_H
#define AXI_MASTER_H
#include "systemc.h"
#include <axi/axi4.h>
#include "../helper_non_synth.h"
#include "../../src/include/flit_axi.h"
#include "../tb_wrap.h"
#include <deque>
#include <queue>
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#define AXI_TID_NUM 4
#define AXI_BURST_NUM 3
#define AXI4_MAX_LEN 4 // FIXED, WRAP bursts has a maximum of 16 beats
#define AXI4_MAX_INCR_LEN 4 // AXI4 extends INCR bursts upto 256 beats
// Testbench AXI Master: generates randomized read/write transactions,
// injects them into the DUT via Matchlib Connections channels, and verifies
// the returned R/B responses against shared scoreboard queues.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
SC_MODULE(axi_master) {
  typedef typename axi::axi4<axi::cfg::standard_duth> axi4_;
  typedef typename axi::AXI4_Encoding enc_;
  sc_in_clk clk;
  sc_in <bool> rst_n;
  sc_in<bool> stop_gen;                       // when asserted, stop generating new transactions
  sc_in< sc_uint<32> > addr_map[SLAVE_NUM][2]; // per-slave [base, bound] address ranges
  // AXI channels towards the DUT
  Connections::Out<axi4_::AddrPayload> ar_out;
  Connections::In<axi4_::ReadPayload> r_in;
  Connections::Out<axi4_::AddrPayload> aw_out;
  Connections::Out<axi4_::WritePayload> w_out;
  Connections::In<axi4_::WRespPayload> b_in;
  // Scoreboard (shared with the slaves/checker; guarded by sb_lock)
  sc_mutex *sb_lock;
  std::vector< std::deque< msg_tb_wrap<axi4_::AddrPayload> > > *sb_rd_req_q;
  std::vector< std::deque< msg_tb_wrap<axi4_::ReadPayload> > > *sb_rd_resp_q;
  std::vector< std::deque< msg_tb_wrap<axi4_::AddrPayload> > > *sb_wr_req_q;
  std::vector< std::deque< msg_tb_wrap<axi4_::WritePayload> > > *sb_wr_data_q;
  std::vector< std::deque< msg_tb_wrap<axi4_::WRespPayload> > > *sb_wr_resp_q;
  std::deque<axi4_::AddrPayload> sb_rd_order_q; // queue to check order
  std::deque<axi4_::AddrPayload> sb_wr_order_q; // queue to check order
  // Generated-but-not-yet-injected transactions
  std::queue<axi4_::AddrPayload> stored_rd_trans;
  std::queue<axi4_::AddrPayload> stored_wr_trans;
  std::queue<axi4_::WritePayload> stored_wr_data;
  int MASTER_ID = -1;
  unsigned int GEN_RATE_RD;   // generation probability per cycle, percent (0-100)
  unsigned int GEN_RATE_WR;
  //  int FLOW_CTRL; // 0: READY-VALID
  //                 // 1: CREDITS
  //                 // 2: FIFO
  //                 // 3: Credits fifo based
  // delays
  int total_cycles;
  sc_time clk_period;
  unsigned long long int rd_resp_delay = 0;
  unsigned long long int rd_resp_count = 0;
  unsigned long long int wr_resp_delay = 0;
  unsigned long long int wr_resp_count = 0;
  unsigned long long int last_rd_sinked_cycle = 0;
  unsigned long long int last_wr_sinked_cycle = 0;
  unsigned long long int rd_resp_data_count = 0;
  unsigned long long int wr_resp_data_count = 0;
  bool stop_at_tail, has_stopped_gen;
  // Generation/injection statistics
  int rd_trans_generated;
  int rd_data_generated;
  int wr_trans_generated;
  int wr_data_generated;
  int rd_trans_inj;
  int wr_trans_inj;
  int wr_data_inj;
  unsigned int gen_rd_addr;
  unsigned int gen_wr_addr;
  unsigned int resp_val_expect;
  // Response ejection counters
  int rd_resp_ej;
  int wr_resp_ej;
  // Errors
  int error_sb_rd_resp_not_found;
  int error_sb_wr_resp_not_found;
  // Functions
  void do_cycle();                                       // main per-cycle thread: generate, inject, eject, verify
  void gen_new_rd_trans();
  void gen_new_wr_trans();
  void verify_rd_resp(axi4_::ReadPayload &rcv_rd_resp);
  void verify_wr_resp(axi4_::WRespPayload &rcv_wr_resp);
  bool eq_rd_data(axi4_::ReadPayload &rcv_rd_data, axi4_::ReadPayload &sb_rd_data);
  bool eq_wr_resp(axi4_::WRespPayload &rcv_wr_resp, axi4_::WRespPayload &sb_wr_resp);
  unsigned mem_map_resolve(axi4_::Addr &addr);
  // Constructor: registers the single clocked thread with async active-low reset
  SC_HAS_PROCESS(axi_master);
  axi_master(sc_module_name name_) : sc_module(name_)
  {
    MASTER_ID = -1;
    GEN_RATE_RD = 0;
    GEN_RATE_WR = 0;
    SC_THREAD(do_cycle);
    sensitive << clk.pos();
    async_reset_signal_is(rst_n, false);
  }
};
// --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- //
// --- --- --- --- --- --- IMPLEMENTATION --- --- --- --- --- --- --- --- //
// --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- //
#include "axi_master.h"
// Main testbench thread. Every clock cycle it:
//   1) optionally generates a new read and/or write transaction (probability
//      GEN_RATE_RD / GEN_RATE_WR percent, unless stop_gen is asserted),
//   2) non-blockingly injects any stored AR/AW/W payloads into the DUT,
//   3) non-blockingly pops R and B responses and verifies them against the
//      scoreboard.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void axi_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::do_cycle () {
  // Reset all generation/injection/ejection statistics
  total_cycles = 0;
  rd_trans_generated = 0;
  rd_data_generated = 0;
  wr_trans_generated = 0;
  wr_data_generated = 0;
  rd_trans_inj = 0;
  wr_trans_inj = 0;
  wr_data_inj = 0;
  gen_rd_addr = 0;
  gen_wr_addr = 0;
  resp_val_expect = 0;
  rd_resp_ej = 0;
  wr_resp_ej = 0;
  // Clear errors
  error_sb_rd_resp_not_found = 0;
  error_sb_wr_resp_not_found = 0;
  clk_period = (dynamic_cast<sc_clock *>(clk.get_interface()))->period();
  while(1) {
    wait();
    // Transaction Generator
    if (!stop_gen.read()) {
      unsigned int rnd_val_rd = rand()%100;
      if (rnd_val_rd < GEN_RATE_RD) {
        gen_new_rd_trans();
      }
      unsigned int rnd_val_wr = rand()%100;
      if (rnd_val_wr < GEN_RATE_WR) {
        gen_new_wr_trans();
      }
    }
    // Read Request Injection (non-blocking; retried next cycle if channel stalls)
    if (!stored_rd_trans.empty()) {
      axi4_::AddrPayload tmp_ar = stored_rd_trans.front();
      if (ar_out.PushNB(tmp_ar)){
        stored_rd_trans.pop();
        std::cout<<"[Master "<< MASTER_ID << "] : PUSHED AR:" << tmp_ar << " @" << sc_time_stamp() << std::endl;
        rd_trans_inj++;
      }
    }
    // Write Request Injection
    if (!stored_wr_trans.empty()) {
      axi4_::AddrPayload tmp_aw = stored_wr_trans.front();
      if (aw_out.PushNB(tmp_aw)) {
        stored_wr_trans.pop();
        std::cout<<"[Master "<< MASTER_ID << "] : PUSHED AW: " << tmp_aw << " @" << sc_time_stamp() << std::endl;
        wr_trans_inj++;
      }
    }
    // Write Data Injection
    if (!stored_wr_data.empty()) {
      axi4_::WritePayload tmp_w = stored_wr_data.front();
      if (w_out.PushNB(tmp_w)) {
        stored_wr_data.pop();
        std::cout<<"[Master "<< MASTER_ID << "] : PUSHED W: " << tmp_w << " @" << sc_time_stamp() << std::endl;
        wr_data_inj++;
      }
    }
    // Read Response Ejection and scoreboard verification
    axi4_::ReadPayload rcv_rd_resp;
    if(r_in.PopNB(rcv_rd_resp)) {
      verify_rd_resp(rcv_rd_resp);
    }
    // Write Response Ejection and scoreboard verification
    axi4_::WRespPayload rcv_wr_resp;
    if(b_in.PopNB(rcv_wr_resp)) {
      verify_wr_resp(rcv_wr_resp);
    }
  }; // End of while(1)
}; // End of do_cycle
// --------------------------- //
// --- GENERATOR Functions --- //
// --------------------------- //
// Generates one randomized AXI read transaction:
//   - builds a random AR request (id/size/burst/len/addr),
//   - queues it for injection and books it into the shared scoreboard
//     (adjusted for any master->slave data-width conversion),
//   - pre-computes every expected R beat (data is a rolling byte counter)
//     and books those into the read-response scoreboard.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void axi_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::gen_new_rd_trans() {
axi4_::AddrPayload rd_req_m;
rd_req_m.id = (rand()%AXI_TID_NUM);
// size in [1, log2(lanes)]: size 0 (single-byte beats) is not generated.
rd_req_m.size = ((rand()%my_log2c(RD_M_LANES))+1) & ((1<<my_log2c(RD_M_LANES))-1);
rd_req_m.burst = (rand()%AXI_BURST_NUM);
// WRAP bursts need power-of-2 lengths; INCR length is capped when the
// transaction gets downsized (which multiplies the beat count).
rd_req_m.len = (rd_req_m.burst==enc_::AXBURST::WRAP) ? ((1<<(rand()%my_log2c(AXI4_MAX_LEN+1)))-1) :
(rd_req_m.burst==enc_::AXBURST::FIXED) ? (rand()%AXI4_MAX_LEN) :
(RD_M_LANES>RD_S_LANES) ? (rand()%(AXI4_MAX_INCR_LEN/(RD_M_LANES/RD_S_LANES))) // INCR With Downsize // Cap the maximum len in case of transactions downsize (which increases len)
: (rand()%AXI4_MAX_INCR_LEN) ; // INCR WithOut Downsize
// Increasing address to keep track of the transactions
// NOTE(review): this first addr assignment is dead code — it is
// unconditionally overwritten by the next line. Kept for reference.
rd_req_m.addr = gen_rd_addr + ((rand()%RD_M_LANES) & (1<<rd_req_m.size));
rd_req_m.addr = (rand()%2) ? gen_rd_addr : gen_rd_addr + 0x10000; //addr_map[i][1].read();
gen_rd_addr = gen_rd_addr + RD_M_LANES;
// Push it to injection queue
stored_rd_trans.push(rd_req_m);
// Push it to Scoreboard
sb_lock->lock();
// Consider resizing at slave: if the master beat is wider than the slave
// data bus, the slave sees smaller/more beats for the same byte count.
axi4_::AddrPayload rd_req_s;
rd_req_s.id = rd_req_m.id;
rd_req_s.size = ((1<<rd_req_m.size)>RD_S_LANES) ? my_log2c(RD_S_LANES) : (unsigned) rd_req_m.size;
rd_req_s.len = ((1<<rd_req_m.size)>RD_S_LANES) ? unsigned (((rd_req_m.len+1)<<(rd_req_m.size-my_log2c(RD_S_LANES)))-1) : (unsigned) rd_req_m.len;
rd_req_s.burst = rd_req_m.burst;
rd_req_s.addr = rd_req_m.addr;
msg_tb_wrap<axi4_::AddrPayload> temp_rd_req_tb;
temp_rd_req_tb.dut_msg = rd_req_s;
temp_rd_req_tb.time_gen = sc_time_stamp();
unsigned dst = mem_map_resolve(rd_req_s.addr);
(*sb_rd_req_q)[dst].push_back(temp_rd_req_tb);
sb_lock->unlock();
// Push into order queue - Reorder check extension
sb_rd_order_q.push_back(rd_req_m);
rd_trans_generated++;
// --- --- --- --- --- --- --- --- //
// Generate the expected Responce
// --- --- --- --- --- --- --- --- //
sb_lock->lock();
axi4_::ReadPayload beat_expected;
beat_expected.id = rd_req_m.id;
// Create Expected Response: walk the transaction byte-by-byte, packing a
// rolling byte counter into the data lanes; emit one expected beat every
// time a full beat's worth of bytes has been packed.
unsigned long int bytes_total = ((rd_req_m.len+1)<<rd_req_m.size);
unsigned long int byte_count = 0;
unsigned char m_init_ptr = rd_req_m.addr % RD_M_LANES;   // starting byte lane
unsigned char m_ptr = m_init_ptr;
unsigned char m_size = rd_req_m.size;
beat_expected.data = 0;
while(byte_count<bytes_total) {
beat_expected.data |= ( ((axi4_::Data)(byte_count & 0xFF)) << ((axi4_::Data)(m_ptr*8)));
byte_count++;
// FIXED bursts re-use the same lane window each beat; others wrap the bus.
m_ptr = (rd_req_m.burst==enc_::AXBURST::FIXED) ? ((m_ptr+1)%(1<<m_size)) + m_init_ptr
: (m_ptr+1)%RD_M_LANES ;
// Beat boundary reached (or transaction exhausted): book the expected beat.
if(((m_ptr%(1<<m_size))==0) || (byte_count == bytes_total)) {
// The testbench slave sneaks its own index into the resp field.
beat_expected.resp = mem_map_resolve(rd_req_m.addr);
beat_expected.last = (byte_count == bytes_total);
msg_tb_wrap< axi4_::ReadPayload > temp_rd_resp_tb;
temp_rd_resp_tb.dut_msg = beat_expected;
temp_rd_resp_tb.time_gen = sc_time_stamp();
(*sb_rd_resp_q)[MASTER_ID].push_back(temp_rd_resp_tb);
beat_expected.data = 0;
rd_data_generated++;
}
}
sb_lock->unlock();
}; // End of Read generator
// Generates one randomized AXI write transaction:
//   - builds a random AW request, queues it for injection, and books the
//     (possibly downsized) request into the shared scoreboard,
//   - builds the W beats as seen by this master (cur_beat, queued for
//     injection) and as expected at the slave (beat_at_slave, booked into
//     the write-data scoreboard), tracking both lane pointers in lockstep.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void axi_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::gen_new_wr_trans() {
sb_lock->lock();
axi4_::AddrPayload m_wr_req;
m_wr_req.id = (rand()%AXI_TID_NUM);
m_wr_req.size = ((rand()%my_log2c(WR_M_LANES))+1) & ((1<<my_log2c(WR_M_LANES))-1); // 0 size is NOT supported
m_wr_req.burst = (rand()%AXI_BURST_NUM);
// WRAP bursts need power-of-2 lengths; INCR length is capped when the
// transaction gets downsized (which multiplies the beat count).
m_wr_req.len = (m_wr_req.burst==enc_::AXBURST::WRAP) ? ((1<<(rand()%my_log2c(AXI4_MAX_LEN+1)))-1) :
(m_wr_req.burst==enc_::AXBURST::FIXED) ? (rand()%AXI4_MAX_LEN) :
(WR_M_LANES>WR_S_LANES) ? (rand()%(AXI4_MAX_INCR_LEN/(WR_M_LANES/WR_S_LANES))) // INCR With Downsize // Cap the maximum len in case of transactions downsize (which increases len)
: (rand()%AXI4_MAX_INCR_LEN) ; // INCR WithOut Downsize
// Increasing address to keep track of the transactions
// Aligned on size transactions (Although non-aligned should be an easy addition)
// NOTE(review): this first addr assignment is dead code — it is
// unconditionally overwritten by the next line. Kept for reference.
m_wr_req.addr = gen_wr_addr + ((rand()%WR_M_LANES) & (1<<m_wr_req.size));
m_wr_req.addr = (rand()%2) ? gen_wr_addr : gen_wr_addr + 0x10000; //addr_map[i][1].read();
gen_wr_addr = gen_wr_addr + WR_M_LANES;
// Push it to injection queue
stored_wr_trans.push(m_wr_req);
// Push into order queue - Reorder check extension
sb_wr_order_q.push_back(m_wr_req);
// Create dummy write data
axi4_::WritePayload cur_beat; // The beat that will be injected at MASTER
axi4_::WritePayload beat_at_slave; // The expected beat ejected at SLAVE
unsigned long int bytes_total = ((m_wr_req.len+1)<<m_wr_req.size);
unsigned long int byte_count = 0;
unsigned char m_init_ptr = m_wr_req.addr % WR_M_LANES;   // starting lane at master
unsigned char s_init_ptr = m_wr_req.addr % WR_S_LANES;   // starting lane at slave
unsigned char m_ptr = m_init_ptr;
unsigned char s_ptr = s_init_ptr;
unsigned char m_size = m_wr_req.size;
// Slave-side size/len after potential downsizing.
unsigned char s_size = ((1<<m_size)>WR_S_LANES) ? my_log2c(WR_S_LANES) : m_size;
unsigned char m_len = m_wr_req.len;
unsigned char s_len = ((1<<m_size)>WR_S_LANES) ? (((m_len+1)<<(m_size-s_size))-1) : m_len;
// Push it to Scoreboard
axi4_::AddrPayload s_wr_req;
s_wr_req.id = m_wr_req.id;
s_wr_req.addr = m_wr_req.addr;
s_wr_req.size = s_size;
s_wr_req.len = s_len;
s_wr_req.burst = m_wr_req.burst;
msg_tb_wrap<axi4_::AddrPayload> temp_wr_req_tb;
temp_wr_req_tb.dut_msg = s_wr_req;
unsigned dst = mem_map_resolve(s_wr_req.addr);
(*sb_wr_req_q)[dst].push_back(temp_wr_req_tb);
cur_beat.data = 0;
cur_beat.wstrb = 0;
beat_at_slave.data = 0;
beat_at_slave.wstrb = 0;
// Walk the transaction byte-by-byte: the payload is a rolling byte counter,
// except the very last byte which carries MASTER_ID (lets the checker
// attribute the data stream to its initiator).
while(byte_count<bytes_total) {
unsigned byte_to_write = (byte_count==bytes_total-1) ? MASTER_ID : byte_count;
cur_beat.data |= (((axi4_::Data)(byte_to_write & 0xFF)) << ((axi4_::Data)(m_ptr*8)));
cur_beat.wstrb |= (((axi4_::Data)1) << ((axi4_::Data)m_ptr));
beat_at_slave.data |= (((axi4_::Data)(byte_to_write & 0xFF)) << ((axi4_::Data)(s_ptr*8)));
beat_at_slave.wstrb |= (((axi4_::Data)1) << ((axi4_::Data)s_ptr));
byte_count++;
// FIXED bursts re-use the same lane window each beat; others wrap the bus.
m_ptr = (m_wr_req.burst==enc_::AXBURST::FIXED) ? ((m_ptr+1)%(1<<m_size)) + m_init_ptr :
(m_ptr+1)%WR_M_LANES ;
s_ptr = (m_wr_req.burst==enc_::AXBURST::FIXED) ? ((s_ptr+1)%(1<<s_size)) + s_init_ptr :
(s_ptr+1)%WR_S_LANES ;
// Master-side beat boundary: queue the beat for injection.
if(((m_ptr%(1<<m_size))==0) || (byte_count == bytes_total)) {
wr_data_generated++;
cur_beat.last = (byte_count == bytes_total);
stored_wr_data.push(cur_beat);
cur_beat.data = 0;
cur_beat.wstrb = 0;
}
// Slave-side beat boundary: book the expected beat into the scoreboard.
if(((s_ptr%(1<<s_size))==0) || (byte_count == bytes_total)) {
beat_at_slave.last = (byte_count == bytes_total);
msg_tb_wrap< axi4_::WritePayload > temp_wr_data_tb;
temp_wr_data_tb.dut_msg = beat_at_slave;
wr_resp_data_count++;
last_wr_sinked_cycle = (sc_time_stamp() / clk_period);
(*sb_wr_data_q)[dst].push_back(temp_wr_data_tb);
beat_at_slave.data = 0;
beat_at_slave.wstrb = 0;
}
}
sb_lock->unlock();
wr_trans_generated++;
}; // End of Write generator
// ------------------------ //
// --- VERIFY Functions --- //
// ------------------------ //
// Checks one received R beat against the scoreboard:
//   1) reorder check — match the beat's ID against the oldest outstanding
//      read with that ID and verify the slave index (sneaked into resp)
//      is the one that request targeted,
//   2) content check — find an identical expected beat in the response
//      scoreboard and retire it, updating delay/count statistics.
// Any failure prints diagnostics and stops the simulation via sc_assert.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void axi_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::verify_rd_resp(axi4_::ReadPayload &rcv_rd_resp){
// Verify Response
sb_lock->lock();
// --- Reorder Check --- //
int reorder=2; // 2 : Req not found, 1 : Request reordered, 0 : everything is fine
unsigned j=0;
axi4_::AddrPayload sb_ord_req;
while (j<sb_rd_order_q.size()){
sb_ord_req = sb_rd_order_q[j];
if(sb_ord_req.id == rcv_rd_resp.id) {
// Slave must sneak its ID to the resp field.
unsigned dst = mem_map_resolve(sb_ord_req.addr);
reorder = (dst == rcv_rd_resp.resp) ? 0 : 1;
// Retire the order entry only on the transaction's final beat.
if(rcv_rd_resp.last) sb_rd_order_q.erase(sb_rd_order_q.begin()+j);
break;
}
j++;
}
// --------------------- //
// Content check: linear scan for a matching expected beat.
bool found=false;
j=0;
while (j<(*sb_rd_resp_q)[MASTER_ID].size()){
msg_tb_wrap< axi4_::ReadPayload > sb_resp = (*sb_rd_resp_q)[MASTER_ID][j];
if (eq_rd_data(rcv_rd_resp, sb_resp.dut_msg)){
// Latency is measured once per transaction, on the last beat.
if (sb_resp.dut_msg.last) {
rd_resp_delay += ((sc_time_stamp() - sb_resp.time_gen) / clk_period) - 1;
rd_resp_count++;
}
rd_resp_data_count++;
last_rd_sinked_cycle = (sc_time_stamp() / clk_period);
(*sb_rd_resp_q)[MASTER_ID].erase((*sb_rd_resp_q)[MASTER_ID].begin()+j);
found = true;
break;
}
j++;
}
if(!found){
// NOTE(review): .front() below is undefined if the queue is empty — TODO confirm.
std::cout<< "\n\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "RD-Resp : "<< rcv_rd_resp << " . NOT FOUND! @" << sc_time_stamp() << "\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "-SB_front - "<< (*sb_rd_resp_q)[MASTER_ID].front() << "\n";
error_sb_rd_resp_not_found++;
sc_assert(0);
// sc_stop();
}else if(reorder==2) {
std::cout<< "\n\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "RD-Resp : "<< rcv_rd_resp << " . Respective Request wasn't found!!! @" << sc_time_stamp() << "\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "-REQ_front - "<< sb_rd_order_q.front() << "\n";
sc_assert(0);
}else if(reorder==1) {
std::cout<< "\n\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "RD-Resp : "<< rcv_rd_resp << " . Got Reordered !!! @" << sc_time_stamp() << "\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "REQ-Ordered - "<< sb_ord_req << "\n";
sc_assert(0);
}else{
std::cout<< "[Master " << MASTER_ID <<"] " << "RD-Resp OK : << " << rcv_rd_resp << " @" << sc_time_stamp() << "\n";
rd_resp_ej++;
}
std::cout.flush();
sb_lock->unlock();
}; // End of READ Response Verify
// Checks one received B response against the scoreboard:
//   1) reorder check — match the response's ID against the oldest
//      outstanding write with that ID and verify the slave index
//      (sneaked into resp) is the one that request targeted,
//   2) content check — find and retire a matching expected B response,
//      updating delay/count statistics.
// Any failure prints diagnostics and stops the simulation via sc_assert.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void axi_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::verify_wr_resp(axi4_::WRespPayload &rcv_wr_resp){
// Verify Response
sb_lock->lock();
// --- Reorder Check --- //
int reorder=2; // 2 : Req not found, 1 : Request reordered, 0 : everything is fine
unsigned j=0;
axi4_::AddrPayload sb_ord_req;
while (j<sb_wr_order_q.size()){
sb_ord_req = sb_wr_order_q[j];
if(sb_ord_req.id == rcv_wr_resp.id) {
// Slave must sneak its ID into the first data byte of every beat (aka data[0]).
unsigned dst = mem_map_resolve(sb_ord_req.addr);
reorder = (dst == rcv_wr_resp.resp) ? 0 : 1;
// B responses are one-per-transaction, so always retire the order entry.
sb_wr_order_q.erase(sb_wr_order_q.begin()+j);
break;
}
j++;
}
// --------------------- //
// Verify Response: linear scan for a matching expected B response.
bool found=false;
j=0;
while (j<(*sb_wr_resp_q)[MASTER_ID].size()){
msg_tb_wrap< axi4_::WRespPayload > sb_resp = (*sb_wr_resp_q)[MASTER_ID][j];
if ( eq_wr_resp(sb_resp.dut_msg, rcv_wr_resp) ){
wr_resp_delay += ((sc_time_stamp() - sb_resp.time_gen) / clk_period) - 1;
wr_resp_count++;
(*sb_wr_resp_q)[MASTER_ID].erase((*sb_wr_resp_q)[MASTER_ID].begin()+j);
found = true;
break;
}
j++;
}
if(!found){
// NOTE(review): .front() below is undefined if the queue is empty — TODO confirm.
std::cout<< "\n\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "WR-Resp : "<< rcv_wr_resp << " . NOT FOUND! @" << sc_time_stamp() << "\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "-SB_front - "<< (*sb_wr_resp_q)[MASTER_ID].front() << "\n";
error_sb_wr_resp_not_found++;
sc_assert(0);
// sc_stop();
}else if(reorder==2) {
std::cout<< "\n\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "WR-Resp : "<< rcv_wr_resp << " . Respective Request wasn't found!!! @" << sc_time_stamp() << "\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "-REQ_front - "<< sb_wr_order_q.front() << "\n";
sc_assert(0);
}else if(reorder==1) {
std::cout<< "\n\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "WR-Resp : "<< rcv_wr_resp << " . Got Reordered !!! @" << sc_time_stamp() << "\n";
std::cout<< "[Master " << MASTER_ID <<"] " << "REQ-Ordered - "<< sb_ord_req << "\n";
sc_assert(0);
}else{
std::cout<< "[Master " << MASTER_ID <<"] " << "WR-Resp OK : << " << rcv_wr_resp << "\n";
wr_resp_ej++;
}
std::cout.flush();
sb_lock->unlock();
}; // End of WRITE Response Verify
// Field-wise equality for two R beats. Only the low dnp::ID_W bits of the
// transaction ID are compared, since that is all the network transports;
// ruser is deliberately left out of the comparison.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
bool axi_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::eq_rd_data (axi4_::ReadPayload &rcv_rd_data, axi4_::ReadPayload &sb_rd_data) {
const unsigned id_mask = (1 << dnp::ID_W) - 1;
const bool same_id   = ((rcv_rd_data.id & id_mask) == (sb_rd_data.id & id_mask));
const bool same_data = (rcv_rd_data.data == sb_rd_data.data);
const bool same_resp = (rcv_rd_data.resp == sb_rd_data.resp);
const bool same_last = (rcv_rd_data.last == sb_rd_data.last);
return same_id && same_data && same_resp && same_last;
};
// Field-wise equality for two B responses. Only the low dnp::ID_W bits of
// the transaction ID are compared (all the network transports); buser is
// deliberately left out of the comparison.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
bool axi_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::eq_wr_resp (axi4_::WRespPayload &rcv_wr_resp, axi4_::WRespPayload &sb_wr_resp) {
const unsigned id_mask = (1 << dnp::ID_W) - 1;
const bool same_id   = ((rcv_wr_resp.id & id_mask) == (sb_wr_resp.id & id_mask));
const bool same_resp = (rcv_wr_resp.resp == sb_wr_resp.resp);
return same_id && same_resp;
};
// Resolves an address to the index of the slave whose address range
// contains it. Ranges come from the addr_map signals and are inclusive
// on both ends: [addr_map[i][0], addr_map[i][1]].
// Asserts (and returns 0) if no slave claims the address — a testbench
// configuration error.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
unsigned axi_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::mem_map_resolve(axi4_::Addr &addr){
// Use an unsigned loop index: SLAVE_NUM is an unsigned template parameter,
// and the previous signed index triggered a signed/unsigned comparison.
for (unsigned i=0; i<SLAVE_NUM; ++i) {
if (addr>=addr_map[i][0].read() && addr <= addr_map[i][1].read()) return i;
}
NVHLS_ASSERT_MSG(0, "Target Addr not found!");
return 0; // Unreachable when asserts are enabled; defined fallback otherwise
}
#endif // AXI_MASTER_H
|
ic-lab-duth/NoCpad
|
src/include/ace.h
|
<reponame>ic-lab-duth/NoCpad
#ifndef _AXI_ACE_H_
#define _AXI_ACE_H_
#include <systemc>
#include <nvhls_connections.h>
#include <nvhls_assert.h>
#include <nvhls_message.h>
#include <nvhls_module.h>
#include <UIntOrEmpty.h>
#include <axi/axi4_encoding.h>
#include <axi/axi4_configs.h>
#include "./axi_for_ace.h"
/**
* \brief The axi namespace contains classes and definitions related to the ACE standard.
* \ingroup ACE
*/
namespace ace {
/**
* \brief The base ACE class extending AXI with cache coherence.
* \ingroup ACE
*
* \tparam Cfg A valid AXI config.
*
* \par Overview
* axi4 defines the AXI base class. Based on the provided Cfg, the bitwidths of
* the various fields of the AXI specification are defined, and classes and
* convenience functions can be used to instantiate AXI Connections, wire them
* together, and use them to implement the AXI protocol.
* - Each AXI signal is defined as a UIntOrEmpty of an appropriate width, allowing
* for the presence of 0-width fields when they can be elided entirely.
* - If useWriteResponses = 0, the B channel is not removed entirely (for
* implementation convenience), but is reduced to minimum width.
* - All AW and AR fields are identical, and are combined into a common
* AddrPayload type.
*
*/
template <typename Cfg>
class ace5 : public ace::axi4<Cfg> {
public:
// Bit-widths of the ACE extension fields, derived from the AXI config.
enum {
C_ADDR_WIDTH = Cfg::addrWidth,
C_SNOOP_WIDTH = 4,
C_PROT_WIDTH = 3,
C_RESP_WIDTH = 5,
C_CACHE_WIDTH = Cfg::CacheLineWidth,
C_DATA_CHAN_WIDTH = Cfg::CacheLineWidth,    // snoop data moves a full cache line per beat
};
typedef typename ace::axi4<Cfg>::Addr Addr;
/**
* \brief A struct for ACE Snoop Address channel fields (AC: interconnect -> cached master)
*/
struct AC : public nvhls_message {
typedef NVUINTW(C_SNOOP_WIDTH) Snoop;
typedef typename nvhls::UIntOrEmpty<C_PROT_WIDTH>::T Prot;   // elided when width is 0
Addr addr;
Snoop snoop;
Prot prot;
static const unsigned int width = C_ADDR_WIDTH + C_SNOOP_WIDTH + C_PROT_WIDTH;
AC () {
addr = 0;
snoop = 0;
if(C_PROT_WIDTH > 0) prot = 0;
}
template <unsigned int Size>
void Marshall(Marshaller<Size> &m) {
m &addr;
m &snoop;
m &prot;
}
#ifdef CONNECTIONS_SIM_ONLY
inline friend void sc_trace(sc_trace_file *tf, const AC& v, const std::string& NAME ) {
sc_trace(tf,v.addr, NAME + ".addr");
sc_trace(tf,v.snoop, NAME + ".snoop");
if(C_PROT_WIDTH > 0)
sc_trace(tf,v.prot, NAME + ".prot");
}
inline friend std::ostream& operator<<(ostream& os, const AC& rhs)
{
#ifdef LOG_MSG_WIDTHS
os << std::dec;
os << "addr:" << rhs.addr.width << " ";
os << "snoop:" << rhs.snoop.width << " ";
if(C_PROT_WIDTH > 0)
os << "prot:" << rhs.prot.width << " ";
#else
os << std::hex;
os << "Addr:" << rhs.addr << " ";
os << "Snp:" << rhs.snoop << " ";
if(C_PROT_WIDTH > 0)
os << "Prot:" << rhs.prot << " ";
os << std::dec;
#endif
return os;
}
#endif
};
/**
* \brief A struct for ACE Snoop Response channel fields (CR: cached master -> interconnect)
*/
struct CR : public nvhls_message {
typedef NVUINTW(C_RESP_WIDTH) Resp;
Resp resp;
static const unsigned int width = C_RESP_WIDTH;
CR () {
resp = 0;
}
template <unsigned int Size>
void Marshall(Marshaller<Size> &m) {
m &resp;
}
#ifdef CONNECTIONS_SIM_ONLY
inline friend void sc_trace(sc_trace_file *tf, const CR& v, const std::string& NAME ) {
sc_trace(tf,v.resp, NAME + ".resp");
}
inline friend std::ostream& operator<<(ostream& os, const CR& rhs)
{
#ifdef LOG_MSG_WIDTHS
os << std::dec;
os << "Resp:" << rhs.resp.width << " ";
#else
os << std::hex;
os << "Resp:" << rhs.resp << " ";
os << std::dec;
#endif
return os;
}
#endif
};
/**
* \brief A struct for ACE Snoop Data channel fields (CD: cached master -> interconnect)
*/
struct CD : public nvhls_message {
typedef NVUINTW(C_DATA_CHAN_WIDTH) Data;
typedef NVUINTW(1) Last;
Data data;
Last last;
static const unsigned int width = C_DATA_CHAN_WIDTH + 1;
CD () {
data = 0;
last = 0;
}
template <unsigned int Size>
void Marshall(Marshaller<Size> &m) {
m &data;
m &last;
}
#ifdef CONNECTIONS_SIM_ONLY
inline friend void sc_trace(sc_trace_file *tf, const CD& v, const std::string& NAME ) {
sc_trace(tf,v.data, NAME + ".data");
sc_trace(tf,v.last, NAME + ".last");
}
inline friend std::ostream& operator<<(ostream& os, const CD& rhs)
{
#ifdef LOG_MSG_WIDTHS
os << std::dec;
os << "data:" << rhs.data.width << " ";
os << "last:" << rhs.last.width << " ";
#else
os << std::hex;
os << "Data:" << rhs.data << " ";
os << "last:" << rhs.last << " ";
os << std::dec;
#endif
return os;
}
#endif
};
/**
* \brief A struct for the ACE read-acknowledge (RACK) signal
*/
struct RACK : public nvhls_message {
typedef NVUINTW(1) Rack;
Rack rack;
static const unsigned int width = 1;
RACK () {
rack = 0;
}
template <unsigned int Size>
void Marshall(Marshaller<Size> &m) {
m &rack;
}
#ifdef CONNECTIONS_SIM_ONLY
inline friend void sc_trace(sc_trace_file *tf, const RACK& v, const std::string& NAME ) {
sc_trace(tf,v.rack, NAME + ".rack");
}
inline friend std::ostream& operator<<(ostream& os, const RACK& rhs)
{
#ifdef LOG_MSG_WIDTHS
os << std::dec;
os << "rack:" << rhs.rack.width << " ";
#else
os << std::dec;
os << "Rack:" << rhs.rack << " ";
#endif
return os;
}
#endif
};
/**
* \brief A struct for the ACE write-acknowledge (WACK) signal
*/
struct WACK : public nvhls_message {
typedef NVUINTW(1) Wack;
Wack wack;
static const unsigned int width = 1;
WACK () {
wack = 0;
}
template <unsigned int Size>
void Marshall(Marshaller<Size> &m) {
m &wack;
}
#ifdef CONNECTIONS_SIM_ONLY
inline friend void sc_trace(sc_trace_file *tf, const WACK& v, const std::string& NAME ) {
sc_trace(tf,v.wack, NAME + ".wack");
}
inline friend std::ostream& operator<<(ostream& os, const WACK& rhs)
{
#ifdef LOG_MSG_WIDTHS
os << std::dec;
os << "wack:" << rhs.wack.width << " ";
#else
os << std::dec;
os << "Wack:" << rhs.wack << " ";
#endif
return os;
}
#endif
};
/**
* \brief The ACE extension channels.
*
* Each Connections implementation contains two ready-valid interfaces,
* AC for snoop address, CR for snoop response, CD for snoop data, ACK for acknowledgment
*/
class cache {
public:
/**
* \brief The ACE cache channel, used for connecting Caching components.
*/
template <Connections::connections_port_t PortType = AUTO_PORT>
class chan {
public:
typedef Connections::Combinational<AC, PortType> ACChan;
// NOTE(review): "CRhan" looks like a typo for "CRChan", but it is a public
// typedef — renaming would break external users, so it is kept as-is.
typedef Connections::Combinational<CR, PortType> CRhan;
typedef Connections::Combinational<CD, PortType> CDChan;
//typedef Connections::Combinational<ACK, PortType> ACKChan;
ACChan ac; // IC to master (DIR to IC)
CRhan cr; // master to IC (IC to DIR)
CDChan cd; // master to IC (IC to DIR)
//ACKChan ack; // Master to IC (IC to DIR???)
chan(const char *name)
:
ac(nvhls_concat(name, "_ac")),
cr(nvhls_concat(name, "_cr")),
cd(nvhls_concat(name, "_cd"))
//ack(nvhls_concat(name, "_ack"))
{};
// TODO: Implement AXI protocol checker
}; // read::chan
/**
* \brief The ACE cache master/IC port. This port has AC Snoop request as input and CR-CD-ACK channels as output.
*/
template <Connections::connections_port_t PortType = AUTO_PORT>
class master {
public:
typedef Connections::In<AC, PortType> ACPort;
typedef Connections::Out<CR, PortType> CRPort;
typedef Connections::Out<CD, PortType> CDPort;
//typedef Connections::Out<ACK, PortType> ACKPort;
ACPort ac;
CRPort cr;
CDPort cd;
//ACKPort ack;
master(const char *name)
:
ac(nvhls_concat(name, "_ac")),
cr(nvhls_concat(name, "_cr")),
cd(nvhls_concat(name, "_cd"))
//ack(nvhls_concat(name, "_ack"))
{}
void reset() {
ac.Reset();
cr.Reset();
cd.Reset();
//ack.Reset();
}
// Blocking / non-blocking accessors for each snoop sub-channel.
AC snp_rcv() { return ac.Pop(); }
bool snp_rcv_nb(AC &addr) { return ac.PopNB(addr); }
void snp_resp(const CR &resp) { cr.Push(resp); }
bool snp_resp_nb(const CR &resp) { return cr.PushNB(resp); }
void snp_data(const CD &data) { cd.Push(data); }
bool snp_data_nb(const CD &data) { return cd.PushNB(data); }
//void snp_ack(const ACK &ack_r) { ack.Push(ack_r); }
//bool snp_ack_nb(const ACK &ack_r) { return ack.PushNB(ack_r); }
template <class C>
void operator()(C &c) {
ac(c.ac);
cr(c.cr);
cd(c.cd);
//ack(c.ack);
}
}; // cache::master
/**
* \brief The ACE directory/home-node side port. This port has AC Snoop request as output and CR-CD-ACK channels as input.
*/
template <Connections::connections_port_t PortType = AUTO_PORT>
class dir {
public:
typedef Connections::Out<AC, PortType> ACPort;
typedef Connections::In<CR, PortType> CRPort;
typedef Connections::In<CD, PortType> CDPort;
//typedef Connections::In<ACK, PortType> ACKPort;
ACPort ac;
CRPort cr;
CDPort cd;
//ACKPort ack;
dir(const char *name)
:
ac(nvhls_concat(name, "_ac")),
cr(nvhls_concat(name, "_cr")),
cd(nvhls_concat(name, "_cd"))
//ack(nvhls_concat(name, "_ack"))
{}
void reset() {
ac.Reset();
cr.Reset();
cd.Reset();
//ack.Reset();
}
// Blocking / non-blocking accessors for each snoop sub-channel.
void snp_snd(AC &addr) { ac.Push(addr); }
bool snp_snd_nb(AC &addr) { return ac.PushNB(addr); }
CR snp_resp_rcv() { return cr.Pop(); }
bool snp_resp_rcv_nb(const CR &resp) { return cr.PopNB(resp); }
CD snp_data_rcv( ) { return cd.Pop(); }
bool snp_data_rcv_nb(CD &data) { return cd.PopNB(data); }
//ACK snp_ack_rcv( ) { return ack.Pop(); }
//bool snp_ack_rcv_nb(ACK &ack_r) { return ack.PopNB(ack_r); }
template <class C>
void operator()(C &c) {
ac(c.ac);
cr(c.cr);
cd(c.cd);
//ack(c.ack);
}
}; // cache::dir
}; // cache
}; // ace5
/**
* \brief Hardcoded values associated with the ACE standard.
* \ingroup ACE
*
* \par
* These enumerated values are defined by the ACE standard and should not be modified.
*
*/
class ACE_Encoding : public axi::AXI4_Encoding {
public:
/**
* \brief Hardcoded values for the AxDOMAIN field.
*/
class AxDOMAIN {
public:
enum {
_WIDTH = 2, // bits
NON_SHARE = 0,
INNER_SHARE = 1,
OUTER_SHARE = 2,
SYSTEM_SHARE = 3,
};
}; // Domain
/**
* \brief Hardcoded values for the ARSNOOP field (read-channel coherent ops).
*/
class ARSNOOP {
public:
enum {
_WIDTH = 4, // bits
// No-Snoop
// Coherent
RD_ONCE = 0x0, // Or Read NoSnoop if Domain is Non-Shared or System
RD_SHARED = 0x1,
RD_CLEAN = 0x2,
RD_NOT_SHARED_DIRTY = 0x3,
RD_UNIQUE = 0x7,
CLEAN_UNIQUE = 0xB,
MAKE_UNIQUE = 0xC,
// Cache maintenance
CLEAN_SHARED = 0x8,
CLEAN_INVALID = 0x9,
MAKE_INVALID = 0xD,
};
}; // ARSNOOP
/**
* \brief Hardcoded values for the AWSNOOP field (write-channel coherent ops).
*/
class AWSNOOP {
public:
enum {
_WIDTH = 3, // bits
// No-Snoop
// Coherent
WR_UNIQUE = 0, // Or Write NoSnoop id DOMAIN is Not-Shared or System
WR_LINE_UNIQUE = 1,
// Memory update
WR_CLEAN = 2,
WR_BACK = 3, // No-Snoop
EVICT = 4, // No-Snoop
WR_EVICT = 5, // No-Snoop
};
}; // AWSNOOP
/**
* \brief Hardcoded values for the ACSNOOP field (snoop-channel opcodes).
*/
class ACSNOOP {
public:
enum {
_WIDTH = 4, // bits
RD_ONCE = 0x0,
RD_SHARED = 0x1,
RD_CLEAN = 0x2,
RD_NOT_SHARED_DIRTY = 0x3,
RD_UNIQUE = 0x7,
CLEAN_SHARED = 0x8,
CLEAN_INVALID = 0x9,
MAKE_INVALID = 0xD,
DVM_COMPLETE = 0xE,
DVM_MESSAGE = 0xF,
};
}; // ACSNOOP
}; // End of ACE_Encoding
// Maps a read-channel (AR) coherent request code onto the snoop (AC)
// opcode the home node must broadcast for it. Asserts on codes that have
// no snoop mapping; with asserts disabled it returns 0 (RD_ONCE).
inline NVUINTW(ACE_Encoding::ACSNOOP::_WIDTH) rd_2_snoop (NVUINTW(ACE_Encoding::ARSNOOP::_WIDTH) &request_in) {
typedef ACE_Encoding::ARSNOOP AR;
typedef ACE_Encoding::ACSNOOP AC;
NVUINTW(ACE_Encoding::ACSNOOP::_WIDTH) snoop_out = 0;
if      (request_in == AR::RD_ONCE)             snoop_out = AC::RD_ONCE;              // 0  -> 0
else if (request_in == AR::RD_SHARED)           snoop_out = AC::RD_SHARED;            // 1  -> 1
else if (request_in == AR::RD_CLEAN)            snoop_out = AC::RD_CLEAN;             // 2  -> 2
else if (request_in == AR::RD_NOT_SHARED_DIRTY) snoop_out = AC::RD_NOT_SHARED_DIRTY;  // 3  -> 3
else if (request_in == AR::RD_UNIQUE)           snoop_out = AC::RD_UNIQUE;            // 7  -> 7
else if ((request_in == AR::CLEAN_UNIQUE) ||
         (request_in == AR::CLEAN_INVALID))     snoop_out = AC::CLEAN_INVALID;        // 11, 9 -> 9
else if ((request_in == AR::MAKE_UNIQUE) ||
         (request_in == AR::MAKE_INVALID))      snoop_out = AC::MAKE_INVALID;         // 12, 13 -> 13
else if (request_in == AR::CLEAN_SHARED)        snoop_out = AC::CLEAN_SHARED;         // 8  -> 8
else {
  NVHLS_ASSERT_MSG(0, "Read coherent access is out of valid snoop values.")
}
return snoop_out;
}; // End of rd_2_snoop
// Maps a write-channel (AW) coherent request code onto the snoop (AC)
// opcode the home node must broadcast: WR_UNIQUE -> CLEAN_INVALID,
// WR_LINE_UNIQUE -> MAKE_INVALID. Asserts on any other code; with asserts
// disabled it returns 0.
inline NVUINTW(ACE_Encoding::ACSNOOP::_WIDTH) wr_2_snoop (NVUINTW(ACE_Encoding::ARSNOOP::_WIDTH) &request_in) {
NVUINTW(ACE_Encoding::ACSNOOP::_WIDTH) snoop_out = 0;
if (request_in == ACE_Encoding::AWSNOOP::WR_UNIQUE) {             // 0 -> 9
  snoop_out = ACE_Encoding::ACSNOOP::CLEAN_INVALID;
} else if (request_in == ACE_Encoding::AWSNOOP::WR_LINE_UNIQUE) { // 1 -> 13
  snoop_out = ACE_Encoding::ACSNOOP::MAKE_INVALID;
} else {
  NVHLS_ASSERT_MSG(0, "Coherent access does not match any expected at Home.")
}
return snoop_out;
}; // End of wr_2_snoop
}; // ace
#endif // _AXI_ACE_H_
|
ic-lab-duth/NoCpad
|
src/include/axi4_configs_extra.h
|
<filename>src/include/axi4_configs_extra.h
#ifndef __AXI_CONFIG_DUTH_H__
#define __AXI_CONFIG_DUTH_H__
namespace axi {
// Extension of Matchlib AXI configuration
namespace cfg {
/**
* \brief A standard AXI configuration with SIZE field (64-bit data bus).
*/
struct standard_duth {
enum {
useACE = 0,
dataWidth = 64,
useVariableBeatSize = 1,      // enables the AxSIZE field
useMisalignedAddresses = 0,
useLast = 1,
useWriteStrobes = 1,
useBurst = 1, useFixedBurst = 1, useWrapBurst = 0, maxBurstSize = 256,
useQoS = 0, useLock = 0, useProt = 0, useCache = 0, useRegion = 0,
aUserWidth = 0, wUserWidth = 0, bUserWidth = 0, rUserWidth = 0,
addrWidth = 32,
idWidth = 4,
useWriteResponses = 1,
};
};
/**
* \brief Same as standard_duth but with a 128-bit data bus.
*/
struct standard_duth_128 {
enum {
useACE = 0,
dataWidth = 128,
useVariableBeatSize = 1,
useMisalignedAddresses = 0,
useLast = 1,
useWriteStrobes = 1,
useBurst = 1, useFixedBurst = 1, useWrapBurst = 0, maxBurstSize = 256,
useQoS = 0, useLock = 0, useProt = 0, useCache = 0, useRegion = 0,
aUserWidth = 0, wUserWidth = 0, bUserWidth = 0, rUserWidth = 0,
addrWidth = 32,
idWidth = 4,
useWriteResponses = 1,
};
};
/**
* \brief ACE enabled, AXI configuration.
*/
struct ace {
enum {
useACE = 1,
CacheLineWidth = 64, // bits
dataWidth = 64,
useVariableBeatSize = 1,
useMisalignedAddresses = 0,
useLast = 1,
useWriteStrobes = 1,
useBurst = 1, useFixedBurst = 1, useWrapBurst = 0, maxBurstSize = 256,
useQoS = 0, useLock = 0, useProt = 0, useCache = 0, useRegion = 0,
aUserWidth = 0, wUserWidth = 0, bUserWidth = 0, rUserWidth = 0,
addrWidth = 32,
idWidth = 4,
useWriteResponses = 1,
};
};
}; // namespace cfg
}; // namespace axi
#endif
|
ic-lab-duth/NoCpad
|
tb/tb_ace/ace_coherency_checker.h
|
#ifndef AXI_COHERENCY_CHECKER_H
#define AXI_COHERENCY_CHECKER_H
#include "systemc.h"
#include "../../src/include/dnp_ace_v0.h"
#include <deque>
#include <queue>
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int FULL_MASTER_NUM, unsigned int LITE_MASTER_NUM, unsigned int SLAVE_NUM>
SC_MODULE(ace_coherency_checker) {
typedef typename ace::ace5<axi::cfg::ace> ace5_;
typedef typename ace::ACE_Encoding enc_;
sc_in_clk clk;
sc_in <bool> rst_n;
sc_in<bool> stop_gen; // Not Used
sc_in< sc_uint<32> > addr_map[SLAVE_NUM][2];
// Scoreboard
sc_mutex *sb_lock;
std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload> > > *sb_rd_req_q;
std::vector< std::deque< msg_tb_wrap<ace5_::ReadPayload> > > *sb_rd_resp_q;
std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload> > > *sb_wr_req_q;
std::vector< std::deque< msg_tb_wrap<ace5_::WritePayload> > > *sb_wr_data_q;
std::vector< std::deque< msg_tb_wrap<ace5_::WRespPayload> > > *sb_wr_resp_q;
std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload> > > *sb_coherent_access_q;
std::vector< std::deque< msg_tb_wrap<ace5_::AC> > > *sb_snoop_req_q;
std::vector< std::deque< msg_tb_wrap<ace5_::CR> > > *sb_snoop_resp_q;
std::vector< std::deque< msg_tb_wrap<ace5_::CD> > > *sb_snoop_data_resp_q;
unsigned int total_cycles;
sc_time clk_period;
class snoop_set {
public:
ace5_::AC ac;
ace5_::CR cr;
ace5_::CD cd;
};
class snoop_trans_bundle {
public:
//snoop_set snoops[MASTER_NUM-1];
ace5_::AC ac[FULL_MASTER_NUM];
ace5_::CR cr[FULL_MASTER_NUM];
ace5_::CD cd[FULL_MASTER_NUM];
bool valid[FULL_MASTER_NUM];
unsigned count; // Must never
snoop_trans_bundle () {
for(int i=0; i<FULL_MASTER_NUM; ++i) valid[i] = false;
count = 0;
};
snoop_trans_bundle (
unsigned master_id_,
ace5_::AC ac_,
ace5_::CR cr_,
ace5_::CD cd_
) {
ac[master_id_] = ac_;
cr[master_id_] = cr_;
cd[master_id_] = cd_;
for(int i=0; i<FULL_MASTER_NUM; ++i) valid[i] = (i == master_id_);
count = 1;
};
unsigned push (
unsigned master_id_,
ace5_::AC ac_,
ace5_::CR cr_,
ace5_::CD cd_
) {
// Error handling ifs
if (count >= (FULL_MASTER_NUM)) {
std::cout << "Got more snoops than expected for addr: " << ac_.addr << "\n";
NVHLS_ASSERT(0);
} else if (valid[master_id_]) {
std::cout << "Second snoop at M#" << master_id_ << " for addr: " << std::hex << ac_.addr << std::dec << "\n";
NVHLS_ASSERT(0);
}
ac[master_id_] = ac_;
cr[master_id_] = cr_;
cd[master_id_] = cd_;
valid[master_id_] = true;
count++;
return count;
};
void clear () {
for(int i=0; i<FULL_MASTER_NUM; ++i) valid[i] = false;
count = 0;
};
};
// Vars to conclude who is the initiator of the responses
//  -- We know that we expect N-1 snoop reqs/resps and the one not received is the initiator, thus at most 2 addresses could be in front
//  -- Currently we expect all requests in order, thus the snoop requests are going to be at the front of
//     the queue. In case of multiple HOME nodes the above assumptions break
//  -- each request/response is identifiable by its address and requests of the same address MUST be in-order
//     This info should be used, to support multiple Home nodes, aka search in the queues for the first address occurrence
// Now each address has a struct that collects snoops from the various target masters.
// At no point should exist two requests for the same address at the same target.
//  -- In case more requests for the same address is allowed the internal class should provide distinct queues for each target
//     which will facilitate the requests that MUST be in order.
//  -- The above should probably never happen since two concurrent snoop requests are indistinguishable,
//     thus it's impossible for initiators to sort the sequence and transient states would be necessary.
// Per-address collector of in-flight snoop bundles, keyed by snooped address.
std::map<ace5_::Addr, snoop_trans_bundle> scrutineer; // Wow, nice word. Not gonna remember it. Don't care. Still Gonna use. Long live autocomplete!
// Read Response Generator -- statistics counters
int rd_resp_val;        // read responses validated
int rd_resp_generated;  // expected read responses pushed to the scoreboard
int rd_resp_inj;        // read responses injected
int wr_resp_generated;  // expected write responses pushed to the scoreboard
int wr_resp_inj;        // write responses injected
// Read Req Sink -- ejection counters
int rd_req_ej;
int wr_req_ej;
int wr_data_ej;
// Error counters (incremented when an expected item is missing from a scoreboard)
int error_sb_rd_req_not_found;
int error_sb_wr_req_not_found;
int error_sb_wr_data_not_found;
// Functions
void do_cycle();  // clocked checker thread (registered via SC_THREAD)
// Verify one completed snoop bundle and seed the downstream scoreboards.
void manage_bundle (snoop_trans_bundle & cur_trans_bundle, unsigned initiator);
// Map an address to its slave index via the addr_map ranges.
unsigned mem_map_resolve (ace5_::Addr &addr);
// True when the access type forbids a PassDirty snoop response.
bool req_denies_dirty (NVUINTW(enc_::ARSNOOP::_WIDTH) &request_in, bool is_read );
// True when the access type expects no data transfer in the response.
bool req_no_data_resp (NVUINTW(enc_::ARSNOOP::_WIDTH) &request_in, bool is_read );
// Constructor: registers do_cycle() as a clocked thread on the positive
// clock edge, with an active-low asynchronous reset on rst_n.
SC_HAS_PROCESS(ace_coherency_checker);
ace_coherency_checker(sc_module_name name_="ace_coherency_checker") : sc_module(name_)
{
  SC_THREAD(do_cycle);
  sensitive << clk.pos();
  reset_signal_is(rst_n, false);
}
};
// --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- //
// --- --- --- --- --- --- IMPLEMENTATION --- --- --- --- --- --- --- --- //
// --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- //
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int FULL_MASTER_NUM, unsigned int LITE_MASTER_NUM, unsigned int SLAVE_NUM>
void ace_coherency_checker<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, FULL_MASTER_NUM, LITE_MASTER_NUM, SLAVE_NUM>::do_cycle () {
  // Clocked checker thread. Each cycle it drains completed snoop
  // request/response(/data) triplets from the per-master scoreboard queues,
  // groups them per address into 'scrutineer' bundles and, once a bundle has
  // gathered responses from every expected master, hands it to manage_bundle()
  // for verification. All scoreboard accesses are guarded by sb_lock.
  total_cycles = 0;
  clk_period = (dynamic_cast<sc_clock *>(clk.get_interface()))->period();
  ace5_::AC snoop_req;
  ace5_::CR snoop_resp;
  ace5_::CD snoop_data;
  while(1) {
    wait();
    sb_lock->lock();
    for (unsigned i=0; i<FULL_MASTER_NUM; ++i) {
      if ( !(*sb_snoop_resp_q)[i].empty() ) {
        // A queued response implies its matching request is queued too; pop both.
        snoop_req = (*sb_snoop_req_q)[i].front().dut_msg;
        (*sb_snoop_req_q)[i].pop_front();
        snoop_resp = (*sb_snoop_resp_q)[i].front().dut_msg;
        (*sb_snoop_resp_q)[i].pop_front();
        // CR.resp bit 0 flags that a CD data transfer accompanies the response.
        if (snoop_resp.resp & 1) {
          snoop_data = (*sb_snoop_data_resp_q)[i].front().dut_msg;
          (*sb_snoop_data_resp_q)[i].pop_front();
        }
        // resolve who is the initiator, to know the number of the expected snoop reqs
        // ID format -> 0 - SLAVES - FULL_MASTERS - LITE_MASTERS - HOMES
        // (the HOME node passes the initiator's ID through the AC.prot field)
        unsigned initiator = snoop_req.prot;
        NVHLS_ASSERT_MSG(initiator>=SLAVE_NUM, "A SLAVE cannot be an initiator. (I.e. ID<SLAVE_NUM)")
        NVHLS_ASSERT_MSG(initiator<(SLAVE_NUM+FULL_MASTER_NUM+LITE_MASTER_NUM), "A HOME cannot be an initiator. (I.e. ID greater than the masters')")
        unsigned init_is_ace  = (initiator<(SLAVE_NUM+FULL_MASTER_NUM));
        unsigned init_is_lite = (initiator>=(SLAVE_NUM+FULL_MASTER_NUM));
        NVHLS_ASSERT_MSG(initiator<(SLAVE_NUM+FULL_MASTER_NUM+LITE_MASTER_NUM), "Wot?!?! Check the 2lines above....")
        auto cur_trans_bundle_it = scrutineer.find(snoop_req.addr);
        // If the address is not yet tracked in scrutineer, create its bundle.
        if ( cur_trans_bundle_it == scrutineer.end() ) { // Not found
          //scrutineer[snoop_req.addr] = snoop_trans_bundle(i, snoop_req, snoop_resp, snoop_data);
          scrutineer[snoop_req.addr] = snoop_trans_bundle();
          cur_trans_bundle_it = scrutineer.find(snoop_req.addr);
        }
        unsigned new_count = (cur_trans_bundle_it->second).push(i, snoop_req, snoop_resp, snoop_data);
        // Existing Address of scrutineer
        // Bundle completed: an ACE initiator does not snoop itself, so
        // FULL_MASTER_NUM-1 responders are expected; an ACE-Lite initiator
        // has no cache, so all FULL_MASTER_NUM full masters respond.
        unsigned master_initiator = (initiator - SLAVE_NUM);
        if ( init_is_ace && (new_count == FULL_MASTER_NUM-1) ) {
          manage_bundle(cur_trans_bundle_it->second, master_initiator);
          (cur_trans_bundle_it->second).clear();
          //scrutineer.erase(cur_trans_bundle_it); // ToDo : consider completely remove it to save space/ but lose time constr/deconstr
        } else if (init_is_lite && (new_count == FULL_MASTER_NUM) ) {
          manage_bundle(cur_trans_bundle_it->second, master_initiator);
          (cur_trans_bundle_it->second).clear();
        }
      } // End of new bundle handle
    } // End for Masters
    sb_lock->unlock();
  } // End of while(1)
}; // End of do_cycle
// --------------------------- //
// --- GENERATOR Functions --- //
// --------------------------- //
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int FULL_MASTER_NUM, unsigned int LITE_MASTER_NUM, unsigned int SLAVE_NUM>
void ace_coherency_checker<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, FULL_MASTER_NUM, LITE_MASTER_NUM, SLAVE_NUM>::manage_bundle (snoop_trans_bundle & cur_trans_bundle, unsigned initiator) {
  // Verifies one completed snoop bundle for a coherent access:
  //  1) locate the initiating request in the initiator's scoreboard queue,
  //  2) check every snooped master received the same, correctly-typed snoop,
  //  3) cross-check the data responses (all valid copies identical, at most
  //     one Dirty line),
  //  4) seed the downstream scoreboards with the transfers the HOME node is
  //     now expected to produce (memory write-back, memory read, read resp).
  // 'initiator' is the master index (i.e. full ID minus SLAVE_NUM).
  //
  // This is deprecated, when the lack of a snoop response gives hints of the initiator.
  // To add ACE LITE nodes, this is impossible and the initiator is passed by the HOME through the PROT field
  // Find the initiator
  /*
  unsigned initiator = MASTER_NUM+1;
  for (int i=0; i<MASTER_NUM; ++i) {
    if (!cur_trans_bundle.valid[i]) {
      NVHLS_ASSERT_MSG(initiator>MASTER_NUM, "Bundle went wrong. There are two masters that haven't received Snoop reqs.");
      initiator = i;
    }
  }
  NVHLS_ASSERT_MSG(initiator<MASTER_NUM, "Initiator not found!");
  */
  // Find the initiating request from the Scoreboard.
  // NOTE: is_read is initialized defensively; if the request is never found
  // the assert below fires, but builds that compile asserts out must not
  // read an indeterminate value.
  ace5_::AddrPayload coherent_init;
  bool coherent_init_found = false;
  bool is_read = false;
  // Match by address; any non-initiator slot of the bundle holds the snooped
  // address, (initiator+1)%FULL_MASTER_NUM is simply one such slot.
  for (size_t i=0; i<(*sb_coherent_access_q)[initiator].size(); ++i) {
    msg_tb_wrap<ace5_::AddrPayload> cur_coherent_req = ((*sb_coherent_access_q)[initiator][i]);
    if (cur_coherent_req.dut_msg.addr == cur_trans_bundle.ac[(initiator+1)%FULL_MASTER_NUM].addr) {
      coherent_init = cur_coherent_req.dut_msg;
      coherent_init_found = true;
      is_read = cur_coherent_req.is_read;
      (*sb_coherent_access_q)[initiator].erase((*sb_coherent_access_q)[initiator].begin()+i);
      break;
    }
  }
  NVHLS_ASSERT_MSG(coherent_init_found, "Init transaction not found!");
  // Check that all Snoop requests are the same and of correct type.
  ace5_::AC snoop_type_expected;
  snoop_type_expected.addr  = coherent_init.addr;
  snoop_type_expected.snoop = is_read ? ace::rd_2_snoop(coherent_init.snoop) : ace::wr_2_snoop(coherent_init.snoop);
  for (unsigned i=0; i<FULL_MASTER_NUM; ++i) {
    if(i != initiator) {
      bool addr_is_equal  = (cur_trans_bundle.ac[i].addr  == snoop_type_expected.addr);
      bool snoop_is_equal = (cur_trans_bundle.ac[i].snoop == snoop_type_expected.snoop);
      if (!(addr_is_equal && snoop_is_equal)) {
        std::cout << "Home node didn't send correct snoop requests for access: " << coherent_init << "\n";
        std::cout << " - Expected: " << snoop_type_expected << "\n";
        std::cout << " - Got @[MASTER" << i+SLAVE_NUM << "]: " << cur_trans_bundle.ac[i] << "\n";
        NVHLS_ASSERT(0);
      }
    }
  }
  // Go through responses to see if there are data available.
  // All caches holding a valid copy must return identical data, and at most
  // one of them may pass the line as Dirty.
  ace5_::CD snoop_data;
  ace5_::CR::Resp resp_accum = 0;
  bool got_data  = false;
  bool got_dirty = false;
  for (unsigned i=0; i<FULL_MASTER_NUM; ++i) {
    if(i != initiator) {
      resp_accum |= cur_trans_bundle.cr[i].resp;
      bool this_has_data = cur_trans_bundle.cr[i].resp & 0x1; // DataTransfer bit
      bool this_dirty    = cur_trans_bundle.cr[i].resp & 0x4; // PassDirty bit
      if(got_data && this_has_data) {
        if (snoop_data.data != cur_trans_bundle.cd[i].data){
          std::cout << "ERR : Caches have different values for valid cache lines!\n";
          std::cout << "      " << snoop_data << "\n";
          std::cout << "      " << cur_trans_bundle.cd[i] << "\n";
          NVHLS_ASSERT(0);
        }
      }
      if (got_dirty && this_dirty) {
        std::cout << "ERR : Got 2 Dirty lines for Addr: " << snoop_type_expected.addr << "\n";
        NVHLS_ASSERT(0);
      }
      if (this_dirty || (this_has_data && !got_data)) {
        got_data   = true;
        got_dirty  = this_dirty;
        snoop_data = cur_trans_bundle.cd[i];
      }
    }
  }
  // When Dirty gets Written back to Mem
  bool req_no_dirty = req_denies_dirty(coherent_init.snoop, is_read) || req_no_data_resp(coherent_init.snoop, is_read);
  // When a dirty response answers a request that denies dirty, the HOME must
  // write the line back to memory: expect a write request + data at the slave.
  if (got_dirty && req_no_dirty) {
    // Setup scoreboards to reflect the responses
    msg_tb_wrap<ace5_::AddrPayload> temp_wr_req_tb;
    temp_wr_req_tb.dut_msg = coherent_init;
    // Mask all ACE fields (the write-back leaves the coherent domain)
    temp_wr_req_tb.dut_msg.snoop   = 0;
    temp_wr_req_tb.dut_msg.domain  = 0;
    temp_wr_req_tb.dut_msg.barrier = 0;
    unsigned tgt_mem = mem_map_resolve(coherent_init.addr);
    (*sb_wr_req_q)[tgt_mem].push_back(temp_wr_req_tb);
    msg_tb_wrap< ace5_::WritePayload > wr_back_beat;
    wr_back_beat.dut_msg.data  = snoop_data.data;
    wr_back_beat.dut_msg.last  = snoop_data.last;
    wr_back_beat.dut_msg.wstrb = -1; // full strobe: whole line is written back
    (*sb_wr_data_q)[tgt_mem].push_back(wr_back_beat);
  }
  // Setup scoreboards to reflect the responses
  if (is_read) { // Read Coherent access
    // If there are no data, HOME will access Memory
    bool req_no_data = req_no_data_resp(coherent_init.snoop, is_read);
    if ( got_data || req_no_data ) {
      // Push Read Response to expect at initiator
      ace5_::ReadPayload data_responce;
      data_responce.data = req_no_data ? (ace5_::CD::Data) 0 : snoop_data.data;
      data_responce.last = req_no_data ? (ace5_::CD::Last) 1 : snoop_data.last;
      data_responce.resp = req_no_dirty ? resp_accum & 0xA : resp_accum & 0xE; // If request denies Dirty, Expect dropped PassDirty
      data_responce.id   = coherent_init.id;
      msg_tb_wrap<ace5_::ReadPayload> temp_rd_resp_tb;
      temp_rd_resp_tb.dut_msg = data_responce;
      (*sb_rd_resp_q)[initiator].push_back(temp_rd_resp_tb);
    } else {
      // Generate expected request to Mem
      msg_tb_wrap<ace5_::AddrPayload> temp_rd_req_tb;
      temp_rd_req_tb.dut_msg = coherent_init;
      // Mask all ACE fields
      temp_rd_req_tb.dut_msg.snoop   = 0;
      temp_rd_req_tb.dut_msg.domain  = 0;
      temp_rd_req_tb.dut_msg.barrier = 0;
      unsigned tgt_mem = mem_map_resolve(coherent_init.addr);
      (*sb_rd_req_q)[tgt_mem].push_back(temp_rd_req_tb);
      // Generate expected response from Mem: the memory model returns a
      // counting byte pattern, one ReadPayload beat per data-channel width.
      ace5_::ReadPayload beat_expected;
      beat_expected.data = 0;
      beat_expected.id   = coherent_init.id;
      unsigned byte_count  = 0;
      unsigned bytes_total = (ace5_::C_CACHE_WIDTH/8);
      while(byte_count<bytes_total) {
        beat_expected.data |= ( ((ace5_::Data)(byte_count & 0xFF)) << ((ace5_::Data)(byte_count*8)));
        byte_count++;
        if(((byte_count%(ace5_::C_DATA_CHAN_WIDTH/8))==0) || (byte_count == bytes_total)) {
          beat_expected.resp = tgt_mem; // slave sneaks its ID into resp for reorder checks
          beat_expected.last = (byte_count == bytes_total);
          msg_tb_wrap< ace5_::ReadPayload > temp_rd_resp_tb;
          temp_rd_resp_tb.dut_msg  = beat_expected;
          temp_rd_resp_tb.time_gen = sc_time_stamp();
          (*sb_rd_resp_q)[initiator].push_back(temp_rd_resp_tb);
          beat_expected.data = 0;
        }
      }
    }
  } else { // Write Coherent access
    // The expected behavior is handled by the generator, being unchanged.
  }
};
// ------------------------ //
// --- VERIFY Functions --- //
// ------------------------ //
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int FULL_MASTER_NUM, unsigned int LITE_MASTER_NUM, unsigned int SLAVE_NUM>
bool ace_coherency_checker<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, FULL_MASTER_NUM, LITE_MASTER_NUM, SLAVE_NUM>::req_denies_dirty (NVUINTW(enc_::ARSNOOP::_WIDTH) &request_in, bool is_read ) {
  // Returns true when the coherent access type forbids a PassDirty snoop
  // response (i.e. responders must not hand dirty ownership to the initiator).
  // Every write-channel access denies dirty.
  if (!is_read) return true;
  // Read-channel snoop types that deny a dirty response.
  const bool denies =
      (request_in == enc_::ARSNOOP::RD_ONCE)             ||
      (request_in == enc_::ARSNOOP::RD_CLEAN)            ||
      (request_in == enc_::ARSNOOP::RD_NOT_SHARED_DIRTY) ||
      (request_in == enc_::ARSNOOP::CLEAN_UNIQUE)        ||
      (request_in == enc_::ARSNOOP::CLEAN_INVALID);
  return denies;
};
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int FULL_MASTER_NUM, unsigned int LITE_MASTER_NUM, unsigned int SLAVE_NUM>
bool ace_coherency_checker<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, FULL_MASTER_NUM, LITE_MASTER_NUM, SLAVE_NUM>::req_no_data_resp (NVUINTW(enc_::ARSNOOP::_WIDTH) &request_in, bool is_read ) {
  // Returns true when the coherent access type expects NO data transfer in
  // its response (cache-maintenance style operations). Only read-channel
  // access types can qualify; write accesses always return false.
  if (!is_read) return false;
  const bool dataless =
      (request_in == enc_::ARSNOOP::CLEAN_UNIQUE)  ||
      (request_in == enc_::ARSNOOP::MAKE_UNIQUE)   ||
      (request_in == enc_::ARSNOOP::CLEAN_SHARED)  ||
      (request_in == enc_::ARSNOOP::CLEAN_INVALID) ||
      (request_in == enc_::ARSNOOP::MAKE_INVALID);
  return dataless;
};
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int FULL_MASTER_NUM, unsigned int LITE_MASTER_NUM, unsigned int SLAVE_NUM>
unsigned ace_coherency_checker<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, FULL_MASTER_NUM, LITE_MASTER_NUM, SLAVE_NUM>::mem_map_resolve (ace5_::Addr &addr) {
  // Linear scan over the [low, high] address ranges of addr_map; returns the
  // index of the first slave whose (inclusive) range contains addr.
  unsigned idx = 0;
  while (idx < SLAVE_NUM) {
    const bool above_low  = (addr >= addr_map[idx][0].read());
    const bool below_high = (addr <= addr_map[idx][1].read());
    if (above_low && below_high) return idx;
    ++idx;
  }
  // No range matched: the address is outside the memory map.
  NVHLS_ASSERT_MSG(0, "Target Addr not found!");
  return 0; // Or send 404
}
#endif // AXI_COHERENCY_CHECKER_H
|
ic-lab-duth/NoCpad
|
tb/tb_ace/acelite_master.h
|
<gh_stars>1-10
#ifndef _ACE_LITE_MASTER_H_
#define _ACE_LITE_MASTER_H_
#include "systemc.h"
#include "../helper_non_synth.h"
#include "../../src/include/dnp_ace_v0.h"
#include "../tb_wrap.h"
#include <deque>
#include <queue>
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
// Traffic-generation caps -- deliberately smaller than the protocol maxima
// quoted in the comments, to keep randomized simulations short.
#define AXI4_MAX_LEN 4 // FIXED, WRAP bursts has a maximum of 16 beats
#define AXI4_MAX_INCR_LEN 4 // AXI4 extends INCR bursts upto 256 beats
#define AXI_TID_NUM 4 // number of distinct transaction IDs the generator uses
#define AXI_BURST_NUM 3 // three AXI burst encodings (FIXED/INCR/WRAP)
#define ACE_CACHE_LINES 8 // distinct cache-line addresses used for coherent traffic
// Randomized ACE-Lite master model: generates plain AXI read/write bursts plus
// coherent (ACE-Lite) accesses, injects them through Connections ports, and
// verifies the responses against shared scoreboard queues.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
SC_MODULE(acelite_master) {
  typedef typename ace::ace5<axi::cfg::ace> ace5_;
  typedef typename ace::ACE_Encoding enc_;
  sc_in_clk clk;
  sc_in <bool> rst_n;            // active-low reset
  sc_in<bool> stop_gen;          // when high, stop generating new traffic
  sc_in< sc_uint<32> > addr_map[SLAVE_NUM][2]; // [i][0]=low, [i][1]=high (inclusive) per slave
  // Master ACE Ports
  Connections::Out<ace5_::AddrPayload>  ar_out;
  Connections::In<ace5_::ReadPayload>   r_in;
  Connections::Out<ace5_::AddrPayload>  aw_out;
  Connections::Out<ace5_::WritePayload> w_out;
  Connections::In<ace5_::WRespPayload>  b_in;
  // Scoreboard (shared with the other TB components, guarded by sb_lock)
  sc_mutex *sb_lock;
  std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload> > >  *sb_rd_req_q;
  std::vector< std::deque< msg_tb_wrap<ace5_::ReadPayload> > >  *sb_rd_resp_q;
  std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload > > > *sb_wr_req_q;
  std::vector< std::deque< msg_tb_wrap<ace5_::WritePayload> > > *sb_wr_data_q;
  std::vector< std::deque< msg_tb_wrap<ace5_::WRespPayload> > > *sb_wr_resp_q;
  std::vector< std::deque< msg_tb_wrap<ace5_::AddrPayload> > >  *sb_coherent_access_q;
  std::vector< std::deque< msg_tb_wrap<ace5_::AC> > > *sb_snoop_req_q;
  std::vector< std::deque< msg_tb_wrap<ace5_::CR> > > *sb_snoop_resp_q;
  std::vector< std::deque< msg_tb_wrap<ace5_::CD> > > *sb_snoop_data_resp_q;
  // Queues to store generated transactions awaiting injection
  std::queue<ace5_::AddrPayload >  stored_rd_trans;
  std::queue<ace5_::AddrPayload >  stored_wr_trans;
  std::queue<ace5_::WritePayload>  stored_wr_data;
  std::queue<ace5_::CR>  stored_cache_resp;
  std::queue<ace5_::CD>  stored_cache_data;
  std::deque<ace5_::AddrPayload>  sb_rd_order_q; // queue to check ordering
  std::deque<ace5_::AddrPayload>  sb_wr_order_q; // queue to check ordering
  // Outstanding coherent accesses per cache-line address (used to serialize
  // conflicting accesses to the same line).
  std::map<ace5_::Addr, int> cache_outstanding;
  std::map<ace5_::Addr, int> cache_outstanding_writes;
  int MASTER_ID = -1; // global node ID; must be set by the TB before simulation
  // Per-cycle generation probabilities in percent (0-100)
  unsigned int AXI_GEN_RATE_RD;
  unsigned int AXI_GEN_RATE_WR;
  unsigned int ACE_GEN_RATE_CACHE;
  // int FLOW_CTRL; // 0: READY-VALID
  //                // 1: CREDITS
  //                // 2: FIFO
  //                // 3: Credits fifo based
  // delays / latency statistics
  long total_cycles;
  sc_time clk_period;
  unsigned long long int rd_resp_delay = 0;      // accumulated read-response latency (cycles)
  unsigned long long int rd_resp_count = 0;
  unsigned long long int wr_resp_delay = 0;      // accumulated write-response latency (cycles)
  unsigned long long int wr_resp_count = 0;
  unsigned long long int last_rd_sinked_cycle = 0;
  unsigned long long int last_wr_sinked_cycle = 0;
  unsigned long long int rd_resp_data_count = 0;
  unsigned long long int wr_resp_data_count = 0;
  bool stop_at_tail, has_stopped_gen;
  // Generator counters
  int cache_trans_generated;
  int rd_trans_generated;
  int rd_data_generated;
  int wr_trans_generated;
  int wr_data_generated;
  int rd_trans_inj;
  int wr_trans_inj;
  int wr_data_inj;
  unsigned int gen_rd_addr;      // rolling address for generated reads
  unsigned int gen_wr_addr;      // rolling address for generated writes
  unsigned int resp_val_expect;
  // Response sink counters
  int rd_resp_ej;
  int wr_resp_ej;
  // Errors
  int error_sb_rd_resp_not_found;
  int error_sb_wr_resp_not_found;
  // Functions
  void do_cycle();                                    // main clocked thread
  void gen_new_rd_trans();                            // create one random AXI read burst
  void gen_new_wr_trans();                            // create one random AXI write burst
  void gen_new_cache_trans();                         // create one random coherent (ACE-Lite) access
  void gen_snoop_resp(ace5_::AC &rcv_snoop_req);      // (unused here) snoop response generation
  void verify_rd_resp(ace5_::ReadPayload &rcv_rd_resp);
  void verify_wr_resp(ace5_::WRespPayload &rcv_wr_resp);
  bool eq_rd_data(ace5_::ReadPayload &rcv_rd_data, ace5_::ReadPayload &sb_rd_data);
  bool eq_wr_resp(ace5_::WRespPayload &rcv_wr_resp, ace5_::WRespPayload &sb_wr_resp);
  unsigned mem_map_resolve(ace5_::Addr &addr);        // map address -> slave index
  // Constructor: registers do_cycle() on the positive clock edge with
  // active-low reset; generation rates default to coherent-only traffic.
  SC_HAS_PROCESS(acelite_master);
  acelite_master(sc_module_name name_) : sc_module(name_)
  {
    MASTER_ID = -1;
    AXI_GEN_RATE_RD = 0;
    AXI_GEN_RATE_WR = 0;
    ACE_GEN_RATE_CACHE = 5;
    SC_THREAD(do_cycle);
    sensitive << clk.pos();
    reset_signal_is(rst_n, false);
  }
};
// --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- //
// --- --- --- --- --- --- IMPLEMENTATION --- --- --- --- --- --- --- --- //
// --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- //
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void acelite_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::do_cycle () {
  // Main clocked thread. Per cycle:
  //  1) probabilistically generate new read/write/coherent transactions,
  //  2) inject at most one AR, one AW and one W beat (non-blocking pushes),
  //     holding back coherent accesses that conflict with an outstanding
  //     access to the same cache line,
  //  3) pop and verify any arriving R/B responses.
  total_cycles = 0;
  cache_trans_generated = 0;
  rd_trans_generated = 0;
  rd_data_generated = 0;
  wr_trans_generated = 0;
  wr_data_generated = 0;
  rd_trans_inj = 0;
  wr_trans_inj = 0;
  wr_data_inj = 0;
  gen_rd_addr = 0x10100;
  gen_wr_addr = 0;
  resp_val_expect = 0;
  rd_resp_ej = 0;
  wr_resp_ej = 0;
  // Clear errors
  error_sb_rd_resp_not_found = 0;
  error_sb_wr_resp_not_found = 0;
  clk_period = (dynamic_cast<sc_clock *>(clk.get_interface()))->period();
  // Reset all Connections ports
  ar_out.Reset();
  r_in.Reset();
  aw_out.Reset();
  w_out.Reset();
  b_in.Reset();
  while(1) {
    wait();
    total_cycles++;
    // Transaction Generator (each rate is a percentage per cycle)
    if (!stop_gen.read()) {
      unsigned int rnd_val_rd = rand()%100;
      if (rnd_val_rd < AXI_GEN_RATE_RD) {
        gen_new_rd_trans();
      }
      unsigned int rnd_val_wr = rand()%100;
      if (rnd_val_wr < AXI_GEN_RATE_WR) {
        gen_new_wr_trans();
      }
      unsigned int rnd_val_cache = rand()%100;
      if (rnd_val_cache < ACE_GEN_RATE_CACHE) {
        gen_new_cache_trans();
      }
    }
    // Read Request Injection.
    // A coherent read is stalled while a coherent WRITE to the same line is
    // outstanding, to avoid indistinguishable concurrent snoops.
    if (!stored_rd_trans.empty()) {
      ace5_::AddrPayload tmp_ar = stored_rd_trans.front();
      bool is_coherent = (tmp_ar.snoop || tmp_ar.domain.xor_reduce());
      int coherent_writes_outstanding = cache_outstanding_writes[tmp_ar.addr];
      if (!(is_coherent && coherent_writes_outstanding)) {
        if (ar_out.PushNB(tmp_ar)) {
          stored_rd_trans.pop();
          std::cout << "[Master " << MASTER_ID << "] : PUSHED AR:" << tmp_ar << " @" << sc_time_stamp() << std::endl;
          rd_trans_inj++;
          if(is_coherent) {
            cache_outstanding[tmp_ar.addr]++;
            // Push it to ACE Checker
            sb_lock->lock();
            msg_tb_wrap<ace5_::AddrPayload> temp_rd_coherent_req_tb;
            temp_rd_coherent_req_tb.dut_msg  = tmp_ar;
            temp_rd_coherent_req_tb.is_read  = true;
            temp_rd_coherent_req_tb.time_gen = sc_time_stamp();
            (*sb_coherent_access_q)[MASTER_ID - SLAVE_NUM].push_back(temp_rd_coherent_req_tb);
            sb_lock->unlock();
          }
        }
      }
    }
    // Write Request Injection.
    // A coherent write is stalled while ANY coherent access to the same line
    // is outstanding.
    if (!stored_wr_trans.empty()) {
      ace5_::AddrPayload tmp_aw = stored_wr_trans.front();
      bool is_coherent = (tmp_aw.snoop || tmp_aw.domain.xor_reduce());
      int coherent_outstanding = cache_outstanding[tmp_aw.addr];
      if (!(is_coherent && coherent_outstanding)) {
        if (aw_out.PushNB(tmp_aw)) {
          stored_wr_trans.pop();
          std::cout << "[Master " << MASTER_ID << "] : PUSHED AW: " << tmp_aw << " @" << sc_time_stamp() << std::endl;
          wr_trans_inj++;
          if(is_coherent) {
            cache_outstanding[tmp_aw.addr]++;
            cache_outstanding_writes[tmp_aw.addr]++;
            sb_lock->lock();
            msg_tb_wrap<ace5_::AddrPayload> temp_wr_coherent_req_tb;
            temp_wr_coherent_req_tb.dut_msg  = tmp_aw;
            temp_wr_coherent_req_tb.is_read  = false;
            temp_wr_coherent_req_tb.time_gen = sc_time_stamp();
            (*sb_coherent_access_q)[MASTER_ID - SLAVE_NUM].push_back(temp_wr_coherent_req_tb);
            sb_lock->unlock();
          }
        }
      }
    }
    // Write Data Injection (one beat per cycle at most)
    if (!stored_wr_data.empty()) {
      ace5_::WritePayload tmp_w = stored_wr_data.front();
      if (w_out.PushNB(tmp_w)) {
        stored_wr_data.pop();
        std::cout << "[Master " << MASTER_ID << "] : PUSHED W: " << tmp_w << " @" << sc_time_stamp() << std::endl;
        wr_data_inj++;
      }
    }
    // Read Response Ejection
    ace5_::ReadPayload rcv_rd_resp;
    bool got_ar_resp = r_in.PopNB(rcv_rd_resp); // Lacks backpressure
    if(got_ar_resp){
      verify_rd_resp(rcv_rd_resp);
    }
    // Write Response Ejection
    ace5_::WRespPayload rcv_wr_resp;
    bool got_b_resp = b_in.PopNB(rcv_wr_resp); // Lacks backpressure
    if(got_b_resp){
      verify_wr_resp(rcv_wr_resp);
    }
    // --- ACE --- //
  }; // End of while(1)
}; // End of do_cycle
// --------------------------- //
// --- GENERATOR Functions --- //
// --------------------------- //
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void acelite_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::gen_new_rd_trans() {
  // Generate one random plain-AXI read burst, queue it for injection, push
  // the (possibly downsized) expected request to the target slave's
  // scoreboard, and precompute the expected data beats at this master.
  ace5_::AddrPayload rd_req_m;//(SINGLE, -1, -1, -1);
  rd_req_m.id    = (rand()%AXI_TID_NUM); // (rand()% 2)+2;
  rd_req_m.size  = ((rand()%my_log2c(RD_M_LANES))+1) & ((1<<my_log2c(RD_M_LANES))-1);
  rd_req_m.burst = (rand()%AXI_BURST_NUM);
  // WRAP bursts need power-of-2 lengths; INCR lengths are capped when the
  // slave is narrower than the master (downsizing inflates beat count).
  rd_req_m.len   = (rd_req_m.burst==enc_::AXBURST::WRAP)  ? ((1<<(rand()%my_log2c(AXI4_MAX_LEN+1)))-1) :
                   (rd_req_m.burst==enc_::AXBURST::FIXED) ? (rand()%AXI4_MAX_LEN)                      :
                   (RD_M_LANES>RD_S_LANES) ? (rand()%(AXI4_MAX_INCR_LEN/(RD_M_LANES/RD_S_LANES))) // INCR With Downsize // Cap the maximum len in case of transactions downsize (which increases len)
                                           : (rand()%AXI4_MAX_INCR_LEN)                           ; // INCR WithOut Downsize
  // Increasing address to keep track of the transactions
  // NOTE(review): the '& (1<<size)' term only adds a single optional bit of
  // offset -- presumably intended as a size-aligned jitter; confirm intent.
  rd_req_m.addr  = gen_rd_addr + ((rand()%RD_M_LANES) & (1<<rd_req_m.size));
  gen_rd_addr    = (gen_rd_addr + RD_M_LANES) % (addr_map[SLAVE_NUM-1][1].read()+1);
  // Push it to injection queue
  stored_rd_trans.push(rd_req_m);
  // Push it to Scoreboard
  sb_lock->lock();
  // Consider resizing: if a beat is wider than the slave, the slave sees a
  // smaller size and a proportionally larger len.
  ace5_::AddrPayload rd_req_s;
  rd_req_s.id    = rd_req_m.id;
  rd_req_s.size  = ((1<<rd_req_m.size)>RD_S_LANES) ? my_log2c(RD_S_LANES) : rd_req_m.size.to_uint();
  rd_req_s.len   = ((1<<rd_req_m.size)>RD_S_LANES) ? (((rd_req_m.len.to_uint()+1)<<(rd_req_m.size.to_uint()-my_log2c(RD_S_LANES)))-1) : rd_req_m.len.to_uint();
  rd_req_s.burst = rd_req_m.burst;
  rd_req_s.addr  = rd_req_m.addr;
  msg_tb_wrap<ace5_::AddrPayload> temp_rd_req_tb;
  temp_rd_req_tb.dut_msg  = rd_req_s;
  temp_rd_req_tb.time_gen = sc_time_stamp();
  unsigned dst = mem_map_resolve(rd_req_s.addr);
  (*sb_rd_req_q)[dst].push_back(temp_rd_req_tb);
  sb_lock->unlock();
  // Push into order queue - Reorder check extension
  sb_rd_order_q.push_back(rd_req_m);
  rd_trans_generated++;
  // --- --- --- --- --- --- --- --- //
  //  Generate the expected Responce
  // --- --- --- --- --- --- --- --- //
  // The memory model returns the byte-counting pattern; rebuild the expected
  // beats here, walking the byte lanes exactly as the burst type dictates.
  sb_lock->lock();
  ace5_::ReadPayload beat_expected;
  beat_expected.id = rd_req_m.id;
  // Create Expected Response
  unsigned long int bytes_total = ((rd_req_m.len+1)<<rd_req_m.size);
  unsigned long int byte_count  = 0;
  unsigned char m_init_ptr = rd_req_m.addr % RD_M_LANES; // starting byte lane
  unsigned char m_ptr      = m_init_ptr;
  unsigned char m_size     = rd_req_m.size;
  beat_expected.data = 0;
  while(byte_count<bytes_total) {
    beat_expected.data |= ( ((ace5_::Data)(byte_count & 0xFF)) << ((ace5_::Data)(m_ptr*8)));
    byte_count++;
    // FIXED bursts rewind to the same lanes each beat; other bursts advance.
    m_ptr = (rd_req_m.burst==FIXED) ? ((m_ptr+1)%(1<<m_size)) + m_init_ptr
                                    : (m_ptr+1)%RD_M_LANES   ;
    if(((m_ptr%(1<<m_size))==0) || (byte_count == bytes_total)) {
      beat_expected.resp = mem_map_resolve(rd_req_m.addr); // slave ID sneaked into resp
      beat_expected.last = (byte_count == bytes_total);
      msg_tb_wrap< ace5_::ReadPayload > temp_rd_resp_tb;
      temp_rd_resp_tb.dut_msg  = beat_expected;
      temp_rd_resp_tb.time_gen = sc_time_stamp();
      (*sb_rd_resp_q)[MASTER_ID-SLAVE_NUM].push_back(temp_rd_resp_tb);
      beat_expected.data = 0;
      rd_data_generated++;
    }
  }
  sb_lock->unlock();
}; // End of Read generator
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void acelite_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::gen_new_wr_trans() {
  // Generate one random plain-AXI write burst: queue the AW and W beats for
  // injection, and push the (possibly downsized) expected request and data
  // beats to the target slave's scoreboard.
  sb_lock->lock();
  ace5_::AddrPayload m_wr_req;//(SINGLE, -1, -1, -1);
  m_wr_req.id    = (rand()%AXI_TID_NUM) | (MASTER_ID << 2); // (rand()%4)+2;
  m_wr_req.size  = ((rand()%my_log2c(WR_M_LANES))+1) & ((1<<my_log2c(WR_M_LANES))-1); // 0 size is NOT supported
  m_wr_req.burst = (rand()%AXI_BURST_NUM);
  m_wr_req.len   = (m_wr_req.burst==enc_::AXBURST::WRAP)  ? ((1<<(rand()%my_log2c(AXI4_MAX_LEN+1)))-1) :
                   (m_wr_req.burst==enc_::AXBURST::FIXED) ? (rand()%AXI4_MAX_LEN)                      :
                   (WR_M_LANES>WR_S_LANES) ? (rand()%(AXI4_MAX_INCR_LEN/(WR_M_LANES/WR_S_LANES))) // INCR With Downsize // Cap the maximum len in case of transactions downsize (which increases len)
                                           : (rand()%AXI4_MAX_INCR_LEN)                           ; // INCR WithOut Downsize
  // Increasing address to keep track of the transactions
  // Aligned on size transactions (Although non-aligned should be an easy addition)
  m_wr_req.addr  = gen_wr_addr + ((rand()%WR_M_LANES) & (1<<m_wr_req.size));
  gen_wr_addr    = (gen_wr_addr + WR_M_LANES) % (addr_map[SLAVE_NUM-1][1].read()+1);;
  // Push it to injection queue
  stored_wr_trans.push(m_wr_req);
  // Push into order queue - Reorder check extension
  sb_wr_order_q.push_back(m_wr_req);
  // Create dummy write data
  ace5_::WritePayload cur_beat;      // The beat that will be injected at MASTER
  ace5_::WritePayload beat_at_slave; // The expected beat ejected at SLAVE
  unsigned long int bytes_total = ((m_wr_req.len+1)<<m_wr_req.size);
  unsigned long int byte_count  = 0;
  // Byte-lane walkers at master and slave widths (they diverge on downsize).
  unsigned char m_init_ptr = m_wr_req.addr % WR_M_LANES;
  unsigned char s_init_ptr = m_wr_req.addr % WR_S_LANES;
  unsigned char m_ptr      = m_init_ptr;
  unsigned char s_ptr      = s_init_ptr;
  unsigned char m_size     = m_wr_req.size;
  unsigned char s_size     = ((1<<m_size)>WR_S_LANES) ? my_log2c(WR_S_LANES) : m_size;
  unsigned char m_len      = m_wr_req.len;
  unsigned char s_len      = ((1<<m_size)>WR_S_LANES) ? (((m_len+1)<<(m_size-s_size))-1) : m_len;
  // Push it to Scoreboard (downsized view of the request, as the slave sees it)
  ace5_::AddrPayload s_wr_req;
  s_wr_req.id    = m_wr_req.id;
  s_wr_req.addr  = m_wr_req.addr;
  s_wr_req.size  = s_size;
  s_wr_req.len   = s_len;
  s_wr_req.burst = m_wr_req.burst;
  msg_tb_wrap<ace5_::AddrPayload> temp_wr_req_tb;
  temp_wr_req_tb.dut_msg = s_wr_req;
  unsigned dst = mem_map_resolve(s_wr_req.addr);
  (*sb_wr_req_q)[dst].push_back(temp_wr_req_tb);
  cur_beat.data       = 0;
  cur_beat.wstrb      = 0;
  beat_at_slave.data  = 0;
  beat_at_slave.wstrb = 0;
  // Fill byte-by-byte; the last byte carries MASTER_ID so responses can be
  // attributed to their source.
  while(byte_count<bytes_total) {
    unsigned byte_to_write = (byte_count==bytes_total-1) ? MASTER_ID : byte_count;
    cur_beat.data       |= (((ace5_::Data)(byte_to_write & 0xFF)) << ((ace5_::Data)(m_ptr*8)));
    cur_beat.wstrb      |= (((ace5_::Data)1) << ((ace5_::Data)m_ptr));
    beat_at_slave.data  |= (((ace5_::Data)(byte_to_write & 0xFF)) << ((ace5_::Data)(s_ptr*8)));
    beat_at_slave.wstrb |= (((ace5_::Data)1) << ((ace5_::Data)s_ptr));
    byte_count++;
    m_ptr = (m_wr_req.burst==FIXED) ? ((m_ptr+1)%(1<<m_size)) + m_init_ptr :
                                      (m_ptr+1)%WR_M_LANES ;
    s_ptr = (s_wr_req.burst==FIXED) ? ((s_ptr+1)%(1<<s_size)) + s_init_ptr :
                                      (s_ptr+1)%WR_S_LANES ;
    // Master-side beat boundary: queue the injected beat.
    if(((m_ptr%(1<<m_size))==0) || (byte_count == bytes_total)) {
      wr_data_generated++;
      cur_beat.last = (byte_count == bytes_total);
      stored_wr_data.push(cur_beat);
      cur_beat.data  = 0;
      cur_beat.wstrb = 0;
    }
    // Slave-side beat boundary: queue the expected beat at the slave.
    if(((s_ptr%(1<<s_size))==0) || (byte_count == bytes_total)) {
      beat_at_slave.last = (byte_count == bytes_total);
      msg_tb_wrap< ace5_::WritePayload > temp_wr_data_tb;
      temp_wr_data_tb.dut_msg = beat_at_slave;
      wr_resp_data_count++;
      last_wr_sinked_cycle = (sc_time_stamp() / clk_period);
      (*sb_wr_data_q)[dst].push_back(temp_wr_data_tb);
      beat_at_slave.data  = 0;
      beat_at_slave.wstrb = 0;
    }
  }
  sb_lock->unlock();
  wr_trans_generated++;
}; // End of Write generator
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void acelite_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::gen_new_cache_trans() {
  // Generate one random coherent (ACE-Lite) access: a single-beat,
  // cache-line-sized transaction on one of ACE_CACHE_LINES line addresses.
  // Reads are verified via the ACE coherency checker; writes additionally
  // seed the target slave's request/data scoreboards here.
  ace5_::AddrPayload cache_req;
  cache_req.id     = MASTER_ID;// (rand() % AXI_TID_NUM); // (rand()% 2)+2;
  cache_req.size   = nvhls::log2_ceil<RD_M_LANES>::val;
  cache_req.burst  = enc_::AXBURST::INCR;
  cache_req.len    = 0; // single beat
  cache_req.addr   = ((rand()%ACE_CACHE_LINES)+1) * (ace5_::C_CACHE_WIDTH>>3);//0x8;
  cache_req.domain = enc_::AxDOMAIN::OUTER_SHARE;
  // RD_ONCE, RD_SHARED, RD_CLEAN, RD_NOT_SHARED_DIRTY, RD_UNIQUE, CLEAN_UNIQUE, MAKE_UNIQUE, CLEAN_SHARED, CLEAN_INVALID, MAKE_INVALID
  // WR_UNIQUE, WR_LINE_UNIQUE
  // ACE-Lite subset: 4 read-channel types + 2 write-channel types.
  bool is_read = false;
  unsigned sel_req = rand() % 6;
  if      (sel_req == 0) cache_req.snoop = enc_::ARSNOOP::RD_ONCE;
  else if (sel_req == 1) cache_req.snoop = enc_::ARSNOOP::CLEAN_SHARED;
  else if (sel_req == 2) cache_req.snoop = enc_::ARSNOOP::CLEAN_INVALID;
  else if (sel_req == 3) cache_req.snoop = enc_::ARSNOOP::MAKE_INVALID;
  else if (sel_req == 4) cache_req.snoop = enc_::AWSNOOP::WR_UNIQUE;
  else if (sel_req == 5) cache_req.snoop = enc_::AWSNOOP::WR_LINE_UNIQUE;
  is_read = (sel_req < 4);
  // Debug gate: always true in normal runs; narrow the window to isolate cycles.
  if (true /*total_cycles > 14 && total_cycles <16*/) {
    if (is_read) {
      // Push it to injection queue
      stored_rd_trans.push(cache_req);
      // Pushing to ACE Coherency checker happens during injection
      // Push into order queue - Reorder check extension
      sb_rd_order_q.push_back(cache_req);
      rd_trans_generated++;
    } else { // It's a WR request
      // Single full-line data beat; top byte carries MASTER_ID for attribution.
      ace5_::WritePayload data_beat;
      if (ace5_::C_CACHE_WIDTH<64) data_beat.data = (((ace5_::Data) MASTER_ID) << (ace5_::C_CACHE_WIDTH - 8)) | ((ace5_::Data) 0x000000000000BEEF);
      else                         data_beat.data = (((ace5_::Data) MASTER_ID) << (ace5_::C_CACHE_WIDTH - 8)) | ((ace5_::Data) 0x0000BEEFDEADBEEF);
      data_beat.wstrb = -1;
      data_beat.last  = 1;
      // Push it to injection queue
      stored_wr_trans.push(cache_req);
      stored_wr_data.push(data_beat);
      // Push it to Scoreboard: the slave sees a plain AXI write (ACE fields masked).
      sb_lock->lock();
      unsigned target_mem = mem_map_resolve(cache_req.addr);
      msg_tb_wrap<ace5_::AddrPayload> temp_wr_coherent_req_tb;
      temp_wr_coherent_req_tb.is_read = false;
      temp_wr_coherent_req_tb.dut_msg = cache_req;
      temp_wr_coherent_req_tb.dut_msg.snoop   = 0;
      temp_wr_coherent_req_tb.dut_msg.domain  = 0;
      temp_wr_coherent_req_tb.dut_msg.barrier = 0;
      temp_wr_coherent_req_tb.dut_msg.unique  = 0;
      temp_wr_coherent_req_tb.time_gen = sc_time_stamp();
      (*sb_wr_req_q)[target_mem].push_back(temp_wr_coherent_req_tb);
      msg_tb_wrap<ace5_::WritePayload> temp_wr_data_tb;
      temp_wr_data_tb.dut_msg  = data_beat;
      temp_wr_data_tb.is_read  = false;
      temp_wr_data_tb.time_gen = sc_time_stamp();
      (*sb_wr_data_q)[target_mem].push_back(temp_wr_data_tb);
      sb_lock->unlock();
      // Push into order queue - Reorder check extension
      sb_wr_order_q.push_back(cache_req);
      wr_trans_generated++;
    }
    cache_trans_generated++;
  }
}; // End of Cache generator
// ------------------------ //
// --- VERIFY Functions --- //
// ------------------------ //
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void acelite_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::verify_rd_resp(ace5_::ReadPayload &rcv_rd_resp){
  // Verify one received read-response beat against the scoreboard:
  //  1) reorder check -- the oldest outstanding read with this ID must target
  //     the slave whose index the slave sneaked into the resp field,
  //  2) data check -- the beat must match an expected entry in sb_rd_resp_q,
  //  3) bookkeeping -- latency stats and outstanding coherent-access counts.
  sb_lock->lock();
  bool is_coherent = false;
  // --- Reorder Check --- //
  unsigned reorder=2; // 2 : Req not found, 1 : Request reordered, 0 : everything is fine
  unsigned j=0;
  ace5_::AddrPayload sb_ord_req;
  while (j<sb_rd_order_q.size()){
    sb_ord_req = sb_rd_order_q[j];
    if(sb_ord_req.id == rcv_rd_resp.id) {
      // Slave must sneak its ID to the resp field.
      // (Coherent accesses are exempt: their resp carries snoop status.)
      unsigned dst = mem_map_resolve(sb_ord_req.addr);
      is_coherent  = (sb_ord_req.snoop || sb_ord_req.domain.xor_reduce());
      reorder      = (dst == rcv_rd_resp.resp) || is_coherent ? 0 : 1;
      if(rcv_rd_resp.last) sb_rd_order_q.erase(sb_rd_order_q.begin()+j);
      break;
    }
    j++;
  }
  // --------------------- //
  // Scan this master's expected-response queue for a matching beat.
  bool found = false;
  j=0;
  //if (sb_ord_req.snoop == 0) {
  while (j<(*sb_rd_resp_q)[MASTER_ID-SLAVE_NUM].size()){
    msg_tb_wrap< ace5_::ReadPayload > sb_resp = (*sb_rd_resp_q)[MASTER_ID-SLAVE_NUM][j];
    if (eq_rd_data(rcv_rd_resp, sb_resp.dut_msg)){
      // Latency is accounted on the last beat only.
      if (sb_resp.dut_msg.last) {
        rd_resp_delay += ((sc_time_stamp() - sb_resp.time_gen) / clk_period) - 1;
        rd_resp_count++;
      }
      rd_resp_data_count++;
      last_rd_sinked_cycle = (sc_time_stamp() / clk_period);
      (*sb_rd_resp_q)[MASTER_ID-SLAVE_NUM].erase((*sb_rd_resp_q)[MASTER_ID-SLAVE_NUM].begin()+j);
      found = true;
      break;
    }
    j++;
  }
  //} else {
  //  found = true; // Ignore the check if it's a Snoop access
  //}
  if(!found){
    std::cout<< "\n\n";
    std::cout<< "[Master " << MASTER_ID <<"] " << "RD-Resp : "<< rcv_rd_resp << " . NOT FOUND! @" << sc_time_stamp() << "\n";
    std::cout<< "[Master " << MASTER_ID <<"] " << "-SB_front - "<< (*sb_rd_resp_q)[MASTER_ID-SLAVE_NUM].front() << "\n";
    error_sb_rd_resp_not_found++;
    sc_assert(0);
    // sc_stop();
  }else if(reorder==2) {
    std::cout<< "\n\n";
    std::cout<< "[Master " << MASTER_ID <<"] " << "RD-Resp : "<< rcv_rd_resp << " . Respective Request wasn't found!!! @" << sc_time_stamp() << "\n";
    std::cout<< "[Master " << MASTER_ID <<"] " << "-REQ_front - "<< sb_rd_order_q.front() << "\n";
    sc_assert(0);
  }else if(reorder==1) {
    std::cout<< "\n\n";
    std::cout<< "[Master " << MASTER_ID <<"] " << "RD-Resp : "<< rcv_rd_resp << " . Got Reordered !!! @" << sc_time_stamp() << "\n";
    std::cout<< "[Master " << MASTER_ID <<"] " << "REQ-Ordered - "<< sb_ord_req << "\n";
    sc_assert(0);
  }else{
    std::cout<< "[Master " << MASTER_ID <<"] " << "RD-Resp OK : << " << rcv_rd_resp << " @" << sc_time_stamp() << "\n";
    // A completed coherent read releases its cache-line reservation.
    if (is_coherent){
      cache_outstanding[sb_ord_req.addr]--;
    } else {
      unsigned dbg_non_coh = 0; // debug landing spot for non-coherent responses
    }
    rd_resp_ej++;
  }
  std::cout.flush();
  sb_lock->unlock();
}; // End of READ Response Verify
// Scoreboard verification of a WRITE response, performed under sb_lock.
// Mirrors verify_rd_resp():
//  1) Ordering  : the response's ID must match a pending request in
//     sb_wr_order_q and resp (the slave's sneaked node-ID) must equal the
//     destination the request was routed to.
//     NOTE(review): unlike verify_rd_resp, the coherent case is NOT excluded
//     from the destination check here -- confirm whether coherent writes can
//     trip a false "reordered" failure.
//  2) Integrity : the response must match an entry queued for this master in
//     (*sb_wr_resp_q)[MASTER_ID-SLAVE_NUM] (see eq_wr_resp).
//  3) Statistics: latency counters; coherent transactions decrement both
//     outstanding-write and outstanding-access trackers.
// Any violation bumps an error counter and sc_assert(0) stops the simulation.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
void acelite_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::verify_wr_resp(ace5_::WRespPayload &rcv_wr_resp){
  sb_lock->lock();
  bool is_coherent = false;
  // --- Reorder Check --- //
  int reorder = 2; // 2 : Req not found, 1 : Request reordered, 0 : everything is fine
  unsigned int j = 0;
  ace5_::AddrPayload sb_ord_req;
  while (j < sb_wr_order_q.size()) {
    sb_ord_req = sb_wr_order_q[j];
    if (sb_ord_req.id == rcv_wr_resp.id) {
      // Slave must sneak its ID into the first data byte of every beat (aka data[0]).
      unsigned dst = mem_map_resolve(sb_ord_req.addr);
      is_coherent = (sb_ord_req.snoop || sb_ord_req.domain.xor_reduce());
      reorder = (dst == rcv_wr_resp.resp) ? 0 : 1;
      sb_wr_order_q.erase(sb_wr_order_q.begin() + j);
      break;
    }
    j++;
  }
  // --- Expected-response check --- //
  bool found = false;
  j = 0;
  while (j < (*sb_wr_resp_q)[MASTER_ID-SLAVE_NUM].size()) {
    msg_tb_wrap< ace5_::WRespPayload > sb_resp = (*sb_wr_resp_q)[MASTER_ID-SLAVE_NUM][j];
    if (eq_wr_resp(sb_resp.dut_msg, rcv_wr_resp)) {
      // Latency: generation time of the expected response to now, in cycles.
      wr_resp_delay += ((sc_time_stamp() - sb_resp.time_gen) / clk_period) - 1;
      wr_resp_count++;
      (*sb_wr_resp_q)[MASTER_ID-SLAVE_NUM].erase((*sb_wr_resp_q)[MASTER_ID-SLAVE_NUM].begin()+j);
      found = true;
      break;
    }
    j++;
  }
  if (!found) {
    std::cout<< "\n\n";
    std::cout<< "[Master " << MASTER_ID <<"] " << "WR-Resp : "<< rcv_wr_resp << " . NOT FOUND! @" << sc_time_stamp() << "\n";
    std::cout<< "[Master " << MASTER_ID <<"] " << "-SB_front - "<< (*sb_wr_resp_q)[MASTER_ID-SLAVE_NUM].front() << "\n";
    error_sb_wr_resp_not_found++;
    sc_assert(0);
    // sc_stop();
  } else if (reorder==2) {
    std::cout<< "\n\n";
    std::cout<< "[Master " << MASTER_ID <<"] " << "WR-Resp : "<< rcv_wr_resp << " . Respective Request wasn't found!!! @" << sc_time_stamp() << "\n";
    std::cout<< "[Master " << MASTER_ID <<"] " << "-REQ_front - "<< sb_wr_order_q.front() << "\n";
    sc_assert(0);
  } else if (reorder==1) {
    std::cout<< "\n\n";
    std::cout<< "[Master " << MASTER_ID <<"] " << "WR-Resp : "<< rcv_wr_resp << " . Got Reordered !!! @" << sc_time_stamp() << "\n";
    std::cout<< "[Master " << MASTER_ID <<"] " << "REQ-Ordered - "<< sb_ord_req << "\n";
    sc_assert(0);
  } else {
    // Fixed log string: was "WR-Resp OK : << " (stray "<<" inside the literal).
    std::cout<< "[Master " << MASTER_ID <<"] " << "WR-Resp OK : " << rcv_wr_resp << "\n";
    if (is_coherent) {
      //upd_cache_write(sb_ord_req, rcv_wr_resp);
      cache_outstanding_writes[sb_ord_req.addr]--;
      cache_outstanding[sb_ord_req.addr]--;
    }
    wr_resp_ej++;
  }
  std::cout.flush();
  sb_lock->unlock();
}; // End of WRITE Response Verify
// Compares a received READ beat against a scoreboard expectation.
// Two beats match when their network-visible ID bits, data payload, response
// code and LAST flag all agree. IDs are compared only on the dnp::ace::ID_W
// low bits that travel through the NoC; ruser is intentionally not compared.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
bool acelite_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::eq_rd_data (ace5_::ReadPayload &rcv_rd_data, ace5_::ReadPayload &sb_rd_data) {
  const unsigned tid_mask = (1 << dnp::ace::ID_W) - 1;
  const bool same_id   = ((rcv_rd_data.id & tid_mask) == (sb_rd_data.id & tid_mask));
  const bool same_data = (rcv_rd_data.data == sb_rd_data.data);
  const bool same_resp = (rcv_rd_data.resp == sb_rd_data.resp);
  const bool same_last = (rcv_rd_data.last == sb_rd_data.last);
  return same_id && same_data && same_resp && same_last;
};
// Compares a received WRITE response against a scoreboard expectation.
// Responses match when their network-visible ID bits and response code agree.
// IDs are compared only on the dnp::ace::ID_W low bits carried by the NoC;
// buser is intentionally not compared.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
bool acelite_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::eq_wr_resp (ace5_::WRespPayload &rcv_wr_resp, ace5_::WRespPayload &sb_wr_resp) {
  const unsigned tid_mask = (1 << dnp::ace::ID_W) - 1;
  const bool same_id   = ((rcv_wr_resp.id & tid_mask) == (sb_wr_resp.id & tid_mask));
  const bool same_resp = (rcv_wr_resp.resp == sb_wr_resp.resp);
  return same_id && same_resp;
};
// Resolves an address to the index of the slave that owns it.
// Linear scan of the address map: slave i owns the inclusive range
// [addr_map[i][0], addr_map[i][1]]. Asserts (and returns 0) when the address
// falls outside every slave's range.
template <unsigned int RD_M_LANES, unsigned int RD_S_LANES, unsigned int WR_M_LANES, unsigned int WR_S_LANES, unsigned int MASTER_NUM, unsigned int SLAVE_NUM>
unsigned acelite_master<RD_M_LANES, RD_S_LANES, WR_M_LANES, WR_S_LANES, MASTER_NUM, SLAVE_NUM>::mem_map_resolve(ace5_::Addr &addr){
  for (unsigned slave_idx = 0; slave_idx < SLAVE_NUM; ++slave_idx) {
    const bool above_base = (addr >= addr_map[slave_idx][0].read());
    const bool below_top  = (addr <= addr_map[slave_idx][1].read());
    if (above_base && below_top) return slave_idx;
  }
  NVHLS_ASSERT_MSG(0, "Target Addr not found!");
  return 0; // Or send 404
}
#endif // _ACE_LITE_MASTER_H_
|
ic-lab-duth/NoCpad
|
tb/tb_axi_con/harness.h
|
#ifndef AXI_IC_HARNESS_H
#define AXI_IC_HARNESS_H
#include "systemc.h"
#include <mc_scverify.h>
#define NVHLS_VERIFY_BLOCKS (ic_top)
#include "stdlib.h"
#include <string>
#include "../../tb/tb_axi_con/axi_master.h"
#include "../../tb/tb_axi_con/axi_slave.h"
#include <iostream>
#include <fstream>
// Top-level testbench harness: instantiates traffic-generating masters,
// responding slaves and the DUT (ic_top), wires them with Connections
// channels, shares a set of scoreboard queues between them, and drives the
// generate -> drain -> report sequence in harness_job().
SC_MODULE(harness) {
// --- Simulation knobs --- //
// NOTE(review): the sc_clock member below is constructed with a 10ns period,
// while CLK_PERIOD=5 is used for the wait() calls in harness_job -- confirm
// the intended relation between the two.
const int CLK_PERIOD = 5;
const int GEN_CYCLES = 2 * 1000;
const int GEN_RATE_RD[smpl_cfg::MASTER_NUM] = {40, 40};
const int GEN_RATE_WR[smpl_cfg::MASTER_NUM] = {40, 40};
const int STALL_RATE_RD = 00;
const int STALL_RATE_WR = 00;
const int DRAIN_CYCLES = GEN_CYCLES/10;
typedef typename axi::axi4<axi::cfg::standard_duth> axi4_;
typedef typename axi::AXI4_Encoding enc_;
sc_clock clk;
sc_signal<bool> rst_n;    // active-low reset
sc_signal<bool> stop_gen; // true => masters stop generating new transactions
// Address map: addr_map[slave][0] = range base, addr_map[slave][1] = range top (inclusive).
sc_signal< sc_uint<32> > addr_map[smpl_cfg::SLAVE_NUM][2];
// --- Scoreboards --- //
// Scoreboards refer to the receiver of the queue.
// I.e. the receiver checks what is expected to be received. Thus sender must take care to push Transactions to the appropriate queue
// sb_lock serializes scoreboard access across the master/slave SC_THREADs.
sc_mutex sb_lock;
std::vector< std::deque< msg_tb_wrap<axi4_::AddrPayload> > > sb_rd_req_q;   // indexed per slave
std::vector< std::deque< msg_tb_wrap<axi4_::ReadPayload> > > sb_rd_resp_q;  // indexed per master
std::vector< std::deque< msg_tb_wrap<axi4_::AddrPayload> > > sb_wr_req_q;   // indexed per slave
std::vector< std::deque< msg_tb_wrap<axi4_::WritePayload> > > sb_wr_data_q; // indexed per slave
std::vector< std::deque< msg_tb_wrap<axi4_::WRespPayload> > > sb_wr_resp_q; // indexed per master
axi_master<smpl_cfg::RD_LANES, smpl_cfg::RD_LANES, smpl_cfg::WR_LANES, smpl_cfg::WR_LANES, smpl_cfg::MASTER_NUM, smpl_cfg::SLAVE_NUM> *master[smpl_cfg::MASTER_NUM];
axi_slave<smpl_cfg::RD_LANES, smpl_cfg::RD_LANES, smpl_cfg::WR_LANES, smpl_cfg::WR_LANES, smpl_cfg::MASTER_NUM, smpl_cfg::SLAVE_NUM> *slave[smpl_cfg::SLAVE_NUM];
// The Device Under Test.
CCS_DESIGN(ic_top) interconnect;
// Master Side Channels
Connections::Combinational<axi4_::AddrPayload> *master_rd_req[smpl_cfg::MASTER_NUM];
Connections::Combinational<axi4_::ReadPayload> *master_rd_resp[smpl_cfg::MASTER_NUM];
Connections::Combinational<axi4_::AddrPayload> *master_wr_req[smpl_cfg::MASTER_NUM];
Connections::Combinational<axi4_::WritePayload> *master_wr_data[smpl_cfg::MASTER_NUM];
Connections::Combinational<axi4_::WRespPayload> *master_wr_resp[smpl_cfg::MASTER_NUM];
// Slave Side Channels
Connections::Combinational<axi4_::AddrPayload> *slave_rd_req[smpl_cfg::SLAVE_NUM];
Connections::Combinational<axi4_::ReadPayload> *slave_rd_resp[smpl_cfg::SLAVE_NUM];
Connections::Combinational<axi4_::AddrPayload> *slave_wr_req[smpl_cfg::SLAVE_NUM];
Connections::Combinational<axi4_::WritePayload> *slave_wr_data[smpl_cfg::SLAVE_NUM];
Connections::Combinational<axi4_::WRespPayload> *slave_wr_resp[smpl_cfg::SLAVE_NUM];
SC_CTOR(harness) :
clk("clock",10,SC_NS,0.5,0.0,SC_NS),
rst_n("rst_n"),
stop_gen("stop_gen"),
sb_lock(),
sb_rd_req_q(smpl_cfg::SLAVE_NUM),
sb_rd_resp_q(smpl_cfg::MASTER_NUM),
sb_wr_req_q(smpl_cfg::SLAVE_NUM),
sb_wr_data_q(smpl_cfg::SLAVE_NUM),
sb_wr_resp_q(smpl_cfg::MASTER_NUM),
interconnect("interconnect")
{
// Hard-coded address map: slave 0 owns [0x0, 0xffff], slave 1 owns [0x10000, 0x2ffff].
addr_map[0][0] = 0;
addr_map[0][1] = 0x0ffff;
addr_map[1][0] = 0x10000;
addr_map[1][1] = 0x2ffff;
// Construct Components
// NOTE(review): this single loop constructs both masters and slaves, so it
// implicitly assumes MASTER_NUM == SLAVE_NUM (true for smpl_cfg) -- confirm.
for (int i=0; i<smpl_cfg::MASTER_NUM; ++i) {
master[i] = new axi_master<smpl_cfg::RD_LANES, smpl_cfg::RD_LANES, smpl_cfg::WR_LANES, smpl_cfg::WR_LANES, smpl_cfg::MASTER_NUM, smpl_cfg::SLAVE_NUM>(sc_gen_unique_name("master"));
slave[i] = new axi_slave <smpl_cfg::RD_LANES, smpl_cfg::RD_LANES, smpl_cfg::WR_LANES, smpl_cfg::WR_LANES, smpl_cfg::MASTER_NUM, smpl_cfg::SLAVE_NUM>(sc_gen_unique_name("slave"));
master_rd_req[i] = new Connections::Combinational<axi4_::AddrPayload> (sc_gen_unique_name("master_rd_req"));
master_rd_resp[i] = new Connections::Combinational<axi4_::ReadPayload> (sc_gen_unique_name("master_rd_resp"));
master_wr_req[i] = new Connections::Combinational<axi4_::AddrPayload> (sc_gen_unique_name("master_wr_req"));
master_wr_data[i] = new Connections::Combinational<axi4_::WritePayload> (sc_gen_unique_name("master_wr_data"));
master_wr_resp[i] = new Connections::Combinational<axi4_::WRespPayload> (sc_gen_unique_name("master_wr_resp"));
// Slave Side Channels
slave_rd_req[i] = new Connections::Combinational<axi4_::AddrPayload> (sc_gen_unique_name("slave_rd_req"));
slave_rd_resp[i] = new Connections::Combinational<axi4_::ReadPayload> (sc_gen_unique_name("slave_rd_resp"));
slave_wr_req[i] = new Connections::Combinational<axi4_::AddrPayload> (sc_gen_unique_name("slave_wr_req"));
slave_wr_data[i] = new Connections::Combinational<axi4_::WritePayload> (sc_gen_unique_name("slave_wr_data"));
slave_wr_resp[i] = new Connections::Combinational<axi4_::WRespPayload> (sc_gen_unique_name("slave_wr_resp"));
}
std::cout << "--- Binding... ---\n";
std::cout.flush();
// BINDING - START
// MASTER
// NOTE(review): same assumption here -- one loop binds master[i] AND slave[i].
for (int i=0; i<smpl_cfg::MASTER_NUM; ++i) {
master[i]->sb_lock = &sb_lock; // Scoreboard by Ref
master[i]->sb_rd_req_q = &sb_rd_req_q; // Scoreboard by Ref
master[i]->sb_rd_resp_q = &sb_rd_resp_q; // Scoreboard by Ref
master[i]->sb_wr_req_q = &sb_wr_req_q; // Scoreboard by Ref
master[i]->sb_wr_data_q = &sb_wr_data_q; // Scoreboard by Ref
master[i]->sb_wr_resp_q = &sb_wr_resp_q; // Scoreboard by Ref
master[i]->MASTER_ID = i;
master[i]->GEN_RATE_RD = GEN_RATE_RD[i];
master[i]->GEN_RATE_WR = GEN_RATE_WR[i];
master[i]->stop_gen(stop_gen);
master[i]->clk(clk);
master[i]->rst_n(rst_n);
for(int j=0; j<smpl_cfg::SLAVE_NUM; ++j) {
master[i]->addr_map[j][0](addr_map[j][0]);
master[i]->addr_map[j][1](addr_map[j][1]);
}
master[i]->ar_out(*master_rd_req[i]);
master[i]->r_in(*master_rd_resp[i]);
master[i]->aw_out(*master_wr_req[i]);
master[i]->w_out(*master_wr_data[i]);
master[i]->b_in(*master_wr_resp[i]);
// SLAVE
slave[i]->sb_lock = &sb_lock; // Scoreboard by Ref
slave[i]->sb_rd_req_q = &sb_rd_req_q; // Scoreboard by Ref
slave[i]->sb_rd_resp_q = &sb_rd_resp_q; // Scoreboard by Ref
slave[i]->sb_wr_req_q = &sb_wr_req_q; // Scoreboard by Ref
slave[i]->sb_wr_data_q = &sb_wr_data_q; // Scoreboard by Ref
slave[i]->sb_wr_resp_q = &sb_wr_resp_q; // Scoreboard by Ref
slave[i]->STALL_RATE_RD = STALL_RATE_RD;
slave[i]->STALL_RATE_WR = STALL_RATE_WR;
slave[i]->SLAVE_ID = i;
slave[i]->stop_gen(stop_gen);
slave[i]->clk(clk);
slave[i]->rst_n(rst_n);
for(int j=0; j<smpl_cfg::SLAVE_NUM; ++j) {
slave[i]->addr_map[j][0](addr_map[j][0]);
slave[i]->addr_map[j][1](addr_map[j][1]);
}
slave[i]->ar_in(*slave_rd_req[i]);
slave[i]->r_out(*slave_rd_resp[i]);
slave[i]->aw_in(*slave_wr_req[i]);
slave[i]->w_in(*slave_wr_data[i]);
slave[i]->b_out(*slave_wr_resp[i]);
}
// IC-TOP
interconnect.clk(clk);
interconnect.rst_n(rst_n);
for(int j=0; j<smpl_cfg::SLAVE_NUM; ++j) {
interconnect.addr_map[j][0](addr_map[j][0]);
interconnect.addr_map[j][1](addr_map[j][1]);
}
// Master Side
for (int i=0; i<smpl_cfg::MASTER_NUM; ++i) {
interconnect.ar_in[i](*master_rd_req[i]);
interconnect.r_out[i](*master_rd_resp[i]);
interconnect.aw_in[i](*master_wr_req[i]);
interconnect.w_in[i](*master_wr_data[i]);
interconnect.b_out[i](*master_wr_resp[i]);
}
// Slave Side
// NOTE(review): loop bound is MASTER_NUM although it indexes slave-side
// channels -- only correct while MASTER_NUM == SLAVE_NUM; confirm.
for (int i=0; i<smpl_cfg::MASTER_NUM; ++i) {
interconnect.ar_out[i](*slave_rd_req[i]);
interconnect.r_in[i](*slave_rd_resp[i]);
interconnect.aw_out[i](*slave_wr_req[i]);
interconnect.w_out[i](*slave_wr_data[i]);
interconnect.b_in[i](*slave_wr_resp[i]);
}
// BINDING - END
std::cout << "--- Binding Succeed ---\n";
std::cout.flush();
Connections::set_sim_clk(&clk);
SC_THREAD(harness_job);
sensitive << clk.posedge_event();
} // End of Constructor
// Main control thread: reset, enable generation for GEN_CYCLES, then poll the
// scoreboards until all expected traffic has drained, and finally compute and
// print error counts and delay/throughput statistics before sc_stop().
void harness_job() {
std::cout << "--- Simulation is Starting @" << sc_time_stamp() << " ---\n";
std::cout.flush();
// Phase 1: reset sequence (generation disabled while in reset).
rst_n.write(false);
stop_gen.write(true);
wait(CLK_PERIOD*2, SC_NS);
rst_n.write(true);
wait(CLK_PERIOD*2, SC_NS);
// Phase 2: traffic generation window.
stop_gen.write(false);
wait(CLK_PERIOD*GEN_CYCLES, SC_NS);
stop_gen.write(true);
std::cout << "--- Transaction Generation Stopped @" << sc_time_stamp() << " ---\n";
std::cout.flush();
// Phase 3: drain -- wait until every scoreboard queue is empty, i.e. all
// in-flight transactions have been delivered and checked.
bool all_drained = false;
do {
int rd_req_remain = 0;
int rd_resp_remain = 0;
for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i) rd_req_remain += sb_rd_req_q[i].size();
for(int i=0; i<smpl_cfg::MASTER_NUM; ++i) rd_resp_remain += sb_rd_resp_q[i].size();
int wr_req_remain = 0;
int wr_data_remain = 0;
int wr_resp_remain = 0;
for(int i=0; i<smpl_cfg::SLAVE_NUM;++i) wr_req_remain += sb_wr_req_q[i].size();
for(int i=0; i<smpl_cfg::SLAVE_NUM;++i) wr_data_remain += sb_wr_data_q[i].size();
for(int i=0; i<smpl_cfg::MASTER_NUM;++i) wr_resp_remain += sb_wr_resp_q[i].size();
all_drained = (!rd_req_remain) && (!rd_resp_remain) &&
(!wr_req_remain) && (!wr_data_remain) && (!wr_resp_remain);
if(all_drained) {
std::cout << "--- Everything Drained @" << sc_time_stamp() << " ---\n";
} else {
std::cout << "--- Wait to drain";
std::cout << " (RD_Req: " << rd_req_remain << ", RD_Resp: " << rd_resp_remain << ")";
std::cout << " (WR_Req: " << wr_req_remain << ", WR_Data: " << wr_data_remain << ", WR_Resp: "<< wr_resp_remain <<")";
std::cout << " @" << sc_time_stamp() << " ---\n";
/* DEBUG ....
std::cout << "M0 AW_in available: " << (*master_wr_req)[0].num_available() << "\n"; // interconnect.aw_in_0.num_available() << "\n";
std::cout << "M0 avail :";
for (int i=0; i<5; ++i) std::cout << " " << interconnect.master_if_0.wr_reord_avail[i];
// std::cout << "\n";
// std::cout << __VERSION__ << "\n";
*/
wait(CLK_PERIOD*DRAIN_CYCLES, SC_NS);
}
std::cout.flush();
} while(!all_drained);
std::cout << "--- Harness Exits @" << sc_time_stamp() << "\n";
std::cout << "--- Simulation FINISHED @" << sc_time_stamp() << " ---\n";
std::cout.flush();
//--- Check for Errors ---//
// Aggregate per-master/per-slave counters. Injected vs ejected mismatches
// mean dropped traffic; *_not_found are scoreboard lookup failures.
int err_sb_rd_req_not_found=0, err_sb_rd_resp_not_found=0;
int err_sb_wr_req_not_found=0, err_sb_wr_data_not_found=0, err_sb_wr_resp_not_found=0;
int rd_req_generated=0, rd_resp_generated=0;
int rd_req_injected=0 , rd_req_ejected=0;
int rd_resp_injected=0, rd_resp_ejected=0;
int wr_req_generated=0, wr_data_generated=0, wr_resp_generated=0;
int wr_req_injected=0 , wr_req_ejected=0;
int wr_data_injected=0 , wr_data_ejected=0;
int wr_resp_injected=0, wr_resp_ejected=0;
// NOTE(review): loop indexes both master[i] and slave[i] under MASTER_NUM --
// assumes MASTER_NUM == SLAVE_NUM; confirm.
for (int i=0; i<smpl_cfg::MASTER_NUM; ++i){
// READS
rd_req_generated += master[i]->rd_trans_generated;
rd_resp_generated += master[i]->rd_data_generated;
rd_req_injected += master[i]->rd_trans_inj;
rd_req_ejected += slave[i]->rd_req_ej;
rd_resp_injected += master[i]->rd_data_generated;
rd_resp_ejected += master[i]->rd_resp_ej;
err_sb_rd_req_not_found += slave[i]->error_sb_rd_req_not_found;
err_sb_rd_resp_not_found += master[i]->error_sb_rd_resp_not_found;
// WRITES
wr_req_generated += master[i]->wr_trans_generated;
wr_data_generated += master[i]->wr_data_generated;
wr_resp_generated += slave[i]->wr_resp_generated;
wr_req_injected += master[i]->wr_trans_inj;
wr_data_injected += master[i]->wr_data_inj;
wr_req_ejected += slave[i]->wr_req_ej;
wr_data_ejected += slave[i]->wr_data_ej;
wr_resp_injected += slave[i]->wr_resp_inj;
wr_resp_ejected += master[i]->wr_resp_ej;
err_sb_wr_req_not_found += slave[i]->error_sb_wr_req_not_found;
err_sb_wr_data_not_found += slave[i]->error_sb_wr_data_not_found;
err_sb_wr_resp_not_found += master[i]->error_sb_wr_resp_not_found;
}
// NOTE(review): 'error' aggregates only the READ-side checks; the WRITE-side
// drop/not-found counters are printed below but do not fail the run -- confirm
// whether they should be OR-ed in as well.
bool error = (rd_req_injected - rd_req_ejected) || (rd_resp_injected - rd_resp_ejected) || err_sb_rd_req_not_found || err_sb_rd_resp_not_found;
std::cout << "\n";
if (error) {
std::cout << "!!! --- FAILED --- !!!\n";
std::cout << "READS : " << (rd_req_injected-rd_req_ejected) << " Reqs Dropped\n";
std::cout << " " << (rd_resp_injected-rd_resp_ejected) << " Resps Dropped\n";
std::cout << " " << err_sb_rd_req_not_found << " Reqs Not Found in SB\n";
std::cout << " " << err_sb_rd_resp_not_found << " Resps Not Found in SB\n";
std::cout << " " << " Out of :\n";
std::cout << " " << rd_req_generated << " Reqs Generated\n";
std::cout << " " << rd_resp_generated << " Resps Generated\n";
std::cout << "WRITES : " << (wr_req_injected-wr_req_ejected) << " Reqs Dropped\n";
std::cout << " " << (wr_data_injected-wr_data_ejected) << " Data Dropped\n";
std::cout << " " << (wr_resp_injected-wr_resp_ejected) << " Resps Dropped\n";
std::cout << " " << err_sb_wr_req_not_found << " Reqs Not Found in SB\n";
std::cout << " " << err_sb_wr_data_not_found << " Data Not Found in SB\n";
std::cout << " " << err_sb_wr_resp_not_found << " Resps Not Found in SB\n";
std::cout << " " << " Out of :\n";
std::cout << " " << wr_req_generated << " Reqs Generated\n";
std::cout << " " << wr_data_generated << " Data Generated\n";
std::cout << " " << wr_resp_generated << " Resps Generated\n";
} else {
std::cout << " " << rd_req_generated << " RD_Reqs Generated\n";
std::cout << " " << rd_resp_generated << " RD_Resps Generated\n\n";
std::cout << " " << wr_req_generated << " WR_Reqs Generated\n";
std::cout << " " << wr_data_generated << " WR_Data Generated\n";
std::cout << " " << wr_resp_generated << " WR_Resps Generated\n\n";
std::cout << "PASSED. No Errors.\n";
}
std::cout << "\n";
std::cout.flush();
// Delay calculation
std::cout << "\n";
unsigned long long int wr_delay_full_sum_glob = 0;
unsigned long long int rd_delay_full_sum_glob = 0;
unsigned long long int wr_trans_sum_glob = 0;
unsigned long long int rd_trans_sum_glob = 0;
unsigned long long int rd_data_count_glob = 0;
unsigned long long int wr_data_count_glob = 0;
unsigned long long int wr_delay_full_sum_p_m[smpl_cfg::MASTER_NUM];
unsigned long long int rd_delay_full_sum_p_m[smpl_cfg::MASTER_NUM];
unsigned long long int wr_trans_sum_p_m[smpl_cfg::MASTER_NUM];
unsigned long long int rd_trans_sum_p_m[smpl_cfg::MASTER_NUM];
// NOTE(review): this init loop is bounded by SLAVE_NUM but zeroes arrays
// sized MASTER_NUM -- entries beyond SLAVE_NUM stay uninitialized when
// MASTER_NUM > SLAVE_NUM; confirm the bound.
for(int i=0; i<smpl_cfg::SLAVE_NUM; ++i) {
wr_delay_full_sum_p_m[i] = 0; rd_delay_full_sum_p_m[i] = 0;
wr_trans_sum_p_m[i] = 0; rd_trans_sum_p_m[i] = 0;
}
for (int i=0; i<smpl_cfg::MASTER_NUM; ++i) {
rd_delay_full_sum_p_m[i] += master[i]->rd_resp_delay;
rd_trans_sum_p_m[i] += master[i]->rd_resp_count;
rd_delay_full_sum_glob += master[i]->rd_resp_delay;
rd_trans_sum_glob += master[i]->rd_resp_count;
rd_data_count_glob += master[i]->rd_resp_data_count;
wr_delay_full_sum_p_m[i] += master[i]->wr_resp_delay;
wr_trans_sum_p_m[i] += master[i]->wr_resp_count;
wr_delay_full_sum_glob += master[i]->wr_resp_delay;
wr_trans_sum_glob += master[i]->wr_resp_count;
wr_data_count_glob += master[i]->wr_resp_data_count;
}
sc_time this_clk_period = clk.period();
unsigned long long int total_cycles = sc_time_stamp() / this_clk_period;
std::cout << "Delay Per Master Slave(delay, Throughput) :\n";
for (int i=0; i<smpl_cfg::MASTER_NUM; i++) {
std::cout << "M" << i << " RD: " << (rd_trans_sum_p_m[i] ? ((float)rd_delay_full_sum_p_m[i] / (float)rd_trans_sum_p_m[i]) : 0)
<< ", "
<< (rd_trans_sum_p_m[i] ? ((float)master[i]->rd_resp_data_count / (float)master[i]->last_rd_sinked_cycle) : 0)
<< "\n WR: "
<< (wr_trans_sum_p_m[i] ? ((float)wr_delay_full_sum_p_m[i] / (float)wr_trans_sum_p_m[i]) : 0)
<< ", "
<< (wr_trans_sum_p_m[i] ? ((float)master[i]->wr_resp_data_count / (float)total_cycles) : 0)
<< "\n";
}
float rd_delay_full_total = ((float)rd_delay_full_sum_glob / (float)rd_trans_sum_glob);
float wr_delay_full_total = ((float)wr_delay_full_sum_glob / (float)wr_trans_sum_glob);
float rd_throughput_total = ((float)rd_data_count_glob / (float)total_cycles) / (float)smpl_cfg::MASTER_NUM;
float wr_throughput_total = ((float)wr_data_count_glob / (float)total_cycles) / (float)smpl_cfg::MASTER_NUM;
std::cout << " (RD, WR) \n";
std::cout << "Full Avg delay(cycles) : " << rd_delay_full_total << ", "<< wr_delay_full_total << "\n";
std::cout << "Throughput (flits/cycle/node) : " << rd_throughput_total << ", "<< wr_throughput_total << "\n";
std::cout << "\n";
std::cout << __VERSION__ << "\n";
std::cout.flush();
sc_stop();
}
}; // End of harness
#endif // AXI_IC_HARNESS_H
|
ic-lab-duth/NoCpad
|
src/ace/acelite_master_if.h
|
// --------------------------------------------------------- //
// MASTER-IF Is where the MASTER CONNECTS!!!!! //
// //
// Aka. Master <-> Master-IF <-> NoC <-> Slave-IF <-> Slave //
// --------------------------------------------------------- //
#ifndef _ACELITE_MASTER_IF_H_
#define _ACELITE_MASTER_IF_H_
#include "systemc.h"
#include "nvhls_connections.h"
#include "./ace_master_if.h"
#include "../include/ace.h"
#include "../include/axi4_configs_extra.h"
#include "../include/flit_ace.h"
#include "../include/duth_fun.h"
#define LOG_MAX_OUTS 8
#define INIT_S1(n) n{#n}
// --- Helping Data structures --- //
template <typename cfg>
SC_MODULE(acelite_master_if) {
typedef typename ace::ace5<axi::cfg::ace> ace5_;
typedef typename axi::AXI4_Encoding enc_;
typedef flit_dnp<cfg::RREQ_PHITS> rreq_flit_t;
typedef flit_dnp<cfg::RRESP_PHITS> rresp_flit_t;
typedef flit_dnp<cfg::WREQ_PHITS> wreq_flit_t;
typedef flit_dnp<cfg::WRESP_PHITS> wresp_flit_t;
typedef sc_uint< nvhls::log2_ceil<cfg::RRESP_PHITS>::val > cnt_phit_rresp_t;
typedef sc_uint< nvhls::log2_ceil<cfg::WREQ_PHITS>::val > cnt_phit_wreq_t;
const unsigned char LOG_RD_M_LANES = nvhls::log2_ceil<cfg::RD_LANES>::val;
const unsigned char LOG_WR_M_LANES = nvhls::log2_ceil<cfg::WR_LANES>::val;
sc_in_clk clk;
sc_in <bool> rst_n;
sc_in < sc_uint<(dnp::ace::AH_W+dnp::ace::AL_W)> > addr_map[cfg::SLAVE_NUM][2];
sc_in< sc_uint<dnp::S_W> > THIS_ID;
// AXI MASTER Side Channels
// --- ACE-LITE --- //
// NO Snoop Channels
// --- READ --- //
Connections::In<ace5_::AddrPayload> INIT_S1(ar_in);
Connections::Out<ace5_::ReadPayload> INIT_S1(r_out);
// --- WRITE --- //
Connections::In<ace5_::AddrPayload> INIT_S1(aw_in);
Connections::In<ace5_::WritePayload> INIT_S1(w_in);
Connections::Out<ace5_::WRespPayload> INIT_S1(b_out);
// NoC Side Channels
Connections::Out<rreq_flit_t> INIT_S1(rd_flit_out);
Connections::In<rresp_flit_t> INIT_S1(rd_flit_in);
Connections::Out<wreq_flit_t> INIT_S1(wr_flit_out);
Connections::In<wresp_flit_t> INIT_S1(wr_flit_in);
// --- READ Internals --- //
// FIFOs that pass initiation and finish transactions between Pack-Depack
sc_fifo<order_info> INIT_S1(rd_trans_init);
sc_fifo<sc_uint<dnp::ace::ID_W>> INIT_S1(rd_trans_fin);
// Placed on READ Packetizer
outs_table_entry rd_out_table[1<<dnp::ace::ID_W];
// --- WRITE Internals --- //
sc_fifo<sc_uint<dnp::ace::ID_W>> INIT_S1(wr_trans_fin);
// Placed on WRITE Packetizer
outs_table_entry wr_out_table[1<<dnp::ace::ID_W];
// Constructor
SC_HAS_PROCESS(acelite_master_if);
// Constructor: registers the four protocol-conversion threads, each clocked
// on the positive edge with an asynchronous active-low reset (rst_n).
// rd_trans_fin / wr_trans_fin are built with depth 2; they carry finished
// transaction IDs from the de-packetizers back to the packetizers.
acelite_master_if(sc_module_name name_="acelite_master_if")
:
sc_module (name_),
rd_trans_fin (2),
wr_trans_fin (2)
{
SC_THREAD(rd_req_pack_job);
sensitive << clk.pos();
async_reset_signal_is(rst_n, false);
SC_THREAD(rd_resp_depack_job);
sensitive << clk.pos();
async_reset_signal_is(rst_n, false);
SC_THREAD(wr_req_pack_job);
sensitive << clk.pos();
async_reset_signal_is(rst_n, false);
SC_THREAD(wr_resp_depack_job);
sensitive << clk.pos();
async_reset_signal_is(rst_n, false);
}
//-------------------------------//
//--- NO - ACE SNOOPING Module ---//
//-------------------------------//
//-------------------------------//
//--- READ REQuest Packetizer ---//
//-------------------------------//
// READ request packetizer: pops AR requests from the master, stalls while an
// ordering hazard exists (same-ID outstanding transactions headed to a
// different destination could be reordered by the NoC), then packs the
// request into a SINGLE flit (DNP20 layout) and pushes it toward the NoC.
// While idle or stalled it keeps draining rd_trans_fin to retire completed
// outstanding transactions from rd_out_table.
void rd_req_pack_job () {
//-- Start of Reset ---//
// rd_out_table contains outstanding info for each TID, to decide if reordering is possible.
for (int i=0; i<1<<dnp::ace::ID_W; ++i) { // i<(1<<ID_W): shift binds tighter than '<'
rd_out_table[i].dst_last = 0;
rd_out_table[i].sent = 0;
rd_out_table[i].reorder = false;
}
ar_in.Reset();
rd_flit_out.Reset();
ace5_::AddrPayload this_req;
//-- End of Reset ---//
wait();
while(1) {
// New Request from MASTER received
if(ar_in.PopNB(this_req)) {
// Get info about the outstanding transactions the received request's TID
outs_table_entry sel_entry = rd_out_table[this_req.id.to_uint()];
// Coherent == snoop requested, or no snoop but shareability-domain bits set.
bool is_coherent = (this_req.snoop > 0) || ((this_req.snoop ==0) && (this_req.domain.xor_reduce()));
// resolve address to node-id
// Coherent requests are steered to the home node (last node after all
// slaves and masters); others go to the slave owning the address.
sc_uint<dnp::D_W> this_dst = is_coherent ? (cfg::SLAVE_NUM+cfg::ALL_MASTER_NUM) : addr_lut_rd(this_req.addr);
// Check reorder conditions for received TID.
bool may_reorder = (sel_entry.sent>0) && (sel_entry.dst_last != this_dst);
// In case of possible reordering wait_for has the number of transactions
// of the same ID, this request has to wait for.
sc_uint<LOG_MAX_OUTS> wait_for = sel_entry.sent;
// Poll for Finished transactions until no longer reordering is possible.
// Also spins here while the output channel is full (backpressure).
while(may_reorder || rd_flit_out.Full()) {
sc_uint<dnp::ace::ID_W> tid_fin;
if(rd_trans_fin.nb_read(tid_fin)) {
rd_out_table[tid_fin].sent--; // update outstanding table
if(tid_fin==this_req.id.to_uint()) wait_for--; // update local wait value
}
may_reorder = (wait_for>0);
wait();
}; // End of while reorder
// --- Start Packetization --- //
// Packetize request into a flit. The fields are described in DNP20
rreq_flit_t tmp_flit;
tmp_flit.type = SINGLE; // Entire request fits in at single flits thus SINGLE
// data[0]: routing/header phit (src, dst, packet type) plus ID/domain/snoop.
tmp_flit.data[0] = ((sc_uint<dnp::PHIT_W>)this_req.snoop << dnp::ace::req::SNP_PTR) |
((sc_uint<dnp::PHIT_W>)this_req.domain << dnp::ace::req::DOM_PTR) |
((sc_uint<dnp::PHIT_W>)this_req.id << dnp::ace::req::ID_PTR ) |
((sc_uint<dnp::PHIT_W>)dnp::PACK_TYPE__RD_REQ << dnp::T_PTR ) |
((sc_uint<dnp::PHIT_W>) 0 << dnp::Q_PTR ) |
((sc_uint<dnp::PHIT_W>)this_dst << dnp::D_PTR ) |
((sc_uint<dnp::PHIT_W>)THIS_ID << dnp::S_PTR ) |
((sc_uint<dnp::PHIT_W>)0 << dnp::V_PTR ) ;
// data[1]: burst length + low 16 address bits.
tmp_flit.data[1] = ((sc_uint<dnp::PHIT_W>)this_req.len << dnp::ace::req::LE_PTR) |
((sc_uint<dnp::PHIT_W>)(this_req.addr & 0xffff) << dnp::ace::req::AL_PTR) ;
// data[2]: barrier/burst-type/size + high address bits.
tmp_flit.data[2] = ((sc_uint<dnp::PHIT_W>)this_req.barrier << dnp::ace::req::BAR_PTR) |
((sc_uint<dnp::PHIT_W>)this_req.burst << dnp::ace::req::BU_PTR ) |
((sc_uint<dnp::PHIT_W>)this_req.size << dnp::ace::req::SZ_PTR ) |
((sc_uint<dnp::PHIT_W>)(this_req.addr >> dnp::ace::AL_W) << dnp::ace::req::AH_PTR ) ;
// Update the table due to the new outstanding
rd_out_table[this_req.id.to_uint()].sent++;
rd_out_table[this_req.id.to_uint()].dst_last = this_dst;
// send header flit to NoC
rd_flit_out.Push(tmp_flit);
} else {
// No RD Req from Master, simply check for finished Outstanding trans
sc_uint<dnp::ace::ID_W> tid_fin;
if(rd_trans_fin.nb_read(tid_fin)) {
rd_out_table[tid_fin].sent--; // update outstanding table
}
wait();
}
} // End of while(1)
}; // End of Read Request Packetizer
//-----------------------------------//
//--- READ RESPonce DE-Packetizer ---//
//-----------------------------------//
// READ response de-packetizer: consumes response flits from the NoC,
// reconstructs the transaction attributes from the header flit, then
// re-assembles AXI R beats byte-by-byte from the flit payload and pushes
// them to the master. On the last beat it reports the finished TID to the
// packetizer through rd_trans_fin.
void rd_resp_depack_job () {
r_out.Reset();
rd_flit_in.Reset();
while(1) {
// Blocking pop of the header flit of the next response packet.
rresp_flit_t flit_rcv;
flit_rcv = rd_flit_in.Pop();
// Construct the transaction's attributes to build the response accordingly.
ace5_::AddrPayload active_trans;
active_trans.id = (flit_rcv.data[0] >> dnp::ace::rresp::ID_PTR) & ((1 << dnp::ace::ID_W) - 1);
active_trans.burst = (flit_rcv.data[0] >> dnp::ace::rresp::BU_PTR) & ((1 << dnp::ace::BU_W) - 1);
active_trans.size = (flit_rcv.data[1] >> dnp::ace::rresp::SZ_PTR) & ((1 << dnp::ace::SZ_W) - 1);
active_trans.len = (flit_rcv.data[1] >> dnp::ace::rresp::LE_PTR) & ((1 << dnp::ace::LE_W) - 1);
sc_uint<dnp::ace::SZ_W> final_size = (unsigned) active_trans.size; // Just the size. more compact
// Partial lower 8-bit part of address to calculate the initial axi pointer in case of a non-aligned address
sc_uint<dnp::ace::AP_W> addr_part = (flit_rcv.data[1] >> dnp::ace::rresp::AP_PTR) & ((1<<dnp::ace::AP_W) - 1);
sc_uint<dnp::ace::AP_W> addr_init_aligned = ((addr_part & (cfg::RD_LANES-1)) & ~((1<<final_size)-1));
// Data Depacketization happens in a loop. Each iteration pops a flit and constructs a beat.
// Each iteration transfers data bytes from the flit to the AXI beat.
// bytes_per_iter bytes may be transfered, which is limited by two factors
// depending the AXI beat size and the bytes in the flit.
// 1) The available data bytes in the flit is less than the required for the beat
// 2) The remaining byte lanes are less than the available in the flit
// For case (1) the flit is emptied and the next flit is popped at the next iteration
// For case (2) the beat is pushed to Master and the next beat starts in the next iteration
// For data Depacketization loop, we keep 2 pointers.
// axi_lane_ptr -> to keep track axi byte lanes to place to data
// flit_phit_ptr -> to point at the data of the flit
sc_uint<8> axi_lane_ptr = addr_init_aligned; // Bytes MOD axi size
cnt_phit_rresp_t flit_phit_ptr = 0; // Bytes MOD phits in flit
// Also we keep track the processed and total data.
sc_uint<16> bytes_total = ((active_trans.len.to_uint()+1)<<final_size);
sc_uint<16> bytes_depacked = 0; // Number of DE-packetized bytes
// Staging buffer for one AXI beat, cleared between beats.
unsigned char resp_build_tmp[cfg::RD_LANES];
#pragma hls_unroll yes
for(int i=0; i<cfg::RD_LANES; ++i) resp_build_tmp[i] = 0;
#pragma hls_pipeline_init_interval 1
#pragma pipeline_stall_mode flush
gather_wr_beats : while (1) {
// Calculate the bytes to transfer in this iteration,
// depending the available flit bytes and the remaining to fill the beat
sc_uint<8> bytes_axi_left = ((1<<final_size) - (axi_lane_ptr & ((1<<final_size)-1)));
sc_uint<8> bytes_flit_left = ((cfg::RRESP_PHITS<<1) - (flit_phit_ptr<<1)); // each phit holds 2 data bytes
sc_uint<8> bytes_per_iter = (bytes_axi_left<bytes_flit_left) ? bytes_axi_left : bytes_flit_left;
// Pop the next payload flit once the previous one was fully consumed.
if(flit_phit_ptr==0)
flit_rcv = rd_flit_in.Pop();
#pragma hls_unroll yes
build_resp: for (int i = 0; i < (cfg::RD_LANES >> 1); ++i) { // i counts AXI Byte Lanes IN PHITS (i.e. Lanes/bytes_in_phit)
if (i>=(axi_lane_ptr>>1) && i<((axi_lane_ptr+bytes_per_iter)>>1)) {
cnt_phit_rresp_t loc_flit_ptr = flit_phit_ptr + (i-(axi_lane_ptr>>1));
resp_build_tmp[(i << 1) + 1] = (flit_rcv.data[loc_flit_ptr] >> dnp::ace::rdata::B1_PTR) & ((1 << dnp::ace::B_W) - 1); // MSB
resp_build_tmp[(i << 1) ] = (flit_rcv.data[loc_flit_ptr] >> dnp::ace::rdata::B0_PTR) & ((1 << dnp::ace::B_W) - 1); // LSB
}
}
// transaction event flags
bool done_job = ((bytes_depacked+bytes_per_iter)==bytes_total); // All bytes are processed
bool done_flit = (flit_phit_ptr+(bytes_per_iter>>1)==cfg::RRESP_PHITS); // Flit got empty
bool done_axi = (((bytes_depacked+bytes_per_iter)&((1<<final_size)-1))==0); // Beat got full
// Push the response to MASTER, when either this Beat got the needed bytes or all bytes are transferred
if( done_job || done_axi ) {
ace5_::ReadPayload builder_resp;
builder_resp.id = active_trans.id;
builder_resp.resp = (flit_rcv.data[flit_phit_ptr] >> dnp::ace::rdata::RE_PTR) & ((1 << dnp::ace::R_RE_W) - 1);
builder_resp.last = ((bytes_depacked+bytes_per_iter)==bytes_total);
duth_fun<ace5_::Data, cfg::RD_LANES>::assign_char2ac(builder_resp.data, resp_build_tmp);
r_out.Push(builder_resp);
// Clear the staging buffer for the next beat.
#pragma hls_unroll yes
for(int i=0; i<cfg::RD_LANES; ++i) resp_build_tmp[i] = 0;
}
// Check to either finish transaction or update the pointers for the next iteration
if (done_job) { // End of transaction
// Inform Packetizer about finished transaction, and Exit
rd_trans_fin.write(active_trans.id.to_uint());
break;
} else { // Check for finished transactions
bytes_depacked +=bytes_per_iter;
flit_phit_ptr = (done_flit) ? 0 : (flit_phit_ptr +(bytes_per_iter>>1));
// FIXED bursts wrap back to the initial aligned lane; INCR/WRAP advance
// modulo the number of read byte-lanes.
axi_lane_ptr = (active_trans.burst==enc_::AXBURST::FIXED) ? ((axi_lane_ptr+bytes_per_iter) & ((1<<final_size)-1)) + addr_init_aligned :
((axi_lane_ptr+bytes_per_iter) & (cfg::RD_LANES-1)) ;
}
} // End of flit gathering loop
} // End of while(1)
}; // End of Read Responce Packetizer
//--------------------------------//
//--- WRITE REQuest Packetizer ---//
//--------------------------------//
void wr_req_pack_job () {
wr_flit_out.Reset();
aw_in.Reset();
w_in.Reset();
for (int i=0; i<1<<dnp::ace::ID_W; ++i) {
wr_out_table[i].dst_last = 0;
wr_out_table[i].sent = 0;
wr_out_table[i].reorder = false;
}
ace5_::AddrPayload this_req;
wait();
while(1) {
if(aw_in.PopNB(this_req)) { // New Request
// Get the outstanding info for the received request TID
outs_table_entry sel_entry = wr_out_table[this_req.id.to_uint()];
bool pass_thru_home = ((this_req.snoop == 0) && (this_req.domain.xor_reduce())) ||
(this_req.snoop == 1);
// resolve address to node-id
sc_uint<dnp::D_W> this_dst = pass_thru_home ? (cfg::SLAVE_NUM+cfg::ALL_MASTER_NUM) : addr_lut_wr(this_req.addr);
// Check reorder conditions for this TID.
// In an ordered NoC reorder may occur when there are outstanding trans towards different destinations
bool may_reorder = (sel_entry.sent>0) && (sel_entry.dst_last != this_dst);
sc_uint<LOG_MAX_OUTS> wait_for = sel_entry.sent; // Counts outstanding transactions to wait for
// Poll Finished transactions until no longer reorder is possible.
while(may_reorder || wr_flit_out.Full()) {
sc_uint<dnp::ace::ID_W> tid_fin;
if(wr_trans_fin.nb_read(tid_fin)) {
wr_out_table[tid_fin].sent--;
if(tid_fin==this_req.id.to_uint()) wait_for--;
}
may_reorder = (wait_for>0);
wait();
}; // End of while reorder
// --- Start HEADER Packetization --- //
// Packetize request according DNP20, and send
rreq_flit_t tmp_flit;
wreq_flit_t tmp_mule_flit;
tmp_mule_flit.type = HEAD;
tmp_mule_flit.data[0] = ((sc_uint<dnp::PHIT_W>)this_req.snoop << dnp::ace::req::SNP_PTR) |
((sc_uint<dnp::PHIT_W>)this_req.domain << dnp::ace::req::DOM_PTR) |
((sc_uint<dnp::PHIT_W>)this_req.id << dnp::ace::req::ID_PTR) |
((sc_uint<dnp::PHIT_W>)dnp::PACK_TYPE__WR_REQ << dnp::T_PTR) |
((sc_uint<dnp::PHIT_W>)0 << dnp::Q_PTR) |
((sc_uint<dnp::PHIT_W>)this_dst << dnp::D_PTR) |
((sc_uint<dnp::PHIT_W>)THIS_ID << dnp::S_PTR) |
((sc_uint<dnp::PHIT_W>)0 << dnp::V_PTR) ;
tmp_mule_flit.data[1] = ((sc_uint<dnp::PHIT_W>)this_req.len << dnp::ace::req::LE_PTR) |
((sc_uint<dnp::PHIT_W>)(this_req.addr & 0xffff) << dnp::ace::req::AL_PTR) ;
tmp_mule_flit.data[2] = ((sc_uint<dnp::PHIT_W>)this_req.unique << dnp::ace::req::UNQ_PTR) |
((sc_uint<dnp::PHIT_W>)this_req.barrier << dnp::ace::req::BAR_PTR) |
((sc_uint<dnp::PHIT_W>)this_req.burst << dnp::ace::req::BU_PTR) |
((sc_uint<dnp::PHIT_W>)this_req.size << dnp::ace::req::SZ_PTR) |
((sc_uint<dnp::PHIT_W>)(this_req.addr >> dnp::ace::AL_W) << dnp::ace::req::AH_PTR) ;
wr_out_table[this_req.id.to_uint()].sent++;
wr_out_table[this_req.id.to_uint()].dst_last = this_dst;
#pragma hls_pipeline_init_interval 1
#pragma pipeline_stall_mode flush
while (!wr_flit_out.PushNB(tmp_mule_flit)) {
sc_uint<dnp::ace::ID_W> tid_fin;
if(wr_trans_fin.nb_read(tid_fin)) {
wr_out_table[tid_fin].sent--; // update outstanding table
}
wait();
}
// --- Start DATA Packetization --- //
// Data Depacketization happens in a loop. Each iteration pops a flit and constructs a beat.
// Multiple iterations may be needed either the consume incoming data or fill a flit, which
// which depends on the AXI and flit size.
// Each iteration transfers data bytes from the flit to the AXI beat.
// The processed bytes per iteration is limited by two factors
// depending the AXI beat size and the bytes in the flit.
// 1) The available data bytes in the flit is less than the required for the beat
// 2) The remaining byte lanes are less than the available in the flit
// For case (1) the flit is emptied and the next flit is popped at the next iteration
// For case (2) the beat is pushed to Master and the next beat starts in the next iteration
// calculate the initial axi pointer in case of a non-aligned address to the bus
sc_uint<8> addr_init_aligned = (this_req.addr.to_uint() & (cfg::WR_LANES-1)) & ~((1<<this_req.size.to_uint())-1);
// For data Depacketization we keep 2 pointers.
// - One to keep track axi byte lanes to place to data (axi_lane_ptr)
// - One to point at the data of the flit (flit_phit_ptr)
sc_uint<8> axi_lane_ptr = addr_init_aligned; // Bytes MOD size
cnt_phit_wreq_t flit_phit_ptr = 0; // Bytes MOD phits in flit
sc_uint<16> bytes_total = ((this_req.len.to_uint()+1)<<this_req.size.to_uint());
sc_uint<16> bytes_packed = 0;
unsigned char data_build_tmp[cfg::WR_LANES];
bool wstrb_tmp[cfg::WR_LANES];
sc_uint<1> last_tmp;
//#pragma hls_pipeline_init_interval 1
//#pragma pipeline_stall_mode flush
gather_wr_beats : while (1) {
// Calculate the bytes transferred in this iteration, depending the available flit bytes and the remaining to the beat
sc_uint<8> bytes_axi_left = ((1<<this_req.size.to_uint()) - (axi_lane_ptr & ((1<<this_req.size.to_uint())-1)));
sc_uint<8> bytes_flit_left = ((cfg::WREQ_PHITS<<1) - (flit_phit_ptr<<1));
sc_uint<8> bytes_per_iter = (bytes_axi_left<bytes_flit_left) ? bytes_axi_left : bytes_flit_left;
// If current beat has been packed, pop next
if((bytes_packed & ((1<<this_req.size.to_uint())-1))==0) {
ace5_::WritePayload this_wr;
this_wr = w_in.Pop();
last_tmp = this_wr.last;
duth_fun<ace5_::Data , cfg::WR_LANES>::assign_ac2char(data_build_tmp , this_wr.data);
duth_fun<ace5_::Wstrb, cfg::WR_LANES>::assign_ac2bool(wstrb_tmp , this_wr.wstrb);
}
// Convert AXI Beats to flits. this should be synthesize a mux that routes bytes from axi lanes to flit
#pragma hls_unroll yes
for (int i=0; i<cfg::WREQ_PHITS; ++i){ // i counts phits on the flit
if(i>=flit_phit_ptr && i<(flit_phit_ptr+(bytes_per_iter>>1))) {
sc_uint<8> loc_axi_ptr = (axi_lane_ptr + ((i-flit_phit_ptr)<<1));
tmp_mule_flit.data[i] = ((sc_uint<dnp::PHIT_W>)last_tmp << dnp::ace::wdata::LA_PTR ) | // MSB
((sc_uint<dnp::PHIT_W>)wstrb_tmp[loc_axi_ptr+1] << dnp::ace::wdata::E1_PTR ) |
((sc_uint<dnp::PHIT_W>)wstrb_tmp[loc_axi_ptr ] << dnp::ace::wdata::E0_PTR ) |
((sc_uint<dnp::PHIT_W>)data_build_tmp[loc_axi_ptr+1] << dnp::ace::wdata::B1_PTR ) | // (i*2) % 4
((sc_uint<dnp::PHIT_W>)data_build_tmp[loc_axi_ptr ] << dnp::ace::wdata::B0_PTR ) ;
}
}
// transaction event flags
bool done_job = ((bytes_packed+bytes_per_iter)==bytes_total); // All bytes are processed
bool done_flit = (flit_phit_ptr+(bytes_per_iter>>1)==cfg::WREQ_PHITS); // Flit got empty
bool done_axi = (((bytes_packed+bytes_per_iter)&((1<<(this_req.size.to_uint()))-1))==0); // Beat got full
if(done_job || done_flit) {
tmp_mule_flit.type = (bytes_packed+bytes_per_iter==bytes_total) ? TAIL : BODY;
#pragma hls_pipeline_init_interval 1
#pragma pipeline_stall_mode flush
while (!wr_flit_out.PushNB(tmp_mule_flit)) {
sc_uint<dnp::ace::ID_W> tid_fin;
if(wr_trans_fin.nb_read(tid_fin)) {
wr_out_table[tid_fin].sent--; // update outstanding table
}
wait();
}
}
// Check to either finish transaction or update the pointers for the next iteration
if (done_job) { // End of transaction
break;
} else { // Move to next iteration
bytes_packed = bytes_packed+bytes_per_iter;
flit_phit_ptr = (done_flit) ? 0 : (flit_phit_ptr +(bytes_per_iter>>1));
axi_lane_ptr = ((unsigned)this_req.burst==enc_::AXBURST::FIXED) ? ((axi_lane_ptr+bytes_per_iter) & ((1<<this_req.size.to_uint())-1)) + addr_init_aligned :
((axi_lane_ptr+bytes_per_iter) & (cfg::WR_LANES-1)) ;
}
} // End of gather_beats. End of transaction loop
} else { // When no request, Check for finished transactions
sc_uint<dnp::ace::ID_W> tid_fin;
if(wr_trans_fin.nb_read(tid_fin)) {
wr_out_table[tid_fin].sent--;
}
wait();
}
} // End of While(1)
}; // End of Read Request Packetizer
//------------------------------------//
//--- WRITE RESPonce DE-Packetizer ---//
//------------------------------------//
void wr_resp_depack_job(){
wr_flit_in.Reset();
b_out.Reset();
wait();
while(1) {
wresp_flit_t flit_rcv;
flit_rcv = wr_flit_in.Pop();
// Construct the trans Header to create the response
ace5_::WRespPayload this_resp;
sc_uint<dnp::ace::ID_W> this_tid = (flit_rcv.data[0] >> dnp::ace::wresp::ID_PTR) & ((1 << dnp::ace::ID_W) - 1);
this_resp.id = this_tid.to_uint();
this_resp.resp = (flit_rcv.data[0] >> dnp::ace::wresp::RESP_PTR) & ((1 << dnp::ace::W_RE_W) - 1);
b_out.Push(this_resp); // Send the response to MASTER
wr_trans_fin.write(this_tid); // Inform Packetizer for finished transaction
} // End of While(1)
}; // End of Write Resp De-pack
// Memory map resolving
// Resolve a read address to a destination node via the memory map.
// Scans the [base, top] ranges held in addr_map and returns the index of
// the first region that contains `addr`; falls back to node 0 on a miss
// (could be turned into an error/404 response instead).
inline unsigned char addr_lut_rd(const ace5_::Addr addr) {
  for (unsigned char region=0; region<2; ++region) {
    const bool above_base = (addr >= addr_map[region][0].read());
    const bool below_top  = (addr <= addr_map[region][1].read());
    if (above_base && below_top) return region;
  }
  return 0; // No region matched - default to node 0 (Or send 404)
};
// Resolve a write address to a destination node via the memory map.
// Same lookup as the read path: first matching [base, top] region wins,
// node 0 on a miss (could be turned into an error/404 response instead).
inline unsigned char addr_lut_wr(const ace5_::Addr addr) {
  unsigned char region = 0;
  do {
    const bool in_region = (addr >= addr_map[region][0].read()) &&
                           (addr <= addr_map[region][1].read());
    if (in_region) return region;
  } while (++region < 2);
  return 0; // No region matched - default to node 0 (Or send 404)
};
}; // End of Master-IF module
#endif // _ACELITE_MASTER_IF_H_
|
ic-lab-duth/NoCpad
|
src/include/rc.h
|
#ifndef __ROUTING_COMPUTATION_HEADER__
#define __ROUTING_COMPUTATION_HEADER__
#include <systemc.h>
#include "./dnp_ace_v0.h"
enum rc_t {RC_DIRECT, RC_XY, RC_FIXED, RC_TYPE, RC_COMMON};
// Direct RC : The Dst Node is the Output port
//template<rc_t TYPE, typename...T >
//inline unsigned char do_rc(T...params);
// Primary declaration: each routing-computation flavor below provides a
// full specialization selected by the rc_t template parameter.
template<rc_t TYPE, typename...T >
inline unsigned char do_rc(T...params);
// Direct RC : the packet's destination field IS the output port.
template<>
inline unsigned char do_rc<RC_DIRECT, sc_uint<dnp::ace::D_W> > (sc_uint<dnp::ace::D_W> destination) {return destination;};
// TYPE RC : Used for splitter/mergers. Routes RD packets (both requests and
// responses) to output 0, and everything else (WR traffic) to output 1.
template<>
inline unsigned char do_rc<RC_TYPE, sc_lv<dnp::ace::T_W> > (sc_lv<dnp::ace::T_W> type) {return (type==dnp::PACK_TYPE__RD_REQ || type==dnp::PACK_TYPE__RD_RESP) ? 0 : 1;};
// Fixed RC : Used for mergers to always request port #0.
template<>
inline unsigned char do_rc<RC_FIXED> () {return 0;};
// Common RC : shared RD/WR network with split port ranges. RD packets use
// output `destination`, WR packets use output `destination + 2`.
// (Previous comment was a copy-paste of the fixed-RC description.)
template<>
inline unsigned char do_rc<RC_COMMON, sc_lv<dnp::ace::D_W>, sc_lv<dnp::ace::T_W> > (sc_lv<dnp::ace::D_W> destination, sc_lv<dnp::ace::T_W> type) {
  if (type==dnp::PACK_TYPE__RD_REQ || type==dnp::PACK_TYPE__RD_RESP) return destination.to_uint();
  else                                                               return destination.to_uint()+2;
};
// LUT RC : the Outport is selected from a LUT
//inline unsigned char do_rc_lut (sc_lv<dnp::D_W> dst) {return route_lut[dst];};
#endif // __ROUTING_COMPUTATION_HEADER__
|
ic-lab-duth/NoCpad
|
src/axi_slave_if.h
|
// src/axi_slave_if.h
// --------------------------------------------------------- //
// SLAVE-IF Is where the SLAVE CONNECTS!!!!! //
// //
// Aka. Master <-> Master-IF <-> NoC <-> Slave-IF <-> Slave //
// --------------------------------------------------------- //
#ifndef AXI4_SLAVE_IF_CON_H
#define AXI4_SLAVE_IF_CON_H
#include "systemc.h"
#include "nvhls_connections.h"
#include "./include/flit_axi.h"
#include <axi/axi4.h>
#include "./include/axi4_configs_extra.h"
#include "./include/duth_fun.h"
#define LOG_MAX_OUTS 8
// --- Helping Data structures --- //
// Transaction info handed from the READ request de-packetizer to the READ
// response packetizer (one entry per accepted AR request). Carries the
// fields the response path needs to rebuild the DNP response header and to
// steer the data bytes back onto the requester's byte lanes.
struct rd_trans_info_t {
  sc_uint<dnp::S_W>     src;       // requesting node, i.e. the response destination
  sc_uint<dnp::ID_W>    tid;       // AXI transaction ID
  sc_uint<dnp::BU_W>    burst;     // AXI burst type
  sc_uint<dnp::SZ_W>    size;      // AXI beat size as requested by the master
  sc_uint<dnp::LE_W>    len;       // AXI burst length as requested by the master
  sc_uint<dnp::AP_W>    addr_part; // low address bits, used for byte-lane alignment
  sc_uint<dnp::REORD_W> reord_tct; // Used for reordering at master
  inline friend std::ostream& operator << ( std::ostream& os, const rd_trans_info_t& info ) {
    os <<"S: "<< info.src /*<<", D: "<< info.dst*/ <<", TID: "<< info.tid <<", Bu: "<< info.burst <<"Si: "<< info.size <<"Le: "<< info.len <<", Ticket: "<<info.reord_tct;
#ifdef SYSTEMC_INCLUDED
    os << std::dec << "@" << sc_time_stamp();
#else
    os << std::dec << "@" << "no-timed";
#endif
    return os;
  }
#ifdef SYSTEMC_INCLUDED
  // Only for SystemC
  inline friend void sc_trace(sc_trace_file* tf, const rd_trans_info_t& info, const std::string& name) {
    sc_trace(tf, info.src, name + ".src");
    //sc_trace(tf, info.dst, name + ".dst");
    sc_trace(tf, info.tid, name + ".tid");
    sc_trace(tf, info.burst, name + ".burst");
    sc_trace(tf, info.size, name + ".size");
    sc_trace(tf, info.len, name + ".len");
    // Needed only when reordering is supported
    sc_trace(tf, info.reord_tct, name + ".ticket");
  }
#endif
};
// Info passed between packetizer and depacketizer to inform about new and finished transactions.
// Info passed between packetizer and depacketizer to inform about new and
// finished WRITE transactions (the B response only needs source and TID).
struct wr_trans_info_t {
  sc_uint<dnp::S_W>     src;       // requesting node, i.e. the response destination
  sc_uint<dnp::ID_W>    tid;       // AXI transaction ID
  sc_uint<dnp::REORD_W> reord_tct; // Used for reordering at master
  inline friend std::ostream& operator << ( std::ostream& os, const wr_trans_info_t& info ) {
    os <<"S: "<< info.src << ", Id: " << info.tid <<", Ticket: "<<info.reord_tct;
#ifdef SYSTEMC_INCLUDED
    os << std::dec << "@" << sc_time_stamp();
#else
    os << std::dec << "@" << "no-timed";
#endif
    return os;
  }
#ifdef SYSTEMC_INCLUDED
  // Only for SystemC
  inline friend void sc_trace(sc_trace_file* tf, const wr_trans_info_t& info, const std::string& name) {
    sc_trace(tf, info.src, name + ".src");
    sc_trace(tf, info.tid, name + ".tid");
    sc_trace(tf, info.reord_tct, name + ".ticket");
  }
#endif
};
// --- Slave IF --- //
// The AXI Slave connects the independent AXI RD and WR channels to the interface
// The interface gets the Request packets and independently reconstructs the AXI transactions depending on the Slave's attributes
// The Responses are packetized in separate threads and are fed back to the network
// Thus the Slave interface comprises 4 distinct/parallel blocks: WR/RD pack and WR/RD depack
template <typename cfg>
SC_MODULE(axi_slave_if) {
typedef typename axi::axi4<axi::cfg::standard_duth> axi4_;
typedef typename axi::AXI4_Encoding enc_;
typedef flit_dnp<cfg::RREQ_PHITS> rreq_flit_t;
typedef flit_dnp<cfg::RRESP_PHITS> rresp_flit_t;
typedef flit_dnp<cfg::WREQ_PHITS> wreq_flit_t;
typedef flit_dnp<cfg::WRESP_PHITS> wresp_flit_t;
typedef sc_uint< nvhls::log2_ceil<cfg::RRESP_PHITS>::val > cnt_phit_rresp_t;
typedef sc_uint< nvhls::log2_ceil<cfg::WREQ_PHITS>::val > cnt_phit_wreq_t;
const unsigned char RD_S_SIZE = nvhls::log2_ceil<cfg::RD_LANES>::val;
const unsigned char WR_S_SIZE = nvhls::log2_ceil<cfg::WR_LANES>::val;
sc_in< sc_uint<dnp::D_W> > THIS_ID;
sc_in_clk clk;
sc_in <bool> rst_n;
// Memory Map, Slave's base address
sc_in < sc_uint<(dnp::AH_W+dnp::AL_W)> > slave_base_addr;
// NoC Side flit Channels
Connections::In<rreq_flit_t> rd_flit_in{"rd_flit_in"};
Connections::Out<rresp_flit_t> rd_flit_out{"rd_flit_out"};
Connections::In<wreq_flit_t> wr_flit_in{"wr_flit_in"};
Connections::Out<wresp_flit_t> wr_flit_out{"wr_flit_out"};
// SLAVE Side AXI Channels
// --- READ --- //
Connections::Out<axi4_::AddrPayload> ar_out{"ar_out"};
Connections::In<axi4_::ReadPayload> r_in{"r_in"};
// --- WRITE --- //
Connections::Out<axi4_::AddrPayload> aw_out{"aw_out"};
Connections::Out<axi4_::WritePayload> w_out{"w_out"};
Connections::In<axi4_::WRespPayload> b_in{"b_in"};
// --- READ Internal FIFOs --- //
sc_fifo<rd_trans_info_t> rd_trans_init{"rd_trans_init"};
sc_fifo< sc_uint<dnp::ID_W> > rd_trans_fin{"rd_trans_fin"};
// --- WRITE Internal FIFOs --- //
sc_fifo<wr_trans_info_t> wr_trans_init{"wr_trans_init"};
sc_fifo< sc_uint<dnp::ID_W> > wr_trans_fin{"wr_trans_fin"};
// Constructor: sizes the pack<->depack handshake FIFOs (depth 3, matching
// the in-flight limit used by the depacketizers) and spawns the four
// independent threads (RD/WR x pack/depack), all clocked on clk.pos() with
// an active-low asynchronous reset.
SC_HAS_PROCESS(axi_slave_if);
axi_slave_if(sc_module_name name_="axi_slave_if")
  :
  sc_module (name_),
  rd_trans_init (3),
  rd_trans_fin  (3),
  wr_trans_init (3),
  wr_trans_fin  (3)
{
  SC_THREAD(rd_req_depack_job);
  sensitive << clk.pos();
  async_reset_signal_is(rst_n, false);

  SC_THREAD(rd_resp_pack_job);
  sensitive << clk.pos();
  async_reset_signal_is(rst_n, false);

  SC_THREAD(wr_req_depack_job);
  sensitive << clk.pos();
  async_reset_signal_is(rst_n, false);

  SC_THREAD(wr_resp_pack_job);
  sensitive << clk.pos();
  async_reset_signal_is(rst_n, false);
}
//---------------------------------//
//--- READ REQuest Depacketizer ---//
//---------------------------------//
void rd_req_depack_job () {
sc_uint<LOG_MAX_OUTS> rd_in_flight = 0;
sc_uint<dnp::ID_W> outst_tid = -1;
rreq_flit_t flit_rcv;
ar_out.Reset();
rd_flit_in.Reset();
#pragma hls_pipeline_init_interval 1
#pragma pipeline_stall_mode flush
while(1) {
// Poll NoC for request flits
if(rd_flit_in.PopNB(flit_rcv)) {
sc_uint<dnp::ID_W> orig_tid = (flit_rcv.data[0] >> dnp::req::ID_PTR) & ((1<<dnp::ID_W)-1);
sc_uint<dnp::S_W> req_src = (flit_rcv.data[0] >> dnp::S_PTR) & ((1<<dnp::S_W)-1);
// Wait while reordering is possible
#pragma hls_pipeline_init_interval 1
#pragma pipeline_stall_mode flush
while ( ((rd_in_flight>0) && (orig_tid != outst_tid)) || (rd_in_flight>2) ) {
sc_uint<dnp::ID_W> fin_tid;
if(rd_trans_fin.nb_read(fin_tid)) {
rd_in_flight--;
NVHLS_ASSERT(fin_tid==outst_tid);
}
wait();
};
// --- Start of Request reconstruction ---
// Get transaction info and check for resizing
sc_uint<dnp::SZ_W> init_size = (flit_rcv.data[2] >> dnp::req::SZ_PTR) & ((1<<dnp::SZ_W)-1);
sc_uint<dnp::LE_W> init_len = (flit_rcv.data[1] >> dnp::req::LE_PTR) & ((1<<dnp::LE_W)-1);
// In case of resizing the size and length changes in Slave's terms
sc_uint<dnp::SZ_W> final_size = (init_size>RD_S_SIZE) ? (sc_uint<dnp::SZ_W>)RD_S_SIZE : init_size;
sc_uint<dnp::LE_W> final_len = (init_size>RD_S_SIZE) ? (sc_uint<dnp::LE_W>)(((init_len+1)<<(init_size-final_size))-1) : init_len;
// Build the appropriate request for Slave
axi4_::AddrPayload temp_req;
temp_req.id = orig_tid.to_uint();
temp_req.len = final_len.to_uint();
temp_req.size = final_size.to_uint();
temp_req.burst = (flit_rcv.data[2] >> dnp::req::BU_PTR) & ((1<<dnp::BU_W)-1);
temp_req.addr = ((((flit_rcv.data[2]>>dnp::req::AH_PTR) & ((1<<dnp::AH_W)-1)) << dnp::AL_W) |
((flit_rcv.data[1]>>dnp::req::AL_PTR) & ((1<<dnp::AL_W)-1)))
- slave_base_addr.read();
// Build the necessary info for Depacketizer
rd_trans_info_t temp_info;
temp_info.src = (flit_rcv.data[0] >> dnp::S_PTR) & ((1<<dnp::S_W)-1);
temp_info.tid = orig_tid;
temp_info.len = (flit_rcv.data[1] >> dnp::req::LE_PTR) & ((1<<dnp::LE_W)-1);
temp_info.size = (flit_rcv.data[2] >> dnp::req::SZ_PTR) & ((1<<dnp::SZ_W)-1);
temp_info.burst = (flit_rcv.data[2] >> dnp::req::BU_PTR) & ((1<<dnp::BU_W)-1);
temp_info.addr_part = (flit_rcv.data[1] & ((1<<dnp::AP_W)-1));
temp_info.reord_tct = (flit_rcv.data[0] >> dnp::req::REORD_PTR) & ((1<<dnp::REORD_W)-1);
NVHLS_ASSERT(((flit_rcv.data[0].to_uint() >> dnp::D_PTR) & ((1<<dnp::D_W)-1)) == (THIS_ID.read().to_uint()));
rd_in_flight++;
outst_tid = temp_info.tid;
rd_trans_init.write(temp_info);
ar_out.Push(temp_req);
} else {
// No new transaction, Check for finished transaction
sc_uint<dnp::ID_W> fin_tid;
if(rd_trans_fin.nb_read(fin_tid)) {
rd_in_flight--;
NVHLS_ASSERT(fin_tid==outst_tid);
}
wait();
}
} // End of while(1)
}; // End of Read Request Packetizer
//--------------------------------//
//--- READ RESPonce Packetizer ---//
//--------------------------------//
// READ RESPonse packetizer.
// Consumes transaction info from rd_trans_init (written by the request
// de-packetizer) plus R beats from the slave, and packetizes them into a
// HEAD flit followed by data flits towards the requesting node. Data bytes
// are steered from the AXI byte lanes into flit phits two bytes at a time;
// due to bus-width resizing a beat may span several flits and vice versa.
// (Cleanup vs. previous version: removed the dead `done_axi` flag, which
// was computed every iteration but never read.)
void rd_resp_pack_job () {
  rd_flit_out.Reset();
  r_in.Reset();
  while(1) {
    rresp_flit_t temp_flit;
    rd_trans_info_t this_head = rd_trans_init.read();
    //--- Build header ---
    temp_flit.type = HEAD;
    temp_flit.data[0] = ((sc_uint<dnp::PHIT_W>)this_head.burst         << dnp::rresp::BU_PTR)    |
                        ((sc_uint<dnp::PHIT_W>)this_head.reord_tct     << dnp::rresp::REORD_PTR) |
                        ((sc_uint<dnp::PHIT_W>)this_head.tid           << dnp::rresp::ID_PTR)    |
                        ((sc_uint<dnp::PHIT_W>)dnp::PACK_TYPE__RD_RESP << dnp::T_PTR)            |
                        ((sc_uint<dnp::PHIT_W>)0                       << dnp::Q_PTR)            |
                        ((sc_uint<dnp::PHIT_W>)this_head.src           << dnp::D_PTR)            |
                        ((sc_uint<dnp::PHIT_W>)THIS_ID                 << dnp::S_PTR)            |
                        ((sc_uint<dnp::PHIT_W>)0                       << dnp::V_PTR)            ;
    temp_flit.data[1] = ((sc_uint<dnp::PHIT_W>)(this_head.addr_part)   << dnp::rresp::AP_PTR) |
                        ((sc_uint<dnp::PHIT_W>)this_head.len           << dnp::rresp::LE_PTR) |
                        ((sc_uint<dnp::PHIT_W>)this_head.size          << dnp::rresp::SZ_PTR) ;
    rd_flit_out.Push(temp_flit);
    // --- Start DATA Packetization --- //
    // Resize to this slave's bus width when the request is wider than it.
    sc_uint<dnp::SZ_W> final_size = (this_head.size>RD_S_SIZE) ? (sc_uint<dnp::SZ_W>) RD_S_SIZE : this_head.size;
    sc_uint<8> addr_init_aligned = (this_head.addr_part & (cfg::RD_LANES-1)) & ~((1<<final_size)-1);
    // For data packetization we keep 2 pointers.
    //  - One to keep track of the AXI byte lanes the data comes from (axi_lane_ptr)
    //  - One to point at the data of the flit (flit_phit_ptr)
    sc_uint<8> axi_lane_ptr = addr_init_aligned;
    cnt_phit_rresp_t flit_phit_ptr = 0;
    sc_uint<16> bytes_total = ((this_head.len+1)<<this_head.size); // Total number of bytes in the transaction
    sc_uint<16> bytes_packed = 0;                                  // Number of the packetized bytes
    unsigned char data_build_tmp[cfg::RD_LANES];
    sc_uint<dnp::RE_W> resp_tmp;
    sc_uint<dnp::LA_W> last_tmp;
    #pragma hls_pipeline_init_interval 1
    #pragma pipeline_stall_mode flush
    gather_beats: while(1) {
      // Bytes handled this iteration: bounded by what remains in the current
      // beat and by the free space left in the current flit.
      sc_uint<8> bytes_axi_left  = ((1<<final_size) - (axi_lane_ptr & ((1<<final_size)-1)));
      sc_uint<8> bytes_flit_left = ((cfg::RRESP_PHITS<<1) - (flit_phit_ptr<<1));
      sc_uint<8> bytes_per_iter  = (bytes_axi_left<bytes_flit_left) ? bytes_axi_left : bytes_flit_left;
      // When the axi lane pointer wraps a size get the next beat
      if((bytes_packed & ((1<<final_size)-1))==0) {
        axi4_::ReadPayload this_resp;
        this_resp = r_in.Pop();
        duth_fun<axi4_::Data , cfg::RD_LANES>::assign_ac2char(data_build_tmp , this_resp.data);
        last_tmp = this_resp.last;
        resp_tmp = this_resp.resp;
      }
      // Convert AXI Beats to flits.
      #pragma hls_unroll yes
      for (int i=0; i<cfg::RRESP_PHITS; ++i) { // i counts phits on the flit
        if(i>=flit_phit_ptr && i<(flit_phit_ptr+(bytes_per_iter>>1))) {
          sc_uint<8> loc_axi_ptr = (axi_lane_ptr + ((i-flit_phit_ptr)<<1));
          temp_flit.data[i] = ((sc_uint<dnp::PHIT_W>)resp_tmp                      << dnp::rdata::RE_PTR) | // MSB
                              ((sc_uint<dnp::PHIT_W>)last_tmp                      << dnp::rdata::LA_PTR) |
                              ((sc_uint<dnp::PHIT_W>)data_build_tmp[loc_axi_ptr+1] << dnp::rdata::B1_PTR) | // (i*2) % 4
                              ((sc_uint<dnp::PHIT_W>)data_build_tmp[loc_axi_ptr  ] << dnp::rdata::B0_PTR) ; // LSB
        }
      }
      // transaction event flags
      bool done_job  = ((bytes_packed+bytes_per_iter)==bytes_total);          // All bytes are processed
      bool done_flit = (flit_phit_ptr+(bytes_per_iter>>1)==cfg::RRESP_PHITS); // Flit got full
      // Push the flit to NoC
      if(done_job || done_flit) {
        temp_flit.type = (done_job) ? TAIL : BODY;
        rd_flit_out.Push(temp_flit);
      }
      if (done_job) {
        // End of transaction: inform the de-packetizer and exit
        bytes_packed = 0;
        rd_trans_fin.write(this_head.tid);
        break;
      } else {
        // Move to next iteration
        bytes_packed += bytes_per_iter;
        flit_phit_ptr = (done_flit) ? 0 : (flit_phit_ptr +(bytes_per_iter>>1));
        axi_lane_ptr = (this_head.burst==enc_::AXBURST::FIXED) ? ((axi_lane_ptr+bytes_per_iter) & ((1<<final_size)-1)) + addr_init_aligned :
                                                                 ((axi_lane_ptr+bytes_per_iter) & (cfg::RD_LANES-1)) ;
      }
    } // End of transaction loop
  } // End of While(1)
}; // End of Read Response Packetizer
//-----------------------------------//
//--- WRITE REQuest DE-Packetizer ---//
//-----------------------------------//
// WRITE REQuest de-packetizer.
// Pops write-request packets from the NoC, rebuilds the AXI AW request
// (resizing len/size when the request is wider than this slave's bus) and
// then reconstructs the W beats - data bytes plus per-byte write strobes -
// from the packet's data flits. Transaction info is forwarded to the
// response packetizer through wr_trans_init. Stalls while a re-ordering
// hazard exists. (Cleanup vs. previous version: the reorder-guard condition
// is explicitly parenthesized to match the equivalent rd_req_depack_job
// condition; previously it relied on &&-over-|| precedence.)
void wr_req_depack_job () {
  sc_uint<LOG_MAX_OUTS> wr_in_flight = 0; // requests forwarded but not yet answered
  sc_uint<dnp::ID_W> outst_tid = -1;      // TID of the currently outstanding requests
  aw_out.Reset();
  w_out.Reset();
  wr_flit_in.Reset();
  while(1) {
    wreq_flit_t flit_rcv;
    if (wr_flit_in.PopNB(flit_rcv)) {
      sc_uint<dnp::ID_W> orig_tid = (flit_rcv.data[0] >> dnp::req::ID_PTR) & ((1<<dnp::ID_W)-1);
      sc_uint<dnp::S_W>  req_src  = (flit_rcv.data[0] >> dnp::S_PTR)       & ((1<<dnp::S_W)-1);
      // Wait while reordering is possible
      #pragma hls_pipeline_init_interval 1
      #pragma pipeline_stall_mode flush
      while ( ((wr_in_flight>0) && (orig_tid != outst_tid)) || (wr_in_flight>2) ) {
        // Check for finished transactions
        sc_uint<dnp::ID_W> fin_tid;
        if(wr_trans_fin.nb_read(fin_tid)) {
          wr_in_flight--;
          NVHLS_ASSERT(fin_tid==outst_tid);
        }
        wait();
      };
      // --- Start of Request reconstruction ---
      // Get transaction info and check for resizing
      sc_uint<dnp::SZ_W> init_size = (flit_rcv.data[2] >> dnp::req::SZ_PTR) & ((1<<dnp::SZ_W)-1);
      sc_uint<dnp::LE_W> init_len  = (flit_rcv.data[1] >> dnp::req::LE_PTR) & ((1<<dnp::LE_W)-1);
      // In case of resizing the size and length changes in Slave's terms
      sc_uint<dnp::SZ_W> final_size = (init_size>WR_S_SIZE) ? (sc_uint<dnp::SZ_W>)WR_S_SIZE : init_size;
      sc_uint<dnp::LE_W> final_len  = (init_size>WR_S_SIZE) ? (sc_uint<dnp::LE_W>)(((init_len+1)<<(init_size-final_size))-1) : init_len;
      // Build the appropriate request for Slave
      axi4_::AddrPayload this_req;
      this_req.id    = orig_tid.to_uint();
      this_req.len   = final_len.to_uint();
      this_req.size  = final_size.to_uint();
      this_req.burst = (flit_rcv.data[2] >> dnp::req::BU_PTR) & ((1<<dnp::BU_W)-1);
      this_req.addr  = ((((flit_rcv.data[2]>>dnp::req::AH_PTR) & ((1<<dnp::AH_W)-1)) << dnp::AL_W) |
                        ((flit_rcv.data[1]>>dnp::req::AL_PTR) & ((1<<dnp::AL_W)-1)))
                       - slave_base_addr.read();
      NVHLS_ASSERT(((flit_rcv.data[0].to_uint() >> dnp::D_PTR) & ((1<<dnp::D_W)-1)) == (THIS_ID.read().to_uint()));
      // Build the necessary info for Packetizer
      wr_trans_info_t this_info;
      this_info.tid = orig_tid;
      this_info.src = req_src;
      this_info.reord_tct = (flit_rcv.data[0] >> dnp::req::REORD_PTR) & ((1<<dnp::REORD_W)-1);
      // update bookkeeping vars
      wr_in_flight++;
      outst_tid = orig_tid;
      // Push info to Resp-pack and request to Slave
      wr_trans_init.write(this_info);
      aw_out.Push(this_req);
      unsigned char data_build_tmp[cfg::WR_LANES];
      bool wstr_build_tmp[cfg::WR_LANES];
      #pragma hls_unroll yes
      for (int i=0; i<cfg::WR_LANES; ++i) {
        wstr_build_tmp[i] = false;
        data_build_tmp[i] = 0;
      }
      // Gather DATA
      sc_uint<8> addr_init_aligned = (this_req.addr.to_uint() & (cfg::WR_LANES-1)) & ~((1<<final_size)-1);
      sc_uint<8> axi_lane_ptr = addr_init_aligned;
      cnt_phit_wreq_t flit_phit_ptr = 0;
      sc_uint<16> bytes_total = ((this_req.len.to_uint()+1)<<this_req.size.to_uint());
      sc_uint<16> bytes_depacked = 0;
      #pragma hls_pipeline_init_interval 1
      #pragma pipeline_stall_mode flush
      gather_wr_flits : while (1) {
        // Bytes handled this iteration: bounded by what remains to fill the
        // current beat and by the bytes still available in the current flit.
        sc_uint<8> bytes_axi_left  = ((1<<this_req.size.to_uint()) - (axi_lane_ptr & ((1<<this_req.size.to_uint())-1)));
        sc_uint<8> bytes_flit_left = ((cfg::WREQ_PHITS<<1) - (flit_phit_ptr<<1));
        sc_uint<8> bytes_per_iter  = (bytes_axi_left<bytes_flit_left) ? bytes_axi_left : bytes_flit_left;
        // When the phit pointer resets get the next flit
        if(flit_phit_ptr==0) {
          #pragma hls_pipeline_init_interval 1
          #pragma pipeline_stall_mode flush
          while (!wr_flit_in.PopNB(flit_rcv)) {
            sc_uint<dnp::ID_W> fin_tid;
            if(wr_trans_fin.nb_read(fin_tid)) {
              wr_in_flight--;
              NVHLS_ASSERT(fin_tid==outst_tid);
            }
            wait();
          }
        }
        // Convert flits to AXI beats (two data bytes + two strobes per phit).
        #pragma hls_unroll yes
        build_resp: for (unsigned int i=0; i<(cfg::WR_LANES>>1); ++i){ // i counts PHITS
          if(i>=(axi_lane_ptr>>1) && i<((axi_lane_ptr+bytes_per_iter)>>1)) {
            sc_uint<8> loc_flit_ptr = flit_phit_ptr + (i-(axi_lane_ptr>>1));
            data_build_tmp[(i<<1)+1] = (flit_rcv.data[loc_flit_ptr] >> dnp::wdata::B1_PTR) & ((1<<dnp::B_W)-1); // MSB
            data_build_tmp[(i<<1)  ] = (flit_rcv.data[loc_flit_ptr] >> dnp::wdata::B0_PTR) & ((1<<dnp::B_W)-1); // LSB
            wstr_build_tmp[(i<<1)+1] = (flit_rcv.data[loc_flit_ptr] >> dnp::wdata::E1_PTR) & ((1<<dnp::E_W)-1); // MSB
            wstr_build_tmp[(i<<1)  ] = (flit_rcv.data[loc_flit_ptr] >> dnp::wdata::E0_PTR) & ((1<<dnp::E_W)-1); // LSB
          }
        }
        // transaction event flags
        bool done_job  = ((bytes_depacked+bytes_per_iter)==bytes_total);             // All bytes are processed
        bool done_flit = (flit_phit_ptr+(bytes_per_iter>>1)==cfg::WREQ_PHITS);       // Flit got empty
        bool done_axi  = (((bytes_depacked+bytes_per_iter)&((1<<final_size)-1))==0); // Beat got full
        // Push the beat to the slave when it is full or the burst is done.
        if(done_job || done_axi ) {
          axi4_::WritePayload builder_wr_data;
          builder_wr_data.last = ((bytes_depacked+bytes_per_iter)==bytes_total);
          duth_fun<axi4_::Data , cfg::WR_LANES>::assign_char2ac(builder_wr_data.data , data_build_tmp);
          duth_fun<axi4_::Wstrb, cfg::WR_LANES>::assign_bool2ac(builder_wr_data.wstrb, wstr_build_tmp);
          w_out.Push(builder_wr_data);
          #pragma hls_unroll yes
          for (int i=0; i<cfg::WR_LANES; ++i) {
            wstr_build_tmp[i] = false;
            data_build_tmp[i] = 0;
          }
        }
        // Check to either finish transaction or update the pointers for the next iteration
        if (done_job) {
          break;
        } else {
          bytes_depacked +=bytes_per_iter;
          flit_phit_ptr = (done_flit) ? 0 : (flit_phit_ptr +(bytes_per_iter>>1));
          axi_lane_ptr = ((unsigned)this_req.burst==enc_::AXBURST::FIXED) ? ((axi_lane_ptr+bytes_per_iter) & ((1<<this_req.size.to_uint())-1)) + addr_init_aligned :
                                                                            ((axi_lane_ptr+bytes_per_iter) & (cfg::WR_LANES-1)) ;
        }
      } // End of flit gather
    } else {
      // Check for finished transactions
      sc_uint<dnp::ID_W> fin_tid;
      if(wr_trans_fin.nb_read(fin_tid)) {
        wr_in_flight--;
        NVHLS_ASSERT(fin_tid==outst_tid);
      }
      wait();
    }
  } // End of while(1)
}; // End of Write Request DE-Packetizer
//---------------------------------//
//--- WRITE RESPonce Packetizer ---//
//---------------------------------//
// WRITE RESPonse packetizer.
// Pairs each B response from the slave with the matching transaction info
// (written in order by the request de-packetizer) and packs both into a
// SINGLE flit back to the source node. Also retires the transaction via
// wr_trans_fin so the de-packetizer can release its reorder guard.
void wr_resp_pack_job(){
  wr_flit_out.Reset();
  b_in.Reset();
  #pragma hls_pipeline_init_interval 1
  #pragma pipeline_stall_mode flush
  while(1) {
    wait();
    wresp_flit_t temp_flit;
    // Blocking reads: info is enqueued before the slave can answer, so the
    // pairing below stays in lock-step with the request stream.
    wr_trans_info_t this_head = wr_trans_init.read();
    axi4_::WRespPayload this_resp = b_in.Pop();
    temp_flit.type = SINGLE;
    // Pack the response code plus routing/ordering fields into one phit.
    temp_flit.data[0] = ((sc_uint<dnp::PHIT_W>)this_resp.resp << dnp::wresp::RESP_PTR ) |
                        ((sc_uint<dnp::PHIT_W>)this_head.reord_tct << dnp::wresp::REORD_PTR ) |
                        ((sc_uint<dnp::PHIT_W>)this_head.tid << dnp::wresp::ID_PTR ) |
                        ((sc_uint<dnp::PHIT_W>)dnp::PACK_TYPE__WR_RESP << dnp::T_PTR ) |
                        ((sc_uint<dnp::PHIT_W>)0 << dnp::Q_PTR ) |
                        ((sc_uint<dnp::PHIT_W>)this_head.src << dnp::D_PTR ) |
                        ((sc_uint<dnp::PHIT_W>)THIS_ID << dnp::S_PTR ) ;
    wr_flit_out.Push(temp_flit);
    wr_trans_fin.write(this_head.tid);
  } // End of While(1)
}; // End of Write Resp Packetizer
}; // End of Slave-IF module
#endif // AXI4_SLAVE_IF_CON_H
|
ic-lab-duth/NoCpad
|
src/include/onehot.h
|
// <gh_stars>1-10
#ifndef __ONEHOT_CLASS__
#define __ONEHOT_CLASS__
#include <systemc.h>
#include "./duth_fun.h"
//============================================================================//
//============================== One-Hot Class ========================//
//============================================================================//
template <unsigned N>
class onehot {
public:
static const unsigned WIDTH = N;
sc_uint<N> val;
onehot() {onehot(1);};
onehot(unsigned init_val) {val = init_val;};
// Only for SystemC
inline friend void sc_trace(sc_trace_file* tf, const onehot& oh_val, const std::string& name) {
sc_trace(tf, oh_val.val, name + ".val");
}
template<typename T>
inline void set(T wb_val) {
switch (wb_val) {
case 0 : val = (1<<0);
break;
case 1 : val = (1<<1);
break;
case 2 : val = (1<<2);
break;
case 3 : val = (1<<3);
break;
case 4 : val = (1<<4);
break;
case 5 : val = (1<<5);
break;
case 6 : val = (1<<6);
break;
case 7 : val = (1<<7);
break;
default : val = (1<<0);;
break;
}
}; //(1<<wb_val);};
template<unsigned RHS_N>
inline void set(onehot<RHS_N> oh_val) { val = oh_val.val;};
inline sc_uint<N> get() {return val;};
inline bool is_ready() {return !val[0];};
inline bool or_reduce() {return val.or_reduce();};
inline onehot<N> and_mask(bool bit) const {
onehot<N> mask(0);
#pragma hls_unroll yes
for(int i=0; i<N; ++i) mask.val[i] |= (bit << i); // Build the mask
return onehot<N>(val & mask.val);
};
inline void increase() { val = (val << 1);};
inline void decrease() { val = (val >> 1);};
template <class T>
T mux(const T data_i[N]);
//inline bool operator[] (const unsigned pos) {
// return ((val>>pos) & 1);
//z};
sc_dt::sc_uint_bitref& operator [] ( unsigned pos) {return val[pos];};
const sc_dt::sc_uint_bitref_r& operator [] ( unsigned pos ) const {return val[pos];};
//template<typename T>
//bool operator== (const T &rhs) {
// return (val==rhs);
//};
template<typename T>
onehot& operator= (const T &rhs) {
this->set(rhs);
};
};
// 2-input one-hot mux: explicit switch keeps the select decode visible to
// HLS. A corrupt (non-one-hot) select falls back to data_i[0].
template<>
template<class T>
T onehot<2>::mux(const T data_i[2] ) {
  T selected;
  switch (val) {
    case 1  : selected = data_i[0]; break;
    case 2  : selected = data_i[1]; break;
    default : selected = data_i[0]; break;
  }
  return selected;
};
// 3-input one-hot mux; non-one-hot selects fall back to data_i[0].
template<>
template<class T>
T onehot<3>::mux(const T data_i[3] ) {
  T selected;
  switch (val) {
    case 1  : selected = data_i[0]; break;
    case 2  : selected = data_i[1]; break;
    case 4  : selected = data_i[2]; break;
    default : selected = data_i[0]; break;
  }
  return selected;
};
// 4-input one-hot mux; non-one-hot selects fall back to data_i[0].
template<>
template<class T>
T onehot<4>::mux(const T data_i[4] ) {
  T selected;
  switch (val) {
    case 1  : selected = data_i[0]; break;
    case 2  : selected = data_i[1]; break;
    case 4  : selected = data_i[2]; break;
    case 8  : selected = data_i[3]; break;
    default : selected = data_i[0]; break;
  }
  return selected;
};
template<>
template<class T>
T onehot<5>::mux(const T data_i[5] ) {
  // One-hot select: val == (1<<i) picks data_i[i]; any other encoding
  // falls back to data_i[0], matching the default case of the original switch.
  T picked = data_i[0];
#pragma hls_unroll yes
  for (int i = 1; i < 5; ++i)
    if (val == (1u << i)) picked = data_i[i];
  return picked;
};
template<>
template<class T>
T onehot<6>::mux(const T data_i[6] ) {
  // One-hot select: val == (1<<i) picks data_i[i]; any other encoding
  // falls back to data_i[0], matching the default case of the original switch.
  T picked = data_i[0];
#pragma hls_unroll yes
  for (int i = 1; i < 6; ++i)
    if (val == (1u << i)) picked = data_i[i];
  return picked;
};
#endif // __ONEHOT_CLASS__
|
ic-lab-duth/NoCpad
|
tb/helper_non_synth.h
|
#ifndef __DUTH_HELPER_NON_SYNTH__
#define __DUTH_HELPER_NON_SYNTH__
unsigned int my_log2c(unsigned int val) {
  /// ceil(log2) integer calculation. Returns (unsigned)-1 for log2(0).
  /// Fix: the previous implementation returned floor(log2(val)) for inputs that
  /// are not powers of two (e.g. my_log2c(5) gave 2 instead of 3), contradicting
  /// both its name ("c" = ceil) and its documentation. Powers of two (the common
  /// testbench inputs) are unaffected.
  if (val == 0) return (unsigned int)-1;  // keep the documented sentinel
  unsigned int floor_log = 0;
  unsigned int v = val;
  while (v >>= 1) floor_log++;            // floor(log2(val))
  // Exact power of two => floor == ceil; otherwise round up.
  return ((val & (val - 1)) == 0) ? floor_log : floor_log + 1;
}
#endif // __DUTH_HELPER_NON_SYNTH__
|
ic-lab-duth/NoCpad
|
tb/tb_wrap.h
|
<reponame>ic-lab-duth/NoCpad
#ifndef __TB_WRAP_H__
#define __TB_WRAP_H__
#include "systemc.h"
#include "nvhls_connections.h"
#ifndef __SYNTHESIS__
#include <string>
#include <iostream>
#endif
// Testbench wrapper that pairs a DUT message with bookkeeping timestamps
// (generation, injection, ejection) used for latency measurement.
template<class T>
struct msg_tb_wrap {
  T dut_msg;         // the actual message exchanged with the DUT
  sc_time time_gen;  // when the message was generated by the traffic source
  sc_time time_inj;  // when it was injected into the network
  sc_time time_ej;   // when it was ejected at the destination
  bool is_read = false;  // marks read transactions
  // Print only the wrapped message; the timing fields are bookkeeping.
  inline friend std::ostream& operator << ( std::ostream& os, const msg_tb_wrap& msg_tmp ) {
    os << msg_tmp.dut_msg;
    return os;
  }
  // Only for SystemC
  inline friend void sc_trace(sc_trace_file* tf, const msg_tb_wrap& msg, const std::string& name) {
    sc_trace(tf, msg.dut_msg, name);
  }
};
#endif // __TB_WRAP_H__
|
ic-lab-duth/NoCpad
|
src/include/duth_fun.h
|
<filename>src/include/duth_fun.h
#ifndef __DUTH_FUN_LIB_H__
#define __DUTH_FUN_LIB_H__
#include <systemc.h>
#ifdef HLS_CATAPULT
#include <ac_sc.h>
#include <ac_int.h>
#endif
#include "./onehot.h"
template <unsigned N> class onehot;
//============================================================================//
//============== Workaround to imitate assignments of sc types ===============//
//============================================================================//
// Functions to statically assign bit vectors to/from arrays of chars and bools.
// Used as a workaround to avoid variable arguments in the .range() function of bit vectors
// Recursive helpers that copy BYTES elements between an sc bit-vector (bv) or
// ac_int (ac) and plain unsigned char / bool arrays. The recursion peels one
// element per step so every .range()/.set_slc() call gets compile-time-constant
// bounds (the workaround noted above).
template<class T, int BYTES> struct duth_fun {
  // char array -> bit-vector, one byte lane per step
  static inline void assign_char2bv(T& lhs, unsigned char *rhs) {
    duth_fun<T, BYTES-1>::assign_char2bv(lhs, rhs);
    lhs.range((BYTES*8)-1, ((BYTES-1)*8)) = rhs[BYTES-1];
  }
  // char array -> ac_int
  static inline void assign_char2ac(T& lhs, unsigned char *rhs) {
    duth_fun<T, BYTES-1>::assign_char2ac(lhs, rhs);
    lhs.set_slc((BYTES-1)*8, (ac_int<8, false>) rhs[BYTES-1]);
  }
  // bit-vector -> char array
  static inline void assign_bv2char(unsigned char *lhs, T& rhs) {
    duth_fun<T, BYTES-1>::assign_bv2char(lhs, rhs);
    lhs[BYTES-1] = rhs.range((BYTES*8)-1, ((BYTES-1)*8)).to_uint();
  }
  // ac_int -> char array
  static inline void assign_ac2char(unsigned char *lhs, T& rhs) {
    duth_fun<T, BYTES-1>::assign_ac2char(lhs, rhs);
    lhs[BYTES-1] = (rhs.template slc<8>((BYTES-1)*8));
  }
  // bool array -> bit-vector, one bit per step
  static inline void assign_bool2bv(T& lhs, bool *rhs) {
    duth_fun<T, BYTES-1>::assign_bool2bv(lhs, rhs);
    lhs.range(BYTES-1, BYTES-1) = rhs[BYTES-1];
  }
  // bit-vector -> bool array
  static inline void assign_bv2bool(bool *lhs, T& rhs) {
    duth_fun<T, BYTES-1>::assign_bv2bool(lhs, rhs);
    lhs[BYTES-1] = rhs.range(BYTES-1, BYTES-1).to_uint();
  }
  // bool array -> ac_int
  static inline void assign_bool2ac(T& lhs, bool *rhs) {
    duth_fun<T, BYTES-1>::assign_bool2ac(lhs, rhs);
    lhs.set_slc(BYTES-1, (ac_int<1, false>) rhs[BYTES-1]);
  }
  // ac_int -> bool array
  static inline void assign_ac2bool(bool *lhs, T& rhs) {
    duth_fun<T, BYTES-1>::assign_ac2bool(lhs, rhs);
    lhs[BYTES-1] = rhs.template slc<1>(BYTES-1);
  }
};
// Recursion terminator: handles element 0 of each copy.
template<class T> struct duth_fun<T, 1> {
  static inline void assign_char2bv(T& lhs, unsigned char *rhs) {
    lhs.range(7, 0) = rhs[0];
  }
  static inline void assign_char2ac(T& lhs, unsigned char *rhs) {
    lhs.set_slc(0, (ac_int<8, false>) rhs[0]);
  }
  static inline void assign_bv2char(unsigned char *lhs, T& rhs) {
    lhs[0] = rhs.range(7, 0).to_uint();
  }
  static inline void assign_ac2char(unsigned char *lhs, T& rhs) {
    lhs[0] = rhs.template slc<8>(0);
  }
  static inline void assign_bool2bv(T& lhs, bool *rhs) {
    lhs.range(0, 0) = rhs[0];
  }
  static inline void assign_bv2bool(bool *lhs, T& rhs) {
    lhs[0] = rhs.range(0, 0).to_uint();
  }
  static inline void assign_bool2ac(T& lhs, bool *rhs) {
    lhs.set_slc(0, (ac_int<1, false>) rhs[0]);
  }
  static inline void assign_ac2bool(bool *lhs, T& rhs) {
    lhs[0] = rhs.template slc<1>(0);
  }
};
//============================================================================//
//==================== Statically calculate Number of bits ===================//
//============================================================================//
// Despite the name, val = floor(log2(x)) + 1, i.e. the number of bits needed
// to represent x (clog2<4>::val == 3, clog2<5>::val == 3, clog2<8>::val == 4).
// For exact powers of two this is one more than ceil(log2(x)) — callers use it
// as a bit width, so keep that in mind when reading sc_uint< clog2<..>::val >.
template <int x>
struct clog2 { enum { val = 1 + clog2<(x>>1)>::val }; };
template <> struct clog2<1> { enum { val = 1 }; };
//============================================================================//
//============ Weighted-Binary to One-Hot conversion (case based) ============//
//============================================================================//
// ISSUES ON CO-SIMULATION
/*
template<int OH_BITS>
sc_uint<OH_BITS> wb2oh_case (const sc_uint< clog2<OH_BITS>::val > wb_i);
template <>
sc_uint<1> wb2oh_case (const sc_uint< 1 > wb_i) {
return 1;
};
template <>
sc_uint<2> wb2oh_case (const sc_uint< clog2<2>::val > wb_i) {
sc_uint<2> oh_rep;
switch (wb_i) {
case 0 : oh_rep = 1;
break;
case 1 : oh_rep = 2;
break;
default : oh_rep = 1;
break;
}
return oh_rep;
};
template <>
sc_uint<3> wb2oh_case (const sc_uint< clog2<3>::val > wb_i) {
sc_uint<3> oh_rep;
switch (wb_i) {
case 0 : oh_rep = 1;
break;
case 1 : oh_rep = 2;
break;
case 2 : oh_rep = 4;
break;
case 3 : oh_rep = 8; // Possible, but functionally wrong
break;
default : oh_rep = 1;
break;
}
return oh_rep;
};
template <>
sc_uint<4> wb2oh_case (const sc_uint< clog2<4>::val > wb_i) {
sc_uint<4> oh_rep;
switch (wb_i) {
case 0 : oh_rep = 1;
break;
case 1 : oh_rep = 2;
break;
case 2 : oh_rep = 4;
break;
case 3 : oh_rep = 8;
break;
default : oh_rep = 1;
break;
}
return oh_rep;
};
template <>
sc_uint<5> wb2oh_case (const sc_uint< clog2<5>::val > wb_i) {
sc_uint<5> oh_rep;
switch (wb_i) {
case 0 : oh_rep = 1;
break;
case 1 : oh_rep = 2;
break;
case 2 : oh_rep = 4;
break;
case 3 : oh_rep = 8;
break;
case 4 : oh_rep = 16;
break;
case 5 : oh_rep = 32; // Should not occur
break;
case 6 : oh_rep = 64; // Should not occur
break;
case 7 : oh_rep = 128; // Should not occur
break;
default : oh_rep = 1;
break;
}
return oh_rep;
};
template <>
sc_uint<6> wb2oh_case (const sc_uint< clog2<6>::val > wb_i) {
sc_uint<6> oh_rep;
switch (wb_i) {
case 0 : oh_rep = 1;
break;
case 1 : oh_rep = 2;
break;
case 2 : oh_rep = 4;
break;
case 3 : oh_rep = 8;
break;
case 4 : oh_rep = 16;
break;
case 5 : oh_rep = 32;
break;
case 6 : oh_rep = 64; // Should not occur
break;
case 7 : oh_rep = 128; // Should not occur
break;
default : oh_rep = 1;
break;
}
return oh_rep;
};
template <>
sc_uint<7> wb2oh_case (const sc_uint< clog2<7>::val > wb_i) {
sc_uint<7> oh_rep;
switch (wb_i) {
case 0 : oh_rep = 1;
break;
case 1 : oh_rep = 2;
break;
case 2 : oh_rep = 4;
break;
case 3 : oh_rep = 8;
break;
case 4 : oh_rep = 16;
break;
case 5 : oh_rep = 32;
break;
case 6 : oh_rep = 64;
break;
case 7 : oh_rep = 128; // Should not occur
break;
default : oh_rep = 1;
break;
}
return oh_rep;
};
template <>
sc_uint<8> wb2oh_case (const sc_uint< clog2<8>::val > wb_i) {
sc_uint<8> oh_rep;
switch (wb_i) {
case 0 : oh_rep = 1;
break;
case 1 : oh_rep = 2;
break;
case 2 : oh_rep = 4;
break;
case 3 : oh_rep = 8;
break;
case 4 : oh_rep = 16;
break;
case 5 : oh_rep = 32;
break;
case 6 : oh_rep = 64;
break;
case 7 : oh_rep = 128;
break;
default : oh_rep = 1;
break;
}
return oh_rep;
};
*/
//============================================================================//
//============================== Mux Container Struct ========================//
//============================================================================//
// Generic one-hot multiplexer container, explicitly specialized per input
// count (1..8) below. All variants return data_i[i] for a one-hot select with
// bit i set; non-one-hot selects fall back to data_i[0] in the *_case forms.
template <class T, int SIZE> struct mux {
  // switch-based one-hot mux, sc_uint select
  static T mux_oh_case(const sc_uint<SIZE> sel_i, const T data_i[SIZE]);
  // switch-based one-hot mux, onehot<> select
  static T mux_oh_case(const onehot<SIZE> sel_i, const T data_i[SIZE]);
  // AND-OR formulation (requires T to provide and_mask())
  static T mux_oh_ao(const sc_uint<SIZE> sel_i, const T data_i[SIZE]);
};
//============================================================================//
//======================== One-Hot Multiplexer (case based) ==================//
//============================================================================//
// Degenerate 1-input mux: the select is irrelevant, always returns data_i[0].
template <class T>
struct mux<T, 1> {
  static T mux_oh_case (const sc_uint<1> sel_i, const T data_i[1] ) {
    return data_i[0];
  };
  static T mux_oh_case (const onehot<1> sel_i, const T data_i[1] ) {
    return data_i[0];
  };
  static T mux_oh_ao (const sc_uint<1> sel_i, const T data_i[1] ) {
    return data_i[0];
  };
};
// 2-input one-hot mux. Select bit i picks data_i[i]; a non-one-hot select
// falls back to data_i[0], matching the default case of the original switch.
template <class T>
struct mux<T, 2> {
  static T mux_oh_case (const sc_uint<2> sel_i, const T data_i[2] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 2; ++i)
      if (sel_i == (1u << i)) picked = data_i[i];
    return picked;
  };
  static T mux_oh_case (const onehot<2> sel_i, const T data_i[2] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 2; ++i)
      if (sel_i.val == (1u << i)) picked = data_i[i];
    return picked;
  };
  // AND-OR form: OR together every input masked by its select bit.
  static T mux_oh_ao (const sc_uint<2> sel_i, const T data_i[2] ) {
    T merged = T();
#pragma hls_unroll yes
    for (int i = 0; i < 2; ++i) {
      bool lane_on = (sel_i >> i) & 1;
      merged = merged | data_i[i].and_mask(lane_on);
    }
    return merged;
  };
};
// 3-input one-hot mux. Select bit i picks data_i[i]; a non-one-hot select
// falls back to data_i[0], matching the default case of the original switch.
template <class T>
struct mux<T, 3> {
  static T mux_oh_case (const sc_uint<3> sel_i, const T data_i[3] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 3; ++i)
      if (sel_i == (1u << i)) picked = data_i[i];
    return picked;
  };
  static T mux_oh_case (const onehot<3> sel_i, const T data_i[3] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 3; ++i)
      if (sel_i.val == (1u << i)) picked = data_i[i];
    return picked;
  };
  // AND-OR form: OR together every input masked by its select bit.
  static T mux_oh_ao (const sc_uint<3> sel_i, const T data_i[3] ) {
    T merged = T();
#pragma hls_unroll yes
    for (int i = 0; i < 3; ++i) {
      bool lane_on = (sel_i >> i) & 1;
      merged = merged | data_i[i].and_mask(lane_on);
    }
    return merged;
  };
};
// 4-input one-hot mux. Select bit i picks data_i[i]; a non-one-hot select
// falls back to data_i[0], matching the default case of the original switch.
template <class T>
struct mux<T, 4> {
  static T mux_oh_case (const sc_uint<4> sel_i, const T data_i[4] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 4; ++i)
      if (sel_i == (1u << i)) picked = data_i[i];
    return picked;
  };
  static T mux_oh_case (const onehot<4> sel_i, const T data_i[4] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 4; ++i)
      if (sel_i.val == (1u << i)) picked = data_i[i];
    return picked;
  };
  // AND-OR form: OR together every input masked by its select bit.
  static T mux_oh_ao (const sc_uint<4> sel_i, const T data_i[4] ) {
    T merged = T();
#pragma hls_unroll yes
    for (int i = 0; i < 4; ++i) {
      bool lane_on = (sel_i >> i) & 1;
      merged = merged | data_i[i].and_mask(lane_on);
    }
    return merged;
  };
};
// 5-input one-hot mux. Select bit i picks data_i[i]; a non-one-hot select
// falls back to data_i[0], matching the default case of the original switch.
template <class T>
struct mux<T, 5> {
  static T mux_oh_case (const sc_uint<5> sel_i, const T data_i[5] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 5; ++i)
      if (sel_i == (1u << i)) picked = data_i[i];
    return picked;
  };
  static T mux_oh_case (const onehot<5> sel_i, const T data_i[5] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 5; ++i)
      if (sel_i.val == (1u << i)) picked = data_i[i];
    return picked;
  };
  // AND-OR form: OR together every input masked by its select bit.
  static T mux_oh_ao (const sc_uint<5> sel_i, const T data_i[5] ) {
    T merged = T();
#pragma hls_unroll yes
    for (int i = 0; i < 5; ++i) {
      bool lane_on = (sel_i >> i) & 1;
      merged = merged | data_i[i].and_mask(lane_on);
    }
    return merged;
  };
};
// 6-input one-hot mux. Select bit i picks data_i[i]; a non-one-hot select
// falls back to data_i[0], matching the default case of the original switch.
template <class T>
struct mux<T, 6> {
  static T mux_oh_case (const sc_uint<6> sel_i, const T data_i[6] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 6; ++i)
      if (sel_i == (1u << i)) picked = data_i[i];
    return picked;
  };
  static T mux_oh_case (const onehot<6> sel_i, const T data_i[6] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 6; ++i)
      if (sel_i.val == (1u << i)) picked = data_i[i];
    return picked;
  };
  // AND-OR form: OR together every input masked by its select bit.
  static T mux_oh_ao (const sc_uint<6> sel_i, const T data_i[6] ) {
    T merged = T();
#pragma hls_unroll yes
    for (int i = 0; i < 6; ++i) {
      bool lane_on = (sel_i >> i) & 1;
      merged = merged | data_i[i].and_mask(lane_on);
    }
    return merged;
  };
};
// 7-input one-hot mux. Select bit i picks data_i[i]; a non-one-hot select
// falls back to data_i[0], matching the default case of the original switch.
template <class T>
struct mux<T, 7> {
  static T mux_oh_case (const sc_uint<7> sel_i, const T data_i[7] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 7; ++i)
      if (sel_i == (1u << i)) picked = data_i[i];
    return picked;
  };
  static T mux_oh_case (const onehot<7> sel_i, const T data_i[7] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 7; ++i)
      if (sel_i.val == (1u << i)) picked = data_i[i];
    return picked;
  };
  // AND-OR form: OR together every input masked by its select bit.
  static T mux_oh_ao (const sc_uint<7> sel_i, const T data_i[7] ) {
    T merged = T();
#pragma hls_unroll yes
    for (int i = 0; i < 7; ++i) {
      bool lane_on = (sel_i >> i) & 1;
      merged = merged | data_i[i].and_mask(lane_on);
    }
    return merged;
  };
};
// 8-input one-hot mux. Select bit i picks data_i[i]; a non-one-hot select
// falls back to data_i[0], matching the default case of the original switch.
template <class T>
struct mux<T, 8> {
  static T mux_oh_case (const sc_uint<8> sel_i, const T data_i[8] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 8; ++i)
      if (sel_i == (1u << i)) picked = data_i[i];
    return picked;
  };
  static T mux_oh_case (const onehot<8> sel_i, const T data_i[8] ) {
    T picked = data_i[0];
#pragma hls_unroll yes
    for (int i = 1; i < 8; ++i)
      if (sel_i.val == (1u << i)) picked = data_i[i];
    return picked;
  };
  // AND-OR form: OR together every input masked by its select bit.
  static T mux_oh_ao (const sc_uint<8> sel_i, const T data_i[8] ) {
    T merged = T();
#pragma hls_unroll yes
    for (int i = 0; i < 8; ++i) {
      bool lane_on = (sel_i >> i) & 1;
      merged = merged | data_i[i].and_mask(lane_on);
    }
    return merged;
  };
};
//============================================================================//
//==================== Swap Dimensions of an Array Class =====================//
//============================================================================//
// Bit-matrix transpose for integer-like element types:
// bit j of out[i] = bit i of in[j].
template<class X, int X_SZ, class Y, int Y_SZ>
inline void swap_dim (const X in[X_SZ], Y out[Y_SZ]) {
#pragma hls_unroll yes
  for(int row=0; row<Y_SZ; ++row) {
    Y gathered = 0;
#pragma hls_unroll yes
    for(int col=0; col<X_SZ; ++col) {
      // Pick bit 'row' of input word 'col' and drop it into bit 'col'.
      gathered = gathered | (((in[col] >> row) & 1) << col);
    }
    out[row] = gathered;
  }
};
// Bit-matrix transpose for element types that expose a static WIDTH and a
// .val member (e.g. onehot<N>): bit j of out[i].val = bit i of in[j].val.
// NOTE(review): 'mule' is default-constructed and then OR-ed into — this
// relies on Y's default constructor zero-initializing .val (true when .val is
// an sc_uint member); confirm for any other Y used here.
template<class X, class Y>
inline void swap_dim (const X in[X::WIDTH], Y out[Y::WIDTH]) {
#pragma hls_unroll yes
  for(int i=0; i<Y::WIDTH; ++i) {
    Y mule;
#pragma hls_unroll yes
    for(int j=0; j<X::WIDTH; ++j) {
      mule.val |= (((in[j].val >> i) & 1) << j );
    }
    out[i] = mule;
  }
};
//============================================================================//
//============================== One-Hot Credit Class ========================//
//============================================================================//
// Credit counter kept one-hot in N+1 bits: bit i set <=> i credits available.
// Starts full (bit N set); bit 0 set means no credits left.
// NOTE(review): (1<<N) is a plain int shift — fine for the small credit counts
// used here, but N >= 31 would overflow; confirm N stays small.
template <int N>
class credit_class {
public:
  sc_uint<N+1> credits_oh = (1<<N);
  inline bool ready() {return !credits_oh[0];};              // at least one credit left
  inline void incr() { credits_oh = (credits_oh << 1);};     // credit returned: move token up
  inline void decr() { credits_oh = (credits_oh >> 1);};     // credit consumed: move token down
};
#endif // __DUTH_FUN_LIB_H__
|
ic-lab-duth/NoCpad
|
src/ace/ace_master_if.h
|
// --------------------------------------------------------- //
// MASTER-IF Is where the MASTER CONNECTS!!!!! //
// //
// Aka. Master <-> Master-IF <-> NoC <-> Slave-IF <-> Slave //
// --------------------------------------------------------- //
#ifndef _ACE_MASTER_IF_H_
#define _ACE_MASTER_IF_H_
#include "systemc.h"
#include "nvhls_connections.h"
#include "../include/ace.h"
#include "../include/axi4_configs_extra.h"
#include "../include/flit_ace.h"
#include "../include/duth_fun.h"
#define LOG_MAX_OUTS 8
#define INIT_S1(n) n{#n}
// --- Helping Data structures --- //
// Per-TID outstanding-transaction bookkeeping, used by the packetizers to
// decide whether a new request could be reordered relative to in-flight ones.
struct outs_table_entry {
  sc_uint<dnp::D_W> dst_last;   // destination of the last request sent with this TID
  sc_uint<LOG_MAX_OUTS> sent;   // number of in-flight transactions with this TID
  bool reorder;                 // NOTE(review): only written at reset in this file — confirm it is still used
};
// Info passed between packetizer and depacketizer to inform about new and finished transactions.
// Info passed between packetizer and depacketizer to inform about new and
// finished transactions (TID plus the destination it was sent to).
struct order_info {
  sc_uint<dnp::ace::ID_W> tid;  // transaction ID
  sc_uint<dnp::D_W> dst;        // destination node
  //bool reord;
  //unsigned char ticket;
  inline friend std::ostream& operator << ( std::ostream& os, const order_info& info ) {
    os <<"TID: "<< info.tid <<", Dst: "<< info.dst /*<<", Ticket: "<< info.ticket*/;
#ifdef SYSTEMC_INCLUDED
    os << std::dec << " @" << sc_time_stamp();
#else
    os << std::dec << " @" << "no-timed";
#endif
    return os;
  }
#ifdef SYSTEMC_INCLUDED
  // Only for SystemC
  inline friend void sc_trace(sc_trace_file* tf, const order_info& info, const std::string& name) {
    sc_trace(tf, info.tid, name + ".tid");
    sc_trace(tf, info.dst, name + ".dst");
    //sc_trace(tf, info.ticket, name + ".ticket");
  }
#endif
};
// --- Master IF --- //
// The AXI Master connects its independent AXI RD and WR channels to the interface.
// The interface receives the requests and independently packetizes and sends them into the network.
// The responses are depacketized in a separate thread and fed back to the MASTER.
// Thus the Master interface comprises 4 distinct/parallel blocks: WR/RD pack and WR/RD depack.
template <typename cfg>
SC_MODULE(ace_master_if) {
  // Shorthands for the ACE message payloads and AXI4 field encodings in use.
  typedef typename ace::ace5<axi::cfg::ace> ace5_;
  typedef typename axi::AXI4_Encoding enc_;
  // Flit types, one per traffic class (read/write request/response, cache snoop, ACK).
  typedef flit_dnp<cfg::RREQ_PHITS> rreq_flit_t;
  typedef flit_dnp<cfg::RRESP_PHITS> rresp_flit_t;
  typedef flit_dnp<cfg::WREQ_PHITS> wreq_flit_t;
  typedef flit_dnp<cfg::WRESP_PHITS> wresp_flit_t;
  typedef flit_dnp<cfg::CREQ_PHITS> creq_flit_t;
  typedef flit_dnp<cfg::CRESP_PHITS> cresp_flit_t;
  typedef flit_ack ack_flit_t;
  // Counters wide enough to index the phits inside a response/request flit.
  typedef sc_uint< nvhls::log2_ceil<cfg::RRESP_PHITS>::val > cnt_phit_rresp_t;
  typedef sc_uint< nvhls::log2_ceil<cfg::WREQ_PHITS>::val > cnt_phit_wreq_t;
  const unsigned char LOG_RD_M_LANES = nvhls::log2_ceil<cfg::RD_LANES>::val;
  const unsigned char LOG_WR_M_LANES = nvhls::log2_ceil<cfg::WR_LANES>::val;
  sc_in_clk clk;
  sc_in <bool> rst_n;
  // Address map per slave; NOTE(review): presumably [0]=start, [1]=end of each
  // slave's address range — confirm against the addr_lut_rd/wr implementation.
  sc_in < sc_uint<(dnp::ace::AH_W+dnp::ace::AL_W)> > addr_map[cfg::SLAVE_NUM][2];
  sc_in< sc_uint<dnp::S_W> > THIS_ID;  // network node id of this interface
  // AXI MASTER Side Channels
  // --- ACE --- //
  Connections::Out<ace5_::AC> INIT_S1(ac_out);
  Connections::In <ace5_::CR> INIT_S1(cr_in);
  Connections::In <ace5_::CD> INIT_S1(cd_in);
  // --- READ --- //
  Connections::In<ace5_::AddrPayload> INIT_S1(ar_in);
  Connections::Out<ace5_::ReadPayload> INIT_S1(r_out);
  Connections::In <ace5_::RACK> INIT_S1(rack_in);
  // --- WRITE --- //
  Connections::In<ace5_::AddrPayload> INIT_S1(aw_in);
  Connections::In<ace5_::WritePayload> INIT_S1(w_in);
  Connections::Out<ace5_::WRespPayload> INIT_S1(b_out);
  Connections::In <ace5_::WACK> INIT_S1(wack_in);
  // NoC Side Channels
  Connections::Out<rreq_flit_t> INIT_S1(rd_flit_out);
  Connections::In<rresp_flit_t> INIT_S1(rd_flit_in);
  Connections::Out<ack_flit_t> INIT_S1(rack_flit_out);
  Connections::Out<wreq_flit_t> INIT_S1(wr_flit_out);
  Connections::In<wresp_flit_t> INIT_S1(wr_flit_in);
  Connections::Out<ack_flit_t> INIT_S1(wack_flit_out);
  Connections::In<creq_flit_t> INIT_S1(cache_flit_in);
  Connections::Out<cresp_flit_t> INIT_S1(cache_flit_out);
  // --- READ Internals --- //
  // FIFOs that pass initiation and finish transactions between Pack-Depack
  sc_fifo<order_info> INIT_S1(rd_trans_init); // Pack to Pack | fwd to bck
  sc_fifo<sc_uint<dnp::ace::ID_W>> INIT_S1(rd_trans_fin); // De-pack to Pack | bck to fwd
  // Placed on READ Packetizer
  outs_table_entry rd_out_table[1<<dnp::ace::ID_W]; //[(1<<TID_W)]; // Holds the OutStanding Transactions | Hint : TID_W=4 => 16 slots x 16bits
  // --- WRITE Internals --- //
  sc_fifo<sc_uint<dnp::ace::ID_W>> INIT_S1(wr_trans_fin); // Depack to pack
  // Placed on WRITE Packetizer
  outs_table_entry wr_out_table[1<<dnp::ace::ID_W]; //[(1<<TID_W)]; // Holds the OutStanding Transactions | Hint : TID_W=4 => 16 slots x 16bits (could be less)
// Constructor
  SC_HAS_PROCESS(ace_master_if);
  // Constructor: sizes the depack->pack completion FIFOs (depth 2) and spawns
  // the five clocked threads, all with an async active-low reset.
  ace_master_if(sc_module_name name_="ace_master_if")
  :
  sc_module (name_),
  rd_trans_fin (2),
  wr_trans_fin (2)
  {
    SC_THREAD(rd_req_pack_job);
    sensitive << clk.pos();
    async_reset_signal_is(rst_n, false);
    SC_THREAD(rd_resp_depack_job);
    sensitive << clk.pos();
    async_reset_signal_is(rst_n, false);
    SC_THREAD(wr_req_pack_job);
    sensitive << clk.pos();
    async_reset_signal_is(rst_n, false);
    SC_THREAD(wr_resp_depack_job);
    sensitive << clk.pos();
    async_reset_signal_is(rst_n, false);
    SC_THREAD(snoop_module);
    sensitive << clk.pos();
    async_reset_signal_is(rst_n, false);
  }
  //-------------------------------//
  //--- ACE SNOOPING Module ---//
  //-------------------------------//
  // Thread: receives snoop request flits from the NoC, forwards them to the
  // attached master on AC, collects the CR (and CD data, if any) response and
  // packetizes it back towards the requester.
  void snoop_module () {
    //-- Start of Reset ---//
    ac_out.Reset();
    cr_in.Reset();
    cd_in.Reset();
    cache_flit_in.Reset();
    cache_flit_out.Reset();
    //-- End of Reset ---//
    wait();
    while(1) {
      // Pop one snoop-request flit and unpack its DNP fields.
      creq_flit_t flit_snp_rcv = cache_flit_in.Pop();
      sc_uint<dnp::D_W> sender = (flit_snp_rcv.data[0] >> dnp::S_PTR) & ((1<<dnp::S_W)-1);
      ace5_::AC snoop_req;
      snoop_req.prot = (flit_snp_rcv.data[2] >> dnp::ace::creq::C_PROT_PTR) & ((1<<dnp::ace::C_PROT_W)-1);
      snoop_req.snoop = (flit_snp_rcv.data[1] >> dnp::ace::creq::SNP_PTR) & ((1<<dnp::ace::SNP_W)-1);
      // Re-assemble the address from its high and low parts.
      snoop_req.addr = ((((flit_snp_rcv.data[2]>>dnp::ace::creq::AH_PTR) & ((1<<dnp::ace::AH_W)-1)) << dnp::ace::AL_W) |
                        ((flit_snp_rcv.data[1]>>dnp::ace::creq::AL_PTR) & ((1<<dnp::ace::AL_W)-1)));
      NVHLS_ASSERT_MSG(((flit_snp_rcv.data[0].to_uint() >> dnp::D_PTR) & ((1<<dnp::D_W)-1)) == (THIS_ID.read().to_uint()), "Flit misrouted!");
      ac_out.Push(snoop_req);
      // Wait for the master's snoop response and build the response header.
      ace5_::CR snoop_resp = cr_in.Pop();
      cresp_flit_t resp_flit;
      // NOTE(review): the packet type is PACK_TYPE__RD_REQ although this is a
      // snoop *response* — confirm the receiving side expects this encoding.
      resp_flit.data[0] = ((sc_uint<dnp::PHIT_W>) snoop_resp.resp << dnp::ace::cresp::C_RESP_PTR) |
                          ((sc_uint<dnp::PHIT_W>)dnp::PACK_TYPE__RD_REQ << dnp::T_PTR ) |
                          ((sc_uint<dnp::PHIT_W>) 0 << dnp::Q_PTR ) |
                          ((sc_uint<dnp::PHIT_W>)sender << dnp::D_PTR ) |
                          ((sc_uint<dnp::PHIT_W>)THIS_ID << dnp::S_PTR ) |
                          ((sc_uint<dnp::PHIT_W>)0 << dnp::V_PTR ) ;
      // CR resp bit 0 flags that cache data follows on the CD channel.
      bool has_data = (snoop_resp.resp & 1);
      if (has_data) {
        // HEAD flit carries the header; the same flit object is then reused
        // (data[] overwritten) for the BODY/TAIL payload flits.
        resp_flit.type = HEAD;
        cache_flit_out.Push(resp_flit);
        ace5_::CD snoop_data;
        do {
          unsigned char data_bytes[ace5_::C_CACHE_WIDTH/8];
          snoop_data = cd_in.Pop();
          duth_fun<ace5_::CD::Data, ace5_::C_CACHE_WIDTH/8>::assign_ac2char(data_bytes, snoop_data.data);
          // Pack two bytes per phit, plus the last flag.
          for(unsigned i=0; i<(ace5_::C_CACHE_WIDTH/8)/2; ++i) {
            resp_flit.data[i] = ((sc_uint<dnp::PHIT_W>)snoop_data.last << dnp::ace::wdata::LA_PTR ) | // MSB
                                ((sc_uint<dnp::PHIT_W>)data_bytes[(i<<1)+1] << dnp::ace::wdata::B1_PTR ) |
                                ((sc_uint<dnp::PHIT_W>)data_bytes[(i<<1) ] << dnp::ace::wdata::B0_PTR ) ;
          }
          resp_flit.type = snoop_data.last ? TAIL : BODY;
          cache_flit_out.Push(resp_flit);
        } while (!snoop_data.last);
      } else {
        // Data-less response fits in a single flit.
        resp_flit.type = SINGLE;
        cache_flit_out.Push(resp_flit);
      }
    }
  }
  //-------------------------------//
  //--- READ REQuest Packetizer ---//
  //-------------------------------//
  // Thread: accepts AR requests from the master, stalls them while a reorder
  // hazard exists (same-TID in-flight requests to a different destination),
  // then packs each request into a SINGLE flit and sends it into the NoC.
  void rd_req_pack_job () {
    //-- Start of Reset ---//
    // rd_out_table contains outstanding info for each TID, to decide if reordering is possible.
    for (int i=0; i<1<<dnp::ace::ID_W; ++i) {
      rd_out_table[i].dst_last = 0;
      rd_out_table[i].sent = 0;
      rd_out_table[i].reorder = false;
    }
    ar_in.Reset();
    rd_flit_out.Reset();
    ace5_::AddrPayload this_req;
    //-- End of Reset ---//
    wait();
    while(1) {
      // New Request from MASTER received
      if(ar_in.PopNB(this_req)) {
        // A new request must stall until it is eligible to depart.
        // Depending the reordering scheme
        //   0 : all in-flight transactions must be to the same destination
        //   1 : all in-flight transactions of the SAME ID, must be to the same destination
        outs_table_entry sel_entry = rd_out_table[this_req.id.to_uint()];
        // Coherent accesses are flagged by the snoop/domain fields.
        bool is_coherent = (this_req.snoop > 0) || ((this_req.snoop ==0) && (this_req.domain.xor_reduce()));
        // resolve address to node-id; coherent traffic goes to the directory/home node.
        sc_uint<dnp::D_W> this_dst = is_coherent ? (cfg::SLAVE_NUM+cfg::ALL_MASTER_NUM) : addr_lut_rd(this_req.addr);
        // Check reorder conditions for received TID.
        bool may_reorder = (sel_entry.sent>0) && (sel_entry.dst_last != this_dst);
        // In case of possible reordering wait_for has the number of transactions
        // of the same ID, this request has to wait for.
        sc_uint<LOG_MAX_OUTS> wait_for = sel_entry.sent;
        // Poll for Finished transactions until no longer reordering is possible.
        // (Also spins while the output channel is full so completions keep draining.)
        while(may_reorder || rd_flit_out.Full()) {
          sc_uint<dnp::ace::ID_W> tid_fin;
          if(rd_trans_fin.nb_read(tid_fin)) {
            rd_out_table[tid_fin].sent--;                  // update outstanding table
            if(tid_fin==this_req.id.to_uint()) wait_for--; // update local wait value
          }
          may_reorder = (wait_for>0);
          wait();
        }; // End of while reorder
        // --- Start Packetization --- //
        // Packetize request into a flit. The fields are described in DNP20
        rreq_flit_t tmp_flit;
        tmp_flit.type = SINGLE; // Entire request fits in a single flit thus SINGLE
        tmp_flit.data[0] = ((sc_uint<dnp::PHIT_W>)this_req.snoop << dnp::ace::req::SNP_PTR) |
                           ((sc_uint<dnp::PHIT_W>)this_req.domain << dnp::ace::req::DOM_PTR) |
                           ((sc_uint<dnp::PHIT_W>)this_req.id << dnp::ace::req::ID_PTR ) |
                           ((sc_uint<dnp::PHIT_W>)dnp::PACK_TYPE__RD_REQ << dnp::T_PTR ) |
                           ((sc_uint<dnp::PHIT_W>) 0 << dnp::Q_PTR ) |
                           ((sc_uint<dnp::PHIT_W>)this_dst << dnp::D_PTR ) |
                           ((sc_uint<dnp::PHIT_W>)THIS_ID << dnp::S_PTR ) |
                           ((sc_uint<dnp::PHIT_W>)0 << dnp::V_PTR ) ;
        tmp_flit.data[1] = ((sc_uint<dnp::PHIT_W>)this_req.len << dnp::ace::req::LE_PTR) |
                           ((sc_uint<dnp::PHIT_W>)(this_req.addr & 0xffff) << dnp::ace::req::AL_PTR) ;
        tmp_flit.data[2] = ((sc_uint<dnp::PHIT_W>)this_req.barrier << dnp::ace::req::BAR_PTR) |
                           ((sc_uint<dnp::PHIT_W>)this_req.burst << dnp::ace::req::BU_PTR ) |
                           ((sc_uint<dnp::PHIT_W>)this_req.size << dnp::ace::req::SZ_PTR ) |
                           ((sc_uint<dnp::PHIT_W>)(this_req.addr >> dnp::ace::AL_W) << dnp::ace::req::AH_PTR ) ;
        // Update the table due to the new outstanding
        rd_out_table[this_req.id.to_uint()].sent++;
        rd_out_table[this_req.id.to_uint()].dst_last = this_dst;
        // send header flit to NoC
        rd_flit_out.Push(tmp_flit); // We've already checked that !Full thus this should not block.
      } else { // No RD Req from Master, simply check for finished Outstanding trans
        sc_uint<dnp::ace::ID_W> tid_fin;
        if(rd_trans_fin.nb_read(tid_fin)) {
          rd_out_table[tid_fin].sent--; // update outstanding table
        }
        wait();
      }
    } // End of while(1)
  }; // End of Read Request Packetizer
  //-----------------------------------//
  //--- READ RESPonse DE-Packetizer ---//
  //-----------------------------------//
  // Thread: receives read-response flits, reassembles them into AXI R beats
  // (handling unaligned starts, narrow sizes and FIXED bursts), pushes the
  // beats to the master, and reports the finished TID back to the packetizer.
  void rd_resp_depack_job () {
    rack_in.Reset();
    r_out.Reset();
    rd_flit_in.Reset();
    rack_flit_out.Reset();
    while(1) {
      // Blocking read a flit to start depacketizing the response.
      rresp_flit_t flit_rcv;
      flit_rcv = rd_flit_in.Pop();
      // Construct the transaction's attributes to build the response accordingly.
      ace5_::AddrPayload active_trans;
      active_trans.id = (flit_rcv.data[0] >> dnp::ace::rresp::ID_PTR) & ((1 << dnp::ace::ID_W) - 1);
      active_trans.burst = (flit_rcv.data[0] >> dnp::ace::rresp::BU_PTR) & ((1 << dnp::ace::BU_W) - 1);
      active_trans.size = (flit_rcv.data[1] >> dnp::ace::rresp::SZ_PTR) & ((1 << dnp::ace::SZ_W) - 1);
      active_trans.len = (flit_rcv.data[1] >> dnp::ace::rresp::LE_PTR) & ((1 << dnp::ace::LE_W) - 1);
      unsigned sender = flit_rcv.get_src();
      bool is_coherent = (flit_rcv.get_type() == dnp::PACK_TYPE__C_RD_RESP);;
      sc_uint<dnp::ace::SZ_W> final_size = (unsigned) active_trans.size; // Just the size. more compact
      // Partial lower 8-bit part of address to calculate the initial axi pointer in case of a non-aligned address
      sc_uint<dnp::ace::AP_W> addr_part = (flit_rcv.data[1] >> dnp::ace::rresp::AP_PTR) & ((1<<dnp::ace::AP_W) - 1);
      sc_uint<dnp::ace::AP_W> addr_init_aligned = ((addr_part & (cfg::RD_LANES-1)) & ~((1<<final_size)-1));
      // Data Depacketization happens in a loop. Each iteration pops a flit and constructs a beat.
      // Each iteration transfers data bytes from the flit to the AXI beat.
      // bytes_per_iter bytes may be transferred, which is limited by two factors
      // depending the AXI beat size and the bytes in the flit.
      //   1) The available data bytes in the flit is less than the required for the beat
      //   2) The remaining byte lanes are less than the available in the flit
      // For case (1) the flit is emptied and the next flit is popped at the next iteration
      // For case (2) the beat is pushed to Master and the next beat starts in the next iteration
      // For data Depacketization loop, we keep 2 pointers.
      //   axi_lane_ptr  -> to keep track axi byte lanes to place to data
      //   flit_phit_ptr -> to point at the data of the flit
      sc_uint<8> axi_lane_ptr = addr_init_aligned; // Bytes MOD axi size
      cnt_phit_rresp_t flit_phit_ptr = 0;          // Bytes MOD phits in flit
      // Also we keep track the processed and total data.
      sc_uint<16> bytes_total = ((active_trans.len.to_uint()+1)<<final_size);
      sc_uint<16> bytes_depacked = 0;              // Number of DE-packetized bytes
      unsigned char resp_build_tmp[cfg::RD_LANES];
      #pragma hls_unroll yes
      for(int i=0; i<cfg::RD_LANES; ++i) resp_build_tmp[i] = 0;
      #pragma hls_pipeline_init_interval 1
      #pragma pipeline_stall_mode flush
      gather_wr_beats : while (1) {
        // Each iteration moves data from the flit to the appropriate place on the AXI RD response
        // The two flit and axi pointers orchestrate the operation, until completion
        sc_uint<8> bytes_axi_left = ((1<<final_size) - (axi_lane_ptr & ((1<<final_size)-1)));
        sc_uint<8> bytes_flit_left = ((cfg::RRESP_PHITS<<1) - (flit_phit_ptr<<1));
        sc_uint<8> bytes_per_iter = (bytes_axi_left<bytes_flit_left) ? bytes_axi_left : bytes_flit_left;
        if(flit_phit_ptr==0)
          flit_rcv = rd_flit_in.Pop();
        // Convert flits to axi transfers. this should synthesize a mux that routes bytes from flit to axi lanes
        #pragma hls_unroll yes
        build_resp: for (int i = 0; i < (cfg::RD_LANES >> 1); ++i) { // i counts AXI Byte Lanes IN PHITS (i.e. Lanes/bytes_in_phit)
          if (i>=(axi_lane_ptr>>1) && i<((axi_lane_ptr+bytes_per_iter)>>1)) {
            cnt_phit_rresp_t loc_flit_ptr = flit_phit_ptr + (i-(axi_lane_ptr>>1));
            resp_build_tmp[(i << 1) + 1] = (flit_rcv.data[loc_flit_ptr] >> dnp::ace::rdata::B1_PTR) & ((1 << dnp::ace::B_W) - 1); // MSB
            resp_build_tmp[(i << 1) ] = (flit_rcv.data[loc_flit_ptr] >> dnp::ace::rdata::B0_PTR) & ((1 << dnp::ace::B_W) - 1); // LSB
          }
        }
        // transaction event flags
        bool done_job = ((bytes_depacked+bytes_per_iter)==bytes_total);           // All bytes are processed
        bool done_flit = (flit_phit_ptr+(bytes_per_iter>>1)==cfg::RRESP_PHITS);   // Flit got empty
        bool done_axi = (((bytes_depacked+bytes_per_iter)&((1<<final_size)-1))==0); // Beat got full
        // Push the response to MASTER, when either this Beat got the needed bytes or all bytes are transferred
        if( done_job || done_axi ) {
          ace5_::ReadPayload builder_resp;
          builder_resp.id = active_trans.id;
          builder_resp.resp = (flit_rcv.data[flit_phit_ptr] >> dnp::ace::rdata::RE_PTR) & ((1 << dnp::ace::R_RE_W) - 1);
          builder_resp.last = ((bytes_depacked+bytes_per_iter)==bytes_total);
          duth_fun<ace5_::Data, cfg::RD_LANES>::assign_char2ac(builder_resp.data, resp_build_tmp);
          r_out.Push(builder_resp);
          #pragma hls_unroll yes
          for(int i=0; i<cfg::RD_LANES; ++i) resp_build_tmp[i] = 0;
        }
        // Check to either finish transaction or update the pointers for the next iteration
        if (done_job) { // End of transaction
          // Inform Packetizer about finished transaction, and Exit
          rd_trans_fin.write(active_trans.id.to_uint());
          break;
        } else { // Check for finished transactions
          bytes_depacked +=bytes_per_iter;
          flit_phit_ptr = (done_flit) ? 0 : (flit_phit_ptr +(bytes_per_iter>>1));
          // FIXED bursts rewind to the initial lanes; INCR/WRAP advance modulo the lane count.
          axi_lane_ptr = (active_trans.burst==enc_::AXBURST::FIXED) ? ((axi_lane_ptr+bytes_per_iter) & ((1<<final_size)-1)) + addr_init_aligned :
                                                                     ((axi_lane_ptr+bytes_per_iter) & (cfg::RD_LANES-1)) ;
        }
      } // End of flit gathering loop
      // NOTE(review): RACK is popped unconditionally but an ack flit is sent to
      // the NoC only for coherent responses — confirm the master issues RACK
      // for every read transaction.
      ace5_::RACK tmp_rack;
      tmp_rack = rack_in.Pop();
      if (is_coherent) {
        ack_flit_t ack_flit(SINGLE,THIS_ID.read().to_uint(), sender, 1, 0);
        rack_flit_out.Push(ack_flit);
      }
    } // End of while(1)
  }; // End of Read Response Depacketizer
//--------------------------------//
//--- WRITE REQuest Packetizer ---//
//--------------------------------//
// Packetizes AXI/ACE write transactions into DNP flits for the NoC.
// Per transaction it (1) waits until the request may depart without risking
// response reordering (tracked per-ID in wr_out_table), (2) builds and sends
// a HEAD flit with the packetized AW header, then (3) packs the W-channel
// beats into BODY flits, closing the packet with a TAIL flit.
// Completions reported on wr_trans_fin decrement the outstanding counters;
// they are also drained while the flit output is back-pressured so the
// depacketizer can never deadlock against this thread.
void wr_req_pack_job () {
  wr_flit_out.Reset();
  aw_in.Reset();
  w_in.Reset();
  // Initialize the per-ID outstanding-transaction table.
  for (int i=0; i<1<<dnp::ace::ID_W; ++i) {
    wr_out_table[i].dst_last = 0;
    wr_out_table[i].sent = 0;
    wr_out_table[i].reorder = false;
  }
  ace5_::AddrPayload this_req;
  wait();
  while(1) {
    if(aw_in.PopNB(this_req)) { // New Request
      // A new request must stall until it is eligible to depart.
      // Depending the reordering scheme
      //   0 : all in-flight transactions must be to the same destination
      //   1 : all in-flight transactions of the SAME ID, must be to the same destination
      outs_table_entry sel_entry = wr_out_table[this_req.id.to_uint()];
      // Coherent (snooped) requests bypass address decoding and are steered
      // to the home node (placed after all slaves and masters in the ID map).
      bool pass_thru_home = ((this_req.snoop == 0) && (this_req.domain.xor_reduce())) ||
                            (this_req.snoop == 1);
      // resolve address to node-id
      sc_uint<dnp::D_W> this_dst = pass_thru_home ? (cfg::SLAVE_NUM+cfg::ALL_MASTER_NUM) : addr_lut_wr(this_req.addr);
      // Check reorder conditions for this TID.
      // In an ordered NoC reorder may occur when there are outstanding trans towards different destinations
      bool may_reorder = (sel_entry.sent>0) && (sel_entry.dst_last != this_dst);
      sc_uint<LOG_MAX_OUTS> wait_for = sel_entry.sent; // Counts outstanding transactions to wait for
      // Poll Finished transactions until no longer reorder is possible.
      while(may_reorder || wr_flit_out.Full()) {
        sc_uint<dnp::ace::ID_W> tid_fin;
        if(wr_trans_fin.nb_read(tid_fin)) {
          wr_out_table[tid_fin].sent--;
          if(tid_fin==this_req.id.to_uint()) wait_for--;
        }
        may_reorder = (wait_for>0);
        wait();
      }; // End of while reorder
      // --- Start HEADER Packetization --- //
      // Packetize request according DNP20, and send
      wreq_flit_t tmp_mule_flit;
      tmp_mule_flit.type = HEAD;
      tmp_mule_flit.data[0] = ((sc_uint<dnp::PHIT_W>)this_req.snoop  << dnp::ace::req::SNP_PTR) |
                              ((sc_uint<dnp::PHIT_W>)this_req.domain << dnp::ace::req::DOM_PTR) |
                              ((sc_uint<dnp::PHIT_W>)this_req.id     << dnp::ace::req::ID_PTR)  |
                              ((sc_uint<dnp::PHIT_W>)dnp::PACK_TYPE__WR_REQ << dnp::T_PTR)      |
                              ((sc_uint<dnp::PHIT_W>)0        << dnp::Q_PTR) |
                              ((sc_uint<dnp::PHIT_W>)this_dst << dnp::D_PTR) |
                              ((sc_uint<dnp::PHIT_W>)THIS_ID  << dnp::S_PTR) |
                              ((sc_uint<dnp::PHIT_W>)0        << dnp::V_PTR) ;
      tmp_mule_flit.data[1] = ((sc_uint<dnp::PHIT_W>)this_req.len             << dnp::ace::req::LE_PTR) |
                              ((sc_uint<dnp::PHIT_W>)(this_req.addr & 0xffff) << dnp::ace::req::AL_PTR) ;
      tmp_mule_flit.data[2] = ((sc_uint<dnp::PHIT_W>)this_req.unique  << dnp::ace::req::UNQ_PTR) |
                              ((sc_uint<dnp::PHIT_W>)this_req.barrier << dnp::ace::req::BAR_PTR) |
                              ((sc_uint<dnp::PHIT_W>)this_req.burst   << dnp::ace::req::BU_PTR)  |
                              ((sc_uint<dnp::PHIT_W>)this_req.size    << dnp::ace::req::SZ_PTR)  |
                              ((sc_uint<dnp::PHIT_W>)(this_req.addr >> dnp::ace::AL_W) << dnp::ace::req::AH_PTR) ;
      // Account the new in-flight transaction before the header leaves.
      wr_out_table[this_req.id.to_uint()].sent++;
      wr_out_table[this_req.id.to_uint()].dst_last = this_dst;
      // Push header flit to NoC. While back-pressured, keep draining
      // completion notifications to avoid deadlocking the response path.
      #pragma hls_pipeline_init_interval 1
      #pragma pipeline_stall_mode flush
      while (!wr_flit_out.PushNB(tmp_mule_flit)) {
        sc_uint<dnp::ace::ID_W> tid_fin;
        if(wr_trans_fin.nb_read(tid_fin)) {
          wr_out_table[tid_fin].sent--; // update outstanding table
        }
        wait();
      }
      // --- Start DATA Packetization --- //
      // Data Packetization happens in a loop. Each iteration pops a beat and
      // fills (part of) a flit. Multiple iterations may be needed to either
      // consume incoming data or fill a flit, depending on the AXI and flit size.
      // The processed bytes per iteration is limited by two factors:
      //   1) The available data bytes in the beat are less than the free flit bytes
      //   2) The remaining flit bytes are less than the available beat bytes
      // For case (1) the beat is consumed and the next beat is popped at the next iteration
      // For case (2) the flit is pushed to the NoC and a new flit starts in the next iteration
      // calculate the initial axi pointer in case of a non-aligned address to the bus
      sc_uint<8> addr_init_aligned = (this_req.addr.to_uint() & (cfg::WR_LANES-1)) & ~((1<<this_req.size.to_uint())-1);
      // For data Packetization we keep 2 pointers.
      //  - One to keep track of the AXI byte lanes the data is read from (axi_lane_ptr)
      //  - One to point at the phits of the flit under construction (flit_phit_ptr)
      sc_uint<8> axi_lane_ptr = addr_init_aligned;   // Bytes MOD size
      cnt_phit_wreq_t flit_phit_ptr = 0;             // Bytes MOD phits in flit
      sc_uint<16> bytes_total = ((this_req.len.to_uint()+1)<<this_req.size.to_uint());
      sc_uint<16> bytes_packed = 0;
      unsigned char data_build_tmp[cfg::WR_LANES];
      bool wstrb_tmp[cfg::WR_LANES];
      sc_uint<1> last_tmp;
      gather_wr_beats : while (1) {
        // Calculate the bytes transferred in this iteration, depending the available flit bytes and the remaining to the beat
        sc_uint<8> bytes_axi_left  = ((1<<this_req.size.to_uint()) - (axi_lane_ptr & ((1<<this_req.size.to_uint())-1)));
        sc_uint<8> bytes_flit_left = ((cfg::WREQ_PHITS<<1) - (flit_phit_ptr<<1));
        sc_uint<8> bytes_per_iter  = (bytes_axi_left<bytes_flit_left) ? bytes_axi_left : bytes_flit_left;
        // If current beat has been packed, pop next
        if((bytes_packed & ((1<<this_req.size.to_uint())-1))==0) {
          ace5_::WritePayload this_wr;
          this_wr = w_in.Pop();
          last_tmp = this_wr.last;
          duth_fun<ace5_::Data , cfg::WR_LANES>::assign_ac2char(data_build_tmp , this_wr.data);
          duth_fun<ace5_::Wstrb, cfg::WR_LANES>::assign_ac2bool(wstrb_tmp      , this_wr.wstrb);
        }
        // Convert AXI Beats to flits.
        #pragma hls_unroll yes
        for (int i=0; i<cfg::WREQ_PHITS; ++i){ // i counts phits on the flit
          if(i>=flit_phit_ptr && i<(flit_phit_ptr+(bytes_per_iter>>1))) {
            sc_uint<8> loc_axi_ptr = (axi_lane_ptr + ((i-flit_phit_ptr)<<1));
            tmp_mule_flit.data[i] = ((sc_uint<dnp::PHIT_W>)last_tmp                      << dnp::ace::wdata::LA_PTR ) | // MSB
                                    ((sc_uint<dnp::PHIT_W>)wstrb_tmp[loc_axi_ptr+1]      << dnp::ace::wdata::E1_PTR ) |
                                    ((sc_uint<dnp::PHIT_W>)wstrb_tmp[loc_axi_ptr  ]      << dnp::ace::wdata::E0_PTR ) |
                                    ((sc_uint<dnp::PHIT_W>)data_build_tmp[loc_axi_ptr+1] << dnp::ace::wdata::B1_PTR ) |
                                    ((sc_uint<dnp::PHIT_W>)data_build_tmp[loc_axi_ptr  ] << dnp::ace::wdata::B0_PTR ) ;
          }
        }
        // transaction event flags
        bool done_job  = ((bytes_packed+bytes_per_iter)==bytes_total);                                 // All bytes are processed
        bool done_flit = (flit_phit_ptr+(bytes_per_iter>>1)==cfg::WREQ_PHITS);                         // Flit got full
        bool done_axi  = (((bytes_packed+bytes_per_iter)&((1<<(this_req.size.to_uint()))-1))==0);      // Beat got consumed
        // Push the flit to NoC when either this Flit got the needed bytes or all bytes are transferred
        if(done_job || done_flit) {
          tmp_mule_flit.type = (bytes_packed+bytes_per_iter==bytes_total) ? TAIL : BODY;
          #pragma hls_pipeline_init_interval 1
          #pragma pipeline_stall_mode flush
          while (!wr_flit_out.PushNB(tmp_mule_flit)) {
            sc_uint<dnp::ace::ID_W> tid_fin;
            if(wr_trans_fin.nb_read(tid_fin)) {
              wr_out_table[tid_fin].sent--; // update outstanding table
            }
            wait();
          }
        }
        // Check to either finish transaction or update the pointers for the next iteration
        if (done_job) { // End of transaction
          break;
        } else {        // Move to next iteration
          bytes_packed  = bytes_packed+bytes_per_iter;
          flit_phit_ptr = (done_flit) ? 0 : (flit_phit_ptr +(bytes_per_iter>>1));
          axi_lane_ptr  = ((unsigned)this_req.burst==enc_::AXBURST::FIXED) ? ((axi_lane_ptr+bytes_per_iter) & ((1<<this_req.size.to_uint())-1)) + addr_init_aligned :
                                                                             ((axi_lane_ptr+bytes_per_iter) & (cfg::WR_LANES-1))                                    ;
        }
      } // End of gather_beats. End of transaction loop
    } else { // When no request, Check for finished transactions
      sc_uint<dnp::ace::ID_W> tid_fin;
      if(wr_trans_fin.nb_read(tid_fin)) {
        wr_out_table[tid_fin].sent--;
      }
      wait();
    }
  } // End of While(1)
}; // End of Write Request Packetizer
//------------------------------------//
//--- WRITE RESPonce DE-Packetizer ---//
//------------------------------------//
// De-packetizes write-response flits arriving from the NoC into AXI B-channel
// responses for the master, and notifies the write packetizer of each
// finished transaction via wr_trans_fin. For coherent responses it also sends
// a WACK flit back over the NoC after the master acknowledges.
void wr_resp_depack_job(){
  wr_flit_in.Reset();
  wack_flit_out.Reset();
  b_out.Reset();
  wack_in.Reset();
  wait();
  while(1) {
    wresp_flit_t flit_rcv;
    flit_rcv = wr_flit_in.Pop(); // Blocking pop of the single response flit
    unsigned sender = flit_rcv.get_src(); // Remember origin to address the WACK
    bool is_coherent = (flit_rcv.get_type() == dnp::PACK_TYPE__C_WR_RESP);
    // Construct the trans Header to create the response
    ace5_::WRespPayload this_resp;
    sc_uint<dnp::ace::ID_W> this_tid = (flit_rcv.data[0] >> dnp::ace::wresp::ID_PTR) & ((1 << dnp::ace::ID_W) - 1);
    this_resp.id   = this_tid.to_uint();
    this_resp.resp = (flit_rcv.data[0] >> dnp::ace::wresp::RESP_PTR) & ((1 << dnp::ace::W_RE_W) - 1);
    b_out.Push(this_resp);        // Send the response to MASTER
    wr_trans_fin.write(this_tid); // Inform Packetizer for finished transaction
    // Wait for the master's WACK before (possibly) acknowledging over the NoC.
    ace5_::WACK tmp_wack;
    tmp_wack = wack_in.Pop();
    if (is_coherent) {
      ack_flit_t ack_flit(SINGLE,THIS_ID.read().to_uint(), sender, 0, 1); // rack=0, wack=1
      wack_flit_out.Push(ack_flit);
    }
  } // End of While(1)
}; // End of Write Resp De-pack
// Memory map resolving
// Resolve a read address to its destination slave node-id: returns the index
// of the first slave whose [base, end] range (addr_map[i][0], addr_map[i][1],
// both inclusive) contains addr. Falls back to node 0 on a decode miss
// (could instead raise a DECERR / "404" response).
inline unsigned char addr_lut_rd(const ace5_::Addr addr) {
  // Iterate all slaves (was a hard-coded bound of 2, which silently breaks
  // for address maps with more slaves).
  for (int i=0; i<cfg::SLAVE_NUM; ++i) {
    if (addr>=addr_map[i][0].read() && addr <= addr_map[i][1].read()) return i;
  }
  return 0; // Or send 404
};
// Resolve a write address to its destination slave node-id: returns the index
// of the first slave whose [base, end] range (addr_map[i][0], addr_map[i][1],
// both inclusive) contains addr. Falls back to node 0 on a decode miss
// (could instead raise a DECERR / "404" response).
inline unsigned char addr_lut_wr(const ace5_::Addr addr) {
  // Iterate all slaves (was a hard-coded bound of 2, which silently breaks
  // for address maps with more slaves).
  for (int i=0; i<cfg::SLAVE_NUM; ++i) {
    if (addr>=addr_map[i][0].read() && addr <= addr_map[i][1].read()) return i;
  }
  return 0; // Or send 404
};
}; // End of Master-IF module
#endif // _ACE_MASTER_IF_H_
|
ic-lab-duth/NoCpad
|
src/include/flit_ace.h
|
#ifndef __FLIT_DNP_H__
#define __FLIT_DNP_H__
#include "systemc.h"
#include "nvhls_connections.h"
#include "./dnp_ace_v0.h"
#ifndef __SYNTHESIS__
#include <string>
#include <iostream>
#endif
enum FLIT_TYPE {HEAD=0 , BODY=1, TAIL=3, SINGLE=2};
// Generic DNP flit of PHIT_NUM phits.
// data[0] always carries the network header fields (type/qos/dst/src/vc at
// the dnp::*_PTR offsets); the remaining phits carry the packetized AXI/ACE
// payload laid out per the dnp::ace field offsets.
template<unsigned char PHIT_NUM>
struct flit_dnp {
  sc_uint<2> type; // Flit framing: HEAD / BODY / TAIL / SINGLE
  sc_uint<dnp::PHIT_W> data[PHIT_NUM];
  static const int width = 2+(PHIT_NUM*dnp::PHIT_W); // Matchlib Marshaller requirement

  // helping functions to retrieve flit info (e.g. flit type, source, destination)
  inline bool performs_rc()   { return ((type == HEAD) || (type == SINGLE)); } // flits that trigger route computation
  inline bool resets_states() { return (type == TAIL); }                       // flits that release a locked path
  inline bool is_head()   {return (type==HEAD);};
  inline bool is_tail()   {return (type==TAIL);};
  inline bool is_body()   {return (type==BODY);};
  inline bool is_single() {return (type==SINGLE);};

  // DNP network-header field getters (all packed into data[0])
  inline sc_uint<dnp::D_W> get_dst()  const {return ((data[0] >> dnp::D_PTR) & ((1<<dnp::D_W)-1));};
  inline sc_uint<dnp::S_W> get_src()  const {return ((data[0] >> dnp::S_PTR) & ((1<<dnp::S_W)-1));};
  inline sc_uint<dnp::T_W> get_type() const {return ((data[0] >> dnp::T_PTR) & ((1<<dnp::T_W)-1));};
  inline sc_uint<dnp::V_W> get_vc()   const {return ((data[0] >> dnp::V_PTR) & ((1<<dnp::V_W)-1));};
  inline sc_uint<dnp::Q_W> get_qos()  const {return ((data[0] >> dnp::Q_PTR) & ((1<<dnp::Q_W)-1));};
  // Field setters: rebuild data[0] keeping the bits above and below the field.
  inline void set_dst(sc_uint<dnp::D_W> dst ) { data[0] = (data[0].range(dnp::PHIT_W-1, dnp::D_PTR+dnp::D_W) << (dnp::D_PTR+dnp::D_W)) |
                                                          (dst << dnp::D_PTR) |
                                                          (data[0].range(dnp::D_PTR-1, 0));
  };
  inline void set_src(sc_uint<dnp::S_W> src ) { data[0] = (data[0].range(dnp::PHIT_W-1, dnp::S_PTR+dnp::S_W) << (dnp::S_PTR+dnp::S_W)) |
                                                          (src << dnp::S_PTR) |
                                                          (data[0].range(dnp::S_PTR-1, 0));
  };
  inline void set_type(sc_uint<dnp::T_W> type) { data[0] = (data[0].range(dnp::PHIT_W-1, dnp::T_PTR+dnp::T_W) << (dnp::T_PTR+dnp::T_W)) |
                                                           (type << dnp::T_PTR) |
                                                           (data[0].range(dnp::T_PTR-1, 0));
  };
  // NOTE(review): unlike the other setters, bits below V_PTR are cleared here;
  // presumably V_PTR==0 so there are no lower bits — confirm against dnp defs.
  inline void set_vc(sc_uint<dnp::V_W> vc ) { data[0] = (data[0].range(dnp::PHIT_W-1, dnp::V_PTR+dnp::V_W) << (dnp::V_PTR+dnp::V_W)) |
                                                        (vc << dnp::V_PTR) ;
  };
  inline void set_qos(sc_uint<dnp::Q_W> qos ) { data[0] = (data[0].range(dnp::PHIT_W-1, dnp::Q_PTR+dnp::Q_W) << (dnp::Q_PTR+dnp::Q_W)) |
                                                          (qos << dnp::Q_PTR) |
                                                          (data[0].range(dnp::Q_PTR-1, 0));
  };
  // Set the full network header at once, keeping only the bits above the type field.
  inline void set_network(
    sc_uint<dnp::S_W> src,
    sc_uint<dnp::D_W> dst,
    sc_uint<dnp::V_W> vc,
    sc_uint<dnp::T_W> type,
    sc_uint<dnp::Q_W> qos
  ) {
    data[0] = (data[0].range(dnp::PHIT_W-1, dnp::T_PTR+dnp::T_W) << (dnp::T_PTR+dnp::T_W)) |
              (type << dnp::T_PTR) |
              (qos  << dnp::Q_PTR) |
              (dst  << dnp::D_PTR) |
              (src  << dnp::S_PTR) |
              (vc   << dnp::V_PTR) ;
  };

  // Pack an AXI/ACE read request header into data[0..2].
  template<typename T>
  inline void set_rd_req (const T& rd_req) {
    this->data[0] = ((sc_uint<dnp::PHIT_W>) rd_req.snoop  << dnp::ace::req::SNP_PTR) |
                    ((sc_uint<dnp::PHIT_W>) rd_req.domain << dnp::ace::req::DOM_PTR) |
                    ((sc_uint<dnp::PHIT_W>) rd_req.id     << dnp::ace::req::ID_PTR)  |
                    (sc_uint<dnp::PHIT_W>) this->data[0].range(dnp::T_PTR+dnp::T_W-1, 0); // Keep the network portion unaffected
    this->data[1] = ((sc_uint<dnp::PHIT_W>) rd_req.len             << dnp::ace::req::LE_PTR) |
                    ((sc_uint<dnp::PHIT_W>)(rd_req.addr & 0xffff)  << dnp::ace::req::AL_PTR) ;
    this->data[2] = ((sc_uint<dnp::PHIT_W>) rd_req.unique  << dnp::ace::req::UNQ_PTR ) |
                    ((sc_uint<dnp::PHIT_W>) rd_req.barrier << dnp::ace::req::BAR_PTR ) |
                    ((sc_uint<dnp::PHIT_W>) rd_req.burst   << dnp::ace::req::BU_PTR  ) |
                    ((sc_uint<dnp::PHIT_W>) rd_req.size    << dnp::ace::req::SZ_PTR  ) |
                    ((sc_uint<dnp::PHIT_W>)(rd_req.addr >> dnp::ace::AL_W) << dnp::ace::req::AH_PTR) ;
  };
  // Write requests share the read-request header layout.
  template<typename T>
  inline void set_wr_req (const T& wr_req) {this->set_rd_req(wr_req);};
  // Pack a coherence (snoop) request into data[1..2]; data[0] is left for the network header.
  template<typename T>
  inline void set_snoop_req (const T& snoop_req) {
    this->data[1] = ((sc_uint<dnp::PHIT_W>)snoop_req.snoop            << dnp::ace::creq::SNP_PTR) |
                    ((sc_uint<dnp::PHIT_W>)(snoop_req.addr & 0xffff)  << dnp::ace::creq::AL_PTR) ;
    this->data[2] = ((sc_uint<dnp::PHIT_W>) snoop_req.prot                   << dnp::ace::creq::C_PROT_PTR ) |
                    ((sc_uint<dnp::PHIT_W>)(snoop_req.addr >> dnp::ace::AL_W) << dnp::ace::creq::AH_PTR) ;
  };
  // Pack a read-response header into data[0..1].
  template<typename T>
  inline void set_rd_resp (const T& rd_req) {
    this->data[0] = ((sc_uint<dnp::PHIT_W>) rd_req.burst << dnp::ace::rresp::BU_PTR) |
                    ((sc_uint<dnp::PHIT_W>) rd_req.id    << dnp::ace::rresp::ID_PTR) |
                    (sc_uint<dnp::PHIT_W>) this->data[0].range(dnp::T_PTR+dnp::T_W-1, 0); // Keep the network portion unaffected
    this->data[1] = ((sc_uint<dnp::PHIT_W>) (rd_req.addr & ((1<<dnp::ace::AP_W)-1)) << dnp::ace::rresp::AP_PTR ) |
                    ((sc_uint<dnp::PHIT_W>) rd_req.len                              << dnp::ace::rresp::LE_PTR ) |
                    ((sc_uint<dnp::PHIT_W>) rd_req.size                             << dnp::ace::rresp::SZ_PTR) ;
  };

  // Unpack a read request header from data[0..2] into rd_req.
  template<typename T>
  inline void get_rd_req (T& rd_req) const {
    rd_req.id    = (this->data[0] >> dnp::ace::req::ID_PTR) & ((1<<dnp::ace::ID_W)-1);
    rd_req.len   = (this->data[1] >> dnp::ace::req::LE_PTR) & ((1<<dnp::ace::LE_W)-1);
    rd_req.size  = (this->data[2] >> dnp::ace::req::SZ_PTR) & ((1<<dnp::ace::SZ_W)-1);
    rd_req.burst = (this->data[2] >> dnp::ace::req::BU_PTR) & ((1<<dnp::ace::BU_W)-1);
    rd_req.addr  = ((((this->data[2]>>dnp::ace::req::AH_PTR) & ((1<<dnp::ace::AH_W)-1)) << dnp::ace::AL_W) |
                     ((this->data[1]>>dnp::ace::req::AL_PTR) & ((1<<dnp::ace::AL_W)-1)));
    // rd_req.cache / rd_req.auser : not carried by the DNP header
    rd_req.snoop   = (this->data[0] >> dnp::ace::req::SNP_PTR) & ((1<<dnp::ace::SNP_W)-1);
    rd_req.domain  = (this->data[0] >> dnp::ace::req::DOM_PTR) & ((1<<dnp::ace::DOM_W)-1);
    rd_req.barrier = (this->data[2] >> dnp::ace::req::BAR_PTR) & ((1<<dnp::ace::BAR_W)-1);
    // rd_req.unique : not carried by the DNP header
  };
  // FIX: takes a non-const reference — get_rd_req() assigns into its argument,
  // so the previous `const T&` signature could never compile when instantiated.
  template<typename T>
  inline void get_wr_req (T& wr_req) const {this->get_rd_req(wr_req);};
  // Unpack a coherence (snoop) request from data[1..2].
  template<typename T>
  inline void get_snoop_req (T& snoop_req) const {
    snoop_req.snoop = (this->data[1] >> dnp::ace::creq::SNP_PTR)    & ((1<<dnp::ace::SNP_W)-1);
    snoop_req.prot  = (this->data[2] >> dnp::ace::creq::C_PROT_PTR) & ((1<<dnp::ace::C_PROT_W)-1);
    snoop_req.addr  = ((((this->data[2]>>dnp::ace::creq::AH_PTR) & ((1<<dnp::ace::AH_W)-1)) << dnp::ace::AL_W) |
                        ((this->data[1]>>dnp::ace::creq::AL_PTR) & ((1<<dnp::ace::AL_W)-1)));
  };

  // Flit Constructors
  flit_dnp () {
    type = 0;
    #pragma hls_unroll yes
    for(int i=0; i<PHIT_NUM; ++i)
      data[i] = 0;
  };
  flit_dnp(FLIT_TYPE _type, short int _src, short int _dst) {
    type = _type;
    // FIX: zero ALL phits first; previously data[1..PHIT_NUM-1] were left
    // uninitialized by this constructor.
    #pragma hls_unroll yes
    for(int i=0; i<PHIT_NUM; ++i)
      data[i] = 0;
    data[0] = (_src << dnp::S_PTR) |
              (_dst << dnp::D_PTR) ;
  };

  // Flit operators
  inline flit_dnp& operator = (const flit_dnp& rhs) {
    type = rhs.type;
    #pragma hls_unroll yes
    for(int i=0; i<PHIT_NUM; ++i) data[i] = rhs.data[i];
    return *this;
  };
  inline flit_dnp& operator = (const flit_dnp* rhs) {
    type = rhs->type;
    #pragma hls_unroll yes
    for(int i=0; i<PHIT_NUM; ++i) data[i] = rhs->data[i];
    return *this;
  };
  inline bool operator==(const flit_dnp& rhs) const {
    bool eq = (rhs.type == type);
    for(int i=0; i<PHIT_NUM; ++i) eq = eq && (data[i] == rhs.data[i]);
    return eq;
  }
  inline bool operator!=(const flit_dnp& rhs) const {
    return !(*this==rhs);
  }
  inline flit_dnp operator | (const flit_dnp& rhs) {
    flit_dnp mule;
    mule.type = type | rhs.type;
    #pragma hls_unroll yes
    for(int i=0; i<PHIT_NUM; ++i) mule.data[i] = data[i] | rhs.data[i];
    return mule;
  };
  inline flit_dnp operator & (const flit_dnp& rhs) {
    flit_dnp mule;
    mule.type = type & rhs.type;
    #pragma hls_unroll yes
    for(int i=0; i<PHIT_NUM; ++i) mule.data[i] = data[i] & rhs.data[i];
    return mule;
  };
  // Returns a copy of the flit masked by a single bit: identity when bit==1,
  // all-zero when bit==0 (a mux-friendly select for HLS).
  inline flit_dnp and_mask(bool bit) const {
    flit_dnp mule;
    sc_uint<dnp::PHIT_W> mask = 0;
    #pragma hls_unroll yes
    for(int j=0; j<dnp::PHIT_W; ++j) mask[j] = mask[j] | (bit << j);
    mule.type = type & mask;
    #pragma hls_unroll yes
    for(int i=0; i<PHIT_NUM; ++i) mule.data[i] = data[i] & mask;
    return mule;
  };

  // Non synthesizable functions. (printing, tracing)
  static std::string type_in_str(const flit_dnp& flit_in) {
    if     (flit_in.type==HEAD)   return "H";
    else if(flit_in.type==BODY)   return "B";
    else if(flit_in.type==TAIL)   return "T";
    else if(flit_in.type==SINGLE) return "S";
    else                          return "X";
  }
  inline friend std::ostream& operator << ( std::ostream& os, const flit_dnp& flit_tmp ) {
    os << flit_tmp.get_vc() << flit_dnp::type_in_str(flit_tmp) <<"s" << flit_tmp.get_src() << "d" << flit_tmp.get_dst() << "v" << flit_tmp.get_vc();
    os << " Pay: 0x";
    for(int i=PHIT_NUM-1; i>=0; --i) os << std::hex << flit_tmp.data[i].to_uint() << "_";
#ifdef SYSTEMC_INCLUDED
    os << std::dec << "@" << sc_time_stamp();
#else
    os << std::dec << "@" << "no-timed";
#endif
    return os;
  }

#ifdef SYSTEMC_INCLUDED
  // Only for SystemC
  inline friend void sc_trace(sc_trace_file* tf, const flit_dnp& flit, const std::string& name) {
    sc_trace(tf, flit.type, name + ".type");
    // FIX: give every phit its own trace name; previously all PHIT_NUM
    // signals were traced under the identical name ".data".
    for(int i=0; i<PHIT_NUM; ++i)
      sc_trace(tf, flit.data[i], name + ".data_" + std::to_string(i));
  }
#endif

  // Matchlib Marshaller requirement
  template<unsigned int Size>
  void Marshall(Marshaller<Size>& m) {
    m& type;
    #pragma hls_unroll yes
    // Marshalled MSB-first on purpose (phit order on the wire).
    for(int i=PHIT_NUM-1; i>=0; --i) m& data[i];
  };
};
// Small single-flit packet used to carry ACE read/write acknowledgements
// (RACK/WACK) back over the NoC. Unlike flit_dnp, src/dst travel as discrete
// fields rather than packed into a data phit.
struct flit_ack {
  sc_uint<2> type;          // Flit framing: HEAD / BODY / TAIL / SINGLE
  sc_uint<dnp::S_W> src;    // Source node-id
  sc_uint<dnp::D_W> dst;    // Destination node-id
  sc_uint<1> rack;          // Set when this flit acknowledges a read (RACK)
  sc_uint<1> wack;          // Set when this flit acknowledges a write (WACK)
  static const int width = 2+dnp::S_W+dnp::D_W+1+1; // Matchlib Marshaller requirement
  flit_ack(unsigned type_=0, unsigned src_=0, unsigned dst_=0, bool rack_=0, bool wack_=0) :
    type(type_), src(src_), dst(dst_), rack(rack_), wack(wack_)
  {};
  // helping functions to retrieve flit info (e.g. flit type, source, destination)
  inline bool performs_rc() { return ((type == HEAD) || (type == SINGLE)); }
  inline bool resets_states() { return (type == TAIL); }
  inline bool is_head() {return (type==HEAD);};
  inline bool is_tail() {return (type==TAIL);};
  inline bool is_body() {return (type==BODY);};
  inline bool is_single() {return (type==SINGLE);};
  inline sc_uint<dnp::D_W> get_dst() const {return dst;};
  inline sc_uint<dnp::S_W> get_src() const {return src;};
  // NOTE(review): always returns 0 regardless of the flit's contents —
  // presumably ACK flits carry no DNP packet-type field; confirm callers
  // never rely on this matching a dnp::PACK_TYPE__* value.
  inline sc_uint<dnp::T_W> get_type() const {return 0;};
  inline bool is_rack() const {return rack;};
  inline bool is_wack() const {return wack;};
  // Non synthesizable functions. (printing, tracing)
  static std::string type_in_str(const flit_ack& flit_in) {
    if (flit_in.type==HEAD) return "H";
    else if(flit_in.type==BODY) return "B";
    else if(flit_in.type==TAIL) return "T";
    else if(flit_in.type==SINGLE) return "S";
    else return "X";
  }
  inline friend std::ostream& operator << ( std::ostream& os, const flit_ack& flit_tmp ) {
    os << flit_ack::type_in_str(flit_tmp) <<"s" << flit_tmp.get_src() << "d" << flit_tmp.get_dst() << " rack: " << flit_tmp.is_rack() << " wack: " << flit_tmp.is_wack();
#ifdef SYSTEMC_INCLUDED
    os << std::dec << "@" << sc_time_stamp();
#else
    os << std::dec << "@" << "no-timed";
#endif
    return os;
  }
#ifdef SYSTEMC_INCLUDED
  // Only for SystemC
  inline friend void sc_trace(sc_trace_file* tf, const flit_ack& flit, const std::string& name) {
    sc_trace(tf, flit.type, name + ".type");
    sc_trace(tf, flit.src, name + ".src");
    sc_trace(tf, flit.dst, name + ".dst");
    sc_trace(tf, flit.rack, name + ".rack");
    sc_trace(tf, flit.wack, name + ".wack");
  }
#endif
  // Matchlib Marshaller requirement
  template<unsigned int Size>
  void Marshall(Marshaller<Size>& m) {
    m& type;
    m& src;
    m& dst;
    m& rack;
    m& wack;
  };
};
#endif // __FLIT_DNP_H__
|
ic-lab-duth/NoCpad
|
examples/nocpad_2m-2s_2d-mesh_basic-order/ic_top_2d.h
|
#ifndef AXI4_TOP_IC_H
#define AXI4_TOP_IC_H
#pragma once
#include "../../src/axi_master_if.h"
#include "../../src/axi_slave_if.h"
#include "../../src/router_wh.h"
#include "systemc.h"
#include "nvhls_connections.h"
#pragma hls_design top
// Bundle of configuration parameters
// Compile-time configuration for the interconnect:
//  - MASTER_NUM / SLAVE_NUM : number of attached AXI masters / slaves
//  - RD_LANES / WR_LANES    : AXI data-bus width in BYTES for the R / W channels
//  - RREQ/RRESP/WREQ/WRESP_PHITS : flit payload size (in phits) per packet class
//  - ORD_SCHEME             : transaction-ordering scheme used by the IFs
//                             (0: per-destination, 1: per-ID — see IF sources)
template <
  unsigned char MASTER_NUM_ , unsigned char SLAVE_NUM_,
  unsigned char RD_LANES_ , unsigned char WR_LANES_,
  unsigned char RREQ_PHITS_ , unsigned char RRESP_PHITS_,
  unsigned char WREQ_PHITS_ , unsigned char WRESP_PHITS_,
  unsigned char ORD_SCHEME_
>
struct cfg {
  static const unsigned char MASTER_NUM = MASTER_NUM_;
  static const unsigned char SLAVE_NUM = SLAVE_NUM_;
  static const unsigned char RD_LANES = RD_LANES_;
  static const unsigned char WR_LANES = WR_LANES_;
  static const unsigned char RREQ_PHITS = RREQ_PHITS_;
  static const unsigned char RRESP_PHITS = RRESP_PHITS_;
  static const unsigned char WREQ_PHITS = WREQ_PHITS_;
  static const unsigned char WRESP_PHITS = WRESP_PHITS_;
  static const unsigned char ORD_SCHEME = ORD_SCHEME_;
};
// The used configuration: 2 Masters, 2 Slaves, 64-bit AXI data (8 byte lanes),
// 4-phit flits for all four packet classes, ordering scheme 0.
typedef cfg<2, 2, 8, 8, 4, 4, 4, 4, 0> smpl_cfg;
// Top-level interconnect: 2 masters + 2 slaves on a DIM_X x DIM_Y (2x2) mesh.
// Two physically separate NoCs are instantiated: a request mesh (rtr_req,
// carrying RD+WR requests) and a response mesh (rtr_resp, carrying RD+WR
// responses). Master/slave IFs packetize AXI transactions to flits and back.
// NOTE: a single router is templated on ONE flit type, so this topology
// requires RREQ_PHITS == WREQ_PHITS and RRESP_PHITS == WRESP_PHITS (true for
// smpl_cfg); the channel typedefs below are chosen to match the router type
// of the mesh they belong to.
SC_MODULE(ic_top) {
public:
  // typedef matchlib's axi with the "standard" configuration
  typedef typename axi::axi4<axi::cfg::standard_duth> axi4_;
  // typedef the 4 kind of flits(RD/WR Req/Resp) depending their size
  typedef flit_dnp<smpl_cfg::RREQ_PHITS>  rreq_flit_t;
  typedef flit_dnp<smpl_cfg::RRESP_PHITS> rresp_flit_t;
  typedef flit_dnp<smpl_cfg::WREQ_PHITS>  wreq_flit_t;
  typedef flit_dnp<smpl_cfg::WRESP_PHITS> wresp_flit_t;

  static const unsigned DIM_X = 2;
  static const unsigned DIM_Y = 2;

  sc_in_clk    clk;
  sc_in <bool> rst_n;
  // IC's Address map
  sc_in<sc_uint <32> > addr_map[smpl_cfg::SLAVE_NUM][2]; // [SLAVE_NUM][0:begin, 1: End]
  sc_signal< sc_uint<dnp::D_W> > route_lut[2][1];
  // The Node IDs are passed to IFs as signals
  sc_signal< sc_uint<dnp::S_W> > NODE_IDS_MASTER[smpl_cfg::MASTER_NUM];
  sc_signal< sc_uint<dnp::S_W> > NODE_IDS_SLAVE[smpl_cfg::SLAVE_NUM];
  sc_signal< sc_uint<dnp::D_W> > rtr_id_x_req[DIM_X];
  sc_signal< sc_uint<dnp::D_W> > rtr_id_y_req[DIM_Y];
  sc_signal< sc_uint<dnp::D_W> > rtr_id_x_resp[DIM_X];
  sc_signal< sc_uint<dnp::D_W> > rtr_id_y_resp[DIM_Y];

  // MASTER Side AXI Channels
  Connections::In<axi4_::AddrPayload>   ar_in[smpl_cfg::MASTER_NUM];
  Connections::Out<axi4_::ReadPayload>  r_out[smpl_cfg::MASTER_NUM];
  Connections::In<axi4_::AddrPayload>   aw_in[smpl_cfg::MASTER_NUM];
  Connections::In<axi4_::WritePayload>  w_in[smpl_cfg::MASTER_NUM];
  Connections::Out<axi4_::WRespPayload> b_out[smpl_cfg::MASTER_NUM];
  // SLAVE Side AXI Channels
  Connections::Out<axi4_::AddrPayload>  ar_out[smpl_cfg::SLAVE_NUM];
  Connections::In<axi4_::ReadPayload>   r_in[smpl_cfg::SLAVE_NUM];
  Connections::Out<axi4_::AddrPayload>  aw_out[smpl_cfg::SLAVE_NUM];
  Connections::Out<axi4_::WritePayload> w_out[smpl_cfg::SLAVE_NUM];
  Connections::In<axi4_::WRespPayload>  b_in[smpl_cfg::SLAVE_NUM];

  //--- Internals ---//
  // --- Master/Slave IFs ---
  axi_master_if < smpl_cfg > *master_if[smpl_cfg::MASTER_NUM];
  axi_slave_if  < smpl_cfg > *slave_if[smpl_cfg::SLAVE_NUM];

  // Master IF Channels
  // Read Req/Resp
  Connections::Combinational<rreq_flit_t>  chan_rd_m2r[smpl_cfg::MASTER_NUM];
  Connections::Combinational<rresp_flit_t> chan_rd_r2m[smpl_cfg::MASTER_NUM];
  // Write Req/Resp
  Connections::Combinational<wreq_flit_t>  chan_wr_m2r[smpl_cfg::MASTER_NUM];
  Connections::Combinational<wresp_flit_t> chan_wr_r2m[smpl_cfg::MASTER_NUM];
  // Slave IF
  // Read Req/Resp
  Connections::Combinational<rreq_flit_t>  chan_rd_r2s[smpl_cfg::SLAVE_NUM];
  Connections::Combinational<rresp_flit_t> chan_rd_s2r[smpl_cfg::SLAVE_NUM];
  Connections::Combinational<wreq_flit_t>  chan_wr_r2s[smpl_cfg::SLAVE_NUM];
  Connections::Combinational<wresp_flit_t> chan_wr_s2r[smpl_cfg::SLAVE_NUM];

  // --- NoC Channels ---
  // REQ Router + In/Out Channels. The request mesh routers are templated on
  // rreq_flit_t, so every channel bound to a router port is rreq_flit_t
  // (FIX: trunk and rd-inject/eject channels were declared wreq_flit_t, which
  // only compiled because RREQ_PHITS == WREQ_PHITS).
  router_wh_top< 4+2, 4+2, rreq_flit_t, 5, DIM_X> rtr_req[DIM_X][DIM_Y];
  Connections::Combinational<rreq_flit_t> chan_hor_right_req[DIM_X+1][DIM_Y];
  Connections::Combinational<rreq_flit_t> chan_hor_left_req[DIM_X+1][DIM_Y];
  Connections::Combinational<rreq_flit_t> chan_ver_up_req[DIM_X][DIM_Y+1];
  Connections::Combinational<rreq_flit_t> chan_ver_down_req[DIM_X][DIM_Y+1];
  Connections::Combinational<wreq_flit_t> chan_inj_wreq[DIM_X][DIM_Y];
  Connections::Combinational<rreq_flit_t> chan_inj_rreq[DIM_X][DIM_Y];
  Connections::Combinational<wreq_flit_t> chan_ej_wreq[DIM_X][DIM_Y];
  Connections::Combinational<rreq_flit_t> chan_ej_rreq[DIM_X][DIM_Y];
  // RESP Router + In/Out Channels (rresp_flit_t mesh — FIX: trunk channels
  // were declared rreq_flit_t).
  router_wh_top< 4+2, 4+2, rresp_flit_t, 5, DIM_X> *rtr_resp[DIM_X][DIM_Y];
  Connections::Combinational<rresp_flit_t> chan_hor_right_resp[DIM_X+1][DIM_Y];
  Connections::Combinational<rresp_flit_t> chan_hor_left_resp[DIM_X+1][DIM_Y];
  Connections::Combinational<rresp_flit_t> chan_ver_up_resp[DIM_X][DIM_Y+1];
  Connections::Combinational<rresp_flit_t> chan_ver_down_resp[DIM_X][DIM_Y+1];
  Connections::Combinational<rresp_flit_t> chan_inj_wresp[DIM_X][DIM_Y];
  Connections::Combinational<rresp_flit_t> chan_inj_rresp[DIM_X][DIM_Y];
  Connections::Combinational<rresp_flit_t> chan_ej_wresp[DIM_X][DIM_Y];
  Connections::Combinational<rresp_flit_t> chan_ej_rresp[DIM_X][DIM_Y];

  SC_CTOR(ic_top) {
    route_lut[0][0] = 0;
    route_lut[1][0] = 0;
    // ----------------- //
    // --- SLAVE-IFs --- //
    // ----------------- //
    // Slaves occupy node-ids [0, SLAVE_NUM); mesh position is id-major.
    for(unsigned char j=0; j<smpl_cfg::SLAVE_NUM; ++j){
      NODE_IDS_SLAVE[j] = j;
      unsigned col = j % DIM_X; // aka x dim
      unsigned row = j / DIM_X; // aka y dim
      slave_if[j] = new axi_slave_if < smpl_cfg > (sc_gen_unique_name("Slave-if"));
      slave_if[j]->clk(clk);
      slave_if[j]->rst_n(rst_n);
      slave_if[j]->THIS_ID(NODE_IDS_SLAVE[j]);
      slave_if[j]->slave_base_addr(addr_map[j][0]);
      // Read-NoC
      slave_if[j]->rd_flit_in(chan_ej_rreq[col][row]);
      slave_if[j]->rd_flit_out(chan_inj_rresp[col][row]);
      // Write-NoC
      slave_if[j]->wr_flit_in(chan_ej_wreq[col][row]);
      slave_if[j]->wr_flit_out(chan_inj_wresp[col][row]);
      // Slave-Side
      slave_if[j]->ar_out(ar_out[j]);
      slave_if[j]->r_in(r_in[j]);
      slave_if[j]->aw_out(aw_out[j]);
      slave_if[j]->w_out(w_out[j]);
      slave_if[j]->b_in(b_in[j]);
    }
    // ------------------------------ //
    // --- MASTER-IFs Connectivity--- //
    // ------------------------------ //
    // Masters occupy node-ids [SLAVE_NUM, SLAVE_NUM+MASTER_NUM).
    for (int i=0; i<smpl_cfg::MASTER_NUM; ++i) {
      NODE_IDS_MASTER[i] = smpl_cfg::SLAVE_NUM + i;
      unsigned col = (smpl_cfg::SLAVE_NUM + i) % DIM_X; // aka x dim
      unsigned row = (smpl_cfg::SLAVE_NUM + i) / DIM_X; // aka y dim
      master_if[i] = new axi_master_if < smpl_cfg > (sc_gen_unique_name("Master-if"));
      master_if[i]->clk(clk);
      master_if[i]->rst_n(rst_n);
      // Pass the address Map
      for (int n=0; n<smpl_cfg::SLAVE_NUM; ++n) // Iterate Slaves
        for (int s=0; s<2; ++s)                 // Iterate Begin-End Values
          master_if[i]->addr_map[n][s](addr_map[n][s]);
      master_if[i]->THIS_ID(NODE_IDS_MASTER[i]);
      // Master-AXI-Side
      master_if[i]->ar_in(ar_in[i]);
      master_if[i]->r_out(r_out[i]);
      master_if[i]->aw_in(aw_in[i]);
      master_if[i]->w_in(w_in[i]);
      master_if[i]->b_out(b_out[i]);
      // Read-NoC
      master_if[i]->rd_flit_out(chan_inj_rreq[col][row]);
      master_if[i]->rd_flit_in(chan_ej_rresp[col][row]);
      // Write-NoC
      master_if[i]->wr_flit_out(chan_inj_wreq[col][row]);
      master_if[i]->wr_flit_in(chan_ej_wresp[col][row]);
    }
    // -o-o-o-o-o-o-o-o-o- //
    // -o-o-o-o-o-o-o-o-o- //
    for (int row=0; row<DIM_Y; ++row) rtr_id_y_req[row] = row;
    for (int col=0; col<DIM_X; ++col) rtr_id_x_req[col] = col;
    // --- NoC Connectivity --- //
    // Req/Fwd Routers. Port map: 0=West, 1=East, 2=North, 3=South,
    // 4=rd inject/eject, 5=wr inject/eject.
    for(int row=0; row<DIM_Y; ++row) {
      for (int col=0; col<DIM_X; ++col) {
        rtr_req[col][row].clk(clk);
        rtr_req[col][row].rst_n(rst_n);
        rtr_req[col][row].route_lut[0](route_lut[0][0]);
        rtr_req[col][row].id_x(rtr_id_x_req[col]);
        rtr_req[col][row].id_y(rtr_id_y_req[row]);
        rtr_req[col][row].data_in[0](chan_hor_right_req[col][row]);
        rtr_req[col][row].data_out[0](chan_hor_left_req[col][row]);
        rtr_req[col][row].data_in[1](chan_hor_left_req[col+1][row]);
        rtr_req[col][row].data_out[1](chan_hor_right_req[col+1][row]);
        rtr_req[col][row].data_in[2](chan_ver_up_req[col][row]);
        rtr_req[col][row].data_out[2](chan_ver_down_req[col][row]);
        rtr_req[col][row].data_in[3](chan_ver_down_req[col][row+1]);
        rtr_req[col][row].data_out[3](chan_ver_up_req[col][row+1]);
        rtr_req[col][row].data_in[4](chan_inj_rreq[col][row]);
        rtr_req[col][row].data_out[4](chan_ej_rreq[col][row]);
        rtr_req[col][row].data_in[5](chan_inj_wreq[col][row]);
        rtr_req[col][row].data_out[5](chan_ej_wreq[col][row]);
      }
    }
    for (int row=0; row<DIM_Y; ++row) rtr_id_y_resp[row] = (row);
    for (int col=0; col<DIM_X; ++col) rtr_id_x_resp[col] = (col);
    // Resp/Bck Router (same port map as the request mesh)
    for(int row=0; row<DIM_Y; ++row) {
      for (int col=0; col<DIM_X; ++col) {
        rtr_resp[col][row] = new router_wh_top< 4+2, 4+2, rresp_flit_t, 5, DIM_X> (sc_gen_unique_name("Router-resp"));
        rtr_resp[col][row]->clk(clk);
        rtr_resp[col][row]->rst_n(rst_n);
        rtr_resp[col][row]->route_lut[0](route_lut[0][0]);
        rtr_resp[col][row]->id_x(rtr_id_x_resp[col]);
        rtr_resp[col][row]->id_y(rtr_id_y_resp[row]);
        rtr_resp[col][row]->data_in[0](chan_hor_right_resp[col][row]);
        rtr_resp[col][row]->data_out[0](chan_hor_left_resp[col][row]);
        rtr_resp[col][row]->data_in[1](chan_hor_left_resp[col+1][row]);
        rtr_resp[col][row]->data_out[1](chan_hor_right_resp[col+1][row]);
        rtr_resp[col][row]->data_in[2](chan_ver_up_resp[col][row]);
        rtr_resp[col][row]->data_out[2](chan_ver_down_resp[col][row]);
        rtr_resp[col][row]->data_in[3](chan_ver_down_resp[col][row+1]);
        rtr_resp[col][row]->data_out[3](chan_ver_up_resp[col][row+1]);
        rtr_resp[col][row]->data_in[4](chan_inj_rresp[col][row]);
        rtr_resp[col][row]->data_out[4](chan_ej_rresp[col][row]);
        rtr_resp[col][row]->data_in[5](chan_inj_wresp[col][row]);
        rtr_resp[col][row]->data_out[5](chan_ej_wresp[col][row]);
      }
    }
  }; // End of constructor
private:
}; // End of SC_MODULE
#endif // AXI4_TOP_IC_H
|
ic-lab-duth/NoCpad
|
src/router_wh.h
|
<reponame>ic-lab-duth/NoCpad<filename>src/router_wh.h
#ifndef WH_ROUTER_CON_ST_BUF_H
#define WH_ROUTER_CON_ST_BUF_H
#include "systemc.h"
#include "./include/flit_axi.h"
#include "./include/duth_fun.h"
#include "./include/arbiters.h"
#include "nvhls_connections.h"
// Select In/Out Ports, the type of flit and the Routing Computation calculation function
// For RC_METHOD: 0-> direct rc 1-> lut, 2-> type, ... , 4-> LUT based routing
// IN_NUM : Number of inputs
// OUT_NUM : Number of inputs
// flit_t : The networks flit type
// RC_METHOD : Routing Computation Algotrithm
// - 0 : Direct RC
// - 1 : Constant RC (for mergers)
// - 2 : Packet type RC (for distinct routing of Writes and reads)
// - 3 : For single stage NoCs
// - 4 : LUT based RC
// - 5 : XY routing with merged RD/WR Req-Resp
// DIM_X : X Dimension of a 2-D mesh network. Used in XY routing
// NODES : All possible target nodes of the network. Used in LUT routing
// ARB_C : The arbiter type. Eg MATRIX, ROUND_ROBIN
// Wormhole router: each input locks its chosen output for the duration of a
// packet (head locks, tail releases), and each output arbitrates among the
// inputs that request it. Template parameters are documented in the comment
// block above this module.
template<unsigned int IN_NUM, unsigned int OUT_NUM, class flit_t, int RC_METHOD=0, int DIM_X=0, int NODES=1, class ARB_C=arbiter<IN_NUM, MATRIX> >
SC_MODULE(router_wh_top) {
  // Wide enough to index any output port.
  typedef sc_uint< clog2<OUT_NUM>::val > port_w_t;

  sc_in_clk clk{"clk"};
  sc_in <bool> rst_n{"rst_n"};

  // LUT is passed as a signal for LUT based RC (RC_METHOD==4). Otherwise Not-Used
  sc_in< sc_uint<dnp::D_W> > route_lut[NODES];
  // id_x and id_y are the X,Y coordinates of this router in a 2-D mesh
  // network (used by XY routing, RC_METHOD==5). Otherwise Not-Used
  sc_in< sc_uint<dnp::D_W> > id_x{"id_x"};
  sc_in< sc_uint<dnp::D_W> > id_y{"id_y"};

  // Input channels (with a 2-deep buffer per input)
  Connections::InBuffered <flit_t, 2> data_in[IN_NUM];
  // Output channels (with a 1-deep buffer per output)
  Connections::OutBuffered<flit_t, 1> data_out[OUT_NUM];

  // --- Per input state ---
  // Each input has a lock bit, meaning the required outport has been locked for this input
  bool out_lock[IN_NUM];
  // Each input stores its required outport (reused by body/tail flits)
  port_w_t out_port[IN_NUM];

  // --- Per output state ---
  // out_available holds the availability of the corresponding output port
  // (true while no packet currently owns it)
  bool out_available[OUT_NUM];
  ARB_C arbiter[OUT_NUM];

  // Constructor
  SC_HAS_PROCESS(router_wh_top);
  router_wh_top(sc_module_name name_="router_wh_top")
    : sc_module(name_)
  {
    SC_THREAD(router_job);
    sensitive << clk.pos();
    async_reset_signal_is(rst_n, false);
  }

  // Main clocked process. Each cycle: compute requests per input (RC),
  // arbitrate per output (SA), push granted flits and pop their inputs (ST).
  void router_job (){
    // Reset all per-input and per-output state.
    #pragma hls_unroll yes
    per_i_rst:for (unsigned char i=0; i<IN_NUM; ++i) {
      data_in[i].Reset();
      out_lock[i] = false;
      out_port[i] = 0;
    }
    #pragma hls_unroll yes
    per_o_rst:for(unsigned char o=0; o<OUT_NUM; ++o) {
      data_out[o].Reset();
      out_available[o] = true;
    }
    // Post Reset
    #pragma hls_pipeline_init_interval 1
    #pragma pipeline_stall_mode flush
    while(1){
      wait();
      bool fifo_valid[IN_NUM]; // a head-of-line flit exists at input i
      flit_t hol_data[IN_NUM]; // the head-of-line flit of input i
      // The request and grants of the Inputs/Outputs (one-hot vectors)
      bool qualified_reqs[OUT_NUM][IN_NUM]; // NOTE(review): declared but never used
      sc_uint<OUT_NUM> req_per_i[IN_NUM];
      sc_uint<IN_NUM> req_per_o[OUT_NUM];
      sc_uint<IN_NUM> gnt_per_o[OUT_NUM];
      sc_uint<OUT_NUM> gnt_per_i[IN_NUM];
      bool is_inp_granted[IN_NUM][OUT_NUM]; // NOTE(review): declared but never used

      // Input logic, loops for each input to produce the required requests
      #pragma hls_unroll yes
      set_inp: for (int ip=0; ip<IN_NUM; ++ip) {
        // The input checks for available data to sent.
        if(data_in[ip].Empty()) {
          fifo_valid[ip] = false;
          hol_data[ip] = flit_t();
        } else {
          fifo_valid[ip] = true;
          hol_data[ip] = data_in[ip].Peek();
        }
        // Depending the Flit type the input selects an output port to request.
        // The required output gets stored to be used by the rest of the flits.
        port_w_t current_op;
        bool is_head_single = hol_data[ip].performs_rc();
        if (fifo_valid[ip] && is_head_single) {
          // Route Computation Methods
          if (RC_METHOD==0) { current_op = do_rc_direct(hol_data[ip].get_dst());} // returns the node ID
          else if (RC_METHOD==1) { current_op = do_rc_const();} // returns 0. Used for mergers
          else if (RC_METHOD==2) { current_op = do_rc_type(hol_data[ip].get_type());} // Return 0/1 depending the type. used for splitters
          else if (RC_METHOD==3) { current_op = do_rc_common(hol_data[ip].get_dst(), hol_data[ip].get_type());}
          else if (RC_METHOD==4) { current_op = do_rc_lut(hol_data[ip].get_dst());}
          else if (RC_METHOD==5) { current_op = do_rc_xy_merge(hol_data[ip].get_dst(), hol_data[ip].get_type());}
          else { NVHLS_ASSERT_MSG(0, "Wrong Routing method selected.");}
          out_port[ip] = current_op;
        } else {
          current_op = out_port[ip];
        }
        // The required output port must be also Ready and or available.
        sc_uint<OUT_NUM> port_req_oh = (1<<current_op); //;wb2oh_case<OUT_NUM>(current_op);// (1<<current_op);
        bool ready_outp[OUT_NUM];
        #pragma hls_unroll yes
        for (int op=0; op<OUT_NUM; ++op) ready_outp[op] = !data_out[op].Full();
        bool outp_ready = mux<bool, OUT_NUM>::mux_oh_case(port_req_oh, ready_outp);
        bool outp_avail = mux<bool, OUT_NUM>::mux_oh_case(port_req_oh, out_available);
        // Request only when: the flit exists, the output can accept it, and
        // either we already hold the output lock or a head flit may claim a free output.
        bool all_ok = (fifo_valid[ip] && outp_ready && (out_lock[ip] || (is_head_single && outp_avail)));
        req_per_i[ip] = all_ok ? port_req_oh : (sc_uint<OUT_NUM>) 0;
      } // End of set_inp

      // Transpose the per-input request vectors into per-output vectors.
      swap_dim< sc_uint<OUT_NUM>, IN_NUM, sc_uint<IN_NUM>, OUT_NUM >( req_per_i, req_per_o );

      // Loop each of the outputs, choosing an input to win the output using an arbitration scheme
      #pragma hls_unroll yes
      per_o:for (unsigned char op=0; op<OUT_NUM; ++op) {
        bool any_gnt; // the output has been granted
        port_w_t gnt_ip; // Input port that got grant. NOTE(review): unused
        any_gnt = arbiter[op].arbitrate(req_per_o[op], gnt_per_o[op]);
        flit_t selected_flit;
        selected_flit = mux<flit_t, IN_NUM>::mux_oh_case(gnt_per_o[op], hol_data);
        if(any_gnt) {
          data_out[op].Push(selected_flit);
          // Head claims the output for its packet; tail releases it.
          if (selected_flit.is_head()) out_available[op] = false;
          else if (selected_flit.is_tail()) out_available[op] = true;
        }
      } // End per_o

      // Transpose the grants back to per-input form.
      swap_dim< sc_uint<IN_NUM>, OUT_NUM, sc_uint<OUT_NUM>, IN_NUM >( gnt_per_o, gnt_per_i );

      // Loop through each input to handle the case of actually winning the output
      #pragma hls_unroll yes
      popped_i:for (unsigned char ip=0; ip<IN_NUM; ++ip){
        if (gnt_per_i[ip].or_reduce()) {
          // Update the local lock bit depending the flit type
          // Head->locks Tail->unlocks
          if (hol_data[ip].is_head()) out_lock[ip] = true;
          else if(hol_data[ip].is_tail()) out_lock[ip] = false;
          data_in[ip].Pop();
        }
      }

      // Move flits from internal Buffer to Out Port
      #pragma hls_unroll yes
      for (unsigned char op=0; op<OUT_NUM; ++op)
        data_out[op].TransferNB();
      // Transfer any flit at input Port to its internal buffer (This is due to InBuffered)
      // Move flits from In Port to internal Buffer
      #pragma hls_unroll yes
      for (unsigned char ip=0; ip<IN_NUM; ++ip) {
        data_in[ip].TransferNB();
      }
    }
  }; // End of Router Job

  // Direct RC : The Dst Node is the Output port
  inline unsigned char do_rc_direct (sc_lv<dnp::D_W> destination) {return destination.to_uint();};
  // TYPE RC : Used for splitter/mergers. Routes RD to Out==0, and WR to Out==1
  inline unsigned char do_rc_type (sc_lv<dnp::T_W> type) {return (type==dnp::PACK_TYPE__RD_REQ || type==dnp::PACK_TYPE__RD_RESP) ? 0 : 1;};
  // Const RC : Used for mergers to always request port #0
  inline unsigned char do_rc_const () {return 0;};
  // For single stage NoCs. The per-type -2/+2 offsets encode the single-stage
  // port map — NOTE(review): confirm against the top-level wiring.
  inline unsigned char do_rc_common (sc_lv<dnp::D_W> destination, sc_lv<dnp::T_W> type) {
    if (type==dnp::PACK_TYPE__RD_REQ) return destination.to_uint();
    else if (type==dnp::PACK_TYPE__RD_RESP) return destination.to_uint()-2;
    else if (type==dnp::PACK_TYPE__WR_REQ) return destination.to_uint()+2;
    else return destination.to_uint();
  };
  // LUT Based RC : the destination ID indexes the route_lut input signals
  inline unsigned char do_rc_lut (sc_lv<dnp::D_W> destination) {
    return route_lut[destination.to_uint()].read();
  };
  // XY merged RD/WR routing on a 2-D mesh. Node IDs are row-major
  // (x = id % DIM_X, y = id / DIM_X). Ports appear to be: 0/1 = -X/+X,
  // 2/3 = -Y/+Y, 4 = RD ejection, 5 = WR ejection — inferred from the
  // comparisons below; confirm against the mesh instantiation.
  inline unsigned char do_rc_xy_merge (sc_uint<dnp::D_W> destination, sc_uint<dnp::T_W> type) {
    sc_uint<dnp::D_W> this_id_x = id_x.read();
    sc_uint<dnp::D_W> this_id_y = id_y.read();
    sc_uint<dnp::D_W> dst_x = destination % DIM_X;
    sc_uint<dnp::D_W> dst_y = destination / DIM_X;
    if (dst_x>this_id_x) {
      return 1;
    } else if (dst_x<this_id_x) {
      return 0;
    } else {
      if (dst_y>this_id_y) {
        return 3;
      } else if (dst_y<this_id_y) {
        return 2;
      } else {
        // Arrived at the destination node: eject (RD traffic -> 4, WR -> 5).
        if (type==dnp::PACK_TYPE__RD_REQ) return 4;
        else if (type==dnp::PACK_TYPE__RD_RESP) return 4;
        else if (type==dnp::PACK_TYPE__WR_REQ) return 5;
        else return 5;
      }
    }
  };
}; // End of SC_MODULE
#endif // WH_ROUTER_CON_ST_BUF_H
|
nickaj/cafhash
|
hashobj.c
|
<reponame>nickaj/cafhash
/* Copyright 2014 The University of Edinburgh */
/* Licensed under the Apache License, Version 2.0 (the "License"); */
/* you may not use this file except in compliance with the License. */
/* You may obtain a copy of the License at */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
typedef unsigned long int numb;
/* Should be 64 bit wide, to hold the square of: */
/* If you change this, also change "atol" in main */
#define modulus 1073741827
#define multipl 33554467

typedef numb * obj;   /* an object is a block of `numb` words */

numb N=NHASH;   /* Number of objects to hash (NHASH is supplied by the build) */
size_t m=8;     /* Size of objects in bytes, rounded up to be a multiple of
                   sizeof(numb) */
/*numb k;*/     /* Number of times each object is expected */

/* Some fast way to create funny data: */
numb val = 1234567;   /* state of the multiplicative-congruential generator */
/* Advance the multiplicative-congruential generator and return the new value. */
numb next(void)
{
	const numb stepped = (val * multipl) % modulus;
	val = stepped;
	return stepped;
}
void resetvalue(int *numi,int *inum)
{
int i,j,nval;
numb dummy;
val = 1234567;
/* nval=N/(*numi);*/
nval=NHASH;
for(j=0;j<(*inum-1)*nval;j++){
for(i=m/sizeof(numb);i>0;i--){
dummy = next();
}
}
}
obj newobj(void)
{
obj o,o2;
int i;
o = malloc(m);
o2 = o;
for (i = m/sizeof(numb); i > 0;i--) *o2++ = next();
/* printf("%d %d\n",o,*o);*/
return o;
}
/* Hash function: sum the object's words and reduce modulo the table length
 * (2*NHASH*(*numi)+1). */
numb f(obj o, numb *numi)
{
	const numb table_len = 2*NHASH*(*numi)+1;
	numb acc = 0;
	int w;

	for (w = 0; w < (int)(m/sizeof(numb)); w++) {
		acc += o[w];
	}
	return acc % table_len;
}
/* Fortran-callable: create a fresh object, return its (1-based) hash slot in
 * *v and its first word in *nb.
 * NOTE(review): the incoming `o` is a by-value pointer that gets overwritten
 * by newobj(), so the allocation never reaches the caller and appears to
 * leak on every call — confirm against the Fortran side. */
void fnew_(obj o, numb *nb, numb *v, numb *numi){
	o = newobj();
	/* get the hash */
	*v=f(o,numi)+1; /*F style counting! */
	*nb=*o;
}
/* Fortran-callable wrapper around resetvalue() (trailing underscore matches
 * the Fortran name-mangling convention; arguments arrive by reference). */
void fresetvalue_(int *numi, int *inum){
	resetvalue(numi,inum);
}
/* Fortran-callable: release an object allocation.
 * NOTE(review): fnew_ never hands its allocation pointer back to Fortran,
 * so verify what value the caller actually passes here. */
void freeobj_(obj o)
{
	free(o);
}
|
Gottox/qemuconf
|
qemuconf.c
|
<filename>qemuconf.c
/*
* qemuconf.c
* Copyright (C) 2015 tox <<EMAIL>>
*
* Distributed under terms of the MIT license.
*/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <limits.h>
#include <string.h>
#include <libgen.h>
#include <ctype.h>
/* DROP(x, y): record the current position in y, then advance i while the
 * predicate x holds (bounded by len). Used pervasively by the parser. */
#define DROP(x, y) { for(y = i; i < len && x; i++); }
/* BEGINS(x, y): does string x start with literal y? */
#define BEGINS(x, y) (strncmp(x, y, strlen(y)) == 0)

static int start();
static int dump();
static int addoptarg(char *arg, int len);
static int addopt(char *opt, int len);
static int compact(char *text, int i, int len, int minindent);
static int parseconfig(char *text, int len);
static int loadconfig(char *path);

char **cargv;                 /* argument vector handed to execvp() */
char **curopt = NULL;         /* when non-NULL, the next value is stored here (cwd/binary) */
char *cwd = ".";              /* directory to chdir() into before exec */
static char *binary = NULL;   /* target executable; set by -q or the "binary" config key */
int cargc = 1;                /* next free cargv slot (slot 0 is the binary) */
int maxargc = 0;              /* cargv capacity (_SC_ARG_MAX) */
/* Default action: chdir into the configured directory and replace this
 * process with the target binary. Returns 1 only if either step fails. */
int
start() {
	if(chdir(cwd) != 0) {
		perror(cwd);
		return 1;
	}
	execvp(binary, cargv);
	/* execvp only returns on failure */
	perror(binary);
	return 1;
}
/* -n action: print the assembled command line (binary + args) to stdout.
 * Always returns 0 — previously this int-returning function fell off the
 * end without a return statement (undefined behavior when the caller
 * inspects the result, as main() does). */
int dump() {
	int i;
	fputs(cargv[0], stdout);
	for(i = 1; i < cargc; i++) {
		fputc(' ', stdout);
		fputs(cargv[i], stdout);
	}
	fputc('\n', stdout);
	return 0;
}
/*
 * Store the value `arg` (NUL-terminated in place at arg[len]): if a special
 * config key ("cwd"/"binary") is pending via curopt, the value lands there;
 * otherwise it is appended to cargv. Returns 1 when cargv is full.
 */
int
addoptarg(char *arg, int len) {
	arg[len] = '\0';
	if(curopt != NULL) {
		/* a special key claimed the next value */
		*curopt = arg;
		curopt = NULL;
		return 0;
	}
	cargv[cargc] = arg;
	if(++cargc == maxargc) {
		fputs("Too many args", stderr);
		return 1;
	}
	cargv[cargc] = NULL;   /* keep the vector NULL-terminated for execvp() */
	return 0;
}
/*
 * Handle a config key of `len` characters. "cwd" and "binary" arm curopt so
 * the next value updates the matching global; any other key becomes a
 * "-key" option appended to cargv. Returns non-zero on allocation failure.
 */
int
addopt(char *opt, int len) {
	char *optdup;
	if(BEGINS(opt, "cwd")) {
		curopt = &cwd;
	} else if(BEGINS(opt, "binary")) {
		curopt = &binary;
	} else {
		/* len + 2: one byte for the leading '-', one for the NUL */
		if(!(optdup = calloc(sizeof(char), len + 2))) {
			perror("malloc");
			return 1;
		}
		optdup[0] = '-';
		memcpy(optdup+1, opt, len);
		return addoptarg(optdup, len + 1);
	}
	return 0;
}
/*
 * Collapse an indented sub-block of the config (starting at text[i], right
 * after a "key:" line) into a single "k=v,k=v,..." run rewritten in place,
 * so the caller can consume it as one option argument. `minindent` is the
 * parent line's indentation: the block ends at the first later line indented
 * at or below it.
 * NOTE(review): heavily dependent on DROP() mutating i/_i in exact order;
 * kept byte-identical on purpose.
 */
int
compact(char *text, int i, int len, int minindent) {
	int _i, indent, curindent, w=i, line = 0;
	for(curindent = 0, indent = -1, line = 0; i < len; line++, i++) {
		DROP(isspace(text[i]) && text[i] != '\n', _i);
		curindent = i - _i;
		if(text[i] == '#' || text[i] == '\n') {
			/* skip comments and blank lines */
			DROP(text[i] != '\n', i);
			continue;
		}
		else if(line != 0 && curindent <= minindent) {
			break;   /* de-indented: the block is over */
		}
		DROP(isalnum(text[i]), _i);
		memmove(&text[w], &text[_i], i - _i);   /* copy the key */
		w += i - _i;
		DROP(isspace(text[i]) && text[i] != '\n', _i);
		if(_i != i) {
			text[w++] = '=';   /* key has a value: join with '=' */
		}
		DROP(text[i] != '\n', _i);
		memmove(&text[w], &text[_i], i - _i);   /* copy the value */
		w += i - _i;
		text[w++] = ',';
		curindent = 0;
	}
	/* blank out the consumed region and restore the newline structure so the
	 * caller's line accounting stays correct */
	memset(&text[w], ' ', i - curindent - w);
	memset(&text[w-1], '\n', line - 1);
	text[i-curindent-1] = '\n';
	return 0;
}
/*
 * Parse the config text: one "key [value]" per line, '#' starts a comment,
 * ". path" includes another file via loadconfig(), and a trailing ':'
 * introduces an indented block that compact() flattens into one argument.
 * Stored argument strings point INTO `text`, so the buffer must outlive the
 * parsed options. Returns non-zero (after printing a location) on error.
 */
int
parseconfig(char *text, int len) {
	int i = 0, _i, line, linestart, curindent;
	for(linestart = i = line = 0; i < len; line++, linestart = ++i) {
		DROP(isspace(text[i]) && text[i] != '\n', _i);
		curindent = i - _i;
		if(i >= len)
			break;
		if(text[i] == '\n')
			continue;
		if(text[i] == '#') {   /* comment: skip to end of line */
			DROP(text[i] != '\n', i);
			continue;
		}
		if(text[i] == '.') {   /* ". path": include another config file */
			i++;
			DROP(isspace(text[i]), i);
			DROP(text[i] != '\n', _i);
			text[i] = '\0';
			if(loadconfig(&text[_i])) {
				fprintf(stderr, "Error at line %i. ", line + 1);
				return 1;
			}
			continue;
		}
		DROP(isalnum(text[i]) || text[i] == '-', _i);
		addopt(&text[_i], i - _i);
		DROP(isspace(text[i]) && text[i] != '\n', i);
		if(text[i] == '\n')
			continue;
		else if(text[i] == ':' || i != _i) {
			if(text[i] == ':') {
				_i = ++i;
			}
			else {
				DROP(isspace(text[i]) && text[i] != '\n', i);
				DROP(text[i] != '\n', _i);
			}
			if(text[i-1] == ':') {   /* "key:" header: flatten the indented block */
				text[i-1] = ',';
				if(compact(text, i, len, curindent)) {
					fprintf(stderr, "at line %i character %i. ", line + 1, i - linestart);
					return 1;
				}
				DROP(text[i] != '\n', i);
			}
			addoptarg(&text[_i], i - _i);
		}
		else if(i == _i) {
			fprintf(stderr, "Expected whitespace or ':' instead of '%c' at line %i character %i. ", text[i], line + 1, i - linestart);
			return 1;
		}
	}
	return 0;
}
/*
 * Read the config file at `path`, temporarily chdir into its directory so
 * relative ". file" includes resolve, and feed the text to parseconfig().
 * The text buffer is deliberately NOT freed: parseconfig() stores argument
 * pointers into it (via addopt/addoptarg), so it must outlive this call.
 * Fixes over the previous version: fread() now appends at `text + len`
 * (reading at `text` clobbered earlier chunks for files larger than one
 * BUFSIZ block), the FILE* is always closed, realloc() is checked, and
 * pathdup is freed on the error paths. Returns 0 on success.
 */
int
loadconfig(char *path) {
	int r = 0, len = 0;
	char *text = NULL, *tmp, *wd;
	char oldwd[PATH_MAX+1] = { 0 };
	FILE *file;
	char *pathdup = strdup(path);
	if(!(file = fopen(path, "r"))) {
		perror(path);
		free(pathdup);
		return 1;
	}
	if(!getcwd(oldwd, sizeof(oldwd))) {
		perror("getcwd");
		fclose(file);
		free(pathdup);
		return EXIT_FAILURE;
	}
	wd = dirname(pathdup);
	if(chdir(wd)) {
		perror(wd);
		fclose(file);
		free(pathdup);
		return 1;
	}
	/* Slurp the whole file, growing the buffer one BUFSIZ block at a time
	 * and appending each chunk at offset len. */
	do {
		len += r;
		if(!(tmp = realloc(text, sizeof(char) * (len + BUFSIZ)))) {
			perror("realloc");
			fclose(file);
			free(pathdup);
			return 1;
		}
		text = tmp;
	} while((r = fread(text + len, sizeof(char), BUFSIZ - 1, file)) > 0);
	text[len] = 0;
	if(ferror(file)) {
		perror(path);
		fclose(file);
		free(pathdup);
		return 1;
	}
	fclose(file);
	if(parseconfig(text, strlen(text))) {
		fprintf(stderr, "At file '%s'\n", path);
		free(pathdup);
		return 1;
	}
	if(chdir(oldwd)) {
		perror(oldwd);
		free(pathdup);
		return 1;
	}
	free(pathdup);
	return 0;
}
/*
 * Usage: qemuconf [-n] [-q exec] [-V] CONFIGFILE [-- qemu args...]
 * Parse the flags, load the config into cargv, append any trailing argv
 * entries, then run the selected action: exec the binary (default) or
 * dump the assembled command line (-n).
 */
int main(int argc, char *argv[]) {
	int opt;
	int (*action)() = start;   /* default action; -n switches to dump */
	while ((opt = getopt(argc, argv, "nq:V")) != -1) {
		switch(opt) {
		case 'q':
			binary = optarg;
			break;
		case 'n':
			action = dump;
			break;
		case 'V':
			puts("qemuconf-" VERSION);
			return EXIT_SUCCESS;
		usage:   /* jumped to from below when the config file argument is missing */
		default:
			printf("Usage: %s [-n] [-q exec] [-V] CONFIGFILE [-- [qemu args...]]\n", argv[0]);
			return EXIT_FAILURE;
		}
	}
	if(optind >= argc)
		goto usage;
	if((maxargc = sysconf(_SC_ARG_MAX)) < 0) {
		perror("sysconf");
		return EXIT_FAILURE;
	}
	if(!(cargv = calloc(sizeof(char *), maxargc))) {
		perror("calloc");
		return EXIT_FAILURE;
	}
	if(loadconfig(argv[optind++]))
		return EXIT_FAILURE;
	/* pass everything after the config file straight through to qemu */
	for(; optind < argc; optind++, cargc++) {
		cargv[cargc] = argv[optind];
	}
	if(!binary)
		binary = BINARY;   /* build-time default executable */
	cargv[0] = binary;
	if(action())
		return EXIT_FAILURE;
	return EXIT_SUCCESS;
}
|
miguelmoraperea/CommandInterpreter
|
src/CommandInterpreter.c
|
<gh_stars>0
/*****************************************************************************
* Module name: CommandInterpreter.c
*
* First written on 2019/05/13 by <NAME>.
*
* Module Description:
* This module contains functions that parse an input line into arguments and
* executes the command if is found in a list of predefined commands.
*
*****************************************************************************/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include "CommandInterpreter.h"
#include "Mocks_Commands.h"
#define TOK_DELIM " \t\r\n\a"   /* characters that separate input tokens */
#define MAX_NUM_OF_ARGS 4       /* initial size / growth step of the args vector */

static char **args;             /* parsed tokens; args[0] is the command name */
static int argsBufferSize = -1; /* allocated slots in args */
static int numOfArgs = -1;      /* tokens AFTER the command name */
static int usedArgs = 0;        /* total tokens stored (including args[0]) */
/* Look args[0] up in the command table; return its index, or -1 if unknown. */
static int CommandInt_IsValidCommand(void)
{
    int foundIndex = -1;

    for (int i = 0; (i < NUM_OF_COMMANDS) && (foundIndex < 0); ++i)
    {
        if (strcmp(args[0], commands_list[i].name) == 0)
        {
            foundIndex = i;
        }
    }
    return foundIndex;
}
/* Zero-initialized allocation helper: calloc() that terminates the program
 * on failure instead of returning NULL. The failure message now goes to
 * stderr (it previously went to stdout, where it could be lost or mixed
 * into command output) and ends with a newline. */
static void *Allocate(int numOfElements, size_t sizeOfElement)
{
    void *ptr = calloc((size_t)numOfElements, sizeOfElement);
    if (ptr == NULL)
    {
        fprintf(stderr, "Error allocating memory\n");
        exit(1);
    }
    return ptr;
}
/*
 * Split `inputLine` on TOK_DELIM into the global args vector, growing it in
 * MAX_NUM_OF_ARGS steps. Any previously parsed arguments are released first.
 * On success numOfArgs holds the count of tokens AFTER the command name.
 * Returns ERROR if the line contains no tokens or a reallocation fails.
 * Note: strtok() modifies inputLine in place.
 */
static ci_result_t ParseIntoArgs(char * inputLine)
{
    CommandInt_Destroy();   /* free the previous vector before rebuilding */
    argsBufferSize = MAX_NUM_OF_ARGS;
    args = (char **)Allocate(argsBufferSize, sizeof(char *));
    char *token = strtok(inputLine, TOK_DELIM);
    if (token == NULL )
    {
        return ERROR;   /* blank line: nothing to parse */
    }
    int i = 0;
    /* copy the command name into its own allocation */
    args[i] = (char *)Allocate((strlen(token) + 1), sizeof(char));
    memmove(args[i], token, strlen(token) + 1);
    usedArgs++;
    i++;
    do
    {
        token = strtok(NULL, TOK_DELIM);
        if (token != NULL)
        {
            if (i >= argsBufferSize - 1)
            {
                /* grow the vector by another MAX_NUM_OF_ARGS slots */
                argsBufferSize += MAX_NUM_OF_ARGS;
                args = (char **)realloc(args, (unsigned)argsBufferSize * sizeof(char *));
                if (args == NULL )
                {
                    return ERROR;
                }
            }
            args[i] = (char *)Allocate((strlen(token) + 1), sizeof(char));
            memmove(args[i], token, strlen(token) + 1);
            usedArgs++;
            i++;
        }
    } while (token != NULL);
    numOfArgs = i - 1;   /* args[0] is the command itself, not an argument */
    return SUCCESS;
}
/* Run the command at commandIndex with the parsed arguments.
 * Returns ERROR when the index is outside the command table. */
ci_result_t ExecuteCommand(int commandIndex)
{
    const int inRange = (commandIndex >= 0) && (commandIndex < NUM_OF_COMMANDS);

    if (!inRange)
    {
        return ERROR;
    }
    commands_list[commandIndex].fptr(args, numOfArgs);
    return SUCCESS;
}
/* Allocate the initial argument vector (MAX_NUM_OF_ARGS slots). */
void CommandInt_Init(void)
{
    const int initialSlots = MAX_NUM_OF_ARGS;

    argsBufferSize = initialSlots;
    args = (char **)Allocate((unsigned)initialSlots, sizeof(char *));
}
/* Release every stored token and the vector itself, and reset the counters.
 * The vector pointer is now cleared after freeing, so calling Destroy twice
 * in a row (or Destroy before any parse) no longer risks a double free on
 * the stale pointer. */
void CommandInt_Destroy(void)
{
    if (args != NULL)
    {
        for (int i = 0; i < usedArgs; ++i)
        {
            free(args[i]);
        }
        free(args);
        args = NULL;   /* make repeated Destroy calls harmless */
    }
    numOfArgs = 0;
    usedArgs = 0;
}
/* Return the internal argument vector (args[0] is the command name). */
char **CommandInt_GetArgs(void)
{
    return args;
}
ci_result_t CommandInt_Handle(char * inputLine)
{
if(ParseIntoArgs(inputLine) == ERROR)
{
return ERROR;
}
int cIndex = CommandInt_IsValidCommand();
if (cIndex < 0)
{
return ERROR;
}
return ExecuteCommand(cIndex);
}
|
miguelmoraperea/CommandInterpreter
|
mocks/Mocks_Commands.h
|
/*****************************************************************************
* Module name: CommandsList.h
*
* First written on May 16, 2019 by Miguel.
*
* Module Description:
* This module contains the interface of the mock commands.
*
*****************************************************************************/
#ifndef COMMANDSLIST_H_
#define COMMANDSLIST_H_

/* Number of entries in commands_list. */
#define NUM_OF_COMMANDS 2

/* One command-table entry: the name matched against args[0] plus the handler
 * invoked with the full args vector and the count of arguments after it. */
typedef struct {
    char * name;
    void (*fptr)(char ** args, int numOfArgs);
} command_t;

/* Table of available commands, defined in Mocks_Commands.c. */
extern command_t commands_list[];

void help(char ** args, int numOfArgs);
void version(char ** args, int numOfArgs);

#endif /* COMMANDSLIST_H_ */
|
miguelmoraperea/CommandInterpreter
|
src/main.c
|
<gh_stars>0
/*****************************************************************************
* Module name: main.c
*
* First written on May 23, 2019 by Miguel.
*
*****************************************************************************/
#include <stdio.h>
#include "CommandInterpreter.h"
#define MAX_SIZE_OF_INPUT_LINE 128
/* Interactive REPL: prompt, read a line, hand it to the command interpreter.
 * The fgets() result is now checked so the loop terminates cleanly on EOF
 * or a read error (previously it spun forever re-handling the stale
 * buffer), and the interpreter's memory is released on exit. */
int main(void)
{
    char userInputLine[MAX_SIZE_OF_INPUT_LINE];

    CommandInt_Init();

    while(1)
    {
        printf("> ");
        if (fgets(userInputLine, sizeof(userInputLine), stdin) == NULL)
        {
            break;   /* EOF or read error */
        }
        CommandInt_Handle(userInputLine);
    }

    CommandInt_Destroy();
    return 0;
}
|
miguelmoraperea/CommandInterpreter
|
inc/CommandInterpreter.h
|
<reponame>miguelmoraperea/CommandInterpreter<filename>inc/CommandInterpreter.h<gh_stars>0
/*****************************************************************************
* Module name: CommandInterpreter.h
*
* First written on 2019/05/13 by <NAME>.
*
* Module Description:
* This is the interface for Command Interpreter which contains functions that
* parse an input line into arguments and executes the command if is found
* in a list of predefined commands.
*
*****************************************************************************/
#ifndef COMMANDINTERPRETER_H_
#define COMMANDINTERPRETER_H_

/* Result codes returned by the command-interpreter API. */
typedef enum {
    ERROR = -1, SUCCESS = 1
} ci_result_t;

void CommandInt_Init(void);                      /* allocate the argument vector */
void CommandInt_Destroy(void);                   /* free all parsed arguments */
char **CommandInt_GetArgs(void);                 /* expose the parsed argument vector */
ci_result_t CommandInt_Handle(char * inputLine); /* parse a line and run its command */

#endif /* COMMANDINTERPRETER_H_ */
|
miguelmoraperea/CommandInterpreter
|
mocks/Mocks_Commands.c
|
<gh_stars>0
/*****************************************************************************
* Module name: Mocks_Commands.c
*
* First written on May 23, 2019 by Miguel.
*
* Module Description:
* This module contains a list of mock commands and their implementation.
*
*****************************************************************************/
#include <stdio.h>
#include "Mocks_Commands.h"
#include "Version.h"
/* NOTE(review): duplicates the identical #define in Mocks_Commands.h —
 * legal (identical redefinition) but keeping a single copy would be safer. */
#define NUM_OF_COMMANDS 2

/* Name -> handler table searched by the command interpreter. */
command_t commands_list[NUM_OF_COMMANDS] = {
        {"help", &help},
        {"version", &version}
};
/* Print a NUL-terminated string to stdout one character at a time. */
static void printArg(char *arg)
{
    while (*arg != '\0')
    {
        printf("%c", *arg);
        arg++;
    }
}
/* Echo every argument after the command name, one per line, preceded by a
 * "Passed arguments:" banner. Prints nothing when there are no arguments. */
static void printAllArgs(char ** args, int numOfArgs)
{
    if (numOfArgs <= 0)
    {
        return;
    }
    printf("\nPassed arguments:\n");
    /* args[0] is the command name, so start at index 1 */
    for (int i = 1; i <= numOfArgs; ++i)
    {
        printArg(args[i]);
        printf("\n");
    }
}
/* "help" command: list the available commands, then echo any extra args. */
void help(char ** args, int numOfArgs)
{
    static const char *banner[] = {
        "/***** help *****/\r\n",
        "Available commands:\r\n",
        "- version\r\n",
        "- help\r\n",
    };

    for (size_t i = 0; i < sizeof(banner) / sizeof(banner[0]); ++i)
    {
        printf("%s", banner[i]);
    }
    printAllArgs(args, numOfArgs);
}
/* "version" command: print the compile-time VERSION string, then echo any
 * extra arguments. NOTE(review): VERSION is used as the printf format
 * string — acceptable for a trusted build constant, but printf("%s", VERSION)
 * would be safer if it ever contained '%'. */
void version(char ** args, int numOfArgs)
{
    printf(VERSION);
    printAllArgs(args, numOfArgs);
}
|
altafan/secp256k1-zkp
|
lib/main.c
|
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "secp256k1.h"
#include "secp256k1_ecdh.h"
#include "secp256k1_generator.h"
#include "secp256k1_rangeproof.h"
#include "secp256k1_preallocated.h"
#include "secp256k1_surjectionproof.h"
#ifndef SECP256K1_CONTEXT_ALL
#define SECP256K1_CONTEXT_ALL SECP256K1_CONTEXT_NONE | SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY
#endif
/* ECDH: parse a 33-byte compressed public key and multiply it by `scalar`,
 * writing the shared secret to `output`. Returns 0 on parse failure,
 * otherwise secp256k1_ecdh's result. The context is now destroyed on the
 * parse-failure path as well (it was previously leaked there). */
int ecdh(unsigned char *output, const unsigned char *pubkey, const unsigned char *scalar)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_pubkey point;
    if (!secp256k1_ec_pubkey_parse(ctx, &point, pubkey, 33))
    {
        secp256k1_context_destroy(ctx);   /* was leaked on this early return */
        return 0;
    }
    int ret = secp256k1_ecdh(ctx, output, &point, scalar, NULL, NULL);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Build a blinded generator from key32/blind32; on success (ret==1) the
 * 64-byte opaque generator is written back into gen_data. */
int generator_generate_blinded(unsigned char *gen_data, const unsigned char *key32, const unsigned char *blind32)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_generator gen;
    memcpy(&(gen.data), gen_data, 64);   /* NOTE(review): overwritten below; likely redundant */
    int ret = secp256k1_generator_generate_blinded(ctx, &gen, key32, blind32);
    if (ret == 1)
    {
        memcpy(gen_data, gen.data, 64);
    }
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Parse a serialized generator from `input`; on success the opaque 64-byte
 * representation is written back into gen_data. */
int generator_parse(unsigned char *gen_data, const unsigned char *input)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_generator gen;
    memcpy(&(gen.data), gen_data, 64);
    int ret = secp256k1_generator_parse(ctx, &gen, input);
    if (ret == 1)
    {
        memcpy(gen_data, &(gen.data), 64);
    }
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Serialize the opaque 64-byte generator in gen_data into `output`. */
int generator_serialize(unsigned char *output, const unsigned char *gen_data)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_generator gen;
    memcpy(&(gen.data), gen_data, 64);
    int ret = secp256k1_generator_serialize(ctx, output, &gen);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Compute the final blinding factor so all commitments sum to zero.
 * NOTE(review): the caller's blind_factors[n_total-1] slot is overwritten
 * with bytes_out before the call — the result lands in bytes_out AND the
 * caller's array is mutated; confirm callers expect this. */
int pedersen_blind_generator_blind_sum(const uint64_t *values, const unsigned char* const *generator_blinds, unsigned char **blind_factors, size_t n_total, size_t n_inputs, unsigned char * bytes_out)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    blind_factors[n_total - 1] = bytes_out;
    int ret = secp256k1_pedersen_blind_generator_blind_sum(ctx, values, generator_blinds, (unsigned char *const *)blind_factors, n_total, n_inputs);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Parse a serialized Pedersen commitment; the opaque 64-byte representation
 * is copied back into commit_data (unconditionally, even on failure). */
int pedersen_commitment_parse(unsigned char *commit_data, const unsigned char *input)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_pedersen_commitment commit;
    memcpy(&(commit.data), commit_data, 64);
    int ret = secp256k1_pedersen_commitment_parse(ctx, &commit, input);
    memcpy(commit_data, &(commit.data), 64);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Serialize the opaque 64-byte Pedersen commitment in commit_data. */
int pedersen_commitment_serialize(unsigned char *output, unsigned char *commit_data)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_pedersen_commitment commit;
    memcpy(&(commit.data), commit_data, 64);
    int ret = secp256k1_pedersen_commitment_serialize(ctx, output, &commit);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Create a Pedersen commitment to `value` with blinding factor `blind` over
 * the given generator; the opaque commitment is written back to commit_data
 * (unconditionally, matching pedersen_commitment_parse). */
int pedersen_commit(unsigned char *commit_data, const unsigned char *blind, uint64_t value, const unsigned char *generator_data)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_pedersen_commitment commit;
    memcpy(&(commit.data), commit_data, 64);
    secp256k1_generator gen;
    memcpy(&(gen.data), generator_data, 64);
    int ret = secp256k1_pedersen_commit(ctx, &commit, blind, value, &gen);
    memcpy(commit_data, &(commit.data), 64);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Sum `n` blinding factors (the first `npos` positive, the rest negative)
 * into `sum`. */
int pedersen_blind_sum(unsigned char *sum, const unsigned char *const *blinds, size_t n, size_t npos)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    int ret = secp256k1_pedersen_blind_sum(ctx, sum, blinds, n, npos);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Verify that the positive commitments balance the negative ones.
 * The opaque 64-byte commitment blobs are unpacked into library structs
 * (and pointer arrays, as the API takes pointer-to-pointer) before the call. */
int pedersen_verify_tally(const unsigned char *const *commits_data, size_t n_commits, const unsigned char *const *negcommits_data, size_t n_negcommits)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_pedersen_commitment commits[n_commits];
    secp256k1_pedersen_commitment negcommits[n_negcommits];
    secp256k1_pedersen_commitment *p_commits[n_commits];
    secp256k1_pedersen_commitment *p_negcommits[n_negcommits];
    for (int i = 0; i < (int)n_commits; ++i)
    {
        memcpy(&(commits[i].data), commits_data[i], 64);
        p_commits[i] = &commits[i];
    }
    for (int i = 0; i < (int)n_negcommits; ++i)
    {
        memcpy(&(negcommits[i].data), negcommits_data[i], 64);
        p_negcommits[i] = &negcommits[i];
    }
    int ret = secp256k1_pedersen_verify_tally(ctx, (const secp256k1_pedersen_commitment * const*)p_commits, n_commits, (const secp256k1_pedersen_commitment * const*)p_negcommits, n_negcommits);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Author a rangeproof for `value` committed in commit_data; the proof is
 * written to `proof` and its length to *plen. */
int rangeproof_sign(unsigned char *proof, size_t *plen, uint64_t min_value, const unsigned char *commit_data, const unsigned char *blind, const unsigned char *nonce, int exp, int min_bits, uint64_t value, const unsigned char *message, size_t msg_len, const unsigned char *extra_commit, size_t extra_commit_len, const unsigned char *generator_data)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_pedersen_commitment commit;
    memcpy(&(commit.data), commit_data, 64);
    secp256k1_generator gen;
    memcpy(&(gen.data), generator_data, 64);
    int ret = secp256k1_rangeproof_sign(ctx, proof, plen, min_value, &commit, blind, nonce, exp, min_bits, value, message, msg_len, extra_commit, extra_commit_len, &gen);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Extract the public parameters (exponent, mantissa bits, min/max value)
 * from a rangeproof without verifying it. */
int rangeproof_info(int *exp, int *mantissa, uint64_t *min_value, uint64_t *max_value, const unsigned char *proof, size_t plen)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    int ret = secp256k1_rangeproof_info(ctx, exp, mantissa, min_value, max_value, proof, plen);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Verify a rangeproof against the commitment in commit_data; on success the
 * proven value bounds land in *min_value / *max_value. */
int rangeproof_verify(uint64_t *min_value, uint64_t *max_value, const unsigned char *commit_data, const unsigned char *proof, size_t plen, const unsigned char *extra_commit, size_t extra_commit_len, const unsigned char *generator_data)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_pedersen_commitment commit;
    memcpy(&(commit.data), commit_data, 64);
    secp256k1_generator gen;
    memcpy(&(gen.data), generator_data, 64);
    int ret = secp256k1_rangeproof_verify(ctx, min_value, max_value, &commit, proof, plen, extra_commit, extra_commit_len, &gen);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Rewind a rangeproof with the prover's nonce, recovering the blinding
 * factor, committed value, and embedded message. */
int rangeproof_rewind(unsigned char *blind_out, uint64_t *value_out, unsigned char *message_out, size_t *outlen, const unsigned char *nonce, uint64_t *min_value, uint64_t *max_value, const unsigned char *commit_data, const unsigned char *proof, size_t plen, const unsigned char *extra_commit, size_t extra_commit_len, const unsigned char *generator_data)
{
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_pedersen_commitment commit;
    secp256k1_generator gen;
    memcpy(&(gen.data), generator_data, 64);
    memcpy(&(commit.data), commit_data, 64);
    int ret = secp256k1_rangeproof_rewind(ctx, blind_out, value_out, message_out, outlen, nonce, min_value, max_value, &commit, proof, plen, extra_commit, extra_commit_len, &gen);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Parse a serialized surjection proof. On success the parsed fields are now
 * copied back to the caller's n_inputs/used_inputs/data buffers — the
 * previous version filled the local struct and discarded it, unlike the
 * sibling serialize/initialize/generate wrappers which all copy back. */
int surjectionproof_parse(size_t *n_inputs, unsigned char *used_inputs, unsigned char *data, const unsigned char *input, size_t inputlen) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_surjectionproof proof;
    memcpy(&(proof.n_inputs), n_inputs, sizeof(proof.n_inputs));
    memcpy(&(proof.used_inputs), used_inputs, 32);
    memcpy(&(proof.data), data, 8224);
    int ret = secp256k1_surjectionproof_parse(ctx, &proof, input, inputlen);
    if (ret == 1) {
        /* propagate the parsed proof back to the caller */
        memcpy(n_inputs, &(proof.n_inputs), sizeof(proof.n_inputs));
        memcpy(used_inputs, &(proof.used_inputs), 32);
        memcpy(data, &(proof.data), 8224);
    }
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Serialize the surjection proof described by n_inputs/used_inputs/data
 * into `output` (length returned via *outputlen). Uses a CONTEXT_NONE
 * context, unlike the other wrappers here. */
int surjectionproof_serialize(unsigned char *output, size_t *outputlen, size_t *n_inputs, const unsigned char *used_inputs, const unsigned char *data) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
    secp256k1_surjectionproof proof;
    memcpy(&(proof.n_inputs), n_inputs, sizeof(proof.n_inputs));
    memcpy(&(proof.used_inputs), used_inputs, 32);
    memcpy(&(proof.data), data, 8224);
    int ret = secp256k1_surjectionproof_serialize(ctx, output, outputlen, &proof);
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Initialize a surjection proof over the fixed input asset tags, choosing an
 * input index. On success (ret > 0) the proof fields are copied back into
 * the caller's n_inputs/used_inputs/data buffers. */
int surjectionproof_initialize(size_t *n_inputs, unsigned char *used_inputs, unsigned char *data, size_t *input_index, const unsigned char * const *input_tags_data, const size_t n_input_tags, const size_t n_input_tags_to_use, const unsigned char *output_tag_data, const size_t n_max_iterations, const unsigned char *random_seed32) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_surjectionproof proof;
    secp256k1_fixed_asset_tag input_tags[n_input_tags];
    for (int i = 0; i < (int)n_input_tags; ++i) {
        memcpy(&(input_tags[i].data), input_tags_data[i], 32);
    }
    secp256k1_fixed_asset_tag output_tag;
    memcpy(&(output_tag.data), output_tag_data, 32);
    int ret = secp256k1_surjectionproof_initialize(ctx, &proof, input_index, input_tags, n_input_tags, n_input_tags_to_use, &output_tag, n_max_iterations, random_seed32);
    if (ret > 0) {
        memcpy(n_inputs, &(proof.n_inputs), sizeof(proof.n_inputs));
        memcpy(used_inputs, &(proof.used_inputs), 32);
        memcpy(data, &(proof.data), 8224);
    }
    secp256k1_context_destroy(ctx);
    return ret;
}
/* Complete an initialized surjection proof using the ephemeral input/output
 * tags and blinding keys. On success (ret == 1) the finished proof is
 * copied back into the caller's n_inputs/used_inputs/data buffers. */
int surjectionproof_generate(size_t *n_inputs, unsigned char *used_inputs, unsigned char *data, const unsigned char * const *ephemeral_input_tags_data, const size_t n_ephemeral_input_tags, const unsigned char *ephemeral_output_tag_data, size_t input_index, const unsigned char *input_blinding_key, const unsigned char *output_blinding_key) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_ALL);
    secp256k1_surjectionproof proof;
    memcpy(&(proof.n_inputs), n_inputs, sizeof(proof.n_inputs));
    memcpy(&(proof.used_inputs), used_inputs, 32);
    memcpy(&(proof.data), data, 8224);
    secp256k1_generator ephemeral_input_tags[n_ephemeral_input_tags];
    for (int i = 0; i < (int)n_ephemeral_input_tags; ++i) {
        memcpy(&(ephemeral_input_tags[i].data), ephemeral_input_tags_data[i], 64);
    }
    secp256k1_generator ephemeral_output_tag;
    memcpy(&(ephemeral_output_tag.data), ephemeral_output_tag_data, 64);
    int ret = secp256k1_surjectionproof_generate(ctx, &proof, ephemeral_input_tags, n_ephemeral_input_tags, &ephemeral_output_tag, input_index, input_blinding_key, output_blinding_key);
    if (ret == 1) {
        memcpy(n_inputs, &(proof.n_inputs), sizeof(proof.n_inputs));
        memcpy(used_inputs, &(proof.used_inputs), 32);
        memcpy(data, &(proof.data), 8224);
    }
    secp256k1_context_destroy(ctx);
    return ret;
}
/*
 * Flat-buffer wrapper around secp256k1_surjectionproof_verify.
 * Rebuilds the proof struct and the generator arrays from raw bytes and
 * returns the library's verification result (1 on success).
 */
int surjectionproof_verify(const size_t *n_inputs, const unsigned char *used_inputs, const unsigned char *data, const unsigned char * const *ephemeral_input_tags_data, const size_t n_ephemeral_input_tags, const unsigned char *ephemeral_output_tag_data) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);

    /* Reassemble the proof from its flattened components. */
    secp256k1_surjectionproof proof;
    memcpy(&proof.n_inputs, n_inputs, sizeof(proof.n_inputs));
    memcpy(&proof.used_inputs, used_inputs, 32);
    memcpy(&proof.data, data, 8224);

    /* Deserialize the 64-byte ephemeral input generators. */
    secp256k1_generator ephemeral_input_tags[n_ephemeral_input_tags];
    size_t idx;
    for (idx = 0; idx < n_ephemeral_input_tags; ++idx) {
        memcpy(&ephemeral_input_tags[idx].data, ephemeral_input_tags_data[idx], 64);
    }

    secp256k1_generator ephemeral_output_tag;
    memcpy(&ephemeral_output_tag.data, ephemeral_output_tag_data, 64);

    int result = secp256k1_surjectionproof_verify(ctx, &proof, ephemeral_input_tags, n_ephemeral_input_tags, &ephemeral_output_tag);
    secp256k1_context_destroy(ctx);
    return result;
}
|
izabala123/Nemoh
|
Nemoh_c/date.c
|
<reponame>izabala123/Nemoh<gh_stars>10-100
#include <time.h>
#include <stdio.h>
/* Print the NEMOH version/credit banner, appending the caller-supplied
 * compilation-date string. Called from Fortran, hence the
 * compiler-dependent symbol naming (gfortran appends a trailing
 * underscore; other compilers expect the upper-case name). */
#ifdef __GNUC__
void printcreditsc_(char *str) {
#else
void PRINTCREDITSC(char *str) {
#endif // GNU_C
/* Fortran character arguments are not NUL-terminated: truncate the
 * buffer at the first '.' to terminate it in place.
 * NOTE(review): this scan is unbounded and mutates the caller's buffer —
 * if no '.' is present it overruns; confirm all Fortran callers pass a
 * dot-terminated string. */
char *end = str;
while (*end != '.')
end++;
*end = '\0';
printf("NEMOH V1.0 - January 2014. Copyright 2014 Ecole Centrale de Nantes");
printf("\nNemoh Mercurial v115 compiled by the BEMRosetta project. %s", str);
}
/* Wall-clock time captured by PROGRESSINIT; baseline for the elapsed and
 * estimated-completion times printed by PROGRESS. */
time_t t0;
/* Total number of progress steps, set by PROGRESSTOTAL. */
int total;
/* Record the start time and print the current date/time stamp.
 * NOTE(review): the gfortran variant takes an (unused) string argument
 * while the non-GNU variant takes none — confirm this asymmetry matches
 * the Fortran call sites for each toolchain. */
#ifdef __GNUC__
void progressinit_(char *str) {
#else
void PROGRESSINIT() {
#endif
t0 = time(NULL);
struct tm tm = *localtime(&t0);
printf("\n%d/%02d/%02d %02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min);
}
/* Store the total step count used by PROGRESS to decide when the run is
 * complete (the Fortran side passes it as a float). */
#ifdef __GNUC__
void progresstotal_(float val) {
#else
void PROGRESSTOTAL(float val) {
#endif
total = (int)val;
}
/* Progress callback invoked from Fortran with the current step count. */
#ifdef __GNUC__
void progress_(float val) {
#else
void PROGRESS(float val) {
#endif
if ((int)val >= total) {
/* Run finished: report total wall-clock time as H:MM. */
time_t t = time(NULL);
double diff_t = difftime(t, t0);
int hours = (int)(diff_t/(60*60));
diff_t -= hours*(60*60);
int mins = (int)(diff_t/60);
diff_t -= mins*60;
printf("\nTotal elapsed time: %d:%02d", hours, mins);
} else {
/* Still running: linearly extrapolate the estimated completion time
 * from the fraction of steps done.
 * NOTE(review): the ". Done !." prefix on the *estimate* branch looks
 * like it belongs to the completion branch — confirm against the
 * console output this is meant to continue. */
time_t t = time(NULL);
double diff_t = difftime(t, t0);
double est_t = diff_t*total/val;
t = (time_t)(t0 + est_t);
struct tm tm = *localtime(&t);
printf(". Done !. ET: %d/%02d/%02d %02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min);
}
}
|
gorkaerana/tuplex
|
tuplex/core/include/DataSet.h
|
//--------------------------------------------------------------------------------------------------------------------//
// //
// Tuplex: Blazing Fast Python Data Science //
// //
// //
// (c) 2017 - 2021, Tuplex team //
// Created by <NAME> on 1/1/2021 //
// License: Apache 2.0 //
//--------------------------------------------------------------------------------------------------------------------//
#ifndef TUPLEX_DATASET_H
#define TUPLEX_DATASET_H
#include <memory>
#include "Schema.h"
#include "UDF.h"
#include "Partition.h"
#include "Row.h"
#include "Context.h"
#include <ExceptionCodes.h>
#include "Defs.h"
#include <limits>
namespace tuplex {
class DataSet;
class Context;
class LogicalOperator;
class ResultSet;
class Partition;
class CacheOperator;
/*!
 * Default key/value options applied when writing CSV output.
 * @return map containing header / null_value / delimiter / quotechar defaults
 */
inline std::unordered_map<std::string, std::string> defaultCSVOutputOptions() {
    // Brace-initialized in one expression; contents are identical to
    // inserting each key individually.
    return {
        {"header", "true"},   // emit a header row
        {"null_value", ""},   // NULLs render as the empty string
        {"delimiter", ","},
        {"quotechar", "\""}
    };
}
/*!
 * Default output options for the Orc file format.
 * @return empty key-value map — the Orc writer currently takes no defaults
 */
inline std::unordered_map<std::string, std::string> defaultORCOutputOptions() {
    // No default options exist yet; an empty map means "writer defaults".
    return {};
}
// maybe CRTP (Curiously recurring template pattern may be used here)
// but likely it is going to be difficult
// since the structure is already quite complicated
class DataSet {
friend class Context;
friend class CacheOperator; // is allowed to change the schema
protected:
int _id;
Schema _schema;
Context *_context;
LogicalOperator *_operator;
// one or more (materialized) partitions belong to a dataset
// they are used to store the data
// this vector orders the partitions
// some of them may also be error partitions (later feature, right now simple & straight processing)
// if a DataSet has zero partitions, that simply means it has not been yet materialized or executed.
std::vector<Partition *> _partitions;
bool _cached; // indicates whether all partitions are in main memory
std::vector<std::string> _columnNames;
void setSchema(const Schema& schema) { _schema = schema; }
bool allowTypeUnification() const;
public:
DataSet() : _id(-1),
_schema(Schema::UNKNOWN),
_context(nullptr),
_operator(nullptr) {}
DataSet(Context &context) : _id(-1),
_schema(Schema::UNKNOWN),
_context(&context),
_operator(nullptr) {}
virtual ~DataSet();
// NOTE: When defining new functions here, make sure to override them in ErrorDataSet!
/*!
* add a map operation T -> S to the logical graph
* @param udf UDF to apply which yields the trafo T -> S ultimately
* @return DataSet after the map operation
*/
virtual DataSet &map(const UDF &udf);
/*!
* add a filter operation with a UDF T -> bool to the logical graph.
* Tuples for which the UDF returns true are kept, the others discarded.
* @param udf UDF which returns bool
* @return DataSet after filter operation
*/
virtual DataSet &filter(const UDF &udf);
/*!
* add a resolve operation and apply it to all tuples with exception code ec.
* @param ec Apply UDF to tuples which resulted in an exception code of type ec
* @param udf UDF to apply. Type needs to be input of parent and output of parent logical node.
* @return DataSet after error resolution.
*/
virtual DataSet &resolve(const ExceptionCode &ec, const UDF &udf);
/*!
* ignore the following exception from the operator before
* @param ec
* @return
*/
virtual DataSet &ignore(const ExceptionCode &ec);
/*!
* action that displays tuples as nicely formatted table
* @param numRows how many rows to print, i.e. top numRows are printed.xs
* @param os ostream where to print table to
*/
virtual void show(const int64_t numRows = -1, std::ostream &os = std::cout);
// named dataset management functions
/*!
* map Column using a UDF
* @param columnName column name to map
* @param udf UDF to execute
* @return Dataset
*/
virtual DataSet &mapColumn(const std::string &columnName, const UDF &udf);
/*!
* selects a subset of columns from dataset
* @param columnNames
* @return Dataset
*/
virtual DataSet &selectColumns(const std::vector<std::string> &columnNames);
/*!
* selects a subset of columns from dataset using integer indices.
* @param columnIndices
* @return Dataset or Errordataset
*/
virtual DataSet &selectColumns(const std::vector<size_t> &columnIndices);
/*!
* rename column in dataframe, string based. throws error if oldColumnName doesn't exist.
* @param oldColumnName
* @param newColumnName
* @return Dataset or Errordataset
*/
virtual DataSet &renameColumn(const std::string &oldColumnName, const std::string &newColumnName);
/*!
* rename column based on position in dataframe. throws error if invalid index is supplied.
* @param index position, 0 <= index < #columns
* @param newColumnName new column name
* @return Dataset or Errordataset
*/
virtual DataSet &renameColumn(int index, const std::string& newColumnName);
/*!
* add a new column to dataset, whose result is defined through the given udf
* @param columnName
* @param udf
* @return
*/
virtual DataSet &withColumn(const std::string &columnName, const UDF &udf);
/*!
* performs unique aggregate (i.e. can be also used for duplicate removal)
* @return Dataset
*/
virtual DataSet& unique();
/*!
* aggregate function
* @param aggCombine function has signature lambda a, b: ... and needs to yield aggInitial type
* @param aggUDF function has signature lambda a, x: ... where a is the aggregate type and x is a row
* @param aggInitial initial value of the aggregate and with what to initialize it.
* @return DataSet
*/
virtual DataSet& aggregate(const UDF& aggCombine, const UDF& aggUDF, const Row& aggInitial);
/*!
* aggregate by key function
* @param aggCombine function has signature lambda a, b: ... and needs to yield aggInitial type
* @param aggUDF function has signature lambda a, x: ... where a is the aggregate type and x is a row
* @param aggInitial initial value of the aggregate and with what to initialize it.
* @param keyColumns set of columns to group by when aggregating
* @return DataSet
*/
virtual DataSet& aggregateByKey(const UDF& aggCombine, const UDF& aggUDF, const Row& aggInitial, const std::vector<std::string> &keyColumns);
/*!
* return column names of dataset
* @return
*/
std::vector<std::string> columns() const { return _columnNames; }
/*!
* get the normal case outputschema of the underlying operator
*/
Schema schema() const;
/*!
* How many columns dataset has (at least 1)
* @return number of columns
*/
size_t numColumns() const;
/*!
* join dataset with other dataset, either based on (K, V), (K, W) layout or via column names(equijoin)
* @param other
* @param leftColumn
* @param rightColumn
* @return DataSet
*/
virtual DataSet &join(const DataSet &other, option<std::string> leftColumn, option<std::string> rightColumn,
option<std::string> leftPrefix = std::string(),
option<std::string> leftSuffix = std::string(),
option<std::string> rightPrefix = std::string(),
option<std::string> rightSuffix = std::string());
/*!
* join dataset with other dataset, either based on (K, V), (K, W) layout or via column names(left outer join,
* i.e. all rows of the left dataset will be in the final result. NULL values will be filled in if there is no match for the right column)
* @param other
* @param leftColumn
* @param rightColumn
* @return DataSet
*/
virtual DataSet &leftJoin(const DataSet &other, option<std::string> leftColumn, option<std::string> rightColumn,
option<std::string> leftPrefix = std::string(),
option<std::string> leftSuffix = std::string(),
option<std::string> rightPrefix = std::string(),
option<std::string> rightSuffix = std::string());
/*!
* materializes stage in main-memory. Can be used to reuse partitions e.g.
* @param memoryLayout
* @return
*/
virtual DataSet& cache(const Schema::MemoryLayout& memoryLayout, bool storeSpecialized);
DataSet& cache(bool storeSpecialized=true) { return cache(Schema::MemoryLayout::ROW, storeSpecialized); }
/*!
* helper setter without checks, to update internal column names.
*/
void setColumns(const std::vector<std::string> &columnNames) { _columnNames = columnNames; }
// these are actions that cause execution
virtual std::shared_ptr<ResultSet> collect(std::ostream &os = std::cout);
virtual std::shared_ptr<ResultSet> take(int64_t numElements, std::ostream &os = std::cout);
virtual std::vector<Row> collectAsVector(std::ostream &os = std::cout);
virtual std::vector<Row> takeAsVector(int64_t numElements, std::ostream &os = std::cout);
/*!
* saves dataset to file. There are multiple options to control the behavior
* ==> 1.) files can be split across multiple ones. Specify number of files to split rows to
* ==> 2.) files can be split to max size each (sharding), specify shard size
* ==> 3.) URI can be one uri and tuplex auto creates numbering scheme, users may specify a naming function.
* @param fmt Output file format of files
* @param uri URI of the file (if tuplex should save to multiple files, then this will create a folder, where tuplex places part files.
* @param udf A udf to name the parts, i.e. will be called with integer for part number and should return string on where to store the file. If empty, this is ignored.
* @param fileCount number of files to split to. If 0, this is deactivated
* @param shardSize shardSize in bytes, if set to 0 not active and Tuplex defaults to splitting files after tasks.
* @param limit max number of rows to output.
* @param os
*/
virtual void tofile(FileFormat fmt,
const URI &uri,
const UDF &udf,
size_t fileCount,
size_t shardSize,
const std::unordered_map<std::string, std::string> &outputOptions,
size_t limit = std::numeric_limits<size_t>::max(),
std::ostream &os = std::cout);
/*!
* saves dataset as a csv file.
* @param uri URI of the file (if tuplex should save to multiple files, then this will create a folder, where tuplex places part files.
* @param outputOptions Options for writing the csv file.
* @param os
*/
void tocsv(const URI &uri,
const std::unordered_map<std::string, std::string> &outputOptions = defaultCSVOutputOptions(),
std::ostream &os = std::cout) {
// empty udf...
tofile(FileFormat::OUTFMT_CSV, uri, UDF(""), 0, 0, outputOptions, std::numeric_limits<size_t>::max(),
os);
}
/*!
* saves dataset as an orc file.
* supported options:
* - "columnNames" -> column names as csv string.
* @param uri URI of the file (if tuplex should save to multiple files, then this will create a folder, where tuplex places part files.
* @param outputOptions Options for writing the orc file.
* @param os
*/
void toorc(const URI &uri,
const std::unordered_map<std::string, std::string> &outputOptions = defaultORCOutputOptions(),
std::ostream &os = std::cout) {
#ifndef BUILD_WITH_ORC
throw std::runtime_error(MISSING_ORC_MESSAGE);
#endif
tofile(FileFormat::OUTFMT_ORC, uri, UDF(""), 0, 0, outputOptions, std::numeric_limits<size_t>::max(),
os);
}
// some handy functions to complete the API:
// --> input/output types
// --> exceptions: I.e. somehow it should be possible to retrieve the exception rows + types?
bool cached() const { return _cached; }
virtual int getID() const { return _id; }
std::vector<Partition *> &getPartitions() { return _partitions; }
Context *getContext() const { return _context; }
LogicalOperator* getOperator() const { return _operator; }
virtual bool isError() const { return false; }
virtual bool isEmpty() const;
};
}
#endif //TUPLEX_DATASET_H
|
gorkaerana/tuplex
|
tuplex/codegen/include/IFailable.h
|
//--------------------------------------------------------------------------------------------------------------------//
// //
// Tuplex: Blazing Fast Python Data Science //
// //
// //
// (c) 2017 - 2021, Tuplex team //
// Created by <NAME> on 1/1/2021 //
// License: Apache 2.0 //
//--------------------------------------------------------------------------------------------------------------------//
#ifndef TUPLEX_IFAILABLE_H
#define TUPLEX_IFAILABLE_H
#include <Base.h>
#include <Logger.h>
/*!
* error handling for unsupported language features (i.e. valid python UDF codes but not supported yet in Tuplex)
*/
// Errors raised during UDF compilation for Python constructs that are valid
// but not (yet) supported by Tuplex's type system.
enum class CompileError {
    COMPILE_ERROR_NONE, // sentinel: no error
    TYPE_ERROR_LIST_OF_LISTS,
    TYPE_ERROR_RETURN_LIST_OF_TUPLES,
    TYPE_ERROR_RETURN_LIST_OF_DICTS,
    TYPE_ERROR_RETURN_LIST_OF_LISTS,
    TYPE_ERROR_RETURN_LIST_OF_MULTITYPES,
    TYPE_ERROR_LIST_OF_MULTITYPES,
    TYPE_ERROR_ITER_CALL_WITH_NONHOMOGENEOUS_TUPLE,
    TYPE_ERROR_ITER_CALL_WITH_DICTIONARY,
    TYPE_ERROR_RETURN_ITERATOR,
    TYPE_ERROR_NEXT_CALL_DIFFERENT_DEFAULT_TYPE,
    TYPE_ERROR_MIXED_ASTNODETYPE_IN_FOR_LOOP_EXPRLIST, // exprlist contains a mix of tuple/list of identifiers and single identifier
    TYPE_ERROR_INCOMPATIBLE_TYPES_FOR_IS_COMPARISON, // incompatible types for `is` comparison (one of the types is not BOOLEAN/NULLVALUE).
};
/*!
* helper interface/trait especially useful for visitors that may or may not fail
* when executed. Provides a silent and an explicit mode for logging errors/warnings/etc.
*/
class IFailable {
private:
    bool _succeeded;     // false once error() has been called
    bool _silentMode; // don't issue warnings
    std::vector<std::tuple<std::string, std::string>> _messages; //! stores messages in silent mode
    std::vector<CompileError> _compileErrors; // unsupported-feature errors collected so far

protected:
    /*!
     * logs an error. this will automatically set the status to failure
     * @param message
     * @param logger optional logger to specify
     */
    virtual void error(const std::string& message, const std::string& logger="");

    // Logs the error, then aborts by throwing — use for unrecoverable states.
    virtual void fatal_error(const std::string& message, const std::string& logger="") {
        error(message, logger);
        throw std::runtime_error(message);
    }

    // Restore the pristine state: success flag set, all buffered messages
    // and compile errors discarded.
    void reset() {
        _succeeded = true;
        _messages.clear();
        _compileErrors.clear();
    }

    /*!
     * add all CompileErrors in err to _compileErrors
     * @param err
     */
    void addCompileErrors(const std::vector<CompileError> &err) {_compileErrors.insert(_compileErrors.begin(), err.begin(), err.end());}

    /*!
     * add single CompileError to _compileErrors
     * @param err
     */
    void addCompileError(const CompileError& err) {_compileErrors.push_back(err);}

public:
    IFailable(bool silentMode=false) : _succeeded(true), _silentMode(silentMode) {}

    bool failed() const { return !_succeeded;}
    bool succeeded() const { return _succeeded; }

    // NOTE(review): despite the name, this toggles *silent* mode — confirm
    // callers expect that semantic.
    void setFailingMode(bool silentMode) { _silentMode = silentMode; }

    /*!
     * if operated in silent mode, this allows to log out all messages (deletes them from internal buffer)
     */
    void logMessages();

    // Returns a copy of the buffered (logger, message) tuples.
    std::vector<std::tuple<std::string, std::string>> getErrorMessages() const { return _messages; }

    /*!
     * return all type errors (errors generated from unsupported types) encountered for the current class instance.
     * @return
     */
    std::vector<CompileError> getCompileErrors() {return _compileErrors;}

    /*!
     * return CompileError of returning list of lists/tuples/dicts/multi-types. If no such error exists, return COMPILE_ERROR_NONE.
     * @return
     */
    CompileError getReturnError();

    /*!
     * clear all compile errors (errors generated from unsupported language features) for the current class instance.
     */
    void clearCompileErrors() {_compileErrors.clear();}

    /*!
     * return detailed error message of a CompileError.
     * @param err
     * @return
     */
    std::string compileErrorToStr(const CompileError& err);
};
#endif //TUPLEX_IFAILABLE_H
|
zethon/ttvg
|
src/DelayedSound.h
|
<filename>src/DelayedSound.h
#pragma once
#include <memory>
#include <SFML/Audio.hpp>
#include "ResourceManager.h"
namespace tt
{
class DelayedSound;
using DelayedSoundPtr = std::unique_ptr<DelayedSound>;

/*!
 * A sound that rate-limits its own playback: play() is expected to respect
 * the configured delay between consecutive plays (see play() impl).
 */
class DelayedSound
{
public:
    // Factory: load the named sound buffer through the ResourceManager and
    // configure the play delay; returns null-equivalent on failure (TODO
    // confirm failure behavior in the .cpp).
    static DelayedSoundPtr create(const std::string& name,
                                  float delay,
                                  ResourceManager& resources);

    DelayedSound(const sf::SoundBuffer& buffer);
    DelayedSound() = default;

    // Delay between plays, in seconds (presumably — sf::Clock based; confirm).
    float delay() const { return _delay; }
    void setDelay(float v) { _delay = v; }

    void setVolume(float v) { _thesound.setVolume(v); }

    void play();

private:
    sf::Sound _thesound;
    sf::Clock _clock;   // measures time since last play
    float _delay = 0.f;
};
} // namesapce tt
|
zethon/ttvg
|
src/Player.h
|
#pragma once
#include <boost/signals2.hpp>
#include "AnimatedSprite.h"
#include "Item.h"
namespace tt
{
class Player;
using PlayerPtr = std::shared_ptr<Player>;

/*!
 * The player avatar: an AnimatedSprite extended with inventory, health and
 * cash balance, plus global-coordinate bounding-box helpers. Exposed to Lua
 * via LuaMethods.
 */
class Player : public AnimatedSprite
{

public:
    static constexpr auto CLASS_NAME = "Player";
    static const struct luaL_Reg LuaMethods[];

    using AnimatedSprite::AnimatedSprite;

    // Global-coordinate bounding box accessors/mutators.
    sf::Vector2f getGlobalCenter() const;
    float getGlobalLeft() const;
    float getGlobalRight() const;
    float getGlobalTop() const;
    float getGlobalBottom() const;
    void setGlobalLeft(float left);
    void setGlobalRight(float right);
    void setGlobalTop(float top);
    void setGlobalBottom(float bottom);

    // Inventory management (by name or by item pointer).
    void addItem(ItemPtr item);
    bool hasItem(const std::string& s);
    bool hasItem(ItemPtr item);
    void removeItem(const std::string& s);
    void removeItem(ItemPtr item);
    ItemPtr getItemByName(const std::string& name);
    const std::vector<ItemPtr>& getInventory() const;

    std::uint32_t health() const { return _health; }
    // NOTE(review): takes a signed value while _health is unsigned —
    // confirm negative inputs are clamped in the implementation.
    void setHealth(std::int32_t h);
    void reduceHealth(std::uint32_t amount);
    void increaseHealth(std::uint32_t amount);
    // fired whenever health changes via the setters above
    boost::signals2::signal<void(std::uint32_t health)> onSetHealth;

    float balance() const { return _cash; }
    void setBalance(float c);
    // fired whenever the cash balance changes
    boost::signals2::signal<void(float cash)> onSetCash;

private:
    std::vector<ItemPtr> _inventory;
    std::uint32_t _health = 100;  // starting health
    float _cash = 40.0f;          // starting balance
};
} // namespace tt
|
zethon/ttvg
|
src/ResourceManager.h
|
#pragma once
#include <optional>
#include <iostream>
#include <boost/filesystem.hpp>
#include <nlohmann/json.hpp>
#include <SFML/Graphics/Font.hpp>
#include <SFML/Audio.hpp>
namespace nl = nlohmann;
namespace tt
{
/*!
 * Central loader/cache for game assets rooted at a resource folder.
 * Textures and sound buffers are cached by relative path; fonts, music and
 * JSON are loaded on demand via the template helpers.
 */
class ResourceManager
{
    using TextureCache = std::map<std::string, sf::Texture>;
    using SoundCache = std::map<std::string, sf::SoundBuffer>;

    boost::filesystem::path _resourceFolder; // root all names are resolved against
    TextureCache _textcache;
    SoundCache _soundcache;

public:
    explicit ResourceManager(const boost::filesystem::path& path);

    /// \brief Loads a texture into the cache
    ///
    /// \param name The relative path of the texture from the
    /// resource folder (e.g. "items/sax.png").
    ///
    /// \ see getTexture
    ///
    /// \return Pointer to the object in the container, or null
    ///
    sf::Texture* cacheTexture(const std::string& name);

    /// \brief Returns a pointer to the texture
    ///
    /// \param name The relative path of the texture
    /// (e.g. "items/sax.png").
    ///
    /// \ see cacheTexture
    ///
    /// \return A pointer to the texture or NULL
    ///
    sf::Texture* getTexture(const std::string& name);

    void clearTextureCache() { _textcache.clear(); }

    sf::SoundBuffer* cacheSound(const std::string& name);
    sf::SoundBuffer* getSound(const std::string& name);
    void clearSoundCache() { _soundcache.clear(); }

    void clearCaches();

    /// Load a resource by value (T must provide loadFromFile, e.g. sf::Font).
    /// Returns an empty optional on failure. Not cached.
    template<typename T>
    std::optional<T> load(const std::string& name)
    {
        auto filepath = _resourceFolder / name;
        if (T item; item.loadFromFile(filepath.string()))
        {
            return item;
        }
        return {};
    }

    /// Load a resource into a shared_ptr; null on failure. Not cached.
    template<typename T>
    std::shared_ptr<T> loadPtr(const std::string& name)
    {
        auto filepath = _resourceFolder / name;
        std::shared_ptr<T> item = std::make_shared<T>();
        if (item->loadFromFile(filepath.string()))
        {
            return item;
        }
        return {};
    }

    /// Load a resource into a unique_ptr; null on failure. Not cached.
    template<typename T>
    std::unique_ptr<T> loadUniquePtr(const std::string& name)
    {
        auto filepath = _resourceFolder / name;
        std::unique_ptr<T> item = std::make_unique<T>();
        if (item->loadFromFile(filepath.string()))
        {
            return item;
        }
        return {};
    }

    /// Like loadUniquePtr but for streamed resources that use openFromFile
    /// (e.g. sf::Music); null on failure.
    template<typename T>
    std::unique_ptr<T> openUniquePtr(const std::string& name)
    {
        auto filepath = _resourceFolder / name;
        std::unique_ptr<T> item = std::make_unique<T>();
        if (item->openFromFile(filepath.string()))
        {
            return item;
        }
        return {};
    }

    // Resolve a relative resource name to an absolute filename.
    std::string getFilename(const std::string& name);

    /// \brief Return a loaded JSON file
    ///
    /// \param name Filename and relative path of the JSON file
    /// (e.g. "maps/tucson.json").
    ///
    /// \return An optional with the loaded JSON object if loaded
    ///
    std::optional<nl::json> getJson(const std::string& name);
};
} // namespace tt
|
zethon/ttvg
|
src/Screen.h
|
#pragma once
#include <memory>
#include <boost/any.hpp>
#include <SFML/Graphics.hpp>
#include "ResourceManager.h"
#include "IUpdateable.h"
namespace tt
{
// Identifiers for the game's top-level screens (used when requesting a
// screen change via ScreenAction).
constexpr std::uint16_t SCREEN_SPLASH = 10;
constexpr std::uint16_t SCREEN_INTRO = 20;
constexpr std::uint16_t SCREEN_SHART = 30;
constexpr std::uint16_t SCREEN_GAME = 40;
constexpr std::uint16_t SCREEN_GAMEOVER = 50;

using DrawablePtr = std::shared_ptr<sf::Drawable>;

// What a screen asks the owning loop to do after handling an event/tick.
enum class ScreenActionType
{
    NONE = 0,
    EXIT_GAME,
    CHANGE_SCREEN,
    CHANGE_SCENE,
    CLOSE_MODAL
};

// Requested action plus an optional type-erased payload (e.g. the target
// screen id for CHANGE_SCREEN).
struct ScreenAction
{
    ScreenActionType type;
    boost::any data;
};

// Result of event polling: whether the event was consumed, and any
// follow-up action for the caller.
struct PollResult
{
    bool handled = false;
    ScreenAction action;
};
/*!
 * Base class for all top-level screens: owns lists of drawable and
 * updateable objects, renders them each frame, and reports user/system
 * events back to the main loop via PollResult/ScreenAction.
 */
class Screen
{

public:
    Screen(ResourceManager& res, sf::RenderTarget& target);
    virtual ~Screen() = default;

    void addDrawable(DrawablePtr drawable);
    void clearDrawable();
    const std::vector<DrawablePtr>& getDrawables() const
    {
        return _objects;
    }

    void addUpdateable(IUpdateablePtr updateable);
    void removeUpdateable(IUpdateablePtr updateable);
    void clearUpdateable();

    // iterate all draw'able obects
    virtual void draw();

    // poll system/user events
    [[maybe_unused]] virtual PollResult poll(const sf::Event&);

    // update positions and state
    [[maybe_unused]] virtual ScreenAction timestep();

    // clean up any resources
    virtual void close()
    {
        clearDrawable();
        clearUpdateable();
    }

    // Rectangle of world space currently visible through the render
    // target's view (center/size converted to a top-left anchored rect).
    sf::FloatRect getObservableRect() const
    {
        const auto view = _window.getView();
        const auto size = view.getSize();
        const auto center = view.getCenter();
        auto x = center.x - (size.x / 2);
        auto y = center.y - (size.y / 2);
        return { x, y, size.x, size.y };
    }

    void setVisible(bool var) { _visible = var; }
    bool visible() const { return _visible; }

    sf::RenderTarget& window() { return _window; }
    ResourceManager& resources() { return _resources; }

protected:
    std::vector<DrawablePtr> _objects;        // drawn in insertion order
    std::vector<IUpdateablePtr> _updateables; // ticked each timestep
    ResourceManager& _resources;
    sf::RenderTarget& _window;
    bool _visible = true;
};
} // namespace tt
|
zethon/ttvg
|
src/Background.h
|
<reponame>zethon/ttvg
#pragma once
#include <cmath>
#include <set>
#include <memory>
#include <optional>
#include <nlohmann/json.hpp>
#include <SFML/Graphics.hpp>
#include "TTUtils.h"
#include "Tiles.hpp"
#include "Transition.h"
#include "Zone.h"
namespace nl = nlohmann;
namespace tt
{
class ResourceManager;
class Background;
using BackgroundPtr = std::unique_ptr<Background>;
using BackgroundSharedPtr = std::shared_ptr<Background>;
/*!
 * Scene background sprite. Wraps the map texture plus its JSON description
 * (tile size, zones, camera behavior) and provides tile <-> global
 * coordinate conversions via the tiles:: helpers.
 */
class Background : public sf::Sprite
{
    // Strict weak ordering so zones can live in a std::set:
    // order by left edge, then by top edge.
    struct zone_compare
    {
        bool operator()(const Zone& z1, const Zone& z2) const
        {
            auto lhs = z1.rect;
            auto rhs = z2.rect;
            if (lhs.left == rhs.left)
            {
                return lhs.top < rhs.top;
            }
            return lhs.left < rhs.left;
        }
    };

    using ZoneSet = std::set<Zone, zone_compare>;

public:
    enum class CameraType { FIXED, FOLLOW };

    Background(std::string_view name, ResourceManager& resmgr, sf::RenderTarget& target);
    Background(std::string_view name, ResourceManager& resmgr, sf::RenderTarget& target, const sf::Vector2f& tilesize);

    sf::FloatRect getWorldTileRect() const;

    tt::Tile getTileFromGlobal(const sf::Vector2f& global) const
    {
        return tiles::getTileFromGlobal(global, tilesize(), getScale());
    }

    tt::Tile getTileFromGlobal(float x, float y)
    {
        return this->getTileFromGlobal(sf::Vector2f{x,y});
    }

    sf::Vector2f getGlobalFromTile(const tt::Tile& tile) const
    {
        return tiles::getGlobalFromTile(tile, tilesize(), getScale());
    }

    // NOTE(review): declared to return tt::Tile but forwards to the
    // overload returning sf::Vector2f, relying on an implicit conversion;
    // the return type was likely meant to be sf::Vector2f — confirm.
    tt::Tile getGlobalFromTile(float x, float y)
    {
        return this->getGlobalFromTile(sf::Vector2f{x,y});
    }

    // Global coordinates of the center point of the given tile.
    sf::Vector2f getGlobalCenterFromTile(const sf::Vector2f& tile) const
    {
        auto[tilex, tiley] = tilesize();
        auto[scalex, scaley] = getScale();
        auto pos = getGlobalFromTile(tile);
        pos.x += (tilex * scalex) / 2;
        pos.y += (tiley * scaley) / 2;
        return pos;
    }

    sf::Vector2f tilesize() const { return _tilesize; }

    nl::json& json() { return *_json; }

    // BUGFIX: previously implemented as
    //   return const_cast<const nl::json&>(json());
    // Inside a const member function the call json() resolves to this same
    // const overload, recursing infinitely (stack overflow on first use).
    // Dereference the owned pointer directly instead.
    const nl::json& json() const { return *_json; }

    std::string mapname() const { return _mapname; }

    TileInfo getTileInfo(const sf::Vector2f& v);

    CameraType cameraType() const { return _cameraType; }

protected:
    std::unique_ptr<sf::Texture> _texture;
    ZoneSet _zones;

private:
    void initBackground(const sf::RenderTarget& target);
    void initZones();

    sf::Vector2f _tilesize;
    std::unique_ptr<nl::json> _json;
    std::string _mapname;
    CameraType _cameraType = CameraType::FIXED;
};
} // namespace tt
|
zethon/ttvg
|
src/ItemFactory.h
|
<gh_stars>1-10
#pragma once
#include <memory>
#include <nlohmann/json.hpp>
#include "ResourceManager.h"
namespace nl = nlohmann;
namespace tt
{
/*!
 * Creates Item instances by name, loading their definitions through the
 * ResourceManager. Exposed to Lua via LuaMethods.
 * NOTE(review): ItemPtr/ItemCallbacks are used but "Item.h" is not
 * included by this header — presumably picked up transitively; confirm.
 */
class ItemFactory
{

    ResourceManager& _resources;

public:
    static constexpr auto CLASS_NAME = "ItemFactory";
    static const struct luaL_Reg LuaMethods[];

    ItemFactory(ResourceManager& resMgr);

    ItemPtr createItem(const std::string& name,
                       const ItemCallbacks& callbacks);

    // Convenience overload: create with no callbacks attached.
    ItemPtr createItem(const std::string& name)
    {
        return createItem(name, ItemCallbacks{});
    }
};
} // namespace tt
|
zethon/ttvg
|
src/Vehicle.h
|
#pragma once
#include <vector>
#include <SFML/Graphics.hpp>
#include <SFML/Audio.hpp>
#include "GameTypes.h"
#include "Path.hpp"
#include "AnimatedSprite.h"
#include "Intersection.h"
#include "Tiles.hpp"
namespace tt
{
class Background;
using BackgroundSharedPtr = std::shared_ptr<Background>;
class Vehicle;
using VehiclePtr = std::shared_ptr<Vehicle>;
/*!
 * A vehicle that follows a Path across the Background, can block/be
 * blocked, damages the player on contact, and can honk.
 */
class Vehicle : public AnimatedSprite
{

public:
    // Result codes returned by timestep().
    enum TimeStep
    {
        NOOP = 0,
        DELETE_VEHICLE = 1  // vehicle finished its path and should be removed
    };

    enum State
    {
        MOVING,
        STOPPED
    };

    Vehicle(const sf::Texture& texture, const sf::Vector2i& size, BackgroundSharedPtr bg);

    std::uint16_t timestep() override;

    // True if moving into `point` would collide with something.
    bool isBlocked(const sf::FloatRect& point);

    State vehicleState() const { return _state; }
    void setVehicleState(State val);

    void setPath(const Path& path);
    const Path& path() const { return _path; }
    // non-const accessor implemented via the const overload
    Path& path() { return const_cast<Path&>((static_cast<const Vehicle&>(*this)).path()); }

    void setSpeed(float v) { _speed = v; }
    float speed() const { return _speed; }

    void setDamage(std::uint16_t v) { _damage = v; }
    std::uint16_t damage() const { return _damage; }

    Direction direction() const { return _direction; }

    tt::Tile currentTile() const;

    // NOTE(review): dereferences v unconditionally — passing nullptr
    // crashes; confirm callers never do.
    void setHornSound(sf::SoundBuffer* v)
    {
        _hornbuffer = v;
        _hornsound.setBuffer(*_hornbuffer);
    }

    void playHornSound()
    {
        _hornsound.play();
    }

    void move();

private:
    void setDirection(std::uint32_t dir);

    sf::Clock _movementClock;  // throttles movement between timesteps
    BackgroundSharedPtr _bg;

    Path _path;
    std::vector<sf::Vector2f> _globalPoints;  // path points in global coords

    float _speed = 10.0f; // Pixels per timestep
    std::uint16_t _damage = 0;

    Direction _direction = DOWN; // Current direction of the object
    State _state = MOVING;
    bool _finishedPath = false;

    sf::SoundBuffer* _hornbuffer = nullptr; // non-owning; owned by ResourceManager
    sf::Sound _hornsound;
};
// True if `other` lies within `minDistance` ahead of `object` along
// `direction` (i.e. the path is blocked).
bool isPathBlocked(const sf::FloatRect& object, const sf::FloatRect& other,
                   Direction direction, float minDistance);
////////////////////////////////////////////////////////////
/// \brief Calculate the next position in the given direction
///
/// \param point Starting point (global coordinates)
/// \param direction Direction of movement
/// \param speed Speed in pixels
/// \param scale Sprite scale; the step is multiplied by it so movement
/// speed is independent of sprite scaling
///
/// \return The new global coordinates; `point` unchanged for an
/// unknown direction
///
////////////////////////////////////////////////////////////
template<typename V>
inline sf::Vector2<V> vehicleStepDirection(const sf::Vector2<V>& point, Direction direction, V speed, const Scale& scale)
{
    if (direction == Direction::UP)
    {
        return { point.x, point.y - (speed * scale.y) };
    }
    if (direction == Direction::DOWN)
    {
        return { point.x, point.y + (speed * scale.y) };
    }
    if (direction == Direction::LEFT)
    {
        return { point.x - (speed * scale.x), point.y };
    }
    if (direction == Direction::RIGHT)
    {
        return { point.x + (speed * scale.x), point.y };
    }
    // Unknown direction: stay put.
    return point;
}
} // namespace tt
|
zethon/ttvg
|
src/Scenes/Hud.h
|
#pragma once
#include "../Screen.h"
#include "../Player.h"
namespace tt
{
/*!
 * Heads-up display overlay: renders the current zone name, player health
 * and cash balance on top of the game screen.
 */
class Hud : public Screen
{
    sf::Font _statusFont;

    std::shared_ptr<sf::RectangleShape> _background;
    std::shared_ptr<sf::Text> _zoneText;
    std::shared_ptr<sf::Text> _healthText;
    std::shared_ptr<sf::Text> _balanceText;

    PlayerPtr _player;

public:
    // Defaults to a visible HUD.
    Hud(ResourceManager& resmgr, sf::RenderTarget& target)
        : Hud(resmgr, target, true)
    {}

    Hud(ResourceManager& resmgr, sf::RenderTarget& target, bool visible);

    void setZoneText(const std::string& zone);
    void setHealth(std::uint32_t health);
    void setBalance(float cash);
};
} // namespace
|
zethon/ttvg
|
src/Zone.h
|
#pragma once
#include <string>
#include <optional>
#include <lua/lua.hpp>
#include <nlohmann/json.hpp>
#include <SFML/Graphics.hpp>
#include "Transition.h"
namespace tt
{
/*!
 * A named rectangular region of the map, parsed from the map's JSON
 * (see from_json). May carry a scene transition and Lua callbacks.
 */
struct Zone
{
    static constexpr auto CLASS_NAME = "Zone";
    static const struct luaL_Reg LuaMethods[];

    // Names of Lua functions to invoke for zone events.
    struct Callbacks
    {
        std::string onSelect;
    };

    std::string name;
    std::string description;
    sf::FloatRect rect;  // zone bounds in map coordinates
    std::optional<Transition> transition;  // set if entering triggers a scene change
    Callbacks callbacks;
};

// Populate a Zone from its JSON map definition.
void from_json(const nl::json& j, Zone& z);
} // namespace
|
zethon/ttvg
|
src/Scenes/DescriptionText.h
|
#pragma once
#include "../Screen.h"
namespace tt
{
/*!
 * Small overlay screen that shows a single line of descriptive text on a
 * background strip (e.g. item/zone descriptions). Exposed to Lua.
 */
class DescriptionText : public Screen
{
    sf::Font _font;
    std::shared_ptr<sf::Text> _text;
    std::shared_ptr<sf::RectangleShape> _background;

public:
    static constexpr auto CLASS_NAME = "DescriptionText";
    static const struct luaL_Reg LuaMethods[];

    DescriptionText(ResourceManager& resmgr, sf::RenderTarget& target);

    void setText(const std::string& text);
    std::string text() const;
};
} // namespace
|
zethon/ttvg
|
src/TooterLogger.h
|
#pragma once
#include <memory>
#include <spdlog/spdlog.h>
#include <lua/lua.hpp>
namespace tt
{
namespace log
{

// Name of the application-wide root logger.
constexpr auto GLOBAL_LOGGER = "tt";

using SpdLogPtr = std::shared_ptr<spdlog::logger>;

// Access the root logger (GLOBAL_LOGGER).
[[maybe_unused]] SpdLogPtr rootLogger();

// Get-or-create a named logger.
SpdLogPtr initializeLogger(const std::string& name);

} // namespace log
namespace
{
int Logger_trace(lua_State* L)
{
auto logger = log::initializeLogger("LuaScript");
logger->trace(lua_tostring(L, 1));
return 0;
}
int Logger_debug(lua_State* L)
{
auto logger = log::initializeLogger("LuaScript");
logger->debug(lua_tostring(L, 1));
return 0;
}
int Logger_info(lua_State* L)
{
auto logger = log::initializeLogger("LuaScript");
logger->info(lua_tostring(L, 1));
return 0;
}
int Logger_warning(lua_State* L)
{
auto logger = log::initializeLogger("LuaScript");
logger->warn(lua_tostring(L, 1));
return 0;
}
int Logger_error(lua_State* L)
{
auto logger = log::initializeLogger("LuaScript");
logger->error(lua_tostring(L, 1));
return 0;
}
int Logger_critical(lua_State* L)
{
auto logger = log::initializeLogger("LuaScript");
logger->critical(lua_tostring(L, 1));
return 0;
}
} // namespace
// Function table exported to Lua as the global `Log` object
// (registered in initLua, GameScreen.h). `const` at namespace scope has
// internal linkage, so each including TU gets its own copy.
const struct luaL_Reg Logger_LuaMethods[] =
{
    {"trace", Logger_trace},
    {"debug", Logger_debug},
    {"info", Logger_info},
    {"warn", Logger_warning},
    {"error", Logger_error},
    {"critical", Logger_critical},
    {nullptr, nullptr}
};
} // namespace tt
|
zethon/ttvg
|
src/TTLua.h
|
<reponame>zethon/ttvg
#pragma once
#include <string>
#include <iostream>
#include <memory>
#include <any>
#include <vector>
#include <functional>
#include <optional>
#include <fmt/core.h>
#include <lua/lua.hpp>
namespace tt
{
// Debug helper: prints every value currently on the Lua stack to stdout,
// one line per slot, showing the absolute index, the relative (negative)
// index, the Lua type name and a printable representation of the value.
inline static void dumpstack(lua_State* L)
{
    int top = lua_gettop(L);
    for (int i = 1; i <= top; i++)
    {
        const std::string line = fmt::format("{:3}\t{:4}\t{:15}", i, ((i - top) - 1), luaL_typename(L, i));
        switch (lua_type(L, i))
        {
            case LUA_TNUMBER:
                std::cout << fmt::format("{}\t{}", line, lua_tonumber(L, i));
                break;
            case LUA_TSTRING:
                std::cout << fmt::format("{}\t'{}'", line, lua_tostring(L, i));
                break;
            case LUA_TBOOLEAN:
                // fmt has no "boolalpha" presentation type; the previous
                // "{:boolalpha}" spec threw fmt::format_error at runtime.
                // Formatting a bool with plain "{}" prints true/false.
                std::cout << fmt::format("{}\t{}", line, lua_toboolean(L, i) != 0);
                break;
            case LUA_TNIL:
                std::cout << fmt::format("{}\tnil", line);
                break;
            default:
                // tables, functions, userdata: print their address
                std::cout << fmt::format("{}\t{}", line, lua_topointer(L, i));
                break;
        }
        std::cout << '\n';
    }
}
// Fixed LUA_REGISTRYINDEX reference slots assigned by initLua (see the
// matching asserts in GameScreen.h): the game screen pointer is always
// the 3rd registry entry, the item factory the 4th.
constexpr auto GAMESCREEN_LUA_IDX = 3;
constexpr auto ITEMFACTORY_LUA_IDX = 4;
// Retrieves a T* previously stored as a full userdata (a T** payload) at
// Lua stack index 1, validating its metatable against T::CLASS_NAME.
// Raises a Lua error if the value is not userdata of that class.
template<typename T>
[[maybe_unused]] T* checkObject(lua_State* L)
{
    auto temp = static_cast<T**>(luaL_checkudata(L, 1, T::CLASS_NAME));
    return *temp;
}
// Same contract as checkObject: the userdata payload is a raw T**, so
// the `using SharedT = std::shared_ptr<T>` alias previously declared here
// was never used and has been removed.
// NOTE(review): if the intent was to store a std::shared_ptr<T> inside
// the userdata, the cast below would need to change as well -- confirm
// against the code that creates these userdata.
template<typename T>
[[maybe_unused]] T* checkSharedObject(lua_State* L)
{
    auto temp = static_cast<T**>(luaL_checkudata(L, 1, T::CLASS_NAME));
    return *temp;
}
// Creates and registers the metatable for ClassT, wiring
// metatable.__index = metatable so userdata of this class can invoke the
// functions listed in ClassT::LuaMethods as methods.
template<typename ClassT>
void registerLuaFunctions(lua_State* L)
{
    luaL_newmetatable(L, ClassT::CLASS_NAME);
    lua_pushstring(L, "__index");
    lua_pushvalue(L, -2); // push the metatable
    lua_settable(L, -3); // metatable.__index = metatable
    // this creates object-like methods by populating the table
    // on the stack with the function names/pointers
    // luaL_openlib(L, nullptr, ClassT::LuaMethods, 0);
    luaL_setfuncs(L, ClassT::LuaMethods, 0);
    // clear the stack
    lua_settop(L, 0);
}
// A Lua value paired with its Lua type tag (LUA_TNUMBER, LUA_TSTRING, ...)
// stored type-erased in a std::any.
using LuaArgPair = std::tuple<std::int32_t, std::any>;
using LuaValues = std::vector<LuaArgPair>;
using OptionalLuaValues = std::optional<LuaValues>;
// Wraps any arithmetic value as a (LUA_TNUMBER, lua_Number) argument
// pair for CallLuaFunction.
template <typename NumT,
    typename std::enable_if<std::is_arithmetic<NumT>::value>::type* = nullptr>
LuaArgPair MakeLuaArg(NumT x)
{
    return { LUA_TNUMBER, static_cast<lua_Number>(x) };
}
// Extracts the typed value from a LuaArgPair. Only the specializations
// declared below (bool, float, std::string) are supported; the primary
// template throws for any other type.
template<typename ValT>
ValT GetLuaValue(const LuaArgPair& v)
{
    throw std::runtime_error("unsupported Lua value");
}
template<>
bool GetLuaValue(const LuaArgPair& v);
template<>
float GetLuaValue(const LuaArgPair& v);
template<>
std::string GetLuaValue(const LuaArgPair& v);
// Invokes `function` inside the table named `sandbox` (a scene's Lua
// environment -- see loadSceneLuaFile in Scenes/Scene.h), passing `args`.
// Returns the call's results, or an empty optional on failure.
[[maybe_unused]] OptionalLuaValues CallLuaFunction(lua_State* L,
    std::string_view function,
    std::string_view sandbox,
    const LuaValues& args);
// Convenience overload for a single argument.
[[maybe_unused]] OptionalLuaValues CallLuaFunction(lua_State* L,
    std::string_view function,
    std::string_view sandbox,
    const LuaArgPair& arg);
// Convenience overload for no arguments.
[[maybe_unused]] OptionalLuaValues CallLuaFunction(lua_State* L,
    std::string_view function,
    std::string_view sandbox);
}
|
zethon/ttvg
|
src/GameOverScreen.h
|
#pragma once
#include "Screen.h"
namespace tt
{
// Terminal "game over" screen; poll() handles the input events on it.
class GameOverScreen : public Screen
{
    sf::Font _font; // font used for the screen's text
public:
    GameOverScreen(ResourceManager& res, sf::RenderTarget& target);
    PollResult poll(const sf::Event& e) override;
};
} // namespace tt
|
zethon/ttvg
|
src/Transition.h
|
#pragma once
#include <set>
#include <SFML/Graphics.hpp>
#include <nlohmann/json.hpp>
namespace nl = nlohmann;
namespace tt
{
// A point on the map that switches the player to another scene.
struct Transition
{
    sf::Vector2f position;   // location of the transition on the map
    bool enabled;
    std::string newscene;    // name of the scene to switch to
    std::string selectEvent; // Lua event fired on selection
    // Transitions are identified by position alone; the remaining fields
    // do not participate in equality. Declared const (the original was a
    // non-const member function) so it is callable on const objects and
    // from const contexts such as std::set lookups.
    bool operator==(const Transition& other) const
    {
        return position == other.position;
    }
};
// Strict weak ordering over Transition for ordered containers:
// lexicographic comparison of (position.x, position.y).
inline bool operator<(const Transition& lhs, const Transition& rhs)
{
    if (lhs.position.x != rhs.position.x)
    {
        return lhs.position.x < rhs.position.x;
    }
    return lhs.position.y < rhs.position.y;
}
void from_json(const nl::json& j, Transition& t);
} // namespace tt
|
zethon/ttvg
|
src/VehicleFactory.h
|
<reponame>zethon/ttvg
#pragma once
#include <memory>
#include <nlohmann/json.hpp>
#include <SFML/Audio.hpp>
#include "Path.hpp"
#include "ResourceManager.h"
#include "Intersection.h"
namespace nl = nlohmann;
namespace tt
{
class PathFactory;
using PathFactoryPtr = std::shared_ptr<PathFactory>;
class Vehicle;
using VehiclePtr = std::shared_ptr<Vehicle>;
// Per-vehicle-type configuration used by VehicleFactory to spawn traffic.
struct VehicleInfo
{
    // non-owning pointers; presumably owned by the ResourceManager -- confirm
    sf::Texture* texture = nullptr;
    sf::SoundBuffer* sound = nullptr; // may stay null (no sound)
    sf::Vector2f size;
    sf::Vector2f scale;
    sf::Vector2f speed; // the car's speed is randomly selected within this range
    // initialized to 0: previously indeterminate for a default-constructed
    // VehicleInfo, unlike the pointer members above
    std::uint16_t damage = 0;
};
// Creates Vehicle instances for a scene's traffic system, using vehicle
// definitions loaded from JSON and paths produced by a PathFactory.
class VehicleFactory
{
    BackgroundSharedPtr _background;
    ResourceManager& _resources;
    std::shared_ptr<PathFactory> _pathFactory; // supplies the path each vehicle follows
    std::vector<VehicleInfo> _vehicles;        // definitions loaded via loadVehicles()
    bool _highlighted = false;                 // highlight newly created vehicles
public:
    VehicleFactory(ResourceManager& resmgr, BackgroundSharedPtr bg);
    void setPathFactory(PathFactoryPtr pf) { _pathFactory = pf; }
    PathFactoryPtr pathFactory() { return _pathFactory; }
    void setHighlighted(bool b) { _highlighted = b; }
    bool highlighted() const { return _highlighted; }
    // Constructs a new vehicle; its speed is chosen within the configured
    // range (see VehicleInfo::speed).
    VehiclePtr createVehicle();
private:
    // Populates _vehicles from the given JSON configuration.
    void loadVehicles(const nl::json& json);
};
} // namespace tt
|
zethon/ttvg
|
src/Item.h
|
#pragma once
#include <vector>
#include <variant>
#include <optional>
#include <SFML/Graphics.hpp>
#include <nlohmann/json.hpp>
#include "AnimatedSprite.h"
namespace nl = nlohmann;
namespace tt
{
class Item;
using ItemPtr = std::shared_ptr<Item>;
// Item callbacks can be null or non-null and empty.
// If the callback is null, then it was not defined.
// If it is empty, then this denotes a configuration
// like: `"onPickup": ""` which might be used to
// override a default action with an empty action
struct ItemCallbacks
{
    // used when the item is picked up from the map
    std::optional<std::string> onPickup;
    // // used when a weapon is yielded or an instrument
    // // is played
    // std::optional<std::string> onUse;
    // // used when somethin is eaten, smoked, etc
    // std::optional<std::string> onConsume;
};
// Bit flags classifying what an Item can be used for; values are powers
// of two so they can be combined into a mask.
enum class ItemFlags : std::uint16_t
{
    NONE = 0x0000,
    CONSUMABLE = 0x0001,
    WEAPON = 0x0002,
    INSTRUMENT = 0x0004, // can be used for busking
};
// Placement/configuration data for one item instance, loaded from JSON
// (see from_json below).
struct ItemInfo
{
    std::string id;
    // a null x,y means that the coordinate was not specified,
    // and a value of -1 means it should be picked randomly
    std::optional<float> x;
    std::optional<float> y;
    std::optional<float> respawn; // respawn delay; absent means no respawn -- TODO confirm units
    ItemCallbacks callbacks;
};
// A collectible/interactive object placed in a scene: an animated sprite
// with identity, descriptive text and Lua callbacks. Exposed to Lua via
// CLASS_NAME / LuaMethods.
class Item : public AnimatedSprite
{
public:
    static constexpr auto CLASS_NAME = "Item"; // Lua metatable name
    static const struct luaL_Reg LuaMethods[]; // Lua method bindings
    Item( const std::string& id,
        const sf::Texture& texture,
        const sf::Vector2i& size );
    std::string getID() const;
    std::string getName() const;
    void setName(const std::string& s);
    std::string getDescription() const;
    void setDescription(const std::string& s);
    // whether the player is allowed to pick the item up
    bool isObtainable() const;
    void setObtainable(bool b);
    ItemCallbacks callbacks;
    void setInfo(const ItemInfo& info) { _itemInfo = info; }
    ItemInfo info() const { return _itemInfo; }
private:
    std::string _id;
    std::string _name;
    std::string _description;
    // NOTE(review): declared std::uint32_t while ItemFlags is uint16_t-
    // based; presumably holds ItemFlags bits -- confirm before narrowing
    std::uint32_t _flags = 0;
    bool _isObtainable = false;
    ItemInfo _itemInfo;
};
void from_json(const nl::json& j, ItemCallbacks& i);
void from_json(const nl::json& j, ItemInfo& i);
} // namespace tt
|
zethon/ttvg
|
src/Intersection.h
|
<reponame>zethon/ttvg
#pragma once
#include <string>
#include <optional>
#include <boost/spirit/home/x3.hpp>
#include <SFML/Graphics.hpp>
#include "GameTypes.h"
#include "TTUtils.h"
using namespace std::string_literals;
// Types of L and T intersections
//
// * ***
// * LO * TO
// *** *
// *** *
// * L90 *** T90
// * *
// *** *
// * L180 * T180
// * ***
// * *
// * L270 *** T270
// *** *
namespace tt
{
// Orientation variants of L- and T-shaped road intersections plus a full
// four-way crossing (see the ASCII diagrams above this namespace).
enum IntersectionType
{
    L0,
    L90,
    L180,
    L270,
    T0,
    T90,
    T180,
    T270,
    CROSS
};
// Number of lanes on one axis of an intersection.
enum LaneSize
{
    SINGLE,
    DOUBLE
};
// A tile at which a vehicle may change direction. `turn` is a Direction
// bitmask (see GameTypes.h); `decisionPoint` presumably marks turns that
// are optional rather than forced -- confirm in PathFactory.
struct TurningPoint
{
    sf::Vector2i point;
    std::uint32_t turn;
    bool decisionPoint = false;
};
using TurningPoints = std::vector<TurningPoint>;
// Expands an intersection at `origin` of the given type and lane widths
// into the set of turning points that implement it.
TurningPoints makeIntersection(
    const sf::Vector2i& origin,
    IntersectionType type,
    LaneSize h = LaneSize::SINGLE,
    LaneSize v = LaneSize::SINGLE);
// Computes the Direction bitmask for travel from `start` to `stop`:
// the horizontal and vertical components are evaluated independently,
// so a diagonal move sets two bits (e.g. RIGHT|DOWN).
template<typename V>
auto getDirection(const V& start, const V& stop)
{
    const auto [dx, dy] = stop - start;
    std::uint32_t result{ Direction::NONE };
    if (dy > 0)
    {
        result |= Direction::DOWN;
    }
    else if (dy < 0)
    {
        result |= Direction::UP;
    }
    if (dx > 0)
    {
        result |= Direction::RIGHT;
    }
    else if (dx < 0)
    {
        result |= Direction::LEFT;
    }
    return result;
}
namespace x3 = boost::spirit::x3;
// Boost.Spirit X3 parser for intersection descriptions of the form
//   "<x>, <y>, <type>, <h-lanes>, <v-lanes>"
// e.g. "10.0, 4.0, CROSS, single, double".
struct IntersectionParser
{
    // symbol table mapping textual names to IntersectionType values
    struct IntersectionType_ : x3::symbols<IntersectionType>
    {
        IntersectionType_()
        {
            add
                ("L0", IntersectionType::L0)
                ("L90", IntersectionType::L90)
                ("L180", IntersectionType::L180)
                ("L270", IntersectionType::L270)
                ("T0", IntersectionType::T0)
                ("T90", IntersectionType::T90)
                ("T180", IntersectionType::T180)
                ("T270", IntersectionType::T270)
                ("CROSS", IntersectionType::CROSS)
                ;
        }
    };
    // symbol table mapping "single"/"double" to LaneSize values
    struct Lane_ : x3::symbols<LaneSize>
    {
        Lane_()
        {
            add
                ("single", LaneSize::SINGLE)
                ("double", LaneSize::DOUBLE)
                ;
        }
    };
    // (origin, type, horizontal lanes, vertical lanes)
    using IntersectionHelper
        = std::tuple<sf::Vector2f, tt::IntersectionType, LaneSize, LaneSize>;
    // Parses [begin, end); returns an empty optional on parse failure.
    template<typename It>
    std::optional<IntersectionHelper> parse(It begin, It end)
    {
        // semantic action repacks the raw fusion sequence into the tuple
        static auto parser
            = x3::rule<class IntersectionParser_, IntersectionHelper>{}
            = (x3::float_ >> ',' >> x3::float_ >> ',' >> intersectionType_ >> ',' >> laneType_ >> ',' >> laneType_)
            [(
                [](auto& ctx)
                {
                    auto& attr = x3::_attr(ctx);
                    using boost::fusion::at_c;
                    sf::Vector2f pt{ static_cast<float>(at_c<0>(attr)), static_cast<float>(at_c<1>(attr)) };
                    x3::_val(ctx)
                        = IntersectionHelper{ pt, at_c<2>(attr), at_c<3>(attr), at_c<4>(attr) };
                }
            )];
        IntersectionHelper helper;
        bool result = phrase_parse(begin, end, parser, x3::ascii::space, helper);
        if (!result) return {};
        return helper;
    }
private:
    IntersectionType_ intersectionType_;
    Lane_ laneType_;
};
// Boost.Spirit X3 parser for turning-point descriptions of the form
//   "<x>, <y>, <direction>[, <decision-point>]"
// e.g. "12, 7, left, true". The trailing boolean is optional and
// defaults to false.
struct TurningPointParser
{
    // symbol table mapping direction names to Direction values
    struct Direction_ : x3::symbols<Direction>
    {
        Direction_()
        {
            add
                ("up", Direction::UP)
                ("down", Direction::DOWN)
                ("left", Direction::LEFT)
                ("right", Direction::RIGHT)
                ;
        }
    };
    // (origin, direction bitmask, decision point)
    using Edge
        = std::tuple<sf::Vector2f, std::uint32_t, bool>;
    // Parses [begin, end); returns an empty optional on parse failure.
    template<typename It>
    std::optional<TurningPoint> parse(It begin, It end)
    {
        static auto parser
            = x3::rule<class EdgeParser_, Edge>{}
            = (x3::float_ >> ',' >> x3::float_ >> ',' >> directionType_ >> -(',' >> x3::bool_))
            [(
                [](auto& ctx)
                {
                    auto& attr = x3::_attr(ctx);
                    using boost::fusion::at_c;
                    sf::Vector2f pt{ static_cast<float>(at_c<0>(attr)), static_cast<float>(at_c<1>(attr)) };
                    x3::_val(ctx)
                        = Edge{ pt,
                            static_cast<std::uint32_t>(at_c<2>(attr)),
                            at_c<3>(attr) ? *(at_c<3>(attr)) : false };
                }
            )];
        Edge helper;
        bool result = phrase_parse(begin, end, parser, x3::ascii::space, helper);
        if (!result) return {};
        auto[origin, direction, dp] = helper;
        return TurningPoint{ sf::Vector2i{origin}, direction, dp };
    }
private:
    Direction_ directionType_;
    // removed unused member `x3::rule<class EdgeParser_, Edge> parser_`:
    // parse() builds its own static rule and never referenced it
};
} // namespace tt
|
zethon/ttvg
|
src/Scenes/ModalWindow.h
|
#pragma once
#include <deque>
#include "../Screen.h"
namespace tt
{
class Player;
using PlayerPtr = std::shared_ptr<Player>;
// Kind of modal window a script may request; exported to Lua as the
// global `ModalType` table (see initLua in GameScreen.h).
enum class ModalType
{
    Default = 0,
    Messages,
    Options,
    Inventory
};
// Base class for blocking pop-up windows drawn over a parent Screen.
// exec() runs the modal until dismissed. Exposed to Lua via CLASS_NAME /
// LuaMethods.
class ModalWindow : public Screen
{
public:
    // Vertical placement of the window within the parent screen.
    enum class Alignment
    {
        TOP,
        CENTER,
        BOTTOM
    };
    static constexpr auto CLASS_NAME = "ModalWindow"; // Lua metatable name
    static const struct luaL_Reg LuaMethods[];        // Lua method bindings
    ModalWindow(Screen& screen);
    virtual ~ModalWindow() override = default;
    PollResult poll(const sf::Event& e) override;
    // Sets the window's body text.
    virtual void setText(const std::string& text);
    void setAlignment(ModalWindow::Alignment al);
    float width() const { return _background->getSize().x; }
    void setWidth(float width);
    float height() const { return _background->getSize().y; }
    void setHeight(float height);
    // Unchecked downcast helper for the Lua bindings; the caller is
    // responsible for T being the actual dynamic type.
    template<typename T>
    T downcast()
    {
        return static_cast<T>(this);
    }
    // Runs the modal loop until the window is dismissed.
    void exec();
protected:
    sf::Font _font;
    Alignment _alignment = Alignment::BOTTOM;
    Screen& _parent; // screen the modal is drawn over
    std::shared_ptr<sf::RectangleShape> _border;
    std::shared_ptr<sf::RectangleShape> _background;
    std::shared_ptr<sf::Text> _text;
};
////////////////////////////////////////////////////////////////////////////////////////////////
// Modal that pages through a queue of messages on successive keypresses.
class MessagesWindow : public ModalWindow
{
    std::deque<std::string> _messages; // pending messages, front shown first
public:
    static constexpr auto CLASS_NAME = "MessagesWindow"; // Lua metatable name
    static const struct luaL_Reg LuaMethods[];           // Lua method bindings
    MessagesWindow(Screen& parent);
    // Queues `message`; if nothing is displayed yet it becomes the
    // visible text immediately.
    void pushMessage(const std::string& message)
    {
        if (_text->getString().getSize() == 0)
        {
            _text->setString(message);
        }
        _messages.push_back(message);
    }
    PollResult poll(const sf::Event& e) override;
};
//////////////////////////////////////////////////////////////////////////////////////////////
// Modal presenting a header plus a selectable list of options; the
// chosen index (if any) is available via selection() after exec().
class OptionsWindow : public ModalWindow
{
public:
    using TextPtr = std::shared_ptr<sf::Text>;
    using Options = std::vector<TextPtr>;
    static constexpr auto CLASS_NAME = "OptionsWindow"; // Lua metatable name
    static const struct luaL_Reg LuaMethods[];          // Lua method bindings
    OptionsWindow(Screen& parent);
    PollResult poll(const sf::Event& e) override;
    // Sets the header text shown above the options.
    void setText(const std::string& header) override;
    // Appends a selectable choice.
    void addOption(const std::string& choice);
    // Index of the chosen option; empty if the window was dismissed
    // without a selection.
    std::optional<std::size_t> selection() const { return _selection; }
protected:
    Options _options;
private:
    void adjustLayout();
    void draw() override;
    void nextSelection();
    void prevSelection();
    void updateText();
    sf::Text _indicator; // marker drawn next to the current selection
    std::optional<std::size_t> _selection = 0;
    sf::Sound _selectSound;        // played when moving the selection
    sf::Sound _selectionMadeSound; // played when a choice is confirmed
};
//////////////////////////////////////////////////////////////////////////////////////////////////
// Options-style modal listing the player's inventory, aggregated by
// item name with a count per entry.
class InventoryWindow : public OptionsWindow
{
    bool _debug = false;
    // count and name
    using InvAgg = std::tuple<std::uint32_t, ItemPtr>;
    std::map<std::string, InvAgg> _aggregate; // item name -> (count, item)
    // Rebuilds the option list from _aggregate.
    void updateOptions();
public:
    static constexpr auto CLASS_NAME = "InventoryWindow"; // Lua metatable name
    static const struct luaL_Reg LuaMethods[];            // Lua method bindings
    // Delegates to the main constructor with debug mode off.
    InventoryWindow(Screen& parent, PlayerPtr player)
        : InventoryWindow(parent, player, false)
    {}
    InventoryWindow(Screen& parent, PlayerPtr player, bool);
    void setDebug(bool v);
    bool debug() const { return _debug; }
};
//////////////////////////////////////////////////////////////////////////////////////////////////
// Retrieves a modal-window pointer from the userdata at Lua stack
// index 1. Scripts may hold the window under any of the modal metatable
// names, so each known metatable is tried in turn; dynamic_cast then
// verifies the stored window really is (or derives from) WinT.
// Returns nullptr when the userdata matches no modal metatable.
// (Replaces four copy-pasted test/cast blocks with one table-driven loop.)
template<typename WinT>
WinT* checkModal(lua_State* L)
{
    constexpr const char* metatables[] =
    {
        ModalWindow::CLASS_NAME,
        MessagesWindow::CLASS_NAME,
        OptionsWindow::CLASS_NAME,
        InventoryWindow::CLASS_NAME
    };
    for (const auto* name : metatables)
    {
        if (luaL_testudata(L, 1, name))
        {
            auto temp = static_cast<WinT**>(luaL_checkudata(L, 1, name));
            return dynamic_cast<WinT*>(*temp);
        }
    }
    return nullptr;
}
} // namespace tt
|
zethon/ttvg
|
src/PathFactory.h
|
#pragma once
#include <nlohmann/json.hpp>
#include <SFML/Graphics.hpp>
#include "Path.hpp"
#include "Intersection.h"
namespace nl = nlohmann;
namespace tt
{
class PathFactory;
using PathFactoryPtr = std::shared_ptr<PathFactory>;
////////////////////////////////////////////////////////////
/// \brief RiboPath generator for traffic system
///
////////////////////////////////////////////////////////////
class PathFactory final
{
public:
    ////////////////////////////////////////////////////////////
    /// \brief Construct the RiboPath generator
    ///
    /// \param size The tilesize of the world in which the paths
    ///             exist
    ///
    ////////////////////////////////////////////////////////////
    PathFactory(const sf::Vector2i& size);
    ////////////////////////////////////////////////////////////
    /// \brief Set the edges
    ///
    /// The edges are the starting points of each RiboPath. When
    /// a path is generated, one edge is selected at random. The
    /// collection of edges is copied
    ///
    /// \param edges Container of edges
    ///
    ////////////////////////////////////////////////////////////
    void setEdges(const TurningPoints& edges)
    {
        _edges = edges;
    }
    ////////////////////////////////////////////////////////////
    /// \brief Set the list turning points
    ///
    /// The set of turning points will be used when a RiboPath is
    /// generated. The list of turning points is copied.
    ///
    /// \param points Container of turning points
    ///
    /// \see addTurn
    ///
    ////////////////////////////////////////////////////////////
    void setTurningPoints(const TurningPoints& points)
    {
        _turns = points;
    }
    ////////////////////////////////////////////////////////////
    /// \brief Add a single turning point
    ///
    /// Helper function that adds a single turning point.
    ///
    /// \param tp The turning point to be added
    ///
    /// \see setTurningPoints
    ///
    ////////////////////////////////////////////////////////////
    void addTurn(const TurningPoint& tp)
    {
        _turns.push_back(tp);
    }
    ////////////////////////////////////////////////////////////
    /// \brief Generate a new RiboPath
    ///
    /// This overload accepts a Path object in which to generate
    /// the path. This should be used during gameplay to avoid
    /// a vector copy
    ///
    /// \param path The path in which to generate the RiboPath
    ///
    ////////////////////////////////////////////////////////////
    void makeRiboPath(Path& path) const;
    ////////////////////////////////////////////////////////////
    /// \brief Make and return a new RiboPath
    ///
    /// This overload returns a generated RiboPath. Generally this
    /// method will require a vector copy by the caller.
    ///
    /// \return The generated RiboPath
    ///
    ////////////////////////////////////////////////////////////
    Path makeRiboPath() const;
private:
    ////////////////////////////////////////////////////////////
    // Member data
    ////////////////////////////////////////////////////////////
    TurningPoints _edges; // Starting points that are slightly off the map
    TurningPoints _turns; // Turns generated from configured intersections
    sf::Vector2i _size;   // X and Y tilesize of the map
};
} // namespace tt
|
zethon/ttvg
|
src/AnimatedSprite.h
|
#pragma once
#include <functional>
#include <SFML/Graphics.hpp>
#include "GameTypes.h"
#include "IUpdateable.h"
namespace tt
{
// (removed an exact duplicate of the forward declaration + alias pair
// that previously appeared twice in a row)
class AnimatedSprite;
using AnimatedSpritePtr = std::shared_ptr<AnimatedSprite>;
// Callback that supplies the sprite's position on each animation step.
using AnimeCallback = std::function<sf::Vector2f(void)>;
// A sprite-sheet-driven animated sprite: draws one fixed-size cell of its
// texture at a time and advances cells while in the ANIMATED state.
class AnimatedSprite :
    public sf::Drawable,
    public sf::Transformable,
    public IUpdateable
{
public:
    AnimatedSprite(const sf::Texture& texture, const sf::Vector2i& size);
    AnimatedState state() const;
    void setState(AnimatedState state);
    Direction direction() const { return _direction; }
    void setDirection(Direction val) { _direction = val; }
    ///
    /// \brief Set the current cell to be drawn
    ///
    void setSource(std::uint32_t x, std::uint32_t y);
    ///
    /// \brief Set the max width in terms of cells for a
    ///        given row. The animation will cycle through
    ///        this number of cells
    ///
    void setMaxFramesPerRow(std::uint32_t max);
    void setHighlighted(bool h);
    // highlighting is tracked implicitly: a zero-size highlight rect means off
    bool highlighted() const { return _highlight.getSize().x != 0; }
    sf::RectangleShape& highlight() { return _highlight; }
    // Registers the callback that supplies the sprite's position each step.
    void setAnimeCallback(AnimeCallback cb) { _animeCallback = cb; }
    sf::FloatRect getGlobalBounds() const;
    std::uint16_t timestep() override;
protected:
    void draw(sf::RenderTarget& target, sf::RenderStates states) const override final;
    const sf::Vector2i _size; // fixed cell size of each frame within the sprite
    AnimatedState _state = AnimatedState::STILL;
    Direction _direction = Direction::DOWN;
    sf::Vector2i _source; // current cell coordinates in the sheet
    sf::Clock _timer;     // paces frame advancement
    // some sprite sheets have different frames per row
    // so this allows us to adjust how many frames get
    // animated in a particular row
    std::uint32_t _maxFramesPerRow = 0;
    AnimeCallback _animeCallback;
    sf::Sprite _sprite;
    sf::RectangleShape _highlight;
};
} // namespace tt
|
zethon/ttvg
|
src/IUpdateable.h
|
<reponame>zethon/ttvg
#pragma once
#include <memory>
namespace tt
{
class IUpdateable;
using IUpdateablePtr = std::shared_ptr<IUpdateable>;
// Interface for objects advanced by the game loop.
class IUpdateable
{
public:
    virtual ~IUpdateable() = default;
    // Advances the object one step. (NOTE(review): relies on std::uint16_t
    // without including <cstdint> here -- presumably pulled in
    // transitively; confirm.)
    virtual std::uint16_t timestep() = 0;
};
} // namespace
|
zethon/ttvg
|
src/GameScreen.h
|
<gh_stars>1-10
#pragma once
#include <lua/lua.hpp>
#include <SFML/Graphics.hpp>
#include "Scenes/Scene.h"
#include "Scenes/ModalWindow.h"
#include "Screen.h"
#include "AnimatedSprite.h"
#include "Player.h"
#include "TTLua.h"
#include "TTUtils.h"
#include "TooterLogger.h"
namespace tt
{
namespace
{
    // Lua: Utils.openUrl(url) -- opens `url` in the system browser.
    int Utils_openUrl(lua_State* L)
    {
        // luaL_checkstring raises a Lua error (instead of handing a
        // nullptr to openBrowser, as lua_tostring could) when the
        // argument is missing or not a string
        const auto url = luaL_checkstring(L, 1);
        tt::openBrowser(url);
        return 0;
    }
    // Lua: Utils.showModal(scene, text) -- blocks until dismissed.
    int Utils_showModal(lua_State* L)
    {
        auto scene = checkObject<Scene>(L);
        const auto text = luaL_checkstring(L, 2);
        ModalWindow mw{ *scene, };
        mw.setText(text);
        mw.exec();
        return 0;
    }
    // Lua: Utils.showYesNo(scene, text) -> boolean
    // Shows a blocking Yes/No prompt; returns true when "Yes" (the
    // first option) was chosen.
    int Utils_showYesNo(lua_State* L)
    {
        auto scene = checkObject<Scene>(L);
        const auto text = luaL_checkstring(L, 2);
        OptionsWindow mw{ *scene, };
        mw.setText(text);
        mw.addOption("Yes");
        mw.addOption("No");
        mw.exec();
        if (auto res = mw.selection();
            res.has_value() && *res == 0)
        {
            lua_pushboolean(L, 1);
        }
        else
        {
            lua_pushboolean(L, 0);
        }
        return 1;
    }
    // Exported to Lua as the global `Utils` table (see initLua below).
    const struct luaL_Reg Utils_LuaMethods[] =
    {
        {"openUrl", Utils_openUrl},
        {"showModal", Utils_showModal},
        {"showYesNo", Utils_showYesNo},
        {nullptr, nullptr}
    };
}
// Boots the Lua runtime for `screen`: opens the standard libraries,
// stores the screen (and optionally the item factory) in the registry at
// the fixed slots GAMESCREEN_LUA_IDX / ITEMFACTORY_LUA_IDX, registers the
// engine's global tables and class metatables, and exports the
// ModalType / ModalAlignment enum tables.
template<typename T>
void initLua(lua_State* L, T& screen, void* itemFactory)
{
    auto logger = log::initializeLogger("Lua");
    logger->info("initializing Lua subsystem");
    luaL_openlibs(L);
    // push a reference to `this` into the registry, it should
    // always be the 3rd entry
    lua_pushlightuserdata(L, static_cast<void*>(&screen));
    luaL_checktype(L, 1, LUA_TLIGHTUSERDATA);
    [[maybe_unused]] int reference = luaL_ref(L, LUA_REGISTRYINDEX);
    assert(GAMESCREEN_LUA_IDX == reference);
    if (itemFactory != nullptr)
    {
        lua_pushlightuserdata(L, itemFactory);
        luaL_checktype(L, 1, LUA_TLIGHTUSERDATA);
        reference = luaL_ref(L, LUA_REGISTRYINDEX);
        assert(ITEMFACTORY_LUA_IDX == reference);
    }
    // registers `methods` as a global table named `name`
    const auto registerGlobalTable =
        [L](const char* name, const luaL_Reg* methods)
        {
            lua_newtable(L);
            luaL_setfuncs(L, methods, 0);
            lua_setglobal(L, name);
        };
    registerGlobalTable(ItemFactory::CLASS_NAME, ItemFactory::LuaMethods);
    registerGlobalTable("Utils", Utils_LuaMethods);
    registerGlobalTable("Log", Logger_LuaMethods);
    // per-class metatables so userdata can call methods
    registerLuaFunctions<Scene>(L);
    registerLuaFunctions<Player>(L);
    registerLuaFunctions<DescriptionText>(L);
    registerLuaFunctions<Item>(L);
    registerLuaFunctions<Zone>(L);
    registerLuaFunctions<ModalWindow>(L);
    registerLuaFunctions<MessagesWindow>(L);
    registerLuaFunctions<OptionsWindow>(L);
    registerLuaFunctions<InventoryWindow>(L);
    // sets <table at -3>[name] = value for the enum-export tables below
    const auto setField =
        [L](const char* name, std::uint16_t value)
        {
            lua_pushstring(L, name);
            lua_pushnumber(L, value);
            lua_settable(L, -3);
        };
    {
        lua_newtable(L);
        setField("Default", static_cast<std::uint16_t>(ModalType::Default));
        setField("Messages", static_cast<std::uint16_t>(ModalType::Messages));
        setField("Options", static_cast<std::uint16_t>(ModalType::Options));
        setField("Inventory", static_cast<std::uint16_t>(ModalType::Inventory));
        lua_setglobal(L, "ModalType");
    }
    {
        lua_newtable(L);
        // BUG FIX: this table previously exported ModalType values by
        // copy-paste; it is meant to expose ModalWindow::Alignment. The
        // numeric values happen to coincide (0, 1, 2), so existing
        // scripts are unaffected.
        setField("Top", static_cast<std::uint16_t>(ModalWindow::Alignment::TOP));
        setField("Center", static_cast<std::uint16_t>(ModalWindow::Alignment::CENTER));
        setField("Bottom", static_cast<std::uint16_t>(ModalWindow::Alignment::BOTTOM));
        lua_setglobal(L, "ModalAlignment");
    }
    // make sure the stack is balanced
    assert(lua_gettop(L) == 0);
}
// Top-level in-game screen: owns the Lua state, the item factory, the
// player and all scenes, delegating draw/poll/timestep to the current one.
class GameScreen final : public Screen
{
public:
    using SceneMap = std::map<std::string, SceneSharedPtr>;
    // Fetches the GameScreen pointer stored in the Lua registry.
    static GameScreen* l_get(lua_State* L);
    GameScreen(ResourceManager& resmgr, sf::RenderTarget& target);
    ~GameScreen();
    void draw() override;
    PollResult poll(const sf::Event&) override;
    ScreenAction timestep() override;
    lua_State* lua() const { return _luaState; }
    const SceneMap& scenes() const { return _scenes; }
private:
    SceneSharedPtr _currentScene; // scene receiving draw/poll/timestep
    SceneMap _scenes;             // all loaded scenes by name
    PlayerPtr _player;
    lua_State* _luaState;
    std::shared_ptr<ItemFactory> _itemFactory;
    sf::Clock _gameClock;
};
} // namespace tt
|
zethon/ttvg
|
src/IntroScreen.h
|
<filename>src/IntroScreen.h
#pragma once
#include <SFML/Audio.hpp>
#include "Screen.h"
namespace tt
{
using TextPtr = std::shared_ptr<sf::Text>;
using TextList = std::vector<TextPtr>;
// Main menu screen: background art, background music, and a keyboard-
// navigable menu of text items.
class IntroScreen : public Screen
{
    sf::Font _font;
    sf::Texture _bgt; // background texture
    sf::Sound _tomWillKillSound;
    std::shared_ptr<sf::SoundBuffer> _selectorBuffer; // menu-move sound data
    std::shared_ptr<sf::SoundBuffer> _twkBuffer;
    std::uint16_t _selected = 0; // index of the highlighted menu item
    TextList _menuItems;
    std::shared_ptr<sf::Sprite> _sprite;
    std::unique_ptr<sf::Music> _bgsong; // streamed background music
    sf::Clock _clock;
public:
    IntroScreen(ResourceManager& res, sf::RenderTarget& target);
    PollResult poll(const sf::Event& e) override;
    ScreenAction timestep() override;
    void close() override;
};
// Initial splash screen shown before the intro; advances after input or
// a timed delay (see timestep/poll implementations).
class SplashScreen : public Screen
{
    sf::Texture _bg; // splash background
    sf::Sound _tomWillKillSound;
    sf::Clock _clock; // times how long the splash has been shown
    sf::Font _font;
    std::shared_ptr<sf::SoundBuffer> _twkBuffer;
public:
    SplashScreen(ResourceManager& res, sf::RenderTarget& target);
    PollResult poll(const sf::Event& e) override;
    ScreenAction timestep() override;
};
} // namespace tt
|
zethon/ttvg
|
src/Scenes/Tucson.h
|
#pragma once
#include <boost/range/adaptor/indexed.hpp>
#include <nlohmann/json.hpp>
#include <SFML/Graphics.hpp>
#include "../Vehicle.h"
#include "../VehicleFactory.h"
#include "../Background.h"
#include "../Player.h"
#include "Scene.h"
namespace nl = nlohmann;
namespace tt
{
// The Tucson overworld scene: adds a vehicle traffic system and related
// ambient sound on top of the base Scene behavior.
class Tucson : public Scene
{
public:
    Tucson(const SceneSetup& setup);
    static constexpr auto SCENE_NAME = "Tucson";
    PollResult poll(const sf::Event& e) override;
    ScreenAction update(sf::Time elapsed) override;
private:
    void toggleHighlight() override;
    void customDraw() override;
    void customUpdateCurrentTile(const TileInfo&) override;
    // Creates the vehicle factory and initial traffic state.
    void initTraffic();
    // Advances/spawns/retires vehicles for this frame.
    void timestepTraffic(sf::Time elapsed);
    nl::json _json; // scene configuration
    std::unique_ptr<VehicleFactory> _vehicleFactory;
    std::vector<VehiclePtr> _vehicles; // active traffic
    bool _updateTraffic = true;
    sf::SoundBuffer _pgSoundBuffer;
    sf::Sound _pgSound;
    sf::Vector2f _pgCenter;
    float _pgVolume = 0.f;
    bool _showVehicleWarning = true; // warn the player about traffic once
};
} // namespace tt
|
zethon/ttvg
|
src/Scenes/Scene.h
|
#pragma once
#include <lua/lua.hpp>
#include <nlohmann/json.hpp>
#include "../Screen.h"
#include "../Player.h"
#include "../Background.h"
#include "../Item.h"
#include "../ItemFactory.h"
#include "../TooterLogger.h"
#include "../DelayedSound.h"
#include "Hud.h"
#include "DescriptionText.h"
#include "DebugWindow.h"
#include "ModalWindow.h"
namespace tt
{
// Per-scene configuration for the player's avatar (filled by from_json).
struct AvatarInfo
{
    sf::Vector2f start;  // spawn position
    sf::Vector2f scale;
    sf::Vector2f source; // initial sprite-sheet cell
    sf::Vector2f origin;
    // initialized to 0: previously indeterminate for a default-constructed
    // AvatarInfo (the sf::Vector2f members above default to zero already)
    float stepsize = 0.f;
};
// Names of the Lua functions a scene invokes at lifecycle points;
// defaults match the conventional names used in scene scripts
// (overridable via from_json).
struct CallbackInfo
{
    std::string onInit = "onInit";
    std::string onEnter = "onEnter";
    std::string onExit = "onExit";
    std::string onTileUpdate = "onTileUpdate";
};
class Scene;
using ScenePtr = std::unique_ptr<Scene>;
using SceneSharedPtr = std::shared_ptr<Scene>;
// Aggregates everything a Scene needs at construction time.
struct SceneSetup
{
    ResourceManager& resources;
    sf::RenderTarget& window;
    PlayerPtr player;
    lua_State* lua; // shared interpreter; may be null (see loadSceneLuaFile)
    std::shared_ptr<ItemFactory> itemFactory;
};
void from_json(const nl::json& j, AvatarInfo& av);
void from_json(const nl::json& j, CallbackInfo& cb);
// Loads `filename` into its own sandboxed environment (a fresh table
// whose metatable falls back to _G) and publishes that environment as a
// global named after the scene. Returns false (leaving the stack empty)
// on any load or execution error.
template<typename SceneT>
bool loadSceneLuaFile(SceneT& scene, const std::string& filename, lua_State* L)
{
    if (!L) return false;
    auto logger = log::initializeLogger("Lua");
    logger->debug("loading scene lua file {}", filename);
    // load the Scene's Lua file into its own sandboxed
    // environment which also contains everything in _G
    {
        lua_newtable(L); // 1:tbl
        if (luaL_loadfile(L, filename.c_str()) != 0) // 1:tbl, 2:chunk
        {
            auto error = lua_tostring(L, -1);
            logger->error("could not load scene lua file '{}' because: {}", filename, error);
            lua_settop(L, 0);
            return false;
        }
        lua_newtable(L); // 1:tbl, 2:chunk, 3:tbl(mt)
        lua_getglobal(L, "_G"); // 1:tbl, 2:chunk, 3:tbl(mt), 4:_G
        lua_setfield(L, 3, "__index"); // 1:tbl, 2:chunk, 3:tbl(mt)
        lua_setmetatable(L, 1); // 1:tbl, 2:chunk
        lua_pushvalue(L, 1); // 1:tbl, 2:chunk, 3:tbl
        lua_setupvalue(L, -2, 1); // 1:tbl, 2:chunk
        if (lua_pcall(L, 0, 0, 0) != 0) // 1:tbl
        {
            auto error = lua_tostring(L, -1);
            // BUG FIX: the format string previously had a single `{}`
            // with two arguments, so the actual Lua error text was
            // silently dropped from the log message
            logger->error("could not load scene lua file '{}' because: {}", filename, error);
            lua_settop(L, 0);
            return false;
        }
        lua_setglobal(L, scene.name().c_str()); // empty stack
        assert(lua_gettop(L) == 0);
    }
    return true;
}
// Stores a pointer to `scene` in the Lua state as a userdata with the
// scene's class metatable, and returns the LUA_REGISTRYINDEX reference
// under which it was saved.
template<typename SceneT>
int registerScene(lua_State* L, SceneT& scene)
{
    int idx = 0;
    // create a pointer to `this` in the Lua state and register
    // it as a `Scene` class/object/table inside Lua
    {
        // create the pointer to ourselves in the Lua state
        std::size_t size = sizeof(SceneT*);
        SceneT** data = static_cast<SceneT**>(lua_newuserdata(L, size)); // -1:ud
        *data = &scene;
        // and set the metatable
        luaL_getmetatable(L, SceneT::CLASS_NAME); // -2:ud, -1: mt
        lua_setmetatable(L, -2); // -1: ud
        idx = luaL_ref(L, LUA_REGISTRYINDEX); // empty stack
        // make sure we're balanced
        assert(lua_gettop(L) == 0);
    }
    return idx;
}
int Scene_getPlayer(lua_State* L);
int Scene_getDescriptionWindow(lua_State* L);
// Background-music configuration for a scene (filled by from_json).
struct BackgroundMusic
{
    std::string file;     // path to the music file
    float volume = 100.f; // playback volume, SFML range 0-100
};
void from_json(const nl::json& j, BackgroundMusic& bm);
// Base class for all playable scenes: owns the background, the HUD and
// description overlays, the scene's items and the Lua wiring for the
// scene's script callbacks. Exposed to Lua via CLASS_NAME / LuaMethods.
class Scene : public Screen
{
public:
    using Items = std::vector<ItemPtr>;
    using ItemTasks = std::map<sf::Time, ItemInfo>; // scheduled (re)spawns by game time
    static constexpr auto CLASS_NAME = "Scene"; // Lua metatable name
    static const struct luaL_Reg LuaMethods[];  // Lua method bindings
    friend int Scene_getPlayer(lua_State* L);
    friend int Scene_getDescriptionWindow(lua_State* L);
    Scene(std::string_view name, const SceneSetup& setup);
    std::string name() const { return _name; }
    // lifecycle hooks; each fires the matching Lua callback (see _callbackNames)
    virtual void init();
    virtual void enter();
    virtual void exit();
    PollResult poll(const sf::Event& e) override;
    // ScreenAction timestep() override;
    void draw() override;
    virtual ScreenAction update(sf::Time elapsed);
    sf::Vector2f getPlayerTile() const;
    void setPlayerTile(const Tile& tile);
    // registry reference returned by registerScene
    int luaIdx() const { return _luaIdx; }
    Hud& hud() { return _hud; }
    DescriptionText& descriptionText() { return _descriptionText; }
    void addItem(ItemPtr item);
    void removeItem(ItemPtr item);
    const std::vector<ItemPtr>& items() const { return _items; }
    BackgroundSharedPtr background() const { return _background; }
    PlayerPtr player() const { return _player; }
protected:
    virtual sf::Vector2f animeCallback();
    virtual void adjustView();
    // subclasses might also have to deal with highlighting
    virtual void toggleHighlight();
    [[maybe_unused]] bool walkPlayer(float speed);
    void showHelp();
    std::string _name;
    lua_State* _luaState = nullptr;
    int _luaIdx = 0; // registry slot of this scene's userdata
    CallbackInfo _callbackNames;
    Hud _hud;
    DescriptionText _descriptionText;
    DebugWindow _debugWindow;
    BackgroundSharedPtr _background;
    std::unique_ptr<sf::Music> _bgmusic;
    std::weak_ptr<Player> _weakPlayer;
    PlayerPtr _player;
    sf::Vector2f _lastPlayerPos;
    AvatarInfo _playerAvatarInfo;
    TileInfo _currentTile; // tile the player currently occupies
    ItemTasks _itemTasks;
    Items _items; // items currently present in the scene
    ItemFactory& _itemFactory;
    log::SpdLogPtr _logger;
    sf::Time _gameTime; // accumulated scene time
    DelayedSoundPtr _walkSound;
    DelayedSoundPtr _pickupSound;
private:
    void createItems();
    void pickupItem(Items::iterator itemIt);
    virtual void updateCurrentTile(const TileInfo& info);
    PollResult privatePollHandler(const sf::Event& e);
    // allow subclasses to define any items that get drawn
    virtual void customDraw() {}
    // allow subclasses to do custom tile updating
    virtual void customUpdateCurrentTile(const TileInfo&) { }
    // setup an item's info based on the map and item info
    void setItemInstance(Item& item, const ItemInfo& groupInfo, const ItemInfo& instanceInfo);
};
} // namespace tt
|
zethon/ttvg
|
src/GameTypes.h
|
<reponame>zethon/ttvg
#pragma once
namespace tt
{
// Movement direction flags. Values are distinct bits so directions can be
// combined with bitwise OR (e.g. UP | LEFT); NONE is the empty mask.
enum Direction
{
    NONE    = 0x00,
    UP      = 0x01,
    DOWN    = 0x02,
    LEFT    = 0x04,
    RIGHT   = 0x08
};

// Whether an entity's animation is currently playing.
enum AnimatedState
{
    STILL,
    ANIMATED
};
} // namespace
|
zethon/ttvg
|
src/Engine.h
|
<gh_stars>1-10
#pragma once
#include <memory>
#include <boost/filesystem.hpp>
#include <SFML/Graphics.hpp>
#include "ResourceManager.h"
#include "Screen.h"
#include "TooterLogger.h"
namespace tt
{
using RenderTargetPtr = std::shared_ptr<sf::RenderTarget>;
// Top-level engine: owns the resource manager and the currently active
// Screen, and forwards draw/poll/timestep calls to that screen.
class TooterEngine
{
    ResourceManager _resourceManager;       // asset loader rooted at `respath`
    RenderTargetPtr _renderTarget;          // shared target screens draw into
    std::shared_ptr<Screen> _currentScreen; // screen receiving events/draws
    log::SpdLogPtr _logger;

public:
    TooterEngine(
        const boost::filesystem::path& respath,
        RenderTargetPtr render);

    void drawScreen();
    PollResult poll(const sf::Event& e);
    void timestep();

    // Replace the active screen with the one identified by `id`.
    void changeScreen(std::uint16_t id);
};
} // namespace tt
|
zethon/ttvg
|
src/Scenes/DebugWindow.h
|
#pragma once
#include "../Screen.h"
namespace tt
{
// Small on-screen overlay that renders a single line of debug text on a
// rectangular background.
class DebugWindow : public Screen
{
    sf::Font _debugFont;
    std::shared_ptr<sf::RectangleShape> _background;
    std::shared_ptr<sf::Text> _debugText;

public:
    DebugWindow(ResourceManager& resmgr, sf::RenderTarget& target);

    // Replace the currently displayed debug string.
    void setText(const std::string& text);
};
} // namespace tt
|
zethon/ttvg
|
tests/Test.h
|
#pragma once
#include <iostream>
#include <boost/test/data/test_case.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <test-config.h>
namespace std
{
// Stream an sf::Vector2 as "{ x=<x> y=<y> }" so Boost.Test can print it
// in failure messages.
template<typename T>
std::ostream& operator<<(std::ostream& out, const sf::Vector2<T> item)
{
    out << "{ x=" << item.x << " y=" << item.y << " }";
    return out;
}
// Stream an sf::FloatRect as "{ left=.. top=.. width=.. height=..}" for
// Boost.Test failure output.
std::ostream& operator<<(std::ostream& out, const sf::FloatRect& item)
{
    out << "{ left=" << item.left
        << " top=" << item.top
        << " width=" << item.width
        << " height=" << item.height
        << "}";
    return out;
}
} // namespace std
namespace tt
{
// Headless render target for unit tests: reports a fixed 4096x4096 size
// and performs no actual rendering.
class NullWindow : public sf::RenderTarget
{

public:
    sf::Vector2u getSize() const override
    {
        return sf::Vector2u{4096,4096};
    }
};
void writeFile(const std::string& file, const std::string& data)
{
boost::filesystem::path filepath{ file };
if (!boost::filesystem::exists(filepath.parent_path()))
{
boost::filesystem::create_directories(filepath.parent_path());
}
std::ofstream out(file);
if (out.is_open())
{
out << data;
out.close();
}
}
namespace fs = boost::filesystem;
// Recursively copy `sourceDir` into a freshly created `destinationDir`.
// Throws std::runtime_error if the source is missing or not a directory,
// if the destination already exists, or if it cannot be created.
void copyDirectory(const fs::path& sourceDir, const fs::path& destinationDir)
{
    if (!fs::exists(sourceDir) || !fs::is_directory(sourceDir))
    {
        throw std::runtime_error("Source directory " + sourceDir.string() + " does not exist or is not a directory");
    }

    if (fs::exists(destinationDir))
    {
        throw std::runtime_error("Destination directory " + destinationDir.string() + " already exists");
    }

    if (!fs::create_directories(destinationDir))
    {
        throw std::runtime_error("Cannot create destination directory " + destinationDir.string());
    }

    for (const auto& dirEnt : fs::recursive_directory_iterator{sourceDir})
    {
        const auto& path = dirEnt.path();
        // Derive the path relative to the source root by stripping the
        // source prefix from the front, then mirror it under destination.
        auto relativePathStr = path.string();
        boost::replace_first(relativePathStr, sourceDir.string(), "");
        fs::copy(path, destinationDir / relativePathStr);
    }
}
// Copy `srcFile` to `dstFile`, creating the destination's parent
// directories if they do not already exist.
// Throws std::runtime_error if the source is missing or not a regular
// file, or if the destination directory cannot be created.
void copyFile(const fs::path& srcFile, const fs::path& dstFile)
{
    if (!fs::exists(srcFile) || !fs::is_regular_file(srcFile))
    {
        throw std::runtime_error("Source file " + srcFile.string() + " does not exist or is not a regular file");
    }

    // BUGFIX: fs::create_directories returns false when the directory
    // already exists, so the old `if (!create_directories(...)) throw`
    // rejected every copy into a pre-existing directory. Only create the
    // parent when missing, and fail only if it is still absent afterwards.
    const auto parent = dstFile.parent_path();
    if (!parent.empty() && !fs::exists(parent))
    {
        fs::create_directories(parent);
        if (!fs::exists(parent))
        {
            throw std::runtime_error("Cannot create destination directory " + parent.string());
        }
    }

    fs::copy(srcFile, dstFile);
}
// Create and return a unique temporary directory for the current test:
// <system-temp>/ttvgXXXXXX/<boost-test-case-id>.
boost::filesystem::path tempFolder()
{
    auto temp = boost::filesystem::temp_directory_path() / boost::filesystem::unique_path("ttvg%%%%%%");
    // Segregate by Boost.Test case id so different cases never share a
    // directory even within the same unique root.
    temp /= std::to_string(boost::unit_test::framework::current_test_case().p_id);
    boost::filesystem::create_directories(temp);
    return temp;
}
}
|
zethon/ttvg
|
src/TTUtils.h
|
<reponame>zethon/ttvg
#pragma once
#include <string>
#include <ostream>
#include <random>
#include <iterator>
#include <cmath>
#include <boost/spirit/home/x3.hpp>
#include <nlohmann/json.hpp>
#include <SFML/Graphics.hpp>
namespace nl = nlohmann;
namespace sf
{
void from_json(const nl::json& j, Vector2f& v);
} // namespace sf
namespace tt
{
std::string defaultResourceFolder();
// Stream an sf::Rect as "{ x=.. y=.. w=.. h=.. }" (left/top mapped to x/y).
template<typename T>
std::ostream& operator<<(std::ostream& out, const sf::Rect<T> item)
{
    out << "{ x=" << item.left
        << " y=" << item.top
        << " w=" << item.width
        << " h=" << item.height
        << " }";
    return out;
}
// Stream an sf::Vector2 as "{ x=<x> y=<y> }".
template<typename T>
std::ostream& operator<<(std::ostream& out, const sf::Vector2<T> item)
{
    out << "{ x=" << item.x << " y=" << item.y << " }";
    return out;
}
namespace x3 = boost::spirit::x3;
const auto FloatRectParser
= x3::rule<class FloatRectParser_, sf::FloatRect>{}
= (x3::float_ >> ',' >> x3::float_ >> ',' >> x3::float_ >> ',' >> x3::float_)
[([](auto& ctx)
{
auto& attr = x3::_attr(ctx);
using boost::fusion::at_c;
auto width = at_c<2>(attr) - at_c<0>(attr);
auto height = at_c<3>(attr) - at_c<1>(attr);
x3::_val(ctx)
= sf::FloatRect{ at_c<0>(attr), at_c<1>(attr), width, height };
})
];
// Spirit.X3 parser for "x,y" producing an sf::Vector2f.
const auto VectorFloatParser
    = x3::rule<class VectorFloatParser_, sf::Vector2f>{}
    = (x3::float_ >> ',' >> x3::float_)
        [([](auto& ctx)
        {
            auto& attr = x3::_attr(ctx);
            using boost::fusion::at_c;
            x3::_val(ctx)
                = sf::Vector2f{ at_c<0>(attr), at_c<1>(attr)};
        })
        ];
// custom version of `contains` that will return true if the testing
// point lies on the rectangles border
// Inclusive point-in-rectangle test: unlike the usual `contains`, a point
// lying exactly on the rectangle's border counts as inside.
// Rectangles with negative width/height are normalized before testing.
template<typename Rect, typename T>
bool RectContains(Rect rect, T x, T y)
{
    // Opposite corner; with negative dimensions this may lie left/above
    // the origin corner, so order the bounds explicitly on each axis.
    const T x2 = static_cast<T>(rect.left + rect.width);
    const T y2 = static_cast<T>(rect.top + rect.height);

    const T minX = rect.left < x2 ? rect.left : x2;
    const T maxX = rect.left < x2 ? x2 : rect.left;
    const T minY = rect.top < y2 ? rect.top : y2;
    const T maxY = rect.top < y2 ? y2 : rect.top;

    // Inclusive on all four edges.
    return x >= minX && x <= maxX && y >= minY && y <= maxY;
}
// Convenience overload: point supplied as a vector instead of separate
// coordinates; forwards to the inclusive x/y version above.
template <typename T>
bool RectContains(const sf::Rect<T>& rect, const sf::Vector2<T>& point)
{
    return RectContains(rect, point.x, point.y);
}
// Return an iterator to a uniformly random element of `container`.
// Precondition: the container is non-empty (an empty container makes the
// distribution bounds invalid -- undefined behavior, as before).
template<typename T>
auto select_randomly(const T& container)
{
    static std::random_device rd;
    static std::mt19937 gen(rd());

    // BUGFIX: the distribution previously used `int` with the size cast
    // through static_cast<int>, silently truncating for containers larger
    // than INT_MAX (and mixing 0ul with int). Use the container's own
    // size type for the index instead.
    using index_t = decltype(std::size(container));
    std::uniform_int_distribution<index_t> dis(0, std::size(container) - 1);

    auto it = container.begin();
    std::advance(it, dis(gen));
    return it;
}
// Return a uniformly random integer-valued number in [min, max].
// NOTE: as before, floating-point arguments are truncated to whole
// numbers before sampling -- this produces integral results only.
template<typename NumT>
NumT RandomNumber(NumT min, NumT max)
{
    static std::random_device rd;
    static std::mt19937 gen(rd());

    // BUGFIX: sampling previously went through static_cast<int>, which
    // truncated 64-bit ranges. A long long distribution preserves the
    // full range for every integral NumT.
    std::uniform_int_distribution<long long> dis(
        static_cast<long long>(min), static_cast<long long>(max));

    return static_cast<NumT>(dis(gen));
}
// True iff `n` has exactly one set bit (i.e. is a nonzero power of two).
// Clearing the lowest set bit with n & (n - 1) must leave zero; zero
// itself has no bits set and is rejected by the first test.
template <typename T,
          typename = typename std::enable_if<(std::is_integral<T>::value )>::type>
inline bool exactly_one_bit_set(T n)
{
    return (n != 0) && ((n & (n - 1)) == 0);
}
// Euclidean distance between two points.
// Uses std::hypot, which avoids the intermediate overflow/underflow that
// sqrt(x*x + y*y) suffers for very large or very small components.
inline float distance(const sf::Vector2f& v1, const sf::Vector2f& v2)
{
    return std::hypot(v1.x - v2.x, v1.y - v2.y);
}
void openBrowser(const std::string& url_str);
std::string getOsString();
} // namespace tt
|
mike-pt/xhyve
|
src/pci_e82545.c
|
<reponame>mike-pt/xhyve
/*
* Copyright (c) 2016 <NAME> <<EMAIL>>
* Copyright (c) 2015 <NAME> <<EMAIL>>
* Copyright (c) 2013 <NAME>, Avere Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer
* in this position and unchanged.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <machine/limits.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>
#include <pthread.h>
#include <dispatch/dispatch.h>
#include <vmnet/vmnet.h>
#include <xhyve/xhyve.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/e1000_regs.h>
#include <xhyve/support/e1000_defines.h>
#include <xhyve/support/uuid.h>
#include <xhyve/pci_emul.h>
#include <xhyve/mevent.h>
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-macros"
/* FreeBSD sys/net/ethernet.h */
#define ETHER_VLAN_ENCAP_LEN 4 /* len of 802.1Q VLAN encapsulation */
/* Hardware/register definitions XXX: move some to common code. */
#define E82545_VENDOR_ID_INTEL 0x8086
#define E82545_DEV_ID_82545EM_COPPER 0x100F
#define E82545_SUBDEV_ID 0x1008
#define E82545_REVISION_4 4
#define E82545_MDIC_DATA_MASK 0x0000FFFF
#define E82545_MDIC_OP_MASK 0x0c000000
#define E82545_MDIC_IE 0x20000000
#define E82545_EECD_FWE_DIS 0x00000010 /* Flash writes disabled */
#define E82545_EECD_FWE_EN 0x00000020 /* Flash writes enabled */
#define E82545_EECD_FWE_MASK 0x00000030 /* Flash writes mask */
#define E82545_BAR_REGISTER 0
#define E82545_BAR_REGISTER_LEN (128*1024)
#define E82545_BAR_FLASH 1
#define E82545_BAR_FLASH_LEN (64*1024)
#define E82545_BAR_IO 2
#define E82545_BAR_IO_LEN 8
#define E82545_IOADDR 0x00000000
#define E82545_IODATA 0x00000004
#define E82545_IO_REGISTER_MAX 0x0001FFFF
#define E82545_IO_FLASH_BASE 0x00080000
#define E82545_IO_FLASH_MAX 0x000FFFFF
#define E82545_ARRAY_ENTRY(reg, offset) (reg + (offset<<2))
#define E82545_RAR_MAX 15
#define E82545_MTA_MAX 127
#define E82545_VFTA_MAX 127
/* Slightly modified from the driver versions, hardcoded for 3 opcode bits,
* followed by 6 address bits.
* TODO: make opcode bits and addr bits configurable?
* NVM Commands - Microwire */
#define E82545_NVM_OPCODE_BITS 3
#define E82545_NVM_ADDR_BITS 6
#define E82545_NVM_DATA_BITS 16
#define E82545_NVM_OPADDR_BITS (E82545_NVM_OPCODE_BITS + E82545_NVM_ADDR_BITS)
#define E82545_NVM_ADDR_MASK ((1 << E82545_NVM_ADDR_BITS)-1)
#define E82545_NVM_OPCODE_MASK \
(((1 << E82545_NVM_OPCODE_BITS) - 1) << E82545_NVM_ADDR_BITS)
#define E82545_NVM_OPCODE_READ (0x6 << E82545_NVM_ADDR_BITS) /* read */
#define E82545_NVM_OPCODE_WRITE (0x5 << E82545_NVM_ADDR_BITS) /* write */
#define E82545_NVM_OPCODE_ERASE (0x7 << E82545_NVM_ADDR_BITS) /* erase */
#define E82545_NVM_OPCODE_EWEN (0x4 << E82545_NVM_ADDR_BITS) /* wr-enable */
#define E82545_NVM_EEPROM_SIZE 64 /* 64 * 16-bit values == 128K */
#define E1000_ICR_SRPD 0x00010000
/* This is an arbitrary number. There is no hard limit on the chip. */
#define I82545_MAX_TXSEGS 64
#pragma clang diagnostic pop
/* Legacy receive descriptor */
struct e1000_rx_desc {
    uint64_t buffer_addr; /* Address of the descriptor's data buffer */
    uint16_t length;      /* Length of data DMAed into data buffer */
    uint16_t csum;        /* Packet checksum */
    uint8_t  status;      /* Descriptor status */
    uint8_t  errors;      /* Descriptor Errors */
    uint16_t special;
};

/* Transmit descriptor types (selected by the DEXT/DTYP bits in cmd) */
#define E1000_TXD_MASK   (E1000_TXD_CMD_DEXT | 0x00F00000)
#define E1000_TXD_TYP_L  (0)                                        /* legacy */
#define E1000_TXD_TYP_C  (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C)    /* context */
#define E1000_TXD_TYP_D  (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)    /* data */

/* Legacy transmit descriptor */
struct e1000_tx_desc {
    uint64_t buffer_addr; /* Address of the descriptor's data buffer */
    union {
        uint32_t data;
        struct {
            uint16_t length; /* Data buffer length */
            uint8_t  cso;    /* Checksum offset */
            uint8_t  cmd;    /* Descriptor control */
        } flags;
    } lower;
    union {
        uint32_t data;
        struct {
            uint8_t  status; /* Descriptor status */
            uint8_t  css;    /* Checksum start */
            uint16_t special;
        } fields;
    } upper;
};

/* Context descriptor: carries checksum/TSO setup for following data descs */
struct e1000_context_desc {
    union {
        uint32_t ip_config;
        struct {
            uint8_t  ipcss; /* IP checksum start */
            uint8_t  ipcso; /* IP checksum offset */
            uint16_t ipcse; /* IP checksum end */
        } ip_fields;
    } lower_setup;
    union {
        uint32_t tcp_config;
        struct {
            uint8_t  tucss; /* TCP checksum start */
            uint8_t  tucso; /* TCP checksum offset */
            uint16_t tucse; /* TCP checksum end */
        } tcp_fields;
    } upper_setup;
    uint32_t cmd_and_length;
    union {
        uint32_t data;
        struct {
            uint8_t  status;  /* Descriptor status */
            uint8_t  hdr_len; /* Header length */
            uint16_t mss;     /* Maximum segment size */
        } fields;
    } tcp_seg_setup;
};

/* Data descriptor */
struct e1000_data_desc {
    uint64_t buffer_addr; /* Address of the descriptor's buffer address */
    union {
        uint32_t data;
        struct {
            uint16_t length; /* Data buffer length */
            uint8_t  typ_len_ext;
            uint8_t  cmd;
        } flags;
    } lower;
    union {
        uint32_t data;
        struct {
            uint8_t  status; /* Descriptor status */
            uint8_t  popts;  /* Packet Options */
            uint16_t special;
        } fields;
    } upper;
};

/* Any of the three tx descriptor layouts, viewed through one union */
union e1000_tx_udesc {
    struct e1000_tx_desc td;
    struct e1000_context_desc cd;
    struct e1000_data_desc dd;
};

/* Tx checksum info for a packet. */
struct ck_info {
    int      ck_valid; /* ck_info is valid */
    uint8_t  ck_start; /* start byte of cksum calculation */
    uint8_t  ck_off;   /* offset of cksum insertion */
    uint16_t ck_len;   /* length of cksum calc: 0 is to packet-end */
};
/*
* Debug printf
*/
static int e82545_debug = 0;
#define DPRINTF(msg,...) if (e82545_debug) fprintf(stderr, "e82545: " msg, __VA_ARGS__)
#define WPRINTF(msg,...) fprintf(stderr, "e82545: " msg, __VA_ARGS__)
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"

/* s/w representation of the RAL/RAH regs */
struct eth_uni {
    int eu_valid;
    int eu_addrsel;
    struct ether_addr eu_eth;
};

/*
 * Per-device soft state for the emulated 82545EM NIC. Fields shadowing
 * guest-visible e1000 registers are named esc_<REG>, with the register
 * offset noted in each comment.
 */
struct e82545_softc {
    struct pci_devinst *esc_pi;
    struct mevent   *esc_mevp;
    struct mevent   *esc_mevpitr;
    pthread_mutex_t esc_mtx;       /* guards all state below */
    struct vmnet_state *vms;

    /* General */
    uint32_t esc_CTRL;     /* x0000 device ctl */
    uint32_t esc_FCAL;     /* x0028 flow ctl addr lo */
    uint32_t esc_FCAH;     /* x002C flow ctl addr hi */
    uint32_t esc_FCT;      /* x0030 flow ctl type */
    uint32_t esc_VET;      /* x0038 VLAN eth type */
    uint32_t esc_FCTTV;    /* x0170 flow ctl tx timer */
    uint32_t esc_LEDCTL;   /* x0E00 LED control */
    uint32_t esc_PBA;      /* x1000 pkt buffer allocation */

    /* Interrupt control */
    int      esc_irq_asserted;
    uint32_t esc_ICR;      /* x00C0 cause read/clear */
    uint32_t esc_ITR;      /* x00C4 intr throttling */
    uint32_t esc_ICS;      /* x00C8 cause set */
    uint32_t esc_IMS;      /* x00D0 mask set/read */
    uint32_t esc_IMC;      /* x00D8 mask clear */

    /* Transmit */
    union e1000_tx_udesc *esc_txdesc;   /* host mapping of the guest tx ring */
    struct e1000_context_desc esc_txctx;
    pthread_t esc_tx_tid;
    pthread_cond_t esc_tx_cond;
    int      esc_tx_enabled;
    int      esc_tx_active;
    uint32_t esc_TXCW;     /* x0178 transmit config */
    uint32_t esc_TCTL;     /* x0400 transmit ctl */
    uint32_t esc_TIPG;     /* x0410 inter-packet gap */
    uint16_t esc_AIT;      /* x0458 Adaptive Interframe Throttle */
    uint64_t esc_tdba;     /* verified 64-bit desc table addr */
    uint32_t esc_TDBAL;    /* x3800 desc table addr, low bits */
    uint32_t esc_TDBAH;    /* x3804 desc table addr, hi 32-bits */
    uint32_t esc_TDLEN;    /* x3808 # descriptors in bytes */
    uint16_t esc_TDH;      /* x3810 desc table head idx */
    uint16_t esc_TDHr;     /* internal read version of TDH */
    uint16_t esc_TDT;      /* x3818 desc table tail idx */
    uint32_t esc_TIDV;     /* x3820 intr delay */
    uint32_t esc_TXDCTL;   /* x3828 desc control */
    uint32_t esc_TADV;     /* x382C intr absolute delay */

    /* L2 frame acceptance */
    struct eth_uni esc_uni[16];  /* 16 x unicast MAC addresses */
    uint32_t esc_fmcast[128];    /* Multicast filter bit-match */
    uint32_t esc_fvlan[128];     /* VLAN 4096-bit filter */

    /* Receive */
    struct e1000_rx_desc *esc_rxdesc;   /* host mapping of the guest rx ring */
    pthread_cond_t esc_rx_cond;
    int      esc_rx_enabled;
    int      esc_rx_active;
    int      esc_rx_loopback;
    uint32_t esc_RCTL;     /* x0100 receive ctl */
    uint32_t esc_FCRTL;    /* x2160 flow cntl thresh, low */
    uint32_t esc_FCRTH;    /* x2168 flow cntl thresh, hi */
    uint64_t esc_rdba;     /* verified 64-bit desc table addr */
    uint32_t esc_RDBAL;    /* x2800 desc table addr, low bits */
    uint32_t esc_RDBAH;    /* x2804 desc table addr, hi 32-bits*/
    uint32_t esc_RDLEN;    /* x2808 #descriptors */
    uint16_t esc_RDH;      /* x2810 desc table head idx */
    uint16_t esc_RDT;      /* x2818 desc table tail idx */
    uint32_t esc_RDTR;     /* x2820 intr delay */
    uint32_t esc_RXDCTL;   /* x2828 desc control */
    uint32_t esc_RADV;     /* x282C intr absolute delay */
    uint32_t esc_RSRPD;    /* x2C00 recv small packet detect */
    uint32_t esc_RXCSUM;   /* x5000 receive cksum ctl */

    /* IO Port register access */
    uint32_t io_addr;

    /* Shadow copy of MDIC */
    uint32_t mdi_control;
    /* Shadow copy of EECD */
    uint32_t eeprom_control;
    /* Latest NVM in/out */
    uint16_t nvm_data;
    uint16_t nvm_opaddr;

    /* stats */
    uint32_t missed_pkt_count; /* dropped for no room in rx queue */
    uint32_t pkt_rx_by_size[6];
    uint32_t pkt_tx_by_size[6];
    uint32_t good_pkt_rx_count;
    uint32_t bcast_pkt_rx_count;
    uint32_t mcast_pkt_rx_count;
    uint32_t good_pkt_tx_count;
    uint32_t bcast_pkt_tx_count;
    uint32_t mcast_pkt_tx_count;
    uint32_t oversize_rx_count;
    uint32_t tso_tx_count;
    uint64_t good_octets_rx;
    uint64_t good_octets_tx;
    uint64_t missed_octets; /* counts missed and oversized */

    /* Microwire EEPROM state machine (see e82545_eecd_strobe) */
    uint8_t nvm_bits:6; /* number of bits remaining in/out */
    uint8_t nvm_mode:2;
#define E82545_NVM_MODE_OPADDR  0x0
#define E82545_NVM_MODE_DATAIN  0x1
#define E82545_NVM_MODE_DATAOUT 0x2
    /* EEPROM data */
    uint16_t eeprom_data[E82545_NVM_EEPROM_SIZE];
};
#pragma clang diagnostic pop
static void e82545_reset(struct e82545_softc *sc, int dev);
static void e82545_rx_enable(struct e82545_softc *sc);
static void e82545_rx_disable(struct e82545_softc *sc);
static void e82545_tap_callback(struct e82545_softc *sc);
static void e82545_tx_start(struct e82545_softc *sc);
static void e82545_tx_enable(struct e82545_softc *sc);
static void e82545_tx_disable(struct e82545_softc *sc);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
/* State for the vmnet backing interface created in vmn_create() */
struct vmnet_state {
    interface_ref iface;          /* handle returned by vmnet_start_interface */
    uint8_t mac[6];               /* MAC assigned by vmnet */
    unsigned int mtu;
    unsigned int max_packet_size; /* largest frame vmnet will deliver */
};
#pragma clang diagnostic pop
/*
 * Permanently drop root privileges per the CERT Secure C Coding Standard,
 * section POS36-C:
 * https://www.securecoding.cert.org/confluence/display/c/POS36-C.+Observe+correct+revocation+order+while+relinquishing+privileges
 *
 * Returns 0 on success (or when no privileges needed dropping), -1 on
 * failure.
 */
static int drop_privileges(void) {
    /* Nothing to do unless we are effectively root in some capacity. */
    if ((geteuid() != 0) && (getegid() != 0)) {
        return 0;
    }
    /* Per POS36-C the group id must be restored before the user id,
     * otherwise the setgid call may no longer be permitted. */
    if (setgid(getgid()) != 0) {
        return -1;
    }
    if (setuid(getuid()) != 0) {
        return -1;
    }
    return 0;
}
/*
 * Create an interface for the guest using Apple's vmnet framework.
 *
 * The interface works in VMNET_SHARED_MODE which allows for packets
 * of the guest to reach other guests and the Internet.
 *
 * See also: https://developer.apple.com/library/mac/documentation/vmnet/Reference/vmnet_Reference/index.html
 *
 * On success stores a vmnet_state in sc->vms and returns 0; returns -1
 * on any failure (bad UUID, allocation, vmnet start, privilege drop).
 */
static int
vmn_create(struct e82545_softc *sc)
{
    xpc_object_t interface_desc;
    uuid_t uuid;
    __block interface_ref iface;
    __block vmnet_return_t iface_status;
    dispatch_semaphore_t iface_created;
    dispatch_queue_t if_create_q;
    dispatch_queue_t if_q;
    struct vmnet_state *vms;
    uint32_t uuid_status;

    interface_desc = xpc_dictionary_create(NULL, NULL, 0);
    xpc_dictionary_set_uint64(interface_desc, vmnet_operation_mode_key,
        VMNET_SHARED_MODE);

    /* A fixed guest UUID keeps the vmnet-assigned MAC stable across runs. */
    if (guest_uuid_str != NULL) {
        uuid_from_string(guest_uuid_str, &uuid, &uuid_status);
        if (uuid_status != uuid_s_ok) {
            return (-1);
        }
    } else {
        uuid_generate_random(uuid);
    }

    xpc_dictionary_set_uuid(interface_desc, vmnet_interface_id_key, uuid);
    iface = NULL;
    iface_status = 0;

    vms = malloc(sizeof(struct vmnet_state));

    if (!vms) {
        return (-1);
    }

    if_create_q = dispatch_queue_create("org.xhyve.vmnet.create",
        DISPATCH_QUEUE_SERIAL);

    iface_created = dispatch_semaphore_create(0);

    /* vmnet_start_interface completes asynchronously; the completion
     * block captures the MAC/MTU and signals the semaphore. */
    iface = vmnet_start_interface(interface_desc, if_create_q,
        ^(vmnet_return_t status, xpc_object_t interface_param)
    {
        iface_status = status;
        if (status != VMNET_SUCCESS || !interface_param) {
            dispatch_semaphore_signal(iface_created);
            return;
        }

        if (sscanf(xpc_dictionary_get_string(interface_param,
            vmnet_mac_address_key),
            "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
            &vms->mac[0], &vms->mac[1], &vms->mac[2], &vms->mac[3],
            &vms->mac[4], &vms->mac[5]) != 6)
        {
            assert(0);
        }

        vms->mtu = (unsigned)xpc_dictionary_get_uint64(interface_param, vmnet_mtu_key);
        vms->max_packet_size = (unsigned)xpc_dictionary_get_uint64(interface_param,
            vmnet_max_packet_size_key);
        dispatch_semaphore_signal(iface_created);
    });

    /* Block until the completion handler has run. */
    dispatch_semaphore_wait(iface_created, DISPATCH_TIME_FOREVER);
    dispatch_release(if_create_q);

    if (iface == NULL || iface_status != VMNET_SUCCESS) {
        fprintf(stderr, "virtio_net: Could not create vmnet interface, "
            "permission denied or no entitlement?\n");
        free(vms);
        return (-1);
    }

    vms->iface = iface;
    sc->vms = vms;

    /* Deliver inbound packets to the device via its tap callback. */
    if_q = dispatch_queue_create("org.xhyve.vmnet.iface_q", 0);

    vmnet_interface_set_event_callback(iface, VMNET_INTERFACE_PACKETS_AVAILABLE,
        if_q, ^(UNUSED interface_event_t event_id, UNUSED xpc_object_t event)
    {
        e82545_tap_callback(sc);
    });

    /* Root was only needed to start the interface; drop it now. */
    if (drop_privileges() == -1) {
        perror("Dropping privileges after networking was enabled.");
        free(vms);
        return (-1);
    }

    return (0);
}
/*
 * Read one packet from the vmnet interface into the supplied iovec array.
 * Returns the packet size in bytes, or -1 if no packet was available.
 * The caller's buffers must hold a full max_packet_size frame (asserted).
 */
static ssize_t
vmn_read(struct vmnet_state *vms, struct iovec *iov, int n) {
    vmnet_return_t r;
    struct vmpktdesc v;
    int pktcnt;
    int i;

    /* Total capacity of the scatter buffers. */
    v.vm_pkt_size = 0;

    for (i = 0; i < n; i++) {
        v.vm_pkt_size += iov[i].iov_len;
    }

    /* vmnet requires room for a maximum-sized packet. */
    assert(v.vm_pkt_size >= vms->max_packet_size);

    v.vm_pkt_iov = iov;
    v.vm_pkt_iovcnt = (uint32_t) n;
    v.vm_flags = 0; /* TODO no clue what this is */

    pktcnt = 1;

    r = vmnet_read(vms->iface, &v, &pktcnt);

    assert(r == VMNET_SUCCESS);

    if (pktcnt < 1) {
        return (-1);
    }

    return ((ssize_t) v.vm_pkt_size);
}
/*
 * Write one packet, gathered from the iovec array, to the vmnet
 * interface. The frame must not exceed vmnet's max packet size.
 */
static void
vmn_write(struct vmnet_state *vms, struct iovec *iov, int n) {
    vmnet_return_t r;
    struct vmpktdesc v;
    int pktcnt;
    int i;

    /* Total bytes to transmit across all segments. */
    v.vm_pkt_size = 0;

    for (i = 0; i < n; i++) {
        v.vm_pkt_size += iov[i].iov_len;
    }

    assert(v.vm_pkt_size <= vms->max_packet_size);

    v.vm_pkt_iov = iov;
    v.vm_pkt_iovcnt = (uint32_t) n;
    v.vm_flags = 0; /* TODO no clue what this is */

    pktcnt = 1;

    r = vmnet_write(vms->iface, &v, &pktcnt);

    assert(r == VMNET_SUCCESS);
}
/*
 * Populate the emulated EEPROM with the vmnet MAC address, the PCI
 * identifiers and a valid NVM checksum so the guest driver accepts it.
 */
static void
e82545_init_eeprom(struct e82545_softc *sc)
{
    uint16_t checksum, i;

    /* mac addr: packed little-endian, two bytes per 16-bit NVM word */
    sc->eeprom_data[NVM_MAC_ADDR] = (uint16_t)((sc->vms->mac[0]) |
        (sc->vms->mac[1]) << 8);
    sc->eeprom_data[NVM_MAC_ADDR+1] = (uint16_t)((sc->vms->mac[2]) |
        (sc->vms->mac[3] << 8));
    sc->eeprom_data[NVM_MAC_ADDR+2] = (uint16_t)((sc->vms->mac[4]) |
        (sc->vms->mac[5] << 8));

    /* pci ids */
    sc->eeprom_data[NVM_SUB_DEV_ID] = E82545_SUBDEV_ID;
    sc->eeprom_data[NVM_SUB_VEN_ID] = E82545_VENDOR_ID_INTEL;
    sc->eeprom_data[NVM_DEV_ID] = E82545_DEV_ID_82545EM_COPPER;
    sc->eeprom_data[NVM_VEN_ID] = E82545_VENDOR_ID_INTEL;

    /* fill in the checksum: all words up to NVM_CHECKSUM_REG must sum
     * to NVM_SUM for the image to be considered valid */
    checksum = 0;
    for (i = 0; i < NVM_CHECKSUM_REG; i++) {
        checksum += sc->eeprom_data[i];
    }
    checksum = NVM_SUM - checksum;
    sc->eeprom_data[NVM_CHECKSUM_REG] = checksum;
    DPRINTF("eeprom checksum: 0x%x\r\n", checksum);
}
/* PHY register writes are accepted but ignored; only logged for debug. */
static void
e82545_write_mdi(UNUSED struct e82545_softc *sc, uint8_t reg_addr,
    uint8_t phy_addr, uint32_t data)
{
    DPRINTF("Write mdi reg:0x%x phy:0x%x data: 0x%x\r\n", reg_addr, phy_addr, data);
}
/*
 * Emulate MDIO reads of the PHY: report a link that is always up with
 * autonegotiation complete, and identify as a Marvell 88E1011 PHY.
 * Unknown registers read as zero.
 */
static uint32_t
e82545_read_mdi(UNUSED struct e82545_softc *sc, uint8_t reg_addr,
    uint8_t phy_addr)
{
    //DPRINTF("Read mdi reg:0x%x phy:0x%x\r\n", reg_addr, phy_addr);
    switch (reg_addr) {
    case PHY_STATUS:
        return (MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS |
            MII_SR_AUTONEG_COMPLETE);
    case PHY_AUTONEG_ADV:
        return NWAY_AR_SELECTOR_FIELD;
    case PHY_LP_ABILITY:
        return 0;
    case PHY_1000T_STATUS:
        return (SR_1000T_LP_FD_CAPS | SR_1000T_REMOTE_RX_STATUS |
            SR_1000T_LOCAL_RX_STATUS);
    case PHY_ID1:
        return (M88E1011_I_PHY_ID >> 16) & 0xFFFF;
    case PHY_ID2:
        return (M88E1011_I_PHY_ID | E82545_REVISION_4) & 0xFFFF;
    default:
        DPRINTF("Unknown mdi read reg:0x%x phy:0x%x\r\n", reg_addr, phy_addr);
        return 0;
    }
    /* not reached */
}
static void
e82545_eecd_strobe(struct e82545_softc *sc)
{
/* Microwire state machine */
/*
DPRINTF("eeprom state machine srtobe "
"0x%x 0x%x 0x%x 0x%x\r\n",
sc->nvm_mode, sc->nvm_bits,
sc->nvm_opaddr, sc->nvm_data);*/
if (sc->nvm_bits == 0) {
DPRINTF("eeprom state machine not expecting data! "
"0x%x 0x%x 0x%x 0x%x\r\n",
sc->nvm_mode, sc->nvm_bits,
sc->nvm_opaddr, sc->nvm_data);
return;
}
sc->nvm_bits--;
if (sc->nvm_mode == E82545_NVM_MODE_DATAOUT) {
/* shifting out */
if (sc->nvm_data & 0x8000) {
sc->eeprom_control |= E1000_EECD_DO;
} else {
sc->eeprom_control &= (uint32_t)~E1000_EECD_DO;
}
sc->nvm_data <<= 1;
if (sc->nvm_bits == 0) {
/* read done, back to opcode mode. */
sc->nvm_opaddr = 0;
sc->nvm_mode = E82545_NVM_MODE_OPADDR;
sc->nvm_bits = E82545_NVM_OPADDR_BITS;
}
} else if (sc->nvm_mode == E82545_NVM_MODE_DATAIN) {
/* shifting in */
sc->nvm_data <<= 1;
if (sc->eeprom_control & E1000_EECD_DI) {
sc->nvm_data |= 1;
}
if (sc->nvm_bits == 0) {
/* eeprom write */
uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
uint16_t addr = sc->nvm_opaddr & E82545_NVM_ADDR_MASK;
if (op != E82545_NVM_OPCODE_WRITE) {
DPRINTF("Illegal eeprom write op 0x%x\r\n",
sc->nvm_opaddr);
} else if (addr >= E82545_NVM_EEPROM_SIZE) {
DPRINTF("Illegal eeprom write addr 0x%x\r\n",
sc->nvm_opaddr);
} else {
DPRINTF("eeprom write eeprom[0x%x] = 0x%x\r\n",
addr, sc->nvm_data);
sc->eeprom_data[addr] = sc->nvm_data;
}
/* back to opcode mode */
sc->nvm_opaddr = 0;
sc->nvm_mode = E82545_NVM_MODE_OPADDR;
sc->nvm_bits = E82545_NVM_OPADDR_BITS;
}
} else if (sc->nvm_mode == E82545_NVM_MODE_OPADDR) {
sc->nvm_opaddr <<= 1;
if (sc->eeprom_control & E1000_EECD_DI) {
sc->nvm_opaddr |= 1;
}
if (sc->nvm_bits == 0) {
uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
switch (op) {
case E82545_NVM_OPCODE_EWEN:
DPRINTF("eeprom write enable: 0x%x\r\n",
sc->nvm_opaddr);
/* back to opcode mode */
sc->nvm_opaddr = 0;
sc->nvm_mode = E82545_NVM_MODE_OPADDR;
sc->nvm_bits = E82545_NVM_OPADDR_BITS;
break;
case E82545_NVM_OPCODE_READ:
{
uint16_t addr = sc->nvm_opaddr &
E82545_NVM_ADDR_MASK;
sc->nvm_mode = E82545_NVM_MODE_DATAOUT;
sc->nvm_bits = E82545_NVM_DATA_BITS;
if (addr < E82545_NVM_EEPROM_SIZE) {
sc->nvm_data = sc->eeprom_data[addr];
DPRINTF("eeprom read: eeprom[0x%x] = 0x%x\r\n",
addr, sc->nvm_data);
} else {
DPRINTF("eeprom illegal read: 0x%x\r\n",
sc->nvm_opaddr);
sc->nvm_data = 0;
}
break;
}
case E82545_NVM_OPCODE_WRITE:
sc->nvm_mode = E82545_NVM_MODE_DATAIN;
sc->nvm_bits = E82545_NVM_DATA_BITS;
sc->nvm_data = 0;
break;
default:
DPRINTF("eeprom unknown op: 0x%x\r\r",
sc->nvm_opaddr);
/* back to opcode mode */
sc->nvm_opaddr = 0;
sc->nvm_mode = E82545_NVM_MODE_OPADDR;
sc->nvm_bits = E82545_NVM_OPADDR_BITS;
}
}
} else {
DPRINTF("eeprom state machine wrong state! "
"0x%x 0x%x 0x%x 0x%x\r\n",
sc->nvm_mode, sc->nvm_bits,
sc->nvm_opaddr, sc->nvm_data);
}
}
/*
 * Interrupt-throttling timer callback: when the throttle interval ends,
 * assert the legacy IRQ if unmasked causes are still pending; otherwise
 * tear the throttle timer down.
 */
static void
e82545_itr_callback(UNUSED int fd, UNUSED enum ev_type type, void *param)
{
    uint32_t new;
    struct e82545_softc *sc = param;

    pthread_mutex_lock(&sc->esc_mtx);
    new = sc->esc_ICR & sc->esc_IMS;  /* causes that are set and unmasked */
    if (new && !sc->esc_irq_asserted) {
        DPRINTF("itr callback: lintr assert %x\r\n", new);
        sc->esc_irq_asserted = 1;
        pci_lintr_assert(sc->esc_pi);
    } else {
        mevent_delete(sc->esc_mevpitr);
        sc->esc_mevpitr = NULL;
    }
    pthread_mutex_unlock(&sc->esc_mtx);
}
/*
 * Record interrupt cause bits and, if that produces a newly pending
 * unmasked cause, assert the legacy IRQ (starting the ITR throttle
 * timer when throttling is configured).
 */
static void
e82545_icr_assert(struct e82545_softc *sc, uint32_t bits)
{
    uint32_t new;

    DPRINTF("icr assert: 0x%x\r\n", bits);

    /*
     * An interrupt is only generated if bits are set that
     * aren't already in the ICR, these bits are unmasked,
     * and there isn't an interrupt already pending.
     */
    new = bits & ~sc->esc_ICR & sc->esc_IMS;
    sc->esc_ICR |= bits;

    if (new == 0) {
        DPRINTF("icr assert: masked %x, ims %x\r\n", new, sc->esc_IMS);
    } else if (sc->esc_mevpitr != NULL) {
        DPRINTF("icr assert: throttled %x, ims %x\r\n", new, sc->esc_IMS);
    } else if (!sc->esc_irq_asserted) {
        DPRINTF("icr assert: lintr assert %x\r\n", new);
        sc->esc_irq_asserted = 1;
        pci_lintr_assert(sc->esc_pi);
        if (sc->esc_ITR != 0) {
            /* ITR counts in 256ns units; mevent timers take ms */
            sc->esc_mevpitr = mevent_add(
                (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
                EVF_TIMER, e82545_itr_callback, sc);
        }
    }
}
/*
 * Set bits in the interrupt mask (IMS). Unmasking a cause that is
 * already pending in the ICR may immediately assert the legacy IRQ.
 */
static void
e82545_ims_change(struct e82545_softc *sc, uint32_t bits)
{
    uint32_t new;

    /*
     * Changing the mask may allow previously asserted
     * but masked interrupt requests to generate an interrupt.
     */
    new = bits & sc->esc_ICR & ~sc->esc_IMS;
    sc->esc_IMS |= bits;

    if (new == 0) {
        DPRINTF("ims change: masked %x, ims %x\r\n", new, sc->esc_IMS);
    } else if (sc->esc_mevpitr != NULL) {
        DPRINTF("ims change: throttled %x, ims %x\r\n", new, sc->esc_IMS);
    } else if (!sc->esc_irq_asserted) {
        DPRINTF("ims change: lintr assert %x\n\r", new);
        sc->esc_irq_asserted = 1;
        pci_lintr_assert(sc->esc_pi);
        if (sc->esc_ITR != 0) {
            sc->esc_mevpitr = mevent_add(
                (sc->esc_ITR + 3905) / 3906,  /* 256ns -> 1ms */
                EVF_TIMER, e82545_itr_callback, sc);
        }
    }
}
/*
 * Clear interrupt cause bits; deassert the legacy IRQ when no unmasked
 * causes remain pending.
 */
static void
e82545_icr_deassert(struct e82545_softc *sc, uint32_t bits)
{
    DPRINTF("icr deassert: 0x%x\r\n", bits);

    sc->esc_ICR &= ~bits;

    /*
     * If there are no longer any interrupt sources and there
     * was an asserted interrupt, clear it
     */
    if (sc->esc_irq_asserted && !(sc->esc_ICR & sc->esc_IMS)) {
        DPRINTF("icr deassert: lintr deassert %x\r\n", bits);
        pci_lintr_deassert(sc->esc_pi);
        sc->esc_irq_asserted = 0;
    }
}
/*
 * Guest write to one of the interrupt registers (ICR/ITR/ICS/IMS/IMC);
 * dispatch to the matching cause/mask update. Unknown offsets ignored.
 */
static void
e82545_intr_write(struct e82545_softc *sc, uint32_t offset, uint32_t value)
{
    DPRINTF("intr_write: off %x, val %x\n\r", offset, value);

    switch (offset) {
    case E1000_ICR:
        /* writing 1s to ICR clears those causes */
        e82545_icr_deassert(sc, value);
        break;
    case E1000_ITR:
        sc->esc_ITR = value;
        break;
    case E1000_ICS:
        sc->esc_ICS = value; /* not used: store for debug */
        e82545_icr_assert(sc, value);
        break;
    case E1000_IMS:
        e82545_ims_change(sc, value);
        break;
    case E1000_IMC:
        sc->esc_IMC = value; /* for debug */
        sc->esc_IMS &= ~value;
        // XXX clear interrupts if all ICR bits now masked
        // and interrupt was pending ?
        break;
    default:
        break;
    }
}
/*
 * Guest read of an interrupt register. Reading ICR is destructive: it
 * returns the pending causes and clears them (deasserting the IRQ).
 * Write-only registers (ICS/IMC) and unknown offsets read as zero.
 */
static uint32_t
e82545_intr_read(struct e82545_softc *sc, uint32_t offset)
{
    uint32_t retval;

    retval = 0;

    DPRINTF("intr_read: off %x\n\r", offset);

    switch (offset) {
    case E1000_ICR:
        retval = sc->esc_ICR;
        sc->esc_ICR = 0;
        e82545_icr_deassert(sc, (uint32_t)~0);
        break;
    case E1000_ITR:
        retval = sc->esc_ITR;
        break;
    case E1000_ICS:
        /* write-only register */
        break;
    case E1000_IMS:
        retval = sc->esc_IMS;
        break;
    case E1000_IMC:
        /* write-only register */
        break;
    default:
        break;
    }

    return (retval);
}
/*
 * Guest write to the device control register (CTRL): store the value
 * (with the self-clearing RST bit stripped) and perform a software
 * reset when RST is set.
 */
static void
e82545_devctl(struct e82545_softc *sc, uint32_t val)
{
    sc->esc_CTRL = val & (uint32_t)~E1000_CTRL_RST;

    if (val & E1000_CTRL_RST) {
        DPRINTF("e1k: s/w reset, ctl %x\n", val);
        e82545_reset(sc, 1);
    }
    /* XXX check for phy reset ? */
}
/*
 * Recompute the receive descriptor base address from RDBAH/RDBAL
 * and refresh the cached host mapping of the descriptor ring.
 */
static void
e82545_rx_update_rdba(struct e82545_softc *sc)
{
	uint64_t base;

	/* XXX verify desc base/len within phys mem range */
	base = ((uint64_t)sc->esc_RDBAH << 32) | sc->esc_RDBAL;
	sc->esc_rdba = base;
	/* Cache host mapping of guest descriptor array */
	sc->esc_rxdesc = paddr_guest2host(sc->esc_rdba, sc->esc_RDLEN);
}
/*
 * Handle a write to the receive control register (RCTL):
 * latch the value and start/stop the receiver on an enable-bit edge.
 */
static void
e82545_rx_ctl(struct e82545_softc *sc, uint32_t val)
{
	int enable;

	enable = ((val & E1000_RCTL_EN) == E1000_RCTL_EN);

	/* Save RCTL after stripping reserved bits 31:27,24,21,14,11:10,0 */
	sc->esc_RCTL = val & ~0xF9204c01;
	DPRINTF("rx_ctl - %s RCTL %x, val %x\n",
	    enable ? "on" : "off", sc->esc_RCTL, val);

	/* Only an actual state change requires any work. */
	if (enable == sc->esc_rx_enabled)
		return;

	if (enable) {
		/* Catch disallowed/unimplemented settings */
		//assert(!(val & E1000_RCTL_LBM_TCVR));
		sc->esc_rx_loopback =
		    (sc->esc_RCTL & E1000_RCTL_LBM_TCVR) != 0;
		e82545_rx_update_rdba(sc);
		e82545_rx_enable(sc);
	} else {
		e82545_rx_disable(sc);
		sc->esc_rx_loopback = 0;
		sc->esc_rdba = 0;
		sc->esc_rxdesc = NULL;
	}
}
/*
 * Recompute the transmit descriptor base address from TDBAH/TDBAL
 * and refresh the cached host mapping of the descriptor ring.
 */
static void
e82545_tx_update_tdba(struct e82545_softc *sc)
{
	uint64_t base;

	/* XXX verify desc base/len within phys mem range */
	base = ((uint64_t)sc->esc_TDBAH << 32) | sc->esc_TDBAL;
	sc->esc_tdba = base;
	/* Cache host mapping of guest descriptor array */
	sc->esc_txdesc = paddr_guest2host(sc->esc_tdba, sc->esc_TDLEN);
}
/*
 * Handle a write to the transmit control register (TCTL).
 * Note that TCTL is only latched when the enable state changes,
 * matching the original behavior.
 */
static void
e82545_tx_ctl(struct e82545_softc *sc, uint32_t val)
{
	int enable;

	enable = ((val & E1000_TCTL_EN) == E1000_TCTL_EN);

	/* ignore TCTL_EN settings that don't change state */
	if (enable != sc->esc_tx_enabled) {
		if (enable) {
			e82545_tx_update_tdba(sc);
			e82545_tx_enable(sc);
		} else {
			e82545_tx_disable(sc);
			sc->esc_tdba = 0;
			sc->esc_txdesc = NULL;
		}

		/* Save TCTL value after stripping reserved bits 31:25,23,2,0 */
		sc->esc_TCTL = val & ~0xFE800005;
	}
}
/*
 * Decode the receive buffer size from the RCTL BSEX/SZ bits.
 * Returns 256 for the reserved/forbidden encoding.
 */
static int
e82545_bufsz(uint32_t rctl)
{
	uint32_t sz;

	sz = rctl & (E1000_RCTL_BSEX | E1000_RCTL_SZ_256);
	if (sz == E1000_RCTL_SZ_2048)
		return (2048);
	if (sz == E1000_RCTL_SZ_1024)
		return (1024);
	if (sz == E1000_RCTL_SZ_512)
		return (512);
	if (sz == E1000_RCTL_SZ_256)
		return (256);
	if (sz == (E1000_RCTL_BSEX|E1000_RCTL_SZ_16384))
		return (16384);
	if (sz == (E1000_RCTL_BSEX|E1000_RCTL_SZ_8192))
		return (8192);
	if (sz == (E1000_RCTL_BSEX|E1000_RCTL_SZ_4096))
		return (4096);
	return (256); /* Forbidden value. */
}
static uint8_t dummybuf[2048];
/* XXX one packet at a time until this is debugged */
/*
 * Receive path: called by the backend when packet data is available.
 * Walks the guest rx descriptor ring between RDH and RDT, scatters the
 * packet into guest buffers, applies the VLAN filter, writes back
 * descriptor status, and accumulates interrupt causes to assert once
 * at the end. Drops esc_mtx while copying so the guest can run.
 */
static void
e82545_tap_callback(struct e82545_softc *sc)
{
	struct e1000_rx_desc *rxd;
	struct iovec vec[64];
	int left, len, lim, maxpktsz, maxpktdesc, bufsz, i, n, size;
	uint32_t cause = 0;
	uint16_t *tp, tag, head;

	pthread_mutex_lock(&sc->esc_mtx);
	DPRINTF("rx_run: head %x, tail %x\r\n", sc->esc_RDH, sc->esc_RDT);

	/* Receiver disabled or looped back: drain one packet and drop it. */
	if (!sc->esc_rx_enabled || sc->esc_rx_loopback) {
		DPRINTF("rx disabled (!%d || %d) -- packet(s) dropped\r\n",
		    sc->esc_rx_enabled, sc->esc_rx_loopback);
		vec[0].iov_base = dummybuf;
		vec[0].iov_len = sizeof(dummybuf);
		(void) vmn_read(sc->vms, vec, 1);
		goto done1;
	}
	bufsz = e82545_bufsz(sc->esc_RCTL);
	/* Max frame: 16KB with long-packet enable, else std MTU + headers. */
	maxpktsz = (sc->esc_RCTL & E1000_RCTL_LPE) ? 16384 : 1522;
	maxpktdesc = (maxpktsz + bufsz - 1) / bufsz;
	size = sc->esc_RDLEN / 16;	/* ring entries: descriptors are 16 bytes */
	head = sc->esc_RDH;
	left = (size + sc->esc_RDT - head) % size;	/* free descriptors */
	if (left < maxpktdesc) {
		/* Not enough ring space for a worst-case packet: drop one. */
		DPRINTF("rx overflow (%d < %d) -- packet(s) dropped\r\n",
		    left, maxpktdesc);
		vec[0].iov_base = dummybuf;
		vec[0].iov_len = sizeof(dummybuf);
		(void) vmn_read(sc->vms, vec, 1);
		goto done1;
	}

	sc->esc_rx_active = 1;
	/* Drop the lock for the copy loop; esc_rx_active guards teardown. */
	pthread_mutex_unlock(&sc->esc_mtx);

	/* Bounded batch (size/4) so one callback cannot hog the ring. */
	for (lim = size / 4; lim > 0 && left >= maxpktdesc; lim -= n) {

		/* Grab rx descriptor pointed to by the head pointer */
		for (i = 0; i < maxpktdesc; i++) {
			rxd = &sc->esc_rxdesc[(head + i) % size];
			vec[i].iov_base = paddr_guest2host(rxd->buffer_addr, (size_t)bufsz);
			vec[i].iov_len = (size_t)bufsz;
		}
		len = (int)vmn_read(sc->vms, vec, maxpktdesc);
		if (len <= 0) {
			DPRINTF("tap: readv() returned %d\n", len);
			goto done;
		}

		/*
		 * Adjust the packet length based on whether the CRC needs
		 * to be stripped or if the packet is less than the minimum
		 * eth packet size.
		 */
		if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
			len = ETHER_MIN_LEN - ETHER_CRC_LEN;
		if (!(sc->esc_RCTL & E1000_RCTL_SECRC))
			len += ETHER_CRC_LEN;
		n = (len + bufsz - 1) / bufsz;	/* descriptors consumed */

		DPRINTF("packet read %d bytes, %d segs, head %d\r\n",
		    len, n, head);

		/* Apply VLAN filter. */
		tp = (uint16_t *)vec[0].iov_base + 6;	/* TPID at byte offset 12 */
		if ((sc->esc_RCTL & E1000_RCTL_VFE) &&
		    (ntohs(tp[0]) == sc->esc_VET)) {
			tag = ntohs(tp[1]) & 0x0fff;
			if ((sc->esc_fvlan[tag >> 5] &
			    (1 << (tag & 0x1f))) != 0) {
				DPRINTF("known VLAN %d\r\n", tag);
			} else {
				/* Unknown VLAN: drop, consume no descriptors. */
				DPRINTF("unknown VLAN %d\r\n", tag);
				n = 0;
				continue;
			}
		}

		/* Update all consumed descriptors. */
		for (i = 0; i < n - 1; i++) {
			rxd = &sc->esc_rxdesc[(head + i) % size];
			rxd->length = (uint16_t)bufsz;
			rxd->csum = 0;
			rxd->errors = 0;
			rxd->special = 0;
			rxd->status = E1000_RXD_STAT_DD;
		}
		/* Final descriptor carries the remainder and EOP. */
		rxd = &sc->esc_rxdesc[(head + i) % size];
		/*
		 * NOTE(review): len % bufsz yields 0 when len is an exact
		 * multiple of bufsz; presumably the last segment should then
		 * report bufsz -- confirm against the 8254x datasheet.
		 */
		rxd->length = (uint16_t)(len % bufsz);
		rxd->csum = 0;
		rxd->errors = 0;
		rxd->special = 0;
		/* XXX signal no checksum for now */
		rxd->status = E1000_RXD_STAT_PIF | E1000_RXD_STAT_IXSM |
		    E1000_RXD_STAT_EOP | E1000_RXD_STAT_DD;

		/* Schedule receive interrupts. */
		if (len <= (int)sc->esc_RSRPD) {
			/* Small-packet threshold hit as well. */
			cause |= E1000_ICR_SRPD | E1000_ICR_RXT0;
		} else {
			/* XXX: RDRT and RADV timers should be here. */
			cause |= E1000_ICR_RXT0;
		}

		head = (uint16_t)((head + n) % size);
		left -= n;
	}

done:
	pthread_mutex_lock(&sc->esc_mtx);
	sc->esc_rx_active = 0;
	/* Wake a disabler waiting in e82545_rx_disable(). */
	if (sc->esc_rx_enabled == 0)
		pthread_cond_signal(&sc->esc_rx_cond);

	sc->esc_RDH = head;
	/* Respect E1000_RCTL_RDMTS */
	left = (size + sc->esc_RDT - head) % size;
	if (left < (size >> (((sc->esc_RCTL >> 8) & 3) + 1)))
		cause |= E1000_ICR_RXDMT0;

	/* Assert all accumulated interrupts. */
	if (cause != 0)
		e82545_icr_assert(sc, cause);
done1:
	DPRINTF("rx_run done: head %x, tail %x\r\n", sc->esc_RDH, sc->esc_RDT);
	pthread_mutex_unlock(&sc->esc_mtx);
}
/*
 * Fold a 32-bit one's-complement accumulator down to 16 bits.
 * Two folds are sufficient: the first leaves at most one carry bit,
 * which the second absorbs (equivalent to the conditional-subtract form).
 */
static uint16_t
e82545_carry(uint32_t sum)
{

	sum = (sum & 0xFFFF) + (sum >> 16);
	sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)sum;
}
/*
 * One's-complement checksum of a flat buffer. A trailing odd byte
 * contributes as the high (network-order) byte of a final word.
 */
static uint16_t
e82545_buf_checksum(uint8_t *buf, int len)
{
	uint32_t sum;
	int even_len, i;

	sum = 0;
	/* Sum all complete 16-bit words first... */
	even_len = len & ~1;
	for (i = 0; i < even_len; i += 2)
		sum += read_uint16_unaligned(buf + i);

	/*
	 * If there's a single byte left over, checksum it, too.
	 * Network byte order is big-endian, so the remaining byte is
	 * the high byte.
	 */
	if (len & 1)
		sum += htons(buf[even_len] << 8);

	return (e82545_carry(sum));
}
/*
 * One's-complement checksum over [off, off+len) of a scattered iovec
 * chain. Per-vector partial sums are combined; when a vector ends on an
 * odd byte, the next vector's sum is byte-swapped (<< 8) before adding
 * so word alignment is preserved across the boundary.
 */
static uint16_t
e82545_iov_checksum(struct iovec *iov, int iovcnt, int off, int len)
{
	int now, odd;
	uint32_t sum = 0, s;

	/* Skip completely unneeded vectors. */
	while (iovcnt > 0 && iov->iov_len <= (size_t)off && off > 0) {
		off -= iov->iov_len;
		iov++;
		iovcnt--;
	}

	/* Calculate checksum of requested range. */
	odd = 0;	/* 1 when the running total ends mid-word */
	while (len > 0 && iovcnt > 0) {
		now = MIN(len, (int)(iov->iov_len - (size_t)off));
		s = e82545_buf_checksum((uint8_t *)iov->iov_base + off, now);
		/* Swap the partial sum if the previous chunk ended odd. */
		sum += odd ? (s << 8) : s;
		odd ^= (now & 1);
		len -= now;
		off = 0;	/* offset applies only to the first vector */
		iov++;
		iovcnt--;
	}

	return (e82545_carry(sum));
}
/*
 * Return the transmit descriptor type.
 * Extended descriptors (DEXT set) encode their type in the low dword;
 * legacy descriptors are reported as type 0.
 */
static int
e82545_txdesc_type(uint32_t lower)
{

	if ((lower & E1000_TXD_CMD_DEXT) == 0)
		return (0);
	return (int)(lower & E1000_TXD_MASK);
}
/*
 * Compute the checksum described by ck over the iovec chain and store
 * its complement at the requested offset in the first (header) vector.
 */
static void
e82545_transmit_checksum(struct iovec *iov, int iovcnt, struct ck_info *ck)
{
	uint16_t cksum;
	int cklen;

	DPRINTF("tx cksum: iovcnt/s/off/len %d/%d/%d/%d\r\n",
	    iovcnt, ck->ck_start, ck->ck_off, ck->ck_len);

	/* A zero ck_len means "checksum through the end of the packet". */
	if (ck->ck_len != 0)
		cklen = ck->ck_len - ck->ck_start + 1;
	else
		cklen = INT_MAX;
	cksum = e82545_iov_checksum(iov, iovcnt, ck->ck_start, cklen);
	write_uint16_unaligned((uint8_t *)iov[0].iov_base + ck->ck_off, ~cksum);
}
/*
 * Hand a fully-formed frame (as an iovec chain) to the network
 * backend; silently discard it when no backend is attached.
 */
static void
e82545_transmit_backend(struct e82545_softc *sc, struct iovec *iov, int iovcnt)
{

	if (sc->vms != NULL)
		vmn_write(sc->vms, iov, iovcnt);
}
/*
 * Walk the tx ring from head up to (but not including) tail and set
 * the descriptor-done status on every descriptor that requested a
 * report-status (RS) writeback; flag *tdwb when any was updated.
 */
static void
e82545_transmit_done(struct e82545_softc *sc, uint16_t head, uint16_t tail,
    uint16_t dsize, int *tdwb)
{
	union e1000_tx_udesc *cur;

	while (head != tail) {
		cur = &sc->esc_txdesc[head];
		if (cur->td.lower.data & E1000_TXD_CMD_RS) {
			cur->td.upper.data |= E1000_TXD_STAT_DD;
			*tdwb = 1;
		}
		head = (head + 1) % dsize;
	}
}
/*
 * Assemble and transmit one packet from the tx descriptor ring,
 * starting at head and stopping at tail. Handles context, legacy and
 * data descriptors, optional VLAN tag insertion, IP/TCP checksum
 * offload, and TCP/UDP segmentation offload (TSO). Returns the number
 * of descriptors consumed (0 if the ring was empty) and reports the
 * new head through *rhead.
 *
 * NOTE(review): hdrlen and the ckinfo start/off/len fields are taken
 * from guest-controlled descriptors and used to size an alloca'd
 * buffer and index into it without range validation here -- compare
 * the hardening added for FreeBSD-SA-19:21.bhyve; confirm upstream fix
 * status before relying on this path with untrusted guests.
 */
static int
e82545_transmit(struct e82545_softc *sc, uint16_t head, uint16_t tail,
    uint16_t dsize, uint16_t *rhead, int *tdwb)
{
	uint8_t *hdr = NULL;
	uint8_t *hdrp = NULL;
	struct iovec iovb[I82545_MAX_TXSEGS + 2];
	struct iovec tiov[I82545_MAX_TXSEGS + 2];
	struct e1000_context_desc *cd;
	struct ck_info ckinfo[2];	/* [0] = IP cksum, [1] = TCP/UDP cksum */
	struct iovec *iov;
	union e1000_tx_udesc *dsc;
	int desc, dtype, len, ntype, iovcnt, tlen, hdrlen, vlen, tcp, tso;
	int mss, paylen, seg, tiovcnt, left, now, nleft, nnow, pv, pvoff;
	uint32_t tcpsum, tcpseq;
	uint16_t ipcs, tcpcs, ipid, ohead;

	ckinfo[0].ck_valid = ckinfo[1].ck_valid = 0;
	iovcnt = 0;
	tlen = 0;
	ntype = 0;
	tso = 0;
	ohead = head;

	/* iovb[0/1] may be used for writable copy of headers. */
	iov = &iovb[2];

	/* Gather the descriptors of one packet (up to the EOP marker). */
	for (desc = 0; ; desc++, head = (head + 1) % dsize) {
		if (head == tail) {
			/* Ran out of descriptors before EOP: nothing sent. */
			*rhead = head;
			return (0);
		}
		dsc = &sc->esc_txdesc[head];
		dtype = e82545_txdesc_type(dsc->td.lower.data);

		if (desc == 0) {
			switch (dtype) {
			case E1000_TXD_TYP_C:
				DPRINTF("tx ctxt desc idx %d: %016llx "
				    "%08x%08x\r\n",
				    head, dsc->td.buffer_addr,
				    dsc->td.upper.data, dsc->td.lower.data);
				/* Save context and return */
				sc->esc_txctx = dsc->cd;
				goto done;
			case E1000_TXD_TYP_L:
				DPRINTF("tx legacy desc idx %d: %08x%08x\r\n",
				    head, dsc->td.upper.data, dsc->td.lower.data);
				/*
				 * legacy cksum start valid in first descriptor
				 */
				ntype = dtype;
				ckinfo[0].ck_start = dsc->td.upper.fields.css;
				break;
			case E1000_TXD_TYP_D:
				DPRINTF("tx data desc idx %d: %08x%08x\r\n",
				    head, dsc->td.upper.data, dsc->td.lower.data);
				ntype = dtype;
				break;
			default:
				break;
			}
		} else {
			/* Descriptor type must be consistent */
			assert(dtype == ntype);
			DPRINTF("tx next desc idx %d: %08x%08x\r\n",
			    head, dsc->td.upper.data, dsc->td.lower.data);
		}

		/* Buffer length field differs between legacy and data descs. */
		len = (dtype == E1000_TXD_TYP_L) ? dsc->td.lower.flags.length :
		    dsc->dd.lower.data & 0xFFFFF;

		if (len > 0) {
			/* Strip checksum supplied by guest. */
			if ((dsc->td.lower.data & E1000_TXD_CMD_EOP) != 0 &&
			    (dsc->td.lower.data & E1000_TXD_CMD_IFCS) == 0)
				len -= 2;
			tlen += len;
			if (iovcnt < I82545_MAX_TXSEGS) {
				iov[iovcnt].iov_base = paddr_guest2host(dsc->td.buffer_addr, (size_t)len);
				iov[iovcnt].iov_len = (size_t)len;
			}
			/* Count past the cap so overruns are detected below. */
			iovcnt++;
		}

		/*
		 * Pull out info that is valid in the final descriptor
		 * and exit descriptor loop.
		 */
		if (dsc->td.lower.data & E1000_TXD_CMD_EOP) {
			if (dtype == E1000_TXD_TYP_L) {
				if (dsc->td.lower.data & E1000_TXD_CMD_IC) {
					ckinfo[0].ck_valid = 1;
					ckinfo[0].ck_off =
					    dsc->td.lower.flags.cso;
					ckinfo[0].ck_len = 0;
				}
			} else {
				/* Offload parameters come from the saved context. */
				cd = &sc->esc_txctx;
				if (dsc->dd.lower.data & E1000_TXD_CMD_TSE)
					tso = 1;
				if (dsc->dd.upper.fields.popts &
				    E1000_TXD_POPTS_IXSM)
					ckinfo[0].ck_valid = 1;
				if (dsc->dd.upper.fields.popts &
				    E1000_TXD_POPTS_IXSM || tso) {
					ckinfo[0].ck_start =
					    cd->lower_setup.ip_fields.ipcss;
					ckinfo[0].ck_off =
					    cd->lower_setup.ip_fields.ipcso;
					ckinfo[0].ck_len =
					    cd->lower_setup.ip_fields.ipcse;
				}
				if (dsc->dd.upper.fields.popts &
				    E1000_TXD_POPTS_TXSM)
					ckinfo[1].ck_valid = 1;
				if (dsc->dd.upper.fields.popts &
				    E1000_TXD_POPTS_TXSM || tso) {
					ckinfo[1].ck_start =
					    cd->upper_setup.tcp_fields.tucss;
					ckinfo[1].ck_off =
					    cd->upper_setup.tcp_fields.tucso;
					ckinfo[1].ck_len =
					    cd->upper_setup.tcp_fields.tucse;
				}
			}
			break;
		}
	}

	if (iovcnt > I82545_MAX_TXSEGS) {
		WPRINTF("tx too many descriptors (%d > %d) -- dropped\r\n",
		    iovcnt, I82545_MAX_TXSEGS);
		goto done;
	}

	hdrlen = vlen = 0;
	/* Estimate writable space for VLAN header insertion. */
	if ((sc->esc_CTRL & E1000_CTRL_VME) &&
	    (dsc->td.lower.data & E1000_TXD_CMD_VLE)) {
		hdrlen = ETHER_ADDR_LEN*2;
		vlen = ETHER_VLAN_ENCAP_LEN;
	}
	if (!tso) {
		/* Estimate required writable space for checksums. */
		if (ckinfo[0].ck_valid)
			hdrlen = MAX(hdrlen, ckinfo[0].ck_off + 2);
		if (ckinfo[1].ck_valid)
			hdrlen = MAX(hdrlen, ckinfo[1].ck_off + 2);
		/* Round up writable space to the first vector. */
		if (hdrlen != 0 && iov[0].iov_len > (size_t)hdrlen &&
		    iov[0].iov_len < (size_t)(hdrlen + 100))
			hdrlen = (int)iov[0].iov_len;
	} else {
		/* In case of TSO header length provided by software. */
		hdrlen = sc->esc_txctx.tcp_seg_setup.fields.hdr_len;
	}

	/* Allocate, fill and prepend writable header vector. */
	if (hdrlen != 0) {
		/* vlen extra bytes in front leave room for the VLAN tag. */
		hdr = __builtin_alloca((size_t)(hdrlen + vlen));
		hdr += vlen;
		/* Copy the first hdrlen packet bytes into the writable copy,
		 * consuming (and trimming) the leading guest iovecs. */
		for (left = hdrlen, hdrp = hdr; left > 0;
		    left -= now, hdrp += now) {
			now = MIN(left, (int)(iov->iov_len));
			memcpy(hdrp, iov->iov_base, now);
			iov->iov_base = (uint8_t *)iov->iov_base + now;
			iov->iov_len -= (size_t)now;
			if (iov->iov_len == 0) {
				iov++;
				iovcnt--;
			}
		}
		/* Prepend the header copy as the new first vector. */
		iov--;
		iovcnt++;
		iov->iov_base = hdr;
		iov->iov_len = (size_t)hdrlen;
	}

	/* Insert VLAN tag. */
	if (vlen != 0) {
		hdr -= ETHER_VLAN_ENCAP_LEN;
		/* Shift the dst/src MACs down, then splice in TPID + TCI. */
		memmove(hdr, hdr + ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN*2);
		hdrlen += ETHER_VLAN_ENCAP_LEN;
		hdr[ETHER_ADDR_LEN*2 + 0] = (uint8_t)(sc->esc_VET >> 8);
		hdr[ETHER_ADDR_LEN*2 + 1] = (uint8_t)(sc->esc_VET & 0xff);
		hdr[ETHER_ADDR_LEN*2 + 2] = (uint8_t)(dsc->td.upper.fields.special >> 8);
		hdr[ETHER_ADDR_LEN*2 + 3] = (uint8_t)(dsc->td.upper.fields.special & 0xff);
		iov->iov_base = hdr;
		iov->iov_len += ETHER_VLAN_ENCAP_LEN;
		/* Correct checksum offsets after VLAN tag insertion. */
		ckinfo[0].ck_start += ETHER_VLAN_ENCAP_LEN;
		ckinfo[0].ck_off += ETHER_VLAN_ENCAP_LEN;
		if (ckinfo[0].ck_len != 0)
			ckinfo[0].ck_len += ETHER_VLAN_ENCAP_LEN;
		ckinfo[1].ck_start += ETHER_VLAN_ENCAP_LEN;
		ckinfo[1].ck_off += ETHER_VLAN_ENCAP_LEN;
		if (ckinfo[1].ck_len != 0)
			ckinfo[1].ck_len += ETHER_VLAN_ENCAP_LEN;
	}

	/* Simple non-TSO case. */
	if (!tso) {
		/* Calculate checksums and transmit. */
		if (ckinfo[0].ck_valid)
			e82545_transmit_checksum(iov, iovcnt, &ckinfo[0]);
		if (ckinfo[1].ck_valid)
			e82545_transmit_checksum(iov, iovcnt, &ckinfo[1]);
		e82545_transmit_backend(sc, iov, iovcnt);
		goto done;
	}

	/* Doing TSO. */
	tcp = (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) != 0;
	mss = sc->esc_txctx.tcp_seg_setup.fields.mss;
	paylen = (sc->esc_txctx.cmd_and_length & 0x000fffff);
	DPRINTF("tx %s segmentation offload %d+%d/%d bytes %d iovs\r\n",
	    tcp ? "TCP" : "UDP", hdrlen, paylen, mss, iovcnt);
	/* Snapshot IP id, TCP sequence and the partial checksums so each
	 * emitted segment can be patched relative to the original header. */
	ipid = ntohs(read_uint16_unaligned(&hdr[ckinfo[0].ck_start + 4]));
	tcpseq = ntohl(read_uint32_unaligned(&hdr[ckinfo[1].ck_start + 4]));
	ipcs = read_uint16_unaligned(&hdr[ckinfo[0].ck_off]);
	tcpcs = 0;
	if (ckinfo[1].ck_valid)	/* Save partial pseudo-header checksum. */
		tcpcs = read_uint16_unaligned(&hdr[ckinfo[1].ck_off]);
	pv = 1;		/* payload vector index (0 is the header) */
	pvoff = 0;	/* offset within the current payload vector */
	for (seg = 0, left = paylen; left > 0; seg++, left -= now) {
		now = MIN(left, mss);

		/* Construct IOVs for the segment. */
		/* Include whole original header. */
		tiov[0].iov_base = hdr;
		tiov[0].iov_len = (size_t)hdrlen;
		tiovcnt = 1;
		/* Include respective part of payload IOV. */
		for (nleft = now; pv < iovcnt && nleft > 0; nleft -= nnow) {
			nnow = MIN(nleft, (int)iov[pv].iov_len - pvoff);
			tiov[tiovcnt].iov_base = (uint8_t *)iov[pv].iov_base + pvoff;
			tiov[tiovcnt++].iov_len = (size_t)nnow;
			if (pvoff + nnow == (int)iov[pv].iov_len) {
				pv++;
				pvoff = 0;
			} else
				pvoff += nnow;
		}
		DPRINTF("tx segment %d %d+%d bytes %d iovs\r\n",
		    seg, hdrlen, now, tiovcnt);

		/* Update IP header. */
		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_IP) {
			/* IPv4 -- set length and ID */
			write_uint16_unaligned(&hdr[ckinfo[0].ck_start + 2],
			    htons(hdrlen - ckinfo[0].ck_start + now));
			write_uint16_unaligned(&hdr[ckinfo[0].ck_start + 4],
			    htons(ipid + seg));
		} else {
			/* IPv6 -- set length */
			write_uint16_unaligned(&hdr[ckinfo[0].ck_start + 4],
			    htons(hdrlen - ckinfo[0].ck_start - 40 + now));
		}

		/* Update pseudo-header checksum. */
		tcpsum = tcpcs;
		tcpsum += htons(hdrlen - ckinfo[1].ck_start + now);

		/* Update TCP/UDP headers. */
		if (tcp) {
			/* Update sequence number and FIN/PUSH flags. */
			write_uint32_unaligned(&hdr[ckinfo[1].ck_start + 4],
			    htonl((int)tcpseq + paylen - left));
			/* Only the last segment keeps FIN/PUSH. */
			if (now < left) {
				hdr[ckinfo[1].ck_start + 13] &=
				    ~(TH_FIN | TH_PUSH);
			}
		} else {
			/* Update payload length. */
			write_uint32_unaligned(&hdr[ckinfo[1].ck_start + 4],
			    (uint32_t)(hdrlen - (int)ckinfo[1].ck_start + now));
		}

		/* Calculate checksums and transmit. */
		if (ckinfo[0].ck_valid) {
			/* Restore the original partial sum before recomputing. */
			write_uint16_unaligned(&hdr[ckinfo[0].ck_off], ipcs);
			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[0]);
		}
		if (ckinfo[1].ck_valid) {
			write_uint16_unaligned(&hdr[ckinfo[1].ck_off],
			    e82545_carry(tcpsum));
			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[1]);
		}
		e82545_transmit_backend(sc, tiov, tiovcnt);
	}

done:
	head = (head + 1) % dsize;
	e82545_transmit_done(sc, ohead, head, dsize, tdwb);

	*rhead = head;
	return (desc + 1);
}
/*
 * Drain the tx descriptor ring. Entered with esc_mtx held (it is
 * dropped across the transmit loop and re-acquired before the shared
 * head/cause state is updated). Processes at most size/4 descriptors
 * per call so a busy guest cannot monopolize the tx thread, then
 * raises TXDW/TXQE interrupt causes as appropriate.
 */
static void
e82545_tx_run(struct e82545_softc *sc)
{
	uint32_t cause;
	uint16_t head, rhead, tail, size;
	int lim, tdwb, sent;

	head = sc->esc_TDH;
	tail = sc->esc_TDT;
	size = (uint16_t)(sc->esc_TDLEN / 16);	/* 16-byte descriptors */
	DPRINTF("tx_run: head %x, rhead %x, tail %x\r\n",
	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);

	/* Unlocked while packets are assembled from guest memory. */
	pthread_mutex_unlock(&sc->esc_mtx);
	rhead = head;
	tdwb = 0;
	for (lim = size / 4; sc->esc_tx_enabled && lim > 0; lim -= sent) {
		sent = e82545_transmit(sc, head, tail, size, &rhead, &tdwb);
		if (sent == 0)
			break;
		head = rhead;
	}
	pthread_mutex_lock(&sc->esc_mtx);

	sc->esc_TDH = head;
	sc->esc_TDHr = rhead;
	cause = 0;
	/* TXDW: at least one descriptor writeback was requested (RS). */
	if (tdwb)
		cause |= E1000_ICR_TXDW;
	/* TXQE: we made progress and the queue is now empty. */
	if (lim != size / 4 && sc->esc_TDH == sc->esc_TDT)
		cause |= E1000_ICR_TXQE;
	if (cause)
		e82545_icr_assert(sc, cause);

	DPRINTF("tx_run done: head %x, rhead %x, tail %x\r\n",
	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
}
/*
 * Dedicated transmit thread: sleeps on esc_tx_cond until transmit is
 * enabled and the ring is non-empty, then processes descriptors via
 * e82545_tx_run(). Runs forever with esc_mtx held except while
 * e82545_tx_run() drops it internally.
 */
static _Noreturn void *
e82545_tx_thread(void *param)
{
	struct e82545_softc *sc = param;
	char nstr[80];

	snprintf(nstr, sizeof(nstr), "e82545-%d:%d tx", sc->esc_pi->pi_slot,
	    sc->esc_pi->pi_func);
	pthread_setname_np(nstr);

	pthread_mutex_lock(&sc->esc_mtx);
	for (;;) {
		/* Wait until work is available (tx enabled, ring non-empty). */
		while (!sc->esc_tx_enabled || sc->esc_TDHr == sc->esc_TDT) {
			/*
			 * NOTE(review): this break condition is the exact
			 * negation of the while guard and appears unreachable;
			 * harmless, but likely leftover restructuring.
			 */
			if (sc->esc_tx_enabled && sc->esc_TDHr != sc->esc_TDT)
				break;
			sc->esc_tx_active = 0;
			/* Wake a disabler waiting in e82545_tx_disable(). */
			if (sc->esc_tx_enabled == 0)
				pthread_cond_signal(&sc->esc_tx_cond);
			pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
		}
		sc->esc_tx_active = 1;

		/* Process some tx descriptors. Lock dropped inside. */
		e82545_tx_run(sc);
	}
}
/*
 * Kick the transmit thread if it is currently parked on esc_tx_cond.
 * Called after TDT updates; presumably with esc_mtx held (all callers
 * in view hold it) -- confirm for any new call sites.
 */
static void
e82545_tx_start(struct e82545_softc *sc)
{

	if (sc->esc_tx_active == 0)
		pthread_cond_signal(&sc->esc_tx_cond);
}
/* Mark the transmitter enabled; the tx thread picks this up on wakeup. */
static void
e82545_tx_enable(struct e82545_softc *sc)
{

	sc->esc_tx_enabled = 1;
}
/*
 * Disable the transmitter and block until the tx thread has parked
 * (esc_tx_active cleared). pthread_cond_wait requires esc_mtx to be
 * held by the caller.
 */
static void
e82545_tx_disable(struct e82545_softc *sc)
{

	sc->esc_tx_enabled = 0;
	while (sc->esc_tx_active)
		pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
}
/* Mark the receiver enabled; the rx callback checks this under esc_mtx. */
static void
e82545_rx_enable(struct e82545_softc *sc)
{

	sc->esc_rx_enabled = 1;
}
/*
 * Disable the receiver and block until any in-flight rx callback has
 * finished (esc_rx_active cleared). pthread_cond_wait requires esc_mtx
 * to be held by the caller.
 */
static void
e82545_rx_disable(struct e82545_softc *sc)
{

	sc->esc_rx_enabled = 0;
	while (sc->esc_rx_active)
		pthread_cond_wait(&sc->esc_rx_cond, &sc->esc_mtx);
}
/*
 * Write one receive-address register. reg is the u32 register index
 * relative to RAL(0), so even regs are RAL (MAC bytes 3:0) and odd
 * regs are RAH (valid flag, address-select, MAC bytes 5:4).
 */
static void
e82545_write_ra(struct e82545_softc *sc, int reg, uint32_t wval)
{
	struct eth_uni *eu;
	int idx;

	idx = reg >> 1;
	/*
	 * There are 16 RAL/RAH pairs (RAL0..RAL15): the register
	 * dispatcher accepts offsets up to E1000_RAH(15) and
	 * e82545_reset() clears 16 esc_uni entries. The previous
	 * bound of 15 wrongly rejected the last pair.
	 */
	assert(idx < 16);
	eu = &sc->esc_uni[idx];

	if (reg & 0x1) {
		/* RAH: valid bit, address-select, MAC bytes 5:4. */
		eu->eu_valid = ((wval & E1000_RAH_AV) == E1000_RAH_AV);
		eu->eu_addrsel = (wval >> 16) & 0x3;
		eu->eu_eth.octet[5] = (u_char)(wval >> 8);
		eu->eu_eth.octet[4] = (u_char)wval;
	} else {
		/* RAL: MAC bytes 3:0. */
		eu->eu_eth.octet[3] = (u_char)(wval >> 24);
		eu->eu_eth.octet[2] = (u_char)(wval >> 16);
		eu->eu_eth.octet[1] = (u_char)(wval >> 8);
		eu->eu_eth.octet[0] = (u_char)wval;
	}
}
/*
 * Read one receive-address register; reg indexing matches
 * e82545_write_ra() (even = RAL, odd = RAH).
 */
static uint32_t
e82545_read_ra(struct e82545_softc *sc, int reg)
{
	struct eth_uni *eu;
	uint32_t retval;
	int idx;

	idx = reg >> 1;
	/*
	 * 16 RAL/RAH pairs exist (see e82545_reset() and the register
	 * dispatcher, which accepts E1000_RAH(15)); the previous bound
	 * of 15 wrongly rejected the last pair.
	 */
	assert(idx < 16);
	eu = &sc->esc_uni[idx];

	if (reg & 0x1) {
		/* RAH: valid bit, address-select, MAC bytes 5:4. */
		retval = (uint32_t)(eu->eu_valid << 31) |
		    (uint32_t)(eu->eu_addrsel << 16) |
		    (uint32_t)(eu->eu_eth.octet[5] << 8) |
		    eu->eu_eth.octet[4];
	} else {
		/* RAL: MAC bytes 3:0. */
		retval = (uint32_t)(eu->eu_eth.octet[3] << 24) |
		    (uint32_t)(eu->eu_eth.octet[2] << 16) |
		    (uint32_t)(eu->eu_eth.octet[1] << 8) |
		    eu->eu_eth.octet[0];
	}

	return (retval);
}
/*
 * Dispatch a 32-bit guest write to a memory-mapped device register.
 * Reserved bits are stripped per-register; EECD and MDIC implement the
 * EEPROM and PHY management interfaces inline. Unaligned writes and
 * unknown offsets are logged and dropped.
 */
static void
e82545_write_register(struct e82545_softc *sc, uint32_t offset, uint32_t value)
{
	int ridx;

	if (offset & 0x3) {
		DPRINTF("Unaligned register write offset:0x%x value:0x%x\r\n", offset, value);
		return;
	}
	DPRINTF("Register write: 0x%x value: 0x%x\r\n", offset, value);

	switch (offset) {
	case E1000_CTRL:
	case E1000_CTRL_DUP:	/* CTRL is aliased at two offsets */
		e82545_devctl(sc, value);
		break;
	case E1000_FCAL:
		sc->esc_FCAL = value;
		break;
	case E1000_FCAH:
		sc->esc_FCAH = value & ~0xFFFF0000;
		break;
	case E1000_FCT:
		sc->esc_FCT = value & ~0xFFFF0000;
		break;
	case E1000_VET:
		sc->esc_VET = value & ~0xFFFF0000;
		break;
	case E1000_FCTTV:
		sc->esc_FCTTV = value & ~0xFFFF0000;
		break;
	case E1000_LEDCTL:
		sc->esc_LEDCTL = value & (uint32_t)~0x30303000;
		break;
	case E1000_PBA:
		sc->esc_PBA = value & 0x0000FF80;
		break;
	case E1000_ICR:
	case E1000_ITR:
	case E1000_ICS:
	case E1000_IMS:
	case E1000_IMC:
		/* Interrupt registers share a dedicated handler. */
		e82545_intr_write(sc, offset, value);
		break;
	case E1000_RCTL:
		e82545_rx_ctl(sc, value);
		break;
	case E1000_FCRTL:
		sc->esc_FCRTL = value & ~0xFFFF0007;
		break;
	case E1000_FCRTH:
		sc->esc_FCRTH = value & ~0xFFFF0007;
		break;
	case E1000_RDBAL(0):
		sc->esc_RDBAL = value & (uint32_t)~0xF;
		if (sc->esc_rx_enabled) {
			/* Apparently legal: update cached address */
			e82545_rx_update_rdba(sc);
		}
		break;
	case E1000_RDBAH(0):
		assert(!sc->esc_rx_enabled);
		sc->esc_RDBAH = value;
		break;
	case E1000_RDLEN(0):
		assert(!sc->esc_rx_enabled);
		sc->esc_RDLEN = value & ~0xFFF0007F;
		break;
	case E1000_RDH(0):
		/* XXX should only ever be zero ? Range check ? */
		sc->esc_RDH = (uint16_t)value;
		break;
	case E1000_RDT(0):
		/* XXX if this opens up the rx ring, do something ? */
		sc->esc_RDT = (uint16_t)value;
		break;
	case E1000_RDTR:
		/* ignore FPD bit 31 */
		sc->esc_RDTR = value & ~0xFFFF0000;
		break;
	case E1000_RXDCTL(0):
		sc->esc_RXDCTL = value & ~0xFEC0C0C0;
		break;
	case E1000_RADV:
		sc->esc_RADV = value & ~0xFFFF0000;
		break;
	case E1000_RSRPD:
		sc->esc_RSRPD = value & ~0xFFFFF000;
		break;
	case E1000_RXCSUM:
		sc->esc_RXCSUM = value & ~0xFFFFF800;
		break;
	case E1000_TXCW:
		sc->esc_TXCW = value & (uint32_t)~0x3FFF0000;
		break;
	case E1000_TCTL:
		e82545_tx_ctl(sc, value);
		break;
	case E1000_TIPG:
		sc->esc_TIPG = value;
		break;
	case E1000_AIT:
		sc->esc_AIT = (uint16_t)value;
		break;
	case E1000_TDBAL(0):
		sc->esc_TDBAL = value & (uint32_t)~0xF;
		if (sc->esc_tx_enabled) {
			/* Apparently legal */
			e82545_tx_update_tdba(sc);
		}
		break;
	case E1000_TDBAH(0):
		//assert(!sc->esc_tx_enabled);
		sc->esc_TDBAH = value;
		break;
	case E1000_TDLEN(0):
		//assert(!sc->esc_tx_enabled);
		sc->esc_TDLEN = value & ~0xFFF0007F;
		break;
	case E1000_TDH(0):
		//assert(!sc->esc_tx_enabled);
		/* XXX should only ever be zero ? Range check ? */
		sc->esc_TDHr = sc->esc_TDH = (uint16_t)value;
		break;
	case E1000_TDT(0):
		/* XXX range check ? */
		sc->esc_TDT = (uint16_t)value;
		/* New tail may expose fresh descriptors: wake the tx thread. */
		if (sc->esc_tx_enabled)
			e82545_tx_start(sc);
		break;
	case E1000_TIDV:
		sc->esc_TIDV = value & ~0xFFFF0000;
		break;
	case E1000_TXDCTL(0):
		//assert(!sc->esc_tx_enabled);
		sc->esc_TXDCTL = value & (uint32_t)~0xC0C0C0;
		break;
	case E1000_TADV:
		sc->esc_TADV = value & ~0xFFFF0000;
		break;
	case E1000_EECD:
	{
		//DPRINTF("EECD write 0x%x -> 0x%x\r\n", sc->eeprom_control, value);
		/* edge triggered low->high */
		uint32_t eecd_strobe = ((sc->eeprom_control & E1000_EECD_SK) ?
			0 : (value & E1000_EECD_SK));
		uint32_t eecd_mask = (E1000_EECD_SK|E1000_EECD_CS|
					E1000_EECD_DI|E1000_EECD_REQ);
		sc->eeprom_control &= ~eecd_mask;
		sc->eeprom_control |= (value & eecd_mask);
		/* grant/revoke immediately */
		if (value & E1000_EECD_REQ) {
			sc->eeprom_control |= E1000_EECD_GNT;
		} else {
                        sc->eeprom_control &= (uint32_t)~E1000_EECD_GNT;
		}
		/* Clock a bit into/out of the EEPROM while chip-selected. */
		if (eecd_strobe && (sc->eeprom_control & E1000_EECD_CS)) {
			e82545_eecd_strobe(sc);
		}
		return;
	}
	case E1000_MDIC:
	{
		uint8_t reg_addr = (uint8_t)((value & E1000_MDIC_REG_MASK) >>
						E1000_MDIC_REG_SHIFT);
		uint8_t phy_addr = (uint8_t)((value & E1000_MDIC_PHY_MASK) >>
						E1000_MDIC_PHY_SHIFT);
		sc->mdi_control =
			(value & ~(E1000_MDIC_ERROR|E1000_MDIC_DEST));
		/* Guest must not set READY itself; device sets it on completion. */
		if ((value & E1000_MDIC_READY) != 0) {
			DPRINTF("Incorrect MDIC ready bit: 0x%x\r\n", value);
			return;
		}
		switch (value & E82545_MDIC_OP_MASK) {
		case E1000_MDIC_OP_READ:
			sc->mdi_control &= (uint32_t)~E82545_MDIC_DATA_MASK;
			sc->mdi_control |= e82545_read_mdi(sc, reg_addr, phy_addr);
			break;
		case E1000_MDIC_OP_WRITE:
			e82545_write_mdi(sc, reg_addr, phy_addr,
				value & E82545_MDIC_DATA_MASK);
			break;
		default:
			DPRINTF("Unknown MDIC op: 0x%x\r\n", value);
			return;
		}
		/* TODO: barrier? */
		sc->mdi_control |= E1000_MDIC_READY;
		if (value & E82545_MDIC_IE) {
			// TODO: generate interrupt
		}
		return;
	}
	case E1000_MANC:
	case E1000_STATUS:
		/* Read-only / unimplemented: ignore writes. */
		return;
	default:
		/* Array-style register banks: RA, multicast and VLAN tables. */
		if ((offset >= E1000_RAL(0)) && (offset <= E1000_RAH(15))) {
			/* convert to u32 offset */
			ridx = (offset - E1000_RAL(0)) >> 2;
			e82545_write_ra(sc, ridx, value);
		} else if ((offset >= E1000_MTA) && (offset <= E1000_MTA + (127*4))) {
			sc->esc_fmcast[(offset - E1000_MTA) >> 2] = value;
		} else if ((offset >= E1000_VFTA) && (offset <= E1000_VFTA + (127*4))) {
			sc->esc_fvlan[(offset - E1000_VFTA) >> 2] = value;
		} else {
			DPRINTF("Unknown write register: 0x%x value:%x\r\n", offset, value);
		}
		return;
	}
}
/*
 * Dispatch a 32-bit guest read of a memory-mapped device register.
 * Most registers return latched softc state; STATUS reports a fixed
 * full-duplex 1Gb link-up, several statistics counters are emulated,
 * and the remainder of the stats block reads as zero. Unaligned reads
 * and unknown offsets return 0.
 */
static uint32_t
e82545_read_register(struct e82545_softc *sc, uint32_t offset)
{
	uint32_t retval;
	int ridx;

	if (offset & 0x3) {
		DPRINTF("Unaligned register read offset:0x%x\r\n", offset);
		return 0;
	}

	DPRINTF("Register read: 0x%x\r\n", offset);

	switch (offset) {
	case E1000_CTRL:
		retval = sc->esc_CTRL;
		break;
	case E1000_STATUS:
		/* Always report full-duplex, link up, 1000 Mb/s. */
		retval = E1000_STATUS_FD | E1000_STATUS_LU |
		    E1000_STATUS_SPEED_1000;
		break;
	case E1000_FCAL:
		retval = sc->esc_FCAL;
		break;
	case E1000_FCAH:
		retval = sc->esc_FCAH;
		break;
	case E1000_FCT:
		retval = sc->esc_FCT;
		break;
	case E1000_VET:
		retval = sc->esc_VET;
		break;
	case E1000_FCTTV:
		retval = sc->esc_FCTTV;
		break;
	case E1000_LEDCTL:
		retval = sc->esc_LEDCTL;
		break;
	case E1000_PBA:
		retval = sc->esc_PBA;
		break;
	case E1000_ICR:
	case E1000_ITR:
	case E1000_ICS:
	case E1000_IMS:
	case E1000_IMC:
		/* Interrupt registers (ICR read has clear side effects). */
		retval = e82545_intr_read(sc, offset);
		break;
	case E1000_RCTL:
		retval = sc->esc_RCTL;
		break;
	case E1000_FCRTL:
		retval = sc->esc_FCRTL;
		break;
	case E1000_FCRTH:
		retval = sc->esc_FCRTH;
		break;
	case E1000_RDBAL(0):
		retval = sc->esc_RDBAL;
		break;
	case E1000_RDBAH(0):
		retval = sc->esc_RDBAH;
		break;
	case E1000_RDLEN(0):
		retval = sc->esc_RDLEN;
		break;
	case E1000_RDH(0):
		retval = sc->esc_RDH;
		break;
	case E1000_RDT(0):
		retval = sc->esc_RDT;
		break;
	case E1000_RDTR:
		retval = sc->esc_RDTR;
		break;
	case E1000_RXDCTL(0):
		retval = sc->esc_RXDCTL;
		break;
	case E1000_RADV:
		retval = sc->esc_RADV;
		break;
	case E1000_RSRPD:
		retval = sc->esc_RSRPD;
		break;
	case E1000_RXCSUM:
		retval = sc->esc_RXCSUM;
		break;
	case E1000_TXCW:
		retval = sc->esc_TXCW;
		break;
	case E1000_TCTL:
		retval = sc->esc_TCTL;
		break;
	case E1000_TIPG:
		retval = sc->esc_TIPG;
		break;
	case E1000_AIT:
		retval = sc->esc_AIT;
		break;
	case E1000_TDBAL(0):
		retval = sc->esc_TDBAL;
		break;
	case E1000_TDBAH(0):
		retval = sc->esc_TDBAH;
		break;
	case E1000_TDLEN(0):
		retval = sc->esc_TDLEN;
		break;
	case E1000_TDH(0):
		retval = sc->esc_TDH;
		break;
	case E1000_TDT(0):
		retval = sc->esc_TDT;
		break;
	case E1000_TIDV:
		retval = sc->esc_TIDV;
		break;
	case E1000_TXDCTL(0):
		retval = sc->esc_TXDCTL;
		break;
	case E1000_TADV:
		retval = sc->esc_TADV;
		break;
	case E1000_EECD:
		//DPRINTF("EECD read %x\r\n", sc->eeprom_control);
		retval = sc->eeprom_control;
		break;
	case E1000_MDIC:
		retval = sc->mdi_control;
		break;
	case E1000_MANC:
		retval = 0;
		break;
	/* stats that we emulate. */
	case E1000_MPC:
		retval = sc->missed_pkt_count;
		break;
	case E1000_PRC64:
		retval = sc->pkt_rx_by_size[0];
		break;
	case E1000_PRC127:
		retval = sc->pkt_rx_by_size[1];
		break;
	case E1000_PRC255:
		retval = sc->pkt_rx_by_size[2];
		break;
	case E1000_PRC511:
		retval = sc->pkt_rx_by_size[3];
		break;
	case E1000_PRC1023:
		retval = sc->pkt_rx_by_size[4];
		break;
	case E1000_PRC1522:
		retval = sc->pkt_rx_by_size[5];
		break;
	case E1000_GPRC:
		retval = sc->good_pkt_rx_count;
		break;
	case E1000_BPRC:
		retval = sc->bcast_pkt_rx_count;
		break;
	case E1000_MPRC:
		retval = sc->mcast_pkt_rx_count;
		break;
	case E1000_GPTC:
	case E1000_TPT:
		retval = sc->good_pkt_tx_count;
		break;
	case E1000_GORCL:
		/* 64-bit octet counters are exposed as low/high pairs. */
		retval = (uint32_t)sc->good_octets_rx;
		break;
	case E1000_GORCH:
		retval = (uint32_t)(sc->good_octets_rx >> 32);
		break;
	case E1000_TOTL:
	case E1000_GOTCL:
		retval = (uint32_t)sc->good_octets_tx;
		break;
	case E1000_TOTH:
	case E1000_GOTCH:
		retval = (uint32_t)(sc->good_octets_tx >> 32);
		break;
	case E1000_ROC:
		retval = sc->oversize_rx_count;
		break;
	case E1000_TORL:
		retval = (uint32_t)(sc->good_octets_rx + sc->missed_octets);
		break;
	case E1000_TORH:
		retval = (uint32_t)((sc->good_octets_rx +
		    sc->missed_octets) >> 32);
		break;
	case E1000_TPR:
		retval = sc->good_pkt_rx_count + sc->missed_pkt_count +
		    sc->oversize_rx_count;
		break;
	case E1000_PTC64:
		retval = sc->pkt_tx_by_size[0];
		break;
	case E1000_PTC127:
		retval = sc->pkt_tx_by_size[1];
		break;
	case E1000_PTC255:
		retval = sc->pkt_tx_by_size[2];
		break;
	case E1000_PTC511:
		retval = sc->pkt_tx_by_size[3];
		break;
	case E1000_PTC1023:
		retval = sc->pkt_tx_by_size[4];
		break;
	case E1000_PTC1522:
		retval = sc->pkt_tx_by_size[5];
		break;
	case E1000_MPTC:
		retval = sc->mcast_pkt_tx_count;
		break;
	case E1000_BPTC:
		retval = sc->bcast_pkt_tx_count;
		break;
	case E1000_TSCTC:
		retval = sc->tso_tx_count;
		break;
	/* stats that are always 0. */
	case E1000_CRCERRS:
	case E1000_ALGNERRC:
	case E1000_SYMERRS:
	case E1000_RXERRC:
	case E1000_SCC:
	case E1000_ECOL:
	case E1000_MCC:
	case E1000_LATECOL:
	case E1000_COLC:
	case E1000_DC:
	case E1000_TNCRS:
	case E1000_SEC:
	case E1000_CEXTERR:
	case E1000_RLEC:
	case E1000_XONRXC:
	case E1000_XONTXC:
	case E1000_XOFFRXC:
	case E1000_XOFFTXC:
	case E1000_FCRUC:
	case E1000_RNBC:
	case E1000_RUC:
	case E1000_RFC:
	case E1000_RJC:
	case E1000_MGTPRC:
	case E1000_MGTPDC:
	case E1000_MGTPTC:
	case E1000_TSCTFC:
		retval = 0;
		break;
	default:
		/* Array-style register banks: RA, multicast and VLAN tables. */
		if ((offset >= E1000_RAL(0)) && (offset <= E1000_RAH(15))) {
			/* convert to u32 offset */
			ridx = (offset - E1000_RAL(0)) >> 2;
			retval = e82545_read_ra(sc, ridx);
		} else if ((offset >= E1000_MTA) && (offset <= E1000_MTA + (127*4))) {
			retval = sc->esc_fmcast[(offset - E1000_MTA) >> 2];
		} else if ((offset >= E1000_VFTA) && (offset <= E1000_VFTA + (127*4))) {
			retval = sc->esc_fvlan[(offset - E1000_VFTA) >> 2];
		} else {
			DPRINTF("Unknown read register: 0x%x\r\n", offset);
			retval = 0;
		}
		break;
	}

	return (retval);
}
/*
 * PCI BAR write entry point. Dispatches to the I/O-port window
 * (address/data pair) or the memory-mapped register BAR, holding
 * esc_mtx for the duration of the access.
 */
static void
e82545_write(UNUSED int vcpu, struct pci_devinst *pi, int baridx,
	uint64_t offset, int size, uint64_t value)
{
	struct e82545_softc *sc;

	//DPRINTF("Write bar:%d offset:0x%lx value:0x%lx size:%d\r\n", baridx, offset, value, size);

	sc = pi->pi_arg;

	pthread_mutex_lock(&sc->esc_mtx);
	if (baridx == E82545_BAR_IO) {
		if (offset == E82545_IOADDR) {
			if (size != 4) {
				DPRINTF("Wrong io addr write sz:%d value:0x%llx\r\n", size, value);
			} else
				sc->io_addr = (uint32_t)value;
		} else if (offset == E82545_IODATA) {
			if (size != 4) {
				DPRINTF("Wrong io data write size:%d value:0x%llx\r\n", size, value);
			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
				DPRINTF("Non-register io write addr:0x%x value:0x%llx\r\n", sc->io_addr, value);
			} else
				e82545_write_register(sc, sc->io_addr,
						      (uint32_t)value);
		} else {
			DPRINTF("Unknown io bar write offset:0x%llx value:0x%llx size:%d\r\n", offset, value, size);
		}
	} else if (baridx == E82545_BAR_REGISTER) {
		if (size != 4) {
			DPRINTF("Wrong register write size:%d offset:0x%llx value:0x%llx\r\n", size, offset, value);
		} else
			e82545_write_register(sc, (uint32_t)offset,
					      (uint32_t)value);
	} else {
		DPRINTF("Unknown write bar:%d off:0x%llx val:0x%llx size:%d\r\n",
			baridx, offset, value, size);
	}
	pthread_mutex_unlock(&sc->esc_mtx);
}
/*
 * PCI BAR read entry point. Dispatches to the I/O-port window
 * (address/data pair) or the memory-mapped register BAR, holding
 * esc_mtx for the duration of the access. Unknown or malformed
 * accesses return 0.
 */
static uint64_t
e82545_read(UNUSED int vcpu, struct pci_devinst *pi, int baridx,
	uint64_t offset, int size)
{
	struct e82545_softc *sc;
	uint64_t retval;

	//DPRINTF("Read bar:%d offset:0x%lx size:%d\r\n", baridx, offset, size);
	sc = pi->pi_arg;
	retval = 0;

	pthread_mutex_lock(&sc->esc_mtx);
	switch (baridx) {
	case E82545_BAR_IO:
		switch (offset) {
		case E82545_IOADDR:
			if (size != 4) {
				DPRINTF("Wrong io addr read sz:%d\r\n", size);
			} else
				retval = sc->io_addr;
			break;
		case E82545_IODATA:
			/*
			 * Bug fix: a wrong-size read previously fell through
			 * and still dispatched to e82545_read_register(),
			 * which has side effects (e.g. ICR read-clear).
			 * Chain the checks like the write path so a bad
			 * size returns 0 without touching the register.
			 */
			if (size != 4) {
				DPRINTF("Wrong io data read sz:%d\r\n", size);
			} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
				DPRINTF("Non-register io read addr:0x%x\r\n",
					sc->io_addr);
			} else
				retval = e82545_read_register(sc, sc->io_addr);
			break;
		default:
			DPRINTF("Unknown io bar read offset:0x%llx size:%d\r\n",
				offset, size);
			break;
		}
		break;
	case E82545_BAR_REGISTER:
		if (size != 4) {
			DPRINTF("Wrong register read size:%d offset:0x%llx\r\n",
				size, offset);
		} else
			retval = e82545_read_register(sc, (uint32_t)offset);
		break;
	default:
		DPRINTF("Unknown read bar:%d offset:0x%llx size:%d\r\n",
			baridx, offset, size);
		break;
	}
	pthread_mutex_unlock(&sc->esc_mtx);

	return (retval);
}
/*
 * Reset device emulation state.  drvr == 0 is a full hardware-style
 * reset (the init path calls it that way); drvr != 0 appears to be a
 * driver-initiated reset that preserves a subset of configuration
 * (flow control, filters, ring base addresses) — TODO confirm against
 * the 8254x datasheet.
 */
static void
e82545_reset(struct e82545_softc *sc, int drvr)
{
	int i;

	e82545_rx_disable(sc);
	e82545_tx_disable(sc);

	/* clear outstanding interrupts */
	if (sc->esc_irq_asserted)
		pci_lintr_deassert(sc->esc_pi);

	/* misc */
	if (!drvr) {
		sc->esc_FCAL = 0;
		sc->esc_FCAH = 0;
		sc->esc_FCT = 0;
		sc->esc_VET = 0;
		sc->esc_FCTTV = 0;
	}
	sc->esc_LEDCTL = 0x07061302;	/* reset default */
	sc->esc_PBA = 0x00100030;	/* reset default */

	/* start nvm in opcode mode. */
	sc->nvm_opaddr = 0;
	sc->nvm_mode = E82545_NVM_MODE_OPADDR;
	sc->nvm_bits = E82545_NVM_OPADDR_BITS;
	sc->eeprom_control = E1000_EECD_PRES | E82545_EECD_FWE_EN;
	e82545_init_eeprom(sc);

	/* interrupt */
	sc->esc_ICR = 0;
	sc->esc_ITR = 250;	/* reset default — presumably throttle interval */
	sc->esc_ICS = 0;
	sc->esc_IMS = 0;
	sc->esc_IMC = 0;

	/* L2 filters */
	if (!drvr) {
		memset(sc->esc_fvlan, 0, sizeof(sc->esc_fvlan));
		memset(sc->esc_fmcast, 0, sizeof(sc->esc_fmcast));
		memset(sc->esc_uni, 0, sizeof(sc->esc_uni));

		/* XXX not necessary on 82545 ?? */
		sc->esc_uni[0].eu_valid = 1;
		memcpy(sc->esc_uni[0].eu_eth.octet, sc->vms->mac,
		    ETHER_ADDR_LEN);
	} else {
		/* Clear RAH valid bits */
		for (i = 0; i < 16; i++)
			sc->esc_uni[i].eu_valid = 0;
	}

	/* receive */
	if (!drvr) {
		sc->esc_RDBAL = 0;
		sc->esc_RDBAH = 0;
	}
	sc->esc_RCTL = 0;
	sc->esc_FCRTL = 0;
	sc->esc_FCRTH = 0;
	sc->esc_RDLEN = 0;
	sc->esc_RDH = 0;
	sc->esc_RDT = 0;
	sc->esc_RDTR = 0;
	sc->esc_RXDCTL = (1 << 24) | (1 << 16); /* default GRAN/WTHRESH */
	sc->esc_RADV = 0;
	sc->esc_RXCSUM = 0;

	/* transmit */
	if (!drvr) {
		sc->esc_TDBAL = 0;
		sc->esc_TDBAH = 0;
		sc->esc_TIPG = 0;
		sc->esc_AIT = 0;
		sc->esc_TIDV = 0;
		sc->esc_TADV = 0;
	}
	sc->esc_tdba = 0;
	sc->esc_txdesc = NULL;
	sc->esc_TXCW = 0;
	sc->esc_TCTL = 0;
	sc->esc_TDLEN = 0;
	sc->esc_TDT = 0;
	sc->esc_TDHr = sc->esc_TDH = 0;
	sc->esc_TXDCTL = 0;
}
/*
 * PCI device init callback: allocate the softc, start the transmit
 * thread, populate config space and BARs, then attach the backend
 * network device.  Returns 0 on success, -1 on failure.
 */
static int
e82545_init(struct pci_devinst *pi, char *opts)
{
	struct e82545_softc *sc;

	DPRINTF("Loading with options: %s\r\n", opts);

	/* Setup our softc */
	sc = calloc(1, sizeof(*sc));
	if (sc == NULL) {
		/* BUGFIX: the allocation result was previously unchecked. */
		return (-1);
	}

	pi->pi_arg = sc;
	sc->esc_pi = pi;
	pthread_mutex_init(&sc->esc_mtx, NULL);
	pthread_cond_init(&sc->esc_rx_cond, NULL);
	pthread_cond_init(&sc->esc_tx_cond, NULL);
	pthread_create(&sc->esc_tx_tid, NULL, e82545_tx_thread, sc);

	/* Identify as an Intel 82545EM copper NIC. */
	pci_set_cfgdata16(pi, PCIR_DEVICE, E82545_DEV_ID_82545EM_COPPER);
	pci_set_cfgdata16(pi, PCIR_VENDOR, E82545_VENDOR_ID_INTEL);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_NETWORK_ETHERNET);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, E82545_SUBDEV_ID);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, E82545_VENDOR_ID_INTEL);
	pci_set_cfgdata8(pi, PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
	pci_set_cfgdata8(pi, PCIR_INTPIN, 0x1);

	/* TODO: this card also supports msi, but the freebsd driver for it
	 * does not, so I have not implemented it. */
	pci_lintr_request(pi);

	pci_emul_alloc_bar(pi, E82545_BAR_REGISTER, PCIBAR_MEM32,
	    E82545_BAR_REGISTER_LEN);
	pci_emul_alloc_bar(pi, E82545_BAR_FLASH, PCIBAR_MEM32,
	    E82545_BAR_FLASH_LEN);
	pci_emul_alloc_bar(pi, E82545_BAR_IO, PCIBAR_IO,
	    E82545_BAR_IO_LEN);

	/*
	 * Attempt to open the tap device and read the MAC address
	 * if specified. Copied from virtio-net, slightly modified.
	 */
	if (vmn_create(sc) == -1) {
		return (-1);
	}

	if (print_mac == 1)
	{
		/* Diagnostic mode: print the backend MAC and exit. */
		printf("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
		    sc->vms->mac[0], sc->vms->mac[1], sc->vms->mac[2],
		    sc->vms->mac[3], sc->vms->mac[4], sc->vms->mac[5]);
		exit(0);
	}

	/* H/w initiated reset */
	e82545_reset(sc, 0);

	return (0);
}
/* Device emulation glue: registers this model under the name "e1000". */
static struct pci_devemu pci_de_e82545 = {
	.pe_emu = "e1000",
	.pe_init = e82545_init,
	.pe_barwrite = e82545_write,
	.pe_barread = e82545_read
};
PCI_EMUL_SET(pci_de_e82545);
|
mike-pt/xhyve
|
src/pci_ahci.c
|
<filename>src/pci_ahci.c
/*-
* Copyright (c) 2013 <NAME> <<EMAIL>>
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <sys/disk.h>
#include <sys/queue.h>
// #include <sys/endian.h>
#include <CommonCrypto/CommonDigest.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/ata.h>
#include <xhyve/support/linker_set.h>
#include <xhyve/xhyve.h>
#include <xhyve/pci_emul.h>
#include <xhyve/block_if.h>
#include <xhyve/ahci.h>
#define MAX_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
#define PxSIG_ATA 0x00000101 /* ATA drive */
#define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
/* SATA Frame Information Structure (FIS) type codes. */
enum sata_fis_type {
	FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
	FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
	FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
	FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
	FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
	FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
	FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
	FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
};
/*
* SCSI opcodes
*/
#define TEST_UNIT_READY 0x00
#define REQUEST_SENSE 0x03
#define INQUIRY 0x12
#define START_STOP_UNIT 0x1B
#define PREVENT_ALLOW 0x1E
#define READ_CAPACITY 0x25
#define READ_10 0x28
// #define POSITION_TO_ELEMENT 0x2B
#define READ_TOC 0x43
#define GET_EVENT_STATUS_NOTIFICATION 0x4A
#define MODE_SENSE_10 0x5A
#define REPORT_LUNS 0xA0
#define READ_12 0xA8
// #define READ_CD 0xBE
/*
* SCSI mode page codes
*/
#define MODEPAGE_RW_ERROR_RECOVERY 0x01
#define MODEPAGE_CD_CAPABILITIES 0x2A
/*
* ATA commands
*/
#define ATA_SF_ENAB_SATA_SF 0x10
#define ATA_SATA_SF_AN 0x05
// #define ATA_SF_DIS_SATA_SF 0x90
/*
* Debug printf
*/
#ifdef AHCI_DEBUG
static FILE *dbg;
#define DPRINTF(format, ...) \
do { \
fprintf(dbg, format, __VA_ARGS__); \
fflush(dbg); \
} while(0)
#else
#define DPRINTF(format, ...)
#endif
#define WPRINTF(format, ...) printf(format, __VA_ARGS__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
/*
 * Per-command in-flight I/O tracking: links a block-layer request back
 * to the AHCI command slot it services.
 */
struct ahci_ioreq {
	struct blockif_req io_req;		/* underlying block request */
	struct ahci_port *io_pr;		/* owning port */
	STAILQ_ENTRY(ahci_ioreq) io_flist;	/* free-list linkage */
	TAILQ_ENTRY(ahci_ioreq) io_blist;	/* busy-list linkage */
	uint8_t *cfis;		/* command FIS being serviced */
	uint32_t len;		/* total transfer length, bytes */
	uint32_t done;		/* bytes completed so far */
	int slot;		/* AHCI command slot number */
	int more;		/* nonzero if another pass is required */
};
/* Length of the port identity string, including the terminating NUL. */
#define AHCI_PORT_IDENT (20 + 1)	/* BUGFIX: parenthesized so the macro
					 * expands safely inside expressions */

/* Per-port emulation state: shadow AHCI registers plus request tracking. */
struct ahci_port {
	struct blockif_ctxt *bctx;	/* block backend; NULL = no device */
	struct pci_ahci_softc *pr_sc;	/* back-pointer to the controller */
	uint8_t *cmd_lst;		/* mapped guest command list */
	uint8_t *rfis;			/* mapped guest received-FIS area */
	char ident[AHCI_PORT_IDENT];	/* IDENTIFY serial-number string */
	int atapi;			/* nonzero: expose an ATAPI device */
	int reset;
	int waitforclear;		/* error latched (see ahci_write_fis) */
	int mult_sectors;		/* READ/WRITE MULTIPLE sector count */
	uint8_t xfermode;		/* current ATA transfer mode */
	uint8_t err_cfis[20];		/* saved CFIS of the last failed cmd */
	uint8_t sense_key;		/* ATAPI sense state */
	uint8_t asc;			/* ATAPI additional sense code */
	u_int ccs;			/* current command slot */
	uint32_t pending;		/* bitmap of slots with I/O in flight */
	/* Shadow copies of the per-port AHCI register file. */
	uint32_t clb;
	uint32_t clbu;
	uint32_t fb;
	uint32_t fbu;
	uint32_t is;
	uint32_t ie;
	uint32_t cmd;
	uint32_t unused0;
	uint32_t tfd;
	uint32_t sig;
	uint32_t ssts;
	uint32_t sctl;
	uint32_t serr;
	uint32_t sact;
	uint32_t ci;
	uint32_t sntf;
	uint32_t fbs;
	/*
	 * i/o request info
	 */
	struct ahci_ioreq *ioreq;	/* request pool — presumably
					 * preallocated at attach time */
	int ioqsz;			/* size of that pool */
	STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;	/* free requests */
	TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;	/* busy requests */
};
/* AHCI command-list entry (command header). */
struct ahci_cmd_hdr {
	uint16_t flags;		/* command description flags */
	uint16_t prdtl;		/* number of PRDT entries */
	uint32_t prdbc;		/* byte count transferred (see write_prdt) */
	uint64_t ctba;		/* command table base address */
	uint32_t reserved[4];
};
/* Physical region descriptor table entry: one guest DMA segment. */
struct ahci_prdt_entry {
	uint64_t dba;		/* data base address (guest physical) */
	uint32_t reserved;
#define DBCMASK 0x3fffff
	uint32_t dbc;		/* byte count minus one, in the low 22 bits */
};
/* Controller-wide emulation state: HBA registers plus per-port state. */
struct pci_ahci_softc {
	struct pci_devinst *asc_pi;
	pthread_mutex_t mtx;	/* protects controller and port state */
	int ports;		/* number of implemented ports */
	uint32_t cap;		/* CAP: HBA capabilities */
	uint32_t ghc;		/* GHC: global HBA control */
	uint32_t is;		/* IS: one pending-interrupt bit per port */
	uint32_t pi;		/* PI: ports implemented bitmap */
	uint32_t vs;		/* VS: AHCI version */
	uint32_t ccc_ctl;	/* command completion coalescing control */
	uint32_t ccc_pts;	/* command completion coalescing ports */
	uint32_t em_loc;	/* enclosure management location */
	uint32_t em_ctl;	/* enclosure management control */
	uint32_t cap2;		/* CAP2: extended capabilities */
	uint32_t bohc;		/* BIOS/OS handoff control */
	uint32_t lintr;		/* nonzero while legacy intr is asserted */
	struct ahci_port port[MAX_PORTS];
};
#pragma clang diagnostic pop
static void ahci_handle_port(struct ahci_port *p);
/*
 * Convert a CD logical block address to minute/second/frame form:
 * 75 frames per second, with the standard 150-frame (2 s) lead-in offset.
 */
static inline void lba_to_msf(uint8_t *buf, int lba)
{
	const int frames = lba + 150;
	const int total_seconds = frames / 75;

	buf[0] = (uint8_t) (total_seconds / 60);	/* minutes */
	buf[1] = total_seconds % 60;			/* seconds */
	buf[2] = frames % 75;				/* frames */
}
/*
* generate HBA intr depending on whether or not ports within
* the controller have an interrupt pending.
*/
static void
ahci_generate_intr(struct pci_ahci_softc *sc)
{
	struct pci_devinst *pi;
	int i;

	pi = sc->asc_pi;

	/* Fold per-port interrupt state into the HBA-level IS register. */
	for (i = 0; i < sc->ports; i++) {
		struct ahci_port *pr;

		pr = &sc->port[i];
		if (pr->is & pr->ie)
			sc->is |= (1 << i);
	}

	DPRINTF("%s %x\n", __func__, sc->is);

	/* Deliver only when global interrupt enable (GHC.IE) is set. */
	if (sc->is && (sc->ghc & AHCI_GHC_IE)) {
		if (pci_msi_enabled(pi)) {
			/*
			 * Generate an MSI interrupt on every edge
			 */
			pci_generate_msi(pi, 0);
		} else if (!sc->lintr) {
			/*
			 * Only generate a pin-based interrupt if one wasn't
			 * in progress
			 */
			sc->lintr = 1;
			pci_lintr_assert(pi);
		}
	} else if (sc->lintr) {
		/*
		 * No interrupts: deassert pin-based signal if it had
		 * been asserted
		 */
		pci_lintr_deassert(pi);
		sc->lintr = 0;
	}
}
/*
 * Copy a device-generated FIS into the port's received-FIS area and
 * raise the matching port interrupt when the FIS requests one (bit 6 of
 * byte 1).  Silently dropped if the guest has not enabled FIS receive
 * (PxCMD.FRE) or the RFIS area is unmapped.
 */
static void
ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
{
	int offset, len, irq;

	if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
		return;

	/* Each supported FIS type has a fixed slot in the RFIS area. */
	switch (ft) {
	case FIS_TYPE_REGD2H:
		offset = 0x40;
		len = 20;
		irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_DHR : 0;
		break;
	case FIS_TYPE_SETDEVBITS:
		offset = 0x58;
		len = 8;
		irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_SDB : 0;
		break;
	case FIS_TYPE_PIOSETUP:
		offset = 0x20;
		len = 20;
		irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_PS : 0;
		break;
	case FIS_TYPE_REGH2D:
	case FIS_TYPE_DMAACT:
	case FIS_TYPE_DMASETUP:
	case FIS_TYPE_DATA:
	case FIS_TYPE_BIST:
		WPRINTF("unsupported fis type %d\n", ft);
		return;
	}
	if (fis[2] & ATA_S_ERROR) {
		/* Latch the error and raise a task-file error interrupt. */
		p->waitforclear = 1;
		irq |= AHCI_P_IX_TFE;
	}
	memcpy(p->rfis + offset, fis, len);
	if (irq) {
		p->is |= ((unsigned) irq);
		ahci_generate_intr(p->pr_sc);
	}
}
/* Deliver a minimal PIO Setup FIS: type byte set, all other fields zero. */
static void
ahci_write_fis_piosetup(struct ahci_port *p)
{
	uint8_t fis[20] = { FIS_TYPE_PIOSETUP };

	ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
}
/*
 * Build and deliver a Set Device Bits FIS reporting status/error from
 * 'tfd' for the (queued) command in 'slot'.
 */
static void
ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
{
	uint8_t fis[8];
	uint8_t error;

	error = (tfd >> 8) & 0xff;	/* error register lives in bits 15:8 */
	tfd &= 0x77;			/* status bits carried by an SDB FIS */
	memset(fis, 0, sizeof(fis));
	fis[0] = FIS_TYPE_SETDEVBITS;
	fis[1] = (1 << 6);		/* request an interrupt */
	fis[2] = (uint8_t) tfd;
	fis[3] = error;
	if (fis[2] & ATA_S_ERROR) {
		/* Latch the failing command for READ LOG EXT page 0x10. */
		p->err_cfis[0] = (uint8_t) slot;
		p->err_cfis[2] = (uint8_t) tfd;
		p->err_cfis[3] = error;
		memcpy(&p->err_cfis[4], cfis + 4, 16);
	} else {
		/* Success: report completion through the SActive bitmap. */
		*(uint32_t *)((void *) (fis + 4)) = (1 << slot);
		p->sact &= ~(1 << slot);
	}
	/* Merge the new status bits into the shadow task-file register. */
	p->tfd &= ~((unsigned) 0x77);
	p->tfd |= tfd;
	ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
}
/*
 * Build and deliver a D2H Register FIS (with interrupt) that completes
 * the command in 'slot' with task-file contents 'tfd'.
 */
static void
ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
{
	uint8_t fis[20];
	uint8_t error;

	error = (tfd >> 8) & 0xff;
	memset(fis, 0, sizeof(fis));
	fis[0] = FIS_TYPE_REGD2H;
	fis[1] = (1 << 6);		/* interrupt bit */
	fis[2] = tfd & 0xff;		/* status */
	fis[3] = error;
	/* Mirror the LBA/device/count bytes of the original command FIS. */
	memcpy(&fis[4], &cfis[4], 10);
	if (fis[2] & ATA_S_ERROR) {
		/* Latch the failing command for READ LOG EXT page 0x10. */
		p->err_cfis[0] = 0x80;
		p->err_cfis[2] = tfd & 0xff;
		p->err_cfis[3] = error;
		memcpy(&p->err_cfis[4], cfis + 4, 16);
	} else
		p->ci &= ~(1 << slot);
	p->tfd = tfd;
	ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
}
/*
 * Early acknowledgement for a queued (NCQ) command: a D2H Register FIS
 * with good status and the interrupt bit clear.
 */
static void
ahci_write_fis_d2h_ncq(struct ahci_port *p, int slot)
{
	uint8_t fis[20] = { 0 };

	p->tfd = ATA_S_READY | ATA_S_DSC;
	fis[0] = FIS_TYPE_REGD2H;
	fis[1] = 0;			/* No interrupt */
	fis[2] = (uint8_t) p->tfd;	/* Status */
	fis[3] = 0;			/* No error */
	p->ci &= ~(1 << slot);
	ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
}
/*
 * Deliver the initial D2H Register FIS a device sends after reset,
 * carrying the ATA (or ATAPI) signature in the LBA/count fields.
 */
static void
ahci_write_reset_fis_d2h(struct ahci_port *p)
{
	uint8_t fis[20] = { 0 };

	fis[0] = FIS_TYPE_REGD2H;
	fis[3] = 1;
	fis[4] = 1;
	if (p->atapi) {
		/* ATAPI signature bytes in LBA mid/high. */
		fis[5] = 0x14;
		fis[6] = 0xeb;
	}
	fis[12] = 1;
	ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
}
/*
 * If we are no longer processing the command list and nothing is
 * in-flight, clear the running bit, the current command slot, and the
 * command issue and active bitmaps.
 */
static void
ahci_check_stopped(struct ahci_port *p)
{
	if ((p->cmd & AHCI_P_CMD_ST) || p->pending != 0)
		return;

	p->ccs = 0;
	p->cmd &= ~((unsigned) (AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK));
	p->ci = 0;
	p->sact = 0;
	p->waitforclear = 0;
}
/*
 * PxCMD.ST was cleared: cancel every in-flight block request that the
 * backend will let us cancel and return its tracking structure to the
 * free list, then settle the port state.
 */
static void
ahci_port_stop(struct ahci_port *p)
{
	struct ahci_ioreq *aior;
	uint8_t *cfis;
	int slot;
	int ncq;
	int error;

	/*
	 * NOTE(review): iteration continues after TAILQ_REMOVE; this relies
	 * on the removed node's link fields staying readable, which the
	 * plain TAILQ macros permit (the node is not freed here).
	 */
	TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
		/*
		 * Try to cancel the outstanding blockif request.
		 */
		error = blockif_cancel(p->bctx, &aior->io_req);
		if (error != 0)
			continue;

		slot = aior->slot;
		cfis = aior->cfis;
		/*
		 * BUGFIX: decide NCQ-ness per command.  The flag used to be
		 * sticky across iterations, so a non-NCQ command cancelled
		 * after an NCQ one cleared the wrong completion bitmap.
		 */
		ncq = (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
		    cfis[2] == ATA_READ_FPDMA_QUEUED ||
		    cfis[2] == ATA_SEND_FPDMA_QUEUED);
		if (ncq)
			p->sact &= ~(1 << slot);
		else
			p->ci &= ~(1 << slot);

		/*
		 * This command is now done.
		 */
		p->pending &= ~(1 << slot);

		/*
		 * Delete the blockif request from the busy list
		 */
		TAILQ_REMOVE(&p->iobhd, aior, io_blist);

		/*
		 * Move the blockif request back to the free list
		 */
		STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
	}

	ahci_check_stopped(p);
}
/*
 * Port (COMRESET-style) reset: restore link status, signature and task
 * file to power-on values; if a device is attached, deliver the initial
 * D2H register FIS with its signature.
 */
static void
ahci_port_reset(struct ahci_port *pr)
{
	pr->serr = 0;
	pr->sact = 0;

	/* Defaults after reset. */
	pr->xfermode = ATA_UDMA6;
	pr->mult_sectors = 128;

	if (!pr->bctx) {
		/* No backing storage: present an empty port. */
		pr->ssts = ATA_SS_DET_NO_DEVICE;
		pr->sig = 0xFFFFFFFF;
		pr->tfd = 0x7F;
		return;
	}
	pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
	/* Report the negotiated speed the guest asked for, else Gen3. */
	if (pr->sctl & ATA_SC_SPD_MASK)
		pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK);
	else
		pr->ssts |= ATA_SS_SPD_GEN3;
	pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
	if (!pr->atapi) {
		pr->sig = PxSIG_ATA;
		pr->tfd |= ATA_S_READY;
	} else
		pr->sig = PxSIG_ATAPI;
	ahci_write_reset_fis_d2h(pr);
}
/*
 * Full HBA reset: clear global state, drop any asserted legacy
 * interrupt, and reset every port.
 */
static void
ahci_reset(struct pci_ahci_softc *sc)
{
	int i;

	sc->ghc = AHCI_GHC_AE;	/* AHCI-enable stays set across reset */
	sc->is = 0;

	if (sc->lintr) {
		pci_lintr_deassert(sc->asc_pi);
		sc->lintr = 0;
	}

	for (i = 0; i < sc->ports; i++) {
		sc->port[i].ie = 0;
		sc->port[i].is = 0;
		sc->port[i].cmd = (AHCI_P_CMD_SUD | AHCI_P_CMD_POD);
		if (sc->port[i].bctx)
			sc->port[i].cmd |= AHCI_P_CMD_CPS; /* device present */
		sc->port[i].sctl = 0;
		ahci_port_reset(&sc->port[i]);
	}
}
/*
 * Copy an ASCII string into an ATA IDENTIFY field: space-padded to
 * 'len' bytes and byte-swapped within each 16-bit word, as IDENTIFY
 * string fields require.
 */
static void
ata_string(uint8_t *dest, const char *src, int len)
{
	int pos;

	for (pos = 0; pos < len; pos++) {
		char c = *src ? *src++ : ' ';
		dest[pos ^ 1] = (uint8_t) c;
	}
}
/* Copy an ASCII string, space-padded to 'len', without byte swapping. */
static void
atapi_string(uint8_t *dest, const char *src, int len)
{
	int pos;

	for (pos = 0; pos < len; pos++)
		dest[pos] = (uint8_t) (*src ? *src++ : ' ');
}
/*
* Build up the iovec based on the PRDT, 'done' and 'len'.
*/
static void
ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
    struct ahci_prdt_entry *prdt, uint16_t prdtl)
{
	struct blockif_req *breq = &aior->io_req;
	int i, j, skip, todo, left, extra;
	uint32_t dbcsz;

	/* Copy part of PRDT between 'done' and 'len' bytes into the iov. */
	skip = (int) aior->done;
	left = (int) (aior->len - aior->done);
	todo = 0;
	for (i = 0, j = 0; i < prdtl && j < BLOCKIF_IOV_MAX && left > 0;
	    i++, prdt++) {
		dbcsz = (prdt->dbc & DBCMASK) + 1;
		/* Skip already done part of the PRDT */
		if (dbcsz <= ((uint32_t) skip)) {
			skip -= dbcsz;
			continue;
		}
		/* First usable entry may be consumed only partially. */
		dbcsz -= ((unsigned) skip);
		if (dbcsz > ((uint32_t) left)) {
			dbcsz = ((uint32_t) left);
		}
		breq->br_iov[j].iov_base =
		    paddr_guest2host((prdt->dba + ((uint64_t) skip)), dbcsz);
		breq->br_iov[j].iov_len = dbcsz;
		todo += dbcsz;
		left -= dbcsz;
		skip = 0;
		j++;
	}

	/* If we got limited by IOV length, round I/O down to sector size. */
	if (j == BLOCKIF_IOV_MAX) {
		extra = todo % blockif_sectsz(p->bctx);
		todo -= extra;
		assert(todo > 0);
		/* Trim trailing iov entries until 'extra' bytes are dropped. */
		while (extra > 0) {
			if (breq->br_iov[j - 1].iov_len > ((size_t) extra)) {
				breq->br_iov[j - 1].iov_len -= ((size_t) extra);
				break;
			}
			extra -= breq->br_iov[j - 1].iov_len;
			j--;
		}
	}
	breq->br_iovcnt = j;
	breq->br_resid = todo;

	/* 'more' means another pass is needed to finish the transfer. */
	aior->done += ((unsigned) todo);
	aior->more = (aior->done < aior->len && i < prdtl);
}
/*
 * Start (or, when 'done' != 0, continue) a disk read/write for the ATA
 * command in 'cfis' at slot 'slot'.  Decodes LBA and sector count for
 * the NCQ, 48-bit and 28-bit command forms, builds the iovec from the
 * PRDT and queues the transfer on the block backend.
 */
static void
ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
{
	struct ahci_ioreq *aior;
	struct blockif_req *breq;
	struct ahci_prdt_entry *prdt;
	struct ahci_cmd_hdr *hdr;
	uint64_t lba;
	uint32_t len;
	int err, first, ncq, readop;

	prdt = (struct ahci_prdt_entry *)((void *) (cfis + 0x80));
	hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
	ncq = 0;
	readop = 1;
	first = (done == 0);

	if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
	    cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
	    cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
	    cfis[2] == ATA_WRITE_FPDMA_QUEUED)
		readop = 0;

	if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
	    cfis[2] == ATA_READ_FPDMA_QUEUED) {
		/* NCQ form: count is carried in the FEATURES bytes. */
		lba = ((uint64_t)cfis[10] << 40) |
		    ((uint64_t)cfis[9] << 32) |
		    ((uint64_t)cfis[8] << 24) |
		    ((uint64_t)cfis[6] << 16) |
		    ((uint64_t)cfis[5] << 8) |
		    cfis[4];
		len = (uint32_t) (cfis[11] << 8 | cfis[3]);
		if (!len)
			len = 65536;	/* count of 0 means 65536 sectors */
		ncq = 1;
	} else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
	    cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
	    cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
		/* 48-bit LBA form. */
		lba = ((uint64_t)cfis[10] << 40) |
		    ((uint64_t)cfis[9] << 32) |
		    ((uint64_t)cfis[8] << 24) |
		    ((uint64_t)cfis[6] << 16) |
		    ((uint64_t)cfis[5] << 8) |
		    cfis[4];
		len = (uint32_t) (cfis[13] << 8 | cfis[12]);
		if (!len)
			len = 65536;
	} else {
		/* 28-bit LBA form; count of 0 means 256 sectors. */
		lba = (uint64_t) (((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
		    (cfis[5] << 8) | cfis[4]);
		len = cfis[12];
		if (!len)
			len = 256;
	}
	/* Convert sectors to byte offsets/lengths. */
	lba *= (uint64_t) blockif_sectsz(p->bctx);
	len *= (uint32_t) blockif_sectsz(p->bctx);

	/* Pull request off free list */
	aior = STAILQ_FIRST(&p->iofhd);
	assert(aior != NULL);
	STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
	aior->cfis = cfis;
	aior->slot = slot;
	aior->len = len;
	aior->done = done;
	breq = &aior->io_req;
	breq->br_offset = (off_t) (lba + done);
	ahci_build_iov(p, aior, prdt, hdr->prdtl);

	/* Mark this command in-flight. */
	p->pending |= 1 << slot;

	/* Stuff request onto busy list. */
	TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);

	/* Acknowledge a new NCQ command up front with a no-interrupt FIS. */
	if (ncq && first)
		ahci_write_fis_d2h_ncq(p, slot);
	if (readop)
		err = blockif_read(p->bctx, breq);
	else
		err = blockif_write(p->bctx, breq);
	assert(err == 0);
}
/*
 * FLUSH CACHE: queue a flush on the block backend for command 'slot'.
 * Completion is reported asynchronously (callback not in this chunk).
 */
static void
ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
{
	struct ahci_ioreq *aior;
	struct blockif_req *breq;
	int err;

	/*
	 * Pull request off free list
	 */
	aior = STAILQ_FIRST(&p->iofhd);
	assert(aior != NULL);
	STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
	aior->cfis = cfis;
	aior->slot = slot;
	aior->len = 0;		/* a flush transfers no data */
	aior->done = 0;
	aior->more = 0;
	breq = &aior->io_req;

	/*
	 * Mark this command in-flight.
	 */
	p->pending |= 1 << slot;

	/*
	 * Stuff request onto busy list
	 */
	TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);

	err = blockif_flush(p->bctx, breq);
	assert(err == 0);
}
/*
 * Copy up to 'size' bytes of guest data described by the command's PRDT
 * into the host buffer 'buf'; stops early if the PRDT is shorter.
 */
static inline void
read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
    void *buf, int size)
{
	struct ahci_cmd_hdr *hdr;
	struct ahci_prdt_entry *prdt;
	void *to;
	int i, len;

	hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
	len = size;
	to = buf;
	prdt = (struct ahci_prdt_entry *)((void *) (cfis + 0x80));
	for (i = 0; i < hdr->prdtl && len; i++) {
		uint8_t *ptr;
		uint32_t dbcsz;
		int sublen;

		dbcsz = (prdt->dbc & DBCMASK) + 1;	/* entry byte count */
		ptr = paddr_guest2host(prdt->dba, dbcsz);
		sublen = ((len < ((int) dbcsz)) ? len : ((int) dbcsz));
		memcpy(to, ptr, sublen);
		len -= sublen;
		to = (uint8_t *) (((uintptr_t) to) + ((uintptr_t) sublen));
		prdt++;
	}
}
/*
 * DATA SET MANAGEMENT (TRIM) / SEND FPDMA QUEUED.  The guest supplies a
 * list of 8-byte (lba, count) range entries; 'done' is the byte offset
 * of the next entry.  Each nonzero range is issued as one blockif
 * delete; the function is re-entered with an advanced 'done' until the
 * list is exhausted, at which point the command is completed.
 */
static void
ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
{
	struct ahci_ioreq *aior;
	struct blockif_req *breq;
	uint8_t *entry;
	uint64_t elba;
	uint32_t len, elen;
	int err, first, ncq;
	uint8_t buf[512];

	first = (done == 0);
	if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
		len = (uint32_t) ((((uint16_t) cfis[13]) << 8) | cfis[12]);
		len *= 512;	/* count is in 512-byte blocks of entries */
		ncq = 0;
	} else { /* ATA_SEND_FPDMA_QUEUED */
		len = (uint32_t) ((((uint16_t) cfis[11]) << 8) | cfis[3]);
		len *= 512;
		ncq = 1;
	}
	/*
	 * NOTE(review): only the first 512 bytes of the range list are
	 * fetched, but 'entry = &buf[done]' below indexes by 'done' —
	 * confirm 'len' (and therefore 'done') cannot exceed sizeof(buf).
	 */
	read_prdt(p, slot, cfis, buf, sizeof(buf));
next:
	entry = &buf[done];
	/* Range entry: 48-bit LBA plus 16-bit sector count, little-endian. */
	elba = ((uint64_t)entry[5] << 40) |
	    ((uint64_t)entry[4] << 32) |
	    ((uint64_t)entry[3] << 24) |
	    ((uint64_t)entry[2] << 16) |
	    ((uint64_t)entry[1] << 8) |
	    entry[0];
	elen = (uint32_t) ((((uint16_t) entry[7]) << 8) | entry[6]);
	done += 8;
	if (elen == 0) {
		if (done >= len) {
			/* End of list: complete the command. */
			ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
			p->pending &= ~(1 << slot);
			ahci_check_stopped(p);
			if (!first)
				ahci_handle_port(p);
			return;
		}
		goto next;	/* skip empty entries */
	}

	/*
	 * Pull request off free list
	 */
	aior = STAILQ_FIRST(&p->iofhd);
	assert(aior != NULL);
	STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
	aior->cfis = cfis;
	aior->slot = slot;
	aior->len = len;
	aior->done = done;
	aior->more = (len != done);
	breq = &aior->io_req;
	breq->br_offset = (off_t) (elba * ((uint64_t) blockif_sectsz(p->bctx)));
	breq->br_resid = elen * ((unsigned) blockif_sectsz(p->bctx));

	/*
	 * Mark this command in-flight.
	 */
	p->pending |= 1 << slot;

	/*
	 * Stuff request onto busy list
	 */
	TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
	if (ncq && first)
		ahci_write_fis_d2h_ncq(p, slot);
	err = blockif_delete(p->bctx, breq);
	assert(err == 0);
}
/*
 * Copy up to 'size' bytes from the host buffer 'buf' into the guest
 * memory described by the command's PRDT, then record the number of
 * bytes actually transferred in the command header's prdbc field.
 */
static inline void
write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
    void *buf, int size)
{
	struct ahci_cmd_hdr *hdr;
	struct ahci_prdt_entry *prdt;
	void *from;
	int i, len;

	hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
	len = size;
	from = buf;
	prdt = (struct ahci_prdt_entry *)((void *) (cfis + 0x80));
	for (i = 0; i < hdr->prdtl && len; i++) {
		uint8_t *ptr;
		uint32_t dbcsz;
		int sublen;

		dbcsz = (prdt->dbc & DBCMASK) + 1;	/* entry byte count */
		ptr = paddr_guest2host(prdt->dba, dbcsz);
		sublen = (len < ((int) dbcsz)) ? len : ((int) dbcsz);
		memcpy(ptr, from, sublen);
		len -= sublen;
		from = (void *) (((uintptr_t) from) + ((uintptr_t) sublen));
		prdt++;
	}
	hdr->prdbc = (uint32_t) (size - len);	/* bytes transferred */
}
/*
 * Set the final byte of 'buf' so the 8-bit sum of all 'size' bytes is
 * zero (the two's-complement integrity byte used in ATA data pages).
 */
static void
ahci_checksum(uint8_t *buf, int size)
{
	uint8_t acc = 0;
	int pos;

	for (pos = 0; pos + 1 < size; pos++)
		acc = (uint8_t) (acc + buf[pos]);
	buf[size - 1] = (uint8_t) (0x100 - acc);
}
/*
 * READ LOG EXT page 0x10 (NCQ command error log): return the latched
 * error CFIS saved by the FIS writers.  Any other page, count, or an
 * ATAPI device is rejected with command aborted.
 */
static void
ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
{
	struct ahci_cmd_hdr *hdr;
	uint8_t buf[512];

	hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
	if (p->atapi || hdr->prdtl == 0 || cfis[4] != 0x10 ||
	    cfis[5] != 0 || cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
		ahci_write_fis_d2h(p, slot, cfis,
		    (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
		return;
	}

	memset(buf, 0, sizeof(buf));
	memcpy(buf, p->err_cfis, sizeof(p->err_cfis));
	ahci_checksum(buf, sizeof(buf));

	/* PIO-in commands announce the transfer with a PIO Setup FIS. */
	if (cfis[2] == ATA_READ_LOG_EXT)
		ahci_write_fis_piosetup(p);
	write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
	ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
}
/*
 * ATA IDENTIFY DEVICE: synthesize the 256-word identify data block from
 * the backing block device's geometry and capabilities, then return it
 * to the guest via PIO-in.  Aborted for ATAPI ports or commands that
 * carry no PRDT.
 */
static void
handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
{
	struct ahci_cmd_hdr *hdr;

	hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
	if (p->atapi || hdr->prdtl == 0) {
		ahci_write_fis_d2h(p, slot, cfis,
		    (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
	} else {
		uint16_t buf[256];
		uint64_t sectors;
		int sectsz, psectsz, psectoff, candelete, ro;
		uint16_t cyl;
		uint8_t sech, heads;

		/* Gather backend properties. */
		ro = blockif_is_ro(p->bctx);
		candelete = blockif_candelete(p->bctx);
		sectsz = blockif_sectsz(p->bctx);
		sectors = (uint64_t) (blockif_size(p->bctx) / sectsz);
		blockif_chs(p->bctx, &cyl, &heads, &sech);
		blockif_psectsz(p->bctx, &psectsz, &psectoff);
		memset(buf, 0, sizeof(buf));
		buf[0] = 0x0040;
		/* Words 1/3/6: legacy CHS geometry. */
		buf[1] = cyl;
		buf[3] = heads;
		buf[6] = sech;
		/* Words 10-19 serial, 23-26 firmware, 27-46 model strings. */
		ata_string((uint8_t *)(buf+10), p->ident, 20);
		ata_string((uint8_t *)(buf+23), "001", 8);
		ata_string((uint8_t *)(buf+27), "BHYVE SATA DISK", 40);
		buf[47] = (0x8000 | 128);
		buf[48] = 0x1;
		buf[49] = (1 << 8 | 1 << 9 | 1 << 11);
		buf[50] = (1 << 14);
		buf[53] = (1 << 1 | 1 << 2);
		if (p->mult_sectors)
			buf[59] = (uint16_t) (0x100 | p->mult_sectors);
		/* Words 60-61: 28-bit LBA capacity (saturated). */
		if (sectors <= 0x0fffffff) {
			buf[60] = (uint16_t) sectors;
			buf[61] = (uint16_t)(sectors >> 16);
		} else {
			buf[60] = 0xffff;
			buf[61] = 0x0fff;
		}
		buf[63] = 0x7;
		if (p->xfermode & ATA_WDMA0)
			buf[63] |= (1 << ((p->xfermode & 7) + 8));
		buf[64] = 0x3;
		buf[65] = 120;
		buf[66] = 120;
		buf[67] = 120;
		buf[68] = 120;
		buf[69] = 0;
		buf[75] = 31;	/* queue depth */
		buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
		    ATA_SUPPORT_NCQ);
		buf[77] = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
		    (p->ssts & ATA_SS_SPD_MASK) >> 3);
		buf[80] = 0x3f0;
		buf[81] = 0x28;
		buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
		    ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
		buf[83] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
		    ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
		buf[84] = (1 << 14);
		buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
		    ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
		buf[86] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
		    ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
		buf[87] = (1 << 14);
		buf[88] = 0x7f;
		if (p->xfermode & ATA_UDMA0)
			buf[88] |= (1 << ((p->xfermode & 7) + 8));
		/* Words 100-103: 48-bit LBA capacity. */
		buf[100] = (uint16_t) sectors;
		buf[101] = (uint16_t) (sectors >> 16);
		buf[102] = (uint16_t) (sectors >> 32);
		buf[103] = (sectors >> 48);
		if (candelete && !ro) {
			/* Advertise TRIM on writable, delete-capable media. */
			buf[69] |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
			buf[105] = 1;
			buf[169] = ATA_SUPPORT_DSM_TRIM;
		}
		/* Words 106/117-118/209: logical/physical sector geometry. */
		buf[106] = 0x4000;
		buf[209] = 0x4000;
		if (psectsz > sectsz) {
			buf[106] |= 0x2000;
			buf[106] |= ffsl(psectsz / sectsz) - 1;
			buf[209] |= (psectoff / sectsz);
		}
		if (sectsz > 512) {
			buf[106] |= 0x1000;
			buf[117] = (uint16_t) (sectsz / 2);
			buf[118] = (uint16_t) ((sectsz / 2) >> 16);
		}
		buf[119] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
		buf[120] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
		buf[222] = 0x1020;
		buf[255] = 0x00a5;	/* integrity word signature */
		ahci_checksum((uint8_t *)buf, sizeof(buf));
		ahci_write_fis_piosetup(p);
		write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
		ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
	}
}
/*
 * ATAPI IDENTIFY PACKET DEVICE: synthesize identify data for the
 * emulated SATA DVD-ROM.  Aborted when the port is not ATAPI.
 */
static void
handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
{
	if (!p->atapi) {
		ahci_write_fis_d2h(p, slot, cfis,
		    (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
	} else {
		uint16_t buf[256];

		memset(buf, 0, sizeof(buf));
		buf[0] = (2 << 14 | 5 << 8 | 1 << 7 | 2 << 5);
		/* Words 10-19 serial, 23-26 firmware, 27-46 model strings. */
		ata_string((uint8_t *)(buf+10), p->ident, 20);
		ata_string((uint8_t *)(buf+23), "001", 8);
		ata_string((uint8_t *)(buf+27), "BHYVE SATA DVD ROM", 40);
		buf[49] = (1 << 9 | 1 << 8);
		buf[50] = (1 << 14 | 1);
		buf[53] = (1 << 2 | 1 << 1);
		buf[62] = 0x3f;
		buf[63] = 7;
		if (p->xfermode & ATA_WDMA0)
			buf[63] |= (1 << ((p->xfermode & 7) + 8));
		buf[64] = 3;
		buf[65] = 120;
		buf[66] = 120;
		buf[67] = 120;
		buf[68] = 120;
		buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
		buf[77] = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
		buf[78] = (1 << 5);
		buf[80] = 0x3f0;
		buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
		    ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
		buf[83] = (1 << 14);
		buf[84] = (1 << 14);
		buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
		    ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
		buf[87] = (1 << 14);
		buf[88] = 0x7f;
		if (p->xfermode & ATA_UDMA0)
			buf[88] |= (1 << ((p->xfermode & 7) + 8));
		buf[222] = 0x1020;
		buf[255] = 0x00a5;	/* integrity word signature */
		ahci_checksum((uint8_t *)buf, sizeof(buf));
		ahci_write_fis_piosetup(p);
		write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
		ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
	}
}
/*
 * SCSI INQUIRY for the emulated ATAPI DVD-ROM.  Standard inquiry data
 * and VPD page 0 (supported pages) are implemented; any other VPD page
 * is rejected with ILLEGAL REQUEST / ASC 0x24 (invalid field in CDB).
 */
static void
atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
{
	uint8_t buf[36];
	uint8_t *acmd;
	int len;
	uint32_t tfd;

	acmd = cfis + 0x40;	/* the ATAPI CDB follows the command FIS */

	if (acmd[1] & 1) {		/* VPD */
		if (acmd[2] == 0) {	/* Supported VPD pages */
			buf[0] = 0x05;	/* CD/DVD device type */
			buf[1] = 0;	/* page code 0 */
			buf[2] = 0;
			buf[3] = 1;	/* one supported page follows */
			buf[4] = 0;	/* page 0 itself */
			len = 4 + buf[3];
		} else {
			p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
			p->asc = 0x24;
			tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
			cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
			ahci_write_fis_d2h(p, slot, cfis, tfd);
			return;
		}
	} else {
		/* Standard inquiry data: removable CD/DVD device. */
		buf[0] = 0x05;
		buf[1] = 0x80;
		buf[2] = 0x00;
		buf[3] = 0x21;
		buf[4] = 31;	/* additional length */
		buf[5] = 0;
		buf[6] = 0;
		buf[7] = 0;
		atapi_string(buf + 8, "BHYVE", 8);
		atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
		atapi_string(buf + 32, "001", 4);
		len = sizeof(buf);
	}
	/* Never return more than the CDB's allocation length. */
	if (len > acmd[4])
		len = acmd[4];
	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
	write_prdt(p, slot, cfis, buf, len);
	ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
}
/* Store a 16-bit value at 'pp' in big-endian byte order. */
static __inline void
be16enc(void *pp, uint16_t u)
{
	unsigned char *out = (unsigned char *)pp;

	out[0] = (unsigned char) (u >> 8);
	out[1] = (unsigned char) (u & 0xff);
}
/* Load a big-endian 16-bit value from 'pp'. */
static __inline uint16_t
be16dec(const void *pp)
{
	const unsigned char *in = (const unsigned char *)pp;

	return ((uint16_t) ((((uint32_t) in[0]) << 8) | ((uint32_t) in[1])));
}
/* Store a 32-bit value at 'pp' in big-endian byte order. */
static __inline void
be32enc(void *pp, uint32_t u)
{
	unsigned char *out = (unsigned char *)pp;
	int i;

	for (i = 0; i < 4; i++)
		out[i] = (unsigned char) (u >> (24 - 8 * i));
}
/* Load a big-endian 32-bit value from 'pp'. */
static __inline uint32_t
be32dec(const void *pp)
{
	const unsigned char *in = (const unsigned char *)pp;
	uint32_t v = 0;
	int i;

	for (i = 0; i < 4; i++)
		v = (v << 8) | in[i];
	return (v);
}
/*
 * SCSI READ CAPACITY(10): report the last LBA (not the sector count)
 * and a fixed 2048-byte logical block size.
 */
static void
atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
{
	uint8_t buf[8];
	uint64_t sectors;

	sectors = (uint64_t) (blockif_size(p->bctx) / 2048);
	be32enc(buf, ((uint32_t) (sectors - 1)));	/* last LBA */
	be32enc(buf + 4, 2048);				/* block length */
	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
	write_prdt(p, slot, cfis, buf, sizeof(buf));
	ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
}
/*
 * SCSI READ TOC.  The backing image is presented as a single-track data
 * CD.  Format 0 (formatted TOC), 1 (session info) and 2 (raw/full TOC)
 * are supported; other formats are rejected with ILLEGAL REQUEST.
 */
static void
atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
{
	uint8_t *acmd;
	uint8_t format;
	int len;

	acmd = cfis + 0x40;	/* the ATAPI CDB follows the command FIS */

	len = be16dec(acmd + 7);	/* allocation length */
	format = acmd[9] >> 6;
	switch (format) {
	case 0:		/* Formatted TOC */
	{
		int msf, size;
		uint64_t sectors;
		uint8_t start_track, buf[20], *bp;

		msf = (acmd[1] >> 1) & 1;	/* MSF vs LBA addressing */
		start_track = acmd[6];
		if (start_track > 1 && start_track != 0xaa) {
			/* Only track 1 and the lead-out (0xaa) exist. */
			uint32_t tfd;

			p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
			p->asc = 0x24;
			tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
			cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
			ahci_write_fis_d2h(p, slot, cfis, tfd);
			return;
		}
		bp = buf + 2;
		*bp++ = 1;	/* first track */
		*bp++ = 1;	/* last track */
		if (start_track <= 1) {
			/* Track 1 descriptor: data track starting at 0. */
			*bp++ = 0;
			*bp++ = 0x14;
			*bp++ = 1;
			*bp++ = 0;
			if (msf) {
				*bp++ = 0;
				lba_to_msf(bp, 0);
				bp += 3;
			} else {
				*bp++ = 0;
				*bp++ = 0;
				*bp++ = 0;
				*bp++ = 0;
			}
		}
		/* Lead-out descriptor (track 0xaa) at end of media. */
		*bp++ = 0;
		*bp++ = 0x14;
		*bp++ = 0xaa;
		*bp++ = 0;
		sectors = (uint64_t) (blockif_size(p->bctx) / blockif_sectsz(p->bctx));
		sectors >>= 2;	/* backend sectors -> 2048-byte blocks */
		if (msf) {
			*bp++ = 0;
			lba_to_msf(bp, ((int) sectors));
			bp += 3;
		} else {
			be32enc(bp, ((uint32_t) sectors));
			bp += 4;
		}
		size = (int) (bp - buf);
		be16enc(buf, ((uint16_t) (size - 2)));	/* TOC data length */
		if (len > size)
			len = size;
		write_prdt(p, slot, cfis, buf, len);
		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
		break;
	}
	case 1:		/* Session info: one session, first track 1 */
	{
		uint8_t buf[12];

		memset(buf, 0, sizeof(buf));
		buf[1] = 0xa;	/* data length */
		buf[2] = 0x1;	/* first session */
		buf[3] = 0x1;	/* last session */
		if (((size_t) len) > sizeof(buf))
			len = sizeof(buf);
		write_prdt(p, slot, cfis, buf, len);
		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
		break;
	}
	case 2:		/* Raw TOC: A0/A1/A2 entries plus track 1 */
	{
		int msf, size;
		uint64_t sectors;
		uint8_t start_track, *bp, buf[50];

		msf = (acmd[1] >> 1) & 1;
		start_track = acmd[6];
		bp = buf + 2;
		*bp++ = 1;	/* first session */
		*bp++ = 1;	/* last session */
		/* Point A0: first track number. */
		*bp++ = 1;
		*bp++ = 0x14;
		*bp++ = 0;
		*bp++ = 0xa0;
		*bp++ = 0;
		*bp++ = 0;
		*bp++ = 0;
		*bp++ = 0;
		*bp++ = 1;
		*bp++ = 0;
		*bp++ = 0;
		/* Point A1: last track number. */
		*bp++ = 1;
		*bp++ = 0x14;
		*bp++ = 0;
		*bp++ = 0xa1;
		*bp++ = 0;
		*bp++ = 0;
		*bp++ = 0;
		*bp++ = 0;
		*bp++ = 1;
		*bp++ = 0;
		*bp++ = 0;
		/* Point A2: lead-out start address. */
		*bp++ = 1;
		*bp++ = 0x14;
		*bp++ = 0;
		*bp++ = 0xa2;
		*bp++ = 0;
		*bp++ = 0;
		*bp++ = 0;
		sectors = (uint64_t) (blockif_size(p->bctx) / blockif_sectsz(p->bctx));
		sectors >>= 2;	/* backend sectors -> 2048-byte blocks */
		if (msf) {
			*bp++ = 0;
			lba_to_msf(bp, ((int) sectors));
			bp += 3;
		} else {
			be32enc(bp, ((uint32_t) sectors));
			bp += 4;
		}
		/* Track 1 descriptor starting at address 0. */
		*bp++ = 1;
		*bp++ = 0x14;
		*bp++ = 0;
		*bp++ = 1;
		*bp++ = 0;
		*bp++ = 0;
		*bp++ = 0;
		if (msf) {
			*bp++ = 0;
			lba_to_msf(bp, 0);
			bp += 3;
		} else {
			*bp++ = 0;
			*bp++ = 0;
			*bp++ = 0;
			*bp++ = 0;
		}
		size = (int) (bp - buf);
		be16enc(buf, ((uint16_t) (size - 2)));	/* TOC data length */
		if (len > size)
			len = size;
		write_prdt(p, slot, cfis, buf, len);
		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
		break;
	}
	default:
	{
		/* Unsupported format: ILLEGAL REQUEST, invalid CDB field. */
		uint32_t tfd;

		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
		p->asc = 0x24;
		tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
		ahci_write_fis_d2h(p, slot, cfis, tfd);
		break;
	}
	}
}
/*
 * SCSI REPORT LUNS: reply with a single 8-byte LUN entry (LUN 0) and
 * complete the command with a D2H FIS indicating success.
 */
static void
atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
{
	uint8_t reply[16] = { 0 };

	reply[3] = 8;		/* LUN list length: one 8-byte descriptor */
	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
	write_prdt(p, slot, cfis, reply, sizeof(reply));
	ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
}
/*
 * Begin (or continue) an ATAPI READ(10)/READ(12) data transfer.
 *
 * Decodes the LBA and transfer length from the CDB at cfis+0x40, converts
 * them to byte units (2048-byte CD sectors), takes an ahci_ioreq off the
 * port's free list, builds the I/O vector from the guest PRDT and queues
 * an asynchronous blockif read.  Completion is handled in atapi_ioreq_cb(),
 * which re-enters this function with an updated 'done' byte count when the
 * transfer was split across multiple blockif requests.
 */
static void
atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
{
	struct ahci_ioreq *aior;
	struct ahci_cmd_hdr *hdr;
	struct ahci_prdt_entry *prdt;
	struct blockif_req *breq;
	struct pci_ahci_softc *sc;
	uint8_t *acmd;
	uint64_t lba;
	uint32_t len;
	int err;
	sc = p->pr_sc;
	acmd = cfis + 0x40;	/* ATAPI command packet (CDB) */
	hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
	prdt = (struct ahci_prdt_entry *)((void *) (cfis + 0x80));
	lba = be32dec(acmd + 2);
	/* READ(10) carries a 16-bit length, READ(12) a 32-bit one. */
	if (acmd[0] == READ_10)
		len = be16dec(acmd + 7);
	else
		len = be32dec(acmd + 6);
	if (len == 0) {
		/*
		 * Zero-length transfer: report success immediately.
		 * NOTE(review): there is no 'return' here, so execution falls
		 * through and still queues a zero-byte blockif read after the
		 * completion FIS has been written -- confirm this is intended.
		 */
		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
	}
	/* Convert CD sectors to bytes. */
	lba *= 2048;
	len *= 2048;
	/*
	 * Pull request off free list
	 */
	aior = STAILQ_FIRST(&p->iofhd);
	assert(aior != NULL);
	STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
	aior->cfis = cfis;
	aior->slot = slot;
	aior->len = len;
	aior->done = done;
	breq = &aior->io_req;
	breq->br_offset = (off_t) (lba + ((uint64_t) done));
	ahci_build_iov(p, aior, prdt, hdr->prdtl);
	/* Mark this command in-flight. */
	p->pending |= 1 << slot;
	/* Stuff request onto busy list. */
	TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
	err = blockif_read(p->bctx, breq);
	assert(err == 0);
}
/*
 * SCSI REQUEST SENSE: return a fixed-format sense buffer built from the
 * sense key / additional sense code last latched on this port, truncated
 * to the allocation length from the CDB.
 */
static void
atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
{
	uint8_t sense[64];
	uint8_t *acmd = cfis + 0x40;
	int alloc_len = acmd[4];	/* allocation length from the CDB */

	if (((size_t) alloc_len) > sizeof(sense))
		alloc_len = sizeof(sense);
	memset(sense, 0, alloc_len);
	sense[0] = 0x70 | (1 << 7);	/* current error, valid bit set */
	sense[2] = p->sense_key;
	sense[7] = 10;			/* additional sense length */
	sense[12] = p->asc;
	write_prdt(p, slot, cfis, sense, alloc_len);
	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
	ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
}
/*
 * SCSI START STOP UNIT.  All variants succeed immediately except media
 * eject (power condition field == 2), which is rejected as illegal since
 * ejecting is not implemented.
 */
static void
atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
{
	uint8_t *acmd = cfis + 0x40;
	uint32_t tfd;

	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
	if ((acmd[4] & 3) == 2) {
		/* TODO eject media */
		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
		p->asc = 0x53;
		tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
	} else {
		/* Variants 0, 1 and 3 succeed without doing anything. */
		tfd = ATA_S_READY | ATA_S_DSC;
	}
	ahci_write_fis_d2h(p, slot, cfis, tfd);
}
/*
 * SCSI MODE SENSE(10).  Only page control 0 ("current values") with the
 * R/W error recovery or CD capabilities pages is supported; page control 3
 * ("saved values") and all other combinations are rejected with the
 * appropriate sense data.
 */
static void
atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
{
	uint8_t *acmd;
	uint32_t tfd;
	uint8_t pc, code;
	int len;
	tfd = 0;
	acmd = cfis + 0x40;
	len = be16dec(acmd + 7);	/* allocation length from the CDB */
	pc = acmd[2] >> 6;		/* page control */
	code = acmd[2] & 0x3f;		/* page code */
	switch (pc) {
	case 0:
		switch (code) {
		case MODEPAGE_RW_ERROR_RECOVERY:
		{
			uint8_t buf[16];
			if (((size_t) len) > sizeof(buf)) {
				len = sizeof(buf);
			}
			memset(buf, 0, sizeof(buf));
			/* Mode parameter header: data length, medium type. */
			be16enc(buf, 16 - 2);
			buf[2] = 0x70;
			buf[8] = 0x01;		/* page code */
			buf[9] = 16 - 10;	/* page length */
			buf[11] = 0x05;
			write_prdt(p, slot, cfis, buf, len);
			tfd = ATA_S_READY | ATA_S_DSC;
			break;
		}
		case MODEPAGE_CD_CAPABILITIES:
		{
			uint8_t buf[30];
			if (((size_t) len) > sizeof(buf)) {
				len = sizeof(buf);
			}
			memset(buf, 0, sizeof(buf));
			be16enc(buf, 30 - 2);
			buf[2] = 0x70;
			buf[8] = 0x2A;		/* page code */
			buf[9] = 30 - 10;	/* page length */
			buf[10] = 0x08;
			buf[12] = 0x71;
			be16enc(&buf[18], 2);	/* number of volume levels */
			be16enc(&buf[20], 512);	/* buffer size (KB) */
			write_prdt(p, slot, cfis, buf, len);
			tfd = ATA_S_READY | ATA_S_DSC;
			break;
		}
		default:
			/* Unknown page: fall into the illegal-request arm. */
			goto error;
		}
		break;
	case 3:
		/* Saved values are not supported. */
		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
		p->asc = 0x39;
		tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
		break;
	error:
	case 1:
	case 2:
		/* Changeable/default values (and the goto above): illegal. */
		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
		p->asc = 0x24;
		tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
		break;
	}
	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
	ahci_write_fis_d2h(p, slot, cfis, tfd);
}
/*
 * SCSI GET EVENT STATUS NOTIFICATION.  Only the polled mode of operation
 * is supported; an asynchronous request is rejected as an illegal request.
 */
static void
atapi_get_event_status_notification(struct ahci_port *p, int slot,
	uint8_t *cfis)
{
	uint8_t *acmd = cfis + 0x40;
	uint32_t tfd;

	if (acmd[1] & 1) {
		/* Polled request: return a "no media event" reply. */
		uint8_t reply[8];
		int alloc_len = be16dec(acmd + 7);

		if (((size_t) alloc_len) > sizeof(reply)) {
			alloc_len = sizeof(reply);
		}
		memset(reply, 0, sizeof(reply));
		be16enc(reply, 8 - 2);	/* event data length */
		reply[2] = 0x04;	/* notification class: media */
		reply[3] = 0x10;	/* supported event classes */
		reply[5] = 0x02;	/* media present */
		write_prdt(p, slot, cfis, reply, alloc_len);
		tfd = ATA_S_READY | ATA_S_DSC;
	} else {
		/* we don't support asynchronous operation */
		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
		p->asc = 0x24;
		tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
	}
	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
	ahci_write_fis_d2h(p, slot, cfis, tfd);
}
/*
 * Dispatch an ATAPI PACKET command (the CDB lives at cfis+0x40) to the
 * per-opcode emulation routine.  Unknown opcodes are completed with an
 * "illegal request / invalid command operation code" sense condition.
 */
static void
handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
{
	uint8_t *acmd;
	acmd = cfis + 0x40;
#ifdef AHCI_DEBUG
	{
		int i;
		DPRINTF("ACMD:");
		for (i = 0; i < 16; i++)
			DPRINTF("%02x ", acmd[i]);
		DPRINTF("\n");
	}
#endif
	switch (acmd[0]) {
	case TEST_UNIT_READY:
		/* Always ready: complete immediately with success. */
		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
		break;
	case INQUIRY:
		atapi_inquiry(p, slot, cfis);
		break;
	case READ_CAPACITY:
		atapi_read_capacity(p, slot, cfis);
		break;
	case PREVENT_ALLOW:
		/* TODO */
		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
		break;
	case READ_TOC:
		atapi_read_toc(p, slot, cfis);
		break;
	case REPORT_LUNS:
		atapi_report_luns(p, slot, cfis);
		break;
	case READ_10:
	case READ_12:
		atapi_read(p, slot, cfis, 0);
		break;
	case REQUEST_SENSE:
		atapi_request_sense(p, slot, cfis);
		break;
	case START_STOP_UNIT:
		atapi_start_stop_unit(p, slot, cfis);
		break;
	case MODE_SENSE_10:
		atapi_mode_sense(p, slot, cfis);
		break;
	case GET_EVENT_STATUS_NOTIFICATION:
		atapi_get_event_status_notification(p, slot, cfis);
		break;
	default:
		/* Unsupported opcode: invalid command operation code. */
		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
		p->asc = 0x20;
		ahci_write_fis_d2h(p, slot, cfis, ((uint32_t) (p->sense_key << 12)) |
			((uint32_t) (ATA_S_READY | ATA_S_ERROR)));
		break;
	}
}
/*
 * Dispatch a received ATA register H2D command FIS to the appropriate
 * handler.  Synchronous commands write their D2H status FIS here;
 * data-transfer commands hand off to the async blockif machinery.
 * Unsupported opcodes are aborted with ATA_E_ABORT.
 */
static void
ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
{
	p->tfd |= ATA_S_BUSY;
	switch (cfis[2]) {
	case ATA_ATA_IDENTIFY:
		handle_identify(p, slot, cfis);
		break;
	case ATA_SETFEATURES:
	{
		switch (cfis[3]) {
		case ATA_SF_ENAB_SATA_SF:
			switch (cfis[12]) {
			case ATA_SATA_SF_AN:
				p->tfd = ATA_S_DSC | ATA_S_READY;
				break;
			default:
				p->tfd = ATA_S_ERROR | ATA_S_READY;
				p->tfd |= (ATA_ERROR_ABORT << 8);
				break;
			}
			break;
		case ATA_SF_ENAB_WCACHE:
		case ATA_SF_DIS_WCACHE:
		case ATA_SF_ENAB_RCACHE:
		case ATA_SF_DIS_RCACHE:
			/* Cache toggles are accepted but have no effect. */
			p->tfd = ATA_S_DSC | ATA_S_READY;
			break;
		case ATA_SF_SETXFER:
		{
			switch (cfis[12] & 0xf8) {
			case ATA_PIO:
			case ATA_PIO0:
				break;
			case ATA_WDMA0:
			case ATA_UDMA0:
				p->xfermode = (cfis[12] & 0x7);
				break;
			}
			p->tfd = ATA_S_DSC | ATA_S_READY;
			break;
		}
		default:
			p->tfd = ATA_S_ERROR | ATA_S_READY;
			p->tfd |= (ATA_ERROR_ABORT << 8);
			break;
		}
		ahci_write_fis_d2h(p, slot, cfis, p->tfd);
		break;
	}
	case ATA_SET_MULTI:
		/* Sector count must be zero or a power of two <= 128. */
		if (cfis[12] != 0 &&
			(cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
			p->tfd = ATA_S_ERROR | ATA_S_READY;
			p->tfd |= (ATA_ERROR_ABORT << 8);
		} else {
			p->mult_sectors = cfis[12];
			p->tfd = ATA_S_DSC | ATA_S_READY;
		}
		ahci_write_fis_d2h(p, slot, cfis, p->tfd);
		break;
	case ATA_READ:
	case ATA_WRITE:
	case ATA_READ48:
	case ATA_WRITE48:
	case ATA_READ_MUL:
	case ATA_WRITE_MUL:
	case ATA_READ_MUL48:
	case ATA_WRITE_MUL48:
	case ATA_READ_DMA:
	case ATA_WRITE_DMA:
	case ATA_READ_DMA48:
	case ATA_WRITE_DMA48:
	case ATA_READ_FPDMA_QUEUED:
	case ATA_WRITE_FPDMA_QUEUED:
		ahci_handle_rw(p, slot, cfis, 0);
		break;
	case ATA_FLUSHCACHE:
	case ATA_FLUSHCACHE48:
		ahci_handle_flush(p, slot, cfis);
		break;
	case ATA_DATA_SET_MANAGEMENT:
		/* Only a single-block TRIM request is supported. */
		if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
			cfis[13] == 0 && cfis[12] == 1) {
			ahci_handle_dsm_trim(p, slot, cfis, 0);
			break;
		}
		ahci_write_fis_d2h(p, slot, cfis,
			(ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
		break;
	case ATA_SEND_FPDMA_QUEUED:
		/*
		 * NCQ TRIM: DSM subcommand with the TRIM auxiliary and a
		 * single block of TRIM ranges (queued commands carry the
		 * sector count in the features fields, cfis[3]/cfis[11]).
		 * Fixed: this previously tested cfis[13] == 1, which can
		 * never hold together with (cfis[13] & 0x1f) == ATA_SFPDMA_DSM
		 * (subcode 0 per ACS), so NCQ TRIM was always aborted;
		 * upstream bhyve tests cfis[3] == 1 here.
		 */
		if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
			cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
			cfis[11] == 0 && cfis[3] == 1) {
			ahci_handle_dsm_trim(p, slot, cfis, 0);
			break;
		}
		ahci_write_fis_d2h(p, slot, cfis,
			(ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
		break;
	case ATA_READ_LOG_EXT:
	case ATA_READ_LOG_DMA_EXT:
		ahci_handle_read_log(p, slot, cfis);
		break;
	case ATA_NOP:
		ahci_write_fis_d2h(p, slot, cfis,
			(ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
		break;
	case ATA_STANDBY_CMD:
	case ATA_STANDBY_IMMEDIATE:
	case ATA_IDLE_CMD:
	case ATA_IDLE_IMMEDIATE:
	case ATA_SLEEP:
		/* Power management commands succeed as no-ops. */
		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
		break;
	case ATA_ATAPI_IDENTIFY:
		handle_atapi_identify(p, slot, cfis);
		break;
	case ATA_PACKET_CMD:
		/* PACKET is only valid on ATAPI devices. */
		if (!p->atapi) {
			ahci_write_fis_d2h(p, slot, cfis,
				(ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
		} else
			handle_packet_cmd(p, slot, cfis);
		break;
	default:
		WPRINTF("Unsupported cmd:%02x\n", cfis[2]);
		ahci_write_fis_d2h(p, slot, cfis,
			(ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
		break;
	}
}
/*
 * Process one command slot: map the command table from guest memory,
 * validate that the FIS is a register H2D FIS, and either dispatch the
 * command (C bit set) or handle device control / soft-reset sequencing.
 */
static void
ahci_handle_slot(struct ahci_port *p, int slot)
{
	struct ahci_cmd_hdr *hdr;
	struct ahci_prdt_entry *prdt;
	struct pci_ahci_softc *sc;
	uint8_t *cfis;
	int cfl;	/* command FIS length in bytes; used by debug dump only */
	sc = p->pr_sc;
	hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
	cfl = (hdr->flags & 0x1f) * 4;
	/* Map the command table (FIS + ACMD + PRDT) into host memory. */
	cfis = paddr_guest2host(hdr->ctba,
		0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
	prdt = (struct ahci_prdt_entry *)((void *) (cfis + 0x80));
#ifdef AHCI_DEBUG
	/* NOTE(review): 'i' is not declared here -- this block does not
	 * compile when AHCI_DEBUG is defined; confirm and fix upstream. */
	DPRINTF("\ncfis:");
	for (i = 0; i < cfl; i++) {
		if (i % 10 == 0)
			DPRINTF("\n");
		DPRINTF("%02x ", cfis[i]);
	}
	DPRINTF("\n");
	for (i = 0; i < hdr->prdtl; i++) {
		DPRINTF("%d@%08"PRIx64"\n", prdt->dbc & 0x3fffff, prdt->dba);
		prdt++;
	}
#endif
	if (cfis[0] != FIS_TYPE_REGH2D) {
		WPRINTF("Not a H2D FIS:%02x\n", cfis[0]);
		return;
	}
	if (cfis[1] & 0x80) {
		/* C bit set: this is a command FIS. */
		ahci_handle_cmd(p, slot, cfis);
	} else {
		/* Device control FIS: track SRST for the soft-reset protocol. */
		if (cfis[15] & (1 << 2))
			p->reset = 1;
		else if (p->reset) {
			p->reset = 0;
			ahci_port_reset(p);
		}
		/* Control FIS consumes the slot without running a command. */
		p->ci &= ~(1 << slot);
	}
}
/*
 * Issue pending commands on a started port.  Walks the command-issue
 * bitmap round-robin from the current command slot (ccs), skipping
 * commands already in flight, and stops while the device reports busy,
 * DRQ, or is waiting for an error to be cleared.
 */
static void
ahci_handle_port(struct ahci_port *p)
{
	/* Nothing to do unless the port has been started (PxCMD.ST). */
	if (!(p->cmd & AHCI_P_CMD_ST))
		return;
	/*
	 * Search for any new commands to issue ignoring those that
	 * are already in-flight. Stop if device is busy or in error.
	 */
	for (; (p->ci & ~p->pending) != 0; p->ccs = ((p->ccs + 1) & 31)) {
		if ((p->tfd & (ATA_S_BUSY | ATA_S_DRQ)) != 0)
			break;
		if (p->waitforclear)
			break;
		if ((p->ci & ~p->pending & (1 << p->ccs)) != 0) {
			/* Publish the slot being issued in PxCMD.CCS. */
			p->cmd &= ~((unsigned) AHCI_P_CMD_CCS_MASK);
			p->cmd |= p->ccs << AHCI_P_CMD_CCS_SHIFT;
			ahci_handle_slot(p, ((int) p->ccs));
		}
	}
}
/*
 * blockif callback routine - this runs in the context of the blockif
 * i/o thread, so the mutex needs to be acquired.
 *
 * Completes (or continues) an ATA read/write/trim request: recycles the
 * ahci_ioreq, re-issues the next chunk of a split transfer, and otherwise
 * writes the completion FIS (SDB for NCQ, D2H otherwise) and kicks the
 * port to issue further queued commands.
 */
static void
ata_ioreq_cb(struct blockif_req *br, int err)
{
	struct ahci_cmd_hdr *hdr;
	struct ahci_ioreq *aior;
	struct ahci_port *p;
	struct pci_ahci_softc *sc;
	uint32_t tfd;
	uint8_t *cfis;
	int slot, ncq, dsm;
	DPRINTF("%s %d\n", __func__, err);
	ncq = dsm = 0;
	aior = br->br_param;
	p = aior->io_pr;
	cfis = aior->cfis;
	slot = aior->slot;
	sc = p->pr_sc;
	hdr = (struct ahci_cmd_hdr *)((void *) (p->cmd_lst + slot * AHCI_CL_SIZE));
	/* NCQ commands complete with an SDB FIS instead of a D2H FIS. */
	if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
		cfis[2] == ATA_READ_FPDMA_QUEUED ||
		cfis[2] == ATA_SEND_FPDMA_QUEUED)
		ncq = 1;
	if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
		(cfis[2] == ATA_SEND_FPDMA_QUEUED &&
		(cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
		dsm = 1;
	pthread_mutex_lock(&sc->mtx);
	/*
	 * Delete the blockif request from the busy list
	 */
	TAILQ_REMOVE(&p->iobhd, aior, io_blist);
	/*
	 * Move the blockif request back to the free list
	 */
	STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
	if (!err)
		hdr->prdbc = aior->done;
	if (!err && aior->more) {
		/* Transfer was split: issue the next chunk. */
		if (dsm)
			ahci_handle_dsm_trim(p, slot, cfis, aior->done);
		else
			ahci_handle_rw(p, slot, cfis, aior->done);
		goto out;
	}
	if (!err)
		tfd = ATA_S_READY | ATA_S_DSC;
	else
		tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
	if (ncq)
		ahci_write_fis_sdb(p, slot, cfis, tfd);
	else
		ahci_write_fis_d2h(p, slot, cfis, tfd);
	/*
	 * This command is now complete.
	 */
	p->pending &= ~(1 << slot);
	ahci_check_stopped(p);
	ahci_handle_port(p);
out:
	pthread_mutex_unlock(&sc->mtx);
	DPRINTF("%s exit\n", __func__);
}
/*
 * blockif completion callback for ATAPI reads; runs on the blockif i/o
 * thread, so the softc mutex must be taken.  Re-issues the next chunk of
 * a split READ, otherwise completes the PACKET command with either success
 * or a "logical block address out of range" sense condition.
 */
static void
atapi_ioreq_cb(struct blockif_req *br, int err)
{
	struct ahci_cmd_hdr *hdr;
	struct ahci_ioreq *aior;
	struct ahci_port *p;
	struct pci_ahci_softc *sc;
	uint8_t *cfis;
	uint32_t tfd;
	int slot;
	DPRINTF("%s %d\n", __func__, err);
	aior = br->br_param;
	p = aior->io_pr;
	cfis = aior->cfis;
	slot = aior->slot;
	sc = p->pr_sc;
	hdr = (struct ahci_cmd_hdr *)
		((void *) (p->cmd_lst + aior->slot * AHCI_CL_SIZE));
	pthread_mutex_lock(&sc->mtx);
	/*
	 * Delete the blockif request from the busy list
	 */
	TAILQ_REMOVE(&p->iobhd, aior, io_blist);
	/*
	 * Move the blockif request back to the free list
	 */
	STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
	if (!err)
		hdr->prdbc = aior->done;
	if (!err && aior->more) {
		/* Transfer was split: queue the next chunk. */
		atapi_read(p, slot, cfis, aior->done);
		goto out;
	}
	if (!err) {
		tfd = ATA_S_READY | ATA_S_DSC;
	} else {
		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
		p->asc = 0x21;	/* LBA out of range */
		tfd = (uint32_t) ((p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR);
	}
	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
	ahci_write_fis_d2h(p, slot, cfis, tfd);
	/*
	 * This command is now complete.
	 */
	p->pending &= ~(1 << slot);
	ahci_check_stopped(p);
	ahci_handle_port(p);
out:
	pthread_mutex_unlock(&sc->mtx);
	DPRINTF("%s exit\n", __func__);
}
/*
 * Allocate the per-port pool of blockif request structures, size it to the
 * blockif queue depth, wire each entry's completion callback to match the
 * device type (ATA vs ATAPI) and place every entry on the free list.
 */
static void
pci_ahci_ioreq_init(struct ahci_port *pr)
{
	struct ahci_ioreq *req;
	int idx;

	pr->ioqsz = blockif_queuesz(pr->bctx);
	pr->ioreq = calloc(((size_t) pr->ioqsz), sizeof(struct ahci_ioreq));
	STAILQ_INIT(&pr->iofhd);
	for (idx = 0; idx < pr->ioqsz; idx++) {
		req = &pr->ioreq[idx];
		req->io_pr = pr;
		req->io_req.br_callback =
			pr->atapi ? atapi_ioreq_cb : ata_ioreq_cb;
		req->io_req.br_param = req;
		STAILQ_INSERT_TAIL(&pr->iofhd, req, io_flist);
	}
	TAILQ_INIT(&pr->iobhd);
}
/*
 * Handle a 32-bit write to a per-port AHCI register.  'offset' on entry is
 * relative to the start of the register file and is reduced to a per-port
 * offset; side effects (starting/stopping the port, interrupt generation,
 * command issue) are applied where the spec requires.
 */
static void
pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
{
	int port = (int) ((offset - AHCI_OFFSET) / AHCI_STEP);
	offset = (offset - AHCI_OFFSET) % AHCI_STEP;
	struct ahci_port *p = &sc->port[port];
	DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
		port, offset, value);
	switch (offset) {
	case AHCI_P_CLB:
		p->clb = (uint32_t) value;
		break;
	case AHCI_P_CLBU:
		p->clbu = (uint32_t) value;
		break;
	case AHCI_P_FB:
		p->fb = (uint32_t) value;
		break;
	case AHCI_P_FBU:
		p->fbu = (uint32_t) value;
		break;
	case AHCI_P_IS:
		/* Write-1-to-clear interrupt status bits. */
		p->is &= ~value;
		break;
	case AHCI_P_IE:
		p->ie = value & 0xFDC000FF;
		ahci_generate_intr(sc);
		break;
	case AHCI_P_CMD:
	{
		/* Replace only the software-writable PxCMD bits. */
		p->cmd &= ~(AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
			AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
			AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
			AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK);
		p->cmd |= (AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
			AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
			AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
			AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK) & value;
		if (!(value & AHCI_P_CMD_ST)) {
			ahci_port_stop(p);
		} else {
			uint64_t clb;
			/* Start: map the command list and report CR running. */
			p->cmd |= AHCI_P_CMD_CR;
			clb = (uint64_t)p->clbu << 32 | p->clb;
			p->cmd_lst = paddr_guest2host(clb, AHCI_CL_SIZE * AHCI_MAX_SLOTS);
		}
		if (value & AHCI_P_CMD_FRE) {
			uint64_t fb;
			/* FIS receive enable: map the received-FIS area. */
			p->cmd |= AHCI_P_CMD_FR;
			fb = (uint64_t)p->fbu << 32 | p->fb;
			/* we don't support FBSCP, so rfis size is 256Bytes */
			p->rfis = paddr_guest2host(fb, 256);
		} else {
			p->cmd &= ~((unsigned) AHCI_P_CMD_FR);
		}
		if (value & AHCI_P_CMD_CLO) {
			/* Command list override: clear BSY/DRQ immediately. */
			p->tfd &= ~((unsigned) (ATA_S_BUSY | ATA_S_DRQ));
			p->cmd &= ~((unsigned) AHCI_P_CMD_CLO);
		}
		if (value & AHCI_P_CMD_ICC_MASK) {
			/* Interface power transitions complete instantly. */
			p->cmd &= ~AHCI_P_CMD_ICC_MASK;
		}
		ahci_handle_port(p);
		break;
	}
	case AHCI_P_TFD:
	case AHCI_P_SIG:
	case AHCI_P_SSTS:
		WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"\n", offset);
		break;
	case AHCI_P_SCTL:
		p->sctl = (uint32_t) value;
		/* COMRESET is only honoured while the port is stopped. */
		if (!(p->cmd & AHCI_P_CMD_ST)) {
			if (value & ATA_SC_DET_RESET)
				ahci_port_reset(p);
		}
		break;
	case AHCI_P_SERR:
		/* Write-1-to-clear SATA error bits. */
		p->serr &= ~value;
		break;
	case AHCI_P_SACT:
		p->sact |= value;
		break;
	case AHCI_P_CI:
		/* New commands issued: try to start them right away. */
		p->ci |= value;
		ahci_handle_port(p);
		break;
	case AHCI_P_SNTF:
	case AHCI_P_FBS:
	default:
		break;
	}
}
/*
 * Handle a 32-bit write to a global (HBA-level) AHCI register.  Only GHC
 * (reset / interrupt enable) and IS (write-1-to-clear) have an effect.
 *
 * NOTE(review): the GHC path only ever sets AHCI_GHC_IE; a write with the
 * IE bit clear does not clear it -- confirm against the AHCI spec.
 */
static void
pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
{
	DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
		offset, value);
	switch (offset) {
	case AHCI_CAP:
	case AHCI_PI:
	case AHCI_VS:
	case AHCI_CAP2:
		DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"\n", offset);
		break;
	case AHCI_GHC:
		if (value & AHCI_GHC_HR)
			ahci_reset(sc);
		else if (value & AHCI_GHC_IE) {
			sc->ghc |= AHCI_GHC_IE;
			ahci_generate_intr(sc);
		}
		break;
	case AHCI_IS:
		/* Write-1-to-clear aggregate interrupt status. */
		sc->is &= ~value;
		ahci_generate_intr(sc);
		break;
	default:
		break;
	}
}
/*
 * BAR 5 (ABAR) write entry point.  Only aligned 32-bit accesses are
 * expected; the offset selects between the global register file and the
 * per-port register banks.
 */
static void
pci_ahci_write(UNUSED int vcpu, struct pci_devinst *pi, int baridx,
	uint64_t offset, int size, uint64_t value)
{
	struct pci_ahci_softc *sc = pi->pi_arg;
	uint64_t port_regs_end =
		(uint64_t) (AHCI_OFFSET + (sc->ports * AHCI_STEP));

	assert(baridx == 5);
	assert((offset % 4) == 0 && size == 4);
	pthread_mutex_lock(&sc->mtx);
	if (offset < AHCI_OFFSET)
		pci_ahci_host_write(sc, offset, value);
	else if (offset < port_regs_end)
		pci_ahci_port_write(sc, offset, value);
	else
		WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"\n", offset);
	pthread_mutex_unlock(&sc->mtx);
}
/*
 * Read a global (HBA-level) AHCI register.  The readable registers are
 * laid out contiguously in the softc starting at 'cap', so the value is
 * fetched by indexing from that field; unimplemented offsets read as 0.
 */
static uint64_t
pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
{
	uint32_t value;
	switch (offset) {
	case AHCI_CAP:
	case AHCI_GHC:
	case AHCI_IS:
	case AHCI_PI:
	case AHCI_VS:
	case AHCI_CCCC:
	case AHCI_CCCP:
	case AHCI_EM_LOC:
	case AHCI_EM_CTL:
	case AHCI_CAP2:
	{
		/* Registers mirror consecutive uint32_t softc fields. */
		uint32_t *p = &sc->cap;
		p += (offset - AHCI_CAP) / sizeof(uint32_t);
		value = *p;
		break;
	}
	default:
		value = 0;
		break;
	}
	DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x\n",
		offset, value);
	return (value);
}
/*
 * Read a per-port AHCI register.  The port registers mirror consecutive
 * uint32_t fields in struct ahci_port starting at 'clb'; unimplemented
 * offsets read as 0.
 */
static uint64_t
pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
{
	uint32_t value;
	int port = (int) ((offset - AHCI_OFFSET) / AHCI_STEP);
	offset = (offset - AHCI_OFFSET) % AHCI_STEP;
	switch (offset) {
	case AHCI_P_CLB:
	case AHCI_P_CLBU:
	case AHCI_P_FB:
	case AHCI_P_FBU:
	case AHCI_P_IS:
	case AHCI_P_IE:
	case AHCI_P_CMD:
	case AHCI_P_TFD:
	case AHCI_P_SIG:
	case AHCI_P_SSTS:
	case AHCI_P_SCTL:
	case AHCI_P_SERR:
	case AHCI_P_SACT:
	case AHCI_P_CI:
	case AHCI_P_SNTF:
	case AHCI_P_FBS:
	{
		/* Index from the first register field of the port struct. */
		uint32_t *p= &sc->port[port].clb;
		p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
		value = *p;
		break;
	}
	default:
		value = 0;
		break;
	}
	DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x\n",
		port, offset, value);
	return value;
}
/*
 * BAR 5 (ABAR) read entry point.  Accesses may be 1, 2 or 4 bytes wide and
 * naturally aligned; the containing dword is read and the result shifted
 * so sub-dword reads return the correct byte lanes.
 */
static uint64_t
pci_ahci_read(UNUSED int vcpu, struct pci_devinst *pi, int baridx,
	uint64_t regoff, int size)
{
	struct pci_ahci_softc *sc = pi->pi_arg;
	uint64_t offset;
	uint32_t value;
	assert(baridx == 5);
	assert(size == 1 || size == 2 || size == 4);
	assert((regoff & ((uint64_t) (size - 1))) == 0);
	pthread_mutex_lock(&sc->mtx);
	/* round down to a multiple of 4 bytes */
	offset = regoff & ~((uint64_t) 0x3);
	if (offset < AHCI_OFFSET)
		value = (uint32_t) pci_ahci_host_read(sc, offset);
	else if (offset < ((uint64_t) (AHCI_OFFSET + (sc->ports * AHCI_STEP))))
		value = (uint32_t) pci_ahci_port_read(sc, offset);
	else {
		value = 0;
		WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"\n",
			regoff);
	}
	/* Shift the addressed byte(s) of the dword down to bit 0. */
	value >>= 8 * (regoff & 0x3);
	pthread_mutex_unlock(&sc->mtx);
	return (value);
}
/*
 * Common init for ahci-hd / ahci-cd.  Opens the backing image named by
 * 'opts', attaches it to port 0 (all other ports stay unused), builds the
 * HBA capability registers and PCI config space, and allocates BAR 5.
 * Returns 0 on success, 1 on failure (backing device missing/unopenable).
 */
static int
pci_ahci_init(struct pci_devinst *pi, char *opts, int atapi)
{
	char bident[sizeof("XX:X:X")];
	struct blockif_ctxt *bctxt;
	struct pci_ahci_softc *sc;
	int ret, slots;
	u_char digest[CC_SHA256_DIGEST_LENGTH];
	ret = 0;
	if (opts == NULL) {
		fprintf(stderr, "pci_ahci: backing device required\n");
		return (1);
	}
#ifdef AHCI_DEBUG
	dbg = fopen("/tmp/log", "w+");
#endif
	/* NOTE(review): calloc result is not checked before use. */
	sc = calloc(1, sizeof(struct pci_ahci_softc));
	pi->pi_arg = sc;
	sc->asc_pi = pi;
	sc->ports = MAX_PORTS;
	/*
	 * Only use port 0 for a backing device. All other ports will be
	 * marked as unused
	 */
	sc->port[0].atapi = atapi;
	/*
	 * Attempt to open the backing image. Use the PCI
	 * slot/func for the identifier string.
	 */
	snprintf(bident, sizeof(bident), "%d:%d", pi->pi_slot, pi->pi_func);
	bctxt = blockif_open(opts, bident);
	if (bctxt == NULL) {
		ret = 1;
		goto open_fail;
	}
	sc->port[0].bctx = bctxt;
	sc->port[0].pr_sc = sc;
	/*
	 * Create an identifier for the backing file. Use parts of the
	 * SHA-256 digest of the filename.
	 */
	CC_SHA256(opts, (CC_LONG)strlen(opts), digest);
	snprintf(sc->port[0].ident, AHCI_PORT_IDENT, "BHYVE-%02X%02X-%02X%02X-%02X%02X",
		digest[0], digest[1], digest[2], digest[3], digest[4], digest[5]);
	/*
	 * Allocate blockif request structures and add them
	 * to the free list
	 */
	pci_ahci_ioreq_init(&sc->port[0]);
	pthread_mutex_init(&sc->mtx, NULL);
	/* Intel ICH8 AHCI */
	/* NCS is 0-based: advertise (queue depth, capped at 32) - 1 slots. */
	slots = sc->port[0].ioqsz;
	if (slots > 32)
		slots = 32;
	--slots;
	sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
		AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
		AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
		AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
		(((unsigned) slots) << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS |
		(((unsigned) sc->ports) - 1);
	/* Only port 0 implemented */
	sc->pi = 1;
	sc->vs = 0x10300;	/* AHCI version 1.3 */
	sc->cap2 = AHCI_CAP2_APST;
	ahci_reset(sc);
	pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
	pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
	pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
	pci_emul_add_msicap(pi, 1);
	pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
		((uint64_t) (AHCI_OFFSET + sc->ports * AHCI_STEP)));
	pci_lintr_request(pi);
open_fail:
	if (ret) {
		if (sc->port[0].bctx != NULL)
			blockif_close(sc->port[0].bctx);
		free(sc);
	}
	return (ret);
}
static int
pci_ahci_hd_init(struct pci_devinst *pi, char *opts)
{
return (pci_ahci_init(pi, opts, 0));
}
static int
pci_ahci_atapi_init(struct pci_devinst *pi, char *opts)
{
return (pci_ahci_init(pi, opts, 1));
}
/*
 * Use separate emulation names to distinguish drive and atapi devices
 */
/* Device emulation descriptor for the AHCI hard-disk flavour. */
static struct pci_devemu pci_de_ahci_hd = {
	.pe_emu = "ahci-hd",
	.pe_init = pci_ahci_hd_init,
	.pe_barwrite = pci_ahci_write,
	.pe_barread = pci_ahci_read
};
PCI_EMUL_SET(pci_de_ahci_hd);
/* Device emulation descriptor for the AHCI CD-ROM (ATAPI) flavour. */
static struct pci_devemu pci_de_ahci_cd = {
	.pe_emu = "ahci-cd",
	.pe_init = pci_ahci_atapi_init,
	.pe_barwrite = pci_ahci_write,
	.pe_barread = pci_ahci_read
};
PCI_EMUL_SET(pci_de_ahci_cd);
|
mike-pt/xhyve
|
src/vmm/io/vrtc.c
|
/*-
* Copyright (c) 2014, <NAME> (<EMAIL>)
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <strings.h>
#include <pthread.h>
#include <errno.h>
#include <assert.h>
#include <mach/mach.h>
#include <mach/clock.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/rtc.h>
#include <xhyve/vmm/vmm.h>
#include <xhyve/vmm/vmm_callout.h>
#include <xhyve/vmm/vmm_ktr.h>
#include <xhyve/vmm/io/vatpic.h>
#include <xhyve/vmm/io/vioapic.h>
#include <xhyve/vmm/io/vrtc.h>
/* Lookup table mapping a binary value 0-99 to its packed-BCD encoding. */
static const u_char bin2bcd_data[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99
};
/* Register layout of the RTC */
/* MC146818-compatible register file; the layout is what the guest sees. */
struct rtcdev {
	uint8_t sec;
	uint8_t alarm_sec;
	uint8_t min;
	uint8_t alarm_min;
	uint8_t hour;
	uint8_t alarm_hour;
	uint8_t day_of_week;
	uint8_t day_of_month;
	uint8_t month;
	uint8_t year;		/* two-digit year (see 'century' below) */
	uint8_t reg_a;		/* status register A: divider / rate select */
	uint8_t reg_b;		/* status register B: mode and interrupt enables */
	uint8_t reg_c;		/* status register C: interrupt flags */
	uint8_t reg_d;		/* status register D: valid RAM/time */
	uint8_t nvram[36];
	uint8_t century;	/* century byte at offset RTC_CENTURY */
	uint8_t nvram2[128 - 51];
};
/* The guest addresses 128 bytes; keep the layout exact. */
CTASSERT(sizeof(struct rtcdev) == 128);
CTASSERT(offsetof(struct rtcdev, century) == RTC_CENTURY);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
/* Per-VM virtual RTC state; 'mtx' guards everything below it. */
struct vrtc {
	struct vm *vm;
	pthread_mutex_t mtx;
	struct callout callout;		/* periodic/update interrupt timer */
	u_int addr; /* RTC register to read or write */
	sbintime_t base_uptime;		/* host uptime matching base_rtctime */
	time_t base_rtctime;		/* guest RTC time at base_uptime */
	struct rtcdev rtcdev;
};
/* Broken-down calendar time used for RTC <-> seconds conversions. */
struct clocktime {
	int year; /* year (4 digit year) */
	int mon; /* month (1 - 12) */
	int day; /* day (1 - 31) */
	int hour; /* hour (0 - 23) */
	int min; /* minute (0 - 59) */
	int sec; /* second (0 - 59) */
	int dow; /* day of week (0 - 6; 0 = Sunday) */
	long nsec; /* nano seconds */
};
#pragma clang diagnostic pop
#define VRTC_LOCK(vrtc) pthread_mutex_lock(&((vrtc)->mtx))
#define VRTC_UNLOCK(vrtc) pthread_mutex_unlock(&((vrtc)->mtx))
/*
 * RTC time is considered "broken" if:
 * - RTC updates are halted by the guest
 * - RTC date/time fields have invalid values
 */
#define VRTC_BROKEN_TIME ((time_t)-1)
#define RTC_IRQ 8
#define RTCSB_BIN 0x04
#define RTCSB_ALL_INTRS (RTCSB_UINTR | RTCSB_AINTR | RTCSB_PINTR)
/* Predicates over status register B bits. */
#define rtc_halted(vrtc) ((vrtc->rtcdev.reg_b & RTCSB_HALT) != 0)
#define aintr_enabled(vrtc) (((vrtc)->rtcdev.reg_b & RTCSB_AINTR) != 0)
#define pintr_enabled(vrtc) (((vrtc)->rtcdev.reg_b & RTCSB_PINTR) != 0)
#define uintr_enabled(vrtc) (((vrtc)->rtcdev.reg_b & RTCSB_UINTR) != 0)
static void vrtc_callout_handler(void *arg);
static void vrtc_set_reg_c(struct vrtc *vrtc, uint8_t newval);
/* Set until the guest programs a valid date/time. */
static int rtc_flag_broken_time = 1;
static clock_serv_t mach_clock;
/* Calendar helpers: epoch, month arithmetic and day-of-week. */
#define POSIX_BASE_YEAR 1970
#define FEBRUARY 2
#define SECDAY (24 * 60 * 60)
#define days_in_year(y) (leapyear(y) ? 366 : 365)
#define days_in_month(y, m) \
	(month_days[(m) - 1] + (m == FEBRUARY ? leapyear(y) : 0))
/* Jan 1 1970 was a Thursday, hence the +4 offset. */
#define day_of_week(days) (((days) + 4) % 7)
static const int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
/*
 * Gregorian leap-year test: years divisible by 4 are leap years, except
 * centuries, which are leap years only when divisible by 400.
 * Returns 1 for a leap year, 0 otherwise.
 */
static __inline int
leapyear(int year)
{
	if ((year & 3) != 0)
		return (0);
	if ((year % 100) != 0)
		return (1);
	return ((year % 400) == 0 ? 1 : 0);
}
/*
 * Convert broken-down calendar time to a timespec (seconds since the POSIX
 * epoch).  'ct->dow' is ignored.  Returns 0 on success or EINVAL when any
 * field is out of range (or the result would overflow a 32-bit time_t).
 */
static int
clock_ct_to_ts(struct clocktime *ct, struct timespec *ts)
{
	int i, year, days;
	year = ct->year;
	/* Sanity checks. */
	if (ct->mon < 1 || ct->mon > 12 || ct->day < 1 ||
		ct->day > days_in_month(year, ct->mon) ||
		ct->hour > 23 || ct->min > 59 || ct->sec > 59 ||
		(year > 2037 && sizeof(time_t) == 4)) { /* time_t overflow */
		return (EINVAL);
	}
	/*
	 * Compute days since start of time
	 * First from years, then from months.
	 */
	days = 0;
	for (i = POSIX_BASE_YEAR; i < year; i++)
		days += days_in_year(i);
	/* Months */
	for (i = 1; i < ct->mon; i++)
		days += days_in_month(year, i);
	days += (ct->day - 1);
	ts->tv_sec = (((time_t)days * 24 + ct->hour) * 60 + ct->min) * 60 +
		ct->sec;
	ts->tv_nsec = ct->nsec;
	return (0);
}
/*
 * Convert a timespec (seconds since the POSIX epoch) into broken-down
 * calendar time, including the day of week.
 */
static void
clock_ts_to_ct(struct timespec *ts, struct clocktime *ct)
{
	int i, year, days;
	time_t rsec; /* remainder seconds */
	time_t secs;
	secs = ts->tv_sec;
	days = (int) (secs / SECDAY);
	rsec = secs % SECDAY;
	ct->dow = day_of_week(days);
	/* Subtract out whole years, counting them in 'year'. */
	for (year = POSIX_BASE_YEAR; days >= days_in_year(year); year++)
		days -= days_in_year(year);
	ct->year = year;
	/* Subtract out whole months, counting them in i. */
	for (i = 1; days >= days_in_month(year, i); i++)
		days -= days_in_month(year, i);
	ct->mon = i;
	/* Days are what is left over (+1) from all that. */
	ct->day = days + 1;
	/* Hours, minutes, seconds are easy */
	ct->hour = (int) (rsec / 3600);
	rsec = rsec % 3600;
	ct->min = (int) (rsec / 60);
	rsec = rsec % 60;
	ct->sec = (int) rsec;
	ct->nsec = ts->tv_nsec;
}
/*
 * The RTC is counting only when the divider chain is not held in reset:
 * the divider-select bits (reg A bits 6:4) must be 010.
 */
static __inline bool
divider_enabled(int reg_a)
{
	return (((reg_a >> 4) & 0x7) == 0x2);
}
/*
 * RTC date/time can be updated only if:
 * - divider is not held in reset
 * - guest has not disabled updates
 * - the date/time fields have valid contents
 */
static __inline bool
update_enabled(struct vrtc *vrtc)
{
	return (divider_enabled(vrtc->rtcdev.reg_a) &&
	    !rtc_halted(vrtc) &&
	    vrtc->base_rtctime != VRTC_BROKEN_TIME);
}
/*
 * Return the current guest RTC time in seconds, extrapolated from the
 * recorded (base_rtctime, base_uptime) pair when the clock is running.
 * '*basetime' is set to the host uptime corresponding to the returned
 * second (i.e. base_uptime advanced by the whole seconds consumed).
 */
static time_t
vrtc_curtime(struct vrtc *vrtc, sbintime_t *basetime)
{
	sbintime_t now, delta;
	time_t t, secs;
	t = vrtc->base_rtctime;
	*basetime = vrtc->base_uptime;
	if (update_enabled(vrtc)) {
		now = sbinuptime();
		delta = now - vrtc->base_uptime;
		KASSERT(delta >= 0, ("vrtc_curtime: uptime went backwards: "
			"%#llx to %#llx", vrtc->base_uptime, now));
		/* Advance by whole elapsed seconds only. */
		secs = delta / SBT_1S;
		t += secs;
		*basetime += secs * SBT_1S;
	}
	return (t);
}
static __inline uint8_t
rtcset(struct rtcdev *rtc, int val)
{
KASSERT(val >= 0 && val < 100, ("%s: invalid bin2bcd index %d",
__func__, val));
return ((uint8_t) ((rtc->reg_b & RTCSB_BIN) ? val : bin2bcd_data[val]));
}
/*
 * Refresh the guest-visible RTC date/time registers.  If the RTC is halted
 * the guest owns the fields and they are left alone unless 'force_update'.
 *
 * NOTE(review): apart from the VRTC_BROKEN_TIME validity check, 'rtctime'
 * is not used -- the fields are filled from the host Mach calendar clock
 * (clock_get_time) instead.  This diverges from bhyve, which converts
 * 'rtctime' itself; presumably intentional in xhyve, but worth confirming.
 */
static void
secs_to_rtc(time_t rtctime, struct vrtc *vrtc, int force_update)
{
	mach_timespec_t mts;
	struct clocktime ct;
	struct timespec ts;
	struct rtcdev *rtc;
	int hour;
	if (rtctime < 0) {
		KASSERT(rtctime == VRTC_BROKEN_TIME,
			("%s: invalid vrtc time %#lx", __func__, rtctime));
		return;
	}
	/*
	 * If the RTC is halted then the guest has "ownership" of the
	 * date/time fields. Don't update the RTC date/time fields in
	 * this case (unless forced).
	 */
	if (rtc_halted(vrtc) && !force_update)
		return;
	clock_get_time(mach_clock, &mts);
	ts.tv_sec = mts.tv_sec;
	ts.tv_nsec = mts.tv_nsec;
	clock_ts_to_ct(&ts, &ct);
	KASSERT(ct.sec >= 0 && ct.sec <= 59, ("invalid clocktime sec %d",
		ct.sec));
	KASSERT(ct.min >= 0 && ct.min <= 59, ("invalid clocktime min %d",
		ct.min));
	KASSERT(ct.hour >= 0 && ct.hour <= 23, ("invalid clocktime hour %d",
		ct.hour));
	KASSERT(ct.dow >= 0 && ct.dow <= 6, ("invalid clocktime wday %d",
		ct.dow));
	KASSERT(ct.day >= 1 && ct.day <= 31, ("invalid clocktime mday %d",
		ct.day));
	KASSERT(ct.mon >= 1 && ct.mon <= 12, ("invalid clocktime month %d",
		ct.mon));
	KASSERT(ct.year >= 1900, ("invalid clocktime year %d", ct.year));
	rtc = &vrtc->rtcdev;
	rtc->sec = rtcset(rtc, ct.sec);
	rtc->min = rtcset(rtc, ct.min);
	if (rtc->reg_b & RTCSB_24HR) {
		hour = ct.hour;
	} else {
		/*
		 * Convert to the 12-hour format.
		 */
		switch (ct.hour) {
		case 0: /* 12 AM */
		case 12: /* 12 PM */
			hour = 12;
			break;
		default:
			/*
			 * The remaining 'ct.hour' values are interpreted as:
			 * [1 - 11] -> 1 - 11 AM
			 * [13 - 23] -> 1 - 11 PM
			 */
			hour = ct.hour % 12;
			break;
		}
	}
	rtc->hour = rtcset(rtc, hour);
	if ((rtc->reg_b & RTCSB_24HR) == 0 && ct.hour >= 12)
		rtc->hour |= 0x80; /* set MSB to indicate PM */
	rtc->day_of_week = rtcset(rtc, ct.dow + 1);
	rtc->day_of_month = rtcset(rtc, ct.day);
	rtc->month = rtcset(rtc, ct.mon);
	rtc->year = rtcset(rtc, ct.year % 100);
	rtc->century = rtcset(rtc, ct.year / 100);
}
/*
 * Decode an RTC register value into '*retval': raw binary in binary mode,
 * otherwise packed BCD.  Returns 0 on success, -1 if either BCD nibble is
 * not a decimal digit.
 */
static int
rtcget(struct rtcdev *rtc, int val, int *retval)
{
	uint8_t hi, lo;

	if (rtc->reg_b & RTCSB_BIN) {
		*retval = val;
		return (0);
	}
	lo = val & 0xf;
	hi = (val >> 4) & 0xf;
	if (lo > 9 || hi > 9)
		return (-1);
	*retval = hi * 10 + lo;
	return (0);
}
static time_t
rtc_to_secs(struct vrtc *vrtc)
{
struct clocktime ct;
struct timespec ts;
struct rtcdev *rtc;
struct vm *vm;
int century, error, hour, pm, year;
vm = vrtc->vm;
rtc = &vrtc->rtcdev;
bzero(&ct, sizeof(struct clocktime));
error = rtcget(rtc, rtc->sec, &ct.sec);
if (error || ct.sec < 0 || ct.sec > 59) {
VM_CTR2(vm, "Invalid RTC sec %#x/%d", rtc->sec, ct.sec);
goto fail;
}
error = rtcget(rtc, rtc->min, &ct.min);
if (error || ct.min < 0 || ct.min > 59) {
VM_CTR2(vm, "Invalid RTC min %#x/%d", rtc->min, ct.min);
goto fail;
}
pm = 0;
hour = rtc->hour;
if ((rtc->reg_b & RTCSB_24HR) == 0) {
if (hour & 0x80) {
hour &= ~0x80;
pm = 1;
}
}
error = rtcget(rtc, hour, &ct.hour);
if ((rtc->reg_b & RTCSB_24HR) == 0) {
if (ct.hour >= 1 && ct.hour <= 12) {
/*
* Convert from 12-hour format to internal 24-hour
* representation as follows:
*
* 12-hour format ct.hour
* 12 AM 0
* 1 - 11 AM 1 - 11
* 12 PM 12
* 1 - 11 PM 13 - 23
*/
if (ct.hour == 12)
ct.hour = 0;
if (pm)
ct.hour += 12;
} else {
VM_CTR2(vm, "Invalid RTC 12-hour format %#x/%d",
rtc->hour, ct.hour);
goto fail;
}
}
if (error || ct.hour < 0 || ct.hour > 23) {
VM_CTR2(vm, "Invalid RTC hour %#x/%d", rtc->hour, ct.hour);
goto fail;
}
/*
* Ignore 'rtc->dow' because some guests like Linux don't bother
* setting it at all while others like OpenBSD/i386 set it incorrectly.
*
* clock_ct_to_ts() does not depend on 'ct.dow' anyways so ignore it.
*/
ct.dow = -1;
error = rtcget(rtc, rtc->day_of_month, &ct.day);
if (error || ct.day < 1 || ct.day > 31) {
VM_CTR2(vm, "Invalid RTC mday %#x/%d", rtc->day_of_month,
ct.day);
goto fail;
}
error = rtcget(rtc, rtc->month, &ct.mon);
if (error || ct.mon < 1 || ct.mon > 12) {
VM_CTR2(vm, "Invalid RTC month %#x/%d", rtc->month, ct.mon);
goto fail;
}
error = rtcget(rtc, rtc->year, &year);
if (error || year < 0 || year > 99) {
VM_CTR2(vm, "Invalid RTC year %#x/%d", rtc->year, year);
goto fail;
}
error = rtcget(rtc, rtc->century, ¢ury);
ct.year = century * 100 + year;
if (error || ct.year < 1900) {
VM_CTR2(vm, "Invalid RTC century %#x/%d", rtc->century,
ct.year);
goto fail;
}
error = clock_ct_to_ts(&ct, &ts);
if (error || ts.tv_sec < 0) {
VM_CTR3(vm, "Invalid RTC clocktime.date %04d-%02d-%02d",
ct.year, ct.mon, ct.day);
VM_CTR3(vm, "Invalid RTC clocktime.time %02d:%02d:%02d",
ct.hour, ct.min, ct.sec);
goto fail;
}
return (ts.tv_sec); /* success */
fail:
/*
* Stop updating the RTC if the date/time fields programmed by
* the guest are invalid.
*/
VM_CTR0(vrtc->vm, "Invalid RTC date/time programming detected");
return (VRTC_BROKEN_TIME);
}
/*
 * Advance the virtual RTC from its current time to 'newtime' and record
 * 'newbase' as the host uptime corresponding to it.  When the alarm
 * interrupt is armed, intervening seconds are walked one at a time so a
 * matching alarm is never skipped.  Returns 0 on success or EBUSY if the
 * guest has halted RTC updates.
 */
static int
vrtc_time_update(struct vrtc *vrtc, time_t newtime, sbintime_t newbase)
{
    struct rtcdev *rtc;
    sbintime_t oldbase;
    time_t oldtime;
    uint8_t alarm_sec, alarm_min, alarm_hour;

    /* Snapshot the alarm registers before they can change under us. */
    rtc = &vrtc->rtcdev;
    alarm_sec = rtc->alarm_sec;
    alarm_min = rtc->alarm_min;
    alarm_hour = rtc->alarm_hour;

    oldtime = vrtc->base_rtctime;
    VM_CTR2(vrtc->vm, "Updating RTC secs from %#lx to %#lx",
        oldtime, newtime);

    oldbase = vrtc->base_uptime;
    VM_CTR2(vrtc->vm, "Updating RTC base uptime from %#llx to %#llx",
        oldbase, newbase);
    vrtc->base_uptime = newbase;

    if (newtime == oldtime)
        return (0);

    /*
     * If 'newtime' indicates that RTC updates are disabled then just
     * record that and return. There is no need to do alarm interrupt
     * processing in this case.
     */
    if (newtime == VRTC_BROKEN_TIME) {
        vrtc->base_rtctime = VRTC_BROKEN_TIME;
        return (0);
    }

    /*
     * Return an error if RTC updates are halted by the guest.
     */
    if (rtc_halted(vrtc)) {
        VM_CTR0(vrtc->vm, "RTC update halted by guest");
        return (EBUSY);
    }

    do {
        /*
         * If the alarm interrupt is enabled and 'oldtime' is valid
         * then visit all the seconds between 'oldtime' and 'newtime'
         * to check for the alarm condition.
         *
         * Otherwise move the RTC time forward directly to 'newtime'.
         */
        if (aintr_enabled(vrtc) && oldtime != VRTC_BROKEN_TIME)
            vrtc->base_rtctime++;
        else
            vrtc->base_rtctime = newtime;

        if (aintr_enabled(vrtc)) {
            /*
             * Update the RTC date/time fields before checking
             * if the alarm conditions are satisfied.
             */
            secs_to_rtc(vrtc->base_rtctime, vrtc, 0);

            /*
             * An alarm byte of 0xC0 or above acts as a "don't
             * care" wildcard that matches any value.
             */
            if ((alarm_sec >= 0xC0 || alarm_sec == rtc->sec) &&
                (alarm_min >= 0xC0 || alarm_min == rtc->min) &&
                (alarm_hour >= 0xC0 || alarm_hour == rtc->hour)) {
                vrtc_set_reg_c(vrtc, rtc->reg_c | RTCIR_ALARM);
            }
        }
    } while (vrtc->base_rtctime != newtime);

    /* Each completed update of the time fields raises the update intr. */
    if (uintr_enabled(vrtc))
        vrtc_set_reg_c(vrtc, rtc->reg_c | RTCIR_UPDATE);

    return (0);
}
/*
 * Return the period at which the RTC callout must fire to service the
 * currently enabled interrupt sources, or 0 if no callout is needed.
 */
static sbintime_t
vrtc_freq(struct vrtc *vrtc)
{
    int ratesel;

    /*
     * Periodic interrupt period for each rate-select encoding in reg_a.
     * NOTE(review): encodings 1 and 2 alias the 256Hz/128Hz rates of
     * encodings 8 and 9 — presumably mirroring real MC146818 behavior;
     * confirm against the datasheet.
     */
    static sbintime_t pf[16] = {
        0,
        SBT_1S / 256,
        SBT_1S / 128,
        SBT_1S / 8192,
        SBT_1S / 4096,
        SBT_1S / 2048,
        SBT_1S / 1024,
        SBT_1S / 512,
        SBT_1S / 256,
        SBT_1S / 128,
        SBT_1S / 64,
        SBT_1S / 32,
        SBT_1S / 16,
        SBT_1S / 8,
        SBT_1S / 4,
        SBT_1S / 2,
    };

    /*
     * If both periodic and alarm interrupts are enabled then use the
     * periodic frequency to drive the callout. The minimum periodic
     * frequency (2 Hz) is higher than the alarm frequency (1 Hz) so
     * piggyback the alarm on top of it. The same argument applies to
     * the update interrupt.
     */
    if (pintr_enabled(vrtc) && divider_enabled(vrtc->rtcdev.reg_a)) {
        ratesel = vrtc->rtcdev.reg_a & 0xf;
        return (pf[ratesel]);
    } else if (aintr_enabled(vrtc) && update_enabled(vrtc)) {
        return (SBT_1S);
    } else if (uintr_enabled(vrtc) && update_enabled(vrtc)) {
        return (SBT_1S);
    } else {
        return (0);
    }
}
/*
 * (Re)arm the periodic RTC callout to fire every 'freqsbt', or stop it
 * entirely when 'freqsbt' is zero (no interrupt source needs servicing).
 */
static void
vrtc_callout_reset(struct vrtc *vrtc, sbintime_t freqsbt)
{
    if (freqsbt != 0) {
        VM_CTR1(vrtc->vm, "RTC callout frequency %lld hz",
            SBT_1S / freqsbt);
        callout_reset_sbt(&vrtc->callout, freqsbt, 0,
            vrtc_callout_handler, vrtc, 0);
        return;
    }

    if (callout_active(&vrtc->callout)) {
        VM_CTR0(vrtc->vm, "RTC callout stopped");
        callout_stop(&vrtc->callout);
    }
}
/*
 * Periodic callout: raises the periodic interrupt flag, advances the RTC
 * time for alarm/update interrupt processing, and re-arms itself at the
 * current frequency.
 */
static void
vrtc_callout_handler(void *arg)
{
    struct vrtc *vrtc = arg;
    sbintime_t freqsbt, basetime;
    time_t rtctime;
    int error;

    VM_CTR0(vrtc->vm, "vrtc callout fired");

    VRTC_LOCK(vrtc);
    if (callout_pending(&vrtc->callout))    /* callout was reset */
        goto done;

    if (!callout_active(&vrtc->callout))    /* callout was stopped */
        goto done;

    callout_deactivate(&vrtc->callout);

    /* The callout should only be armed while some interrupt is enabled. */
    KASSERT((vrtc->rtcdev.reg_b & RTCSB_ALL_INTRS) != 0,
        ("gratuitous vrtc callout"));

    if (pintr_enabled(vrtc))
        vrtc_set_reg_c(vrtc, vrtc->rtcdev.reg_c | RTCIR_PERIOD);

    if (aintr_enabled(vrtc) || uintr_enabled(vrtc)) {
        /* Advancing the time triggers alarm/update processing. */
        rtctime = vrtc_curtime(vrtc, &basetime);
        error = vrtc_time_update(vrtc, rtctime, basetime);
        KASSERT(error == 0, ("%s: vrtc_time_update error %d",
            __func__, error));
    }

    freqsbt = vrtc_freq(vrtc);
    KASSERT(freqsbt != 0, ("%s: vrtc frequency cannot be zero", __func__));
    vrtc_callout_reset(vrtc, freqsbt);
done:
    VRTC_UNLOCK(vrtc);
}
/*
 * Sanity check: the callout must be armed exactly when some interrupt
 * source requires a non-zero servicing frequency.
 */
static __inline void
vrtc_callout_check(struct vrtc *vrtc, sbintime_t freq)
{
    int active;

    active = (callout_active(&vrtc->callout) != 0);
    KASSERT((freq == 0 && !active) || (freq != 0 && active),
        ("vrtc callout %s with frequency %#llx",
        active ? "active" : "inactive", freq));
}
/*
 * Update register C with the interrupt flags in 'newval', recompute the
 * IRQF summary bit, and pulse the RTC interrupt line on a 0->1 IRQF
 * transition.
 */
static void
vrtc_set_reg_c(struct vrtc *vrtc, uint8_t newval)
{
    struct rtcdev *rtc;
    int oldirqf, newirqf;
    uint8_t oldval, changed;

    rtc = &vrtc->rtcdev;
    /* Only the three interrupt flag bits can be set by callers. */
    newval &= RTCIR_ALARM | RTCIR_PERIOD | RTCIR_UPDATE;

    oldirqf = rtc->reg_c & RTCIR_INT;
    /* IRQF is set only if a flagged interrupt is also enabled in reg_b. */
    if ((aintr_enabled(vrtc) && (newval & RTCIR_ALARM) != 0) ||
        (pintr_enabled(vrtc) && (newval & RTCIR_PERIOD) != 0) ||
        (uintr_enabled(vrtc) && (newval & RTCIR_UPDATE) != 0)) {
        newirqf = RTCIR_INT;
    } else {
        newirqf = 0;
    }

    oldval = rtc->reg_c;
    rtc->reg_c = (uint8_t) (newirqf | newval);
    changed = oldval ^ rtc->reg_c;
    if (changed) {
        VM_CTR2(vrtc->vm, "RTC reg_c changed from %#x to %#x",
            oldval, rtc->reg_c);
    }

    if (!oldirqf && newirqf) {
        /* Deliver the edge via both the PIC and the I/O APIC. */
        VM_CTR1(vrtc->vm, "RTC irq %d asserted", RTC_IRQ);
        vatpic_pulse_irq(vrtc->vm, RTC_IRQ);
        vioapic_pulse_irq(vrtc->vm, RTC_IRQ);
    } else if (oldirqf && !newirqf) {
        VM_CTR1(vrtc->vm, "RTC irq %d deasserted", RTC_IRQ);
    }
}
/*
 * Handle a guest write to register B.  Besides storing the value this
 * processes the side effects: HALT transitions hand date/time ownership
 * between guest and hypervisor, interrupt enable changes recompute IRQF,
 * and the callout frequency is adjusted.  Returns 0 on success or -1 if
 * un-halting with invalid date/time fields while 'rtc_flag_broken_time'
 * is set.
 */
static int
vrtc_set_reg_b(struct vrtc *vrtc, uint8_t newval)
{
    struct rtcdev *rtc;
    sbintime_t oldfreq, newfreq, basetime;
    time_t curtime, rtctime;
    int error;
    uint8_t oldval, changed;

    rtc = &vrtc->rtcdev;
    oldval = rtc->reg_b;
    oldfreq = vrtc_freq(vrtc);

    rtc->reg_b = newval;
    changed = oldval ^ newval;
    if (changed) {
        VM_CTR2(vrtc->vm, "RTC reg_b changed from %#x to %#x",
            oldval, newval);
    }

    if (changed & RTCSB_HALT) {
        if ((newval & RTCSB_HALT) == 0) {
            /*
             * Un-halting: adopt the date/time the guest
             * programmed while updates were halted.
             */
            rtctime = rtc_to_secs(vrtc);
            basetime = sbinuptime();
            if (rtctime == VRTC_BROKEN_TIME) {
                if (rtc_flag_broken_time)
                    return (-1);
            }
        } else {
            curtime = vrtc_curtime(vrtc, &basetime);
            KASSERT(curtime == vrtc->base_rtctime, ("%s: mismatch "
                "between vrtc basetime (%#lx) and curtime (%#lx)",
                __func__, vrtc->base_rtctime, curtime));

            /*
             * Force a refresh of the RTC date/time fields so
             * they reflect the time right before the guest set
             * the HALT bit.
             */
            secs_to_rtc(curtime, vrtc, 1);

            /*
             * Updates are halted so mark 'base_rtctime' to denote
             * that the RTC date/time is in flux.
             */
            rtctime = VRTC_BROKEN_TIME;
            rtc->reg_b &= ~RTCSB_UINTR;
        }
        error = vrtc_time_update(vrtc, rtctime, basetime);
        KASSERT(error == 0, ("vrtc_time_update error %d", error));
    }

    /*
     * Side effect of changes to the interrupt enable bits.
     */
    if (changed & RTCSB_ALL_INTRS)
        vrtc_set_reg_c(vrtc, vrtc->rtcdev.reg_c);

    /*
     * Change the callout frequency if it has changed.
     */
    newfreq = vrtc_freq(vrtc);
    if (newfreq != oldfreq)
        vrtc_callout_reset(vrtc, newfreq);
    else
        vrtc_callout_check(vrtc, newfreq);

    /*
     * The side effect of bits that control the RTC date/time format
     * is handled lazily when those fields are actually read.
     */
    return (0);
}
/*
 * Handle a guest write to register A (rate select + divider control).
 * Divider transitions freeze/unfreeze the RTC time, and the callout
 * frequency is recomputed for the new rate.
 */
static void
vrtc_set_reg_a(struct vrtc *vrtc, uint8_t newval)
{
    sbintime_t oldfreq, newfreq;
    uint8_t oldval, changed;

    /* The update-in-progress bit is read-only; never latch it. */
    newval &= ~RTCSA_TUP;
    oldval = vrtc->rtcdev.reg_a;
    oldfreq = vrtc_freq(vrtc);

    if (divider_enabled(oldval) && !divider_enabled(newval)) {
        VM_CTR2(vrtc->vm, "RTC divider held in reset at %#lx/%#llx",
            vrtc->base_rtctime, vrtc->base_uptime);
    } else if (!divider_enabled(oldval) && divider_enabled(newval)) {
        /*
         * If the dividers are coming out of reset then update
         * 'base_uptime' before this happens. This is done to
         * maintain the illusion that the RTC date/time was frozen
         * while the dividers were disabled.
         */
        vrtc->base_uptime = sbinuptime();
        VM_CTR2(vrtc->vm, "RTC divider out of reset at %#lx/%#llx",
            vrtc->base_rtctime, vrtc->base_uptime);
    } else {
        /* NOTHING */
    }

    vrtc->rtcdev.reg_a = newval;
    changed = oldval ^ newval;
    if (changed) {
        VM_CTR2(vrtc->vm, "RTC reg_a changed from %#x to %#x",
            oldval, newval);
    }

    /*
     * Side effect of changes to rate select and divider enable bits.
     */
    newfreq = vrtc_freq(vrtc);
    if (newfreq != oldfreq)
        vrtc_callout_reset(vrtc, newfreq);
    else
        vrtc_callout_check(vrtc, newfreq);
}
/*
 * Set the virtual RTC to 'secs' (seconds since the epoch).  Returns 0
 * on success or the error from vrtc_time_update() (e.g. EBUSY when the
 * guest has halted RTC updates).
 */
int
vrtc_set_time(struct vm *vm, time_t secs)
{
    struct vrtc *vrtc = vm_rtc(vm);
    int error;

    VRTC_LOCK(vrtc);
    error = vrtc_time_update(vrtc, secs, sbinuptime());
    VRTC_UNLOCK(vrtc);

    if (error == 0) {
        VM_CTR1(vrtc->vm, "RTC time set to %#lx", secs);
    } else {
        VM_CTR2(vrtc->vm, "Error %d setting RTC time to %#lx", error,
            secs);
    }

    return (error);
}
/*
 * Return the current virtual RTC time in seconds since the epoch.
 */
time_t
vrtc_get_time(struct vm *vm)
{
    struct vrtc *vrtc = vm_rtc(vm);
    sbintime_t basetime;
    time_t now;

    VRTC_LOCK(vrtc);
    now = vrtc_curtime(vrtc, &basetime);
    VRTC_UNLOCK(vrtc);

    return (now);
}
/*
 * Write one byte of RTC NVRAM.  Writes to the control registers, the
 * date/time fields, the century byte, or past the end of the device are
 * rejected with EINVAL.
 */
int
vrtc_nvram_write(struct vm *vm, int offset, uint8_t value)
{
    struct vrtc *vrtc;
    bool bad_offset;

    vrtc = vm_rtc(vm);

    bad_offset =
        ((unsigned long) offset) < offsetof(struct rtcdev, nvram) ||
        offset == RTC_CENTURY ||
        ((unsigned long) offset) >= sizeof(struct rtcdev);
    if (bad_offset) {
        VM_CTR1(vrtc->vm, "RTC nvram write to invalid offset %d",
            offset);
        return (EINVAL);
    }

    VRTC_LOCK(vrtc);
    ((uint8_t *)(&vrtc->rtcdev))[offset] = value;
    VM_CTR2(vrtc->vm, "RTC nvram write %#x to offset %#x", value, offset);
    VRTC_UNLOCK(vrtc);

    return (0);
}
/*
 * Read one byte from the RTC register file / NVRAM.  Any in-range offset
 * may be read; date/time registers are refreshed first so the guest sees
 * the current time.
 */
int
vrtc_nvram_read(struct vm *vm, int offset, uint8_t *retval)
{
    struct vrtc *vrtc;
    sbintime_t basetime;
    time_t curtime;

    if (offset < 0 || ((unsigned long) offset) >= sizeof(struct rtcdev))
        return (EINVAL);

    vrtc = vm_rtc(vm);
    VRTC_LOCK(vrtc);

    /* Refresh the date/time fields before reading any of them. */
    if (offset < 10 || offset == RTC_CENTURY) {
        curtime = vrtc_curtime(vrtc, &basetime);
        secs_to_rtc(curtime, vrtc, 0);
    }

    *retval = ((uint8_t *)(&vrtc->rtcdev))[offset];

    VRTC_UNLOCK(vrtc);

    return (0);
}
/*
 * I/O handler for the RTC index port.  Writes latch the register index
 * (top bit masked off); reads return all ones since the index register
 * is write-only.
 */
int
vrtc_addr_handler(struct vm *vm, UNUSED int vcpuid, bool in, UNUSED int port,
    int bytes, uint32_t *val)
{
    struct vrtc *vrtc = vm_rtc(vm);

    if (bytes != 1)
        return (-1);

    if (in) {
        *val = 0xff;
    } else {
        VRTC_LOCK(vrtc);
        vrtc->addr = *val & 0x7f;
        VRTC_UNLOCK(vrtc);
    }

    return (0);
}
/*
 * I/O handler for the RTC data port.  Reads and writes the register
 * selected by the previously latched index.  Date/time fields are
 * refreshed before access, reading reg_c clears the interrupt flags,
 * and writing the century byte outside of HALT re-derives the RTC time.
 */
int
vrtc_data_handler(struct vm *vm, int vcpuid, bool in, UNUSED int port,
    int bytes, uint32_t *val)
{
    struct vrtc *vrtc;
    struct rtcdev *rtc;
    sbintime_t basetime;
    time_t curtime;
    int error, offset;

    vrtc = vm_rtc(vm);
    rtc = &vrtc->rtcdev;

    if (bytes != 1)
        return (-1);

    VRTC_LOCK(vrtc);
    offset = (int) vrtc->addr;
    if (((unsigned long) offset) >= sizeof(struct rtcdev)) {
        VRTC_UNLOCK(vrtc);
        return (-1);
    }

    error = 0;
    curtime = vrtc_curtime(vrtc, &basetime);
    vrtc_time_update(vrtc, curtime, basetime);

    /*
     * Update RTC date/time fields if necessary.
     *
     * This is not just for reads of the RTC. The side-effect of writing
     * the century byte requires other RTC date/time fields (e.g. sec)
     * to be updated here.
     */
    if (offset < 10 || offset == RTC_CENTURY)
        secs_to_rtc(curtime, vrtc, 0);

    if (in) {
        if (offset == 12) {
            /*
             * XXX
             * reg_c interrupt flags are updated only if the
             * corresponding interrupt enable bit in reg_b is set.
             */
            *val = vrtc->rtcdev.reg_c;
            /* Reading reg_c clears the pending interrupt flags. */
            vrtc_set_reg_c(vrtc, 0);
        } else {
            *val = *((uint8_t *)rtc + offset);
        }
        VCPU_CTR2(vm, vcpuid, "Read value %#x from RTC offset %#x",
            *val, offset);
    } else {
        switch (offset) {
        case 10:
            VCPU_CTR1(vm, vcpuid, "RTC reg_a set to %#x", *val);
            vrtc_set_reg_a(vrtc, ((uint8_t) *val));
            break;
        case 11:
            VCPU_CTR1(vm, vcpuid, "RTC reg_b set to %#x", *val);
            error = vrtc_set_reg_b(vrtc, ((uint8_t) *val));
            break;
        case 12:
            VCPU_CTR1(vm, vcpuid, "RTC reg_c set to %#x (ignored)",
                *val);
            break;
        case 13:
            VCPU_CTR1(vm, vcpuid, "RTC reg_d set to %#x (ignored)",
                *val);
            break;
        case 0:
            /*
             * High order bit of 'seconds' is readonly.
             */
            *val &= 0x7f;
            /* FALLTHRU */
        default:
            VCPU_CTR2(vm, vcpuid, "RTC offset %#x set to %#x",
                offset, *val);
            *((uint8_t *)rtc + offset) = ((uint8_t) *val);
            break;
        }

        /*
         * XXX some guests (e.g. OpenBSD) write the century byte
         * outside of RTCSB_HALT so re-calculate the RTC date/time.
         */
        if (offset == RTC_CENTURY && !rtc_halted(vrtc)) {
            curtime = rtc_to_secs(vrtc);
            error = vrtc_time_update(vrtc, curtime, sbinuptime());
            KASSERT(!error, ("vrtc_time_update error %d", error));
            if (curtime == VRTC_BROKEN_TIME && rtc_flag_broken_time)
                error = -1;
        }
    }
    VRTC_UNLOCK(vrtc);
    return (error);
}
/*
 * Reset the RTC interrupt state: clear all interrupt enables and the
 * square wave output in reg_b, drop any pending flags in reg_c, and
 * verify the callout has been stopped as a consequence.
 */
void
vrtc_reset(struct vrtc *vrtc)
{
    struct rtcdev *rtc;

    VRTC_LOCK(vrtc);

    rtc = &vrtc->rtcdev;
    vrtc_set_reg_b(vrtc, rtc->reg_b & ~(RTCSB_ALL_INTRS | RTCSB_SQWE));
    vrtc_set_reg_c(vrtc, 0);
    KASSERT(!callout_active(&vrtc->callout), ("rtc callout still active"));

    VRTC_UNLOCK(vrtc);
}
/*
 * Allocate and initialize the virtual RTC for 'vm'.  The device starts
 * in 24-hour mode with all interrupt sources disabled and its time set
 * to the Unix epoch (00:00:00 Jan 1, 1970).
 */
struct vrtc *
vrtc_init(struct vm *vm)
{
    struct vrtc *vrtc;
    struct rtcdev *rtc;
    time_t curtime;

    vrtc = malloc(sizeof(struct vrtc));
    assert(vrtc);
    bzero(vrtc, sizeof(struct vrtc));
    vrtc->vm = vm;

    pthread_mutex_init(&vrtc->mtx, NULL);
    /* Calendar clock service used when reading the host time. */
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &mach_clock);

    callout_init(&vrtc->callout, 1);

    /* Allow dividers to keep time but disable everything else */
    rtc = &vrtc->rtcdev;
    rtc->reg_a = 0x20;
    rtc->reg_b = RTCSB_24HR;
    rtc->reg_c = 0;
    rtc->reg_d = RTCSD_PWR;

    /* Reset the index register to a safe value. */
    vrtc->addr = RTC_STATUSD;

    /*
     * Initialize RTC time to 00:00:00 Jan 1, 1970.
     */
    curtime = 0;

    VRTC_LOCK(vrtc);
    /* Start from BROKEN_TIME so the first update takes effect. */
    vrtc->base_rtctime = VRTC_BROKEN_TIME;
    vrtc_time_update(vrtc, curtime, sbinuptime());
    secs_to_rtc(curtime, vrtc, 0);
    VRTC_UNLOCK(vrtc);

    return (vrtc);
}
/*
 * Tear down the RTC: wait for any in-flight callout to finish, release
 * the mach clock port and free the device state.
 */
void
vrtc_cleanup(struct vrtc *vrtc)
{
    callout_drain(&vrtc->callout);
    mach_port_deallocate(mach_task_self(), mach_clock);
    free(vrtc);
}
|
mike-pt/xhyve
|
src/pci_uart.c
|
<gh_stars>1000+
/*-
* Copyright (c) 2012 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <stdint.h>
#include <stdio.h>
#include <xhyve/support/misc.h>
#include <xhyve/xhyve.h>
#include <xhyve/pci_emul.h>
#include <xhyve/uart_emul.h>
/*
* Pick a PCI vid/did of a chip with a single uart at
* BAR0, that most versions of FreeBSD can understand:
* Siig CyberSerial 1-port.
*/
#define COM_VENDOR 0x131f
#define COM_DEV 0x2000
/* Assert the device's legacy PCI interrupt on behalf of the uart core. */
static void
pci_uart_intr_assert(void *arg)
{
    struct pci_devinst *pi = arg;

    pci_lintr_assert(pi);
}
/* Deassert the device's legacy PCI interrupt on behalf of the uart core. */
static void
pci_uart_intr_deassert(void *arg)
{
    struct pci_devinst *pi = arg;

    pci_lintr_deassert(pi);
}
/*
 * BAR0 write handler: forward single-byte guest writes to the uart
 * register at 'offset'.  Only BAR 0 with 1-byte accesses is supported.
 */
static void
pci_uart_write(UNUSED int vcpu, struct pci_devinst *pi, int baridx, uint64_t offset,
    int size, uint64_t value)
{
    assert(baridx == 0);
    assert(size == 1);

    uart_write(pi->pi_arg, ((int) offset), ((uint8_t) value));
}
/*
 * BAR0 read handler: return the uart register at 'offset', zero-extended
 * to 64 bits.  Only BAR 0 with 1-byte accesses is supported.
 */
static uint64_t
pci_uart_read(UNUSED int vcpu, struct pci_devinst *pi, int baridx,
    uint64_t offset, int size)
{
    assert(baridx == 0);
    assert(size == 1);

    return ((uint64_t) uart_read(pi->pi_arg, ((int) offset)));
}
/*
 * Initialize the emulated PCI serial port: allocate its I/O BAR, wire up
 * a legacy interrupt, fill in PCI config space and attach the uart
 * backend described by 'opts'.  Returns 0 on success, -1 on failure.
 */
static int
pci_uart_init(struct pci_devinst *pi, char *opts)
{
    struct uart_softc *sc;
    char *name;

    pci_emul_alloc_bar(pi, 0, PCIBAR_IO, UART_IO_BAR_SIZE);
    pci_lintr_request(pi);

    /* initialize config space */
    pci_set_cfgdata16(pi, PCIR_DEVICE, COM_DEV);
    pci_set_cfgdata16(pi, PCIR_VENDOR, COM_VENDOR);
    pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SIMPLECOMM);

    sc = uart_init(pci_uart_intr_assert, pci_uart_intr_deassert, pi);
    pi->pi_arg = sc;

    /*
     * BUGFIX: asprintf() leaves 'name' undefined on failure, so check
     * its return value before using or freeing the string.
     */
    if (asprintf(&name, "pci uart at %d:%d", pi->pi_slot,
        pi->pi_func) == -1)
    {
        fprintf(stderr, "Unable to allocate name for pci uart\n");
        return (-1);
    }

    if (uart_set_backend(sc, opts, name) != 0) {
        fprintf(stderr, "Unable to initialize backend '%s' for %s\n", opts, name);
        free(name);
        return (-1);
    }

    free(name);
    return (0);
}
/* Emulation callbacks for the PCI serial port device model. */
static struct pci_devemu pci_de_com = {
    .pe_emu = "uart",
    .pe_init = pci_uart_init,
    .pe_barwrite = pci_uart_write,
    .pe_barread = pci_uart_read
};
/* Register the device model with the PCI emulation framework. */
PCI_EMUL_SET(pci_de_com);
|
mike-pt/xhyve
|
src/vmm/intel/vmx_msr.c
|
<filename>src/vmm/intel/vmx_msr.c
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <stdint.h>
#include <stdbool.h>
#include <errno.h>
#include <sys/sysctl.h>
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/specialreg.h>
#include <xhyve/vmm/vmm.h>
#include <xhyve/vmm/intel/vmx.h>
#include <xhyve/vmm/intel/vmx_msr.h>
/*
 * Return true if the VMX capability MSR permits the control at 'bitpos'
 * to be set to 1.  The "allowed-1" settings live in the upper 32 bits of
 * the capability MSR.  (Rewritten as a direct boolean expression instead
 * of an if/else over the non-standard TRUE/FALSE macros.)
 */
static bool
vmx_ctl_allows_one_setting(uint64_t msr_val, int bitpos)
{
    return ((msr_val & (1UL << (bitpos + 32))) != 0);
}
/*
 * Return true if the VMX capability MSR permits the control at 'bitpos'
 * to be set to 0.  The "allowed-0" settings live in the lower 32 bits of
 * the capability MSR: a clear bit means zero is allowed.  (Rewritten as
 * a direct boolean expression instead of if/else over TRUE/FALSE macros.)
 */
static bool
vmx_ctl_allows_zero_setting(uint64_t msr_val, int bitpos)
{
    return ((msr_val & (1UL << bitpos)) == 0);
}
/*
 * Compute a VMX control register value in '*retval' that honors the
 * constraints reported by the capability MSR 'cap_field' while setting
 * the bits in 'ones_mask' and clearing the bits in 'zeros_mask'.
 * Returns 0 on success or EINVAL on a conflicting or unsatisfiable
 * request.
 */
int vmx_set_ctlreg(hv_vmx_capability_t cap_field, uint32_t ones_mask,
    uint32_t zeros_mask, uint32_t *retval)
{
    int i;
    uint64_t cap;
    bool one_allowed, zero_allowed;

    /* We cannot ask the same bit to be set to both '1' and '0' */
    if ((ones_mask ^ zeros_mask) != (ones_mask | zeros_mask)) {
        return EINVAL;
    }

    if (hv_vmx_read_capability(cap_field, &cap)) {
        return EINVAL;
    }

    for (i = 0; i < 32; i++) {
        one_allowed = vmx_ctl_allows_one_setting(cap, i);
        zero_allowed = vmx_ctl_allows_zero_setting(cap, i);

        if (zero_allowed && !one_allowed) {
            /* must be zero */
            if (ones_mask & (1 << i)) {
                fprintf(stderr,
                    "vmx_set_ctlreg: cap_field: %d bit: %d must be zero\n",
                    cap_field, i);
                return (EINVAL);
            }
            *retval &= ~(1 << i);
        } else if (one_allowed && !zero_allowed) {
            /* must be one */
            if (zeros_mask & (1 << i)) {
                fprintf(stderr,
                    "vmx_set_ctlreg: cap_field: %d bit: %d must be one\n",
                    cap_field, i);
                return (EINVAL);
            }
            *retval |= 1 << i;
        } else {
            /* don't care */
            if (zeros_mask & (1 << i)){
                *retval &= ~(1 << i);
            } else if (ones_mask & (1 << i)) {
                *retval |= 1 << i;
            } else {
                /* XXX: don't allow unspecified don't cares */
                fprintf(stderr,
                    "vmx_set_ctlreg: cap_field: %d bit: %d unspecified "
                    "don't care\n", cap_field, i);
                return (EINVAL);
            }
        }
    }

    return (0);
}
/* Values reported to guests for emulated MSRs; filled in by vmx_msr_init(). */
static uint64_t misc_enable;        /* IA32_MISC_ENABLE */
static uint64_t platform_info;      /* MSR_PLATFORM_INFO */
static uint64_t turbo_ratio_limit;  /* MSR_TURBO_RATIO_LIMIT{,1} */
/*
 * Validate a candidate IA32_PAT value.  Each of the eight PAT entries
 * occupies one byte; only memory-type encodings 0, 1 and 4-7 are valid
 * (2, 3 and anything >= 8 are reserved).
 */
static bool
pat_valid(uint64_t val)
{
    uint64_t rem;
    int idx, memtype;

    /*
     * From Intel SDM: Table "Memory Types That Can Be Encoded With PAT"
     */
    for (idx = 0, rem = val; idx < 8; idx++, rem >>= 8) {
        memtype = rem & 0xff;
        if (memtype == 2 || memtype == 3 || memtype >= 8)
            return (false);
    }

    return (true);
}
/*
 * Compute the values reported for the emulated MSRs (IA32_MISC_ENABLE,
 * MSR_PLATFORM_INFO, MSR_TURBO_RATIO_LIMIT) from the host's TSC and bus
 * frequencies.  Aborts if the sysctls cannot be read.
 */
void
vmx_msr_init(void) {
    uint64_t bus_freq, tsc_freq, ratio;
    size_t length;
    int i;

    length = sizeof(uint64_t);

    if (sysctlbyname("machdep.tsc.frequency", &tsc_freq, &length, NULL, 0)) {
        xhyve_abort("machdep.tsc.frequency\n");
    }

    if (sysctlbyname("hw.busfrequency", &bus_freq, &length, NULL, 0)) {
        xhyve_abort("hw.busfrequency\n");
    }

    /* Initialize emulated MSRs */
    /* FIXME */
    misc_enable = 1;
    /*
     * Set mandatory bits
     *  11:   branch trace disabled
     *  12:   PEBS unavailable
     * Clear unsupported features
     *  16:   SpeedStep enable
     *  18:   enable MONITOR FSM
     */
    misc_enable |= (1u << 12) | (1u << 11);
    misc_enable &= ~((1u << 18) | (1u << 16));

    /*
     * XXXtime
     * The ratio should really be based on the virtual TSC frequency as
     * opposed to the host TSC.
     */
    ratio = (tsc_freq / bus_freq) & 0xff;

    /*
     * The register definition is based on the micro-architecture
     * but the following bits are always the same:
     * [15:8]  Maximum Non-Turbo Ratio
     * [28]    Programmable Ratio Limit for Turbo Mode
     * [29]    Programmable TDC-TDP Limit for Turbo Mode
     * [47:40] Maximum Efficiency Ratio
     *
     * The other bits can be safely set to 0 on all
     * micro-architectures up to Haswell.
     */
    platform_info = (ratio << 8) | (ratio << 40);

    /*
     * The number of valid bits in the MSR_TURBO_RATIO_LIMITx register is
     * dependent on the maximum cores per package supported by the micro-
     * architecture. For e.g., Westmere supports 6 cores per package and
     * uses the low 48 bits. Sandybridge support 8 cores per package and
     * uses up all 64 bits.
     *
     * However, the unused bits are reserved so we pretend that all bits
     * in this MSR are valid.
     */
    /* Replicate the same ratio into all eight byte-wide slots. */
    for (i = 0; i < 8; i++) {
        turbo_ratio_limit = (turbo_ratio_limit << 8) | ratio;
    }
}
/*
 * Per-vcpu MSR setup: let the guest access the syscall/segment-base MSRs
 * natively and initialize the emulated IA32_PAT to its architectural
 * post-reset value.
 */
void
vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
{
    static const uint32_t native_msrs[] = {
        MSR_LSTAR, MSR_CSTAR, MSR_STAR, MSR_SF_MASK, MSR_KGSBASE
    };
    uint64_t *guest_msrs;
    size_t i;

    guest_msrs = vmx->guest_msrs[vcpuid];

    for (i = 0; i < (sizeof(native_msrs) / sizeof(native_msrs[0])); i++)
        hv_vcpu_enable_native_msr(((hv_vcpuid_t) vcpuid),
            native_msrs[i], 1);

    /*
     * Initialize guest IA32_PAT MSR with default value after reset.
     */
    guest_msrs[IDX_MSR_PAT] = PAT_VALUE(0, PAT_WRITE_BACK) |
        PAT_VALUE(1, PAT_WRITE_THROUGH) |
        PAT_VALUE(2, PAT_UNCACHED) |
        PAT_VALUE(3, PAT_UNCACHEABLE) |
        PAT_VALUE(4, PAT_WRITE_BACK) |
        PAT_VALUE(5, PAT_WRITE_THROUGH) |
        PAT_VALUE(6, PAT_UNCACHED) |
        PAT_VALUE(7, PAT_UNCACHEABLE);

    return;
}
/*
 * Handle a guest RDMSR of 'num'.  Fills in '*val' and returns 0 for MSRs
 * emulated here; returns EINVAL for everything else so the caller can
 * decide how to proceed.
 */
int
vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val)
{
    const uint64_t *guest_msrs;
    int error;

    guest_msrs = vmx->guest_msrs[vcpuid];
    error = 0;

    switch (num) {
    case MSR_EFER:
        /* EFER lives in the VMCS guest-state area. */
        *val = vmcs_read(vcpuid, VMCS_GUEST_IA32_EFER);
        break;
    case MSR_MCG_CAP:
    case MSR_MCG_STATUS:
        /* No machine-check banks are exposed to the guest. */
        *val = 0;
        break;
    case MSR_MTRRcap:
    case MSR_MTRRdefType:
    case MSR_MTRR4kBase:
    case MSR_MTRR4kBase + 1:
    case MSR_MTRR4kBase + 2:
    case MSR_MTRR4kBase + 3:
    case MSR_MTRR4kBase + 4:
    case MSR_MTRR4kBase + 5:
    case MSR_MTRR4kBase + 6:
    case MSR_MTRR4kBase + 7:
    case MSR_MTRR4kBase + 8:
    case MSR_MTRR16kBase:
    case MSR_MTRR16kBase + 1:
    case MSR_MTRR64kBase:
        /* MTRRs read as zero. */
        *val = 0;
        break;
    case MSR_IA32_MISC_ENABLE:
        *val = misc_enable;
        break;
    case MSR_PLATFORM_INFO:
        *val = platform_info;
        break;
    case MSR_TURBO_RATIO_LIMIT:
    case MSR_TURBO_RATIO_LIMIT1:
        *val = turbo_ratio_limit;
        break;
    case MSR_PAT:
        /* PAT is tracked per vcpu in the emulated MSR array. */
        *val = guest_msrs[IDX_MSR_PAT];
        break;
    default:
        error = EINVAL;
        break;
    }
    return (error);
}
/*
 * Handle a guest WRMSR of 'val' to 'num'.  Returns 0 for MSRs emulated
 * here (which may still inject #GP into the guest) or EINVAL to punt the
 * write to the caller.
 */
int
vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val)
{
    uint64_t *guest_msrs;
    uint64_t changed;
    int error;

    guest_msrs = vmx->guest_msrs[vcpuid];
    error = 0;

    switch (num) {
    case MSR_EFER:
        /* EFER is stored in the VMCS guest-state area. */
        vmcs_write(vcpuid, VMCS_GUEST_IA32_EFER, val);
        break;
    case MSR_MCG_CAP:
    case MSR_MCG_STATUS:
        break;        /* ignore writes */
    case MSR_MTRRcap:
        /* MTRRcap is architecturally read-only. */
        vm_inject_gp(vmx->vm, vcpuid);
        break;
    case MSR_MTRRdefType:
    case MSR_MTRR4kBase:
    case MSR_MTRR4kBase + 1:
    case MSR_MTRR4kBase + 2:
    case MSR_MTRR4kBase + 3:
    case MSR_MTRR4kBase + 4:
    case MSR_MTRR4kBase + 5:
    case MSR_MTRR4kBase + 6:
    case MSR_MTRR4kBase + 7:
    case MSR_MTRR4kBase + 8:
    case MSR_MTRR16kBase:
    case MSR_MTRR16kBase + 1:
    case MSR_MTRR64kBase:
        break;        /* Ignore writes */
    case MSR_IA32_MISC_ENABLE:
        changed = val ^ misc_enable;
        /*
         * If the host has disabled the NX feature then the guest
         * also cannot use it. However, a Linux guest will try to
         * enable the NX feature by writing to the MISC_ENABLE MSR.
         *
         * This can be safely ignored because the memory management
         * code looks at CPUID.80000001H:EDX.NX to check if the
         * functionality is actually enabled.
         */
        changed &= ~(1UL << 34);

        /*
         * Punt to userspace if any other bits are being modified.
         */
        if (changed)
            error = EINVAL;

        break;
    case MSR_PAT:
        /* Only architecturally valid PAT encodings are accepted. */
        if (pat_valid(val))
            guest_msrs[IDX_MSR_PAT] = val;
        else
            vm_inject_gp(vmx->vm, vcpuid);
        break;
    default:
        error = EINVAL;
        break;
    }

    return (error);
}
|
mike-pt/xhyve
|
src/vmm/io/vatpic.c
|
/*-
* Copyright (c) 2014 <NAME> <<EMAIL>>
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stdint.h>
#include <stdbool.h>
#include <errno.h>
#include <assert.h>
#include <xhyve/lock.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/i8259.h>
#include <xhyve/support/apicreg.h>
#include <xhyve/vmm/vmm_lapic.h>
#include <xhyve/vmm/vmm_ktr.h>
#include <xhyve/vmm/io/vatpic.h>
#include <xhyve/vmm/io/vioapic.h>
/* Convenience wrappers around the xhyve lock primitives. */
#define VATPIC_LOCK_INIT(v) XHYVE_LOCK_INIT(v, lock)
#define VATPIC_LOCK(v) XHYVE_LOCK(v, lock)
#define VATPIC_UNLOCK(v) XHYVE_UNLOCK(v, lock)

/* Requested transition for an interrupt input pin. */
enum irqstate {
    IRQSTATE_ASSERT,    /* raise the pin */
    IRQSTATE_DEASSERT,  /* lower the pin */
    IRQSTATE_PULSE      /* assert then immediately deassert */
};
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
/* State for one 8259-style interrupt controller (master or slave). */
struct atpic {
    bool ready;
    int icw_num;          /* index of the next expected init command word */
    int rd_cmd_reg;
    bool aeoi;            /* automatic EOI mode */
    bool poll;
    bool rotate;
    bool sfn;             /* special fully-nested mode */
    int irq_base;         /* vector base programmed via ICW2 -- confirm */
    uint8_t request;      /* Interrupt Request Register (IIR) */
    uint8_t service;      /* Interrupt Service (ISR) */
    uint8_t mask;         /* Interrupt Mask Register (IMR) */
    uint8_t smm;          /* special mask mode */
    int acnt[8];          /* sum of pin asserts and deasserts */
    int lowprio;          /* lowest priority irq */
    bool intr_raised;     /* output line currently raised */
};

/* Pair of cascaded PICs ([0] master, [1] slave) owned by one VM. */
struct vatpic {
    struct vm *vm;
    xhyve_lock_t lock;
    struct atpic atpic[2];
    uint8_t elc[2];       /* presumably ELCR edge/level control -- confirm */
};
#pragma clang diagnostic pop
/* Tracing helpers that tag messages with the owning VM. */
#define VATPIC_CTR0(vatpic, fmt) \
    VM_CTR0((vatpic)->vm, fmt)

#define VATPIC_CTR1(vatpic, fmt, a1) \
    VM_CTR1((vatpic)->vm, fmt, a1)

#define VATPIC_CTR2(vatpic, fmt, a1, a2) \
    VM_CTR2((vatpic)->vm, fmt, a1, a2)

#define VATPIC_CTR3(vatpic, fmt, a1, a2, a3) \
    VM_CTR3((vatpic)->vm, fmt, a1, a2, a3)

#define VATPIC_CTR4(vatpic, fmt, a1, a2, a3, a4) \
    VM_CTR4((vatpic)->vm, fmt, a1, a2, a3, a4)

/*
 * Loop over all the pins in priority order from highest to lowest.
 * The highest priority pin is the one after 'lowprio', wrapping mod 8.
 */
#define ATPIC_PIN_FOREACH(pinvar, atpic, tmpvar) \
    for (tmpvar = 0, pinvar = (atpic->lowprio + 1) & 0x7; \
        tmpvar < 8; \
        tmpvar++, pinvar = (pinvar + 1) & 0x7)
static void vatpic_set_pinstate(struct vatpic *vatpic, int pin, bool newstate);
/* Return true iff 'atpic' is the master PIC (first element of the pair). */
static __inline bool
master_atpic(struct vatpic *vatpic, struct atpic *atpic)
{
    return (atpic == &vatpic->atpic[0]);
}
/*
 * Return the highest priority pin with an interrupt currently in
 * service, or -1 if none (or if every in-service pin is suppressed by
 * special mask mode).
 */
static __inline int
vatpic_get_highest_isrpin(struct atpic *atpic)
{
    int bit, pin;
    int i;

    ATPIC_PIN_FOREACH(pin, atpic, i) {
        bit = (1 << pin);

        if (atpic->service & bit) {
            /*
             * An IS bit that is masked by an IMR bit will not be
             * cleared by a non-specific EOI in Special Mask Mode.
             */
            if (atpic->smm && (atpic->mask & bit) != 0)
                continue;
            else
                return (pin);
        }
    }

    return (-1);
}
/*
 * Return the highest priority pin that is requesting service and is
 * eligible to interrupt (not masked and not outranked by an in-service
 * interrupt), or -1 if no such pin exists.
 */
static __inline int
vatpic_get_highest_irrpin(struct atpic *atpic)
{
    int serviced;
    int bit, pin, tmp;

    /*
     * In 'Special Fully-Nested Mode' when an interrupt request from
     * a slave is in service, the slave is not locked out from the
     * master's priority logic.
     */
    serviced = atpic->service;
    if (atpic->sfn)
        serviced &= ~(1 << 2);    /* pin 2 is the slave cascade input */

    /*
     * In 'Special Mask Mode', when a mask bit is set in OCW1 it inhibits
     * further interrupts at that level and enables interrupts from all
     * other levels that are not masked. In other words the ISR has no
     * bearing on the levels that can generate interrupts.
     */
    if (atpic->smm)
        serviced = 0;

    ATPIC_PIN_FOREACH(pin, atpic, tmp) {
        bit = 1 << pin;

        /*
         * If there is already an interrupt in service at the same
         * or higher priority then bail.
         */
        if ((serviced & bit) != 0)
            break;

        /*
         * If an interrupt is asserted and not masked then return
         * the corresponding 'pin' to the caller.
         */
        if ((atpic->request & bit) != 0 && (atpic->mask & bit) == 0)
            return (pin);
    }

    return (-1);
}
/*
 * Re-evaluate both PICs and deliver a pending interrupt if one became
 * eligible. The slave is evaluated first so that a newly deliverable slave
 * interrupt is cascaded (via pin 2) into the master before the master is
 * checked. 'intr_raised' prevents re-notifying until the current interrupt
 * is accepted (cleared in vatpic_pin_accepted).
 */
static void
vatpic_notify_intr(struct vatpic *vatpic)
{
	struct atpic *atpic;
	int pin;

	/*
	 * First check the slave.
	 */
	atpic = &vatpic->atpic[1];
	if (!atpic->intr_raised &&
	    (pin = vatpic_get_highest_irrpin(atpic)) != -1) {
		VATPIC_CTR4(vatpic, "atpic slave notify pin = %d "
		    "(imr 0x%x irr 0x%x isr 0x%x)", pin,
		    atpic->mask, atpic->request, atpic->service);

		/*
		 * Cascade the request from the slave to the master.
		 */
		atpic->intr_raised = true;
		/* pulse the master's cascade input (pin 2) */
		vatpic_set_pinstate(vatpic, 2, true);
		vatpic_set_pinstate(vatpic, 2, false);
	} else {
		VATPIC_CTR3(vatpic, "atpic slave no eligible interrupts "
		    "(imr 0x%x irr 0x%x isr 0x%x)",
		    atpic->mask, atpic->request, atpic->service);
	}

	/*
	 * Then check the master.
	 */
	atpic = &vatpic->atpic[0];
	if (!atpic->intr_raised &&
	    (pin = vatpic_get_highest_irrpin(atpic)) != -1) {
		VATPIC_CTR4(vatpic, "atpic master notify pin = %d "
		    "(imr 0x%x irr 0x%x isr 0x%x)", pin,
		    atpic->mask, atpic->request, atpic->service);

		/*
		 * From Section 3.6.2, "Interrupt Modes", in the
		 * MPtable Specification, Version 1.4
		 *
		 * PIC interrupts are routed to both the Local APIC
		 * and the I/O APIC to support operation in 1 of 3
		 * modes.
		 *
		 * 1. Legacy PIC Mode: the PIC effectively bypasses
		 * all APIC components.  In this mode the local APIC is
		 * disabled and LINT0 is reconfigured as INTR to
		 * deliver the PIC interrupt directly to the CPU.
		 *
		 * 2. Virtual Wire Mode: the APIC is treated as a
		 * virtual wire which delivers interrupts from the PIC
		 * to the CPU.  In this mode LINT0 is programmed as
		 * ExtINT to indicate that the PIC is the source of
		 * the interrupt.
		 *
		 * 3. Virtual Wire Mode via I/O APIC: PIC interrupts are
		 * fielded by the I/O APIC and delivered to the appropriate
		 * CPU.  In this mode the I/O APIC input 0 is programmed
		 * as ExtINT to indicate that the PIC is the source of the
		 * interrupt.
		 */
		atpic->intr_raised = true;
		/* deliver on both possible routes; the guest's mode decides
		 * which one it actually listens to */
		lapic_set_local_intr(vatpic->vm, -1, APIC_LVT_LINT0);
		vioapic_pulse_irq(vatpic->vm, 0);
	} else {
		VATPIC_CTR3(vatpic, "atpic master no eligible interrupts "
		    "(imr 0x%x irr 0x%x isr 0x%x)",
		    atpic->mask, atpic->request, atpic->service);
	}
}
/*
 * Handle ICW1: begin (re)initialization of the PIC. Resets most state and
 * arms the ICW sequence so the next data-port write is treated as ICW2.
 * Returns -1 for configurations this emulation does not support
 * (single-PIC mode, or initialization without ICW4).
 */
static int
vatpic_icw1(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
{
	VATPIC_CTR1(vatpic, "atpic icw1 0x%x", val);

	atpic->ready = false;	/* not operational until ICW4 completes */

	atpic->icw_num = 1;
	atpic->request = 0;
	atpic->mask = 0;
	atpic->lowprio = 7;	/* default priority: IRQ0 highest */
	atpic->rd_cmd_reg = 0;
	atpic->poll = 0;
	atpic->smm = 0;

	if ((val & ICW1_SNGL) != 0) {
		VATPIC_CTR0(vatpic, "vatpic cascade mode required");
		return (-1);
	}

	if ((val & ICW1_IC4) == 0) {
		VATPIC_CTR0(vatpic, "vatpic icw4 required");
		return (-1);
	}

	atpic->icw_num++;	/* expect ICW2 next */

	return (0);
}
/*
 * Handle ICW2: set the interrupt vector base (low 3 bits are the pin
 * number, so only the top 5 bits of 'val' are used).
 */
static int
vatpic_icw2(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
{
	VATPIC_CTR1(vatpic, "atpic icw2 0x%x", val);

	atpic->irq_base = val & 0xf8;

	atpic->icw_num++;	/* expect ICW3 next */

	return (0);
}
/*
 * Handle ICW3 (cascade wiring). The value is ignored: this emulation
 * hard-wires the slave to master pin 2, so only the sequence position
 * advances.
 */
static int
vatpic_icw3(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
{
	VATPIC_CTR1(vatpic, "atpic icw3 0x%x", val);

	atpic->icw_num++;	/* expect ICW4 next */

	return (0);
}
/*
 * Handle ICW4: final initialization word. Requires 8086 mode; optionally
 * enables automatic EOI, and special fully-nested mode (master only).
 * On success the PIC becomes operational ('ready').
 */
static int
vatpic_icw4(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
{
	VATPIC_CTR1(vatpic, "atpic icw4 0x%x", val);

	if ((val & ICW4_8086) == 0) {
		VATPIC_CTR0(vatpic, "vatpic microprocessor mode required");
		return (-1);
	}

	if ((val & ICW4_AEOI) != 0)
		atpic->aeoi = true;

	if ((val & ICW4_SFNM) != 0) {
		if (master_atpic(vatpic, atpic)) {
			atpic->sfn = true;
		} else {
			VATPIC_CTR1(vatpic, "Ignoring special fully nested "
			    "mode on slave atpic: %#x", val);
		}
	}

	atpic->icw_num = 0;	/* initialization sequence complete */
	atpic->ready = true;

	return (0);
}
/*
 * Handle OCW1: program the interrupt mask register (IMR).
 * (The '& 0xff' is a no-op since 'val' is already a uint8_t.)
 */
static int
vatpic_ocw1(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
{
	VATPIC_CTR1(vatpic, "atpic ocw1 0x%x", val);

	atpic->mask = val & 0xff;

	return (0);
}
/*
 * Handle OCW2: EOI commands and priority rotation. A specific EOI names
 * the ISR bit in the low 3 bits of 'val'; a non-specific EOI clears the
 * highest-priority in-service bit. With the rotate bit set, the serviced
 * pin becomes the new lowest-priority pin.
 */
static int
vatpic_ocw2(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
{
	VATPIC_CTR1(vatpic, "atpic ocw2 0x%x", val);

	atpic->rotate = ((val & OCW2_R) != 0);

	if ((val & OCW2_EOI) != 0) {
		int isr_bit;

		if ((val & OCW2_SL) != 0) {
			/* specific EOI */
			isr_bit = val & 0x7;
		} else {
			/* non-specific EOI */
			isr_bit = vatpic_get_highest_isrpin(atpic);
		}

		if (isr_bit != -1) {
			atpic->service &= ~(1 << isr_bit);

			if (atpic->rotate)
				atpic->lowprio = isr_bit;
		}
	} else if ((val & OCW2_SL) != 0 && atpic->rotate == true) {
		/* specific priority */
		atpic->lowprio = val & 0x7;
	}

	return (0);
}
/*
 * Handle OCW3: special mask mode control, the read-register command
 * (select ISR vs IRR for subsequent command-port reads), and poll mode.
 */
static int
vatpic_ocw3(struct vatpic *vatpic, struct atpic *atpic, uint8_t val)
{
	VATPIC_CTR1(vatpic, "atpic ocw3 0x%x", val);

	if (val & OCW3_ESMM) {
		atpic->smm = val & OCW3_SMM ? 1 : 0;
		VATPIC_CTR2(vatpic, "%s atpic special mask mode %s",
		    master_atpic(vatpic, atpic) ? "master" : "slave",
		    atpic->smm ? "enabled" : "disabled");
	}

	if (val & OCW3_RR) {
		/* read register command */
		atpic->rd_cmd_reg = val & OCW3_RIS;

		/* Polling mode */
		atpic->poll = ((val & OCW3_P) != 0);
	}

	return (0);
}
/*
 * Update the assertion count for 'pin' (0-15) and recompute its IRR bit.
 * acnt tracks nested assert/deassert calls: the request bit is set on a
 * rising edge (0 -> 1) or while asserted in level-triggered mode (per the
 * elc register), and cleared on a falling edge only in level mode.
 * Always re-evaluates interrupt delivery via vatpic_notify_intr().
 */
static void
vatpic_set_pinstate(struct vatpic *vatpic, int pin, bool newstate)
{
	struct atpic *atpic;
	int oldcnt, newcnt;
	bool level;

	KASSERT(pin >= 0 && pin < 16,
	    ("vatpic_set_pinstate: invalid pin number %d", pin));

	/* pins 0-7 belong to the master PIC, 8-15 to the slave */
	atpic = &vatpic->atpic[pin >> 3];

	oldcnt = atpic->acnt[pin & 0x7];
	if (newstate)
		atpic->acnt[pin & 0x7]++;
	else
		atpic->acnt[pin & 0x7]--;
	newcnt = atpic->acnt[pin & 0x7];

	if (newcnt < 0) {
		/* more deasserts than asserts: caller bug, log and continue */
		VATPIC_CTR2(vatpic, "atpic pin%d: bad acnt %d", pin, newcnt);
	}

	level = ((vatpic->elc[pin >> 3] & (1 << (pin & 0x7))) != 0);

	if ((oldcnt == 0 && newcnt == 1) || (newcnt > 0 && level == true)) {
		/* rising edge or level */
		VATPIC_CTR1(vatpic, "atpic pin%d: asserted", pin);
		atpic->request |= (1 << (pin & 0x7));
	} else if (oldcnt == 1 && newcnt == 0) {
		/* falling edge */
		VATPIC_CTR1(vatpic, "atpic pin%d: deasserted", pin);
		if (level)
			atpic->request &= ~(1 << (pin & 0x7));
	} else {
		VATPIC_CTR3(vatpic, "atpic pin%d: %s, ignored, acnt %d",
		    pin, newstate ? "asserted" : "deasserted", newcnt);
	}

	vatpic_notify_intr(vatpic);
}
/*
 * Common helper for the public assert/deassert/pulse entry points.
 * Validates 'irq' (0-15), silently succeeds if the owning PIC has not
 * completed its ICW sequence, and otherwise applies the state change
 * under the vatpic lock.
 */
static int
vatpic_set_irqstate(struct vm *vm, int irq, enum irqstate irqstate)
{
	struct vatpic *vatpic;
	struct atpic *atpic;

	if (irq < 0 || irq > 15)
		return (EINVAL);

	vatpic = vm_atpic(vm);
	atpic = &vatpic->atpic[irq >> 3];

	if (atpic->ready == false)
		return (0);	/* PIC not initialized yet; drop the event */

	VATPIC_LOCK(vatpic);
	switch (irqstate) {
	case IRQSTATE_ASSERT:
		vatpic_set_pinstate(vatpic, irq, true);
		break;
	case IRQSTATE_DEASSERT:
		vatpic_set_pinstate(vatpic, irq, false);
		break;
	case IRQSTATE_PULSE:
		/* assert immediately followed by deassert */
		vatpic_set_pinstate(vatpic, irq, true);
		vatpic_set_pinstate(vatpic, irq, false);
		break;
	}
	VATPIC_UNLOCK(vatpic);

	return (0);
}
/* Assert (raise) IRQ 'irq' on the virtual PIC pair. */
int
vatpic_assert_irq(struct vm *vm, int irq)
{
	return (vatpic_set_irqstate(vm, irq, IRQSTATE_ASSERT));
}
/* Deassert (lower) IRQ 'irq' on the virtual PIC pair. */
int
vatpic_deassert_irq(struct vm *vm, int irq)
{
	return (vatpic_set_irqstate(vm, irq, IRQSTATE_DEASSERT));
}
/* Pulse IRQ 'irq' (assert then immediately deassert). */
int
vatpic_pulse_irq(struct vm *vm, int irq)
{
	return (vatpic_set_irqstate(vm, irq, IRQSTATE_PULSE));
}
/*
 * Program the trigger mode (edge vs level) of 'irq' in the edge/level
 * control register. IRQs 0, 1, 2, 8 and 13 are architecturally fixed as
 * edge-triggered and reject LEVEL_TRIGGER with EINVAL.
 */
int
vatpic_set_irq_trigger(struct vm *vm, int irq, enum vm_intr_trigger trigger)
{
	struct vatpic *vatpic;

	if (irq < 0 || irq > 15)
		return (EINVAL);

	/*
	 * See comment in vatpic_elc_handler.  These IRQs must be
	 * edge triggered.
	 */
	if (trigger == LEVEL_TRIGGER) {
		switch (irq) {
		case 0:
		case 1:
		case 2:
		case 8:
		case 13:
			return (EINVAL);
		}
	}

	vatpic = vm_atpic(vm);

	VATPIC_LOCK(vatpic);
	if (trigger == LEVEL_TRIGGER)
		vatpic->elc[irq >> 3] |= 1 << (irq & 0x7);
	else
		vatpic->elc[irq >> 3] &= ~(1 << (irq & 0x7));
	VATPIC_UNLOCK(vatpic);

	return (0);
}
/*
 * Return (via *vecptr) the vector of the highest-priority deliverable
 * interrupt. If the master's best pin is the cascade (pin 2), the slave
 * supplies the vector instead. With no pending pin the spurious vector
 * (irq_base + 7) is returned, as real 8259 hardware does.
 */
void
vatpic_pending_intr(struct vm *vm, int *vecptr)
{
	struct vatpic *vatpic;
	struct atpic *atpic;
	int pin;

	vatpic = vm_atpic(vm);

	atpic = &vatpic->atpic[0];

	VATPIC_LOCK(vatpic);

	pin = vatpic_get_highest_irrpin(atpic);
	if (pin == 2) {
		/* cascade: look up the actual source on the slave */
		atpic = &vatpic->atpic[1];
		pin = vatpic_get_highest_irrpin(atpic);
	}

	/*
	 * If there are no pins active at this moment then return the spurious
	 * interrupt vector instead.
	 */
	if (pin == -1)
		pin = 7;

	KASSERT(pin >= 0 && pin <= 7, ("%s: invalid pin %d", __func__, pin));

	*vecptr = atpic->irq_base + pin;

	VATPIC_UNLOCK(vatpic);
}
/*
 * Book-keeping when the CPU accepts the interrupt for 'pin': allow new
 * notifications, drop the IRR bit if the line is no longer asserted, and
 * either rotate priority (auto-EOI) or mark the pin in-service so it
 * stays blocked until an explicit EOI.
 */
static void
vatpic_pin_accepted(struct atpic *atpic, int pin)
{
	atpic->intr_raised = false;

	if (atpic->acnt[pin] == 0)
		atpic->request &= ~(1 << pin);

	if (atpic->aeoi == true) {
		if (atpic->rotate == true)
			atpic->lowprio = pin;
	} else {
		atpic->service |= (1 << pin);
	}
}
/*
 * Called when the guest CPU has accepted interrupt 'vector'. The vector
 * base (vector & ~0x7) identifies which PIC it came from; a slave vector
 * also acknowledges the cascade pin (2) on the master. Finishes by
 * re-evaluating delivery for any remaining pending interrupts.
 */
void
vatpic_intr_accepted(struct vm *vm, int vector)
{
	struct vatpic *vatpic;
	int pin;

	vatpic = vm_atpic(vm);

	VATPIC_LOCK(vatpic);

	pin = vector & 0x7;

	if ((vector & ~0x7) == vatpic->atpic[1].irq_base) {
		vatpic_pin_accepted(&vatpic->atpic[1], pin);
		/*
		 * If this vector originated from the slave,
		 * accept the cascaded interrupt too.
		 */
		vatpic_pin_accepted(&vatpic->atpic[0], 2);
	} else {
		vatpic_pin_accepted(&vatpic->atpic[0], pin);
	}

	vatpic_notify_intr(vatpic);

	VATPIC_UNLOCK(vatpic);
}
/*
 * Handle a one-byte read from a PIC port. In poll mode the
 * highest-priority pending pin is accepted and returned as 0x80|pin
 * (0 when nothing is pending). Otherwise the IMR-offset port returns the
 * mask, and the command port returns ISR or IRR according to the last
 * OCW3 read-register command. Always returns 0.
 */
static int
vatpic_read(struct vatpic *vatpic, struct atpic *atpic, UNUSED bool in,
	int port, UNUSED int bytes, uint32_t *eax)
{
	int pin;

	VATPIC_LOCK(vatpic);

	if (atpic->poll) {
		/* poll mode is one-shot: consumed by this read */
		atpic->poll = 0;
		pin = vatpic_get_highest_irrpin(atpic);
		if (pin >= 0) {
			vatpic_pin_accepted(atpic, pin);
			*eax = 0x80 | ((uint32_t) pin);
		} else {
			*eax = 0;
		}
	} else {
		if (port & ICU_IMR_OFFSET) {
			/* read interrrupt mask register */
			*eax = atpic->mask;
		} else {
			if (atpic->rd_cmd_reg == OCW3_RIS) {
				/* read interrupt service register */
				*eax = atpic->service;
			} else {
				/* read interrupt request register */
				*eax = atpic->request;
			}
		}
	}

	VATPIC_UNLOCK(vatpic);

	//printf("vatpic_read 0x%04x 0x%02x\n", port, (uint8_t)*eax);
	return (0);
}
/*
 * Handle a one-byte write to a PIC port. Writes to the IMR-offset port
 * are routed by the current ICW sequence position: ICW2/3/4 while an
 * initialization sequence is in progress, OCW1 otherwise. Writes to the
 * command port select ICW1 (bit 4 set) or, once initialized, OCW3 vs
 * OCW2 (bit 3). Any write to a ready PIC re-evaluates interrupt delivery.
 */
static int
vatpic_write(struct vatpic *vatpic, struct atpic *atpic, UNUSED bool in,
	int port, UNUSED int bytes, uint32_t *eax)
{
	int error;
	uint8_t val;

	error = 0;
	val = (uint8_t) *eax;

	//printf("vatpic_write 0x%04x 0x%02x %d\n", port, val, atpic->icw_num);

	VATPIC_LOCK(vatpic);

	if (port & ICU_IMR_OFFSET) {
		switch (atpic->icw_num) {
		case 2:
			error = vatpic_icw2(vatpic, atpic, val);
			break;
		case 3:
			error = vatpic_icw3(vatpic, atpic, val);
			break;
		case 4:
			error = vatpic_icw4(vatpic, atpic, val);
			break;
		default:
			/* no initialization sequence in progress */
			error = vatpic_ocw1(vatpic, atpic, val);
			break;
		}
	} else {
		if (val & (1 << 4))
			error = vatpic_icw1(vatpic, atpic, val);

		if (atpic->ready) {
			if (val & (1 << 3))
				error = vatpic_ocw3(vatpic, atpic, val);
			else
				error = vatpic_ocw2(vatpic, atpic, val);
		}
	}

	if (atpic->ready)
		vatpic_notify_intr(vatpic);

	VATPIC_UNLOCK(vatpic);

	return (error);
}
/*
 * I/O port handler for the master PIC. Only single-byte accesses are
 * valid; 'in' selects a register read, otherwise a register write.
 */
int
vatpic_master_handler(struct vm *vm, UNUSED int vcpuid, bool in, int port,
	int bytes, uint32_t *eax)
{
	struct vatpic *vatpic = vm_atpic(vm);
	struct atpic *atpic = &vatpic->atpic[0];

	if (bytes != 1)
		return (-1);

	return (in ?
	    vatpic_read(vatpic, atpic, in, port, bytes, eax) :
	    vatpic_write(vatpic, atpic, in, port, bytes, eax));
}
/*
 * I/O port handler for the slave PIC. Only single-byte accesses are
 * valid; 'in' selects a register read, otherwise a register write.
 */
int
vatpic_slave_handler(struct vm *vm, UNUSED int vcpuid, bool in, int port,
	int bytes, uint32_t *eax)
{
	struct vatpic *vatpic = vm_atpic(vm);
	struct atpic *atpic = &vatpic->atpic[1];

	if (bytes != 1)
		return (-1);

	return (in ?
	    vatpic_read(vatpic, atpic, in, port, bytes, eax) :
	    vatpic_write(vatpic, atpic, in, port, bytes, eax));
}
/*
 * I/O handler for the edge/level control registers (ELCR). The port
 * selects master vs slave. Writes force the architecturally
 * edge-triggered IRQs to stay edge: the master masks with 0xf8 (IRQ0-2)
 * and the slave with 0xde (IRQ8 and IRQ13).
 */
int
vatpic_elc_handler(struct vm *vm, UNUSED int vcpuid, bool in, int port,
	int bytes, uint32_t *eax)
{
	struct vatpic *vatpic;
	bool is_master;

	vatpic = vm_atpic(vm);
	is_master = (port == IO_ELCR1);

	if (bytes != 1)
		return (-1);

	VATPIC_LOCK(vatpic);

	if (in) {
		if (is_master)
			*eax = vatpic->elc[0];
		else
			*eax = vatpic->elc[1];
	} else {
		/*
		 * For the master PIC the cascade channel (IRQ2), the
		 * heart beat timer (IRQ0), and the keyboard
		 * controller (IRQ1) cannot be programmed for level
		 * mode.
		 *
		 * For the slave PIC the real time clock (IRQ8) and
		 * the floating point error interrupt (IRQ13) cannot
		 * be programmed for level mode.
		 */
		if (is_master)
			vatpic->elc[0] = (*eax & 0xf8);
		else
			vatpic->elc[1] = (*eax & 0xde);
	}

	VATPIC_UNLOCK(vatpic);

	return (0);
}
/*
 * Allocate and initialize the virtual dual-8259 state for 'vm'.
 * The returned structure is zero-initialized and must be released with
 * vatpic_cleanup().
 */
struct vatpic *
vatpic_init(struct vm *vm)
{
	struct vatpic *vatpic;

	/* calloc() provides the zeroed memory malloc()+bzero() used to. */
	vatpic = calloc(1, sizeof(struct vatpic));
	assert(vatpic);

	vatpic->vm = vm;

	VATPIC_LOCK_INIT(vatpic);

	return (vatpic);
}
/* Release the vatpic state allocated by vatpic_init(). */
void
vatpic_cleanup(struct vatpic *vatpic)
{
	free(vatpic);
}
|
mike-pt/xhyve
|
include/xhyve/pci_emul.h
|
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <stdint.h>
#include <pthread.h>
#include <assert.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/pcireg.h>
#include <xhyve/support/linker_set.h>
#define PCI_BARMAX PCIR_MAX_BAR_0 /* BAR registers in a Type 0 header */
struct pci_devinst;
struct memory_region;
/*
 * Callback table implemented by each PCI device emulation. Instances are
 * registered in the pci_devemu_set linker set (see PCI_EMUL_SET) and
 * selected by the 'pe_emu' name.
 */
struct pci_devemu {
	/* name of device emulation */
	char *pe_emu;

	/* instance creation */
	int (*pe_init)(struct pci_devinst *, char *opts);

	/* ACPI DSDT enumeration */
	void (*pe_write_dsdt)(struct pci_devinst *);

	/* config space read/write callbacks */
	int (*pe_cfgwrite)(int vcpu, struct pci_devinst *pi,
		int offset, int bytes, uint32_t val);
	int (*pe_cfgread)(int vcpu, struct pci_devinst *pi, int offset, int bytes,
		uint32_t *retval);

	/* BAR read/write callbacks */
	void (*pe_barwrite)(int vcpu, struct pci_devinst *pi, int baridx,
		uint64_t offset, int size, uint64_t value);
	uint64_t (*pe_barread)(int vcpu, struct pci_devinst *pi, int baridx,
		uint64_t offset, int size);
};
#define PCI_EMUL_SET(x) DATA_SET(pci_devemu_set, x)
enum pcibar_type {
PCIBAR_NONE,
PCIBAR_IO,
PCIBAR_MEM32,
PCIBAR_MEM64,
PCIBAR_MEMHI64
};
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
struct pcibar {
enum pcibar_type type; /* io or memory */
uint64_t size;
uint64_t addr;
};
#pragma clang diagnostic pop
#define PI_NAMESZ 40
struct msix_table_entry {
uint64_t addr;
uint32_t msg_data;
uint32_t vector_control;
};
/*
* In case the structure is modified to hold extra information, use a define
* for the size that should be emulated.
*/
#define MSIX_TABLE_ENTRY_SIZE 16
#define MAX_MSIX_TABLE_ENTRIES 2048
#define PBA_SIZE(msgnum) (roundup2((msgnum), 64) / 8)
/* State of a device's legacy (INTx) interrupt line. */
enum lintr_stat {
	IDLE,
	ASSERTED,
	PENDING		/* assertion deferred, e.g. while masked */
};
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
/*
 * Per-instance state of an emulated PCI function: identity (bus/slot/func),
 * legacy-interrupt, MSI and MSI-X state, the shadow configuration space,
 * and the BARs that have been allocated for it.
 */
struct pci_devinst {
	struct pci_devemu *pi_d;	/* emulation callbacks for this device */
	uint8_t pi_bus, pi_slot, pi_func;
	char pi_name[PI_NAMESZ];
	int pi_bar_getsize;
	int pi_prevcap;		/* offset of previously added capability */
	int pi_capend;		/* end of the capability list */

	/* legacy INTx routing and state */
	struct {
		int8_t pin;
		enum lintr_stat state;
		int pirq_pin;
		int ioapic_irq;
		pthread_mutex_t lock;
	} pi_lintr;

	/* MSI state programmed by the guest */
	struct {
		int enabled;
		uint64_t addr;
		uint64_t msg_data;
		int maxmsgnum;
	} pi_msi;

	/* MSI-X state: table/PBA location and the table itself */
	struct {
		int enabled;
		int table_bar;
		int pba_bar;
		uint32_t table_offset;
		int table_count;
		uint32_t pba_offset;
		int pba_size;
		int function_mask;
		struct msix_table_entry *table;	/* allocated at runtime */
	} pi_msix;

	void *pi_arg;		/* devemu-private data */

	u_char pi_cfgdata[PCI_REGMAX + 1];	/* shadow config space */
	struct pcibar pi_bar[PCI_BARMAX + 1];
};
#pragma clang diagnostic pop
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpacked"
struct msicap {
uint8_t capid;
uint8_t nextptr;
uint16_t msgctrl;
uint32_t addrlo;
uint32_t addrhi;
uint16_t msgdata;
} __packed;
struct msixcap {
uint8_t capid;
uint8_t nextptr;
uint16_t msgctrl;
uint32_t table_info; /* bar index and offset within it */
uint32_t pba_info; /* bar index and offset within it */
} __packed;
struct pciecap {
uint8_t capid;
uint8_t nextptr;
uint16_t pcie_capabilities;
uint32_t dev_capabilities; /* all devices */
uint16_t dev_control;
uint16_t dev_status;
uint32_t link_capabilities; /* devices with links */
uint16_t link_control;
uint16_t link_status;
uint32_t slot_capabilities; /* ports with slots */
uint16_t slot_control;
uint16_t slot_status;
uint16_t root_control; /* root ports */
uint16_t root_capabilities;
uint32_t root_status;
uint32_t dev_capabilities2; /* all devices */
uint16_t dev_control2;
uint16_t dev_status2;
uint32_t link_capabilities2; /* devices with links */
uint16_t link_control2;
uint16_t link_status2;
uint32_t slot_capabilities2; /* ports with slots */
uint16_t slot_control2;
uint16_t slot_status2;
} __packed;
#pragma clang diagnostic pop
typedef void (*pci_lintr_cb)(int b, int s, int pin, int pirq_pin,
int ioapic_irq, void *arg);
int init_pci(void);
void msicap_cfgwrite(struct pci_devinst *pi, int capoff, int offset,
int bytes, uint32_t val);
void msixcap_cfgwrite(struct pci_devinst *pi, int capoff, int offset,
int bytes, uint32_t val);
void pci_callback(void);
int pci_emul_alloc_bar(struct pci_devinst *pdi, int idx,
enum pcibar_type type, uint64_t size);
int pci_emul_alloc_pbar(struct pci_devinst *pdi, int idx,
uint64_t hostbase, enum pcibar_type type, uint64_t size);
int pci_emul_add_msicap(struct pci_devinst *pi, int msgnum);
int pci_emul_add_pciecap(struct pci_devinst *pi, int pcie_device_type);
void pci_generate_msi(struct pci_devinst *pi, int msgnum);
void pci_generate_msix(struct pci_devinst *pi, int msgnum);
void pci_lintr_assert(struct pci_devinst *pi);
void pci_lintr_deassert(struct pci_devinst *pi);
void pci_lintr_request(struct pci_devinst *pi);
int pci_msi_enabled(struct pci_devinst *pi);
int pci_msix_enabled(struct pci_devinst *pi);
int pci_msix_table_bar(struct pci_devinst *pi);
int pci_msix_pba_bar(struct pci_devinst *pi);
int pci_msi_msgnum(struct pci_devinst *pi);
int pci_parse_slot(char *opt);
void pci_populate_msicap(struct msicap *cap, int msgs, int nextptr);
int pci_emul_add_msixcap(struct pci_devinst *pi, int msgnum, int barnum);
int pci_emul_msix_twrite(struct pci_devinst *pi, uint64_t offset, int size,
uint64_t value);
uint64_t pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size);
int pci_count_lintr(int bus);
void pci_walk_lintr(int bus, pci_lintr_cb cb, void *arg);
void pci_write_dsdt(void);
uint64_t pci_ecfg_base(void);
int pci_bus_configured(int bus);
/* Store a byte into the shadow config space at 'offset'. */
static __inline void
pci_set_cfgdata8(struct pci_devinst *pi, int offset, uint8_t val)
{
	assert(offset <= PCI_REGMAX);
	pi->pi_cfgdata[offset] = val;
}
/*
 * Store a 16-bit value into the shadow config space at 'offset' (must be
 * 2-byte aligned). The store goes through a uintptr_t cast rather than
 * array indexing - presumably to silence cast-align warnings on the
 * u_char array; confirm before changing.
 */
static __inline void
pci_set_cfgdata16(struct pci_devinst *pi, int offset, uint16_t val)
{
	assert(offset <= (PCI_REGMAX - 1) && (offset & 1) == 0);
	*(uint16_t *)(((uintptr_t) &pi->pi_cfgdata) + ((unsigned) offset)) = val;
}
/*
 * Store a 32-bit value into the shadow config space at 'offset' (must be
 * 4-byte aligned). Same uintptr_t cast idiom as pci_set_cfgdata16.
 */
static __inline void
pci_set_cfgdata32(struct pci_devinst *pi, int offset, uint32_t val)
{
	assert(offset <= (PCI_REGMAX - 3) && (offset & 3) == 0);
	*(uint32_t *)(((uintptr_t) &pi->pi_cfgdata) + ((unsigned) offset)) = val;
}
/* Fetch a byte from the shadow config space at 'offset'. */
static __inline uint8_t
pci_get_cfgdata8(struct pci_devinst *pi, int offset)
{
	assert(offset <= PCI_REGMAX);
	return (pi->pi_cfgdata[offset]);
}
/*
 * Fetch a 16-bit value from the shadow config space at 'offset' (must be
 * 2-byte aligned). Same uintptr_t cast idiom as pci_set_cfgdata16.
 */
static __inline uint16_t
pci_get_cfgdata16(struct pci_devinst *pi, int offset)
{
	assert(offset <= (PCI_REGMAX - 1) && (offset & 1) == 0);
	return (*(uint16_t *)(((uintptr_t) &pi->pi_cfgdata) + ((unsigned) offset)));
}
/*
 * Fetch a 32-bit value from the shadow config space at 'offset' (must be
 * 4-byte aligned). Same uintptr_t cast idiom as pci_set_cfgdata16.
 */
static __inline uint32_t
pci_get_cfgdata32(struct pci_devinst *pi, int offset)
{
	assert(offset <= (PCI_REGMAX - 3) && (offset & 3) == 0);
	return (*(uint32_t *)(((uintptr_t) &pi->pi_cfgdata) + ((unsigned) offset)));
}
|
mike-pt/xhyve
|
src/mevent_test.c
|
<filename>src/mevent_test.c
/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Test program for the micro event library. Set up a simple TCP echo
* service.
*
* cc mevent_test.c mevent.c -lpthread
*/
#include <stdint.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>
#include <xhyve/mevent.h>
#define TEST_PORT 4321
static pthread_mutex_t accept_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t accept_condvar = PTHREAD_COND_INITIALIZER;
static struct mevent *tevp;
char *vmname = "test vm";
#define MEVENT_ECHO
/* Number of timer events to capture */
#define TEVSZ 4096
uint64_t tevbuf[TEVSZ];
/*
 * Read the time-stamp counter. The leading cpuid serializes execution so
 * that rdtsc is not reordered with earlier instructions.
 */
static __inline uint64_t rdtsc(void)
{
	unsigned a, d;

	/*
	 * cpuid clobbers eax/ebx/ecx/edx. The original asm statement had
	 * no outputs or clobber list, so the compiler was entitled to
	 * assume those registers survived the instruction; declare the
	 * clobbers explicitly.
	 */
	__asm__ __volatile__ ("cpuid" : : : "%rax", "%rbx", "%rcx", "%rdx");
	__asm__ __volatile__ ("rdtsc" : "=a" (a), "=d" (d));

	return (((uint64_t) a) | (((uint64_t) d) << 32));
}
/*
 * Convert the captured tsc samples in tevbuf[] into microsecond deltas
 * and print min/max/mean. Called once, after TEVSZ timer callbacks have
 * fired.
 */
static void
timer_print(void)
{
	uint64_t min, max, diff, sum, tsc_freq;
	size_t len;
	int j;

	min = UINT64_MAX;
	max = 0;
	sum = 0;

	len = sizeof(tsc_freq);
	/*
	 * The original ignored sysctl failure, leaving tsc_freq
	 * uninitialized and risking a divide by garbage (or zero) below.
	 */
	if (sysctlbyname("machdep.tsc_freq", &tsc_freq, &len, NULL, 0) != 0 ||
	    tsc_freq == 0) {
		printf("timers done: machdep.tsc_freq unavailable\n");
		return;
	}

	for (j = 1; j < TEVSZ; j++) {
		/* Convert a tsc diff into microseconds */
		diff = (tevbuf[j] - tevbuf[j-1]) * 1000000 / tsc_freq;
		sum += diff;
		if (min > diff)
			min = diff;
		if (max < diff)
			max = diff;
	}

	/*
	 * %llu expects unsigned long long; uint64_t is not guaranteed to
	 * be that type on every platform, so cast explicitly.
	 */
	printf("timers done: usecs, min %llu, max %llu, mean %llu\n",
	    (unsigned long long) min, (unsigned long long) max,
	    (unsigned long long) (sum / (TEVSZ - 1)));
}
/*
 * EVF_TIMER callback: record one tsc sample per tick. After TEVSZ samples
 * the timer event is deleted and the statistics printed.
 */
static void
timer_callback(int fd, enum ev_type type, void *param)
{
	static int i;	/* samples captured so far */

	if (i >= TEVSZ)
		abort();	/* callback fired after the timer was deleted */

	tevbuf[i++] = rdtsc();

	if (i == TEVSZ) {
		mevent_delete(tevp);
		timer_print();
	}
}
#ifdef MEVENT_ECHO
struct esync {
pthread_mutex_t e_mt;
pthread_cond_t e_cond;
};
/*
 * EVF_READ callback for an echo connection: wake the echoer thread that
 * is blocked on the connection's condition variable.
 */
static void
echoer_callback(int fd, enum ev_type type, void *param)
{
	struct esync *es = (struct esync *)param;

	pthread_mutex_lock(&es->e_mt);
	pthread_cond_signal(&es->e_cond);
	pthread_mutex_unlock(&es->e_mt);
}
/*
 * Per-connection echo thread. Registers an EVF_READ event whose callback
 * signals the condition variable, then loops: wait for readability, read,
 * and write the data back. Exits (closing the fd via mevent_delete_close)
 * when read() returns <= 0.
 */
static void *
echoer(void *param)
{
	struct esync sync;
	struct mevent *mev;
	char buf[128];
	int fd = (int)(uintptr_t) param;
	int len;

	pthread_mutex_init(&sync.e_mt, NULL);
	pthread_cond_init(&sync.e_cond, NULL);
	pthread_mutex_lock(&sync.e_mt);

	mev = mevent_add(fd, EVF_READ, echoer_callback, &sync);
	if (mev == NULL) {
		printf("Could not allocate echoer event\n");
		exit(1);
	}

	while (!pthread_cond_wait(&sync.e_cond, &sync.e_mt)) {
		len = read(fd, buf, sizeof(buf));
		if (len > 0) {
			write(fd, buf, len);
			/*
			 * NOTE(review): this copies the data to fd 0 (stdin)
			 * while the #else variant of echoer writes to fd 1
			 * (stdout) - presumably stdout was intended here too;
			 * confirm against upstream before changing.
			 */
			write(0, buf, len);
		} else {
			break;
		}
	}

	mevent_delete_close(mev);

	pthread_mutex_unlock(&sync.e_mt);
	pthread_mutex_destroy(&sync.e_mt);
	pthread_cond_destroy(&sync.e_cond);

	return (NULL);
}
#else
/*
 * Non-mevent variant: synchronously drain the socket, copying everything
 * to stdout, until read() reports EOF or an error.
 */
static void *
echoer(void *param)
{
	char buf[128];
	int fd = (int)(uintptr_t) param;
	int len;

	while ((len = read(fd, buf, sizeof(buf))) > 0) {
		write(1, buf, len);
	}

	return (NULL);
}
#endif /* MEVENT_ECHO */
/*
 * EVF_READ callback on the listening socket: wake the acceptor thread
 * blocked on accept_condvar so it can call accept().
 */
static void
acceptor_callback(int fd, enum ev_type type, void *param)
{
	pthread_mutex_lock(&accept_mutex);
	pthread_cond_signal(&accept_condvar);
	pthread_mutex_unlock(&accept_mutex);
}
/*
 * Listener thread: bind/listen on TEST_PORT, register an EVF_READ event
 * on the listening socket, and for each signaled wakeup accept a
 * connection and spawn an echoer thread for it. The first accepted
 * connection also starts the periodic timer used for the latency
 * measurements.
 */
static void *
acceptor(void *param)
{
	struct sockaddr_in sin;
	pthread_t tid;
	int news;
	int s;

	/*
	 * The original declared an unused 'static int first;' here; it was
	 * shadowed by the identically named initialized static in the
	 * accept loop below and has been removed.
	 */

	if ((s = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
		perror("socket");
		exit(1);
	}

	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = htons(TEST_PORT);

	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
		perror("bind");
		exit(1);
	}

	if (listen(s, 1) < 0) {
		perror("listen");
		exit(1);
	}

	(void) mevent_add(s, EVF_READ, acceptor_callback, NULL);

	pthread_mutex_lock(&accept_mutex);

	while (!pthread_cond_wait(&accept_condvar, &accept_mutex)) {
		news = accept(s, NULL, NULL);
		if (news < 0) {
			perror("accept error");
		} else {
			static int first = 1;

			if (first) {
				/*
				 * Start a timer
				 */
				first = 0;
				tevp = mevent_add(1, EVF_TIMER, timer_callback,
				    NULL);
			}

			printf("incoming connection, spawning thread\n");

			pthread_create(&tid, NULL, echoer,
			    (void *)(uintptr_t)news);
		}
	}

	return (NULL);
}
/*
 * Entry point: spawn the acceptor thread, then run the mevent dispatch
 * loop on the main thread.
 */
int
main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, acceptor, NULL);

	mevent_dispatch();

	return (0);
}
|
mike-pt/xhyve
|
include/xhyve/support/misc.h
|
#pragma once
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define UNUSED __attribute__ ((unused))
#define CTASSERT(x) _Static_assert ((x), "CTASSERT")
#define XHYVE_PAGE_SIZE 0x1000
#define XHYVE_PAGE_MASK (XHYVE_PAGE_SIZE - 1)
#define XHYVE_PAGE_SHIFT 12
#define __aligned(x) __attribute__ ((aligned ((x))))
#define __packed __attribute__ ((packed))
#define nitems(x) (sizeof((x)) / sizeof((x)[0]))
#define powerof2(x) ((((x)-1)&(x))==0)
#define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is powers of two */
#define nitems(x) (sizeof((x)) / sizeof((x)[0]))
#define min(x, y) (((x) < (y)) ? (x) : (y))
#define xhyve_abort(...) \
do { \
fprintf(stderr, __VA_ARGS__); \
abort(); \
} while (0)
#define xhyve_warn(...) \
do { \
fprintf(stderr, __VA_ARGS__); \
} while (0)
#ifdef XHYVE_CONFIG_ASSERT
#define KASSERT(exp, msg) if (!(exp)) xhyve_abort msg
#define KWARN(exp, msg) if (!(exp)) xhyve_warn msg
#else
#define KASSERT(exp, msg) if (0) xhyve_abort msg
#define KWARN(exp, msg) if (0) xhyve_warn msg
#endif
#define FALSE 0
#define TRUE 1
#define XHYVE_PROT_READ 1
#define XHYVE_PROT_WRITE 2
#define XHYVE_PROT_EXECUTE 4
#define VM_SUCCESS 0
/* sys/sys/types.h */
typedef unsigned char u_char;
typedef unsigned short u_short;
typedef unsigned int u_int;
typedef unsigned long u_long;
/*
 * Execute cpuid with leaf 'ax' and subleaf 'cx', storing eax/ebx/ecx/edx
 * into p[0..3].
 */
static inline void cpuid_count(uint32_t ax, uint32_t cx, uint32_t *p) {
	__asm__ __volatile__ ("cpuid"
		: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
		: "0" (ax), "c" (cx));
}
/*
 * Execute cpuid with leaf 'ax' (subleaf left to whatever is in ecx),
 * storing eax/ebx/ecx/edx into p[0..3].
 */
static inline void do_cpuid(unsigned ax, unsigned *p) {
	__asm__ __volatile__ ("cpuid"
		: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
		: "0" (ax));
}
/*
* read_uint16_unaligned, write_uint16_unaligned,
* read_uint32_unaligned, write_uint32_unaligned
* read_uint64_unaligned, write_uint64_unaligned
*
* Routines to handle unaligned reads/writes - these are nop on AMD64 but routing
* the reads through these bottlenecks silences the warning and provides a place
* to put #if code to handle architectures where aligment matters (if it is ever needed).
*/
/*
 * Read a uint16_t from a possibly-unaligned address.
 *
 * Uses memcpy rather than dereferencing a cast pointer: dereferencing a
 * misaligned uint16_t* is undefined behavior in C, while a fixed-size
 * memcpy compiles to the same single load on x86-64.
 */
static inline uint16_t read_uint16_unaligned(void *pointer) {
	uint16_t value;
	memcpy(&value, pointer, sizeof(value));
	return value;
}

/* Write a uint16_t to a possibly-unaligned address (see read above). */
static inline void write_uint16_unaligned(void *pointer, uint16_t data) {
	memcpy(pointer, &data, sizeof(data));
}
/*
 * Read a uint32_t from a possibly-unaligned address. memcpy avoids the
 * undefined behavior of dereferencing a misaligned uint32_t* and still
 * compiles to a single load on x86-64.
 */
static inline uint32_t read_uint32_unaligned(void *pointer) {
	uint32_t value;
	memcpy(&value, pointer, sizeof(value));
	return value;
}

/* Write a uint32_t to a possibly-unaligned address (see read above). */
static inline void write_uint32_unaligned(void *pointer, uint32_t data) {
	memcpy(pointer, &data, sizeof(data));
}
/*
 * Read a uint64_t from a possibly-unaligned address. memcpy avoids the
 * undefined behavior of dereferencing a misaligned uint64_t* and still
 * compiles to a single load on x86-64.
 */
static inline uint64_t read_uint64_unaligned(void *pointer) {
	uint64_t value;
	memcpy(&value, pointer, sizeof(value));
	return value;
}

/* Write a uint64_t to a possibly-unaligned address (see read above). */
static inline void write_uint64_unaligned(void *pointer, uint64_t data) {
	memcpy(pointer, &data, sizeof(data));
}
|
mike-pt/xhyve
|
include/xhyve/mem.h
|
<reponame>mike-pt/xhyve
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#pragma once
#include <stdint.h>
#include <xhyve/support/linker_set.h>
typedef int (*mem_func_t)(int vcpu, int dir, uint64_t addr, int size,
uint64_t *val, void *arg1, long arg2);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
/*
 * A registered memory range and the handler invoked when the guest
 * accesses it (see register_mem / register_mem_fallback below).
 */
struct mem_range {
	const char *name;	/* identifier, for diagnostics */
	int flags;		/* MEM_F_* values below */
	mem_func_t handler;	/* access callback */
	void *arg1;		/* opaque arguments passed to handler */
	long arg2;
	uint64_t base;		/* start address of the range */
	uint64_t size;		/* length of the range in bytes */
};
#pragma clang diagnostic pop
#define MEM_F_READ 0x1
#define MEM_F_WRITE 0x2
#define MEM_F_RW 0x3
#define MEM_F_IMMUTABLE 0x4 /* mem_range cannot be unregistered */
void init_mem(void);
int emulate_mem(int vcpu, uint64_t paddr, struct vie *vie,
struct vm_guest_paging *paging);
int register_mem(struct mem_range *memp);
int register_mem_fallback(struct mem_range *memp);
int unregister_mem(struct mem_range *memp);
|
mike-pt/xhyve
|
include/xhyve/firmware/kexec.h
|
#pragma once
#include <stdint.h>
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpacked"
struct setup_header {
uint8_t setup_sects; /* The size of the setup in sectors */
uint16_t root_flags; /* If set, the root is mounted readonly */
uint32_t syssize; /* The size of the 32-bit code in 16-byte paras */
uint16_t ram_size; /* DO NOT USE - for bootsect.S use only */
uint16_t vid_mode; /* Video mode control */
uint16_t root_dev; /* Default root device number */
uint16_t boot_flag; /* 0xAA55 magic number */
uint16_t jump; /* Jump instruction */
uint32_t header; /* Magic signature "HdrS" */
uint16_t version; /* Boot protocol version supported */
uint32_t realmode_swtch; /* Boot loader hook (see below) */
uint16_t start_sys_seg; /* The load-low segment (0x1000) (obsolete) */
uint16_t kernel_version; /* Pointer to kernel version string */
uint8_t type_of_loader; /* Boot loader identifier */
uint8_t loadflags; /* Boot protocol option flags */
uint16_t setup_move_size; /* Move to high memory size (used with hooks) */
uint32_t code32_start; /* Boot loader hook (see below) */
uint32_t ramdisk_image; /* initrd load address (set by boot loader) */
uint32_t ramdisk_size; /* initrd size (set by boot loader) */
uint32_t bootsect_kludge; /* DO NOT USE - for bootsect.S use only */
uint16_t heap_end_ptr; /* Free memory after setup end */
uint8_t ext_loader_ver; /* Extended boot loader version */
uint8_t ext_loader_type; /* Extended boot loader ID */
uint32_t cmd_line_ptr; /* 32-bit pointer to the kernel command line */
uint32_t initrd_addr_max; /* Highest legal initrd address */
uint32_t kernel_alignment; /* Physical addr alignment required for kernel */
uint8_t relocatable_kernel; /* Whether kernel is relocatable or not */
uint8_t min_alignment; /* Minimum alignment, as a power of two */
uint16_t xloadflags; /* Boot protocol option flags */
uint32_t cmdline_size; /* Maximum size of the kernel command line */
uint32_t hardware_subarch; /* Hardware subarchitecture */
uint64_t hardware_subarch_data; /* Subarchitecture-specific data */
uint32_t payload_offset; /* Offset of kernel payload */
uint32_t payload_length; /* Length of kernel payload */
uint64_t setup_data; /* 64bit pointer to linked list of struct setup_data */
uint64_t pref_address; /* Preferred loading address */
uint32_t init_size; /* Linear memory required during initialization */
uint32_t handover_offset; /* Offset of handover entry point */
} __attribute__((packed));
/*
 * Linux "zero page" (boot_params): the page handed to the kernel in %rsi
 * on entry.  Field offsets follow the x86 Linux boot protocol; the packed
 * attribute and the _N pad members keep the layout exact (setup_header
 * must land at offset 0x1f1 — see the _5 pad computation below).
 */
struct zero_page {
	uint8_t screen_info[64];
	uint8_t apm_bios_info[20];
	uint8_t _0[4];
	uint64_t tboot_addr;
	uint8_t ist_info[16];
	uint8_t _1[16];
	uint8_t hd0_info[16];
	uint8_t hd1_info[16];
	uint8_t sys_desc_table[16];
	uint8_t olpc_ofw_header[16];
	uint32_t ext_ramdisk_image; /* high 32 bits of ramdisk address */
	uint32_t ext_ramdisk_size;  /* high 32 bits of ramdisk size */
	uint32_t ext_cmd_line_ptr;  /* high 32 bits of cmdline address */
	uint8_t _2[116];
	uint8_t edid_info[128];
	uint8_t efi_info[32];
	uint32_t alt_mem_k;         /* memory above 1 MiB, in KiB */
	uint32_t scratch;
	uint8_t e820_entries;       /* number of valid e820_map[] entries */
	uint8_t eddbuf_entries;
	uint8_t edd_mbr_sig_buf_entries;
	uint8_t kbd_status;
	uint8_t _3[3];
	uint8_t sentinel;
	uint8_t _4[1];
	struct setup_header setup_header; /* bzImage header, at offset 0x1f1 */
	uint8_t _5[(0x290 - 0x1f1 - sizeof(struct setup_header))];
	uint32_t edd_mbr_sig_buffer[16];
	struct {
		uint64_t addr; /* region base (guest physical) */
		uint64_t size; /* region length in bytes */
		uint32_t type; /* e820 type; 1 = usable RAM */
	} __attribute__((packed)) e820_map[128];
	uint8_t _6[48];
	uint8_t eddbuf[492];
	uint8_t _7[276];
} __attribute__((packed));
#pragma clang diagnostic pop
void kexec_init(char *kernel_path, char *initrd_path, char *cmdline);
uint64_t kexec(void);
|
mike-pt/xhyve
|
include/xhyve/vga.h
|
/*-
* Copyright (c) 2015 <NAME> <<EMAIL>>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _VGA_H_
#define _VGA_H_
#define VGA_IOPORT_START 0x3c0
#define VGA_IOPORT_END 0x3df
/* General registers */
#define GEN_INPUT_STS0_PORT 0x3c2
#define GEN_FEATURE_CTRL_PORT 0x3ca
#define GEN_MISC_OUTPUT_PORT 0x3cc
#define GEN_INPUT_STS1_MONO_PORT 0x3ba
#define GEN_INPUT_STS1_COLOR_PORT 0x3da
#define GEN_IS1_VR 0x08 /* Vertical retrace */
#define GEN_IS1_DE 0x01 /* Display enable not */
/* Attribute controller registers. */
#define ATC_IDX_PORT 0x3c0
#define ATC_DATA_PORT 0x3c1
#define ATC_IDX_MASK 0x1f
#define ATC_PALETTE0 0
#define ATC_PALETTE15 15
#define ATC_MODE_CONTROL 16
#define ATC_MC_IPS 0x80 /* Internal palette size */
#define ATC_MC_GA 0x01 /* Graphics/alphanumeric */
#define ATC_OVERSCAN_COLOR 17
#define ATC_COLOR_PLANE_ENABLE 18
#define ATC_HORIZ_PIXEL_PANNING 19
#define ATC_COLOR_SELECT 20
#define ATC_CS_C67 0x0c /* Color select bits 6+7 */
#define ATC_CS_C45 0x03 /* Color select bits 4+5 */
/* Sequencer registers. */
#define SEQ_IDX_PORT 0x3c4
#define SEQ_DATA_PORT 0x3c5
#define SEQ_RESET 0
#define SEQ_RESET_ASYNC 0x1
#define SEQ_RESET_SYNC 0x2
#define SEQ_CLOCKING_MODE 1
#define SEQ_CM_SO 0x20 /* Screen off */
#define SEQ_CM_89 0x01 /* 8/9 dot clock */
#define SEQ_MAP_MASK 2
#define SEQ_CHAR_MAP_SELECT 3
#define SEQ_CMS_SAH 0x20 /* Char map A bit 2 */
#define SEQ_CMS_SAH_SHIFT 5
#define SEQ_CMS_SA 0x0c /* Char map A bits 0+1 */
#define SEQ_CMS_SA_SHIFT 2
#define SEQ_CMS_SBH 0x10 /* Char map B bit 2 */
#define SEQ_CMS_SBH_SHIFT 4
#define SEQ_CMS_SB 0x03 /* Char map B bits 0+1 */
#define SEQ_CMS_SB_SHIFT 0
#define SEQ_MEMORY_MODE 4
#define SEQ_MM_C4 0x08 /* Chain 4 */
#define SEQ_MM_OE 0x04 /* Odd/even */
#define SEQ_MM_EM 0x02 /* Extended memory */
/* Graphics controller registers. */
#define GC_IDX_PORT 0x3ce
#define GC_DATA_PORT 0x3cf
#define GC_SET_RESET 0
#define GC_ENABLE_SET_RESET 1
#define GC_COLOR_COMPARE 2
#define GC_DATA_ROTATE 3
#define GC_READ_MAP_SELECT 4
#define GC_MODE 5
#define GC_MODE_OE 0x10 /* Odd/even */
#define GC_MODE_C4 0x04 /* Chain 4 */
#define GC_MISCELLANEOUS 6
#define GC_MISC_GM 0x01 /* Graphics/alphanumeric */
#define GC_MISC_MM 0x0c /* memory map */
#define GC_MISC_MM_SHIFT 2
#define GC_COLOR_DONT_CARE 7
#define GC_BIT_MASK 8
/* CRT controller registers. */
#define CRTC_IDX_MONO_PORT 0x3b4
#define CRTC_DATA_MONO_PORT 0x3b5
#define CRTC_IDX_COLOR_PORT 0x3d4
#define CRTC_DATA_COLOR_PORT 0x3d5
#define CRTC_HORIZ_TOTAL 0
#define CRTC_HORIZ_DISP_END 1
#define CRTC_START_HORIZ_BLANK 2
#define CRTC_END_HORIZ_BLANK 3
#define CRTC_START_HORIZ_RETRACE 4
#define CRTC_END_HORIZ_RETRACE 5
#define CRTC_VERT_TOTAL 6
#define CRTC_OVERFLOW 7
#define CRTC_OF_VRS9 0x80 /* VRS bit 9 */
#define CRTC_OF_VRS9_SHIFT 7
#define CRTC_OF_VDE9 0x40 /* VDE bit 9 */
#define CRTC_OF_VDE9_SHIFT 6
#define CRTC_OF_VRS8 0x04 /* VRS bit 8 */
#define CRTC_OF_VRS8_SHIFT 2
#define CRTC_OF_VDE8 0x02 /* VDE bit 8 */
#define CRTC_OF_VDE8_SHIFT 1
#define CRTC_PRESET_ROW_SCAN 8
#define CRTC_MAX_SCAN_LINE 9
#define CRTC_MSL_MSL 0x1f
#define CRTC_CURSOR_START 10
#define CRTC_CS_CO 0x20 /* Cursor off */
#define CRTC_CS_CS 0x1f /* Cursor start */
#define CRTC_CURSOR_END 11
#define CRTC_CE_CE 0x1f /* Cursor end */
#define CRTC_START_ADDR_HIGH 12
#define CRTC_START_ADDR_LOW 13
#define CRTC_CURSOR_LOC_HIGH 14
#define CRTC_CURSOR_LOC_LOW 15
#define CRTC_VERT_RETRACE_START 16
#define CRTC_VERT_RETRACE_END 17
#define CRTC_VRE_MASK 0xf
#define CRTC_VERT_DISP_END 18
#define CRTC_OFFSET 19
#define CRTC_UNDERLINE_LOC 20
#define CRTC_START_VERT_BLANK 21
#define CRTC_END_VERT_BLANK 22
#define CRTC_MODE_CONTROL 23
#define CRTC_MC_TE 0x80 /* Timing enable */
#define CRTC_LINE_COMPARE 24
/* DAC registers */
#define DAC_MASK 0x3c6
#define DAC_IDX_RD_PORT 0x3c7
#define DAC_IDX_WR_PORT 0x3c8
#define DAC_DATA_PORT 0x3c9
void *vga_init(int io_only);
void vga_render(struct bhyvegc *gc, void *arg);
#endif /* _VGA_H_ */
|
mike-pt/xhyve
|
src/firmware/kexec.c
|
/*-
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <xhyve/vmm/vmm_api.h>
#include <xhyve/firmware/kexec.h>
/*
 * Round 'x' up / down to a multiple of 'a'.  'a' must be a power of two;
 * the bit tricks below are only valid for power-of-two alignments.
 * Fix: parenthesize macro arguments — the previous expansion of
 * ~(a - 1) was wrong for expression arguments such as (1ull << 4).
 */
#ifndef ALIGNUP
#define ALIGNUP(x, a) ((((x) - 1) & ~((a) - 1)) + (a))
#endif
#ifndef ALIGNDOWN
#define ALIGNDOWN(x, a) (-(a) & (x))
#endif
#define BASE_GDT 0x2000ull
#define BASE_ZEROPAGE 0x3000ull
#define BASE_CMDLINE 0x4000ull
#define BASE_KERNEL 0x100000ull
#define HDRS 0x53726448 /* SrdH */
/*
 * Memory regions tracked by the loader.  lowmem.base is the host-virtual
 * mapping of guest-physical 0 (set in kexec()); kernel.* and ramdisk.*
 * record the guest-physical load address/size filled in by the load
 * routines.
 */
static struct {
	uintptr_t base;
	size_t size;
} lowmem, kernel, ramdisk;

/* Boot configuration captured by kexec_init() and consumed by kexec(). */
static struct {
	char *kernel;  /* path to the bzImage */
	char *initrd;  /* optional initrd path (may be NULL) */
	char *cmdline; /* optional kernel command line (may be NULL) */
} config;
/*
 * Load the Linux bzImage at 'path' into guest lowmem and prepare the
 * zero page (boot_params) for the 32-bit boot protocol.
 *
 * On success the protected-mode kernel is copied to its load address,
 * 'cmdline' is placed at BASE_CMDLINE, the e820 map is filled in, and
 * kernel.base/kernel.size are recorded.  Returns 0 on success, -1 if the
 * image is unusable or does not fit.
 */
static int
kexec_load_kernel(char *path, char *cmdline) {
	uint64_t kernel_offset, kernel_size, kernel_init_size, kernel_start, mem_k;
	size_t sz, cmdline_len;
	volatile struct zero_page *zp;
	FILE *f;

	/* the zero page must fit in lowmem and end below the cmdline area */
	if ((lowmem.size < (BASE_ZEROPAGE + sizeof(struct zero_page))) ||
	    ((BASE_ZEROPAGE + sizeof(struct zero_page)) > BASE_CMDLINE))
	{
		return -1;
	}

	zp = ((struct zero_page *) (lowmem.base + ((off_t) BASE_ZEROPAGE)));

	memset(((void *) ((uintptr_t) zp)), 0, sizeof(struct zero_page));

	if (!(f = fopen(path, "r"))) {
		return -1;
	}

	/* determine file size */
	fseek(f, 0L, SEEK_END);
	sz = (size_t) ftell(f);

	/* the image must at least contain a full setup_header (at 0x1f1) */
	if (sz < (0x01f1 + sizeof(struct setup_header))) {
		fclose(f);
		return -1;
	}

	/* read the bzImage setup_header straight into the zero page */
	fseek(f, 0x01f1, SEEK_SET);
	if (!fread(((void *) ((uintptr_t) &zp->setup_header)), 1,
		sizeof(zp->setup_header), f))
	{
		fclose(f);
		return -1;
	}

	/* sanity-check the header against the boot protocol we implement */
	if ((zp->setup_header.setup_sects == 0) ||    /* way way too old */
	    (zp->setup_header.boot_flag != 0xaa55) || /* no boot magic */
	    (zp->setup_header.header != HDRS) ||      /* way too old */
	    (zp->setup_header.version < 0x020a) ||    /* too old */
	    (!(zp->setup_header.loadflags & 1)) ||    /* no bzImage */
	    (sz < (((zp->setup_header.setup_sects + 1) * 512) +
	    (zp->setup_header.syssize * 16))))        /* too small */
	{
		/* we can't boot this kernel */
		fclose(f);
		return -1;
	}

	/* protected-mode code follows (setup_sects + 1) 512-byte sectors */
	kernel_offset = ((zp->setup_header.setup_sects + 1) * 512);
	kernel_size = (sz - kernel_offset);
	kernel_init_size = ALIGNUP(zp->setup_header.init_size, 0x1000ull);
	/* relocatable kernels load at BASE_KERNEL (suitably aligned),
	 * non-relocatable ones demand their preferred address */
	kernel_start = (zp->setup_header.relocatable_kernel) ?
		ALIGNUP(BASE_KERNEL, zp->setup_header.kernel_alignment) :
		zp->setup_header.pref_address;

	if ((kernel_start < BASE_KERNEL) ||
	    (kernel_size > kernel_init_size) || /* XXX: always true? */
	    ((kernel_start + kernel_init_size) > lowmem.size)) /* oom */
	{
		fclose(f);
		return -1;
	}

	/* copy kernel */
	fseek(f, ((long) kernel_offset), SEEK_SET);
	if (!fread(((void *) (lowmem.base + kernel_start)), 1, kernel_size, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);

	/* copy cmdline (must fit below the kernel and within cmdline_size) */
	cmdline_len = strlen(cmdline);
	if (((cmdline_len + 1) > zp->setup_header.cmdline_size) ||
	    ((BASE_CMDLINE + (cmdline_len + 1)) > kernel_start))
	{
		return -1;
	}

	memcpy(((void *) (lowmem.base + BASE_CMDLINE)), cmdline, cmdline_len);
	/* NUL-terminate the copied command line */
	memset(((void *) (lowmem.base + BASE_CMDLINE + cmdline_len)), '\0', 1);
	zp->setup_header.cmd_line_ptr = ((uint32_t) BASE_CMDLINE);
	zp->ext_cmd_line_ptr = ((uint32_t) (BASE_CMDLINE >> 32));

	zp->setup_header.hardware_subarch = 0; /* PC */
	zp->setup_header.type_of_loader = 0xd; /* kexec */

	/* alt_mem_k advertises memory above 1 MiB, in KiB */
	mem_k = (lowmem.size - 0x100000) >> 10; /* assume lowmem base is at 0 */
	zp->alt_mem_k = (mem_k > 0xffffffff) ? 0xffffffff : ((uint32_t) mem_k);

	/* e820: legacy RAM below 640 KiB, lowmem above 1 MiB ... */
	zp->e820_map[0].addr = 0x0000000000000000;
	zp->e820_map[0].size = 0x000000000009fc00;
	zp->e820_map[0].type = 1;
	zp->e820_map[1].addr = 0x0000000000100000;
	zp->e820_map[1].size = (lowmem.size - 0x0000000000100000);
	zp->e820_map[1].type = 1;

	if (xh_vm_get_highmem_size() == 0) {
		zp->e820_entries = 2;
	} else {
		/* ... and, if configured, highmem starting at 4 GiB */
		zp->e820_map[2].addr = 0x0000000100000000;
		zp->e820_map[2].size = xh_vm_get_highmem_size();
		zp->e820_map[2].type = 1;
		zp->e820_entries = 3;
	}

	kernel.base = kernel_start;
	kernel.size = kernel_init_size;

	return 0;
}
/*
 * Load the initrd/initramfs at 'path' as high as possible below the
 * kernel's initrd_addr_max limit (0x37ffffff for pre-2.03 boot protocols)
 * and publish its location in the zero page.
 *
 * Fixes: removed a stray ';' after the fopen check's '{', and rejected
 * empty/oversized images before 'initrd_max - sz' can wrap (both are
 * unsigned, so an image larger than initrd_max previously produced a
 * bogus huge ramdisk_start instead of a clean failure).
 *
 * Returns 0 on success, -1 on I/O failure or if the image does not fit.
 */
static int
kexec_load_ramdisk(char *path) {
	uint64_t ramdisk_start;
	uint32_t initrd_max;
	volatile struct zero_page *zp;
	size_t sz;
	FILE *f;

	zp = ((struct zero_page *) (lowmem.base + BASE_ZEROPAGE));

	if (!(f = fopen(path, "r"))) {
		return -1;
	}

	/* determine file size */
	fseek(f, 0L, SEEK_END);
	sz = (size_t) ftell(f);
	fseek(f, 0, SEEK_SET);

	/* highest address for loading the initrd */
	if (zp->setup_header.version >= 0x203) {
		initrd_max = zp->setup_header.initrd_addr_max;
	} else {
		initrd_max = 0x37ffffff; /* Hardcoded value for older kernels */
	}
	if (initrd_max >= lowmem.size) {
		initrd_max = ((uint32_t) lowmem.size - 1);
	}

	/* reject empty or oversized images before the subtraction below */
	if ((sz == 0) || (sz > initrd_max)) {
		fclose(f);
		return -1;
	}

	/* load the highest page-aligned address that still fits */
	ramdisk_start = ALIGNDOWN(initrd_max - sz, 0x1000ull);

	if ((ramdisk_start + sz) > lowmem.size) {
		/* not enough lowmem */
		fclose(f);
		return -1;
	}

	/* copy ramdisk */
	if (!fread(((void *) (lowmem.base + ramdisk_start)), 1, sz, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);

	/* publish location/size via the zero page (ext_* carry high words) */
	zp->setup_header.ramdisk_image = ((uint32_t) ramdisk_start);
	zp->ext_ramdisk_image = ((uint32_t) (ramdisk_start >> 32));
	zp->setup_header.ramdisk_size = ((uint32_t) sz);
	zp->ext_ramdisk_size = ((uint32_t) (sz >> 32));

	ramdisk.base = ramdisk_start;
	ramdisk.size = sz;

	return 0;
}
/*
 * Record the boot artifacts for the subsequent kexec() call.
 * 'initrd_path' and 'cmdline' may be NULL; kexec() skips the initrd and
 * substitutes a default command line respectively.
 */
void
kexec_init(char *kernel_path, char *initrd_path, char *cmdline) {
	config.cmdline = cmdline;
	config.initrd = initrd_path;
	config.kernel = kernel_path;
}
/*
 * Perform a Linux direct boot: load the kernel (and optional initrd),
 * install a minimal GDT at BASE_GDT, and place vcpu 0 in flat 32-bit
 * protected mode at the kernel entry point, as required by the Linux
 * boot protocol.  Returns the guest entry address (initial %rip).
 */
uint64_t
kexec(void)
{
	uint64_t *gdt_entry;
	void *gpa_map;

	/* map all of guest lowmem into our address space */
	gpa_map = xh_vm_map_gpa(0, xh_vm_get_lowmem_size());
	lowmem.base = (uintptr_t) gpa_map;
	lowmem.size = xh_vm_get_lowmem_size();

	if (kexec_load_kernel(config.kernel,
		config.cmdline ? config.cmdline : "auto"))
	{
		fprintf(stderr, "kexec: failed to load kernel %s\n", config.kernel);
		abort();
	}

	/* the initrd is optional */
	if (config.initrd && kexec_load_ramdisk(config.initrd)) {
		fprintf(stderr, "kexec: failed to load initrd %s\n", config.initrd);
		abort();
	}

	/* minimal GDT: null, null, 32-bit flat code, 32-bit flat data */
	gdt_entry = ((uint64_t *) (lowmem.base + BASE_GDT));
	gdt_entry[0] = 0x0000000000000000; /* null */
	gdt_entry[1] = 0x0000000000000000; /* null */
	gdt_entry[2] = 0x00cf9a000000ffff; /* code */
	gdt_entry[3] = 0x00cf92000000ffff; /* data */

	xh_vcpu_reset(0);

	/* segment selectors 0x10/0x18 index the code/data entries above */
	xh_vm_set_desc(0, VM_REG_GUEST_GDTR, BASE_GDT, 0x1f, 0);
	xh_vm_set_desc(0, VM_REG_GUEST_CS, 0, 0xffffffff, 0xc09b);
	xh_vm_set_desc(0, VM_REG_GUEST_DS, 0, 0xffffffff, 0xc093);
	xh_vm_set_desc(0, VM_REG_GUEST_ES, 0, 0xffffffff, 0xc093);
	xh_vm_set_desc(0, VM_REG_GUEST_SS, 0, 0xffffffff, 0xc093);
	xh_vm_set_register(0, VM_REG_GUEST_CS, 0x10);
	xh_vm_set_register(0, VM_REG_GUEST_DS, 0x18);
	xh_vm_set_register(0, VM_REG_GUEST_ES, 0x18);
	xh_vm_set_register(0, VM_REG_GUEST_SS, 0x18);
	xh_vm_set_register(0, VM_REG_GUEST_CR0, 0x21); /* enable protected mode */
	xh_vm_set_register(0, VM_REG_GUEST_RBP, 0);
	xh_vm_set_register(0, VM_REG_GUEST_RDI, 0);
	xh_vm_set_register(0, VM_REG_GUEST_RBX, 0);
	xh_vm_set_register(0, VM_REG_GUEST_RFLAGS, 0x2);
	/* the boot protocol wants %rsi pointing at the zero page on entry */
	xh_vm_set_register(0, VM_REG_GUEST_RSI, BASE_ZEROPAGE);
	xh_vm_set_register(0, VM_REG_GUEST_RIP, kernel.base);

	return kernel.base;
}
|
mike-pt/xhyve
|
src/vmm/vmm.c
|
<reponame>mike-pt/xhyve
/*-
* Copyright (c) 2011 NetApp, Inc.
* Copyright (c) 2015 xhyve developers
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <assert.h>
#include <xhyve/lock.h>
#include <xhyve/support/misc.h>
#include <xhyve/support/atomic.h>
#include <xhyve/support/cpuset.h>
#include <xhyve/support/psl.h>
#include <xhyve/support/specialreg.h>
#include <xhyve/support/apicreg.h>
#include <xhyve/vmm/vmm.h>
#include <xhyve/vmm/vmm_lapic.h>
#include <xhyve/vmm/vmm_mem.h>
#include <xhyve/vmm/vmm_ioport.h>
#include <xhyve/vmm/vmm_instruction_emul.h>
#include <xhyve/vmm/vmm_callout.h>
#include <xhyve/vmm/vmm_host.h>
#include <xhyve/vmm/vmm_stat.h>
#include <xhyve/vmm/vmm_ktr.h>
#include <xhyve/vmm/io/vatpic.h>
#include <xhyve/vmm/io/vatpit.h>
#include <xhyve/vmm/io/vioapic.h>
#include <xhyve/vmm/io/vlapic.h>
#include <xhyve/vmm/io/vhpet.h>
#include <xhyve/vmm/io/vpmtmr.h>
#include <xhyve/vmm/io/vrtc.h>
struct vlapic;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
/*
* Initialization:
* (a) allocated when vcpu is created
* (i) initialized when vcpu is created and when it is reinitialized
* (o) initialized the first time the vcpu is created
* (x) initialized before use
*/
struct vcpu {
	xhyve_lock_t lock; /* (o) protects 'state' */
	pthread_mutex_t state_sleep_mtx; /* (o) waiters for a state change */
	pthread_cond_t state_sleep_cnd;  /* (o) signalled on return to IDLE */
	pthread_mutex_t vcpu_sleep_mtx;  /* (o) sleep/wake for a halted vcpu */
	pthread_cond_t vcpu_sleep_cnd;
	enum vcpu_state state; /* (o) vcpu state */
	struct vlapic *vlapic; /* (i) APIC device model */
	enum x2apic_state x2apic_state; /* (i) APIC mode */
	uint64_t exitintinfo; /* (i) events pending at VM exit */
	int nmi_pending; /* (i) NMI pending */
	int extint_pending; /* (i) INTR pending */
	int exception_pending; /* (i) exception pending */
	int exc_vector; /* (x) exception collateral */
	int exc_errcode_valid; /* (x) non-zero if exc_errcode applies */
	uint32_t exc_errcode;  /* (x) exception error code */
	uint64_t guest_xcr0; /* (i) guest %xcr0 register */
	void *stats; /* (a,i) statistics */
	struct vm_exit exitinfo; /* (x) exit reason and collateral */
	uint64_t nextrip; /* (x) next instruction to execute */
};
#define vcpu_lock_init(v) XHYVE_LOCK_INIT(v, lock)
#define vcpu_lock(v) XHYVE_LOCK(v, lock)
#define vcpu_unlock(v) XHYVE_UNLOCK(v, lock)
/* A contiguous guest-physical memory segment and its backing object. */
struct mem_seg {
	uint64_t gpa;  /* guest-physical base address */
	size_t len;    /* length in bytes */
	void *object;  /* backing allocation; NULL marks an unused slot */
};
#define VM_MAX_MEMORY_SEGMENTS 4
/*
* Initialization:
* (o) initialized the first time the VM is created
* (i) initialized when VM is created and when it is reinitialized
* (x) initialized before use
*/
struct vm {
	void *cookie; /* (i) cpu-specific data */
	struct vhpet *vhpet; /* (i) virtual HPET */
	struct vioapic *vioapic; /* (i) virtual ioapic */
	struct vatpic *vatpic; /* (i) virtual atpic */
	struct vatpit *vatpit; /* (i) virtual atpit */
	struct vpmtmr *vpmtmr; /* (i) virtual ACPI PM timer */
	struct vrtc *vrtc; /* (o) virtual RTC */
	volatile cpuset_t active_cpus; /* (i) active vcpus */
	int suspend; /* (i) stop VM execution */
	volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
	volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
	cpuset_t rendezvous_req_cpus; /* (x) rendezvous requested */
	cpuset_t rendezvous_done_cpus; /* (x) rendezvous finished */
	void *rendezvous_arg; /* (x) rendezvous func/arg */
	vm_rendezvous_func_t rendezvous_func; /* (x) active rendezvous callback */
	pthread_mutex_t rendezvous_mtx; /* (o) rendezvous lock */
	pthread_cond_t rendezvous_sleep_cnd; /* (o) rendezvous wait channel */
	int num_mem_segs; /* (o) guest memory segments */
	struct mem_seg mem_segs[VM_MAX_MEMORY_SEGMENTS]; /* (o) guest memory */
	struct vcpu vcpu[VM_MAXCPU]; /* (i) guest vcpus */
};
#pragma clang diagnostic pop
static int vmm_initialized;
static struct vmm_ops *ops;
#define VMM_INIT() \
(*ops->init)()
#define VMM_CLEANUP() \
(*ops->cleanup)()
#define VM_INIT(vmi) \
(*ops->vm_init)(vmi)
#define VCPU_INIT(vmi, vcpu) \
(*ops->vcpu_init)(vmi, vcpu)
#define VMRUN(vmi, vcpu, rip, rptr, sptr) \
(*ops->vmrun)(vmi, vcpu, rip, rptr, sptr)
#define VM_CLEANUP(vmi) \
(*ops->vm_cleanup)(vmi)
#define VCPU_CLEANUP(vmi, vcpu) \
(*ops->vcpu_cleanup)(vmi, vcpu)
#define VMGETREG(vmi, vcpu, num, retval) \
(*ops->vmgetreg)(vmi, vcpu, num, retval)
#define VMSETREG(vmi, vcpu, num, val) \
(*ops->vmsetreg)(vmi, vcpu, num, val)
#define VMGETDESC(vmi, vcpu, num, desc) \
(*ops->vmgetdesc)(vmi, vcpu, num, desc)
#define VMSETDESC(vmi, vcpu, num, desc) \
(*ops->vmsetdesc)(vmi, vcpu, num, desc)
#define VMGETCAP(vmi, vcpu, num, retval) \
(*ops->vmgetcap)(vmi, vcpu, num, retval)
#define VMSETCAP(vmi, vcpu, num, val) \
(*ops->vmsetcap)(vmi, vcpu, num, val)
#define VLAPIC_INIT(vmi, vcpu) \
(*ops->vlapic_init)(vmi, vcpu)
#define VLAPIC_CLEANUP(vmi, vlapic) \
(*ops->vlapic_cleanup)(vmi, vlapic)
#define VCPU_INTERRUPT(vcpu) \
(*ops->vcpu_interrupt)(vcpu)
/* statistics */
//static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
/*
* Halt the guest if all vcpus are executing a HLT instruction with
* interrupts disabled.
*/
static int halt_detection_enabled = 1;
static int trace_guest_exceptions = 0;
/*
 * Tear down per-vcpu state.  The vlapic model is always released (it is
 * rebuilt on reinit); the stats buffer is freed only on full destroy.
 */
static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
	struct vcpu *const v = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, v->vlapic);

	if (!destroy)
		return;

	vmm_stat_free(v->stats);
}
/*
 * (Re)initialize vcpu state.  When 'create' is set the one-time
 * resources (locks, condvars, stats buffer) are allocated too; otherwise
 * only the per-reset state is reinitialized.
 */
static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
	struct vcpu *vcpu;

	KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
		("vcpu_init: invalid vcpu %d", vcpu_id));

	vcpu = &vm->vcpu[vcpu_id];

	if (create) {
		vcpu_lock_init(vcpu);
		pthread_mutex_init(&vcpu->state_sleep_mtx, NULL);
		pthread_cond_init(&vcpu->state_sleep_cnd, NULL);
		pthread_mutex_init(&vcpu->vcpu_sleep_mtx, NULL);
		pthread_cond_init(&vcpu->vcpu_sleep_cnd, NULL);
		vcpu->state = VCPU_IDLE;
		vcpu->stats = vmm_stat_alloc();
	}

	/* the vlapic model is rebuilt on every (re)initialization */
	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
	vcpu->exitintinfo = 0;
	vcpu->nmi_pending = 0;
	vcpu->extint_pending = 0;
	vcpu->exception_pending = 0;
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	vmm_stat_init(vcpu->stats);
}
/* Backend-initialize vcpu 'vcpu'; aborts the process on a bad id. */
int vcpu_create(struct vm *vm, int vcpu) {
	if (!(vcpu >= 0 && vcpu < VM_MAXCPU)) {
		xhyve_abort("vcpu_create: invalid cpuid %d\n", vcpu);
	}
	return VCPU_INIT(vm->cookie, vcpu);
}
void vcpu_destroy(struct vm *vm, int vcpu) {
if (vcpu < 0 || vcpu >= VM_MAXCPU)
xhyve_abort("vcpu_destroy: invalid cpuid %d\n", vcpu);
VCPU_CLEANUP(vm, vcpu);
}
/* Report whether guest exceptions should be traced (module-wide knob). */
int
vcpu_trace_exceptions(void)
{
	return trace_guest_exceptions;
}
/*
 * Return a pointer to the exit-information record of 'cpuid'.
 * Aborts the process on an out-of-range vcpu id.
 */
struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		xhyve_abort("vm_exitinfo: invalid cpuid %d\n", cpuid);

	return &vm->vcpu[cpuid].exitinfo;
}
/*
 * One-time module initialization: host state, the guest memory subsystem
 * and the backend, in that order.  Sets 'vmm_initialized' on success,
 * which gates vm_create().  Returns 0 or an error from a sub-init.
 */
int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	error = vmm_mem_init();
	if (error)
		return (error);

	/* only the Intel backend exists here */
	ops = &vmm_ops_intel;

	error = VMM_INIT();
	if (error == 0)
		vmm_initialized = 1;

	return (error);
}
int
vmm_cleanup(void) {
int error;
error = VMM_CLEANUP();
if (error == 0)
vmm_initialized = 0;
return error;
}
/*
 * (Re)initialize VM-wide state and all virtual devices.  With 'create'
 * set, the one-time resources (callout system, virtual RTC) are created
 * as well; on reinit they are reused.
 */
static void
vm_init(struct vm *vm, bool create)
{
	int vcpu;

	if (create) {
		callout_system_init();
	}

	vm->cookie = VM_INIT(vm);

	/* virtual device models */
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);
	vm->vatpit = vatpit_init(vm);
	vm->vpmtmr = vpmtmr_init(vm);
	if (create) {
		vm->vrtc = vrtc_init(vm);
	}

	CPU_ZERO(&vm->active_cpus);

	vm->suspend = 0;
	CPU_ZERO(&vm->suspended_cpus);

	for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++) {
		vcpu_init(vm, vcpu, create);
	}
}
/*
 * Allocate and initialize a new VM.  Fails with ENXIO if the module has
 * not been initialized.  On success '*retvm' holds the new VM.
 */
int
vm_create(struct vm **retvm)
{
	struct vm *vm;

	if (!vmm_initialized)
		return (ENXIO);

	/* calloc yields the same zeroed allocation as malloc + bzero */
	vm = calloc(1, sizeof(struct vm));
	assert(vm);

	vm->num_mem_segs = 0;
	pthread_mutex_init(&vm->rendezvous_mtx, NULL);
	pthread_cond_init(&vm->rendezvous_sleep_cnd, NULL);

	vm_init(vm, true);

	*retvm = vm;
	return (0);
}
/* Release a segment's backing object (if any) and clear the slot. */
static void
vm_free_mem_seg(struct mem_seg *seg)
{
	if (seg->object != NULL)
		vmm_mem_free(seg->gpa, seg->len, seg->object);

	bzero(seg, sizeof(*seg));
}
/*
 * Tear down vcpus and virtual devices (reverse of vm_init).  With
 * 'destroy' false this is a reset: the RTC is only reset (it survives
 * reinit) and guest memory segments are retained.
 */
static void
vm_cleanup(struct vm *vm, bool destroy)
{
	int i, vcpu;

	for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++) {
		vcpu_cleanup(vm, vcpu, destroy);
	}

	if (destroy) {
		vrtc_cleanup(vm->vrtc);
	} else {
		vrtc_reset(vm->vrtc);
	}
	vpmtmr_cleanup(vm->vpmtmr);
	vatpit_cleanup(vm->vatpit);
	vhpet_cleanup(vm->vhpet);
	vatpic_cleanup(vm->vatpic);
	vioapic_cleanup(vm->vioapic);

	VM_CLEANUP(vm->cookie);

	if (destroy) {
		/* guest memory is released only on full destruction */
		for (i = 0; i < vm->num_mem_segs; i++) {
			vm_free_mem_seg(&vm->mem_segs[i]);
		}
		vm->num_mem_segs = 0;
	}
}
/* Fully tear down 'vm' and free its allocation. */
void
vm_destroy(struct vm *vm)
{
	vm_cleanup(vm, true);
	free(vm);
}
/*
 * Reinitialize a VM in place.
 *
 * A virtual machine can be reset only if all vcpus are suspended;
 * otherwise EBUSY is returned.
 */
int
vm_reinit(struct vm *vm)
{
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) != 0)
		return (EBUSY);

	vm_cleanup(vm, false);
	vm_init(vm, false);
	return (0);
}
/* Human-readable VM name; a fixed string here (the argument is unused). */
const char *
vm_name(UNUSED struct vm *vm)
{
	return "VM";
}
/*
 * True when guest-physical address 'gpa' falls inside any registered
 * regular-memory segment.
 */
bool
vm_mem_allocated(struct vm *vm, uint64_t gpa)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		const uint64_t base = vm->mem_segs[i].gpa;
		const uint64_t limit = base + vm->mem_segs[i].len;

		if (gpa >= base && gpa < limit)
			return (TRUE); /* 'gpa' is regular memory */
	}

	return (FALSE);
}
/*
 * Allocate guest memory backing [gpa, gpa+len).  'gpa' and 'len' must be
 * page aligned and 'len' non-zero.  Succeeds trivially when the whole
 * range is already allocated; fails with EINVAL for a partially
 * allocated range, E2BIG when the segment table is full, and ENOMEM when
 * backing memory cannot be obtained.
 */
int
vm_malloc(struct vm *vm, uint64_t gpa, size_t len, uint64_t prot)
{
	int available, allocated;
	struct mem_seg *seg;
	void *object;
	uint64_t g;

	if ((gpa & XHYVE_PAGE_MASK) || (len & XHYVE_PAGE_MASK) || len == 0)
		return (EINVAL);

	/* classify every page of the request as allocated or available */
	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;

		g += XHYVE_PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(gpa, len, prot)) == NULL)
		return (ENOMEM);

	/* record the new segment only after the allocation succeeded */
	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;

	vm->num_mem_segs++;

	return (0);
}
/*
 * Translate guest-physical range [gpa, gpa+len) to a host virtual
 * address, or NULL when no single segment backs the whole range.
 */
void *
vm_gpa2hva(struct vm *vm, uint64_t gpa, uint64_t len) {
	uint64_t offset;
	void *base;

	if (vm_get_memobj(vm, gpa, len, &offset, &base) != 0)
		return NULL;

	return (void *) (((uintptr_t) base) + offset);
}
/*
 * Copy the extent of the memory segment starting exactly at 'gpabase'
 * into 'seg'.  Returns 0 on success, -1 when no segment matches.
 */
int
vm_gpabase2memseg(struct vm *vm, uint64_t gpabase,
	struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (vm->mem_segs[i].gpa != gpabase)
			continue;
		seg->gpa = vm->mem_segs[i].gpa;
		seg->len = vm->mem_segs[i].len;
		return (0);
	}

	return (-1);
}
/*
 * Find the segment object backing guest-physical range [gpa, gpa+len).
 * On success stores the intra-object offset and the object pointer and
 * returns 0; returns EINVAL when no single segment covers the range.
 */
int
vm_get_memobj(struct vm *vm, uint64_t gpa, size_t len,
	uint64_t *offset, void **object)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		void *obj = vm->mem_segs[i].object;
		uint64_t base = vm->mem_segs[i].gpa;
		size_t seglen = vm->mem_segs[i].len;

		if (obj == NULL)
			continue;
		/* the whole requested range must lie inside this segment */
		if ((gpa < base) || ((gpa + len) > (base + seglen)))
			continue;
		*offset = gpa - base;
		*object = obj;
		return (0);
	}

	return (EINVAL);
}
/*
 * Read guest register 'reg' of 'vcpu' into '*retval'.
 * Returns EINVAL for an out-of-range vcpu id or register number.
 */
int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU || reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}
/*
 * Write guest register 'reg' of 'vcpuid'.  Returns EINVAL for a bad vcpu
 * id or register.  A successful write of %rip also updates the cached
 * 'nextrip' so instruction-restart bookkeeping stays consistent.
 */
int
vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
{
	struct vcpu *vcpu;
	int error;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	error = VMSETREG(vm->cookie, vcpuid, reg, val);
	if (error || reg != VM_REG_GUEST_RIP)
		return (error);

	/* Set 'nextrip' to match the value of %rip */
	VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#llx", val);
	vcpu = &vm->vcpu[vcpuid];
	vcpu->nextrip = val;
	return (0);
}
/* Is 'reg' one of the two descriptor-table registers (IDTR/GDTR)? */
static bool
is_descriptor_table(int reg)
{
	return (reg == VM_REG_GUEST_IDTR || reg == VM_REG_GUEST_GDTR);
}
/* Is 'reg' a guest segment register (including TR and LDTR)? */
static bool
is_segment_register(int reg)
{
	return (reg == VM_REG_GUEST_ES || reg == VM_REG_GUEST_CS ||
	    reg == VM_REG_GUEST_SS || reg == VM_REG_GUEST_DS ||
	    reg == VM_REG_GUEST_FS || reg == VM_REG_GUEST_GS ||
	    reg == VM_REG_GUEST_TR || reg == VM_REG_GUEST_LDTR);
}
/*
 * Read the descriptor of segment/table register 'reg' into 'desc'.
 * Returns EINVAL unless 'reg' is a segment or descriptor-table register.
 */
int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
	struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!(is_segment_register(reg) || is_descriptor_table(reg)))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}
/*
 * Write the descriptor of segment/table register 'reg' from 'desc'.
 * Returns EINVAL unless 'reg' is a segment or descriptor-table register.
 */
int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
	struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!(is_segment_register(reg) || is_descriptor_table(reg)))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}
// static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
/*
 * Perform a vcpu state transition with the vcpu lock held.  When
 * 'from_idle' is set the caller is an ioctl-style operation and first
 * waits (in 1-second slices) for the vcpu to return to IDLE.  Returns 0
 * on success or EBUSY when the requested transition is not allowed.
 */
static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
	bool from_idle)
{
	int error;
	const struct timespec ts = {.tv_sec = 1, .tv_nsec = 0}; /* 1 second */

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE) {
			/* drop the vcpu lock while sleeping, retake it after */
			pthread_mutex_lock(&vcpu->state_sleep_mtx);
			vcpu_unlock(vcpu);
			pthread_cond_timedwait_relative_np(&vcpu->state_sleep_cnd,
				&vcpu->state_sleep_mtx, &ts);
			vcpu_lock(vcpu);
			pthread_mutex_unlock(&vcpu->state_sleep_mtx);
			//msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
		}
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
			"vcpu idle state"));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;

	/* wake the from_idle waiters blocked in the loop above */
	if (newstate == VCPU_IDLE)
		pthread_cond_broadcast(&vcpu->state_sleep_cnd);
		//wakeup(&vcpu->state);

	return (0);
}
static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
int error;
if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
xhyve_abort("Error %d setting state to %d\n", error, newstate);
}
static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
int error;
if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
xhyve_abort("Error %d setting state to %d", error, newstate);
}
/* Publish the active rendezvous function; NULL clears the rendezvous. */
static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{
	/*
	 * Update 'rendezvous_func' and execute a write memory barrier to
	 * ensure that it is visible across all host cpus. This is not needed
	 * for correctness but it does ensure that all the vcpus will notice
	 * that the rendezvous is requested immediately.
	 */
	vm->rendezvous_func = func;
	wmb();
}
#define RENDEZVOUS_CTR0(vm, vcpuid, fmt) \
do { \
if (vcpuid >= 0) {\
VCPU_CTR0(vm, vcpuid, fmt); \
} else {\
VM_CTR0(vm, fmt); \
} \
} while (0)
/*
 * Participate in (and possibly complete) an active rendezvous.
 * 'vcpuid' is the participating vcpu, or -1 when called from a non-vcpu
 * thread.  Blocks until the rendezvous has completed.
 */
static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{
	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
		("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	pthread_mutex_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

		/* run the callback once per requested, not-yet-done vcpu */
		if (vcpuid != -1 &&
			CPU_ISSET(((unsigned) vcpuid), &vm->rendezvous_req_cpus) &&
			!CPU_ISSET(((unsigned) vcpuid), &vm->rendezvous_done_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(((unsigned) vcpuid), &vm->rendezvous_done_cpus);
		}
		/* last participant clears the rendezvous and wakes the rest */
		if (CPU_CMP(&vm->rendezvous_req_cpus,
			&vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			pthread_cond_broadcast(&vm->rendezvous_sleep_cnd);
			//wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
		pthread_cond_wait(&vm->rendezvous_sleep_cnd, &vm->rendezvous_mtx);
		//mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, "vmrndv", 0);
	}
	pthread_mutex_unlock(&vm->rendezvous_mtx);
}
/*
* Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
*/
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled)
{
	struct vcpu *vcpu;
	const char *wmesg; /* legacy sleep tag; unused since msleep_spin was replaced */
	int vcpu_halted, vm_halted;
	const struct timespec ts = {.tv_sec = 1, .tv_nsec = 0}; /* 1 second */

	KASSERT(!CPU_ISSET(((unsigned) vcpuid), &vm->halted_cpus),
		("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	vcpu_lock(vcpu);
	/* sleep in 1-second slices until an event requires this vcpu to run */
	while (1) {
		/*
		 * Do a final check for pending NMI or interrupts before
		 * really putting this thread to sleep. Also check for
		 * software events that would cause this vcpu to wakeup.
		 *
		 * These interrupts/events could have happened after the
		 * vcpu returned from VMRUN() and before it acquired the
		 * vcpu lock above.
		 */
		if (vm->rendezvous_func != NULL || vm->suspend)
			break;
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
				vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
		 * track of the vcpus that have entered this state. When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			wmesg = "vmhalt";
			VCPU_CTR0(vm, vcpuid, "Halted");
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(((unsigned) vcpuid), &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		} else {
			wmesg = "vmidle";
		}

		//t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		/*
		 * XXX msleep_spin() cannot be interrupted by signals so
		 * wake up periodically to check pending signals.
		 */
		/* drop the vcpu lock while sleeping, retake it on wakeup */
		pthread_mutex_lock(&vcpu->vcpu_sleep_mtx);
		vcpu_unlock(vcpu);
		pthread_cond_timedwait_relative_np(&vcpu->vcpu_sleep_cnd,
			&vcpu->vcpu_sleep_mtx, &ts);
		vcpu_lock(vcpu);
		pthread_mutex_unlock(&vcpu->vcpu_sleep_mtx);
		//msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		//vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(((unsigned) vcpuid), &vm->halted_cpus);

	vcpu_unlock(vcpu);

	/* if every active vcpu halted, suspend the whole machine */
	if (vm_halted)
		vm_suspend(vm, VM_SUSPEND_HALT);

	return (0);
}
/*
 * Handle an instruction-emulation exit: fetch the faulting instruction
 * (unless its bytes were already captured), decode it, and emulate it
 * against one of the in-kernel MMIO devices (local APIC, IOAPIC, HPET).
 * Accesses to any other GPA are bounced to userspace via *retu = true.
 * Returns 0 on success or an errno-style error from fetch/emulation.
 */
static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t gla, gpa, cs_base;
	struct vm_guest_paging *paging;
	mem_region_read_t mread;
	mem_region_write_t mwrite;
	enum vm_cpu_mode cpu_mode;
	int cs_d, error, fault, length;
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cs_base = vme->u.inst_emul.cs_base;
	cs_d = vme->u.inst_emul.cs_d;
	vie = &vme->u.inst_emul.vie;
	paging = &vme->u.inst_emul.paging;
	cpu_mode = paging->cpu_mode;
	VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#llx", gpa);
	/* Fetch, decode and emulate the faulting instruction */
	if (vie->num_valid == 0) {
		/*
		 * If the instruction length is not known then assume a
		 * maximum size instruction.
		 */
		length = vme->inst_length ? vme->inst_length : VIE_INST_SIZE;
		error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
			cs_base, length, vie, &fault);
	} else {
		/*
		 * The instruction bytes have already been copied into 'vie'
		 */
		error = fault = 0;
	}
	if (error || fault)
		return (error);
	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) {
		VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#llx",
			vme->rip + cs_base);
		*retu = true;	/* dump instruction bytes in userspace */
		return (0);
	}
	/*
	 * If the instruction length was not specified then update it now
	 * along with 'nextrip'.
	 */
	if (vme->inst_length == 0) {
		vme->inst_length = vie->num_processed;
		vcpu->nextrip += vie->num_processed;
	}
	/* return to userland unless this is an in-kernel emulated device */
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + XHYVE_PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = true;
		return (0);
	}
	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
		mread, mwrite, retu);
	return (error);
}
/*
 * Handle a suspend exit for 'vcpuid': mark it suspended, then wait until
 * every active vcpu has done the same. Pending rendezvous are serviced
 * while waiting to avoid deadlock. Once all vcpus are suspended the other
 * sleepers are woken and the exit is bounced to userspace (*retu = true).
 * Always returns 0.
 */
static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
	int i, done;
	struct vcpu *vcpu;
	const struct timespec ts = {.tv_sec = 1, .tv_nsec = 0}; /* 1 second */
	done = 0;
	vcpu = &vm->vcpu[vcpuid];
	CPU_SET_ATOMIC(((unsigned) vcpuid), &vm->suspended_cpus);
	/*
	 * Wait until all 'active_cpus' have suspended themselves.
	 *
	 * Since a VM may be suspended at any time including when one or
	 * more vcpus are doing a rendezvous we need to call the rendezvous
	 * handler while we are waiting to prevent a deadlock.
	 */
	vcpu_lock(vcpu);
	while (1) {
		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
			break;
		}
		if (vm->rendezvous_func == NULL) {
			VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
			vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
			/* Timed 1s wait so the all-suspended check re-runs. */
			pthread_mutex_lock(&vcpu->vcpu_sleep_mtx);
			vcpu_unlock(vcpu);
			pthread_cond_timedwait_relative_np(&vcpu->vcpu_sleep_cnd,
				&vcpu->vcpu_sleep_mtx, &ts);
			vcpu_lock(vcpu);
			pthread_mutex_unlock(&vcpu->vcpu_sleep_mtx);
			//msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
			vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		} else {
			VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
			vcpu_unlock(vcpu);
			vm_handle_rendezvous(vm, vcpuid);
			vcpu_lock(vcpu);
		}
	}
	vcpu_unlock(vcpu);
	/*
	 * Wakeup the other sleeping vcpus and return to userspace.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(((unsigned) i), &vm->suspended_cpus)) {
			vcpu_notify_event(vm, i, false);
		}
	}
	*retu = true;
	return (0);
}
/*
 * Initiate VM suspension with reason 'how'. The reason is latched into
 * vm->suspend with an atomic compare-and-set so only the first caller
 * wins; later callers get EALREADY. Active vcpus are then notified so
 * they exit the guest promptly. Returns 0, EINVAL (bad 'how') or EALREADY.
 */
int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
	int i;
	if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
		return (EINVAL);
	/* Atomically transition suspend 0 -> 'how'; fails if already set. */
	if (atomic_cmpset_int(((volatile u_int *) &vm->suspend), 0, how) == 0) {
		VM_CTR2(vm, "virtual machine already suspended %d/%d",
			vm->suspend, how);
		return (EALREADY);
	}
	VM_CTR1(vm, "virtual machine successfully suspended %d", how);
	/*
	 * Notify all active vcpus that they are now suspended.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(((unsigned) i), &vm->active_cpus))
			vcpu_notify_event(vm, i, false);
	}
	return (0);
}
/*
 * Populate 'vcpuid's exit info for a suspend exit at guest 'rip'.
 * inst_length is zeroed so the guest resumes at the same instruction.
 */
void
vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;
	KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
		("vm_exit_suspended: invalid suspend type %d", vm->suspend));
	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_SUSPENDED;
	vmexit->u.suspended.how = (enum vm_suspend_how) vm->suspend;
}
/*
 * Populate 'vcpuid's exit info for a rendezvous exit at guest 'rip' and
 * bump the rendezvous exit statistic. inst_length is zeroed so the guest
 * resumes at the same instruction after the rendezvous completes.
 */
void
vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;
	KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));
	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
}
void pittest(struct vm *thevm); /* forward declaration; not used below */
/*
 * Main vcpu execution loop. Repeatedly enters the guest via VMRUN() and
 * dispatches the resulting exits to in-kernel handlers; the loop ends when
 * a handler fails (error != 0) or asks for userspace involvement
 * (retu == true), in which case the exit info is copied to 'vm_exit'.
 * Returns 0 or an errno-style error.
 */
int
vm_run(struct vm *vm, int vcpuid, struct vm_exit *vm_exit)
{
	int error;
	struct vcpu *vcpu;
//	uint64_t tscval;
	struct vm_exit *vme;
	bool retu, intr_disabled;
	void *rptr, *sptr;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);
	if (!CPU_ISSET(((unsigned) vcpuid), &vm->active_cpus))
		return (EINVAL);
	if (CPU_ISSET(((unsigned) vcpuid), &vm->suspended_cpus))
		return (EINVAL);
	/* Pointers the backend polls for pending rendezvous/suspend. */
	rptr = &vm->rendezvous_func;
	sptr = &vm->suspend;
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	retu = false;
restart:
//	tscval = rdtsc();
	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	error = VMRUN(vm->cookie, vcpuid, (register_t) vcpu->nextrip, rptr, sptr);
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
//	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
	if (error == 0) {
		retu = false;
		/* Default resume point: just past the exiting instruction. */
		vcpu->nextrip = vme->rip + ((unsigned) vme->inst_length);
		switch (((int) (vme->exitcode))) {
		case VM_EXITCODE_SUSPENDED:
			error = vm_handle_suspend(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_IOAPIC_EOI:
			vioapic_process_eoi(vm, vcpuid,
				vme->u.ioapic_eoi.vector);
			break;
		case VM_EXITCODE_RENDEZVOUS:
			vm_handle_rendezvous(vm, vcpuid);
			error = 0;
			break;
		case VM_EXITCODE_HLT:
			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
			error = vm_handle_hlt(vm, vcpuid, intr_disabled);
			break;
		case VM_EXITCODE_PAGING:
			error = 0;
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INOUT:
		case VM_EXITCODE_INOUT_STR:
			error = vm_handle_inout(vm, vcpuid, vme, &retu);
			break;
		case VM_EXITCODE_MONITOR:
		case VM_EXITCODE_MWAIT:
			/* MONITOR/MWAIT are not exposed; inject #UD. */
			vm_inject_ud(vm, vcpuid);
			break;
		default:
			retu = true;	/* handled in userland */
			break;
		}
	}
	if (error == 0 && retu == false)
		goto restart;
	/* copy the exit information (FIXME: zero copy) */
	bcopy(vme, vm_exit, sizeof(struct vm_exit));
	return (error);
}
/*
 * Arrange for 'vcpuid' to re-execute its current instruction, using the
 * mechanism appropriate for its state (see the per-branch comments).
 * Returns 0, or EINVAL for an out-of-range vcpuid; aborts on any other
 * vcpu state.
 */
int
vm_restart_instruction(void *arg, int vcpuid)
{
	struct vm *vm;
	struct vcpu *vcpu;
	enum vcpu_state state;
	uint64_t rip;
	int error;
	vm = arg;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);
	vcpu = &vm->vcpu[vcpuid];
	state = vcpu_get_state(vm, vcpuid);
	if (state == VCPU_RUNNING) {
		/*
		 * When a vcpu is "running" the next instruction is determined
		 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
		 * Thus setting 'inst_length' to zero will cause the current
		 * instruction to be restarted.
		 */
		vcpu->exitinfo.inst_length = 0;
		VCPU_CTR1(vm, vcpuid, "restarting instruction at %#llx by "
			"setting inst_length to zero", vcpu->exitinfo.rip);
	} else if (state == VCPU_FROZEN) {
		/*
		 * When a vcpu is "frozen" it is outside the critical section
		 * around VMRUN() and 'nextrip' points to the next instruction.
		 * Thus instruction restart is achieved by setting 'nextrip'
		 * to the vcpu's %rip.
		 */
		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
		KASSERT(!error, ("%s: error %d getting rip", __func__, error));
		VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
			"nextrip from %#llx to %#llx", vcpu->nextrip, rip);
		vcpu->nextrip = rip;
	} else {
		xhyve_abort("%s: invalid state %d\n", __func__, state);
	}
	return (0);
}
/*
 * Record exit interrupt info 'info' for 'vcpuid' after validating its
 * encoding (NMI must use vector IDT_NMI, hardware-exception vectors must
 * be < 32, reserved bits must be clear). Invalid 'valid' bit clears the
 * stored info. Returns 0 or EINVAL.
 */
int
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
{
	struct vcpu *vcpu;
	int type, vector;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);
	vcpu = &vm->vcpu[vcpuid];
	if (info & VM_INTINFO_VALID) {
		type = info & VM_INTINFO_TYPE;
		vector = info & 0xff;
		if (type == VM_INTINFO_NMI && vector != IDT_NMI)
			return (EINVAL);
		if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
			return (EINVAL);
		if (info & VM_INTINFO_RSVD)
			return (EINVAL);
	} else {
		info = 0;
	}
	VCPU_CTR2(vm, vcpuid, "%s: info1(%#llx)", __func__, info);
	vcpu->exitintinfo = info;
	return (0);
}
/*
 * Exception classes used to decide whether two back-to-back events must
 * be merged into a double fault (Intel SDM Vol 3, Table 6-4).
 */
enum exc_class {
	EXC_BENIGN,
	EXC_CONTRIBUTORY,
	EXC_PAGEFAULT
};
#define	IDT_VE	20	/* Virtualization Exception (Intel specific) */
/*
 * Classify the event encoded in 'info' as benign, contributory or a page
 * fault, per the Intel SDM interrupt/exception class tables.
 */
static enum exc_class
exception_class(uint64_t info)
{
	int type, vector;
	KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#llx", info));
	type = info & VM_INTINFO_TYPE;
	vector = info & 0xff;
	/* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
	switch (type) {
	case VM_INTINFO_HWINTR:
	case VM_INTINFO_SWINTR:
	case VM_INTINFO_NMI:
		return (EXC_BENIGN);
	default:
		/*
		 * Hardware exception.
		 *
		 * SVM and VT-x use identical type values to represent NMI,
		 * hardware interrupt and software interrupt.
		 *
		 * SVM uses type '3' for all exceptions. VT-x uses type '3'
		 * for exceptions except #BP and #OF. #BP and #OF use a type
		 * value of '5' or '6'. Therefore we don't check for explicit
		 * values of 'type' to classify 'intinfo' into a hardware
		 * exception.
		 */
		break;
	}
	switch (vector) {
	case IDT_PF:
	case IDT_VE:
		return (EXC_PAGEFAULT);
	case IDT_DE:
	case IDT_TS:
	case IDT_NP:
	case IDT_SS:
	case IDT_GP:
		return (EXC_CONTRIBUTORY);
	default:
		return (EXC_BENIGN);
	}
}
/*
 * Combine a fault 'info2' raised while delivering event 'info1'.
 * A fault during double-fault delivery triggers a triple fault (the VM is
 * suspended and no event is returned). Otherwise, per the SDM double-fault
 * conditions, either a #DF is synthesized or the new event is delivered
 * serially. Returns 1 when *retinfo holds an event to inject, 0 on triple
 * fault.
 */
static int
nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
	uint64_t *retinfo)
{
	enum exc_class exc1, exc2;
	int type1, vector1;
	KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#llx is not valid", info1));
	KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#llx is not valid", info2));
	/*
	 * If an exception occurs while attempting to call the double-fault
	 * handler the processor enters shutdown mode (aka triple fault).
	 */
	type1 = info1 & VM_INTINFO_TYPE;
	vector1 = info1 & 0xff;
	if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
		VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#llx), info2(%#llx)",
			info1, info2);
		vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
		*retinfo = 0;
		return (0);
	}
	/*
	 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
	 */
	exc1 = exception_class(info1);
	exc2 = exception_class(info2);
	if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
		(exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
		/* Convert nested fault into a double fault. */
		*retinfo = IDT_DF;
		*retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		*retinfo |= VM_INTINFO_DEL_ERRCODE;
	} else {
		/* Handle exceptions serially */
		*retinfo = info2;
	}
	return (1);
}
/*
 * Build the VM_INTINFO encoding of the exception currently pending on
 * 'vcpu'. Returns 0 when no exception is pending.
 */
static uint64_t
vcpu_exception_intinfo(struct vcpu *vcpu)
{
	uint64_t intinfo;
	if (!vcpu->exception_pending)
		return (0);
	/* Low byte carries the vector; mark it a valid hardware exception. */
	intinfo = (vcpu->exc_vector & 0xff) |
	    VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
	if (vcpu->exc_errcode_valid) {
		/* Error code is delivered and lives in bits 63:32. */
		intinfo |= VM_INTINFO_DEL_ERRCODE |
		    ((uint64_t)vcpu->exc_errcode << 32);
	}
	return (intinfo);
}
/*
 * Compute the single event to inject on the next guest entry for 'vcpuid'.
 * Consumes (clears) both the saved exit intinfo and any pending exception;
 * when both are present they are merged via nested_fault(). Returns 1 and
 * fills *retinfo when there is an event to inject, else 0.
 */
int
vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
{
	struct vcpu *vcpu;
	uint64_t info1, info2;
	int valid;
	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));
	vcpu = &vm->vcpu[vcpuid];
	info1 = vcpu->exitintinfo;
	vcpu->exitintinfo = 0;
	info2 = 0;
	if (vcpu->exception_pending) {
		info2 = vcpu_exception_intinfo(vcpu);
		vcpu->exception_pending = 0;
		VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#llx",
			vcpu->exc_vector, info2);
	}
	if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
		valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
	} else if (info1 & VM_INTINFO_VALID) {
		*retinfo = info1;
		valid = 1;
	} else if (info2 & VM_INTINFO_VALID) {
		*retinfo = info2;
		valid = 1;
	} else {
		valid = 0;
	}
	if (valid) {
		VCPU_CTR4(vm, vcpuid, "%s: info1(%#llx), info2(%#llx), "
			"retinfo(%#llx)", __func__, info1, info2, *retinfo);
	}
	return (valid);
}
/*
 * Report the two pending-event sources for 'vcpuid': the intinfo saved at
 * VM-exit (*info1) and the encoding of any pending exception (*info2).
 * Non-destructive, unlike vm_entry_intinfo(). Returns 0 or EINVAL.
 */
int
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
{
	struct vcpu *v;
	if (vcpuid >= 0 && vcpuid < VM_MAXCPU) {
		v = &vm->vcpu[vcpuid];
		*info1 = v->exitintinfo;
		*info2 = vcpu_exception_intinfo(v);
		return (0);
	}
	return (EINVAL);
}
/*
 * Queue exception 'vector' (with optional error code) for injection into
 * 'vcpuid'. Rejects bad vcpu/vector ids, direct #DF injection, and a
 * second exception while one is already pending (EBUSY). Clears the
 * interrupt shadow and optionally restarts the current instruction so the
 * exception is raised at the right RIP. Returns 0 or an errno value.
 */
int
vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
	uint32_t errcode, int restart_instruction)
{
	struct vcpu *vcpu;
	int error;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);
	if (vector < 0 || vector >= 32)
		return (EINVAL);
	/*
	 * A double fault exception should never be injected directly into
	 * the guest. It is a derived exception that results from specific
	 * combinations of nested faults.
	 */
	if (vector == IDT_DF)
		return (EINVAL);
	vcpu = &vm->vcpu[vcpuid];
	if (vcpu->exception_pending) {
		VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
			"pending exception %d", vector, vcpu->exc_vector);
		return (EBUSY);
	}
	/*
	 * From section 26.6.1 "Interruptibility State" in Intel SDM:
	 *
	 * Event blocking by "STI" or "MOV SS" is cleared after guest executes
	 * one instruction or incurs an exception.
	 */
	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
		__func__, error));
	if (restart_instruction)
		vm_restart_instruction(vm, vcpuid);
	vcpu->exception_pending = 1;
	vcpu->exc_vector = vector;
	vcpu->exc_errcode = errcode;
	vcpu->exc_errcode_valid = errcode_valid;
	VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
	return (0);
}
/*
 * Convenience wrapper: inject exception 'vector' and always restart the
 * current instruction. Aborts (via KASSERT) if the injection fails.
 */
void
vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
	int errcode)
{
	struct vm *vm;
	int error, restart_instruction;
	vm = vmarg;
	restart_instruction = 1;
	error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
		((uint32_t) errcode), restart_instruction);
	KASSERT(error == 0, ("vm_inject_exception error %d", error));
}
/*
 * Inject a page fault (#PF) into 'vcpuid': loads the faulting linear
 * address into guest %cr2, then injects IDT_PF with 'error_code'.
 */
void
vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
{
	struct vm *vm;
	int error;
	vm = vmarg;
	VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#llx",
		error_code, cr2);
	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
	vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
}
static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
/*
 * Mark an NMI pending on 'vcpuid' and kick the vcpu so it notices.
 * Returns 0 or EINVAL for an out-of-range vcpuid.
 */
int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);
	vcpu = &vm->vcpu[vcpuid];
	vcpu->nmi_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}
/*
 * Return nonzero if an NMI is pending on 'vcpuid'; aborts on a bad id.
 */
int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		xhyve_abort("vm_nmi_pending: invalid vcpuid %d\n", vcpuid);
	vcpu = &vm->vcpu[vcpuid];
	return (vcpu->nmi_pending);
}
/*
 * Acknowledge delivery of the pending NMI on 'vcpuid': clears the flag and
 * bumps the NMI statistic. Aborts on a bad vcpuid or if no NMI is pending.
 */
void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		/* Fixed: diagnostic previously named vm_nmi_pending. */
		xhyve_abort("vm_nmi_clear: invalid vcpuid %d\n", vcpuid);
	vcpu = &vm->vcpu[vcpuid];
	if (vcpu->nmi_pending == 0)
		xhyve_abort("vm_nmi_clear: inconsistent nmi_pending state\n");
	vcpu->nmi_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}
static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
/*
 * Mark a legacy PIC external interrupt (ExtINT) pending on 'vcpuid' and
 * kick the vcpu. Returns 0 or EINVAL for an out-of-range vcpuid.
 */
int
vm_inject_extint(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);
	vcpu = &vm->vcpu[vcpuid];
	vcpu->extint_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}
/*
 * Return nonzero if an ExtINT is pending on 'vcpuid'; aborts on a bad id.
 */
int
vm_extint_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		xhyve_abort("vm_extint_pending: invalid vcpuid %d\n", vcpuid);
	vcpu = &vm->vcpu[vcpuid];
	return (vcpu->extint_pending);
}
/*
 * Acknowledge delivery of the pending ExtINT on 'vcpuid': clears the flag
 * and bumps the ExtINT statistic. Aborts on a bad vcpuid or if no ExtINT
 * is pending.
 */
void
vm_extint_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		/* Fixed: diagnostic previously named vm_extint_pending. */
		xhyve_abort("vm_extint_clear: invalid vcpuid %d\n", vcpuid);
	vcpu = &vm->vcpu[vcpuid];
	if (vcpu->extint_pending == 0)
		xhyve_abort("vm_extint_clear: inconsistent extint_pending state\n");
	vcpu->extint_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
}
/*
 * Fetch capability 'type' of 'vcpu' into *retval via the backend VMGETCAP.
 * Returns EINVAL when the vcpu or capability index is out of range.
 */
int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	bool bad_vcpu = (vcpu < 0 || vcpu >= VM_MAXCPU);
	bool bad_type = (type < 0 || type >= VM_CAP_MAX);
	if (bad_vcpu || bad_type)
		return (EINVAL);
	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}
/*
 * Set capability 'type' of 'vcpu' to 'val' via the backend VMSETCAP.
 * Returns EINVAL when the vcpu or capability index is out of range.
 */
int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	bool bad_vcpu = (vcpu < 0 || vcpu >= VM_MAXCPU);
	bool bad_type = (type < 0 || type >= VM_CAP_MAX);
	if (bad_vcpu || bad_type)
		return (EINVAL);
	return (VMSETCAP(vm->cookie, vcpu, type, val));
}
/* Accessor: the virtual local APIC of vcpu 'cpu' (no bounds check). */
struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}
/* Accessor: the VM-wide virtual IOAPIC. */
struct vioapic *
vm_ioapic(struct vm *vm)
{
	return (vm->vioapic);
}
/* Accessor: the VM-wide virtual HPET. */
struct vhpet *
vm_hpet(struct vm *vm)
{
	return (vm->vhpet);
}
/*
 * Transition 'vcpuid' to 'newstate' under the vcpu lock. 'from_idle'
 * is forwarded to vcpu_set_state_locked(). Returns its error code;
 * aborts on a bad vcpuid. (The abort text keeps the historical
 * "vm_set_run_state" name from the upstream code.)
 */
int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
	bool from_idle)
{
	int error;
	struct vcpu *vcpu;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		xhyve_abort("vm_set_run_state: invalid vcpuid %d\n", vcpuid);
	vcpu = &vm->vcpu[vcpuid];
	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
	vcpu_unlock(vcpu);
	return (error);
}
/*
 * Read the current state of 'vcpuid' under the vcpu lock; aborts on a bad
 * vcpuid.
 */
enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;
	enum vcpu_state state;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		xhyve_abort("vm_get_run_state: invalid vcpuid %d\n", vcpuid);
	vcpu = &vm->vcpu[vcpuid];
	vcpu_lock(vcpu);
	state = vcpu->state;
	vcpu_unlock(vcpu);
	return (state);
}
/*
 * Mark 'vcpuid' as an active vcpu of 'vm'. Returns EINVAL for a bad id
 * and EBUSY if the vcpu was already activated, else 0.
 */
int
vm_activate_cpu(struct vm *vm, int vcpuid)
{
	unsigned cpu;
	if (vcpuid >= 0 && vcpuid < VM_MAXCPU) {
		cpu = (unsigned) vcpuid;
		if (!CPU_ISSET(cpu, &vm->active_cpus)) {
			VCPU_CTR0(vm, vcpuid, "activated");
			CPU_SET_ATOMIC(cpu, &vm->active_cpus);
			return (0);
		}
		return (EBUSY);
	}
	return (EINVAL);
}
/* Snapshot (by value) of the set of active vcpus. */
cpuset_t
vm_active_cpus(struct vm *vm)
{
	return (vm->active_cpus);
}
/* Snapshot (by value) of the set of suspended vcpus. */
cpuset_t
vm_suspended_cpus(struct vm *vm)
{
	return (vm->suspended_cpus);
}
/* Accessor: per-vcpu statistics blob (no bounds check on vcpuid). */
void *
vcpu_stats(struct vm *vm, int vcpuid)
{
	return (vm->vcpu[vcpuid].stats);
}
/*
 * Read the x2APIC mode of 'vcpuid' into *state.
 * Returns 0, or EINVAL for an out-of-range vcpuid.
 */
int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid >= 0 && vcpuid < VM_MAXCPU) {
		*state = vm->vcpu[vcpuid].x2apic_state;
		return (0);
	}
	return (EINVAL);
}
/*
 * Switch the x2APIC mode of 'vcpuid' to 'state' and propagate the change
 * to the virtual local APIC. Returns 0, or EINVAL for an out-of-range
 * vcpuid or state.
 */
int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU || state >= X2APIC_STATE_LAST)
		return (EINVAL);
	vm->vcpu[vcpuid].x2apic_state = state;
	vlapic_set_x2apic_state(vm, vcpuid, state);
	return (0);
}
/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be directed
 *   to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
void
vcpu_notify_event(struct vm *vm, int vcpuid, UNUSED bool lapic_intr)
{
	struct vcpu *vcpu;
	vcpu = &vm->vcpu[vcpuid];
	vcpu_lock(vcpu);
	if (vcpu->state == VCPU_RUNNING) {
		/* Running in guest context: force a VM-exit. */
		VCPU_INTERRUPT(vcpuid);
		/* FIXME */
//		if (hostcpu != curcpu) {
//			if (lapic_intr) {
//				vlapic_post_intr(vcpu->vlapic, hostcpu,
//				    vmm_ipinum);
//			} else {
//				ipi_cpu(hostcpu, vmm_ipinum);
//			}
//		} else {
//			/*
//			 * If the 'vcpu' is running on 'curcpu' then it must
//			 * be sending a notification to itself (e.g. SELF_IPI).
//			 * The pending event will be picked up when the vcpu
//			 * transitions back to guest context.
//			 */
//		}
	} else {
		/* Sleeping vcpu: wake its condition variable. */
		if (vcpu->state == VCPU_SLEEPING)
			pthread_cond_signal(&vcpu->vcpu_sleep_cnd);
		//wakeup_one(vcpu);
	}
	vcpu_unlock(vcpu);
}
/*
 * Map an APIC id to a vcpu id. Identity mapping by construction here.
 */
int
vm_apicid2vcpuid(UNUSED struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}
/*
 * Initiate a rendezvous of the vcpus in 'dest' around 'func(arg)'. If a
 * rendezvous is already in progress the caller first participates in it
 * and retries. After publishing the new rendezvous, every targeted vcpu
 * is kicked and the caller itself joins via vm_handle_rendezvous(), which
 * returns once all targets have run the callback.
 */
void
vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
	vm_rendezvous_func_t func, void *arg)
{
	int i;
	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
		("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));
restart:
	pthread_mutex_lock(&vm->rendezvous_mtx);
	if (vm->rendezvous_func != NULL) {
		/*
		 * If a rendezvous is already in progress then we need to
		 * call the rendezvous handler in case this 'vcpuid' is one
		 * of the targets of the rendezvous.
		 */
		RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
		pthread_mutex_unlock(&vm->rendezvous_mtx);
		vm_handle_rendezvous(vm, vcpuid);
		goto restart;
	}
	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
		"rendezvous is still in progress"));
	RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
	vm->rendezvous_req_cpus = dest;
	CPU_ZERO(&vm->rendezvous_done_cpus);
	vm->rendezvous_arg = arg;
	vm_set_rendezvous_func(vm, func);
	pthread_mutex_unlock(&vm->rendezvous_mtx);
	/*
	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
	 * vcpus so they handle the rendezvous as soon as possible.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(((unsigned) i), &dest))
			vcpu_notify_event(vm, i, false);
	}
	vm_handle_rendezvous(vm, vcpuid);
}
/* Accessor: the VM-wide virtual 8259 PIC. */
struct vatpic *
vm_atpic(struct vm *vm)
{
	return (vm->vatpic);
}
/* Accessor: the VM-wide virtual 8254 PIT. */
struct vatpit *
vm_atpit(struct vm *vm)
{
	return (vm->vatpit);
}
/* Accessor: the VM-wide virtual ACPI PM timer. */
struct vpmtmr *
vm_pmtmr(struct vm *vm)
{
	return (vm->vpmtmr);
}
/* Accessor: the VM-wide virtual RTC. */
struct vrtc *
vm_rtc(struct vm *vm)
{
	return (vm->vrtc);
}
/*
 * Map a segment encoding (0..5 = ES,CS,SS,DS,FS,GS) to the corresponding
 * guest register name; asserts on an out-of-range encoding.
 */
enum vm_reg_name
vm_segment_name(int seg)
{
	static enum vm_reg_name seg_names[] = {
		VM_REG_GUEST_ES,
		VM_REG_GUEST_CS,
		VM_REG_GUEST_SS,
		VM_REG_GUEST_DS,
		VM_REG_GUEST_FS,
		VM_REG_GUEST_GS
	};
	KASSERT(seg >= 0 && seg < ((int) nitems(seg_names)),
		("%s: invalid segment encoding %d", __func__, seg));
	return (seg_names[seg]);
}
/*
 * Release a copyinfo array populated by vm_copy_setup(). No per-entry
 * host resources are held, so teardown amounts to zeroing the array.
 */
void
vm_copy_teardown(UNUSED struct vm *vm, UNUSED int vcpuid,
	struct vm_copyinfo *copyinfo, int num_copyinfo)
{
	size_t nbytes;
	nbytes = ((unsigned) num_copyinfo) * sizeof(struct vm_copyinfo);
	bzero(copyinfo, nbytes);
}
/*
 * Translate the guest linear range [gla, gla+len) into a list of
 * host-addressable segments in 'copyinfo' (at most one page each, split
 * at page boundaries). First pass resolves GLA->GPA via the guest paging
 * state; second pass maps each GPA to a host VA. Returns 0 on success
 * (*fault cleared), a translation error, or EFAULT if a GPA cannot be
 * mapped. On EFAULT the partially-filled array is torn down.
 */
int
vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
	uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
	int num_copyinfo, int *fault)
{
	int error, idx, nused;
	size_t n, off, remaining;
	void *hva;
	uint64_t gpa;
	bzero(copyinfo, sizeof(struct vm_copyinfo) * ((unsigned) num_copyinfo));
	nused = 0;
	remaining = len;
	while (remaining > 0) {
		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
		error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault);
		if (error || *fault)
			return (error);
		/* Clamp the segment at the end of the current guest page. */
		off = gpa & XHYVE_PAGE_MASK;
		n = min(remaining, XHYVE_PAGE_SIZE - off);
		copyinfo[nused].gpa = gpa;
		copyinfo[nused].len = n;
		remaining -= n;
		gla += n;
		nused++;
	}
	for (idx = 0; idx < nused; idx++) {
		hva = vm_gpa2hva(vm, copyinfo[idx].gpa, copyinfo[idx].len);
		if (hva == NULL)
			break;
		copyinfo[idx].hva = hva;
	}
	if (idx != nused) {
		vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
		return (EFAULT);
	} else {
		*fault = 0;
		return (0);
	}
}
/*
 * Copy 'len' bytes of guest memory, described by the segments in
 * 'copyinfo' (see vm_copy_setup()), into the host buffer 'kaddr'.
 * The caller guarantees the segment lengths sum to at least 'len'.
 */
void
vm_copyin(UNUSED struct vm *vm, UNUSED int vcpuid, struct vm_copyinfo *copyinfo,
	void *kaddr, size_t len)
{
	char *out;
	int seg;
	out = kaddr;
	for (seg = 0; len > 0; seg++) {
		bcopy(copyinfo[seg].hva, out, copyinfo[seg].len);
		out += copyinfo[seg].len;
		len -= copyinfo[seg].len;
	}
}
/*
 * Copy 'len' bytes from the host buffer 'kaddr' out to guest memory,
 * walking the segments in 'copyinfo' (see vm_copy_setup()) in order.
 * The caller guarantees the segment lengths sum to at least 'len'.
 */
void
vm_copyout(UNUSED struct vm *vm, UNUSED int vcpuid, const void *kaddr,
	struct vm_copyinfo *copyinfo, size_t len)
{
	const char *in;
	int seg;
	in = kaddr;
	for (seg = 0; len > 0; seg++) {
		bcopy(in, copyinfo[seg].hva, copyinfo[seg].len);
		in += copyinfo[seg].len;
		len -= copyinfo[seg].len;
	}
}
|
MiniCamel/BGECycleScrollView
|
Example/Pods/Target Support Files/Pods-BGECycleScrollView_Example/Pods-BGECycleScrollView_Example-umbrella.h
|
<gh_stars>0
// CocoaPods-generated umbrella header for the
// Pods-BGECycleScrollView_Example target.
#ifdef __OBJC__
#import <UIKit/UIKit.h>
#else
// Plain-C/C++ translation units: provide FOUNDATION_EXPORT ourselves.
#ifndef FOUNDATION_EXPORT
#if defined(__cplusplus)
#define FOUNDATION_EXPORT extern "C"
#else
#define FOUNDATION_EXPORT extern
#endif
#endif
#endif
// Version metadata emitted by CocoaPods for this pods target.
FOUNDATION_EXPORT double Pods_BGECycleScrollView_ExampleVersionNumber;
FOUNDATION_EXPORT const unsigned char Pods_BGECycleScrollView_ExampleVersionString[];
|
lucaspar/distributed_computing
|
openacc_demo/vector_addition.c
|
// pgcc -acc vector_addition.c
#include <stdio.h>
#include <stdlib.h>
void vecaddgpu(float *restrict r, float *a, float *b, int n)
{
#pragma acc kernels loop copyin(a [0:n], b [0:n]) copyout(r [0:n])
for (int i = 0; i < n; ++i)
r[i] = a[i] + b[i];
}
/*
 * Driver: build two input vectors, add them with vecaddgpu(), recompute
 * the sum on the host, and report the number of mismatching elements.
 * The exit status is the mismatch count (0 on success).
 *
 * Fixes vs. original: the final printf used typographic quotes
 * (“% d errors found\n”), which is not valid C and cannot compile, and
 * had a malformed "% d" specifier; allocations are now checked and freed.
 */
int main(int argc, char *argv[])
{
    int n;    /* vector length */
    float *a; /* input vector 1 */
    float *b; /* input vector 2 */
    float *r; /* output vector */
    float *e; /* expected output values */
    int i, errs;

    if (argc > 1)
        n = atoi(argv[1]);
    else
        n = 100000; /* default vector length */
    if (n <= 0)
        n = 100000;
    a = (float *)malloc((size_t)n * sizeof(float));
    b = (float *)malloc((size_t)n * sizeof(float));
    r = (float *)malloc((size_t)n * sizeof(float));
    e = (float *)malloc((size_t)n * sizeof(float));
    if (a == NULL || b == NULL || r == NULL || e == NULL)
    {
        fprintf(stderr, "memory allocation failed\n");
        free(a); free(b); free(r); free(e);
        return 1;
    }
    for (i = 0; i < n; ++i)
    {
        a[i] = (float)(i + 1);
        b[i] = (float)(1000 * i);
    }
    /* compute on the GPU */
    vecaddgpu(r, a, b, n);
    /* compute on the host to compare */
    for (i = 0; i < n; ++i)
        e[i] = a[i] + b[i];
    /* compare results */
    errs = 0;
    for (i = 0; i < n; ++i)
    {
        if (r[i] != e[i])
        {
            ++errs;
        }
    }
    printf("%d errors found\n", errs);
    free(a);
    free(b);
    free(r);
    free(e);
    return errs;
}
|
Roshan-Thomas/ECEN-210-C-Programs
|
Lab Files/31.8.2020/compute_dimensional_weight.c
|
/*********************************************************
* From C PROGRAMMING: A MODERN APPROACH, Second Edition *
* By <NAME> *
* Copyright (c) 2008, 1996 <NAME> & Company, Inc. *
* All rights reserved. *
* This program may be freely distributed for class use, *
* provided that this copyright notice is retained. *
*********************************************************/
/* dweight2.c (Chapter 2, page 23) */
/* Computes the dimensional weight of a
box from input provided by the user */
#include <stdio.h>
/*
 * Prompt for a box's height, length and width (inches), then print its
 * volume and dimensional weight: the volume rounded up to whole pounds
 * at 166 cubic inches per pound.
 */
int main(void)
{
  int height, length, width, volume, weight;

  printf("Enter height of box: ");
  scanf("%d", &height);
  printf("Enter length of box: ");
  scanf("%d", &length);
  printf("Enter width of box: ");
  scanf("%d", &width);
  volume = height * length * width;
  /* (volume + 165) / 166 == ceil(volume / 166) for positive volume */
  weight = (volume + 165) / 166;
  printf("Volume (cubic inches): %d\n", volume);
  printf("Dimensional weight (pounds): %d\n", weight);

  return 0;
}
|
Roshan-Thomas/ECEN-210-C-Programs
|
Lab Files/31.8.2020/variable_size_array.c
|
/***
Last modification: 12 September 2017
***/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//------------------------------------------------------------
/*
 * Read a size n in [1, 15], allocate an n-element array of doubles, fill
 * it with exp(2i+1), print the sum, and free the array.
 *
 * Fixes vs. original: the first printf passed the *uninitialized* 'n' as
 * an argument with no matching conversion specifier (undefined behavior);
 * the K&R-style main declaration is replaced with a prototype; the scanf
 * result is now checked before 'n' is used.
 */
int main(int argc, char **argv)
{
	double sum, *table;// This is a variable-size table of real numbers
	// "table" is a pointer to an array of doubles; the pointer alone does
	// not know the array size. The exact size is fixed when allocating
	// via malloc() or calloc().
	int i, n;
	(void)argc;
	printf("\nEnter the array size n: ");
	if(scanf("%d", &n)!=1)
	{
		fprintf(stderr,"%s: failed to read n\n", argv[0]);
		return(-1);
	}
	if((n<1)||(n>15))
	{
		fprintf(stderr,"%s: n must be in the range 1 to 15\n\n", argv[0]);
		return(-1);
	}
	// memory allocation
	table=(double*) calloc(n, sizeof(double));
	if(table==NULL)
	{
		fprintf(stderr,"%s: memory allocation error\n", argv[0]);
		return(-1);
	}
	// put numbers in the table
	for(i=0; i<n; i++) table[i]=exp((double)(2.0*i+1.0));
	for(sum=i=0; i<n; i++) sum += table[i];
	printf("Sum of the exp() function of the %d real numbers is %1.2e \n", n, sum);
	free(table);
	return(0);
}/* end of main() */
//------------------------------------------------------------
|
Roshan-Thomas/ECEN-210-C-Programs
|
Lab Files/31.8.2020/hello_ECEN.c
|
<reponame>Roshan-Thomas/ECEN-210-C-Programs
/***
Last modification: 12 September 2017
***/
#include <stdio.h>
/*
 * Print a greeting. Fixed vs. original: 'main' now has an explicit
 * return type and returns a status — implicit int was removed in C99.
 */
int main(void)
{
	// Just saying hello
	printf("Hello ECEN 303! \n");
	return 0;
}/* end of main() */
|
Roshan-Thomas/ECEN-210-C-Programs
|
Lab Files/31.8.2020/convert_temperature_c_to_f.c
|
<reponame>Roshan-Thomas/ECEN-210-C-Programs
/*********************************************************
* From C PROGRAMMING: A MODERN APPROACH, Second Edition *
* By <NAME> *
* Copyright (c) 2008, 1996 <NAME> & Company, Inc. *
* All rights reserved. *
* This program may be freely distributed for class use, *
* provided that this copyright notice is retained. *
*********************************************************/
/* celsius.c (Chapter 2, page 24) */
/* Converts a Fahrenheit temperature to Celsius */
#include <stdio.h>
#define FREEZING_PT 32.0
#define SCALE_FACTOR (5.0/9.0)
/***
Modified: 12 September 2017
Dr Boutros changed float into double
***/
int main()
{
double fahrenheit, celsius;
printf("Enter Fahrenheit temperature: ");
scanf("%lf", &fahrenheit);
celsius = (fahrenheit - FREEZING_PT) * SCALE_FACTOR;
printf("Celsius equivalent: %.1f\n", celsius);
return(0);
}/* end of main () */
|
Roshan-Thomas/ECEN-210-C-Programs
|
Lab Files/31.8.2020/infinite_loop.c
|
<reponame>Roshan-Thomas/ECEN-210-C-Programs
/***
Last modification: 12 September 2017
***/
#include <stdio.h>
#include <math.h>
#define TRUE 1
/*
 * Compute PI, then spin forever evaluating y = cos(x) with x sweeping
 * [0, PI) in 100 steps — the loop is intentionally infinite (a CPU-burn
 * demo). Fixed vs. original: 'main' now has an explicit return type
 * (implicit int was removed in C99).
 */
int main(void)
{
	int iter=0;
	double x, y, PI;
	x=1.0;
	PI=4.0*atan(x); /* atan(1) == PI/4 */
	printf("PI=%1.12f \n", PI);
	// Running an infinite loop
	printf("I am running y=cos(x) forever...\n");
	do
	{
		x=iter*PI/100.0;
		y=cos(x);// angle must be in radian
		(void)y; /* result intentionally unused; the work is the point */
		if(++iter==100) iter=0;
	}
	while(TRUE);
}/* end of main() */
|
Roshan-Thomas/ECEN-210-C-Programs
|
Class Files/30.8.2020/print-variables.c
|
#include <stdio.h>
/**************************************
Lecture of 30 August 2020
Learning about printf() and variable declaration.
***************************************/
/*
 * Demonstrates basic variable declarations and printf() formatting:
 * prints "Hello!" and then an int, a double and a char in one call.
 */
int main()
{
	int whole = 1391;         // an integer value
	double real = 27.513;     // a double-precision floating-point value
	char letter = 'H';        // one character (single quotes; double quotes make strings)
	printf("Hello!\n");       // print the string "Hello!"
	// printf() is variadic: this call passes 4 arguments (format + 3 values)
	printf("i=%d x=%f c=%c \n", whole, real, letter);
	return(0);
}/* end of main() */
|
Roshan-Thomas/ECEN-210-C-Programs
|
Class Files/30.8.2020/cosine-and-defining-a-constant.c
|
<filename>Class Files/30.8.2020/cosine-and-defining-a-constant.c<gh_stars>0
#include <stdio.h>
#include <math.h>
/**************************************
Lecture of 30 August 2020
Learning about calling a cosine function and defining a constant.
***************************************/
#define PI 3.141592653589
/*
 * Evaluates cos(PI/10) — the cosine of 18 degrees expressed in radians —
 * and prints both the angle and the result.
 */
int main()
{
	const double angle = PI/10.0;     /* 18 degrees, in radians */
	const double result = cos(angle); /* library cosine, radians in */
	printf("cosine of %f is %f\n", angle, result);
	return(0);
}/* end of main() */
|
Roshan-Thomas/ECEN-210-C-Programs
|
Lab Files/31.8.2020/fixed_size_array.c
|
<filename>Lab Files/31.8.2020/fixed_size_array.c
/***
Last modification: 12 September 2017
***/
#include <stdio.h>
#include <math.h>
//------------------------------------------------------------
/*
 * Demonstrates the int, double and unsigned char types using fixed-size
 * arrays: prints each type's value range, sums the first few elements of
 * two numeric tables, and prints a character table both character-by-
 * character and as a NUL-terminated C string.
 */
int main(int argc, char **argv) /* ANSI prototype replaces the obsolete K&R parameter style */
{
    int sum1, table1[20];    // This is a fixed-size table of 20 integers
    double sum2, table2[20]; // This is a fixed-size table of 20 real numbers
    unsigned char table3[20];// This is a fixed-size table of 20 characters
    int i, n;

    (void)argc; /* command-line arguments are not used */
    (void)argv;

    printf("\nint: this type is for integers, one int variable has 32 bits \n");
    printf("An int variable varies from -2^31 to 2^31-1 (2^32 is almost 4 billion)\n\n");
    printf("double: this type is for double-precision reals, one double variable has 64 bits \n");
    printf("A double variable varies (in absolute value) from 1.7E-308 to 1.7E+308\n");
    printf("1E-308 means 10^(-308) and 1E+308 means 10^(308) \n\n");
    printf("char: this type is for characters, one char variable has 8 bits (char=byte)\n");
    printf("A signed char variable varies from -128 to 127 \n");
    printf("An unsigned char variable varies from 0 to 255 (it contains characters in ASCII code)\n\n");

    n = 4; // the tables can store up to 20 numbers, we are using 4 only

    /* sum a few integers */
    table1[0] = -3;
    table1[1] = table1[2] = 5;
    table1[3] = 11;
    for (sum1 = i = 0; i < n; i++) sum1 += table1[i];
    printf("The sum of the %d integers is %d \n", n, sum1);

    /* sum a few reals */
    table2[0] = 4.1e-3;
    table2[1] = table2[2] = 5.5e-2;
    table2[3] = 1.0/10.0;
    for (sum2 = i = 0; i < n; i++) sum2 += table2[i];
    printf("The sum of the %d reals is %1.4f=%1.4e \n", n, sum2, sum2);

    /* build and print a short string */
    table3[0] = 'e';
    table3[1] = 'c';
    table3[2] = 'e';
    table3[3] = 'n';
    table3[4] = 0; // a string in C language terminates with a 0, the 0 number not the '0' character.
    printf("Printing characters one-by-one: the word is %c%c%c%c \n", table3[0], table3[1], table3[2], table3[3]);
    printf("Printing the whole string : the word is %s \n\n", table3);

    return 0;
}/* end of main() */
//------------------------------------------------------------
|
Roshan-Thomas/ECEN-210-C-Programs
|
Lab Files/31.8.2020/hyperbolic_tangent_function.c
|
/***
Last modification: 12 September 2017
***/
#include <stdio.h>
#include <math.h>
#define TRUE 1
static double hypertan(double x);
/*
 * Compares the local hyperbolic tangent implementation (hypertan, defined
 * later in this file) against the library tanh() at a few sample points,
 * then writes a table of (x, tanh(x)) pairs to "tanh_function.dat" for
 * later plotting with gnuplot.
 * Returns 0 on success, -1 if the output file cannot be opened.
 */
int main(int argc, char **argv) /* ANSI prototype replaces the obsolete K&R parameter style */
{
    double x, y;
    FILE *fptr;

    (void)argc; /* only argv[0] (the program name) is used, for error reporting */

    printf("########## Hyperbolic Tangent Function ##########\n");

    // Compare our function to the system function tanh
    x = -2.0; y = hypertan(x);
    printf("x=%1.4f hypertan=%1.4f tanh=%1.4f \n", x, y, tanh(x));
    x = -1.0; y = hypertan(x);
    printf("x=%1.4f hypertan=%1.4f tanh=%1.4f \n", x, y, tanh(x));
    x = 0.0; y = hypertan(x);
    printf("x=%1.4f hypertan=%1.4f tanh=%1.4f \n", x, y, tanh(x));
    x = 1.0; y = hypertan(x);
    printf("x=%1.4f hypertan=%1.4f tanh=%1.4f \n", x, y, tanh(x));
    x = 2.0; y = hypertan(x);
    printf("x=%1.4f hypertan=%1.4f tanh=%1.4f \n", x, y, tanh(x));

    // Now generate a file to be uploaded later by gnuplot
    fptr = fopen("tanh_function.dat", "w");
    if (fptr == NULL)
    {
        fprintf(stderr, "%s: cannot open file for writing.\n", argv[0]);
        return -1;
    }
    for (x = -5; x <= 5.0; x += 0.02)
    {
        fprintf(fptr, "%1.2f %1.4f \n", x, tanh(x));
    }
    fclose(fptr);

    return 0;
}/* end of main() */
//------------------------------------------------------------
static double hypertan(double x)
{
double value;
value=(exp(x)-exp(-x))/(exp(x)+exp(-x));
return(value);
}/* end of hypertan() */
//------------------------------------------------------------
|
ivan-mogilko/ags-refactoring
|
Common/ac/spritefile.h
|
<gh_stars>1-10
//=============================================================================
//
// Adventure Game Studio (AGS)
//
// Copyright (C) 1999-2011 <NAME> and 2011-20xx others
// The full list of copyright holders can be found in the Copyright.txt
// file, which is part of this source code distribution.
//
// The AGS source code is provided under the Artistic License 2.0.
// A copy of this license can be found in the file License.txt and at
// http://www.opensource.org/licenses/artistic-license-2.0.php
//
//=============================================================================
//
// SpriteFile class handles sprite file parsing and streaming sprites.
// SpriteFileWriter manages writing sprites into the output stream one by one,
// accumulating index information, and may therefore be suitable for a variety
// of situations.
//
//=============================================================================
#ifndef __AGS_CN_AC__SPRFILE_H
#define __AGS_CN_AC__SPRFILE_H
#include <memory>
#include <vector>
#include "core/types.h"
#include "util/error.h"
#include "util/geometry.h"
#include "util/stream.h"
#include "util/string.h"
namespace AGS
{
namespace Common
{
class Bitmap;
// TODO: research old version differences
// Version of the main sprite file format; bumped whenever the on-disk layout changes
enum SpriteFileVersion
{
kSprfVersion_Uncompressed = 4,
kSprfVersion_Compressed = 5,
kSprfVersion_Last32bit = 6,
kSprfVersion_64bit = 10,
kSprfVersion_HighSpriteLimit = 11,
kSprfVersion_StorageFormats = 12,
kSprfVersion_Current = kSprfVersion_StorageFormats
};
// Version of the separate sprite index file format
enum SpriteIndexFileVersion
{
kSpridxfVersion_Initial = 1,
kSpridxfVersion_Last32bit = 2,
kSpridxfVersion_64bit = 10,
kSpridxfVersion_HighSpriteLimit = 11,
kSpridxfVersion_Current = kSpridxfVersion_HighSpriteLimit
};
// Instructions to how the sprites are allowed to be stored
enum SpriteStorage
{
// When possible convert the sprite into another format for less disk space
// e.g. save 16/32-bit images as 8-bit colormaps with palette
kSprStore_OptimizeForSize = 0x01
};
// Format in which the sprite's pixel data is stored
enum SpriteFormat
{
kSprFmt_Undefined = 0, // undefined, or keep as-is
// Encoded as a 8-bit colormap with palette of 24-bit RGB values
kSprFmt_PaletteRgb888 = 32,
// Encoded as a 8-bit colormap with palette of 32-bit ARGB values
kSprFmt_PaletteArgb8888 = 33,
// Encoded as a 8-bit colormap with palette of 16-bit RGB565 values
kSprFmt_PaletteRgb565 = 34
};
// Compression algorithm applied to the sprite's pixel data
enum SpriteCompression
{
kSprCompress_None = 0,
kSprCompress_RLE,
kSprCompress_LZW
};
// Key (slot number) used to address a sprite; signed, so -1 can denote "no slot"
typedef int32_t sprkey_t;
// SpriteFileIndex contains sprite file's table of contents
struct SpriteFileIndex
{
int SpriteFileIDCheck = 0; // tag matching sprite file and index file
std::vector<int16_t> Widths;  // sprite widths, one entry per slot
std::vector<int16_t> Heights; // sprite heights, one entry per slot
std::vector<soff_t> Offsets;  // byte offsets of sprite data within the sprite file
// Number of sprite slots described by this index
inline size_t GetCount() const { return Offsets.size(); }
// Index of the last described slot; -1 when the index is empty
inline sprkey_t GetLastSlot() const { return (sprkey_t)GetCount() - 1; }
};
// Individual sprite data header (as read from the file)
struct SpriteDatHeader
{
int BPP = 0; // color depth (bytes per pixel); or input format
SpriteFormat SFormat = kSprFmt_Undefined; // storage format
uint32_t PalCount = 0; // palette length, if applicable to storage format
SpriteCompression Compress = kSprCompress_None; // compression type
int Width = 0; // sprite's width
int Height = 0; // sprite's height
SpriteDatHeader() = default;
// Convenience constructor filling any subset of header fields at once
SpriteDatHeader(int bpp, SpriteFormat sformat = kSprFmt_Undefined,
uint32_t pal_count = 0, SpriteCompression compress = kSprCompress_None,
int w = 0, int h = 0) : BPP(bpp), SFormat(sformat), PalCount(pal_count),
Compress(compress), Width(w), Height(h) {}
};
// SpriteFile opens a sprite file for reading, reports general information,
// and lets read sprites in any order.
class SpriteFile
{
public:
// Standard sprite file and sprite index names
static const String DefaultSpriteFileName;
static const String DefaultSpriteIndexName;
SpriteFile();
// Loads sprite reference information and inits sprite stream
HError OpenFile(const String &filename, const String &sprindex_filename,
std::vector<Size> &metrics);
// Closes stream; no reading will be possible unless opened again
void Close();
// Returns the SpriteStorage flags this file was written with
int GetStoreFlags() const;
// Tells if bitmaps in the file are compressed
SpriteCompression GetSpriteCompression() const;
// Tells the highest known sprite index
sprkey_t GetTopmostSprite() const;
// Loads sprite index file
bool LoadSpriteIndexFile(const String &filename, int expectedFileID,
soff_t spr_initial_offs, sprkey_t topmost, std::vector<Size> &metrics);
// Rebuilds sprite index from the main sprite file
HError RebuildSpriteIndex(Stream *in, sprkey_t topmost, SpriteFileVersion vers,
std::vector<Size> &metrics);
// Loads an image data and creates a ready bitmap
HError LoadSprite(sprkey_t index, Bitmap *&sprite);
// Loads a raw sprite element data into the buffer, stores header info separately
HError LoadRawData(sprkey_t index, SpriteDatHeader &hdr, std::vector<uint8_t> &data);
// Loads raw sprite data plus its palette (when the storage format has one)
HError LoadSpriteData(sprkey_t index, SpriteDatHeader &hdr, std::vector<uint8_t> &data,
std::vector<uint32_t> &palette);
private:
// Seek stream to sprite
void SeekToSprite(sprkey_t index);
// Internal sprite reference
struct SpriteRef
{
soff_t Offset = 0; // data offset
size_t RawSize = 0; // file size of element, in bytes
// TODO: RawSize is currently unused, due to incompleteness of spriteindex format
};
// Array of sprite references
std::vector<SpriteRef> _spriteData;
std::unique_ptr<Stream> _stream; // the sprite stream
SpriteFileVersion _version = kSprfVersion_Current; // format version of the opened file
int _storeFlags = 0; // storage flags, specify how sprites may be stored
SpriteCompression _compress = kSprCompress_None; // sprite compression type
sprkey_t _curPos; // current stream position (sprite slot)
};
// SpriteFileWriter class writes a sprite file in a requested format.
// Start using it by calling Begin, write ready bitmaps or copy raw sprite data
// over slot by slot, then call Finalize to let it close the format correctly.
class SpriteFileWriter
{
public:
// Takes ownership of the output stream sprites will be written into
SpriteFileWriter(std::unique_ptr<Stream> &&out)
: _out(std::move(out)) {}
~SpriteFileWriter() = default;
// Get the sprite index, accumulated after write
const SpriteFileIndex &GetIndex() const { return _index; }
// Initializes new sprite file format
void Begin(int store_flags, SpriteCompression compress, sprkey_t last_slot = -1);
// Writes a bitmap into file, compressing if necessary
void WriteBitmap(Bitmap *image);
// Writes an empty slot marker
void WriteEmptySlot();
// Writes a raw sprite data without any additional processing
void WriteRawData(const SpriteDatHeader &hdr, const uint8_t *data, size_t data_sz);
// Finalizes current format; no further writing is possible after this
void Finalize();
private:
// Writes prepared image data in a proper file format, following explicit data_bpp rule
void WriteSpriteData(const SpriteDatHeader &hdr,
const uint8_t *im_data, size_t im_data_sz, int im_bpp,
const uint32_t palette[256]);
std::unique_ptr<Stream> _out; // owned output stream
int _storeFlags = 0; // SpriteStorage flags set by Begin()
SpriteCompression _compress = kSprCompress_None; // compression chosen in Begin()
soff_t _lastSlotPos = -1; // last slot save position in file
// sprite index accumulated on write for reporting back to user
SpriteFileIndex _index;
// compression buffer
std::vector<uint8_t> _membuf;
};
// Saves all sprites to file; fills in index data for external use
// TODO: refactor to be able to save main file and index file separately (separate function for gather data?)
int SaveSpriteFile(const String &save_to_file,
const std::vector<Bitmap*> &sprites, // available sprites (may contain nullptrs)
SpriteFile *read_from_file, // optional file to read missing sprites from
int store_flags, SpriteCompression compress, SpriteFileIndex &index);
// Saves sprite index table in a separate file
int SaveSpriteIndex(const String &filename, const SpriteFileIndex &index);
} // namespace Common
} // namespace AGS
#endif // __AGS_CN_AC__SPRFILE_H
|
ivan-mogilko/ags-refactoring
|
Common/util/memorystream.h
|
<reponame>ivan-mogilko/ags-refactoring
//=============================================================================
//
// Adventure Game Studio (AGS)
//
// Copyright (C) 1999-2011 <NAME> and 2011-20xx others
// The full list of copyright holders can be found in the Copyright.txt
// file, which is part of this source code distribution.
//
// The AGS source code is provided under the Artistic License 2.0.
// A copy of this license can be found in the file License.txt and at
// http://www.opensource.org/licenses/artistic-license-2.0.php
//
//=============================================================================
//
// MemoryStream does reading and writing over the buffer of bytes stored in
// memory. Currently has rather trivial implementation. Does not own a buffer
// itself, but works with the provided C-buffer pointer, which means that the
// buffer object *must* persist until stream is closed.
//
// VectorStream is a specialized implementation that works with std::vector.
// Unlike base MemoryStream provides continuously resizing buffer for writing.
// TODO: separate StringStream for reading & writing String object?
//
//=============================================================================
#ifndef __AGS_CN_UTIL__MEMORYSTREAM_H
#define __AGS_CN_UTIL__MEMORYSTREAM_H
#include <vector>
#include "util/datastream.h"
#include "util/string.h"
namespace AGS
{
namespace Common
{
class MemoryStream : public DataStream
{
public:
// Construct memory stream in the read-only mode over a const C-buffer;
// reading will never exceed buf_sz bytes;
// buffer must persist in memory until the stream is closed.
MemoryStream(const uint8_t *cbuf, size_t buf_sz, DataEndianess stream_endianess = kLittleEndian);
// Construct memory stream in the chosen mode over a given C-buffer;
// neither reading nor writing will ever exceed buf_sz bytes;
// buffer must persist in memory until the stream is closed.
MemoryStream(uint8_t *buf, size_t buf_sz, StreamWorkMode mode, DataEndianess stream_endianess = kLittleEndian);
~MemoryStream() override = default;
void Close() override;
bool Flush() override;
// Is stream valid (underlying data initialized properly)
bool IsValid() const override;
// Is end of stream
bool EOS() const override;
// Total length of stream (if known)
soff_t GetLength() const override;
// Current position (if known)
soff_t GetPosition() const override;
bool CanRead() const override;
bool CanWrite() const override;
bool CanSeek() const override;
size_t Read(void *buffer, size_t size) override;
int32_t ReadByte() override;
size_t Write(const void *buffer, size_t size) override;
int32_t WriteByte(uint8_t b) override;
bool Seek(soff_t offset, StreamSeek origin) override;
protected:
const uint8_t *_cbuf; // read-only view of the underlying buffer (not owned)
size_t _buf_sz; // hard buffer limit
size_t _len; // calculated length of stream
const StreamWorkMode _mode;
soff_t _pos; // current stream pos
private:
// writeable buffer pointer; presumably null when constructed read-only — verify in the .cpp
uint8_t *_buf;
};
class VectorStream : public MemoryStream
{
public:
// Construct memory stream in the read-only mode over a const std::vector;
// vector must persist in memory until the stream is closed.
VectorStream(const std::vector<uint8_t> &cbuf, DataEndianess stream_endianess = kLittleEndian);
// Construct memory stream in the chosen mode over a given std::vector;
// vector must persist in memory until the stream is closed.
VectorStream(std::vector<uint8_t> &buf, StreamWorkMode mode, DataEndianess stream_endianess = kLittleEndian);
~VectorStream() override = default;
void Close() override;
// Write overrides let the backing vector grow, unlike the fixed-size base class
size_t Write(const void *buffer, size_t size) override;
int32_t WriteByte(uint8_t b) override;
private:
std::vector<uint8_t> *_vec; // writeable vector (may be null)
};
} // namespace Common
} // namespace AGS
#endif // __AGS_CN_UTIL__MEMORYSTREAM_H
|
ivan-mogilko/ags-refactoring
|
Common/font/fonts.h
|
//=============================================================================
//
// Adventure Game Studio (AGS)
//
// Copyright (C) 1999-2011 <NAME> and 2011-20xx others
// The full list of copyright holders can be found in the Copyright.txt
// file, which is part of this source code distribution.
//
// The AGS source code is provided under the Artistic License 2.0.
// A copy of this license can be found in the file License.txt and at
// http://www.opensource.org/licenses/artistic-license-2.0.php
//
//=============================================================================
#ifndef __AC_FONT_H
#define __AC_FONT_H
#include <vector>
#include "ac/gamestructdefines.h"
#include "util/string.h"
// TODO: we need to make some kind of TextManager class of this module
namespace AGS { namespace Common { class Bitmap; } }
using namespace AGS;
class IAGSFontRenderer;
class IAGSFontRenderer2;
struct FontInfo;
struct FontRenderParams;
void init_font_renderer();
void shutdown_font_renderer();
void adjust_y_coordinate_for_text(int* ypos, size_t fontnum);
IAGSFontRenderer* font_replace_renderer(size_t fontNumber, IAGSFontRenderer* renderer);
bool font_first_renderer_loaded();
bool is_font_loaded(size_t fontNumber);
bool is_bitmap_font(size_t fontNumber);
bool font_supports_extended_characters(size_t fontNumber);
// Get font's name, if it's available, otherwise returns empty string
const char *get_font_name(size_t fontNumber);
// Get a collection of FFLG_* flags corresponding to this font
int get_font_flags(size_t fontNumber);
// TODO: with changes to WFN font renderer that implemented safe rendering of
// strings containing invalid chars (since 3.3.1) this function is not
// important, except for (maybe) few particular cases.
// Furthermore, its use complicated things, because AGS could modify some texts
// at random times (usually - drawing routines).
// Need to check whether it is safe to completely remove it.
void ensure_text_valid_for_font(char *text, size_t fontnum);
// Get font's scaling multiplier
int get_font_scaling_mul(size_t fontNumber);
// Calculate actual width of a line of text
int get_text_width(const char *texx, size_t fontNumber);
// Get font's height; this value is used for logical arrangement of UI elements;
// note that this is a "formal" font height, that may have different value
// depending on compatibility mode (used when running old games);
int get_font_height(size_t fontNumber);
// Get the maximal height of the given font, with corresponding outlining
int get_font_height_outlined(size_t fontNumber);
// Get font's surface height: this always returns the height enough to accommodate
// font letters on a bitmap or a texture; the distinction is needed for compatibility reasons
int get_font_surface_height(size_t fontNumber);
// Get font's line spacing
int get_font_linespacing(size_t fontNumber);
// Set font's line spacing
void set_font_linespacing(size_t fontNumber, int spacing);
// Get font's outline type
int get_font_outline(size_t font_number);
// Get font's automatic outline thickness (if set)
int get_font_outline_thickness(size_t font_number);
// Gets the total maximal height of the given number of lines printed with the given font;
// note that this uses formal font height, for compatibility purposes
int get_text_lines_height(size_t fontNumber, size_t numlines);
// Gets the height of a graphic surface enough to accommodate this number of text lines;
// note this accounts for the real pixel font height
int get_text_lines_surf_height(size_t fontNumber, size_t numlines);
// Set font's outline type
void set_font_outline(size_t font_number, int outline_type,
enum FontInfo::AutoOutlineStyle style = FontInfo::kSquared, int thickness = 1);
// Outputs a single line of text on the defined position on bitmap, using defined font, color and parameters
void wouttextxy(Common::Bitmap *ds, int xxx, int yyy, size_t fontNumber, color_t text_color, const char *texx);
// Assigns FontInfo to the font
void set_fontinfo(size_t fontNumber, const FontInfo &finfo);
// Gets full information about the font
FontInfo get_fontinfo(size_t font_number);
// Loads a font from disk
bool load_font_size(size_t fontNumber, const FontInfo &font_info);
void wgtprintf(Common::Bitmap *ds, int xxx, int yyy, size_t fontNumber, color_t text_color, char *fmt, ...);
// Allocates two outline stencil buffers, or returns previously created ones;
// these buffers are owned by the font, they should not be deleted by the caller.
void alloc_font_outline_buffers(size_t font_number,
Common::Bitmap **text_stencil, Common::Bitmap **outline_stencil,
int text_width, int text_height, int color_depth);
// Perform necessary adjustments on all fonts in case the text render mode changed (anti-aliasing etc)
void adjust_fonts_for_render_mode(bool aa_mode);
// Free particular font's data
void wfreefont(size_t fontNumber);
// Free all fonts data
void free_all_fonts();
// SplitLines class represents a list of lines and is meant to reduce
// subsequent memory (de)allocations if used often during game loops
// and drawing. For that reason it is not equivalent to std::vector,
// but keeps constructed String buffers intact for most time.
// TODO: implement proper strings pool.
class SplitLines
{
public:
// Number of lines currently in use (may be fewer than pooled buffers)
inline size_t Count() const { return _count; }
inline const Common::String &operator[](size_t i) const { return _pool[i]; }
inline Common::String &operator[](size_t i) { return _pool[i]; }
// Clears both the line count and the pooled String buffers
inline void Clear() { _pool.clear(); _count = 0; }
// Resets the line count but keeps pooled String buffers for reuse
inline void Reset() { _count = 0; }
// Appends a line, reusing a pooled String buffer when one is available
inline void Add(const char *cstr)
{
if (_pool.size() == _count) _pool.resize(_count + 1);
_pool[_count++].SetString(cstr);
}
// An auxiliary line processing buffer
std::vector<char> LineBuf;
private:
std::vector<Common::String> _pool;
size_t _count; // actual number of lines in use
};
// Break up the text into lines restricted by the given width;
// returns number of lines, or 0 if text cannot be split well to fit in this width
size_t split_lines(const char *texx, SplitLines &lines, int width, int fontNumber, size_t max_lines = -1);
namespace AGS { namespace Common { extern SplitLines Lines; } }
#endif // __AC_FONT_H
|
ivan-mogilko/ags-refactoring
|
Common/font/ttffontrenderer.h
|
<reponame>ivan-mogilko/ags-refactoring
//=============================================================================
//
// Adventure Game Studio (AGS)
//
// Copyright (C) 1999-2011 <NAME> and 2011-20xx others
// The full list of copyright holders can be found in the Copyright.txt
// file, which is part of this source code distribution.
//
// The AGS source code is provided under the Artistic License 2.0.
// A copy of this license can be found in the file License.txt and at
// http://www.opensource.org/licenses/artistic-license-2.0.php
//
//=============================================================================
#ifndef __AC_TTFFONTRENDERER_H
#define __AC_TTFFONTRENDERER_H
#include <map>
#include "font/agsfontrenderer.h"
#include "util/string.h"
struct ALFONT_FONT;
class TTFFontRenderer : public IAGSFontRenderer, public IAGSFontRenderer2 {
public:
// IAGSFontRenderer implementation
bool LoadFromDisk(int fontNumber, int fontSize) override;
void FreeMemory(int fontNumber) override;
// TTF fonts always report extended character support
bool SupportsExtendedCharacters(int fontNumber) override { return true; }
int GetTextWidth(const char *text, int fontNumber) override;
int GetTextHeight(const char *text, int fontNumber) override;
void RenderText(const char *text, int fontNumber, BITMAP *destination, int x, int y, int colour) override ;
void AdjustYCoordinateForFont(int *ycoord, int fontNumber) override;
void EnsureTextValidForFont(char *text, int fontNumber) override;
// IAGSFontRenderer2 implementation
bool IsBitmapFont() override;
bool LoadFromDiskEx(int fontNumber, int fontSize, const FontRenderParams *params,
FontMetrics *metrics) override;
const char *GetName(int fontNumber) override;
void AdjustFontForAntiAlias(int fontNumber, bool aa_mode) override;
//
// Utility functions
//
// Try load the TTF font using provided point size, and report its metrics
static bool MeasureFontOfPointSize(const AGS::Common::String &filename, int size_pt, FontMetrics *metrics);
// Try load the TTF font, find the point size which results in pixel height
// as close to the requested as possible; report its metrics
static bool MeasureFontOfPixelHeight(const AGS::Common::String &filename, int pixel_height, FontMetrics *metrics);
private:
// Per-font data: the loaded ALFONT handle plus the parameters it was loaded with
struct FontData
{
ALFONT_FONT *AlFont;
FontRenderParams Params;
};
std::map<int, FontData> _fontData; // loaded fonts, keyed by font number
};
#endif // __AC_TTFFONTRENDERER_H
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.