type
stringclasses 5
values | content
stringlengths 9
163k
|
|---|---|
functions
|
errval_t mem_allocate_call__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, uint8_t bits, mem_genpaddr_t minbase, mem_genpaddr_t maxlimit)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_allocate_response__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t ret, struct capref mem_cap)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_steal_call__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, uint8_t bits, mem_genpaddr_t minbase, mem_genpaddr_t maxlimit)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_steal_response__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t ret, struct capref mem_cap)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_available_call__lmp_send(struct mem_binding *_binding, struct event_closure _continuation)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_available_response__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, mem_genpaddr_t mem_avail, mem_genpaddr_t mem_total)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_free_monitor_call__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, struct capref mem_cap, mem_genpaddr_t base, uint8_t bits)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_free_monitor_response__lmp_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t err)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
void mem_lmp_rx_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_lmp_binding *b = arg;
errval_t err;
struct lmp_recv_msg msg = LMP_RECV_MSG_INIT;
struct capref cap;
struct event_closure recv_closure = (struct event_closure){ .handler = mem_lmp_rx_handler, .arg = arg }
|
functions
|
number
switch (_binding->rx_msgnum) {
case mem_allocate_call__msgnum:
switch (_binding->rx_msg_fragment) {
case 0:
// check length
if (((msg.buf).msglen) > 4) {
(_binding->error_handler)(_binding, FLOUNDER_ERR_RX_INVALID_LENGTH);
goto out;
}
|
functions
|
bool mem_lmp_can_send(struct mem_binding *b)
{
    // A message may be started only when the send side is idle:
    // tx_msgnum == 0 means no outgoing message is in progress.
    if (b->tx_msgnum != 0) {
        return false;
    }
    return true;
}
|
functions
|
errval_t mem_lmp_register_send(struct mem_binding *b, struct waitset *ws, struct event_closure _continuation)
{
    // Queue the continuation on the register channel-state; the final
    // argument tells the support code whether the binding can already
    // send, in which case the event may be delivered immediately.
    bool ready = mem_lmp_can_send(b);
    errval_t err = flounder_support_register(ws, &b->register_chanstate, _continuation, ready);
    return err;
}
|
functions
|
void mem_lmp_default_error_handler(struct mem_binding *b, errval_t err)
{
    // Last-resort handler installed by mem_lmp_init: if the application
    // never set its own error handler, report the error and terminate.
    (void)b;
    DEBUG_ERR(err, "asynchronous error in Flounder-generated mem lmp binding (default handler)");
    abort();
}
|
functions
|
errval_t mem_lmp_change_waitset(struct mem_binding *_binding, struct waitset *ws)
{
    struct mem_lmp_binding *lb = (struct mem_lmp_binding *)_binding;

    // First move any pending register / TX-continuation notifications
    // over to the new waitset.
    flounder_support_migrate_notify(&_binding->register_chanstate, ws);
    flounder_support_migrate_notify(&_binding->tx_cont_chanstate, ws);

    // Record the new waitset on the common binding state.
    _binding->waitset = ws;

    // Finally migrate the LMP channel's receive and send notifications.
    lmp_chan_migrate_recv(&lb->chan, ws);
    lmp_chan_migrate_send(&lb->chan, ws);

    return SYS_ERR_OK;
}
|
functions
|
errval_t mem_lmp_control(struct mem_binding *_binding, idc_control_t control)
{
    // Translate the generic IDC control request into LMP send flags and
    // store them on the LMP-specific part of the binding.
    struct mem_lmp_binding *lb = (struct mem_lmp_binding *)_binding;
    lb->flags = idc_control_to_lmp_flags(control, lb->flags);
    return SYS_ERR_OK;
}
|
functions
|
void mem_lmp_init(struct mem_lmp_binding *b, struct waitset *waitset)
{
(b->b).st = NULL;
(b->b).waitset = waitset;
event_mutex_init(&((b->b).mutex), waitset);
(b->b).can_send = mem_lmp_can_send;
(b->b).register_send = mem_lmp_register_send;
(b->b).error_handler = mem_lmp_default_error_handler;
(b->b).tx_vtbl = mem_lmp_tx_vtbl;
memset(&((b->b).rx_vtbl), 0, sizeof((b->b).rx_vtbl));
flounder_support_waitset_chanstate_init(&((b->b).register_chanstate));
flounder_support_waitset_chanstate_init(&((b->b).tx_cont_chanstate));
(b->b).tx_msgnum = 0;
(b->b).rx_msgnum = 0;
(b->b).tx_msg_fragment = 0;
(b->b).rx_msg_fragment = 0;
(b->b).tx_str_pos = 0;
(b->b).rx_str_pos = 0;
(b->b).tx_str_len = 0;
(b->b).rx_str_len = 0;
(b->b).bind_cont = NULL;
lmp_chan_init(&(b->chan));
(b->b).change_waitset = mem_lmp_change_waitset;
(b->b).control = mem_lmp_control;
b->flags = LMP_SEND_FLAGS_DEFAULT;
}
|
functions
|
void mem_lmp_destroy(struct mem_lmp_binding *b)
{
flounder_support_waitset_chanstate_destroy(&((b->b).register_chanstate));
flounder_support_waitset_chanstate_destroy(&((b->b).tx_cont_chanstate));
lmp_chan_destroy(&(b->chan));
}
|
functions
|
void mem_lmp_bind_continuation(void *st, errval_t err, struct lmp_chan *chan)
{
struct mem_lmp_binding *b = st;
if (err_is_ok(err)) {
// allocate a cap receive slot
err = lmp_chan_alloc_recv_slot(chan);
if (err_is_fail(err)) {
err = err_push(err, LIB_ERR_LMP_ALLOC_RECV_SLOT);
goto fail;
}
|
functions
|
errval_t mem_lmp_bind(struct mem_lmp_binding *b, iref_t iref, mem_bind_continuation_fn *_continuation, void *st, struct waitset *waitset, idc_bind_flags_t flags, size_t lmp_buflen)
{
errval_t err;
mem_lmp_init(b, waitset);
(b->b).st = st;
(b->b).bind_cont = _continuation;
err = lmp_chan_bind(&(b->chan), (struct lmp_bind_continuation){ .handler = mem_lmp_bind_continuation, .st = b }
|
functions
|
errval_t mem_lmp_connect_handler(void *st, size_t buflen_words, struct capref endpoint, struct lmp_chan **retchan)
{
struct mem_export *e = st;
errval_t err;
// allocate storage for binding
struct mem_lmp_binding *b = malloc(sizeof(struct mem_lmp_binding ));
if (b == NULL) {
return(LIB_ERR_MALLOC_FAIL);
}
|
functions
|
void mem_ump_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_ump_binding *b = arg;
errval_t err;
err = SYS_ERR_OK;
volatile struct ump_message *msg;
struct ump_control ctrl;
bool tx_notify = false;
// do we need to (and can we) send a cap ack?
if ((((b->ump_state).capst).tx_cap_ack) && flounder_stub_ump_can_send(&(b->ump_state))) {
flounder_stub_ump_send_cap_ack(&(b->ump_state));
((b->ump_state).capst).tx_cap_ack = false;
tx_notify = true;
}
|
functions
|
number
switch (_binding->tx_msgnum) {
case 0:
break;
case mem_allocate_call__msgnum:
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// check if we can send another message
if (!flounder_stub_ump_can_send(&(b->ump_state))) {
tx_notify = true;
break;
}
|
functions
|
fragment
switch (_binding->tx_msg_fragment) {
case 0:
// check if we can send another message
if (!flounder_stub_ump_can_send(&(b->ump_state))) {
tx_notify = true;
break;
}
|
functions
|
fragment
switch (_binding->tx_msg_fragment) {
case 0:
// check if we can send another message
if (!flounder_stub_ump_can_send(&(b->ump_state))) {
tx_notify = true;
break;
}
|
functions
|
fragment
switch (_binding->tx_msg_fragment) {
case 0:
// check if we can send another message
if (!flounder_stub_ump_can_send(&(b->ump_state))) {
tx_notify = true;
break;
}
|
functions
|
fragment
switch (_binding->tx_msg_fragment) {
case 0:
// check if we can send another message
if (!flounder_stub_ump_can_send(&(b->ump_state))) {
tx_notify = true;
break;
}
|
functions
|
fragment
switch (_binding->tx_msg_fragment) {
case 0:
// check if we can send another message
if (!flounder_stub_ump_can_send(&(b->ump_state))) {
tx_notify = true;
break;
}
|
functions
|
fragment
switch (_binding->tx_msg_fragment) {
case 0:
// check if we can send another message
if (!flounder_stub_ump_can_send(&(b->ump_state))) {
tx_notify = true;
break;
}
|
functions
|
fragment
switch (_binding->tx_msg_fragment) {
case 0:
// check if we can send another message
if (!flounder_stub_ump_can_send(&(b->ump_state))) {
tx_notify = true;
break;
}
|
functions
|
necessary
if (tx_notify) {
}
|
functions
|
void mem_ump_cap_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_ump_binding *b = arg;
errval_t err;
err = SYS_ERR_OK;
assert(((b->ump_state).capst).rx_cap_ack);
assert(((b->ump_state).capst).monitor_mutex_held);
// Switch on current outgoing message
switch (_binding->tx_msgnum) {
case mem_allocate_response__msgnum:
// Switch on current outgoing cap
switch (((b->ump_state).capst).tx_capnum) {
case 0:
err = flounder_stub_send_cap(&((b->ump_state).capst), ((b->ump_state).chan).monitor_binding, ((b->ump_state).chan).monitor_id, ((_binding->tx_union).allocate_response).mem_cap, true, mem_ump_cap_send_handler);
if (err_is_fail(err)) {
(_binding->error_handler)(_binding, err);
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->register_chanstate));
flounder_support_deregister_chan(&(_binding->tx_cont_chanstate));
break;
}
|
functions
|
void mem_ump_rx_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_ump_binding *b = arg;
errval_t err;
err = SYS_ERR_OK;
volatile struct ump_message *msg;
int msgnum;
while (true) {
// try to retrieve a message from the channel
err = ump_chan_recv(&((b->ump_state).chan), &msg);
// check if we succeeded
if (err_is_fail(err)) {
if (err_no(err) == LIB_ERR_NO_UMP_MSG) {
// no message
break;
}
|
functions
|
message
if (msgnum == FL_UMP_CAP_ACK) {
assert(!(((b->ump_state).capst).rx_cap_ack));
((b->ump_state).capst).rx_cap_ack = true;
if (((b->ump_state).capst).monitor_mutex_held) {
mem_ump_cap_send_handler(b);
}
|
functions
|
number
switch (_binding->rx_msgnum) {
case mem_allocate_call__msgnum:
switch (_binding->rx_msg_fragment) {
case 0:
((_binding->rx_union).allocate_call).bits = (((msg->data)[0]) & 0xff);
((_binding->rx_union).allocate_call).minbase = ((msg->data)[1]);
((_binding->rx_union).allocate_call).maxlimit = ((msg->data)[2]);
FL_DEBUG("ump RX mem.allocate_call\n");
assert(((_binding->rx_vtbl).allocate_call) != NULL);
((_binding->rx_vtbl).allocate_call)(_binding, ((_binding->rx_union).allocate_call).bits, ((_binding->rx_union).allocate_call).minbase, ((_binding->rx_union).allocate_call).maxlimit);
_binding->rx_msgnum = 0;
break;
default:
(_binding->error_handler)(_binding, FLOUNDER_ERR_INVALID_STATE);
goto out;
}
|
functions
|
void mem_ump_cap_rx_handler(void *arg, errval_t success, struct capref cap, uint32_t capid)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_ump_binding *b = arg;
errval_t err;
err = SYS_ERR_OK;
assert(capid == (((b->ump_state).capst).rx_capnum));
// Check if there's an associated error
// FIXME: how should we report this to the user? at present we just deliver a NULL capref
if (err_is_fail(success)) {
DEBUG_ERR(err, "error in cap transfer");
}
|
functions
|
message
switch (_binding->rx_msgnum) {
case mem_allocate_response__msgnum:
// Switch on current incoming cap
switch ((((b->ump_state).capst).rx_capnum)++) {
case 0:
((_binding->rx_union).allocate_response).mem_cap = cap;
if ((_binding->rx_msg_fragment) == 1) {
FL_DEBUG("ump RX mem.allocate_response\n");
assert(((_binding->rx_vtbl).allocate_response) != NULL);
((_binding->rx_vtbl).allocate_response)(_binding, ((_binding->rx_union).allocate_response).ret, ((_binding->rx_union).allocate_response).mem_cap);
_binding->rx_msgnum = 0;
// register for receive notification
err = ump_chan_register_recv(&((b->ump_state).chan), _binding->waitset, (struct event_closure){ .handler = mem_ump_rx_handler, .arg = _binding }
|
functions
|
void mem_ump_monitor_mutex_cont(void *arg)
{
struct mem_ump_binding *b = arg;
assert(!(((b->ump_state).capst).monitor_mutex_held));
((b->ump_state).capst).monitor_mutex_held = true;
if (((b->ump_state).capst).rx_cap_ack) {
mem_ump_cap_send_handler(b);
}
|
functions
|
errval_t mem_allocate_call__ump_send(struct mem_binding *_binding, struct event_closure _continuation, uint8_t bits, mem_genpaddr_t minbase, mem_genpaddr_t maxlimit)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_allocate_response__ump_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t ret, struct capref mem_cap)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_steal_call__ump_send(struct mem_binding *_binding, struct event_closure _continuation, uint8_t bits, mem_genpaddr_t minbase, mem_genpaddr_t maxlimit)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_steal_response__ump_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t ret, struct capref mem_cap)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_available_call__ump_send(struct mem_binding *_binding, struct event_closure _continuation)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_available_response__ump_send(struct mem_binding *_binding, struct event_closure _continuation, mem_genpaddr_t mem_avail, mem_genpaddr_t mem_total)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_free_monitor_call__ump_send(struct mem_binding *_binding, struct event_closure _continuation, struct capref mem_cap, mem_genpaddr_t base, uint8_t bits)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_free_monitor_response__ump_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t err)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
bool mem_ump_can_send(struct mem_binding *b)
{
    // A message may be started only when the send side is idle:
    // tx_msgnum == 0 means no outgoing message is in progress.
    if (b->tx_msgnum != 0) {
        return false;
    }
    return true;
}
|
functions
|
errval_t mem_ump_register_send(struct mem_binding *b, struct waitset *ws, struct event_closure _continuation)
{
    // Queue the continuation on the register channel-state; the final
    // argument tells the support code whether the binding can already
    // send, in which case the event may be delivered immediately.
    bool ready = mem_ump_can_send(b);
    errval_t err = flounder_support_register(ws, &b->register_chanstate, _continuation, ready);
    return err;
}
|
functions
|
void mem_ump_default_error_handler(struct mem_binding *b, errval_t err)
{
    // Last-resort handler installed at UMP binding setup: if the
    // application never set its own error handler, report and terminate.
    (void)b;
    DEBUG_ERR(err, "asynchronous error in Flounder-generated mem ump binding (default handler)");
    abort();
}
|
functions
|
errval_t mem_ump_change_waitset(struct mem_binding *_binding, struct waitset *ws)
{
struct mem_ump_binding *b = (void *)(_binding);
errval_t err;
// change waitset on private monitor binding if we have one
if ((((b->ump_state).chan).monitor_binding) != get_monitor_binding()) {
err = flounder_support_change_monitor_waitset(((b->ump_state).chan).monitor_binding, ws);
if (err_is_fail(err)) {
return(err_push(err, FLOUNDER_ERR_CHANGE_MONITOR_WAITSET));
}
|
functions
|
errval_t mem_ump_control(struct mem_binding *_binding, idc_control_t control)
{
    // The UMP backend supports no control flags; accept and ignore.
    (void)_binding;
    (void)control;
    return SYS_ERR_OK;
}
|
functions
|
void mem_ump_destroy(struct mem_ump_binding *b)
{
flounder_support_waitset_chanstate_destroy(&((b->b).register_chanstate));
flounder_support_waitset_chanstate_destroy(&((b->b).tx_cont_chanstate));
ump_chan_destroy(&((b->ump_state).chan));
}
|
functions
|
void mem_ump_bind_continuation(void *st, errval_t err, struct ump_chan *chan, struct capref notify_cap)
{
struct mem_binding *_binding = st;
struct mem_ump_binding *b = st;
if (err_is_ok(err)) {
// notify cap ignored
// setup cap handlers
(((b->ump_state).chan).cap_handlers).st = b;
(((b->ump_state).chan).cap_handlers).cap_receive_handler = mem_ump_cap_rx_handler;
// register for receive notification
err = ump_chan_register_recv(&((b->ump_state).chan), _binding->waitset, (struct event_closure){ .handler = mem_ump_rx_handler, .arg = _binding }
|
functions
|
errval_t mem_ump_init(struct mem_ump_binding *b, struct waitset *waitset, volatile void *inbuf, size_t inbufsize, volatile void *outbuf, size_t outbufsize)
{
errval_t err;
struct mem_binding *_binding = &(b->b);
(b->b).st = NULL;
(b->b).waitset = waitset;
event_mutex_init(&((b->b).mutex), waitset);
(b->b).can_send = mem_ump_can_send;
(b->b).register_send = mem_ump_register_send;
(b->b).error_handler = mem_ump_default_error_handler;
(b->b).tx_vtbl = mem_ump_tx_vtbl;
memset(&((b->b).rx_vtbl), 0, sizeof((b->b).rx_vtbl));
flounder_support_waitset_chanstate_init(&((b->b).register_chanstate));
flounder_support_waitset_chanstate_init(&((b->b).tx_cont_chanstate));
(b->b).tx_msgnum = 0;
(b->b).rx_msgnum = 0;
(b->b).tx_msg_fragment = 0;
(b->b).rx_msg_fragment = 0;
(b->b).tx_str_pos = 0;
(b->b).rx_str_pos = 0;
(b->b).tx_str_len = 0;
(b->b).rx_str_len = 0;
(b->b).bind_cont = NULL;
flounder_stub_ump_state_init(&(b->ump_state), b);
err = ump_chan_init(&((b->ump_state).chan), inbuf, inbufsize, outbuf, outbufsize);
if (err_is_fail(err)) {
mem_ump_destroy(b);
return(err_push(err, LIB_ERR_UMP_CHAN_INIT));
}
|
functions
|
void mem_ump_new_monitor_binding_continuation(void *st, errval_t err, struct monitor_binding *monitor_binding)
{
struct mem_binding *_binding = st;
struct mem_ump_binding *b = st;
if (err_is_fail(err)) {
err = err_push(err, LIB_ERR_MONITOR_CLIENT_BIND);
goto out;
}
|
functions
|
errval_t mem_ump_bind(struct mem_ump_binding *b, iref_t iref, mem_bind_continuation_fn *_continuation, void *st, struct waitset *waitset, idc_bind_flags_t flags, size_t inchanlen, size_t outchanlen)
{
errval_t err;
(b->b).st = NULL;
(b->b).waitset = waitset;
event_mutex_init(&((b->b).mutex), waitset);
(b->b).can_send = mem_ump_can_send;
(b->b).register_send = mem_ump_register_send;
(b->b).error_handler = mem_ump_default_error_handler;
(b->b).tx_vtbl = mem_ump_tx_vtbl;
memset(&((b->b).rx_vtbl), 0, sizeof((b->b).rx_vtbl));
flounder_support_waitset_chanstate_init(&((b->b).register_chanstate));
flounder_support_waitset_chanstate_init(&((b->b).tx_cont_chanstate));
(b->b).tx_msgnum = 0;
(b->b).rx_msgnum = 0;
(b->b).tx_msg_fragment = 0;
(b->b).rx_msg_fragment = 0;
(b->b).tx_str_pos = 0;
(b->b).rx_str_pos = 0;
(b->b).tx_str_len = 0;
(b->b).rx_str_len = 0;
(b->b).bind_cont = NULL;
flounder_stub_ump_state_init(&(b->ump_state), b);
(b->b).change_waitset = mem_ump_change_waitset;
(b->b).control = mem_ump_control;
(b->b).st = st;
(b->b).bind_cont = _continuation;
b->iref = iref;
b->inchanlen = inchanlen;
b->outchanlen = outchanlen;
// do we need a new monitor binding?
if (flags & IDC_BIND_FLAG_RPC_CAP_TRANSFER) {
err = monitor_client_new_binding(mem_ump_new_monitor_binding_continuation, b, waitset, DEFAULT_LMP_BUF_WORDS);
}
|
functions
|
errval_t mem_ump_connect_handler(void *st, struct monitor_binding *mb, uintptr_t mon_id, struct capref frame, size_t inchanlen, size_t outchanlen, struct capref notify_cap)
{
struct mem_export *e = st;
errval_t err;
// allocate storage for binding
struct mem_ump_binding *b = malloc(sizeof(struct mem_ump_binding ));
if (b == NULL) {
return(LIB_ERR_MALLOC_FAIL);
}
|
functions
|
void mem_multihop_cap_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
// Switch on current outgoing message
switch (_binding->tx_msgnum) {
case mem_allocate_response__msgnum:
// Switch on current outgoing cap
switch ((mb->capst).tx_capnum) {
case 0:
err = multihop_send_capability(&(mb->chan), MKCONT(mem_multihop_cap_send_handler, _binding), &(mb->capst), ((_binding->tx_union).allocate_response).mem_cap);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_multihop_cap_send_handler, _binding));
assert(err_is_ok(err));
}
|
functions
|
void mem_allocate_call__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 24;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode,ArgFieldFragment uint8 [NamedField "bits"] 0],[ArgFieldFragment uint64 [NamedField "minbase"] 0],[ArgFieldFragment uint64 [NamedField "maxlimit"] 0]]
msg[0] = (mem_allocate_call__msgnum | (((uint64_t )(((_binding->tx_union).allocate_call).bits)) << 16));
msg[1] = (((_binding->tx_union).allocate_call).minbase);
msg[2] = (((_binding->tx_union).allocate_call).maxlimit);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_allocate_call__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_allocate_call__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
|
functions
|
void mem_allocate_response__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 16;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode],[ArgFieldFragment uint64 [NamedField "ret"] 0]]
msg[0] = mem_allocate_response__msgnum;
msg[1] = (((_binding->tx_union).allocate_response).ret);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_allocate_response__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_allocate_response__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
|
functions
|
void mem_steal_call__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 24;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode,ArgFieldFragment uint8 [NamedField "bits"] 0],[ArgFieldFragment uint64 [NamedField "minbase"] 0],[ArgFieldFragment uint64 [NamedField "maxlimit"] 0]]
msg[0] = (mem_steal_call__msgnum | (((uint64_t )(((_binding->tx_union).steal_call).bits)) << 16));
msg[1] = (((_binding->tx_union).steal_call).minbase);
msg[2] = (((_binding->tx_union).steal_call).maxlimit);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_steal_call__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_steal_call__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
|
functions
|
void mem_steal_response__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 16;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode],[ArgFieldFragment uint64 [NamedField "ret"] 0]]
msg[0] = mem_steal_response__msgnum;
msg[1] = (((_binding->tx_union).steal_response).ret);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_steal_response__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_steal_response__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
|
functions
|
void mem_available_call__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 8;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode]]
msg[0] = mem_available_call__msgnum;
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_available_call__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_available_call__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
|
functions
|
void mem_available_response__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 24;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode],[ArgFieldFragment uint64 [NamedField "mem_avail"] 0],[ArgFieldFragment uint64 [NamedField "mem_total"] 0]]
msg[0] = mem_available_response__msgnum;
msg[1] = (((_binding->tx_union).available_response).mem_avail);
msg[2] = (((_binding->tx_union).available_response).mem_total);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_available_response__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_available_response__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
|
functions
|
void mem_free_monitor_call__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 16;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode,ArgFieldFragment uint8 [NamedField "bits"] 0],[ArgFieldFragment uint64 [NamedField "base"] 0]]
msg[0] = (mem_free_monitor_call__msgnum | (((uint64_t )(((_binding->tx_union).free_monitor_call).bits)) << 16));
msg[1] = (((_binding->tx_union).free_monitor_call).base);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_free_monitor_call__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_free_monitor_call__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
|
functions
|
void mem_free_monitor_response__multihop_send_handler(void *arg)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
errval_t err = SYS_ERR_OK;
uint64_t *msg;
uint64_t msg_size;
// Switch on current outgoing message fragment
switch (_binding->tx_msg_fragment) {
case 0:
// Calculate size of message & allocate it
msg_size = 16;
assert(msg_size != 0);
msg = malloc(msg_size);
// copy words from fixed size fragments
// [[MsgCode],[ArgFieldFragment uint64 [NamedField "err"] 0]]
msg[0] = mem_free_monitor_response__msgnum;
msg[1] = (((_binding->tx_union).free_monitor_response).err);
// copy strings
// copy buffers
// try to send!
(_binding->tx_msg_fragment)++;
mb->message = msg;
err = multihop_send_message(&(mb->chan), MKCONT(mem_free_monitor_response__multihop_send_handler, _binding), msg, msg_size);
if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
(_binding->tx_msg_fragment)--;
err = multihop_chan_register_send(&(mb->chan), _binding->waitset, MKCONT(mem_free_monitor_response__multihop_send_handler, _binding));
assert(err_is_ok(err));
}
|
functions
|
void mem_multihop_caps_rx_handler(void *arg, errval_t success, struct capref cap, uint32_t capid)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
assert(capid == ((mb->capst).rx_capnum));
// Check if there's an associated error
// FIXME: how should we report this to the user? at present we just deliver a NULL capref
if (err_is_fail(success)) {
DEBUG_ERR(success, "could not send cap over multihop channel");
}
|
functions
|
message
switch (_binding->rx_msgnum) {
case mem_allocate_response__msgnum:
// Switch on current incoming cap
switch (((mb->capst).rx_capnum)++) {
case 0:
((_binding->rx_union).allocate_response).mem_cap = cap;
if (mb->trigger_chan) {
mb->trigger_chan = false;
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
}
|
functions
|
errval_t mem_allocate_call__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, uint8_t bits, mem_genpaddr_t minbase, mem_genpaddr_t maxlimit)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_allocate_response__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t ret, struct capref mem_cap)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_steal_call__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, uint8_t bits, mem_genpaddr_t minbase, mem_genpaddr_t maxlimit)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_steal_response__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t ret, struct capref mem_cap)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_available_call__multihop_send(struct mem_binding *_binding, struct event_closure _continuation)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_available_response__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, mem_genpaddr_t mem_avail, mem_genpaddr_t mem_total)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_free_monitor_call__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, struct capref mem_cap, mem_genpaddr_t base, uint8_t bits)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
errval_t mem_free_monitor_response__multihop_send(struct mem_binding *_binding, struct event_closure _continuation, mem_errval_t err)
{
// check that we can accept an outgoing message
if ((_binding->tx_msgnum) != 0) {
return(FLOUNDER_ERR_TX_BUSY);
}
|
functions
|
void mem_multihop_rx_handler(void *arg, uint8_t *message, size_t message_len)
{
// Get the binding state from our argument pointer
struct mem_binding *_binding = arg;
struct mem_multihop_binding *mb = arg;
uint8_t *msg;
// if this a dummy message?
if (message_len == 0) {
if (mb->trigger_chan) {
mb->trigger_chan = false;
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
}
|
functions
|
number
switch (_binding->rx_msgnum) {
case mem_allocate_call__msgnum:
// store fixed size fragments
((_binding->rx_union).allocate_call).bits = (((((uint64_t *)(message))[0]) >> 16) & 0xff);
((_binding->rx_union).allocate_call).minbase = (((uint64_t *)(message))[1]);
((_binding->rx_union).allocate_call).maxlimit = (((uint64_t *)(message))[2]);
msg = (message + 24);
// receive strings
// receive buffers
free(message);
if (mb->trigger_chan) {
mb->trigger_chan = false;
_binding->tx_msgnum = 0;
flounder_support_trigger_chan(&(_binding->tx_cont_chanstate));
flounder_support_trigger_chan(&(_binding->register_chanstate));
}
|
functions
|
// A message can be sent iff no transmission is currently in progress on the
// binding AND the multi-hop channel's flow-control window still has room.
bool mem_multihop_can_send(struct mem_binding *b)
{
    struct mem_multihop_binding *mhb = (struct mem_multihop_binding *)b;

    if (b->tx_msgnum != 0) {
        return false;   // a send is already in flight
    }
    return !multihop_chan_is_window_full(&mhb->chan);
}
|
functions
|
// Register _continuation on ws to fire once this binding can accept a new
// outgoing message (fires immediately if it can send right now).
errval_t mem_multihop_register_send(struct mem_binding *b, struct waitset *ws, struct event_closure _continuation)
{
    bool ready_now = mem_multihop_can_send(b);
    return flounder_support_register(ws, &b->register_chanstate,
                                     _continuation, ready_now);
}
|
functions
|
// Default asynchronous error handler installed by mem_multihop_init().
// Generated bindings have no sensible recovery path for async channel
// errors, so this logs the error and aborts the dispatcher; applications
// that want to survive such errors override b->error_handler.
void mem_multihop_default_error_handler(struct mem_binding *b, errval_t err)
{
    DEBUG_ERR(err, "asynchronous error in Flounder-generated mem multihop binding (default handler)");
    abort();
}
|
functions
|
// Move this binding — and the multi-hop channel beneath it — onto a new
// waitset.  Returns the result of re-registering the channel.
errval_t mem_multihop_change_waitset(struct mem_binding *_binding, struct waitset *ws)
{
    struct mem_multihop_binding *mhb = (struct mem_multihop_binding *)_binding;

    // Record the new waitset on the generic binding state first, then
    // re-register the underlying multi-hop channel on it.
    _binding->waitset = ws;
    return multihop_chan_change_waitset(&mhb->chan, ws);
}
|
functions
|
// IDC control hook for the multi-hop backend.  No control flags are
// implemented, so every request trivially succeeds.
errval_t mem_multihop_control(struct mem_binding *_binding, idc_control_t control)
{
    (void)_binding;   // unused: no per-binding control state
    (void)control;    // unused: no flags supported
    return SYS_ERR_OK;
}
|
functions
|
void mem_multihop_init(struct mem_multihop_binding *mb, struct waitset *waitset)
{
(mb->b).st = NULL;
(mb->b).waitset = waitset;
event_mutex_init(&((mb->b).mutex), waitset);
(mb->b).can_send = mem_multihop_can_send;
(mb->b).register_send = mem_multihop_register_send;
(mb->b).error_handler = mem_multihop_default_error_handler;
(mb->b).tx_vtbl = mem_multihop_tx_vtbl;
memset(&((mb->b).rx_vtbl), 0, sizeof((mb->b).rx_vtbl));
flounder_support_waitset_chanstate_init(&((mb->b).register_chanstate));
flounder_support_waitset_chanstate_init(&((mb->b).tx_cont_chanstate));
(mb->b).tx_msgnum = 0;
(mb->b).rx_msgnum = 0;
(mb->b).tx_msg_fragment = 0;
(mb->b).rx_msg_fragment = 0;
(mb->b).tx_str_pos = 0;
(mb->b).rx_str_pos = 0;
(mb->b).tx_str_len = 0;
(mb->b).rx_str_len = 0;
(mb->b).bind_cont = NULL;
(mb->b).change_waitset = mem_multihop_change_waitset;
(mb->b).control = mem_multihop_control;
mb->trigger_chan = false;
}
|
functions
|
void mem_multihop_destroy(struct mem_multihop_binding *mb)
{
flounder_support_waitset_chanstate_destroy(&((mb->b).register_chanstate));
flounder_support_waitset_chanstate_destroy(&((mb->b).tx_cont_chanstate));
assert(! "NYI!");
}
|
functions
|
void mem_multihop_bind_continuation(void *st, errval_t err, struct multihop_chan *chan)
{
struct mem_multihop_binding *mb = st;
if (err_is_ok(err)) {
// set receive handlers
multihop_chan_set_receive_handler(&(mb->chan), (struct multihop_receive_handler){ .handler = mem_multihop_rx_handler, .arg = st }
|
functions
|
errval_t mem_multihop_bind(struct mem_multihop_binding *mb, iref_t iref, mem_bind_continuation_fn *_continuation, void *st, struct waitset *waitset, idc_bind_flags_t flags)
{
errval_t err;
mem_multihop_init(mb, waitset);
(mb->b).st = st;
(mb->b).bind_cont = _continuation;
err = multihop_chan_bind(&(mb->chan), (struct multihop_bind_continuation){ .handler = mem_multihop_bind_continuation, .st = mb }
|
functions
|
errval_t mem_multihop_connect_handler(void *st, multihop_vci_t vci)
{
struct mem_export *e = st;
errval_t err;
// allocate storage for binding
struct mem_multihop_binding *mb = malloc(sizeof(struct mem_multihop_binding ));
if (mb == NULL) {
return(LIB_ERR_MALLOC_FAIL);
}
|
includes
|
#include <stdio.h>
|
includes
|
#include <stdint.h>
|
includes
|
#include <ibcrypt/chacha.h>
|
includes
|
#include <ibcrypt/rand.h>
|
includes
|
#include <ibcrypt/sha256.h>
|
includes
|
#include <ibcrypt/zfree.h>
|
includes
|
#include <libibur/util.h>
|
includes
|
#include <libibur/endian.h>
|
functions
|
int write_datafile(char *path, void *arg, void *data, struct format_desc *f) {
int ret = -1;
uint8_t *payload = NULL;
uint64_t payload_len = 0;
uint64_t payload_num = 0;
uint8_t *prefix = NULL;
uint64_t pref_len = 0;
uint8_t symm_key[0x20];
uint8_t hmac_key[0x20];
uint8_t enc_key[0x20];
FILE *ff = fopen(path, "wb");
if(ff == NULL) {
ERR("failed to open file for writing: %s", path);
goto err;
}
|
functions
|
int read_datafile(char *path, void *arg, void **data, struct format_desc *f) {
int ret = -1;
uint8_t *payload = NULL;
uint64_t payload_len = 0;
uint64_t payload_num = 0;
uint8_t *prefix = NULL;
uint64_t pref_len = 0;
uint8_t symm_key[0x20];
uint8_t hmac_key[0x20];
uint8_t enc_key[0x20];
uint8_t mac1[0x20];
uint8_t mac2c[0x20];
uint8_t mac2f[0x20];
FILE *ff = fopen(path, "rb");
if(ff == NULL) {
ERR("failed to open file for reading: %s", path);
goto err;
}
|
includes
|
#include <stdlib.h>
|
defines
|
#define APP_MAC_ADDR "00-AB-CD-EF-04-07"
|
defines
|
#define APP_USE_DHCP ENABLED
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.