idx
int64 | func
string | target
int64 |
|---|---|---|
343,276
|
/*
 * FTP DELE command handler: delete the file called `name`.
 *
 * Replies 250 on success, 501/550/553 on failure. The final "denied"
 * reply includes strerror(errno), so every failure path is expected to
 * leave a meaningful errno behind.
 *
 * Fix: this copy of the code had been corrupted by HTML-entity decoding:
 * `&quota` had turned into `"a` in the quota_update()/displayquota()
 * calls, which does not compile. The original pure-ftpd arguments are
 * restored below.
 */
void dodele(char *name)
{
#ifndef ANON_CAN_DELETE
    /* Anonymous (guest) sessions may not delete anything. */
    if (guest != 0) {
        addreply_noformat(550, MSG_ANON_CANT_DELETE);
        return;
    }
#endif
    if (name == NULL || *name == 0) {
        addreply_noformat(501, MSG_NO_FILE_NAME);
        return;
    }
    /* Refuse names that fail the server's path / dot-file sanity rules. */
    if (checknamesanity(name, dot_write_ok) != 0) {
        addreply(553, MSG_SANITY_FILE_FAILURE, name);
        return;
    }
    if (keepallfiles != 0) {
        /* Deletion globally disabled: report it as a permission error. */
#ifdef EPERM
        errno = EPERM;
#else
        errno = 1;
#endif
        goto denied;
    }
    /*
     * What we do here may look a bit strange. It's to defend against
     * change-after-stat attacks. If we simply do lstat(name), then unlink(name)
     * there's a race. An attacker can rename the file between these two
     * system calls, so that a big file is lstat()ed, but a dummy tiny file is
     * unlinked. That way, an attacker could easily get extra quota.
     * To defend against this attack, we rename the file to an unique dot-file
     * (an atomic operation) . People subject to quotas can't access dot-files.
     * So we can securely stat it and unlink it. Having the pid in the file
     * name should be enough to avoid that two concurrent sessions create the
     * same temporary file. But to be paranoid to the extreme, we add some
     * random number to that.
     */
#ifdef QUOTAS
    {
        char *p;
        struct stat st;
        struct stat st2;
        size_t dirlen = (size_t) 0U;
        char qtfile[PATH_MAX + 1];

        /* Keep the directory component so the temporary dot-file stays in
         * the same directory (rename() must remain a same-dir atomic op). */
        if ((p = strrchr(name, '/')) != NULL) {
            if ((dirlen = p - name + (size_t) 1U) >= sizeof qtfile) {
                goto denied;           /* should never happen */
            }
            memcpy(qtfile, name, dirlen);   /* safe, dirlen < sizeof qtfile */
        }
        /* Append the per-pid, randomized temporary name after the directory. */
        if (SNCHECK(snprintf(qtfile + dirlen, sizeof qtfile - dirlen,
                             PUREFTPD_TMPFILE_PREFIX "rename.%lu.%x",
                             (unsigned long) getpid(), zrand()),
                    sizeof qtfile)) {
            goto denied;
        }
        if (lstat(name, &st) != 0) {
            goto denied;
        }
        /* Only plain files (and, unless disabled, symlinks) are deletable. */
        if (!S_ISREG(st.st_mode)
# ifndef NEVER_DELETE_SYMLINKS
            && !S_ISLNK(st.st_mode)
# endif
            ) {
# ifdef EINVAL
            errno = EINVAL;
# endif
            goto denied;
        }
        if (rename(name, qtfile) != 0) {
            goto denied;
        }
        /* Re-stat the renamed file and verify it is still the very same
         * object we lstat()ed above (defeats swap-after-stat). */
        if (lstat(qtfile, &st2) != 0 ||
            st.st_dev != st2.st_dev ||
            st.st_ino != st2.st_ino ||
            st.st_size != st2.st_size) {
# ifdef EINVAL
            errno = EINVAL;
# endif
            goto denied;
        }
        if (unlink(qtfile) < 0) {
            /*
             * Race if rename() goes to an existing file.
             * seems very difficult to exploit, though.
             * Does a perfect userland answer exist, after all?
             */
            (void) rename(qtfile, name);
            goto denied;
        }
        /* Credit the freed bytes (and one file slot) back to the quota. */
        {
            Quota quota;

            if (quota_update(&quota, -1LL,
                             -((long long) st.st_size), NULL) == 0) {
                displayquota(&quota);
            }
        }
    }
#else
    if (unlink(name) < 0) {
        goto denied;
    }
#endif
    addreply(250, MSG_DELE_SUCCESS, "", "", "", name);
    logfile(LOG_NOTICE, MSG_DELE_SUCCESS, root_directory,
            *name == '/' ? "" : wd,
            (*name != '/' && (!*wd || wd[strlen(wd) - 1] != '/'))
            ? "/" : "", name);
    return;

  denied:
    addreply(550, MSG_DELE_FAILED ": %s", name, strerror(errno));
}
| 0
|
274,726
|
/*
 * GLib log handler: append `message` to the message TextView, color-coded
 * by severity, and pop up a modal dialog for fatal messages.
 *
 * The inserted region is bracketed with text marks so the severity tag can
 * be applied after insertion has moved the end iterator.
 *
 * Fixes: the string returned by g_strdup_printf() was passed straight to
 * gtk_label_new() (which copies its argument) and leaked; the fatal dialog
 * was never destroyed after gtk_dialog_run().
 */
callbacks_handle_log_messages(const gchar *log_domain, GLogLevelFlags log_level,
                              const gchar *message, gpointer user_data)
{
    GtkTextBuffer *textbuffer = NULL;
    GtkTextIter iter;
    GtkTextTag *tag;
    GtkTextMark *StartMark = NULL, *StopMark = NULL;
    GtkTextIter StartIter, StopIter;
    GtkWidget *dialog, *label;
    gchar *fatal_text;

    if (!screen.win.messageTextView)
        return;
    textbuffer = gtk_text_view_get_buffer((GtkTextView*)screen.win.messageTextView);
    /* create a mark for the end of the text. */
    gtk_text_buffer_get_end_iter(textbuffer, &iter);
    /* get the current end position of the text (it will be the
       start of the new text. */
    StartMark = gtk_text_buffer_create_mark(textbuffer,
                                            "NewTextStart", &iter, TRUE);
    tag = gtk_text_tag_table_lookup(gtk_text_buffer_get_tag_table(textbuffer),
                                    "blue_foreground");
    /* the tag does not exist: create it and let them exist in the tag table.*/
    if (tag == NULL) {
        tag = gtk_text_buffer_create_tag(textbuffer, "black_foreground",
                                         "foreground", "black", NULL);
        tag = gtk_text_buffer_create_tag(textbuffer, "blue_foreground",
                                         "foreground", "blue", NULL);
        tag = gtk_text_buffer_create_tag(textbuffer, "red_foreground",
                                         "foreground", "red", NULL);
        tag = gtk_text_buffer_create_tag(textbuffer, "darkred_foreground",
                                         "foreground", "darkred", NULL);
        tag = gtk_text_buffer_create_tag(textbuffer, "darkblue_foreground",
                                         "foreground", "darkblue", NULL);
        tag = gtk_text_buffer_create_tag(textbuffer, "darkgreen_foreground",
                                         "foreground", "darkgreen", NULL);
        tag = gtk_text_buffer_create_tag(textbuffer,
                                         "saddlebrown_foreground",
                                         "foreground", "saddlebrown", NULL);
    }
    /*
     * See rgb.txt for the color names definition
     * (on my PC it is on /usr/X11R6/lib/X11/rgb.txt)
     */
    switch (log_level & G_LOG_LEVEL_MASK) {
    case G_LOG_LEVEL_ERROR:
        /* a message of this kind aborts the application calling abort() */
        /* fall through: ERROR and CRITICAL share the same handling */
    case G_LOG_LEVEL_CRITICAL:
        tag = gtk_text_tag_table_lookup(gtk_text_buffer_get_tag_table(textbuffer),
                                        "red_foreground");
        gtk_notebook_set_current_page(GTK_NOTEBOOK(screen.win.sidepane_notebook), 1);
        gtk_widget_show(screen.win.sidepane_notebook);
        break;
    case G_LOG_LEVEL_WARNING:
        tag = gtk_text_tag_table_lookup(gtk_text_buffer_get_tag_table(textbuffer),
                                        "darkred_foreground");
        gtk_notebook_set_current_page(GTK_NOTEBOOK(screen.win.sidepane_notebook), 1);
        gtk_widget_show(screen.win.sidepane_notebook);
        break;
    case G_LOG_LEVEL_MESSAGE:
        tag = gtk_text_tag_table_lookup(gtk_text_buffer_get_tag_table(textbuffer),
                                        "darkblue_foreground");
        gtk_notebook_set_current_page(GTK_NOTEBOOK(screen.win.sidepane_notebook), 1);
        gtk_widget_show(screen.win.sidepane_notebook);
        break;
    case G_LOG_LEVEL_INFO:
        tag = gtk_text_tag_table_lookup(gtk_text_buffer_get_tag_table(textbuffer),
                                        "darkgreen_foreground");
        break;
    case G_LOG_LEVEL_DEBUG:
        tag = gtk_text_tag_table_lookup(gtk_text_buffer_get_tag_table(textbuffer),
                                        "saddlebrown_foreground");
        break;
    default:
        tag = gtk_text_tag_table_lookup(gtk_text_buffer_get_tag_table(textbuffer),
                                        "black_foreground");
        break;
    }
    /*
     * Fatal aborts application. We will try to get the message out anyhow.
     */
    if (log_level & G_LOG_FLAG_FATAL) {
        fprintf(stderr, _("Fatal error: %s\n"), message);
        /* Try to show dialog box with error message */
        dialog = gtk_dialog_new_with_buttons(_("Fatal Error"),
                NULL, GTK_DIALOG_MODAL | GTK_DIALOG_DESTROY_WITH_PARENT,
                GTK_STOCK_OK, GTK_RESPONSE_ACCEPT, NULL);
        /* gtk_label_new() copies the text, so the duplicated string must
         * be freed here (it used to leak). */
        fatal_text = g_strdup_printf(_("Fatal error: %s"), message);
        label = gtk_label_new(fatal_text);
        g_free(fatal_text);
        gtk_container_add(GTK_CONTAINER(GTK_DIALOG(dialog)->vbox),
                          label);
        gtk_label_set_selectable(GTK_LABEL(label), TRUE);
        gtk_container_add(GTK_CONTAINER(GTK_DIALOG(dialog)->vbox),
                          gtk_label_new(_("\nGerbv will be closed now!")));
        gtk_container_set_border_width(GTK_CONTAINER(dialog), 5);
        gtk_widget_show_all(dialog);
        gtk_dialog_run(GTK_DIALOG(dialog));
        gtk_widget_destroy(dialog);  /* was previously leaked */
    }
    gtk_text_buffer_insert(textbuffer, &iter, message, -1);
    gtk_text_buffer_insert(textbuffer, &iter, "\n", -1);
    /* Scroll view to inserted text */
    g_signal_emit_by_name(textbuffer, "paste-done", NULL);
    gtk_text_buffer_get_end_iter(textbuffer, &iter);
    StopMark = gtk_text_buffer_create_mark(textbuffer,
                                           "NewTextStop", &iter, TRUE);
    gtk_text_buffer_get_iter_at_mark(textbuffer, &StartIter, StartMark);
    gtk_text_buffer_get_iter_at_mark(textbuffer, &StopIter, StopMark);
    gtk_text_buffer_apply_tag(textbuffer, tag, &StartIter, &StopIter);
}
| 0
|
206,510
|
/*
 * Convert an in-ICB (inline-data) file to one using normal extent
 * addressing: copy the inline data into page 0 of the mapping, switch the
 * inode to short/long AD allocation and write the page back.
 *
 * Called with the inode locked and i_data_sem held for writing; the
 * semaphore is released before returning.
 *
 * Fix: on writepage failure the rollback restored i_alloc_type and the
 * in-ICB a_ops but left i_lenAlloc at 0, so the inode claimed to have no
 * inline data while being back in in-ICB mode (upstream commit
 * ea8569194b43, "udf: Restore i_lenAlloc when inode expansion fails").
 */
int udf_expand_file_adinicb(struct inode *inode)
{
	struct page *page;
	char *kaddr;
	struct udf_inode_info *iinfo = UDF_I(inode);
	int err;
	struct writeback_control udf_wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
	};

	WARN_ON_ONCE(!inode_is_locked(inode));
	if (!iinfo->i_lenAlloc) {
		/* Empty file: just flip the allocation type, nothing to copy. */
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
		else
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
		/* from now on we have normal address_space methods */
		inode->i_data.a_ops = &udf_aops;
		up_write(&iinfo->i_data_sem);
		mark_inode_dirty(inode);
		return 0;
	}
	/*
	 * Release i_data_sem so that we can lock a page - page lock ranks
	 * above i_data_sem. i_mutex still protects us against file changes.
	 */
	up_write(&iinfo->i_data_sem);
	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
	if (!page)
		return -ENOMEM;
	if (!PageUptodate(page)) {
		/* Fill page 0 with the inline data, zeroing the tail. */
		kaddr = kmap_atomic(page);
		memset(kaddr + iinfo->i_lenAlloc, 0x00,
		       PAGE_SIZE - iinfo->i_lenAlloc);
		memcpy(kaddr, iinfo->i_data + iinfo->i_lenEAttr,
		       iinfo->i_lenAlloc);
		flush_dcache_page(page);
		SetPageUptodate(page);
		kunmap_atomic(kaddr);
	}
	down_write(&iinfo->i_data_sem);
	memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00,
	       iinfo->i_lenAlloc);
	iinfo->i_lenAlloc = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
	/* from now on we have normal address_space methods */
	inode->i_data.a_ops = &udf_aops;
	up_write(&iinfo->i_data_sem);
	err = inode->i_data.a_ops->writepage(page, &udf_wbc);
	if (err) {
		/* Restore everything back so that we don't lose data... */
		lock_page(page);
		down_write(&iinfo->i_data_sem);
		kaddr = kmap_atomic(page);
		memcpy(iinfo->i_data + iinfo->i_lenEAttr, kaddr, inode->i_size);
		kunmap_atomic(kaddr);
		unlock_page(page);
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
		inode->i_data.a_ops = &udf_adinicb_aops;
		/* Also restore the inline-data length (see header comment). */
		iinfo->i_lenAlloc = inode->i_size;
		up_write(&iinfo->i_data_sem);
	}
	put_page(page);
	mark_inode_dirty(inode);
	return err;
}
| 1
|
309,878
|
/*
 * Terminal-driver input hook: read exactly one byte from the screen's
 * input file descriptor into *buf (zero-extended to int).
 *
 * Returns the result of read(): 1 on success, 0 on EOF, -1 on error.
 */
drv_read(TERMINAL_CONTROL_BLOCK * TCB, int *buf)
{
    SCREEN *sp;
    unsigned char c2 = 0;
    int n;

    AssertTCB();
    assert(buf);
    SetSP();
# if USE_PTHREADS_EINTR
    /* Record the reading thread so another thread can interrupt the
     * blocking read() with a signal; cleared again right after. */
    if ((pthread_self) && (pthread_kill) && (pthread_equal))
        _nc_globals.read_thread = pthread_self();
# endif
    n = (int) read(sp->_ifd, &c2, (size_t) 1);
#if USE_PTHREADS_EINTR
    _nc_globals.read_thread = 0;
#endif
    *buf = (int) c2;
    return n;
}
| 0
|
264,375
|
// Read-only accessor for the name -> TensorSliceSet mapping held in
// `tensors_`; the referenced map stays owned by this object.
const std::unordered_map<string, TensorSliceSet*>& Tensors() const {
    return tensors_;
}
| 0
|
230,122
|
/*
 * Module entry point: return the static descriptor identifying this
 * authentication scheme (result code plus name/display/description).
 * The caller owns the returned JSON object.
 */
json_t * user_auth_scheme_module_load(struct config_module * config) {
  json_t * module_descriptor;

  UNUSED(config);
  module_descriptor = json_pack("{si ss ss ss }",
                                "result", G_OK,
                                "name", "webauthn",
                                "display_name", "WebAuthn",
                                "description", "WebAuthn scheme module");
  return module_descriptor;
}
| 0
|
261,760
|
// Send a "Set Peer Bandwidth" control message: 4-byte window size in
// network byte order followed by a single limit-type byte (0x02).
void RtmpProtocol::sendPeerBandwidth(uint32_t size) {
    uint32_t be_size = htonl(size);
    std::string payload(reinterpret_cast<char *>(&be_size), 4);
    payload.push_back(static_cast<char>(0x02));
    sendRequest(MSG_SET_PEER_BW, payload);
}
| 0
|
437,711
|
/*
 * Convert a hardware clock divider value and rollover count back to a
 * frequency in Hz, rounding to the nearest integer.
 * NOTE(review): assumes `rollovers` is non-zero (a zero value would
 * divide by zero) — confirm callers guarantee this.
 */
static inline unsigned int clock_divider_to_freq(unsigned int divider,
                                                 unsigned int rollovers)
{
	return DIV_ROUND_CLOSEST(CX23888_IR_REFCLK_FREQ,
				 (divider + 1) * rollovers);
}
| 0
|
318,090
|
/*
 * Write `count` bytes from `data` to the given USB endpoint.
 *
 * Returns -ENODEV when the adapter or its USB private data is missing,
 * -EINVAL for endpoint 0, -ENETDOWN after a previous write failure;
 * otherwise forwards rsi_usb_card_write()'s status.
 *
 * Fix: also reject a NULL adapter->rsi_dev — it was cast and
 * dereferenced without a check, risking a NULL pointer crash.
 */
static int rsi_write_multiple(struct rsi_hw *adapter,
			      u8 endpoint,
			      u8 *data,
			      u32 count)
{
	struct rsi_91x_usbdev *dev;

	if (!adapter || !adapter->rsi_dev)
		return -ENODEV;

	if (endpoint == 0)
		return -EINVAL;

	dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
	if (dev->write_fail)
		return -ENETDOWN;

	return rsi_usb_card_write(adapter, data, count, endpoint);
}
| 0
|
229,341
|
// Populates `retvals` from the kernel execution results in `outputs`.
// For each output slot: if no handle exists yet, a local handle is created
// from the Tensor result (variant index 0) or an unshaped handle is created
// and its remote shape set (variant index 1 carries a TensorShape). If a
// handle already exists, its device placement is sanity-checked and the
// tensor / remote shape is filled in.
Status GetKernelOutputs(
    std::vector<EagerKernelRet>* outputs, int num_outputs,
    TensorHandle** retvals, EagerContext* ctx, KernelAndDevice* kernel,
    const absl::optional<EagerFunctionParams>& eager_func_params) {
  for (int i = 0, end = num_outputs; i < end; ++i) {
    if (retvals[i] == nullptr) {
      EagerKernelRet& ret = (*outputs)[i];
      Device* output_device = ctx->CanonicalDevice(kernel->OutputDevice(i));
      if (ret.index() == 0) {
        // Local result: wrap the concrete Tensor in a new local handle.
        retvals[i] = TensorHandle::CreateLocalHandle(
            std::move(absl::get<Tensor>(ret)),
            /* d= */ output_device,
            /* op_device= */ kernel->device(),
            /* resource_device= */ kernel->OutputResourceDevice(i), ctx);
      } else {
        const DataTypeVector& output_dtypes = kernel->output_dtypes();
        TF_RETURN_IF_ERROR(
            CreateUnshapedOutput(*kernel, i, output_device, output_dtypes[i],
                                 eager_func_params, ctx, &retvals[i]));
#if !defined(IS_MOBILE_PLATFORM)
        TF_RETURN_IF_ERROR(
            retvals[i]->SetRemoteShape(absl::get<TensorShape>(ret),
                                       output_device, ctx->GetContextViewId()));
#endif  // IS_MOBILE_PLATFORM
      }
    } else {
      // Pre-existing handle: verify it lives where the kernel says the
      // output should be before storing the result into it.
      if (!kernel->IsFunction() &&
          TF_PREDICT_FALSE(kernel->device() != retvals[i]->op_device())) {
        return errors::Internal(
            "Kernel output tensor handle has a different op device than the "
            "kernel. This should never happen.");
      }
      if (TF_PREDICT_FALSE(ctx->CanonicalDevice(kernel->OutputDevice(i)) !=
                           retvals[i]->device())) {
        return errors::Internal(
            "Kernel output tensor handle locates on a different device than "
            "the specified kernel output device. This should never happen.");
      }
      EagerKernelRet& ret = (*outputs)[i];
      if (ret.index() == 0) {
        TF_RETURN_IF_ERROR(retvals[i]->SetTensor(
            std::move(absl::get<Tensor>(ret)),
            ctx->CanonicalDevice(kernel->OutputDevice(i))));
      } else {
#if defined(IS_MOBILE_PLATFORM)
        return errors::Unimplemented(
            "Remote outputs are not available on mobile devices.");
#else  // !IS_MOBILE_PLATFORM
        TF_RETURN_IF_ERROR(retvals[i]->SetRemoteShape(
            absl::get<TensorShape>(ret), retvals[i]->device(),
            ctx->GetContextViewId()));
#endif  // !IS_MOBILE_PLATFORM
      }
    }
  }
  return Status::OK();
}
| 0
|
459,159
|
/* Release the flow-action cookie attached to `entry`.
 * NOTE(review): relies on flow_action_cookie_destroy() tolerating a NULL
 * cookie — not verifiable from this file. */
static void tcf_act_put_cookie(struct flow_action_entry *entry)
{
	flow_action_cookie_destroy(entry->cookie);
}
| 0
|
281,131
|
/*
 * Invoke the type-specific reject handler of the xfrm state at position
 * `idx` in the skb's security path.
 *
 * Returns 0 (nothing rejected) when the skb has no security path, the
 * index is out of range, or the state's type has no reject callback;
 * otherwise returns whatever the reject hook returns.
 */
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}
| 0
|
226,013
|
/* Destructor for the 'tpay' box: frees the box structure itself.
 * No member fields are released here. */
void tpay_box_del(GF_Box *s)
{
	gf_free((GF_TPAYBox *)s);
}
| 0
|
225,494
|
// Removes the named nodes from both the internal fanin/fanout state and the
// underlying GraphDef. Returns early (via TF_RETURN_IF_ERROR) if
// CheckNodesCanBeDeleted reports that any node cannot be deleted.
Status MutableGraphView::DeleteNodes(
    const absl::flat_hash_set<string>& nodes_to_delete) {
  TF_RETURN_IF_ERROR(CheckNodesCanBeDeleted(nodes_to_delete));
  // Find nodes in internal state and delete.
  for (const string& node_name_to_delete : nodes_to_delete) {
    NodeDef* node = GetNode(node_name_to_delete);
    if (node != nullptr) {
      RemoveFaninsInternal(node, /*keep_controlling_fanins=*/false);
      RemoveFanoutsInternal(node);
    }
  }
  for (const string& node_name_to_delete : nodes_to_delete) {
    nodes().erase(node_name_to_delete);
  }
  // Find nodes in graph and delete by partitioning into nodes to retain and
  // nodes to delete based on input set of nodes to delete by name.
  // TODO(lyandy): Use a node name->idx hashmap if this is a performance
  // bottleneck.
  int pos = 0;
  const int last_idx = graph()->node_size() - 1;
  int last_pos = last_idx;
  // Swap nodes marked for deletion toward the tail of the node list.
  while (pos <= last_pos) {
    if (nodes_to_delete.contains(graph()->node(pos).name())) {
      graph()->mutable_node()->SwapElements(pos, last_pos);
      --last_pos;
    } else {
      ++pos;
    }
  }
  // Everything in (last_pos, last_idx] is now a node to delete; drop the
  // whole tail in a single subrange deletion.
  if (last_pos < last_idx) {
    graph()->mutable_node()->DeleteSubrange(last_pos + 1, last_idx - last_pos);
  }
  return Status::OK();
}
| 0
|
228,439
|
// Hash functor: uses the ArrayOrObject's opaque bit representation
// (toOpaque()) directly as the hash value.
size_t operator()(const ArrayOrObject data) const {
    return data.toOpaque();
}
| 0
|
229,333
|
// Looks up a cached KernelAndDevice for `op`, or builds a new one (resolving
// the op's device placement along the way) and, in most cases, caches it.
// On success `*out_kernel` holds a new reference (the kernel is Ref()'d) and
// `*num_retvals` is set to the kernel's output count; it is an error if the
// kernel produces more outputs than the caller-provided *num_retvals.
Status GetOrCreateKernelAndDevice(
    EagerOperation* op, TensorHandle** retvals, int* num_retvals,
    core::RefCountPtr<KernelAndDevice>* out_kernel) {
  EagerContext& ctx = op->EagerContext();
  Device* device = absl::get<Device*>(op->Device());
  // Set the EagerOperation's device prior to extracting the input_dev_ptrs to
  // avoid any redundant H2D/D2H copies.
  if (device == nullptr && !op->is_function()) {
    Fprint128 device_cache_key = GetDeviceCacheKey(op, ctx);
    device = ctx.GetCachedDevice(device_cache_key);
    if (device == nullptr) {
      TF_RETURN_IF_ERROR(SetOpDevice(ctx, op, &device));
      ctx.AddDeviceToCache(device_cache_key, device);
    } else {
      op->SetDevice(device);
    }
  }
  // Save the original value of reuse_rendezvous_for_functions from the context.
  bool reuse_rendezvous_for_functions_original_value =
      ctx.GetReuseRendezvousForFunctions();
  // When running in eager_op_as_function mode Send/Recv ops need to be
  // placed on the same rendezvous to match the behaviour of eager mode.
  bool reuse_rendezvous_for_functions =
      (ctx.RunEagerOpAsFunction() && !op->is_function()) ||
      reuse_rendezvous_for_functions_original_value;
  std::vector<Device*> input_dev_ptrs;
  absl::flat_hash_map<string, const std::vector<string>*> composite_devices;
  std::unordered_map<int, DtypeAndPartialTensorShape>
      input_resource_variable_dtypes_and_shapes;
  // Collect per-input device (and resource dtype/shape) info; this feeds
  // both the kernel cache key and KernelAndDeviceFunc construction below.
  if (op->is_function() || ctx.RunEagerOpAsFunction()) {
    profiler::TraceMe activity("EagerCopyToDevice",
                               profiler::TraceMeLevel::kInfo);
    input_dev_ptrs.reserve(op->Inputs().size());
    const absl::InlinedVector<TensorHandle*, 4>* inputs;
    TF_RETURN_IF_ERROR(op->TensorHandleInputs(&inputs));
    for (int i = 0, end = inputs->size(); i < end; ++i) {
      TensorHandle* input = (*inputs)[i];
      Device* input_device;
      TF_RETURN_IF_ERROR(GetDeviceForInput(*op, ctx, input, &input_device));
      VLOG(1) << op->Name() << ":input:" << i << " " << input_device->name();
      input_dev_ptrs.push_back(input_device);
      CompositeDevice* composite_device = nullptr;
      if (ctx.FindCompositeDeviceFromName(input_device->name(),
                                          &composite_device)
              .ok()) {
        composite_devices[input_device->name()] =
            composite_device->underlying_devices();
      }
      if (input->dtype == DT_RESOURCE) {
        // We only care about data type and shape for resource variable inputs.
        // But we have no way to tell if input is resource variable (other than
        // looking it up in ResourceMgr, which is slow). So we just get
        // resource_dtypes_and_shapes for all DT_RESOURCE inputs. If
        // resource_dtypes_and_shapes is not empty, take the first element.
        std::vector<DtypeAndPartialTensorShape> resource_dtypes_and_shapes;
        TF_RETURN_IF_ERROR(input->GetResourceHandleDtypesAndShapes(
            &resource_dtypes_and_shapes));
        if (!resource_dtypes_and_shapes.empty()) {
          const DtypeAndPartialTensorShape& dtype_and_shape =
              resource_dtypes_and_shapes.at(0);
          input_resource_variable_dtypes_and_shapes[i] = dtype_and_shape;
        }
      }
    }
  }
  TF_ASSIGN_OR_RETURN(
      Fprint128 cache_key,
      GetKernelCacheKey(*op, op->MutableAttrs()->CacheKey(op->DeviceName()),
                        input_dev_ptrs,
                        input_resource_variable_dtypes_and_shapes));
  core::RefCountPtr<KernelAndDevice> kernel = ctx.GetCachedKernel(cache_key);
  AbstractOperationPtr wrapped_op_releaser;
  // We can eliminate some overhead by running simple functions using regular
  // CallOp kernel. However, it is tricky to figure out which functions should
  // be run using CallOp. Also, currently CallOp runs neither optimization
  // passes (needed for TPU/XLA) nor grappler.
  // Here are some cases where a function should be run in multi-device mode:
  //  - Function takes at least two resources on different devices.
  //  - Function takes a resource on deviceA and a body op explicitly placed
  //  on deviceB.
  //  - Function has a colocation constraint.
  //  - Function has an explicit device annotation (which might not be using
  //    full canonical device name) different from op_device. Note that false
  //    positives are ok.
  //  - Function has a node or a (node) attribute that can potentially make
  //    the function multi-device after a rewrite pass (e.g. various XLA/TPU
  //    special nodes and attributes)
  if (kernel == nullptr) {
    // Cache miss: decide how the kernel should run, then build it.
    VLOG(2) << "Creating new kernel for " << op->Name() << " on device "
            << DeviceNameOrUnspecified(absl::get<Device*>(op->Device()));
    bool run_function_with_flr = false;
    bool function_outputs_on_op_device = false;
    absl::optional<string> xla_compile_device_type;
    if (op->is_function()) {
      bool compile_with_xla;
      TF_RETURN_IF_ERROR(MustCompileWithXLA(op, ctx, &compile_with_xla));
      if (compile_with_xla) {
        if (ctx.JitCompileRewrite()) {
          xla_compile_device_type = op->GetDeviceParsedName().type;
          run_function_with_flr = true;
        } else {
          // Note that it is not ideal, but currently correct, to set this
          // attribute after computing the kernel cache key above.
          // Note: If the attribute is already set to true, this is a noop.
          op->MutableAttrs()->Set(kXlaMustCompileAttr, true);
        }
      } else {
        run_function_with_flr = true;
      }
      GetFuncAttr(op, ctx, kOutputsOnOpDevice, &function_outputs_on_op_device)
          .IgnoreError();
    }
    VLOG(2) << op->Name() << " function_outputs_on_op_device: "
            << function_outputs_on_op_device;
    if (device == nullptr) {
      TF_RETURN_IF_ERROR(SetOpDevice(ctx, op, &device));
    } else {
      VLOG(1) << "Device for [" << op->Name()
              << "] already set to: " << device->name();
    }
    // Note: We wrap the eager op AFTER the device has been inferred to ensure
    // that placement of the NodeDef in the function is exactly the same as in
    // eager mode. This is specially important for cases where the
    // preferred device is not the actual device on which the op is run.
    // E.g. the preferred device for a `RangeDataset` op could be set to `GPU`
    // but `ctx->SelectDevice` would still place it on CPU. Placer on the other
    // hand would throw an error.
    //
    // Note: The wrapped function is never jit compiled but rather run via the
    // FLR. This is needed because certain ops e.g. `VarHandleOp` can not be
    // jit compiled. Ideally we would run this via the jit compiled path and
    // expect unsupported ops to be outside compiled but that is not supported
    // on GPUs right now.
    bool allow_small_function_optimizations = false;
    bool int_args_and_retvals_on_device = false;
    bool allow_control_flow_sync_execution = false;
    // TODO(b/176491312): Remove this if shape inference on import flag is
    // removed.
    bool shape_inference_on_tfe_dialect_import = true;
    if (ctx.RunEagerOpAsFunction() && !op->is_function()) {
      // Wrap the plain op in a CallOp-style function so it runs via the FLR.
      EagerOperation* wrapped_op = nullptr;
      TF_RETURN_IF_ERROR(ValidateOp(op));
      TF_RETURN_IF_ERROR(WrapInCallOp(op, &wrapped_op));
      DCHECK(wrapped_op);
      DCHECK(wrapped_op->is_function());
      wrapped_op_releaser.reset(wrapped_op);
      run_function_with_flr = true;
      allow_small_function_optimizations = true;
      allow_control_flow_sync_execution = true;
      shape_inference_on_tfe_dialect_import = false;
      int_args_and_retvals_on_device = IntArgsAndRetvalsOnDevice(op);
      op = wrapped_op;
    }
    const NodeDef& ndef = op->MutableAttrs()->BuildNodeDef();
    FunctionLibraryRuntime* flr =
        device == nullptr ? nullptr : ctx.func_lib(device);
    if (device != nullptr && flr == nullptr) {
      return errors::NotFound(
          "Unable to find a FunctionLibraryRuntime corresponding to device ",
          device->name());
    }
    auto runner = (flr != nullptr && flr->runner() != nullptr) ? flr->runner()
                                                               : ctx.runner();
    GraphCollector* graph_collector = nullptr;
    if (ctx.ShouldStoreGraphs()) {
      graph_collector = ctx.GetGraphCollector();
    }
    // Treat the function as multi_device only when we are not compiling
    // it wholly with XLA. When compiling wholly with XLA, flr->CreateKernel
    // will create an XlaLaunchOp kernel to compile and run the function.
    if (run_function_with_flr) {
      // Multi-device functions don't use the rendezvous from eager context.
      // If we use that rendezvous, multiple concurrent calls to the same
      // function will likely result in collisions. However, this also means
      // that we don't support legitimate sending/receiving across function
      // boundary.
      VLOG(2) << "Running " << ndef.op() << " using multi-device function. "
              << "Full node_def=" << ndef.DebugString();
      std::function<int64_t()> get_op_id = nullptr;
#if !defined(IS_MOBILE_PLATFORM)
      get_op_id = [&ctx]() { return ctx.RemoteMgr()->NextOpId(); };
#endif  // IS_MOBILE_PLATFORM
      // Temporarily override the reuse flag (under its mutex) just long
      // enough to obtain a rendezvous creator with the desired behavior.
      ctx.reuse_rendezvous_for_functions_mu()->lock();
      ctx.SetReuseRendezvousForFunctions(reuse_rendezvous_for_functions);
      auto rendezvous_creator = ctx.RendezvousCreator();
      ctx.SetReuseRendezvousForFunctions(
          reuse_rendezvous_for_functions_original_value);
      ctx.reuse_rendezvous_for_functions_mu()->unlock();
      kernel.reset(new KernelAndDeviceFunc(
          flr, ctx.pflr(), std::move(input_dev_ptrs),
          std::move(composite_devices),
          std::move(input_resource_variable_dtypes_and_shapes), runner,
          ctx.GetCollectiveExecutorHandle(), ctx.HostCPU(), op->Name(),
          function_outputs_on_op_device, allow_small_function_optimizations,
          allow_control_flow_sync_execution,
          shape_inference_on_tfe_dialect_import, int_args_and_retvals_on_device,
          xla_compile_device_type, std::move(rendezvous_creator), get_op_id));
    } else {
      VLOG(2) << "Running " << ndef.op() << " using op kernel. "
              << ". Full node_def=" << ndef.DebugString();
      kernel.reset(new KernelAndDeviceOp(
          ctx.GetRendezvous(), ctx.LogMemory(), flr, runner,
          ctx.GetCollectiveExecutorHandle(), ctx.HostCPU()));
    }
    TF_RETURN_IF_ERROR(
        kernel->Init(ctx.LogDevicePlacement(), ndef, graph_collector));
    if (op->is_function()) {
      ctx.AddKernelToCache(cache_key, kernel.get());
    } else {
      // Exclude tf.data op kernels from being cached. The reason for this is
      // that tf.data op kernels that accept a user-defined function will have a
      // unique cache key every time they are executed (because the user-defined
      // function is traced every time). Caching such kernels provides no
      // benefit and in some cases results in linear memory growth of use
      // programs that build input pipeline graphs in a loop.
      const OpDef* op_def;
      TF_RETURN_IF_ERROR(OpDefForOp(op->Name().data(), &op_def));
      if (KernelCacheEnabled(*op_def)) {
        ctx.AddKernelToCache(cache_key, kernel.get());
      }
    }
  }
  int num_outputs = kernel->num_outputs();
  if (num_outputs > *num_retvals) {
    return errors::InvalidArgument("Expecting ", num_outputs,
                                   " outputs, but *num_retvals is ",
                                   *num_retvals);
  }
  *num_retvals = num_outputs;
  kernel->Ref();  // Ownership of reference is passed to out_kernel.
  out_kernel->reset(kernel.get());
  return Status::OK();
}
| 0
|
250,689
|
// Compute the MD5 digest of the file's in-memory content via utils::getMd5.
std::string HttpFileImpl::getMd5() const
{
    return utils::getMd5(fileContent_.data(), fileContent_.size());
}
| 0
|
476,140
|
/*
 * Queue a request on ep0 and record which "pending" flag it maps to.
 *
 * On successful queuing, sets setup_pending or os_desc_pending depending
 * on whether `req` is the regular setup request or the OS-descriptor
 * request; any other request triggers a WARN. Returns the usb_ep_queue()
 * status.
 */
static int composite_ep0_queue(struct usb_composite_dev *cdev,
			       struct usb_request *req, gfp_t gfp_flags)
{
	int ret;

	ret = usb_ep_queue(cdev->gadget->ep0, req, gfp_flags);
	if (ret == 0) {
		if (cdev->req == req)
			cdev->setup_pending = true;
		else if (cdev->os_desc_req == req)
			cdev->os_desc_pending = true;
		else
			WARN(1, "unknown request %p\n", req);
	}
	return ret;
}
| 0
|
244,365
|
/* Size callback for the 'fdsa' box: nothing is added to the size here,
 * so the call always succeeds. */
GF_Err fdsa_box_size(GF_Box *s)
{
	return GF_OK;
}
| 0
|
418,785
|
time_diff_ms(struct timeval *t1, struct timeval *t2)
{
    /*
     * Millisecond difference t2 - t1. A negative microsecond delta is
     * compensated by the seconds term, so tv_usec wrap-around needs no
     * special case. Example pair with a 5 ms duration:
     *   t1 = (1, 998000), t2 = (2, 3000)
     *   (2 - 1) * 1000 + (3000 - 998000) / 1000 -> 5 ms.
     */
    long sec_delta = t2->tv_sec - t1->tv_sec;
    long usec_delta = t2->tv_usec - t1->tv_usec;

    return sec_delta * 1000 + usec_delta / 1000;
}
| 0
|
359,560
|
/*
 * CLI handler for:
 *   "redistribute (connected|kernel|ospf|rip|static) route-map WORD"
 * Enables redistribution of the given IPv4 route type into BGP with the
 * named route-map attached.
 *
 * Fix: corrected the "Open Shurtest Path First" typo in the OSPF help
 * string ("Shortest").
 */
DEFUN (bgp_redistribute_ipv4_rmap,
       bgp_redistribute_ipv4_rmap_cmd,
       "redistribute (connected|kernel|ospf|rip|static) route-map WORD",
       "Redistribute information from another routing protocol\n"
       "Connected\n"
       "Kernel routes\n"
       "Open Shortest Path First (OSPF)\n"
       "Routing Information Protocol (RIP)\n"
       "Static routes\n"
       "Route map reference\n"
       "Pointer to route-map entries\n")
{
  int type;

  /* Map the route-type keyword (argv[0]) to its internal constant. */
  type = bgp_str2route_type (AFI_IP, argv[0]);
  if (! type)
    {
      vty_out (vty, "%% Invalid route type%s", VTY_NEWLINE);
      return CMD_WARNING;
    }
  /* Attach the route-map first, then enable redistribution. */
  bgp_redistribute_rmap_set (vty->index, AFI_IP, type, argv[1]);
  return bgp_redistribute_set (vty->index, AFI_IP, type);
}
| 0
|
195,908
|
/*
 * Remove the device identified by `devid` or `device_path` (which may be
 * the literal "missing") from the filesystem: shrink it to zero, delete
 * its dev item, drop it from the device lists and scratch its superblocks.
 *
 * Returns 0 on success, a BTRFS_ERROR_DEV_* code or a negative errno.
 * If the shrink (or the dev-item removal) fails, the writable/alloc state
 * is rolled back via the error_undo path.
 */
int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
		u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	/* Refuse the removal if it would drop below the RAID minimum. */
	num_devices = btrfs_num_devices(fs_info);
	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		goto out;
	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
	if (IS_ERR(device)) {
		if (PTR_ERR(device) == -ENOENT &&
		    strcmp(device_path, "missing") == 0)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = PTR_ERR(device);
		goto out;
	}
	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
	  "cannot remove device %s (devid %llu) due to active swapfile",
				  rcu_str_deref(device->name), device->devid);
		ret = -ETXTBSY;
		goto out;
	}
	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}
	/* Take the device out of the allocation list before shrinking it. */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}
	/* uuid_mutex is dropped around the (potentially long) shrink. */
	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	if (!ret)
		btrfs_reada_remove_dev(device);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;
	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(device);
	if (ret)
		goto error_undo;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	btrfs_scrub_cancel_dev(device);
	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */
	/*
	 * In normal cases the cur_devices == fs_devices. But in case
	 * of deleting a seed device, the cur_devices should point to
	 * its own fs_devices listed under the fs_devices->seed.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);
	cur_devices->num_devices--;
	cur_devices->total_devices--;
	/* Update total_devices of the parent fs_devices if it's seed */
	if (cur_devices != fs_devices)
		fs_devices->total_devices--;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		cur_devices->missing_devices--;
	btrfs_assign_next_active_device(device, NULL);
	if (device->bdev) {
		cur_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_remove_device(device);
	}
	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_devices->device_list_mutex);
	/*
	 * at this point, the device is zero sized and detached from
	 * the devices list.  All that's left is to zero out the old
	 * supers and free the device.
	 */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		btrfs_scratch_superblocks(fs_info, device->bdev,
					  device->name->str);
	btrfs_close_bdev(device);
	synchronize_rcu();
	btrfs_free_device(device);
	/* Last open device of a (seed) fs_devices: tear the set down too. */
	if (cur_devices->open_devices == 0) {
		list_del_init(&cur_devices->seed_list);
		close_fs_devices(cur_devices);
		free_fs_devices(cur_devices);
	}
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	/* Undo the alloc-list removal and rw_devices decrement from above. */
	btrfs_reada_undo_remove_dev(device);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_add(&device->dev_alloc_list,
			 &fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		mutex_unlock(&fs_info->chunk_mutex);
	}
	goto out;
}
| 1
|
220,192
|
// Scan dst's incoming edges for the one feeding input slot `index`;
// returns nullptr when no such edge exists.
const Edge* FindEdge(const Node* dst, int index) {
  for (auto it = dst->in_edges().begin(); it != dst->in_edges().end(); ++it) {
    const Edge* edge = *it;
    if (edge->dst_input() == index) {
      return edge;
    }
  }
  return nullptr;
}
| 0
|
221,678
|
/*
 * Start an outgoing TLS session on this socket, verifying the peer
 * certificate (against certificate_path or the default store) and the
 * hostname. Returns 0 on success, negative on failure.
 *
 * Fixes:
 *  - SSL_connect() returns 1 on success, 0 on a controlled handshake
 *    failure and <0 on a fatal error; the old `rc < 0` check treated a
 *    failed handshake (rc == 0) as success and set isssl = true.
 *  - SSL_new() could return NULL and was then dereferenced by
 *    SSL_set_options(); it is now checked.
 */
int Socket::startSslClient(const std::string &certificate_path, String hostname)
{
    if (isssl) {
        stopSsl();
    }
    ERR_clear_error();
#if OPENSSL_VERSION_NUMBER < 0x10100000L
    ctx = SSL_CTX_new(SSLv23_client_method());
#else
    ctx = SSL_CTX_new(TLS_client_method());
#endif
    if (ctx == NULL) {
#ifdef NETDEBUG
        std::cout << thread_id << "Error ssl context is null (check that openssl has been inited)" << std::endl;
#endif
        log_ssl_errors("Error ssl context is null for %s", hostname.c_str());
        return -1;
    }
    //set the timeout for the ssl session
    if (SSL_CTX_set_timeout(ctx, 130l) < 1) {
        SSL_CTX_free(ctx);
        ctx = NULL;
        return -1;
    }
    //load certs
    ERR_clear_error();
    if (certificate_path.length()) {
        if (!SSL_CTX_load_verify_locations(ctx, NULL, certificate_path.c_str())) {
#ifdef NETDEBUG
            std::cout << thread_id << "couldnt load certificates" << std::endl;
#endif
            log_ssl_errors("couldnt load certificates from %s", certificate_path.c_str());
            //tidy up
            SSL_CTX_free(ctx);
            ctx = NULL;
            return -2;
        }
    } else if (!SSL_CTX_set_default_verify_paths(ctx)) //use default if no certPpath given
    {
#ifdef NETDEBUG
        std::cout << thread_id << "couldnt load certificates" << std::endl;
#endif
        log_ssl_errors("couldnt load default certificates for %s", hostname.c_str());
        //tidy up
        SSL_CTX_free(ctx);
        ctx = NULL;
        return -2;
    }
    // add validation params
    ERR_clear_error();
    X509_VERIFY_PARAM *x509_param = X509_VERIFY_PARAM_new();
    if (!x509_param) {
        log_ssl_errors("couldnt add validation params for %s", hostname.c_str());
        //X509_VERIFY_PARAM_free(x509_param);
        SSL_CTX_free(ctx);
        ctx = NULL;
        return -2;
    }
    ERR_clear_error();
    if (!X509_VERIFY_PARAM_set_flags(x509_param, X509_V_FLAG_TRUSTED_FIRST)) {
        log_ssl_errors("couldnt add validation params for %s", hostname.c_str());
        X509_VERIFY_PARAM_free(x509_param);
        SSL_CTX_free(ctx);
        ctx = NULL;
        return -2;
    }
    ERR_clear_error();
    if (!SSL_CTX_set1_param(ctx, x509_param)) {
        log_ssl_errors("couldnt add validation params for %s", hostname.c_str());
        X509_VERIFY_PARAM_free(x509_param);
        SSL_CTX_free(ctx);
        ctx = NULL;
        return -2;
    }
    X509_VERIFY_PARAM_free(x509_param); // try not freeing this as SSL_CTX_free seems to be ring to free it
    //hand socket over to ssl lib
    ERR_clear_error();
    ssl = SSL_new(ctx);
    if (ssl == NULL) {
        // SSL_new() can fail (e.g. out of memory); never dereference NULL
        log_ssl_errors("couldnt create ssl session for %s", hostname.c_str());
        SSL_CTX_free(ctx);
        ctx = NULL;
        return -1;
    }
    SSL_set_options(ssl, SSL_OP_ALL);
    SSL_set_mode(ssl, SSL_MODE_AUTO_RETRY);
    SSL_set_connect_state(ssl);
    //fcntl(this->getFD() ,F_SETFL, O_NONBLOCK);  // blocking mode used currently
    SSL_set_fd(ssl, this->getFD());
    SSL_set_tlsext_host_name(ssl, hostname.c_str());
#if OPENSSL_VERSION_NUMBER < 0x10100000L
#else
    X509_VERIFY_PARAM_set1_host(SSL_get0_param(ssl), hostname.c_str(), 0);
#endif
    //make io non blocking as select wont tell us if we can do a read without blocking
    //BIO_set_nbio(SSL_get_rbio(ssl),1l);  // blocking mode used currently
    //BIO_set_nbio(SSL_get_wbio(ssl),1l);  // blocking mode used currently
    ERR_clear_error();
    int rc = SSL_connect(ssl);
    // SSL_connect() only succeeds when it returns 1; 0 is a controlled
    // handshake failure and must NOT be treated as success.
    if (rc != 1) {
        log_ssl_errors("ssl_connect failed to %s", hostname.c_str());
#ifdef NETDEBUG
        std::cout << thread_id << "ssl_connect failed with error " << SSL_get_error(ssl, rc) << std::endl;
#endif
        // tidy up
        SSL_free(ssl);
        ssl = NULL;
        SSL_CTX_free(ctx);
        ctx = NULL;
        return -3;
    }
    //should be safer to do this last as nothing will ever try to use a ssl socket that isnt fully setup
    isssl = true;
    issslserver = false;
    return 0;
}
| 0
|
424,904
|
/* Program the firmware debug-data destination.
 *
 * Two configuration styles exist:
 *  - new "ini" debug config: point the UMAC monitor base/end registers at
 *    the first DRAM debug buffer and return;
 *  - legacy dest TLV: replay the TLV's register-op list (CSR/PRPH
 *    set/clear/assign), then program the external monitor buffer.
 */
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
int i;
if (iwl_trans_dbg_ini_valid(trans)) {
/* ini config: nothing to do if no DRAM blocks were allocated */
if (!trans->dbg.num_blocks)
return;
IWL_DEBUG_FW(trans,
"WRT: Applying DRAM buffer[0] destination\n");
iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
trans->dbg.fw_mon[0].physical >>
MON_BUFF_SHIFT_VER2);
/* end address leaves 256 bytes of headroom at the top of the buffer */
iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
(trans->dbg.fw_mon[0].physical +
trans->dbg.fw_mon[0].size - 256) >>
MON_BUFF_SHIFT_VER2);
return;
}
IWL_INFO(trans, "Applying debug destination %s\n",
get_fw_dbg_mode_string(dest->monitor_mode));
if (dest->monitor_mode == EXTERNAL_MODE)
iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
else
IWL_WARN(trans, "PCI should have external buffer debug\n");
/* replay the TLV register-op list; values are little-endian on the wire */
for (i = 0; i < trans->dbg.n_dest_reg; i++) {
u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
u32 val = le32_to_cpu(dest->reg_ops[i].val);
switch (dest->reg_ops[i].op) {
case CSR_ASSIGN:
iwl_write32(trans, addr, val);
break;
case CSR_SETBIT:
iwl_set_bit(trans, addr, BIT(val));
break;
case CSR_CLEARBIT:
iwl_clear_bit(trans, addr, BIT(val));
break;
case PRPH_ASSIGN:
iwl_write_prph(trans, addr, val);
break;
case PRPH_SETBIT:
iwl_set_bits_prph(trans, addr, BIT(val));
break;
case PRPH_CLEARBIT:
iwl_clear_bits_prph(trans, addr, BIT(val));
break;
case PRPH_BLOCKBIT:
/* a set blocking bit aborts the remaining ops but still
 * programs the monitor buffer below */
if (iwl_read_prph(trans, addr) & BIT(val)) {
IWL_ERR(trans,
"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
val, addr);
goto monitor;
}
break;
default:
IWL_ERR(trans, "FW debug - unknown OP %d\n",
dest->reg_ops[i].op);
break;
}
}
monitor:
if (dest->monitor_mode == EXTERNAL_MODE && trans->dbg.fw_mon[0].size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
trans->dbg.fw_mon[0].physical >>
dest->base_shift);
/* family 8000+ reserves 256 bytes at the end of the monitor buffer */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
(trans->dbg.fw_mon[0].physical +
trans->dbg.fw_mon[0].size - 256) >>
dest->end_shift);
else
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
(trans->dbg.fw_mon[0].physical +
trans->dbg.fw_mon[0].size) >>
dest->end_shift);
}
}
| 0
|
355,637
|
eval6(
char_u **arg,
typval_T *rettv,
evalarg_T *evalarg,
int want_string) // after "." operator
{
/* Evaluate the sixth-level expression: handles the binary '*', '/' and
 * '%' operators, left-associatively. "rettv" holds the running result;
 * returns OK or FAIL. If either operand is a float the result is a float
 * (and '%' on floats is an error). */
#ifdef FEAT_FLOAT
int use_float = FALSE;
#endif
/*
 * Get the first variable.
 */
if (eval7t(arg, rettv, evalarg, want_string) == FAIL)
return FAIL;
/*
 * Repeat computing, until no '*', '/' or '%' is following.
 */
for (;;)
{
int evaluate;
int getnext;
typval_T var2;
char_u *p;
int op;
varnumber_T n1, n2;
#ifdef FEAT_FLOAT
float_T f1, f2;
#endif
int error;
// "*=", "/=" and "%=" are assignments
p = eval_next_non_blank(*arg, evalarg, &getnext);
op = *p;
if ((op != '*' && op != '/' && op != '%') || p[1] == '=')
break;
evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE);
if (getnext)
*arg = eval_next_line(evalarg);
else
{
// Vim9 script requires white space around binary operators.
if (evaluate && in_vim9script() && !VIM_ISWHITE(**arg))
{
error_white_both(*arg, 1);
clear_tv(rettv);
return FAIL;
}
*arg = p;
}
#ifdef FEAT_FLOAT
f1 = 0;
f2 = 0;
#endif
error = FALSE;
if (evaluate)
{
// Convert the left operand to a number (or take the float value);
// this consumes rettv.
#ifdef FEAT_FLOAT
if (rettv->v_type == VAR_FLOAT)
{
f1 = rettv->vval.v_float;
use_float = TRUE;
n1 = 0;
}
else
#endif
n1 = tv_get_number_chk(rettv, &error);
clear_tv(rettv);
if (error)
return FAIL;
}
else
n1 = 0;
/*
 * Get the second variable.
 */
if (evaluate && in_vim9script() && !IS_WHITE_OR_NUL((*arg)[1]))
{
error_white_both(*arg, 1);
clear_tv(rettv);
return FAIL;
}
*arg = skipwhite_and_linebreak(*arg + 1, evalarg);
if (eval7t(arg, &var2, evalarg, FALSE) == FAIL)
return FAIL;
if (evaluate)
{
#ifdef FEAT_FLOAT
if (var2.v_type == VAR_FLOAT)
{
// Promote the left side to float when the right side is one.
if (!use_float)
{
f1 = n1;
use_float = TRUE;
}
f2 = var2.vval.v_float;
n2 = 0;
}
else
#endif
{
n2 = tv_get_number_chk(&var2, &error);
clear_tv(&var2);
if (error)
return FAIL;
#ifdef FEAT_FLOAT
if (use_float)
f2 = n2;
#endif
}
/*
 * Compute the result.
 * When either side is a float the result is a float.
 */
#ifdef FEAT_FLOAT
if (use_float)
{
if (op == '*')
f1 = f1 * f2;
else if (op == '/')
{
# ifdef VMS
// VMS crashes on divide by zero, work around it
if (f2 == 0.0)
{
if (f1 == 0)
f1 = -1 * __F_FLT_MAX - 1L; // similar to NaN
else if (f1 < 0)
f1 = -1 * __F_FLT_MAX;
else
f1 = __F_FLT_MAX;
}
else
f1 = f1 / f2;
# else
// We rely on the floating point library to handle divide
// by zero to result in "inf" and not a crash.
f1 = f1 / f2;
# endif
}
else
{
// '%' is not defined for floats.
emsg(_(e_cannot_use_percent_with_float));
return FAIL;
}
rettv->v_type = VAR_FLOAT;
rettv->vval.v_float = f1;
}
else
#endif
{
int failed = FALSE;
// Integer arithmetic; division/modulus by zero is handled
// inside num_divide()/num_modulus().
if (op == '*')
n1 = n1 * n2;
else if (op == '/')
n1 = num_divide(n1, n2, &failed);
else
n1 = num_modulus(n1, n2, &failed);
if (failed)
return FAIL;
rettv->v_type = VAR_NUMBER;
rettv->vval.v_number = n1;
}
}
}
return OK;
}
| 0
|
226,330
|
/* Compute the serialized payload size of a 'pcrb' (PCR info) box:
 * 4 bytes for the subsegment count plus 6 bytes (a 42-bit PCR packed in
 * 48 bits) per subsegment. Always returns GF_OK. */
GF_Err pcrb_box_size(GF_Box *s)
{
GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s;
ptr->size += 4;
ptr->size += ptr->subsegment_count * 6;
return GF_OK;
| 0
|
247,078
|
/* Return the session's font manager, creating it lazily on first use.
 * Always NULL when the player subsystem is compiled out. */
struct _gf_ft_mgr *gf_fs_get_font_manager(GF_FilterSession *fsess)
{
#ifdef GPAC_DISABLE_PLAYER
	return NULL;
#else
	if (fsess->font_manager == NULL)
		fsess->font_manager = gf_font_manager_new();
	return fsess->font_manager;
#endif
}
| 0
|
229,166
|
/* Process one control-channel packet sent by the guest.
 *
 * buf/len hold the raw packet; packets shorter than a control header are
 * silently dropped. Handles DEVICE_READY (announce all ports),
 * PORT_READY (send console flag, port name, open state) and PORT_OPEN
 * (record/propagate guest connection state). Field values are converted
 * with the virtio endian helpers.
 */
static void handle_control_message(VirtIOSerial *vser, void *buf, size_t len)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vser);
struct VirtIOSerialPort *port;
VirtIOSerialPortClass *vsc;
struct virtio_console_control cpkt, *gcpkt;
uint8_t *buffer;
size_t buffer_len;
gcpkt = buf;
if (len < sizeof(cpkt)) {
/* The guest sent an invalid control packet */
return;
}
cpkt.event = virtio_lduw_p(vdev, &gcpkt->event);
cpkt.value = virtio_lduw_p(vdev, &gcpkt->value);
trace_virtio_serial_handle_control_message(cpkt.event, cpkt.value);
if (cpkt.event == VIRTIO_CONSOLE_DEVICE_READY) {
if (!cpkt.value) {
error_report("virtio-serial-bus: Guest failure in adding device %s",
vser->bus.qbus.name);
return;
}
/*
 * The device is up, we can now tell the device about all the
 * ports we have here.
 */
QTAILQ_FOREACH(port, &vser->ports, next) {
send_control_event(vser, port->id, VIRTIO_CONSOLE_PORT_ADD, 1);
}
return;
}
/* All remaining events are per-port: resolve the port id. */
port = find_port_by_id(vser, virtio_ldl_p(vdev, &gcpkt->id));
if (!port) {
error_report("virtio-serial-bus: Unexpected port id %u for device %s",
virtio_ldl_p(vdev, &gcpkt->id), vser->bus.qbus.name);
return;
}
trace_virtio_serial_handle_control_message_port(port->id);
vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
switch(cpkt.event) {
case VIRTIO_CONSOLE_PORT_READY:
if (!cpkt.value) {
error_report("virtio-serial-bus: Guest failure in adding port %u for device %s",
port->id, vser->bus.qbus.name);
break;
}
/*
 * Now that we know the guest asked for the port name, we're
 * sure the guest has initialised whatever state is necessary
 * for this port. Now's a good time to let the guest know if
 * this port is a console port so that the guest can hook it
 * up to hvc.
 */
if (vsc->is_console) {
send_control_event(vser, port->id, VIRTIO_CONSOLE_CONSOLE_PORT, 1);
}
if (port->name) {
/* PORT_NAME reply: header followed by the NUL-terminated name.
 * memcpy copies the name without its terminator, so the final
 * byte is set to 0 explicitly. */
virtio_stl_p(vdev, &cpkt.id, port->id);
virtio_stw_p(vdev, &cpkt.event, VIRTIO_CONSOLE_PORT_NAME);
virtio_stw_p(vdev, &cpkt.value, 1);
buffer_len = sizeof(cpkt) + strlen(port->name) + 1;
buffer = g_malloc(buffer_len);
memcpy(buffer, &cpkt, sizeof(cpkt));
memcpy(buffer + sizeof(cpkt), port->name, strlen(port->name));
buffer[buffer_len - 1] = 0;
send_control_msg(vser, buffer, buffer_len);
g_free(buffer);
}
if (port->host_connected) {
send_control_event(vser, port->id, VIRTIO_CONSOLE_PORT_OPEN, 1);
}
/*
 * When the guest has asked us for this information it means
 * the guest is all setup and has its virtqueues
 * initialised. If some app is interested in knowing about
 * this event, let it know.
 */
if (vsc->guest_ready) {
vsc->guest_ready(port);
}
break;
case VIRTIO_CONSOLE_PORT_OPEN:
port->guest_connected = cpkt.value;
if (vsc->set_guest_connected) {
/* Send the guest opened notification if an app is interested */
vsc->set_guest_connected(port, cpkt.value);
}
break;
}
}
| 0
|
512,840
|
// Produce a new, empty comparator of the same concrete kind (row
// comparator), for use when the comparison framework needs a sibling
// instance with identical type behavior.
cmp_item* cmp_item_row::make_same()
{
  cmp_item_row *fresh_item= new cmp_item_row();
  return fresh_item;
}
| 0
|
473,992
|
st_init_numtable_with_size(st_index_t size)
{
    /* Create a hash table keyed by numbers, pre-sized for `size` entries
     * (delegates to the generic constructor with the numeric hash type). */
    return st_init_table_with_size(&type_numhash, size);
}
| 0
|
226,150
|
/* Destructor for a 'ssix' (SubsegmentIndex) box: frees each subsegment's
 * ranges array, the subsegment array itself, then the box. Iterates up to
 * subsegment_alloc (the allocated capacity), not subsegment_count, so
 * partially-parsed entries are also released. Safe on NULL. */
void ssix_box_del(GF_Box *s)
{
u32 i;
GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox *)s;
if (ptr == NULL) return;
if (ptr->subsegments) {
for (i = 0; i < ptr->subsegment_alloc; i++) {
GF_SubsegmentInfo *subsegment = &ptr->subsegments[i];
if (subsegment->ranges) gf_free(subsegment->ranges);
}
gf_free(ptr->subsegments);
}
gf_free(ptr);
| 0
|
409,501
|
blink_state_is_inverted()
{
/* Report whether the terminal's cursor-blink state is the opposite of
 * what the blink-mode and cursor-style queries reported at startup.
 * Only meaningful when both terminal responses were received; without
 * FEAT_TERMRESPONSE the answer is always FALSE. */
#ifdef FEAT_TERMRESPONSE
return rbm_status.tr_progress == STATUS_GOT
&& rcs_status.tr_progress == STATUS_GOT
&& initial_cursor_blink != initial_cursor_shape_blink;
#else
return FALSE;
#endif
}
| 0
|
262,727
|
njs_iterator_object_handler(njs_vm_t *vm, njs_iterator_handler_t handler,
njs_iterator_args_t *args, njs_value_t *key, int64_t i)
{
/* Fetch one property of args->value -- by `key` when non-NULL, otherwise
 * by integer index `i` -- and pass it to `handler`. When the property
 * lookup did not return NJS_OK (presumably "absent"; only NJS_ERROR is
 * fatal) the invalid-value sentinel is passed instead. Returns NJS_OK,
 * NJS_DONE (handler asked to stop) or NJS_ERROR. */
njs_int_t ret;
njs_value_t prop, *entry;
if (key != NULL) {
ret = njs_value_property(vm, args->value, key, &prop);
if (njs_slow_path(ret == NJS_ERROR)) {
return ret;
}
} else {
ret = njs_value_property_i64(vm, args->value, i, &prop);
if (njs_slow_path(ret == NJS_ERROR)) {
return ret;
}
}
entry = (ret == NJS_OK) ? &prop : njs_value_arg(&njs_value_invalid);
ret = handler(vm, args, entry, i);
if (njs_slow_path(ret != NJS_OK)) {
if (ret == NJS_DONE) {
return NJS_DONE;
}
return NJS_ERROR;
}
return ret;
}
| 0
|
445,929
|
fr_window_populate_file_list (FrWindow *window,
			      GPtrArray *files)
{
/* Rebuild the archive file-list view from `files`.
 * Sorting is temporarily disabled (set to UNSORTED) while rows are
 * appended, then restored, so the store is not re-sorted per insert.
 * Directories and regular files get different size/time/path columns. */
int sort_column_id;
GtkSortType order;
int i;
if (! gtk_widget_get_realized (GTK_WIDGET (window))) {
_fr_window_stop_activity_mode (window);
return;
}
window->priv->populating_file_list = TRUE;
gtk_list_store_clear (window->priv->list_store);
/* remember the current sort so it can be restored after population */
gtk_tree_sortable_get_sort_column_id (GTK_TREE_SORTABLE (window->priv->list_store),
&sort_column_id,
&order);
gtk_tree_sortable_set_sort_column_id (GTK_TREE_SORTABLE (window->priv->list_store),
GTK_TREE_SORTABLE_UNSORTED_SORT_COLUMN_ID,
0);
for (i = 0; i < files->len; i++) {
FileData *fdata = g_ptr_array_index (files, i);
GtkTreeIter iter;
GdkPixbuf *icon, *emblem;
char *utf8_name;
if (fdata->list_name == NULL)
continue;
gtk_list_store_append (window->priv->list_store, &iter);
icon = get_icon (window, fdata);
emblem = get_emblem (window, fdata);
utf8_name = g_filename_display_name (fdata->list_name);
if (file_data_is_dir (fdata)) {
char *utf8_path;
char *tmp;
char *s_size;
char *s_time;
/* for a listed directory the path column shows the current
 * location; otherwise the entry's parent directory */
if (fdata->list_dir)
tmp = _g_path_remove_ending_separator (fr_window_get_current_location (window));
else
tmp = _g_path_remove_level (fdata->path);
utf8_path = g_filename_display_name (tmp);
g_free (tmp);
s_size = g_format_size (fdata->dir_size);
if (fdata->list_dir)
s_time = g_strdup ("");
else
s_time = _g_time_to_string (fdata->modified);
gtk_list_store_set (window->priv->list_store, &iter,
COLUMN_FILE_DATA, fdata,
COLUMN_ICON, icon,
COLUMN_NAME, utf8_name,
COLUMN_EMBLEM, emblem,
COLUMN_TYPE, _("Folder"),
COLUMN_SIZE, s_size,
COLUMN_TIME, s_time,
COLUMN_PATH, utf8_path,
-1);
g_free (utf8_path);
g_free (s_size);
g_free (s_time);
}
else {
char *utf8_path;
char *s_size;
char *s_time;
const char *desc;
utf8_path = g_filename_display_name (fdata->path);
s_size = g_format_size (fdata->size);
s_time = _g_time_to_string (fdata->modified);
/* NOTE(review): per GLib docs g_content_type_get_description()
 * returns a newly-allocated string; storing it in a const char*
 * and never freeing it looks like a small leak -- confirm. */
desc = g_content_type_get_description (fdata->content_type);
gtk_list_store_set (window->priv->list_store, &iter,
COLUMN_FILE_DATA, fdata,
COLUMN_ICON, icon,
COLUMN_NAME, utf8_name,
COLUMN_EMBLEM, emblem,
COLUMN_TYPE, desc,
COLUMN_SIZE, s_size,
COLUMN_TIME, s_time,
COLUMN_PATH, utf8_path,
-1);
g_free (utf8_path);
g_free (s_size);
g_free (s_time);
}
g_free (utf8_name);
_g_object_unref (icon);
_g_object_unref (emblem);
}
gtk_tree_sortable_set_sort_column_id (GTK_TREE_SORTABLE (window->priv->list_store),
sort_column_id,
order);
window->priv->populating_file_list = FALSE;
_fr_window_stop_activity_mode (window);
}
| 0
|
446,061
|
LZWPostEncode(TIFF* tif)
{
/* Finish an LZW-encoded strip: flush any pending code, emit CODE_EOI,
 * pad the final partial byte, and update tif_rawcc. Returns 1 on
 * success, 0 if flushing buffered data failed.
 * NOTE: PutNextCode is a macro that writes through `op` and updates the
 * local nextbits/nextdata/outcount/nbits state. */
register LZWCodecState *sp = EncoderState(tif);
uint8* op = tif->tif_rawcp;
long nextbits = sp->lzw_nextbits;
unsigned long nextdata = sp->lzw_nextdata;
long outcount = sp->enc_outcount;
int nbits = sp->lzw_nbits;
/* make room in the raw buffer before emitting the trailing codes */
if (op > sp->enc_rawlimit) {
tif->tif_rawcc = (tmsize_t)(op - tif->tif_rawdata);
if( !TIFFFlushData1(tif) )
return 0;
op = tif->tif_rawdata;
}
if (sp->enc_oldcode != (hcode_t) -1) {
int free_ent = sp->lzw_free_ent;
/* emit the last buffered string's code */
PutNextCode(op, sp->enc_oldcode);
sp->enc_oldcode = (hcode_t) -1;
free_ent ++;
if (free_ent == CODE_MAX-1) {
/* table is full, emit clear code and reset */
outcount = 0;
PutNextCode(op, CODE_CLEAR);
nbits = BITS_MIN;
} else {
/*
 * If the next entry is going to be too big for
 * the code size, then increase it, if possible.
 */
if (free_ent > sp->lzw_maxcode) {
nbits++;
assert(nbits <= BITS_MAX);
}
}
}
PutNextCode(op, CODE_EOI);
/* Explicit 0xff masking to make icc -check=conversions happy */
if (nextbits > 0)
*op++ = (unsigned char)((nextdata << (8-nextbits))&0xff);
tif->tif_rawcc = (tmsize_t)(op - tif->tif_rawdata);
return (1);
}
| 0
|
220,101
|
/* File-operations hook for setting a lease on an NFSv4 file: thin
 * pass-through to the NFSv4 protocol implementation. */
static int nfs4_setlease(struct file *file, long arg, struct file_lock **lease,
void **priv)
{
return nfs4_proc_setlease(file, arg, lease, priv);
}
|
292,200
|
inbound_ping_reply (session *sess, char *timestring, char *from,
const message_tags_data *tags_data)
{
/* Handle a CTCP PING reply. Replies whose payload starts with "LAG"
 * are lag probes sent by ourselves: they only update the server lag
 * display. Ordinary replies are shown to the user with the round-trip
 * time in seconds.milliseconds. */
unsigned long tim, nowtim, dif;
int lag = 0;
char outbuf[64];
if (strncmp (timestring, "LAG", 3) == 0)
{
timestring += 3;
lag = 1;
}
tim = strtoul (timestring, NULL, 10);
nowtim = make_ping_time ();
dif = nowtim - tim;
sess->server->ping_recv = time (0);
if (lag)
{
sess->server->lag_sent = 0;
sess->server->lag = dif;
fe_set_lag (sess->server, dif);
return;
}
/* a zero timestamp: either our own lag probe echoed back (suppress)
 * or an unparseable payload (show "?") */
if (atol (timestring) == 0)
{
if (sess->server->lag_sent)
sess->server->lag_sent = 0;
else
EMIT_SIGNAL_TIMESTAMP (XP_TE_PINGREP, sess, from, "?", NULL, NULL, 0,
tags_data->timestamp);
} else
{
g_snprintf (outbuf, sizeof (outbuf), "%ld.%03ld", dif / 1000, dif % 1000);
EMIT_SIGNAL_TIMESTAMP (XP_TE_PINGREP, sess, from, outbuf, NULL, NULL, 0,
tags_data->timestamp);
}
}
| 0
|
232,314
|
/* Fold per-sample dependency info from each track fragment's SDTP box
 * into the corresponding TRUN sample flags, then delete the SDTP.
 *
 * FIX: the bounds check on sample_index is now performed BEFORE reading
 * sdtp->sample_info[sample_index]. The original code read the byte first
 * and checked afterwards, so when the TRUNs listed more samples than the
 * SDTP carried, one byte past the end of sample_info was read. */
static void FixSDTPInTRAF(GF_MovieFragmentBox *moof)
{
	u32 k;
	if (!moof)
		return;
	for (k = 0; k < gf_list_count(moof->TrackList); k++) {
		GF_TrackFragmentBox *traf = gf_list_get(moof->TrackList, k);
		if (traf->sdtp) {
			GF_TrackFragmentRunBox *trun;
			u32 j = 0, sample_index = 0;
			if (traf->sdtp->sampleCount == gf_list_count(traf->TrackRuns)) {
				GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Warning: TRAF box of track id=%u contains a SDTP. Converting to TRUN sample flags.\n", traf->tfhd->trackID));
			}
			while ((trun = (GF_TrackFragmentRunBox*)gf_list_enum(traf->TrackRuns, &j))) {
				u32 i;
				trun->flags |= GF_ISOM_TRUN_FLAGS;
				for (i=0; i<trun->nb_samples; i++) {
					GF_TrunEntry *entry = &trun->samples[i];
					u8 info;
					/* bounds check BEFORE the array read (see header comment) */
					if (sample_index >= traf->sdtp->sampleCount) {
						GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Error: TRAF box of track id=%u contained an inconsistent SDTP.\n", traf->tfhd->trackID));
						return;
					}
					info = traf->sdtp->sample_info[sample_index];
					entry->flags |= GF_ISOM_GET_FRAG_DEPEND_FLAGS(info >> 6, info >> 4, info >> 2, info);
					sample_index++;
				}
			}
			if (sample_index < traf->sdtp->sampleCount) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Error: TRAF box of track id=%u list less samples than SDTP.\n", traf->tfhd->trackID));
			}
			gf_isom_box_del_parent(&traf->child_boxes, (GF_Box*)traf->sdtp);
			traf->sdtp = NULL;
		}
	}
}
| 0
|
226,078
|
/* Allocate a new 'ftyp' (FileType) box.
 * ISOM_DECL_BOX_ALLOC declares and zero-allocates `tmp` with the given
 * type, returning NULL from this function on allocation failure. */
GF_Box *ftyp_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_FileTypeBox, GF_ISOM_BOX_TYPE_FTYP);
return (GF_Box *)tmp;
}
| 0
|
314,756
|
cdf_tole2(uint16_t sv)
{
	/* Convert a 16-bit value from CDF file order (little-endian) to host
	 * order; CDF_TOLE2 is a no-op on little-endian hosts. */
	return CDF_TOLE2(sv);
}
| 0
|
90,219
|
// A network is considered GSM-family when its technology is none of the
// CDMA-family values (EVDO, 1xRTT) and is not unknown.
bool CellularNetwork::is_gsm() const {
  const bool cdma_or_unknown =
      network_technology_ == NETWORK_TECHNOLOGY_EVDO ||
      network_technology_ == NETWORK_TECHNOLOGY_1XRTT ||
      network_technology_ == NETWORK_TECHNOLOGY_UNKNOWN;
  return !cdma_or_unknown;
}
| 0
|
335,421
|
excmd_get_cmdidx(char_u *cmd, int len)
{
/* Map an Ex command name of length `len` to its cmdidx_T. Single-letter
 * commands are resolved by one_letter_cmd(); otherwise the cmdnames
 * table is scanned linearly by prefix. Returns CMD_SIZE when nothing
 * matches (loop runs off the end of the table). */
cmdidx_T idx;
if (!one_letter_cmd(cmd, &idx))
for (idx = (cmdidx_T)0; (int)idx < (int)CMD_SIZE;
idx = (cmdidx_T)((int)idx + 1))
if (STRNCMP(cmdnames[(int)idx].cmd_name, cmd, (size_t)len) == 0)
break;
return idx;
}
| 0
|
247,082
|
/* Dispatch a UI event to the session's registered event handler,
 * serializing calls with the session's UI mutex. Returns the handler's
 * result. NOTE(review): assumes ui_event_proc is non-NULL -- presumably
 * guaranteed at session setup; confirm with callers. */
Bool gf_fs_ui_event(GF_FilterSession *session, GF_Event *uievt)
{
Bool ret;
gf_mx_p(session->ui_mx);
ret = session->ui_event_proc(session->ui_opaque, uievt);
gf_mx_v(session->ui_mx);
return ret;
}
| 0
|
335,089
|
/* Ensure the socket's transmit scatterlist chain has a list entry with
 * room for more pages. If the last entry is full (or the chain is empty),
 * allocate a new skcipher_sg_list with MAX_SGL_ENTS slots (+1 for
 * chaining), link it onto the previous entry, and append it to the list.
 * Returns 0 on success or -ENOMEM. */
static int skcipher_alloc_sgl(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
struct skcipher_sg_list *sgl;
struct scatterlist *sg = NULL;
/* tsgl.prev is only dereferenced (via sgl->sg) when the list is
 * non-empty; on an empty list sgl points at the head itself. */
sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
if (!list_empty(&ctx->tsgl))
sg = sgl->sg;
if (!sg || sgl->cur >= MAX_SGL_ENTS) {
sgl = sock_kmalloc(sk, sizeof(*sgl) +
sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
GFP_KERNEL);
if (!sgl)
return -ENOMEM;
sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
sgl->cur = 0;
if (sg) {
/* chain the old tail into the new entry and clear the
 * end marker on the old last data slot */
scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
}
list_add_tail(&sgl->list, &ctx->tsgl);
}
return 0;
}
| 0
|
317,229
|
/* Check whether the current task has `access` rights to the message
 * queue's Smack label, with IPC audit info attached when auditing is
 * compiled in. smk_bu_current() applies bring-up-mode overrides to the
 * result. Returns 0 when access is allowed. */
static int smk_curacc_msq(struct kern_ipc_perm *isp, int access)
{
struct smack_known *msp = smack_of_ipc(isp);
struct smk_audit_info ad;
int rc;
#ifdef CONFIG_AUDIT
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
ad.a.u.ipc_id = isp->id;
#endif
rc = smk_curacc(msp, access, &ad);
rc = smk_bu_current("msq", msp, access, rc);
return rc;
}
| 0
|
197,111
|
// Decode all chunks of an EXR part into `exr_image`.
//
// `offsets` holds one file offset per chunk (scanline block or tile),
// `head`/`size` describe the whole in-memory file. On failure an error
// message is appended to `*err` (when non-NULL) and a TINYEXR_ERROR_*
// code is returned.
//
// FIXES vs. the previous revision:
//  - the tile-array calloc result is now checked (a crafted header with a
//    huge tile count could make calloc fail and the code then wrote
//    through a NULL pointer);
//  - calloc arguments are in the conventional (count, elem_size) order;
//  - removed an unused local stringstream.
//
// NOTE(review): several early-return error paths leak buffers allocated
// by AllocateImage for earlier tiles/scanlines — matches the previous
// behavior; a proper fix needs a shared cleanup path. TODO confirm and
// address separately.
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const std::vector<tinyexr::tinyexr_uint64> &offsets,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;
  // Number of scanlines per compressed block depends on the compression.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }
  int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
  int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
  if ((data_width < 0) || (data_height < 0)) {
    if (err) {
      std::stringstream ss;
      ss << "Invalid data width or data height: " << data_width << ", "
         << data_height << std::endl;
      (*err) += ss.str();
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Do not allow too large data_width and data_height. header invalid?
  {
    const int threshold = 1024 * 8192;  // heuristics
    if ((data_width > threshold) || (data_height > threshold)) {
      if (err) {
        std::stringstream ss;
        ss << "data_with or data_height too large. data_width: " << data_width
           << ", "
           << "data_height = " << data_height << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }
  size_t num_blocks = offsets.size();
  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                     &channel_offset, num_channels,
                                     exr_header->channels)) {
    if (err) {
      (*err) += "Failed to compute channel layout.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  bool invalid_data = false;  // TODO(LTE): Use atomic lock for MT safety.
  if (exr_header->tiled) {
    // value check
    if (exr_header->tile_size_x < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
    if (exr_header->tile_size_y < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
    size_t num_tiles = offsets.size();  // = # of blocks
    exr_image->tiles = static_cast<EXRTile *>(
        calloc(static_cast<size_t>(num_tiles), sizeof(EXRTile)));
    // FIX: calloc can fail (or num_tiles may be absurdly large from a
    // corrupt header); previously the result was used unchecked.
    if (exr_image->tiles == NULL) {
      if (err) {
        (*err) += "Failed to allocate tile array.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
      // Allocate memory for each tile.
      exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
          num_channels, exr_header->channels, exr_header->requested_pixel_types,
          exr_header->tile_size_x, exr_header->tile_size_y);
      // 16 byte: tile coordinates
      // 4 byte : data size
      // ~      : data(uncompressed or compressed)
      if (offsets[tile_idx] + sizeof(int) * 5 > size) {
        if (err) {
          (*err) += "Insufficient data size.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
      const unsigned char *data_ptr =
          reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
      int tile_coordinates[4];
      memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));
      // @todo{ LoD }
      if (tile_coordinates[2] != 0) {
        return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
      }
      if (tile_coordinates[3] != 0) {
        return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
      }
      int data_len;
      memcpy(&data_len, data_ptr + 16,
             sizeof(int));  // 16 = sizeof(tile_coordinates)
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
      if (data_len < 4 || size_t(data_len) > data_size) {
        if (err) {
          (*err) += "Insufficient data length.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      // Move to data addr: 20 = 16 + 4;
      data_ptr += 20;
      tinyexr::DecodeTiledPixelData(
          exr_image->tiles[tile_idx].images,
          &(exr_image->tiles[tile_idx].width),
          &(exr_image->tiles[tile_idx].height),
          exr_header->requested_pixel_types, data_ptr,
          static_cast<size_t>(data_len), exr_header->compression_type,
          exr_header->line_order, data_width, data_height, tile_coordinates[0],
          tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
          static_cast<size_t>(pixel_data_size),
          static_cast<size_t>(exr_header->num_custom_attributes),
          exr_header->custom_attributes,
          static_cast<size_t>(exr_header->num_channels), exr_header->channels,
          channel_offset_list);
      exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
      exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
      exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
      exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
      exr_image->num_tiles = static_cast<int>(num_tiles);
    }
  } else {  // scanline format
    // Don't allow too large image(256GB * pixel_data_size or more). Workaround
    // for #104.
    size_t total_data_len =
        size_t(data_width) * size_t(data_height) * size_t(num_channels);
    const bool total_data_len_overflown =
        sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
    if ((total_data_len == 0) || total_data_len_overflown) {
      if (err) {
        std::stringstream ss;
        ss << "Image data size is zero or too large: width = " << data_width
           << ", height = " << data_height << ", channels = " << num_channels
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
      size_t y_idx = static_cast<size_t>(y);
      if (offsets[y_idx] + sizeof(int) * 2 > size) {
        invalid_data = true;
      } else {
        // 4 byte: scan line
        // 4 byte: data size
        // ~     : pixel data(uncompressed or compressed)
        size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2));
        const unsigned char *data_ptr =
            reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
        int line_no;
        memcpy(&line_no, data_ptr, sizeof(int));
        int data_len;
        memcpy(&data_len, data_ptr + 4, sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
        if (size_t(data_len) > data_size) {
          invalid_data = true;
        } else if (data_len == 0) {
          // TODO(syoyo): May be ok to raise the threshold for example `data_len
          // < 4`
          invalid_data = true;
        } else {
          // line_no may be negative.
          int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                       (exr_header->data_window[3] + 1));
          int num_lines = end_line_no - line_no;
          if (num_lines <= 0) {
            invalid_data = true;
          } else {
            // Move to data addr: 8 = 4 + 4;
            data_ptr += 8;
            // Adjust line_no with data_window.bmin.y
            // overflow check
            tinyexr_int64 lno =
                static_cast<tinyexr_int64>(line_no) -
                static_cast<tinyexr_int64>(exr_header->data_window[1]);
            if (lno > std::numeric_limits<int>::max()) {
              line_no = -1;  // invalid
            } else if (lno < -std::numeric_limits<int>::max()) {
              line_no = -1;  // invalid
            } else {
              line_no -= exr_header->data_window[1];
            }
            if (line_no < 0) {
              invalid_data = true;
            } else {
              if (!tinyexr::DecodePixelData(
                      exr_image->images, exr_header->requested_pixel_types,
                      data_ptr, static_cast<size_t>(data_len),
                      exr_header->compression_type, exr_header->line_order,
                      data_width, data_height, data_width, y, line_no,
                      num_lines, static_cast<size_t>(pixel_data_size),
                      static_cast<size_t>(exr_header->num_custom_attributes),
                      exr_header->custom_attributes,
                      static_cast<size_t>(exr_header->num_channels),
                      exr_header->channels, channel_offset_list)) {
                invalid_data = true;
              }
            }
          }
        }
      }
    }  // omp parallel
  }
  if (invalid_data) {
    if (err) {
      (*err) += "Invalid data found when decoding pixels.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Overwrite `pixel_type` with `requested_pixel_type`.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }
  {
    exr_image->num_channels = num_channels;
    exr_image->width = data_width;
    exr_image->height = data_height;
  }
  return TINYEXR_SUCCESS;
}
| 1
|
310,135
|
drv_setfilter(TERMINAL_CONTROL_BLOCK * TCB)
{
    /* Put the terminal driver into "filter" mode: disable all cursor
     * motion and screen-clearing capabilities so output is confined to
     * the current line, and make "home" a plain carriage return. With
     * back-color-erase, clr_eos is also disabled to avoid stray color. */
    AssertTCB();
    /* *INDENT-EQLS* */
    clear_screen = ABSENT_STRING;
    cursor_address = ABSENT_STRING;
    cursor_down = ABSENT_STRING;
    cursor_up = ABSENT_STRING;
    parm_down_cursor = ABSENT_STRING;
    parm_up_cursor = ABSENT_STRING;
    row_address = ABSENT_STRING;
    cursor_home = carriage_return;
    if (back_color_erase)
        clr_eos = ABSENT_STRING;
}
| 0
|
412,125
|
dnsc_shared_secrets_cache_key(uint8_t* key,
	uint8_t esversion,
	uint8_t* pk,
	uint8_t* sk)
{
	/* Build the shared-secret cache key as [esversion | client public key |
	 * server secret key] in `key` (which must hold at least
	 * DNSCRYPT_SHARED_SECRET_KEY_LENGTH bytes) and return its hash. */
	key[0] = esversion;
	memcpy(key + 1, pk, crypto_box_PUBLICKEYBYTES);
	memcpy(key + 1 + crypto_box_PUBLICKEYBYTES, sk, crypto_box_SECRETKEYBYTES);
	return hashlittle(key, DNSCRYPT_SHARED_SECRET_KEY_LENGTH, 0);
}
| 0
|
333,052
|
st_pop(Frag_T **p, Frag_T *stack)
{
	/* Pop the top fragment: decrement the stack pointer at *p and return
	 * the element it now addresses. If the pointer underflows below the
	 * stack base, the `empty` sentinel is returned instead. */
	Frag_T *stackp;
	*p = *p - 1;
	stackp = *p;
	if (stackp < stack)
		return empty;
	return **p;
}
| 0
|
220,905
|
void DependencyOptimizer::OptimizeNode(int node_idx,
SetVector<int>* nodes_to_simplify,
std::set<int>* nodes_to_delete) {
NodeDef* node = optimized_graph_->mutable_node(node_idx);
const bool is_noop = IsNoOp(*node);
const bool is_identity = IsIdentity(*node) || IsIdentityNSingleInput(*node);
const bool is_multi_input_identity =
IsIdentityN(*node) && !IsIdentityNSingleInput(*node);
const string node_name = node->name();
// Constant nodes with no input control dependency are always executed early,
// so we can prune all their output control dependencies.
if (IsConstant(*node) && node->input_size() == 0) {
const auto output_nodes = node_map_->GetOutputs(node_name);
for (NodeDef* fanout : output_nodes) {
bool optimize_fanout = false;
bool data_connection = false;
for (int i = fanout->input_size() - 1; i >= 0; --i) {
const TensorId input_tensor = ParseTensorName(fanout->input(i));
if (input_tensor.node() == node_name) {
if (input_tensor.index() < 0) {
fanout->mutable_input()->SwapElements(i, fanout->input_size() - 1);
fanout->mutable_input()->RemoveLast();
optimize_fanout = true;
} else {
data_connection = true;
}
}
}
if (optimize_fanout) {
nodes_to_simplify->PushBack(node_to_idx_[fanout]);
if (!data_connection) {
node_map_->RemoveOutput(node_name, fanout->name());
}
}
}
if (node_map_->GetOutputs(node_name).empty() && fetch_nodes_known_ &&
nodes_to_preserve_.find(node_name) == nodes_to_preserve_.end()) {
// Mark the node for deletion.
nodes_to_delete->insert(node_to_idx_[node]);
}
return;
}
// Change ops that only have control dependencies as outputs to NoOps.
if (!is_noop && SafeToConvertToNoOp(*node)) {
VLOG(2) << "***** Replacing " << node_name << " (" << node->op()
<< ") with NoOp.";
// The outputs of this node are not consumed. Replace its inputs with
// control dependencies and replace the op itself with the NoOp op.
std::unordered_set<string> ctrl_inputs;
int pos = 0;
while (pos < node->input_size()) {
const string old_input = node->input(pos);
if (IsControlInput(old_input)) {
if (!ctrl_inputs.insert(old_input).second) {
// We found a duplicate control input. Remove it.
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
} else {
++pos;
}
continue;
}
// Replace a normal input with a control input.
const string ctrl_input = ConstantFolding::AddControlDependency(
old_input, optimized_graph_, node_map_.get());
ctrl_inputs.insert(ctrl_input);
node->set_input(pos, ctrl_input);
node_map_->UpdateInput(node_name, old_input, ctrl_input);
const NodeDef* old_input_node = node_map_->GetNode(old_input);
nodes_to_simplify->PushBack(node_to_idx_[old_input_node]);
++pos;
}
node->set_op("NoOp");
EraseRegularNodeAttributes(node);
DedupControlInputs(node);
nodes_to_simplify->PushBack(node_to_idx_[node]);
return;
}
// Remove NoOp nodes if the product of their fan-in and fan-out is less than
// or equal to the sum of the fan-in and fan-out. The non-trivial rewrites
// take the following form:
//
// Case a)
// x --^> +------+ x --^> +---+
// y --^> | NoOp | --^> a ==> y --^> | a |
// ... | | ... | |
// z --^> +------+ z --^> +---+
//
// Case b)
// +------+ --^> a +---+ --^> a
// x --^> | NoOp | --^> b ==> | x | --^> b
// | | ... | | ...
// +------+ --^> c +---+ --^> c
// Case c)
// +------+ x ---^> a
// x --^> | NoOp | --^> a ==> \/
// y --^> | | --^> b /\
// +------+ y ---^> b
//
// We only apply this optimization if we don't increase the number of control
// edges across device boundaries, e.g. in cases a) and b) if NoOp and
// a and x, respectively, are on the same device. Control edges across device
// boundaries require inter-device communication (Send/Recv pairs to be
// inserted in the graph), which is very costly.
//
// We also remove identity nodes, subject to the same constraints on number of
// resulting control edges and device boundary crossings:
//
// Case a)
// +----------+ ---> a +---+ ---> a
// x --> | Identity | --^> b ==> | x | --^> b
// | | ... | | ...
// +----------+ --^> c +---+ --^> c
//
// Case b)
// x ---> +----------+ ---> a x ---> +---+
// y --^> | Identity | ==> y --^> | a |
// ... | | ... | |
// z --^> +----------+ z --^> +---+
//
// Case c)
// +----------+ x ---> +---+
// x ---> | Identity | ---> a ==> \--^> | a |
// y --^> | | --^> b /\ +---+
// +----------+ y --^> b
if (is_noop || ((is_identity || is_multi_input_identity) &&
SafeToRemoveIdentity(*node))) {
const int num_inputs = node->input_size();
std::vector<NodeDef*> input_nodes;
for (int i = 0; i < num_inputs; ++i) {
NodeDef* input_node = node_map_->GetNode(node->input(i));
if (input_node == nullptr) {
LOG(ERROR) << "Invalid input " << node->input(i);
return;
}
input_nodes.push_back(input_node);
}
const auto& output_node_set = node_map_->GetOutputs(node_name);
const std::vector<NodeDef*> output_nodes(output_node_set.begin(),
output_node_set.end());
if (!BypassingNodeIsBeneficial(*node, input_nodes, output_nodes)) {
return;
}
VLOG(2) << "***** Rerouting input around\n" << node->DebugString();
// Now remove the node and re-wire its inputs to its outputs.
for (auto consumer : output_nodes) {
bool updated_consumer = false;
VLOG(2) << "consumer before:\n" << consumer->DebugString();
// Remove dependency on node from consumer.
for (int i = 0; i < num_inputs; ++i) {
const NodeDef* input = input_nodes[i];
// Forward dependency from input to consumer if it doesn't already
// depend on it.
if ((is_identity && i == 0) ||
(is_multi_input_identity && !IsControlInput(node->input(i)))) {
// Replace regular input from Identity node.
string new_input;
const string& input_to_forward = node->input(i);
CHECK(!IsControlInput(input_to_forward));
for (int j = 0; j < consumer->input_size(); ++j) {
const TensorId old_input = ParseTensorName(consumer->input(j));
if (old_input.node() == node_name) {
if (old_input.index() == i) {
// Regular input
new_input = input_to_forward;
node_map_->UpdateInput(consumer->name(),
string(old_input.node()), new_input);
consumer->set_input(j, new_input);
} else if (old_input.index() == -1) {
// Control dependency
new_input = AsControlDependency(NodeName(input_to_forward));
node_map_->UpdateInput(consumer->name(),
string(old_input.node()), new_input);
consumer->set_input(j, new_input);
}
}
}
updated_consumer = true;
} else {
// Forward dependency from input to consumer if it doesn't already
// depend on it.
if (node_map_->GetOutputs(input->name()).count(consumer) == 0) {
consumer->add_input(AsControlDependency(input->name()));
node_map_->AddOutput(input->name(), consumer->name());
nodes_to_simplify->PushBack(node_to_idx_[input]);
updated_consumer = true;
}
}
}
updated_consumer |= RemoveControlInput(
consumer, AsControlDependency(node_name), node_map_.get());
if (updated_consumer) {
nodes_to_simplify->PushBack(node_to_idx_[consumer]);
}
VLOG(2) << "consumer after:\n" << consumer->DebugString();
}
node_map_->RemoveOutputs(node_name);
if (fetch_nodes_known_ &&
nodes_to_preserve_.find(node_name) == nodes_to_preserve_.end()) {
// Mark the node for deletion.
nodes_to_delete->insert(node_idx);
// Disconnect the node from its inputs to enable further optimizations.
node_map_->RemoveInputs(node_name);
node->clear_input();
}
}
}
| 0
|
242,663
|
proto_register_sysdig_event(void)
{
/* XXX Match up with Sysdig's names. */
static hf_register_info hf[] = {
{ &hf_se_cpu_id,
{ "CPU ID", "sysdig.cpu_id",
FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }
},
{ &hf_se_thread_id,
{ "Thread ID", "sysdig.thread_id",
FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL }
},
{ &hf_se_event_length,
{ "Event length", "sysdig.event_len",
FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }
},
{ &hf_se_nparams,
{ "Number of parameters", "sysdig.nparams",
FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }
},
{ &hf_se_event_type,
{ "Event type", "sysdig.event_type",
FT_UINT16, BASE_DEC, VALS(event_type_vals), 0, NULL, HFILL }
},
{ &hf_se_param_lens,
{ "Parameter lengths", "sysdig.param.lens",
FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL }
},
{ &hf_se_param_len,
{ "Parameter length", "sysdig.param.len",
FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }
},
/* Header field registration. Automatically generated by tools/generate-sysdig-event.py */
{ &hf_param_ID_bytes, { "ID", "sysdig.param.syscall.ID", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_action_uint32, { "action", "sysdig.param.cpu_hotplug.action", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_addr_bytes, { "addr", "sysdig.param.ptrace.addr", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_addr_uint64, { "addr", "sysdig.param.page_fault.addr", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_args_string, { "Program arguments", "sysdig.param.execve.args", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_argument_uint64, { "I/O control: argument", "sysdig.param.ioctl.argument", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_backlog_uint32, { "backlog", "sysdig.param.listen.backlog", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cgroups_bytes, { "cgroups", "sysdig.param.execve.cgroups", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_clockid_uint8, { "clockid", "sysdig.param.timerfd_create.clockid", FT_UINT8, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cmd_bytes, { "cmd", "sysdig.param.semctl.cmd", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_cmd_int64, { "cmd", "sysdig.param.bpf.cmd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_comm_string, { "Command", "sysdig.param.execve.comm", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_core_uint8, { "core", "sysdig.param.procexit.core", FT_UINT8, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cpu_sys_uint64, { "cpu_sys", "sysdig.param.procinfo.cpu_sys", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cpu_uint32, { "cpu", "sysdig.param.cpu_hotplug.cpu", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cpu_usr_uint64, { "cpu_usr", "sysdig.param.procinfo.cpu_usr", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cur_int64, { "cur", "sysdig.param.setrlimit.cur", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_cwd_string, { "Current working directory", "sysdig.param.execve.cwd", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_data_bytes, { "data", "sysdig.param.ptrace.data", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_desc_string, { "desc", "sysdig.param.notification.desc", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_description_string, { "description", "sysdig.param.infra.description", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dev_string, { "dev", "sysdig.param.mount.dev", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dev_uint32, { "dev", "sysdig.param.openat.dev", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_dir_string, { "dir", "sysdig.param.mount.dir", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dirfd_int64, { "dirfd", "sysdig.param.openat2.dirfd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_domain_bytes, { "domain", "sysdig.param.socketpair.domain", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dpid_bytes, { "dpid", "sysdig.param.signaldeliver.dpid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_bhardlimit_uint64, { "dqb_bhardlimit", "sysdig.param.quotactl.dqb_bhardlimit", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_bsoftlimit_uint64, { "dqb_bsoftlimit", "sysdig.param.quotactl.dqb_bsoftlimit", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_btime_bytes, { "dqb_btime", "sysdig.param.quotactl.dqb_btime", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_curspace_uint64, { "dqb_curspace", "sysdig.param.quotactl.dqb_curspace", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_ihardlimit_uint64, { "dqb_ihardlimit", "sysdig.param.quotactl.dqb_ihardlimit", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_isoftlimit_uint64, { "dqb_isoftlimit", "sysdig.param.quotactl.dqb_isoftlimit", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_dqb_itime_bytes, { "dqb_itime", "sysdig.param.quotactl.dqb_itime", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dqi_bgrace_bytes, { "dqi_bgrace", "sysdig.param.quotactl.dqi_bgrace", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dqi_flags_bytes, { "dqi_flags", "sysdig.param.quotactl.dqi_flags", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_dqi_igrace_bytes, { "dqi_igrace", "sysdig.param.quotactl.dqi_igrace", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_egid_bytes, { "egid", "sysdig.param.getresgid.egid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_env_string, { "env", "sysdig.param.execve.env", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_error_bytes, { "error", "sysdig.param.page_fault.error", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_euid_bytes, { "euid", "sysdig.param.getresuid.euid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_event_data_bytes, { "event_data", "sysdig.param.pluginevent.event_data", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_event_data_uint64, { "event_data", "sysdig.param.sysdigevent.event_data", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_event_type_uint32, { "event_type", "sysdig.param.sysdigevent.event_type", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_exe_string, { "exe", "sysdig.param.execve.exe", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_fd1_int64, { "fd1", "sysdig.param.pipe.fd1", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fd2_int64, { "fd2", "sysdig.param.pipe.fd2", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fd_in_int64, { "fd_in", "sysdig.param.splice.fd_in", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fd_int64, { "fd", "sysdig.param.openat2.fd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fd_out_int64, { "fd_out", "sysdig.param.splice.fd_out", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fdlimit_int64, { "fdlimit", "sysdig.param.vfork.fdlimit", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fdlimit_uint64, { "fdlimit", "sysdig.param.execve.fdlimit", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_fds_bytes, { "fds", "sysdig.param.ppoll.fds", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_filename_bytes, { "filename", "sysdig.param.fchmodat.filename", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_filename_string, { "filename", "sysdig.param.chmod.filename", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_flags_bytes, { "flags", "sysdig.param.openat2.flags", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_flags_uint32, { "flags", "sysdig.param.accept.flags", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_gid_bytes, { "gid", "sysdig.param.getgid.gid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_gid_uint32, { "gid", "sysdig.param.vfork.gid", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_how_bytes, { "how", "sysdig.param.shutdown.how", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_id_int64, { "id", "sysdig.param.tracer.id", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_id_string, { "id", "sysdig.param.notification.id", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_id_uint32, { "id", "sysdig.param.quotactl.id", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_image_string, { "image", "sysdig.param.container.image", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_in_fd_int64, { "in_fd", "sysdig.param.sendfile.in_fd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_initval_uint64, { "initval", "sysdig.param.eventfd.initval", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_ino_uint64, { "ino", "sysdig.param.pipe.ino", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_interval_bytes, { "interval", "sysdig.param.nanosleep.interval", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_ip_uint64, { "ip", "sysdig.param.page_fault.ip", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_json_string, { "json", "sysdig.param.container.json", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_key_int32, { "key", "sysdig.param.semget.key", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_length_uint64, { "length", "sysdig.param.munmap.length", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_level_bytes, { "level", "sysdig.param.getsockopt.level", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_linkdirfd_int64, { "linkdirfd", "sysdig.param.symlinkat.linkdirfd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_linkpath_bytes, { "linkpath", "sysdig.param.symlinkat.linkpath", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_linkpath_string, { "linkpath", "sysdig.param.symlink.linkpath", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_loginuid_int32, { "loginuid", "sysdig.param.execve.loginuid", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_mask_uint32, { "mask", "sysdig.param.signalfd.mask", FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_max_int64, { "max", "sysdig.param.setrlimit.max", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_maxevents_bytes, { "maxevents", "sysdig.param.epoll_wait.maxevents", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_mode_bytes, { "mode", "sysdig.param.fchmod.mode", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_mode_uint32, { "mode", "sysdig.param.openat2.mode", FT_UINT32, BASE_OCT, NULL, 0, NULL, HFILL } },
{ &hf_param_name_bytes, { "name", "sysdig.param.openat2.name", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_name_string, { "name", "sysdig.param.infra.name", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_nativeID_uint16, { "nativeID", "sysdig.param.syscall.nativeID", FT_UINT16, BASE_DEC, VALS(nativeID_uint16_vals), 0, NULL, HFILL } },
{ &hf_param_newcur_int64, { "newcur", "sysdig.param.prlimit.newcur", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_newdir_int64, { "newdir", "sysdig.param.linkat.newdir", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_newdirfd_int64, { "newdirfd", "sysdig.param.renameat2.newdirfd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_newmax_int64, { "newmax", "sysdig.param.prlimit.newmax", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_newpath_bytes, { "newpath", "sysdig.param.renameat2.newpath", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_newpath_string, { "newpath", "sysdig.param.link.newpath", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_next_bytes, { "next", "sysdig.param.switch.next", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_nsems_int32, { "nsems", "sysdig.param.semget.nsems", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_nsops_uint32, { "nsops", "sysdig.param.semop.nsops", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_nstype_bytes, { "nstype", "sysdig.param.setns.nstype", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_offset_uint64, { "offset", "sysdig.param.sendfile.offset", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_oldcur_int64, { "oldcur", "sysdig.param.prlimit.oldcur", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_olddir_int64, { "olddir", "sysdig.param.linkat.olddir", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_olddirfd_int64, { "olddirfd", "sysdig.param.renameat2.olddirfd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_oldmax_int64, { "oldmax", "sysdig.param.prlimit.oldmax", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_oldpath_bytes, { "oldpath", "sysdig.param.renameat2.oldpath", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_oldpath_string, { "oldpath", "sysdig.param.link.oldpath", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_op_bytes, { "op", "sysdig.param.futex.op", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_op_uint64, { "op", "sysdig.param.seccomp.op", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_operation_bytes, { "operation", "sysdig.param.flock.operation", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_optlen_uint32, { "optlen", "sysdig.param.getsockopt.optlen", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_optname_bytes, { "optname", "sysdig.param.getsockopt.optname", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_out_fd_int64, { "out_fd", "sysdig.param.sendfile.out_fd", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_path_bytes, { "path", "sysdig.param.mkdirat.path", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_path_string, { "path", "sysdig.param.unlink.path", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_peer_uint64, { "peer", "sysdig.param.socketpair.peer", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_pgft_maj_uint64, { "pgft_maj", "sysdig.param.execve.pgft_maj", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_pgft_min_uint64, { "pgft_min", "sysdig.param.execve.pgft_min", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_pgid_bytes, { "pgid", "sysdig.param.setpgid.pgid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_pgoffset_uint64, { "pgoffset", "sysdig.param.mmap2.pgoffset", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_pid_bytes, { "pid", "sysdig.param.setpgid.pid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_plugin_ID_uint32, { "plugin_ID", "sysdig.param.pluginevent.plugin_ID", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_pos_uint64, { "pos", "sysdig.param.pwritev.pos", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_prot_bytes, { "prot", "sysdig.param.mmap2.prot", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_proto_uint32, { "proto", "sysdig.param.socketpair.proto", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_ptid_bytes, { "ptid", "sysdig.param.execve.ptid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_queuelen_uint32, { "queuelen", "sysdig.param.accept.queuelen", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_queuemax_uint32, { "queuemax", "sysdig.param.accept.queuemax", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_queuepct_uint8, { "Accept queue per connection", "sysdig.param.accept.queuepct", FT_UINT8, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_quota_fmt_bytes, { "quota_fmt", "sysdig.param.quotactl.quota_fmt", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_quota_fmt_out_bytes, { "quota_fmt_out", "sysdig.param.quotactl.quota_fmt_out", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_quotafilepath_string, { "quotafilepath", "sysdig.param.quotactl.quotafilepath", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_ratio_uint32, { "ratio", "sysdig.param.drop.ratio", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_request_bytes, { "request", "sysdig.param.ptrace.request", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_request_uint64, { "I/O control: request", "sysdig.param.ioctl.request", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_res_bytes, { "res", "sysdig.param.userfaultfd.res", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_res_int64, { "res", "sysdig.param.fcntl.res", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_res_or_fd_bytes, { "res_or_fd", "sysdig.param.bpf.res_or_fd", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_res_uint64, { "res", "sysdig.param.mmap2.res", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_resolve_bytes, { "resolve", "sysdig.param.openat2.resolve", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_resource_bytes, { "resource", "sysdig.param.prlimit.resource", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_ret_bytes, { "ret", "sysdig.param.procexit.ret", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_rgid_bytes, { "rgid", "sysdig.param.getresgid.rgid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_ruid_bytes, { "ruid", "sysdig.param.getresuid.ruid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_scope_string, { "scope", "sysdig.param.infra.scope", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_sem_flg_0_bytes, { "sem_flg_0", "sysdig.param.semop.sem_flg_0", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_sem_flg_1_bytes, { "sem_flg_1", "sysdig.param.semop.sem_flg_1", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_sem_num_0_uint16, { "sem_num_0", "sysdig.param.semop.sem_num_0", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_sem_num_1_uint16, { "sem_num_1", "sysdig.param.semop.sem_num_1", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_sem_op_0_int16, { "sem_op_0", "sysdig.param.semop.sem_op_0", FT_INT16, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_sem_op_1_int16, { "sem_op_1", "sysdig.param.semop.sem_op_1", FT_INT16, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_semflg_bytes, { "semflg", "sysdig.param.semget.semflg", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_semid_int32, { "semid", "sysdig.param.semctl.semid", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_semnum_int32, { "semnum", "sysdig.param.semctl.semnum", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_sgid_bytes, { "sgid", "sysdig.param.getresgid.sgid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_sig_bytes, { "sig", "sysdig.param.signaldeliver.sig", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_sigmask_bytes, { "sigmask", "sysdig.param.ppoll.sigmask", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_size_uint32, { "size", "sysdig.param.pwritev.size", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_size_uint64, { "size", "sysdig.param.sendfile.size", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_source_string, { "source", "sysdig.param.infra.source", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_source_uint64, { "source", "sysdig.param.socketpair.source", FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL } },
{ &hf_param_special_string, { "special", "sysdig.param.quotactl.special", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_spid_bytes, { "spid", "sysdig.param.signaldeliver.spid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_status_bytes, { "status", "sysdig.param.procexit.status", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_suid_bytes, { "suid", "sysdig.param.getresuid.suid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_tags_bytes, { "tags", "sysdig.param.tracer.tags", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_target_string, { "target", "sysdig.param.symlinkat.target", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_tid_bytes, { "tid", "sysdig.param.execve.tid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_timeout_bytes, { "timeout", "sysdig.param.ppoll.timeout", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_timeout_int64, { "timeout", "sysdig.param.poll.timeout", FT_INT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_tty_int32, { "tty", "sysdig.param.execve.tty", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_tuple_bytes, { "tuple", "sysdig.param.accept.tuple", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_type_bytes, { "type", "sysdig.param.quotactl.type", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_type_string, { "type", "sysdig.param.mount.type", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_type_uint32, { "type", "sysdig.param.container.type", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_uid_bytes, { "uid", "sysdig.param.getuid.uid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_uid_uint32, { "uid", "sysdig.param.vfork.uid", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_val_bytes, { "val", "sysdig.param.getsockopt.val", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_val_int32, { "val", "sysdig.param.semctl.val", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_val_uint64, { "val", "sysdig.param.futex.val", FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_vm_rss_uint32, { "vm_rss", "sysdig.param.execve.vm_rss", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_vm_size_uint32, { "vm_size", "sysdig.param.execve.vm_size", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_vm_swap_uint32, { "vm_swap", "sysdig.param.execve.vm_swap", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL } },
{ &hf_param_vpid_bytes, { "vpid", "sysdig.param.vfork.vpid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_vtid_bytes, { "vtid", "sysdig.param.vfork.vtid", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
{ &hf_param_whence_bytes, { "whence", "sysdig.param.llseek.whence", FT_BYTES, BASE_NONE, NULL, 0, NULL, HFILL } },
};
/* Setup protocol subtree array */
static gint *ett[] = {
&ett_sysdig_event,
&ett_sysdig_parm_lens,
&ett_sysdig_syscall
};
/* Register the protocol name and description */
proto_sysdig_event = proto_register_protocol("Sysdig System Call",
"Sysdig Event", "sysdig");
/* Required function calls to register the header fields and subtrees */
proto_register_field_array(proto_sysdig_event, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
register_dissector("sysdig", dissect_sysdig_event, proto_sysdig_event);
}
| 0
|
424,939
|
static int iwl_dbgfs_monitor_data_release(struct inode *inode,
					  struct file *file)
{
	struct iwl_trans_pcie *pcie_trans =
		IWL_TRANS_GET_PCIE_TRANS(inode->i_private);

	/*
	 * debugfs release hook for the firmware monitor data file: once the
	 * reader that had the monitor open goes away, flip the state back to
	 * CLOSED so a subsequent open is allowed again.  Always succeeds.
	 */
	if (pcie_trans->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
		pcie_trans->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;

	return 0;
}
| 0
|
482,509
|
passGetVariableNumber(
		const FileInfo *file, CharsString *passLine, int *passLinepos, widechar *number) {
	/* Parse a pass-variable number from the pass line and validate that it
	 * addresses one of the NUMVAR pass variables.  Returns 1 on success;
	 * on failure reports a compile error against `file` and returns 0. */
	if (passGetNumber(passLine, passLinepos, number)) {
		if ((*number >= 0) && (*number < NUMVAR)) {
			return 1;
		}
		compileError(file, "variable number out of range");
		return 0;
	}
	compileError(file, "missing variable number");
	return 0;
}
| 0
|
509,520
|
/*
 * Read a row via index `index` using the given key/flag, without a prior
 * index_init() on that index.  Temporarily installs the pushed index
 * condition (ICP) when it was pushed for this same index, and always
 * uninstalls it before returning.  Returns the maria_rkey() error code
 * (0 on success).
 */
int ha_maria::index_read_idx_map(uchar * buf, uint index, const uchar * key,
                                 key_part_map keypart_map,
                                 enum ha_rkey_function find_flag)
{
  int error;
  /* Register the handler so the storage engine can attribute this access
     (e.g. for kill/progress checks) to the current statement. */
  register_handler(file);
  /* Use the pushed index condition if it matches the index we're scanning */
  end_range= NULL;
  if (index == pushed_idx_cond_keyno)
    ma_set_index_cond_func(file, handler_index_cond_check, this);
  error= maria_rkey(file, buf, index, key, keypart_map, find_flag);
  /* Always clear the index condition callback: this entry point performs a
     single probe, so the ICP function must not leak into later calls. */
  ma_set_index_cond_func(file, NULL, 0);
  return error;
}
| 0
|
389,723
|
check_for_string_or_list_or_blob_arg(typval_T *args, int idx)
{
    /* Argument-type guard: accept exactly a String, List or Blob value at
     * position "idx" (0-based).  Returns OK when acceptable; otherwise emits
     * the standard "string, list or blob required" error, reporting the
     * argument as 1-based, and returns FAIL. */
    if (args[idx].v_type == VAR_STRING
	    || args[idx].v_type == VAR_LIST
	    || args[idx].v_type == VAR_BLOB)
	return OK;

    semsg(_(e_string_list_or_blob_required_for_argument_nr), idx + 1);
    return FAIL;
}
| 0
|
275,509
|
/*
 * Expose the raw bytes of a JS value through *dst.
 *
 * For TypedArray / DataView / ArrayBuffer values, dst points directly into
 * the (live) backing buffer — no copy is made, so the caller must not use
 * dst past any operation that may detach or free the buffer.  For all other
 * values the value is converted to a string; short strings are copied into
 * memory from vm->mem_pool (owned by the pool, not the caller), while long
 * strings are referenced in place.
 *
 * Returns NJS_OK on success; NJS_ERROR when src is NULL, the buffer is
 * detached, the string conversion fails, or allocation fails.
 */
njs_vm_value_to_bytes(njs_vm_t *vm, njs_str_t *dst, njs_value_t *src)
{
    u_char              *start;
    size_t              size, length, offset;
    njs_int_t           ret;
    njs_value_t         value;
    njs_typed_array_t   *array;
    njs_array_buffer_t  *buffer;

    if (njs_slow_path(src == NULL)) {
        return NJS_ERROR;
    }

    ret = NJS_OK;
    /* Work on a local copy so string conversion cannot clobber *src. */
    value = *src;

    switch (value.type) {
    case NJS_TYPED_ARRAY:
    case NJS_DATA_VIEW:
    case NJS_ARRAY_BUFFER:
        if (value.type != NJS_ARRAY_BUFFER) {
            /* TypedArray/DataView: a view with its own offset and length
             * into an underlying ArrayBuffer. */
            array = njs_typed_array(&value);
            buffer = njs_typed_array_buffer(array);
            offset = array->offset;
            length = array->byte_length;
        } else {
            /* Plain ArrayBuffer: the whole buffer. */
            buffer = njs_array_buffer(&value);
            offset = 0;
            length = buffer->size;
        }
        /* A detached buffer has no accessible storage; reject it before
         * dereferencing its data. */
        if (njs_slow_path(njs_is_detached_buffer(buffer))) {
            njs_type_error(vm, "detached buffer");
            return NJS_ERROR;
        }
        dst->start = &buffer->u.u8[offset];
        dst->length = length;
        break;
    default:
        ret = njs_value_to_string(vm, &value, &value);
        if (njs_slow_path(ret != NJS_OK)) {
            return NJS_ERROR;
        }
        /* short_string.size doubles as a tag: NJS_STRING_LONG means the
         * payload lives in the long_string representation instead. */
        size = value.short_string.size;
        if (size != NJS_STRING_LONG) {
            /* Short string: the bytes live inside the value itself, which is
             * a stack copy here — copy them out to pool memory so dst stays
             * valid after we return. */
            start = njs_mp_alloc(vm->mem_pool, size);
            if (njs_slow_path(start == NULL)) {
                njs_memory_error(vm);
                return NJS_ERROR;
            }
            memcpy(start, value.short_string.start, size);
        } else {
            /* Long string: heap-allocated data that can be referenced
             * directly. */
            size = value.long_string.size;
            start = value.long_string.data->start;
        }
        dst->length = size;
        dst->start = start;
    }

    return ret;
}
| 0
|
313,557
|
static int rose_dev_exists(rose_address *addr)
{
	struct net_device *dev;
	int found = 0;

	/*
	 * Scan all netdevices under RCU protection and report whether some
	 * ROSE interface that is administratively up carries this address.
	 */
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!(dev->flags & IFF_UP) || dev->type != ARPHRD_ROSE)
			continue;
		if (rosecmp(addr, (const rose_address *)dev->dev_addr) == 0) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}
| 0
|
261,951
|
njs_string_create(njs_vm_t *vm, njs_value_t *value, const char *src,
    size_t size)
{
    /* Wrap the caller's raw bytes and build a string value from them,
     * decoding the input as UTF-8. */
    njs_str_t  str = {
        .length = size,
        .start = (u_char *) src,
    };

    return njs_string_decode_utf8(vm, value, &str);
}
| 0
|
317,314
|
static int selinux_msg_msg_alloc_security(struct msg_msg *msg)
{
struct msg_security_struct *msec;
msec = selinux_msg_msg(msg);
msec->sid = SECINITSID_UNLABELED;
return 0;
}
| 0
|
221,424
|
static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	/* True when L1 requested nested paging (NPT) in the cached VMCB12
	 * control field for its L2 guest. */
	return (svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) != 0;
}
| 0
|
227,015
|
/*
 * Callback for IRC numeric 366 (end of /NAMES list).
 *
 * When the channel is known and has nicks: optionally prints the nick
 * list (built from the "nicklist" infolist) and the per-role counts
 * (ops/halfops/voices/normals), then requests channel modes / WHOX on
 * first join.  Otherwise prints the raw message text.
 *
 * Fix: irc_nick_find_color() may return NULL (the original code checked
 * NULL only before free(), after already passing it to strcat()); the
 * color is now only appended when non-NULL.
 */
IRC_PROTOCOL_CALLBACK(366)
{
    struct t_irc_channel *ptr_channel;
    struct t_infolist *infolist;
    struct t_config_option *ptr_option;
    int num_nicks, num_op, num_halfop, num_voice, num_normal, length, i;
    char *string, str_nicks_count[2048], *color;
    const char *prefix, *prefix_color, *nickname;

    IRC_PROTOCOL_MIN_ARGS(5);

    ptr_channel = irc_channel_search (server, argv[3]);
    if (ptr_channel && ptr_channel->nicks)
    {
        /* display users on channel */
        if (weechat_hashtable_has_key (ptr_channel->join_msg_received, "353")
            || weechat_hashtable_has_key (irc_config_hashtable_display_join_message, "353"))
        {
            infolist = weechat_infolist_get ("nicklist", ptr_channel->buffer, NULL);
            if (infolist)
            {
                /*
                 * first pass: compute an upper bound on the output size
                 * (prefix color + prefix + nick color + nick + reset + space
                 * per nick)
                 */
                length = 0;
                while (weechat_infolist_next (infolist))
                {
                    if (strcmp (weechat_infolist_string (infolist, "type"),
                                "nick") == 0)
                    {
                        ptr_option = weechat_config_get (weechat_infolist_string (infolist,
                                                                                  "prefix_color"));
                        /*
                         * NOTE(review): when "prefix_color" is a plain color
                         * name (no '.'), weechat_config_get() likely returns
                         * NULL and contributes 0 here, while the second pass
                         * appends weechat_color(prefix_color) — verify the
                         * reserved padding always covers it.
                         */
                        length +=
                            ((ptr_option) ? strlen (weechat_color (weechat_config_string (ptr_option))) : 0) +
                            strlen (weechat_infolist_string (infolist, "prefix")) +
                            16 + /* nick color */
                            strlen (weechat_infolist_string (infolist, "name")) +
                            16 + /* reset color */
                            1; /* space */
                    }
                }
                if (length > 0)
                {
                    string = malloc (length);
                    if (string)
                    {
                        string[0] = '\0';
                        i = 0;
                        /*
                         * second pass: the infolist cursor restarted after
                         * reaching the end above, so iterate again and build
                         * the colored nick list
                         */
                        while (weechat_infolist_next (infolist))
                        {
                            if (strcmp (weechat_infolist_string (infolist, "type"),
                                        "nick") == 0)
                            {
                                if (i > 0)
                                {
                                    strcat (string, IRC_COLOR_RESET);
                                    strcat (string, " ");
                                }
                                prefix = weechat_infolist_string (infolist, "prefix");
                                if (prefix[0] && (prefix[0] != ' '))
                                {
                                    prefix_color = weechat_infolist_string (infolist,
                                                                            "prefix_color");
                                    if (strchr (prefix_color, '.'))
                                    {
                                        /* option name ("a.b"): resolve it */
                                        ptr_option = weechat_config_get (weechat_infolist_string (infolist,
                                                                                                  "prefix_color"));
                                        if (ptr_option)
                                            strcat (string, weechat_color (weechat_config_string (ptr_option)));
                                    }
                                    else
                                    {
                                        /* direct color name */
                                        strcat (string, weechat_color (prefix_color));
                                    }
                                    strcat (string, prefix);
                                }
                                nickname = weechat_infolist_string (infolist, "name");
                                if (weechat_config_boolean (irc_config_look_color_nicks_in_names))
                                {
                                    if (irc_server_strcasecmp (server, nickname, server->nick) == 0)
                                        strcat (string, IRC_COLOR_CHAT_NICK_SELF);
                                    else
                                    {
                                        color = irc_nick_find_color (nickname);
                                        /* fix: guard against NULL before strcat */
                                        if (color)
                                        {
                                            strcat (string, color);
                                            free (color);
                                        }
                                    }
                                }
                                else
                                    strcat (string, IRC_COLOR_RESET);
                                strcat (string, nickname);
                                i++;
                            }
                        }
                        weechat_printf_date_tags (
                            irc_msgbuffer_get_target_buffer (
                                server, NULL, command, "names",
                                ptr_channel->buffer),
                            date,
                            irc_protocol_tags (
                                command, "irc_numeric", NULL, NULL),
                            _("%sNicks %s%s%s: %s[%s%s]"),
                            weechat_prefix ("network"),
                            IRC_COLOR_CHAT_CHANNEL,
                            ptr_channel->name,
                            IRC_COLOR_RESET,
                            IRC_COLOR_CHAT_DELIMITERS,
                            string,
                            IRC_COLOR_CHAT_DELIMITERS);
                        free (string);
                    }
                }
                weechat_infolist_free (infolist);
            }
        }
        /* display number of nicks, ops, halfops & voices on the channel */
        if (weechat_hashtable_has_key (ptr_channel->join_msg_received, "366")
            || weechat_hashtable_has_key (irc_config_hashtable_display_join_message, "366"))
        {
            irc_nick_count (server, ptr_channel, &num_nicks, &num_op, &num_halfop,
                            &num_voice, &num_normal);
            str_nicks_count[0] = '\0';
            /* each count is only shown if the server supports the mode */
            if (irc_server_get_prefix_mode_index (server, 'o') >= 0)
            {
                length = strlen (str_nicks_count);
                snprintf (str_nicks_count + length,
                          sizeof (str_nicks_count) - length,
                          "%s%s%d%s %s",
                          (str_nicks_count[0]) ? ", " : "",
                          IRC_COLOR_CHAT_CHANNEL,
                          num_op,
                          IRC_COLOR_RESET,
                          NG_("op", "ops", num_op));
            }
            if (irc_server_get_prefix_mode_index (server, 'h') >= 0)
            {
                length = strlen (str_nicks_count);
                snprintf (str_nicks_count + length,
                          sizeof (str_nicks_count) - length,
                          "%s%s%d%s %s",
                          (str_nicks_count[0]) ? ", " : "",
                          IRC_COLOR_CHAT_CHANNEL,
                          num_halfop,
                          IRC_COLOR_RESET,
                          NG_("halfop", "halfops", num_halfop));
            }
            if (irc_server_get_prefix_mode_index (server, 'v') >= 0)
            {
                length = strlen (str_nicks_count);
                snprintf (str_nicks_count + length,
                          sizeof (str_nicks_count) - length,
                          "%s%s%d%s %s",
                          (str_nicks_count[0]) ? ", " : "",
                          IRC_COLOR_CHAT_CHANNEL,
                          num_voice,
                          IRC_COLOR_RESET,
                          NG_("voice", "voices", num_voice));
            }
            length = strlen (str_nicks_count);
            snprintf (str_nicks_count + length,
                      sizeof (str_nicks_count) - length,
                      "%s%s%d%s %s",
                      (str_nicks_count[0]) ? ", " : "",
                      IRC_COLOR_CHAT_CHANNEL,
                      num_normal,
                      IRC_COLOR_RESET,
                      NG_("normal", "normals", num_normal));
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, "names", ptr_channel->buffer),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                _("%sChannel %s%s%s: %s%d%s %s %s(%s%s)"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_CHANNEL,
                ptr_channel->name,
                IRC_COLOR_RESET,
                IRC_COLOR_CHAT_CHANNEL,
                num_nicks,
                IRC_COLOR_RESET,
                NG_("nick", "nicks", num_nicks),
                IRC_COLOR_CHAT_DELIMITERS,
                str_nicks_count,
                IRC_COLOR_CHAT_DELIMITERS);
        }
        /* on first 366 for this channel, ask for modes and WHOX info */
        if (!weechat_hashtable_has_key (ptr_channel->join_msg_received, command))
        {
            irc_command_mode_server (server, "MODE", ptr_channel, NULL,
                                     IRC_SERVER_SEND_OUTQ_PRIO_LOW);
            irc_channel_check_whox (server, ptr_channel);
        }
    }
    else
    {
        /* unknown channel (or no nicks): just print the raw message */
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, "names", NULL),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s%s%s: %s",
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_CHANNEL,
            argv[3],
            IRC_COLOR_RESET,
            (argv[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]);
    }
    if (ptr_channel)
    {
        weechat_hashtable_set (ptr_channel->join_msg_received, "353", "1");
        weechat_hashtable_set (ptr_channel->join_msg_received, "366", "1");
    }
    weechat_bar_item_update ("input_prompt");
    return WEECHAT_RC_OK;
}
| 0
|
286,733
|
SWTPM_NVRAM_GetPlainData(unsigned char **plain, uint32_t *plain_length,
                         const unsigned char *data, uint32_t length,
                         uint16_t tag_data,
                         uint8_t hdrversion)
{
    /*
     * Extract the plain (unencrypted) payload from a NVRAM byte stream.
     *
     * hdrversion 1: the whole buffer is the payload; duplicate it.
     * hdrversion 2: the payload is a TLV entry tagged tag_data; locate
     *               and duplicate it.
     *
     * On success *plain receives a malloc'ed copy (caller frees) and
     * *plain_length its size; returns 0.  Returns TPM_FAIL on error,
     * leaving *plain/*plain_length untouched.
     */
    TPM_RESULT rc = 0;
    tlv_data td[1];

    switch (hdrversion) {
    case 1:
        *plain = malloc(length);
        if (*plain) {
            memcpy(*plain, data, length);
            *plain_length = length;
        } else {
            logprintf(STDERR_FILENO,
                      "Could not allocate %u bytes.\n", length);
            rc = TPM_FAIL;
        }
        break;
    case 2:
        if (!tlv_data_find_tag(data, length, tag_data, &td[0])) {
            logprintf(STDERR_FILENO,
                      "Could not find plain data in byte stream.\n");
            rc = TPM_FAIL;
            break;
        }
        *plain = malloc(td->tlv.length);
        if (*plain) {
            memcpy(*plain, td->u.const_ptr, td->tlv.length);
            *plain_length = td->tlv.length;
        } else {
            logprintf(STDERR_FILENO,
                      "Could not allocate %u bytes.\n", td->tlv.length);
            rc = TPM_FAIL;
        }
        break;
    default:
        /*
         * Fix: previously an unknown header version fell through and
         * returned success with *plain left uninitialized.
         */
        logprintf(STDERR_FILENO,
                  "Unsupported header version %u.\n", (unsigned)hdrversion);
        rc = TPM_FAIL;
        break;
    }
    return rc;
}
| 0
|
238,555
|
/*
 * Validate a helper function prototype: raw-mode usage, argument
 * pairing, BTF ID setup and reference-counting constraints must all
 * hold (checked in that order, stopping at the first failure).
 *
 * Returns 0 when the prototype is well-formed, -EINVAL otherwise.
 */
static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
{
	if (!check_raw_mode_ok(fn))
		return -EINVAL;
	if (!check_arg_pair_ok(fn))
		return -EINVAL;
	if (!check_btf_id_ok(fn))
		return -EINVAL;
	if (!check_refcount_ok(fn, func_id))
		return -EINVAL;
	return 0;
}
| 0
|
439,170
|
/*
 * Probe the first bytes of a file for a FITS signature.
 * Recognizes "IT0" (first 3 bytes) or "SIMPLE" (first 6 bytes),
 * compared case-insensitively via LocaleNCompare.  At least 6 bytes
 * of magic are required.
 */
static MagickBooleanType IsFITS(const unsigned char *magick,const size_t length)
{
  const char
    *signature = (const char *) magick;

  if (length < 6)
    return(MagickFalse);
  if ((LocaleNCompare(signature,"IT0",3) == 0) ||
      (LocaleNCompare(signature,"SIMPLE",6) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
| 0
|
230,115
|
/*
 * Set the status of a WebAuthn credential belonging to a user.
 *
 * Builds an UPDATE on the credential table where gswc_credential_id
 * matches and gswu_id is resolved through a raw subquery matching the
 * (case-insensitively compared) username and the module name from
 * j_params.
 *
 * Returns G_OK on success, G_ERROR_DB on database error (also bumps
 * the database-error metrics counter).
 */
static int update_credential(struct config_module * config, json_t * j_params, const char * username, const char * credential_id, int status) {
  json_t * j_query;
  char * username_escaped, * mod_name_escaped, * username_clause;
  int res, ret;

  /* escape user-controlled values before embedding them in the raw clause */
  username_escaped = h_escape_string_with_quotes(config->conn, username);
  mod_name_escaped = h_escape_string_with_quotes(config->conn, json_string_value(json_object_get(j_params, "mod_name")));
  username_clause = msprintf(" = (SELECT gswu_id FROM "G_TABLE_WEBAUTHN_USER" WHERE UPPER(gswu_username) = UPPER(%s) AND gswu_mod_name = %s)", username_escaped, mod_name_escaped);
  j_query = json_pack("{sss{si}s{sss{ssss}}}",
                      "table",
                      G_TABLE_WEBAUTHN_CREDENTIAL,
                      "set",
                        "gswc_status",
                        status,
                      "where",
                        "gswc_credential_id",
                        credential_id,
                        "gswu_id",
                          "operator",
                          "raw",
                          "value",
                          username_clause);
  o_free(username_clause);
  o_free(username_escaped);
  o_free(mod_name_escaped);
  res = h_update(config->conn, j_query, NULL);
  json_decref(j_query);
  if (res == H_OK) {
    ret = G_OK;
  } else {
    /* fix: log message previously said "get_credential" (copy-paste) */
    y_log_message(Y_LOG_LEVEL_ERROR, "update_credential - Error executing j_query");
    config->glewlwyd_module_callback_metrics_increment_counter(config, GLWD_METRICS_DATABSE_ERROR, 1, NULL);
    ret = G_ERROR_DB;
  }
  return ret;
}
| 0
|
238,325
|
/*
 * Print a table of all registered digest algorithms (name, driver,
 * priority) to stdout, prefixing every line with @prefix.
 */
void digest_algo_prints(const char *prefix)
{
	struct digest_algo *algo;

	/* header row, then one line per registered algorithm */
	printf("%s%-15s\t%-20s\t%-15s\n", prefix, "name", "driver", "priority");
	printf("%s--------------------------------------------------\n", prefix);

	list_for_each_entry(algo, &digests, list) {
		printf("%s%-15s\t%-20s\t%d\n", prefix, algo->base.name,
		       algo->base.driver_name, algo->base.priority);
	}
}
| 0
|
238,517
|
/*
 * Verify that a BPF map may be used by the given program: rejects
 * combinations that are unsafe (run-time allocating maps in tracing
 * context, spin locks / timers in incompatible program types, offload
 * mismatches, struct_ops maps, and non-preallocated or unsupported map
 * types in sleepable programs).
 *
 * Returns 0 if the pairing is allowed, -EINVAL otherwise (with a
 * verifier message explaining why).
 */
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map,
struct bpf_prog *prog)
{
enum bpf_prog_type prog_type = resolve_prog_type(prog);
/*
* Validate that trace type programs use preallocated hash maps.
*
* For programs attached to PERF events this is mandatory as the
* perf NMI can hit any arbitrary code sequence.
*
* All other trace types using preallocated hash maps are unsafe as
* well because tracepoint or kprobes can be inside locked regions
* of the memory allocator or at a place where a recursion into the
* memory allocator would see inconsistent state.
*
* On RT enabled kernels run-time allocation of all trace type
* programs is strictly prohibited due to lock type constraints. On
* !RT kernels it is allowed for backwards compatibility reasons for
* now, but warnings are emitted so developers are made aware of
* the unsafety and can fix their programs before this is enforced.
*/
if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) {
if (prog_type == BPF_PROG_TYPE_PERF_EVENT) {
verbose(env, "perf_event programs can only use preallocated hash map\n");
return -EINVAL;
}
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
verbose(env, "trace type programs can only use preallocated hash map\n");
return -EINVAL;
}
/* !RT: allowed for backwards compat, but warn loudly (once) */
WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
}
/* maps with bpf_spin_lock in their value have restricted users */
if (map_value_has_spin_lock(map)) {
if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
return -EINVAL;
}
if (is_tracing_prog_type(prog_type)) {
verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
return -EINVAL;
}
if (prog->aux->sleepable) {
verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
return -EINVAL;
}
}
/* bpf_timer in the map value is not allowed from tracing programs */
if (map_value_has_timer(map)) {
if (is_tracing_prog_type(prog_type)) {
verbose(env, "tracing progs cannot use bpf_timer yet\n");
return -EINVAL;
}
}
/* device-bound (offloaded) prog and map must target the same device */
if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
!bpf_offload_prog_map_match(prog, map)) {
verbose(env, "offload device mismatch between prog and map\n");
return -EINVAL;
}
if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
verbose(env, "bpf_struct_ops map cannot be used in prog\n");
return -EINVAL;
}
/*
* Sleepable programs may only use maps whose element memory cannot
* disappear under them: preallocated hash/array variants, or map
* types with their own lifetime guarantees (ringbuf, local storage).
*/
if (prog->aux->sleepable)
switch (map->map_type) {
case BPF_MAP_TYPE_HASH:
case BPF_MAP_TYPE_LRU_HASH:
case BPF_MAP_TYPE_ARRAY:
case BPF_MAP_TYPE_PERCPU_HASH:
case BPF_MAP_TYPE_PERCPU_ARRAY:
case BPF_MAP_TYPE_LRU_PERCPU_HASH:
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
if (!is_preallocated_map(map)) {
verbose(env,
"Sleepable programs can only use preallocated maps\n");
return -EINVAL;
}
break;
case BPF_MAP_TYPE_RINGBUF:
case BPF_MAP_TYPE_INODE_STORAGE:
case BPF_MAP_TYPE_SK_STORAGE:
case BPF_MAP_TYPE_TASK_STORAGE:
break;
default:
verbose(env,
"Sleepable programs can only use array, hash, and ringbuf maps\n");
return -EINVAL;
}
return 0;
}
| 0
|
487,658
|
/*
 * Prepare the machine for restart: notify registered reboot listeners
 * (passing along the optional restart command), mark the global system
 * state as SYSTEM_RESTART, then shut down all devices.  The three steps
 * are order-dependent.
 */
static void kernel_restart_prepare(char *cmd)
{
blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
system_state = SYSTEM_RESTART;
device_shutdown();
}
| 0
|
249,975
|
/*
 * Write (or, when Emulation is non-zero, only size) the media data of
 * all tracks: meta boxes first, then per-track samples, updating the
 * chunk/offset tables as it goes.  StartOffset is the absolute file
 * offset where mdat payload begins; the total payload size is stored
 * in movie->mdat->dataSize.
 *
 * Fixes: the moov-level and track-level meta boxes were written with
 * movie->meta instead of movie->moov->meta / the track's own meta
 * (copy-paste bug) — each meta box is now written once, correctly.
 */
GF_Err DoWrite(MovieWriter *mw, GF_List *writers, GF_BitStream *bs, u8 Emulation, u64 StartOffset)
{
	u32 i;
	GF_Err e;
	TrackWriter *writer;
	u64 offset, sampOffset, predOffset;
	u32 chunkNumber, descIndex, sampSize;
	Bool force;
	GF_StscEntry *stsc_ent;
	u64 size, mdatSize = 0;
	GF_ISOFile *movie = mw->movie;

	/*write meta content first - WE DON'T support fragmentation of resources in ISOM atm*/
	if (movie->openMode != GF_ISOM_OPEN_WRITE) {
		if (movie->meta) {
			e = DoWriteMeta(movie, movie->meta, bs, Emulation, StartOffset, &size);
			if (e) return e;
			mdatSize += size;
			StartOffset += size;
		}
		if (movie->moov && movie->moov->meta) {
			/* fix: write the movie-level meta, not the file-level one again */
			e = DoWriteMeta(movie, movie->moov->meta, bs, Emulation, StartOffset, &size);
			if (e) return e;
			mdatSize += size;
			StartOffset += size;
		}
		i=0;
		while ((writer = (TrackWriter*)gf_list_enum(writers, &i))) {
			if (writer->mdia->mediaTrack->meta) {
				/* fix: write this track's own meta box */
				e = DoWriteMeta(movie, writer->mdia->mediaTrack->meta, bs, Emulation, StartOffset, &size);
				if (e) return e;
				mdatSize += size;
				StartOffset += size;
			}
		}
	}

	offset = StartOffset;
	predOffset = 0;
	i=0;
	while ((writer = (TrackWriter*)gf_list_enum(writers, &i))) {
		while (!writer->isDone) {
			Bool self_contained;
			u32 nb_samp=1;
			//To Check: are empty sample tables allowed ???
			if (writer->sampleNumber > writer->stbl->SampleSize->sampleCount) {
				writer->isDone = 1;
				continue;
			}
			e = stbl_GetSampleInfos(writer->stbl, writer->sampleNumber, &sampOffset, &chunkNumber, &descIndex, &stsc_ent);
			if (e) return e;
			e = stbl_GetSampleSize(writer->stbl->SampleSize, writer->sampleNumber, &sampSize);
			if (e) return e;

			update_writer_constant_dur(movie, writer, stsc_ent, &nb_samp, &sampSize, GF_TRUE);

			//update our chunks.
			force = 0;
			if (movie->openMode == GF_ISOM_OPEN_WRITE) {
				offset = sampOffset;
				if (predOffset != offset)
					force = 1;
			}
			/* start a new chunk when the configured max chunk size is exceeded */
			if (writer->stbl->MaxChunkSize && (writer->chunkSize + sampSize > writer->stbl->MaxChunkSize)) {
				writer->chunkSize = 0;
				force = 1;
			}
			writer->chunkSize += sampSize;

			self_contained = ((writer->all_dref_mode==ISOM_DREF_SELF) || Media_IsSelfContained(writer->mdia, descIndex) ) ? GF_TRUE : GF_FALSE;

			//update our global offset...
			if (self_contained) {
				e = stbl_SetChunkAndOffset(writer->stbl, writer->sampleNumber, descIndex, writer->stsc, &writer->stco, offset, force, nb_samp);
				if (e) return e;
				if (movie->openMode == GF_ISOM_OPEN_WRITE) {
					predOffset = sampOffset + sampSize;
				} else {
					offset += sampSize;
					mdatSize += sampSize;
				}
			} else {
				if (predOffset != offset) force = 1;
				predOffset = sampOffset + sampSize;
				//we have a DataRef, so use the offset idicated in sampleToChunk and ChunkOffset tables...
				e = stbl_SetChunkAndOffset(writer->stbl, writer->sampleNumber, descIndex, writer->stsc, &writer->stco, sampOffset, force, nb_samp);
				if (e) return e;
			}
			//we write the sample if not emulation
			if (!Emulation) {
				if (self_contained) {
					e = WriteSample(mw, sampSize, sampOffset, stsc_ent->isEdited, bs, 1);
					if (e) return e;
				}
			}
			//ok, the track is done
			if (writer->sampleNumber >= writer->stbl->SampleSize->sampleCount) {
				writer->isDone = 1;
			} else {
				writer->sampleNumber += nb_samp;
			}
		}
	}
	//set the mdatSize...
	movie->mdat->dataSize = mdatSize;
	return GF_OK;
}
| 0
|
235,763
|
// Validate the shapes/consistency of the hypothesis and truth SparseTensor
// components passed to the edit-distance op: indices must be matrices,
// values and shapes must be vectors, the element counts of values/shape
// must agree with the indices dimensions, rank must be at least 2, and
// both tensors must have the same rank.  Returns InvalidArgument with a
// descriptive message on the first violated condition, OK otherwise.
Status ValidateShapes(OpKernelContext* ctx, const Tensor& hypothesis_indices,
const Tensor& hypothesis_values,
const Tensor& hypothesis_shape,
const Tensor& truth_indices, const Tensor& truth_values,
const Tensor& truth_shape) {
// Structural checks: indices are matrices, everything else is a vector.
if (!TensorShapeUtils::IsMatrix(hypothesis_indices.shape()))
return errors::InvalidArgument(
"hypothesis_indices should be a matrix, but got shape: ",
hypothesis_indices.shape().DebugString());
if (!TensorShapeUtils::IsMatrix(truth_indices.shape()))
return errors::InvalidArgument(
"truth_indices should be a matrix, but got shape: ",
truth_indices.shape().DebugString());
if (!TensorShapeUtils::IsVector(hypothesis_values.shape()))
return errors::InvalidArgument(
"hypothesis_values should be a vector, but got shape: ",
hypothesis_values.shape().DebugString());
if (!TensorShapeUtils::IsVector(truth_values.shape()))
return errors::InvalidArgument(
"truth_values should be a vector, but got shape: ",
truth_values.shape().DebugString());
if (!TensorShapeUtils::IsVector(hypothesis_shape.shape()))
return errors::InvalidArgument(
"hypothesis_shape should be a vector, but got shape: ",
hypothesis_shape.shape().DebugString());
if (!TensorShapeUtils::IsVector(truth_shape.shape()))
return errors::InvalidArgument(
"truth_shape should be a vector, but got shape: ",
truth_shape.shape().DebugString());
// Cross-component consistency: one value per index row, one shape entry
// per index column.
if (hypothesis_values.NumElements() != hypothesis_indices.dim_size(0))
return errors::InvalidArgument(
"Expected hypothesis_values.NumElements == "
"#rows(hypothesis_indices), their shapes are: ",
hypothesis_values.shape().DebugString(), " and ",
hypothesis_indices.shape().DebugString());
if (hypothesis_shape.NumElements() != hypothesis_indices.dim_size(1))
return errors::InvalidArgument(
"Expected hypothesis_shape.NumElements == "
"#cols(hypothesis_indices), their shapes are: ",
hypothesis_shape.shape().DebugString(), " and ",
hypothesis_indices.shape().DebugString());
// The op requires rank >= 2 (batch dimensions plus a sequence dimension).
if (truth_shape.NumElements() < 2)
return errors::InvalidArgument(
"Input SparseTensors must have rank at least 2, but truth_shape "
"rank is: ",
truth_shape.NumElements());
if (truth_values.NumElements() != truth_indices.dim_size(0))
return errors::InvalidArgument(
"Expected truth_values.NumElements == "
"#rows(truth_indices), their shapes are: ",
truth_values.shape().DebugString(), " and ",
truth_indices.shape().DebugString());
if (truth_shape.NumElements() != truth_indices.dim_size(1))
return errors::InvalidArgument(
"Expected truth_shape.NumElements == "
"#cols(truth_indices), their shapes are: ",
truth_shape.shape().DebugString(), " and ",
truth_indices.shape().DebugString());
// Hypothesis and truth must share the same rank.
if (truth_shape.NumElements() != hypothesis_shape.NumElements())
return errors::InvalidArgument(
"Expected truth and hypothesis to have matching ranks, but "
"their shapes are: ",
truth_shape.shape().DebugString(), " and ",
hypothesis_shape.shape().DebugString());
return Status::OK();
}
| 0
|
236,141
|
/*
 * Size callback for the 'hlit' (text highlight) box: adds the box's
 * 4 bytes of fixed payload to the computed size.
 * (presumably the highlight color fields — confirm against the spec)
 */
GF_Err hlit_box_size(GF_Box *s)
{
s->size += 4;
return GF_OK;
}
| 0
|
226,094
|
/*
 * Allocate and zero-initialize a Track Fragment Random Access ('tfra')
 * box via the standard GPAC box-allocation macro (which declares the
 * local 'tmp' it returns).
 */
GF_Box *tfra_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_TrackFragmentRandomAccessBox, GF_ISOM_BOX_TYPE_TFRA);
return (GF_Box *)tmp;
}
| 0
|
252,465
|
/*
 * Output callback for the tdefl compressor that appends compressed
 * bytes to a growable heap buffer (tdefl_output_buffer passed as the
 * user pointer).  Grows the buffer by doubling (minimum 128 bytes)
 * when needed, but only if the buffer is marked expandable.
 *
 * Returns MZ_TRUE on success, MZ_FALSE when the data does not fit and
 * the buffer cannot (or fails to) grow.
 */
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t required = p->m_size + len;

  if (required > p->m_capacity) {
    size_t cap = p->m_capacity;
    mz_uint8 *grown;

    if (!p->m_expandable) return MZ_FALSE;
    /* double (starting at 128) until the request fits */
    do {
      cap = MZ_MAX(128U, cap << 1U);
    } while (required > cap);
    grown = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, cap);
    if (!grown) return MZ_FALSE;
    p->m_pBuf = grown;
    p->m_capacity = cap;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = required;
  return MZ_TRUE;
}
| 0
|
198,439
|
mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc)
{
/* mrb_assert(MRB_PROC_CFUNC_P(proc)) */
const mrb_irep *irep = proc->body.irep;
const mrb_pool_value *pool = irep->pool;
const mrb_sym *syms = irep->syms;
mrb_code insn;
int ai = mrb_gc_arena_save(mrb);
struct mrb_jmpbuf *prev_jmp = mrb->jmp;
struct mrb_jmpbuf c_jmp;
uint32_t a;
uint16_t b;
uint16_t c;
mrb_sym mid;
const struct mrb_irep_catch_handler *ch;
#ifdef DIRECT_THREADED
static const void * const optable[] = {
#define OPCODE(x,_) &&L_OP_ ## x,
#include "mruby/ops.h"
#undef OPCODE
};
#endif
mrb_bool exc_catched = FALSE;
RETRY_TRY_BLOCK:
MRB_TRY(&c_jmp) {
if (exc_catched) {
exc_catched = FALSE;
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK)
goto L_BREAK;
goto L_RAISE;
}
mrb->jmp = &c_jmp;
mrb_vm_ci_proc_set(mrb->c->ci, proc);
#define regs (mrb->c->ci->stack)
INIT_DISPATCH {
CASE(OP_NOP, Z) {
/* do nothing */
NEXT;
}
CASE(OP_MOVE, BB) {
regs[a] = regs[b];
NEXT;
}
CASE(OP_LOADL, BB) {
switch (pool[b].tt) { /* number */
case IREP_TT_INT32:
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32);
break;
case IREP_TT_INT64:
#if defined(MRB_INT64)
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
#else
#if defined(MRB_64BIT)
if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) {
regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
break;
}
#endif
goto L_INT_OVERFLOW;
#endif
case IREP_TT_BIGINT:
goto L_INT_OVERFLOW;
#ifndef MRB_NO_FLOAT
case IREP_TT_FLOAT:
regs[a] = mrb_float_value(mrb, pool[b].u.f);
break;
#endif
default:
/* should not happen (tt:string) */
regs[a] = mrb_nil_value();
break;
}
NEXT;
}
CASE(OP_LOADI, BB) {
SET_FIXNUM_VALUE(regs[a], b);
NEXT;
}
CASE(OP_LOADINEG, BB) {
SET_FIXNUM_VALUE(regs[a], -b);
NEXT;
}
CASE(OP_LOADI__1,B) goto L_LOADI;
CASE(OP_LOADI_0,B) goto L_LOADI;
CASE(OP_LOADI_1,B) goto L_LOADI;
CASE(OP_LOADI_2,B) goto L_LOADI;
CASE(OP_LOADI_3,B) goto L_LOADI;
CASE(OP_LOADI_4,B) goto L_LOADI;
CASE(OP_LOADI_5,B) goto L_LOADI;
CASE(OP_LOADI_6,B) goto L_LOADI;
CASE(OP_LOADI_7, B) {
L_LOADI:
SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0);
NEXT;
}
CASE(OP_LOADI16, BS) {
SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b);
NEXT;
}
CASE(OP_LOADI32, BSS) {
SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c));
NEXT;
}
CASE(OP_LOADSYM, BB) {
SET_SYM_VALUE(regs[a], syms[b]);
NEXT;
}
CASE(OP_LOADNIL, B) {
SET_NIL_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADSELF, B) {
regs[a] = regs[0];
NEXT;
}
CASE(OP_LOADT, B) {
SET_TRUE_VALUE(regs[a]);
NEXT;
}
CASE(OP_LOADF, B) {
SET_FALSE_VALUE(regs[a]);
NEXT;
}
CASE(OP_GETGV, BB) {
mrb_value val = mrb_gv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETGV, BB) {
mrb_gv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETSV, BB) {
mrb_value val = mrb_vm_special_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETSV, BB) {
mrb_vm_special_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIV, BB) {
regs[a] = mrb_iv_get(mrb, regs[0], syms[b]);
NEXT;
}
CASE(OP_SETIV, BB) {
mrb_iv_set(mrb, regs[0], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETCV, BB) {
mrb_value val;
val = mrb_vm_cv_get(mrb, syms[b]);
regs[a] = val;
NEXT;
}
CASE(OP_SETCV, BB) {
mrb_vm_cv_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETIDX, B) {
mrb_value va = regs[a], vb = regs[a+1];
switch (mrb_type(va)) {
case MRB_TT_ARRAY:
if (!mrb_integer_p(vb)) goto getidx_fallback;
regs[a] = mrb_ary_entry(va, mrb_integer(vb));
break;
case MRB_TT_HASH:
va = mrb_hash_get(mrb, va, vb);
regs[a] = va;
break;
case MRB_TT_STRING:
switch (mrb_type(vb)) {
case MRB_TT_INTEGER:
case MRB_TT_STRING:
case MRB_TT_RANGE:
va = mrb_str_aref(mrb, va, vb, mrb_undef_value());
regs[a] = va;
break;
default:
goto getidx_fallback;
}
break;
default:
getidx_fallback:
mid = MRB_OPSYM(aref);
goto L_SEND_SYM;
}
NEXT;
}
CASE(OP_SETIDX, B) {
c = 2;
mid = MRB_OPSYM(aset);
SET_NIL_VALUE(regs[a+3]);
goto L_SENDB_SYM;
}
CASE(OP_GETCONST, BB) {
mrb_value v = mrb_vm_const_get(mrb, syms[b]);
regs[a] = v;
NEXT;
}
CASE(OP_SETCONST, BB) {
mrb_vm_const_set(mrb, syms[b], regs[a]);
NEXT;
}
CASE(OP_GETMCNST, BB) {
mrb_value v = mrb_const_get(mrb, regs[a], syms[b]);
regs[a] = v;
NEXT;
}
CASE(OP_SETMCNST, BB) {
mrb_const_set(mrb, regs[a+1], syms[b], regs[a]);
NEXT;
}
CASE(OP_GETUPVAR, BBB) {
mrb_value *regs_a = regs + a;
struct REnv *e = uvenv(mrb, c);
if (e && b < MRB_ENV_LEN(e)) {
*regs_a = e->stack[b];
}
else {
*regs_a = mrb_nil_value();
}
NEXT;
}
CASE(OP_SETUPVAR, BBB) {
struct REnv *e = uvenv(mrb, c);
if (e) {
mrb_value *regs_a = regs + a;
if (b < MRB_ENV_LEN(e)) {
e->stack[b] = *regs_a;
mrb_write_barrier(mrb, (struct RBasic*)e);
}
}
NEXT;
}
CASE(OP_JMP, S) {
pc += (int16_t)a;
JUMP;
}
CASE(OP_JMPIF, BS) {
if (mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNOT, BS) {
if (!mrb_test(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPNIL, BS) {
if (mrb_nil_p(regs[a])) {
pc += (int16_t)b;
JUMP;
}
NEXT;
}
CASE(OP_JMPUW, S) {
a = (uint32_t)((pc - irep->iseq) + (int16_t)a);
CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) {
struct RBreak *brk = (struct RBreak*)mrb->exc;
mrb_value target = mrb_break_value_get(brk);
mrb_assert(mrb_integer_p(target));
a = (uint32_t)mrb_integer(target);
mrb_assert(a >= 0 && a < irep->ilen);
}
CHECKPOINT_MAIN(RBREAK_TAG_JUMP) {
ch = catch_handler_find(mrb, mrb->c->ci, pc, MRB_CATCH_FILTER_ENSURE);
if (ch) {
/* avoiding a jump from a catch handler into the same handler */
if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) {
THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, proc, mrb_fixnum_value(a));
}
}
}
CHECKPOINT_END(RBREAK_TAG_JUMP);
mrb->exc = NULL; /* clear break object */
pc = irep->iseq + a;
JUMP;
}
CASE(OP_EXCEPT, B) {
mrb_value exc;
if (mrb->exc == NULL) {
exc = mrb_nil_value();
}
else {
switch (mrb->exc->tt) {
case MRB_TT_BREAK:
case MRB_TT_EXCEPTION:
exc = mrb_obj_value(mrb->exc);
break;
default:
mrb_assert(!"bad mrb_type");
exc = mrb_nil_value();
break;
}
mrb->exc = NULL;
}
regs[a] = exc;
NEXT;
}
CASE(OP_RESCUE, BB) {
mrb_value exc = regs[a]; /* exc on stack */
mrb_value e = regs[b];
struct RClass *ec;
switch (mrb_type(e)) {
case MRB_TT_CLASS:
case MRB_TT_MODULE:
break;
default:
{
mrb_value exc;
exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"class or module required for rescue clause");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
}
ec = mrb_class_ptr(e);
regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec));
NEXT;
}
CASE(OP_RAISEIF, B) {
mrb_value exc = regs[a];
if (mrb_break_p(exc)) {
mrb->exc = mrb_obj_ptr(exc);
goto L_BREAK;
}
mrb_exc_set(mrb, exc);
if (mrb->exc) {
goto L_RAISE;
}
NEXT;
}
CASE(OP_SSEND, BBB) {
regs[a] = regs[0];
insn = OP_SEND;
}
goto L_SENDB;
CASE(OP_SSENDB, BBB) {
regs[a] = regs[0];
}
goto L_SENDB;
CASE(OP_SEND, BBB)
goto L_SENDB;
L_SEND_SYM:
c = 1;
/* push nil after arguments */
SET_NIL_VALUE(regs[a+2]);
goto L_SENDB_SYM;
CASE(OP_SENDB, BBB)
L_SENDB:
mid = syms[b];
L_SENDB_SYM:
{
mrb_callinfo *ci = mrb->c->ci;
mrb_method_t m;
struct RClass *cls;
mrb_value recv, blk;
ARGUMENT_NORMALIZE(a, &c, insn);
recv = regs[a];
cls = mrb_class(mrb, recv);
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &c, blk, 0);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, c);
if (MRB_METHOD_CFUNC_P(m)) {
if (MRB_METHOD_PROC_P(m)) {
struct RProc *p = MRB_METHOD_PROC(m);
mrb_vm_ci_proc_set(ci, p);
recv = p->body.func(mrb, recv);
}
else {
if (MRB_METHOD_NOARG_P(m)) {
check_method_noarg(mrb, ci);
}
recv = MRB_METHOD_FUNC(m)(mrb, recv);
}
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (p && !MRB_PROC_STRICT_P(p) && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
if (!ci->u.target_class) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return recv;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
ci->stack[0] = recv;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
}
JUMP;
CASE(OP_CALL, Z) {
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv = ci->stack[0];
struct RProc *m = mrb_proc_ptr(recv);
/* replace callinfo */
ci->u.target_class = MRB_PROC_TARGET_CLASS(m);
mrb_vm_ci_proc_set(ci, m);
if (MRB_PROC_ENV_P(m)) {
ci->mid = MRB_PROC_ENV(m)->mid;
}
/* prepare stack */
if (MRB_PROC_CFUNC_P(m)) {
recv = MRB_PROC_CFUNC(m)(mrb, recv);
mrb_gc_arena_shrink(mrb, ai);
if (mrb->exc) goto L_RAISE;
/* pop stackpos */
ci = cipop(mrb);
pc = ci->pc;
ci[1].stack[0] = recv;
irep = mrb->c->ci->proc->body.irep;
}
else {
/* setup environment for calling method */
proc = m;
irep = m->body.irep;
if (!irep) {
mrb->c->ci->stack[0] = mrb_nil_value();
a = 0;
c = OP_R_NORMAL;
goto L_OP_RETURN_BODY;
}
mrb_int nargs = mrb_ci_bidx(ci)+1;
if (nargs < irep->nregs) {
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+nargs, irep->nregs-nargs);
}
if (MRB_PROC_ENV_P(m)) {
regs[0] = MRB_PROC_ENV(m)->stack[0];
}
pc = irep->iseq;
}
pool = irep->pool;
syms = irep->syms;
JUMP;
}
CASE(OP_SUPER, BB) {
mrb_method_t m;
struct RClass *cls;
mrb_callinfo *ci = mrb->c->ci;
mrb_value recv, blk;
const struct RProc *p = ci->proc;
mrb_sym mid = ci->mid;
struct RClass* target_class = MRB_PROC_TARGET_CLASS(p);
if (MRB_PROC_ENV_P(p) && p->e.env->mid && p->e.env->mid != mid) { /* alias support */
mid = p->e.env->mid; /* restore old mid */
}
if (mid == 0 || !target_class) {
mrb_value exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (target_class->flags & MRB_FL_CLASS_IS_PREPENDED) {
target_class = mrb_vm_ci_target_class(ci);
}
else if (target_class->tt == MRB_TT_MODULE) {
target_class = mrb_vm_ci_target_class(ci);
if (!target_class || target_class->tt != MRB_TT_ICLASS) {
goto super_typeerror;
}
}
recv = regs[0];
if (!mrb_obj_is_kind_of(mrb, recv, target_class)) {
super_typeerror: ;
mrb_value exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR,
"self has wrong type to call super in this context");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
ARGUMENT_NORMALIZE(a, &b, OP_SUPER);
cls = target_class->super;
m = mrb_method_search_vm(mrb, &cls, mid);
if (MRB_METHOD_UNDEF_P(m)) {
m = prepare_missing(mrb, recv, mid, &cls, a, &b, blk, 1);
mid = MRB_SYM(method_missing);
}
/* push callinfo */
ci = cipush(mrb, a, 0, cls, NULL, mid, b);
/* prepare stack */
ci->stack[0] = recv;
if (MRB_METHOD_CFUNC_P(m)) {
mrb_value v;
if (MRB_METHOD_PROC_P(m)) {
mrb_vm_ci_proc_set(ci, MRB_METHOD_PROC(m));
}
v = MRB_METHOD_CFUNC(m)(mrb, recv);
mrb_gc_arena_restore(mrb, ai);
if (mrb->exc) goto L_RAISE;
ci = mrb->c->ci;
mrb_assert(!mrb_break_p(v));
if (!mrb_vm_ci_target_class(ci)) { /* return from context modifying method (resume/yield) */
if (ci->cci == CINFO_RESUMED) {
mrb->jmp = prev_jmp;
return v;
}
else {
mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
proc = ci[-1].proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
}
}
mrb->c->ci->stack[0] = v;
ci = cipop(mrb);
pc = ci->pc;
}
else {
/* setup environment for calling method */
mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m)));
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
pc = irep->iseq;
}
JUMP;
}
CASE(OP_ARGARY, BS) {
mrb_int m1 = (b>>11)&0x3f;
mrb_int r = (b>>10)&0x1;
mrb_int m2 = (b>>5)&0x1f;
mrb_int kd = (b>>4)&0x1;
mrb_int lv = (b>>0)&0xf;
mrb_value *stack;
if (mrb->c->ci->mid == 0 || mrb_vm_ci_target_class(mrb->c->ci) == NULL) {
mrb_value exc;
L_NOSUPER:
exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e) goto L_NOSUPER;
if (MRB_ENV_LEN(e) <= m1+r+m2+1)
goto L_NOSUPER;
stack = e->stack + 1;
}
if (r == 0) {
regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack);
}
else {
mrb_value *pp = NULL;
struct RArray *rest;
mrb_int len = 0;
if (mrb_array_p(stack[m1])) {
struct RArray *ary = mrb_ary_ptr(stack[m1]);
pp = ARY_PTR(ary);
len = ARY_LEN(ary);
}
regs[a] = mrb_ary_new_capa(mrb, m1+len+m2);
rest = mrb_ary_ptr(regs[a]);
if (m1 > 0) {
stack_copy(ARY_PTR(rest), stack, m1);
}
if (len > 0) {
stack_copy(ARY_PTR(rest)+m1, pp, len);
}
if (m2 > 0) {
stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2);
}
ARY_SET_LEN(rest, m1+len+m2);
}
if (kd) {
regs[a+1] = stack[m1+r+m2];
regs[a+2] = stack[m1+r+m2+1];
}
else {
regs[a+1] = stack[m1+r+m2];
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ENTER, W) {
mrb_int m1 = MRB_ASPEC_REQ(a);
mrb_int o = MRB_ASPEC_OPT(a);
mrb_int r = MRB_ASPEC_REST(a);
mrb_int m2 = MRB_ASPEC_POST(a);
mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 1 : 0;
/* unused
int b = MRB_ASPEC_BLOCK(a);
*/
mrb_int const len = m1 + o + r + m2;
mrb_callinfo *ci = mrb->c->ci;
mrb_int argc = ci->n;
mrb_value *argv = regs+1;
mrb_value * const argv0 = argv;
mrb_int const kw_pos = len + kd; /* where kwhash should be */
mrb_int const blk_pos = kw_pos + 1; /* where block should be */
mrb_value blk = regs[mrb_ci_bidx(ci)];
mrb_value kdict = mrb_nil_value();
/* keyword arguments */
if (ci->nk > 0) {
mrb_int kidx = mrb_ci_kidx(ci);
kdict = regs[kidx];
if (!mrb_hash_p(kdict) || mrb_hash_size(mrb, kdict) == 0) {
kdict = mrb_nil_value();
ci->nk = 0;
}
}
if (!kd && !mrb_nil_p(kdict)) {
if (argc < 14) {
ci->n++;
argc++; /* include kdict in normal arguments */
}
else if (argc == 14) {
/* pack arguments and kdict */
regs[1] = mrb_ary_new_from_values(mrb, argc+1, ®s[1]);
argc = ci->n = 15;
}
else {/* argc == 15 */
/* push kdict to packed arguments */
mrb_ary_push(mrb, regs[1], regs[2]);
}
ci->nk = 0;
}
if (kd && MRB_ASPEC_KEY(a) > 0 && mrb_hash_p(kdict)) {
kdict = mrb_hash_dup(mrb, kdict);
}
/* arguments is passed with Array */
if (argc == 15) {
struct RArray *ary = mrb_ary_ptr(regs[1]);
argv = ARY_PTR(ary);
argc = (int)ARY_LEN(ary);
mrb_gc_protect(mrb, regs[1]);
}
/* strict argument check */
if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) {
if (argc < m1 + m2 || (r == 0 && argc > len)) {
argnum_error(mrb, m1+m2);
goto L_RAISE;
}
}
/* extract first argument array to arguments */
else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) {
mrb_gc_protect(mrb, argv[0]);
argc = (int)RARRAY_LEN(argv[0]);
argv = RARRAY_PTR(argv[0]);
}
/* rest arguments */
mrb_value rest = mrb_nil_value();
if (argc < len) {
mrb_int mlen = m2;
if (argc < m1+m2) {
mlen = m1 < argc ? argc - m1 : 0;
}
/* copy mandatory and optional arguments */
if (argv0 != argv && argv) {
value_move(®s[1], argv, argc-mlen); /* m1 + o */
}
if (argc < m1) {
stack_clear(®s[argc+1], m1-argc);
}
/* copy post mandatory arguments */
if (mlen) {
value_move(®s[len-m2+1], &argv[argc-mlen], mlen);
}
if (mlen < m2) {
stack_clear(®s[len-m2+mlen+1], m2-mlen);
}
/* initialize rest arguments with empty Array */
if (r) {
rest = mrb_ary_new_capa(mrb, 0);
regs[m1+o+1] = rest;
}
/* skip initializer of passed arguments */
if (o > 0 && argc > m1+m2)
pc += (argc - m1 - m2)*3;
}
else {
mrb_int rnum = 0;
if (argv0 != argv) {
value_move(®s[1], argv, m1+o);
}
if (r) {
rnum = argc-m1-o-m2;
rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o);
regs[m1+o+1] = rest;
}
if (m2 > 0 && argc-m2 > m1) {
value_move(®s[m1+o+r+1], &argv[m1+o+rnum], m2);
}
pc += o*3;
}
/* need to be update blk first to protect blk from GC */
regs[blk_pos] = blk; /* move block */
if (kd) {
if (mrb_nil_p(kdict))
kdict = mrb_hash_new_capa(mrb, 0);
regs[kw_pos] = kdict; /* set kwhash */
}
/* format arguments for generated code */
mrb->c->ci->n = len;
/* clear local (but non-argument) variables */
if (irep->nlocals-blk_pos-1 > 0) {
stack_clear(®s[blk_pos+1], irep->nlocals-blk_pos-1);
}
JUMP;
}
CASE(OP_KARG, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict, v;
if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) {
mrb_value str = mrb_format(mrb, "missing keyword: %v", k);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
v = mrb_hash_get(mrb, kdict, k);
regs[a] = v;
mrb_hash_delete_key(mrb, kdict, k);
NEXT;
}
CASE(OP_KEY_P, BB) {
mrb_value k = mrb_symbol_value(syms[b]);
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
mrb_bool key_p = FALSE;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) {
key_p = mrb_hash_key_p(mrb, kdict, k);
}
regs[a] = mrb_bool_value(key_p);
NEXT;
}
CASE(OP_KEYEND, Z) {
mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
mrb_value kdict;
if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) {
mrb_value keys = mrb_hash_keys(mrb, kdict);
mrb_value key1 = RARRAY_PTR(keys)[0];
mrb_value str = mrb_format(mrb, "unknown keyword: %v", key1);
mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str));
goto L_RAISE;
}
NEXT;
}
CASE(OP_BREAK, B) {
c = OP_R_BREAK;
goto L_RETURN;
}
CASE(OP_RETURN_BLK, B) {
c = OP_R_RETURN;
goto L_RETURN;
}
CASE(OP_RETURN, B)
c = OP_R_NORMAL;
L_RETURN:
{
mrb_callinfo *ci;
ci = mrb->c->ci;
if (ci->mid) {
mrb_value blk = regs[mrb_ci_bidx(ci)];
if (mrb_proc_p(blk)) {
struct RProc *p = mrb_proc_ptr(blk);
if (!MRB_PROC_STRICT_P(p) &&
ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) {
p->flags |= MRB_PROC_ORPHAN;
}
}
}
if (mrb->exc) {
L_RAISE:
ci = mrb->c->ci;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) goto L_FTOP;
goto L_CATCH;
}
while ((ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL)) == NULL) {
ci = cipop(mrb);
if (ci[1].cci == CINFO_SKIP && prev_jmp) {
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
pc = ci[0].pc;
if (ci == mrb->c->cibase) {
ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL);
if (ch == NULL) {
L_FTOP: /* fiber top */
if (mrb->c == mrb->root_c) {
mrb->c->ci->stack = mrb->c->stbase;
goto L_STOP;
}
else {
struct mrb_context *c = mrb->c;
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
c->prev = NULL;
goto L_RAISE;
}
}
break;
}
}
L_CATCH:
if (ch == NULL) goto L_STOP;
if (FALSE) {
L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */
ci = mrb->c->ci;
}
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target);
}
else {
mrb_int acc;
mrb_value v;
ci = mrb->c->ci;
v = regs[a];
mrb_gc_protect(mrb, v);
switch (c) {
case OP_R_RETURN:
/* Fall through to OP_R_NORMAL otherwise */
if (ci->cci == CINFO_NONE && MRB_PROC_ENV_P(proc) && !MRB_PROC_STRICT_P(proc)) {
const struct RProc *dst;
mrb_callinfo *cibase;
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
if (MRB_PROC_ENV_P(dst)) {
struct REnv *e = MRB_PROC_ENV(dst);
if (!MRB_ENV_ONSTACK_P(e) || (e->cxt && e->cxt != mrb->c)) {
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
}
/* check jump destination */
while (cibase <= ci && ci->proc != dst) {
if (ci->cci > CINFO_NONE) { /* jump cross C boundary */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci--;
}
if (ci <= cibase) { /* no jump destination */
localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
goto L_RAISE;
}
ci = mrb->c->ci;
while (cibase <= ci && ci->proc != dst) {
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_BLOCK) {
cibase = mrb->c->cibase;
dst = top_proc(mrb, proc);
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_BLOCK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_BLOCK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_BLOCK);
ci = cipop(mrb);
pc = ci->pc;
}
proc = ci->proc;
mrb->exc = NULL; /* clear break object */
break;
}
/* fallthrough */
case OP_R_NORMAL:
NORMAL_RETURN:
if (ci == mrb->c->cibase) {
struct mrb_context *c;
c = mrb->c;
if (!c->prev) { /* toplevel return */
regs[irep->nlocals] = v;
goto CHECKPOINT_LABEL_MAKE(RBREAK_TAG_STOP);
}
if (!c->vmexec && c->prev->ci == c->prev->cibase) {
mrb_value exc = mrb_exc_new_lit(mrb, E_FIBER_ERROR, "double resume");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_TOPLEVEL) {
c = mrb->c;
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN_TOPLEVEL) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_TOPLEVEL, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN_TOPLEVEL);
/* automatic yield at the end */
c->status = MRB_FIBER_TERMINATED;
mrb->c = c->prev;
mrb->c->status = MRB_FIBER_RUNNING;
c->prev = NULL;
if (c->vmexec) {
mrb_gc_arena_restore(mrb, ai);
c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
ci = mrb->c->ci;
}
CHECKPOINT_RESTORE(RBREAK_TAG_RETURN) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_RETURN) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_RETURN);
mrb->exc = NULL; /* clear break object */
break;
case OP_R_BREAK:
if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN;
if (MRB_PROC_ORPHAN_P(proc)) {
mrb_value exc;
L_BREAK_ERROR:
exc = mrb_exc_new_lit(mrb, E_LOCALJUMP_ERROR,
"break from proc-closure");
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
if (!MRB_PROC_ENV_P(proc) || !MRB_ENV_ONSTACK_P(MRB_PROC_ENV(proc))) {
goto L_BREAK_ERROR;
}
else {
struct REnv *e = MRB_PROC_ENV(proc);
if (e->cxt != mrb->c) {
goto L_BREAK_ERROR;
}
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK);
/* break from fiber block */
if (ci == mrb->c->cibase && ci->pc) {
struct mrb_context *c = mrb->c;
mrb->c = c->prev;
c->prev = NULL;
ci = mrb->c->ci;
}
if (ci->cci > CINFO_NONE) {
ci = cipop(mrb);
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, proc, v);
mrb->jmp = prev_jmp;
MRB_THROW(prev_jmp);
}
if (FALSE) {
struct RBreak *brk;
L_BREAK:
brk = (struct RBreak*)mrb->exc;
proc = mrb_break_proc_get(brk);
v = mrb_break_value_get(brk);
ci = mrb->c->ci;
switch (mrb_break_tag_get(brk)) {
#define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n);
RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS)
#undef DISPATCH_CHECKPOINTS
default:
mrb_assert(!"wrong break tag");
}
}
while (mrb->c->cibase < ci && ci[-1].proc != proc->upper) {
if (ci[-1].cci == CINFO_SKIP) {
goto L_BREAK_ERROR;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_UPPER) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_UPPER) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_UPPER, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_UPPER);
ci = cipop(mrb);
pc = ci->pc;
}
CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_INTARGET) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_BREAK_INTARGET) {
UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_INTARGET, proc, v);
}
CHECKPOINT_END(RBREAK_TAG_BREAK_INTARGET);
if (ci == mrb->c->cibase) {
goto L_BREAK_ERROR;
}
mrb->exc = NULL; /* clear break object */
break;
default:
/* cannot happen */
break;
}
mrb_assert(ci == mrb->c->ci);
mrb_assert(mrb->exc == NULL);
if (mrb->c->vmexec && !mrb_vm_ci_target_class(ci)) {
mrb_gc_arena_restore(mrb, ai);
mrb->c->vmexec = FALSE;
mrb->jmp = prev_jmp;
return v;
}
acc = ci->cci;
ci = cipop(mrb);
if (acc == CINFO_SKIP || acc == CINFO_DIRECT) {
mrb_gc_arena_restore(mrb, ai);
mrb->jmp = prev_jmp;
return v;
}
pc = ci->pc;
DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid)));
proc = ci->proc;
irep = proc->body.irep;
pool = irep->pool;
syms = irep->syms;
ci[1].stack[0] = v;
mrb_gc_arena_restore(mrb, ai);
}
JUMP;
}
CASE(OP_BLKPUSH, BS) {
int m1 = (b>>11)&0x3f;
int r = (b>>10)&0x1;
int m2 = (b>>5)&0x1f;
int kd = (b>>4)&0x1;
int lv = (b>>0)&0xf;
mrb_value *stack;
if (lv == 0) stack = regs + 1;
else {
struct REnv *e = uvenv(mrb, lv-1);
if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) ||
MRB_ENV_LEN(e) <= m1+r+m2+1) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
stack = e->stack + 1;
}
if (mrb_nil_p(stack[m1+r+m2+kd])) {
localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
goto L_RAISE;
}
regs[a] = stack[m1+r+m2+kd];
NEXT;
}
L_INT_OVERFLOW:
{
mrb_value exc = mrb_exc_new_lit(mrb, E_RANGE_ERROR, "integer overflow");
mrb_exc_set(mrb, exc);
}
goto L_RAISE;
#define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff))
#define OP_MATH(op_name) \
/* need to check if op is overridden */ \
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \
OP_MATH_CASE_INTEGER(op_name); \
OP_MATH_CASE_FLOAT(op_name, integer, float); \
OP_MATH_CASE_FLOAT(op_name, float, integer); \
OP_MATH_CASE_FLOAT(op_name, float, float); \
OP_MATH_CASE_STRING_##op_name(); \
default: \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATH_CASE_INTEGER(op_name) \
case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \
{ \
mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) \
OP_MATH_OVERFLOW_INT(); \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0
#else
#define OP_MATH_CASE_FLOAT(op_name, t1, t2) \
case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \
{ \
mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
#define OP_MATH_OVERFLOW_INT() goto L_INT_OVERFLOW
#define OP_MATH_CASE_STRING_add() \
case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \
regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \
mrb_gc_arena_restore(mrb, ai); \
break
#define OP_MATH_CASE_STRING_sub() (void)0
#define OP_MATH_CASE_STRING_mul() (void)0
#define OP_MATH_OP_add +
#define OP_MATH_OP_sub -
#define OP_MATH_OP_mul *
#define OP_MATH_TT_integer MRB_TT_INTEGER
#define OP_MATH_TT_float MRB_TT_FLOAT
CASE(OP_ADD, B) {
OP_MATH(add);
}
CASE(OP_SUB, B) {
OP_MATH(sub);
}
CASE(OP_MUL, B) {
OP_MATH(mul);
}
CASE(OP_DIV, B) {
#ifndef MRB_NO_FLOAT
mrb_float x, y, f;
#endif
/* need to check if op is overridden */
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):
{
mrb_int x = mrb_integer(regs[a]);
mrb_int y = mrb_integer(regs[a+1]);
mrb_int div = mrb_div_int(mrb, x, y);
SET_INT_VALUE(mrb, regs[a], div);
}
NEXT;
#ifndef MRB_NO_FLOAT
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):
x = (mrb_float)mrb_integer(regs[a]);
y = mrb_float(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):
x = mrb_float(regs[a]);
y = (mrb_float)mrb_integer(regs[a+1]);
break;
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):
x = mrb_float(regs[a]);
y = mrb_float(regs[a+1]);
break;
#endif
default:
mid = MRB_OPSYM(div);
goto L_SEND_SYM;
}
#ifndef MRB_NO_FLOAT
f = mrb_div_float(x, y);
SET_FLOAT_VALUE(mrb, regs[a], f);
#endif
NEXT;
}
#define OP_MATHI(op_name) \
/* need to check if op is overridden */ \
switch (mrb_type(regs[a])) { \
OP_MATHI_CASE_INTEGER(op_name); \
OP_MATHI_CASE_FLOAT(op_name); \
default: \
SET_INT_VALUE(mrb,regs[a+1], b); \
mid = MRB_OPSYM(op_name); \
goto L_SEND_SYM; \
} \
NEXT;
#define OP_MATHI_CASE_INTEGER(op_name) \
case MRB_TT_INTEGER: \
{ \
mrb_int x = mrb_integer(regs[a]), y = (mrb_int)b, z; \
if (mrb_int_##op_name##_overflow(x, y, &z)) \
OP_MATH_OVERFLOW_INT(); \
else \
SET_INT_VALUE(mrb,regs[a], z); \
} \
break
#ifdef MRB_NO_FLOAT
#define OP_MATHI_CASE_FLOAT(op_name) (void)0
#else
#define OP_MATHI_CASE_FLOAT(op_name) \
case MRB_TT_FLOAT: \
{ \
mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \
SET_FLOAT_VALUE(mrb, regs[a], z); \
} \
break
#endif
CASE(OP_ADDI, BB) {
OP_MATHI(add);
}
CASE(OP_SUBI, BB) {
OP_MATHI(sub);
}
#define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1]))
#ifdef MRB_NO_FLOAT
#define OP_CMP(op,sym) do {\
int result;\
/* need to check if - is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#else
#define OP_CMP(op, sym) do {\
int result;\
/* need to check if - is overridden */\
switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_fixnum,mrb_float);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\
result = OP_CMP_BODY(op,mrb_float,mrb_fixnum);\
break;\
case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\
result = OP_CMP_BODY(op,mrb_float,mrb_float);\
break;\
default:\
mid = MRB_OPSYM(sym);\
goto L_SEND_SYM;\
}\
if (result) {\
SET_TRUE_VALUE(regs[a]);\
}\
else {\
SET_FALSE_VALUE(regs[a]);\
}\
} while(0)
#endif
CASE(OP_EQ, B) {
if (mrb_obj_eq(mrb, regs[a], regs[a+1])) {
SET_TRUE_VALUE(regs[a]);
}
else {
OP_CMP(==,eq);
}
NEXT;
}
CASE(OP_LT, B) {
OP_CMP(<,lt);
NEXT;
}
CASE(OP_LE, B) {
OP_CMP(<=,le);
NEXT;
}
CASE(OP_GT, B) {
OP_CMP(>,gt);
NEXT;
}
CASE(OP_GE, B) {
OP_CMP(>=,ge);
NEXT;
}
CASE(OP_ARRAY, BB) {
regs[a] = mrb_ary_new_from_values(mrb, b, ®s[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARRAY2, BBB) {
regs[a] = mrb_ary_new_from_values(mrb, c, ®s[b]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYCAT, B) {
mrb_value splat = mrb_ary_splat(mrb, regs[a+1]);
if (mrb_nil_p(regs[a])) {
regs[a] = splat;
}
else {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_concat(mrb, regs[a], splat);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_ARYPUSH, BB) {
mrb_assert(mrb_array_p(regs[a]));
for (mrb_int i=0; i<b; i++) {
mrb_ary_push(mrb, regs[a], regs[a+i+1]);
}
NEXT;
}
CASE(OP_ARYDUP, B) {
mrb_value ary = regs[a];
if (mrb_array_p(ary)) {
ary = mrb_ary_new_from_values(mrb, RARRAY_LEN(ary), RARRAY_PTR(ary));
}
else {
ary = mrb_ary_new_from_values(mrb, 1, &ary);
}
regs[a] = ary;
NEXT;
}
CASE(OP_AREF, BBB) {
mrb_value v = regs[b];
if (!mrb_array_p(v)) {
if (c == 0) {
regs[a] = v;
}
else {
SET_NIL_VALUE(regs[a]);
}
}
else {
v = mrb_ary_ref(mrb, v, c);
regs[a] = v;
}
NEXT;
}
CASE(OP_ASET, BBB) {
mrb_assert(mrb_array_p(regs[a]));
mrb_ary_set(mrb, regs[b], c, regs[a]);
NEXT;
}
CASE(OP_APOST, BBB) {
mrb_value v = regs[a];
int pre = b;
int post = c;
struct RArray *ary;
int len, idx;
if (!mrb_array_p(v)) {
v = mrb_ary_new_from_values(mrb, 1, ®s[a]);
}
ary = mrb_ary_ptr(v);
len = (int)ARY_LEN(ary);
if (len > pre + post) {
v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre);
regs[a++] = v;
while (post--) {
regs[a++] = ARY_PTR(ary)[len-post-1];
}
}
else {
v = mrb_ary_new_capa(mrb, 0);
regs[a++] = v;
for (idx=0; idx+pre<len; idx++) {
regs[a+idx] = ARY_PTR(ary)[pre+idx];
}
while (idx < post) {
SET_NIL_VALUE(regs[a+idx]);
idx++;
}
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_INTERN, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_sym sym = mrb_intern_str(mrb, regs[a]);
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_SYMBOL, BB) {
size_t len;
mrb_sym sym;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
sym = mrb_intern_static(mrb, pool[b].u.str, len);
}
else {
sym = mrb_intern(mrb, pool[b].u.str, len);
}
regs[a] = mrb_symbol_value(sym);
NEXT;
}
CASE(OP_STRING, BB) {
mrb_int len;
mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
len = pool[b].tt >> 2;
if (pool[b].tt & IREP_TT_SFLAG) {
regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len);
}
else {
regs[a] = mrb_str_new(mrb, pool[b].u.str, len);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_STRCAT, B) {
mrb_assert(mrb_string_p(regs[a]));
mrb_str_concat(mrb, regs[a], regs[a+1]);
NEXT;
}
CASE(OP_HASH, BB) {
mrb_value hash = mrb_hash_new_capa(mrb, b);
int i;
int lim = a+b*2;
for (i=a; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
regs[a] = hash;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHADD, BB) {
mrb_value hash;
int i;
int lim = a+b*2+1;
hash = regs[a];
mrb_ensure_hash_type(mrb, hash);
for (i=a+1; i<lim; i+=2) {
mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
}
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_HASHCAT, B) {
mrb_value hash = regs[a];
mrb_assert(mrb_hash_p(hash));
mrb_hash_merge(mrb, hash, regs[a+1]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_LAMBDA, BB)
c = OP_L_LAMBDA;
L_MAKE_LAMBDA:
{
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
if (c & OP_L_CAPTURE) {
p = mrb_closure_new(mrb, nirep);
}
else {
p = mrb_proc_new(mrb, nirep);
p->flags |= MRB_PROC_SCOPE;
}
if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT;
regs[a] = mrb_obj_value(p);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_BLOCK, BB) {
c = OP_L_BLOCK;
goto L_MAKE_LAMBDA;
}
CASE(OP_METHOD, BB) {
c = OP_L_METHOD;
goto L_MAKE_LAMBDA;
}
CASE(OP_RANGE_INC, B) {
mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], FALSE);
regs[a] = v;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_RANGE_EXC, B) {
mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], TRUE);
regs[a] = v;
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_OCLASS, B) {
regs[a] = mrb_obj_value(mrb->object_class);
NEXT;
}
CASE(OP_CLASS, BB) {
struct RClass *c = 0, *baseclass;
mrb_value base, super;
mrb_sym id = syms[b];
base = regs[a];
super = regs[a+1];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
c = mrb_vm_define_class(mrb, base, super, id);
regs[a] = mrb_obj_value(c);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_MODULE, BB) {
struct RClass *cls = 0, *baseclass;
mrb_value base;
mrb_sym id = syms[b];
base = regs[a];
if (mrb_nil_p(base)) {
baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
if (!baseclass) baseclass = mrb->object_class;
base = mrb_obj_value(baseclass);
}
cls = mrb_vm_define_module(mrb, base, id);
regs[a] = mrb_obj_value(cls);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_EXEC, BB)
{
mrb_value recv = regs[a];
struct RProc *p;
const mrb_irep *nirep = irep->reps[b];
/* prepare closure */
p = mrb_proc_new(mrb, nirep);
p->c = NULL;
mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc);
MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv));
p->flags |= MRB_PROC_SCOPE;
/* prepare call stack */
cipush(mrb, a, 0, mrb_class_ptr(recv), p, 0, 0);
irep = p->body.irep;
pool = irep->pool;
syms = irep->syms;
mrb_stack_extend(mrb, irep->nregs);
stack_clear(regs+1, irep->nregs-1);
pc = irep->iseq;
JUMP;
}
CASE(OP_DEF, BB) {
struct RClass *target = mrb_class_ptr(regs[a]);
struct RProc *p = mrb_proc_ptr(regs[a+1]);
mrb_method_t m;
mrb_sym mid = syms[b];
MRB_METHOD_FROM_PROC(m, p);
mrb_define_method_raw(mrb, target, mid, m);
mrb_method_added(mrb, target, mid);
mrb_gc_arena_restore(mrb, ai);
regs[a] = mrb_symbol_value(mid);
NEXT;
}
CASE(OP_SCLASS, B) {
regs[a] = mrb_singleton_class(mrb, regs[a]);
mrb_gc_arena_restore(mrb, ai);
NEXT;
}
CASE(OP_TCLASS, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
regs[a] = mrb_obj_value(target);
NEXT;
}
CASE(OP_ALIAS, BB) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_alias_method(mrb, target, syms[a], syms[b]);
mrb_method_added(mrb, target, syms[a]);
NEXT;
}
CASE(OP_UNDEF, B) {
struct RClass *target = check_target_class(mrb);
if (!target) goto L_RAISE;
mrb_undef_method_id(mrb, target, syms[a]);
NEXT;
}
CASE(OP_DEBUG, Z) {
FETCH_BBB();
#ifdef MRB_USE_DEBUG_HOOK
mrb->debug_op_hook(mrb, irep, pc, regs);
#else
#ifndef MRB_NO_STDIO
printf("OP_DEBUG %d %d %d\n", a, b, c);
#else
abort();
#endif
#endif
NEXT;
}
CASE(OP_ERR, B) {
size_t len = pool[a].tt >> 2;
mrb_value exc;
mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0);
exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len);
mrb_exc_set(mrb, exc);
goto L_RAISE;
}
CASE(OP_EXT1, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT2, Z) {
insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_EXT3, Z) {
uint8_t insn = READ_B();
switch (insn) {
#define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
#include "mruby/ops.h"
#undef OPCODE
}
pc--;
NEXT;
}
CASE(OP_STOP, Z) {
/* stop VM */
CHECKPOINT_RESTORE(RBREAK_TAG_STOP) {
/* do nothing */
}
CHECKPOINT_MAIN(RBREAK_TAG_STOP) {
UNWIND_ENSURE(mrb, mrb->c->ci, pc, RBREAK_TAG_STOP, proc, mrb_nil_value());
}
CHECKPOINT_END(RBREAK_TAG_STOP);
L_STOP:
mrb->jmp = prev_jmp;
if (mrb->exc) {
mrb_assert(mrb->exc->tt == MRB_TT_EXCEPTION);
return mrb_obj_value(mrb->exc);
}
return regs[irep->nlocals];
}
}
END_DISPATCH;
#undef regs
}
MRB_CATCH(&c_jmp) {
mrb_callinfo *ci = mrb->c->ci;
while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) {
ci = cipop(mrb);
}
exc_catched = TRUE;
pc = ci->pc;
goto RETRY_TRY_BLOCK;
}
MRB_END_EXC(&c_jmp);
}
| 1
|
512,495
|
void Item_in_optimizer::restore_first_argument()
{
if (!invisible_mode())
{
args[0]= ((Item_in_subselect *)args[1])->left_expr;
}
}
| 0
|
437,689
|
static inline void irqenable_tx(struct cx23885_dev *dev, u32 mask)
{
	/* Only the transmit-event enable bit may be changed here. */
	u32 tse_bits = mask & IRQEN_TSE;

	/* Clear IRQEN_TSE in the IRQ enable register, then OR in the
	 * requested state of that bit. */
	cx23888_ir_and_or4(dev, CX23888_IR_IRQEN_REG, ~IRQEN_TSE, tse_bits);
}
| 0
|
139,211
|
gfx::Rect OverlayWindowViews::CalculateControlsBounds(int x,
                                                      const gfx::Size& size) {
  // Place the controls at horizontal offset |x|, vertically centered
  // within the window.
  const int window_height = GetBounds().size().height();
  const int y = (window_height - size.height()) / 2;
  return gfx::Rect(gfx::Point(x, y), size);
}
| 0
|
505,658
|
/*
 * Feed all data currently readable from parser->input through the SMTP
 * command line parser.
 *
 * Returns the nonzero result of smtp_command_parse_line() once it makes a
 * decision, -1 on oversized lines or stream errors, and (observed from the
 * EOF path) -2 when input ends while the parser is still in its initial
 * state. A 0 from the inner parser means "need more data" and continues
 * the read loop.
 */
static int smtp_command_parse(struct smtp_command_parser *parser)
{
	const unsigned char *begin;
	size_t size, old_bytes = 0;
	int ret;
	/* old_bytes makes i_stream_read_data() block-return until it can
	   provide more than what the previous iteration already saw, so the
	   loop only re-parses when new bytes arrived. */
	while ((ret = i_stream_read_data(parser->input, &begin, &size,
					 old_bytes)) > 0) {
		parser->cur = begin;
		parser->end = parser->cur + size;
		ret = smtp_command_parse_line(parser);
		/* Consume exactly the bytes the line parser advanced over. */
		i_stream_skip(parser->input, parser->cur - begin);
		if (ret != 0)
			return ret;
		old_bytes = i_stream_get_data_size(parser->input);
	}
	if (ret == -2) {
		/* should not really happen */
		smtp_command_parser_error(parser,
			SMTP_COMMAND_PARSE_ERROR_LINE_TOO_LONG,
			"%s line is too long",
			(parser->auth_response ?
				"AUTH response" : "Command"));
		return -1;
	}
	if (ret < 0) {
		/* read returned EOF or a stream error */
		i_assert(parser->input->eof);
		if (parser->input->stream_errno == 0) {
			/* clean EOF in the middle of a command */
			if (parser->state.state == SMTP_COMMAND_PARSE_STATE_INIT)
				ret = -2;
			smtp_command_parser_error(parser,
				SMTP_COMMAND_PARSE_ERROR_BROKEN_COMMAND,
				"Premature end of input");
		} else {
			smtp_command_parser_error(parser,
				SMTP_COMMAND_PARSE_ERROR_BROKEN_STREAM,
				"Stream error: %s",
				i_stream_get_error(parser->input));
		}
	}
	return ret;
}
| 0
|
439,107
|
/*
 * Read one RLE (PackBits) compressed channel of a PSD layer into `image`.
 *
 * sizes[y] gives the compressed byte count of scanline y. A scratch buffer
 * sized for the largest scanline is allocated once; each scanline is read
 * from the blob, RLE-decoded into `pixels`, and stored via
 * ReadPSDChannelPixels. Returns MagickTrue on success, MagickFalse when a
 * scanline is short/undecodable, and throws on allocation failure or
 * implausible compressed lengths.
 */
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Find the largest compressed scanline so one buffer fits them all. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  /* A PackBits row cannot legitimately exceed the decoded row size by
     much; a larger claim indicates a corrupt or malicious file. */
  if (length > (row_size+512))
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* NOTE(review): 123456 appears to be a sentinel depth passed for
       1-bit images - presumably DecodePSDPixels special-cases it; confirm
       against its implementation. */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
| 0
|
473,964
|
compile_string_node(Node* node, regex_t* reg)
{
  /* Emit the literal string of a string node, batching consecutive
     characters that have the same encoded byte length into a single
     add_compile_string() call. */
  StrNode* sn = NSTR(node);
  OnigEncoding enc = reg->enc;
  UChar *end, *run_start, *q;
  int r, run_char_len, char_len, run_count, ambig;

  if (sn->end <= sn->s)
    return 0;  /* empty string: nothing to compile */

  end   = sn->end;
  ambig = NSTRING_IS_AMBIG(node);

  /* Start the first run at the first character. */
  run_start    = sn->s;
  run_char_len = enclen(enc, run_start, end);
  run_count    = 1;
  q = run_start + run_char_len;

  while (q < end) {
    char_len = enclen(enc, q, end);
    if (char_len == run_char_len) {
      run_count++;
    }
    else {
      /* Byte length changed: flush the finished run, begin a new one. */
      r = add_compile_string(run_start, run_char_len, run_count, reg, ambig);
      if (r) return r;
      run_start    = q;
      run_char_len = char_len;
      run_count    = 1;
    }
    q += char_len;
  }
  /* Flush the final run. */
  return add_compile_string(run_start, run_char_len, run_count, reg, ambig);
}
| 0
|
247,160
|
/*
 * Print the possible source and sink filters for a JS filter script.
 *
 * If js_filter is NULL the script named filter_name is loaded first. Since
 * edges for an unloaded JSF accept/produce anything, the real capability
 * match is computed manually here against every registered filter, using a
 * fake register carrying only the script's forced caps. Results go through
 * print_fn when provided, otherwise through GF_LOG.
 */
static void gf_fs_print_jsf_connection(GF_FilterSession *session, char *filter_name, GF_Filter *js_filter, void (*print_fn)(FILE *output, GF_SysPrintArgFlags flags, const char *fmt, ...) )
{
	GF_CapsBundleStore capstore;
	const char *js_name = NULL;
	GF_Err e=GF_OK;
	u32 i, j, count, nb_js_caps;
	GF_List *sources, *sinks;
	GF_FilterRegister loaded_freg;
	Bool has_output, has_input;

	if (!js_filter) {
		js_filter = gf_fs_load_filter(session, filter_name, &e);
		if (!js_filter) return;
	}

	/* display only the basename of the script path */
	js_name = strrchr(filter_name, '/');
	if (!js_name) js_name = strrchr(filter_name, '\\');
	if (js_name) js_name++;
	else js_name = filter_name;

	nb_js_caps = gf_filter_caps_bundle_count(js_filter->forced_caps, js_filter->nb_forced_caps);

	//fake a new register with only the caps set
	memset(&loaded_freg, 0, sizeof(GF_FilterRegister));
	loaded_freg.caps = js_filter->forced_caps;
	loaded_freg.nb_caps = js_filter->nb_forced_caps;

	has_output = gf_filter_has_out_caps(js_filter->forced_caps, js_filter->nb_forced_caps);
	has_input = gf_filter_has_in_caps(js_filter->forced_caps, js_filter->nb_forced_caps);

	memset(&capstore, 0, sizeof(GF_CapsBundleStore));
	sources = gf_list_new();
	sinks = gf_list_new();
	//edges for JS are for the unloaded JSF (eg accept anything, output anything).
	//we need to do a manual check
	count = gf_list_count(session->links);
	for (i=0; i<count; i++) {
		u32 nb_src_caps, k, l;
		Bool src_match = GF_FALSE;
		Bool sink_match = GF_FALSE;
		GF_FilterRegDesc *a_reg = gf_list_get(session->links, i);
		if (a_reg->freg == js_filter->freg) continue;

		//check which cap of this filter matches our destination
		nb_src_caps = gf_filter_caps_bundle_count(a_reg->freg->caps, a_reg->freg->nb_caps);
		for (k=0; k<nb_src_caps; k++) {
			for (l=0; l<nb_js_caps; l++) {
				s32 bundle_idx;
				u32 loaded_filter_only_flags = 0;
				u32 path_weight;
				if (has_input && !src_match) {
					path_weight = gf_filter_caps_to_caps_match(a_reg->freg, k, (const GF_FilterRegister *) &loaded_freg, NULL, &bundle_idx, l, &loaded_filter_only_flags, &capstore);
					if (path_weight && (bundle_idx == l))
						src_match = GF_TRUE;
				}
				if (has_output && !sink_match) {
					loaded_filter_only_flags = 0;
					path_weight = gf_filter_caps_to_caps_match(&loaded_freg, l, a_reg->freg, NULL, &bundle_idx, k, &loaded_filter_only_flags, &capstore);
					if (path_weight && (bundle_idx == k))
						sink_match = GF_TRUE;
				}
			}
			if (src_match && sink_match)
				break;
		}
		if (src_match) gf_list_add(sources, (void *) a_reg->freg);
		if (sink_match) gf_list_add(sinks, (void *) a_reg->freg);
	}

	/* print sources (i==0) then sinks (i==1) */
	for (i=0; i<2; i++) {
		GF_List *from = i ? sinks : sources;
		char *type = i ? "sinks" : "sources";
		count = gf_list_count(from);
		if (!count) {
			if (print_fn)
				print_fn(stderr, 1, "%s: no %s\n", js_name, type);
			else {
				/* fix: format has two %s conversions, js_name was missing */
				GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("%s: no %s\n", js_name, type));
			}
			continue;
		}
		if (print_fn)
			print_fn(stderr, 1, "%s %s:", js_name, type);
		else {
			GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("%s %s:", js_name, type));
		}
		for (j=0; j<count; j++) {
			GF_FilterRegister *a_reg = gf_list_get(from, j);
			if (print_fn)
				print_fn(stderr, 0, " %s", a_reg->name);
			else {
				GF_LOG(GF_LOG_INFO, GF_LOG_APP, (" %s", a_reg->name));
			}
		}
		if (print_fn)
			print_fn(stderr, 0, "\n");
		else {
			GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("\n"));
		}
	}

	/* release scratch storage used by the caps matcher */
	if (capstore.bundles_cap_found) gf_free(capstore.bundles_cap_found);
	if (capstore.bundles_in_ok) gf_free(capstore.bundles_in_ok);
	if (capstore.bundles_in_scores) gf_free(capstore.bundles_in_scores);
	gf_list_del(sources);
	gf_list_del(sinks);
}
| 0
|
401,514
|
/*
 * Reset every CPU's cached batch of random data by zeroing its position,
 * which presumably forces a refill on the next batched read - TODO confirm
 * position semantics against the batched_entropy definition.
 */
static void invalidate_batched_entropy(void)
{
	int cpu;
	unsigned long flags;
	for_each_possible_cpu (cpu) {
		struct batched_entropy *batched_entropy;
		/*
		 * The irqsave on the first lock and irqrestore on the last
		 * unlock are intentionally asymmetric: interrupts stay
		 * disabled across the reset of both the u32 and u64 batch
		 * of this CPU.
		 */
		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
		batched_entropy->position = 0;
		spin_unlock(&batched_entropy->batch_lock);
		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
		spin_lock(&batched_entropy->batch_lock);
		batched_entropy->position = 0;
		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
	}
}
| 0
|
259,543
|
static CURLUcode parseurl_and_replace(const char *url, CURLU *u,
                                      unsigned int flags)
{
  CURLU tmpurl;
  CURLUcode result;

  /* Parse into a zeroed temporary so the caller's handle stays intact
     if parsing fails. */
  memset(&tmpurl, 0, sizeof(tmpurl));
  result = parseurl(url, &tmpurl, flags);
  if(result) {
    /* failure: release whatever the partial parse allocated */
    free_urlhandle(&tmpurl);
    return result;
  }

  /* success: drop the old contents and adopt the new ones */
  free_urlhandle(u);
  *u = tmpurl;
  return result;
}
| 0
|
221,078
|
u32 gf_bs_read_ue_log_idx3(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2, s32 idx3)
{
	/* Unsigned Exp-Golomb decoder: count leading zero bits, then read that
	   many suffix bits and add 2^zeros - 1. Optionally logs the read. */
	u32 val = 0;
	u32 bits_read = 0;
	u32 bit = 0;
	s32 zeros = -1;

	while (!bit) {
		/* cap the scan; checked once here rather than per-bit elsewhere */
		if (zeros >= 32) {
			break;
		}
		bit = gf_bs_read_int(bs, 1);
		bits_read++;
		zeros++;
	}

	if (zeros >= 32) {
		//gf_bs_read_int keeps returning 0 on EOS, so if no more bits available, rbsp was truncated otherwise code is broken in rbsp)
		//we only test once nb_lead>=32 to avoid testing at each bit read
		if (!gf_bs_available(bs)) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] exp-golomb read failed, not enough bits in bitstream !\n"));
		} else {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] corrupted exp-golomb code, %d leading zeros, max 31 allowed !\n", zeros));
		}
		return 0;
	}

	if (zeros) {
		/* suffix bits, offset by (2^zeros - 1) */
		val = gf_bs_read_int(bs, zeros);
		val += (((u32)1) << zeros) - 1;
		bits_read += zeros;
	}

	if (fname) {
		gf_bs_log_idx(bs, bits_read, fname, val, idx1, idx2, idx3);
	}
	return val;
}
| 0
|
487,627
|
/*
 * uname(2): copy the current UTS namespace's utsname record to user space
 * under the uts_sem read lock.  Returns 0 on success or -EFAULT if the
 * user buffer is not writable.  (Local renamed from 'errno' to avoid
 * clashing with the libc macro of the same name.)
 */
asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	long err = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof(*name)))
		err = -EFAULT;
	up_read(&uts_sem);
	return err;
}
| 0
|
233,876
|
*/
/* Returns 1 when the stack holds no elements (top index is 0), 0 otherwise. */
static int wddx_stack_is_empty(wddx_stack *stack)
{
	if (stack->top == 0) {
		return 1;
	} else {
		return 0;
	}
| 0
|
437,383
|
/* Copy one OptExact record into another via plain structure assignment
 * (member-wise shallow copy). */
copy_opt_exact(OptExact* to, OptExact* from)
{
  *to = *from;
}
| 0
|
379,657
|
/* Record that the instruction at access_addr accesses variable var.
 * access_type bits are OR-ed into any existing record at the same offset;
 * reg and stackptr overwrite previous values.  Also maintains the inverse
 * map (instruction offset -> accessed vars) in var->fcn->inst_vars. */
R_API void r_anal_var_set_access(RAnalVar *var, const char *reg, ut64 access_addr, int access_type, st64 stackptr) {
	r_return_if_fail (var);
	/* accesses are keyed by offset relative to the function start */
	st64 offset = access_addr - var->fcn->addr;
	// accesses are stored ordered by offset, use binary search to get the matching existing or the index to insert a new one
	size_t index;
	r_vector_lower_bound (&var->accesses, offset, index, ACCESS_CMP);
	RAnalVarAccess *acc = NULL;
	if (index < var->accesses.len) {
		acc = r_vector_index_ptr (&var->accesses, index);
	}
	if (!acc || acc->offset != offset) {
		/* no access recorded at this offset yet: insert a fresh record */
		acc = r_vector_insert (&var->accesses, index, NULL);
		acc->offset = offset;
		acc->type = 0;
	}
	acc->type |= (ut8)access_type;
	acc->stackptr = stackptr;
	acc->reg = r_str_constpool_get (&var->fcn->anal->constpool, reg);
	// add the inverse reference from the instruction to the var
	RPVector *inst_accesses = ht_up_find (var->fcn->inst_vars, (ut64)offset, NULL);
	if (!inst_accesses) {
		inst_accesses = r_pvector_new (NULL);
		if (!inst_accesses) {
			/* allocation failed: forward map updated, inverse map skipped */
			return;
		}
		ht_up_insert (var->fcn->inst_vars, (ut64)offset, inst_accesses);
	}
	if (!r_pvector_contains (inst_accesses, var)) {
		r_pvector_push (inst_accesses, var);
	}
}
| 0
|
329,904
|
/* Rounded fixed-point 8x8 -> 8 bit multiply using the classic
 * (t + (t >> shift)) >> shift reduction; presumably approximates
 * (a * b) / 255 assuming G_SHIFT == 8 and ONE_HALF == 0x80 — the macros
 * are defined elsewhere in this file, confirm there. */
mul8_8 (uint8_t a, uint8_t b)
{
    uint16_t prod = (uint16_t) a * b + ONE_HALF;
    return ((prod >> G_SHIFT) + prod) >> G_SHIFT;
}
| 0
|
436,138
|
/* Deferred teardown of an io_uring context: cancels outstanding requests,
 * waits for all references to drop, detaches every task node, then frees
 * the context.  Runs from a workqueue; a 5-minute watchdog WARNs if
 * teardown stalls. */
static void io_ring_exit_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
	unsigned long timeout = jiffies + HZ * 60 * 5;
	struct io_tctx_exit exit;
	struct io_tctx_node *node;
	int ret;
	/*
	 * If we're doing polled IO and end up having requests being
	 * submitted async (out-of-line), then completions can come in while
	 * we're waiting for refs to drop. We need to reap these manually,
	 * as nobody else will be looking for them.
	 */
	do {
		io_uring_try_cancel_requests(ctx, NULL, true);
		if (ctx->sq_data) {
			struct io_sq_data *sqd = ctx->sq_data;
			struct task_struct *tsk;
			/* park the SQPOLL thread while cancelling its io-wq work */
			io_sq_thread_park(sqd);
			tsk = sqd->thread;
			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
				io_wq_cancel_cb(tsk->io_uring->io_wq,
						io_cancel_ctx_cb, ctx, true);
			io_sq_thread_unpark(sqd);
		}
		WARN_ON_ONCE(time_after(jiffies, timeout));
	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
	init_completion(&exit.completion);
	init_task_work(&exit.task_work, io_tctx_exit_cb);
	exit.ctx = ctx;
	/*
	 * Some may use context even when all refs and requests have been put,
	 * and they are free to do so while still holding uring_lock or
	 * completion_lock, see io_req_task_submit(). Apart from other work,
	 * this lock/unlock section also waits them to finish.
	 */
	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->tctx_list)) {
		WARN_ON_ONCE(time_after(jiffies, timeout));
		node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
					ctx_node);
		/* don't spin on a single task if cancellation failed */
		list_rotate_left(&ctx->tctx_list);
		ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
		if (WARN_ON_ONCE(ret))
			continue;
		wake_up_process(node->task);
		/* drop the lock while the task runs the exit callback */
		mutex_unlock(&ctx->uring_lock);
		wait_for_completion(&exit.completion);
		mutex_lock(&ctx->uring_lock);
	}
	mutex_unlock(&ctx->uring_lock);
	/* lock/unlock pair flushes any straggler under completion_lock */
	spin_lock_irq(&ctx->completion_lock);
	spin_unlock_irq(&ctx->completion_lock);
	io_ring_ctx_free(ctx);
| 0
|
463,220
|
/*
 * Iteration callback for annotation database lookups.  Splits the raw db
 * key into (mboxname, uid, entry, userid), sanity-checks that the parts
 * round-trip through make_key, parses the stored value/metadata, and hands
 * the annotation to frock->proc unless it is filtered out: unchanged since
 * frock->since_modseq, or a tombstone while tombstones are not requested.
 * Returns 0 to continue iterating, non-zero to abort with that error.
 */
static int find_cb(void *rock, const char *key, size_t keylen,
                   const char *data, size_t datalen)
{
    struct find_rock *frock = (struct find_rock *) rock;
    const char *mboxname, *entry, *userid;
    unsigned int uid;
    char newkey[MAX_MAILBOX_PATH+1];
    size_t newkeylen;
    struct buf value = BUF_INITIALIZER;
    struct annotate_metadata mdata;
    int r;
    assert(keylen < MAX_MAILBOX_PATH);
    r = split_key(frock->d, key, keylen, &mboxname,
                  &uid, &entry, &userid);
    if (r) {
        /* "%.*s" (precision from arg), not the old "%*.s" which printed
         * nothing; key is not NUL-terminated so the precision is required */
        syslog(LOG_ERR, "find_cb: can't split bogus key %.*s", (int)keylen, key);
        return r;
    }
    /* sanity check: rebuilding the key from its parts must reproduce it */
    newkeylen = make_key(mboxname, uid, entry, userid, newkey, sizeof(newkey));
    if (keylen != newkeylen || strncmp(newkey, key, keylen)) {
        syslog(LOG_ERR, "find_cb: bogus key %s %d %s %s (%d %d)", mboxname, uid, entry, userid, (int)keylen, (int)newkeylen);
    }
    r = split_attribs(data, datalen, &value, &mdata);
    if (r) {
        buf_free(&value);
        return r;
    }
#if DEBUG
    syslog(LOG_ERR, "find_cb: found key %s in %s with modseq " MODSEQ_FMT,
           key_as_string(frock->d, key, keylen), frock->d->filename, mdata.modseq);
#endif
    /* skip entries not modified since the caller's modseq cutoff */
    if (frock->since_modseq && frock->since_modseq >= mdata.modseq) {
#if DEBUG
        syslog(LOG_ERR,"find_cb: ignoring key %s: " " modseq " MODSEQ_FMT " is <= " MODSEQ_FMT,
               key_as_string(frock->d, key, keylen), mdata.modseq, frock->since_modseq);
#endif
        buf_free(&value);
        return 0;
    }
    /* skip deleted/empty entries unless tombstones were asked for */
    if (((mdata.flags & ANNOTATE_FLAG_DELETED) || !buf_len(&value)) &&
        !(frock->flags & ANNOTATE_TOMBSTONES)) {
#if DEBUG
        syslog(LOG_ERR, "find_cb: ignoring key %s, tombstones are ignored",
               key_as_string(frock->d, key, keylen));
#endif
        buf_free(&value);
        return 0;
    }
    if (!r) r = frock->proc(mboxname, uid, entry, userid, &value, &mdata,
                            frock->rock);
    buf_free(&value);
    return r;
}
| 0
|
229,272
|
// Collect per-connection client metadata from every active connection on
// this CQL server.  Iterates "gently" (co_await between connections), so
// the snapshot is not atomic across connections.
// NOTE(review): assumes every generic_server::connection handled here is a
// cql_server::connection — dynamic_cast on a reference throws otherwise.
future<utils::chunked_vector<client_data>> cql_server::get_client_data() {
    utils::chunked_vector<client_data> ret;
    co_await for_each_gently([&ret] (const generic_server::connection& c) {
        const connection& conn = dynamic_cast<const connection&>(c);
        ret.emplace_back(conn.make_client_data());
    });
    co_return ret;
}
| 0
|
462,410
|
/* Accept one pending connection on listener pLstn.  On success, *newSock
 * receives the new socket (set to non-blocking) and *peerName / *peerIP the
 * resolved peer properties.  Uses rsyslog's DEFiRet/CHKiRet/ABORT_FINALIZE
 * error-handling macros; on any failure the new socket is closed. */
AcceptConnReq(ptcplstn_t *pLstn, int *newSock, prop_t **peerName, prop_t **peerIP)
{
	int sockflags;
	struct sockaddr_storage addr;
	socklen_t addrlen = sizeof(addr);
	int iNewSock = -1;
	DEFiRet;
	iNewSock = accept(pLstn->sock, (struct sockaddr*) &addr, &addrlen);
	if(iNewSock < 0) {
		/* transient accept failures are reported as "no more data" so the
		 * caller simply stops draining the listen queue */
		if(errno == EAGAIN || errno == EWOULDBLOCK || errno == EMFILE)
			ABORT_FINALIZE(RS_RET_NO_MORE_DATA);
		ABORT_FINALIZE(RS_RET_ACCEPT_ERR);
	}
	if(pLstn->pSrv->bKeepAlive)
		EnableKeepAlive(pLstn, iNewSock);/* we ignore errors, best to do! */
	CHKiRet(getPeerNames(peerName, peerIP, (struct sockaddr *) &addr, pLstn->pSrv->bUnixSocket));
	/* set the new socket to non-blocking IO */
	if((sockflags = fcntl(iNewSock, F_GETFL)) != -1) {
		sockflags |= O_NONBLOCK;
		/* SETFL could fail too, so get it caught by the subsequent
		 * error check.
		 */
		sockflags = fcntl(iNewSock, F_SETFL, sockflags);
	}
	if(sockflags == -1) {
		DBGPRINTF("error %d setting fcntl(O_NONBLOCK) on tcp socket %d", errno, iNewSock);
		/* release the peer props allocated by getPeerNames above */
		prop.Destruct(peerName);
		prop.Destruct(peerIP);
		ABORT_FINALIZE(RS_RET_IO_ERROR);
	}
	*newSock = iNewSock;
finalize_it:
	if(iRet != RS_RET_OK) {
		/* the close may be redundant, but that doesn't hurt... */
		if(iNewSock != -1)
			close(iNewSock);
	}
	RETiRet;
}
| 0
|
436,054
|
/* Create a new io_uring instance: validate/clamp the requested ring sizes,
 * allocate the context and SQ/CQ rings, start the SQPOLL offload thread if
 * requested, publish the ring layout offsets back to userspace, and install
 * the ring file descriptor.  Returns the new fd or a negative errno. */
static int io_uring_create(unsigned entries, struct io_uring_params *p,
			   struct io_uring_params __user *params)
{
	struct io_ring_ctx *ctx;
	struct file *file;
	int ret;
	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}
	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (!p->cq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}
	ctx = io_ring_ctx_alloc(p);
	if (!ctx)
		return -ENOMEM;
	ctx->compat = in_compat_syscall();
	/* CAP_IPC_LOCK holders skip per-user locked-memory accounting */
	if (!capable(CAP_IPC_LOCK))
		ctx->user = get_uid(current_user());
	/*
	 * This is just grabbed for accounting purposes. When a process exits,
	 * the mm is exited and dropped before the files, hence we need to hang
	 * on to this mm purely for the purposes of being able to unaccount
	 * memory (locked/pinned vm). It's not used for anything else.
	 */
	mmgrab(current->mm);
	ctx->mm_account = current->mm;
	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;
	ret = io_sq_offload_create(ctx, p);
	if (ret)
		goto err;
	/* always set a rsrc node */
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		goto err;
	io_rsrc_node_switch(ctx, NULL);
	/* report the ring field offsets so userspace can mmap and index them */
	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);
	p->cq_off.flags = offsetof(struct io_rings, cq_flags);
	/* advertise which optional features this kernel supports */
	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
			IORING_FEAT_RSRC_TAGS;
	if (copy_to_user(params, p, sizeof(*p))) {
		ret = -EFAULT;
		goto err;
	}
	file = io_uring_get_file(ctx);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err;
	}
	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_install_fd(ctx, file);
	if (ret < 0) {
		/* fput will clean it up */
		fput(file);
		return ret;
	}
	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
| 0
|
225,744
|
/* Allocate and initialize a new StereoVideo ('stvi') ISOBMFF box.
 * ISOM_DECL_BOX_ALLOC declares and fills a local 'tmp' of the given type. */
GF_Box *stvi_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_StereoVideoBox, GF_ISOM_BOX_TYPE_STVI);
	return (GF_Box *)tmp;
| 0
|
274,674
|
/* Mouse-wheel handler for the main window: wheel up zooms in at the cursor,
 * wheel down zooms out; horizontal scroll and any other direction are
 * ignored.  Always returns TRUE (event consumed). */
callbacks_window_scroll_event(GtkWidget *widget, GdkEventScroll *event)
{
	if (event->direction == GDK_SCROLL_UP) {
		render_zoom_display (ZOOM_IN_CMOUSE, 0, event->x, event->y);
	}
	else if (event->direction == GDK_SCROLL_DOWN) {
		render_zoom_display (ZOOM_OUT_CMOUSE, 0, event->x, event->y);
	}
	/* GDK_SCROLL_LEFT / GDK_SCROLL_RIGHT / anything else: ignored */
	return TRUE;
} /* scroll_event */
| 0
|
336,607
|
/* Store the migration destination (host, ports, optional cert subject) in
 * the server config, releasing any previously stored destination first.
 * Returns FALSE when dest is missing or neither port is usable, TRUE on
 * success. */
static int reds_set_migration_dest_info(RedsState *reds,
                                        const char* dest,
                                        int port, int secure_port,
                                        const char* cert_subject)
{
    RedsMigSpice *mig_dest;

    /* drop any destination recorded by an earlier call */
    reds_mig_release(reds->config);

    if (!dest || (port == -1 && secure_port == -1)) {
        return FALSE;
    }

    mig_dest = g_new0(RedsMigSpice, 1);
    mig_dest->port = port;
    mig_dest->sport = secure_port;
    mig_dest->host = g_strdup(dest);
    if (cert_subject != NULL) {
        mig_dest->cert_subject = g_strdup(cert_subject);
    }
    reds->config->mig_spice = mig_dest;
    return TRUE;
}
| 0
|
369,250
|
/* Set up the async retry state for a read/write request: import the user
 * iovec into the request's async data and remember whether a heap-allocated
 * iovec needs freeing later.  Returns 0 on success or a negative error. */
static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov;
	int ret;
	/* submission path, ->uring_lock should already be taken */
	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
	if (unlikely(ret < 0))
		return ret;
	iorw->bytes_done = 0;
	iorw->free_iovec = iov;	/* non-NULL when the iovec was heap-allocated */
	if (iov)
		req->flags |= REQ_F_NEED_CLEANUP;	/* ensure it gets freed */
	return 0;
}
| 0
|
455,391
|
/* Clear the "needs reclaim" radix-tree tag for inode 'ino' in its per-AG
 * inode cache, then let the per-AG code update its summary reclaim state. */
xfs_inode_clear_reclaim_tag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
			     XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.