idx
int64
func
string
target
int64
225,669
/* Constructor for the ISOBMFF 'padb' (padding bits) box: the ISOM_DECL_BOX_ALLOC
 * helper allocates and zero-initializes a GF_PaddingBitsBox named 'tmp' tagged
 * with GF_ISOM_BOX_TYPE_PADB, which is returned as a generic GF_Box pointer. */
GF_Box *padb_box_new() { ISOM_DECL_BOX_ALLOC(GF_PaddingBitsBox, GF_ISOM_BOX_TYPE_PADB); return (GF_Box *)tmp; }
0
247,359
/* Verify an OpenPGP signature against the supplied digest context.
 * Returns RPMRC_FAIL if either argument is NULL; otherwise delegates to
 * pgpVerifySignature() using the public-key and signature parameter sets
 * extracted from "dig". */
rpmRC pgpVerifySig(pgpDig dig, DIGEST_CTX hashctx) { if (dig == NULL || hashctx == NULL) return RPMRC_FAIL; return pgpVerifySignature(pgpDigGetParams(dig, PGPTAG_PUBLIC_KEY), pgpDigGetParams(dig, PGPTAG_SIGNATURE), hashctx); }
0
274,890
// Verifies the quantized int8 LESS operator with broadcasting: the same six
// LHS elements are exercised under several equivalent shapes while the RHS is
// a broadcast scalar; the element-wise result must be identical in every case.
TEST(ComparisonsTest, QuantizedInt8LessWithBroadcast) {
  const float kMin = -127.f;
  const float kMax = 127.f;
  // Every shape covers the same 6 elements in row-major order.
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  // size_t matches test_shapes.size() and avoids a signed/unsigned
  // comparison warning in the loop condition.
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
                            {TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
                            BuiltinOperator_LESS);
    model.QuantizeAndPopulate<int8_t>(model.input1(), {20, -2, -71, 8, 11, 20});
    model.QuantizeAndPopulate<int8_t>(model.input2(), {8});
    model.Invoke();
    // Expected: x < 8 element-wise for {20, -2, -71, 8, 11, 20}.
    EXPECT_THAT(model.GetOutput(),
                ElementsAre(false, true, true, false, false, false))
        << "With shape number " << i;
  }
}
0
256,449
/* Janet core builtin (array/new capacity): takes exactly one integer argument,
 * allocates an empty array with that pre-reserved capacity, and returns it
 * wrapped as a Janet value. */
JANET_CORE_FN(cfun_array_new, "(array/new capacity)", "Creates a new empty array with a pre-allocated capacity. The same as " "`(array)` but can be more efficient if the maximum size of an array is known.") { janet_fixarity(argc, 1); int32_t cap = janet_getinteger(argv, 0); JanetArray *array = janet_array(cap); return janet_wrap_array(array); }
0
194,994
// Builds the immutable per-node execution metadata for this graph: frame
// info and pending-count layouts, one kernel instance per node, per-node
// control-flow flags, the const-tensor cache, root-node list, and the
// outputs-required bitmap; finally rewrites edge input slots to absolute
// input locations and initializes pending counts.
// NOTE(review): "enter_frame_info_.size() <= id" compares size_t against a
// signed int id — safe while node ids are non-negative, but an explicit cast
// would silence the signed/unsigned comparison; confirm id invariants.
Status ImmutableExecutorState::Initialize(const Graph& graph) { TF_RETURN_IF_ERROR(gview_.Initialize(&graph)); // Build the information about frames in this subgraph. ControlFlowInfo cf_info; TF_RETURN_IF_ERROR(BuildControlFlowInfo(&graph, &cf_info)); for (auto& it : cf_info.unique_frame_names) { EnsureFrameInfo(it)->nodes = absl::make_unique<std::vector<const NodeItem*>>(); } root_frame_info_ = frame_info_[""].get(); pending_ids_.resize(gview_.num_nodes()); // Preprocess every node in the graph to create an instance of op // kernel for each node. requires_control_flow_ = false; for (const Node* n : graph.nodes()) { if (IsSink(n)) continue; if (IsSwitch(n) || IsMerge(n) || IsEnter(n) || IsExit(n)) { requires_control_flow_ = true; } else if (IsRecv(n)) { // A Recv node from a different device may produce dead tensors from // non-local control-flow nodes. // // TODO(mrry): Track whether control flow was present in the // pre-partitioned graph, and enable the caller (e.g. // `DirectSession`) to relax this constraint. 
string send_device; string recv_device; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "send_device", &send_device)); TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "recv_device", &recv_device)); if (send_device != recv_device) { requires_control_flow_ = true; } } const int id = n->id(); const string& frame_name = cf_info.frame_names[id]; FrameInfo* frame_info = EnsureFrameInfo(frame_name); NodeItem* item = gview_.node(id); item->node_id = id; item->input_start = frame_info->total_inputs; frame_info->total_inputs += n->num_inputs(); Status s = params_.create_kernel(n->properties(), &item->kernel); if (!s.ok()) { item->kernel = nullptr; s = AttachDef(s, *n); return s; } CHECK(item->kernel); item->kernel_is_async = (item->kernel->AsAsync() != nullptr); item->is_merge = IsMerge(n); item->is_any_consumer_merge_or_control_trigger = false; for (const Node* consumer : n->out_nodes()) { if (IsMerge(consumer) || IsControlTrigger(consumer)) { item->is_any_consumer_merge_or_control_trigger = true; break; } } const Tensor* const_tensor = item->kernel->const_tensor(); if (const_tensor) { // Hold onto a shallow copy of the constant tensor in `*this` so that the // reference count does not drop to 1. This prevents the constant tensor // from being forwarded, and its buffer reused. 
const_tensors_.emplace_back(*const_tensor); } item->const_tensor = const_tensor; item->is_noop = (item->kernel->type_string_view() == "NoOp"); item->is_enter = IsEnter(n); if (item->is_enter) { bool is_constant_enter; TF_RETURN_IF_ERROR( GetNodeAttr(n->attrs(), "is_constant", &is_constant_enter)); item->is_constant_enter = is_constant_enter; string frame_name; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "frame_name", &frame_name)); FrameInfo* frame_info = frame_info_[frame_name].get(); int parallel_iterations; TF_RETURN_IF_ERROR( GetNodeAttr(n->attrs(), "parallel_iterations", &parallel_iterations)); if (frame_info->parallel_iterations == -1) { frame_info->parallel_iterations = parallel_iterations; } else if (frame_info->parallel_iterations != parallel_iterations) { LOG(WARNING) << "Loop frame \"" << frame_name << "\" had two different values for parallel_iterations: " << frame_info->parallel_iterations << " vs. " << parallel_iterations << "."; } if (enter_frame_info_.size() <= id) { enter_frame_info_.resize(id + 1); } enter_frame_info_[id] = frame_info; } else { item->is_constant_enter = false; } item->is_exit = IsExit(n); item->is_control_trigger = IsControlTrigger(n); item->is_source = IsSource(n); item->is_enter_exit_or_next_iter = (IsEnter(n) || IsExit(n) || IsNextIteration(n)); item->is_transfer_node = IsTransferNode(n); item->is_initialization_op = IsInitializationOp(n); item->is_recv_or_switch = IsRecv(n) || IsSwitch(n); item->is_next_iteration = IsNextIteration(n); item->is_distributed_communication = IsDistributedCommunication(n); // Compute the maximum values we'll store for this node in the // pending counts data structure, and allocate a handle in // that frame's pending counts data structure that has enough // space to store these maximal count values. 
size_t max_pending, max_dead; GetMaxPendingCounts(n, &max_pending, &max_dead); pending_ids_[id] = frame_info->pending_counts_layout.CreateHandle(max_pending, max_dead); // See if this node is a root node, and if so, add item to root_nodes_. if (n->in_edges().empty()) { root_nodes_.push_back(item); } // Initialize static information about the frames in the graph. frame_info->nodes->push_back(item); if (item->is_enter) { string enter_name; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "frame_name", &enter_name)); EnsureFrameInfo(enter_name)->input_count++; } // Record information about whether each output of the op is used. std::unique_ptr<bool[]> outputs_required(new bool[n->num_outputs()]); std::fill(&outputs_required[0], &outputs_required[n->num_outputs()], false); int32_t unused_outputs = n->num_outputs(); for (const Edge* e : n->out_edges()) { if (IsSink(e->dst())) continue; if (e->src_output() >= 0) { if (!outputs_required[e->src_output()]) { --unused_outputs; outputs_required[e->src_output()] = true; } } } if (unused_outputs > 0) { for (int i = 0; i < n->num_outputs(); ++i) { if (!outputs_required[i]) { metrics::RecordUnusedOutput(n->type_string()); } } item->outputs_required = std::move(outputs_required); } } // Rewrite each `EdgeInfo::input_slot` member to refer directly to the input // location. for (const Node* n : graph.nodes()) { if (IsSink(n)) continue; const int id = n->id(); NodeItem* item = gview_.node(id); for (EdgeInfo& e : item->mutable_output_edges()) { const int dst_id = e.dst_id; NodeItem* dst_item = gview_.node(dst_id); e.input_slot += dst_item->input_start; } } // Initialize PendingCounts only after pending_ids_[node.id] is initialized // for all nodes. InitializePending(&graph, cf_info); return gview_.SetAllocAttrs(&graph, params_.device); }
1
484,757
/* Xen netfront xenbus callback: reacts to backend state transitions.
 * InitWait (only from our Initialising state) triggers xennet_connect() and,
 * on success, advertises Connected; Connected notifies peers; Closing/Closed
 * (once, guarded against repeats) close the frontend.  All other states are
 * ignored.  Also wakes any waiters on module_wq on every change. */
static void netback_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); wake_up_all(&module_wq); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (xennet_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: netdev_notify_peers(netdev); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; fallthrough; /* Missed the backend's CLOSING state */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; } }
0
408,973
/*
 * Insert an end-of-line for character "c" in Insert mode: checks
 * abbreviations, saves undo state, handles Replace-mode bookkeeping
 * (push NUL so nothing is restored when the NL is deleted), adjusts the
 * cursor for virtual-edit and reverse-insert, records the NL for redo,
 * and opens a new line via open_line().
 * Returns the open_line() result (OK/FAIL), or FAIL if stop_arrow() fails.
 */
ins_eol(int c) { int i; if (echeck_abbr(c + ABBR_OFF)) return OK; if (stop_arrow() == FAIL) return FAIL; undisplay_dollar(); /* * Strange Vi behaviour: In Replace mode, typing a NL will not delete the * character under the cursor. Only push a NUL on the replace stack, * nothing to put back when the NL is deleted. */ if ((State & REPLACE_FLAG) && !(State & VREPLACE_FLAG)) replace_push(NUL); /* * In VREPLACE mode, a NL replaces the rest of the line, and starts * replacing the next line, so we push all of the characters left on the * line onto the replace stack. This is not done here though, it is done * in open_line(). */ // Put cursor on NUL if on the last char and coladd is 1 (happens after // CTRL-O). if (virtual_active() && curwin->w_cursor.coladd > 0) coladvance(getviscol()); #ifdef FEAT_RIGHTLEFT // NL in reverse insert will always start in the end of // current line. if (revins_on) curwin->w_cursor.col += (colnr_T)STRLEN(ml_get_cursor()); #endif AppendToRedobuff(NL_STR); i = open_line(FORWARD, has_format_option(FO_RET_COMS) ? OPENLINE_DO_COM : 0, old_indent, NULL); old_indent = 0; #ifdef FEAT_CINDENT can_cindent = TRUE; #endif #ifdef FEAT_FOLDING // When inserting a line the cursor line must never be in a closed fold. foldOpenCursor(); #endif return i; }
0
512,811
/* Number of columns this item produces: one per argument. */
uint cols() const { return arg_count; }
0
259,269
/* Seek one MOV/MP4 stream to "timestamp" (given as PTS, shifted onto the DTS
 * timeline via min_corrected_pts + dts_shift).  After an optional fragment
 * seek, repeatedly searches the index, stepping the target back by the
 * minimum sample duration until a sample usable as a keyframe seek point is
 * found; then updates the current sample and re-derives the matching ctts
 * (composition offset) and stsc (sample-to-chunk) positions.
 * Returns the sample index, or a negative AVERROR on failure. */
static int mov_seek_stream(AVFormatContext *s, AVStream *st, int64_t timestamp, int flags) { MOVStreamContext *sc = st->priv_data; FFStream *const sti = ffstream(st); int sample, time_sample, ret; unsigned int i; // Here we consider timestamp to be PTS, hence try to offset it so that we // can search over the DTS timeline. timestamp -= (sc->min_corrected_pts + sc->dts_shift); ret = mov_seek_fragment(s, st, timestamp); if (ret < 0) return ret; for (;;) { sample = av_index_search_timestamp(st, timestamp, flags); av_log(s, AV_LOG_TRACE, "stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, sample); if (sample < 0 && sti->nb_index_entries && timestamp < sti->index_entries[0].timestamp) sample = 0; if (sample < 0) /* not sure what to do */ return AVERROR_INVALIDDATA; if (!sample || can_seek_to_key_sample(st, sample, timestamp)) break; timestamp -= FFMAX(sc->min_sample_duration, 1); } mov_current_sample_set(sc, sample); av_log(s, AV_LOG_TRACE, "stream %d, found sample %d\n", st->index, sc->current_sample); /* adjust ctts index */ if (sc->ctts_data) { time_sample = 0; for (i = 0; i < sc->ctts_count; i++) { int next = time_sample + sc->ctts_data[i].count; if (next > sc->current_sample) { sc->ctts_index = i; sc->ctts_sample = sc->current_sample - time_sample; break; } time_sample = next; } } /* adjust stsd index */ if (sc->chunk_count) { time_sample = 0; for (i = 0; i < sc->stsc_count; i++) { int64_t next = time_sample + mov_get_stsc_samples(sc, i); if (next > sc->current_sample) { sc->stsc_index = i; sc->stsc_sample = sc->current_sample - time_sample; break; } av_assert0(next == (int)next); time_sample = next; } } return sample; }
0
513,350
/* Try to convert this equality predicate into multiple-equality form.
 * ROW = ROW comparisons are decomposed column-by-column through
 * check_row_equality(); scalar comparisons go to check_simple_equality()
 * with an ANY_SUBST context carrying the comparison type and collation.
 * Returns the delegate's result (true if the equality was absorbed). */
bool Item_func_eq::check_equality(THD *thd, COND_EQUAL *cond_equal, List<Item> *eq_list) { Item *left_item= arguments()[0]; Item *right_item= arguments()[1]; if (left_item->type() == Item::ROW_ITEM && right_item->type() == Item::ROW_ITEM) { return check_row_equality(thd, cmp.subcomparators(), (Item_row *) left_item, (Item_row *) right_item, cond_equal, eq_list); } return check_simple_equality(thd, Context(ANY_SUBST, compare_type(), compare_collation()), left_item, right_item, cond_equal); }
0
216,800
/*
 * Read one length-prefixed buffer from the wire data: a 1-byte length
 * followed by that many payload bytes.  The payload is copied into a
 * pool-allocated buffer returned via *buffer, and *data is advanced past
 * the field.  Returns the payload length, or 0 if the input is truncated.
 *
 * Fix: the original guard was "if (p > end)", which lets p == end through
 * and then dereferences one byte past the end of the input when reading
 * the length prefix.  The check must be "p >= end" so at least the length
 * byte is known to be in range.
 */
rpa_read_buffer(pool_t pool, const unsigned char **data,
		const unsigned char *end, unsigned char **buffer)
{
	const unsigned char *p = *data;
	unsigned int len;

	/* Need at least one byte for the length prefix. */
	if (p >= end)
		return 0;

	len = *p++;
	/* Payload must fit entirely within the remaining input. */
	if (p + len > end)
		return 0;

	*buffer = p_malloc(pool, len);
	memcpy(*buffer, p, len);

	*data += 1 + len;

	return len;
}
1
294,717
/*
 * Convert a (year, week-number, day) triple to a Julian day number.
 * "f" is the week-start day offset, "sg" the Gregorian reform day.
 * Finds the first day of year y, anchors on the last day-f boundary at or
 * before it, then adds 7*w + d.  *ns reports which calendar (0 = Julian,
 * 1 = Gregorian) the resulting day falls in relative to sg.
 */
c_weeknum_to_jd(int y, int w, int d, int f, double sg, int *rjd, int *ns) { int rjd2, ns2; c_find_fdoy(y, sg, &rjd2, &ns2); rjd2 += 6; *rjd = (rjd2 - MOD(((rjd2 - f) + 1), 7) - 7) + 7 * w + d; *ns = (*rjd < sg) ? 0 : 1; }
0
244,151
/* Destructor for the 'chnl' box: frees the box structure itself (no
 * internal allocations to release). */
void chnl_box_del(GF_Box *s) { gf_free(s); }
0
432,212
/* Remove all CPU watchpoints whose flags match "mask".
 * NOTE: the whole body is compiled out with "#if 0", so this build's
 * implementation is intentionally a no-op; the disabled code shows the
 * intended safe-iteration removal loop. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask) { #if 0 CPUWatchpoint *wp, *next; QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) { if (wp->flags & mask) { cpu_watchpoint_remove_by_ref(cpu, wp); } } #endif }
0
175,685
// Default no-op implementation; subclasses override to initiate a
// connection to the given cellular network.
virtual void ConnectToCellularNetwork(const CellularNetwork* network) {}
0
359,518
/* CLI command "bgp bestpath compare-routerid": sets the
 * BGP_FLAG_COMPARE_ROUTER_ID flag on the BGP instance attached to this vty,
 * making bestpath selection tie-break identical EBGP paths by router-id. */
DEFUN (bgp_bestpath_compare_router_id, bgp_bestpath_compare_router_id_cmd, "bgp bestpath compare-routerid", "BGP specific commands\n" "Change the default bestpath selection\n" "Compare router-id for identical EBGP paths\n") { struct bgp *bgp; bgp = vty->index; bgp_flag_set (bgp, BGP_FLAG_COMPARE_ROUTER_ID); return CMD_SUCCESS; }
0
384,195
/* netlink NFT_MSG_NEWTABLE handler: creates a new nftables table (or, when
 * the name already exists and neither EXCL nor REPLACE was requested,
 * delegates to nf_tables_updtable).  Validates the family and flags,
 * allocates the table plus name/userdata copies and the chain hash table,
 * records ownership (NFT_TABLE_F_OWNER -> netlink portid), then queues the
 * addition as a transaction before linking the table into the pernet list.
 * Error unwinding releases exactly what was set up, in reverse order. */
static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { struct nftables_pernet *nft_net = nft_pernet(info->net); struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_next(info->net); u8 family = info->nfmsg->nfgen_family; struct net *net = info->net; const struct nlattr *attr; struct nft_table *table; struct nft_ctx ctx; u32 flags = 0; int err; if (!nft_supported_family(family)) return -EOPNOTSUPP; lockdep_assert_held(&nft_net->commit_mutex); attr = nla[NFTA_TABLE_NAME]; table = nft_table_lookup(net, attr, family, genmask, NETLINK_CB(skb).portid); if (IS_ERR(table)) { if (PTR_ERR(table) != -ENOENT) return PTR_ERR(table); } else { if (info->nlh->nlmsg_flags & NLM_F_EXCL) { NL_SET_BAD_ATTR(extack, attr); return -EEXIST; } if (info->nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); return nf_tables_updtable(&ctx); } if (nla[NFTA_TABLE_FLAGS]) { flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS])); if (flags & ~NFT_TABLE_F_MASK) return -EOPNOTSUPP; } err = -ENOMEM; table = kzalloc(sizeof(*table), GFP_KERNEL_ACCOUNT); if (table == NULL) goto err_kzalloc; table->name = nla_strdup(attr, GFP_KERNEL_ACCOUNT); if (table->name == NULL) goto err_strdup; if (nla[NFTA_TABLE_USERDATA]) { table->udata = nla_memdup(nla[NFTA_TABLE_USERDATA], GFP_KERNEL_ACCOUNT); if (table->udata == NULL) goto err_table_udata; table->udlen = nla_len(nla[NFTA_TABLE_USERDATA]); } err = rhltable_init(&table->chains_ht, &nft_chain_ht_params); if (err) goto err_chain_ht; INIT_LIST_HEAD(&table->chains); INIT_LIST_HEAD(&table->sets); INIT_LIST_HEAD(&table->objects); INIT_LIST_HEAD(&table->flowtables); table->family = family; table->flags = flags; table->handle = ++nft_net->table_handle; if (table->flags & NFT_TABLE_F_OWNER) table->nlpid = NETLINK_CB(skb).portid; nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); err = nft_trans_table_add(&ctx, 
NFT_MSG_NEWTABLE); if (err < 0) goto err_trans; list_add_tail_rcu(&table->list, &nft_net->tables); return 0; err_trans: rhltable_destroy(&table->chains_ht); err_chain_ht: kfree(table->udata); err_table_udata: kfree(table->name); err_strdup: kfree(table); err_kzalloc: return err; }
0
517,455
/* Emit the common HTML <head> plus page chrome (inline CSS, meta refresh of
 * "refresh" seconds, cache-busting headers, favicon and the top navigation
 * bar linking Home > path/name) into the response buffer.  The format string
 * consumes, in order: Run.system->name, refresh, path, name, VERSION.
 * NOTE: the literal output uses %% escapes throughout; any edit to this
 * string changes the rendered page byte-for-byte, so treat it as data. */
static void do_head(HttpResponse res, const char *path, const char *name, int refresh) { StringBuffer_append(res->outputbuffer, "<!DOCTYPE html>"\ "<html>"\ "<head>"\ "<title>Monit: %s</title> "\ "<style type=\"text/css\"> "\ " html, body {height: 100%%;margin: 0;} "\ " body {background-color: white;font: normal normal normal 16px/20px 'HelveticaNeue', Helvetica, Arial, sans-serif; color:#222;} "\ " h1 {padding:30px 0 10px 0; text-align:center;color:#222;font-size:28px;} "\ " h2 {padding:20px 0 10px 0; text-align:center;color:#555;font-size:22px;} "\ " a:hover {text-decoration: none;} "\ " a {text-decoration: underline;color:#222} "\ " table {border-collapse:collapse; border:0;} "\ " .stripe {background:#EDF5FF} "\ " .rule {background:#ddd} "\ " .red-text {color:#ff0000;} "\ " .green-text {color:#00ff00;} "\ " .gray-text {color:#999999;} "\ " .blue-text {color:#0000ff;} "\ " .yellow-text {color:#ffff00;} "\ " .orange-text {color:#ff8800;} "\ " .short {overflow: hidden; text-overflow: ellipsis; white-space: nowrap; max-width: 350px;}"\ " .column {min-width: 80px;} "\ " .left {text-align:left} "\ " .right {text-align:right} "\ " .center {text-align:center} "\ " #wrap {min-height: 100%%;} "\ " #main {overflow:auto; padding-bottom:50px;} "\ " /*Opera Fix*/body:before {content:\"\";height:100%%;float:left;width:0;margin-top:-32767px;} "\ " #footer {position: relative;margin-top: -50px; height: 50px; clear:both; font-size:11px;color:#777;text-align:center;} "\ " #footer a {color:#333;} #footer a:hover {text-decoration: none;} "\ " #nav {background:#ddd;font:normal normal normal 14px/0px 'HelveticaNeue', Helvetica;} "\ " #nav td {padding:5px 10px;} "\ " #header {margin-bottom:30px;background:#EFF7FF} "\ " #nav, #header {border-bottom:1px solid #ccc;} "\ " #header-row {width:95%%;} "\ " #header-row th {padding:30px 10px 10px 10px;font-size:120%%;} "\ " #header-row td {padding:3px 10px;} "\ " #header-row .first 
{min-width:200px;width:200px;white-space:nowrap;overflow:hidden;text-overflow:ellipsis;} "\ " #status-table {width:95%%;} "\ " #status-table th {text-align:left;background:#edf5ff;font-weight:normal;} "\ " #status-table th, #status-table td, #status-table tr {border:1px solid #ccc;padding:5px;} "\ " #buttons {font-size:20px; margin:40px 0 20px 0;} "\ " #buttons td {padding-right:50px;} "\ " #buttons input {font-size:18px;padding:5px;} "\ "</style>"\ "<meta HTTP-EQUIV='REFRESH' CONTENT=%d> "\ "<meta HTTP-EQUIV='Expires' Content=0> "\ "<meta HTTP-EQUIV='Pragma' CONTENT='no-cache'> "\ "<meta charset='UTF-8'>" \ "<link rel='shortcut icon' href='favicon.ico'>"\ "</head>"\ "<body><div id='wrap'><div id='main'>" \ "<table id='nav' width='100%%'>"\ " <tr>"\ " <td width='20%%'><a href='.'>Home</a>&nbsp;&gt;&nbsp;<a href='%s'>%s</a></td>"\ " <td width='60%%' style='text-align:center;'>Use <a href='https://mmonit.com/'>M/Monit</a> to manage all your Monit instances</td>"\ " <td width='20%%'><p class='right'><a href='_about'>Monit %s</a></td>"\ " </tr>"\ "</table>"\ "<center>", Run.system->name, refresh, path, name, VERSION); }
0
218,771
/* Draw a 3-D beveled border around the widget rectangle described by
 * "bevel_info": two six-point polygons, one for the upper/left edge and one
 * for the lower/right edge, with highlight/shadow colors swapped via
 * XSetBevelColor depending on whether the widget is raised.  Restores a
 * solid fill style before returning. */
static void XDrawBevel(Display *display,const XWindowInfo *window_info, const XWidgetInfo *bevel_info) { int x1, x2, y1, y2; unsigned int bevel_width; XPoint points[6]; /* Draw upper and left beveled border. */ x1=bevel_info->x; y1=bevel_info->y+bevel_info->height; x2=bevel_info->x+bevel_info->width; y2=bevel_info->y; bevel_width=bevel_info->bevel_width; points[0].x=x1; points[0].y=y1; points[1].x=x1; points[1].y=y2; points[2].x=x2; points[2].y=y2; points[3].x=x2+bevel_width; points[3].y=y2-bevel_width; points[4].x=x1-bevel_width; points[4].y=y2-bevel_width; points[5].x=x1-bevel_width; points[5].y=y1+bevel_width; XSetBevelColor(display,window_info,bevel_info->raised); (void) XFillPolygon(display,window_info->id,window_info->widget_context, points,6,Complex,CoordModeOrigin); /* Draw lower and right beveled border. */ points[0].x=x1; points[0].y=y1; points[1].x=x2; points[1].y=y1; points[2].x=x2; points[2].y=y2; points[3].x=x2+bevel_width; points[3].y=y2-bevel_width; points[4].x=x2+bevel_width; points[4].y=y1+bevel_width; points[5].x=x1-bevel_width; points[5].y=y1+bevel_width; XSetBevelColor(display,window_info,!bevel_info->raised); (void) XFillPolygon(display,window_info->id,window_info->widget_context, points,6,Complex,CoordModeOrigin); (void) XSetFillStyle(display,window_info->widget_context,FillSolid); }
0
313,797
/*
 * Execute the action chosen from the tabline popup menu (current_tabmenu):
 * close, create, or browse-open a tab.  current_tab == 0 means "act on the
 * current tab"; otherwise the 1-based tab number is spliced into the Ex
 * command via IObuff before running it with do_cmdline_cmd().
 */
handle_tabmenu(void) { switch (current_tabmenu) { case TABLINE_MENU_CLOSE: if (current_tab == 0) do_cmdline_cmd((char_u *)"tabclose"); else { vim_snprintf((char *)IObuff, IOSIZE, "tabclose %d", current_tab); do_cmdline_cmd(IObuff); } break; case TABLINE_MENU_NEW: if (current_tab == 0) do_cmdline_cmd((char_u *)"$tabnew"); else { vim_snprintf((char *)IObuff, IOSIZE, "%dtabnew", current_tab - 1); do_cmdline_cmd(IObuff); } break; case TABLINE_MENU_OPEN: if (current_tab == 0) do_cmdline_cmd((char_u *)"browse $tabnew"); else { vim_snprintf((char *)IObuff, IOSIZE, "browse %dtabnew", current_tab - 1); do_cmdline_cmd(IObuff); } break; } }
0
276,896
/* Select the active I2C bus for subsequent "i2c" commands by sequence
 * number.  Returns 0 and updates i2c_cur_bus on success, or the
 * uclass_get_device_by_seq() error if no such bus exists. */
static int cmd_i2c_set_bus_num(unsigned int busnum) { struct udevice *bus; int ret; ret = uclass_get_device_by_seq(UCLASS_I2C, busnum, &bus); if (ret) { debug("%s: No bus %d\n", __func__, busnum); return ret; } i2c_cur_bus = bus; return 0; }
0
273,077
/*
 * Log an unrecoverable errno-style failure (function, line, numeric error
 * and its strerror() text) at E_FATAL level, then abort() — never returns.
 */
log_fatal_err(int domain, const char *func, int line, int err) { DPRINTF(E_FATAL, domain, "%s failed at line %d, error %d (%s)\n", func, line, err, strerror(err)); abort(); }
0
238,391
/*
 * Implements the JS Function()/AsyncFunction() constructor: concatenates the
 * argument strings into "(function(args){body})" source text, lexes, parses
 * and code-generates it, then wraps the resulting lambda in a new global
 * function object.  In safe mode (!vm->options.unsafe) only the exact AST of
 * "(function(){return this})" (sync or async) is permitted; anything else
 * fails with a type error.
 * NOTE(review): several early "return ret" error paths after njs_chb_init()
 * skip njs_chb_destroy(&chain) — looks like a leak of the chain buffers on
 * parse/codegen failure; confirm njs_chb ownership semantics.
 */
njs_function_constructor(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t async) { njs_chb_t chain; njs_int_t ret; njs_str_t str, file; njs_uint_t i; njs_lexer_t lexer; njs_parser_t parser; njs_vm_code_t *code; njs_function_t *function; njs_generator_t generator; njs_parser_node_t *node; njs_parser_scope_t *scope; njs_function_lambda_t *lambda; const njs_token_type_t *type; static const njs_token_type_t safe_ast[] = { NJS_TOKEN_END, NJS_TOKEN_FUNCTION_EXPRESSION, NJS_TOKEN_STATEMENT, NJS_TOKEN_RETURN, NJS_TOKEN_THIS, NJS_TOKEN_ILLEGAL }; static const njs_token_type_t safe_ast_async[] = { NJS_TOKEN_END, NJS_TOKEN_ASYNC_FUNCTION_EXPRESSION, NJS_TOKEN_STATEMENT, NJS_TOKEN_RETURN, NJS_TOKEN_THIS, NJS_TOKEN_ILLEGAL }; if (!vm->options.unsafe && nargs != 2) { goto fail; } njs_chb_init(&chain, vm->mem_pool); if (async) { njs_chb_append_literal(&chain, "(async function("); } else { njs_chb_append_literal(&chain, "(function("); } for (i = 1; i < nargs - 1; i++) { ret = njs_value_to_chain(vm, &chain, njs_argument(args, i)); if (njs_slow_path(ret < NJS_OK)) { return ret; } if (i != (nargs - 2)) { njs_chb_append_literal(&chain, ","); } } njs_chb_append_literal(&chain, "){"); ret = njs_value_to_chain(vm, &chain, njs_argument(args, nargs - 1)); if (njs_slow_path(ret < NJS_OK)) { return ret; } njs_chb_append_literal(&chain, "})"); ret = njs_chb_join(&chain, &str); if (njs_slow_path(ret != NJS_OK)) { njs_memory_error(vm); return NJS_ERROR; } file = njs_str_value("runtime"); ret = njs_lexer_init(vm, &lexer, &file, str.start, str.start + str.length, 1); if (njs_slow_path(ret != NJS_OK)) { return ret; } njs_memzero(&parser, sizeof(njs_parser_t)); parser.lexer = &lexer; ret = njs_parser(vm, &parser); if (njs_slow_path(ret != NJS_OK)) { return ret; } if (!vm->options.unsafe) { /* * Safe mode exception: * "(new Function('return this'))" is often used to get * the global object in a portable way. */ node = parser.node; type = (async) ? 
&safe_ast_async[0] : &safe_ast[0]; for (; *type != NJS_TOKEN_ILLEGAL; type++, node = node->right) { if (node == NULL) { goto fail; } if (node->left != NULL && node->token_type != NJS_TOKEN_FUNCTION_EXPRESSION && node->left->token_type != NJS_TOKEN_NAME) { goto fail; } if (node->token_type != *type) { goto fail; } } } scope = parser.scope; ret = njs_variables_copy(vm, &scope->variables, vm->variables_hash); if (njs_slow_path(ret != NJS_OK)) { return ret; } ret = njs_generator_init(&generator, 0, 1); if (njs_slow_path(ret != NJS_OK)) { njs_internal_error(vm, "njs_generator_init() failed"); return NJS_ERROR; } code = njs_generate_scope(vm, &generator, scope, &njs_entry_anonymous); if (njs_slow_path(code == NULL)) { if (!njs_is_error(&vm->retval)) { njs_internal_error(vm, "njs_generate_scope() failed"); } return NJS_ERROR; } njs_chb_destroy(&chain); lambda = ((njs_vmcode_function_t *) generator.code_start)->lambda; function = njs_function_alloc(vm, lambda, (njs_bool_t) async); if (njs_slow_path(function == NULL)) { return NJS_ERROR; } function->global = 1; function->global_this = 1; function->args_count = lambda->nargs - lambda->rest_parameters; njs_set_function(&vm->retval, function); return NJS_OK; fail: njs_type_error(vm, "function constructor is disabled in \"safe\" mode"); return NJS_ERROR; }
0
221,667
/* Validate the peer's TLS certificate for "hostname".
 * Returns -1 if the peer presented no certificate at all; otherwise, on
 * OpenSSL >= 1.1, installs an X509_VERIFY_PARAM hostname check on the
 * context and returns SSL_get_verify_result() (X509_V_OK on success).
 * NOTE(review): on pre-1.1 OpenSSL the #if branch is empty, so no hostname
 * check is applied there — presumably handled elsewhere; confirm. */
long Socket::checkCertValid(String &hostname) { //check we have a certificate X509 *peerCert = SSL_get_peer_certificate(ssl); if (peerCert == NULL) { return -1; } X509_free(peerCert); #if OPENSSL_VERSION_NUMBER < 0x10100000L #else // section for openssl1.1 X509_VERIFY_PARAM *param; param = X509_VERIFY_PARAM_new() ; X509_VERIFY_PARAM_set1_host(param,hostname.c_str(), hostname.length()); SSL_CTX_set1_param(ctx,param); X509_VERIFY_PARAM_free(param); #endif return SSL_get_verify_result(ssl); }
0
248,252
/* Convenience wrapper: return the pointer value of option "name" at index 0
 * (equivalent to cfg_getnptr(cfg, name, 0)). */
DLLIMPORT void *cfg_getptr(cfg_t *cfg, const char *name) { return cfg_getnptr(cfg, name, 0); }
0
418,780
is_mouse_key(int c) { return c == K_LEFTMOUSE || c == K_LEFTMOUSE_NM || c == K_LEFTDRAG || c == K_LEFTRELEASE || c == K_LEFTRELEASE_NM || c == K_MOUSEMOVE || c == K_MIDDLEMOUSE || c == K_MIDDLEDRAG || c == K_MIDDLERELEASE || c == K_RIGHTMOUSE || c == K_RIGHTDRAG || c == K_RIGHTRELEASE || c == K_MOUSEDOWN || c == K_MOUSEUP || c == K_MOUSELEFT || c == K_MOUSERIGHT || c == K_X1MOUSE || c == K_X1DRAG || c == K_X1RELEASE || c == K_X2MOUSE || c == K_X2DRAG || c == K_X2RELEASE; }
0
317,149
/* LSM hook: when the kernel acts on behalf of "inode", set the new cred's
 * Smack task label (and forked label) to the inode's Smack label so files
 * created under these creds inherit it.  Always succeeds. */
static int smack_kernel_create_files_as(struct cred *new, struct inode *inode) { struct inode_smack *isp = smack_inode(inode); struct task_smack *tsp = smack_cred(new); tsp->smk_forked = isp->smk_inode; tsp->smk_task = tsp->smk_forked; return 0; }
0
247,557
// Negative test for multi-cert private-key providers: the server offers an
// RSA cert (async provider) and an ECDSA P-256 cert whose provider is
// configured with async_method_error, while the client pins the ECDSA cert
// hash.  The handshake must fail locally on the server with an
// ssl.connection_error stat.
TEST_P(SslSocketTest, RsaAndEcdsaPrivateKeyProviderMultiCertFail) { const std::string client_ctx_yaml = absl::StrCat(R"EOF( common_tls_context: tls_params: tls_minimum_protocol_version: TLSv1_2 tls_maximum_protocol_version: TLSv1_2 cipher_suites: - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 validation_context: verify_certificate_hash: )EOF", TEST_SELFSIGNED_ECDSA_P256_CERT_256_HASH); const std::string server_ctx_yaml = R"EOF( common_tls_context: tls_certificates: - certificate_chain: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_cert.pem" private_key_provider: provider_name: test typed_config: "@type": type.googleapis.com/google.protobuf.Struct value: private_key_file: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_key.pem" expected_operation: sign sync_mode: false mode: rsa - certificate_chain: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_cert.pem" private_key_provider: provider_name: test typed_config: "@type": type.googleapis.com/google.protobuf.Struct value: private_key_file: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/selfsigned_ecdsa_p256_key.pem" expected_operation: sign async_method_error: true mode: ecdsa )EOF"; TestUtilOptions failing_test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam()); testUtil(failing_test_options.setPrivateKeyMethodExpected(true) .setExpectedServerCloseEvent(Network::ConnectionEvent::LocalClose) .setExpectedServerStats("ssl.connection_error")); }
0
462,540
// Push the current feed list to the view, holding feeds_mutex for the
// duration so the list cannot change while the view reads it.
void controller::update_visible_feeds() { std::lock_guard<std::mutex> feedslock(feeds_mutex); v->update_visible_feeds(feeds); }
0
247,159
/* Fire a filter event either on one filter "f" or, when f is NULL, broadcast
 * it to every event-target filter in the session (skipping disabled, removed
 * and multi-sink-target filters, stopping at the first that cancels it).
 * For a single filter, USER events go straight to process_event(); other
 * events are forwarded with gf_filter_send_event() when the filter has pids
 * in the requested direction.  evt->base.on_pid is cleared for the dispatch
 * and restored before returning.  Returns GF_TRUE if any filter saw the
 * event. */
Bool gf_fs_fire_event(GF_FilterSession *fs, GF_Filter *f, GF_FilterEvent *evt, Bool upstream) { Bool ret = GF_FALSE; if (!fs || !evt) return GF_FALSE; GF_FilterPid *on_pid = evt->base.on_pid; evt->base.on_pid = NULL; if (f) { if (evt->base.type==GF_FEVT_USER) { if (f->freg->process_event && f->event_target) { gf_mx_p(f->tasks_mx); f->freg->process_event(f, evt); gf_mx_v(f->tasks_mx); ret = GF_TRUE; } } if (!ret) { gf_mx_p(f->tasks_mx); if (f->num_output_pids && upstream) ret = GF_TRUE; else if (f->num_input_pids && !upstream) ret = GF_TRUE; gf_filter_send_event(f, evt, upstream); gf_mx_v(f->tasks_mx); } } else { u32 i, count; gf_fs_lock_filters(fs, GF_TRUE); count = gf_list_count(fs->filters); for (i=0; i<count; i++) { Bool canceled; f = gf_list_get(fs->filters, i); if (f->disabled || f->removed) continue; if (f->multi_sink_target) continue; if (!f->freg->process_event) continue; if (!f->event_target) continue; gf_mx_p(f->tasks_mx); canceled = f->freg->process_event(f, evt); gf_mx_v(f->tasks_mx); ret = GF_TRUE; if (canceled) break; } gf_fs_lock_filters(fs, GF_FALSE); } evt->base.on_pid = on_pid; return ret; }
0
230,972
/*
 * Compute the block-argument stack index for a callinfo by packing the
 * positional-arg count (low nibble) and keyword-arg count (shifted into the
 * next nibble) before delegating to mrb_bidx().
 */
mrb_ci_bidx(mrb_callinfo *ci) { return mrb_bidx(ci->n|(ci->nk<<4)); }
0
417,059
// Maximum number of virtual channels.  Outside MilkyTracker builds, an
// IT-type player reports its own live value; otherwise (or in MilkyTracker
// builds) the cached numMaxVirChannels member is returned.
mp_sint32 PlayerGeneric::getNumMaxVirChannels() const { #ifndef MILKYTRACKER if (player) { if (player->getType() == PlayerBase::PlayerType_IT) { return static_cast<PlayerIT*>(player)->getNumMaxVirChannels(); } } #endif return numMaxVirChannels; }
0
477,271
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending) { struct tipc_aead *tmp1, *tmp2 = NULL; struct tipc_key key; bool aligned = false; u8 new_passive = 0; int x; spin_lock(&rx->lock); key = rx->key; if (key.pending == new_pending) { aligned = true; goto exit; } if (key.active) goto exit; if (!key.pending) goto exit; if (tipc_aead_users(rx->aead[key.pending]) > 0) goto exit; /* Try to "isolate" this pending key first */ tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock); if (!refcount_dec_if_one(&tmp1->refcnt)) goto exit; rcu_assign_pointer(rx->aead[key.pending], NULL); /* Move passive key if any */ if (key.passive) { tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2, lockdep_is_held(&rx->lock)); x = (key.passive - key.pending + new_pending) % KEY_MAX; new_passive = (x <= 0) ? x + KEY_MAX : x; } /* Re-allocate the key(s) */ tipc_crypto_key_set_state(rx, new_passive, 0, new_pending); rcu_assign_pointer(rx->aead[new_pending], tmp1); if (new_passive) rcu_assign_pointer(rx->aead[new_passive], tmp2); refcount_set(&tmp1->refcnt, 1); aligned = true; pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending, new_pending); exit: spin_unlock(&rx->lock); return aligned; }
0
455,412
xfs_reclaim_inode( struct xfs_inode *ip, struct xfs_perag *pag, int sync_mode) { struct xfs_buf *bp = NULL; xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */ int error; restart: error = 0; xfs_ilock(ip, XFS_ILOCK_EXCL); if (!xfs_iflock_nowait(ip)) { if (!(sync_mode & SYNC_WAIT)) goto out; xfs_iflock(ip); } if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_iunpin_wait(ip); /* xfs_iflush_abort() drops the flush lock */ xfs_iflush_abort(ip, false); goto reclaim; } if (xfs_ipincount(ip)) { if (!(sync_mode & SYNC_WAIT)) goto out_ifunlock; xfs_iunpin_wait(ip); } if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) { xfs_ifunlock(ip); goto reclaim; } /* * Never flush out dirty data during non-blocking reclaim, as it would * just contend with AIL pushing trying to do the same job. */ if (!(sync_mode & SYNC_WAIT)) goto out_ifunlock; /* * Now we have an inode that needs flushing. * * Note that xfs_iflush will never block on the inode buffer lock, as * xfs_ifree_cluster() can lock the inode buffer before it locks the * ip->i_lock, and we are doing the exact opposite here. As a result, * doing a blocking xfs_imap_to_bp() to get the cluster buffer would * result in an ABBA deadlock with xfs_ifree_cluster(). * * As xfs_ifree_cluser() must gather all inodes that are active in the * cache to mark them stale, if we hit this case we don't actually want * to do IO here - we want the inode marked stale so we can simply * reclaim it. Hence if we get an EAGAIN error here, just unlock the * inode, back off and try again. Hopefully the next pass through will * see the stale flag set on the inode. 
*/ error = xfs_iflush(ip, &bp); if (error == -EAGAIN) { xfs_iunlock(ip, XFS_ILOCK_EXCL); /* backoff longer than in xfs_ifree_cluster */ delay(2); goto restart; } if (!error) { error = xfs_bwrite(bp); xfs_buf_relse(bp); } reclaim: ASSERT(!xfs_isiflocked(ip)); /* * Because we use RCU freeing we need to ensure the inode always appears * to be reclaimed with an invalid inode number when in the free state. * We do this as early as possible under the ILOCK so that * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to * detect races with us here. By doing this, we guarantee that once * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that * it will see either a valid inode that will serialise correctly, or it * will see an invalid inode that it can skip. */ spin_lock(&ip->i_flags_lock); ip->i_flags = XFS_IRECLAIM; ip->i_ino = 0; spin_unlock(&ip->i_flags_lock); xfs_iunlock(ip, XFS_ILOCK_EXCL); XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); /* * Remove the inode from the per-AG radix tree. * * Because radix_tree_delete won't complain even if the item was never * added to the tree assert that it's been there before to catch * problems with the inode life time early on. */ spin_lock(&pag->pag_ici_lock); if (!radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(ip->i_mount, ino))) ASSERT(0); xfs_perag_clear_reclaim_tag(pag); spin_unlock(&pag->pag_ici_lock); /* * Here we do an (almost) spurious inode lock in order to coordinate * with inode cache radix tree lookups. This is because the lookup * can reference the inodes in the cache without taking references. * * We make that OK here by ensuring that we wait until the inode is * unlocked after the lookup before we go ahead and free it. 
*/ xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_qm_dqdetach(ip); xfs_iunlock(ip, XFS_ILOCK_EXCL); __xfs_inode_free(ip); return error; out_ifunlock: xfs_ifunlock(ip); out: xfs_iflags_clear(ip, XFS_IRECLAIM); xfs_iunlock(ip, XFS_ILOCK_EXCL); /* * We could return -EAGAIN here to make reclaim rescan the inode tree in * a short while. However, this just burns CPU time scanning the tree * waiting for IO to complete and the reclaim work never goes back to * the idle state. Instead, return 0 to let the next scheduled * background reclaim attempt to reclaim the inode again. */ return 0; }
0
498,618
bgr2rgb (guchar *dest, const guchar *src, guint width, guint bytes, guint alpha) { guint x; if (alpha) { for (x = 0; x < width; x++) { *(dest++) = src[2]; *(dest++) = src[1]; *(dest++) = src[0]; *(dest++) = src[3]; src += bytes; } } else { for (x = 0; x < width; x++) { *(dest++) = src[2]; *(dest++) = src[1]; *(dest++) = src[0]; src += bytes; } } }
0
277,476
static MOBI_RET mobi_parse_tagx(MOBIBuffer *buf, MOBITagx *tagx) { tagx->control_byte_count = 0; tagx->tags_count = 0; tagx->tags = NULL; mobi_buffer_seek(buf, 4); /* skip header */ uint32_t tagx_record_length = mobi_buffer_get32(buf); if (tagx_record_length < 12) { debug_print("INDX record too short: %u\n", tagx_record_length); return MOBI_DATA_CORRUPT; } tagx->control_byte_count = mobi_buffer_get32(buf); tagx_record_length -= 12; if (tagx_record_length + buf->offset > buf->maxlen) { debug_print("INDX record too long: %u\n", tagx_record_length); return MOBI_DATA_CORRUPT; } tagx->tags = malloc(tagx_record_length * sizeof(TAGXTags)); if (tagx->tags == NULL) { debug_print("%s", "Memory allocation failed for TAGX tags\n"); return MOBI_MALLOC_FAILED; } size_t i = 0; const size_t tagx_data_length = tagx_record_length / 4; size_t control_byte_count = 0; while (i < tagx_data_length) { tagx->tags[i].tag = mobi_buffer_get8(buf); tagx->tags[i].values_count = mobi_buffer_get8(buf); tagx->tags[i].bitmask = mobi_buffer_get8(buf); const uint8_t control_byte = mobi_buffer_get8(buf); if (control_byte) { control_byte_count++; } tagx->tags[i].control_byte = control_byte; debug_print("tagx[%zu]:\t%i\t%i\t%i\t%i\n", i, tagx->tags[i].tag, tagx->tags[i].values_count, tagx->tags[i].bitmask, control_byte); i++; } if (tagx->control_byte_count != control_byte_count) { debug_print("Wrong count of control bytes: %zu != %zu\n", tagx->control_byte_count, control_byte_count); free(tagx->tags); tagx->tags = NULL; tagx->control_byte_count = 0; return MOBI_DATA_CORRUPT; } tagx->tags_count = i; return MOBI_SUCCESS; }
0
272,333
encode_algorithm_id(cms_context *cms, SECItem *der, SECOidTag tag) { SECAlgorithmID id; int rc = generate_algorithm_id(cms, &id, tag); if (rc < 0) return rc; void *ret; ret = SEC_ASN1EncodeItem(cms->arena, der, &id, SECOID_AlgorithmIDTemplate); if (ret == NULL) cnreterr(-1, cms, "could not encode Algorithm ID"); return 0; }
0
247,697
TEST_P(SslSocketTest, GetIssueExpireTimesPeerCert) { const std::string client_ctx_yaml = R"EOF( common_tls_context: tls_certificates: certificate_chain: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem" private_key: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem" )EOF"; const std::string server_ctx_yaml = R"EOF( common_tls_context: tls_certificates: certificate_chain: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" private_key: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem" validation_context: trusted_ca: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem" require_client_certificate: true )EOF"; TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam()); testUtil(test_options.setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL) .setExpectedValidFromTimePeerCert(TEST_NO_SAN_CERT_NOT_BEFORE) .setExpectedExpirationTimePeerCert(TEST_NO_SAN_CERT_NOT_AFTER)); }
0
175,787
GatherGlobalUsageTask( UsageTracker* tracker, QuotaClient* client) : GatherUsageTaskBase(tracker, client), client_(client), callback_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { DCHECK(tracker); DCHECK(client); }
0
225,385
static inline void set_done(struct v4l2l_buffer *buffer) { buffer->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED; buffer->buffer.flags |= V4L2_BUF_FLAG_DONE; }
0
294,556
c_valid_weeknum_p(int y, int w, int d, int f, double sg, int *rw, int *rd, int *rjd, int *ns) { int ns2, ry2, rw2, rd2; if (d < 0) d += 7; if (w < 0) { int rjd2; c_weeknum_to_jd(y + 1, 1, f, f, sg, &rjd2, &ns2); c_jd_to_weeknum(rjd2 + w * 7, f, sg, &ry2, &rw2, &rd2); if (ry2 != y) return 0; w = rw2; } c_weeknum_to_jd(y, w, d, f, sg, rjd, ns); c_jd_to_weeknum(*rjd, f, sg, &ry2, rw, rd); if (y != ry2 || w != *rw || d != *rd) return 0; return 1; }
0
400,724
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe, size_t count) { BUG_ON(direction != READ); WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size)); *i = (struct iov_iter){ .iter_type = ITER_PIPE, .data_source = false, .pipe = pipe, .head = pipe->head, .start_head = pipe->head, .iov_offset = 0, .count = count }; }
0
488,367
static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32, struct lib64_elfinfo *v64) { Elf32_Sym *sym32; #ifdef CONFIG_PPC64 Elf64_Sym *sym64; sym64 = find_symbol64(v64, "__kernel_datapage_offset"); if (sym64 == NULL) { printk(KERN_ERR "vDSO64: Can't find symbol " "__kernel_datapage_offset !\n"); return -1; } *((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) = (vdso64_pages << PAGE_SHIFT) - (sym64->st_value - VDSO64_LBASE); #endif /* CONFIG_PPC64 */ sym32 = find_symbol32(v32, "__kernel_datapage_offset"); if (sym32 == NULL) { printk(KERN_ERR "vDSO32: Can't find symbol " "__kernel_datapage_offset !\n"); return -1; } *((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) = (vdso32_pages << PAGE_SHIFT) - (sym32->st_value - VDSO32_LBASE); return 0; }
0
376,345
gpg_import_keys_sync (CamelCipherContext *context, CamelStream *istream, GCancellable *cancellable, GError **error) { struct _GpgCtx *gpg; gboolean success = FALSE; gpg = gpg_ctx_new (context); gpg_ctx_set_mode (gpg, GPG_CTX_MODE_IMPORT); gpg_ctx_set_istream (gpg, istream); if (!gpg_ctx_op_start (gpg, error)) goto fail; while (!gpg_ctx_op_complete (gpg)) { if (gpg_ctx_op_step (gpg, cancellable, error) == -1) { gpg_ctx_op_cancel (gpg); goto fail; } } if (gpg_ctx_op_wait (gpg) != 0) { const gchar *diagnostics; diagnostics = gpg_ctx_get_diagnostics (gpg); g_set_error ( error, CAMEL_ERROR, CAMEL_ERROR_GENERIC, "%s", (diagnostics != NULL && *diagnostics != '\0') ? diagnostics : _("Failed to execute gpg.")); goto fail; } success = TRUE; fail: gpg_ctx_free (gpg); return success; }
0
234,805
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, u64 *start, u64 *len) { /* FIXME use last free of some kind */ return find_free_dev_extent_start(device, num_bytes, 0, start, len); }
0
282,969
LJ_NOINLINE void lj_err_lex(lua_State *L, GCstr *src, const char *tok, BCLine line, ErrMsg em, va_list argp) { char buff[LUA_IDSIZE]; const char *msg; lj_debug_shortname(buff, src); msg = lj_str_pushvf(L, err2msg(em), argp); msg = lj_str_pushf(L, "%s:%d: %s", buff, line, msg); if (tok) lj_str_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tok); lj_err_throw(L, LUA_ERRSYNTAX); }
0
139,212
void OverlayWindowViews::Close() { views::Widget::Close(); }
0
231,743
TEST_F(QuicServerTransportTest, TestRegisterAndHandleTransportKnobParams) { int flag = 0; server->registerKnobParamHandler( 199, [&](QuicServerConnectionState* /* server_conn */, uint64_t val) { EXPECT_EQ(val, 10); flag = 1; }); server->registerKnobParamHandler( 200, [&](QuicServerConnectionState* /* server_conn */, uint64_t /* val */) { flag = 2; }); server->handleKnobParams({ {199, 10}, {201, 20}, }); EXPECT_EQ(flag, 1); // ovewrite will fail, the new handler won't be called server->registerKnobParamHandler( 199, [&](QuicServerConnectionState* /* server_conn */, uint64_t val) { EXPECT_EQ(val, 30); flag = 3; }); server->handleKnobParams({ {199, 10}, {201, 20}, }); EXPECT_EQ(flag, 1); }
0
512,372
bool Item_func_not::fix_fields(THD *thd, Item **ref) { args[0]->under_not(this); if (args[0]->type() == FIELD_ITEM) { /* replace "NOT <field>" with "<field> == 0" */ Query_arena backup, *arena; Item *new_item; bool rc= TRUE; arena= thd->activate_stmt_arena_if_needed(&backup); if ((new_item= new (thd->mem_root) Item_func_eq(thd, args[0], new (thd->mem_root) Item_int(thd, 0, 1)))) { new_item->name= name; rc= (*ref= new_item)->fix_fields(thd, ref); } if (arena) thd->restore_active_arena(arena, &backup); return rc; } return Item_func::fix_fields(thd, ref); }
0
270,386
static inline uint32_t ok_inflater_read_bits(ok_inflater *inflater, unsigned int num_bits) { uint32_t ans = inflater->input_buffer & ((1 << num_bits) - 1); inflater->input_buffer >>= num_bits; inflater->input_buffer_bits -= num_bits; return ans; }
0
294,600
m_local_df_in_day(union DateData *x) { return isec_to_day(m_local_df(x)); }
0
512,337
Item *in_subq_field_transformer_for_having(THD *thd, uchar *arg) { return convert_to_basic_const_item(thd); }
0
222,515
Status InstantiateNode(const NodeDef& fnode, AttrSlice attrs) { const OpDef* fnode_sig = nullptr; TF_CHECK_OK(get_function_(fnode.op(), &fnode_sig)); NodeDef* gnode = AddNode(fnode.name()); gnode->set_op(fnode.op()); gnode->set_device(fnode.device()); int gnode_idx = nodes_.size() - 1; // Input const int num_args = fnode_sig->input_arg_size(); bool is_type_list; // ignored DataTypeVector dtypes; int fnode_arg_index = 0; for (int i = 0; i < num_args; ++i) { TF_RETURN_IF_ERROR( ArgNumType(attrs, fnode_sig->input_arg(i), &is_type_list, &dtypes)); // Consume inputs (indexed by fnode_arg_index) until we have // matched each element of dtypes (indexed by j). for (size_t j = 0; j < dtypes.size(); ++fnode_arg_index) { if (fnode_arg_index >= fnode.input_size()) { // Should never happen if we computed dtypes correctly. return errors::InvalidArgument( "Attempt to access beyond input size: ", fnode_arg_index, " >= ", fnode.input_size()); } // Look up the next input. const string& input_name = fnode.input(fnode_arg_index); const auto* item = GetItemOrNull(input_name); if (item == nullptr) { return errors::InvalidArgument( "input ", input_name, " is not found: ", FormatNodeDefForError(fnode)); } if (item->dtypes.size() > dtypes.size() - j) { return errors::InvalidArgument("Input ", input_name, " too long for ", fnode_sig->input_arg(i).name()); } // Match up all the elements of this input (indexed by k) with // elements of dtypes (advancing j). for (int k = 0; k < item->dtypes.size(); ++k, ++j) { if (item->dtypes[k] != dtypes[j]) { return errors::InvalidArgument( "input ", fnode_sig->input_arg(i).name(), "[", j, "] expected type ", DataTypeString(dtypes[j]), " != ", DataTypeString(item->dtypes[k]), ", the type of ", input_name, "[", k, "]"); } if (item->is_func_arg) { AddInput(gnode_idx, item->nid + k, 0); } else { AddInput(gnode_idx, item->nid, item->idx + k); } } } } // Control deps. 
for (int i = fnode_arg_index; i < fnode.input_size(); ++i) { const string& input = fnode.input(i); if (input.empty() || input[0] != '^') { return errors::InvalidArgument("Expected input[", i, "] == '", input, "' to be a control input."); } int nid = -1; const string node_name = input.substr(1); const string node_colon = node_name + ":"; const string node_colon_bound = node_name + ";"; // index_ is a map sorted lexicographically, so the key we are looking for // must lie in the range [node_name, node_colon_bound). auto it = index_.lower_bound(node_name); while (it != index_.end() && it->first <= node_colon_bound) { if (it->first == node_name || absl::StartsWith(it->first, node_colon)) { nid = it->second.nid; break; } ++it; } if (nid == -1) { return errors::InvalidArgument("input[", i, "] == '", input, "', is not found."); } AddDep(gnode_idx, nid); } // Attrs. for (const auto& p : attrs) { (*gnode->mutable_attr())[p.first] = p.second; } // Experimental_debug_info. if (fnode.has_experimental_debug_info()) { gnode->mutable_experimental_debug_info()->MergeFrom( fnode.experimental_debug_info()); } // Tye info. // TODO(mdan): Might this need adjustment at instantiation? if (fnode.has_experimental_type()) { *gnode->mutable_experimental_type() = fnode.experimental_type(); } return Status::OK(); }
0
512,940
virtual bool is_fixed() const { return true; }
0
329,880
_pixman_image_add_tristrip (pixman_image_t *image, int dst_x, int dst_y, cairo_tristrip_t *strip) { pixman_triangle_t tri; pixman_point_fixed_t *p[3] = {&tri.p1, &tri.p2, &tri.p3 }; int n; set_point (p[0], &strip->points[0]); set_point (p[1], &strip->points[1]); set_point (p[2], &strip->points[2]); pixman_add_triangles (image, -dst_x, -dst_y, 1, &tri); for (n = 3; n < strip->num_points; n++) { set_point (p[n%3], &strip->points[n]); pixman_add_triangles (image, -dst_x, -dst_y, 1, &tri); } }
0
428,228
ConnectionExists(struct SessionHandle *data, struct connectdata *needle, struct connectdata **usethis, bool *force_reuse) { struct connectdata *check; struct connectdata *chosen = 0; bool canPipeline = IsPipeliningPossible(data, needle); bool wantNTLMhttp = ((data->state.authhost.want & CURLAUTH_NTLM) || (data->state.authhost.want & CURLAUTH_NTLM_WB)) && (needle->handler->protocol & PROTO_FAMILY_HTTP) ? TRUE : FALSE; struct connectbundle *bundle; *force_reuse = FALSE; /* We can't pipe if the site is blacklisted */ if(canPipeline && Curl_pipeline_site_blacklisted(data, needle)) { canPipeline = FALSE; } /* Look up the bundle with all the connections to this particular host */ bundle = Curl_conncache_find_bundle(data->state.conn_cache, needle->host.name); if(bundle) { size_t max_pipe_len = Curl_multi_max_pipeline_length(data->multi); size_t best_pipe_len = max_pipe_len; struct curl_llist_element *curr; infof(data, "Found bundle for host %s: %p\n", needle->host.name, (void *)bundle); /* We can't pipe if we don't know anything about the server */ if(canPipeline && !bundle->server_supports_pipelining) { infof(data, "Server doesn't support pipelining\n"); canPipeline = FALSE; } curr = bundle->conn_list->head; while(curr) { bool match = FALSE; #if defined(USE_NTLM) bool credentialsMatch = FALSE; #endif size_t pipeLen; /* * Note that if we use a HTTP proxy, we check connections to that * proxy and not to the actual remote server. 
*/ check = curr->ptr; curr = curr->next; if(disconnect_if_dead(check, data)) continue; pipeLen = check->send_pipe->size + check->recv_pipe->size; if(canPipeline) { /* Make sure the pipe has only GET requests */ struct SessionHandle* sh = gethandleathead(check->send_pipe); struct SessionHandle* rh = gethandleathead(check->recv_pipe); if(sh) { if(!IsPipeliningPossible(sh, check)) continue; } else if(rh) { if(!IsPipeliningPossible(rh, check)) continue; } } else { if(pipeLen > 0) { /* can only happen within multi handles, and means that another easy handle is using this connection */ continue; } if(Curl_resolver_asynch()) { /* ip_addr_str[0] is NUL only if the resolving of the name hasn't completed yet and until then we don't re-use this connection */ if(!check->ip_addr_str[0]) { infof(data, "Connection #%ld is still name resolving, can't reuse\n", check->connection_id); continue; } } if((check->sock[FIRSTSOCKET] == CURL_SOCKET_BAD) || check->bits.close) { /* Don't pick a connection that hasn't connected yet or that is going to get closed. */ infof(data, "Connection #%ld isn't open enough, can't reuse\n", check->connection_id); #ifdef DEBUGBUILD if(check->recv_pipe->size > 0) { infof(data, "BAD! 
Unconnected #%ld has a non-empty recv pipeline!\n", check->connection_id); } #endif continue; } } if((needle->handler->flags&PROTOPT_SSL) != (check->handler->flags&PROTOPT_SSL)) /* don't do mixed SSL and non-SSL connections */ if(!(needle->handler->protocol & check->handler->protocol)) /* except protocols that have been upgraded via TLS */ continue; if(needle->handler->flags&PROTOPT_SSL) { if((data->set.ssl.verifypeer != check->verifypeer) || (data->set.ssl.verifyhost != check->verifyhost)) continue; } if(needle->bits.proxy != check->bits.proxy) /* don't do mixed proxy and non-proxy connections */ continue; if(!canPipeline && check->inuse) /* this request can't be pipelined but the checked connection is already in use so we skip it */ continue; if(needle->localdev || needle->localport) { /* If we are bound to a specific local end (IP+port), we must not re-use a random other one, although if we didn't ask for a particular one we can reuse one that was bound. This comparison is a bit rough and too strict. Since the input parameters can be specified in numerous ways and still end up the same it would take a lot of processing to make it really accurate. Instead, this matching will assume that re-uses of bound connections will most likely also re-use the exact same binding parameters and missing out a few edge cases shouldn't hurt anyone very much. 
*/ if((check->localport != needle->localport) || (check->localportrange != needle->localportrange) || !check->localdev || !needle->localdev || strcmp(check->localdev, needle->localdev)) continue; } if((!(needle->handler->flags & PROTOPT_CREDSPERREQUEST)) || wantNTLMhttp) { /* This protocol requires credentials per connection or is HTTP+NTLM, so verify that we're using the same name and password as well */ if(!strequal(needle->user, check->user) || !strequal(needle->passwd, check->passwd)) { /* one of them was different */ continue; } #if defined(USE_NTLM) credentialsMatch = TRUE; #endif } if(!needle->bits.httpproxy || needle->handler->flags&PROTOPT_SSL || (needle->bits.httpproxy && check->bits.httpproxy && needle->bits.tunnel_proxy && check->bits.tunnel_proxy && Curl_raw_equal(needle->proxy.name, check->proxy.name) && (needle->port == check->port))) { /* The requested connection does not use a HTTP proxy or it uses SSL or it is a non-SSL protocol tunneled over the same http proxy name and port number or it is a non-SSL protocol which is allowed to be upgraded via TLS */ if((Curl_raw_equal(needle->handler->scheme, check->handler->scheme) || needle->handler->protocol & check->handler->protocol) && Curl_raw_equal(needle->host.name, check->host.name) && needle->remote_port == check->remote_port) { if(needle->handler->flags & PROTOPT_SSL) { /* This is a SSL connection so verify that we're using the same SSL options as well */ if(!Curl_ssl_config_matches(&needle->ssl_config, &check->ssl_config)) { DEBUGF(infof(data, "Connection #%ld has different SSL parameters, " "can't reuse\n", check->connection_id)); continue; } else if(check->ssl[FIRSTSOCKET].state != ssl_connection_complete) { DEBUGF(infof(data, "Connection #%ld has not started SSL connect, " "can't reuse\n", check->connection_id)); continue; } } match = TRUE; } } else { /* The requested needle connection is using a proxy, is the checked one using the same host, port and type? 
*/ if(check->bits.proxy && (needle->proxytype == check->proxytype) && (needle->bits.tunnel_proxy == check->bits.tunnel_proxy) && Curl_raw_equal(needle->proxy.name, check->proxy.name) && needle->port == check->port) { /* This is the same proxy connection, use it! */ match = TRUE; } } if(match) { #if defined(USE_NTLM) /* If we are looking for an HTTP+NTLM connection, check if this is already authenticating with the right credentials. If not, keep looking so that we can reuse NTLM connections if possible. (Especially we must not reuse the same connection if partway through a handshake!) */ if(wantNTLMhttp) { if(credentialsMatch && check->ntlm.state != NTLMSTATE_NONE) { chosen = check; /* We must use this connection, no other */ *force_reuse = TRUE; break; } else if(credentialsMatch) /* this is a backup choice */ chosen = check; continue; } #endif if(canPipeline) { /* We can pipeline if we want to. Let's continue looking for the optimal connection to use, i.e the shortest pipe that is not blacklisted. */ if(pipeLen == 0) { /* We have the optimal connection. Let's stop looking. */ chosen = check; break; } /* We can't use the connection if the pipe is full */ if(pipeLen >= max_pipe_len) continue; /* We can't use the connection if the pipe is penalized */ if(Curl_pipeline_penalized(data, check)) continue; if(pipeLen < best_pipe_len) { /* This connection has a shorter pipe so far. We'll pick this and continue searching */ chosen = check; best_pipe_len = pipeLen; continue; } } else { /* We have found a connection. Let's stop searching. */ chosen = check; break; } } } } if(chosen) { *usethis = chosen; return TRUE; /* yes, we found one to use! */ } return FALSE; /* no matching connecting exists */ }
0
219,989
int callback_glewlwyd_get_user_middleware_module_list (const struct _u_request * request, struct _u_response * response, void * user_middleware_data) { UNUSED(request); struct config_elements * config = (struct config_elements *)user_middleware_data; json_t * j_module; j_module = get_user_middleware_module_list(config); if (check_result_value(j_module, G_OK)) { ulfius_set_json_body_response(response, 200, json_object_get(j_module, "module")); } else { y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_get_user_middleware_module_list - Error get_user_middleware_module_list"); response->status = 500; } json_decref(j_module); return U_CALLBACK_CONTINUE; }
0
219,972
int callback_glewlwyd_server_configuration (const struct _u_request * request, struct _u_response * response, void * user_data) { UNUSED(request); json_t * json_body = json_pack("{ssssssss}", "api_prefix", ((struct config_elements *)user_data)->api_prefix, "admin_scope", ((struct config_elements *)user_data)->admin_scope, "profile_scope", ((struct config_elements *)user_data)->profile_scope, "delete_profile", ((struct config_elements *)user_data)->delete_profile==GLEWLWYD_PROFILE_DELETE_UNAUTHORIZED?"no":"yes"); ulfius_set_json_body_response(response, 200, json_body); json_decref(json_body); return U_CALLBACK_CONTINUE; }
0
282,990
static void err_raise_ext(int errcode) { RaiseException(LJ_EXCODE_MAKE(errcode), 1 /* EH_NONCONTINUABLE */, 0, NULL); }
0
359,508
DEFUN (no_bgp_bestpath_aspath_ignore, no_bgp_bestpath_aspath_ignore_cmd, "no bgp bestpath as-path ignore", NO_STR "BGP specific commands\n" "Change the default bestpath selection\n" "AS-path attribute\n" "Ignore as-path length in selecting a route\n") { struct bgp *bgp; bgp = vty->index; bgp_flag_unset (bgp, BGP_FLAG_ASPATH_IGNORE); return CMD_SUCCESS; }
0
291,779
int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index) { /* If no path, return -1 for block layer not to try again */ int cnt = -1; struct rtrs_con *con; struct rtrs_clt_path *clt_path; struct path_it it; rcu_read_lock(); for (path_it_init(&it, clt); (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) continue; con = clt_path->s.con[index + 1]; cnt = ib_process_cq_direct(con->cq, -1); if (cnt) break; } path_it_deinit(&it); rcu_read_unlock(); return cnt; }
0
202,659
static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data; __be16 *p = (__be16 *)(skb->data + offset); int grehlen = offset + 4; struct ip6_tnl *t; __be16 flags; flags = p[0]; if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { if (flags&(GRE_VERSION|GRE_ROUTING)) return; if (flags&GRE_KEY) { grehlen += 4; if (flags&GRE_CSUM) grehlen += 4; } } /* If only 8 bytes returned, keyed message will be dropped here */ if (!pskb_may_pull(skb, grehlen)) return; ipv6h = (const struct ipv6hdr *)skb->data; p = (__be16 *)(skb->data + offset); t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, flags & GRE_KEY ? *(((__be32 *)p) + (grehlen / 4) - 1) : 0, p[1]); if (!t) return; switch (type) { __u32 teli; struct ipv6_tlv_tnl_enc_lim *tel; __u32 mtu; case ICMPV6_DEST_UNREACH: net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", t->parms.name); break; case ICMPV6_TIME_EXCEED: if (code == ICMPV6_EXC_HOPLIMIT) { net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", t->parms.name); } break; case ICMPV6_PARAMPROB: teli = 0; if (code == ICMPV6_HDR_FIELD) teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data); if (teli && teli == be32_to_cpu(info) - 2) { tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; if (tel->encap_limit == 0) { net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", t->parms.name); } } else { net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", t->parms.name); } break; case ICMPV6_PKT_TOOBIG: mtu = be32_to_cpu(info) - offset; if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; t->dev->mtu = mtu; break; } if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO)) t->err_count++; else t->err_count = 1; t->err_time = jiffies; }
1
513,325
int safe_index_read(JOIN_TAB *tab) { int error; TABLE *table= tab->table; if ((error= table->file->ha_index_read_map(table->record[0], tab->ref.key_buff, make_prev_keypart_map(tab->ref.key_parts), HA_READ_KEY_EXACT))) return report_error(table, error); return 0; }
0
384,802
hex2nr(int c) { if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return c - '0'; }
0
310,181
_nc_do_color(int old_pair, int pair, int reverse, NCURSES_OUTC outc) { SetSafeOutcWrapper(outc); NCURSES_SP_NAME(_nc_do_color) (CURRENT_SCREEN, old_pair, pair, reverse, _nc_outc_wrapper); }
0
198,556
fiber_switch(mrb_state *mrb, mrb_value self, mrb_int len, const mrb_value *a, mrb_bool resume, mrb_bool vmexec) { struct mrb_context *c = fiber_check(mrb, self); struct mrb_context *old_c = mrb->c; enum mrb_fiber_state status; mrb_value value; fiber_check_cfunc(mrb, c); status = c->status; switch (status) { case MRB_FIBER_TRANSFERRED: if (resume) { mrb_raise(mrb, E_FIBER_ERROR, "resuming transferred fiber"); } break; case MRB_FIBER_RUNNING: case MRB_FIBER_RESUMED: mrb_raise(mrb, E_FIBER_ERROR, "double resume"); break; case MRB_FIBER_TERMINATED: mrb_raise(mrb, E_FIBER_ERROR, "resuming dead fiber"); break; default: break; } old_c->status = resume ? MRB_FIBER_RESUMED : MRB_FIBER_TRANSFERRED; c->prev = resume ? mrb->c : (c->prev ? c->prev : mrb->root_c); fiber_switch_context(mrb, c); if (status == MRB_FIBER_CREATED) { mrb_value *b, *e; if (!c->ci->proc) { mrb_raise(mrb, E_FIBER_ERROR, "double resume (current)"); } mrb_stack_extend(mrb, len+2); /* for receiver and (optional) block */ b = c->stbase+1; e = b + len; while (b<e) { *b++ = *a++; } if (vmexec) { c->ci--; /* pop dummy callinfo */ } c->cibase->n = len; value = c->stbase[0] = MRB_PROC_ENV(c->cibase->proc)->stack[0]; } else { value = fiber_result(mrb, a, len); if (vmexec) { c->ci[1].stack[0] = value; } } if (vmexec) { c->vmexec = TRUE; value = mrb_vm_exec(mrb, c->ci->proc, c->ci->pc); mrb->c = old_c; } else { MARK_CONTEXT_MODIFY(c); } return value; }
1
244,120
GF_Err emsg_box_size(GF_Box *s) { GF_EventMessageBox *ptr = (GF_EventMessageBox*) s; if (ptr->version) { ptr->size += 20; } else { ptr->size += 16; } ptr->size+=2; //1 NULL-terminated strings if (ptr->scheme_id_uri) ptr->size += strlen(ptr->scheme_id_uri); if (ptr->value) ptr->size += strlen(ptr->value); if (ptr->message_data) ptr->size += ptr->message_data_size; return GF_OK; }
0
455,331
parse_long_options (argv, arg_start, arg_end) char **argv; int arg_start, arg_end; { int arg_index, longarg, i; char *arg_string; arg_index = arg_start; while ((arg_index != arg_end) && (arg_string = argv[arg_index]) && (*arg_string == '-')) { longarg = 0; /* Make --login equivalent to -login. */ if (arg_string[1] == '-' && arg_string[2]) { longarg = 1; arg_string++; } for (i = 0; long_args[i].name; i++) { if (STREQ (arg_string + 1, long_args[i].name)) { if (long_args[i].type == Int) *long_args[i].int_value = 1; else if (argv[++arg_index] == 0) { report_error (_("%s: option requires an argument"), long_args[i].name); exit (EX_BADUSAGE); } else *long_args[i].char_value = argv[arg_index]; break; } } if (long_args[i].name == 0) { if (longarg) { report_error (_("%s: invalid option"), argv[arg_index]); show_shell_usage (stderr, 0); exit (EX_BADUSAGE); } break; /* No such argument. Maybe flag arg. */ } arg_index++; } return (arg_index); }
0
385,887
SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode) { return sys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode); }
0
404,711
__acquires(files->file_lock) { struct fdtable *fdt; int expanded = 0; repeat: fdt = files_fdtable(files); /* Do we need to expand? */ if (nr < fdt->max_fds) return expanded; /* Can we expand? */ if (nr >= sysctl_nr_open) return -EMFILE; if (unlikely(files->resize_in_progress)) { spin_unlock(&files->file_lock); expanded = 1; wait_event(files->resize_wait, !files->resize_in_progress); spin_lock(&files->file_lock); goto repeat; } /* All good, so we try */ files->resize_in_progress = true; expanded = expand_fdtable(files, nr); files->resize_in_progress = false; wake_up_all(&files->resize_wait); return expanded; }
0
233,823
void fmtutil_read_atari_palette(deark *c, dbuf *f, i64 pos, de_color *dstpal, i64 ncolors_to_read, i64 ncolors_used, unsigned int flags) { i64 i; unsigned int n; int pal_bits = 0; // 9, 12, or 15. 0 = not yet determined u8 cr, cg, cb; u8 cr1, cg1, cb1; char cbuf[32]; char tmps[64]; const char *s; s = de_get_ext_option(c, "atari:palbits"); if(s) { pal_bits = de_atoi(s); } if(pal_bits==0 && (flags&DE_FLAG_ATARI_15BIT_PAL)) { pal_bits = 15; } if(pal_bits==0) { // Pre-scan the palette, and try to guess whether Atari STE-style 12-bit // colors are used, instead of the usual 9-bit colors. // I don't know the best way to do this. Sometimes the 4th bit in each // nibble is used for extra color detail, and sometimes it just seems to // contain garbage. Maybe the logic should also depend on the file // format, or the number of colors. int bit_3_used = 0; int nibble_3_used = 0; for(i=0; i<ncolors_to_read; i++) { n = (unsigned int)dbuf_getu16be(f, pos + i*2); if(n&0xf000) { nibble_3_used = 1; } if(n&0x0888) { bit_3_used = 1; } } if(bit_3_used && !nibble_3_used) { de_dbg(c, "12-bit palette colors detected"); pal_bits = 12; } } if(pal_bits<12) { // Default to 9 if <12 pal_bits = 9; } else if(pal_bits<15) { pal_bits = 12; } else { pal_bits = 15; } for(i=0; i<ncolors_to_read; i++) { n = (unsigned int)dbuf_getu16be(f, pos + 2*i); if(pal_bits==15) { cr1 = (u8)((n>>6)&0x1c); if(n&0x0800) cr1+=2; if(n&0x8000) cr1++; cg1 = (u8)((n>>2)&0x1c); if(n&0x0080) cg1+=2; if(n&0x4000) cg1++; cb1 = (u8)((n<<2)&0x1c); if(n&0x0008) cb1+=2; if(n&0x2000) cb1++; cr = de_scale_n_to_255(31, cr1); cg = de_scale_n_to_255(31, cg1); cb = de_scale_n_to_255(31, cb1); de_snprintf(cbuf, sizeof(cbuf), "%2d,%2d,%2d", (int)cr1, (int)cg1, (int)cb1); } else if(pal_bits==12) { cr1 = (u8)((n>>7)&0x0e); if(n&0x800) cr1++; cg1 = (u8)((n>>3)&0x0e); if(n&0x080) cg1++; cb1 = (u8)((n<<1)&0x0e); if(n&0x008) cb1++; cr = scale_15_to_255(cr1); cg = scale_15_to_255(cg1); cb = scale_15_to_255(cb1); de_snprintf(cbuf, sizeof(cbuf), 
"%2d,%2d,%2d", (int)cr1, (int)cg1, (int)cb1); } else { cr1 = (u8)((n>>8)&0x07); cg1 = (u8)((n>>4)&0x07); cb1 = (u8)(n&0x07); cr = scale_7_to_255(cr1); cg = scale_7_to_255(cg1); cb = scale_7_to_255(cb1); de_snprintf(cbuf, sizeof(cbuf), "%d,%d,%d", (int)cr1, (int)cg1, (int)cb1); } dstpal[i] = DE_MAKE_RGB(cr, cg, cb); de_snprintf(tmps, sizeof(tmps), "0x%04x (%s) "DE_CHAR_RIGHTARROW" ", n, cbuf); de_dbg_pal_entry2(c, i, dstpal[i], tmps, NULL, (i>=ncolors_used)?" [unused]":""); } }
0
238,615
static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, u32 func_id, s16 offset, struct module **btf_modp) { if (offset) { if (offset < 0) { /* In the future, this can be allowed to increase limit * of fd index into fd_array, interpreted as u16. */ verbose(env, "negative offset disallowed for kernel module function call\n"); return ERR_PTR(-EINVAL); } return __find_kfunc_desc_btf(env, offset, btf_modp); } return btf_vmlinux ?: ERR_PTR(-ENOENT); }
0
254,735
njs_typed_array_prototype_set(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { double num; int64_t i, length, src_length, offset; njs_int_t ret; njs_value_t *this, *src, *value, prop; njs_array_t *array; njs_typed_array_t *self, *src_tarray; njs_array_buffer_t *buffer; this = njs_argument(args, 0); if (njs_slow_path(!njs_is_typed_array(this))) { njs_type_error(vm, "this is not a typed array"); return NJS_ERROR; } self = njs_typed_array(this); src = njs_arg(args, nargs, 1); value = njs_arg(args, nargs, 2); ret = njs_value_to_integer(vm, value, &offset); if (njs_slow_path(ret != NJS_OK)) { return NJS_ERROR; } if (njs_slow_path(offset < 0)) { njs_range_error(vm, "offset is out of bounds"); return NJS_ERROR; } buffer = njs_typed_array_writable(vm, self); if (njs_slow_path(buffer == NULL)) { return NJS_ERROR; } length = njs_typed_array_length(self); if (njs_is_typed_array(src)) { src_tarray = njs_typed_array(src); if (njs_slow_path(njs_is_detached_buffer(src_tarray->buffer))) { njs_type_error(vm, "detached buffer"); return NJS_ERROR; } src_length = njs_typed_array_length(src_tarray); if (njs_slow_path((src_length > length) || (offset > length - src_length))) { njs_range_error(vm, "source is too large"); return NJS_ERROR; } length = njs_min(njs_typed_array_length(src_tarray), length - offset); for (i = 0; i < length; i++) { njs_typed_array_prop_set(vm, self, offset + i, njs_typed_array_prop(src_tarray, i)); } } else { if (njs_is_fast_array(src)) { array = njs_array(src); src_length = array->length; if (njs_slow_path((src_length > length) || (offset > length - src_length))) { njs_range_error(vm, "source is too large"); return NJS_ERROR; } length = njs_min(array->length, length - offset); for (i = 0; i < length; i++) { ret = njs_value_to_number(vm, &array->start[i], &num); if (ret == NJS_OK) { njs_typed_array_prop_set(vm, self, offset + i, num); } } goto done; } ret = njs_value_to_object(vm, src); if (njs_slow_path(ret != NJS_OK)) { return ret; } 
ret = njs_object_length(vm, src, &src_length); if (njs_slow_path(ret == NJS_ERROR)) { return ret; } if (njs_slow_path((src_length > length) || (offset > length - src_length))) { njs_range_error(vm, "source is too large"); return NJS_ERROR; } length = njs_min(src_length, length - offset); for (i = 0; i < length; i++) { ret = njs_value_property_i64(vm, src, i, &prop); if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } num = NAN; if (ret == NJS_OK) { ret = njs_value_to_number(vm, &prop, &num); if (njs_slow_path(ret == NJS_ERROR)) { return NJS_ERROR; } } if (njs_slow_path(njs_is_detached_buffer(buffer))) { njs_type_error(vm, "detached buffer"); return NJS_ERROR; } njs_typed_array_prop_set(vm, self, offset + i, num); } } done: njs_set_undefined(&vm->retval); return NJS_OK; }
0
316,980
static int selinux_lsm_notifier_avc_callback(u32 event) { if (event == AVC_CALLBACK_RESET) { sel_ib_pkey_flush(); call_blocking_lsm_notifier(LSM_POLICY_CHANGE, NULL); } return 0; }
0
513,149
void plugin_mutex_init() { #ifdef HAVE_PSI_INTERFACE init_plugin_psi_keys(); #endif mysql_mutex_init(key_LOCK_plugin, &LOCK_plugin, MY_MUTEX_INIT_FAST); }
0
293,941
get_number_indent(linenr_T lnum) { colnr_T col; pos_T pos; regmatch_T regmatch; int lead_len = 0; // length of comment leader if (lnum > curbuf->b_ml.ml_line_count) return -1; pos.lnum = 0; // In format_lines() (i.e. not insert mode), fo+=q is needed too... if ((State & INSERT) || has_format_option(FO_Q_COMS)) lead_len = get_leader_len(ml_get(lnum), NULL, FALSE, TRUE); regmatch.regprog = vim_regcomp(curbuf->b_p_flp, RE_MAGIC); if (regmatch.regprog != NULL) { regmatch.rm_ic = FALSE; // vim_regexec() expects a pointer to a line. This lets us // start matching for the flp beyond any comment leader... if (vim_regexec(&regmatch, ml_get(lnum) + lead_len, (colnr_T)0)) { pos.lnum = lnum; pos.col = (colnr_T)(*regmatch.endp - ml_get(lnum)); pos.coladd = 0; } vim_regfree(regmatch.regprog); } if (pos.lnum == 0 || *ml_get_pos(&pos) == NUL) return -1; getvcol(curwin, &pos, &col, NULL, NULL); return (int)col; }
0
317,172
static void selinux_d_instantiate(struct dentry *dentry, struct inode *inode) { if (inode) inode_doinit_with_dentry(inode, dentry); }
0
338,216
Expression* WasmBinaryBuilder::getBlockOrSingleton(Type type) { Name label = getNextLabel(); breakStack.push_back({label, type}); auto start = expressionStack.size(); processExpressions(); size_t end = expressionStack.size(); if (end < start) { throwError("block cannot pop from outside"); } breakStack.pop_back(); auto* block = allocator.alloc<Block>(); pushBlockElements(block, type, start); block->name = label; block->finalize(type); // maybe we don't need a block here? if (breakTargetNames.find(block->name) == breakTargetNames.end() && exceptionTargetNames.find(block->name) == exceptionTargetNames.end()) { block->name = Name(); if (block->list.size() == 1) { return block->list[0]; } } breakTargetNames.erase(block->name); return block; }
0
253,538
smb2_is_read_op(__u32 oplock) { return oplock == SMB2_OPLOCK_LEVEL_II; }
0
355,645
handle_subscript( char_u **arg, char_u *name_start, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int ret = OK; dict_T *selfdict = NULL; int check_white = TRUE; int getnext; char_u *p; while (ret == OK) { // When at the end of the line and ".name" or "->{" or "->X" follows in // the next line then consume the line break. p = eval_next_non_blank(*arg, evalarg, &getnext); if (getnext && ((rettv->v_type == VAR_DICT && *p == '.' && eval_isdictc(p[1])) || (p[0] == '-' && p[1] == '>' && (p[2] == '{' || ASCII_ISALPHA(in_vim9script() ? *skipwhite(p + 2) : p[2]))))) { *arg = eval_next_line(evalarg); p = *arg; check_white = FALSE; } if (rettv->v_type == VAR_ANY) { char_u *exp_name; int cc; int idx; ufunc_T *ufunc; type_T *type; // Found script from "import {name} as name", script item name must // follow. "rettv->vval.v_number" has the script ID. if (**arg != '.') { if (verbose) semsg(_(e_expected_dot_after_name_str), name_start != NULL ? 
name_start: *arg); ret = FAIL; break; } ++*arg; if (IS_WHITE_OR_NUL(**arg)) { if (verbose) emsg(_(e_no_white_space_allowed_after_dot)); ret = FAIL; break; } // isolate the name exp_name = *arg; while (eval_isnamec(**arg)) ++*arg; cc = **arg; **arg = NUL; idx = find_exported(rettv->vval.v_number, exp_name, &ufunc, &type, evalarg->eval_cctx, verbose); **arg = cc; if (idx < 0 && ufunc == NULL) { ret = FAIL; break; } if (idx >= 0) { scriptitem_T *si = SCRIPT_ITEM(rettv->vval.v_number); svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; copy_tv(sv->sv_tv, rettv); } else { rettv->v_type = VAR_FUNC; rettv->vval.v_string = vim_strsave(ufunc->uf_name); } continue; } if ((**arg == '(' && (!evaluate || rettv->v_type == VAR_FUNC || rettv->v_type == VAR_PARTIAL)) && (!check_white || !VIM_ISWHITE(*(*arg - 1)))) { ret = call_func_rettv(arg, evalarg, rettv, evaluate, selfdict, NULL); // Stop the expression evaluation when immediately aborting on // error, or when an interrupt occurred or an exception was thrown // but not caught. if (aborting()) { if (ret == OK) clear_tv(rettv); ret = FAIL; } dict_unref(selfdict); selfdict = NULL; } else if (p[0] == '-' && p[1] == '>') { if (in_vim9script()) *arg = skipwhite(p + 2); else *arg = p + 2; if (ret == OK) { if (VIM_ISWHITE(**arg)) { emsg(_(e_no_white_space_allowed_before_parenthesis)); ret = FAIL; } else if ((**arg == '{' && !in_vim9script()) || **arg == '(') // expr->{lambda}() or expr->(lambda)() ret = eval_lambda(arg, rettv, evalarg, verbose); else // expr->name() ret = eval_method(arg, rettv, evalarg, verbose); } } // "." is ".name" lookup when we found a dict or when evaluating and // scriptversion is at least 2, where string concatenation is "..". else if (**arg == '[' || (**arg == '.' && (rettv->v_type == VAR_DICT || (!evaluate && (*arg)[1] != '.' 
&& !in_old_script(2))))) { dict_unref(selfdict); if (rettv->v_type == VAR_DICT) { selfdict = rettv->vval.v_dict; if (selfdict != NULL) ++selfdict->dv_refcount; } else selfdict = NULL; if (eval_index(arg, rettv, evalarg, verbose) == FAIL) { clear_tv(rettv); ret = FAIL; } } else break; } // Turn "dict.Func" into a partial for "Func" bound to "dict". // Don't do this when "Func" is already a partial that was bound // explicitly (pt_auto is FALSE). if (selfdict != NULL && (rettv->v_type == VAR_FUNC || (rettv->v_type == VAR_PARTIAL && (rettv->vval.v_partial->pt_auto || rettv->vval.v_partial->pt_dict == NULL)))) selfdict = make_partial(selfdict, rettv); dict_unref(selfdict); return ret; }
0
226,156
GF_Box *trun_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackFragmentRunBox, GF_ISOM_BOX_TYPE_TRUN); //NO FLAGS SET BY DEFAULT return (GF_Box *)tmp;
0
313,736
find_ident_at_pos( win_T *wp, linenr_T lnum, colnr_T startcol, char_u **text, int *textcol, // column where "text" starts, can be NULL int find_type) { char_u *ptr; int col = 0; // init to shut up GCC int i; int this_class = 0; int prev_class; int prevcol; int bn = 0; // bracket nesting // if i == 0: try to find an identifier // if i == 1: try to find any non-white text ptr = ml_get_buf(wp->w_buffer, lnum, FALSE); for (i = (find_type & FIND_IDENT) ? 0 : 1; i < 2; ++i) { /* * 1. skip to start of identifier/text */ col = startcol; if (has_mbyte) { while (ptr[col] != NUL) { // Stop at a ']' to evaluate "a[x]". if ((find_type & FIND_EVAL) && ptr[col] == ']') break; this_class = mb_get_class(ptr + col); if (this_class != 0 && (i == 1 || this_class != 1)) break; col += (*mb_ptr2len)(ptr + col); } } else while (ptr[col] != NUL && (i == 0 ? !vim_iswordc(ptr[col]) : VIM_ISWHITE(ptr[col])) && (!(find_type & FIND_EVAL) || ptr[col] != ']') ) ++col; // When starting on a ']' count it, so that we include the '['. bn = ptr[col] == ']'; /* * 2. Back up to start of identifier/text. */ if (has_mbyte) { // Remember class of character under cursor. if ((find_type & FIND_EVAL) && ptr[col] == ']') this_class = mb_get_class((char_u *)"a"); else this_class = mb_get_class(ptr + col); while (col > 0 && this_class != 0) { prevcol = col - 1 - (*mb_head_off)(ptr, ptr + col - 1); prev_class = mb_get_class(ptr + prevcol); if (this_class != prev_class && (i == 0 || prev_class == 0 || (find_type & FIND_IDENT)) && (!(find_type & FIND_EVAL) || prevcol == 0 || !find_is_eval_item(ptr + prevcol, &prevcol, &bn, BACKWARD)) ) break; col = prevcol; } // If we don't want just any old text, or we've found an // identifier, stop searching. if (this_class > 2) this_class = 2; if (!(find_type & FIND_STRING) || this_class == 2) break; } else { while (col > 0 && ((i == 0 ? 
vim_iswordc(ptr[col - 1]) : (!VIM_ISWHITE(ptr[col - 1]) && (!(find_type & FIND_IDENT) || !vim_iswordc(ptr[col - 1])))) || ((find_type & FIND_EVAL) && col > 1 && find_is_eval_item(ptr + col - 1, &col, &bn, BACKWARD)) )) --col; // If we don't want just any old text, or we've found an // identifier, stop searching. if (!(find_type & FIND_STRING) || vim_iswordc(ptr[col])) break; } } if (ptr[col] == NUL || (i == 0 && (has_mbyte ? this_class != 2 : !vim_iswordc(ptr[col])))) { // didn't find an identifier or text if ((find_type & FIND_NOERROR) == 0) { if (find_type & FIND_STRING) emsg(_(e_no_string_under_cursor)); else emsg(_(e_no_identifier_under_cursor)); } return 0; } ptr += col; *text = ptr; if (textcol != NULL) *textcol = col; /* * 3. Find the end if the identifier/text. */ bn = 0; startcol -= col; col = 0; if (has_mbyte) { // Search for point of changing multibyte character class. this_class = mb_get_class(ptr); while (ptr[col] != NUL && ((i == 0 ? mb_get_class(ptr + col) == this_class : mb_get_class(ptr + col) != 0) || ((find_type & FIND_EVAL) && col <= (int)startcol && find_is_eval_item(ptr + col, &col, &bn, FORWARD)) )) col += (*mb_ptr2len)(ptr + col); } else while ((i == 0 ? vim_iswordc(ptr[col]) : (ptr[col] != NUL && !VIM_ISWHITE(ptr[col]))) || ((find_type & FIND_EVAL) && col <= (int)startcol && find_is_eval_item(ptr + col, &col, &bn, FORWARD)) ) ++col; return col; }
0
226,237
GF_Err sdp_box_read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_SDPBox *ptr = (GF_SDPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; length = (u32) (ptr->size); if (length >= (u32)0xFFFFFFFF) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid length %lu in sdp box\n", length)); return GF_ISOM_INVALID_FILE; } //sdp text has no delimiter !!! ptr->sdpText = (char*)gf_malloc(sizeof(char) * (length+1)); if (!ptr->sdpText) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->sdpText, length); ptr->sdpText[length] = 0; return GF_OK; }
0
439,140
static Image *ReadPALMImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *image; IndexPacket index; MagickBooleanType status; MagickOffsetType totalOffset, seekNextDepth; MagickPixelPacket transpix; register IndexPacket *indexes; register ssize_t i, x; register PixelPacket *q; size_t bytes_per_row, bits_per_pixel, extent, flags, version, nextDepthOffset, transparentIndex, compressionType, byte, mask, redbits, greenbits, bluebits, one, pad, size, bit; ssize_t count, y; unsigned char *last_row, *one_row, *ptr; unsigned short color16; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { (void) DestroyImageList(image); return((Image *) NULL); } totalOffset=0; do { image->columns=ReadBlobMSBShort(image); image->rows=ReadBlobMSBShort(image); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if ((image->columns == 0) || (image->rows == 0)) ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize"); status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } (void) SetImageBackgroundColor(image); bytes_per_row=ReadBlobMSBShort(image); flags=ReadBlobMSBShort(image); bits_per_pixel=(size_t) ReadBlobByte(image); if ((bits_per_pixel != 1) && (bits_per_pixel != 2) && (bits_per_pixel != 4) && (bits_per_pixel != 8) && (bits_per_pixel != 16)) ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel"); version=(size_t) ReadBlobByte(image); if ((version != 0) && (version != 1) && (version != 
2)) ThrowReaderException(CorruptImageError,"FileFormatVersionMismatch"); nextDepthOffset=(size_t) ReadBlobMSBShort(image); transparentIndex=(size_t) ReadBlobByte(image); compressionType=(size_t) ReadBlobByte(image); if ((compressionType != PALM_COMPRESSION_NONE) && (compressionType != PALM_COMPRESSION_SCANLINE ) && (compressionType != PALM_COMPRESSION_RLE)) ThrowReaderException(CorruptImageError,"UnrecognizedImageCompression"); pad=ReadBlobMSBShort(image); (void) pad; /* Initialize image colormap. */ one=1; if ((bits_per_pixel < 16) && (AcquireImageColormap(image,one << bits_per_pixel) == MagickFalse)) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); GetMagickPixelPacket(image,&transpix); if (bits_per_pixel == 16) /* Direct Color */ { redbits=(size_t) ReadBlobByte(image); /* # of bits of red */ (void) redbits; greenbits=(size_t) ReadBlobByte(image); /* # of bits of green */ (void) greenbits; bluebits=(size_t) ReadBlobByte(image); /* # of bits of blue */ (void) bluebits; ReadBlobByte(image); /* reserved by Palm */ ReadBlobByte(image); /* reserved by Palm */ transpix.red=(MagickRealType) (QuantumRange*ReadBlobByte(image)/31); transpix.green=(MagickRealType) (QuantumRange*ReadBlobByte(image)/63); transpix.blue=(MagickRealType) (QuantumRange*ReadBlobByte(image)/31); } if (bits_per_pixel == 8) { IndexPacket index; if (flags & PALM_HAS_COLORMAP_FLAG) { count=(ssize_t) ReadBlobMSBShort(image); for (i=0; i < (ssize_t) count; i++) { ReadBlobByte(image); index=ConstrainColormapIndex(image,(ssize_t) (255-i)); image->colormap[(int) index].red=ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); image->colormap[(int) index].green=ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); image->colormap[(int) index].blue=ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); } } else for (i=0; i < (ssize_t) (1L << bits_per_pixel); i++) { index=ConstrainColormapIndex(image,(ssize_t) (255-i)); image->colormap[(int) index].red=ScaleCharToQuantum( 
PalmPalette[i][0]); image->colormap[(int) index].green=ScaleCharToQuantum( PalmPalette[i][1]); image->colormap[(int) index].blue=ScaleCharToQuantum( PalmPalette[i][2]); } } if (flags & PALM_IS_COMPRESSED_FLAG) size=ReadBlobMSBShort(image); (void) size; image->storage_class=DirectClass; if (bits_per_pixel < 16) { image->storage_class=PseudoClass; image->depth=8; } if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(image); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } extent=MagickMax(bytes_per_row,2*image->columns); one_row=(unsigned char *) AcquireQuantumMemory(extent,sizeof(*one_row)); if (one_row == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(one_row,0,extent*sizeof(*one_row)); last_row=(unsigned char *) NULL; if (compressionType == PALM_COMPRESSION_SCANLINE) { last_row=(unsigned char *) AcquireQuantumMemory(MagickMax(bytes_per_row, 2*image->columns),sizeof(*last_row)); if (last_row == (unsigned char *) NULL) { one_row=(unsigned char *) RelinquishMagickMemory(one_row); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(last_row,0,MagickMax(bytes_per_row,2*image->columns)* sizeof(*last_row)); } mask=(size_t) (1U << bits_per_pixel)-1; for (y=0; y < (ssize_t) image->rows; y++) { if ((flags & PALM_IS_COMPRESSED_FLAG) == 0) { /* TODO move out of loop! */ image->compression=NoCompression; count=ReadBlob(image,bytes_per_row,one_row); if (count != (ssize_t) bytes_per_row) break; } else { if (compressionType == PALM_COMPRESSION_RLE) { /* TODO move out of loop! 
*/ image->compression=RLECompression; for (i=0; i < (ssize_t) bytes_per_row; ) { count=(ssize_t) ReadBlobByte(image); if (count < 0) break; count=MagickMin(count,(ssize_t) bytes_per_row-i); byte=(size_t) ReadBlobByte(image); (void) memset(one_row+i,(int) byte,(size_t) count); i+=count; } } else if (compressionType == PALM_COMPRESSION_SCANLINE) { size_t one; /* TODO move out of loop! */ one=1; image->compression=FaxCompression; for (i=0; i < (ssize_t) bytes_per_row; i+=8) { count=(ssize_t) ReadBlobByte(image); if (count < 0) break; byte=(size_t) MagickMin((ssize_t) bytes_per_row-i,8); for (bit=0; bit < byte; bit++) { if ((y == 0) || (count & (one << (7 - bit)))) one_row[i+bit]=(unsigned char) ReadBlobByte(image); else one_row[i+bit]=last_row[i+bit]; } } (void) memcpy(last_row, one_row, bytes_per_row); } } ptr=one_row; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); if (bits_per_pixel == 16) { if (image->columns > (2*bytes_per_row)) { one_row=(unsigned char *) RelinquishMagickMemory(one_row); if (compressionType == PALM_COMPRESSION_SCANLINE) last_row=(unsigned char *) RelinquishMagickMemory(last_row); ThrowReaderException(CorruptImageError,"CorruptImage"); } for (x=0; x < (ssize_t) image->columns; x++) { color16=(*ptr++ << 8); color16|=(*ptr++); SetPixelRed(q,(QuantumRange*((color16 >> 11) & 0x1f))/0x1f); SetPixelGreen(q,(QuantumRange*((color16 >> 5) & 0x3f))/0x3f); SetPixelBlue(q,(QuantumRange*((color16 >> 0) & 0x1f))/0x1f); SetPixelOpacity(q,OpaqueOpacity); q++; } } else { bit=8-bits_per_pixel; for (x=0; x < (ssize_t) image->columns; x++) { if ((size_t) (ptr-one_row) >= bytes_per_row) { one_row=(unsigned char *) RelinquishMagickMemory(one_row); if (compressionType == PALM_COMPRESSION_SCANLINE) last_row=(unsigned char *) RelinquishMagickMemory(last_row); ThrowReaderException(CorruptImageError,"CorruptImage"); } index=(IndexPacket) (mask-(((*ptr) & (mask << bit)) >> bit)); 
SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); if (bit) bit-=bits_per_pixel; else { ptr++; bit=8-bits_per_pixel; } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } if (flags & PALM_HAS_TRANSPARENCY_FLAG) { IndexPacket index=ConstrainColormapIndex(image,(mask-transparentIndex)); if (bits_per_pixel != 16) SetMagickPixelPacket(image,image->colormap+(ssize_t) index, (const IndexPacket *) NULL,&transpix); (void) TransparentPaintImage(image,&transpix,(Quantum) TransparentOpacity,MagickFalse); } one_row=(unsigned char *) RelinquishMagickMemory(one_row); if (compressionType == PALM_COMPRESSION_SCANLINE) last_row=(unsigned char *) RelinquishMagickMemory(last_row); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. Copied from coders/pnm.c */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; if (nextDepthOffset != 0) { /* Skip to next image. */ totalOffset+=(MagickOffsetType) (nextDepthOffset*4); if (totalOffset >= (MagickOffsetType) GetBlobSize(image)) ThrowReaderException(CorruptImageError,"ImproperImageHeader") else seekNextDepth=SeekBlob(image,totalOffset,SEEK_SET); if (seekNextDepth != totalOffset) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Allocate next image structure. 
Copied from coders/pnm.c */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { status=MagickFalse; break; } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while (nextDepthOffset != 0); (void) CloseBlob(image); if (status == MagickFalse) return(DestroyImageList(image)); return(GetFirstImageInList(image)); }
0
201,872
_gnutls_server_select_suite(gnutls_session_t session, uint8_t * data, unsigned int datalen) { int ret; unsigned int i, j, cipher_suites_size; size_t pk_algos_size; uint8_t cipher_suites[MAX_CIPHERSUITE_SIZE]; int retval; gnutls_pk_algorithm_t pk_algos[MAX_ALGOS]; /* will hold the pk algorithms * supported by the peer. */ for (i = 0; i < datalen; i += 2) { /* TLS_RENEGO_PROTECTION_REQUEST = { 0x00, 0xff } */ if (session->internals.priorities.sr != SR_DISABLED && data[i] == GNUTLS_RENEGO_PROTECTION_REQUEST_MAJOR && data[i + 1] == GNUTLS_RENEGO_PROTECTION_REQUEST_MINOR) { _gnutls_handshake_log ("HSK[%p]: Received safe renegotiation CS\n", session); retval = _gnutls_ext_sr_recv_cs(session); if (retval < 0) { gnutls_assert(); return retval; } } /* TLS_FALLBACK_SCSV */ if (data[i] == GNUTLS_FALLBACK_SCSV_MAJOR && data[i + 1] == GNUTLS_FALLBACK_SCSV_MINOR) { _gnutls_handshake_log ("HSK[%p]: Received fallback CS\n", session); if (gnutls_protocol_get_version(session) != GNUTLS_TLS_VERSION_MAX) return GNUTLS_E_INAPPROPRIATE_FALLBACK; } } pk_algos_size = MAX_ALGOS; ret = server_find_pk_algos_in_ciphersuites(data, datalen, pk_algos, &pk_algos_size); if (ret < 0) return gnutls_assert_val(ret); ret = _gnutls_supported_ciphersuites(session, cipher_suites, sizeof(cipher_suites)); if (ret < 0) return gnutls_assert_val(ret); cipher_suites_size = ret; /* Here we remove any ciphersuite that does not conform * the certificate requested, or to the * authentication requested (e.g. SRP). */ ret = _gnutls_remove_unwanted_ciphersuites(session, cipher_suites, cipher_suites_size, pk_algos, pk_algos_size); if (ret <= 0) { gnutls_assert(); if (ret < 0) return ret; else return GNUTLS_E_UNKNOWN_CIPHER_SUITE; } cipher_suites_size = ret; /* Data length should be zero mod 2 since * every ciphersuite is 2 bytes. (this check is needed * see below). 
*/ if (datalen % 2 != 0) { gnutls_assert(); return GNUTLS_E_UNEXPECTED_PACKET_LENGTH; } memset(session->security_parameters.cipher_suite, 0, 2); retval = GNUTLS_E_UNKNOWN_CIPHER_SUITE; _gnutls_handshake_log ("HSK[%p]: Requested cipher suites[size: %d]: \n", session, (int) datalen); if (session->internals.priorities.server_precedence == 0) { for (j = 0; j < datalen; j += 2) { _gnutls_handshake_log("\t0x%.2x, 0x%.2x %s\n", data[j], data[j + 1], _gnutls_cipher_suite_get_name (&data[j])); for (i = 0; i < cipher_suites_size; i += 2) { if (memcmp(&cipher_suites[i], &data[j], 2) == 0) { _gnutls_handshake_log ("HSK[%p]: Selected cipher suite: %s\n", session, _gnutls_cipher_suite_get_name (&data[j])); memcpy(session-> security_parameters. cipher_suite, &cipher_suites[i], 2); _gnutls_epoch_set_cipher_suite (session, EPOCH_NEXT, session->security_parameters. cipher_suite); retval = 0; goto finish; } } } } else { /* server selects */ for (i = 0; i < cipher_suites_size; i += 2) { for (j = 0; j < datalen; j += 2) { if (memcmp(&cipher_suites[i], &data[j], 2) == 0) { _gnutls_handshake_log ("HSK[%p]: Selected cipher suite: %s\n", session, _gnutls_cipher_suite_get_name (&data[j])); memcpy(session-> security_parameters. cipher_suite, &cipher_suites[i], 2); _gnutls_epoch_set_cipher_suite (session, EPOCH_NEXT, session->security_parameters. cipher_suite); retval = 0; goto finish; } } } } finish: if (retval != 0) { gnutls_assert(); return retval; } /* check if the credentials (username, public key etc.) are ok */ if (_gnutls_get_kx_cred (session, _gnutls_cipher_suite_get_kx_algo(session->security_parameters. cipher_suite)) == NULL) { gnutls_assert(); return GNUTLS_E_INSUFFICIENT_CREDENTIALS; } /* set the mod_auth_st to the appropriate struct * according to the KX algorithm. This is needed since all the * handshake functions are read from there; */ session->internals.auth_struct = _gnutls_kx_auth_struct(_gnutls_cipher_suite_get_kx_algo (session->security_parameters. 
cipher_suite)); if (session->internals.auth_struct == NULL) { _gnutls_handshake_log ("HSK[%p]: Cannot find the appropriate handler for the KX algorithm\n", session); gnutls_assert(); return GNUTLS_E_INTERNAL_ERROR; } return 0; }
1
223,390
static void do_caselesscmp(compiler_common *common) { DEFINE_COMPILER; struct sljit_jump *jump; struct sljit_label *label; int char1_reg = STR_END; int char2_reg; int lcc_table; int opt_type = 0; if (HAS_VIRTUAL_REGISTERS) { char2_reg = STACK_TOP; lcc_table = STACK_LIMIT; } else { char2_reg = RETURN_ADDR; lcc_table = TMP3; } if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) opt_type = 1; else if (sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_SUPP | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)) == SLJIT_SUCCESS) opt_type = 2; sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0); OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP2, 0); OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, char1_reg, 0); if (char2_reg == STACK_TOP) { OP1(SLJIT_MOV, TMP3, 0, char2_reg, 0); OP1(SLJIT_MOV, RETURN_ADDR, 0, lcc_table, 0); } OP1(SLJIT_MOV, lcc_table, 0, SLJIT_IMM, common->lcc); if (opt_type == 1) { label = LABEL(); sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_POST, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); } else if (opt_type == 2) { OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); label = LABEL(); sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char1_reg, SLJIT_MEM1(TMP1), IN_UCHARS(1)); sljit_emit_mem(compiler, MOV_UCHAR | SLJIT_MEM_PRE, char2_reg, SLJIT_MEM1(STR_PTR), IN_UCHARS(1)); } else { label = LABEL(); OP1(MOV_UCHAR, char1_reg, 0, SLJIT_MEM1(TMP1), 0); OP1(MOV_UCHAR, char2_reg, 0, SLJIT_MEM1(STR_PTR), 0); OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, IN_UCHARS(1)); } #if PCRE2_CODE_UNIT_WIDTH != 8 jump = CMP(SLJIT_GREATER, char1_reg, 0, SLJIT_IMM, 255); #endif OP1(SLJIT_MOV_U8, char1_reg, 0, SLJIT_MEM2(lcc_table, char1_reg), 0); #if PCRE2_CODE_UNIT_WIDTH != 8 JUMPHERE(jump); jump = 
CMP(SLJIT_GREATER, char2_reg, 0, SLJIT_IMM, 255); #endif OP1(SLJIT_MOV_U8, char2_reg, 0, SLJIT_MEM2(lcc_table, char2_reg), 0); #if PCRE2_CODE_UNIT_WIDTH != 8 JUMPHERE(jump); #endif if (opt_type == 0) OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); jump = CMP(SLJIT_NOT_EQUAL, char1_reg, 0, char2_reg, 0); OP2(SLJIT_SUB | SLJIT_SET_Z, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1)); JUMPTO(SLJIT_NOT_ZERO, label); JUMPHERE(jump); OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0); if (opt_type == 2) OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1)); if (char2_reg == STACK_TOP) { OP1(SLJIT_MOV, char2_reg, 0, TMP3, 0); OP1(SLJIT_MOV, lcc_table, 0, RETURN_ADDR, 0); } OP1(SLJIT_MOV, char1_reg, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1); OP_SRC(SLJIT_FAST_RETURN, TMP1, 0); }
0
281,126
struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) { struct xfrm_policy *policy; policy = kzalloc(sizeof(struct xfrm_policy), gfp); if (policy) { write_pnet(&policy->xp_net, net); INIT_LIST_HEAD(&policy->walk.all); INIT_HLIST_NODE(&policy->bydst); INIT_HLIST_NODE(&policy->byidx); rwlock_init(&policy->lock); refcount_set(&policy->refcnt, 1); skb_queue_head_init(&policy->polq.hold_queue); setup_timer(&policy->timer, xfrm_policy_timer, (unsigned long)policy); setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process, (unsigned long)policy); policy->flo.ops = &xfrm_policy_fc_ops; } return policy; }
0
222,841
bool SameShapes(ShapeHandle inferred_shape, ShapeHandle annotated_shape) const { if (inferred_shape.SameHandle(annotated_shape)) { return true; } if (InferenceContext::Rank(inferred_shape) != InferenceContext::Rank(annotated_shape)) { return false; } const int rank = InferenceContext::Rank(inferred_shape); for (int i = 0; i < rank; ++i) { int64_t val1 = InferenceContext::Value( InferenceContext::DimKnownRank(inferred_shape, i)); int64_t val2 = InferenceContext::Value( InferenceContext::DimKnownRank(annotated_shape, i)); if (val1 != val2) { return false; } } return true; }
0
437,386
add_opcode(regex_t* reg, int opcode) { BB_ADD1(reg, opcode); return 0; }
0
259,597
void HierarchicalBitmapRequester::CropEncodingRegion(RectAngle<LONG> &region,const struct RectangleRequest *) { #if ACCUSOFT_CODE int i; ClipToImage(region); // Find the region to request. for(i = 0;i < m_ucCount;i++) { if (m_pulReadyLines[i] < ULONG(region.ra_MinY)) region.ra_MinY = m_pulReadyLines[i]; } #else NOREF(region); #endif }
0
468,350
g_socket_client_add_application_proxy (GSocketClient *client, const gchar *protocol) { g_hash_table_add (client->priv->app_proxies, g_strdup (protocol)); }
0
139,236
void OverlayWindowViews::OnNativeWidgetDestroyed() { controller_->OnWindowDestroyed(); }
0
90,225
virtual bool wifi_available() const { return false; }
0
512,504
void set_geometry_type(uint type) { DBUG_ASSERT(0); }
0
276,977
void AddReference() { ++m_ReferenceCount; }
0
352,962
serialNumberAndIssuerNormalize( slap_mask_t usage, Syntax *syntax, MatchingRule *mr, struct berval *in, struct berval *out, void *ctx ) { struct berval sn, sn2, sn3, i, ni; char sbuf2[SLAP_SN_BUFLEN]; char sbuf3[SLAP_SN_BUFLEN]; char *p; int rc; assert( in != NULL ); assert( out != NULL ); Debug( LDAP_DEBUG_TRACE, ">>> serialNumberAndIssuerNormalize: <%s>\n", in->bv_val ); rc = serialNumberAndIssuerCheck( in, &sn, &i, ctx ); if ( rc ) { return rc; } rc = dnNormalize( usage, syntax, mr, &i, &ni, ctx ); if ( in->bv_val[0] == '{' && in->bv_val[in->bv_len-1] == '}' ) { slap_sl_free( i.bv_val, ctx ); } if ( rc ) { return LDAP_INVALID_SYNTAX; } /* Convert sn to canonical hex */ sn2.bv_val = sbuf2; if ( sn.bv_len > sizeof( sbuf2 ) ) { sn2.bv_val = slap_sl_malloc( sn.bv_len, ctx ); } sn2.bv_len = sn.bv_len; sn3.bv_val = sbuf3; sn3.bv_len = sizeof(sbuf3); if ( lutil_str2bin( &sn, &sn2, ctx ) || slap_bin2hex( &sn2, &sn3, ctx ) ) { rc = LDAP_INVALID_SYNTAX; goto func_leave; } out->bv_len = STRLENOF( "{ serialNumber , issuer rdnSequence:\"\" }" ) + sn3.bv_len + ni.bv_len; out->bv_val = slap_sl_malloc( out->bv_len + 1, ctx ); if ( out->bv_val == NULL ) { out->bv_len = 0; rc = LDAP_OTHER; goto func_leave; } p = out->bv_val; p = lutil_strcopy( p, "{ serialNumber " /*}*/ ); p = lutil_strbvcopy( p, &sn3 ); p = lutil_strcopy( p, ", issuer rdnSequence:\"" ); p = lutil_strbvcopy( p, &ni ); p = lutil_strcopy( p, /*{*/ "\" }" ); assert( p == &out->bv_val[out->bv_len] ); func_leave: Debug( LDAP_DEBUG_TRACE, "<<< serialNumberAndIssuerNormalize: <%s> => <%s>\n", in->bv_val, rc == LDAP_SUCCESS ? out->bv_val : "(err)" ); if ( sn2.bv_val != sbuf2 ) { slap_sl_free( sn2.bv_val, ctx ); } if ( sn3.bv_val != sbuf3 ) { slap_sl_free( sn3.bv_val, ctx ); } slap_sl_free( ni.bv_val, ctx ); return rc; }
0
240,588
void Compute(OpKernelContext* context) override { core::RefCountPtr<Var> variable; OP_REQUIRES_OK(context, LookupResource(context, HandleFromInput(context, 0), &variable)); const Tensor& value = context->input(1); // TODO(apassos): We could possibly avoid the copy done by // PrepareToUpdateVariable() for commutative operations like Op == // ADD if value's refcount was 1. mutex_lock ml(*variable->mu()); Tensor* var_tensor = variable->tensor(); OP_REQUIRES(context, var_tensor->shape().IsSameSize(value.shape()), errors::InvalidArgument("Cannot update variable with shape ", var_tensor->shape().DebugString(), " using a Tensor with shape ", value.shape().DebugString(), ", shapes must be equal.")); OP_REQUIRES_OK( context, PrepareToUpdateVariable<Device, T>( context, var_tensor, variable->copy_on_read_mode.load())); functor::DenseUpdate<Device, T, Op> update_functor; update_functor(context->eigen_device<Device>(), var_tensor->flat<T>(), value.flat<T>()); }
0
512,461
ValueBuffer() { reset_buffer(); }
0
390,573
ProcXkbSetIndicatorMap(ClientPtr client) { int i, bit; int nIndicators; DeviceIntPtr dev; xkbIndicatorMapWireDesc *from; int rc; REQUEST(xkbSetIndicatorMapReq); REQUEST_AT_LEAST_SIZE(xkbSetIndicatorMapReq); if (!(client->xkbClientFlags&_XkbClientInitialized)) return BadAccess; CHK_KBD_DEVICE(dev, stuff->deviceSpec, client, DixSetAttrAccess); if (stuff->which==0) return client->noClientException; for (nIndicators=i=0,bit=1;i<XkbNumIndicators;i++,bit<<=1) { if (stuff->which&bit) nIndicators++; } if (stuff->length!=((SIZEOF(xkbSetIndicatorMapReq)+ (nIndicators*SIZEOF(xkbIndicatorMapWireDesc)))/4)) { return BadLength; } from = (xkbIndicatorMapWireDesc *)&stuff[1]; for (i=0,bit=1;i<XkbNumIndicators;i++,bit<<=1) { if (stuff->which&bit) { if (client->swapped) { int n; swaps(&from->virtualMods,n); swapl(&from->ctrls,n); } CHK_MASK_LEGAL(i,from->whichGroups,XkbIM_UseAnyGroup); CHK_MASK_LEGAL(i,from->whichMods,XkbIM_UseAnyMods); from++; } } from = (xkbIndicatorMapWireDesc *)&stuff[1]; rc = _XkbSetIndicatorMap(client, dev, stuff->which, from); if (rc != Success) return rc; if (stuff->deviceSpec == XkbUseCoreKbd) { DeviceIntPtr other; for (other = inputInfo.devices; other; other = other->next) { if ((other != dev) && other->key && !other->isMaster && (other->u.master == dev)) { rc = XaceHook(XACE_DEVICE_ACCESS, client, other, DixSetAttrAccess); if (rc == Success) _XkbSetIndicatorMap(client, other, stuff->which, from); } } } return Success; }
0
267,981
R_API bool r_bin_file_set_cur_by_fd(RBin *bin, ut32 bin_fd) { RBinFile *bf = r_bin_file_find_by_fd (bin, bin_fd); return bf? r_bin_file_set_cur_binfile (bin, bf): false; }
0