idx
int64
func
string
target
int64
353,160
/*
 * Row-source callback for rendering a tiled pattern from a pre-rendered tile
 * bitmap.  Fills one output scanline (colorLine, and alphaLine if non-NULL)
 * by repeating the tile repeatX times horizontally; imgData->y and repeatY
 * track vertical repetition, and false is returned once every repeated row
 * has been produced.  For paintType 1 the tile's own colors are copied; for
 * other paint types the pattern color is combined with the tile's coverage.
 * NOTE(review): in the raw-access branch the per-tile copy length is the
 * bitmap row size, which may include padding beyond width * nComps — confirm
 * the two are equal for the modes that reach that branch.
 */
bool SplashOutputDev::tilingBitmapSrc(void *data, SplashColorPtr colorLine, unsigned char *alphaLine) { TilingSplashOutBitmap *imgData = (TilingSplashOutBitmap *)data; if (imgData->y == imgData->bitmap->getHeight()) { imgData->repeatY--; if (imgData->repeatY == 0) return false; imgData->y = 0; } if (imgData->paintType == 1) { const SplashColorMode cMode = imgData->bitmap->getMode(); SplashColorPtr q = colorLine; // For splashModeBGR8 and splashModeXBGR8 we need to use getPixel // for the others we can use raw access if (cMode == splashModeBGR8 || cMode == splashModeXBGR8) { for (int m = 0; m < imgData->repeatX; m++) { for (int x = 0; x < imgData->bitmap->getWidth(); x++) { imgData->bitmap->getPixel(x, imgData->y, q); q += splashColorModeNComps[cMode]; } } } else { const int n = imgData->bitmap->getRowSize(); SplashColorPtr p; for (int m = 0; m < imgData->repeatX; m++) { p = imgData->bitmap->getDataPtr() + imgData->y * imgData->bitmap->getRowSize(); for (int x = 0; x < n; ++x) { *q++ = *p++; } } } if (alphaLine != nullptr) { SplashColorPtr aq = alphaLine; SplashColorPtr p; const int n = imgData->bitmap->getWidth() - 1; for (int m = 0; m < imgData->repeatX; m++) { p = imgData->bitmap->getAlphaPtr() + imgData->y * imgData->bitmap->getWidth(); for (int x = 0; x < n; ++x) { *aq++ = *p++; } // This is a hack, because of how Splash antialias works if we overwrite the // last alpha pixel of the tile most/all of the files look much better *aq++ = (n == 0) ? 
*p : *(p - 1); } } } else { SplashColor col, pat; SplashColorPtr dest = colorLine; for (int m = 0; m < imgData->repeatX; m++) { for (int x = 0; x < imgData->bitmap->getWidth(); x++) { imgData->bitmap->getPixel(x, imgData->y, col); imgData->pattern->getColor(x, imgData->y, pat); for (int i = 0; i < splashColorModeNComps[imgData->colorMode]; ++i) { #ifdef SPLASH_CMYK if (imgData->colorMode == splashModeCMYK8 || imgData->colorMode == splashModeDeviceN8) dest[i] = div255(pat[i] * (255 - col[0])); else #endif dest[i] = 255 - div255((255 - pat[i]) * (255 - col[0])); } dest += splashColorModeNComps[imgData->colorMode]; } } if (alphaLine != nullptr) { const int y = (imgData->y == imgData->bitmap->getHeight() - 1 && imgData->y > 50) ? imgData->y - 1 : imgData->y; SplashColorPtr aq = alphaLine; SplashColorPtr p; const int n = imgData->bitmap->getWidth(); for (int m = 0; m < imgData->repeatX; m++) { p = imgData->bitmap->getAlphaPtr() + y * imgData->bitmap->getWidth(); for (int x = 0; x < n; ++x) { *aq++ = *p++; } } } } ++imgData->y; return true; }
0
225,498
bool MutableGraphView::AddFaninInternal(NodeDef* node, const OutputPort& fanin) { int num_regular_fanins = NumFanins(*node, /*include_controlling_nodes=*/false); bool input_is_control = IsOutputPortControlling(fanin); bool can_dedup_control_with_regular_input = CanDedupControlWithRegularInput(*this, *fanin.node); // Don't add duplicate control dependencies. if (input_is_control) { const int start = can_dedup_control_with_regular_input ? 0 : num_regular_fanins; for (int i = start; i < node->input_size(); ++i) { if (ParseTensorName(node->input(i)).node() == fanin.node->name()) { return false; } } } InputPort input; input.node = node; input.port_id = input_is_control ? Graph::kControlSlot : num_regular_fanins; node->add_input(TensorIdToString({fanin.node->name(), fanin.port_id})); if (!input_is_control) { const int last_node_input = node->input_size() - 1; // If there are control dependencies in node, move newly inserted fanin to // be before such control dependencies. if (num_regular_fanins < last_node_input) { node->mutable_input()->SwapElements(last_node_input, num_regular_fanins); } } fanouts()[fanin].insert(input); if (max_regular_output_port()[fanin.node] < fanin.port_id) { max_regular_output_port()[fanin.node] = fanin.port_id; } // Update max input port and dedup control dependencies. if (!input_is_control) { max_regular_input_port()[node] = num_regular_fanins; if (can_dedup_control_with_regular_input) { RemoveControllingFaninInternal(node, fanin.node); } } return true; }
0
255,787
// Requantizes a per-channel-quantized qint32 4-D tensor down to qint8/quint8
// using an MKL-DNN reorder primitive with per-channel output scales.
//
// Inputs (by the class' index constants): the qint32 tensor, per-channel
// min/max vectors, and the requested scalar output min/max.  Outputs: the
// requantized tensor plus the scalar output min/max actually used.
//
// Fixes vs. original: the second scalar-shape check validated
// input_requested_min twice, leaving input_requested_max completely
// unchecked (copy/paste error); also added the missing space in the 4D
// error message ("operatorsupports").
void Compute(OpKernelContext* ctx) override {
  try {
    const Tensor& input = ctx->input(kInputTensorIndex);
    OP_REQUIRES(
        ctx, input.dims() == 4,
        errors::InvalidArgument("Current RequantizePerChannel operator "
                                "supports 4D tensors only."));
    const Tensor& input_min_vec = ctx->input(kInputMinVecIndex);
    size_t depth = input_min_vec.NumElements();
    float* input_min_vec_data = (float*)const_cast<void*>(
        static_cast<const void*>(input_min_vec.flat<float>().data()));
    const Tensor& input_max_vec = ctx->input(kInputMaxVecIndex);
    OP_REQUIRES(
        ctx, input_max_vec.NumElements() == depth,
        errors::InvalidArgument("input_max has incorrect size, expected ",
                                depth, " was ", input_max_vec.NumElements()));
    float* input_max_vec_data = (float*)const_cast<void*>(
        static_cast<const void*>(input_max_vec.flat<float>().data()));

    const Tensor& input_requested_min = ctx->input(this->kRequestMinIndex);
    OP_REQUIRES(
        ctx, input_requested_min.NumElements() == 1,
        errors::InvalidArgument("requested_output_min must be a scalar"));
    const float input_requested_min_float =
        input_requested_min.flat<float>()(0);

    const Tensor& input_requested_max = ctx->input(this->kRequestMaxIndex);
    // BUG FIX: this used to re-validate input_requested_min, so a non-scalar
    // requested_output_max was never rejected.
    OP_REQUIRES(
        ctx, input_requested_max.NumElements() == 1,
        errors::InvalidArgument("requested_output_max must be a scalar"));
    const float input_requested_max_float =
        input_requested_max.flat<float>()(0);

    if (out_type_ == DT_QINT8) {
      // NOTE(review): the message mentions requested_output_max but the
      // condition constrains requested_output_min; message kept byte-for-byte
      // to preserve the external contract — confirm intent upstream.
      OP_REQUIRES(ctx, input_requested_min_float < 0.0f,
                  errors::InvalidArgument(
                      "If out_type is QINT8, requested_output_max must be "
                      "non negative, got ",
                      input_requested_min_float));
    }
    const float factor = (out_type_ == DT_QINT8) ? 127.0f : 255.0f;
    const float requested_min_max =
        std::max(std::abs(input_requested_min_float),
                 std::abs(input_requested_max_float));
    Tensor* output = nullptr;
    OP_REQUIRES_OK(ctx, ctx->allocate_output(kOutputTensorIndex,
                                             input.shape(), &output));

    // One scale per channel; source values are qint32, hence the 2^31.
    std::vector<float> scales(depth);
    for (size_t i = 0; i < depth; ++i) {
      float min_max_from_vec = std::max(std::abs(input_min_vec_data[i]),
                                        std::abs(input_max_vec_data[i]));
      scales[i] = factor * (min_max_from_vec / requested_min_max /
                            static_cast<float>(1L << 31));
    }

    mkldnn::primitive_attr reorder_attr;
    // mask == 2: one output scale per channel (dim 1 in NCHW order).
    reorder_attr.set_output_scales(2, scales);

    memory::dims dims_mkl_order =
        TFShapeToMklDnnDimsInNCHW(input.shape(), FORMAT_NHWC);
    memory::desc input_md = memory::desc(dims_mkl_order, MklDnnType<qint32>(),
                                         memory::format_tag::nhwc);
    memory::desc output_md =
        (out_type_ == DT_QINT8)
            ? memory::desc(dims_mkl_order, MklDnnType<qint8>(),
                           memory::format_tag::nhwc)
            : memory::desc(dims_mkl_order, MklDnnType<quint8>(),
                           memory::format_tag::nhwc);

    void* input_buf = static_cast<void*>(
        const_cast<qint32*>(input.flat<qint32>().data()));
    void* output_buf;
    if (out_type_ == DT_QINT8) {
      output_buf = static_cast<void*>(
          const_cast<qint8*>(output->flat<qint8>().data()));
    } else {
      output_buf = static_cast<void*>(
          const_cast<quint8*>(output->flat<quint8>().data()));
    }

    std::unique_ptr<memory> input_mem_prim(
        new memory(input_md, cpu_engine_, input_buf));
    std::unique_ptr<memory> output_mem_prim(
        new memory(output_md, cpu_engine_, output_buf));

    mkldnn::reorder::primitive_desc reorder_pd =
        ReorderPd(cpu_engine_, input_mem_prim->get_desc(), cpu_engine_,
                  output_mem_prim->get_desc(), reorder_attr);
    std::shared_ptr<stream> reorder_stream;
    MklDnnThreadPool eigen_tp(ctx);
    reorder_stream.reset(CreateStream(&eigen_tp, cpu_engine_));
    std::unordered_map<int, mkldnn::memory> reorder_args = {
        {MKLDNN_ARG_FROM, *input_mem_prim},
        {MKLDNN_ARG_TO, *output_mem_prim}};
    std::unique_ptr<mkldnn::primitive> reorder_prim(
        new mkldnn::reorder(reorder_pd));
    reorder_prim->execute(*reorder_stream, reorder_args);

    Tensor* output_min = nullptr;
    Tensor* output_max = nullptr;
    OP_REQUIRES_OK(ctx,
                   ctx->allocate_output(kOutputMinIndex, {}, &output_min));
    OP_REQUIRES_OK(ctx,
                   ctx->allocate_output(kOutputMaxIndex, {}, &output_max));
    output_min->flat<float>()(0) = input_requested_min_float;
    output_max->flat<float>()(0) = input_requested_max_float;
  } catch (mkldnn::error& e) {
    string error_msg = "Status: " + std::to_string(e.status) +
                       ", message: " + std::string(e.message) + ", in file " +
                       std::string(__FILE__) + ":" + std::to_string(__LINE__);
    OP_REQUIRES_OK(
        ctx, errors::Aborted("Operation received an exception:", error_msg));
  }
}
0
383,340
gdImageSetThickness (gdImagePtr im, int thickness) { im->thick = thickness; }
0
445,931
/*
 * Drag-data-get handler for the folder tree.  Either serializes the current
 * selection into the XFR_ATOM clipboard format, or — for XDS drags — checks
 * that the destination folder is readable/writable, records the pending
 * extraction state on the window, and answers "S"/"E" (success/error) per
 * the XDS protocol.  Returns FALSE when the window is busy or nothing is
 * selected.  NOTE: the return type line is not visible in this chunk.
 */
fr_window_folder_tree_drag_data_get (GtkWidget *widget, GdkDragContext *context, GtkSelectionData *selection_data, guint info, guint time, gpointer user_data) { FrWindow *window = user_data; GList *file_list; char *uri; GFile *destination; GFile *destination_folder; debug (DEBUG_INFO, "::DragDataGet -->\n"); if (window->priv->activity_ref > 0) return FALSE; file_list = fr_window_get_folder_tree_selection (window, TRUE, NULL); if (file_list == NULL) return FALSE; if (gtk_selection_data_get_target (selection_data) == XFR_ATOM) { FrClipboardData *tmp; char *data; tmp = fr_clipboard_data_new (); tmp->files = file_list; tmp->op = FR_CLIPBOARD_OP_COPY; tmp->base_dir = g_strdup (fr_window_get_current_location (window)); data = get_selection_data_from_clipboard_data (window, tmp); gtk_selection_data_set (selection_data, XFR_ATOM, 8, (guchar *) data, strlen (data)); fr_clipboard_data_unref (tmp); g_free (data); return TRUE; } if (! nautilus_xds_dnd_is_valid_xds_context (context)) return FALSE; uri = get_xds_atom_value (context); g_return_val_if_fail (uri != NULL, FALSE); destination = g_file_new_for_uri (uri); destination_folder = g_file_get_parent (destination); g_object_unref (destination); /* check whether the extraction can be performed in the destination * folder */ g_clear_error (&window->priv->drag_error); if (! 
_g_file_check_permissions (destination_folder, R_OK | W_OK)) { char *display_name; display_name = _g_file_get_display_basename (destination_folder); window->priv->drag_error = g_error_new (FR_ERROR, 0, _("You don't have the right permissions to extract archives in the folder \"%s\""), display_name); g_free (display_name); } if (window->priv->drag_error == NULL) { _g_object_unref (window->priv->drag_destination_folder); g_free (window->priv->drag_base_dir); _g_string_list_free (window->priv->drag_file_list); window->priv->drag_destination_folder = g_object_ref (destination_folder); window->priv->drag_base_dir = fr_window_get_selected_folder_in_tree_view (window); window->priv->drag_file_list = file_list; } g_object_unref (destination_folder); /* sends back the response */ gtk_selection_data_set (selection_data, gtk_selection_data_get_target (selection_data), 8, (guchar *) ((window->priv->drag_error == NULL) ? "S" : "E"), 1); debug (DEBUG_INFO, "::DragDataGet <--\n"); return TRUE; }
0
210,636
static void mkiss_close(struct tty_struct *tty) { struct mkiss *ax; write_lock_irq(&disc_data_lock); ax = tty->disc_data; tty->disc_data = NULL; write_unlock_irq(&disc_data_lock); if (!ax) return; /* * We have now ensured that nobody can start using ap from now on, but * we have to wait for all existing users to finish. */ if (!refcount_dec_and_test(&ax->refcnt)) wait_for_completion(&ax->dead); /* * Halt the transmit queue so that a new transmit cannot scribble * on our buffers */ netif_stop_queue(ax->dev); ax->tty = NULL; unregister_netdev(ax->dev); /* Free all AX25 frame buffers after unreg. */ kfree(ax->rbuff); kfree(ax->xbuff); free_netdev(ax->dev); }
1
401,493
static ssize_t _extract_entropy(struct entropy_store *r, void *buf, size_t nbytes, int fips) { ssize_t ret = 0, i; __u8 tmp[EXTRACT_SIZE]; unsigned long flags; while (nbytes) { extract_buf(r, tmp); if (fips) { spin_lock_irqsave(&r->lock, flags); if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) panic("Hardware RNG duplicated output!\n"); memcpy(r->last_data, tmp, EXTRACT_SIZE); spin_unlock_irqrestore(&r->lock, flags); } i = min_t(int, nbytes, EXTRACT_SIZE); memcpy(buf, tmp, i); nbytes -= i; buf += i; ret += i; } /* Wipe data just returned from memory */ memzero_explicit(tmp, sizeof(tmp)); return ret; }
0
226,194
void elng_box_del(GF_Box *s) { GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; if (ptr == NULL) return; if (ptr->extended_language) gf_free(ptr->extended_language); gf_free(ptr); }
0
226,383
GF_Box *jp2h_box_new() { ISOM_DECL_BOX_ALLOC(GF_J2KHeaderBox, GF_ISOM_BOX_TYPE_JP2H); return (GF_Box *)tmp;
0
477,975
void simplestring_addn(simplestring* target, const char* source, size_t add_len) { size_t newsize = target->size, incr = 0; if(target && source) { if(!target->str) { simplestring_init_str(target); } if((SIZE_MAX - add_len) < target->len || (SIZE_MAX - add_len - 1) < target->len) { /* check for overflows, if there's a potential overflow do nothing */ return; } if(target->len + add_len + 1 > target->size) { /* newsize is current length + new length */ newsize = target->len + add_len + 1; incr = target->size * 2; /* align to SIMPLESTRING_INCR increments */ if (incr) { newsize = newsize - (newsize % incr) + incr; } if(newsize < (target->len + add_len + 1)) { /* some kind of overflow happened */ return; } target->str = (char*)realloc(target->str, newsize); target->size = target->str ? newsize : 0; } if(target->str) { if(add_len) { memcpy(target->str + target->len, source, add_len); } target->len += add_len; target->str[target->len] = 0; /* null terminate */ } } }
0
455,352
operate_and_get_next (count, c) int count, c; { int where; /* Accept the current line. */ rl_newline (1, c); /* Find the current line, and find the next line to use. */ where = rl_explicit_arg ? count : where_history (); if (HISTORY_FULL () || (where >= history_length - 1) || rl_explicit_arg) saved_history_line_to_use = where; else saved_history_line_to_use = where + 1; old_rl_startup_hook = rl_startup_hook; rl_startup_hook = set_saved_history; return 0; }
0
333,037
/*
 * Converts a postfix regexp program (postfix..end) into an NFA, Thompson
 * style.  When nfa_calc_size is TRUE only the state count ("nstate") is
 * accumulated and no states are allocated; otherwise NFA fragments are
 * combined on an explicit stack (PUSH/POP macros) and the final fragment is
 * patched into the match state.  Returns the NFA start state, or NULL on
 * error or allocation failure.  NOTE: the return type line is not visible in
 * this chunk.
 */
post2nfa(int *postfix, int *end, int nfa_calc_size) { int *p; int mopen; int mclose; Frag_T *stack = NULL; Frag_T *stackp = NULL; Frag_T *stack_end = NULL; Frag_T e1; Frag_T e2; Frag_T e; nfa_state_T *s; nfa_state_T *s1; nfa_state_T *matchstate; nfa_state_T *ret = NULL; if (postfix == NULL) return NULL; #define PUSH(s) st_push((s), &stackp, stack_end) #define POP() st_pop(&stackp, stack); \
 if (stackp < stack) \
 { \
 st_error(postfix, end, p); \
 vim_free(stack); \
 return NULL; \
 } if (nfa_calc_size == FALSE) { // Allocate space for the stack. Max states on the stack: "nstate". stack = ALLOC_MULT(Frag_T, nstate + 1); if (stack == NULL) return NULL; stackp = stack; stack_end = stack + (nstate + 1); } for (p = postfix; p < end; ++p) { switch (*p) { case NFA_CONCAT: // Concatenation. // Pay attention: this operator does not exist in the r.e. itself // (it is implicit, really). It is added when r.e. is translated // to postfix form in re2post(). if (nfa_calc_size == TRUE) { // nstate += 0; break; } e2 = POP(); e1 = POP(); patch(e1.out, e2.start); PUSH(frag(e1.start, e2.out)); break; case NFA_OR: // Alternation if (nfa_calc_size == TRUE) { nstate++; break; } e2 = POP(); e1 = POP(); s = alloc_state(NFA_SPLIT, e1.start, e2.start); if (s == NULL) goto theend; PUSH(frag(s, append(e1.out, e2.out))); break; case NFA_STAR: // Zero or more, prefer more if (nfa_calc_size == TRUE) { nstate++; break; } e = POP(); s = alloc_state(NFA_SPLIT, e.start, NULL); if (s == NULL) goto theend; patch(e.out, s); PUSH(frag(s, list1(&s->out1))); break; case NFA_STAR_NONGREEDY: // Zero or more, prefer zero if (nfa_calc_size == TRUE) { nstate++; break; } e = POP(); s = alloc_state(NFA_SPLIT, NULL, e.start); if (s == NULL) goto theend; patch(e.out, s); PUSH(frag(s, list1(&s->out))); break; case NFA_QUEST: // one or zero atoms=> greedy match if (nfa_calc_size == TRUE) { nstate++; break; } e = POP(); s = alloc_state(NFA_SPLIT, e.start, NULL); if (s == NULL) goto theend; PUSH(frag(s, append(e.out, 
list1(&s->out1)))); break; case NFA_QUEST_NONGREEDY: // zero or one atoms => non-greedy match if (nfa_calc_size == TRUE) { nstate++; break; } e = POP(); s = alloc_state(NFA_SPLIT, NULL, e.start); if (s == NULL) goto theend; PUSH(frag(s, append(e.out, list1(&s->out)))); break; case NFA_END_COLL: case NFA_END_NEG_COLL: // On the stack is the sequence starting with NFA_START_COLL or // NFA_START_NEG_COLL and all possible characters. Patch it to // add the output to the start. if (nfa_calc_size == TRUE) { nstate++; break; } e = POP(); s = alloc_state(NFA_END_COLL, NULL, NULL); if (s == NULL) goto theend; patch(e.out, s); e.start->out1 = s; PUSH(frag(e.start, list1(&s->out))); break; case NFA_RANGE: // Before this are two characters, the low and high end of a // range. Turn them into two states with MIN and MAX. if (nfa_calc_size == TRUE) { // nstate += 0; break; } e2 = POP(); e1 = POP(); e2.start->val = e2.start->c; e2.start->c = NFA_RANGE_MAX; e1.start->val = e1.start->c; e1.start->c = NFA_RANGE_MIN; patch(e1.out, e2.start); PUSH(frag(e1.start, e2.out)); break; case NFA_EMPTY: // 0-length, used in a repetition with max/min count of 0 if (nfa_calc_size == TRUE) { nstate++; break; } s = alloc_state(NFA_EMPTY, NULL, NULL); if (s == NULL) goto theend; PUSH(frag(s, list1(&s->out))); break; case NFA_OPT_CHARS: { int n; // \%[abc] implemented as: // NFA_SPLIT // +-CHAR(a) // | +-NFA_SPLIT // | +-CHAR(b) // | | +-NFA_SPLIT // | | +-CHAR(c) // | | | +-next // | | +- next // | +- next // +- next n = *++p; // get number of characters if (nfa_calc_size == TRUE) { nstate += n; break; } s = NULL; // avoid compiler warning e1.out = NULL; // stores list with out1's s1 = NULL; // previous NFA_SPLIT to connect to while (n-- > 0) { e = POP(); // get character s = alloc_state(NFA_SPLIT, e.start, NULL); if (s == NULL) goto theend; if (e1.out == NULL) e1 = e; patch(e.out, s1); append(e1.out, list1(&s->out1)); s1 = s; } PUSH(frag(s, e1.out)); break; } case NFA_PREV_ATOM_NO_WIDTH: case 
NFA_PREV_ATOM_NO_WIDTH_NEG: case NFA_PREV_ATOM_JUST_BEFORE: case NFA_PREV_ATOM_JUST_BEFORE_NEG: case NFA_PREV_ATOM_LIKE_PATTERN: { int before = (*p == NFA_PREV_ATOM_JUST_BEFORE || *p == NFA_PREV_ATOM_JUST_BEFORE_NEG); int pattern = (*p == NFA_PREV_ATOM_LIKE_PATTERN); int start_state; int end_state; int n = 0; nfa_state_T *zend; nfa_state_T *skip; switch (*p) { case NFA_PREV_ATOM_NO_WIDTH: start_state = NFA_START_INVISIBLE; end_state = NFA_END_INVISIBLE; break; case NFA_PREV_ATOM_NO_WIDTH_NEG: start_state = NFA_START_INVISIBLE_NEG; end_state = NFA_END_INVISIBLE_NEG; break; case NFA_PREV_ATOM_JUST_BEFORE: start_state = NFA_START_INVISIBLE_BEFORE; end_state = NFA_END_INVISIBLE; break; case NFA_PREV_ATOM_JUST_BEFORE_NEG: start_state = NFA_START_INVISIBLE_BEFORE_NEG; end_state = NFA_END_INVISIBLE_NEG; break; default: // NFA_PREV_ATOM_LIKE_PATTERN: start_state = NFA_START_PATTERN; end_state = NFA_END_PATTERN; break; } if (before) n = *++p; // get the count // The \@= operator: match the preceding atom with zero width. // The \@! operator: no match for the preceding atom. // The \@<= operator: match for the preceding atom. // The \@<! operator: no match for the preceding atom. // Surrounds the preceding atom with START_INVISIBLE and // END_INVISIBLE, similarly to MOPEN. if (nfa_calc_size == TRUE) { nstate += pattern ? 4 : 2; break; } e = POP(); s1 = alloc_state(end_state, NULL, NULL); if (s1 == NULL) goto theend; s = alloc_state(start_state, e.start, s1); if (s == NULL) goto theend; if (pattern) { // NFA_ZEND -> NFA_END_PATTERN -> NFA_SKIP -> what follows. skip = alloc_state(NFA_SKIP, NULL, NULL); if (skip == NULL) goto theend; zend = alloc_state(NFA_ZEND, s1, NULL); if (zend == NULL) goto theend; s1->out= skip; patch(e.out, zend); PUSH(frag(s, list1(&skip->out))); } else { patch(e.out, s1); PUSH(frag(s, list1(&s1->out))); if (before) { if (n <= 0) // See if we can guess the maximum width, it avoids a // lot of pointless tries. 
n = nfa_max_width(e.start, 0); s->val = n; // store the count } } break; } case NFA_COMPOSING: // char with composing char #if 0 // TODO if (regflags & RF_ICOMBINE) { // use the base character only } #endif // FALLTHROUGH case NFA_MOPEN: // \( \) Submatch case NFA_MOPEN1: case NFA_MOPEN2: case NFA_MOPEN3: case NFA_MOPEN4: case NFA_MOPEN5: case NFA_MOPEN6: case NFA_MOPEN7: case NFA_MOPEN8: case NFA_MOPEN9: #ifdef FEAT_SYN_HL case NFA_ZOPEN: // \z( \) Submatch case NFA_ZOPEN1: case NFA_ZOPEN2: case NFA_ZOPEN3: case NFA_ZOPEN4: case NFA_ZOPEN5: case NFA_ZOPEN6: case NFA_ZOPEN7: case NFA_ZOPEN8: case NFA_ZOPEN9: #endif case NFA_NOPEN: // \%( \) "Invisible Submatch" if (nfa_calc_size == TRUE) { nstate += 2; break; } mopen = *p; switch (*p) { case NFA_NOPEN: mclose = NFA_NCLOSE; break; #ifdef FEAT_SYN_HL case NFA_ZOPEN: mclose = NFA_ZCLOSE; break; case NFA_ZOPEN1: mclose = NFA_ZCLOSE1; break; case NFA_ZOPEN2: mclose = NFA_ZCLOSE2; break; case NFA_ZOPEN3: mclose = NFA_ZCLOSE3; break; case NFA_ZOPEN4: mclose = NFA_ZCLOSE4; break; case NFA_ZOPEN5: mclose = NFA_ZCLOSE5; break; case NFA_ZOPEN6: mclose = NFA_ZCLOSE6; break; case NFA_ZOPEN7: mclose = NFA_ZCLOSE7; break; case NFA_ZOPEN8: mclose = NFA_ZCLOSE8; break; case NFA_ZOPEN9: mclose = NFA_ZCLOSE9; break; #endif case NFA_COMPOSING: mclose = NFA_END_COMPOSING; break; default: // NFA_MOPEN, NFA_MOPEN1 .. NFA_MOPEN9 mclose = *p + NSUBEXP; break; } // Allow "NFA_MOPEN" as a valid postfix representation for // the empty regexp "". In this case, the NFA will be // NFA_MOPEN -> NFA_MCLOSE. 
Note that this also allows // empty groups of parenthesis, and empty mbyte chars if (stackp == stack) { s = alloc_state(mopen, NULL, NULL); if (s == NULL) goto theend; s1 = alloc_state(mclose, NULL, NULL); if (s1 == NULL) goto theend; patch(list1(&s->out), s1); PUSH(frag(s, list1(&s1->out))); break; } // At least one node was emitted before NFA_MOPEN, so // at least one node will be between NFA_MOPEN and NFA_MCLOSE e = POP(); s = alloc_state(mopen, e.start, NULL); // `(' if (s == NULL) goto theend; s1 = alloc_state(mclose, NULL, NULL); // `)' if (s1 == NULL) goto theend; patch(e.out, s1); if (mopen == NFA_COMPOSING) // COMPOSING->out1 = END_COMPOSING patch(list1(&s->out1), s1); PUSH(frag(s, list1(&s1->out))); break; case NFA_BACKREF1: case NFA_BACKREF2: case NFA_BACKREF3: case NFA_BACKREF4: case NFA_BACKREF5: case NFA_BACKREF6: case NFA_BACKREF7: case NFA_BACKREF8: case NFA_BACKREF9: #ifdef FEAT_SYN_HL case NFA_ZREF1: case NFA_ZREF2: case NFA_ZREF3: case NFA_ZREF4: case NFA_ZREF5: case NFA_ZREF6: case NFA_ZREF7: case NFA_ZREF8: case NFA_ZREF9: #endif if (nfa_calc_size == TRUE) { nstate += 2; break; } s = alloc_state(*p, NULL, NULL); if (s == NULL) goto theend; s1 = alloc_state(NFA_SKIP, NULL, NULL); if (s1 == NULL) goto theend; patch(list1(&s->out), s1); PUSH(frag(s, list1(&s1->out))); break; case NFA_LNUM: case NFA_LNUM_GT: case NFA_LNUM_LT: case NFA_VCOL: case NFA_VCOL_GT: case NFA_VCOL_LT: case NFA_COL: case NFA_COL_GT: case NFA_COL_LT: case NFA_MARK: case NFA_MARK_GT: case NFA_MARK_LT: { int n = *++p; // lnum, col or mark name if (nfa_calc_size == TRUE) { nstate += 1; break; } s = alloc_state(p[-1], NULL, NULL); if (s == NULL) goto theend; s->val = n; PUSH(frag(s, list1(&s->out))); break; } case NFA_ZSTART: case NFA_ZEND: default: // Operands if (nfa_calc_size == TRUE) { nstate++; break; } s = alloc_state(*p, NULL, NULL); if (s == NULL) goto theend; PUSH(frag(s, list1(&s->out))); break; } // switch(*p) } // for(p = postfix; *p; ++p) if (nfa_calc_size == TRUE) { 
nstate++; goto theend; // Return value when counting size is ignored anyway } e = POP(); if (stackp != stack) { vim_free(stack); EMSG_RET_NULL(_("E875: (NFA regexp) (While converting from postfix to NFA), too many states left on stack")); } if (istate >= nstate) { vim_free(stack); EMSG_RET_NULL(_("E876: (NFA regexp) Not enough space to store the whole NFA ")); } matchstate = &state_ptr[istate++]; // the match state matchstate->c = NFA_MATCH; matchstate->out = matchstate->out1 = NULL; matchstate->id = 0; patch(e.out, matchstate); ret = e.start; theend: vim_free(stack); return ret; #undef POP1 #undef PUSH1 #undef POP2 #undef PUSH2 #undef POP #undef PUSH }
0
332,390
/*
 * Computes byte/char/word/line totals for the whole buffer and for the
 * cursor position or Visual selection ("g CTRL-G").  With dict == NULL the
 * result is formatted into IObuff and shown as a message; otherwise the
 * counts are stored in dict.  Handles all three Visual modes, DOS line
 * endings (eol_size == 2), a missing final EOL, and adds the BOM size where
 * present.  Periodically checks for CTRL-C while scanning.  NOTE: the return
 * type line is not visible in this chunk.
 */
cursor_pos_info(dict_T *dict) { char_u *p; char_u buf1[50]; char_u buf2[40]; linenr_T lnum; varnumber_T byte_count = 0; varnumber_T bom_count = 0; varnumber_T byte_count_cursor = 0; varnumber_T char_count = 0; varnumber_T char_count_cursor = 0; varnumber_T word_count = 0; varnumber_T word_count_cursor = 0; int eol_size; varnumber_T last_check = 100000L; long line_count_selected = 0; pos_T min_pos, max_pos; oparg_T oparg; struct block_def bd; /* * Compute the length of the file in characters. */ if (curbuf->b_ml.ml_flags & ML_EMPTY) { if (dict == NULL) { msg(_(no_lines_msg)); return; } } else { if (get_fileformat(curbuf) == EOL_DOS) eol_size = 2; else eol_size = 1; if (VIsual_active) { if (LT_POS(VIsual, curwin->w_cursor)) { min_pos = VIsual; max_pos = curwin->w_cursor; } else { min_pos = curwin->w_cursor; max_pos = VIsual; } if (*p_sel == 'e' && max_pos.col > 0) --max_pos.col; if (VIsual_mode == Ctrl_V) { #ifdef FEAT_LINEBREAK char_u * saved_sbr = p_sbr; char_u * saved_w_sbr = curwin->w_p_sbr; // Make 'sbr' empty for a moment to get the correct size. p_sbr = empty_option; curwin->w_p_sbr = empty_option; #endif oparg.is_VIsual = 1; oparg.block_mode = TRUE; oparg.op_type = OP_NOP; getvcols(curwin, &min_pos, &max_pos, &oparg.start_vcol, &oparg.end_vcol); #ifdef FEAT_LINEBREAK p_sbr = saved_sbr; curwin->w_p_sbr = saved_w_sbr; #endif if (curwin->w_curswant == MAXCOL) oparg.end_vcol = MAXCOL; // Swap the start, end vcol if needed if (oparg.end_vcol < oparg.start_vcol) { oparg.end_vcol += oparg.start_vcol; oparg.start_vcol = oparg.end_vcol - oparg.start_vcol; oparg.end_vcol -= oparg.start_vcol; } } line_count_selected = max_pos.lnum - min_pos.lnum + 1; } for (lnum = 1; lnum <= curbuf->b_ml.ml_line_count; ++lnum) { // Check for a CTRL-C every 100000 characters. if (byte_count > last_check) { ui_breakcheck(); if (got_int) return; last_check = byte_count + 100000L; } // Do extra processing for VIsual mode. 
if (VIsual_active && lnum >= min_pos.lnum && lnum <= max_pos.lnum) { char_u *s = NULL; long len = 0L; switch (VIsual_mode) { case Ctrl_V: virtual_op = virtual_active(); block_prep(&oparg, &bd, lnum, 0); virtual_op = MAYBE; s = bd.textstart; len = (long)bd.textlen; break; case 'V': s = ml_get(lnum); len = MAXCOL; break; case 'v': { colnr_T start_col = (lnum == min_pos.lnum) ? min_pos.col : 0; colnr_T end_col = (lnum == max_pos.lnum) ? max_pos.col - start_col + 1 : MAXCOL; s = ml_get(lnum) + start_col; len = end_col; } break; } if (s != NULL) { byte_count_cursor += line_count_info(s, &word_count_cursor, &char_count_cursor, len, eol_size); if (lnum == curbuf->b_ml.ml_line_count && !curbuf->b_p_eol && (curbuf->b_p_bin || !curbuf->b_p_fixeol) && (long)STRLEN(s) < len) byte_count_cursor -= eol_size; } } else { // In non-visual mode, check for the line the cursor is on if (lnum == curwin->w_cursor.lnum) { word_count_cursor += word_count; char_count_cursor += char_count; byte_count_cursor = byte_count + line_count_info(ml_get(lnum), &word_count_cursor, &char_count_cursor, (varnumber_T)(curwin->w_cursor.col + 1), eol_size); } } // Add to the running totals byte_count += line_count_info(ml_get(lnum), &word_count, &char_count, (varnumber_T)MAXCOL, eol_size); } // Correction for when last line doesn't have an EOL. 
if (!curbuf->b_p_eol && (curbuf->b_p_bin || !curbuf->b_p_fixeol)) byte_count -= eol_size; if (dict == NULL) { if (VIsual_active) { if (VIsual_mode == Ctrl_V && curwin->w_curswant < MAXCOL) { getvcols(curwin, &min_pos, &max_pos, &min_pos.col, &max_pos.col); vim_snprintf((char *)buf1, sizeof(buf1), _("%ld Cols; "), (long)(oparg.end_vcol - oparg.start_vcol + 1)); } else buf1[0] = NUL; if (char_count_cursor == byte_count_cursor && char_count == byte_count) vim_snprintf((char *)IObuff, IOSIZE, _("Selected %s%ld of %ld Lines; %lld of %lld Words; %lld of %lld Bytes"), buf1, line_count_selected, (long)curbuf->b_ml.ml_line_count, (varnumber_T)word_count_cursor, (varnumber_T)word_count, (varnumber_T)byte_count_cursor, (varnumber_T)byte_count); else vim_snprintf((char *)IObuff, IOSIZE, _("Selected %s%ld of %ld Lines; %lld of %lld Words; %lld of %lld Chars; %lld of %lld Bytes"), buf1, line_count_selected, (long)curbuf->b_ml.ml_line_count, (varnumber_T)word_count_cursor, (varnumber_T)word_count, (varnumber_T)char_count_cursor, (varnumber_T)char_count, (varnumber_T)byte_count_cursor, (varnumber_T)byte_count); } else { p = ml_get_curline(); validate_virtcol(); col_print(buf1, sizeof(buf1), (int)curwin->w_cursor.col + 1, (int)curwin->w_virtcol + 1); col_print(buf2, sizeof(buf2), (int)STRLEN(p), linetabsize(p)); if (char_count_cursor == byte_count_cursor && char_count == byte_count) vim_snprintf((char *)IObuff, IOSIZE, _("Col %s of %s; Line %ld of %ld; Word %lld of %lld; Byte %lld of %lld"), (char *)buf1, (char *)buf2, (long)curwin->w_cursor.lnum, (long)curbuf->b_ml.ml_line_count, (varnumber_T)word_count_cursor, (varnumber_T)word_count, (varnumber_T)byte_count_cursor, (varnumber_T)byte_count); else vim_snprintf((char *)IObuff, IOSIZE, _("Col %s of %s; Line %ld of %ld; Word %lld of %lld; Char %lld of %lld; Byte %lld of %lld"), (char *)buf1, (char *)buf2, (long)curwin->w_cursor.lnum, (long)curbuf->b_ml.ml_line_count, (varnumber_T)word_count_cursor, (varnumber_T)word_count, 
(varnumber_T)char_count_cursor, (varnumber_T)char_count, (varnumber_T)byte_count_cursor, (varnumber_T)byte_count); } } bom_count = bomb_size(); if (dict == NULL && bom_count > 0) { size_t len = STRLEN(IObuff); vim_snprintf((char *)IObuff + len, IOSIZE - len, _("(+%lld for BOM)"), (varnumber_T)bom_count); } if (dict == NULL) { // Don't shorten this message, the user asked for it. p = p_shm; p_shm = (char_u *)""; msg((char *)IObuff); p_shm = p; } } #if defined(FEAT_EVAL) if (dict != NULL) { dict_add_number(dict, "words", word_count); dict_add_number(dict, "chars", char_count); dict_add_number(dict, "bytes", byte_count + bom_count); dict_add_number(dict, VIsual_active ? "visual_bytes" : "cursor_bytes", byte_count_cursor); dict_add_number(dict, VIsual_active ? "visual_chars" : "cursor_chars", char_count_cursor); dict_add_number(dict, VIsual_active ? "visual_words" : "cursor_words", word_count_cursor); } #endif }
0
513,048
static int cmp_row_type(Item* item1, Item* item2) { uint n= item1->cols(); if (item2->check_cols(n)) return 1; for (uint i=0; i<n; i++) { if (item2->element_index(i)->check_cols(item1->element_index(i)->cols()) || (item1->element_index(i)->result_type() == ROW_RESULT && cmp_row_type(item1->element_index(i), item2->element_index(i)))) return 1; } return 0; }
0
240,296
get_y_current(void) { return y_current; }
0
512,284
bool Item_func_regexp_instr::fix_fields(THD *thd, Item **ref) { re.set_recursion_limit(thd); return Item_int_func::fix_fields(thd, ref); }
0
294,418
cmp_dd(VALUE self, VALUE other) { get_d2(self, other); { VALUE a_nth, b_nth, a_sf, b_sf; int a_jd, b_jd, a_df, b_df; m_canonicalize_jd(self, adat); m_canonicalize_jd(other, bdat); a_nth = m_nth(adat); b_nth = m_nth(bdat); if (f_eqeq_p(a_nth, b_nth)) { a_jd = m_jd(adat); b_jd = m_jd(bdat); if (a_jd == b_jd) { a_df = m_df(adat); b_df = m_df(bdat); if (a_df == b_df) { a_sf = m_sf(adat); b_sf = m_sf(bdat); if (f_eqeq_p(a_sf, b_sf)) { return INT2FIX(0); } else if (f_lt_p(a_sf, b_sf)) { return INT2FIX(-1); } else { return INT2FIX(1); } } else if (a_df < b_df) { return INT2FIX(-1); } else { return INT2FIX(1); } } else if (a_jd < b_jd) { return INT2FIX(-1); } else { return INT2FIX(1); } } else if (f_lt_p(a_nth, b_nth)) { return INT2FIX(-1); } else { return INT2FIX(1); } } }
0
175,778
CallbackList& unlimited_callbacks() { return unlimited_callbacks_; }
0
226,074
#ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sbgp_box_write(GF_Box *s, GF_BitStream *bs) { u32 i; GF_Err e; GF_SampleGroupBox *p = (GF_SampleGroupBox*)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, p->grouping_type); if (p->version==1) gf_bs_write_u32(bs, p->grouping_type_parameter); gf_bs_write_u32(bs, p->entry_count); for (i = 0; i<p->entry_count; i++ ) { gf_bs_write_u32(bs, p->sample_entries[i].sample_count); gf_bs_write_u32(bs, p->sample_entries[i].group_description_index); } return GF_OK;
0
369,226
static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) { struct io_tctx_node *node; enum io_wq_cancel cret; bool ret = false; mutex_lock(&ctx->uring_lock); list_for_each_entry(node, &ctx->tctx_list, ctx_node) { struct io_uring_task *tctx = node->task->io_uring; /* * io_wq will stay alive while we hold uring_lock, because it's * killed after ctx nodes, which requires to take the lock. */ if (!tctx || !tctx->io_wq) continue; cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); ret |= (cret != IO_WQ_CANCEL_NOTFOUND); } mutex_unlock(&ctx->uring_lock); return ret;
0
238,549
static bool type_is_rdonly_mem(u32 type) { return type & MEM_RDONLY; }
0
391,668
bool is_deferred_open_async(const void *ptr) { const struct deferred_open_record *state = (const struct deferred_open_record *)ptr; return state->async_open; }
0
445,965
fr_window_history_add (FrWindow *window, const char *path) { if ((window->priv->history_current == NULL) || (g_strcmp0 (path, window->priv->history_current->data) != 0)) { GList *scan; GList *new_current = NULL; /* search the path in the history */ for (scan = window->priv->history_current; scan; scan = scan->next) { char *path_in_history = scan->data; if (g_strcmp0 (path, path_in_history) == 0) { new_current = scan; break; } } if (new_current != NULL) { window->priv->history_current = new_current; } else { /* remove all the paths after the current position */ for (scan = window->priv->history; scan && (scan != window->priv->history_current); /* void */) { GList *next = scan->next; window->priv->history = g_list_remove_link (window->priv->history, scan); _g_string_list_free (scan); scan = next; } window->priv->history = g_list_prepend (window->priv->history, g_strdup (path)); window->priv->history_current = window->priv->history; } } }
0
312,479
free_efm_list(efm_T **efm_first) { efm_T *efm_ptr; for (efm_ptr = *efm_first; efm_ptr != NULL; efm_ptr = *efm_first) { *efm_first = efm_ptr->next; vim_regfree(efm_ptr->prog); vim_free(efm_ptr); } fmt_start = NULL; }
0
430,358
seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos) { struct hlist_node *node; for_each_possible_cpu(*cpu) { hlist_for_each(node, per_cpu_ptr(head, *cpu)) { if (pos-- == 0) return node; } } return NULL; }
0
389,722
check_for_opt_dict_arg(typval_T *args, int idx) { return (args[idx].v_type == VAR_UNKNOWN || check_for_dict_arg(args, idx) != FAIL); }
0
246,464
const char *r_bin_wasm_get_function_name(RBinWasmObj *bin, ut32 idx) { if (!(bin && bin->g_names)) { return NULL; }; RListIter *iter; RBinWasmCustomNameEntry *nam; r_list_foreach (bin->g_names, iter, nam) { if (nam->type == R_BIN_WASM_NAMETYPE_Function) { const char *n = r_id_storage_get (nam->func->names, idx); if (n) { return n; } } } return NULL; }
0
521,472
bool writeDirectoryEntry (OutputStream& target) { target.writeInt (0x02014b50); target.writeShort (symbolicLink ? 0x0314 : 0x0014); writeFlagsAndSizes (target); target.writeShort (0); // comment length target.writeShort (0); // start disk num target.writeShort (0); // internal attributes target.writeInt ((int) (symbolicLink ? 0xA1ED0000 : 0)); // external attributes target.writeInt ((int) (uint32) headerStart); target << storedPathname; return true; }
0
232,961
void Curl_unencode_cleanup(struct Curl_easy *data) { (void) data; }
0
343,308
void setprocessname(const char * const title) { #ifndef NO_PROCNAME_CHANGE # ifdef HAVE_SETPROCTITLE setproctitle("-%s", title); # elif defined(__linux__) if (argv0 != NULL && argv_lth > strlen(title) - 2) { memset(argv0[0], 0, argv_lth); strncpy(argv0[0], title, argv_lth - 2); argv0[1] = NULL; } # elif defined(__hpux__) union pstun pst; pst.pst_command = title; pstat(PSTAT_SETCMD, pst, strlen(title), 0, 0); # endif #endif (void) title; }
0
316,973
static int selinux_inode_copy_up_xattr(const char *name) { /* The copy_up hook above sets the initial context on an inode, but we * don't then want to overwrite it by blindly copying all the lower * xattrs up. Instead, we have to filter out SELinux-related xattrs. */ if (strcmp(name, XATTR_NAME_SELINUX) == 0) return 1; /* Discard */ /* * Any other attribute apart from SELINUX is not claimed, supported * by selinux. */ return -EOPNOTSUPP; }
0
359,654
DEFUN (no_bgp_fast_external_failover, no_bgp_fast_external_failover_cmd, "no bgp fast-external-failover", NO_STR BGP_STR "Immediately reset session if a link to a directly connected external peer goes down\n") { struct bgp *bgp; bgp = vty->index; bgp_flag_set (bgp, BGP_FLAG_NO_FAST_EXT_FAILOVER); return CMD_SUCCESS; }
0
297,212
static int exif_process_unicode(image_info_type *ImageInfo, xp_field_type *xp_field, int tag, char *szValuePtr, int ByteCount TSRMLS_DC) { xp_field->tag = tag; xp_field->value = NULL; /* XXX this will fail again if encoding_converter returns on error something different than SIZE_MAX */ if (zend_multibyte_encoding_converter( (unsigned char**)&xp_field->value, &xp_field->size, (unsigned char*)szValuePtr, ByteCount, zend_multibyte_fetch_encoding(ImageInfo->encode_unicode TSRMLS_CC), zend_multibyte_fetch_encoding(ImageInfo->motorola_intel ? ImageInfo->decode_unicode_be : ImageInfo->decode_unicode_le TSRMLS_CC) TSRMLS_CC) == (size_t)-1) { xp_field->size = exif_process_string_raw(&xp_field->value, szValuePtr, ByteCount); } return xp_field->size; }
0
498,623
upsample (guchar *dest, const guchar *src, guint width, guint bytes, guint alpha) { guint x; for (x = 0; x < width; x++) { dest[0] = ((src[1] << 1) & 0xf8); dest[0] += (dest[0] >> 5); dest[1] = ((src[0] & 0xe0) >> 2) + ((src[1] & 0x03) << 6); dest[1] += (dest[1] >> 5); dest[2] = ((src[0] << 3) & 0xf8); dest[2] += (dest[2] >> 5); if (alpha) { dest[3] = (src[1] & 0x80) ? 255 : 0; dest += 4; } else { dest += 3; } src += bytes; } }
0
436,115
static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) { struct open_flags op; struct file *file; bool nonblock_set; bool resolve_nonblock; int ret; ret = build_open_flags(&req->open.how, &op); if (ret) goto err; nonblock_set = op.open_flag & O_NONBLOCK; resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED; if (issue_flags & IO_URING_F_NONBLOCK) { /* * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open, * it'll always -EAGAIN */ if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE)) return -EAGAIN; op.lookup_flags |= LOOKUP_CACHED; op.open_flag |= O_NONBLOCK; } ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); if (ret < 0) goto err; file = do_filp_open(req->open.dfd, req->open.filename, &op); if (IS_ERR(file)) { /* * We could hang on to this 'fd' on retrying, but seems like * marginal gain for something that is now known to be a slower * path. So just put it, and we'll get a new one when we retry. */ put_unused_fd(ret); ret = PTR_ERR(file); /* only retry if RESOLVE_CACHED wasn't already set by application */ if (ret == -EAGAIN && (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK))) return -EAGAIN; goto err; } if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set) file->f_flags &= ~O_NONBLOCK; fsnotify_open(file); fd_install(ret, file); err: putname(req->open.filename); req->flags &= ~REQ_F_NEED_CLEANUP; if (ret < 0) req_set_fail(req); __io_req_complete(req, issue_flags, ret, 0); return 0; }
0
256,420
PJ_DEF(void) pjmedia_rtcp_rx_rtcp( pjmedia_rtcp_session *sess, const void *pkt, pj_size_t size) { pj_uint8_t *p, *p_end; p = (pj_uint8_t*)pkt; p_end = p + size; while (p < p_end) { pjmedia_rtcp_common *common; unsigned len; if (p + sizeof(pjmedia_rtcp_common) > p_end) { TRACE_((sess->name, "Receiving truncated RTCP packet (1)")); break; } common = (pjmedia_rtcp_common*)p; len = (pj_ntohs((pj_uint16_t)common->length)+1) * 4; if (p + len > p_end) { TRACE_((sess->name, "Receiving truncated RTCP packet (2)")); break; } switch(common->pt) { case RTCP_SR: case RTCP_RR: case RTCP_XR: parse_rtcp_report(sess, p, len); break; case RTCP_SDES: parse_rtcp_sdes(sess, p, len); break; case RTCP_BYE: parse_rtcp_bye(sess, p, len); break; case RTCP_RTPFB: case RTCP_PSFB: parse_rtcp_fb(sess, p, len); break; default: /* Ignore unknown RTCP */ TRACE_((sess->name, "Received unknown RTCP packet type=%d", common->pt)); break; } p += len; } }
0
292,243
set_default_modes (server *serv) { char modes[8]; modes[0] = '+'; modes[1] = '\0'; if (prefs.hex_irc_wallops) strcat (modes, "w"); if (prefs.hex_irc_servernotice) strcat (modes, "s"); if (prefs.hex_irc_invisible) strcat (modes, "i"); if (prefs.hex_irc_hidehost) strcat (modes, "x"); if (modes[1] != '\0') { serv->p_mode (serv, serv->nick, modes); } }
0
244,152
void svhd_box_del(GF_Box *s) { GF_SphericalVideoInfoBox *ptr = (GF_SphericalVideoInfoBox *)s; if (ptr->string) gf_free(ptr->string); gf_free(s); }
0
294,583
h_trunc(VALUE h, VALUE *fr) { VALUE rh; if (wholenum_p(h)) { rh = to_integer(h); *fr = INT2FIX(0); } else { rh = f_idiv(h, INT2FIX(1)); *fr = f_mod(h, INT2FIX(1)); *fr = f_quo(*fr, INT2FIX(24)); } return rh; }
0
404,702
int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags) { int new_fd; int error; error = security_file_receive(file); if (error) return error; new_fd = get_unused_fd_flags(o_flags); if (new_fd < 0) return new_fd; if (ufd) { error = put_user(new_fd, ufd); if (error) { put_unused_fd(new_fd); return error; } } fd_install(new_fd, get_file(file)); __receive_sock(file); return new_fd; }
0
301,355
static int vfswrap_setxattr(struct vfs_handle_struct *handle, const char *path, const char *name, const void *value, size_t size, int flags) { return setxattr(path, name, value, size, flags); }
0
486,823
static unsigned get_bit(const uint8_t *mac, unsigned bit) { unsigned byte; byte = mac[bit / 8]; byte >>= (bit & 0x7); byte &= 1; return byte; }
0
220,398
join_ary(mrb_state *mrb, mrb_value ary, mrb_value sep, mrb_value list) { mrb_int i; mrb_value result, val, tmp; /* check recursive */ for (i=0; i<RARRAY_LEN(list); i++) { if (mrb_obj_equal(mrb, ary, RARRAY_PTR(list)[i])) { mrb_raise(mrb, E_ARGUMENT_ERROR, "recursive array join"); } } mrb_ary_push(mrb, list, ary); result = mrb_str_new_capa(mrb, 64); for (i=0; i<RARRAY_LEN(ary); i++) { if (i > 0 && !mrb_nil_p(sep)) { mrb_str_cat_str(mrb, result, sep); } val = RARRAY_PTR(ary)[i]; switch (mrb_type(val)) { case MRB_TT_ARRAY: ary_join: val = join_ary(mrb, val, sep, list); /* fall through */ case MRB_TT_STRING: str_join: mrb_str_cat_str(mrb, result, val); break; default: if (!mrb_immediate_p(val)) { tmp = mrb_check_string_type(mrb, val); if (!mrb_nil_p(tmp)) { val = tmp; goto str_join; } tmp = mrb_check_array_type(mrb, val); if (!mrb_nil_p(tmp)) { val = tmp; goto ary_join; } } val = mrb_obj_as_string(mrb, val); goto str_join; } } mrb_ary_pop(mrb, list); return result; }
0
387,859
static int binary_search(const Array<Method*>* methods, const Symbol* name) { int len = methods->length(); // methods are sorted, so do binary search int l = 0; int h = len - 1; while (l <= h) { int mid = (l + h) >> 1; Method* m = methods->at(mid); assert(m->is_method(), "must be method"); int res = m->name()->fast_compare(name); if (res == 0) { return mid; } else if (res < 0) { l = mid + 1; } else { h = mid - 1; } } return -1; }
0
210,090
cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; size_t i, o4, nelements, j, slen, left; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, cdf_offset(sst->sst_tab, offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } if (cdf_check_stream_offset(sst, h, shp, sh.sh_len, __LINE__) == -1) goto out; sh.sh_properties = CDF_TOLE4(shp->sh_properties); DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (sh.sh_properties > CDF_PROP_LIMIT) goto out; inp = cdf_grow_info(info, maxcount, sh.sh_properties); if (inp == NULL) goto out; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, cdf_offset(sst->sst_tab, offs + sizeof(sh))); e = CAST(const uint8_t *, cdf_offset(shp, sh.sh_len)); if (p >= e || cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { if ((q = cdf_get_property_info_pos(sst, h, p, e, i)) == NULL) goto out; inp[i].pi_id = CDF_GETUINT32(p, i << 1); left = CAST(size_t, e - q); if (left < sizeof(uint32_t)) { DPRINTF(("short info (no type)_\n")); goto out; } inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%" SIZE_T_FORMAT "u) id=%#x type=%#x offs=%#tx,%#x\n", i, inp[i].pi_id, inp[i].pi_type, q - p, offs)); if (inp[i].pi_type & CDF_VECTOR) { if (left < sizeof(uint32_t) * 2) { DPRINTF(("missing CDF_VECTOR length\n")); goto out; } nelements = CDF_GETUINT32(q, 1); if (nelements == 0) { DPRINTF(("CDF_VECTOR with nelements == 0\n")); goto out; } slen = 2; } else { nelements = 1; slen = 1; } o4 = slen * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto unknown; 
switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (!cdf_copy_info(&inp[i], &q[o4], e, sizeof(int16_t))) goto unknown; break; case CDF_SIGNED32: case CDF_BOOL: case CDF_UNSIGNED32: case CDF_FLOAT: if (!cdf_copy_info(&inp[i], &q[o4], e, sizeof(int32_t))) goto unknown; break; case CDF_SIGNED64: case CDF_UNSIGNED64: case CDF_DOUBLE: case CDF_FILETIME: if (!cdf_copy_info(&inp[i], &q[o4], e, sizeof(int64_t))) goto unknown; break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; inp = cdf_grow_info(info, maxcount, nelements); if (inp == NULL) goto out; inp += nelem; } DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n", nelements)); for (j = 0; j < nelements && i < sh.sh_properties; j++, i++) { uint32_t l; if (o4 + sizeof(uint32_t) > left) goto out; l = CDF_GETUINT32(q, slen); o4 += sizeof(uint32_t); if (o4 + l > left) goto out; inp[i].pi_str.s_len = l; inp[i].pi_str.s_buf = CAST(const char *, CAST(const void *, &q[o4])); DPRINTF(("o=%" SIZE_T_FORMAT "u l=%d(%" SIZE_T_FORMAT "u), t=%" SIZE_T_FORMAT "u s=%s\n", o4, l, CDF_ROUND(l, sizeof(l)), left, inp[i].pi_str.s_buf)); if (l & 1) l++; slen += l >> 1; o4 = slen * sizeof(uint32_t); } i--; break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: memset(&inp[i].pi_val, 0, sizeof(inp[i].pi_val)); DPRINTF(("Don't know how to deal with %#x\n", inp[i].pi_type)); break; } } return 0; out: free(*info); *info = NULL; *count = 0; *maxcount = 0; errno = EFTYPE; return -1; }
1
247,527
TEST_P(SslSocketTest, FailedClientCertificateHashAndSpkiVerificationNoClientCertificate) { envoy::config::listener::v3::Listener listener; envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains(); envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert = tls_context.mutable_common_tls_context()->add_tls_certificates(); server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem")); server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem")); envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext* server_validation_ctx = tls_context.mutable_common_tls_context()->mutable_validation_context(); server_validation_ctx->mutable_trusted_ca()->set_filename(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem")); server_validation_ctx->add_verify_certificate_hash( "0000000000000000000000000000000000000000000000000000000000000000"); server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI); updateFilterChain(tls_context, *filter_chain); envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client; TestUtilOptionsV2 test_options(listener, client, false, GetParam()); testUtilV2(test_options.setExpectedServerStats("ssl.fail_verify_no_cert") .setExpectedTransportFailureReasonContains("SSLV3_ALERT_HANDSHAKE_FAILURE")); // Fails even with client renegotiation. client.set_allow_renegotiation(true); testUtilV2(test_options); }
0
221,137
void ResetTextConfig(GF_TextConfig *desc) { GF_List *bck; while (gf_list_count(desc->sample_descriptions)) { GF_TextSampleDescriptor *sd = (GF_TextSampleDescriptor *)gf_list_get(desc->sample_descriptions, 0); gf_list_rem(desc->sample_descriptions, 0); gf_odf_del_tx3g(sd); } bck = desc->sample_descriptions; memset(desc, 0, sizeof(GF_TextConfig)); desc->tag = GF_ODF_TEXT_CFG_TAG; desc->sample_descriptions = bck; }
0
369,325
static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg, unsigned size, unsigned type) { struct io_uring_rsrc_update2 up; if (size != sizeof(up)) return -EINVAL; if (copy_from_user(&up, arg, sizeof(up))) return -EFAULT; if (!up.nr || up.resv || up.resv2) return -EINVAL; return __io_register_rsrc_update(ctx, type, &up, up.nr);
0
317,163
static int smack_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) { return smack_inode_setsecurity(inode, XATTR_SMACK_SUFFIX, ctx, ctxlen, 0); }
0
359,567
DEFUN (bgp_redistribute_ipv6_metric_rmap, bgp_redistribute_ipv6_metric_rmap_cmd, "redistribute (connected|kernel|ospf6|ripng|static) metric <0-4294967295> route-map WORD", "Redistribute information from another routing protocol\n" "Connected\n" "Kernel routes\n" "Open Shurtest Path First (OSPFv3)\n" "Routing Information Protocol (RIPng)\n" "Static routes\n" "Metric for redistributed routes\n" "Default metric\n" "Route map reference\n" "Pointer to route-map entries\n") { int type; u_int32_t metric; type = bgp_str2route_type (AFI_IP6, argv[0]); if (! type) { vty_out (vty, "%% Invalid route type%s", VTY_NEWLINE); return CMD_WARNING; } VTY_GET_INTEGER ("metric", metric, argv[1]); bgp_redistribute_metric_set (vty->index, AFI_IP6, type, metric); bgp_redistribute_rmap_set (vty->index, AFI_IP6, type, argv[2]); return bgp_redistribute_set (vty->index, AFI_IP6, type); }
0
238,647
static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, struct bpf_func_state *cur, struct bpf_id_pair *idmap) { int i, spi; /* walk slots of the explored stack and ignore any additional * slots in the current stack, since explored(safe) state * didn't use them */ for (i = 0; i < old->allocated_stack; i++) { spi = i / BPF_REG_SIZE; if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { i += BPF_REG_SIZE - 1; /* explored state didn't use this */ continue; } if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) continue; /* explored stack has more populated slots than current stack * and these slots were used */ if (i >= cur->allocated_stack) return false; /* if old state was safe with misc data in the stack * it will be safe with zero-initialized stack. * The opposite is not true */ if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) continue; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != cur->stack[spi].slot_type[i % BPF_REG_SIZE]) /* Ex: old explored (safe) state has STACK_SPILL in * this stack slot, but current has STACK_MISC -> * this verifier states are not equivalent, * return false to continue verification of this path */ return false; if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) continue; if (!is_spilled_reg(&old->stack[spi])) continue; if (!regsafe(env, &old->stack[spi].spilled_ptr, &cur->stack[spi].spilled_ptr, idmap)) /* when explored and current stack slot are both storing * spilled registers, check that stored pointers types * are the same as well. * Ex: explored safe path could have stored * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} * but current path has stored: * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} * such verifier states are not equivalent. * return false to continue verification of this path */ return false; } return true; }
0
387,590
int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type) { struct snd_ctl_file *kctl; int subdevice = -1; unsigned long flags; read_lock_irqsave(&card->ctl_files_rwlock, flags); list_for_each_entry(kctl, &card->ctl_files, list) { if (kctl->pid == task_pid(current)) { subdevice = kctl->preferred_subdevice[type]; if (subdevice != -1) break; } } read_unlock_irqrestore(&card->ctl_files_rwlock, flags); return subdevice; }
0
313,137
testCleanupImages(void) { VIR_FREE(qemuimg); VIR_FREE(absraw); VIR_FREE(absqcow2); VIR_FREE(abswrap); VIR_FREE(absqed); VIR_FREE(absdir); VIR_FREE(abslink2); if (chdir(abs_builddir) < 0) { fprintf(stderr, "unable to return to correct directory, refusing to " "clean up %s\n", datadir); return; } virFileDeleteTree(datadir); }
0
401,535
void add_timer(struct timer_list *timer) { BUG_ON(timer_pending(timer)); __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); }
0
401,552
static void do_numa_crng_init(struct work_struct *work) { int i; struct crng_state *crng; struct crng_state **pool; pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); for_each_online_node(i) { crng = kmalloc_node(sizeof(struct crng_state), GFP_KERNEL | __GFP_NOFAIL, i); spin_lock_init(&crng->lock); crng_initialize_secondary(crng); pool[i] = crng; } mb(); if (cmpxchg(&crng_node_pool, NULL, pool)) { for_each_node(i) kfree(pool[i]); kfree(pool); } }
0
343,217
static void updatepidfile(void) { int fd; char buf[42]; size_t buf_len; if (SNCHECK(snprintf(buf, sizeof buf, "%lu\n", (unsigned long) getpid()), sizeof buf)) { return; } if (unlink(pid_file) != 0 && errno != ENOENT) { return; } if ((fd = open(pid_file, O_CREAT | O_WRONLY | O_TRUNC | O_NOFOLLOW, (mode_t) 0644)) == -1) { return; } buf_len = strlen(buf); if (safe_write(fd, buf, buf_len, -1) != (ssize_t) buf_len) { (void) ftruncate(fd, (off_t) 0); } (void) close(fd); }
0
226,029
void gitn_box_del(GF_Box *s) { u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *)s; if (ptr == NULL) return; if (ptr->entries) { for (i=0; i<ptr->nb_entries; i++) { if (ptr->entries[i].name) gf_free(ptr->entries[i].name); } gf_free(ptr->entries); } gf_free(ptr);
0
294,588
sec_to_ns(VALUE s) { if (safe_mul_p(s, SECOND_IN_NANOSECONDS)) return LONG2FIX(FIX2LONG(s) * SECOND_IN_NANOSECONDS); return f_mul(s, INT2FIX(SECOND_IN_NANOSECONDS)); }
0
226,081
void edts_box_del(GF_Box *s) { gf_free(s); }
0
308,163
static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment) { struct fastrpc_dma_buf_attachment *a; struct fastrpc_buf *buffer = dmabuf->priv; int ret; a = kzalloc(sizeof(*a), GFP_KERNEL); if (!a) return -ENOMEM; ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt, FASTRPC_PHYS(buffer->phys), buffer->size); if (ret < 0) { dev_err(buffer->dev, "failed to get scatterlist from DMA API\n"); kfree(a); return -EINVAL; } a->dev = attachment->dev; INIT_LIST_HEAD(&a->node); attachment->priv = a; mutex_lock(&buffer->lock); list_add(&a->node, &buffer->attachments); mutex_unlock(&buffer->lock); return 0; }
0
344,766
sock_set_v6only(int s) { #if defined(IPV6_V6ONLY) && !defined(__OpenBSD__) int on = 1; debug3("%s: set socket %d IPV6_V6ONLY", __func__, s); if (setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on)) == -1) error("setsockopt IPV6_V6ONLY: %s", strerror(errno)); #endif }
0
417,131
bool PlayerGeneric::getPowerOfTwoCompensationFlag() const { return compensateBufferFlag; }
0
369,274
static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd, int start, int end) { struct file *file; int offset; for (offset = start; offset < end; offset++) { offset = array_index_nospec(offset, IO_RINGFD_REG_MAX); if (tctx->registered_rings[offset]) continue; file = fget(fd); if (!file) { return -EBADF; } else if (file->f_op != &io_uring_fops) { fput(file); return -EOPNOTSUPP; } tctx->registered_rings[offset] = file; return offset; } return -EBUSY;
0
445,997
_remove_files_begin (SaveData *save_data, gpointer user_data) { LoadData *load_data = LOAD_DATA (save_data); RemoveData *remove_data = user_data; fr_archive_progress_set_total_files (load_data->archive, remove_data->n_files_to_remove); fr_archive_progress_set_total_bytes (load_data->archive, FR_ARCHIVE_LIBARCHIVE (load_data->archive)->priv->uncompressed_size); }
0
234,745
void btrfs_free_device(struct btrfs_device *device) { WARN_ON(!list_empty(&device->post_commit_list)); rcu_string_free(device->name); extent_io_tree_release(&device->alloc_state); bio_put(device->flush_bio); btrfs_destroy_dev_zone_info(device); kfree(device); }
0
244,088
GF_Box *chnl_box_new() { ISOM_DECL_BOX_ALLOC(GF_ChannelLayoutBox, GF_ISOM_BOX_TYPE_CHNL); return (GF_Box *)tmp; }
0
301,345
static ssize_t vfswrap_flistxattr(struct vfs_handle_struct *handle, struct files_struct *fsp, char *list, size_t size) { return flistxattr(fsp->fh->fd, list, size); }
0
459,192
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, struct tcf_block_ext_info *ei, struct netlink_ext_ack *extack) { struct net *net = qdisc_net(q); struct tcf_block *block = NULL; int err; if (ei->block_index) /* block_index not 0 means the shared block is requested */ block = tcf_block_refcnt_get(net, ei->block_index); if (!block) { block = tcf_block_create(net, q, ei->block_index, extack); if (IS_ERR(block)) return PTR_ERR(block); if (tcf_block_shared(block)) { err = tcf_block_insert(block, net, extack); if (err) goto err_block_insert; } } err = tcf_block_owner_add(block, q, ei->binder_type); if (err) goto err_block_owner_add; tcf_block_owner_netif_keep_dst(block, q, ei->binder_type); err = tcf_chain0_head_change_cb_add(block, ei, extack); if (err) goto err_chain0_head_change_cb_add; err = tcf_block_offload_bind(block, q, ei, extack); if (err) goto err_block_offload_bind; *p_block = block; return 0; err_block_offload_bind: tcf_chain0_head_change_cb_del(block, ei); err_chain0_head_change_cb_add: tcf_block_owner_del(block, q, ei->binder_type); err_block_owner_add: err_block_insert: tcf_block_refcnt_put(block, true); return err; }
0
264,684
lexer_compare_identifier_to_chars (const uint8_t *left_p, /**< left identifier */ const uint8_t *right_p, /**< right identifier string */ size_t size) /**< byte size of the two identifiers */ { uint8_t utf8_buf[6]; do { if (*left_p == *right_p) { left_p++; right_p++; size--; continue; } size_t escape_size; if (*left_p == LIT_CHAR_BACKSLASH) { left_p += 2; lit_code_point_t code_point = lexer_unchecked_hex_to_character (&left_p); escape_size = lit_code_point_to_cesu8_bytes (utf8_buf, code_point); } else if (*left_p >= LIT_UTF8_4_BYTE_MARKER) { lit_four_byte_utf8_char_to_cesu8 (utf8_buf, left_p); escape_size = 3 * 2; left_p += 4; } else { return false; } size -= escape_size; uint8_t *utf8_p = utf8_buf; do { if (*right_p++ != *utf8_p++) { return false; } } while (--escape_size > 0); } while (size > 0); return true; } /* lexer_compare_identifier_to_chars */
0
343,281
static int fortune(void) { int fd; char *buf; char *bufpnt; char *bufend; struct stat st; off_t gl; char *fortunepnt; char fortune[2048]; if (fortunes_file == NULL || *fortunes_file == 0) { return 0; } if ((fd = open(fortunes_file, O_RDONLY)) == -1) { logfile(LOG_ERR, MSG_OPEN_FAILURE, fortunes_file); return -1; } if (fstat(fd, &st) < 0 || (((S_IRUSR | S_IRGRP | S_IROTH) & st.st_mode) != (S_IRUSR | S_IRGRP | S_IROTH)) || !(S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)) || st.st_size < 2 || (buf = mmap(NULL, (size_t) st.st_size, PROT_READ, MAP_FILE | MAP_SHARED, fd, (off_t) 0)) == (void *) MAP_FAILED) { (void) close(fd); logfile(LOG_ERR, MSG_OPEN_FAILURE, fortunes_file); return -1; } # ifdef HAVE_RANDOM gl = (off_t) (random() % (st.st_size - 1U)); # else gl = (off_t) (rand() % (st.st_size - 1U)); # endif bufpnt = buf + gl; bufend = buf + st.st_size; while (bufpnt != buf) { if (bufpnt[0] == '\n') { if (&bufpnt[-1] != buf && bufpnt[-1] == '%') { if (&bufpnt[-2] != buf && bufpnt[-2] == '\n') { break; } } } bufpnt--; } if (bufpnt != buf) { while (bufpnt != bufend && *bufpnt == '\n') { bufpnt++; } } fortunepnt = fortune; while (*bufpnt != 0 && bufpnt != bufend && fortunepnt != &fortune[sizeof fortune - 1U]) { if (bufpnt[0] == '\n') { if (&bufpnt[1] != bufend && bufpnt[1] == '%') { if (&bufpnt[2] != bufend && bufpnt[2] == '\n') { break; } } } *fortunepnt++ = *bufpnt++; } if (fortunepnt == fortune) { goto bye; } do { fortunepnt--; } while (fortunepnt != fortune && (*fortunepnt == '\n' || isspace((unsigned char) *fortunepnt))); fortunepnt[1] = 0; fortunepnt = fortune; while (*fortunepnt == '\n') { fortunepnt++; } if (*fortunepnt == 0) { goto bye; } addreply(220, "%s", fortunepnt); bye: (void) munmap(buf, st.st_size); (void) close(fd); return 1; }
0
462,232
PJ_DEF(pj_status_t) pj_stun_sockaddr_attr_create(pj_pool_t *pool, int attr_type, pj_bool_t xor_ed, const pj_sockaddr_t *addr, unsigned addr_len, pj_stun_sockaddr_attr **p_attr) { pj_stun_sockaddr_attr *attr; PJ_ASSERT_RETURN(pool && p_attr, PJ_EINVAL); attr = PJ_POOL_ZALLOC_T(pool, pj_stun_sockaddr_attr); *p_attr = attr; return pj_stun_sockaddr_attr_init(attr, attr_type, xor_ed, addr, addr_len); }
0
244,100
GF_Err clap_box_size(GF_Box *s) { s->size += 32; return GF_OK; }
0
211,110
LZWDecodeCompat(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s) { static const char module[] = "LZWDecodeCompat"; LZWCodecState *sp = DecoderState(tif); char *op = (char*) op0; long occ = (long) occ0; char *tp; unsigned char *bp; int code, nbits; long nextbits, nextdata, nbitsmask; code_t *codep, *free_entp, *maxcodep, *oldcodep; (void) s; assert(sp != NULL); /* Fail if value does not fit in long. */ if ((tmsize_t) occ != occ0) return (0); /* * Restart interrupted output operation. */ if (sp->dec_restart) { long residue; codep = sp->dec_codep; residue = codep->length - sp->dec_restart; if (residue > occ) { /* * Residue from previous decode is sufficient * to satisfy decode request. Skip to the * start of the decoded string, place decoded * values in the output buffer, and return. */ sp->dec_restart += occ; do { codep = codep->next; } while (--residue > occ); tp = op + occ; do { *--tp = codep->value; codep = codep->next; } while (--occ); return (1); } /* * Residue satisfies only part of the decode request. 
*/ op += residue; occ -= residue; tp = op; do { *--tp = codep->value; codep = codep->next; } while (--residue); sp->dec_restart = 0; } bp = (unsigned char *)tif->tif_rawcp; #ifdef LZW_CHECKEOS sp->dec_bitsleft = (((uint64)tif->tif_rawcc) << 3); #endif nbits = sp->lzw_nbits; nextdata = sp->lzw_nextdata; nextbits = sp->lzw_nextbits; nbitsmask = sp->dec_nbitsmask; oldcodep = sp->dec_oldcodep; free_entp = sp->dec_free_entp; maxcodep = sp->dec_maxcodep; while (occ > 0) { NextCode(tif, sp, bp, code, GetNextCodeCompat); if (code == CODE_EOI) break; if (code == CODE_CLEAR) { do { free_entp = sp->dec_codetab + CODE_FIRST; _TIFFmemset(free_entp, 0, (CSIZE - CODE_FIRST) * sizeof (code_t)); nbits = BITS_MIN; nbitsmask = MAXCODE(BITS_MIN); maxcodep = sp->dec_codetab + nbitsmask; NextCode(tif, sp, bp, code, GetNextCodeCompat); } while (code == CODE_CLEAR); /* consecutive CODE_CLEAR codes */ if (code == CODE_EOI) break; if (code > CODE_CLEAR) { TIFFErrorExt(tif->tif_clientdata, tif->tif_name, "LZWDecode: Corrupted LZW table at scanline %d", tif->tif_row); return (0); } *op++ = (char)code; occ--; oldcodep = sp->dec_codetab + code; continue; } codep = sp->dec_codetab + code; /* * Add the new entry to the code table. */ if (free_entp < &sp->dec_codetab[0] || free_entp >= &sp->dec_codetab[CSIZE]) { TIFFErrorExt(tif->tif_clientdata, module, "Corrupted LZW table at scanline %d", tif->tif_row); return (0); } free_entp->next = oldcodep; if (free_entp->next < &sp->dec_codetab[0] || free_entp->next >= &sp->dec_codetab[CSIZE]) { TIFFErrorExt(tif->tif_clientdata, module, "Corrupted LZW table at scanline %d", tif->tif_row); return (0); } free_entp->firstchar = free_entp->next->firstchar; free_entp->length = free_entp->next->length+1; free_entp->value = (codep < free_entp) ? 
codep->firstchar : free_entp->firstchar; if (++free_entp > maxcodep) { if (++nbits > BITS_MAX) /* should not happen */ nbits = BITS_MAX; nbitsmask = MAXCODE(nbits); maxcodep = sp->dec_codetab + nbitsmask; } oldcodep = codep; if (code >= 256) { /* * Code maps to a string, copy string * value to output (written in reverse). */ if(codep->length == 0) { TIFFErrorExt(tif->tif_clientdata, module, "Wrong length of decoded " "string: data probably corrupted at scanline %d", tif->tif_row); return (0); } if (codep->length > occ) { /* * String is too long for decode buffer, * locate portion that will fit, copy to * the decode buffer, and setup restart * logic for the next decoding call. */ sp->dec_codep = codep; do { codep = codep->next; } while (codep->length > occ); sp->dec_restart = occ; tp = op + occ; do { *--tp = codep->value; codep = codep->next; } while (--occ); break; } assert(occ >= codep->length); op += codep->length; occ -= codep->length; tp = op; do { *--tp = codep->value; } while( (codep = codep->next) != NULL ); } else { *op++ = (char)code; occ--; } } tif->tif_rawcc -= (tmsize_t)( (uint8*) bp - tif->tif_rawcp ); tif->tif_rawcp = (uint8*) bp; sp->lzw_nbits = (unsigned short)nbits; sp->lzw_nextdata = nextdata; sp->lzw_nextbits = nextbits; sp->dec_nbitsmask = nbitsmask; sp->dec_oldcodep = oldcodep; sp->dec_free_entp = free_entp; sp->dec_maxcodep = maxcodep; if (occ > 0) { #if defined(__WIN32__) && (defined(_MSC_VER) || defined(__MINGW32__)) TIFFErrorExt(tif->tif_clientdata, module, "Not enough data at scanline %d (short %I64d bytes)", tif->tif_row, (unsigned __int64) occ); #else TIFFErrorExt(tif->tif_clientdata, module, "Not enough data at scanline %d (short %llu bytes)", tif->tif_row, (unsigned long long) occ); #endif return (0); } return (1); }
1
139,241
// Controls whether the play/pause button is permanently suppressed.
// Note the inversion: passing |is_visible| == false latches the
// always-hide flag on.
void OverlayWindowViews::SetAlwaysHidePlayPauseButton(bool is_visible) {
  const bool force_hidden = !is_visible;
  always_hide_play_pause_button_ = force_hidden;
}
0
398,517
/**
 * Decode a DWARF line-number program from \p obuf (at most \p len bytes).
 * Each opcode is parsed into an RzBinDwarfLineOp; if \p bob is given the op
 * is also executed against the state-machine registers \p regs to build
 * source-line info, and if \p ops_out is given the op is stored there
 * (otherwise it is finalized immediately).
 * \return number of bytes consumed, or 0 on invalid input / parse failure.
 */
static size_t parse_opcodes(const ut8 *obuf, size_t len, const RzBinDwarfLineHeader *hdr,
	RzVector *ops_out, RzBinDwarfSMRegisters *regs, RZ_NULLABLE RzBinSourceLineInfoBuilder *bob,
	RZ_NULLABLE RzBinDwarfDebugInfo *info, RZ_NULLABLE RzBinDwarfLineFileCache fnc,
	bool big_endian, ut8 target_addr_size) {
	const ut8 *buf, *buf_end;
	ut8 opcode;
	if (!obuf || !len) {
		return 0;
	}
	buf = obuf;
	buf_end = obuf + len;
	while (buf < buf_end) {
		opcode = *buf++;
		RzBinDwarfLineOp op = { 0 };
		if (!opcode) {
			/* opcode 0 introduces an extended opcode (DWARF spec 6.2.5.3) */
			buf = parse_ext_opcode(&op, hdr, buf, (buf_end - buf), big_endian, target_addr_size);
		} else if (opcode >= hdr->opcode_base) {
			// special opcode without args, no further parsing needed
			op.type = RZ_BIN_DWARF_LINE_OP_TYPE_SPEC;
			op.opcode = opcode;
		} else {
			/* standard opcode with operands described by the header */
			buf = parse_std_opcode(&op, hdr, buf, (buf_end - buf), opcode, big_endian);
		}
		if (!buf) {
			/* parser signalled malformed data; stop and report failure below */
			break;
		}
		if (bob) {
			rz_bin_dwarf_line_op_run(hdr, regs, &op, bob, info, fnc);
		}
		if (ops_out) {
			rz_vector_push(ops_out, &op);
		} else {
			rz_bin_dwarf_line_op_fini(&op);
		}
	}
	if (!buf) {
		return 0;
	}
	return (size_t)(buf - obuf); // number of bytes we've moved by
}
0
462,308
/*
 * Status-readback entry point for the "fonts extended" category.
 * Thin wrapper: delegates to status_do_fonts with extended == true.
 */
status_fonts_extended(stream * s, pcl_state_t * pcs, pcl_data_storage_t storage)
{
    const bool extended = true;

    return status_do_fonts(s, pcs, storage, extended);
}
0
229,287
// Serialize an optional [bytes] value in CQL wire format:
// a null value is encoded as length -1 with no payload, otherwise the
// byte length followed by every fragment of the (possibly fragmented) view.
void cql_server::response::write_value(std::optional<query::result_bytes_view> value)
{
    if (!value) {
        write_int(-1);
        return;
    }

    write_int(value->size_bytes());
    for (bytes_view fragment : *value) {
        _body.write(fragment);
    }
}
0
317,216
/*
 * LSM hook: check whether the current task may create a socket of the
 * given family/type/protocol.  Kernel-internal sockets (kern != 0) are
 * exempt from the check.  Returns 0 on success or a negative errno.
 */
static int selinux_socket_create(int family, int type, int protocol, int kern)
{
	const struct task_security_struct *tsec = selinux_cred(current_cred());
	u32 newsid;
	u16 secclass;
	int rc;

	if (kern)
		return 0;

	/* Map (family, type, protocol) to a SELinux security class, then
	 * compute the SID the new socket would get. */
	secclass = socket_type_to_security_class(family, type, protocol);
	rc = socket_sockcreate_sid(tsec, secclass, &newsid);
	if (rc)
		return rc;

	return avc_has_perm(&selinux_state, tsec->sid, newsid, secclass, SOCKET__CREATE, NULL);
}
0
455,425
/*
 * Allocate and initialise a new in-core XFS inode for inode number @ino
 * on mount @mp.  Returns NULL if VFS-level initialisation fails.
 */
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		/* VFS init failed: return the zone object and bail out */
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	/* A freshly allocated inode must not be pinned, flush-locked,
	 * or carry a stale inode number. */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	ip->i_cnextents = 0;
	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));

	return ip;
}
0
90,217
// Returns the cached ethernet network object.
// NOTE(review): may be NULL when no ethernet network is present —
// confirm against how ethernet_ is populated.
virtual EthernetNetwork* ethernet_network() { return ethernet_; }
0
521,464
// Default constructor: creates an empty builder with no entries.
// (Entries are presumably added through the builder's add methods —
// confirm against the class declaration.)
ZipFile::Builder::Builder() {}
0
473,870
/*
 * Case-fold one UTF-16BE character at *pp into `fold`, advancing *pp.
 * ASCII characters (high byte 0) are handled inline as a fast path;
 * everything else is delegated to the generic Unicode folding routine.
 * Returns the number of bytes written to `fold`.
 */
utf16be_mbc_case_fold(OnigCaseFoldType flag, const UChar** pp, const UChar* end,
		      UChar* fold, OnigEncoding enc)
{
  const UChar* p = *pp;

  /* big-endian: *p is the high byte; ASCII iff it is 0 and the low
   * byte is an ASCII code */
  if (ONIGENC_IS_ASCII_CODE(*(p+1)) && *p == 0) {
    p++;
#ifdef USE_UNICODE_CASE_FOLD_TURKISH_AZERI
    if ((flag & ONIGENC_CASE_FOLD_TURKISH_AZERI) != 0) {
      /* Turkish/Azeri: 'I' (0x49) folds to dotless i U+0131 */
      if (*p == 0x49) {
	*fold++ = 0x01;
	*fold   = 0x31;
	(*pp) += 2;
	return 2;
      }
    }
#endif

    /* emit the folded ASCII char as a big-endian 16-bit unit */
    *fold++ = 0;
    *fold   = ONIGENC_ASCII_CODE_TO_LOWER_CASE(*p);
    *pp += 2;
    return 2;
  }
  else
    return onigenc_unicode_mbc_case_fold(enc, flag, pp, end, fold);
}
0
318,106
/*
 * Write a frame to the USB host interface.  The destination endpoint is
 * chosen from the queue number encoded in the packet descriptor: WLAN
 * queues go to the WLAN endpoint, everything else to the BT endpoint.
 */
static int rsi_usb_host_intf_write_pkt(struct rsi_hw *adapter, u8 *pkt, u32 len)
{
	u32 queueno = ((pkt[1] >> 4) & 0x7);
	u8 endpoint;

	if (queueno == RSI_WIFI_MGMT_Q ||
	    queueno == RSI_WIFI_DATA_Q ||
	    queueno == RSI_COEX_Q)
		endpoint = WLAN_EP;
	else
		endpoint = BT_EP;

	return rsi_write_multiple(adapter, endpoint, (u8 *)pkt, len);
}
0
386,525
/**
 * Writes one hatch boundary edge to the DXF stream.
 * Supported edge types (group code 72): 1=line, 2=arc, 3=ellipse arc,
 * 4=spline.  Angles are stored in degrees in DXF, so radians are
 * converted.  An unsupported type is warned about but the type code is
 * still emitted.
 */
void DL_Dxf::writeHatchEdge(DL_WriterA& dw, const DL_HatchEdgeData& data) {
    if (data.type<1 || data.type>4) {
        printf("WARNING: unsupported hatch edge type: %d", data.type);
    }
    dw.dxfInt(72, data.type);

    switch (data.type) {
    // line:
    case 1:
        dw.dxfReal(10, data.x1);
        dw.dxfReal(20, data.y1);
        dw.dxfReal(11, data.x2);
        dw.dxfReal(21, data.y2);
        break;

    // arc:
    case 2:
        dw.dxfReal(10, data.cx);
        dw.dxfReal(20, data.cy);
        dw.dxfReal(40, data.radius);
        // radians -> degrees
        dw.dxfReal(50, data.angle1/(2*M_PI)*360.0);
        dw.dxfReal(51, data.angle2/(2*M_PI)*360.0);
        dw.dxfInt(73, (int)(data.ccw));
        break;

    // ellipse arc:
    case 3:
        dw.dxfReal(10, data.cx);
        dw.dxfReal(20, data.cy);
        dw.dxfReal(11, data.mx);
        dw.dxfReal(21, data.my);
        dw.dxfReal(40, data.ratio);
        dw.dxfReal(50, data.angle1/(2*M_PI)*360.0);
        dw.dxfReal(51, data.angle2/(2*M_PI)*360.0);
        dw.dxfInt(73, (int)(data.ccw));
        break;

    // spline:
    case 4:
        dw.dxfInt(94, data.degree);
        dw.dxfBool(73, data.rational);
        dw.dxfBool(74, data.periodic);
        dw.dxfInt(95, data.nKnots);
        dw.dxfInt(96, data.nControl);
        for (unsigned int i=0; i<data.knots.size(); i++) {
            dw.dxfReal(40, data.knots[i]);
        }
        for (unsigned int i=0; i<data.controlPoints.size(); i++) {
            dw.dxfReal(10, data.controlPoints[i][0]);
            dw.dxfReal(20, data.controlPoints[i][1]);
        }
        for (unsigned int i=0; i<data.weights.size(); i++) {
            dw.dxfReal(42, data.weights[i]);
        }
        // fit points are optional
        if (data.nFit>0) {
            dw.dxfInt(97, data.nFit);
            for (unsigned int i=0; i<data.fitPoints.size(); i++) {
                dw.dxfReal(11, data.fitPoints[i][0]);
                dw.dxfReal(21, data.fitPoints[i][1]);
            }
        }
        // tangents are only written when non-negligible
        if (fabs(data.startTangentX)>1.0e-4 || fabs(data.startTangentY)>1.0e-4) {
            dw.dxfReal(12, data.startTangentX);
            dw.dxfReal(22, data.startTangentY);
        }
        if (fabs(data.endTangentX)>1.0e-4 || fabs(data.endTangentY)>1.0e-4) {
            dw.dxfReal(13, data.endTangentX);
            dw.dxfReal(23, data.endTangentY);
        }
        break;

    default:
        break;
    }
}
0
244,165
/*
 * Serialize an 'fdsa' (FD hint sample) box: header, then the packet
 * table array, then the optional extra_data child box.
 * Returns GF_OK or the first error encountered.
 */
GF_Err fdsa_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_HintSample *ptr = (GF_HintSample *) s;
	if (!s) return GF_BAD_PARAM;

	e = gf_isom_box_write_header(s, bs);
	if (e) return e;
	e = gf_isom_box_array_write(s, ptr->packetTable, bs);
	if (e) return e;
	/* extra_data is optional; only written when present */
	if (ptr->extra_data) {
		e = gf_isom_box_write((GF_Box *)ptr->extra_data, bs);
		if (e) return e;
	}
	return GF_OK;
}
0
381,877
/*
 * Return the next extent for @inode at @epos, following chained
 * allocation-extent descriptor blocks as needed.  The chain depth is
 * capped at UDF_MAX_INDIR_EXTS to defend against corrupted/looping
 * on-disk chains.  Returns the extent type, or -1 on error.
 */
int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int8_t etype;
	unsigned int indirections = 0;

	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
	       (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
		udf_pblk_t block;

		/* guard against unbounded (possibly circular) indirection */
		if (++indirections > UDF_MAX_INDIR_EXTS) {
			udf_err(inode->i_sb,
				"too many indirect extents in inode %lu\n",
				inode->i_ino);
			return -1;
		}

		/* hop to the next allocation extent descriptor block */
		epos->block = *eloc;
		epos->offset = sizeof(struct allocExtDesc);
		brelse(epos->bh);
		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
		epos->bh = udf_tread(inode->i_sb, block);
		if (!epos->bh) {
			udf_debug("reading block %u failed!\n", block);
			return -1;
		}
	}

	return etype;
}
0
376,678
load_image (const gchar *filename, GError **error) { gchar *name; gint fd; BrushHeader bh; guchar *brush_buf = NULL; gint32 image_ID; gint32 layer_ID; GimpParasite *parasite; GimpDrawable *drawable; GimpPixelRgn pixel_rgn; gint bn_size; GimpImageBaseType base_type; GimpImageType image_type; gsize size; fd = g_open (filename, O_RDONLY | _O_BINARY, 0); if (fd == -1) { g_set_error (error, G_FILE_ERROR, g_file_error_from_errno (errno), _("Could not open '%s' for reading: %s"), gimp_filename_to_utf8 (filename), g_strerror (errno)); return -1; } gimp_progress_init_printf (_("Opening '%s'"), gimp_filename_to_utf8 (filename)); if (read (fd, &bh, sizeof (BrushHeader)) != sizeof (BrushHeader)) { close (fd); return -1; } /* rearrange the bytes in each unsigned int */ bh.header_size = g_ntohl (bh.header_size); bh.version = g_ntohl (bh.version); bh.width = g_ntohl (bh.width); bh.height = g_ntohl (bh.height); bh.bytes = g_ntohl (bh.bytes); bh.magic_number = g_ntohl (bh.magic_number); bh.spacing = g_ntohl (bh.spacing); /* Sanitize values */ if ((bh.width == 0) || (bh.width > GIMP_MAX_IMAGE_SIZE) || (bh.height == 0) || (bh.height > GIMP_MAX_IMAGE_SIZE) || ((bh.bytes != 1) && (bh.bytes != 2) && (bh.bytes != 4) && (bh.bytes != 18)) || (G_MAXSIZE / bh.width / bh.height / bh.bytes < 1)) { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("Invalid header data in '%s': width=%lu, height=%lu, " "bytes=%lu"), gimp_filename_to_utf8 (filename), (unsigned long int)bh.width, (unsigned long int)bh.height, (unsigned long int)bh.bytes); return -1; } switch (bh.version) { case 1: /* Version 1 didn't have a magic number and had no spacing */ bh.spacing = 25; /* And we need to rewind the handle, 4 due spacing and 4 due magic */ lseek (fd, -8, SEEK_CUR); bh.header_size += 8; break; case 3: /* cinepaint brush */ if (bh.bytes == 18 /* FLOAT16_GRAY_GIMAGE */) { bh.bytes = 2; } else { g_message (_("Unsupported brush format")); close (fd); return -1; } /* fallthrough */ case 2: if (bh.magic_number 
== GBRUSH_MAGIC && bh.header_size > sizeof (BrushHeader)) break; default: g_message (_("Unsupported brush format")); close (fd); return -1; } if ((bn_size = (bh.header_size - sizeof (BrushHeader))) > 0) { gchar *temp = g_new (gchar, bn_size); if ((read (fd, temp, bn_size)) < bn_size || temp[bn_size - 1] != '\0') { g_set_error (error, G_FILE_ERROR, G_FILE_ERROR_FAILED, _("Error in GIMP brush file '%s'"), gimp_filename_to_utf8 (filename)); close (fd); g_free (temp); return -1; } name = gimp_any_to_utf8 (temp, -1, _("Invalid UTF-8 string in brush file '%s'."), gimp_filename_to_utf8 (filename)); g_free (temp); } else { name = g_strdup (_("Unnamed")); } /* Now there's just raw data left. */ size = bh.width * bh.height * bh.bytes; brush_buf = g_malloc (size); if (read (fd, brush_buf, size) != size) { close (fd); g_free (brush_buf); g_free (name); return -1; } switch (bh.bytes) { case 1: { PatternHeader ph; /* For backwards-compatibility, check if a pattern follows. The obsolete .gpb format did it this way. 
*/ if (read (fd, &ph, sizeof (PatternHeader)) == sizeof(PatternHeader)) { /* rearrange the bytes in each unsigned int */ ph.header_size = g_ntohl (ph.header_size); ph.version = g_ntohl (ph.version); ph.width = g_ntohl (ph.width); ph.height = g_ntohl (ph.height); ph.bytes = g_ntohl (ph.bytes); ph.magic_number = g_ntohl (ph.magic_number); if (ph.magic_number == GPATTERN_MAGIC && ph.version == 1 && ph.header_size > sizeof (PatternHeader) && ph.bytes == 3 && ph.width == bh.width && ph.height == bh.height && lseek (fd, ph.header_size - sizeof (PatternHeader), SEEK_CUR) > 0) { guchar *plain_brush = brush_buf; gint i; bh.bytes = 4; brush_buf = g_malloc (4 * bh.width * bh.height); for (i = 0; i < ph.width * ph.height; i++) { if (read (fd, brush_buf + i * 4, 3) != 3) { close (fd); g_free (name); g_free (plain_brush); g_free (brush_buf); return -1; } brush_buf[i * 4 + 3] = plain_brush[i]; } g_free (plain_brush); } } } break; case 2: { guint16 *buf = (guint16 *) brush_buf; gint i; for (i = 0; i < bh.width * bh.height; i++, buf++) { union { guint16 u[2]; gfloat f; } short_float; #if G_BYTE_ORDER == G_LITTLE_ENDIAN short_float.u[0] = 0; short_float.u[1] = GUINT16_FROM_BE (*buf); #else short_float.u[0] = GUINT16_FROM_BE (*buf); short_float.u[1] = 0; #endif brush_buf[i] = (guchar) (short_float.f * 255.0 + 0.5); } bh.bytes = 1; } break; default: break; } /* * Create a new image of the proper size and * associate the filename with it. 
*/ switch (bh.bytes) { case 1: base_type = GIMP_GRAY; image_type = GIMP_GRAY_IMAGE; break; case 4: base_type = GIMP_RGB; image_type = GIMP_RGBA_IMAGE; break; default: g_message ("Unsupported brush depth: %d\n" "GIMP Brushes must be GRAY or RGBA\n", bh.bytes); g_free (name); return -1; } image_ID = gimp_image_new (bh.width, bh.height, base_type); gimp_image_set_filename (image_ID, filename); parasite = gimp_parasite_new ("gimp-brush-name", GIMP_PARASITE_PERSISTENT, strlen (name) + 1, name); gimp_image_attach_parasite (image_ID, parasite); gimp_parasite_free (parasite); layer_ID = gimp_layer_new (image_ID, name, bh.width, bh.height, image_type, 100, GIMP_NORMAL_MODE); gimp_image_insert_layer (image_ID, layer_ID, -1, 0); g_free (name); drawable = gimp_drawable_get (layer_ID); gimp_pixel_rgn_init (&pixel_rgn, drawable, 0, 0, drawable->width, drawable->height, TRUE, FALSE); gimp_pixel_rgn_set_rect (&pixel_rgn, brush_buf, 0, 0, bh.width, bh.height); g_free (brush_buf); if (image_type == GIMP_GRAY_IMAGE) gimp_invert (layer_ID); close (fd); gimp_drawable_flush (drawable); gimp_progress_update (1.0); return image_ID; }
0
261,397
/*
 * Recursively parse one node of the HEVC coding quadtree at (x0,y0) with
 * block size 1<<log2CbSize.  A split flag is decoded only when the CU is
 * larger than the minimum CB size and fully inside the picture; partly
 * outside CUs are implicitly split, minimum-size CUs are not split.
 */
void read_coding_quadtree(thread_context* tctx,
                          int x0, int y0,
                          int log2CbSize,
                          int ctDepth)
{
  logtrace(LogSlice,"- read_coding_quadtree %d;%d cbsize:%d depth:%d POC:%d\n",x0,y0,1<<log2CbSize,ctDepth,tctx->img->PicOrderCntVal);

  de265_image* img = tctx->img;
  const seq_parameter_set& sps = img->get_sps();
  const pic_parameter_set& pps = img->get_pps();

  int split_flag;

  // We only send a split flag if CU is larger than minimum size and
  // completely contained within the image area.
  // If it is partly outside the image area and not at minimum size,
  // it is split. If already at minimum size, it is not split further.

  if (x0+(1<<log2CbSize) <= sps.pic_width_in_luma_samples &&
      y0+(1<<log2CbSize) <= sps.pic_height_in_luma_samples &&
      log2CbSize > sps.Log2MinCbSizeY) {
    split_flag = decode_split_cu_flag(tctx, x0,y0, ctDepth);
  } else {
    if (log2CbSize > sps.Log2MinCbSizeY) { split_flag=1; }
    else                                 { split_flag=0; }
  }

  // reset per-CU QP-delta state at the start of a quantization group
  if (pps.cu_qp_delta_enabled_flag &&
      log2CbSize >= pps.Log2MinCuQpDeltaSize)
    {
      tctx->IsCuQpDeltaCoded = 0;
      tctx->CuQpDelta = 0;
    }
  else
    {
      // shdr->CuQpDelta = 0; // TODO check: is this the right place to set to default value ?
    }

  if (tctx->shdr->cu_chroma_qp_offset_enabled_flag &&
      log2CbSize >= pps.Log2MinCuChromaQpOffsetSize) {
    tctx->IsCuChromaQpOffsetCoded = 0;
  }

  if (split_flag) {
    int x1 = x0 + (1<<(log2CbSize-1));
    int y1 = y0 + (1<<(log2CbSize-1));

    // recurse into the (up to) four sub-CUs; children outside the
    // picture are skipped
    read_coding_quadtree(tctx,x0,y0, log2CbSize-1, ctDepth+1);

    if (x1<sps.pic_width_in_luma_samples)
      read_coding_quadtree(tctx,x1,y0, log2CbSize-1, ctDepth+1);

    if (y1<sps.pic_height_in_luma_samples)
      read_coding_quadtree(tctx,x0,y1, log2CbSize-1, ctDepth+1);

    if (x1<sps.pic_width_in_luma_samples &&
        y1<sps.pic_height_in_luma_samples)
      read_coding_quadtree(tctx,x1,y1, log2CbSize-1, ctDepth+1);
  }
  else {
    // set ctDepth of this CU
    img->set_ctDepth(x0,y0, log2CbSize, ctDepth);

    read_coding_unit(tctx, x0,y0, log2CbSize, ctDepth);
  }

  logtrace(LogSlice,"-\n");
}
0
405,378
/*
 * Pick (allocating tree nodes as needed) the hlist on which @policy
 * should be inserted inside the inexact bin @bin, based on which of
 * the selector's saddr/daddr are wildcards:
 *  - both "any": the bin's own list,
 *  - daddr "any": a node in the saddr tree,
 *  - otherwise: a node in the daddr tree, possibly with a nested
 *    saddr subtree.
 * Returns NULL on allocation failure.  Caller holds xfrm_policy_lock.
 */
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	/* daddr wildcard: key only by source address */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	/* both fixed: nested saddr tree under the daddr node */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}
0
387,796
// Looks up a method declared in this class by name and signature.
// Thin wrapper around find_method_impl using the default lookup modes.
Method* InstanceKlass::find_method(const Symbol* name,
                                   const Symbol* signature) const {
  Method* const result =
      find_method_impl(name, signature, find_overpass, find_static, find_private);
  return result;
}
0
273,052
/*
 * Fill |str| with a freshly generated random UUID rendered as an
 * upper-case string.  |str| must have room for the 36-char UUID text
 * plus the terminator.
 */
uuid_make(char *str)
{
	uuid_t id;

	uuid_generate_random(id);
	uuid_unparse_upper(id, str);
}
0
516,254
/*
 * Apply (or clear) the vnet-header endianness setting on every queue's
 * backend peer.  On failure while enabling, previously configured peers
 * are rolled back.
 * Returns true if enabling failed (after rollback), false otherwise —
 * note the inverted "true means error" convention.
 */
static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queues, bool enable)
{
    int i;

    for (i = 0; i < queues; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            /* undo the peers already switched before the failure */
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}
0
215,312
/*
 * setrlimit(2): validate and install a new resource limit for the
 * current process, with special handling for RLIMIT_CPU which must be
 * propagated to the CPU-time POSIX timer machinery.
 */
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	/* raising the hard limit requires CAP_SYS_RESOURCE */
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling. Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		if (rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry. But we use the zero value to mean "it was
			 * never set". So let's cheat and make it one second
			 * instead
			 */
			rlim_cur = 1;
		}
		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}
1
273,087
/*
 * In-place replace every occurrence of |pattern| in the NUL-terminated
 * string |s| with |replacement|.  |sz| is the total size of the buffer
 * backing |s| (including room for the terminator).
 *
 * Returns 0 on success (including the no-op cases of a NULL/empty
 * pattern or NULL replacement), -1 if |s| is NULL or the result would
 * not fit in |sz| bytes.
 *
 * Fixes vs. the previous version:
 *  - an empty pattern matched at every position with zero advance,
 *    causing an infinite loop; it is now a no-op,
 *  - the cached string length was not updated after a replacement, so
 *    when replacement and pattern lengths differ the tail (and even the
 *    terminator) of later matches was miscopied.
 */
int
safe_snreplace(char *s, size_t sz, const char *pattern, const char *replacement)
{
	char *ptr;
	char *src;
	char *dst;
	size_t num;

	if (!s)
		return -1;

	if (!pattern || !replacement)
		return 0;

	size_t p_len = strlen(pattern);
	size_t r_len = strlen(replacement);
	size_t s_len = strlen(s) + 1; // Incl terminator

	// An empty pattern would match at every position without ever
	// advancing; treat it as "nothing to replace".
	if (p_len == 0)
		return 0;

	ptr = s;
	while ((ptr = strstr(ptr, pattern))) {
		// We will move the part of the string after the pattern from src to dst
		src = ptr + p_len;
		dst = ptr + r_len;
		num = s_len - (src - s); // Number of bytes w/terminator we need to move
		if (dst + num > s + sz)
			return -1; // Not enough room

		// Shift everything after the pattern to the right, use memmove since
		// there might be an overlap
		memmove(dst, src, num);

		// Write replacement, no null terminator (the moved tail carries it)
		memcpy(ptr, replacement, r_len);

		// The string just grew/shrank by the length difference; keep the
		// cached length (incl. terminator) in sync for the next match.
		s_len = s_len - p_len + r_len;

		// Advance ptr past the replacement to avoid infinite looping
		ptr = dst;
	}

	return 0;
}
0
522,354
/*
 * Read one word (WrdSiz bytes) from the mesh file into *ptr, using the
 * raw descriptor in AIO builds and stdio otherwise.  On short read,
 * longjmp out with error code -26.  If the file's byte order differs
 * from the host's (msh->cod != 1) the word is byte-swapped in place.
 */
static void ScaWrd(GmfMshSct *msh, void *ptr)
{
#ifdef WITH_GMF_AIO
   if(read(msh->FilDes, ptr, WrdSiz) != WrdSiz)
#else
   if(fread(ptr, WrdSiz, 1, msh->hdl) != 1)
#endif
      longjmp(msh->err, -26);

   if(msh->cod != 1)
      SwpWrd((char *)ptr, WrdSiz);
}
0
502,713
/*
 * Deep-copy an SSL_SESSION.  The struct is first copied wholesale, then
 * every owned pointer field is re-duplicated (or reference-counted) so
 * the copy does not alias |src|'s buffers.  |ticket| selects whether the
 * session ticket is carried over.  Returns NULL on allocation failure.
 *
 * Fixes vs. the previous version:
 *  - the ticket buffer was allocated but never filled (uninitialized
 *    ticket data), and when |ticket| == 0 the memcpy'd pointer still
 *    aliased |src|'s buffer, leading to a double free,
 *  - the KRB5 principal length was self-assigned instead of taken
 *    from |src| (harmless after the memcpy, but clearly a typo).
 *
 * NOTE(review): on the error path SSL_SESSION_free(dest) runs while
 * some memcpy'd pointer fields may still alias |src| (e.g. sess_cert /
 * peer before their refcounts are bumped) — verify the free routine's
 * behavior for each early goto.
 */
SSL_SESSION *ssl_session_dup(SSL_SESSION *src, int ticket)
{
    SSL_SESSION *dest;

    dest = OPENSSL_malloc(sizeof(*src));
    if (dest == NULL) {
        goto err;
    }
    memcpy(dest, src, sizeof(*dest));

#ifndef OPENSSL_NO_KRB5
    dest->krb5_client_princ_len = src->krb5_client_princ_len;
    if (src->krb5_client_princ_len > 0)
        memcpy(dest->krb5_client_princ, src->krb5_client_princ,
               src->krb5_client_princ_len);
#endif
#ifndef OPENSSL_NO_PSK
    if (src->psk_identity_hint) {
        dest->psk_identity_hint = BUF_strdup(src->psk_identity_hint);
        if (dest->psk_identity_hint == NULL) {
            goto err;
        }
    } else {
        dest->psk_identity_hint = NULL;
    }
    if (src->psk_identity) {
        dest->psk_identity = BUF_strdup(src->psk_identity);
        if (dest->psk_identity == NULL) {
            goto err;
        }
    } else {
        dest->psk_identity = NULL;
    }
#endif

    /* sess_cert and peer are shared by reference counting, not copied */
    if (src->sess_cert != NULL)
        CRYPTO_add(&src->sess_cert->references, 1, CRYPTO_LOCK_SSL_SESS_CERT);

    if (src->peer != NULL)
        CRYPTO_add(&src->peer->references, 1, CRYPTO_LOCK_X509);

    dest->references = 1;

    if (src->ciphers != NULL) {
        dest->ciphers = sk_SSL_CIPHER_dup(src->ciphers);
        if (dest->ciphers == NULL)
            goto err;
    } else {
        dest->ciphers = NULL;
    }

    if (!CRYPTO_dup_ex_data(CRYPTO_EX_INDEX_SSL_SESSION,
                            &dest->ex_data, &src->ex_data)) {
        goto err;
    }

    /* We deliberately don't copy the prev and next pointers */
    dest->prev = NULL;
    dest->next = NULL;

#ifndef OPENSSL_NO_TLSEXT
    if (src->tlsext_hostname) {
        dest->tlsext_hostname = BUF_strdup(src->tlsext_hostname);
        if (dest->tlsext_hostname == NULL) {
            goto err;
        }
    } else {
        dest->tlsext_hostname = NULL;
    }
# ifndef OPENSSL_NO_EC
    if (src->tlsext_ecpointformatlist) {
        dest->tlsext_ecpointformatlist =
            BUF_memdup(src->tlsext_ecpointformatlist,
                       src->tlsext_ecpointformatlist_length);
        if (dest->tlsext_ecpointformatlist == NULL)
            goto err;
        dest->tlsext_ecpointformatlist_length =
            src->tlsext_ecpointformatlist_length;
    }
    if (src->tlsext_ellipticcurvelist) {
        dest->tlsext_ellipticcurvelist =
            BUF_memdup(src->tlsext_ellipticcurvelist,
                       src->tlsext_ellipticcurvelist_length);
        if (dest->tlsext_ellipticcurvelist == NULL)
            goto err;
        dest->tlsext_ellipticcurvelist_length =
            src->tlsext_ellipticcurvelist_length;
    }
# endif
#endif

    if (ticket != 0) {
        dest->tlsext_tick_lifetime_hint = src->tlsext_tick_lifetime_hint;
        dest->tlsext_ticklen = src->tlsext_ticklen;
        if((dest->tlsext_tick = OPENSSL_malloc(src->tlsext_ticklen)) == NULL) {
            goto err;
        }
        /* actually copy the ticket bytes — previously left uninitialized */
        memcpy(dest->tlsext_tick, src->tlsext_tick, src->tlsext_ticklen);
    } else {
        /* drop the aliased pointer inherited from the memcpy above so
         * freeing |dest| cannot free |src|'s ticket buffer */
        dest->tlsext_tick_lifetime_hint = 0;
        dest->tlsext_ticklen = 0;
        dest->tlsext_tick = NULL;
    }

#ifndef OPENSSL_NO_SRP
    dest->srp_username = NULL;
    if (src->srp_username) {
        dest->srp_username = BUF_strdup(src->srp_username);
        if (dest->srp_username == NULL) {
            goto err;
        }
    } else {
        dest->srp_username = NULL;
    }
#endif

    return dest;
err:
    SSLerr(SSL_F_SSL_SESSION_DUP, ERR_R_MALLOC_FAILURE);
    SSL_SESSION_free(dest);
    return NULL;
}
0
317,021
/*
 * LSM hook: check the current task's permission |mask| on |inode|.
 * Uses the non-auditing AVC check first and only performs the (possibly
 * blocking) audit when required; honors MAY_NOT_BLOCK by fetching the
 * inode security blob via the RCU-safe path.
 * Returns 0 if allowed, -errno otherwise.
 */
static int selinux_inode_permission(struct inode *inode, int mask)
{
	const struct cred *cred = current_cred();
	u32 perms;
	bool from_access;
	bool no_block = mask & MAY_NOT_BLOCK;
	struct inode_security_struct *isec;
	u32 sid;
	struct av_decision avd;
	int rc, rc2;
	u32 audited, denied;

	from_access = mask & MAY_ACCESS;
	mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);

	/* No permission to check.  Existence test. */
	if (!mask)
		return 0;

	validate_creds(cred);

	/* private (fs-internal) inodes bypass SELinux entirely */
	if (unlikely(IS_PRIVATE(inode)))
		return 0;

	perms = file_mask_to_av(inode->i_mode, mask);

	sid = cred_sid(cred);
	isec = inode_security_rcu(inode, no_block);
	if (IS_ERR(isec))
		return PTR_ERR(isec);

	rc = avc_has_perm_noaudit(&selinux_state,
				  sid, isec->sid, isec->sclass, perms, 0, &avd);
	audited = avc_audit_required(perms, &avd, rc,
				     from_access ? FILE__AUDIT_ACCESS : 0,
				     &denied);
	if (likely(!audited))
		return rc;

	/* audit failures take precedence over the access decision */
	rc2 = audit_inode_permission(inode, perms, audited, denied, rc);
	if (rc2)
		return rc2;
	return rc;
}
0
307,848
// Constructs a compiler-interface environment backed by |arena| and
// registers it as the current CompilerThread's environment.  Must run
// inside the VM after full universe initialization; the commonly used
// exception/string instances are lazily created, so they start NULL.
ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
  ASSERT_IN_VM;

  // Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
  CompilerThread* current_thread = CompilerThread::current();
  assert(current_thread->env() == NULL, "must be");
  current_thread->set_env(this);
  assert(ciEnv::current() == this, "sanity");

  _oop_recorder = NULL;
  _debug_info = NULL;
  _dependencies = NULL;
  _failure_reason = NULL;
  _compilable = MethodCompilable_never;
  _break_at_compile = false;
  _compiler_data = NULL;
#ifndef PRODUCT
  assert(firstEnv, "must be first");
  firstEnv = false;
#endif /* !PRODUCT */

  _system_dictionary_modification_counter = 0;
  _num_inlined_bytecodes = 0;
  _task = NULL;
  _log = NULL;

  // Temporary buffer for creating symbols and such.
  _name_buffer = NULL;
  _name_buffer_len = 0;

  _arena = arena;
  _factory = new (_arena) ciObjectFactory(_arena, 128);

  // Preload commonly referenced system ciObjects.

  // During VM initialization, these instances have not yet been created.
  // Assertions ensure that these instances are not accessed before
  // their initialization.

  assert(Universe::is_fully_initialized(), "must be");

  _NullPointerException_instance = NULL;
  _ArithmeticException_instance = NULL;
  _ArrayIndexOutOfBoundsException_instance = NULL;
  _ArrayStoreException_instance = NULL;
  _ClassCastException_instance = NULL;
  _the_null_string = NULL;
  _the_min_jint_string = NULL;

  // JVMTI capabilities default to off; real queries happen elsewhere.
  _jvmti_can_hotswap_or_post_breakpoint = false;
  _jvmti_can_access_local_variables = false;
  _jvmti_can_post_on_exceptions = false;
  _jvmti_can_pop_frame = false;
}
0
259,171
/*
 * For HEVC tracks with sample-group information, build the auxiliary
 * indexes needed to seek in open-GOP streams:
 *  - an unrolled per-sample list of composition offsets (from ctts),
 *  - the list of sample ids that are CRA (clean random access) points,
 *  - the minimal sample duration (from stts).
 * Returns 0 on success or a negative AVERROR code.
 */
static int build_open_gop_key_points(AVStream *st)
{
    int k;
    int sample_id = 0;
    uint32_t cra_index;
    MOVStreamContext *sc = st->priv_data;

    if (st->codecpar->codec_id != AV_CODEC_ID_HEVC || !sc->sync_group_count)
        return 0;

    /* Build an unrolled index of the samples */
    sc->sample_offsets_count = 0;
    for (uint32_t i = 0; i < sc->ctts_count; i++) {
        /* overflow guard on the running count */
        if (sc->ctts_data[i].count > INT_MAX - sc->sample_offsets_count)
            return AVERROR(ENOMEM);
        sc->sample_offsets_count += sc->ctts_data[i].count;
    }
    av_freep(&sc->sample_offsets);
    sc->sample_offsets = av_calloc(sc->sample_offsets_count, sizeof(*sc->sample_offsets));
    if (!sc->sample_offsets)
        return AVERROR(ENOMEM);
    k = 0;
    for (uint32_t i = 0; i < sc->ctts_count; i++)
        for (int j = 0; j < sc->ctts_data[i].count; j++)
             sc->sample_offsets[k++] = sc->ctts_data[i].duration;

    /* The following HEVC NAL type reveal the use of open GOP sync points
     * (TODO: BLA types may also be concerned) */
    cra_index = get_sgpd_sync_index(sc, HEVC_NAL_CRA_NUT); /* Clean Random Access */
    if (!cra_index)
        return 0;

    /* Build a list of open-GOP key samples */
    sc->open_key_samples_count = 0;
    for (uint32_t i = 0; i < sc->sync_group_count; i++)
        if (sc->sync_group[i].index == cra_index) {
            if (sc->sync_group[i].count > INT_MAX - sc->open_key_samples_count)
                return AVERROR(ENOMEM);
            sc->open_key_samples_count += sc->sync_group[i].count;
        }
    av_freep(&sc->open_key_samples);
    sc->open_key_samples = av_calloc(sc->open_key_samples_count, sizeof(*sc->open_key_samples));
    if (!sc->open_key_samples)
        return AVERROR(ENOMEM);
    k = 0;
    for (uint32_t i = 0; i < sc->sync_group_count; i++) {
        const MOVSbgp *sg = &sc->sync_group[i];
        if (sg->index == cra_index)
            for (uint32_t j = 0; j < sg->count; j++)
                sc->open_key_samples[k++] = sample_id;
        /* sample_id overflow would corrupt the index; bail out */
        if (sg->count > INT_MAX - sample_id)
            return AVERROR_PATCHWELCOME;
        sample_id += sg->count;
    }

    /* Identify the minimal time step between samples */
    sc->min_sample_duration = UINT_MAX;
    for (uint32_t i = 0; i < sc->stts_count; i++)
        sc->min_sample_duration = FFMIN(sc->min_sample_duration, sc->stts_data[i].duration);

    return 0;
}
0
390,630
/*
 * Handle the XkbListComponents request: parse the six component-name
 * patterns from the request tail, ask the DDX layer for matching
 * keymap components, and reply with the counts and the pooled name
 * strings.
 *
 * NOTE(review): the request length is validated only *after* the six
 * GetComponentSpec() calls have walked the request tail; a short
 * request may therefore be read past its end before the BadLength
 * check fires — confirm GetComponentSpec bounds its reads.
 */
ProcXkbListComponents(ClientPtr client)
{
    DeviceIntPtr		dev;
    xkbListComponentsReply	rep;
    unsigned			len;
    int				status;
    unsigned char *		str;
    XkbSrvListInfoRec		list;

    REQUEST(xkbListComponentsReq);
    REQUEST_AT_LEAST_SIZE(xkbListComponentsReq);

    if (!(client->xkbClientFlags&_XkbClientInitialized))
	return BadAccess;

    CHK_KBD_DEVICE(dev, stuff->deviceSpec, client, DixGetAttrAccess);

    /* Parse the six counted pattern strings that follow the fixed part
     * of the request. */
    status= Success;
    str= (unsigned char *)&stuff[1];
    bzero(&list,sizeof(XkbSrvListInfoRec));
    list.maxRtrn= stuff->maxNames;
    list.pattern[_XkbListKeymaps]= GetComponentSpec(&str,False,&status);
    list.pattern[_XkbListKeycodes]= GetComponentSpec(&str,False,&status);
    list.pattern[_XkbListTypes]= GetComponentSpec(&str,False,&status);
    list.pattern[_XkbListCompat]= GetComponentSpec(&str,False,&status);
    list.pattern[_XkbListSymbols]= GetComponentSpec(&str,False,&status);
    list.pattern[_XkbListGeometry]= GetComponentSpec(&str,False,&status);
    if (status!=Success)
	return status;
    /* length check happens only now, after parsing (see NOTE above) */
    len= str-((unsigned char *)stuff);
    if ((XkbPaddedSize(len)/4)!=stuff->length)
	return BadLength;
    if ((status=XkbDDXList(dev,&list,client))!=Success) {
	if (list.pool) {
	    _XkbFree(list.pool);
	    list.pool= NULL;
	}
	return status;
    }
    bzero(&rep,sizeof(xkbListComponentsReply));
    rep.type= X_Reply;
    rep.deviceID = dev->id;
    rep.sequenceNumber = client->sequence;
    rep.length = XkbPaddedSize(list.nPool)/4;
    rep.nKeymaps = list.nFound[_XkbListKeymaps];
    rep.nKeycodes = list.nFound[_XkbListKeycodes];
    rep.nTypes = list.nFound[_XkbListTypes];
    rep.nCompatMaps = list.nFound[_XkbListCompat];
    rep.nSymbols = list.nFound[_XkbListSymbols];
    rep.nGeometries = list.nFound[_XkbListGeometry];
    rep.extra= 0;
    /* report how many matches were dropped because of maxNames */
    if (list.nTotal>list.maxRtrn)
	rep.extra = (list.nTotal-list.maxRtrn);
    if (client->swapped) {
	register int n;
	swaps(&rep.sequenceNumber,n);
	swapl(&rep.length,n);
	swaps(&rep.nKeymaps,n);
	swaps(&rep.nKeycodes,n);
	swaps(&rep.nTypes,n);
	swaps(&rep.nCompatMaps,n);
	swaps(&rep.nSymbols,n);
	swaps(&rep.nGeometries,n);
	swaps(&rep.extra,n);
    }
    WriteToClient(client,SIZEOF(xkbListComponentsReply),(char *)&rep);
    if (list.nPool && list.pool) {
	WriteToClient(client,XkbPaddedSize(list.nPool), (char *)list.pool);
	_XkbFree(list.pool);
	list.pool= NULL;
    }
    return client->noClientException;
}
0