idx: int64
func: string
target: int64
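Each record below follows the three columns above: a numeric row id, a complete C/C++ function flattened onto a single line, and a binary label. As a minimal sketch of that layout (illustrative only: the struct name and field comments are assumptions, and the meaning of the 0/1 target is not stated in this dump):

    #include <stdint.h>

    /* Hypothetical record layout mirroring the schema above. */
    typedef struct {
        int64_t     idx;    /* row identifier */
        const char *func;   /* function source, stored as one flattened string */
        int64_t     target; /* binary label (0 or 1); semantics not given here */
    } record_t;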
7,862
ast_for_decorator(struct compiling *c, const node *n) { /* decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE */ expr_ty d = NULL; expr_ty name_expr; REQ(n, decorator); REQ(CHILD(n, 0), AT); REQ(RCHILD(n, -1), NEWLINE); name_expr = ast_for_dotted_name(c, CHILD(n, 1)); if (!name_expr) return NULL; if (NCH(n) == 3) { /* No arguments */ d = name_expr; name_expr = NULL; } else if (NCH(n) == 5) { /* Call with no arguments */ d = Call(name_expr, NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); if (!d) return NULL; name_expr = NULL; } else { d = ast_for_call(c, CHILD(n, 3), name_expr); if (!d) return NULL; name_expr = NULL; } return d; }
1
205,443
png_write_tRNS(png_structp png_ptr, png_bytep trans, png_color_16p tran, int num_trans, int color_type) { #ifdef PNG_USE_LOCAL_ARRAYS PNG_tRNS; #endif png_byte buf[6]; png_debug(1, "in png_write_tRNS"); if (color_type == PNG_COLOR_TYPE_PALETTE) { if (num_trans <= 0 || num_trans > (int)png_ptr->num_palette) { png_warning(png_ptr, "Invalid number of transparent colors specified"); return; } /* Write the chunk out as it is */ png_write_chunk(png_ptr, (png_bytep)png_tRNS, trans, (png_size_t)num_trans); } else if (color_type == PNG_COLOR_TYPE_GRAY) { /* One 16 bit value */ if (tran->gray >= (1 << png_ptr->bit_depth)) { png_warning(png_ptr, "Ignoring attempt to write tRNS chunk out-of-range for bit_depth"); return; } png_save_uint_16(buf, tran->gray); png_write_chunk(png_ptr, (png_bytep)png_tRNS, buf, (png_size_t)2); } else if (color_type == PNG_COLOR_TYPE_RGB) { /* Three 16 bit values */ png_save_uint_16(buf, tran->red); png_save_uint_16(buf + 2, tran->green); png_save_uint_16(buf + 4, tran->blue); if (png_ptr->bit_depth == 8 && (buf[0] | buf[2] | buf[4])) { png_warning(png_ptr, "Ignoring attempt to write 16-bit tRNS chunk when bit_depth is 8"); return; } png_write_chunk(png_ptr, (png_bytep)png_tRNS, buf, (png_size_t)6); } else { png_warning(png_ptr, "Can't write tRNS with an alpha channel"); } }
0
287,845
void CrosLibrary::TestApi::SetMountLibrary( MountLibrary* library, bool own) { library_->mount_lib_.SetImpl(library, own); }
1
256,790
static int read_intra_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd, int mi_row, int mi_col, vp9_reader *r) { struct segmentation *const seg = &cm->seg; const BLOCK_SIZE bsize = xd->mi[0].src_mi->mbmi.sb_type; int segment_id; if (!seg->enabled) return 0; if (!seg->update_map) return 0; segment_id = read_segment_id(r, seg); set_segment_id(cm, bsize, mi_row, mi_col, segment_id); return segment_id; }
0
389,333
XMLRPC_VALUE_TYPE get_zval_xmlrpc_type(zval* value, zval* newvalue) /* {{{ */ { XMLRPC_VALUE_TYPE type = xmlrpc_none; if (value) { switch (Z_TYPE_P(value)) { case IS_NULL: type = xmlrpc_base64; break; #ifndef BOOL_AS_LONG /* Right thing to do, but it breaks some legacy code. */ case IS_TRUE: case IS_FALSE: type = xmlrpc_boolean; break; #else case IS_BOOL: #endif case IS_LONG: case IS_RESOURCE: type = xmlrpc_int; break; case IS_DOUBLE: type = xmlrpc_double; break; case IS_CONSTANT: type = xmlrpc_string; break; case IS_STRING: type = xmlrpc_string; break; case IS_ARRAY: type = xmlrpc_vector; break; case IS_OBJECT: { zval* attr; type = xmlrpc_vector; if ((attr = zend_hash_str_find(Z_OBJPROP_P(value), OBJECT_TYPE_ATTR, sizeof(OBJECT_TYPE_ATTR) - 1)) != NULL) { if (Z_TYPE_P(attr) == IS_STRING) { type = xmlrpc_str_as_type(Z_STRVAL_P(attr)); } } break; } } /* if requested, return an unmolested (magic removed) copy of the value */ if (newvalue) { zval* val; if ((type == xmlrpc_base64 && Z_TYPE_P(value) == IS_OBJECT) || type == xmlrpc_datetime) { if ((val = zend_hash_str_find(Z_OBJPROP_P(value), OBJECT_VALUE_ATTR, sizeof(OBJECT_VALUE_ATTR) - 1)) != NULL) { ZVAL_COPY_VALUE(newvalue, val); } } else { ZVAL_COPY_VALUE(newvalue, value); } } } return type; }
0
102,712
sixel_output_t *sixel_output_create(Image *image) { sixel_output_t *output; output = (sixel_output_t *) AcquireQuantumMemory(sizeof(sixel_output_t) + SIXEL_OUTPUT_PACKET_SIZE * 2, 1); output->has_8bit_control = 0; output->save_pixel = 0; output->save_count = 0; output->active_palette = (-1); output->node_top = NULL; output->node_free = NULL; output->image = image; output->pos = 0; return output; }
0
451,198
uint64_t byteSize() const override { return HeaderMapImpl::byteSize(); }
0
41,411
static void vht_build_mcs_mask(u16 vht_mcs_map, u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) { u8 nss; for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03); vht_mcs_map >>= 2; } }
0
392,027
http_DissectResponse(struct worker *w, const struct http_conn *htc, struct http *hp) { int j; uint16_t retval = 0; char *p; CHECK_OBJ_NOTNULL(htc, HTTP_CONN_MAGIC); CHECK_OBJ_NOTNULL(hp, HTTP_MAGIC); hp->logtag = HTTP_Rx; if (http_splitline(w, htc->fd, hp, htc, HTTP_HDR_PROTO, HTTP_HDR_STATUS, HTTP_HDR_RESPONSE)) retval = 503; if (retval == 0 && memcmp(hp->hd[HTTP_HDR_PROTO].b, "HTTP/1.", 7)) retval = 503; if (retval == 0 && Tlen(hp->hd[HTTP_HDR_STATUS]) != 3) retval = 503; if (retval == 0) { hp->status = 0; p = hp->hd[HTTP_HDR_STATUS].b; for (j = 100; j != 0; j /= 10) { if (!vct_isdigit(*p)) { retval = 503; break; } hp->status += (uint16_t)(j * (*p - '0')); p++; } if (*p != '\0') retval = 503; } if (retval != 0) { WSLR(w, SLT_HttpGarbage, htc->fd, htc->rxbuf); assert(retval >= 100 && retval <= 999); hp->status = retval; } else { http_ProtoVer(hp); } if (hp->hd[HTTP_HDR_RESPONSE].b == NULL || !Tlen(hp->hd[HTTP_HDR_RESPONSE])) { /* Backend didn't send a response string, use the standard */ hp->hd[HTTP_HDR_RESPONSE].b = TRUST_ME(http_StatusMessage(hp->status)); hp->hd[HTTP_HDR_RESPONSE].e = strchr(hp->hd[HTTP_HDR_RESPONSE].b, '\0'); } return (retval); }
0
483,048
rrset_equal(struct ub_packed_rrset_key* k1, struct ub_packed_rrset_key* k2) { struct packed_rrset_data* d1 = (struct packed_rrset_data*) k1->entry.data; struct packed_rrset_data* d2 = (struct packed_rrset_data*) k2->entry.data; size_t i, t; if(k1->rk.dname_len != k2->rk.dname_len || k1->rk.flags != k2->rk.flags || k1->rk.type != k2->rk.type || k1->rk.rrset_class != k2->rk.rrset_class || query_dname_compare(k1->rk.dname, k2->rk.dname) != 0) return 0; if( /* do not check ttl: d1->ttl != d2->ttl || */ d1->count != d2->count || d1->rrsig_count != d2->rrsig_count || d1->trust != d2->trust || d1->security != d2->security) return 0; t = d1->count + d1->rrsig_count; for(i=0; i<t; i++) { if(d1->rr_len[i] != d2->rr_len[i] || /* no ttl check: d1->rr_ttl[i] != d2->rr_ttl[i] ||*/ memcmp(d1->rr_data[i], d2->rr_data[i], d1->rr_len[i]) != 0) return 0; } return 1; }
0
416,903
static int rtnetlink_bind(struct net *net, int group) { switch (group) { case RTNLGRP_IPV4_MROUTE_R: case RTNLGRP_IPV6_MROUTE_R: if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; break; } return 0; }
0
512,097
sh_single_quote (string) const char *string; { register int c; char *result, *r; const char *s; result = (char *)xmalloc (3 + (4 * strlen (string))); r = result; *r++ = '\''; for (s = string; s && (c = *s); s++) { *r++ = c; if (c == '\'') { *r++ = '\\'; /* insert escaped single quote */ *r++ = '\''; *r++ = '\''; /* start new quoted string */ } } *r++ = '\''; *r = '\0'; return (result); }
0
325,252
static int lance_init(SysBusDevice *dev) { SysBusPCNetState *d = FROM_SYSBUS(SysBusPCNetState, dev); PCNetState *s = &d->state; memory_region_init_io(&s->mmio, &lance_mem_ops, s, "lance-mmio", 4); qdev_init_gpio_in(&dev->qdev, parent_lance_reset, 1); sysbus_init_mmio_region(dev, &s->mmio); sysbus_init_irq(dev, &s->irq); s->phys_mem_read = ledma_memory_read; s->phys_mem_write = ledma_memory_write; return pcnet_common_init(&dev->qdev, s, &net_lance_info); }
1
18,529
const char *trunc_right(const char *src, size_t width) { size_t sl; char *out; sl = strlen(src); if (sl > width && LIB_BUFLENGTH - 1 > width && width > 0) { LIB_GETBUF(out); memcpy(out, src, width); out[width] = '\0'; return out; } return src; }
0
93,309
verbose_stop(void) { if (verbose_fd != NULL) { fclose(verbose_fd); verbose_fd = NULL; } verbose_did_open = FALSE; }
0
31,754
int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, __u32 start_minor_hash, __u32 *next_hash) { struct dx_hash_info hinfo; struct ext4_dir_entry_2 *de; struct dx_frame frames[2], *frame; struct inode *dir; ext4_lblk_t block; int count = 0; int ret, err; __u32 hashval; dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", start_hash, start_minor_hash)); dir = dir_file->f_path.dentry->d_inode; if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) { hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; if (hinfo.hash_version <= DX_HASH_TEA) hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo, start_hash, start_minor_hash); *next_hash = ~0; return count; } hinfo.hash = start_hash; hinfo.minor_hash = 0; frame = dx_probe(NULL, dir, &hinfo, frames, &err); if (!frame) return err; /* Add '.' and '..' from the htree header */ if (!start_hash && !start_minor_hash) { de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0) goto errout; count++; } if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) { de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; de = ext4_next_entry(de, dir->i_sb->s_blocksize); if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0) goto errout; count++; } while (1) { block = dx_get_block(frame->at); ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo, start_hash, start_minor_hash); if (ret < 0) { err = ret; goto errout; } count += ret; hashval = ~0; ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS, frame, frames, &hashval); *next_hash = hashval; if (ret < 0) { err = ret; goto errout; } /* * Stop if: (a) there are no more entries, or * (b) we have inserted at least one entry and the * next hash value is not a continuation */ if ((ret == 0) || (count && ((hashval & 1) == 0))) break; } dx_release(frames); dxtrace(printk(KERN_DEBUG "Fill tree: returned %d entries, " "next hash: %x\n", count, *next_hash)); return count; errout: dx_release(frames); return (err); }
0
401,303
cr_input_peek_char (CRInput const * a_this, guint32 * a_char) { enum CRStatus status = CR_OK; gulong consumed = 0, nb_bytes_left = 0; g_return_val_if_fail (a_this && PRIVATE (a_this) && a_char, CR_BAD_PARAM_ERROR); if (PRIVATE (a_this)->next_byte_index >= PRIVATE (a_this)->in_buf_size) { return CR_END_OF_INPUT_ERROR; } nb_bytes_left = cr_input_get_nb_bytes_left (a_this); if (nb_bytes_left < 1) { return CR_END_OF_INPUT_ERROR; } status = cr_utils_read_char_from_utf8_buf (PRIVATE (a_this)->in_buf + PRIVATE (a_this)->next_byte_index, nb_bytes_left, a_char, &consumed); return status; }
0
220,262
WORD32 ih264d_get_vui_params(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op) { ih264d_ctl_get_vui_params_ip_t *ps_ip; ih264d_ctl_get_vui_params_op_t *ps_op; dec_struct_t *ps_dec = dec_hdl->pv_codec_handle; dec_seq_params_t *ps_sps; vui_t *ps_vui; WORD32 i; UWORD32 u4_size; ps_ip = (ih264d_ctl_get_vui_params_ip_t *)pv_api_ip; ps_op = (ih264d_ctl_get_vui_params_op_t *)pv_api_op; UNUSED(ps_ip); u4_size = ps_op->u4_size; memset(ps_op, 0, sizeof(ih264d_ctl_get_vui_params_op_t)); ps_op->u4_size = u4_size; if(NULL == ps_dec->ps_cur_sps) { ps_op->u4_error_code = ERROR_VUI_PARAMS_NOT_FOUND; return IV_FAIL; } ps_sps = ps_dec->ps_cur_sps; if((0 == ps_sps->u1_is_valid) || (0 == ps_sps->u1_vui_parameters_present_flag)) { ps_op->u4_error_code = ERROR_VUI_PARAMS_NOT_FOUND; return IV_FAIL; } ps_vui = &ps_sps->s_vui; ps_op->u1_aspect_ratio_idc = ps_vui->u1_aspect_ratio_idc; ps_op->u2_sar_width = ps_vui->u2_sar_width; ps_op->u2_sar_height = ps_vui->u2_sar_height; ps_op->u1_overscan_appropriate_flag = ps_vui->u1_overscan_appropriate_flag; ps_op->u1_video_format = ps_vui->u1_video_format; ps_op->u1_video_full_range_flag = ps_vui->u1_video_full_range_flag; ps_op->u1_colour_primaries = ps_vui->u1_colour_primaries; ps_op->u1_tfr_chars = ps_vui->u1_tfr_chars; ps_op->u1_matrix_coeffs = ps_vui->u1_matrix_coeffs; ps_op->u1_cr_top_field = ps_vui->u1_cr_top_field; ps_op->u1_cr_bottom_field = ps_vui->u1_cr_bottom_field; ps_op->u4_num_units_in_tick = ps_vui->u4_num_units_in_tick; ps_op->u4_time_scale = ps_vui->u4_time_scale; ps_op->u1_fixed_frame_rate_flag = ps_vui->u1_fixed_frame_rate_flag; ps_op->u1_nal_hrd_params_present = ps_vui->u1_nal_hrd_params_present; ps_op->u1_vcl_hrd_params_present = ps_vui->u1_vcl_hrd_params_present; ps_op->u1_low_delay_hrd_flag = ps_vui->u1_low_delay_hrd_flag; ps_op->u1_pic_struct_present_flag = ps_vui->u1_pic_struct_present_flag; ps_op->u1_bitstream_restriction_flag = ps_vui->u1_bitstream_restriction_flag; ps_op->u1_mv_over_pic_boundaries_flag = ps_vui->u1_mv_over_pic_boundaries_flag; ps_op->u4_max_bytes_per_pic_denom = ps_vui->u4_max_bytes_per_pic_denom; ps_op->u4_max_bits_per_mb_denom = ps_vui->u4_max_bits_per_mb_denom; ps_op->u4_log2_max_mv_length_horz = ps_vui->u4_log2_max_mv_length_horz; ps_op->u4_log2_max_mv_length_vert = ps_vui->u4_log2_max_mv_length_vert; ps_op->u4_num_reorder_frames = ps_vui->u4_num_reorder_frames; ps_op->u4_max_dec_frame_buffering = ps_vui->u4_max_dec_frame_buffering; return IV_SUCCESS; }
0
44,045
static bool esilbreak_mem_read(RAnalEsil *esil, ut64 addr, ut8 *buf, int len) { ut8 str[128]; if (addr != UT64_MAX) { esilbreak_last_read = addr; } handle_var_stack_access (esil, addr, R_ANAL_VAR_ACCESS_TYPE_READ, len); if (myvalid (mycore->io, addr) && r_io_read_at (mycore->io, addr, (ut8*)buf, len)) { ut64 refptr; bool trace = true; switch (len) { case 2: esilbreak_last_data = refptr = (ut64)r_read_ble16 (buf, esil->anal->big_endian); break; case 4: esilbreak_last_data = refptr = (ut64)r_read_ble32 (buf, esil->anal->big_endian); break; case 8: esilbreak_last_data = refptr = r_read_ble64 (buf, esil->anal->big_endian); break; default: trace = false; r_io_read_at (mycore->io, addr, (ut8*)buf, len); break; } // TODO incorrect bool validRef = false; if (trace && myvalid (mycore->io, refptr)) { if (ntarget == UT64_MAX || ntarget == refptr) { str[0] = 0; if (r_io_read_at (mycore->io, refptr, str, sizeof (str)) < 1) { //eprintf ("Invalid read\n"); str[0] = 0; validRef = false; } else { r_anal_xrefs_set (mycore->anal, esil->address, refptr, R_ANAL_REF_TYPE_DATA); str[sizeof (str) - 1] = 0; add_string_ref (mycore, esil->address, refptr); esilbreak_last_data = UT64_MAX; validRef = true; } } } /** resolve ptr */ if (ntarget == UT64_MAX || ntarget == addr || (ntarget == UT64_MAX && !validRef)) { r_anal_xrefs_set (mycore->anal, esil->address, addr, R_ANAL_REF_TYPE_DATA); } } return false; // fallback }
0
101,732
static RBinObject *r_bin_object_new(RBinFile *binfile, RBinPlugin *plugin, ut64 baseaddr, ut64 loadaddr, ut64 offset, ut64 sz) { const ut8 *bytes = binfile? r_buf_buffer (binfile->buf): NULL; ut64 bytes_sz = binfile? r_buf_size (binfile->buf): 0; Sdb *sdb = binfile? binfile->sdb: NULL; RBinObject *o = R_NEW0 (RBinObject); if (!o) { return NULL; } o->obj_size = bytes && (bytes_sz >= sz + offset)? sz: 0; o->boffset = offset; o->id = r_num_rand (0xfffff000); o->kv = sdb_new0 (); o->baddr = baseaddr; o->baddr_shift = 0; o->plugin = plugin; o->loadaddr = loadaddr != UT64_MAX ? loadaddr : 0; // XXX more checking will be needed here // only use LoadBytes if buffer offset != 0 // if (offset != 0 && bytes && plugin && plugin->load_bytes && (bytes_sz // >= sz + offset) ) { if (bytes && plugin && plugin->load_bytes && (bytes_sz >= sz + offset)) { ut64 bsz = bytes_sz - offset; if (sz < bsz) { bsz = sz; } o->bin_obj = plugin->load_bytes (binfile, bytes + offset, sz, loadaddr, sdb); if (!o->bin_obj) { bprintf ( "Error in r_bin_object_new: load_bytes failed " "for %s plugin\n", plugin->name); sdb_free (o->kv); free (o); return NULL; } } else if (binfile && plugin && plugin->load) { // XXX - haha, this is a hack. // switching out the current object for the new // one to be processed RBinObject *old_o = binfile->o; binfile->o = o; if (plugin->load (binfile)) { binfile->sdb_info = o->kv; // mark as do not walk sdb_ns_set (binfile->sdb, "info", o->kv); } else { binfile->o = old_o; } o->obj_size = sz; } else { sdb_free (o->kv); free (o); return NULL; } // XXX - binfile could be null here meaning an improper load // XXX - object size cant be set here and needs to be set where // where the object is created from. The reason for this is to prevent // mis-reporting when the file is loaded from impartial bytes or is // extracted // from a set of bytes in the file r_bin_object_set_items (binfile, o); r_bin_file_object_add (binfile, o); // XXX this is a very hacky alternative to rewriting the // RIO stuff, as discussed here: return o; }
0
332,120
static void shpc_set_status(SHPCDevice *shpc, int slot, uint8_t value, uint16_t msk) { uint8_t *status = shpc->config + SHPC_SLOT_STATUS(slot); pci_word_test_and_clear_mask(status, msk); pci_word_test_and_set_mask(status, value << (ffs(msk) - 1)); }
0
53,276
static inline unsigned long tcp_probe0_when(const struct sock *sk, unsigned long max_when) { u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff; return (unsigned long)min_t(u64, when, max_when); }
0
149,588
static int network_config_set_interface (const oconfig_item_t *ci, /* {{{ */ int *interface) { if ((ci->values_num != 1) || (ci->values[0].type != OCONFIG_TYPE_STRING)) { WARNING ("network plugin: The `Interface' config option needs exactly " "one string argument."); return (-1); } if (interface == NULL) return (-1); *interface = if_nametoindex (ci->values[0].value.string); return (0); } /* }}} int network_config_set_interface */
0
165,634
ofputil_encode_tlv_table_reply(const struct ofp_header *oh, struct ofputil_tlv_table_reply *ttr) { struct ofpbuf *b; struct nx_tlv_table_reply *nx_ttr; b = ofpraw_alloc_reply(OFPRAW_NXT_TLV_TABLE_REPLY, oh, 0); nx_ttr = ofpbuf_put_zeros(b, sizeof *nx_ttr); nx_ttr->max_option_space = htonl(ttr->max_option_space); nx_ttr->max_fields = htons(ttr->max_fields); encode_tlv_table_mappings(b, &ttr->mappings); return b; }
0
398,495
static int megasas_ld_get_info_submit(SCSIDevice *sdev, int lun, MegasasCmd *cmd) { struct mfi_ld_info *info = cmd->iov_buf; size_t dcmd_size = sizeof(struct mfi_ld_info); uint8_t cdb[6]; SCSIRequest *req; ssize_t len, resid; uint16_t sdev_id = ((sdev->id & 0xFF) << 8) | (lun & 0xFF); uint64_t ld_size; if (!cmd->iov_buf) { cmd->iov_buf = g_malloc0(dcmd_size); info = cmd->iov_buf; megasas_setup_inquiry(cdb, 0x83, sizeof(info->vpd_page83)); req = scsi_req_new(sdev, cmd->index, lun, cdb, cmd); if (!req) { trace_megasas_dcmd_req_alloc_failed(cmd->index, "LD get info vpd inquiry"); g_free(cmd->iov_buf); cmd->iov_buf = NULL; return MFI_STAT_FLASH_ALLOC_FAIL; } trace_megasas_dcmd_internal_submit(cmd->index, "LD get info vpd inquiry", lun); len = scsi_req_enqueue(req); if (len > 0) { cmd->iov_size = len; scsi_req_continue(req); } return MFI_STAT_INVALID_STATUS; } info->ld_config.params.state = MFI_LD_STATE_OPTIMAL; info->ld_config.properties.ld.v.target_id = lun; info->ld_config.params.stripe_size = 3; info->ld_config.params.num_drives = 1; info->ld_config.params.is_consistent = 1; /* Logical device size is in blocks */ blk_get_geometry(sdev->conf.blk, &ld_size); info->size = cpu_to_le64(ld_size); memset(info->ld_config.span, 0, sizeof(info->ld_config.span)); info->ld_config.span[0].start_block = 0; info->ld_config.span[0].num_blocks = info->size; info->ld_config.span[0].array_ref = cpu_to_le16(sdev_id); resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg); g_free(cmd->iov_buf); cmd->iov_size = dcmd_size - resid; cmd->iov_buf = NULL; return MFI_STAT_OK; }
0
66,951
static void pf_req_sense(struct pf_unit *pf, int quiet) { char rs_cmd[12] = { ATAPI_REQ_SENSE, pf->lun << 5, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 }; char buf[16]; int r; r = pf_command(pf, rs_cmd, 16, "Request sense"); mdelay(1); if (!r) pf_completion(pf, buf, "Request sense"); if ((!r) && (!quiet)) printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n", pf->name, buf[2] & 0xf, buf[12], buf[13]); }
0
39,329
parse_string(struct xkb_compose_table *table, const char *string, size_t len, const char *file_name) { struct scanner s; scanner_init(&s, table->ctx, string, len, file_name, NULL); if (!parse(table, &s, 0)) return false; /* Maybe the allocator can use the excess space. */ darray_shrink(table->nodes); darray_shrink(table->utf8); return true; }
0
134,988
static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, int broadcast_flags, struct sock *one_sk, struct net *net) { struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); struct sock *sk; struct sk_buff *skb2 = NULL; int err = -ESRCH; /* XXX Do we need something like netlink_overrun? I think * XXX PF_KEY socket apps will not mind current behavior. */ if (!skb) return -ENOMEM; rcu_read_lock(); sk_for_each_rcu(sk, &net_pfkey->table) { struct pfkey_sock *pfk = pfkey_sk(sk); int err2; /* Yes, it means that if you are meant to receive this * pfkey message you receive it twice as promiscuous * socket. */ if (pfk->promisc) pfkey_broadcast_one(skb, &skb2, allocation, sk); /* the exact target will be processed later */ if (sk == one_sk) continue; if (broadcast_flags != BROADCAST_ALL) { if (broadcast_flags & BROADCAST_PROMISC_ONLY) continue; if ((broadcast_flags & BROADCAST_REGISTERED) && !pfk->registered) continue; if (broadcast_flags & BROADCAST_ONE) continue; } err2 = pfkey_broadcast_one(skb, &skb2, allocation, sk); /* Error is cleare after succecful sending to at least one * registered KM */ if ((broadcast_flags & BROADCAST_REGISTERED) && err) err = err2; } rcu_read_unlock(); if (one_sk != NULL) err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); kfree_skb(skb2); kfree_skb(skb); return err; }
0
90,235
R_API void r_bin_java_bootstrap_method_argument_free(void /*RBinJavaBootStrapArgument*/ *b) { RBinJavaBootStrapArgument *bsm_arg = b; if (bsm_arg) { RBinJavaCPTypeMetas *tm = (RBinJavaCPTypeMetas*)bsm_arg->argument_info_cp_obj; if (tm) { if (tm && (size_t)(tm->allocs) > 1024 && tm->allocs->delete_obj) { tm->allocs->delete_obj (tm); } bsm_arg->argument_info_cp_obj = NULL; } free (bsm_arg); } }
0
25,821
void vp9_ ## type ## _predictor_ ## size ## x ## size ## _c ( uint8_t * dst , ptrdiff_t stride , const uint8_t * above , const uint8_t * left ) { type ## _predictor ( dst , stride , size , above , left ) ; } # if CONFIG_VP9_HIGHBITDEPTH # define intra_pred_high_sized ( type , size ) void vp9_high_ ## type ## _predictor_ ## size ## x ## size ## _c ( uint16_t * dst , ptrdiff_t stride , const uint16_t * above , const uint16_t * left , int bd ) { high_ ## type ## _predictor ( dst , stride , size , above , left , bd ) ; } # define intra_pred_allsizes ( type ) intra_pred_sized ( type , 4 ) intra_pred_sized ( type , 8 ) intra_pred_sized ( type , 16 ) intra_pred_sized ( type , 32 ) intra_pred_high_sized ( type , 4 ) intra_pred_high_sized ( type , 8 ) intra_pred_high_sized ( type , 16 ) intra_pred_high_sized ( type , 32 ) # else # define intra_pred_allsizes ( type ) intra_pred_sized ( type , 4 ) intra_pred_sized ( type , 8 ) intra_pred_sized ( type , 16 ) intra_pred_sized ( type , 32 ) # endif # if CONFIG_VP9_HIGHBITDEPTH static INLINE void high_d207_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int r , c ; ( void ) above ; ( void ) bd ; for ( r = 0 ; r < bs - 1 ; ++ r ) { dst [ r * stride ] = ROUND_POWER_OF_TWO ( left [ r ] + left [ r + 1 ] , 1 ) ; } dst [ ( bs - 1 ) * stride ] = left [ bs - 1 ] ; dst ++ ; for ( r = 0 ; r < bs - 2 ; ++ r ) { dst [ r * stride ] = ROUND_POWER_OF_TWO ( left [ r ] + left [ r + 1 ] * 2 + left [ r + 2 ] , 2 ) ; } dst [ ( bs - 2 ) * stride ] = ROUND_POWER_OF_TWO ( left [ bs - 2 ] + left [ bs - 1 ] * 3 , 2 ) ; dst [ ( bs - 1 ) * stride ] = left [ bs - 1 ] ; dst ++ ; for ( c = 0 ; c < bs - 2 ; ++ c ) dst [ ( bs - 1 ) * stride + c ] = left [ bs - 1 ] ; for ( r = bs - 2 ; r >= 0 ; -- r ) { for ( c = 0 ; c < bs - 2 ; ++ c ) dst [ r * stride + c ] = dst [ ( r + 1 ) * stride + c - 2 ] ; } } static INLINE void high_d63_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int r , c ; ( void ) left ; ( void ) bd ; for ( r = 0 ; r < bs ; ++ r ) { for ( c = 0 ; c < bs ; ++ c ) { dst [ c ] = r & 1 ? ROUND_POWER_OF_TWO ( above [ r / 2 + c ] + above [ r / 2 + c + 1 ] * 2 + above [ r / 2 + c + 2 ] , 2 ) : ROUND_POWER_OF_TWO ( above [ r / 2 + c ] + above [ r / 2 + c + 1 ] , 1 ) ; } dst += stride ; } } static INLINE void high_d45_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int r , c ; ( void ) left ; ( void ) bd ; for ( r = 0 ; r < bs ; ++ r ) { for ( c = 0 ; c < bs ; ++ c ) { dst [ c ] = r + c + 2 < bs * 2 ? 
ROUND_POWER_OF_TWO ( above [ r + c ] + above [ r + c + 1 ] * 2 + above [ r + c + 2 ] , 2 ) : above [ bs * 2 - 1 ] ; } dst += stride ; } } static INLINE void high_d117_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int r , c ; ( void ) bd ; for ( c = 0 ; c < bs ; c ++ ) dst [ c ] = ROUND_POWER_OF_TWO ( above [ c - 1 ] + above [ c ] , 1 ) ; dst += stride ; dst [ 0 ] = ROUND_POWER_OF_TWO ( left [ 0 ] + above [ - 1 ] * 2 + above [ 0 ] , 2 ) ; for ( c = 1 ; c < bs ; c ++ ) dst [ c ] = ROUND_POWER_OF_TWO ( above [ c - 2 ] + above [ c - 1 ] * 2 + above [ c ] , 2 ) ; dst += stride ; dst [ 0 ] = ROUND_POWER_OF_TWO ( above [ - 1 ] + left [ 0 ] * 2 + left [ 1 ] , 2 ) ; for ( r = 3 ; r < bs ; ++ r ) dst [ ( r - 2 ) * stride ] = ROUND_POWER_OF_TWO ( left [ r - 3 ] + left [ r - 2 ] * 2 + left [ r - 1 ] , 2 ) ; for ( r = 2 ; r < bs ; ++ r ) { for ( c = 1 ; c < bs ; c ++ ) dst [ c ] = dst [ - 2 * stride + c - 1 ] ; dst += stride ; } } static INLINE void high_d135_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int r , c ; ( void ) bd ; dst [ 0 ] = ROUND_POWER_OF_TWO ( left [ 0 ] + above [ - 1 ] * 2 + above [ 0 ] , 2 ) ; for ( c = 1 ; c < bs ; c ++ ) dst [ c ] = ROUND_POWER_OF_TWO ( above [ c - 2 ] + above [ c - 1 ] * 2 + above [ c ] , 2 ) ; dst [ stride ] = ROUND_POWER_OF_TWO ( above [ - 1 ] + left [ 0 ] * 2 + left [ 1 ] , 2 ) ; for ( r = 2 ; r < bs ; ++ r ) dst [ r * stride ] = ROUND_POWER_OF_TWO ( left [ r - 2 ] + left [ r - 1 ] * 2 + left [ r ] , 2 ) ; dst += stride ; for ( r = 1 ; r < bs ; ++ r ) { for ( c = 1 ; c < bs ; c ++ ) dst [ c ] = dst [ - stride + c - 1 ] ; dst += stride ; } } static INLINE void high_d153_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int r , c ; ( void ) bd ; dst [ 0 ] = ROUND_POWER_OF_TWO ( above [ - 1 ] + left [ 0 ] , 1 ) ; for ( r = 1 ; r < bs ; r ++ ) dst [ r * stride ] = ROUND_POWER_OF_TWO ( left [ r - 1 ] + left [ r ] , 1 ) ; dst ++ ; dst [ 0 ] = ROUND_POWER_OF_TWO ( left [ 0 ] + above [ - 1 ] * 2 + above [ 0 ] , 2 ) ; dst [ stride ] = ROUND_POWER_OF_TWO ( above [ - 1 ] + left [ 0 ] * 2 + left [ 1 ] , 2 ) ; for ( r = 2 ; r < bs ; r ++ ) dst [ r * stride ] = ROUND_POWER_OF_TWO ( left [ r - 2 ] + left [ r - 1 ] * 2 + left [ r ] , 2 ) ; dst ++ ; for ( c = 0 ; c < bs - 2 ; c ++ ) dst [ c ] = ROUND_POWER_OF_TWO ( above [ c - 1 ] + above [ c ] * 2 + above [ c + 1 ] , 2 ) ; dst += stride ; for ( r = 1 ; r < bs ; ++ r ) { for ( c = 0 ; c < bs - 2 ; c ++ ) dst [ c ] = dst [ - stride + c - 2 ] ; dst += stride ; } } static INLINE void high_v_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int r ; ( void ) left ; ( void ) bd ; for ( r = 0 ; r < bs ; r ++ ) { vpx_memcpy ( dst , above , bs * sizeof ( uint16_t ) ) ; dst += stride ; } } static INLINE void high_h_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int r ; ( void ) above ; ( void ) bd ; for ( r = 0 ; r < bs ; r ++ ) { vpx_memset16 ( dst , left [ r ] , bs ) ; dst += stride ; } } static INLINE void high_tm_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int r , c ; int ytop_left = above [ - 1 ] ; ( void ) bd ; for ( r = 0 ; r < bs ; r ++ ) { for ( c = 0 ; c < bs ; c ++ ) dst [ c ] = clip_pixel_high ( left [ r ] + 
above [ c ] - ytop_left , bd ) ; dst += stride ; } } static INLINE void high_dc_128_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int r ; ( void ) above ; ( void ) left ; for ( r = 0 ; r < bs ; r ++ ) { vpx_memset16 ( dst , 128 << ( bd - 8 ) , bs ) ; dst += stride ; } } static INLINE void high_dc_left_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int i , r , expected_dc , sum = 0 ; ( void ) above ; ( void ) bd ; for ( i = 0 ; i < bs ; i ++ ) sum += left [ i ] ; expected_dc = ( sum + ( bs >> 1 ) ) / bs ; for ( r = 0 ; r < bs ; r ++ ) { vpx_memset16 ( dst , expected_dc , bs ) ; dst += stride ; } } static INLINE void high_dc_top_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int i , r , expected_dc , sum = 0 ; ( void ) left ; ( void ) bd ; for ( i = 0 ; i < bs ; i ++ ) sum += above [ i ] ; expected_dc = ( sum + ( bs >> 1 ) ) / bs ; for ( r = 0 ; r < bs ; r ++ ) { vpx_memset16 ( dst , expected_dc , bs ) ; dst += stride ; } } static INLINE void high_dc_predictor ( uint16_t * dst , ptrdiff_t stride , int bs , const uint16_t * above , const uint16_t * left , int bd ) { int i , r , expected_dc , sum = 0 ; const int count = 2 * bs ; ( void ) bd ; for ( i = 0 ; i < bs ; i ++ ) { sum += above [ i ] ; sum += left [ i ] ; } expected_dc = ( sum + ( count >> 1 ) ) / count ; for ( r = 0 ; r < bs ; r ++ ) { vpx_memset16 ( dst , expected_dc , bs ) ; dst += stride ; } } # endif static INLINE void d207_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int r , c ; ( void ) above ; for ( r = 0 ; r < bs - 1 ; ++ r ) dst [ r * stride ] = ROUND_POWER_OF_TWO ( left [ r ] + left [ r + 1 ] , 1 ) ; dst [ ( bs - 1 ) * stride ] = left [ bs - 1 ] ; dst ++ ; for ( r = 0 ; r < bs - 2 ; ++ r ) dst [ r * stride ] = ROUND_POWER_OF_TWO ( left [ r ] + left [ r + 1 ] * 2 + left [ r + 2 ] , 2 ) ; dst [ ( bs - 2 ) * stride ] = ROUND_POWER_OF_TWO ( left [ bs - 2 ] + left [ bs - 1 ] * 3 , 2 ) ; dst [ ( bs - 1 ) * stride ] = left [ bs - 1 ] ; dst ++ ; for ( c = 0 ; c < bs - 2 ; ++ c ) dst [ ( bs - 1 ) * stride + c ] = left [ bs - 1 ] ; for ( r = bs - 2 ; r >= 0 ; -- r ) for ( c = 0 ; c < bs - 2 ; ++ c ) dst [ r * stride + c ] = dst [ ( r + 1 ) * stride + c - 2 ] ; } intra_pred_allsizes ( d207 ) static INLINE void d63_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int r , c ; ( void ) left ; for ( r = 0 ; r < bs ; ++ r ) { for ( c = 0 ; c < bs ; ++ c ) dst [ c ] = r & 1 ? ROUND_POWER_OF_TWO ( above [ r / 2 + c ] + above [ r / 2 + c + 1 ] * 2 + above [ r / 2 + c + 2 ] , 2 ) : ROUND_POWER_OF_TWO ( above [ r / 2 + c ] + above [ r / 2 + c + 1 ] , 1 ) ; dst += stride ; } } intra_pred_allsizes ( d63 ) static INLINE void d45_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int r , c ; ( void ) left ; for ( r = 0 ; r < bs ; ++ r ) { for ( c = 0 ; c < bs ; ++ c ) dst [ c ] = r + c + 2 < bs * 2 ? 
ROUND_POWER_OF_TWO ( above [ r + c ] + above [ r + c + 1 ] * 2 + above [ r + c + 2 ] , 2 ) : above [ bs * 2 - 1 ] ; dst += stride ; } } intra_pred_allsizes ( d45 ) static INLINE void d117_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int r , c ; for ( c = 0 ; c < bs ; c ++ ) dst [ c ] = ROUND_POWER_OF_TWO ( above [ c - 1 ] + above [ c ] , 1 ) ; dst += stride ; dst [ 0 ] = ROUND_POWER_OF_TWO ( left [ 0 ] + above [ - 1 ] * 2 + above [ 0 ] , 2 ) ; for ( c = 1 ; c < bs ; c ++ ) dst [ c ] = ROUND_POWER_OF_TWO ( above [ c - 2 ] + above [ c - 1 ] * 2 + above [ c ] , 2 ) ; dst += stride ; dst [ 0 ] = ROUND_POWER_OF_TWO ( above [ - 1 ] + left [ 0 ] * 2 + left [ 1 ] , 2 ) ; for ( r = 3 ; r < bs ; ++ r ) dst [ ( r - 2 ) * stride ] = ROUND_POWER_OF_TWO ( left [ r - 3 ] + left [ r - 2 ] * 2 + left [ r - 1 ] , 2 ) ; for ( r = 2 ; r < bs ; ++ r ) { for ( c = 1 ; c < bs ; c ++ ) dst [ c ] = dst [ - 2 * stride + c - 1 ] ; dst += stride ; } } intra_pred_allsizes ( d117 ) static INLINE void d135_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int r , c ; dst [ 0 ] = ROUND_POWER_OF_TWO ( left [ 0 ] + above [ - 1 ] * 2 + above [ 0 ] , 2 ) ; for ( c = 1 ; c < bs ; c ++ ) dst [ c ] = ROUND_POWER_OF_TWO ( above [ c - 2 ] + above [ c - 1 ] * 2 + above [ c ] , 2 ) ; dst [ stride ] = ROUND_POWER_OF_TWO ( above [ - 1 ] + left [ 0 ] * 2 + left [ 1 ] , 2 ) ; for ( r = 2 ; r < bs ; ++ r ) dst [ r * stride ] = ROUND_POWER_OF_TWO ( left [ r - 2 ] + left [ r - 1 ] * 2 + left [ r ] , 2 ) ; dst += stride ; for ( r = 1 ; r < bs ; ++ r ) { for ( c = 1 ; c < bs ; c ++ ) dst [ c ] = dst [ - stride + c - 1 ] ; dst += stride ; } } intra_pred_allsizes ( d135 ) static INLINE void d153_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int r , c ; dst [ 0 ] = ROUND_POWER_OF_TWO ( above [ - 1 ] + left [ 0 ] , 1 ) ; for ( r = 1 ; r < bs ; r ++ ) dst [ r * stride ] = ROUND_POWER_OF_TWO ( left [ r - 1 ] + left [ r ] , 1 ) ; dst ++ ; dst [ 0 ] = ROUND_POWER_OF_TWO ( left [ 0 ] + above [ - 1 ] * 2 + above [ 0 ] , 2 ) ; dst [ stride ] = ROUND_POWER_OF_TWO ( above [ - 1 ] + left [ 0 ] * 2 + left [ 1 ] , 2 ) ; for ( r = 2 ; r < bs ; r ++ ) dst [ r * stride ] = ROUND_POWER_OF_TWO ( left [ r - 2 ] + left [ r - 1 ] * 2 + left [ r ] , 2 ) ; dst ++ ; for ( c = 0 ; c < bs - 2 ; c ++ ) dst [ c ] = ROUND_POWER_OF_TWO ( above [ c - 1 ] + above [ c ] * 2 + above [ c + 1 ] , 2 ) ; dst += stride ; for ( r = 1 ; r < bs ; ++ r ) { for ( c = 0 ; c < bs - 2 ; c ++ ) dst [ c ] = dst [ - stride + c - 2 ] ; dst += stride ; } } intra_pred_allsizes ( d153 ) static INLINE void v_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int r ; ( void ) left ; for ( r = 0 ; r < bs ; r ++ ) { vpx_memcpy ( dst , above , bs ) ; dst += stride ; } } intra_pred_allsizes ( v ) static INLINE void h_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int r ; ( void ) above ; for ( r = 0 ; r < bs ; r ++ ) { vpx_memset ( dst , left [ r ] , bs ) ; dst += stride ; } } intra_pred_allsizes ( h ) static INLINE void tm_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int r , c ; int ytop_left = above [ - 1 ] ; for ( r = 0 ; r < bs ; r ++ ) { for ( c = 0 ; c < bs ; c ++ ) dst [ c ] = clip_pixel ( left [ r ] + above [ c ] - ytop_left ) ; dst += stride ; } } 
intra_pred_allsizes ( tm ) static INLINE void dc_128_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int r ; ( void ) above ; ( void ) left ; for ( r = 0 ; r < bs ; r ++ ) { vpx_memset ( dst , 128 , bs ) ; dst += stride ; } } intra_pred_allsizes ( dc_128 ) static INLINE void dc_left_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int i , r , expected_dc , sum = 0 ; ( void ) above ; for ( i = 0 ; i < bs ; i ++ ) sum += left [ i ] ; expected_dc = ( sum + ( bs >> 1 ) ) / bs ; for ( r = 0 ; r < bs ; r ++ ) { vpx_memset ( dst , expected_dc , bs ) ; dst += stride ; } } intra_pred_allsizes ( dc_left ) static INLINE void dc_top_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int i , r , expected_dc , sum = 0 ; ( void ) left ; for ( i = 0 ; i < bs ; i ++ ) sum += above [ i ] ; expected_dc = ( sum + ( bs >> 1 ) ) / bs ; for ( r = 0 ; r < bs ; r ++ ) { vpx_memset ( dst , expected_dc , bs ) ; dst += stride ; } } intra_pred_allsizes ( dc_top ) static INLINE void dc_predictor ( uint8_t * dst , ptrdiff_t stride , int bs , const uint8_t * above , const uint8_t * left ) { int i , r , expected_dc , sum = 0 ; const int count = 2 * bs ; for ( i = 0 ; i < bs ; i ++ ) { sum += above [ i ] ; sum += left [ i ] ; } expected_dc = ( sum + ( count >> 1 ) ) / count ; for ( r = 0 ; r < bs ; r ++ ) { vpx_memset ( dst , expected_dc , bs ) ; dst += stride ; } } intra_pred_allsizes ( dc )
0
372,443
cmsHPROFILE CMSEXPORT cmsOpenProfileFromIOhandlerTHR(cmsContext ContextID, cmsIOHANDLER* io) { _cmsICCPROFILE* NewIcc; cmsHPROFILE hEmpty = cmsCreateProfilePlaceholder(ContextID); if (hEmpty == NULL) return NULL; NewIcc = (_cmsICCPROFILE*) hEmpty; NewIcc ->IOhandler = io; if (!_cmsReadHeader(NewIcc)) goto Error; return hEmpty; Error: cmsCloseProfile(hEmpty); return NULL; }
0
336,193
static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, uint8_t *dest, int dstW, int dstY) { int dummy=0; switch(c->dstFormat) { #ifdef HAVE_MMX case IMGFMT_BGR32: { asm volatile( YSCALEYUV2RGBX WRITEBGR32(%4, %5, %%REGa) :: "r" (&c->redDither), "m" (dummy), "m" (dummy), "m" (dummy), "r" (dest), "m" (dstW) : "%"REG_a, "%"REG_d, "%"REG_S ); } break; case IMGFMT_BGR24: { asm volatile( YSCALEYUV2RGBX "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize "add %4, %%"REG_b" \n\t" WRITEBGR24(%%REGb, %5, %%REGa) :: "r" (&c->redDither), "m" (dummy), "m" (dummy), "m" (dummy), "r" (dest), "m" (dstW) : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx ); } break; case IMGFMT_BGR15: { asm volatile( YSCALEYUV2RGBX /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP "paddusb "MANGLE(b5Dither)", %%mm2\n\t" "paddusb "MANGLE(g5Dither)", %%mm4\n\t" "paddusb "MANGLE(r5Dither)", %%mm5\n\t" #endif WRITEBGR15(%4, %5, %%REGa) :: "r" (&c->redDither), "m" (dummy), "m" (dummy), "m" (dummy), "r" (dest), "m" (dstW) : "%"REG_a, "%"REG_d, "%"REG_S ); } break; case IMGFMT_BGR16: { asm volatile( YSCALEYUV2RGBX /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP "paddusb "MANGLE(b5Dither)", %%mm2\n\t" "paddusb "MANGLE(g6Dither)", %%mm4\n\t" "paddusb "MANGLE(r5Dither)", %%mm5\n\t" #endif WRITEBGR16(%4, %5, %%REGa) :: "r" (&c->redDither), "m" (dummy), "m" (dummy), "m" (dummy), "r" (dest), "m" (dstW) : "%"REG_a, "%"REG_d, "%"REG_S ); } break; case IMGFMT_YUY2: { asm volatile( YSCALEYUV2PACKEDX /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ "psraw $3, %%mm3 \n\t" "psraw $3, %%mm4 \n\t" "psraw $3, %%mm1 \n\t" "psraw $3, %%mm7 \n\t" WRITEYUY2(%4, %5, %%REGa) :: "r" (&c->redDither), "m" (dummy), "m" (dummy), "m" (dummy), "r" (dest), "m" (dstW) : "%"REG_a, "%"REG_d, "%"REG_S ); } break; #endif default: #ifdef HAVE_ALTIVEC /* The following list of supported dstFormat values should match what's found in the body of altivec_yuv2packedX() */ if(c->dstFormat==IMGFMT_ABGR || c->dstFormat==IMGFMT_BGRA || c->dstFormat==IMGFMT_BGR24 || c->dstFormat==IMGFMT_RGB24 || c->dstFormat==IMGFMT_RGBA || c->dstFormat==IMGFMT_ARGB) altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize, chrFilter, chrSrc, chrFilterSize, dest, dstW, dstY); else #endif yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize, chrFilter, chrSrc, chrFilterSize, dest, dstW, dstY); break; } }
1
144,534
static PyObject *__pyx_pf_17clickhouse_driver_14bufferedwriter_2__pyx_unpickle_BufferedSocketWriter(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_BufferedSocketWriter", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0x3baf4af: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0x3baf4af = (buffer, buffer_size, position, sock))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0x3baf4af) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if __pyx_checksum != 0x3baf4af: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0x3baf4af = (buffer, buffer_size, position, sock))" % __pyx_checksum) * __pyx_result = BufferedSocketWriter.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0x3baf4af: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0x3baf4af = (buffer, buffer_size, position, sock))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = BufferedSocketWriter.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0x3b, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0x3baf4af: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0x3baf4af = (buffer, buffer_size, position, sock))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0x3baf4af = (buffer, buffer_size, position, sock))" % __pyx_checksum) * __pyx_result = BufferedSocketWriter.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_BufferedSocketWriter__set_state(<BufferedSocketWriter> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_17clickhouse_driver_14bufferedwriter_BufferedSocketWriter), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0x3baf4af = (buffer, buffer_size, position, sock))" % __pyx_checksum) * __pyx_result = BufferedSocketWriter.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_BufferedSocketWriter__set_state(<BufferedSocketWriter> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = BufferedSocketWriter.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_BufferedSocketWriter__set_state(<BufferedSocketWriter> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_BufferedSocketWriter__set_state(BufferedSocketWriter __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_f_17clickhouse_driver_14bufferedwriter___pyx_unpickle_BufferedSocketWriter__set_state(((struct __pyx_obj_17clickhouse_driver_14bufferedwriter_BufferedSocketWriter *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 
0x3baf4af = (buffer, buffer_size, position, sock))" % __pyx_checksum) * __pyx_result = BufferedSocketWriter.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_BufferedSocketWriter__set_state(<BufferedSocketWriter> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_BufferedSocketWriter__set_state(<BufferedSocketWriter> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_BufferedSocketWriter__set_state(BufferedSocketWriter __pyx_result, tuple __pyx_state): * __pyx_result.buffer = __pyx_state[0]; __pyx_result.buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_BufferedSocketWriter(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("clickhouse_driver.bufferedwriter.__pyx_unpickle_BufferedSocketWriter", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
0
106,032
static size_t encode_block(char *str, char *buf, size_t buflen, const char *fromcode, const char *tocode, encoder_t encoder) { if (!fromcode) { return (*encoder)(str, buf, buflen, tocode); } const iconv_t cd = mutt_ch_iconv_open(tocode, fromcode, 0); assert(cd != (iconv_t)(-1)); const char *ib = buf; size_t ibl = buflen; char tmp[ENCWORD_LEN_MAX - ENCWORD_LEN_MIN + 1]; char *ob = tmp; size_t obl = sizeof(tmp) - strlen(tocode); const size_t n1 = iconv(cd, (ICONV_CONST char **) &ib, &ibl, &ob, &obl); const size_t n2 = iconv(cd, NULL, NULL, &ob, &obl); assert(n1 != (size_t)(-1) && n2 != (size_t)(-1)); iconv_close(cd); return (*encoder)(str, tmp, ob - tmp, tocode); }
0
265,213
R_API bool r_socket_is_connected (RSocket *s) { return false; }
0
64,731
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *iph; struct sk_buff *skb; struct ip6_mtuinfo *mtu_info; if (!np->rxopt.bits.rxpmtu) return; skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC); if (!skb) return; skb_put(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); iph = ipv6_hdr(skb); iph->daddr = fl6->daddr; mtu_info = IP6CBMTU(skb); mtu_info->ip6m_mtu = mtu; mtu_info->ip6m_addr.sin6_family = AF_INET6; mtu_info->ip6m_addr.sin6_port = 0; mtu_info->ip6m_addr.sin6_flowinfo = 0; mtu_info->ip6m_addr.sin6_scope_id = fl6->flowi6_oif; mtu_info->ip6m_addr.sin6_addr = ipv6_hdr(skb)->daddr; __skb_pull(skb, skb_tail_pointer(skb) - skb->data); skb_reset_transport_header(skb); skb = xchg(&np->rxpmtu, skb); kfree_skb(skb); }
0
523,770
int BN_set_word(BIGNUM *a, BN_ULONG w) { bn_check_top(a); if (bn_expand(a, (int)sizeof(BN_ULONG) * 8) == NULL) return (0); a->neg = 0; a->d[0] = w; a->top = (w ? 1 : 0); bn_check_top(a); return (1); }
0
249,442
void RenderFrameImpl::willRequestAfterPreconnect( blink::WebLocalFrame* frame, blink::WebURLRequest& request) { DCHECK(!frame_ || frame_ == frame); WebString custom_user_agent; DCHECK(!request.extraData()); bool was_after_preconnect_request = true; RequestExtraData* extra_data = new RequestExtraData(); extra_data->set_custom_user_agent(custom_user_agent); extra_data->set_was_after_preconnect_request(was_after_preconnect_request); request.setExtraData(extra_data); }
0
244,151
char* menu_cache_item_get_file_path( MenuCacheItem* item ) { if( ! item->file_name || ! item->file_dir ) return NULL; return g_build_filename( item->file_dir->dir + 1, item->file_name, NULL ); }
0
94,707
static int finish_read (lua_State *L, int status, lua_KContext ctx) { int rc; struct ssh_userdata *sshu = NULL; sshu = (struct ssh_userdata *) nseU_checkudata(L, 1, SSH2_UDATA, "ssh2"); if (lua_toboolean(L, -2)) { size_t n = 0; size_t l = 0; lua_getuservalue(L, 1); lua_getfield(L, -1, "sp_buff"); lua_pushvalue(L, 3); lua_concat(L, 2); const char *data = lua_tolstring(L, -1, &l); lua_pushliteral(L, ""); lua_setfield(L, 4, "sp_buff"); while (n < l) { #ifdef WIN32 rc = send(sshu->sp[1], data + n, l - n, 0); #else rc = write(sshu->sp[1], data + n, l - n); #endif if (rc == -1 && errno != EAGAIN) { luaL_error(L, "Writing to socket pair: %s", strerror(errno)); } else if (rc == -1 && errno == EAGAIN) { lua_pushlstring(L, data + n, l - n); lua_setfield(L, 4, "sp_buff"); break; } else { n += rc; } } return 0; } else { return lua_error(L); /* uses idx 6 */ } }
0
399,501
void hrtimer_run_queues(void) { struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); ktime_t now; if (__hrtimer_hres_active(cpu_base)) return; /* * This _is_ ugly: We have to check periodically, whether we * can switch to highres and / or nohz mode. The clocksource * switch happens with xtime_lock held. Notification from * there only sets the check bit in the tick_oneshot code, * otherwise we might deadlock vs. xtime_lock. */ if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) { hrtimer_switch_to_hres(); return; } raw_spin_lock(&cpu_base->lock); now = hrtimer_update_base(cpu_base); __hrtimer_run_queues(cpu_base, now); raw_spin_unlock(&cpu_base->lock); }
0
482,104
static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct netfront_info *np = netdev_priv(dev); if (np->broken) return -ENODEV; switch (xdp->command) { case XDP_SETUP_PROG: return xennet_xdp_set(dev, xdp->prog, xdp->extack); default: return -EINVAL; } }
0
396,982
otherinfo(ParsedURL *target, ParsedURL *current, char *referer) { Str s = Strnew(); const int *no_referer_ptr; int no_referer; Strcat_charp(s, "User-Agent: "); if (UserAgent == NULL || *UserAgent == '\0') Strcat_charp(s, w3m_version); else Strcat_charp(s, UserAgent); Strcat_charp(s, "\r\n"); Strcat_m_charp(s, "Accept: ", AcceptMedia, "\r\n", NULL); Strcat_m_charp(s, "Accept-Encoding: ", AcceptEncoding, "\r\n", NULL); Strcat_m_charp(s, "Accept-Language: ", AcceptLang, "\r\n", NULL); if (target->host) { Strcat_charp(s, "Host: "); Strcat_charp(s, target->host); if (target->port != DefaultPort[target->scheme]) Strcat(s, Sprintf(":%d", target->port)); Strcat_charp(s, "\r\n"); } if (target->is_nocache || NoCache) { Strcat_charp(s, "Pragma: no-cache\r\n"); Strcat_charp(s, "Cache-control: no-cache\r\n"); } no_referer = NoSendReferer; no_referer_ptr = query_SCONF_NO_REFERER_FROM(current); no_referer = no_referer || (no_referer_ptr && *no_referer_ptr); no_referer_ptr = query_SCONF_NO_REFERER_TO(target); no_referer = no_referer || (no_referer_ptr && *no_referer_ptr); if (!no_referer) { #ifdef USE_SSL if (current && current->scheme == SCM_HTTPS && target->scheme != SCM_HTTPS) { /* Don't send Referer: if https:// -> http:// */ } else #endif if (referer == NULL && current && current->scheme != SCM_LOCAL && current->scheme != SCM_LOCAL_CGI && (current->scheme != SCM_FTP || (current->user == NULL && current->pass == NULL))) { char *p = current->label; Strcat_charp(s, "Referer: "); current->label = NULL; Strcat(s, parsedURL2Str(current)); current->label = p; Strcat_charp(s, "\r\n"); } else if (referer != NULL && referer != NO_REFERER) { char *p = strchr(referer, '#'); Strcat_charp(s, "Referer: "); if (p) Strcat_charp_n(s, referer, p - referer); else Strcat_charp(s, referer); Strcat_charp(s, "\r\n"); } } return s->ptr; }
0
444,743
TEST_F(Http1ServerConnectionImplTest, ChunkedBody) { initialize(); InSequence sequence; MockRequestDecoder decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); TestRequestHeaderMapImpl expected_headers{ {":path", "/"}, {":method", "POST"}, {"transfer-encoding", "chunked"}, }; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); Buffer::OwnedImpl expected_data("Hello World"); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); // Call to decodeData("", true) happens after. Buffer::OwnedImpl empty(""); EXPECT_CALL(decoder, decodeData(BufferEqual(&empty), true)); Buffer::OwnedImpl buffer("POST / HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n" "6\r\nHello \r\n" "5\r\nWorld\r\n" "0\r\n\r\n"); auto status = codec_->dispatch(buffer); EXPECT_TRUE(status.ok()); EXPECT_EQ(0U, buffer.length()); }
0
383,333
apdu_shutdown_reader (int slot) { int sw; if (DBG_READER) log_debug ("enter: apdu_shutdown_reader: slot=%d\n", slot); if (slot < 0 || slot >= MAX_READER || !reader_table[slot].used ) { if (DBG_READER) log_debug ("leave: apdu_shutdown_reader => SW_HOST_NO_DRIVER\n"); return SW_HOST_NO_DRIVER; } sw = apdu_disconnect (slot); if (sw) { if (DBG_READER) log_debug ("leave: apdu_shutdown_reader => 0x%x (apdu_disconnect)\n", sw); return sw; } if (reader_table[slot].shutdown_reader) { sw = reader_table[slot].shutdown_reader (slot); if (DBG_READER) log_debug ("leave: apdu_shutdown_reader => 0x%x (close_reader)\n", sw); return sw; } if (DBG_READER) log_debug ("leave: apdu_shutdown_reader => SW_HOST_NOT_SUPPORTED\n"); return SW_HOST_NOT_SUPPORTED; }
0
273,441
getTiffCompressedFormat(l_uint16 tiffcomp) { l_int32 comptype; switch (tiffcomp) { case COMPRESSION_CCITTFAX4: comptype = IFF_TIFF_G4; break; case COMPRESSION_CCITTFAX3: comptype = IFF_TIFF_G3; break; case COMPRESSION_CCITTRLE: comptype = IFF_TIFF_RLE; break; case COMPRESSION_PACKBITS: comptype = IFF_TIFF_PACKBITS; break; case COMPRESSION_LZW: comptype = IFF_TIFF_LZW; break; case COMPRESSION_ADOBE_DEFLATE: comptype = IFF_TIFF_ZIP; break; case COMPRESSION_JPEG: comptype = IFF_TIFF_JPEG; break; default: comptype = IFF_TIFF; break; } return comptype; }
0
217,964
standard_info_part1(standard_display *dp, png_structp pp, png_infop pi) { if (png_get_bit_depth(pp, pi) != dp->bit_depth) png_error(pp, "validate: bit depth changed"); if (png_get_color_type(pp, pi) != dp->colour_type) png_error(pp, "validate: color type changed"); if (png_get_filter_type(pp, pi) != PNG_FILTER_TYPE_BASE) png_error(pp, "validate: filter type changed"); if (png_get_interlace_type(pp, pi) != dp->interlace_type) png_error(pp, "validate: interlacing changed"); if (png_get_compression_type(pp, pi) != PNG_COMPRESSION_TYPE_BASE) png_error(pp, "validate: compression type changed"); dp->w = png_get_image_width(pp, pi); if (dp->w != standard_width(pp, dp->id)) png_error(pp, "validate: image width changed"); dp->h = png_get_image_height(pp, pi); if (dp->h != standard_height(pp, dp->id)) png_error(pp, "validate: image height changed"); /* Record (but don't check at present) the input sBIT according to the colour * type information. */ { png_color_8p sBIT = 0; if (png_get_sBIT(pp, pi, &sBIT) & PNG_INFO_sBIT) { int sBIT_invalid = 0; if (sBIT == 0) png_error(pp, "validate: unexpected png_get_sBIT result"); if (dp->colour_type & PNG_COLOR_MASK_COLOR) { if (sBIT->red == 0 || sBIT->red > dp->bit_depth) sBIT_invalid = 1; else dp->red_sBIT = sBIT->red; if (sBIT->green == 0 || sBIT->green > dp->bit_depth) sBIT_invalid = 1; else dp->green_sBIT = sBIT->green; if (sBIT->blue == 0 || sBIT->blue > dp->bit_depth) sBIT_invalid = 1; else dp->blue_sBIT = sBIT->blue; } else /* !COLOR */ { if (sBIT->gray == 0 || sBIT->gray > dp->bit_depth) sBIT_invalid = 1; else dp->blue_sBIT = dp->green_sBIT = dp->red_sBIT = sBIT->gray; } /* All 8 bits in tRNS for a palette image are significant - see the * spec. */ if (dp->colour_type & PNG_COLOR_MASK_ALPHA) { if (sBIT->alpha == 0 || sBIT->alpha > dp->bit_depth) sBIT_invalid = 1; else dp->alpha_sBIT = sBIT->alpha; } if (sBIT_invalid) png_error(pp, "validate: sBIT value out of range"); } } /* Important: this is validating the value *before* any transforms have been * put in place. It doesn't matter for the standard tests, where there are * no transforms, but it does for other tests where rowbytes may change after * png_read_update_info. */ if (png_get_rowbytes(pp, pi) != standard_rowsize(pp, dp->id)) png_error(pp, "validate: row size changed"); /* Validate the colour type 3 palette (this can be present on other color * types.) */ standard_palette_validate(dp, pp, pi); /* In any case always check for a tranparent color (notice that the * colour type 3 case must not give a successful return on the get_tRNS call * with these arguments!) */ { png_color_16p trans_color = 0; if (png_get_tRNS(pp, pi, 0, 0, &trans_color) & PNG_INFO_tRNS) { if (trans_color == 0) png_error(pp, "validate: unexpected png_get_tRNS (color) result"); switch (dp->colour_type) { case 0: dp->transparent.red = dp->transparent.green = dp->transparent.blue = trans_color->gray; dp->has_tRNS = 1; break; case 2: dp->transparent.red = trans_color->red; dp->transparent.green = trans_color->green; dp->transparent.blue = trans_color->blue; dp->has_tRNS = 1; break; case 3: /* Not expected because it should result in the array case * above. */ png_error(pp, "validate: unexpected png_get_tRNS result"); break; default: png_error(pp, "validate: invalid tRNS chunk with alpha image"); } } } /* Read the number of passes - expected to match the value used when * creating the image (interlaced or not). This has the side effect of * turning on interlace handling (if do_interlace is not set.) 
*/ dp->npasses = npasses_from_interlace_type(pp, dp->interlace_type); if (!dp->do_interlace) { # ifdef PNG_READ_INTERLACING_SUPPORTED if (dp->npasses != png_set_interlace_handling(pp)) png_error(pp, "validate: file changed interlace type"); # else /* !READ_INTERLACING */ /* This should never happen: the relevant tests (!do_interlace) should * not be run. */ if (dp->npasses > 1) png_error(pp, "validate: no libpng interlace support"); # endif /* !READ_INTERLACING */ } /* Caller calls png_read_update_info or png_start_read_image now, then calls * part2. */ }
0
156,746
void CServer::ConchainModCommandUpdate(IConsole::IResult *pResult, void *pUserData, IConsole::FCommandCallback pfnCallback, void *pCallbackUserData) { if(pResult->NumArguments() == 2) { CServer *pThis = static_cast<CServer *>(pUserData); const IConsole::CCommandInfo *pInfo = pThis->Console()->GetCommandInfo(pResult->GetString(0), CFGFLAG_SERVER, false); int OldAccessLevel = 0; if(pInfo) OldAccessLevel = pInfo->GetAccessLevel(); pfnCallback(pResult, pCallbackUserData); if(pInfo && OldAccessLevel != pInfo->GetAccessLevel()) { for(int i = 0; i < MAX_CLIENTS; ++i) { if(pThis->m_aClients[i].m_State == CServer::CClient::STATE_EMPTY || pThis->m_aClients[i].m_Authed != CServer::AUTHED_MOD || (pThis->m_aClients[i].m_pRconCmdToSend && str_comp(pResult->GetString(0), pThis->m_aClients[i].m_pRconCmdToSend->m_pName) >= 0)) continue; if(OldAccessLevel == IConsole::ACCESS_LEVEL_ADMIN) pThis->SendRconCmdAdd(pInfo, i); else pThis->SendRconCmdRem(pInfo, i); } } } else pfnCallback(pResult, pCallbackUserData); }
0
63,733
static int foreach_comment(void *user, const char *k, const char *v) { RAnalMetaUserItem *ui = user; RCore *core = ui->anal->user; const char *cmd = ui->user; if (!strncmp (k, "meta.C.", 7)) { char *cmt = (char *)sdb_decode (v, 0); if (cmt) { r_core_cmdf (core, "s %s", k + 7); r_core_cmd0 (core, cmd); free (cmt); } } return 1; }
0
104,436
static void TagToFilterModuleName(const char *tag,char *name) { assert(tag != (char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",tag); assert(name != (char *) NULL); #if defined(MAGICKCORE_WINDOWS_SUPPORT) (void) FormatLocaleString(name,MagickPathExtent,"FILTER_%s_.dll",tag); #elif !defined(MAGICKCORE_LTDL_DELEGATE) (void) FormatLocaleString(name,MagickPathExtent,"%s.dll",tag); #else (void) FormatLocaleString(name,MagickPathExtent,"%s.la",tag); #endif }
0
368,905
hook_info_get_hashtable (struct t_weechat_plugin *plugin, const char *info_name, struct t_hashtable *hashtable) { struct t_hook *ptr_hook, *next_hook; struct t_hashtable *value; /* make C compiler happy */ (void) plugin; if (!info_name || !info_name[0]) return NULL; hook_exec_start (); ptr_hook = weechat_hooks[HOOK_TYPE_INFO_HASHTABLE]; while (ptr_hook) { next_hook = ptr_hook->next_hook; if (!ptr_hook->deleted && !ptr_hook->running && (string_strcasecmp (HOOK_INFO_HASHTABLE(ptr_hook, info_name), info_name) == 0)) { ptr_hook->running = 1; value = (HOOK_INFO_HASHTABLE(ptr_hook, callback)) (ptr_hook->callback_data, info_name, hashtable); ptr_hook->running = 0; hook_exec_end (); return value; } ptr_hook = next_hook; } hook_exec_end (); /* info not found */ return NULL; }
0
511,173
array_value (s, quoted, flags, rtype, indp) char *s; int quoted, flags, *rtype; arrayind_t *indp; { return (array_value_internal (s, quoted, flags|AV_ALLOWALL, rtype, indp)); }
0
299,457
unsigned long CjfifDecode::GetPosEmbedEnd() { return m_nPosEmbedEnd; }
0
11,858
void ExtensionTtsController::OnSpeechFinished( int request_id, const std::string& error_message) { if (!current_utterance_ || request_id != current_utterance_->id()) return; current_utterance_->set_error(error_message); FinishCurrentUtterance(); SpeakNextUtterance(); }
1
393,021
xmlSchemaFormatQName(xmlChar **buf, const xmlChar *namespaceName, const xmlChar *localName) { FREE_AND_NULL(*buf) if (namespaceName != NULL) { *buf = xmlStrdup(BAD_CAST "{"); *buf = xmlStrcat(*buf, namespaceName); *buf = xmlStrcat(*buf, BAD_CAST "}"); } if (localName != NULL) { if (namespaceName == NULL) return(localName); *buf = xmlStrcat(*buf, localName); } else { *buf = xmlStrcat(*buf, BAD_CAST "(NULL)"); } return ((const xmlChar *) *buf); }
0
224,142
void SoftAMR::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) { if (portIndex != 1) { return; } switch (mOutputPortSettingsChange) { case NONE: break; case AWAITING_DISABLED: { CHECK(!enabled); mOutputPortSettingsChange = AWAITING_ENABLED; break; } default: { CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED); CHECK(enabled); mOutputPortSettingsChange = NONE; break; } } }
0
153,221
static int lock_request(struct fuse_conn *fc, struct fuse_req *req) { int err = 0; if (req) { spin_lock(&fc->lock); if (req->aborted) err = -ENOENT; else req->locked = 1; spin_unlock(&fc->lock); } return err; }
0
125,566
static int tcos_init(sc_card_t *card) { unsigned long flags; tcos_data *data = malloc(sizeof(tcos_data)); if (!data) return SC_ERROR_OUT_OF_MEMORY; card->name = "TCOS"; card->drv_data = (void *)data; card->cla = 0x00; flags = SC_ALGORITHM_RSA_RAW; flags |= SC_ALGORITHM_RSA_PAD_PKCS1; flags |= SC_ALGORITHM_RSA_HASH_NONE; _sc_card_add_rsa_alg(card, 512, flags, 0); _sc_card_add_rsa_alg(card, 768, flags, 0); _sc_card_add_rsa_alg(card, 1024, flags, 0); if (card->type == SC_CARD_TYPE_TCOS_V3) { card->caps |= SC_CARD_CAP_APDU_EXT; _sc_card_add_rsa_alg(card, 1280, flags, 0); _sc_card_add_rsa_alg(card, 1536, flags, 0); _sc_card_add_rsa_alg(card, 1792, flags, 0); _sc_card_add_rsa_alg(card, 2048, flags, 0); } return 0; }
0
59,053
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, int flags) { struct ext4_ext_path *path = NULL; struct ext4_extent newex, *ex, *ex2; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_fsblk_t newblock = 0; int free_on_err = 0, err = 0, depth, ret; unsigned int allocated = 0, offset = 0; unsigned int allocated_clusters = 0; struct ext4_allocation_request ar; ext4_io_end_t *io = ext4_inode_aio(inode); ext4_lblk_t cluster_offset; int set_unwritten = 0; bool map_from_cluster = false; ext_debug("blocks %u/%u requested for inode %lu\n", map->m_lblk, map->m_len, inode->i_ino); trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); /* find extent for this block */ path = ext4_find_extent(inode, map->m_lblk, NULL, 0); if (IS_ERR(path)) { err = PTR_ERR(path); path = NULL; goto out2; } depth = ext_depth(inode); /* * consistent leaf must not be empty; * this situation is possible, though, _during_ tree modification; * this is why assert can't be put in ext4_find_extent() */ if (unlikely(path[depth].p_ext == NULL && depth != 0)) { EXT4_ERROR_INODE(inode, "bad extent address " "lblock: %lu, depth: %d pblock %lld", (unsigned long) map->m_lblk, depth, path[depth].p_block); err = -EFSCORRUPTED; goto out2; } ex = path[depth].p_ext; if (ex) { ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); ext4_fsblk_t ee_start = ext4_ext_pblock(ex); unsigned short ee_len; /* * unwritten extents are treated as holes, except that * we split out initialized portions during a write. */ ee_len = ext4_ext_get_actual_len(ex); trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); /* if found extent covers block, simply return it */ if (in_range(map->m_lblk, ee_block, ee_len)) { newblock = map->m_lblk - ee_block + ee_start; /* number of remaining blocks in the extent */ allocated = ee_len - (map->m_lblk - ee_block); ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, ee_block, ee_len, newblock); /* * If the extent is initialized check whether the * caller wants to convert it to unwritten. */ if ((!ext4_ext_is_unwritten(ex)) && (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { allocated = convert_initialized_extent( handle, inode, map, &path, flags, allocated, newblock); goto out2; } else if (!ext4_ext_is_unwritten(ex)) goto out; ret = ext4_ext_handle_unwritten_extents( handle, inode, map, &path, flags, allocated, newblock); if (ret < 0) err = ret; else allocated = ret; goto out2; } } /* * requested block isn't allocated yet; * we couldn't try to create block if create flag is zero */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { /* * put just found gap into cache to speed up * subsequent requests */ ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); goto out2; } /* * Okay, we need to do block allocation. */ newex.ee_block = cpu_to_le32(map->m_lblk); cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); /* * If we are doing bigalloc, check to see if the extent returned * by ext4_find_extent() implies a cluster we can use. 
*/ if (cluster_offset && ex && get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { ar.len = allocated = map->m_len; newblock = map->m_pblk; map_from_cluster = true; goto got_allocated_blocks; } /* find neighbour allocated blocks */ ar.lleft = map->m_lblk; err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); if (err) goto out2; ar.lright = map->m_lblk; ex2 = NULL; err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); if (err) goto out2; /* Check if the extent after searching to the right implies a * cluster we can use. */ if ((sbi->s_cluster_ratio > 1) && ex2 && get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { ar.len = allocated = map->m_len; newblock = map->m_pblk; map_from_cluster = true; goto got_allocated_blocks; } /* * See if request is beyond maximum number of blocks we can have in * a single extent. For an initialized extent this limit is * EXT_INIT_MAX_LEN and for an unwritten extent this limit is * EXT_UNWRITTEN_MAX_LEN. */ if (map->m_len > EXT_INIT_MAX_LEN && !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) map->m_len = EXT_INIT_MAX_LEN; else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && (flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) map->m_len = EXT_UNWRITTEN_MAX_LEN; /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ newex.ee_len = cpu_to_le16(map->m_len); err = ext4_ext_check_overlap(sbi, inode, &newex, path); if (err) allocated = ext4_ext_get_actual_len(&newex); else allocated = map->m_len; /* allocate new block */ ar.inode = inode; ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); ar.logical = map->m_lblk; /* * We calculate the offset from the beginning of the cluster * for the logical block number, since when we allocate a * physical cluster, the physical block should start at the * same offset from the beginning of the cluster. This is * needed so that future calls to get_implied_cluster_alloc() * work correctly. */ offset = EXT4_LBLK_COFF(sbi, map->m_lblk); ar.len = EXT4_NUM_B2C(sbi, offset+allocated); ar.goal -= offset; ar.logical -= offset; if (S_ISREG(inode->i_mode)) ar.flags = EXT4_MB_HINT_DATA; else /* disable in-core preallocation for non-regular files */ ar.flags = 0; if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) ar.flags |= EXT4_MB_HINT_NOPREALLOC; if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) ar.flags |= EXT4_MB_DELALLOC_RESERVED; if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) ar.flags |= EXT4_MB_USE_RESERVED; newblock = ext4_mb_new_blocks(handle, &ar, &err); if (!newblock) goto out2; ext_debug("allocate new block: goal %llu, found %llu/%u\n", ar.goal, newblock, allocated); free_on_err = 1; allocated_clusters = ar.len; ar.len = EXT4_C2B(sbi, ar.len) - offset; if (ar.len > allocated) ar.len = allocated; got_allocated_blocks: /* try to insert new extent into found leaf and return */ ext4_ext_store_pblock(&newex, newblock + offset); newex.ee_len = cpu_to_le16(ar.len); /* Mark unwritten */ if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){ ext4_ext_mark_unwritten(&newex); map->m_flags |= EXT4_MAP_UNWRITTEN; /* * io_end structure was created for every IO write to an * unwritten extent. To avoid unnecessary conversion, * here we flag the IO that really needs the conversion. * For non asycn direct IO case, flag the inode state * that we need to perform conversion when IO is done. 
*/ if (flags & EXT4_GET_BLOCKS_PRE_IO) set_unwritten = 1; } err = 0; if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len); if (!err) err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags); if (!err && set_unwritten) { if (io) ext4_set_io_unwritten_flag(inode, io); else ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); } if (err && free_on_err) { int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; /* free data blocks we just allocated */ /* not a good idea to call discard here directly, * but otherwise we'd need to call it every free() */ ext4_discard_preallocations(inode); ext4_free_blocks(handle, inode, NULL, newblock, EXT4_C2B(sbi, allocated_clusters), fb_flags); goto out2; } /* previous routine could use block we allocated */ newblock = ext4_ext_pblock(&newex); allocated = ext4_ext_get_actual_len(&newex); if (allocated > map->m_len) allocated = map->m_len; map->m_flags |= EXT4_MAP_NEW; /* * Update reserved blocks/metadata blocks after successful * block allocation which had been deferred till now. */ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { unsigned int reserved_clusters; /* * Check how many clusters we had reserved this allocated range */ reserved_clusters = get_reserved_cluster_alloc(inode, map->m_lblk, allocated); if (!map_from_cluster) { BUG_ON(allocated_clusters < reserved_clusters); if (reserved_clusters < allocated_clusters) { struct ext4_inode_info *ei = EXT4_I(inode); int reservation = allocated_clusters - reserved_clusters; /* * It seems we claimed few clusters outside of * the range of this allocation. We should give * it back to the reservation pool. This can * happen in the following case: * * * Suppose s_cluster_ratio is 4 (i.e., each * cluster has 4 blocks. Thus, the clusters * are [0-3],[4-7],[8-11]... * * First comes delayed allocation write for * logical blocks 10 & 11. Since there were no * previous delayed allocated blocks in the * range [8-11], we would reserve 1 cluster * for this write. * * Next comes write for logical blocks 3 to 8. * In this case, we will reserve 2 clusters * (for [0-3] and [4-7]; and not for [8-11] as * that range has a delayed allocated blocks. * Thus total reserved clusters now becomes 3. * * Now, during the delayed allocation writeout * time, we will first write blocks [3-8] and * allocate 3 clusters for writing these * blocks. Also, we would claim all these * three clusters above. * * Now when we come here to writeout the * blocks [10-11], we would expect to claim * the reservation of 1 cluster we had made * (and we would claim it since there are no * more delayed allocated blocks in the range * [8-11]. But our reserved cluster count had * already gone to 0. * * Thus, at the step 4 above when we determine * that there are still some unwritten delayed * allocated blocks outside of our current * block range, we should increment the * reserved clusters count so that when the * remaining blocks finally gets written, we * could claim them. */ dquot_reserve_block(inode, EXT4_C2B(sbi, reservation)); spin_lock(&ei->i_block_reservation_lock); ei->i_reserved_data_blocks += reservation; spin_unlock(&ei->i_block_reservation_lock); } /* * We will claim quota for all newly allocated blocks. * We're updating the reserved space *after* the * correction above so we do not accidentally free * all the metadata reservation because we might * actually need it later on. 
*/ ext4_da_update_reserve_space(inode, allocated_clusters, 1); } } /* * Cache the extent and update transaction to commit on fdatasync only * when it is _not_ an unwritten extent. */ if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0) ext4_update_inode_fsync_trans(handle, inode, 1); else ext4_update_inode_fsync_trans(handle, inode, 0); out: if (allocated > map->m_len) allocated = map->m_len; ext4_ext_show_leaf(inode, path); map->m_flags |= EXT4_MAP_MAPPED; map->m_pblk = newblock; map->m_len = allocated; out2: ext4_ext_drop_refs(path); kfree(path); trace_ext4_ext_map_blocks_exit(inode, flags, map, err ? err : allocated); return err ? err : allocated; }
0
57,314
static int mmap_kmem(struct file *file, struct vm_area_struct *vma) { unsigned long pfn; /* Turn a kernel-virtual address into a physical page frame */ pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT; /* * RED-PEN: on some architectures there is more mapped memory than * available in mem_map which pfn_valid checks for. Perhaps should add a * new macro here. * * RED-PEN: vmalloc is not supported right now. */ if (!pfn_valid(pfn)) return -EIO; vma->vm_pgoff = pfn; return mmap_mem(file, vma); }
0
129,036
static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) { if (likely(fm10k_desc_unused(tx_ring) >= size)) return 0; return __fm10k_maybe_stop_tx(tx_ring, size); }
0
196,295
void RenderingHelper::DeleteTexture(GLuint texture_id) { glDeleteTextures(1, &texture_id); CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR); }
0
492,577
gimp_channel_get_node (GimpFilter *filter) { GimpDrawable *drawable = GIMP_DRAWABLE (filter); GimpChannel *channel = GIMP_CHANNEL (filter); GeglNode *node; GeglNode *source; GeglNode *mode_node; const Babl *color_format; node = GIMP_FILTER_CLASS (parent_class)->get_node (filter); source = gimp_drawable_get_source_node (drawable); gegl_node_add_child (node, source); g_warn_if_fail (channel->color_node == NULL); if (gimp_drawable_get_linear (drawable)) color_format = babl_format ("RGBA float"); else color_format = babl_format ("R'G'B'A float"); channel->color_node = gegl_node_new_child (node, "operation", "gegl:color", "format", color_format, NULL); gimp_gegl_node_set_color (channel->color_node, &channel->color); g_warn_if_fail (channel->mask_node == NULL); channel->mask_node = gegl_node_new_child (node, "operation", "gegl:opacity", NULL); gegl_node_connect_to (channel->color_node, "output", channel->mask_node, "input"); g_warn_if_fail (channel->invert_node == NULL); channel->invert_node = gegl_node_new_child (node, "operation", "gegl:invert-linear", NULL); if (channel->show_masked) { gegl_node_connect_to (source, "output", channel->invert_node, "input"); gegl_node_connect_to (channel->invert_node, "output", channel->mask_node, "aux"); } else { gegl_node_connect_to (source, "output", channel->mask_node, "aux"); } mode_node = gimp_drawable_get_mode_node (drawable); gegl_node_connect_to (channel->mask_node, "output", mode_node, "aux"); return node; }
0
434,342
static void _alternates_clean (void) { int i; if (Context && Context->msgcount) { for (i = 0; i < Context->msgcount; i++) Context->hdrs[i]->recip_valid = 0; } }
0
351,088
connection_ap_handshake_rewrite_and_attach(entry_connection_t *conn, origin_circuit_t *circ, crypt_path_t *cpath) { socks_request_t *socks = conn->socks_request; const or_options_t *options = get_options(); connection_t *base_conn = ENTRY_TO_CONN(conn); time_t now = time(NULL); rewrite_result_t rr; /* First we'll do the rewrite part. Let's see if we get a reasonable * answer. */ memset(&rr, 0, sizeof(rr)); connection_ap_handshake_rewrite(conn,&rr); if (rr.should_close) { /* connection_ap_handshake_rewrite told us to close the connection: * either because it sent back an answer, or because it sent back an * error */ connection_mark_unattached_ap(conn, rr.end_reason); if (END_STREAM_REASON_DONE == (rr.end_reason & END_STREAM_REASON_MASK)) return 0; else return -1; } const time_t map_expires = rr.map_expires; const int automap = rr.automap; const addressmap_entry_source_t exit_source = rr.exit_source; /* Now see whether the hostname is bogus. This could happen because of an * onion hostname whose format we don't recognize. */ hostname_type_t addresstype; if (!parse_extended_hostname(socks->address, &addresstype)) { control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); if (addresstype == BAD_HOSTNAME) { conn->socks_request->socks_extended_error_code = SOCKS5_HS_BAD_ADDRESS; } connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* If this is a .exit hostname, strip off the .name.exit part, and * see whether we're willing to connect there, and otherwise handle the * .exit address. * * We'll set chosen_exit_name and/or close the connection as appropriate. */ if (addresstype == EXIT_HOSTNAME) { /* If StrictNodes is not set, then .exit overrides ExcludeNodes but * not ExcludeExitNodes. */ routerset_t *excludeset = options->StrictNodes ? options->ExcludeExitNodesUnion_ : options->ExcludeExitNodes; const node_t *node = NULL; /* If this .exit was added by an AUTOMAP, then it came straight from * a user. That's not safe. */ if (exit_source == ADDRMAPSRC_AUTOMAP) { /* Whoops; this one is stale. It must have gotten added earlier? * (Probably this is not possible, since AllowDotExit no longer * exists.) */ log_warn(LD_APP,"Stale automapped address for '%s.exit'. Refusing.", safe_str_client(socks->address)); control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); tor_assert_nonfatal_unreached(); return -1; } /* Double-check to make sure there are no .exits coming from * impossible/weird sources. */ if (exit_source == ADDRMAPSRC_DNS || exit_source == ADDRMAPSRC_NONE) { /* It shouldn't be possible to get a .exit address from any of these * sources. */ log_warn(LD_BUG,"Address '%s.exit', with impossible source for the " ".exit part. Refusing.", safe_str_client(socks->address)); control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } tor_assert(!automap); /* Now, find the character before the .(name) part. * (The ".exit" part got stripped off by "parse_extended_hostname"). * * We're going to put the exit name into conn->chosen_exit_name, and * look up a node correspondingly. */ char *s = strrchr(socks->address,'.'); if (s) { /* The address was of the form "(stuff).(name).exit */ if (s[1] != '\0') { /* Looks like a real .exit one. 
*/ conn->chosen_exit_name = tor_strdup(s+1); node = node_get_by_nickname(conn->chosen_exit_name, 0); if (exit_source == ADDRMAPSRC_TRACKEXIT) { /* We 5 tries before it expires the addressmap */ conn->chosen_exit_retries = TRACKHOSTEXITS_RETRIES; } *s = 0; } else { /* Oops, the address was (stuff)..exit. That's not okay. */ log_warn(LD_APP,"Malformed exit address '%s.exit'. Refusing.", safe_str_client(socks->address)); control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } } else { /* It looks like they just asked for "foo.exit". That's a special * form that means (foo's address).foo.exit. */ conn->chosen_exit_name = tor_strdup(socks->address); node = node_get_by_nickname(conn->chosen_exit_name, 0); if (node) { *socks->address = 0; node_get_address_string(node, socks->address, sizeof(socks->address)); } } /* Now make sure that the chosen exit exists... */ if (!node) { log_warn(LD_APP, "Unrecognized relay in exit address '%s.exit'. Refusing.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* ...and make sure that it isn't excluded. */ if (routerset_contains_node(excludeset, node)) { log_warn(LD_APP, "Excluded relay in exit address '%s.exit'. Refusing.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* XXXX-1090 Should we also allow foo.bar.exit if ExitNodes is set and Bar is not listed in it? I say yes, but our revised manpage branch implies no. */ } /* Now, we handle everything that isn't a .onion address. */ if (addresstype != ONION_V3_HOSTNAME && addresstype != ONION_V2_HOSTNAME) { /* Not a hidden-service request. It's either a hostname or an IP, * possibly with a .exit that we stripped off. We're going to check * if we're allowed to connect/resolve there, and then launch the * appropriate request. */ /* Check for funny characters in the address. */ if (address_is_invalid_destination(socks->address, 1)) { control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); log_warn(LD_APP, "Destination '%s' seems to be an invalid hostname. Failing.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* socks->address is a non-onion hostname or IP address. * If we can't do any non-onion requests, refuse the connection. * If we have a hostname but can't do DNS, refuse the connection. * If we have an IP address, but we can't use that address family, * refuse the connection. * * If we can do DNS requests, and we can use at least one address family, * then we have to resolve the address first. Then we'll know if it * resolves to a usable address family. */ /* First, check if all non-onion traffic is disabled */ if (!conn->entry_cfg.dns_request && !conn->entry_cfg.ipv4_traffic && !conn->entry_cfg.ipv6_traffic) { log_warn(LD_APP, "Refusing to connect to non-hidden-service hostname " "or IP address %s because Port has OnionTrafficOnly set (or " "NoDNSRequest, NoIPv4Traffic, and NoIPv6Traffic).", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } /* Then check if we have a hostname or IP address, and whether DNS or * the IP address family are permitted. Reject if not. 
*/ tor_addr_t dummy_addr; int socks_family = tor_addr_parse(&dummy_addr, socks->address); /* family will be -1 for a non-onion hostname that's not an IP */ if (socks_family == -1) { if (!conn->entry_cfg.dns_request) { log_warn(LD_APP, "Refusing to connect to hostname %s " "because Port has NoDNSRequest set.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } } else if (socks_family == AF_INET) { if (!conn->entry_cfg.ipv4_traffic) { log_warn(LD_APP, "Refusing to connect to IPv4 address %s because " "Port has NoIPv4Traffic set.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } } else if (socks_family == AF_INET6) { if (!conn->entry_cfg.ipv6_traffic) { log_warn(LD_APP, "Refusing to connect to IPv6 address %s because " "Port has NoIPv6Traffic set.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } } else { tor_assert_nonfatal_unreached_once(); } /* See if this is a hostname lookup that we can answer immediately. * (For example, an attempt to look up the IP address for an IP address.) */ if (socks->command == SOCKS_COMMAND_RESOLVE) { tor_addr_t answer; /* Reply to resolves immediately if we can. */ if (tor_addr_parse(&answer, socks->address) >= 0) {/* is it an IP? */ /* remember _what_ is supposed to have been resolved. */ strlcpy(socks->address, rr.orig_address, sizeof(socks->address)); connection_ap_handshake_socks_resolved_addr(conn, &answer, -1, map_expires); connection_mark_unattached_ap(conn, END_STREAM_REASON_DONE | END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED); return 0; } tor_assert(!automap); rep_hist_note_used_resolve(now); /* help predict this next time */ } else if (socks->command == SOCKS_COMMAND_CONNECT) { /* Now see if this is a connect request that we can reject immediately */ tor_assert(!automap); /* Don't allow connections to port 0. */ if (socks->port == 0) { log_notice(LD_APP,"Application asked to connect to port 0. Refusing."); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* You can't make connections to internal addresses, by default. * Exceptions are begindir requests (where the address is meaningless), * or cases where you've hand-configured a particular exit, thereby * making the local address meaningful. */ if (options->ClientRejectInternalAddresses && !conn->use_begindir && !conn->chosen_exit_name && !circ) { /* If we reach this point then we don't want to allow internal * addresses. Check if we got one. */ tor_addr_t addr; if (tor_addr_hostname_is_local(socks->address) || (tor_addr_parse(&addr, socks->address) >= 0 && tor_addr_is_internal(&addr, 0))) { /* If this is an explicit private address with no chosen exit node, * then we really don't want to try to connect to it. That's * probably an error. */ if (conn->is_transparent_ap) { #define WARN_INTRVL_LOOP 300 static ratelim_t loop_warn_limit = RATELIM_INIT(WARN_INTRVL_LOOP); char *m; if ((m = rate_limit_log(&loop_warn_limit, approx_time()))) { log_warn(LD_NET, "Rejecting request for anonymous connection to private " "address %s on a TransPort or NATDPort. 
Possible loop " "in your NAT rules?%s", safe_str_client(socks->address), m); tor_free(m); } } else { #define WARN_INTRVL_PRIV 300 static ratelim_t priv_warn_limit = RATELIM_INIT(WARN_INTRVL_PRIV); char *m; if ((m = rate_limit_log(&priv_warn_limit, approx_time()))) { log_warn(LD_NET, "Rejecting SOCKS request for anonymous connection to " "private address %s.%s", safe_str_client(socks->address),m); tor_free(m); } } connection_mark_unattached_ap(conn, END_STREAM_REASON_PRIVATE_ADDR); return -1; } } /* end "if we should check for internal addresses" */ /* Okay. We're still doing a CONNECT, and it wasn't a private * address. Here we do special handling for literal IP addresses, * to see if we should reject this preemptively, and to set up * fields in conn->entry_cfg to tell the exit what AF we want. */ { tor_addr_t addr; /* XXX Duplicate call to tor_addr_parse. */ if (tor_addr_parse(&addr, socks->address) >= 0) { /* If we reach this point, it's an IPv4 or an IPv6 address. */ sa_family_t family = tor_addr_family(&addr); if ((family == AF_INET && ! conn->entry_cfg.ipv4_traffic) || (family == AF_INET6 && ! conn->entry_cfg.ipv6_traffic)) { /* You can't do an IPv4 address on a v6-only socks listener, * or vice versa. */ log_warn(LD_NET, "Rejecting SOCKS request for an IP address " "family that this listener does not support."); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } else if (family == AF_INET6 && socks->socks_version == 4) { /* You can't make a socks4 request to an IPv6 address. Socks4 * doesn't support that. */ log_warn(LD_NET, "Rejecting SOCKS4 request for an IPv6 address."); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } else if (socks->socks_version == 4 && !conn->entry_cfg.ipv4_traffic) { /* You can't do any kind of Socks4 request when IPv4 is forbidden. * * XXX raise this check outside the enclosing block? */ log_warn(LD_NET, "Rejecting SOCKS4 request on a listener with " "no IPv4 traffic supported."); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } else if (family == AF_INET6) { /* Tell the exit: we won't accept any ipv4 connection to an IPv6 * address. */ conn->entry_cfg.ipv4_traffic = 0; } else if (family == AF_INET) { /* Tell the exit: we won't accept any ipv6 connection to an IPv4 * address. */ conn->entry_cfg.ipv6_traffic = 0; } /* Next, yet another check: we know it's a direct IP address. Is it * the IP address of a known relay and its ORPort, or of a directory * authority and its OR or Dir Port? If so, and if a consensus param * says to, then exit relays will refuse this request (see ticket * 2667 for details). Let's just refuse it locally right now, to * save time and network load but also to give the user a more * useful log message. */ if (!network_reentry_is_allowed() && nodelist_reentry_contains(&addr, socks->port)) { log_warn(LD_APP, "Not attempting connection to %s:%d because " "the network would reject it. Are you trying to send " "Tor traffic over Tor? This traffic can be harmful to " "the Tor network. If you really need it, try using " "a bridge as a workaround.", safe_str_client(socks->address), socks->port); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } } } /* we never allow IPv6 answers on socks4. (TODO: Is this smart?) */ if (socks->socks_version == 4) conn->entry_cfg.ipv6_traffic = 0; /* Still handling CONNECT. Now, check for exit enclaves. (Which we * don't do on BEGIN_DIR, or when there is a chosen exit.) 
* * TODO: Should we remove this? Exit enclaves are nutty and don't * work very well */ if (!conn->use_begindir && !conn->chosen_exit_name && !circ) { /* see if we can find a suitable enclave exit */ const node_t *r = router_find_exact_exit_enclave(socks->address, socks->port); if (r) { log_info(LD_APP, "Redirecting address %s to exit at enclave router %s", safe_str_client(socks->address), node_describe(r)); /* use the hex digest, not nickname, in case there are two routers with this nickname */ conn->chosen_exit_name = tor_strdup(hex_str(r->identity, DIGEST_LEN)); conn->chosen_exit_optional = 1; } } /* Still handling CONNECT: warn or reject if it's using a dangerous * port. */ if (!conn->use_begindir && !conn->chosen_exit_name && !circ) if (consider_plaintext_ports(conn, socks->port) < 0) return -1; /* Remember the port so that we will predict that more requests there will happen in the future. */ if (!conn->use_begindir) { /* help predict this next time */ rep_hist_note_used_port(now, socks->port); } } else if (socks->command == SOCKS_COMMAND_RESOLVE_PTR) { rep_hist_note_used_resolve(now); /* help predict this next time */ /* no extra processing needed */ } else { /* We should only be doing CONNECT, RESOLVE, or RESOLVE_PTR! */ tor_fragile_assert(); } /* Okay. At this point we've set chosen_exit_name if needed, rewritten the * address, and decided not to reject it for any number of reasons. Now * mark the connection as waiting for a circuit, and try to attach it! */ base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT; /* If we were given a circuit to attach to, try to attach. Otherwise, * try to find a good one and attach to that. */ int rv; if (circ) { rv = connection_ap_handshake_attach_chosen_circuit(conn, circ, cpath); } else { /* We'll try to attach it at the next event loop, or whenever * we call connection_ap_attach_pending() */ connection_ap_mark_as_pending_circuit(conn); rv = 0; } /* If the above function returned 0 then we're waiting for a circuit. * if it returned 1, we're attached. Both are okay. But if it returned * -1, there was an error, so make sure the connection is marked, and * return -1. */ if (rv < 0) { if (!base_conn->marked_for_close) connection_mark_unattached_ap(conn, END_STREAM_REASON_CANT_ATTACH); return -1; } return 0; } else { /* If we get here, it's a request for a .onion address! */ /* We don't support v2 onions anymore. Log a warning and bail. */ if (addresstype == ONION_V2_HOSTNAME) { log_warn(LD_PROTOCOL, "Tried to connect to a v2 onion address, but this " "version of Tor no longer supports them. Please encourage the " "site operator to upgrade. For more information see " "https://blog.torproject.org/v2-deprecation-timeline."); control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); /* Send back the 0xF6 extended code indicating a bad hostname. This is * mostly so Tor Browser can make a proper UX with regards to v2 * addresses. */ conn->socks_request->socks_extended_error_code = SOCKS5_HS_BAD_ADDRESS; connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } tor_assert(addresstype == ONION_V3_HOSTNAME); tor_assert(!automap); return connection_ap_handle_onion(conn, socks, circ); } return 0; /* unreached but keeps the compiler happy */ }
1
178,149
void Range::processNodes(ActionType action, Vector<RefPtr<Node> >& nodes, PassRefPtr<Node> oldContainer, PassRefPtr<Node> newContainer, ExceptionCode& ec) { for (unsigned i = 0; i < nodes.size(); i++) { switch (action) { case DELETE_CONTENTS: oldContainer->removeChild(nodes[i].get(), ec); break; case EXTRACT_CONTENTS: newContainer->appendChild(nodes[i].release(), ec); // will remove n from its parent break; case CLONE_CONTENTS: newContainer->appendChild(nodes[i]->cloneNode(true), ec); break; } } }
0
469,849
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_GET_INFO: ret = get_info(user, len, 0); break; case IPT_SO_GET_ENTRIES: ret = get_entries(user, len); break; case IPT_SO_GET_REVISION_MATCH: case IPT_SO_GET_REVISION_TARGET: { struct ipt_get_revision rev; int target; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } if (cmd == IPT_SO_GET_REVISION_TARGET) target = 1; else target = 0; try_then_request_module(xt_find_revision(AF_INET, rev.name, rev.revision, target, &ret), "ipt_%s", rev.name); break; } default: duprintf("do_ipt_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; }
0
220,015
bool IsBlockedNavigation(net::Error error_code) {
0
232,460
MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const ssize_t offset, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; MagickRealType number_pixels; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); if (width == 0) return(threshold_image); if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse) { InheritException(exception,&threshold_image->exception); threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Local adaptive threshold. */ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&zero); number_pixels=(MagickRealType) (width*height); image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket channel_bias, channel_sum; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p, *magick_restrict r; register IndexPacket *magick_restrict threshold_indexes; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) height/2L,image->columns+width,height,exception); q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view); channel_bias=zero; channel_sum=zero; r=p; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) { channel_bias.red+=r[u].red; channel_bias.green+=r[u].green; channel_bias.blue+=r[u].blue; channel_bias.opacity+=r[u].opacity; if (image->colorspace == CMYKColorspace) channel_bias.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u); } channel_sum.red+=r[u].red; channel_sum.green+=r[u].green; channel_sum.blue+=r[u].blue; channel_sum.opacity+=r[u].opacity; if (image->colorspace == CMYKColorspace) channel_sum.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u); } r+=image->columns+width; } for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket mean; mean=zero; r=p; channel_sum.red-=channel_bias.red; channel_sum.green-=channel_bias.green; channel_sum.blue-=channel_bias.blue; channel_sum.opacity-=channel_bias.opacity; channel_sum.index-=channel_bias.index; channel_bias=zero; for (v=0; v < (ssize_t) height; v++) { channel_bias.red+=r[0].red; channel_bias.green+=r[0].green; channel_bias.blue+=r[0].blue; channel_bias.opacity+=r[0].opacity; if (image->colorspace == CMYKColorspace) channel_bias.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+0); 
channel_sum.red+=r[width-1].red; channel_sum.green+=r[width-1].green; channel_sum.blue+=r[width-1].blue; channel_sum.opacity+=r[width-1].opacity; if (image->colorspace == CMYKColorspace) channel_sum.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+ width-1); r+=image->columns+width; } mean.red=(MagickRealType) (channel_sum.red/number_pixels+offset); mean.green=(MagickRealType) (channel_sum.green/number_pixels+offset); mean.blue=(MagickRealType) (channel_sum.blue/number_pixels+offset); mean.opacity=(MagickRealType) (channel_sum.opacity/number_pixels+offset); if (image->colorspace == CMYKColorspace) mean.index=(MagickRealType) (channel_sum.index/number_pixels+offset); SetPixelRed(q,((MagickRealType) GetPixelRed(q) <= mean.red) ? 0 : QuantumRange); SetPixelGreen(q,((MagickRealType) GetPixelGreen(q) <= mean.green) ? 0 : QuantumRange); SetPixelBlue(q,((MagickRealType) GetPixelBlue(q) <= mean.blue) ? 0 : QuantumRange); SetPixelOpacity(q,((MagickRealType) GetPixelOpacity(q) <= mean.opacity) ? 0 : QuantumRange); if (image->colorspace == CMYKColorspace) SetPixelIndex(threshold_indexes+x,(((MagickRealType) GetPixelIndex( threshold_indexes+x) <= mean.index) ? 0 : QuantumRange)); p++; q++; } sync=SyncCacheViewAuthenticPixels(threshold_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } threshold_view=DestroyCacheView(threshold_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) threshold_image=DestroyImage(threshold_image); return(threshold_image); }
0
437,184
static void acm_port_destruct(struct tty_port *port) { struct acm *acm = container_of(port, struct acm, port); acm_release_minor(acm); usb_put_intf(acm->control); kfree(acm->country_codes); kfree(acm); }
0
489,097
BOOL nla_set_service_principal(rdpNla* nla, LPTSTR principal) { if (!nla || !principal) return FALSE; nla->ServicePrincipalName = principal; return TRUE; }
0
267,210
static void JPEGProgressHandler(j_common_ptr jpeg_info) { ErrorManager *error_manager; ExceptionInfo *exception; Image *image; error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; exception=error_manager->exception; if (jpeg_info->is_decompressor == 0) return; if (((j_decompress_ptr) jpeg_info)->input_scan_number < MaxJPEGScans) return; (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "too many scans","`%s'",image->filename); longjmp(error_manager->error_recovery,1); }
0
312,073
long FS_SV_FOpenFileRead(const char *filename, fileHandle_t *fp) { char *ospath; fileHandle_t f = 0; if ( !fs_searchpaths ) { Com_Error( ERR_FATAL, "Filesystem call made without initialization" ); } f = FS_HandleForFile(); fsh[f].zipFile = qfalse; Q_strncpyz( fsh[f].name, filename, sizeof( fsh[f].name ) ); ospath = FS_BuildOSPath( fs_homepath->string, filename, "" ); ospath[strlen(ospath)-1] = '\0'; if ( fs_debug->integer ) { Com_Printf( "FS_SV_FOpenFileRead (fs_homepath): %s\n", ospath ); } fsh[f].handleFiles.file.o = Sys_FOpen( ospath, "rb" ); fsh[f].handleSync = qfalse; if (!fsh[f].handleFiles.file.o) { if (Q_stricmp(fs_homepath->string,fs_basepath->string)) { ospath = FS_BuildOSPath( fs_basepath->string, filename, "" ); ospath[strlen(ospath)-1] = '\0'; if ( fs_debug->integer ) { Com_Printf( "FS_SV_FOpenFileRead (fs_basepath): %s\n", ospath ); } fsh[f].handleFiles.file.o = Sys_FOpen( ospath, "rb" ); fsh[f].handleSync = qfalse; } #ifndef STANDALONE if (!fsh[f].handleFiles.file.o && fs_steampath->string[0]) { ospath = FS_BuildOSPath( fs_steampath->string, filename, "" ); ospath[strlen(ospath)-1] = '\0'; if ( fs_debug->integer ) { Com_Printf( "FS_SV_FOpenFileRead (fs_steampath): %s\n", ospath ); } fsh[f].handleFiles.file.o = Sys_FOpen( ospath, "rb" ); fsh[f].handleSync = qfalse; } #endif if ( !fsh[f].handleFiles.file.o ) { f = 0; } } *fp = f; if (f) { return FS_filelength(f); } return -1; }
0
283,360
BlockedPlugin::~BlockedPlugin() { }
0
442,524
static void server_stats(ADD_STAT add_stats, conn *c) { pid_t pid = getpid(); rel_time_t now = current_time; struct thread_stats thread_stats; threadlocal_stats_aggregate(&thread_stats); struct slab_stats slab_stats; slab_stats_aggregate(&thread_stats, &slab_stats); #ifdef EXTSTORE struct extstore_stats st; #endif #ifndef WIN32 struct rusage usage; getrusage(RUSAGE_SELF, &usage); #endif /* !WIN32 */ STATS_LOCK(); APPEND_STAT("pid", "%lu", (long)pid); APPEND_STAT("uptime", "%u", now - ITEM_UPDATE_INTERVAL); APPEND_STAT("time", "%ld", now + (long)process_started); APPEND_STAT("version", "%s", VERSION); APPEND_STAT("libevent", "%s", event_get_version()); APPEND_STAT("pointer_size", "%d", (int)(8 * sizeof(void *))); #ifndef WIN32 append_stat("rusage_user", add_stats, c, "%ld.%06ld", (long)usage.ru_utime.tv_sec, (long)usage.ru_utime.tv_usec); append_stat("rusage_system", add_stats, c, "%ld.%06ld", (long)usage.ru_stime.tv_sec, (long)usage.ru_stime.tv_usec); #endif /* !WIN32 */ APPEND_STAT("max_connections", "%d", settings.maxconns); APPEND_STAT("curr_connections", "%llu", (unsigned long long)stats_state.curr_conns - 1); APPEND_STAT("total_connections", "%llu", (unsigned long long)stats.total_conns); if (settings.maxconns_fast) { APPEND_STAT("rejected_connections", "%llu", (unsigned long long)stats.rejected_conns); } APPEND_STAT("connection_structures", "%u", stats_state.conn_structs); APPEND_STAT("response_obj_bytes", "%llu", (unsigned long long)thread_stats.response_obj_bytes); APPEND_STAT("response_obj_total", "%llu", (unsigned long long)thread_stats.response_obj_total); APPEND_STAT("response_obj_free", "%llu", (unsigned long long)thread_stats.response_obj_free); APPEND_STAT("response_obj_oom", "%llu", (unsigned long long)thread_stats.response_obj_oom); APPEND_STAT("read_buf_bytes", "%llu", (unsigned long long)thread_stats.read_buf_bytes); APPEND_STAT("read_buf_bytes_free", "%llu", (unsigned long long)thread_stats.read_buf_bytes_free); APPEND_STAT("read_buf_oom", "%llu", (unsigned long long)thread_stats.read_buf_oom); APPEND_STAT("reserved_fds", "%u", stats_state.reserved_fds); APPEND_STAT("cmd_get", "%llu", (unsigned long long)thread_stats.get_cmds); APPEND_STAT("cmd_set", "%llu", (unsigned long long)slab_stats.set_cmds); APPEND_STAT("cmd_flush", "%llu", (unsigned long long)thread_stats.flush_cmds); APPEND_STAT("cmd_touch", "%llu", (unsigned long long)thread_stats.touch_cmds); APPEND_STAT("cmd_meta", "%llu", (unsigned long long)thread_stats.meta_cmds); APPEND_STAT("get_hits", "%llu", (unsigned long long)slab_stats.get_hits); APPEND_STAT("get_misses", "%llu", (unsigned long long)thread_stats.get_misses); APPEND_STAT("get_expired", "%llu", (unsigned long long)thread_stats.get_expired); APPEND_STAT("get_flushed", "%llu", (unsigned long long)thread_stats.get_flushed); #ifdef EXTSTORE if (c->thread->storage) { APPEND_STAT("get_extstore", "%llu", (unsigned long long)thread_stats.get_extstore); APPEND_STAT("get_aborted_extstore", "%llu", (unsigned long long)thread_stats.get_aborted_extstore); APPEND_STAT("get_oom_extstore", "%llu", (unsigned long long)thread_stats.get_oom_extstore); APPEND_STAT("recache_from_extstore", "%llu", (unsigned long long)thread_stats.recache_from_extstore); APPEND_STAT("miss_from_extstore", "%llu", (unsigned long long)thread_stats.miss_from_extstore); APPEND_STAT("badcrc_from_extstore", "%llu", (unsigned long long)thread_stats.badcrc_from_extstore); } #endif APPEND_STAT("delete_misses", "%llu", (unsigned long long)thread_stats.delete_misses); APPEND_STAT("delete_hits", 
"%llu", (unsigned long long)slab_stats.delete_hits); APPEND_STAT("incr_misses", "%llu", (unsigned long long)thread_stats.incr_misses); APPEND_STAT("incr_hits", "%llu", (unsigned long long)slab_stats.incr_hits); APPEND_STAT("decr_misses", "%llu", (unsigned long long)thread_stats.decr_misses); APPEND_STAT("decr_hits", "%llu", (unsigned long long)slab_stats.decr_hits); APPEND_STAT("cas_misses", "%llu", (unsigned long long)thread_stats.cas_misses); APPEND_STAT("cas_hits", "%llu", (unsigned long long)slab_stats.cas_hits); APPEND_STAT("cas_badval", "%llu", (unsigned long long)slab_stats.cas_badval); APPEND_STAT("touch_hits", "%llu", (unsigned long long)slab_stats.touch_hits); APPEND_STAT("touch_misses", "%llu", (unsigned long long)thread_stats.touch_misses); APPEND_STAT("auth_cmds", "%llu", (unsigned long long)thread_stats.auth_cmds); APPEND_STAT("auth_errors", "%llu", (unsigned long long)thread_stats.auth_errors); if (settings.idle_timeout) { APPEND_STAT("idle_kicks", "%llu", (unsigned long long)thread_stats.idle_kicks); } APPEND_STAT("bytes_read", "%llu", (unsigned long long)thread_stats.bytes_read); APPEND_STAT("bytes_written", "%llu", (unsigned long long)thread_stats.bytes_written); APPEND_STAT("limit_maxbytes", "%llu", (unsigned long long)settings.maxbytes); APPEND_STAT("accepting_conns", "%u", stats_state.accepting_conns); APPEND_STAT("listen_disabled_num", "%llu", (unsigned long long)stats.listen_disabled_num); APPEND_STAT("time_in_listen_disabled_us", "%llu", stats.time_in_listen_disabled_us); APPEND_STAT("threads", "%d", settings.num_threads); APPEND_STAT("conn_yields", "%llu", (unsigned long long)thread_stats.conn_yields); APPEND_STAT("hash_power_level", "%u", stats_state.hash_power_level); APPEND_STAT("hash_bytes", "%llu", (unsigned long long)stats_state.hash_bytes); APPEND_STAT("hash_is_expanding", "%u", stats_state.hash_is_expanding); if (settings.slab_reassign) { APPEND_STAT("slab_reassign_rescues", "%llu", stats.slab_reassign_rescues); APPEND_STAT("slab_reassign_chunk_rescues", "%llu", stats.slab_reassign_chunk_rescues); APPEND_STAT("slab_reassign_evictions_nomem", "%llu", stats.slab_reassign_evictions_nomem); APPEND_STAT("slab_reassign_inline_reclaim", "%llu", stats.slab_reassign_inline_reclaim); APPEND_STAT("slab_reassign_busy_items", "%llu", stats.slab_reassign_busy_items); APPEND_STAT("slab_reassign_busy_deletes", "%llu", stats.slab_reassign_busy_deletes); APPEND_STAT("slab_reassign_running", "%u", stats_state.slab_reassign_running); APPEND_STAT("slabs_moved", "%llu", stats.slabs_moved); } if (settings.lru_crawler) { APPEND_STAT("lru_crawler_running", "%u", stats_state.lru_crawler_running); APPEND_STAT("lru_crawler_starts", "%u", stats.lru_crawler_starts); } if (settings.lru_maintainer_thread) { APPEND_STAT("lru_maintainer_juggles", "%llu", (unsigned long long)stats.lru_maintainer_juggles); } APPEND_STAT("malloc_fails", "%llu", (unsigned long long)stats.malloc_fails); APPEND_STAT("log_worker_dropped", "%llu", (unsigned long long)stats.log_worker_dropped); APPEND_STAT("log_worker_written", "%llu", (unsigned long long)stats.log_worker_written); APPEND_STAT("log_watcher_skipped", "%llu", (unsigned long long)stats.log_watcher_skipped); APPEND_STAT("log_watcher_sent", "%llu", (unsigned long long)stats.log_watcher_sent); STATS_UNLOCK(); #ifdef EXTSTORE if (c->thread->storage) { STATS_LOCK(); APPEND_STAT("extstore_compact_lost", "%llu", (unsigned long long)stats.extstore_compact_lost); APPEND_STAT("extstore_compact_rescues", "%llu", (unsigned long 
long)stats.extstore_compact_rescues); APPEND_STAT("extstore_compact_skipped", "%llu", (unsigned long long)stats.extstore_compact_skipped); STATS_UNLOCK(); extstore_get_stats(c->thread->storage, &st); APPEND_STAT("extstore_page_allocs", "%llu", (unsigned long long)st.page_allocs); APPEND_STAT("extstore_page_evictions", "%llu", (unsigned long long)st.page_evictions); APPEND_STAT("extstore_page_reclaims", "%llu", (unsigned long long)st.page_reclaims); APPEND_STAT("extstore_pages_free", "%llu", (unsigned long long)st.pages_free); APPEND_STAT("extstore_pages_used", "%llu", (unsigned long long)st.pages_used); APPEND_STAT("extstore_objects_evicted", "%llu", (unsigned long long)st.objects_evicted); APPEND_STAT("extstore_objects_read", "%llu", (unsigned long long)st.objects_read); APPEND_STAT("extstore_objects_written", "%llu", (unsigned long long)st.objects_written); APPEND_STAT("extstore_objects_used", "%llu", (unsigned long long)st.objects_used); APPEND_STAT("extstore_bytes_evicted", "%llu", (unsigned long long)st.bytes_evicted); APPEND_STAT("extstore_bytes_written", "%llu", (unsigned long long)st.bytes_written); APPEND_STAT("extstore_bytes_read", "%llu", (unsigned long long)st.bytes_read); APPEND_STAT("extstore_bytes_used", "%llu", (unsigned long long)st.bytes_used); APPEND_STAT("extstore_bytes_fragmented", "%llu", (unsigned long long)st.bytes_fragmented); APPEND_STAT("extstore_limit_maxbytes", "%llu", (unsigned long long)(st.page_count * st.page_size)); APPEND_STAT("extstore_io_queue", "%llu", (unsigned long long)(st.io_queue)); } #endif #ifdef TLS if (settings.ssl_enabled) { APPEND_STAT("ssl_handshake_errors", "%llu", (unsigned long long)stats.ssl_handshake_errors); APPEND_STAT("time_since_server_cert_refresh", "%u", now - settings.ssl_last_cert_refresh_time); } #endif }
0
514,499
~CloseReq() { uv_fs_req_cleanup(req()); promise_.Reset(); ref_.Reset(); }
0
407,958
static int parsekeyword(unsigned char **pattern, unsigned char *charset) { parsekey_state state = CURLFNM_PKW_INIT; #define KEYLEN 10 char keyword[KEYLEN] = { 0 }; int found = FALSE; int i; unsigned char *p = *pattern; for(i = 0; !found; i++) { char c = *p++; if(i >= KEYLEN) return SETCHARSET_FAIL; switch(state) { case CURLFNM_PKW_INIT: if(ISALPHA(c) && ISLOWER(c)) keyword[i] = c; else if(c == ':') state = CURLFNM_PKW_DDOT; else return 0; break; case CURLFNM_PKW_DDOT: if(c == ']') found = TRUE; else return SETCHARSET_FAIL; } } #undef KEYLEN *pattern = p; /* move caller's pattern pointer */ if(strcmp(keyword, "digit") == 0) charset[CURLFNM_DIGIT] = 1; else if(strcmp(keyword, "alnum") == 0) charset[CURLFNM_ALNUM] = 1; else if(strcmp(keyword, "alpha") == 0) charset[CURLFNM_ALPHA] = 1; else if(strcmp(keyword, "xdigit") == 0) charset[CURLFNM_XDIGIT] = 1; else if(strcmp(keyword, "print") == 0) charset[CURLFNM_PRINT] = 1; else if(strcmp(keyword, "graph") == 0) charset[CURLFNM_GRAPH] = 1; else if(strcmp(keyword, "space") == 0) charset[CURLFNM_SPACE] = 1; else if(strcmp(keyword, "blank") == 0) charset[CURLFNM_BLANK] = 1; else if(strcmp(keyword, "upper") == 0) charset[CURLFNM_UPPER] = 1; else if(strcmp(keyword, "lower") == 0) charset[CURLFNM_LOWER] = 1; else return SETCHARSET_FAIL; return SETCHARSET_OK; }
0
112,879
static u64 fixed_mtrr_seg_unit_size(int seg) { return 8 << fixed_seg_table[seg].range_shift; }
0
83,642
static int __init acpi_enforce_resources_setup(char *str) { if (str == NULL || *str == '\0') return 0; if (!strcmp("strict", str)) acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; else if (!strcmp("lax", str)) acpi_enforce_resources = ENFORCE_RESOURCES_LAX; else if (!strcmp("no", str)) acpi_enforce_resources = ENFORCE_RESOURCES_NO; return 1; }
0
101,277
static void kvm_free_physmem_slot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { if (!dont || free->dirty_bitmap != dont->dirty_bitmap) kvm_destroy_dirty_bitmap(free); kvm_arch_free_memslot(free, dont); free->npages = 0; }
0
415,294
_bluetooth_client_get_default_adapter_powered (BluetoothClient *self) { BluetoothClientPrivate *priv = BLUETOOTH_CLIENT_GET_PRIVATE (self); GtkTreePath *path; GtkTreeIter iter; gboolean ret; if (priv->default_adapter == NULL) return FALSE; path = gtk_tree_row_reference_get_path (priv->default_adapter); gtk_tree_model_get_iter (GTK_TREE_MODEL (priv->store), &iter, path); gtk_tree_model_get (GTK_TREE_MODEL (priv->store), &iter, BLUETOOTH_COLUMN_POWERED, &ret, -1); gtk_tree_path_free (path); return ret; }
0
204,293
void ImageDataPlatformBackend::Unmap() { }
0
41,273
list_update_cgroup_event(struct perf_event *event, struct perf_event_context *ctx, bool add) { struct perf_cpu_context *cpuctx; struct list_head *cpuctx_entry; if (!is_cgroup_event(event)) return; if (add && ctx->nr_cgroups++) return; else if (!add && --ctx->nr_cgroups) return; /* * Because cgroup events are always per-cpu events, * this will always be called from the right CPU. */ cpuctx = __get_cpu_context(ctx); cpuctx_entry = &cpuctx->cgrp_cpuctx_entry; /* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/ if (add) { list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list)); if (perf_cgroup_from_task(current, ctx) == event->cgrp) cpuctx->cgrp = event->cgrp; } else { list_del(cpuctx_entry); cpuctx->cgrp = NULL; } }
0
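The nr_cgroups bookkeeping in the function above follows a common "act only on the first add and the last remove" counting pattern. Below is a minimal, self-contained sketch of that idiom; update() and its printouts are hypothetical and unrelated to the perf code.

/* Minimal sketch of the "first add / last remove" counting idiom used by
 * list_update_cgroup_event(); everything here is illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static int nr_cgroups;

static void update(bool add)
{
    if (add && nr_cgroups++)        /* count was already non-zero */
        return;
    else if (!add && --nr_cgroups)  /* count is still non-zero    */
        return;

    /* Only the very first add and the very last remove get here. */
    puts(add ? "first add: perform the attach side effect"
             : "last remove: perform the detach side effect");
}

int main(void)
{
    update(true);   /* prints the attach message */
    update(true);   /* silent                    */
    update(false);  /* silent                    */
    update(false);  /* prints the detach message */
    return 0;
}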
350,142
TEST_F(ServerSelectorTestFixture, ShouldSelectPreferredIfAvailable) {
    TopologyStateMachine stateMachine(sdamConfiguration);
    auto topologyDescription = std::make_shared<TopologyDescription>(sdamConfiguration);

    const auto now = Date_t::now();
    const auto d0 = now - Milliseconds(1000);

    const auto s0 = ServerDescriptionBuilder()
                        .withAddress(HostAndPort("s0"))
                        .withType(ServerType::kRSPrimary)
                        .withRtt(sdamConfiguration.getLocalThreshold())
                        .withSetName("set")
                        .withHost(HostAndPort("s0"))
                        .withHost(HostAndPort("s1"))
                        .withMinWireVersion(WireVersion::SUPPORTS_OP_MSG)
                        .withMaxWireVersion(WireVersion::LATEST_WIRE_VERSION)
                        .withLastWriteDate(d0)
                        .withTag("tag", "primary")
                        .instance();
    stateMachine.onServerDescription(*topologyDescription, s0);

    const auto s1 = ServerDescriptionBuilder()
                        .withAddress(HostAndPort("s1"))
                        .withType(ServerType::kRSSecondary)
                        .withRtt(sdamConfiguration.getLocalThreshold())
                        .withSetName("set")
                        .withHost(HostAndPort("s0"))
                        .withHost(HostAndPort("s1"))
                        .withMinWireVersion(WireVersion::SUPPORTS_OP_MSG)
                        .withMaxWireVersion(WireVersion::LATEST_WIRE_VERSION)
                        .withLastWriteDate(d0)
                        .withTag("tag", "secondary")
                        .instance();
    stateMachine.onServerDescription(*topologyDescription, s1);

    const auto primaryPreferredTagSecondary =
        ReadPreferenceSetting(ReadPreference::PrimaryPreferred, TagSets::secondarySet);
    auto result1 = selector.selectServer(topologyDescription, primaryPreferredTagSecondary);
    ASSERT(result1 != boost::none);
    ASSERT_EQ(HostAndPort("s0"), (*result1)->getAddress());

    const auto secondaryPreferredWithTag =
        ReadPreferenceSetting(ReadPreference::SecondaryPreferred, TagSets::secondarySet);
    auto result2 = selector.selectServer(topologyDescription, secondaryPreferredWithTag);
    ASSERT(result2 != boost::none);
    ASSERT_EQ(HostAndPort("s1"), (*result2)->getAddress());

    const auto secondaryPreferredNoTag = ReadPreferenceSetting(ReadPreference::SecondaryPreferred);
    auto result3 = selector.selectServer(topologyDescription, secondaryPreferredNoTag);
    ASSERT(result3 != boost::none);
    ASSERT_EQ(HostAndPort("s1"), (*result3)->getAddress());
}
1
103,120
static void cmd_agraph_print(RCore *core, const char *input) { switch (*input) { case 'k': // "aggk" { Sdb *db = r_agraph_get_sdb (core->graph); char *o = sdb_querys (db, "null", 0, "*"); r_cons_print (o); free (o); break; } case 'v': // "aggv" { const char *cmd = r_config_get (core->config, "cmd.graph"); if (cmd && *cmd) { char *newCmd = strdup (cmd); if (newCmd) { newCmd = r_str_replace (newCmd, "ag $$", "aggd", 0); r_core_cmd0 (core, newCmd); free (newCmd); } } else { r_core_cmd0 (core, "agf"); } break; } case 'i': // "aggi" - open current core->graph in interactive mode { RANode *ran = r_agraph_get_first_node (core->graph); if (ran) { r_agraph_set_title (core->graph, r_config_get (core->config, "graph.title")); r_agraph_set_curnode (core->graph, ran); core->graph->force_update_seek = true; core->graph->need_set_layout = true; core->graph->layout = r_config_get_i (core->config, "graph.layout"); int ov = r_config_get_i (core->config, "scr.interactive"); core->graph->need_update_dim = true; r_core_visual_graph (core, core->graph, NULL, true); r_config_set_i (core->config, "scr.interactive", ov); r_cons_show_cursor (true); } else { eprintf ("This graph contains no nodes\n"); } break; } case 'd': // "aggd" - dot format r_cons_printf ("digraph code {\ngraph [bgcolor=white];\n" "node [color=lightgray, style=filled shape=box " "fontname=\"Courier\" fontsize=\"8\"];\n"); r_agraph_foreach (core->graph, agraph_print_node_dot, NULL); r_agraph_foreach_edge (core->graph, agraph_print_edge_dot, NULL); r_cons_printf ("}\n"); break; case '*': // "agg*" - r_agraph_foreach (core->graph, agraph_print_node, NULL); r_agraph_foreach_edge (core->graph, agraph_print_edge, NULL); break; case '?': r_core_cmd_help (core, help_msg_agg); break; default: core->graph->can->linemode = r_config_get_i (core->config, "graph.linemode"); core->graph->can->color = r_config_get_i (core->config, "scr.color"); r_agraph_set_title (core->graph, r_config_get (core->config, "graph.title")); r_agraph_print (core->graph); break; } }
0
280,365
void FrameView::removeSlowRepaintObject() { ASSERT(m_slowRepaintObjectCount > 0); m_slowRepaintObjectCount--; if (!m_slowRepaintObjectCount) { if (Page* page = m_frame->page()) { if (ScrollingCoordinator* scrollingCoordinator = page->scrollingCoordinator()) scrollingCoordinator->frameViewHasSlowRepaintObjectsDidChange(this); } } }
0
428,774
R_API int r_egg_encode(REgg *egg, const char *name) { REggPlugin *p; RListIter *iter; RBuffer *b; r_list_foreach (egg->plugins, iter, p) { if (p->type == R_EGG_PLUGIN_ENCODER && !strcmp (name, p->name)) { b = p->build (egg); if (!b) { return false; } r_buf_free (egg->bin); egg->bin = b; return true; } } return false; }
0
331,640
static void pxa2xx_gpio_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque; int bank; if (offset >= 0x200) return; bank = pxa2xx_gpio_regs[offset].bank; switch (pxa2xx_gpio_regs[offset].reg) { case GPDR: /* GPIO Pin-Direction registers */ s->dir[bank] = value; pxa2xx_gpio_handler_update(s); break; case GPSR: /* GPIO Pin-Output Set registers */ s->olevel[bank] |= value; pxa2xx_gpio_handler_update(s); s->gpsr[bank] = value; break; case GPCR: /* GPIO Pin-Output Clear registers */ s->olevel[bank] &= ~value; pxa2xx_gpio_handler_update(s); break; case GRER: /* GPIO Rising-Edge Detect Enable registers */ s->rising[bank] = value; break; case GFER: /* GPIO Falling-Edge Detect Enable registers */ s->falling[bank] = value; break; case GAFR_L: /* GPIO Alternate Function registers */ s->gafr[bank * 2] = value; break; case GAFR_U: /* GPIO Alternate Function registers */ s->gafr[bank * 2 + 1] = value; break; case GEDR: /* GPIO Edge Detect Status registers */ s->status[bank] &= ~value; pxa2xx_gpio_irq_update(s); break; default: hw_error("%s: Bad offset " REG_FMT "\n", __FUNCTION__, offset); } }
1
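The GPSR/GPCR handling in the function above uses the usual "write 1 to set / write 1 to clear" register convention. A small self-contained sketch of that bit manipulation, with olevel standing in for one bank's output-level word (illustrative only, not QEMU code):

/* Stand-alone illustration of the write-1-to-set / write-1-to-clear idiom
 * seen in the GPSR and GPCR cases above; not QEMU code. */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t olevel = 0;
    uint32_t gpsr_write = (1u << 3) | (1u << 7);  /* request pins 3 and 7 high */
    uint32_t gpcr_write = (1u << 3);              /* request pin 3 low         */

    olevel |= gpsr_write;    /* GPSR: every 1 bit written sets that pin   */
    olevel &= ~gpcr_write;   /* GPCR: every 1 bit written clears that pin */

    assert(olevel == (1u << 7));
    printf("olevel = 0x%08" PRIx32 "\n", olevel);
    return 0;
}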
182,943
GLint GLES2Implementation::GetFragDataIndexEXT(GLuint program, const char* name) { GPU_CLIENT_SINGLE_THREAD_CHECK(); GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetFragDataIndexEXT(" << program << ", " << name << ")"); TRACE_EVENT0("gpu", "GLES2::GetFragDataIndexEXT"); GLint loc = share_group_->program_info_manager()->GetFragDataIndex( this, program, name); GPU_CLIENT_LOG("returned " << loc); CheckGLError(); return loc; }
0
416,301
int wait_for_token_by_slotlabel(pkcs11_handle_t *h, const char *wanted_slot_label, const char *wanted_token_label, unsigned int *slot_num) { int rv; do { /* see if the card we're looking for is inserted */ rv = find_slot_by_slotlabel_and_tokenlabel (h, wanted_slot_label, wanted_token_label, slot_num); if (rv != 0) { /* could call C_WaitForSlotEvent, for now just poll */ sleep(10); refresh_slots(h); continue; } } while (rv != 0); return rv; }
0
209,418
const char* menu_cache_app_get_working_dir( MenuCacheApp* app ) { return app->working_dir; }
0
323,404
int qemu_loadvm_state(QEMUFile *f) { LIST_HEAD(, LoadStateEntry) loadvm_handlers = LIST_HEAD_INITIALIZER(loadvm_handlers); LoadStateEntry *le, *new_le; uint8_t section_type; unsigned int v; int ret; v = qemu_get_be32(f); if (v != QEMU_VM_FILE_MAGIC) return -EINVAL; v = qemu_get_be32(f); if (v == QEMU_VM_FILE_VERSION_COMPAT) { fprintf(stderr, "SaveVM v2 format is obsolete and don't work anymore\n"); return -ENOTSUP; } if (v != QEMU_VM_FILE_VERSION) return -ENOTSUP; while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) { uint32_t instance_id, version_id, section_id; SaveStateEntry *se; char idstr[257]; int len; switch (section_type) { case QEMU_VM_SECTION_START: case QEMU_VM_SECTION_FULL: /* Read section start */ section_id = qemu_get_be32(f); len = qemu_get_byte(f); qemu_get_buffer(f, (uint8_t *)idstr, len); idstr[len] = 0; instance_id = qemu_get_be32(f); version_id = qemu_get_be32(f); /* Find savevm section */ se = find_se(idstr, instance_id); if (se == NULL) { fprintf(stderr, "Unknown savevm section or instance '%s' %d\n", idstr, instance_id); ret = -EINVAL; goto out; } /* Validate version */ if (version_id > se->version_id) { fprintf(stderr, "savevm: unsupported version %d for '%s' v%d\n", version_id, idstr, se->version_id); ret = -EINVAL; goto out; } /* Add entry */ le = qemu_mallocz(sizeof(*le)); le->se = se; le->section_id = section_id; le->version_id = version_id; LIST_INSERT_HEAD(&loadvm_handlers, le, entry); ret = vmstate_load(f, le->se, le->version_id); if (ret < 0) { fprintf(stderr, "qemu: warning: error while loading state for instance 0x%x of device '%s'\n", instance_id, idstr); goto out; } break; case QEMU_VM_SECTION_PART: case QEMU_VM_SECTION_END: section_id = qemu_get_be32(f); LIST_FOREACH(le, &loadvm_handlers, entry) { if (le->section_id == section_id) { break; } } if (le == NULL) { fprintf(stderr, "Unknown savevm section %d\n", section_id); ret = -EINVAL; goto out; } ret = vmstate_load(f, le->se, le->version_id); if (ret < 0) { fprintf(stderr, "qemu: warning: error while loading state section id %d\n", section_id); goto out; } break; default: fprintf(stderr, "Unknown savevm section type %d\n", section_type); ret = -EINVAL; goto out; } } ret = 0; out: LIST_FOREACH_SAFE(le, &loadvm_handlers, entry, new_le) { LIST_REMOVE(le, entry); qemu_free(le); } if (qemu_file_has_error(f)) ret = -EIO; return ret; }
0
465,310
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { vcpu_load(vcpu); memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); vcpu_put(vcpu); return 0; }
0
318,724
static void test_opts_range_unvisited(void)
{
    intList *list = NULL;
    intList *tail;
    QemuOpts *opts;
    Visitor *v;

    opts = qemu_opts_parse(qemu_find_opts("userdef"), "ilist=0-2", false,
                           &error_abort);

    v = opts_visitor_new(opts);
    visit_start_struct(v, NULL, NULL, 0, &error_abort);

    /* Would be simpler if the visitor genuinely supported virtual walks */
    visit_start_list(v, "ilist", (GenericList **)&list, sizeof(*list),
                     &error_abort);
    tail = list;
    visit_type_int(v, NULL, &tail->value, &error_abort);
    g_assert_cmpint(tail->value, ==, 0);
    tail = (intList *)visit_next_list(v, (GenericList *)tail, sizeof(*list));
    g_assert(tail);
    visit_type_int(v, NULL, &tail->value, &error_abort);
    g_assert_cmpint(tail->value, ==, 1);
    tail = (intList *)visit_next_list(v, (GenericList *)tail, sizeof(*list));
    g_assert(tail);
    visit_check_list(v, &error_abort); /* BUG: unvisited tail not reported */
    visit_end_list(v, (void **)&list);

    visit_check_struct(v, &error_abort);
    visit_end_struct(v, NULL);

    qapi_free_intList(list);
    visit_free(v);
    qemu_opts_del(opts);
}
1
467,173
void RGWInfo_ObjStore_SWIFT::list_slo_data(Formatter& formatter, const ConfigProxy& config, RGWRados& store) { formatter.open_object_section("slo"); formatter.dump_int("max_manifest_segments", config->rgw_max_slo_entries); formatter.close_section(); }
0
238,829
void SpeechRecognitionManagerImpl::OnSoundStart(int session_id) { DCHECK_CURRENTLY_ON(BrowserThread::IO); if (!SessionExists(session_id)) return; DCHECK_EQ(primary_session_id_, session_id); if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) delegate_listener->OnSoundStart(session_id); if (SpeechRecognitionEventListener* listener = GetListener(session_id)) listener->OnSoundStart(session_id); }
0
176,754
static void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}
0
63,615
int main(int argc,char* argv[]){ int i, j; uint64_t sse=0; uint64_t dev; FILE *f[2]; uint8_t buf[2][SIZE]; uint64_t psnr; int len= argc<4 ? 1 : atoi(argv[3]); int64_t max= (1<<(8*len))-1; int shift= argc<5 ? 0 : atoi(argv[4]); int skip_bytes = argc<6 ? 0 : atoi(argv[5]); if(argc<3){ printf("tiny_psnr <file1> <file2> [<elem size> [<shift> [<skip bytes>]]]\n"); printf("For WAV files use the following:\n"); printf("./tiny_psnr file1.wav file2.wav 2 0 44 to skip the header.\n"); return -1; } f[0]= fopen(argv[1], "rb"); f[1]= fopen(argv[2], "rb"); if(!f[0] || !f[1]){ fprintf(stderr, "Could not open input files.\n"); return -1; } fseek(f[shift<0], shift < 0 ? -shift : shift, SEEK_SET); fseek(f[0],skip_bytes,SEEK_CUR); fseek(f[1],skip_bytes,SEEK_CUR); for(i=0;;){ if( fread(buf[0], SIZE, 1, f[0]) != 1) break; if( fread(buf[1], SIZE, 1, f[1]) != 1) break; for(j=0; j<SIZE; i++,j++){ int64_t a= buf[0][j]; int64_t b= buf[1][j]; if(len==2){ a= (int16_t)(a | (buf[0][++j]<<8)); b= (int16_t)(b | (buf[1][ j]<<8)); } sse += (a-b) * (a-b); } } if(!i) i=1; dev= int_sqrt( ((sse/i)*F*F) + (((sse%i)*F*F) + i/2)/i ); if(sse) psnr= ((2*log16(max<<16) + log16(i) - log16(sse))*284619LL*F + (1<<31)) / (1LL<<32); else psnr= 100*F-1; //floating point free infinity :) printf("stddev:%3d.%02d PSNR:%2d.%02d bytes:%d\n", (int)(dev/F), (int)(dev%F), (int)(psnr/F), (int)(psnr%F), i*len); return 0; }
0
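For reference, the integer-only arithmetic in tiny_psnr above approximates the standard formula PSNR = 10 * log10(MAX^2 / MSE). A minimal floating-point sketch of that formula follows; psnr_db() is a hypothetical helper, not part of tiny_psnr.

/* Floating-point sketch of the PSNR formula that tiny_psnr approximates
 * with integer arithmetic; psnr_db() is a hypothetical helper. */
#include <math.h>
#include <stdio.h>

static double psnr_db(double max_value, double mse)
{
    if (mse <= 0.0)
        return INFINITY;  /* identical inputs: conventionally reported as infinite */
    return 10.0 * log10((max_value * max_value) / mse);
}

int main(void)
{
    /* 8-bit samples (peak 255) with a mean squared error of 2.5 */
    printf("%.2f dB\n", psnr_db(255.0, 2.5));
    return 0;
}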
292,635
var2fpos( typval_T *varp, int dollar_lnum, // TRUE when $ is last line int *fnum, // set to fnum for '0, 'A, etc. int charcol) // return character column { char_u *name; static pos_T pos; pos_T *pp; // Argument can be [lnum, col, coladd]. if (varp->v_type == VAR_LIST) { list_T *l; int len; int error = FALSE; listitem_T *li; l = varp->vval.v_list; if (l == NULL) return NULL; // Get the line number pos.lnum = list_find_nr(l, 0L, &error); if (error || pos.lnum <= 0 || pos.lnum > curbuf->b_ml.ml_line_count) return NULL; // invalid line number if (charcol) len = (long)mb_charlen(ml_get(pos.lnum)); else len = (long)STRLEN(ml_get(pos.lnum)); // Get the column number // We accept "$" for the column number: last column. li = list_find(l, 1L); if (li != NULL && li->li_tv.v_type == VAR_STRING && li->li_tv.vval.v_string != NULL && STRCMP(li->li_tv.vval.v_string, "$") == 0) { pos.col = len + 1; } else { pos.col = list_find_nr(l, 1L, &error); if (error) return NULL; } // Accept a position up to the NUL after the line. if (pos.col == 0 || (int)pos.col > len + 1) return NULL; // invalid column number --pos.col; // Get the virtual offset. Defaults to zero. pos.coladd = list_find_nr(l, 2L, &error); if (error) pos.coladd = 0; return &pos; } if (in_vim9script() && check_for_string_arg(varp, 0) == FAIL) return NULL; name = tv_get_string_chk(varp); if (name == NULL) return NULL; if (name[0] == '.') // cursor { pos = curwin->w_cursor; if (charcol) pos.col = buf_byteidx_to_charidx(curbuf, pos.lnum, pos.col); return &pos; } if (name[0] == 'v' && name[1] == NUL) // Visual start { if (VIsual_active) pos = VIsual; else pos = curwin->w_cursor; if (charcol) pos.col = buf_byteidx_to_charidx(curbuf, pos.lnum, pos.col); return &pos; } if (name[0] == '\'') // mark { pp = getmark_buf_fnum(curbuf, name[1], FALSE, fnum); if (pp == NULL || pp == (pos_T *)-1 || pp->lnum <= 0) return NULL; if (charcol) pp->col = buf_byteidx_to_charidx(curbuf, pp->lnum, pp->col); return pp; } pos.coladd = 0; if (name[0] == 'w' && dollar_lnum) { pos.col = 0; if (name[1] == '0') // "w0": first visible line { update_topline(); // In silent Ex mode topline is zero, but that's not a valid line // number; use one instead. pos.lnum = curwin->w_topline > 0 ? curwin->w_topline : 1; return &pos; } else if (name[1] == '$') // "w$": last visible line { validate_botline(); // In silent Ex mode botline is zero, return zero then. pos.lnum = curwin->w_botline > 0 ? curwin->w_botline - 1 : 0; return &pos; } } else if (name[0] == '$') // last column or line { if (dollar_lnum) { pos.lnum = curbuf->b_ml.ml_line_count; pos.col = 0; } else { pos.lnum = curwin->w_cursor.lnum; if (charcol) pos.col = (colnr_T)mb_charlen(ml_get_curline()); else pos.col = (colnr_T)STRLEN(ml_get_curline()); } return &pos; } if (in_vim9script()) semsg(_(e_invalid_value_for_line_number_str), name); return NULL; }
0
364,380
static NOINLINE int send_renew(uint32_t xid, uint32_t server, uint32_t ciaddr) { struct dhcp_packet packet; /* * RFC 2131 4.3.2 DHCPREQUEST message * ... * DHCPREQUEST generated during RENEWING state: * * 'server identifier' MUST NOT be filled in, 'requested IP address' * option MUST NOT be filled in, 'ciaddr' MUST be filled in with * client's IP address. In this situation, the client is completely * configured, and is trying to extend its lease. This message will * be unicast, so no relay agents will be involved in its * transmission. Because 'giaddr' is therefore not filled in, the * DHCP server will trust the value in 'ciaddr', and use it when * replying to the client. */ /* Fill in: op, htype, hlen, cookie, chaddr fields, * random xid field (we override it below), * client-id option (unless -C), message type option: */ init_packet(&packet, DHCPREQUEST); packet.xid = xid; packet.ciaddr = ciaddr; /* Add options: maxsize, * optionally: hostname, fqdn, vendorclass, * "param req" option according to -O, and options specified with -x */ add_client_options(&packet); bb_info_msg("Sending renew..."); if (server) return udhcp_send_kernel_packet(&packet, ciaddr, CLIENT_PORT, server, SERVER_PORT); return raw_bcast_from_client_config_ifindex(&packet); }
0