idx
int64
func
string
target
int64
42,078
/*
 * kobject release callback for an md_rdev: recover the enclosing
 * md_rdev from its embedded kobject and free it.
 */
static void rdev_free(struct kobject *ko)
{
	kfree(container_of(ko, struct md_rdev, kobj));
}
0
463,251
//
// Decompress one deep tile's raw bytes and scatter the samples into the
// user's deep frame buffer.  Runs on a worker thread; any exception is
// captured in the tile buffer instead of propagating.
//
TileBufferTask::execute ()
{
    try
    {
        //
        // Calculate information about the tile
        //

        Box2i tileRange = OPENEXR_IMF_INTERNAL_NAMESPACE::dataWindowForTile (
                _ifd->tileDesc,
                _ifd->minX, _ifd->maxX,
                _ifd->minY, _ifd->maxY,
                _tileBuffer->dx,
                _tileBuffer->dy,
                _tileBuffer->lx,
                _tileBuffer->ly);

        //
        // Get the size of the tile.
        //

        Array<unsigned int> numPixelsPerScanLine;
        numPixelsPerScanLine.resizeErase(tileRange.max.y - tileRange.min.y + 1);

        int sizeOfTile = 0;
        int maxBytesPerTileLine = 0;

        for (int y = tileRange.min.y; y <= tileRange.max.y; y++)
        {
            numPixelsPerScanLine[y - tileRange.min.y] = 0;

            int bytesPerLine = 0;

            for (int x = tileRange.min.x; x <= tileRange.max.x; x++)
            {
                // Sample-count slices may use tile-relative coordinates;
                // these offsets translate absolute (x, y) accordingly.
                int xOffset = _ifd->sampleCountXTileCoords * tileRange.min.x;
                int yOffset = _ifd->sampleCountYTileCoords * tileRange.min.y;

                int count = _ifd->getSampleCount(x - xOffset, y - yOffset);
                for (unsigned int c = 0; c < _ifd->slices.size(); ++c)
                {
                    // This slice does not exist in the file.
                    if ( !_ifd->slices[c]->fill)
                    {
                        sizeOfTile += count * pixelTypeSize(_ifd->slices[c]->typeInFile);
                        bytesPerLine += count * pixelTypeSize(_ifd->slices[c]->typeInFile);
                    }
                }
                numPixelsPerScanLine[y - tileRange.min.y] += count;
            }

            if (bytesPerLine > maxBytesPerTileLine)
                maxBytesPerTileLine = bytesPerLine;
        }

        // (TODO) don't do this every time.
        if (_tileBuffer->compressor != 0)
            delete _tileBuffer->compressor;
        _tileBuffer->compressor = newTileCompressor (_ifd->header.compression(),
                                                     maxBytesPerTileLine,
                                                     _ifd->tileDesc.ySize,
                                                     _ifd->header);

        //
        // Uncompress the data, if necessary
        //
        // NOTE(review): stored size < expected size is the heuristic for
        // "data is compressed" — confirm this matches the writer's contract.
        //

        if (_tileBuffer->compressor &&
            _tileBuffer->dataSize < static_cast<Int64>(sizeOfTile))
        {
            _tileBuffer->format = _tileBuffer->compressor->format();

            _tileBuffer->dataSize = _tileBuffer->compressor->uncompressTile (
                    _tileBuffer->buffer,
                    _tileBuffer->dataSize,
                    tileRange,
                    _tileBuffer->uncompressedData);
        }
        else
        {
            //
            // If the line is uncompressed, it's in XDR format,
            // regardless of the compressor's output format.
            //

            _tileBuffer->format = Compressor::XDR;
            _tileBuffer->uncompressedData = _tileBuffer->buffer;
        }

        //
        // sanity check data size: the uncompressed data should be exactly
        // 'sizeOfTile' (if it's less, the file is corrupt and there'll be a buffer overrun)
        //
        if (_tileBuffer->dataSize != static_cast<Int64>(sizeOfTile))
        {
            THROW (IEX_NAMESPACE::InputExc,
                   "size mismatch when reading deep tile: expected "
                   << sizeOfTile
                   << "bytes of uncompressed data but got "
                   << _tileBuffer->dataSize);
        }

        //
        // Convert the tile of pixel data back from the machine-independent
        // representation, and store the result in the frame buffer.
        //

        const char *readPtr = _tileBuffer->uncompressedData;    // points to where we
                                                                // read from in the
                                                                // tile block

        //
        // Iterate over the scan lines in the tile.
        //

        for (int y = tileRange.min.y; y <= tileRange.max.y; ++y)
        {
            //
            // Iterate over all image channels.
            //

            for (unsigned int i = 0; i < _ifd->slices.size(); ++i)
            {
                TInSliceInfo &slice = *_ifd->slices[i];

                //
                // These offsets are used to facilitate both
                // absolute and tile-relative pixel coordinates.
                //

                int xOffsetForData = (slice.xTileCoords == 0) ? 0 : tileRange.min.x;
                int yOffsetForData = (slice.yTileCoords == 0) ? 0 : tileRange.min.y;
                int xOffsetForSampleCount =
                        (_ifd->sampleCountXTileCoords == 0) ? 0 : tileRange.min.x;
                int yOffsetForSampleCount =
                        (_ifd->sampleCountYTileCoords == 0) ? 0 : tileRange.min.y;

                //
                // Fill the frame buffer with pixel data.
                //

                if (slice.skip)
                {
                    //
                    // The file contains data for this channel, but
                    // the frame buffer contains no slice for this channel.
                    //

                    skipChannel (readPtr, slice.typeInFile,
                                 numPixelsPerScanLine[y - tileRange.min.y]);
                }
                else
                {
                    //
                    // The frame buffer contains a slice for this channel.
                    //

                    copyIntoDeepFrameBuffer (readPtr,
                                             slice.pointerArrayBase,
                                             _ifd->sampleCountSliceBase,
                                             _ifd->sampleCountXStride,
                                             _ifd->sampleCountYStride,
                                             y,
                                             tileRange.min.x,
                                             tileRange.max.x,
                                             xOffsetForSampleCount,
                                             yOffsetForSampleCount,
                                             xOffsetForData,
                                             yOffsetForData,
                                             slice.sampleStride,
                                             slice.xStride,
                                             slice.yStride,
                                             slice.fill,
                                             slice.fillValue,
                                             _tileBuffer->format,
                                             slice.typeInFrameBuffer,
                                             slice.typeInFile);
                }
            }
        }
    }
    catch (std::exception &e)
    {
        // Record only the first exception; later ones are dropped.
        if (!_tileBuffer->hasException)
        {
            _tileBuffer->exception = e.what ();
            _tileBuffer->hasException = true;
        }
    }
    catch (...)
    {
        if (!_tileBuffer->hasException)
        {
            _tileBuffer->exception = "unrecognized exception";
            _tileBuffer->hasException = true;
        }
    }
}
0
394,036
/*
 * Enqueue a waking task @p on runqueue @rq and mark it queued.
 * Runs with the runqueue lock held (wakeup path); the workqueue
 * notification lets the workqueue code account for a worker that
 * is becoming runnable again.
 */
static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = TASK_ON_RQ_QUEUED;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}
0
161,819
/*
 * Parse a sideband DATA packet: `line` points at the sideband marker
 * byte, `len` is the marker plus payload length.  On success *out is
 * set to a freshly allocated git_pkt_data (header + copied payload)
 * and 0 is returned; on allocation failure a negative error is
 * returned via the GITERR_CHECK_ALLOC* macros.
 */
static int data_pkt(git_pkt **out, const char *line, size_t len)
{
	git_pkt_data *pkt;
	size_t alloclen;

	/* skip the leading sideband marker byte */
	line++;
	len--;

	/*
	 * Size the allocation from git_pkt_data — the struct we actually
	 * populate — not git_pkt_progress as before; if the two layouts
	 * ever diverge the old sizing would under-allocate and the
	 * memcpy into pkt->data would overrun the buffer.
	 */
	GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_data), len);
	pkt = git__malloc(alloclen);
	GITERR_CHECK_ALLOC(pkt);

	pkt->type = GIT_PKT_DATA;
	pkt->len = (int) len;
	memcpy(pkt->data, line, len);

	*out = (git_pkt *) pkt;

	return 0;
}
0
22,540
/*
 * Open handler for Tektronix K12xx/15 (.rf5) capture files.
 *
 * Validates the fixed-size file header (magic + record counts), then
 * walks the leading metadata records — source descriptors and stack
 * files — until the first packet record, leaving the file positioned
 * at that packet for the sequential read loop.
 *
 * Returns WTAP_OPEN_MINE on success, WTAP_OPEN_NOT_MINE if this does
 * not look like a K12 file, or WTAP_OPEN_ERROR (with *err/*err_info
 * set) on read errors or malformed contents.
 *
 * Fix: two error paths previously returned without freeing file_data,
 * leaking the k12_t allocated by new_k12_file_data(); both now call
 * destroy_k12_file_data() before returning.
 */
wtap_open_return_val k12_open(wtap *wth, int *err, gchar **err_info)
{
    k12_src_desc_t *rec;
    guint8 header_buffer[K12_FILE_HDR_LEN];
    guint8 *read_buffer;
    guint32 type;
    long offset;
    long len;
    guint port_type;
    guint32 rec_len;
    guint32 hwpart_len;
    guint32 name_len;
    guint32 stack_len;
    guint i;
    k12_t *file_data;

#ifdef DEBUG_K12
    /* Optional debug tracing controlled by environment variables. */
    gchar *env_level = getenv("K12_DEBUG_LEVEL");
    env_file = getenv("K12_DEBUG_FILENAME");
    if (env_file) {
        dbg_out = ws_fopen(env_file, "w");
        if (dbg_out == NULL) {
            dbg_out = stderr;
            K12_DBG(1, ("unable to open K12 DEBUG FILENAME for writing! Logging to standard error"));
        }
    } else
        dbg_out = stderr;
    if (env_level)
        debug_level = (unsigned int)strtoul(env_level, NULL, 10);
    K12_DBG(1, ("k12_open: ENTER debug_level=%u", debug_level));
#endif

    if (!wtap_read_bytes(wth->fh, header_buffer, K12_FILE_HDR_LEN, err, err_info)) {
        K12_DBG(1, ("k12_open: FILE HEADER TOO SHORT OR READ ERROR"));
        if (*err != WTAP_ERR_SHORT_READ) {
            return WTAP_OPEN_ERROR;
        }
        return WTAP_OPEN_NOT_MINE;
    }

    if (memcmp(header_buffer, k12_file_magic, 8) != 0) {
        K12_DBG(1, ("k12_open: BAD MAGIC"));
        return WTAP_OPEN_NOT_MINE;
    }

    offset = K12_FILE_HDR_LEN;

    file_data = new_k12_file_data();

    file_data->file_len = pntoh32(header_buffer + 0x8);
    if (memiszero(header_buffer + 0x10, K12_FILE_HDR_LEN - 0x10)) {
        /* Old-style header: single record count at 0x0C. */
        file_data->num_of_records = pntoh32(header_buffer + 0x0C);
    } else {
        /* New-style header: two copies of the record count must agree. */
        file_data->num_of_records = pntoh32(header_buffer + K12_FILE_HDR_RECORD_COUNT_1);
        if (file_data->num_of_records != pntoh32(header_buffer + K12_FILE_HDR_RECORD_COUNT_2)) {
            *err = WTAP_ERR_BAD_FILE;
            *err_info = g_strdup_printf("k12: two different record counts, %u at 0x%02x and %u at 0x%02x",
                                        file_data->num_of_records,
                                        K12_FILE_HDR_RECORD_COUNT_1,
                                        pntoh32(header_buffer + K12_FILE_HDR_RECORD_COUNT_2),
                                        K12_FILE_HDR_RECORD_COUNT_2);
            /* Fix: free file_data — this path leaked it. */
            destroy_k12_file_data(file_data);
            return WTAP_OPEN_ERROR;
        }
    }

    K12_DBG(5, ("k12_open: FILE_HEADER OK: offset=%x file_len=%i records=%i",
                offset, file_data->file_len, file_data->num_of_records));

    do {
        if (file_data->num_of_records == 0) {
            *err = WTAP_ERR_SHORT_READ;
            destroy_k12_file_data(file_data);
            return WTAP_OPEN_ERROR;
        }

        len = get_record(file_data, wth->fh, offset, FALSE, err, err_info);
        if (len < 0) {
            K12_DBG(1, ("k12_open: BAD HEADER RECORD", len));
            destroy_k12_file_data(file_data);
            return WTAP_OPEN_ERROR;
        }
        if (len == 0) {
            K12_DBG(1, ("k12_open: BAD HEADER RECORD", len));
            *err = WTAP_ERR_SHORT_READ;
            destroy_k12_file_data(file_data);
            return WTAP_OPEN_ERROR;
        }

        read_buffer = file_data->seq_read_buff;

        rec_len = pntoh32(read_buffer + K12_RECORD_LEN);
        if (rec_len < K12_RECORD_TYPE + 4) {
            *err = WTAP_ERR_BAD_FILE;
            *err_info = g_strdup_printf("k12_open: record length %u < %u",
                                        rec_len, K12_RECORD_TYPE + 4);
            /* Fix: free file_data — this path leaked it. */
            destroy_k12_file_data(file_data);
            return WTAP_OPEN_ERROR;
        }
        type = pntoh32(read_buffer + K12_RECORD_TYPE);

        if ((type & K12_MASK_PACKET) == K12_REC_PACKET ||
            (type & K12_MASK_PACKET) == K12_REC_D0020) {
            /* First packet record: rewind so the read loop starts here. */
            if (file_seek(wth->fh, offset, SEEK_SET, err) == -1) {
                destroy_k12_file_data(file_data);
                return WTAP_OPEN_ERROR;
            }
            K12_DBG(5, ("k12_open: FIRST PACKET offset=%x", offset));
            break;
        }

        switch (type) {

        case K12_REC_SRCDSC:
        case K12_REC_SRCDSC2:
            /* Source (interface) descriptor record. */
            rec = g_new0(k12_src_desc_t, 1);

            if (rec_len < K12_SRCDESC_HWPART) {
                *err = WTAP_ERR_BAD_FILE;
                *err_info = g_strdup_printf("k12_open: source descriptor record length %u < %u",
                                            rec_len, K12_SRCDESC_HWPART);
                destroy_k12_file_data(file_data);
                g_free(rec);
                return WTAP_OPEN_ERROR;
            }
            port_type  = read_buffer[K12_SRCDESC_PORT_TYPE];
            hwpart_len = pntoh16(read_buffer + K12_SRCDESC_HWPARTLEN);
            name_len   = pntoh16(read_buffer + K12_SRCDESC_NAMELEN);
            stack_len  = pntoh16(read_buffer + K12_SRCDESC_STACKLEN);

            rec->input = pntoh32(read_buffer + K12_RECORD_SRC_ID);

            K12_DBG(5, ("k12_open: INTERFACE RECORD offset=%x interface=%x", offset, rec->input));

            if (name_len == 0) {
                K12_DBG(5, ("k12_open: failed (name_len == 0 in source description"));
                destroy_k12_file_data(file_data);
                g_free(rec);
                return WTAP_OPEN_NOT_MINE;
            }
            if (stack_len == 0) {
                K12_DBG(5, ("k12_open: failed (stack_len == 0 in source description"));
                destroy_k12_file_data(file_data);
                g_free(rec);
                return WTAP_OPEN_NOT_MINE;
            }
            /* The three variable-length parts must fit inside the record. */
            if (rec_len < K12_SRCDESC_HWPART + hwpart_len + name_len + stack_len) {
                *err = WTAP_ERR_BAD_FILE;
                *err_info = g_strdup_printf("k12_open: source descriptor record length %u < %u (%u + %u + %u + %u)",
                                            rec_len,
                                            K12_SRCDESC_HWPART + hwpart_len + name_len + stack_len,
                                            K12_SRCDESC_HWPART, hwpart_len, name_len, stack_len);
                destroy_k12_file_data(file_data);
                g_free(rec);
                return WTAP_OPEN_ERROR;
            }

            if (hwpart_len) {
                if (hwpart_len < 4) {
                    *err = WTAP_ERR_BAD_FILE;
                    *err_info = g_strdup_printf("k12_open: source descriptor hardware part length %u < 4",
                                                hwpart_len);
                    destroy_k12_file_data(file_data);
                    g_free(rec);
                    return WTAP_OPEN_ERROR;
                }
                switch ((rec->input_type = pntoh32(read_buffer + K12_SRCDESC_HWPART + K12_SRCDESC_HWPARTTYPE))) {
                case K12_PORT_DS0S:
                    /* Build the DS0 channel mask, one bit per 0xff byte. */
                    rec->input_info.ds0mask = 0x00000000;
                    if (hwpart_len > K12_SRCDESC_DS0_MASK) {
                        for (i = 0; i < hwpart_len - K12_SRCDESC_DS0_MASK; i++) {
                            rec->input_info.ds0mask |=
                                (*(read_buffer + K12_SRCDESC_HWPART + K12_SRCDESC_DS0_MASK + i) == 0xff) ?
                                    1U << (31 - i) : 0x0;
                        }
                    }
                    break;
                case K12_PORT_ATMPVC:
                    if (hwpart_len < K12_SRCDESC_ATM_VCI + 2) {
                        *err = WTAP_ERR_BAD_FILE;
                        *err_info = g_strdup_printf("k12_open: source descriptor hardware part length %u < %u",
                                                    hwpart_len, K12_SRCDESC_ATM_VCI + 2);
                        destroy_k12_file_data(file_data);
                        g_free(rec);
                        return WTAP_OPEN_ERROR;
                    }
                    rec->input_info.atm.vp = pntoh16(read_buffer + K12_SRCDESC_HWPART + K12_SRCDESC_ATM_VPI);
                    rec->input_info.atm.vc = pntoh16(read_buffer + K12_SRCDESC_HWPART + K12_SRCDESC_ATM_VCI);
                    break;
                default:
                    break;
                }
            } else {
                /* No hardware part: infer ATM PVC from the port type range. */
                if (port_type >= 0x14 && port_type <= 0x17) {
                    rec->input_type = K12_PORT_ATMPVC;
                    rec->input_info.atm.vp = 0;
                    rec->input_info.atm.vc = 0;
                }
            }

            if (read_buffer[K12_SRCDESC_HWPART + hwpart_len + name_len - 1] != '\0') {
                *err = WTAP_ERR_BAD_FILE;
                *err_info = g_strdup("k12_open: source descriptor record contains non-null-terminated link-layer name");
                destroy_k12_file_data(file_data);
                g_free(rec);
                return WTAP_OPEN_ERROR;
            }
            if (read_buffer[K12_SRCDESC_HWPART + hwpart_len + name_len + stack_len - 1] != '\0') {
                *err = WTAP_ERR_BAD_FILE;
                *err_info = g_strdup("k12_open: source descriptor record contains non-null-terminated stack path");
                destroy_k12_file_data(file_data);
                g_free(rec);
                return WTAP_OPEN_ERROR;
            }
            rec->input_name = (gchar *)g_memdup(read_buffer + K12_SRCDESC_HWPART + hwpart_len, name_len);
            rec->stack_file = (gchar *)g_memdup(read_buffer + K12_SRCDESC_HWPART + hwpart_len + name_len, stack_len);

            ascii_strdown_inplace(rec->stack_file);

            /* Ownership of rec transfers to the hash tables in file_data. */
            g_hash_table_insert(file_data->src_by_id, GUINT_TO_POINTER(rec->input), rec);
            g_hash_table_insert(file_data->src_by_name, rec->stack_file, rec);
            break;

        case K12_REC_STK_FILE:
            K12_DBG(1, ("k12_open: K12_REC_STK_FILE"));
            K12_DBG(1, ("Field 1: 0x%08x", pntoh32(read_buffer + 0x08)));
            K12_DBG(1, ("Field 2: 0x%08x", pntoh32(read_buffer + 0x0c)));
            K12_ASCII_DUMP(1, read_buffer, rec_len, 16);
            break;

        default:
            K12_DBG(1, ("k12_open: RECORD TYPE 0x%08x", type));
            break;
        }
        offset += len;
        file_data->num_of_records--;
    } while (1);

    wth->file_type_subtype = WTAP_FILE_TYPE_SUBTYPE_K12;
    wth->file_encap = WTAP_ENCAP_K12;
    wth->snapshot_length = 0;
    wth->subtype_read = k12_read;
    wth->subtype_seek_read = k12_seek_read;
    wth->subtype_close = k12_close;
    wth->priv = (void *)file_data;
    wth->file_tsprec = WTAP_TSPREC_NSEC;

    return WTAP_OPEN_MINE;
}
0
136,139
// Destructor: releases everything this Config owns by delegating to del().
Config::~Config() { del(); }
0
253,031
void on_firmware_memory_dump(char *buffer, int buffer_size) { JNIHelper helper(mVM); /* ALOGD("on_firmware_memory_dump called, vm = %p, obj = %p, env = %p buffer_size = %d" , mVM, mCls, env, buffer_size); */ if (buffer_size > 0) { JNIObject<jbyteArray> dump = helper.newByteArray(buffer_size); jbyte *bytes = (jbyte *) (buffer); helper.setByteArrayRegion(dump, 0, buffer_size, bytes); helper.reportEvent(mCls,"onWifiFwMemoryAvailable","([B)V", dump.get()); } }
0
326,852
/*
 * Open a Parallels disk image (read-only): validate the header magic and
 * version, load the little-endian block catalog into host byte order,
 * and initialize driver state.  Returns 0 on success, -1 on failure.
 */
static int parallels_open(BlockDriverState *bs, int flags)
{
    BDRVParallelsState *s = bs->opaque;
    int i;
    struct parallels_header ph;

    bs->read_only = 1; // no write support yet

    if (bdrv_pread(bs->file, 0, &ph, sizeof(ph)) != sizeof(ph))
        goto fail;

    if (memcmp(ph.magic, HEADER_MAGIC, 16) ||
        (le32_to_cpu(ph.version) != HEADER_VERSION)) {
        goto fail;
    }

    bs->total_sectors = le32_to_cpu(ph.nb_sectors);

    s->tracks = le32_to_cpu(ph.tracks);

    s->catalog_size = le32_to_cpu(ph.catalog_entries);
    /*
     * catalog_entries comes from an untrusted image header.  Reject
     * values whose 4-bytes-per-entry table size would overflow the
     * signed-int arithmetic below (g_malloc/bdrv_pread sizes would
     * otherwise wrap and under-allocate).
     */
    if (s->catalog_size > 0x7fffffff / 4) {
        goto fail;
    }
    s->catalog_bitmap = g_malloc(s->catalog_size * 4);

    if (bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4) !=
        s->catalog_size * 4)
        goto fail;

    /* Catalog entries are stored little-endian; convert in place. */
    for (i = 0; i < s->catalog_size; i++)
        le32_to_cpus(&s->catalog_bitmap[i]);

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    /* g_free(NULL) is a no-op; keep the guard to match existing style. */
    if (s->catalog_bitmap)
        g_free(s->catalog_bitmap);
    return -1;
}
1
136,375
/*
 * Fill in *rec as a parsed DNS SRV resource record.
 *
 * Note: *res_name and *target are shallow-copied pj_str_t values — the
 * caller must keep the underlying string buffers alive for as long as
 * the record is used.
 */
PJ_DEF(void) pj_dns_init_srv_rr( pj_dns_parsed_rr *rec,
                                 const pj_str_t *res_name,
                                 unsigned dnsclass,
                                 unsigned ttl,
                                 unsigned prio,
                                 unsigned weight,
                                 unsigned port,
                                 const pj_str_t *target)
{
    /* Start from a clean slate so any unused fields are deterministic. */
    pj_bzero(rec, sizeof(*rec));

    /* Generic resource-record header. */
    rec->type     = PJ_DNS_TYPE_SRV;
    rec->dnsclass = (pj_uint16_t) dnsclass;
    rec->name     = *res_name;
    rec->ttl      = ttl;

    /* SRV-specific payload. */
    rec->rdata.srv.weight = (pj_uint16_t) weight;
    rec->rdata.srv.prio   = (pj_uint16_t) prio;
    rec->rdata.srv.port   = (pj_uint16_t) port;
    rec->rdata.srv.target = *target;
}
0
117,054
// natcasesort(): sort an array in ascending "natural order", ignoring
// case, while preserving key/value association — delegates to php_asort
// with SORT_NATURAL_CASE.  Returns the underlying sort's success flag.
bool HHVM_FUNCTION(natcasesort, VRefParam array) { return php_asort(array, SORT_NATURAL_CASE, true, false); }
0
71,867
/*
 * Build the argument vector for a new backend subprocess and hand it to
 * internal_forkexec().  The third slot is a placeholder that
 * internal_forkexec() fills in; the final NULL terminates the vector.
 */
backend_forkexec(Port *port)
{
	char	   *av[4] = {
		"postgres",
		"--forkbackend",
		NULL,					/* filled in by internal_forkexec */
		NULL					/* terminator */
	};
	int			ac = 3;

	Assert(ac < lengthof(av));

	return internal_forkexec(ac, av, port);
}
0
71,847
/*
 * Restore extended processor state from a user-space buffer with XRSTOR.
 * The 64-bit feature mask is split into the low/high 32-bit halves the
 * instruction expects in edx:eax.  Returns 0 on success or the fault
 * code produced by the exception-table fixup in XSTATE_OP.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	/* __force: XRSTOR takes a plain pointer to the user buffer */
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;		/* low 32 bits of the feature mask */
	u32 hmask = mask >> 32;	/* high 32 bits of the feature mask */
	int err;

	stac();		/* open a SMAP window for the user-memory access */
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();		/* close the SMAP window again */

	return err;
}
0
309,699
bool NormalPage::isEmpty() { HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(payload()); return header->isFree() && header->size() == payloadSize(); }
0
433,758
/*
 * Print an LMP (Link Management Protocol, RFC 4204) packet.
 *
 * Checks the common header's version, prints a one-line summary in
 * non-verbose mode, and otherwise walks the chain of LMP objects,
 * dispatching on (class, class-type) to decode each one; unknown or
 * malformed objects fall back to a hex dump.  Truncated captures jump
 * to the "trunc" label.  Length sanity checks (header length vs.
 * capture length, per-object minimum length and 4-byte alignment)
 * guard the loop against running off the buffer or looping forever.
 *
 * NOTE(review): return type is missing from this extracted definition
 * (bare "return;" implies void upstream) — confirm against the full file.
 */
lmp_print(netdissect_options *ndo, register const u_char *pptr, register u_int len) { const struct lmp_common_header *lmp_com_header; const struct lmp_object_header *lmp_obj_header; const u_char *tptr,*obj_tptr; u_int tlen,lmp_obj_len,lmp_obj_ctype,obj_tlen; int hexdump, ret; u_int offset; u_int link_type; union { /* int to float conversion buffer */ float f; uint32_t i; } bw; tptr=pptr; lmp_com_header = (const struct lmp_common_header *)pptr; ND_TCHECK(*lmp_com_header); /* * Sanity checking of the header. */ if (LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]) != LMP_VERSION) { ND_PRINT((ndo, "LMP version %u packet not supported", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]))); return; } /* in non-verbose mode just lets print the basic Message Type*/ if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "LMPv%u %s Message, length: %u", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]), tok2str(lmp_msg_type_values, "unknown (%u)",lmp_com_header->msg_type), len)); return; } /* ok they seem to want to know everything - lets fully decode it */ tlen=EXTRACT_16BITS(lmp_com_header->length); ND_PRINT((ndo, "\n\tLMPv%u, msg-type: %s, Flags: [%s], length: %u", LMP_EXTRACT_VERSION(lmp_com_header->version_res[0]), tok2str(lmp_msg_type_values, "unknown, type: %u",lmp_com_header->msg_type), bittok2str(lmp_header_flag_values,"none",lmp_com_header->flags), tlen)); if (tlen < sizeof(const struct lmp_common_header)) { ND_PRINT((ndo, " (too short)")); return; } if (tlen > len) { ND_PRINT((ndo, " (too long)")); tlen = len; } tptr+=sizeof(const struct lmp_common_header); tlen-=sizeof(const struct lmp_common_header); while(tlen>0) { /* did we capture enough for fully decoding the object header ? 
*/ ND_TCHECK2(*tptr, sizeof(struct lmp_object_header)); lmp_obj_header = (const struct lmp_object_header *)tptr; lmp_obj_len=EXTRACT_16BITS(lmp_obj_header->length); lmp_obj_ctype=(lmp_obj_header->ctype)&0x7f; ND_PRINT((ndo, "\n\t %s Object (%u), Class-Type: %s (%u) Flags: [%snegotiable], length: %u", tok2str(lmp_obj_values, "Unknown", lmp_obj_header->class_num), lmp_obj_header->class_num, tok2str(lmp_ctype_values, "Unknown", ((lmp_obj_header->class_num)<<8)+lmp_obj_ctype), lmp_obj_ctype, (lmp_obj_header->ctype)&0x80 ? "" : "non-", lmp_obj_len)); if (lmp_obj_len < 4) { ND_PRINT((ndo, " (too short)")); return; } if ((lmp_obj_len % 4) != 0) { ND_PRINT((ndo, " (not a multiple of 4)")); return; } obj_tptr=tptr+sizeof(struct lmp_object_header); obj_tlen=lmp_obj_len-sizeof(struct lmp_object_header); /* did we capture enough for fully decoding the object ? */ ND_TCHECK2(*tptr, lmp_obj_len); hexdump=FALSE; switch(lmp_obj_header->class_num) { case LMP_OBJ_CC_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_LOC: case LMP_CTYPE_RMT: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Control Channel ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_LINK_ID: case LMP_OBJ_INTERFACE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4_LOC: case LMP_CTYPE_IPV4_RMT: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t IPv4 Link ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_IPV6_LOC: case LMP_CTYPE_IPV6_RMT: if (obj_tlen != 16) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t IPv6 Link ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_UNMD_LOC: case LMP_CTYPE_UNMD_RMT: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Link ID: %u (0x%08x)", 
EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_MESSAGE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_1: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Message ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; case LMP_CTYPE_2: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Message ID Ack: %u (0x%08x)", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_NODE_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_LOC: case LMP_CTYPE_RMT: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Node ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr), EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_CONFIG: switch(lmp_obj_ctype) { case LMP_CTYPE_HELLO_CONFIG: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Hello Interval: %u\n\t Hello Dead Interval: %u", EXTRACT_16BITS(obj_tptr), EXTRACT_16BITS(obj_tptr+2))); break; default: hexdump=TRUE; } break; case LMP_OBJ_HELLO: switch(lmp_obj_ctype) { case LMP_CTYPE_HELLO: if (obj_tlen != 8) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Tx Seq: %u, Rx Seq: %u", EXTRACT_32BITS(obj_tptr), EXTRACT_32BITS(obj_tptr+4))); break; default: hexdump=TRUE; } break; case LMP_OBJ_TE_LINK: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: if (obj_tlen != 12) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_te_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Link-ID: %s (0x%08x)" "\n\t Remote Link-ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ipaddr_string(ndo, obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); break; case LMP_CTYPE_IPV6: if (obj_tlen != 36) { 
ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_te_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Link-ID: %s (0x%08x)" "\n\t Remote Link-ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ip6addr_string(ndo, obj_tptr+20), EXTRACT_32BITS(obj_tptr+20))); break; case LMP_CTYPE_UNMD: if (obj_tlen != 12) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_te_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Link-ID: %u (0x%08x)" "\n\t Remote Link-ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), EXTRACT_32BITS(obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); break; default: hexdump=TRUE; } break; case LMP_OBJ_DATA_LINK: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: if (obj_tlen < 12) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_data_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)" "\n\t Remote Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ipaddr_string(ndo, obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); ret = lmp_print_data_link_subobjs(ndo, obj_tptr, obj_tlen - 12, 12); if (ret == -1) goto trunc; if (ret == TRUE) hexdump=TRUE; break; case LMP_CTYPE_IPV6: if (obj_tlen < 36) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_data_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)" "\n\t Remote Interface ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), ip6addr_string(ndo, obj_tptr+20), EXTRACT_32BITS(obj_tptr+20))); ret = lmp_print_data_link_subobjs(ndo, obj_tptr, obj_tlen - 36, 36); if (ret == -1) goto trunc; if (ret == TRUE) hexdump=TRUE; 
break; case LMP_CTYPE_UNMD: if (obj_tlen < 12) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: [%s]", bittok2str(lmp_obj_data_link_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Local Interface ID: %u (0x%08x)" "\n\t Remote Interface ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr+4), EXTRACT_32BITS(obj_tptr+4), EXTRACT_32BITS(obj_tptr+8), EXTRACT_32BITS(obj_tptr+8))); ret = lmp_print_data_link_subobjs(ndo, obj_tptr, obj_tlen - 12, 12); if (ret == -1) goto trunc; if (ret == TRUE) hexdump=TRUE; break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_BEGIN: switch(lmp_obj_ctype) { case LMP_CTYPE_1: if (obj_tlen != 20) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: %s", bittok2str(lmp_obj_begin_verify_flag_values, "none", EXTRACT_16BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t Verify Interval: %u", EXTRACT_16BITS(obj_tptr+2))); ND_PRINT((ndo, "\n\t Data links: %u", EXTRACT_32BITS(obj_tptr+4))); ND_PRINT((ndo, "\n\t Encoding type: %s", tok2str(gmpls_encoding_values, "Unknown", *(obj_tptr+8)))); ND_PRINT((ndo, "\n\t Verify Transport Mechanism: %u (0x%x)%s", EXTRACT_16BITS(obj_tptr+10), EXTRACT_16BITS(obj_tptr+10), EXTRACT_16BITS(obj_tptr+10)&8000 ? 
/* NOTE(review): "&8000" above is decimal — the flag test likely meant 0x8000; confirm against upstream tcpdump before changing. */
" (Payload test messages capable)" : "")); bw.i = EXTRACT_32BITS(obj_tptr+12); ND_PRINT((ndo, "\n\t Transmission Rate: %.3f Mbps",bw.f*8/1000000)); ND_PRINT((ndo, "\n\t Wavelength: %u", EXTRACT_32BITS(obj_tptr+16))); break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_BEGIN_ACK: switch(lmp_obj_ctype) { case LMP_CTYPE_1: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Verify Dead Interval: %u" "\n\t Verify Transport Response: %u", EXTRACT_16BITS(obj_tptr), EXTRACT_16BITS(obj_tptr+2))); break; default: hexdump=TRUE; } break; case LMP_OBJ_VERIFY_ID: switch(lmp_obj_ctype) { case LMP_CTYPE_1: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Verify ID: %u", EXTRACT_32BITS(obj_tptr))); break; default: hexdump=TRUE; } break; case LMP_OBJ_CHANNEL_STATUS: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: offset = 0; /* Decode pairs: <Interface_ID (4 bytes), Channel_status (4 bytes)> */ while (offset+8 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); ND_PRINT((ndo, "\n\t\t Active: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>31) ? "Allocated" : "Non-allocated", (EXTRACT_32BITS(obj_tptr+offset+4)>>31))); ND_PRINT((ndo, "\n\t\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1 ? 
"Transmit" : "Receive", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1)); ND_PRINT((ndo, "\n\t\t Channel Status: %s (%u)", tok2str(lmp_obj_channel_status_values, "Unknown", EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF), EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF)); offset+=8; } break; case LMP_CTYPE_IPV6: offset = 0; /* Decode pairs: <Interface_ID (16 bytes), Channel_status (4 bytes)> */ while (offset+20 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); ND_PRINT((ndo, "\n\t\t Active: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+16)>>31) ? "Allocated" : "Non-allocated", (EXTRACT_32BITS(obj_tptr+offset+16)>>31))); ND_PRINT((ndo, "\n\t\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+16)>>30)&0x1 ? "Transmit" : "Receive", (EXTRACT_32BITS(obj_tptr+offset+16)>>30)&0x1)); ND_PRINT((ndo, "\n\t\t Channel Status: %s (%u)", tok2str(lmp_obj_channel_status_values, "Unknown", EXTRACT_32BITS(obj_tptr+offset+16)&0x3FFFFFF), EXTRACT_32BITS(obj_tptr+offset+16)&0x3FFFFFF)); offset+=20; } break; case LMP_CTYPE_UNMD: offset = 0; /* Decode pairs: <Interface_ID (4 bytes), Channel_status (4 bytes)> */ while (offset+8 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); ND_PRINT((ndo, "\n\t\t Active: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>31) ? "Allocated" : "Non-allocated", (EXTRACT_32BITS(obj_tptr+offset+4)>>31))); ND_PRINT((ndo, "\n\t\t Direction: %s (%u)", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1 ? 
"Transmit" : "Receive", (EXTRACT_32BITS(obj_tptr+offset+4)>>30)&0x1)); ND_PRINT((ndo, "\n\t\t Channel Status: %s (%u)", tok2str(lmp_obj_channel_status_values, "Unknown", EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF), EXTRACT_32BITS(obj_tptr+offset+4)&0x3FFFFFF)); offset+=8; } break; default: hexdump=TRUE; } break; case LMP_OBJ_CHANNEL_STATUS_REQ: switch(lmp_obj_ctype) { case LMP_CTYPE_IPV4: offset = 0; while (offset+4 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); offset+=4; } break; case LMP_CTYPE_IPV6: offset = 0; while (offset+16 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %s (0x%08x)", ip6addr_string(ndo, obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); offset+=16; } break; case LMP_CTYPE_UNMD: offset = 0; while (offset+4 <= obj_tlen) { ND_PRINT((ndo, "\n\t Interface ID: %u (0x%08x)", EXTRACT_32BITS(obj_tptr+offset), EXTRACT_32BITS(obj_tptr+offset))); offset+=4; } break; default: hexdump=TRUE; } break; case LMP_OBJ_ERROR_CODE: switch(lmp_obj_ctype) { case LMP_CTYPE_BEGIN_VERIFY_ERROR: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Error Code: %s", bittok2str(lmp_obj_begin_verify_error_values, "none", EXTRACT_32BITS(obj_tptr)))); break; case LMP_CTYPE_LINK_SUMMARY_ERROR: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Error Code: %s", bittok2str(lmp_obj_link_summary_error_values, "none", EXTRACT_32BITS(obj_tptr)))); break; default: hexdump=TRUE; } break; case LMP_OBJ_SERVICE_CONFIG: switch (lmp_obj_ctype) { case LMP_CTYPE_SERVICE_CONFIG_SP: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Flags: %s", bittok2str(lmp_obj_service_config_sp_flag_values, "none", EXTRACT_8BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t UNI Version: %u", EXTRACT_8BITS(obj_tptr + 1))); break; case LMP_CTYPE_SERVICE_CONFIG_CPSA: if (obj_tlen != 16) { 
ND_PRINT((ndo, " (not correct for object)")); break; } link_type = EXTRACT_8BITS(obj_tptr); ND_PRINT((ndo, "\n\t Link Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_link_type_values, "Unknown", link_type), link_type)); switch (link_type) { case LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SDH: ND_PRINT((ndo, "\n\t Signal Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_signal_type_sdh_values, "Unknown", EXTRACT_8BITS(obj_tptr + 1)), EXTRACT_8BITS(obj_tptr + 1))); break; case LMP_SD_SERVICE_CONFIG_CPSA_LINK_TYPE_SONET: ND_PRINT((ndo, "\n\t Signal Type: %s (%u)", tok2str(lmp_sd_service_config_cpsa_signal_type_sonet_values, "Unknown", EXTRACT_8BITS(obj_tptr + 1)), EXTRACT_8BITS(obj_tptr + 1))); break; } ND_PRINT((ndo, "\n\t Transparency: %s", bittok2str(lmp_obj_service_config_cpsa_tp_flag_values, "none", EXTRACT_8BITS(obj_tptr + 2)))); ND_PRINT((ndo, "\n\t Contiguous Concatenation Types: %s", bittok2str(lmp_obj_service_config_cpsa_cct_flag_values, "none", EXTRACT_8BITS(obj_tptr + 3)))); ND_PRINT((ndo, "\n\t Minimum NCC: %u", EXTRACT_16BITS(obj_tptr+4))); ND_PRINT((ndo, "\n\t Maximum NCC: %u", EXTRACT_16BITS(obj_tptr+6))); ND_PRINT((ndo, "\n\t Minimum NVC:%u", EXTRACT_16BITS(obj_tptr+8))); ND_PRINT((ndo, "\n\t Maximum NVC:%u", EXTRACT_16BITS(obj_tptr+10))); ND_PRINT((ndo, "\n\t Local Interface ID: %s (0x%08x)", ipaddr_string(ndo, obj_tptr+12), EXTRACT_32BITS(obj_tptr+12))); break; case LMP_CTYPE_SERVICE_CONFIG_TRANSPARENCY_TCM: if (obj_tlen != 8) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Transparency Flags: %s", bittok2str( lmp_obj_service_config_nsa_transparency_flag_values, "none", EXTRACT_32BITS(obj_tptr)))); ND_PRINT((ndo, "\n\t TCM Monitoring Flags: %s", bittok2str( lmp_obj_service_config_nsa_tcm_flag_values, "none", EXTRACT_8BITS(obj_tptr + 7)))); break; case LMP_CTYPE_SERVICE_CONFIG_NETWORK_DIVERSITY: if (obj_tlen != 4) { ND_PRINT((ndo, " (not correct for object)")); break; } ND_PRINT((ndo, "\n\t Diversity: Flags: %s", 
bittok2str( lmp_obj_service_config_nsa_network_diversity_flag_values, "none", EXTRACT_8BITS(obj_tptr + 3)))); break; default: hexdump = TRUE; } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo,obj_tptr,"\n\t ",obj_tlen); break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag > 1 || hexdump==TRUE) print_unknown_data(ndo,tptr+sizeof(struct lmp_object_header),"\n\t ", lmp_obj_len-sizeof(struct lmp_object_header)); tptr+=lmp_obj_len; tlen-=lmp_obj_len; } return; trunc: ND_PRINT((ndo, "%s", tstr)); }
0
335,538
/* Unmap the guest-memory regions backing @elem's scatter/gather lists.
 *
 * Device-writable (in_) entries are unmapped with an access length clamped
 * to the portion the device actually wrote (@len bytes total across the
 * list); device-readable (out_) entries were never written, so the full
 * mapped length is passed with is_write=0.  @vq is currently unused.
 *
 * NOTE(review): assumes @len does not exceed the combined in_sg length;
 * if offset ever exceeded len, `len - offset` would wrap — confirm callers.
 */
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        /* Bytes of this entry actually written by the device. */
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);
}
0
331,638
/* Bring the i82374 DMA controller model to its power-on state. */
static void i82374_init(I82374State *s)
{
    /* Start with an empty command buffer, then set up the DMA core. */
    memset(s->commands, 0x00, sizeof(s->commands));
    DMA_init(1, NULL);
}
1
24,303
void hb_face_destroy ( hb_face_t * face ) { if ( ! hb_object_destroy ( face ) ) return ; for ( hb_face_t : : plan_node_t * node = face -> shape_plans ; node ; ) { hb_face_t : : plan_node_t * next = node -> next ; hb_shape_plan_destroy ( node -> shape_plan ) ; free ( node ) ; node = next ; } # define HB_SHAPER_IMPLEMENT ( shaper ) HB_SHAPER_DATA_DESTROY ( shaper , face ) ; # include "hb-shaper-list.hh" # undef HB_SHAPER_IMPLEMENT if ( face -> destroy ) face -> destroy ( face -> user_data ) ; free ( face ) ; }
0
329,689
/* Tear down a client connection: stop fd polling, close the socket,
 * notify the owner via the close callback, and drop our reference. */
static void nbd_client_close(NBDClient *client)
{
    /* Remove read/write handlers before closing the descriptor. */
    qemu_set_fd_handler2(client->sock, NULL, NULL, NULL, NULL);
    close(client->sock);
    client->sock = -1;
    if (client->close) {
        client->close(client);
    }
    nbd_client_put(client);
}
1
94,790
/* Read a DER-encoded X509 certificate from @bp.
 * Thin wrapper around the generic ASN.1 template decoder; if *x509 is
 * non-NULL it is reused/updated per ASN1_item_d2i_bio() semantics. */
X509 *d2i_X509_bio(BIO *bp, X509 **x509)
{
    return ASN1_item_d2i_bio(ASN1_ITEM_rptr(X509), bp, x509);
}
0
515,172
/* Generic handler for conditional container sections (<IfX arg>...</IfX>).
 * cmd->info holds the predicate function; if the (possibly '!'-negated)
 * predicate holds, the enclosed directives are compiled into the config
 * tree, otherwise input is consumed up to the matching closing tag.
 * Returns NULL on success or an error string for the caller to report. */
static const char *start_cond_section(cmd_parms *cmd, void *mconfig,
                                      const char *arg)
{
    const char *endp = ap_strrchr_c(arg, '>');
    int result, not = (arg[0] == '!');  /* leading '!' inverts the test */
    test_cond_section_fn testfn = (test_cond_section_fn)cmd->info;
    const char *arg1;

    if (endp == NULL) {
        return unclosed_directive(cmd);
    }

    /* Drop the trailing '>' (and skip the leading '!' if present). */
    arg = apr_pstrmemdup(cmd->temp_pool, arg, endp - arg);

    if (not) {
        arg++;
    }

    arg1 = ap_getword_conf(cmd->temp_pool, &arg);
    if (!arg1[0]) {
        return missing_container_arg(cmd);
    }

    result = testfn(cmd, arg1);

    if ((!not && result) || (not && !result)) {
        /* Condition holds: build the contained directives in place. */
        ap_directive_t *parent = NULL;
        ap_directive_t *current = NULL;
        const char *retval;

        retval = ap_build_cont_config(cmd->pool, cmd->temp_pool, cmd,
                                      &current, &parent,
                                      (char *)cmd->cmd->name);
        *(ap_directive_t **)mconfig = current;
        return retval;
    }
    else {
        /* Condition fails: skip to the section's closing tag. */
        *(ap_directive_t **)mconfig = NULL;
        return ap_soak_end_container(cmd, (char *)cmd->cmd->name);
    }
}
0
417,660
/* Completion callback for the RTM_SETLINK request issued when configuring
 * the link.  -EEXIST is tolerated; any other error moves the link into
 * the failed state.  Returns 1 when the error path handled the message,
 * 0 otherwise. */
static int link_set_handler(sd_netlink *rtnl, sd_netlink_message *m, void *userdata) {
        _cleanup_link_unref_ Link *link = userdata;
        int r;

        log_link_debug(link, "Set link");

        r = sd_netlink_message_get_errno(m);
        if (r < 0 && r != -EEXIST) {
                /* Fix copy-pasted message: this handler sets link
                 * attributes, it does not join a netdev. */
                log_link_error_errno(link, r, "Could not set link: %m");
                link_enter_failed(link);
                return 1;
        }

        return 0;
}
0
411,325
//! Serialize a 3d object (vertices in *this, plus primitives/colors/
//! opacities) into a single-column CImg3d float buffer.
//! Layout: magic "CImg3d", vertex/primitive counts, vertex coordinates,
//! primitives, colors (inline RGB or shared/texture records), opacities.
//! Throws CImgInstanceException if the object fails validation.
//! NOTE(review): the extract of this function was missing its closing
//! brace after `return res;`; it is restored here.
template<typename tp, typename tc, typename to>
CImg<floatT> get_object3dtoCImg3d(const CImgList<tp>& primitives,
                                  const CImgList<tc>& colors,
                                  const to& opacities,
                                  const bool full_check=true) const {
  CImg<charT> error_message(1024);
  if (!is_object3d(primitives,colors,opacities,full_check,error_message))
    throw CImgInstanceException(_cimg_instance
                                "object3dtoCImg3d(): Invalid specified 3d object (%u,%u) (%s).",
                                cimg_instance,_width,primitives._width,error_message.data());
  CImg<floatT> res(1,_size_object3dtoCImg3d(primitives,colors,opacities));
  float *ptrd = res._data;

  // Put magick number.
  *(ptrd++) = 'C' + 0.5f; *(ptrd++) = 'I' + 0.5f; *(ptrd++) = 'm' + 0.5f;
  *(ptrd++) = 'g' + 0.5f; *(ptrd++) = '3' + 0.5f; *(ptrd++) = 'd' + 0.5f;

  // Put number of vertices and primitives.
  *(ptrd++) = cimg::uint2float(_width);
  *(ptrd++) = cimg::uint2float(primitives._width);

  // Put vertex data.
  if (is_empty() || !primitives) return res;
  const T *ptrx = data(0,0), *ptry = data(0,1), *ptrz = data(0,2);
  cimg_forX(*this,p) {
    *(ptrd++) = (float)*(ptrx++);
    *(ptrd++) = (float)*(ptry++);
    *(ptrd++) = (float)*(ptrz++);
  }

  // Put primitive data.
  cimglist_for(primitives,p) {
    *(ptrd++) = (float)primitives[p].size();
    const tp *ptrp = primitives[p]._data;
    cimg_foroff(primitives[p],i) *(ptrd++) = cimg::uint2float((unsigned int)*(ptrp++));
  }

  // Put color/texture data.
  const unsigned int csiz = std::min(colors._width,primitives._width);
  for (int c = 0; c<(int)csiz; ++c) {
    const CImg<tc>& color = colors[c];
    const tc *ptrc = color._data;
    if (color.size()==3) { // Plain RGB color.
      *(ptrd++) = (float)*(ptrc++); *(ptrd++) = (float)*(ptrc++); *(ptrd++) = (float)*ptrc;
    } else { // Texture or shared color: marker -128 then descriptor.
      *(ptrd++) = -128.0f;
      int shared_ind = -1;
      if (color.is_shared())
        for (int i = 0; i<c; ++i)
          if (ptrc==colors[i]._data) { shared_ind = i; break; }
      if (shared_ind<0) {
        *(ptrd++) = (float)color._width;
        *(ptrd++) = (float)color._height;
        *(ptrd++) = (float)color._spectrum;
        cimg_foroff(color,l) *(ptrd++) = (float)*(ptrc++);
      } else { // Reference a previously-emitted shared color.
        *(ptrd++) = (float)shared_ind;
        *(ptrd++) = 0;
        *(ptrd++) = 0;
      }
    }
  }

  // Primitives without an explicit color get a default gray (200,200,200).
  const int csiz2 = primitives.width() - colors.width();
  for (int c = 0; c<csiz2; ++c) {
    *(ptrd++) = 200.0f; *(ptrd++) = 200.0f; *(ptrd++) = 200.0f;
  }

  // Put opacity data; missing opacities default to fully opaque (1.0).
  ptrd = _object3dtoCImg3d(opacities,ptrd);
  const float *ptre = res.end();
  while (ptrd<ptre) *(ptrd++) = 1.0f;
  return res;
}
0
195,273
/* Write @image to @image_info's blob in Palm Database (PDB) "vIMG" format:
 * a PDB header, one image record (RLE-compressed gray raster at 1/2/4
 * bits per pixel) and, if the image has a "comment" property, a second
 * record holding the comment.  Returns MagickTrue on success. */
static MagickBooleanType WritePDBImage(const ImageInfo *image_info,Image *image)
{
  const char *comment;
  int bits;
  MagickBooleanType status;
  PDBImage pdb_image;
  PDBInfo pdb_info;
  QuantumInfo *quantum_info;
  register const PixelPacket *p;
  register ssize_t x;
  register unsigned char *q;
  size_t bits_per_pixel, literal, packets, packet_size, repeat;
  ssize_t y;
  unsigned char *buffer, *runlength, *scanline;

  /*
    Open output image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace);
  /* Pick output depth from the palette size (bilevel forces 1 bit). */
  if ((image -> colors <= 2 ) ||
      (GetImageType(image,&image->exception ) == BilevelType)) {
    bits_per_pixel=1;
  } else if (image->colors <= 4) {
    bits_per_pixel=2;
  } else if (image->colors <= 8) {
    bits_per_pixel=3;
  } else {
    bits_per_pixel=4;
  }
  /* Fill in and emit the PDB database header. */
  (void) ResetMagickMemory(&pdb_info,0,sizeof(pdb_info));
  (void) CopyMagickString(pdb_info.name,image_info->filename,
    sizeof(pdb_info.name));
  pdb_info.attributes=0;
  pdb_info.version=0;
  pdb_info.create_time=time(NULL);
  pdb_info.modify_time=pdb_info.create_time;
  pdb_info.archive_time=0;
  pdb_info.modify_number=0;
  pdb_info.application_info=0;
  pdb_info.sort_info=0;
  (void) CopyMagickMemory(pdb_info.type,"vIMG",4);
  (void) CopyMagickMemory(pdb_info.id,"View",4);
  pdb_info.seed=0;
  pdb_info.next_record=0;
  comment=GetImageProperty(image,"comment");
  /* One record for the image, a second only when a comment exists. */
  pdb_info.number_records=(comment == (const char *) NULL ? 1 : 2);
  (void) WriteBlob(image,sizeof(pdb_info.name),(unsigned char *)
    pdb_info.name);
  (void) WriteBlobMSBShort(image,(unsigned short) pdb_info.attributes);
  (void) WriteBlobMSBShort(image,(unsigned short) pdb_info.version);
  (void) WriteBlobMSBLong(image,(unsigned int) pdb_info.create_time);
  (void) WriteBlobMSBLong(image,(unsigned int) pdb_info.modify_time);
  (void) WriteBlobMSBLong(image,(unsigned int) pdb_info.archive_time);
  (void) WriteBlobMSBLong(image,(unsigned int) pdb_info.modify_number);
  (void) WriteBlobMSBLong(image,(unsigned int) pdb_info.application_info);
  (void) WriteBlobMSBLong(image,(unsigned int) pdb_info.sort_info);
  (void) WriteBlob(image,4,(unsigned char *) pdb_info.type);
  (void) WriteBlob(image,4,(unsigned char *) pdb_info.id);
  (void) WriteBlobMSBLong(image,(unsigned int) pdb_info.seed);
  (void) WriteBlobMSBLong(image,(unsigned int) pdb_info.next_record);
  (void) WriteBlobMSBShort(image,(unsigned short) pdb_info.number_records);
  /* Prepare the image record header. */
  (void) CopyMagickString(pdb_image.name,pdb_info.name,sizeof(pdb_image.name));
  pdb_image.version=1;  /* RLE Compressed */
  switch (bits_per_pixel)
  {
    case 1: pdb_image.type=(unsigned char) 0xff; break;  /* monochrome */
    case 2: pdb_image.type=(unsigned char) 0x00; break;  /* 2 bit gray */
    default: pdb_image.type=(unsigned char) 0x02;  /* 4 bit gray */
  }
  pdb_image.reserved_1=0;
  pdb_image.note=0;
  pdb_image.x_last=0;
  pdb_image.y_last=0;
  pdb_image.reserved_2=0;
  pdb_image.x_anchor=(unsigned short) 0xffff;
  pdb_image.y_anchor=(unsigned short) 0xffff;
  /* Row width is padded up to a multiple of 16 pixels. */
  pdb_image.width=(short) image->columns;
  if (image->columns % 16)
    pdb_image.width=(short) (16*(image->columns/16+1));
  pdb_image.height=(short) image->rows;
  packets=((bits_per_pixel*image->columns+7)/8);
  runlength=(unsigned char *) AcquireQuantumMemory(9UL*packets,
    image->rows*sizeof(*runlength));
  if (runlength == (unsigned char *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  buffer=(unsigned char *) AcquireQuantumMemory(512,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  packet_size=(size_t) (image->depth > 8 ? 2 : 1);
  scanline=(unsigned char *) AcquireQuantumMemory(image->columns,packet_size*
    sizeof(*scanline));
  if (scanline == (unsigned char *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace);
  /*
    Convert to GRAY raster scanline.
  */
  quantum_info=AcquireQuantumInfo(image_info,image);
  if (quantum_info == (QuantumInfo *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  status=SetQuantumDepth(image,quantum_info,image->depth > 8 ? 16 : 8);
  bits=8/(int) bits_per_pixel-1;  /* start at most significant bits */
  literal=0;
  repeat=0;
  q=runlength;
  buffer[0]=0x00;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
    if (p == (const PixelPacket *) NULL)
      break;
    (void) ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info,
      GrayQuantum,scanline,&image->exception);
    /* Pack inverted gray samples into the buffer, RLE-encoding runs. */
    for (x=0; x < (ssize_t) pdb_image.width; x++)
    {
      if (x < (ssize_t) image->columns)
        buffer[literal+repeat]|=(0xff-scanline[x*packet_size]) >>
          (8-bits_per_pixel) << bits*bits_per_pixel;
      bits--;
      if (bits < 0)
        {
          if (((literal+repeat) > 0) &&
              (buffer[literal+repeat] == buffer[literal+repeat-1]))
            {
              if (repeat == 0)
                {
                  literal--;
                  repeat++;
                }
              repeat++;
              if (0x7f < repeat)
                {
                  q=EncodeRLE(q,buffer,literal,repeat);
                  literal=0;
                  repeat=0;
                }
            }
          else
            {
              if (repeat >= 2)
                literal+=repeat;
              else
                {
                  q=EncodeRLE(q,buffer,literal,repeat);
                  buffer[0]=buffer[literal+repeat];
                  literal=0;
                }
              literal++;
              repeat=0;
              if (0x7f < literal)
                {
                  q=EncodeRLE(q,buffer,(literal < 0x80 ? literal : 0x80),0);
                  (void) CopyMagickMemory(buffer,buffer+literal+repeat,0x80);
                  literal-=0x80;
                }
            }
          bits=8/(int) bits_per_pixel-1;
          buffer[literal+repeat]=0x00;
        }
    }
    status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
      image->rows);
    if (status == MagickFalse)
      break;
  }
  q=EncodeRLE(q,buffer,literal,repeat);
  scanline=(unsigned char *) RelinquishMagickMemory(scanline);
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  quantum_info=DestroyQuantumInfo(quantum_info);
  /*
    Write the Image record header.
  */
  (void) WriteBlobMSBLong(image,(unsigned int) (TellBlob(image)+8*
    pdb_info.number_records));
  (void) WriteBlobByte(image,0x40);
  (void) WriteBlobByte(image,0x6f);
  (void) WriteBlobByte(image,0x80);
  (void) WriteBlobByte(image,0);
  if (pdb_info.number_records > 1)
    {
      /*
        Write the comment record header.
      */
      (void) WriteBlobMSBLong(image,(unsigned int) (TellBlob(image)+8+58+q-
        runlength));
      (void) WriteBlobByte(image,0x40);
      (void) WriteBlobByte(image,0x6f);
      (void) WriteBlobByte(image,0x80);
      (void) WriteBlobByte(image,1);
    }
  /*
    Write the Image data.
  */
  (void) WriteBlob(image,sizeof(pdb_image.name),(unsigned char *)
    pdb_image.name);
  (void) WriteBlobByte(image,(unsigned char) pdb_image.version);
  (void) WriteBlobByte(image,pdb_image.type);
  (void) WriteBlobMSBLong(image,(unsigned int) pdb_image.reserved_1);
  (void) WriteBlobMSBLong(image,(unsigned int) pdb_image.note);
  (void) WriteBlobMSBShort(image,(unsigned short) pdb_image.x_last);
  (void) WriteBlobMSBShort(image,(unsigned short) pdb_image.y_last);
  (void) WriteBlobMSBLong(image,(unsigned int) pdb_image.reserved_2);
  (void) WriteBlobMSBShort(image,pdb_image.x_anchor);
  (void) WriteBlobMSBShort(image,pdb_image.y_anchor);
  (void) WriteBlobMSBShort(image,(unsigned short) pdb_image.width);
  (void) WriteBlobMSBShort(image,(unsigned short) pdb_image.height);
  (void) WriteBlob(image,(size_t) (q-runlength),runlength);
  runlength=(unsigned char *) RelinquishMagickMemory(runlength);
  /* comment is non-NULL here: number_records > 1 implies it was found. */
  if (pdb_info.number_records > 1)
    (void) WriteBlobString(image,comment);
  (void) CloseBlob(image);
  return(MagickTrue);
}
0
317,930
/* Encode @eckey in DER (SEC1 ECPrivateKey) form and write it to @fp.
 * Thin wrapper over the generic ASN.1 i2d-to-FILE helper. */
int i2d_ECPrivateKey_fp(FILE *fp, EC_KEY *eckey)
{
    return ASN1_i2d_fp_of(EC_KEY,i2d_ECPrivateKey,fp,eckey);
}
0
117,611
/* Reclaim pages from @root_memcg's hierarchy until it no longer exceeds
 * its soft limit, iterating victims round-robin via mem_cgroup_iter().
 * Returns the number of pages reclaimed; *total_scanned accumulates the
 * number of pages scanned across all victims. */
static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   struct zone *zone,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = 0,
	};

	/* Pages by which the root group exceeds its soft limit. */
	excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			/* NULL marks the end of one full hierarchy pass. */
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might because there are
				 * no reclaimable pages under this hierarchy
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not to excessive so as to
				 * reclaim too much, nor too less that we keep
				 * coming back to reclaim from this cgroup
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		if (!mem_cgroup_reclaimable(victim, false))
			continue;
		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
						     zone, &nr_scanned);
		*total_scanned += nr_scanned;
		/* Stop as soon as the root is back under its soft limit. */
		if (!res_counter_soft_limit_excess(&root_memcg->res))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}
0
267,450
// Shape-inference function for DepthwiseConv2dNative ops.
// Validates the strides/dilations/data_format/padding attributes, merges
// the input and filter depth dimensions, computes the spatial output size
// from the windowing parameters, and sets output 0 to
// [batch, out_rows, out_cols, in_depth * depth_multiplier] (layout per
// data_format).  @supports_explicit_padding gates the "explicit_paddings"
// attribute.
Status DepthwiseConv2DNativeShapeImpl(shape_inference::InferenceContext* c,
                                      bool supports_explicit_padding) {
  ShapeHandle input_shape;
  TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape));
  ShapeHandle filter_shape;
  TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 4, &filter_shape));

  std::vector<int32> strides;
  TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));

  if (strides.size() != 4) {
    return errors::InvalidArgument(
        "DepthwiseConv2D requires the stride attribute to contain 4 values, "
        "but got: ",
        strides.size());
  }

  std::vector<int32> dilations;
  // "dilations" is optional; default to 1 in every dimension.
  if (!c->GetAttr("dilations", &dilations).ok()) {
    dilations.resize(4, 1);
  }

  if (dilations.size() != 4) {
    return errors::InvalidArgument(
        "DepthwiseConv2D requires the dilations attribute to contain 4 values, "
        "but got: ",
        dilations.size());
  }

  string data_format_str;
  Status s = c->GetAttr("data_format", &data_format_str);
  TensorFormat data_format;
  // Missing or unparsable data_format falls back to NHWC.
  if (!s.ok() || !FormatFromString(data_format_str, &data_format)) {
    data_format = FORMAT_NHWC;
  }
  int32_t stride_rows;
  int32_t stride_cols;
  int32_t dilation_rows;
  int32_t dilation_cols;
  if (data_format == FORMAT_NCHW) {
    // Canonicalize input shape to NHWC so the shape inference code below can
    // process it.
    input_shape =
        c->MakeShape({{c->Dim(input_shape, 0), c->Dim(input_shape, 2),
                       c->Dim(input_shape, 3), c->Dim(input_shape, 1)}});
    stride_rows = strides[2];
    stride_cols = strides[3];
    dilation_rows = dilations[2];
    dilation_cols = dilations[3];
  } else {
    stride_rows = strides[1];
    stride_cols = strides[2];
    dilation_rows = dilations[1];
    dilation_cols = dilations[2];
  }

  DimensionHandle batch_size_dim = c->Dim(input_shape, 0);
  DimensionHandle in_rows_dim = c->Dim(input_shape, 1);
  DimensionHandle in_cols_dim = c->Dim(input_shape, 2);

  DimensionHandle filter_rows_dim = c->Dim(filter_shape, 0);
  DimensionHandle filter_cols_dim = c->Dim(filter_shape, 1);
  DimensionHandle input_depth = c->Dim(filter_shape, 2);
  DimensionHandle depth_multiplier = c->Dim(filter_shape, 3);

  // Check that the input depths are compatible.
  TF_RETURN_IF_ERROR(
      c->Merge(c->Dim(input_shape, 3), input_depth, &input_depth));

  DimensionHandle output_depth;
  TF_RETURN_IF_ERROR(c->Multiply(input_depth, depth_multiplier, &output_depth));

  Padding padding;
  TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));

  std::vector<int64> explicit_paddings;
  if (supports_explicit_padding) {
    Status status = c->GetAttr("explicit_paddings", &explicit_paddings);
    // Use the default value, which is an empty list, if the attribute is not
    // found. Otherwise return the error to the caller.
    if (!status.ok() && !errors::IsNotFound(status)) {
      return status;
    }
    TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
                                         /*num_dims=*/4, data_format));
  } else {
    DCHECK(padding != Padding::EXPLICIT);
  }

  // TODO(mrry,shlens): Raise an error if the stride would cause
  // information in the input to be ignored. This will require a change
  // in the kernel implementation.
  DimensionHandle output_rows, output_cols;
  int64_t pad_rows_before = -1, pad_rows_after = -1;
  int64_t pad_cols_before = -1, pad_cols_after = -1;
  if (padding == Padding::EXPLICIT) {
    GetExplicitPaddingForDim(explicit_paddings, data_format, 'H',
                             &pad_rows_before, &pad_rows_after);
    GetExplicitPaddingForDim(explicit_paddings, data_format, 'W',
                             &pad_cols_before, &pad_cols_after);
  }
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
      c, in_rows_dim, filter_rows_dim, dilation_rows, stride_rows, padding,
      pad_rows_before, pad_rows_after, &output_rows));
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
      c, in_cols_dim, filter_cols_dim, dilation_cols, stride_cols, padding,
      pad_cols_before, pad_cols_after, &output_cols));

  ShapeHandle output_shape;
  if (data_format == FORMAT_NCHW) {
    output_shape = c->MakeShape(
        {batch_size_dim, output_depth, output_rows, output_cols});
  } else {
    output_shape = c->MakeShape(
        {batch_size_dim, output_rows, output_cols, output_depth});
  }
  c->set_output(0, output_shape);
  return Status::OK();
}
0
205,183
// Applies the given sandbox flag mask, recording whether the current
// security origin is potentially trustworthy at enforcement time.
void Document::EnforceSandboxFlags(SandboxFlags mask) {
  scoped_refptr<const SecurityOrigin> origin = GetSecurityOrigin();
  const bool trustworthy = origin ? origin->IsPotentiallyTrustworthy() : false;
  ApplySandboxFlags(mask, trustworthy);
}
0
374,135
/* GSourceFunc: show the lock dialog once from the idle loop.
 * Clears the stored source id and returns FALSE so GLib removes the
 * idle source after this single invocation. */
popup_dialog_idle (GSWindow *window)
{
        popup_dialog (window);

        /* The source is being destroyed; forget its id. */
        window->priv->popup_dialog_idle_id = 0;

        return FALSE;
}
0
19,400
/* Set up the reassembly table used to defragment T.38 data streams. */
static void
t38_defragment_init(void)
{
    reassembly_table_init(&data_reassembly_table,
                          &addresses_reassembly_table_functions);
}
0
295,831
/* Slow-path write through a MemoryRegionCache: re-translate @addr inside
 * the cached region and hand off to flatview_write_continue().
 * Returns the MemTxResult of the underlying write. */
address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
                                const void *buf, hwaddr len)
{
    hwaddr addr1, l;
    MemoryRegion *mr;

    l = len;
    mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
                                        MEMTXATTRS_UNSPECIFIED);
    return flatview_write_continue(cache->fv,
                                   addr, MEMTXATTRS_UNSPECIFIED, buf, len,
                                   addr1, l, mr);
}
0
76,615
/* 32-bit compat entry point for timerfd_settime(): converts the caller's
 * compat_itimerspec to/from the native representation around
 * do_timerfd_settime().  Returns 0 on success or a negative errno. */
COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
		const struct compat_itimerspec __user *, utmr,
		struct compat_itimerspec __user *, otmr)
{
	struct itimerspec new, old;
	int ret;

	if (get_compat_itimerspec(&new, utmr))
		return -EFAULT;
	ret = do_timerfd_settime(ufd, flags, &new, &old);
	if (ret)
		return ret;
	/* Copy the previous timer setting back only if the caller asked. */
	if (otmr && put_compat_itimerspec(otmr, &old))
		return -EFAULT;
	return ret;
}
0
245,707
// JNI entry point invoked when the native library is loaded.
// Caches the VM pointer, runs content-layer JNI initialization, and
// installs the content shell's main delegate.  Returns the required JNI
// version, or -1 to make the load fail.
JNI_EXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved) {
  base::android::InitVM(vm);

  if (!content::android::OnJNIOnLoadInit())
    return -1;

  content::SetContentMainDelegate(new content::ShellMainDelegate(true));
  return JNI_VERSION_1_4;
}
0
438,592
uint64_t Projection::PayloadSize() const { uint64_t size = EbmlElementSize(libwebm::kMkvProjection, static_cast<uint64>(type_)); if (private_data_length_ > 0 && private_data_ != NULL) { size += EbmlElementSize(libwebm::kMkvProjectionPrivate, private_data_, private_data_length_); } size += EbmlElementSize(libwebm::kMkvProjectionPoseYaw, pose_yaw_); size += EbmlElementSize(libwebm::kMkvProjectionPosePitch, pose_pitch_); size += EbmlElementSize(libwebm::kMkvProjectionPoseRoll, pose_roll_); return size; }
0
141,010
/// Installs the switch-statement jump table for this function, taking
/// ownership of the caller's vector.  The table must be non-empty.
void BytecodeFunctionGenerator::setJumpTable(
    std::vector<uint32_t> &&jumpTable) {
  assert(!jumpTable.empty() && "invoked with no jump table");

  jumpTable_ = std::move(jumpTable);
}
0
451,357
// Setting the same inline header twice must overwrite the first value
// and leave exactly one entry in the map.
TEST(HeaderMapImplTest, DoubleInlineSet) {
  TestRequestHeaderMapImpl headers;
  headers.setReferenceKey(Headers::get().ContentType, "blah");
  headers.setReferenceKey(Headers::get().ContentType, "text/html");
  EXPECT_EQ("text/html", headers.getContentTypeValue());
  EXPECT_EQ(1UL, headers.size());
}
0
363,566
irc_server_set_nick (struct t_irc_server *server, const char *nick) { struct t_irc_channel *ptr_channel; if (server->nick) free (server->nick); server->nick = (nick) ? strdup (nick) : NULL; /* set local variable "nick" for server and all channels/pv */ weechat_buffer_set (server->buffer, "localvar_set_nick", nick); for (ptr_channel = server->channels; ptr_channel; ptr_channel = ptr_channel->next_channel) { weechat_buffer_set (ptr_channel->buffer, "localvar_set_nick", nick); } weechat_bar_item_update ("input_prompt"); }
0
82,709
struct socket *tun_get_socket(struct file *file) { struct tun_file *tfile; if (file->f_op != &tun_fops) return ERR_PTR(-EINVAL); tfile = file->private_data; if (!tfile) return ERR_PTR(-EBADFD); return &tfile->socket; }
0
365,144
/* Cache a v1 directory (or running-routers document, when
 * @is_running_routers is set) generated at time @published, replacing the
 * previous cached copy.  Documents older than the allowed maximum age are
 * silently ignored.  Takes a copy of @directory. */
dirserv_set_cached_directory(const char *directory, time_t published,
                             int is_running_routers)
{
  time_t now = time(NULL);

  if (is_running_routers) {
    if (published >= now - MAX_V1_RR_AGE)
      set_cached_dir(&cached_runningrouters, tor_strdup(directory), published);
  } else {
    if (published >= now - MAX_V1_DIRECTORY_AGE) {
      /* Drop our reference to the old cached directory first. */
      cached_dir_decref(cached_directory);
      cached_directory = new_cached_dir(tor_strdup(directory), published);
    }
  }
}
0
143,733
/* Adjust an inode's delayed-allocation reservations after @used blocks
 * were actually allocated.  Releases the corresponding data and metadata
 * reservations, updates the superblock dirty-block counter, and settles
 * the quota: claiming the blocks when @quota_claim is set (writeback of a
 * delalloc range), or only the metadata when it is not (fallocate over an
 * already-reserved range). */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int mdb_free = 0, allocated_meta_blocks = 0;

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		/* Accounting bug: clamp and warn rather than underflow. */
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks\n",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	used += ei->i_allocated_meta_blocks;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	allocated_meta_blocks = ei->i_allocated_meta_blocks;
	ei->i_allocated_meta_blocks = 0;
	percpu_counter_sub(&sbi->s_dirtyblocks_counter, used);

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		mdb_free = ei->i_reserved_meta_blocks;
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
		percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem */
	if (quota_claim) {
		vfs_dq_claim_block(inode, used);
		if (mdb_free)
			vfs_dq_release_reservation_block(inode, mdb_free);
	} else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not update the quota for allocated blocks. But then
		 * converting an fallocate region to initialized region would
		 * have caused a metadata allocation. So claim quota for
		 * that
		 */
		if (allocated_meta_blocks)
			vfs_dq_claim_block(inode, allocated_meta_blocks);
		vfs_dq_release_reservation_block(inode, mdb_free + used);
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}
0
376,695
// Loop-invariant code motion for one block of a loop: any GVN-eligible
// instruction whose dependencies are not killed inside the loop and whose
// operands are all defined before the loop's pre-header is hoisted out of
// the loop.  Non-hoisted instructions accumulate their depends/changes
// flags into the "first time through the loop" sets used by later
// HTransitionElementsKind hoisting decisions.
void HGlobalValueNumberer::ProcessLoopBlock(
    HBasicBlock* block,
    HBasicBlock* loop_header,
    GVNFlagSet loop_kills,
    GVNFlagSet* first_time_depends,
    GVNFlagSet* first_time_changes) {
  HBasicBlock* pre_header = loop_header->predecessors()->at(0);
  GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
  TRACE_GVN_2("Loop invariant motion for B%d %s\n",
              block->block_id(),
              *GetGVNFlagsString(depends_flags));
  HInstruction* instr = block->first();
  while (instr != NULL) {
    HInstruction* next = instr->next();
    bool hoisted = false;
    if (instr->CheckFlag(HValue::kUseGVN)) {
      TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
                  instr->id(),
                  instr->Mnemonic(),
                  *GetGVNFlagsString(instr->gvn_flags()),
                  *GetGVNFlagsString(loop_kills));
      bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
      if (can_hoist && !graph()->use_optimistic_licm()) {
        // Conservative mode: only hoist from blocks that dominate all
        // loop exits.
        can_hoist = block->IsLoopSuccessorDominator();
      }

      if (can_hoist) {
        bool inputs_loop_invariant = true;
        for (int i = 0; i < instr->OperandCount(); ++i) {
          if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
            inputs_loop_invariant = false;
          }
        }

        if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
          TRACE_GVN_1("Hoisting loop invariant instruction %d\n", instr->id());
          // Move the instruction out of the loop.
          instr->Unlink();
          instr->InsertBefore(pre_header->end());
          if (instr->HasSideEffects()) removed_side_effects_ = true;
          hoisted = true;
        }
      }
    }
    if (!hoisted) {
      // If an instruction is not hoisted, we have to account for its side
      // effects when hoisting later HTransitionElementsKind instructions.
      GVNFlagSet previous_depends = *first_time_depends;
      GVNFlagSet previous_changes = *first_time_changes;
      first_time_depends->Add(instr->DependsOnFlags());
      first_time_changes->Add(instr->ChangesFlags());
      if (!(previous_depends == *first_time_depends)) {
        TRACE_GVN_1("Updated first-time accumulated %s\n",
                    *GetGVNFlagsString(*first_time_depends));
      }
      if (!(previous_changes == *first_time_changes)) {
        TRACE_GVN_1("Updated first-time accumulated %s\n",
                    *GetGVNFlagsString(*first_time_changes));
      }
    }
    instr = next;
  }
}
0
31,186
/* Probe FILENAME to guess its keydb resource type from its first bytes.
 * Sets *r_found when the file could be opened, and *r_openpgp when a
 * keybox header advertises OpenPGP content.  Returns the detected type,
 * or KEYDB_RESOURCE_TYPE_NONE if the file cannot be opened. */
static KeydbResourceType
rt_from_file (const char *filename, int *r_found, int *r_openpgp)
{
  u32 magic;
  unsigned char verbuf[4];
  FILE *fp;
  KeydbResourceType rt = KEYDB_RESOURCE_TYPE_NONE;

  *r_found = *r_openpgp = 0;
  fp = fopen (filename, "rb");
  if (fp)
    {
      *r_found = 1;

      if (fread (&magic, 4, 1, fp) == 1)
        {
          if (magic == 0x13579ace || magic == 0xce9a5713)
            ; /* GDBM magic (either endianness) - not a usable resource. */
          else if (fread (&verbuf, 4, 1, fp) == 1
                   && verbuf[0] == 1
                   && fread (&magic, 4, 1, fp) == 1
                   && !memcmp (&magic, "KBXf", 4))
            {
              /* Keybox file; bit 1 of the flags marks OpenPGP support. */
              if ((verbuf[3] & 0x02))
                *r_openpgp = 1;
              rt = KEYDB_RESOURCE_TYPE_KEYBOX;
            }
          else
            rt = KEYDB_RESOURCE_TYPE_KEYRING;
        }
      else /* Shorter than 4 bytes - assume a keyring. */
        rt = KEYDB_RESOURCE_TYPE_KEYRING;

      fclose (fp);
    }

  return rt;
}
0
417,378
/* Return the remote configured for the current branch
 * ("branch.<name>.remote"), falling back to "origin" for a detached HEAD
 * or when no remote is configured.  Caller must free the result.
 * Dies if HEAD cannot be resolved or is not under refs/heads/. */
static char *get_default_remote(void)
{
	char *dest = NULL, *ret;
	unsigned char sha1[20];
	struct strbuf sb = STRBUF_INIT;
	const char *refname = resolve_ref_unsafe("HEAD", 0, sha1, NULL);

	if (!refname)
		die(_("No such ref: %s"), "HEAD");

	/* detached HEAD */
	if (!strcmp(refname, "HEAD"))
		return xstrdup("origin");

	if (!skip_prefix(refname, "refs/heads/", &refname))
		die(_("Expecting a full ref name, got %s"), refname);

	strbuf_addf(&sb, "branch.%s.remote", refname);
	if (git_config_get_string(sb.buf, &dest))
		ret = xstrdup("origin");
	else
		ret = dest;

	strbuf_release(&sb);
	return ret;
}
0
353,956
/* Configure @pdev for a DeviceGray color model, installing the supplied
 * encode/decode procedures together with the default gray color-mapping
 * and component-index procs. */
set_gray_color_procs(gx_device * pdev,
                     dev_t_proc_encode_color((*encode_color), gx_device),
                     dev_t_proc_decode_color((*decode_color), gx_device))
{
    set_color_procs(pdev, encode_color, decode_color,
                    gx_default_DevGray_get_color_mapping_procs,
                    gx_default_DevGray_get_color_comp_index);
}
1
83,120
/*
 * Initialize the expression evaluator: variable and builtin-function
 * tables.  Called once at startup.
 */
eval_init(void)
{
    evalvars_init();
    func_init();

#ifdef EBCDIC
    /*
     * Sort the function table, to enable binary search.
     */
    sortFunctions();
#endif
}
0
426,341
DnD_TransportReqPacket(DnDTransportBuffer *buf, // IN DnDTransportPacketHeader **packet) // OUT { *packet = Util_SafeMalloc(DND_TRANSPORT_PACKET_HEADER_SIZE); (*packet)->type = DND_TRANSPORT_PACKET_TYPE_REQUEST; (*packet)->seqNum = buf->seqNum; (*packet)->totalSize = buf->totalSize; (*packet)->payloadSize = 0; (*packet)->offset = buf->offset; return DND_TRANSPORT_PACKET_HEADER_SIZE; }
0
13,144
/* Open a PostScript library/resource file, searching the library path.
 *
 * NOTE(review): this extract appears truncated - the `} else {` below has
 * no matching visible `if` (the upstream source has an absolute-path check
 * between the iodev fallback and this branch), and the function body ends
 * abruptly.  Verify against the original file before relying on it. */
lib_file_open(gs_file_path_ptr  lib_path, const gs_memory_t *mem, i_ctx_t *i_ctx_p,
              const char *fname, uint flen, char *buffer, int blen,
              uint *pclen, ref *pfile)
{   /* i_ctx_p is NULL running arg (@) files.
     * lib_path and mem are never NULL
     */
    bool starting_arg_file = (i_ctx_p == NULL) ? true : i_ctx_p->starting_arg_file;
    bool search_with_no_combine = false;
    bool search_with_combine = false;
    char fmode[2] = { 'r', 0};
    gx_io_device *iodev = iodev_default(mem);
    gs_main_instance *minst = get_minst_from_memory(mem);
    int code;

    /* when starting arg files (@ files) iodev_default is not yet set */
    if (iodev == 0)
        iodev = (gx_io_device *)gx_io_device_table[0];
        search_with_combine = false;
    } else {
        search_with_no_combine = starting_arg_file;
        search_with_combine = true;
    }
1
288,275
// Match user-agent stylesheet rules into |collector|: the default sheet
// (print or screen variant), plus the quirks-mode and view-source sheets
// when the document is in those modes, then watch-selector rules.
void StyleResolver::matchUARules(ElementRuleCollector& collector)
{
    collector.setMatchingUARules(true);

    // The simple default stylesheet may later be replaced by the full one,
    // so results matched against it must not be cached.
    if (CSSDefaultStyleSheets::simpleDefaultStyleSheet)
        collector.matchedResult().isCacheable = false;

    RuleSet* userAgentStyleSheet = m_medium->mediaTypeMatchSpecific("print")
        ? CSSDefaultStyleSheets::defaultPrintStyle
        : CSSDefaultStyleSheets::defaultStyle;
    matchUARules(collector, userAgentStyleSheet);

    // In quirks mode, match rules from the quirks user agent sheet.
    if (document().inQuirksMode())
        matchUARules(collector, CSSDefaultStyleSheets::defaultQuirksStyle);

    // View-source documents get the view-source style sheet.
    if (document().isViewSource())
        matchUARules(collector, CSSDefaultStyleSheets::viewSourceStyle());

    collector.setMatchingUARules(false);

    matchWatchSelectorRules(collector);
}
1
456,300
/* Discard a page-aligned range of a RAMBlock so the pages read back as
 * zero (and, for hugetlbfs/shmem, so a later access can trigger a
 * userfault).  Uses fallocate(PUNCH_HOLE) for file-backed blocks and
 * madvise(DONTNEED) for anonymous memory, as required.
 * Returns 0 on success or a negative errno. */
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
{
    int ret = -1;

    uint8_t *host_startaddr = rb->host + start;

    if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
        error_report("ram_block_discard_range: Unaligned start address: %p",
                     host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        bool need_madvise, need_fallocate;
        if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
            error_report("ram_block_discard_range: Unaligned length: %zx",
                         length);
            goto err;
        }

        errno = ENOTSUP; /* If we are missing MADVISE etc */

        /* The logic here is messy;
         *    madvise DONTNEED fails for hugepages
         *    fallocate works on hugepages and shmem
         */
        need_madvise = (rb->page_size == qemu_host_page_size);
        need_fallocate = rb->fd != -1;
        if (need_fallocate) {
            /* For a file, this causes the area of the file to be zero'd
             * if read, and for hugetlbfs also causes it to be unmapped
             * so a userfault will trigger.
             */
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
            ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            start, length);
            if (ret) {
                ret = -errno;
                error_report("ram_block_discard_range: Failed to fallocate "
                             "%s:%" PRIx64 " +%zx (%d)",
                             rb->idstr, start, length, ret);
                goto err;
            }
#else
            ret = -ENOSYS;
            error_report("ram_block_discard_range: fallocate not available/file"
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
            goto err;
#endif
        }
        if (need_madvise) {
            /* For normal RAM this causes it to be unmapped,
             * for shared memory it causes the local mapping to disappear
             * and to fall back on the file contents (which we just
             * fallocate'd away).
             */
#if defined(CONFIG_MADVISE)
            ret = madvise(host_startaddr, length, MADV_DONTNEED);
            if (ret) {
                ret = -errno;
                error_report("ram_block_discard_range: Failed to discard range "
                             "%s:%" PRIx64 " +%zx (%d)",
                             rb->idstr, start, length, ret);
                goto err;
            }
#else
            ret = -ENOSYS;
            error_report("ram_block_discard_range: MADVISE not available"
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
            goto err;
#endif
        }
        trace_ram_block_discard_range(rb->idstr, host_startaddr, length,
                                      need_madvise, need_fallocate, ret);
    } else {
        error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
                     "/%zx/" RAM_ADDR_FMT")",
                     rb->idstr, start, length, rb->used_length);
    }

err:
    return ret;
}
0
166,184
/*
 * Open a cloop (compressed loopback) image: read and validate the header
 * (block_size, n_blocks) and prepare the per-block offset table.
 *
 * NOTE(review): this snippet appears truncated -- 'i' is used below without
 * ever being initialized, and the loop that reads s->offsets[] from the file
 * is missing between the g_malloc() and the 'if (i > 0)' body.  Verify
 * against the full source before relying on this copy.
 */
static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVCloopState *s = bs->opaque;
    uint32_t offsets_size, max_compressed_block_size = 1, i;
    int ret;

    bs->read_only = 1;

    /* read header */
    ret = bdrv_pread(bs->file, 128, &s->block_size, 4);
    if (ret < 0) {
        return ret;
    }
    s->block_size = be32_to_cpu(s->block_size);
    if (s->block_size % 512) {
        error_setg(errp, "block_size %u must be a multiple of 512",
                   s->block_size);
        return -EINVAL;
    }
    if (s->block_size == 0) {
        error_setg(errp, "block_size cannot be zero");
        return -EINVAL;
    }

    /* cloop's create_compressed_fs.c warns about block sizes beyond 256 KB but
     * we can accept more. Prevent ridiculous values like 4 GB - 1 since we
     * need a buffer this big.
     */
    if (s->block_size > MAX_BLOCK_SIZE) {
        error_setg(errp, "block_size %u must be %u MB or less",
                   s->block_size,
                   MAX_BLOCK_SIZE / (1024 * 1024));
        return -EINVAL;
    }

    ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4);
    if (ret < 0) {
        return ret;
    }
    s->n_blocks = be32_to_cpu(s->n_blocks);

    /* read offsets */
    if (s->n_blocks > UINT32_MAX / sizeof(uint64_t)) {
        /* Prevent integer overflow */
        error_setg(errp, "n_blocks %u must be %zu or less",
                   s->n_blocks,
                   UINT32_MAX / sizeof(uint64_t));
        return -EINVAL;
    }
    offsets_size = s->n_blocks * sizeof(uint64_t);
    s->offsets = g_malloc(offsets_size);
    /* Track the largest compressed block so a single bounce buffer suffices. */
    if (i > 0) {
        uint32_t size = s->offsets[i] - s->offsets[i - 1];
        if (size > max_compressed_block_size) {
            max_compressed_block_size = size;
        }
    }
}
0
185,265
// Returns true when |domain| matches the filter's kDomainKey entry, i.e.
// when |domain| equals the filter domain or is a subdomain of it.  A filter
// without a domain key matches everything.
bool MatchFilter::MatchesDomain(const std::string& domain) {
  if (!details_->HasKey(keys::kDomainKey))
    return true;

  std::string filter_value;
  if (!details_->GetString(keys::kDomainKey, &filter_value))
    return false;

  // Normalize the filter domain to the dotted (domain-cookie) form.
  if (net::CookieMonster::DomainIsHostOnly(filter_value))
    filter_value.insert(0, ".");

  // Normalize the argument the same way before comparing.
  std::string candidate(domain);
  if (!net::CookieMonster::DomainIsHostOnly(candidate))
    candidate = candidate.substr(1);
  candidate.insert(0, ".");

  // Strip one label at a time; a match at any level means |domain| is the
  // filter domain itself or one of its subdomains.
  while (candidate.length() >= filter_value.length()) {
    if (candidate == filter_value)
      return true;
    const size_t next_dot = candidate.find('.', 1);  // Skip over leading dot.
    candidate.erase(0, next_dot);
  }
  return false;
}
0
77,266
/*
 * Advance a TRB ring index, wrapping back to 0 when the slot before the
 * link TRB (slot DWC3_TRB_NUM - 1) is reached.
 */
static void dwc3_ep_inc_trb(u8 *index)
{
	if (++(*index) == (DWC3_TRB_NUM - 1))
		*index = 0;
}
0
475,630
/*
 * Lockdep assertion helper: true when this net namespace's nftables
 * commit_mutex is held.  Compiles to an unconditional 'true' when
 * CONFIG_PROVE_LOCKING is off, so callers can assert unconditionally.
 */
static bool lockdep_commit_lock_is_held(const struct net *net)
{
#ifdef CONFIG_PROVE_LOCKING
	return lockdep_is_held(&nft_pernet(net)->commit_mutex);
#else
	return true;
#endif
}
0
25,573
/*
 * Dissect a GSM A-interface DTAP Call Control NOTIFY message.
 * Consists of a single mandatory Notification indicator IE, followed by
 * a check that no extraneous bytes remain.
 */
static void dtap_cc_notify(tvbuff_t *tvb, proto_tree *tree,
                           packet_info *pinfo _U_, guint32 offset, guint len)
{
    guint32 curr_offset;
    guint32 consumed;   /* updated by the ELEM_* dissection macros */
    guint curr_len;

    curr_offset = offset;
    curr_len = len;

    is_uplink = IS_UPLINK_FALSE;

    /* Mandatory Notification indicator IE (value part only) */
    ELEM_MAND_V(GSM_A_PDU_TYPE_DTAP, DE_NOT_IND, NULL);

    /* Flag any trailing bytes as extraneous data */
    EXTRANEOUS_DATA_CHECK(curr_len, 0, pinfo, &ei_gsm_a_dtap_extraneous_data);
}
0
386,974
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf) { struct usb_usbvision *usbvision = video_drvdata(file); int ret; ret = vidioc_try_fmt_vid_cap(file, priv, vf); if (ret) return ret; /* stop io in case it is already in progress */ if (usbvision->streaming == stream_on) { ret = usbvision_stream_interrupt(usbvision); if (ret) return ret; } usbvision_frames_free(usbvision); usbvision_empty_framequeues(usbvision); usbvision->cur_frame = NULL; /* by now we are committed to the new data... */ usbvision_set_output(usbvision, vf->fmt.pix.width, vf->fmt.pix.height); return 0; }
0
207,057
/*
 * Hand out the DPB's scratch output slot (the one past the dpbSize
 * reference slots) as the next decode target, and return its pixel data.
 */
u8* h264bsdAllocateDpbImage(dpbStorage_t *dpb)
{
/* Variables */

/* Code */

    /* The scratch slot must be idle: not pending display, not a reference. */
    ASSERT(!dpb->buffer[dpb->dpbSize].toBeDisplayed &&
           !IS_REFERENCE(dpb->buffer[dpb->dpbSize]));
    ASSERT(dpb->fullness <= dpb->dpbSize);

    dpb->currentOut = &dpb->buffer[dpb->dpbSize];

    return dpb->currentOut->data;
}
0
424,744
/*
 * Return 1 if 'file' exists (is openable) inside directory 'dir', else 0.
 * A trailing separator on 'dir' is tolerated.  Neither argument is modified.
 *
 * Fixes over the previous version:
 *  - no longer writes a NUL into the caller's 'dir' buffer to strip a
 *    trailing separator (that mutation leaked out to the caller);
 *  - no longer reads dir[-1] when 'dir' is an empty string.
 */
int isFileInDir(char *dir, char *file){
    size_t length, dirLength;
    char *fullpath = NULL;
    FILE *f = NULL;
    int foundFile = 0;

    dirLength = strlen(dir);
    /* Ignore a trailing separator without touching the caller's buffer. */
    if (dirLength > 0 && dir[dirLength - 1] == DIR_SEPARATOR) {
        dirLength--;
    }

    length = dirLength + strlen(file) + 2; /* 2 = separator + NUL */
    fullpath = (char *) malloc(length);
    if (NULL != fullpath) {
        memcpy(fullpath, dir, dirLength);
        fullpath[dirLength] = DIR_SEPARATOR;
        strcpy(fullpath + dirLength + 1, file);

        /* See if file exists - use fopen() for portability */
        f = fopen(fullpath, "rb");
        if (NULL != f) {
            foundFile = 1;
            fclose(f);
        }
        free(fullpath);
    }
    return foundFile;
}
0
203,250
// Dispose every output of |node|.  Must run on the main thread while
// holding the graph lock.
void AudioContext::disposeOutputs(AudioNode& node)
{
    ASSERT(isGraphOwner());
    ASSERT(isMainThread());

    for (unsigned index = 0; index < node.numberOfOutputs(); ++index)
        node.output(index)->dispose();
}
0
90,848
/*
 * Merge the data area of a secondary Transact2 response (psecond) onto the
 * end of the primary response buffer (pTargetSMB), updating the target's
 * DataCount, byte count and SMB length.
 * Returns 0 when the transaction is complete, 1 when more secondary
 * responses are expected, negative on error.
 */
static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
{
	struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond;
	struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)pTargetSMB;
	char *data_area_of_target;
	char *data_area_of_buf2;
	int remaining;
	__u16 byte_count, total_data_size, total_in_buf, total_in_buf2;

	total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);

	if (total_data_size !=
	    get_unaligned_le16(&pSMB2->t2_rsp.TotalDataCount))
		cFYI(1, "total data size of primary and secondary t2 differ");

	total_in_buf = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);

	remaining = total_data_size - total_in_buf;

	if (remaining < 0)
		return -EINVAL;

	if (remaining == 0) /* nothing to do, ignore */
		return 0;

	total_in_buf2 = get_unaligned_le16(&pSMB2->t2_rsp.DataCount);
	/* NOTE(review): a server-supplied DataCount larger than 'remaining'
	 * is only logged here, yet total_in_buf2 is still used as the memcpy
	 * length below -- looks like it could overflow the target buffer.
	 * TODO confirm against the buffer-size guarantees of the caller. */
	if (remaining < total_in_buf2) {
		cFYI(1, "transact2 2nd response contains too much data");
	}

	/* find end of first SMB data area */
	data_area_of_target = (char *)&pSMBt->hdr.Protocol +
				get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
	/* validate target area */

	data_area_of_buf2 = (char *)&pSMB2->hdr.Protocol +
				get_unaligned_le16(&pSMB2->t2_rsp.DataOffset);

	data_area_of_target += total_in_buf;

	/* copy second buffer into end of first buffer */
	memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
	total_in_buf += total_in_buf2;
	put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount);
	byte_count = get_bcc_le(pTargetSMB);
	byte_count += total_in_buf2;
	put_bcc_le(byte_count, pTargetSMB);

	byte_count = pTargetSMB->smb_buf_length;
	byte_count += total_in_buf2;

	/* BB also add check that we are not beyond maximum buffer size */

	pTargetSMB->smb_buf_length = byte_count;

	if (remaining == total_in_buf2) {
		cFYI(1, "found the last secondary response");
		return 0; /* we are done */
	} else /* more responses to go */
		return 1;
}
0
321,662
static void spapr_cpu_core_realize(DeviceState *dev, Error **errp) { sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev)); CPUCore *cc = CPU_CORE(OBJECT(dev)); const char *typename = object_class_get_name(sc->cpu_class); size_t size = object_type_get_instance_size(typename); Error *local_err = NULL; Object *obj; int i; sc->threads = g_malloc0(size * cc->nr_threads); for (i = 0; i < cc->nr_threads; i++) { char id[32]; void *obj = sc->threads + i * size; object_initialize(obj, size, typename); snprintf(id, sizeof(id), "thread[%d]", i); object_property_add_child(OBJECT(sc), id, obj, &local_err); if (local_err) { goto err; } } object_child_foreach(OBJECT(dev), spapr_cpu_core_realize_child, &local_err); if (local_err) { goto err; } else { return; } err: while (--i >= 0) { obj = sc->threads + i * size; object_unparent(obj); } g_free(sc->threads); error_propagate(errp, local_err); }
1
467,317
// An exclusion projection that also excludes _id must not retain _id or the
// excluded field, while unrelated fields are still retained.
TEST(QueryProjectionTest, IdExclusionWithExclusionProjectionDoesNotPreserveId) {
    const auto proj = createProjection("{}", "{_id: 0, a: 0}");

    ASSERT_FALSE(proj.isFieldRetainedExactly("_id"));
    ASSERT_FALSE(proj.isFieldRetainedExactly("a"));
    ASSERT_TRUE(proj.isFieldRetainedExactly("b"));
}
0
26,046
// Total serialized length of a MIME header: the lengths of all live fields
// across every field block, plus 2 for the terminating CRLF.
int
mime_hdr_length_get(MIMEHdrImpl *mh)
{
  unsigned int total = 2;

  for (MIMEFieldBlockImpl *blk = &(mh->m_first_fblock); blk != nullptr; blk = blk->m_next) {
    for (unsigned int slot = 0; slot < blk->m_freetop; slot++) {
      MIMEField *fld = &(blk->m_field_slots[slot]);
      if (fld->is_live()) {
        total += mime_field_length_get(fld);
      }
    }
  }

  return total;
}
0
345,224
/*
 * Parse an ISO-8601 timestamp ("YYYYMMDDTHH:MM:SS"; dashes in the date part
 * are tolerated and stripped) into *value as UTC.  Returns 0 on success,
 * -1 on malformed input.
 *
 * Fixes over the previous version:
 *  - the digit-validation macro checked text[i] for every field, while the
 *    field bytes actually read were text[i+4], [i+6], [i+9], [i+12] and
 *    [i+15] -- so month/day/hour/min/sec were never validated and non-digit
 *    bytes silently corrupted the result;
 *  - the dash-stripped local copy 'buf' was never NUL-terminated;
 *  - inputs shorter than the 17 bytes the fixed-offset reads require were
 *    read out of bounds.
 */
static int date_from_ISO8601 (const char *text, time_t * value) {
    struct tm tm;
    int n;
    int i;
    char buf[30];

    /* Strip '-' separators into a bounded local copy. */
    if (strchr (text, '-')) {
        char *p = (char *) text, *p2 = buf;
        while (p && *p) {
            if (*p != '-') {
                *p2 = *p;
                p2++;
                if ((size_t)(p2 - buf) >= sizeof(buf)) {
                    return -1;
                }
            }
            p++;
        }
        *p2 = '\0';   /* BUG FIX: buf was previously left unterminated */
        text = buf;
    }

    /* BUG FIX: the parsers below read up to text[16]. */
    if (strlen(text) < 17) {
        return -1;
    }

    tm.tm_isdst = -1;

#define XMLRPC_IS_NUMBER(x) if (x < '0' || x > '9') return -1;

    n = 1000;
    tm.tm_year = 0;
    for(i = 0; i < 4; i++) {
        XMLRPC_IS_NUMBER(text[i])
        tm.tm_year += (text[i]-'0')*n;
        n /= 10;
    }
    n = 10;
    tm.tm_mon = 0;
    for(i = 0; i < 2; i++) {
        XMLRPC_IS_NUMBER(text[i+4])  /* BUG FIX: validate the byte read */
        tm.tm_mon += (text[i+4]-'0')*n;
        n /= 10;
    }
    tm.tm_mon --;
    n = 10;
    tm.tm_mday = 0;
    for(i = 0; i < 2; i++) {
        XMLRPC_IS_NUMBER(text[i+6])
        tm.tm_mday += (text[i+6]-'0')*n;
        n /= 10;
    }
    n = 10;
    tm.tm_hour = 0;
    for(i = 0; i < 2; i++) {
        XMLRPC_IS_NUMBER(text[i+9])
        tm.tm_hour += (text[i+9]-'0')*n;
        n /= 10;
    }
    n = 10;
    tm.tm_min = 0;
    for(i = 0; i < 2; i++) {
        XMLRPC_IS_NUMBER(text[i+12])
        tm.tm_min += (text[i+12]-'0')*n;
        n /= 10;
    }
    n = 10;
    tm.tm_sec = 0;
    for(i = 0; i < 2; i++) {
        XMLRPC_IS_NUMBER(text[i+15])
        tm.tm_sec += (text[i+15]-'0')*n;
        n /= 10;
    }

    tm.tm_year -= 1900;

    *value = mkgmtime(&tm);

    return 0;
}
1
167,476
/*
 * Self-test for BN_kronecker(): for a random 512-bit prime b (with random
 * sign) and num0 random values a, compare the Kronecker/Jacobi symbol
 * against the Legendre symbol computed as a^((|b|-1)/2) mod |b|.
 * Returns 1 on success, 0 on failure (mismatch or BN_* error).
 */
int test_kron(BIO *bp, BN_CTX *ctx)
	{
	BN_GENCB cb;
	BIGNUM *a,*b,*r,*t;
	int i;
	int legendre, kronecker;
	int ret = 0;

	a = BN_new();
	b = BN_new();
	r = BN_new();
	t = BN_new();
	if (a == NULL || b == NULL || r == NULL || t == NULL) goto err;

	BN_GENCB_set(&cb, genprime_cb, NULL);

	/* We test BN_kronecker(a, b, ctx) just for  b  odd (Jacobi symbol).
	 * In this case we know that if  b  is prime, then BN_kronecker(a, b, ctx)
	 * is congruent to $a^{(b-1)/2}$, modulo $b$ (Legendre symbol).
	 * So we generate a random prime  b  and compare these values
	 * for a number of random  a's.  (That is, we run the Solovay-Strassen
	 * primality test to confirm that  b  is prime, except that we
	 * don't want to test whether  b  is prime but whether BN_kronecker
	 * works.) */

	if (!BN_generate_prime_ex(b, 512, 0, NULL, NULL, &cb)) goto err;
	b->neg = rand_neg();
	putc('\n', stderr);

	for (i = 0; i < num0; i++)
		{
		if (!BN_bntest_rand(a, 512, 0, 0)) goto err;
		a->neg = rand_neg();

		/* t := (|b|-1)/2  (note that b is odd) */
		if (!BN_copy(t, b)) goto err;
		t->neg = 0;
		if (!BN_sub_word(t, 1)) goto err;
		if (!BN_rshift1(t, t)) goto err;
		/* r := a^t mod b */
		b->neg=0;

		if (!BN_mod_exp_recp(r, a, t, b, ctx)) goto err;
		b->neg=1;

		/* Map a^((|b|-1)/2) mod |b| in {0, 1, |b|-1} to the Legendre
		 * symbol in {0, 1, -1}; anything else means b was not prime
		 * or the exponentiation is broken. */
		if (BN_is_word(r, 1))
			legendre = 1;
		else if (BN_is_zero(r))
			legendre = 0;
		else
			{
			if (!BN_add_word(r, 1)) goto err;
			if (0 != BN_ucmp(r, b))
				{
				fprintf(stderr, "Legendre symbol computation failed\n");
				goto err;
				}
			legendre = -1;
			}

		kronecker = BN_kronecker(a, b, ctx);
		if (kronecker < -1) goto err;
		/* we actually need BN_kronecker(a, |b|) */
		if (a->neg && b->neg) kronecker = -kronecker;

		if (legendre != kronecker)
			{
			fprintf(stderr, "legendre != kronecker; a = ");
			BN_print_fp(stderr, a);
			fprintf(stderr, ", b = ");
			BN_print_fp(stderr, b);
			fprintf(stderr, "\n");
			goto err;
			}

		putc('.', stderr);
		fflush(stderr);
		}

	putc('\n', stderr);
	fflush(stderr);
	ret = 1;
 err:
	if (a != NULL) BN_free(a);
	if (b != NULL) BN_free(b);
	if (r != NULL) BN_free(r);
	if (t != NULL) BN_free(t);
	return ret;
	}
0
80,021
/* Build (lazily, once per request) the Rack environment Hash for 'req' and
 * cache it on req->env.  Populates the CGI-style keys, the HTTP_* headers,
 * and the rack.* entries, including the hijack and (optionally) early-hints
 * support.  Returns the cached Hash on subsequent calls.
 */
request_env(agooReq req, VALUE self) {
    if (Qnil == (VALUE)req->env) {
	volatile VALUE	env = rb_hash_new();

	// As described by
	// http://www.rubydoc.info/github/rack/rack/master/file/SPEC and
	// https://github.com/rack/rack/blob/master/SPEC.

	rb_hash_aset(env, request_method_val, req_method(req));
	rb_hash_aset(env, script_name_val, req_script_name(req));
	rb_hash_aset(env, path_info_val, req_path_info(req));
	rb_hash_aset(env, query_string_val, req_query_string(req));
	rb_hash_aset(env, remote_addr_val, req_remote_addr(req));
	rb_hash_aset(env, server_port_val, req_server_port(req));
	rb_hash_aset(env, server_name_val, req_server_name(req));
	fill_headers(req, env);
	rb_hash_aset(env, rack_version_val, rack_version_val_val);
	rb_hash_aset(env, rack_url_scheme_val, req_rack_url_scheme(req));
	rb_hash_aset(env, rack_input_val, req_rack_input(req));
	rb_hash_aset(env, rack_errors_val, req_rack_errors(req));
	rb_hash_aset(env, rack_multithread_val, req_rack_multithread(req));
	rb_hash_aset(env, rack_multiprocess_val, Qfalse);
	rb_hash_aset(env, rack_run_once_val, Qfalse);
	rb_hash_aset(env, rack_logger_val, req_rack_logger(req));
	rb_hash_aset(env, rack_upgrade_val, req_rack_upgrade(req));
	rb_hash_aset(env, rack_hijackq_val, Qtrue);

	// TBD should return IO on #call and set hijack_io on env object that
	// has a call method that wraps the req->res->con->sock then set the
	// sock to 0 or maybe con. mutex? env[rack.hijack_io] = IO.new(sock,
	// "rw") - maybe it works.
	//
	// set a flag on con to indicate it has been hijacked
	// then set sock to 0 in con loop and destroy con

	rb_hash_aset(env, rack_hijack_val, self);
	rb_hash_aset(env, rack_hijack_io_val, Qnil);
	if (agoo_server.rack_early_hints) {
	    volatile VALUE	eh = agoo_early_hints_new(req);

	    rb_hash_aset(env, early_hints_val, eh);
	}
	req->env = (void*)env;
    }
    return (VALUE)req->env;
}
0
43,775
/*
 * Read characters from the redo buffer, one call per (possibly multi-byte)
 * character.  With init != 0, (re)position the read pointer at the start of
 * "old_redobuff" (old_redo != 0) or "redobuff" and return OK/FAIL; with
 * init == 0, return the next decoded character (NUL at end of buffer).
 * Undoes the K_SPECIAL/CSI escaping performed by add_char_buff().
 */
read_redo(int init, int old_redo)
{
    static buffblock_T	*bp;
    static char_u	*p;
    int			c;
    int			n;
    char_u		buf[MB_MAXBYTES + 1];
    int			i;

    if (init)
    {
	if (old_redo)
	    bp = old_redobuff.bh_first.b_next;
	else
	    bp = redobuff.bh_first.b_next;
	if (bp == NULL)
	    return FAIL;
	p = bp->b_str;
	return OK;
    }
    if ((c = *p) != NUL)
    {
	/* Reverse the conversion done by add_char_buff() */
	/* For a multi-byte character get all the bytes and return the
	 * converted character. */
	if (has_mbyte && (c != K_SPECIAL || p[1] == KS_SPECIAL))
	    n = MB_BYTE2LEN_CHECK(c);
	else
	    n = 1;
	for (i = 0; ; ++i)
	{
	    if (c == K_SPECIAL) /* special key or escaped K_SPECIAL */
	    {
		c = TO_SPECIAL(p[1], p[2]);
		p += 2;
	    }
#ifdef FEAT_GUI
	    if (c == CSI)	/* escaped CSI */
		p += 2;
#endif
	    /* Advance; follow the chain into the next buffer block when the
	     * current block's string is exhausted. */
	    if (*++p == NUL && bp->b_next != NULL)
	    {
		bp = bp->b_next;
		p = bp->b_str;
	    }
	    buf[i] = c;
	    if (i == n - 1)	/* last byte of a character */
	    {
		if (n != 1)
		    c = (*mb_ptr2char)(buf);
		break;
	    }
	    c = *p;
	    if (c == NUL)	/* cannot happen? */
		break;
	}
    }

    return c;
}
0
282,929
/*
 * Return cipher number 'u' from the static SSLv3 cipher table, or NULL when
 * 'u' is out of range.  The table is indexed in reverse order.
 */
const SSL_CIPHER *ssl3_get_cipher(unsigned int u)
{
    if (u >= SSL3_NUM_CIPHERS)
        return (NULL);
    return (&(ssl3_ciphers[SSL3_NUM_CIPHERS - 1 - u]));
}
0
304,181
/*
 * GRO receive handler for IPv4: validate the header, match this skb against
 * the packets already held on the GRO list (same protocol and addresses),
 * record per-packet flush conditions, then hand off to the next-layer
 * protocol's gro_receive callback.
 */
static struct sk_buff **inet_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	const struct iphdr *iph;
	unsigned int hlen;
	unsigned int off;
	unsigned int id;
	int flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	proto = iph->protocol;

	rcu_read_lock();
	ops = rcu_dereference(inet_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	/* Only plain IPv4 with a 20-byte header (version 4, IHL 5). */
	if (*(u8 *)iph != 0x45)
		goto out_unlock;

	if (unlikely(ip_fast_csum((u8 *)iph, 5)))
		goto out_unlock;

	id = ntohl(*(__be32 *)&iph->id);
	/* Flush unless tot_len matches the GRO length and DF is set. */
	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
	id >>= 16;

	for (p = *head; p; p = p->next) {
		struct iphdr *iph2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct iphdr *)(p->data + off);
		/* The above works because, with the exception of the top
		 * (inner most) layer, we only aggregate pkts with the same
		 * hdr length so all the hdrs we'll need to verify will start
		 * at the same offset.
		 */
		if ((iph->protocol ^ iph2->protocol) |
		    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
		    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* All fields must match except length and checksum. */
		NAPI_GRO_CB(p)->flush |=
			(iph->ttl ^ iph2->ttl) |
			(iph->tos ^ iph2->tos) |
			((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));

		/* Save the IP ID check to be included later when we get to
		 * the transport layer so only the inner most IP ID is checked.
		 * This is because some GSO/TSO implementations do not
		 * correctly increment the IP ID for the outer hdrs.
		 */
		NAPI_GRO_CB(p)->flush_id =
			    ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
		NAPI_GRO_CB(p)->flush |= flush;
	}

	NAPI_GRO_CB(skb)->flush |= flush;
	skb_set_network_header(skb, off);
	/* The above will be needed by the transport layer if there is one
	 * immediately following this IP hdr.
	 */

	/* Note : No need to call skb_gro_postpull_rcsum() here,
	 * as we already checked checksum over ipv4 header was 0
	 */
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
0
287,040
/*
 * Replace 'page' with 'newpage' in the address_space's radix tree as part of
 * page migration.  Verifies the reference count matches the expected value
 * (no concurrent users), freezes the page's refcount, swaps the radix-tree
 * slot, and transfers cache/zone accounting.
 * Returns MIGRATEPAGE_SUCCESS or -EAGAIN if the page is busy.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count)
{
	int expected_count = 1 + extra_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		set_page_memcg(newpage, page_memcg(page));
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
 					page_index(page));

	/* one ref for the cache mapping, one per private buffer */
	expected_count += 1 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * was moved, we later failed to lock the buffers and could not move
	 * the mapping back due to an elevated page count, we would have to
	 * block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_unfreeze_refs(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page:
	 * no turning back from here.
	 */
	set_page_memcg(newpage, page_memcg(page));
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_unfreeze_refs(page, expected_count - 1);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (!PageSwapCache(page) && PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return MIGRATEPAGE_SUCCESS;
}
1
520,557
bool Arg_comparator::set_cmp_func_datetime() { THD *thd= current_thd; m_compare_collation= &my_charset_numeric; func= is_owner_equal_func() ? &Arg_comparator::compare_e_datetime : &Arg_comparator::compare_datetime; a= cache_converted_constant(thd, a, &a_cache, compare_type_handler()); b= cache_converted_constant(thd, b, &b_cache, compare_type_handler()); return false; }
0
163,216
// Propagate a discontinuity to every stream of this program.  A time
// discontinuity that carries an explicit media time invalidates the cached
// first PTS so it is re-anchored on the next packet.
void ATSParser::Program::signalDiscontinuity(
        DiscontinuityType type, const sp<AMessage> &extra) {
    int64_t mediaTimeUs;
    const bool resetFirstPTS =
        (type & DISCONTINUITY_TIME)
            && extra != NULL
            && extra->findInt64(
                    IStreamListener::kKeyMediaTimeUs, &mediaTimeUs);
    if (resetFirstPTS) {
        mFirstPTSValid = false;
    }

    for (size_t index = 0; index < mStreams.size(); ++index) {
        mStreams.editValueAt(index)->signalDiscontinuity(type, extra);
    }
}
0
403,550
/*
 * Early ACPI boot: process the MADT and do hardware-reduced mode init.
 * Returns 1 (skipped) when ACPI is disabled, 0 otherwise.
 */
int __init early_acpi_boot_init(void)
{
	/* Nothing to do when ACPI has been disabled on the command line. */
	if (acpi_disabled)
		return 1;

	/* Process the Multiple APIC Description Table (MADT), if present. */
	early_acpi_process_madt();

	/* Hardware-reduced ACPI mode initialization. */
	acpi_reduced_hw_init();

	return 0;
}
0
133,919
/* Output-buffering callback that converts the response body from the
 * internal encoding to the configured HTTP output encoding.  On the START
 * phase it (re)creates the buffer converter and adjusts the Content-Type
 * charset; on the END phase it flushes and destroys the converter.  Returns
 * the (possibly converted) contents.
 */
String HHVM_FUNCTION(mb_output_handler,
                     const String& contents,
                     int status) {
  mbfl_string string, result;
  int last_feed;

  mbfl_encoding *encoding = MBSTRG(current_http_output_encoding);

  /* start phase only */
  if (status & k_PHP_OUTPUT_HANDLER_START) {
    /* delete the converter just in case. */
    if (MBSTRG(outconv)) {
      MBSTRG(illegalchars) += mbfl_buffer_illegalchars(MBSTRG(outconv));
      mbfl_buffer_converter_delete(MBSTRG(outconv));
      MBSTRG(outconv) = nullptr;
    }
    if (encoding == nullptr) {
      return contents;
    }

    /* analyze mime type */
    String mimetype = g_context->getMimeType();
    if (!mimetype.empty()) {
      const char *charset = encoding->mime_name;
      if (charset) {
        g_context->setContentType(mimetype, charset);
      }
      /* activate the converter */
      MBSTRG(outconv) = mbfl_buffer_converter_new2
        (MBSTRG(current_internal_encoding), encoding, 0);
    }
  }

  /* just return if the converter is not activated. */
  if (MBSTRG(outconv) == nullptr) {
    return contents;
  }

  /* flag */
  last_feed = ((status & k_PHP_OUTPUT_HANDLER_END) != 0);

  /* mode */
  mbfl_buffer_converter_illegal_mode
    (MBSTRG(outconv), MBSTRG(current_filter_illegal_mode));
  mbfl_buffer_converter_illegal_substchar
    (MBSTRG(outconv), MBSTRG(current_filter_illegal_substchar));

  /* feed the string */
  mbfl_string_init(&string);
  string.no_language = MBSTRG(current_language);
  string.no_encoding = MBSTRG(current_internal_encoding)->no_encoding;
  string.val = (unsigned char *)contents.data();
  string.len = contents.size();
  mbfl_buffer_converter_feed(MBSTRG(outconv), &string);
  if (last_feed) {
    mbfl_buffer_converter_flush(MBSTRG(outconv));
  }
  /* get the converter output, and return it */
  mbfl_buffer_converter_result(MBSTRG(outconv), &result);

  /* delete the converter if it is the last feed. */
  if (last_feed) {
    MBSTRG(illegalchars) += mbfl_buffer_illegalchars(MBSTRG(outconv));
    mbfl_buffer_converter_delete(MBSTRG(outconv));
    MBSTRG(outconv) = nullptr;
  }

  return String(reinterpret_cast<char*>(result.val),
                result.len, AttachString);
}
0
170,035
/*
 * Build a "FILE:certfile[,keyfile]" credential-store name.  The keyfile
 * part is appended only when keyfile is non-NULL.  Returns a heap-allocated
 * string the caller must free, or NULL on allocation failure.
 */
reassemble_files_name(const char *certfile, const char *keyfile)
{
    char *ret;

    if (keyfile == NULL) {
	if (asprintf(&ret, "FILE:%s", certfile) < 0)
	    return NULL;
    } else {
	if (asprintf(&ret, "FILE:%s,%s", certfile, keyfile) < 0)
	    return NULL;
    }
    return ret;
}
0
156,314
/* Emit every variable currently registered in the protocol-variable table
 * as an event, via dump_var_hash_as_event(). */
dump_config_as_events() {
    g_hash_table_foreach(uzbl.comm.proto_var, dump_var_hash_as_event, NULL);
}
0
8,089
/*
 * Decode one ZRLE rectangle 'r': read the zlib-compressed payload length,
 * then decode the rectangle as 64x64 tiles.  Each tile starts with a mode
 * byte (bit 7 = RLE, low 7 bits = palette size) selecting raw, solid,
 * packed-palette, plain-RLE or palette-RLE encoding.
 * (PIXEL_T/READ_PIXEL/BPP/CPIXEL are supplied by the including translation
 * unit for each pixel depth.)
 *
 * NOTE(review): 'length' comes straight off the wire and is handed to the
 * zlib stream without an upper-bound sanity check -- presumably bounded by
 * the underlying stream; TODO confirm.
 */
void ZRLE_DECODE (const Rect& r, rdr::InStream* is,
                  rdr::ZlibInStream* zis,
                  const PixelFormat& pf, ModifiablePixelBuffer* pb)
{
  int length = is->readU32();
  zis->setUnderlying(is, length);
  Rect t;
  PIXEL_T buf[64 * 64];

  for (t.tl.y = r.tl.y; t.tl.y < r.br.y; t.tl.y += 64) {

    t.br.y = __rfbmin(r.br.y, t.tl.y + 64);

    for (t.tl.x = r.tl.x; t.tl.x < r.br.x; t.tl.x += 64) {

      t.br.x = __rfbmin(r.br.x, t.tl.x + 64);

      int mode = zis->readU8();
      bool rle = mode & 128;
      int palSize = mode & 127;
      PIXEL_T palette[128];

      for (int i = 0; i < palSize; i++) {
        palette[i] = READ_PIXEL(zis);
      }

      if (palSize == 1) {
        /* Solid tile: a single palette entry fills the whole tile. */
        PIXEL_T pix = palette[0];
        pb->fillRect(pf, t, &pix);
        continue;
      }

      if (!rle) {
        if (palSize == 0) {

          // raw

#ifdef CPIXEL
          for (PIXEL_T* ptr = buf; ptr < buf+t.area(); ptr++) {
            *ptr = READ_PIXEL(zis);
          }
#else
          zis->readBytes(buf, t.area() * (BPP / 8));
#endif

        } else {

          // packed pixels
          /* Bits per packed pixel depends on palette size: 1, 2, 4 or 8. */
          int bppp = ((palSize > 16) ? 8 :
                      ((palSize > 4) ? 4 : ((palSize > 2) ? 2 : 1)));

          PIXEL_T* ptr = buf;

          for (int i = 0; i < t.height(); i++) {
            PIXEL_T* eol = ptr + t.width();
            rdr::U8 byte = 0;
            rdr::U8 nbits = 0;

            while (ptr < eol) {
              if (nbits == 0) {
                byte = zis->readU8();
                nbits = 8;
              }
              nbits -= bppp;
              rdr::U8 index = (byte >> nbits) & ((1 << bppp) - 1) & 127;
              *ptr++ = palette[index];
            }
          }
        }

      } else {

        if (palSize == 0) {

          // plain RLE

          PIXEL_T* ptr = buf;
          PIXEL_T* end = ptr + t.area();
          while (ptr < end) {
            PIXEL_T pix = READ_PIXEL(zis);
            int len = 1;
            int b;
            /* Run length is 1 + sum of bytes; 255 means "continue". */
            do {
              b = zis->readU8();
              len += b;
            } while (b == 255);

            if (end - ptr < len) {
              throw Exception ("ZRLE decode error");
            }

            while (len-- > 0) *ptr++ = pix;

          }
        } else {

          // palette RLE

          PIXEL_T* ptr = buf;
          PIXEL_T* end = ptr + t.area();
          while (ptr < end) {
            int index = zis->readU8();
            int len = 1;
            if (index & 128) {
              int b;
              do {
                b = zis->readU8();
                len += b;
              } while (b == 255);

              if (end - ptr < len) {
                throw Exception ("ZRLE decode error");
              }
            }

            index &= 127;

            PIXEL_T pix = palette[index];

            while (len-- > 0) *ptr++ = pix;
          }
        }
      }

      pb->imageRect(pf, t, buf);
    }
  }

  zis->removeUnderlying();
}
144,667
/*
 * Return the socket at position 'pos' in the ping hash-table iteration for
 * this seq_file, or NULL when the sequence is shorter than 'pos'.
 */
static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = ping_get_first(seq, 0);

	while (sk != NULL && pos != 0) {
		sk = ping_get_next(seq, sk);
		if (sk == NULL)
			break;
		--pos;
	}

	return pos ? NULL : sk;
}
0
168,571
// IPC handler: records the window-size limit below which scrollbars are
// disabled; the value is consulted elsewhere when sizing the view.
void RenderViewImpl::OnDisableScrollbarsForSmallWindows(
    const gfx::Size& disable_scrollbar_size_limit) {
  disable_scrollbars_size_limit_ = disable_scrollbar_size_limit;
}
0
60,508
int dir_size(const std::string& path) { std::vector<std::string> files, dirs; get_files_in_dir(path, &files, &dirs, ENTIRE_FILE_PATH); int res = 0; BOOST_FOREACH(const std::string& file_path, files) { res += file_size(file_path); } BOOST_FOREACH(const std::string& dir_path, dirs) { // FIXME: this could result in infinite recursion with symlinks!! res += dir_size(dir_path); } return res; }
0
286,768
/*
 * Decode one AASC (Autodesk Animator Studio Codec) frame.  The packet
 * starts with a 4-byte little-endian compression tag: 0 = raw bottom-up
 * 24bpp lines padded to a 4-byte stride, 1 = MS-RLE.
 *
 * Fixes over the previous version (out-of-bounds reads on truncated
 * packets):
 *  - reject packets smaller than the 4-byte tag before AV_RL32();
 *  - for raw frames, verify the packet holds every line before memcpy.
 */
static int aasc_decode_frame(AVCodecContext *avctx,
                             void *data, int *got_frame,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    AascContext *s     = avctx->priv_data;
    int compr, i, stride, ret;

    /* BUG FIX: packet must at least contain the compression tag. */
    if (buf_size < 4)
        return AVERROR_INVALIDDATA;

    s->frame.reference    = 1;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
                            FF_BUFFER_HINTS_PRESERVE |
                            FF_BUFFER_HINTS_REUSABLE;
    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return ret;
    }

    compr     = AV_RL32(buf);
    buf      += 4;
    buf_size -= 4;
    switch (compr) {
    case 0:
        stride = (avctx->width * 3 + 3) & ~3;
        /* BUG FIX: all lines must fit in the packet; the last line only
         * needs width * 3 bytes rather than a full stride. */
        if (buf_size < stride * (avctx->height - 1) + avctx->width * 3) {
            av_log(avctx, AV_LOG_ERROR, "Insufficient data for raw frame\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = avctx->height - 1; i >= 0; i--) {
            memcpy(s->frame.data[0] + i * s->frame.linesize[0],
                   buf, avctx->width * 3);
            buf += stride;
        }
        break;
    case 1:
        bytestream2_init(&s->gb, buf, buf_size);
        ff_msrle_decode(avctx, (AVPicture *)&s->frame, 8, &s->gb);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
        return AVERROR_INVALIDDATA;
    }

    *got_frame = 1;
    *(AVFrame *)data = s->frame;

    return buf_size;
}
1
474,209
/* Library destructor, run automatically at unload: release external token
 * plugins, the crypto backend, and the RNG state.  NOTE(review): the call
 * order appears deliberate (tokens may use the backend) -- confirm before
 * reordering. */
static void __attribute__((destructor)) libcryptsetup_exit(void)
{
	crypt_token_unload_external_all(NULL);

	crypt_backend_destroy();
	crypt_random_exit();
}
0
271,218
/* Thaw a frozen ext2 filesystem.  Rewriting the superblock is sufficient
 * here: it clears the EXT2_VALID_FS flag set at freeze time. */
static int ext2_unfreeze(struct super_block *sb)
{
	/* Just write sb to clear EXT2_VALID_FS flag */
	ext2_write_super(sb);

	return 0;
}
0
103,915
// Kernel entry point for ADD: dispatch on the output tensor's type to the
// float or quantized implementation; unsupported types report an error.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data);

  TFLITE_DCHECK(node->user_data != nullptr);
  const OpData* data = static_cast<const OpData*>(node->user_data);

  const TfLiteEvalTensor* input1 =
      tflite::micro::GetEvalInput(context, node, kInputTensor1);
  const TfLiteEvalTensor* input2 =
      tflite::micro::GetEvalInput(context, node, kInputTensor2);
  TfLiteEvalTensor* output =
      tflite::micro::GetEvalOutput(context, node, kOutputTensor);

  switch (output->type) {
    case kTfLiteFloat32:
      EvalAdd(context, node, params, data, input1, input2, output);
      break;
    case kTfLiteUInt8:
    case kTfLiteInt8:
      TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data,
                                                  input1, input2, output));
      break;
    default:
      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
                         TfLiteTypeGetName(output->type), output->type);
      return kTfLiteError;
  }

  return kTfLiteOk;
}
0
435,281
/*
 * Transfer up to min(s->dbc, current request's dma_len) bytes between guest
 * memory and the current SCSI request's buffer.  'out' selects direction
 * (guest -> device when true).  Builds the 64-bit DMA address from dnad and
 * whichever upper-bits register the scripts mode selects, then either
 * continues the SCSI request (buffer drained) or resumes the script.
 */
static void lsi_do_dma(LSIState *s, int out)
{
    uint32_t count;
    dma_addr_t addr;
    SCSIDevice *dev;

    assert(s->current);
    if (!s->current->dma_len) {
        /* Wait until data is available.  */
        trace_lsi_do_dma_unavailable();
        return;
    }

    dev = s->current->req->dev;
    assert(dev);

    count = s->dbc;
    if (count > s->current->dma_len)
        count = s->current->dma_len;

    addr = s->dnad;
    /* both 40 and Table Indirect 64-bit DMAs store upper bits in dnad64 */
    if (lsi_dma_40bit(s) || lsi_dma_ti64bit(s))
        addr |= ((uint64_t)s->dnad64 << 32);
    else if (s->dbms)
        addr |= ((uint64_t)s->dbms << 32);
    else if (s->sbms)
        addr |= ((uint64_t)s->sbms << 32);

    trace_lsi_do_dma(addr, count);
    s->csbc += count;
    s->dnad += count;
    s->dbc -= count;
    if (s->current->dma_buf == NULL) {
        s->current->dma_buf = scsi_req_get_buf(s->current->req);
    }
    /* ??? Set SFBR to first data byte.  */
    if (out) {
        lsi_mem_read(s, addr, s->current->dma_buf, count);
    } else {
        lsi_mem_write(s, addr, s->current->dma_buf, count);
    }
    s->current->dma_len -= count;
    if (s->current->dma_len == 0) {
        /* Buffer fully consumed: hand control back to the SCSI layer. */
        s->current->dma_buf = NULL;
        scsi_req_continue(s->current->req);
    } else {
        s->current->dma_buf += count;
        lsi_resume_script(s);
    }
}
0
189,265
// Encode and store |thumbnail| for |url|.  Unknown URLs get a temporary
// thumbnail (unless the non-forced top-sites list is already full); known
// URLs get a regular one.  Returns what was done with the thumbnail.
TopSitesImpl::ThumbnailEvent TopSitesImpl::SetPageThumbnailImpl(
    const GURL& url,
    const gfx::Image& thumbnail,
    const ThumbnailScore& score) {
  DCHECK(thread_checker_.CalledOnValidThread());

  if (!loaded_) {
    return THUMBNAIL_FAILURE;
  }

  bool needs_temp_thumbnail = false;
  if (!IsKnownURL(url)) {
    if (IsNonForcedFull()) {
      return THUMBNAIL_TOPSITES_FULL;
    }
    needs_temp_thumbnail = true;
  }

  if (!can_add_url_to_history_.Run(url))
    return THUMBNAIL_FAILURE;  // It's not a real webpage.

  scoped_refptr<base::RefCountedBytes> encoded;
  if (!EncodeBitmap(thumbnail, &encoded))
    return THUMBNAIL_FAILURE;

  if (needs_temp_thumbnail) {
    RemoveTemporaryThumbnailByURL(url);
    AddTemporaryThumbnail(url, encoded.get(), score);
    return THUMBNAIL_ADDED_TEMP;
  }

  return SetPageThumbnailEncoded(url, encoded.get(), score)
             ? THUMBNAIL_ADDED_REGULAR
             : THUMBNAIL_KEPT_EXISTING;
}
0
170,719
// Timer callback that drops a reference on the element -- presumably
// balancing a ref() taken when the timer was scheduled (NOTE(review):
// confirm against the code that starts this timer).
void ImageLoader::timerFired(Timer<ImageLoader>*)
{
    m_element->deref();
}
0
458,863
/* Fill in the packet's conntrack metadata (state, zone, mark, label and the
 * original-direction tuple).  The tuple comes, in order of preference, from
 * the connection's master key (ALG-related), the connection's own key, the
 * ALG expectation's master key, or is zeroed when no key is available. */
write_ct_md(struct dp_packet *pkt, uint16_t zone, const struct conn *conn,
            const struct conn_key *key, const struct alg_exp_node *alg_exp)
{
    pkt->md.ct_state |= CS_TRACKED;
    pkt->md.ct_zone = zone;
    pkt->md.ct_mark = conn ? conn->mark : 0;
    pkt->md.ct_label = conn ? conn->label : OVS_U128_ZERO;

    /* Use the original direction tuple if we have it. */
    if (conn) {
        if (conn->alg_related) {
            key = &conn->master_key;
        } else {
            key = &conn->key;
        }
    } else if (alg_exp) {
        pkt->md.ct_mark = alg_exp->master_mark;
        pkt->md.ct_label = alg_exp->master_label;
        key = &alg_exp->master_key;
    }

    pkt->md.ct_orig_tuple_ipv6 = false;

    if (key) {
        if (key->dl_type == htons(ETH_TYPE_IP)) {
            /* For ICMP, the tuple carries type/code in the port slots. */
            pkt->md.ct_orig_tuple.ipv4 = (struct ovs_key_ct_tuple_ipv4) {
                key->src.addr.ipv4_aligned,
                key->dst.addr.ipv4_aligned,
                key->nw_proto != IPPROTO_ICMP
                ? key->src.port : htons(key->src.icmp_type),
                key->nw_proto != IPPROTO_ICMP
                ? key->dst.port : htons(key->src.icmp_code),
                key->nw_proto,
            };
        } else {
            pkt->md.ct_orig_tuple_ipv6 = true;
            pkt->md.ct_orig_tuple.ipv6 = (struct ovs_key_ct_tuple_ipv6) {
                key->src.addr.ipv6_aligned,
                key->dst.addr.ipv6_aligned,
                key->nw_proto != IPPROTO_ICMPV6
                ? key->src.port : htons(key->src.icmp_type),
                key->nw_proto != IPPROTO_ICMPV6
                ? key->dst.port : htons(key->src.icmp_code),
                key->nw_proto,
            };
        }
    } else {
        memset(&pkt->md.ct_orig_tuple, 0, sizeof pkt->md.ct_orig_tuple);
    }
}
0
206,313
/* No-op domain handler stub: ignores its arguments and reports success. */
static int
dummydomain(i_ctx_t * i_ctx_p, ref *space, float *ptr)
{
    return 0;
}
0
375,220
/* Deep-copy a ClosePortalStmt node.  The COPY_STRING_FIELD macro relies on
 * the local names 'from' and 'newnode' -- do not rename them. */
_copyClosePortalStmt(const ClosePortalStmt *from)
{
	ClosePortalStmt *newnode = makeNode(ClosePortalStmt);

	COPY_STRING_FIELD(portalname);

	return newnode;
}
0
99,330
/*
 * Allocate and wire up a new RDP protocol instance for 'context': create
 * (or adopt) the settings, then construct each subsystem in dependency
 * order (transport, license, input, update, fastpath, nego, mcs,
 * redirection, autodetect, heartbeat, multitransport, bulk).  On any
 * failure, the goto chain below tears down everything built so far, in
 * reverse order, and NULL is returned.
 */
rdpRdp* rdp_new(rdpContext* context)
{
	rdpRdp* rdp;
	DWORD flags;
	BOOL newSettings = FALSE;
	rdp = (rdpRdp*)calloc(1, sizeof(rdpRdp));

	if (!rdp)
		return NULL;

	rdp->context = context;
	rdp->instance = context->instance;
	flags = 0;

	if (context->ServerMode)
		flags |= FREERDP_SETTINGS_SERVER_MODE;

	if (!context->settings)
	{
		context->settings = freerdp_settings_new(flags);

		if (!context->settings)
			goto out_free;

		/* remember we own the settings so the error path frees them */
		newSettings = TRUE;
	}

	rdp->settings = context->settings;

	if (context->instance)
	{
		rdp->settings->instance = context->instance;
		context->instance->settings = rdp->settings;
	}
	else if (context->peer)
	{
		rdp->settings->instance = context->peer;
		context->peer->settings = rdp->settings;
	}

	rdp->transport = transport_new(context);

	if (!rdp->transport)
		goto out_free_settings;

	rdp->license = license_new(rdp);

	if (!rdp->license)
		goto out_free_transport;

	rdp->input = input_new(rdp);

	if (!rdp->input)
		goto out_free_license;

	rdp->update = update_new(rdp);

	if (!rdp->update)
		goto out_free_input;

	rdp->fastpath = fastpath_new(rdp);

	if (!rdp->fastpath)
		goto out_free_update;

	rdp->nego = nego_new(rdp->transport);

	if (!rdp->nego)
		goto out_free_fastpath;

	rdp->mcs = mcs_new(rdp->transport);

	if (!rdp->mcs)
		goto out_free_nego;

	rdp->redirection = redirection_new();

	if (!rdp->redirection)
		goto out_free_mcs;

	rdp->autodetect = autodetect_new();

	if (!rdp->autodetect)
		goto out_free_redirection;

	rdp->heartbeat = heartbeat_new();

	if (!rdp->heartbeat)
		goto out_free_autodetect;

	rdp->multitransport = multitransport_new();

	if (!rdp->multitransport)
		goto out_free_heartbeat;

	rdp->bulk = bulk_new(context);

	if (!rdp->bulk)
		goto out_free_multitransport;

	return rdp;
	/* Error unwind: free constructed subsystems in reverse order. */
out_free_multitransport:
	multitransport_free(rdp->multitransport);
out_free_heartbeat:
	heartbeat_free(rdp->heartbeat);
out_free_autodetect:
	autodetect_free(rdp->autodetect);
out_free_redirection:
	redirection_free(rdp->redirection);
out_free_mcs:
	mcs_free(rdp->mcs);
out_free_nego:
	nego_free(rdp->nego);
out_free_fastpath:
	fastpath_free(rdp->fastpath);
out_free_update:
	update_free(rdp->update);
out_free_input:
	input_free(rdp->input);
out_free_license:
	license_free(rdp->license);
out_free_transport:
	transport_free(rdp->transport);
out_free_settings:
	if (newSettings)
		freerdp_settings_free(rdp->settings);
out_free:
	free(rdp);
	return NULL;
}
0
241,158
// Recomputes whether this loader must keep its element alive: the element
// is "protected" while a load or error event is still pending. On a
// transition to protected, either cancel a pending deref timer or take a
// keep-alive reference; on a transition to unprotected, schedule the
// deref asynchronously via a zero-delay one-shot timer.
void ImageLoader::updatedHasPendingEvent() {
  const bool nowProtected = m_hasPendingLoadEvent || m_hasPendingErrorEvent;
  const bool changed = (nowProtected != m_elementIsProtected);
  m_elementIsProtected = nowProtected;
  if (!changed)
    return;

  if (!m_elementIsProtected) {
    // Became unprotected: release the keep-alive reference asynchronously.
    DCHECK(!m_derefElementTimer.isActive());
    m_derefElementTimer.startOneShot(0, BLINK_FROM_HERE);
    return;
  }

  // Became protected: reuse a not-yet-fired deref timer by stopping it,
  // otherwise pin the element explicitly.
  if (m_derefElementTimer.isActive())
    m_derefElementTimer.stop();
  else
    m_keepAlive = m_element;
}
0
367,081
/*
 * No-op stub for inode security blob allocation; always reports success.
 * NOTE(review): presumably the !CONFIG_SECURITY variant of the LSM hook —
 * confirm against the surrounding #ifdef in the full header.
 */
static inline int security_inode_alloc(struct inode *inode)
{
	return 0;
}
0
118,854
/*
 * Rename the current buffer to "new_fname".
 *
 * Fires BufFilePre/BufFilePost autocommands around the rename. The old
 * name is preserved in a new unlisted buffer so it can serve as the
 * alternate file name — but only if the buffer had a name at all.
 *
 * Returns OK on success, FAIL if autocommands switched buffers, aborted
 * script processing, or setfname() failed.
 */
    rename_buffer(char_u *new_fname)
{
    char_u	*fname, *sfname, *xfname;
    buf_T	*buf;

    buf = curbuf;
    apply_autocmds(EVENT_BUFFILEPRE, NULL, NULL, FALSE, curbuf);

    // buffer changed, don't change name now
    if (buf != curbuf)
	return FAIL;

#ifdef FEAT_EVAL
    if (aborting())	    // autocmds may abort script processing
	return FAIL;
#endif
    /*
     * The name of the current buffer will be changed.
     * A new (unlisted) buffer entry needs to be made to hold the old file
     * name, which will become the alternate file name.
     * But don't set the alternate file name if the buffer didn't have a
     * name.
     */
    fname = curbuf->b_ffname;
    sfname = curbuf->b_sfname;
    xfname = curbuf->b_fname;
    // Clear the name fields first so setfname() installs the new name into
    // a clean buffer; restore them verbatim if it fails.
    curbuf->b_ffname = NULL;
    curbuf->b_sfname = NULL;

    if (setfname(curbuf, new_fname, NULL, TRUE) == FAIL)
    {
	curbuf->b_ffname = fname;
	curbuf->b_sfname = sfname;
	return FAIL;
    }
    curbuf->b_flags |= BF_NOTEDITED;

    if (xfname != NULL && *xfname != NUL)
    {
	// Create the unlisted buffer holding the old name and make it the
	// alternate file, unless :keepalt was in effect.
	buf = buflist_new(fname, xfname, curwin->w_cursor.lnum, 0);

	if (buf != NULL && (cmdmod.cmod_flags & CMOD_KEEPALT) == 0)
	    curwin->w_alt_fnum = buf->b_fnum;
    }
    vim_free(fname);
    vim_free(sfname);
    apply_autocmds(EVENT_BUFFILEPOST, NULL, NULL, FALSE, curbuf);

    // Change directories when the 'acd' option is set.
    DO_AUTOCHDIR;
    return OK;
}
0
405,707
/*
 * DrawGetClipUnits(): accessor returning the current clip-path units of
 * the drawing wand's active context. Logs the wand name when wand
 * debugging is enabled; never modifies the wand.
 */
WandExport ClipPathUnits DrawGetClipUnits(const DrawingWand *wand)
{
  ClipPathUnits
    units;

  assert(wand != (const DrawingWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  units=CurrentContext->clip_units;
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  return(units);
}
0
164,308
// Handles a file chosen in a file-selection dialog: remembers the chosen
// directory on the profile, converts the local path to a file:// URL
// (letting Drive rewrite it to a resource URL on Chrome OS), and opens it
// in the current tab as a typed navigation. |index| and |params| are
// accepted for the delegate signature but unused here.
void Browser::FileSelectedWithExtraInfo(
    const ui::SelectedFileInfo& file_info,
    int index,
    void* params) {
  // Persist the directory so the next dialog opens where the user left off.
  profile_->set_last_selected_directory(file_info.file_path.DirName());

  const FilePath& path = file_info.local_path;
  GURL file_url = net::FilePathToFileURL(path);

#if defined(OS_CHROMEOS)
  // Drive-hosted files get a resource URL instead of a local file URL.
  drive::util::ModifyDriveFileResourceUrl(profile_, path, &file_url);
#endif

  // Conversion (or the Drive rewrite) can yield an empty URL; nothing to open.
  if (file_url.is_empty())
    return;

  OpenURL(OpenURLParams(
      file_url, Referrer(), CURRENT_TAB, content::PAGE_TRANSITION_TYPED,
      false));
}
0
487,718
/*
 * ext4_ext_rm_leaf() - remove the extents (or tails of extents) covering
 * logical blocks [start, end] from one leaf of the extent tree, walking
 * the leaf's extents from last to first. Frees the underlying data blocks
 * via ext4_remove_blocks(), tracks partial-cluster state for bigalloc
 * file systems, and extends the journal transaction as needed.
 *
 * Returns 0 on success, -EFSCORRUPTED on a malformed leaf/range,
 * -EAGAIN if the caller must restart after a credit extension, or another
 * negative errno from the journal/block-freeing helpers.
 */
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		 struct ext4_ext_path *path,
		 struct partial_cluster *partial,
		 ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits, revoke_credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b;
	unsigned num;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned unwritten = 0;
	struct ext4_extent *ex;
	ext4_fsblk_t pblk;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EFSCORRUPTED;
	}
	/* find where to start removing */
	ex = path[depth].p_ext;
	if (!ex)
		ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	trace_ext4_ext_rm_leaf(inode, start, ex, partial);

	/* Walk extents from the rightmost towards the first, stopping once
	 * an extent ends at or before 'start'. */
	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_unwritten(ex))
			unwritten = 1;
		else
			unwritten = 0;

		ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
			  unwritten, ex_ee_len);
		path[depth].p_ext = ex;

		/* [a, b] = intersection of this extent with [start, end] */
		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block+ex_ee_len - 1 < end ?
			ex_ee_block+ex_ee_len - 1 : end;

		ext_debug(inode, "  border %u:%u\n", a, b);

		/* If this extent is beyond the end of the hole, skip it */
		if (end < ex_ee_block) {
			/*
			 * We're going to skip this extent and move to another,
			 * so note that its first cluster is in use to avoid
			 * freeing it when removing blocks.  Eventually, the
			 * right edge of the truncated/punched region will
			 * be just to the left.
			 */
			if (sbi->s_cluster_ratio > 1) {
				pblk = ext4_ext_pblock(ex);
				partial->pclu = EXT4_B2C(sbi, pblk);
				partial->state = nofree;
			}
			ex--;
			ex_ee_block = le32_to_cpu(ex->ee_block);
			ex_ee_len = ext4_ext_get_actual_len(ex);
			continue;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* Removing a middle chunk of an extent (would split
			 * it) is not supported here. */
			EXT4_ERROR_INODE(inode,
					 "can not handle truncate %u:%u "
					 "on extent %u:%u",
					 start, end, ex_ee_block,
					 ex_ee_block + ex_ee_len - 1);
			err = -EFSCORRUPTED;
			goto out;
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			num = a - ex_ee_block;
		} else {
			/* remove whole extent: excellent! */
			num = 0;
		}
		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
		/*
		 * We may end up freeing some index blocks and data from the
		 * punched range. Note that partial clusters are accounted for
		 * by ext4_free_data_revoke_credits().
		 */
		revoke_credits =
			ext4_free_metadata_revoke_credits(inode->i_sb,
							  ext_depth(inode)) +
			ext4_free_data_revoke_credits(inode, b - a + 1);

		err = ext4_datasem_ensure_credits(handle, inode, credits,
						  credits, revoke_credits);
		if (err) {
			/* positive return means the transaction was restarted;
			 * translate to -EAGAIN so the caller re-walks the tree */
			if (err > 0)
				err = -EAGAIN;
			goto out;
		}

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
		if (err)
			goto out;

		if (num == 0)
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);

		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark unwritten if all the blocks in the
		 * extent have been removed.
		 */
		if (unwritten && num)
			ext4_ext_mark_unwritten(ex);
		/*
		 * If the extent was completely released,
		 * we need to remove it from the leaf
		 */
		if (num == 0) {
			if (end != EXT_MAX_BLOCKS - 1) {
				/*
				 * For hole punching, we need to scoot all the
				 * extents up when an extent is removed so that
				 * we dont have blank extents in the middle
				 */
				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
					sizeof(struct ext4_extent));

				/* Now get rid of the one at the end */
				memset(EXT_LAST_EXTENT(eh), 0,
					sizeof(struct ext4_extent));
			}
			le16_add_cpu(&eh->eh_entries, -1);
		}

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
				ext4_ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/*
	 * If there's a partial cluster and at least one extent remains in
	 * the leaf, free the partial cluster if it isn't shared with the
	 * current extent.  If it is shared with the current extent
	 * we reset the partial cluster because we've reached the start of the
	 * truncated/punched region and we're done removing blocks.
	 */
	if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
		pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
		if (partial->pclu != EXT4_B2C(sbi, pblk)) {
			int flags = get_default_free_blocks_flags(inode);

			if (ext4_is_pending(inode, partial->lblk))
				flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
			ext4_free_blocks(handle, inode, NULL,
					 EXT4_C2B(sbi, partial->pclu),
					 sbi->s_cluster_ratio, flags);
			if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
				ext4_rereserve_cluster(inode, partial->lblk);
		}
		partial->state = initial;
	}

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path, depth);

out:
	return err;
}
0
403,338
/* {{{ PHP_MINFO_FUNCTION
 * phpinfo() reporting for the openssl extension: prints the runtime
 * library version, the compile-time header version, and the default
 * OpenSSL config path, followed by the extension's INI entries. */
PHP_MINFO_FUNCTION(openssl)
{
	php_info_print_table_start();
	php_info_print_table_row(2, "OpenSSL support", "enabled");
	/* Runtime (linked) library version vs. compile-time header version —
	 * these can legitimately differ. */
	php_info_print_table_row(2, "OpenSSL Library Version", SSLeay_version(SSLEAY_VERSION));
	php_info_print_table_row(2, "OpenSSL Header Version", OPENSSL_VERSION_TEXT);
	php_info_print_table_row(2, "Openssl default config", default_ssl_conf_filename);
	php_info_print_table_end();
	DISPLAY_INI_ENTRIES();
}
/* }}} */
0
238,089
// Exercises backend behavior on a cache with corrupted rankings data:
// loads the "bad_rankings" fixture without the usual startup cleanup,
// then verifies that opening the damaged entry fails while a healthy
// entry still opens. Integrity checking is disabled at the end because
// the cache is intentionally left in a corrupt state.
void DiskCacheBackendTest::BackendInvalidRankings2() {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  // Skip the initial cleanup pass so the corruption survives InitCache().
  DisableFirstCleanup();
  InitCache();
  disk_cache::Entry *entry1, *entry2;
  // The entry whose rankings node is corrupt must fail to open.
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  // An unaffected entry must remain accessible.
  ASSERT_THAT(OpenEntry("some other key", &entry2), IsOk());
  entry2->Close();
  // The backend is knowingly corrupt; don't fail teardown on it.
  DisableIntegrityCheck();
}
0
19,234
/*
 * qsort() comparator establishing path-table order between two isoent
 * entries: primary key is the parent directory number; the tie-breaker
 * compares identifiers up to each entry's extension offset, with the
 * shorter identifier treated as if padded with 0x20 (space) bytes.
 */
static int
_compare_path_table(const void *v1, const void *v2)
{
	const struct isoent *a, *b;
	const char *ida, *idb;
	int diff, minlen, i;

	a = *((const struct isoent **)(uintptr_t)v1);
	b = *((const struct isoent **)(uintptr_t)v2);

	/* Primary key: parent directory number. */
	diff = a->parent->dir_number - b->parent->dir_number;
	if (diff != 0)
		return (diff);

	/* Secondary key: identifier prefix up to the shorter ext_off. */
	ida = a->identifier;
	idb = b->identifier;
	minlen = a->ext_off;
	if (minlen > b->ext_off)
		minlen = b->ext_off;
	diff = strncmp(ida, idb, minlen);
	if (diff != 0)
		return (diff);

	/* Same prefix: the shorter name compares as if padded with 0x20. */
	if (a->ext_off < b->ext_off) {
		for (i = minlen; i < b->ext_off; i++)
			if (0x20 != idb[i])
				return (0x20 - *(const unsigned char *)(idb + i));
	} else if (a->ext_off > b->ext_off) {
		for (i = minlen; i < a->ext_off; i++)
			if (0x20 != ida[i])
				return (*(const unsigned char *)(ida + i) - 0x20);
	}
	return (0);
}
0
65,782
// Sets *out to the maximum of the two dimensions. If either dimension is
// unknown the result is an unknown dimension. When the first dimension is
// greater than or equal to the second, *out aliases `first` (preserving
// the handle); otherwise a dimension is materialized from `second`.
// Always returns OK.
Status InferenceContext::Max(DimensionHandle first, DimensionOrConstant second,
                             DimensionHandle* out) {
  const int64_t lhs = Value(first);
  const int64_t rhs = Value(second);
  if (lhs == kUnknownDim || rhs == kUnknownDim) {
    *out = UnknownDim();
    return Status::OK();
  }
  *out = (lhs >= rhs) ? first : MakeDim(second);
  return Status::OK();
}
0
349,007
/*
 * Compute an ECDSA signature (r, s) over INPUT with secret key SKEY.
 *
 * INPUT is the (possibly opaque) hash to sign; with PUBKEY_FLAG_RFC6979
 * and a hash algorithm it must be an opaque MPI and the nonce k is
 * derived deterministically (RFC 6979), otherwise k is drawn from the
 * strong RNG.  The secret scalar d is blinded with a random b (and its
 * inverse bi) before the s computation to reduce side-channel exposure.
 *
 * NOTE(review): nonce-handling in ECDSA has historically leaked timing
 * information (e.g. the bit length of k); confirm this version carries
 * the project's current mitigations.
 *
 * Returns 0 on success or a gpg error code (GPG_ERR_CONFLICT for a
 * non-opaque RFC6979 input, GPG_ERR_BAD_SIGNATURE if the point at
 * infinity is hit).
 */
_gcry_ecc_ecdsa_sign (gcry_mpi_t input, ECC_secret_key *skey,
                      gcry_mpi_t r, gcry_mpi_t s,
                      int flags, int hashalgo)
{
  gpg_err_code_t rc = 0;
  int extraloops = 0;
  gcry_mpi_t k, dr, sum, k_1, x;
  mpi_point_struct I;
  gcry_mpi_t hash;
  const void *abuf;
  unsigned int abits, qbits;
  mpi_ec_t ctx;
  gcry_mpi_t b;                /* Random number needed for blinding.  */
  gcry_mpi_t bi;               /* multiplicative inverse of B.        */

  if (DBG_CIPHER)
    log_mpidump ("ecdsa sign hash  ", input );

  qbits = mpi_get_nbits (skey->E.n);

  /* Convert the INPUT into an MPI if needed.  */
  rc = _gcry_dsa_normalize_hash (input, &hash, qbits);
  if (rc)
    return rc;

  /* Pick an invertible blinding factor b mod n; loop until b has an
     inverse (i.e. gcd(b, n) == 1).  */
  b  = mpi_snew (qbits);
  bi = mpi_snew (qbits);
  do
    {
      _gcry_mpi_randomize (b, qbits, GCRY_WEAK_RANDOM);
      mpi_mod (b, b, skey->E.n);
    }
  while (!mpi_invm (bi, b, skey->E.n));

  k = NULL;
  dr = mpi_alloc (0);
  sum = mpi_alloc (0);
  k_1 = mpi_alloc (0);
  x = mpi_alloc (0);
  point_init (&I);

  ctx = _gcry_mpi_ec_p_internal_new (skey->E.model, skey->E.dialect, 0,
                                     skey->E.p, skey->E.a, skey->E.b);

  /* Two loops to avoid R or S are zero.  This is more of a joke than
     a real demand because the probability of them being zero is less
     than any hardware failure.  Some specs however require it.  */
  do
    {
      do
        {
          mpi_free (k);
          k = NULL;
          if ((flags & PUBKEY_FLAG_RFC6979) && hashalgo)
            {
              /* Use Pornin's method for deterministic DSA.  If this
                 flag is set, it is expected that HASH is an opaque
                 MPI with the to be signed hash.  That hash is also
                 used as h1 from 3.2.a.  */
              if (!mpi_is_opaque (input))
                {
                  rc = GPG_ERR_CONFLICT;
                  goto leave;
                }

              abuf = mpi_get_opaque (input, &abits);
              rc = _gcry_dsa_gen_rfc6979_k (&k, skey->E.n, skey->d,
                                            abuf, (abits+7)/8,
                                            hashalgo, extraloops);
              if (rc)
                goto leave;
              /* extraloops ensures a fresh k if the outer loops retry.  */
              extraloops++;
            }
          else
            k = _gcry_dsa_gen_k (skey->E.n, GCRY_STRONG_RANDOM);

          /* I = [k]G; r = I.x mod n.  */
          _gcry_mpi_ec_mul_point (&I, k, &skey->E.G, ctx);
          if (_gcry_mpi_ec_get_affine (x, NULL, &I, ctx))
            {
              if (DBG_CIPHER)
                log_debug ("ecc sign: Failed to get affine coordinates\n");
              rc = GPG_ERR_BAD_SIGNATURE;
              goto leave;
            }
          mpi_mod (r, x, skey->E.n);  /* r = x mod n */
        }
      while (!mpi_cmp_ui (r, 0));

      /* s = k^-1 * (hash + d*r) mod n, computed under blinding by b and
         unblinded with bi = b^-1.  */
      mpi_mulm (dr, b, skey->d, skey->E.n);
      mpi_mulm (dr, dr, r, skey->E.n);      /* dr = d*r mod n (blinded with b) */
      mpi_mulm (sum, b, hash, skey->E.n);
      mpi_addm (sum, sum, dr, skey->E.n);   /* sum = hash + (d*r) mod n (blinded with b) */
      mpi_mulm (sum, bi, sum, skey->E.n);   /* undo blinding by b^-1 */
      mpi_invm (k_1, k, skey->E.n);         /* k_1 = k^(-1) mod n  */
      mpi_mulm (s, k_1, sum, skey->E.n);    /* s = k^(-1)*(hash+(d*r)) mod n */
    }
  while (!mpi_cmp_ui (s, 0));

  if (DBG_CIPHER)
    {
      log_mpidump ("ecdsa sign result r ", r);
      log_mpidump ("ecdsa sign result s ", s);
    }

 leave:
  mpi_free (b);
  mpi_free (bi);
  _gcry_mpi_ec_free (ctx);
  point_free (&I);
  mpi_free (x);
  mpi_free (k_1);
  mpi_free (sum);
  mpi_free (dr);
  mpi_free (k);

  /* _gcry_dsa_normalize_hash may have allocated a fresh MPI.  */
  if (hash != input)
    mpi_free (hash);

  return rc;
}
1