idx
int64
func
string
target
int64
16,289
/* Dissect a SpoolssEnumPrinters request: flags bitmask, optional server name,
 * info level (cached in dcv->se_data on the first pass for the reply handler),
 * then the buffer and offered size.  NOTE(review): the first
 * dissect_ndr_uint32 deliberately passes a NULL tree so the raw flags value
 * can be re-added as a decoded bitmask at (offset - 4) immediately after. */
static int SpoolssEnumPrinters_q ( tvbuff_t * tvb , int offset , packet_info * pinfo , proto_tree * tree , dcerpc_info * di , guint8 * drep _U_ ) { guint32 level , flags ; dcerpc_call_value * dcv = ( dcerpc_call_value * ) di -> call_data ; static const int * hf_flags [ ] = { & hf_enumprinters_flags_network , & hf_enumprinters_flags_shared , & hf_enumprinters_flags_remote , & hf_enumprinters_flags_name , & hf_enumprinters_flags_connections , & hf_enumprinters_flags_local , & hf_enumprinters_flags_default , NULL } ; offset = dissect_ndr_uint32 ( tvb , offset , pinfo , NULL , di , drep , - 1 , & flags ) ; proto_tree_add_bitmask_value ( tree , tvb , offset - 4 , hf_enumprinters_flags , ett_enumprinters_flags , hf_flags , flags ) ; offset = dissect_ndr_str_pointer_item ( tvb , offset , pinfo , tree , di , drep , NDR_POINTER_UNIQUE , "Server name" , hf_servername , 0 ) ; offset = dissect_ndr_uint32 ( tvb , offset , pinfo , tree , di , drep , hf_level , & level ) ; if ( ! pinfo -> fd -> flags . visited ) { dcv -> se_data = GINT_TO_POINTER ( ( int ) level ) ; } col_append_fstr ( pinfo -> cinfo , COL_INFO , ", level %d" , level ) ; offset = dissect_spoolss_buffer ( tvb , offset , pinfo , tree , di , drep , NULL ) ; offset = dissect_ndr_uint32 ( tvb , offset , pinfo , tree , di , drep , hf_offered , NULL ) ; return offset ; }
0
46,680
/* Parse one DER certificate or one-or-more PEM certificates from buf into
 * chain.  PEM mode is detected by a NUL-terminated buffer containing the
 * BEGIN CERTIFICATE marker; otherwise the whole buffer is treated as DER.
 * In PEM mode, individually failing certificates are counted (the count is
 * returned) while the first error code is remembered; only allocation
 * failures and bad PEM input abort the loop outright.  Returns 0/positive
 * count of failed certs on (partial) success, negative error otherwise. */
int mbedtls_x509_crt_parse( mbedtls_x509_crt *chain, const unsigned char *buf, size_t buflen ) { #if defined(MBEDTLS_PEM_PARSE_C) int success = 0, first_error = 0, total_failed = 0; int buf_format = MBEDTLS_X509_FORMAT_DER; #endif /* * Check for valid input */ if( chain == NULL || buf == NULL ) return( MBEDTLS_ERR_X509_BAD_INPUT_DATA ); /* * Determine buffer content. Buffer contains either one DER certificate or * one or more PEM certificates. */ #if defined(MBEDTLS_PEM_PARSE_C) if( buflen != 0 && buf[buflen - 1] == '\0' && strstr( (const char *) buf, "-----BEGIN CERTIFICATE-----" ) != NULL ) { buf_format = MBEDTLS_X509_FORMAT_PEM; } if( buf_format == MBEDTLS_X509_FORMAT_DER ) return mbedtls_x509_crt_parse_der( chain, buf, buflen ); #else return mbedtls_x509_crt_parse_der( chain, buf, buflen ); #endif #if defined(MBEDTLS_PEM_PARSE_C) if( buf_format == MBEDTLS_X509_FORMAT_PEM ) { int ret; mbedtls_pem_context pem; /* 1 rather than 0 since the terminating NULL byte is counted in */ while( buflen > 1 ) { size_t use_len; mbedtls_pem_init( &pem ); /* If we get there, we know the string is null-terminated */ ret = mbedtls_pem_read_buffer( &pem, "-----BEGIN CERTIFICATE-----", "-----END CERTIFICATE-----", buf, NULL, 0, &use_len ); if( ret == 0 ) { /* * Was PEM encoded */ buflen -= use_len; buf += use_len; } else if( ret == MBEDTLS_ERR_PEM_BAD_INPUT_DATA ) { return( ret ); } else if( ret != MBEDTLS_ERR_PEM_NO_HEADER_FOOTER_PRESENT ) { mbedtls_pem_free( &pem ); /* * PEM header and footer were found */ buflen -= use_len; buf += use_len; if( first_error == 0 ) first_error = ret; total_failed++; continue; } else break; ret = mbedtls_x509_crt_parse_der( chain, pem.buf, pem.buflen ); mbedtls_pem_free( &pem ); if( ret != 0 ) { /* * Quit parsing on a memory error */ if( ret == MBEDTLS_ERR_X509_ALLOC_FAILED ) return( ret ); if( first_error == 0 ) first_error = ret; total_failed++; continue; } success = 1; } } if( success ) return( total_failed ); else if( first_error ) return( 
first_error ); else return( MBEDTLS_ERR_X509_CERT_UNKNOWN_FORMAT ); #endif /* MBEDTLS_PEM_PARSE_C */ }
0
116,454
/* Queue the futex_q on its hash bucket, arm the optional timeout, and sleep
 * until woken, timed out, or signalled.  The set_current_state() /
 * queue_me() ordering is the correctness-critical part (see the comment in
 * the body); the plist_node_empty() check skips schedule() when a waker
 * already removed us from the hash list. */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, struct hrtimer_sleeper *timeout) { /* * The task state is guaranteed to be set before another task can * wake it. set_current_state() is implemented using smp_store_mb() and * queue_me() calls spin_unlock() upon completion, both serializing * access to the hash list and forcing another memory barrier. */ set_current_state(TASK_INTERRUPTIBLE); queue_me(q, hb); /* Arm the timer */ if (timeout) hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); /* * If we have been removed from the hash list, then another task * has tried to wake us, and we can skip the call to schedule(). */ if (likely(!plist_node_empty(&q->list))) { /* * If the timer has already expired, current will already be * flagged for rescheduling. Only call schedule if there * is no timeout, or if it has yet to expire. */ if (!timeout || timeout->task) freezable_schedule(); } __set_current_state(TASK_RUNNING); }
0
279,777
// Default constructor: initializes the ready-to-draw bookkeeping flags to
// false and the required-for-draw tile counter to zero.
LayerTreeHostTestReadyToDrawEmpty() : did_notify_ready_to_draw_(false), all_tiles_required_for_draw_are_ready_to_draw_(false), required_for_draw_count_(0) {}
0
202,435
// Build the PostScript filter-chain string for this stream.
// Returns NULL when the requested PostScript level cannot express the
// filter (level < 2) or when the underlying stream yields no filter string;
// otherwise appends our own decode step after the inner stream's filters.
GooString *ASCII85Stream::getPSFilter(int psLevel, const char *indent) {
  if (psLevel < 2) {
    return NULL;
  }
  GooString *chain = str->getPSFilter(psLevel, indent);
  if (chain == NULL) {
    return NULL;
  }
  chain->append(indent);
  chain->append("/ASCII85Decode filter\n");
  return chain;
}
0
64,430
// Store a string attribute: copies the bytes of |value| into |out|'s string
// field (length-delimited, so embedded NULs are preserved).
void SetAttrValue(StringPiece value, AttrValue* out) { out->set_s(value.data(), value.size()); }
0
277,899
/* libpng flush callback: intentionally a no-op (nothing buffered to flush);
 * the cast silences the unused-parameter warning. */
static void png_flush_data(png_structp png_ptr) { (void) png_ptr; }
0
368,852
/* Slab constructor for PgSocket client objects: zero the whole struct,
 * initialize the list head and stream buffer (with the client protocol
 * handler), and mark the socket as free. */
static void construct_client(void *obj) { PgSocket *client = obj; memset(client, 0, sizeof(PgSocket)); list_init(&client->head); sbuf_init(&client->sbuf, client_proto); client->state = CL_FREE; }
0
179,815
/*
 * Create and configure a ZeroMQ socket for the given input instance.
 *
 * On success, *sock holds the newly created socket with all options from
 * @info applied (options are set *before* connect/bind, as czmq requires),
 * SUB-socket subscriptions installed, and the socket connected or bound
 * according to info->action.  Returns RS_RET_OK on success and
 * RS_RET_INVALID_PARAMS on any creation/connect/bind failure.
 */
static rsRetVal createSocket(instanceConf_t* info, void** sock) {
    int rv;
    sublist* sub;

    *sock = zsocket_new(s_context, info->type);
    /* BUGFIX: check the created socket (*sock), not the out-pointer argument
     * itself, which is always non-NULL here; the old test could never catch
     * a zsocket_new failure and later code would dereference NULL. */
    if (!*sock) {
        errmsg.LogError(0, RS_RET_INVALID_PARAMS,
                        "zsocket_new failed: %s, for type %d",
                        zmq_strerror(errno), info->type);
        /* DK: invalid params seems right here */
        return RS_RET_INVALID_PARAMS;
    }
    DBGPRINTF("imzmq3: socket of type %d created successfully\n", info->type);

    /* Set options *before* the connect/bind.  A value of -1 (or NULL for
     * the identity) means "not configured, keep the library default". */
    if (info->identity)             zsocket_set_identity(*sock, info->identity);
    if (info->sndBuf > -1)          zsocket_set_sndbuf(*sock, info->sndBuf);
    if (info->rcvBuf > -1)          zsocket_set_rcvbuf(*sock, info->rcvBuf);
    if (info->linger > -1)          zsocket_set_linger(*sock, info->linger);
    if (info->backlog > -1)         zsocket_set_backlog(*sock, info->backlog);
    if (info->sndTimeout > -1)      zsocket_set_sndtimeo(*sock, info->sndTimeout);
    if (info->rcvTimeout > -1)      zsocket_set_rcvtimeo(*sock, info->rcvTimeout);
    if (info->maxMsgSize > -1)      zsocket_set_maxmsgsize(*sock, info->maxMsgSize);
    if (info->rate > -1)            zsocket_set_rate(*sock, info->rate);
    if (info->recoveryIVL > -1)     zsocket_set_recovery_ivl(*sock, info->recoveryIVL);
    if (info->multicastHops > -1)   zsocket_set_multicast_hops(*sock, info->multicastHops);
    if (info->reconnectIVL > -1)    zsocket_set_reconnect_ivl(*sock, info->reconnectIVL);
    if (info->reconnectIVLMax > -1) zsocket_set_reconnect_ivl_max(*sock, info->reconnectIVLMax);
    if (info->ipv4Only > -1)        zsocket_set_ipv4only(*sock, info->ipv4Only);
    if (info->affinity > -1)        zsocket_set_affinity(*sock, info->affinity);
    if (info->sndHWM > -1)          zsocket_set_sndhwm(*sock, info->sndHWM);
    if (info->rcvHWM > -1)          zsocket_set_rcvhwm(*sock, info->rcvHWM);

    /* Set subscriptions (SUB sockets receive nothing without at least one). */
    if (info->type == ZMQ_SUB) {
        for (sub = info->subscriptions; sub != NULL; sub = sub->next) {
            zsocket_set_subscribe(*sock, sub->subscribe);
        }
    }

    /* Do the bind/connect... */
    if (info->action == ACTION_CONNECT) {
        rv = zsocket_connect(*sock, "%s", info->description);
        if (rv == -1) {
            errmsg.LogError(0, RS_RET_INVALID_PARAMS,
                            "zmq_connect using %s failed: %s",
                            info->description, zmq_strerror(errno));
            return RS_RET_INVALID_PARAMS;
        }
        DBGPRINTF("imzmq3: connect for %s successful\n", info->description);
    } else {
        rv = zsocket_bind(*sock, "%s", info->description);
        if (rv == -1) {
            errmsg.LogError(0, RS_RET_INVALID_PARAMS,
                            "zmq_bind using %s failed: %s",
                            info->description, zmq_strerror(errno));
            return RS_RET_INVALID_PARAMS;
        }
        DBGPRINTF("imzmq3: bind for %s successful\n", info->description);
    }
    return RS_RET_OK;
}
0
134,253
// Verifies that building a server TLS context from a certificate that carries
// neither a subject CN nor any SAN entries throws an EnvoyException whose
// message matches "has neither subject CN nor SAN names".
TEST_F(SslContextImplTest, MustHaveSubjectOrSAN) { envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; const std::string tls_context_yaml = R"EOF( common_tls_context: tls_certificates: - certificate_chain: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_subject_cert.pem" private_key: filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_subject_key.pem" )EOF"; TestUtility::loadFromYaml(TestEnvironment::substitute(tls_context_yaml), tls_context); ServerContextConfigImpl server_context_config(tls_context, factory_context_); EXPECT_THROW_WITH_REGEX( manager_.createSslServerContext(store_, server_context_config, {}, nullptr), EnvoyException, "has neither subject CN nor SAN names"); }
0
435,741
/*
 * Resolve the submodule configuration for a gitlink cache entry.
 * Returns NULL when the entry is not a gitlink or when submodule updating
 * is disabled; otherwise looks the entry's path up in the config.
 */
const struct submodule *submodule_from_ce(const struct cache_entry *ce)
{
	if (S_ISGITLINK(ce->ce_mode) && should_update_submodules())
		return submodule_from_path(null_sha1, ce->name);
	return NULL;
}
0
62,893
/*
 * ->releasepage for XFS: decide whether the buffers attached to @page may
 * be freed.  Refuses (returns 0) for dirty pages and for pages still
 * carrying delalloc or unwritten extent state; otherwise delegates to
 * try_to_free_buffers().
 */
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc;
	int			unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have
	 * had the dirty bit cleared, so shrink_active_list() can hand us
	 * actual dirty pages; conversely block_invalidatepage() can send
	 * pages still marked dirty but with invalidated buffers.  Quietly
	 * refuse all dirty pages instead of freeing their buffers and
	 * triggering spurious buffer-state warnings.  This can likely be
	 * removed once shrink_active_list() is fixed.
	 */
	if (PageDirty(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unwritten);

	/* Delalloc or unwritten state at this point indicates a bug. */
	if (WARN_ON_ONCE(delalloc) || WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}
0
225,186
bool SessionModelAssociator::SessionWindowHasNoTabsToSync( const SessionWindow& window) { int num_populated = 0; for (std::vector<SessionTab*>::const_iterator i = window.tabs.begin(); i != window.tabs.end(); ++i) { const SessionTab* tab = *i; if (IsValidSessionTab(*tab)) num_populated++; } if (num_populated == 0) return true; return false; }
0
225,034
/* Serve a file from a phar archive through the web front controller.
 * Dispatches on @code: PHAR_MIME_PHPS highlights the source and bails out;
 * PHAR_MIME_OTHER emits Content-type/Content-length headers and streams the
 * entry's bytes, then bails out; PHAR_MIME_PHP compiles the entry via the
 * phar:// wrapper (registering it in included_files, establishing the
 * phar-relative cwd) and executes it, bailing out afterwards.  Returns -1 on
 * failure paths that do not bail out.  NOTE(review): most exits go through
 * zend_bailout(), so code after each case is unreachable on success. */
static int phar_file_action(phar_archive_data *phar, phar_entry_info *info, char *mime_type, int code, char *entry, int entry_len, char *arch, char *basename, char *ru, int ru_len TSRMLS_DC) /* {{{ */ { char *name = NULL, buf[8192]; const char *cwd; zend_syntax_highlighter_ini syntax_highlighter_ini; sapi_header_line ctr = {0}; size_t got; int dummy = 1, name_len; zend_file_handle file_handle; zend_op_array *new_op_array; zval *result = NULL; php_stream *fp; off_t position; switch (code) { case PHAR_MIME_PHPS: efree(basename); /* highlight source */ if (entry[0] == '/') { name_len = spprintf(&name, 4096, "phar://%s%s", arch, entry); } else { name_len = spprintf(&name, 4096, "phar://%s/%s", arch, entry); } php_get_highlight_struct(&syntax_highlighter_ini); highlight_file(name, &syntax_highlighter_ini TSRMLS_CC); efree(name); #ifdef PHP_WIN32 efree(arch); #endif zend_bailout(); case PHAR_MIME_OTHER: /* send headers, output file contents */ efree(basename); ctr.line_len = spprintf(&(ctr.line), 0, "Content-type: %s", mime_type); sapi_header_op(SAPI_HEADER_REPLACE, &ctr TSRMLS_CC); efree(ctr.line); ctr.line_len = spprintf(&(ctr.line), 0, "Content-length: %u", info->uncompressed_filesize); sapi_header_op(SAPI_HEADER_REPLACE, &ctr TSRMLS_CC); efree(ctr.line); if (FAILURE == sapi_send_headers(TSRMLS_C)) { zend_bailout(); } /* prepare to output */ fp = phar_get_efp(info, 1 TSRMLS_CC); if (!fp) { char *error; if (!phar_open_jit(phar, info, &error TSRMLS_CC)) { if (error) { zend_throw_exception_ex(phar_ce_PharException, 0 TSRMLS_CC, "%s", error); efree(error); } return -1; } fp = phar_get_efp(info, 1 TSRMLS_CC); } position = 0; phar_seek_efp(info, 0, SEEK_SET, 0, 1 TSRMLS_CC); do { got = php_stream_read(fp, buf, MIN(8192, info->uncompressed_filesize - position)); if (got > 0) { PHPWRITE(buf, got); position += got; if (position == (off_t) info->uncompressed_filesize) { break; } } } while (1); zend_bailout(); case PHAR_MIME_PHP: if (basename) { phar_mung_server_vars(arch, 
entry, entry_len, basename, ru_len TSRMLS_CC); efree(basename); } if (entry[0] == '/') { name_len = spprintf(&name, 4096, "phar://%s%s", arch, entry); } else { name_len = spprintf(&name, 4096, "phar://%s/%s", arch, entry); } file_handle.type = ZEND_HANDLE_FILENAME; file_handle.handle.fd = 0; file_handle.filename = name; file_handle.opened_path = NULL; file_handle.free_filename = 0; PHAR_G(cwd) = NULL; PHAR_G(cwd_len) = 0; if (zend_hash_add(&EG(included_files), name, name_len+1, (void *)&dummy, sizeof(int), NULL) == SUCCESS) { if ((cwd = zend_memrchr(entry, '/', entry_len))) { PHAR_G(cwd_init) = 1; if (entry == cwd) { /* root directory */ PHAR_G(cwd_len) = 0; PHAR_G(cwd) = NULL; } else if (entry[0] == '/') { PHAR_G(cwd_len) = cwd - (entry + 1); PHAR_G(cwd) = estrndup(entry + 1, PHAR_G(cwd_len)); } else { PHAR_G(cwd_len) = cwd - entry; PHAR_G(cwd) = estrndup(entry, PHAR_G(cwd_len)); } } new_op_array = zend_compile_file(&file_handle, ZEND_REQUIRE TSRMLS_CC); if (!new_op_array) { zend_hash_del(&EG(included_files), name, name_len+1); } zend_destroy_file_handle(&file_handle TSRMLS_CC); } else { efree(name); new_op_array = NULL; } #ifdef PHP_WIN32 efree(arch); #endif if (new_op_array) { EG(return_value_ptr_ptr) = &result; EG(active_op_array) = new_op_array; zend_try { zend_execute(new_op_array TSRMLS_CC); if (PHAR_G(cwd)) { efree(PHAR_G(cwd)); PHAR_G(cwd) = NULL; PHAR_G(cwd_len) = 0; } PHAR_G(cwd_init) = 0; efree(name); destroy_op_array(new_op_array TSRMLS_CC); efree(new_op_array); if (EG(return_value_ptr_ptr) && *EG(return_value_ptr_ptr)) { zval_ptr_dtor(EG(return_value_ptr_ptr)); } } zend_catch { if (PHAR_G(cwd)) { efree(PHAR_G(cwd)); PHAR_G(cwd) = NULL; PHAR_G(cwd_len) = 0; } PHAR_G(cwd_init) = 0; efree(name); } zend_end_try(); zend_bailout(); } return PHAR_MIME_PHP; } return -1; } /* }}} */
0
214,680
// NPAPI ForceRedraw entry point: intentionally unimplemented in this host;
// logs via NOTIMPLEMENTED() and does nothing else.
void NPN_ForceRedraw(NPP id) { NOTIMPLEMENTED(); }
0
111,176
/* Implement the :tabfirst/:tabrewind/:tablast/:tabprevious/:tabNext/:tabnext
 * family of Ex commands.  For :tabprevious/:tabNext the count comes either
 * from a strictly numeric argument or from the command's range (defaulting
 * to 1) and is passed negated to goto_tabpage() to mean "go back N tabs";
 * invalid arguments/ranges set eap->errmsg and return without moving. */
ex_tabnext(exarg_T *eap) { int tab_number; if (ERROR_IF_POPUP_WINDOW) return; switch (eap->cmdidx) { case CMD_tabfirst: case CMD_tabrewind: goto_tabpage(1); break; case CMD_tablast: goto_tabpage(9999); break; case CMD_tabprevious: case CMD_tabNext: if (eap->arg && *eap->arg != NUL) { char_u *p = eap->arg; char_u *p_save = p; tab_number = getdigits(&p); if (p == p_save || *p_save == '-' || *p != NUL || tab_number == 0) { // No numbers as argument. eap->errmsg = ex_errmsg(e_invalid_argument_str, eap->arg); return; } } else { if (eap->addr_count == 0) tab_number = 1; else { tab_number = eap->line2; if (tab_number < 1) { eap->errmsg = _(e_invalid_range); return; } } } goto_tabpage(-tab_number); break; default: // CMD_tabnext tab_number = get_tabpage_arg(eap); if (eap->errmsg == NULL) goto_tabpage(tab_number); break; } }
0
292,748
/* Try to orphan @skb early on transmit: only when no tx timestamping flags
 * are set (orphaning would lose the timestamp destination).  Copies
 * sk->sk_hash into skb->rxhash first so skb_tx_hash() keeps working after
 * the socket reference is dropped. */
static inline void skb_orphan_try(struct sk_buff *skb) { struct sock *sk = skb->sk; if (sk && !skb_shinfo(skb)->tx_flags) { /* skb_tx_hash() wont be able to get sk. * We copy sk_hash into skb->rxhash */ if (!skb->rxhash) skb->rxhash = sk->sk_hash; skb_orphan(skb); } }
0
24,935
/* End a one-thread-per-connection worker: unlink the THD, try to park the
 * thread in the thread cache (returning 0 if it gets reused), otherwise
 * clean up per-thread state (OpenSSL error queue, my_thread_end), broadcast
 * COND_thread_count so shutdown can make progress, and exit the thread.
 * NOTE(review): the trailing "return 0" after pthread_exit() is unreachable
 * and exists only to satisfy the compiler. */
bool one_thread_per_connection_end ( THD * thd , bool put_in_cache ) { DBUG_ENTER ( "one_thread_per_connection_end" ) ; unlink_thd ( thd ) ; my_pthread_setspecific_ptr ( THR_THD , 0 ) ; if ( put_in_cache ) { mysql_mutex_lock ( & LOCK_thread_count ) ; put_in_cache = cache_thread ( ) ; mysql_mutex_unlock ( & LOCK_thread_count ) ; if ( put_in_cache ) DBUG_RETURN ( 0 ) ; } DBUG_PRINT ( "signal" , ( "Broadcasting COND_thread_count" ) ) ; DBUG_LEAVE ; # if defined ( HAVE_OPENSSL ) && ! defined ( EMBEDDED_LIBRARY ) ERR_remove_state ( 0 ) ; # endif my_thread_end ( ) ; mysql_cond_broadcast ( & COND_thread_count ) ; pthread_exit ( 0 ) ; return 0 ; }
0
255,952
/* Allocate count*quantum bytes, guarding the multiplication against
 * size_t overflow via CheckMemoryOverflow() before computing the extent.
 * Returns NULL on overflow or allocation failure. */
MagickExport void *AcquireQuantumMemory(const size_t count,const size_t quantum) { size_t extent; if (CheckMemoryOverflow(count,quantum) != MagickFalse) return((void *) NULL); extent=count*quantum; return(AcquireMagickMemory(extent)); }
1
81,097
/*
 * Report whether the given madvise() behavior requires mmap_sem held for
 * writing (1) or whether a read lock suffices (0).  Behaviors not listed
 * explicitly default to the safe choice of a write lock.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		/* These only drop or prefetch pages; reading suffices. */
		return 0;
	}
	/* be safe, default to 1. list exceptions explicitly */
	return 1;
}
0
332,251
/*
 * True when @mr is not one of the special built-in I/O memory regions
 * (ram, rom, notdirty, watch) and is not a ROM device.
 */
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    if (mr == &io_mem_ram || mr == &io_mem_rom ||
        mr == &io_mem_notdirty || mr == &io_mem_watch) {
        return false;
    }
    return !mr->rom_device;
}
1
465,953
/* Issue a Hyper-V enlightened TLB flush for the vCPU's EPT.  The hypercall
 * wants the base address of the EPT PML4 table, so the low configuration
 * bits of the EPT pointer are masked off.  With a @range, a ranged flush is
 * requested via the fill-list callback; otherwise the whole guest mapping
 * is flushed. */
static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm, struct kvm_vcpu *vcpu, struct kvm_tlb_range *range) { u64 ept_pointer = to_vmx(vcpu)->ept_pointer; /* * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address * of the base of EPT PML4 table, strip off EPT configuration * information. */ if (range) return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK, kvm_fill_hv_flush_list_func, (void *)range); else return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK); }
0
521,392
/* Plugin sysvar check function for double-typed variables: read the proposed
 * value, clamp it to the variable's configured limits, store the clamped
 * result into *save, and raise a bounds warning if clamping occurred.
 * NOTE(review): the return value of value->val_real() is not checked here —
 * confirm that a NULL/failed value cannot reach this path. */
static int check_func_double(THD *thd, struct st_mysql_sys_var *var, void *save, st_mysql_value *value) { double v; my_bool fixed; struct my_option option; value->val_real(value, &v); plugin_opt_set_limits(&option, var); *(double *) save= getopt_double_limit_value(v, &option, &fixed); return throw_bounds_warning(thd, var->name, fixed, v); }
0
332,189
/* AVBuffer free callback for a V4L2 buffer: drop one context reference,
 * then either signal the reinit semaphore (when draining for reinit),
 * re-enqueue the buffer if streaming is still on, or tear the codec down
 * once the last reference is gone.  NOTE(review): the refcount decrement
 * and the later atomic_load are separate operations — verify no other
 * thread can race between them in the reinit/teardown paths. */
static void v4l2_free_buffer(void *opaque, uint8_t *unused) { V4L2Buffer* avbuf = opaque; V4L2m2mContext *s = buf_to_m2mctx(avbuf); atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel); if (s->reinit) { if (!atomic_load(&s->refcount)) sem_post(&s->refsync); return; } if (avbuf->context->streamon) { ff_v4l2_buffer_enqueue(avbuf); return; } if (!atomic_load(&s->refcount)) ff_v4l2_m2m_codec_end(s->avctx); }
1
248,643
// Default constructor for the mock observer; no state to initialize.
MockTopSitesObserver() {}
0
148,655
/* ->poll for a FUSE file: forwards the poll to the userspace filesystem via
 * a FUSE_POLL request and returns its revents.  Registers the file for
 * notification only when someone is actually waiting.  Falls back to
 * DEFAULT_POLLMASK (and latches fc->no_poll) when the server replies
 * -ENOSYS; other errors map to POLLERR. */
unsigned fuse_file_poll(struct file *file, poll_table *wait) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fc; struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; struct fuse_poll_out outarg; struct fuse_req *req; int err; if (fc->no_poll) return DEFAULT_POLLMASK; poll_wait(file, &ff->poll_wait, wait); /* * Ask for notification iff there's someone waiting for it. * The client may ignore the flag and always notify. */ if (waitqueue_active(&ff->poll_wait)) { inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY; fuse_register_polled_file(fc, ff); } req = fuse_get_req(fc); if (IS_ERR(req)) return POLLERR; req->in.h.opcode = FUSE_POLL; req->in.h.nodeid = ff->nodeid; req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; req->out.numargs = 1; req->out.args[0].size = sizeof(outarg); req->out.args[0].value = &outarg; fuse_request_send(fc, req); err = req->out.h.error; fuse_put_request(fc, req); if (!err) return outarg.revents; if (err == -ENOSYS) { fc->no_poll = 1; return DEFAULT_POLLMASK; } return POLLERR; }
0
425,818
/* Load @cr3 into the vCPU during a nested VMX transition.  Validates the
 * value (unless EPT translates it), reloads PDPTEs for PAE paging without
 * EPT, updates the MMU's root only when EPT is off, and finally records the
 * new CR3 and reinitializes the MMU context.  On failure sets
 * *entry_failure_code and returns 1; returns 0 on success. */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, u32 *entry_failure_code) { if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) { if (!nested_cr3_valid(vcpu, cr3)) { *entry_failure_code = ENTRY_FAIL_DEFAULT; return 1; } /* * If PAE paging and EPT are both on, CR3 is not used by the CPU and * must not be dereferenced. */ if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) && !nested_ept) { if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) { *entry_failure_code = ENTRY_FAIL_PDPTE; return 1; } } } if (!nested_ept) kvm_mmu_new_cr3(vcpu, cr3, false); vcpu->arch.cr3 = cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); kvm_init_mmu(vcpu, false); return 0; }
0
153,787
/*
 * XDR release function for the NFSv3 GETACL result: drop the file-handle
 * reference and both ACL references taken while building the reply.
 * Always returns 1, per the release-function convention.
 */
static int nfs3svc_release_getacl(struct svc_rqst *rqstp, __be32 *p,
				  struct nfsd3_getaclres *resp)
{
	struct posix_acl *acls[] = { resp->acl_access, resp->acl_default };
	size_t i;

	fh_put(&resp->fh);
	for (i = 0; i < sizeof(acls) / sizeof(acls[0]); i++)
		posix_acl_release(acls[i]);
	return 1;
}
0
335,892
/* CoreAudio render callback: copies up to one device buffer's worth of
 * frames from the voice's mix buffer into the interleaved stereo float
 * output, advancing the ring-buffer read position and the consumed-frame
 * counter under the voice mutex.  Returns 0 (and leaves the output
 * untouched) when fewer than a full buffer of samples is available.
 * NOTE(review): the "inInputTime = 0" assignments only clear the local
 * parameter copy and have no effect on the caller — presumably leftover
 * code; confirm before relying on them. */
static OSStatus audioDeviceIOProc( AudioDeviceID inDevice, const AudioTimeStamp* inNow, const AudioBufferList* inInputData, const AudioTimeStamp* inInputTime, AudioBufferList* outOutputData, const AudioTimeStamp* inOutputTime, void* hwptr) { UInt32 frame, frameCount; float *out = outOutputData->mBuffers[0].mData; HWVoiceOut *hw = hwptr; coreaudioVoiceOut *core = (coreaudioVoiceOut *) hwptr; int rpos, live; st_sample_t *src; #ifndef FLOAT_MIXENG #ifdef RECIPROCAL const float scale = 1.f / UINT_MAX; #else const float scale = UINT_MAX; #endif #endif if (coreaudio_lock (core, "audioDeviceIOProc")) { inInputTime = 0; return 0; } frameCount = core->audioDevicePropertyBufferFrameSize; live = core->live; /* if there are not enough samples, set signal and return */ if (live < frameCount) { inInputTime = 0; coreaudio_unlock (core, "audioDeviceIOProc(empty)"); return 0; } rpos = core->rpos; src = hw->mix_buf + rpos; /* fill buffer */ for (frame = 0; frame < frameCount; frame++) { #ifdef FLOAT_MIXENG *out++ = src[frame].l; /* left channel */ *out++ = src[frame].r; /* right channel */ #else #ifdef RECIPROCAL *out++ = src[frame].l * scale; /* left channel */ *out++ = src[frame].r * scale; /* right channel */ #else *out++ = src[frame].l / scale; /* left channel */ *out++ = src[frame].r / scale; /* right channel */ #endif #endif } rpos = (rpos + frameCount) % hw->samples; core->decr += frameCount; core->rpos = rpos; coreaudio_unlock (core, "audioDeviceIOProc"); return 0; }
0
404,566
/* Verify an ECC signature (ECDSA, EdDSA, or GOST) given as S-expressions.
 * Extracts the data and the (r,s) pair, rebuilds the public key from the
 * key parameters and/or named curve, cross-checks EdDSA flag consistency,
 * then dispatches to the matching low-level verify routine.  For ECDSA on
 * opaque (pre-hashed) data, the hash is truncated to the bit length of the
 * group order before verification.  All MPIs/points acquired along the way
 * are released in the common "leave" path.  Returns 0 on a good signature,
 * a gpg error code otherwise. */
ecc_verify (gcry_sexp_t s_sig, gcry_sexp_t s_data, gcry_sexp_t s_keyparms) { gcry_err_code_t rc; struct pk_encoding_ctx ctx; gcry_sexp_t l1 = NULL; char *curvename = NULL; gcry_mpi_t mpi_g = NULL; gcry_mpi_t mpi_q = NULL; gcry_mpi_t sig_r = NULL; gcry_mpi_t sig_s = NULL; gcry_mpi_t data = NULL; ECC_public_key pk; int sigflags; memset (&pk, 0, sizeof pk); _gcry_pk_util_init_encoding_ctx (&ctx, PUBKEY_OP_VERIFY, ecc_get_nbits (s_keyparms)); /* Extract the data. */ rc = _gcry_pk_util_data_to_mpi (s_data, &data, &ctx); if (rc) goto leave; if (DBG_CIPHER) log_mpidump ("ecc_verify data", data); /* * Extract the signature value. */ rc = _gcry_pk_util_preparse_sigval (s_sig, ecc_names, &l1, &sigflags); if (rc) goto leave; rc = sexp_extract_param (l1, NULL, (sigflags & PUBKEY_FLAG_EDDSA)? "/rs":"rs", &sig_r, &sig_s, NULL); if (rc) goto leave; if (DBG_CIPHER) { log_mpidump ("ecc_verify s_r", sig_r); log_mpidump ("ecc_verify s_s", sig_s); } if ((ctx.flags & PUBKEY_FLAG_EDDSA) ^ (sigflags & PUBKEY_FLAG_EDDSA)) { rc = GPG_ERR_CONFLICT; /* Inconsistent use of flag/algoname. */ goto leave; } /* * Extract the key. */ if ((ctx.flags & PUBKEY_FLAG_PARAM)) rc = sexp_extract_param (s_keyparms, NULL, "-p?a?b?g?n?h?/q", &pk.E.p, &pk.E.a, &pk.E.b, &mpi_g, &pk.E.n, &pk.E.h, &mpi_q, NULL); else rc = sexp_extract_param (s_keyparms, NULL, "/q", &mpi_q, NULL); if (rc) goto leave; if (mpi_g) { point_init (&pk.E.G); rc = _gcry_ecc_os2ec (&pk.E.G, mpi_g); if (rc) goto leave; } /* Add missing parameters using the optional curve parameter. */ sexp_release (l1); l1 = sexp_find_token (s_keyparms, "curve", 5); if (l1) { curvename = sexp_nth_string (l1, 1); if (curvename) { rc = _gcry_ecc_fill_in_curve (0, curvename, &pk.E, NULL); if (rc) goto leave; } } /* Guess required fields if a curve parameter has not been given. FIXME: This is a crude hacks. We need to fix that. */ if (!curvename) { pk.E.model = ((sigflags & PUBKEY_FLAG_EDDSA) ? 
MPI_EC_EDWARDS : MPI_EC_WEIERSTRASS); pk.E.dialect = ((sigflags & PUBKEY_FLAG_EDDSA) ? ECC_DIALECT_ED25519 : ECC_DIALECT_STANDARD); if (!pk.E.h) pk.E.h = mpi_const (MPI_C_ONE); } if (DBG_CIPHER) { log_debug ("ecc_verify info: %s/%s%s\n", _gcry_ecc_model2str (pk.E.model), _gcry_ecc_dialect2str (pk.E.dialect), (sigflags & PUBKEY_FLAG_EDDSA)? "+EdDSA":""); if (pk.E.name) log_debug ("ecc_verify name: %s\n", pk.E.name); log_printmpi ("ecc_verify p", pk.E.p); log_printmpi ("ecc_verify a", pk.E.a); log_printmpi ("ecc_verify b", pk.E.b); log_printpnt ("ecc_verify g", &pk.E.G, NULL); log_printmpi ("ecc_verify n", pk.E.n); log_printmpi ("ecc_verify h", pk.E.h); log_printmpi ("ecc_verify q", mpi_q); } if (!pk.E.p || !pk.E.a || !pk.E.b || !pk.E.G.x || !pk.E.n || !pk.E.h || !mpi_q) { rc = GPG_ERR_NO_OBJ; goto leave; } /* * Verify the signature. */ if ((sigflags & PUBKEY_FLAG_EDDSA)) { rc = _gcry_ecc_eddsa_verify (data, &pk, sig_r, sig_s, ctx.hash_algo, mpi_q); } else if ((sigflags & PUBKEY_FLAG_GOST)) { point_init (&pk.Q); rc = _gcry_ecc_os2ec (&pk.Q, mpi_q); if (rc) goto leave; rc = _gcry_ecc_gost_verify (data, &pk, sig_r, sig_s); } else { point_init (&pk.Q); if (pk.E.dialect == ECC_DIALECT_ED25519) { mpi_ec_t ec; /* Fixme: Factor the curve context setup out of eddsa_verify and ecdsa_verify. So that we don't do it twice. 
*/ ec = _gcry_mpi_ec_p_internal_new (pk.E.model, pk.E.dialect, 0, pk.E.p, pk.E.a, pk.E.b); rc = _gcry_ecc_eddsa_decodepoint (mpi_q, ec, &pk.Q, NULL, NULL); _gcry_mpi_ec_free (ec); } else { rc = _gcry_ecc_os2ec (&pk.Q, mpi_q); } if (rc) goto leave; if (mpi_is_opaque (data)) { const void *abuf; unsigned int abits, qbits; gcry_mpi_t a; qbits = mpi_get_nbits (pk.E.n); abuf = mpi_get_opaque (data, &abits); rc = _gcry_mpi_scan (&a, GCRYMPI_FMT_USG, abuf, (abits+7)/8, NULL); if (!rc) { if (abits > qbits) mpi_rshift (a, a, abits - qbits); rc = _gcry_ecc_ecdsa_verify (a, &pk, sig_r, sig_s); _gcry_mpi_release (a); } } else rc = _gcry_ecc_ecdsa_verify (data, &pk, sig_r, sig_s); } leave: _gcry_mpi_release (pk.E.p); _gcry_mpi_release (pk.E.a); _gcry_mpi_release (pk.E.b); _gcry_mpi_release (mpi_g); point_free (&pk.E.G); _gcry_mpi_release (pk.E.n); _gcry_mpi_release (pk.E.h); _gcry_mpi_release (mpi_q); point_free (&pk.Q); _gcry_mpi_release (data); _gcry_mpi_release (sig_r); _gcry_mpi_release (sig_s); xfree (curvename); sexp_release (l1); _gcry_pk_util_free_encoding_ctx (&ctx); if (DBG_CIPHER) log_debug ("ecc_verify => %s\n", rc?gpg_strerror (rc):"Good"); return rc; }
0
75,029
/*
 * Copy a compat (32-bit layout) itimerspec from user space into the native
 * 64-bit representation.  Returns 0 on success, -EFAULT when either
 * timespec cannot be fetched (interval is attempted first, as before).
 */
int get_compat_itimerspec64(struct itimerspec64 *its,
			    const struct compat_itimerspec __user *uits)
{
	if (__compat_get_timespec64(&its->it_interval, &uits->it_interval))
		return -EFAULT;
	if (__compat_get_timespec64(&its->it_value, &uits->it_value))
		return -EFAULT;
	return 0;
}
0
402,371
/* leases_db parser callback for the dynamic-share case.  If any recorded
 * file already matches our servicepath the lease request is rejected with
 * NT_STATUS_INVALID_PARAMETER; otherwise all file ids are copied into the
 * state (so leases on every other file can be broken) and the status is set
 * to NT_STATUS_OPLOCK_NOT_GRANTED. */
static void lease_match_parser_new_file( uint32_t num_files, const struct leases_db_file *files, struct lease_match_state *state) { uint32_t i; for (i = 0; i < num_files; i++) { const struct leases_db_file *f = &files[i]; if (strequal(state->servicepath, f->servicepath)) { state->match_status = NT_STATUS_INVALID_PARAMETER; return; } } /* Dynamic share case. Break leases on all other files. */ state->match_status = leases_db_copy_file_ids(state->mem_ctx, num_files, files, &state->ids); if (!NT_STATUS_IS_OK(state->match_status)) { return; } state->num_file_ids = num_files; state->match_status = NT_STATUS_OPLOCK_NOT_GRANTED; return; }
0
112,561
/*
 * Validate @string as an HTTP header value: it is acceptable only when it
 * contains no carriage-return, line-feed, or vertical-tab characters.
 */
cockpit_web_response_is_header_value (const gchar *string)
{
  return strpbrk (string, "\r\n\v") == NULL;
}
0
168,460
// Delegates step-range construction to the element's current input type,
// forwarding the any-step handling policy unchanged.
StepRange HTMLInputElement::createStepRange(AnyStepHandling anyStepHandling) const { return m_inputType->createStepRange(anyStepHandling); }
0
243,991
/*
 * Insert window position wpos at the head of two hash chains: the chain for
 * its hash value and the chain for its zero-run length.  A head of -1 marks
 * an empty chain, in which case the chain link for wpos is left untouched
 * (matching the original semantics exactly).
 */
static void updateHashChain(Hash* hash, size_t wpos, unsigned hashval,
                            unsigned short numzeros)
{
  int prev_hash = hash->head[hashval];
  int prev_zeros = hash->headz[numzeros];

  hash->val[wpos] = (int)hashval;
  if(prev_hash != -1) hash->chain[wpos] = prev_hash;
  hash->head[hashval] = wpos;

  hash->zeros[wpos] = numzeros;
  if(prev_zeros != -1) hash->chainz[wpos] = prev_zeros;
  hash->headz[numzeros] = wpos;
}
0
85,720
/* Emit the IKEv2 Authentication (AUTH) payload for this exchange.  Uses the
 * parent SA's state when st is a child clone.  Supports RSA-SIG and PSK
 * policies only (DSS is unimplemented).  NOTE(review): the RSA failure path
 * returns STF_FATAL + v2N_AUTHENTICATION_FAILED while the PSK path returns
 * STF_FAIL + the same notification — confirm the asymmetry is intentional. */
static stf_status ikev2_send_auth(struct connection *c, struct state *st, enum phase1_role role, unsigned int np, unsigned char *idhash_out, pb_stream *outpbs) { struct ikev2_a a; pb_stream a_pbs; struct state *pst = st; if (st->st_clonedfrom != 0) pst = state_with_serialno(st->st_clonedfrom); a.isaa_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); a.isaa_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } a.isaa_np = np; if (c->policy & POLICY_RSASIG) { a.isaa_type = v2_AUTH_RSA; } else if (c->policy & POLICY_PSK) { a.isaa_type = v2_AUTH_SHARED; } else { /* what else is there?... DSS not implemented. */ return STF_FAIL; } if (!out_struct(&a, &ikev2_a_desc, outpbs, &a_pbs)) return STF_INTERNAL_ERROR; if (c->policy & POLICY_RSASIG) { if (!ikev2_calculate_rsa_sha1(pst, role, idhash_out, &a_pbs)) return STF_FATAL + v2N_AUTHENTICATION_FAILED; } else if (c->policy & POLICY_PSK) { if (!ikev2_calculate_psk_auth(pst, role, idhash_out, &a_pbs)) return STF_FAIL + v2N_AUTHENTICATION_FAILED; } close_output_pbs(&a_pbs); return STF_OK; }
0
207,242
// Constructs the promise, copying the latency info to report when the swap
// completes.
explicit AlwaysDrawSwapPromise(const ui::LatencyInfo& latency_info) : latency_info_(latency_info) {}
0
354,780
/* ->poll for the "bad file" stub operations: always reports POLLERR. */
static unsigned int bad_file_poll(struct file *filp, poll_table *wait) { return POLLERR; }
0
421,192
/* Transmit the pending tx_data of a multiplexed HSO serial port via a
 * SEND_ENCAPSULATED_COMMAND control request on its port.  Returns -EINVAL
 * for a NULL serial, otherwise the result of mux_device_request(). */
static int hso_mux_serial_write_data(struct hso_serial *serial) { if (NULL == serial) return -EINVAL; return mux_device_request(serial, USB_CDC_SEND_ENCAPSULATED_COMMAND, serial->parent->port_spec & HSO_PORT_MASK, serial->tx_urb, &serial->ctrl_req_tx, serial->tx_data, serial->tx_data_count); }
0
434,445
/* Compare the computed quota usage in qctx against the on-disk quota file
 * of @qtype and report via *usage_inconsistent whether they diverge.  Scans
 * on-disk dquots (updating limits, flagging inconsistent usage), then walks
 * the in-memory dict to warn about ids never seen on disk.  A scan failure
 * is treated as inconsistent.  The quota file is closed on all paths. */
errcode_t quota_compare_and_update(quota_ctx_t qctx, enum quota_type qtype, int *usage_inconsistent) { struct quota_handle qh; struct scan_dquots_data scan_data; struct dquot *dq; dnode_t *n; dict_t *dict = qctx->quota_dict[qtype]; errcode_t err = 0; if (!dict) goto out; err = quota_file_open(qctx, &qh, 0, qtype, -1, 0); if (err) { log_debug("Open quota file failed"); goto out; } scan_data.quota_dict = qctx->quota_dict[qtype]; scan_data.update_limits = 1; scan_data.update_usage = 0; scan_data.check_consistency = 1; scan_data.usage_is_inconsistent = 0; err = qh.qh_ops->scan_dquots(&qh, scan_dquots_callback, &scan_data); if (err) { log_debug("Error scanning dquots"); *usage_inconsistent = 1; goto out_close_qh; } for (n = dict_first(dict); n; n = dict_next(dict, n)) { dq = dnode_get(n); if (!dq) continue; if ((dq->dq_flags & DQF_SEEN) == 0) { fprintf(stderr, "[QUOTA WARNING] " "Missing quota entry ID %d\n", dq->dq_id); scan_data.usage_is_inconsistent = 1; } } *usage_inconsistent = scan_data.usage_is_inconsistent; out_close_qh: err = quota_file_close(qctx, &qh); if (err) { log_debug("Cannot close quotafile: %s", error_message(errno)); if (qh.qh_qf.e2_file) ext2fs_file_close(qh.qh_qf.e2_file); } out: return err; }
0
43,999
/* Compute the entropy of @image: gathers full channel statistics and
 * returns the composite-channel entropy through *entropy.  Returns
 * MagickFalse only when the statistics could not be allocated. */
MagickExport MagickBooleanType GetImageEntropy(const Image *image, double *entropy,ExceptionInfo *exception) { ChannelStatistics *channel_statistics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_statistics=GetImageStatistics(image,exception); if (channel_statistics == (ChannelStatistics *) NULL) return(MagickFalse); *entropy=channel_statistics[CompositePixelChannel].entropy; channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(MagickTrue); }
0
216,246
// Look up the sync item registered under |id|; returns NULL when no such
// item exists in the map.
AppListSyncableService::GetSyncItem(const std::string& id) const {
  SyncItemMap::const_iterator it = sync_items_.find(id);
  if (it == sync_items_.end())
    return NULL;
  return it->second;
}
0
518,295
/* Restrict this (not yet created) temporary table's key set: keep only the
 * key at index key_to_save (moved into slot 0), or drop all keys when
 * key_to_save is negative.  NOTE(review): the memmove copies exactly one
 * KEY element, so only a single key survives by design — confirm callers
 * never expect more than one. */
void TABLE::use_index(int key_to_save) { uint i= 1; DBUG_ASSERT(!created && key_to_save < (int)s->keys); if (key_to_save >= 0) /* Save the given key. */ memmove(key_info, key_info + key_to_save, sizeof(KEY)); else /* Drop all keys; */ i= 0; s->keys= i; }
0
512,685
/*
 * Initialize an scp session: validate state, open an SSH channel, quote
 * the remote location (worst case 3 bytes per input byte plus NUL, to
 * defend against shell metacharacters) and exec the remote "scp -t"
 * (write mode) or "scp -f" (read mode) command, optionally with -r for
 * recursive transfers.  In write mode the remote side's first status
 * response is consumed; in read mode a single NUL byte is written to
 * start the transfer.  Returns SSH_OK or SSH_ERROR; on any failure the
 * session error is set and scp->state becomes SSH_SCP_ERROR.
 * NOTE(review): error paths after ssh_channel_new() do not free the
 * channel here -- presumably released by the scp teardown path; confirm.
 */
int ssh_scp_init(ssh_scp scp) { int rc; char execbuffer[1024] = {0}; char *quoted_location = NULL; size_t quoted_location_len = 0; size_t scp_location_len; if (scp == NULL) { return SSH_ERROR; } if (scp->state != SSH_SCP_NEW) { ssh_set_error(scp->session, SSH_FATAL, "ssh_scp_init called under invalid state"); return SSH_ERROR; } if (scp->location == NULL) { ssh_set_error(scp->session, SSH_FATAL, "Invalid scp context: location is NULL"); return SSH_ERROR; } SSH_LOG(SSH_LOG_PROTOCOL, "Initializing scp session %s %son location '%s'", scp->mode == SSH_SCP_WRITE?"write":"read", scp->recursive ? "recursive " : "", scp->location); scp->channel = ssh_channel_new(scp->session); if (scp->channel == NULL) { ssh_set_error(scp->session, SSH_FATAL, "Channel creation failed for scp"); scp->state = SSH_SCP_ERROR; return SSH_ERROR; } rc = ssh_channel_open_session(scp->channel); if (rc == SSH_ERROR) { ssh_set_error(scp->session, SSH_FATAL, "Failed to open channel for scp"); scp->state = SSH_SCP_ERROR; return SSH_ERROR; } /* In the worst case, each character would be replaced by 3 plus the string * terminator '\0' */ scp_location_len = strlen(scp->location); quoted_location_len = ((size_t)3 * scp_location_len) + 1; /* Paranoia check */ if (quoted_location_len < scp_location_len) { ssh_set_error(scp->session, SSH_FATAL, "Buffer overflow detected"); scp->state = SSH_SCP_ERROR; return SSH_ERROR; } quoted_location = (char *)calloc(1, quoted_location_len); if (quoted_location == NULL) { ssh_set_error(scp->session, SSH_FATAL, "Failed to allocate memory for quoted location"); scp->state = SSH_SCP_ERROR; return SSH_ERROR; } rc = ssh_quote_file_name(scp->location, quoted_location, quoted_location_len); if (rc <= 0) { ssh_set_error(scp->session, SSH_FATAL, "Failed to single quote command location"); SAFE_FREE(quoted_location); scp->state = SSH_SCP_ERROR; return SSH_ERROR; } if (scp->mode == SSH_SCP_WRITE) { snprintf(execbuffer, sizeof(execbuffer), "scp -t %s %s", scp->recursive ? 
"-r" : "", quoted_location); } else { snprintf(execbuffer, sizeof(execbuffer), "scp -f %s %s", scp->recursive ? "-r" : "", quoted_location); } SAFE_FREE(quoted_location); SSH_LOG(SSH_LOG_DEBUG, "Executing command: %s", execbuffer); rc = ssh_channel_request_exec(scp->channel, execbuffer); if (rc == SSH_ERROR){ ssh_set_error(scp->session, SSH_FATAL, "Failed executing command: %s", execbuffer); scp->state = SSH_SCP_ERROR; return SSH_ERROR; } if (scp->mode == SSH_SCP_WRITE) { rc = ssh_scp_response(scp, NULL); if (rc != 0) { return SSH_ERROR; } } else { ssh_channel_write(scp->channel, "", 1); } if (scp->mode == SSH_SCP_WRITE) { scp->state = SSH_SCP_WRITE_INITED; } else { scp->state = SSH_SCP_READ_INITED; } return SSH_OK; }
0
299,037
// Convert a DCF string atom into a metadata entry: the key name is
// resolved from the atom's type and the value wraps the atom's string.
// The new Entry takes ownership of the allocated value.  Always returns
// AP4_SUCCESS.
AP4_MetaData::AddDcfStringEntry(AP4_DcfStringAtom* atom, const char* namespc) { AP4_String key_name; ResolveKeyName(atom->GetType(), key_name); AP4_MetaData::Value* value = new AP4_StringMetaDataValue(atom->GetValue().GetChars()); m_Entries.Add(new Entry(key_name.GetChars(), namespc, value)); return AP4_SUCCESS; }
0
448,287
/*
 * Decide whether a READ reply should carry the EOF flag: either a
 * non-zero request came back with no data, or the read reached the
 * file's current size.  Returns 1 for EOF, 0 otherwise.
 */
static u32 nfsd_eof_on_read(struct file *file, loff_t offset, ssize_t len, size_t expected) { if (expected != 0 && len == 0) return 1; if (offset+len >= i_size_read(file_inode(file))) return 1; return 0; }
0
468,043
/*
 * Register a gendisk's request_queue with sysfs and blktrace, and bring
 * a lazily-initialized queue fully online (switching q_usage_counter to
 * percpu mode).  q->sysfs_lock is held across kobject, attribute-group,
 * mq and elevator registration to exclude concurrent sysfs changes; the
 * failure paths unwind the kobjects and blktrace state in reverse order.
 * Returns 0 on success or a negative errno.
 */
int blk_register_queue(struct gendisk *disk) { int ret; struct device *dev = disk_to_dev(disk); struct request_queue *q = disk->queue; if (WARN_ON(!q)) return -ENXIO; WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags), "%s is registering an already registered queue\n", kobject_name(&dev->kobj)); blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); /* * SCSI probing may synchronously create and destroy a lot of * request_queues for non-existent devices. Shutting down a fully * functional queue takes measureable wallclock time as RCU grace * periods are involved. To avoid excessive latency in these * cases, a request_queue starts out in a degraded mode which is * faster to shut down and is made fully functional here as * request_queues for non-existent devices never get registered. */ if (!blk_queue_init_done(q)) { blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q); percpu_ref_switch_to_percpu(&q->q_usage_counter); } ret = blk_trace_init_sysfs(dev); if (ret) return ret; /* Prevent changes through sysfs until registration is completed. */ mutex_lock(&q->sysfs_lock); ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); if (ret < 0) { blk_trace_remove_sysfs(dev); goto unlock; } ret = sysfs_create_group(&q->kobj, &queue_attr_group); if (ret) { blk_trace_remove_sysfs(dev); kobject_del(&q->kobj); kobject_put(&dev->kobj); goto unlock; } if (queue_is_mq(q)) { __blk_mq_register_dev(dev, q); blk_mq_debugfs_register(q); } kobject_uevent(&q->kobj, KOBJ_ADD); wbt_enable_default(q); blk_throtl_register_queue(q); if (q->elevator) { ret = elv_register_queue(q); if (ret) { mutex_unlock(&q->sysfs_lock); kobject_uevent(&q->kobj, KOBJ_REMOVE); kobject_del(&q->kobj); blk_trace_remove_sysfs(dev); kobject_put(&dev->kobj); return ret; } } ret = 0; unlock: mutex_unlock(&q->sysfs_lock); return ret; }
0
200,777
// IPC handler (IO thread): clone the blob registered at |src_url| to
// |url| and record |url| as owned by this renderer.
// NOTE(review): |url| and |src_url| arrive from the renderer and are
// passed to the blob controller without validation here -- confirm the
// browser-side checks happen upstream.
void FileAPIMessageFilter::OnCloneBlob( const GURL& url, const GURL& src_url) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); blob_storage_context_->controller()->CloneBlob(url, src_url); blob_urls_.insert(url.spec()); }
0
86,529
/*
 * Return the LLC PDU header length to reserve for this socket/address:
 * TEST and XID requests always use the U (unnumbered) format; otherwise
 * stream sockets use the I (information) format and everything else
 * falls back to the U format.
 */
static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
{
	if (addr->sllc_test || addr->sllc_xid)
		return LLC_PDU_LEN_U;
	return (sk->sk_type == SOCK_STREAM) ? LLC_PDU_LEN_I : LLC_PDU_LEN_U;
}
0
117,559
// Regression test: destroying the server transport without an explicit
// close must not fire onConnectionError/onConnectionEnd, but must cancel
// the pending delivery callback and surface a read error exactly once.
TEST_F(QuicServerTransportTest, DestroyWithoutClosing) { StreamId streamId = server->createBidirectionalStream().value(); MockReadCallback readCb; server->setReadCallback(streamId, &readCb); EXPECT_CALL(connCallback, onConnectionError(_)).Times(0); EXPECT_CALL(connCallback, onConnectionEnd()).Times(0); MockDeliveryCallback deliveryCallback; auto write = IOBuf::copyBuffer("no"); server->writeChain(streamId, write->clone(), true, &deliveryCallback); EXPECT_CALL(deliveryCallback, onCanceled(_, _)); EXPECT_CALL(readCb, readError(_, _)); server.reset(); }
0
302,550
/*
 * State function for an inbound ECNE (Explicit Congestion Notification
 * Echo) chunk: after verification-tag and minimum-length validation,
 * pull the ECNE header and queue an SCTP_CMD_ECN_ECNE side effect
 * carrying the chunk's lowest TSN (CWR generation happens in the command
 * handler).  Malformed chunks are discarded or reported as a protocol
 * violation; otherwise the chunk is consumed.
 */
sctp_disposition_t sctp_sf_do_ecne(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { sctp_ecnehdr_t *ecne; struct sctp_chunk *chunk = arg; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); ecne = (sctp_ecnehdr_t *) chunk->skb->data; skb_pull(chunk->skb, sizeof(sctp_ecnehdr_t)); /* If this is a newer ECNE than the last CWR packet we sent out */ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE, SCTP_U32(ntohl(ecne->lowest_tsn))); return SCTP_DISPOSITION_CONSUME; }
0
249,583
// Returns the tab-specific permission set recorded for |tab_id|, or NULL
// when none has been granted.  Thread-safe: reads the map under
// |runtime_lock_|.  |tab_id| must be non-negative.
scoped_refptr<const PermissionSet> PermissionsData::GetTabSpecificPermissions(
    int tab_id) const {
  base::AutoLock auto_lock(runtime_lock_);
  CHECK_GE(tab_id, 0);
  auto iter = tab_specific_permissions_.find(tab_id);
  if (iter == tab_specific_permissions_.end())
    return NULL;
  return iter->second;
}
0
250,075
// Completion callback for the injected content script: on success
// populate the extension-function result, otherwise record |error|;
// either way send the response with the corresponding success flag.
void TabsUpdateFunction::OnExecuteCodeFinished( const std::string& error, const GURL& url, const base::ListValue& script_result) { if (error.empty()) PopulateResult(); else error_ = error; SendResponse(error.empty()); }
0
249,577
// Forward the document-move notification to both base classes; the
// listed (form-associated) element bookkeeping runs before the generic
// HTMLElement handling.
void HTMLFormControlElement::didMoveToNewDocument(Document& oldDocument) { ListedElement::didMoveToNewDocument(oldDocument); HTMLElement::didMoveToNewDocument(oldDocument); }
0
219,869
// Translate a blink GestureScrollUpdate into the cc snap-fling update
// info.  The deltas are negated to convert from blink's scroll-delta
// convention to cc's; the inertial flag is set while the gesture is in
// its momentum (fling) phase.
cc::SnapFlingController::GestureScrollUpdateInfo GetGestureScrollUpdateInfo( const WebGestureEvent& event) { cc::SnapFlingController::GestureScrollUpdateInfo info; info.delta = gfx::Vector2dF(-event.data.scroll_update.delta_x, -event.data.scroll_update.delta_y); info.is_in_inertial_phase = event.data.scroll_update.inertial_phase == blink::WebGestureEvent::InertialPhaseState::kMomentum; info.event_time = event.TimeStamp(); return info; }
0
415,262
/*
 * Drain any remaining output from the pipe's IO channel into its log
 * file, flushing after each chunk.  Loops until the channel status is no
 * longer G_IO_STATUS_NORMAL (EOF, AGAIN or ERROR all terminate).
 */
pipe_echo_finish (Pipe *pipe) { GIOStatus status; gsize bytes_read; char buf[512]; do { bytes_read = 0; status = g_io_channel_read_chars (pipe->channel, buf, sizeof (buf), &bytes_read, NULL); if (bytes_read) { fprintf (pipe->logf, "%.*s", (int) bytes_read, buf); fflush (pipe->logf); } } while (status == G_IO_STATUS_NORMAL); }
0
345,771
/*
 * Build the SOAP typemap hash from the user-supplied 'typemap' option.
 * Each entry may specify type_name, type_ns, to_xml and from_xml
 * callbacks; a fresh encode structure is created per entry, seeded from
 * the matching SDL encoder (or the UNKNOWN_TYPE conversion when none
 * matches) and stored under the key "ns:name" (or just "name").
 * Registered PHP callbacks are ref-counted, and existing map callbacks
 * are inherited when the user supplied none.  Returns the new HashTable,
 * or NULL (after a warning) if an entry is not an array.
 * NOTE(review): the early "Wrong 'typemap' option" return does not free a
 * partially built typemap -- confirm the caller's ownership expectations.
 */
static HashTable* soap_create_typemap(sdlPtr sdl, HashTable *ht TSRMLS_DC) { zval **tmp; HashTable *ht2; HashPosition pos1, pos2; HashTable *typemap = NULL; zend_hash_internal_pointer_reset_ex(ht, &pos1); while (zend_hash_get_current_data_ex(ht, (void**)&tmp, &pos1) == SUCCESS) { char *type_name = NULL; char *type_ns = NULL; zval *to_xml = NULL; zval *to_zval = NULL; encodePtr enc, new_enc; if (Z_TYPE_PP(tmp) != IS_ARRAY) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Wrong 'typemap' option"); return NULL; } ht2 = Z_ARRVAL_PP(tmp); zend_hash_internal_pointer_reset_ex(ht2, &pos2); while (zend_hash_get_current_data_ex(ht2, (void**)&tmp, &pos2) == SUCCESS) { char *name = NULL; unsigned int name_len; ulong index; zend_hash_get_current_key_ex(ht2, &name, &name_len, &index, 0, &pos2); if (name) { if (name_len == sizeof("type_name") && strncmp(name, "type_name", sizeof("type_name")-1) == 0) { if (Z_TYPE_PP(tmp) == IS_STRING) { type_name = Z_STRVAL_PP(tmp); } else if (Z_TYPE_PP(tmp) != IS_NULL) { } } else if (name_len == sizeof("type_ns") && strncmp(name, "type_ns", sizeof("type_ns")-1) == 0) { if (Z_TYPE_PP(tmp) == IS_STRING) { type_ns = Z_STRVAL_PP(tmp); } else if (Z_TYPE_PP(tmp) != IS_NULL) { } } else if (name_len == sizeof("to_xml") && strncmp(name, "to_xml", sizeof("to_xml")-1) == 0) { to_xml = *tmp; } else if (name_len == sizeof("from_xml") && strncmp(name, "from_xml", sizeof("from_xml")-1) == 0) { to_zval = *tmp; } } zend_hash_move_forward_ex(ht2, &pos2); } if (type_name) { smart_str nscat = {0}; if (type_ns) { enc = get_encoder(sdl, type_ns, type_name); } else { enc = get_encoder_ex(sdl, type_name, strlen(type_name)); } new_enc = emalloc(sizeof(encode)); memset(new_enc, 0, sizeof(encode)); if (enc) { new_enc->details.type = enc->details.type; new_enc->details.ns = estrdup(enc->details.ns); new_enc->details.type_str = estrdup(enc->details.type_str); new_enc->details.sdl_type = enc->details.sdl_type; } else { enc = get_conversion(UNKNOWN_TYPE); new_enc->details.type 
= enc->details.type; if (type_ns) { new_enc->details.ns = estrdup(type_ns); } new_enc->details.type_str = estrdup(type_name); } new_enc->to_xml = enc->to_xml; new_enc->to_zval = enc->to_zval; new_enc->details.map = emalloc(sizeof(soapMapping)); memset(new_enc->details.map, 0, sizeof(soapMapping)); if (to_xml) { zval_add_ref(&to_xml); new_enc->details.map->to_xml = to_xml; new_enc->to_xml = to_xml_user; } else if (enc->details.map && enc->details.map->to_xml) { zval_add_ref(&enc->details.map->to_xml); new_enc->details.map->to_xml = enc->details.map->to_xml; } if (to_zval) { zval_add_ref(&to_zval); new_enc->details.map->to_zval = to_zval; new_enc->to_zval = to_zval_user; } else if (enc->details.map && enc->details.map->to_zval) { zval_add_ref(&enc->details.map->to_zval); new_enc->details.map->to_zval = enc->details.map->to_zval; } if (!typemap) { typemap = emalloc(sizeof(HashTable)); zend_hash_init(typemap, 0, NULL, delete_encoder, 0); } if (type_ns) { smart_str_appends(&nscat, type_ns); smart_str_appendc(&nscat, ':'); } smart_str_appends(&nscat, type_name); smart_str_0(&nscat); zend_hash_update(typemap, nscat.c, nscat.len + 1, &new_enc, sizeof(encodePtr), NULL); smart_str_free(&nscat); } zend_hash_move_forward_ex(ht, &pos1); } return typemap; }
1
90,506
// Allocate and return the postprocessed image as a single malloc'ed
// libraw_processed_image_t (header immediately followed by the pixel
// data, copied via copy_mem_image).  On allocation failure returns NULL
// and, when |errcode| is non-NULL, sets *errcode to ENOMEM.  The caller
// owns the returned buffer.
// NOTE(review): stride and ds are computed with int/unsigned arithmetic;
// very large width*colors*bps could overflow -- confirm dimensions are
// bounded upstream.
libraw_processed_image_t *LibRaw::dcraw_make_mem_image(int *errcode) { int width, height, colors, bps; get_mem_image_format(&width, &height, &colors, &bps); int stride = width * (bps/8) * colors; unsigned ds = height * stride; libraw_processed_image_t *ret = (libraw_processed_image_t*)::malloc(sizeof(libraw_processed_image_t)+ds); if(!ret) { if(errcode) *errcode= ENOMEM; return NULL; } memset(ret,0,sizeof(libraw_processed_image_t)); // metadata init ret->type = LIBRAW_IMAGE_BITMAP; ret->height = height; ret->width = width; ret->colors = colors; ret->bits = bps; ret->data_size = ds; copy_mem_image(ret->data, stride, 0); return ret; }
0
439,120
/*
 * Write one "LangName:" record to the sfd file: the language id followed
 * by each name string encoded via SFDDumpUTF7Str.  Trailing NULL names
 * are trimmed before output so only the populated prefix is written.
 */
static void SFDDumpLangName(FILE *sfd, struct ttflangname *ln) { int i, end; fprintf( sfd, "LangName: %d", ln->lang ); for ( end = ttf_namemax; end>0 && ln->names[end-1]==NULL; --end ); for ( i=0; i<end; ++i ) { putc(' ',sfd); SFDDumpUTF7Str(sfd,ln->names[i]); } putc('\n',sfd); }
0
87,286
/*
 * Map an MPEG-4 visual profile_and_level indication byte to its
 * human-readable name via the M4VProfiles table.  Unknown codes yield
 * "ISO Reserved Profile".
 */
const char *gf_m4v_get_profile_name(u8 video_pl)
{
	u32 idx;
	u32 nb_profiles = GF_ARRAY_LENGTH(M4VProfiles);
	for (idx = 0; idx < nb_profiles; idx++) {
		if (M4VProfiles[idx].value == (u32)video_pl)
			return M4VProfiles[idx].name;
	}
	return "ISO Reserved Profile";
}
0
258,704
/*
 * Read the TX flow-control pause-frame timer field from
 * GMAC_FC_TX_TIMER_REG into *newval.
 */
static void hns_gmac_get_tx_auto_pause_frames(void *mac_drv, u16 *newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; *newval = dsaf_get_dev_field(drv, GMAC_FC_TX_TIMER_REG, GMAC_FC_TX_TIMER_M, GMAC_FC_TX_TIMER_S); }
0
232,017
// Construct a regular file entry under |parent|: not a directory, not a
// hosted document, with the document kind left UNKNOWN until the entry
// is initialized from server data.
GDataFile::GDataFile(GDataDirectory* parent, GDataRootDirectory* root) : GDataEntry(parent, root), kind_(DocumentEntry::UNKNOWN), is_hosted_document_(false) { file_info_.is_directory = false; }
0
498,583
/*
 * transformTableLikeClause
 *
 * Process CREATE TABLE ... (LIKE src): open and permission-check the
 * source relation, then copy its column definitions -- and, as selected
 * by the LIKE options, defaults, identity sequences, storage, comments
 * and extended statistics -- into the CreateStmtContext at the position
 * of the LIKE clause (required by SQL99).  CHECK constraints and indexes
 * need the final column numbers, so those options are deferred: the
 * clause (with the relation OID pinned) is queued on cxt->likeclauses
 * for expandTableLikeClause.  The AccessShareLock on the source relation
 * is kept until transaction commit so it cannot change underneath us.
 */
transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_clause) { AttrNumber parent_attno; Relation relation; TupleDesc tupleDesc; TupleConstr *constr; AclResult aclresult; char *comment; ParseCallbackState pcbstate; setup_parser_errposition_callback(&pcbstate, cxt->pstate, table_like_clause->relation->location); /* we could support LIKE in many cases, but worry about it another day */ if (cxt->isforeign) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("LIKE is not supported for creating foreign tables"))); /* Open the relation referenced by the LIKE clause */ relation = relation_openrv(table_like_clause->relation, AccessShareLock); if (relation->rd_rel->relkind != RELKIND_RELATION && relation->rd_rel->relkind != RELKIND_VIEW && relation->rd_rel->relkind != RELKIND_MATVIEW && relation->rd_rel->relkind != RELKIND_COMPOSITE_TYPE && relation->rd_rel->relkind != RELKIND_FOREIGN_TABLE && relation->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a table, view, materialized view, composite type, or foreign table", RelationGetRelationName(relation)))); cancel_parser_errposition_callback(&pcbstate); /* * Check for privileges */ if (relation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE) { aclresult = pg_type_aclcheck(relation->rd_rel->reltype, GetUserId(), ACL_USAGE); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, OBJECT_TYPE, RelationGetRelationName(relation)); } else { aclresult = pg_class_aclcheck(RelationGetRelid(relation), GetUserId(), ACL_SELECT); if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, get_relkind_objtype(relation->rd_rel->relkind), RelationGetRelationName(relation)); } tupleDesc = RelationGetDescr(relation); constr = tupleDesc->constr; /* * Insert the copied attributes into the cxt for the new table definition. * We must do this now so that they appear in the table in the relative * position where the LIKE clause is, as required by SQL99. 
*/ for (parent_attno = 1; parent_attno <= tupleDesc->natts; parent_attno++) { Form_pg_attribute attribute = TupleDescAttr(tupleDesc, parent_attno - 1); char *attributeName = NameStr(attribute->attname); ColumnDef *def; /* * Ignore dropped columns in the parent. */ if (attribute->attisdropped) continue; /* * Create a new column, which is marked as NOT inherited. * * For constraints, ONLY the NOT NULL constraint is inherited by the * new column definition per SQL99. */ def = makeNode(ColumnDef); def->colname = pstrdup(attributeName); def->typeName = makeTypeNameFromOid(attribute->atttypid, attribute->atttypmod); def->inhcount = 0; def->is_local = true; def->is_not_null = attribute->attnotnull; def->is_from_type = false; def->storage = 0; def->raw_default = NULL; def->cooked_default = NULL; def->collClause = NULL; def->collOid = attribute->attcollation; def->constraints = NIL; def->location = -1; /* * Add to column list */ cxt->columns = lappend(cxt->columns, def); /* * Copy default, if present and the default has been requested */ if (attribute->atthasdef && (table_like_clause->options & CREATE_TABLE_LIKE_DEFAULTS)) { Node *this_default = NULL; AttrDefault *attrdef; int i; /* Find default in constraint structure */ Assert(constr != NULL); attrdef = constr->defval; for (i = 0; i < constr->num_defval; i++) { if (attrdef[i].adnum == parent_attno) { this_default = stringToNode(attrdef[i].adbin); break; } } Assert(this_default != NULL); /* * If default expr could contain any vars, we'd need to fix 'em, * but it can't; so default is ready to apply to child. 
*/ def->cooked_default = this_default; } /* * Copy identity if requested */ if (attribute->attidentity && (table_like_clause->options & CREATE_TABLE_LIKE_IDENTITY)) { Oid seq_relid; List *seq_options; /* * find sequence owned by old column; extract sequence parameters; * build new create sequence command */ seq_relid = getOwnedSequence(RelationGetRelid(relation), attribute->attnum); seq_options = sequence_options(seq_relid); generateSerialExtraStmts(cxt, def, InvalidOid, seq_options, true, NULL, NULL); def->identity = attribute->attidentity; } /* Likewise, copy storage if requested */ if (table_like_clause->options & CREATE_TABLE_LIKE_STORAGE) def->storage = attribute->attstorage; else def->storage = 0; /* Likewise, copy comment if requested */ if ((table_like_clause->options & CREATE_TABLE_LIKE_COMMENTS) && (comment = GetComment(attribute->attrelid, RelationRelationId, attribute->attnum)) != NULL) { CommentStmt *stmt = makeNode(CommentStmt); stmt->objtype = OBJECT_COLUMN; stmt->object = (Node *) list_make3(makeString(cxt->relation->schemaname), makeString(cxt->relation->relname), makeString(def->colname)); stmt->comment = comment; cxt->alist = lappend(cxt->alist, stmt); } } /* We use oids if at least one LIKE'ed table has oids. */ cxt->hasoids |= relation->rd_rel->relhasoids; /* * We cannot yet deal with CHECK constraints or indexes, since we don't * yet know what column numbers the copied columns will have in the * finished table. If any of those options are specified, add the LIKE * clause to cxt->likeclauses so that expandTableLikeClause will be called * after we do know that. Also, remember the relation OID so that * expandTableLikeClause is certain to open the same table. 
*/ if (table_like_clause->options & (CREATE_TABLE_LIKE_CONSTRAINTS | CREATE_TABLE_LIKE_INDEXES)) { table_like_clause->relationOid = RelationGetRelid(relation); cxt->likeclauses = lappend(cxt->likeclauses, table_like_clause); } /* * We may copy extended statistics if requested, since the representation * of CreateStatsStmt doesn't depend on column numbers. */ if (table_like_clause->options & CREATE_TABLE_LIKE_STATISTICS) { List *parent_extstats; ListCell *l; parent_extstats = RelationGetStatExtList(relation); foreach(l, parent_extstats) { Oid parent_stat_oid = lfirst_oid(l); CreateStatsStmt *stats_stmt; stats_stmt = generateClonedExtStatsStmt(cxt->relation, RelationGetRelid(relation), parent_stat_oid); /* Copy comment on statistics object, if requested */ if (table_like_clause->options & CREATE_TABLE_LIKE_COMMENTS) { comment = GetComment(parent_stat_oid, StatisticExtRelationId, 0); /* * We make use of CreateStatsStmt's stxcomment option, so as * not to need to know now what name the statistics will have. */ stats_stmt->stxcomment = comment; } cxt->extstats = lappend(cxt->extstats, stats_stmt); } list_free(parent_extstats); } /* * Close the parent rel, but keep our AccessShareLock on it until xact * commit. That will prevent someone else from deleting or ALTERing the * parent before we can run expandTableLikeClause. */ heap_close(relation, NoLock); }
0
102,168
/*
 * Decide whether the candidate sched_group @sg (with stats @sgs) should
 * replace the currently recorded busiest group in @sds.  Ordering of the
 * checks: skip unhelpable misfit groups, compare group type, then average
 * load, then apply asymmetric-CPU-capacity and ASYM_PACKING tie-breaks.
 * Returns true when @sg becomes the new busiest candidate.
 */
static bool update_sd_pick_busiest(struct lb_env *env, struct sd_lb_stats *sds, struct sched_group *sg, struct sg_lb_stats *sgs) { struct sg_lb_stats *busiest = &sds->busiest_stat; /* * Don't try to pull misfit tasks we can't help. * We can use max_capacity here as reduction in capacity on some * CPUs in the group should either be possible to resolve * internally or be covered by avg_load imbalance (eventually). */ if (sgs->group_type == group_misfit_task && (!group_smaller_max_cpu_capacity(sg, sds->local) || !group_has_capacity(env, &sds->local_stat))) return false; if (sgs->group_type > busiest->group_type) return true; if (sgs->group_type < busiest->group_type) return false; if (sgs->avg_load <= busiest->avg_load) return false; if (!(env->sd->flags & SD_ASYM_CPUCAPACITY)) goto asym_packing; /* * Candidate sg has no more than one task per CPU and * has higher per-CPU capacity. Migrating tasks to less * capable CPUs may harm throughput. Maximize throughput, * power/energy consequences are not considered. */ if (sgs->sum_nr_running <= sgs->group_weight && group_smaller_min_cpu_capacity(sds->local, sg)) return false; /* * If we have more than one misfit sg go with the biggest misfit. */ if (sgs->group_type == group_misfit_task && sgs->group_misfit_task_load < busiest->group_misfit_task_load) return false; asym_packing: /* This is the busiest node in its class. */ if (!(env->sd->flags & SD_ASYM_PACKING)) return true; /* No ASYM_PACKING if target CPU is already busy */ if (env->idle == CPU_NOT_IDLE) return true; /* * ASYM_PACKING needs to move all the work to the highest * prority CPUs in the group, therefore mark all groups * of lower priority than ourself as busy. */ if (sgs->sum_nr_running && sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) { if (!sds->busiest) return true; /* Prefer to move from lowest priority CPU's work */ if (sched_asym_prefer(sds->busiest->asym_prefer_cpu, sg->asym_prefer_cpu)) return true; } return false; }
0
12,038
// Success path for a session-description request: wrap the descriptor in
// an RTCSessionDescription, invoke the success callback if one is set,
// then release all held callbacks/state via clear().
// NOTE(review): the callback runs before clear(); confirm it cannot
// re-enter this object in a way that conflicts with the later clear().
void RTCSessionDescriptionRequestImpl::requestSucceeded(PassRefPtr<RTCSessionDescriptionDescriptor> descriptor) { if (m_successCallback) { RefPtr<RTCSessionDescription> sessionDescription = RTCSessionDescription::create(descriptor); m_successCallback->handleEvent(sessionDescription.get()); } clear(); }
1
21,958
/*
 * Dissect a PER-encoded constrained INTEGER in the range 1..9216
 * (H.245).  Returns the updated bit offset.
 */
static int dissect_h245_INTEGER_1_9216 ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) { offset = dissect_per_constrained_integer ( tvb , offset , actx , tree , hf_index , 1U , 9216U , NULL , FALSE ) ; return offset ; }
0
311,887
// Returns true once every text track that was cued up when resource
// selection began has finished loading (or failed).  Any track still in
// the Loading or NotLoaded state keeps the tracks "not ready".
bool HTMLMediaElement::textTracksAreReady() const {
  for (const auto& track : m_textTracksWhenResourceSelectionBegan) {
    auto state = track->getReadinessState();
    if (state == TextTrack::Loading || state == TextTrack::NotLoaded)
      return false;
  }
  return true;
}
0
408,009
/*
 * Dequeue one frontend event for userspace.  Reports -EOVERFLOW once
 * after the event ring overflowed, -EWOULDBLOCK for a non-blocking read
 * on an empty ring, and otherwise sleeps interruptibly (dropping
 * fepriv->sem while waiting, reacquiring afterwards) until an event is
 * queued.  The event copy and read-index advance happen under
 * events->mtx.
 */
static int dvb_frontend_get_event(struct dvb_frontend *fe, struct dvb_frontend_event *event, int flags) { struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dvb_fe_events *events = &fepriv->events; dev_dbg(fe->dvb->device, "%s:\n", __func__); if (events->overflow) { events->overflow = 0; return -EOVERFLOW; } if (events->eventw == events->eventr) { int ret; if (flags & O_NONBLOCK) return -EWOULDBLOCK; up(&fepriv->sem); ret = wait_event_interruptible (events->wait_queue, events->eventw != events->eventr); if (down_interruptible (&fepriv->sem)) return -ERESTARTSYS; if (ret < 0) return ret; } mutex_lock(&events->mtx); *event = events->events[events->eventr]; events->eventr = (events->eventr + 1) % MAX_EVENT; mutex_unlock(&events->mtx); return 0; }
0
371,682
/*
 * Find the first occurrence of the UTF-8 string @needle inside
 * @haystack, advancing character-by-character (not byte-by-byte) so a
 * match always starts on a UTF-8 character boundary.
 *
 * Returns a pointer into @haystack at the start of the match, or NULL
 * when @needle does not occur.
 *
 * Fix: the original loop bound used the unsigned expression
 * haystack_len - needle_len, which wraps to a huge value whenever the
 * needle has more characters than the haystack, making the loop walk
 * past the end of @haystack (out-of-bounds reads).  Guard against an
 * over-long needle before entering the loop.
 */
_g_utf8_strstr (const char *haystack, const char *needle)
{
	const char *s;
	gsize       i;
	gsize       haystack_len = g_utf8_strlen (haystack, -1);
	gsize       needle_len = g_utf8_strlen (needle, -1);
	int         needle_size = strlen (needle);

	/* An over-long needle can never match; this also prevents the
	 * unsigned wrap-around in the loop bound below. */
	if (needle_len > haystack_len)
		return NULL;

	s = haystack;
	for (i = 0; i <= haystack_len - needle_len; i++) {
		if (strncmp (s, needle, needle_size) == 0)
			return s;
		s = g_utf8_next_char (s);
	}

	return NULL;
}
0
52,730
/*
 * Platform-driver probe for the Cadence QuadSPI controller: allocate and
 * stash driver state, parse the mandatory OF properties, map the
 * controller and AHB memory regions, obtain the clock and IRQ, bring the
 * controller to a known state and register the attached flash devices.
 * Returns 0 on success or a negative errno; failures after the clock is
 * enabled unwind through the goto labels (IRQ failure also disables the
 * controller).  devm_* resources are released automatically.
 */
static int cqspi_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; struct cqspi_st *cqspi; struct resource *res; struct resource *res_ahb; int ret; int irq; cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL); if (!cqspi) return -ENOMEM; mutex_init(&cqspi->bus_mutex); cqspi->pdev = pdev; platform_set_drvdata(pdev, cqspi); /* Obtain configuration from OF. */ ret = cqspi_of_get_pdata(pdev); if (ret) { dev_err(dev, "Cannot get mandatory OF data.\n"); return -ENODEV; } /* Obtain QSPI clock. */ cqspi->clk = devm_clk_get(dev, NULL); if (IS_ERR(cqspi->clk)) { dev_err(dev, "Cannot claim QSPI clock.\n"); return PTR_ERR(cqspi->clk); } /* Obtain and remap controller address. */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); cqspi->iobase = devm_ioremap_resource(dev, res); if (IS_ERR(cqspi->iobase)) { dev_err(dev, "Cannot remap controller address.\n"); return PTR_ERR(cqspi->iobase); } /* Obtain and remap AHB address. */ res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1); cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb); if (IS_ERR(cqspi->ahb_base)) { dev_err(dev, "Cannot remap AHB address.\n"); return PTR_ERR(cqspi->ahb_base); } init_completion(&cqspi->transfer_complete); /* Obtain IRQ line. 
*/ irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(dev, "Cannot obtain IRQ.\n"); return -ENXIO; } ret = clk_prepare_enable(cqspi->clk); if (ret) { dev_err(dev, "Cannot enable QSPI clock.\n"); return ret; } cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, pdev->name, cqspi); if (ret) { dev_err(dev, "Cannot request IRQ.\n"); goto probe_irq_failed; } cqspi_wait_idle(cqspi); cqspi_controller_init(cqspi); cqspi->current_cs = -1; cqspi->sclk = 0; ret = cqspi_setup_flash(cqspi, np); if (ret) { dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret); goto probe_setup_failed; } return ret; probe_irq_failed: cqspi_controller_enable(cqspi, 0); probe_setup_failed: clk_disable_unprepare(cqspi->clk); return ret; }
0
111,670
/*
 * Append one character to the dynamic string, growing the backing
 * buffer (via internal_string_grow) when it is full.  Returns TRUE on
 * success, FALSE if the buffer could not be grown.
 * NOTE(review): the buffer is not NUL-terminated here; presumably the
 * trio_string API tracks length separately and terminates on demand --
 * confirm before relying on self->content as a C string.
 */
TRIO_PUBLIC_STRING int trio_xstring_append_char TRIO_ARGS2((self, character), trio_string_t* self, char character) { assert(self); if ((int)self->length >= trio_string_size(self)) { if (!internal_string_grow(self, 0)) goto error; } self->content[self->length] = character; self->length++; return TRUE; error: return FALSE; }
0
416,506
/*
 * Open the PDF 1.4 transparency compositor device: create a page-sized
 * transparency context unless one survives from a previous enable of the
 * same device (Bug 697456), and reset the per-open text/DeviceN state.
 * Returns 0 on success or gs_error_VMerror if the context allocation
 * failed.
 */
pdf14_open(gx_device *dev) { pdf14_device *pdev = (pdf14_device *)dev; gs_int_rect rect; if_debug2m('v', dev->memory, "[v]pdf14_open: width = %d, height = %d\n", dev->width, dev->height); rect.p.x = 0; rect.p.y = 0; rect.q.x = dev->width; rect.q.y = dev->height; /* If we are reenabling the device dont create a new ctx. Bug 697456 */ if (pdev->ctx == NULL) pdev->ctx = pdf14_ctx_new(&rect, dev->color_info.num_components, pdev->color_info.polarity != GX_CINFO_POLARITY_SUBTRACTIVE, dev); if (pdev->ctx == NULL) return_error(gs_error_VMerror); pdev->free_devicen = true; pdev->text_group = PDF14_TEXTGROUP_NO_BT; return 0; }
0
182,686
// Post-test invariants: exactly one commit occurred and no more than two
// draws were needed.
virtual void afterTest() { EXPECT_GE(2, m_numDraws); EXPECT_EQ(1, m_numCommits); }
0
209,392
// Automation hook: move the omnibox popup selection by |count| entries
// via the autocomplete model, then ack the JSON request.  Replies with
// an error if the "count" argument is missing.
void TestingAutomationProvider::OmniboxMovePopupSelection( Browser* browser, DictionaryValue* args, IPC::Message* reply_message) { int count; AutomationJSONReply reply(this, reply_message); if (!args->GetInteger("count", &count)) { reply.SendError("count missing"); return; } LocationBar* loc_bar = browser->window()->GetLocationBar(); AutocompleteEditModel* model = loc_bar->location_entry()->model(); model->OnUpOrDownKeyPressed(count); reply.SendSuccess(NULL); }
0
490,021
/*
 * Card-driver teardown hook: release all PIV private data attached to
 * the card -- the cached AID file, write buffer, off-card certificate
 * URL and every per-object cache entry -- then detach it from the card.
 * Always returns 0.
 *
 * Cleanup: dropped the redundant "if (ptr) free(ptr)" guards; free(NULL)
 * is defined as a no-op by the C standard.
 */
piv_finish(sc_card_t *card)
{
	piv_private_data_t *priv = PIV_DATA(card);
	int i;

	SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);
	if (priv) {
		sc_file_free(priv->aid_file);
		free(priv->w_buf);
		free(priv->offCardCertURL);
		for (i = 0; i < PIV_OBJ_LAST_ENUM - 1; i++) {
			sc_log(card->ctx,
			       "DEE freeing #%d, 0x%02x %p:%"SC_FORMAT_LEN_SIZE_T"u %p:%"SC_FORMAT_LEN_SIZE_T"u",
			       i, priv->obj_cache[i].flags,
			       priv->obj_cache[i].obj_data, priv->obj_cache[i].obj_len,
			       priv->obj_cache[i].internal_obj_data,
			       priv->obj_cache[i].internal_obj_len);
			free(priv->obj_cache[i].obj_data);
			free(priv->obj_cache[i].internal_obj_data);
		}
		free(priv);
		card->drv_data = NULL; /* priv */
	}
	return 0;
}
0
223,798
// Only column-header and row-header cells support sorting semantics.
bool roleAllowsSort(AccessibilityRole role) {
  switch (role) {
  case ColumnHeaderRole:
  case RowHeaderRole:
    return true;
  default:
    return false;
  }
}
0
53,442
/*
 * ieee80211_check_fast_xmit - (re)build the fast-xmit cache for a station.
 *
 * Under sta->lock, decides whether the station qualifies for the fast
 * transmit path (hardware support, authorized, no power-save state, no
 * fragmentation, supported interface type and cipher, hardware crypto only)
 * and, if so, precomputes the 802.11 header, DA/SA offsets and key layout
 * into a kmemdup'ed struct ieee80211_fast_tx that is RCU-assigned to
 * sta->fast_tx.  Any disqualifying condition jumps to `out`, which installs
 * NULL instead; the previous entry, if any, is released via kfree_rcu().
 */
void ieee80211_check_fast_xmit(struct sta_info *sta) { struct ieee80211_fast_tx build = {}, *fast_tx = NULL, *old; struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_hdr *hdr = (void *)build.hdr; struct ieee80211_chanctx_conf *chanctx_conf; __le16 fc; if (!ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT)) return; /* Locking here protects both the pointer itself, and against concurrent * invocations winning data access races to, e.g., the key pointer that * is used. * Without it, the invocation of this function right after the key * pointer changes wouldn't be sufficient, as another CPU could access * the pointer, then stall, and then do the cache update after the CPU * that invalidated the key. * With the locking, such scenarios cannot happen as the check for the * key and the fast-tx assignment are done atomically, so the CPU that * modifies the key will either wait or other one will see the key * cleared/changed already. */ spin_lock_bh(&sta->lock); if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && sdata->vif.type == NL80211_IFTYPE_STATION) goto out; if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) goto out; if (test_sta_flag(sta, WLAN_STA_PS_STA) || test_sta_flag(sta, WLAN_STA_PS_DRIVER) || test_sta_flag(sta, WLAN_STA_PS_DELIVER) || test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT)) goto out; if (sdata->noack_map) goto out; /* fast-xmit doesn't handle fragmentation at all */ if (local->hw.wiphy->frag_threshold != (u32)-1 && !ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG)) goto out; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); goto out; } build.band = chanctx_conf->def.chan->band; rcu_read_unlock(); fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); switch (sdata->vif.type) { case NL80211_IFTYPE_ADHOC: /* DA SA BSSID */ build.da_offs = offsetof(struct ieee80211_hdr, addr1);
build.sa_offs = offsetof(struct ieee80211_hdr, addr2); memcpy(hdr->addr3, sdata->u.ibss.bssid, ETH_ALEN); build.hdr_len = 24; break; case NL80211_IFTYPE_STATION: if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { /* DA SA BSSID */ build.da_offs = offsetof(struct ieee80211_hdr, addr1); build.sa_offs = offsetof(struct ieee80211_hdr, addr2); memcpy(hdr->addr3, sdata->u.mgd.bssid, ETH_ALEN); build.hdr_len = 24; break; } if (sdata->u.mgd.use_4addr) { /* non-regular ethertype cannot use the fastpath */ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); build.da_offs = offsetof(struct ieee80211_hdr, addr3); build.sa_offs = offsetof(struct ieee80211_hdr, addr4); build.hdr_len = 30; break; } fc |= cpu_to_le16(IEEE80211_FCTL_TODS); /* BSSID SA DA */ memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN); build.da_offs = offsetof(struct ieee80211_hdr, addr3); build.sa_offs = offsetof(struct ieee80211_hdr, addr2); build.hdr_len = 24; break; case NL80211_IFTYPE_AP_VLAN: if (sdata->wdev.use_4addr) { fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); build.da_offs = offsetof(struct ieee80211_hdr, addr3); build.sa_offs = offsetof(struct ieee80211_hdr, addr4); build.hdr_len = 30; break; } fallthrough; case NL80211_IFTYPE_AP: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ build.da_offs = offsetof(struct ieee80211_hdr, addr1); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); build.sa_offs = offsetof(struct ieee80211_hdr, addr3); build.hdr_len = 24; break; default: /* not handled on fast-xmit */ goto out; } if (sta->sta.wme) { build.hdr_len += 2; fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); } /* We store the key here so there's no point in using rcu_dereference() * but that's fine because the code that changes the pointers will call *
this function after doing so. For a single CPU that would be enough, * for multiple see the comment above. */ build.key = rcu_access_pointer(sta->ptk[sta->ptk_idx]); if (!build.key) build.key = rcu_access_pointer(sdata->default_unicast_key); if (build.key) { bool gen_iv, iv_spc, mmic; gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV; iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE; mmic = build.key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE); /* don't handle software crypto */ if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) goto out; /* Key is being removed */ if (build.key->flags & KEY_FLAG_TAINTED) goto out; switch (build.key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: if (gen_iv) build.pn_offs = build.hdr_len; if (gen_iv || iv_spc) build.hdr_len += IEEE80211_CCMP_HDR_LEN; break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (gen_iv) build.pn_offs = build.hdr_len; if (gen_iv || iv_spc) build.hdr_len += IEEE80211_GCMP_HDR_LEN; break; case WLAN_CIPHER_SUITE_TKIP: /* cannot handle MMIC or IV generation in xmit-fast */ if (mmic || gen_iv) goto out; if (iv_spc) build.hdr_len += IEEE80211_TKIP_IV_LEN; break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: /* cannot handle IV generation in fast-xmit */ if (gen_iv) goto out; if (iv_spc) build.hdr_len += IEEE80211_WEP_IV_LEN; break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: WARN(1, "management cipher suite 0x%x enabled for data\n", build.key->conf.cipher); goto out; default: /* we don't know how to generate IVs for this at all */ if (WARN_ON(gen_iv)) goto out; /* pure hardware keys are OK, of course */ if (!(build.key->flags & KEY_FLAG_CIPHER_SCHEME)) break; /* cipher scheme might require space allocation */ if (iv_spc && build.key->conf.iv_len > IEEE80211_FAST_XMIT_MAX_IV)
goto out; if (iv_spc) build.hdr_len += build.key->conf.iv_len; } fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); } hdr->frame_control = fc; memcpy(build.hdr + build.hdr_len, rfc1042_header, sizeof(rfc1042_header)); build.hdr_len += sizeof(rfc1042_header); fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC); /* if the kmemdup fails, continue w/o fast_tx */ if (!fast_tx) goto out; out: /* we might have raced against another call to this function */ old = rcu_dereference_protected(sta->fast_tx, lockdep_is_held(&sta->lock)); rcu_assign_pointer(sta->fast_tx, fast_tx); if (old) kfree_rcu(old, rcu_head); spin_unlock_bh(&sta->lock); }
0
115,484
/*
 * imagesetpixel(resource im, int x, int y, int col) -> bool
 * Fetches the gd image resource, sets the single pixel at (x, y) to colour
 * `col` via gdImageSetPixel(), and returns true.  If argument parsing fails
 * the function simply returns (NULL result).
 */
PHP_FUNCTION(imagesetpixel) { zval *IM; long x, y, col; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlll", &IM, &x, &y, &col) == FAILURE) { return; } ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd); gdImageSetPixel(im, x, y, col); RETURN_TRUE; }
0
45,294
// Verifies that resizing input tensors beyond the capacity of their custom
// allocations makes AllocateTensors() -- and any subsequent Invoke() -- fail
// with kTfLiteError instead of silently overrunning the user-provided
// buffers.
TEST_F(TestCustomAllocation, ResizeTensorsWithoutEnoughMemory) { // Set custom allocations for all input tensors. AssignCustomAllocForTensor(interpreter_->inputs()[0], /*required_alignment=*/kDefaultTensorAlignment); AssignCustomAllocForTensor(interpreter_->inputs()[1], /*required_alignment=*/kDefaultTensorAlignment); ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk); // Now resize tensors to double the size. ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2, 3}), kTfLiteOk); ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2, 3}), kTfLiteOk); // Since the custom memory previously allocated isn't enough, // AllocateTensors() will fail. ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError); // Interpreter should no longer be in invokable state, so expect failure. ASSERT_EQ(interpreter_->Invoke(), kTfLiteError); }
0
433,742
/*
 * print_smb: decode and print one SMB1 PDU between buf and maxbuf -- the
 * fixed header, then for each command the word block (wct words) and byte
 * count block (bcc bytes) -- following any chained andX commands via the
 * offset in words[3].  NT vs. DOS error reporting is selected by the
 * flags2 0x4000 bit.  Truncated input jumps to the `trunc` label.
 * NOTE(review): sets file-scope state (request, startbuf, unicodestr) that
 * the smb_fdata()/descriptor helpers consume.
 */
print_smb(netdissect_options *ndo, const u_char *buf, const u_char *maxbuf) { uint16_t flags2; int nterrcodes; int command; uint32_t nterror; const u_char *words, *maxwords, *data; const struct smbfns *fn; const char *fmt_smbheader = "[P4]SMB Command = [B]\nError class = [BP1]\nError code = [d]\nFlags1 = [B]\nFlags2 = [B][P13]\nTree ID = [d]\nProc ID = [d]\nUID = [d]\nMID = [d]\nWord Count = [b]\n"; int smboffset; ND_TCHECK(buf[9]); request = (buf[9] & 0x80) ? 0 : 1; startbuf = buf; command = buf[4]; fn = smbfind(command, smb_fns); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, "SMB PACKET: %s (%s)\n", fn->name, request ? "REQUEST" : "REPLY")); if (ndo->ndo_vflag < 2) return; ND_TCHECK_16BITS(&buf[10]); flags2 = EXTRACT_LE_16BITS(&buf[10]); unicodestr = flags2 & 0x8000; nterrcodes = flags2 & 0x4000; /* print out the header */ smb_fdata(ndo, buf, fmt_smbheader, buf + 33, unicodestr); if (nterrcodes) { nterror = EXTRACT_LE_32BITS(&buf[5]); if (nterror) ND_PRINT((ndo, "NTError = %s\n", nt_errstr(nterror))); } else { if (buf[5]) ND_PRINT((ndo, "SMBError = %s\n", smb_errstr(buf[5], EXTRACT_LE_16BITS(&buf[7])))); } smboffset = 32; for (;;) { const char *f1, *f2; int wct; u_int bcc; int newsmboffset; words = buf + smboffset; ND_TCHECK(words[0]); wct = words[0]; data = words + 1 + wct * 2; maxwords = min(data, maxbuf); if (request) { f1 = fn->descript.req_f1; f2 = fn->descript.req_f2; } else { f1 = fn->descript.rep_f1; f2 = fn->descript.rep_f2; } if (fn->descript.fn) (*fn->descript.fn)(ndo, words, data, buf, maxbuf); else { if (wct) { if (f1) smb_fdata(ndo, words + 1, f1, words + 1 + wct * 2, unicodestr); else { int i; int v; for (i = 0; &words[1 + 2 * i] < maxwords; i++) { ND_TCHECK2(words[1 + 2 * i], 2); v = EXTRACT_LE_16BITS(words + 1 + 2 * i); ND_PRINT((ndo, "smb_vwv[%d]=%d (0x%X)\n", i, v, v)); } } } ND_TCHECK2(*data, 2); bcc = EXTRACT_LE_16BITS(data); ND_PRINT((ndo, "smb_bcc=%u\n", bcc)); if (f2) { if (bcc > 0) smb_fdata(ndo, data + 2, f2, data + 2 +
bcc, unicodestr); } else { if (bcc > 0) { ND_PRINT((ndo, "smb_buf[]=\n")); smb_print_data(ndo, data + 2, min(bcc, PTR_DIFF(maxbuf, data + 2))); } } } if ((fn->flags & FLG_CHAIN) == 0) break; if (wct == 0) break; ND_TCHECK(words[1]); command = words[1]; if (command == 0xFF) break; ND_TCHECK2(words[3], 2); newsmboffset = EXTRACT_LE_16BITS(words + 3); fn = smbfind(command, smb_fns); ND_PRINT((ndo, "\nSMB PACKET: %s (%s) (CHAINED)\n", fn->name, request ? "REQUEST" : "REPLY")); if (newsmboffset <= smboffset) { ND_PRINT((ndo, "Bad andX offset: %u <= %u\n", newsmboffset, smboffset)); break; } smboffset = newsmboffset; } ND_PRINT((ndo, "\n")); return; trunc: ND_PRINT((ndo, "%s", tstr)); }
0
504,759
/*
 * Porter-Duff / arithmetic compositing filter primitive (feComposite).
 * Reads the two input surfaces (in, in2), combines them pixel-by-pixel over
 * the primitive's bounds according to upself->mode into a freshly cleared
 * output pixbuf, and stores that as the primitive's result.  The arithmetic
 * branch applies qr = (k1*qa*qb/255 + k2*qa + k3*qb)/255 to alpha and the
 * analogous k1..k4 formula to each colour channel; both branches clamp the
 * colour channels to [0, qr].  All three pixbufs are unreffed on exit.
 */
rsvg_filter_primitive_composite_render (RsvgFilterPrimitive * self, RsvgFilterContext * ctx) { guchar i; gint x, y; gint rowstride, height, width; RsvgIRect boundarys; guchar *in_pixels; guchar *in2_pixels; guchar *output_pixels; RsvgFilterPrimitiveComposite *upself; GdkPixbuf *output; GdkPixbuf *in; GdkPixbuf *in2; upself = (RsvgFilterPrimitiveComposite *) self; boundarys = rsvg_filter_primitive_get_bounds (self, ctx); in = rsvg_filter_get_in (self->in, ctx); in_pixels = gdk_pixbuf_get_pixels (in); in2 = rsvg_filter_get_in (upself->in2, ctx); in2_pixels = gdk_pixbuf_get_pixels (in2); height = gdk_pixbuf_get_height (in); width = gdk_pixbuf_get_width (in); rowstride = gdk_pixbuf_get_rowstride (in); output = _rsvg_pixbuf_new_cleared (GDK_COLORSPACE_RGB, 1, 8, width, height); output_pixels = gdk_pixbuf_get_pixels (output); if (upself->mode == COMPOSITE_MODE_ARITHMETIC) for (y = boundarys.y0; y < boundarys.y1; y++) for (x = boundarys.x0; x < boundarys.x1; x++) { int qr, qa, qb; qa = in_pixels[4 * x + y * rowstride + 3]; qb = in2_pixels[4 * x + y * rowstride + 3]; qr = (upself->k1 * qa * qb / 255 + upself->k2 * qa + upself->k3 * qb) / 255; if (qr > 255) qr = 255; if (qr < 0) qr = 0; output_pixels[4 * x + y * rowstride + 3] = qr; if (qr) for (i = 0; i < 3; i++) { int ca, cb, cr; ca = in_pixels[4 * x + y * rowstride + i]; cb = in2_pixels[4 * x + y * rowstride + i]; cr = (ca * cb * upself->k1 / 255 + ca * upself->k2 + cb * upself->k3 + upself->k4 * qr) / 255; if (cr > qr) cr = qr; if (cr < 0) cr = 0; output_pixels[4 * x + y * rowstride + i] = cr; } } else for (y = boundarys.y0; y < boundarys.y1; y++) for (x = boundarys.x0; x < boundarys.x1; x++) { int qr, cr, qa, qb, ca, cb, Fa, Fb, Fab, Fo; qa = in_pixels[4 * x + y * rowstride + 3]; qb = in2_pixels[4 * x + y * rowstride + 3]; cr = 0; Fa = Fb = Fab = Fo = 0; switch (upself->mode) { case COMPOSITE_MODE_OVER: Fa = 255; Fb = 255 - qa; break; case COMPOSITE_MODE_IN: Fa = qb; Fb = 0; break; case COMPOSITE_MODE_OUT: Fa = 255 -
qb; Fb = 0; break; case COMPOSITE_MODE_ATOP: Fa = qb; Fb = 255 - qa; break; case COMPOSITE_MODE_XOR: Fa = 255 - qb; Fb = 255 - qa; break; default: break; } qr = (Fa * qa + Fb * qb) / 255; if (qr > 255) qr = 255; if (qr < 0) qr = 0; for (i = 0; i < 3; i++) { ca = in_pixels[4 * x + y * rowstride + i]; cb = in2_pixels[4 * x + y * rowstride + i]; cr = (ca * Fa + cb * Fb + ca * cb * Fab + Fo) / 255; if (cr > qr) cr = qr; if (cr < 0) cr = 0; output_pixels[4 * x + y * rowstride + i] = cr; } output_pixels[4 * x + y * rowstride + 3] = qr; } rsvg_filter_store_result (self->result, output, ctx); g_object_unref (in); g_object_unref (in2); g_object_unref (output); }
0
303,162
/*
 * edit_and_execute_command: readline binding helper (vi `v` / emacs C-x C-e).
 * Accepts the current input line, then builds the command to run: with an
 * explicit numeric argument, "<edit_command> <count>"; otherwise pushes the
 * current line (plus a dummy entry) into the history and runs a copy of
 * edit_command so `fc` operates on it.  The command is executed through
 * parse_and_execute() with the shell parser state saved/restored and the
 * terminal deprepped/re-prepped around the call.  Afterwards the readline
 * buffer is cleared and readline state restored so the executed text cannot
 * be undone; vi mode is re-entered in insertion mode.  Returns the command's
 * execution status.  (K&R-style definition; relies on readline/bash globals
 * such as rl_line_buffer and current_command_line_count.)
 */
edit_and_execute_command (count, c, editing_mode, edit_command) int count, c, editing_mode; char *edit_command; { char *command, *metaval; int r, rrs, metaflag; sh_parser_state_t ps; rrs = rl_readline_state; saved_command_line_count = current_command_line_count; /* Accept the current line. */ rl_newline (1, c); if (rl_explicit_arg) { command = (char *)xmalloc (strlen (edit_command) + 8); sprintf (command, "%s %d", edit_command, count); } else { /* Take the command we were just editing, add it to the history file, then call fc to operate on it. We have to add a dummy command to the end of the history because fc ignores the last command (assumes it's supposed to deal with the command before the `fc'). */ /* This breaks down when using command-oriented history and are not finished with the command, so we should not ignore the last command */ using_history (); current_command_line_count++; /* for rl_newline above */ bash_add_history (rl_line_buffer); current_command_line_count = 0; /* for dummy history entry */ bash_add_history (""); history_lines_this_session++; using_history (); command = savestring (edit_command); } metaval = rl_variable_value ("input-meta"); metaflag = RL_BOOLEAN_VARIABLE_VALUE (metaval); if (rl_deprep_term_function) (*rl_deprep_term_function) (); save_parser_state (&ps); r = parse_and_execute (command, (editing_mode == VI_EDITING_MODE) ? "v" : "C-xC-e", SEVAL_NOHIST); restore_parser_state (&ps); if (rl_prep_term_function) (*rl_prep_term_function) (metaflag); current_command_line_count = saved_command_line_count; /* Now erase the contents of the current line and undo the effects of the rl_accept_line() above. We don't even want to make the text we just executed available for undoing. */ rl_line_buffer[0] = '\0'; /* XXX */ rl_point = rl_end = 0; rl_done = 0; rl_readline_state = rrs; #if defined (VI_MODE) if (editing_mode == VI_EDITING_MODE) rl_vi_insertion_mode (1, c); #endif rl_forced_update_display (); return r; }
0
252,566
/*
 * dsa_pub_encode: encode a DSA public key into an X509_PUBKEY structure.
 * If save_parameters is set and the key carries its domain parameters
 * (p, q, g), they are serialised via i2d_DSAparams into an ASN1_STRING and
 * attached as the algorithm parameter (V_ASN1_SEQUENCE); otherwise the
 * parameter type is V_ASN1_UNDEF.  write_params is cleared so
 * i2d_DSAPublicKey emits only the public key.  On success ownership of
 * `str` and `penc` transfers to `pk` and 1 is returned; on any failure both
 * are freed here and 0 is returned.
 */
static int dsa_pub_encode(X509_PUBKEY *pk, const EVP_PKEY *pkey) { DSA *dsa; int ptype; unsigned char *penc = NULL; int penclen; ASN1_STRING *str = NULL; dsa = pkey->pkey.dsa; if (pkey->save_parameters && dsa->p && dsa->q && dsa->g) { str = ASN1_STRING_new(); if (!str) { DSAerr(DSA_F_DSA_PUB_ENCODE, ERR_R_MALLOC_FAILURE); goto err; } str->length = i2d_DSAparams(dsa, &str->data); if (str->length <= 0) { DSAerr(DSA_F_DSA_PUB_ENCODE, ERR_R_MALLOC_FAILURE); goto err; } ptype = V_ASN1_SEQUENCE; } else ptype = V_ASN1_UNDEF; dsa->write_params = 0; penclen = i2d_DSAPublicKey(dsa, &penc); if (penclen <= 0) { DSAerr(DSA_F_DSA_PUB_ENCODE, ERR_R_MALLOC_FAILURE); goto err; } if (X509_PUBKEY_set0_param(pk, OBJ_nid2obj(EVP_PKEY_DSA), ptype, str, penc, penclen)) return 1; err: if (penc) OPENSSL_free(penc); if (str) ASN1_STRING_free(str); return 0; }
0
481,102
/*
 * Recompute the shadow-EPT reserved (must-be-zero) bit mask for this MMU
 * context from the host's reserved physical-address bits, the execute-only
 * capability and the maximum supported huge-page level.
 */
reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly) { __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, reserved_hpa_bits(), execonly, max_huge_page_level); }
0
284,846
// Creates this window's GtkAccelGroup and connects every accelerator known
// to AcceleratorsGtk to it.  Each closure carries the command id
// (iter->first) as its user data and dispatches through OnGtkAccelerator.
void BrowserWindowGtk::ConnectAccelerators() { accel_group_ = gtk_accel_group_new(); gtk_window_add_accel_group(window_, accel_group_); AcceleratorsGtk* accelerators = AcceleratorsGtk::GetInstance(); for (AcceleratorsGtk::const_iterator iter = accelerators->begin(); iter != accelerators->end(); ++iter) { gtk_accel_group_connect( accel_group_, iter->second.GetGdkKeyCode(), static_cast<GdkModifierType>(iter->second.modifiers()), GtkAccelFlags(0), g_cclosure_new(G_CALLBACK(OnGtkAccelerator), GINT_TO_POINTER(iter->first), NULL)); } }
0
414,987
// Stores the requested pitch scaling factor and refreshes the derived
// effective rate/tempo values.
void SoundTouch::setPitch(double newPitch) { virtualPitch = newPitch; calcEffectiveRateAndTempo(); }
0
230,980
// Sniffs Microsoft Office documents.  Requires both the Office magic number
// in |content| (legacy "CFB" container or "OOXML" zip, via
// CheckForMagicNumbers) AND a matching Office file extension on |url|'s path
// before writing a concrete MIME type to |*result|.  Returns false when
// either signal is missing; the NOTREACHED() tail guards against an
// unexpected office_version string.
static bool SniffForOfficeDocs(const char* content, size_t size, const GURL& url, bool* have_enough_content, std::string* result) { *have_enough_content &= TruncateSize(kBytesRequiredForOfficeMagic, &size); std::string office_version; if (!CheckForMagicNumbers(content, size, kOfficeMagicNumbers, arraysize(kOfficeMagicNumbers), &office_version)) return false; OfficeDocType type = DOC_TYPE_NONE; base::StringPiece url_path = url.path_piece(); for (size_t i = 0; i < arraysize(kOfficeExtensionTypes); ++i) { if (url_path.length() < kOfficeExtensionTypes[i].extension_len) continue; base::StringPiece extension = url_path.substr( url_path.length() - kOfficeExtensionTypes[i].extension_len); if (base::EqualsCaseInsensitiveASCII( extension, base::StringPiece(kOfficeExtensionTypes[i].extension, kOfficeExtensionTypes[i].extension_len))) { type = kOfficeExtensionTypes[i].doc_type; break; } } if (type == DOC_TYPE_NONE) return false; if (office_version == "CFB") { switch (type) { case DOC_TYPE_WORD: *result = "application/msword"; return true; case DOC_TYPE_EXCEL: *result = "application/vnd.ms-excel"; return true; case DOC_TYPE_POWERPOINT: *result = "application/vnd.ms-powerpoint"; return true; case DOC_TYPE_NONE: NOTREACHED(); return false; } } else if (office_version == "OOXML") { switch (type) { case DOC_TYPE_WORD: *result = "application/vnd.openxmlformats-officedocument." "wordprocessingml.document"; return true; case DOC_TYPE_EXCEL: *result = "application/vnd.openxmlformats-officedocument." "spreadsheetml.sheet"; return true; case DOC_TYPE_POWERPOINT: *result = "application/vnd.openxmlformats-officedocument." "presentationml.presentation"; return true; case DOC_TYPE_NONE: NOTREACHED(); return false; } } NOTREACHED(); return false; }
0
292,812
//! Save image as a BMP file \overloading. const CImg<T>& save_bmp(std::FILE *const file) const { return _save_bmp(file,0);
0
316,096
// Fills |event| with a synthetic X11 key press/release for use in tests.
// Only the type, display, modifier state and keycode are meaningful; every
// other XKeyEvent field is zeroed (same_screen is set to 1).
void InitXKeyEventForTesting(EventType type,
                             KeyboardCode key_code,
                             int flags,
                             XEvent* event) {
  CHECK(event);
  Display* display = GetXDisplay();
  XKeyEvent key = {};  // Zero-initialize all fields.
  key.type = XKeyEventType(type);
  CHECK_NE(0, key.type);
  key.display = display;
  key.state = XKeyEventState(flags);
  key.keycode = XKeyEventKeyCode(key_code, flags, display);
  key.same_screen = 1;
  event->type = key.type;
  event->xkey = key;
}
0
496,310
/*
 * ad_pack_xattrs: serialise the in-memory xattr list of an AppleDouble
 * structure into the FinderInfo area of ad->ad_data.  Lays out the xattr
 * header, one entry per attribute (offset, length, flags, namelen, name),
 * then the attribute data, and finally updates the ADEID_FINDERI /
 * ADEID_RFORK table entries and rewrites the header fields.  Every write is
 * bounds-checked against AD_XATTR_MAX_HDR_SIZE; returns false on overflow,
 * allocation failure, missing fsp, or a failed resource-fork move.  Returns
 * true immediately when there are no xattrs to pack.
 */
static bool ad_pack_xattrs(struct vfs_handle_struct *handle, struct adouble *ad, files_struct *fsp) { struct ad_xattr_header *h = &ad->adx_header; size_t oldsize; uint32_t off; uint32_t data_off; uint16_t i; bool ok; if (ad->adx_entries == NULL) { /* No xattrs, nothing to pack */ return true; } if (fsp == NULL) { DBG_ERR("fsp unexpectedly NULL\n"); return false; } oldsize = talloc_get_size(ad->ad_data); if (oldsize < AD_XATTR_MAX_HDR_SIZE) { ad->ad_data = talloc_realloc(ad, ad->ad_data, char, AD_XATTR_MAX_HDR_SIZE); if (ad->ad_data == NULL) { return false; } memset(ad->ad_data + oldsize, 0, AD_XATTR_MAX_HDR_SIZE - oldsize); } /* * First, let's calculate the start of the xattr data area which will be * after the xattr header + header entries. */ data_off = ad_getentryoff(ad, ADEID_FINDERI); data_off += ADEDLEN_FINDERI + AD_XATTR_HDR_SIZE; /* 2 bytes padding */ data_off += 2; for (i = 0; i < h->adx_num_attrs; i++) { struct ad_xattr_entry *e = &ad->adx_entries[i]; /* Align on 4 byte boundary */ data_off = (data_off + 3) & ~3; data_off += e->adx_namelen + ADX_ENTRY_FIXED_SIZE; if (data_off >= AD_XATTR_MAX_HDR_SIZE) { return false; } } off = ad_getentryoff(ad, ADEID_FINDERI); off += ADEDLEN_FINDERI + AD_XATTR_HDR_SIZE; /* 2 bytes padding */ off += 2; for (i = 0; i < h->adx_num_attrs; i++) { struct ad_xattr_entry *e = &ad->adx_entries[i]; /* Align on 4 byte boundary */ off = (off + 3) & ~3; e->adx_offset = data_off; data_off += e->adx_length; DBG_DEBUG("%zu(%s){%zu}: off [%zu] adx_length [%zu] " "adx_data_off [%zu]\n", (size_t)i, e->adx_name, (size_t)e->adx_namelen, (size_t)off, (size_t)e->adx_length, (size_t)e->adx_offset); if (off + 4 >= AD_XATTR_MAX_HDR_SIZE) { return false; } RSIVAL(ad->ad_data, off, e->adx_offset); off += 4; if (off + 4 >= AD_XATTR_MAX_HDR_SIZE) { return false; } RSIVAL(ad->ad_data, off, e->adx_length); off += 4; if (off + 2 >= AD_XATTR_MAX_HDR_SIZE) { return false; } RSSVAL(ad->ad_data, off, e->adx_flags); off += 2; if (off + 1 >=
AD_XATTR_MAX_HDR_SIZE) { return false; } SCVAL(ad->ad_data, off, e->adx_namelen); off += 1; if (off + e->adx_namelen >= AD_XATTR_MAX_HDR_SIZE) { return false; } memcpy(ad->ad_data + off, e->adx_name, e->adx_namelen); off += e->adx_namelen; } h->adx_data_start = off; h->adx_data_length = talloc_get_size(ad->adx_data); h->adx_total_size = h->adx_data_start + h->adx_data_length; if (talloc_get_size(ad->ad_data) < h->adx_total_size) { ad->ad_data = talloc_realloc(ad, ad->ad_data, char, h->adx_total_size); if (ad->ad_data == NULL) { return false; } } memcpy(ad->ad_data + h->adx_data_start, ad->adx_data, h->adx_data_length); ad_setentrylen(ad, ADEID_FINDERI, h->adx_total_size - ad_getentryoff(ad, ADEID_FINDERI)); ad_setentryoff(ad, ADEID_RFORK, ad_getentryoff(ad, ADEID_FINDERI) + ad_getentrylen(ad, ADEID_FINDERI)); memcpy(ad->ad_data + ADEDOFF_FILLER, AD_FILLER_TAG_OSX, ADEDLEN_FILLER); /* * Rewind, then update the header fields. */ off = ad_getentryoff(ad, ADEID_FINDERI) + ADEDLEN_FINDERI; /* 2 bytes padding */ off += 2; RSIVAL(ad->ad_data, off, AD_XATTR_HDR_MAGIC); off += 4; RSIVAL(ad->ad_data, off, 0); off += 4; RSIVAL(ad->ad_data, off, h->adx_total_size); off += 4; RSIVAL(ad->ad_data, off, h->adx_data_start); off += 4; RSIVAL(ad->ad_data, off, h->adx_data_length); off += 4; /* adx_reserved and adx_flags */ memset(ad->ad_data + off, 0, 3 * 4 + 2); off += 3 * 4 + 2; RSSVAL(ad->ad_data, off, h->adx_num_attrs); off += 2; ok = ad_pack_move_reso(handle, ad, fsp); if (!ok) { DBG_ERR("Moving resourcefork of [%s] failed\n", fsp_str_dbg(fsp)); return false; } return true; }
0
116,088
/*
 * Packages an instruction-emulation failure for userspace: fills
 * vcpu->run with KVM_EXIT_INTERNAL_ERROR / KVM_INTERNAL_ERROR_EMULATION,
 * the flags word, optional NOP-padded instruction bytes, the five exit-info
 * values from the vendor module, and up to four caller-supplied data words
 * (ndata is clamped to 4).
 */
static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, u8 ndata, u8 *insn_bytes, u8 insn_size) { struct kvm_run *run = vcpu->run; u64 info[5]; u8 info_start; /* * Zero the whole array used to retrieve the exit info, as casting to * u32 for select entries will leave some chunks uninitialized. */ memset(&info, 0, sizeof(info)); static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1], &info[2], (u32 *)&info[3], (u32 *)&info[4]); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION; /* * There's currently space for 13 entries, but 5 are used for the exit * reason and info. Restrict to 4 to reduce the maintenance burden * when expanding kvm_run.emulation_failure in the future. */ if (WARN_ON_ONCE(ndata > 4)) ndata = 4; /* Always include the flags as a 'data' entry. */ info_start = 1; run->emulation_failure.flags = 0; if (insn_size) { BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) + sizeof(run->emulation_failure.insn_bytes) != 16)); info_start += 2; run->emulation_failure.flags |= KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES; run->emulation_failure.insn_size = insn_size; memset(run->emulation_failure.insn_bytes, 0x90, sizeof(run->emulation_failure.insn_bytes)); memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size); } memcpy(&run->internal.data[info_start], info, sizeof(info)); memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data, ndata * sizeof(data[0])); run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata; }
0
491,413
/*
 * display_debug_rnglists_list: print one DWARF-5 range list starting at
 * START (ending at FINISH) from a .debug_rnglists section.  OFFSET is the
 * section offset of START (used for display), BASE_ADDRESS seeds the
 * DW_RLE_base_address* handling, and OFFSET_SIZE selects the 32/64-bit
 * .debug_addr header size for indexed address lookups.  Returns the updated
 * read pointer.
 *
 * Bug fix: DW_RLE_startx_endx previously resolved BEGIN first and then
 * derived END from the already-overwritten BEGIN value, so every such entry
 * printed begin == end.  END is now resolved from the END index (and before
 * BEGIN is clobbered).
 */
display_debug_rnglists_list (unsigned char * start, unsigned char * finish,
			     unsigned int pointer_size, dwarf_vma offset,
			     dwarf_vma base_address, unsigned int offset_size)
{
  unsigned char *next = start;
  unsigned int debug_addr_section_hdr_len;

  /* The .debug_addr section header is 8 bytes in 32-bit DWARF and
     16 bytes in 64-bit DWARF.  */
  if (offset_size == 4)
    debug_addr_section_hdr_len = 8;
  else
    debug_addr_section_hdr_len = 16;

  while (1)
    {
      dwarf_vma off = offset + (start - next);
      enum dwarf_range_list_entry rlet;
      /* Initialize it due to a false compiler warning.  */
      dwarf_vma begin = -1, length, end = -1;

      if (start >= finish)
	{
	  warn (_("Range list starting at offset 0x%s is not terminated.\n"),
		dwarf_vmatoa ("x", offset));
	  break;
	}

      printf (" ");
      print_dwarf_vma (off, 4);

      SAFE_BYTE_GET_AND_INC (rlet, start, 1, finish);
      switch (rlet)
	{
	case DW_RLE_end_of_list:
	  printf (_("<End of list>\n"));
	  break;

	case DW_RLE_base_addressx:
	  READ_ULEB (base_address, start, finish);
	  print_dwarf_vma (base_address, pointer_size);
	  printf (_("(base address index) "));
	  base_address = fetch_indexed_addr ((base_address * pointer_size)
					     + debug_addr_section_hdr_len,
					     pointer_size);
	  print_dwarf_vma (base_address, pointer_size);
	  printf (_("(base address)\n"));
	  break;

	case DW_RLE_startx_endx:
	  READ_ULEB (begin, start, finish);
	  READ_ULEB (end, start, finish);
	  /* Resolve END from the END index, and do it before BEGIN is
	     overwritten with its resolved address (the previous code used
	     the clobbered BEGIN for both lookups).  */
	  end = fetch_indexed_addr ((end * pointer_size)
				    + debug_addr_section_hdr_len,
				    pointer_size);
	  begin = fetch_indexed_addr ((begin * pointer_size)
				      + debug_addr_section_hdr_len,
				      pointer_size);
	  break;

	case DW_RLE_startx_length:
	  READ_ULEB (begin, start, finish);
	  READ_ULEB (length, start, finish);
	  begin = fetch_indexed_addr ((begin * pointer_size)
				      + debug_addr_section_hdr_len,
				      pointer_size);
	  end = begin + length;
	  break;

	case DW_RLE_offset_pair:
	  READ_ULEB (begin, start, finish);
	  READ_ULEB (end, start, finish);
	  break;

	case DW_RLE_base_address:
	  SAFE_BYTE_GET_AND_INC (base_address, start, pointer_size, finish);
	  print_dwarf_vma (base_address, pointer_size);
	  printf (_("(base address)\n"));
	  break;

	case DW_RLE_start_end:
	  SAFE_BYTE_GET_AND_INC (begin, start, pointer_size, finish);
	  SAFE_BYTE_GET_AND_INC (end, start, pointer_size, finish);
	  break;

	case DW_RLE_start_length:
	  SAFE_BYTE_GET_AND_INC (begin, start, pointer_size, finish);
	  READ_ULEB (length, start, finish);
	  end = begin + length;
	  break;

	default:
	  error (_("Invalid range list entry type %d\n"), rlet);
	  rlet = DW_RLE_end_of_list;
	  break;
	}

      if (rlet == DW_RLE_end_of_list)
	break;
      if (rlet == DW_RLE_base_address || rlet == DW_RLE_base_addressx)
	continue;

      /* Only a DW_RLE_offset_pair needs the base address added.  */
      if (rlet == DW_RLE_offset_pair)
	{
	  begin += base_address;
	  end += base_address;
	}

      print_dwarf_vma (begin, pointer_size);
      print_dwarf_vma (end, pointer_size);

      if (begin == end)
	fputs (_("(start == end)"), stdout);
      else if (begin > end)
	fputs (_("(start > end)"), stdout);

      putchar ('\n');
    }

  return start;
}
0
499,085
/* Continue the path walk at `addr` unless the address is invalid or has
 * already been visited; otherwise resume analysis from the most relevant
 * basic block covering it. */
static void analPathFollow(RzCoreAnalPaths *p, ut64 addr, PJ *pj) {
	if (addr == UT64_MAX) {
		return;
	}
	bool already_seen;
	ht_uu_find(p->visited, addr, &already_seen);
	if (already_seen) {
		return;
	}
	p->cur = rz_analysis_find_most_relevant_block_in(p->core->analysis, addr);
	analPaths(p, pj);
}
0
219,683
/*
 * pg_transaction_status(resource connection) -> int|false
 * Returns the connection's current in-transaction status as reported by
 * PQtransactionStatus(); returns false if argument parsing fails (quiet
 * parse mode, no warning emitted).
 */
PHP_FUNCTION(pg_transaction_status) { zval *pgsql_link = NULL; int id = -1; PGconn *pgsql; if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS(), "r", &pgsql_link) == FAILURE) { RETURN_FALSE; } ZEND_FETCH_RESOURCE2(pgsql, PGconn *, pgsql_link, id, "PostgreSQL link", le_link, le_plink); RETURN_LONG(PQtransactionStatus(pgsql)); }
0
414,090
/*
 * "ne" = non-empty: set `name` in `doc` to the string `val` of length
 * `size`, but only when the string is non-empty.  Returns the result of
 * psg_json_value_set_str(), or NULL when size is 0.
 */
psg_json_value_set_str_ne(PsgJsonValue *doc, const char *name,
	const char *val, size_t size)
{
	if (size == 0) {
		return NULL;
	}
	return psg_json_value_set_str(doc, name, val, size);
}
0
387,255
static int isdn_ppp_skip_ac(struct ippp_struct *is, struct sk_buff *skb) { if (skb->len < 1) return -1; if (skb->data[0] == 0xff) { if (skb->len < 2) return -1; if (skb->data[1] != 0x03) return -1; // skip address/control (AC) field skb_pull(skb, 2); } else { if (is->pppcfg & SC_REJ_COMP_AC) // if AC compression was not negotiated, but used, discard packet return -1; } return 0; }
0
143,357
/* Thin dispatcher for the event-info command: forwards all arguments
 * unchanged to InfoEventCmd(). */
static Jsi_RC eventInfoCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { return InfoEventCmd(interp, args, _this, ret, funcPtr); }
0
147,938
/* Render item->valuestring as a quoted/escaped JSON string, writing into
 * printbuffer p when given (NULL p means allocate).  Delegates to
 * print_string_ptr(). */
static char *print_string(cJSON *item,printbuffer *p) {return print_string_ptr(item->valuestring,p);}
0
423,006
static inline u64 ioat_chansts_32(struct ioat_chan_common *chan) { u8 ver = chan->device->version; u64 status; u32 status_lo; /* We need to read the low address first as this causes the * chipset to latch the upper bits for the subsequent read */ status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver)); status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)); status <<= 32; status |= status_lo; return status; }
0
58,787
/*
 * Completion callback for an SMB2 ECHO request.  If a response was received
 * (even a malformed one), harvest the credits the server granted in the
 * response header, then free the mid entry and return the credits to the
 * server's pool for echo operations.
 */
smb2_echo_callback(struct mid_q_entry *mid) { struct TCP_Server_Info *server = mid->callback_data; struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; struct cifs_credits credits = { .value = 0, .instance = 0 }; if (mid->mid_state == MID_RESPONSE_RECEIVED || mid->mid_state == MID_RESPONSE_MALFORMED) { credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest); credits.instance = server->reconnect_instance; } DeleteMidQEntry(mid); add_credits(server, &credits, CIFS_ECHO_OP); }
0