idx
int64
func
string
target
int64
181,283
/* Run tracer selftests that were postponed until selftests_can_run is set.
 * Holds trace_types_lock while iterating; a tracer whose selftest fails is
 * WARNed about and unlinked from the trace_types singly-linked list, and
 * each postponed entry is removed and freed regardless of outcome.
 * Always returns 0. */
static __init int init_trace_selftests(void) { struct trace_selftests *p, *n; struct tracer *t, **last; int ret; selftests_can_run = true; mutex_lock(&trace_types_lock); if (list_empty(&postponed_selftests)) goto out; pr_info("Running postponed tracer tests:\n"); list_for_each_entry_safe(p, n, &postponed_selftests, list) { ret = run_tracer_selftest(p->type); /* If the test fails, then warn and remove from available_tracers */ if (ret < 0) { WARN(1, "tracer: %s failed selftest, disabling\n", p->type->name); last = &trace_types; for (t = trace_types; t; t = t->next) { if (t == p->type) { *last = t->next; break; } last = &t->next; } } list_del(&p->list); kfree(p); } out: mutex_unlock(&trace_types_lock); return 0; }
0
31,473
/* Close the currently open <a> anchor in the output buffer, if any.
 * Searches the tag stack for an HTML_A entry (or a hidden link); for a
 * numbered anchor (hseq > 0) a trailing space is trimmed, an ANSP marker is
 * emitted and the previous-char state updated; otherwise the stacked tag is
 * dropped (or passthrough() consumes the hidden link) and the anchor state
 * is cleared early.  Finally a </a> close tag is pushed and obuf->anchor is
 * zeroed. */
close_anchor(struct html_feed_environ *h_env, struct readbuffer *obuf) { if (obuf->anchor.url) { int i; char *p = NULL; int is_erased = 0; for (i = obuf->tag_sp - 1; i >= 0; i--) { if (obuf->tag_stack[i]->cmd == HTML_A) break; } if (i < 0 && obuf->anchor.hseq > 0 && Strlastchar(obuf->line) == ' ') { Strshrink(obuf->line, 1); obuf->pos--; is_erased = 1; } if (i >= 0 || (p = has_hidden_link(obuf, HTML_A))) { if (obuf->anchor.hseq > 0) { HTMLlineproc1(ANSP, h_env); set_space_to_prevchar(obuf->prevchar); } else { if (i >= 0) { obuf->tag_sp--; bcopy(&obuf->tag_stack[i + 1], &obuf->tag_stack[i], (obuf->tag_sp - i) * sizeof(struct cmdtable *)); } else { passthrough(obuf, p, 1); } bzero((void *)&obuf->anchor, sizeof(obuf->anchor)); return; } is_erased = 0; } if (is_erased) { Strcat_char(obuf->line, ' '); obuf->pos++; } push_tag(obuf, "</a>", HTML_N_A); } bzero((void *)&obuf->anchor, sizeof(obuf->anchor)); }
0
125,781
/* Gather IP information for the interface(s) named by if_name (or every
 * non-loopback AF_INET/AF_INET6 interface when if_name is NULL) using
 * getifaddrs().  For KVP_OP_ENUMERATE only addresses are written into
 * out_buffer; for KVP_OP_GET_IP_INFO the hv_kvp_ipaddr_value is also
 * filled with the address family, subnet info (dotted quad for IPv4,
 * CIDR "/<bits>" for IPv6) and other configuration gathered by
 * kvp_get_ipconfig_info().  Returns 0, HV_E_FAIL if getifaddrs fails, or
 * the error from kvp_process_ip_address().
 * NOTE(review): weight += hweight32(&w[i]) passes a pointer where a
 * popcount of the 32-bit value itself looks intended — confirm the local
 * hweight32() signature before relying on the IPv6 prefix length. */
kvp_get_ip_info(int family, char *if_name, int op, void *out_buffer, int length) { struct ifaddrs *ifap; struct ifaddrs *curp; int offset = 0; int sn_offset = 0; int error = 0; char *buffer; struct hv_kvp_ipaddr_value *ip_buffer; char cidr_mask[5]; /* /xyz */ int weight; int i; unsigned int *w; char *sn_str; struct sockaddr_in6 *addr6; if (op == KVP_OP_ENUMERATE) { buffer = out_buffer; } else { ip_buffer = out_buffer; buffer = (char *)ip_buffer->ip_addr; ip_buffer->addr_family = 0; } /* * On entry into this function, the buffer is capable of holding the * maximum key value. */ if (getifaddrs(&ifap)) { strcpy(buffer, "getifaddrs failed\n"); return HV_E_FAIL; } curp = ifap; while (curp != NULL) { if (curp->ifa_addr == NULL) { curp = curp->ifa_next; continue; } if ((if_name != NULL) && (strncmp(curp->ifa_name, if_name, strlen(if_name)))) { /* * We want info about a specific interface; * just continue. */ curp = curp->ifa_next; continue; } /* * We only support two address families: AF_INET and AF_INET6. * If a family value of 0 is specified, we collect both * supported address families; if not we gather info on * the specified address family. */ if ((((family != 0) && (curp->ifa_addr->sa_family != family))) || (curp->ifa_flags & IFF_LOOPBACK)) { curp = curp->ifa_next; continue; } if ((curp->ifa_addr->sa_family != AF_INET) && (curp->ifa_addr->sa_family != AF_INET6)) { curp = curp->ifa_next; continue; } if (op == KVP_OP_GET_IP_INFO) { /* * Gather info other than the IP address. * IP address info will be gathered later. */ if (curp->ifa_addr->sa_family == AF_INET) { ip_buffer->addr_family |= ADDR_FAMILY_IPV4; /* * Get subnet info. */ error = kvp_process_ip_address( curp->ifa_netmask, AF_INET, (char *) ip_buffer->sub_net, length, &sn_offset); if (error) goto gather_ipaddr; } else { ip_buffer->addr_family |= ADDR_FAMILY_IPV6; /* * Get subnet info in CIDR format. 
*/ weight = 0; sn_str = (char *)ip_buffer->sub_net; addr6 = (struct sockaddr_in6 *) curp->ifa_netmask; w = addr6->sin6_addr.s6_addr32; for (i = 0; i < 4; i++) weight += hweight32(&w[i]); sprintf(cidr_mask, "/%d", weight); if ((length - sn_offset) < (strlen(cidr_mask) + 1)) goto gather_ipaddr; if (sn_offset == 0) strcpy(sn_str, cidr_mask); else strcat(sn_str, cidr_mask); strcat((char *)ip_buffer->sub_net, ";"); sn_offset += strlen(sn_str) + 1; } /* * Collect other ip related configuration info. */ kvp_get_ipconfig_info(if_name, ip_buffer); } gather_ipaddr: error = kvp_process_ip_address(curp->ifa_addr, curp->ifa_addr->sa_family, buffer, length, &offset); if (error) goto getaddr_done; curp = curp->ifa_next; } getaddr_done: freeifaddrs(ifap); return error; }
0
186,449
/* MMU-notifier hook: a range [start, end) of the guest's host address space
 * is about to be invalidated.  Under kvm->mmu_lock (inside an SRCU read
 * section) the notifier count is bumped so concurrent faulters see the
 * invalidation in progress, every page in the range is unmapped via
 * kvm_unmap_hva(), and remote TLBs are flushed before the pages can be
 * freed if anything was unmapped or TLBs were already dirty. */
static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) { struct kvm *kvm = mmu_notifier_to_kvm(mn); int need_tlb_flush = 0, idx; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); /* * The count increase must become visible at unlock time as no * spte can be established without taking the mmu_lock and * count is also read inside the mmu_lock critical section. */ kvm->mmu_notifier_count++; for (; start < end; start += PAGE_SIZE) need_tlb_flush |= kvm_unmap_hva(kvm, start); need_tlb_flush |= kvm->tlbs_dirty; /* we've to flush the tlb before the pages can be freed */ if (need_tlb_flush) kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); }
0
509,174
/* Emit a status line for a unit: forwards to manager_status_printf(),
 * substituting the unit's human-readable description into the supplied
 * format string. */
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        const char *description = unit_description(u);

        manager_status_printf(u->manager, false, status,
                              unit_status_msg_format, description);
}
0
169,247
/* Build the FCI (file control information) for a file on a SetCOS card.
 * SetCOS 4.4, Nidel and eID-applet cards use the 4.4-style encoder; all
 * other card types fall back to the generic ISO7816 implementation. */
static int
setcos_construct_fci(sc_card_t *card, const sc_file_t *file, u8 *out, size_t *outlen)
{
	int use_44_layout = (card->type == SC_CARD_TYPE_SETCOS_44)
	    || (card->type == SC_CARD_TYPE_SETCOS_NIDEL)
	    || SETCOS_IS_EID_APPLET(card);

	if (use_44_layout)
		return setcos_construct_fci_44(card, file, out, outlen);

	return iso_ops->construct_fci(card, file, out, outlen);
}
0
10,827
// Task wrapper for the mock WebRTC peer-connection handler: captures the
// pending WebRTCVoidRequest and the desired outcome so the request can be
// completed later via the MethodTask machinery.
RTCVoidRequestTask(MockWebRTCPeerConnectionHandler* object, const WebKit::WebRTCVoidRequest& request, bool succeeded) : MethodTask<MockWebRTCPeerConnectionHandler>(object) , m_request(request) , m_succeeded(succeeded) { }
1
222,851
// Read-only accessor for the stored transfer-completion callback.
const UsbDeviceHandle::ResultCallback& callback() const {
  return callback_;
}
0
418,243
// Release the optional SLO manifest owned by this op; deleting a null
// pointer is a no-op, so no guard is needed.
~RGWPutObj() override {
  delete slo_info;
}
0
467,406
/* MSET key value [key value ...] — unconditional variant: delegates to the
 * generic handler with the NX flag cleared. */
void msetCommand(client *c) {
    msetGenericCommand(c, 0);
}
0
94,905
// Verifies that raising max_outbound_frames to INT32_MAX via the runtime
// override effectively disables outbound-frame flood mitigation: the server
// is expected to write DEFAULT_MAX_OUTBOUND_FRAMES DATA frames plus the
// HEADERS frame and the PING ACK (the PING from the client is what triggers
// the mitigation pass) without throwing.
TEST_P(Http2CodecImplTest, ResponseDataFloodMitigationDisabled) { Runtime::LoaderSingleton::getExisting()->mergeValues( {{"envoy.reloadable_features.http2_protocol_options.max_outbound_frames", "2147483647"}}); initialize(); TestHeaderMapImpl request_headers; HttpTestUtility::addDefaultHeaders(request_headers); EXPECT_CALL(request_decoder_, decodeHeaders_(_, false)); request_encoder_->encodeHeaders(request_headers, false); // +2 is to account for HEADERS and PING ACK, that is used to trigger mitigation EXPECT_CALL(server_connection_, write(_, _)) .Times(Http2Settings::DEFAULT_MAX_OUTBOUND_FRAMES + 2); EXPECT_CALL(response_decoder_, decodeHeaders_(_, false)).Times(1); EXPECT_CALL(response_decoder_, decodeData(_, false)) .Times(Http2Settings::DEFAULT_MAX_OUTBOUND_FRAMES); TestHeaderMapImpl response_headers{{":status", "200"}}; response_encoder_->encodeHeaders(response_headers, false); // Account for the single HEADERS frame above for (uint32_t i = 0; i < Http2Settings::DEFAULT_MAX_OUTBOUND_FRAMES; ++i) { Buffer::OwnedImpl data("0"); EXPECT_NO_THROW(response_encoder_->encodeData(data, false)); } // Presently flood mitigation is done only when processing downstream data // So we need to send stream from downstream client to trigger mitigation EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); EXPECT_NO_THROW(client_->sendPendingFrames()); }
0
352,088
/* Unmarshal a TPMI_SH_AUTH_SESSION handle from *buffer into *target.
 * After the raw TPM_HANDLE is read, the value must fall in the HMAC-session
 * range, the policy-session range, or equal TPM_RS_PW when allowPwd is TRUE;
 * any other handle yields TPM_RC_VALUE. */
TPMI_SH_AUTH_SESSION_Unmarshal(TPMI_SH_AUTH_SESSION *target, BYTE **buffer, INT32 *size, BOOL allowPwd) { TPM_RC rc = TPM_RC_SUCCESS; if (rc == TPM_RC_SUCCESS) { rc = TPM_HANDLE_Unmarshal(target, buffer, size); } if (rc == TPM_RC_SUCCESS) { BOOL isNotHmacSession = (*target < HMAC_SESSION_FIRST ) || (*target > HMAC_SESSION_LAST); BOOL isNotPolicySession = (*target < POLICY_SESSION_FIRST) || (*target > POLICY_SESSION_LAST); BOOL isNotLegalPwd = (*target != TPM_RS_PW) || !allowPwd; if (isNotHmacSession && isNotPolicySession && isNotLegalPwd) { rc = TPM_RC_VALUE; } } return rc; }
1
418,075
// Default destination-policy hook: nothing to initialize, report success.
virtual int init_dest_policy() {
  return 0;
}
0
376,911
/* Flush the refcount-table sector that contains entry rt_index back to the
 * image file.  The RT_ENTRIES_PER_SECTOR-aligned group of entries is
 * serialized to big-endian, checked against metadata-overlap constraints,
 * then written synchronously.  Returns 0 on success or a negative error. */
static int write_reftable_entry(BlockDriverState *bs, int rt_index) { BDRVQcowState *s = bs->opaque; uint64_t buf[RT_ENTRIES_PER_SECTOR]; int rt_start_index; int i, ret; rt_start_index = rt_index & ~(RT_ENTRIES_PER_SECTOR - 1); for (i = 0; i < RT_ENTRIES_PER_SECTOR; i++) { buf[i] = cpu_to_be64(s->refcount_table[rt_start_index + i]); } ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_REFCOUNT_TABLE, s->refcount_table_offset + rt_start_index * sizeof(uint64_t), sizeof(buf)); if (ret < 0) { return ret; } BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset + rt_start_index * sizeof(uint64_t), buf, sizeof(buf)); if (ret < 0) { return ret; } return 0; }
0
154,703
// Flip the boolean setting in place and report the new state ("On"/"Off")
// over the transport.  Always returns true.
static bool toggle_switch(Transport *transport, bool &setting) {
  setting = !setting;
  const char *reply = setting ? "On\n" : "Off\n";
  transport->sendString(reply);
  return true;
}
0
216,340
// Complete renderer-side setup once the process connection exists: mark the
// renderer initialized, publish this widget's compositing surface handle
// under its surface id, ack the ViewMsg_CreatingNew handshake, resume any
// resource requests queued for this view, and push the current size.
void RenderWidgetHostImpl::Init() { DCHECK(process_->HasConnection()); renderer_initialized_ = true; GpuSurfaceTracker::Get()->SetSurfaceHandle( surface_id_, GetCompositingSurface()); Send(new ViewMsg_CreatingNew_ACK(routing_id_)); GetProcess()->ResumeRequestsForView(routing_id_); WasResized(); }
0
188,298
/*
 * Top-level TLS/DTLS handshake state machine (server selects server-side
 * behavior).  On first entry (or renegotiation) it initializes handshake
 * state: buffers, finished MAC, statistics, SCTP/heartbeat bookkeeping,
 * version sanity checks and the info callback's HANDSHAKE_START event.
 * It then alternates between read_state_machine() and write_state_machine()
 * until the handshake finishes, a non-blocking retry is needed, or an error
 * occurs.  Returns 1 on success, -1/<=0 on error or NBIO; fires the
 * ACCEPT/CONNECT exit callback on the way out.
 */
static int state_machine(SSL *s, int server) { BUF_MEM *buf = NULL; unsigned long Time = (unsigned long)time(NULL); void (*cb) (const SSL *ssl, int type, int val) = NULL; OSSL_STATEM *st = &s->statem; int ret = -1; int ssret; if (st->state == MSG_FLOW_ERROR) { /* Shouldn't have been called if we're already in the error state */ return -1; } RAND_add(&Time, sizeof(Time), 0); ERR_clear_error(); clear_sys_error(); cb = get_callback(s); st->in_handshake++; if (!SSL_in_init(s) || SSL_in_before(s)) { if (!SSL_clear(s)) return -1; } #ifndef OPENSSL_NO_SCTP if (SSL_IS_DTLS(s)) { /* * Notify SCTP BIO socket to enter handshake mode and prevent stream * identifier other than 0. Will be ignored if no SCTP is used. */ BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_SET_IN_HANDSHAKE, st->in_handshake, NULL); } #endif #ifndef OPENSSL_NO_HEARTBEATS /* * If we're awaiting a HeartbeatResponse, pretend we already got and * don't await it anymore, because Heartbeats don't make sense during * handshakes anyway. 
*/ if (s->tlsext_hb_pending) { if (SSL_IS_DTLS(s)) dtls1_stop_timer(s); s->tlsext_hb_pending = 0; s->tlsext_hb_seq++; } #endif /* Initialise state machine */ if (st->state == MSG_FLOW_RENEGOTIATE) { s->renegotiate = 1; if (!server) s->ctx->stats.sess_connect_renegotiate++; } if (st->state == MSG_FLOW_UNINITED || st->state == MSG_FLOW_RENEGOTIATE) { if (st->state == MSG_FLOW_UNINITED) { st->hand_state = TLS_ST_BEFORE; } s->server = server; if (cb != NULL) cb(s, SSL_CB_HANDSHAKE_START, 1); if (SSL_IS_DTLS(s)) { if ((s->version & 0xff00) != (DTLS1_VERSION & 0xff00) && (server || (s->version & 0xff00) != (DTLS1_BAD_VER & 0xff00))) { SSLerr(SSL_F_STATE_MACHINE, ERR_R_INTERNAL_ERROR); goto end; } } else { if ((s->version >> 8) != SSL3_VERSION_MAJOR) { SSLerr(SSL_F_STATE_MACHINE, ERR_R_INTERNAL_ERROR); goto end; } } if (!ssl_security(s, SSL_SECOP_VERSION, 0, s->version, NULL)) { SSLerr(SSL_F_STATE_MACHINE, SSL_R_VERSION_TOO_LOW); goto end; } if (s->init_buf == NULL) { if ((buf = BUF_MEM_new()) == NULL) { goto end; } if (!BUF_MEM_grow(buf, SSL3_RT_MAX_PLAIN_LENGTH)) { goto end; } s->init_buf = buf; buf = NULL; } if (!ssl3_setup_buffers(s)) { goto end; } s->init_num = 0; /* * Should have been reset by tls_process_finished, too. */ s->s3->change_cipher_spec = 0; /* * Ok, we now need to push on a buffering BIO ...but not with * SCTP */ #ifndef OPENSSL_NO_SCTP if (!SSL_IS_DTLS(s) || !BIO_dgram_is_sctp(SSL_get_wbio(s))) #endif if (!ssl_init_wbio_buffer(s)) { goto end; } if (!server || st->state != MSG_FLOW_RENEGOTIATE) { if (!ssl3_init_finished_mac(s)) { ossl_statem_set_error(s); goto end; } } if (server) { if (st->state != MSG_FLOW_RENEGOTIATE) { s->ctx->stats.sess_accept++; } else if (!s->s3->send_connection_binding && !(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION)) { /* * Server attempting to renegotiate with client that doesn't * support secure renegotiation. 
*/ SSLerr(SSL_F_STATE_MACHINE, SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED); ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); ossl_statem_set_error(s); goto end; } else { /* * st->state == MSG_FLOW_RENEGOTIATE, we will just send a * HelloRequest */ s->ctx->stats.sess_accept_renegotiate++; } } else { s->ctx->stats.sess_connect++; /* mark client_random uninitialized */ memset(s->s3->client_random, 0, sizeof(s->s3->client_random)); s->hit = 0; s->s3->tmp.cert_request = 0; if (SSL_IS_DTLS(s)) { st->use_timer = 1; } } st->state = MSG_FLOW_WRITING; init_write_state_machine(s); st->read_state_first_init = 1; } while (st->state != MSG_FLOW_FINISHED) { if (st->state == MSG_FLOW_READING) { ssret = read_state_machine(s); if (ssret == SUB_STATE_FINISHED) { st->state = MSG_FLOW_WRITING; init_write_state_machine(s); } else { /* NBIO or error */ goto end; } } else if (st->state == MSG_FLOW_WRITING) { ssret = write_state_machine(s); if (ssret == SUB_STATE_FINISHED) { st->state = MSG_FLOW_READING; init_read_state_machine(s); } else if (ssret == SUB_STATE_END_HANDSHAKE) { st->state = MSG_FLOW_FINISHED; } else { /* NBIO or error */ goto end; } } else { /* Error */ ossl_statem_set_error(s); goto end; } } st->state = MSG_FLOW_UNINITED; ret = 1; end: st->in_handshake--; #ifndef OPENSSL_NO_SCTP if (SSL_IS_DTLS(s)) { /* * Notify SCTP BIO socket to leave handshake mode and allow stream * identifier other than 0. Will be ignored if no SCTP is used. */ BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_SET_IN_HANDSHAKE, st->in_handshake, NULL); } #endif BUF_MEM_free(buf); if (cb != NULL) { if (server) cb(s, SSL_CB_ACCEPT_EXIT, ret); else cb(s, SSL_CB_CONNECT_EXIT, ret); } return ret; }
0
359,934
/* Filter predicate: decide whether @data (a NautilusFile) should be shown,
 * given FilterOptions packed into @callback_data with GPOINTER_TO_INT.
 * The final TRUE requests filtering of backup partitions. */
filter_hidden_and_backup_partition_callback (gpointer data, gpointer callback_data)
{
	NautilusFile *file = NAUTILUS_FILE (data);
	FilterOptions options = GPOINTER_TO_INT (callback_data);

	return nautilus_file_should_show (file,
					  options & SHOW_HIDDEN,
					  options & SHOW_BACKUP,
					  TRUE);
}
0
60,129
// Return the next input character without consuming it; check() guards
// against reading past the end of the buffer first.
char VariableUnserializer::peek() const {
  check();
  return *m_buf;
}
0
191,301
/* Deserialize a WDDX packet (value, vallen bytes) into return_value using
 * an expat parser whose element/character handlers build a value stack.
 * On success exactly one stack entry remains and is copied into
 * return_value; otherwise FAILURE is returned.  The stack is destroyed on
 * all paths.
 * NOTE(review): the return value of XML_Parse() is not checked — malformed
 * XML is only detected indirectly via the final stack depth. */
int php_wddx_deserialize_ex(char *value, int vallen, zval *return_value) { wddx_stack stack; XML_Parser parser; st_entry *ent; int retval; wddx_stack_init(&stack); parser = XML_ParserCreate("UTF-8"); XML_SetUserData(parser, &stack); XML_SetElementHandler(parser, php_wddx_push_element, php_wddx_pop_element); XML_SetCharacterDataHandler(parser, php_wddx_process_data); XML_Parse(parser, value, vallen, 1); XML_ParserFree(parser); if (stack.top == 1) { wddx_stack_top(&stack, (void**)&ent); *return_value = *(ent->data); zval_copy_ctor(return_value); retval = SUCCESS; } else { retval = FAILURE; } wddx_stack_destroy(&stack); return retval; }
0
276,821
// Blink Document constructor: initializes the (very large) member set from
// the DocumentInit, then wires up context features and the resource fetcher
// (frame-backed, imports-controller-backed, or standalone), creates the
// root-scroller controller (with a viewport apply-scroll only in the main
// frame), sets the URL, initializes the security context and DNS prefetch,
// bumps the instance counter, advances the lifecycle to Inactive and
// creates the StyleEngine.
Document::Document(const DocumentInit& initializer, DocumentClassFlags documentClasses) : ContainerNode(0, CreateDocument) , TreeScope(*this) , m_hasNodesWithPlaceholderStyle(false) , m_evaluateMediaQueriesOnStyleRecalc(false) , m_pendingSheetLayout(NoLayoutWithPendingSheets) , m_frame(initializer.frame()) , m_domWindow(m_frame ? m_frame->localDOMWindow() : 0) , m_importsController(initializer.importsController()) , m_contextFeatures(ContextFeatures::defaultSwitch()) , m_wellFormed(false) , m_printing(false) , m_wasPrinting(false) , m_paginatedForScreen(false) , m_compatibilityMode(NoQuirksMode) , m_compatibilityModeLocked(false) , m_executeScriptsWaitingForResourcesTask(CancellableTaskFactory::create(this, &Document::executeScriptsWaitingForResources)) , m_hasAutofocused(false) , m_clearFocusedElementTimer(this, &Document::clearFocusedElementTimerFired) , m_domTreeVersion(++s_globalTreeVersion) , m_styleVersion(0) , m_listenerTypes(0) , m_mutationObserverTypes(0) , m_visitedLinkState(VisitedLinkState::create(*this)) , m_visuallyOrdered(false) , m_readyState(Complete) , m_parsingState(FinishedParsing) , m_gotoAnchorNeededAfterStylesheetsLoad(false) , m_containsValidityStyleRules(false) , m_containsPlugins(false) , m_updateFocusAppearanceSelectionBahavior(SelectionBehaviorOnFocus::Reset) , m_ignoreDestructiveWriteCount(0) , m_markers(new DocumentMarkerController(*this)) , m_updateFocusAppearanceTimer(this, &Document::updateFocusAppearanceTimerFired) , m_cssTarget(nullptr) , m_loadEventProgress(LoadEventNotRun) , m_startTime(currentTime()) , m_scriptRunner(ScriptRunner::create(this)) , m_xmlVersion("1.0") , m_xmlStandalone(StandaloneUnspecified) , m_hasXMLDeclaration(0) , m_designMode(false) , m_isRunningExecCommand(false) , m_hasAnnotatedRegions(false) , m_annotatedRegionsDirty(false) , m_useSecureKeyboardEntryWhenActive(false) , m_documentClasses(documentClasses) , m_isViewSource(false) , m_sawElementsInKnownNamespaces(false) , m_isSrcdocDocument(false) , 
m_isMobileDocument(false) , m_layoutView(0) , m_contextDocument(initializer.contextDocument()) , m_hasFullscreenSupplement(false) , m_loadEventDelayCount(0) , m_loadEventDelayTimer(this, &Document::loadEventDelayTimerFired) , m_pluginLoadingTimer(this, &Document::pluginLoadingTimerFired) , m_documentTiming(*this) , m_writeRecursionIsTooDeep(false) , m_writeRecursionDepth(0) , m_taskRunner(MainThreadTaskRunner::create(this)) , m_registrationContext(initializer.registrationContext(this)) , m_elementDataCacheClearTimer(this, &Document::elementDataCacheClearTimerFired) , m_timeline(AnimationTimeline::create(this)) , m_compositorPendingAnimations(new CompositorPendingAnimations()) , m_templateDocumentHost(nullptr) , m_didAssociateFormControlsTimer(this, &Document::didAssociateFormControlsTimerFired) , m_timers(timerTaskRunner()->adoptClone()) , m_hasViewportUnits(false) , m_parserSyncPolicy(AllowAsynchronousParsing) , m_nodeCount(0) { if (m_frame) { DCHECK(m_frame->page()); provideContextFeaturesToDocumentFrom(*this, *m_frame->page()); m_fetcher = m_frame->loader().documentLoader()->fetcher(); FrameFetchContext::provideDocumentToContext(m_fetcher->context(), this); } else if (m_importsController) { m_fetcher = FrameFetchContext::createContextAndFetcher(nullptr, this); } else { m_fetcher = ResourceFetcher::create(nullptr); } ViewportScrollCallback* applyScroll = nullptr; if (isInMainFrame()) { applyScroll = RootScrollerController::createViewportApplyScroll( frameHost()->topControls(), frameHost()->overscrollController()); } m_rootScrollerController = RootScrollerController::create(*this, applyScroll); if (initializer.shouldSetURL()) setURL(initializer.url()); initSecurityContext(initializer); initDNSPrefetch(); InstanceCounters::incrementCounter(InstanceCounters::DocumentCounter); m_lifecycle.advanceTo(DocumentLifecycle::Inactive); m_styleEngine = StyleEngine::create(*this); DCHECK(!parentDocument() || !parentDocument()->activeDOMObjectsAreSuspended()); #ifndef NDEBUG 
liveDocumentSet().add(this); #endif }
0
92,338
/* Look up the HFS catalog entry for @inum and print its file name,
 * converted from UTF-16 to UTF-8 (slashes replaced), to @hFile.
 * Returns 0 on success, 1 if the lookup or conversion fails. */
print_inode_name(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum)
{
    HFS_INFO *hfs = (HFS_INFO *) fs;
    HFS_ENTRY entry;
    char name8[HFS_MAXNAMLEN + 1];

    if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE))
        return 1;

    if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode,
            tsk_getu16(fs->endian, entry.thread.name.length),
            name8, HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH))
        return 1;

    tsk_fprintf(hFile, "%s", name8);
    return 0;
}
0
90,604
/* True when an owner mm has been recorded for this vhost device. */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm != NULL;
}
0
153,158
/* Read one byte-wide IPMI register via a 32-bit port read: the port is the
 * base address plus offset scaled by the register spacing, and the result
 * is shifted down by regshift and masked to 8 bits. */
static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset)
{
	unsigned int port = io->addr_data + (offset * io->regspacing);

	return (inl(port) >> io->regshift) & 0xff;
}
0
473,770
/* Handle a received TDLS channel-switch response frame.  Validates the
 * frame length, that the sender is an authorized TDLS peer (under
 * local->sta_mtx), parses the IEs (requiring channel-switch timing and
 * link-id elements when status is 0), checks the initiator role encoded in
 * the link-id, builds the response template, and forwards the parsed
 * parameters to the driver via drv_tdls_recv_channel_switch().  A non-zero
 * status code skips IE validation and is reported to the driver directly.
 * Returns 0 or a negative errno; the template skb (if any) is freed before
 * returning. */
ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee802_11_elems elems; struct sta_info *sta; struct ieee80211_tdls_data *tf = (void *)skb->data; bool local_initiator; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); int baselen = offsetof(typeof(*tf), u.chan_switch_resp.variable); struct ieee80211_tdls_ch_sw_params params = {}; int ret; params.action_code = WLAN_TDLS_CHANNEL_SWITCH_RESPONSE; params.timestamp = rx_status->device_timestamp; if (skb->len < baselen) { tdls_dbg(sdata, "TDLS channel switch resp too short: %d\n", skb->len); return -EINVAL; } mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, tf->sa); if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) { tdls_dbg(sdata, "TDLS chan switch from non-peer sta %pM\n", tf->sa); ret = -EINVAL; goto out; } params.sta = &sta->sta; params.status = le16_to_cpu(tf->u.chan_switch_resp.status_code); if (params.status != 0) { ret = 0; goto call_drv; } ieee802_11_parse_elems(tf->u.chan_switch_resp.variable, skb->len - baselen, false, &elems, NULL, NULL); if (elems.parse_error) { tdls_dbg(sdata, "Invalid IEs in TDLS channel switch resp\n"); ret = -EINVAL; goto out; } if (!elems.ch_sw_timing || !elems.lnk_id) { tdls_dbg(sdata, "TDLS channel switch resp - missing IEs\n"); ret = -EINVAL; goto out; } /* validate the initiator is set correctly */ local_initiator = !memcmp(elems.lnk_id->init_sta, sdata->vif.addr, ETH_ALEN); if (local_initiator == sta->sta.tdls_initiator) { tdls_dbg(sdata, "TDLS chan switch invalid lnk-id initiator\n"); ret = -EINVAL; goto out; } params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time); params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout); params.tmpl_skb = ieee80211_tdls_ch_sw_resp_tmpl_get(sta, &params.ch_sw_tm_ie); if (!params.tmpl_skb) { ret = -ENOENT; goto out; } ret = 0; call_drv: drv_tdls_recv_channel_switch(sdata->local, sdata, 
&params); tdls_dbg(sdata, "TDLS channel switch response received from %pM status %d\n", tf->sa, params.status); out: mutex_unlock(&local->sta_mtx); dev_kfree_skb_any(params.tmpl_skb); return ret; }
0
170,197
// Generated V8 binding: invoke sequenceLongMethod() on the native
// TestObjectPython behind the holder and return the resulting sequence
// to script as a JS array.
static void sequenceLongMethodMethod(const v8::FunctionCallbackInfo<v8::Value>& info)
{
    TestObjectPython* impl = V8TestObjectPython::toNative(info.Holder());
    v8SetReturnValue(info, v8Array(impl->sequenceLongMethod(), info.GetIsolate()));
}
0
223,091
// Media elements always get a LayoutMedia layout object; the computed
// style argument does not influence the choice.
LayoutObject* HTMLMediaElement::createLayoutObject(const ComputedStyle&)
{
    return new LayoutMedia(this);
}
0
178,071
/* Inflate one compressed DEFLATE block (btype 1 = fixed Huffman,
 * btype 2 = dynamic Huffman) from the bit stream at *bp into `out`,
 * advancing *bp and *pos.  Literal symbols are appended directly;
 * length/distance pairs copy from the sliding window already present in
 * `out` (with wrap-around when the copy overlaps itself).  Returns 0 on
 * success or a lodepng error code (10/11 decode failure, 18 invalid
 * distance code, 51 bit-pointer overrun, 52 bad back-distance, 83 alloc).
 * NOTE(review): in the code_d > 29 branch the decode-failure test checks
 * code_ll == (unsigned)(-1), but code_ll is already known to be a length
 * code there; a failed distance decode (code_d == (unsigned)(-1)) is
 * therefore reported as error 18 instead of 10/11 — confirm against
 * upstream lodepng. */
static unsigned inflateHuffmanBlock(ucvector* out, const unsigned char* in, size_t* bp, size_t* pos, size_t inlength, unsigned btype) { unsigned error = 0; HuffmanTree tree_ll; /*the huffman tree for literal and length codes*/ HuffmanTree tree_d; /*the huffman tree for distance codes*/ size_t inbitlength = inlength * 8; HuffmanTree_init(&tree_ll); HuffmanTree_init(&tree_d); if(btype == 1) { error = getTreeInflateFixed(&tree_ll, &tree_d); if (error) { HuffmanTree_cleanup(&tree_ll); HuffmanTree_cleanup(&tree_d); return error; } } else if(btype == 2) error = getTreeInflateDynamic(&tree_ll, &tree_d, in, bp, inlength); while(!error) /*decode all symbols until end reached, breaks at end code*/ { /*code_ll is literal, length or end code*/ unsigned code_ll = huffmanDecodeSymbol(in, bp, &tree_ll, inbitlength); if(code_ll <= 255) /*literal symbol*/ { /*ucvector_push_back would do the same, but for some reason the two lines below run 10% faster*/ if(!ucvector_resize(out, (*pos) + 1)) ERROR_BREAK(83 /*alloc fail*/); out->data[*pos] = (unsigned char)code_ll; (*pos)++; } else if(code_ll >= FIRST_LENGTH_CODE_INDEX && code_ll <= LAST_LENGTH_CODE_INDEX) /*length code*/ { unsigned code_d, distance; unsigned numextrabits_l, numextrabits_d; /*extra bits for length and distance*/ size_t start, forward, backward, length; /*part 1: get length base*/ length = LENGTHBASE[code_ll - FIRST_LENGTH_CODE_INDEX]; /*part 2: get extra bits and add the value of that to length*/ numextrabits_l = LENGTHEXTRA[code_ll - FIRST_LENGTH_CODE_INDEX]; if(*bp >= inbitlength) ERROR_BREAK(51); /*error, bit pointer will jump past memory*/ length += readBitsFromStream(bp, in, numextrabits_l); /*part 3: get distance code*/ code_d = huffmanDecodeSymbol(in, bp, &tree_d, inbitlength); if(code_d > 29) { if(code_ll == (unsigned)(-1)) /*huffmanDecodeSymbol returns (unsigned)(-1) in case of error*/ { /*return error code 10 or 11 depending on the situation that happened in huffmanDecodeSymbol (10=no endcode, 11=wrong jump 
outside of tree)*/ error = (*bp) > inlength * 8 ? 10 : 11; } else error = 18; /*error: invalid distance code (30-31 are never used)*/ break; } distance = DISTANCEBASE[code_d]; /*part 4: get extra bits from distance*/ numextrabits_d = DISTANCEEXTRA[code_d]; if(*bp >= inbitlength) ERROR_BREAK(51); /*error, bit pointer will jump past memory*/ distance += readBitsFromStream(bp, in, numextrabits_d); /*part 5: fill in all the out[n] values based on the length and dist*/ start = (*pos); if(distance > start) ERROR_BREAK(52); /*too long backward distance*/ backward = start - distance; if(!ucvector_resize(out, (*pos) + length)) ERROR_BREAK(83 /*alloc fail*/); for(forward = 0; forward < length; forward++) { out->data[(*pos)] = out->data[backward]; (*pos)++; backward++; if(backward >= start) backward = start - distance; } } else if(code_ll == 256) { break; /*end code, break the loop*/ } else /*if(code == (unsigned)(-1))*/ /*huffmanDecodeSymbol returns (unsigned)(-1) in case of error*/ { /*return error code 10 or 11 depending on the situation that happened in huffmanDecodeSymbol (10=no endcode, 11=wrong jump outside of tree)*/ error = (*bp) > inlength * 8 ? 10 : 11; break; } } HuffmanTree_cleanup(&tree_ll); HuffmanTree_cleanup(&tree_d); return error; }
0
434,468
/* Find (or allocate) a quota-file data block with a free dquot slot, claim
 * one entry and record its file offset in dquot->dq_dqb.u.v2_mdqb.dqb_off.
 * A block that becomes full is removed from the free-entry list; a freshly
 * allocated block is zeroed and becomes the new dqi_free_entry head.
 * Returns the block number on success, or 0 with *err set on failure
 * (log_err aborts if a supposedly free block has no unused entry). */
static unsigned int find_free_dqentry(struct quota_handle *h, struct dquot *dquot, int *err) { int blk, i; struct qt_disk_dqdbheader *dh; struct qtree_mem_dqinfo *info = &h->qh_info.u.v2_mdqi.dqi_qtree; char *ddquot; dqbuf_t buf; *err = 0; buf = getdqbuf(); if (!buf) { *err = -ENOMEM; return 0; } dh = (struct qt_disk_dqdbheader *)buf; if (info->dqi_free_entry) { blk = info->dqi_free_entry; read_blk(h, blk, buf); } else { blk = get_free_dqblk(h); if (blk < 0) { freedqbuf(buf); *err = blk; return 0; } memset(buf, 0, QT_BLKSIZE); info->dqi_free_entry = blk; mark_quotafile_info_dirty(h); } /* Block will be full? */ if (ext2fs_le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) remove_free_dqentry(h, buf, blk); dh->dqdh_entries = ext2fs_cpu_to_le16(ext2fs_le16_to_cpu(dh->dqdh_entries) + 1); /* Find free structure in block */ ddquot = buf + sizeof(struct qt_disk_dqdbheader); for (i = 0; i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); i++) ddquot += info->dqi_entry_size; if (i == qtree_dqstr_in_blk(info)) log_err("find_free_dqentry(): Data block full unexpectedly."); write_blk(h, blk, buf); dquot->dq_dqb.u.v2_mdqb.dqb_off = (blk << QT_BLKSIZE_BITS) + sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size; freedqbuf(buf); return blk; }
0
421,618
/* Build a lightweight "meta" copy of the inner map referenced by
 * inner_map_ufd, used by the verifier for map-in-map type checks.
 * Rejects prog-array and (per-CPU) cgroup-storage inner maps and nested
 * map-in-map.  Only the fields needed for bpf_map_meta_equal()/verification
 * are copied (type, key/value sizes, flags, max_entries, ops, and for
 * array maps the unpriv index mask).  Returns the meta map or an ERR_PTR;
 * the temporary fd reference is dropped on all paths. */
struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) { struct bpf_map *inner_map, *inner_map_meta; u32 inner_map_meta_size; struct fd f; f = fdget(inner_map_ufd); inner_map = __bpf_map_get(f); if (IS_ERR(inner_map)) return inner_map; /* prog_array->owner_prog_type and owner_jited * is a runtime binding. Doing static check alone * in the verifier is not enough. */ if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY || inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || inner_map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { fdput(f); return ERR_PTR(-ENOTSUPP); } /* Does not support >1 level map-in-map */ if (inner_map->inner_map_meta) { fdput(f); return ERR_PTR(-EINVAL); } inner_map_meta_size = sizeof(*inner_map_meta); /* In some cases verifier needs to access beyond just base map. */ if (inner_map->ops == &array_map_ops) inner_map_meta_size = sizeof(struct bpf_array); inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER); if (!inner_map_meta) { fdput(f); return ERR_PTR(-ENOMEM); } inner_map_meta->map_type = inner_map->map_type; inner_map_meta->key_size = inner_map->key_size; inner_map_meta->value_size = inner_map->value_size; inner_map_meta->map_flags = inner_map->map_flags; inner_map_meta->max_entries = inner_map->max_entries; /* Misc members not needed in bpf_map_meta_equal() check. */ inner_map_meta->ops = inner_map->ops; if (inner_map->ops == &array_map_ops) { inner_map_meta->unpriv_array = inner_map->unpriv_array; container_of(inner_map_meta, struct bpf_array, map)->index_mask = container_of(inner_map, struct bpf_array, map)->index_mask; } fdput(f); return inner_map_meta; }
0
6,432
/* Exercise the "js" command: plain JavaScript evaluates to its string
 * result, and Uzbl.run() can execute uzbl commands (here: print the
 * @useragent variable) from inside JavaScript. */
test_js (void) {
    GString *out;

    out = g_string_new ("");

    parse_cmd_line ("js ('x' + 345).toUpperCase()", out);
    g_assert_cmpstr ("X345", ==, out->str);

    uzbl.net.useragent = "Test useragent";
    parse_cmd_line ("js Uzbl.run('print @useragent').toUpperCase();", out);
    g_assert_cmpstr ("TEST USERAGENT", ==, out->str);

    g_string_free (out, TRUE);
}
1
80,917
/* IPv4 reassembly entry point: account the fragment, evict old queues when
 * the per-netns frag memory exceeds high_thresh, then find or create the
 * queue matching this packet's (id, src, dst, proto, user) and enqueue the
 * fragment under the queue lock.  Returns ip_frag_queue()'s result, or
 * -ENOMEM (after freeing the skb and counting a reassembly failure) when
 * no queue could be found or created. */
int ip_defrag(struct sk_buff *skb, u32 user) { struct ipq *qp; struct net *net; net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev); IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); /* Start by cleaning up the memory. */ if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) ip_evictor(net); /* Lookup (or create) queue header */ if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) { int ret; spin_lock(&qp->q.lock); ret = ip_frag_queue(qp, skb); spin_unlock(&qp->q.lock); ipq_put(qp); return ret; } IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); kfree_skb(skb); return -ENOMEM; }
0
489,056
/* Fan SetCredentialsAttributesA out to every valid mechanism credential
 * stored behind phCredential.  Reports SEC_E_OK if at least one underlying
 * security package accepted the attribute; otherwise
 * SEC_E_UNSUPPORTED_FUNCTION.  SEC_E_INVALID_HANDLE if the handle resolves
 * to no credential array. */
static SECURITY_STATUS SEC_ENTRY negotiate_SetCredentialsAttributesA(PCredHandle phCredential, ULONG ulAttribute, void* pBuffer, ULONG cbBuffer) { MechCred* creds; BOOL success = FALSE; SECURITY_STATUS secStatus; creds = sspi_SecureHandleGetLowerPointer(phCredential); if (!creds) return SEC_E_INVALID_HANDLE; for (size_t i = 0; i < MECH_COUNT; i++) { MechCred* cred = &creds[i]; if (!cred->valid) continue; WINPR_ASSERT(cred->mech); WINPR_ASSERT(cred->mech->pkg); WINPR_ASSERT(cred->mech->pkg->table); WINPR_ASSERT(cred->mech->pkg->table->SetCredentialsAttributesA); secStatus = cred->mech->pkg->table->SetCredentialsAttributesA(&cred->cred, ulAttribute, pBuffer, cbBuffer); if (secStatus == SEC_E_OK) { success = TRUE; } } // return success if at least one submodule accepts the credential attribute return (success ? SEC_E_OK : SEC_E_UNSUPPORTED_FUNCTION); }
0
367,624
/* Convenience wrapper: print the current context's stack trace via
 * show_stack() with default task/stack-pointer arguments. */
dump_stack (void)
{
	show_stack(NULL, NULL);
}
0
470,024
/* Expand an 8-bit luminance row into LA-as-RGBA storage: replicate each
 * gray value into the first three output bytes and set the fourth (alpha)
 * to fully opaque (255).  Writes 4 * xsize bytes to out. */
l2la(UINT8 *out, const UINT8 *in, int xsize) {
    int i;

    for (i = 0; i < xsize; i++) {
        UINT8 gray = in[i];

        out[0] = gray;
        out[1] = gray;
        out[2] = gray;
        out[3] = 255;
        out += 4;
    }
}
0
418,503
// Default-construct with no DLO manifest attached.
RGWPutMetadataObject()
  : dlo_manifest(nullptr)
{
}
0
4,166
static void tw5864_handle_frame(struct tw5864_h264_frame *frame) { #define SKIP_VLCBUF_BYTES 3 struct tw5864_input *input = frame->input; struct tw5864_dev *dev = input->root; struct tw5864_buf *vb; struct vb2_v4l2_buffer *v4l2_buf; int frame_len = frame->vlc_len - SKIP_VLCBUF_BYTES; u8 *dst = input->buf_cur_ptr; u8 tail_mask, vlc_mask = 0; int i; u8 vlc_first_byte = ((u8 *)(frame->vlc.addr + SKIP_VLCBUF_BYTES))[0]; unsigned long flags; int zero_run; u8 *src; u8 *src_end; #ifdef DEBUG if (frame->checksum != tw5864_vlc_checksum((u32 *)frame->vlc.addr, frame_len)) dev_err(&dev->pci->dev, "Checksum of encoded frame doesn't match!\n"); #endif spin_lock_irqsave(&input->slock, flags); vb = input->vb; input->vb = NULL; spin_unlock_irqrestore(&input->slock, flags); v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf); if (!vb) { /* Gone because of disabling */ dev_dbg(&dev->pci->dev, "vb is empty, dropping frame\n"); return; } /* * Check for space. * Mind the overhead of startcode emulation prevention. */ if (input->buf_cur_space_left < frame_len * 5 / 4) { dev_err_once(&dev->pci->dev, "Left space in vb2 buffer, %d bytes, is less than considered safely enough to put frame of length %d. 
Dropping this frame.\n", input->buf_cur_space_left, frame_len); return; } for (i = 0; i < 8 - input->tail_nb_bits; i++) vlc_mask |= 1 << i; tail_mask = (~vlc_mask) & 0xff; dst[0] = (input->tail & tail_mask) | (vlc_first_byte & vlc_mask); frame_len--; dst++; /* H.264 startcode emulation prevention */ src = frame->vlc.addr + SKIP_VLCBUF_BYTES + 1; src_end = src + frame_len; zero_run = 0; for (; src < src_end; src++) { if (zero_run < 2) { if (*src == 0) ++zero_run; else zero_run = 0; } else { if ((*src & ~0x03) == 0) *dst++ = 0x03; zero_run = *src == 0; } *dst++ = *src; } vb2_set_plane_payload(&vb->vb.vb2_buf, 0, dst - (u8 *)vb2_plane_vaddr(&vb->vb.vb2_buf, 0)); vb->vb.vb2_buf.timestamp = frame->timestamp; v4l2_buf->field = V4L2_FIELD_INTERLACED; v4l2_buf->sequence = frame->seqno; /* Check for motion flags */ if (frame->gop_seqno /* P-frame */ && tw5864_is_motion_triggered(frame)) { struct v4l2_event ev = { .type = V4L2_EVENT_MOTION_DET, .u.motion_det = { .flags = V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ, .frame_sequence = v4l2_buf->sequence, }, }; v4l2_event_queue(&input->vdev, &ev); } vb2_buffer_done(&vb->vb.vb2_buf, VB2_BUF_STATE_DONE); }
1
430,952
/*
 * VIDIOC_QUERYCAP handler: report driver name, product name, bus info
 * and the capture/read-write/streaming capability flags for this camera.
 * Always returns 0.
 */
static int zr364xx_vidioc_querycap(struct file *file, void *priv,
				   struct v4l2_capability *cap)
{
	struct zr364xx_camera *cam = video_drvdata(file);

	strscpy(cap->driver, DRIVER_DESC, sizeof(cap->driver));
	/* Product string may be absent on some devices. */
	if (cam->udev->product)
		strscpy(cap->card, cam->udev->product, sizeof(cap->card));
	strscpy(cap->bus_info, dev_name(&cam->udev->dev),
		sizeof(cap->bus_info));
	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
			   V4L2_CAP_READWRITE |
			   V4L2_CAP_STREAMING;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;

	return 0;
}
0
210,941
// Extends the observation duration recorded in |feature_proto| by
// |extra_observation_duration| — but only while the feature has never
// been observed in use (no use_timestamp, or a zero one).  Once a use
// timestamp exists the observation window is no longer accumulated.
void LocalSiteCharacteristicsDataImpl::IncrementFeatureObservationDuration(
    SiteCharacteristicsFeatureProto* feature_proto,
    base::TimeDelta extra_observation_duration) {
  if (!feature_proto->has_use_timestamp() ||
      InternalRepresentationToTimeDelta(feature_proto->use_timestamp())
          .is_zero()) {
    feature_proto->set_observation_duration(TimeDeltaToInternalRepresentation(
        InternalRepresentationToTimeDelta(
            feature_proto->observation_duration()) +
        extra_observation_duration));
  }
}
0
423,424
/*
 * Accessor: return the key-related option bits of 'zone'.
 * REQUIREs a valid zone handle.
 */
dns_zone_getkeyopts(dns_zone_t *zone) {
	REQUIRE(DNS_ZONE_VALID(zone));

	return (zone->keyopts);
}
0
481,507
static __cold int io_init_bl_list(struct io_ring_ctx *ctx) { int i; ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL); if (!ctx->io_bl) return -ENOMEM; for (i = 0; i < BGID_ARRAY; i++) { INIT_LIST_HEAD(&ctx->io_bl[i].buf_list); ctx->io_bl[i].bgid = i; } return 0; }
0
359,587
/*
 * GCompareFunc for sorting VPN connections alphabetically by their
 * connection ID (strcmp ordering).
 */
sort_vpn_connections (gconstpointer a, gconstpointer b)
{
	return strcmp (get_connection_id (NM_CONNECTION (a)),
	               get_connection_id (NM_CONNECTION (b)));
}
0
173,038
// Constructs the thread delegate with the numeric id it will use to
// distinguish itself from other test threads.
explicit MultipleThreadMain(int16 id) : id_(id) {}
0
348,689
DataLocId CiffComponent::dataLocation(uint16_t tag)
{
    // The top two bits of a CIFF tag select where the tag's data is
    // stored; anything other than the two known encodings is invalid.
    const uint16_t storageBits = tag & 0xc000;
    if (storageBits == 0x0000)
        return valueData;
    if (storageBits == 0x4000)
        return directoryData;
    return invalidDataLocId;
}                               // CiffComponent::dataLocation
1
226,963
// Returns true when this <input> element's type behaves as a telephone
// field; simply delegates to the active InputType object.
bool HTMLInputElement::isTelephoneField() const
{
    return m_inputType->isTelephoneField();
}
0
415,719
/**
 * sscanf - parse formatted input from a string
 * @buf: input string to scan
 * @fmt: scanf-style format describing the expected fields
 *
 * Variadic front-end for vsscanf(): packages the variable arguments
 * into a va_list and forwards them.  Returns the number of input items
 * successfully matched and assigned.
 */
int sscanf(const char *buf, const char *fmt, ...)
{
	va_list ap;
	int matched;

	va_start(ap, fmt);
	matched = vsscanf(buf, fmt, ap);
	va_end(ap);

	return matched;
}
0
187,823
// Binder proxy for IDrm::queryKeyStatus: marshals the session id,
// performs the QUERY_KEY_STATUS transaction, then unmarshals the
// returned key/value pairs into |infoMap| (cleared first).
// Returns the transaction error if the call failed, otherwise the
// remote method's status read from the reply.
virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId,
                                KeyedVector<String8, String8> &infoMap) const
{
    Parcel data, reply;
    data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
    writeVector(data, sessionId);

    status_t status = remote()->transact(QUERY_KEY_STATUS, data, &reply);
    if (status != OK) {
        return status;
    }

    infoMap.clear();
    // Reply layout: count, then count (key, value) string pairs,
    // then the remote status code.
    size_t count = reply.readInt32();
    for (size_t i = 0; i < count; i++) {
        String8 key = reply.readString8();
        String8 value = reply.readString8();
        infoMap.add(key, value);
    }
    return reply.readInt32();
}
0
520,135
// Returns the unit's uncacheable bitmap: non-zero when the UNION
// subquery result may not be cached across executions.
uint8 subselect_union_engine::uncacheable()
{
  return unit->uncacheable;
}
0
233,770
/*
 * Register a new virtqueue on @vdev: find the first unused slot
 * (vring.num == 0), give it @queue_size descriptors and the given
 * output handler, and return it.  Aborts when all slots are taken or
 * @queue_size exceeds VIRTQUEUE_MAX_SIZE (programming errors).
 */
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    /* A vring.num of 0 marks a free queue slot. */
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}
0
80,833
/*
 * ext4_unlink - remove a directory entry and drop the inode's link.
 * Runs under a journal handle sized for a delete transaction; on the
 * last link the inode is placed on the orphan list so that the space
 * is reclaimed even across a crash.  Returns 0 or a negative errno.
 */
static int ext4_unlink(struct inode *dir, struct dentry *dentry)
{
	int retval;
	struct inode *inode;
	struct buffer_head *bh;
	struct ext4_dir_entry_2 *de;
	handle_t *handle;

	trace_ext4_unlink_enter(dir, dentry);
	/* Initialize quotas before so that eventual writes go
	 * in separate transaction */
	dquot_initialize(dir);
	dquot_initialize(dentry->d_inode);

	handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (IS_DIRSYNC(dir))
		ext4_handle_sync(handle);

	retval = -ENOENT;
	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
	if (!bh)
		goto end_unlink;

	inode = dentry->d_inode;

	/* Stale dentry: the directory entry no longer points at us. */
	retval = -EIO;
	if (le32_to_cpu(de->inode) != inode->i_ino)
		goto end_unlink;

	/* Repair an impossible zero link count before dropping it. */
	if (!inode->i_nlink) {
		ext4_warning(inode->i_sb,
			     "Deleting nonexistent file (%lu), %d",
			     inode->i_ino, inode->i_nlink);
		set_nlink(inode, 1);
	}
	retval = ext4_delete_entry(handle, dir, de, bh);
	if (retval)
		goto end_unlink;
	dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
	ext4_update_dx_flag(dir);
	ext4_mark_inode_dirty(handle, dir);
	drop_nlink(inode);
	/* Last link gone: track as orphan until final iput frees it. */
	if (!inode->i_nlink)
		ext4_orphan_add(handle, inode);
	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	retval = 0;

end_unlink:
	ext4_journal_stop(handle);
	brelse(bh);
	trace_ext4_unlink_exit(dentry, retval);
	return retval;
}
0
338,070
/*
 * Close hook for a socket-backed QEMUFile: closes the descriptor and
 * frees the wrapper state.  Always reports success.
 *
 * NOTE(review): the return value of closesocket() is discarded, so a
 * failed close is silently ignored — presumably acceptable for this
 * transport, but worth confirming against the QEMUFile close contract.
 */
static int socket_close(void *opaque)
{
    QEMUFileSocket *s = opaque;
    closesocket(s->fd);
    g_free(s);
    return 0;
}
1
65,605
// Verifies that a request containing a zero-length header name is
// rejected: the server must drop the connection, bump the HTTP/2
// messaging-error counter, and record a delayed-close timeout.
// ("sendFame" is the test harness' helper name, kept as-is.)
TEST_P(Http2FloodMitigationTest, ZerolenHeader) {
  beginSession();

  // Send invalid request.
  uint32_t request_idx = 0;
  auto request = Http2Frame::makeMalformedRequestWithZerolenHeader(request_idx, "host", "/");
  sendFame(request);

  tcp_client_->waitForDisconnect();

  EXPECT_EQ(1, test_server_->counter("http2.rx_messaging_error")->value());
  EXPECT_EQ(1,
            test_server_->counter("http.config_test.downstream_cx_delayed_close_timeout")->value());
}
0
208,586
/*
 * CLI command "show ip bgp vpnv4 all neighbors A.B.C.D routes":
 * parses the neighbor address, checks the peer exists with the VPNv4
 * address family negotiated, then displays routes learned from it.
 */
DEFUN (show_ip_bgp_vpnv4_all_neighbor_routes,
       show_ip_bgp_vpnv4_all_neighbor_routes_cmd,
       "show ip bgp vpnv4 all neighbors A.B.C.D routes",
       SHOW_STR
       IP_STR
       BGP_STR
       "Display VPNv4 NLRI specific information\n"
       "Display information about all VPNv4 NLRIs\n"
       "Detailed information on TCP and BGP neighbor connections\n"
       "Neighbor to display information about\n"
       "Display routes learned from neighbor\n")
{
  union sockunion su;
  struct peer *peer;
  int ret;

  /* Convert the dotted-quad argument into a sockunion. */
  ret = str2sockunion (argv[0], &su);
  if (ret < 0)
    {
      vty_out (vty, "Malformed address: %s%s", argv[0], VTY_NEWLINE);
      return CMD_WARNING;
    }

  peer = peer_lookup (NULL, &su);
  if (! peer || ! peer->afc[AFI_IP][SAFI_MPLS_VPN])
    {
      vty_out (vty, "%% No such neighbor or address family%s", VTY_NEWLINE);
      return CMD_WARNING;
    }

  return bgp_show_mpls_vpn (vty, NULL, bgp_show_type_neighbor, &su, 0);
}
0
104,272
/*
 * MMIO/memory write path of the x86 instruction emulator: forwards to
 * the shared read/write helper with the write ops table.
 * ("write_emultor" is the existing upstream identifier, kept as-is.)
 */
int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
			    unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, (void *)val, bytes,
				   exception, &write_emultor);
}
0
369,230
/* Copy character-class bitset SRC into DST (whole-array memcpy;
   charclass is an array type, so sizeof gives the full set size). */
copyset (charclass const src, charclass dst)
{
  memcpy (dst, src, sizeof (charclass));
}
0
157,709
// Returns next_osdmap while registering a reservation on its epoch
// (under pre_publish_lock) so the map cannot be retired until the
// caller releases the reservation.  Returns an empty ref when no
// next map is staged.
OSDMapRef get_nextmap_reserved() {
  Mutex::Locker l(pre_publish_lock);
  if (!next_osdmap)
    return OSDMapRef();
  epoch_t e = next_osdmap->get_epoch();
  // insert() is a no-op if a reservation count already exists for e.
  map<epoch_t, unsigned>::iterator i =
    map_reservations.insert(make_pair(e, 0)).first;
  i->second++;
  return next_osdmap;
}
0
480,288
static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_submit_link *link = &ctx->submit_state.link; int ret; ret = io_init_req(ctx, req, sqe); if (unlikely(ret)) { fail_req: if (link->head) { /* fail even hard links since we don't submit */ req_set_fail(link->head); io_req_complete_failed(link->head, -ECANCELED); link->head = NULL; } io_req_complete_failed(req, ret); return ret; } ret = io_req_prep(req, sqe); if (unlikely(ret)) goto fail_req; /* don't need @sqe from now on */ trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data, req->flags, true, ctx->flags & IORING_SETUP_SQPOLL); /* * If we already have a head request, queue this one for async * submittal once the head completes. If we don't have a head but * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be * submitted sync once the chain is complete. If none of those * conditions are true (normal request), then just queue it. */ if (link->head) { struct io_kiocb *head = link->head; ret = io_req_prep_async(req); if (unlikely(ret)) goto fail_req; trace_io_uring_link(ctx, req, head); link->last->link = req; link->last = req; /* last request of a link, enqueue the link */ if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { link->head = NULL; io_queue_sqe(head); } } else { if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { link->head = req; link->last = req; } else { io_queue_sqe(req); } } return 0;
0
85,531
/*
 * Extract the IPv6 reassembly key and fragment geometry from 'pkt'.
 * Walks the extension headers to find the fragment header (asserted to
 * exist), computes the fragment's first/last payload byte offsets, the
 * first/last-fragment flags, and fills 'key' with identifier, dl_type,
 * source/destination addresses, zone and recirc id.
 */
ipf_v6_key_extract(struct dp_packet *pkt, ovs_be16 dl_type, uint16_t zone,
                   struct ipf_list_key *key, uint16_t *start_data_byte,
                   uint16_t *end_data_byte, bool *ff, bool *lf)
{
    const struct ovs_16aligned_ip6_hdr *l3 = dp_packet_l3(pkt);
    uint8_t nw_frag = 0;
    uint8_t nw_proto = l3->ip6_nxt;
    const void *data = l3 + 1;
    size_t datasize = dp_packet_l3_size(pkt) - sizeof *l3;
    const struct ovs_16aligned_ip6_frag *frag_hdr = NULL;

    parse_ipv6_ext_hdrs(&data, &datasize, &nw_proto, &nw_frag, &frag_hdr);
    /* Callers only reach here for fragments, so the header must exist. */
    ovs_assert(nw_frag && frag_hdr);
    ovs_be16 ip6f_offlg = frag_hdr->ip6f_offlg;
    *start_data_byte = ntohs(ip6f_offlg & IP6F_OFF_MASK) +
        sizeof (struct ovs_16aligned_ip6_frag);
    *end_data_byte = *start_data_byte + dp_packet_l4_size(pkt) - 1;
    *ff = ipf_is_first_v6_frag(ip6f_offlg);
    *lf = ipf_is_last_v6_frag(ip6f_offlg);
    memset(key, 0, sizeof *key);
    key->ip_id = get_16aligned_be32(&frag_hdr->ip6f_ident);
    key->dl_type = dl_type;
    memcpy(&key->src_addr.ipv6, &l3->ip6_src, sizeof key->src_addr.ipv6);
    /* We are not supporting parsing of the routing header to use as the
     * dst address part of the key. */
    memcpy(&key->dst_addr.ipv6, &l3->ip6_dst, sizeof key->dst_addr.ipv6);
    key->nw_proto = 0;   /* Not used for key for V6. */
    key->zone = zone;
    key->recirc_id = pkt->md.recirc_id;
}
0
295,563
/*
 * Length of an OCaml string value.  OCaml strings are padded to a
 * whole number of words; the last byte of the block stores the amount
 * of padding, so length = (block size - 1) - padding byte.
 */
CAMLexport mlsize_t caml_string_length(value s)
{
  mlsize_t temp;
  temp = Bosize_val(s) - 1;
  /* The byte just before the padding must be the NUL terminator. */
  Assert (Byte (s, temp - Byte (s, temp)) == 0);
  return temp - Byte (s, temp);
}
0
252,248
// Called when this tab's selected state flips; refresh the foreground
// (text/icon) colors to match the new state.
void Tab::SelectedStateChanged() {
  UpdateForegroundColors();
}
0
43,369
/*
 * Called after a query SUCCEEDED: if the test script declared that the
 * query was expected to fail (a non-zero errno or a non-"00000"
 * SQLSTATE), abort the test run with a diagnostic.  Only the first
 * expected error entry is consulted.
 */
void handle_no_error(struct st_command *command)
{
  DBUG_ENTER("handle_no_error");

  if (command->expected_errors.err[0].type == ERR_ERRNO &&
      command->expected_errors.err[0].code.errnum != 0)
  {
    /* Error code we wanted was != 0, i.e. not an expected success */
    die("query '%s' succeeded - should have failed with errno %d...",
        command->query, command->expected_errors.err[0].code.errnum);
  }
  else if (command->expected_errors.err[0].type == ERR_SQLSTATE &&
           strcmp(command->expected_errors.err[0].code.sqlstate,"00000") != 0)
  {
    /* SQLSTATE we wanted was != "00000", i.e. not an expected success */
    die("query '%s' succeeded - should have failed with sqlstate %s...",
        command->query, command->expected_errors.err[0].code.sqlstate);
  }

  DBUG_VOID_RETURN;
}
0
490,845
/*
 * Build the textual prefix ("+ ", "- ", ":n! ", etc.) that encodes a
 * filter rule's flags for transmission or display.  Returns a pointer
 * to a static buffer, or NULL when the rule cannot be represented to
 * the peer (e.g. modifiers the negotiated protocol doesn't support).
 * On success *plen_ptr (if given) receives the prefix length.
 */
char *get_rule_prefix(filter_rule *rule, const char *pat, int for_xfer,
		      unsigned int *plen_ptr)
{
	static char buf[MAX_RULE_PREFIX+1];
	char *op = buf;
	/* Old peers (< protocol 29) only understand a bare one-char prefix. */
	int legal_len = for_xfer && protocol_version < 29 ? 1 : MAX_RULE_PREFIX-1;

	if (rule->rflags & FILTRULE_PERDIR_MERGE) {
		if (legal_len == 1)
			return NULL;
		*op++ = ':';
	} else if (rule->rflags & FILTRULE_INCLUDE)
		*op++ = '+';
	else if (legal_len != 1
	    || ((*pat == '-' || *pat == '+') && pat[1] == ' '))
		*op++ = '-';
	else
		legal_len = 0;

	if (rule->rflags & FILTRULE_ABS_PATH)
		*op++ = '/';
	if (rule->rflags & FILTRULE_NEGATE)
		*op++ = '!';
	if (rule->rflags & FILTRULE_CVS_IGNORE)
		*op++ = 'C';
	else {
		if (rule->rflags & FILTRULE_NO_INHERIT)
			*op++ = 'n';
		if (rule->rflags & FILTRULE_WORD_SPLIT)
			*op++ = 'w';
		if (rule->rflags & FILTRULE_NO_PREFIXES) {
			if (rule->rflags & FILTRULE_INCLUDE)
				*op++ = '+';
			else
				*op++ = '-';
		}
	}
	if (rule->rflags & FILTRULE_EXCLUDE_SELF)
		*op++ = 'e';
	if (rule->rflags & FILTRULE_XATTR)
		*op++ = 'x';
	/* Side-specific modifiers only exist on the wire from proto 29 on. */
	if (rule->rflags & FILTRULE_SENDER_SIDE
	    && (!for_xfer || protocol_version >= 29))
		*op++ = 's';
	if (rule->rflags & FILTRULE_RECEIVER_SIDE
	    && (!for_xfer || protocol_version >= 29
	     || (delete_excluded && am_sender)))
		*op++ = 'r';
	if (rule->rflags & FILTRULE_PERISHABLE) {
		if (!for_xfer || protocol_version >= 30)
			*op++ = 'p';
		else if (am_sender)
			return NULL;
	}

	if (op - buf > legal_len)
		return NULL;
	if (legal_len)
		*op++ = ' ';
	*op = '\0';
	if (plen_ptr)
		*plen_ptr = op - buf;
	return buf;
}
0
344,036
/*
 * Render a DHCP option's payload as a freshly-allocated environment
 * string "OPTNAME=VALUE".  The format of VALUE depends on the option
 * type (IPs, integers, strings, static routes, 6RD, RFC3397 names);
 * list-capable options are emitted space-separated.  Caller frees.
 */
static NOINLINE char *xmalloc_optname_optval(uint8_t *option, const struct dhcp_optflag *optflag, const char *opt_name)
{
	unsigned upper_length;
	int len, type, optlen;
	char *dest, *ret;

	/* option points to OPT_DATA, need to go back to get OPT_LEN */
	len = option[-OPT_DATA + OPT_LEN];

	type = optflag->flags & OPTION_TYPE_MASK;
	optlen = dhcp_option_lengths[type];
	/* Worst-case string size: per-element string length times the
	 * number of fixed-size elements that fit in the payload. */
	upper_length = len_of_option_as_string[type]
		* ((unsigned)(len + optlen - 1) / (unsigned)optlen);

	dest = ret = xmalloc(upper_length + strlen(opt_name) + 2);
	dest += sprintf(ret, "%s=", opt_name);

	while (len >= optlen) {
		switch (type) {
		case OPTION_IP:
		case OPTION_IP_PAIR:
			dest += sprint_nip(dest, "", option);
			if (type == OPTION_IP)
				break;
			dest += sprint_nip(dest, "/", option + 4);
			break;
//		case OPTION_BOOLEAN:
//			dest += sprintf(dest, *option ? "yes" : "no");
//			break;
		case OPTION_U8:
			dest += sprintf(dest, "%u", *option);
			break;
//		case OPTION_S16:
		case OPTION_U16: {
			uint16_t val_u16;
			move_from_unaligned16(val_u16, option);
			dest += sprintf(dest, "%u", ntohs(val_u16));
			break;
		}
		case OPTION_S32:
		case OPTION_U32: {
			uint32_t val_u32;
			move_from_unaligned32(val_u32, option);
			dest += sprintf(dest, type == OPTION_U32 ? "%lu" : "%ld", (unsigned long) ntohl(val_u32));
			break;
		}
		/* Note: options which use 'return' instead of 'break'
		 * (for example, OPTION_STRING) skip the code which handles
		 * the case of list of options.
		 */
		case OPTION_STRING:
			memcpy(dest, option, len);
			dest[len] = '\0';
			return ret;
		case OPTION_STATIC_ROUTES: {
			/* Option binary format:
			 * mask [one byte, 0..32]
			 * ip [big endian, 0..4 bytes depending on mask]
			 * router [big endian, 4 bytes]
			 * may be repeated
			 *
			 * We convert it to a string "IP/MASK ROUTER IP2/MASK2 ROUTER2"
			 */
			const char *pfx = "";

			while (len >= 1 + 4) { /* mask + 0-byte ip + router */
				uint32_t nip;
				uint8_t *p;
				unsigned mask;
				int bytes;

				mask = *option++;
				if (mask > 32)
					break;
				len--;

				nip = 0;
				p = (void*) &nip;
				bytes = (mask + 7) / 8; /* 0 -> 0, 1..8 -> 1, 9..16 -> 2 etc */
				while (--bytes >= 0) {
					*p++ = *option++;
					len--;
				}

				if (len < 4)
					break;

				/* print ip/mask */
				dest += sprint_nip(dest, pfx, (void*) &nip);
				pfx = " ";
				dest += sprintf(dest, "/%u ", mask);
				/* print router */
				dest += sprint_nip(dest, "", option);
				option += 4;
				len -= 4;
			}

			return ret;
		}
		case OPTION_6RD:
			/* Option binary format (see RFC 5969):
			 *  0                   1                   2                   3
			 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
			 * |  OPTION_6RD   | option-length |  IPv4MaskLen  |  6rdPrefixLen |
			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
			 * |                           6rdPrefix                           |
			 * ...                        (16 octets)                        ...
			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
			 * ...                   6rdBRIPv4Address(es)                    ...
			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
			 * We convert it to a string
			 * "IPv4MaskLen 6rdPrefixLen 6rdPrefix 6rdBRIPv4Address..."
			 *
			 * Sanity check: ensure that our length is at least 22 bytes, that
			 * IPv4MaskLen <= 32,
			 * 6rdPrefixLen <= 128,
			 * 6rdPrefixLen + (32 - IPv4MaskLen) <= 128
			 * (2nd condition need no check - it follows from 1st and 3rd).
			 * Else, return envvar with empty value ("optname=")
			 */
			if (len >= (1 + 1 + 16 + 4)
			 && option[0] <= 32
			 && (option[1] + 32 - option[0]) <= 128
			) {
				/* IPv4MaskLen */
				dest += sprintf(dest, "%u ", *option++);
				/* 6rdPrefixLen */
				dest += sprintf(dest, "%u ", *option++);
				/* 6rdPrefix */
				dest += sprint_nip6(dest, /* "", */ option);
				option += 16;
				len -= 1 + 1 + 16 + 4;
				/* "+ 4" above corresponds to the length of IPv4 addr
				 * we consume in the loop below */
				while (1) {
					/* 6rdBRIPv4Address(es) */
					dest += sprint_nip(dest, " ", option);
					option += 4;
					len -= 4; /* do we have yet another 4+ bytes? */
					if (len < 0)
						break; /* no */
				}
			}

			return ret;
#if ENABLE_FEATURE_UDHCP_RFC3397
		case OPTION_DNS_STRING:
			/* unpack option into dest; use ret for prefix (i.e., "optname=") */
			dest = dname_dec(option, len, ret);
			if (dest) {
				free(ret);
				return dest;
			}
			/* error. return "optname=" string */
			return ret;
		case OPTION_SIP_SERVERS:
			/* Option binary format:
			 * type: byte
			 * type=0: domain names, dns-compressed
			 * type=1: IP addrs
			 */
			option++;
			len--;
			if (option[-1] == 0) {
				dest = dname_dec(option, len, ret);
				if (dest) {
					free(ret);
					return dest;
				}
			} else
			if (option[-1] == 1) {
				const char *pfx = "";
				while (1) {
					len -= 4;
					if (len < 0)
						break;
					dest += sprint_nip(dest, pfx, option);
					pfx = " ";
					option += 4;
				}
			}
			return ret;
#endif
		} /* switch */

		/* If we are here, try to format any remaining data
		 * in the option as another, similarly-formatted option
		 */
		option += optlen;
		len -= optlen;
		// TODO: it can be a list only if (optflag->flags & OPTION_LIST).
		// Should we bail out/warn if we see multi-ip option which is
		// not allowed to be such (for example, DHCP_BROADCAST)? -
		if (len < optlen /* || !(optflag->flags & OPTION_LIST) */)
			break;
		*dest++ = ' ';
		*dest = '\0';
	} /* while */

	return ret;
}
1
148,374
/*
 * Trace a full circle as a degenerate ellipse: the radius is the
 * distance from 'start' (center) to 'end' (a point on the circle),
 * swept from 0 to 360 degrees.
 */
static void TraceCircle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    offset,
    degrees;

  /* radius = |end - start| */
  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  TraceEllipse(primitive_info,start,offset,degrees);
}
0
22,204
/*
 * Run "xdelta3 printhdr" on 'input' and grep its output for a header
 * line beginning with 'line_start' and containing 'matches'.
 * 'yes_or_no' selects whether the grep is expected to succeed or fail.
 * Returns 0 on the expected outcome, a non-zero xd3 error otherwise.
 *
 * Fix: the grep redirection targeted "/devull" (a stray file named
 * "devull") instead of discarding output via "/dev/null".
 */
static int check_vcdiff_header(xd3_stream *stream,
			       const char *input,
			       const char *line_start,
			       const char *matches,
			       int yes_or_no)
{
	int ret;
	char vcmd[TESTBUFSIZE], gcmd[TESTBUFSIZE];

	snprintf_func(vcmd, TESTBUFSIZE, "%s printhdr -f %s %s",
		      program_name, input, TEST_RECON2_FILE);

	if ((ret = system(vcmd)) != 0)
	{
		XPR(NT "printhdr command: %s\n", vcmd);
		stream->msg = "printhdr cmd failed";
		return XD3_INTERNAL;
	}

	/* Discard grep's matched lines; only its exit status matters. */
	snprintf_func(gcmd, TESTBUFSIZE, "grep \"%s.*%s.*\" %s > /dev/null",
		      line_start, matches, TEST_RECON2_FILE);

	if (yes_or_no)
	{
		if ((ret = do_cmd(stream, gcmd)))
		{
			XPR(NT "%s\n", gcmd);
			return ret;
		}
	}
	else
	{
		if ((ret = do_fail(stream, gcmd)))
		{
			XPR(NT "%s\n", gcmd);
			return ret;
		}
	}

	return 0;
}
0
364,002
/*
 * Generic GATT insertion: map 'mem' into the aperture starting at page
 * 'pg_start'.  Validates the aperture size/type, checks the target
 * range is free, flushes CPU caches once per memory object, writes the
 * masked physical addresses into the GATT, then flushes the TLB.
 * Returns 0, -EINVAL on bad arguments, or -EBUSY if a slot is in use.
 */
int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;
	int mask_type;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	/* Aperture size is stored in a driver-specific structure. */
	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0) num_entries = 0;

	if (type != mem->type)
		return -EINVAL;

	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
	if (mask_type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* Second condition also rejects pg_start + page_count overflow. */
	if (((pg_start + mem->page_count) > num_entries) ||
	    ((pg_start + mem->page_count) < pg_start))
		return -EINVAL;

	j = pg_start;

	/* Refuse to overwrite occupied GATT entries. */
	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	if (!mem->is_flushed) {
		bridge->driver->cache_flush();
		mem->is_flushed = true;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge,
						   page_to_phys(mem->pages[i]),
						   mask_type),
		       bridge->gatt_table+j);
	}
	readl(bridge->gatt_table+j-1);	/* PCI Posting. */

	bridge->driver->tlb_flush(mem);
	return 0;
}
0
519,229
// Returns the 1-based offset (within table->record[0]) of the last
// byte holding this BIT field's "null or bit fragment" data, or
// LAST_NULL_BYTE_UNDEF when the field has neither.
Field_bit::do_last_null_byte() const
{
  /*
    Code elsewhere is assuming that bytes are 8 bits, so I'm using
    that value instead of the correct one: CHAR_BIT.

    REFACTOR SUGGESTION (Matz): Change to use the correct number of
    bits. On systems with CHAR_BIT > 8 (not very common), the storage
    will lose the extra bits.
  */
  DBUG_PRINT("test", ("bit_ofs: %d, bit_len: %d  bit_ptr: %p",
                      bit_ofs, bit_len, bit_ptr));
  uchar *result;
  if (bit_len == 0)
    result= null_ptr;                 // no bit fragment: null flag byte
  else if (bit_ofs + bit_len > 8)
    result= bit_ptr + 1;              // fragment spills into next byte
  else
    result= bit_ptr;                  // fragment fits in one byte

  if (result)
    return (size_t) (result - table->record[0]) + 1;
  return LAST_NULL_BYTE_UNDEF;
}
0
209,800
/* {{{ proto bool mb_ereg_search_setpos(int position)
   Set the start position for the next multibyte regex search.
   Rejects negative positions and positions past the end of the
   currently registered search string (resetting the position to 0
   and returning false in that case). */
PHP_FUNCTION(mb_ereg_search_setpos)
{
	long position;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &position) == FAILURE) {
		return;
	}

	/* Bounds check against the registered search string, when present. */
	if (position < 0 || (MBREX(search_str) != NULL && Z_TYPE_P(MBREX(search_str)) == IS_STRING && position >= Z_STRLEN_P(MBREX(search_str)))) {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Position is out of range");
		MBREX(search_pos) = 0;
		RETURN_FALSE;
	}

	MBREX(search_pos) = position;
	RETURN_TRUE;
}
0
403,661
/*
 * Work item: rebuild the per-direction policy hash tables after the
 * selector prefix-length thresholds changed.  Reads the thresholds
 * under a seqlock, resets every bydst/inexact chain, installs the new
 * dbits/sbits per direction, then re-inserts all policies in creation
 * order, keeping each chain sorted by descending priority.
 */
static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--)
			INIT_HLIST_HEAD(odst + i);
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family,
					  xfrm_policy_id2dir(policy->index));
		/* Find the last entry with priority >= ours; insert after it
		 * so the chain stays ordered by descending priority. */
		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos)
			hlist_add_behind(&policy->bydst, newpos);
		else
			hlist_add_head(&policy->bydst, chain);
	}

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}
0
117,648
/*
 * Verify that a stack access at reg[regno] + off of 'access_size' bytes
 * stays within the frame's bounds.  For a constant register offset the
 * exact range is checked; for a variable offset the [smin, smax] bounds
 * are checked (after rejecting unbounded registers).  'src'/'type' only
 * shape the error message.  Returns 0 or -EACCES.
 */
static int check_stack_access_within_bounds(
		struct bpf_verifier_env *env,
		int regno, int off, int access_size,
		enum stack_access_src src, enum bpf_access_type type)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = regs + regno;
	struct bpf_func_state *state = func(env, reg);
	int min_off, max_off;
	int err;
	char *err_extra;

	if (src == ACCESS_HELPER)
		/* We don't know if helpers are reading or writing (or both). */
		err_extra = " indirect access to";
	else if (type == BPF_READ)
		err_extra = " read from";
	else
		err_extra = " write to";

	if (tnum_is_const(reg->var_off)) {
		min_off = reg->var_off.value + off;
		if (access_size > 0)
			max_off = min_off + access_size - 1;
		else
			max_off = min_off;
	} else {
		/* Variable offset: must be bounded on both sides. */
		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
		    reg->smin_value <= -BPF_MAX_VAR_OFF) {
			verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
				err_extra, regno);
			return -EACCES;
		}
		min_off = reg->smin_value + off;
		if (access_size > 0)
			max_off = reg->smax_value + off + access_size - 1;
		else
			max_off = min_off;
	}

	/* Both extremes of the access must lie in a valid stack slot. */
	err = check_stack_slot_within_bounds(min_off, state, type);
	if (!err)
		err = check_stack_slot_within_bounds(max_off, state, type);

	if (err) {
		if (tnum_is_const(reg->var_off)) {
			verbose(env, "invalid%s stack R%d off=%d size=%d\n",
				err_extra, regno, off, access_size);
		} else {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
				err_extra, regno, tn_buf, access_size);
		}
	}
	return err;
}
0
303,127
/*
 * Seek the stdio stream to an absolute byte offset and return the
 * resulting file position.  Uses fseeko() where large-file/POSIX
 * support is advertised by the feature-test macros, plain fseek()
 * otherwise.
 */
static size_t p_seek(void *fp, size_t offset)
{
	FILE *stream = (FILE *) fp;

#if (defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS == 64) || \
    (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 200112L) || \
    (defined(_XOPEN_SOURCE) && _XOPEN_SOURCE >= 600)
	fseeko(stream, (off_t) offset, SEEK_SET);
#else
	fseek(stream, (long) offset, SEEK_SET);
#endif

	return (size_t) ftell(stream);
}
0
294,220
/*
 * Decode an SVG focus attribute from the LASeR bitstream into 'foc'.
 * Any previously held target (string or resolved node) is released
 * first.  A 1-bit flag selects between an enumerated focus value and
 * an IRI reference, which is then read as a codec IDREF.
 */
static void lsr_read_focus(GF_LASeRCodec *lsr, SVG_Focus *foc, const char *name)
{
	u32 flag;

	/* Release any previous focus target before overwriting it. */
	if (foc->target.string) {
		gf_free(foc->target.string);
		foc->target.string = NULL;
	}
	if (foc->target.target) foc->target.target = NULL;
	gf_node_unregister_iri(lsr->sg, &foc->target);

	GF_LSR_READ_INT(lsr, flag, 1, "isEnum");
	if (flag) {
		GF_LSR_READ_INT(lsr, foc->type, 1, "enum");
	} else {
		foc->type = SVG_FOCUS_IRI;
		lsr_read_codec_IDREF(lsr, &foc->target, "id");
	}
}
0
242,498
int ChromeBrowserMainParts::PreCreateThreadsImpl() { TRACE_EVENT0("startup", "ChromeBrowserMainParts::PreCreateThreadsImpl") run_message_loop_ = false; #if !defined(OS_ANDROID) chrome::MaybeShowInvalidUserDataDirWarningDialog(); #endif // !defined(OS_ANDROID) if (!PathService::Get(chrome::DIR_USER_DATA, &user_data_dir_)) return chrome::RESULT_CODE_MISSING_DATA; MediaCaptureDevicesDispatcher::GetInstance(); #if !defined(OS_ANDROID) process_singleton_.reset(new ChromeProcessSingleton( user_data_dir_, base::Bind(&ProcessSingletonNotificationCallback))); first_run::IsChromeFirstRun(); #endif // !defined(OS_ANDROID) scoped_refptr<base::SequencedTaskRunner> local_state_task_runner = JsonPrefStore::GetTaskRunnerForFile( base::FilePath(chrome::kLocalStorePoolName), BrowserThread::GetBlockingPool()); { TRACE_EVENT0("startup", "ChromeBrowserMainParts::PreCreateThreadsImpl:InitBrowswerProcessImpl"); browser_process_.reset(new BrowserProcessImpl(local_state_task_runner.get(), parsed_command_line())); } if (parsed_command_line().HasSwitch(switches::kEnableProfiling)) { TRACE_EVENT0("startup", "ChromeBrowserMainParts::PreCreateThreadsImpl:InitProfiling"); std::string flag = parsed_command_line().GetSwitchValueASCII(switches::kEnableProfiling); tracked_objects::ThreadData::Status status = tracked_objects::ThreadData::PROFILING_ACTIVE; if (flag.compare("0") != 0) status = tracked_objects::ThreadData::DEACTIVATED; tracked_objects::ThreadData::InitializeAndSetTrackingStatus(status); } local_state_ = InitializeLocalState( local_state_task_runner.get(), parsed_command_line()); #if !defined(OS_ANDROID) master_prefs_.reset(new first_run::MasterPrefs); browser_creator_.reset(new StartupBrowserCreator); chrome::UMABrowsingActivityObserver::Init(); #endif // !defined(OS_ANDROID) #if !defined(OS_CHROMEOS) { TRACE_EVENT0("startup", "ChromeBrowserMainParts::PreCreateThreadsImpl:ConvertFlags"); about_flags::PrefServiceFlagsStorage flags_storage_( g_browser_process->local_state()); 
about_flags::ConvertFlagsToSwitches(&flags_storage_, base::CommandLine::ForCurrentProcess(), about_flags::kAddSentinels); } #endif // !defined(OS_CHROMEOS) local_state_->UpdateCommandLinePrefStore( new CommandLinePrefStore(base::CommandLine::ForCurrentProcess())); crash_keys::SetSwitchesFromCommandLine( base::CommandLine::ForCurrentProcess()); #if defined(OS_MACOSX) std::string locale = parameters().ui_task ? "en-US" : l10n_util::GetLocaleOverride(); browser_process_->SetApplicationLocale(locale); #else const std::string locale = local_state_->GetString(prefs::kApplicationLocale); TRACE_EVENT_BEGIN0("startup", "ChromeBrowserMainParts::PreCreateThreadsImpl:InitResourceBundle"); const std::string loaded_locale = ui::ResourceBundle::InitSharedInstanceWithLocale( locale, NULL, ui::ResourceBundle::LOAD_COMMON_RESOURCES); TRACE_EVENT_END0("startup", "ChromeBrowserMainParts::PreCreateThreadsImpl:InitResourceBundle"); if (loaded_locale.empty() && !parsed_command_line().HasSwitch(switches::kNoErrorDialogs)) { ShowMissingLocaleMessageBox(); return chrome::RESULT_CODE_MISSING_DATA; } CHECK(!loaded_locale.empty()) << "Locale could not be found for " << locale; browser_process_->SetApplicationLocale(loaded_locale); { TRACE_EVENT0("startup", "ChromeBrowserMainParts::PreCreateThreadsImpl:AddDataPack"); base::FilePath resources_pack_path; PathService::Get(chrome::FILE_RESOURCES_PACK, &resources_pack_path); #if defined(OS_ANDROID) ui::LoadMainAndroidPackFile("assets/resources.pak", resources_pack_path); #else ResourceBundle::GetSharedInstance().AddDataPackFromPath( resources_pack_path, ui::SCALE_FACTOR_NONE); #endif // defined(OS_ANDROID) } #endif // defined(OS_MACOSX) #if !defined(OS_ANDROID) && !defined(OS_CHROMEOS) if (first_run::IsChromeFirstRun()) { first_run::ProcessMasterPreferencesResult pmp_result = first_run::ProcessMasterPreferences(user_data_dir_, master_prefs_.get()); if (pmp_result == first_run::EULA_EXIT_NOW) return chrome::RESULT_CODE_EULA_REFUSED; if 
(!parsed_command_line().HasSwitch(switches::kApp) && !parsed_command_line().HasSwitch(switches::kAppId) && !parsed_command_line().HasSwitch(switches::kShowAppList)) { AddFirstRunNewTabs(browser_creator_.get(), master_prefs_->new_tabs); } if (!master_prefs_->variations_seed.empty() || !master_prefs_->compressed_variations_seed.empty()) { if (!master_prefs_->variations_seed.empty()) { local_state_->SetString(chrome_variations::prefs::kVariationsSeed, master_prefs_->variations_seed); } if (!master_prefs_->compressed_variations_seed.empty()) { local_state_->SetString( chrome_variations::prefs::kVariationsCompressedSeed, master_prefs_->compressed_variations_seed); } if (!master_prefs_->variations_seed_signature.empty()) { local_state_->SetString( chrome_variations::prefs::kVariationsSeedSignature, master_prefs_->variations_seed_signature); } local_state_->SetInt64(chrome_variations::prefs::kVariationsSeedDate, base::Time::Now().ToInternalValue()); } if (!master_prefs_->suppress_default_browser_prompt_for_version.empty()) { local_state_->SetString( prefs::kBrowserSuppressDefaultBrowserPrompt, master_prefs_->suppress_default_browser_prompt_for_version); } #if defined(OS_WIN) if (!master_prefs_->welcome_page_on_os_upgrade_enabled) local_state_->SetBoolean(prefs::kWelcomePageOnOSUpgradeEnabled, false); #endif } #endif // !defined(OS_ANDROID) && !defined(OS_CHROMEOS) #if defined(OS_LINUX) || defined(OS_OPENBSD) || defined(OS_MACOSX) base::debug::SetCrashKeyValue(crash_keys::kChannel, chrome::GetChannelString()); #endif // defined(OS_LINUX) || defined(OS_OPENBSD) || defined(OS_MACOSX) tracking_synchronizer_ = new metrics::TrackingSynchronizer( make_scoped_ptr(new base::DefaultTickClock())); #if defined(OS_MACOSX) SecKeychainAddCallback(&KeychainCallback, 0, NULL); #endif // defined(OS_MACOSX) #if defined(OS_CHROMEOS) chromeos::CrosSettings::Initialize(); #endif // defined(OS_CHROMEOS) SetupMetricsAndFieldTrials(); browser_process_->PreCreateThreads(); return 
content::RESULT_CODE_NORMAL_EXIT; }
0
263,718
b64enc(bs, inp, inlen, outp) struct b64state *bs; u_char *inp; int inlen; u_char *outp; { int outlen = 0; while (inlen > 0) { bs->bs_bits = (bs->bs_bits << 8) | *inp++; inlen--; bs->bs_offs += 8; if (bs->bs_offs >= 24) { *outp++ = base64[(bs->bs_bits >> 18) & 0x3F]; *outp++ = base64[(bs->bs_bits >> 12) & 0x3F]; *outp++ = base64[(bs->bs_bits >> 6) & 0x3F]; *outp++ = base64[bs->bs_bits & 0x3F]; outlen += 4; bs->bs_offs = 0; bs->bs_bits = 0; } } return (outlen); }
0
38,093
cifs_iovec_read(struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *poffset) { ssize_t rc; size_t len, cur_len; ssize_t total_read = 0; loff_t offset = *poffset; unsigned int npages; struct cifs_sb_info *cifs_sb; struct cifs_tcon *tcon; struct cifsFileInfo *open_file; struct cifs_readdata *rdata, *tmp; struct list_head rdata_list; pid_t pid; if (!nr_segs) return 0; len = iov_length(iov, nr_segs); if (!len) return 0; INIT_LIST_HEAD(&rdata_list); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); open_file = file->private_data; tcon = tlink_tcon(open_file->tlink); if (!tcon->ses->server->ops->async_readv) return -ENOSYS; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = open_file->pid; else pid = current->tgid; if ((file->f_flags & O_ACCMODE) == O_WRONLY) cifs_dbg(FYI, "attempting read on write only file instance\n"); do { cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize); npages = DIV_ROUND_UP(cur_len, PAGE_SIZE); /* allocate a readdata struct */ rdata = cifs_readdata_alloc(npages, cifs_uncached_readv_complete); if (!rdata) { rc = -ENOMEM; goto error; } rc = cifs_read_allocate_pages(rdata, npages); if (rc) goto error; rdata->cfile = cifsFileInfo_get(open_file); rdata->nr_pages = npages; rdata->offset = offset; rdata->bytes = cur_len; rdata->pid = pid; rdata->pagesz = PAGE_SIZE; rdata->read_into_pages = cifs_uncached_read_into_pages; rc = cifs_retry_async_readv(rdata); error: if (rc) { kref_put(&rdata->refcount, cifs_uncached_readdata_release); break; } list_add_tail(&rdata->list, &rdata_list); offset += cur_len; len -= cur_len; } while (len > 0); /* if at least one read request send succeeded, then reset rc */ if (!list_empty(&rdata_list)) rc = 0; /* the loop below should proceed in the order of increasing offsets */ restart_loop: list_for_each_entry_safe(rdata, tmp, &rdata_list, list) { if (!rc) { ssize_t copied; /* FIXME: freezable sleep too? 
*/ rc = wait_for_completion_killable(&rdata->done); if (rc) rc = -EINTR; else if (rdata->result) rc = rdata->result; else { rc = cifs_readdata_to_iov(rdata, iov, nr_segs, *poffset, &copied); total_read += copied; } /* resend call if it's a retryable error */ if (rc == -EAGAIN) { rc = cifs_retry_async_readv(rdata); goto restart_loop; } } list_del_init(&rdata->list); kref_put(&rdata->refcount, cifs_uncached_readdata_release); } cifs_stats_bytes_read(tcon, total_read); *poffset += total_read; /* mask nodata case */ if (rc == -ENODATA) rc = 0; return total_read ? total_read : rc; }
0
326,319
static void unplug_nic(PCIBus *b, PCIDevice *d) { if (pci_get_word(d->config + PCI_CLASS_DEVICE) == PCI_CLASS_NETWORK_ETHERNET) { qdev_unplug(&(d->qdev), NULL); } }
0
399,731
static void CONCAT(send_hextile_tile_, NAME)(VncState *vs, int x, int y, int w, int h, void *last_bg_, void *last_fg_, int *has_bg, int *has_fg) { VncDisplay *vd = vs->vd; uint8_t *row = vnc_server_fb_ptr(vd, x, y); pixel_t *irow = (pixel_t *)row; int j, i; pixel_t *last_bg = (pixel_t *)last_bg_; pixel_t *last_fg = (pixel_t *)last_fg_; pixel_t bg = 0; pixel_t fg = 0; int n_colors = 0; int bg_count = 0; int fg_count = 0; int flags = 0; uint8_t data[(vs->client_pf.bytes_per_pixel + 2) * 16 * 16]; int n_data = 0; int n_subtiles = 0; for (j = 0; j < h; j++) { for (i = 0; i < w; i++) { switch (n_colors) { case 0: bg = irow[i]; n_colors = 1; break; case 1: if (irow[i] != bg) { fg = irow[i]; n_colors = 2; } break; case 2: if (irow[i] != bg && irow[i] != fg) { n_colors = 3; } else { if (irow[i] == bg) bg_count++; else if (irow[i] == fg) fg_count++; } break; default: break; } } if (n_colors > 2) break; irow += vnc_server_fb_stride(vd) / sizeof(pixel_t); } if (n_colors > 1 && fg_count > bg_count) { pixel_t tmp = fg; fg = bg; bg = tmp; } if (!*has_bg || *last_bg != bg) { flags |= 0x02; *has_bg = 1; *last_bg = bg; } if (n_colors < 3 && (!*has_fg || *last_fg != fg)) { flags |= 0x04; *has_fg = 1; *last_fg = fg; } switch (n_colors) { case 1: n_data = 0; break; case 2: flags |= 0x08; irow = (pixel_t *)row; for (j = 0; j < h; j++) { int min_x = -1; for (i = 0; i < w; i++) { if (irow[i] == fg) { if (min_x == -1) min_x = i; } else if (min_x != -1) { hextile_enc_cord(data + n_data, min_x, j, i - min_x, 1); n_data += 2; n_subtiles++; min_x = -1; } } if (min_x != -1) { hextile_enc_cord(data + n_data, min_x, j, i - min_x, 1); n_data += 2; n_subtiles++; } irow += vnc_server_fb_stride(vd) / sizeof(pixel_t); } break; case 3: flags |= 0x18; irow = (pixel_t *)row; if (!*has_bg || *last_bg != bg) flags |= 0x02; for (j = 0; j < h; j++) { int has_color = 0; int min_x = -1; pixel_t color = 0; /* shut up gcc */ for (i = 0; i < w; i++) { if (!has_color) { if (irow[i] == bg) continue; color = 
irow[i]; min_x = i; has_color = 1; } else if (irow[i] != color) { has_color = 0; #ifdef GENERIC vnc_convert_pixel(vs, data + n_data, color); n_data += vs->client_pf.bytes_per_pixel; #else memcpy(data + n_data, &color, sizeof(color)); n_data += sizeof(pixel_t); #endif hextile_enc_cord(data + n_data, min_x, j, i - min_x, 1); n_data += 2; n_subtiles++; min_x = -1; if (irow[i] != bg) { color = irow[i]; min_x = i; has_color = 1; } } } if (has_color) { #ifdef GENERIC vnc_convert_pixel(vs, data + n_data, color); n_data += vs->client_pf.bytes_per_pixel; #else memcpy(data + n_data, &color, sizeof(color)); n_data += sizeof(pixel_t); #endif hextile_enc_cord(data + n_data, min_x, j, i - min_x, 1); n_data += 2; n_subtiles++; } irow += vnc_server_fb_stride(vd) / sizeof(pixel_t); } /* A SubrectsColoured subtile invalidates the foreground color */ *has_fg = 0; if (n_data > (w * h * sizeof(pixel_t))) { n_colors = 4; flags = 0x01; *has_bg = 0; /* we really don't have to invalidate either the bg or fg but we've lost the old values. oh well. */ } break; default: break; } if (n_colors > 3) { flags = 0x01; *has_fg = 0; *has_bg = 0; n_colors = 4; } vnc_write_u8(vs, flags); if (n_colors < 4) { if (flags & 0x02) vs->write_pixels(vs, last_bg, sizeof(pixel_t)); if (flags & 0x04) vs->write_pixels(vs, last_fg, sizeof(pixel_t)); if (n_subtiles) { vnc_write_u8(vs, n_subtiles); vnc_write(vs, data, n_data); } } else { for (j = 0; j < h; j++) { vs->write_pixels(vs, row, w * 4); row += vnc_server_fb_stride(vd); } } }
0
107,586
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) { const struct iphdr *iph = (const struct iphdr *)icmp_skb->data; struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2)); struct inet_connection_sock *icsk; struct tcp_sock *tp; struct inet_sock *inet; const int type = icmp_hdr(icmp_skb)->type; const int code = icmp_hdr(icmp_skb)->code; struct sock *sk; struct sk_buff *skb; struct request_sock *fastopen; __u32 seq, snd_una; __u32 remaining; int err; struct net *net = dev_net(icmp_skb->dev); sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr, th->dest, iph->saddr, ntohs(th->source), inet_iif(icmp_skb)); if (!sk) { __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; } if (sk->sk_state == TCP_TIME_WAIT) { inet_twsk_put(inet_twsk(sk)); return; } seq = ntohl(th->seq); if (sk->sk_state == TCP_NEW_SYN_RECV) return tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB || type == ICMP_TIME_EXCEEDED || (type == ICMP_DEST_UNREACH && (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))); bh_lock_sock(sk); /* If too many ICMPs get dropped on busy * servers this needs to be solved differently. * We do take care of PMTU discovery (RFC1191) special case : * we can receive locally generated ICMP messages while socket is held. */ if (sock_owned_by_user(sk)) { if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)) __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); } if (sk->sk_state == TCP_CLOSE) goto out; if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto out; } icsk = inet_csk(sk); tp = tcp_sk(sk); /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */ fastopen = tp->fastopen_rsk; snd_una = fastopen ? 
tcp_rsk(fastopen)->snt_isn : tp->snd_una; if (sk->sk_state != TCP_LISTEN && !between(seq, snd_una, tp->snd_nxt)) { __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } switch (type) { case ICMP_REDIRECT: do_redirect(icmp_skb, sk); goto out; case ICMP_SOURCE_QUENCH: /* Just silently ignore these. */ goto out; case ICMP_PARAMETERPROB: err = EPROTO; break; case ICMP_DEST_UNREACH: if (code > NR_ICMP_UNREACH) goto out; if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ /* We are not interested in TCP_LISTEN and open_requests * (SYN-ACKs send out by Linux are always <576bytes so * they should go through unfragmented). */ if (sk->sk_state == TCP_LISTEN) goto out; tp->mtu_info = info; if (!sock_owned_by_user(sk)) { tcp_v4_mtu_reduced(sk); } else { if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags)) sock_hold(sk); } goto out; } err = icmp_err_convert[code].errno; /* check if icmp_skb allows revert of backoff * (see draft-zimmermann-tcp-lcd) */ if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH) break; if (seq != tp->snd_una || !icsk->icsk_retransmits || !icsk->icsk_backoff || fastopen) break; if (sock_owned_by_user(sk)) break; icsk->icsk_backoff--; icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT; icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); skb = tcp_write_queue_head(sk); BUG_ON(!skb); remaining = icsk->icsk_rto - min(icsk->icsk_rto, tcp_time_stamp - tcp_skb_timestamp(skb)); if (remaining) { inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, remaining, TCP_RTO_MAX); } else { /* RTO revert clocked out retransmission. * Will retransmit now */ tcp_retransmit_timer(sk); } break; case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; default: goto out; } switch (sk->sk_state) { case TCP_SYN_SENT: case TCP_SYN_RECV: /* Only in fast or simultaneous open. If a fast open socket is * is already accepted it is treated as a connected one below. 
*/ if (fastopen && !fastopen->sk) break; if (!sock_owned_by_user(sk)) { sk->sk_err = err; sk->sk_error_report(sk); tcp_done(sk); } else { sk->sk_err_soft = err; } goto out; } /* If we've already connected we will keep trying * until we time out, or the user gives up. * * rfc1122 4.2.3.9 allows to consider as hard errors * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too, * but it is obsoleted by pmtu discovery). * * Note, that in modern internet, where routing is unreliable * and in each dark corner broken firewalls sit, sending random * errors ordered by their masters even this two messages finally lose * their original sense (even Linux sends invalid PORT_UNREACHs) * * Now we are in compliance with RFCs. * --ANK (980905) */ inet = inet_sk(sk); if (!sock_owned_by_user(sk) && inet->recverr) { sk->sk_err = err; sk->sk_error_report(sk); } else { /* Only an error on timeout */ sk->sk_err_soft = err; } out: bh_unlock_sock(sk); sock_put(sk); }
0
407,083
static CURLcode imap_block_statemach(struct connectdata *conn) { CURLcode result = CURLE_OK; struct imap_conn *imapc = &conn->proto.imapc; while(imapc->state != IMAP_STOP && !result) result = Curl_pp_statemach(&imapc->pp, TRUE); return result; }
0
15,602
static int load_matrix ( MpegEncContext * s , uint16_t matrix0 [ 64 ] , uint16_t matrix1 [ 64 ] , int intra ) { int i ; for ( i = 0 ; i < 64 ; i ++ ) { int j = s -> dsp . idct_permutation [ ff_zigzag_direct [ i ] ] ; int v = get_bits ( & s -> gb , 8 ) ; if ( v == 0 ) { av_log ( s -> avctx , AV_LOG_ERROR , "matrix damaged\n" ) ; return - 1 ; } if ( intra && i == 0 && v != 8 ) { av_log ( s -> avctx , AV_LOG_ERROR , "intra matrix invalid, ignoring\n" ) ; v = 8 ; } matrix0 [ j ] = v ; if ( matrix1 ) matrix1 [ j ] = v ; } return 0 ; }
0
68,415
TEST_F(QuotedString_ExtractFrom_Tests, EscapedNewline) { whenInputIs("\"hello \\nworld\\n\""); resultMustBe("hello \nworld\n"); }
0
487,763
__read_extent_tree_block(const char *function, unsigned int line, struct inode *inode, ext4_fsblk_t pblk, int depth, int flags) { struct buffer_head *bh; int err; gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS; if (flags & EXT4_EX_NOFAIL) gfp_flags |= __GFP_NOFAIL; bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags); if (unlikely(!bh)) return ERR_PTR(-ENOMEM); if (!bh_uptodate_or_lock(bh)) { trace_ext4_ext_load_extent(inode, pblk, _RET_IP_); err = bh_submit_read(bh); if (err < 0) goto errout; } if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE)) return bh; err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh), depth, pblk); if (err) goto errout; set_buffer_verified(bh); /* * If this is a leaf block, cache all of its entries */ if (!(flags & EXT4_EX_NOCACHE) && depth == 0) { struct ext4_extent_header *eh = ext_block_hdr(bh); ext4_cache_extents(inode, eh); } return bh; errout: put_bh(bh); return ERR_PTR(err); }
0
242,698
void RenderWidgetHostImpl::SetEditCommandsForNextKeyEvent( const std::vector<EditCommand>& commands) { Send(new ViewMsg_SetEditCommandsForNextKeyEvent(GetRoutingID(), commands)); }
0
203,136
cricket::SessionDescription* JingleSessionManager::CreateHostSessionDescription( const CandidateSessionConfig* config, const std::string& certificate) { cricket::SessionDescription* desc = new cricket::SessionDescription(); desc->AddContent( ContentDescription::kChromotingContentName, kChromotingXmlNamespace, new ContentDescription(config, "", certificate)); return desc; }
0
173,093
fbCombineConjointXorC (CARD32 *dest, CARD32 *src, CARD32 *mask, int width) { fbCombineConjointGeneralC (dest, src, mask, width, CombineXor); }
0
506,267
SSL_CIPHER *SSL_get_current_cipher(const SSL *s) { if ((s->session != NULL) && (s->session->cipher != NULL)) return(s->session->cipher); return(NULL); }
0
57,046
static Sg_device *sg_lookup_dev(int dev) { return idr_find(&sg_index_idr, dev); }
0
423,774
zone_shutdown(isc_task_t *task, isc_event_t *event) { dns_zone_t *zone = (dns_zone_t *) event->ev_arg; bool free_needed, linked = false; dns_zone_t *raw = NULL, *secure = NULL; UNUSED(task); REQUIRE(DNS_ZONE_VALID(zone)); INSIST(event->ev_type == DNS_EVENT_ZONECONTROL); INSIST(isc_refcount_current(&zone->erefs) == 0); zone_debuglog(zone, "zone_shutdown", 3, "shutting down"); /* * Stop things being restarted after we cancel them below. */ LOCK_ZONE(zone); DNS_ZONE_SETFLAG(zone, DNS_ZONEFLG_EXITING); UNLOCK_ZONE(zone); /* * If we were waiting for xfrin quota, step out of * the queue. * If there's no zone manager, we can't be waiting for the * xfrin quota */ if (zone->zmgr != NULL) { RWLOCK(&zone->zmgr->rwlock, isc_rwlocktype_write); if (zone->statelist == &zone->zmgr->waiting_for_xfrin) { ISC_LIST_UNLINK(zone->zmgr->waiting_for_xfrin, zone, statelink); linked = true; zone->statelist = NULL; } if (zone->statelist == &zone->zmgr->xfrin_in_progress) { ISC_LIST_UNLINK(zone->zmgr->xfrin_in_progress, zone, statelink); zone->statelist = NULL; zmgr_resume_xfrs(zone->zmgr, false); } RWUNLOCK(&zone->zmgr->rwlock, isc_rwlocktype_write); } /* * In task context, no locking required. See zone_xfrdone(). 
*/ if (zone->xfr != NULL) dns_xfrin_shutdown(zone->xfr); /* Safe to release the zone now */ if (zone->zmgr != NULL) dns_zonemgr_releasezone(zone->zmgr, zone); LOCK_ZONE(zone); INSIST(zone != zone->raw); if (linked) { INSIST(zone->irefs > 0); zone->irefs--; } if (zone->request != NULL) { dns_request_cancel(zone->request); } if (zone->readio != NULL) zonemgr_cancelio(zone->readio); if (zone->lctx != NULL) dns_loadctx_cancel(zone->lctx); if (!DNS_ZONE_FLAG(zone, DNS_ZONEFLG_FLUSH) || !DNS_ZONE_FLAG(zone, DNS_ZONEFLG_DUMPING)) { if (zone->writeio != NULL) zonemgr_cancelio(zone->writeio); if (zone->dctx != NULL) dns_dumpctx_cancel(zone->dctx); } notify_cancel(zone); forward_cancel(zone); if (zone->timer != NULL) { isc_timer_detach(&zone->timer); INSIST(zone->irefs > 0); zone->irefs--; } /* * We have now canceled everything set the flag to allow exit_check() * to succeed. We must not unlock between setting this flag and * calling exit_check(). */ DNS_ZONE_SETFLAG(zone, DNS_ZONEFLG_SHUTDOWN); free_needed = exit_check(zone); if (inline_secure(zone)) { raw = zone->raw; zone->raw = NULL; } if (inline_raw(zone)) { secure = zone->secure; zone->secure = NULL; } UNLOCK_ZONE(zone); if (raw != NULL) dns_zone_detach(&raw); if (secure != NULL) dns_zone_idetach(&secure); if (free_needed) zone_free(zone); }
0
497,089
static void update_ltp(AACContext *ac, SingleChannelElement *sce) { IndividualChannelStream *ics = &sce->ics; float *saved = sce->saved; float *saved_ltp = sce->coeffs; const float *lwindow = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024; const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; int i; if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) { memcpy(saved_ltp, saved, 512 * sizeof(float)); memset(saved_ltp + 576, 0, 448 * sizeof(float)); ac->fdsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64); for (i = 0; i < 64; i++) saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i]; } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) { memcpy(saved_ltp, ac->buf_mdct + 512, 448 * sizeof(float)); memset(saved_ltp + 576, 0, 448 * sizeof(float)); ac->fdsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64); for (i = 0; i < 64; i++) saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i]; } else { // LONG_STOP or ONLY_LONG ac->fdsp.vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512); for (i = 0; i < 512; i++) saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * lwindow[511 - i]; } memcpy(sce->ltp_state, sce->ltp_state+1024, 1024 * sizeof(*sce->ltp_state)); memcpy(sce->ltp_state+1024, sce->ret, 1024 * sizeof(*sce->ltp_state)); memcpy(sce->ltp_state+2048, saved_ltp, 1024 * sizeof(*sce->ltp_state)); }
0
307,105
void TopSitesImpl::OnTopSitesAvailableFromHistory( const MostVisitedURLList* pages) { DCHECK(pages); SetTopSites(*pages, CALL_LOCATION_FROM_OTHER_PLACES); }
0
220,658
MockAudioManager() : AudioManagerPlatform(std::make_unique<media::TestAudioThread>(), &fake_audio_log_factory_), num_output_devices_(2), num_input_devices_(2) {}
0
386,454
/* {{{ proto int strtotime(string time [, int now ]) Convert string representation of date and time to a timestamp */ PHP_FUNCTION(strtotime) { char *times, *initial_ts; int time_len, error1, error2; struct timelib_error_container *error; long preset_ts = 0, ts; timelib_time *t, *now; timelib_tzinfo *tzi; tzi = get_timezone_info(TSRMLS_C); if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "sl", &times, &time_len, &preset_ts) != FAILURE) { /* We have an initial timestamp */ now = timelib_time_ctor(); initial_ts = emalloc(25); snprintf(initial_ts, 24, "@%ld UTC", preset_ts); t = timelib_strtotime(initial_ts, strlen(initial_ts), NULL, DATE_TIMEZONEDB, php_date_parse_tzfile_wrapper); /* we ignore the error here, as this should never fail */ timelib_update_ts(t, tzi); now->tz_info = tzi; now->zone_type = TIMELIB_ZONETYPE_ID; timelib_unixtime2local(now, t->sse); timelib_time_dtor(t); efree(initial_ts); } else if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &times, &time_len, &preset_ts) != FAILURE) { /* We have no initial timestamp */ now = timelib_time_ctor(); now->tz_info = tzi; now->zone_type = TIMELIB_ZONETYPE_ID; timelib_unixtime2local(now, (timelib_sll) time(NULL)); } else { RETURN_FALSE; } if (!time_len) { timelib_time_dtor(now); RETURN_FALSE; } t = timelib_strtotime(times, time_len, &error, DATE_TIMEZONEDB, php_date_parse_tzfile_wrapper); error1 = error->error_count; timelib_error_container_dtor(error); timelib_fill_holes(t, now, TIMELIB_NO_CLONE); timelib_update_ts(t, tzi); ts = timelib_date_to_int(t, &error2); timelib_time_dtor(now); timelib_time_dtor(t); if (error1 || error2) { RETURN_FALSE; } else { RETURN_LONG(ts);
0
57,016
Variant HHVM_FUNCTION(mcrypt_cfb, const String& cipher, const String& key, const String& data, const Variant& mode, const Variant& viv /* = null_string */) { raise_deprecated("Function mcrypt_cfb() is deprecated"); String iv = viv.toString(); return php_mcrypt_do_crypt(cipher, key, data, "cfb", iv, mode.toInt32(), "mcrypt_cfb"); }
0
207,082
bool RenderThreadImpl::Send(IPC::Message* msg) { bool pumping_events = false; if (msg->is_sync()) { if (msg->is_caller_pumping_messages()) { pumping_events = true; } } bool suspend_webkit_shared_timer = true; // default value std::swap(suspend_webkit_shared_timer, suspend_webkit_shared_timer_); bool notify_webkit_of_modal_loop = true; // default value std::swap(notify_webkit_of_modal_loop, notify_webkit_of_modal_loop_); #if defined(ENABLE_PLUGINS) int render_view_id = MSG_ROUTING_NONE; #endif if (pumping_events) { if (suspend_webkit_shared_timer) webkit_platform_support_->SuspendSharedTimer(); if (notify_webkit_of_modal_loop) WebView::willEnterModalLoop(); #if defined(ENABLE_PLUGINS) RenderViewImpl* render_view = RenderViewImpl::FromRoutingID(msg->routing_id()); if (render_view) { render_view_id = msg->routing_id(); PluginChannelHost::Broadcast( new PluginMsg_SignalModalDialogEvent(render_view_id)); } #endif } bool rv = ChildThread::Send(msg); if (pumping_events) { #if defined(ENABLE_PLUGINS) if (render_view_id != MSG_ROUTING_NONE) { PluginChannelHost::Broadcast( new PluginMsg_ResetModalDialogEvent(render_view_id)); } #endif if (notify_webkit_of_modal_loop) WebView::didExitModalLoop(); if (suspend_webkit_shared_timer) webkit_platform_support_->ResumeSharedTimer(); } return rv; }
0
347,270
void receive_xattr(int f, struct file_struct *file) { static item_list temp_xattr = EMPTY_ITEM_LIST; int count, num; #ifdef HAVE_LINUX_XATTRS int need_sort = 0; #else int need_sort = 1; #endif int ndx = read_varint(f); if (ndx < 0 || (size_t)ndx > rsync_xal_l.count) { rprintf(FERROR, "receive_xattr: xa index %d out of" " range for %s\n", ndx, f_name(file, NULL)); exit_cleanup(RERR_STREAMIO); } if (ndx != 0) { F_XATTR(file) = ndx - 1; return; } if ((count = read_varint(f)) != 0) { (void)EXPAND_ITEM_LIST(&temp_xattr, rsync_xa, count); temp_xattr.count = 0; } for (num = 1; num <= count; num++) { char *ptr, *name; rsync_xa *rxa; size_t name_len = read_varint(f); size_t datum_len = read_varint(f); size_t dget_len = datum_len > MAX_FULL_DATUM ? 1 + MAX_DIGEST_LEN : datum_len; size_t extra_len = MIGHT_NEED_RPRE ? RPRE_LEN : 0; if ((dget_len + extra_len < dget_len) || (dget_len + extra_len + name_len < dget_len + extra_len)) overflow_exit("receive_xattr"); ptr = new_array(char, dget_len + extra_len + name_len); if (!ptr) out_of_memory("receive_xattr"); name = ptr + dget_len + extra_len; read_buf(f, name, name_len); if (dget_len == datum_len) read_buf(f, ptr, dget_len); else { *ptr = XSTATE_ABBREV; read_buf(f, ptr + 1, MAX_DIGEST_LEN); } if (saw_xattr_filter) { if (name_is_excluded(name, NAME_IS_XATTR, ALL_FILTERS)) { free(ptr); continue; } } #ifdef HAVE_LINUX_XATTRS /* Non-root can only save the user namespace. */ if (am_root <= 0 && !HAS_PREFIX(name, USER_PREFIX)) { if (!am_root && !saw_xattr_filter) { free(ptr); continue; } name -= RPRE_LEN; name_len += RPRE_LEN; memcpy(name, RSYNC_PREFIX, RPRE_LEN); need_sort = 1; } #else /* This OS only has a user namespace, so we either * strip the user prefix, or we put a non-user * namespace inside our rsync hierarchy. 
*/ if (HAS_PREFIX(name, USER_PREFIX)) { name += UPRE_LEN; name_len -= UPRE_LEN; } else if (am_root) { name -= RPRE_LEN; name_len += RPRE_LEN; memcpy(name, RSYNC_PREFIX, RPRE_LEN); } else { free(ptr); continue; } #endif /* No rsync.%FOO attributes are copied w/o 2 -X options. */ if (preserve_xattrs < 2 && name_len > RPRE_LEN && name[RPRE_LEN] == '%' && HAS_PREFIX(name, RSYNC_PREFIX)) { free(ptr); continue; } rxa = EXPAND_ITEM_LIST(&temp_xattr, rsync_xa, 1); rxa->name = name; rxa->datum = ptr; rxa->name_len = name_len; rxa->datum_len = datum_len; rxa->num = num; } if (need_sort && count > 1) qsort(temp_xattr.items, count, sizeof (rsync_xa), rsync_xal_compare_names); ndx = rsync_xal_store(&temp_xattr); /* adds item to rsync_xal_l */ F_XATTR(file) = ndx; }
1
216,501
bool ShouldUseClientLoFiForRequest( const ResourceRequest& request, WebURLRequest::PreviewsState frame_previews_state) { if (request.GetPreviewsState() != WebURLRequest::kPreviewsUnspecified) return request.GetPreviewsState() & WebURLRequest::kClientLoFiOn; if (!(frame_previews_state & WebURLRequest::kClientLoFiOn)) return false; if (frame_previews_state & WebURLRequest::kServerLoFiOn) return request.Url().ProtocolIs("https"); return true; }
0
238,559
int equalizer_get_band_level(equalizer_context_t *context, int32_t band) { ALOGV("%s: band: %d level: %d", __func__, band, context->band_levels[band] * 100); return context->band_levels[band] * 100; }
0
356,117
static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) { struct page *page = NULL; int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); nd_set_link(nd, res ? ERR_PTR(res) : kmap(page)); if (page) unlock_page(page); return page; }
0
114,799
static int __submit_flush_wait(struct f2fs_sb_info *sbi, struct block_device *bdev) { struct bio *bio = f2fs_bio_alloc(0); int ret; bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; bio_set_dev(bio, bdev); ret = submit_bio_wait(bio); bio_put(bio); trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER), test_opt(sbi, FLUSH_MERGE), ret); return ret; }
0
275,412
bool JSTestEventTargetConstructor::getOwnPropertyDescriptor(JSObject* object, ExecState* exec, const Identifier& propertyName, PropertyDescriptor& descriptor) { return getStaticValueDescriptor<JSTestEventTargetConstructor, JSDOMWrapper>(exec, &JSTestEventTargetConstructorTable, jsCast<JSTestEventTargetConstructor*>(object), propertyName, descriptor); }
0