idx: int64
func: string
target: int64
519,179
void Field_time_hires::store_TIME(const MYSQL_TIME *ltime) { DBUG_ASSERT(ltime->year == 0); DBUG_ASSERT(ltime->month == 0); ulonglong packed= sec_part_shift(pack_time(ltime), dec) + zero_point; store_bigendian(packed, ptr, Field_time_hires::pack_length()); }
0
187,046
void WebPageProxy::drawPagesToPDF(WebFrameProxy* frame, uint32_t first, uint32_t count, PassRefPtr<DataCallback> prpCallback) { RefPtr<DataCallback> callback = prpCallback; if (!isValid()) { callback->invalidate(); return; } uint64_t callbackID = callback->callbackID(); m_dataCallbacks.set(callbackID, callback.get()); process()->send(Messages::WebPage::DrawPagesToPDF(frame->frameID(), first, count, callbackID), m_pageID, m_isPerformingDOMPrintOperation ? CoreIPC::DispatchMessageEvenWhenWaitingForSyncReply : 0); }
0
257,999
static char *main_format_millis(long millis, shortbuf *buf) { if (millis < 1000) { short_sprintf(*buf, "%ld ms", millis); } else if (millis < 10000) { short_sprintf(*buf, "%.1f sec", millis / 1000.0); } else { short_sprintf(*buf, "%ld sec", millis / 1000L); } return buf->buf; }
0
68,103
bool Scanner::read(size_t want) { DASSERT(!files.empty()); for (size_t i = files.size(); i --> 0; ) { Input *in = files[i]; const size_t have = fread(lim, 1, want, in->file); in->so = lim; lim += have; in->eo = lim; want -= have; // buffer filled if (want == 0) return true; } return false; }
0
194,048
void AddIncompatibleApplicationsStrings(content::WebUIDataSource* html_source) { LocalizedString localized_strings[] = { {"incompatibleApplicationsResetCardTitle", IDS_SETTINGS_INCOMPATIBLE_APPLICATIONS_RESET_CARD_TITLE}, {"incompatibleApplicationsSubpageSubtitle", IDS_SETTINGS_INCOMPATIBLE_APPLICATIONS_SUBPAGE_SUBTITLE}, {"incompatibleApplicationsSubpageSubtitleNoAdminRights", IDS_SETTINGS_INCOMPATIBLE_APPLICATIONS_SUBPAGE_SUBTITLE_NO_ADMIN_RIGHTS}, {"incompatibleApplicationsListTitle", IDS_SETTINGS_INCOMPATIBLE_APPLICATIONS_LIST_TITLE}, {"incompatibleApplicationsRemoveButton", IDS_SETTINGS_INCOMPATIBLE_APPLICATIONS_REMOVE_BUTTON}, {"incompatibleApplicationsUpdateButton", IDS_SETTINGS_INCOMPATIBLE_APPLICATIONS_UPDATE_BUTTON}, {"incompatibleApplicationsDone", IDS_SETTINGS_INCOMPATIBLE_APPLICATIONS_DONE}, }; AddLocalizedStringsBulk(html_source, localized_strings, arraysize(localized_strings)); base::string16 learn_how_text = l10n_util::GetStringFUTF16( IDS_SETTINGS_INCOMPATIBLE_APPLICATIONS_SUBPAGE_LEARN_HOW, base::ASCIIToUTF16("chrome://placeholder")); html_source->AddString("incompatibleApplicationsSubpageLearnHow", learn_how_text); }
0
494,592
jwe_t * r_jwe_quick_parse(const char * jwe_str, uint32_t parse_flags, int x5u_flags) { return r_jwe_quick_parsen(jwe_str, o_strlen(jwe_str), parse_flags, x5u_flags); }
0
197,370
void FFmpegVideoDecodeEngine::ProduceVideoFrame( scoped_refptr<VideoFrame> frame) { DCHECK(frame.get() && !frame->IsEndOfStream()); pending_output_buffers_++; frame_queue_available_.push_back(frame); if (flush_pending_) { TryToFinishPendingFlush(); } else if (!output_eos_reached_) { ReadInput(); } }
0
508,929
int ssl3_get_cert_verify(SSL *s) { EVP_PKEY *pkey = NULL; unsigned char *p; int al, ok, ret = 0; long n; int type = 0, i, j; X509 *peer; const EVP_MD *md = NULL; EVP_MD_CTX mctx; EVP_MD_CTX_init(&mctx); /* * We should only process a CertificateVerify message if we have received * a Certificate from the client. If so then |s->session->peer| will be non * NULL. In some instances a CertificateVerify message is not required even * if the peer has sent a Certificate (e.g. such as in the case of static * DH). In that case the ClientKeyExchange processing will skip the * CertificateVerify state so we should not arrive here. */ if (s->session->peer == NULL) { ret = 1; goto end; } n = s->method->ssl_get_message(s, SSL3_ST_SR_CERT_VRFY_A, SSL3_ST_SR_CERT_VRFY_B, SSL3_MT_CERTIFICATE_VERIFY, SSL3_RT_MAX_PLAIN_LENGTH, &ok); if (!ok) return ((int)n); peer = s->session->peer; pkey = X509_get_pubkey(peer); type = X509_certificate_type(peer, pkey); if (!(type & EVP_PKT_SIGN)) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_SIGNATURE_FOR_NON_SIGNING_CERTIFICATE); al = SSL_AD_ILLEGAL_PARAMETER; goto f_err; } /* we now have a signature that we need to verify */ p = (unsigned char *)s->init_msg; /* Check for broken implementations of GOST ciphersuites */ /* * If key is GOST and n is exactly 64, it is bare signature without * length field */ if (n == 64 && (pkey->type == NID_id_GostR3410_94 || pkey->type == NID_id_GostR3410_2001)) { i = 64; } else { if (TLS1_get_version(s) >= TLS1_2_VERSION) { int sigalg = tls12_get_sigid(pkey); /* Should never happen */ if (sigalg == -1) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, ERR_R_INTERNAL_ERROR); al = SSL_AD_INTERNAL_ERROR; goto f_err; } /* Check key type is consistent with signature */ if (sigalg != (int)p[1]) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_WRONG_SIGNATURE_TYPE); al = SSL_AD_DECODE_ERROR; goto f_err; } md = tls12_get_hash(p[0]); if (md == NULL) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_UNKNOWN_DIGEST); al = SSL_AD_DECODE_ERROR; goto f_err; } #ifdef SSL_DEBUG fprintf(stderr, "USING TLSv1.2 HASH %s\n", EVP_MD_name(md)); #endif p += 2; n -= 2; } n2s(p, i); n -= 2; if (i > n) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_LENGTH_MISMATCH); al = SSL_AD_DECODE_ERROR; goto f_err; } } j = EVP_PKEY_size(pkey); if ((i > j) || (n > j) || (n <= 0)) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_WRONG_SIGNATURE_SIZE); al = SSL_AD_DECODE_ERROR; goto f_err; } if (TLS1_get_version(s) >= TLS1_2_VERSION) { long hdatalen = 0; void *hdata; hdatalen = BIO_get_mem_data(s->s3->handshake_buffer, &hdata); if (hdatalen <= 0) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, ERR_R_INTERNAL_ERROR); al = SSL_AD_INTERNAL_ERROR; goto f_err; } #ifdef SSL_DEBUG fprintf(stderr, "Using TLS 1.2 with client verify alg %s\n", EVP_MD_name(md)); #endif if (!EVP_VerifyInit_ex(&mctx, md, NULL) || !EVP_VerifyUpdate(&mctx, hdata, hdatalen)) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, ERR_R_EVP_LIB); al = SSL_AD_INTERNAL_ERROR; goto f_err; } if (EVP_VerifyFinal(&mctx, p, i, pkey) <= 0) { al = SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_BAD_SIGNATURE); goto f_err; } } else #ifndef OPENSSL_NO_RSA if (pkey->type == EVP_PKEY_RSA) { i = RSA_verify(NID_md5_sha1, s->s3->tmp.cert_verify_md, MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH, p, i, pkey->pkey.rsa); if (i < 0) { al = SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_BAD_RSA_DECRYPT); goto f_err; } if (i == 0) { al = SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_BAD_RSA_SIGNATURE); goto f_err; } } else #endif #ifndef OPENSSL_NO_DSA if (pkey->type == EVP_PKEY_DSA) { j = DSA_verify(pkey->save_type, &(s->s3->tmp.cert_verify_md[MD5_DIGEST_LENGTH]), SHA_DIGEST_LENGTH, p, i, pkey->pkey.dsa); if (j <= 0) { /* bad signature */ al = SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_BAD_DSA_SIGNATURE); goto f_err; } } else #endif #ifndef OPENSSL_NO_ECDSA if (pkey->type == EVP_PKEY_EC) { j = ECDSA_verify(pkey->save_type, &(s->s3->tmp.cert_verify_md[MD5_DIGEST_LENGTH]), SHA_DIGEST_LENGTH, p, i, pkey->pkey.ec); if (j <= 0) { /* bad signature */ al = SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_BAD_ECDSA_SIGNATURE); goto f_err; } } else #endif if (pkey->type == NID_id_GostR3410_94 || pkey->type == NID_id_GostR3410_2001) { unsigned char signature[64]; int idx; EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new(pkey, NULL); if (pctx == NULL) { al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, ERR_R_MALLOC_FAILURE); goto f_err; } if (EVP_PKEY_verify_init(pctx) <= 0) { EVP_PKEY_CTX_free(pctx); al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, ERR_R_INTERNAL_ERROR); goto f_err; } if (i != 64) { fprintf(stderr, "GOST signature length is %d", i); } for (idx = 0; idx < 64; idx++) { signature[63 - idx] = p[idx]; } j = EVP_PKEY_verify(pctx, signature, 64, s->s3->tmp.cert_verify_md, 32); EVP_PKEY_CTX_free(pctx); if (j <= 0) { al = SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_BAD_ECDSA_SIGNATURE); goto f_err; } } else { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, ERR_R_INTERNAL_ERROR); al = SSL_AD_UNSUPPORTED_CERTIFICATE; goto f_err; } ret = 1; if (0) { f_err: ssl3_send_alert(s, SSL3_AL_FATAL, al); s->state = SSL_ST_ERR; } end: if (s->s3->handshake_buffer) { BIO_free(s->s3->handshake_buffer); s->s3->handshake_buffer = NULL; s->s3->flags &= ~TLS1_FLAGS_KEEP_HANDSHAKE; } EVP_MD_CTX_cleanup(&mctx); EVP_PKEY_free(pkey); return (ret); }
0
344,128
int main(void) { char *output = NULL; CuSuite* suite = CuSuiteNew(); CuSuiteSetup(suite, NULL, NULL); SUITE_ADD_TEST(suite, testDefault); SUITE_ADD_TEST(suite, testNoLoad); SUITE_ADD_TEST(suite, testNoAutoload); SUITE_ADD_TEST(suite, testInvalidLens); SUITE_ADD_TEST(suite, testLoadSave); SUITE_ADD_TEST(suite, testLoadDefined); SUITE_ADD_TEST(suite, testDefvarExpr); SUITE_ADD_TEST(suite, testReloadChanged); SUITE_ADD_TEST(suite, testReloadDirty); SUITE_ADD_TEST(suite, testReloadDeleted); SUITE_ADD_TEST(suite, testReloadDeletedMeta); SUITE_ADD_TEST(suite, testReloadExternalMod); SUITE_ADD_TEST(suite, testReloadAfterSaveNewfile); SUITE_ADD_TEST(suite, testParseErrorReported); SUITE_ADD_TEST(suite, testLoadExclWithRoot); SUITE_ADD_TEST(suite, testLoadTrailingExcl); abs_top_srcdir = getenv("abs_top_srcdir"); if (abs_top_srcdir == NULL) die("env var abs_top_srcdir must be set"); abs_top_builddir = getenv("abs_top_builddir"); if (abs_top_builddir == NULL) die("env var abs_top_builddir must be set"); if (asprintf(&root, "%s/tests/root", abs_top_srcdir) < 0) { die("failed to set root"); } if (asprintf(&loadpath, "%s/lenses", abs_top_srcdir) < 0) { die("failed to set loadpath"); } CuSuiteRun(suite); CuSuiteSummary(suite, &output); CuSuiteDetails(suite, &output); printf("%s\n", output); free(output); return suite->failCount; }
1
302,784
static PyObject **_Unpickler_NewMemo(Py_ssize_t new_size) { PyObject **memo = PyMem_NEW(PyObject *, new_size); if (memo == NULL) { PyErr_NoMemory(); return NULL; } memset(memo, 0, new_size * sizeof(PyObject *)); return memo; }
0
224,664
void HWNDMessageHandler::OnInputLangChange(DWORD character_set, HKL input_language_id) { delegate_->HandleInputLanguageChange(character_set, input_language_id); }
0
361,027
static void maybe_kill_dialog (GSWindow *window) { if (!window->priv->dialog_shake_in_progress && window->priv->dialog_quit_requested && window->priv->lock_pid > 0) { kill (window->priv->lock_pid, SIGTERM); } }
0
208,429
void WebContentsImpl::OnFindMatchRectsReply( int version, const std::vector<gfx::RectF>& rects, const gfx::RectF& active_rect) { if (delegate_) delegate_->FindMatchRectsReply(this, version, rects, active_rect); }
0
39,217
static VOID SendDeviceIoControlRequestWorkItemRoutine (PDEVICE_OBJECT rootDeviceObject, SendDeviceIoControlRequestWorkItemArgs *arg) { arg->Status = SendDeviceIoControlRequest (arg->deviceObject, arg->ioControlCode, arg->inputBuffer, arg->inputBufferSize, arg->outputBuffer, arg->outputBufferSize); KeSetEvent (&arg->WorkItemCompletedEvent, IO_NO_INCREMENT, FALSE); }
0
87,772
static void reply_pending_requests(struct btd_adapter *adapter) { GSList *l; if (!adapter) return; /* pending bonding */ for (l = adapter->devices; l; l = l->next) { struct btd_device *device = l->data; if (device_is_bonding(device, NULL)) device_bonding_failed(device, HCI_OE_USER_ENDED_CONNECTION); } }
0
32,499
GF_Box *cprt_box_new() { ISOM_DECL_BOX_ALLOC(GF_CopyrightBox, GF_ISOM_BOX_TYPE_CPRT); tmp->packedLanguageCode[0] = 'u'; tmp->packedLanguageCode[1] = 'n'; tmp->packedLanguageCode[2] = 'd'; return (GF_Box *)tmp; }
0
341,638
static uint64_t mpc8544_guts_read(void *opaque, target_phys_addr_t addr, unsigned size) { uint32_t value = 0; CPUPPCState *env = cpu_single_env; addr &= MPC8544_GUTS_MMIO_SIZE - 1; switch (addr) { case MPC8544_GUTS_ADDR_PVR: value = env->spr[SPR_PVR]; break; case MPC8544_GUTS_ADDR_SVR: value = env->spr[SPR_E500_SVR]; break; default: fprintf(stderr, "guts: Unknown register read: %x\n", (int)addr); break; } return value; }
0
445,960
static void rfx_profiler_free(RFX_CONTEXT* context) { PROFILER_FREE(context->priv->prof_rfx_decode_rgb) PROFILER_FREE(context->priv->prof_rfx_decode_component) PROFILER_FREE(context->priv->prof_rfx_rlgr_decode) PROFILER_FREE(context->priv->prof_rfx_differential_decode) PROFILER_FREE(context->priv->prof_rfx_quantization_decode) PROFILER_FREE(context->priv->prof_rfx_dwt_2d_decode) PROFILER_FREE(context->priv->prof_rfx_ycbcr_to_rgb) PROFILER_FREE(context->priv->prof_rfx_encode_rgb) PROFILER_FREE(context->priv->prof_rfx_encode_component) PROFILER_FREE(context->priv->prof_rfx_rlgr_encode) PROFILER_FREE(context->priv->prof_rfx_differential_encode) PROFILER_FREE(context->priv->prof_rfx_quantization_encode) PROFILER_FREE(context->priv->prof_rfx_dwt_2d_encode) PROFILER_FREE(context->priv->prof_rfx_rgb_to_ycbcr) PROFILER_FREE(context->priv->prof_rfx_encode_format_rgb) }
0
24,230
static ssize_t iso9660_write_data(struct archive_write *a, const void *buff, size_t s) { struct iso9660 *iso9660 = a->format_data; ssize_t r; if (iso9660->cur_file == NULL) return (0); if (archive_entry_filetype(iso9660->cur_file->entry) != AE_IFREG) return (0); if (s > iso9660->bytes_remaining) s = (size_t)iso9660->bytes_remaining; if (s == 0) return (0); r = write_iso9660_data(a, buff, s); if (r > 0) iso9660->bytes_remaining -= r; return (r); }
0
57,407
static int proc_open(const char *path, struct fuse_file_info *fi) { int type = -1; struct file_info *info; if (strcmp(path, "/proc/meminfo") == 0) type = LXC_TYPE_PROC_MEMINFO; else if (strcmp(path, "/proc/cpuinfo") == 0) type = LXC_TYPE_PROC_CPUINFO; else if (strcmp(path, "/proc/uptime") == 0) type = LXC_TYPE_PROC_UPTIME; else if (strcmp(path, "/proc/stat") == 0) type = LXC_TYPE_PROC_STAT; else if (strcmp(path, "/proc/diskstats") == 0) type = LXC_TYPE_PROC_DISKSTATS; if (type == -1) return -ENOENT; info = malloc(sizeof(*info)); if (!info) return -ENOMEM; memset(info, 0, sizeof(*info)); info->type = type; info->buflen = get_procfile_size(path) + BUF_RESERVE_SIZE; do { info->buf = malloc(info->buflen); } while (!info->buf); memset(info->buf, 0, info->buflen); /* set actual size to buffer size */ info->size = info->buflen; fi->fh = (unsigned long)info; return 0; }
0
188,376
static int assign_host_irq(struct kvm *kvm, struct kvm_assigned_dev_kernel *dev, __u32 host_irq_type) { int r = -EEXIST; if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK) return r; snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s", pci_name(dev->dev)); switch (host_irq_type) { case KVM_DEV_IRQ_HOST_INTX: r = assigned_device_enable_host_intx(kvm, dev); break; #ifdef __KVM_HAVE_MSI case KVM_DEV_IRQ_HOST_MSI: r = assigned_device_enable_host_msi(kvm, dev); break; #endif #ifdef __KVM_HAVE_MSIX case KVM_DEV_IRQ_HOST_MSIX: r = assigned_device_enable_host_msix(kvm, dev); break; #endif default: r = -EINVAL; } if (!r) dev->irq_requested_type |= host_irq_type; return r; }
0
251,603
barrier_blocks_device(struct PointerBarrierClient *client, DeviceIntPtr dev) { int i; int master_id; /* Clients with no devices are treated as * if they specified XIAllDevices. */ if (client->num_devices == 0) return TRUE; master_id = GetMaster(dev, POINTER_OR_FLOAT)->id; for (i = 0; i < client->num_devices; i++) { int device_id = client->device_ids[i]; if (device_id == XIAllDevices || device_id == XIAllMasterDevices || device_id == master_id) return TRUE; } return FALSE; }
0
183,990
psh_glyph_init( PSH_Glyph glyph, FT_Outline* outline, PS_Hints ps_hints, PSH_Globals globals ) { FT_Error error; FT_Memory memory; /* clear all fields */ FT_MEM_ZERO( glyph, sizeof ( *glyph ) ); memory = glyph->memory = globals->memory; /* allocate and setup points + contours arrays */ if ( FT_NEW_ARRAY( glyph->points, outline->n_points ) || FT_NEW_ARRAY( glyph->contours, outline->n_contours ) ) goto Exit; glyph->num_points = outline->n_points; glyph->num_contours = outline->n_contours; { FT_UInt first = 0, next, n; PSH_Point points = glyph->points; PSH_Contour contour = glyph->contours; for ( n = 0; n < glyph->num_contours; n++ ) { FT_Int count; PSH_Point point; next = outline->contours[n] + 1; count = next - first; contour->start = points + first; contour->count = (FT_UInt)count; if ( count > 0 ) { point = points + first; point->prev = points + next - 1; point->contour = contour; for ( ; count > 1; count-- ) { point[0].next = point + 1; point[1].prev = point; point++; point->contour = contour; } point->next = points + first; } contour++; first = next; } } { PSH_Point points = glyph->points; PSH_Point point = points; FT_Vector* vec = outline->points; FT_UInt n; for ( n = 0; n < glyph->num_points; n++, point++ ) { FT_Int n_prev = (FT_Int)( point->prev - points ); FT_Int n_next = (FT_Int)( point->next - points ); FT_Pos dxi, dyi, dxo, dyo; if ( !( outline->tags[n] & FT_CURVE_TAG_ON ) ) point->flags = PSH_POINT_OFF; dxi = vec[n].x - vec[n_prev].x; dyi = vec[n].y - vec[n_prev].y; point->dir_in = (FT_Char)psh_compute_dir( dxi, dyi ); dxo = vec[n_next].x - vec[n].x; dyo = vec[n_next].y - vec[n].y; point->dir_out = (FT_Char)psh_compute_dir( dxo, dyo ); /* detect smooth points */ if ( point->flags & PSH_POINT_OFF ) point->flags |= PSH_POINT_SMOOTH; else if ( point->dir_in == point->dir_out ) { if ( point->dir_out != PSH_DIR_NONE || psh_corner_is_flat( dxi, dyi, dxo, dyo ) ) point->flags |= PSH_POINT_SMOOTH; } } } glyph->outline = outline; glyph->globals = globals; #ifdef COMPUTE_INFLEXS psh_glyph_load_points( glyph, 0 ); psh_glyph_compute_inflections( glyph ); #endif /* COMPUTE_INFLEXS */ /* now deal with hints tables */ error = psh_hint_table_init( &glyph->hint_tables [0], &ps_hints->dimension[0].hints, &ps_hints->dimension[0].masks, &ps_hints->dimension[0].counters, memory ); if ( error ) goto Exit; error = psh_hint_table_init( &glyph->hint_tables [1], &ps_hints->dimension[1].hints, &ps_hints->dimension[1].masks, &ps_hints->dimension[1].counters, memory ); if ( error ) goto Exit; Exit: return error; }
0
374,865
static BitmapAnd *_copyBitmapAnd(const BitmapAnd *from) { BitmapAnd *newnode = makeNode(BitmapAnd); /* * copy node superclass fields */ CopyPlanFields((const Plan *) from, (Plan *) newnode); /* * copy remainder of node */ COPY_NODE_FIELD(bitmapplans); return newnode; }
0
425,536
static struct bpf_blk *_gen_bpf_action(struct bpf_state *state, struct bpf_blk *blk, uint32_t action) { struct bpf_instr instr; _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_RET), _BPF_JMP_NO, _BPF_JMP_NO, _BPF_K(state->arch, action)); return _blk_append(state, blk, &instr); }
0
172,260
SVGDocumentExtensions& Document::AccessSVGExtensions() { if (!svg_extensions_) svg_extensions_ = new SVGDocumentExtensions(this); return *svg_extensions_; }
0
229,673
std::string ExtensionWebContentsObserver::GetExtensionId( content::RenderViewHost* render_view_host) { const GURL& site = render_view_host->GetSiteInstance()->GetSiteURL(); if (!site.SchemeIs(kExtensionScheme)) return std::string(); return site.host(); }
0
134,999
static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) { unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)]; int cpu = smp_processor_id(); struct hw_perf_event *hwc; struct p4_event_bind *bind; unsigned int i, thread, num; int cntr_idx, escr_idx; bitmap_zero(used_mask, X86_PMC_IDX_MAX); bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE); for (i = 0, num = n; i < n; i++, num--) { hwc = &cpuc->event_list[i]->hw; thread = p4_ht_thread(cpu); bind = p4_config_get_bind(hwc->config); escr_idx = p4_get_escr_idx(bind->escr_msr[thread]); if (unlikely(escr_idx == -1)) goto done; if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) { cntr_idx = hwc->idx; if (assign) assign[i] = hwc->idx; goto reserve; } cntr_idx = p4_next_cntr(thread, used_mask, bind); if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) goto done; p4_pmu_swap_config_ts(hwc, cpu); if (assign) assign[i] = cntr_idx; reserve: set_bit(cntr_idx, used_mask); set_bit(escr_idx, escr_mask); } done: return num ? -ENOSPC : 0; }
0
23,887
static void dissect_rsvp_template_filter(proto_item *ti, proto_tree *rsvp_object_tree, tvbuff_t *tvb, int offset, int obj_length, int rsvp_class _U_, int type, rsvp_conversation_info *rsvph) { int offset2 = offset + 4; proto_item_set_text(ti, "%s", summary_template(tvb, offset)); switch (type) { case 1: proto_tree_add_uint_format_value(rsvp_object_tree, hf_rsvp_ctype, tvb, offset + 3, 1, type, "1 - IPv4"); proto_tree_add_item(rsvp_object_tree, hf_rsvp_filter[RSVPF_SENDER_IP], tvb, offset2, 4, ENC_BIG_ENDIAN); proto_tree_add_item(rsvp_object_tree, hf_rsvp_filter[RSVPF_SENDER_PORT], tvb, offset2 + 6, 2, ENC_BIG_ENDIAN); set_address_tvb(&rsvph->source, AT_IPv4, 4, tvb, offset2); rsvph->udp_source_port = tvb_get_ntohs(tvb, offset2 + 6); break; case 2: proto_tree_add_uint_format_value(rsvp_object_tree, hf_rsvp_ctype, tvb, offset + 3, 1, type, "2 - IPv6"); proto_tree_add_item(rsvp_object_tree, hf_rsvp_template_filter_source_address_ipv6, tvb, offset2, 16, ENC_NA); proto_tree_add_item(rsvp_object_tree, hf_rsvp_template_filter_source_port, tvb, offset2 + 18, 2, ENC_BIG_ENDIAN); break; case 7: proto_tree_add_uint_format_value(rsvp_object_tree, hf_rsvp_ctype, tvb, offset + 3, 1, type, "7 - IPv4 LSP"); proto_tree_add_item(rsvp_object_tree, hf_rsvp_filter[RSVPF_SENDER_IP], tvb, offset2, 4, ENC_BIG_ENDIAN); if (rsvp_class == RSVP_CLASS_SENDER_TEMPLATE) { proto_tree_add_item(rsvp_object_tree, hf_rsvp_filter[RSVPF_SENDER_SHORT_CALL_ID], tvb, offset2 + 4, 2, ENC_BIG_ENDIAN); } proto_tree_add_item(rsvp_object_tree, hf_rsvp_filter[RSVPF_SENDER_LSP_ID], tvb, offset2 + 6, 2, ENC_BIG_ENDIAN); set_address_tvb(&rsvph->source, AT_IPv4, 4, tvb, offset2); rsvph->udp_source_port = tvb_get_ntohs(tvb, offset2 + 6); break; case 8: proto_tree_add_uint_format_value(rsvp_object_tree, hf_rsvp_ctype, tvb, offset + 3, 1, type, "8 - IPv6 LSP"); proto_tree_add_item(rsvp_object_tree, hf_rsvp_filter[RSVPF_SENDER_IP], tvb, offset2, 16, ENC_BIG_ENDIAN); if (rsvp_class == RSVP_CLASS_SENDER_TEMPLATE) { proto_tree_add_item(rsvp_object_tree, hf_rsvp_filter[RSVPF_SENDER_SHORT_CALL_ID], tvb, offset2 + 16, 2, ENC_BIG_ENDIAN); } proto_tree_add_item(rsvp_object_tree, hf_rsvp_filter[RSVPF_SENDER_LSP_ID], tvb, offset2 + 18, 2, ENC_BIG_ENDIAN); set_address_tvb(&rsvph->source, AT_IPv6, 16, tvb, offset2); rsvph->udp_source_port = tvb_get_ntohs(tvb, offset2 + 18); break; case 9: proto_tree_add_uint_format_value(rsvp_object_tree, hf_rsvp_ctype, tvb, offset + 3, 1, type, "9 - IPv4 Aggregate"); proto_tree_add_item(rsvp_object_tree, hf_rsvp_filter[RSVPF_SENDER_IP], tvb, offset2, 4, ENC_BIG_ENDIAN); set_address_tvb(&rsvph->source, AT_IPv4, 4, tvb, offset2); break; default: proto_tree_add_uint_format_value(rsvp_object_tree, hf_rsvp_ctype, tvb, offset + 3, 1, type, "Unknown (%u)", type); proto_tree_add_item(rsvp_object_tree, hf_rsvp_template_filter_data, tvb, offset2, obj_length - 4, ENC_NA); break; } }
0
375,431
static bool WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, bool fetching_ckpt, XLogRecPtr tliRecPtr) { static pg_time_t last_fail_time = 0; pg_time_t now; /*------- * Standby mode is implemented by a state machine: * * 1. Read from either archive or pg_xlog (XLOG_FROM_ARCHIVE), or just * pg_xlog (XLOG_FROM_XLOG) * 2. Check trigger file * 3. Read from primary server via walreceiver (XLOG_FROM_STREAM) * 4. Rescan timelines * 5. Sleep 5 seconds, and loop back to 1. * * Failure to read from the current source advances the state machine to * the next state. * * 'currentSource' indicates the current state. There are no currentSource * values for "check trigger", "rescan timelines", and "sleep" states, * those actions are taken when reading from the previous source fails, as * part of advancing to the next state. *------- */ if (!InArchiveRecovery) currentSource = XLOG_FROM_PG_XLOG; else if (currentSource == 0) currentSource = XLOG_FROM_ARCHIVE; for (;;) { int oldSource = currentSource; /* * First check if we failed to read from the current source, and * advance the state machine if so. The failure to read might've * happened outside this function, e.g when a CRC check fails on a * record, or within this loop. */ if (lastSourceFailed) { switch (currentSource) { case XLOG_FROM_ARCHIVE: case XLOG_FROM_PG_XLOG: /* * Check to see if the trigger file exists. Note that we * do this only after failure, so when you create the * trigger file, we still finish replaying as much as we * can from archive and pg_xlog before failover. */ if (StandbyMode && CheckForStandbyTrigger()) { ShutdownWalRcv(); return false; } /* * Not in standby mode, and we've now tried the archive * and pg_xlog. */ if (!StandbyMode) return false; /* * If primary_conninfo is set, launch walreceiver to try * to stream the missing WAL. * * If fetching_ckpt is TRUE, RecPtr points to the initial * checkpoint location. In that case, we use RedoStartLSN * as the streaming start position instead of RecPtr, so * that when we later jump backwards to start redo at * RedoStartLSN, we will have the logs streamed already. */ if (PrimaryConnInfo) { XLogRecPtr ptr; TimeLineID tli; if (fetching_ckpt) { ptr = RedoStartLSN; tli = ControlFile->checkPointCopy.ThisTimeLineID; } else { ptr = tliRecPtr; tli = tliOfPointInHistory(tliRecPtr, expectedTLEs); if (curFileTLI > 0 && tli < curFileTLI) elog(ERROR, "according to history file, WAL location %X/%X belongs to timeline %u, but previous recovered WAL file came from timeline %u", (uint32) (ptr >> 32), (uint32) ptr, tli, curFileTLI); } curFileTLI = tli; RequestXLogStreaming(tli, ptr, PrimaryConnInfo, PrimarySlotName); receivedUpto = 0; } /* * Move to XLOG_FROM_STREAM state in either case. We'll * get immediate failure if we didn't launch walreceiver, * and move on to the next state. */ currentSource = XLOG_FROM_STREAM; break; case XLOG_FROM_STREAM: /* * Failure while streaming. Most likely, we got here * because streaming replication was terminated, or * promotion was triggered. But we also get here if we * find an invalid record in the WAL streamed from master, * in which case something is seriously wrong. There's * little chance that the problem will just go away, but * PANIC is not good for availability either, especially * in hot standby mode. So, we treat that the same as * disconnection, and retry from archive/pg_xlog again. * The WAL in the archive should be identical to what was * streamed, so it's unlikely that it helps, but one can * hope... */ /* * Before we leave XLOG_FROM_STREAM state, make sure that * walreceiver is not active, so that it won't overwrite * WAL that we restore from archive. */ if (WalRcvStreaming()) ShutdownWalRcv(); /* * Before we sleep, re-scan for possible new timelines if * we were requested to recover to the latest timeline. */ if (recoveryTargetIsLatest) { if (rescanLatestTimeLine()) { currentSource = XLOG_FROM_ARCHIVE; break; } } /* * XLOG_FROM_STREAM is the last state in our state * machine, so we've exhausted all the options for * obtaining the requested WAL. We're going to loop back * and retry from the archive, but if it hasn't been long * since last attempt, sleep 5 seconds to avoid * busy-waiting. */ now = (pg_time_t) time(NULL); if ((now - last_fail_time) < 5) { pg_usleep(1000000L * (5 - (now - last_fail_time))); now = (pg_time_t) time(NULL); } last_fail_time = now; currentSource = XLOG_FROM_ARCHIVE; break; default: elog(ERROR, "unexpected WAL source %d", currentSource); } } else if (currentSource == XLOG_FROM_PG_XLOG) { /* * We just successfully read a file in pg_xlog. We prefer files in * the archive over ones in pg_xlog, so try the next file again * from the archive first. */ if (InArchiveRecovery) currentSource = XLOG_FROM_ARCHIVE; } if (currentSource != oldSource) elog(DEBUG2, "switched WAL source from %s to %s after %s", xlogSourceNames[oldSource], xlogSourceNames[currentSource], lastSourceFailed ? "failure" : "success"); /* * We've now handled possible failure. Try to read from the chosen * source. */ lastSourceFailed = false; switch (currentSource) { case XLOG_FROM_ARCHIVE: case XLOG_FROM_PG_XLOG: /* Close any old file we might have open. */ if (readFile >= 0) { close(readFile); readFile = -1; } /* Reset curFileTLI if random fetch. */ if (randAccess) curFileTLI = 0; /* * Try to restore the file from archive, or read an existing * file from pg_xlog. */ readFile = XLogFileReadAnyTLI(readSegNo, DEBUG2, currentSource == XLOG_FROM_ARCHIVE ? XLOG_FROM_ANY : currentSource); if (readFile >= 0) return true; /* success! */ /* * Nope, not found in archive or pg_xlog. */ lastSourceFailed = true; break; case XLOG_FROM_STREAM: { bool havedata; /* * Check if WAL receiver is still active. */ if (!WalRcvStreaming()) { lastSourceFailed = true; break; } /* * Walreceiver is active, so see if new data has arrived. * * We only advance XLogReceiptTime when we obtain fresh * WAL from walreceiver and observe that we had already * processed everything before the most recent "chunk" * that it flushed to disk. In steady state where we are * keeping up with the incoming data, XLogReceiptTime will * be updated on each cycle. When we are behind, * XLogReceiptTime will not advance, so the grace time * allotted to conflicting queries will decrease. */ if (RecPtr < receivedUpto) havedata = true; else { XLogRecPtr latestChunkStart; receivedUpto = GetWalRcvWriteRecPtr(&latestChunkStart, &receiveTLI); if (RecPtr < receivedUpto && receiveTLI == curFileTLI) { havedata = true; if (latestChunkStart <= RecPtr) { XLogReceiptTime = GetCurrentTimestamp(); SetCurrentChunkStartTime(XLogReceiptTime); } } else havedata = false; } if (havedata) { /* * Great, streamed far enough. Open the file if it's * not open already. Also read the timeline history * file if we haven't initialized timeline history * yet; it should be streamed over and present in * pg_xlog by now. Use XLOG_FROM_STREAM so that * source info is set correctly and XLogReceiptTime * isn't changed. */ if (readFile < 0) { if (!expectedTLEs) expectedTLEs = readTimeLineHistory(receiveTLI); readFile = XLogFileRead(readSegNo, PANIC, receiveTLI, XLOG_FROM_STREAM, false); Assert(readFile >= 0); } else { /* just make sure source info is correct... */ readSource = XLOG_FROM_STREAM; XLogReceiptSource = XLOG_FROM_STREAM; return true; } break; } /* * Data not here yet. Check for trigger, then wait for * walreceiver to wake us up when new WAL arrives. */ if (CheckForStandbyTrigger()) { /* * Note that we don't "return false" immediately here. * After being triggered, we still want to replay all * the WAL that was already streamed. It's in pg_xlog * now, so we just treat this as a failure, and the * state machine will move on to replay the streamed * WAL from pg_xlog, and then recheck the trigger and * exit replay. */ lastSourceFailed = true; break; } /* * Wait for more WAL to arrive. Time out after 5 seconds, * like when polling the archive, to react to a trigger * file promptly. */ WaitLatch(&XLogCtl->recoveryWakeupLatch, WL_LATCH_SET | WL_TIMEOUT, 5000L); ResetLatch(&XLogCtl->recoveryWakeupLatch); break; } default: elog(ERROR, "unexpected WAL source %d", currentSource); } /* * This possibly-long loop needs to handle interrupts of startup * process. */ HandleStartupProcInterrupts(); } while (StandbyMode); return false; }
0
365,183
static void command_process_destroy_cell(cell_t *cell, or_connection_t *conn) { circuit_t *circ; int reason; circ = circuit_get_by_circid_orconn(cell->circ_id, conn); reason = (uint8_t)cell->payload[0]; if (!circ) { log_info(LD_OR,"unknown circuit %d on connection from %s:%d. Dropping.", cell->circ_id, conn->_base.address, conn->_base.port); return; } log_debug(LD_OR,"Received for circID %d.",cell->circ_id); if (!CIRCUIT_IS_ORIGIN(circ) && cell->circ_id == TO_OR_CIRCUIT(circ)->p_circ_id) { /* the destroy came from behind */ circuit_set_p_circid_orconn(TO_OR_CIRCUIT(circ), 0, NULL); circuit_mark_for_close(circ, reason|END_CIRC_REASON_FLAG_REMOTE); } else { /* the destroy came from ahead */ circuit_set_n_circid_orconn(circ, 0, NULL); if (CIRCUIT_IS_ORIGIN(circ)) { circuit_mark_for_close(circ, reason|END_CIRC_REASON_FLAG_REMOTE); } else { char payload[1]; log_debug(LD_OR, "Delivering 'truncated' back."); payload[0] = (char)reason; relay_send_command_from_edge(0, circ, RELAY_COMMAND_TRUNCATED, payload, sizeof(payload), NULL); } } }
0
228,438
static FT_Int32 TT_DotFix14( FT_Int32 ax, FT_Int32 ay, FT_Int bx, FT_Int by ) { FT_Int32 m, s, hi1, hi2, hi; FT_UInt32 l, lo1, lo2, lo; /* compute ax*bx as 64-bit value */ l = (FT_UInt32)( ( ax & 0xFFFFU ) * bx ); m = ( ax >> 16 ) * bx; lo1 = l + ( (FT_UInt32)m << 16 ); hi1 = ( m >> 16 ) + ( (FT_Int32)l >> 31 ) + ( lo1 < l ); /* compute ay*by as 64-bit value */ l = (FT_UInt32)( ( ay & 0xFFFFU ) * by ); m = ( ay >> 16 ) * by; lo2 = l + ( (FT_UInt32)m << 16 ); hi2 = ( m >> 16 ) + ( (FT_Int32)l >> 31 ) + ( lo2 < l ); /* add them */ lo = lo1 + lo2; hi = hi1 + hi2 + ( lo < lo1 ); /* divide the result by 2^14 with rounding */ s = hi >> 31; l = lo + (FT_UInt32)s; hi += s + ( l < lo ); lo = l; l = lo + 0x2000U; hi += ( l < lo ); return (FT_Int32)( ( (FT_UInt32)hi << 18 ) | ( l >> 14 ) ); }
0
357,868
dx_probe(const struct qstr *d_name, struct inode *dir, struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err) { unsigned count, indirect; struct dx_entry *at, *entries, *p, *q, *m; struct dx_root *root; struct buffer_head *bh; struct dx_frame *frame = frame_in; u32 hash; frame->bh = NULL; if (!(bh = ext4_bread (NULL,dir, 0, 0, err))) goto fail; root = (struct dx_root *) bh->b_data; if (root->info.hash_version != DX_HASH_TEA && root->info.hash_version != DX_HASH_HALF_MD4 && root->info.hash_version != DX_HASH_LEGACY) { ext4_warning(dir->i_sb, __func__, "Unrecognised inode hash code %d", root->info.hash_version); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } hinfo->hash_version = root->info.hash_version; if (hinfo->hash_version <= DX_HASH_TEA) hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed; if (d_name) ext4fs_dirhash(d_name->name, d_name->len, hinfo); hash = hinfo->hash; if (root->info.unused_flags & 1) { ext4_warning(dir->i_sb, __func__, "Unimplemented inode hash flags: %#06x", root->info.unused_flags); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } if ((indirect = root->info.indirect_levels) > 1) { ext4_warning(dir->i_sb, __func__, "Unimplemented inode hash depth: %#06x", root->info.indirect_levels); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } entries = (struct dx_entry *) (((char *)&root->info) + root->info.info_length); if (dx_get_limit(entries) != dx_root_limit(dir, root->info.info_length)) { ext4_warning(dir->i_sb, __func__, "dx entry: limit != root limit"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail; } dxtrace(printk("Look up %x", hash)); while (1) { count = dx_get_count(entries); if (!count || count > dx_get_limit(entries)) { ext4_warning(dir->i_sb, __func__, "dx entry: no count or count > limit"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail2; } p = entries + 1; q = entries + count - 1; while (p <= q) { m = p + (q - p)/2; dxtrace(printk(".")); if (dx_get_hash(m) > hash) q = m - 1; else p = m + 1; } if (0) // linear search cross check { unsigned n = count - 1; at = entries; while (n--) { dxtrace(printk(",")); if (dx_get_hash(++at) > hash) { at--; break; } } assert (at == p - 1); } at = p - 1; dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at))); frame->bh = bh; frame->entries = entries; frame->at = at; if (!indirect--) return frame; if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err))) goto fail2; at = entries = ((struct dx_node *) bh->b_data)->entries; if (dx_get_limit(entries) != dx_node_limit (dir)) { ext4_warning(dir->i_sb, __func__, "dx entry: limit != node limit"); brelse(bh); *err = ERR_BAD_DX_DIR; goto fail2; } frame++; frame->bh = NULL; } fail2: while (frame >= frame_in) { brelse(frame->bh); frame--; } fail: if (*err == ERR_BAD_DX_DIR) ext4_warning(dir->i_sb, __func__, "Corrupt dir inode %ld, running e2fsck is " "recommended.", dir->i_ino); return NULL; }
0
439,846
ipmi_sdr_find_sdr_bytype(struct ipmi_intf *intf, uint8_t type) { struct sdr_get_rs *header; struct sdr_record_list *e; struct sdr_record_list *head; if (!sdr_list_itr) { sdr_list_itr = ipmi_sdr_start(intf, 0); if (!sdr_list_itr) { lprintf(LOG_ERR, "Unable to open SDR for reading"); return NULL; } } head = malloc(sizeof (struct sdr_record_list)); if (!head) { lprintf(LOG_ERR, "ipmitool: malloc failure"); return NULL; } memset(head, 0, sizeof (struct sdr_record_list)); /* check what we've already read */ for (e = sdr_list_head; e; e = e->next) if (e->type == type) __sdr_list_add(head, e); /* now keep looking */ while ((header = ipmi_sdr_get_next_header(intf, sdr_list_itr))) { uint8_t *rec; struct sdr_record_list *sdrr; sdrr = malloc(sizeof (struct sdr_record_list)); if (!sdrr) { lprintf(LOG_ERR, "ipmitool: malloc failure"); break; } memset(sdrr, 0, sizeof (struct sdr_record_list)); sdrr->id = header->id; sdrr->type = header->type; rec = ipmi_sdr_get_record(intf, header, sdr_list_itr); if (!rec) { if (sdrr) { free(sdrr); sdrr = NULL; } continue; } switch (header->type) { case SDR_RECORD_TYPE_FULL_SENSOR: case SDR_RECORD_TYPE_COMPACT_SENSOR: sdrr->record.common = (struct sdr_record_common_sensor *) rec; break; case SDR_RECORD_TYPE_EVENTONLY_SENSOR: sdrr->record.eventonly = (struct sdr_record_eventonly_sensor *) rec; break; case SDR_RECORD_TYPE_GENERIC_DEVICE_LOCATOR: sdrr->record.genloc = (struct sdr_record_generic_locator *) rec; break; case SDR_RECORD_TYPE_FRU_DEVICE_LOCATOR: sdrr->record.fruloc = (struct sdr_record_fru_locator *) rec; break; case SDR_RECORD_TYPE_MC_DEVICE_LOCATOR: sdrr->record.mcloc = (struct sdr_record_mc_locator *) rec; break; case SDR_RECORD_TYPE_ENTITY_ASSOC: sdrr->record.entassoc = (struct sdr_record_entity_assoc *) rec; break; default: free(rec); rec = NULL; if (sdrr) { free(sdrr); sdrr = NULL; } continue; } if (header->type == type) __sdr_list_add(head, sdrr); /* add to global record list */ if (!sdr_list_head) sdr_list_head = sdrr; else sdr_list_tail->next = sdrr; sdr_list_tail = sdrr; } return head; }
0
442,757
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_clock *rp = (void *) skb->data; struct hci_cp_read_clock *cp; struct hci_conn *conn; BT_DBG("%s", hdev->name); if (skb->len < sizeof(*rp)) return; if (rp->status) return; hci_dev_lock(hdev); cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); if (!cp) goto unlock; if (cp->which == 0x00) { hdev->clock = le32_to_cpu(rp->clock); goto unlock; } conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); if (conn) { conn->clock = le32_to_cpu(rp->clock); conn->clock_accuracy = le16_to_cpu(rp->accuracy); } unlock: hci_dev_unlock(hdev); }
0
325,194
static void term_hist_add(const char *cmdline) { char *hist_entry, *new_entry; int idx; if (cmdline[0] == '\0') return; new_entry = NULL; if (term_hist_entry != -1) { /* We were editing an existing history entry: replace it */ hist_entry = term_history[term_hist_entry]; idx = term_hist_entry; if (strcmp(hist_entry, cmdline) == 0) { goto same_entry; } } /* Search cmdline in history buffers */ for (idx = 0; idx < TERM_MAX_CMDS; idx++) { hist_entry = term_history[idx]; if (hist_entry == NULL) break; if (strcmp(hist_entry, cmdline) == 0) { same_entry: new_entry = hist_entry; /* Put this entry at the end of history */ memmove(&term_history[idx], &term_history[idx + 1], &term_history[TERM_MAX_CMDS] - &term_history[idx + 1]); term_history[TERM_MAX_CMDS - 1] = NULL; for (; idx < TERM_MAX_CMDS; idx++) { if (term_history[idx] == NULL) break; } break; } } if (idx == TERM_MAX_CMDS) { /* Need to get one free slot */ free(term_history[0]); memcpy(term_history, &term_history[1], &term_history[TERM_MAX_CMDS] - &term_history[1]); term_history[TERM_MAX_CMDS - 1] = NULL; idx = TERM_MAX_CMDS - 1; } if (new_entry == NULL) new_entry = strdup(cmdline); term_history[idx] = new_entry; term_hist_entry = -1; }
0
115,020
static int snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_tempo *tempo = arg; int result; result = snd_seq_set_queue_tempo(client->number, tempo); return result < 0 ? result : 0; }
0
315,191
AppCacheResponseWriter* AppCacheUpdateJob::CreateResponseWriter() { AppCacheResponseWriter* writer = storage_->CreateResponseWriter(manifest_url_, group_->group_id()); stored_response_ids_.push_back(writer->response_id()); return writer; }
0
213,285
void OneClickSigninHelper::ShowSigninErrorBubble(Browser* browser, const std::string& error) { DCHECK(!error.empty()); browser->window()->ShowOneClickSigninBubble( BrowserWindow::ONE_CLICK_SIGNIN_BUBBLE_TYPE_BUBBLE, string16(), /* no SAML email */ UTF8ToUTF16(error), BrowserWindow::StartSyncCallback()); }
0
75,940
static int core_pre_connection(conn_rec *c, void *csd) { core_net_rec *net; apr_status_t rv; if (c->master) { return DONE; } net = apr_palloc(c->pool, sizeof(*net)); /* The Nagle algorithm says that we should delay sending partial * packets in hopes of getting more data. We don't want to do * this; we are not telnet. There are bad interactions between * persistent connections and Nagle's algorithm that have very severe * performance penalties. (Failing to disable Nagle is not much of a * problem with simple HTTP.) */ rv = apr_socket_opt_set(csd, APR_TCP_NODELAY, 1); if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) { /* expected cause is that the client disconnected already, * hence the debug level */ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(00139) "apr_socket_opt_set(APR_TCP_NODELAY)"); } /* The core filter requires the timeout mode to be set, which * incidentally sets the socket to be nonblocking. If this * is not initialized correctly, Linux - for example - will * be initially blocking, while Solaris will be non blocking * and any initial read will fail. */ rv = apr_socket_timeout_set(csd, c->base_server->timeout); if (rv != APR_SUCCESS) { /* expected cause is that the client disconnected already */ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(00140) "apr_socket_timeout_set"); } net->c = c; net->in_ctx = NULL; net->out_ctx = NULL; net->client_socket = csd; ap_set_core_module_config(net->c->conn_config, csd); /* only the master connection talks to the network */ if (c->master == NULL) { ap_add_input_filter_handle(ap_core_input_filter_handle, net, NULL, net->c); ap_add_output_filter_handle(ap_core_output_filter_handle, net, NULL, net->c); } return DONE; }
0
104,542
cmsBool CMSEXPORT cmsIT8SetPropertyUncooked(cmsHANDLE hIT8, const char* Key, const char* Buffer) { cmsIT8* it8 = (cmsIT8*) hIT8; return AddToList(it8, &GetTable(it8)->HeaderList, Key, NULL, Buffer, WRITE_UNCOOKED) != NULL; }
0
199,727
void BluetoothDeviceChromeOS::SetPasskey(uint32 passkey) { if (!pairing_context_.get()) return; pairing_context_->SetPasskey(passkey); }
0
81,259
struct inode *ext4_iget(struct super_block *sb, unsigned long ino) { struct ext4_iloc iloc; struct ext4_inode *raw_inode; struct ext4_inode_info *ei; struct inode *inode; journal_t *journal = EXT4_SB(sb)->s_journal; long ret; int block; uid_t i_uid; gid_t i_gid; projid_t i_projid; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; ei = EXT4_I(inode); iloc.bh = NULL; ret = __ext4_get_inode_loc(inode, &iloc, 0); if (ret < 0) goto bad_inode; raw_inode = ext4_raw_inode(&iloc); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > EXT4_INODE_SIZE(inode->i_sb)) { EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)", EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize, EXT4_INODE_SIZE(inode->i_sb)); ret = -EFSCORRUPTED; goto bad_inode; } } else ei->i_extra_isize = 0; /* Precompute checksum seed for inode metadata */ if (ext4_has_metadata_csum(sb)) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); __u32 csum; __le32 inum = cpu_to_le32(inode->i_ino); __le32 gen = raw_inode->i_generation; csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum)); ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, sizeof(gen)); } if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { EXT4_ERROR_INODE(inode, "checksum invalid"); ret = -EFSBADCRC; goto bad_inode; } inode->i_mode = le16_to_cpu(raw_inode->i_mode); i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_PROJECT) && EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid); else i_projid = EXT4_DEF_PROJID; if (!(test_opt(inode->i_sb, NO_UID32))) { i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; } i_uid_write(inode, i_uid); i_gid_write(inode, i_gid); ei->i_projid = make_kprojid(&init_user_ns, i_projid); set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ ei->i_inline_off = 0; ei->i_dir_start_lookup = 0; ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); /* We now have enough fields to check if the inode was active or not. * This is needed because nfsd might try to access dead inodes * the test is that same one that e2fsck uses * NeilBrown 1999oct15 */ if (inode->i_nlink == 0) { if ((inode->i_mode == 0 || !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) && ino != EXT4_BOOT_LOADER_INO) { /* this inode is deleted */ ret = -ESTALE; goto bad_inode; } /* The only unlinked inodes we let through here have * valid i_mode and are being read by the orphan * recovery code: that's fine, we're about to complete * the process of deleting those. * OR it is the EXT4_BOOT_LOADER_INO which is * not initialized on a new filesystem. */ } ei->i_flags = le32_to_cpu(raw_inode->i_flags); inode->i_blocks = ext4_inode_blocks(raw_inode, ei); ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); if (ext4_has_feature_64bit(sb)) ei->i_file_acl |= ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; inode->i_size = ext4_isize(raw_inode); ei->i_disksize = inode->i_size; #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; #endif inode->i_generation = le32_to_cpu(raw_inode->i_generation); ei->i_block_group = iloc.block_group; ei->i_last_alloc_group = ~0; /* * NOTE! The in-memory inode i_data array is in little-endian order * even on big-endian machines: we do NOT byteswap the block numbers! */ for (block = 0; block < EXT4_N_BLOCKS; block++) ei->i_data[block] = raw_inode->i_block[block]; INIT_LIST_HEAD(&ei->i_orphan); /* * Set transaction id's of transactions that have to be committed * to finish f[data]sync. We set them to currently running transaction * as we cannot be sure that the inode or some of its metadata isn't * part of the transaction - the inode could have been reclaimed and * now it is reread from disk. */ if (journal) { transaction_t *transaction; tid_t tid; read_lock(&journal->j_state_lock); if (journal->j_running_transaction) transaction = journal->j_running_transaction; else transaction = journal->j_committing_transaction; if (transaction) tid = transaction->t_tid; else tid = journal->j_commit_sequence; read_unlock(&journal->j_state_lock); ei->i_sync_tid = tid; ei->i_datasync_tid = tid; } if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { if (ei->i_extra_isize == 0) { /* The extra space is currently unused. Use it. */ ei->i_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; } else { ext4_iget_extra_inode(inode, raw_inode, ei); } } EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { inode->i_version = le32_to_cpu(raw_inode->i_disk_version); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) inode->i_version |= (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; } } ret = 0; if (ei->i_file_acl && !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", ei->i_file_acl); ret = -EFSCORRUPTED; goto bad_inode; } else if (!ext4_has_inline_data(inode)) { if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || (S_ISLNK(inode->i_mode) && !ext4_inode_is_fast_symlink(inode)))) /* Validate extent which is part of inode */ ret = ext4_ext_check_inode(inode); } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || (S_ISLNK(inode->i_mode) && !ext4_inode_is_fast_symlink(inode))) { /* Validate block references which are part of inode */ ret = ext4_ind_check_inode(inode); } } if (ret) goto bad_inode; if (S_ISREG(inode->i_mode)) { inode->i_op = &ext4_file_inode_operations; inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &ext4_dir_inode_operations; inode->i_fop = &ext4_dir_operations; } else if (S_ISLNK(inode->i_mode)) { if (ext4_encrypted_inode(inode)) { inode->i_op = &ext4_encrypted_symlink_inode_operations; ext4_set_aops(inode); } else if (ext4_inode_is_fast_symlink(inode)) { inode->i_link = (char *)ei->i_data; inode->i_op = &ext4_fast_symlink_inode_operations; nd_terminate_link(ei->i_data, inode->i_size, sizeof(ei->i_data) - 1); } else { inode->i_op = &ext4_symlink_inode_operations; ext4_set_aops(inode); } inode_nohighmem(inode); } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { inode->i_op = &ext4_special_inode_operations; if (raw_inode->i_block[0]) init_special_inode(inode, inode->i_mode, old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); else init_special_inode(inode, inode->i_mode, new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); } else if (ino == EXT4_BOOT_LOADER_INO) { make_bad_inode(inode); } else { ret = -EFSCORRUPTED; EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); goto bad_inode; } brelse(iloc.bh); ext4_set_inode_flags(inode); unlock_new_inode(inode); return inode; bad_inode: brelse(iloc.bh); iget_failed(inode); return ERR_PTR(ret); }
0
353,773
findTable (const char *tableName) { /* Search paths for tables */ FILE *tableFile; char *pathList; char pathEnd[2]; char trialPath[MAXSTRING]; if (tableName == NULL || tableName[0] == 0) return NULL; strcpy (trialPath, tablePath); strcat (trialPath, tableName); if ((tableFile = fopen (trialPath, "rb"))) return tableFile; pathEnd[0] = DIR_SEP; pathEnd[1] = 0; /* See if table is on environment path LOUIS_TABLEPATH */ pathList = getenv ("LOUIS_TABLEPATH"); if (pathList) while (1) { int k; int listLength; int currentListPos = 0; listLength = strlen (pathList); for (k = 0; k < listLength; k++) if (pathList[k] == ',') break; if (k == listLength || k == 0) { /* Only one file */ strcpy (trialPath, pathList); strcat (trialPath, pathEnd); strcat (trialPath, tableName); if ((tableFile = fopen (trialPath, "rb"))) break; } else { /* Compile a list of files */ strncpy (trialPath, pathList, k); trialPath[k] = 0; strcat (trialPath, pathEnd); strcat (trialPath, tableName); currentListPos = k + 1; if ((tableFile = fopen (trialPath, "rb"))) break; while (currentListPos < listLength) { for (k = currentListPos; k < listLength; k++) if (pathList[k] == ',') break; strncpy (trialPath, &pathList[currentListPos], k - currentListPos); trialPath[k - currentListPos] = 0; strcat (trialPath, pathEnd); strcat (trialPath, tableName); if ((tableFile = fopen (trialPath, "rb"))) currentListPos = k + 1; break; } } break; } if (tableFile) return tableFile; /* See if table in current directory or on a path in * the table name*/ if ((tableFile = fopen (tableName, "rb"))) return tableFile; /* See if table on dataPath. */ pathList = lou_getDataPath (); if (pathList) { strcpy (trialPath, pathList); strcat (trialPath, pathEnd); #ifdef _WIN32 strcat (trialPath, "liblouis\\tables\\"); #else strcat (trialPath, "liblouis/tables/"); #endif strcat (trialPath, tableName); if ((tableFile = fopen (trialPath, "rb"))) return tableFile; } /* See if table on installed or program path. */ #ifdef _WIN32 strcpy (trialPath, lou_getProgramPath ()); strcat (trialPath, "\\share\\liblouis\\tables\\"); #else strcpy (trialPath, TABLESDIR); strcat (trialPath, pathEnd); #endif strcat (trialPath, tableName); if ((tableFile = fopen (trialPath, "rb"))) return tableFile; return NULL; }
1
521,648
bool Field_geom::load_data_set_null(THD *thd) { Field_blob::reset(); if (!maybe_null()) { my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0), field_name, thd->get_stmt_da()->current_row_for_warning()); return true; } set_null(); set_has_explicit_value(); // Do not auto-update this field return false; }
0
226,125
static int req_aprtable2luatable_cb(void *l, const char *key, const char *value) { int t; lua_State *L = (lua_State *) l; /* [table<s,t>, table<s,s>] */ /* rstack_dump(L, RRR, "start of cb"); */ /* L is [table<s,t>, table<s,s>] */ /* build complex */ lua_getfield(L, -1, key); /* [VALUE, table<s,t>, table<s,s>] */ /* rstack_dump(L, RRR, "after getfield"); */ t = lua_type(L, -1); switch (t) { case LUA_TNIL: case LUA_TNONE:{ lua_pop(L, 1); /* [table<s,t>, table<s,s>] */ lua_newtable(L); /* [array, table<s,t>, table<s,s>] */ lua_pushnumber(L, 1); /* [1, array, table<s,t>, table<s,s>] */ lua_pushstring(L, value); /* [string, 1, array, table<s,t>, table<s,s>] */ lua_settable(L, -3); /* [array, table<s,t>, table<s,s>] */ lua_setfield(L, -2, key); /* [table<s,t>, table<s,s>] */ break; } case LUA_TTABLE:{ /* [array, table<s,t>, table<s,s>] */ int size = lua_rawlen(L, -1); lua_pushnumber(L, size + 1); /* [#, array, table<s,t>, table<s,s>] */ lua_pushstring(L, value); /* [string, #, array, table<s,t>, table<s,s>] */ lua_settable(L, -3); /* [array, table<s,t>, table<s,s>] */ lua_setfield(L, -2, key); /* [table<s,t>, table<s,s>] */ break; } } /* L is [table<s,t>, table<s,s>] */ /* build simple */ lua_getfield(L, -2, key); /* [VALUE, table<s,s>, table<s,t>] */ if (lua_isnoneornil(L, -1)) { /* only set if not already set */ lua_pop(L, 1); /* [table<s,s>, table<s,t>]] */ lua_pushstring(L, value); /* [string, table<s,s>, table<s,t>] */ lua_setfield(L, -3, key); /* [table<s,s>, table<s,t>] */ } else { lua_pop(L, 1); } return 1; }
0
403,499
static void stmt_clear_error(MYSQL_STMT *stmt) { if (stmt->last_errno) { stmt->last_errno= 0; stmt->last_error[0]= '\0'; strmov(stmt->sqlstate, not_error_sqlstate); } }
0
397,273
static int ehci_get_fetch_addr(EHCIState *s, int async) { return async ? s->a_fetch_addr : s->p_fetch_addr; }
0
281,702
void VerifyPrintPreviewFailed(bool did_fail) { bool print_preview_failed = (render_thread_.sink().GetUniqueMessageMatching( PrintHostMsg_PrintPreviewFailed::ID) != NULL); EXPECT_EQ(did_fail, print_preview_failed); }
0
291,585
GF_Err jp2h_box_size(GF_Box *s) { return GF_OK; }
0
279,773
size_t ZSTD_compress_advanced_internal( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, ZSTD_CCtx_params params) { DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (U32)srcSize); CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, params, srcSize, ZSTDb_not_buffered) ); return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); }
0
450,506
static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, struct nfs_fattr *fattr, struct iattr *sattr, struct nfs_open_context *ctx, struct nfs4_label *ilabel, struct nfs4_label *olabel) { struct nfs_server *server = NFS_SERVER(inode); __u32 bitmask[NFS4_BITMASK_SZ]; struct nfs4_state *state = ctx ? ctx->state : NULL; struct nfs_setattrargs arg = { .fh = NFS_FH(inode), .iap = sattr, .server = server, .bitmask = bitmask, .label = ilabel, }; struct nfs_setattrres res = { .fattr = fattr, .label = olabel, .server = server, }; struct nfs4_exception exception = { .state = state, .inode = inode, .stateid = &arg.stateid, }; int err; do { nfs4_bitmap_copy_adjust_setattr(bitmask, nfs4_bitmask(server, olabel), inode); err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx); switch (err) { case -NFS4ERR_OPENMODE: if (!(sattr->ia_valid & ATTR_SIZE)) { pr_warn_once("NFSv4: server %s is incorrectly " "applying open mode checks to " "a SETATTR that is not " "changing file size.\n", server->nfs_client->cl_hostname); } if (state && !(state->state & FMODE_WRITE)) { err = -EBADF; if (sattr->ia_valid & ATTR_OPEN) err = -EACCES; goto out; } } err = nfs4_handle_exception(server, err, &exception); } while (exception.retry); out: return err; }
0
153,048
bool kvm_is_reserved_pfn(kvm_pfn_t pfn) { if (pfn_valid(pfn)) return PageReserved(pfn_to_page(pfn)); return true; }
0
254,423
void AuthenticatorBlePowerOnManualSheetModel::OnAccept() { dialog_model()->ContinueWithFlowAfterBleAdapterPowered(); }
0
441,339
int RGWCompleteMultipart_ObjStore_S3::get_params() { int ret = RGWCompleteMultipart_ObjStore::get_params(); if (ret < 0) { return ret; } map_qs_metadata(s); return do_aws4_auth_completion(); }
0
256,883
static void print_xml_comment ( FILE * xml_file , size_t len , const char * comment_string ) { const char * end ; fputs ( "<!-- " , xml_file ) ; for ( end = comment_string + len ; comment_string != end ; comment_string ++ ) { switch ( * comment_string ) { case '-' : if ( * ( comment_string + 1 ) == '-' ) break ; default : fputc ( * comment_string , xml_file ) ; break ; } } fputs ( " -->\n" , xml_file ) ; check_io ( xml_file ) ; }
0
521,873
bool Item_func::count_string_result_length(enum_field_types field_type_arg, Item **items, uint nitems) { if (agg_arg_charsets_for_string_result(collation, items, nitems, 1)) return true; if (is_temporal_type(field_type_arg)) count_datetime_length(field_type_arg, items, nitems); else { count_only_length(items, nitems); decimals= max_length ? NOT_FIXED_DEC : 0; } return false; }
0
282,670
void RenderThread::OnCreateNewView(gfx::NativeViewId parent_hwnd, const RendererPreferences& renderer_prefs, const WebPreferences& webkit_prefs, int32 view_id) { EnsureWebKitInitialized(); RenderView::Create( this, parent_hwnd, MSG_ROUTING_NONE, renderer_prefs, webkit_prefs, new SharedRenderViewCounter(0), view_id); }
0
380,841
XMLRPC_VALUE XMLRPC_CreateValueDouble(const char* id, double d) { XMLRPC_VALUE val = XMLRPC_CreateValueEmpty(); if(val) { XMLRPC_SetValueDouble(val, d); if(id) { XMLRPC_SetValueID(val, id, 0); } } return val; }
0
139,073
static void *counter_func(void *arg){ { set_pid_priority(0,SCHED_FIFO,sched_get_priority_min(SCHED_FIFO),"Unable to set SCHED_FIFO for %d (\"%s\"). (%s)", "the counter_func"); } for(;;){ counter++; if(verbose) print_error(stderr,"counter set to %d",counter); sleep(increasetime); } return NULL; }
0
153,958
static struct hash_cell *__get_dev_cell(uint64_t dev) { struct mapped_device *md; struct hash_cell *hc; md = dm_get_md(huge_decode_dev(dev)); if (!md) return NULL; hc = dm_get_mdptr(md); if (!hc) { dm_put(md); return NULL; } return hc; }
0
171,272
void GLES2Implementation::GetTransformFeedbackVaryingsCHROMIUM(GLuint program, GLsizei bufsize, GLsizei* size, void* info) { GPU_CLIENT_SINGLE_THREAD_CHECK(); if (bufsize < 0) { SetGLError(GL_INVALID_VALUE, "glGetTransformFeedbackVaryingsCHROMIUM", "bufsize less than 0."); return; } if (size == nullptr) { SetGLError(GL_INVALID_VALUE, "glGetTransformFeedbackVaryingsCHROMIUM", "size is null."); return; } DCHECK_EQ(0, *size); std::vector<int8_t> result; GetTransformFeedbackVaryingsCHROMIUMHelper(program, &result); if (result.empty()) { return; } *size = result.size(); if (!info) { return; } if (static_cast<size_t>(bufsize) < result.size()) { SetGLError(GL_INVALID_OPERATION, "glGetTransformFeedbackVaryingsCHROMIUM", "bufsize is too small for result."); return; } memcpy(info, &result[0], result.size()); }
0
90,433
Word enqueueSharedQueueHandler(void* raw_context, Word token, Word data_ptr, Word data_size) { auto context = WASM_CONTEXT(raw_context); auto data = context->wasmVm()->getMemory(data_ptr.u64_, data_size.u64_); if (!data) { return wasmResultToWord(WasmResult::InvalidMemoryAccess); } return wasmResultToWord(context->enqueueSharedQueue(token.u32(), data.value())); }
0
435,201
psutil_boot_time(PyObject *self, PyObject *args) { // fetch sysctl "kern.boottime" static int request[2] = { CTL_KERN, KERN_BOOTTIME }; struct timeval boottime; size_t len = sizeof(boottime); if (sysctl(request, 2, &boottime, &len, NULL, 0) == -1) return PyErr_SetFromErrno(PyExc_OSError); return Py_BuildValue("d", (double)boottime.tv_sec); }
0
201,181
static sk_sp<SkImage> unPremulSkImageToPremul(SkImage* input) { SkImageInfo info = SkImageInfo::Make(input->width(), input->height(), kN32_SkColorType, kPremul_SkAlphaType); RefPtr<Uint8Array> dstPixels = copySkImageData(input, info); if (!dstPixels) return nullptr; return newSkImageFromRaster( info, std::move(dstPixels), static_cast<unsigned>(input->width()) * info.bytesPerPixel()); }
0
68,414
static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; }
0
389,556
config_tos( config_tree *ptree ) { attr_val * tos; int item; double val; #ifdef __GNUC__ item = -1; /* quiet warning */ #endif tos = HEAD_PFIFO(ptree->orphan_cmds); for (; tos != NULL; tos = tos->link) { val = tos->value.d; switch(tos->attr) { default: INSIST(0); break; case T_Ceiling: if (val > STRATUM_UNSPEC - 1) { msyslog(LOG_WARNING, "Using maximum tos ceiling %d, %g requested", STRATUM_UNSPEC - 1, val); val = STRATUM_UNSPEC - 1; } item = PROTO_CEILING; break; case T_Floor: item = PROTO_FLOOR; break; case T_Cohort: item = PROTO_COHORT; break; case T_Orphan: item = PROTO_ORPHAN; break; case T_Orphanwait: item = PROTO_ORPHWAIT; break; case T_Mindist: item = PROTO_MINDISP; break; case T_Maxdist: item = PROTO_MAXDIST; break; case T_Minclock: item = PROTO_MINCLOCK; break; case T_Maxclock: item = PROTO_MAXCLOCK; break; case T_Minsane: item = PROTO_MINSANE; break; case T_Beacon: item = PROTO_BEACON; break; } proto_config(item, 0, val, NULL); } }
0
351,141
static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) { struct ieee80211_sub_if_data *sdata = rx->sdata; struct sk_buff *skb = rx->skb; struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); bool multicast = is_multicast_ether_addr(hdr->addr1); switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: if (!bssid && !sdata->u.mgd.use_4addr) return false; if (multicast) return true; return ether_addr_equal(sdata->vif.addr, hdr->addr1); case NL80211_IFTYPE_ADHOC: if (!bssid) return false; if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) return false; if (ieee80211_is_beacon(hdr->frame_control)) return true; if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) return false; if (!multicast && !ether_addr_equal(sdata->vif.addr, hdr->addr1)) return false; if (!rx->sta) { int rate_idx; if (status->encoding != RX_ENC_LEGACY) rate_idx = 0; /* TODO: HT/VHT rates */ else rate_idx = status->rate_idx; ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, BIT(rate_idx)); } return true; case NL80211_IFTYPE_OCB: if (!bssid) return false; if (!ieee80211_is_data_present(hdr->frame_control)) return false; if (!is_broadcast_ether_addr(bssid)) return false; if (!multicast && !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) return false; if (!rx->sta) { int rate_idx; if (status->encoding != RX_ENC_LEGACY) rate_idx = 0; /* TODO: HT rates */ else rate_idx = status->rate_idx; ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, BIT(rate_idx)); } return true; case NL80211_IFTYPE_MESH_POINT: if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) return false; if (multicast) return true; return ether_addr_equal(sdata->vif.addr, hdr->addr1); case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_AP: if (!bssid) return ether_addr_equal(sdata->vif.addr, hdr->addr1); if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { /* * Accept public action frames even when the * BSSID doesn't match, this is used for P2P * and location updates. Note that mac80211 * itself never looks at these frames. */ if (!multicast && !ether_addr_equal(sdata->vif.addr, hdr->addr1)) return false; if (ieee80211_is_public_action(hdr, skb->len)) return true; return ieee80211_is_beacon(hdr->frame_control); } if (!ieee80211_has_tods(hdr->frame_control)) { /* ignore data frames to TDLS-peers */ if (ieee80211_is_data(hdr->frame_control)) return false; /* ignore action frames to TDLS-peers */ if (ieee80211_is_action(hdr->frame_control) && !is_broadcast_ether_addr(bssid) && !ether_addr_equal(bssid, hdr->addr1)) return false; } /* * 802.11-2016 Table 9-26 says that for data frames, A1 must be * the BSSID - we've checked that already but may have accepted * the wildcard (ff:ff:ff:ff:ff:ff). * * It also says: * The BSSID of the Data frame is determined as follows: * a) If the STA is contained within an AP or is associated * with an AP, the BSSID is the address currently in use * by the STA contained in the AP. * * So we should not accept data frames with an address that's * multicast. * * Accepting it also opens a security problem because stations * could encrypt it with the GTK and inject traffic that way. */ if (ieee80211_is_data(hdr->frame_control) && multicast) return false; return true; case NL80211_IFTYPE_WDS: if (bssid || !ieee80211_is_data(hdr->frame_control)) return false; return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2); case NL80211_IFTYPE_P2P_DEVICE: return ieee80211_is_public_action(hdr, skb->len) || ieee80211_is_probe_req(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control) || ieee80211_is_beacon(hdr->frame_control); case NL80211_IFTYPE_NAN: /* Currently no frames on NAN interface are allowed */ return false; default: break; } WARN_ON_ONCE(1); return false; }
1
99,724
static void binder_deferred_flush(struct binder_proc *proc) { struct rb_node *n; int wake_count = 0; binder_inner_proc_lock(proc); for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); thread->looper_need_return = true; if (thread->looper & BINDER_LOOPER_STATE_WAITING) { wake_up_interruptible(&thread->wait); wake_count++; } } binder_inner_proc_unlock(proc); binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_flush: %d woke %d threads\n", proc->pid, wake_count); }
0
39,035
static int opt_show_entries(void *optctx, const char *opt, const char *arg) { const char *p = arg; int ret = 0; while (*p) { AVDictionary *entries = NULL; char *section_name = av_get_token(&p, "=:"); int show_all_entries = 0; if (!section_name) { av_log(NULL, AV_LOG_ERROR, "Missing section name for option '%s'\n", opt); return AVERROR(EINVAL); } if (*p == '=') { p++; while (*p && *p != ':') { char *entry = av_get_token(&p, ",:"); if (!entry) break; av_log(NULL, AV_LOG_VERBOSE, "Adding '%s' to the entries to show in section '%s'\n", entry, section_name); av_dict_set(&entries, entry, "", AV_DICT_DONT_STRDUP_KEY); if (*p == ',') p++; } } else { show_all_entries = 1; } ret = match_section(section_name, show_all_entries, entries); if (ret == 0) { av_log(NULL, AV_LOG_ERROR, "No match for section '%s'\n", section_name); ret = AVERROR(EINVAL); } av_dict_free(&entries); av_free(section_name); if (ret <= 0) break; if (*p) p++; } return ret; }
0
453,082
static MagickBooleanType Classify(Image *image,short **extrema, const double cluster_threshold, const double weighting_exponent,const MagickBooleanType verbose, ExceptionInfo *exception) { #define SegmentImageTag "Segment/Image" #define ThrowClassifyException(severity,tag,label) \ {\ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) \ { \ next_cluster=cluster->next; \ cluster=(Cluster *) RelinquishMagickMemory(cluster); \ } \ if (squares != (double *) NULL) \ { \ squares-=255; \ free_squares=squares; \ free_squares=(double *) RelinquishMagickMemory(free_squares); \ } \ ThrowBinaryException(severity,tag,label); \ } CacheView *image_view; Cluster *cluster, *head, *last_cluster, *next_cluster; ExtentPacket blue, green, red; MagickOffsetType progress; double *free_squares; MagickStatusType status; register ssize_t i; register double *squares; size_t number_clusters; ssize_t count, y; /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; squares=(double *) NULL; (void) memset(&red,0,sizeof(red)); (void) memset(&green,0,sizeof(green)); (void) memset(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. */ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. */ status=MagickTrue; count=0; progress=0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. */ count++; cluster->red.center+=(double) ScaleQuantumToChar( GetPixelRed(image,p)); cluster->green.center+=(double) ScaleQuantumToChar( GetPixelGreen(image,p)); cluster->blue.center+=(double) ScaleQuantumToChar( GetPixelBlue(image,p)); cluster->count++; break; } p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } number_clusters=(size_t) count; if (verbose != MagickFalse) { /* Print cluster statistics. */ (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n"); (void) FormatLocaleFile(stdout,"===================\n\n"); (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double) cluster_threshold); (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double) weighting_exponent); (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n", (double) number_clusters); /* Print the total number of points per cluster. */ (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n"); (void) FormatLocaleFile(stdout,"=============================\n\n"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double) cluster->id,(double) cluster->count); /* Print the cluster extents. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"================"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout, "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double) cluster->red.left,(double) cluster->red.right,(double) cluster->green.left,(double) cluster->green.right,(double) cluster->blue.left,(double) cluster->blue.right); } /* Print the cluster center values. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"====================="); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout,"%g %g %g\n",(double) cluster->red.center,(double) cluster->green.center,(double) cluster->blue.center); } (void) FormatLocaleFile(stdout,"\n"); } if (number_clusters > 256) ThrowClassifyException(ImageError,"TooManyClusters",image->filename); /* Speed up distance calculations. */ squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares)); if (squares == (double *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); squares+=255; for (i=(-255); i <= 255; i++) squares[i]=(double) i*(double) i; /* Allocate image colormap. */ if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); i=0; for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char) (cluster->red.center+0.5)); image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char) (cluster->green.center+0.5)); image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char) (cluster->blue.center+0.5)); i++; } /* Do course grain classes. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Cluster *clust; register const PixelInfo *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,(Quantum) 0,q); for (clust=head; clust != (Cluster *) NULL; clust=clust->next) { if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >= (clust->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <= (clust->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >= (clust->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <= (clust->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >= (clust->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <= (clust->blue.right+SafeMargin))) { /* Classify this pixel. */ SetPixelIndex(image,(Quantum) clust->id,q); break; } } if (clust == (Cluster *) NULL) { double distance_squared, local_minima, numerator, ratio, sum; register ssize_t j, k; /* Compute fuzzy membership. */ local_minima=0.0; for (j=0; j < (ssize_t) image->colors; j++) { sum=0.0; p=image->colormap+j; distance_squared=squares[(ssize_t) ScaleQuantumToChar( GetPixelRed(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->blue))]; numerator=distance_squared; for (k=0; k < (ssize_t) image->colors; k++) { p=image->colormap+k; distance_squared=squares[(ssize_t) ScaleQuantumToChar( GetPixelRed(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[ (ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[ (ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->blue))]; ratio=numerator/distance_squared; sum+=SegmentPower(ratio); } if ((sum != 0.0) && ((1.0/sum) > local_minima)) { /* Classify this pixel. */ local_minima=1.0/sum; SetPixelIndex(image,(Quantum) j,q); } } } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status&=SyncImage(image,exception); /* Relinquish resources. */ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } squares-=255; free_squares=squares; free_squares=(double *) RelinquishMagickMemory(free_squares); return(MagickTrue); }
0
227,643
PPVarArrayFromNPVariantArray::~PPVarArrayFromNPVariantArray() { for (size_t i = 0; i < size_; i++) Var::PluginReleasePPVar(array_[i]); }
0
251,022
static void *t_start(struct seq_file *m, loff_t *pos) { struct trace_array *tr = m->private; struct tracer *t; loff_t l = 0; mutex_lock(&trace_types_lock); t = get_tracer_for_array(tr, trace_types); for (; t && l < *pos; t = t_next(m, t, &l)) ; return t; }
0
425,417
static void __iommu_flush_context(struct intel_iommu *iommu, u16 did, u16 source_id, u8 function_mask, u64 type) { u64 val = 0; unsigned long flag; switch (type) { case DMA_CCMD_GLOBAL_INVL: val = DMA_CCMD_GLOBAL_INVL; break; case DMA_CCMD_DOMAIN_INVL: val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did); break; case DMA_CCMD_DEVICE_INVL: val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did) | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask); break; default: BUG(); } val |= DMA_CCMD_ICC; raw_spin_lock_irqsave(&iommu->register_lock, flag); dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); /* Make sure hardware complete it */ IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, dmar_readq, (!(val & DMA_CCMD_ICC)), val); raw_spin_unlock_irqrestore(&iommu->register_lock, flag); }
0
111,299
int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb) { struct sock *sk, *prev = NULL; int ret = NET_RX_SUCCESS; u16 pan_id, short_addr; /* Data frame processing */ BUG_ON(dev->type != ARPHRD_IEEE802154); pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev); read_lock(&dgram_lock); sk_for_each(sk, &dgram_head) { if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr, dgram_sk(sk))) { if (prev) { struct sk_buff *clone; clone = skb_clone(skb, GFP_ATOMIC); if (clone) dgram_rcv_skb(prev, clone); } prev = sk; } } if (prev) dgram_rcv_skb(prev, skb); else { kfree_skb(skb); ret = NET_RX_DROP; } read_unlock(&dgram_lock); return ret; }
0
334,650
static always_inline void gen_op_subfo_64 (void) { gen_op_move_T2_T0(); gen_op_subf(); gen_op_check_subfo_64(); }
1
285,840
vmxnet3_on_rx_done_update_stats(VMXNET3State *s, int qidx, Vmxnet3PktStatus status) { struct UPT1_RxStats *stats = &s->rxq_descr[qidx].rxq_stats; size_t tot_len = vmxnet_rx_pkt_get_total_len(s->rx_pkt); switch (status) { case VMXNET3_PKT_STATUS_OUT_OF_BUF: stats->pktsRxOutOfBuf++; break; case VMXNET3_PKT_STATUS_ERROR: stats->pktsRxError++; break; case VMXNET3_PKT_STATUS_OK: switch (vmxnet_rx_pkt_get_packet_type(s->rx_pkt)) { case ETH_PKT_BCAST: stats->bcastPktsRxOK++; stats->bcastBytesRxOK += tot_len; break; case ETH_PKT_MCAST: stats->mcastPktsRxOK++; stats->mcastBytesRxOK += tot_len; break; case ETH_PKT_UCAST: stats->ucastPktsRxOK++; stats->ucastBytesRxOK += tot_len; break; default: g_assert_not_reached(); } if (tot_len > s->mtu) { stats->LROPktsRxOK++; stats->LROBytesRxOK += tot_len; } break; default: g_assert_not_reached(); } }
0
343,215
void mips_cpu_do_interrupt(CPUState *cs) { #if !defined(CONFIG_USER_ONLY) MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; target_ulong offset; int cause = -1; const char *name; if (qemu_log_enabled() && cs->exception_index != EXCP_EXT_INTERRUPT) { if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) { name = "unknown"; } else { name = excp_names[cs->exception_index]; } qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " %s exception\n", __func__, env->active_tc.PC, env->CP0_EPC, name); } if (cs->exception_index == EXCP_EXT_INTERRUPT && (env->hflags & MIPS_HFLAG_DM)) { cs->exception_index = EXCP_DINT; } offset = 0x180; switch (cs->exception_index) { case EXCP_DSS: env->CP0_Debug |= 1 << CP0DB_DSS; /* Debug single step cannot be raised inside a delay slot and resume will always occur on the next instruction (but we assume the pc has always been updated during code translation). */ env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16); goto enter_debug_mode; case EXCP_DINT: env->CP0_Debug |= 1 << CP0DB_DINT; goto set_DEPC; case EXCP_DIB: env->CP0_Debug |= 1 << CP0DB_DIB; goto set_DEPC; case EXCP_DBp: env->CP0_Debug |= 1 << CP0DB_DBp; goto set_DEPC; case EXCP_DDBS: env->CP0_Debug |= 1 << CP0DB_DDBS; goto set_DEPC; case EXCP_DDBL: env->CP0_Debug |= 1 << CP0DB_DDBL; set_DEPC: env->CP0_DEPC = exception_resume_pc(env); env->hflags &= ~MIPS_HFLAG_BMASK; enter_debug_mode: env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_64 | MIPS_HFLAG_CP0; env->hflags &= ~(MIPS_HFLAG_KSU); /* EJTAG probe trap enable is not implemented... */ if (!(env->CP0_Status & (1 << CP0St_EXL))) env->CP0_Cause &= ~(1 << CP0Ca_BD); env->active_tc.PC = (int32_t)0xBFC00480; set_hflags_for_handler(env); break; case EXCP_RESET: cpu_reset(CPU(cpu)); break; case EXCP_SRESET: env->CP0_Status |= (1 << CP0St_SR); memset(env->CP0_WatchLo, 0, sizeof(*env->CP0_WatchLo)); goto set_error_EPC; case EXCP_NMI: env->CP0_Status |= (1 << CP0St_NMI); set_error_EPC: env->CP0_ErrorEPC = exception_resume_pc(env); env->hflags &= ~MIPS_HFLAG_BMASK; env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV); env->hflags |= MIPS_HFLAG_64 | MIPS_HFLAG_CP0; env->hflags &= ~(MIPS_HFLAG_KSU); if (!(env->CP0_Status & (1 << CP0St_EXL))) env->CP0_Cause &= ~(1 << CP0Ca_BD); env->active_tc.PC = (int32_t)0xBFC00000; set_hflags_for_handler(env); break; case EXCP_EXT_INTERRUPT: cause = 0; if (env->CP0_Cause & (1 << CP0Ca_IV)) offset = 0x200; if (env->CP0_Config3 & ((1 << CP0C3_VInt) | (1 << CP0C3_VEIC))) { /* Vectored Interrupts. */ unsigned int spacing; unsigned int vector; unsigned int pending = (env->CP0_Cause & CP0Ca_IP_mask) >> 8; pending &= env->CP0_Status >> 8; /* Compute the Vector Spacing. */ spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & ((1 << 6) - 1); spacing <<= 5; if (env->CP0_Config3 & (1 << CP0C3_VInt)) { /* For VInt mode, the MIPS computes the vector internally. */ for (vector = 7; vector > 0; vector--) { if (pending & (1 << vector)) { /* Found it. */ break; } } } else { /* For VEIC mode, the external interrupt controller feeds the vector through the CP0Cause IP lines. */ vector = pending; } offset = 0x200 + vector * spacing; } goto set_EPC; case EXCP_LTLBL: cause = 1; goto set_EPC; case EXCP_TLBL: cause = 2; if (env->error_code == 1 && !(env->CP0_Status & (1 << CP0St_EXL))) { #if defined(TARGET_MIPS64) int R = env->CP0_BadVAddr >> 62; int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) && (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) offset = 0x080; else #endif offset = 0x000; } goto set_EPC; case EXCP_TLBS: cause = 3; if (env->error_code == 1 && !(env->CP0_Status & (1 << CP0St_EXL))) { #if defined(TARGET_MIPS64) int R = env->CP0_BadVAddr >> 62; int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) && (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) offset = 0x080; else #endif offset = 0x000; } goto set_EPC; case EXCP_AdEL: cause = 4; goto set_EPC; case EXCP_AdES: cause = 5; goto set_EPC; case EXCP_IBE: cause = 6; goto set_EPC; case EXCP_DBE: cause = 7; goto set_EPC; case EXCP_SYSCALL: cause = 8; goto set_EPC; case EXCP_BREAK: cause = 9; goto set_EPC; case EXCP_RI: cause = 10; goto set_EPC; case EXCP_CpU: cause = 11; env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) | (env->error_code << CP0Ca_CE); goto set_EPC; case EXCP_OVERFLOW: cause = 12; goto set_EPC; case EXCP_TRAP: cause = 13; goto set_EPC; case EXCP_FPE: cause = 15; goto set_EPC; case EXCP_C2E: cause = 18; goto set_EPC; case EXCP_MDMX: cause = 22; goto set_EPC; case EXCP_DWATCH: cause = 23; /* XXX: TODO: manage defered watch exceptions */ goto set_EPC; case EXCP_MCHECK: cause = 24; goto set_EPC; case EXCP_THREAD: cause = 25; goto set_EPC; case EXCP_DSPDIS: cause = 26; goto set_EPC; case EXCP_CACHE: cause = 30; if (env->CP0_Status & (1 << CP0St_BEV)) { offset = 0x100; } else { offset = 0x20000100; } set_EPC: if (!(env->CP0_Status & (1 << CP0St_EXL))) { env->CP0_EPC = exception_resume_pc(env); if (env->hflags & MIPS_HFLAG_BMASK) { env->CP0_Cause |= (1 << CP0Ca_BD); } else { env->CP0_Cause &= ~(1 << CP0Ca_BD); } env->CP0_Status |= (1 << CP0St_EXL); env->hflags |= MIPS_HFLAG_64 | MIPS_HFLAG_CP0; env->hflags &= ~(MIPS_HFLAG_KSU); } env->hflags &= ~MIPS_HFLAG_BMASK; if (env->CP0_Status & (1 << CP0St_BEV)) { env->active_tc.PC = (int32_t)0xBFC00200; } else { env->active_tc.PC = (int32_t)(env->CP0_EBase & ~0x3ff); } env->active_tc.PC += offset; set_hflags_for_handler(env); env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC); break; default: qemu_log("Invalid MIPS exception %d. Exiting\n", cs->exception_index); printf("Invalid MIPS exception %d. Exiting\n", cs->exception_index); exit(1); } if (qemu_log_enabled() && cs->exception_index != EXCP_EXT_INTERRUPT) { qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n" " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n", __func__, env->active_tc.PC, env->CP0_EPC, cause, env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr, env->CP0_DEPC); } #endif cs->exception_index = EXCP_NONE; }
1
184,560
GURL GetFileManagerMainPageUrl() { return GetFileManagerUrl("/main.html"); }
0
189,412
error::Error GLES2DecoderImpl::HandleCoverFillPathInstancedCHROMIUM( uint32_t immediate_data_size, const volatile void* cmd_data) { static const char kFunctionName[] = "glCoverFillPathInstancedCHROMIUM"; const volatile gles2::cmds::CoverFillPathInstancedCHROMIUM& c = *static_cast<const volatile gles2::cmds::CoverFillPathInstancedCHROMIUM*>( cmd_data); if (!features().chromium_path_rendering) return error::kUnknownCommand; PathCommandValidatorContext v(this, kFunctionName); GLuint num_paths = 0; GLenum path_name_type = GL_NONE; GLenum cover_mode = GL_BOUNDING_BOX_OF_BOUNDING_BOXES_CHROMIUM; GLenum transform_type = GL_NONE; if (!v.GetPathCountAndType(c, &num_paths, &path_name_type) || !v.GetCoverMode(c, &cover_mode) || !v.GetTransformType(c, &transform_type)) return v.error(); if (num_paths == 0) return error::kNoError; std::unique_ptr<GLuint[]> paths; if (!v.GetPathNameData(c, num_paths, path_name_type, &paths)) return v.error(); const GLfloat* transforms = nullptr; if (!v.GetTransforms(c, num_paths, transform_type, &transforms)) return v.error(); if (!CheckBoundDrawFramebufferValid(kFunctionName)) return error::kNoError; ApplyDirtyState(); api()->glCoverFillPathInstancedNVFn(num_paths, GL_UNSIGNED_INT, paths.get(), 0, cover_mode, transform_type, transforms); return error::kNoError; }
0
129,725
void rose_loopback_init(void) { skb_queue_head_init(&loopback_queue); init_timer(&loopback_timer); }
0
280,238
String8 Parcel::readString8() const { int32_t size = readInt32(); if (size > 0 && size < INT32_MAX) { const char* str = (const char*)readInplace(size+1); if (str) return String8(str, size); } return String8(); }
0
409,759
bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu) { return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); }
0
387,828
png_set_filter(png_structrp png_ptr, int method, int filters) { png_debug(1, "in png_set_filter"); if (png_ptr == NULL) return; #ifdef PNG_MNG_FEATURES_SUPPORTED if ((png_ptr->mng_features_permitted & PNG_FLAG_MNG_FILTER_64) != 0 && (method == PNG_INTRAPIXEL_DIFFERENCING)) method = PNG_FILTER_TYPE_BASE; #endif if (method == PNG_FILTER_TYPE_BASE) { switch (filters & (PNG_ALL_FILTERS | 0x07)) { #ifdef PNG_WRITE_FILTER_SUPPORTED case 5: case 6: case 7: png_app_error(png_ptr, "Unknown row filter for method 0"); /* FALL THROUGH */ #endif /* WRITE_FILTER */ case PNG_FILTER_VALUE_NONE: png_ptr->do_filter = PNG_FILTER_NONE; break; #ifdef PNG_WRITE_FILTER_SUPPORTED case PNG_FILTER_VALUE_SUB: png_ptr->do_filter = PNG_FILTER_SUB; break; case PNG_FILTER_VALUE_UP: png_ptr->do_filter = PNG_FILTER_UP; break; case PNG_FILTER_VALUE_AVG: png_ptr->do_filter = PNG_FILTER_AVG; break; case PNG_FILTER_VALUE_PAETH: png_ptr->do_filter = PNG_FILTER_PAETH; break; default: png_ptr->do_filter = (png_byte)filters; break; #else default: png_app_error(png_ptr, "Unknown row filter for method 0"); #endif /* WRITE_FILTER */ } #ifdef PNG_WRITE_FILTER_SUPPORTED /* If we have allocated the row_buf, this means we have already started * with the image and we should have allocated all of the filter buffers * that have been selected. If prev_row isn't already allocated, then * it is too late to start using the filters that need it, since we * will be missing the data in the previous row. If an application * wants to start and stop using particular filters during compression, * it should start out with all of the filters, and then remove them * or add them back after the start of compression. * * NOTE: this is a nasty constraint on the code, because it means that the * prev_row buffer must be maintained even if there are currently no * 'prev_row' requiring filters active. */ if (png_ptr->row_buf != NULL) { int num_filters; png_alloc_size_t buf_size; /* Repeat the checks in png_write_start_row; 1 pixel high or wide * images cannot benefit from certain filters. If this isn't done here * the check below will fire on 1 pixel high images. */ if (png_ptr->height == 1) filters &= ~(PNG_FILTER_UP|PNG_FILTER_AVG|PNG_FILTER_PAETH); if (png_ptr->width == 1) filters &= ~(PNG_FILTER_SUB|PNG_FILTER_AVG|PNG_FILTER_PAETH); if ((filters & (PNG_FILTER_UP|PNG_FILTER_AVG|PNG_FILTER_PAETH)) != 0 && png_ptr->prev_row == NULL) { /* This is the error case, however it is benign - the previous row * is not available so the filter can't be used. Just warn here. */ png_app_warning(png_ptr, "png_set_filter: UP/AVG/PAETH cannot be added after start"); filters &= ~(PNG_FILTER_UP|PNG_FILTER_AVG|PNG_FILTER_PAETH); } num_filters = 0; if (filters & PNG_FILTER_SUB) num_filters++; if (filters & PNG_FILTER_UP) num_filters++; if (filters & PNG_FILTER_AVG) num_filters++; if (filters & PNG_FILTER_PAETH) num_filters++; /* Allocate needed row buffers if they have not already been * allocated. */ buf_size = PNG_ROWBYTES(png_ptr->usr_channels * png_ptr->usr_bit_depth, png_ptr->width) + 1; if (png_ptr->try_row == NULL) png_ptr->try_row = png_voidcast(png_bytep, png_malloc(png_ptr, buf_size)); if (num_filters > 1) { if (png_ptr->tst_row == NULL) png_ptr->tst_row = png_voidcast(png_bytep, png_malloc(png_ptr, buf_size)); } } png_ptr->do_filter = (png_byte)filters; #endif } else png_error(png_ptr, "Unknown custom filter method"); }
0
438,362
VideoTrack::~VideoTrack() { delete colour_; delete projection_; }
0
245,675
bool NPJSObject::NP_Construct(NPObject* npObject, const NPVariant* arguments, uint32_t argumentCount, NPVariant* result) { return toNPJSObject(npObject)->construct(arguments, argumentCount, result); }
0
358,343
static int vmx_vcpu_setup(struct vcpu_vmx *vmx) { u32 host_sysenter_cs, msr_low, msr_high; u32 junk; u64 host_pat, tsc_this, tsc_base; unsigned long a; struct descriptor_table dt; int i; unsigned long kvm_vmx_return; u32 exec_control; /* I/O */ vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a)); vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b)); if (cpu_has_vmx_msr_bitmap()) vmcs_write64(MSR_BITMAP, page_to_phys(vmx_msr_bitmap)); vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ /* Control */ vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmcs_config.pin_based_exec_ctrl); exec_control = vmcs_config.cpu_based_exec_ctrl; if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { exec_control &= ~CPU_BASED_TPR_SHADOW; #ifdef CONFIG_X86_64 exec_control |= CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING; #endif } if (!vm_need_ept()) exec_control |= CPU_BASED_CR3_STORE_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_INVLPG_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); if (cpu_has_secondary_exec_ctrls()) { exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; if (vmx->vpid == 0) exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; if (!vm_need_ept()) exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf); vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf); vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */ vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */ vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */ vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ #ifdef CONFIG_X86_64 rdmsrl(MSR_FS_BASE, a); vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ rdmsrl(MSR_GS_BASE, a); vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */ #else vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ #endif vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ kvm_get_idt(&dt); vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */ asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk); vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs); rdmsrl(MSR_IA32_SYSENTER_ESP, a); vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */ rdmsrl(MSR_IA32_SYSENTER_EIP, a); vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */ if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); host_pat = msr_low | ((u64) msr_high << 32); vmcs_write64(HOST_IA32_PAT, host_pat); } if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); host_pat = msr_low | ((u64) msr_high << 32); /* Write the default value follow host pat */ vmcs_write64(GUEST_IA32_PAT, host_pat); /* Keep arch.pat sync with GUEST_IA32_PAT */ vmx->vcpu.arch.pat = host_pat; } for (i = 0; i < NR_VMX_MSR; ++i) { u32 index = vmx_msr_index[i]; u32 data_low, data_high; u64 data; int j = vmx->nmsrs; if (rdmsr_safe(index, &data_low, &data_high) < 0) continue; if (wrmsr_safe(index, data_low, data_high) < 0) continue; data = data_low | ((u64)data_high << 32); vmx->host_msrs[j].index = index; vmx->host_msrs[j].reserved = 0; vmx->host_msrs[j].data = data; vmx->guest_msrs[j] = vmx->host_msrs[j]; ++vmx->nmsrs; } vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); /* 22.2.1, 20.8.1 */ vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl); vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK); tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc; rdtscll(tsc_this); if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc) tsc_base = tsc_this; guest_write_tsc(0, tsc_base); return 0; }
0
291,523
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, enum tcp_synack_type synack_type) { struct inet_request_sock *ireq = inet_rsk(req); struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_txoptions *opt; struct flowi6 *fl6 = &fl->u.ip6; struct sk_buff *skb; int err = -ENOMEM; /* First, grab a route. */ if (!dst && (dst = inet6_csk_route_req(sk, fl6, req, IPPROTO_TCP)) == NULL) goto done; skb = tcp_make_synack(sk, dst, req, foc, synack_type); if (skb) { __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, &ireq->ir_v6_rmt_addr); fl6->daddr = ireq->ir_v6_rmt_addr; if (np->repflow && ireq->pktopts) fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); rcu_read_lock(); opt = ireq->ipv6_opt; if (!opt) opt = rcu_dereference(np->opt); err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass); rcu_read_unlock(); err = net_xmit_eval(err); } done: return err; }
0
483,870
static int memory_block_change_state(struct memory_block *mem, unsigned long to_state, unsigned long from_state_req) { int ret = 0; if (mem->state != from_state_req) return -EINVAL; if (to_state == MEM_OFFLINE) mem->state = MEM_GOING_OFFLINE; ret = memory_block_action(mem->start_section_nr, to_state, mem->online_type, mem->nid); mem->state = ret ? from_state_req : to_state; return ret; }
0
475,176
static bool check_transfer_iovec(struct vrend_resource *res, const struct vrend_transfer_info *info) { return (info->iovec && info->iovec_cnt) || res->iov; }
0
32,547
void av_max_alloc(size_t max){ max_alloc_size = max; }
0
12,770
int ssl3_accept(SSL *s) { BUF_MEM *buf; unsigned long alg_k,Time=(unsigned long)time(NULL); void (*cb)(const SSL *ssl,int type,int val)=NULL; int ret= -1; int new_state,state,skip=0; RAND_add(&Time,sizeof(Time),0); ERR_clear_error(); clear_sys_error(); if (s->info_callback != NULL) cb=s->info_callback; else if (s->ctx->info_callback != NULL) cb=s->ctx->info_callback; /* init things to blank */ s->in_handshake++; if (!SSL_in_init(s) || SSL_in_before(s)) SSL_clear(s); if (s->cert == NULL) { SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_NO_CERTIFICATE_SET); return(-1); } #ifndef OPENSSL_NO_HEARTBEATS /* If we're awaiting a HeartbeatResponse, pretend we * already got and don't await it anymore, because * Heartbeats don't make sense during handshakes anyway. */ if (s->tlsext_hb_pending) { s->tlsext_hb_pending = 0; s->tlsext_hb_seq++; } #endif for (;;) { state=s->state; switch (s->state) { case SSL_ST_RENEGOTIATE: s->renegotiate=1; /* s->state=SSL_ST_ACCEPT; */ case SSL_ST_BEFORE: case SSL_ST_ACCEPT: case SSL_ST_BEFORE|SSL_ST_ACCEPT: case SSL_ST_OK|SSL_ST_ACCEPT: s->server=1; if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_START,1); if ((s->version>>8) != 3) { SSLerr(SSL_F_SSL3_ACCEPT, ERR_R_INTERNAL_ERROR); return -1; } if (!ssl_security(s, SSL_SECOP_VERSION, 0, s->version, NULL)) { SSLerr(SSL_F_SSL3_ACCEPT, SSL_R_VERSION_TOO_LOW); return -1; } s->type=SSL_ST_ACCEPT; if (s->init_buf == NULL) { if ((buf=BUF_MEM_new()) == NULL) { ret= -1; goto end; } if (!BUF_MEM_grow(buf,SSL3_RT_MAX_PLAIN_LENGTH)) { BUF_MEM_free(buf); ret= -1; goto end; } s->init_buf=buf; } if (!ssl3_setup_buffers(s)) { ret= -1; goto end; } s->init_num=0; s->s3->flags &= ~TLS1_FLAGS_SKIP_CERT_VERIFY; s->s3->flags &= ~SSL3_FLAGS_CCS_OK; /* Should have been reset by ssl3_get_finished, too. */ s->s3->change_cipher_spec = 0; if (s->state != SSL_ST_RENEGOTIATE) { /* Ok, we now need to push on a buffering BIO so that * the output is sent in a way that TCP likes :-) */ if (!ssl_init_wbio_buffer(s,1)) { ret= -1; goto end; } ssl3_init_finished_mac(s); s->state=SSL3_ST_SR_CLNT_HELLO_A; s->ctx->stats.sess_accept++; } else if (!s->s3->send_connection_binding && !(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION)) { /* Server attempting to renegotiate with * client that doesn't support secure * renegotiation. */ SSLerr(SSL_F_SSL3_ACCEPT, SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED); ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_HANDSHAKE_FAILURE); ret = -1; goto end; } else { /* s->state == SSL_ST_RENEGOTIATE, * we will just send a HelloRequest */ s->ctx->stats.sess_accept_renegotiate++; s->state=SSL3_ST_SW_HELLO_REQ_A; } break; case SSL3_ST_SW_HELLO_REQ_A: case SSL3_ST_SW_HELLO_REQ_B: s->shutdown=0; ret=ssl3_send_hello_request(s); if (ret <= 0) goto end; s->s3->tmp.next_state=SSL3_ST_SW_HELLO_REQ_C; s->state=SSL3_ST_SW_FLUSH; s->init_num=0; ssl3_init_finished_mac(s); break; case SSL3_ST_SW_HELLO_REQ_C: s->state=SSL_ST_OK; break; case SSL3_ST_SR_CLNT_HELLO_A: case SSL3_ST_SR_CLNT_HELLO_B: case SSL3_ST_SR_CLNT_HELLO_C: ret=ssl3_get_client_hello(s); if (ret <= 0) goto end; #ifndef OPENSSL_NO_SRP s->state = SSL3_ST_SR_CLNT_HELLO_D; case SSL3_ST_SR_CLNT_HELLO_D: { int al; if ((ret = ssl_check_srp_ext_ClientHello(s,&al)) < 0) { /* callback indicates firther work to be done */ s->rwstate=SSL_X509_LOOKUP; goto end; } if (ret != SSL_ERROR_NONE) { ssl3_send_alert(s,SSL3_AL_FATAL,al); /* This is not really an error but the only means to for a client to detect whether srp is supported. */ if (al != TLS1_AD_UNKNOWN_PSK_IDENTITY) SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_CLIENTHELLO_TLSEXT); ret = SSL_TLSEXT_ERR_ALERT_FATAL; ret= -1; goto end; } } #endif s->renegotiate = 2; s->state=SSL3_ST_SW_SRVR_HELLO_A; s->init_num=0; break; case SSL3_ST_SW_SRVR_HELLO_A: case SSL3_ST_SW_SRVR_HELLO_B: ret=ssl3_send_server_hello(s); if (ret <= 0) goto end; #ifndef OPENSSL_NO_TLSEXT if (s->hit) { if (s->tlsext_ticket_expected) s->state=SSL3_ST_SW_SESSION_TICKET_A; else s->state=SSL3_ST_SW_CHANGE_A; } #else if (s->hit) s->state=SSL3_ST_SW_CHANGE_A; #endif else s->state = SSL3_ST_SW_CERT_A; s->init_num = 0; break; case SSL3_ST_SW_CERT_A: case SSL3_ST_SW_CERT_B: /* Check if it is anon DH or anon ECDH, */ /* normal PSK or KRB5 or SRP */ if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL|SSL_aKRB5|SSL_aSRP)) && !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) { ret=ssl3_send_server_certificate(s); if (ret <= 0) goto end; #ifndef OPENSSL_NO_TLSEXT if (s->tlsext_status_expected) s->state=SSL3_ST_SW_CERT_STATUS_A; else s->state=SSL3_ST_SW_KEY_EXCH_A; } else { skip = 1; s->state=SSL3_ST_SW_KEY_EXCH_A; } #else } else skip=1; s->state=SSL3_ST_SW_KEY_EXCH_A; #endif s->init_num=0; break; case SSL3_ST_SW_KEY_EXCH_A: case SSL3_ST_SW_KEY_EXCH_B: alg_k = s->s3->tmp.new_cipher->algorithm_mkey; /* clear this, it may get reset by * send_server_key_exchange */ if ((s->options & SSL_OP_EPHEMERAL_RSA) #ifndef OPENSSL_NO_KRB5 && !(alg_k & SSL_kKRB5) #endif /* OPENSSL_NO_KRB5 */ ) /* option SSL_OP_EPHEMERAL_RSA sends temporary RSA key * even when forbidden by protocol specs * (handshake may fail as clients are not required to * be able to handle this) */ s->s3->tmp.use_rsa_tmp=1; else s->s3->tmp.use_rsa_tmp=0; /* only send if a DH key exchange, fortezza or * RSA but we have a sign only certificate * * PSK: may send PSK identity hints * * For ECC ciphersuites, we send a serverKeyExchange * message only if the cipher suite is either * ECDH-anon or ECDHE. In other cases, the * server certificate contains the server's * public key for key exchange. */ if (s->s3->tmp.use_rsa_tmp /* PSK: send ServerKeyExchange if PSK identity * hint if provided */ #ifndef OPENSSL_NO_PSK || ((alg_k & SSL_kPSK) && s->ctx->psk_identity_hint) #endif #ifndef OPENSSL_NO_SRP /* SRP: send ServerKeyExchange */ || (alg_k & SSL_kSRP) #endif || (alg_k & SSL_kDHE) || (alg_k & SSL_kECDHE) || ((alg_k & SSL_kRSA) && (s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey == NULL || (SSL_C_IS_EXPORT(s->s3->tmp.new_cipher) && EVP_PKEY_size(s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey)*8 > SSL_C_EXPORT_PKEYLENGTH(s->s3->tmp.new_cipher) ) ) ) ) { ret=ssl3_send_server_key_exchange(s); if (ret <= 0) goto end; } else skip=1; s->state=SSL3_ST_SW_CERT_REQ_A; s->init_num=0; break; case SSL3_ST_SW_CERT_REQ_A: case SSL3_ST_SW_CERT_REQ_B: if (/* don't request cert unless asked for it: */ !(s->verify_mode & SSL_VERIFY_PEER) || /* if SSL_VERIFY_CLIENT_ONCE is set, * don't request cert during re-negotiation: */ ((s->session->peer != NULL) && (s->verify_mode & SSL_VERIFY_CLIENT_ONCE)) || /* never request cert in anonymous ciphersuites * (see section "Certificate request" in SSL 3 drafts * and in RFC 2246): */ ((s->s3->tmp.new_cipher->algorithm_auth & SSL_aNULL) && /* ... except when the application insists on verification * (against the specs, but s3_clnt.c accepts this for SSL 3) */ !(s->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)) || /* never request cert in Kerberos ciphersuites */ (s->s3->tmp.new_cipher->algorithm_auth & SSL_aKRB5) || /* don't request certificate for SRP auth */ (s->s3->tmp.new_cipher->algorithm_auth & SSL_aSRP) /* With normal PSK Certificates and * Certificate Requests are omitted */ || (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) { /* no cert request */ skip=1; s->s3->tmp.cert_request=0; s->state=SSL3_ST_SW_SRVR_DONE_A; if (s->s3->handshake_buffer) if (!ssl3_digest_cached_records(s)) return -1; } else { s->s3->tmp.cert_request=1; ret=ssl3_send_certificate_request(s); if (ret <= 0) goto end; #ifndef NETSCAPE_HANG_BUG s->state=SSL3_ST_SW_SRVR_DONE_A; #else s->state=SSL3_ST_SW_FLUSH; s->s3->tmp.next_state=SSL3_ST_SR_CERT_A; #endif s->init_num=0; } break; case SSL3_ST_SW_SRVR_DONE_A: case SSL3_ST_SW_SRVR_DONE_B: ret=ssl3_send_server_done(s); if (ret <= 0) goto end; s->s3->tmp.next_state=SSL3_ST_SR_CERT_A; s->state=SSL3_ST_SW_FLUSH; s->init_num=0; break; case SSL3_ST_SW_FLUSH: /* This code originally checked to see if * any data was pending using BIO_CTRL_INFO * and then flushed. This caused problems * as documented in PR#1939. The proposed * fix doesn't completely resolve this issue * as buggy implementations of BIO_CTRL_PENDING * still exist. So instead we just flush * unconditionally. */ s->rwstate=SSL_WRITING; if (BIO_flush(s->wbio) <= 0) { ret= -1; goto end; } s->rwstate=SSL_NOTHING; s->state=s->s3->tmp.next_state; break; case SSL3_ST_SR_CERT_A: case SSL3_ST_SR_CERT_B: if (s->s3->tmp.cert_request) { ret=ssl3_get_client_certificate(s); if (ret <= 0) goto end; } s->init_num=0; s->state=SSL3_ST_SR_KEY_EXCH_A; break; case SSL3_ST_SR_KEY_EXCH_A: case SSL3_ST_SR_KEY_EXCH_B: ret=ssl3_get_client_key_exchange(s); if (ret <= 0) goto end; if (ret == 2) { /* For the ECDH ciphersuites when * the client sends its ECDH pub key in * a certificate, the CertificateVerify * message is not sent. * Also for GOST ciphersuites when * the client uses its key from the certificate * for key exchange. */ #if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG) s->state=SSL3_ST_SR_FINISHED_A; #else if (s->s3->next_proto_neg_seen) s->state=SSL3_ST_SR_NEXT_PROTO_A; else s->state=SSL3_ST_SR_FINISHED_A; #endif s->init_num = 0; } else if (SSL_USE_SIGALGS(s)) { s->state=SSL3_ST_SR_CERT_VRFY_A; s->init_num=0; if (!s->session->peer) break; /* For sigalgs freeze the handshake buffer * at this point and digest cached records. */ if (!s->s3->handshake_buffer) { SSLerr(SSL_F_SSL3_ACCEPT,ERR_R_INTERNAL_ERROR); return -1; } s->s3->flags |= TLS1_FLAGS_KEEP_HANDSHAKE; if (!ssl3_digest_cached_records(s)) return -1; } else { int offset=0; int dgst_num; s->state=SSL3_ST_SR_CERT_VRFY_A; s->init_num=0; /* We need to get hashes here so if there is * a client cert, it can be verified * FIXME - digest processing for CertificateVerify * should be generalized. But it is next step */ if (s->s3->handshake_buffer) if (!ssl3_digest_cached_records(s)) return -1; for (dgst_num=0; dgst_num<SSL_MAX_DIGEST;dgst_num++) if (s->s3->handshake_dgst[dgst_num]) { int dgst_size; s->method->ssl3_enc->cert_verify_mac(s,EVP_MD_CTX_type(s->s3->handshake_dgst[dgst_num]),&(s->s3->tmp.cert_verify_md[offset])); dgst_size=EVP_MD_CTX_size(s->s3->handshake_dgst[dgst_num]); if (dgst_size < 0) { ret = -1; goto end; } offset+=dgst_size; } } break; case SSL3_ST_SR_CERT_VRFY_A: case SSL3_ST_SR_CERT_VRFY_B: /* * This *should* be the first time we enable CCS, but be * extra careful about surrounding code changes. We need * to set this here because we don't know if we're * expecting a CertificateVerify or not. */ if (!s->s3->change_cipher_spec) s->s3->flags |= SSL3_FLAGS_CCS_OK; /* we should decide if we expected this one */ ret=ssl3_get_cert_verify(s); if (ret <= 0) goto end; #if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG) s->state=SSL3_ST_SR_FINISHED_A; #else if (s->s3->next_proto_neg_seen) s->state=SSL3_ST_SR_NEXT_PROTO_A; else s->state=SSL3_ST_SR_FINISHED_A; #endif s->init_num=0; break; #if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG) case SSL3_ST_SR_NEXT_PROTO_A: case SSL3_ST_SR_NEXT_PROTO_B: /* * Enable CCS for resumed handshakes with NPN. * In a full handshake with NPN, we end up here through * SSL3_ST_SR_CERT_VRFY_B, where SSL3_FLAGS_CCS_OK was * already set. Receiving a CCS clears the flag, so make * sure not to re-enable it to ban duplicates. * s->s3->change_cipher_spec is set when a CCS is * processed in s3_pkt.c, and remains set until * the client's Finished message is read. */ if (!s->s3->change_cipher_spec) s->s3->flags |= SSL3_FLAGS_CCS_OK; ret=ssl3_get_next_proto(s); if (ret <= 0) goto end; s->init_num = 0; s->state=SSL3_ST_SR_FINISHED_A; break; #endif case SSL3_ST_SR_FINISHED_A: case SSL3_ST_SR_FINISHED_B: /* * Enable CCS for resumed handshakes without NPN. * In a full handshake, we end up here through * SSL3_ST_SR_CERT_VRFY_B, where SSL3_FLAGS_CCS_OK was * already set. Receiving a CCS clears the flag, so make * sure not to re-enable it to ban duplicates. * s->s3->change_cipher_spec is set when a CCS is * processed in s3_pkt.c, and remains set until * the client's Finished message is read. */ if (!s->s3->change_cipher_spec) s->s3->flags |= SSL3_FLAGS_CCS_OK; ret=ssl3_get_finished(s,SSL3_ST_SR_FINISHED_A, SSL3_ST_SR_FINISHED_B); if (ret <= 0) goto end; if (s->hit) s->state=SSL_ST_OK; #ifndef OPENSSL_NO_TLSEXT else if (s->tlsext_ticket_expected) s->state=SSL3_ST_SW_SESSION_TICKET_A; #endif else s->state=SSL3_ST_SW_CHANGE_A; s->init_num=0; break; #ifndef OPENSSL_NO_TLSEXT case SSL3_ST_SW_SESSION_TICKET_A: case SSL3_ST_SW_SESSION_TICKET_B: ret=ssl3_send_newsession_ticket(s); if (ret <= 0) goto end; s->state=SSL3_ST_SW_CHANGE_A; s->init_num=0; break; case SSL3_ST_SW_CERT_STATUS_A: case SSL3_ST_SW_CERT_STATUS_B: ret=ssl3_send_cert_status(s); if (ret <= 0) goto end; s->state=SSL3_ST_SW_KEY_EXCH_A; s->init_num=0; break; #endif case SSL3_ST_SW_CHANGE_A: case SSL3_ST_SW_CHANGE_B: s->session->cipher=s->s3->tmp.new_cipher; if (!s->method->ssl3_enc->setup_key_block(s)) { ret= -1; goto end; } ret=ssl3_send_change_cipher_spec(s, SSL3_ST_SW_CHANGE_A,SSL3_ST_SW_CHANGE_B); if (ret <= 0) goto end; s->state=SSL3_ST_SW_FINISHED_A; s->init_num=0; if (!s->method->ssl3_enc->change_cipher_state(s, SSL3_CHANGE_CIPHER_SERVER_WRITE)) { ret= -1; goto end; } break; case SSL3_ST_SW_FINISHED_A: case SSL3_ST_SW_FINISHED_B: ret=ssl3_send_finished(s, SSL3_ST_SW_FINISHED_A,SSL3_ST_SW_FINISHED_B, s->method->ssl3_enc->server_finished_label, s->method->ssl3_enc->server_finished_label_len); if (ret <= 0) goto end; s->state=SSL3_ST_SW_FLUSH; if (s->hit) { #if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG) s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A; #else if (s->s3->next_proto_neg_seen) { s->s3->tmp.next_state=SSL3_ST_SR_NEXT_PROTO_A; } else s->s3->tmp.next_state=SSL3_ST_SR_FINISHED_A; #endif } else s->s3->tmp.next_state=SSL_ST_OK; s->init_num=0; break; case SSL_ST_OK: /* clean a few things up */ ssl3_cleanup_key_block(s); BUF_MEM_free(s->init_buf); s->init_buf=NULL; /* remove buffering on output */ ssl_free_wbio_buffer(s); s->init_num=0; if (s->renegotiate == 2) /* skipped if we just sent a HelloRequest */ { s->renegotiate=0; s->new_session=0; ssl_update_cache(s,SSL_SESS_CACHE_SERVER); s->ctx->stats.sess_accept_good++; /* s->server=1; */ s->handshake_func=ssl3_accept; if (cb != NULL) cb(s,SSL_CB_HANDSHAKE_DONE,1); } ret = 1; goto end; /* break; */ default: SSLerr(SSL_F_SSL3_ACCEPT,SSL_R_UNKNOWN_STATE); ret= -1; goto end; /* break; */ } if (!s->s3->tmp.reuse_message && !skip) { if (s->debug) { if ((ret=BIO_flush(s->wbio)) <= 0) goto end; } if ((cb != NULL) && (s->state != state)) { new_state=s->state; s->state=state; cb(s,SSL_CB_ACCEPT_LOOP,1); s->state=new_state; } } skip=0; }
1
117,877
static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) { }
0
58,743
bool Router::Route(string_view url, ViewPtr view, const Strings& methods) { assert(view); // TODO: More error check routes_.push_back({ ToString(url), {}, view, methods }); return true; }
0
478,319
CImg<T> get_fill(const T& val0, const T& val1, const T& val2) const { return CImg<T>(_width,_height,_depth,_spectrum).fill(val0,val1,val2); }
0
132,416
static bool tcp_pause_early_retransmit(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); unsigned long delay; /* Delay early retransmit and entering fast recovery for * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples * available, or RTO is scheduled to fire first. */ if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 || (flag & FLAG_ECE) || !tp->srtt_us) return false; delay = max(usecs_to_jiffies(tp->srtt_us >> 5), msecs_to_jiffies(2)); if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) return false; inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay, TCP_RTO_MAX); return true; }
0
220,950
static int is_handler(const struct dirent *dirent) { if (strncmp(dirent->d_name, "handler_", 8)) return 0; return 1; }
0
10,234
string DecodeFile(const string& filename, int num_threads) { libvpx_test::WebMVideoSource video(filename); video.Init(); vpx_codec_dec_cfg_t cfg = {0}; cfg.threads = num_threads; libvpx_test::VP9Decoder decoder(cfg, 0); libvpx_test::MD5 md5; for (video.Begin(); video.cxdata(); video.Next()) { const vpx_codec_err_t res = decoder.DecodeFrame(video.cxdata(), video.frame_size()); if (res != VPX_CODEC_OK) { EXPECT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError(); break; } libvpx_test::DxDataIterator dec_iter = decoder.GetDxData(); const vpx_image_t *img = NULL; while ((img = dec_iter.Next())) { md5.Add(img); } } return string(md5.Get()); }
1
415,428
pwg_free_finishings( _pwg_finishings_t *f) /* I - Finishings value */ { cupsFreeOptions(f->num_options, f->options); free(f); }
0
52,620
bool close() { bool noError = true; if (isValid()) { if (zip_fclose(m_zipFile) != 0) { noError = false; } m_zipFile = nullptr; } return noError; }
0