idx
int64
func
string
target
int64
19,313
static int noise_motion_thresh ( BLOCK_SIZE bs , int increase_denoising ) { ( void ) bs ; ( void ) increase_denoising ; return 25 * 25 ; }
0
21,519
static inline void tb_hash_remove ( TranslationBlock * * ptb , TranslationBlock * tb ) { TranslationBlock * tb1 ; for ( ; ; ) { tb1 = * ptb ; if ( tb1 == tb ) { * ptb = tb1 -> phys_hash_next ; break ; } ptb = & tb1 -> phys_hash_next ; } }
0
87,089
static int init_shdr(ELFOBJ *bin) { ut32 shdr_size; ut8 shdr[sizeof (Elf_(Shdr))] = {0}; int i, j, len; if (!bin || bin->shdr) { return true; } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size < 1) { return false; } if (shdr_size > bin->size) { return false; } if (bin->ehdr.e_shoff > bin->size) { return false; } if (bin->ehdr.e_shoff + shdr_size > bin->size) { return false; } if (!(bin->shdr = calloc (1, shdr_size + 1))) { perror ("malloc (shdr)"); return false; } sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0); sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0); sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1," "SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7," "SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000," "SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0); for (i = 0; i < bin->ehdr.e_shnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr))); if (len < 1) { bprintf ("Warning: read (shdr) at 0x%"PFMT64x"\n", (ut64) bin->ehdr.e_shoff); R_FREE (bin->shdr); return false; } bin->shdr[i].sh_name = READ32 (shdr, j) bin->shdr[i].sh_type = READ32 (shdr, j) #if R_BIN_ELF64 bin->shdr[i].sh_flags = READ64 (shdr, j) bin->shdr[i].sh_addr = READ64 (shdr, j) bin->shdr[i].sh_offset = READ64 (shdr, j) bin->shdr[i].sh_size = READ64 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ64 (shdr, j) bin->shdr[i].sh_entsize = READ64 (shdr, j) #else bin->shdr[i].sh_flags = READ32 (shdr, j) bin->shdr[i].sh_addr = READ32 (shdr, j) bin->shdr[i].sh_offset = READ32 (shdr, j) bin->shdr[i].sh_size = READ32 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ32 (shdr, j) bin->shdr[i].sh_entsize = READ32 (shdr, j) 
#endif } #if R_BIN_ELF64 sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1," "SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5," "SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type" " (elf_s_flags_64)flags addr offset size link info addralign entsize", 0); #else sdb_set (bin->kv, "elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1," "SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5," "SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type" " (elf_s_flags_32)flags addr offset size link info addralign entsize", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse` // > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset` }
0
140,914
export_pamenv (void) { char **env; /* This is a copy but don't care to free as we exec later anyways. */ env = pam_getenvlist (pamh); while (env && *env) { if (putenv (*env) != 0) err (EXIT_FAILURE, NULL); env++; } }
0
12,077
DeviceRequest( int requesting_process_id, int requesting_frame_id, int page_request_id, bool user_gesture, MediaStreamRequestType request_type, const StreamControls& controls, MediaDeviceSaltAndOrigin salt_and_origin, DeviceStoppedCallback device_stopped_cb = DeviceStoppedCallback()) : requesting_process_id(requesting_process_id), requesting_frame_id(requesting_frame_id), page_request_id(page_request_id), user_gesture(user_gesture), controls(controls), salt_and_origin(std::move(salt_and_origin)), device_stopped_cb(std::move(device_stopped_cb)), state_(NUM_MEDIA_TYPES, MEDIA_REQUEST_STATE_NOT_REQUESTED), request_type_(request_type), audio_type_(MEDIA_NO_SERVICE), video_type_(MEDIA_NO_SERVICE), target_process_id_(-1), target_frame_id_(-1) {}
1
103,756
void BlockCodec::sync1() { m_savedPositionNextFrame = m_track->fpos_next_frame; m_savedNextFrame = m_track->nextfframe; }
0
54,188
void __module_get(struct module *module) { if (module) { preempt_disable(); atomic_inc(&module->refcnt); trace_module_get(module, _RET_IP_); preempt_enable(); } }
0
92,149
RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill) { InterruptDisabler disabler; RefPtr<PhysicalPage> page = find_free_user_physical_page(); if (!page) { if (m_user_physical_regions.is_empty()) { kprintf("MM: no user physical regions available (?)\n"); } for_each_vmobject([&](auto& vmobject) { if (vmobject.is_purgeable()) { auto& purgeable_vmobject = static_cast<PurgeableVMObject&>(vmobject); int purged_page_count = purgeable_vmobject.purge_with_interrupts_disabled({}); if (purged_page_count) { kprintf("MM: Purge saved the day! Purged %d pages from PurgeableVMObject{%p}\n", purged_page_count, &purgeable_vmobject); page = find_free_user_physical_page(); ASSERT(page); return IterationDecision::Break; } } return IterationDecision::Continue; }); if (!page) { kprintf("MM: no user physical pages available\n"); ASSERT_NOT_REACHED(); return {}; } } #ifdef MM_DEBUG dbgprintf("MM: allocate_user_physical_page vending P%p\n", page->paddr().get()); #endif if (should_zero_fill == ShouldZeroFill::Yes) { auto* ptr = (u32*)quickmap_page(*page); fast_u32_fill(ptr, 0, PAGE_SIZE / sizeof(u32)); unquickmap_page(); } ++m_user_physical_pages_used; return page; }
0
412,337
rb_dir_exists_p(VALUE obj, VALUE fname) { rb_warning("Dir.exists? is a deprecated name, use Dir.exist? instead"); return rb_file_directory_p(obj, fname); }
0
466,981
dnscrypt_pad(uint8_t *buf, const size_t len, const size_t max_len, const uint8_t *nonce, const uint8_t *secretkey) { uint8_t *buf_padding_area = buf + len; size_t padded_len; uint32_t rnd; // no padding if (max_len < len + DNSCRYPT_MIN_PAD_LEN) return len; assert(nonce[crypto_box_HALF_NONCEBYTES] == nonce[0]); crypto_stream((unsigned char *)&rnd, (unsigned long long)sizeof(rnd), nonce, secretkey); padded_len = len + DNSCRYPT_MIN_PAD_LEN + rnd % (max_len - len - DNSCRYPT_MIN_PAD_LEN + 1); padded_len += DNSCRYPT_BLOCK_SIZE - padded_len % DNSCRYPT_BLOCK_SIZE; if (padded_len > max_len) padded_len = max_len; memset(buf_padding_area, 0, padded_len - len); *buf_padding_area = 0x80; return padded_len; }
0
387,642
xz_avail(xz_statep state) { lzma_stream *strm = &(state->strm); if (state->err != LZMA_OK) return -1; if (state->eof == 0) { /* avail_in is size_t, which is not necessary sizeof(unsigned) */ unsigned tmp = strm->avail_in; if (xz_load(state, state->in, state->size, &tmp) == -1) { strm->avail_in = tmp; return -1; } strm->avail_in = tmp; strm->next_in = state->in; } return 0; }
0
264,913
explicit ReverseOp(OpKernelConstruction* context) : OpKernel(context) {}
0
177,011
static char **GetTransformTokens(void *context,const char *text, int *number_tokens) { char **tokens; register const char *p, *q; register ssize_t i; SVGInfo *svg_info; svg_info=(SVGInfo *) context; *number_tokens=0; if (text == (const char *) NULL) return((char **) NULL); /* Determine the number of arguments. */ for (p=text; *p != '\0'; p++) { if (*p == '(') (*number_tokens)+=2; } tokens=(char **) AcquireQuantumMemory(*number_tokens+2UL,sizeof(*tokens)); if (tokens == (char **) NULL) { (void) ThrowMagickException(svg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",text); return((char **) NULL); } /* Convert string to an ASCII list. */ i=0; p=text; for (q=p; *q != '\0'; q++) { if ((*q != '(') && (*q != ')') && (*q != '\0')) continue; tokens[i]=AcquireString(p); (void) CopyMagickString(tokens[i],p,(size_t) (q-p+1)); StripString(tokens[i++]); p=q+1; } tokens[i]=AcquireString(p); (void) CopyMagickString(tokens[i],p,(size_t) (q-p+1)); StripString(tokens[i++]); tokens[i]=(char *) NULL; return(tokens); }
0
31,045
static cmsBool Type_S15Fixed16_Write ( struct _cms_typehandler_struct * self , cmsIOHANDLER * io , void * Ptr , cmsUInt32Number nItems ) { cmsFloat64Number * Value = ( cmsFloat64Number * ) Ptr ; cmsUInt32Number i ; for ( i = 0 ; i < nItems ; i ++ ) { if ( ! _cmsWrite15Fixed16Number ( io , Value [ i ] ) ) return FALSE ; } return TRUE ; cmsUNUSED_PARAMETER ( self ) ; }
0
21,491
void # ifdef M_DEBUG mpi_debug_free_limb_space ( mpi_ptr_t a , const char * info ) # else mpi_free_limb_space ( mpi_ptr_t a ) # endif { if ( ! a ) return ; if ( DBG_MEMORY ) log_debug ( "mpi_free_limb_space of size %lu\n" , ( ulong ) m_size ( a ) * 8 ) ; # if 0 if ( ! m_is_secure ( a ) ) { size_t nlimbs = m_size ( a ) / 4 ; void * p = a ; if ( nlimbs == 5 ) { * a = unused_limbs_5 ; unused_limbs_5 = a ; return ; } else if ( nlimbs == 32 ) { * a = unused_limbs_32 ; unused_limbs_32 = a ; return ; } else if ( nlimbs == 64 ) { * a = unused_limbs_64 ; unused_limbs_64 = a ; return ; } } # endif xfree ( a ) ; }
0
271,661
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) { /* This is only valid for single tasks */ if (pid <= 0 || tgid <= 0) return -EINVAL; return do_tkill(tgid, pid, sig); }
0
154,233
TfLiteRegistration* Register_MAX_POOL_2D() { return Register_MAX_POOL_GENERIC_OPT(); }
0
65,065
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) { struct ip6_tnl **tp; for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) { if (t == *tp) { spin_lock_bh(&ip6_tnl_lock); *tp = t->next; spin_unlock_bh(&ip6_tnl_lock); break; } } }
0
319,678
static inline int num_effective_busses(XilinxSPIPS *s) { return (s->regs[R_LQSPI_STS] & LQSPI_CFG_SEP_BUS && s->regs[R_LQSPI_STS] & LQSPI_CFG_TWO_MEM) ? s->num_busses : 1; }
0
230,747
int dom_document_preserve_whitespace_read(dom_object *obj, zval **retval TSRMLS_DC) { dom_doc_propsptr doc_prop; ALLOC_ZVAL(*retval); if (obj->document) { doc_prop = dom_get_doc_props(obj->document); ZVAL_BOOL(*retval, doc_prop->preservewhitespace); } else { ZVAL_FALSE(*retval); } return SUCCESS; }
0
499,784
QPDFWriter::writeTrailer(trailer_e which, int size, bool xref_stream, qpdf_offset_t prev, int linearization_pass) { QPDFObjectHandle trailer = getTrimmedTrailer(); if (! xref_stream) { writeString("trailer <<"); } writeStringQDF("\n"); if (which == t_lin_second) { writeString(" /Size "); writeString(QUtil::int_to_string(size)); } else { std::set<std::string> keys = trailer.getKeys(); for (std::set<std::string>::iterator iter = keys.begin(); iter != keys.end(); ++iter) { std::string const& key = *iter; writeStringQDF(" "); writeStringNoQDF(" "); writeString(QPDF_Name::normalizeName(key)); writeString(" "); if (key == "/Size") { writeString(QUtil::int_to_string(size)); if (which == t_lin_first) { writeString(" /Prev "); qpdf_offset_t pos = this->m->pipeline->getCount(); writeString(QUtil::int_to_string(prev)); int nspaces = QIntC::to_int(pos - this->m->pipeline->getCount() + 21); if (nspaces < 0) { throw std::logic_error( "QPDFWriter: no padding required in trailer"); } writePad(nspaces); } } else { unparseChild(trailer.getKey(key), 1, 0); } writeStringQDF("\n"); } } // Write ID writeStringQDF(" "); writeString(" /ID ["); if (linearization_pass == 1) { std::string original_id1 = getOriginalID1(); if (original_id1.empty()) { writeString("<00000000000000000000000000000000>"); } else { // Write a string of zeroes equal in length to the // representation of the original ID. While writing the // original ID would have the same number of bytes, it // would cause a change to the deterministic ID generated // by older versions of the software that hard-coded the // length of the ID to 16 bytes. 
writeString("<"); size_t len = QPDF_String(original_id1).unparse(true).length() - 2; for (size_t i = 0; i < len; ++i) { writeString("0"); } writeString(">"); } writeString("<00000000000000000000000000000000>"); } else { if ((linearization_pass == 0) && (this->m->deterministic_id)) { computeDeterministicIDData(); } generateID(); writeString(QPDF_String(this->m->id1).unparse(true)); writeString(QPDF_String(this->m->id2).unparse(true)); } writeString("]"); if (which != t_lin_second) { // Write reference to encryption dictionary if (this->m->encrypted) { writeString(" /Encrypt "); writeString(QUtil::int_to_string(this->m->encryption_dict_objid)); writeString(" 0 R"); } } writeStringQDF("\n"); writeStringNoQDF(" "); writeString(">>"); }
0
325,850
static int net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, const char *model, const char *name, const char *ifname, const char *script, const char *downscript, const char *vhostfdname, int vnet_hdr, int fd) { TAPState *s; s = net_tap_fd_init(peer, model, name, fd, vnet_hdr); if (!s) { close(fd); return -1; } if (tap_set_sndbuf(s->fd, tap) < 0) { return -1; } if (tap->has_fd || tap->has_fds) { snprintf(s->nc.info_str, sizeof(s->nc.info_str), "fd=%d", fd); } else if (tap->has_helper) { snprintf(s->nc.info_str, sizeof(s->nc.info_str), "helper=%s", tap->helper); } else { snprintf(s->nc.info_str, sizeof(s->nc.info_str), "ifname=%s,script=%s,downscript=%s", ifname, script, downscript); if (strcmp(downscript, "no") != 0) { snprintf(s->down_script, sizeof(s->down_script), "%s", downscript); snprintf(s->down_script_arg, sizeof(s->down_script_arg), "%s", ifname); } } if (tap->has_vhost ? tap->vhost : vhostfdname || (tap->has_vhostforce && tap->vhostforce)) { int vhostfd; if (tap->has_vhostfd) { vhostfd = monitor_handle_fd_param(cur_mon, vhostfdname); if (vhostfd == -1) { return -1; } } else { vhostfd = -1; } s->vhost_net = vhost_net_init(&s->nc, vhostfd, tap->has_vhostforce && tap->vhostforce); if (!s->vhost_net) { error_report("vhost-net requested but could not be initialized"); return -1; } } else if (tap->has_vhostfd || tap->has_vhostfds) { error_report("vhostfd= is not valid without vhost"); return -1; } return 0; }
0
395,909
_gnutls_session_cert_type_supported(gnutls_session_t session, gnutls_certificate_type_t cert_type) { unsigned i; unsigned cert_found = 0; gnutls_certificate_credentials_t cred; if (session->security_parameters.entity == GNUTLS_SERVER) { cred = (gnutls_certificate_credentials_t) _gnutls_get_cred(session, GNUTLS_CRD_CERTIFICATE); if (cred == NULL) return GNUTLS_E_UNSUPPORTED_CERTIFICATE_TYPE; if (cred->get_cert_callback == NULL && cred->get_cert_callback2 == NULL) { for (i = 0; i < cred->ncerts; i++) { if (cred->certs[i].cert_list[0].type == cert_type) { cert_found = 1; break; } } if (cert_found == 0) /* no certificate is of that type. */ return GNUTLS_E_UNSUPPORTED_CERTIFICATE_TYPE; } } if (session->internals.priorities.cert_type.algorithms == 0 && cert_type == DEFAULT_CERT_TYPE) return 0; for (i = 0; i < session->internals.priorities.cert_type.algorithms; i++) { if (session->internals.priorities.cert_type.priority[i] == cert_type) { return 0; /* ok */ } } return GNUTLS_E_UNSUPPORTED_CERTIFICATE_TYPE; }
0
351,773
CalendarRegressionTest::runIndexedTest( int32_t index, UBool exec, const char* &name, char* /*par*/ ) { // if (exec) logln((UnicodeString)"TestSuite NumberFormatRegressionTest"); switch (index) { CASE(0,test4100311); CASE(1,test4074758); CASE(2,test4028518); CASE(3,test4031502); CASE(4,test4035301); CASE(5,test4040996); CASE(6,test4051765); CASE(7,test4061476); CASE(8,test4070502); CASE(9,test4071197); CASE(10,test4071385); CASE(11,test4073929); CASE(12,test4083167); CASE(13,test4086724); CASE(14,test4095407); CASE(15,test4096231); CASE(16,test4096539); CASE(17,test41003112); CASE(18,test4103271); CASE(19,test4106136); CASE(20,test4108764); CASE(21,test4114578); CASE(22,test4118384); CASE(23,test4125881); CASE(24,test4125892); CASE(25,test4141665); CASE(26,test4142933); CASE(27,test4145158); CASE(28,test4145983); CASE(29,test4147269); CASE(30,Test4149677); CASE(31,Test4162587); CASE(32,Test4165343); CASE(33,Test4166109); CASE(34,Test4167060); CASE(35,Test4197699); CASE(36,TestJ81); CASE(37,TestJ438); CASE(38,TestLeapFieldDifference); CASE(39,TestMalaysianInstance); CASE(40,test4059654); CASE(41,test4092362); CASE(42,TestWeekShift); CASE(43,TestTimeZoneTransitionAdd); CASE(44,TestDeprecates); CASE(45,TestT5555); CASE(46,TestT6745); CASE(47,TestT8057); CASE(48,TestT8596); CASE(49,Test9019); CASE(50,TestT9452); CASE(51,TestT11632); default: name = ""; break; } }
1
22,867
static uint64_t xhci_oper_read ( void * ptr , hwaddr reg , unsigned size ) { XHCIState * xhci = ptr ; uint32_t ret ; switch ( reg ) { case 0x00 : ret = xhci -> usbcmd ; break ; case 0x04 : ret = xhci -> usbsts ; break ; case 0x08 : ret = 1 ; break ; case 0x14 : ret = xhci -> dnctrl ; break ; case 0x18 : ret = xhci -> crcr_low & ~ 0xe ; break ; case 0x1c : ret = xhci -> crcr_high ; break ; case 0x30 : ret = xhci -> dcbaap_low ; break ; case 0x34 : ret = xhci -> dcbaap_high ; break ; case 0x38 : ret = xhci -> config ; break ; default : trace_usb_xhci_unimplemented ( "oper read" , reg ) ; ret = 0 ; } trace_usb_xhci_oper_read ( reg , ret ) ; return ret ; }
0
439,367
static CURLcode file_disconnect(struct connectdata *conn, bool dead_connection) { struct FILEPROTO *file = conn->data->req.protop; (void)dead_connection; /* not used */ if(file) { Curl_safefree(file->freepath); file->path = NULL; if(file->fd != -1) close(file->fd); file->fd = -1; } return CURLE_OK; }
0
200,980
static inline bool migration_bitmap_set_dirty(ram_addr_t addr) { bool ret; int nr = addr >> TARGET_PAGE_BITS; ret = test_and_set_bit(nr, migration_bitmap); if (!ret) { migration_dirty_pages++; } return ret; }
0
66,124
int unregister_reboot_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); }
0
169,779
void LogHTMLForm(SavePasswordProgressLogger* logger, SavePasswordProgressLogger::StringID message_id, const WebFormElement& form) { logger->LogHTMLForm(message_id, form.GetName().Utf8(), GURL(form.Action().Utf8())); }
0
247,083
static void php_zlib_cleanup_ob_gzhandler_mess(TSRMLS_D) { if (ZLIBG(ob_gzhandler)) { deflateEnd(&(ZLIBG(ob_gzhandler)->Z)); php_zlib_output_handler_context_dtor(ZLIBG(ob_gzhandler) TSRMLS_CC); ZLIBG(ob_gzhandler) = NULL; } }
0
197,839
void LayerTreeCoordinator::paintContents(const WebCore::GraphicsLayer* graphicsLayer, WebCore::GraphicsContext& graphicsContext, WebCore::GraphicsLayerPaintingPhase, const WebCore::IntRect& clipRect) { if (graphicsLayer == m_nonCompositedContentLayer) { m_webPage->drawRect(graphicsContext, clipRect); return; } if (graphicsLayer == m_pageOverlayLayer) { graphicsContext.clearRect(clipRect); m_webPage->drawPageOverlay(graphicsContext, clipRect); return; } }
0
351,482
static void vt_disallocate_all(void) { struct vc_data *vc[MAX_NR_CONSOLES]; int i; console_lock(); for (i = 1; i < MAX_NR_CONSOLES; i++) if (!vt_busy(i)) vc[i] = vc_deallocate(i); else vc[i] = NULL; console_unlock(); for (i = 1; i < MAX_NR_CONSOLES; i++) { if (vc[i] && i >= MIN_NR_CONSOLES) { tty_port_destroy(&vc[i]->port); kfree(vc[i]); } } }
1
218,711
void Document::SetTitleElement(Element* title_element) { if (isSVGSVGElement(documentElement())) { title_element_ = Traversal<SVGTitleElement>::FirstChild(*documentElement()); } else { if (title_element_ && title_element_ != title_element) title_element_ = Traversal<HTMLTitleElement>::FirstWithin(*this); else title_element_ = title_element; if (isSVGTitleElement(title_element_)) { title_element_ = nullptr; return; } } if (isHTMLTitleElement(title_element_)) UpdateTitle(toHTMLTitleElement(title_element_)->text()); else if (isSVGTitleElement(title_element_)) UpdateTitle(toSVGTitleElement(title_element_)->textContent()); }
0
138,083
void vmalloc_sync_all(void) { unsigned long address; if (SHARED_KERNEL_PMD) return; for (address = VMALLOC_START & PMD_MASK; address >= TASK_SIZE && address < FIXADDR_TOP; address += PMD_SIZE) { struct page *page; spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { spinlock_t *pgt_lock; pmd_t *ret; /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); ret = vmalloc_sync_one(page_address(page), address); spin_unlock(pgt_lock); if (!ret) break; } spin_unlock(&pgd_lock); } }
0
159,492
void Pipe::DelayedDelivery::flush() { lgeneric_subdout(pipe->msgr->cct, ms, 20) << *pipe << "DelayedDelivery::flush" << dendl; Mutex::Locker l(delay_lock); flush_count = delay_queue.size(); delay_cond.Signal(); }
0
458,224
void OPJ_CALLCONV opj_set_default_encoder_parameters(opj_cparameters_t *parameters) { if (parameters) { memset(parameters, 0, sizeof(opj_cparameters_t)); /* default coding parameters */ parameters->cp_cinema = OPJ_OFF; /* DEPRECATED */ parameters->rsiz = OPJ_PROFILE_NONE; parameters->max_comp_size = 0; parameters->numresolution = OPJ_COMP_PARAM_DEFAULT_NUMRESOLUTION; parameters->cp_rsiz = OPJ_STD_RSIZ; /* DEPRECATED */ parameters->cblockw_init = OPJ_COMP_PARAM_DEFAULT_CBLOCKW; parameters->cblockh_init = OPJ_COMP_PARAM_DEFAULT_CBLOCKH; parameters->prog_order = OPJ_COMP_PARAM_DEFAULT_PROG_ORDER; parameters->roi_compno = -1; /* no ROI */ parameters->subsampling_dx = 1; parameters->subsampling_dy = 1; parameters->tp_on = 0; parameters->decod_format = -1; parameters->cod_format = -1; parameters->tcp_rates[0] = 0; parameters->tcp_numlayers = 0; parameters->cp_disto_alloc = 0; parameters->cp_fixed_alloc = 0; parameters->cp_fixed_quality = 0; parameters->jpip_on = OPJ_FALSE; /* UniPG>> */ #ifdef USE_JPWL parameters->jpwl_epc_on = OPJ_FALSE; parameters->jpwl_hprot_MH = -1; /* -1 means unassigned */ { int i; for (i = 0; i < JPWL_MAX_NO_TILESPECS; i++) { parameters->jpwl_hprot_TPH_tileno[i] = -1; /* unassigned */ parameters->jpwl_hprot_TPH[i] = 0; /* absent */ } }; { int i; for (i = 0; i < JPWL_MAX_NO_PACKSPECS; i++) { parameters->jpwl_pprot_tileno[i] = -1; /* unassigned */ parameters->jpwl_pprot_packno[i] = -1; /* unassigned */ parameters->jpwl_pprot[i] = 0; /* absent */ } }; parameters->jpwl_sens_size = 0; /* 0 means no ESD */ parameters->jpwl_sens_addr = 0; /* 0 means auto */ parameters->jpwl_sens_range = 0; /* 0 means packet */ parameters->jpwl_sens_MH = -1; /* -1 means unassigned */ { int i; for (i = 0; i < JPWL_MAX_NO_TILESPECS; i++) { parameters->jpwl_sens_TPH_tileno[i] = -1; /* unassigned */ parameters->jpwl_sens_TPH[i] = -1; /* absent */ } }; #endif /* USE_JPWL */ /* <<UniPG */ } }
0
215,929
void StatusBubbleGtk::SetURL(const GURL& url, const string16& languages) { url_ = url; languages_ = languages; if (url.is_empty() && !status_text_.empty()) { url_text_ = std::string(); SetStatusTextTo(status_text_); return; } SetStatusTextToURL(); }
0
197,085
bool OmniboxViewWin::IsImeComposing() const { bool ime_composing = false; HIMC context = ImmGetContext(m_hWnd); if (context) { ime_composing = !!ImmGetCompositionString(context, GCS_COMPSTR, NULL, 0); ImmReleaseContext(m_hWnd, context); } return ime_composing; }
0
10,050
net::WebSocket* WebSocketExperimentTask::Context::CreateWebSocket( const Config& config, net::WebSocketDelegate* delegate) { URLRequestContextGetter* getter = Profile::GetDefaultRequestContext(); if (!getter) return NULL; net::WebSocket::Request* request( new net::WebSocket::Request(config.url, config.ws_protocol, config.ws_origin, config.ws_location, getter->GetURLRequestContext())); return new net::WebSocket(request, delegate); }
1
170,639
int validation_checkfp(int count, int argc, char **argv) { int result; checkfp_command command; checkfp_control control; command.number[0] = 0; command.limit = 3; command.verbose = verbose; command.ctimes = 0; command.cmillions = 0; command.cinvalid = 0; command.cnoaccept = 0; while (--argc > 0) { ++argv; if (argc > 1 && strcmp(*argv, "-l") == 0) { --argc; command.limit = atoi(*++argv); } else { fprintf(stderr, "unknown argument %s\n", *argv); return 1; } } control.cnumber = 0; control.check_state = start; control.at_start = 1; control.cdigits_in_state = 0; control.limit = command.limit; control.state = 0; control.is_negative = 0; control.is_zero = 1; control.number_was_valid = 0; result = check_all_characters(&command, control); printf("checkfp: %s: checked %d,%.3d,%.3d,%.3d strings (%d invalid)\n", result ? "pass" : "FAIL", command.cmillions / 1000, command.cmillions % 1000, command.ctimes / 1000, command.ctimes % 1000, command.cinvalid); return result; }
0
160,887
int CLASS ljpeg_start (struct jhead *jh, int info_only) { int c, tag; ushort len; uchar data[0x10000]; const uchar *dp; memset (jh, 0, sizeof *jh); jh->restart = INT_MAX; fread (data, 2, 1, ifp); if (data[1] != 0xd8) return 0; do { fread (data, 2, 2, ifp); tag = data[0] << 8 | data[1]; len = (data[2] << 8 | data[3]) - 2; if (tag <= 0xff00) return 0; fread (data, 1, len, ifp); switch (tag) { case 0xffc3: jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3; case 0xffc0: jh->bits = data[0]; jh->high = data[1] << 8 | data[2]; jh->wide = data[3] << 8 | data[4]; jh->clrs = data[5] + jh->sraw; if (len == 9 && !dng_version) getc(ifp); break; case 0xffc4: if (info_only) break; for (dp = data; dp < data+len && (c = *dp++) < 4; ) jh->free[c] = jh->huff[c] = make_decoder_ref (&dp); break; case 0xffda: jh->psv = data[1+data[0]*2]; jh->bits -= data[3+data[0]*2] & 15; break; case 0xffdd: jh->restart = data[0] << 8 | data[1]; } } while (tag != 0xffda); if (info_only) return 1; if (jh->clrs > 6 || !jh->huff[0]) return 0; FORC(5) if (!jh->huff[c+1]) jh->huff[c+1] = jh->huff[c]; if (jh->sraw) { FORC(4) jh->huff[2+c] = jh->huff[1]; FORC(jh->sraw) jh->huff[1+c] = jh->huff[0]; } jh->row = (ushort *) calloc (jh->wide*jh->clrs, 4); merror (jh->row, "ljpeg_start()"); return zero_after_ff = 1; }
0
21,598
rfbClientPtr rfbNewUDPClient ( rfbScreenInfoPtr rfbScreen ) { return ( ( rfbScreen -> udpClient = rfbNewTCPOrUDPClient ( rfbScreen , rfbScreen -> udpSock , TRUE ) ) ) ; }
0
317,908
void SetPageInfoBubbleIdentityInfo( const PageInfoUI::IdentityInfo& identity_info) { static_cast<PageInfoBubbleView*>(PageInfoBubbleView::GetPageInfoBubble()) ->SetIdentityInfo(identity_info); }
0
430,454
ippGetGroupTag(ipp_attribute_t *attr) /* I - IPP attribute */ { /* * Range check input... */ if (!attr) return (IPP_TAG_ZERO); /* * Return the group... */ return (attr->group_tag); }
0
331,731
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors) { BlockDriver *drv = bs->drv; int ret; if (!drv) { return -ENOMEDIUM; } if (!drv->bdrv_write_compressed) { return -ENOTSUP; } ret = bdrv_check_request(bs, sector_num, nb_sectors); if (ret < 0) { return ret; } assert(QLIST_EMPTY(&bs->dirty_bitmaps)); return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors); }
0
70,827
static PHP_FUNCTION(session_decode) { char *str; int str_len; if (PS(session_status) == php_session_none) { RETURN_FALSE; } if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &str, &str_len) == FAILURE) { return; } RETVAL_BOOL(php_session_decode(str, str_len TSRMLS_CC) == SUCCESS); }
0
95,235
static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, struct page *parent, int start) { struct page *page; int err; if (!nid) return ERR_PTR(-ENOENT); f2fs_bug_on(sbi, check_nid_range(sbi, nid)); repeat: page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); if (!page) return ERR_PTR(-ENOMEM); err = read_node_page(page, 0); if (err < 0) { f2fs_put_page(page, 1); return ERR_PTR(err); } else if (err == LOCKED_PAGE) { goto page_hit; } if (parent) ra_node_pages(parent, start + 1, MAX_RA_NODE); lock_page(page); if (unlikely(page->mapping != NODE_MAPPING(sbi))) { f2fs_put_page(page, 1); goto repeat; } if (unlikely(!PageUptodate(page))) goto out_err; page_hit: if(unlikely(nid != nid_of_node(page))) { f2fs_bug_on(sbi, 1); ClearPageUptodate(page); out_err: f2fs_put_page(page, 1); return ERR_PTR(-EIO); } return page; }
0
109,573
void setupConnection() override {}
0
343,504
int __udp_lib_get_port(struct sock *sk, unsigned short snum, struct hlist_head udptable[], int *port_rover, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2 ) ) { struct hlist_node *node; struct hlist_head *head; struct sock *sk2; int error = 1; write_lock_bh(&udp_hash_lock); if (snum == 0) { int best_size_so_far, best, result, i; if (*port_rover > sysctl_local_port_range[1] || *port_rover < sysctl_local_port_range[0]) *port_rover = sysctl_local_port_range[0]; best_size_so_far = 32767; best = result = *port_rover; for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) { int size; head = &udptable[result & (UDP_HTABLE_SIZE - 1)]; if (hlist_empty(head)) { if (result > sysctl_local_port_range[1]) result = sysctl_local_port_range[0] + ((result - sysctl_local_port_range[0]) & (UDP_HTABLE_SIZE - 1)); goto gotit; } size = 0; sk_for_each(sk2, node, head) { if (++size >= best_size_so_far) goto next; } best_size_so_far = size; best = result; next: ; } result = best; for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) { if (result > sysctl_local_port_range[1]) result = sysctl_local_port_range[0] + ((result - sysctl_local_port_range[0]) & (UDP_HTABLE_SIZE - 1)); if (! __udp_lib_lport_inuse(result, udptable)) break; } if (i >= (1 << 16) / UDP_HTABLE_SIZE) goto fail; gotit: *port_rover = snum = result; } else { head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; sk_for_each(sk2, node, head) if (sk2->sk_hash == snum && sk2 != sk && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && (*saddr_comp)(sk, sk2) ) goto fail; } inet_sk(sk)->num = snum; sk->sk_hash = snum; if (sk_unhashed(sk)) { head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; sk_add_node(sk, head); sock_prot_inc_use(sk->sk_prot); } error = 0; fail: write_unlock_bh(&udp_hash_lock); return error; }
1
96,701
f_funcref(typval_T *argvars, typval_T *rettv) { common_function(argvars, rettv, TRUE); }
0
85,749
void setContext(Context* context) { contexts_[context->id()] = context; }
0
100,703
mt_copy(mrb_state *mrb, mt_tbl *t) { mt_tbl *t2; size_t i; if (t == NULL) return NULL; if (t->alloc == 0) return NULL; if (t->size == 0) return NULL; t2 = mt_new(mrb); for (i=0; i<t->alloc; i++) { struct mt_elem *slot = &t->table[i]; if (slot->key) { mt_put(mrb, t2, slot->key, slot->func_p, slot->noarg_p, slot->ptr); } } return t2; }
0
302,875
static struct sk_buff * sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) { struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list); struct tcf_result cl_res; if (!cl) return skb; /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */ qdisc_bstats_cpu_update(cl->q, skb); switch (tcf_classify(skb, cl, &cl_res, false)) { case TC_ACT_OK: case TC_ACT_RECLASSIFY: skb->tc_index = TC_H_MIN(cl_res.classid); break; case TC_ACT_SHOT: qdisc_qstats_cpu_drop(cl->q); *ret = NET_XMIT_DROP; kfree_skb(skb); return NULL; case TC_ACT_STOLEN: case TC_ACT_QUEUED: case TC_ACT_TRAP: *ret = NET_XMIT_SUCCESS; consume_skb(skb); return NULL; case TC_ACT_REDIRECT: /* No need to push/pop skb's mac_header here on egress! */ skb_do_redirect(skb); *ret = NET_XMIT_SUCCESS; return NULL; default: break; } return skb;
0
160,135
test_userpass_cookie_check (Test *test, gconstpointer data) { GAsyncResult *result = NULL; CockpitWebService *service; CockpitWebService *prev_service; CockpitCreds *creds; CockpitCreds *prev_creds; JsonObject *response = NULL; GError *error = NULL; GHashTable *headers; headers = mock_auth_basic_header ("me", "this is the password"); g_hash_table_insert (headers, g_strdup ("X-Authorize"), g_strdup ("password")); cockpit_auth_login_async (test->auth, "/cockpit/", NULL, headers, on_ready_get_result, &result); g_hash_table_unref (headers); while (result == NULL) g_main_context_iteration (NULL, TRUE); headers = web_socket_util_new_headers (); response = cockpit_auth_login_finish (test->auth, result, NULL, headers, &error); g_assert_no_error (error); /* Get the service */ mock_auth_include_cookie_as_if_client (headers, headers, "cockpit"); service = cockpit_auth_check_cookie (test->auth, "/cockpit", headers); g_object_unref (result); g_assert (service != NULL); g_assert (response != NULL); creds = cockpit_web_service_get_creds (service); g_assert_cmpstr ("me", ==, cockpit_creds_get_user (creds)); g_assert_cmpstr ("cockpit", ==, cockpit_creds_get_application (creds)); g_assert_cmpstr ("this is the password", ==, g_bytes_get_data (cockpit_creds_get_password (creds), NULL)); prev_service = service; g_object_unref (service); service = NULL; prev_creds = creds; creds = NULL; mock_auth_include_cookie_as_if_client (headers, headers, "cockpit"); service = cockpit_auth_check_cookie (test->auth, "/cockpit", headers); g_assert (prev_service == service); creds = cockpit_web_service_get_creds (service); g_assert (prev_creds == creds); g_assert_cmpstr ("me", ==, cockpit_creds_get_user (creds)); g_assert_cmpstr ("this is the password", ==, g_bytes_get_data (cockpit_creds_get_password (creds), NULL)); g_hash_table_destroy (headers); g_object_unref (service); json_object_unref (response); }
0
110,725
/*
 * Resolve a client-local handle id to its ion_handle and take a
 * reference on it.  Returns ERR_PTR(-EINVAL) when the id is unknown.
 * Callers are expected to hold the client lock (hence "_nolock").
 */
static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
						      int id)
{
	struct ion_handle *found = idr_find(&client->idr, id);

	if (!found)
		return ERR_PTR(-EINVAL);

	ion_handle_get(found);
	return found;
}
0
277,226
/*
 * Read u4_num_bits from the bitstream: peek the next bits, then advance
 * the stream position past them.  Returns the bits read.
 */
INLINE UWORD32 impeg2d_bit_stream_get(void* pv_ctxt, UWORD32 u4_num_bits)
{
    UWORD32 u4_bits;

    /* Peek first, then consume. */
    u4_bits = impeg2d_bit_stream_nxt(pv_ctxt, u4_num_bits);
    impeg2d_bit_stream_flush(pv_ctxt, u4_num_bits);

    return u4_bits;
}
0
278,192
// Parse the adaptation field of a TS packet. Consumes the whole field from
// the bit reader (including the stuffing/remainder bytes) and, when a PCR
// is present, forwards it to every program via updatePCR().
status_t ATSParser::parseAdaptationField(ABitReader *br, unsigned PID) {
    unsigned adaptation_field_length = br->getBits(8);

    if (adaptation_field_length > 0) {
        // The whole field must fit inside the remaining bits of this packet.
        if (adaptation_field_length * 8 > br->numBitsLeft()) {
            ALOGV("Adaptation field should be included in a single TS packet.");
            return ERROR_MALFORMED;
        }

        unsigned discontinuity_indicator = br->getBits(1);

        if (discontinuity_indicator) {
            ALOGV("PID 0x%04x: discontinuity_indicator = 1 (!!!)", PID);
        }

        br->skipBits(2);  // two flag bits not used here
        unsigned PCR_flag = br->getBits(1);

        // Bits of the field consumed so far (length byte excluded).
        size_t numBitsRead = 4;

        if (PCR_flag) {
            // A PCR needs at least 52 more bits in the field.
            if (adaptation_field_length * 8 < 52) {
                return ERROR_MALFORMED;
            }
            br->skipBits(4);
            uint64_t PCR_base = br->getBits(32);
            PCR_base = (PCR_base << 1) | br->getBits(1);  // 33-bit base
            br->skipBits(6);
            unsigned PCR_ext = br->getBits(9);

            // Byte position of the end of the PCR within this 188-byte packet.
            size_t byteOffsetFromStartOfTSPacket =
                (188 - br->numBitsLeft() / 8);

            // PCR combines base and extension: base * 300 + ext.
            uint64_t PCR = PCR_base * 300 + PCR_ext;

            ALOGV("PID 0x%04x: PCR = 0x%016" PRIx64 " (%.2f)",
                  PID, PCR, PCR / 27E6);

            // Absolute byte offset of the PCR within the whole stream.
            size_t byteOffsetFromStart =
                mNumTSPacketsParsed * 188 + byteOffsetFromStartOfTSPacket;

            // NOTE(review): the loop iterates once per program but passes the
            // same (PID, PCR, offset) each time rather than indexing
            // mPrograms[i] -- presumably updatePCR dispatches internally;
            // confirm against the updatePCR implementation.
            for (size_t i = 0; i < mPrograms.size(); ++i) {
                updatePCR(PID, PCR, byteOffsetFromStart);
            }

            numBitsRead += 52;
        }

        // Skip whatever remains of the adaptation field.
        br->skipBits(adaptation_field_length * 8 - numBitsRead);
    }

    return OK;
}
0
293,526
/*
 * Apply one relocation entry to @obj: validate the requested domains and
 * offset, then write the target object's GTT address (+delta) into @obj at
 * reloc->offset, either through a CPU kmap or through the GTT aperture.
 * Returns 0 on success or a negative errno.
 */
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we've already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_offset = to_intel_bo(target_obj)->gtt_offset;

	/* The target buffer should have appeared before us in the
	 * exec_object list, so it should have a GTT space bound by now.
	 */
	if (unlikely(target_offset == 0)) {
		DRM_DEBUG("No GTT space found for object %d\n",
			  reloc->target_handle);
		return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		/* more than one write domain bit set */
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
		     reloc->write_domain != target_obj->pending_write_domain)) {
		DRM_DEBUG("Write domain conflict: "
			  "obj %p target %d offset %d "
			  "new %08x old %08x\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->write_domain,
			  target_obj->pending_write_domain);
		return ret;
	}

	/* Accumulate the domains this exec will put the target into */
	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		/* relocation slots must be dword aligned */
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	reloc->delta += target_offset;
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
		/* CPU domain: write directly through a kernel mapping */
		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
		char *vaddr;

		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		kunmap_atomic(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		/* We can't wait for rendering with pagefaults disabled */
		if (obj->active && in_atomic())
			return -EFAULT;

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform.  */
		reloc->offset += obj->gtt_offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      reloc->offset & PAGE_MASK);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + (reloc->offset & ~PAGE_MASK));
		iowrite32(reloc->delta, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}
0
142,592
/*
 * Post-verification rewrite pass.  Walks every instruction of the program
 * and patches it in place:
 *  - guards div/mod-by-register against divide-by-zero,
 *  - expands legacy LD_ABS/LD_IND via the subsystem's gen_ld_abs(),
 *  - inserts speculation-safe masking around sanitized pointer ALU ops,
 *  - rewrites tail calls (direct poke descriptors or index masking),
 *  - inlines eligible map helper calls, and finally
 *  - resolves every remaining helper call imm to a real kernel address.
 * Each patch goes through bpf_patch_insn_data(), which reallocates the
 * program; `delta` tracks how far the patched program has grown so `i`
 * keeps addressing the logical original instruction.
 */
static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	bool expect_blinding = bpf_jit_blinding_enabled(prog);
	struct bpf_insn *insn = prog->insnsi;
	const struct bpf_func_proto *fn;
	const int insn_cnt = prog->len;
	const struct bpf_map_ops *ops;
	struct bpf_insn_aux_data *aux;
	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	struct bpf_map *map_ptr;
	int i, ret, cnt, delta = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* Divide/modulo by a register: patch in a zero check. */
		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
			struct bpf_insn mask_and_div[] = {
				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
				/* Rx div 0 -> 0 */
				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
				*insn,
			};
			struct bpf_insn mask_and_mod[] = {
				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
				/* Rx mod 0 -> Rx */
				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
				*insn,
			};
			struct bpf_insn *patchlet;

			/* For 64-bit ops the leading MOV32 truncation of the
			 * divisor is skipped (is64 ? 1 : 0 offset). */
			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
				patchlet = mask_and_div + (is64 ? 1 : 0);
				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
			} else {
				patchlet = mask_and_mod + (is64 ? 1 : 0);
				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
			}

			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		/* Legacy packet-access instructions: expand per subsystem. */
		if (BPF_CLASS(insn->code) == BPF_LD &&
		    (BPF_MODE(insn->code) == BPF_ABS ||
		     BPF_MODE(insn->code) == BPF_IND)) {
			cnt = env->ops->gen_ld_abs(insn, insn_buf);
			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
				verbose(env, "bpf verifier is misconfigured\n");
				return -EINVAL;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		/* Pointer arithmetic flagged for sanitization: clamp the
		 * offset register into its verified limit using AX. */
		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
			struct bpf_insn insn_buf[16];
			struct bpf_insn *patch = &insn_buf[0];
			bool issrc, isneg;
			u32 off_reg;

			aux = &env->insn_aux_data[i + delta];
			if (!aux->alu_state ||
			    aux->alu_state == BPF_ALU_NON_POINTER)
				continue;

			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
				BPF_ALU_SANITIZE_SRC;

			off_reg = issrc ? insn->src_reg : insn->dst_reg;
			if (isneg)
				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
			if (issrc) {
				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
							 off_reg);
				insn->src_reg = BPF_REG_AX;
			} else {
				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
							 BPF_REG_AX);
			}
			if (isneg)
				insn->code = insn->code == code_add ?
					     code_sub : code_add;
			*patch++ = *insn;
			if (issrc && isneg)
				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
			cnt = patch - insn_buf;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		/* Everything below only concerns helper calls. */
		if (insn->code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn->src_reg == BPF_PSEUDO_CALL)
			continue;

		if (insn->imm == BPF_FUNC_get_route_realm)
			prog->dst_needed = 1;
		if (insn->imm == BPF_FUNC_get_prandom_u32)
			bpf_user_rnd_init_once();
		if (insn->imm == BPF_FUNC_override_return)
			prog->kprobe_override = 1;
		if (insn->imm == BPF_FUNC_tail_call) {
			/* If we tail call into other programs, we
			 * cannot make any assumptions since they can
			 * be replaced dynamically during runtime in
			 * the program array.
			 */
			prog->cb_access = 1;
			env->prog->aux->stack_depth = MAX_BPF_STACK;
			env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;

			/* mark bpf_tail_call as different opcode to avoid
			 * conditional branch in the interpeter for every normal
			 * call and to prevent accidental JITing by JIT compiler
			 * that doesn't support bpf_tail_call yet
			 */
			insn->imm = 0;
			insn->code = BPF_JMP | BPF_TAIL_CALL;

			aux = &env->insn_aux_data[i + delta];
			/* Known constant map + key: register a poke descriptor
			 * so the JIT can patch a direct jump. */
			if (env->bpf_capable && !expect_blinding &&
			    prog->jit_requested &&
			    !bpf_map_key_poisoned(aux) &&
			    !bpf_map_ptr_poisoned(aux) &&
			    !bpf_map_ptr_unpriv(aux)) {
				struct bpf_jit_poke_descriptor desc = {
					.reason = BPF_POKE_REASON_TAIL_CALL,
					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
					.tail_call.key = bpf_map_key_immediate(aux),
				};

				ret = bpf_jit_add_poke_descriptor(prog, &desc);
				if (ret < 0) {
					verbose(env, "adding tail call poke descriptor failed\n");
					return ret;
				}

				insn->imm = ret + 1;
				continue;
			}

			if (!bpf_map_ptr_unpriv(aux))
				continue;

			/* instead of changing every JIT dealing with tail_call
			 * emit two extra insns:
			 * if (index >= max_entries) goto out;
			 * index &= array->index_mask;
			 * to avoid out-of-bounds cpu speculation
			 */
			if (bpf_map_ptr_poisoned(aux)) {
				verbose(env, "tail_call abusing map_ptr\n");
				return -EINVAL;
			}

			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
						  map_ptr->max_entries, 2);
			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
						    container_of(map_ptr,
								 struct bpf_array,
								 map)->index_mask);
			insn_buf[2] = *insn;
			cnt = 3;
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
		 * and other inlining handlers are currently limited to 64 bit
		 * only.
		 */
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    (insn->imm == BPF_FUNC_map_lookup_elem ||
		     insn->imm == BPF_FUNC_map_update_elem ||
		     insn->imm == BPF_FUNC_map_delete_elem ||
		     insn->imm == BPF_FUNC_map_push_elem ||
		     insn->imm == BPF_FUNC_map_pop_elem ||
		     insn->imm == BPF_FUNC_map_peek_elem)) {
			aux = &env->insn_aux_data[i + delta];
			if (bpf_map_ptr_poisoned(aux))
				goto patch_call_imm;

			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
			ops = map_ptr->ops;
			/* Map type may provide a fully inlined lookup. */
			if (insn->imm == BPF_FUNC_map_lookup_elem &&
			    ops->map_gen_lookup) {
				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
				if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
					verbose(env, "bpf verifier is misconfigured\n");
					return -EINVAL;
				}

				new_prog = bpf_patch_insn_data(env, i + delta,
							       insn_buf, cnt);
				if (!new_prog)
					return -ENOMEM;

				delta += cnt - 1;
				env->prog = prog = new_prog;
				insn = new_prog->insnsi + i + delta;
				continue;
			}

			/* Compile-time checks that the ops signatures match
			 * what the direct-call rewrite below assumes. */
			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
				     (void *(*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
				     (int (*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
				     (int (*)(struct bpf_map *map, void *key,
					      void *value, u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
				     (int (*)(struct bpf_map *map, void *value,
					      u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
				     (int (*)(struct bpf_map *map, void *value))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
				     (int (*)(struct bpf_map *map, void *value))NULL));

			/* Call the map-specific op directly instead of the
			 * generic helper. */
			switch (insn->imm) {
			case BPF_FUNC_map_lookup_elem:
				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_update_elem:
				insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_delete_elem:
				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_push_elem:
				insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_pop_elem:
				insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_peek_elem:
				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
					    __bpf_call_base;
				continue;
			}

			goto patch_call_imm;
		}

		/* Inline bpf_jiffies64(): load the address of jiffies and
		 * dereference it, avoiding a helper call. */
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    insn->imm == BPF_FUNC_jiffies64) {
			struct bpf_insn ld_jiffies_addr[2] = {
				BPF_LD_IMM64(BPF_REG_0,
					     (unsigned long)&jiffies),
			};

			insn_buf[0] = ld_jiffies_addr[0];
			insn_buf[1] = ld_jiffies_addr[1];
			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
						  BPF_REG_0, 0);
			cnt = 3;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
						       cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

patch_call_imm:
		fn = env->ops->get_func_proto(insn->imm, env->prog);
		/* all functions that have prototype and verifier allowed
		 * programs to call them, must be real in-kernel functions
		 */
		if (!fn->func) {
			verbose(env,
				"kernel subsystem misconfigured func %s#%d\n",
				func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		}
		insn->imm = fn->func - __bpf_call_base;
	}

	/* Since poke tab is now finalized, publish aux to tracker. */
	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		map_ptr = prog->aux->poke_tab[i].tail_call.map;
		if (!map_ptr->ops->map_poke_track ||
		    !map_ptr->ops->map_poke_untrack ||
		    !map_ptr->ops->map_poke_run) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
		if (ret < 0) {
			verbose(env, "tracking tail call prog failed\n");
			return ret;
		}
	}

	return 0;
}
0
128,738
/*
 * Return the issuer-and-serial of the idx-th recipient info of a
 * signedAndEnveloped PKCS7 structure, or NULL if the content type is
 * wrong, the structure is incomplete, or idx is out of range.
 */
PKCS7_ISSUER_AND_SERIAL *PKCS7_get_issuer_and_serial(PKCS7 *p7, int idx)
{
    STACK_OF(PKCS7_RECIP_INFO) *rsk;
    PKCS7_RECIP_INFO *ri;

    /* Only signedAndEnveloped content carries recipient infos */
    if (OBJ_obj2nid(p7->type) != NID_pkcs7_signedAndEnveloped)
        return NULL;
    if (p7->d.signed_and_enveloped == NULL)
        return NULL;
    rsk = p7->d.signed_and_enveloped->recipientinfo;
    if (rsk == NULL)
        return NULL;
    /*
     * Bounds-check before indexing. The previous code performed a useless
     * lookup of element 0 first (dead store) and did not reject a negative
     * idx before passing it to sk_PKCS7_RECIP_INFO_value().
     */
    if (idx < 0 || sk_PKCS7_RECIP_INFO_num(rsk) <= idx)
        return NULL;
    ri = sk_PKCS7_RECIP_INFO_value(rsk, idx);
    return ri->issuer_and_serial;
}
0
199,813
// DevTools "Emulation.setEmitTouchEventsForMouse": remember the requested
// touch-emulation state and propagate it to the renderer.
Response EmulationHandler::SetEmitTouchEventsForMouse(
    bool enabled,
    Maybe<std::string> configuration) {
  touch_emulation_enabled_ = enabled;
  // An absent configuration collapses to the empty string.
  touch_emulation_configuration_ = configuration.fromMaybe("");
  UpdateTouchEventEmulationState();
  return Response::OK();
}
0
115,332
/*
 * Module init for AF_PACKET sockets.
 *
 * Registers, in order: the proto, the socket family, the per-netns ops
 * and the netdevice notifier. The previous version ignored the return
 * values of the last three registrations, so a failure left earlier
 * registrations dangling; now every step is checked and unwound in
 * reverse order on failure.
 */
static int __init packet_init(void)
{
	int rc;

	rc = proto_register(&packet_proto, 0);
	if (rc)
		goto out;
	rc = sock_register(&packet_family_ops);
	if (rc)
		goto out_proto;
	rc = register_pernet_subsys(&packet_net_ops);
	if (rc)
		goto out_sock;
	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out_sock:
	sock_unregister(PF_PACKET);
out_proto:
	proto_unregister(&packet_proto);
out:
	return rc;
}
0
67,360
/*
 * Apply the cache-maintenance callback @op to a [offset, offset+size)
 * byte range that starts in @page, walking page by page so that highmem
 * pages can be temporarily mapped before being operated on.
 */
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	/* Normalize to a pfn plus an in-page offset */
	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			/* Clamp to the remainder of this page */
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				/* Safe to create a fresh temporary mapping */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				/* Aliasing cache: only operate through an
				 * existing kmap, if any; otherwise skip
				 * (handled elsewhere for such caches). */
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			/* Lowmem page: directly addressable */
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		/* Only the first page can start at a non-zero offset */
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
0
138,659
/**
 * @brief Parse the Connection header field of an HTTP request
 * @param[in] connection Structure representing an HTTP connection
 * @param[in] value Comma-separated list of connection options (modified
 *   in place by tokenization)
 *
 * Recognizes "keep-alive", "close" and, when web socket support is
 * enabled, "upgrade"; matching is case-insensitive.
 **/
void httpParseConnectionField(HttpConnection *connection, char_t *value)
{
   char_t *savePtr;
   char_t *field;

   //Iterate over the comma-separated list of options
   for(field = osStrtok_r(value, ",", &savePtr); field != NULL;
      field = osStrtok_r(NULL, ",", &savePtr))
   {
      //Strip leading and trailing whitespace characters
      char_t *option = strTrimWhitespace(field);

      if(!osStrcasecmp(option, "keep-alive"))
      {
         //The connection is persistent
         connection->request.keepAlive = TRUE;
      }
      else if(!osStrcasecmp(option, "close"))
      {
         //The connection will be closed after completion of the response
         connection->request.keepAlive = FALSE;
      }
#if (HTTP_SERVER_WEB_SOCKET_SUPPORT == ENABLED)
      else if(!osStrcasecmp(option, "upgrade"))
      {
         //Upgrade the connection
         connection->request.connectionUpgrade = TRUE;
      }
#endif
   }
}
0
372,786
/*
 * Replace zero-valued Bayer samples with the average of the non-zero
 * same-color samples in the surrounding 5x5 window.
 *
 * NOTE(review): row, col, r, c are unsigned, so for row < 2 or col < 2
 * the start expressions row-2 / col-2 wrap around to huge values and the
 * neighbor loops do not execute at all — edge pixels are left untouched.
 * Presumably acceptable, but confirm this is the intended edge behavior.
 */
void CLASS remove_zeroes()
{
  unsigned row, col, tot, n, r, c;
#ifdef LIBRAW_LIBRARY_BUILD
  /* progress callback: phase start */
  RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,0,2);
#endif
  for (row=0; row < height; row++)
    for (col=0; col < width; col++)
      if (BAYER(row,col) == 0) {
	tot = n = 0;
	/* Average same-color, non-zero neighbors within the 5x5 window */
	for (r = row-2; r <= row+2; r++)
	  for (c = col-2; c <= col+2; c++)
	    if (r < height && c < width &&
		FC(r,c) == FC(row,col) && BAYER(r,c))
	      tot += (n++,BAYER(r,c));
	if (n) BAYER(row,col) = tot/n;
      }
#ifdef LIBRAW_LIBRARY_BUILD
  /* progress callback: phase end */
  RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES,1,2);
#endif
}
0
332,546
/*
 * Read nb_sectors starting at sector_num from a VDI image.  The request
 * is processed one block at a time: unallocated blocks read back as
 * zeros, allocated blocks are read from their mapped location in the
 * image file.  Returns 0 on success or a negative error from bdrv_read().
 */
static int vdi_co_read(BlockDriverState *bs,
        int64_t sector_num, uint8_t *buf, int nb_sectors)
{
    BDRVVdiState *s = bs->opaque;
    int ret;

    logout("\n");

    for (;;) {
        uint32_t block_index = sector_num / s->block_sectors;
        uint32_t sector_in_block = sector_num % s->block_sectors;
        uint32_t n_sectors = s->block_sectors - sector_in_block;
        uint32_t bmap_entry;

        /* Do not cross a block boundary in one step */
        if (n_sectors > nb_sectors) {
            n_sectors = nb_sectors;
        }

        logout("will read %u sectors starting at sector %" PRIu64 "\n",
               n_sectors, sector_num);

        /* prepare next AIO request */
        bmap_entry = le32_to_cpu(s->bmap[block_index]);
        if (!VDI_IS_ALLOCATED(bmap_entry)) {
            /* Block not allocated, return zeros, no need to wait. */
            memset(buf, 0, n_sectors * SECTOR_SIZE);
            ret = 0;
        } else {
            uint64_t offset = s->header.offset_data / SECTOR_SIZE +
                              (uint64_t)bmap_entry * s->block_sectors +
                              sector_in_block;
            ret = bdrv_read(bs->file, offset, buf, n_sectors);
        }
        logout("%u sectors read\n", n_sectors);

        nb_sectors -= n_sectors;
        sector_num += n_sectors;
        buf += n_sectors * SECTOR_SIZE;

        /* Stop on error or once the request is fully served */
        if (ret < 0 || nb_sectors <= 0) {
            return ret;
        }
    }
}
0
135,391
/**
 * Submit a single fully-built SDMA tx request to an engine ring.
 *
 * Returns 0 when submitted, -EINVAL if the caller did not supply the
 * entire packet (tx->tlen != 0), -ECOMM when the engine is not running
 * (the request is queued on the flush list instead), or the result of
 * sdma_check_progress() when the ring has no room.
 *
 * Locking: takes sde->tail_lock; flushlist_lock is nested inside it on
 * the not-running path.
 */
int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait_work *wait,
		    struct sdma_txreq *tx,
		    bool pkts_sent)
{
	int ret = 0;
	u16 tail;
	unsigned long flags;

	/* user should have supplied entire packet */
	if (unlikely(tx->tlen))
		return -EINVAL;
	tx->wait = iowait_ioww_to_iow(wait);
	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	/* Engine must be running to accept descriptors */
	if (unlikely(!__sdma_running(sde)))
		goto unlock_noconn;
	/* Need enough free descriptors for the whole request */
	if (unlikely(tx->num_desc > sde->desc_avail))
		goto nodesc;
	tail = submit_tx(sde, tx);
	if (wait)
		iowait_sdma_inc(iowait_ioww_to_iow(wait));
	sdma_update_tail(sde, tail);
unlock:
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	return ret;
unlock_noconn:
	/* Engine not running: park the request on the flush list and let
	 * the flush worker complete it. */
	if (wait)
		iowait_sdma_inc(iowait_ioww_to_iow(wait));
	tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
	spin_lock(&sde->flushlist_lock);
	list_add_tail(&tx->list, &sde->flushlist);
	spin_unlock(&sde->flushlist_lock);
	iowait_inc_wait_count(wait, tx->num_desc);
	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
	ret = -ECOMM;
	goto unlock;
nodesc:
	/* Ring full: try to reclaim descriptors; -EAGAIN means retry */
	ret = sdma_check_progress(sde, wait, tx, pkts_sent);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto unlock;
}
0
505,796
/*
 * FIPS continuous RNG test: reject a DRBG output block that is identical
 * to the previous one (a "stuck" generator).  Returns 1 when the block
 * is acceptable, 0 on failure (and marks the module self-test failed).
 */
int fips_drbg_cprng_test(DRBG_CTX *dctx, const unsigned char *out)
{
    /* No CPRNG in test mode */
    if (dctx->xflags & DRBG_FLAG_TEST)
        return 1;

    /* Check block is valid: should never happen */
    if (!dctx->lb_valid) {
        FIPSerr(FIPS_F_FIPS_DRBG_CPRNG_TEST, FIPS_R_INTERNAL_ERROR);
        fips_set_selftest_fail();
        return 0;
    }

    /* Self-test hook: force the "stuck" path by copying out over lb */
    if (drbg_stick)
        memcpy(dctx->lb, out, dctx->blocklength);

    /* Check against last block: fail if match */
    if (memcmp(dctx->lb, out, dctx->blocklength) == 0) {
        FIPSerr(FIPS_F_FIPS_DRBG_CPRNG_TEST, FIPS_R_DRBG_STUCK);
        fips_set_selftest_fail();
        return 0;
    }

    /* Save last block for next comparison */
    memcpy(dctx->lb, out, dctx->blocklength);
    return 1;
}
0
194,926
/* {{{ proto void ini_restore(string varname)
   Restore a configuration option to its original (per-directory or
   master) value. */
PHP_FUNCTION(ini_restore)
{
	char *varname;
	int varname_len;

	/* Expect exactly one string argument: the ini entry name */
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &varname, &varname_len) == FAILURE) {
		return;
	}

	/* varname_len+1 includes the terminating NUL, matching how the ini
	 * hash stores its keys */
	zend_restore_ini_entry(varname, varname_len+1, PHP_INI_STAGE_RUNTIME);
}
0
462,070
/* Allocate zero-initialized memory, reporting the usable size through
 * *usable; invokes the OOM handler (which aborts) when allocation fails,
 * so a non-NULL pointer is always returned to the caller. */
void *zcalloc_usable(size_t size, size_t *usable) {
    void *p = ztrycalloc_usable(size, usable);

    if (p == NULL)
        zmalloc_oom_handler(size);
    return p;
}
0
484,185
/*
 * Send a burst of single-segment mbufs with the SEND opcode, choosing per
 * packet between full inline, partial (minimal-mode) inline, title-WQEBB
 * partial inline, and no inline.  Loops until the burst is exhausted, the
 * queue runs out of elts/WQEs, or a packet needs a different send routine
 * (signalled by mlx5_tx_able_to_empw()).
 */
mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
			  struct rte_mbuf **__rte_restrict pkts,
			  unsigned int pkts_n,
			  struct mlx5_txq_local *__rte_restrict loc,
			  unsigned int olx)
{
	/*
	 * Subroutine is the part of mlx5_tx_burst_single()
	 * and sends single-segment packet with SEND opcode.
	 */
	MLX5_ASSERT(loc->elts_free && loc->wqe_free);
	MLX5_ASSERT(pkts_n > loc->pkts_sent);
	pkts += loc->pkts_sent + 1;
	pkts_n -= loc->pkts_sent;
	for (;;) {
		struct mlx5_wqe *__rte_restrict wqe;
		enum mlx5_txcmp_code ret;

		MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
		if (MLX5_TXOFF_CONFIG(TXPP)) {
			enum mlx5_txcmp_code wret;

			/* Generate WAIT for scheduling if requested. */
			wret = mlx5_tx_schedule_send(txq, loc, olx);
			if (wret == MLX5_TXCMP_CODE_EXIT)
				return MLX5_TXCMP_CODE_EXIT;
			if (wret == MLX5_TXCMP_CODE_ERROR)
				return MLX5_TXCMP_CODE_ERROR;
		}
		if (MLX5_TXOFF_CONFIG(INLINE)) {
			unsigned int inlen, vlan = 0;

			inlen = rte_pktmbuf_data_len(loc->mbuf);
			if (MLX5_TXOFF_CONFIG(VLAN) &&
			    loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
				/* Software VLAN insertion adds to inline len */
				vlan = sizeof(struct rte_vlan_hdr);
				inlen += vlan;
				static_assert((sizeof(struct rte_vlan_hdr) +
					       sizeof(struct rte_ether_hdr)) ==
					       MLX5_ESEG_MIN_INLINE_SIZE,
					       "invalid min inline data size");
			}
			/*
			 * If inlining is enabled at configuration time
			 * the limit must be not less than minimal size.
			 * Otherwise we would do extra check for data
			 * size to avoid crashes due to length overflow.
			 */
			MLX5_ASSERT(txq->inlen_send >=
				    MLX5_ESEG_MIN_INLINE_SIZE);
			if (inlen <= txq->inlen_send) {
				unsigned int seg_n, wqe_n;

				rte_prefetch0(rte_pktmbuf_mtod
						(loc->mbuf, uint8_t *));
				/* Check against minimal length. */
				if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
					return MLX5_TXCMP_CODE_ERROR;
				if (loc->mbuf->ol_flags &
				    PKT_TX_DYNF_NOINLINE) {
					/*
					 * The hint flag not to inline packet
					 * data is set. Check whether we can
					 * follow the hint.
					 */
					if ((!MLX5_TXOFF_CONFIG(EMPW) &&
					    txq->inlen_mode) ||
					    (MLX5_TXOFF_CONFIG(MPW) &&
					     txq->inlen_mode)) {
						if (inlen <= txq->inlen_send)
							goto single_inline;
						/*
						 * The hardware requires the
						 * minimal inline data header.
						 */
						goto single_min_inline;
					}
					if (MLX5_TXOFF_CONFIG(VLAN) &&
					    vlan && !txq->vlan_en) {
						/*
						 * We must insert VLAN tag
						 * by software means.
						 */
						goto single_part_inline;
					}
					goto single_no_inline;
				}
single_inline:
				/*
				 * Completely inlined packet data WQE:
				 * - Control Segment, SEND opcode
				 * - Ethernet Segment, no VLAN insertion
				 * - Data inlined, VLAN optionally inserted
				 * - Alignment to MLX5_WSEG_SIZE
				 * Have to estimate amount of WQEBBs
				 */
				seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
					 MLX5_ESEG_MIN_INLINE_SIZE +
					 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
				/* Check if there are enough WQEBBs. */
				wqe_n = (seg_n + 3) / 4;
				if (wqe_n > loc->wqe_free)
					return MLX5_TXCMP_CODE_EXIT;
				wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
				loc->wqe_last = wqe;
				mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
						  MLX5_OPCODE_SEND, olx);
				mlx5_tx_eseg_data(txq, loc, wqe,
						  vlan, inlen, 0, olx);
				txq->wqe_ci += wqe_n;
				loc->wqe_free -= wqe_n;
				/*
				 * Packet data are completely inlined,
				 * free the packet immediately.
				 */
				rte_pktmbuf_free_seg(loc->mbuf);
			} else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
				     MLX5_TXOFF_CONFIG(MPW)) &&
					txq->inlen_mode) {
				/*
				 * If minimal inlining is requested the eMPW
				 * feature should be disabled due to data is
				 * inlined into Ethernet Segment, which can
				 * not contain inlined data for eMPW due to
				 * segment shared for all packets.
				 */
				struct mlx5_wqe_dseg *__rte_restrict dseg;
				unsigned int ds;
				uint8_t *dptr;

				/*
				 * The inline-mode settings require
				 * to inline the specified amount of
				 * data bytes to the Ethernet Segment.
				 * We should check the free space in
				 * WQE ring buffer to inline partially.
				 */
single_min_inline:
				MLX5_ASSERT(txq->inlen_send >=
					    txq->inlen_mode);
				MLX5_ASSERT(inlen > txq->inlen_mode);
				MLX5_ASSERT(txq->inlen_mode >=
					    MLX5_ESEG_MIN_INLINE_SIZE);
				/*
				 * Check whether there are enough free WQEBBs:
				 * - Control Segment
				 * - Ethernet Segment
				 * - First Segment of inlined Ethernet data
				 * - ... data continued ...
				 * - Finishing Data Segment of pointer type
				 */
				ds = (MLX5_WQE_CSEG_SIZE +
				      MLX5_WQE_ESEG_SIZE +
				      MLX5_WQE_DSEG_SIZE +
				      txq->inlen_mode -
				      MLX5_ESEG_MIN_INLINE_SIZE +
				      MLX5_WQE_DSEG_SIZE +
				      MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
				if (loc->wqe_free < ((ds + 3) / 4))
					return MLX5_TXCMP_CODE_EXIT;
				/*
				 * Build the ordinary SEND WQE:
				 * - Control Segment
				 * - Ethernet Segment, inline inlen_mode bytes
				 * - Data Segment of pointer type
				 */
				wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
				loc->wqe_last = wqe;
				mlx5_tx_cseg_init(txq, loc, wqe, ds,
						  MLX5_OPCODE_SEND, olx);
				dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
							 txq->inlen_mode,
							 0, olx);
				dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
				       txq->inlen_mode - vlan;
				inlen -= txq->inlen_mode;
				mlx5_tx_dseg_ptr(txq, loc, dseg,
						 dptr, inlen, olx);
				/*
				 * WQE is built, update the loop parameters
				 * and got to the next packet.
				 */
				txq->wqe_ci += (ds + 3) / 4;
				loc->wqe_free -= (ds + 3) / 4;
				/* We have to store mbuf in elts.*/
				MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
				txq->elts[txq->elts_head++ & txq->elts_m] =
						loc->mbuf;
				--loc->elts_free;
			} else {
				uint8_t *dptr;
				unsigned int dlen;

				/*
				 * Partially inlined packet data WQE, we have
				 * some space in title WQEBB, we can fill it
				 * with some packet data. It takes one WQEBB,
				 * it is available, no extra space check:
				 * - Control Segment, SEND opcode
				 * - Ethernet Segment, no VLAN insertion
				 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
				 * - Data Segment, pointer type
				 *
				 * We also get here if VLAN insertion is not
				 * supported by HW, the inline is enabled.
				 */
single_part_inline:
				wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
				loc->wqe_last = wqe;
				mlx5_tx_cseg_init(txq, loc, wqe, 4,
						  MLX5_OPCODE_SEND, olx);
				mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
				dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
				       MLX5_ESEG_MIN_INLINE_SIZE - vlan;
				/*
				 * The length check is performed above, by
				 * comparing with txq->inlen_send. We should
				 * not get overflow here.
				 */
				MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
				dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
				mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
						 dptr, dlen, olx);
				++txq->wqe_ci;
				--loc->wqe_free;
				/* We have to store mbuf in elts.*/
				MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
				txq->elts[txq->elts_head++ & txq->elts_m] =
						loc->mbuf;
				--loc->elts_free;
			}
#ifdef MLX5_PMD_SOFT_COUNTERS
			/* Update sent data bytes counter. */
			txq->stats.obytes += vlan +
					rte_pktmbuf_data_len(loc->mbuf);
#endif
		} else {
			/*
			 * No inline at all, it means the CPU cycles saving
			 * is prioritized at configuration, we should not
			 * copy any packet data to WQE.
			 *
			 * SEND WQE, one WQEBB:
			 * - Control Segment, SEND opcode
			 * - Ethernet Segment, optional VLAN, no inline
			 * - Data Segment, pointer type
			 */
single_no_inline:
			wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
			loc->wqe_last = wqe;
			mlx5_tx_cseg_init(txq, loc, wqe, 3,
					  MLX5_OPCODE_SEND, olx);
			mlx5_tx_eseg_none(txq, loc, wqe, olx);
			mlx5_tx_dseg_ptr
				(txq, loc, &wqe->dseg[0],
				 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
				 rte_pktmbuf_data_len(loc->mbuf), olx);
			++txq->wqe_ci;
			--loc->wqe_free;
			/*
			 * We should not store mbuf pointer in elts
			 * if no inlining is configured, this is done
			 * by calling routine in a batch copy.
			 */
			MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
			--loc->elts_free;
#ifdef MLX5_PMD_SOFT_COUNTERS
			/* Update sent data bytes counter. */
			txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
			if (MLX5_TXOFF_CONFIG(VLAN) &&
			    loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
				txq->stats.obytes +=
					sizeof(struct rte_vlan_hdr);
#endif
		}
		++loc->pkts_sent;
		--pkts_n;
		/* Stop when burst done or queue resources exhausted */
		if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
			return MLX5_TXCMP_CODE_EXIT;
		loc->mbuf = *pkts++;
		if (pkts_n > 1)
			rte_prefetch0(*pkts);
		/* Next packet may need a different send routine */
		ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
		if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
			return ret;
	}
	MLX5_ASSERT(false);
}
0
348,946
static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev) { struct device_node *np = dev->of_node; struct device_node *pp; struct ad5755_platform_data *pdata; unsigned int tmp; unsigned int tmparray[3]; int devnr, i; pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return NULL; pdata->ext_dc_dc_compenstation_resistor = of_property_read_bool(np, "adi,ext-dc-dc-compenstation-resistor"); if (!of_property_read_u32(np, "adi,dc-dc-phase", &tmp)) pdata->dc_dc_phase = tmp; else pdata->dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE; pdata->dc_dc_freq = AD5755_DC_DC_FREQ_410kHZ; if (!of_property_read_u32(np, "adi,dc-dc-freq-hz", &tmp)) { for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_freq_table); i++) { if (tmp == ad5755_dcdc_freq_table[i][0]) { pdata->dc_dc_freq = ad5755_dcdc_freq_table[i][1]; break; } } if (i == ARRAY_SIZE(ad5755_dcdc_freq_table)) { dev_err(dev, "adi,dc-dc-freq out of range selecting 410kHz"); } } pdata->dc_dc_maxv = AD5755_DC_DC_MAXV_23V; if (!of_property_read_u32(np, "adi,dc-dc-max-microvolt", &tmp)) { for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_maxv_table); i++) { if (tmp == ad5755_dcdc_maxv_table[i][0]) { pdata->dc_dc_maxv = ad5755_dcdc_maxv_table[i][1]; break; } } if (i == ARRAY_SIZE(ad5755_dcdc_maxv_table)) { dev_err(dev, "adi,dc-dc-maxv out of range selecting 23V"); } } devnr = 0; for_each_child_of_node(np, pp) { if (devnr > AD5755_NUM_CHANNELS) { dev_err(dev, "There is to many channels defined in DT\n"); goto error_out; } if (!of_property_read_u32(pp, "adi,mode", &tmp)) pdata->dac[devnr].mode = tmp; else pdata->dac[devnr].mode = AD5755_MODE_CURRENT_4mA_20mA; pdata->dac[devnr].ext_current_sense_resistor = of_property_read_bool(pp, "adi,ext-current-sense-resistor"); pdata->dac[devnr].enable_voltage_overrange = of_property_read_bool(pp, "adi,enable-voltage-overrange"); if (!of_property_read_u32_array(pp, "adi,slew", tmparray, 3)) { pdata->dac[devnr].slew.enable = tmparray[0]; pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k; for 
(i = 0; i < ARRAY_SIZE(ad5755_slew_rate_table); i++) { if (tmparray[1] == ad5755_slew_rate_table[i][0]) { pdata->dac[devnr].slew.rate = ad5755_slew_rate_table[i][1]; break; } } if (i == ARRAY_SIZE(ad5755_slew_rate_table)) { dev_err(dev, "channel %d slew rate out of range selecting 64kHz", devnr); } pdata->dac[devnr].slew.step_size = AD5755_SLEW_STEP_SIZE_1; for (i = 0; i < ARRAY_SIZE(ad5755_slew_step_table); i++) { if (tmparray[2] == ad5755_slew_step_table[i][0]) { pdata->dac[devnr].slew.step_size = ad5755_slew_step_table[i][1]; break; } } if (i == ARRAY_SIZE(ad5755_slew_step_table)) { dev_err(dev, "channel %d slew step size out of range selecting 1 LSB", devnr); } } else { pdata->dac[devnr].slew.enable = false; pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k; pdata->dac[devnr].slew.step_size = AD5755_SLEW_STEP_SIZE_1; } devnr++; } return pdata; error_out: devm_kfree(dev, pdata); return NULL; }
1
497,008
TF_LITE_MICRO_TEST(GatherNd_BatchedIndexingIntoMatrix2) {
  // Dimension arrays use TFLite-micro's packed form: element 0 holds the
  // rank, the remaining elements hold the per-axis sizes.
  // Input: a 2x2 matrix.
  int input_dims[] = {2, 2, 2};
  const float input_data[] = {1.1, 1.2, 2.1, 2.2};

  // Indices: two batches, each selecting one full (row, col) coordinate.
  int index_dims[] = {3, 2, 1, 2};
  const int32_t index_data[] = {0, 0, 1, 1};

  // Expected gather result: the main diagonal of the input.
  const float golden_data[] = {1.1, 2.2};

  // Output shape is filled in by the op; rank 3 placeholder.
  int output_dims[] = {3, 0, 0, 0};
  float output_data[2];

  tflite::testing::TestGatherNd<float, int32_t>(
      input_dims, input_data, index_dims, index_data, output_dims, output_data,
      golden_data);
}
0
217,150
// Out-of-line empty destructor; no members require explicit cleanup here.
MetricsLog::~MetricsLog() {
}
0
266,793
/*
 * Ettercap dissector for the PostgreSQL wire protocol.
 *
 * Client -> server direction: detects a StartupMessage (protocol 3.0 magic
 * "\x00\x03\x00\x00" plus "user"/"database" parameters), creates a session
 * in WAIT_AUTH state, and later captures the PasswordMessage ('p') carrying
 * either an MD5 response or a clear-text password.
 *
 * Server -> client direction: watches AuthenticationMD5Password /
 * AuthenticationCleartextPassword ('R') replies to learn the auth type and
 * (for MD5) the 4-byte salt, moving the session to WAIT_RESPONSE.
 *
 * Always returns NULL (nothing is re-injected into the decode chain).
 */
FUNC_DECODER(dissector_postgresql)
{
   DECLARE_DISP_PTR(ptr);
   struct ec_session *s = NULL;
   void *ident = NULL;
   char tmp[MAX_ASCII_ADDR_LEN];
   struct postgresql_status *conn_status;

   /* don't complain about unused var */
   (void) DECODE_DATA;
   (void) DECODE_DATALEN;
   (void) DECODED_LEN;

   if (FROM_CLIENT("postgresql", PACKET)) {
      /* Need at least the 4-byte length word. */
      if (PACKET->DATA.len < 4)
         return NULL;

      dissect_create_ident(&ident, PACKET, DISSECT_CODE(dissector_postgresql));

      /* if the session does not exist... */
      if (session_get(&s, ident, DISSECT_IDENT_LEN) == -ENOTFOUND) {
         /* search for user and database strings, look for StartupMessage */
         unsigned char *u = memmem(ptr, PACKET->DATA.len, "user", 4);
         unsigned char *d = memmem(ptr, PACKET->DATA.len, "database", 8);

         /* Protocol version 3.0 magic follows the length word. */
         if (!memcmp(ptr + 4, "\x00\x03\x00\x00", 4) && u && d) {
            /* create the new session */
            dissect_create_session(&s, PACKET, DISSECT_CODE(dissector_postgresql));

            /* remember the state (used later) */
            SAFE_CALLOC(s->data, 1, sizeof(struct postgresql_status));
            conn_status = (struct postgresql_status *) s->data;
            conn_status->status = WAIT_AUTH;

            /* user is always null-terminated */
            strncpy((char*)conn_status->user, (char*)(u + 5), 65);
            conn_status->user[64] = 0;

            /* database is always null-terminated */
            strncpy((char*)conn_status->database, (char*)(d + 9), 65);
            conn_status->database[64] = 0;

            /* save the session */
            session_put(s);
         }
      } else {
         conn_status = (struct postgresql_status *) s->data;
         if (conn_status->status == WAIT_RESPONSE) {

            /* check for PasswordMessage packet */
            if (ptr[0] == 'p' && conn_status->type == MD5) {
               DEBUG_MSG("\tDissector_postgresql RESPONSE type is MD5");

               /* MD5 PasswordMessage has a fixed length of 0x28 (40). */
               if(memcmp(ptr + 1, "\x00\x00\x00\x28", 4)) {
                  DEBUG_MSG("\tDissector_postgresql BUG, expected length is 40");
                  return NULL;
               }
               if (PACKET->DATA.len < 40) {
                  DEBUG_MSG("\tDissector_postgresql BUG, expected length is 40");
                  return NULL;
               }

               /* skip 'p', length and the "md5" prefix, copy the 32 hex chars */
               memcpy(conn_status->hash, ptr + 5 + 3, 32);
               conn_status->hash[32] = 0;

               DISSECT_MSG("%s:$postgres$%s*%s*%s:%s:%d\n", conn_status->user,
                           conn_status->user, conn_status->salt, conn_status->hash,
                           ip_addr_ntoa(&PACKET->L3.dst, tmp), ntohs(PACKET->L4.dst));
               dissect_wipe_session(PACKET, DISSECT_CODE(dissector_postgresql));
            } else if (ptr[0] == 'p' && conn_status->type == CT) {
               int length;

               DEBUG_MSG("\tDissector_postgresql RESPONSE type is clear-text!");

               /* length word includes itself; subtract to get password size */
               GET_ULONG_BE(length, ptr, 1);
               length -= 4;
               if (length < 0 || length > 65 || PACKET->DATA.len < length+5) {
                  dissect_wipe_session(PACKET, DISSECT_CODE(dissector_postgresql));
                  return NULL;
               }
               snprintf((char*)conn_status->password, length+1, "%s", (char*)(ptr + 5));

               DISSECT_MSG("PostgreSQL credentials:%s-%d:%s:%s\n",
                           ip_addr_ntoa(&PACKET->L3.dst, tmp), ntohs(PACKET->L4.dst),
                           conn_status->user, conn_status->password);
               dissect_wipe_session(PACKET, DISSECT_CODE(dissector_postgresql));
            }
         }
      }
   } else {
      /* Packets coming from the server */
      if (PACKET->DATA.len < 9)
         return NULL;

      dissect_create_ident(&ident, PACKET, DISSECT_CODE(dissector_postgresql));

      if (session_get(&s, ident, DISSECT_IDENT_LEN) == ESUCCESS) {
         conn_status = (struct postgresql_status *) s->data;

         /* 'R' len=12 auth=5: AuthenticationMD5Password (salt follows) */
         if (conn_status->status == WAIT_AUTH &&
             ptr[0] == 'R' && !memcmp(ptr + 1, "\x00\x00\x00\x0c", 4) &&
             !memcmp(ptr + 5, "\x00\x00\x00\x05", 4)) {
            conn_status->status = WAIT_RESPONSE;
            conn_status->type = MD5;
            DEBUG_MSG("\tDissector_postgresql AUTH type is MD5");
            hex_encode(ptr + 9, 4, conn_status->salt);   /* save salt */
         /* 'R' len=8 auth=3: AuthenticationCleartextPassword */
         } else if (conn_status->status == WAIT_AUTH &&
             ptr[0] == 'R' && !memcmp(ptr + 1, "\x00\x00\x00\x08", 4) &&
             !memcmp(ptr + 5, "\x00\x00\x00\x03", 4)) {
            conn_status->status = WAIT_RESPONSE;
            conn_status->type = CT;
            DEBUG_MSG("\tDissector_postgresql AUTH type is clear-text!");
         }
      }
   }

   SAFE_FREE(ident);
   return NULL;
}
0
262,120
/*
 * pixSetLowContrast() - blank both 8 bpp images if no pixel pair differs
 * by at least @mindiff; otherwise zero only the low-contrast pixel pairs.
 *
 * First pass: scan until one pair with |val1 - val2| >= mindiff is found
 * (early exit).  If none exists, both images are cleared entirely and 1
 * is returned.  Second pass: set every pair with difference < mindiff to 0
 * in both images.
 *
 * Returns 0 on normal completion (including the mindiff > 254 no-op case),
 * 1 if the images were fully cleared or on argument error.
 */
pixSetLowContrast(PIX *pixs1, PIX *pixs2, l_int32 mindiff)
{
l_int32    i, j, w, h, d, wpl, val1, val2, found;
l_uint32  *data1, *data2, *line1, *line2;

    PROCNAME("pixSetLowContrast");

    if (!pixs1 || !pixs2)
        return ERROR_INT("pixs1 and pixs2 not both defined", procName, 1);
    if (pixSizesEqual(pixs1, pixs2) == 0)
        return ERROR_INT("pixs1 and pixs2 not equal size", procName, 1);
    pixGetDimensions(pixs1, &w, &h, &d);
    if (d != 8)
        return ERROR_INT("depth not 8 bpp", procName, 1);
    /* No 8-bit pair can differ by more than 254; nothing to do. */
    if (mindiff > 254) return 0;

    data1 = pixGetData(pixs1);
    data2 = pixGetData(pixs2);
    wpl = pixGetWpl(pixs1);
    found = 0;  /* init to not finding any diffs >= mindiff */
    for (i = 0; i < h; i++) {
        line1 = data1 + i * wpl;
        line2 = data2 + i * wpl;
        for (j = 0; j < w; j++) {
            val1 = GET_DATA_BYTE(line1, j);
            val2 = GET_DATA_BYTE(line2, j);
            if (L_ABS(val1 - val2) >= mindiff) {
                found = 1;
                break;
            }
        }
        if (found) break;
    }
    if (!found) {
        L_WARNING("no pixel pair diffs as large as mindiff\n", procName);
        pixClearAll(pixs1);
        pixClearAll(pixs2);
        return 1;
    }

    /* Zero out every low-contrast pair in both images. */
    for (i = 0; i < h; i++) {
        line1 = data1 + i * wpl;
        line2 = data2 + i * wpl;
        for (j = 0; j < w; j++) {
            val1 = GET_DATA_BYTE(line1, j);
            val2 = GET_DATA_BYTE(line2, j);
            if (L_ABS(val1 - val2) < mindiff) {
                SET_DATA_BYTE(line1, j, 0);
                SET_DATA_BYTE(line2, j, 0);
            }
        }
    }
    return 0;
}
0
359,586
/*
 * Build a tooltip string describing the state of an active VPN connection.
 * The connection's display name is looked up by matching the active
 * connection's scope and D-Bus path against all known connections.
 * Returns a newly-allocated string (caller frees), or NULL if no matching
 * connection/ID is found or the state has no message.
 */
get_tip_for_vpn (NMActiveConnection *active, NMVPNConnectionState state, NMApplet *applet)
{
	NMConnectionScope scope;
	char *tip = NULL;
	const char *path, *id = NULL;
	GSList *iter, *list;

	scope = nm_active_connection_get_scope (active);
	path = nm_active_connection_get_connection (active);
	g_return_val_if_fail (path != NULL, NULL);

	/* Resolve the active connection's path to its human-readable ID. */
	list = applet_get_all_connections (applet);
	for (iter = list; iter; iter = g_slist_next (iter)) {
		NMConnection *candidate = NM_CONNECTION (iter->data);
		NMSettingConnection *s_con;

		if (   (nm_connection_get_scope (candidate) == scope)
		    && !strcmp (nm_connection_get_path (candidate), path)) {
			s_con = NM_SETTING_CONNECTION (nm_connection_get_setting (candidate, NM_TYPE_SETTING_CONNECTION));
			id = nm_setting_connection_get_id (s_con);
			break;
		}
	}
	g_slist_free (list);

	if (!id)
		return NULL;

	switch (state) {
	case NM_VPN_CONNECTION_STATE_CONNECT:
	case NM_VPN_CONNECTION_STATE_PREPARE:
		tip = g_strdup_printf (_("Starting VPN connection '%s'..."), id);
		break;
	case NM_VPN_CONNECTION_STATE_NEED_AUTH:
		tip = g_strdup_printf (_("User authentication required for VPN connection '%s'..."), id);
		break;
	case NM_VPN_CONNECTION_STATE_IP_CONFIG_GET:
		tip = g_strdup_printf (_("Requesting a VPN address for '%s'..."), id);
		break;
	case NM_VPN_CONNECTION_STATE_ACTIVATED:
		tip = g_strdup_printf (_("VPN connection '%s' active"), id);
		break;
	default:
		break;
	}

	return tip;
}
0
350,531
    /**
     * Explain implementation for the "distinct" command.
     *
     * Acquires collection read locks (view-permitted), parses the distinct
     * request, and either (a) for a view: releases the locks and delegates
     * to the equivalent aggregation pipeline, or (b) for a real collection:
     * builds the distinct plan executor and emits its explain stages at the
     * requested verbosity into the reply builder.
     */
    Status explain(OperationContext* opCtx,
                   const OpMsgRequest& request,
                   ExplainOptions::Verbosity verbosity,
                   rpc::ReplyBuilderInterface* result) const override {
        std::string dbname = request.getDatabase().toString();
        const BSONObj& cmdObj = request.body;
        // Acquire locks and resolve possible UUID. The RAII object is optional, because in the case
        // of a view, the locks need to be released.
        boost::optional<AutoGetCollectionForReadCommand> ctx;
        ctx.emplace(opCtx,
                    CommandHelpers::parseNsOrUUID(dbname, cmdObj),
                    AutoGetCollection::ViewMode::kViewsPermitted);
        const auto nss = ctx->getNss();

        const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
        auto parsedDistinct =
            uassertStatusOK(ParsedDistinct::parse(opCtx, nss, cmdObj, extensionsCallback, true));

        if (ctx->getView()) {
            // Relinquish locks. The aggregation command will re-acquire them.
            ctx.reset();

            auto viewAggregation = parsedDistinct.asAggregationCommand();
            if (!viewAggregation.isOK()) {
                return viewAggregation.getStatus();
            }

            auto viewAggRequest =
                AggregationRequest::parseFromBSON(nss, viewAggregation.getValue(), verbosity);
            if (!viewAggRequest.isOK()) {
                return viewAggRequest.getStatus();
            }

            return runAggregate(
                opCtx, nss, viewAggRequest.getValue(), viewAggregation.getValue(), result);
        }

        Collection* const collection = ctx->getCollection();

        auto executor = uassertStatusOK(
            getExecutorDistinct(opCtx, collection, QueryPlannerParams::DEFAULT, &parsedDistinct));

        auto bodyBuilder = result->getBodyBuilder();
        Explain::explainStages(executor.get(), collection, verbosity, &bodyBuilder);
        return Status::OK();
    }
1
307,215
// Browser-process entry point for content_shell.
//
// In layout-test mode (--dump-render-tree) it creates a throwaway profile
// directory, then drives one browser-main-loop run per test URL read from
// the command line / stdin, resetting state between tests.  Otherwise it
// runs the normal interactive browser loop.  Returns the process exit code.
int ShellBrowserMain(const content::MainFunctionParams& parameters) {
  bool layout_test_mode =
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kDumpRenderTree);
  base::ScopedTempDir browser_context_path_for_layout_tests;

  if (layout_test_mode) {
    // Layout tests get an isolated, unique data dir so runs don't interfere.
    CHECK(browser_context_path_for_layout_tests.CreateUniqueTempDir());
    CHECK(!browser_context_path_for_layout_tests.path().MaybeAsASCII().empty());
    CommandLine::ForCurrentProcess()->AppendSwitchASCII(
        switches::kContentShellDataPath,
        browser_context_path_for_layout_tests.path().MaybeAsASCII());
  }

  scoped_ptr<content::BrowserMainRunner> main_runner_(
      content::BrowserMainRunner::Create());

  int exit_code = main_runner_->Initialize(parameters);

  if (exit_code >= 0)
    return exit_code;

  if (CommandLine::ForCurrentProcess()->HasSwitch(
        switches::kCheckLayoutTestSysDeps)) {
    // Dependency check only: spin the loop once and exit cleanly.
    MessageLoop::current()->PostTask(FROM_HERE, MessageLoop::QuitClosure());
    main_runner_->Run();
    main_runner_->Shutdown();
    return 0;
  }

  if (layout_test_mode) {
    content::WebKitTestController test_controller;
    std::string test_string;
    CommandLine::StringVector args =
        CommandLine::ForCurrentProcess()->GetArgs();
    size_t command_line_position = 0;
    bool ran_at_least_once = false;

#if defined(OS_ANDROID)
    std::cout << "#READY\n";
    std::cout.flush();
#endif

    FilePath original_cwd;
    {
      // We're outside of the message loop here, and this is a test.
      base::ThreadRestrictions::ScopedAllowIO allow_io;
      file_util::GetCurrentDirectory(&original_cwd);
    }

    // One message-loop run per test; "QUIT" terminates the harness.
    while (GetNextTest(args, &command_line_position, &test_string)) {
      if (test_string.empty())
        continue;
      if (test_string == "QUIT")
        break;

      bool enable_pixel_dumps;
      std::string pixel_hash;
      FilePath cwd;
      GURL test_url = GetURLForLayoutTest(
          test_string, &cwd, &enable_pixel_dumps, &pixel_hash);
      if (!content::WebKitTestController::Get()->PrepareForLayoutTest(
              test_url, cwd, enable_pixel_dumps, pixel_hash)) {
        break;
      }
      ran_at_least_once = true;
      main_runner_->Run();

      {
        // We're outside of the message loop here, and this is a test.
        base::ThreadRestrictions::ScopedAllowIO allow_io;
        file_util::SetCurrentDirectory(original_cwd);
      }

      if (!content::WebKitTestController::Get()->ResetAfterLayoutTest())
        break;
    }
    if (!ran_at_least_once) {
      // Nothing ran; still pump the loop once so shutdown paths execute.
      MessageLoop::current()->PostTask(FROM_HERE, MessageLoop::QuitClosure());
      main_runner_->Run();
    }
    exit_code = 0;
  } else {
    exit_code = main_runner_->Run();
  }

  main_runner_->Shutdown();

  return exit_code;
}
0
301,292
/*
 * pnm_getsint() - read a word of @wordsize bits from @in and interpret it
 * as a signed value.
 *
 * Values with the sign bit set are rejected (signed PNM data is not fully
 * supported).  On success the value is stored through @val (if non-null)
 * and 0 is returned; -1 on read error or unsupported data.
 *
 * Fix: the sign-bit mask was computed as "1 << (wordsize - 1)", which is
 * undefined behavior for wordsize == 32 because the literal 1 is a signed
 * int (CERT INT34-C).  Build the mask in the unsigned type instead.
 */
static int pnm_getsint(jas_stream_t *in, int wordsize, int_fast32_t *val)
{
	uint_fast32_t tmpval;

	if (pnm_getuint(in, wordsize, &tmpval)) {
		return -1;
	}
	if ((tmpval & (((uint_fast32_t) 1) << (wordsize - 1))) != 0) {
		jas_eprintf("PNM decoder does not fully support signed data\n");
		return -1;
	}
	if (val) {
		*val = tmpval;
	}
	return 0;
}
0
301,403
// Construct a load job binding an opaque library handle to the index of
// the entry it should load.
ApcLoadJob(void *handle, int index) : m_handle(handle), m_index(index) {}
0
70,221
/*
 * __ext4_error_inode() - report a filesystem error tied to a specific inode.
 *
 * Records the inode (and optional block) of the last error in the on-disk
 * superblock, prints a rate-limited KERN_CRIT message including caller
 * function/line and the current task, then saves the error info and invokes
 * the filesystem-wide error handler (which may remount read-only or panic,
 * per mount options).
 */
void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	/* Remember where the most recent error occurred. */
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	es->s_last_error_block = cpu_to_le64(block);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}
0
483,997
/*
 * memhp_online_type_from_str() - translate a sysfs online-type string into
 * its index in online_type_to_str[].
 *
 * Returns the matching index, or -EINVAL if @str matches no known type.
 */
int memhp_online_type_from_str(const char *str)
{
	int idx = 0;

	while (idx < ARRAY_SIZE(online_type_to_str)) {
		if (sysfs_streq(str, online_type_to_str[idx]))
			return idx;
		idx++;
	}

	return -EINVAL;
}
0
219,212
// Handle clicks on the closed-captions toggle button.
//
// With exactly one text track, the click toggles that track's visibility
// directly; with multiple tracks, it opens/closes the track-selection list
// instead.  The display state is refreshed and the event consumed before
// delegating to the base-class handler.
void MediaControlToggleClosedCaptionsButtonElement::defaultEventHandler(
    Event* event) {
  if (event->type() == EventTypeNames::click) {
    if (mediaElement().textTracks()->length() == 1) {
      // Single track: toggle it on/off without showing a menu.
      if (mediaElement().textTracks()->hasShowingTracks()) {
        mediaControls().disableShowingTextTracks();
      } else {
        mediaControls().showTextTrackAtIndex(0);
      }
    } else {
      mediaControls().toggleTextTrackList();
    }
    updateDisplayType();
    event->setDefaultHandled();
  }
  MediaControlInputElement::defaultEventHandler(event);
}
0
121,944
/*
 * Extract the sub-field of *value that a write access targets.
 *
 * A non-negative @shift selects bits starting that far up in *value
 * (right shift); a negative @shift moves the value up by -shift bits
 * (left shift).  The result is masked to the access width with @mask.
 */
static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t shifted = (shift >= 0) ? (*value >> shift)
                                    : (*value << -shift);

    return shifted & mask;
}
0
321,683
/*
 * cpu_get_dump_info() - fill @info with ELF dump parameters for PowerPC.
 *
 * Machine type and ELF class come from compile-time constants; endianness
 * is taken from the first CPU's interrupt endianness hook.  pseries
 * machines get a 64KB page size override.  Always returns 0.
 */
int cpu_get_dump_info(ArchDumpInfo *info,
                      const struct GuestPhysBlockList *guest_phys_blocks)
{
    PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    info->d_machine = PPC_ELF_MACHINE;
    info->d_class = ELFCLASS;

    if ((*pcc->interrupts_big_endian)(cpu)) {
        info->d_endian = ELFDATA2MSB;
    } else {
        info->d_endian = ELFDATA2LSB;
    }
    /* 64KB is the max page size for pseries kernel */
    if (strncmp(object_get_typename(qdev_get_machine()),
                "pseries-", 8) == 0) {
        info->page_size = (1U << 16);
    }

    return 0;
}
0
183,031
/*
 * EncodeImage() - LZW-compress the image's colormap indexes and write the
 * resulting GIF raster data (sub-blocks of at most 255 bytes) to @image's
 * blob.  @data_size is the initial LZW code size in bits.
 *
 * The encoder keeps a 5003-entry open-addressed hash table of
 * (prefix code, suffix byte) -> code; when the table fills it emits a
 * clear code and restarts.  Interlaced output reorders rows in the four
 * standard GIF passes (8/8/4/2 row strides).
 *
 * Returns MagickTrue on success, MagickFalse if the work buffers cannot
 * be allocated.
 */
static MagickBooleanType EncodeImage(const ImageInfo *image_info,Image *image,
  const size_t data_size)
{
#define MaxCode(number_bits)  ((one << (number_bits))-1)
#define MaxHashTable  5003
#define MaxGIFBits  12UL
#define MaxGIFTable  (1UL << MaxGIFBits)
/* Append one LZW code to the bit stream, flushing full bytes into the
   current 254-byte packet and widening the code size when free_code
   outgrows the current max_code. */
#define GIFOutputCode(code) \
{ \
  /* \
    Emit a code. \
  */ \
  if (bits > 0) \
    datum|=(code) << bits; \
  else \
    datum=code; \
  bits+=number_bits; \
  while (bits >= 8) \
  { \
    /* \
      Add a character to current packet. \
    */ \
    packet[length++]=(unsigned char) (datum & 0xff); \
    if (length >= 254) \
      { \
        (void) WriteBlobByte(image,(unsigned char) length); \
        (void) WriteBlob(image,length,packet); \
        length=0; \
      } \
    datum>>=8; \
    bits-=8; \
  } \
  if (free_code > max_code) \
    { \
      number_bits++; \
      if (number_bits == MaxGIFBits) \
        max_code=MaxGIFTable; \
      else \
        max_code=MaxCode(number_bits); \
    } \
}

  IndexPacket
    index;

  register ssize_t
    i;

  short
    *hash_code,
    *hash_prefix,
    waiting_code;

  size_t
    bits,
    clear_code,
    datum,
    end_of_information_code,
    free_code,
    length,
    max_code,
    next_pixel,
    number_bits,
    one,
    pass;

  ssize_t
    displacement,
    offset,
    k,
    y;

  unsigned char
    *packet,
    *hash_suffix;

  /*
    Allocate encoder tables.
  */
  assert(image != (Image *) NULL);
  one=1;
  packet=(unsigned char *) AcquireQuantumMemory(256,sizeof(*packet));
  hash_code=(short *) AcquireQuantumMemory(MaxHashTable,sizeof(*hash_code));
  hash_prefix=(short *) AcquireQuantumMemory(MaxHashTable,sizeof(*hash_prefix));
  hash_suffix=(unsigned char *) AcquireQuantumMemory(MaxHashTable,
    sizeof(*hash_suffix));
  if ((packet == (unsigned char *) NULL) || (hash_code == (short *) NULL) ||
      (hash_prefix == (short *) NULL) ||
      (hash_suffix == (unsigned char *) NULL))
    {
      /* Partial-allocation cleanup before failing. */
      if (packet != (unsigned char *) NULL)
        packet=(unsigned char *) RelinquishMagickMemory(packet);
      if (hash_code != (short *) NULL)
        hash_code=(short *) RelinquishMagickMemory(hash_code);
      if (hash_prefix != (short *) NULL)
        hash_prefix=(short *) RelinquishMagickMemory(hash_prefix);
      if (hash_suffix != (unsigned char *) NULL)
        hash_suffix=(unsigned char *) RelinquishMagickMemory(hash_suffix);
      return(MagickFalse);
    }
  /*
    Initialize GIF encoder.
  */
  number_bits=data_size;
  max_code=MaxCode(number_bits);
  clear_code=((short) one << (data_size-1));
  end_of_information_code=clear_code+1;
  free_code=clear_code+2;
  length=0;
  datum=0;
  bits=0;
  for (i=0; i < MaxHashTable; i++)
    hash_code[i]=0;
  GIFOutputCode(clear_code);
  /*
    Encode pixels.
  */
  offset=0;
  pass=0;
  waiting_code=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,offset,image->columns,1,&image->exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    if (y == 0)
      waiting_code=(short) (*indexes);
    for (x=(ssize_t) (y == 0 ? 1 : 0); x < (ssize_t) image->columns; x++)
    {
      /*
        Probe hash table.
      */
      index=(IndexPacket) ((size_t) GetPixelIndex(indexes+x) & 0xff);
      p++;
      k=(ssize_t) (((size_t) index << (MaxGIFBits-8))+waiting_code);
      if (k >= MaxHashTable)
        k-=MaxHashTable;
      next_pixel=MagickFalse;
      displacement=1;
      if (hash_code[k] > 0)
        {
          if ((hash_prefix[k] == waiting_code) &&
              (hash_suffix[k] == (unsigned char) index))
            {
              waiting_code=hash_code[k];
              continue;
            }
          /* Secondary probe: step backwards by MaxHashTable-k. */
          if (k != 0)
            displacement=MaxHashTable-k;
          for ( ; ; )
          {
            k-=displacement;
            if (k < 0)
              k+=MaxHashTable;
            if (hash_code[k] == 0)
              break;
            if ((hash_prefix[k] == waiting_code) &&
                (hash_suffix[k] == (unsigned char) index))
              {
                waiting_code=hash_code[k];
                next_pixel=MagickTrue;
                break;
              }
          }
          if (next_pixel != MagickFalse)
            continue;
        }
      GIFOutputCode((size_t) waiting_code);
      if (free_code < MaxGIFTable)
        {
          hash_code[k]=(short) free_code++;
          hash_prefix[k]=waiting_code;
          hash_suffix[k]=(unsigned char) index;
        }
      else
        {
          /*
            Fill the hash table with empty entries.
          */
          for (k=0; k < MaxHashTable; k++)
            hash_code[k]=0;
          /*
            Reset compressor and issue a clear code.
          */
          free_code=clear_code+2;
          GIFOutputCode(clear_code);
          number_bits=data_size;
          max_code=MaxCode(number_bits);
        }
      waiting_code=(short) index;
    }
    if (image_info->interlace == NoInterlace)
      offset++;
    else
      /* GIF interlace: four passes with row strides 8, 8, 4, 2. */
      switch (pass)
      {
        case 0:
        default:
        {
          offset+=8;
          if (offset >= (ssize_t) image->rows)
            {
              pass++;
              offset=4;
            }
          break;
        }
        case 1:
        {
          offset+=8;
          if (offset >= (ssize_t) image->rows)
            {
              pass++;
              offset=2;
            }
          break;
        }
        case 2:
        {
          offset+=4;
          if (offset >= (ssize_t) image->rows)
            {
              pass++;
              offset=1;
            }
          break;
        }
        case 3:
        {
          offset+=2;
          break;
        }
      }
  }
  /*
    Flush out the buffered code.
  */
  GIFOutputCode((size_t) waiting_code);
  GIFOutputCode(end_of_information_code);
  if (bits > 0)
    {
      /*
        Add a character to current packet.
      */
      packet[length++]=(unsigned char) (datum & 0xff);
      if (length >= 254)
        {
          (void) WriteBlobByte(image,(unsigned char) length);
          (void) WriteBlob(image,length,packet);
          length=0;
        }
    }
  /*
    Flush accumulated data.
  */
  if (length > 0)
    {
      (void) WriteBlobByte(image,(unsigned char) length);
      (void) WriteBlob(image,length,packet);
    }
  /*
    Free encoder memory.
  */
  hash_suffix=(unsigned char *) RelinquishMagickMemory(hash_suffix);
  hash_prefix=(short *) RelinquishMagickMemory(hash_prefix);
  hash_code=(short *) RelinquishMagickMemory(hash_code);
  packet=(unsigned char *) RelinquishMagickMemory(packet);
  return(MagickTrue);
}
0
180,780
// Record the renderer's new compositor frame sink and, if a delegated frame
// host exists, forward the notification so it can rebind its client.
void RenderWidgetHostViewAura::DidCreateNewRendererCompositorFrameSink(
    cc::mojom::MojoCompositorFrameSinkClient* renderer_compositor_frame_sink) {
  renderer_compositor_frame_sink_ = renderer_compositor_frame_sink;
  if (delegated_frame_host_) {
    delegated_frame_host_->DidCreateNewRendererCompositorFrameSink(
        renderer_compositor_frame_sink_);
  }
}
0
311,935
// Queue a doom-all-entries operation; the stored operation_ code is
// dispatched later by the BackendIO execution machinery.
void BackendIO::DoomAllEntries() {
  operation_ = OP_DOOM_ALL;
}
0
204,401
// Register a V8 extension with the engine and remember it in the static
// list so later-created contexts can enable it.
void V8Proxy::registerExtension(v8::Extension* extension)
{
    registerExtensionWithV8(extension);
    staticExtensionsList().append(extension);
}
0
237,812
// Per-test fixture setup: select the convolve implementation under test,
// mark the output guard border with 255, and fill the input with an
// alternating pattern of saturated and random pixel values (plus a
// high-bit-depth mirror when built with CONFIG_VP9_HIGHBITDEPTH).
virtual void SetUp() {
  UUT_ = GET_PARAM(2);
#if CONFIG_VP9_HIGHBITDEPTH
  // mask_ limits random samples to the configured bit depth.
  if (UUT_->use_highbd_ != 0)
    mask_ = (1 << UUT_->use_highbd_) - 1;
  else
    mask_ = 255;
#endif
  /* Set up guard blocks for an inner block centered in the outer block */
  for (int i = 0; i < kOutputBufferSize; ++i) {
    if (IsIndexInBorder(i))
      output_[i] = 255;
    else
      output_[i] = 0;
  }

  ::libvpx_test::ACMRandom prng;
  for (int i = 0; i < kInputBufferSize; ++i) {
    if (i & 1) {
      input_[i] = 255;
#if CONFIG_VP9_HIGHBITDEPTH
      input16_[i] = mask_;
#endif
    } else {
      input_[i] = prng.Rand8Extremes();
#if CONFIG_VP9_HIGHBITDEPTH
      input16_[i] = prng.Rand16() & mask_;
#endif
    }
  }
}
0
473,244
/*
 * Validate an ordinal date (year @y, day-of-year @d) under calendar-reform
 * start @sg, splitting the result into a "nth" multiple-of-CM_PERIOD part
 * plus reduced year/day/JD outputs.
 *
 * When guess_style() reports 0 the year is small enough to validate with
 * native ints directly; otherwise the year is first decoded and validated
 * in the chosen (Julian/Gregorian) style.  Returns nonzero if valid.
 */
valid_ordinal_p(VALUE y, int d, double sg,
		VALUE *nth, int *ry,
		int *rd, int *rjd,
		int *ns)
{
    double style = guess_style(y, sg);
    int r;

    if (style == 0) {
	int jd;

	r = c_valid_ordinal_p(FIX2INT(y), d, sg, rd, &jd, ns);
	if (!r)
	    return 0;
	decode_jd(INT2FIX(jd), nth, rjd);
	if (f_zero_p(*nth))
	    *ry = FIX2INT(y);
	else {
	    /* Year lies outside the reduced range; re-derive the reduced
	       year from the (possibly shifted) calendar style. */
	    VALUE nth2;
	    decode_year(y, *ns ? -1 : +1, &nth2, ry);
	}
    }
    else {
	decode_year(y, style, nth, ry);
	r = c_valid_ordinal_p(*ry, d, style, rd, rjd, ns);
    }
    return r;
}
0
86,637
/*
 * strict_blocks_to_sectors() - parse a decimal block count from @buf and
 * convert it to 512-byte sectors (blocks * 2), rejecting any value that
 * cannot be represented exactly.
 *
 * Returns 0 with *@sectors set on success, -EINVAL on parse failure or
 * overflow.
 */
static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
	unsigned long long nblocks;
	sector_t nsect;

	if (kstrtoull(buf, 10, &nblocks) < 0)
		return -EINVAL;

	/* Top bit set means doubling would overflow unsigned long long. */
	if (nblocks & (1ULL << (8 * sizeof(nblocks) - 1)))
		return -EINVAL;

	nsect = nblocks * 2;
	/* Detect truncation when sector_t is narrower than the product. */
	if (nsect != nblocks * 2)
		return -EINVAL;

	*sectors = nsect;
	return 0;
}
0
84,053
/* Snapshot the scanner's current position (pointer, line number, and
 * line-start pointer) into @state so it can be restored later. */
PJ_DEF(void) pj_scan_save_state( const pj_scanner *scanner, pj_scan_state *state)
{
    state->curptr = scanner->curptr;
    state->line = scanner->line;
    state->start_line = scanner->start_line;
}
0
78,899
/*
 * Assemble the x87 "fiadd" instruction (integer add from memory).
 * Only the single-operand memory forms are supported:
 *   m16int -> 0xDE /0, m32int -> 0xDA /0.
 * Emits the encoding into @data and returns its length, or -1 for any
 * unsupported operand combination.
 */
static int opfiadd(RAsm *a, ut8 *data, const Opcode *op) {
	int l = 0;

	if (op->operands_count != 1) {
		return -1;
	}
	if (!(op->operands[0].type & OT_MEMORY)) {
		return -1;
	}
	if (op->operands[0].type & OT_WORD) {
		data[l++] = 0xde;
	} else if (op->operands[0].type & OT_DWORD) {
		data[l++] = 0xda;
	} else {
		return -1;
	}
	/* ModRM with reg field /0 and the base register of the operand. */
	data[l++] = 0x00 | op->operands[0].regs[0];
	return l;
}
0
86,647
/*
 * Append @str as the newest line of the limbo-chat buffer, scrolling the
 * existing lines down by one.  Copying stops once LIMBOCHAT_WIDTH visible
 * characters have been written; color escape sequences (two bytes) are
 * copied without counting toward the visible length.
 *
 * NOTE(review): because color escapes are copied but not counted, a string
 * packed with escapes writes more than LIMBOCHAT_WIDTH bytes into
 * cl.limboChatMsgs[0] -- confirm the destination buffer is sized for that
 * worst case.
 */
void CL_AddToLimboChat( const char *str ) {
	int len = 0;
	char *p;
	int i;

	cl.limboChatPos = LIMBOCHAT_HEIGHT - 1;

	// copy old strings
	for ( i = cl.limboChatPos; i > 0; i-- ) {
		strcpy( cl.limboChatMsgs[i], cl.limboChatMsgs[i - 1] );
	}

	// copy new string
	p = cl.limboChatMsgs[0];
	*p = 0;

	while ( *str ) {
		if ( len > LIMBOCHAT_WIDTH - 1 ) {
			break;
		}

		// color escapes take two bytes but no visible column
		if ( Q_IsColorString( str ) ) {
			*p++ = *str++;
			*p++ = *str++;
			continue;
		}
		*p++ = *str++;
		len++;
	}
	*p = 0;
}
0
214,753
/*
 * Select the source ICC profile and rendering condition for an object,
 * based on its graphics tag (path / image / text) and its source color
 * space (RGB / CMYK / Gray).
 *
 * Outputs NULL profile and gsPERCEPTUAL intent when the tag is unknown,
 * untouched, or the color space has no configured override.
 */
gsicc_get_srcprofile(gsicc_colorbuffer_t data_cs,
                     gs_graphics_type_tag_t graphics_type_tag,
                     cmm_srcgtag_profile_t *srcgtag_profile,
                     cmm_profile_t **profile,
                     gsicc_rendering_param_t *render_cond)
{
    /* Defaults: no override profile, perceptual intent. */
    (*profile) = NULL;
    (*render_cond).rendering_intent = gsPERCEPTUAL;
    /* The device-encodes-tags bit is not part of the object class. */
    switch (graphics_type_tag & ~GS_DEVICE_ENCODES_TAGS) {
        case GS_UNKNOWN_TAG:
        case GS_UNTOUCHED_TAG:
        default:
            break;
        case GS_PATH_TAG:
            if (data_cs == gsRGB) {
                (*profile) = srcgtag_profile->rgb_profiles[gsSRC_GRAPPRO];
                *render_cond = srcgtag_profile->rgb_rend_cond[gsSRC_GRAPPRO];
            } else if (data_cs == gsCMYK) {
                (*profile) = srcgtag_profile->cmyk_profiles[gsSRC_GRAPPRO];
                *render_cond = srcgtag_profile->cmyk_rend_cond[gsSRC_GRAPPRO];
            } else if (data_cs == gsGRAY) {
                (*profile) = srcgtag_profile->gray_profiles[gsSRC_GRAPPRO];
                *render_cond = srcgtag_profile->gray_rend_cond[gsSRC_GRAPPRO];
            }
            break;
        case GS_IMAGE_TAG:
            if (data_cs == gsRGB) {
                (*profile) = srcgtag_profile->rgb_profiles[gsSRC_IMAGPRO];
                *render_cond = srcgtag_profile->rgb_rend_cond[gsSRC_IMAGPRO];
            } else if (data_cs == gsCMYK) {
                (*profile) = srcgtag_profile->cmyk_profiles[gsSRC_IMAGPRO];
                *render_cond = srcgtag_profile->cmyk_rend_cond[gsSRC_IMAGPRO];
            } else if (data_cs == gsGRAY) {
                (*profile) = srcgtag_profile->gray_profiles[gsSRC_IMAGPRO];
                *render_cond = srcgtag_profile->gray_rend_cond[gsSRC_IMAGPRO];
            }
            break;
        case GS_TEXT_TAG:
            if (data_cs == gsRGB) {
                (*profile) = srcgtag_profile->rgb_profiles[gsSRC_TEXTPRO];
                *render_cond = srcgtag_profile->rgb_rend_cond[gsSRC_TEXTPRO];
            } else if (data_cs == gsCMYK) {
                (*profile) = srcgtag_profile->cmyk_profiles[gsSRC_TEXTPRO];
                *render_cond = srcgtag_profile->cmyk_rend_cond[gsSRC_TEXTPRO];
            } else if (data_cs == gsGRAY) {
                (*profile) = srcgtag_profile->gray_profiles[gsSRC_TEXTPRO];
                *render_cond = srcgtag_profile->gray_rend_cond[gsSRC_TEXTPRO];
            }
            break;
    }
}
0
307,450
// Setter: record whether the after-margin may collapse with child margins.
void setCanCollapseMarginAfterWithChildren(bool collapse) { m_canCollapseMarginAfterWithChildren = collapse; }
0
395,653
/*
 * manager_load_unit_prepare() - create or look up a unit by @name (or by
 * the basename of @path) and queue it for loading, without reading any
 * configuration from disk yet.
 *
 * Validates the unit name/type (templates without an instance are
 * rejected), returns an existing unit if one is already registered, and
 * otherwise allocates a fresh unit, records @path as its fragment, and
 * enqueues it on the load/dbus/gc queues.
 *
 * Returns 1 if the unit already existed, 0 if a new one was prepared, or
 * a negative errno / sd_bus error on failure.
 */
int manager_load_unit_prepare(
                Manager *m,
                const char *name,
                const char *path,
                sd_bus_error *e,
                Unit **_ret) {

        Unit *ret;
        UnitType t;
        int r;

        assert(m);
        assert(name || path);

        /* This will prepare the unit for loading, but not actually
         * load anything from disk. */

        if (path && !is_path(path))
                return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Path %s is not absolute.", path);

        if (!name)
                name = basename(path);

        t = unit_name_to_type(name);

        if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) {
                if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE))
                        return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is missing the instance name.", name);

                return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is not valid.", name);
        }

        ret = manager_get_unit(m, name);
        if (ret) {
                *_ret = ret;
                return 1;
        }

        ret = unit_new(m, unit_vtable[t]->object_size);
        if (!ret)
                return -ENOMEM;

        if (path) {
                ret->fragment_path = strdup(path);
                if (!ret->fragment_path) {
                        unit_free(ret);
                        return -ENOMEM;
                }
        }

        r = unit_add_name(ret, name);
        if (r < 0) {
                unit_free(ret);
                return r;
        }

        unit_add_to_load_queue(ret);
        unit_add_to_dbus_queue(ret);
        unit_add_to_gc_queue(ret);

        if (_ret)
                *_ret = ret;

        return 0;
}
0
481,597
/*
 * io_req_find_next() - detach and return the next request in @req's link
 * chain, or NULL if there is none.
 *
 * If any disarm flag is set, the chain is first prepared (failing/cleaning
 * dependent requests as needed) before the link is consumed.
 */
static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (unlikely(req->flags & IO_DISARM_MASK))
		__io_req_find_next_prep(req);
	nxt = req->link;
	req->link = NULL;
	return nxt;
}
0