idx
int64
func
string
target
int64
359,214
/*
 * bpf_ringbuf_reserve() BPF helper: reserve @size bytes of sample space in
 * the ring buffer backing @map.  Returns the reserved sample pointer cast
 * to unsigned long, or 0 on failure.  No flags are defined for reserve,
 * so any non-zero @flags is rejected up front.
 */
BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
{
	struct bpf_ringbuf_map *rb_map;

	/* Reserve accepts no flags. */
	if (unlikely(flags))
		return 0;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
}
0
230,617
/* Append zero-motion-vector merge candidates until the list holds
   maxCandidates entries (HEVC zero merging candidate derivation).
   P slices use list 0 only; B slices get the same zero MV in both lists.
   The reference index cycles 0..numRefIdx-1, then sticks at 0. */
void derive_zero_motion_vector_candidates(const slice_segment_header* shdr,
                                          PBMotion* out_mergeCandList,
                                          int* inout_numCurrMergeCand,
                                          int maxCandidates)
{
  logtrace(LogMotion,"derive_zero_motion_vector_candidates\n");

  // Number of usable reference indices: l0 count for P slices, the
  // smaller of the two active list counts for B slices.
  int numRefIdx;
  if (shdr->slice_type==SLICE_TYPE_P) {
    numRefIdx = shdr->num_ref_idx_l0_active;
  }
  else {
    numRefIdx = libde265_min(shdr->num_ref_idx_l0_active,
                             shdr->num_ref_idx_l1_active);
  }

  //int numInputMergeCand = *inout_numMergeCand;
  int zeroIdx = 0;

  while (*inout_numCurrMergeCand < maxCandidates) {
    // 1. append one zero-MV candidate for the current reference index
    logtrace(LogMotion,"zeroIdx:%d numRefIdx:%d\n", zeroIdx, numRefIdx);

    PBMotion* newCand = &out_mergeCandList[*inout_numCurrMergeCand];

    // Once all reference indices have been used, keep refIdx 0.
    const int refIdx = (zeroIdx < numRefIdx) ? zeroIdx : 0;

    if (shdr->slice_type==SLICE_TYPE_P) {
      newCand->refIdx[0] = refIdx;
      newCand->refIdx[1] = -1;
      newCand->predFlag[0] = 1;
      newCand->predFlag[1] = 0;
    }
    else {
      newCand->refIdx[0] = refIdx;
      newCand->refIdx[1] = refIdx;
      newCand->predFlag[0] = 1;
      newCand->predFlag[1] = 1;
    }

    newCand->mv[0].x = 0;
    newCand->mv[0].y = 0;
    newCand->mv[1].x = 0;
    newCand->mv[1].y = 0;

    (*inout_numCurrMergeCand)++;

    // 2. advance to the next reference index
    zeroIdx++;
  }
}
0
197,824
/*
 * Parse a GlobalQuantizer BIFS command: decode the node, install it as the
 * scene graph's global QuantizationParameter and append the corresponding
 * GF_SG_GLOBAL_QUANTIZER command to @com_list.
 *
 * Returns GF_OK on success, GF_NON_COMPLIANT_BITSTREAM when the node cannot
 * be decoded or is not a QuantizationParameter.
 *
 * FIX: only unregister the previous global QP when it is a *different* node
 * from the one just decoded.  If the decoder hands back the node that is
 * currently installed as global_qp, unregistering it here could drop its
 * last reference and free it, after which every later access to @node would
 * be a use-after-free.
 */
static GF_Err BM_ParseGlobalQuantizer(GF_BifsDecoder *codec, GF_BitStream *bs, GF_List *com_list)
{
	GF_Node *node;
	GF_Command *com;
	GF_CommandField *inf;

	node = gf_bifs_dec_node(codec, bs, NDT_SFWorldNode);
	if (!node) return GF_NON_COMPLIANT_BITSTREAM;

	/*reset global QP - but never free the node we are about to install*/
	if (codec->scenegraph->global_qp && (codec->scenegraph->global_qp != node)) {
		gf_node_unregister(codec->scenegraph->global_qp, NULL);
	}
	codec->ActiveQP = NULL;
	codec->scenegraph->global_qp = NULL;

	if (gf_node_get_tag(node) != TAG_MPEG4_QuantizationParameter) {
		gf_node_unregister(node, NULL);
		return GF_NON_COMPLIANT_BITSTREAM;
	}

	/*register global QP*/
	codec->ActiveQP = (M_QuantizationParameter *) node;
	codec->ActiveQP->isLocal = 0;
	codec->scenegraph->global_qp = node;

	/*register TWICE: once for the command, and for the scenegraph globalQP*/
	node->sgprivate->num_instances = 2;

	com = gf_sg_command_new(codec->current_graph, GF_SG_GLOBAL_QUANTIZER);
	inf = gf_sg_command_field_new(com);
	inf->new_node = node;
	inf->field_ptr = &inf->new_node;
	inf->fieldType = GF_SG_VRML_SFNODE;
	gf_list_add(com_list, com);
	return GF_OK;
}
1
199,159
/*
 * OSS SNDCTL_DSP_SYNC emulation.
 *
 * Playback: pad any partially-filled OSS period with silence, pad the
 * remainder of the current ALSA period with zeros, then DRAIN the stream
 * synchronously (O_NONBLOCK temporarily cleared).  Capture: simply DROP.
 * Both substreams are flagged for re-prepare afterwards.
 * Returns 0 on success or a negative error code.
 */
static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
{
	int err = 0;
	unsigned int saved_f_flags;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_format_t format;
	unsigned long width;
	size_t size;

	substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
	if (substream != NULL) {
		runtime = substream->runtime;
		/* mmap-ed streams have no OSS staging buffer to pad */
		if (atomic_read(&substream->mmap_count))
			goto __direct;
		err = snd_pcm_oss_make_ready(substream);
		if (err < 0)
			return err;
		atomic_inc(&runtime->oss.rw_ref);
		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
			atomic_dec(&runtime->oss.rw_ref);
			return -ERESTARTSYS;
		}
		format = snd_pcm_oss_format_from(runtime->oss.format);
		width = snd_pcm_format_physical_width(format);
		if (runtime->oss.buffer_used > 0) {
#ifdef OSS_DEBUG
			pcm_dbg(substream->pcm, "sync: buffer_used\n");
#endif
			/* silence-fill the unused tail of the OSS period
			 * (size is in samples, hence the *8/width) */
			size = (8 * (runtime->oss.period_bytes - runtime->oss.buffer_used) + 7) / width;
			snd_pcm_format_set_silence(format,
						   runtime->oss.buffer + runtime->oss.buffer_used,
						   size);
			err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes);
			if (err < 0)
				goto unlock;
		} else if (runtime->oss.period_ptr > 0) {
#ifdef OSS_DEBUG
			pcm_dbg(substream->pcm, "sync: period_ptr\n");
#endif
			/* flush the rest of the partially written period */
			size = runtime->oss.period_bytes - runtime->oss.period_ptr;
			snd_pcm_format_set_silence(format,
						   runtime->oss.buffer,
						   size * 8 / width);
			err = snd_pcm_oss_sync1(substream, size);
			if (err < 0)
				goto unlock;
		}
		/*
		 * The ALSA period might be a bit larger than the OSS one.
		 * Fill the remaining portion of the ALSA period with zeros.
		 */
		size = runtime->control->appl_ptr % runtime->period_size;
		if (size > 0) {
			size = runtime->period_size - size;
			if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED)
				snd_pcm_lib_write(substream, NULL, size);
			else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
				snd_pcm_lib_writev(substream, NULL, size);
		}
unlock:
		mutex_unlock(&runtime->oss.params_lock);
		atomic_dec(&runtime->oss.rw_ref);
		if (err < 0)
			return err;
		/*
		 * finish sync: drain the buffer
		 */
__direct:
		/* drain must block even on O_NONBLOCK streams */
		saved_f_flags = substream->f_flags;
		substream->f_flags &= ~O_NONBLOCK;
		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
		substream->f_flags = saved_f_flags;
		if (err < 0)
			return err;
		mutex_lock(&runtime->oss.params_lock);
		runtime->oss.prepare = 1;
		mutex_unlock(&runtime->oss.params_lock);
	}

	substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
	if (substream != NULL) {
		err = snd_pcm_oss_make_ready(substream);
		if (err < 0)
			return err;
		runtime = substream->runtime;
		/* capture side is simply dropped, not drained */
		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
		if (err < 0)
			return err;
		mutex_lock(&runtime->oss.params_lock);
		runtime->oss.buffer_used = 0;
		runtime->oss.prepare = 1;
		mutex_unlock(&runtime->oss.params_lock);
	}
	return 0;
}
1
225,879
/*
 * Allocate a new FDSA (FD hint sample) box with an empty packet table and
 * the FDP sample-description hint subtype.
 * Returns NULL when either the box or its packet list cannot be allocated
 * (ISOM_DECL_BOX_ALLOC declares `tmp` and leaves it NULL on failure).
 */
GF_Box *fdsa_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_HintSample, GF_ISOM_BOX_TYPE_FDSA);
	if (!tmp) return NULL;
	tmp->packetTable = gf_list_new();
	/* FIX: gf_list_new() can fail; bail out instead of returning a box
	 * whose packetTable is NULL and would be dereferenced later */
	if (!tmp->packetTable) {
		gf_free(tmp);
		return NULL;
	}
	tmp->hint_subtype = GF_ISOM_BOX_TYPE_FDP_STSD;
	return (GF_Box*)tmp;
}
0
291,831
/*
 * Unlink @clt_path from the client's RCU path list and scrub every per-CPU
 * cached path pointer that may still reference it, waiting for RCU grace
 * periods where readers could still hold the pointer.
 * Takes clt->paths_mutex internally.
 */
static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
{
	struct rtrs_clt_sess *clt = clt_path->clt;
	struct rtrs_clt_path *next;
	bool wait_for_grace = false;
	int cpu;

	mutex_lock(&clt->paths_mutex);
	list_del_rcu(&clt_path->s.entry);

	/* Make sure everybody observes path removal. */
	synchronize_rcu();

	/*
	 * At this point nobody sees @sess in the list, but still we have
	 * dangling pointer @pcpu_path which _can_ point to @sess.  Since
	 * nobody can observe @sess in the list, we guarantee that IO path
	 * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
	 * to @sess, but can never again become @sess.
	 */

	/*
	 * Decrement paths number only after grace period, because
	 * caller of do_each_path() must firstly observe list without
	 * path and only then decremented paths number.
	 *
	 * Otherwise there can be the following situation:
	 *    o Two paths exist and IO is coming.
	 *    o One path is removed:
	 *      CPU#0                          CPU#1
	 *      do_each_path():                rtrs_clt_remove_path_from_arr():
	 *          path = get_next_path()
	 *          ^^^                            list_del_rcu(path)
	 *          [!CONNECTED path]              clt->paths_num--
	 *                                              ^^^^^^^^^
	 *          load clt->paths_num from 2 to 1
	 *                    ^^^^^^^^^
	 *                    sees 1
	 *
	 *      path is observed as !CONNECTED, but do_each_path() loop
	 *      ends, because expression i < clt->paths_num is false.
	 */
	clt->paths_num--;

	/*
	 * Get @next connection from current @sess which is going to be
	 * removed.  If @sess is the last element, then @next is NULL.
	 */
	rcu_read_lock();
	next = list_next_or_null_rr_rcu(&clt->paths_list, &clt_path->s.entry,
					typeof(*next), s.entry);
	rcu_read_unlock();

	/*
	 * @pcpu paths can still point to the path which is going to be
	 * removed, so change the pointer manually.
	 */
	for_each_possible_cpu(cpu) {
		struct rtrs_clt_path __rcu **ppcpu_path;

		ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
		if (rcu_dereference_protected(*ppcpu_path,
					      lockdep_is_held(&clt->paths_mutex)) != clt_path)
			/*
			 * synchronize_rcu() was called just after deleting
			 * entry from the list, thus IO code path cannot
			 * change pointer back to the pointer which is going
			 * to be removed, we are safe here.
			 */
			continue;

		/*
		 * We race with IO code path, which also changes pointer,
		 * thus we have to be careful not to overwrite it.
		 */
		if (xchg_paths(ppcpu_path, clt_path, next))
			/*
			 * @ppcpu_path was successfully replaced with @next,
			 * that means that someone could also pick up the
			 * @sess and dereferencing it right now, so wait for
			 * a grace period is required.
			 */
			wait_for_grace = true;
	}
	if (wait_for_grace)
		synchronize_rcu();

	mutex_unlock(&clt->paths_mutex);
}
0
437,703
/*
 * Convert a duration in nanoseconds into CX23888 IR reference-clock
 * pulse cycles, rounding half up to the nearest cycle.
 */
static u64 ns_to_pulse_clocks(u32 ns)
{
	u64 clocks;
	u32 rem;

	/* REFCLK/1e6 = cycles per microsecond; times ns gives millicycles */
	clocks = CX23888_IR_REFCLK_FREQ / 1000000 * (u64) ns; /* millicycles  */
	/* do_div() divides @clocks in place and returns the remainder */
	rem = do_div(clocks, 1000);                           /* /1000 = cycles */
	if (rem >= 1000 / 2)
		clocks++; /* round half up */
	return clocks;
}
0
231,062
/*
 * Receive (and remove) one item from @xQueue into @pvBuffer, blocking for
 * up to @xTicksToWait ticks while the queue is empty.
 * Returns pdPASS when an item was copied out, errQUEUE_EMPTY on timeout.
 * The outer for(;;) re-runs the critical section after every unblock,
 * because another task may have consumed the item first.
 */
BaseType_t xQueueReceive( QueueHandle_t xQueue,
                          void * const pvBuffer,
                          TickType_t xTicksToWait )
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = xQueue;

    /* Check the pointer is not NULL. */
    configASSERT( ( pxQueue ) );

    /* The buffer into which data is received can only be NULL if the data size
     * is zero (so no data is copied into the buffer). */
    configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

    /* Cannot block if the scheduler is suspended. */
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    /*lint -save -e904  This function relaxes the coding standard somewhat to
     * allow return statements within the function itself.  This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
             * must be the highest priority task wanting to access the queue. */
            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
            {
                /* Data available, remove one item. */
                prvCopyDataFromQueue( pxQueue, pvBuffer );
                traceQUEUE_RECEIVE( pxQueue );
                pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;

                /* There is now space in the queue, were any tasks waiting to
                 * post to the queue?  If so, unblock the highest priority waiting
                 * task. */
                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if( xTicksToWait == ( TickType_t ) 0 )
                {
                    /* The queue was empty and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED( pxQueue );
                    return errQUEUE_EMPTY;
                }
                else if( xEntryTimeSet == pdFALSE )
                {
                    /* The queue was empty and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState( &xTimeOut );
                    xEntryTimeSet = pdTRUE;
                }
                else
                {
                    /* Entry time was already set. */
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */
        vTaskSuspendAll();
        prvLockQueue( pxQueue );

        /* Update the timeout state to see if it has expired yet. */
        if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
        {
            /* The timeout has not expired.  If the queue is still empty place
             * the task on the list of tasks waiting to receive from the queue. */
            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                prvUnlockQueue( pxQueue );

                if( xTaskResumeAll() == pdFALSE )
                {
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The queue contains data again.  Loop back to try and read the
                 * data. */
                prvUnlockQueue( pxQueue );
                ( void ) xTaskResumeAll();
            }
        }
        else
        {
            /* Timed out.  If there is no data in the queue exit, otherwise loop
             * back and attempt to read the data. */
            prvUnlockQueue( pxQueue );
            ( void ) xTaskResumeAll();

            if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
            {
                traceQUEUE_RECEIVE_FAILED( pxQueue );
                return errQUEUE_EMPTY;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    } /*lint -restore */
}
0
231,534
/* Call _getcwd() without letting the MSVC invalid-parameter handler abort
   the process: any invalid-parameter trap is caught and reported as a
   NULL return with errno set to ERANGE. */
getcwd_nothrow (char *buf, size_t size)
{
  char *result;

  TRY_MSVC_INVAL
    {
      result = _getcwd (buf, size);
    }
  CATCH_MSVC_INVAL
    {
      /* _getcwd raised an invalid-parameter condition. */
      result = NULL;
      errno = ERANGE;
    }
  DONE_MSVC_INVAL;

  return result;
}
0
509,558
/*
  Pass an extra-operation hint with an option value (e.g. a cache size)
  down to the Maria storage engine.

  In SAFE mode write caching is disabled, so HA_EXTRA_WRITE_CACHE is
  silently accepted without touching the engine.
*/
int ha_maria::extra_opt(enum ha_extra_function operation, ulong cache_size)
{
  if ((specialflag & SPECIAL_SAFE_MODE) && operation == HA_EXTRA_WRITE_CACHE)
    return 0;
  return maria_extra(file, operation, (void*) &cache_size);
}
0
301,380
/*
 * Open a directory stream from the already-open directory handle of @fsp.
 * @mask and @attr are part of the VFS opendir signature but are unused by
 * this default implementation.  The syscall is bracketed for profiling.
 */
static DIR *vfswrap_fdopendir(vfs_handle_struct *handle, files_struct *fsp, const char *mask, uint32 attr)
{
	DIR *result;

	START_PROFILE(syscall_fdopendir);
	result = sys_fdopendir(fsp->fh->fd);
	END_PROFILE(syscall_fdopendir);
	return result;
}
0
293,542
/*
 * Remove every character of the NUL-terminated string @str from the
 * character-input-spec @cis.  The spec is modified in place, one
 * character at a time via PJ_CIS_CLR().
 */
PJ_DEF(void) pj_cis_del_str( pj_cis_t *cis, const char *str)
{
    const char *p;

    for (p = str; *p != '\0'; ++p) {
        PJ_CIS_CLR(cis, *p);
    }
}
0
307,831
// Snapshot the JVMTI capabilities that can affect code generation into this
// ciEnv, so the compiler sees one consistent view for the entire compile.
void ciEnv::cache_jvmti_state() {
  VM_ENTRY_MARK;
  // Get Jvmti capabilities under lock to get consistent values.
  MutexLocker mu(JvmtiThreadState_lock);
  _jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint();
  _jvmti_can_access_local_variables     = JvmtiExport::can_access_local_variables();
  _jvmti_can_post_on_exceptions         = JvmtiExport::can_post_on_exceptions();
  _jvmti_can_pop_frame                  = JvmtiExport::can_pop_frame();
}
0
259,611
// Feed up to eight buffered MCU lines of component c into the largest scale
// of the hierarchical encoder, stopping early at the bottom of the image.
// Each pushed line is handed over to the scale (our pointer is cleared).
void HierarchicalBitmapRequester::Push8Lines(UBYTE c)
{
  int cnt;
  ULONG y = m_pulY[c];
  //
  for(cnt = 0;cnt < 8 && y < m_pulHeight[c];cnt++) {
    assert(m_ppEncodingMCU[cnt | (c << 3)]);
    m_pLargestScale->PushLine(m_ppEncodingMCU[cnt | (c << 3)],c);
    // Ownership is transferred to the scale: forget our reference.
    m_ppEncodingMCU[cnt | (c << 3)] = NULL;
    y++;
  }
  // Remember how far down this component has advanced.
  m_pulY[c] = y;
}
0
383,372
/* Find the palette index whose color exactly matches (r,g,b), treating the
   color as fully opaque.  Thin wrapper around the alpha-aware variant. */
gdImageColorExact (gdImagePtr im, int r, int g, int b)
{
  return gdImageColorExactAlpha (im, r, g, b, gdAlphaOpaque);
}
0
247,100
/* Stub implementation: this build exposes no OpenGL provider for filters
 * to claim, so always report "none available" (NULL). */
void *gf_filter_claim_opengl_provider(GF_Filter *filter)
{
	return NULL;
}
0
359,290
/* CLI handler for "no neighbor <peer> strict-capability-match":
 * clears the PEER_FLAG_STRICT_CAP_MATCH flag on the addressed peer. */
DEFUN (no_neighbor_strict_capability,
       no_neighbor_strict_capability_cmd,
       NO_NEIGHBOR_CMD "strict-capability-match",
       NO_STR
       NEIGHBOR_STR
       NEIGHBOR_ADDR_STR
       "Strict capability negotiation match\n")
{
  return peer_flag_unset_vty (vty, argv[0], PEER_FLAG_STRICT_CAP_MATCH);
}
0
219,966
/*
 * REST callback: update the configuration of an existing client backend
 * module (name taken from the URL).  The "enabled" field is stripped from
 * the payload (toggling is a separate endpoint), the payload is validated,
 * then persisted.  Responds 400 on missing/invalid payload, 404 when the
 * module does not exist, 500 on internal errors.
 */
int callback_glewlwyd_set_client_module (const struct _u_request * request, struct _u_response * response, void * client_data) {
  struct config_elements * config = (struct config_elements *)client_data;
  json_t * j_module, * j_module_valid, * j_search_module;

  j_search_module = get_client_module(config, u_map_get(request->map_url, "name"));
  if (check_result_value(j_search_module, G_OK)) {
    j_module = ulfius_get_json_body_request(request, NULL);
    if (j_module != NULL) {
      /* "enabled" cannot be changed through this endpoint. */
      json_object_del(j_module, "enabled");
      j_module_valid = is_client_module_valid(config, j_module, 0);
      if (check_result_value(j_module_valid, G_OK)) {
        if (set_client_module(config, u_map_get(request->map_url, "name"), j_module) != G_OK) {
          y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_set_client_module - Error set_client_module");
          response->status = 500;
        } else {
          y_log_message(Y_LOG_LEVEL_INFO, "Event - Client backend module '%s' updated", u_map_get(request->map_url, "name"));
        }
      } else if (check_result_value(j_module_valid, G_ERROR_PARAM)) {
        /* Validation rejected the payload; forward details when present. */
        if (json_object_get(j_module_valid, "error") != NULL) {
          ulfius_set_json_body_response(response, 400, json_object_get(j_module_valid, "error"));
        } else {
          response->status = 400;
        }
      } else if (!check_result_value(j_module_valid, G_OK)) {
        /* Neither G_OK nor G_ERROR_PARAM: internal validation failure. */
        y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_set_client_module - Error is_client_module_valid");
        response->status = 500;
      }
      json_decref(j_module_valid);
    } else {
      /* Body was not parseable JSON. */
      response->status = 400;
    }
    /* jansson's json_decref() is NULL-safe, so this is fine when parsing failed. */
    json_decref(j_module);
  } else if (check_result_value(j_search_module, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_set_client_module - Error get_client_module");
    response->status = 500;
  }
  json_decref(j_search_module);
  return U_CALLBACK_CONTINUE;
}
0
359,498
/* CLI handler for "clear ip bgp * ipv4 (unicast|multicast) soft out":
 * soft-resets outbound updates for all peers in the selected IPv4 SAFI.
 * argv[0] starting with 'm' selects multicast; anything else unicast. */
DEFUN (clear_ip_bgp_all_ipv4_soft_out,
       clear_ip_bgp_all_ipv4_soft_out_cmd,
       "clear ip bgp * ipv4 (unicast|multicast) soft out",
       CLEAR_STR
       IP_STR
       BGP_STR
       "Clear all peers\n"
       "Address family\n"
       "Address Family modifier\n"
       "Address Family modifier\n"
       "Soft reconfig\n"
       "Soft reconfig outbound update\n")
{
  if (strncmp (argv[0], "m", 1) == 0)
    return bgp_clear_vty (vty, NULL, AFI_IP, SAFI_MULTICAST, clear_all,
                          BGP_CLEAR_SOFT_OUT, NULL);

  return bgp_clear_vty (vty, NULL, AFI_IP, SAFI_UNICAST, clear_all,
                        BGP_CLEAR_SOFT_OUT, NULL);
}
0
259,714
/*
 * WebAuthn attestation helper: verify that @cert_leaf chains up to one of
 * the configured root CAs (j_params["root-ca-array"]).  The root is
 * selected by matching the leaf's issuer DN, the remaining certificates of
 * the CBOR x5c array are imported, and the assembled chain is checked with
 * a GnuTLS trust list.
 * Returns G_OK when the chain verifies, G_ERROR_NOT_FOUND when no root
 * matches the issuer DN, G_ERROR on any import/verification error.
 */
static int validate_certificate_from_root(json_t * j_params, gnutls_x509_crt_t cert_leaf, cbor_item_t * x5c_array) {
  int ret = G_ERROR_NOT_FOUND, res;
  unsigned int result;
  gnutls_datum_t cert_dat = {NULL, 0}, issuer_dat = {NULL, 0};
  gnutls_x509_trust_list_t tlist = NULL;
  /* chain layout: [0]=leaf, [1..n-1]=intermediates from x5c, [n]=root */
  gnutls_x509_crt_t cert_x509[cbor_array_size(x5c_array)+1], root_x509 = NULL;
  json_t * j_cert = NULL;
  cbor_item_t * cbor_cert = NULL;
  size_t index = 0, i = 0, x5c_array_size = cbor_array_size(x5c_array);
  char * issuer;

  for (i=0; i<x5c_array_size+1; i++) {
    cert_x509[i] = NULL;
  }
  if ((res = gnutls_x509_crt_get_issuer_dn2(cert_leaf, &issuer_dat)) >= 0) {
    issuer = o_strndup((const char *)issuer_dat.data, issuer_dat.size);
    /* find the configured root whose subject DN matches the leaf's issuer */
    json_array_foreach(json_object_get(j_params, "root-ca-array"), index, j_cert) {
      if (0 == o_strcmp(issuer, json_string_value(json_object_get(j_cert, "dn")))) {
        cert_dat.data = (unsigned char *)json_string_value(json_object_get(j_cert, "x509"));
        cert_dat.size = json_string_length(json_object_get(j_cert, "x509"));
        if (!gnutls_x509_crt_init(&root_x509) && !gnutls_x509_crt_import(root_x509, &cert_dat, GNUTLS_X509_FMT_PEM)) {
          cert_x509[0] = cert_leaf;
          /* import intermediates; index 0 of x5c is the leaf itself */
          for (i=1; i<x5c_array_size; i++) {
            cbor_cert = cbor_array_get(x5c_array, i);
            cert_dat.data = cbor_bytestring_handle(cbor_cert);
            cert_dat.size = cbor_bytestring_length(cbor_cert);
            if (gnutls_x509_crt_init(&cert_x509[i]) < 0 || gnutls_x509_crt_import(cert_x509[i], &cert_dat, GNUTLS_X509_FMT_DER) < 0) {
              y_log_message(Y_LOG_LEVEL_ERROR, "validate_certificate_from_root - Error import chain cert at index %zu", i);
              ret = G_ERROR;
            }
            cbor_decref(&cbor_cert);
          }
          cert_x509[x5c_array_size] = root_x509;
          /* NOTE(review): this unconditionally overwrites the G_ERROR set
           * above on an intermediate import failure — confirm intended. */
          ret = G_OK;
        } else {
          y_log_message(Y_LOG_LEVEL_ERROR, "validate_certificate_from_root - Error import root cert");
          ret = G_ERROR;
        }
      }
    }
    o_free(issuer);
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "validate_certificate_from_root - Error gnutls_x509_crt_get_issuer_dn2: %d", res);
    ret = G_ERROR;
  }
  gnutls_free(issuer_dat.data);
  if (ret == G_OK) {
    if (!gnutls_x509_trust_list_init(&tlist, 0)) {
      if (gnutls_x509_trust_list_add_cas(tlist, &root_x509, 1, 0) >= 0) {
        /* NOTE(review): the chain length passed here is hardcoded to 2
         * (leaf + one more); for x5c arrays with several intermediates
         * this ignores part of the imported chain — verify upstream. */
        if (gnutls_x509_trust_list_verify_crt(tlist, cert_x509, 2, 0, &result, NULL) >= 0) {
          /* non-zero @result is a bitmask of verification failures */
          if (result) {
            y_log_message(Y_LOG_LEVEL_DEBUG, "validate_certificate_from_root - certificate chain invalid");
            ret = G_ERROR;
          }
        } else {
          y_log_message(Y_LOG_LEVEL_ERROR, "validate_certificate_from_root - Error gnutls_x509_trust_list_verify_crt");
          ret = G_ERROR;
        }
      } else {
        y_log_message(Y_LOG_LEVEL_ERROR, "validate_certificate_from_root - Error gnutls_x509_trust_list_add_cas");
        ret = G_ERROR;
      }
    } else {
      y_log_message(Y_LOG_LEVEL_ERROR, "validate_certificate_from_root - Error gnutls_x509_trust_list_init");
      ret = G_ERROR;
    }
  }
  /* leaf (index 0) belongs to the caller; only free what we imported */
  gnutls_x509_crt_deinit(root_x509);
  for (i=1; i<x5c_array_size; i++) {
    gnutls_x509_crt_deinit(cert_x509[i]);
  }
  gnutls_x509_trust_list_deinit(tlist, 0);
  return ret;
}
0
359,350
/* CLI handler for "neighbor <peer> passive":
 * sets PEER_FLAG_PASSIVE so we never initiate the BGP session (no OPEN). */
DEFUN (neighbor_passive,
       neighbor_passive_cmd,
       NEIGHBOR_CMD2 "passive",
       NEIGHBOR_STR
       NEIGHBOR_ADDR_STR2
       "Don't send open messages to this neighbor\n")
{
  return peer_flag_set_vty (vty, argv[0], PEER_FLAG_PASSIVE);
}
0
222,902
// Returns true if `proto` has unknown rank or contains any dimension of
// unknown (negative) size; false only for a fully specified shape.
bool HasAnyUnknownDimensions(const TensorShapeProto& proto) {
  if (proto.unknown_rank()) {
    return true;
  }
  for (const auto& dim : proto.dim()) {
    // A negative dim size marks an unknown dimension.
    if (dim.size() < 0) {
      return true;
    }
  }
  return false;
}
0
248,332
/*
 * Return the index'th floating-point value of option @opt.
 * Falls back to the bound simple value when no parsed values exist, and
 * to 0 (with errno = EINVAL for a wrong option/type) otherwise.
 */
DLLIMPORT double cfg_opt_getnfloat(cfg_opt_t *opt, unsigned int index)
{
	double value = 0;

	if (opt == NULL || opt->type != CFGT_FLOAT) {
		errno = EINVAL;
	} else if (opt->values != NULL && index < opt->nvalues) {
		value = opt->values[index]->fpnumber;
	} else if (opt->simple_value.fpnumber != NULL) {
		value = *opt->simple_value.fpnumber;
	}
	return value;
}
0
294,615
/*
 * Re-initialize a Date object from up to five optional arguments:
 * Julian day number, day fraction (seconds), second fraction (nanoseconds),
 * UTC offset, and Gregorian reform start day (sg).  The switch below
 * deliberately falls through so that passing N arguments decodes/validates
 * arguments 1..N; omitted arguments keep the defaults set just above it.
 */
d_lite_initialize(int argc, VALUE *argv, VALUE self)
{
    VALUE jd, vjd, vdf, sf, vsf, vof, vsg;
    int df, of;
    double sg;

    rb_check_frozen(self);

    rb_scan_args(argc, argv, "05", &vjd, &vdf, &vsf, &vof, &vsg);

    /* defaults for omitted arguments */
    jd = INT2FIX(0);
    df = 0;
    sf = INT2FIX(0);
    of = 0;
    sg = DEFAULT_SG;

    switch (argc) {
      case 5:
	val2sg(vsg, sg);
	/* fall through */
      case 4:
	val2off(vof, of);
	/* fall through */
      case 3:
	sf = vsf;
	if (f_lt_p(sf, INT2FIX(0)) || f_ge_p(sf, INT2FIX(SECOND_IN_NANOSECONDS)))
	    rb_raise(eDateError, "invalid second fraction");
	/* fall through */
      case 2:
	df = NUM2INT(vdf);
	if (df < 0 || df >= DAY_IN_SECONDS)
	    rb_raise(eDateError, "invalid day fraction");
	/* fall through */
      case 1:
	jd = vjd;
    }

    {
	VALUE nth;
	int rjd;

	get_d1(self);

	decode_jd(jd, &nth, &rjd);
	if (!df && f_zero_p(sf) && !of) {
	    /* pure date: compact "simple" representation suffices */
	    set_to_simple(self, &dat->s, nth, rjd, sg, 0, 0, 0, HAVE_JD);
	}
	else {
	    /* time-of-day/offset present: needs the "complex" layout */
	    if (!complex_dat_p(dat))
		rb_raise(rb_eArgError, "cannot load complex into simple");

	    set_to_complex(self, &dat->c, nth, rjd, df, sf, of, sg, 0, 0, 0, 0, 0, 0, HAVE_JD | HAVE_DF);
	}
    }
    return self;
}
0
225,656
/*
 * Allocate a new FD partition entry ('paen') box.
 * ISOM_DECL_BOX_ALLOC declares `tmp` and leaves it NULL on allocation
 * failure, so NULL is propagated to the caller.
 * (Also restores the closing brace missing from the stored snippet.)
 */
GF_Box *paen_box_new()
{
	ISOM_DECL_BOX_ALLOC(FDPartitionEntryBox, GF_ISOM_BOX_TYPE_PAEN);
	return (GF_Box *)tmp;
}
0
387,615
/*
 * Fill @info for the control element @kctl on behalf of the file @ctl:
 * query the driver's info callback (holding a power reference), resolve
 * the per-element index offset, copy the access bits and lock ownership,
 * and optionally validate the result.
 * Returns 0 on success or a negative error code.
 */
static int __snd_ctl_elem_info(struct snd_card *card, struct snd_kcontrol *kctl, struct snd_ctl_elem_info *info, struct snd_ctl_file *ctl)
{
	struct snd_kcontrol_volatile *vd;
	unsigned int index_offset;
	int result;

#ifdef CONFIG_SND_DEBUG
	info->access = 0;
#endif
	/* keep the card powered while calling into the driver */
	result = snd_power_ref_and_wait(card);
	if (!result)
		result = kctl->info(kctl, info);
	snd_power_unref(card);
	if (result >= 0) {
		/* the info callback must not set access bits itself */
		snd_BUG_ON(info->access);
		index_offset = snd_ctl_get_ioff(kctl, &info->id);
		vd = &kctl->vd[index_offset];
		snd_ctl_build_ioff(&info->id, kctl, index_offset);
		info->access = vd->access;
		if (vd->owner) {
			info->access |= SNDRV_CTL_ELEM_ACCESS_LOCK;
			if (vd->owner == ctl)
				info->access |= SNDRV_CTL_ELEM_ACCESS_OWNER;
			info->owner = pid_vnr(vd->owner->pid);
		} else {
			info->owner = -1;
		}
		if (!snd_ctl_skip_validation(info) &&
		    snd_ctl_check_elem_info(card, info) < 0)
			result = -EINVAL;
	}
	return result;
}
0
224,537
// Shape inference for sparse reductions: computes the dense output shape
// from input_shape (input 2) and reduction_axes (input 3) when both are
// constant tensors; otherwise the output shape is reported as unknown.
Status SparseReduceShapeFn(InferenceContext* c) {
  // Input 0: input_indices
  // Input 1: input_values
  // Input 2: input_shape
  // Input 3: reduction_axes
  // Attr: keep_dims
  bool keep_dims = false;
  TF_RETURN_IF_ERROR(c->GetAttr("keep_dims", &keep_dims));

  const Tensor* shape_tensor = c->input_tensor(2);
  const Tensor* axes_tensor = c->input_tensor(3);
  if (shape_tensor != nullptr && axes_tensor != nullptr) {
    auto shape_vec = shape_tensor->flat<int64_t>();
    auto axes_vec = axes_tensor->flat<int32>();

    int64_t ndims = shape_vec.size();
    absl::flat_hash_set<int64_t> axes;
    // Guard the modulo below against a zero-rank shape tensor.
    if (ndims == 0)
      return errors::InvalidArgument(
          "Number of dims in shape tensor must not be 0");
    // Normalize axes so negative values count from the back.
    for (int i = 0; i < axes_vec.size(); i++) {
      axes.insert((axes_vec(i) + ndims) % ndims);
    }

    std::vector<DimensionHandle> dims;
    if (keep_dims) {
      // Reduced axes are retained with size 1.
      dims.reserve(ndims);
      for (int d = 0; d < ndims; ++d) {
        if (axes.find(d) == axes.end()) {
          dims.push_back(c->MakeDim(shape_vec(d)));
        } else {
          dims.push_back(c->MakeDim(1));
        }
      }
    } else {
      // Reduced axes are dropped entirely.
      for (int d = 0; d < ndims; ++d) {
        if (axes.find(d) == axes.end()) {
          dims.push_back(c->MakeDim(shape_vec(d)));
        }
      }
    }

    c->set_output(0, c->MakeShape(dims));
    return Status::OK();
  }
  return UnknownShape(c);
}
0
430,426
/*
 * Serialize one OVS "set" action into a netlink attribute on @skb.
 * Tunnel-info keys are re-encoded via ip_tun_to_nlattr() inside a nested
 * OVS_ACTION_ATTR_SET; every other key type is copied verbatim.
 * Returns 0 on success or a negative errno (-EMSGSIZE when @skb is full).
 */
static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);
	struct nlattr *start;
	int err;

	switch (key_type) {
	case OVS_KEY_ATTR_TUNNEL_INFO: {
		struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
		struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;

		start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SET);
		if (!start)
			return -EMSGSIZE;

		err = ip_tun_to_nlattr(skb, &tun_info->key,
				       ip_tunnel_info_opts(tun_info),
				       tun_info->options_len,
				       ip_tunnel_info_af(tun_info),
				       tun_info->mode);
		if (err)
			return err;
		nla_nest_end(skb, start);
		break;
	}
	default:
		/* non-tunnel keys are already in wire format */
		if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
			return -EMSGSIZE;
		break;
	}

	return 0;
}
0
294,657
/* Convert a Julian-calendar (year, month, day) to the day-of-year, using
 * the cumulative month table; leap years select the second table row.
 * Month must be in 1..12 (asserted). */
c_julian_to_yday(int y, int m, int d)
{
    int leap_row;

    assert(m >= 1 && m <= 12);
    leap_row = c_julian_leap_p(y) ? 1 : 0;
    return yeartab[leap_row][m] + d;
}
0
424,901
/*
 * Capture the firmware monitor registers (base address, write pointer,
 * wrap/cycle count) into @fw_mon_data for an error dump.  The register
 * addresses depend on the device family and on an optional debug
 * destination TLV; AX210+ devices additionally expose a high base-address
 * register and need the write-pointer field masked.
 */
iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, struct iwl_fw_error_dump_fw_mon *fw_mon_data)
{
	u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
		base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
		write_ptr = DBGC_CUR_DBGBUF_STATUS;
		wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
	} else if (trans->dbg.dest_tlv) {
		/* register addresses supplied by the firmware debug TLV */
		write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
		wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
		base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
	} else {
		base = MON_BUFF_BASE_ADDR;
		write_ptr = MON_BUFF_WRPTR;
		wrap_cnt = MON_BUFF_CYCLE_CNT;
	}

	write_ptr_val = iwl_read_prph(trans, write_ptr);
	fw_mon_data->fw_mon_cycle_cnt = cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
	fw_mon_data->fw_mon_base_ptr = cpu_to_le32(iwl_read_prph(trans, base));
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		fw_mon_data->fw_mon_base_high_ptr = cpu_to_le32(iwl_read_prph(trans, base_high));
		/* on AX210 the status register packs more than the offset */
		write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
	}
	fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
}
0
463,183
/* Scope the annotation state to a single message (@uid within @mailbox).
 * Thin wrapper over annotate_state_set_scope(); the NULL second argument
 * presumably selects "no mailbox-name entry" — confirm against the
 * annotate_state_set_scope() contract. */
HIDDEN int annotate_state_set_message(annotate_state_t *state, struct mailbox *mailbox, unsigned int uid)
{
    return annotate_state_set_scope(state, NULL, mailbox, uid);
}
0
247,167
/*
 * Log a warning listing every filter of @fsess that was never connected:
 * no input pids, no output pids, no multi-sink target and no tasks run.
 * With @ignore_sinks set, filters exposing no output capabilities are
 * skipped (pure sinks are expected to be leaves).
 */
static void gf_fs_print_not_connected_filters(GF_FilterSession *fsess, GF_List *filters_done, Bool ignore_sinks)
{
	u32 i, count;
	Bool has_unconnected=GF_FALSE;
	count=gf_list_count(fsess->filters);
	for (i=0; i<count; i++) {
		GF_Filter *f = gf_list_get(fsess->filters, i);
		//only dump not connected ones
		if (f->num_input_pids || f->num_output_pids || f->multi_sink_target || f->nb_tasks_done) continue;
		if (ignore_sinks) {
			Bool has_outputs;
			/* forced caps override the registry's static caps */
			if (f->forced_caps)
				has_outputs = gf_filter_has_out_caps(f->forced_caps, f->nb_forced_caps);
			else
				has_outputs = gf_filter_has_out_caps(f->freg->caps, f->freg->nb_caps);
			if (!has_outputs) continue;
		}
		if (!has_unconnected) {
			/* print the banner once, before the first entry */
			has_unconnected = GF_TRUE;
			GF_LOG(GF_LOG_WARNING, GF_LOG_APP, ("Filters not connected:\n"));
		}
		gf_fs_print_filter_outputs(f, filters_done, 0, NULL, NULL, 0, GF_FALSE);
	}
}
0
245,712
static int read_request_line (struct conn_s *connptr) { ssize_t len; retry: len = readline (connptr->client_fd, &connptr->request_line); if (len <= 0) { log_message (LOG_ERR, "read_request_line: Client (file descriptor: %d) " "closed socket before read.", connptr->client_fd); return -1; } /* * Strip the new line and carriage return from the string. */ if (chomp (connptr->request_line, len) == len) { /* * If the number of characters removed is the same as the * length then it was a blank line. Free the buffer and * try again (since we're looking for a request line.) */ safefree (connptr->request_line); goto retry; } log_message (LOG_CONN, "Request (file descriptor %d): %s", connptr->client_fd, connptr->request_line); return 0; }
0
275,964
/*
 * Fill @random with a uniformly distributed value in [1, top-1] using
 * rejection sampling: draw raw bytes from the registered RNG, mask the
 * top word down to the bit length of @top, and retry (up to
 * uECC_RNG_MAX_TRIES times) until the value is non-zero and < @top.
 * Returns 1 on success, 0 when no RNG is configured, the RNG fails, or
 * every try was rejected.
 */
uECC_VLI_API int uECC_generate_random_int(uECC_word_t *random,
                                          const uECC_word_t *top,
                                          wordcount_t num_words) {
    uECC_word_t mask = (uECC_word_t)-1;
    uECC_word_t tries;
    bitcount_t num_bits = uECC_vli_numBits(top, num_words);

    if (!g_rng_function) {
        return 0;
    }

    for (tries = 0; tries < uECC_RNG_MAX_TRIES; ++tries) {
        if (!g_rng_function((uint8_t *)random, num_words * uECC_WORD_SIZE)) {
            return 0;
        }
        /* truncate the highest word so the candidate has at most num_bits
           bits, keeping the per-try rejection probability below 1/2 */
        random[num_words - 1] &=
            mask >> ((bitcount_t)(num_words * uECC_WORD_SIZE * 8 - num_bits));
        if (!uECC_vli_isZero(random, num_words) &&
                uECC_vli_cmp(top, random, num_words) == 1) {
            return 1;
        }
    }
    return 0;
}
0
352,937
/*
 * Validate a value against the LDAP Delivery Method syntax:
 *     DeliveryMethod = pdm *( WSP DOLLAR WSP pdm )
 * where pdm is one of the fixed keywords below.  Each iteration of the
 * goto loop consumes one keyword plus an optional " $ " separator.
 * Returns LDAP_SUCCESS or LDAP_INVALID_SYNTAX.
 */
deliveryMethodValidate(
	Syntax *syntax,
	struct berval *val )
{
#undef LENOF
#define LENOF(s) (sizeof(s)-1)
	/* walk a copy of the berval; the original is untouched */
	struct berval tmp = *val;

	/*
	 *     DeliveryMethod = pdm *( WSP DOLLAR WSP DeliveryMethod )
	 *	pdm = "any" / "mhs" / "physical" / "telex" / "teletex" /
	 *		"g3fax" / "g4fax" / "ia5" / "videotex" / "telephone"
	 */
again:
	/* shortest keyword ("any", "mhs", "ia5") is 3 chars */
	if( tmp.bv_len < 3 ) return LDAP_INVALID_SYNTAX;

	switch( tmp.bv_val[0] ) {
	case 'a':
	case 'A':
		if(( tmp.bv_len >= LENOF("any") ) &&
			( strncasecmp(tmp.bv_val, "any", LENOF("any")) == 0 ))
		{
			tmp.bv_len -= LENOF("any");
			tmp.bv_val += LENOF("any");
			break;
		}
		return LDAP_INVALID_SYNTAX;

	case 'm':
	case 'M':
		if(( tmp.bv_len >= LENOF("mhs") ) &&
			( strncasecmp(tmp.bv_val, "mhs", LENOF("mhs")) == 0 ))
		{
			tmp.bv_len -= LENOF("mhs");
			tmp.bv_val += LENOF("mhs");
			break;
		}
		return LDAP_INVALID_SYNTAX;

	case 'p':
	case 'P':
		if(( tmp.bv_len >= LENOF("physical") ) &&
			( strncasecmp(tmp.bv_val, "physical", LENOF("physical")) == 0 ))
		{
			tmp.bv_len -= LENOF("physical");
			tmp.bv_val += LENOF("physical");
			break;
		}
		return LDAP_INVALID_SYNTAX;

	case 't':
	case 'T': /* telex or teletex or telephone */
		if(( tmp.bv_len >= LENOF("telex") ) &&
			( strncasecmp(tmp.bv_val, "telex", LENOF("telex")) == 0 ))
		{
			tmp.bv_len -= LENOF("telex");
			tmp.bv_val += LENOF("telex");
			break;
		}
		if(( tmp.bv_len >= LENOF("teletex") ) &&
			( strncasecmp(tmp.bv_val, "teletex", LENOF("teletex")) == 0 ))
		{
			tmp.bv_len -= LENOF("teletex");
			tmp.bv_val += LENOF("teletex");
			break;
		}
		if(( tmp.bv_len >= LENOF("telephone") ) &&
			( strncasecmp(tmp.bv_val, "telephone", LENOF("telephone")) == 0 ))
		{
			tmp.bv_len -= LENOF("telephone");
			tmp.bv_val += LENOF("telephone");
			break;
		}
		return LDAP_INVALID_SYNTAX;

	case 'g':
	case 'G': /* g3fax or g4fax */
		/* both keywords have the same length, so one adjustment works */
		if(( tmp.bv_len >= LENOF("g3fax") ) && (
			( strncasecmp(tmp.bv_val, "g3fax", LENOF("g3fax")) == 0 ) ||
			( strncasecmp(tmp.bv_val, "g4fax", LENOF("g4fax")) == 0 )))
		{
			tmp.bv_len -= LENOF("g3fax");
			tmp.bv_val += LENOF("g3fax");
			break;
		}
		return LDAP_INVALID_SYNTAX;

	case 'i':
	case 'I':
		if(( tmp.bv_len >= LENOF("ia5") ) &&
			( strncasecmp(tmp.bv_val, "ia5", LENOF("ia5")) == 0 ))
		{
			tmp.bv_len -= LENOF("ia5");
			tmp.bv_val += LENOF("ia5");
			break;
		}
		return LDAP_INVALID_SYNTAX;

	case 'v':
	case 'V':
		if(( tmp.bv_len >= LENOF("videotex") ) &&
			( strncasecmp(tmp.bv_val, "videotex", LENOF("videotex")) == 0 ))
		{
			tmp.bv_len -= LENOF("videotex");
			tmp.bv_val += LENOF("videotex");
			break;
		}
		return LDAP_INVALID_SYNTAX;

	default:
		return LDAP_INVALID_SYNTAX;
	}

	/* done when the whole value was consumed */
	if( BER_BVISEMPTY( &tmp ) ) return LDAP_SUCCESS;

	/* skip optional whitespace before the separator */
	while( !BER_BVISEMPTY( &tmp ) && ( tmp.bv_val[0] == ' ' ) ) {
		tmp.bv_len--;
		tmp.bv_val++;
	}

	/* a further method requires a '$' separator */
	if( !BER_BVISEMPTY( &tmp ) && ( tmp.bv_val[0] == '$' ) ) {
		tmp.bv_len--;
		tmp.bv_val++;
	} else {
		return LDAP_INVALID_SYNTAX;
	}

	/* skip optional whitespace after the separator */
	while( !BER_BVISEMPTY( &tmp ) && ( tmp.bv_val[0] == ' ' ) ) {
		tmp.bv_len--;
		tmp.bv_val++;
	}

	goto again;
}
0
220,934
/* Estimate the stream duration (and build a seek index) by scanning the
 * whole source file with a throwaway parser.  Only runs for real files
 * (not gmem:// or timescale-driven inputs) and only once.
 *
 * FIX: the original leaked the probe FILE*, bitstream and parser when
 * gf_m4v_parse_config() failed; they are now released on that path,
 * mirroring the normal-exit cleanup. */
static void mpgviddmx_check_dur(GF_Filter *filter, GF_MPGVidDmxCtx *ctx)
{
	FILE *stream;
	GF_BitStream *bs;
	GF_M4VParser *vparser;
	GF_M4VDecSpecInfo dsi;
	GF_Err e;
	u64 duration, cur_dur, rate;
	const GF_PropertyValue *p;

	if (!ctx->opid || ctx->timescale || ctx->file_loaded) return;

	if (ctx->index<=0) {
		ctx->file_loaded = GF_TRUE;
		return;
	}

	p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILEPATH);
	if (!p || !p->value.string || !strncmp(p->value.string, "gmem://", 7)) {
		ctx->is_file = GF_FALSE;
		ctx->file_loaded = GF_TRUE;
		return;
	}
	ctx->is_file = GF_TRUE;

	stream = gf_fopen(p->value.string, "rb");
	if (!stream) return;

	ctx->index_size = 0;

	bs = gf_bs_from_file(stream, GF_BITSTREAM_READ);
	vparser = gf_m4v_parser_bs_new(bs, ctx->is_mpg12);
	e = gf_m4v_parse_config(vparser, &dsi);
	if (e) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[MPGVid] Could not parse video header - duration not estimated\n"));
		ctx->file_loaded = GF_TRUE;
		/* FIX: release probe resources on the error path
		 * (parser deletion also releases the bitstream, as on the
		 * normal exit below) */
		gf_m4v_parser_del(vparser);
		gf_fclose(stream);
		return;
	}

	duration = 0;
	cur_dur = 0;
	while (gf_bs_available(bs)) {
		u8 ftype;
		u32 tinc;
		u64 fsize, start;
		Bool is_coded;
		u64 pos;
		pos = gf_m4v_get_object_start(vparser);
		e = gf_m4v_parse_frame(vparser, &dsi, &ftype, &tinc, &fsize, &start, &is_coded);
		if (e<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[MPGVid] Could not parse video frame\n"));
			continue;
		}

		duration += ctx->cur_fps.den;
		cur_dur += ctx->cur_fps.den;
		/* only index at I-frame start, at most one entry per
		 * ctx->index seconds */
		if (pos && (ftype==0) && (cur_dur >= ctx->index * ctx->cur_fps.num) ) {
			if (!ctx->index_alloc_size) ctx->index_alloc_size = 10;
			else if (ctx->index_alloc_size == ctx->index_size) ctx->index_alloc_size *= 2;
			ctx->indexes = gf_realloc(ctx->indexes, sizeof(MPGVidIdx)*ctx->index_alloc_size);
			ctx->indexes[ctx->index_size].pos = pos;
			ctx->indexes[ctx->index_size].start_time = (Double) (duration-ctx->cur_fps.den);
			ctx->indexes[ctx->index_size].start_time /= ctx->cur_fps.num;
			ctx->index_size ++;
			cur_dur = 0;
		}
	}
	/* rate temporarily holds the file size in bytes for the bitrate
	 * computation below */
	rate = gf_bs_get_position(bs);
	gf_m4v_parser_del(vparser);
	gf_fclose(stream);

	if (!ctx->duration.num || (ctx->duration.num * ctx->cur_fps.num != duration * ctx->duration.den)) {
		ctx->duration.num = (s32) duration;
		ctx->duration.den = ctx->cur_fps.num;

		gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DURATION, & PROP_FRAC64(ctx->duration));

		if (duration && !gf_sys_is_test_mode() ) {
			rate *= 8 * ctx->duration.den;
			rate /= ctx->duration.num;
			ctx->bitrate = (u32) rate;
		}
	}

	p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILE_CACHED);
	if (p && p->value.boolean) ctx->file_loaded = GF_TRUE;

	gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CAN_DATAREF, & PROP_BOOL(GF_TRUE ) );
}
0
389,671
check_for_list_or_dict_arg(typval_T *args, int idx)
{
    // Accept either container type; anything else is a usage error.
    if (args[idx].v_type == VAR_LIST || args[idx].v_type == VAR_DICT)
	return OK;

    semsg(_(e_list_or_dict_required_for_argument_nr), idx + 1);
    return FAIL;
}
0
276,960
WriteAc4Header(AP4_ByteStream& output, unsigned int frame_size)
{
    // AC-4 raw frame header: sync word 0xAC40, size escape 0xFFFF,
    // then the real frame size as a 24-bit big-endian value.
    unsigned char header[7] = {
        0xac, 0x40,
        0xff, 0xff,
        (unsigned char)((frame_size >> 16) & 0xFF),
        (unsigned char)((frame_size >>  8) & 0xFF),
        (unsigned char)( frame_size        & 0xFF)
    };
    return output.Write(header, sizeof(header));
}
0
373,529
/* Fold an ipf_addr into a running hash.  The compile-time assert
 * guarantees the address size is a multiple of 4 bytes, as required by
 * hash_add_bytes32(), which consumes 32-bit words. */
ipf_addr_hash_add(uint32_t hash, const union ipf_addr *addr)
{
    BUILD_ASSERT_DECL(sizeof *addr % 4 == 0);
    return hash_add_bytes32(hash, (const uint32_t *) addr, sizeof *addr);
}
0
513,359
/*
  Re-compute cost and fanout of the join-order range
  [first_tab..last_tab] as if join buffering were disabled, leaving all
  other optimizer decisions intact.  Results are returned through
  *outer_rec_count (record count contributed by non-semi-join tables)
  and *reopt_cost.
*/
void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
                                table_map last_remaining_tables,
                                bool first_alt, uint no_jbuf_before,
                                double *outer_rec_count, double *reopt_cost)
{
  double cost, rec_count;
  table_map reopt_remaining_tables= last_remaining_tables;
  uint i;

  /* Seed cost/fanout from the prefix preceding first_tab, if any. */
  if (first_tab > join->const_tables)
  {
    cost=      join->positions[first_tab - 1].prefix_cost.total_cost();
    rec_count= join->positions[first_tab - 1].prefix_record_count;
  }
  else
  {
    cost= 0.0;
    rec_count= 1;
  }

  *outer_rec_count= rec_count;
  for (i= first_tab; i <= last_tab; i++)
    reopt_remaining_tables |= join->positions[i].table->table->map;

  /*
    best_access_path() optimization depends on the value of
    join->cur_sj_inner_tables. Our goal in this function is to do a
    re-optimization with disabled join buffering, but no other changes.
    In order to achieve this, cur_sj_inner_tables needs have the same
    value it had during the original invocations of best_access_path.

    We know that this function, optimize_wo_join_buffering() is called
    to re-optimize semi-join join order range, which allows to conclude
    that the "original" value of cur_sj_inner_tables was 0.
  */
  table_map save_cur_sj_inner_tables= join->cur_sj_inner_tables;
  join->cur_sj_inner_tables= 0;

  for (i= first_tab; i <= last_tab; i++)
  {
    JOIN_TAB *rs= join->positions[i].table;
    POSITION pos, loose_scan_pos;

    if ((i == first_tab && first_alt) || join->positions[i].use_join_buffer)
    {
      /* Find the best access method that would not use join buffering */
      best_access_path(join, rs, reopt_remaining_tables, join->positions,
                       i, TRUE, rec_count, &pos, &loose_scan_pos);
    }
    else
      pos= join->positions[i];

    /* NOTE(review): with first_alt set, the first table's position is
       immediately overridden by the LooseScan alternative computed
       above — presumably intentional; verify against caller. */
    if ((i == first_tab && first_alt))
      pos= loose_scan_pos;

    reopt_remaining_tables &= ~rs->table->map;
    rec_count= COST_MULT(rec_count, pos.records_read);
    cost= COST_ADD(cost, pos.read_time);

    /* Only non-semi-join tables contribute to the outer fanout. */
    if (!rs->emb_sj_nest)
      *outer_rec_count= COST_MULT(*outer_rec_count, pos.records_read);
  }
  join->cur_sj_inner_tables= save_cur_sj_inner_tables;

  *reopt_cost= cost;
}
0
385,865
/* Common permission checks before unlinking (or, with @isdir, removing)
 * @victim from directory @dir.  The check order is deliberate: existence,
 * audit, write/exec permission on the parent, then per-inode and
 * per-dentry restrictions.  Returns 0 when deletion may proceed. */
static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
{
	int error;

	if (!victim->d_inode)
		return -ENOENT;

	/* victim must actually be a child of dir */
	BUG_ON(victim->d_parent->d_inode != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;

	/* sticky-bit rules, append-only/immutable inodes, active swap files */
	if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
	    IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
		return -EPERM;
	if (isdir) {
		if (!S_ISDIR(victim->d_inode->i_mode))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (S_ISDIR(victim->d_inode->i_mode))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	/* silly-renamed by NFS: still in use somewhere */
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}
0
482,482
lou_getEmphClasses(const char *tableList) {
	/* Collect the emphasis-class names registered in the table and
	 * return them as a freshly malloc'ed, NULL-terminated array
	 * (argv-style).  The caller owns the returned array. */
	const char *names[MAX_EMPH_CLASSES + 1];
	unsigned int n = 0;
	const TranslationTableHeader *table = _lou_getTranslationTable(tableList);
	if (!table) return NULL;

	/* stop at the first unset slot */
	while (n < MAX_EMPH_CLASSES && table->emphClassNames[n] != NULL) {
		names[n] = table->emphClassNames[n];
		n++;
	}
	names[n++] = NULL;

	{
		unsigned int bytes = n * sizeof(names[0]);
		char const **copy = malloc(bytes);
		if (!copy) return NULL;
		/* The void* cast is necessary to stop MSVC from warning about
		 * different 'const' qualifiers (C4090). */
		memcpy((void *)copy, names, bytes);
		return copy;
	}
}
0
500,090
/* Release a KSSL_CTX and everything it owns.  Key material is wiped
 * with OPENSSL_cleanse before being freed so it does not linger on the
 * heap.  Always returns NULL so callers can write
 * "ctx = kssl_ctx_free(ctx);".
 *
 * FIX: the original tested kssl_ctx->key twice in a row (once before
 * cleanse, once before free); merged into a single check. */
kssl_ctx_free(KSSL_CTX *kssl_ctx)
{
	if (kssl_ctx == NULL)
		return kssl_ctx;

	if (kssl_ctx->key) {
		OPENSSL_cleanse(kssl_ctx->key, kssl_ctx->length);
		kssl_free(kssl_ctx->key);
	}
	if (kssl_ctx->client_princ)
		kssl_free(kssl_ctx->client_princ);
	if (kssl_ctx->service_host)
		kssl_free(kssl_ctx->service_host);
	if (kssl_ctx->service_name)
		kssl_free(kssl_ctx->service_name);
	if (kssl_ctx->keytab_file)
		kssl_free(kssl_ctx->keytab_file);

	kssl_free(kssl_ctx);

	return (KSSL_CTX *) NULL;
}
0
221,630
// Trivial constructor: forwards the MLIRContext to Base; this lowering
// keeps no state of its own.
DynamicBroadcastInDimOpLowering::DynamicBroadcastInDimOpLowering(
    MLIRContext* ctx)
    : Base(ctx) {}
0
234,778
/*
 * Cancel a running or paused balance operation.  Returns -ENOTCONN when
 * no balance exists and -EROFS on a read-only mount (cancelling would
 * delete the on-disk balance item that a later rw mount could resume).
 */
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	/*
	 * A paused balance with the item stored on disk can be resumed at
	 * mount time if the mount is read-write. Otherwise it's still paused
	 * and we must not allow cancelling as it deletes the item.
	 */
	if (sb_rdonly(fs_info->sb)) {
		mutex_unlock(&fs_info->balance_mutex);
		return -EROFS;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		mutex_lock(&fs_info->balance_mutex);
	} else {
		mutex_unlock(&fs_info->balance_mutex);
		/*
		 * Lock released to allow other waiters to continue, we'll
		 * reexamine the status again.
		 */
		mutex_lock(&fs_info->balance_mutex);

		/* paused balance: tear down its state ourselves */
		if (fs_info->balance_ctl) {
			reset_balance_state(fs_info);
			btrfs_exclop_finish(fs_info);
			btrfs_info(fs_info, "balance: canceled");
		}
	}

	BUG_ON(fs_info->balance_ctl ||
	       test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}
0
442,790
/* Translate @buffer (length @length) in place from the network encoding
 * to the host encoding via iconv, lazily opening the shared inbound
 * conversion descriptor on first use.
 *
 * FIX: the iconv() result was stored in a CURLcode and compared against
 * plain -1; iconv() returns size_t with (size_t)-1 signalling failure,
 * so use the proper type and sentinel. */
convert_from_network(char *buffer, size_t length)
{
  size_t rc;            /* iconv() result: converted count or (size_t)-1 */
  char *input_ptr, *output_ptr;
  size_t in_bytes, out_bytes;

  /* open an iconv conversion descriptor if necessary */
  if(inbound_cd == (iconv_t)-1) {
    inbound_cd = iconv_open(CURL_ICONV_CODESET_OF_HOST,
                            CURL_ICONV_CODESET_OF_NETWORK);
    if(inbound_cd == (iconv_t)-1) {
      return CURLE_CONV_FAILED;
    }
  }
  /* call iconv: in-place conversion, so input and output share buffer */
  input_ptr = output_ptr = buffer;
  in_bytes = out_bytes = length;
  rc = iconv(inbound_cd, &input_ptr, &in_bytes, &output_ptr, &out_bytes);
  /* failure, or not all input consumed */
  if((rc == (size_t)-1) || (in_bytes != 0)) {
    return CURLE_CONV_FAILED;
  }
  return CURLE_OK;
}
0
484,050
/* With securityMode SIGN, sending an asymmetric OPN message must succeed
 * and must invoke both the asymmetric encryption and asymmetric signing
 * hooks (tracked via the fCalled spy struct). */
START_TEST(SecureChannel_sendAsymmetricOPNMessage_SecurityModeSign) {
        // Configure our channel correctly for OPN messages and setup dummy message
        UA_OpenSecureChannelResponse dummyResponse;
        createDummyResponse(&dummyResponse);
        testChannel.securityMode = UA_MESSAGESECURITYMODE_SIGN;

        UA_StatusCode retval =
            UA_SecureChannel_sendAsymmetricOPNMessage(&testChannel, 42, &dummyResponse,
                                                      &UA_TYPES[UA_TYPES_OPENSECURECHANNELRESPONSE]);
        ck_assert_msg(retval == UA_STATUSCODE_GOOD, "Expected function to succeed");
        ck_assert_msg(fCalled.asym_enc, "Expected message to have been encrypted but it was not");
        ck_assert_msg(fCalled.asym_sign, "Expected message to have been signed but it was not");
    }END_TEST
0
195,328
/* Read one line from txt_in into szLine (capacity lineSize) and convert
 * it to UTF-8.  unicode_type: 0 = unknown 8-bit codepage, 1 = UTF-8,
 * 2/3 = UTF-16 (LE/BE depending on host endianness).
 *
 * SECURITY FIX: the original could overflow the fixed 1024-byte
 * szLineConv conversion buffer (each non-UTF8 input byte expands to two
 * output bytes and lineSize may exceed 1024), could write the UTF-16
 * conversion terminator one byte past the buffer, and copied the result
 * back with an unbounded strcpy.  All three paths are now bounded. */
char *gf_text_get_utf8_line(char *szLine, u32 lineSize, FILE *txt_in, s32 unicode_type)
{
	u32 i, j, len;
	char *sOK;
	char szLineConv[1024];
	unsigned short *sptr;

	memset(szLine, 0, sizeof(char)*lineSize);
	sOK = gf_fgets(szLine, lineSize, txt_in);
	if (!sOK) return NULL;

	if (unicode_type<=1) {
		j=0;
		len = (u32) strlen(szLine);
		for (i=0; i<len; i++) {
			/* FIX: a multi-byte step below emits up to 3 bytes plus
			the final terminator; stop before either the conversion
			buffer or the caller's buffer would overflow */
			if ((j+4 >= (u32) sizeof(szLineConv)) || (j+4 >= lineSize))
				break;

			if (!unicode_type && (szLine[i] & 0x80)) {
				/*non UTF8 (likely some win-CP)*/
				if ((szLine[i+1] & 0xc0) != 0x80) {
					szLineConv[j] = 0xc0 | ( (szLine[i] >> 6) & 0x3 );
					j++;
					szLine[i] &= 0xbf;
				}
				/*UTF8 2 bytes char*/
				else if ( (szLine[i] & 0xe0) == 0xc0) {
					szLineConv[j] = szLine[i];
					i++;
					j++;
				}
				/*UTF8 3 bytes char*/
				else if ( (szLine[i] & 0xf0) == 0xe0) {
					szLineConv[j] = szLine[i];
					i++;
					j++;
					szLineConv[j] = szLine[i];
					i++;
					j++;
				}
				/*UTF8 4 bytes char*/
				else if ( (szLine[i] & 0xf8) == 0xf0) {
					szLineConv[j] = szLine[i];
					i++;
					j++;
					szLineConv[j] = szLine[i];
					i++;
					j++;
					szLineConv[j] = szLine[i];
					i++;
					j++;
				} else {
					i+=1;
					continue;
				}
			}
			szLineConv[j] = szLine[i];
			j++;
		}
		szLineConv[j] = 0;
		/* safe: the loop guard guarantees j < lineSize */
		strcpy(szLine, szLineConv);
		return sOK;
	}

#ifdef GPAC_BIG_ENDIAN
	if (unicode_type==3)
#else
	if (unicode_type==2)
#endif
	{
		/* byte-swap UTF-16 words into host order */
		i=0;
		while (1) {
			char c;
			if (!szLine[i] && !szLine[i+1]) break;
			c = szLine[i+1];
			szLine[i+1] = szLine[i];
			szLine[i] = c;
			i+=2;
		}
	}

	sptr = (u16 *)szLine;
	/* FIX: cap the conversion so the terminating NUL always fits, treat
	a conversion error ((u32)-1) as an empty line, and bound the copy
	back into the caller's buffer */
	i = (u32) gf_utf8_wcstombs(szLineConv, sizeof(szLineConv)-1, (const unsigned short **) &sptr);
	if (i == (u32) -1) i = 0;
	szLineConv[i] = 0;
	if (i >= lineSize) i = lineSize - 1;
	memcpy(szLine, szLineConv, i);
	szLine[i] = 0;
	/*this is ugly indeed: since input is UTF16-LE, there are many chances the gf_fgets never reads the \0 after a \n*/
	if (unicode_type==3) gf_fgetc(txt_in);
	return sOK;
}
1
442,826
/* Print the command-line option summary, one line per entry.  The table
 * is NULL-terminated; on NetWare (__NOVELL_LIBC__) output pauses every
 * 23 lines so it does not scroll off screen. */
static void help(void)
{
  int i;
  /* option text: (H) = HTTP/HTTPS only, (F) = FTP only */
  static const char * const helptext[]={
    "Usage: curl [options...] <url>",
    "Options: (H) means HTTP/HTTPS only, (F) means FTP only",
    " -a/--append Append to target file when uploading (F)",
    " -A/--user-agent <string> User-Agent to send to server (H)",
    " --anyauth Pick \"any\" authentication method (H)",
    " -b/--cookie <name=string/file> Cookie string or file to read cookies from (H)",
    " --basic Use HTTP Basic Authentication (H)",
    " -B/--use-ascii Use ASCII/text transfer",
    " -c/--cookie-jar <file> Write cookies to this file after operation (H)",
    " -C/--continue-at <offset> Resumed transfer offset",
    " -d/--data <data> HTTP POST data (H)",
    " --data-ascii <data> HTTP POST ASCII data (H)",
    " --data-binary <data> HTTP POST binary data (H)",
    " --negotiate Use HTTP Negotiate Authentication (H)",
    " --digest Use HTTP Digest Authentication (H)",
    " --disable-eprt Inhibit using EPRT or LPRT (F)",
    " --disable-epsv Inhibit using EPSV (F)",
    " -D/--dump-header <file> Write the headers to this file",
    " --egd-file <file> EGD socket path for random data (SSL)",
    " --tcp-nodelay Use the TCP_NODELAY option",
#ifdef USE_ENVIRONMENT
    " --environment Write results to environment variables (RISC OS)",
#endif
    " -e/--referer Referer URL (H)",
    " -E/--cert <cert[:passwd]> Client certificate file and password (SSL)",
    " --cert-type <type> Certificate file type (DER/PEM/ENG) (SSL)",
    " --key <key> Private key file name (SSL/SSH)",
    " --key-type <type> Private key file type (DER/PEM/ENG) (SSL)",
    " --pass <pass> Pass phrase for the private key (SSL/SSH)",
    " --pubkey <key> Public key file name (SSH)",
    " --engine <eng> Crypto engine to use (SSL). \"--engine list\" for list",
    " --cacert <file> CA certificate to verify peer against (SSL)",
    " --capath <directory> CA directory (made using c_rehash) to verify",
    " peer against (SSL)",
    " --ciphers <list> SSL ciphers to use (SSL)",
    " --compressed Request compressed response (using deflate or gzip)",
    " --connect-timeout <seconds> Maximum time allowed for connection",
    " --create-dirs Create necessary local directory hierarchy",
    " --crlf Convert LF to CRLF in upload",
    " -f/--fail Fail silently (no output at all) on HTTP errors (H)",
    " --ftp-account <data> Account data to send when requested by server (F)",
    " --ftp-alternative-to-user String to replace \"USER [name]\" (F)",
    " --ftp-create-dirs Create the remote dirs if not present (F)",
    " --ftp-method [multicwd/nocwd/singlecwd] Control CWD usage (F)",
    " --ftp-pasv Use PASV/EPSV instead of PORT (F)",
    " --ftp-skip-pasv-ip Skip the IP address for PASV (F)\n"
    " --ftp-ssl Try SSL/TLS for ftp transfer (F)",
    " --ftp-ssl-control Require SSL/TLS for ftp login, clear for transfer (F)",
    " --ftp-ssl-reqd Require SSL/TLS for ftp transfer (F)",
    " --ftp-ssl-ccc Send CCC after authenticating (F)",
    " --ftp-ssl-ccc-mode [active/passive] Set CCC mode (F)",
    " -F/--form <name=content> Specify HTTP multipart POST data (H)",
    " --form-string <name=string> Specify HTTP multipart POST data (H)",
    " -g/--globoff Disable URL sequences and ranges using {} and []",
    " -G/--get Send the -d data with a HTTP GET (H)",
    " -h/--help This help text",
    " -H/--header <line> Custom header to pass to server (H)",
    " --ignore-content-length Ignore the HTTP Content-Length header",
    " -i/--include Include protocol headers in the output (H/F)",
    " -I/--head Show document info only",
    " -j/--junk-session-cookies Ignore session cookies read from file (H)",
    " --interface <interface> Specify network interface/address to use",
    " --krb4 <level> Enable krb4 with specified security level (F)",
    " -k/--insecure Allow connections to SSL sites without certs (H)",
    " -K/--config Specify which config file to read",
    " --libcurl <file> Dump libcurl equivalent code of this command line",
    " -l/--list-only List only names of an FTP directory (F)",
    " --limit-rate <rate> Limit transfer speed to this rate",
    " --local-port <num>[-num] Force use of these local port numbers\n",
    " -L/--location Follow Location: hints (H)",
    " --location-trusted Follow Location: and send authentication even ",
    " to other hostnames (H)",
    " -m/--max-time <seconds> Maximum time allowed for the transfer",
    " --max-redirs <num> Maximum number of redirects allowed (H)",
    " --max-filesize <bytes> Maximum file size to download (H/F)",
    " -M/--manual Display the full manual",
    " -n/--netrc Must read .netrc for user name and password",
    " --netrc-optional Use either .netrc or URL; overrides -n",
    " --ntlm Use HTTP NTLM authentication (H)",
    " -N/--no-buffer Disable buffering of the output stream",
    " --no-sessionid Disable SSL session-ID reusing (SSL)",
    " -o/--output <file> Write output to <file> instead of stdout",
    " -O/--remote-name Write output to a file named as the remote file",
    " -p/--proxytunnel Operate through a HTTP proxy tunnel (using CONNECT)",
    " --proxy-anyauth Pick \"any\" proxy authentication method (H)",
    " --proxy-basic Use Basic authentication on the proxy (H)",
    " --proxy-digest Use Digest authentication on the proxy (H)",
    " --proxy-ntlm Use NTLM authentication on the proxy (H)",
    " -P/--ftp-port <address> Use PORT with address instead of PASV (F)",
    " -q If used as the first parameter disables .curlrc",
    " -Q/--quote <cmd> Send command(s) to server before file transfer (F/SFTP)",
    " -r/--range <range> Retrieve a byte range from a HTTP/1.1 or FTP server",
    " --random-file <file> File for reading random data from (SSL)",
    " --raw Pass HTTP \"raw\", without any transfer decoding (H)",
    " -R/--remote-time Set the remote file's time on the local output",
    " --retry <num> Retry request <num> times if transient problems occur",
    " --retry-delay <seconds> When retrying, wait this many seconds between each",
    " --retry-max-time <seconds> Retry only within this period",
    " -s/--silent Silent mode. Don't output anything",
    " -S/--show-error Show error. With -s, make curl show errors when they occur",
    " --socks4 <host[:port]> Use SOCKS4 proxy on given host + port",
    " --socks5 <host[:port]> Use SOCKS5 proxy on given host + port",
    " --stderr <file> Where to redirect stderr. - means stdout",
    " -t/--telnet-option <OPT=val> Set telnet option",
    " --trace <file> Write a debug trace to the given file",
    " --trace-ascii <file> Like --trace but without the hex output",
    " --trace-time Add time stamps to trace/verbose output",
    " -T/--upload-file <file> Transfer <file> to remote site",
    " --url <URL> Set URL to work with",
    " -u/--user <user[:password]> Set server user and password",
    " -U/--proxy-user <user[:password]> Set proxy user and password",
    " -v/--verbose Make the operation more talkative",
    " -V/--version Show version number and quit",
#ifdef MSDOS
    " --wdebug Turn on Watt-32 debugging under DJGPP",
#endif
    " -w/--write-out [format] What to output after completion",
    " -x/--proxy <host[:port]> Use HTTP proxy on given port",
    " -X/--request <command> Specify request command to use",
    " -y/--speed-time Time needed to trig speed-limit abort. Defaults to 30",
    " -Y/--speed-limit Stop transfer if below speed-limit for 'speed-time' secs",
    " -z/--time-cond <time> Transfer based on a time condition",
    " -0/--http1.0 Use HTTP 1.0 (H)",
    " -1/--tlsv1 Use TLSv1 (SSL)",
    " -2/--sslv2 Use SSLv2 (SSL)",
    " -3/--sslv3 Use SSLv3 (SSL)",
    " -4/--ipv4 Resolve name to IPv4 address",
    " -6/--ipv6 Resolve name to IPv6 address",
    " -#/--progress-bar Display transfer progress as a progress bar",
    NULL
  };
  for(i=0; helptext[i]; i++) {
    puts(helptext[i]);
#ifdef __NOVELL_LIBC__
    /* pause every screenful on NetWare consoles */
    if (i && ((i % 23) == 0))
      pressanykey();
#endif
  }
}
0
301,482
sug_compare(const void *s1, const void *s2) { suggest_T *p1 = (suggest_T *)s1; suggest_T *p2 = (suggest_T *)s2; int n = p1->st_score - p2->st_score; if (n == 0) { n = p1->st_altscore - p2->st_altscore; if (n == 0) n = STRICMP(p1->st_word, p2->st_word); } return n; }
0
238,433
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int ret;

	/* Shallow-copy everything up to (not including) acquired_refs,
	 * then deep-copy the reference and stack state separately. */
	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	ret = copy_reference_state(dst, src);
	return ret ? ret : copy_stack_state(dst, src);
}
0
234,844
/*
 * Drop cached btrfs devices that are stale: not part of an opened
 * fs_devices and (when @path is given) matching that path.
 * @skip_device is never removed.  With a @path, the default result is
 * -ENOENT (nothing matched) and -EBUSY when the match sits in an opened
 * fs_devices; without a @path, every stale device is dropped and 0 is
 * returned.
 */
static int btrfs_free_stale_devices(const char *path,
				     struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		/* last device gone: retire the whole fs_devices entry */
		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}
0
274,860
// less_equal with a broadcast scalar second operand: each quantized
// element of input1 is compared against 8, so the expected boolean
// pattern is the same regardless of which of the four shapes is used.
TEST(ComparisonsTest, QuantizedInt8LessEqualWithBroadcast) {
  const float kMin = -127.f;
  const float kMax = 127.f;
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  for (int i = 0; i < test_shapes.size(); ++i) {
    ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
                            {TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
                            BuiltinOperator_LESS_EQUAL);
    model.QuantizeAndPopulate<int8_t>(model.input1(), {20, -2, -71, 8, 11, 20});
    model.QuantizeAndPopulate<int8_t>(model.input2(), {8});
    model.Invoke();
    EXPECT_THAT(model.GetOutput(),
                ElementsAre(false, true, true, true, false, false))
        << "With shape number " << i;
  }
}
0
442,572
/* Mask a QXL physical address down to its "clean" virtual part, using
 * the mask configured in the memslot info (strips the slot/flag bits
 * encoded in the upper part of the address). */
static unsigned long __get_clean_virt(RedMemSlotInfo *info, QXLPHYSICAL addr)
{
    return addr & info->memslot_clean_virt_mask;
}
0
317,174
/* Teardown counterpart of the SELinux netfilter setup: unregister the
 * per-network-namespace hook operations. */
static void selinux_nf_ip_exit(void)
{
	pr_debug("SELinux: Unregistering netfilter hooks\n");

	unregister_pernet_subsys(&selinux_net_ops);
}
0
261,447
static void read_cross_comp_pred(thread_context* tctx, int cIdxMinus1)
{
  // Decode the cross-component residual scale: magnitude is
  // 2^(log2_abs_plus1 - 1); a sign flag (decoded only when the
  // magnitude is non-zero) negates it.  Zero means "no prediction".
  int log2_abs_plus1 = decode_log2_res_scale_abs_plus1(tctx, cIdxMinus1);
  int scale = 0;

  if (log2_abs_plus1 != 0) {
    scale = 1 << (log2_abs_plus1 - 1);
    if (decode_res_scale_sign_flag(tctx, cIdxMinus1)) {
      scale = -scale;
    }
  }

  tctx->ResScaleVal = scale;
}
0
424,522
/* Allocate and initialize a video presentation context: H.264 decoder,
 * sample accumulation stream, surface pixel buffer (width*height*4
 * bytes), client surface and YUV converter.  On failure the goto chain
 * unwinds exactly the resources created so far and returns NULL. */
static PresentationContext* PresentationContext_new(VideoClientContext* video, BYTE PresentationId,
                                                    UINT32 x, UINT32 y, UINT32 width, UINT32 height)
{
	size_t s;
	VideoClientContextPriv* priv = video->priv;
	PresentationContext* ret;

	/* compute the surface size in 64 bits and reject overflowing ones */
	s = width * height * 4ULL;
	if (s > INT32_MAX)
		return NULL;

	ret = calloc(1, sizeof(*ret));
	if (!ret)
		return NULL;

	ret->video = video;
	ret->PresentationId = PresentationId;

	ret->h264 = h264_context_new(FALSE);
	if (!ret->h264)
	{
		WLog_ERR(TAG, "unable to create a h264 context");
		goto error_h264;
	}
	h264_context_reset(ret->h264, width, height);

	ret->currentSample = Stream_New(NULL, 4096);
	if (!ret->currentSample)
	{
		WLog_ERR(TAG, "unable to create current packet stream");
		goto error_currentSample;
	}

	ret->surfaceData = BufferPool_Take(priv->surfacePool, s);
	if (!ret->surfaceData)
	{
		WLog_ERR(TAG, "unable to allocate surfaceData");
		goto error_surfaceData;
	}

	ret->surface = video->createSurface(video, ret->surfaceData, x, y, width, height);
	if (!ret->surface)
	{
		WLog_ERR(TAG, "unable to create surface");
		goto error_surface;
	}

	ret->yuv = yuv_context_new(FALSE);
	if (!ret->yuv)
	{
		WLog_ERR(TAG, "unable to create YUV decoder");
		goto error_yuv;
	}

	yuv_context_reset(ret->yuv, width, height);
	ret->refCounter = 1;
	return ret;

	/* unwind in reverse order of acquisition */
error_yuv:
	video->deleteSurface(video, ret->surface);
error_surface:
	BufferPool_Return(priv->surfacePool, ret->surfaceData);
error_surfaceData:
	Stream_Free(ret->currentSample, TRUE);
error_currentSample:
	h264_context_free(ret->h264);
error_h264:
	free(ret);
	return NULL;
}
0
310,083
/* Program hardware soft-label slot @labnum (1-based) with @text via the
 * terminfo plab_norm capability; slots outside 1..num_labels are
 * silently ignored. */
drv_hwlabel(TERMINAL_CONTROL_BLOCK * TCB, int labnum, char *text)
{
    SCREEN *sp = TCB->csp;	/* used by the NCURSES_PUTP2 macro */

    AssertTCB();
    if (labnum > 0 && labnum <= num_labels) {
	NCURSES_PUTP2("plab_norm", TPARM_2(plab_norm, labnum, text));
    }
}
0
389,743
check_for_blob_arg(typval_T *args, int idx)
{
    // Only a Blob is acceptable at this position.
    if (args[idx].v_type == VAR_BLOB)
	return OK;

    semsg(_(e_blob_required_for_argument_nr), idx + 1);
    return FAIL;
}
0
513,213
/*
  Take a reference on an already-resolved plugin.  Built-in plugins can
  skip the LOCK_plugin mutex in optimized builds (see the comment
  below); dynamic plugins are re-locked via intern_plugin_lock() under
  the mutex.
*/
plugin_ref plugin_lock(THD *thd, plugin_ref ptr)
{
  LEX *lex= thd ? thd->lex : 0;
  plugin_ref rc;
  DBUG_ENTER("plugin_lock");

#ifdef DBUG_OFF
  /*
    In optimized builds we don't do reference counting for built-in
    (plugin->plugin_dl == 0) plugins.

    Note that we access plugin->plugin_dl outside of LOCK_plugin, and
    for dynamic plugins a 'plugin' could correspond to plugin that was
    unloaded meanwhile!  But because st_plugin_int is always allocated
    on plugin_mem_root, the pointer can never be invalid - the memory
    is never freed.
    Of course, the memory that 'plugin' points to can be overwritten by
    another plugin being loaded, but plugin->plugin_dl can never change
    from zero to non-zero or vice versa.
    That is, it's always safe to check for plugin->plugin_dl==0 even
    without a mutex.
  */
  if (! plugin_dlib(ptr))
  {
    plugin_ref_to_int(ptr)->locks_total++;
    DBUG_RETURN(ptr);
  }
#endif
  mysql_mutex_lock(&LOCK_plugin);
  plugin_ref_to_int(ptr)->locks_total++;
  rc= intern_plugin_lock(lex, ptr);
  mysql_mutex_unlock(&LOCK_plugin);
  DBUG_RETURN(rc);
}
0
364,738
/*
 * Apply the dict "d" to window "wp"'s tag stack.  "action" is 't'
 * (truncate above the current index before adding), 'r' (replace the
 * whole stack) or anything else (append).  Returns OK or FAIL.
 */
set_tagstack(win_T *wp, dict_T *d, int action)
{
    dictitem_T	*di;
    list_T	*l = NULL;

#ifdef FEAT_EVAL
    // not allowed to alter the tag stack entries from inside tagfunc
    if (tfu_in_use)
    {
	emsg(_(e_cannot_modify_tag_stack_within_tagfunc));
	return FAIL;
    }
#endif

    if ((di = dict_find(d, (char_u *)"items", -1)) != NULL)
    {
	if (di->di_tv.v_type != VAR_LIST)
	{
	    emsg(_(e_list_required));
	    return FAIL;
	}
	l = di->di_tv.vval.v_list;
    }

    if ((di = dict_find(d, (char_u *)"curidx", -1)) != NULL)
	// "curidx" is 1-based in the dict, 0-based internally
	tagstack_set_curidx(wp, (int)tv_get_number(&di->di_tv) - 1);

    if (action == 't')		// truncate the stack
    {
	taggy_T	*tagstack = wp->w_tagstack;
	int	tagstackidx = wp->w_tagstackidx;
	int	tagstacklen = wp->w_tagstacklen;

	// delete all the tag stack entries above the current entry
	while (tagstackidx < tagstacklen)
	    tagstack_clear_entry(&tagstack[--tagstacklen]);
	wp->w_tagstacklen = tagstacklen;
    }

    if (l != NULL)
    {
	if (action == 'r')		// replace the stack
	    tagstack_clear(wp);

	tagstack_push_items(wp, l);
	// set the current index after the last entry
	wp->w_tagstackidx = wp->w_tagstacklen;
    }

    return OK;
}
0
308,174
/* dma-buf detach callback: unlink this attachment from the buffer's
 * attachment list (under the buffer lock), then release its scatter
 * table and the attachment object itself.
 * NOTE(review): "detatch" is a pre-existing typo in the symbol name;
 * renaming it would break whatever registers this callback. */
static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
				    struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}
0
226,112
/* Parse the 'stts' (decoding time-to-sample) box payload.  Validates
 * the declared entry count against the remaining box size and the
 * allocation limit, accumulates write-side state (sample count / last
 * DTS) for non-read-only builds, tracks the max delta, and patches
 * forbidden zero deltas to 1. */
GF_Err stts_box_read(GF_Box *s, GF_BitStream *bs)
{
	u32 i;
	GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s;

#ifndef GPAC_DISABLE_ISOM_WRITE
	ptr->w_LastDTS = 0;
#endif
	ISOM_DECREASE_SIZE(ptr, 4);
	ptr->nb_entries = gf_bs_read_u32(bs);

	/* each entry occupies 8 bytes; reject counts that cannot fit the
	 * payload or would overflow the allocation size computation */
	if (ptr->size / 8 < ptr->nb_entries || (u64)ptr->nb_entries > (u64)SIZE_MAX/sizeof(GF_SttsEntry)) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stts\n", ptr->nb_entries));
		return GF_ISOM_INVALID_FILE;
	}
	ptr->alloc_size = ptr->nb_entries;
	ptr->entries = gf_malloc(sizeof(GF_SttsEntry)*ptr->alloc_size);
	if (!ptr->entries) return GF_OUT_OF_MEM;

	for (i=0; i<ptr->nb_entries; i++) {
		ptr->entries[i].sampleCount = gf_bs_read_u32(bs);
		ptr->entries[i].sampleDelta = gf_bs_read_u32(bs);
#ifndef GPAC_DISABLE_ISOM_WRITE
		ptr->w_currentSampleNum += ptr->entries[i].sampleCount;
		ptr->w_LastDTS += (u64)ptr->entries[i].sampleCount * ptr->entries[i].sampleDelta;
#endif
		if (ptr->max_ts_delta<ptr->entries[i].sampleDelta)
			ptr->max_ts_delta = ptr->entries[i].sampleDelta;

		/* delta 0 is only tolerated on the final entry, and only when
		 * that entry covers a single sample */
		if (!ptr->entries[i].sampleDelta) {
			if ((i+1<ptr->nb_entries) ) {
				GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Found stts entry with sample_delta=0 - forbidden ! Fixing to 1\n" ));
				ptr->entries[i].sampleDelta = 1;
			} else if (ptr->entries[i].sampleCount>1) {
				GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] more than one stts entry at the end of the track with sample_delta=0 - forbidden ! Fixing to 1\n" ));
				ptr->entries[i].sampleDelta = 1;
			}
		}
		//cf issue 1644: some media streams may have sample duration > 2^31 (ttml mostly), we cannot patch this
		//for now we disable the check, one opt could be to have the check only for some media types, or only for the first entry
#if 0
		else if ((s32) ptr->entries[i].sampleDelta < 0) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] stts entry %d has negative duration %d - forbidden ! Fixing to 1, sync may get lost (consider reimport raw media)\n", i, (s32) ptr->entries[i].sampleDelta ));
			ptr->entries[i].sampleDelta = 1;
		}
#endif
	}
	ISOM_DECREASE_SIZE(ptr, ptr->nb_entries*8);

	//remove the last sample delta.
#ifndef GPAC_DISABLE_ISOM_WRITE
	if (ptr->nb_entries) ptr->w_LastDTS -= ptr->entries[ptr->nb_entries-1].sampleDelta;
#endif
	return GF_OK;
}
0
224,535
// Shape function shared by MaxPool and MaxPoolV2.  When the op carries
// extra inputs (c->num_inputs() + 2 == num_inputs does NOT hold), ksize
// and strides come from 4-element input tensors instead of attrs; if
// those tensors are not constant at graph-construction time the output
// shape is unknown.
Status MaxPoolV2Shape(shape_inference::InferenceContext* c, int num_inputs) {
  string data_format_str;
  TensorFormat data_format;
  Status s = c->GetAttr("data_format", &data_format_str);
  if (s.ok()) {
    FormatFromString(data_format_str, &data_format);
  } else {
    data_format = FORMAT_NHWC;  // default when the attr is absent
  }

  const int rank = (data_format == FORMAT_NCHW_VECT_C) ? 5 : 4;
  ShapeHandle input_shape;
  TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &input_shape));

  TF_RETURN_IF_ERROR(
      CheckFormatConstraintsOnShape(data_format, input_shape, "input", c));

  std::vector<int32> kernel_sizes;
  std::vector<int32> strides;
  if (c->num_inputs() + 2 == num_inputs) {
    // Classic MaxPool: window parameters are attributes.
    TF_RETURN_IF_ERROR(c->GetAttr("ksize", &kernel_sizes));
    TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
  } else {
    // Verify shape of ksize and strides input.
    ShapeHandle size;
    DimensionHandle unused;
    TF_RETURN_IF_ERROR(c->WithRank(c->input(c->num_inputs() - 2), 1, &size));
    TF_RETURN_IF_ERROR(c->WithValue(c->Dim(size, 0), 4, &unused));
    TF_RETURN_IF_ERROR(c->WithRank(c->input(c->num_inputs() - 1), 1, &size));
    TF_RETURN_IF_ERROR(c->WithValue(c->Dim(size, 0), 4, &unused));

    const Tensor* kernel_sizes_tensor = c->input_tensor(c->num_inputs() - 2);
    if (kernel_sizes_tensor == nullptr) {
      // Non-constant ksize: cannot infer anything.
      c->set_output(0, c->UnknownShape());
      return Status::OK();
    }
    kernel_sizes.resize(kernel_sizes_tensor->shape().num_elements());
    auto kernel_sizes_vec = kernel_sizes_tensor->flat<int32>();
    std::copy_n(&kernel_sizes_vec(0), kernel_sizes.size(),
                kernel_sizes.begin());

    const Tensor* strides_tensor = c->input_tensor(c->num_inputs() - 1);
    if (strides_tensor == nullptr) {
      // Non-constant strides: cannot infer anything.
      c->set_output(0, c->UnknownShape());
      return Status::OK();
    }
    strides.resize(strides_tensor->shape().num_elements());
    auto strides_vec = strides_tensor->flat<int32>();
    std::copy_n(&strides_vec(0), strides.size(), strides.begin());
  }

  if (strides.size() != 4) {
    return errors::InvalidArgument(
        "MaxPool requires the stride attribute to contain 4 values, but "
        "got: ",
        strides.size());
  }
  if (kernel_sizes.size() != 4) {
    return errors::InvalidArgument(
        "MaxPool requires the ksize attribute to contain 4 values, but got: ",
        kernel_sizes.size());
  }

  int32_t stride_depth = GetTensorDim(strides, data_format, 'C');
  int32_t stride_rows = GetTensorDim(strides, data_format, 'H');
  int32_t stride_cols = GetTensorDim(strides, data_format, 'W');
  int32_t kernel_depth = GetTensorDim(kernel_sizes, data_format, 'C');
  int32_t kernel_rows = GetTensorDim(kernel_sizes, data_format, 'H');
  int32_t kernel_cols = GetTensorDim(kernel_sizes, data_format, 'W');

  constexpr int num_spatial_dims = 2;
  DimensionHandle batch_size_dim = c->Dim(
      input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'N'));
  DimensionHandle in_rows_dim = c->Dim(
      input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'H'));
  DimensionHandle in_cols_dim = c->Dim(
      input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'W'));
  DimensionHandle in_depth_dim = c->Dim(
      input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'C'));

  Padding padding;
  TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));

  ShapeHandle output_shape;
  DimensionHandle output_rows, output_cols, output_depth;
  // Pooling windows are applied to rows, cols and the depth dimension.
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
      c, in_rows_dim, kernel_rows, stride_rows, padding, &output_rows));
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
      c, in_cols_dim, kernel_cols, stride_cols, padding, &output_cols));
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
      c, in_depth_dim, kernel_depth, stride_depth, padding, &output_depth));

  TF_RETURN_IF_ERROR(MakeShapeFromFormat(data_format, batch_size_dim,
                                         {output_rows, output_cols},
                                         output_depth, &output_shape, c));

  c->set_output(0, output_shape);
  return Status::OK();
}
0
246,452
/*
 * Parse a wasm "vector" (a u32 entry count followed by that many entries)
 * from bin->buf, never reading past `bound`.  Each entry is produced by
 * `parse_entry` and owned by the returned RPVector, which frees entries
 * with `free_entry`.  Returns NULL when the count cannot be read or the
 * vector cannot be allocated; on a failing entry the vector is returned
 * truncated at the failing index.
 */
static inline RPVector *parse_vec(RBinWasmObj *bin, ut64 bound, ParseEntryFcn parse_entry, RPVectorFree free_entry) {
	RBuffer *buf = bin->buf;

	ut32 count;
	if (!consume_u32_r (buf, bound, &count)) {
		return NULL;
	}

	RPVector *vec = r_pvector_new (free_entry);
	if (vec) {
		// count comes from untrusted input, so reserve is best-effort
		r_pvector_reserve (vec, count);
		ut32 i;
		for (i = 0; i < count; i++) {
			ut64 start = r_buf_tell (buf);
			void *e = parse_entry (bin, bound, i);
			if (!e || !r_pvector_push (vec, e)) {
				// push failure: free the orphaned entry and stop
				eprintf ("[wasm] Failed to parse entry %u/%u of vec at 0x%" PFMT64x "\n", i, count, start);
				free_entry (e);
				break;
			}
		}
	}
	return vec;
}
0
252,320
/*
 * Fill a ZIP local file header (MZ_ZIP_LOCAL_DIR_HEADER_SIZE bytes) into
 * pDst in little-endian byte order.  The caller appends the filename and
 * extra data after this fixed-size header.
 * NOTE(review): comp_size/uncomp_size are 64-bit parameters written through
 * 32-bit header fields here — presumably zip64 callers place the real sizes
 * in the extra field; verify before relying on this for >4GB entries.
 */
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  /* "version needed to extract": 20 (2.0) when compressed, 0 when stored */
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}
0
220,807
// Returns the output width to use: a configured fixed width when
// max_length > 0, otherwise the smallest width that fits the largest value
// seen so far (max_seen + 1), bounded below by min_length.
int GetOutputSize(int max_seen, int max_length, int min_length) {
  if (max_length > 0) {
    return max_length;
  }
  const int needed = max_seen + 1;
  return (needed < min_length) ? min_length : needed;
}
0
200,287
/*
 * Send a single datagram (or the SYN of an implicit connect) on a TIPC
 * socket.  Resolves the destination from the supplied address (or the
 * stored peer), blocks on link congestion, builds and transmits the
 * message.  Returns dlen on success or a negative errno.
 *
 * Caller must hold the socket lock.
 */
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_socket_addr skaddr;
	struct sk_buff_head pkts;
	int atype, mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (ua) {
		if (!tipc_uaddr_valid(ua, m->msg_namelen))
			return -EINVAL;
		atype = ua->addrtype;
	}

	/* If socket belongs to a communication group follow other paths */
	if (grp) {
		if (!ua)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (atype == TIPC_SERVICE_ADDR)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (atype == TIPC_SOCKET_ADDR)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (atype == TIPC_SERVICE_RANGE)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (!ua) {
		/* No explicit destination: fall back to the stored peer.
		 * NOTE(review): on the connection-oriented (syn) path this
		 * relies on tsk->peer being zero/valid when no connect has
		 * happened yet — confirm peer storage is always initialized.
		 */
		ua = (struct tipc_uaddr *)&tsk->peer;
		if (!syn && ua->family != AF_TIPC)
			return -EDESTADDRREQ;
		atype = ua->addrtype;
	}

	if (unlikely(syn)) {
		/* Connection-oriented socket: this datagram is the SYN */
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (atype == TIPC_SERVICE_ADDR)
			tsk->conn_addrtype = atype;
		msg_set_syn(hdr, 1);
	}

	/* Determine destination */
	if (atype == TIPC_SERVICE_RANGE) {
		return tipc_sendmcast(sock, ua, m, dlen, timeout);
	} else if (atype == TIPC_SERVICE_ADDR) {
		skaddr.node = ua->lookup_node;
		ua->scope = tipc_node2scope(skaddr.node);
		if (!tipc_nametbl_lookup_anycast(net, ua, &skaddr))
			return -EHOSTUNREACH;
	} else if (atype == TIPC_SOCKET_ADDR) {
		skaddr = ua->sk;
	} else {
		return -EINVAL;
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, skaddr.node, 0));
	if (unlikely(rc))
		return rc;

	/* Finally build message header */
	msg_set_destnode(hdr, skaddr.node);
	msg_set_destport(hdr, skaddr.ref);
	if (atype == TIPC_SERVICE_ADDR) {
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, ua->sa.type);
		msg_set_nameinst(hdr, ua->sa.instance);
		msg_set_lookup_scope(hdr, ua->scope);
	} else { /* TIPC_SOCKET_ADDR */
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	}

	/* Add message body */
	__skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;
	/* Keep a clone of the SYN so it can be retransmitted */
	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
		__skb_queue_purge(&pkts);
		return -ENOMEM;
	}

	/* Send message */
	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
	rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		/* Remember the congested link; report success to the caller */
		tipc_dest_push(clinks, skaddr.node, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc)) {
		tipc_set_sk_state(sk, TIPC_CONNECTING);
		if (dlen && timeout) {
			timeout = msecs_to_jiffies(timeout);
			/* NOTE(review): return value of the connect wait is
			 * deliberately ignored here — confirm intended. */
			tipc_wait_for_connect(sock, &timeout);
		}
	}

	return rc ? rc : dlen;
}
1
242,937
/*
 * Receive application data decrypted from the SSL layer.
 *
 * Drives any pending handshake/renegotiation first, then loops until an
 * application-data record is buffered (ssl->in_offt != NULL) and copies up
 * to `len` bytes into `buf`.  Returns the number of bytes read, 0 on clean
 * EOF, or a negative MBEDTLS_ERR_* code.
 */
int mbedtls_ssl_read( mbedtls_ssl_context *ssl, unsigned char *buf, size_t len )
{
    int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
    size_t n;

    if( ssl == NULL || ssl->conf == NULL )
        return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA );

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> read" ) );

#if defined(MBEDTLS_SSL_PROTO_DTLS)
    if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
    {
        if( ( ret = mbedtls_ssl_flush_output( ssl ) ) != 0 )
            return( ret );

        /* Finish retransmitting a pending handshake flight first */
        if( ssl->handshake != NULL &&
            ssl->handshake->retransmit_state == MBEDTLS_SSL_RETRANS_SENDING )
        {
            if( ( ret = mbedtls_ssl_flight_transmit( ssl ) ) != 0 )
                return( ret );
        }
    }
#endif

    /*
     * Check if renegotiation is necessary and/or handshake is
     * in process. If yes, perform/continue, and fall through
     * if an unexpected packet is received while the client
     * is waiting for the ServerHello.
     *
     * (There is no equivalent to the last condition on
     * the server-side as it is not treated as within
     * a handshake while waiting for the ClientHello
     * after a renegotiation request.)
     */
#if defined(MBEDTLS_SSL_RENEGOTIATION)
    ret = ssl_check_ctr_renegotiate( ssl );
    if( ret != MBEDTLS_ERR_SSL_WAITING_SERVER_HELLO_RENEGO &&
        ret != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "ssl_check_ctr_renegotiate", ret );
        return( ret );
    }
#endif

    if( ssl->state != MBEDTLS_SSL_HANDSHAKE_OVER )
    {
        ret = mbedtls_ssl_handshake( ssl );
        if( ret != MBEDTLS_ERR_SSL_WAITING_SERVER_HELLO_RENEGO &&
            ret != 0 )
        {
            MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_handshake", ret );
            return( ret );
        }
    }

    /* Loop as long as no application data record is available */
    while( ssl->in_offt == NULL )
    {
        /* Start timer if not already running */
        if( ssl->f_get_timer != NULL &&
            ssl->f_get_timer( ssl->p_timer ) == -1 )
        {
            mbedtls_ssl_set_timer( ssl, ssl->conf->read_timeout );
        }

        if( ( ret = mbedtls_ssl_read_record( ssl, 1 ) ) != 0 )
        {
            if( ret == MBEDTLS_ERR_SSL_CONN_EOF )
                return( 0 );

            MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_read_record", ret );
            return( ret );
        }

        if( ssl->in_msglen == 0 &&
            ssl->in_msgtype == MBEDTLS_SSL_MSG_APPLICATION_DATA )
        {
            /*
             * OpenSSL sends empty messages to randomize the IV
             */
            if( ( ret = mbedtls_ssl_read_record( ssl, 1 ) ) != 0 )
            {
                if( ret == MBEDTLS_ERR_SSL_CONN_EOF )
                    return( 0 );

                MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_read_record", ret );
                return( ret );
            }
        }

        if( ssl->in_msgtype == MBEDTLS_SSL_MSG_HANDSHAKE )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "received handshake message" ) );

            /*
             * - For client-side, expect SERVER_HELLO_REQUEST.
             * - For server-side, expect CLIENT_HELLO.
             * - Fail (TLS) or silently drop record (DTLS) in other cases.
             */

#if defined(MBEDTLS_SSL_CLI_C)
            if( ssl->conf->endpoint == MBEDTLS_SSL_IS_CLIENT &&
                ( ssl->in_msg[0] != MBEDTLS_SSL_HS_HELLO_REQUEST ||
                  ssl->in_hslen != mbedtls_ssl_hs_hdr_len( ssl ) ) )
            {
                MBEDTLS_SSL_DEBUG_MSG( 1, ( "handshake received (not HelloRequest)" ) );

                /* With DTLS, drop the packet (probably from last handshake) */
#if defined(MBEDTLS_SSL_PROTO_DTLS)
                if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
                {
                    continue;
                }
#endif
                return( MBEDTLS_ERR_SSL_UNEXPECTED_MESSAGE );
            }
#endif /* MBEDTLS_SSL_CLI_C */

#if defined(MBEDTLS_SSL_SRV_C)
            if( ssl->conf->endpoint == MBEDTLS_SSL_IS_SERVER &&
                ssl->in_msg[0] != MBEDTLS_SSL_HS_CLIENT_HELLO )
            {
                MBEDTLS_SSL_DEBUG_MSG( 1, ( "handshake received (not ClientHello)" ) );

                /* With DTLS, drop the packet (probably from last handshake) */
#if defined(MBEDTLS_SSL_PROTO_DTLS)
                if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
                {
                    continue;
                }
#endif
                return( MBEDTLS_ERR_SSL_UNEXPECTED_MESSAGE );
            }
#endif /* MBEDTLS_SSL_SRV_C */

#if defined(MBEDTLS_SSL_RENEGOTIATION)
            /* Determine whether renegotiation attempt should be accepted */
            if( ! ( ssl->conf->disable_renegotiation == MBEDTLS_SSL_RENEGOTIATION_DISABLED ||
                    ( ssl->secure_renegotiation == MBEDTLS_SSL_LEGACY_RENEGOTIATION &&
                      ssl->conf->allow_legacy_renegotiation ==
                                                   MBEDTLS_SSL_LEGACY_NO_RENEGOTIATION ) ) )
            {
                /*
                 * Accept renegotiation request
                 */

                /* DTLS clients need to know renego is server-initiated */
#if defined(MBEDTLS_SSL_PROTO_DTLS)
                if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM &&
                    ssl->conf->endpoint == MBEDTLS_SSL_IS_CLIENT )
                {
                    ssl->renego_status = MBEDTLS_SSL_RENEGOTIATION_PENDING;
                }
#endif
                ret = mbedtls_ssl_start_renegotiation( ssl );
                if( ret != MBEDTLS_ERR_SSL_WAITING_SERVER_HELLO_RENEGO &&
                    ret != 0 )
                {
                    MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_start_renegotiation",
                                           ret );
                    return( ret );
                }
            }
            else
#endif /* MBEDTLS_SSL_RENEGOTIATION */
            {
                /*
                 * Refuse renegotiation
                 */

                MBEDTLS_SSL_DEBUG_MSG( 3, ( "refusing renegotiation, sending alert" ) );

#if defined(MBEDTLS_SSL_PROTO_SSL3)
                if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_0 )
                {
                    /* SSLv3 does not have a "no_renegotiation" warning, so
                       we send a fatal alert and abort the connection. */
                    mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
                                                    MBEDTLS_SSL_ALERT_MSG_UNEXPECTED_MESSAGE );
                    return( MBEDTLS_ERR_SSL_UNEXPECTED_MESSAGE );
                }
                else
#endif /* MBEDTLS_SSL_PROTO_SSL3 */
#if defined(MBEDTLS_SSL_PROTO_TLS1) || defined(MBEDTLS_SSL_PROTO_TLS1_1) || \
    defined(MBEDTLS_SSL_PROTO_TLS1_2)
                if( ssl->minor_ver >= MBEDTLS_SSL_MINOR_VERSION_1 )
                {
                    if( ( ret = mbedtls_ssl_send_alert_message( ssl,
                                    MBEDTLS_SSL_ALERT_LEVEL_WARNING,
                                    MBEDTLS_SSL_ALERT_MSG_NO_RENEGOTIATION ) ) != 0 )
                    {
                        return( ret );
                    }
                }
                else
#endif /* MBEDTLS_SSL_PROTO_TLS1 || MBEDTLS_SSL_PROTO_TLS1_1 || MBEDTLS_SSL_PROTO_TLS1_2 */
                {
                    MBEDTLS_SSL_DEBUG_MSG( 1, ( "should never happen" ) );
                    return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );
                }
            }

            /* At this point, we don't know whether the renegotiation has been
             * completed or not. The cases to consider are the following:
             * 1) The renegotiation is complete. In this case, no new record
             *    has been read yet.
             * 2) The renegotiation is incomplete because the client received
             *    an application data record while awaiting the ServerHello.
             * 3) The renegotiation is incomplete because the client received
             *    a non-handshake, non-application data message while awaiting
             *    the ServerHello.
             * In each of these case, looping will be the proper action:
             * - For 1), the next iteration will read a new record and check
             *   if it's application data.
             * - For 2), the loop condition isn't satisfied as application data
             *   is present, hence continue is the same as break
             * - For 3), the loop condition is satisfied and read_record
             *   will re-deliver the message that was held back by the client
             *   when expecting the ServerHello.
             */
            continue;
        }
#if defined(MBEDTLS_SSL_RENEGOTIATION)
        else if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_PENDING )
        {
            /* Give up on a renegotiation the peer keeps ignoring */
            if( ssl->conf->renego_max_records >= 0 )
            {
                if( ++ssl->renego_records_seen > ssl->conf->renego_max_records )
                {
                    MBEDTLS_SSL_DEBUG_MSG( 1, ( "renegotiation requested, "
                                        "but not honored by client" ) );
                    return( MBEDTLS_ERR_SSL_UNEXPECTED_MESSAGE );
                }
            }
        }
#endif /* MBEDTLS_SSL_RENEGOTIATION */

        /* Fatal and closure alerts handled by mbedtls_ssl_read_record() */
        if( ssl->in_msgtype == MBEDTLS_SSL_MSG_ALERT )
        {
            MBEDTLS_SSL_DEBUG_MSG( 2, ( "ignoring non-fatal non-closure alert" ) );
            return( MBEDTLS_ERR_SSL_WANT_READ );
        }

        if( ssl->in_msgtype != MBEDTLS_SSL_MSG_APPLICATION_DATA )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad application data message" ) );
            return( MBEDTLS_ERR_SSL_UNEXPECTED_MESSAGE );
        }

        ssl->in_offt = ssl->in_msg;

        /* We're going to return something now, cancel timer,
         * except if handshake (renegotiation) is in progress */
        if( ssl->state == MBEDTLS_SSL_HANDSHAKE_OVER )
            mbedtls_ssl_set_timer( ssl, 0 );

#if defined(MBEDTLS_SSL_PROTO_DTLS)
        /* If we requested renego but received AppData, resend HelloRequest.
         * Do it now, after setting in_offt, to avoid taking this branch
         * again if ssl_write_hello_request() returns WANT_WRITE */
#if defined(MBEDTLS_SSL_SRV_C) && defined(MBEDTLS_SSL_RENEGOTIATION)
        if( ssl->conf->endpoint == MBEDTLS_SSL_IS_SERVER &&
            ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_PENDING )
        {
            if( ( ret = mbedtls_ssl_resend_hello_request( ssl ) ) != 0 )
            {
                MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_resend_hello_request",
                                       ret );
                return( ret );
            }
        }
#endif /* MBEDTLS_SSL_SRV_C && MBEDTLS_SSL_RENEGOTIATION */
#endif /* MBEDTLS_SSL_PROTO_DTLS */
    }

    /* Deliver as much of the buffered record as the caller asked for */
    n = ( len < ssl->in_msglen )
        ? len : ssl->in_msglen;

    memcpy( buf, ssl->in_offt, n );
    ssl->in_msglen -= n;

    /* Zeroising the plaintext buffer to erase unused application data
       from the memory. */
    mbedtls_platform_zeroize( ssl->in_offt, n );

    if( ssl->in_msglen == 0 )
    {
        /* all bytes consumed */
        ssl->in_offt = NULL;
        ssl->keep_current_message = 0;
    }
    else
    {
        /* more data available */
        ssl->in_offt += n;
    }

    MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= read" ) );

    return( (int) n );
}
0
242,642
/* Release the channel's current sample: bump the AU sequence number when a
 * sample was actually held, then clear the sample reference and the
 * auxiliary-info buffer usage. */
void isor_reader_release_sample(ISOMChannel *ch)
{
	/* A non-NULL sample means one access unit was delivered. */
	if (ch->sample != NULL)
		ch->au_seq_num += 1;

	ch->sample = NULL;
	ch->sai_buffer_size = 0;
}
0
246,462
/* Destructor for an RBinWasmCustomNameLocalName: frees the contained name
 * storage, then the struct itself. Safe to call with NULL. */
static void wasm_custom_name_local_free(RBinWasmCustomNameLocalName *name) {
	if (!name) {
		return;
	}
	r_id_storage_free (name->names);
	R_FREE (name);
}
0
242,961
/*
 * Internal write helper: stage up to one record's worth of application
 * data and push it out, or keep flushing a previously staged record.
 * Returns the number of bytes accepted or a negative MBEDTLS_ERR_* code.
 */
static int ssl_write_real( mbedtls_ssl_context *ssl,
                           const unsigned char *buf, size_t len )
{
    int ret = mbedtls_ssl_get_max_out_record_payload( ssl );

    if( ret < 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_get_max_out_record_payload",
                               ret );
        return( ret );
    }

    /* Clamp (TLS) or reject (DTLS) payloads larger than one record. */
    if( len > (size_t) ret )
    {
#if defined(MBEDTLS_SSL_PROTO_DTLS)
        if( ssl->conf->transport == MBEDTLS_SSL_TRANSPORT_DATAGRAM )
        {
            MBEDTLS_SSL_DEBUG_MSG( 1, ( "fragment larger than the (negotiated) "
                                        "maximum fragment length: %" MBEDTLS_PRINTF_SIZET
                                        " > %" MBEDTLS_PRINTF_SIZET,
                                        len, (size_t) ret ) );
            return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA );
        }
        else
#endif
            len = (size_t) ret;
    }

    if( ssl->out_left != 0 )
    {
        /*
         * A previous call returned MBEDTLS_ERR_SSL_WANT_WRITE or wrote only
         * part of the record; the caller retries with the same parameters,
         * so just keep flushing the already-staged record.
         */
        if( ( ret = mbedtls_ssl_flush_output( ssl ) ) != 0 )
        {
            MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_flush_output", ret );
            return( ret );
        }

        return( (int) len );
    }

    /*
     * First attempt for this message: copy it into the output buffer,
     * record the bookkeeping for partial writes, and hand it to the
     * record layer.
     */
    ssl->out_msglen  = len;
    ssl->out_msgtype = MBEDTLS_SSL_MSG_APPLICATION_DATA;
    memcpy( ssl->out_msg, buf, len );

    if( ( ret = mbedtls_ssl_write_record( ssl, SSL_FORCE_FLUSH ) ) != 0 )
    {
        MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_write_record", ret );
        return( ret );
    }

    return( (int) len );
}
0
247,574
// Record the server-stats string the test expects to observe later.
void setExpectedServerStats(const std::string& expected_server_stats) {
  expected_server_stats_.assign(expected_server_stats);
}
0
313,565
/*
 * seq_file ->start callback for the ROSE route list (the signature line
 * precedes this __acquires annotation; presumably rose_route_start with
 * a (struct seq_file *, loff_t *pos) signature — verify).
 * Takes rose_route_list_lock; the matching ->stop callback releases it.
 */
__acquires(rose_route_list_lock)
{
	struct rose_route *rose_route;
	int i = 1;

	spin_lock_bh(&rose_route_list_lock);
	/* First call in a sequence: emit the header token. */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	/* Walk to the (*pos)-th entry; NULL when the list is shorter. */
	for (rose_route = rose_route_list; rose_route && i < *pos;
	     rose_route = rose_route->next, ++i);

	return (i == *pos) ? rose_route : NULL;
}
0
195,274
bool ConstantFolding::MulConvPushDown(GraphDef* optimized_graph, NodeDef* node, const GraphProperties& properties) { // Push down multiplication on ConvND. // * ConvND // / \ / \ // ConvND C2 -- > X * // / \ / \ // X C1 C1 C2 // // where C1 and C2 are constants and X is non-constant. // // TODO(rmlarsen): Use PrepareConstantPushDown() to simplify this code. if (!IsAnyMul(*node) || NumNonControlInputs(*node) != 2) return false; NodeDef* mul_left_child = node_map_->GetNode(node->input(0)); NodeDef* mul_right_child = node_map_->GetNode(node->input(1)); // One child must be constant, and the second must be Conv op. const bool left_child_is_constant = IsReallyConstant(*mul_left_child); const bool right_child_is_constant = IsReallyConstant(*mul_right_child); if (!left_child_is_constant && !right_child_is_constant) { return false; } NodeDef* conv_node = left_child_is_constant ? mul_right_child : mul_left_child; if (!IsConv2D(*conv_node) && !IsConv3D(*conv_node)) { return false; } if (node->device() != mul_left_child->device() || node->device() != mul_right_child->device()) { return false; } // Make sure that it is safe to change the value of the convolution // output. if (conv_node->input_size() < 2 || NumNonControlOutputs(*conv_node, *node_map_) > 1 || nodes_to_preserve_.find(conv_node->name()) != nodes_to_preserve_.end()) { return false; } // Identify the nodes to swap. NodeDef* conv_left_child = node_map_->GetNode(conv_node->input(0)); NodeDef* conv_right_child = node_map_->GetNode(conv_node->input(1)); const bool conv_left_is_constant = IsReallyConstant(*conv_left_child); const bool conv_right_is_constant = IsReallyConstant(*conv_right_child); if (!conv_left_is_constant && !conv_right_is_constant) { // At least one of the convolution inputs should be constant. return false; } if (conv_left_is_constant && conv_right_is_constant) { // Leverage regular constant folding to handle this. 
return false; } const auto& mul_props = properties.GetOutputProperties(node->name()); const auto& conv_props = properties.GetOutputProperties(conv_node->name()); if (mul_props.empty() || conv_props.empty()) { return false; } const auto& mul_shape = mul_props[0].shape(); const auto& conv_shape = conv_props[0].shape(); if (!ShapesSymbolicallyEqual(mul_shape, conv_shape)) { return false; } const auto& input_props = properties.GetInputProperties(conv_node->name()); if (input_props.size() < 2) { return false; } const auto& filter_shape = input_props[1].shape(); NodeDef* const_node = left_child_is_constant ? mul_left_child : mul_right_child; const auto& const_props = properties.GetOutputProperties(const_node->name()); if (const_props.empty()) { return false; } const auto& const_shape = const_props[0].shape(); if (!IsValidConstShapeForMulConvPushDown( conv_node->attr().at("data_format").s(), filter_shape, const_shape)) { return false; } string mul_new_name = AddPrefixToNodeName("merged_input", conv_node->name()); if (node_map_->NodeExists(mul_new_name)) { return false; } // Make sure we don't introduce loops in the graph by removing control // dependencies from the conv2d node to c2. string conv_const_input = conv_left_is_constant ? conv_node->input(0) : conv_node->input(1); if (MaybeRemoveControlInput(conv_node->name(), const_node, optimized_graph, node_map_.get())) { // Add a control dep from c1 to c2 to ensure c2 is in the right frame MaybeAddControlInput(conv_const_input, const_node, optimized_graph, node_map_.get()); } conv_node->set_name(node->name()); node->set_name(mul_new_name); if (conv_left_is_constant) { node_map_->UpdateInput(conv_node->name(), node->input(0), mul_new_name); conv_node->set_input(0, mul_new_name); } else { node_map_->UpdateInput(conv_node->name(), node->input(1), mul_new_name); conv_node->set_input(1, mul_new_name); } NodeDef* conv_const_node = conv_left_is_constant ? 
conv_left_child : conv_right_child; if (left_child_is_constant) { node->set_input(1, conv_const_node->name()); } else { node->set_input(0, conv_const_node->name()); } node_map_->AddNode(mul_new_name, node); return true; }
1
252,307
// Byte-swap a 16-bit value in place; a no-op on little-endian builds
// (MINIZ_LITTLE_ENDIAN defined).
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *bytes = reinterpret_cast<unsigned char *>(val);
  const unsigned char first = bytes[0];
  bytes[0] = bytes[1];
  bytes[1] = first;
#endif
}
0
387,578
/*
 * Handle the TLV read/write/command ioctls: copy in the TLV header from
 * userspace, resolve the control element by numid, and dispatch either to
 * the element's TLV callback or to a plain TLV buffer read.
 * `buf` is a userspace pointer; only the fixed header is copied here, the
 * variable-size container stays in userspace.
 */
static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
			     struct snd_ctl_tlv __user *buf,
			     int op_flag)
{
	struct snd_ctl_tlv header;
	unsigned int __user *container;
	unsigned int container_size;
	struct snd_kcontrol *kctl;
	struct snd_ctl_elem_id id;
	struct snd_kcontrol_volatile *vd;

	if (copy_from_user(&header, buf, sizeof(header)))
		return -EFAULT;

	/* In design of control core, numerical ID starts at 1. */
	if (header.numid == 0)
		return -EINVAL;

	/* At least, container should include type and length fields. */
	if (header.length < sizeof(unsigned int) * 2)
		return -EINVAL;
	container_size = header.length;
	container = buf->tlv;

	kctl = snd_ctl_find_numid(file->card, header.numid);
	if (kctl == NULL)
		return -ENOENT;

	/* Calculate index of the element in this set. */
	id = kctl->id;
	snd_ctl_build_ioff(&id, kctl, header.numid - id.numid);

	vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)];
	if (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
		/* Driver-provided TLV handler covers read/write/command. */
		return call_tlv_handler(file, op_flag, kctl, &id, container,
					container_size);
	} else {
		if (op_flag == SNDRV_CTL_TLV_OP_READ) {
			/* Static TLV data: only reads are meaningful. */
			return read_tlv_buf(kctl, &id, container,
					    container_size);
		}
	}

	/* Not supported. */
	return -ENXIO;
}
0
313,848
/*
 * Clear the 'showcmd' buffer.  In Visual mode, instead of clearing, show
 * the size of the selected area (lines, columns, or chars/bytes depending
 * on the Visual mode).
 */
    clear_showcmd(void)
{
    if (!p_sc)
	return;

    if (VIsual_active && !char_avail())
    {
	int		cursor_bot = LT_POS(VIsual, curwin->w_cursor);
	long		lines;
	colnr_T		leftcol, rightcol;
	linenr_T	top, bot;

	// Show the size of the Visual area.
	if (cursor_bot)
	{
	    top = VIsual.lnum;
	    bot = curwin->w_cursor.lnum;
	}
	else
	{
	    top = curwin->w_cursor.lnum;
	    bot = VIsual.lnum;
	}
# ifdef FEAT_FOLDING
	// Include closed folds as a whole.
	(void)hasFolding(top, &top, NULL);
	(void)hasFolding(bot, NULL, &bot);
# endif
	lines = bot - top + 1;

	if (VIsual_mode == Ctrl_V)
	{
	    // Blockwise Visual: show "linesxcols".
# ifdef FEAT_LINEBREAK
	    char_u *saved_sbr = p_sbr;
	    char_u *saved_w_sbr = curwin->w_p_sbr;

	    // Make 'sbr' empty for a moment to get the correct size.
	    p_sbr = empty_option;
	    curwin->w_p_sbr = empty_option;
# endif
	    getvcols(curwin, &curwin->w_cursor, &VIsual, &leftcol, &rightcol);
# ifdef FEAT_LINEBREAK
	    p_sbr = saved_sbr;
	    curwin->w_p_sbr = saved_w_sbr;
# endif
	    sprintf((char *)showcmd_buf, "%ldx%ld", lines,
					      (long)(rightcol - leftcol + 1));
	}
	else if (VIsual_mode == 'V' || VIsual.lnum != curwin->w_cursor.lnum)
	    // Linewise (or multi-line characterwise): show line count.
	    sprintf((char *)showcmd_buf, "%ld", lines);
	else
	{
	    // Characterwise within one line: count chars (and bytes when
	    // they differ, i.e. multibyte text).
	    char_u	*s, *e;
	    int		l;
	    int		bytes = 0;
	    int		chars = 0;

	    if (cursor_bot)
	    {
		s = ml_get_pos(&VIsual);
		e = ml_get_cursor();
	    }
	    else
	    {
		s = ml_get_cursor();
		e = ml_get_pos(&VIsual);
	    }
	    while ((*p_sel != 'e') ? s <= e : s < e)
	    {
		l = (*mb_ptr2len)(s);
		if (l == 0)
		{
		    ++bytes;
		    ++chars;
		    break;  // end of line
		}
		bytes += l;
		++chars;
		s += l;
	    }
	    if (bytes == chars)
		sprintf((char *)showcmd_buf, "%d", chars);
	    else
		sprintf((char *)showcmd_buf, "%d-%d", chars, bytes);
	}
	showcmd_buf[SHOWCMD_COLS] = NUL;	// truncate
	showcmd_visual = TRUE;
    }
    else
    {
	showcmd_buf[0] = NUL;
	showcmd_visual = FALSE;

	// Don't actually display something if there is nothing to clear.
	if (showcmd_is_clear)
	    return;
    }

    display_showcmd();
}
0
309,880
/*
 * Public init_color entry point: redefine a color's RGB components by
 * delegating to the screen-pointer variant on the current screen.
 */
init_color(NCURSES_COLOR_T color,
	   NCURSES_COLOR_T r, NCURSES_COLOR_T g, NCURSES_COLOR_T b)
{
    return NCURSES_SP_NAME(init_color) (CURRENT_SCREEN, color, r, g, b);
}
0
276,899
/*
 * Top-level 'i2c' command handler: strip the leading "i2c" token and
 * dispatch to the matching subcommand in cmd_i2c_sub[].
 * Returns the subcommand's result, or CMD_RET_USAGE when no subcommand
 * was given or recognized.
 */
static int do_i2c(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[])
{
	struct cmd_tbl *sub;

#ifdef CONFIG_NEEDS_MANUAL_RELOC
	i2c_reloc();
#endif

	if (argc < 2)
		return CMD_RET_USAGE;

	/* Drop the leading 'i2c' token so argv[0] names the subcommand */
	--argc;
	++argv;

	sub = find_cmd_tbl(argv[0], &cmd_i2c_sub[0], ARRAY_SIZE(cmd_i2c_sub));
	if (!sub)
		return CMD_RET_USAGE;

	return sub->cmd(cmdtp, flag, argc, argv);
}
0
505,656
/*
 * Incrementally scan the SMTP command name (alphabetic characters only).
 * Returns 1 when the full name was consumed (stored upper-cased in
 * state.cmd_name), 0 when more input is needed, -1 on error.
 */
static int smtp_command_parse_identifier(struct smtp_command_parser *parser)
{
	const unsigned char *p;

	/* The commands themselves are alphabetic characters. */
	/* state.poff remembers how far a previous partial scan got. */
	p = parser->cur + parser->state.poff;
	i_assert(p <= parser->end);
	while (p < parser->end && i_isalpha(*p))
		p++;
	if ((p - parser->cur) > SMTP_COMMAND_PARSER_MAX_COMMAND_LENGTH) {
		smtp_command_parser_error(parser,
			SMTP_COMMAND_PARSE_ERROR_BAD_COMMAND,
			"Command name is too long");
		return -1;
	}
	parser->state.poff = p - parser->cur;
	/* Ran out of input before hitting a non-alpha terminator. */
	if (p == parser->end)
		return 0;

	parser->state.cmd_name = str_ucase(i_strdup_until(parser->cur, p));
	parser->cur = p;
	parser->state.poff = 0;
	return 1;
}
0
224,219
/*
 * Write up to `len` bytes at virtual address `addr` through the single
 * submap of bank `bankid` that contains `addr`.  Returns the number of
 * bytes written (clamped at the submap's end), 0 when no submap covers
 * `addr` or len == 0, and -1 on invalid arguments or a non-writable map.
 */
R_API int r_io_bank_write_to_submap_at(RIO *io, const ut32 bankid, ut64 addr, const ut8 *buf, int len) {
	RIOBank *bank = r_io_bank_get (io, bankid);
	// NOTE(review): io is passed to r_io_bank_get() before this NULL
	// guard runs — presumably r_io_bank_get tolerates NULL io; verify.
	r_return_val_if_fail (io && bank, -1);
	if (!len) {
		return 0;
	}
	RRBNode *node;
	// Fast path: reuse the node cached by the previous lookup.
	if (bank->last_used && r_io_submap_contain (((RIOSubMap *)bank->last_used->data), addr)) {
		node = bank->last_used;
	} else {
		node = r_crbtree_find_node (bank->submaps, &addr, _find_sm_by_vaddr_cb, NULL);
		if (!node) {
			return 0;
		}
		bank->last_used = node;
	}
	RIOSubMap *sm = (RIOSubMap *)node->data;
	if (!r_io_submap_contain (sm, addr)) {
		return 0;
	}
	RIOMap *map = r_io_map_get_by_ref (io, &sm->mapref);
	if (!map || !(map->perm & R_PERM_W)) {
		return -1;
	}
	// Clamp the write so it does not cross the submap boundary.
	const int write_len = R_MIN (len, r_io_submap_to (sm) - addr + 1);
	// Translate the virtual address into the map's backing-fd offset.
	const ut64 paddr = addr - r_io_map_from (map) + map->delta;
	return r_io_fd_write_at (io, map->fd, paddr, buf, write_len);
}
0
310,137
/*
 * Decode an xterm SGR (1006) extended mouse report into *eventp.
 * Returns TRUE when an event was decoded and should be reported.
 */
decode_xterm_SGR1006(SCREEN *sp, MEVENT * eventp)
{
    SGR_DATA data;
    bool result = FALSE;
    if (read_SGR(sp, &data)) {
	int b = data.params[0];
	int b3 = 1 + (b & 3);	/* low two bits select buttons 1..4 */
	int wheel = ((b & 64) == 64);

	/* Map extended button-code ranges onto logical button numbers. */
	if (b >= 132) {
	    b3 = MAX_BUTTONS + 1;	/* out of range: position-only */
	} else if (b >= 128) {
	    b3 = (b - 120);	/* buttons 8-11 */
	} else if (b >= 64) {
	    b3 = (b - 60);	/* buttons 6-7 */
	}

	eventp->id = NORMAL_EVENT;
	if (data.final == 'M') {
	    /* final 'M': press / wheel motion */
	    (void) handle_wheel(sp, eventp, b, wheel);
	} else if (b3 > MAX_BUTTONS) {
	    eventp->bstate = REPORT_MOUSE_POSITION;
	} else {
	    /* final 'm': release — only if we saw the matching press */
	    mmask_t pressed = (mmask_t) NCURSES_MOUSE_MASK(b3,
							   NCURSES_BUTTON_PRESSED);
	    mmask_t release = (mmask_t) NCURSES_MOUSE_MASK(b3,
							   NCURSES_BUTTON_RELEASED);
	    if (sp->_mouse_bstate & pressed) {
		eventp->bstate = release;
		sp->_mouse_bstate &= ~pressed;
	    } else {
		eventp->bstate = REPORT_MOUSE_POSITION;
	    }
	}
	/* Modifier bits as encoded by xterm */
	if (b & 4) {
	    eventp->bstate |= BUTTON_SHIFT;
	}
	if (b & 8) {
	    eventp->bstate |= BUTTON_ALT;
	}
	if (b & 16) {
	    eventp->bstate |= BUTTON_CTRL;
	}
	result = (eventp->bstate & REPORT_MOUSE_POSITION) ? TRUE : FALSE;
	/* Protocol coordinates are 1-based; convert to 0-based. */
	eventp->x = (data.params[1] ? (data.params[1] - 1) : 0);
	eventp->y = (data.params[2] ? (data.params[2] - 1) : 0);
    }
    return result;
}
0
90,150
// Default implementation: reports no wifi access-point data available.
// Presumably overridden by platform-specific providers that fill |result|
// and return true on a successful scan — verify against subclasses.
virtual bool GetWifiAccessPoints(WifiAccessPointVector* result) {
  return false;
}
0
474,094
utf16le_mbc_enc_len(const UChar* p, const OnigUChar* e, OnigEncoding enc ARG_UNUSED)
{
  /*
   * Length of one UTF-16LE character at p, limited by e:
   * 2 bytes for a BMP code unit, 4 for a valid surrogate pair;
   * otherwise "need more" or "invalid".
   */
  int avail = (int)(e - p);
  UChar high;

  /* Need at least one complete 16-bit code unit. */
  if (avail < 2)
    return ONIGENC_CONSTRUCT_MBCLEN_NEEDMORE(1);

  high = p[1];  /* little-endian: high byte of the unit is second */

  if (!UTF16_IS_SURROGATE(high))
    return ONIGENC_CONSTRUCT_MBCLEN_CHARFOUND(2);

  if (UTF16_IS_SURROGATE_FIRST(high)) {
    if (avail < 4)
      return ONIGENC_CONSTRUCT_MBCLEN_NEEDMORE(4 - avail);
    if (UTF16_IS_SURROGATE_SECOND(p[3]))
      return ONIGENC_CONSTRUCT_MBCLEN_CHARFOUND(4);
  }

  /* Lone low surrogate, or high surrogate without a valid partner. */
  return ONIGENC_CONSTRUCT_MBCLEN_INVALID();
}
0
427,214
/*
 * Parse a 'for' statement, either numeric (name '=' ...) or generic
 * (name {',' name} 'in' ...), inside its own block scope.
 */
static void forstat (LexState *ls, int line) {
  /* forstat -> FOR (fornum | forlist) END */
  FuncState *fs = ls->fs;
  TString *varname;
  BlockCnt bl;
  enterblock(fs, &bl, 1);  /* scope for loop and control variables */
  luaX_next(ls);  /* skip 'for' */
  varname = str_checkname(ls);  /* first variable name */
  /* The token after the first name decides which form this is. */
  switch (ls->t.token) {
    case '=': fornum(ls, varname, line); break;
    case ',': case TK_IN: forlist(ls, varname); break;
    default: luaX_syntaxerror(ls, "'=' or 'in' expected");
  }
  check_match(ls, TK_END, TK_FOR, line);
  leaveblock(fs);  /* loop scope ('break' jumps to this point) */
}
0
465,847
/*
 * Start a firmware download on the controller — thin wrapper, presumably
 * installed as the NCI ops fw_download callback (verify at registration).
 */
static int nfcmrvl_nci_fw_download(struct nci_dev *ndev,
				   const char *firmware_name)
{
	return nfcmrvl_fw_dnld_start(ndev, firmware_name);
}
0
389,693
typval_tostring(typval_T *arg, int quotes)
{
    char_u	numbuf[NUMBUFLEN];
    char_u	*tofree;
    char_u	*result;

    // A missing value gets a fixed placeholder.
    if (arg == NULL)
	return vim_strsave((char_u *)"(does not exist)");

    // Without quotes, a string value is returned as-is (NULL becomes "").
    if (!quotes && arg->v_type == VAR_STRING)
	return vim_strsave(arg->vval.v_string == NULL
					 ? (char_u *)"" : arg->vval.v_string);

    result = tv2string(arg, &tofree, numbuf, 0);
    // Make a copy if we have a value but it's not in allocated memory.
    if (result != NULL && tofree == NULL)
	result = vim_strsave(result);
    return result;
}
0
353,150
// Propagate the stroke-overprint flag from the PDF graphics state to the
// splash rasterizer.
void SplashOutputDev::updateStrokeOverprint(GfxState *state) {
  splash->setStrokeOverprint(state->getStrokeOverprint());
}
0
332,396
/*
 * Detect whether "line" ends in an unclosed comment (sets *is_comment) and,
 * when "process" is TRUE, return a pointer past the comment leader
 * (including trailing white space when "include_space" is set).
 */
    skip_comment(
	char_u	*line,
	int	process,
	int	include_space,
	int	*is_comment)
{
    char_u *comment_flags = NULL;
    int	    lead_len;
    int	    leader_offset = get_last_leader_offset(line, &comment_flags);

    *is_comment = FALSE;
    if (leader_offset != -1)
    {
	// Let's check whether the line ends with an unclosed comment.
	// If the last comment leader has COM_END in flags, there's no comment.
	while (*comment_flags)
	{
	    if (*comment_flags == COM_END || *comment_flags == ':')
		break;
	    ++comment_flags;
	}
	if (*comment_flags != COM_END)
	    *is_comment = TRUE;
    }

    if (process == FALSE)
	return line;

    lead_len = get_leader_len(line, &comment_flags, FALSE, include_space);

    if (lead_len == 0)
	return line;

    // Find:
    // - COM_END,
    // - colon,
    // whichever comes first.
    while (*comment_flags)
    {
	if (*comment_flags == COM_END || *comment_flags == ':')
	    break;
	++comment_flags;
    }

    // If we found a colon, it means that we are not processing a line
    // starting with a closing part of a three-part comment. That's good,
    // because we don't want to remove those as this would be annoying.
    if (*comment_flags == ':' || *comment_flags == NUL)
	line += lead_len;

    return line;
}
0
317,191
/*
 * init_smack_known_list - set up the built-in Smack labels.
 *
 * Initializes the per-label rule-list mutexes and list heads for the five
 * predefined labels (huh '?', hat '^', floor '_', star '*', web '@'), then
 * inserts each into the global list of known labels.
 * Each mutex_init() is kept as a separate call site on purpose (one lockdep
 * class per label).
 */
static __init void init_smack_known_list(void)
{
	/*
	 * Initialize rule list locks
	 */
	mutex_init(&smack_known_huh.smk_rules_lock);
	mutex_init(&smack_known_hat.smk_rules_lock);
	mutex_init(&smack_known_floor.smk_rules_lock);
	mutex_init(&smack_known_star.smk_rules_lock);
	mutex_init(&smack_known_web.smk_rules_lock);
	/*
	 * Initialize rule lists
	 */
	INIT_LIST_HEAD(&smack_known_huh.smk_rules);
	INIT_LIST_HEAD(&smack_known_hat.smk_rules);
	INIT_LIST_HEAD(&smack_known_star.smk_rules);
	INIT_LIST_HEAD(&smack_known_floor.smk_rules);
	INIT_LIST_HEAD(&smack_known_web.smk_rules);
	/*
	 * Create the known labels list
	 */
	smk_insert_entry(&smack_known_huh);
	smk_insert_entry(&smack_known_hat);
	smk_insert_entry(&smack_known_star);
	smk_insert_entry(&smack_known_floor);
	smk_insert_entry(&smack_known_web);
}
0
294,427
/*
 * Build a Date from a commercial (ISO 8601 week-based) date: year, week of
 * year (cweek), day of week (cwday) and an optional day of calendar reform
 * ("start").  Defaults reproduce -4712-W01-1 with the default reform day.
 * Raises Date::Error when the triple is not a valid commercial date.
 */
date_s_commercial(int argc, VALUE *argv, VALUE klass)
{
    VALUE vy, vw, vd, vsg, y, fr, fr2, ret;
    int w, d;
    double sg;

    rb_scan_args(argc, argv, "04", &vy, &vw, &vd, &vsg);

    /* defaults when arguments are omitted */
    y = INT2FIX(-4712);
    w = 1;
    d = 1;
    fr2 = INT2FIX(0);
    sg = DEFAULT_SG;

    /* intentional fallthrough: N given args override the first N defaults */
    switch (argc) {
      case 4:
	val2sg(vsg, sg);
	/* fall through */
      case 3:
	check_numeric(vd, "cwday");
	num2int_with_frac(d, positive_inf);  /* presumably fills d (and fr) from vd — macro uses surrounding locals */
	/* fall through */
      case 2:
	check_numeric(vw, "cweek");
	w = NUM2INT(vw);
	/* fall through */
      case 1:
	check_numeric(vy, "year");
	y = vy;
    }

    {
	VALUE nth;
	int ry, rw, rd, rjd, ns;

	if (!valid_commercial_p(y, w, d, sg,
				&nth, &ry,
				&rw, &rd, &rjd,
				&ns))
	    rb_raise(eDateError, "invalid date");

	ret = d_simple_new_internal(klass,
				    nth, rjd,
				    sg,
				    0, 0, 0,
				    HAVE_JD);
    }
    add_frac();  /* macro: folds the fractional day (fr/fr2) into ret — uses surrounding locals */
    return ret;
}
0
331,792
/*
    Reconstructs a QPainterPath from a QVectorPath.  When the vector path
    carries element types, each move/line/curve element is replayed; a path
    without element types is interpreted as a polyline (moveTo followed by
    lineTo's).  The WindingFill hint is carried over as the fill rule.
*/
Q_GUI_EXPORT QPainterPath qt_painterPathFromVectorPath(const QVectorPath &path)
{
    const qreal *pts = path.points();
    const QPainterPath::ElementType *elements = path.elements();

    QPainterPath result;
    if (elements) {
        const int count = path.elementCount();
        int pi = 0;   // index into the flat (x, y) coordinate array
        for (int i = 0; i < count; ++i) {
            if (elements[i] == QPainterPath::MoveToElement) {
                result.moveTo(QPointF(pts[pi], pts[pi + 1]));
                pi += 2;
            } else if (elements[i] == QPainterPath::LineToElement) {
                result.lineTo(QPointF(pts[pi], pts[pi + 1]));
                pi += 2;
            } else if (elements[i] == QPainterPath::CurveToElement) {
                // Consumes this element plus the two CurveToDataElement's.
                result.cubicTo(QPointF(pts[pi],     pts[pi + 1]),
                               QPointF(pts[pi + 2], pts[pi + 3]),
                               QPointF(pts[pi + 4], pts[pi + 5]));
                pi += 6;
            }
            // CurveToDataElement: points already consumed by cubicTo() above.
        }
    } else {
        // No element types: treat the points as a simple polyline.
        result.moveTo(QPointF(pts[0], pts[1]));
        int pi = 2;
        for (int i = 1; i < path.elementCount(); ++i) {
            result.lineTo(QPointF(pts[pi], pts[pi + 1]));
            pi += 2;
        }
    }

    if (path.hints() & QVectorPath::WindingFill)
        result.setFillRule(Qt::WindingFill);

    return result;
}
0
256,155
// Construct an empty cache: default-initialize the guarding lock and the
// (initially empty) entries container.
TensorInfoCache() : lock(), entries() {}
0
255,937
// Adds "node" to the shape refiner: builds an InferenceContext seeded with
// the output shapes of the node's already-refined inputs, runs the op's
// registered shape function, and stores the resulting context in
// node_to_context_.  "outer_context" is forwarded to RunShapeFn (used when
// refining inside a function body — NOTE(review): inferred from the
// parameter name; confirm against RunShapeFn).
Status ShapeRefiner::AddNodeInternal(
    const Node* node, shape_inference::InferenceContext* outer_context) {
  // Create the inference context for this node with the existing input shapes.
  std::unique_ptr<InferenceContext> ic(new InferenceContext(
      graph_def_version_, node->def(), node->op_def(),
      std::vector<ShapeHandle>(node->num_inputs()), {}, {}, {}));
  TF_RETURN_IF_ERROR(ic->construction_status());

  // For each 'input' of this node, fetch the corresponding shape
  // from 'input's InferenceContext, and store into this node's
  // InferenceContext.
  for (const Edge* e : node->in_edges()) {
    if (e->IsControlEdge()) continue;

    if (e->dst_input() < 0) {
      return tensorflow::errors::Internal(
          "Index ", e->dst_input(), " is negative but not a control edge.");
    }

    const Node* input = e->src();
    auto it = node_to_context_.find(input);
    if (it == node_to_context_.end()) {
      // v1 control flow adds loops to the graph; we have to break them
      // somewhere, so we'll ignore this input and leave its shape undefined.
      ic->SetInput(e->dst_input(), ic->UnknownShape());
      continue;
    }

    InferenceContext* input_ic = it->second->get_context();
    ic->SetInput(e->dst_input(), input_ic->output(e->src_output()));

    // Resource/variant edges additionally carry shapes-and-types metadata;
    // copy it across so the shape fn can see through the handle.
    const auto* in_v =
        input_ic->output_handle_shapes_and_types(e->src_output());
    if (in_v != nullptr) {
      DataType input_type = e->src()->output_type(e->src_output());
      DCHECK(input_type == DT_RESOURCE || input_type == DT_VARIANT);
      ic->set_input_handle_shapes_and_types(e->dst_input(),
                                            std::vector<ShapeAndType>(*in_v));
    }
  }

  // Get the shape function for this node
  const OpRegistrationData* op_reg_data;
  TF_RETURN_IF_ERROR(ops_registry_->LookUp(node->type_string(), &op_reg_data));
  if (op_reg_data->shape_inference_fn == nullptr &&
      require_shape_inference_fns_) {
    return errors::InvalidArgument(
        "No shape inference function exists for op '", node->type_string(),
        "', did you forget to define it?");
  }

  std::unique_ptr<ExtendedInferenceContext> ec(
      new ExtendedInferenceContext(std::move(ic), node));

  // Run the shape inference function, and return if there was an error.
  TF_RETURN_IF_ERROR(RunShapeFn(node, op_reg_data, ec.get(), outer_context));

  // Store the resulting context object in the map.
  node_to_context_[node].swap(ec);

  return Status::OK();
}
0
328,844
/*
 * Print a one-entry summary of a Float constant-pool item: ordinal, file
 * offset, the four raw bytes and the decoded IEEE-754 float value.
 * A NULL "obj" is reported and ignored.
 */
R_API void r_bin_java_print_float_cp_summary(RBinJavaCPTypeObj *obj) {
	ut8 *b = NULL;
	if (!obj) {
		/* Fixed copy-paste defect: this is the Float printer, but the
		 * message previously said "Double". */
		eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Float.\n");
		return;
	}
	b = obj->info.cp_float.bytes.raw;
	printf ("Float ConstantPool Type (%d) ", obj->metas->ord);
	printf ("	Offset: 0x%08"PFMT64x "", obj->file_offset);
	printf ("	Bytes = %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	printf ("	Float = %f\n", R_BIN_JAVA_FLOAT (obj->info.cp_float.bytes.raw, 0));
}
0
225,030
/*
 * PQfreeCancel: release a cancel handle previously obtained from
 * PQgetCancel().  Passing NULL is safe and does nothing.
 *
 * The former "if (cancel)" guard was redundant: free(NULL) is defined as a
 * no-op by the C standard.
 */
PQfreeCancel(PGcancel *cancel)
{
	free(cancel);
}
0
281,058
/*
 * __xfrm_sk_clone_policy - copy the parent socket's per-socket IPsec
 * policies onto a newly cloned child socket.
 * @sk:  child socket receiving the cloned policies
 * @osk: parent socket whose policies are read (under RCU)
 *
 * Iterates both policy slots (index 0 and 1); for each installed policy a
 * private clone is created and published on @sk with rcu_assign_pointer().
 * Returns 0 on success or -ENOMEM if cloning a policy fails (any policy
 * already cloned before the failure remains assigned to @sk).
 */
int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
	const struct xfrm_policy *p;
	struct xfrm_policy *np;
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < 2; i++) {
		p = rcu_dereference(osk->sk_policy[i]);
		if (p) {
			np = clone_policy(p, i);
			if (unlikely(!np)) {
				ret = -ENOMEM;
				break;
			}
			rcu_assign_pointer(sk->sk_policy[i], np);
		}
	}
	rcu_read_unlock();
	return ret;
}
0
259,233
/*
 * Parse a 'glbl' atom: stores its payload as the current stream's codec
 * extradata.  Handles two quirks: a legacy-libavformat bug that wrapped a
 * whole 'fiel' atom inside 'glbl' (re-parsed as a child atom), and
 * hvcC-in-dvh1 streams which are remapped to HEVC.
 * Returns 0 on success or a negative AVERROR code.
 */
static int mov_read_glbl(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    AVStream *st;
    int ret;

    if (c->fc->nb_streams < 1)
        return 0;
    st = c->fc->streams[c->fc->nb_streams-1];

    if ((uint64_t)atom.size > (1<<30))
        return AVERROR_INVALIDDATA;

    if (atom.size >= 10) {
        // Broken files created by legacy versions of libavformat will
        // wrap a whole fiel atom inside of a glbl atom.
        unsigned size = avio_rb32(pb);
        unsigned type = avio_rl32(pb);
        if (avio_feof(pb))
            return AVERROR_INVALIDDATA;
        avio_seek(pb, -8, SEEK_CUR);   // rewind the 8 peeked bytes
        if (type == MKTAG('f','i','e','l') && size == atom.size)
            return mov_read_default(c, pb, atom);
    }
    if (st->codecpar->extradata_size > 1 && st->codecpar->extradata) {
        // Keep the first glbl payload; later duplicates are ignored.
        av_log(c->fc, AV_LOG_WARNING, "ignoring multiple glbl\n");
        return 0;
    }
    ret = ff_get_extradata(c->fc, st->codecpar, pb, atom.size);
    if (ret < 0)
        return ret;
    if (atom.type == MKTAG('h','v','c','C') && st->codecpar->codec_tag == MKTAG('d','v','h','1'))
        /* HEVC-based Dolby Vision derived from hvc1.
           Happens to match with an identifier
           previously utilized for DV. Thus, if we have
           the hvcC extradata box available as specified,
           set codec to HEVC */
        st->codecpar->codec_id = AV_CODEC_ID_HEVC;

    return 0;
}
0
512,321
/*
  Evaluate the LIKE predicate.  Returns 1 when the subject matches the
  pattern (0 otherwise), with NEGATED flipping the result; returns 0 with
  null_value set when either operand is SQL NULL.  Uses the Turbo
  Boyer-Moore matcher when it was prepared, otherwise the generic
  wildcard comparison.
*/
longlong Item_func_like::val_int()
{
  DBUG_ASSERT(fixed == 1);
  DBUG_ASSERT(escape != ESCAPE_NOT_INITIALIZED);

  String *subject= args[0]->val_str(&cmp_value1);
  if (args[0]->null_value)
  {
    null_value= 1;
    return 0;
  }
  String *pattern= args[1]->val_str(&cmp_value2);
  if (args[1]->null_value)
  {
    null_value= 1;
    return 0;
  }
  null_value= 0;

  if (canDoTurboBM)
  {
    const bool matched= turboBM_matches(subject->ptr(), subject->length());
    return matched ? !negated : negated;
  }

  const int wild_result=
    my_wildcmp(cmp_collation.collation,
               subject->ptr(), subject->ptr() + subject->length(),
               pattern->ptr(), pattern->ptr() + pattern->length(),
               escape, wild_one, wild_many);
  // my_wildcmp() returns non-zero on mismatch.
  return wild_result ? negated : !negated;
}
0