idx
int64 | func
string | target
int64 |
|---|---|---|
55,734
|
/*
 * Called when demo playback reaches the end of the demo file.
 * If the timedemo cvar is active, print frame-timing statistics
 * (and optionally log every frame duration to cl_timedemoLog), then
 * disconnect and advance to the next queued demo.
 */
void CL_DemoCompleted( void )
{
char buffer[ MAX_STRING_CHARS ];
if( cl_timedemo && cl_timedemo->integer )
{
int time;
time = Sys_Milliseconds() - clc.timeDemoStart;
if( time > 0 )
{
// Millisecond times are frame durations:
// minimum/average/maximum/std deviation
Com_sprintf( buffer, sizeof( buffer ),
"%i frames %3.1f seconds %3.1f fps %d.0/%.1f/%d.0/%.1f ms\n",
clc.timeDemoFrames,
time/1000.0,
clc.timeDemoFrames*1000.0 / time,
clc.timeDemoMinDuration,
time / (float)clc.timeDemoFrames,
clc.timeDemoMaxDuration,
CL_DemoFrameDurationSDev( ) );
Com_Printf( "%s", buffer );
// Write a log of all the frame durations
if( cl_timedemoLog && strlen( cl_timedemoLog->string ) > 0 )
{
int i;
int numFrames;
fileHandle_t f;
// The duration buffer stores at most MAX_TIMEDEMO_DURATIONS entries;
// clamp so we never read past what was actually recorded.
if( ( clc.timeDemoFrames - 1 ) > MAX_TIMEDEMO_DURATIONS )
numFrames = MAX_TIMEDEMO_DURATIONS;
else
numFrames = clc.timeDemoFrames - 1;
f = FS_FOpenFileWrite( cl_timedemoLog->string );
if( f )
{
FS_Printf( f, "# %s", buffer );
for( i = 0; i < numFrames; i++ )
FS_Printf( f, "%d\n", clc.timeDemoDurations[ i ] );
FS_FCloseFile( f );
Com_Printf( "%s written\n", cl_timedemoLog->string );
}
else
{
Com_Printf( "Couldn't open %s for writing\n",
cl_timedemoLog->string );
}
}
}
}
CL_Disconnect( qtrue );
CL_NextDemo();
}
| 0
|
162,319
|
/*
 * Derive an EUI-64 interface identifier for @idev by copying the low
 * 64 bits of an existing non-tentative link-local address on the
 * device.  Returns 0 on success, -1 if no suitable address exists.
 */
static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
{
int err = -1;
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
/* Stop once the scope exceeds IFA_LINK -- presumably the list is
 * ordered so no further link-local entries can follow; confirm. */
if (ifp->scope > IFA_LINK)
break;
if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
memcpy(eui, ifp->addr.s6_addr+8, 8);
err = 0;
break;
}
}
read_unlock_bh(&idev->lock);
return err;
}
| 0
|
518,535
|
/*
  Fill the sort-key buffer for a DECIMAL column by copying the stored
  binary representation verbatim.  NOTE(review): this presumes the
  on-disk decimal format already compares correctly byte-wise
  (memcmp order) -- confirm against the storage format.
*/
void Field_new_decimal::sort_string(uchar *buff, uint length)
{
memcpy(buff, ptr, length);
}
| 0
|
418,511
|
// Destructor: round the declared entry length up to the next multiple
// of `alignment` and drain any bytes not yet consumed, so the wrapped
// stream is left positioned on the alignment boundary for the next
// archive entry.  (Fix: local was misspelled "aligned_legnth".)
RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter()
{
  // For size_t, -length % alignment yields (alignment - length % alignment)
  // % alignment, i.e. the padding needed to reach the boundary.
  const size_t aligned_length = length + (-length % alignment);
  ceph::bufferlist junk;
  DecoratedStreamGetter::get_exactly(aligned_length - position, junk);
}
| 0
|
213,762
|
/* Check that the guest driver wrote the expected magic value into the
 * shared-memory region, i.e. that it speaks the VMXNET3 rev1 protocol. */
static bool vmxnet3_verify_driver_magic(hwaddr dshmem)
{
return (VMXNET3_READ_DRV_SHARED32(dshmem, magic) == VMXNET3_REV1_MAGIC);
}
| 0
|
122,383
|
/* Decode a "decorated" kernelcache pointer via r_parse_pointer and
 * return the plain virtual address encoded in it. */
static ut64 p_ptr(ut64 decorated_addr, RKernelCacheObj *obj) {
RParsedPointer ptr;
r_parse_pointer (&ptr, decorated_addr, obj);
return ptr.address;
}
| 0
|
382,410
|
/*
 * ProcWaitForSignal -- block the current process on its per-process
 * semaphore until another process signals it.  The `true` argument to
 * PGSemaphoreLock presumably allows the wait to be interrupted --
 * confirm against PGSemaphoreLock's definition.
 */
ProcWaitForSignal(void)
{
PGSemaphoreLock(&MyProc->sem, true);
}
| 0
|
250,389
|
// Locate the Gnash player executable.  Order of preference:
//   1. the GNASH_PLAYER environment variable (must exist on disk),
//   2. GNASHBINDIR/gtk-gnash,
//   3. GNASHBINDIR/qt4-gnash.
// Returns the resolved path, or an empty string (after logging an
// error) when nothing usable is found.
getGnashExecutable()
{
    std::string procname;
    bool process_found = false;
    struct stat procstats;

    char *gnash_env = std::getenv("GNASH_PLAYER");
    if (gnash_env) {
        procname = gnash_env;
        process_found = (0 == stat(procname.c_str(), &procstats));
        if (!process_found) {
            // Fix: include the offending path in the message -- it
            // previously ended at the colon with nothing after it.
            gnash::log_error(std::string("Invalid path to gnash executable: ")
                             + procname);
            return "";
        }
    }

    if (!process_found) {
        procname = GNASHBINDIR "/gtk-gnash";
        process_found = (0 == stat(procname.c_str(), &procstats));
    }
    if (!process_found) {
        procname = GNASHBINDIR "/qt4-gnash";
        process_found = (0 == stat(procname.c_str(), &procstats));
    }
    if (!process_found) {
        gnash::log_error(std::string("Unable to find Gnash in ") + GNASHBINDIR);
        return "";
    }
    return procname;
}
| 0
|
225,035
|
// Empty out-of-line destructor: no resources to release here.
LiveSyncTest::~LiveSyncTest() {}
| 0
|
63,151
|
// Kernel eval for the LESS comparison op: computes element-wise
// (input1 < input2) into a bool output tensor.  Dispatches on the
// input dtype and on whether the two input shapes require
// broadcasting.  Returns kTfLiteError for unsupported dtypes.
TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) {
TFLITE_DCHECK(node->user_data != nullptr);
const OpData* data = static_cast<const OpData*>(node->user_data);
const TfLiteEvalTensor* input1 =
tflite::micro::GetEvalInput(context, node, kInputTensor1);
const TfLiteEvalTensor* input2 =
tflite::micro::GetEvalInput(context, node, kInputTensor2);
TfLiteEvalTensor* output =
tflite::micro::GetEvalOutput(context, node, kOutputTensor);
RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1);
RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2);
RuntimeShape output_shape = tflite::micro::GetTensorShape(output);
bool* output_data = tflite::micro::GetTensorData<bool>(output);
// Mismatched shapes take the slower broadcasting code path.
bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2);
switch (input1->type) {
case kTfLiteFloat32:
requires_broadcast
? reference_ops::Broadcast4DSlowLessNoScaling(
data->params, input1_shape,
tflite::micro::GetTensorData<float>(input1), input2_shape,
tflite::micro::GetTensorData<float>(input2), output_shape,
output_data)
: reference_ops::LessNoScaling(
data->params, input1_shape,
tflite::micro::GetTensorData<float>(input1), input2_shape,
tflite::micro::GetTensorData<float>(input2), output_shape,
output_data);
break;
case kTfLiteInt32:
requires_broadcast
? reference_ops::Broadcast4DSlowLessNoScaling(
data->params, input1_shape,
tflite::micro::GetTensorData<int32_t>(input1), input2_shape,
tflite::micro::GetTensorData<int32_t>(input2), output_shape,
output_data)
: reference_ops::LessNoScaling(
data->params, input1_shape,
tflite::micro::GetTensorData<int32_t>(input1), input2_shape,
tflite::micro::GetTensorData<int32_t>(input2), output_shape,
output_data);
break;
case kTfLiteInt64:
requires_broadcast
? reference_ops::Broadcast4DSlowLessNoScaling(
data->params, input1_shape,
tflite::micro::GetTensorData<int64_t>(input1), input2_shape,
tflite::micro::GetTensorData<int64_t>(input2), output_shape,
output_data)
: reference_ops::LessNoScaling(
data->params, input1_shape,
tflite::micro::GetTensorData<int64_t>(input1), input2_shape,
tflite::micro::GetTensorData<int64_t>(input2), output_shape,
output_data);
break;
// Quantized dtypes go through the *WithScaling variants.
case kTfLiteUInt8:
requires_broadcast
? reference_ops::Broadcast4DSlowLessWithScaling(
data->params, input1_shape,
tflite::micro::GetTensorData<uint8_t>(input1), input2_shape,
tflite::micro::GetTensorData<uint8_t>(input2), output_shape,
output_data)
: reference_ops::LessWithScaling(
data->params, input1_shape,
tflite::micro::GetTensorData<uint8_t>(input1), input2_shape,
tflite::micro::GetTensorData<uint8_t>(input2), output_shape,
output_data);
break;
case kTfLiteInt8:
requires_broadcast
? reference_ops::Broadcast4DSlowLessWithScaling(
data->params, input1_shape,
tflite::micro::GetTensorData<int8_t>(input1), input2_shape,
tflite::micro::GetTensorData<int8_t>(input2), output_shape,
output_data)
: reference_ops::LessWithScaling(
data->params, input1_shape,
tflite::micro::GetTensorData<int8_t>(input1), input2_shape,
tflite::micro::GetTensorData<int8_t>(input2), output_shape,
output_data);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
TfLiteTypeGetName(input1->type), input1->type);
return kTfLiteError;
}
return kTfLiteOk;
}
| 0
|
80,386
|
/*
 * Disable user-space RDTSC for the current task: set TIF_NOTSC and,
 * if it was not already set, enable CR4.TSD on this CPU.  Preemption
 * is disabled so the flag and the CPU state change together, without
 * the task migrating in between.
 */
void disable_TSC(void)
{
preempt_disable();
if (!test_and_set_thread_flag(TIF_NOTSC))
/*
* Must flip the CPU state synchronously with
* TIF_NOTSC in the current running context.
*/
cr4_set_bits(X86_CR4_TSD);
preempt_enable();
}
| 0
|
279,791
|
// Default constructor: nothing to initialize here.
ScrollLatencyBrowserTest() {}
| 0
|
258,899
|
// Convert a bfloat16 element to float for printing.  `print_v2` is
// unused here -- presumably kept for signature parity with other
// PrintOneElement overloads; confirm before removing.
inline float PrintOneElement(bfloat16 f, bool print_v2) {
return static_cast<float>(f);
}
| 0
|
202,702
|
// Callback run when the service-worker payment app produces a
// response.  Forwards the method name and stringified details to the
// delegate, then clears it so a second invocation becomes a no-op.
void ServiceWorkerPaymentInstrument::OnPaymentAppInvoked(
mojom::PaymentHandlerResponsePtr response) {
if (delegate_ != nullptr) {
delegate_->OnInstrumentDetailsReady(response->method_name,
response->stringified_details);
delegate_ = nullptr;
}
}
| 0
|
473,390
|
clearenv (void)
{
  /* Minimal clearenv replacement: truncate the environment list in
     place rather than freeing it.  A null `environ` already denotes
     an empty environment, so nothing needs doing then.  Always
     succeeds. */
  if (environ == NULL)
    return 0;
  environ[0] = NULL;
  return 0;
}
| 0
|
87,571
|
/*
 * Decode one FORMAT field header from a BCF record buffer into @fmt:
 * reads the field id (typed int) and the per-sample size/type
 * descriptor, points fmt->p at the raw per-sample data and advances
 * past it.  Returns the pointer just past this field.
 * Note: fmt->p aliases the input buffer (p_free = 0), so the buffer
 * must outlive the fmt struct.
 */
static inline uint8_t *bcf_unpack_fmt_core1(uint8_t *ptr, int n_sample, bcf_fmt_t *fmt)
{
uint8_t *ptr_start = ptr;
fmt->id = bcf_dec_typed_int1(ptr, &ptr);
fmt->n = bcf_dec_size(ptr, &ptr, &fmt->type);
/* bytes per sample = n values << log2(sizeof(element type)) */
fmt->size = fmt->n << bcf_type_shift[fmt->type];
fmt->p = ptr;
fmt->p_off = ptr - ptr_start;
fmt->p_free = 0;
ptr += n_sample * fmt->size;
fmt->p_len = ptr - fmt->p;
return ptr;
}
| 0
|
137,712
|
// Resample the tile `in` to resampled_width x resampled_height using
// bilinear interpolation.  Replaces in.data with a newly allocated
// buffer and updates the tile metadata.  Works on unsigned char
// samples -- assumes 8 bits per channel; TODO confirm for callers
// with bpc > 8.
void Transform::interpolate_bilinear( RawTile& in, unsigned int resampled_width, unsigned int resampled_height ){
// Pointer to input buffer
unsigned char *input = (unsigned char*) in.data;
int channels = in.channels;
unsigned int width = in.width;
unsigned int height = in.height;
// Define a max index position on the input buffer
unsigned long max = ( (width*height) - 1 ) * channels;
// Create new buffer and pointer for our output - make sure we have enough digits via unsigned long long
unsigned char *output = new unsigned char[(unsigned long long)resampled_width*resampled_height*channels];
// Calculate our scale
float xscale = (float)(width) / (float)resampled_width;
float yscale = (float)(height) / (float)resampled_height;
// Do not parallelize for small images (256x256 pixels) as this can be slower that single threaded
#if defined(__ICC) || defined(__INTEL_COMPILER)
#pragma ivdep
#elif defined(_OPENMP)
#pragma omp parallel for if( resampled_width*resampled_height > PARALLEL_THRESHOLD )
#endif
for( unsigned int j=0; j<resampled_height; j++ ){
// Index to the current pyramid resolution's top left pixel
int jj = (int) floor( j*yscale );
// Calculate some weights - do this in the highest loop possible
float jscale = j*yscale;
float c = (float)(jj+1) - jscale;
float d = jscale - (float)jj;
for( unsigned int i=0; i<resampled_width; i++ ){
// Index to the current pyramid resolution's top left pixel
int ii = (int) floor( i*xscale );
// Calculate the indices of the 4 surrounding pixels
unsigned long p11, p12, p21, p22;
unsigned long jj_w = jj*width;
p11 = (unsigned long) ( channels * ( ii + jj_w ) );
p12 = (unsigned long) ( channels * ( ii + (jj_w+width) ) );
p21 = (unsigned long) ( channels * ( (ii+1) + jj_w ) );
p22 = (unsigned long) ( channels * ( (ii+1) + (jj_w+width) ) );
// Make sure we don't stray outside our input buffer boundary
// - replicate at the edge
// (p11 is derived from in-range ii/jj and so can never exceed max;
// only the +1 neighbours need clamping.)
p12 = (p12<=max)? p12 : max;
p21 = (p21<=max)? p21 : max;
p22 = (p22<=max)? p22 : max;
// Calculate the rest of our weights
float iscale = i*xscale;
float a = (float)(ii+1) - iscale;
float b = iscale - (float)ii;
// Output buffer index
unsigned long long resampled_index = (unsigned long long)( (j*resampled_width + i) * channels );
for( int k=0; k<channels; k++ ){
float tx = input[p11+k]*a + input[p21+k]*b;
float ty = input[p12+k]*a + input[p22+k]*b;
unsigned char r = (unsigned char)( c*tx + d*ty );
output[resampled_index+k] = r;
}
}
}
// Delete original buffer
delete[] (unsigned char*) input;
// Correctly set our Rawtile info
in.width = resampled_width;
in.height = resampled_height;
in.dataLength = (size_t)resampled_width * (size_t)resampled_height * (size_t)channels * (size_t)(in.bpc/8);
in.data = output;
}
| 0
|
64,384
|
/*
 * Enable hardware single-stepping for @task (ptrace PTRACE_SINGLESTEP
 * support on powerpc).  Sets the debug-control bits appropriate for
 * the CPU family and always marks the task with TIF_SINGLESTEP.
 */
void user_enable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* BookE-style debug: instruction-complete event, clear branch-taken. */
task->thread.debug.dbcr0 &= ~DBCR0_BT;
task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs->msr |= MSR_DE;
#else
/* Classic MSR-based stepping: single-step, not branch-trace. */
regs->msr &= ~MSR_BE;
regs->msr |= MSR_SE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
| 0
|
172,099
|
// Return the top document's accessibility cache only if it already
// exists; never creates one.  Returns 0 when accessibility is
// disabled or the top document has no renderer.
AXObjectCache* Document::existingAXObjectCache() const
{
if (!AXObjectCache::accessibilityEnabled())
return 0;
if (!topDocument()->renderer())
return 0;
return topDocument()->m_axObjectCache.get();
}
| 0
|
76,597
|
/* Paravirtualized RDMSR: forward the read to the pv_ops
 * cpu.read_msr hook for the given MSR number. */
static inline u64 paravirt_read_msr(unsigned msr)
{
return PVOP_CALL1(u64, cpu.read_msr, msr);
}
| 0
|
469,861
|
/*
 * Look up an x_tables target by address family, name and revision,
 * taking a module reference on success.  Returns the target, or
 * ERR_PTR(-EINTR) if interrupted while acquiring the mutex,
 * ERR_PTR(-EPROTOTYPE) if the name matched but no revision did,
 * and ERR_PTR(0) (i.e. NULL) when no target of that name exists.
 */
struct xt_target *xt_find_target(int af, const char *name, u8 revision)
{
struct xt_target *t;
int err = 0;
if (mutex_lock_interruptible(&xt[af].mutex) != 0)
return ERR_PTR(-EINTR);
list_for_each_entry(t, &xt[af].target, list) {
if (strcmp(t->name, name) == 0) {
if (t->revision == revision) {
if (try_module_get(t->me)) {
mutex_unlock(&xt[af].mutex);
return t;
}
} else
err = -EPROTOTYPE; /* Found something. */
}
}
mutex_unlock(&xt[af].mutex);
return ERR_PTR(err);
}
| 0
|
106,069
|
/*
 * Append a sample buffer to the hint sample queue, growing the array
 * in increments of 10 entries.  On allocation failure the sample is
 * silently dropped (hinting is best-effort).
 * NOTE(review): sizeof(HintSample) * (queue->size + 10) is computed
 * without an explicit overflow check -- presumably queue->size stays
 * small in practice; confirm, or switch to av_realloc_array.
 */
static void sample_queue_push(HintSampleQueue *queue, uint8_t *data, int size,
int sample)
{
/* No need to keep track of smaller samples, since describing them
* with immediates is more efficient. */
if (size <= 14)
return;
if (!queue->samples || queue->len >= queue->size) {
HintSample *samples;
samples = av_realloc(queue->samples, sizeof(HintSample) * (queue->size + 10));
if (!samples)
return;
queue->size += 10;
queue->samples = samples;
}
queue->samples[queue->len].data = data;
queue->samples[queue->len].size = size;
queue->samples[queue->len].sample_number = sample;
queue->samples[queue->len].offset = 0;
/* own_data = 0: the queue borrows `data`; caller retains ownership. */
queue->samples[queue->len].own_data = 0;
queue->len++;
}
| 1
|
119,481
|
static gboolean
verify_generic_parameters (MonoClass *class)
{
int i;
MonoGenericContainer *gc = class->generic_container;
MonoBitSet *used_args = mono_bitset_new (gc->type_argc, 0);
for (i = 0; i < gc->type_argc; ++i) {
MonoGenericParamInfo *param_info = mono_generic_container_get_param_info (gc, i);
MonoClass **constraints;
if (!param_info->constraints)
continue;
mono_bitset_clear_all (used_args);
mono_bitset_set_fast (used_args, i);
for (constraints = param_info->constraints; *constraints; ++constraints) {
MonoClass *ctr = *constraints;
MonoType *constraint_type = &ctr->byval_arg;
if (!mono_type_is_valid_type_in_context (constraint_type, &gc->context))
goto fail;
if (mono_type_is_generic_argument (constraint_type) && !recursive_mark_constraint_args (used_args, gc, constraint_type))
goto fail;
if (ctr->generic_class && !mono_class_is_valid_generic_instantiation (NULL, ctr))
goto fail;
}
}
mono_bitset_free (used_args);
return TRUE;
fail:
mono_bitset_free (used_args);
return FALSE;
| 0
|
317,519
|
// Total scrollable range (max - min scroll position) along the given
// scrollbar orientation, in layout units.
int RenderLayerScrollableArea::scrollSize(ScrollbarOrientation orientation) const
{
IntSize scrollDimensions = maximumScrollPosition() - minimumScrollPosition();
return (orientation == HorizontalScrollbar) ? scrollDimensions.width() : scrollDimensions.height();
}
| 0
|
332,161
|
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4],
const uint8_t *src_data[4], const int src_linesizes[4],
enum AVPixelFormat pix_fmt, int width, int height)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
if (!desc || desc->flags & PIX_FMT_HWACCEL)
if (desc->flags & PIX_FMT_PAL ||
desc->flags & PIX_FMT_PSEUDOPAL) {
av_image_copy_plane(dst_data[0], dst_linesizes[0],
src_data[0], src_linesizes[0],
width, height);
/* copy the palette */
memcpy(dst_data[1], src_data[1], 4*256);
} else {
int i, planes_nb = 0;
for (i = 0; i < desc->nb_components; i++)
planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);
for (i = 0; i < planes_nb; i++) {
int h = height;
int bwidth = av_image_get_linesize(pix_fmt, width, i);
if (i == 1 || i == 2) {
h= -((-height)>>desc->log2_chroma_h);
av_image_copy_plane(dst_data[i], dst_linesizes[i],
src_data[i], src_linesizes[i],
bwidth, h);
| 1
|
192,102
|
/* Continuation invoked once the freshly created LV's device appears:
 * either kick off filesystem creation with the requested type, or --
 * when no filesystem was requested -- complete the D-Bus call
 * immediately with the device's object path. */
lvm2_lv_create_found_device (Device *device,
CreateLvm2LVData *data)
{
if (strlen (data->fstype) > 0)
{
device_filesystem_create_internal (device,
data->fstype,
data->fsoptions,
lvm2_lv_create_filesystem_create_hook,
NULL,
data->context);
}
else
{
dbus_g_method_return (data->context, device->priv->object_path);
}
}
| 0
|
284,868
|
// Ask the sync backend whether local changes are still pending upload.
// Calling this before the backend is initialized is a programming
// error (NOTREACHED); false is returned as a safe default in that case.
bool ProfileSyncService::HasUnsyncedItems() const {
if (backend_.get() && backend_initialized_) {
return backend_->HasUnsyncedItems();
}
NOTREACHED();
return false;
}
| 0
|
55,041
|
/*
 * Release the reference held on a futex key.  `fshared` is unused
 * here -- presumably kept to mirror get_futex_key()'s signature;
 * confirm before changing the prototype.
 */
void put_futex_key(int fshared, union futex_key *key)
{
drop_futex_key_refs(key);
}
| 0
|
405,448
|
/*
 * Try to convert the pageblock containing @page to MIGRATE_HIGHATOMIC
 * so its free pages are reserved for high-order atomic allocations.
 * The total reserved amount per zone is capped (roughly 1%).
 */
static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
unsigned int alloc_order)
{
int mt;
unsigned long max_managed, flags;
/*
* Limit the number reserved to 1 pageblock or roughly 1% of a zone.
* Check is race-prone but harmless.
*/
max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
if (zone->nr_reserved_highatomic >= max_managed)
return;
spin_lock_irqsave(&zone->lock, flags);
/* Recheck the nr_reserved_highatomic limit under the lock */
if (zone->nr_reserved_highatomic >= max_managed)
goto out_unlock;
/* Yoink! */
mt = get_pageblock_migratetype(page);
/* Only steal from ordinary migratetypes, never CMA/isolated blocks. */
if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
&& !is_migrate_cma(mt)) {
zone->nr_reserved_highatomic += pageblock_nr_pages;
set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
}
out_unlock:
spin_unlock_irqrestore(&zone->lock, flags);
}
| 0
|
267,030
|
bit_read_BOT (Bit_Chain *dat)
{
  /* A 2-bit selector chooses among three encodings of increasing
     range: a raw byte, a byte biased by 0x1f0, or a full short. */
  unsigned char selector = bit_read_BB (dat);

  switch (selector)
    {
    case 0:
      return bit_read_RC (dat);
    case 1:
      return bit_read_RC (dat) + 0x1f0;
    default:
      return bit_read_RS (dat);
    }
}
| 0
|
251,502
|
find_share(sa_handle_impl_t impl_handle, const char *sharepath)
{
	/*
	 * Walk the handle's singly linked share list looking for an
	 * entry whose sharepath matches exactly.  Returns the matching
	 * share, or NULL when the list is exhausted.
	 */
	sa_share_impl_t cur;

	for (cur = impl_handle->shares; cur != NULL; cur = cur->next) {
		if (strcmp(cur->sharepath, sharepath) == 0)
			break;
	}

	return (cur);
}
| 0
|
300,169
|
/* idr-iteration callback used when unregistering a transfer function:
 * if loop device @ptr is currently bound to the transfer table @data,
 * release it.  Always returns 0 so iteration visits every device. */
static int unregister_transfer_cb(int id, void *ptr, void *data)
{
struct loop_device *lo = ptr;
struct loop_func_table *xfer = data;
mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_encryption == xfer)
loop_release_xfer(lo);
mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
| 0
|
179,269
|
/*
 * Initialize an HTTP/2 frontend mux context for @conn: allocate the
 * h2c struct, an optional timeout task, and the HPACK decoding table;
 * fill in protocol defaults (65535 initial window, 16384 max frame
 * size per RFC 7540) and attach the context to the connection.
 * Returns 0 on success, -1 on allocation failure (anything already
 * allocated is released).
 */
static int h2c_frt_init(struct connection *conn)
{
struct h2c *h2c;
struct task *t = NULL;
struct session *sess = conn->owner;
h2c = pool_alloc(pool_head_h2c);
if (!h2c)
goto fail;
h2c->shut_timeout = h2c->timeout = sess->fe->timeout.client;
if (tick_isset(sess->fe->timeout.clientfin))
h2c->shut_timeout = sess->fe->timeout.clientfin;
h2c->task = NULL;
if (tick_isset(h2c->timeout)) {
t = task_new(tid_bit);
if (!t)
goto fail;
h2c->task = t;
t->process = h2_timeout_task;
t->context = h2c;
t->expire = tick_add(now_ms, h2c->timeout);
}
h2c->ddht = hpack_dht_alloc(h2_settings_header_table_size);
if (!h2c->ddht)
goto fail;
/* Initialise the context. */
h2c->st0 = H2_CS_PREFACE;
h2c->conn = conn;
h2c->max_id = -1;
h2c->errcode = H2_ERR_NO_ERROR;
h2c->flags = H2_CF_NONE;
h2c->rcvd_c = 0;
h2c->rcvd_s = 0;
h2c->nb_streams = 0;
h2c->dbuf = &buf_empty;
h2c->dsi = -1;
h2c->msi = -1;
h2c->last_sid = -1;
h2c->mbuf = &buf_empty;
h2c->miw = 65535; /* mux initial window size */
h2c->mws = 65535; /* mux window size */
h2c->mfs = 16384; /* initial max frame size */
h2c->streams_by_id = EB_ROOT_UNIQUE;
LIST_INIT(&h2c->send_list);
LIST_INIT(&h2c->fctl_list);
LIST_INIT(&h2c->buf_wait.list);
conn->mux_ctx = h2c;
if (t)
task_queue(t);
conn_xprt_want_recv(conn);
/* mux->wake will be called soon to complete the operation */
return 0;
fail:
if (t)
task_free(t);
pool_free(pool_head_h2c, h2c);
return -1;
}
| 0
|
214,118
|
// Generated V8 getter callback: wraps the internal getter with
// sampling-state trace annotations (marked as Blink work for the
// duration of the get, then back to V8 execution).
static void perContextEnabledLongAttributeAttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter");
TestObjectPythonV8Internal::perContextEnabledLongAttributeAttributeGetter(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 0
|
443,830
|
/*
 * GObject property setter for GTlsConnectionBase.  Most cases simply
 * replace the private copy of the value; BASE_IO_STREAM additionally
 * (re)creates pollable TLS wrapper streams around the new stream's
 * input/output halves.
 */
g_tls_connection_base_set_property (GObject      *object,
				    guint         prop_id,
				    const GValue *value,
				    GParamSpec   *pspec)
{
GTlsConnectionBase *tls = G_TLS_CONNECTION_BASE (object);
GTlsConnectionBasePrivate *priv = g_tls_connection_base_get_instance_private (tls);
GInputStream *istream;
GOutputStream *ostream;
gboolean system_certdb;
GTlsBackend *backend;
switch (prop_id)
{
case PROP_BASE_IO_STREAM:
/* A base stream and a base socket are mutually exclusive. */
g_assert (!g_value_get_object (value) || !priv->base_socket);
if (priv->base_io_stream)
{
g_object_unref (priv->base_io_stream);
priv->base_istream = NULL;
priv->base_ostream = NULL;
}
priv->base_io_stream = g_value_dup_object (value);
if (!priv->base_io_stream)
return;
istream = g_io_stream_get_input_stream (priv->base_io_stream);
ostream = g_io_stream_get_output_stream (priv->base_io_stream);
/* Only pollable halves get TLS wrapper streams. */
if (G_IS_POLLABLE_INPUT_STREAM (istream) &&
g_pollable_input_stream_can_poll (G_POLLABLE_INPUT_STREAM (istream)))
{
priv->base_istream = G_POLLABLE_INPUT_STREAM (istream);
priv->tls_istream = g_tls_input_stream_new (tls);
}
if (G_IS_POLLABLE_OUTPUT_STREAM (ostream) &&
g_pollable_output_stream_can_poll (G_POLLABLE_OUTPUT_STREAM (ostream)))
{
priv->base_ostream = G_POLLABLE_OUTPUT_STREAM (ostream);
priv->tls_ostream = g_tls_output_stream_new (tls);
}
break;
case PROP_BASE_SOCKET:
g_assert (!g_value_get_object (value) || !priv->base_io_stream);
g_clear_object (&priv->base_socket);
priv->base_socket = g_value_dup_object (value);
break;
case PROP_REQUIRE_CLOSE_NOTIFY:
priv->require_close_notify = g_value_get_boolean (value);
break;
case PROP_REHANDSHAKE_MODE:
priv->rehandshake_mode = g_value_get_enum (value);
break;
case PROP_USE_SYSTEM_CERTDB:
system_certdb = g_value_get_boolean (value);
if (system_certdb != priv->is_system_certdb)
{
g_clear_object (&priv->database);
if (system_certdb)
{
backend = g_tls_backend_get_default ();
priv->database = g_tls_backend_get_default_database (backend);
}
priv->is_system_certdb = system_certdb;
priv->database_is_unset = FALSE;
}
break;
case PROP_DATABASE:
g_clear_object (&priv->database);
priv->database = g_value_dup_object (value);
priv->is_system_certdb = FALSE;
priv->database_is_unset = FALSE;
break;
case PROP_CERTIFICATE:
if (priv->certificate)
g_object_unref (priv->certificate);
priv->certificate = g_value_dup_object (value);
break;
case PROP_INTERACTION:
g_clear_object (&priv->interaction);
priv->interaction = g_value_dup_object (value);
break;
case PROP_ADVERTISED_PROTOCOLS:
g_clear_pointer (&priv->advertised_protocols, g_strfreev);
priv->advertised_protocols = g_value_dup_boxed (value);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
}
}
| 0
|
462,706
|
// With lockstep $or enumeration enabled, an $or whose two children are
// satisfiable by different index combinations should still produce all
// four expected plans (two $or-based, two whole-query index scans).
TEST_F(QueryPlannerTest, LockstepOrEnumerationSanityCheckTwoChildrenDifferentNumSolutions) {
params.options =
QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::ENUMERATE_OR_CHILDREN_LOCKSTEP;
addIndex(BSON("a" << 1 << "b" << 1));
addIndex(BSON("a" << 1 << "c" << 1));
runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1, $or: [{b: 1}, {b: 2, c: 2}]}}"));
assertNumSolutions(4U);
assertSolutionExists(
"{fetch: {filter: null, node: {or: {nodes: [{ixscan: {pattern: {a: 1, b: 1}}}, {fetch: "
"{filter: {c: {$eq: 2}}, node: {ixscan: {pattern: {a: 1, b: 1}}}}}]}}}}");
assertSolutionExists(
"{fetch: {filter: null, node: {or: {nodes: [{ixscan: {pattern: {a: 1, b: 1}}}, {fetch: "
"{filter: {b: {$eq: 2}}, node: {ixscan: {pattern: {a: 1, c: 1}}}}}]}}}}");
assertSolutionExists(
"{fetch: {filter: {$or: [{b: {$eq: 1}}, {b: {$eq: 2}, c: {$eq: 2}}]}, node: {ixscan: "
"{pattern: {a: 1, b: 1}}}}}}}");
assertSolutionExists(
"{fetch: {filter: {$or: [{b: {$eq: 1}}, {b: {$eq: 2}, c: {$eq: 2}}]}, node: {ixscan: "
"{pattern: {a: 1, c: 1}}}}}}}");
}
| 0
|
442,220
|
/**
 * Round @p n up to the next multiple of @p d.
 *
 * Fix: the return type was implicit, which is invalid in C++ and in
 * C99 and later; make it explicitly `int`.
 *
 * @param n value to round up (assumed non-negative; n + d - 1 must not
 *          overflow int)
 * @param d divisor (assumed strictly positive)
 * @return  the smallest multiple of d that is >= n
 */
int
roundToNextMultiple(int n, int d)
{
    return ((n + d - 1) / d) * d;
}
| 0
|
414,384
|
/* Busy-loop for the configured number of seconds, repeatedly timing a
 * short fixed workload (compute_something_fast) and reporting the
 * worst observed run as the host's intrinsic latency.  Exits the
 * process when the test duration elapses or SIGINT requests a stop. */
static void intrinsicLatencyMode(void) {
long long test_end, run_time, max_latency = 0, runs = 0;
run_time = config.intrinsic_latency_duration*1000000;
test_end = ustime() + run_time;
signal(SIGINT, intrinsicLatencyModeStop);
while(1) {
long long start, end, latency;
start = ustime();
compute_something_fast();
end = ustime();
latency = end-start;
runs++;
if (latency <= 0) continue;
/* Reporting */
if (latency > max_latency) {
max_latency = latency;
printf("Max latency so far: %lld microseconds.\n", max_latency);
}
double avg_us = (double)run_time/runs;
double avg_ns = avg_us * 1e3;
if (force_cancel_loop || end > test_end) {
printf("\n%lld total runs "
"(avg latency: "
"%.4f microseconds / %.2f nanoseconds per run).\n",
runs, avg_us, avg_ns);
printf("Worst run took %.0fx longer than the average latency.\n",
max_latency / avg_us);
exit(0);
}
}
}
| 0
|
214,760
|
// A renderer sent a malformed IPC message of the given type: terminate
// its process.
void BrowserRenderProcessHost::ReceivedBadMessage(uint16 msg_type) {
BadMessageTerminateProcess(msg_type, process_.handle());
}
| 0
|
240,755
|
// Records the containing cluster and this entry's index within it.
BlockEntry::BlockEntry(Cluster* p, long idx) : m_pCluster(p), m_index(idx) {}
| 0
|
161,207
|
/*
 * Format a pkt-line "want" for @head into @buf, advertising the
 * subset of smart-protocol capabilities in @caps that we support.
 * The 4-hex-digit prefix encodes the whole line length, so lines
 * over 0xffff bytes are rejected.  Returns 0 on success, -1 on error.
 */
static int buffer_want_with_caps(const git_remote_head *head, transport_smart_caps *caps, git_buf *buf)
{
git_buf str = GIT_BUF_INIT;
char oid[GIT_OID_HEXSZ +1] = {0};
size_t len;
/* Prefer multi_ack_detailed */
if (caps->multi_ack_detailed)
git_buf_puts(&str, GIT_CAP_MULTI_ACK_DETAILED " ");
else if (caps->multi_ack)
git_buf_puts(&str, GIT_CAP_MULTI_ACK " ");
/* Prefer side-band-64k if the server supports both */
if (caps->side_band_64k)
git_buf_printf(&str, "%s ", GIT_CAP_SIDE_BAND_64K);
else if (caps->side_band)
git_buf_printf(&str, "%s ", GIT_CAP_SIDE_BAND);
if (caps->include_tag)
git_buf_puts(&str, GIT_CAP_INCLUDE_TAG " ");
if (caps->thin_pack)
git_buf_puts(&str, GIT_CAP_THIN_PACK " ");
if (caps->ofs_delta)
git_buf_puts(&str, GIT_CAP_OFS_DELTA " ");
if (git_buf_oom(&str))
return -1;
/* "XXXX" stands in for the 4-digit hex length prefix. */
len = strlen("XXXXwant ") + GIT_OID_HEXSZ + 1 /* NUL */ +
git_buf_len(&str) + 1 /* LF */;
if (len > 0xffff) {
giterr_set(GITERR_NET,
"Tried to produce packet with invalid length %" PRIuZ, len);
return -1;
}
git_buf_grow_by(buf, len);
git_oid_fmt(oid, &head->oid);
git_buf_printf(buf,
"%04xwant %s %s\n", (unsigned int)len, oid, git_buf_cstr(&str));
git_buf_free(&str);
GITERR_CHECK_ALLOC_BUF(buf);
return 0;
}
| 0
|
81,193
|
/*
 * Tagged-status interrupt handler for tg3 NICs: detect and dismiss
 * interrupts that are not ours (shared IRQ / stale status), mask
 * further chip interrupts, record the status tag and hand the real
 * work to NAPI.  Returns IRQ_HANDLED unless the IRQ was not ours.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
struct tg3_napi *tnapi = dev_id;
struct tg3 *tp = tnapi->tp;
struct tg3_hw_status *sblk = tnapi->hw_status;
unsigned int handled = 1;
/* In INTx mode, it is possible for the interrupt to arrive at
* the CPU before the status block posted prior to the interrupt.
* Reading the PCI State register will confirm whether the
* interrupt is ours and will flush the status block.
*/
if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
if (tg3_flag(tp, CHIP_RESETTING) ||
(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
handled = 0;
goto out;
}
}
/*
* writing any value to intr-mbox-0 clears PCI INTA# and
* chip-internal interrupt pending events.
* writing non-zero to intr-mbox-0 additional tells the
* NIC to stop sending us irqs, engaging "in-intr-handler"
* event coalescing.
*
* Flush the mailbox to de-assert the IRQ immediately to prevent
* spurious interrupts. The flush impacts performance but
* excessive spurious interrupts can be worse in some cases.
*/
tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
/*
* In a shared interrupt configuration, sometimes other devices'
* interrupts will scream. We record the current status tag here
* so that the above check can report that the screaming interrupts
* are unhandled. Eventually they will be silenced.
*/
tnapi->last_irq_tag = sblk->status_tag;
if (tg3_irq_sync(tp))
goto out;
prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
napi_schedule(&tnapi->napi);
out:
return IRQ_RETVAL(handled);
}
| 0
|
162,315
|
/* Push `item` onto the back of the circular deque.  Capacity is
 * cap_mask + 1 (the mask arithmetic implies a power-of-two size).
 * Returns CDE_PARAM for a NULL deque, CDE_OUT_OF_BOUNDS when full,
 * CDE_OK on success. */
static int cdeque_push_back(struct cdeque* d, void* item) {
if(d == NULL)
return CDE_PARAM;
if(d->size == d->cap_mask + 1)
return CDE_OUT_OF_BOUNDS;
d->arr[d->end_pos] = (size_t) item;
/* Wrap the write index using the power-of-two mask. */
d->end_pos = (d->end_pos + 1) & d->cap_mask;
d->size++;
return CDE_OK;
}
| 0
|
293,269
|
/*
 * Parse the AP's WMM parameter IE and derive per-access-category queue
 * priorities: compute an average back-off per AC, then sort the ACs by
 * back-off (ties broken by AC index).  Falls back to the driver
 * defaults when WMM is disabled or no IE is present.
 */
mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
struct ieee_types_wmm_parameter *wmm_ie)
{
u16 cw_min, avg_back_off, tmp[4];
u32 i, j, num_ac;
u8 ac_idx;
if (!wmm_ie || !priv->wmm_enabled) {
/* WMM is not enabled, just set the defaults and return */
mwifiex_wmm_default_queue_priorities(priv);
return;
}
mwifiex_dbg(priv->adapter, INFO,
"info: WMM Parameter IE: version=%d,\t"
"qos_info Parameter Set Count=%d, Reserved=%#x\n",
wmm_ie->version, wmm_ie->qos_info_bitmap &
IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
wmm_ie->reserved);
for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
/* CWmin = 2^ECWmin - 1; back-off estimate = CWmin/2 + AIFSN */
cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);
ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
priv->wmm.queue_priority[ac_idx] = ac_idx;
tmp[ac_idx] = avg_back_off;
mwifiex_dbg(priv->adapter, INFO,
"info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
(1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
cw_min, avg_back_off);
mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
}
/* Bubble sort */
for (i = 0; i < num_ac; i++) {
for (j = 1; j < num_ac - i; j++) {
if (tmp[j - 1] > tmp[j]) {
swap(tmp[j - 1], tmp[j]);
swap(priv->wmm.queue_priority[j - 1],
priv->wmm.queue_priority[j]);
} else if (tmp[j - 1] == tmp[j]) {
if (priv->wmm.queue_priority[j - 1]
< priv->wmm.queue_priority[j])
swap(priv->wmm.queue_priority[j - 1],
priv->wmm.queue_priority[j]);
}
}
}
mwifiex_wmm_queue_priorities_tid(priv);
}
| 0
|
80,705
|
// Resolve `filename` against the binary search paths registered for
// `type`.  Leading "../" components are stripped (with a warning) and
// any remaining ".." is rejected, keeping lookups inside the data
// directories.  Returns the first existing match, or "" if none.
std::string get_binary_file_location(const std::string& type, const std::string& filename)
{
DBG_FS << "Looking for '" << filename << "'." << std::endl;
if (filename.empty()) {
LOG_FS << "  invalid filename (type: " << type <<")" << std::endl;
return std::string();
}
// Some parts of Wesnoth enjoy putting ".." inside filenames. This is
// bad and should be fixed. But in the meantime, deal with them in a dumb way.
std::string::size_type pos = filename.rfind("../");
if (pos != std::string::npos) {
std::string nf = filename.substr(pos + 3);
LOG_FS << "Illegal path '" << filename << "' replaced by '" << nf << "'" << std::endl;
// Recurse with the sanitized name so the other checks re-run too.
return get_binary_file_location(type, nf);
}
if (filename.find("..") != std::string::npos) {
ERR_FS << "Illegal path '" << filename << "' (\"..\" not allowed)." << std::endl;
return std::string();
}
BOOST_FOREACH(const std::string &path, get_binary_paths(type))
{
const std::string file = path + filename;
DBG_FS << "  checking '" << path << "'" << std::endl;
if(file_exists(file)) {
DBG_FS << "  found at '" << file << "'" << std::endl;
return file;
}
}
DBG_FS << "  not found" << std::endl;
return std::string();
}
| 0
|
86,340
|
// CMYK->CMYK sampler used for black-preserving rendering: inputs with
// zero C, M and Y are mapped through the black tone curve only, so
// pure-black content stays on the K channel; all other colors go
// through the normal cmyk2cmyk transform.
int BlackPreservingGrayOnlySampler(register const cmsUInt16Number In[], register cmsUInt16Number Out[], register void* Cargo)
{
GrayOnlyParams* bp = (GrayOnlyParams*) Cargo;
// If going across black only, keep black only
if (In[0] == 0 && In[1] == 0 && In[2] == 0) {
// TAC does not apply because it is black ink!
Out[0] = Out[1] = Out[2] = 0;
Out[3] = cmsEvalToneCurve16(bp->KTone, In[3]);
return TRUE;
}
// Keep normal transform for other colors
bp ->cmyk2cmyk ->Eval16Fn(In, Out, bp ->cmyk2cmyk->Data);
return TRUE;
}
| 0
|
103,964
|
/*
 * Bit-banged MII management read: clock out the preamble and a read
 * command frame (5-bit PHY address, 5-bit register number), check the
 * turnaround bit, then clock in 16 data bits.  Returns the register
 * value, or 0 when the PHY does not respond.
 */
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
u32 cmd;
int i;
u32 retval = 0;
/* Preamble */
mii_send_bits (dev, 0xffffffff, 32);
/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
/* ST,OP = 0110'b for read operation */
cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
mii_send_bits (dev, cmd, 14);
/* Turnaround */
if (mii_getbit (dev))
goto err_out;
/* Read data */
for (i = 0; i < 16; i++) {
retval |= mii_getbit (dev);
retval <<= 1;
}
/* End cycle */
mii_getbit (dev);
/* The loop above shifted once too many; compensate here. */
return (retval >> 1) & 0xffff;
err_out:
return 0;
}
| 0
|
289,458
|
/* Initialise a qdev device; a failure here is fatal and aborts the
 * emulator via hw_error(). */
void qdev_init_nofail(DeviceState *dev)
{
    DeviceInfo *info = dev->info;

    if (qdev_init(dev) < 0) {
        hw_error("Initialization of device %s failed\n", info->name);
    }
}
| 0
|
290,512
|
/* Per-underlying-device callback for device-mapper persistent
 * reservation REGISTER: forward the request to the block device's
 * pr_ops, or fail with -EOPNOTSUPP when the device has none. */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct dm_pr *pr = data;
const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
if (!ops || !ops->pr_register)
return -EOPNOTSUPP;
return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}
| 0
|
66,125
|
// Returns the byte count tracked in storage_length_.
int32_t ByteArray::Size() { return storage_length_; }
| 0
|
284,791
|
// Decide whether a character token inside a <script> element should be
// suppressed as reflected XSS.  While in the filtering state, the
// canonicalized script snippet is compared against the request URL/body;
// a match switches to suppressing, in which case the token's characters
// are replaced by a single space.  Returns true when the token was
// modified.
bool XSSAuditor::FilterCharacterToken(const FilterTokenRequest& request) {
DCHECK(script_tag_nesting_level_);
DCHECK_NE(state_, kUninitialized);
if (state_ == kPermittingAdjacentCharacterTokens)
return false;
if (state_ == kFilteringTokens && script_tag_found_in_request_) {
String snippet = CanonicalizedSnippetForJavaScript(request);
if (IsContainedInRequest(snippet))
state_ = kSuppressingAdjacentCharacterTokens;
else if (!snippet.IsEmpty())
state_ = kPermittingAdjacentCharacterTokens;
}
if (state_ == kSuppressingAdjacentCharacterTokens) {
request.token.EraseCharacters();
request.token.AppendToCharacter(' ');
return true;
}
return false;
}
| 0
|
85,950
|
/*
 * Handle a stratum client.reconnect request.  The new URL must share
 * the current pool's domain (everything from the first '.') so the
 * pool cannot redirect the miner to an arbitrary host.  On success the
 * pool's socket address and port are swapped in under the stratum lock
 * and the connection restarted.  Returns false on any rejection or
 * failure to reconnect.
 */
static bool parse_reconnect(struct pool *pool, json_t *val)
{
char *sockaddr_url, *stratum_port, *tmp;
char *url, *port, address[256];
memset(address, 0, 255);
url = (char *)json_string_value(json_array_get(val, 0));
if (!url)
url = pool->sockaddr_url;
else {
char *dot_pool, *dot_reconnect;
dot_pool = strchr(pool->sockaddr_url, '.');
if (!dot_pool) {
applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'",
pool->sockaddr_url);
return false;
}
dot_reconnect = strchr(url, '.');
if (!dot_reconnect) {
applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'",
url);
return false;
}
/* Reject redirects to a different domain suffix. */
if (strcmp(dot_pool, dot_reconnect)) {
applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'",
pool->sockaddr_url);
return false;
}
}
port = (char *)json_string_value(json_array_get(val, 1));
if (!port)
port = pool->stratum_port;
snprintf(address, 254, "%s:%s", url, port);
if (!extract_sockaddr(address, &sockaddr_url, &stratum_port))
return false;
applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address);
clear_pool_work(pool);
mutex_lock(&pool->stratum_lock);
__suspend_stratum(pool);
/* Swap in the new address/port, freeing the old strings. */
tmp = pool->sockaddr_url;
pool->sockaddr_url = sockaddr_url;
pool->stratum_url = pool->sockaddr_url;
free(tmp);
tmp = pool->stratum_port;
pool->stratum_port = stratum_port;
free(tmp);
mutex_unlock(&pool->stratum_lock);
if (!restart_stratum(pool)) {
pool_failed(pool);
return false;
}
return true;
}
| 0
|
258,933
|
/* {{{ proto resource imagegrabwindow(int window_handle [, int client_area])
   Grab a window (or its client area) identified by a native HWND and return
   it as a gd truecolor image resource, or FALSE on failure. */
PHP_FUNCTION(imagegrabwindow)
{
	HWND window;
	long client_area = 0;
	RECT rc = {0};
	int Width, Height;
	HDC		hdc;
	HDC memDC;
	HBITMAP memBM;
	HBITMAP hOld;
	HINSTANCE handle;
	long lwindow_handle;
	typedef BOOL (WINAPI *tPrintWindow)(HWND, HDC,UINT);
	tPrintWindow pPrintWindow = 0;
	/* Must be initialized: the error paths below jump to "clean", which
	 * tests im — previously this read an uninitialized pointer. */
	gdImagePtr im = NULL;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|l", &lwindow_handle, &client_area) == FAILURE) {
		RETURN_FALSE;
	}

	window = (HWND) lwindow_handle;

	if (!IsWindow(window)) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Invalid window handle");
		RETURN_FALSE;
	}

	hdc = GetDC(0);

	if (client_area) {
		GetClientRect(window, &rc);
		Width = rc.right;
		Height = rc.bottom;
	} else {
		GetWindowRect(window, &rc);
		Width = rc.right - rc.left;
		Height = rc.bottom - rc.top;
	}

	/* Round the width down to a multiple of 4 (scanline alignment). */
	Width = (Width/4)*4;

	memDC = CreateCompatibleDC(hdc);
	memBM = CreateCompatibleBitmap(hdc, Width, Height);
	hOld = (HBITMAP) SelectObject (memDC, memBM);

	/* PrintWindow() does not exist on pre-XP systems, so resolve it
	 * dynamically rather than linking against it. */
	handle = LoadLibrary("User32.dll");
	if ( handle == 0 ) {
		goto clean;
	}
	pPrintWindow = (tPrintWindow) GetProcAddress(handle, "PrintWindow");

	if ( pPrintWindow )  {
		pPrintWindow(window, memDC, (UINT) client_area);
	} else {
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Windows API too old");
		/* Don't leak the library handle on the error path. */
		FreeLibrary(handle);
		goto clean;
	}

	FreeLibrary(handle);

	im = gdImageCreateTrueColor(Width, Height);
	if (im) {
		int x,y;
		/* Strict bounds: the previous "<=" loops sampled one pixel
		 * past the bitmap on each axis. */
		for (y=0; y < Height; y++) {
			for (x=0; x < Width; x++) {
				int c = GetPixel(memDC, x,y);
				gdImageSetPixel(im, x, y, gdTrueColor(GetRValue(c), GetGValue(c), GetBValue(c)));
			}
		}
	}

clean:
	/* Restore and release all GDI objects regardless of outcome. */
	SelectObject(memDC,hOld);
	DeleteObject(memBM);
	DeleteDC(memDC);
	ReleaseDC( 0, hdc );

	if (!im) {
		RETURN_FALSE;
	} else {
		ZEND_REGISTER_RESOURCE(return_value, im, le_gd);
	}
}
| 0
|
391,453
|
/* Switch the capture channel source on the EMU0204 by sending a
 * vendor-specific class control request.  @value 0 selects source 0x01,
 * non-zero selects 0x02.  Returns 0 on success or a negative errno. */
static int snd_emu0204_ch_switch_update(struct usb_mixer_interface *mixer,
					int value)
{
	struct snd_usb_audio *chip = mixer->chip;
	int err;
	unsigned char buf[2];

	/* Serialize against device disconnect/shutdown. */
	err = snd_usb_lock_shutdown(chip);
	if (err < 0)
		return err;

	buf[0] = 0x01;
	buf[1] = value ? 0x02 : 0x01;
	err = snd_usb_ctl_msg(chip->dev,
			      usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR,
			      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
			      0x0400, 0x0e00, buf, 2);
	snd_usb_unlock_shutdown(chip);
	return err;
}
| 0
|
469,495
|
/* RDMA_USER_CM_CMD_DISCONNECT handler: copy the request from userspace,
 * look up the caller's context (must be bound to a device), and tear down
 * the connection.  Returns 0 or a negative errno. */
static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* Serialize against concurrent operations on this cm_id. */
	mutex_lock(&ctx->mutex);
	ret = rdma_disconnect(ctx->cm_id);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}
| 0
|
235,439
|
/* Release all resources owned by a single-instance IPC client: shut down
 * its GIOChannel (without flushing), remove its watch source, and free the
 * cwd string and argv array received from the peer. */
static void single_inst_client_free(SingleInstClient* client)
{
    g_io_channel_shutdown(client->channel, FALSE, NULL);
    g_io_channel_unref(client->channel);
    g_source_remove(client->watch);
    g_free(client->cwd);
    /* argv elements are individually allocated strings. */
    g_ptr_array_foreach(client->argv, (GFunc)g_free, NULL);
    g_ptr_array_free(client->argv, TRUE);
    g_slice_free(SingleInstClient, client);
    /* g_debug("free client"); */
}
| 0
|
495,005
|
/* Open handler for a PCM playback device node: resolves the PCM object from
 * the minor number and opens a playback substream on it. */
static int snd_pcm_playback_open(struct inode *inode, struct file *file)
{
	struct snd_pcm *pcm;
	int err = nonseekable_open(inode, file);
	if (err < 0)
		return err;
	/* NOTE(review): pcm may be NULL if the card vanished; snd_pcm_open
	 * is presumably expected to reject that — confirm. */
	pcm = snd_lookup_minor_data(iminor(inode),
				    SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
	err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
	/* Drop the card reference taken by snd_lookup_minor_data(). */
	if (pcm)
		snd_card_unref(pcm->card);
	return err;
}
| 0
|
4,468
|
/*
 * Mechanism-glue entry point for GSS_Init_sec_context().  Validates the
 * arguments, selects a mechanism, translates the union (glue) name, cred
 * and context objects into mechanism-specific ones, then dispatches to the
 * selected mechanism's gss_init_sec_context and wraps the result back up.
 */
gss_init_sec_context (minor_status,
                      claimant_cred_handle,
                      context_handle,
                      target_name,
                      req_mech_type,
                      req_flags,
                      time_req,
                      input_chan_bindings,
                      input_token,
                      actual_mech_type,
                      output_token,
                      ret_flags,
                      time_rec)
OM_uint32 *		minor_status;
gss_cred_id_t		claimant_cred_handle;
gss_ctx_id_t *		context_handle;
gss_name_t		target_name;
gss_OID			req_mech_type;
OM_uint32		req_flags;
OM_uint32		time_req;
gss_channel_bindings_t	input_chan_bindings;
gss_buffer_t		input_token;
gss_OID *		actual_mech_type;
gss_buffer_t		output_token;
OM_uint32 *		ret_flags;
OM_uint32 *		time_rec;
{
    OM_uint32		status, temp_minor_status;
    gss_union_name_t	union_name;
    gss_union_cred_t	union_cred;
    gss_name_t		internal_name;
    gss_union_ctx_id_t	union_ctx_id;
    gss_OID		selected_mech;
    gss_mechanism	mech;
    gss_cred_id_t	input_cred_handle;

    status = val_init_sec_ctx_args(minor_status,
				   claimant_cred_handle,
				   context_handle,
				   target_name,
				   req_mech_type,
				   req_flags,
				   time_req,
				   input_chan_bindings,
				   input_token,
				   actual_mech_type,
				   output_token,
				   ret_flags,
				   time_rec);
    if (status != GSS_S_COMPLETE)
	return (status);

    status = gssint_select_mech_type(minor_status, req_mech_type,
				     &selected_mech);
    if (status != GSS_S_COMPLETE)
	return (status);

    union_name = (gss_union_name_t)target_name;

    /*
     * obtain the gss mechanism information for the requested
     * mechanism.  If mech_type is NULL, set it to the resultant
     * mechanism
     */
    mech = gssint_get_mechanism(selected_mech);
    if (mech == NULL)
	return (GSS_S_BAD_MECH);

    if (mech->gss_init_sec_context == NULL)
	return (GSS_S_UNAVAILABLE);

    /*
     * If target_name is mechanism_specific, then it must match the
     * mech_type that we're about to use.  Otherwise, do an import on
     * the external_name form of the target name.
     */
    if (union_name->mech_type &&
	g_OID_equal(union_name->mech_type, selected_mech)) {
	internal_name = union_name->mech_name;
    } else {
	if ((status = gssint_import_internal_name(minor_status, selected_mech,
						  union_name,
						  &internal_name)) != GSS_S_COMPLETE)
	    return (status);
    }

    /*
     * if context_handle is GSS_C_NO_CONTEXT, allocate a union context
     * descriptor to hold the mech type information as well as the
     * underlying mechanism context handle.  Otherwise, cast the
     * value of *context_handle to the union context variable.
     */
    if(*context_handle == GSS_C_NO_CONTEXT) {
	status = GSS_S_FAILURE;
	union_ctx_id = (gss_union_ctx_id_t)
	    malloc(sizeof(gss_union_ctx_id_desc));
	if (union_ctx_id == NULL)
	    goto end;

	if (generic_gss_copy_oid(&temp_minor_status, selected_mech,
				 &union_ctx_id->mech_type) != GSS_S_COMPLETE) {
	    free(union_ctx_id);
	    goto end;
	}

	/* copy the supplied context handle */
	union_ctx_id->internal_ctx_id = GSS_C_NO_CONTEXT;
    } else
	union_ctx_id = (gss_union_ctx_id_t)*context_handle;

    /*
     * get the appropriate cred handle from the union cred struct.
     * defaults to GSS_C_NO_CREDENTIAL if there is no cred, which will
     * use the default credential.
     */
    union_cred = (gss_union_cred_t) claimant_cred_handle;
    input_cred_handle = gssint_get_mechanism_cred(union_cred, selected_mech);

    /*
     * now call the approprate underlying mechanism routine
     */
    status = mech->gss_init_sec_context(
	minor_status,
	input_cred_handle,
	&union_ctx_id->internal_ctx_id,
	internal_name,
	gssint_get_public_oid(selected_mech),
	req_flags,
	time_req,
	input_chan_bindings,
	input_token,
	actual_mech_type,
	output_token,
	ret_flags,
	time_rec);

    if (status != GSS_S_COMPLETE && status != GSS_S_CONTINUE_NEEDED) {
	/*
	 * The spec says the preferred method is to delete all context info on
	 * the first call to init, and on all subsequent calls make the caller
	 * responsible for calling gss_delete_sec_context.  However, if the
	 * mechanism decided to delete the internal context, we should also
	 * delete the union context.
	 */
	map_error(minor_status, mech);
	if (union_ctx_id->internal_ctx_id == GSS_C_NO_CONTEXT)
	    *context_handle = GSS_C_NO_CONTEXT;
	/* Only a context we created above (not one supplied by the caller)
	 * leaves *context_handle as GSS_C_NO_CONTEXT here, so this frees
	 * our own allocation. */
	if (*context_handle == GSS_C_NO_CONTEXT) {
	    free(union_ctx_id->mech_type->elements);
	    free(union_ctx_id->mech_type);
	    free(union_ctx_id);
	}
    } else if (*context_handle == GSS_C_NO_CONTEXT) {
	/* Publish the freshly created union context to the caller. */
	union_ctx_id->loopback = union_ctx_id;
	*context_handle = (gss_ctx_id_t)union_ctx_id;
    }

end:
    /* Release internal_name only if it was imported above (i.e. it is not
     * the name object embedded in the caller's union name). */
    if (union_name->mech_name == NULL ||
	union_name->mech_name != internal_name) {
	(void) gssint_release_internal_name(&temp_minor_status,
					    selected_mech, &internal_name);
    }

    return(status);
}
| 1
|
48,333
|
/* Parse an OpenPGP transferable key or signature from the raw packet stream
 * @pkts of @pktlen bytes.  If @pkttype is non-zero the first packet must be
 * of that type.  On success, *@ret (if non-NULL) receives the parsed
 * parameters.  Returns 0 on success, -1 on malformed input or a failed
 * subkey-binding verification. */
int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
		 pgpDigParams * ret)
{
    const uint8_t *p = pkts;
    const uint8_t *pend = pkts + pktlen;
    pgpDigParams digp = NULL;
    pgpDigParams selfsig = NULL;
    int i = 0;
    int alloced = 16; /* plenty for normal cases */
    struct pgpPkt *all = xmalloc(alloced * sizeof(*all));
    int rc = -1; /* assume failure */
    int expect = 0;
    int prevtag = 0;

    while (p < pend) {
	struct pgpPkt *pkt = &all[i];
	if (decodePkt(p, (pend - p), pkt))
	    break;

	if (digp == NULL) {
	    if (pkttype && pkt->tag != pkttype) {
		break;
	    } else {
		digp = pgpDigParamsNew(pkt->tag);
	    }
	}

	/* A subkey was just seen: this packet must be its binding
	 * signature, parsed into a separate selfsig object. */
	if (expect) {
	    if (pkt->tag != expect)
		break;
	    selfsig = pgpDigParamsNew(pkt->tag);
	}

	if (pgpPrtPkt(pkt, selfsig ? selfsig : digp))
	    break;

	if (selfsig) {
	    /* subkeys must be followed by binding signature */
	    if (prevtag == PGPTAG_PUBLIC_SUBKEY) {
		if (selfsig->sigtype != PGPSIGTYPE_SUBKEY_BINDING)
		    break;
	    }
	    int xx = pgpVerifySelf(digp, selfsig, all, i);
	    selfsig = pgpDigParamsFree(selfsig);
	    if (xx)
		break;
	    expect = 0;
	}

	if (pkt->tag == PGPTAG_PUBLIC_SUBKEY)
	    expect = PGPTAG_SIGNATURE;
	prevtag = pkt->tag;

	i++;
	p += (pkt->body - pkt->head) + pkt->blen;
	/* A lone signature is a single packet: stop after it. */
	if (pkttype == PGPTAG_SIGNATURE)
	    break;

	if (alloced <= i) {
	    alloced *= 2;
	    all = xrealloc(all, alloced * sizeof(*all));
	}
    }

    /* Success only if the whole input was consumed and no binding
     * signature is still pending. */
    rc = (digp && (p == pend) && expect == 0) ? 0 : -1;

    free(all);
    if (ret && rc == 0) {
	*ret = digp;
    } else {
	pgpDigParamsFree(digp);
    }
    return rc;
}
| 0
|
169,453
|
// Packs one row of 8-bit RGBA pixels into 16-bit RGBA4444: each channel is
// reduced to its high 4 bits and packed from the most significant nibble
// down (R,G,B,A).
void Pack<WebGLImageConversion::kDataFormatRGBA4444,
          WebGLImageConversion::kAlphaDoNothing,
          uint8_t,
          uint16_t>(const uint8_t* source,
                    uint16_t* destination,
                    unsigned pixels_per_row) {
#if WTF_CPU_ARM_NEON
  // NOTE(review): the SIMD helpers are assumed to take source/destination/
  // pixels_per_row by reference, consume the bulk of the row and leave only
  // the remainder for the scalar loop below — confirm their signatures.
  SIMD::PackOneRowOfRGBA8ToUnsignedShort4444(source, destination,
                                             pixels_per_row);
#endif
#if HAVE_MIPS_MSA_INTRINSICS
  SIMD::packOneRowOfRGBA8ToUnsignedShort4444MSA(source, destination,
                                                pixels_per_row);
#endif
  for (unsigned i = 0; i < pixels_per_row; ++i) {
    *destination = (((source[0] & 0xF0) << 8) | ((source[1] & 0xF0) << 4) |
                    (source[2] & 0xF0) | (source[3] >> 4));
    source += 4;
    destination += 1;
  }
}
| 0
|
281,452
|
/* {{{ proto mixed SplObjectStorage::key()
 Returns the current iteration index of the storage */
SPL_METHOD(SplObjectStorage, key)
{
	/* Fetch the SplObjectStorage instance backing $this. */
	spl_SplObjectStorage *intern = (spl_SplObjectStorage*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	RETURN_LONG(intern->index);
} /* }}} */
/* {{{ proto mixed SplObjectStorage::current()
| 0
|
419,195
|
/* Notify every control-mode client that the mode of pane %pane changed. */
control_notify_pane_mode_changed(int pane)
{
	struct client	*c;

	TAILQ_FOREACH(c, &clients, entry) {
		/* Skip clients that should not receive notifications. */
		if (!CONTROL_SHOULD_NOTIFY_CLIENT(c))
			continue;

		control_write(c, "%%pane-mode-changed %%%u", pane);
	}
}
| 0
|
129,228
|
/*
 * i40e_ioctl - netdev ioctl entry point.
 * Only the hardware-timestamping get/set requests are handled; everything
 * else returns -EOPNOTSUPP.
 */
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	if (cmd == SIOCGHWTSTAMP)
		return i40e_ptp_get_ts_config(pf, ifr);
	if (cmd == SIOCSHWTSTAMP)
		return i40e_ptp_set_ts_config(pf, ifr);

	return -EOPNOTSUPP;
}
| 0
|
152,900
|
/**
 * Disable interrupts on whichever Ethernet PHY or switch driver is
 * attached to @interface; a no-op when neither driver is present.
 **/
void esp32EthDisableIrq(NetInterface *interface)
{
   //A PHY driver takes precedence when present
   if(interface->phyDriver != NULL)
   {
      interface->phyDriver->disableIrq(interface);
      return;
   }

   //Otherwise fall back to an attached Ethernet switch driver
   if(interface->switchDriver != NULL)
   {
      interface->switchDriver->disableIrq(interface);
   }

   //Neither driver present: nothing to do
}
| 0
|
345,673
|
/* {{{ proto mixed SplDoublyLinkedList::current()
 Return the datum at the iterator's current position, or NULL when the
 iterator is not on a valid element */
SPL_METHOD(SplDoublyLinkedList, current)
{
	spl_dllist_object *intern = (spl_dllist_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	/* traverse_pointer is the list element the iterator currently sits on. */
	spl_ptr_llist_element *element = intern->traverse_pointer;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (element == NULL || element->data == NULL) {
		RETURN_NULL();
	} else {
		/* NOTE(review): element->data is assumed to always hold a
		 * zval* here — verify no producer stores another payload. */
		zval *data = (zval *)element->data;
		RETURN_ZVAL(data, 1, 0);
	}
}
| 1
|
50,380
|
Jsi_vtype Jsi_ValueTypeGet(Jsi_Value *pv) { return pv->vt; }
| 0
|
283,365
|
/*
 * Handle a possibly-fragmented AVRCP vendor message.  START/CONTINUE/END
 * response fragments are reassembled into a single buffer; commands are
 * dispatched to avrc_proc_vendor_command().  *pp_pkt may be replaced by the
 * reassembly buffer or cleared.  Returns a drop code (0 = deliver message,
 * non-zero = drop, value indicates why).
 */
static uint8_t avrc_proc_far_msg(uint8_t handle, uint8_t label, uint8_t cr,
                                 BT_HDR** pp_pkt, tAVRC_MSG_VENDOR* p_msg) {
  BT_HDR* p_pkt = *pp_pkt;
  uint8_t* p_data;
  uint8_t drop_code = 0;
  bool buf_overflow = false;
  BT_HDR* p_rsp = NULL;
  BT_HDR* p_cmd = NULL;
  bool req_continue = false;
  BT_HDR* p_pkt_new = NULL;
  uint8_t pkt_type;
  tAVRC_RASM_CB* p_rcb;
  tAVRC_NEXT_CMD avrc_cmd;
  tAVRC_STS status;

  p_data = (uint8_t*)(p_pkt + 1) + p_pkt->offset;

  /* Skip over vendor header (ctype, subunit*, opcode, CO_ID) */
  p_data += AVRC_VENDOR_HDR_SIZE;
  pkt_type = *(p_data + 1) & AVRC_PKT_TYPE_MASK;
  AVRC_TRACE_DEBUG("pkt_type %d", pkt_type);
  p_rcb = &avrc_cb.rcb[handle];

  /* check if the message needs to be re-assembled */
  if (pkt_type == AVRC_PKT_SINGLE || pkt_type == AVRC_PKT_START) {
    /* previous fragments need to be dropped, when received another new message
     */
    p_rcb->rasm_offset = 0;
    osi_free_and_reset((void**)&p_rcb->p_rmsg);
  }

  if (pkt_type != AVRC_PKT_SINGLE && cr == AVCT_RSP) {
    /* not a single response packet - need to re-assemble metadata messages */
    if (pkt_type == AVRC_PKT_START) {
      /* Allocate buffer for re-assembly */
      p_rcb->rasm_pdu = *p_data;
      p_rcb->p_rmsg = (BT_HDR*)osi_malloc(BT_DEFAULT_BUFFER_SIZE);
      /* Copy START packet to buffer for re-assembling fragments */
      memcpy(p_rcb->p_rmsg, p_pkt, sizeof(BT_HDR)); /* Copy bt hdr */

      /* Copy metadata message */
      memcpy((uint8_t*)(p_rcb->p_rmsg + 1),
             (uint8_t*)(p_pkt + 1) + p_pkt->offset, p_pkt->len);

      /* offset of start of metadata response in reassembly buffer */
      p_rcb->p_rmsg->offset = p_rcb->rasm_offset = 0;

      /*
       * Free original START packet, replace with pointer to
       * reassembly buffer.
       */
      osi_free(p_pkt);
      *pp_pkt = p_rcb->p_rmsg;

      /*
       * Set offset to point to where to copy next - use the same
       * reassembly logic as AVCT.
       */
      p_rcb->p_rmsg->offset += p_rcb->p_rmsg->len;

      /* Ask the peer for the next fragment. */
      req_continue = true;
    } else if (p_rcb->p_rmsg == NULL) {
      /* Received a CONTINUE/END, but no corresponding START
                    (or previous fragmented response was dropped) */
      AVRC_TRACE_DEBUG(
          "Received a CONTINUE/END without no corresponding START \
                                (or previous fragmented response was dropped)");
      drop_code = 5;
      osi_free(p_pkt);
      *pp_pkt = NULL;
    } else {
      /* get size of buffer holding assembled message */
      /*
       * NOTE: The buffer is allocated above at the beginning of the
       * reassembly, and is always of size BT_DEFAULT_BUFFER_SIZE.
       */
      uint16_t buf_len = BT_DEFAULT_BUFFER_SIZE - sizeof(BT_HDR);
      /* adjust offset and len of fragment for header byte */
      p_pkt->offset += (AVRC_VENDOR_HDR_SIZE + AVRC_MIN_META_HDR_SIZE);
      p_pkt->len -= (AVRC_VENDOR_HDR_SIZE + AVRC_MIN_META_HDR_SIZE);

      /* verify length */
      if ((p_rcb->p_rmsg->offset + p_pkt->len) > buf_len) {
        AVRC_TRACE_WARNING(
            "Fragmented message too big! - report the partial message");
        /* Truncate and treat this fragment as the final one. */
        p_pkt->len = buf_len - p_rcb->p_rmsg->offset;
        pkt_type = AVRC_PKT_END;
        buf_overflow = true;
      }

      /* copy contents of p_pkt to p_rx_msg */
      memcpy((uint8_t*)(p_rcb->p_rmsg + 1) + p_rcb->p_rmsg->offset,
             (uint8_t*)(p_pkt + 1) + p_pkt->offset, p_pkt->len);

      if (pkt_type == AVRC_PKT_END) {
        /* Reassembly complete: hand the assembled buffer back to the
         * caller and rewrite its header as a SINGLE packet. */
        p_rcb->p_rmsg->offset = p_rcb->rasm_offset;
        p_rcb->p_rmsg->len += p_pkt->len;
        p_pkt_new = p_rcb->p_rmsg;
        p_rcb->rasm_offset = 0;
        p_rcb->p_rmsg = NULL;
        p_msg->p_vendor_data = (uint8_t*)(p_pkt_new + 1) + p_pkt_new->offset;
        p_msg->hdr.ctype = p_msg->p_vendor_data[0] & AVRC_CTYPE_MASK;
        /* 6 = ctype, subunit*, opcode & CO_ID */
        p_msg->p_vendor_data += AVRC_VENDOR_HDR_SIZE;
        p_msg->vendor_len = p_pkt_new->len - AVRC_VENDOR_HDR_SIZE;
        p_data = p_msg->p_vendor_data + 1; /* skip pdu */
        *p_data++ = AVRC_PKT_SINGLE;
        UINT16_TO_BE_STREAM(p_data,
                            (p_msg->vendor_len - AVRC_MIN_META_HDR_SIZE));
        AVRC_TRACE_DEBUG("end frag:%d, total len:%d, offset:%d", p_pkt->len,
                         p_pkt_new->len, p_pkt_new->offset);
      } else {
        /* CONTINUE fragment: accumulate and request the next one. */
        p_rcb->p_rmsg->offset += p_pkt->len;
        p_rcb->p_rmsg->len += p_pkt->len;
        p_pkt_new = NULL;
        req_continue = true;
      }
      osi_free(p_pkt);
      *pp_pkt = p_pkt_new;
    }
  }

  if (cr == AVCT_CMD) {
    p_rsp = avrc_proc_vendor_command(handle, label, *pp_pkt, p_msg);
    if (p_rsp) {
      AVCT_MsgReq(handle, label, AVCT_RSP, p_rsp);
      osi_free_and_reset((void**)pp_pkt);
      drop_code = 3;
    } else if (p_msg->hdr.opcode == AVRC_OP_DROP) {
      drop_code = 1;
    } else if (p_msg->hdr.opcode == AVRC_OP_DROP_N_FREE)
      drop_code = 4;

  } else if (cr == AVCT_RSP) {
    if (req_continue) {
      avrc_cmd.pdu = AVRC_PDU_REQUEST_CONTINUATION_RSP;
      drop_code = 2;
    } else if (buf_overflow) {
      /* Incoming message too big to fit in BT_DEFAULT_BUFFER_SIZE. Send abort
       * to peer  */
      avrc_cmd.pdu = AVRC_PDU_ABORT_CONTINUATION_RSP;
      drop_code = 4;
    } else {
      return drop_code;
    }
    avrc_cmd.status = AVRC_STS_NO_ERROR;
    avrc_cmd.target_pdu = p_rcb->rasm_pdu;

    tAVRC_COMMAND avrc_command;
    avrc_command.continu = avrc_cmd;
    status = AVRC_BldCommand(&avrc_command, &p_cmd);
    if (status == AVRC_STS_NO_ERROR) {
      AVRC_MsgReq(handle, (uint8_t)(label), AVRC_CMD_CTRL, p_cmd);
    }
  }

  return drop_code;
}
| 0
|
436,555
|
/*
 * Copy a force-feedback command into the device's output report and push
 * it to the hardware with a SET_REPORT request.
 */
static void holtekff_send(struct holtekff_device *holtekff,
			  struct hid_device *hid,
			  const u8 data[HOLTEKFF_MSG_LENGTH])
{
	int i;

	/* Stage the message bytes in the report's value array (element-wise:
	 * the value array is wider than u8). */
	for (i = 0; i < HOLTEKFF_MSG_LENGTH; i++)
		holtekff->field->value[i] = data[i];

	dbg_hid("sending %7ph\n", data);

	hid_hw_request(hid, holtekff->field->report, HID_REQ_SET_REPORT);
}
| 0
|
352,873
|
static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
INTERNAL_SIZE_T size; /* its size */
mfastbinptr *fb; /* associated fastbin */
mchunkptr nextchunk; /* next contiguous chunk */
INTERNAL_SIZE_T nextsize; /* its size */
int nextinuse; /* true if nextchunk is used */
INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
mchunkptr bck; /* misc temp for linking */
mchunkptr fwd; /* misc temp for linking */
size = chunksize (p);
/* Little security check which won't hurt performance: the
allocator never wrapps around at the end of the address space.
Therefore we can exclude some size values which might appear
here by accident or by "design" from some intruder. */
if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
|| __builtin_expect (misaligned_chunk (p), 0))
malloc_printerr ("free(): invalid pointer");
/* We know that each chunk is at least MINSIZE bytes in size or a
multiple of MALLOC_ALIGNMENT. */
if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
malloc_printerr ("free(): invalid size");
check_inuse_chunk(av, p);
#if USE_TCACHE
{
size_t tc_idx = csize2tidx (size);
if (tcache
&& tc_idx < mp_.tcache_bins
&& tcache->counts[tc_idx] < mp_.tcache_count)
{
tcache_put (p, tc_idx);
return;
}
}
#endif
/*
If eligible, place chunk on a fastbin so it can be found
and used quickly in malloc.
*/
if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
#if TRIM_FASTBINS
/*
If TRIM_FASTBINS set, don't place chunks
bordering top into fastbins
*/
&& (chunk_at_offset(p, size) != av->top)
#endif
) {
if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
<= 2 * SIZE_SZ, 0)
|| __builtin_expect (chunksize (chunk_at_offset (p, size))
>= av->system_mem, 0))
{
bool fail = true;
/* We might not have a lock at this point and concurrent modifications
of system_mem might result in a false positive. Redo the test after
getting the lock. */
if (!have_lock)
{
__libc_lock_lock (av->mutex);
fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
|| chunksize (chunk_at_offset (p, size)) >= av->system_mem);
__libc_lock_unlock (av->mutex);
}
if (fail)
malloc_printerr ("free(): invalid next size (fast)");
}
free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
atomic_store_relaxed (&av->have_fastchunks, true);
unsigned int idx = fastbin_index(size);
fb = &fastbin (av, idx);
/* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
mchunkptr old = *fb, old2;
if (SINGLE_THREAD_P)
{
/* Check that the top of the bin is not the record we are going to
add (i.e., double free). */
if (__builtin_expect (old == p, 0))
malloc_printerr ("double free or corruption (fasttop)");
p->fd = old;
*fb = p;
}
else
do
{
/* Check that the top of the bin is not the record we are going to
add (i.e., double free). */
if (__builtin_expect (old == p, 0))
malloc_printerr ("double free or corruption (fasttop)");
p->fd = old2 = old;
}
while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
!= old2);
/* Check that size of fastbin chunk at the top is the same as
size of the chunk that we are adding. We can dereference OLD
only if we have the lock, otherwise it might have already been
allocated again. */
if (have_lock && old != NULL
&& __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
malloc_printerr ("invalid fastbin entry (free)");
}
/*
Consolidate other non-mmapped chunks as they arrive.
*/
else if (!chunk_is_mmapped(p)) {
/* If we're single-threaded, don't lock the arena. */
if (SINGLE_THREAD_P)
have_lock = true;
if (!have_lock)
__libc_lock_lock (av->mutex);
nextchunk = chunk_at_offset(p, size);
/* Lightweight tests: check whether the block is already the
top block. */
if (__glibc_unlikely (p == av->top))
malloc_printerr ("double free or corruption (top)");
/* Or whether the next chunk is beyond the boundaries of the arena. */
if (__builtin_expect (contiguous (av)
&& (char *) nextchunk
>= ((char *) av->top + chunksize(av->top)), 0))
malloc_printerr ("double free or corruption (out)");
/* Or whether the block is actually not marked used. */
if (__glibc_unlikely (!prev_inuse(nextchunk)))
malloc_printerr ("double free or corruption (!prev)");
nextsize = chunksize(nextchunk);
if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
|| __builtin_expect (nextsize >= av->system_mem, 0))
malloc_printerr ("free(): invalid next size (normal)");
free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
/* consolidate backward */
if (!prev_inuse(p)) {
prevsize = prev_size (p);
size += prevsize;
p = chunk_at_offset(p, -((long) prevsize));
unlink(av, p, bck, fwd);
}
if (nextchunk != av->top) {
/* get and clear inuse bit */
nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
/* consolidate forward */
if (!nextinuse) {
unlink(av, nextchunk, bck, fwd);
size += nextsize;
} else
clear_inuse_bit_at_offset(nextchunk, 0);
/*
Place the chunk in unsorted chunk list. Chunks are
not placed into regular bins until after they have
been given one chance to be used in malloc.
*/
bck = unsorted_chunks(av);
fwd = bck->fd;
if (__glibc_unlikely (fwd->bk != bck))
malloc_printerr ("free(): corrupted unsorted chunks");
p->fd = fwd;
p->bk = bck;
if (!in_smallbin_range(size))
{
p->fd_nextsize = NULL;
p->bk_nextsize = NULL;
}
bck->fd = p;
fwd->bk = p;
set_head(p, size | PREV_INUSE);
set_foot(p, size);
check_free_chunk(av, p);
}
/*
If the chunk borders the current high end of memory,
consolidate into top
*/
else {
size += nextsize;
set_head(p, size | PREV_INUSE);
av->top = p;
check_chunk(av, p);
}
/*
If freeing a large space, consolidate possibly-surrounding
chunks. Then, if the total unused topmost memory exceeds trim
threshold, ask malloc_trim to reduce top.
Unless max_fast is 0, we don't know if there are fastbins
bordering top, so we cannot tell for sure whether threshold
has been reached unless fastbins are consolidated. But we
don't want to consolidate on each free. As a compromise,
consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
is reached.
*/
if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
if (atomic_load_relaxed (&av->have_fastchunks))
malloc_consolidate(av);
if (av == &main_arena) {
#ifndef MORECORE_CANNOT_TRIM
if ((unsigned long)(chunksize(av->top)) >=
(unsigned long)(mp_.trim_threshold))
systrim(mp_.top_pad, av);
#endif
} else {
/* Always try heap_trim(), even if the top chunk is not
large, because the corresponding heap might go away. */
heap_info *heap = heap_for_ptr(top(av));
assert(heap->ar_ptr == av);
heap_trim(heap, mp_.top_pad);
}
}
if (!have_lock)
__libc_lock_unlock (av->mutex);
}
/*
If the chunk was allocated via mmap, release via munmap().
*/
else {
munmap_chunk (p);
}
| 1
|
216,123
|
/* Append the textual form of the ct_clear action (which takes no
 * arguments) to @s, wrapped in the configured color escape codes. */
format_CT_CLEAR(const struct ofpact_null *a OVS_UNUSED, struct ds *s)
{
    ds_put_format(s, "%sct_clear%s", colors.value, colors.end);
}
| 0
|
84,323
|
/* Walk every IPv6 FIB table in @net and replay its routes to notifier @nb
 * via a temporary fib6 walker.  Returns 0 on success or -ENOMEM if the
 * walker cannot be allocated. */
int fib6_tables_dump(struct net *net, struct notifier_block *nb)
{
	struct fib6_dump_arg arg;
	struct fib6_walker *w;
	unsigned int h;

	w = kzalloc(sizeof(*w), GFP_ATOMIC);
	if (!w)
		return -ENOMEM;

	w->func = fib6_node_dump;
	arg.net = net;
	arg.nb = nb;
	/* arg lives on our stack; the walker is freed before returning. */
	w->args = &arg;

	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv6.fib_table_hash[h];
		struct fib6_table *tb;

		hlist_for_each_entry_rcu(tb, head, tb6_hlist)
			fib6_table_dump(net, tb, w);
	}

	kfree(w);
	return 0;
}
| 0
|
185,693
|
// Delegates WebUI URL logging to the shared webui helper; returns whether
// the URL was logged.
bool ChromeContentBrowserClient::LogWebUIUrl(const GURL& web_ui_url) const {
  return webui::LogWebUIUrl(web_ui_url);
}
| 0
|
100,067
|
/* Append RBinSymbols to @ret for the kext's init/fini function pointers.
 * @pointers is a 0-terminated array of virtual addresses; entries outside
 * the kext's executable text range are skipped.  Symbols are named
 * "<kext>.init.N" or "<kext>.fini.N" depending on @type. */
static void create_initterm_syms(RKext *kext, RList *ret, int type, ut64 *pointers) {
	int i = 0;
	int count = 0;
	for (; pointers[i]; i++) {
		ut64 func_vaddr = pointers[i];
		ut64 text_start = kext->vaddr;
		ut64 text_end = text_start + kext->text_range.size;
		/* Empty text section: nothing can be inside it. */
		if (text_start == text_end) {
			continue;
		}
		if (text_start > func_vaddr || func_vaddr >= text_end) {
			continue;
		}
		RBinSymbol *sym = R_NEW0 (RBinSymbol);
		if (!sym) {
			break;
		}
		sym->name = r_str_newf ("%s.%s.%d", kext_short_name (kext), (type == R_BIN_ENTRY_TYPE_INIT) ? "init" : "fini", count++);
		sym->vaddr = func_vaddr;
		/* Translate virtual address back to file/physical offset. */
		sym->paddr = func_vaddr - kext->pa2va_exec;
		sym->size = 0;
		sym->forwarder = "NONE";
		sym->bind = "GLOBAL";
		sym->type = "FUNC";
		r_list_append (ret, sym);
	}
}
| 0
|
56,615
|
/*
 * fb_get_color_depth - compute the effective color depth of a framebuffer.
 * Monochrome visuals are 1 bit deep; otherwise, when the RGB components all
 * share the same length and offset the depth is a single component's length,
 * else it is the sum of the three component lengths.
 */
int fb_get_color_depth(struct fb_var_screeninfo *var,
		       struct fb_fix_screeninfo *fix)
{
	if (fix->visual == FB_VISUAL_MONO01 ||
	    fix->visual == FB_VISUAL_MONO10)
		return 1;

	if (var->green.length == var->blue.length &&
	    var->green.length == var->red.length &&
	    var->green.offset == var->blue.offset &&
	    var->green.offset == var->red.offset)
		return var->green.length;

	return var->green.length + var->red.length + var->blue.length;
}
| 0
|
297,201
|
/* Wavelet denoising over the raw image: each channel is decomposed with a
 * 5-level à-trous hat transform, detail coefficients are soft-thresholded
 * (threshold scaled per level by the noise[] table), and the image is
 * reconstructed.  For 3-color Bayer data the two green channels are then
 * pulled closer together.  Works in a sqrt-domain (value * 256^2). */
void CLASS wavelet_denoise()
{
  float *fimg = 0, *temp, thold, mul[2], avg, diff;
  int scale = 1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
  ushort *window[4];
  static const float noise[] = {0.8002, 0.2735, 0.1202, 0.0585, 0.0291, 0.0152, 0.0080, 0.0044};

#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("Wavelet denoising...\n"));
#endif

  /* Scale maximum/black up so values fill the 16-bit range. */
  while (maximum << scale < 0x10000)
    scale++;
  maximum <<= --scale;
  black <<= scale;
  FORC4 cblack[c] <<= scale;
  /* Guard against overflow in the allocation size computation. */
  if ((size = iheight * iwidth) < 0x15550000)
    fimg = (float *)malloc((size * 3 + iheight + iwidth) * sizeof *fimg);
  merror(fimg, "wavelet_denoise()");
  temp = fimg + size * 3;
  /* Treat the second green of a Bayer mosaic as a fourth channel. */
  if ((nc = colors) == 3 && filters)
    nc++;
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp parallel default(shared) private(i, col, row, thold, lev, lpass, hpass, temp, c) firstprivate(scale, size)
#endif
  {
    /* Each thread gets its own scratch row/column buffer. */
    temp = (float *)malloc((iheight + iwidth) * sizeof *fimg);
    FORC(nc)
    { /* denoise R,G1,B,G3 individually */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
      /* Work in sqrt domain so noise is roughly uniform. */
      for (i = 0; i < size; i++)
        fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
      for (hpass = lev = 0; lev < 5; lev++)
      {
        lpass = size * ((lev & 1) + 1);
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
        for (row = 0; row < iheight; row++)
        {
          hat_transform(temp, fimg + hpass + row * iwidth, 1, iwidth, 1 << lev);
          for (col = 0; col < iwidth; col++)
            fimg[lpass + row * iwidth + col] = temp[col] * 0.25;
        }
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
        for (col = 0; col < iwidth; col++)
        {
          hat_transform(temp, fimg + lpass + col, iwidth, iheight, 1 << lev);
          for (row = 0; row < iheight; row++)
            fimg[lpass + row * iwidth + col] = temp[row] * 0.25;
        }
        /* Soft-threshold the detail (high-pass) coefficients. */
        thold = threshold * noise[lev];
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
        for (i = 0; i < size; i++)
        {
          fimg[hpass + i] -= fimg[lpass + i];
          if (fimg[hpass + i] < -thold)
            fimg[hpass + i] += thold;
          else if (fimg[hpass + i] > thold)
            fimg[hpass + i] -= thold;
          else
            fimg[hpass + i] = 0;
          if (hpass)
            fimg[i] += fimg[hpass + i];
        }
        hpass = lpass;
      }
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
      /* Reconstruct: square back out of the sqrt domain. */
      for (i = 0; i < size; i++)
        image[i][c] = CLIP(SQR(fimg[i] + fimg[lpass + i]) / 0x10000);
    }
    free(temp);
  } /* end omp parallel */
  /* the following loops are hard to parallize, no idea yes,
   * problem is wlast which is carrying dependency
   * second part should be easyer, but did not yet get it right.
   */
  if (filters && colors == 3)
  { /* pull G1 and G3 closer together */
    for (row = 0; row < 2; row++)
    {
      mul[row] = 0.125 * pre_mul[FC(row + 1, 0) | 1] / pre_mul[FC(row, 0) | 1];
      blk[row] = cblack[FC(row, 0) | 1];
    }
    for (i = 0; i < 4; i++)
      window[i] = (ushort *)fimg + width * i;
    for (wlast = -1, row = 1; row < height - 1; row++)
    {
      /* Slide the 3-row window down to the current row. */
      while (wlast < row + 1)
      {
        for (wlast++, i = 0; i < 4; i++)
          window[(i + 3) & 3] = window[i];
        for (col = FC(wlast, 1) & 1; col < width; col += 2)
          window[2][col] = BAYER(wlast, col);
      }
      thold = threshold / 512;
      for (col = (FC(row, 0) & 1) + 1; col < width - 1; col += 2)
      {
        /* Average of the four diagonal greens from the other green
         * channel, scaled by the channel-balance multiplier. */
        avg = (window[0][col - 1] + window[0][col + 1] + window[2][col - 1] + window[2][col + 1] - blk[~row & 1] * 4) *
                  mul[row & 1] +
              (window[1][col] + blk[row & 1]) * 0.5;
        avg = avg < 0 ? 0 : sqrt(avg);
        diff = sqrt((double)BAYER(row, col)) - avg;
        if (diff < -thold)
          diff += thold;
        else if (diff > thold)
          diff -= thold;
        else
          diff = 0;
        BAYER(row, col) = CLIP(SQR(avg + diff) + 0.5);
      }
    }
  }
  free(fimg);
}
| 0
|
224,709
|
// Starts observing profile-destroyed notifications and browser-list
// changes for the lifetime of this observer.
ProfileLaunchObserver() {
  registrar_.Add(this, chrome::NOTIFICATION_PROFILE_DESTROYED,
                 content::NotificationService::AllSources());
  BrowserList::AddObserver(this);
}
| 0
|
99,075
|
**/
static CImg<T> string(const char *const str, const bool is_last_zero=true, const bool is_shared=false) {
if (!str) return CImg<T>();
return CImg<T>(str,(unsigned int)std::strlen(str) + (is_last_zero?1:0),1,1,1,is_shared);
| 0
|
419,074
|
/* Switch client @c to key table @name, or back to the client's default
 * table when @name is NULL.  Takes a reference on the new table and drops
 * the reference on the old one. */
server_client_set_key_table(struct client *c, const char *name)
{
	if (name == NULL)
		name = server_client_get_key_table(c);

	key_bindings_unref_table(c->keytable);
	/* Create the table if it does not exist yet (second argument). */
	c->keytable = key_bindings_get_table(name, 1);
	c->keytable->references++;
}
| 0
|
162,050
|
/* Arrange for a Post-Proxy-Type of Fail-* to run when a proxied request got
 * no response.  Picks the dictionary value matching the request type
 * (authentication, accounting, CoA or Disconnect), falling back to plain
 * "Fail".  Returns 1 when a Post-Proxy-Type was installed in the request's
 * config items, 0 when this request type has no failure handling. */
static int setup_post_proxy_fail(REQUEST *request)
{
	DICT_VALUE *dval = NULL;
	VALUE_PAIR *vp;

	request->child_state = REQUEST_RUNNING;

	if (request->packet->code == PW_AUTHENTICATION_REQUEST) {
		dval = dict_valbyname(PW_POST_PROXY_TYPE, "Fail-Authentication");

	} else if (request->packet->code == PW_ACCOUNTING_REQUEST) {
		dval = dict_valbyname(PW_POST_PROXY_TYPE, "Fail-Accounting");
#ifdef WITH_COA
		/*
		 *	See no_response_to_coa_request
		 */
	} else if (((request->packet->code >> 8) & 0xff) == PW_COA_REQUEST) {
		request->packet->code &= 0xff; /* restore it */

		if (request->proxy->code == PW_COA_REQUEST) {
			dval = dict_valbyname(PW_POST_PROXY_TYPE, "Fail-CoA");

		} else if (request->proxy->code == PW_DISCONNECT_REQUEST) {
			dval = dict_valbyname(PW_POST_PROXY_TYPE, "Fail-Disconnect");
		} else {
			return 0;
		}
#endif
	} else {
		return 0;
	}

	/* Fall back to the generic failure type if no specific one exists. */
	if (!dval) dval = dict_valbyname(PW_POST_PROXY_TYPE, "Fail");

	if (!dval) {
		pairdelete(&request->config_items, PW_POST_PROXY_TYPE);
		return 0;
	}

	vp = pairfind(request->config_items, PW_POST_PROXY_TYPE);
	if (!vp) vp = radius_paircreate(request, &request->config_items,
					PW_POST_PROXY_TYPE, PW_TYPE_INTEGER);
	vp->vp_integer = dval->value;

	rad_assert(request->proxy_reply == NULL);

	return 1;
}
| 0
|
259,617
|
/* Set the error-recovery mode of @ctx.  Returns 0 on success, or -1 with
   errno = EINVAL when @ctx is NULL. */
int modbus_set_error_recovery(modbus_t *ctx,
                              modbus_error_recovery_mode error_recovery)
{
    if (ctx != NULL) {
        /* The type of modbus_error_recovery_mode is unsigned enum */
        ctx->error_recovery = (uint8_t) error_recovery;
        return 0;
    }

    errno = EINVAL;
    return -1;
}
| 0
|
317,374
|
bool V8TestObject::HasInstance(v8::Local<v8::Value> v8_value, v8::Isolate* isolate) {
  // Delegate to the per-isolate registry, matching against this interface's
  // wrapper type info.
  V8PerIsolateData* per_isolate_data = V8PerIsolateData::From(isolate);
  return per_isolate_data->HasInstance(V8TestObject::GetWrapperTypeInfo(), v8_value);
}
| 0
|
130,680
|
// Handle a DKG complaint response: for polynomial '_polyName' and participant
// index '_ind', return the previously stored share*G2 value and the decrypted
// DH key so the accused share can be verified.  Errors are reported through
// the result object via HANDLE_SGX_EXCEPTION.
Json::Value SGXWalletServer::complaintResponseImpl(const string &_polyName, int _ind) {
    spdlog::info("Entering {}", __FUNCTION__);
    INIT_RESULT(result)
    try {
        if (!checkName(_polyName, "POLY")) {
            throw SGXException(INVALID_POLY_NAME, "Invalid polynomial name");
        }
        // Key format matches the one used when the share was written.
        string shareG2_name = "shareG2_" + _polyName + "_" + to_string(_ind) + ":";
        shared_ptr <string> shareG2_ptr = readFromDb(shareG2_name);
        string DHKey = decryptDHKey(_polyName, _ind);
        // NOTE(review): shareG2_ptr is dereferenced without a null check; if
        // readFromDb can return an empty pointer for a missing key this is a
        // crash path -- confirm readFromDb's contract (it may throw instead).
        result["share*G2"] = *shareG2_ptr;
        result["dhKey"] = DHKey;
    } HANDLE_SGX_EXCEPTION(result)
    RETURN_SUCCESS(result);
}
| 0
|
161,963
|
/* Stub used when sysctl support is compiled out: registration is a no-op
 * and callers receive NULL (there is no header to unregister later). */
struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
{
	return NULL;
}
| 0
|
12,281
|
// Broker exposing the Kiosk Next Home interface for |context|: clones the
// browser context's service-manager connector and creates the app controller
// bound to the context's Profile.
// NOTE(review): the result of BrowserContext::GetConnectorFor(context) is
// dereferenced (->Clone()) without a null check -- confirm it cannot return
// null for the contexts this broker is constructed with.
KioskNextHomeInterfaceBrokerImpl::KioskNextHomeInterfaceBrokerImpl(
    content::BrowserContext* context)
    : connector_(content::BrowserContext::GetConnectorFor(context)->Clone()),
      app_controller_(std::make_unique<AppControllerImpl>(
          Profile::FromBrowserContext(context))) {}
| 1
|
76,130
|
static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
		  bool *fragstolen)
{
	/* Append skb's payload to the socket receive queue, coalescing it
	 * into the current tail skb when possible.  Returns 1 if the payload
	 * was merged into the tail ("eaten"), 0 if skb was queued as-is. */
	struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
	int eaten = 0;

	/* Strip the protocol header so only payload is queued. */
	__skb_pull(skb, hdrlen);

	if (tail && tcp_try_coalesce(sk, tail, skb, fragstolen))
		eaten = 1;

	/* Advance rcv_nxt before (possibly) taking ownership of skb. */
	tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
	if (!eaten) {
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
	}
	return eaten;
}
| 0
|
461,567
|
/* Import a header blob into a Header object.
 * blob:   raw header blob to import
 * bsize:  size of blob in bytes, or 0 to have it computed from the intro
 * flags:  HEADERIMPORT_COPY to operate on a private copy of the blob,
 *         HEADERIMPORT_FAST to skip the more expensive verification
 * Returns a new Header on success, NULL on failure.  On failure any private
 * copy is freed; the caller's blob is never freed here. */
Header headerImport(void * blob, unsigned int bsize, headerImportFlags flags)
{
    Header h = NULL;
    struct hdrblob_s hblob;
    char *buf = NULL;
    void * b = blob;
    if (flags & HEADERIMPORT_COPY) {
	/* Unknown size: parse the intro once just to learn the length. */
	if (bsize == 0 && hdrblobInit(b, 0, 0, 0, &hblob, &buf) == RPMRC_OK)
	    bsize = hblob.pvlen;
	if (bsize == 0)
	    goto exit;
	b = memcpy(xmalloc(bsize), b, bsize);
    }
    /* Sanity checks on header intro. */
    if (hdrblobInit(b, bsize, 0, 0, &hblob, &buf) == RPMRC_OK)
	hdrblobImport(&hblob, (flags & HEADERIMPORT_FAST), &h, &buf);
exit:
    /* Release the private copy only -- 'b != blob' is true iff we copied. */
    if (h == NULL && b != blob)
	free(b);
    free(buf);
    return h;
}
| 0
|
216,911
|
vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx,
                           struct Vmxnet3_RxDesc *dbuf, uint32_t *didx)
{
    /* Fetch the current descriptor of RX ring 'ridx' in queue 'qidx' into
     * *dbuf, and report its index through *didx. */
    Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx];
    PCIDevice *d = PCI_DEVICE(s);

    *didx = vmxnet3_ring_curr_cell_idx(ring);
    vmxnet3_ring_read_curr_cell(d, ring, dbuf);
}
| 0
|
121,652
|
/*
 * propagate_mount_busy - check if mnt or any of its propagated copies is busy
 * @mnt:    the mount being considered for unmount
 * @refcnt: the reference count @mnt is expected to have if it is not busy
 *
 * Returns 1 if the unmount must fail (something is busy), 0 otherwise.
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *m, *child;
	struct mount *parent = mnt->mnt_parent;
	int ret = 0;
	/* mnt == parent means it is the root of a detached tree:
	 * no propagation to consider, just check its own refcount. */
	if (mnt == parent)
		return do_refcount_check(mnt, refcnt);
	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;
	/* Walk every peer/slave the unmount would propagate to and check
	 * whether the corresponding child mount there is busy. */
	for (m = propagation_next(parent, parent); m;
	     		m = propagation_next(m, parent)) {
		child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
		if (child && list_empty(&child->mnt_mounts) &&
		    (ret = do_refcount_check(child, 1)))
			break;
	}
	return ret;
}
| 0
|
74,652
|
const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq)
{
	/* Expose the runqueue's deadline-class PELT average to tracing.
	 * avg_dl only exists on SMP builds; otherwise always NULL. */
#ifdef CONFIG_SMP
	if (!rq)
		return NULL;
	return &rq->avg_dl;
#else
	return NULL;
#endif
}
| 0
|
226,725
|
static void TestInterfaceEmptyFrozenArrayAttributeAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
  // Getter for the FrozenArray attribute: convert the C++ value to V8,
  // freeze the resulting object per WebIDL FrozenArray semantics, and
  // return it.
  v8::Isolate* isolate = info.GetIsolate();
  v8::Local<v8::Object> holder = info.Holder();
  TestObject* impl = V8TestObject::ToImpl(holder);
  v8::Local<v8::Value> v8_value = ToV8(impl->testInterfaceEmptyFrozenArrayAttribute(), holder, isolate);
  V8SetReturnValue(info, FreezeV8Object(v8_value, isolate));
}
| 0
|
22,263
|
fz_colorspace *pdf_xobject_colorspace(fz_context *ctx, pdf_xobject *xobj)
{
	/* Return the colorspace declared in the XObject's /Group /CS entry,
	 * or NULL if absent or unloadable.  Caller owns the reference. */
	pdf_obj *group = pdf_dict_get(ctx, xobj->obj, PDF_NAME_Group);

	if (group)
	{
		pdf_obj *cs = pdf_dict_get(ctx, group, PDF_NAME_CS);
		if (cs)
		{
			fz_colorspace *colorspace = NULL;
			/* A bad colorspace is non-fatal: warn and return NULL. */
			fz_try(ctx)
				colorspace = pdf_load_colorspace(ctx, cs);
			fz_catch(ctx)
				fz_warn(ctx, "cannot load xobject colorspace");
			return colorspace;
		}
	}
	return NULL;
}
| 0
|
228,402
|
// console.timelineEnd() is deprecated in favour of console.timeEnd():
// report the deprecation once, then delegate to the timeEnd implementation
// (the 'true' argument marks the timeline-flavoured call site).
void V8Console::timelineEndCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
    ConsoleHelper(info).reportDeprecatedCall("V8Console#timelineEnd", "'console.timelineEnd' is deprecated. Please use 'console.timeEnd' instead.");
    timeEndFunction(info, true);
}
| 0
|
391,661
|
/*
 * xmlDOMWrapReconcileNamespaces:
 * @ctxt: DOM wrapper context (currently unused)
 * @elem: the element subtree to reconcile
 * @options: XML_DOM_RECONNS_REMOVEREDUND to strip redundant ns-decls
 *
 * Walks the subtree rooted at @elem, ensuring every element/attribute
 * namespace reference resolves to an in-scope declaration, optionally
 * removing redundant declarations.  Returns 0 on success, -1 on error.
 * The traversal is iterative (no recursion) and maintains a depth-scoped
 * namespace map (nsMap) with "shadowing" bookkeeping.
 */
xmlDOMWrapReconcileNamespaces(xmlDOMWrapCtxtPtr ctxt ATTRIBUTE_UNUSED,
			      xmlNodePtr elem,
			      int options)
{
    int depth = -1, adoptns = 0, parnsdone = 0;
    xmlNsPtr ns, prevns;
    xmlDocPtr doc;
    xmlNodePtr cur, curElem = NULL;
    xmlNsMapPtr nsMap = NULL;
    xmlNsMapItemPtr /* topmi = NULL, */ mi;
    /* @ancestorsOnly should be set by an option flag. */
    int ancestorsOnly = 0;
    int optRemoveRedundantNS =
	((xmlDOMReconcileNSOptions) options & XML_DOM_RECONNS_REMOVEREDUND) ? 1 : 0;
    xmlNsPtr *listRedund = NULL;
    int sizeRedund = 0, nbRedund = 0, ret, i, j;

    if ((elem == NULL) || (elem->doc == NULL) ||
	(elem->type != XML_ELEMENT_NODE))
	return (-1);

    doc = elem->doc;
    cur = elem;
    do {
	switch (cur->type) {
	    case XML_ELEMENT_NODE:
		adoptns = 1;
		curElem = cur;
		depth++;
		/*
		* Namespace declarations.
		*/
		if (cur->nsDef != NULL) {
		    prevns = NULL;
		    ns = cur->nsDef;
		    while (ns != NULL) {
			if (! parnsdone) {
			    /* Lazily gather the ancestors' in-scope
			     * ns-decls the first time they are needed. */
			    if ((elem->parent) &&
				((xmlNodePtr) elem->parent->doc != elem->parent)) {
				/*
				* Gather ancestor in-scope ns-decls.
				*/
				if (xmlDOMWrapNSNormGatherInScopeNs(&nsMap,
				    elem->parent) == -1)
				    goto internal_error;
			    }
			    parnsdone = 1;
			}

			/*
			* Lookup the ns ancestor-axis for equal ns-decls in scope.
			*/
			if (optRemoveRedundantNS && XML_NSMAP_NOTEMPTY(nsMap)) {
			    XML_NSMAP_FOREACH(nsMap, mi) {
				if ((mi->depth >= XML_TREE_NSMAP_PARENT) &&
				    (mi->shadowDepth == -1) &&
				    ((ns->prefix == mi->newNs->prefix) ||
				      xmlStrEqual(ns->prefix, mi->newNs->prefix)) &&
				    ((ns->href == mi->newNs->href) ||
				      xmlStrEqual(ns->href, mi->newNs->href)))
				{
				    /*
				    * A redundant ns-decl was found.
				    * Add it to the list of redundant ns-decls.
				    */
				    if (xmlDOMWrapNSNormAddNsMapItem2(&listRedund,
					&sizeRedund, &nbRedund, ns, mi->newNs) == -1)
					goto internal_error;
				    /*
				    * Remove the ns-decl from the element-node.
				    */
				    if (prevns)
					prevns->next = ns->next;
				    else
					cur->nsDef = ns->next;
				    goto next_ns_decl;
				}
			    }
			}

			/*
			* Skip ns-references handling if the referenced
			* ns-decl is declared on the same element.
			*/
			if ((cur->ns != NULL) && adoptns && (cur->ns == ns))
			    adoptns = 0;
			/*
			* Does it shadow any ns-decl?
			*/
			if (XML_NSMAP_NOTEMPTY(nsMap)) {
			    XML_NSMAP_FOREACH(nsMap, mi) {
				if ((mi->depth >= XML_TREE_NSMAP_PARENT) &&
				    (mi->shadowDepth == -1) &&
				    ((ns->prefix == mi->newNs->prefix) ||
				      xmlStrEqual(ns->prefix, mi->newNs->prefix))) {
				    mi->shadowDepth = depth;
				}
			    }
			}
			/*
			* Push mapping.
			*/
			if (xmlDOMWrapNsMapAddItem(&nsMap, -1, ns, ns,
			    depth) == NULL)
			    goto internal_error;

			prevns = ns;
next_ns_decl:
			ns = ns->next;
		    }
		}
		if (! adoptns)
		    goto ns_end;
		/* No break on purpose. */
	    case XML_ATTRIBUTE_NODE:
		/* No ns, no fun. */
		if (cur->ns == NULL)
		    goto ns_end;

		if (! parnsdone) {
		    if ((elem->parent) &&
			((xmlNodePtr) elem->parent->doc != elem->parent)) {
			if (xmlDOMWrapNSNormGatherInScopeNs(&nsMap,
				elem->parent) == -1)
			    goto internal_error;
		    }
		    parnsdone = 1;
		}
		/*
		* Adjust the reference if this was a redundant ns-decl.
		*/
		if (listRedund) {
		   for (i = 0, j = 0; i < nbRedund; i++, j += 2) {
		       if (cur->ns == listRedund[j]) {
			   cur->ns = listRedund[++j];
			   break;
		       }
		   }
		}
		/*
		* Adopt ns-references.
		*/
		if (XML_NSMAP_NOTEMPTY(nsMap)) {
		    /*
		    * Search for a mapping.
		    */
		    XML_NSMAP_FOREACH(nsMap, mi) {
			if ((mi->shadowDepth == -1) &&
			    (cur->ns == mi->oldNs)) {
			    cur->ns = mi->newNs;
			    goto ns_end;
			}
		    }
		}
		/*
		* Aquire a normalized ns-decl and add it to the map.
		*/
		if (xmlDOMWrapNSNormAquireNormalizedNs(doc, curElem,
			cur->ns, &ns,
			&nsMap, depth,
			ancestorsOnly,
			(cur->type == XML_ATTRIBUTE_NODE) ? 1 : 0) == -1)
		    goto internal_error;
		cur->ns = ns;

ns_end:
		if ((cur->type == XML_ELEMENT_NODE) &&
		    (cur->properties != NULL)) {
		    /*
		    * Process attributes.
		    */
		    cur = (xmlNodePtr) cur->properties;
		    continue;
		}
		break;
	    default:
		goto next_sibling;
	}
into_content:
	if ((cur->type == XML_ELEMENT_NODE) &&
	    (cur->children != NULL)) {
	    /*
	    * Process content of element-nodes only.
	    */
	    cur = cur->children;
	    continue;
	}
next_sibling:
	if (cur == elem)
	    break;
	if (cur->type == XML_ELEMENT_NODE) {
	    if (XML_NSMAP_NOTEMPTY(nsMap)) {
		/*
		* Pop mappings.
		*/
		while ((nsMap->last != NULL) &&
		    (nsMap->last->depth >= depth))
		{
		    XML_NSMAP_POP(nsMap, mi)
		}
		/*
		* Unshadow.
		*/
		XML_NSMAP_FOREACH(nsMap, mi) {
		    if (mi->shadowDepth >= depth)
			mi->shadowDepth = -1;
		}
	    }
	    depth--;
	}
	if (cur->next != NULL)
	    cur = cur->next;
	else {
	    if (cur->type == XML_ATTRIBUTE_NODE) {
		/* Last attribute processed: descend into element content. */
		cur = cur->parent;
		goto into_content;
	    }
	    cur = cur->parent;
	    goto next_sibling;
	}
    } while (cur != NULL);

    ret = 0;
    goto exit;
internal_error:
    ret = -1;
exit:
    /* Redundant decls were unlinked during the walk; free them now.
     * listRedund stores (old, replacement) pairs, hence the j += 2. */
    if (listRedund) {
	for (i = 0, j = 0; i < nbRedund; i++, j += 2) {
	    xmlFreeNs(listRedund[j]);
	}
	xmlFree(listRedund);
    }
    if (nsMap != NULL)
	xmlDOMWrapNsMapFree(nsMap);
    return (ret);
}
| 0
|
816
|
static int16_t *wmv2_pred_motion(Wmv2Context *w, int *px, int *py)
{
    /* Predict the motion vector for the current macroblock from its left
     * (A), top (B) and top-right (C) neighbours.  Writes the prediction to
     * (*px, *py) and returns the slot for the current block's MV. */
    MpegEncContext *const s = &w->s;
    int xy, wrap, diff, type;
    int16_t *A, *B, *C, *mot_val;

    wrap = s->b8_stride;
    xy   = s->block_index[0];

    mot_val = s->current_picture.f.motion_val[0][xy];
    A       = s->current_picture.f.motion_val[0][xy - 1];
    B       = s->current_picture.f.motion_val[0][xy - wrap];
    C       = s->current_picture.f.motion_val[0][xy + 2 - wrap];

    /* NOTE(review): B and C are computed unconditionally even on the first
     * slice line, where xy - wrap points above the current row; they are
     * only dereferenced in branches guarded by !s->first_slice_line, but
     * confirm motion_val provides the padding row this relies on. */
    if (s->mb_x && !s->first_slice_line && !s->mspel && w->top_left_mv_flag)
        diff = FFMAX(FFABS(A[0] - B[0]), FFABS(A[1] - B[1]));
    else
        diff = 0;

    /* A large left/top disagreement means the predictor choice is coded
     * explicitly in the bitstream; otherwise use median prediction. */
    if (diff >= 8)
        type = get_bits1(&s->gb);
    else
        type = 2;

    if (type == 0) {            /* predict from left neighbour */
        *px = A[0];
        *py = A[1];
    } else if (type == 1) {     /* predict from top neighbour */
        *px = B[0];
        *py = B[1];
    } else {                    /* median prediction */
        if (s->first_slice_line) {
            *px = A[0];
            *py = A[1];
        } else {
            *px = mid_pred(A[0], B[0], C[0]);
            *py = mid_pred(A[1], B[1], C[1]);
        }
    }

    return mot_val;
}
| 1
|
388,709
|
int SMB_VFS_FSYNC_RECV(struct tevent_req *req, int *perrno)
{
	/* Receive half of the async fsync call: map a tevent unix error to
	 * *perrno and -1, otherwise return the fsync result. */
	struct smb_vfs_call_fsync_state *state;
	int err = 0;

	if (tevent_req_is_unix_error(req, &err)) {
		*perrno = err;
		return -1;
	}

	state = tevent_req_data(req, struct smb_vfs_call_fsync_state);
	return state->retval;
}
| 0
|
321,550
|
uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    /* MULQ/V: signed 64x64 multiply with overflow trap.  The low half is
     * the result; overflow is flagged when the high half is neither 0 nor
     * all-ones (equivalent to the unsigned test (hi + 1) > 1).
     * NOTE(review): this does not compare hi against the sign of the low
     * half, so it is the coarse overflow test -- confirm against the Alpha
     * architecture's MULQ/V definition. */
    uint64_t lo, hi;

    muls64(&lo, &hi, op1, op2);
    if (unlikely(hi != 0 && hi != (uint64_t)-1)) {
        arith_excp(env, GETPC(), EXC_M_IOV, 0);
    }
    return lo;
}
| 1
|
322,071
|
/* Hotplug handler for PC DIMM devices: resolve an address and slot for the
 * DIMM, map its memory region into hotplug memory and notify the ACPI device.
 *
 * NOTE(review): this extract appears garbled by whatever flattened it --
 * the nested `if (local_err)` chains read as "on error, keep going", the
 * error-path `goto out;` statements and several closing braces are missing,
 * and the body is not brace-balanced.  Do not trust the control flow as
 * shown; compare against upstream QEMU's pc_dimm_plug() before editing. */
static void pc_dimm_plug(HotplugHandler *hotplug_dev,
                         DeviceState *dev, Error **errp)
{
    int slot;
    HotplugHandlerClass *hhc;
    Error *local_err = NULL;
    PCMachineState *pcms = PC_MACHINE(hotplug_dev);
    MachineState *machine = MACHINE(hotplug_dev);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr = ddc->get_memory_region(dimm);
    uint64_t addr = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                            &local_err);
    if (local_err) {
        addr = pc_dimm_get_free_addr(pcms->hotplug_memory_base,
                                     memory_region_size(&pcms->hotplug_memory),
                                     !addr ? NULL : &addr,
                                     memory_region_size(mr), &local_err);
        if (local_err) {
            object_property_set_int(OBJECT(dev), addr, PC_DIMM_ADDR_PROP, &local_err);
            if (local_err) {
                trace_mhp_pc_dimm_assigned_address(addr);
                slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP, &local_err);
                if (local_err) {
                    slot = pc_dimm_get_free_slot(slot == PC_DIMM_UNASSIGNED_SLOT ? NULL : &slot,
                                                 machine->ram_slots, &local_err);
                    if (local_err) {
                        object_property_set_int(OBJECT(dev), slot, PC_DIMM_SLOT_PROP, &local_err);
                        if (local_err) {
                            trace_mhp_pc_dimm_assigned_slot(slot);
                            if (!pcms->acpi_dev) {
                                error_setg(&local_err,
                                           "memory hotplug is not enabled: missing acpi device");
                                memory_region_add_subregion(&pcms->hotplug_memory,
                                                            addr - pcms->hotplug_memory_base, mr);
                                vmstate_register_ram(mr, dev);
                                hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
                                hhc->plug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err);
out:
    error_propagate(errp, local_err);
|
342,561
|
/* Device reset for the ZynqMP generic QSPI controller: reset the legacy
 * SPIPS core, zero the register file, drain the generic-mode FIFOs and
 * restore the architected register reset values. */
static void xlnx_zynqmp_qspips_reset(DeviceState *d)
{
    XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(d);
    int i;

    xilinx_spips_reset(d);

    for (i = 0; i < XLNX_ZYNQMP_SPIPS_R_MAX; i++) {
        s->regs[i] = 0;
    }
    /* Bug fix: the previous code reset rx_fifo_g twice and never reset
     * tx_fifo_g, leaving stale TX data across reset.  Reset each generic
     * FIFO exactly once. */
    fifo8_reset(&s->rx_fifo_g);
    fifo8_reset(&s->tx_fifo_g);
    fifo32_reset(&s->fifo_g);
    s->regs[R_INTR_STATUS] = R_INTR_STATUS_RESET;
    s->regs[R_GPIO] = 1;
    s->regs[R_LPBK_DLY_ADJ] = R_LPBK_DLY_ADJ_RESET;
    s->regs[R_GQSPI_GFIFO_THRESH] = 0x10;
    s->regs[R_MOD_ID] = 0x01090101;
    s->regs[R_GQSPI_IMR] = R_GQSPI_IMR_RESET;
    s->regs[R_GQSPI_TX_THRESH] = 1;
    s->regs[R_GQSPI_RX_THRESH] = 1;
    s->regs[R_GQSPI_GPIO] = 1;
    s->regs[R_GQSPI_LPBK_DLY_ADJ] = R_GQSPI_LPBK_DLY_ADJ_RESET;
    s->regs[R_GQSPI_MOD_ID] = R_GQSPI_MOD_ID_RESET;
    s->regs[R_QSPIDMA_DST_CTRL] = R_QSPIDMA_DST_CTRL_RESET;
    s->regs[R_QSPIDMA_DST_I_MASK] = R_QSPIDMA_DST_I_MASK_RESET;
    s->regs[R_QSPIDMA_DST_CTRL2] = R_QSPIDMA_DST_CTRL2_RESET;
    s->man_start_com_g = false;
    s->gqspi_irqline = 0;
    xlnx_zynqmp_qspips_update_ixr(s);
}
| 0
|
479,601
|
//! Add random noise to pixel values (in-place).
/**
   \param sigma Amplitude of the noise.  A negative value is interpreted as
     a percentage of the image's global value range (max - min).
   \param noise_type Noise distribution:
     0 = gaussian, 1 = uniform, 2 = salt & pepper, 3 = poisson, 4 = rice.
   Results are clamped to the pixel type's representable range.  Each OpenMP
   thread derives its own RNG stream (rng + thread id) so parallel runs stay
   deterministic per-thread.
**/
CImg<T>& noise(const double sigma, const unsigned int noise_type=0) {
      if (is_empty()) return *this;
      const Tfloat vmin = (Tfloat)cimg::type<T>::min(), vmax = (Tfloat)cimg::type<T>::max();
      Tfloat nsigma = (Tfloat)sigma, m = 0, M = 0;
      // Zero amplitude is a no-op for every type except poisson (which
      // ignores sigma entirely).
      if (nsigma==0 && noise_type!=3) return *this;
      if (nsigma<0 || noise_type==2) m = (Tfloat)min_max(M);
      if (nsigma<0) nsigma = (Tfloat)(-nsigma*(M-m)/100.);
      switch (noise_type) {
      case 0 : { // Gaussian noise
        cimg_pragma_openmp(parallel cimg_openmp_if_size(size(),131072)) {
          cimg_uint64 rng = (cimg::_rand(),cimg::rng());

#if cimg_use_openmp!=0
          rng+=omp_get_thread_num();
#endif
          cimg_pragma_openmp(for)
          cimg_rofoff(*this,off) {
            Tfloat val = (Tfloat)(_data[off] + nsigma*cimg::grand(&rng));
            if (val>vmax) val = vmax;
            if (val<vmin) val = vmin;
            _data[off] = (T)val;
          }
          cimg::srand(rng);
        }
      } break;
      case 1 : { // Uniform noise
        cimg_pragma_openmp(parallel cimg_openmp_if_size(size(),131072)) {
          cimg_uint64 rng = (cimg::_rand(),cimg::rng());

#if cimg_use_openmp!=0
          rng+=omp_get_thread_num();
#endif
          cimg_pragma_openmp(for)
          cimg_rofoff(*this,off) {
            Tfloat val = (Tfloat)(_data[off] + nsigma*cimg::rand(-1,1,&rng));
            if (val>vmax) val = vmax;
            if (val<vmin) val = vmin;
            _data[off] = (T)val;
          }
          cimg::srand(rng);
        }
      } break;
      case 2 : { // Salt & Pepper noise
        if (nsigma<0) nsigma = -nsigma;
        // Degenerate (constant) image: widen [m,M] so salt and pepper
        // values are still distinguishable.
        if (M==m) {
          if (cimg::type<T>::is_float()) { --m; ++M; }
          else { m = (Tfloat)cimg::type<T>::min(); M = (Tfloat)cimg::type<T>::max(); }
        }
        cimg_pragma_openmp(parallel cimg_openmp_if_size(size(),131072)) {
          cimg_uint64 rng = (cimg::_rand(),cimg::rng());

#if cimg_use_openmp!=0
          rng+=omp_get_thread_num();
#endif
          cimg_pragma_openmp(for)
          cimg_rofoff(*this,off) if (cimg::rand(100,&rng)<nsigma) _data[off] = (T)(cimg::rand(1,&rng)<0.5?M:m);
          cimg::srand(rng);
        }
      } break;
      case 3 : { // Poisson Noise
        cimg_pragma_openmp(parallel cimg_openmp_if_size(size(),131072)) {
          cimg_uint64 rng = (cimg::_rand(),cimg::rng());

#if cimg_use_openmp!=0
          rng+=omp_get_thread_num();
#endif
          cimg_pragma_openmp(for)
          cimg_rofoff(*this,off) _data[off] = (T)cimg::prand(_data[off],&rng);
          cimg::srand(rng);
        }
      } break;
      case 4 : { // Rice noise
        const Tfloat sqrt2 = (Tfloat)std::sqrt(2.);
        cimg_pragma_openmp(parallel cimg_openmp_if_size(size(),131072)) {
          cimg_uint64 rng = (cimg::_rand(),cimg::rng());

#if cimg_use_openmp!=0
          rng+=omp_get_thread_num();
#endif
          cimg_pragma_openmp(for)
          cimg_rofoff(*this,off) {
            // Rician noise: magnitude of a complex sample whose real and
            // imaginary parts carry independent gaussian noise.
            const Tfloat
              val0 = (Tfloat)_data[off]/sqrt2,
              re = (Tfloat)(val0 + nsigma*cimg::grand(&rng)),
              im = (Tfloat)(val0 + nsigma*cimg::grand(&rng));
            Tfloat val = cimg::hypot(re,im);
            if (val>vmax) val = vmax;
            if (val<vmin) val = vmin;
            _data[off] = (T)val;
          }
          cimg::srand(rng);
        }
      } break;
      default :
        throw CImgArgumentException(_cimg_instance
                                    "noise(): Invalid specified noise type %d "
                                    "(should be { 0=gaussian | 1=uniform | 2=salt&Pepper | 3=poisson }).",
                                    cimg_instance,
                                    noise_type);
      }
      return *this;
    }
| 0
|
386,903
|
node_get_content (xmlNodePtr node)
{
  /* Resolve a node to its textual content: descend through element nodes
   * via their first child, return a text node's content directly, and
   * NULL for anything else (including a NULL node). */
  if (node == NULL)
    return NULL;

  if (node->type == XML_ELEMENT_NODE)
    return node_get_content (node->children);
  if (node->type == XML_TEXT_NODE)
    return (const char *) node->content;

  return NULL;
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.