functionSource stringlengths 20 97.4k | CWE-119 bool 2
classes | CWE-120 bool 2
classes | CWE-469 bool 2
classes | CWE-476 bool 2
classes | CWE-other bool 2
classes | combine int64 0 1 |
|---|---|---|---|---|---|---|
hawki_vector_get_mode(cpl_vector * vec)
{
int nb ;
int nbins ;
double min, max ;
double bin_size ;
cpl_bivector * hist ;
cpl_vector * hist_x ;
cpl_vector * hist_y ;
double cur_val ;
int cur_bin ;
double max_val ;
int max_bin ;
double mode ;
int i ;
/* Test entries */
if (vec == NULL) return -1.0 ;
/* Initialise */
nb = cpl_vector_get_size(vec) ;
/* Create the histogram */
nbins = 10 ;
min = cpl_vector_get_min(vec) ;
max = cpl_vector_get_max(vec) ;
bin_size = (max-min)/nbins ;
hist = cpl_bivector_new(nbins) ;
hist_x = cpl_bivector_get_x(hist) ;
hist_y = cpl_bivector_get_y(hist) ;
cpl_vector_fill(hist_x, 0.0) ;
cpl_vector_fill(hist_y, 0.0) ;
for (i=0 ; i<nbins ; i++) {
cpl_vector_set(hist_x, i, min + i * bin_size) ;
}
for (i=0 ; i<nb ; i++) {
cur_val = cpl_vector_get(vec, i) ;
cur_bin = (int)((cur_val - min) / bin_size) ;
if (cur_bin >= nbins) cur_bin -= 1.0 ;
cur_val = cpl_vector_get(hist_y, cur_bin) ;
cur_val += 1.0 ;
cpl_vector_set(hist_y, cur_bin, cur_val) ;
}
/* Get the mode of the histogram */
max_val = cpl_vector_get(hist_y, 0) ;
max_bin = 0 ;
for (i=0 ; i<nbins ; i++) {
cur_val = cpl_vector_get(hist_y, i) ;
if (cur_val > max_val) {
max_val = cur_val ;
max_bin = i ;
}
}
mode = cpl_vector_get(hist_x, max_bin) ;
cpl_bivector_delete(hist) ;
return mode ;
} | false | false | false | false | false | 0 |
gst_auto_video_convert_add_autoconvert (GstAutoVideoConvert * autovideoconvert)
{
GstPad *pad;
if (autovideoconvert->autoconvert)
return TRUE;
autovideoconvert->autoconvert =
gst_element_factory_make ("autoconvert", "autoconvertchild");
if (!autovideoconvert->autoconvert) {
GST_ERROR_OBJECT (autovideoconvert,
"Could not create autoconvert instance");
return FALSE;
}
/* first add autoconvert in bin */
gst_bin_add (GST_BIN (autovideoconvert),
gst_object_ref (autovideoconvert->autoconvert));
/* get sinkpad and link it to ghost sink pad */
pad = gst_element_get_static_pad (autovideoconvert->autoconvert, "sink");
gst_ghost_pad_set_target (GST_GHOST_PAD_CAST (autovideoconvert->sinkpad),
pad);
gst_object_unref (pad);
/* get srcpad and link it to ghost src pad */
pad = gst_element_get_static_pad (autovideoconvert->autoconvert, "src");
gst_ghost_pad_set_target (GST_GHOST_PAD_CAST (autovideoconvert->srcpad), pad);
gst_object_unref (pad);
return TRUE;
} | false | false | false | false | false | 0 |
alpasscv_nn_fetch(register alpasscv_susp_type susp, snd_list_type snd_list)
{
int cnt = 0; /* how many samples computed */
int togo;
int n;
sample_block_type out;
register sample_block_values_type out_ptr;
register sample_block_values_type out_ptr_reg;
register sample_type * delayptr_reg;
register sample_type * endptr_reg;
register sample_block_values_type feedback_ptr_reg;
register sample_block_values_type input_ptr_reg;
falloc_sample_block(out, "alpasscv_nn_fetch");
out_ptr = out->samples;
snd_list->block = out;
while (cnt < max_sample_block_len) { /* outer loop */
/* first compute how many samples to generate in inner loop: */
/* don't overflow the output sample block: */
togo = max_sample_block_len - cnt;
/* don't run past the input input sample block: */
susp_check_term_samples(input, input_ptr, input_cnt);
togo = min(togo, susp->input_cnt);
/* don't run past the feedback input sample block: */
susp_check_samples(feedback, feedback_ptr, feedback_cnt);
togo = min(togo, susp->feedback_cnt);
/* don't run past terminate time */
if (susp->terminate_cnt != UNKNOWN &&
susp->terminate_cnt <= susp->susp.current + cnt + togo) {
togo = susp->terminate_cnt - (susp->susp.current + cnt);
if (togo == 0) break;
}
n = togo;
delayptr_reg = susp->delayptr;
endptr_reg = susp->endptr;
feedback_ptr_reg = susp->feedback_ptr;
input_ptr_reg = susp->input_ptr;
out_ptr_reg = out_ptr;
if (n) do { /* the inner sample computation loop */
register sample_type y, z, fb;
y = *delayptr_reg;
*delayptr_reg++ = z = (sample_type) ((fb = *feedback_ptr_reg++) * y + *input_ptr_reg++);
*out_ptr_reg++ = (sample_type) (y - fb * z);
if (delayptr_reg >= endptr_reg) delayptr_reg = susp->delaybuf;;
} while (--n); /* inner loop */
susp->delayptr = delayptr_reg;
/* using feedback_ptr_reg is a bad idea on RS/6000: */
susp->feedback_ptr += togo;
/* using input_ptr_reg is a bad idea on RS/6000: */
susp->input_ptr += togo;
out_ptr += togo;
susp_took(input_cnt, togo);
susp_took(feedback_cnt, togo);
cnt += togo;
} /* outer loop */
/* test for termination */
if (togo == 0 && cnt == 0) {
snd_list_terminate(snd_list);
} else {
snd_list->block_len = cnt;
susp->susp.current += cnt;
}
} | false | false | false | false | false | 0 |
nvkm_disp_class_new(struct nvkm_device *device,
const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
const struct nvkm_disp_oclass *sclass = oclass->engn;
struct nvkm_disp *disp = nvkm_disp(oclass->engine);
struct nvkm_oproxy *oproxy;
int ret;
ret = nvkm_oproxy_new_(&nvkm_disp_class, oclass, &oproxy);
if (ret)
return ret;
*pobject = &oproxy->base;
mutex_lock(&disp->engine.subdev.mutex);
if (disp->client) {
mutex_unlock(&disp->engine.subdev.mutex);
return -EBUSY;
}
disp->client = oproxy;
mutex_unlock(&disp->engine.subdev.mutex);
return sclass->ctor(disp, oclass, data, size, &oproxy->object);
} | false | false | false | false | false | 0 |
regfind_count(const char* from, const char* exp, int offset)
{
regmatch_t pm;
int n = 0;
pm.begin = 0;
pm.end = 0;
pm = regfind(from,exp);
while(pm.begin != -1)
{
n++;
//printf("trovato a partire da %d e fino a %d, riparto da %ld\n",
// pm.begin,pm.end,pm.end - offset);
from += pm.end - offset;
pm = regfind(from,exp);
}
return n;
} | false | false | false | false | false | 0 |
buildregular()
{
if (mcount != 0x0)
{
buildea(&target);
postb |= regbits[source.base];
}
} | false | false | false | false | false | 0 |
ast_unregister_application(const char *app)
{
struct ast_app *tmp;
AST_RWLIST_WRLOCK(&apps);
AST_RWLIST_TRAVERSE_SAFE_BEGIN(&apps, tmp, list) {
if (!strcasecmp(app, tmp->name)) {
unreference_cached_app(tmp);
AST_RWLIST_REMOVE_CURRENT(list);
ast_verb(2, "Unregistered application '%s'\n", tmp->name);
ast_string_field_free_memory(tmp);
ast_free(tmp);
break;
}
}
AST_RWLIST_TRAVERSE_SAFE_END;
AST_RWLIST_UNLOCK(&apps);
return tmp ? 0 : -1;
} | true | true | false | false | false | 1 |
l_getpmentry(hfsvol *vol, Partition *map, unsigned long bnum)
{
block b;
const byte *ptr = b;
int i;
if (b_readpb(vol, bnum, &b, 1) == -1)
goto fail;
d_fetchsw(&ptr, &map->pmSig);
d_fetchsw(&ptr, &map->pmSigPad);
d_fetchsl(&ptr, &map->pmMapBlkCnt);
d_fetchsl(&ptr, &map->pmPyPartStart);
d_fetchsl(&ptr, &map->pmPartBlkCnt);
strncpy((char *) map->pmPartName, (const char *) ptr, 32);
map->pmPartName[32] = 0;
ptr += 32;
strncpy((char *) map->pmParType, (const char *) ptr, 32);
map->pmParType[32] = 0;
ptr += 32;
d_fetchsl(&ptr, &map->pmLgDataStart);
d_fetchsl(&ptr, &map->pmDataCnt);
d_fetchsl(&ptr, &map->pmPartStatus);
d_fetchsl(&ptr, &map->pmLgBootStart);
d_fetchsl(&ptr, &map->pmBootSize);
d_fetchsl(&ptr, &map->pmBootAddr);
d_fetchsl(&ptr, &map->pmBootAddr2);
d_fetchsl(&ptr, &map->pmBootEntry);
d_fetchsl(&ptr, &map->pmBootEntry2);
d_fetchsl(&ptr, &map->pmBootCksum);
strncpy((char *) map->pmProcessor, (const char *) ptr, 16);
map->pmProcessor[16] = 0;
ptr += 16;
for (i = 0; i < 188; ++i)
d_fetchsw(&ptr, &map->pmPad[i]);
ASSERT(ptr - b == HFS_BLOCKSZ);
return 0;
fail:
return -1;
} | false | true | false | false | false | 1 |
mca_btl_openib_proc_remove(ompi_proc_t *proc,
mca_btl_base_endpoint_t *endpoint)
{
size_t i;
mca_btl_openib_proc_t* ib_proc = NULL;
/* Remove endpoint from the openib BTL version of the proc as
well */
ib_proc = mca_btl_openib_proc_lookup_ompi(proc);
if (NULL != ib_proc) {
for (i = 0; i < ib_proc->proc_endpoint_count; ++i) {
if (ib_proc->proc_endpoints[i] == endpoint) {
ib_proc->proc_endpoints[i] = NULL;
if (i == ib_proc->proc_endpoint_count - 1) {
--ib_proc->proc_endpoint_count;
}
return OMPI_SUCCESS;
}
}
}
return OMPI_ERR_NOT_FOUND;
} | false | false | false | false | false | 0 |
set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
const struct itimerval *const value,
struct itimerval *const ovalue)
{
cputime_t cval, nval, cinterval, ninterval;
s64 ns_ninterval, ns_nval;
u32 error, incr_error;
struct cpu_itimer *it = &tsk->signal->it[clock_id];
nval = timeval_to_cputime(&value->it_value);
ns_nval = timeval_to_ns(&value->it_value);
ninterval = timeval_to_cputime(&value->it_interval);
ns_ninterval = timeval_to_ns(&value->it_interval);
error = cputime_sub_ns(nval, ns_nval);
incr_error = cputime_sub_ns(ninterval, ns_ninterval);
spin_lock_irq(&tsk->sighand->siglock);
cval = it->expires;
cinterval = it->incr;
if (cval || nval) {
if (nval > 0)
nval += cputime_one_jiffy;
set_process_cpu_timer(tsk, clock_id, &nval, &cval);
}
it->expires = nval;
it->incr = ninterval;
it->error = error;
it->incr_error = incr_error;
trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
ITIMER_VIRTUAL : ITIMER_PROF, value, nval);
spin_unlock_irq(&tsk->sighand->siglock);
if (ovalue) {
cputime_to_timeval(cval, &ovalue->it_value);
cputime_to_timeval(cinterval, &ovalue->it_interval);
}
} | false | false | false | false | false | 0 |
ceci_log_v(enum libcec_log_level level, const char *function,
const char *format, va_list args)
{
const char *prefix;
struct timeval tv;
struct tm *loc;
#ifndef ENABLE_DEBUG_LOGGING
if (level < ceci_global_log_level)
return;
#endif
switch (level) {
case LIBCEC_LOG_LEVEL_DEBUG:
prefix = "debug";
break;
case LIBCEC_LOG_LEVEL_INFO:
prefix = "info";
break;
case LIBCEC_LOG_LEVEL_WARNING:
prefix = "warning";
break;
case LIBCEC_LOG_LEVEL_ERROR:
prefix = "error";
break;
default:
prefix = "unknown";
break;
}
gettimeofday(&tv, (struct timezone *)0);
loc = localtime(&tv.tv_sec);
fprintf(ceci_logger, "%04d.%02d.%02d %02d:%02d:%02d.%03ld libcec:%s [%s] ",
loc->tm_year+1900, loc->tm_mon+1, loc->tm_mday, loc->tm_hour,
loc->tm_min, loc->tm_sec, tv.tv_usec/1000, prefix, function);
vfprintf(ceci_logger, format, args);
fprintf(ceci_logger, "\n");
fflush(ceci_logger);
} | false | false | false | false | false | 0 |
GC_register_altstack (void *stack, int stack_size, void *altstack, int altstack_size)
{
GC_thread thread;
LOCK();
thread = (void *)GC_lookup_thread(pthread_self());
if (thread) {
thread->stack = stack;
thread->stack_size = stack_size;
thread->altstack = altstack;
thread->altstack_size = altstack_size;
} else {
/*
* This happens if we are called before GC_thr_init ().
*/
main_pthread_self = pthread_self ();
main_stack = stack;
main_stack_size = stack_size;
main_altstack = altstack;
main_altstack_size = altstack_size;
}
UNLOCK();
} | false | false | false | false | false | 0 |
really_spawn (void *d)
{
spawn_data *data = (spawn_data *)d;
scm_t_catch_body body = data->body;
void *body_data = data->body_data;
scm_t_catch_handler handler = data->handler;
void *handler_data = data->handler_data;
scm_i_thread *t = SCM_I_CURRENT_THREAD;
scm_i_scm_pthread_mutex_lock (&data->mutex);
data->thread = scm_current_thread ();
scm_i_pthread_cond_signal (&data->cond);
scm_i_pthread_mutex_unlock (&data->mutex);
if (handler == NULL)
t->result = body (body_data);
else
t->result = scm_internal_catch (SCM_BOOL_T,
body, body_data,
handler, handler_data);
return 0;
} | false | false | false | false | false | 0 |
regerror (errcode, preg, errbuf, errbuf_size)
int errcode;
const regex_t *preg;
char *errbuf;
size_t errbuf_size;
{
const char *msg;
size_t msg_size;
if (errcode < 0
|| errcode >= (sizeof (re_error_msgid) / sizeof (re_error_msgid[0])))
/* Only error codes returned by the rest of the code should be passed
to this routine. If we are given anything else, or if other regex
code generates an invalid error code, then the program has a bug.
Dump core so we can fix it. */
abort ();
msg = gettext (re_error_msgid[errcode]);
msg_size = strlen (msg) + 1; /* Includes the null. */
if (errbuf_size != 0)
{
if (msg_size > errbuf_size)
{
strncpy (errbuf, msg, errbuf_size - 1);
errbuf[errbuf_size - 1] = 0;
}
else
strcpy (errbuf, msg);
}
return msg_size;
} | false | false | false | false | false | 0 |
s_border_properties_cell (const char * border_color,
const char * border_style,
const char * border_width,
const char * color,
PP_PropertyMap::Line & line,
const PP_PropertyMap::Line lineTable
)
{
/* cell-border properties:
*
* (1) color - defaults to value of "color" property
* (2) line-style - defaults to solid (in contrast to "none" in CSS)
* (3) thickness - defaults to 1 layout unit (??, vs "medium" in CSS)
*/
line.reset ();
PP_PropertyMap::TypeColor t_border_color = PP_PropertyMap::color_type (border_color);
if (t_border_color)
{
line.m_t_color = t_border_color;
if (t_border_color == PP_PropertyMap::color_color)
UT_parseColor (border_color, line.m_color);
}
else if (color)
{
PP_PropertyMap::TypeColor t_color = PP_PropertyMap::color_type (color);
line.m_t_color = t_color;
if (t_color == PP_PropertyMap::color_color)
UT_parseColor (color, line.m_color);
}
else if(lineTable.m_t_color)
{
line.m_t_color = lineTable.m_t_color;
line.m_color = lineTable.m_color;
}
line.m_t_linestyle = PP_PropertyMap::linestyle_type (border_style);
if (!line.m_t_linestyle)
{
if(lineTable.m_t_linestyle)
{
line.m_t_linestyle = lineTable.m_t_linestyle;
}
else
{
line.m_t_linestyle = PP_PropertyMap::linestyle_solid;
}
}
line.m_t_thickness = PP_PropertyMap::thickness_type (border_width);
if (line.m_t_thickness == PP_PropertyMap::thickness_length)
{
if (UT_determineDimension (border_width, (UT_Dimension)-1) == DIM_PX)
{
double thickness = UT_LAYOUT_RESOLUTION * UT_convertDimensionless (border_width);
line.m_thickness = static_cast<UT_sint32>(thickness / UT_PAPER_UNITS_PER_INCH);
}
else
line.m_thickness = UT_convertToLogicalUnits (border_width);
if (!line.m_thickness)
{
double thickness = UT_LAYOUT_RESOLUTION;
line.m_thickness = static_cast<UT_sint32>(thickness / UT_PAPER_UNITS_PER_INCH);
}
}
else if(lineTable.m_t_thickness == PP_PropertyMap::thickness_length)
{
line.m_thickness = lineTable.m_thickness;
line.m_t_thickness = lineTable.m_t_thickness;
}
else //
{
// default to 0.72pt
line.m_t_thickness = PP_PropertyMap::thickness_length;
double thickness = UT_LAYOUT_RESOLUTION;
line.m_thickness = static_cast<UT_sint32>(thickness / UT_PAPER_UNITS_PER_INCH);
}
} | false | false | false | false | false | 0 |
jbg_set_default_l0(struct jbg_enc_state *s)
{
s->l0 = jbg_ceil_half(s->yd, s->d) / 35; /* 35 stripes/image */
while ((s->l0 << s->d) > 128) /* but <= 128 lines/stripe */
--s->l0;
if (s->l0 < 2) s->l0 = 2;
} | false | false | false | false | false | 0 |
get_related(vector<int> &out, int pivot_sid, int limit)
{
string query =
"SELECT pos FROM Filter NATURAL INNER JOIN Library "
"WHERE sid IN ("
"SELECT L.sid FROM C.Correlations AS C INNER JOIN Last AS L "
"ON CASE WHEN C.x = ? THEN C.y ELSE C.x END = L.sid "
"WHERE C.weight > 0 AND (C.x = ? OR C.y = ?) AND L.last > ? "
"ORDER BY C.weight DESC LIMIT " + itos(limit) + ");";
try {
Q q(query);
q << pivot_sid << pivot_sid << pivot_sid << (time(0) - HOUR);
while (q.next())
{
int pos;
q >> pos;
out.push_back(pos);
}
}
WARNIFFAILED();
} | false | false | false | false | false | 0 |
gda_dsn_split (const gchar *string, gchar **out_dsn, gchar **out_username, gchar **out_password)
{
const gchar *ptr;
g_return_if_fail (string);
g_return_if_fail (out_dsn);
g_return_if_fail (out_username);
g_return_if_fail (out_password);
*out_dsn = NULL;
*out_username = NULL;
*out_password = NULL;
for (ptr = string; *ptr; ptr++) {
if (*ptr == '@') {
const gchar *tmp = ptr;
*out_dsn = g_strdup (ptr+1);
for (ptr = string; ptr < tmp; ptr++) {
if (*ptr == ':') {
*out_username = g_strndup (string, ptr - string);
*out_password = g_strndup (ptr+1, tmp - ptr - 1);
}
}
if (!*out_username)
*out_username = g_strndup (string, tmp - string);
break;
}
}
if (!*out_dsn)
*out_dsn = g_strdup (string);
/* RFC 1738 decode username and password strings */
gda_rfc1738_decode (*out_username);
gda_rfc1738_decode (*out_password);
} | false | false | false | false | false | 0 |
GetBonds(bool exterior) const
{
OBAtom *atom;
vector<OBBond*> bonds;
OBBitVec idxs;
unsigned int sz;
sz = (unsigned int) _atoms.size();
for ( unsigned int i = 0 ; i < sz ; ++i )
{
atom = _atoms[i];
OBBond *bond;
vector<OBBond*>::iterator b;
for (bond = atom->BeginBond(b) ; bond ; bond = atom->NextBond(b))
{
if (!idxs.BitIsOn(bond->GetIdx()))
{
if (!exterior)
{
if (bond->GetNbrAtom(atom)->GetResidue() == this)
bonds.push_back(&(*bond));
}
else
bonds.push_back(&(*bond));
idxs.SetBitOn(bond->GetIdx());
}
}
}
return bonds;
} | false | false | false | false | false | 0 |
clear_buff(void)
{
int count;
/*** called when spadbuf gives me a line incase there is something already
on the line ****/
if (buff_pntr > 0) {
/*** backup to the beginning of the line ***/
for (count = curr_pntr; count > 0; count--)
myputchar(_BKSPC);
/** blank over the line ***/
for (count = 0; count < buff_pntr; count++) {
myputchar(_BLANK);
}
/** back up again ***/
for (count = buff_pntr; count > 0; count--)
myputchar(_BKSPC);
init_buff(buff, buff_pntr);
init_flag(buff_flag, buff_pntr);
curr_pntr = buff_pntr = 0;
}
} | false | false | false | false | false | 0 |
var_parse_file_compile( VAR_PARSE_FILE const * parse, compiler * c )
{
int i;
for ( i = 0; i < parse->filename->size; ++i )
var_parse_group_compile( dynamic_array_at( VAR_PARSE_GROUP *,
parse->filename, parse->filename->size - i - 1 ), c );
compile_emit( c, INSTR_APPEND_STRINGS, parse->filename->size );
for ( i = 0; i < parse->contents->size; ++i )
var_parse_group_compile( dynamic_array_at( VAR_PARSE_GROUP *,
parse->contents, parse->contents->size - i - 1 ), c );
compile_emit( c, INSTR_WRITE_FILE, parse->contents->size );
} | false | false | false | false | false | 0 |
write_typed_alloc_defns (const type_p structures, const pair_p typedefs)
{
type_p s;
pair_p p;
oprintf (header_file,
"\n/* Allocators for known structs and unions. */\n\n");
for (s = structures; s; s = s->next)
{
if (!USED_BY_TYPED_GC_P (s))
continue;
write_typed_struct_alloc_def (s, "", single, any_zone);
write_typed_struct_alloc_def (s, "cleared_", single, any_zone);
write_typed_struct_alloc_def (s, "vec_", vector, any_zone);
write_typed_struct_alloc_def (s, "cleared_vec_", vector, any_zone);
write_typed_struct_alloc_def (s, "zone_", single, specific_zone);
write_typed_struct_alloc_def (s, "zone_cleared_", single,
specific_zone);
write_typed_struct_alloc_def (s, "zone_vec_", vector, specific_zone);
write_typed_struct_alloc_def (s, "zone_cleared_vec_", vector,
specific_zone);
}
oprintf (header_file, "\n/* Allocators for known typedefs. */\n");
for (p = typedefs; p; p = p->next)
{
s = p->type;
if (!USED_BY_TYPED_GC_P (s) || (strcmp (p->name, s->u.s.tag) == 0))
continue;
write_typed_typedef_alloc_def (p, "", single, any_zone);
write_typed_typedef_alloc_def (p, "cleared_", single, any_zone);
write_typed_typedef_alloc_def (p, "vec_", vector, any_zone);
write_typed_typedef_alloc_def (p, "cleared_vec_", vector, any_zone);
write_typed_typedef_alloc_def (p, "zone_", single, specific_zone);
write_typed_typedef_alloc_def (p, "zone_cleared_", single,
specific_zone);
write_typed_typedef_alloc_def (p, "zone_cleared_vec_", vector,
specific_zone);
}
} | false | false | false | false | false | 0 |
readTrueHeading(double* pdfTrueHeading, int iToken, const char* pszTokenDesc)
{
int bRet = readDoubleWithBounds(pdfTrueHeading, iToken, pszTokenDesc, -180., 360.);
if (bRet)
{
if (*pdfTrueHeading < 0.)
*pdfTrueHeading += 180.;
}
return bRet;
} | false | false | false | false | false | 0 |
set(const char *module, const char *entry, int value)
{
int ret;
DB_LIST list;
if(!module || !entry)
return false;
list = eXdbmGetList(dbid, 0, (char *)module);
if (list == 0)
{
ret = eXdbmCreateList(dbid, 0, (char *)module, 0);
if (ret == -1)
{
puts(eXdbmGetErrorString(eXdbmGetLastError()));
return false;
}
list = eXdbmGetList(dbid, 0, (char *)module);
if (list == 0)
{
puts(eXdbmGetErrorString(eXdbmGetLastError()));
return false;
}
}
// We have the list
ret = eXdbmChangeVarInt(dbid, list, (char *)entry, value);
if (ret == -1)
{
ret = eXdbmCreateVarInt(dbid, list, (char *)entry, 0, value);
if (ret == -1)
{
puts("\n\n\n\ndidn't work");
puts(eXdbmGetErrorString(eXdbmGetLastError()));
puts("\n\n\n\n");
return false;
}
}
ret = eXdbmUpdateDatabase(dbid);
if (ret == -1)
{
puts(eXdbmGetErrorString(eXdbmGetLastError()));
return false;
}
return true;
} | false | false | false | false | false | 0 |
ls_unlock_v6(struct dlm_ls_info *lsinfo, uint32_t lkid,
uint32_t flags, struct dlm_lksb *lksb, void *astarg)
{
struct dlm_write_request req;
set_version_v6(&req);
req.cmd = DLM_USER_UNLOCK;
req.i.lock.lkid = lkid;
req.i.lock.flags = (flags & ~LKF_WAIT);
req.i.lock.lksb = lksb;
req.i.lock.namelen = 0;
req.i.lock.castparam = astarg;
/* DLM_USER_UNLOCK will default to existing completion AST */
req.i.lock.castaddr = 0;
lksb->sb_status = EINPROG;
if (flags & LKF_WAIT)
return sync_write_v6(lsinfo, &req, sizeof(req));
else
return write(lsinfo->fd, &req, sizeof(req));
} | false | false | false | false | false | 0 |
build_tool_dialog_init_text_entries (BuildToolDialog* self) {
GtkEntry* _tmp0_ = NULL;
GtkEntry* _tmp1_ = NULL;
GtkEntry* _tmp2_ = NULL;
GtkEntry* _tmp3_ = NULL;
g_return_if_fail (self != NULL);
_tmp0_ = (GtkEntry*) gtk_entry_new ();
g_object_ref_sink (_tmp0_);
_g_object_unref0 (self->priv->_entry_label);
self->priv->_entry_label = _tmp0_;
_tmp1_ = (GtkEntry*) gtk_entry_new ();
g_object_ref_sink (_tmp1_);
_g_object_unref0 (self->priv->_entry_desc);
self->priv->_entry_desc = _tmp1_;
_tmp2_ = (GtkEntry*) gtk_entry_new ();
g_object_ref_sink (_tmp2_);
_g_object_unref0 (self->priv->_entry_extensions);
self->priv->_entry_extensions = _tmp2_;
_tmp3_ = (GtkEntry*) gtk_entry_new ();
g_object_ref_sink (_tmp3_);
_g_object_unref0 (self->priv->_entry_files_to_open);
self->priv->_entry_files_to_open = _tmp3_;
} | false | false | false | false | false | 0 |
getNextVectorRegister(unsigned Reg, unsigned Stride = 1) {
while (Stride--) {
switch (Reg) {
default:
llvm_unreachable("Vector register expected!");
case AArch64::Q0: Reg = AArch64::Q1; break;
case AArch64::Q1: Reg = AArch64::Q2; break;
case AArch64::Q2: Reg = AArch64::Q3; break;
case AArch64::Q3: Reg = AArch64::Q4; break;
case AArch64::Q4: Reg = AArch64::Q5; break;
case AArch64::Q5: Reg = AArch64::Q6; break;
case AArch64::Q6: Reg = AArch64::Q7; break;
case AArch64::Q7: Reg = AArch64::Q8; break;
case AArch64::Q8: Reg = AArch64::Q9; break;
case AArch64::Q9: Reg = AArch64::Q10; break;
case AArch64::Q10: Reg = AArch64::Q11; break;
case AArch64::Q11: Reg = AArch64::Q12; break;
case AArch64::Q12: Reg = AArch64::Q13; break;
case AArch64::Q13: Reg = AArch64::Q14; break;
case AArch64::Q14: Reg = AArch64::Q15; break;
case AArch64::Q15: Reg = AArch64::Q16; break;
case AArch64::Q16: Reg = AArch64::Q17; break;
case AArch64::Q17: Reg = AArch64::Q18; break;
case AArch64::Q18: Reg = AArch64::Q19; break;
case AArch64::Q19: Reg = AArch64::Q20; break;
case AArch64::Q20: Reg = AArch64::Q21; break;
case AArch64::Q21: Reg = AArch64::Q22; break;
case AArch64::Q22: Reg = AArch64::Q23; break;
case AArch64::Q23: Reg = AArch64::Q24; break;
case AArch64::Q24: Reg = AArch64::Q25; break;
case AArch64::Q25: Reg = AArch64::Q26; break;
case AArch64::Q26: Reg = AArch64::Q27; break;
case AArch64::Q27: Reg = AArch64::Q28; break;
case AArch64::Q28: Reg = AArch64::Q29; break;
case AArch64::Q29: Reg = AArch64::Q30; break;
case AArch64::Q30: Reg = AArch64::Q31; break;
// Vector lists can wrap around.
case AArch64::Q31:
Reg = AArch64::Q0;
break;
}
}
return Reg;
} | false | false | false | false | false | 0 |
x509_fabricate_name(struct x509_parse_context *ctx, size_t hdrlen,
unsigned char tag,
char **_name, size_t vlen)
{
const void *name, *data = (const void *)ctx->data;
size_t namesize;
char *buffer;
if (*_name)
return -EINVAL;
/* Empty name string if no material */
if (!ctx->cn_size && !ctx->o_size && !ctx->email_size) {
buffer = kmalloc(1, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
buffer[0] = 0;
goto done;
}
if (ctx->cn_size && ctx->o_size) {
/* Consider combining O and CN, but use only the CN if it is
* prefixed by the O, or a significant portion thereof.
*/
namesize = ctx->cn_size;
name = data + ctx->cn_offset;
if (ctx->cn_size >= ctx->o_size &&
memcmp(data + ctx->cn_offset, data + ctx->o_offset,
ctx->o_size) == 0)
goto single_component;
if (ctx->cn_size >= 7 &&
ctx->o_size >= 7 &&
memcmp(data + ctx->cn_offset, data + ctx->o_offset, 7) == 0)
goto single_component;
buffer = kmalloc(ctx->o_size + 2 + ctx->cn_size + 1,
GFP_KERNEL);
if (!buffer)
return -ENOMEM;
memcpy(buffer,
data + ctx->o_offset, ctx->o_size);
buffer[ctx->o_size + 0] = ':';
buffer[ctx->o_size + 1] = ' ';
memcpy(buffer + ctx->o_size + 2,
data + ctx->cn_offset, ctx->cn_size);
buffer[ctx->o_size + 2 + ctx->cn_size] = 0;
goto done;
} else if (ctx->cn_size) {
namesize = ctx->cn_size;
name = data + ctx->cn_offset;
} else if (ctx->o_size) {
namesize = ctx->o_size;
name = data + ctx->o_offset;
} else {
namesize = ctx->email_size;
name = data + ctx->email_offset;
}
single_component:
buffer = kmalloc(namesize + 1, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
memcpy(buffer, name, namesize);
buffer[namesize] = 0;
done:
*_name = buffer;
ctx->cn_size = 0;
ctx->o_size = 0;
ctx->email_size = 0;
return 0;
} | false | false | false | false | false | 0 |
insertPlaylist( int topModelRow, Playlists::PlaylistPtr playlist )
{
insertPlaylists( topModelRow, Playlists::PlaylistList() << playlist );
} | false | false | false | false | false | 0 |
tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
int off)
{
struct throtl_grp *tg = pd_to_tg(pd);
unsigned int v = *(unsigned int *)((void *)tg + off);
if (v == -1)
return 0;
return __blkg_prfill_u64(sf, pd, v);
} | false | false | false | false | false | 0 |
help_popup(){
int i, j, k;
int cmds;
int len;
char *htxt;
char *cmd;
static GtkWidget *dialog = NULL;
cmds = 0;
for(i=0; i<NumFunctionRows; i++){
cmds += rowinf[i].numBtns;
}
if(NULL == (htxt = (char*)malloc((3000+cmds*60)*sizeof(char)))){
perror("help_popup: malloc");
return;
} else {
strcpy(htxt, HELP_TXT);
/* append the list of commands to the help text */
for(i=0; i<NumFunctionRows; i++){
for(j=0; j<rowinf[i].numBtns; j++){
cmd = rowinf[i].fi[j].cmd;
if(cmd != NULL){
strcat(htxt, cmd);
len = 12 - strlen(cmd);
for(k=0; k<len; k++) strcat(htxt, " ");
strcat(htxt, rowinf[i].fi[j].help);
strcat(htxt, "\n");
}
}
}
}
popup_window(&dialog, htxt, "Help");
free(htxt);
} | false | true | false | false | false | 1 |
direct_print_memcpy_routines( void )
{
int i;
u32 config_flags = 0;
direct_log_printf( NULL, "\nPossible values for memcpy option are:\n\n" );
for (i=1; memcpy_method[i].name; i++) {
bool unsupported = (memcpy_method[i].cpu_require & ~config_flags);
direct_log_printf( NULL, " %-10s %-27s %s\n", memcpy_method[i].name,
memcpy_method[i].desc, unsupported ? "" : "supported" );
}
direct_log_printf( NULL, "\n" );
} | false | false | false | false | false | 0 |
set_nat_binding_delay (unsigned delay)
{
PTRACE (3, "Ekiga\tNat binding delay set to " << delay);
if (delay > 0)
SetNATBindingTimeout (PTimeInterval (0, delay));
} | false | false | false | false | false | 0 |
send_unknown(struct gspca_dev *gspca_dev, int sensor)
{
reg_w(gspca_dev, 0x01, 0x0000); /* bridge reset */
switch (sensor) {
case SENSOR_PAS106:
reg_w(gspca_dev, 0x03, 0x003a);
reg_w(gspca_dev, 0x0c, 0x003b);
reg_w(gspca_dev, 0x08, 0x0038);
break;
case SENSOR_ADCM2700:
case SENSOR_GC0305:
case SENSOR_OV7620:
case SENSOR_MT9V111_1:
case SENSOR_MT9V111_3:
case SENSOR_PB0330:
case SENSOR_PO2030:
reg_w(gspca_dev, 0x0d, 0x003a);
reg_w(gspca_dev, 0x02, 0x003b);
reg_w(gspca_dev, 0x00, 0x0038);
break;
case SENSOR_HV7131R:
case SENSOR_PAS202B:
reg_w(gspca_dev, 0x03, 0x003b);
reg_w(gspca_dev, 0x0c, 0x003a);
reg_w(gspca_dev, 0x0b, 0x0039);
if (sensor == SENSOR_PAS202B)
reg_w(gspca_dev, 0x0b, 0x0038);
break;
}
} | false | false | false | false | false | 0 |
diff_filespec_is_binary(struct diff_filespec *one)
{
if (one->is_binary == -1) {
diff_filespec_load_driver(one);
if (one->driver->binary != -1)
one->is_binary = one->driver->binary;
else {
if (!one->data && DIFF_FILE_VALID(one))
diff_populate_filespec(one, 0);
if (one->data)
one->is_binary = buffer_is_binary(one->data,
one->size);
if (one->is_binary == -1)
one->is_binary = 0;
}
}
return one->is_binary;
} | false | false | false | false | false | 0 |
summary_update_unread(SummaryView *summaryview, FolderItem *removed_item)
{
guint new, unread, unreadmarked, marked, total;
guint replied, forwarded, locked, ignored, watched;
static gboolean tips_initialized = FALSE;
#if !(GTK_CHECK_VERSION(2,12,0))
GtkTooltips *tips = summaryview->tooltips;
#endif
if (prefs_common.layout_mode != SMALL_LAYOUT) {
if (tips_initialized) {
summary_set_folder_pixmap(summaryview, STOCK_PIXMAP_DIR_OPEN);
CLAWS_SET_TIP(summaryview->folder_pixmap_eventbox,
NULL);
tips_initialized = FALSE;
}
return;
}
folder_count_total_msgs(&new, &unread, &unreadmarked, &marked, &total,
&replied, &forwarded, &locked, &ignored,
&watched);
if (removed_item) {
total -= removed_item->total_msgs;
new -= removed_item->new_msgs;
unread -= removed_item->unread_msgs;
}
if (new > 0 || unread > 0) {
tips_initialized = TRUE;
summary_set_folder_pixmap(summaryview, STOCK_PIXMAP_DIR_OPEN_HRM);
CLAWS_SET_TIP(summaryview->folder_pixmap_eventbox,
_("Go back to the folder list (You have unread messages)"));
} else {
tips_initialized = TRUE;
summary_set_folder_pixmap(summaryview, STOCK_PIXMAP_DIR_OPEN);
CLAWS_SET_TIP(summaryview->folder_pixmap_eventbox,
_("Go back to the folder list"));
}
} | false | false | false | false | false | 0 |
conn_closecon(struct conn_sslcon *con) {
if(con->state == CONNECTION_STATE_TCPCONNECTING || con->state == CONNECTION_STATE_SSLCONNECTING) {
close(con->fd);
con->state = CONNECTION_STATE_CLOSED;
return 0;
}
if(con->state == CONNECTION_STATE_SSLCLOSING || con->state == CONNECTION_STATE_CLOSED) return 0;
if(con->state != CONNECTION_STATE_CONNECTED) {
close(con->fd);
con->state = CONNECTION_STATE_CLOSED;
return 0;
}
con->ssl_want_read = 0;
con->ssl_want_write = 1;
con->state = CONNECTION_STATE_SSLCLOSING;
return 0;
} | false | false | false | false | false | 0 |
delete_algo_list(GF_List *algos)
{
u32 i;
for (i=0;i<gf_list_count(algos); i++) {
GF_IPMPX_Authentication *ip_auth = (GF_IPMPX_Authentication *)gf_list_get(algos, i);
GF_IPMPX_AUTH_Delete(ip_auth);
}
gf_list_del(algos);
} | false | false | false | false | false | 0 |
on_tvw_results_button_press_event (GtkWidget *w, GdkEventButton *event, GbemolLibrary* lib)
{
gtk_tree_model_filter_refilter (GTK_TREE_MODEL_FILTER (lib->priv->fil_results));
if (event->type == GDK_BUTTON_PRESS)
if (event->button == 3)
{
gtk_menu_popup (GTK_MENU(gtk_ui_manager_get_widget (lib->priv->manager, "/ResultsMenu")),
NULL, NULL, NULL, NULL,
event->button, event->time);
return TRUE;
}
return FALSE;
} | false | false | false | false | false | 0 |
list_insert_at(list_t *restrict l, const void *data, unsigned int pos) {
struct list_entry_s *lent, *succ, *prec;
if (l->iter_active || pos > l->numels) return -1;
/* this code optimizes malloc() with a free-list */
if (l->spareelsnum > 0) {
lent = l->spareels[l->spareelsnum-1];
l->spareelsnum--;
} else {
lent = (struct list_entry_s *)malloc(sizeof(struct list_entry_s));
if (lent == NULL)
return -1;
}
if (l->attrs.copy_data) {
/* make room for user' data (has to be copied) */
size_t datalen = l->attrs.meter(data);
lent->data = (struct list_entry_s *)malloc(datalen);
memcpy(lent->data, data, datalen);
} else {
lent->data = (void*)data;
}
/* actually append element */
prec = list_findpos(l, pos-1);
succ = prec->next;
prec->next = lent;
lent->prev = prec;
lent->next = succ;
succ->prev = lent;
l->numels++;
/* fix mid pointer */
if (l->numels == 1) { /* first element, set pointer */
l->mid = lent;
} else if (l->numels % 2) { /* now odd */
if (pos >= (l->numels-1)/2) l->mid = l->mid->next;
} else { /* now even */
if (pos <= (l->numels-1)/2) l->mid = l->mid->prev;
}
assert(list_repOk(l));
return 1;
} | false | false | false | false | false | 0 |
netsys_fallocate(value fd, value start, value len) {
#ifdef HAVE_POSIX_FALLOCATE
int r;
int64 start_int, len_int;
off_t start_off, len_off;
/* Att: off_t might be 64 bit even on 32 bit systems! */
start_int = Int64_val(start);
len_int = Int64_val(len);
if ( ((int64) ((off_t) start_int)) != start_int )
failwith("Netsys.fadvise: large files not supported on this OS");
if ( ((int64) ((off_t) len_int)) != len_int )
failwith("Netsys.fadvise: large files not supported on this OS");
start_off = start_int;
len_off = len_int;
r = posix_fallocate(Int_val(fd), start_off, len_off);
/* does not set errno! */
if (r != 0)
unix_error(r, "posix_fallocate64", Nothing);
return Val_unit;
#else
invalid_argument("Netsys.fallocate not available");
#endif
} | false | false | false | false | false | 0 |
fsl_asrc_runtime_resume(struct device *dev)
{
struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
int i, ret;
ret = clk_prepare_enable(asrc_priv->mem_clk);
if (ret)
return ret;
ret = clk_prepare_enable(asrc_priv->ipg_clk);
if (ret)
goto disable_mem_clk;
for (i = 0; i < ASRC_CLK_MAX_NUM; i++) {
ret = clk_prepare_enable(asrc_priv->asrck_clk[i]);
if (ret)
goto disable_asrck_clk;
}
return 0;
disable_asrck_clk:
for (i--; i >= 0; i--)
clk_disable_unprepare(asrc_priv->asrck_clk[i]);
clk_disable_unprepare(asrc_priv->ipg_clk);
disable_mem_clk:
clk_disable_unprepare(asrc_priv->mem_clk);
return ret;
} | false | false | false | false | false | 0 |
preferredPos(Corona *corona) const
{
Q_ASSERT(corona);
if (isPanelContainment()) {
//kDebug() << "is a panel, so put it at" << preferredPanelPos(corona);
return preferredPanelPos(corona);
}
QPointF pos(0, 0);
QTransform t;
while (QGraphicsItem *i = corona->itemAt(pos, t)) {
pos.setX(i->scenePos().x() + i->boundingRect().width() + 10);
}
//kDebug() << "not a panel, put it at" << pos;
return pos;
} | false | false | false | false | false | 0 |
nfs4_destroy_session(struct nfs4_session *session)
{
struct rpc_xprt *xprt;
struct rpc_cred *cred;
cred = nfs4_get_clid_cred(session->clp);
nfs4_proc_destroy_session(session, cred);
if (cred)
put_rpccred(cred);
rcu_read_lock();
xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
rcu_read_unlock();
dprintk("%s Destroy backchannel for xprt %p\n",
__func__, xprt);
xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
nfs4_destroy_session_slot_tables(session);
kfree(session);
} | false | false | false | false | false | 0 |
clipToStrokePath() {
double xMin, yMin, xMax, yMax, x, y, t0, t1;
GfxSubpath *subpath;
int i, j;
xMin = xMax = yMin = yMax = 0; // make gcc happy
for (i = 0; i < path->getNumSubpaths(); ++i) {
subpath = path->getSubpath(i);
for (j = 0; j < subpath->getNumPoints(); ++j) {
transform(subpath->getX(j), subpath->getY(j), &x, &y);
if (i == 0 && j == 0) {
xMin = xMax = x;
yMin = yMax = y;
} else {
if (x < xMin) {
xMin = x;
} else if (x > xMax) {
xMax = x;
}
if (y < yMin) {
yMin = y;
} else if (y > yMax) {
yMax = y;
}
}
}
}
// allow for the line width
//~ miter joins can extend farther than this
t0 = fabs(ctm[0]);
t1 = fabs(ctm[2]);
if (t0 > t1) {
xMin -= 0.5 * lineWidth * t0;
xMax += 0.5 * lineWidth * t0;
} else {
xMin -= 0.5 * lineWidth * t1;
xMax += 0.5 * lineWidth * t1;
}
t0 = fabs(ctm[0]);
t1 = fabs(ctm[3]);
if (t0 > t1) {
yMin -= 0.5 * lineWidth * t0;
yMax += 0.5 * lineWidth * t0;
} else {
yMin -= 0.5 * lineWidth * t1;
yMax += 0.5 * lineWidth * t1;
}
if (xMin > clipXMin) {
clipXMin = xMin;
}
if (yMin > clipYMin) {
clipYMin = yMin;
}
if (xMax < clipXMax) {
clipXMax = xMax;
}
if (yMax < clipYMax) {
clipYMax = yMax;
}
} | false | false | false | false | false | 0 |
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
struct bfi_ioc_image_hdr_s *fwhdr_2)
{
int i;
for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
return BFA_FALSE;
return BFA_TRUE;
} | false | false | false | false | false | 0 |
bindGL(const bool useShadow) const
{
if (m_isReady) m_program.bind();
#if !defined(NDEBUG)
else
{
fprintf(stderr, "Attempt to bind non-ready shader\n");
}
#endif
} | false | false | false | false | false | 0 |
clear_diff_rs(list old_vs_group, virtual_server_t * old_vs)
{
element e;
list l = old_vs->rs;
list new = get_rs_list(old_vs);
real_server_t *rs;
/* If old vs didn't own rs then nothing return */
if (LIST_ISEMPTY(l))
return 1;
/* remove RS from old vs which are not found in new vs */
list rs_to_remove = alloc_list (NULL, NULL);
for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
rs = ELEMENT_DATA(e);
if (!rs_exist(rs, new)) {
/* Reset inhibit flag to delete inhibit entries */
log_message(LOG_INFO, "service %s no longer exist"
, FMT_RS(rs));
rs->inhibit = 0;
list_add (rs_to_remove, rs);
}
}
int ret = clear_service_rs (old_vs_group, old_vs, rs_to_remove);
free_list (rs_to_remove);
return ret;
} | false | false | false | false | false | 0 |
knowledge_god_add(struct knowledge_player *current, const char *item, const struct knowledge_type *type, player *pl) {
char *dup = strdup_local(item), *pos = strchr(dup, ':');
StringBuffer *buf;
int what, i;
knowledge_item* check;
if (!pos) {
LOG(llevError, "knowledge_god_add: invalid god item %s\n", item);
free(dup);
return 0;
}
*pos = '\0';
what = atoi(pos + 1);
for (i = 0; i < current->item_count; i++) {
check = current->items[i];
if (check->handler != type)
/* Only consider our own type. */
continue;
if (strncmp(check->item, dup, strlen(dup)) == 0) {
/* Already known, update information. */
int known, result;
pos = strchr(check->item, ':');
known = atoi(pos + 1);
result = known | what;
buf = stringbuffer_new();
stringbuffer_append_printf(buf, "%s:%d", dup, result);
free_string(check->item);
check->item = stringbuffer_finish_shared(buf);
free(dup);
return (result != known);
}
}
free(dup);
/* Not known, so just add it regularly. */
return knowledge_add(current, item, type, pl);
} | false | false | false | false | true | 1 |
pages_map(void *addr, size_t size, bool noreserve)
{
void *ret;
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
int flags = MAP_PRIVATE | MAP_ANON;
#ifdef MAP_NORESERVE
if (noreserve)
flags |= MAP_NORESERVE;
#endif
ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
assert(ret != NULL);
if (ret == MAP_FAILED)
ret = NULL;
else if (addr != NULL && ret != addr) {
/*
* We succeeded in mapping memory, but not in the right place.
*/
if (munmap(ret, size) == -1) {
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in munmap(): ");
malloc_write(buf);
malloc_write("\n");
if (opt_abort)
abort();
}
ret = NULL;
}
assert(ret == NULL || (addr == NULL && ret != addr)
|| (addr != NULL && ret == addr));
return (ret);
} | false | false | false | false | false | 0 |
add_user_account(char *user, int dynamic)
{
if(user && !turn_params.users_params.use_auth_secret_with_timestamp) {
char *s = strstr(user, ":");
if(!s || (s==user) || (strlen(s)<2)) {
TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Wrong user account: %s\n",user);
} else {
size_t ulen = s-user;
char *usname = (char*)turn_malloc(sizeof(char)*(ulen+1));
strncpy(usname,user,ulen);
usname[ulen]=0;
if(SASLprep((u08bits*)usname)<0) {
TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Wrong user name: %s\n",user);
turn_free(usname,sizeof(char)*(ulen+1));
return -1;
}
s = skip_blanks(s+1);
hmackey_t *key = (hmackey_t*)turn_malloc(sizeof(hmackey_t));
if(strstr(s,"0x")==s) {
char *keysource = s + 2;
size_t sz = get_hmackey_size(turn_params.shatype);
if(strlen(keysource)<sz*2) {
TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Wrong key format: %s\n",s);
} if(convert_string_key_to_binary(keysource, *key, sz)<0) {
TURN_LOG_FUNC(TURN_LOG_LEVEL_ERROR, "Wrong key: %s\n",s);
turn_free(usname,strlen((char*)usname)+1);
turn_free(key,sizeof(hmackey_t));
return -1;
}
} else {
stun_produce_integrity_key_str((u08bits*)usname, (u08bits*)turn_params.users_params.global_realm, (u08bits*)s, *key, turn_params.shatype);
}
if(dynamic) {
ur_string_map_lock(turn_params.users_params.users.dynamic_accounts);
ur_string_map_put(turn_params.users_params.users.dynamic_accounts, (ur_string_map_key_type)usname, (ur_string_map_value_type)*key);
ur_string_map_unlock(turn_params.users_params.users.dynamic_accounts);
} else {
ur_string_map_lock(turn_params.users_params.users.static_accounts);
ur_string_map_put(turn_params.users_params.users.static_accounts, (ur_string_map_key_type)usname, (ur_string_map_value_type)*key);
ur_string_map_unlock(turn_params.users_params.users.static_accounts);
}
turn_params.users_params.users_number++;
turn_free(usname,strlen(usname)+1);
return 0;
}
}
return -1;
} | false | true | false | false | false | 1 |
gee_tree_set_sub_set_real_sub_set (GeeAbstractSortedSet* base, gconstpointer after, gconstpointer before) {
GeeTreeSetSubSet * self;
GeeSortedSet* result = NULL;
GeeTreeSet* _tmp0_ = NULL;
GeeTreeSetRange* _tmp1_ = NULL;
gconstpointer _tmp2_ = NULL;
gconstpointer _tmp3_ = NULL;
GeeTreeSetRange* _tmp4_ = NULL;
GeeTreeSetRange* _tmp5_ = NULL;
GeeTreeSetSubSet* _tmp6_ = NULL;
GeeSortedSet* _tmp7_ = NULL;
self = (GeeTreeSetSubSet*) base;
_tmp0_ = self->priv->set;
_tmp1_ = self->priv->range;
_tmp2_ = after;
_tmp3_ = before;
_tmp4_ = gee_tree_set_range_cut (_tmp1_, _tmp2_, _tmp3_);
_tmp5_ = _tmp4_;
_tmp6_ = gee_tree_set_sub_set_new_from_range (self->priv->g_type, (GBoxedCopyFunc) self->priv->g_dup_func, self->priv->g_destroy_func, _tmp0_, _tmp5_);
_tmp7_ = (GeeSortedSet*) _tmp6_;
_gee_tree_set_range_unref0 (_tmp5_);
result = _tmp7_;
return result;
} | false | false | false | false | false | 0 |
H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id,
const H5S_t *mem_space, const H5S_t *file_space,
hid_t dxpl_id, const void *buf)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
/* Direct chunk write */
if(direct_write) {
H5P_genplist_t *plist; /* Property list pointer */
uint32_t direct_filters;
hsize_t *direct_offset;
uint32_t direct_datasize;
int ndims = 0;
hsize_t dims[H5O_LAYOUT_NDIMS];
hsize_t internal_offset[H5O_LAYOUT_NDIMS];
unsigned u;
/* Get the dataset transfer property list */
if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list")
if(H5D_CHUNKED != dset->shared->layout.type)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
/* Retrieve parameters for direct chunk write */
if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME, &direct_filters) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting filter info for direct chunk write")
if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_NAME, &direct_offset) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting offset info for direct chunk write")
if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_NAME, &direct_datasize) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting data size for direct chunk write")
/* The library's chunking code requires the offset terminates with a zero. So transfer the
* offset array to an internal offset array */
if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dims, NULL)) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve dataspace extent dims")
for(u = 0; u < ndims; u++) {
/* Make sure the offset doesn't exceed the dataset's dimensions */
if(direct_offset[u] > dims[u])
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset")
/* Make sure the offset fall right on a chunk's boundary */
if(direct_offset[u] % dset->shared->layout.u.chunk.dim[u])
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset doesn't fall on chunks's boundary")
internal_offset[u] = direct_offset[u];
} /* end for */
/* Terminate the offset with a zero */
internal_offset[ndims] = 0;
/* write raw data */
if(H5D__chunk_direct_write(dset, dxpl_id, direct_filters, internal_offset, direct_datasize, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write chunk directly")
} /* end if */
else { /* Normal write */
/* write raw data */
if(H5D__write(dset, mem_type_id, mem_space, file_space, dxpl_id, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data")
} /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value)
} | false | false | false | false | false | 0 |
SeqSetEntropy(ss_type P)
{
long i;
double *freq,H;
freq = tFreqSeqSet(P);
for(H=0.0,i = 1; i <= nAlpha(P->A); i++){
if(freq[i] > 0.0) H += freq[i] * log(freq[i]);
}
return (-1.442695041*H);
} | false | false | false | false | false | 0 |
prop_gen_cons(list *l) {
list *nl = malloc(sizeof(list));
if (nl == NULL) { return THEFT_TRIAL_ERROR; }
nl->v = 0;
nl->next = l;
theft_trial_res res;
if (list_length(nl) == list_length(l) + 1) {
res = THEFT_TRIAL_PASS;
} else {
res = THEFT_TRIAL_FAIL;
}
free(nl);
return res;
} | false | false | false | false | false | 0 |
background(void)
{
RETSIGTYPE (*x)(int);
/*
* C shell
* puts its children in a different process group.
* The process group the terminal in is the forground.
*
* Only available on systems with job control.
*/
#ifdef SIGSTOP
if (getpgrp(CONF_getpgrp_arg) != tcgetpgrp(0))
return 1;
#endif
/*
* Bourne shell
* sets its children to ignore SIGINT
*/
x = signal(SIGINT, SIG_IGN);
if (x == SIG_IGN)
return 1;
signal(SIGINT, x);
/*
* probably forground
*/
return 0;
} | false | false | false | false | false | 0 |
get_entry_by_match_function(private_ike_sa_manager_t *this,
ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
linked_list_match_t match, void *param)
{
table_item_t *item;
u_int row, seg;
row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
seg = row & this->segment_mask;
lock_single_segment(this, seg);
item = this->ike_sa_table[row];
while (item)
{
if (match(item->value, param))
{
*entry = item->value;
*segment = seg;
/* the locked segment has to be unlocked by the caller */
return SUCCESS;
}
item = item->next;
}
unlock_single_segment(this, seg);
return NOT_FOUND;
} | false | false | false | false | false | 0 |
qof_query_core_predicate_free (QofQueryPredData *pdata)
{
QueryPredDataFree free_fcn;
g_return_if_fail (pdata);
g_return_if_fail (pdata->type_name);
free_fcn = qof_query_predicate_free (pdata->type_name);
free_fcn (pdata);
} | false | false | false | false | false | 0 |
interv(double *xt, int *lxt, double *x, int *left, int *mflag)
{
static int ilo = 1;
static int istep, middle, ihi;
// Parameter adjustments
--xt;
// from * a practical guide to splines * by C. de Boor
// computes left = max( i : xt(i) .lt. xt(lxt) .and. xt(i) .le. x )
//
// ****** i n p u t ******
// xt.....a double sequence, of length lxt , assumed to be nondecreasing
// lxt.....number of terms in the sequence xt
// x.....the point whose location with respect to the sequence xt is
// to be determined
//
// ****** o u t p u t ******
// left, mflag.....both integers, whose value is
//
// 1 -1 if x .lt. xt(1)
// i 0 if xt(i) .le. x .lt. xt(i+1)
// i 0 if xt(i) .lt. x .eq. xt(i+1) .eq. xt(lxt)
// i 1 if xt(i) .lt. xt(i+1) .eq. xt(lxt) .lt. x
//
// In particular, mflag = 0 is the 'usual' case. mflag .ne. 0
//
// indicates that x lies outside the CLOSED interval
// xt(1) .le. y .le. xt(lxt) . The asymmetric treatment of the
// intervals is due to the decision to make all pp functions
// continuous from the right, but, by returning mflag = 0 even if
// x = xt(lxt), there is the option of having the computed pp
// function continuous from the left at xt(lxt).
//
// ****** m e t h o d ******
// The program is designed to be efficient in the common situation that
// it is called repeatedly, with x taken from an increasing or decrea-
// sing sequence. This will happen, e.g., when a pp function is to be
// graphed. The first guess for left is therefore taken to be the val-
// ue returned at the previous call and stored in the l o c a l varia-
// ble ilo . A first check ascertains that ilo .lt. lxt (this is nec-
// essary since the present call may have nothing to do with the previ-
// ous call). Then, if xt(ilo) .le. x .lt. xt(ilo+1), we set
// left = ilo and are done after just three comparisons
// Otherwise, we repeatedly double the difference istep = ihi - ilo
// while also moving ilo and ihi in the direction of x , until
// xt(ilo) .le. x .lt. xt(ihi),
// after which we use bisection to get, in addition,
// ilo+1 = ihi. left = ilo is then returned
ihi = ilo + 1;
if (ihi < *lxt) {
goto L20;
}
if (*x >= xt[*lxt]) {
goto L110;
}
if (*lxt <= 1) {
goto L90;
}
ilo = *lxt - 1;
ihi = *lxt;
L20:
if (*x >= xt[ihi]) {
goto L40;
}
if (*x >= xt[ilo]) {
goto L100;
}
// **** now x .lt. xt(ilo) . decrease ilo to capture x .
istep = 1;
L31:
ihi = ilo;
ilo = ihi - istep;
if (ilo <= 1) {
goto L35;
}
if (*x >= xt[ilo]) {
goto L50;
}
istep <<= 1;
goto L31;
L35:
ilo = 1;
if (*x < xt[1]) {
goto L90;
}
goto L50;
// **** now x .ge. xt(ihi) . increase ihi to capture x .
L40:
istep = 1;
L41:
ilo = ihi;
ihi = ilo + istep;
if (ihi >= *lxt) {
goto L45;
}
if (*x < xt[ihi]) {
goto L50;
}
istep <<= 1;
goto L41;
L45:
if (*x >= xt[*lxt]) {
goto L110;
}
ihi = *lxt;
// Now xt(ilo) .le. x .lt. xt(ihi) . narrow the interval.
L50:
middle = (ilo + ihi) / 2;
if (middle == ilo) {
goto L100;
}
// Note. it is assumed that middle = ilo in case ihi = ilo+1
if (*x < xt[middle]) {
goto L53;
}
ilo = middle;
goto L50;
L53:
ihi = middle;
goto L50;
// Set output and return.
L90:
*mflag = -1;
*left = 1;
return 0;
L100:
*mflag = 0;
*left = ilo;
return 0;
L110:
*mflag = 1;
if (*x == xt[*lxt]) {
*mflag = 0;
}
*left = *lxt;
L111:
if (*left == 1) {
return 0;
}
--(*left);
if (xt[*left] < xt[*lxt]) {
return 0;
}
goto L111;
} | false | false | false | false | false | 0 |
parsebranch(struct vars * v,
int stopper, /* EOS or ')' */
int type, /* LACON (lookahead subRE) or PLAIN */
struct state * left, /* leftmost state */
struct state * right, /* rightmost state */
int partial) /* is this only part of a branch? */
{
struct state *lp; /* left end of current construct */
int seencontent; /* is there anything in this branch yet? */
struct subre *t;
lp = left;
seencontent = 0;
t = subre(v, '=', 0, left, right); /* op '=' is tentative */
NOERRN();
while (!SEE('|') && !SEE(stopper) && !SEE(EOS))
{
if (seencontent)
{ /* implicit concat operator */
lp = newstate(v->nfa);
NOERRN();
moveins(v->nfa, right, lp);
}
seencontent = 1;
/* NB, recursion in parseqatom() may swallow rest of branch */
parseqatom(v, stopper, type, lp, right, t);
NOERRN();
}
if (!seencontent)
{ /* empty branch */
if (!partial)
NOTE(REG_UUNSPEC);
assert(lp == left);
EMPTYARC(left, right);
}
return t;
} | false | false | false | false | false | 0 |
amitk_volumes_get_max_size(GList * objects) {
amide_real_t temp, max_size;
if (objects == NULL) return -1.0; /* invalid */
/* first process the rest of the list */
max_size = amitk_volumes_get_max_size(objects->next);
/* now process and compare to the children */
temp = amitk_volumes_get_max_size(AMITK_OBJECT_CHILDREN(objects->data));
if (temp > max_size) max_size = temp;
/* and process this guy */
if (AMITK_IS_VOLUME(objects->data))
if (AMITK_VOLUME_VALID(objects->data)) {
temp = point_max_dim(AMITK_VOLUME_CORNER(objects->data));
if (temp > max_size) max_size = temp;
}
return max_size;
} | false | false | false | false | false | 0 |
_relative_date_fmt(const struct tm *when)
{
static int todays_date;
int distance = 1000 * (when->tm_year + 1900) + when->tm_yday;
if (!todays_date) {
time_t now = time(NULL);
struct tm tm;
localtime_r(&now, &tm);
todays_date = 1000 * (tm.tm_year + 1900) + tm.tm_yday;
}
distance -= todays_date;
if (distance == -1) /* yesterday */
return "Ystday %H:%M";
if (distance == 0) /* same day */
return "%H:%M:%S";
if (distance == 1) /* tomorrow */
return "Tomorr %H:%M";
if (distance < -365 || distance > 365) /* far distance */
return "%-d %b %Y";
if (distance < -1 || distance > 6) /* medium distance */
return "%-d %b %H:%M";
return "%a %H:%M"; /* near distance */
} | false | false | false | false | false | 0 |
book_backend_get_contact_list_uids_sync (EBookBackend *backend,
const gchar *query,
GQueue *out_uids,
GCancellable *cancellable,
GError **error)
{
EBookBackendClass *class;
GQueue queue = G_QUEUE_INIT;
gboolean success;
class = E_BOOK_BACKEND_GET_CLASS (backend);
g_return_val_if_fail (class->get_contact_list_sync != NULL, FALSE);
success = class->get_contact_list_sync (
backend, query, &queue, cancellable, error);
if (success) {
while (!g_queue_is_empty (&queue)) {
EContact *contact;
gchar *uid;
contact = g_queue_pop_head (&queue);
uid = e_contact_get (contact, E_CONTACT_UID);
g_queue_push_tail (out_uids, uid);
g_object_unref (contact);
}
}
g_warn_if_fail (g_queue_is_empty (&queue));
return success;
} | false | false | false | false | false | 0 |
engine_take_move (char *line)
{
byte *move = move_read (line);
movstack_trunc ();
movstack_push (cur_pos.board, move);
if (game_stateful)
{
void *newstate = game_newstate (&cur_pos, move);
statestack_push (newstate);
cur_pos.state = statestack_peek ();
}
move_apply (cur_pos.board, move);
cur_pos.num_moves++;
cur_pos.player = cur_pos.player == WHITE ? BLACK : WHITE;
} | false | false | false | false | false | 0 |
ro_step(struct ro_spine *s, dm_block_t new_child)
{
int r;
if (s->count == 2) {
unlock_block(s->info, s->nodes[0]);
s->nodes[0] = s->nodes[1];
s->count--;
}
r = bn_read_lock(s->info, new_child, s->nodes + s->count);
if (!r)
s->count++;
return r;
} | false | false | false | false | false | 0 |
DeregisterForEvents(int handle, unsigned long reserved)
{
struct spnode *node = get_spnode_from_handle(handle);
if (node == NULL || !node->opened)
return RC_INVALID_HANDLE;
io_cancel(node->event_fd);
return RC_SUCCESS;
} | false | false | false | false | false | 0 |
IDirectFBImageProvider_DFIFF_GetSurfaceDescription( IDirectFBImageProvider *thiz,
DFBSurfaceDescription *dsc )
{
const DFIFFHeader *header;
DIRECT_INTERFACE_GET_DATA (IDirectFBImageProvider_DFIFF)
header = data->ptr;
dsc->flags = DSDESC_WIDTH | DSDESC_HEIGHT | DSDESC_PIXELFORMAT;
dsc->width = header->width;
dsc->height = header->height;
dsc->pixelformat = header->format;
return DFB_OK;
} | false | false | false | false | false | 0 |
namespace_save (CamelStoreSummary *s,
FILE *out,
CamelIMAPXNamespaceList *nsl)
{
CamelIMAPXStoreNamespace *ns, *cur = NULL;
guint32 i, n;
for (i = 0; i < 3; i++) {
switch (i) {
case 0:
cur = nsl->personal;
break;
case 1:
cur = nsl->shared;
break;
case 2:
cur = nsl->other;
break;
}
for (ns = cur, n = 0; ns; n++)
ns = ns->next;
if (camel_file_util_encode_fixed_int32 (out, n) == -1)
return -1;
ns = cur;
while (ns != NULL) {
if (camel_file_util_encode_string (out, ns->prefix) == -1)
return -1;
/* XXX This redundancy is for backward-compatibility. */
if (camel_file_util_encode_string (out, ns->prefix) == -1)
return -1;
if (camel_file_util_encode_uint32 (out, ns->sep) == -1)
return -1;
ns = ns->next;
}
}
return 0;
} | false | false | false | false | false | 0 |
Update()
{
// I don't like to override Update, or call Modified() in Update,
// but this allows updates to be forced where MTimes can't be used
bool resampleToScreenPixels = (this->ResampleToScreenPixels != 0);
vtkRenderer *ren = 0;
if (this->AutoAdjustImageQuality && resampleToScreenPixels)
{
// only use image-size texture if image is smaller than render window,
// since otherwise there is far less advantage in doing so
vtkImageSlice *prop = this->GetCurrentProp();
ren = this->GetCurrentRenderer();
if (ren && prop)
{
int *rsize = ren->GetSize();
int maxrsize = (rsize[0] > rsize[1] ? rsize[0] : rsize[1]);
int *isize = this->GetInput()->GetDimensions();
int maxisize = (isize[0] > isize[1] ? isize[0] : isize[1]);
maxisize = (isize[2] > maxisize ? isize[2] : maxisize);
if (maxisize <= maxrsize && maxisize <= 1024)
{
resampleToScreenPixels = (prop->GetAllocatedRenderTime() >= 1.0);
}
}
}
if (resampleToScreenPixels)
{
// force update if quality has increased to "ResampleToScreenPixels"
if (!this->InternalResampleToScreenPixels)
{
this->Modified();
}
else
{
// force update if renderer size has changes, since the texture
// size is equal to the renderer size for "ResampleToScreenPixels"
if (!ren)
{
ren = this->GetCurrentRenderer();
}
if (ren)
{
int *extent = this->ImageReslice->GetOutputExtent();
int *size = ren->GetSize();
if (size[0] != (extent[1] - extent[0] + 1) ||
size[1] != (extent[3] - extent[2] + 1))
{
this->Modified();
}
}
}
}
else if (this->InternalResampleToScreenPixels)
{
// if execution reaches this point in the code, then the
// rendering has just switched to interactive quality, and it is
// necessary to force update if modified since the last update
if (this->GetMTime() > this->UpdateTime.GetMTime())
{
this->Modified();
}
else
{
// don't switch yet: wait until the camera changes position,
// which will cause the MTime to change
resampleToScreenPixels = true;
}
}
this->InternalResampleToScreenPixels = resampleToScreenPixels;
// Always update if something else caused the input to update
vtkImageData *input = this->GetInput();
if (input && input->GetUpdateTime() > this->UpdateTime.GetMTime())
{
this->Modified();
}
this->Superclass::Update();
this->UpdateTime.Modified();
} | false | false | false | false | false | 0 |
eval_ip_dst_addr2cnet(struct ftxlate_action *ftxa,
char *rec, struct fts3rec_offsets *fo)
{
struct fts3rec_all2 cur;
FT_RECGET_DSTADDR(cur,rec,*fo);
if ((cur.dstaddr & 0x80000000) == 0)
*((u_int32*)(rec+(*fo).dstaddr)) &= 0xFF000000;
else if ((cur.dstaddr & 0xC0000000) == 0x80000000)
*((u_int32*)(rec+(*fo).dstaddr)) &= 0xFFFF0000;
else if ((cur.dstaddr & 0xC0000000) == 0xC0000000)
*((u_int32*)(rec+(*fo).dstaddr)) &= 0xFFFFFF00;
} | false | false | false | false | false | 0 |
Add(const Barry::DatabaseDatabase &dbdb)
{
DatabaseDatabase::DatabaseArrayType::const_iterator
b = dbdb.Databases.begin(),
e = dbdb.Databases.end();
for( ; b != e; ++b ) {
// hmmm, could optimize this and only add ids
// with RecordCount > 0, but let's stick with this
// for now... it might flush bugs out of the system
DBLabel id(b->Number, b->Name);
m_dbIds.push_back(id);
}
} | false | false | false | false | false | 0 |
hv_kvp_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
u32 recvlen;
u64 requestid;
struct hv_kvp_msg *kvp_msg;
struct icmsg_hdr *icmsghdrp;
struct icmsg_negotiate *negop = NULL;
int util_fw_version;
int kvp_srv_version;
static enum {NEGO_NOT_STARTED,
NEGO_IN_PROGRESS,
NEGO_FINISHED} host_negotiatied = NEGO_NOT_STARTED;
if (host_negotiatied == NEGO_NOT_STARTED &&
kvp_transaction.state < HVUTIL_READY) {
/*
* If userspace daemon is not connected and host is asking
* us to negotiate we need to delay to not lose messages.
* This is important for Failover IP setting.
*/
host_negotiatied = NEGO_IN_PROGRESS;
schedule_delayed_work(&kvp_host_handshake_work,
HV_UTIL_NEGO_TIMEOUT * HZ);
return;
}
if (kvp_transaction.state > HVUTIL_READY)
return;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
&requestid);
if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&recv_buffer[
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
/*
* Based on the host, select appropriate
* framework and service versions we will
* negotiate.
*/
switch (vmbus_proto_version) {
case (VERSION_WS2008):
util_fw_version = UTIL_WS2K8_FW_VERSION;
kvp_srv_version = WS2008_SRV_VERSION;
break;
case (VERSION_WIN7):
util_fw_version = UTIL_FW_VERSION;
kvp_srv_version = WIN7_SRV_VERSION;
break;
default:
util_fw_version = UTIL_FW_VERSION;
kvp_srv_version = WIN8_SRV_VERSION;
}
vmbus_prep_negotiate_resp(icmsghdrp, negop,
recv_buffer, util_fw_version,
kvp_srv_version);
} else {
kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
/*
* Stash away this global state for completing the
* transaction; note transactions are serialized.
*/
kvp_transaction.recv_len = recvlen;
kvp_transaction.recv_req_id = requestid;
kvp_transaction.kvp_msg = kvp_msg;
if (kvp_transaction.state < HVUTIL_READY) {
/* Userspace is not registered yet */
kvp_respond_to_host(NULL, HV_E_FAIL);
return;
}
kvp_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
/*
* Get the information from the
* user-mode component.
* component. This transaction will be
* completed when we get the value from
* the user-mode component.
* Set a timeout to deal with
* user-mode not responding.
*/
schedule_work(&kvp_sendkey_work);
schedule_delayed_work(&kvp_timeout_work,
HV_UTIL_TIMEOUT * HZ);
return;
}
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
vmbus_sendpacket(channel, recv_buffer,
recvlen, requestid,
VM_PKT_DATA_INBAND, 0);
host_negotiatied = NEGO_FINISHED;
}
} | false | false | false | false | false | 0 |
ifEqualDomainMG(DomainMG*g, DomainMG* right)
{
if(g->xSize != right->xSize) return FALSE;
if(g->ySize != right->ySize) return FALSE;
if(g->zSize != right->zSize) return FALSE;
if(g->x0 != right->x0) return FALSE;
if(g->y0 != right->y0) return FALSE;
if(g->z0 != right->z0) return FALSE;
if(g->xLength != right->xLength) return FALSE;
if(g->yLength != right->yLength) return FALSE;
if(g->zLength != right->zLength) return FALSE;
if(g->xh != right->xh) return FALSE;
if(g->yh != right->yh) return FALSE;
if(g->zh != right->zh) return FALSE;
if(g->size != right->size) return FALSE;
if(g->laplacianOrder != right->laplacianOrder) return FALSE;
return TRUE;
} | false | false | false | false | false | 0 |
buildScratchLoadStore(MachineBasicBlock::iterator MI,
unsigned LoadStoreOp,
unsigned Value,
unsigned ScratchRsrcReg,
unsigned ScratchOffset,
int64_t Offset,
RegScavenger *RS) const {
MachineBasicBlock *MBB = MI->getParent();
const MachineFunction *MF = MI->getParent()->getParent();
const SIInstrInfo *TII =
static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
LLVMContext &Ctx = MF->getFunction()->getContext();
DebugLoc DL = MI->getDebugLoc();
bool IsLoad = TII->get(LoadStoreOp).mayLoad();
bool RanOutOfSGPRs = false;
unsigned SOffset = ScratchOffset;
unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
unsigned Size = NumSubRegs * 4;
if (!isUInt<12>(Offset + Size)) {
SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
if (SOffset == AMDGPU::NoRegister) {
RanOutOfSGPRs = true;
SOffset = AMDGPU::SGPR0;
}
BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
.addReg(ScratchOffset)
.addImm(Offset);
Offset = 0;
}
if (RanOutOfSGPRs)
Ctx.emitError("Ran out of SGPRs for spilling VGPRS");
for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
unsigned SubReg = NumSubRegs > 1 ?
getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
Value;
bool IsKill = (i == e - 1);
BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
.addReg(SubReg, getDefRegState(IsLoad))
.addReg(ScratchRsrcReg, getKillRegState(IsKill))
.addReg(SOffset)
.addImm(Offset)
.addImm(0) // glc
.addImm(0) // slc
.addImm(0) // tfe
.addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
}
} | false | false | false | false | false | 0 |
calculateRhoMatrix(int id,const RhoDMatrix & rhoin,
const vector<RhoDMatrix> & rhoout) const {
// vectors for the helicities
vector<int> ihel1(_outspin.size()+1),ihel2(_outspin.size()+1);
// rhomatrix to be returned
RhoDMatrix output(_outspin[id], false);
// loop over all helicity components of the matrix element
// outer loop
Complex temp;
unsigned int ix,iy,iz;
int ixa,iya;
for(ix=0;ix<_matrixelement.size();++ix) {
// map the vector index to the helicities
for(ixa=_outspin.size();ixa>=0;--ixa)
ihel1[ixa]=(ix%_constants[ixa])/_constants[ixa+1];
// inner loop
for(iy=0;iy<_matrixelement.size();++iy) {
// map the vector index to the helicities
for(iya=_outspin.size();iya>=0;--iya)
ihel2[iya]=(iy%_constants[iya])/_constants[iya+1];
// matrix element piece
temp=_matrixelement[ix]*conj(_matrixelement[iy]);
// spin denisty matrix for the incoming particle
temp *= rhoin(ihel1[0],ihel2[0]);
// spin density matrix for the outgoing particles
for(iz=0;iz<_outspin.size()-1;++iz) {
if(int(iz)<id) temp*=rhoout[iz](ihel1[iz+1],ihel2[iz+1]);
else temp*=rhoout[iz](ihel1[iz+2],ihel2[iz+2]);
}
// add to the rho matrix
output(ihel1[id+1],ihel2[id+1])+=temp;
}
}
// return the answer
output.normalize();
return output;
} | false | false | false | false | false | 0 |
brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
void *nvram, u32 nvram_len)
{
struct brcmf_bus *bus = dev_get_drvdata(dev);
struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
struct brcmf_commonring **flowrings;
int ret;
u32 i;
brcmf_pcie_attach(devinfo);
ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
if (ret)
goto fail;
devinfo->state = BRCMFMAC_PCIE_STATE_UP;
ret = brcmf_pcie_init_ringbuffers(devinfo);
if (ret)
goto fail;
ret = brcmf_pcie_init_scratchbuffers(devinfo);
if (ret)
goto fail;
brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
ret = brcmf_pcie_request_irq(devinfo);
if (ret)
goto fail;
/* hook the commonrings in the bus structure. */
for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
bus->msgbuf->commonrings[i] =
&devinfo->shared.commonrings[i]->commonring;
flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
GFP_KERNEL);
if (!flowrings)
goto fail;
for (i = 0; i < devinfo->shared.nrof_flowrings; i++)
flowrings[i] = &devinfo->shared.flowrings[i].commonring;
bus->msgbuf->flowrings = flowrings;
bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings;
init_waitqueue_head(&devinfo->mbdata_resp_wait);
brcmf_pcie_intr_enable(devinfo);
if (brcmf_pcie_attach_bus(bus->dev) == 0)
return;
brcmf_pcie_bus_console_read(devinfo);
fail:
device_release_driver(dev);
} | false | false | false | false | false | 0 |
RegisterTupleQueueOnFunnel(TupleQueueFunnel *funnel, shm_mq_handle *handle)
{
if (funnel->nqueues < funnel->maxqueues)
{
funnel->queue[funnel->nqueues++] = handle;
return;
}
if (funnel->nqueues >= funnel->maxqueues)
{
int newsize = funnel->nqueues * 2;
Assert(funnel->nqueues == funnel->maxqueues);
funnel->queue = repalloc(funnel->queue,
newsize * sizeof(shm_mq_handle *));
funnel->maxqueues = newsize;
}
funnel->queue[funnel->nqueues++] = handle;
} | false | false | false | false | false | 0 |
iface_get_param_count(struct iface_rec *iface, int iface_all)
{
int num_found = 0, rc;
struct iface_param_count iface_params;
log_debug(8, "In iface_get_param_count\n");
iface_params.primary = iface;
iface_params.count = 0;
if (iface_all)
rc = iface_for_each_iface(&iface_params, 0, &num_found,
__iface_get_param_count);
else
rc = __iface_get_param_count(&iface_params, iface);
log_debug(8, "iface_get_param_count: rc = %d, count = %d\n",
rc, iface_params.count);
return iface_params.count;
} | false | false | false | false | false | 0 |
acpi_fix_checksum ( struct acpi_description_header *acpi ) {
unsigned int i = 0;
uint8_t sum = 0;
for ( i = 0 ; i < acpi->length ; i++ ) {
sum += *( ( ( uint8_t * ) acpi ) + i );
}
acpi->checksum -= sum;
} | false | false | false | false | false | 0 |
write(hsStream* S, plResManager* mgr) {
pfGUIControlMod::write(S, mgr);
S->writeInt(fControls.getSize());
for (size_t i=0; i<fControls.getSize(); i++)
mgr->writeKey(S, fControls[i]);
S->writeShort(fDefaultValue);
} | false | false | false | false | false | 0 |
update_softlimit(gfarm_time_t *exceedp, gfarm_time_t now, gfarm_time_t grace,
gfarm_int64_t val, gfarm_int64_t soft)
{
if (!quota_limit_is_valid(grace) /* disable all softlimit */ ||
!quota_limit_is_valid(soft) /* disable this softlimit */ ||
val <= soft /* not exceed */
) {
*exceedp = GFARM_QUOTA_INVALID;
return;
} else if (*exceedp >= 0)
return; /* already exceeded */
else if (val > soft)
*exceedp = now; /* exceed now */
} | false | false | false | false | false | 0 |
calc_scan_param (pixma_sane_t * ss, pixma_scan_param_t * sp)
{
int x1, y1, x2, y2;
int error;
memset (sp, 0, sizeof (*sp));
sp->channels = (OVAL (opt_mode).w == 0) ? 3 : 1;
sp->depth = (OVAL (opt_mode).w == 2) ? 1 : 8;
sp->xdpi = sp->ydpi = OVAL (opt_resolution).w;
#define PIXEL(x,dpi) (int)((SANE_UNFIX(x) / 25.4 * (dpi)) + 0.5)
x1 = PIXEL (OVAL (opt_tl_x).w, sp->xdpi);
x2 = PIXEL (OVAL (opt_br_x).w, sp->xdpi);
if (x2 < x1)
{
int temp = x1;
x1 = x2;
x2 = temp;
}
y1 = PIXEL (OVAL (opt_tl_y).w, sp->ydpi);
y2 = PIXEL (OVAL (opt_br_y).w, sp->ydpi);
if (y2 < y1)
{
int temp = y1;
y1 = y2;
y2 = temp;
}
#undef PIXEL
sp->x = x1;
sp->y = y1;
sp->w = x2 - x1;
sp->h = y2 - y1;
if (sp->w == 0)
sp->w = 1;
if (sp->h == 0)
sp->h = 1;
sp->tpu_offset_added = 0;
sp->gamma_table = (OVAL (opt_custom_gamma).b) ? ss->gamma_table : NULL;
sp->source = ss->source_map[OVAL (opt_source).w];
sp->mode = ss->mode_map[OVAL (opt_mode).w];
sp->adf_pageid = ss->page_count;
sp->threshold = 2.55 * OVAL (opt_threshold).w;
sp->threshold_curve = OVAL (opt_threshold_curve).w;
error = pixma_check_scan_param (ss->s, sp);
if (error < 0)
{
PDBG (pixma_dbg (1, "BUG:calc_scan_param() failed %d\n", error));
PDBG (print_scan_param (1, sp));
}
return error;
} | false | false | false | false | false | 0 |
xps_read_dir_part(xps_document *doc, char *name)
{
char buf[2048];
xps_part *part;
FILE *file;
int count, size, offset, i, n;
int seen_last = 0;
fz_strlcpy(buf, doc->directory, sizeof buf);
fz_strlcat(buf, name, sizeof buf);
/* All in one piece */
file = fopen(buf, "rb");
if (file)
{
fseek(file, 0, SEEK_END);
size = ftell(file);
fseek(file, 0, SEEK_SET);
part = xps_new_part(doc, name, size);
fread(part->data, 1, size, file);
fclose(file);
return part;
}
/* Count the number of pieces and their total size */
count = 0;
size = 0;
while (!seen_last)
{
sprintf(buf, "%s%s/[%d].piece", doc->directory, name, count);
file = fopen(buf, "rb");
if (!file)
{
sprintf(buf, "%s%s/[%d].last.piece", doc->directory, name, count);
file = fopen(buf, "rb");
seen_last = (file != NULL);
}
if (!file)
break;
count ++;
fseek(file, 0, SEEK_END);
size += ftell(file);
fclose(file);
}
if (!seen_last)
fz_throw(doc->ctx, "cannot find all pieces for part '%s'", name);
/* Inflate the pieces */
if (count)
{
part = xps_new_part(doc, name, size);
offset = 0;
for (i = 0; i < count; i++)
{
if (i < count - 1)
sprintf(buf, "%s%s/[%d].piece", doc->directory, name, i);
else
sprintf(buf, "%s%s/[%d].last.piece", doc->directory, name, i);
file = fopen(buf, "rb");
if (!file)
{
xps_free_part(doc, part);
fz_throw(doc->ctx, "cannot open file '%s'", buf);
}
n = fread(part->data + offset, 1, size - offset, file);
offset += n;
fclose(file);
}
return part;
}
fz_throw(doc->ctx, "cannot find part '%s'", name);
return NULL;
} | false | false | false | false | true | 1 |
gf_isom_get_media_timescale(GF_ISOFile *the_file, u32 trackNumber)
{
GF_TrackBox *trak;
trak = gf_isom_get_track_from_file(the_file, trackNumber);
if (!trak) return 0;
return trak->Media->mediaHeader->timeScale;
} | false | false | false | false | false | 0 |
operator[](int i) const
{
REPORT
if (i < 0 || i >= n) Throw(Logic_error("array index out of range"));
return a[i];
} | false | false | false | false | false | 0 |
charData(void *userData, const char *s, int len)
{
ParserState *ps = reinterpret_cast<ParserState *>(userData);
if (len)
{
std::string str(s,len);
ps->LastData[ps->Depth] += str;
}
} | false | false | false | false | false | 0 |
JS_XDRMemGetData(JSXDRState *xdr, uint32 *lp)
{
if (xdr->ops != &xdrmem_ops)
return NULL;
*lp = MEM_COUNT(xdr);
return MEM_BASE(xdr);
} | false | false | false | false | false | 0 |
forward_ldap_entry(struct pine *ps, LDAP_CHOOSE_S *winning_e)
{
STORE_S *srcstore = NULL;
SourceType srctype = CharStar;
dprint((9, "- forward_ldap_entry -\n"));
if((srcstore = prep_ldap_for_viewing(ps,winning_e,srctype,NULL)) != NULL){
forward_text(ps, so_text(srcstore), srctype);
ps->mangled_screen = 1;
so_give(&srcstore);
}
else
q_status_message(SM_ORDER, 0, 2, _("Error allocating space"));
} | false | false | false | false | false | 0 |
pdf_to_png(char *pdf)
{
char *png, *out;
if (pdf == NULL) return NULL;
if (strstr(pdf, ".pdf") != NULL)
png = strdup_new_extension(pdf, ".pdf", ".png");
else if (strstr(pdf, ".PDF") != NULL)
png = strdup_new_extension(pdf, ".PDF", ".png");
else
return NULL;
out = SysGraphicsConvert(CONVERT_PDF, 0, g_dots_per_inch, pdf, png);
safe_free(png);
return out;
} | false | false | false | false | false | 0 |
FDKsbrEnc_InitPSEncode(
HANDLE_PS_ENCODE hPsEncode,
const PS_BANDS psEncMode,
const FIXP_DBL iidQuantErrorThreshold
)
{
FDK_PSENC_ERROR error = PSENC_OK;
if (NULL==hPsEncode) {
error = PSENC_INVALID_HANDLE;
}
else {
if (PSENC_OK != (InitPSData(&hPsEncode->psData))) {
goto bail;
}
switch(psEncMode){
case PS_BANDS_COARSE:
case PS_BANDS_MID:
hPsEncode->nQmfIidGroups = QMF_GROUPS_LO_RES;
hPsEncode->nSubQmfIidGroups = SUBQMF_GROUPS_LO_RES;
FDKmemcpy(hPsEncode->iidGroupBorders, iidGroupBordersLoRes, (hPsEncode->nQmfIidGroups + hPsEncode->nSubQmfIidGroups + 1)*sizeof(INT));
FDKmemcpy(hPsEncode->subband2parameterIndex, subband2parameter20, (hPsEncode->nQmfIidGroups + hPsEncode->nSubQmfIidGroups) *sizeof(INT));
FDKmemcpy(hPsEncode->iidGroupWidthLd, iidGroupWidthLdLoRes, (hPsEncode->nQmfIidGroups + hPsEncode->nSubQmfIidGroups) *sizeof(UCHAR));
break;
default:
error = PSENC_INIT_ERROR;
goto bail;
}
hPsEncode->psEncMode = psEncMode;
hPsEncode->iidQuantErrorThreshold = iidQuantErrorThreshold;
FDKsbrEnc_initPsBandNrgScale(hPsEncode);
}
bail:
return error;
} | false | false | false | false | false | 0 |
compute_quant_weights(spx_lsp_t *qlsp, spx_word16_t *quant_weight, int order)
{
int i;
spx_word16_t tmp1, tmp2;
for (i=0;i<order;i++)
{
if (i==0)
tmp1 = qlsp[i];
else
tmp1 = qlsp[i]-qlsp[i-1];
if (i==order-1)
tmp2 = LSP_PI-qlsp[i];
else
tmp2 = qlsp[i+1]-qlsp[i];
if (tmp2<tmp1)
tmp1 = tmp2;
#ifdef FIXED_POINT
quant_weight[i] = DIV32_16(81920,ADD16(300,tmp1));
#else
quant_weight[i] = 10/(.04+tmp1);
#endif
}
} | false | false | false | false | false | 0 |
bonding_init(void)
{
int i;
int res;
pr_info("%s", bond_version);
res = bond_check_params(&bonding_defaults);
if (res)
goto out;
res = register_pernet_subsys(&bond_net_ops);
if (res)
goto out;
res = bond_netlink_init();
if (res)
goto err_link;
bond_create_debugfs();
for (i = 0; i < max_bonds; i++) {
res = bond_create(&init_net, NULL);
if (res)
goto err;
}
register_netdevice_notifier(&bond_netdev_notifier);
out:
return res;
err:
bond_destroy_debugfs();
bond_netlink_fini();
err_link:
unregister_pernet_subsys(&bond_net_ops);
goto out;
} | false | false | false | false | false | 0 |
cleanreturn(int retval, PyObject *freelist)
{
if (freelist && retval != 0) {
/* We were successful, reset the destructors so that they
don't get called. */
Py_ssize_t len = PyList_GET_SIZE(freelist), i;
for (i = 0; i < len; i++)
PyCapsule_SetDestructor(PyList_GET_ITEM(freelist, i), NULL);
}
Py_XDECREF(freelist);
return retval;
} | false | false | false | false | false | 0 |
gen_split_2385 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands)
{
rtx operand0;
rtx operand1;
rtx operand2;
rtx _val = 0;
start_sequence ();
#line 1020 "../../src/gcc/config/arm/neon.md"
{
if (!REG_P (operands[2]))
operands[2] = operands[0];
}
operand0 = operands[0];
(void) operand0;
operand1 = operands[1];
(void) operand1;
operand2 = operands[2];
(void) operand2;
emit_insn (gen_rtx_SET (VOIDmode,
operand2,
const0_rtx));
emit (gen_rtx_PARALLEL (VOIDmode,
gen_rtvec (2,
gen_rtx_SET (VOIDmode,
operand0,
gen_rtx_MINUS (DImode,
copy_rtx (operand2),
operand1)),
gen_hard_reg_clobber (CCmode, 24))));
_val = get_insns ();
end_sequence ();
return _val;
} | false | false | false | false | false | 0 |
mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
struct sk_buff *skb,
struct mwifiex_tx_param *tx_param)
{
struct usb_card_rec *card = adapter->card;
struct urb_context *context = NULL;
struct usb_tx_data_port *port = NULL;
u8 *data = (u8 *)skb->data;
struct urb *tx_urb;
int idx, ret;
if (adapter->is_suspended) {
mwifiex_dbg(adapter, ERROR,
"%s: not allowed while suspended\n", __func__);
return -1;
}
if (adapter->surprise_removed) {
mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
return -1;
}
mwifiex_dbg(adapter, INFO, "%s: ep=%d\n", __func__, ep);
if (ep == card->tx_cmd_ep) {
context = &card->tx_cmd;
} else {
for (idx = 0; idx < MWIFIEX_TX_DATA_PORT; idx++) {
if (ep == card->port[idx].tx_data_ep) {
port = &card->port[idx];
if (atomic_read(&port->tx_data_urb_pending)
>= MWIFIEX_TX_DATA_URB) {
port->block_status = true;
ret = -EBUSY;
goto done;
}
if (port->tx_data_ix >= MWIFIEX_TX_DATA_URB)
port->tx_data_ix = 0;
context =
&port->tx_data_list[port->tx_data_ix++];
break;
}
}
if (!port) {
mwifiex_dbg(adapter, ERROR, "Wrong usb tx data port\n");
return -1;
}
}
context->adapter = adapter;
context->ep = ep;
context->skb = skb;
tx_urb = context->urb;
usb_fill_bulk_urb(tx_urb, card->udev, usb_sndbulkpipe(card->udev, ep),
data, skb->len, mwifiex_usb_tx_complete,
(void *)context);
tx_urb->transfer_flags |= URB_ZERO_PACKET;
if (ep == card->tx_cmd_ep)
atomic_inc(&card->tx_cmd_urb_pending);
else
atomic_inc(&port->tx_data_urb_pending);
if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
mwifiex_dbg(adapter, ERROR,
"%s: usb_submit_urb failed\n", __func__);
if (ep == card->tx_cmd_ep) {
atomic_dec(&card->tx_cmd_urb_pending);
} else {
atomic_dec(&port->tx_data_urb_pending);
port->block_status = false;
if (port->tx_data_ix)
port->tx_data_ix--;
else
port->tx_data_ix = MWIFIEX_TX_DATA_URB;
}
return -1;
} else {
if (ep != card->tx_cmd_ep &&
atomic_read(&port->tx_data_urb_pending) ==
MWIFIEX_TX_DATA_URB) {
port->block_status = true;
ret = -ENOSR;
goto done;
}
}
return -EINPROGRESS;
done:
if (ep != card->tx_cmd_ep)
adapter->data_sent = mwifiex_usb_data_sent(adapter);
return ret;
} | false | false | false | true | false | 1 |
UnpicklePrologue(void)
{
m_startTable.resize(kPickleNumStates);
m_endTable.resize(kPickleNumStates);
m_startTable[kPickleData]["judgement"] = &GameRewards::NullHandler;
m_endTable[kPickleData]["judgement"] = &GameRewards::HandleJudgementEnd;
m_startTable[kPickleData]["timed"] = &GameRewards::NullHandler;
m_endTable[kPickleData]["timed"] = &GameRewards::HandleTimedEnd;
m_endTable[kPickleData]["rewards"] = &GameRewards::HandleRewardsEnd;
m_pickleState=kPickleData;
} | false | false | false | false | false | 0 |
http_get( HTTP_OBJ* this )
{
FILE* fp;
char buf[HTML_CHUNK_SIZE];
int bytes_read, bytes_written;
int error = 0;
int handler_id;
struct stat file_stat;
int chk_cnt = 0;
printf("received GET command: %s\n", this->rcvbuf );
/* check whether CGI handler exists */
if( ( handler_id = _find_cgi_handler( this ) ) >= 0 )
{
/* and if so invoke it */
error = _call_cgi_handler( this, handler_id );
}
else
{
/* otherwise deliver static content (html, javascript, jpeg, etc) */
/* check for correct file status ( must be ordinary file, no directory ) */
error = stat( this->frl, & file_stat );
if( !error )
{
if( !( file_stat.st_mode & S_IFREG ) )
{
HTTP_SendHeader( this, HTTP_ACK_NOT_FOUND );
return HTTP_FILE_NOT_FOUND;
}
}
/* set content length in http header */
_http_set_content_length_to_file_len( this );
/* open and copy static content from file system */
fp = fopen( this->frl, "r" );
if( fp == NULL )
{
HTTP_SendHeader( this, HTTP_ACK_NOT_FOUND );
return HTTP_FILE_NOT_FOUND;
}
else
{
/* generate header */
error = HTTP_SendHeader( this, HTTP_ACK_OK );
if( error < 0 )
{
return error;
}
}
/* write header/content separation line */
if( HTTP_SOCKET_SEND( this->socket, "\r\n\r\n", 4) != 4 )
{
error = HTTP_SEND_ERROR;
}
/* read file blockwise and send it to the server */
do {
bytes_read = fread( buf, sizeof(char), HTML_CHUNK_SIZE, fp );
bytes_written = HTTP_SOCKET_SEND( this->socket, buf, bytes_read );
chk_cnt += bytes_read;
} while( bytes_read > 0 && bytes_written == bytes_read );
fclose( fp );
}
return error;
} | false | false | false | false | true | 1 |
convert_cmyk_to_rgb (struct jpeg_decompress_struct *cinfo,
guchar **lines)
{
gint i, j;
g_return_if_fail (cinfo != NULL);
g_return_if_fail (cinfo->output_components == 4);
g_return_if_fail (cinfo->out_color_space == JCS_CMYK);
for (i = cinfo->rec_outbuf_height - 1; i >= 0; i--) {
guchar *p;
p = lines[i];
for (j = 0; j < cinfo->output_width; j++) {
int c, m, y, k;
c = p[0];
m = p[1];
y = p[2];
k = p[3];
if (cinfo->saw_Adobe_marker) {
p[0] = k*c / 255;
p[1] = k*m / 255;
p[2] = k*y / 255;
}
else {
p[0] = (255 - k)*(255 - c) / 255;
p[1] = (255 - k)*(255 - m) / 255;
p[2] = (255 - k)*(255 - y) / 255;
}
p[3] = 255;
p += 4;
}
}
} | false | false | false | false | false | 0 |
listRec(fcb* file){
fcb* temp;
/*If it is a directory*/
if(file->bits & FCB_DIR_BITMASK){
printf("Directory %s, %d files:\n", file->filename, file->dirHead->size);
temp = file->dirHead->head;
while(temp != NULL){
listRec(temp);
temp = temp->next;
}
printf("End of directory %s\n", file->filename);
}
else{
printf("Filename: %s Size: %d\n", file->filename, file->block_queue->size);
}
} | false | false | false | false | false | 0 |
delEvent( const QList<GenericPlayer *> & list, GenericEvent * event )
{
AttalPlayerSocket * socket;
uint nbSockets = (uint) list.count();
for( uint k = 0; k < nbSockets; ++k ) {
socket = findSocket(list.at(k));
if( socket->canSee( event->getCell() ) ) {
socket->sendDelEvent( event );
}
}
} | false | false | false | false | false | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.