functionSource stringlengths 20 97.4k | CWE-119 bool 2
classes | CWE-120 bool 2
classes | CWE-469 bool 2
classes | CWE-476 bool 2
classes | CWE-other bool 2
classes | combine int64 0 1 |
|---|---|---|---|---|---|---|
// Return a column vector whose i-th entry is the sum of squares of row i.
// REPORT is the newmat tracing macro.  Evaluate() may hand back a
// temporary; tDelete() below releases it if so.
sum_square_rows() const
{
REPORT
GeneralMatrix* gm = ((BaseMatrix&)*this).Evaluate();
int nr = gm->nrows();
ColumnVector ssq(nr);
if (gm->size() == 0) { REPORT ssq = 0.0; }
else
{
// Walk row by row; Storage()/Data() expose only the stored part
// of each row (relevant for band/triangular shapes).
MatrixRow mr(gm, LoadOnEntry);
for (int i = 1; i <= nr; ++i)
{
Real sum = 0.0;
int s = mr.Storage();
Real* in = mr.Data();
while (s--) sum += square(*in++);
ssq(i) = sum;
mr.Next();
}
}
gm->tDelete();  // free the evaluated temporary, if any
ssq.release(); return ssq.for_return();  // transfer ownership to caller
} | false | false | false | false | false | 0 |
/* <device> setdevice - : install the device on the operand stack as the
 * current output device.  When safety params are locked, only
 * re-installing the already-current device is permitted. */
zsetdevice(i_ctx_t *i_ctx_p)
{
gx_device *dev = gs_currentdevice(igs);
os_ptr op = osp;
int code = 0;
check_write_type(*op, t_device);
if (dev->LockSafetyParams) {	  /* do additional checking if locked  */
if(op->value.pdevice != dev) /* don't allow a different device    */
return_error(e_invalidaccess);
}
#ifndef PSI_INCLUDED
/* the language switching build shouldn't install a new device
here. The language switching machinery installs a shared
device. */
if (op->value.pdevice == 0)
return gs_note_error(gs_error_undefined);
code = gs_setdevice_no_erase(igs, op->value.pdevice);
if (code < 0)
return code;
#endif
make_bool(op, code != 0);	/* erase page if 1 */
invalidate_stack_devices(i_ctx_p);
clear_pagedevice(istate);
return code;
} | false | false | false | false | false | 0 |
/* uuconf command-table callback: record an execute-file name (argv[1]).
 * Files outside the spool directory are ignored; accepted names are
 * appended to the global pazSxqt_files array (grown one slot at a time). */
isxqt_file (puuconf, argc, argv, pvar, pinfo)
pointer puuconf ATTRIBUTE_UNUSED;
int argc;
char **argv;
pointer pvar ATTRIBUTE_UNUSED;
pointer pinfo ATTRIBUTE_UNUSED;
{
if (argc != 2 && argc != 3)
return UUCONF_CMDTABRET_CONTINUE;
/* If this file is not in the spool directory, just ignore it.  */
if (! fspool_file (argv[1]))
return UUCONF_CMDTABRET_CONTINUE;
++cSxqt_files;
pazSxqt_files = (char **) xrealloc ((pointer) pazSxqt_files,
cSxqt_files * sizeof (char *));
pazSxqt_files[cSxqt_files - 1] = zbufcpy (argv[1]);
return UUCONF_CMDTABRET_CONTINUE;
} | false | false | false | false | false | 0 |
/* Read message MsgNo_I's header from a JAM base.  Fills *Header_PS and,
 * when SubfieldPack_PPS is non-NULL, returns the parsed subfields (an
 * empty packet when the header carries none).  Returns 0 or a JAM_*
 * error code; I/O failures record errno in Base_PS->Errno_I. */
JAM_ReadMsgHeader( s_JamBase* Base_PS,
ulong MsgNo_I,
s_JamMsgHeader* Header_PS,
s_JamSubPacket** SubfieldPack_PPS )
{
s_JamIndex Index_S;
if ( !Base_PS || !Header_PS )
return JAM_BAD_PARAM;
/* find index record */
if ( fseek( Base_PS->IdxFile_PS, MsgNo_I * sizeof( s_JamIndex ), SEEK_SET ) ) {
Base_PS->Errno_I = errno;
return JAM_IO_ERROR;
}
/* read index record */
if ( 1 > freadjamindex(Base_PS->IdxFile_PS,&Index_S) ) {
Base_PS->Errno_I = errno;
return JAM_IO_ERROR;
}
/* message is not there */
if(Index_S.HdrOffset == 0xffffffff && Index_S.UserCRC == 0xffffffff)
{
return JAM_NO_MESSAGE;
}
/* find header */
if ( fseek( Base_PS->HdrFile_PS, Index_S.HdrOffset, SEEK_SET ) ) {
Base_PS->Errno_I = errno;
return JAM_IO_ERROR;
}
/* read header */
if ( 1 > freadjammsgheader(Base_PS->HdrFile_PS,Header_PS) ) {
Base_PS->Errno_I = errno;
return JAM_IO_ERROR;
}
/* are Subfields requested? */
if ( SubfieldPack_PPS && Header_PS->SubfieldLen ) {
s_JamSubPacket* SubPacket_PS;
s_JamSubfield Subfield_S;
char* Buf_PC;
char* Ptr_PC;
char* Roof_PC;
int BufSize_I = Header_PS->SubfieldLen;
Buf_PC = (void*) malloc( BufSize_I );
if ( !Buf_PC )
return JAM_NO_MEMORY;
/* read all subfields */
if ( 1 > fread( Buf_PC, BufSize_I, 1, Base_PS->HdrFile_PS ) ) {
Base_PS->Errno_I = errno;
free (Buf_PC);
return JAM_IO_ERROR;
}
SubPacket_PS = JAM_NewSubPacket();
if ( !SubPacket_PS ) {
free (Buf_PC);
return JAM_NO_MEMORY;
}
Roof_PC = Buf_PC + BufSize_I;
/* cut out the subfields */
for ( Ptr_PC = Buf_PC;
Ptr_PC < Roof_PC;
Ptr_PC += Subfield_S.DatLen + SIZE_JAMSAVESUBFIELD ) {
int Status_I;
getjamsubfield(Ptr_PC,&Subfield_S);
/* bounds check: a corrupt DatLen must not point past the buffer */
if((char *)Subfield_S.Buffer + Subfield_S.DatLen > Roof_PC) {
JAM_DelSubPacket( SubPacket_PS );
free (Buf_PC);
return JAM_CORRUPT_MSG;
}
Status_I = JAM_PutSubfield( SubPacket_PS, &Subfield_S );
if ( Status_I ) {
JAM_DelSubPacket( SubPacket_PS );
free (Buf_PC);
return Status_I;
}
}
free( Buf_PC );
*SubfieldPack_PPS = SubPacket_PS;
}
else
if ( SubfieldPack_PPS )
/* fields requested but none found */
/* return an empty packet */
*SubfieldPack_PPS = JAM_NewSubPacket();
return 0;
} | false | false | false | false | false | 0 |
e100_free(struct nic *nic)
{
	/* Release the driver's DMA-coherent memory block, if present. */
	if (!nic->mem)
		return;

	pci_free_consistent(nic->pdev, sizeof(struct mem),
			    nic->mem, nic->dma_addr);
	nic->mem = NULL;
} | false | false | false | false | false | 0 |
// Serialize router `r` to stream `f`.  When the router has live archive
// elements, wrap the configuration into an `ar`-style archive whose
// first member is "config"; otherwise write the raw configuration.
write_router_file(RouterT *r, FILE *f, ErrorHandler *errh)
{
if (!r)
return;
String config_str = r->configuration_string();
// create archive if necessary
const Vector<ArchiveElement> &archive = r->archive();
if (archive.size()) {
Vector<ArchiveElement> narchive;
// add configuration
ArchiveElement config_ae;
config_ae.name = "config";
config_ae.date = time(0);
config_ae.uid = geteuid();
config_ae.gid = getegid();
config_ae.mode = 0644;
config_ae.data = config_str;
narchive.push_back(config_ae);
// add other archive elements
for (int i = 0; i < archive.size(); i++)
if (archive[i].live() && archive[i].name != "config")
narchive.push_back(archive[i]);
// only archive when there is something besides "config" itself
if (narchive.size() > 1)
config_str = create_ar_string(narchive, errh);
}
fwrite(config_str.data(), 1, config_str.length(), f);
} | false | false | false | false | false | 0 |
/* Lua error handler: build a stack traceback into a luaL_Buffer and pass
 * the message to _ALERT (if defined).  All formatting into the fixed
 * `buff` now uses snprintf so a stray oversized field can never overrun
 * the stack buffer (the %.50s/%.70s precisions remain the primary cap). */
errorfb (lua_State *L) {
  int level = 1;  /* skip level 0 (it's this function) */
  int firstpart = 1;  /* still before eventual `...' */
  lua_Debug ar;
  luaL_Buffer b;
  luaL_buffinit(L, &b);
  luaL_addstring(&b, "error: ");
  luaL_addstring(&b, luaL_check_string(L, 1));
  luaL_addstring(&b, "\n");
  while (lua_getstack(L, level++, &ar)) {
    char buff[120];  /* bounded: every write below is snprintf-guarded */
    if (level == 2)
      luaL_addstring(&b, "stack traceback:\n");
    else if (level > LEVELS1 && firstpart) {
      /* no more than `LEVELS2' more levels? */
      if (!lua_getstack(L, level+LEVELS2, &ar))
        level--;  /* keep going */
      else {
        luaL_addstring(&b, " ...\n");  /* too many levels */
        while (lua_getstack(L, level+LEVELS2, &ar))  /* find last levels */
          level++;
      }
      firstpart = 0;
      continue;
    }
    snprintf(buff, sizeof(buff), "%4d: ", level-1);
    luaL_addstring(&b, buff);
    lua_getinfo(L, "Snl", &ar);
    switch (*ar.namewhat) {
      case 'g': case 'l':  /* global, local */
        snprintf(buff, sizeof(buff), "function `%.50s'", ar.name);
        break;
      case 'f':  /* field */
        snprintf(buff, sizeof(buff), "method `%.50s'", ar.name);
        break;
      case 't':  /* tag method */
        snprintf(buff, sizeof(buff), "`%.50s' tag method", ar.name);
        break;
      default: {
        if (*ar.what == 'm')  /* main? */
          snprintf(buff, sizeof(buff), "main of %.70s", ar.short_src);
        else if (*ar.what == 'C')  /* C function? */
          snprintf(buff, sizeof(buff), "%.70s", ar.short_src);
        else
          snprintf(buff, sizeof(buff), "function <%d:%.70s>", ar.linedefined, ar.short_src);
        ar.source = NULL;  /* do not print source again */
      }
    }
    luaL_addstring(&b, buff);
    if (ar.currentline > 0) {
      snprintf(buff, sizeof(buff), " at line %d", ar.currentline);
      luaL_addstring(&b, buff);
    }
    if (ar.source) {
      snprintf(buff, sizeof(buff), " [%.70s]", ar.short_src);
      luaL_addstring(&b, buff);
    }
    luaL_addstring(&b, "\n");
  }
  luaL_pushresult(&b);
  lua_getglobal(L, LUA_ALERT);
  if (lua_isfunction(L, -1)) {  /* avoid loop if _ALERT is not defined */
    lua_pushvalue(L, -2);  /* error message */
    lua_rawcall(L, 1, 0);
  }
  return 0;
} | true | true | false | false | false | 1 |
// Baseline position of the form widget.  List boxes defer to the generic
// form-element baseline; otherwise the text is centred vertically, with
// border compensation only when native borders are disabled (i.e. we
// draw the borders ourselves).
baselinePosition( bool f ) const
{
if (m_useListBox)
return RenderFormElement::baselinePosition(f);
int bTop = shouldDisableNativeBorders() ? borderTop() : 0;
int bBottom = shouldDisableNativeBorders() ? borderBottom() : 0;
int ret = (height()-RenderWidget::paddingTop()-RenderWidget::paddingBottom()-bTop-bBottom+1)/2;
ret += marginTop() + RenderWidget::paddingTop() + bTop;
ret += ((fontMetrics( f ).ascent())/2)-2;
return ret;
} | false | false | false | false | false | 0 |
dlist_get(dlist_head_t *head, dlist_t *dlist) {
	/* Pop a node via dlist_getl(), free the node wrapper and return the
	 * payload pointer; NULL when the list yielded nothing. */
	dlist_t *node = dlist_getl(head, dlist);
	void *payload;

	if (node == NULL)
		return NULL;

	payload = node->data;
	dlist_free(node);
	return payload;
} | false | false | false | false | false | 0 |
/* Queue a SET_SPEED request for this rport to the firmware.  Returns
 * BFA_FALSE (request traced, not sent) when no request-queue slot is
 * free; the caller is expected to retry later. */
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
struct bfa_rport_speed_req_s *m;
/*
 * check for room in queue to send request now
 */
m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
if (!m) {
bfa_trc(rp->bfa, rp->rport_info.speed);
return BFA_FALSE;
}
bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
bfa_fn_lpu(rp->bfa));
m->fw_handle = rp->fw_handle;
m->speed = (u8)rp->rport_info.speed;
/*
 * queue I/O message to firmware
 */
bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
return BFA_TRUE;
} | false | false | false | false | false | 0 |
/*
 * Check whether the threaded resolver finished for this connection.
 * On completion *entry is filled (NULL on lookup failure) and the
 * per-lookup thread data is torn down; while still pending, an expire
 * timer is re-armed with exponential back-off capped at 250 ms.
 */
Curl_resolver_is_resolved(struct connectdata *conn,
struct Curl_dns_entry **entry)
{
struct SessionHandle *data = conn->data;
struct thread_data *td = (struct thread_data*) conn->async.os_specific;
int done = 0;
*entry = NULL;
if(!td) {
DEBUGASSERT(td);
return CURLE_COULDNT_RESOLVE_HOST;
}
/* the resolver thread sets tsd.done under the same mutex */
Curl_mutex_acquire(td->tsd.mtx);
done = td->tsd.done;
Curl_mutex_release(td->tsd.mtx);
if(done) {
getaddrinfo_complete(conn);
if(!conn->async.dns) {
CURLcode rc = resolver_error(conn);
destroy_async_data(&conn->async);
return rc;
}
destroy_async_data(&conn->async);
*entry = conn->async.dns;
}
else {
/* poll for name lookup done with exponential backoff up to 250ms */
long elapsed = Curl_tvdiff(Curl_tvnow(), data->progress.t_startsingle);
if(elapsed < 0)
elapsed = 0;
if(td->poll_interval == 0)
/* Start at 1ms poll interval */
td->poll_interval = 1;
else if(elapsed >= td->interval_end)
/* Back-off exponentially if last interval expired */
td->poll_interval *= 2;
if(td->poll_interval > 250)
td->poll_interval = 250;
td->interval_end = elapsed + td->poll_interval;
Curl_expire(conn->data, td->poll_interval);
}
return CURLE_OK;
} | false | false | false | false | false | 0 |
/*
 * Core of sigtimedwait(2): dequeue one signal from @which, sleeping up
 * to @ts (forever when @ts is NULL).  SIGKILL/SIGSTOP can never be
 * waited for.  Returns the signal number, -EINTR when interrupted by
 * another signal, or -EAGAIN when the timeout elapsed.
 */
do_sigtimedwait(const sigset_t *which, siginfo_t *info,
const struct timespec *ts)
{
struct task_struct *tsk = current;
long timeout = MAX_SCHEDULE_TIMEOUT;
sigset_t mask = *which;
int sig;
if (ts) {
if (!timespec_valid(ts))
return -EINVAL;
timeout = timespec_to_jiffies(ts);
/*
 * We can be close to the next tick, add another one
 * to ensure we will wait at least the time asked for.
 */
if (ts->tv_sec || ts->tv_nsec)
timeout++;
}
/*
 * Invert the set of allowed signals to get those we want to block.
 */
sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
signotset(&mask);
spin_lock_irq(&tsk->sighand->siglock);
sig = dequeue_signal(tsk, &mask, info);
if (!sig && timeout) {
/*
 * None ready, temporarily unblock those we're interested
 * while we are sleeping in so that we'll be awakened when
 * they arrive. Unblocking is always fine, we can avoid
 * set_current_blocked().
 */
tsk->real_blocked = tsk->blocked;
sigandsets(&tsk->blocked, &tsk->blocked, &mask);
recalc_sigpending();
spin_unlock_irq(&tsk->sighand->siglock);
timeout = freezable_schedule_timeout_interruptible(timeout);
spin_lock_irq(&tsk->sighand->siglock);
__set_task_blocked(tsk, &tsk->real_blocked);
sigemptyset(&tsk->real_blocked);
sig = dequeue_signal(tsk, &mask, info);
}
spin_unlock_irq(&tsk->sighand->siglock);
if (sig)
return sig;
return timeout ? -EINTR : -EAGAIN;
} | false | false | false | false | false | 0 |
// Compute the signed separation of the two proxies at sweep time t,
// measured along the axis chosen by the separation-function setup.
// Cleanup: the original computed axisA/axisB via b2MulT in every case
// and never used them — dead work removed.
Evaluate(int32 indexA, int32 indexB, qreal t) const
{
	b2Transform xfA, xfB;
	m_sweepA.GetTransform(&xfA, t);
	m_sweepB.GetTransform(&xfB, t);

	switch (m_type)
	{
	case e_points:
		{
			// m_axis is in world space: project the vertex pair onto it.
			b2Vec2 localPointA = m_proxyA->GetVertex(indexA);
			b2Vec2 localPointB = m_proxyB->GetVertex(indexB);
			b2Vec2 pointA = b2Mul(xfA, localPointA);
			b2Vec2 pointB = b2Mul(xfB, localPointB);
			qreal separation = b2Dot(pointB - pointA, m_axis);
			return separation;
		}

	case e_faceA:
		{
			// m_axis/m_localPoint live in A's frame; rotate the normal out.
			b2Vec2 normal = b2Mul(xfA.R, m_axis);
			b2Vec2 pointA = b2Mul(xfA, m_localPoint);
			b2Vec2 localPointB = m_proxyB->GetVertex(indexB);
			b2Vec2 pointB = b2Mul(xfB, localPointB);
			qreal separation = b2Dot(pointB - pointA, normal);
			return separation;
		}

	case e_faceB:
		{
			// m_axis/m_localPoint live in B's frame; rotate the normal out.
			b2Vec2 normal = b2Mul(xfB.R, m_axis);
			b2Vec2 pointB = b2Mul(xfB, m_localPoint);
			b2Vec2 localPointA = m_proxyA->GetVertex(indexA);
			b2Vec2 pointA = b2Mul(xfA, localPointA);
			qreal separation = b2Dot(pointA - pointB, normal);
			return separation;
		}

	default:
		b2Assert(false);
		return 0.0f;
	}
} | false | false | false | false | false | 0 |
/* GladeWidgetAdaptor::get_children for GtkAssistant: chain up to the
 * GtkWindow adaptor's children, append the assistant's pages in page
 * order, and let glade_util_purify_list() clean the combined list. */
glade_gtk_assistant_get_children (GladeWidgetAdaptor *adaptor,
GObject *container)
{
GtkAssistant *assist = GTK_ASSISTANT (container);
gint i, n_pages = gtk_assistant_get_n_pages (assist);
GList *children = NULL, *parent_children;
/* Chain up */
if (GWA_GET_CLASS (GTK_TYPE_WINDOW)->get_children)
parent_children = GWA_GET_CLASS (GTK_TYPE_WINDOW)->get_children (adaptor, container);
else
parent_children = NULL;
/* prepend then reverse keeps this O(n) while preserving page order */
for (i = 0; i < n_pages; i++)
children = g_list_prepend (children, gtk_assistant_get_nth_page (assist, i));
children = g_list_reverse (children);
return glade_util_purify_list (g_list_concat (children, parent_children));
} | false | false | false | false | false | 0 |
doDebug(QJDns *jdns, int index)
{
	// Fetch the resolver's debug output first (preserving the original
	// call order), then hand it to the debug sink when one is attached,
	// tagged with our name plus the resolver index.
	QStringList pending = jdns->debugLines();
	if (db != 0)
		db->d->addDebug(dbname + QString::number(index), pending);
} | false | false | false | false | false | 0 |
/* Machine-generated eC constructor (do not hand-edit): allocates the
 * cache mutex and installs the string comparator on the servers tree. */
__ecereConstructor___ecereNameSpace__ecere__net__ServerNameCache(struct __ecereNameSpace__ecere__com__Instance * this)
{
struct __ecereNameSpace__ecere__net__ServerNameCache * __ecerePointer___ecereNameSpace__ecere__net__ServerNameCache = (struct __ecereNameSpace__ecere__net__ServerNameCache *)(this ? (((char *)this) + __ecereClass___ecereNameSpace__ecere__net__ServerNameCache->offset) : 0);
__ecerePointer___ecereNameSpace__ecere__net__ServerNameCache->mutex = __ecereNameSpace__ecere__com__eInstance_New(__ecereClass___ecereNameSpace__ecere__sys__Mutex);
(__ecerePointer___ecereNameSpace__ecere__net__ServerNameCache->servers.CompareKey = (void *)__ecereMethod___ecereNameSpace__ecere__sys__BinaryTree_CompareString);
return 0x1;
} | false | false | false | false | false | 0 |
/* Return a plist of all binary resolvents of c1 and c2 on predicate
 * `symbol` (literals must have opposite signs).  Parents are assigned
 * IDs and printed on first use; tautologous resolvents are discarded. */
resolve_on_symbol(int symbol, Topform c1, Topform c2)
{
Plist resolvents = NULL;
Literals l1, l2;
for (l1 = c1->literals; l1; l1 = l1->next)
for (l2 = c2->literals; l2; l2 = l2->next) {
if (l1->sign != l2->sign &&
SYMNUM(l1->atom) == symbol &&
SYMNUM(l2->atom) == symbol) {
Topform res;
if (c1->id == 0) {
assign_clause_id(c1);
fwrite_clause(stdout, c1, CL_FORM_STD);
}
if (c2->id == 0) {
assign_clause_id(c2);
fwrite_clause(stdout, c2, CL_FORM_STD);
}
res = resolve3(c1, l1, c2, l2, TRUE);
if (res) {
if (tautology(res->literals))
delete_clause(res);
else {
resolvents = plist_append(resolvents, res);
}
}
}
}
return resolvents;
} | false | false | false | false | false | 0 |
/* StringIO#each / #each_line: yield successive lines according to the
 * optional separator/limit arguments.  A numeric limit of 0 is rejected
 * up front because line extraction could never make progress. */
strio_each(int argc, VALUE *argv, VALUE self)
{
struct StringIO *ptr = StringIO(self);
VALUE line;
RETURN_ENUMERATOR(self, argc, argv);
if (argc > 0 && !NIL_P(argv[argc-1]) && NIL_P(rb_check_string_type(argv[argc-1])) &&
NUM2LONG(argv[argc-1]) == 0) {
rb_raise(rb_eArgError, "invalid limit: 0 for each_line");
}
while (!NIL_P(line = strio_getline(argc, argv, readable(ptr)))) {
rb_yield(line);
}
return self;
} | false | false | false | false | false | 0 |
/* Draw a random skip-list level, geometrically distributed with
 * p = 0.5 and clamped to max_level.
 * NOTE(review): assumes frand() returns values in (0,1]; frand() == 0
 * would yield log(0) = -inf and an undefined int conversion -- confirm. */
random_level()
{
const double P = 0.5;
int lvl = (int)(log(frand())/log(1.-P));
return lvl < max_level ? lvl : max_level;
} | false | false | false | false | false | 0 |
// Rebuild the selector's scroll area: remember the scroll position,
// recreate the PG_ScrollWidget, lay out the factory's items in a grid
// (deriving column/row counts from the first item), reconnect signals,
// and finally restore the scroll position.
reLoad( bool show )
{
int orgx = -1;
int orgy = -1;
if ( scrollWidget ) {
orgx = scrollWidget->GetScrollPosX();
orgy = scrollWidget->GetScrollPosY();
}
delete scrollWidget;
scrollWidget = new PG_ScrollWidget( this , PG_Rect( 0, 0, Width(), Height() - 30 ));
scrollWidget->SetTransparency(255);
widgets.clear();
int x = 0;
int y = 0;
factory->restart();
SelectionWidget* w;
while ( (w = factory->spawnNextItem( scrollWidget, PG_Point(x,y))) ) {
// derive grid geometry lazily from the first spawned item
if ( columnCount < 0 )
columnCount = scrollWidget->Width() / (w->Width() + gapWidth);
if ( visibleRowCount < 0 )
visibleRowCount = scrollWidget->Height() / (w->Height() + gapWidth);
w->itemSelected.connect( SigC::bind( SigC::slot( *this, &ItemSelectorWidget::itemSelected ), true ));
w->itemMarked.connect( SigC::slot( *this, &ItemSelectorWidget::markItem ));
w->setSelectionCallback( &selectionCallBack );
widgets.push_back ( w );
x += w->Width() + gapWidth;
if ( x + w->Width() + gapWidth >= scrollWidget->Width() ) {
x = 0;
y += w->Height() + gapWidth;
}
}
if ( orgx >= 0 && orgy >= 0 )
scrollWidget->ScrollTo ( orgx, orgy );
// note: the commented-out Update() leaves Show() as the if-body below
if ( show )
// scrollWidget->Update();
scrollWidget->Show();
} | false | false | false | false | false | 0 |
/* Resolve one thrown-object attack against the monster at (y, x).
 * Returns the attack_result: success == FALSE means a clean miss;
 * otherwise dmg carries the slay/critical-adjusted damage. */
make_ranged_throw(object_type *o_ptr, int y, int x) {
struct attack_result result = {FALSE, 0, 0, "hit"};
monster_type *m_ptr = cave_monster(cave, cave->m_idx[y][x]);
monster_race *r_ptr = &r_info[m_ptr->r_idx];
int bonus = p_ptr->state.to_h + o_ptr->to_h;
int chance = p_ptr->state.skills[SKILL_TO_HIT_THROW] + bonus * BTH_PLUS_ADJ;
/* distance penalty to the hit chance */
int chance2 = chance - distance(p_ptr->py, p_ptr->px, y, x);
int multiplier = 1;
const struct slay *best_s_ptr = NULL;
/* If we missed then we're done */
if (!test_hit(chance2, r_ptr->ac, m_ptr->ml)) return result;
result.success = TRUE;
improve_attack_modifier(o_ptr, m_ptr, &best_s_ptr, TRUE, FALSE);
/* If we have a slay, modify the multiplier appropriately */
if (best_s_ptr != NULL) {
result.hit_verb = best_s_ptr->range_verb;
multiplier += best_s_ptr->mult;
}
/* Apply damage: multiplier, slays, criticals, bonuses */
result.dmg = damroll(o_ptr->dd, o_ptr->ds);
result.dmg += o_ptr->to_d;
result.dmg *= multiplier;
result.dmg = critical_norm(o_ptr->weight, o_ptr->to_h, result.dmg, &result.msg_type);
return result;
} | false | false | false | false | false | 0 |
/* Produce the next random sample pattern from the encoded sequence:
 * length uniform in [minpatternlen, maxpatternlen], start position
 * random, special characters replaced by random alphabet symbols, and
 * every second sample reversed in place.  Returns the pattern buffer.
 * NOTE(review): assumes totallength > *patternlen; otherwise the
 * modulus for `start` would be zero -- confirm at call sites. */
gt_nextEnumpatterniterator(unsigned long *patternlen,
Enumpatterniterator *epi)
{
unsigned long start;
unsigned long j;
GtUchar cc;
if (epi->minpatternlen == epi->maxpatternlen)
{
*patternlen = epi->minpatternlen;
} else
{
*patternlen = (unsigned long) (epi->minpatternlen +
(random() %
(epi->maxpatternlen -
epi->minpatternlen+1)));
}
start =
(unsigned long) (random() % (epi->totallength - *patternlen));
gt_assert(start < (unsigned long) (epi->totallength - *patternlen));
if (epi->esr == NULL) {
epi->esr = gt_encseq_create_reader_with_readmode(epi->sampleencseq,
GT_READMODE_FORWARD,
start);
} else {
gt_encseq_reader_reinit_with_readmode(epi->esr, epi->sampleencseq,
GT_READMODE_FORWARD,
start);
}
for (j=0; j<*patternlen; j++)
{
cc = gt_encseq_reader_next_encoded_char(epi->esr);
if (ISSPECIAL(cc))
{
cc = (GtUchar) (random() % epi->alphasize);
}
epi->patternspace[j] = cc;
}
if (epi->samplecount & 1)
{
reversesequenceinplace(epi->patternspace,*patternlen);
}
epi->samplecount++;
epi->patternstat[*patternlen]++;
return epi->patternspace;
} | false | false | false | false | false | 0 |
/* Expand the two foreground ROM bitplanes into 2bpp words: bit j of the
 * two source bytes (half a ROM apart) becomes bits 2j / 2j+1 of the
 * output word, with bit order mirrored (j ^ 7).  Returns 0 on success,
 * 1 when the expansion buffer could not be allocated. */
init_fore(void)
{
UINT8 *fore_gfxdata = memory_region(REGION_GFX2);
int fore_length = memory_region_length(REGION_GFX2);
UINT16 *dst;
UINT8 *src;
int i, j;
/* allocate the expanded foreground data */
fore_expanded_data = auto_malloc(fore_length);
if (!fore_expanded_data)
return 1;
/* expand the foreground ROMs */
src = fore_gfxdata;
dst = fore_expanded_data;
for (i = 0; i < fore_length / 2; i++, src++)
{
int bits1 = src[0];
int bits2 = src[fore_length / 2];
int newbits = 0;
for (j = 0; j < 8; j++)
{
newbits |= ((bits1 >> (j ^ 7)) & 1) << (j * 2);
newbits |= ((bits2 >> (j ^ 7)) & 1) << (j * 2 + 1);
}
*dst++ = newbits;
}
return 0;
} | false | false | false | false | false | 0 |
/* Locate the client that should receive a MAD arriving on `port`:
 * responses are matched by transaction id, qp > 1 traffic by queue
 * pair, and qp0/qp1 requests go to a subscribed SM (issm) client.
 * Returns 0 when no suitable client exists. */
find_client(Port * port, int response, int qp, uint64_t trid)
{
Client *cl, *e;
DEBUG("port %" PRIx64 " res %d qp %d trid %" PRIx64,
port->portguid, response, qp, trid);
// response - match trids
if (response && (cl = client_by_trid(port, trid)))
return cl;
for (cl = clients, e = cl + IBSIM_MAX_CLIENTS; cl < e; cl++) {
if (!cl->pid || cl->port->portguid != port->portguid)
continue;
// if there is a non zero/1 qp (sma/sa) - match qps
if (qp > 1) {
if (qp == cl->qp)
return cl;
// zero qp - only issm clients may get requests
} else if (!response && cl->issm)
return cl;
}
DEBUG("no client found");
return 0;
} | false | false | false | false | false | 0 |
/* NUMAL: Gaussian elimination with an error bound.  Runs gsselm(); when
 * elimination completed all n steps (aux[3] == n) it augments aux with
 * the error estimate from erbelm(), based on the 1-norm of the inverse. */
gsserb(real_t **a, int n, real_t aux[], int ri[], int ci[])
{
void gsselm(real_t **, int, real_t [], int [], int []);
real_t onenrminv(real_t **, int);
void erbelm(int, real_t [], real_t);
gsselm(a,n,aux,ri,ci);
if (aux[3] == n) erbelm(n,aux,onenrminv(a,n));
} | false | false | false | false | false | 0 |
/* Convert the multibyte string `buf` into a gregorio_character list and
 * return its first element (NULL for NULL input or on allocation
 * failure).
 *
 * Fix: the wide buffer is now zero-initialized (calloc) so the scan
 * below always terminates even when gregorio_mbstowcs converts fewer
 * than `len` characters and writes no terminator itself — previously
 * that could read past the converted data (buffer over-read). */
gregorio_build_char_list_from_buf (char *buf)
{
  int i = 0;
  size_t len;
  grewchar *gwstring;
  gregorio_character *current_character = NULL;
  if (buf == NULL)
    {
      return NULL;
    }
  len = strlen (buf);   /* length of the syllable in bytes */
  /* zero-filled so a terminator is always present after conversion */
  gwstring = (grewchar *) calloc (len + 1, sizeof (grewchar));
  if (gwstring == NULL)
    {
      return NULL;
    }
  gregorio_mbstowcs (gwstring, buf, len);   /* convert into wide chars */
  /* append each converted character to the gregorio_character list */
  while (gwstring[i])
    {
      gregorio_add_character (&current_character, gwstring[i]);
      i++;
    }
  free (gwstring);
  gregorio_go_to_first_character (&current_character);
  return current_character;
} | false | true | false | false | false | 1 |
/* Append macro-expansion node `mtp` after `tail`, merging where
 * possible: adjacent text nodes are concatenated, duplicate `##`
 * concat nodes collapse, and whitespace adjacent to a `##` is stripped.
 * Consumed nodes are freed.  Returns the new list tail. */
combine_mtext(mtext_t *tail, mtext_t *mtp)
{
if(!tail)
return mtp;
if(!mtp)
return tail;
if(tail->type == exp_text && mtp->type == exp_text)
{
char *new_text;
new_text = pp_xrealloc(tail->subst.text, strlen(tail->subst.text)+strlen(mtp->subst.text)+1);
if(!new_text)
return mtp;
tail->subst.text = new_text;
strcat(tail->subst.text, mtp->subst.text);
free(mtp->subst.text);
free(mtp);
return tail;
}
if(tail->type == exp_concat && mtp->type == exp_concat)
{
free(mtp);
return tail;
}
if(tail->type == exp_concat && mtp->type == exp_text)
{
int len = strlen(mtp->subst.text);
while(len)
{
/* FIXME: should delete space from head of string */
if(isspace(mtp->subst.text[len-1] & 0xff))
mtp->subst.text[--len] = '\0';
else
break;
}
if(!len)
{
free(mtp->subst.text);
free(mtp);
return tail;
}
}
if(tail->type == exp_text && mtp->type == exp_concat)
{
int len = strlen(tail->subst.text);
while(len)
{
if(isspace(tail->subst.text[len-1] & 0xff))
tail->subst.text[--len] = '\0';
else
break;
}
if(!len)
{
/* the text node became empty: replace it in-place with the ## node */
mtp->prev = tail->prev;
mtp->next = tail->next;
if(tail->prev)
tail->prev->next = mtp;
free(tail->subst.text);
free(tail);
return mtp;
}
}
tail->next = mtp;
mtp->prev = tail;
return mtp;
} | false | false | false | false | false | 0 |
/* Read wide-character lines from fp into *list (grown in DEFNUM steps),
 * updating *entries and the maximum display width *maxlength.  Blank
 * lines are skipped; over-long lines are truncated with a one-time
 * warning per line.  Returns nonzero when any line was truncated.
 *
 * Fix: the result of wcsdup() was stored unchecked — a NULL on
 * allocation failure would later be dereferenced by consumers of the
 * list.  We now fail loudly, consistent with the x*alloc wrappers. */
input(FILE *fp, int *maxlength, wchar_t ***list, int *entries)
{
	static int maxentry = DEFNUM;
	int len, lineno = 1, reportedline = 0, eval = 0;
	wchar_t *p, buf[MAXLINELEN];
	wchar_t **local_list = *list;
	int local_entries = *entries;

	if (!local_list)
		local_list = xcalloc(maxentry, sizeof(wchar_t *));

	while (fgetws(buf, MAXLINELEN, fp)) {
		/* skip leading whitespace; ignore blank lines entirely */
		for (p = buf; *p && iswspace(*p); ++p)
			;
		if (!*p)
			continue;
		/* no newline and not EOF: the line did not fit in buf */
		if (!(p = wcschr(p, '\n')) && !feof(fp)) {
			if (reportedline < lineno) {
				warnx(_("line %d is too long, output will be truncated"),
					lineno);
				reportedline = lineno;
			}
			eval = 1;
			continue;
		}
		lineno++;
		if (!feof(fp))
			*p = '\0';	/* strip the trailing newline */
		len = wcs_width(buf);	/* display width, not wchar count */
		if (*maxlength < len)
			*maxlength = len;
		if (local_entries == maxentry) {
			maxentry += DEFNUM;
			local_list = xrealloc(local_list,
				(u_int)maxentry * sizeof(wchar_t *));
		}
		local_list[local_entries] = wcsdup(buf);
		if (!local_list[local_entries])
			err(EXIT_FAILURE, _("failed to allocate memory"));
		local_entries++;
	}
	*list = local_list;
	*entries = local_entries;
	return eval;
} | false | false | false | true | false | 1 |
/*
 * Internal read helper: (re)submit @bp as a read.  Async reads return
 * right after submission; synchronous reads wait for completion and
 * return its status.
 */
_xfs_buf_read(
xfs_buf_t *bp,
xfs_buf_flags_t flags)
{
ASSERT(!(flags & XBF_WRITE));
ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
/* reset direction/readahead state, then take it from @flags */
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
if (flags & XBF_ASYNC) {
xfs_buf_submit(bp);
return 0;
}
return xfs_buf_submit_wait(bp);
} | false | false | false | false | false | 0 |
/* Distribution-visitor callback: `key` is a length, `value` its count.
 * Accumulates running totals into the Nstats in `data`: over-limit
 * counters, the median key (first to reach half the items) and, for
 * each requested threshold, the N-statistic key/count pair. */
calcNstats(unsigned long key, unsigned long long value,
void *data)
{
Nstats *nstats = data;
unsigned int i;
nstats->current_len += (key * value);
nstats->current_num += value;
for (i = 0; i < (unsigned int) NOF_LIMITS; i++)
{
if ((unsigned long long) key > nstats->limit[i])
nstats->larger_than_limit[i] = nstats->current_num;
}
if (nstats->median == 0 && nstats->current_num >= nstats->half_num)
{
nstats->median = (unsigned long long) key;
}
for (i = 0; i < nstats->nofstats; i++)
{
if (!nstats->done[i] && (nstats->current_len >= nstats->min[i]))
{
nstats->done[i] = true;
nstats->nvalue[i] = key;
nstats->lvalue[i] = (unsigned long) nstats->current_num;
}
}
} | false | false | false | false | false | 0 |
/* Free each node of a vgroup-info list plus the list array itself.
 * Always returns NULL so callers can reset their pointer in one step. */
free_vginfo_list(
vg_info_t **nodelist,
int32 num_items )
{
intn i;
/* if the list is not NULL, free each node then reset the list to NULL */
if (nodelist != NULL)
{
for (i = 0; i < num_items; i++)
if (nodelist[i] != NULL)
nodelist[i] = free_node_vg_info_t(nodelist[i]);
HDfree(nodelist);
}
return( NULL );
} | false | false | false | false | false | 0 |
bsbufflength (struct bStream * s, int sz) {
	/* Query or set the stream's read-buffer length cap: sz == 0 is a
	 * pure query, a positive sz installs the new cap.  Returns the
	 * previous cap, or BSTR_ERR on invalid arguments. */
	int prev;
	if (s == NULL || sz < 0)
		return BSTR_ERR;
	prev = s->maxBuffSz;
	if (sz != 0)
		s->maxBuffSz = sz;
	return prev;
} | false | false | false | false | false | 0 |
/* ctypes Pointer sq_ass_item: store `value` at pointer[index].  Item
 * deletion and NULL-pointer access raise; the index is used as-is
 * (pointers carry no length, so no negative-index wrapping). */
Pointer_ass_item(PyObject *_self, Py_ssize_t index, PyObject *value)
{
CDataObject *self = (CDataObject *)_self;
Py_ssize_t size;
Py_ssize_t offset;
StgDictObject *stgdict, *itemdict;
PyObject *proto;
if (value == NULL) {
PyErr_SetString(PyExc_TypeError,
"Pointer does not support item deletion");
return -1;
}
if (*(void **)self->b_ptr == NULL) {
PyErr_SetString(PyExc_ValueError,
"NULL pointer access");
return -1;
}
stgdict = PyObject_stgdict((PyObject *)self);
assert(stgdict); /* Cannot be NULL fr pointer instances */
proto = stgdict->proto;
assert(proto);
itemdict = PyType_stgdict(proto);
assert(itemdict); /* Cannot be NULL because the itemtype of a pointer
is always a ctypes type */
size = itemdict->size;
offset = index * itemdict->size;
return PyCData_set((PyObject *)self, proto, stgdict->setfunc, value,
index, size, (*(char **)self->b_ptr) + offset);
} | false | false | false | false | false | 0 |
fnet_get(struct fpga_model* model, net_idx_t net_i)
{
	// Works even when model->rc records an earlier error.
	int out_of_range = (net_i <= NO_NET)
		|| (net_i > model->highest_used_net);

	if (out_of_range) {
		fprintf(stderr, "%s:%i net_i %i highest_used %i\n", __FILE__,
			__LINE__, net_i, model->highest_used_net);
		return 0;
	}
	// valid indices start one past NO_NET, while nets[] is 0-based
	return &model->nets[net_i - 1];
} | false | false | false | false | false | 0 |
/* Storage-driver trampoline: resolve _ds_get_spamrecord in the loaded
 * driver via dlsym and forward the call.  Returns EFAILURE when the
 * symbol is missing from the driver module. */
_ds_get_spamrecord (
DSPAM_CTX * CTX,
unsigned long long token,
struct _ds_spam_stat *stat)
{
int (*ptr)(DSPAM_CTX *, unsigned long long, struct _ds_spam_stat *);
ptr = (int (*)(DSPAM_CTX *, unsigned long long, struct _ds_spam_stat *))(intptr_t)dlsym(_drv_handle, "_ds_get_spamrecord");
if (!ptr) {
LOG(LOG_CRIT, "dlsym(_ds_get_spamrecord) failed: %s", dlerror());
return EFAILURE;
}
return (*ptr)(CTX, token, stat);
} | false | false | false | false | false | 0 |
/* Nonzero if the value at `idx` is a number or a string convertible to
 * one; the converted value itself is discarded into `n`. */
lua_isnumber (lua_State *L, int idx) {
TValue n;
const TValue *o = index2adr(L, idx);
return tonumber(o, &n);
} | false | false | false | false | false | 0 |
/* Iterate the QR timing-pattern cells: first the vertical run (x == 6),
 * then the horizontal run (y == 6).  Writes the next cell to (*x,*y)
 * and returns `this`; NULL once the pattern is exhausted. */
each_timing_pattern_pixel(int *x,int *y)
{
if(this->_timing_c<this->cells_par_side){
*x=6;
*y=this->_timing_c;
}
else if(this->_timing_c<this->cells_par_side*2){
*x=this->_timing_c-this->cells_par_side;
*y=6;
}
else
return(NULL);
this->_timing_c++;
return(this);
} | false | false | false | false | false | 0 |
/* Build the interpolation table `tb` from the file-sampled angle data:
 * N-1 evenly spaced bins over [0, PI], spline-resampled energy/force,
 * per-bin deltas, and second-derivative arrays for cubic evaluation. */
compute_table(Table *tb)
{
// delta = table spacing in angle for N-1 bins
int tlm1 = tablength-1;
tb->delta = MY_PI / tlm1;
tb->invdelta = 1.0/tb->delta;
tb->deltasq6 = tb->delta*tb->delta / 6.0;
// N-1 evenly spaced bins in angle from 0 to PI
// ang,e,f = value at lower edge of bin
// de,df values = delta values of e,f
// ang,e,f are N in length so de,df arrays can compute difference
memory->create(tb->ang,tablength,"angle:ang");
memory->create(tb->e,tablength,"angle:e");
memory->create(tb->de,tlm1,"angle:de");
memory->create(tb->f,tablength,"angle:f");
memory->create(tb->df,tlm1,"angle:df");
memory->create(tb->e2,tablength,"angle:e2");
memory->create(tb->f2,tablength,"angle:f2");
double a;
for (int i = 0; i < tablength; i++) {
a = i*tb->delta;
tb->ang[i] = a;
tb->e[i] = splint(tb->afile,tb->efile,tb->e2file,tb->ninput,a);
tb->f[i] = splint(tb->afile,tb->ffile,tb->f2file,tb->ninput,a);
}
for (int i = 0; i < tlm1; i++) {
tb->de[i] = tb->e[i+1] - tb->e[i];
tb->df[i] = tb->f[i+1] - tb->f[i];
}
// end-point slopes for the energy spline come from the forces
double ep0 = - tb->f[0];
double epn = - tb->f[tlm1];
spline(tb->ang,tb->e,tablength,ep0,epn,tb->e2);
spline(tb->ang,tb->f,tablength,tb->fplo,tb->fphi,tb->f2);
} | false | false | false | false | false | 0 |
/* Emit the auto-generated pygetdata constant table as C source on
 * stdout, skipping entries of type 9 and appending the Python numeric
 * type aliases plus the NULL terminator. */
Python(void)
{
int i;
printf(
"/* This code is automatically generated. "
"Changes made here will be lost. */\n#define NO_IMPORT_ARRAY\n"
"#include \"pygetdata.h\"\n"
"const struct gdpy_constant_t gdpy_constant_list[] = {\n");
for (i = 0; constant_list[i].lname != NULL; ++i)
if (constant_list[i].type != 9)
printf("{\"%s\", %s}, ", constant_list[i].sname, constant_list[i].lname);
/* Python numerical type aliases */
printf(
"{\"INT\", GD_INT32}, "
"{\"LONG\", GD_INT64}, "
"{\"ULONG\", GD_UINT64}, "
"{\"FLOAT\", GD_FLOAT64}, "
"{\"COMPLEX\", GD_COMPLEX128}, "
"{NULL, 0}};\n"
);
} | false | false | false | false | false | 0 |
/* Build and show the modal "tolerance for point group" dialog: two
 * tolerance frames side by side, a Cancel button, and an OK button that
 * commits the temporary values (and invokes myFunc, when given) before
 * closing.  `win` becomes the transient parent if it is a widget. */
createToleranceWindow(GtkWidget* win, GabeditSignalFunc myFunc)
{
GtkWidget *dialogWindow = NULL;
GtkWidget *button;
GtkWidget *frame;
GtkWidget *hbox;
gchar title[BSIZE];
dialogWindow = gtk_dialog_new();
gtk_widget_realize(GTK_WIDGET(dialogWindow));
sprintf(title, _("Tolerance for computing the point group"));
gtk_window_set_title(GTK_WINDOW(dialogWindow),title);
gtk_window_set_modal (GTK_WINDOW (dialogWindow), TRUE);
gtk_window_set_position(GTK_WINDOW(dialogWindow),GTK_WIN_POS_CENTER);
g_signal_connect(G_OBJECT(dialogWindow), "delete_event", (GCallback)destroy_button_windows, NULL);
g_signal_connect(G_OBJECT(dialogWindow), "delete_event", (GCallback)gtk_widget_destroy, NULL);
frame = gtk_frame_new (NULL);
gtk_widget_show (frame);
gtk_box_pack_start (GTK_BOX (GTK_WIDGET(GTK_DIALOG(dialogWindow)->vbox)), frame, TRUE, TRUE, 3);
hbox = gtk_hbox_new (FALSE, 3);
gtk_widget_show (hbox);
gtk_container_add (GTK_CONTAINER (frame), hbox);
createTolerancePositionFrame(hbox);
createTolerancePrincipalAxisFrame(hbox);
gtk_box_set_homogeneous (GTK_BOX( GTK_DIALOG(dialogWindow)->action_area), TRUE);
button = create_button(dialogWindow,"Cancel");
gtk_box_pack_end (GTK_BOX( GTK_DIALOG(dialogWindow)->action_area), button, FALSE, TRUE, 5);
GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
g_signal_connect_swapped(G_OBJECT(button), "clicked", (GCallback)destroy_button_windows, GTK_OBJECT(dialogWindow));
g_signal_connect_swapped(G_OBJECT(button), "clicked", (GCallback)gtk_widget_destroy, GTK_OBJECT(dialogWindow));
button = create_button(dialogWindow,"OK");
gtk_box_pack_start (GTK_BOX( GTK_DIALOG(dialogWindow)->action_area), button, FALSE, TRUE, 5);
GTK_WIDGET_SET_FLAGS(button, GTK_CAN_DEFAULT);
gtk_widget_grab_default(button);
/* OK: commit tolerances, run the optional callback, then close */
g_signal_connect_swapped(G_OBJECT(button), "clicked", (GCallback)setToleranceParametersFromTmp, GTK_OBJECT(dialogWindow));
if(myFunc) g_signal_connect_swapped(G_OBJECT(button), "clicked", (GCallback)myFunc, GTK_OBJECT(dialogWindow));
g_signal_connect_swapped(G_OBJECT(button), "clicked", (GCallback)destroy_button_windows, GTK_OBJECT(dialogWindow));
g_signal_connect_swapped(G_OBJECT(button), "clicked", (GCallback)gtk_widget_destroy, GTK_OBJECT(dialogWindow));
add_button_windows(title,dialogWindow);
gtk_widget_show_all(dialogWindow);
if(GTK_IS_WIDGET(win))
gtk_window_set_transient_for(GTK_WINDOW(dialogWindow),GTK_WINDOW(win));
} | false | true | false | false | false | 1 |
pair_get_wx (struct hash_table_s * table, const char * key)
{
struct pair * item, token;
token.key = (char *) key;
item = (struct pair *) hash_find_item (table, &token);
if (item)
return item->wx;
else
return -1;
} | false | false | false | false | false | 0 |
/* V4L2 qbuf for the MFC decoder: route the buffer to the source
 * (bitstream) or destination (capture) vb2 queue.  Rejected after an
 * unrecoverable error, and on the output queue after the EOS command. */
vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
if (ctx->state == MFCINST_ERROR) {
mfc_err("Call on QBUF after unrecoverable error\n");
return -EIO;
}
if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (ctx->state == MFCINST_FINISHING) {
mfc_err("Call on QBUF after EOS command\n");
return -EIO;
}
return vb2_qbuf(&ctx->vq_src, buf);
} else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
return vb2_qbuf(&ctx->vq_dst, buf);
}
return -EINVAL;
} | false | false | false | false | false | 0 |
/* Emit the "close" signal on @session.  Must not be called on a session
 * that is already closed. */
inf_session_close(InfSession* session)
{
g_return_if_fail(INF_IS_SESSION(session));
g_return_if_fail(inf_session_get_status(session) != INF_SESSION_CLOSED);
g_signal_emit(G_OBJECT(session), session_signals[CLOSE], 0);
} | false | false | false | false | false | 0 |
/* ethtool set_pauseparam: update pause flags under the device lock.
 * Autonegotiated flow control needs a full halt/re-init so the link is
 * renegotiated; manual settings are programmed into the MAC directly. */
b44_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct b44 *bp = netdev_priv(dev);
spin_lock_irq(&bp->lock);
if (epause->autoneg)
bp->flags |= B44_FLAG_PAUSE_AUTO;
else
bp->flags &= ~B44_FLAG_PAUSE_AUTO;
if (epause->rx_pause)
bp->flags |= B44_FLAG_RX_PAUSE;
else
bp->flags &= ~B44_FLAG_RX_PAUSE;
if (epause->tx_pause)
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
} else {
__b44_set_flow_ctrl(bp, bp->flags);
}
spin_unlock_irq(&bp->lock);
b44_enable_ints(bp);
return 0;
} | false | false | false | false | false | 0 |
getResourceInt(const string& resource_path)
{
return atoi(getResourceStr(resource_path).c_str());
} | false | false | false | false | false | 0 |
eepro_poll(struct nic *nic, int retrieve)
{
/* Poll the EtherExpress Pro/10 receive ring for a completed frame.
 * Returns 1 with nic->packet / nic->packetlen filled in when a frame
 * was copied out, 0 otherwise.  With retrieve == 0 it only reports
 * that a frame is pending (see caveat in the comment below). */
unsigned int rcv_car = rx_start;
unsigned int rcv_event, rcv_status, rcv_next_frame, rcv_size;
/* return true if there's an ethernet packet ready to read */
/* nic->packet should contain data on return */
/* nic->packetlen should contain length of data */
#if 0
if ((inb(nic->ioaddr + STATUS_REG) & 0x40) == 0)
return (0);
outb(0x40, nic->ioaddr + STATUS_REG);
#endif
/* Point the host address register at the current frame and read the
 * receive-event word; anything other than RCV_DONE means no frame. */
outw(rcv_car, nic->ioaddr + HOST_ADDRESS_REG);
rcv_event = inw(nic->ioaddr + IO_PORT);
if (rcv_event != RCV_DONE)
return (0);
/* FIXME: I'm guessing this might not work with this card, since
it looks like once a rcv_event is started it must be completed.
maybe there's another way. */
if ( ! retrieve ) return 1;
/* Reads from IO_PORT auto-increment through the frame header:
 * status word, link to the next frame, then the byte count. */
rcv_status = inw(nic->ioaddr + IO_PORT);
rcv_next_frame = inw(nic->ioaddr + IO_PORT);
rcv_size = inw(nic->ioaddr + IO_PORT);
#if 0
printf("%hX %hX %d %hhX\n", rcv_status, rcv_next_frame, rcv_size,
inb(nic->ioaddr + STATUS_REG));
#endif
if ((rcv_status & (RX_OK|RX_ERROR)) != RX_OK) {
printf("Receive error %hX\n", rcv_status);
return (0);
}
/* Low 14 bits hold the byte count; copy it out a word at a time. */
rcv_size &= 0x3FFF;
insw(nic->ioaddr + IO_PORT, nic->packet, ((rcv_size + 3) >> 1));
#if 0
{
int i;
for (i = 0; i < 48; i++) {
printf("%hhX", nic->packet[i]);
putchar(i % 16 == 15 ? '\n' : ' ');
}
}
#endif
nic->packetlen = rcv_size;
/* Advance the ring: new stop pointer is just past the consumed frame
 * (header + payload), wrapping at 0 to the upper limit. */
rcv_car = (rx_start + RCV_HEADER + rcv_size);
rx_start = rcv_next_frame;
/*
hex_dump(rcv_car, nic->packetlen);
*/
if (rcv_car == 0)
rcv_car = ((RCV_UPPER_LIMIT << 8) | 0xff);
outw(rcv_car - 1, nic->ioaddr + RCV_STOP);
return (1);
}
vinflate_stored()
/* "decompress" an inflated type 0 (stored) block. */
{
/* NOTE: NEEDBITS/DUMPBITS are macros that operate on the locals b and k,
 * so the exact statement order below is significant.  Returns 0 on
 * success, 1 on a corrupt stream or oversized output. */
unsigned n; /* number of bytes in block */
unsigned w; /* current window position */
register ulg b; /* bit buffer */
register unsigned k; /* number of bits in bit buffer */
/* make local copies of globals */
b = bb; /* initialize bit buffer */
k = bk;
w = wp; /* initialize window position */
/* go to 16 byte boundary */
n = k & 15;
DUMPBITS(n);
/* get the length and its complement */
NEEDBITS(16)
n = ((unsigned)b & 0xffff);
DUMPBITS(16)
NEEDBITS(16)
/* A stored block's length is followed by its one's complement; a
 * mismatch means the stream is corrupt. */
if (n != (unsigned)((~b) & 0xffff)) {
DBG(("Stored block length comlpement doesn't match\n"))
return 1; /* error in compressed data */
}
DUMPBITS(16)
/* read and output the compressed data */
while (n--)
{
NEEDBITS(8)
slide[w++] = (uch)b;
/* flush the sliding window whenever it fills up */
if (w == WSIZE)
{
if (vflush_output(w)) {
DBG(("Buffer was unexpectedly large\n"))
return 1;
}
w = 0;
}
DUMPBITS(8)
}
/* restore the globals from the locals */
wp = w; /* restore global window pointer */
bb = b; /* restore global bit buffer */
bk = k;
return 0;
}
xfs_pathconf( XfsCookie *fc, int16 which )
{
/* MiNT Dpathconf() emulation for the host filesystem: report filesystem
 * limits/capabilities for the path referenced by FC.  WHICH selects the
 * parameter (-1 queries the highest supported selector).  Returns the
 * value or a negative TOS error code. */
char fpathName[MAXPATHNAMELEN];
cookie2Pathname(fc, NULL, fpathName);
/* errno is saved so a later pathconf() failure can be distinguished
 * from a stale errno value. */
int oldErrno = errno;
// FIXME: Has to be different for .XFS and for HOSTFS.
D(bug("HOSTFS: fs_pathconf (%s,%d)", fpathName, which));
STATVFS buff;
int32 res = host_statvfs( fpathName, &buff);
if ( res != TOS_E_OK )
return res;
switch (which) {
case -1:
return 9; // maximal which value
case 0: // DP_IOPEN
return 0x7fffffffL; // unlimited
case 1: { // DP_MAXLINKS
long result = pathconf(fpathName, _PC_LINK_MAX);
/* pathconf() returns -1 both for "no limit" and for errors;
 * only treat it as an error when errno actually changed. */
if ( result == -1 && oldErrno != errno )
return errnoHost2Mint(errno,TOS_EFILNF);
return result;
}
case 2: // DP_PATHMAX
return MAXPATHNAMELEN; // FIXME: This is the limitation of this implementation (ARAnyM specific)
case 3: // DP_NAMEMAX
/* The maximum file-name length lives in different statvfs/statfs
 * fields (or needs a native call) depending on the host OS. */
#ifdef HAVE_SYS_STATVFS_H
return buff.f_namemax;
#else
# if (defined(OS_openbsd) || defined(OS_freebsd) || defined(OS_netbsd) || defined(OS_darwin))
return MFSNAMELEN;
# else
#if defined(OS_mint)
return Dpathconf(fpathName,3 /* DP_NAMEMAX */);
#else
return buff.f_namelen;
#endif /* OS_mint */
#endif /* OS_*bsd */
#endif /* HAVE_SYS_STATVFS_H */
case 4: // DP_ATOMIC
return buff.f_bsize; // ST max vs Linux optimal
case 5: // DP_TRUNC
return 0; // files are NOT truncated... (hope correct)
case 6: // DP_CASE
return ( !fc->drv || fc->drv->halfSensitive ) ? 2 /*DP_CASEINSENS*/ : 0 /*DP_CASESENS*/;
case 7: // D_XATTRMODE
return 0x0fffffdfL; // only the archive bit is not recognised in the fs_getxattr
case 8: // DP_XATTR
// FIXME: This argument should be set accordingly to the filesystem type mounted
// to the particular path.
return 0x00000ffbL; // rdev is not used
case 9: // DP_VOLNAMEMAX
return 0;
default:;
}
return TOS_EINVFN;
}
glfs_chdir (struct glfs *fs, const char *path)
{
/* Change the virtual working directory of FS to PATH.
 * Resolves PATH (retrying on ESTALE), verifies it is a directory, and
 * installs its inode as the cwd.  Returns 0 on success, -1 with errno
 * set on failure. */
int ret = -1;
xlator_t *subvol = NULL;
loc_t loc = {0, };
struct iatt iatt = {0, };
int reval = 0;
__glfs_entry_fs (fs);
/* Pin the currently active graph; all resolution happens against it. */
subvol = glfs_active_subvol (fs);
if (!subvol) {
ret = -1;
errno = EIO;
goto out;
}
retry:
ret = glfs_resolve (fs, subvol, path, &loc, &iatt, reval);
/* ESTALE_RETRY re-resolves once with reval set if the handle went stale. */
ESTALE_RETRY (ret, errno, reval, &loc, retry);
if (ret)
goto out;
if (!IA_ISDIR (iatt.ia_type)) {
ret = -1;
errno = ENOTDIR;
goto out;
}
glfs_cwd_set (fs, loc.inode);
out:
/* Common cleanup: release the resolved location and the graph ref. */
loc_wipe (&loc);
glfs_subvol_done (fs, subvol);
return ret;
}
clear_space(Space* space) {
/* Free every area list owned by SPACE (small, large and weak-pointer
 * areas) via the FOR_ALL_AREAS iteration macro, then reset the list
 * heads so the space is empty. */
FOR_ALL_AREAS(space->small_area, s48_free_area(area)); space->small_area = NULL;
FOR_ALL_AREAS(space->large_area, s48_free_area(area)); space->large_area = NULL;
FOR_ALL_AREAS(space->weaks_area, s48_free_area(area)); space->weaks_area = NULL;
}
ptraInsert(L_PTRA   *pa,
           l_int32   index,
           void     *item,
           l_int32   shiftflag)
{
l_int32    i, ihole, imax;
l_float32  nexpected;

    PROCNAME("ptraInsert");

    if (!pa)
        return ERROR_INT("pa not defined", procName, 1);
    if (index < 0 || index > pa->nalloc)
        return ERROR_INT("index not in [0 ... nalloc]", procName, 1);
    if (shiftflag != L_AUTO_DOWNSHIFT && shiftflag != L_MIN_DOWNSHIFT &&
        shiftflag != L_FULL_DOWNSHIFT)
        return ERROR_INT("invalid shiftflag", procName, 1);

    if (item) pa->nactual++;
    if (index == pa->nalloc) {  /* can happen when index == n */
        if (ptraExtendArray(pa))
            return ERROR_INT("extension failure", procName, 1);
    }

        /* We are inserting into a hole or adding to the end of the array.
         * No existing items are moved. */
    ptraGetMaxIndex(pa, &imax);
    if (pa->array[index] == NULL) {
        pa->array[index] = item;
        if (item && index > imax)  /* new item put beyond max so far */
            pa->imax = index;
        return 0;
    }

        /* We are inserting at the location of an existing item,
         * forcing the existing item and those below to shift down.
         * First, extend the array automatically if the last element
         * (nalloc - 1) is occupied (imax).  This may not be necessary
         * in every situation, but only an anomalous sequence of insertions
         * into the array would cause extra ptr allocation.  */
    if (imax >= pa->nalloc - 1 && ptraExtendArray(pa))
        return ERROR_INT("extension failure", procName, 1);

        /* If there are no holes, do a full downshift.
         * Otherwise, if L_AUTO_DOWNSHIFT, use the expected number
         * of holes between index and n to determine the shift mode */
    if (imax + 1 == pa->nactual)
        shiftflag = L_FULL_DOWNSHIFT;
    else if (shiftflag == L_AUTO_DOWNSHIFT) {
        if (imax < 10)
            shiftflag = L_FULL_DOWNSHIFT;  /* no big deal */
        else {
                /* BUGFIX: the fraction (imax - index) / imax was computed
                 * in integer arithmetic, truncating to 0 for any index > 0,
                 * which silently forced L_FULL_DOWNSHIFT in every case.
                 * Do the division in floating point so the heuristic
                 * actually estimates the number of holes in the range. */
            nexpected = (l_float32)(imax - pa->nactual) *
                        (l_float32)(imax - index) / (l_float32)imax;
            shiftflag = (nexpected > 2.0) ? L_MIN_DOWNSHIFT : L_FULL_DOWNSHIFT;
        }
    }

    if (shiftflag == L_MIN_DOWNSHIFT) {  /* run down looking for a hole */
        for (ihole = index + 1; ihole <= imax; ihole++) {
            if (pa->array[ihole] == NULL)
                break;
        }
    }
    else   /* L_FULL_DOWNSHIFT */
        ihole = imax + 1;

        /* Shift items down one slot to open up 'index', then insert. */
    for (i = ihole; i > index; i--)
        pa->array[i] = pa->array[i - 1];
    pa->array[index] = (void *)item;
    if (ihole == imax + 1)  /* the last item was shifted down */
        pa->imax++;

    return 0;
}
linear(Home home, const BoolVarArgs& x, IntRelType irt, IntVar y,
Reify r, IntConLevel icl) {
// Post the reified linear constraint (sum of the Boolean views in x)
// <irt> y, equivalent to r. Each Boolean gets unit coefficient.
if (home.failed()) return;
int n=x.size();
// Terms are allocated from the space's region (no explicit free needed).
Region re(home);
Linear::Term<BoolView>* t = re.alloc<Linear::Term<BoolView> >(n);
for (int i=n; i--; ) {
t[i].a=1; t[i].x=x[i];
}
Linear::post(home,t,n,irt,y,r,icl);
}
ReadBinary(P_WBXML_INFO buffer, FILE* file)
{
  /* Load the whole WBXML document into buffer->m_start.
   * Regular files are read in one shot (size known up front); stdin is
   * slurped in 4 KiB chunks into a growing heap block.
   * ParseError() is assumed not to return on fatal errors (the original
   * code relies on that too) -- TODO confirm. */
  char buf[4096];
  int m = 1;        /* number of 4 KiB chunks allocated (stdin path) */
  long n;           /* bytes obtained by the last fread() */

  if (buffer && file)
  {
    if (file != stdin)
    {
      buffer->m_length = FileSize(file);
      buffer->m_start = (P_WBXML) malloc(buffer->m_length);
      buffer->m_curpos = buffer->m_start;

      if (!buffer->m_start)
      {
        fclose(file);
        ParseError(ERR_NOT_ENOUGH_MEMORY);
      }

      if (fread(buffer->m_start, 1, buffer->m_length, file) != buffer->m_length)
      {
        fclose(file);
        ParseError(ERR_FILE_NOT_READ);
      }
      else
      {
        fclose(file);
      }
    }
    else
    {
      /* NOTE(review): relies on buffer->m_start being NULL on entry so
       * the first realloc behaves like malloc -- confirm caller init. */
      while ((n = fread(buf, 1, sizeof(buf), file)) > 0)
      {
        /* BUGFIX: the realloc result was previously used unchecked; on
         * failure the old block also leaked.  Keep the old pointer alive
         * until the new one is known good. */
        P_WBXML grown = (P_WBXML) realloc(buffer->m_start, sizeof(buf) * m);
        if (!grown)
        {
          ParseError(ERR_NOT_ENOUGH_MEMORY);
        }
        buffer->m_start = grown;

        /* BUGFIX: a short (final) read used to memcpy sizeof(buf) bytes,
         * i.e. uninitialized stack data past the n valid bytes.  Zero the
         * tail so the chunk geometry is unchanged but no garbage enters
         * the document buffer. */
        if ((size_t) n < sizeof(buf))
          memset(buf + n, 0, sizeof(buf) - (size_t) n);

        memcpy(buffer->m_start + (sizeof(buf) * (m - 1)), buf, sizeof(buf));
        m++;
      }
      buffer->m_length = BufferLength(buffer);
      buffer->m_curpos = buffer->m_start;
    }
  }
  else
  {
    ParseError(ERR_INTERNAL_BAD_PARAM);
  }
}
btl_openib_async_command_done(int exp)
{
    int comp;
    /* Block until the async event thread posts its completion code on
     * the pipe.  BUGFIX: a short read (e.g. the write end was closed)
     * previously passed the `< 0` check and left `comp` partially or
     * wholly uninitialized before the comparison below; demand the full
     * int. */
    if (read(mca_btl_openib_component.async_comp_pipe[0], &comp,
             sizeof(int)) != (ssize_t) sizeof(int)) {
        BTL_ERROR(("Failed to read from pipe"));
        return OMPI_ERROR;
    }
    /* The completion code must match the command we issued. */
    if (exp != comp) {
        BTL_ERROR(("Get wrong completion on async command. Waiting for %d and got %d",
                   exp, comp));
        return OMPI_ERROR;
    }
    return OMPI_SUCCESS;
}
virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
/* Re-enable used-buffer notifications for the queue and return a
 * snapshot of last_used_idx for a later virtqueue_poll() race check. */
struct vring_virtqueue *vq = to_vvq(_vq);
u16 last_used_idx;
START_USE(vq);
/* We optimistically turn back on interrupts, then check if there was
* more to do. */
/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
* entry. Always do both to keep code simple. */
if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
/* Only write the shared ring when the device actually looks at the
 * flags field (i.e. event-idx is not negotiated). */
if (!vq->event)
vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
END_USE(vq);
return last_used_idx;
}
mono_reflection_method_on_tb_inst_get_handle (MonoReflectionMethodOnTypeBuilderInst *m)
{
/* Resolve the runtime MonoMethod* for a MethodOnTypeBuilderInst:
 * first inflate the method into its (possibly generic) declaring type,
 * then, if explicit method type arguments were supplied, inflate it
 * again with those arguments. */
MonoClass *klass;
MonoGenericContext tmp_context;
MonoType **type_argv;
MonoGenericInst *ginst;
MonoMethod *method, *inflated;
int count, i;
init_type_builder_generics ((MonoObject*)m->inst);
method = inflate_method (m->inst, (MonoObject*)m->mb);
klass = method->klass;
/* No explicit method type arguments: the class-inflated method is it. */
if (m->method_args == NULL)
return method;
/* Inflate from the open (declaring) definition, not an instantiation. */
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
count = mono_array_length (m->method_args);
type_argv = g_new0 (MonoType *, count);
for (i = 0; i < count; i++) {
MonoReflectionType *garg = mono_array_get (m->method_args, gpointer, i);
type_argv [i] = mono_reflection_type_get_handle (garg);
}
ginst = mono_metadata_get_generic_inst (count, type_argv);
g_free (type_argv);
/* Combine the class's generic context (if any) with the method inst. */
tmp_context.class_inst = klass->generic_class ? klass->generic_class->context.class_inst : NULL;
tmp_context.method_inst = ginst;
inflated = mono_class_inflate_generic_method (method, &tmp_context);
return inflated;
}
conn_pool_get(Octstr *host, int port, int ssl, Octstr *certkeyfile,
Octstr *our_host)
{
/* Fetch a pooled keep-alive connection to host:port (matching ssl /
 * cert / source address), discarding any that the server has since
 * closed; open a fresh non-blocking connection when none is usable. */
Octstr *key;
List *list = NULL;
Connection *conn = NULL;
int retry;
do {
retry = 0;
key = conn_pool_key(host, port, ssl, certkeyfile, our_host);
mutex_lock(conn_pool_lock);
list = dict_get(conn_pool, key);
if (list != NULL)
conn = gwlist_extract_first(list);
mutex_unlock(conn_pool_lock);
/*
* Note: we don't hold conn_pool_lock when we check/destroy/unregister
* connection because otherwise we can deadlock! And it's even better
* not to delay other threads while we check connection.
*/
if (conn != NULL) {
#ifdef USE_KEEPALIVE
/* unregister our server disconnect callback */
conn_unregister(conn);
#endif
/*
* Check whether the server has closed the connection while
* it has been in the pool.
*/
conn_wait(conn, 0);
if (conn_eof(conn) || conn_error(conn)) {
debug("gwlib.http", 0, "HTTP:conn_pool_get: Server closed connection, destroying it <%s><%p><fd:%d>.",
octstr_get_cstr(key), conn, conn_get_id(conn));
conn_destroy(conn);
retry = 1;
conn = NULL;
}
}
octstr_destroy(key);
} while(retry == 1);
/* Nothing usable in the pool: open a brand new connection. */
if (conn == NULL) {
#ifdef HAVE_LIBSSL
if (ssl)
conn = conn_open_ssl_nb(host, port, certkeyfile, our_host);
else
#endif /* HAVE_LIBSSL */
conn = conn_open_tcp_nb(host, port, our_host);
debug("gwlib.http", 0, "HTTP: Opening connection to `%s:%d' (fd=%d).",
octstr_get_cstr(host), port, conn_get_id(conn));
} else {
debug("gwlib.http", 0, "HTTP: Reusing connection to `%s:%d' (fd=%d).",
octstr_get_cstr(host), port, conn_get_id(conn));
}
return conn;
}
_MIDI_SendProgramChange
   (
   int channel,
   int c1
   )
{
   /* Send a MIDI Program Change (0xC0 | channel, program c1) through the
    * installed output functions.  A per-channel reroute hook may consume
    * the event first. */
   if ( _MIDI_RerouteFunctions[ channel ] != NULL )
      {
      int status = _MIDI_RerouteFunctions[ channel ]( 0xC0 + channel,
         c1, 0 );

      if ( status == MIDI_DONT_PLAY )
         {
         return( MIDI_Ok );
         }
      }

   /* No output driver, or the driver lacks a ProgramChange handler. */
   if ( ( _MIDI_Funcs == NULL ) || ( _MIDI_Funcs->ProgramChange == NULL ) )
      {
      return( MIDI_Error );
      }

   _MIDI_Funcs->ProgramChange( channel, c1 );
   return( MIDI_Ok );
}
flush_old_maps(void) {
/* Walk every loaded map and: (1) arm the swap-out timeout on idle
 * in-memory maps with no players, (2) delete swapped-out per-player
 * unique/template map entries, (3) reset and delete swapped maps whose
 * reset time has passed. */
mapstruct *m, *oldmap;
long sec;
sec = seconds();
m = first_map;
while (m) {
/* There can be cases (ie death) where a player leaves a map and the timeout
* is not set so it isn't swapped out.
*/
if ((m->in_memory == MAP_IN_MEMORY)
&& (m->timeout == 0)
&& !players_on_map(m, TRUE)) {
set_map_timeout(m);
}
/* per player unique maps are never really reset. However, we do want
* to perdiocially remove the entries in the list of active maps - this
* generates a cleaner listing if a player issues the map commands, and
* keeping all those swapped out per player unique maps also has some
* memory and cpu consumption.
* We do the cleanup here because there are lots of places that call
* swap map, and doing it within swap map may cause problems as
* the functions calling it may not expect the map list to change
* underneath them.
*/
if ((m->unique || m->is_template) && m->in_memory == MAP_SWAPPED) {
LOG(llevDebug, "Resetting map %s.\n", m->path);
oldmap = m;
m = m->next;
delete_map(oldmap);
} else if (m->in_memory != MAP_SWAPPED
|| m->tmpname == NULL
|| sec < m->reset_time) {
/* Not eligible for reset yet; keep walking. */
m = m->next;
} else {
LOG(llevDebug, "Resetting map %s.\n", m->path);
/* Lauwenmark : Here we handle the MAPRESET global event */
execute_global_event(EVENT_MAPRESET, m);
clean_tmp_map(m);
oldmap = m;
m = m->next;
delete_map(oldmap);
}
}
}
copy_NameConstraints(const NameConstraints *from, NameConstraints *to)
{
/* Deep-copy an X.509 NameConstraints structure (asn1_compile-generated
 * style): both optional subtree sequences are duplicated element by
 * element.  On any allocation/copy failure the partially built copy is
 * released and ENOMEM is returned; 0 on success. */
/* NOTE(review): len * sizeof(elem) is not checked for overflow here;
 * generated callers presumably bound len -- confirm. */
memset(to, 0, sizeof(*to));
if((from)->permittedSubtrees) {
(to)->permittedSubtrees = malloc(sizeof(*(to)->permittedSubtrees));
if((to)->permittedSubtrees == NULL) goto fail;
if((((to)->permittedSubtrees)->val = malloc(((from)->permittedSubtrees)->len * sizeof(*((to)->permittedSubtrees)->val))) == NULL && ((from)->permittedSubtrees)->len != 0)
goto fail;
for(((to)->permittedSubtrees)->len = 0; ((to)->permittedSubtrees)->len < ((from)->permittedSubtrees)->len; ((to)->permittedSubtrees)->len++){
if(copy_GeneralSubtree(&((from)->permittedSubtrees)->val[((to)->permittedSubtrees)->len], &((to)->permittedSubtrees)->val[((to)->permittedSubtrees)->len])) goto fail;
}
}else
(to)->permittedSubtrees = NULL;
if((from)->excludedSubtrees) {
(to)->excludedSubtrees = malloc(sizeof(*(to)->excludedSubtrees));
if((to)->excludedSubtrees == NULL) goto fail;
if((((to)->excludedSubtrees)->val = malloc(((from)->excludedSubtrees)->len * sizeof(*((to)->excludedSubtrees)->val))) == NULL && ((from)->excludedSubtrees)->len != 0)
goto fail;
for(((to)->excludedSubtrees)->len = 0; ((to)->excludedSubtrees)->len < ((from)->excludedSubtrees)->len; ((to)->excludedSubtrees)->len++){
if(copy_GeneralSubtree(&((from)->excludedSubtrees)->val[((to)->excludedSubtrees)->len], &((to)->excludedSubtrees)->val[((to)->excludedSubtrees)->len])) goto fail;
}
}else
(to)->excludedSubtrees = NULL;
return 0;
fail:
/* free_NameConstraints releases whatever was copied so far; 'len' is
 * always the count of fully constructed elements. */
free_NameConstraints(to);
return ENOMEM;
}
REgetc()
{
	/* Return the next character of the regular expression being parsed:
	 * a pushed-back character (REpeekc) takes priority, then the next
	 * byte from REptr; EOF once the pattern string is exhausted. */
	ZXchar c = REpeekc;

	if (c != EOF) {
		REpeekc = EOF;		/* consume the pushback */
		return c;
	}
	if (*REptr == '\0')
		return EOF;
	return ZXC(*REptr++);
}
DiskFileToVirtualFile(
const string& disk_file,
string* virtual_file,
string* shadowing_disk_file) {
/* Map an on-disk path back to its virtual (import) path using the
 * registered mappings, detecting when a higher-precedence mapping
 * shadows the result with a different existing file.
 * Returns NO_MAPPING / SHADOWED / CANNOT_OPEN / SUCCESS. */
int mapping_index = -1;
string canonical_disk_file = CanonicalizePath(disk_file);
for (int i = 0; i < mappings_.size(); i++) {
// Apply the mapping in reverse.
if (ApplyMapping(canonical_disk_file, mappings_[i].disk_path,
mappings_[i].virtual_path, virtual_file)) {
// Success.
mapping_index = i;
break;
}
}
if (mapping_index == -1) {
return NO_MAPPING;
}
// Iterate through all mappings with higher precedence and verify that none
// of them map this file to some other existing file.
for (int i = 0; i < mapping_index; i++) {
if (ApplyMapping(*virtual_file, mappings_[i].virtual_path,
mappings_[i].disk_path, shadowing_disk_file)) {
if (access(shadowing_disk_file->c_str(), F_OK) >= 0) {
// File exists.
return SHADOWED;
}
}
}
// Only meaningful when SHADOWED is returned; clear it otherwise.
shadowing_disk_file->clear();
// Verify that we can open the file. Note that this also has the side-effect
// of verifying that we are not canonicalizing away any non-existent
// directories.
scoped_ptr<io::ZeroCopyInputStream> stream(OpenDiskFile(disk_file));
if (stream == NULL) {
return CANNOT_OPEN;
}
return SUCCESS;
}
btree_printhead(btree_head* head)
{
/* Dump every field of an HFS+ B-tree header node to stdout, followed by
 * a decoded view of the attribute flag bits. */
/* NOTE(review): the %#lX conversions assume the UInt32 fields promote to
 * unsigned long (i.e. a platform where that typedef is long) -- on LP64
 * with a plain 32-bit UInt32 these format specifiers would mismatch;
 * confirm the typedef. */
UInt32 attr;
printf(" depth : %#X\n", head->depth);
printf(" root : %#lX\n", head->root);
printf(" leaf_count : %#lX\n", head->leaf_count);
printf(" leaf_head : %#lX\n", head->leaf_head);
printf(" leaf_tail : %#lX\n", head->leaf_tail);
printf(" node_size : %#X\n", head->node_size);
printf(" max_key_len : %#X\n", head->max_key_len);
printf(" node_count : %#lX\n", head->node_count);
printf(" free_nodes : %#lX\n", head->free_nodes);
printf(" reserved1 : %#X\n", head->reserved1);
printf(" clump_size : %#lX\n", head->clump_size);
printf(" btree_type : %#X\n", head->btree_type);
attr = head->attributes;
printf(" reserved2 : %#X\n", head->reserved2);
/* Decode the attribute bits; "***" marks states worth attention. */
if (attr & HFSPLUS_BAD_CLOSE)
printf(" HFSPLUS_BAD_CLOSE *** ");
else
printf(" !HFSPLUS_BAD_CLOSE");
if (attr & HFSPLUS_TREE_BIGKEYS)
printf(" HFSPLUS_TREE_BIGKEYS ");
else
printf(" !HFSPLUS_TREE_BIGKEYS");
if (attr & HFSPLUS_TREE_VAR_NDXKEY_SIZE)
printf(" HFSPLUS_TREE_VAR_NDXKEY_SIZE");
else
printf(" !HFSPLUS_TREE_VAR_NDXKEY_SIZE");
if (attr & HFSPLUS_TREE_UNUSED)
printf(" HFSPLUS_TREE_UNUSED ***\n");
printf("\n");
}
psgen_end_job(GVJ_t * job)
{
    /* Emit the PostScript/EPS trailer.  Page count and bounding box are
     * DSC comments that only apply to full PostScript output, and the
     * bounding box is suppressed when debug boxes are being shown. */
    gvputs(job, "%%Trailer\n");
    if (job->render.id != FORMAT_EPS) {
	gvprintf(job, "%%%%Pages: %d\n", job->common->viewNum);
	if (job->common->show_boxes == NULL)
	    gvprintf(job, "%%%%BoundingBox: %d %d %d %d\n",
		     job->boundingBox.LL.x, job->boundingBox.LL.y,
		     job->boundingBox.UR.x, job->boundingBox.UR.y);
    }
    gvputs(job, "end\nrestore\n");
    gvputs(job, "%%EOF\n");
}
nlRowParameterd(NLenum pname, NLdouble param) {
    /* Set a per-row parameter on the current context while a matrix is
     * being assembled; unknown pname values are silently ignored, as
     * before. */
    nlCheckState(NL_STATE_MATRIX) ;
    if(pname == NL_RIGHT_HAND_SIDE) {
        nlCurrentContext->right_hand_side = param ;
    } else if(pname == NL_ROW_SCALING) {
        nlCurrentContext->row_scaling = param ;
    }
}
export_hkl_data( const HKL_data_base& cdata, const String mtzpath )
{
/* Queue an HKL data object for export to the MTZ file: its data columns
 * are registered under mtzpath (crystal/dataset/column), and both the
 * object and its column assignments are remembered for the actual write
 * pass.  Requires the file to be open in WRITE or APPEND mode. */
if ( mode != WRITE && mode != APPEND )
Message::message( Message_fatal( "CCP4MTZfile: export_hkl_data - no file open for write/append" ) );
// add the exported data columns to the local list
int ncols = cdata.data_size();
std::vector<String> col_names = mtz_assign( mtzpath, cdata.type(),
cdata.data_names(), ncols );
std::vector<String> dat_names = cdata.data_names().split(" ");
std::vector<hkldatacol> newcols(ncols);
// assign the columns to mtz indexes
for ( int col=0; col < ncols; col++ ) { // loop over columns in list
int x, s, c;
// x = crystal index, s = dataset index within it (both must exist)
match_path( col_names[col], x, s, c );
if ( x < 0 || s < 0 ) Message::message( Message_fatal( "CCP4MTZfile: export_hkl_data - Missing crystal or dataset: "+col_names[col] ) );
datacolinf newcol;
newcol.label = col_names[col].tail();
newcol.type = CCP4MTZ_type_registry::type( dat_names[col] );
c = crystals[x].datasets[s].columns.size();
crystals[x].datasets[s].columns.push_back(newcol);
newcols[col].path = "/" + crystals[x].crystal.crystal_name()
+ "/" + crystals[x].datasets[s].dataset.dataset_name()
+ "/" + crystals[x].datasets[s].columns[c].label;
newcols[col].scale = CCP4MTZ_type_registry::scale( dat_names[col] );
assigned_paths_.push_back( // store names for user query
"/" + crystals[x].crystal.crystal_name() +
"/" + crystals[x].datasets[s].dataset.dataset_name() +
"/" + crystals[x].datasets[s].columns[c].label +
" " + crystals[x].datasets[s].columns[c].type );
}
hkl_data_o.push_back( &cdata );
hkl_data_cols.push_back( newcols );
}
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
/* For the physical-memory chunk ENT, add a KCORE_VMEMMAP entry covering
 * the page-struct range that describes it, trimmed so it does not
 * overlap any vmemmap entry already on HEAD.  Returns 0 only on
 * allocation failure, 1 otherwise (including "nothing to add"). */
unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
unsigned long nr_pages = ent->size >> PAGE_SHIFT;
unsigned long start, end;
struct kcore_list *vmm, *tmp;
/* Page-align the struct-page span for this pfn range. */
start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
end = PAGE_ALIGN(end);
/* overlap check (because we have to align page */
list_for_each_entry(tmp, head, list) {
if (tmp->type != KCORE_VMEMMAP)
continue;
if (start < tmp->addr + tmp->size)
if (end > tmp->addr)
end = tmp->addr;
}
if (start < end) {
vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
if (!vmm)
return 0;
vmm->addr = start;
vmm->size = end - start;
vmm->type = KCORE_VMEMMAP;
list_add_tail(&vmm->list, head);
}
return 1;
}
getTrackPoint ( D303 * p)
{
    // Build a TCX trackpoint from a Garmin D303 record: timestamp always,
    // position / altitude / heart rate only when the device reported them.
    TcxTrackpoint * point = new TcxTrackpoint(GpsFunctions::print_dtime(p->time));

    // 0x7fffffff in either coordinate is the "no fix" sentinel.
    if ((p->posn.lat != 0x7fffffff) && (p->posn.lon != 0x7fffffff)) {
        stringstream lat;
        stringstream lon;
        lat.precision(10); // default 4 decimal chars which is not enough
        lon.precision(10); // default 4 decimal chars which is not enough
        lat << SEMI2DEG(p->posn.lat);
        lon << SEMI2DEG(p->posn.lon);
        point->setPosition(lat.str(), lon.str());
    }

    stringstream num;
    if (p->alt < 1.0e24) {          // huge value == "no altitude"
        num << p->alt;
        point->setAltitudeMeters(num.str());
    }

    if (p->heart_rate != 0) {       // 0 == "no heart-rate data"
        num.str("");
        num << (unsigned int)(p->heart_rate);
        point->setHeartRateBpm(num.str());
    }

    return point;
}
kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
/* Called when an async page fault has been resolved: retire the token,
 * trace the event, and (if the guest enabled async PF) inject a
 * synthetic #PF carrying the token so the guest can wake the waiter.
 * Finally un-halt the vcpu. */
struct x86_exception fault;
if (work->wakeup_all)
work->arch.token = ~0; /* broadcast wakeup */
else
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
trace_kvm_async_pf_ready(work->arch.token, work->gva);
/* Only inject if the guest opted in and the READY reason could be
 * written to its shared MSR page. */
if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
fault.vector = PF_VECTOR;
fault.error_code_valid = true;
fault.error_code = 0;
fault.nested_page_fault = false;
fault.address = work->arch.token;
kvm_inject_page_fault(vcpu, &fault);
}
vcpu->arch.apf.halted = false;
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
icg_user_is_impatient (GnmIOContextGtk *icg)
{
	/* Decide whether the progress popup should appear: forecast the
	 * total run time from progress so far and show the dialog when
	 * that forecast exceeds ICG_POPUP_DELAY. */
	gdouble t = g_timer_elapsed (icg->timer, NULL);
	double progress = icg->progress;

	/* Before any progress at all, remember the startup latency. */
	if (icg->progress == 0. && icg->files_done == 0)
		icg->latency = t;

	/* Too early to forecast anything. */
	if (t < ICG_POPUP_DELAY / 3.0)
		return FALSE;

	/* Fold per-file progress into overall progress for multi-file jobs. */
	if (icg->files_total > 1)
		progress = (progress + icg->files_done) / icg->files_total;

	if (progress <= 0.0) {
		/* We're likely to be back shortly. */
		return t > ICG_POPUP_DELAY * 0.8;
	}

	/* Linear extrapolation past the initial latency. */
	return icg->latency + (t - icg->latency) / progress > ICG_POPUP_DELAY;
}
internalWriteString(const QString &s, TrackItem::Type t, int id)
{
// Serialize S as UTF-8 bytes and delegate to internalWriteData with the
// same item type and id.
// NOTE(review): 'out' is computed but never used afterwards; whether
// sanitizeForStream() has side effects is not visible here -- confirm
// before removing it.
QString out=sanitizeForStream(s);
Q3CString cs = s.utf8();
QByteArray a(cs.length());
memcpy(a.data(), cs.data(), a.size());
return internalWriteData(a, t, id);
}
breakLines(char *string)
{
	/* Word-wrap STRING in place: once a line reaches 55 characters,
	 * the most recent space on that line is replaced by a newline.
	 * Lines without a usable space are left untouched. */
	char *p = string;		/* scan cursor */
	char *lastspace = NULL;		/* latest space on the current line */
	int curlen = 0;			/* length of the current line so far */

	while (*p != '\0') {
		if (*p == ' ') {
			lastspace = p;
		} else if (*p == '\n') {
			/* existing break: start counting a fresh line */
			lastspace = NULL;
			curlen = 0;
		}

		if (curlen >= 55 && lastspace != NULL) {
			*lastspace = '\n';
			curlen = (int)(p - lastspace);
			lastspace = NULL;
		}

		curlen++;
		p++;
	}
}
_dfb_layer_region_surface_listener( const void *msg_data, void *ctx )
{
/* Reaction to notifications from the surface currently shown by a layer
 * region: forwards palette, input-field and alpha-ramp changes to the
 * layer driver while the region is realized+configured and not frozen.
 * Detaches (RS_REMOVE) when the surface is destroyed. */
CoreSurfaceNotificationFlags flags;
CoreSurface *surface;
CoreLayer *layer;
CoreLayerShared *shared;
const DisplayLayerFuncs *funcs;
const CoreSurfaceNotification *notification = msg_data;
CoreLayerRegion *region = ctx;
D_ASSERT( notification != NULL );
D_ASSERT( region != NULL );
D_ASSERT( region->context != NULL );
D_DEBUG_AT( Core_Layers, "_dfb_layer_region_surface_listener( %p, %p ) <- 0x%08x\n",
notification, region, notification->flags );
D_ASSERT( notification->surface != NULL );
D_ASSUME( notification->surface == region->surface );
/* Stale notification from a surface no longer attached to this region. */
if (notification->surface != region->surface)
return RS_OK;
layer = dfb_layer_at( region->context->layer_id );
D_ASSERT( layer != NULL );
D_ASSERT( layer->funcs != NULL );
D_ASSERT( layer->funcs->SetRegion != NULL );
D_ASSERT( layer->shared != NULL );
funcs = layer->funcs;
shared = layer->shared;
flags = notification->flags;
surface = notification->surface;
if (flags & CSNF_DESTROY) {
D_WARN( "layer region surface destroyed" );
region->surface = NULL;
return RS_REMOVE;
}
if (dfb_layer_region_lock( region ))
return RS_OK;
/* Only push changes to the driver while the region is actually live. */
if (D_FLAGS_ARE_SET( region->state, CLRSF_REALIZED | CLRSF_CONFIGURED ) &&
!D_FLAGS_IS_SET( region->state, CLRSF_FROZEN ))
{
if (D_FLAGS_IS_SET( flags, CSNF_PALETTE_CHANGE | CSNF_PALETTE_UPDATE )) {
if (surface->palette)
funcs->SetRegion( layer,
layer->driver_data, layer->layer_data,
region->region_data, &region->config,
CLRCF_PALETTE, surface, surface->palette,
&region->surface_lock );
}
/* Input field changes only matter for drivers that support them. */
if ((flags & CSNF_FIELD) && funcs->SetInputField)
funcs->SetInputField( layer,
layer->driver_data, layer->layer_data,
region->region_data, surface->field );
if ((flags & CSNF_ALPHA_RAMP) && (shared->description.caps & DLCAPS_ALPHA_RAMP)) {
/* Mirror the surface's ramp into the region config before applying. */
region->config.alpha_ramp[0] = surface->alpha_ramp[0];
region->config.alpha_ramp[1] = surface->alpha_ramp[1];
region->config.alpha_ramp[2] = surface->alpha_ramp[2];
region->config.alpha_ramp[3] = surface->alpha_ramp[3];
funcs->SetRegion( layer,
layer->driver_data, layer->layer_data,
region->region_data, &region->config,
CLRCF_ALPHA_RAMP, surface, surface->palette,
&region->surface_lock );
}
}
dfb_layer_region_unlock( region );
return RS_OK;
}
alloc_timespec_pair(double sec, long nsec) {
/* Build the OCaml pair (sec : float, nsec : int) for a timespec.
 * CAMLparam0/CAMLlocal1/CAMLreturn register the local with the OCaml GC
 * so the allocation in caml_copy_double cannot invalidate it. */
CAMLparam0();
CAMLlocal1(tsout);
tsout = alloc(2,0);
Store_field(tsout, 0, caml_copy_double(sec));
Store_field(tsout, 1, Val_long(nsec));
CAMLreturn(tsout);
}
BuildMessage(SSL* ssl, byte* output, const byte* input, int inSz,
int type)
{
/* Assemble one outgoing TLS record in OUTPUT: header, optional explicit
 * IV (TLS 1.1+ block ciphers), payload, HMAC, and CBC padding, then
 * encrypt everything after the header.  Returns the total record size.
 * NOTE(review): assumes the caller sized OUTPUT for the padded record;
 * no bound is checked here -- confirm at call sites. */
word32 digestSz = ssl->specs.hash_size;
word32 sz = RECORD_HEADER_SZ + inSz + digestSz;
word32 pad = 0, i;
word32 idx = RECORD_HEADER_SZ;
word32 ivSz = 0; /* TLSv1.1 IV */
word32 headerSz = RECORD_HEADER_SZ;
word16 size;
byte iv[AES_BLOCK_SIZE]; /* max size */
#ifdef CYASSL_DTLS
if (ssl->options.dtls) {
sz += DTLS_RECORD_EXTRA;
idx += DTLS_RECORD_EXTRA;
headerSz += DTLS_RECORD_EXTRA;
}
#endif
if (ssl->specs.cipher_type == block) {
word32 blockSz = ssl->specs.block_size;
if (ssl->options.tls1_1) {
ivSz = blockSz;
sz += ivSz;
/* NOTE(review): RNG_GenerateBlock's return value is ignored here --
 * confirm it cannot fail in this build. */
RNG_GenerateBlock(&ssl->rng, iv, ivSz);
}
/* CBC padding: at least one byte, up to a full block. */
sz += 1; /* pad byte */
pad = (sz - headerSz) % blockSz;
pad = blockSz - pad;
sz += pad;
}
size = sz - headerSz; /* include mac and digest */
AddRecordHeader(output, size, type, ssl);
/* write to output */
if (ivSz) {
XMEMCPY(output + idx, iv, ivSz);
idx += ivSz;
}
XMEMCPY(output + idx, input, inSz);
idx += inSz;
/* Handshake messages are also folded into the handshake hashes. */
if (type == handshake)
HashOutput(ssl, output, headerSz + inSz, ivSz);
ssl->hmac(ssl, output+idx, output + headerSz + ivSz, inSz, type, 0);
idx += digestSz;
if (ssl->specs.cipher_type == block)
for (i = 0; i <= pad; i++) output[idx++] = pad; /* pad byte gets */
/* pad value too */
Encrypt(ssl, output + headerSz, output + headerSz, size);
return sz;
}
show_away(gchar *a_message, void *unused)
{
/* Put the client into "away" state: build (once) and show the away
 * dialog with an editable away message and an "I'm Back" button, then
 * propagate the away status to every connected account. */
LList *list;
eb_local_account *ela = NULL;
/* Build the dialog only on the first call; afterwards it is re-shown. */
if (!is_away) {
GtkWidget *label;
GtkWidget *vbox;
GtkTextBuffer *buffer;
awaybox = gtk_window_new(GTK_WINDOW_TOPLEVEL);
gtk_widget_realize(awaybox);
vbox = gtk_vbox_new(FALSE, 0);
label = gtk_label_new(_
("You are currently away, click \"I'm back\" to return"));
gtk_box_pack_start(GTK_BOX(vbox), label, TRUE, TRUE, 5);
gtk_widget_show(label);
label = gtk_label_new(_
("This is the away message that will \nbe sent to people messaging you.\nYou may edit this message if you wish."));
gtk_box_pack_start(GTK_BOX(vbox), label, TRUE, TRUE, 5);
gtk_widget_show(label);
/* Editable away-message text view, pre-filled with a_message. */
away_message_text_entry = gtk_text_view_new();
gtk_text_view_set_editable(GTK_TEXT_VIEW
(away_message_text_entry), TRUE);
buffer = gtk_text_view_get_buffer(GTK_TEXT_VIEW
(away_message_text_entry));
gtk_text_buffer_insert_at_cursor(buffer, a_message,
strlen(a_message));
gtk_box_pack_start(GTK_BOX(vbox), away_message_text_entry, TRUE,
TRUE, 5);
gtk_widget_set_size_request(away_message_text_entry, 300, 60);
gtk_widget_show(away_message_text_entry);
label = gtk_button_new_with_label(_("I'm Back"));
g_signal_connect_swapped(label, "clicked",
G_CALLBACK(away_window_set_back), awaybox);
gtk_box_pack_start(GTK_BOX(vbox), label, TRUE, FALSE, 0);
gtk_widget_show(label);
g_signal_connect_swapped(awaybox, "destroy",
G_CALLBACK(destroy_away), awaybox);
gtk_container_add(GTK_CONTAINER(awaybox), vbox);
GTK_WIDGET_SET_FLAGS(label, GTK_CAN_DEFAULT);
gtk_widget_grab_default(label);
gtk_widget_show(vbox);
}
gtk_window_set_title(GTK_WINDOW(awaybox), _("Away"));
gtk_container_set_border_width(GTK_CONTAINER(awaybox), 2);
gtk_widget_show(awaybox);
is_away = 1;
/* Tell every connected account's service backend about the away state. */
for (list = accounts; list; list = list->next) {
ela = list->data;
/* Only change state for those accounts which are connected */
if (ela->connected)
eb_services[ela->service_id].sc->set_away(ela,
a_message, 1);
}
}
broker_check(int event_type, void *data)
{
    /* NEB callback for host/service check events: forward to the
     * livecheck handlers when enabled, count processed checks, and wake
     * any threads waiting on check activity. */
    int result = NEB_OK;

    if (event_type == NEBCALLBACK_SERVICE_CHECK_DATA) {
        nebstruct_service_check_data *c = (nebstruct_service_check_data *)data;

        if (g_livecheck_enabled)
            result = broker_service_livecheck(event_type, data);
        if (c->type == NEBTYPE_SERVICECHECK_PROCESSED)
            g_counters[COUNTER_SERVICE_CHECKS]++;
    }
    else if (event_type == NEBCALLBACK_HOST_CHECK_DATA) {
        nebstruct_host_check_data *c = (nebstruct_host_check_data *)data;

        if (g_livecheck_enabled)
            result = broker_host_livecheck(event_type, data);
        if (c->type == NEBTYPE_HOSTCHECK_PROCESSED)
            g_counters[COUNTER_HOST_CHECKS]++;
    }

    pthread_cond_broadcast(&g_wait_cond[WT_ALL]);
    pthread_cond_broadcast(&g_wait_cond[WT_CHECK]);
    return result;
}
/*
 * FreeType outline "move to" callback used while tesselating extruded
 * glyph outlines: closes the currently open contour (if any), records
 * the target point as the new pen position in extruded->last (z fixed
 * at 0.0), and opens a fresh GLU tesselator contour, marking it open.
 *
 * Always returns 0 (success, per FreeType decomposition callbacks).
 */
gamgi_mesa_text_horizontal_move (FT_Vector* to, gamgi_extruded* extruded)
{
/* a move-to implicitly terminates the previous contour */
if (extruded->open == TRUE) gluTessEndContour (extruded->tesselator);
extruded->last[0] = to->x;
extruded->last[1] = to->y;
extruded->last[2] = 0.0;
gluTessBeginContour (extruded->tesselator);
extruded->open = TRUE;
return 0;
}
/*
 * Emit a 16-bit MOV of an immediate into a register or memory operand.
 *
 * The 0x66 operand-size prefix selects 16-bit operation.  Register
 * destinations use the short MOV r16,imm16 form (0xb8+reg); memory
 * destinations use MOV r/m16,imm16 (0xc7 /0).  In both cases the
 * immediate follows in little-endian byte order.
 */
x86_mov16_imm( struct x86_function *p, struct x86_reg dst, uint16_t imm )
{
   DUMP_RI( dst, imm );
   emit_1ub(p, 0x66);                      /* operand-size override */
   if (dst.mod == mod_REG) {
      emit_1ub(p, 0xb8 + dst.idx);         /* MOV r16, imm16 */
   } else {
      emit_1ub(p, 0xc7);                   /* MOV r/m16, imm16 */
      emit_modrm_noreg(p, 0, dst);
   }
   emit_2ub(p, imm & 0xff, imm >> 8);      /* little-endian immediate */
}
/*
 * NIF: dh_generate_parameters(PrimeLen, Generator) -> [P, G] | error
 *
 * Generates Diffie-Hellman domain parameters with a prime of
 * 'prime_len' bits and the given generator, returning the prime P and
 * generator G as two big-endian binaries (BN_bn2bin() format) in a
 * two-element list.  Returns badarg if either argument is not an
 * integer and the atom 'error' if OpenSSL fails to generate.
 *
 * NOTE(review): this uses the legacy pre-1.1 OpenSSL API — the
 * deprecated DH_generate_parameters() and direct dh_params->p/->g
 * struct access; it will not build against OpenSSL >= 1.1 without
 * DH_generate_parameters_ex() and DH_get0_pqg().
 * NOTE(review): the pointers from enif_make_new_binary() are not
 * NULL-checked — presumably the Erlang runtime aborts on allocation
 * failure; confirm against erl_nif documentation.
 */
dh_generate_parameters_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (PrimeLen, Generator) */
    int prime_len, generator;
    DH* dh_params;
    int p_len, g_len;
    unsigned char *p_ptr, *g_ptr;
    ERL_NIF_TERM ret_p, ret_g;
    if (!enif_get_int(env, argv[0], &prime_len)
	|| !enif_get_int(env, argv[1], &generator)) {
	return enif_make_badarg(env);
    }
    dh_params = DH_generate_parameters(prime_len, generator, NULL, NULL);
    if (dh_params == NULL) {
	return atom_error;
    }
    p_len = BN_num_bytes(dh_params->p);
    g_len = BN_num_bytes(dh_params->g);
    p_ptr = enif_make_new_binary(env, p_len, &ret_p);
    g_ptr = enif_make_new_binary(env, g_len, &ret_g);
    BN_bn2bin(dh_params->p, p_ptr);
    BN_bn2bin(dh_params->g, g_ptr);
    /* Tell valgrind the OpenSSL-written bytes are initialized. */
    ERL_VALGRIND_MAKE_MEM_DEFINED(p_ptr, p_len);
    ERL_VALGRIND_MAKE_MEM_DEFINED(g_ptr, g_len);
    DH_free(dh_params);
    return enif_make_list2(env, ret_p, ret_g);
}
/*
 * Enable or halt the gfx Command Processor micro-engines.
 *
 * Read-modify-writes CP_ME_CNTL, clearing (enable) or setting (halt)
 * the ME, PFP and CE halt bits.  When halting, every gfx ring is also
 * marked not ready so no further submissions are attempted.  The
 * udelay(50) after the register write gives the CP time to settle
 * (NOTE(review): presumably a hardware requirement — confirm against
 * the ASIC programming docs).
 */
gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32(mmCP_ME_CNTL);
	if (enable) {
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
		/* stop accepting work on every gfx ring while halted */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].ready = false;
	}
	WREG32(mmCP_ME_CNTL, tmp);
	udelay(50);
}
/*
 * Identify the i.MX boot-image header version by its magic values.
 *
 * The same header union is viewed through both layouts: V1 is detected
 * by the application-code and DCD barker words, V2 by the IVT and DCD
 * header tags.  Only these magic fields are read, so probing both
 * interpretations is safe.  Returns IMXIMAGE_V1, IMXIMAGE_V2, or
 * IMXIMAGE_VER_INVALID when neither signature matches.
 */
detect_imximage_version(struct imx_header *imx_hdr)
{
	imx_header_v1_t *hdr_v1 = &imx_hdr->header.hdr_v1;
	imx_header_v2_t *hdr_v2 = &imx_hdr->header.hdr_v2;
	flash_header_v1_t *fhdr_v1 = &hdr_v1->fhdr;
	flash_header_v2_t *fhdr_v2 = &hdr_v2->fhdr;
	/* Try to detect V1 */
	if ((fhdr_v1->app_code_barker == APP_CODE_BARKER) &&
		(hdr_v1->dcd_table.preamble.barker == DCD_BARKER))
		return IMXIMAGE_V1;
	/* Try to detect V2 */
	if ((fhdr_v2->header.tag == IVT_HEADER_TAG) &&
		(hdr_v2->dcd_table.header.tag == DCD_HEADER_TAG))
		return IMXIMAGE_V2;
	return IMXIMAGE_VER_INVALID;
}
/*
 * Return a "portable" form of a path: transform it via
 * mprTransformPath() with no flags, then convert every backslash
 * separator to a forward slash in place.  The transformed (allocated)
 * string is returned.
 */
mprGetPortablePath(cchar *path)
{
    char    *out, *p;

    out = mprTransformPath(path, 0);
    for (p = out; *p; p++) {
        if (*p == '\\') {
            *p = '/';
        }
    }
    return out;
}
/*
 * Inverse of the CIE L*a*b* f() transfer helper.
 *
 * Above the linear/cubic breakpoint (~6/29 = 0.206893) the inverse is
 * simply t^3; below it the inverse of the linear segment
 * f(t) = 7.787*t + 16/116 is applied.
 *
 * The explicit 'double' return type is restored here: as written the
 * definition fell back to implicit int, which truncates the fractional
 * result (and is rejected outright by C99-conforming / C23 compilers).
 */
double
ffunc_inv(const double t)
{
    if (t > 0.206893F)
    {
        return (t * t * t);
    }
    else
    {
        return ((t - 16.0F/116.0F) / 7.787F);
    }
}
/*
 * Allocate and initialize one rdatasetheader_t from 'mctx'.
 *
 * Returns NULL on allocation failure.  With TRACE_HEADER builds,
 * allocations for IN-class cache databases are logged to stderr for
 * debugging.  The header is initialized via init_rdataset() before
 * being returned.
 */
new_rdataset(dns_rbtdb_t *rbtdb, isc_mem_t *mctx)
{
	rdatasetheader_t *h;
	h = isc_mem_get(mctx, sizeof(*h));
	if (h == NULL)
		return (NULL);
#if TRACE_HEADER
	if (IS_CACHE(rbtdb) && rbtdb->common.rdclass == dns_rdataclass_in)
		fprintf(stderr, "allocated header: %p\n", h);
#endif
	init_rdataset(rbtdb, h);
	return (h);
}
/**
 * Slot invoked when a Scrobbler reports tracks successfully submitted.
 *
 * Removes the submitted tracks from the sending Scrobbler's cache
 * (taking a backup first when more than two tracks were involved, as a
 * guard against cache-corruption bugs).  If tracks remain in the cache
 * another scrobble pass is started; otherwise, on the final batch, a
 * TracksScrobbled status is emitted — but only when something was
 * actually scrobbled (not merely skipped or banned) — and the
 * scrobble counter is reset.
 *
 * Must be connected to a Scrobbler signal: sender() is asserted and
 * then downcast unconditionally.
 */
onScrobbled( const QList<TrackInfo>& tracks )
{
    Q_DEBUG_BLOCK << tracks.count() << "tracks were successfully scrobbled";
    Q_ASSERT( sender() );
    //////
    Scrobbler* scrobbler = static_cast<Scrobbler*>(sender());
    ScrobbleCache cache( scrobbler->username() );
    if (tracks.count() > 2)
        //do a backup because nobody writes perfect code, least of all me!
        cache.backup();
    int remaining = cache.remove( tracks );
    if (remaining)
        scrobble( cache );
    else {
        // only show status on final submission batch, and only if something
        // was scrobbled (not skipped or banned)
        if (scrobbler->scrobbled() > 0)
            emit status( Scrobbler::TracksScrobbled, scrobbler->scrobbled() );
        scrobbler->resetScrobbleCount();
    }
}
/*
 * Reset a channel to a pristine state for reuse: drop all datastores
 * and channel variables while preserving (and reattaching) the message
 * datastore object itself, then clear any soft-hangup flags.  The
 * whole operation runs under the channel lock.
 */
chan_cleanup(struct ast_channel *chan)
{
	struct ast_datastore *msg_ds, *ds;
	struct varshead *headp;
	struct ast_var_t *vardata;
	ast_channel_lock(chan);
	/*
	 * Remove the msg datastore.  Free its data but keep around the
	 * datastore object and just reuse it (reattached below).
	 */
	if ((msg_ds = ast_channel_datastore_find(chan, &msg_datastore, NULL)) && msg_ds->data) {
		ast_channel_datastore_remove(chan, msg_ds);
		ao2_ref(msg_ds->data, -1);
		msg_ds->data = NULL;
	}
	/*
	 * Destroy all other datastores.
	 */
	while ((ds = AST_LIST_REMOVE_HEAD(ast_channel_datastores(chan), entry))) {
		ast_datastore_free(ds);
	}
	/*
	 * Destroy all channel variables.
	 */
	headp = ast_channel_varshead(chan);
	while ((vardata = AST_LIST_REMOVE_HEAD(headp, entries))) {
		ast_var_delete(vardata);
	}
	/*
	 * Restore msg datastore (now empty) so the channel keeps one.
	 */
	if (msg_ds) {
		ast_channel_datastore_add(chan, msg_ds);
	}
	/*
	 * Clear softhangup flags.
	 */
	ast_channel_clear_softhangup(chan, AST_SOFTHANGUP_ALL);
	ast_channel_unlock(chan);
}
/*
 * Stop the login thread associated with a TPG network portal.
 *
 * Logs an error and bails out if the portal back-pointer is missing.
 * On shutdown the np is first marked disabled so no new logins are
 * accepted, then the np login thread is reset/stopped via
 * iscsit_reset_np_thread().
 */
iscsit_clear_tpg_np_login_thread(
	struct iscsi_tpg_np *tpg_np,
	struct iscsi_portal_group *tpg,
	bool shutdown)
{
	if (!tpg_np->tpg_np) {
		pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
		return;
	}
	if (shutdown)
		tpg_np->tpg_np->enabled = false;
	iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
}
/*
 * Load a project's contents from its graft-point and exclusion lists.
 *
 * Pass 1 walks 'grafts' (BraseroGraftPt: URI + disc path), normalizing
 * each URI through GFile and stripping any trailing '/' from the path
 * (legacy brasero project files), and adds each path/URI pair to the
 * tree; temporary parent folders created along the way accumulate in
 * 'folders'.  Pass 2 records every URI in 'excluded'.  Pass 3 resolves
 * each temporary folder: folders with no resolvable URI become fake
 * NEW_FOLDER grafts (their imported ancestors are gone), the rest are
 * grafted at their real URI.  Node-addition signals are deferred until
 * after loading.  Sets and clears priv->is_loading_contents around the
 * work and returns priv->loading as computed by
 * brasero_data_project_load_contents_notify().
 */
brasero_data_project_load_contents (BraseroDataProject *self,
				    GSList *grafts,
				    GSList *excluded)
{
	GSList *iter;
	GSList *folders = NULL;
	BraseroDataProjectPrivate *priv;
	priv = BRASERO_DATA_PROJECT_PRIVATE (self);
	priv->is_loading_contents = 1;
	/* Pass 1: add every graft point (normalized URI + cleaned path). */
	for (iter = grafts; iter; iter = iter->next) {
		BraseroGraftPt *graft;
		GFile *file;
		gchar *path;
		gchar *uri;
		graft = iter->data;
		if (graft->uri) {
			/* round-trip through GFile to normalize the URI */
			file = g_file_new_for_uri (graft->uri);
			uri = g_file_get_uri (file);
			g_object_unref (file);
		}
		else
			uri = NULL;
		if (graft->path) {
			/* This might happen if we are loading brasero projects */
			if (g_str_has_suffix (graft->path, G_DIR_SEPARATOR_S)) {
				int len;
				len = strlen (graft->path);
				path = g_strndup (graft->path, len - 1);
			}
			else
				path = g_strdup (graft->path);
		}
		else
			path = NULL;
		folders = brasero_data_project_add_path (self,
							 path,
							 uri,
							 folders);
		g_free (path);
		g_free (uri);
	}
	/* Pass 2: record the excluded URIs (normalized the same way). */
	for (iter = excluded; iter; iter = iter->next) {
		gchar *uri;
		GFile *file;
		file = g_file_new_for_uri (iter->data);
		uri = g_file_get_uri (file);
		g_object_unref (file);
		folders = brasero_data_project_add_excluded_uri (self,
								 uri,
								 folders);
		g_free (uri);
	}
	/* Now load the temporary folders that were created */
	for (iter = folders; iter; iter = iter->next) {
		BraseroURINode *graft;
		BraseroFileNode *tmp;
		gchar *uri;
		tmp = iter->data;
		/* get the URI for this node. There should be one now that all
		 * graft nodes are in the tree. */
		uri = brasero_data_project_node_to_uri (self, tmp);
		if (!uri) {
			/* This node has been grafted under a node that was
			 * imported or was itself an imported node. Since there
			 * is no imported nodes any more, then it has to become
			 * fake.
			 * NOTE: it has to be a directory */
			tmp->is_fake = TRUE;
			tmp->is_loading = FALSE;
			tmp->is_reloading = FALSE;
			graft = brasero_data_project_uri_ensure_graft (self, NEW_FOLDER);
			brasero_file_node_graft (tmp, graft);
			/* Don't signal the node addition yet we'll do it later
			 * when all the nodes are created */
			continue;
		}
		/* graft it ? */
		graft = brasero_data_project_uri_ensure_graft (self, uri);
		brasero_file_node_graft (tmp, graft);
		g_free (uri);
		/* Don't signal the node addition yet we'll do it later when
		 * all the nodes are created */
	}
	g_slist_free (folders);
	priv->loading = brasero_data_project_load_contents_notify (self);
	priv->is_loading_contents = 0;
	return priv->loading;
}
/*
 * Tear down one hostap virtual interface.
 *
 * Silently ignores a NULL dev.  Optionally unlinks the interface from
 * the local device list, clears whichever of the local ddev/apdev/
 * stadev back-pointers referenced it, and unregisters the netdev —
 * using unregister_netdevice() when the caller already holds the RTNL
 * lock, unregister_netdev() otherwise.
 */
hostap_remove_interface(struct net_device *dev, int rtnl_locked,
			int remove_from_list)
{
	struct hostap_interface *iface;
	if (!dev)
		return;
	iface = netdev_priv(dev);
	if (remove_from_list) {
		list_del(&iface->list);
	}
	/* clear the matching back-pointer so no stale reference remains */
	if (dev == iface->local->ddev)
		iface->local->ddev = NULL;
	else if (dev == iface->local->apdev)
		iface->local->apdev = NULL;
	else if (dev == iface->local->stadev)
		iface->local->stadev = NULL;
	if (rtnl_locked)
		unregister_netdevice(dev);
	else
		unregister_netdev(dev);
	/* dev->destructor = free_netdev() will free the device data, including
	 * private data, when removing the device */
}
/*
 * Initiate SP-initiated Single Logout via the SAML2 HTTP-Redirect
 * binding.
 *
 * Requires a SAML2 SSO assertion in the session (ses->a7n); builds a
 * LogoutRequest carrying the session's NameID and SessionIndex (when
 * present), addresses it to the IdP's redirect SLO endpoint, and
 * returns the redirect response string.  SAML 1.1 / ID-FF 1.2
 * assertions are not supported and fall through to "* ERR", as do a
 * missing IdP, missing endpoint, or missing assertion.
 *
 * Fix: the SessionIndex pointer is now checked before dereferencing —
 * previously an assertion whose AuthnStatement lacked a SessionIndex
 * caused a NULL pointer dereference.
 */
zxid_sp_slo_redir(zxid_conf* cf, zxid_cgi* cgi, zxid_ses* ses)
{
  zxid_get_ses_sso_a7n(cf, ses);
  if (ses->a7n) {
    struct zx_sp_LogoutRequest_s* r;
    struct zx_str* rs;
    struct zx_str* loc;
    zxid_entity* idp_meta;
    struct zx_str* ses_ix;
    /* Guard both AuthnStatement and SessionIndex: SessionIndex is
     * optional in an AuthnStatement. */
    ses_ix = (ses->a7n->AuthnStatement && ses->a7n->AuthnStatement->SessionIndex)
      ? &ses->a7n->AuthnStatement->SessionIndex->g : 0;
    if (cf->log_level>0)
      zxlog(cf, 0, 0, 0, 0, 0, 0, ZX_GET_CONTENT(ses->nameid), "N", "W", "SLOREDIR", ses->sid, "sesix(%.*s)", ses_ix?ses_ix->len:1, ses_ix?ses_ix->s:"?");
    idp_meta = zxid_get_ses_idp(cf, ses);
    if (!idp_meta)
      return zx_dup_str(cf->ctx, "* ERR");
    loc = zxid_idp_loc(cf, cgi, ses, idp_meta, ZXID_SLO_SVC, SAML2_REDIR);
    if (!loc)
      return zx_dup_str(cf->ctx, "* ERR");
    r = zxid_mk_logout(cf, zxid_get_user_nameid(cf, ses->nameid), ses_ix, idp_meta);
    r->Destination = zx_ref_len_attr(cf->ctx, &r->gg, zx_Destination_ATTR, loc->len, loc->s);
    rs = zx_easy_enc_elem_opt(cf, &r->gg);
    D("SLO(%.*s)", rs->len, rs->s);
    return zxid_saml2_redir(cf, loc, rs, 0);
  }
  if (ses->a7n11) {
    ERR("Not implemented, SAML 1.1 assetion %d", 0);
  }
  if (ses->a7n12) {
    ERR("Not implemented, ID-FF 1.2 type SAML 1.1 assetion %d", 0);
  }
  ERR("Session sid(%s) lacks SSO assertion.", ses->sid);
  return zx_dup_str(cf->ctx, "* ERR");
}
startVoice(int note, int velocity) { /* start playing a voice */
int i;
voice* newVoice; /* voice to use */
if (actVoices < voiceCount) { /* not all voices are playing -> use the next free */
for (i = 0; i < voiceCount; i++) {
if (voices[i].act == 0) {
newVoice = &(voices[i]);
break;
}
}
if (actVoices == 0) { /* no other voices are playing -> current is the first and last started voice */
firstVoice = newVoice;
lastVoice = newVoice;
}
actVoices++;
} else { /* all voices are playing -> use first started voice */
newVoice = firstVoice;
firstVoice = firstVoice->postVoice; /* the voice started after the first is now the first started */
}
/* add current voice to the end */
newVoice->preVoice = lastVoice;
lastVoice->postVoice = newVoice;
lastVoice = newVoice;
/* set values for current voice */
newVoice->freq = getFreq(note);
newVoice->velocity = velocity / 128.0;
newVoice->act = 1;
reinitLocalUnits(newVoice - voices);
#ifdef DEBUG_VOICES
debugVoices();
#endif
} | false | false | false | false | true | 1 |
/*
 * Property setter for "rule": installs a new rom-kana map file on the
 * converter, replacing the old reference, resets current_node to the
 * new rule's root node, and notifies the "rule" property.
 *
 * Reference counting follows the Vala conventions: _g_object_ref0()
 * and _g_object_unref0() are NULL-safe ref/unref helpers.
 */
skk_rom_kana_converter_set_rule (SkkRomKanaConverter* self, SkkRomKanaMapFile* value) {
	SkkRomKanaMapFile* new_rule;
	SkkRomKanaNode* new_node;
	g_return_if_fail (self != NULL);
	/* take our own reference on the incoming rule, drop the old one */
	new_rule = _g_object_ref0 (value);
	_g_object_unref0 (self->priv->_rule);
	self->priv->_rule = new_rule;
	/* restart matching from the new rule's root node */
	new_node = _g_object_ref0 (self->priv->_rule->root_node);
	_g_object_unref0 (self->priv->current_node);
	self->priv->current_node = new_node;
	g_object_notify ((GObject *) self, "rule");
}
/*
 * Seqlock-style reader retry check for clock calibration reads.
 *
 * Returns FALSE when the pre-write sequence number still equals the
 * snapshot 'seq' the reader took, i.e. no writer intervened and the
 * read values are consistent.  Otherwise a writer is (or was) active:
 * take and immediately release the object lock purely to wait for the
 * writer to finish, then return TRUE so the caller re-reads.
 */
read_seqretry (GstClock * clock, gint seq)
{
  /* no retry if the seqnum did not change */
  if (G_LIKELY (seq == g_atomic_int_get (&clock->ABI.priv->pre_count)))
    return FALSE;
  /* wait for the writer to finish and retry */
  GST_OBJECT_LOCK (clock);
  GST_OBJECT_UNLOCK (clock);
  return TRUE;
}
/*
 * Add two 128-bit unsigned integers stored as 16 little-endian bytes.
 *
 * Performs a byte-wise ripple-carry addition of *this and 'other' and
 * returns the 128-bit sum by value; any carry out of the top byte is
 * discarded (modulo-2^128 arithmetic).  Neither operand is modified.
 */
operator+(ulong128& other)
{
	ulong128 sum;
	memcpy(sum.bytes, this->bytes, 16);
	unsigned int carry = 0;
	for (int idx = 0; idx < 16; idx++)
	{
		unsigned int t = carry
			+ (unsigned int)other.bytes[idx]
			+ (unsigned int)this->bytes[idx];
		sum.bytes[idx] = (t & 0xFF);
		carry = t >> 8;
	}
	return sum;
}
/*
 * Thread callback driving an SMTP health check connection.
 *
 * Inspects the non-blocking connect status via tcp_socket_state() and
 * either fails the check with a descriptive message (connect error /
 * timeout / unknown status) or, on success, enters the SMTP protocol
 * engine at state SMTP_START.  Always returns 0.
 *
 * NOTE(review): the 'break' statements after each 'return 0' are
 * unreachable dead code kept for switch-case symmetry.
 */
smtp_check_thread(thread_t *thread)
{
	checker_t *checker = THREAD_ARG(thread);
	smtp_checker_t *smtp_checker = CHECKER_ARG(checker);
	smtp_host_t *smtp_host = smtp_checker->host_ptr;
	int status;
	status = tcp_socket_state(thread->u.fd, thread, smtp_check_thread);
	switch (status) {
	case connect_error:
		smtp_final(thread, 1, "Error connecting to server [%s]:%d"
				, inet_sockaddrtos(&smtp_host->dst)
				, ntohs(inet_sockaddrport(&smtp_host->dst)));
		return 0;
		break;
	case connect_timeout:
		smtp_final(thread, 1, "Connection timeout to server [%s]:%d"
				, inet_sockaddrtos(&smtp_host->dst)
				, ntohs(inet_sockaddrport(&smtp_host->dst)));
		return 0;
		break;
	case connect_success:
		DBG("SMTP_CHECK Remote SMTP server [%s]:%d connected"
		    , inet_sockaddrtos(&smtp_host->dst)
		    , ntohs(inet_sockaddrport(&smtp_host->dst)));
		/* Enter the engine at SMTP_START */
		smtp_checker->state = SMTP_START;
		smtp_engine_thread(thread);
		return 0;
		break;
	}
	/* we shouldn't be here */
	smtp_final(thread, 1, "Unknown connection error to server [%s]:%d"
			, inet_sockaddrtos(&smtp_host->dst)
			, ntohs(inet_sockaddrport(&smtp_host->dst)));
	return 0;
}
/*
** Print a progress indicator on the console, overwriting the same
** line via '\r'.  'permill' is the completion in tenths of a percent;
** output is produced only when the value advances past the last value
** printed (remembered across calls in a static).
*/
percent_complete(int permill){
  static int prev = -1;
  if( permill<=prev ) return;
  fossil_print(" %d.%d%% complete...\r", permill/10, permill%10);
  fflush(stdout);
  prev = permill;
}
/*
 * "Roll" a work item so it can be hashed again with a fresh search
 * space: advance the big-endian ntime field (bytes 68..71 of the work
 * data) by one second, reset the nonce, and bump the local-work and
 * per-work roll counters.  The work gets a new id because the
 * hashtable keys entries by id and this is now a distinct item.
 */
roll_work(struct work *work)
{
	uint32_t *work_ntime;
	uint32_t ntime;
	work_ntime = (uint32_t *)(work->data + 68);
	ntime = be32toh(*work_ntime);
	ntime++;
	*work_ntime = htobe32(ntime);
	local_work++;
	work->rolls++;
	work->blk.nonce = 0;
	applog(LOG_DEBUG, "Successfully rolled work");
	/* This is now a different work item so it needs a different ID for the
	 * hashtable */
	work->id = total_work++;
}
/*
 * Decide whether a currency-conversion rate is needed for this split.
 *
 * Registers whose type has no rate cell never need one.  Otherwise a
 * rate is unnecessary exactly when the transaction currency and the
 * account commodity are both known and equal; in every other case
 * TRUE is returned.
 */
gnc_split_register_needs_conv_rate (SplitRegister *reg,
                                    Transaction *txn, Account *acc)
{
    gnc_commodity *txn_currency, *acct_commodity;

    /* If there is not a RATE_CELL, then don't do anything */
    if (!gnc_split_reg_has_rate_cell (reg->type))
        return FALSE;

    txn_currency = xaccTransGetCurrency (txn);
    acct_commodity = xaccAccountGetCommodity (acc);

    /* matching commodities mean no conversion is required */
    if (txn_currency && acct_commodity &&
        gnc_commodity_equal (txn_currency, acct_commodity))
        return FALSE;

    return TRUE;
}
/*
 * Append the writable attributes of every object in 'list' to the
 * registration message's operating-attribute set.
 */
isns_registration_add_object_list(isns_simple_t *reg, isns_object_list_t *list)
{
	unsigned int idx = 0;

	while (idx < list->iol_count) {
		isns_object_extract_writable(list->iol_data[idx],
				&reg->is_operating_attrs);
		idx++;
	}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.