functionSource
stringlengths
20
97.4k
CWE-119
bool
2 classes
CWE-120
bool
2 classes
CWE-469
bool
2 classes
CWE-476
bool
2 classes
CWE-other
bool
2 classes
combine
int64
0
1
say_datetime(struct ast_channel *chan, time_t t, const char *ints, const char *lang) { if (!strncasecmp(lang, "en", 2)) { /* English syntax */ return ast_say_datetime_en(chan, t, ints, lang); } else if (!strncasecmp(lang, "de", 2)) { /* German syntax */ return ast_say_datetime_de(chan, t, ints, lang); } else if (!strncasecmp(lang, "fr", 2)) { /* French syntax */ return ast_say_datetime_fr(chan, t, ints, lang); } else if (!strncasecmp(lang, "ge", 2)) { /* deprecated Georgian syntax */ static int deprecation_warning = 0; if (deprecation_warning++ % 10 == 0) { ast_log(LOG_WARNING, "ge is not a standard language code. Please switch to using ka instead.\n"); } return ast_say_datetime_ka(chan, t, ints, lang); } else if (!strncasecmp(lang, "gr", 2)) { /* Greek syntax */ return ast_say_datetime_gr(chan, t, ints, lang); } else if (!strncasecmp(lang, "he", 2)) { /* Hebrew syntax */ return ast_say_datetime_he(chan, t, ints, lang); } else if (!strncasecmp(lang, "hu", 2)) { /* Hungarian syntax */ return ast_say_datetime_hu(chan, t, ints, lang); } else if (!strncasecmp(lang, "ka", 2)) { /* Georgian syntax */ return ast_say_datetime_ka(chan, t, ints, lang); } else if (!strncasecmp(lang, "nl", 2)) { /* Dutch syntax */ return ast_say_datetime_nl(chan, t, ints, lang); } else if (!strncasecmp(lang, "pt_BR", 5)) { /* Brazilian Portuguese syntax */ return ast_say_datetime_pt_BR(chan, t, ints, lang); } else if (!strncasecmp(lang, "pt", 2)) { /* Portuguese syntax */ return ast_say_datetime_pt(chan, t, ints, lang); } else if (!strncasecmp(lang, "th", 2)) { /* Thai syntax */ return ast_say_datetime_th(chan, t, ints, lang); } else if (!strncasecmp(lang, "tw", 2)) { /* deprecated Taiwanese syntax */ static int deprecation_warning = 0; if (deprecation_warning++ % 10 == 0) { ast_log(LOG_WARNING, "tw is a standard language code for Twi, not Taiwanese. 
Please switch to using zh_TW instead.\n"); } return ast_say_datetime_zh(chan, t, ints, lang); } else if (!strncasecmp(lang, "zh", 2)) { /* Taiwanese / Chinese syntax */ return ast_say_datetime_zh(chan, t, ints, lang); } /* Default to English */ return ast_say_datetime_en(chan, t, ints, lang); }
false
false
false
false
false
0
cancel_current_search (NautilusShellSearchProvider *self)
{
  /* Nothing to cancel when no search is in flight. */
  if (self->current_search == NULL)
    return;

  /* Ask the provider to stop the engine driving the active search. */
  nautilus_search_provider_stop (NAUTILUS_SEARCH_PROVIDER (self->current_search->engine));
}
false
false
false
false
false
0
synth_move (byte *newboard, byte *board)
{
	/* Collect (col, row, new-value) triples for every cell that differs
	 * between the two boards, terminated by a -1 sentinel.  The result
	 * lives in a shared static buffer and is overwritten on each call. */
	static byte movbuf [1024];
	int col, row, n = 0;

	for (col = 0; col < board_wid; col++)
	{
		for (row = 0; row < board_heit; row++)
		{
			int cell = row * board_wid + col;
			if (newboard[cell] == board[cell])
				continue;
			movbuf[n++] = col;
			movbuf[n++] = row;
			movbuf[n++] = newboard[cell];
		}
	}
	movbuf[n] = -1;
	return movbuf;
}
false
false
false
false
false
0
/* Bind `map`/`off` into the ARJ metadata and validate the archive by
 * checking the magic and reading the main header.  Returns CL_SUCCESS,
 * or CL_EFORMAT when either check fails.  `dirname` is unused here. */
cli_unarj_open(fmap_t *map, const char *dirname, arj_metadata_t *metadata, size_t off) { cli_dbgmsg("in cli_unarj_open\n"); metadata->map = map; metadata->offset = off; if (!is_arj_archive(metadata)) { cli_dbgmsg("Not in ARJ format\n"); return CL_EFORMAT; } if (!arj_read_main_header(metadata)) { cli_dbgmsg("Failed to read main header\n"); return CL_EFORMAT; } return CL_SUCCESS; }
false
false
false
false
false
0
/* Scan the four primary DOS partitions of the parent context for one with a
 * BSD partition type and a non-zero start sector; on a hit, record it as
 * l->dos_part, rewrite cxt->dev_path to the parent's partition name, and
 * return 0.  Returns -1 on an invalid start sector, 1 when no BSD partition
 * exists (clearing dev_path and dos_part). */
bsd_assign_dos_partition(struct fdisk_context *cxt) { struct fdisk_bsd_label *l = self_label(cxt); size_t i; for (i = 0; i < 4; i++) { fdisk_sector_t ss; l->dos_part = fdisk_dos_get_partition(cxt->parent, i); if (!l->dos_part || !is_bsd_partition_type(l->dos_part->sys_ind)) continue; ss = dos_partition_get_start(l->dos_part); if (!ss) { fdisk_warnx(cxt, _("Partition %zd: has invalid starting " "sector 0."), i + 1); return -1; } if (cxt->parent->dev_path) { free(cxt->dev_path); cxt->dev_path = fdisk_partname( cxt->parent->dev_path, i + 1); } DBG(LABEL, ul_debug("partition %zu assigned to BSD", i + 1)); return 0; } fdisk_warnx(cxt, _("There is no *BSD partition on %s."), cxt->parent->dev_path); free(cxt->dev_path); cxt->dev_path = NULL; l->dos_part = NULL; return 1; }
false
false
false
false
false
0
/* GObject finalize: release the /proc channel refs and watch sources for
 * mounts and swaps, unref every cached mount and free the list, then chain
 * up to the parent class finalizer. */
udisks_mount_monitor_finalize (GObject *object) { UDisksMountMonitor *monitor = UDISKS_MOUNT_MONITOR (object); if (monitor->mounts_channel != NULL) g_io_channel_unref (monitor->mounts_channel); if (monitor->mounts_watch_source != NULL) g_source_destroy (monitor->mounts_watch_source); if (monitor->swaps_channel != NULL) g_io_channel_unref (monitor->swaps_channel); if (monitor->swaps_watch_source != NULL) g_source_destroy (monitor->swaps_watch_source); g_list_foreach (monitor->mounts, (GFunc) g_object_unref, NULL); g_list_free (monitor->mounts); if (G_OBJECT_CLASS (udisks_mount_monitor_parent_class)->finalize != NULL) G_OBJECT_CLASS (udisks_mount_monitor_parent_class)->finalize (object); }
false
false
false
false
false
0
FcStrBufString (FcStrBuf *buf, const FcChar8 *s)
{
    /* Append the NUL-terminated string s to buf one character at a time;
     * FcFalse on the first failed append, FcTrue when all went in. */
    const FcChar8 *p;

    for (p = s; *p; p++)
    {
	if (!FcStrBufChar (buf, *p))
	    return FcFalse;
    }
    return FcTrue;
}
false
false
false
false
false
0
/* Re-apply defaults to every log subsystem whose file/facility/snomask/level
 * was NOT explicitly marked: destroys stale file entries (LS_DEBUG excepted),
 * and restores default facility, snomask and level.  `flag` non-zero also
 * resets the global default first.  Always returns 0 (no notify handler). */
log_feature_mark(int flag) { int i; if (flag) log_set_default(0); for (i = 0; i < LS_LAST_SYSTEM; i++) { if (!(logDesc[i].mark & LOG_MARK_FILE)) { if (logDesc[i].subsys != LS_DEBUG) { /* debug is special */ if (logDesc[i].file) /* destroy previous entry... */ log_file_destroy(logDesc[i].file); logDesc[i].file = 0; } } if (!(logDesc[i].mark & LOG_MARK_FACILITY)) /* set default facility */ logDesc[i].facility = logDesc[i].def_fac; if (!(logDesc[i].mark & LOG_MARK_SNOMASK)) /* set default snomask */ logDesc[i].snomask = logDesc[i].def_sno; if (!(logDesc[i].mark & LOG_MARK_LEVEL)) /* set default level */ logDesc[i].level = L_DEFAULT; } return 0; /* we don't have a notify handler */ }
false
false
false
false
false
0
/* (K&R-style definition.)  Report an unbound key sequence on the status
 * line: shows "<keys>: not defined." with a beep unless the sequence ended
 * in the interrupt key, then restores either the supplied status message or
 * the default one and refreshes the input line / terminal. */
report_undefined_key(status_message) char *status_message; { char *prev = tty_get_previous_key_seq(); size_t length = strlen(prev); if (length && (prev[length - 1] != key_INTERRUPT)) { char *str = (char *)tty_key_machine2human(prev); char *buf = xmalloc(128 + strlen(str)); sprintf(buf, "%s: not defined.", str); status(buf, STATUS_ERROR, STATUS_LEFT); xfree(buf); tty_beep(); tty_update(); sleep(1); } else tty_beep(); if (status_message) status(status_message, STATUS_OK, STATUS_CENTERED); else status_default(); il_update_point(); tty_update(); }
false
false
false
false
false
0
/* Classic daemonization: unblock signals, fork (parent polls the child via
 * waitpid every 250ms and exits once the child signals readiness through
 * _exit_now, else propagates the child's exit code), chdir to /, close all
 * descriptors up to RLIMIT_NOFILE (sparing systemd-preloaded fds on Linux),
 * reopen stdin/stdout/stderr on /dev/null, and setsid().
 * NOTE(review): the fd-closing loop assumes rlim_cur fits in int — verify
 * for RLIM_INFINITY configurations. */
_daemonize(void) { int child_status; int fd; pid_t pid; struct rlimit rlim; struct timeval tval; sigset_t my_sigset; sigemptyset(&my_sigset); if (sigprocmask(SIG_SETMASK, &my_sigset, NULL) < 0) { fprintf(stderr, "Unable to restore signals.\n"); exit(EXIT_FAILURE); } signal(SIGTERM, &_exit_handler); switch (pid = fork()) { case -1: perror("fork failed:"); exit(EXIT_FAILURE); case 0: /* Child */ break; default: /* Wait for response from child */ while (!waitpid(pid, &child_status, WNOHANG) && !_exit_now) { tval.tv_sec = 0; tval.tv_usec = 250000; /* .25 sec */ select(0, NULL, NULL, NULL, &tval); } if (_exit_now) /* Child has signaled it is ok - we can exit now */ exit(EXIT_SUCCESS); /* Problem with child. Determine what it is by exit code */ switch (WEXITSTATUS(child_status)) { case EXIT_DESC_CLOSE_FAILURE: case EXIT_DESC_OPEN_FAILURE: case EXIT_FIFO_FAILURE: case EXIT_CHDIR_FAILURE: default: fprintf(stderr, "Child exited with code %d\n", WEXITSTATUS(child_status)); break; } exit(WEXITSTATUS(child_status)); } if (chdir("/")) exit(EXIT_CHDIR_FAILURE); if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) fd = 256; /* just have to guess */ else fd = rlim.rlim_cur; for (--fd; fd >= 0; fd--) { #ifdef linux /* Do not close fds preloaded by systemd! */ if (_systemd_activation && (fd == SD_FD_FIFO_SERVER || fd == SD_FD_FIFO_CLIENT)) continue; #endif (void) close(fd); } if ((open("/dev/null", O_RDONLY) < 0) || (open("/dev/null", O_WRONLY) < 0) || (open("/dev/null", O_WRONLY) < 0)) exit(EXIT_DESC_OPEN_FAILURE); setsid(); }
false
false
false
false
false
0
/* XML <wpt> start-tag handler: allocate a fresh waypoint and fill latitude/
 * longitude from the NULL-terminated name/value attribute pairs in attrv.
 * Side effects: resets cur_tag and points fs_ptr at the new waypoint's
 * format-specific data chain. */
tag_wpt(const char** attrv) { const char** avp = &attrv[0]; wpt_tmp = waypt_new(); cur_tag = NULL; while (*avp) { if (strcmp(avp[0], "lat") == 0) { sscanf(avp[1], "%lf", &wpt_tmp->latitude); } else if (strcmp(avp[0], "lon") == 0) { sscanf(avp[1], "%lf", &wpt_tmp->longitude); } avp+=2; } fs_ptr = &wpt_tmp->fs; }
false
false
false
false
false
0
/* Issue a <project_attach> RPC with the given project URL, authenticator and
 * name.  The request is built with a bounded snprintf (explicitly
 * NUL-terminated); returns the rpc.do_rpc error code, or the parsed reply
 * status on transport success. */
project_attach( const char* url, const char* auth, const char* name ) { int retval; SET_LOCALE sl; char buf[768]; RPC rpc(this); snprintf(buf, sizeof(buf), "<project_attach>\n" " <project_url>%s</project_url>\n" " <authenticator>%s</authenticator>\n" " <project_name>%s</project_name>\n" "</project_attach>\n", url, auth, name ); buf[sizeof(buf)-1] = 0; retval = rpc.do_rpc(buf); if (retval) return retval; return rpc.parse_reply(); }
false
false
false
false
false
0
pci_reset_notify(struct pci_dev *dev, bool prepare)
{
	/* Forward a reset notification to the bound driver, if it registered
	 * error handlers carrying a reset_notify callback. */
	const struct pci_error_handlers *eh;

	if (!dev->driver)
		return;

	eh = dev->driver->err_handler;
	if (eh && eh->reset_notify)
		eh->reset_notify(dev, prepare);
}
false
false
false
false
false
0
// Convert a pstate_history_list_ty (passed as a void* to its pointer) into a
// report-language list value: each history entry is converted via
// pstate_history_type and appended.  A null input yields an empty pointer.
pstate_history_list_convert(void *that) { pstate_history_list_ty *this_thing; size_t j; rpt_value::pointer vp; this_thing = *(pstate_history_list_ty **)that; if (!this_thing) return rpt_value::pointer(); trace(("pstate_history_list_convert(this_thing = %08lX)\n{\n", (long)this_thing)); assert(this_thing->length <= this_thing->maximum); assert(!this_thing->list == !this_thing->maximum); rpt_value_list *p = new rpt_value_list(); rpt_value::pointer result(p); for (j = 0; j < this_thing->length; ++j) { vp = pstate_history_type.convert(&this_thing->list[j]); assert(vp); p->append(vp); } trace(("}\n")); trace(("return %08lX;\n", (long)result.get())); return result; }
false
false
false
false
false
0
/* Recursive MyISAM R-tree "get next" traversal: fetch the key page, resume
 * from the saved recursion state (or the first key), recurse into internal
 * nodes and return the next data reference from a leaf.  Returns 0 on found,
 * 1 on not-found (HA_ERR_KEY_NOT_FOUND), -1 on error.
 * NOTE(review): flagged as a buffer-overflow candidate (CWE-120) — the
 * memcpy(info->lastkey, k, info->lastkey_length) copies a length derived
 * from keyinfo->keylength with no explicit bound against the lastkey buffer;
 * confirm lastkey is allocated >= keylength + rec_reflength upstream. */
rtree_get_req(MI_INFO *info, MI_KEYDEF *keyinfo, uint key_length, my_off_t page, int level) { uchar *k; uchar *last; uint nod_flag; int res; uchar *page_buf; uint k_len; uint *saved_key = (uint*) (info->rtree_recursion_state) + level; if (!(page_buf = (uchar*)my_alloca((uint)keyinfo->block_length))) return -1; if (!_mi_fetch_keypage(info, keyinfo, page, DFLT_INIT_HITS, page_buf, 0)) goto err1; nod_flag = mi_test_if_nod(page_buf); k_len = keyinfo->keylength - info->s->base.rec_reflength; if(info->rtree_recursion_depth >= level) { k = page_buf + *saved_key; if (!nod_flag) { /* Only leaf pages contain data references. */ /* Need to check next key with data reference. */ k = rt_PAGE_NEXT_KEY(k, k_len, nod_flag); } } else { k = rt_PAGE_FIRST_KEY(page_buf, nod_flag); } last = rt_PAGE_END(page_buf); for (; k < last; k = rt_PAGE_NEXT_KEY(k, k_len, nod_flag)) { if (nod_flag) { /* this is an internal node in the tree */ switch ((res = rtree_get_req(info, keyinfo, key_length, _mi_kpos(nod_flag, k), level + 1))) { case 0: /* found - exit from recursion */ *saved_key = (uint) (k - page_buf); goto ok; case 1: /* not found - continue searching */ info->rtree_recursion_depth = level; break; default: case -1: /* error */ goto err1; } } else { /* this is a leaf */ uchar *after_key = rt_PAGE_NEXT_KEY(k, k_len, nod_flag); info->lastpos = _mi_dpos(info, 0, after_key); info->lastkey_length = k_len + info->s->base.rec_reflength; memcpy(info->lastkey, k, info->lastkey_length); info->rtree_recursion_depth = level; *saved_key = (uint) (k - page_buf); if (after_key < last) { info->int_keypos = (uchar*)saved_key; memcpy(info->buff, page_buf, keyinfo->block_length); info->int_maxpos = rt_PAGE_END(info->buff); info->buff_used = 0; } else { info->buff_used = 1; } res = 0; goto ok; } } info->lastpos = HA_OFFSET_ERROR; my_errno = HA_ERR_KEY_NOT_FOUND; res = 1; ok: my_afree((uchar*)page_buf); return res; err1: my_afree((uchar*)page_buf); info->lastpos = HA_OFFSET_ERROR; return -1; }
false
true
false
false
false
1
/* For every inner loop of the strip-mining descriptor, attach a fresh
 * Cmp_localize to the loop's block and mark ALL arrays (indices 0..amax)
 * as local by filling the IntSet.
 * NOTE(review): no return statement despite apparent non-void usage style —
 * presumably the declared return type is void; confirm at the declaration. */
lad_sm_make_cmp_localize_all(StripMining *sm) { List *li; int amax = get_max_array_entry(sm->module->array_head); for (li = sm->loops; li != NULL; li = li->next) { StripMiningInnerLoop *smil = li->data; Cmp_localize *cl; smil->block->cmp_localize = cl = alloc_cmp_localize(); construct_IntSet(cl->local_array, amax + 1); fill_IntSet(cl->local_array); } }
false
false
false
false
false
0
win_window_pos_get(const WinInfo *wi, guint32 id, gint *x, gint *y)
{
	/* Look up window `id`; on success copy its position into whichever
	 * out-parameters are non-NULL and report TRUE, else FALSE. */
	WinDef *def = window_find(wi, id);

	if (def == NULL)
		return FALSE;

	if (x != NULL)
		*x = def->x;
	if (y != NULL)
		*y = def->y;

	return TRUE;
}
false
false
false
false
false
0
// Feed one uncompressed frame to the Dirac encoder.  Returns uncdata_size on
// success, 0 when LoadNextFrame declines the frame, and -1 on any exception
// (logged to stderr only in verbose mode).  TEST() guards the handles.
dirac_encoder_load (dirac_encoder_t *encoder, unsigned char *uncdata, int uncdata_size) { TEST (encoder != NULL); TEST (encoder->compressor != NULL); DiracEncoder *compressor = (DiracEncoder *)encoder->compressor; int ret_stat = 0; try { if ( compressor->LoadNextFrame (uncdata, uncdata_size)) { ret_stat = uncdata_size; } } catch (...) { if (compressor->GetEncParams().Verbose()) std::cerr << "dirac_encoder_load failed" << std::endl; ret_stat = -1; } return ret_stat; }
false
false
false
false
false
0
/* Ensure the SLRU physical page holding the next MultiXact offset exists,
 * zero-filling and writing it under MultiXactOffsetControlLock when absent
 * (SimpleLruWritePage also creates the segment file as needed). */
MaybeExtendOffsetSlru(void) { int pageno; pageno = MultiXactIdToOffsetPage(MultiXactState->nextMXact); LWLockAcquire(MultiXactOffsetControlLock, LW_EXCLUSIVE); if (!SimpleLruDoesPhysicalPageExist(MultiXactOffsetCtl, pageno)) { int slotno; /* * Fortunately for us, SimpleLruWritePage is already prepared to deal * with creating a new segment file even if the page we're writing is * not the first in it, so this is enough. */ slotno = ZeroMultiXactOffsetPage(pageno, false); SimpleLruWritePage(MultiXactOffsetCtl, slotno); } LWLockRelease(MultiXactOffsetControlLock); }
false
false
false
false
false
0
/* Reset the current I/O ports to the standard ones.  Pending input is always
 * discarded; pending output is discarded when `destructive`, flushed
 * otherwise. */
Reset_IO (int destructive) { Discard_Input (Curr_Input_Port); if (destructive) Discard_Output (Curr_Output_Port); else Flush_Output (Curr_Output_Port); Curr_Input_Port = Standard_Input_Port; Curr_Output_Port = Standard_Output_Port; }
false
false
false
false
false
0
/* Map the device's ID space, detect the ID PROM format (format 1: bytes at
 * odd addresses spell "IPAC"/"IPAH"; format 2: 16-bit big-endian "VITA 4"
 * signature), validate/clamp the advertised ID length, copy the ROM into a
 * freshly kmalloc'd dev->id (odd bytes only for format 1), and parse it.
 * Returns 0, -ENOMEM on mapping/alloc failure, or -ENODEV for an
 * unrecognized PROM.  The temporary mapping is always released. */
ipack_device_read_id(struct ipack_device *dev) { u8 __iomem *idmem; int i; int ret = 0; idmem = ioremap(dev->region[IPACK_ID_SPACE].start, dev->region[IPACK_ID_SPACE].size); if (!idmem) { dev_err(&dev->dev, "error mapping memory\n"); return -ENOMEM; } /* Determine ID PROM Data Format. If we find the ids "IPAC" or "IPAH" * we are dealing with a IndustryPack format 1 device. If we detect * "VITA4 " (16 bit big endian formatted) we are dealing with a * IndustryPack format 2 device */ if ((ioread8(idmem + 1) == 'I') && (ioread8(idmem + 3) == 'P') && (ioread8(idmem + 5) == 'A') && ((ioread8(idmem + 7) == 'C') || (ioread8(idmem + 7) == 'H'))) { dev->id_format = IPACK_ID_VERSION_1; dev->id_avail = ioread8(idmem + 0x15); if ((dev->id_avail < 0x0c) || (dev->id_avail > 0x40)) { dev_warn(&dev->dev, "invalid id size"); dev->id_avail = 0x0c; } } else if ((ioread8(idmem + 0) == 'I') && (ioread8(idmem + 1) == 'V') && (ioread8(idmem + 2) == 'A') && (ioread8(idmem + 3) == 'T') && (ioread8(idmem + 4) == ' ') && (ioread8(idmem + 5) == '4')) { dev->id_format = IPACK_ID_VERSION_2; dev->id_avail = ioread16be(idmem + 0x16); if ((dev->id_avail < 0x1a) || (dev->id_avail > 0x40)) { dev_warn(&dev->dev, "invalid id size"); dev->id_avail = 0x1a; } } else { dev->id_format = IPACK_ID_VERSION_INVALID; dev->id_avail = 0; } if (!dev->id_avail) { ret = -ENODEV; goto out; } /* Obtain the amount of memory required to store a copy of the complete * ID ROM contents */ dev->id = kmalloc(dev->id_avail, GFP_KERNEL); if (!dev->id) { dev_err(&dev->dev, "dev->id alloc failed.\n"); ret = -ENOMEM; goto out; } for (i = 0; i < dev->id_avail; i++) { if (dev->id_format == IPACK_ID_VERSION_1) dev->id[i] = ioread8(idmem + (i << 1) + 1); else dev->id[i] = ioread8(idmem + i); } /* now we can finally work with the copy */ switch (dev->id_format) { case IPACK_ID_VERSION_1: ipack_parse_id1(dev); break; case IPACK_ID_VERSION_2: ipack_parse_id2(dev); break; } out: iounmap(idmem); return ret; }
false
false
false
false
false
0
/* OCaml stub: blocking read of one signalfd_siginfo record from the fd in
 * vfd (GIL released around read(2)).  A short or failed read raises
 * Unix_error EINVAL; otherwise the record is copied into a fresh custom
 * block and returned. */
caml_extunix_signalfd_read(value vfd) { CAMLparam1(vfd); CAMLlocal1(vret); struct signalfd_siginfo ssi; ssize_t nread = 0; caml_enter_blocking_section(); nread = read(Int_val(vfd), &ssi, SSI_SIZE); caml_leave_blocking_section(); if (nread != SSI_SIZE) unix_error(EINVAL,"signalfd_read",Nothing); vret = caml_alloc_custom(&ssi_ops, SSI_SIZE, 0, 1); memcpy(Data_custom_val(vret),&ssi,SSI_SIZE); CAMLreturn(vret); }
false
false
false
false
false
0
mines_show_custom_game_screen (Mines* self)
{
	/* Switch the UI to the custom-game setup screen: show it and hide
	 * both the minefield view and the new-game screen. */
	g_return_if_fail (self != NULL);

	self->priv->is_new_game_screen = FALSE;
	gtk_widget_show ((GtkWidget*) self->priv->custom_game_screen);
	gtk_widget_hide ((GtkWidget*) self->priv->minefield_view);
	gtk_widget_hide ((GtkWidget*) self->priv->new_game_screen);
}
false
false
false
false
false
0
_open_pictures_in_external_viewer (FrogrMainView *self)
{
  /* Open the currently selected pictures in the external viewer.  Bails
   * out when nothing is selected (the check helper provides feedback).
   * _get_selected_pictures() returns a list of referenced pictures: both
   * the refs and the list itself must be released here. */
  GSList *pictures = NULL;

  if (!_pictures_selected_required_check (self))
    return;

  pictures = _get_selected_pictures (self);
  frogr_util_open_pictures_in_viewer (pictures);

  /* g_slist_free_full() is the idiomatic one-call replacement for the
   * g_slist_foreach(unref) + g_slist_free pair. */
  g_slist_free_full (pictures, g_object_unref);
}
false
false
false
false
false
0
/* Generic httpd directive handler: resolve `arg` relative to ServerRoot and
 * store the resulting path into the config struct at the byte offset carried
 * in cmd->info.  Returns an error string for an invalid path, NULL on
 * success. */
ap_set_file_slot(cmd_parms *cmd, void *struct_ptr, const char *arg) { /* Prepend server_root to relative arg. * This allows most args to be independent of server_root, * so the server can be moved or mirrored with less pain. */ const char *path; int offset = (int)(long)cmd->info; path = ap_server_root_relative(cmd->pool, arg); if (!path) { return apr_pstrcat(cmd->pool, "Invalid file path ", arg, NULL); } *(const char **) ((char*)struct_ptr + offset) = path; return NULL; }
false
false
false
false
false
0
/* Program power-on defaults for the HDSPM card family: per-io_type control
 * and settings register values, optional big-endian control2 setup (non-AES32
 * only), period size computation, mixer silencing, and a 48kHz default rate
 * so the channel map is initialized.  Caller holds hdspm->lock or runs
 * during init.  Always returns 0. */
snd_hdspm_set_defaults(struct hdspm * hdspm) { /* ASSUMPTION: hdspm->lock is either held, or there is no need to hold it (e.g. during module initialization). */ /* set defaults: */ hdspm->settings_register = 0; switch (hdspm->io_type) { case MADI: case MADIface: hdspm->control_register = 0x2 + 0x8 + 0x10 + 0x80 + 0x400 + 0x4000 + 0x1000000; break; case RayDAT: case AIO: hdspm->settings_register = 0x1 + 0x1000; /* Magic values are: LAT_0, LAT_2, Master, freq1, tx64ch, inp_0, * line_out */ hdspm->control_register = 0x2 + 0x8 + 0x10 + 0x80 + 0x400 + 0x4000 + 0x1000000; break; case AES32: hdspm->control_register = HDSPM_ClockModeMaster | /* Master Clock Mode on */ hdspm_encode_latency(7) | /* latency max=8192samples */ HDSPM_SyncRef0 | /* AES1 is syncclock */ HDSPM_LineOut | /* Analog output in */ HDSPM_Professional; /* Professional mode */ break; } hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register); if (AES32 == hdspm->io_type) { /* No control2 register for AES32 */ #ifdef SNDRV_BIG_ENDIAN hdspm->control2_register = HDSPM_BIGENDIAN_MODE; #else hdspm->control2_register = 0; #endif hdspm_write(hdspm, HDSPM_control2Reg, hdspm->control2_register); } hdspm_compute_period_size(hdspm); /* silence everything */ all_in_all_mixer(hdspm, 0 * UNITY_GAIN); if (hdspm_is_raydat_or_aio(hdspm)) hdspm_write(hdspm, HDSPM_WR_SETTINGS, hdspm->settings_register); /* set a default rate so that the channel map is set up. */ hdspm_set_rate(hdspm, 48000, 1); return 0; }
false
false
false
false
false
0
// ACE double-fork daemonization: first fork + setsid to drop the controlling
// terminal, SIGHUP ignored, second fork so the daemon can never reacquire a
// terminal, optional chdir(pathname), umask(0), and (optionally) close all
// handles then point stdin/stdout/stderr at /dev/null.  Returns 0, -1 on
// fork failure, or ACE_NOTSUP on platforms lacking fork().
daemonize (const ACE_TCHAR pathname[], bool close_all_handles, const ACE_TCHAR program_name[]) { ACE_TRACE ("ACE::daemonize"); #if !defined (ACE_LACKS_FORK) pid_t pid = ACE_OS::fork (); if (pid == -1) return -1; else if (pid != 0) ACE_OS::exit (0); // Parent exits. // 1st child continues. ACE_OS::setsid (); // Become session leader. ACE_OS::signal (SIGHUP, SIG_IGN); pid = ACE_OS::fork (program_name); if (pid != 0) ACE_OS::exit (0); // First child terminates. // Second child continues. if (pathname != 0) // change working directory. ACE_OS::chdir (pathname); ACE_OS::umask (0); // clear our file mode creation mask. // Close down the I/O handles. if (close_all_handles) { for (int i = ACE::max_handles () - 1; i >= 0; i--) ACE_OS::close (i); int fd = ACE_OS::open ("/dev/null", O_RDWR, 0); if (fd != -1) { ACE_OS::dup2 (fd, ACE_STDIN); ACE_OS::dup2 (fd, ACE_STDOUT); ACE_OS::dup2 (fd, ACE_STDERR); if (fd > ACE_STDERR) ACE_OS::close (fd); } } return 0; #else ACE_UNUSED_ARG (pathname); ACE_UNUSED_ARG (close_all_handles); ACE_UNUSED_ARG (program_name); ACE_NOTSUP_RETURN (-1); #endif /* ACE_LACKS_FORK */ }
false
false
false
false
false
0
projectColour(const cPDVector& sub) const { vector<PDT::Colour> res(sub.size()); transform(sub.begin(),sub.end(),res.begin(),pickColour()); return res; }
false
false
false
false
false
0
cmpc_keys_idev_init(struct input_dev *inputdev)
{
	/* Declare EV_KEY capability and register every key code from the
	 * KEY_MAX-terminated cmpc_keys_codes table. */
	int idx = 0;

	set_bit(EV_KEY, inputdev->evbit);
	while (cmpc_keys_codes[idx] != KEY_MAX) {
		set_bit(cmpc_keys_codes[idx], inputdev->keybit);
		idx++;
	}
}
false
false
false
false
false
0
// Reattach this line to its renderer: clear the extracted flag and, when the
// owning object is a box, register this as the box's placeholder.
// NOTE(review): WebCore render-tree method fragment; enclosing class not
// visible here.
attachLine() { m_extracted = false; if (m_object->isBox()) static_cast<RenderBox*>(m_object)->setPlaceHolderBox(this); }
false
false
false
false
false
0
/* ACPI debugger buffer-field self-test: read the object's original buffer,
 * write a fill-pattern buffer of the same bit length (masking unused trailing
 * bits), read it back and compare, then restore and re-verify the original
 * value.  Zero-length buffers are skipped with AE_OK.  All temporaries are
 * released on every path via the Exit label. */
AcpiDbTestBufferType ( ACPI_NAMESPACE_NODE *Node, UINT32 BitLength) { ACPI_OBJECT *Temp1 = NULL; ACPI_OBJECT *Temp2 = NULL; ACPI_OBJECT *Temp3 = NULL; UINT8 *Buffer; ACPI_OBJECT WriteValue; ACPI_STATUS Status; UINT32 ByteLength; UINT32 i; UINT8 ExtraBits; ByteLength = ACPI_ROUND_BITS_UP_TO_BYTES (BitLength); if (ByteLength == 0) { AcpiOsPrintf (" Ignoring zero length buffer"); return (AE_OK); } /* Allocate a local buffer */ Buffer = ACPI_ALLOCATE_ZEROED (ByteLength); if (!Buffer) { return (AE_NO_MEMORY); } /* Read the original value */ Status = AcpiDbReadFromObject (Node, ACPI_TYPE_BUFFER, &Temp1); if (ACPI_FAILURE (Status)) { goto Exit; } /* Emit a few bytes of the buffer */ AcpiOsPrintf (" (%4.4X/%3.3X)", BitLength, Temp1->Buffer.Length); for (i = 0; ((i < 4) && (i < ByteLength)); i++) { AcpiOsPrintf (" %2.2X", Temp1->Buffer.Pointer[i]); } AcpiOsPrintf ("... "); /* * Write a new value. * * Handle possible extra bits at the end of the buffer. Can * happen for FieldUnits larger than an integer, but the bit * count is not an integral number of bytes. Zero out the * unused bits. 
 */ ACPI_MEMSET (Buffer, BUFFER_FILL_VALUE, ByteLength); ExtraBits = BitLength % 8; if (ExtraBits) { Buffer [ByteLength - 1] = ACPI_MASK_BITS_ABOVE (ExtraBits); } WriteValue.Type = ACPI_TYPE_BUFFER; WriteValue.Buffer.Length = ByteLength; WriteValue.Buffer.Pointer = Buffer; Status = AcpiDbWriteToObject (Node, &WriteValue); if (ACPI_FAILURE (Status)) { goto Exit; } /* Ensure that we can read back the new value */ Status = AcpiDbReadFromObject (Node, ACPI_TYPE_BUFFER, &Temp2); if (ACPI_FAILURE (Status)) { goto Exit; } if (ACPI_MEMCMP (Temp2->Buffer.Pointer, Buffer, ByteLength)) { AcpiOsPrintf (" MISMATCH 2: New buffer value"); } /* Write back the original value */ WriteValue.Buffer.Length = ByteLength; WriteValue.Buffer.Pointer = Temp1->Buffer.Pointer; Status = AcpiDbWriteToObject (Node, &WriteValue); if (ACPI_FAILURE (Status)) { goto Exit; } /* Ensure that we can read back the original value */ Status = AcpiDbReadFromObject (Node, ACPI_TYPE_BUFFER, &Temp3); if (ACPI_FAILURE (Status)) { goto Exit; } if (ACPI_MEMCMP (Temp1->Buffer.Pointer, Temp3->Buffer.Pointer, ByteLength)) { AcpiOsPrintf (" MISMATCH 3: While restoring original buffer"); } Exit: ACPI_FREE (Buffer); if (Temp1) {AcpiOsFree (Temp1);} if (Temp2) {AcpiOsFree (Temp2);} if (Temp3) {AcpiOsFree (Temp3);} return (Status); }
false
false
false
false
false
0
/* Variadic front-end to do_spawn: count the NULL-terminated argument list,
 * build an argv on the stack, and spawn `prog`.
 * NOTE(review): flagged in review — alloca() does not return NULL on
 * failure, so the `if (!(argv = alloca(...)))` check is ineffective and an
 * attacker-influenced argument count can overflow the stack (CWE candidate);
 * consider malloc with a bound, or validate argc.  Also note argv holds only
 * the trailing arguments plus the NULL terminator; prog is passed to
 * do_spawn separately. */
spawnl(unsigned logopt, const char *prog, ...) { va_list arg; int argc; char **argv, **p; va_start(arg, prog); for (argc = 1; va_arg(arg, char *); argc++); va_end(arg); if (!(argv = alloca(sizeof(char *) * argc))) return -1; va_start(arg, prog); p = argv; while ((*p++ = va_arg(arg, char *))); va_end(arg); return do_spawn(logopt, -1, SPAWN_OPT_NONE, prog, (const char **) argv); }
false
false
false
false
true
1
hfi1_qp_init(struct hfi1_ibdev *dev) { struct hfi1_devdata *dd = dd_from_dev(dev); int i; int ret = -ENOMEM; /* allocate parent object */ dev->qp_dev = kzalloc(sizeof(*dev->qp_dev), GFP_KERNEL); if (!dev->qp_dev) goto nomem; /* allocate hash table */ dev->qp_dev->qp_table_size = hfi1_qp_table_size; dev->qp_dev->qp_table_bits = ilog2(hfi1_qp_table_size); dev->qp_dev->qp_table = kmalloc(dev->qp_dev->qp_table_size * sizeof(*dev->qp_dev->qp_table), GFP_KERNEL); if (!dev->qp_dev->qp_table) goto nomem; for (i = 0; i < dev->qp_dev->qp_table_size; i++) RCU_INIT_POINTER(dev->qp_dev->qp_table[i], NULL); spin_lock_init(&dev->qp_dev->qpt_lock); /* initialize qpn map */ ret = init_qpn_table(dd, &dev->qp_dev->qpn_table); if (ret) goto nomem; return ret; nomem: if (dev->qp_dev) { kfree(dev->qp_dev->qp_table); free_qpn_table(&dev->qp_dev->qpn_table); kfree(dev->qp_dev); } return ret; }
false
false
false
false
false
0
/* Move the rate plugin's application pointer by `frames` (forward or
 * rewind), propagate the equivalent period-aligned move to the slave PCM
 * with boundary wrap-around handling, then recompute the plugin appl_ptr
 * from the slave position (converting the intra-period remainder through
 * the rate converter in the stream's direction) and refresh
 * last_commit_ptr.  Returns the number of frames actually moved (signed),
 * or the raw diff when the slave move fails. */
snd_pcm_rate_move_applptr(snd_pcm_t *pcm, snd_pcm_sframes_t frames) { snd_pcm_rate_t *rate = pcm->private_data; snd_pcm_uframes_t orig_appl_ptr, appl_ptr = rate->appl_ptr, slave_appl_ptr; snd_pcm_sframes_t diff, ndiff; snd_pcm_t *slave = rate->gen.slave; orig_appl_ptr = rate->appl_ptr; if (frames > 0) snd_pcm_mmap_appl_forward(pcm, frames); else snd_pcm_mmap_appl_backward(pcm, -frames); slave_appl_ptr = (appl_ptr / pcm->period_size) * rate->gen.slave->period_size; diff = slave_appl_ptr - *slave->appl.ptr; if (diff < -(snd_pcm_sframes_t)(slave->boundary / 2)) { diff = (slave->boundary - *slave->appl.ptr) + slave_appl_ptr; } else if (diff > (snd_pcm_sframes_t)(slave->boundary / 2)) { diff = -((slave->boundary - slave_appl_ptr) + *slave->appl.ptr); } if (diff == 0) return frames; if (diff > 0) { ndiff = snd_pcm_forward(rate->gen.slave, diff); } else { ndiff = snd_pcm_rewind(rate->gen.slave, diff); } if (ndiff < 0) return diff; slave_appl_ptr = *slave->appl.ptr; rate->appl_ptr = (slave_appl_ptr / rate->gen.slave->period_size) * pcm->period_size + orig_appl_ptr % pcm->period_size; if (pcm->stream == SND_PCM_STREAM_PLAYBACK) rate->appl_ptr += rate->ops.input_frames(rate->obj, slave_appl_ptr % rate->gen.slave->period_size); else rate->appl_ptr += rate->ops.output_frames(rate->obj, slave_appl_ptr % rate->gen.slave->period_size); diff = orig_appl_ptr - rate->appl_ptr; if (diff < -(snd_pcm_sframes_t)(slave->boundary / 2)) { diff = (slave->boundary - rate->appl_ptr) + orig_appl_ptr; } else if (diff > (snd_pcm_sframes_t)(slave->boundary / 2)) { diff = -((slave->boundary - orig_appl_ptr) + rate->appl_ptr); } if (frames < 0) diff = -diff; rate->last_commit_ptr = rate->appl_ptr - rate->appl_ptr % pcm->period_size; return diff; }
false
false
false
false
false
0
/* Validate a range() step argument: NULL means "use step 1" (returns a new
 * int 1); otherwise coerce via __index__ and reject zero with ValueError
 * (Py_CLEAR drops the coerced reference).  OverflowError from the ssize_t
 * conversion is deliberately ignored — a huge step is valid, only 0 is not.
 * Returns a new reference, or NULL with an exception set. */
validate_step(PyObject *step) { /* No step specified, use a step of 1. */ if (!step) return PyLong_FromLong(1); step = PyNumber_Index(step); if (step) { Py_ssize_t istep = PyNumber_AsSsize_t(step, NULL); if (istep == -1 && PyErr_Occurred()) { /* Ignore OverflowError, we know the value isn't 0. */ PyErr_Clear(); } else if (istep == 0) { PyErr_SetString(PyExc_ValueError, "range() arg 3 must not be zero"); Py_CLEAR(step); } } return step; }
false
false
false
false
false
0
/* Gutenprint image teardown: push the progress bar to 100% and free the
 * per-image alpha table and temporary row buffer if they were allocated. */
Image_conclude(stp_image_t *image) { Gimp_Image_t *im = (Gimp_Image_t *) (image->rep); gimp_progress_update(1); if (im->alpha_table) stp_free(im->alpha_table); if (im->tmp) stp_free(im->tmp); }
false
false
false
false
false
0
/* Locate the realtime config engine mapped to (family, priority) under
 * config_lock: first resolve the family's map entry (copying its database
 * and table — defaulting table to the family name — into the caller's
 * buffers), then look up the map's driver in the engine list.  Returns the
 * engine or NULL; warns when a mapping exists but its engine is missing. */
find_engine(const char *family, int priority, char *database, int dbsiz, char *table, int tabsiz) { struct ast_config_engine *eng, *ret = NULL; struct ast_config_map *map; ast_mutex_lock(&config_lock); for (map = config_maps; map; map = map->next) { if (!strcasecmp(family, map->name) && (priority == map->priority)) { if (database) ast_copy_string(database, map->database, dbsiz); if (table) ast_copy_string(table, map->table ? map->table : family, tabsiz); break; } } /* Check if the required driver (engine) exist */ if (map) { for (eng = config_engine_list; !ret && eng; eng = eng->next) { if (!strcasecmp(eng->name, map->driver)) ret = eng; } } ast_mutex_unlock(&config_lock); /* if we found a mapping, but the engine is not available, then issue a warning */ if (map && !ret) ast_log(LOG_WARNING, "Realtime mapping for '%s' found to engine '%s', but the engine is not available\n", map->name, map->driver); return ret; }
false
false
false
false
false
0
/* Print a classic hex+ASCII dump of `len` bytes starting at display address
 * `addr`, 16 bytes per row; non-printable bytes render as '.'.  The tail
 * logic pads the final partial row so the ASCII column stays aligned.
 * NOTE(review): loop index `i` is int while `len` is unsigned — fine for
 * normal sizes, but lengths > INT_MAX would misbehave. */
irecv_hexdump(unsigned char* buf, unsigned int len, unsigned int addr) { int i, j; printf("0x%08x: ", addr); for (i = 0; i < len; i++) { if (i % 16 == 0 && i != 0) { for (j=i-16; j < i; j++) { unsigned char car = buf[j]; if (car < 0x20 || car > 0x7f) car = '.'; printf("%c", car); } printf("\n"); addr += 0x10; printf("0x%08x: ", addr); } printf("%02x ", buf[i]); } int done = (i % 16); int remains = 16 - done; if (done > 0) { for (j = 0; j < remains; j++) { printf("   "); } } if ((i - done) >= 0) { if (done == 0 && i > 0) done = 16; for (j = (i - done); j < i; j++) { unsigned char car = buf[j]; if (car < 0x20 || car > 0x7f) car = '.'; printf("%c", car); } } printf("\n"); }
false
false
false
false
false
0
/* Acquire tracing buffer access: a write lock on all_cpu_access_lock when
 * accessing the whole ring buffer (RING_BUFFER_ALL_CPUS), otherwise a read
 * lock (to exclude whole-buffer writers) plus the per-cpu mutex (to exclude
 * other readers of that cpu's buffer). */
trace_access_lock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { /* gain it for accessing the whole ring buffer. */ down_write(&all_cpu_access_lock); } else { /* gain it for accessing a cpu ring buffer. */ /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */ down_read(&all_cpu_access_lock); /* Secondly block other access to this @cpu ring buffer. */ mutex_lock(&per_cpu(cpu_access_lock, cpu)); } }
false
false
false
false
false
0
r8712_os_recvbuf_resource_free(struct _adapter *padapter, struct recv_buf *precvbuf)
{
	/* Release the receive buffer's URB (cancelling any in-flight
	 * transfer first) and its socket buffer; always reports _SUCCESS. */
	struct urb *urb = precvbuf->purb;

	if (urb) {
		usb_kill_urb(urb);
		usb_free_urb(urb);
	}
	if (precvbuf->pskb)
		dev_kfree_skb_any(precvbuf->pskb);

	return _SUCCESS;
}
false
false
false
false
false
0
ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) { struct ath_atx_ac *ac = tid->ac; if (tid->paused) return; if (tid->sched) return; tid->sched = 1; list_add_tail(&tid->list, &ac->tid_q); if (ac->sched) return; ac->sched = 1; list_add_tail(&ac->list, &txq->axq_acq); }
false
false
false
false
false
0
// Return the contig-alignment set: the cached set from the underlying read
// when this spans the full segment, otherwise a shared empty set.
// NOTE(review): method fragment — enclosing class and contig_align_t are not
// visible here; returns by const reference presumably, verify at declaration.
contig_align() const { static const contig_align_t empty_set; if (is_full_segment()) return sread()._contig_align; return empty_set; }
false
false
false
false
false
0
/* (GCC dwarfout, K&R definition.)  Compute the byte offset, from the start
 * of the containing structure, of the hypothetical "containing object" of a
 * FIELD_DECL — deducing where GCC must have placed the aligned containing
 * object for a bit-field from its bit position, size, and type alignment.
 * Returns 0 for error marks and for variable positions/sizes it cannot yet
 * handle; includes a BYTES_BIG_ENDIAN recomputation to avoid negative bit
 * offsets when a field crosses an alignment boundary. */
field_byte_offset (decl) tree decl; { unsigned int type_align_in_bytes; unsigned int type_align_in_bits; unsigned HOST_WIDE_INT type_size_in_bits; HOST_WIDE_INT object_offset_in_align_units; HOST_WIDE_INT object_offset_in_bits; HOST_WIDE_INT object_offset_in_bytes; tree type; tree field_size_tree; HOST_WIDE_INT bitpos_int; HOST_WIDE_INT deepest_bitpos; unsigned HOST_WIDE_INT field_size_in_bits; if (TREE_CODE (decl) == ERROR_MARK) return 0; if (TREE_CODE (decl) != FIELD_DECL) abort (); type = field_type (decl); field_size_tree = DECL_SIZE (decl); /* The size could be unspecified if there was an error, or for a flexible array member. */ if (! field_size_tree) field_size_tree = bitsize_zero_node; /* We cannot yet cope with fields whose positions or sizes are variable, so for now, when we see such things, we simply return 0. Someday, we may be able to handle such cases, but it will be damn difficult. */ if (! host_integerp (bit_position (decl), 0) || ! host_integerp (field_size_tree, 1)) return 0; bitpos_int = int_bit_position (decl); field_size_in_bits = tree_low_cst (field_size_tree, 1); type_size_in_bits = simple_type_size_in_bits (type); type_align_in_bits = simple_type_align_in_bits (type); type_align_in_bytes = type_align_in_bits / BITS_PER_UNIT; /* Note that the GCC front-end doesn't make any attempt to keep track of the starting bit offset (relative to the start of the containing structure type) of the hypothetical "containing object" for a bit- field. Thus, when computing the byte offset value for the start of the "containing object" of a bit-field, we must deduce this infor- mation on our own. This can be rather tricky to do in some cases. For example, handling the following structure type definition when compiling for an i386/i486 target (which only aligns long long's to 32-bit boundaries) can be very tricky: struct S { int field1; long long field2:31; }; Fortunately, there is a simple rule-of-thumb which can be used in such cases. 
 When compiling for an i386/i486, GCC will allocate 8 bytes for the structure shown above. It decides to do this based upon one simple rule for bit-field allocation. Quite simply, GCC allocates each "con- taining object" for each bit-field at the first (i.e. lowest addressed) legitimate alignment boundary (based upon the required minimum alignment for the declared type of the field) which it can possibly use, subject to the condition that there is still enough available space remaining in the containing object (when allocated at the selected point) to fully accommodate all of the bits of the bit-field itself. This simple rule makes it obvious why GCC allocates 8 bytes for each object of the structure type shown above. When looking for a place to allocate the "containing object" for `field2', the compiler simply tries to allocate a 64-bit "containing object" at each successive 32-bit boundary (starting at zero) until it finds a place to allocate that 64- bit field such that at least 31 contiguous (and previously unallocated) bits remain within that selected 64 bit field. (As it turns out, for the example above, the compiler finds that it is OK to allocate the "containing object" 64-bit field at bit-offset zero within the structure type.) Here we attempt to work backwards from the limited set of facts we're given, and we try to deduce from those facts, where GCC must have believed that the containing object started (within the structure type). The value we deduce is then used (by the callers of this routine) to generate AT_location and AT_bit_offset attributes for fields (both bit-fields and, in the case of AT_location, regular fields as well). */ /* Figure out the bit-distance from the start of the structure to the "deepest" bit of the bit-field. */ deepest_bitpos = bitpos_int + field_size_in_bits; /* This is the tricky part. Use some fancy footwork to deduce where the lowest addressed bit of the containing object must be. 
 */ object_offset_in_bits = ceiling (deepest_bitpos, type_align_in_bits) - type_size_in_bits; /* Compute the offset of the containing object in "alignment units". */ object_offset_in_align_units = object_offset_in_bits / type_align_in_bits; /* Compute the offset of the containing object in bytes. */ object_offset_in_bytes = object_offset_in_align_units * type_align_in_bytes; /* The above code assumes that the field does not cross an alignment boundary. This can happen if PCC_BITFIELD_TYPE_MATTERS is not defined, or if the structure is packed. If this happens, then we get an object which starts after the bitfield, which means that the bit offset is negative. Gdb fails when given negative bit offsets. We avoid this by recomputing using the first bit of the bitfield. This will give us an object which does not completely contain the bitfield, but it will be aligned, and it will contain the first bit of the bitfield. However, only do this for a BYTES_BIG_ENDIAN target. For a ! BYTES_BIG_ENDIAN target, bitpos_int + field_size_in_bits is the first first bit of the bitfield. If we recompute using bitpos_int + 1 below, then we end up computing the object byte offset for the wrong word of the desired bitfield, which in turn causes the field offset to be negative in bit_offset_attribute. */ if (BYTES_BIG_ENDIAN && object_offset_in_bits > bitpos_int) { deepest_bitpos = bitpos_int + 1; object_offset_in_bits = ceiling (deepest_bitpos, type_align_in_bits) - type_size_in_bits; object_offset_in_align_units = (object_offset_in_bits / type_align_in_bits); object_offset_in_bytes = (object_offset_in_align_units * type_align_in_bytes); } return object_offset_in_bytes; }
false
false
false
false
false
0
rpc_out(void *ptr, size_t eltsize, size_t nmemb, void *buffer_) { size_t max = eltsize * nmemb; struct rpc_state *rpc = buffer_; size_t avail = rpc->len - rpc->pos; if (!avail) { rpc->initial_buffer = 0; avail = packet_read_line(rpc->out, rpc->buf, rpc->alloc); if (!avail) return 0; rpc->pos = 0; rpc->len = avail; } if (max < avail) avail = max; memcpy(ptr, rpc->buf + rpc->pos, avail); rpc->pos += avail; return avail; }
false
false
false
false
false
0
destroyCacheLists(cacheCollection* lpCaches) { cacheList* lpTemp; while ((lpTemp = lpCaches->lpL1Caches) != NULL) { lpCaches->lpL1Caches = lpTemp->lpNext; free(lpTemp); } while ((lpTemp = lpCaches->lpL2Caches) != NULL) { lpCaches->lpL2Caches = lpTemp->lpNext; free(lpTemp); } while ((lpTemp = lpCaches->lpL2Caches) != NULL) { lpCaches->lpL2Caches = lpTemp->lpNext; free(lpTemp); } }
false
false
false
false
false
0
xmmsc_io_out_handle (xmmsc_connection_t *c) { x_check_conn (c, -1); x_api_error_if (!xmmsc_ipc_io_out (c->ipc), "without pending output", -1); return xmmsc_ipc_io_out_callback (c->ipc); }
false
false
false
false
false
0
outOfBitsStrategy(algo_t const* that, const int sfwork[SFBMAX], const int vbrsfmin[SFBMAX], int target) { int wrk[SFBMAX]; int const dm = sfDepth(sfwork); int const p = that->cod_info->global_gain; int nbits; /* PART 1 */ { int bi = dm / 2; int bi_ok = -1; int bu = 0; int bo = dm; for (;;) { int const sfmax = flattenDistribution(sfwork, wrk, dm, bi, p); nbits = tryThatOne(that, wrk, vbrsfmin, sfmax); if (nbits <= target) { bi_ok = bi; bo = bi - 1; } else { bu = bi + 1; } if (bu <= bo) { bi = (bu + bo) / 2; } else { break; } } if (bi_ok >= 0) { if (bi != bi_ok) { int const sfmax = flattenDistribution(sfwork, wrk, dm, bi_ok, p); nbits = tryThatOne(that, wrk, vbrsfmin, sfmax); } return; } } /* PART 2: */ { int bi = (255 + p) / 2; int bi_ok = -1; int bu = p; int bo = 255; for (;;) { int const sfmax = flattenDistribution(sfwork, wrk, dm, dm, bi); nbits = tryThatOne(that, wrk, vbrsfmin, sfmax); if (nbits <= target) { bi_ok = bi; bo = bi - 1; } else { bu = bi + 1; } if (bu <= bo) { bi = (bu + bo) / 2; } else { break; } } if (bi_ok >= 0) { if (bi != bi_ok) { int const sfmax = flattenDistribution(sfwork, wrk, dm, dm, bi_ok); nbits = tryThatOne(that, wrk, vbrsfmin, sfmax); } return; } } /* fall back to old code, likely to be never called */ searchGlobalStepsizeMax(that, wrk, vbrsfmin, target); }
false
false
false
false
false
0
save_update_preview(PixmapSaveControls *controls) { GdkPixbuf *pixbuf; gdouble zoom; zoom = gtk_adjustment_get_value(GTK_ADJUSTMENT(controls->zoom)); controls->args->font_size *= controls->args->zoom/zoom; if (controls->args->ztype == PIXMAP_NONE && controls->args->xytype != PIXMAP_RULERS) controls->args->zoom *= 1.4; pixbuf = pixmap_draw_presentational(controls->data, controls->args); gtk_image_set_from_pixbuf(GTK_IMAGE(controls->image), pixbuf); g_object_unref(pixbuf); if (controls->args->ztype == PIXMAP_NONE && controls->args->xytype != PIXMAP_RULERS) controls->args->zoom /= 1.4; controls->args->font_size /= controls->args->zoom/zoom; }
false
false
false
false
false
0
shift_derivatives_of_b() { map<int,int> subst; for (int yi = 0; yi < diff_b.nrows(); yi++) for (int ll = minlag; ll < 0; ll++) if (diff_b(yi, ll-minlag) != ogp::OperationTree::zero) { model.variable_shift_map(model.eqs.nulary_of_term(diff_b(yi, ll-minlag)), -ll, subst); diff_b(yi, ll-minlag) = model.eqs.add_substitution(diff_b(yi, ll-minlag), subst); } }
false
false
false
false
false
0
gnutls_certificate_set_x509_trust_file (gnutls_certificate_credentials_t res, const char *cafile, gnutls_x509_crt_fmt_t type) { int ret, ret2; size_t size; gnutls_datum_t cas; #ifdef ENABLE_PKCS11 if (strncmp (cafile, "pkcs11:", 7) == 0) { return read_cas_url (res, cafile); } #endif cas.data = read_binary_file (cafile, &size); if (cas.data == NULL) { gnutls_assert (); return GNUTLS_E_FILE_ERROR; } cas.size = size; ret = gnutls_certificate_set_x509_trust_mem (res, &cas, type); free (cas.data); if (ret < 0) { gnutls_assert (); return ret; } if ((ret2 = add_new_crt_to_rdn_seq (res, ret)) < 0) return ret2; return ret; }
false
false
false
false
false
0
bmp_565_8888(int fd, struct pixel8888 *fb, int width, int height, int xres, int yres) { int ret; int i, j; struct pixel8888 *tmp_fb; struct pixel565 buff[width]; for (i = height - 1; i >= 0; i--) { ret = read(fd, &buff, sizeof(buff)); if (ret < 0) { print_error("read"); return ret; } for (j = 0; j < width; j++) { tmp_fb = fb + i * xres + j; tmp_fb->red = buff[j].red; tmp_fb->red <<= 3; tmp_fb->green = buff[j].green; tmp_fb->green <<= 2; tmp_fb->blue = buff[j].blue; tmp_fb->blue <<= 3; tmp_fb->transp = 0; } } return 0; }
false
true
false
false
true
1
device_match_driver(struct btd_device *device, struct btd_device_driver *driver, GSList *profiles) { const char **uuid; GSList *uuids = NULL; for (uuid = driver->uuids; *uuid; uuid++) { GSList *match; /* skip duplicated uuids */ if (g_slist_find_custom(uuids, *uuid, (GCompareFunc) strcasecmp)) continue; /* match profile driver */ match = g_slist_find_custom(profiles, *uuid, (GCompareFunc) strcasecmp); if (match) { uuids = g_slist_append(uuids, match->data); continue; } /* match pattern driver */ match = device_match_pattern(device, *uuid, profiles); uuids = g_slist_concat(uuids, match); } return uuids; }
false
false
false
false
false
0
choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) { unsigned slice; unsigned count; struct cfq_rb_root *st; unsigned group_slice; enum wl_class_t original_class = cfqd->serving_wl_class; /* Choose next priority. RT > BE > IDLE */ if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) cfqd->serving_wl_class = RT_WORKLOAD; else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg)) cfqd->serving_wl_class = BE_WORKLOAD; else { cfqd->serving_wl_class = IDLE_WORKLOAD; cfqd->workload_expires = jiffies + 1; return; } if (original_class != cfqd->serving_wl_class) goto new_workload; /* * For RT and BE, we have to choose also the type * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload * expiration time */ st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); count = st->count; /* * check workload expiration, and that we still have other queues ready */ if (count && !time_after(jiffies, cfqd->workload_expires)) return; new_workload: /* otherwise select new workload type */ cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg, cfqd->serving_wl_class); st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); count = st->count; /* * the workload slice is computed as a fraction of target latency * proportional to the number of queues in that workload, over * all the queues in the same priority class */ group_slice = cfq_group_slice(cfqd, cfqg); slice = group_slice * count / max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class], cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd, cfqg)); if (cfqd->serving_wl_type == ASYNC_WORKLOAD) { unsigned int tmp; /* * Async queues are currently system wide. Just taking * proportion of queues with-in same group will lead to higher * async ratio system wide as generally root group is going * to have higher weight. A more accurate thing would be to * calculate system wide asnc/sync ratio. 
*/ tmp = cfqd->cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg); tmp = tmp/cfqd->busy_queues; slice = min_t(unsigned, slice, tmp); /* async workload slice is scaled down according to * the sync/async slice ratio. */ slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1]; } else /* sync workload slice is at least 2 * cfq_slice_idle */ slice = max(slice, 2 * cfqd->cfq_slice_idle); slice = max_t(unsigned, slice, CFQ_MIN_TT); cfq_log(cfqd, "workload slice:%d", slice); cfqd->workload_expires = jiffies + slice; }
false
false
false
false
false
0
my_timer_function(GB_TIMER *timer) { if (timer->id) { GB.RaiseTimer(timer); if (timer->id) { MyTimerTag *tag = (MyTimerTag *)timer->tag; GTimer *t = tag->timer; int elapsed = (int)(g_timer_elapsed(t, NULL) * 1000) - tag->timeout; int next = timer->delay - elapsed; if (next < 10) next = 10; tag->timeout = next; g_timer_start(t); timer->id = (intptr_t)g_timeout_add(next, (GSourceFunc)my_timer_function,(gpointer)timer); //fprintf(stderr, "elapsed = %d delay = %d next = %d\n", elapsed, timer->delay, next); } } return false; }
false
false
false
false
false
0
spin_point_size_changed_value(GtkSpinButton *spinbutton, gpointer user_data) { if(user_data && G_IS_OBJECT(user_data)) { GtkWidget* xyplot = GTK_WIDGET(user_data); XYPlotData* data = g_object_get_data(G_OBJECT (spinbutton), "CurentData"); g_return_if_fail (GABEDIT_IS_XYPLOT (xyplot)); if(data) { data->point_size = gtk_spin_button_get_value(spinbutton); xyplot_build_points_data(GABEDIT_XYPLOT(xyplot), data); } else { GList *current_node; current_node=g_list_first(GABEDIT_XYPLOT(xyplot)->data_list); for (; current_node!=NULL; current_node=current_node->next) { data=(XYPlotData*)current_node->data; data->point_size = gtk_spin_button_get_value(spinbutton); xyplot_build_points_data(GABEDIT_XYPLOT(xyplot), data); } } gtk_widget_queue_draw(GTK_WIDGET(xyplot)); } }
false
false
false
false
false
0
exsltMathConstant (xmlChar *name, double precision) { xmlChar *str; double ret; if ((name == NULL) || (xmlXPathIsNaN(precision)) || (precision < 1.0)) { return xmlXPathNAN; } if (xmlStrEqual(name, BAD_CAST "PI")) { int len = xmlStrlen(EXSLT_PI); if (precision <= len) len = (int)precision; str = xmlStrsub(EXSLT_PI, 0, len); } else if (xmlStrEqual(name, BAD_CAST "E")) { int len = xmlStrlen(EXSLT_E); if (precision <= len) len = (int)precision; str = xmlStrsub(EXSLT_E, 0, len); } else if (xmlStrEqual(name, BAD_CAST "SQRRT2")) { int len = xmlStrlen(EXSLT_SQRRT2); if (precision <= len) len = (int)precision; str = xmlStrsub(EXSLT_SQRRT2, 0, len); } else if (xmlStrEqual(name, BAD_CAST "LN2")) { int len = xmlStrlen(EXSLT_LN2); if (precision <= len) len = (int)precision; str = xmlStrsub(EXSLT_LN2, 0, len); } else if (xmlStrEqual(name, BAD_CAST "LN10")) { int len = xmlStrlen(EXSLT_LN10); if (precision <= len) len = (int)precision; str = xmlStrsub(EXSLT_LN10, 0, len); } else if (xmlStrEqual(name, BAD_CAST "LOG2E")) { int len = xmlStrlen(EXSLT_LOG2E); if (precision <= len) len = (int)precision; str = xmlStrsub(EXSLT_LOG2E, 0, len); } else if (xmlStrEqual(name, BAD_CAST "SQRT1_2")) { int len = xmlStrlen(EXSLT_SQRT1_2); if (precision <= len) len = (int)precision; str = xmlStrsub(EXSLT_SQRT1_2, 0, len); } else { str = NULL; } if (str == NULL) return xmlXPathNAN; ret = xmlXPathCastStringToNumber(str); xmlFree(str); return ret; }
false
false
false
false
false
0
ExportSpatiaLiteGeometry( const OGRGeometry *poGeometry, GInt32 nSRID, OGRwkbByteOrder eByteOrder, int bHasM, int bSpatialite2D, int bUseComprGeom, GByte **ppabyData, int *pnDataLenght ) { bUseComprGeom = bUseComprGeom && !bSpatialite2D && CanBeCompressedSpatialiteGeometry(poGeometry); int nDataLen = 44 + ComputeSpatiaLiteGeometrySize( poGeometry, bHasM, bSpatialite2D, bUseComprGeom ); OGREnvelope sEnvelope; *ppabyData = (GByte *) CPLMalloc( nDataLen ); (*ppabyData)[0] = 0x00; (*ppabyData)[1] = (GByte) eByteOrder; // Write out SRID memcpy( *ppabyData + 2, &nSRID, 4 ); // Write out the geometry bounding rectangle poGeometry->getEnvelope( &sEnvelope ); memcpy( *ppabyData + 6, &sEnvelope.MinX, 8 ); memcpy( *ppabyData + 14, &sEnvelope.MinY, 8 ); memcpy( *ppabyData + 22, &sEnvelope.MaxX, 8 ); memcpy( *ppabyData + 30, &sEnvelope.MaxY, 8 ); (*ppabyData)[38] = 0x7C; int nCode = GetSpatialiteGeometryCode(poGeometry, bHasM, bSpatialite2D, bUseComprGeom, TRUE); if (nCode == 0) { CPLFree(*ppabyData); *ppabyData = NULL; *pnDataLenght = 0; return CE_Failure; } memcpy( *ppabyData + 39, &nCode, 4 ); int nWritten = ExportSpatiaLiteGeometryInternal(poGeometry, eByteOrder, bHasM, bSpatialite2D, bUseComprGeom, *ppabyData + 43); if (nWritten == 0) { CPLFree(*ppabyData); *ppabyData = NULL; *pnDataLenght = 0; return CE_Failure; } (*ppabyData)[nDataLen - 1] = 0xFE; if( NEED_SWAP_SPATIALITE() ) { CPL_SWAP32PTR( *ppabyData + 2 ); CPL_SWAP64PTR( *ppabyData + 6 ); CPL_SWAP64PTR( *ppabyData + 14 ); CPL_SWAP64PTR( *ppabyData + 22 ); CPL_SWAP64PTR( *ppabyData + 30 ); CPL_SWAP32PTR( *ppabyData + 39 ); } *pnDataLenght = nDataLen; return CE_None; }
false
false
false
false
false
0
triton_timer_del(struct triton_timer_t *ud) { struct _triton_timer_t *t = (struct _triton_timer_t *)ud->tpd; epoll_ctl(epoll_fd, EPOLL_CTL_DEL, t->fd, &t->epoll_event); close(t->fd); spin_lock(&t->ctx->lock); t->ud = NULL; list_del(&t->entry); if (t->pending) { list_del(&t->entry2); __sync_sub_and_fetch(&triton_stat.timer_pending, 1); } spin_unlock(&t->ctx->lock); sched_yield(); pthread_mutex_lock(&freed_list_lock); list_add_tail(&t->entry, &freed_list); pthread_mutex_unlock(&freed_list_lock); ud->tpd = NULL; triton_stat.timer_count--; }
false
false
false
false
false
0
setPos(const vec3& pos, bool keep_angle) { if(keep_angle) { vec3 dir = target - this->pos; this->pos = pos; this->target = pos + dir; } else { this->pos = pos; } }
false
false
false
false
false
0
RewriteCurrentSource(unsigned NewReg, unsigned NewSubReg) override { // We cannot rewrite out of bound operands. // Moreover, rewritable sources are at odd positions. if ((CurrentSrcIdx & 1) != 1 || CurrentSrcIdx > CopyLike.getNumOperands()) return false; MachineOperand &MO = CopyLike.getOperand(CurrentSrcIdx); MO.setReg(NewReg); MO.setSubReg(NewSubReg); return true; }
false
false
false
false
false
0
cstate_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) { struct pmu *pmu = dev_get_drvdata(dev); if (pmu == &cstate_core_pmu) return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask); else if (pmu == &cstate_pkg_pmu) return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask); else return 0; }
false
false
false
false
false
0
operator() (double x) const { if (_asInteger) { if (_intPower==0) { return 1; } else if (_intPower>0) { double f = 1; for (int i=0;i<_intPower;i++) { f *=x; } return f; } else { double f = 1; for (int i=0;i<-_intPower;i++) { f /=x; } return f; } } else { return pow(x,_doublePower); } }
false
false
false
false
false
0
ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum) { return dd->ipath_port0_skbinfo ? (void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL; }
false
false
false
false
false
0
camel_sasl_challenge_sync (CamelSasl *sasl, GByteArray *token, GCancellable *cancellable, GError **error) { CamelSaslClass *class; GByteArray *response; g_return_val_if_fail (CAMEL_IS_SASL (sasl), NULL); class = CAMEL_SASL_GET_CLASS (sasl); g_return_val_if_fail (class->challenge_sync != NULL, NULL); response = class->challenge_sync (sasl, token, cancellable, error); if (token != NULL) CAMEL_CHECK_GERROR ( sasl, challenge_sync, response != NULL, error); return response; }
false
false
false
true
false
1
configureAdditivity() { helpers::Properties additivityProperties = properties.getPropertySubset(DCMTK_LOG4CPLUS_TEXT("additivity.")); OFVector<tstring> additivitysProps = additivityProperties.propertyNames(); for(OFVector<tstring>::const_iterator it = additivitysProps.begin(); it != additivitysProps.end(); ++it) { Logger logger = getLogger(*it); bool additivity; if (additivityProperties.getBool (additivity, *it)) logger.setAdditivity (additivity); } }
false
false
false
false
false
0
acct_gather_parse_freq(int type, char *freq) { int freq_int = -1; char *sub_str = NULL; if (!freq) return freq_int; switch (type) { case PROFILE_ENERGY: if ((sub_str = slurm_strcasestr(freq, "energy="))) freq_int = _get_int(sub_str + 7); break; case PROFILE_TASK: /* backwards compatibility for when the freq was only for task. */ freq_int = _get_int(freq); if ((freq_int == -1) && (sub_str = slurm_strcasestr(freq, "task="))) freq_int = _get_int(sub_str + 5); break; case PROFILE_FILESYSTEM: if ((sub_str = slurm_strcasestr(freq, "filesystem="))) freq_int = _get_int(sub_str + 11); break; case PROFILE_NETWORK: if ((sub_str = slurm_strcasestr(freq, "network="))) freq_int = _get_int(sub_str + 8); break; default: fatal("Unhandled profile option %d please update " "slurm_acct_gather.c " "(acct_gather_parse_freq)", type); } return freq_int; }
false
false
false
false
false
0
fstatvfs (int fd, struct statvfs *buf) { struct statfs fsbuf; struct stat64 st; /* Get as much information as possible from the system. */ if (__fstatfs (fd, &fsbuf) < 0) return -1; /* Convert the result. */ __internal_statvfs (NULL, buf, &fsbuf, fstat64 (fd, &st) == -1 ? NULL : &st); /* We signal success if the statfs call succeeded. */ return 0; }
false
false
false
false
false
0
makePixelSumTab8(void) { l_uint8 byte; l_int32 i; l_int32 *tab; PROCNAME("makePixelSumTab8"); if ((tab = (l_int32 *)CALLOC(256, sizeof(l_int32))) == NULL) return (l_int32 *)ERROR_PTR("tab not made", procName, NULL); for (i = 0; i < 256; i++) { byte = (l_uint8)i; tab[i] = (byte & 0x1) + ((byte >> 1) & 0x1) + ((byte >> 2) & 0x1) + ((byte >> 3) & 0x1) + ((byte >> 4) & 0x1) + ((byte >> 5) & 0x1) + ((byte >> 6) & 0x1) + ((byte >> 7) & 0x1); } return tab; }
false
false
false
false
false
0
grig_hstock_button (const gchar *stock_id, const gchar *text, const gchar *tooltip) { GtkWidget *button; GtkWidget *image; GtkWidget *box; image = gtk_image_new_from_stock (stock_id, GTK_ICON_SIZE_BUTTON); box = gtk_hbox_new (FALSE, 0); gtk_box_pack_start (GTK_BOX (box), image, TRUE, TRUE, 0); if (text != NULL) gtk_box_pack_start (GTK_BOX (box), gtk_label_new (text), TRUE, TRUE, 0); button = gtk_button_new (); gtk_widget_set_tooltip_text (button, tooltip); gtk_container_add (GTK_CONTAINER (button), box); return button; }
false
false
false
false
false
0
buildStereometricRecord(DcmDirectoryRecord *record, DcmFileFormat *fileformat, const OFString &referencedFileID, const OFFilename &sourceFilename) { /* create new value map record */ if (record == NULL) record = new DcmDirectoryRecord(ERT_Stereometric, referencedFileID.c_str(), sourceFilename, fileformat); if (record != NULL) { /* check whether new record is ok */ if (record->error().good()) { /* nothing to do */ } else { printRecordErrorMessage(record->error(), ERT_Stereometric, "create"); /* free memory */ delete record; record = NULL; } } else printRecordErrorMessage(EC_MemoryExhausted, ERT_Stereometric, "create"); return record; }
false
false
false
false
false
0
LangDumpVec(char *who, int count, SV **data) { int i; PerlIO_printf(PerlIO_stderr(), "%s (%d):\n", who, count); for (i = 0; i < count; i++) { SV *sv = data[i]; if (sv) { PerlIO_printf(PerlIO_stderr(), "%2d ", i); sv_dump(sv); } } }
false
false
false
false
false
0
RecursiveQueueAndDequeueAllSequenceTest() { QString sequence; AppendCharacterJob jobA ( QChar( 'a' ), &sequence, this ); AppendCharacterJob jobB ( QChar( 'b' ), &sequence, this ); AppendCharacterJob jobC ( QChar( 'c' ), &sequence, this ); AppendCharacterJob jobD ( QChar( 'd' ), &sequence, this ); AppendCharacterJob jobE ( QChar( 'e' ), &sequence, this ); AppendCharacterJob jobF ( QChar( 'f' ), &sequence, this ); AppendCharacterJob jobG ( QChar( 'g' ), &sequence, this ); AppendCharacterJob jobH ( QChar( 'h' ), &sequence, this ); AppendCharacterJob jobI ( QChar( 'i' ), &sequence, this ); AppendCharacterJob jobJ ( QChar( 'j' ), &sequence, this ); ThreadWeaver::JobSequence jobSequence1( this ); jobSequence1.setObjectName( "Sequ_1" ); jobSequence1.addJob ( &jobA ); jobSequence1.addJob ( &jobB ); jobSequence1.addJob ( &jobC ); ThreadWeaver::JobSequence jobSequence2( this ); jobSequence2.setObjectName( "Sequ_2" ); jobSequence2.addJob ( &jobD ); jobSequence2.addJob ( &jobE ); jobSequence2.addJob ( &jobF ); ThreadWeaver::JobSequence jobSequence3( this ); jobSequence3.setObjectName( "Sequ_3" ); jobSequence3.addJob ( &jobG ); jobSequence3.addJob ( &jobH ); jobSequence3.addJob ( &jobI ); jobSequence3.addJob ( &jobJ ); // sequence 4 will contain sequences 1, 2, and 3, in that order: ThreadWeaver::JobSequence jobSequence4( this ); jobSequence4.setObjectName( "Sequ_4" ); jobSequence4.addJob ( &jobSequence1 ); jobSequence4.addJob ( &jobSequence2 ); jobSequence4.addJob ( &jobSequence3 ); ThreadWeaver::Weaver::instance()->suspend(); ThreadWeaver::Weaver::instance()->enqueue ( & jobSequence4 ); ThreadWeaver::Weaver::instance()->dequeue (); bool empty = ThreadWeaver::Weaver::instance()->isEmpty(); ThreadWeaver::Weaver::instance()->resume(); ThreadWeaver::Weaver::instance()->finish(); QVERIFY ( empty == true ); }
false
false
false
false
false
0
load_module(void) { struct ast_db_entry *dbtree, *tmp; char groupname[AST_MAX_EXTENSION], *ptr; if ((group_container = ao2_container_alloc(37, group_hash_fn, group_cmp_fn))) { /* Refresh groups from astdb */ if ((dbtree = ast_db_gettree("dialgroup", NULL))) { for (tmp = dbtree; tmp; tmp = tmp->next) { ast_copy_string(groupname, tmp->key, sizeof(groupname)); if ((ptr = strrchr(groupname, '/'))) { ptr++; dialgroup_write(NULL, "", ptr, tmp->data); } } ast_db_freetree(dbtree); } return ast_custom_function_register(&dialgroup_function); } else { return AST_MODULE_LOAD_DECLINE; } }
true
true
false
false
false
1
SPLAverageCps(SplinePointList *spl) { SplinePoint *sp; while ( spl!=NULL ) { for ( sp=spl->first ; ; ) { SPAverageCps(sp); if ( sp->next==NULL ) break; sp = sp->next->to; if ( sp==spl->first ) break; } spl = spl->next; } }
false
false
false
false
false
0
sst_mem_block_register(struct sst_dsp *dsp, u32 offset, u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index, void *private) { struct sst_mem_block *block; block = kzalloc(sizeof(*block), GFP_KERNEL); if (block == NULL) return NULL; block->offset = offset; block->size = size; block->index = index; block->type = type; block->dsp = dsp; block->private = private; block->ops = ops; mutex_lock(&dsp->mutex); list_add(&block->list, &dsp->free_block_list); mutex_unlock(&dsp->mutex); return block; }
false
false
false
false
false
0
file_info_hash_insert(fileinfo_t *fi) { const fileinfo_t *xfi; file_info_check(fi); g_assert(!fi->hashed); g_assert(fi->guid); g_assert(NULL == fi->sf); if (GNET_PROPERTY(fileinfo_debug) > 4) g_debug("FILEINFO insert 0x%p \"%s\" " "(%s/%s bytes done) sha1=%s", cast_to_constpointer(fi), fi->pathname, filesize_to_string(fi->done), filesize_to_string2(fi->size), fi->sha1 ? sha1_base32(fi->sha1) : "none"); /* * Transient fileinfo is only recorded in the GUID hash table. */ if (fi->flags & FI_F_TRANSIENT) goto transient; /* * If an entry already exists in the `fi_by_outname' table, then it * is for THIS fileinfo. Otherwise, there's a structural assertion * that has been broken somewhere! * --RAM, 01/09/2002 */ xfi = hikset_lookup(fi_by_outname, fi->pathname); if (xfi) { file_info_check(xfi); g_assert(xfi == fi); } else { hikset_insert_key(fi_by_outname, &fi->pathname); } /* * Likewise, there can be only ONE entry per given SHA1, but the SHA1 * may not be already present at this time, so the entry is optional. * If it exists, it must be unique though. * --RAM, 01/09/2002 */ if (fi->sha1) { xfi = hikset_lookup(fi_by_sha1, fi->sha1); if (NULL != xfi && xfi != fi) /* See comment above */ g_error("xfi = %p, fi = %p", (void *) xfi, (void *) fi); if (NULL == xfi) hikset_insert_key(fi_by_sha1, &fi->sha1); /* * To be able to return hits on partial files for which we have SHA1, * create a shared file entry and record it as searchable. */ shared_file_from_fileinfo(fi); if (fi->sf != NULL) share_add_partial(fi->sf); } if (fi->file_size_known) { file_info_hash_insert_name_size(fi); } transient: /* * Obviously, GUID entries must be unique as well. */ xfi = hikset_lookup(fi_by_guid, fi->guid); if (NULL != xfi && xfi != fi) /* See comment above */ g_error("xfi = %p, fi = %p", (void *) xfi, (void *) fi); if (NULL == xfi) hikset_insert_key(fi_by_guid, &fi->guid); /* * Notify interested parties, update counters. 
*/ fi->hashed = TRUE; fi->fi_handle = file_info_request_handle(fi); gnet_prop_incr_guint32(PROP_FI_ALL_COUNT); fi_event_trigger(fi, EV_FI_ADDED); }
false
false
false
false
false
0
tree_view_frame_scroll_to_cursor (GtkTreeView *tree_view) { GtkTreePath *path = NULL; gtk_tree_view_get_cursor (tree_view, &path, NULL); if (path != NULL) { gtk_tree_view_scroll_to_cell ( tree_view, path, NULL, FALSE, 0.0, 0.0); gtk_tree_path_free (path); } }
false
false
false
false
false
0
syn_ps2_init_stick(int fd) { if (!stick_enabled) return; gpm_report(GPM_PR_DEBUG,"Initializing Synaptics PS/2 Stick Device"); /* Reset it, set defaults, streaming */ syn_ps2_send_reset(fd,DEVICE_STICK); syn_ps2_putbyte(fd,DEVICE_STICK,PS2_SET_DEFAULT); syn_ps2_putbyte(fd,DEVICE_STICK,PS2_STREAM_MODE); /* Unused */ /* syn_ps2_read_ident (fd, DEVICE_STICK, &ident[1]); */ /* syn_ps2_read_model_id (fd, DEVICE_STICK, &model[1]); */ /* syn_ps2_read_cap (fd, DEVICE_STICK, &capabilities[1]); */ /* syn_dump_info(DEVICE_STICK); */ }
false
false
false
false
false
0
speech_stop() { if(Speech_init == false) return true; #ifdef _WIN32 return SUCCEEDED(Voice_device->Speak( NULL, SPF_PURGEBEFORESPEAK, NULL )); #else STUB_FUNCTION; return true; #endif }
false
false
false
false
false
0
google_chooser_dialog_realize (GtkWidget *widget) { EGoogleChooserDialogPrivate *priv; GdkCursor *cursor; GdkWindow *window; GdkDisplay *display; priv = E_GOOGLE_CHOOSER_DIALOG_GET_PRIVATE (widget); /* Chain up to parent's realize() method. */ GTK_WIDGET_CLASS (e_google_chooser_dialog_parent_class)-> realize (widget); g_return_if_fail (priv->cancellable == NULL); priv->cancellable = g_cancellable_new (); /* Show a busy mouse cursor while populating. */ window = gtk_widget_get_window (widget); display = gtk_widget_get_display (widget); cursor = gdk_cursor_new_for_display (display, GDK_WATCH); gdk_window_set_cursor (window, cursor); g_object_unref (cursor); e_google_chooser_populate ( priv->chooser, priv->cancellable, (GAsyncReadyCallback) google_chooser_dialog_populated_cb, g_object_ref (widget)); }
false
false
false
false
false
0
block_picker_signals (DateCell *cell) { PopBox *box = cell->cell.gui_private; if (!box->signals_connected) return; g_signal_handlers_block_matched (box->date_picker, G_SIGNAL_MATCH_DATA, 0, 0, NULL, NULL, cell); }
false
false
false
false
false
0
fmtlong(long lng, char *fmt) { static int i; int r; char buf[30]; r = rfmtlong(lng, fmt, buf); printf("r: %d ", r); if (r == 0) { printf("%d: %s (fmt was: %s)\n", i++, buf, fmt); } else check_return(r); }
true
true
false
false
false
1
ncd_for_two_cands (basic_block bb1, basic_block bb2, slsr_cand_t c1, slsr_cand_t c2, slsr_cand_t *where) { basic_block ncd; if (!bb1) { *where = c2; return bb2; } if (!bb2) { *where = c1; return bb1; } ncd = nearest_common_dominator (CDI_DOMINATORS, bb1, bb2); /* If both candidates are in the same block, the earlier candidate wins. */ if (bb1 == ncd && bb2 == ncd) { if (!c1 || (c2 && c2->cand_num < c1->cand_num)) *where = c2; else *where = c1; } /* Otherwise, if one of them produced a candidate in the dominator, that one wins. */ else if (bb1 == ncd) *where = c1; else if (bb2 == ncd) *where = c2; /* If neither matches the dominator, neither wins. */ else *where = NULL; return ncd; }
false
false
false
false
false
0
getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const { // {13} 1 == imm12, 0 == Rm // {12} isAdd // {11-0} imm12/Rm const MCOperand &MO = MI.getOperand(OpIdx); const MCOperand &MO1 = MI.getOperand(OpIdx+1); unsigned Imm = MO1.getImm(); bool isAdd = ARM_AM::getAM2Op(Imm) == ARM_AM::add; bool isReg = MO.getReg() != 0; uint32_t Binary = ARM_AM::getAM2Offset(Imm); // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm12 if (isReg) { ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(Imm); Binary <<= 7; // Shift amount is bits [11:7] Binary |= getShiftOp(ShOp) << 5; // Shift type is bits [6:5] Binary |= CTX.getRegisterInfo()->getEncodingValue(MO.getReg()); // Rm is bits [3:0] } return Binary | (isAdd << 12) | (isReg << 13); }
false
false
false
false
false
0
separate_subclust(graph_t * g) { int i, j, margin; graph_t *low, *high; graph_t *left, *right; margin = late_int (g, G_margin, CL_OFFSET, 0); for (i = 1; i <= GD_n_cluster(g); i++) make_lrvn(GD_clust(g)[i]); for (i = 1; i <= GD_n_cluster(g); i++) { for (j = i + 1; j <= GD_n_cluster(g); j++) { low = GD_clust(g)[i]; high = GD_clust(g)[j]; if (GD_minrank(low) > GD_minrank(high)) { graph_t *temp = low; low = high; high = temp; } if (GD_maxrank(low) < GD_minrank(high)) continue; if (ND_order(GD_rank(low)[GD_minrank(high)].v[0]) < ND_order(GD_rank(high)[GD_minrank(high)].v[0])) { left = low; right = high; } else { left = high; right = low; } make_aux_edge(GD_rn(left), GD_ln(right), margin, 0); } separate_subclust(GD_clust(g)[i]); } }
false
false
false
false
false
0
countChineseCharsUtf8(const string& content) { int cnt = 0; for (size_t i = 0; i < content.size();) { char c = content[i]; if ((c & 0x80) == 0) { ++i; } else if ((c & 0xe0) == 0xc0) { i += 2; } else if ((c & 0xf0) == 0xe0) { i += 3; ++cnt; } else { abort(); } } return cnt; }
false
false
false
false
false
0
attach() { assert(!attached()); assert(!m_render); assert(parentNode()); RenderStyle* const _style = document()->styleSelector()->styleForElement(this); _style->ref(); if (parentNode()->renderer() && _style->display() != NONE) { m_render = new (document()->renderArena()) RenderTextArea(this); m_render->setStyle(_style); } HTMLGenericFormElementImpl::attach(); _style->deref(); }
false
false
false
false
false
0
alarm_intr(int alnum) { if (currently_testing >= blocks) return; signal(SIGALRM, alarm_intr); alarm(5); if (!currently_testing) return; printf("%lld... ", (unsigned long long)currently_testing); fflush(stdout); }
false
false
false
false
false
0
revealed_by_phoenix(int xLoc, int yLoc) { Unit* unitPtr; int effectiveRange = unit_res[UNIT_PHOENIX]->visual_range; for( int i=unit_array.size() ; i>0 ; i-- ) { if( unit_array.is_deleted(i) ) continue; unitPtr = unit_array[i]; if( unitPtr->unit_id == UNIT_PHOENIX && unitPtr->nation_recno == nation_recno ) { if( misc.points_distance( xLoc, yLoc, unitPtr->next_x_loc(), unitPtr->next_y_loc() ) <= effectiveRange ) { return 1; } } } return 0; }
false
false
false
false
false
0
combine_bucket (ht_entry_t *h, nsieve_t *ns){ while (1){ if (h == NULL) return; if (h->rel->poly->group->victim == NULL){ h = h->next; continue; } rel_t *base_rel = h->rel; uint64_t base_factors[ns->row_len]; fl_concat (base_rel, base_rel->poly->group->victim); fl_fillrow (base_rel, &base_factors[0], ns); h = h->next; // skip past the base rel. while (h != NULL && base_rel->cofactor == h->rel->cofactor){ // any h inside this loop creates another combined relation. if (h->rel->poly->group->victim == NULL){ h = h->next; continue; // we can't use this relation; there were no full relations in its pg to use as the victim. // such 'orphaned partials' should not occur; they should never be added // to the hashtable, so this is more of a sanity check than anything. It // would probably segfault later if one somehow crept in. } matrel_t *m = &ns->relns[ns->nfull]; m->row = (uint64_t *) calloc (ns->row_len, 8); m -> r1 = h -> rel; m -> r2 = base_rel; fl_concat (h->rel, h->rel->poly->group->victim); fl_fillrow (h->rel, m->row, ns); xor_row (m->row, &base_factors[0], ns->row_len); // multiply the factorizations together. ns->nfull ++; if (ns -> nfull >= ns -> rels_needed){ return; } h = h->next; } if (h == NULL){ return; } } }
false
false
false
false
false
0
/* Compare the textual forms of two Arabic-part formulas (ap1 and ap2 are
 * indexes into the ai[] table), strcmp-style over the ai[].form strings.
 * While the characters match, the scan index ich is advanced normally,
 * except that when us.fArabicFlip is clear it jumps 2->5, 8->2 and 5->8, so
 * two three-character sections of the formula are effectively compared in
 * swapped order, matching the alternate on-screen form.  Returns <0, 0 or
 * >0 as ap1's form sorts before, equal to, or after ap2's at the first
 * differing position.  NOTE(review): the swap runs when fArabicFlip is
 * FALSE, while the inline comment talks about the alternate display form —
 * confirm the flag's polarity against the display code. */
NCompareSzPart(ap1, ap2) int ap1, ap2; { byte *pch1, *pch2; int ich; pch1 = ai[ap1].form; pch2 = ai[ap2].form; for (ich = 0; pch1[ich] && pch1[ich] == pch2[ich]; ich++) { if (!us.fArabicFlip) { /* If formulas are being displayed in alternate form, we need to */ /* effectively swap two sections in the string and then compare. */ if (ich == 2) ich = 5; else if (ich == 8) ich = 2; else if (ich == 5) ich = 8; } } return pch1[ich] - pch2[ich]; }
false
false
false
false
false
0
/* LLVM LiveDebugVariables-style pass body: reset any per-function state,
 * cache the MachineFunction plus the LiveIntervals and MachineDominatorTree
 * analyses and the target register info, initialise the lexical-scope map,
 * then collect DBG_VALUE instructions and compute their live intervals.
 * Returns true iff collectDebugValues() changed the function; the result is
 * also latched in ModifiedMF for later phases. */
runOnMachineFunction(MachineFunction &mf) { clear(); MF = &mf; LIS = &pass.getAnalysis<LiveIntervals>(); MDT = &pass.getAnalysis<MachineDominatorTree>(); TRI = mf.getSubtarget().getRegisterInfo(); LS.initialize(mf); DEBUG(dbgs() << "********** COMPUTING LIVE DEBUG VARIABLES: " << mf.getName() << " **********\n"); bool Changed = collectDebugValues(mf); computeIntervals(); DEBUG(print(dbgs())); ModifiedMF = Changed; return Changed; }
false
false
false
false
false
0
/**
 * Find the quest at 1-based position 'number' in the listing order shown to
 * the player: finished or restartable top-level quests first, then quests
 * still in progress.  Quests that have a parent quest are never counted.
 *
 * @param pl      player whose quest list is searched.
 * @param number  1-based position wanted.
 * @return the matching quest_state, or NULL when number is non-positive,
 *         out of range, or no quest data exists for the player.
 */
get_quest_by_number(player *pl, int number) {
    quest_player *qp = get_or_create_quest(pl);
    quest_state *qs;
    int seen = 0;

    if (number <= 0 || !qp)
        return NULL;

    /* Pass 1: completed or restartable quests. */
    for (qs = qp->quests; qs; qs = qs->next) {
        if (quest_get(qs->code)->parent)
            continue;
        if (qs->state == QC_CAN_RESTART || qs->is_complete)
            if (++seen == number)
                return qs;
    }

    /* Pass 2: quests still active. */
    for (qs = qp->quests; qs; qs = qs->next) {
        if (quest_get(qs->code)->parent)
            continue;
        if (qs->state != QC_CAN_RESTART && qs->is_complete == 0)
            if (++seen == number)
                return qs;
    }

    /* Not found. */
    return NULL;
}
false
false
false
false
false
0
isCrosses(int dimensionOfGeometryA, int dimensionOfGeometryB) const { if ((dimensionOfGeometryA==Dimension::P && dimensionOfGeometryB==Dimension::L) || (dimensionOfGeometryA==Dimension::P && dimensionOfGeometryB==Dimension::A) || (dimensionOfGeometryA==Dimension::L && dimensionOfGeometryB==Dimension::A)) { return matches(matrix[Location::INTERIOR][Location::INTERIOR], 'T') && matches(matrix[Location::INTERIOR][Location::EXTERIOR], 'T'); } if ((dimensionOfGeometryA==Dimension::L && dimensionOfGeometryB==Dimension::P) || (dimensionOfGeometryA==Dimension::A && dimensionOfGeometryB==Dimension::P) || (dimensionOfGeometryA==Dimension::A && dimensionOfGeometryB==Dimension::L)) { return matches(matrix[Location::INTERIOR][Location::INTERIOR], 'T') && matches(matrix[Location::EXTERIOR][Location::INTERIOR], 'T'); } if (dimensionOfGeometryA==Dimension::L && dimensionOfGeometryB==Dimension::L) { return matrix[Location::INTERIOR][Location::INTERIOR]==0; } return false; }
false
false
false
false
false
0
/* f2fs: flush (or, when 'abort' is true, drop) every page queued on the
 * inode's in-memory page list (atomic-write staging area).
 * Normal path (!abort): balance the fs and take the op lock, then for each
 * queued page still belonging to this inode's mapping, mark it dirty, wait
 * for writeback and write it through do_write_data_page(); the first write
 * error stops the loop — that failing entry is unlocked but left queued,
 * along with everything after it.
 * Abort path (only from f2fs_evict_inode): pages are just dropped, and the
 * fs lock is deliberately not taken — per the comment below, eviction does
 * no data writes, and taking it could let f2fs_gc wait forever on the
 * dying inode.
 * Every fully processed entry is released: page-private cleared, page put,
 * list entry freed, and the INMEM page count decremented.  A merged DATA
 * bio is submitted at the end if anything was written.
 * Returns 0 on success or the first do_write_data_page() error. */
commit_inmem_pages(struct inode *inode, bool abort) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); struct inmem_pages *cur, *tmp; bool submit_bio = false; struct f2fs_io_info fio = { .sbi = sbi, .type = DATA, .rw = WRITE_SYNC | REQ_PRIO, .encrypted_page = NULL, }; int err = 0; /* * The abort is true only when f2fs_evict_inode is called. * Basically, the f2fs_evict_inode doesn't produce any data writes, so * that we don't need to call f2fs_balance_fs. * Otherwise, f2fs_gc in f2fs_balance_fs can wait forever until this * inode becomes free by iget_locked in f2fs_iget. */ if (!abort) { f2fs_balance_fs(sbi); f2fs_lock_op(sbi); } mutex_lock(&fi->inmem_lock); list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) { lock_page(cur->page); if (!abort) { if (cur->page->mapping == inode->i_mapping) { set_page_dirty(cur->page); f2fs_wait_on_page_writeback(cur->page, DATA); if (clear_page_dirty_for_io(cur->page)) inode_dec_dirty_pages(inode); trace_f2fs_commit_inmem_page(cur->page, INMEM); fio.page = cur->page; err = do_write_data_page(&fio); if (err) { unlock_page(cur->page); break; } clear_cold_data(cur->page); submit_bio = true; } } else { trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP); } set_page_private(cur->page, 0); ClearPagePrivate(cur->page); f2fs_put_page(cur->page, 1); list_del(&cur->list); kmem_cache_free(inmem_entry_slab, cur); dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); } mutex_unlock(&fi->inmem_lock); if (!abort) { f2fs_unlock_op(sbi); if (submit_bio) f2fs_submit_merged_bio(sbi, DATA, WRITE); } return err; }
false
false
false
false
false
0
/* bttv: one-time initialisation of a PXC200 capture board.  Sequence:
 * pulse the GPIO-connected reset line (bit 13), disable the BT848 ADC AGC
 * "crushing" (the EN bit uses inverted logic, so setting it disables AGC),
 * program the MAX517 DAC reference over I2C, enable the GPIO clock mode
 * needed by the PXC200-F, pulse the 12C508 PIC's reset line (bit 2), then
 * feed the PIC its fixed byte sequence (vals[]) over I2C, logging each
 * write/read pair.  The order and udelay() pauses are hardware mandated —
 * do not reorder.  NOTE(review): the I2C write/read addresses 0x1E/0x1F
 * target the same chip; the R/W bit is folded into the address argument. */
init_PXC200(struct bttv *btv) { static int vals[] = { 0x08, 0x09, 0x0a, 0x0b, 0x0d, 0x0d, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x00 }; unsigned int i; int tmp; u32 val; /* Initialise GPIO-connevted stuff */ gpio_inout(0xffffff, (1<<13)); gpio_write(0); udelay(3); gpio_write(1<<13); /* GPIO inputs are pulled up, so no need to drive * reset pin any longer */ gpio_bits(0xffffff, 0); if (bttv_gpio) bttv_gpio_tracking(btv,"pxc200"); /* we could/should try and reset/control the AD pots? but right now we simply turned off the crushing. Without this the AGC drifts drifts remember the EN is reverse logic --> setting BT848_ADC_AGC_EN disable the AGC tboult@eecs.lehigh.edu */ btwrite(BT848_ADC_RESERVED|BT848_ADC_AGC_EN, BT848_ADC); /* Initialise MAX517 DAC */ pr_info("Setting DAC reference voltage level ...\n"); bttv_I2CWrite(btv,0x5E,0,0x80,1); /* Initialise 12C508 PIC */ /* The I2CWrite and I2CRead commmands are actually to the * same chips - but the R/W bit is included in the address * argument so the numbers are different */ pr_info("Initialising 12C508 PIC chip ...\n"); /* First of all, enable the clock line. This is used in the PXC200-F */ val = btread(BT848_GPIO_DMA_CTL); val |= BT848_GPIO_DMA_CTL_GPCLKMODE; btwrite(val, BT848_GPIO_DMA_CTL); /* Then, push to 0 the reset pin long enough to reset the * * device same as above for the reset line, but not the same * value sent to the GPIO-connected stuff * which one is the good one? */ gpio_inout(0xffffff,(1<<2)); gpio_write(0); udelay(10); gpio_write(1<<2); for (i = 0; i < ARRAY_SIZE(vals); i++) { tmp=bttv_I2CWrite(btv,0x1E,0,vals[i],1); if (tmp != -1) { pr_info("I2C Write(%2.2x) = %i\nI2C Read () = %2.2x\n\n", vals[i],tmp,bttv_I2CRead(btv,0x1F,NULL)); } } pr_info("PXC200 Initialised\n"); }
false
false
false
false
false
0
/* Builtin isrel(a, b): report whether two integers are relatively prime.
 * Raises a math error (which does not return) when either argument is
 * fractional; otherwise converts zrelprime() of the numerators to a
 * NUMBER result. */
f_isrel(NUMBER *val1, NUMBER *val2)
{
	long coprime;

	if (qisfrac(val1))
		math_error("Non-integer for isrel");	/*NOTREACHED*/
	if (qisfrac(val2))
		math_error("Non-integer for isrel");	/*NOTREACHED*/

	coprime = (long) zrelprime(val1->num, val2->num);
	return itoq(coprime);
}
false
false
false
false
false
0
/* Parse a block of connection records: enter the block, then consume
 * tokens until END, dispatching each "C" property to parseconnection()
 * and skipping any other property. */
parseconnections()
{
    if(!p.findbegin())
        return;

    token t;
    for(;;)
    {
        if(!p.parse(t))
            return;                 // out of tokens
        if(t.type == token::END)
            return;                 // end of block
        if(t.type != token::PROP)
            continue;               // ignore anything that isn't a property
        if(strcmp(t.s, "C") == 0)
            parseconnection();
        else
            p.skipprop();
    }
}
false
false
false
false
false
0
/* Replace the current document with one recovered from the in-memory XML
 * buffer s (size bytes).  Returns false only when libxml could not produce
 * a document at all; recovery mode may still have skipped malformed
 * content on a true return. */
ParseMemory( const char * s, int size )
{
    FreeDoc();
    pDoc = xmlRecoverMemory( s, size );
    return pDoc != 0;
}
false
false
false
false
false
0
/* sqlite exec-style callback: record one (uid, rev) row into the
 * GHashTable passed as user_data.  A row is stored only when exactly two
 * columns are present and the uid column is non-NULL; a NULL rev column is
 * stored as the empty string.  Always returns 0 so iteration continues. */
get_uids_and_rev_cb (gpointer user_data, gint col, gchar **cols, gchar **name)
{
	GHashTable *table = user_data;

	if (col != 2 || !cols[0])
		return 0;

	g_hash_table_insert (table,
		g_strdup (cols[0]),
		g_strdup (cols[1] ? cols[1] : ""));

	return 0;
}
false
false
false
false
false
0