idx
int64 | func
string | target
int64 |
|---|---|---|
208,115
|
static int xemaclite_of_probe(struct platform_device *ofdev)
{
struct resource *res;
struct net_device *ndev = NULL;
struct net_local *lp = NULL;
struct device *dev = &ofdev->dev;
int rc = 0;
dev_info(dev, "Device Tree Probing\n");
/* Create an ethernet device instance */
ndev = alloc_etherdev(sizeof(struct net_local));
if (!ndev)
return -ENOMEM;
dev_set_drvdata(dev, ndev);
SET_NETDEV_DEV(ndev, &ofdev->dev);
lp = netdev_priv(ndev);
lp->ndev = ndev;
/* Get IRQ for the device */
res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "no IRQ found\n");
rc = -ENXIO;
goto error;
}
ndev->irq = res->start;
res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
if (IS_ERR(lp->base_addr)) {
rc = PTR_ERR(lp->base_addr);
goto error;
}
ndev->mem_start = res->start;
ndev->mem_end = res->end;
spin_lock_init(&lp->reset_lock);
lp->next_tx_buf_to_use = 0x0;
lp->next_rx_buf_to_use = 0x0;
lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
rc = of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr);
if (rc) {
dev_warn(dev, "No MAC address found, using random\n");
eth_hw_addr_random(ndev);
}
/* Clear the Tx CSR's in case this is a restart */
xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
/* Set the MAC address in the EmacLite device */
xemaclite_update_address(lp, ndev->dev_addr);
lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
xemaclite_mdio_setup(lp, &ofdev->dev);
dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
ndev->netdev_ops = &xemaclite_netdev_ops;
ndev->ethtool_ops = &xemaclite_ethtool_ops;
ndev->flags &= ~IFF_MULTICAST;
ndev->watchdog_timeo = TX_TIMEOUT;
/* Finally, register the device */
rc = register_netdev(ndev);
if (rc) {
dev_err(dev,
"Cannot register network device, aborting\n");
goto error;
}
dev_info(dev,
"Xilinx EmacLite at 0x%08lX mapped to 0x%08lX, irq=%d\n",
(unsigned long __force)ndev->mem_start,
(unsigned long __force)lp->base_addr, ndev->irq);
return 0;
error:
free_netdev(ndev);
return rc;
}
| 1
|
462,433
|
lstnActivity(ptcplstn_t *pLstn)
{
int newSock = -1;
prop_t *peerName;
prop_t *peerIP;
rsRetVal localRet;
DEFiRet;
DBGPRINTF("imptcp: new connection on listen socket %d\n", pLstn->sock);
while(glbl.GetGlobalInputTermState() == 0) {
localRet = AcceptConnReq(pLstn, &newSock, &peerName, &peerIP);
if(localRet == RS_RET_NO_MORE_DATA || glbl.GetGlobalInputTermState() == 1) {
break;
}
CHKiRet(localRet);
localRet = addSess(pLstn, newSock, peerName, peerIP);
if(localRet != RS_RET_OK) {
close(newSock);
prop.Destruct(&peerName);
prop.Destruct(&peerIP);
ABORT_FINALIZE(localRet);
}
}
finalize_it:
RETiRet;
}
| 0
|
344,813
|
lowercase(char *s)
{
for (; *s; s++)
*s = tolower((u_char)*s);
}
| 0
|
234,717
|
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder)
{
int ret;
lockdep_assert_held(&uuid_mutex);
/*
* The device_list_mutex cannot be taken here in case opening the
* underlying device takes further locks like open_mutex.
*
* We also don't need the lock here as this is called during mount and
* exclusion is provided by uuid_mutex
*/
if (fs_devices->opened) {
fs_devices->opened++;
ret = 0;
} else {
list_sort(NULL, &fs_devices->devices, devid_cmp);
ret = open_fs_devices(fs_devices, flags, holder);
}
return ret;
}
| 0
|
226,045
|
void mp4s_box_del(GF_Box *s)
{
GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s;
if (ptr == NULL) return;
gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s);
if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc);
gf_free(ptr);
}
| 0
|
212,144
|
MOBI_RET mobi_parse_huffdic(const MOBIData *m, MOBIHuffCdic *huffcdic) {
MOBI_RET ret;
const size_t offset = mobi_get_kf8offset(m);
if (m->mh == NULL || m->mh->huff_rec_index == NULL || m->mh->huff_rec_count == NULL) {
debug_print("%s", "HUFF/CDIC records metadata not found in MOBI header\n");
return MOBI_DATA_CORRUPT;
}
const size_t huff_rec_index = *m->mh->huff_rec_index + offset;
const size_t huff_rec_count = *m->mh->huff_rec_count;
if (huff_rec_count > HUFF_RECORD_MAXCNT) {
debug_print("Too many HUFF record (%zu)\n", huff_rec_count);
return MOBI_DATA_CORRUPT;
}
const MOBIPdbRecord *curr = mobi_get_record_by_seqnumber(m, huff_rec_index);
if (curr == NULL || huff_rec_count < 2) {
debug_print("%s", "HUFF/CDIC record not found\n");
return MOBI_DATA_CORRUPT;
}
if (curr->size < HUFF_RECORD_MINSIZE) {
debug_print("HUFF record too short (%zu b)\n", curr->size);
return MOBI_DATA_CORRUPT;
}
ret = mobi_parse_huff(huffcdic, curr);
if (ret != MOBI_SUCCESS) {
debug_print("%s", "HUFF parsing failed\n");
return ret;
}
curr = curr->next;
/* allocate memory for symbols data in each CDIC record */
huffcdic->symbols = malloc((huff_rec_count - 1) * sizeof(*huffcdic->symbols));
if (huffcdic->symbols == NULL) {
debug_print("%s\n", "Memory allocation failed");
return MOBI_MALLOC_FAILED;
}
/* get following CDIC records */
size_t i = 0;
while (i < huff_rec_count - 1) {
if (curr == NULL) {
debug_print("%s\n", "CDIC record not found");
return MOBI_DATA_CORRUPT;
}
ret = mobi_parse_cdic(huffcdic, curr, i++);
if (ret != MOBI_SUCCESS) {
debug_print("%s", "CDIC parsing failed\n");
return ret;
}
curr = curr->next;
}
return MOBI_SUCCESS;
}
| 1
|
310,301
|
dirserv_get_routerdesc_fingerprints(smartlist_t *fps_out, const char *key,
const char **msg, int for_unencrypted_conn,
int is_extrainfo)
{
int by_id = 1;
*msg = NULL;
if (!strcmp(key, "all")) {
routerlist_t *rl = router_get_routerlist();
SMARTLIST_FOREACH(rl->routers, routerinfo_t *, r,
smartlist_add(fps_out,
tor_memdup(r->cache_info.identity_digest, DIGEST_LEN)));
/* Treat "all" requests as if they were unencrypted */
for_unencrypted_conn = 1;
} else if (!strcmp(key, "authority")) {
routerinfo_t *ri = router_get_my_routerinfo();
if (ri)
smartlist_add(fps_out,
tor_memdup(ri->cache_info.identity_digest, DIGEST_LEN));
} else if (!strcmpstart(key, "d/")) {
by_id = 0;
key += strlen("d/");
dir_split_resource_into_fingerprints(key, fps_out, NULL,
DSR_HEX|DSR_SORT_UNIQ);
} else if (!strcmpstart(key, "fp/")) {
key += strlen("fp/");
dir_split_resource_into_fingerprints(key, fps_out, NULL,
DSR_HEX|DSR_SORT_UNIQ);
} else {
*msg = "Key not recognized";
return -1;
}
if (for_unencrypted_conn) {
/* Remove anything that insists it not be sent unencrypted. */
SMARTLIST_FOREACH(fps_out, char *, cp, {
signed_descriptor_t *sd;
if (by_id)
sd = get_signed_descriptor_by_fp(cp,is_extrainfo,0);
else if (is_extrainfo)
sd = extrainfo_get_by_descriptor_digest(cp);
else
sd = router_get_by_descriptor_digest(cp);
if (sd && !sd->send_unencrypted) {
tor_free(cp);
SMARTLIST_DEL_CURRENT(fps_out, cp);
}
});
}
if (!smartlist_len(fps_out)) {
*msg = "Servers unavailable";
return -1;
}
return 0;
}
| 0
|
236,139
|
GF_Box *gppc_box_new()
{
//default type is amr but overwritten by box constructor
ISOM_DECL_BOX_ALLOC(GF_3GPPConfigBox, GF_ISOM_BOX_TYPE_DAMR);
return (GF_Box *)tmp;
}
| 0
|
242,648
|
GF_Err isoffin_configure_pid(GF_Filter *filter, GF_FilterPid *pid, Bool is_remove)
{
const GF_PropertyValue *prop;
ISOMReader *read = gf_filter_get_udta(filter);
if (is_remove) {
isoffin_disconnect(read);
return GF_OK;
}
//check if we have a file path; if not, this is a pure stream of boxes (no local file cache)
prop = gf_filter_pid_get_property(pid, GF_PROP_PID_FILEPATH);
if (!prop || !prop->value.string) {
if (!read->mem_load_mode)
read->mem_load_mode = 1;
if (!read->pid) read->pid = pid;
read->input_loaded = GF_FALSE;
return GF_OK;
}
if (read->pid && prop->value.string) {
const char *next_url = prop->value.string;
u64 sr, er;
u32 crc = gf_crc_32(next_url, (u32) strlen(next_url) );
sr = er = 0;
prop = gf_filter_pid_get_property(read->pid, GF_PROP_PID_FILE_RANGE);
if (prop) {
sr = prop->value.lfrac.num;
er = prop->value.lfrac.den;
}
//if eos is signaled, don't check for crc since we might have the same blob address (same alloc)
if (!read->eos_signaled && (read->src_crc == crc) && (read->start_range==sr) && (read->end_range==er)) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[IsoMedia] same URL crc and range for %s, skipping reconfigure\n", next_url));
return GF_OK;
}
read->src_crc = crc;
read->start_range = sr;
read->end_range = er;
read->input_loaded = GF_FALSE;
read->eos_signaled = GF_FALSE;
//we need to reconfigure
return isoffin_reconfigure(filter, read, next_url);
}
read->pid = pid;
prop = gf_filter_pid_get_property(pid, GF_PROP_PID_FILE_CACHED);
if (prop && prop->value.boolean) {
GF_FilterEvent evt;
read->input_loaded = GF_TRUE;
GF_FEVT_INIT(evt, GF_FEVT_PLAY_HINT, pid);
evt.play.full_file_only=1;
gf_filter_pid_send_event(pid, &evt);
}
return isoffin_setup(filter, read);
}
| 0
|
413,648
|
R_API bool r_core_anal_bb_seek(RCore *core, ut64 addr) {
ut64 bbaddr = r_anal_get_bbaddr (core->anal, addr);
if (bbaddr != UT64_MAX) {
r_core_seek (core, bbaddr, false);
return true;
}
return false;
}
| 0
|
219,960
|
int callback_glewlwyd_get_user_middleware_module (const struct _u_request * request, struct _u_response * response, void * user_middleware_data) {
struct config_elements * config = (struct config_elements *)user_middleware_data;
json_t * j_module;
j_module = get_user_middleware_module(config, u_map_get(request->map_url, "name"));
if (check_result_value(j_module, G_OK)) {
ulfius_set_json_body_response(response, 200, json_object_get(j_module, "module"));
} else if (check_result_value(j_module, G_ERROR_NOT_FOUND)) {
response->status = 404;
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_get_user_middleware_module - Error get_user_middleware_module");
response->status = 500;
}
json_decref(j_module);
return U_CALLBACK_CONTINUE;
}
| 0
|
462,279
|
PJ_DEF(pj_status_t) pj_stun_msg_add_string_attr(pj_pool_t *pool,
pj_stun_msg *msg,
int attr_type,
const pj_str_t *value)
{
pj_stun_string_attr *attr = NULL;
pj_status_t status;
status = pj_stun_string_attr_create(pool, attr_type, value,
&attr);
if (status != PJ_SUCCESS)
return status;
return pj_stun_msg_add_attr(msg, &attr->hdr);
}
| 0
|
197,142
|
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(0);
const Tensor& input_min_range = ctx->input(1);
const Tensor& input_max_range = ctx->input(2);
int num_slices = 1;
if (axis_ > -1) {
num_slices = input.dim_size(axis_);
}
const TensorShape& minmax_shape = ctx->input(1).shape();
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
Tensor* output_min_tensor = nullptr;
Tensor* output_max_tensor = nullptr;
if (num_slices == 1) {
OP_REQUIRES_OK(ctx, ctx->allocate_output(1, {}, &output_min_tensor));
OP_REQUIRES_OK(ctx, ctx->allocate_output(2, {}, &output_max_tensor));
const float min_range = input_min_range.template flat<float>()(0);
const float max_range = input_max_range.template flat<float>()(0);
QuantizeTensor(ctx, input, min_range, max_range, output,
output_min_tensor, output_max_tensor);
return;
}
OP_REQUIRES(ctx, mode_ != QUANTIZE_MODE_MIN_FIRST,
errors::Unimplemented("MIN_FIRST mode is not implemented for "
"Quantize with axis != -1."));
OP_REQUIRES_OK(ctx,
ctx->allocate_output(1, minmax_shape, &output_min_tensor));
OP_REQUIRES_OK(ctx,
ctx->allocate_output(2, minmax_shape, &output_max_tensor));
auto input_tensor =
input.template flat_inner_outer_dims<float, 3>(axis_ - 1);
int64_t pre_dim = 1, post_dim = 1;
for (int i = 0; i < axis_; ++i) {
pre_dim *= output->dim_size(i);
}
for (int i = axis_ + 1; i < output->dims(); ++i) {
post_dim *= output->dim_size(i);
}
auto output_tensor = output->template bit_casted_shaped<T, 3>(
{pre_dim, num_slices, post_dim});
auto min_ranges = input_min_range.template vec<float>();
auto max_ranges = input_max_range.template vec<float>();
for (int i = 0; i < num_slices; ++i) {
QuantizeSlice(ctx->eigen_device<Device>(), ctx,
input_tensor.template chip<1>(i), min_ranges(i),
max_ranges(i), output_tensor.template chip<1>(i),
&output_min_tensor->flat<float>()(i),
&output_max_tensor->flat<float>()(i));
}
}
| 1
|
450,821
|
readdir_result_type (struct readdir_result d)
{
#if defined _DIRENT_HAVE_D_TYPE || defined HAVE_STRUCT_DIRENT_D_TYPE
# define D_TYPE_TO_RESULT(source) (source)->d_type,
return d.type;
#else
# define D_TYPE_TO_RESULT(source)
return DT_UNKNOWN;
#endif
}
| 0
|
161,841
|
int64 ClientUsageTracker::GetCachedHostUsage(const std::string& host) {
int64 ClientUsageTracker::GetCachedHostUsage(const std::string& host) const {
HostUsageMap::const_iterator found = cached_usage_.find(host);
if (found == cached_usage_.end())
return 0;
int64 usage = 0;
const UsageMap& map = found->second;
for (UsageMap::const_iterator iter = map.begin();
iter != map.end(); ++iter) {
usage += iter->second;
}
return usage;
}
| 0
|
517,443
|
static void do_home_process(HttpResponse res) {
char buf[STRLEN];
boolean_t on = true;
boolean_t header = true;
for (Service_T s = servicelist_conf; s; s = s->next_conf) {
if (s->type != Service_Process)
continue;
if (header) {
StringBuffer_append(res->outputbuffer,
"<table id='header-row'>"
"<tr>"
"<th class='left' class='first'>Process</th>"
"<th class='left'>Status</th>"
"<th class='right'>Uptime</th>"
"<th class='right'>CPU Total</b></th>"
"<th class='right'>Memory Total</th>"
"<th class='right column'>Read</th>"
"<th class='right column'>Write</th>"
"</tr>");
header = false;
}
StringBuffer_append(res->outputbuffer,
"<tr%s>"
"<td class='left'><a href='%s'>%s</a></td>"
"<td class='left'>%s</td>",
on ? " class='stripe'" : "",
s->name, s->name,
get_service_status(HTML, s, buf, sizeof(buf)));
if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || s->inf.process->uptime < 0) {
StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
} else {
StringBuffer_append(res->outputbuffer, "<td class='right'>%s</td>", _getUptime(s->inf.process->uptime, (char[256]){}));
}
if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || s->inf.process->total_cpu_percent < 0) {
StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
} else {
StringBuffer_append(res->outputbuffer, "<td class='right%s'>%.1f%%</td>", (s->error & Event_Resource) ? " red-text" : "", s->inf.process->total_cpu_percent);
}
if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || s->inf.process->total_mem_percent < 0) {
StringBuffer_append(res->outputbuffer, "<td class='right'>-</td>");
} else {
StringBuffer_append(res->outputbuffer, "<td class='right%s'>%.1f%% [%s]</td>", (s->error & Event_Resource) ? " red-text" : "", s->inf.process->total_mem_percent, Fmt_bytes2str(s->inf.process->total_mem, buf));
}
boolean_t hasReadBytes = Statistics_initialized(&(s->inf.process->read.bytes));
boolean_t hasReadOperations = Statistics_initialized(&(s->inf.process->read.operations));
if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || (! hasReadBytes && ! hasReadOperations)) {
StringBuffer_append(res->outputbuffer, "<td class='right column'>-</td>");
} else if (hasReadBytes) {
StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%s/s</td>", (s->error & Event_Resource) ? " red-text" : "", Fmt_bytes2str(Statistics_deltaNormalize(&(s->inf.process->read.bytes)), (char[10]){}));
} else if (hasReadOperations) {
StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%.1f/s</td>", (s->error & Event_Resource) ? " red-text" : "", Statistics_deltaNormalize(&(s->inf.process->read.operations)));
}
boolean_t hasWriteBytes = Statistics_initialized(&(s->inf.process->write.bytes));
boolean_t hasWriteOperations = Statistics_initialized(&(s->inf.process->write.operations));
if (! (Run.flags & Run_ProcessEngineEnabled) || ! Util_hasServiceStatus(s) || (! hasWriteBytes && ! hasWriteOperations)) {
StringBuffer_append(res->outputbuffer, "<td class='right column'>-</td>");
} else if (hasWriteBytes) {
StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%s/s</td>", (s->error & Event_Resource) ? " red-text" : "", Fmt_bytes2str(Statistics_deltaNormalize(&(s->inf.process->write.bytes)), (char[10]){}));
} else if (hasWriteOperations) {
StringBuffer_append(res->outputbuffer, "<td class='right column%s'>%.1f/s</td>", (s->error & Event_Resource) ? " red-text" : "", Statistics_deltaNormalize(&(s->inf.process->write.operations)));
}
StringBuffer_append(res->outputbuffer, "</tr>");
on = ! on;
}
if (! header)
StringBuffer_append(res->outputbuffer, "</table>");
}
| 0
|
353,189
|
void SplashOutputDev::setSoftMask(GfxState *state, const double *bbox,
bool alpha, Function *transferFunc,
GfxColor *backdropColor) {
SplashBitmap *softMask, *tBitmap;
Splash *tSplash;
SplashTransparencyGroup *transpGroup;
SplashColor color;
SplashColorPtr p;
GfxGray gray;
GfxRGB rgb;
#ifdef SPLASH_CMYK
GfxCMYK cmyk;
GfxColor deviceN;
#endif
double lum, lum2;
int tx, ty, x, y;
tx = transpGroupStack->tx;
ty = transpGroupStack->ty;
tBitmap = transpGroupStack->tBitmap;
// composite with backdrop color
if (!alpha && tBitmap->getMode() != splashModeMono1) {
//~ need to correctly handle the case where no blending color
//~ space is given
if (transpGroupStack->blendingColorSpace) {
tSplash = new Splash(tBitmap, vectorAntialias,
transpGroupStack->origSplash->getScreen());
switch (tBitmap->getMode()) {
case splashModeMono1:
// transparency is not supported in mono1 mode
break;
case splashModeMono8:
transpGroupStack->blendingColorSpace->getGray(backdropColor, &gray);
color[0] = colToByte(gray);
tSplash->compositeBackground(color);
break;
case splashModeXBGR8:
color[3] = 255;
// fallthrough
case splashModeRGB8:
case splashModeBGR8:
transpGroupStack->blendingColorSpace->getRGB(backdropColor, &rgb);
color[0] = colToByte(rgb.r);
color[1] = colToByte(rgb.g);
color[2] = colToByte(rgb.b);
tSplash->compositeBackground(color);
break;
#ifdef SPLASH_CMYK
case splashModeCMYK8:
transpGroupStack->blendingColorSpace->getCMYK(backdropColor, &cmyk);
color[0] = colToByte(cmyk.c);
color[1] = colToByte(cmyk.m);
color[2] = colToByte(cmyk.y);
color[3] = colToByte(cmyk.k);
tSplash->compositeBackground(color);
break;
case splashModeDeviceN8:
transpGroupStack->blendingColorSpace->getDeviceN(backdropColor, &deviceN);
for (int cp=0; cp < SPOT_NCOMPS+4; cp++)
color[cp] = colToByte(deviceN.c[cp]);
tSplash->compositeBackground(color);
break;
#endif
}
delete tSplash;
}
}
softMask = new SplashBitmap(bitmap->getWidth(), bitmap->getHeight(),
1, splashModeMono8, false);
unsigned char fill = 0;
if (transpGroupStack->blendingColorSpace) {
transpGroupStack->blendingColorSpace->getGray(backdropColor, &gray);
fill = colToByte(gray);
}
memset(softMask->getDataPtr(), fill,
softMask->getRowSize() * softMask->getHeight());
p = softMask->getDataPtr() + ty * softMask->getRowSize() + tx;
int xMax = tBitmap->getWidth();
int yMax = tBitmap->getHeight();
if (xMax > bitmap->getWidth() - tx) xMax = bitmap->getWidth() - tx;
if (yMax > bitmap->getHeight() - ty) yMax = bitmap->getHeight() - ty;
for (y = 0; y < yMax; ++y) {
for (x = 0; x < xMax; ++x) {
if (alpha) {
if (transferFunc) {
lum = tBitmap->getAlpha(x, y) / 255.0;
transferFunc->transform(&lum, &lum2);
p[x] = (int)(lum2 * 255.0 + 0.5);
} else
p[x] = tBitmap->getAlpha(x, y);
} else {
tBitmap->getPixel(x, y, color);
// convert to luminosity
switch (tBitmap->getMode()) {
case splashModeMono1:
case splashModeMono8:
lum = color[0] / 255.0;
break;
case splashModeXBGR8:
case splashModeRGB8:
case splashModeBGR8:
lum = (0.3 / 255.0) * color[0] +
(0.59 / 255.0) * color[1] +
(0.11 / 255.0) * color[2];
break;
#ifdef SPLASH_CMYK
case splashModeCMYK8:
case splashModeDeviceN8:
lum = (1 - color[3] / 255.0)
- (0.3 / 255.0) * color[0]
- (0.59 / 255.0) * color[1]
- (0.11 / 255.0) * color[2];
if (lum < 0) {
lum = 0;
}
break;
#endif
}
if (transferFunc) {
transferFunc->transform(&lum, &lum2);
} else {
lum2 = lum;
}
p[x] = (int)(lum2 * 255.0 + 0.5);
}
}
p += softMask->getRowSize();
}
splash->setSoftMask(softMask);
// pop the stack
transpGroup = transpGroupStack;
transpGroupStack = transpGroup->next;
delete transpGroup;
delete tBitmap;
}
| 0
|
226,953
|
IRC_PROTOCOL_CALLBACK(whowas_nick_msg)
{
IRC_PROTOCOL_MIN_ARGS(5);
weechat_printf_date_tags (
irc_msgbuffer_get_target_buffer (
server, argv[3], command, "whowas", NULL),
date,
irc_protocol_tags (command, "irc_numeric", NULL, NULL),
"%s%s[%s%s%s] %s%s",
weechat_prefix ("network"),
IRC_COLOR_CHAT_DELIMITERS,
irc_nick_color_for_msg (server, 1, NULL, argv[3]),
argv[3],
IRC_COLOR_CHAT_DELIMITERS,
IRC_COLOR_RESET,
(argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]);
return WEECHAT_RC_OK;
}
| 0
|
274,846
|
std::vector<bool> GetOutput() { return ExtractVector<bool>(output_); }
| 0
|
508,362
|
bool Alter_table_prelocking_strategy::
handle_view(THD *thd, Query_tables_list *prelocking_ctx,
TABLE_LIST *table_list, bool *need_prelocking)
{
return FALSE;
}
| 0
|
262,084
|
static void AddRangeStats(const int start_instance, const int end_instance,
const int start_feature_dim,
const int end_feature_dim,
StatsPartitionMap* stats_map,
const TTypes<float>::ConstMatrix& gradients,
const TTypes<float>::ConstMatrix& hessians,
const TTypes<int32>::ConstVec& node_ids,
const int32_t feature_dims, const int32_t bucket_id,
const int32_t logits_dims, const int32_t stats_dims) {
DCHECK_LE(start_instance, end_instance);
if (start_instance == end_instance) {
DCHECK_LT(start_feature_dim, end_feature_dim);
}
for (int32_t instance = start_instance; instance <= end_instance;
++instance) {
const int32_t start_f_dim =
(instance == start_instance) ? start_feature_dim + 1 : 0;
const int32_t end_f_dim =
(instance == end_instance) ? end_feature_dim : feature_dims;
for (int32_t f_dim = start_f_dim; f_dim < end_f_dim; ++f_dim) {
AddInstanceStatsToMap(instance, f_dim, bucket_id, logits_dims, stats_dims,
stats_map, gradients, hessians, node_ids);
}
}
}
| 0
|
300,814
|
int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
int (*skb_handler)(struct sk_buff *skb,
struct netlink_callback *cb,
struct tipc_sock *tsk))
{
struct rhashtable_iter *iter = (void *)cb->args[4];
struct tipc_sock *tsk;
int err;
rhashtable_walk_start(iter);
while ((tsk = rhashtable_walk_next(iter)) != NULL) {
if (IS_ERR(tsk)) {
err = PTR_ERR(tsk);
if (err == -EAGAIN) {
err = 0;
continue;
}
break;
}
sock_hold(&tsk->sk);
rhashtable_walk_stop(iter);
lock_sock(&tsk->sk);
err = skb_handler(skb, cb, tsk);
if (err) {
release_sock(&tsk->sk);
sock_put(&tsk->sk);
goto out;
}
release_sock(&tsk->sk);
rhashtable_walk_start(iter);
sock_put(&tsk->sk);
}
rhashtable_walk_stop(iter);
out:
return skb->len;
}
| 0
|
482,505
|
getCharForDots(widechar d, const DisplayTableHeader *table) {
CharDotsMapping *cdPtr;
const TranslationTableOffset bucket = table->dotsToChar[_lou_charHash(d)];
TranslationTableOffset offset = bucket;
while (offset) {
cdPtr = (CharDotsMapping *)&table->ruleArea[offset];
if (cdPtr->lookFor == d) return cdPtr;
offset = cdPtr->next;
}
return NULL;
}
| 0
|
234,720
|
static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
struct alloc_chunk_ctl *ctl)
{
int index = btrfs_bg_flags_to_raid_index(ctl->type);
ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
ctl->devs_max = btrfs_raid_array[index].devs_max;
if (!ctl->devs_max)
ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
ctl->devs_min = btrfs_raid_array[index].devs_min;
ctl->devs_increment = btrfs_raid_array[index].devs_increment;
ctl->ncopies = btrfs_raid_array[index].ncopies;
ctl->nparity = btrfs_raid_array[index].nparity;
ctl->ndevs = 0;
switch (fs_devices->chunk_alloc_policy) {
case BTRFS_CHUNK_ALLOC_REGULAR:
init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
break;
case BTRFS_CHUNK_ALLOC_ZONED:
init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
break;
default:
BUG();
}
}
| 0
|
387,870
|
void JNIid::verify(Klass* holder) {
int first_field_offset = InstanceMirrorKlass::offset_of_static_fields();
int end_field_offset;
end_field_offset = first_field_offset + (InstanceKlass::cast(holder)->static_field_size() * wordSize);
JNIid* current = this;
while (current != NULL) {
guarantee(current->holder() == holder, "Invalid klass in JNIid");
#ifdef ASSERT
int o = current->offset();
if (current->is_static_field_id()) {
guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
}
#endif
current = current->next();
}
}
| 0
|
500,641
|
int sftp_close(sftp_file file){
int err = SSH_NO_ERROR;
SAFE_FREE(file->name);
if (file->handle){
err = sftp_handle_close(file->sftp,file->handle);
ssh_string_free(file->handle);
}
/* FIXME: check server response and implement errno */
SAFE_FREE(file);
return err;
}
| 0
|
101,660
|
bool WebProcessProxy::checkURLReceivedFromWebProcess(const String& urlString)
{
return checkURLReceivedFromWebProcess(KURL(KURL(), urlString));
}
| 0
|
487,619
|
void kernel_shutdown_prepare(enum system_states state)
{
blocking_notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
system_state = state;
device_shutdown();
}
| 0
|
384,894
|
vim_strnsize(char_u *s, int len)
{
int size = 0;
while (*s != NUL && --len >= 0)
if (has_mbyte)
{
int l = (*mb_ptr2len)(s);
size += ptr2cells(s);
s += l;
len -= l - 1;
}
else
size += byte2cells(*s++);
return size;
}
| 0
|
512,893
|
Item_ref_null_helper(THD *thd, Name_resolution_context *context_arg,
Item_in_subselect* master, Item **item,
const char *table_name_arg,
const LEX_CSTRING *field_name_arg):
Item_ref(thd, context_arg, item, table_name_arg, field_name_arg),
owner(master) {}
| 0
|
417,112
|
PlayerBase* PlayerGeneric::getPreferredPlayer(XModule* module) const
{
switch (getPreferredPlayerType(module))
{
#ifndef MILKYTRACKER
case PlayerBase::PlayerType_FAR:
return new PlayerFAR(frequency);
case PlayerBase::PlayerType_IT:
return new PlayerIT(frequency);
#endif
case PlayerBase::PlayerType_Generic:
return new PlayerSTD(frequency);
default:
return NULL;
}
}
| 0
|
473,836
|
euctw_mbc_enc_len(const UChar* p, const UChar* e, OnigEncoding enc ARG_UNUSED)
{
int firstbyte = *p++;
state_t s = trans[0][firstbyte];
#define RETURN(n) \
return s == ACCEPT ? ONIGENC_CONSTRUCT_MBCLEN_CHARFOUND(n) : \
ONIGENC_CONSTRUCT_MBCLEN_INVALID()
if (s < 0) RETURN(1);
if (p == e) return ONIGENC_CONSTRUCT_MBCLEN_NEEDMORE(EncLen_EUCTW[firstbyte]-1);
s = trans[s][*p++];
if (s < 0) RETURN(2);
if (p == e) return ONIGENC_CONSTRUCT_MBCLEN_NEEDMORE(4-2);
s = trans[s][*p++];
if (s < 0) RETURN(3);
if (p == e) return ONIGENC_CONSTRUCT_MBCLEN_NEEDMORE(4-3);
s = trans[s][*p++];
RETURN(4);
#undef RETURN
}
| 0
|
312,560
|
efmpat_to_regpat(
char_u *efmpat,
char_u *regpat,
efm_T *efminfo,
int idx,
int round)
{
char_u *srcptr;
if (efminfo->addr[idx])
{
// Each errorformat pattern can occur only once
semsg(_(e_too_many_chr_in_format_string), *efmpat);
return NULL;
}
if ((idx && idx < FMT_PATTERN_R
&& vim_strchr((char_u *)"DXOPQ", efminfo->prefix) != NULL)
|| (idx == FMT_PATTERN_R
&& vim_strchr((char_u *)"OPQ", efminfo->prefix) == NULL))
{
semsg(_(e_unexpected_chr_in_format_str), *efmpat);
return NULL;
}
efminfo->addr[idx] = (char_u)++round;
*regpat++ = '\\';
*regpat++ = '(';
#ifdef BACKSLASH_IN_FILENAME
if (*efmpat == 'f')
{
// Also match "c:" in the file name, even when
// checking for a colon next: "%f:".
// "\%(\a:\)\="
STRCPY(regpat, "\\%(\\a:\\)\\=");
regpat += 10;
}
#endif
if (*efmpat == 'f' && efmpat[1] != NUL)
{
if (efmpat[1] != '\\' && efmpat[1] != '%')
{
// A file name may contain spaces, but this isn't
// in "\f". For "%f:%l:%m" there may be a ":" in
// the file name. Use ".\{-1,}x" instead (x is
// the next character), the requirement that :999:
// follows should work.
STRCPY(regpat, ".\\{-1,}");
regpat += 7;
}
else
{
// File name followed by '\\' or '%': include as
// many file name chars as possible.
STRCPY(regpat, "\\f\\+");
regpat += 4;
}
}
else
{
srcptr = (char_u *)fmt_pat[idx].pattern;
while ((*regpat = *srcptr++) != NUL)
++regpat;
}
*regpat++ = '\\';
*regpat++ = ')';
return regpat;
}
| 0
|
274,721
|
static double line_length(double x0, double y0, double x1, double y1)
{
double dx = x0 - x1;
double dy = y0 - y1;
return hypot(dx, dy);
}
| 0
|
90,215
|
virtual bool ethernet_enabled() const {
return enabled_devices_ & (1 << TYPE_ETHERNET);
}
| 0
|
225,914
|
GF_Err tmax_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_TMAXBox *ptr = (GF_TMAXBox *)s;
ISOM_DECREASE_SIZE(ptr, 4)
ptr->maxTime = gf_bs_read_u32(bs);
return GF_OK;
}
| 0
|
338,046
|
void WasmBinaryWriter::writeInlineBuffer(const char* data, size_t size) {
o << U32LEB(size);
writeData(data, size);
}
| 0
|
236,117
|
GF_Err ftab_box_write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
GF_FontTableBox *ptr = (GF_FontTableBox *)s;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u16(bs, ptr->entry_count);
for (i=0; i<ptr->entry_count; i++) {
gf_bs_write_u16(bs, ptr->fonts[i].fontID);
if (ptr->fonts[i].fontName) {
u32 len = (u32) strlen(ptr->fonts[i].fontName);
gf_bs_write_u8(bs, len);
gf_bs_write_data(bs, ptr->fonts[i].fontName, len);
} else {
gf_bs_write_u8(bs, 0);
}
}
return GF_OK;
}
| 0
|
365,626
|
_asn1_check_identifier (asn1_node node)
{
asn1_node p, p2;
char name2[ASN1_MAX_NAME_SIZE * 2 + 2];
if (node == NULL)
return ASN1_ELEMENT_NOT_FOUND;
p = node;
while (p)
{
if (p->value && type_field (p->type) == ASN1_ETYPE_IDENTIFIER)
{
_asn1_str_cpy (name2, sizeof (name2), node->name);
_asn1_str_cat (name2, sizeof (name2), ".");
_asn1_str_cat (name2, sizeof (name2), (char *) p->value);
p2 = asn1_find_node (node, name2);
if (p2 == NULL)
{
if (p->value)
_asn1_strcpy (_asn1_identifierMissing, p->value);
else
_asn1_strcpy (_asn1_identifierMissing, "(null)");
return ASN1_IDENTIFIER_NOT_FOUND;
}
}
else if ((type_field (p->type) == ASN1_ETYPE_OBJECT_ID) &&
(p->type & CONST_DEFAULT))
{
p2 = p->down;
if (p2 && (type_field (p2->type) == ASN1_ETYPE_DEFAULT))
{
_asn1_str_cpy (name2, sizeof (name2), node->name);
_asn1_str_cat (name2, sizeof (name2), ".");
_asn1_str_cat (name2, sizeof (name2), (char *) p2->value);
_asn1_strcpy (_asn1_identifierMissing, p2->value);
p2 = asn1_find_node (node, name2);
if (!p2 || (type_field (p2->type) != ASN1_ETYPE_OBJECT_ID) ||
!(p2->type & CONST_ASSIGN))
return ASN1_IDENTIFIER_NOT_FOUND;
else
_asn1_identifierMissing[0] = 0;
}
}
else if ((type_field (p->type) == ASN1_ETYPE_OBJECT_ID) &&
(p->type & CONST_ASSIGN))
{
p2 = p->down;
if (p2 && (type_field (p2->type) == ASN1_ETYPE_CONSTANT))
{
if (p2->value && !isdigit (p2->value[0]))
{
_asn1_str_cpy (name2, sizeof (name2), node->name);
_asn1_str_cat (name2, sizeof (name2), ".");
_asn1_str_cat (name2, sizeof (name2), (char *) p2->value);
_asn1_strcpy (_asn1_identifierMissing, p2->value);
p2 = asn1_find_node (node, name2);
if (!p2 || (type_field (p2->type) != ASN1_ETYPE_OBJECT_ID)
|| !(p2->type & CONST_ASSIGN))
return ASN1_IDENTIFIER_NOT_FOUND;
else
_asn1_identifierMissing[0] = 0;
}
}
}
if (p->down)
{
p = p->down;
}
else if (p->right)
p = p->right;
else
{
while (1)
{
p = _asn1_get_up (p);
if (p == node)
{
p = NULL;
break;
}
if (p->right)
{
p = p->right;
break;
}
}
}
}
return ASN1_SUCCESS;
}
| 0
|
229,276
|
std::unique_ptr<cql_server::response> cql_server::connection::make_ready(int16_t stream, const tracing::trace_state_ptr& tr_state) const
{
return std::make_unique<cql_server::response>(stream, cql_binary_opcode::READY, tr_state);
}
| 0
|
203,902
|
get_one_sourceline(source_cookie_T *sp)
{
garray_T ga;
int len;
int c;
char_u *buf;
#ifdef USE_CRNL
int has_cr; // CR-LF found
#endif
int have_read = FALSE;
// use a growarray to store the sourced line
ga_init2(&ga, 1, 250);
// Loop until there is a finished line (or end-of-file).
++sp->sourcing_lnum;
for (;;)
{
// make room to read at least 120 (more) characters
if (ga_grow(&ga, 120) == FAIL)
break;
if (sp->source_from_buf)
{
if (sp->buf_lnum >= sp->buflines.ga_len)
break; // all the lines are processed
ga_concat(&ga, ((char_u **)sp->buflines.ga_data)[sp->buf_lnum]);
sp->buf_lnum++;
if (ga_grow(&ga, 1) == FAIL)
break;
buf = (char_u *)ga.ga_data;
buf[ga.ga_len++] = NUL;
}
else
{
buf = (char_u *)ga.ga_data;
if (fgets((char *)buf + ga.ga_len, ga.ga_maxlen - ga.ga_len,
sp->fp) == NULL)
break;
}
len = ga.ga_len + (int)STRLEN(buf + ga.ga_len);
#ifdef USE_CRNL
// Ignore a trailing CTRL-Z, when in Dos mode. Only recognize the
// CTRL-Z by its own, or after a NL.
if ( (len == 1 || (len >= 2 && buf[len - 2] == '\n'))
&& sp->fileformat == EOL_DOS
&& buf[len - 1] == Ctrl_Z)
{
buf[len - 1] = NUL;
break;
}
#endif
have_read = TRUE;
ga.ga_len = len;
// If the line was longer than the buffer, read more.
if (ga.ga_maxlen - ga.ga_len == 1 && buf[len - 1] != '\n')
continue;
if (len >= 1 && buf[len - 1] == '\n') // remove trailing NL
{
#ifdef USE_CRNL
has_cr = (len >= 2 && buf[len - 2] == '\r');
if (sp->fileformat == EOL_UNKNOWN)
{
if (has_cr)
sp->fileformat = EOL_DOS;
else
sp->fileformat = EOL_UNIX;
}
if (sp->fileformat == EOL_DOS)
{
if (has_cr) // replace trailing CR
{
buf[len - 2] = '\n';
--len;
--ga.ga_len;
}
else // lines like ":map xx yy^M" will have failed
{
if (!sp->error)
{
msg_source(HL_ATTR(HLF_W));
emsg(_("W15: Warning: Wrong line separator, ^M may be missing"));
}
sp->error = TRUE;
sp->fileformat = EOL_UNIX;
}
}
#endif
// The '\n' is escaped if there is an odd number of ^V's just
// before it, first set "c" just before the 'V's and then check
// len&c parities (is faster than ((len-c)%2 == 0)) -- Acevedo
for (c = len - 2; c >= 0 && buf[c] == Ctrl_V; c--)
;
if ((len & 1) != (c & 1)) // escaped NL, read more
{
++sp->sourcing_lnum;
continue;
}
buf[len - 1] = NUL; // remove the NL
}
// Check for ^C here now and then, so recursive :so can be broken.
line_breakcheck();
break;
}
if (have_read)
return (char_u *)ga.ga_data;
vim_free(ga.ga_data);
return NULL;
}
| 1
|
264,668
|
static GF_Err BM_ParseMultipleIndexedReplace(GF_BifsDecoder *codec, GF_BitStream *bs, GF_List *com_list)
{
u32 ID, ind, field_ind, NumBits, lenpos, lennum, count;
GF_Node *node;
GF_Err e;
GF_Command *com;
GF_CommandField *inf;
GF_FieldInfo field;
ID = 1 + gf_bs_read_int(bs, codec->info->config.NodeIDBits);
node = gf_sg_find_node(codec->current_graph, ID);
if (!node) return GF_NON_COMPLIANT_BITSTREAM;
NumBits = gf_get_bit_size(gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_IN)-1);
ind = gf_bs_read_int(bs, NumBits);
e = gf_bifs_get_field_index(node, ind, GF_SG_FIELD_CODING_IN, &field_ind);
if (e) return e;
e = gf_node_get_field(node, field_ind, &field);
if (gf_sg_vrml_is_sf_field(field.fieldType)) return GF_NON_COMPLIANT_BITSTREAM;
lenpos = gf_bs_read_int(bs, 5);
lennum = gf_bs_read_int(bs, 5);
count = gf_bs_read_int(bs, lennum);
com = gf_sg_command_new(codec->current_graph, GF_SG_MULTIPLE_INDEXED_REPLACE);
BM_SetCommandNode(com, node);
field.fieldType = gf_sg_vrml_get_sf_type(field.fieldType);
while (count) {
inf = gf_sg_command_field_new(com);
inf->pos = gf_bs_read_int(bs, lenpos);
inf->fieldIndex = field.fieldIndex;
inf->fieldType = field.fieldType;
if (field.fieldType==GF_SG_VRML_SFNODE) {
inf->new_node = gf_bifs_dec_node(codec, bs, field.NDTtype);
if (codec->LastError) goto err;
inf->field_ptr = &inf->new_node;
gf_node_register(inf->new_node, NULL);
} else {
field.far_ptr = inf->field_ptr = gf_sg_vrml_field_pointer_new(inf->fieldType);
e = gf_bifs_dec_sf_field(codec, bs, node, &field, GF_TRUE);
if (e) goto err;
}
count--;
}
err:
if (e) gf_sg_command_del(com);
else gf_list_add(com_list, com);
return e;
}
| 0
|
294,466
|
test_weeknum(int from, int to, int f, double sg)
{
int j;
fprintf(stderr, "test_weeknum: %d...%d (%d) - %.0f\n",
from, to, to - from, sg);
for (j = from; j <= to; j++) {
int y, w, d, rj, ns;
c_jd_to_weeknum(j, f, sg, &y, &w, &d);
c_weeknum_to_jd(y, w, d, f, sg, &rj, &ns);
if (j != rj) {
fprintf(stderr, "%d != %d\n", j, rj);
return 0;
}
}
return 1;
}
| 0
|
484,769
|
static void xennet_destroy_queues(struct netfront_info *info)
{
unsigned int i;
for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
struct netfront_queue *queue = &info->queues[i];
if (netif_running(info->netdev))
napi_disable(&queue->napi);
netif_napi_del(&queue->napi);
}
kfree(info->queues);
info->queues = NULL;
}
| 0
|
411,892
|
extrainfo_parse_entry_from_string(const char *s, const char *end,
int cache_copy, struct digest_ri_map_t *routermap)
{
extrainfo_t *extrainfo = NULL;
char digest[128];
smartlist_t *tokens = NULL;
directory_token_t *tok;
crypto_pk_env_t *key = NULL;
routerinfo_t *router = NULL;
memarea_t *area = NULL;
const char *s_dup = s;
if (!end) {
end = s + strlen(s);
}
/* point 'end' to a point immediately after the final newline. */
while (end > s+2 && *(end-1) == '\n' && *(end-2) == '\n')
--end;
if (router_get_extrainfo_hash(s, digest) < 0) {
log_warn(LD_DIR, "Couldn't compute router hash.");
goto err;
}
tokens = smartlist_create();
area = memarea_new();
if (tokenize_string(area,s,end,tokens,extrainfo_token_table,0)) {
log_warn(LD_DIR, "Error tokenizing extra-info document.");
goto err;
}
if (smartlist_len(tokens) < 2) {
log_warn(LD_DIR, "Impossibly short extra-info document.");
goto err;
}
tok = smartlist_get(tokens,0);
if (tok->tp != K_EXTRA_INFO) {
log_warn(LD_DIR,"Entry does not start with \"extra-info\"");
goto err;
}
extrainfo = tor_malloc_zero(sizeof(extrainfo_t));
extrainfo->cache_info.is_extrainfo = 1;
if (cache_copy)
extrainfo->cache_info.signed_descriptor_body = tor_strndup(s, end-s);
extrainfo->cache_info.signed_descriptor_len = end-s;
memcpy(extrainfo->cache_info.signed_descriptor_digest, digest, DIGEST_LEN);
tor_assert(tok->n_args >= 2);
if (!is_legal_nickname(tok->args[0])) {
log_warn(LD_DIR,"Bad nickname %s on \"extra-info\"",escaped(tok->args[0]));
goto err;
}
strlcpy(extrainfo->nickname, tok->args[0], sizeof(extrainfo->nickname));
if (strlen(tok->args[1]) != HEX_DIGEST_LEN ||
base16_decode(extrainfo->cache_info.identity_digest, DIGEST_LEN,
tok->args[1], HEX_DIGEST_LEN)) {
log_warn(LD_DIR,"Invalid fingerprint %s on \"extra-info\"",
escaped(tok->args[1]));
goto err;
}
tok = find_by_keyword(tokens, K_PUBLISHED);
if (parse_iso_time(tok->args[0], &extrainfo->cache_info.published_on)) {
log_warn(LD_DIR,"Invalid published time %s on \"extra-info\"",
escaped(tok->args[0]));
goto err;
}
if (routermap &&
(router = digestmap_get((digestmap_t*)routermap,
extrainfo->cache_info.identity_digest))) {
key = router->identity_pkey;
}
tok = find_by_keyword(tokens, K_ROUTER_SIGNATURE);
if (strcmp(tok->object_type, "SIGNATURE") ||
tok->object_size < 128 || tok->object_size > 512) {
log_warn(LD_DIR, "Bad object type or length on extra-info signature");
goto err;
}
if (key) {
note_crypto_pk_op(VERIFY_RTR);
if (check_signature_token(digest, DIGEST_LEN, tok, key, 0,
"extra-info") < 0)
goto err;
if (router)
extrainfo->cache_info.send_unencrypted =
router->cache_info.send_unencrypted;
} else {
extrainfo->pending_sig = tor_memdup(tok->object_body,
tok->object_size);
extrainfo->pending_sig_len = tok->object_size;
}
goto done;
err:
dump_desc(s_dup, "extra-info descriptor");
extrainfo_free(extrainfo);
extrainfo = NULL;
done:
if (tokens) {
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_free(tokens);
}
if (area) {
DUMP_AREA(area, "extrainfo");
memarea_drop_all(area);
}
return extrainfo;
}
| 0
|
225,374
|
static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
{
struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file);
return v4l2loopback_set_ctrl(dev, c->id, c->value);
}
| 0
|
244,238
|
GF_Err sidx_box_read(GF_Box *s,GF_BitStream *bs)
{
u32 i;
GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s;
ISOM_DECREASE_SIZE(ptr, 8);
ptr->reference_ID = gf_bs_read_u32(bs);
ptr->timescale = gf_bs_read_u32(bs);
if (ptr->version==0) {
ISOM_DECREASE_SIZE(ptr, 8);
ptr->earliest_presentation_time = gf_bs_read_u32(bs);
ptr->first_offset = gf_bs_read_u32(bs);
} else {
ISOM_DECREASE_SIZE(ptr, 16);
ptr->earliest_presentation_time = gf_bs_read_u64(bs);
ptr->first_offset = gf_bs_read_u64(bs);
}
ISOM_DECREASE_SIZE(ptr, 4);
gf_bs_read_u16(bs); /* reserved */
ptr->nb_refs = gf_bs_read_u16(bs);
ptr->refs = gf_malloc(sizeof(GF_SIDXReference)*ptr->nb_refs);
if (!ptr->refs) return GF_OUT_OF_MEM;
for (i=0; i<ptr->nb_refs; i++) {
ptr->refs[i].reference_type = gf_bs_read_int(bs, 1);
ptr->refs[i].reference_size = gf_bs_read_int(bs, 31);
ptr->refs[i].subsegment_duration = gf_bs_read_u32(bs);
ptr->refs[i].starts_with_SAP = gf_bs_read_int(bs, 1);
ptr->refs[i].SAP_type = gf_bs_read_int(bs, 3);
ptr->refs[i].SAP_delta_time = gf_bs_read_int(bs, 28);
ISOM_DECREASE_SIZE(ptr, 12);
}
return GF_OK;
}
| 0
|
349,891
|
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
u8 *mac)
{
u32 mac_addr[2];
u32 efuse_addr;
int err = 0;
u32 h = 0U;
u32 l = 0U;
if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
unsigned int ucp_0x370 = 0;
unsigned int rnd = 0;
get_random_bytes(&rnd, sizeof(unsigned int));
ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
}
efuse_addr = aq_hw_read_reg(self, 0x00000374U);
err = hw_atl_utils_fw_downld_dwords(self, efuse_addr + (40U * 4U),
mac_addr, ARRAY_SIZE(mac_addr));
if (err < 0) {
mac_addr[0] = 0U;
mac_addr[1] = 0U;
err = 0;
} else {
mac_addr[0] = __swab32(mac_addr[0]);
mac_addr[1] = __swab32(mac_addr[1]);
}
ether_addr_copy(mac, (u8 *)mac_addr);
if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
/* chip revision */
l = 0xE3000000U |
(0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) |
(0x00 << 16);
h = 0x8001300EU;
mac[5] = (u8)(0xFFU & l);
l >>= 8;
mac[4] = (u8)(0xFFU & l);
l >>= 8;
mac[3] = (u8)(0xFFU & l);
l >>= 8;
mac[2] = (u8)(0xFFU & l);
mac[1] = (u8)(0xFFU & h);
h >>= 8;
mac[0] = (u8)(0xFFU & h);
}
return err;
}
| 0
|
257,004
|
static int __init init_route4(void)
{
return register_tcf_proto_ops(&cls_route4_ops);
}
| 0
|
242,665
|
static inline const gchar *format_param_str(tvbuff_t *tvb, int offset, int len) {
char *param_str;
param_str = tvb_get_string_enc(wmem_packet_scope(), tvb, offset, len, ENC_UTF_8|ENC_NA);
if (len < 2) {
return param_str;
}
return format_text_chr(wmem_packet_scope(), param_str, len - 1, ' '); /* Leave terminating NULLs alone. */
}
| 0
|
206,670
|
negotiate_handshake_newstyle_options (void)
{
GET_CONN;
struct nbd_new_option new_option;
size_t nr_options;
bool list_seen = false;
uint64_t version;
uint32_t option;
uint32_t optlen;
struct nbd_export_name_option_reply handshake_finish;
const char *optname;
uint64_t exportsize;
struct backend *b;
for (nr_options = MAX_NR_OPTIONS; nr_options > 0; --nr_options) {
CLEANUP_FREE char *data = NULL;
if (conn_recv_full (&new_option, sizeof new_option,
"reading option: conn->recv: %m") == -1)
return -1;
version = be64toh (new_option.version);
if (version != NBD_NEW_VERSION) {
nbdkit_error ("unknown option version %" PRIx64
", expecting %" PRIx64,
version, NBD_NEW_VERSION);
return -1;
}
/* There is a maximum option length we will accept, regardless
* of the option type.
*/
optlen = be32toh (new_option.optlen);
if (optlen > MAX_REQUEST_SIZE) {
nbdkit_error ("client option data too long (%" PRIu32 ")", optlen);
return -1;
}
data = malloc (optlen + 1); /* Allowing a trailing NUL helps some uses */
if (data == NULL) {
nbdkit_error ("malloc: %m");
return -1;
}
option = be32toh (new_option.option);
optname = name_of_nbd_opt (option);
/* If the client lacks fixed newstyle support, it should only send
* NBD_OPT_EXPORT_NAME.
*/
if (!(conn->cflags & NBD_FLAG_FIXED_NEWSTYLE) &&
option != NBD_OPT_EXPORT_NAME) {
if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID))
return -1;
continue;
}
/* In --tls=require / FORCEDTLS mode the only options allowed
* before TLS negotiation are NBD_OPT_ABORT and NBD_OPT_STARTTLS.
*/
if (tls == 2 && !conn->using_tls &&
!(option == NBD_OPT_ABORT || option == NBD_OPT_STARTTLS)) {
if (send_newstyle_option_reply (option, NBD_REP_ERR_TLS_REQD))
return -1;
continue;
}
switch (option) {
case NBD_OPT_EXPORT_NAME:
if (conn_recv_full (data, optlen,
"read: %s: %m", name_of_nbd_opt (option)) == -1)
return -1;
if (check_export_name (option, data, optlen, optlen) == -1)
return -1;
/* We have to finish the handshake by sending handshake_finish.
* On failure, we have to disconnect.
*/
if (finish_newstyle_options (&exportsize, data, optlen) == -1)
return -1;
memset (&handshake_finish, 0, sizeof handshake_finish);
handshake_finish.exportsize = htobe64 (exportsize);
handshake_finish.eflags = htobe16 (conn->eflags);
if (conn->send (&handshake_finish,
(conn->cflags & NBD_FLAG_NO_ZEROES)
? offsetof (struct nbd_export_name_option_reply, zeroes)
: sizeof handshake_finish, 0) == -1) {
nbdkit_error ("write: %s: %m", optname);
return -1;
}
break;
case NBD_OPT_ABORT:
if (send_newstyle_option_reply (option, NBD_REP_ACK) == -1)
return -1;
debug ("client sent %s to abort the connection",
name_of_nbd_opt (option));
return -1;
case NBD_OPT_LIST:
if (optlen != 0) {
if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
== -1)
return -1;
if (conn_recv_full (data, optlen,
"read: %s: %m", name_of_nbd_opt (option)) == -1)
return -1;
continue;
}
if (list_seen) {
debug ("newstyle negotiation: %s: export list already advertised",
name_of_nbd_opt (option));
if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID) == -1)
return -1;
continue;
}
else {
/* Send back the exportname list. */
debug ("newstyle negotiation: %s: advertising exports",
name_of_nbd_opt (option));
if (send_newstyle_option_reply_exportnames (option, &nr_options) == -1)
return -1;
list_seen = true;
}
break;
case NBD_OPT_STARTTLS:
if (optlen != 0) {
if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
== -1)
return -1;
if (conn_recv_full (data, optlen,
"read: %s: %m", name_of_nbd_opt (option)) == -1)
return -1;
continue;
}
if (tls == 0) { /* --tls=off (NOTLS mode). */
#ifdef HAVE_GNUTLS
#define NO_TLS_REPLY NBD_REP_ERR_POLICY
#else
#define NO_TLS_REPLY NBD_REP_ERR_UNSUP
#endif
if (send_newstyle_option_reply (option, NO_TLS_REPLY) == -1)
return -1;
}
else /* --tls=on or --tls=require */ {
/* We can't upgrade to TLS twice on the same connection. */
if (conn->using_tls) {
if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID) == -1)
return -1;
continue;
}
/* We have to send the (unencrypted) reply before starting
* the handshake.
*/
if (send_newstyle_option_reply (option, NBD_REP_ACK) == -1)
return -1;
/* Upgrade the connection to TLS. Also performs access control. */
if (crypto_negotiate_tls (conn->sockin, conn->sockout) == -1)
return -1;
conn->using_tls = true;
debug ("using TLS on this connection");
/* Wipe out any cached state. */
conn->structured_replies = false;
for_each_backend (b) {
free (conn->default_exportname[b->i]);
conn->default_exportname[b->i] = NULL;
}
}
break;
case NBD_OPT_INFO:
case NBD_OPT_GO:
if (conn_recv_full (data, optlen, "read: %s: %m", optname) == -1)
return -1;
if (optlen < 6) { /* 32 bit export length + 16 bit nr info */
debug ("newstyle negotiation: %s option length < 6", optname);
if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
== -1)
return -1;
continue;
}
{
uint32_t exportnamelen;
uint16_t nrinfos;
uint16_t info;
size_t i;
/* Validate the name length and number of INFO requests. */
memcpy (&exportnamelen, &data[0], 4);
exportnamelen = be32toh (exportnamelen);
if (exportnamelen > optlen-6 /* NB optlen >= 6, see above */) {
debug ("newstyle negotiation: %s: export name too long", optname);
if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
== -1)
return -1;
continue;
}
memcpy (&nrinfos, &data[exportnamelen+4], 2);
nrinfos = be16toh (nrinfos);
if (optlen != 4 + exportnamelen + 2 + 2*nrinfos) {
debug ("newstyle negotiation: %s: "
"number of information requests incorrect", optname);
if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
== -1)
return -1;
continue;
}
/* As with NBD_OPT_EXPORT_NAME we print the export name and
* save it in the connection. If an earlier
* NBD_OPT_SET_META_CONTEXT used an export name, it must match
* or else we drop the support for that context.
*/
if (check_export_name (option, &data[4], exportnamelen,
optlen - 6) == -1) {
if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
== -1)
return -1;
continue;
}
/* The spec is confusing, but it is required that we send back
* NBD_INFO_EXPORT, even if the client did not request it!
* qemu client in particular does not request this, but will
* fail if we don't send it. Note that if .open fails, but we
* succeed at .close, then we merely return an error to the
* client and let them try another NBD_OPT, rather than
* disconnecting.
*/
if (finish_newstyle_options (&exportsize,
&data[4], exportnamelen) == -1) {
if (conn->top_context) {
if (backend_finalize (conn->top_context) == -1)
return -1;
backend_close (conn->top_context);
conn->top_context = NULL;
}
if (send_newstyle_option_reply (option, NBD_REP_ERR_UNKNOWN) == -1)
return -1;
continue;
}
if (send_newstyle_option_reply_info_export (option,
NBD_REP_INFO,
NBD_INFO_EXPORT,
exportsize) == -1)
return -1;
/* For now we send NBD_INFO_NAME and NBD_INFO_DESCRIPTION if
* requested, and ignore all other info requests (including
* NBD_INFO_EXPORT if it was requested, because we replied
* already above).
*/
for (i = 0; i < nrinfos; ++i) {
memcpy (&info, &data[4 + exportnamelen + 2 + i*2], 2);
info = be16toh (info);
switch (info) {
case NBD_INFO_EXPORT: /* ignore - reply sent above */ break;
case NBD_INFO_NAME:
{
const char *name = &data[4];
size_t namelen = exportnamelen;
if (exportnamelen == 0) {
name = backend_default_export (top, read_only);
if (!name) {
debug ("newstyle negotiation: %s: "
"NBD_INFO_NAME: no name to send", optname);
break;
}
namelen = -1;
}
if (send_newstyle_option_reply_info_str (option,
NBD_REP_INFO,
NBD_INFO_NAME,
name, namelen) == -1)
return -1;
}
break;
case NBD_INFO_DESCRIPTION:
{
const char *desc = backend_export_description (conn->top_context);
if (!desc) {
debug ("newstyle negotiation: %s: "
"NBD_INFO_DESCRIPTION: no description to send",
optname);
break;
}
if (send_newstyle_option_reply_info_str (option,
NBD_REP_INFO,
NBD_INFO_DESCRIPTION,
desc, -1) == -1)
return -1;
}
break;
default:
debug ("newstyle negotiation: %s: "
"ignoring NBD_INFO_* request %u (%s)",
optname, (unsigned) info, name_of_nbd_info (info));
break;
}
}
}
/* Unlike NBD_OPT_EXPORT_NAME, NBD_OPT_GO sends back an ACK
* or ERROR packet. If this was NBD_OPT_LIST, call .close.
*/
if (send_newstyle_option_reply (option, NBD_REP_ACK) == -1)
return -1;
if (option == NBD_OPT_INFO) {
if (backend_finalize (conn->top_context) == -1)
return -1;
backend_close (conn->top_context);
conn->top_context = NULL;
}
break;
case NBD_OPT_STRUCTURED_REPLY:
if (optlen != 0) {
if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
== -1)
return -1;
if (conn_recv_full (data, optlen,
"read: %s: %m", name_of_nbd_opt (option)) == -1)
return -1;
continue;
}
debug ("newstyle negotiation: %s: client requested structured replies",
name_of_nbd_opt (option));
if (no_sr) {
/* Must fail with ERR_UNSUP for qemu 4.2 to remain happy;
* but failing with ERR_POLICY would have been nicer.
*/
if (send_newstyle_option_reply (option, NBD_REP_ERR_UNSUP) == -1)
return -1;
debug ("newstyle negotiation: %s: structured replies are disabled",
name_of_nbd_opt (option));
break;
}
if (send_newstyle_option_reply (option, NBD_REP_ACK) == -1)
return -1;
conn->structured_replies = true;
break;
case NBD_OPT_LIST_META_CONTEXT:
case NBD_OPT_SET_META_CONTEXT:
{
uint32_t opt_index;
uint32_t exportnamelen;
uint32_t nr_queries;
uint32_t querylen;
const char *what;
if (conn_recv_full (data, optlen, "read: %s: %m", optname) == -1)
return -1;
/* Note that we support base:allocation whether or not the plugin
* supports can_extents.
*/
if (!conn->structured_replies) {
if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
== -1)
return -1;
continue;
}
/* Minimum length of the option payload is:
* 32 bit export name length followed by empty export name
* + 32 bit number of queries followed by no queries
* = 8 bytes.
*/
what = "optlen < 8";
if (optlen < 8) {
opt_meta_invalid_option_len:
debug ("newstyle negotiation: %s: invalid option length: %s",
optname, what);
if (send_newstyle_option_reply (option, NBD_REP_ERR_INVALID)
== -1)
return -1;
continue;
}
memcpy (&exportnamelen, &data[0], 4);
exportnamelen = be32toh (exportnamelen);
what = "validating export name";
if (check_export_name (option, &data[4], exportnamelen,
optlen - 8) == -1)
goto opt_meta_invalid_option_len;
/* Remember the export name: the NBD spec says that if the client
* later uses NBD_OPT_GO on a different export, then the context
* returned here is not usable.
*/
if (option == NBD_OPT_SET_META_CONTEXT) {
conn->exportname_from_set_meta_context =
strndup (&data[4], exportnamelen);
if (conn->exportname_from_set_meta_context == NULL) {
nbdkit_error ("malloc: %m");
return -1;
}
}
opt_index = 4 + exportnamelen;
/* Read the number of queries. */
what = "reading number of queries";
if (opt_index+4 > optlen)
goto opt_meta_invalid_option_len;
memcpy (&nr_queries, &data[opt_index], 4);
nr_queries = be32toh (nr_queries);
opt_index += 4;
/* for LIST: nr_queries == 0 means return all meta contexts
* for SET: nr_queries == 0 means reset all contexts
*/
debug ("newstyle negotiation: %s: %s count: %d", optname,
option == NBD_OPT_LIST_META_CONTEXT ? "query" : "set",
nr_queries);
if (option == NBD_OPT_SET_META_CONTEXT)
conn->meta_context_base_allocation = false;
if (nr_queries == 0) {
if (option == NBD_OPT_LIST_META_CONTEXT) {
if (send_newstyle_option_reply_meta_context (option,
NBD_REP_META_CONTEXT,
0, "base:allocation")
== -1)
return -1;
}
if (send_newstyle_option_reply (option, NBD_REP_ACK) == -1)
return -1;
}
else {
/* Read and answer each query. */
while (nr_queries > 0) {
what = "reading query string length";
if (opt_index+4 > optlen)
goto opt_meta_invalid_option_len;
memcpy (&querylen, &data[opt_index], 4);
querylen = be32toh (querylen);
opt_index += 4;
what = "reading query string";
if (check_string (option, &data[opt_index], querylen,
optlen - opt_index, "meta context query") == -1)
goto opt_meta_invalid_option_len;
debug ("newstyle negotiation: %s: %s %.*s",
optname,
option == NBD_OPT_LIST_META_CONTEXT ? "query" : "set",
(int) querylen, &data[opt_index]);
/* For LIST, "base:" returns all supported contexts in the
* base namespace. We only support "base:allocation".
*/
if (option == NBD_OPT_LIST_META_CONTEXT &&
querylen == 5 &&
strncmp (&data[opt_index], "base:", 5) == 0) {
if (send_newstyle_option_reply_meta_context
(option, NBD_REP_META_CONTEXT,
0, "base:allocation") == -1)
return -1;
}
/* "base:allocation" requested by name. */
else if (querylen == 15 &&
strncmp (&data[opt_index], "base:allocation", 15) == 0) {
if (send_newstyle_option_reply_meta_context
(option, NBD_REP_META_CONTEXT,
option == NBD_OPT_SET_META_CONTEXT
? base_allocation_id : 0,
"base:allocation") == -1)
return -1;
if (option == NBD_OPT_SET_META_CONTEXT)
conn->meta_context_base_allocation = true;
}
/* Every other query must be ignored. */
opt_index += querylen;
nr_queries--;
}
if (send_newstyle_option_reply (option, NBD_REP_ACK) == -1)
return -1;
}
debug ("newstyle negotiation: %s: reply complete", optname);
}
break;
default:
/* Unknown option. */
if (send_newstyle_option_reply (option, NBD_REP_ERR_UNSUP) == -1)
return -1;
if (conn_recv_full (data, optlen,
"reading unknown option data: conn->recv: %m") == -1)
return -1;
}
/* Note, since it's not very clear from the protocol doc, that the
* client must send NBD_OPT_EXPORT_NAME or NBD_OPT_GO last, and
* that ends option negotiation.
*/
if (option == NBD_OPT_EXPORT_NAME || option == NBD_OPT_GO)
break;
}
if (nr_options == 0) {
nbdkit_error ("client spent too much time negotiating without selecting "
"an export");
return -1;
}
/* In --tls=require / FORCEDTLS mode, we must have upgraded to TLS
* by the time we finish option negotiation. If not, give up.
*/
if (tls == 2 && !conn->using_tls) {
nbdkit_error ("non-TLS client tried to connect in --tls=require mode");
return -1;
}
return 0;
}
| 1
|
400,740
|
static void pipe_advance(struct iov_iter *i, size_t size)
{
struct pipe_inode_info *pipe = i->pipe;
if (size) {
struct pipe_buffer *buf;
unsigned int p_mask = pipe->ring_size - 1;
unsigned int i_head = i->head;
size_t off = i->iov_offset, left = size;
if (off) /* make it relative to the beginning of buffer */
left += off - pipe->bufs[i_head & p_mask].offset;
while (1) {
buf = &pipe->bufs[i_head & p_mask];
if (left <= buf->len)
break;
left -= buf->len;
i_head++;
}
i->head = i_head;
i->iov_offset = buf->offset + left;
}
i->count -= size;
/* ... and discard everything past that point */
pipe_truncate(i);
}
| 0
|
517,447
|
static void print_service_rules_fsflags(HttpResponse res, Service_T s) {
for (FsFlag_T l = s->fsflaglist; l; l = l->next) {
StringBuffer_append(res->outputbuffer, "<tr class='rule'><td>Filesystem flags</td><td>");
Util_printRule(res->outputbuffer, l->action, "If changed");
StringBuffer_append(res->outputbuffer, "</td></tr>");
}
}
| 0
|
90,207
|
virtual bool cellular_enabled() const {
return enabled_devices_ & (1 << TYPE_CELLULAR);
}
| 0
|
437,725
|
static inline u16 ns_to_clock_divider(unsigned int ns)
{
return count_to_clock_divider(
DIV_ROUND_CLOSEST(CX23888_IR_REFCLK_FREQ / 1000000 * ns, 1000));
}
| 0
|
356,685
|
template <class T> T* Statement::Bind(const Napi::CallbackInfo& info, int start, int last) {
Napi::Env env = info.Env();
Napi::HandleScope scope(env);
if (last < 0) last = info.Length();
Napi::Function callback;
if (last > start && info[last - 1].IsFunction()) {
callback = info[last - 1].As<Napi::Function>();
last--;
}
T* baton = new T(this, callback);
if (start < last) {
if (info[start].IsArray()) {
Napi::Array array = info[start].As<Napi::Array>();
int length = array.Length();
// Note: bind parameters start with 1.
for (int i = 0, pos = 1; i < length; i++, pos++) {
baton->parameters.push_back(BindParameter((array).Get(i), pos));
}
}
else if (!info[start].IsObject() || OtherInstanceOf(info[start].As<Object>(), "RegExp") || OtherInstanceOf(info[start].As<Object>(), "Date") || info[start].IsBuffer()) {
// Parameters directly in array.
// Note: bind parameters start with 1.
for (int i = start, pos = 1; i < last; i++, pos++) {
baton->parameters.push_back(BindParameter(info[i], pos));
}
}
else if (info[start].IsObject()) {
Napi::Object object = info[start].As<Napi::Object>();
Napi::Array array = object.GetPropertyNames();
int length = array.Length();
for (int i = 0; i < length; i++) {
Napi::Value name = (array).Get(i);
Napi::Number num = name.ToNumber();
if (num.Int32Value() == num.DoubleValue()) {
baton->parameters.push_back(
BindParameter((object).Get(name), num.Int32Value()));
}
else {
baton->parameters.push_back(BindParameter((object).Get(name),
name.As<Napi::String>().Utf8Value().c_str()));
}
}
}
else {
return NULL;
}
}
return baton;
}
| 0
|
343,209
|
static unsigned int open_max(void)
{
long z;
if ((z = (long) sysconf(_SC_OPEN_MAX)) < 0L) {
perror("_SC_OPEN_MAX");
_EXIT(EXIT_FAILURE);
}
return (unsigned int) z;
}
| 0
|
275,507
|
njs_vm_array_length(njs_vm_t *vm, njs_value_t *value, int64_t *length)
{
if (njs_fast_path(njs_is_array(value))) {
*length = njs_array(value)->length;
}
return njs_object_length(vm, value, length);
}
| 0
|
220,400
|
ary_subseq(mrb_state *mrb, struct RArray *a, mrb_int beg, mrb_int len)
{
struct RArray *b;
if (!ARY_SHARED_P(a) && len <= ARY_SHIFT_SHARED_MIN) {
return mrb_ary_new_from_values(mrb, len, ARY_PTR(a)+beg);
}
ary_make_shared(mrb, a);
b = MRB_OBJ_ALLOC(mrb, MRB_TT_ARRAY, mrb->array_class);
b->as.heap.ptr = a->as.heap.ptr + beg;
b->as.heap.len = len;
b->as.heap.aux.shared = a->as.heap.aux.shared;
b->as.heap.aux.shared->refcnt++;
ARY_SET_SHARED_FLAG(b);
return mrb_obj_value(b);
}
| 0
|
473,922
|
gbk_mbc_to_code(const UChar* p, const UChar* end, OnigEncoding enc)
{
return onigenc_mbn_mbc_to_code(enc, p, end);
}
| 0
|
196,629
|
void ComputeAsync(OpKernelContext* context, DoneCallback done) final {
const Tensor& input = context->input(0);
const Tensor& rhs = context->input(1);
const int ndims = input.dims();
const int64 n = input.dim_size(ndims - 1);
const int64 nrhs = rhs.dim_size(ndims - 1);
// Validate inputs.
OP_REQUIRES_ASYNC(
context, ndims >= 2,
errors::InvalidArgument("Input must have rank >= 2, got ", ndims),
done);
OP_REQUIRES_ASYNC(context, rhs.dims() == ndims,
errors::InvalidArgument(
"Input and right-hand side must have same rank, got ",
ndims, " != ", rhs.dims()),
done);
OP_REQUIRES_ASYNC(
context, input.dim_size(ndims - 2) == n,
errors::InvalidArgument("Input matrices must be squares, got",
input.dim_size(ndims - 2), " != ", n),
done);
OP_REQUIRES_ASYNC(context, rhs.dim_size(ndims - 2) == n,
errors::InvalidArgument(
"Input matrix and right-hand side must have the "
"same number of rows, got",
n, " != ", rhs.dim_size(ndims - 2)),
done);
// Allocate output.
Tensor* output;
OP_REQUIRES_OK_ASYNC(
context,
context->forward_input_or_allocate_output({1}, 0, rhs.shape(), &output),
done);
// To be consistent with the MatrixInverse op, we define the solution for
// an empty set of equations as the empty matrix.
if (input.NumElements() == 0 || rhs.NumElements() == 0) {
done();
return;
}
// TODO(rmlarsen): Convert to std::make_unique when available.
std::unique_ptr<CudaSolver> solver(new CudaSolver(context));
// Make a copy of the input for the factorization step, or, if adjoint_ is
// false, try to reuse the input buffer if this op owns it exclusively.
Tensor input_copy;
const GPUDevice& device = context->eigen_device<GPUDevice>();
if (adjoint_) {
// For the adjoint case, it is simpler to always make a transposed copy up
// front.
OP_REQUIRES_OK_ASYNC(
context,
solver->allocate_scoped_tensor(DataTypeToEnum<Scalar>::value,
input.shape(), &input_copy),
done);
OP_REQUIRES_OK_ASYNC(context,
DoMatrixTranspose(device, input, &input_copy), done);
} else {
OP_REQUIRES_OK_ASYNC(
context,
solver->forward_input_or_allocate_scoped_tensor(
{0}, DataTypeToEnum<Scalar>::value, input.shape(), &input_copy),
done);
if (!input.SharesBufferWith(input_copy)) {
device.memcpy(input_copy.flat<Scalar>().data(),
input.flat<Scalar>().data(),
input.NumElements() * sizeof(Scalar));
}
}
auto input_copy_reshaped = input_copy.template flat_inner_dims<Scalar, 3>();
const int64 batch_size = input_copy_reshaped.dimension(0);
// Allocate pivots on the device.
Tensor pivots;
OP_REQUIRES_OK_ASYNC(
context,
solver->allocate_scoped_tensor(DataTypeToEnum<int>::value,
TensorShape{batch_size, n}, &pivots),
done);
auto pivots_mat = pivots.template matrix<int>();
// 1. Compute the partially pivoted LU factorization(s) of the
// matrix/matrices.
std::vector<DeviceLapackInfo> dev_info;
auto input_copy_ptrs = solver->GetScratchSpace<uint8>(
sizeof(Scalar*) * batch_size, "input_copt_ptrs",
/* on_host */ true);
const int kMaxMatrixSizeToBatchSizeRatio = 128;
const bool use_batched_solver =
n <= kMaxMatrixSizeToBatchSizeRatio * batch_size;
if (use_batched_solver) {
// For small matrices or large batch sizes, we use the batched interface
// from cuBlas.
const Scalar** input_copy_ptrs_base =
reinterpret_cast<const Scalar**>(input_copy_ptrs.mutable_data());
for (int batch = 0; batch < batch_size; ++batch) {
input_copy_ptrs_base[batch] = &input_copy_reshaped(batch, 0, 0);
}
dev_info.push_back(
solver->GetDeviceLapackInfo(batch_size, "getrfBatched"));
OP_REQUIRES_OK_ASYNC(
context,
solver->GetrfBatched(n, input_copy_ptrs_base, n, pivots_mat.data(),
&dev_info.back(), batch_size),
done);
} else {
// For small batch sizes or large matrices, we use the non-batched
// interface from cuSolver, which is much faster for large matrices.
dev_info.push_back(solver->GetDeviceLapackInfo(batch_size, "getrf"));
for (int batch = 0; batch < batch_size; ++batch) {
OP_REQUIRES_OK_ASYNC(
context,
solver->Getrf(n, n, &input_copy_reshaped(batch, 0, 0), n,
&pivots_mat(batch, 0), &dev_info.back()(batch)),
done);
}
}
// 2. Make a transposed copy of the right-hand sides. This is necessary
// because cuBLAS assumes column-major storage while TensorFlow TF uses
// row-major.
TensorShape transposed_rhs_shape(rhs.shape());
transposed_rhs_shape.RemoveLastDims(2);
transposed_rhs_shape.AddDim(nrhs);
transposed_rhs_shape.AddDim(n);
Tensor transposed_rhs;
OP_REQUIRES_OK_ASYNC(
context,
solver->allocate_scoped_tensor(DataTypeToEnum<Scalar>::value,
transposed_rhs_shape, &transposed_rhs),
done);
if (nrhs > 1) {
OP_REQUIRES_OK_ASYNC(
context, DoMatrixTranspose(device, rhs, &transposed_rhs), done);
} else {
device.memcpy(transposed_rhs.flat<Scalar>().data(),
rhs.flat<Scalar>().data(),
rhs.NumElements() * sizeof(Scalar));
}
// 3. Solve op(A) X = B (in column major form).
// We use a trick here: If adjoint_ is true, we converted A to column major
// form above. If adjoint is false then I leave A in row-major form and use
// trans_a = CUBLAS_OP_T to effectively transform it to column-major on the
// fly. (This means that we actually use the LU-factorization of A^T in that
// case, but that is equally good for solving AX=B). This way we save an
// explicit transpose in the more common case of adjoint_ == false.
auto input_copy_ptr_array = solver->GetScratchSpace<uint8>(
sizeof(Scalar*) * batch_size, "input_copy_ptr_array",
/* on_host */ true);
auto transposed_rhs_ptr_array = solver->GetScratchSpace<uint8>(
sizeof(Scalar*) * batch_size, "transposed_rhs_ptr_array",
/* on_host */ true);
auto transposed_rhs_reshaped =
transposed_rhs.template flat_inner_dims<Scalar, 3>();
if (use_batched_solver) {
const Scalar** input_copy_ptrs_base =
reinterpret_cast<const Scalar**>(input_copy_ptr_array.mutable_data());
const Scalar** transposed_rhs_ptrs_base =
reinterpret_cast<const Scalar**>(
transposed_rhs_ptr_array.mutable_data());
for (int batch = 0; batch < batch_size; ++batch) {
input_copy_ptrs_base[batch] = &input_copy_reshaped(batch, 0, 0);
transposed_rhs_ptrs_base[batch] = &transposed_rhs_reshaped(batch, 0, 0);
}
int host_info = 0;
OP_REQUIRES_OK_ASYNC(
context,
solver->GetrsBatched(adjoint_ ? CUBLAS_OP_C : CUBLAS_OP_T, n, nrhs,
input_copy_ptrs_base, n, pivots_mat.data(),
transposed_rhs_ptrs_base, n, &host_info,
batch_size),
done);
OP_REQUIRES_ASYNC(
context, host_info == 0,
errors::InvalidArgument("The ", -host_info,
"'th argument to cublas*getrsBatched had "
"an illegal value."),
done);
} else {
dev_info.push_back(solver->GetDeviceLapackInfo(batch_size, "getrs"));
for (int batch = 0; batch < batch_size; ++batch) {
OP_REQUIRES_OK_ASYNC(
context,
solver->Getrs(adjoint_ ? CUBLAS_OP_C : CUBLAS_OP_T, n, nrhs,
&input_copy_reshaped(batch, 0, 0), n,
&pivots_mat(batch, 0),
&transposed_rhs_reshaped(batch, 0, 0), n,
&dev_info.back()(batch)),
done);
}
}
// 4. Transpose X to get the final result in row-major form.
if (nrhs > 1) {
OP_REQUIRES_OK_ASYNC(
context, DoMatrixTranspose(device, transposed_rhs, output), done);
} else {
device.memcpy(output->flat<Scalar>().data(),
transposed_rhs.flat<Scalar>().data(),
transposed_rhs.NumElements() * sizeof(Scalar));
}
// Callback for checking info after kernels finish. Also capture the
// temporary Tensors/ScratchSpace so they don't get deallocated before the
// kernels run. TODO(rmlarsen): Use move capture once C++14 becomes
// available.
auto info_checker = [context, done, dev_info](
const Status& status,
const std::vector<HostLapackInfo>& host_infos) {
if (!status.ok() && errors::IsInvalidArgument(status) &&
!host_infos.empty()) {
for (int i = 0; i < host_infos[0].size(); ++i) {
// Match the CPU error message for singular matrices. Otherwise
// just print the original error message from the status below.
OP_REQUIRES_ASYNC(context, host_infos[0].data()[i] <= 0,
errors::InvalidArgument(kErrMsg), done);
}
}
OP_REQUIRES_OK_ASYNC(context, status, done);
done();
};
CudaSolver::CheckLapackInfoAndDeleteSolverAsync(std::move(solver), dev_info,
std::move(info_checker));
}
| 1
|
366,286
|
void __detach_mounts(struct dentry *dentry)
{
struct mountpoint *mp;
struct mount *mnt;
namespace_lock();
lock_mount_hash();
mp = lookup_mountpoint(dentry);
if (!mp)
goto out_unlock;
event++;
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
umount_mnt(mnt);
hlist_add_head(&mnt->mnt_umount, &unmounted);
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
put_mountpoint(mp);
out_unlock:
unlock_mount_hash();
namespace_unlock();
}
| 0
|
341,818
|
zstring(i_ctx_t *i_ctx_p)
{
os_ptr op = osp;
byte *sbody;
uint size;
check_type(*op, t_integer);
if (op->value.intval < 0 )
return_error(gs_error_rangecheck);
if (op->value.intval > max_string_size )
return_error(gs_error_limitcheck); /* to match Distiller */
size = op->value.intval;
sbody = ialloc_string(size, "string");
if (sbody == 0)
return_error(gs_error_VMerror);
make_string(op, a_all | icurrent_space, size, sbody);
memset(sbody, 0, size);
return 0;
}
| 0
|
432,726
|
static void draw_stroke_color_rgb( wmfAPI* API, const wmfRGB* rgb )
{
PixelWand
*stroke_color;
stroke_color=NewPixelWand();
PixelSetRedQuantum(stroke_color,ScaleCharToQuantum(rgb->r));
PixelSetGreenQuantum(stroke_color,ScaleCharToQuantum(rgb->g));
PixelSetBlueQuantum(stroke_color,ScaleCharToQuantum(rgb->b));
PixelSetAlphaQuantum(stroke_color,OpaqueAlpha);
DrawSetStrokeColor(WmfDrawingWand,stroke_color);
stroke_color=DestroyPixelWand(stroke_color);
}
| 0
|
224,193
|
gen_values(codegen_scope *s, node *t, int val, int limit)
{
int n = 0;
int first = 1;
int slimit = GEN_VAL_STACK_MAX;
if (limit == 0) limit = GEN_LIT_ARY_MAX;
if (cursp() >= slimit) slimit = INT16_MAX;
if (!val) {
while (t) {
codegen(s, t->car, NOVAL);
n++;
t = t->cdr;
}
return n;
}
while (t) {
int is_splat = nint(t->car->car) == NODE_SPLAT;
if (is_splat || cursp() >= slimit) { /* flush stack */
pop_n(n);
if (first) {
if (n == 0) {
genop_1(s, OP_LOADNIL, cursp());
}
else {
genop_2(s, OP_ARRAY, cursp(), n);
}
push();
first = 0;
limit = GEN_LIT_ARY_MAX;
}
else if (n > 0) {
pop();
genop_2(s, OP_ARYPUSH, cursp(), n);
push();
}
n = 0;
}
codegen(s, t->car, val);
if (is_splat) {
pop(); pop();
genop_1(s, OP_ARYCAT, cursp());
push();
}
else {
n++;
}
t = t->cdr;
}
if (!first) {
pop();
if (n > 0) {
pop_n(n);
genop_2(s, OP_ARYPUSH, cursp(), n);
}
return -1; /* variable length */
}
else if (n > limit) {
pop_n(n);
genop_2(s, OP_ARRAY, cursp(), n);
return -1;
}
return n;
}
| 0
|
236,162
|
void dlay_box_del(GF_Box *s)
{
gf_free(s);
}
| 0
|
225,628
|
GF_Err pdin_box_size(GF_Box *s)
{
GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox *)s;
ptr->size += 8*ptr->count;
return GF_OK;
| 0
|
512,447
|
bool pushable_cond_checker_for_subquery(uchar *arg)
{
return excl_dep_on_in_subq_left_part((Item_in_subselect *)arg);
}
| 0
|
238,588
|
static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
{
int load_reg;
int err;
switch (insn->imm) {
case BPF_ADD:
case BPF_ADD | BPF_FETCH:
case BPF_AND:
case BPF_AND | BPF_FETCH:
case BPF_OR:
case BPF_OR | BPF_FETCH:
case BPF_XOR:
case BPF_XOR | BPF_FETCH:
case BPF_XCHG:
case BPF_CMPXCHG:
break;
default:
verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
return -EINVAL;
}
if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
verbose(env, "invalid atomic operand size\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
if (insn->imm == BPF_CMPXCHG) {
/* Check comparison of R0 with memory location */
const u32 aux_reg = BPF_REG_0;
err = check_reg_arg(env, aux_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, aux_reg)) {
verbose(env, "R%d leaks addr into mem\n", aux_reg);
return -EACCES;
}
}
if (is_pointer_value(env, insn->src_reg)) {
verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
return -EACCES;
}
if (is_ctx_reg(env, insn->dst_reg) ||
is_pkt_reg(env, insn->dst_reg) ||
is_flow_key_reg(env, insn->dst_reg) ||
is_sk_reg(env, insn->dst_reg)) {
verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
insn->dst_reg,
reg_type_str(env, reg_state(env, insn->dst_reg)->type));
return -EACCES;
}
if (insn->imm & BPF_FETCH) {
if (insn->imm == BPF_CMPXCHG)
load_reg = BPF_REG_0;
else
load_reg = insn->src_reg;
/* check and record load of old value */
err = check_reg_arg(env, load_reg, DST_OP);
if (err)
return err;
} else {
/* This instruction accesses a memory location but doesn't
* actually load it into a register.
*/
load_reg = -1;
}
/* Check whether we can read the memory, with second call for fetch
* case to simulate the register fill.
*/
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1, true);
if (!err && load_reg >= 0)
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, load_reg,
true);
if (err)
return err;
/* Check whether we can write into the same memory. */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE, -1, true);
if (err)
return err;
return 0;
}
| 0
|
359,295
|
DEFUN (neighbor_set_peer_group,
neighbor_set_peer_group_cmd,
NEIGHBOR_CMD "peer-group WORD",
NEIGHBOR_STR
NEIGHBOR_ADDR_STR
"Member of the peer-group\n"
"peer-group name\n")
{
int ret;
as_t as;
union sockunion su;
struct bgp *bgp;
struct peer_group *group;
bgp = vty->index;
ret = str2sockunion (argv[0], &su);
if (ret < 0)
{
vty_out (vty, "%% Malformed address: %s%s", argv[0], VTY_NEWLINE);
return CMD_WARNING;
}
group = peer_group_lookup (bgp, argv[1]);
if (! group)
{
vty_out (vty, "%% Configure the peer-group first%s", VTY_NEWLINE);
return CMD_WARNING;
}
if (peer_address_self_check (&su))
{
vty_out (vty, "%% Can not configure the local system as neighbor%s",
VTY_NEWLINE);
return CMD_WARNING;
}
ret = peer_group_bind (bgp, &su, group, bgp_node_afi (vty),
bgp_node_safi (vty), &as);
if (ret == BGP_ERR_PEER_GROUP_PEER_TYPE_DIFFERENT)
{
vty_out (vty, "%% Peer with AS %d cannot be in this peer-group, members must be all internal or all external%s", as, VTY_NEWLINE);
return CMD_WARNING;
}
return bgp_vty_return (vty, ret);
}
| 0
|
101,674
|
void WebProcessProxy::didReceiveMessage(CoreIPC::Connection* connection, CoreIPC::MessageID messageID, CoreIPC::MessageDecoder& decoder)
{
if (m_messageReceiverMap.dispatchMessage(connection, messageID, decoder))
return;
if (m_context->dispatchMessage(connection, messageID, decoder))
return;
if (decoder.messageReceiverName() == Messages::WebProcessProxy::messageReceiverName()) {
didReceiveWebProcessProxyMessage(connection, messageID, decoder);
return;
}
#if ENABLE(CUSTOM_PROTOCOLS)
if (decoder.messageReceiverName() == Messages::CustomProtocolManagerProxy::messageReceiverName()) {
#if ENABLE(NETWORK_PROCESS)
ASSERT(!context()->usesNetworkProcess());
#endif
m_customProtocolManagerProxy.didReceiveMessage(connection, messageID, decoder);
return;
}
#endif
uint64_t pageID = decoder.destinationID();
if (!pageID)
return;
WebPageProxy* pageProxy = webPage(pageID);
if (!pageProxy)
return;
pageProxy->didReceiveMessage(connection, messageID, decoder);
}
| 0
|
462,259
|
static void* clone_uint64_attr(pj_pool_t *pool, const void *src)
{
pj_stun_uint64_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_uint64_attr);
pj_memcpy(dst, src, sizeof(pj_stun_uint64_attr));
return (void*)dst;
}
| 0
|
355,631
|
check_can_index(typval_T *rettv, int evaluate, int verbose)
{
switch (rettv->v_type)
{
case VAR_FUNC:
case VAR_PARTIAL:
if (verbose)
emsg(_(e_cannot_index_a_funcref));
return FAIL;
case VAR_FLOAT:
#ifdef FEAT_FLOAT
if (verbose)
emsg(_(e_using_float_as_string));
return FAIL;
#endif
case VAR_BOOL:
case VAR_SPECIAL:
case VAR_JOB:
case VAR_CHANNEL:
case VAR_INSTR:
if (verbose)
emsg(_(e_cannot_index_special_variable));
return FAIL;
case VAR_UNKNOWN:
case VAR_ANY:
case VAR_VOID:
if (evaluate)
{
emsg(_(e_cannot_index_special_variable));
return FAIL;
}
// FALLTHROUGH
case VAR_STRING:
case VAR_LIST:
case VAR_DICT:
case VAR_BLOB:
break;
case VAR_NUMBER:
if (in_vim9script())
emsg(_(e_cannot_index_number));
break;
}
return OK;
}
| 0
|
310,016
|
reset_flush(void)
{
if (my_file != 0)
fflush(my_file);
}
| 0
|
317,024
|
static struct inode_security_struct *inode_security_rcu(struct inode *inode, bool rcu)
{
int error;
error = __inode_security_revalidate(inode, NULL, !rcu);
if (error)
return ERR_PTR(error);
return selinux_inode(inode);
}
| 0
|
262,074
|
static void AddInstanceStatsToMap(
const int32_t instance, const int32_t feature_dim, const int32_t bucket_id,
const int32_t logits_dims, const int32_t stats_dims,
StatsPartitionMap* stats_map, const TTypes<float>::ConstMatrix& gradients,
const TTypes<float>::ConstMatrix& hessians,
const TTypes<int32>::ConstVec& node_ids) {
const int32_t node_id = node_ids(instance);
const auto key = StatsPartitionKey(node_id, feature_dim, bucket_id);
std::pair<StatsPartitionIterator, bool> const& insert_result =
stats_map->insert(StatsPartitionIterator::value_type(
key, std::vector<float>(stats_dims, 0.0f)));
auto& stats = insert_result.first->second;
for (int stat_dim = 0; stat_dim < logits_dims; ++stat_dim) {
stats[stat_dim] += gradients(instance, stat_dim);
}
for (int stat_dim = logits_dims; stat_dim < stats_dims; ++stat_dim) {
stats[stat_dim] += hessians(instance, stat_dim - logits_dims);
}
}
| 0
|
390,592
|
ProcXkbSetDebuggingFlags(ClientPtr client)
{
CARD32 newFlags,newCtrls,extraLength;
xkbSetDebuggingFlagsReply rep;
int rc;
REQUEST(xkbSetDebuggingFlagsReq);
REQUEST_AT_LEAST_SIZE(xkbSetDebuggingFlagsReq);
rc = XaceHook(XACE_SERVER_ACCESS, client, DixDebugAccess);
if (rc != Success)
return rc;
newFlags= xkbDebugFlags&(~stuff->affectFlags);
newFlags|= (stuff->flags&stuff->affectFlags);
newCtrls= xkbDebugCtrls&(~stuff->affectCtrls);
newCtrls|= (stuff->ctrls&stuff->affectCtrls);
if (xkbDebugFlags || newFlags || stuff->msgLength) {
ErrorF("[xkb] XkbDebug: Setting debug flags to 0x%lx\n",(long)newFlags);
if (newCtrls!=xkbDebugCtrls)
ErrorF("[xkb] XkbDebug: Setting debug controls to 0x%lx\n",(long)newCtrls);
}
extraLength= (stuff->length<<2)-sz_xkbSetDebuggingFlagsReq;
if (stuff->msgLength>0) {
char *msg;
if (extraLength<XkbPaddedSize(stuff->msgLength)) {
ErrorF("[xkb] XkbDebug: msgLength= %d, length= %ld (should be %d)\n",
stuff->msgLength,(long)extraLength,
XkbPaddedSize(stuff->msgLength));
return BadLength;
}
msg= (char *)&stuff[1];
if (msg[stuff->msgLength-1]!='\0') {
ErrorF("[xkb] XkbDebug: message not null-terminated\n");
return BadValue;
}
ErrorF("[xkb] XkbDebug: %s\n",msg);
}
xkbDebugFlags = newFlags;
xkbDebugCtrls = newCtrls;
XkbDisableLockActions= (xkbDebugCtrls&XkbDF_DisableLocks);
rep.type= X_Reply;
rep.length = 0;
rep.sequenceNumber = client->sequence;
rep.currentFlags = newFlags;
rep.currentCtrls = newCtrls;
rep.supportedFlags = ~0;
rep.supportedCtrls = ~0;
if ( client->swapped ) {
register int n;
swaps(&rep.sequenceNumber, n);
swapl(&rep.currentFlags, n);
swapl(&rep.currentCtrls, n);
swapl(&rep.supportedFlags, n);
swapl(&rep.supportedCtrls, n);
}
WriteToClient(client,SIZEOF(xkbSetDebuggingFlagsReply), (char *)&rep);
return client->noClientException;
}
| 0
|
400,767
|
static ssize_t iter_xarray_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize,
unsigned maxpages, size_t *_start_offset)
{
unsigned nr, offset;
pgoff_t index, count;
size_t size = maxsize, actual;
loff_t pos;
if (!size || !maxpages)
return 0;
pos = i->xarray_start + i->iov_offset;
index = pos >> PAGE_SHIFT;
offset = pos & ~PAGE_MASK;
*_start_offset = offset;
count = 1;
if (size > PAGE_SIZE - offset) {
size -= PAGE_SIZE - offset;
count += size >> PAGE_SHIFT;
size &= ~PAGE_MASK;
if (size)
count++;
}
if (count > maxpages)
count = maxpages;
nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
if (nr == 0)
return 0;
actual = PAGE_SIZE * nr;
actual -= offset;
if (nr == count && size > 0) {
unsigned last_offset = (nr > 1) ? 0 : offset;
actual -= PAGE_SIZE - (last_offset + size);
}
return actual;
}
| 0
|
293,771
|
static ut64 r_ptr(ut8 *buf, RKernelCacheObj *obj) {
ut64 decorated_addr = r_read_le64 (buf);
return K_PPTR (decorated_addr);
}
| 0
|
508,771
|
void free_cache(READ_RECORD *info)
{
if (info->cache)
{
my_free_lock(info->cache);
info->cache=0;
}
}
| 0
|
484,791
|
static int xennet_create_page_pool(struct netfront_queue *queue)
{
int err;
struct page_pool_params pp_params = {
.order = 0,
.flags = 0,
.pool_size = NET_RX_RING_SIZE,
.nid = NUMA_NO_NODE,
.dev = &queue->info->netdev->dev,
.offset = XDP_PACKET_HEADROOM,
.max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
};
queue->page_pool = page_pool_create(&pp_params);
if (IS_ERR(queue->page_pool)) {
err = PTR_ERR(queue->page_pool);
queue->page_pool = NULL;
return err;
}
err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
queue->id, 0);
if (err) {
netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
goto err_free_pp;
}
err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
MEM_TYPE_PAGE_POOL, queue->page_pool);
if (err) {
netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
goto err_unregister_rxq;
}
return 0;
err_unregister_rxq:
xdp_rxq_info_unreg(&queue->xdp_rxq);
err_free_pp:
page_pool_destroy(queue->page_pool);
queue->page_pool = NULL;
return err;
}
| 0
|
195,023
|
void Compute(OpKernelContext* context) override {
const Tensor* input_indices;
const Tensor* input_values;
const Tensor* input_shape;
SparseTensorsMap* map;
OP_REQUIRES_OK(context, context->input("sparse_indices", &input_indices));
OP_REQUIRES_OK(context, context->input("sparse_values", &input_values));
OP_REQUIRES_OK(context, context->input("sparse_shape", &input_shape));
OP_REQUIRES_OK(context, GetMap(context, true /* is_writing */, &map));
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices->shape()),
errors::InvalidArgument(
"Input indices should be a matrix but received shape ",
input_indices->shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()),
errors::InvalidArgument(
"Input values should be a vector but received shape ",
input_values->shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()),
errors::InvalidArgument(
"Input shape should be a vector but received shape ",
input_shape->shape().DebugString()));
OP_REQUIRES(
context,
input_values->shape().dim_size(0) == input_indices->shape().dim_size(0),
errors::InvalidArgument(
"Number of values must match first dimension of indices. ", "Got ",
input_values->shape().dim_size(0),
" values, indices shape: ", input_indices->shape().DebugString()));
OP_REQUIRES(
context,
input_shape->shape().dim_size(0) == input_indices->shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices. ",
"Got ", input_shape->shape().dim_size(0),
" dimensions, indices shape: ",
input_indices->shape().DebugString()));
int rank = input_shape->NumElements();
OP_REQUIRES(
context, rank > 1,
errors::InvalidArgument(
"Rank of input SparseTensor should be > 1, but saw rank: ", rank));
auto input_shape_vec = input_shape->vec<int64_t>();
int new_num_elements = 1;
bool overflow_ocurred = false;
for (int i = 0; i < input_shape_vec.size(); i++) {
new_num_elements =
MultiplyWithoutOverflow(new_num_elements, input_shape_vec(i));
if (new_num_elements < 0) {
overflow_ocurred = true;
break;
}
}
OP_REQUIRES(
context, !overflow_ocurred,
errors::Internal("Encountered overflow from large input shape."));
TensorShape tensor_input_shape(input_shape_vec);
gtl::InlinedVector<int64_t, 8> std_order(rank);
std::iota(std_order.begin(), std_order.end(), 0);
SparseTensor input_st;
OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
tensor_input_shape, std_order,
&input_st));
const int64_t N = input_shape_vec(0);
Tensor sparse_handles(DT_INT64, TensorShape({N}));
auto sparse_handles_t = sparse_handles.vec<int64_t>();
OP_REQUIRES_OK(context, input_st.IndicesValid());
// We can generate the output shape proto string now, for all
// minibatch entries.
TensorShape output_shape;
OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(
input_shape_vec.data() + 1,
input_shape->NumElements() - 1, &output_shape));
// Get groups by minibatch dimension
std::unordered_set<int64_t> visited;
sparse::GroupIterable minibatch = input_st.group({0});
for (const auto& subset : minibatch) {
const int64_t b = subset.group()[0];
visited.insert(b);
OP_REQUIRES(
context, b > -1 && b < N,
errors::InvalidArgument(
"Received unexpected column 0 value in input SparseTensor: ", b,
" < 0 or >= N (= ", N, ")"));
const auto indices = subset.indices();
const auto values = subset.values<T>();
const int64_t num_entries = values.size();
Tensor output_indices = Tensor(DT_INT64, {num_entries, rank - 1});
Tensor output_values = Tensor(DataTypeToEnum<T>::value, {num_entries});
auto output_indices_t = output_indices.matrix<int64_t>();
auto output_values_t = output_values.vec<T>();
for (int i = 0; i < num_entries; ++i) {
for (int d = 1; d < rank; ++d) {
output_indices_t(i, d - 1) = indices(i, d);
}
output_values_t(i) = values(i);
}
SparseTensor st_i;
OP_REQUIRES_OK(context,
SparseTensor::Create(output_indices, output_values,
output_shape, &st_i));
int64_t handle;
OP_REQUIRES_OK(context, map->AddSparseTensor(context, st_i, &handle));
sparse_handles_t(b) = handle;
}
// Fill in any gaps; we must provide an empty ST for batch entries
// the grouper didn't find.
if (visited.size() < N) {
Tensor empty_indices(DT_INT64, {0, rank - 1});
Tensor empty_values(DataTypeToEnum<T>::value, {0});
SparseTensor empty_st;
OP_REQUIRES_OK(context, SparseTensor::Create(empty_indices, empty_values,
output_shape, &empty_st));
for (int64_t b = 0; b < N; ++b) {
// We skipped this batch entry.
if (visited.find(b) == visited.end()) {
int64_t handle;
OP_REQUIRES_OK(context,
map->AddSparseTensor(context, empty_st, &handle));
sparse_handles_t(b) = handle;
}
}
}
context->set_output(0, sparse_handles);
}
| 1
|
314,751
|
cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p)
{
size_t i;
for (i = 0; i < __arraycount(vn); i++)
if (vn[i].v == p)
return snprintf(buf, bufsiz, "%s", vn[i].n);
return snprintf(buf, bufsiz, "0x%x", p);
}
| 0
|
372,872
|
static void irda_connect_confirm(void *instance, void *sap,
struct qos_info *qos,
__u32 max_sdu_size, __u8 max_header_size,
struct sk_buff *skb)
{
struct irda_sock *self;
struct sock *sk;
self = instance;
IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
sk = instance;
if (sk == NULL) {
dev_kfree_skb(skb);
return;
}
dev_kfree_skb(skb);
// Should be ??? skb_queue_tail(&sk->sk_receive_queue, skb);
/* How much header space do we need to reserve */
self->max_header_size = max_header_size;
/* IrTTP max SDU size in transmit direction */
self->max_sdu_size_tx = max_sdu_size;
/* Find out what the largest chunk of data that we can transmit is */
switch (sk->sk_type) {
case SOCK_STREAM:
if (max_sdu_size != 0) {
IRDA_ERROR("%s: max_sdu_size must be 0\n",
__func__);
return;
}
self->max_data_size = irttp_get_max_seg_size(self->tsap);
break;
case SOCK_SEQPACKET:
if (max_sdu_size == 0) {
IRDA_ERROR("%s: max_sdu_size cannot be 0\n",
__func__);
return;
}
self->max_data_size = max_sdu_size;
break;
default:
self->max_data_size = irttp_get_max_seg_size(self->tsap);
}
IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__,
self->max_data_size);
memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
/* We are now connected! */
sk->sk_state = TCP_ESTABLISHED;
sk->sk_state_change(sk);
}
| 0
|
446,423
|
static void populate_cache_headers(RzDyldCache *cache) {
cache->n_hdr = 0;
RzList *hdrs = rz_list_newf(NULL);
if (!hdrs) {
return;
}
cache_hdr_t *h;
ut64 offsets[MAX_N_HDR];
ut64 offset = 0;
do {
offsets[cache->n_hdr] = offset;
h = read_cache_header(cache->buf, offset);
if (!h) {
break;
}
rz_list_append(hdrs, h);
ut64 size = h->codeSignatureOffset + h->codeSignatureSize;
#define SHIFT_MAYBE(x) \
if (x) { \
x += offset; \
}
SHIFT_MAYBE(h->mappingOffset);
SHIFT_MAYBE(h->imagesOffset);
SHIFT_MAYBE(h->codeSignatureOffset);
SHIFT_MAYBE(h->slideInfoOffset);
SHIFT_MAYBE(h->localSymbolsOffset);
SHIFT_MAYBE(h->branchPoolsOffset);
SHIFT_MAYBE(h->imagesTextOffset);
offset += size;
cache->n_hdr++;
} while (cache->n_hdr < MAX_N_HDR);
if (!cache->n_hdr) {
goto beach;
}
cache->hdr = RZ_NEWS0(cache_hdr_t, cache->n_hdr);
if (!cache->hdr) {
cache->n_hdr = 0;
goto beach;
}
cache->hdr_offset = RZ_NEWS0(ut64, cache->n_hdr);
if (!cache->hdr_offset) {
cache->n_hdr = 0;
RZ_FREE(cache->hdr);
goto beach;
}
memcpy(cache->hdr_offset, offsets, cache->n_hdr * sizeof(ut64));
ut32 i = 0;
RzListIter *iter;
cache_hdr_t *item;
rz_list_foreach (hdrs, iter, item) {
if (i >= cache->n_hdr) {
break;
}
memcpy(&cache->hdr[i++], item, sizeof(cache_hdr_t));
}
beach:
rz_list_free(hdrs);
}
| 0
|
473,929
|
is_code_ctype(OnigCodePoint code, unsigned int ctype, OnigEncoding enc ARG_UNUSED)
{
if (code < 256)
return ENC_IS_ISO_8859_6_CTYPE(code, ctype);
else
return FALSE;
}
| 0
|
236,194
|
void gpp_write_rgba(GF_BitStream *bs, u32 col)
{
gf_bs_write_u8(bs, (col>>16) & 0xFF);
gf_bs_write_u8(bs, (col>>8) & 0xFF);
gf_bs_write_u8(bs, (col) & 0xFF);
gf_bs_write_u8(bs, (col>>24) & 0xFF);
}
| 0
|
222,863
|
Status GraphProperties::AnnotateOutputShapes(GraphDef* output_graph_def) const {
*output_graph_def = item_.graph;
for (int i = 0; i < output_graph_def->node_size(); i++) {
auto node = output_graph_def->mutable_node(i);
AttrValue attr_output_shape;
auto tensor_properties = GetOutputProperties(node->name());
for (const auto& tensor_property : tensor_properties) {
TensorShapeProto* proto = attr_output_shape.mutable_list()->add_shape();
*proto = tensor_property.shape();
NormalizeShapeForOutput(proto);
}
(*node->mutable_attr())["_output_shapes"] = std::move(attr_output_shape);
}
return Status::OK();
}
| 0
|
361,306
|
stl_which_vertices_to_change(stl_file *stl, stl_hash_edge *edge_a,
stl_hash_edge *edge_b, int *facet1, int *vertex1,
int *facet2, int *vertex2,
stl_vertex *new_vertex1, stl_vertex *new_vertex2) {
int v1a; /* pair 1, facet a */
int v1b; /* pair 1, facet b */
int v2a; /* pair 2, facet a */
int v2b; /* pair 2, facet b */
/* Find first pair */
if(edge_a->which_edge < 3) {
v1a = edge_a->which_edge;
v2a = (edge_a->which_edge + 1) % 3;
} else {
v2a = edge_a->which_edge % 3;
v1a = (edge_a->which_edge + 1) % 3;
}
if(edge_b->which_edge < 3) {
v1b = edge_b->which_edge;
v2b = (edge_b->which_edge + 1) % 3;
} else {
v2b = edge_b->which_edge % 3;
v1b = (edge_b->which_edge + 1) % 3;
}
/* Of the first pair, which vertex, if any, should be changed */
if(!memcmp(&stl->facet_start[edge_a->facet_number].vertex[v1a],
&stl->facet_start[edge_b->facet_number].vertex[v1b],
sizeof(stl_vertex))) {
/* These facets are already equal. No need to change. */
*facet1 = -1;
} else {
if( (stl->neighbors_start[edge_a->facet_number].neighbor[v1a] == -1)
&& (stl->neighbors_start[edge_a->facet_number].
neighbor[(v1a + 2) % 3] == -1)) {
/* This vertex has no neighbors. This is a good one to change */
*facet1 = edge_a->facet_number;
*vertex1 = v1a;
*new_vertex1 = stl->facet_start[edge_b->facet_number].vertex[v1b];
} else {
*facet1 = edge_b->facet_number;
*vertex1 = v1b;
*new_vertex1 = stl->facet_start[edge_a->facet_number].vertex[v1a];
}
}
/* Of the second pair, which vertex, if any, should be changed */
if(!memcmp(&stl->facet_start[edge_a->facet_number].vertex[v2a],
&stl->facet_start[edge_b->facet_number].vertex[v2b],
sizeof(stl_vertex))) {
/* These facets are already equal. No need to change. */
*facet2 = -1;
} else {
if( (stl->neighbors_start[edge_a->facet_number].neighbor[v2a] == -1)
&& (stl->neighbors_start[edge_a->facet_number].
neighbor[(v2a + 2) % 3] == -1)) {
/* This vertex has no neighbors. This is a good one to change */
*facet2 = edge_a->facet_number;
*vertex2 = v2a;
*new_vertex2 = stl->facet_start[edge_b->facet_number].vertex[v2b];
} else {
*facet2 = edge_b->facet_number;
*vertex2 = v2b;
*new_vertex2 = stl->facet_start[edge_a->facet_number].vertex[v2a];
}
}
}
| 0
|
293,769
|
static int kexts_sort_vaddr_func(const void *a, const void *b) {
RKext *A = (RKext *) a;
RKext *B = (RKext *) b;
int vaddr_compare = A->vaddr - B->vaddr;
if (vaddr_compare == 0) {
return A->text_range.size - B->text_range.size;
}
return vaddr_compare;
}
| 0
|
432,158
|
auto buildProjectionForPushdown(const DepsTracker& deps,
Pipeline* pipeline,
bool allowExpressions) {
auto&& sources = pipeline->getSources();
// Short-circuit if the pipeline is empty: there is no projection and nothing to push down.
if (sources.empty()) {
return BSONObj();
}
if (const auto projStage =
exact_pointer_cast<DocumentSourceSingleDocumentTransformation*>(sources.front().get());
projStage) {
if (projStage->getType() == TransformerInterface::TransformerType::kInclusionProjection) {
auto projObj =
projStage->getTransformer().serializeTransformation(boost::none).toBson();
auto projAst = projection_ast::parse(projStage->getContext(),
projObj,
ProjectionPolicies::aggregateProjectionPolicies());
if (!projAst.hasExpressions() || allowExpressions) {
// If there is an inclusion projection at the front of the pipeline, we have case 1.
sources.pop_front();
return projObj;
}
}
}
// Depending of whether there is a finite dependency set, either return a projection
// representing this dependency set, or an empty BSON, meaning no projection push down will
// happen. This covers cases 2 and 3.
if (deps.getNeedsAnyMetadata())
return BSONObj();
return deps.toProjectionWithoutMetadata();
}
| 0
|
234,223
|
regname (unsigned int regno, int name_only_p)
{
static char reg[64];
const char *name = NULL;
if (dwarf_regnames_lookup_func != NULL)
name = dwarf_regnames_lookup_func (regno);
if (name != NULL)
{
if (name_only_p)
return name;
snprintf (reg, sizeof (reg), "r%d (%s)", regno, name);
}
else
snprintf (reg, sizeof (reg), "r%d", regno);
return reg;
}
| 0
|
383,373
|
gdImageBrushApply (gdImagePtr im, int x, int y)
{
int lx, ly;
int hy, hx;
int x1, y1, x2, y2;
int srcx, srcy;
if (!im->brush) {
return;
}
hy = gdImageSY (im->brush) / 2;
y1 = y - hy;
y2 = y1 + gdImageSY (im->brush);
hx = gdImageSX (im->brush) / 2;
x1 = x - hx;
x2 = x1 + gdImageSX (im->brush);
srcy = 0;
if (im->trueColor) {
for (ly = y1; (ly < y2); ly++) {
srcx = 0;
for (lx = x1; (lx < x2); lx++) {
int p;
p = gdImageGetTrueColorPixel (im->brush, srcx, srcy);
/* 2.0.9, Thomas Winzig: apply simple full transparency */
if (p != gdImageGetTransparent (im->brush)) {
gdImageSetPixel (im, lx, ly, p);
}
srcx++;
}
srcy++;
}
} else {
for (ly = y1; (ly < y2); ly++) {
srcx = 0;
for (lx = x1; (lx < x2); lx++) {
int p;
p = gdImageGetPixel (im->brush, srcx, srcy);
/* Allow for non-square brushes! */
if (p != gdImageGetTransparent (im->brush)) {
/* Truecolor brush. Very slow on a palette destination. */
if (im->brush->trueColor) {
gdImageSetPixel(im, lx, ly, gdImageColorResolveAlpha(im, gdTrueColorGetRed(p),
gdTrueColorGetGreen(p),
gdTrueColorGetBlue(p),
gdTrueColorGetAlpha(p)));
} else {
gdImageSetPixel(im, lx, ly, im->brushColorMap[p]);
}
}
srcx++;
}
srcy++;
}
}
}
| 0
|
317,222
|
static int smack_inode_alloc_security(struct inode *inode)
{
struct smack_known *skp = smk_of_current();
init_inode_smack(inode, skp);
return 0;
}
| 0
|
294,528
|
d_lite_s_alloc_complex(VALUE klass)
{
return d_complex_new_internal(klass,
INT2FIX(0), 0,
0, INT2FIX(0),
0, DEFAULT_SG,
0, 0, 0,
0, 0, 0,
HAVE_JD | HAVE_DF);
}
| 0
|
90,161
|
NetworkLibraryImpl()
: network_manager_monitor_(NULL),
data_plan_monitor_(NULL),
ethernet_(NULL),
wifi_(NULL),
cellular_(NULL),
available_devices_(0),
enabled_devices_(0),
connected_devices_(0),
offline_mode_(false) {
if (EnsureCrosLoaded()) {
Init();
network_manager_monitor_ =
MonitorNetworkManager(&NetworkManagerStatusChangedHandler,
this);
data_plan_monitor_ = MonitorCellularDataPlan(&DataPlanUpdateHandler,
this);
} else {
InitTestData();
}
}
| 0
|
417,115
|
const char* PlayerGeneric::getCurrentAudioDriverName() const
{
if (mixer)
return mixer->getCurrentAudioDriverName();
return audioDriverName;
}
| 0
|
513,289
|
test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
key_map usable_keys, int ref_key,
ha_rows select_limit_arg,
int *new_key, int *new_key_direction,
ha_rows *new_select_limit, uint *new_used_key_parts,
uint *saved_best_key_parts)
{
DBUG_ENTER("test_if_cheaper_ordering");
/*
Check whether there is an index compatible with the given order
usage of which is cheaper than usage of the ref_key index (ref_key>=0)
or a table scan.
It may be the case if ORDER/GROUP BY is used with LIMIT.
*/
ha_rows best_select_limit= HA_POS_ERROR;
JOIN *join= tab ? tab->join : NULL;
uint nr;
key_map keys;
uint best_key_parts= 0;
int best_key_direction= 0;
ha_rows best_records= 0;
double read_time;
int best_key= -1;
bool is_best_covering= FALSE;
double fanout= 1;
ha_rows table_records= table->stat_records();
bool group= join && join->group && order == join->group_list;
ha_rows refkey_rows_estimate= table->quick_condition_rows;
const bool has_limit= (select_limit_arg != HA_POS_ERROR);
/*
If not used with LIMIT, only use keys if the whole query can be
resolved with a key; This is because filesort() is usually faster than
retrieving all rows through an index.
*/
if (select_limit_arg >= table_records)
{
keys= *table->file->keys_to_use_for_scanning();
keys.merge(table->covering_keys);
/*
We are adding here also the index specified in FORCE INDEX clause,
if any.
This is to allow users to use index in ORDER BY.
*/
if (table->force_index)
keys.merge(group ? table->keys_in_use_for_group_by :
table->keys_in_use_for_order_by);
keys.intersect(usable_keys);
}
else
keys= usable_keys;
if (join)
{
uint tablenr= (uint)(tab - join->join_tab);
read_time= join->best_positions[tablenr].read_time;
for (uint i= tablenr+1; i < join->table_count; i++)
fanout*= join->best_positions[i].records_read; // fanout is always >= 1
}
else
read_time= table->file->scan_time();
/*
TODO: add cost of sorting here.
*/
read_time += COST_EPS;
/*
Calculate the selectivity of the ref_key for REF_ACCESS. For
RANGE_ACCESS we use table->quick_condition_rows.
*/
if (ref_key >= 0 && ref_key != MAX_KEY && tab->type == JT_REF)
{
if (table->quick_keys.is_set(ref_key))
refkey_rows_estimate= table->quick_rows[ref_key];
else
{
const KEY *ref_keyinfo= table->key_info + ref_key;
refkey_rows_estimate= ref_keyinfo->rec_per_key[tab->ref.key_parts - 1];
}
set_if_bigger(refkey_rows_estimate, 1);
}
for (nr=0; nr < table->s->keys ; nr++)
{
int direction;
ha_rows select_limit= select_limit_arg;
uint used_key_parts= 0;
if (keys.is_set(nr) &&
(direction= test_if_order_by_key(join, order, table, nr,
&used_key_parts)))
{
/*
At this point we are sure that ref_key is a non-ordering
key (where "ordering key" is a key that will return rows
in the order required by ORDER BY).
*/
DBUG_ASSERT (ref_key != (int) nr);
bool is_covering= (table->covering_keys.is_set(nr) ||
(table->file->index_flags(nr, 0, 1) &
HA_CLUSTERED_INDEX));
/*
Don't use an index scan with ORDER BY without limit.
For GROUP BY without limit always use index scan
if there is a suitable index.
Why we hold to this asymmetry hardly can be explained
rationally. It's easy to demonstrate that using
temporary table + filesort could be cheaper for grouping
queries too.
*/
if (is_covering ||
select_limit != HA_POS_ERROR ||
(ref_key < 0 && (group || table->force_index)))
{
double rec_per_key;
double index_scan_time;
KEY *keyinfo= table->key_info+nr;
if (select_limit == HA_POS_ERROR)
select_limit= table_records;
if (group)
{
/*
Used_key_parts can be larger than keyinfo->user_defined_key_parts
when using a secondary index clustered with a primary
key (e.g. as in Innodb).
See Bug #28591 for details.
*/
uint used_index_parts= keyinfo->user_defined_key_parts;
uint used_pk_parts= 0;
if (used_key_parts > used_index_parts)
used_pk_parts= used_key_parts-used_index_parts;
rec_per_key= used_key_parts ?
keyinfo->actual_rec_per_key(used_key_parts-1) : 1;
/* Take into account the selectivity of the used pk prefix */
if (used_pk_parts)
{
KEY *pkinfo=tab->table->key_info+table->s->primary_key;
/*
If the values of of records per key for the prefixes
of the primary key are considered unknown we assume
they are equal to 1.
*/
if (used_key_parts == pkinfo->user_defined_key_parts ||
pkinfo->rec_per_key[0] == 0)
rec_per_key= 1;
if (rec_per_key > 1)
{
rec_per_key*= pkinfo->actual_rec_per_key(used_pk_parts-1);
rec_per_key/= pkinfo->actual_rec_per_key(0);
/*
The value of rec_per_key for the extended key has
to be adjusted accordingly if some components of
the secondary key are included in the primary key.
*/
for(uint i= 1; i < used_pk_parts; i++)
{
if (pkinfo->key_part[i].field->key_start.is_set(nr))
{
/*
We presume here that for any index rec_per_key[i] != 0
if rec_per_key[0] != 0.
*/
DBUG_ASSERT(pkinfo->actual_rec_per_key(i));
rec_per_key*= pkinfo->actual_rec_per_key(i-1);
rec_per_key/= pkinfo->actual_rec_per_key(i);
}
}
}
}
set_if_bigger(rec_per_key, 1);
/*
With a grouping query each group containing on average
rec_per_key records produces only one row that will
be included into the result set.
*/
if (select_limit > table_records/rec_per_key)
select_limit= table_records;
else
select_limit= (ha_rows) (select_limit*rec_per_key);
} /* group */
/*
If tab=tk is not the last joined table tn then to get first
L records from the result set we can expect to retrieve
only L/fanout(tk,tn) where fanout(tk,tn) says how many
rows in the record set on average will match each row tk.
Usually our estimates for fanouts are too pessimistic.
So the estimate for L/fanout(tk,tn) will be too optimistic
and as result we'll choose an index scan when using ref/range
access + filesort will be cheaper.
*/
select_limit= (ha_rows) (select_limit < fanout ?
1 : select_limit/fanout);
/*
We assume that each of the tested indexes is not correlated
with ref_key. Thus, to select first N records we have to scan
N/selectivity(ref_key) index entries.
selectivity(ref_key) = #scanned_records/#table_records =
refkey_rows_estimate/table_records.
In any case we can't select more than #table_records.
N/(refkey_rows_estimate/table_records) > table_records
<=> N > refkey_rows_estimate.
*/
if (select_limit > refkey_rows_estimate)
select_limit= table_records;
else
select_limit= (ha_rows) (select_limit *
(double) table_records /
refkey_rows_estimate);
rec_per_key= keyinfo->actual_rec_per_key(keyinfo->user_defined_key_parts-1);
set_if_bigger(rec_per_key, 1);
/*
Here we take into account the fact that rows are
accessed in sequences rec_per_key records in each.
Rows in such a sequence are supposed to be ordered
by rowid/primary key. When reading the data
in a sequence we'll touch not more pages than the
table file contains.
TODO. Use the formula for a disk sweep sequential access
to calculate the cost of accessing data rows for one
index entry.
*/
index_scan_time= select_limit/rec_per_key *
MY_MIN(rec_per_key, table->file->scan_time());
double range_scan_time;
if (get_range_limit_read_cost(tab, table, nr, select_limit,
&range_scan_time))
{
if (range_scan_time < index_scan_time)
index_scan_time= range_scan_time;
}
if ((ref_key < 0 && (group || table->force_index || is_covering)) ||
index_scan_time < read_time)
{
ha_rows quick_records= table_records;
ha_rows refkey_select_limit= (ref_key >= 0 &&
!is_hash_join_key_no(ref_key) &&
table->covering_keys.is_set(ref_key)) ?
refkey_rows_estimate :
HA_POS_ERROR;
if ((is_best_covering && !is_covering) ||
(is_covering && refkey_select_limit < select_limit))
continue;
if (table->quick_keys.is_set(nr))
quick_records= table->quick_rows[nr];
if (best_key < 0 ||
(select_limit <= MY_MIN(quick_records,best_records) ?
keyinfo->user_defined_key_parts < best_key_parts :
quick_records < best_records) ||
(!is_best_covering && is_covering))
{
best_key= nr;
best_key_parts= keyinfo->user_defined_key_parts;
if (saved_best_key_parts)
*saved_best_key_parts= used_key_parts;
best_records= quick_records;
is_best_covering= is_covering;
best_key_direction= direction;
best_select_limit= select_limit;
}
}
}
}
}
if (best_key < 0 || best_key == ref_key)
DBUG_RETURN(FALSE);
*new_key= best_key;
*new_key_direction= best_key_direction;
*new_select_limit= has_limit ? best_select_limit : table_records;
if (new_used_key_parts != NULL)
*new_used_key_parts= best_key_parts;
DBUG_RETURN(TRUE);
}
| 0
|
246,647
|
static void naludmx_enqueue_or_dispatch(GF_NALUDmxCtx *ctx, GF_FilterPacket *n_pck, Bool flush_ref)
{
//TODO: we are dispatching frames in "negctts mode", ie we may have DTS>CTS
//need to signal this for consumers using DTS (eg MPEG-2 TS)
if (flush_ref && ctx->pck_queue && ctx->poc_diff) {
u32 dts_inc=0;
s32 last_poc = 0;
Bool patch_missing_frame = GF_FALSE;
//send all reference packet queued
if (ctx->strict_poc==STRICT_POC_ERROR) {
u32 i;
u32 nb_bframes = 0;
for (i=0; i<gf_list_count(ctx->pck_queue); i++) {
s32 poc;
u64 poc_ts, dts;
GF_FilterPacket *q_pck = gf_list_get(ctx->pck_queue, i);
if (q_pck == ctx->first_pck_in_au) break;
dts = gf_filter_pck_get_dts(q_pck);
if (dts == GF_FILTER_NO_TS) continue;
poc_ts = gf_filter_pck_get_cts(q_pck);
assert(poc_ts != GF_FILTER_NO_TS);
poc = (s32) ((s64) poc_ts - CTS_POC_OFFSET_SAFETY);
if (i) {
if (last_poc>poc) nb_bframes ++;
else if (last_poc + ctx->poc_diff<poc)
patch_missing_frame = GF_TRUE;
}
last_poc = poc;
}
if (nb_bframes>1)
patch_missing_frame = GF_FALSE;
else if (nb_bframes)
patch_missing_frame = GF_TRUE;
}
last_poc = GF_INT_MIN;
while (gf_list_count(ctx->pck_queue) ) {
u64 dts;
GF_FilterPacket *q_pck = gf_list_get(ctx->pck_queue, 0);
if (q_pck == ctx->first_pck_in_au) break;
dts = gf_filter_pck_get_dts(q_pck);
if (dts != GF_FILTER_NO_TS) {
s32 poc;
u64 poc_ts, cts;
u8 carousel_info = gf_filter_pck_get_carousel_version(q_pck);
//we reused timing from source packets
if (!carousel_info) {
assert(ctx->timescale);
gf_list_rem(ctx->pck_queue, 0);
gf_filter_pck_send(q_pck);
continue;
}
gf_filter_pck_set_carousel_version(q_pck, 0);
poc_ts = gf_filter_pck_get_cts(q_pck);
assert(poc_ts != GF_FILTER_NO_TS);
poc = (s32) ((s64) poc_ts - CTS_POC_OFFSET_SAFETY);
if (patch_missing_frame) {
if (last_poc!=GF_INT_MIN) {
//check if we missed an IDR (poc reset)
if (poc && (last_poc > poc) ) {
last_poc = 0;
dts_inc += ctx->cur_fps.den;
ctx->dts_last_IDR = dts;
ctx->dts += ctx->cur_fps.den;
}
//check if we miss a frame
while (last_poc + ctx->poc_diff < poc) {
last_poc += ctx->poc_diff;
dts_inc += ctx->cur_fps.den;
ctx->dts += ctx->cur_fps.den;
}
}
last_poc = poc;
dts += dts_inc;
}
//poc is stored as diff since last IDR which has min_poc
cts = ( (ctx->min_poc + (s32) poc) * ctx->cur_fps.den ) / ctx->poc_diff + ctx->dts_last_IDR;
/*if PAFF, 2 pictures (eg poc) <=> 1 aggregated frame (eg sample), divide by 2*/
if (ctx->is_paff) {
cts /= 2;
/*in some cases the poc is not on the top field - if that is the case, round up*/
if (cts % ctx->cur_fps.den) {
cts = ((cts/ctx->cur_fps.den)+1) * ctx->cur_fps.den;
}
}
gf_filter_pck_set_cts(q_pck, cts);
GF_LOG(GF_LOG_DEBUG, GF_LOG_MEDIA, ("[%s] Frame timestamps computed dts "LLU" cts "LLU" (poc %d min poc %d poc_diff %d last IDR DTS "LLU")\n", ctx->log_name, dts, cts, poc, ctx->min_poc, ctx->poc_diff, ctx->dts_last_IDR));
if (ctx->importer && ctx->cur_fps.den) {
poc = (s32) ( (s64) cts - (s64) dts);
if (poc<0) poc = -poc;
poc /= ctx->cur_fps.den;
if (poc > ctx->max_total_delay)
ctx->max_total_delay = poc;
}
}
gf_list_rem(ctx->pck_queue, 0);
gf_filter_pck_send(q_pck);
}
}
if (!n_pck) return;
if (!ctx->pck_queue) ctx->pck_queue = gf_list_new();
gf_list_add(ctx->pck_queue, n_pck);
}
| 0
|
415,187
|
has_option (const char *line, const char *name)
{
const char *s;
int n = strlen (name);
s = strstr (line, name);
return (s && (s == line || spacep (s-1)) && (!s[n] || spacep (s+n)));
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.