idx
int64 | func
string | target
int64 |
|---|---|---|
179,714
|
// Creates and registers a FileStreamReader for the file-backed blob item at
// |index|, starting |additional_offset| bytes past the item's own offset.
// The item must be a file-type item and must not already have a reader.
void BlobURLRequestJob::CreateFileStreamReader(size_t index,
                                               int64 additional_offset) {
  DCHECK_LT(index, blob_data_->items().size());
  const BlobData::Item& item = blob_data_->items().at(index);
  DCHECK(IsFileType(item.type()));
  DCHECK_EQ(0U, index_to_reader_.count(index));

  const int64 start_offset = item.offset() + additional_offset;
  FileStreamReader* reader = NULL;
  if (item.type() == BlobData::Item::TYPE_FILE) {
    // Plain local file: read it on the file thread.
    reader = new LocalFileStreamReader(file_thread_proxy_,
                                       item.path(),
                                       start_offset,
                                       item.expected_modification_time());
  } else if (item.type() == BlobData::Item::TYPE_FILE_FILESYSTEM) {
    // File inside the sandboxed filesystem: let the context build the reader.
    reader = file_system_context_->CreateFileStreamReader(
        fileapi::FileSystemURL(file_system_context_->CrackURL(item.url())),
        start_offset,
        item.expected_modification_time());
  } else {
    NOTREACHED();
  }
  DCHECK(reader);
  index_to_reader_[index] = reader;
}
| 0
|
115,852
|
/* sysfs "show" callback for the lower stylus button mapping: looks up the
 * current numeric setting in stylus_button_map, prints its string name plus
 * a newline into buf (at most PAGE_SIZE bytes) and returns the byte count. */
static ssize_t show_tabletStylusLower(struct device *dev, struct device_attribute *attr, char *buf)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n",
map_val_to_str(stylus_button_map,
aiptek->curSetting.stylusButtonLower));
}
| 0
|
53,034
|
/* Parses a QuickTime 'gnre' (genre) metadata atom: skips one unknown byte,
 * reads the 1-based ID3v1 genre index, and stores the corresponding genre
 * name string under 'key' in the file's metadata dictionary. Out-of-range
 * indices are silently ignored. Always returns 0. */
static int mov_metadata_gnre(MOVContext *c, AVIOContext *pb,
unsigned len, const char *key)
{
short genre;
char buf[20];
avio_r8(pb); // unknown
genre = avio_r8(pb);
if (genre < 1 || genre > ID3v1_GENRE_MAX)
return 0;
/* snprintf truncates safely if a genre name exceeds the 20-byte buffer */
snprintf(buf, sizeof(buf), "%s", ff_id3v1_genre_str[genre-1]);
av_dict_set(&c->fc->metadata, key, buf, 0);
return 0;
}
| 0
|
446,269
|
/* Formats one <filesystem> element of the domain XML into 'buf'.
 * Emits the type/accessmode/model/multidevs attributes, optional <driver>,
 * <binary> (virtiofs only), a type-dependent <source>, <target>, and the
 * optional readonly/device-info/space-limit children.
 * Returns 0 on success, -1 (with an error reported) when an enum value
 * cannot be converted to its string form or device info formatting fails. */
virDomainFSDefFormat(virBufferPtr buf,
virDomainFSDefPtr def,
unsigned int flags)
{
const char *type = virDomainFSTypeToString(def->type);
const char *accessmode = virDomainFSAccessModeTypeToString(def->accessmode);
const char *fsdriver = virDomainFSDriverTypeToString(def->fsdriver);
const char *wrpolicy = virDomainFSWrpolicyTypeToString(def->wrpolicy);
const char *multidevs = virDomainFSMultidevsTypeToString(def->multidevs);
const char *src = def->src->path;
g_auto(virBuffer) driverAttrBuf = VIR_BUFFER_INITIALIZER;
g_auto(virBuffer) driverBuf = VIR_BUFFER_INIT_CHILD(buf);
g_auto(virBuffer) binaryAttrBuf = VIR_BUFFER_INITIALIZER;
g_auto(virBuffer) binaryBuf = VIR_BUFFER_INIT_CHILD(buf);
/* Validate the enum-to-string conversions before emitting anything. */
if (!type) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("unexpected filesystem type %d"), def->type);
return -1;
}
if (!accessmode) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("unexpected accessmode %d"), def->accessmode);
return -1;
}
if (!multidevs) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("unexpected multidevs %d"), def->multidevs);
return -1;
}
virBufferAsprintf(buf,
"<filesystem type='%s' accessmode='%s'",
type, accessmode);
if (def->model) {
virBufferAsprintf(buf, " model='%s'",
virDomainFSModelTypeToString(def->model));
}
if (def->multidevs)
virBufferAsprintf(buf, " multidevs='%s'", multidevs);
virBufferAddLit(buf, ">\n");
virBufferAdjustIndent(buf, 2);
virBufferAdjustIndent(&driverBuf, 2);
virBufferAdjustIndent(&binaryBuf, 2);
/* Collect <driver> attributes; the element itself is emitted later via
 * virXMLFormatElement so it is omitted entirely when empty. */
if (def->fsdriver) {
virBufferAsprintf(&driverAttrBuf, " type='%s'", fsdriver);
if (def->format)
virBufferAsprintf(&driverAttrBuf, " format='%s'",
virStorageFileFormatTypeToString(def->format));
/* Don't generate anything if wrpolicy is set to default */
if (def->wrpolicy)
virBufferAsprintf(&driverAttrBuf, " wrpolicy='%s'", wrpolicy);
if (def->queue_size)
virBufferAsprintf(&driverAttrBuf, " queue='%llu'", def->queue_size);
}
/* virtiofs gets an extra <binary> element with its own tuning knobs. */
if (def->fsdriver == VIR_DOMAIN_FS_DRIVER_TYPE_VIRTIOFS) {
g_auto(virBuffer) lockAttrBuf = VIR_BUFFER_INITIALIZER;
virBufferEscapeString(&binaryAttrBuf, " path='%s'", def->binary);
if (def->xattr != VIR_TRISTATE_SWITCH_ABSENT) {
virBufferAsprintf(&binaryAttrBuf, " xattr='%s'",
virTristateSwitchTypeToString(def->xattr));
}
if (def->cache != VIR_DOMAIN_FS_CACHE_MODE_DEFAULT) {
virBufferAsprintf(&binaryBuf, "<cache mode='%s'/>\n",
virDomainFSCacheModeTypeToString(def->cache));
}
if (def->posix_lock != VIR_TRISTATE_SWITCH_ABSENT) {
virBufferAsprintf(&lockAttrBuf, " posix='%s'",
virTristateSwitchTypeToString(def->posix_lock));
}
if (def->flock != VIR_TRISTATE_SWITCH_ABSENT) {
virBufferAsprintf(&lockAttrBuf, " flock='%s'",
virTristateSwitchTypeToString(def->flock));
}
virXMLFormatElement(&binaryBuf, "lock", &lockAttrBuf, NULL);
}
virDomainVirtioOptionsFormat(&driverAttrBuf, def->virtio);
virXMLFormatElement(buf, "driver", &driverAttrBuf, &driverBuf);
virXMLFormatElement(buf, "binary", &binaryAttrBuf, &binaryBuf);
/* The <source> attribute name depends on the filesystem type. */
switch (def->type) {
case VIR_DOMAIN_FS_TYPE_MOUNT:
case VIR_DOMAIN_FS_TYPE_BIND:
virBufferEscapeString(buf, "<source dir='%s'/>\n",
src);
break;
case VIR_DOMAIN_FS_TYPE_BLOCK:
virBufferEscapeString(buf, "<source dev='%s'/>\n",
src);
break;
case VIR_DOMAIN_FS_TYPE_FILE:
virBufferEscapeString(buf, "<source file='%s'/>\n",
src);
break;
case VIR_DOMAIN_FS_TYPE_TEMPLATE:
virBufferEscapeString(buf, "<source name='%s'/>\n",
src);
break;
case VIR_DOMAIN_FS_TYPE_RAM:
virBufferAsprintf(buf, "<source usage='%lld' units='KiB'/>\n",
def->usage / 1024);
break;
case VIR_DOMAIN_FS_TYPE_VOLUME:
virBufferAddLit(buf, "<source");
virBufferEscapeString(buf, " pool='%s'", def->src->srcpool->pool);
virBufferEscapeString(buf, " volume='%s'", def->src->srcpool->volume);
virBufferAddLit(buf, "/>\n");
break;
}
virBufferEscapeString(buf, "<target dir='%s'/>\n",
def->dst);
if (def->readonly)
virBufferAddLit(buf, "<readonly/>\n");
if (virDomainDeviceInfoFormat(buf, &def->info, flags) < 0)
return -1;
if (def->space_hard_limit)
virBufferAsprintf(buf, "<space_hard_limit unit='bytes'>"
"%llu</space_hard_limit>\n", def->space_hard_limit);
if (def->space_soft_limit) {
virBufferAsprintf(buf, "<space_soft_limit unit='bytes'>"
"%llu</space_soft_limit>\n", def->space_soft_limit);
}
virBufferAdjustIndent(buf, -2);
virBufferAddLit(buf, "</filesystem>\n");
return 0;
}
| 0
|
265,048
|
// Builds the exception message
// "[<container_name>] The sum of all values in <arg1> and <arg2> do not match."
SG_Exception_BadSum::SG_Exception_BadSum(const char* container_name, const char* arg1, const char* arg2)
{
    std::string msg("[");
    msg += container_name;
    msg += "]";
    msg += " The sum of all values in ";
    msg += arg1;
    msg += " and ";
    msg += arg2;
    msg += " do not match.";
    this->err_msg = msg;
}
| 0
|
116,952
|
/* sendpage() entry point for TCP. Falls back to the generic copying path
 * (sock_no_sendpage) when the route lacks scatter/gather or the socket
 * cannot do checksum offload; otherwise sends the page zero-copy via
 * do_tcp_sendpages() under the socket lock. Returns bytes queued or a
 * negative errno. */
int tcp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags)
{
ssize_t res;
if (!(sk->sk_route_caps & NETIF_F_SG) ||
!sk_check_csum_caps(sk))
return sock_no_sendpage(sk->sk_socket, page, offset, size,
flags);
lock_sock(sk);
tcp_rate_check_app_limited(sk); /* is sending application-limited? */
res = do_tcp_sendpages(sk, page, offset, size, flags);
release_sock(sk);
return res;
}
| 0
|
252,581
|
// Decides whether this form element needs a layout object. A demoted form
// whose parent is rendered as a table part only gets a layout object when the
// form itself also has a table-related display type; otherwise it must stay
// out of the layout tree so anonymous-table fixup isn't disturbed.
// Non-demoted forms (and forms with no rendered parent) defer to HTMLElement.
bool HTMLFormElement::LayoutObjectIsNeeded(const ComputedStyle& style) {
if (!was_demoted_)
return HTMLElement::LayoutObjectIsNeeded(style);
ContainerNode* node = parentNode();
if (!node || !node->GetLayoutObject())
return HTMLElement::LayoutObjectIsNeeded(style);
LayoutObject* parent_layout_object = node->GetLayoutObject();
// NOTE(review): the last clause pairs IsTableCell() with
// IsHTMLTableRowElement(*node), unlike the other symmetric pairs --
// confirm against upstream whether this is intentional.
bool parent_is_table_element_part =
(parent_layout_object->IsTable() && IsHTMLTableElement(*node)) ||
(parent_layout_object->IsTableRow() && IsHTMLTableRowElement(*node)) ||
(parent_layout_object->IsTableSection() && node->HasTagName(tbodyTag)) ||
(parent_layout_object->IsLayoutTableCol() && node->HasTagName(colTag)) ||
(parent_layout_object->IsTableCell() && IsHTMLTableRowElement(*node));
if (!parent_is_table_element_part)
return true;
EDisplay display = style.Display();
bool form_is_table_part =
display == EDisplay::kTable || display == EDisplay::kInlineTable ||
display == EDisplay::kTableRowGroup ||
display == EDisplay::kTableHeaderGroup ||
display == EDisplay::kTableFooterGroup ||
display == EDisplay::kTableRow ||
display == EDisplay::kTableColumnGroup ||
display == EDisplay::kTableColumn || display == EDisplay::kTableCell ||
display == EDisplay::kTableCaption;
return form_is_table_part;
}
| 0
|
301,473
|
// Loads the game module "<name><arch><ext>". Order of attempts:
//  1. (debug builds only) the raw filename via Sys_LoadLibrary,
//  2. a copy unpacked from a PK3 archive,
//  3. (macOS, non-JK2) a legacy Mach-O bundle inside a pk3,
//  4. the configured filesystem search paths (homepath/apppath/basepath/cdpath).
// On success stores the module's GetModuleAPI entry point in *moduleAPI and
// returns the library handle; returns NULL on any failure (the library is
// unloaded if GetModuleAPI cannot be resolved).
void *Sys_LoadGameDll( const char *name, GetModuleAPIProc **moduleAPI )
{
void *libHandle = NULL;
char filename[MAX_OSPATH];
Com_sprintf (filename, sizeof(filename), "%s" ARCH_STRING DLL_EXT, name);
#if defined(_DEBUG)
libHandle = Sys_LoadLibrary( filename );
if ( !libHandle )
#endif
{
UnpackDLLResult unpackResult = Sys_UnpackDLL(filename);
if ( !unpackResult.succeeded )
{
// Unpacking failed; only fatal when this platform requires unpacking.
if ( Sys_DLLNeedsUnpacking() )
{
FreeUnpackDLLResult(&unpackResult);
Com_DPrintf( "Sys_LoadLegacyGameDll: Failed to unpack %s from PK3.\n", filename );
return NULL;
}
}
else
{
libHandle = Sys_LoadLibrary(unpackResult.tempDLLPath);
}
FreeUnpackDLLResult(&unpackResult);
if ( !libHandle )
{
#if defined(MACOS_X) && !defined(_JK2EXE)
//First, look for the old-style mac .bundle that's inside a pk3
//It's actually zipped, and the zipfile has the same name as 'name'
libHandle = Sys_LoadMachOBundle( name );
#endif
if (!libHandle) {
char *basepath = Cvar_VariableString( "fs_basepath" );
char *homepath = Cvar_VariableString( "fs_homepath" );
char *cdpath = Cvar_VariableString( "fs_cdpath" );
char *gamedir = Cvar_VariableString( "fs_game" );
#ifdef MACOS_X
char *apppath = Cvar_VariableString( "fs_apppath" );
#endif
const char *searchPaths[] = {
homepath,
#ifdef MACOS_X
apppath,
#endif
basepath,
cdpath,
};
size_t numPaths = ARRAY_LEN( searchPaths );
libHandle = Sys_LoadDllFromPaths( filename, gamedir, searchPaths, numPaths, SEARCH_PATH_BASE | SEARCH_PATH_MOD, __FUNCTION__ );
if ( !libHandle )
return NULL;
}
}
}
// Resolve the module's entry point; unload on failure so we don't leak it.
*moduleAPI = (GetModuleAPIProc *)Sys_LoadFunction( libHandle, "GetModuleAPI" );
if ( !*moduleAPI ) {
Com_DPrintf ( "Sys_LoadGameDll(%s) failed to find GetModuleAPI function:\n...%s!\n", name, Sys_LibraryError() );
Sys_UnloadLibrary( libHandle );
return NULL;
}
return libHandle;
}
| 0
|
122,585
|
/* Returns nonzero when USB support has been disabled (the 'nousb' flag). */
int usb_disabled(void)
{
return nousb;
}
| 0
|
249,520
|
// Handles the ResizeCHROMIUM GPU command: clamps the requested size to at
// least 1x1, resizes the offscreen framebuffer when rendering offscreen, and
// invokes the registered resize callback. Returns kLostContext when either
// step loses the GL context, kDeferCommandUntilLater when the surface asks
// to defer draws, and kNoError otherwise.
error::Error GLES2DecoderImpl::HandleResizeCHROMIUM(uint32 immediate_data_size,
const void* cmd_data) {
const gles2::cmds::ResizeCHROMIUM& c =
*static_cast<const gles2::cmds::ResizeCHROMIUM*>(cmd_data);
if (!offscreen_target_frame_buffer_.get() && surface_->DeferDraws())
return error::kDeferCommandUntilLater;
GLuint width = static_cast<GLuint>(c.width);
GLuint height = static_cast<GLuint>(c.height);
GLfloat scale_factor = c.scale_factor;
TRACE_EVENT2("gpu", "glResizeChromium", "width", width, "height", height);
width = std::max(1U, width);
height = std::max(1U, height);
// Make sure all GL work is finished before resizing on these platforms.
#if defined(OS_POSIX) && !defined(OS_MACOSX) && \
!defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
glFinish();
#endif
bool is_offscreen = !!offscreen_target_frame_buffer_.get();
if (is_offscreen) {
if (!ResizeOffscreenFrameBuffer(gfx::Size(width, height))) {
LOG(ERROR) << "GLES2DecoderImpl: Context lost because "
<< "ResizeOffscreenFrameBuffer failed.";
return error::kLostContext;
}
}
if (!resize_callback_.is_null()) {
resize_callback_.Run(gfx::Size(width, height), scale_factor);
// The callback must leave our context current.
DCHECK(context_->IsCurrent(surface_.get()));
if (!context_->IsCurrent(surface_.get())) {
LOG(ERROR) << "GLES2DecoderImpl: Context lost because context no longer "
<< "current after resize callback.";
return error::kLostContext;
}
}
return error::kNoError;
}
| 0
|
83,737
|
/* Receives one skb from an SCTP socket's receive queue, optionally peeking
 * (MSG_PEEK) instead of dequeueing. Blocks up to the socket's receive
 * timeout (no wait when 'noblock'), busy-polling first when enabled.
 * Returns the skb, or NULL with *err set (-EAGAIN on timeout, a pending
 * socket error, or 0-style break on RCV_SHUTDOWN). */
struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
int noblock, int *err)
{
int error;
struct sk_buff *skb;
long timeo;
timeo = sock_rcvtimeo(sk, noblock);
pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
MAX_SCHEDULE_TIMEOUT);
do {
/* Again only user level code calls this function,
* so nothing interrupt level
* will suddenly eat the receive_queue.
*
* Look at current nfs client by the way...
* However, this function was correct in any case. 8)
*/
if (flags & MSG_PEEK) {
/* Peek keeps the skb queued; take an extra reference. */
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
refcount_inc(&skb->users);
} else {
skb = __skb_dequeue(&sk->sk_receive_queue);
}
if (skb)
return skb;
/* Caller is allowed not to check sk->sk_err before calling. */
error = sock_error(sk);
if (error)
goto no_packet;
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
if (sk_can_busy_loop(sk)) {
sk_busy_loop(sk, noblock);
if (!skb_queue_empty(&sk->sk_receive_queue))
continue;
}
/* User doesn't want to wait. */
error = -EAGAIN;
if (!timeo)
goto no_packet;
} while (sctp_wait_for_packet(sk, err, &timeo) == 0);
return NULL;
no_packet:
*err = error;
return NULL;
}
| 0
|
344,940
|
/* Builds the netlink NFQNL_MSG_PACKET message describing a queued packet
 * for delivery to the userspace listener. Computes an upper bound on the
 * attribute sizes, allocates the skb, then emits the packet header,
 * in/out interface indices (including bridge phys devices), mark, hwaddr,
 * timestamp, uid/gid, conntrack info, capture length, checksum state and
 * finally (in NFQNL_COPY_PACKET mode) up to queue->copy_range bytes of
 * payload via zero-copy. Stores a pointer to the in-message packet id in
 * *packet_id_ptr so the caller can fill it in. Returns the skb, or NULL
 * on allocation/attribute failure. */
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
struct nf_queue_entry *entry,
__be32 **packet_id_ptr)
{
size_t size;
size_t data_len = 0, cap_len = 0;
unsigned int hlen = 0;
struct sk_buff *skb;
struct nlattr *nla;
struct nfqnl_msg_packet_hdr *pmsg;
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
struct sk_buff *entskb = entry->skb;
struct net_device *indev;
struct net_device *outdev;
struct nf_conn *ct = NULL;
enum ip_conntrack_info uninitialized_var(ctinfo);
bool csum_verify;
/* Worst-case size of all fixed attributes. */
size = nlmsg_total_size(sizeof(struct nfgenmsg))
+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
#endif
+ nla_total_size(sizeof(u_int32_t)) /* mark */
+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
+ nla_total_size(sizeof(u_int32_t)) /* skbinfo */
+ nla_total_size(sizeof(u_int32_t)); /* cap_len */
if (entskb->tstamp.tv64)
size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
/* Checksum must be verified by userspace for locally-terminated paths
 * where the kernel has not already validated it. */
if (entry->hook <= NF_INET_FORWARD ||
(entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
csum_verify = !skb_csum_unnecessary(entskb);
else
csum_verify = false;
outdev = entry->outdev;
switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
case NFQNL_COPY_META:
case NFQNL_COPY_NONE:
break;
case NFQNL_COPY_PACKET:
/* Without GSO passthrough, partial checksums must be resolved
 * before handing the packet to userspace. */
if (!(queue->flags & NFQA_CFG_F_GSO) &&
entskb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_help(entskb))
return NULL;
data_len = ACCESS_ONCE(queue->copy_range);
if (data_len > entskb->len)
data_len = entskb->len;
hlen = skb_zerocopy_headlen(entskb);
hlen = min_t(unsigned int, hlen, data_len);
size += sizeof(struct nlattr) + hlen;
cap_len = entskb->len;
break;
}
if (queue->flags & NFQA_CFG_F_CONNTRACK)
ct = nfqnl_ct_get(entskb, &size, &ctinfo);
if (queue->flags & NFQA_CFG_F_UID_GID) {
size += (nla_total_size(sizeof(u_int32_t)) /* uid */
+ nla_total_size(sizeof(u_int32_t))); /* gid */
}
skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
GFP_ATOMIC);
if (!skb)
return NULL;
nlh = nlmsg_put(skb, 0, 0,
NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
sizeof(struct nfgenmsg), 0);
if (!nlh) {
kfree_skb(skb);
return NULL;
}
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = entry->pf;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(queue->queue_num);
nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
pmsg = nla_data(nla);
pmsg->hw_protocol = entskb->protocol;
pmsg->hook = entry->hook;
/* The caller assigns the packet id later, directly in the message. */
*packet_id_ptr = &pmsg->packet_id;
indev = entry->indev;
if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
goto nla_put_failure;
#else
if (entry->pf == PF_BRIDGE) {
/* Case 1: indev is physical input device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
htonl(indev->ifindex)) ||
/* this is the bridge group "brX" */
/* rcu_read_lock()ed by __nf_queue */
nla_put_be32(skb, NFQA_IFINDEX_INDEV,
htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
htonl(indev->ifindex)))
goto nla_put_failure;
if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
htonl(entskb->nf_bridge->physindev->ifindex)))
goto nla_put_failure;
}
#endif
}
if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
goto nla_put_failure;
#else
if (entry->pf == PF_BRIDGE) {
/* Case 1: outdev is physical output device, we need to
* look for bridge group (when called from
* netfilter_bridge) */
if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
htonl(outdev->ifindex)) ||
/* this is the bridge group "brX" */
/* rcu_read_lock()ed by __nf_queue */
nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
goto nla_put_failure;
} else {
/* Case 2: outdev is bridge group, we need to look for
* physical output device (when called from ipv4) */
if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
htonl(outdev->ifindex)))
goto nla_put_failure;
if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
htonl(entskb->nf_bridge->physoutdev->ifindex)))
goto nla_put_failure;
}
#endif
}
if (entskb->mark &&
nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
goto nla_put_failure;
if (indev && entskb->dev &&
entskb->mac_header != entskb->network_header) {
struct nfqnl_msg_packet_hw phw;
int len;
memset(&phw, 0, sizeof(phw));
len = dev_parse_header(entskb, phw.hw_addr);
if (len) {
phw.hw_addrlen = htons(len);
if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
goto nla_put_failure;
}
}
if (entskb->tstamp.tv64) {
struct nfqnl_msg_packet_timestamp ts;
struct timeval tv = ktime_to_timeval(entskb->tstamp);
ts.sec = cpu_to_be64(tv.tv_sec);
ts.usec = cpu_to_be64(tv.tv_usec);
if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
goto nla_put_failure;
}
if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
goto nla_put_failure;
if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
goto nla_put_failure;
if (cap_len > data_len &&
nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
goto nla_put_failure;
if (nfqnl_put_packet_info(skb, entskb, csum_verify))
goto nla_put_failure;
if (data_len) {
struct nlattr *nla;
if (skb_tailroom(skb) < sizeof(*nla) + hlen)
goto nla_put_failure;
/* The payload attribute header is written by hand because the
 * data itself is attached zero-copy rather than memcpy'd. */
nla = (struct nlattr *)skb_put(skb, sizeof(*nla));
nla->nla_type = NFQA_PAYLOAD;
nla->nla_len = nla_attr_size(data_len);
/* NOTE(review): skb_zerocopy()'s result is ignored here; later
 * upstream versions made it return an error and handled the
 * failure path explicitly -- confirm this version cannot fail
 * or that a failure is benign. */
skb_zerocopy(skb, entskb, data_len, hlen);
}
nlh->nlmsg_len = skb->len;
return skb;
nla_put_failure:
kfree_skb(skb);
net_err_ratelimited("nf_queue: error creating packet message\n");
return NULL;
}
| 1
|
503,824
|
// Records the last-downstream-byte timing exactly once, when the request
// stream first ends.
void FilterManager::maybeEndDecode(bool end_stream) {
  // If recreateStream is called, the HCM rewinds state and may send more encodeData calls.
  if (!end_stream || remoteDecodeComplete()) {
    return;
  }
  stream_info_.downstreamTiming().onLastDownstreamRxByteReceived(dispatcher().timeSource());
  ENVOY_STREAM_LOG(debug, "request end stream", *this);
}
| 0
|
14,961
|
/* Read handler for register access widths this DMA device does not support:
 * reports a hardware error with the faulting address; the return value is
 * never meaningfully used. */
static uint32_t dma_rinvalid (void *opaque, target_phys_addr_t addr)
{
hw_error("Unsupported short raccess. reg=" TARGET_FMT_plx "\n", addr);
return 0;
}
| 0
|
91,699
|
/* perf_event 'del' callback: stops the counter (flushing its final count),
 * releases the event's hardware counter slot, and refreshes the mmap'd
 * userpage so userspace sees the updated state. */
static void mipspmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
mipspmu_stop(event, PERF_EF_UPDATE);
cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
perf_event_update_userpage(event);
}
| 0
|
216,164
|
/* Releases the classifier rule owned by 'criteria' and resets its version
 * to mark the structure as destroyed. */
rule_criteria_destroy(struct rule_criteria *criteria)
{
cls_rule_destroy(&criteria->cr);
criteria->version = OVS_VERSION_NOT_REMOVED; /* Mark as destroyed. */
}
| 0
|
238,487
|
/* Timeout callback fired when the freshly created LUKS cleartext device has
 * not shown up within 10 seconds: reports the failure on the method context,
 * disconnects the device-changed signal handler, and drops this callback's
 * reference on the shared data. Returns FALSE to remove the timeout source. */
filesystem_create_wait_for_luks_device_not_seen_cb (gpointer user_data)
{
MkfsLuksData *data = user_data;
throw_error (data->context,
ERROR_FAILED,
"Error creating luks encrypted file system: timeout (10s) waiting for luks device to show up");
g_signal_handler_disconnect (data->device->priv->daemon, data->device_changed_signal_handler_id);
mkfse_data_unref (data);
return FALSE;
}
| 0
|
514,524
|
// Human-readable name of this session's type: "server" or "client".
// Any other value indicates corrupted state and aborts the process.
const char* Http2Session::TypeName() const {
  if (session_type_ == NGHTTP2_SESSION_SERVER)
    return "server";
  if (session_type_ == NGHTTP2_SESSION_CLIENT)
    return "client";
  // This should never happen
  ABORT();
}
| 0
|
333,855
|
/* Stub for builds without epoll support: the epoll dispatch path must never
 * be reached, so any call is a programming error. */
static int aio_epoll(AioContext *ctx, GPollFD *pfds,
unsigned npfd, int64_t timeout)
{
assert(false);
}
| 0
|
40,978
|
/* Handles an x:muc#user payload on an incoming message: if it carries a
 * mediated room invite (XEP-0045), extracts the inviter's bare JID and the
 * optional reason and room password, and forwards them to the UI event
 * layer. Stanzas without a 'from', without an <invite>, or with an
 * unparsable inviter JID are ignored. */
_handel_muc_user(xmpp_stanza_t *const stanza)
{
xmpp_ctx_t *ctx = connection_get_ctx();
xmpp_stanza_t *xns_muc_user = xmpp_stanza_get_child_by_ns(stanza, STANZA_NS_MUC_USER);
const char *room = xmpp_stanza_get_from(stanza);
if (!room) {
log_warning("Message received with no from attribute, ignoring");
return;
}
// XEP-0045
xmpp_stanza_t *invite = xmpp_stanza_get_child_by_name(xns_muc_user, STANZA_NAME_INVITE);
if (!invite) {
return;
}
const char *invitor_jid = xmpp_stanza_get_from(invite);
if (!invitor_jid) {
log_warning("Chat room invite received with no from attribute");
return;
}
Jid *jidp = jid_create(invitor_jid);
if (!jidp) {
return;
}
char *invitor = jidp->barejid;
char *reason = NULL;
xmpp_stanza_t *reason_st = xmpp_stanza_get_child_by_name(invite, STANZA_NAME_REASON);
if (reason_st) {
reason = xmpp_stanza_get_text(reason_st);
}
char *password = NULL;
xmpp_stanza_t *password_st = xmpp_stanza_get_child_by_name(xns_muc_user, STANZA_NAME_PASSWORD);
if (password_st) {
password = xmpp_stanza_get_text(password_st);
}
sv_ev_room_invite(INVITE_MEDIATED, invitor, room, reason, password);
/* jidp owns 'invitor'; the optional text strings were allocated by
 * xmpp_stanza_get_text and must be freed through the library ctx. */
jid_destroy(jidp);
if (reason) {
xmpp_free(ctx, reason);
}
if (password) {
xmpp_free(ctx, password);
}
}
| 0
|
379,695
|
/* Generated opcode handler (FETCH_OBJ_R, UNUSED|VAR operands): delegates to
 * the shared property-address read helper for this operand specialization. */
static int ZEND_FASTCALL ZEND_FETCH_OBJ_R_SPEC_UNUSED_VAR_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
return zend_fetch_property_address_read_helper_SPEC_UNUSED_VAR(BP_VAR_R, ZEND_OPCODE_HANDLER_ARGS_PASSTHRU);
}
| 0
|
404,896
|
/* Transitions 'chan' to 'state': logs the old -> new state names, updates
 * the channel, and notifies the ops state_change callback (error 0). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
state_to_string(state));
chan->state = state;
chan->ops->state_change(chan, state, 0);
}
| 0
|
438,756
|
/* Returns the portion of 'path' relative to the directory opened as 'fd':
 * with USE_OPENDIR_AT and a real dirfd, skips the first 'baselen' bytes and
 * any single leading '/'; otherwise uses the path as-is. An empty remainder
 * maps to "." so the result is always a usable path. */
at_subpath(int fd, size_t baselen, const char *path)
{
#if USE_OPENDIR_AT
if (fd != (int)AT_FDCWD && baselen > 0) {
path += baselen;
if (*path == '/') ++path;
}
#endif
return *path ? path : ".";
}
| 0
|
506,294
|
/* Client-side handler for the SSL3/TLS CertificateRequest message.
 * Reads the message (tolerating a ServerDone in its place, which is pushed
 * back for reuse), rejects client-cert requests on anonymous ciphersuites
 * (TLS only), parses the permitted certificate types and the list of
 * acceptable CA distinguished names, and stores both in s->s3->tmp.
 * Netscape DN encoding bugs are tolerated when SSL_OP_NETSCAPE_CA_DN_BUG
 * is set. Returns 1 on success, <=0 on error (alert already sent). */
int ssl3_get_certificate_request(SSL *s)
{
int ok,ret=0;
unsigned long n,nc,l;
unsigned int llen,ctype_num,i;
X509_NAME *xn=NULL;
const unsigned char *p,*q;
unsigned char *d;
STACK_OF(X509_NAME) *ca_sk=NULL;
n=s->method->ssl_get_message(s,
SSL3_ST_CR_CERT_REQ_A,
SSL3_ST_CR_CERT_REQ_B,
-1,
s->max_cert_list,
&ok);
if (!ok) return((int)n);
s->s3->tmp.cert_req=0;
if (s->s3->tmp.message_type == SSL3_MT_SERVER_DONE)
{
/* No CertificateRequest was sent; let the state machine
 * re-process this ServerDone message. */
s->s3->tmp.reuse_message=1;
return(1);
}
if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE_REQUEST)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_WRONG_MESSAGE_TYPE);
goto err;
}
/* TLS does not like anon-DH with client cert */
if (s->version > SSL3_VERSION)
{
l=s->s3->tmp.new_cipher->algorithms;
if (l & SSL_aNULL)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_UNEXPECTED_MESSAGE);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_TLS_CLIENT_CERT_REQ_WITH_ANON_CIPHER);
goto err;
}
}
p=d=(unsigned char *)s->init_msg;
if ((ca_sk=sk_X509_NAME_new(ca_dn_cmp)) == NULL)
{
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,ERR_R_MALLOC_FAILURE);
goto err;
}
/* get the certificate types */
ctype_num= *(p++);
if (ctype_num > SSL3_CT_NUMBER)
ctype_num=SSL3_CT_NUMBER;
for (i=0; i<ctype_num; i++)
s->s3->tmp.ctype[i]= p[i];
p+=ctype_num;
/* get the CA RDNs */
n2s(p,llen);
#if 0
{
FILE *out;
out=fopen("/tmp/vsign.der","w");
fwrite(p,1,llen,out);
fclose(out);
}
#endif
/* Message length must exactly cover ctype count byte, the ctypes,
 * the 2-byte DN-list length, and the DN list itself. */
if ((llen+ctype_num+2+1) != n)
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_LENGTH_MISMATCH);
goto err;
}
for (nc=0; nc<llen; )
{
n2s(p,l);
if ((l+nc+2) > llen)
{
if ((s->options & SSL_OP_NETSCAPE_CA_DN_BUG))
goto cont; /* netscape bugs */
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_CA_DN_TOO_LONG);
goto err;
}
q=p;
if ((xn=d2i_X509_NAME(NULL,&q,l)) == NULL)
{
/* If netscape tolerance is on, ignore errors */
if (s->options & SSL_OP_NETSCAPE_CA_DN_BUG)
goto cont;
else
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,ERR_R_ASN1_LIB);
goto err;
}
}
/* d2i must have consumed exactly the declared DN length. */
if (q != (p+l))
{
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECODE_ERROR);
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,SSL_R_CA_DN_LENGTH_MISMATCH);
goto err;
}
if (!sk_X509_NAME_push(ca_sk,xn))
{
SSLerr(SSL_F_SSL3_GET_CERTIFICATE_REQUEST,ERR_R_MALLOC_FAILURE);
goto err;
}
p+=l;
nc+=l+2;
}
if (0)
{
cont:
ERR_clear_error();
}
/* we should setup a certificate to return.... */
s->s3->tmp.cert_req=1;
s->s3->tmp.ctype_num=ctype_num;
if (s->s3->tmp.ca_names != NULL)
sk_X509_NAME_pop_free(s->s3->tmp.ca_names,X509_NAME_free);
s->s3->tmp.ca_names=ca_sk;
ca_sk=NULL;
ret=1;
err:
if (ca_sk != NULL) sk_X509_NAME_pop_free(ca_sk,X509_NAME_free);
return(ret);
}
| 0
|
126,206
|
/* Enables or disables the hardware PID filter on the AF9005 demux by
 * programming XD_MP2IF_DMX_CTRL. Returns 0 on success or the first
 * register-write error. */
static int af9005_pid_filter_control(struct dvb_usb_adapter *adap, int onoff)
{
int ret;
deb_info("pid filter control onoff %d\n", onoff);
if (onoff) {
/* NOTE(review): DMX_CTRL is written to 1 both before and after the
 * bit update below -- presumably a required enable sequence for this
 * chip; confirm against the datasheet. */
ret =
af9005_write_ofdm_register(adap->dev, XD_MP2IF_DMX_CTRL, 1);
if (ret)
return ret;
ret =
af9005_write_register_bits(adap->dev,
XD_MP2IF_DMX_CTRL, 1, 1, 1);
if (ret)
return ret;
ret =
af9005_write_ofdm_register(adap->dev, XD_MP2IF_DMX_CTRL, 1);
} else
ret =
af9005_write_ofdm_register(adap->dev, XD_MP2IF_DMX_CTRL, 0);
if (ret)
return ret;
deb_info("pid filter control ok\n");
return 0;
}
| 0
|
317,227
|
// Handles a renderer request to run a modal dialog: stashes the reply
// message (answered when the modal loop ends) and the opener's route id,
// and suspends the opener's hang monitoring / in-flight event accounting
// while it is blocked by the modal view.
void RenderViewHostImpl::OnRunModal(int opener_id, IPC::Message* reply_msg) {
DCHECK(!run_modal_reply_msg_);
run_modal_reply_msg_ = reply_msg;
run_modal_opener_id_ = opener_id;
RecordAction(UserMetricsAction("ShowModalDialog"));
RenderViewHostImpl* opener =
RenderViewHostImpl::FromID(GetProcess()->GetID(), run_modal_opener_id_);
if (opener) {
opener->StopHangMonitorTimeout();
opener->decrement_in_flight_event_count();
}
}
| 0
|
316,625
|
// Returns the root editable element containing the selection's base, or
// nullptr when there is no selection inside editable content. Falls back to
// the visible selection (which requires up-to-date style and layout) when
// the DOM-tree base is not editable.
Element* RootEditableElementOfSelection(const FrameSelection& frameSelection) {
  const SelectionInDOMTree& selection = frameSelection.GetSelectionInDOMTree();
  if (selection.IsNone())
    return nullptr;
  Element* const editable = RootEditableElementOf(selection.Base());
  if (editable)
    return editable;
  frameSelection.GetDocument().UpdateStyleAndLayoutIgnorePendingStylesheets();
  const VisibleSelection& visible_selection =
      frameSelection.ComputeVisibleSelectionInDOMTree();
  return RootEditableElementOf(visible_selection.Start());
}
| 0
|
474,162
|
/* Drops one reference on 'addr', freeing it when the last reference goes. */
static inline void unix_release_addr(struct unix_address *addr)
{
if (refcount_dec_and_test(&addr->refcnt))
kfree(addr);
}
| 0
|
425,646
|
/* Builds an identity-mapped page table covering the full physical address
 * width: allocates one top-level page, fills every entry with an identity
 * mapping (OR'd with the memory-encryption mask and default attributes),
 * and splits entries down to the largest page size the hardware supports.
 * Returns the page table base address, or 0 on allocation failure. */
CreatePageTable (
VOID
)
{
RETURN_STATUS Status;
UINTN PhysicalAddressBits;
UINTN NumberOfEntries;
PAGE_ATTRIBUTE TopLevelPageAttr;
UINTN PageTable;
PAGE_ATTRIBUTE MaxMemoryPage;
UINTN Index;
UINT64 AddressEncMask;
UINT64 *PageEntry;
EFI_PHYSICAL_ADDRESS PhysicalAddress;
TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();
PhysicalAddressBits = GetPhysicalAddressWidth ();
/* Each top-level entry covers 2^AddressBitOffset bytes. */
NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -
mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);
PageTable = (UINTN) AllocatePageTableMemory (1);
if (PageTable == 0) {
return 0;
}
AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;
MaxMemoryPage = GetMaxMemoryPage (TopLevelPageAttr);
PageEntry = (UINT64 *)PageTable;
PhysicalAddress = 0;
for (Index = 0; Index < NumberOfEntries; ++Index) {
*PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;
//
// Split the top page table down to the maximum page size supported
//
if (MaxMemoryPage < TopLevelPageAttr) {
Status = SplitPage(PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);
ASSERT_EFI_ERROR (Status);
}
if (TopLevelPageAttr == Page1G) {
//
// PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.
//
*PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);
}
PageEntry += 1;
PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;
}
return PageTable;
}
| 0
|
404,577
|
/* libtiff "map file" callback: exposes the in-memory buffer already held by
 * the TiffContext (no real mmap is performed). Returns 0. */
tiff_load_map_file (thandle_t handle, tdata_t *buf, toff_t *size)
{
TiffContext *context = (TiffContext *)handle;
*buf = context->buffer;
*size = context->used;
return 0;
}
| 0
|
152,214
|
/* Reads one line of source into 's' (at most 'size' bytes), decoding per the
 * tokenizer state: uses the attached codec (STATE_NORMAL), a raw read
 * (STATE_RAW), or first resolves the encoding via BOM / coding-spec
 * detection. Rejects non-UTF-8 bytes when no encoding was declared.
 * Returns the line, or error_ret(tok) on failure. */
decoding_fgets(char *s, int size, struct tok_state *tok)
{
char *line = NULL;
int badchar = 0;
for (;;) {
if (tok->decoding_state == STATE_NORMAL) {
/* We already have a codec associated with
this input. */
line = fp_readl(s, size, tok);
break;
} else if (tok->decoding_state == STATE_RAW) {
/* We want a 'raw' read. */
line = Py_UniversalNewlineFgets(s, size,
tok->fp, NULL);
break;
} else {
/* We have not yet determined the encoding.
If an encoding is found, use the file-pointer
reader functions from now on. */
if (!check_bom(fp_getc, fp_ungetc, fp_setreadl, tok))
return error_ret(tok);
assert(tok->decoding_state != STATE_INIT);
}
}
/* A PEP 263 coding declaration may only appear on the first two lines. */
if (line != NULL && tok->lineno < 2 && !tok->read_coding_spec) {
if (!check_coding_spec(line, strlen(line), tok, fp_setreadl)) {
return error_ret(tok);
}
}
#ifndef PGEN
/* The default encoding is UTF-8, so make sure we don't have any
non-UTF-8 sequences in it. */
if (line && !tok->encoding) {
unsigned char *c;
int length;
for (c = (unsigned char *)line; *c; c += length)
if (!(length = valid_utf8(c))) {
badchar = *c;
break;
}
}
if (badchar) {
/* Need to add 1 to the line number, since this line
has not been counted, yet. */
PyErr_Format(PyExc_SyntaxError,
"Non-UTF-8 code starting with '\\x%.2x' "
"in file %U on line %i, "
"but no encoding declared; "
"see http://python.org/dev/peps/pep-0263/ for details",
badchar, tok->filename, tok->lineno + 1);
return error_ret(tok);
}
#endif
return line;
}
| 0
|
431,356
|
// Returns the namespace string of a DBRef element, asserting (uassert
// 10063) that this element actually has DBRef type. The string begins
// 4 bytes into the value -- presumably past a 32-bit length prefix;
// confirm against the BSON spec.
const char* dbrefNS() const {
uassert(10063, "not a dbref", type() == DBRef);
return value() + 4;
}
| 0
|
189,870
|
// For <link>, 'href' is a legal link attribute in addition to whatever
// HTMLElement already accepts.
bool HTMLLinkElement::HasLegalLinkAttribute(const QualifiedName& name) const {
return name == hrefAttr || HTMLElement::HasLegalLinkAttribute(name);
}
| 0
|
308,904
|
// Completion callback for a usage/quota query: computes the remaining space
// (quota minus usage, zero on error or when usage already meets the quota)
// and sends it back to the renderer in the stashed reply message.
void DatabaseMessageFilter::OnDatabaseGetUsageAndQuota(
    IPC::Message* reply_msg,
    quota::QuotaStatusCode status,
    int64 usage,
    int64 quota) {
  const bool query_ok = (status == quota::kQuotaStatusOk);
  const int64 available = (query_ok && usage < quota) ? (quota - usage) : 0;
  DatabaseHostMsg_GetSpaceAvailable::WriteReplyParams(reply_msg, available);
  Send(reply_msg);
}
| 0
|
172,849
|
// Extracts the "web custom data" clipboard format from |data_object| into
// |custom_data| (MIME-type -> payload map). Returns false when the format is
// absent or the data cannot be retrieved. The HGLOBAL lock is scoped so the
// storage medium is released only after the map has been populated.
bool ClipboardUtil::GetWebCustomData(
IDataObject* data_object,
std::map<base::string16, base::string16>* custom_data) {
DCHECK(data_object && custom_data);
if (!HasData(data_object, Clipboard::GetWebCustomDataFormatType()))
return false;
STGMEDIUM store;
if (GetData(data_object, Clipboard::GetWebCustomDataFormatType(), &store)) {
{
base::win::ScopedHGlobal<char*> data(store.hGlobal);
ReadCustomDataIntoMap(data.get(), data.Size(), custom_data);
}
ReleaseStgMedium(&store);
return true;
}
return false;
}
| 0
|
247,879
|
// Runs on the worker thread just before its v8 isolate is torn down:
// notifies the per-isolate data of the impending destruction and removes
// this thread's interruptor from the GC thread state.
void WorkerThread::willDestroyIsolate()
{
ASSERT(isCurrentThread());
ASSERT(m_isolate);
V8PerIsolateData::willBeDestroyed(m_isolate);
ThreadState::current()->removeInterruptor(m_interruptor.get());
}
| 0
|
484,073
|
/* DDC (I2C bit-bang) callback: drives the SDA line to 'val' -- first enables
 * SDA as an output, then sets its level. */
static void i740fb_ddc_setsda(void *data, int val)
{
struct i740fb_par *par = data;
i740outreg_mask(par, XRX, REG_DDC_DRIVE, DDC_SDA, DDC_SDA);
i740outreg_mask(par, XRX, REG_DDC_STATE, val ? DDC_SDA : 0, DDC_SDA);
}
| 0
|
402,696
|
/* Walks 'mm's page tables for 'addr' and returns a pointer to the huge PTE
 * at whichever level it is found (PUD, PMD, or -- with super pages -- an L2
 * PTE). Returns NULL when no present huge mapping exists at any level. */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
pte_t *pte;
#endif
/* Get the top-level page table entry. */
pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);
/* We don't have four levels. */
pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
if (!pud_present(*pud))
return NULL;
/* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
if (pud_huge(*pud))
return (pte_t *)pud;
pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
pmd_index(addr), 1);
if (!pmd_present(*pmd))
return NULL;
#else
pmd = pmd_offset(pud, addr);
#endif
/* Check for an L1 huge PTE. */
if (pmd_huge(*pmd))
return (pte_t *)pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
/* Check for an L2 huge PTE. */
pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
if (!pte_present(*pte))
return NULL;
if (pte_super(*pte))
return pte;
#endif
return NULL;
}
| 0
|
83,514
|
/*
 * Register a C-struct description with the interpreter: validate the name
 * against existing C types and structs, resolve every field's type, compute
 * field sizes/offsets (including bitfields and embedded enums/structs),
 * then allocate a Jsi_OptionTypedef describing the whole struct and store
 * it in the interpreter's hashes. On success *stPtr (if non-NULL) receives
 * the new typedef. Returns JSI_OK or logs and returns an error.
 *
 * NOTE(review): offset math packs bitfields by bit (boffset is in bits) and
 * rounds non-bitfield fields up to the next byte — confirm against callers
 * before changing any of the arithmetic here.
 */
static Jsi_RC jsi_csSetupStruct(Jsi_Interp *interp, Jsi_StructSpec *sl, Jsi_FieldSpec *sf,
    Jsi_StructSpec* recs, int flen, Jsi_OptionTypedef** stPtr, int arrCnt) {
    bool isNew;
    int i, cnt = 0, boffset = 0;
    Jsi_HashEntry *entry, *hPtr;
    /* The struct name must not collide with a builtin/known C type... */
    if (!(hPtr=Jsi_HashEntryNew(interp->CTypeHash, sl->name, &isNew)) || !isNew)
        return Jsi_LogError("struct is c-type: %s", sl->name);
    /* ...nor with an already registered struct. */
    entry = Jsi_HashEntryNew(interp->StructHash, sl->name, &isNew);
    if (!isNew)
        return Jsi_LogError("duplicate struct: %s", sl->name);
    Jsi_FieldSpec *asf = NULL, *osf = sf;
    /* First pass: resolve field types and (when recs is given) compute
     * per-field size, index and bit offset. */
    while (sf && sf->id != JSI_OPTION_END) {
        if (!sf->type)
            sf->type = Jsi_OptionTypeInfo(sf->id);
        if (!sf->type && sf->tname)
            sf->type = Jsi_TypeLookup(interp, sf->tname);
        int isbitset = ((sf->flags&JSI_OPT_BITSET_ENUM)!=0);
        if (sf->type && sf->type->extData && (sf->type->flags&(jsi_CTYP_ENUM|jsi_CTYP_STRUCT))) {
            // A struct sub-field or a bit field mapped to an ENUM.
            Jsi_OptionSpec *es = (typeof(es))sf->type->extData;
            es->value++;
            if ((sf->type->flags&jsi_CTYP_ENUM)) {
                if (sf->bits)
                    return Jsi_LogError("enum of bits unsupported: %s", sl->name); //TODO: get working again...
                sf->custom = (isbitset ? Jsi_Opt_SwitchBitset : Jsi_Opt_SwitchEnum);
                sf->data = (void*)es->data;
                sf->id = JSI_OPTION_CUSTOM;
            }
            else if (sf->type->flags & jsi_CTYP_STRUCT) {
                sf->custom = Jsi_Opt_SwitchSuboption;
                sf->data = es->extData;
                sf->id = JSI_OPTION_CUSTOM;
            }
        }
        if (recs) {
            if (!sf->type)
                return Jsi_LogError("unknown id");
            sf->tname = sf->type->cName;
            /* Bitset enums are stored in a plain int. */
            sf->size = (isbitset?(int)sizeof(int):sf->type->size);
            if (sf->arrSize)
                sf->size *= sf->arrSize;
            sf->idx = cnt;
            sf->boffset = boffset;
            if (sf->bits) {
                if (sf->bits>=64)
                    return Jsi_LogError("bits too large");
                /* Bitfields pack by bit; byte offset left unset. */
                boffset += sf->bits;
                sf->id = JSI_OPTION_CUSTOM;
                sf->custom=Jsi_Opt_SwitchBitfield;
                sf->init.OPT_BITS=&jsi_csBitGetSet;
            } else {
                /* Round up to the next whole byte for ordinary fields. */
                sf->offset = (boffset+7)/8;
                boffset += sf->size*8;
            }
        } else {
            boffset += sf->size*8;
        }
        sf->extData = (uchar*)sl;
        sf++, cnt++;
    }
    sl->idx = cnt;
    if (!sl->size)
        sl->size = (boffset+7)/8;
    if (sl->ssig)
        Jsi_HashSet(interp->SigHash, (void*)(uintptr_t)sl->ssig, sl);
    /* When field specs are supplied, allocate room (after the typedef) for
     * a struct initializer plus copies of the field/struct/array specs. */
    int extra = 0;
    if (flen)
        extra = sl->size + ((flen+2+arrCnt*2)*sizeof(Jsi_StructSpec));
    Jsi_OptionTypedef *st = (typeof(st))Jsi_Calloc(1, sizeof(*st) + extra);
    SIGINIT(st, TYPEDEF);
    if (!recs)
        sf = osf;
    else {
        st->extra = (uchar*)(st+1); // Space for struct initializer.
        sf = (typeof(sf))(st->extra + sl->size);
        memcpy(sf, recs, sizeof(*sf)*(flen+1));
        sl = sf+flen+1;
        if (arrCnt)
            asf = sl+1;
        memcpy(sl, recs+flen+1, sizeof(*sl));
        /* Second pass over the copied specs: wire back-pointers and expand
         * array fields into JSI_OPTION_CUSTOM carray entries. */
        for (i=0; i<flen; i++) {
            sf[i].extData = (uchar*)sl;
            if (sf[i].id == 0 && sf[i].type)
                sf[i].id = sf[i].type->id;
            if (sf[i].arrSize) {
                asf[0] = sf[i];
                asf[1] = sf[flen];
                asf->arrSize = asf->offset = 0;
                //asf->size = asf->type->size;
                sf[i].id = JSI_OPTION_CUSTOM;
                sf[i].custom=Jsi_Opt_SwitchCArray;
                sf[i].init.OPT_CARRAY = asf;
                asf += 2;
                //sf[i].extData =
                //    {.sig=JSI_SIG_OPTS_FIELD, .name=sf[i].name,
                //    JSI_OPT_CARRAY_ITEM_(JSI_SIG_OPTS_FIELD,'+otype+', '+name+', sf[i].name, .help=sf[i].help, .flags='+fflags+rest+'),\n'
                //    JSI_OPT_END_(JSI_SIG_OPTS_FIELD,'+name+', .help="Options for array field '+name+'.'+fname+'")\n };\n\n';
                //    JSI_OPT_CARRAY_(JSI_SIG_OPTS_FIELD,'+name+', '+fname+', "'+fdescr+'", '+fflags+', '+arnam+', '+f.asize+', "'+type+'", '+csinit+'),\n';
            }
        }
    }
    /* Cross-link typedef <-> struct spec <-> field specs and publish. */
    st->extData = (uchar*)sl;
    sl->extData = (uchar*)sf;
    sl->type = st;
    st->cName = sl->name;
    st->idName = "CUSTOM";
    st->id = JSI_OPTION_CUSTOM;
    st->size = sl->size;
    st->flags = jsi_CTYP_DYN_MEMORY|jsi_CTYP_STRUCT;
    Jsi_HashValueSet(entry, sl);
    Jsi_HashValueSet(hPtr, st);
    st->hPtr = hPtr;
    if (stPtr)
        *stPtr = st;
    return JSI_OK;
}
| 0
|
111,635
|
/*
 * Print an AFS bosserver reply. Data-packet payloads are not decoded yet;
 * for any other packet type the 32-bit error code is printed.
 * The trunc label looks unreachable but is the truncation target jumped to
 * from inside the INTOUT() macro.
 */
bos_reply_print(netdissect_options *ndo,
                register const u_char *bp, int length, int32_t opcode)
{
    const struct rx_header *rxh;
    /* Too short to even contain an RX header: nothing to print. */
    if (length <= (int)sizeof(struct rx_header))
        return;
    rxh = (const struct rx_header *) bp;
    /*
     * Print out the afs call we're invoking. The table used here was
     * gleaned from volser/volint.xg
     */
    ND_PRINT((ndo, " bos reply %s", tok2str(bos_req, "op#%d", opcode)));
    bp += sizeof(struct rx_header);
    /*
     * If it was a data packet, interpret the response.
     */
    if (rxh->type == RX_PACKET_TYPE_DATA)
        /* Well, no, not really. Leave this for later */
        ;
    else {
        /*
         * Otherwise, just print out the return code
         */
        ND_PRINT((ndo, " errcode"));
        INTOUT();
    }
    return;
trunc:
    ND_PRINT((ndo, " [|bos]"));
}
| 0
|
371,012
|
/*
 * Inspect a filesystem believed to be a NetBSD root: parse /etc/release for
 * the product name and major/minor version, then determine architecture,
 * mountpoints (via fstab through Augeas) and hostname.
 * Returns 0 on success, -1 on error (including /etc/release missing).
 */
guestfs___check_netbsd_root (guestfs_h *g, struct inspect_fs *fs)
{
    if (guestfs_exists (g, "/etc/release") > 0) {
        char *major, *minor;
        if (parse_release_file (g, fs, "/etc/release") == -1)
            return -1;
        if (match2 (g, fs->product_name, re_netbsd, &major, &minor)) {
            fs->type = OS_TYPE_NETBSD;
            fs->major_version = guestfs___parse_unsigned_int (g, major);
            free (major);
            /* Free minor on the early-error path too. */
            if (fs->major_version == -1) {
                free (minor);
                return -1;
            }
            fs->minor_version = guestfs___parse_unsigned_int (g, minor);
            free (minor);
            if (fs->minor_version == -1)
                return -1;
        }
    } else {
        return -1;
    }
    /* Determine the architecture. */
    check_architecture (g, fs);
    /* We already know /etc/fstab exists because it's part of the test above. */
    const char *configfiles[] = { "/etc/fstab", NULL };
    if (inspect_with_augeas (g, fs, configfiles, check_fstab) == -1)
        return -1;
    /* Determine hostname. */
    if (check_hostname_unix (g, fs) == -1)
        return -1;
    return 0;
}
| 0
|
418,551
|
/*
 * Insert a PMD-sized pfn mapping at @addr in @vma during fault handling.
 * Validates that the VMA is a legal pfn/mixed mapping, optionally
 * pre-allocates a page table for deposit (architectures that require it),
 * then installs the entry. Returns VM_FAULT_NOPAGE on success,
 * VM_FAULT_SIGBUS for an out-of-range address, VM_FAULT_OOM on allocation
 * failure.
 */
vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, pfn_t pfn, bool write)
{
    pgprot_t pgprot = vma->vm_page_prot;
    pgtable_t pgtable = NULL;
    /*
     * If we had pmd_special, we could avoid all these restrictions,
     * but we need to be consistent with PTEs and architectures that
     * can't support a 'special' bit.
     */
    BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
            !pfn_t_devmap(pfn));
    BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                        (VM_PFNMAP|VM_MIXEDMAP));
    BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
    if (addr < vma->vm_start || addr >= vma->vm_end)
        return VM_FAULT_SIGBUS;
    /* Some architectures require a deposited page table for huge PMDs. */
    if (arch_needs_pgtable_deposit()) {
        pgtable = pte_alloc_one(vma->vm_mm, addr);
        if (!pgtable)
            return VM_FAULT_OOM;
    }
    track_pfn_insert(vma, &pgprot, pfn);
    insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
    return VM_FAULT_NOPAGE;
}
| 0
|
180,347
|
// JSC cell finalizer: runs the C++ destructor in place. The GC owns the
// memory, so only the destructor is invoked — never operator delete.
void JSTestActiveDOMObject::destroy(JSC::JSCell* cell)
{
    JSTestActiveDOMObject* thisObject = jsCast<JSTestActiveDOMObject*>(cell);
    // Qualified call avoids virtual dispatch to a subclass destructor.
    thisObject->JSTestActiveDOMObject::~JSTestActiveDOMObject();
}
| 0
|
295,445
|
/* Set the Authority Key Identifier on |cert| from the DER-encoded
 * certificate |der| of |derSz| bytes. The DER is only (re)decoded when it
 * is not already the cached copy on |cert|. Returns the result of
 * SetAuthKeyIdFromDcert() on success, or a negative wolfSSL error code
 * (e.g. BAD_FUNC_ARG, or a decode failure) otherwise. */
int wc_SetAuthKeyIdFromCert(Cert *cert, const byte *der, int derSz)
{
    int rc;

    if (cert == NULL)
        return BAD_FUNC_ARG;

    /* Re-decode only when |der| differs from the cached buffer. */
    rc = (cert->der != der) ? wc_SetCert_LoadDer(cert, der, derSz) : 0;
    if (rc < 0)
        return rc;

    rc = SetAuthKeyIdFromDcert(cert, (DecodedCert*)cert->decodedCert);
#ifndef WOLFSSL_CERT_GEN_CACHE
    /* Without the cache, the decoded cert is freed immediately. */
    wc_SetCert_Free(cert);
#endif
    return rc;
}
| 0
|
187,907
|
/*
 * Called when a looping source stops playing (or is killed): update the
 * per-sfx loop bookkeeping and, if |rmSource| was the "master" source that
 * owns the loop playback position, hand mastership to another source that
 * loops the same sfx — preserving the current playback position so the
 * loop continues seamlessly.
 */
static void S_AL_NewLoopMaster(src_t *rmSource, qboolean iskilled)
{
    int index;
    src_t *curSource = NULL;
    alSfx_t *curSfx;

    curSfx = &knownSfx[rmSource->sfx];
    /* This source no longer counts as an active/playing loop user. */
    if(rmSource->isPlaying)
        curSfx->loopActiveCnt--;
    if(iskilled)
        curSfx->loopCnt--;
    if(curSfx->loopCnt)
    {
        if(rmSource->priority == SRCPRI_ENTITY)
        {
            /* Entity sounds keep their own position; just save it. */
            if(!iskilled && rmSource->isPlaying)
            {
                S_AL_SaveLoopPos(rmSource, rmSource->alSource);
            }
        }
        else if(rmSource == &srcList[curSfx->masterLoopSrc])
        {
            int firstInactive = -1;

            /* Find a replacement master among other ambient loops of the
             * same sfx: prefer one that is currently playing, otherwise
             * remember the first inactive candidate. */
            if(iskilled || curSfx->loopActiveCnt)
            {
                for(index = 0; index < srcCount; index++)
                {
                    curSource = &srcList[index];
                    if(curSource->sfx == rmSource->sfx && curSource != rmSource &&
                       curSource->isActive && curSource->isLooping && curSource->priority == SRCPRI_AMBIENT)
                    {
                        if(curSource->isPlaying)
                        {
                            curSfx->masterLoopSrc = index;
                            break;
                        }
                        else if(firstInactive < 0)
                            firstInactive = index;
                    }
                }
            }
            if(!curSfx->loopActiveCnt)
            {
                /* No playing loop left: pick the inactive candidate, or —
                 * unless we are being killed — keep ourselves as master. */
                if(firstInactive < 0)
                {
                    if(iskilled)
                    {
                        curSfx->masterLoopSrc = -1;
                        return;
                    }
                    else
                        curSource = rmSource;
                }
                else
                    curSource = &srcList[firstInactive];
                /* Transfer the loop playback position to the new master. */
                if(rmSource->isPlaying)
                {
                    S_AL_SaveLoopPos(curSource, rmSource->alSource);
                }
                else
                {
                    curSource->lastTimePos = rmSource->lastTimePos;
                    curSource->lastSampleTime = rmSource->lastSampleTime;
                }
            }
        }
    }
    else
        curSfx->masterLoopSrc = -1;
}
| 0
|
222,895
|
// Tears down the encoder; by this point both port queues must already have
// been drained by the component framework.
SoftMPEG4Encoder::~SoftMPEG4Encoder() {
    ALOGV("Destruct SoftMPEG4Encoder");
    releaseEncoder();
    List<BufferInfo *> &outQueue = getPortQueue(1);
    List<BufferInfo *> &inQueue = getPortQueue(0);
    // Sanity-check that no buffers are still pending on either port.
    CHECK(outQueue.empty());
    CHECK(inQueue.empty());
}
| 0
|
214,737
|
/* Free a symbol dictionary: release each glyph image, then the glyph array
 * and the dictionary itself. A NULL dictionary is a no-op. */
jbig2_sd_release(Jbig2Ctx *ctx, Jbig2SymbolDict *dict)
{
    uint32_t n;

    if (dict == NULL)
        return;

    for (n = 0; n < dict->n_symbols; n++) {
        if (dict->glyphs[n] != NULL)
            jbig2_image_release(ctx, dict->glyphs[n]);
    }

    jbig2_free(ctx->allocator, dict->glyphs);
    jbig2_free(ctx->allocator, dict);
}
| 0
|
506,941
|
/*
 * Compute the SSLv3 record MAC into |md| for the current read or write
 * record (selected by |send|). For CBC-decrypted records it uses the
 * constant-time digest path to avoid the Lucky-13 style timing oracle on
 * padding length; otherwise it performs the classic two-stage SSLv3 MAC.
 * Returns the MAC length, or -1 on error. Advances the sequence number.
 */
int n_ssl3_mac(SSL *ssl, unsigned char *md, int send)
{
    SSL3_RECORD *rec;
    unsigned char *mac_sec,*seq;
    EVP_MD_CTX md_ctx;
    const EVP_MD_CTX *hash;
    unsigned char *p,rec_char;
    size_t md_size, orig_len;
    int npad;
    int t;
    /* Select the write- or read-side record, secret, sequence and hash. */
    if (send)
        {
        rec= &(ssl->s3->wrec);
        mac_sec= &(ssl->s3->write_mac_secret[0]);
        seq= &(ssl->s3->write_sequence[0]);
        hash=ssl->write_hash;
        }
    else
        {
        rec= &(ssl->s3->rrec);
        mac_sec= &(ssl->s3->read_mac_secret[0]);
        seq= &(ssl->s3->read_sequence[0]);
        hash=ssl->read_hash;
        }
    t=EVP_MD_CTX_size(hash);
    if (t < 0)
        return -1;
    md_size=t;
    npad=(48/md_size)*md_size;
    /* kludge: ssl3_cbc_remove_padding passes padding length in rec->type */
    orig_len = rec->length+md_size+((unsigned int)rec->type>>8);
    rec->type &= 0xff;
    if (!send &&
        EVP_CIPHER_CTX_mode(ssl->enc_read_ctx) == EVP_CIPH_CBC_MODE &&
        ssl3_cbc_record_digest_supported(hash))
        {
        /* This is a CBC-encrypted record. We must avoid leaking any
         * timing-side channel information about how many blocks of
         * data we are hashing because that gives an attacker a
         * timing-oracle. */
        /* npad is, at most, 48 bytes and that's with MD5:
         * 16 + 48 + 8 (sequence bytes) + 1 + 2 = 75.
         *
         * With SHA-1 (the largest hash speced for SSLv3) the hash size
         * goes up 4, but npad goes down by 8, resulting in a smaller
         * total size. */
        unsigned char header[75];
        unsigned j = 0;
        memcpy(header+j, mac_sec, md_size);
        j += md_size;
        memcpy(header+j, ssl3_pad_1, npad);
        j += npad;
        memcpy(header+j, seq, 8);
        j += 8;
        header[j++] = rec->type;
        header[j++] = rec->length >> 8;
        header[j++] = rec->length & 0xff;
        /* Constant-time MAC over header || payload regardless of padding. */
        ssl3_cbc_digest_record(
            hash,
            md, &md_size,
            header, rec->input,
            rec->length + md_size, orig_len,
            mac_sec, md_size,
            1 /* is SSLv3 */);
        }
    else
        {
        unsigned int md_size_u;
        /* Chop the digest off the end :-) */
        EVP_MD_CTX_init(&md_ctx);
        /* Inner hash: secret || pad_1 || seq || type || length || data. */
        EVP_MD_CTX_copy_ex( &md_ctx,hash);
        EVP_DigestUpdate(&md_ctx,mac_sec,md_size);
        EVP_DigestUpdate(&md_ctx,ssl3_pad_1,npad);
        EVP_DigestUpdate(&md_ctx,seq,8);
        rec_char=rec->type;
        EVP_DigestUpdate(&md_ctx,&rec_char,1);
        p=md;
        s2n(rec->length,p);
        EVP_DigestUpdate(&md_ctx,md,2);
        EVP_DigestUpdate(&md_ctx,rec->input,rec->length);
        EVP_DigestFinal_ex( &md_ctx,md,NULL);
        /* Outer hash: secret || pad_2 || inner digest. */
        EVP_MD_CTX_copy_ex( &md_ctx,hash);
        EVP_DigestUpdate(&md_ctx,mac_sec,md_size);
        EVP_DigestUpdate(&md_ctx,ssl3_pad_2,npad);
        EVP_DigestUpdate(&md_ctx,md,md_size);
        EVP_DigestFinal_ex( &md_ctx,md,&md_size_u);
        md_size = md_size_u;
        EVP_MD_CTX_cleanup(&md_ctx);
        }
    ssl3_record_sequence_update(seq);
    return(md_size);
}
| 0
|
40,029
|
/*
 * XDR-decode a LAYOUTCOMMIT operation from the compound argument stream
 * into |lcp|: segment range, reclaim flag, stateid, optional new EOF
 * offset, optional mtime, layout type, and the opaque layout-driver update
 * blob (kept in XDR form for the driver). Uses the READ_BUF/DECODE_* macro
 * framework; returns an nfserr status.
 */
nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
                struct nfsd4_layoutcommit *lcp)
{
    DECODE_HEAD;
    u32 timechange;
    /* offset(8) + length(8) + reclaim flag(4) */
    READ_BUF(20);
    p = xdr_decode_hyper(p, &lcp->lc_seg.offset);
    p = xdr_decode_hyper(p, &lcp->lc_seg.length);
    lcp->lc_reclaim = be32_to_cpup(p++);
    status = nfsd4_decode_stateid(argp, &lcp->lc_sid);
    if (status)
        return status;
    /* Optional new-offset: boolean followed by the last-write offset. */
    READ_BUF(4);
    lcp->lc_newoffset = be32_to_cpup(p++);
    if (lcp->lc_newoffset) {
        READ_BUF(8);
        p = xdr_decode_hyper(p, &lcp->lc_last_wr);
    } else
        lcp->lc_last_wr = 0;
    /* Optional time-changed: boolean followed by an nfstime4. */
    READ_BUF(4);
    timechange = be32_to_cpup(p++);
    if (timechange) {
        status = nfsd4_decode_time(argp, &lcp->lc_mtime);
        if (status)
            return status;
    } else {
        lcp->lc_mtime.tv_nsec = UTIME_NOW;
    }
    READ_BUF(8);
    lcp->lc_layout_type = be32_to_cpup(p++);
    /*
     * Save the layout update in XDR format and let the layout driver deal
     * with it later.
     */
    lcp->lc_up_len = be32_to_cpup(p++);
    if (lcp->lc_up_len > 0) {
        READ_BUF(lcp->lc_up_len);
        READMEM(lcp->lc_up_layout, lcp->lc_up_len);
    }
    DECODE_TAIL;
}
| 0
|
468,974
|
/*
 * Translate a guest frame number in @slot to a host pfn. Optionally reports
 * the host virtual address via @hva and writability via @writable. Returns
 * KVM_PFN_ERR_RO_FAULT for a write fault on a read-only slot,
 * KVM_PFN_NOSLOT for an invalid hva, otherwise the pfn from hva_to_pfn().
 */
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
                bool atomic, bool *async, bool write_fault,
                bool *writable, hva_t *hva)
{
    unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

    if (hva)
        *hva = addr;
    /* Write fault on a read-only slot: surface a dedicated error pfn. */
    if (addr == KVM_HVA_ERR_RO_BAD) {
        if (writable)
            *writable = false;
        return KVM_PFN_ERR_RO_FAULT;
    }
    if (kvm_is_error_hva(addr)) {
        if (writable)
            *writable = false;
        return KVM_PFN_NOSLOT;
    }
    /* Do not map writable pfn in the readonly memslot. */
    if (writable && memslot_is_readonly(slot)) {
        *writable = false;
        writable = NULL;
    }
    return hva_to_pfn(addr, atomic, async, write_fault,
                      writable);
}
| 0
|
301,460
|
/*
 * Decode Kodak 65000-compressed raw data: rows are processed in 256-pixel
 * runs; each run is decoded into buf and either used directly or as deltas
 * accumulated per column parity, then mapped through the tone curve.
 * Out-of-range curve indices and overlong curve values raise derror().
 */
void CLASS kodak_65000_load_raw()
{
    short buf[272]; /* 264 looks enough */
    int row, col, len, pred[2], ret, i;

    for (row = 0; row < height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
        checkCancel();
#endif
        for (col = 0; col < width; col += 256)
        {
            /* Predictors reset at the start of each 256-pixel run. */
            pred[0] = pred[1] = 0;
            len = MIN(256, width - col);
            ret = kodak_65000_decode(buf, len);
            for (i = 0; i < len; i++)
            {
                /* ret != 0: absolute values; otherwise delta-coded per
                 * even/odd column. */
                int idx = ret ? buf[i] : (pred[i & 1] += buf[i]);
                if(idx >=0 && idx < 0xffff)
                {
                    /* Curve output must fit in 12 bits. */
                    if ((RAW(row, col + i) = curve[idx]) >> 12)
                        derror();
                }
                else
                    derror();
            }
        }
    }
}
| 0
|
202,597
|
// Loads |html| into the main frame through a data: URL and pumps pending
// messages so the navigation is complete when this returns.
void RenderViewTest::LoadHTML(const char* html) {
  const std::string data_url =
      std::string("data:text/html;charset=utf-8,") + html;
  GetMainFrame()->loadRequest(WebURLRequest(GURL(data_url)));
  ProcessPendingMessages();
}
| 0
|
378,778
|
/*
 * Discover this host's public IPv4 address by issuing a plain-HTTP GET to
 * api.externalip.net and parsing the response body with inet_aton().
 *
 * Returns 0 on success (*ip filled in) or a small positive code naming the
 * failing step: 1 resolve, 2 socket, 3 connect, 4 send, 5 recv, 6 close,
 * 7 header parse; a non-parsable body also yields nonzero.
 *
 * Fixes vs. the original:
 *  - the socket was leaked on the connect/write/read error paths;
 *  - getaddrinfo() signals failure with any nonzero value (POSIX), not
 *    only negative values;
 *  - the write-length comparison mixed signed and unsigned types.
 */
static int get_external_ip(struct in_addr *ip)
{
    int sock;
    struct addrinfo *addr;
    int res;
    const char *getstr = "GET /ip/ HTTP/1.0\r\n"
        /* HTTP 1.0 to avoid chunked transfer coding */
        "Host: api.externalip.net\r\n\r\n";
    char buf[512];
    char *b;
    int len;

    res = getaddrinfo("api.externalip.net", "80", NULL, &addr);
    if (res != 0) return 1;   /* POSIX: nonzero (EAI_*) means failure */

    sock = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol);
    if (sock < 0) {
        freeaddrinfo(addr);
        return 2;
    }

    res = connect(sock, addr->ai_addr, addr->ai_addrlen);
    freeaddrinfo(addr);
    if (res < 0) {
        close(sock);          /* previously leaked */
        return 3;
    }

    res = write(sock, getstr, strlen(getstr));
    if (res < 0 || (size_t)res != strlen(getstr)) {
        close(sock);          /* previously leaked */
        return 4;
    }

    /* Zero buf before receiving and keep at least one trailing zero so the
     * response is always NUL-terminated for string parsing below. */
    memset(buf, 0, sizeof(buf));
    res = read(sock, buf, sizeof(buf) - 1);
    if (res < 0) {
        close(sock);          /* previously leaked */
        return 5;
    }
    len = res;

    res = close(sock);
    if (res < 0) return 6;

    /* Skip the HTTP headers: advance to the blank line before the body. */
    b = buf;
    while (len > 9) {
        if (strncmp("\r\n\r\n", b, 4) == 0) break;
        b++;
        len--;
    }
    if (len < 10) return 7;
    b += 4;

    /* inet_aton() returns 0 on parse failure. */
    res = inet_aton(b, ip);
    return (res == 0);
}
| 0
|
235,679
|
// Intentionally empty destructor: no cleanup is performed here.
~Logger() {}
| 0
|
513,367
|
/*
 * Process |ilen| bytes of user keyboard input for the current display.
 * Bytes up to the user's escape character are passed straight to the
 * foreground window; after an escape, the next byte is looked up in the
 * key-binding table (ktab) and its action executed. The user's escape and
 * meta-escape keys are folded to the shared defaults first, since ktab[]
 * is common to all users. (K&R-style definition, from GNU screen.)
 */
ProcessInput2(ibuf, ilen)
char *ibuf;
int ilen;
{
    char *s;
    int ch, slen;
    struct action *ktabp;

    debug1("ProcessInput2: %d bytes\n", ilen);
    while (ilen && display)
    {
        debug1(" - ilen now %d bytes\n", ilen);
        flayer = D_forecv->c_layer;
        fore = D_fore;
        slen = ilen;
        s = ibuf;
        if (!D_ESCseen)
        {
            /* Forward everything before the escape char to the window. */
            while (ilen > 0)
            {
                if ((unsigned char)*s++ == D_user->u_Esc)
                    break;
                ilen--;
            }
            slen -= ilen;
            if (slen)
                DoProcess(fore, &ibuf, &slen, 0);
            /* Input ended exactly on the escape: remember that state. */
            if (--ilen == 0)
            {
                D_ESCseen = ktab;
                WindowChanged(fore, 'E');
            }
        }
        if (ilen <= 0)
            return;
        /* Consume the pending-escape state and pick the binding table. */
        ktabp = D_ESCseen ? D_ESCseen : ktab;
        if (D_ESCseen)
        {
            D_ESCseen = 0;
            WindowChanged(fore, 'E');
        }
        ch = (unsigned char)*s;
        /*
         * As users have different esc characters, but a common ktab[],
         * we fold back the users esc and meta-esc key to the Default keys
         * that can be looked up in the ktab[]. grmbl. jw.
         * XXX: make ktab[] a per user thing.
         */
        if (ch == D_user->u_Esc)
            ch = DefaultEsc;
        else if (ch == D_user->u_MetaEsc)
            ch = DefaultMetaEsc;
        if (ch >= 0)
            DoAction(&ktabp[ch], ch);
        ibuf = (char *)(s + 1);
        ilen--;
    }
}
| 0
|
360,885
|
/* GConf notification callback: any change under the watched key re-evaluates
 * whether the XRandR status icon should be shown or hidden. The specific
 * entry is ignored; only the manager state matters. */
on_config_changed (GConfClient *client,
                   guint cnxn_id,
                   GConfEntry *entry,
                   GsdXrandrManager *manager)
{
    start_or_stop_icon (manager);
}
| 0
|
158,991
|
// Constructs the descriptor for generation |number| under
// |serverInstanceDir|: path is "<dir>/generation-<number>" and the object
// starts out not owning that directory.
Generation(const string &serverInstanceDir, unsigned int number) {
    this->number = number;
    owner = false;
    path = serverInstanceDir + "/generation-" + toString(number);
}
| 0
|
421,287
|
/* Consume |size| bytes from the front of the iovec array starting at
 * element *idx: fully consumed entries are reset to an empty iovec and
 * *idx advances past them; a partially consumed entry is trimmed in place. */
static void iovec_advance(struct iovec iov[], unsigned *idx, size_t size) {
        for (;;) {
                struct iovec *cur;

                if (size == 0)
                        return;

                cur = iov + *idx;
                if (cur->iov_len > size) {
                        /* Partially consumed: trim the front and stop. */
                        cur->iov_base = (uint8_t*) cur->iov_base + size;
                        cur->iov_len -= size;
                        return;
                }

                /* Fully consumed: clear the entry and move on. */
                size -= cur->iov_len;
                *cur = IOVEC_MAKE(NULL, 0);
                (*idx)++;
        }
}
| 0
|
159,900
|
// Benchmark client: registers its dispatcher with messenger |m| and builds
// a zero-filled payload of |len| bytes that is reused for every request.
ClientThread(Messenger *m, int c, ConnectionRef con, int len, int ops, int think_time_us):
    msgr(m), concurrent(c), conn(con), oid("object-name"), oloc(1, 1), msg_len(len), ops(ops),
    dispatcher(think_time_us, this), lock("MessengerBenchmark::ClientThread::lock"), inflight(0) {
    m->add_dispatcher_head(&dispatcher);
    // Pre-build the message payload once; every op sends the same bytes.
    bufferptr ptr(msg_len);
    memset(ptr.c_str(), 0, msg_len);
    data.append(ptr);
}
| 0
|
194,430
|
/*
 * Choose the cheapest PNG color mode able to represent |image| losslessly:
 * profiles the pixels, then picks palette, grey(+alpha) or RGB(A) with the
 * minimal bit depth. Writes the result into |mode_out|; returns 0 on
 * success or a LodePNG error code.
 */
unsigned lodepng_auto_choose_color(LodePNGColorMode* mode_out,
                                   const unsigned char* image, unsigned w, unsigned h,
                                   const LodePNGColorMode* mode_in)
{
    LodePNGColorProfile prof;
    unsigned error = 0;
    unsigned i, n, palettebits, grey_ok, palette_ok;

    lodepng_color_profile_init(&prof);
    error = get_color_profile(&prof, image, w, h, mode_in);
    if(error) return error;
    mode_out->key_defined = 0;

    if(prof.key && w * h <= 16) prof.alpha = 1; /*too few pixels to justify tRNS chunk overhead*/
    grey_ok = !prof.colored && !prof.alpha; /*grey without alpha, with potentially low bits*/
    n = prof.numcolors;
    /* Smallest palette bit depth able to index n colors. */
    palettebits = n <= 2 ? 1 : (n <= 4 ? 2 : (n <= 16 ? 4 : 8));
    palette_ok = n <= 256 && (n * 2 < w * h) && prof.bits <= 8;
    if(w * h < n * 2) palette_ok = 0; /*don't add palette overhead if image has only a few pixels*/
    if(grey_ok && prof.bits <= palettebits) palette_ok = 0; /*grey is less overhead*/

    if(palette_ok)
    {
        unsigned char* p = prof.palette;
        lodepng_palette_clear(mode_out); /*remove potential earlier palette*/
        for(i = 0; i < prof.numcolors; i++)
        {
            error = lodepng_palette_add(mode_out, p[i * 4 + 0], p[i * 4 + 1], p[i * 4 + 2], p[i * 4 + 3]);
            if(error) break;
        }
        mode_out->colortype = LCT_PALETTE;
        mode_out->bitdepth = palettebits;
        if(mode_in->colortype == LCT_PALETTE && mode_in->palettesize >= mode_out->palettesize
           && mode_in->bitdepth == mode_out->bitdepth)
        {
            /*If input should have same palette colors, keep original to preserve its order and prevent conversion*/
            lodepng_color_mode_cleanup(mode_out);
            lodepng_color_mode_copy(mode_out, mode_in);
        }
    }
    else /*8-bit or 16-bit per channel*/
    {
        mode_out->bitdepth = prof.bits;
        mode_out->colortype = prof.alpha ? (prof.colored ? LCT_RGBA : LCT_GREY_ALPHA)
                                         : (prof.colored ? LCT_RGB : LCT_GREY);
        if(prof.key && !prof.alpha)
        {
            /* Carry the transparent color key over at the output depth. */
            unsigned mask = (1u << mode_out->bitdepth) - 1u; /*profile always uses 16-bit, mask converts it*/
            mode_out->key_r = prof.key_r & mask;
            mode_out->key_g = prof.key_g & mask;
            mode_out->key_b = prof.key_b & mask;
            mode_out->key_defined = 1;
        }
    }
    return error;
}
| 0
|
497,092
|
/*
 * Apply every coupling-channel element (CCE) at the given coupling point to
 * the target channel element |cc| identified by (type, elem_id). For each
 * matching coupled-channel entry, |apply_coupling_method| mixes the CCE
 * into the left and/or right sub-channel according to ch_select; |index|
 * tracks the CCE-side gain-list position across entries.
 */
static void apply_channel_coupling(AACContext *ac, ChannelElement *cc,
                                   enum RawDataBlockType type, int elem_id,
                                   enum CouplingPoint coupling_point,
                                   void (*apply_coupling_method)(AACContext *ac, SingleChannelElement *target, ChannelElement *cce, int index))
{
    int i, c;

    for (i = 0; i < MAX_ELEM_ID; i++) {
        ChannelElement *cce = ac->che[TYPE_CCE][i];
        int index = 0;

        if (cce && cce->coup.coupling_point == coupling_point) {
            ChannelCoupling *coup = &cce->coup;

            for (c = 0; c <= coup->num_coupled; c++) {
                if (coup->type[c] == type && coup->id_select[c] == elem_id) {
                    /* ch_select != 1: couple into channel 0 (and advance the
                     * gain index unless only channel 0 is targeted). */
                    if (coup->ch_select[c] != 1) {
                        apply_coupling_method(ac, &cc->ch[0], cce, index);
                        if (coup->ch_select[c] != 0)
                            index++;
                    }
                    /* ch_select != 2: couple into channel 1. */
                    if (coup->ch_select[c] != 2)
                        apply_coupling_method(ac, &cc->ch[1], cce, index++);
                } else
                    /* Non-matching entry still consumes gain-list slots. */
                    index += 1 + (coup->ch_select[c] == 3);
            }
        }
    }
}
| 0
|
115,079
|
/*
 * Look up the HTML entity for code point |k| via the three-stage table
 * |table|. Unambiguous code points map directly; ambiguous ones (part of a
 * multi-codepoint entity) peek at the next character in |old| and match it
 * against the candidate second code points, falling back to the default
 * single-codepoint entity (and restoring *cursor) when no pair matches.
 * On no match at all, *entity is set to NULL and *entity_len to 0.
 */
static inline void find_entity_for_char(
    unsigned int k,
    enum entity_charset charset,
    const entity_stage1_row *table,
    const unsigned char **entity,
    size_t *entity_len,
    unsigned char *old,
    size_t oldlen,
    size_t *cursor)
{
    unsigned stage1_idx = ENT_STAGE1_INDEX(k);
    const entity_stage3_row *c;

    /* Code point beyond the table's stage-1 range: no entity. */
    if (stage1_idx > 0x1D) {
        *entity = NULL;
        *entity_len = 0;
        return;
    }

    c = &table[stage1_idx][ENT_STAGE2_INDEX(k)][ENT_STAGE3_INDEX(k)];

    if (!c->ambiguous) {
        *entity = (const unsigned char *)c->data.ent.entity;
        *entity_len = c->data.ent.entity_len;
    } else {
        /* peek at next char */
        size_t cursor_before = *cursor;
        int status = SUCCESS;
        unsigned next_char;

        if (!(*cursor < oldlen))
            goto no_suitable_2nd;

        next_char = get_next_char(charset, old, oldlen, cursor, &status);

        if (status == FAILURE)
            goto no_suitable_2nd;

        {
            const entity_multicodepoint_row *s, *e;

            s = &c->data.multicodepoint_table[1];
            e = s - 1 + c->data.multicodepoint_table[0].leading_entry.size;
            /* we could do a binary search but it's not worth it since we have
             * at most two entries... */
            for ( ; s <= e; s++) {
                if (s->normal_entry.second_cp == next_char) {
                    *entity = s->normal_entry.entity;
                    *entity_len = s->normal_entry.entity_len;
                    return;
                }
            }
        }
no_suitable_2nd:
        /* No pair matched: un-consume the peeked char, use the default. */
        *cursor = cursor_before;
        *entity = (const unsigned char *)
            c->data.multicodepoint_table[0].leading_entry.default_entity;
        *entity_len = c->data.multicodepoint_table[0].leading_entry.default_entity_len;
    }
}
| 0
|
432,225
|
/*
 * Check that the user-space dirty-log bitmap at |log_base| is accessible
 * for a region of |sz| bytes starting at guest address |addr|. One bitmap
 * bit covers one VHOST_PAGE_SIZE page, so the byte offset into the bitmap
 * is addr / VHOST_PAGE_SIZE / 8, and the checked length rounds the bit
 * count up to whole bytes.
 */
static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
    u64 a = addr / VHOST_PAGE_SIZE / 8;
    /* Make sure 64 bit math will not overflow. */
    if (a > ULONG_MAX - (unsigned long)log_base ||
        a + (unsigned long)log_base > ULONG_MAX)
        return false;

    return access_ok(log_base + a,
                     (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
| 0
|
102,126
|
/* Wildcard-match the C string |other| against the contents of the dynamic
 * string |self|; thin wrapper over trio_match(). Both arguments must be
 * non-NULL. */
TRIO_PUBLIC_STRING int trio_xstring_match TRIO_ARGS2((self, other), trio_string_t* self,
                                                     TRIO_CONST char* other)
{
    assert(self);
    assert(other);
    return trio_match(self->content, other);
}
| 0
|
408,660
|
/*
 * Run one step of an in-progress netlink dump on |sk|: allocate an skb,
 * let the dump callback fill it, and either queue it to the receiver (more
 * to come) or append the NLMSG_DONE trailer, run the done callback and
 * finish the dump. Called with cb_mutex governing the dump state; the
 * mutex is dropped before delivering the skb. Returns 0 or -errno.
 */
static int netlink_dump(struct sock *sk)
{
    struct netlink_sock *nlk = nlk_sk(sk);
    struct netlink_callback *cb;
    struct sk_buff *skb = NULL;
    struct nlmsghdr *nlh;
    struct module *module;
    int err = -ENOBUFS;
    int alloc_min_size;
    int alloc_size;

    mutex_lock(nlk->cb_mutex);
    if (!nlk->cb_running) {
        err = -EINVAL;
        goto errout_skb;
    }
    /* Receiver's buffer already full: back off until it drains. */
    if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
        goto errout_skb;
    /* NLMSG_GOODSIZE is small to avoid high order allocations being
     * required, but it makes sense to _attempt_ a 16K bytes allocation
     * to reduce number of system calls on dump operations, if user
     * ever provided a big enough buffer.
     */
    cb = &nlk->cb;
    alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
    if (alloc_min_size < nlk->max_recvmsg_len) {
        /* Opportunistic large allocation; fall back below if it fails. */
        alloc_size = nlk->max_recvmsg_len;
        skb = alloc_skb(alloc_size,
                        (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
                        __GFP_NOWARN | __GFP_NORETRY);
    }
    if (!skb) {
        alloc_size = alloc_min_size;
        skb = alloc_skb(alloc_size, GFP_KERNEL);
    }
    if (!skb)
        goto errout_skb;
    /* Trim skb to allocated size. User is expected to provide buffer as
     * large as max(min_dump_alloc, 16KiB (mac_recvmsg_len capped at
     * netlink_recvmsg())). dump will pack as many smaller messages as
     * could fit within the allocated skb. skb is typically allocated
     * with larger space than required (could be as much as near 2x the
     * requested size with align to next power of 2 approach). Allowing
     * dump to use the excess space makes it difficult for a user to have a
     * reasonable static buffer based on the expected largest dump of a
     * single netdev. The outcome is MSG_TRUNC error.
     */
    skb_reserve(skb, skb_tailroom(skb) - alloc_size);
    netlink_skb_set_owner_r(skb, sk);

    /* dump_done_errno > 0 means the dump is still producing records. */
    if (nlk->dump_done_errno > 0)
        nlk->dump_done_errno = cb->dump(skb, cb);

    if (nlk->dump_done_errno > 0 ||
        skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
        /* More data pending (or no room for DONE): deliver and return. */
        mutex_unlock(nlk->cb_mutex);

        if (sk_filter(sk, skb))
            kfree_skb(skb);
        else
            __netlink_sendskb(sk, skb);
        return 0;
    }

    /* Terminate the dump with NLMSG_DONE carrying the final errno. */
    nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE,
                           sizeof(nlk->dump_done_errno), NLM_F_MULTI);
    if (WARN_ON(!nlh))
        goto errout_skb;

    nl_dump_check_consistent(cb, nlh);

    memcpy(nlmsg_data(nlh), &nlk->dump_done_errno,
           sizeof(nlk->dump_done_errno));

    if (sk_filter(sk, skb))
        kfree_skb(skb);
    else
        __netlink_sendskb(sk, skb);

    if (cb->done)
        cb->done(cb);

    /* Tear down the dump state; drop the module ref outside the lock. */
    nlk->cb_running = false;
    module = cb->module;
    skb = cb->skb;
    mutex_unlock(nlk->cb_mutex);
    module_put(module);
    consume_skb(skb);
    return 0;

errout_skb:
    mutex_unlock(nlk->cb_mutex);
    kfree_skb(skb);
    return err;
}
| 0
|
337,104
|
/*
 * Read |len| bytes of a Mac-Roman-encoded string from |pb| and store it as
 * a NUL-terminated UTF-8 string in |dst| (capacity |dstlen|). ASCII bytes
 * are copied through; bytes >= 0x80 are mapped via mac_to_unicode[] and
 * re-encoded as UTF-8. Output that does not fit is dropped, but all |len|
 * input bytes are always consumed so the stream position stays correct.
 * Returns the number of bytes written to |dst|, excluding the NUL.
 *
 * Fixes:
 *  - when the output buffer was full, an ASCII byte fell through to the
 *    else branch and indexed mac_to_unicode[c - 0x80] with a negative
 *    index (out-of-bounds read); the branch is now taken only for
 *    c >= 0x80;
 *  - the NUL terminator was written even for dstlen <= 0, overflowing a
 *    zero-sized buffer.
 */
static int mov_read_mac_string(MOVContext *c, AVIOContext *pb, int len,
                               char *dst, int dstlen)
{
    char *p = dst;
    char *end = dst+dstlen-1;
    int i;

    for (i = 0; i < len; i++) {
        uint8_t t, c = avio_r8(pb);
        if (c < 0x80 && p < end)
            *p++ = c;
        else if (c >= 0x80)  /* was a plain 'else': negative index for full buffer */
            PUT_UTF8(mac_to_unicode[c-0x80], t, if (p < end) *p++ = t;);
    }
    /* Only terminate when the buffer can hold at least the NUL. */
    if (dstlen > 0)
        *p = 0;
    return p - dst;
}
| 1
|
41,854
|
/* Register an address-family descriptor with SCTP. Only one descriptor per
 * family may be installed; a second registration for the same family, or an
 * unsupported family, is refused. Returns 1 on success, 0 on failure. */
int sctp_register_af(struct sctp_af *af)
{
    struct sctp_af **slot;

    /* Pick the per-family singleton slot. */
    switch (af->sa_family) {
    case AF_INET:
        slot = &sctp_af_v4_specific;
        break;
    case AF_INET6:
        slot = &sctp_af_v6_specific;
        break;
    default:
        return 0;
    }

    if (*slot)
        return 0;  /* family already claimed */
    *slot = af;

    INIT_LIST_HEAD(&af->list);
    list_add_tail(&af->list, &sctp_address_families);
    return 1;
}
| 0
|
75,457
|
/* Append the async URB |as| to its device state's pending list, under the
 * state spinlock (IRQ-safe, as completion runs in interrupt context). */
static void async_newpending(struct async *as)
{
    struct usb_dev_state *ps = as->ps;
    unsigned long flags;

    spin_lock_irqsave(&ps->lock, flags);
    list_add_tail(&as->asynclist, &ps->async_pending);
    spin_unlock_irqrestore(&ps->lock, flags);
}
| 0
|
415,795
|
/* Return a newly allocated copy of |haystack| with every occurrence of
 * |needle| replaced by |replacement|. The scan runs twice: pass 0 only
 * measures the output length, pass 1 allocates and copies. NULL is
 * returned on allocation failure or if the two passes disagree (defensive
 * overflow check). The caller owns — and must free() — the result. */
char *lxc_string_replace(const char *needle, const char *replacement, const char *haystack)
{
    size_t rep_len = strlen(replacement);
    size_t ndl_len = strlen(needle);
    ssize_t out_len = -1, first_len = -1;
    char *out = NULL;
    int pass;

    for (pass = 0; pass < 2; pass++) {
        const char *scan, *hit;
        ssize_t chunk;

        if (pass == 1) {
            /* calloc zero-fills, which the terminator check relies on. */
            out = (char *)calloc(1, out_len + 1);
            if (!out)
                return NULL;
            first_len = out_len;
        }

        out_len = 0;
        for (scan = haystack, hit = strstr(scan, needle); hit;
             scan = hit, hit = strstr(scan, needle)) {
            /* Copy the text between the previous match and this one... */
            chunk = (ssize_t)(hit - scan);
            if (out && chunk > 0)
                memcpy(&out[out_len], scan, chunk);
            out_len += chunk;
            /* ...then the replacement, and step past the needle. */
            if (out && rep_len > 0)
                memcpy(&out[out_len], replacement, rep_len);
            out_len += rep_len;
            hit += ndl_len;
        }
        /* Trailing text after the last match. */
        chunk = strlen(scan);
        if (out && chunk > 0)
            memcpy(&out[out_len], scan, chunk);
        out_len += chunk;
    }

    /* Both passes must have produced the same length... */
    if (first_len != out_len) {
        free(out);
        return NULL;
    }

    /* ...and the zero-filled terminator must be untouched (no overflow). */
    if (out[out_len] != '\0') {
        free(out);
        return NULL;
    }

    return out;
}
| 0
|
259,848
|
/*
 * Fetch the root filehandle's attributes for an NFSv4 mount: probe server
 * capabilities, GETATTR the mount filehandle, and adopt the returned fsid
 * as the server's fsid when it differs. Returns 0 on success or a negative
 * NFS error.
 */
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
                struct nfs_fsinfo *info)
{
    int error;
    struct nfs_fattr *fattr = info->fattr;

    error = nfs4_server_capabilities(server, mntfh);
    if (error < 0) {
        dprintk("nfs4_get_root: getcaps error = %d\n", -error);
        return error;
    }
    error = nfs4_proc_getattr(server, mntfh, fattr);
    if (error < 0) {
        dprintk("nfs4_get_root: getattr error = %d\n", -error);
        return error;
    }
    /* Keep the server's fsid in sync with what the root reports. */
    if (fattr->valid & NFS_ATTR_FATTR_FSID &&
        !nfs_fsid_equal(&server->fsid, &fattr->fsid))
        memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
    return error;
}
| 0
|
156,360
|
// Find-in-page result callback: only the final update for a request is
// surfaced to JavaScript, as a "found-in-page" event carrying the result.
void WebContents::FindReply(content::WebContents* web_contents,
                            int request_id,
                            int number_of_matches,
                            const gfx::Rect& selection_rect,
                            int active_match_ordinal,
                            bool final_update) {
  // Intermediate updates are dropped; listeners only see the final state.
  if (!final_update)
    return;

  v8::Locker locker(isolate());
  v8::HandleScope handle_scope(isolate());
  gin_helper::Dictionary info = gin::Dictionary::CreateEmpty(isolate());
  info.Set("requestId", request_id);
  info.Set("activeMatchOrdinal", active_match_ordinal);
  info.Set("matches", number_of_matches);
  info.Set("selectionArea", selection_rect);
  info.Set("finalUpdate", final_update);  // Deprecate after 2.0
  Emit("found-in-page", info.GetHandle());
}
| 0
|
278,743
|
/* Parses one HEVC coding unit (CU) located at luma position (x0, y0) with
 * size (1 << log2_cb_size).
 *
 * Decoding order: cu_transquant_bypass_flag, cu_skip_flag (with neighbour
 * derived CABAC context), then either the skip path (a single 2Nx2N merge
 * PU, no residual) or the full path: pred_mode_flag, part_mode, one
 * prediction unit per partition, rqt_root_cbf and finally the residual
 * transform tree. CU/TU bookkeeping in ps_codec->s_parse is updated as a
 * side effect.
 *
 * NOTE(review): the exact CABAC decode order here mirrors the bitstream
 * syntax and must not be reordered.
 */
IHEVCD_ERROR_T ihevcd_parse_coding_unit(codec_t *ps_codec,
                                        WORD32 x0,
                                        WORD32 y0,
                                        WORD32 log2_cb_size)
{
    IHEVCD_ERROR_T ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS;
    sps_t *ps_sps;
    pps_t *ps_pps;
    WORD32 cb_size;
    slice_header_t *ps_slice_hdr;
    WORD32 skip_flag;
    WORD32 pcm_flag;
    UWORD32 *pu4_skip_top = ps_codec->s_parse.pu4_skip_cu_top;
    UWORD32 u4_skip_left = ps_codec->s_parse.u4_skip_cu_left;
    bitstrm_t *ps_bitstrm = &ps_codec->s_parse.s_bitstrm;
    tu_t *ps_tu = ps_codec->s_parse.ps_tu;
    WORD32 cu_pos_x;
    WORD32 cu_pos_y;
    cab_ctxt_t *ps_cabac = &ps_codec->s_parse.s_cabac;

    /* CU positions are in units of 8 luma samples (min CU granularity). */
    ASSERT(0 == (x0 % 8));
    ASSERT(0 == (y0 % 8));
    ps_codec->s_parse.s_cu.i4_tu_cnt = 0;
    ps_sps = ps_codec->s_parse.ps_sps;
    ps_pps = ps_codec->s_parse.ps_pps;
    cu_pos_x = ps_codec->s_parse.s_cu.i4_pos_x;
    cu_pos_y = ps_codec->s_parse.s_cu.i4_pos_y;
    ps_slice_hdr = ps_codec->s_parse.ps_slice_hdr;
    cb_size = 1 << log2_cb_size;
    ps_codec->s_parse.s_cu.i4_cu_transquant_bypass = 0;

    /* cu_transquant_bypass_flag: present only when enabled in the PPS.
     * When set, the CU also disables in-loop filtering, which is recorded
     * in the picture-level no-loop-filter bitmap below. */
    if(ps_pps->i1_transquant_bypass_enable_flag)
    {
        TRACE_CABAC_CTXT("cu_transquant_bypass_flag", ps_cabac->u4_range, IHEVC_CAB_CU_TQ_BYPASS_FLAG);
        ps_codec->s_parse.s_cu.i4_cu_transquant_bypass =
                        ihevcd_cabac_decode_bin(ps_cabac, ps_bitstrm,
                                                IHEVC_CAB_CU_TQ_BYPASS_FLAG);
        /* Update transquant_bypass in ps_tu */
        AEV_TRACE("cu_transquant_bypass_flag", ps_codec->s_parse.s_cu.i4_cu_transquant_bypass,
                  ps_cabac->u4_range);
        if(ps_codec->s_parse.s_cu.i4_cu_transquant_bypass)
        {
            UWORD8 *pu1_pic_no_loop_filter_flag = ps_codec->s_parse.pu1_pic_no_loop_filter_flag;
            UWORD32 u4_mask;
            WORD32 i;
            WORD32 numbytes_row;
            numbytes_row = (ps_sps->i2_pic_width_in_luma_samples + 63) / 64;
            pu1_pic_no_loop_filter_flag += (y0 / 8) * numbytes_row;
            pu1_pic_no_loop_filter_flag += (x0 / 64);
            /* Generate (cb_size / 8) number of 1s */
            /* i.e (log2_cb_size - 2) number of 1s */
            u4_mask = LSB_ONES((cb_size >> 3));
            for(i = 0; i < (cb_size / 8); i++)
            {
                *pu1_pic_no_loop_filter_flag |= (u4_mask << (((x0) / 8) % 8));
                pu1_pic_no_loop_filter_flag += numbytes_row;
            }
        }
    }

    /* Decode cu_skip_flag (inter slices only). Its CABAC context depends
     * on the skip flags of the top and left neighbour CUs, subject to
     * slice/tile boundary availability; the top/left skip bitmaps are
     * updated afterwards for use by subsequent CUs. */
    {
        UWORD32 u4_skip_top = 0;
        UWORD32 u4_mask;
        UWORD32 u4_top_mask, u4_left_mask;
        UWORD32 u4_min_cu_x = x0 / 8;
        UWORD32 u4_min_cu_y = y0 / 8;
        pu4_skip_top += (u4_min_cu_x / 32);
        if(ps_slice_hdr->i1_slice_type != ISLICE)
        {
            WORD32 ctx_idx_inc;
            ctx_idx_inc = 0;
            if((0 != cu_pos_y) ||
                            ((0 != ps_codec->s_parse.i4_ctb_slice_y) &&
                                            (0 != ps_codec->s_parse.i4_ctb_tile_y)))
            {
                u4_skip_top = *pu4_skip_top;
                u4_skip_top >>= (u4_min_cu_x % 32);
                if(u4_skip_top & 1)
                    ctx_idx_inc++;
            }
            /*****************************************************************/
            /* If cu_pos_x is non-zero then left is available                */
            /* If cu_pos_x is zero then ensure both the following are true   */
            /*    Current CTB is not the first CTB in a tile row             */
            /*    Current CTB is not the first CTB in a slice                */
            /*****************************************************************/
            if((0 != cu_pos_x) ||
                            (((0 != ps_codec->s_parse.i4_ctb_slice_x) || (0 != ps_codec->s_parse.i4_ctb_slice_y)) &&
                                            (0 != ps_codec->s_parse.i4_ctb_tile_x)))
            {
                u4_skip_left >>= (u4_min_cu_y % 32);
                if(u4_skip_left & 1)
                    ctx_idx_inc++;
            }
            TRACE_CABAC_CTXT("cu_skip_flag", ps_cabac->u4_range, (IHEVC_CAB_SKIP_FLAG + ctx_idx_inc));
            skip_flag = ihevcd_cabac_decode_bin(ps_cabac,
                                                ps_bitstrm,
                                                (IHEVC_CAB_SKIP_FLAG + ctx_idx_inc));
            AEV_TRACE("cu_skip_flag", skip_flag, ps_cabac->u4_range);
        }
        else
            skip_flag = 0;

        /* Update top skip_flag */
        u4_skip_top = *pu4_skip_top;
        /* Since Max cb_size is 64, maximum of 8 bits will be set or reset */
        /* Also since Coding block will be within 64x64 grid, only 8bits within a WORD32
         * need to be updated. These 8 bits will not cross 8 bit boundaries
         */
        u4_mask = LSB_ONES(cb_size / 8);
        u4_top_mask = u4_mask << (u4_min_cu_x % 32);
        if(skip_flag)
        {
            u4_skip_top |= u4_top_mask;
        }
        else
        {
            u4_skip_top &= ~u4_top_mask;
        }
        *pu4_skip_top = u4_skip_top;

        /* Update left skip_flag */
        u4_skip_left = ps_codec->s_parse.u4_skip_cu_left;
        u4_mask = LSB_ONES(cb_size / 8);
        u4_left_mask = u4_mask << (u4_min_cu_y % 32);
        if(skip_flag)
        {
            u4_skip_left |= u4_left_mask;
        }
        else
        {
            u4_skip_left &= ~u4_left_mask;
        }
        ps_codec->s_parse.u4_skip_cu_left = u4_skip_left;
    }
    ps_codec->s_parse.i4_cu_pcm_flag = 0;

    /* Skip CU: a single 2Nx2N merge PU, no residual; emit one zero-cbf TU
     * covering the whole CU. */
    if(skip_flag)
    {
        WORD32 ctb_x_base;
        WORD32 ctb_y_base;
        ctb_x_base = ps_codec->s_parse.i4_ctb_x << ps_sps->i1_log2_ctb_size;
        ctb_y_base = ps_codec->s_parse.i4_ctb_y << ps_sps->i1_log2_ctb_size;
        ps_tu->b1_cb_cbf = 0;
        ps_tu->b1_cr_cbf = 0;
        ps_tu->b1_y_cbf = 0;
        ps_tu->b4_pos_x = ((x0 - ctb_x_base) >> 2);
        ps_tu->b4_pos_y = ((y0 - ctb_y_base) >> 2);
        ps_tu->b1_transquant_bypass = 0;
        ps_tu->b3_size = (log2_cb_size - 2);
        ps_tu->b7_qp = ps_codec->s_parse.u4_qp;
        ps_tu->b3_chroma_intra_mode_idx = INTRA_PRED_CHROMA_IDX_NONE;
        ps_tu->b6_luma_intra_mode   = INTRA_PRED_NONE;
        /* Set the first TU in CU flag */
        {
            if((ps_codec->s_parse.s_cu.i4_pos_x << 3) == (ps_tu->b4_pos_x << 2) &&
                            (ps_codec->s_parse.s_cu.i4_pos_y << 3) == (ps_tu->b4_pos_y << 2))
            {
                ps_tu->b1_first_tu_in_cu = 1;
            }
            else
            {
                ps_tu->b1_first_tu_in_cu = 0;
            }
        }
        ps_codec->s_parse.ps_tu++;
        ps_codec->s_parse.s_cu.i4_tu_cnt++;
        ps_codec->s_parse.i4_pic_tu_idx++;
        ps_codec->s_parse.s_cu.i4_pred_mode = PRED_MODE_SKIP;
        ps_codec->s_parse.s_cu.i4_part_mode = PART_2Nx2N;
        {
            pu_t *ps_pu = ps_codec->s_parse.ps_pu;
            ps_pu->b2_part_idx = 0;
            ihevcd_parse_prediction_unit(ps_codec, x0, y0, cb_size, cb_size);
            STATS_UPDATE_PU_SKIP_SIZE(ps_pu);
        }
    }
    else
    {
        WORD32 pred_mode;
        WORD32 part_mode;
        WORD32 intra_split_flag;
        WORD32 is_mincb;
        cb_size = (1 << log2_cb_size);
        is_mincb = (cb_size == (1 << ps_sps->i1_log2_min_coding_block_size));
        pcm_flag = 0;

        /* pred_mode_flag is only coded in inter slices; I slices are
         * always intra. */
        if(ps_slice_hdr->i1_slice_type != ISLICE)
        {
            TRACE_CABAC_CTXT("pred_mode_flag", ps_cabac->u4_range, IHEVC_CAB_PRED_MODE);
            pred_mode = ihevcd_cabac_decode_bin(ps_cabac,
                                                ps_bitstrm,
                                                IHEVC_CAB_PRED_MODE);
            AEV_TRACE("pred_mode_flag", pred_mode, ps_cabac->u4_range);
        }
        else
        {
            pred_mode = PRED_MODE_INTRA;
        }

        /* If current CU is intra then set corresponging bit in picture level intra map */
        if(PRED_MODE_INTRA == pred_mode)
        {
            UWORD8 *pu1_pic_intra_flag = ps_codec->s_parse.pu1_pic_intra_flag;
            UWORD32 u4_mask;
            WORD32 i;
            WORD32 numbytes_row;
            numbytes_row = (ps_sps->i2_pic_width_in_luma_samples + 63) / 64;
            pu1_pic_intra_flag += (y0 / 8) * numbytes_row;
            pu1_pic_intra_flag += (x0 / 64);
            /* Generate (cb_size / 8) number of 1s */
            /* i.e (log2_cb_size - 2) number of 1s */
            u4_mask = LSB_ONES((cb_size >> 3));
            for(i = 0; i < (cb_size / 8); i++)
            {
                *pu1_pic_intra_flag |= (u4_mask << (((x0) / 8) % 8));
                pu1_pic_intra_flag += numbytes_row;
            }
        }
        ps_codec->s_parse.s_cu.i4_pred_mode = pred_mode;
        intra_split_flag = 0;

        /* part_mode is coded unless the CU is intra and larger than the
         * minimum CB size (in which case only 2Nx2N is possible). */
        if((PRED_MODE_INTRA != pred_mode) ||
                        is_mincb)
        {
            UWORD32 bin;
            if(PRED_MODE_INTRA == pred_mode)
            {
                /* Intra: a single bin selects 2Nx2N vs NxN. */
                TRACE_CABAC_CTXT("part_mode", ps_cabac->u4_range, IHEVC_CAB_PART_MODE);
                bin = ihevcd_cabac_decode_bin(ps_cabac, ps_bitstrm, IHEVC_CAB_PART_MODE);
                part_mode = (bin) ? PART_2Nx2N : PART_NxN;
            }
            else
            {
                WORD32 amp_enabled = ps_sps->i1_amp_enabled_flag;
                UWORD32 u4_max_bin_cnt = 0;
                if(amp_enabled && !is_mincb)
                {
                    /* Asymmetric motion partitions possible: dedicated parse. */
                    part_mode = ihevcd_parse_part_mode_amp(ps_cabac, ps_bitstrm);
                }
                else
                {
                    /* Truncated unary decode of part_mode (up to 3 bins). */
                    WORD32 ctxt_inc = IHEVC_CAB_PART_MODE;
                    u4_max_bin_cnt = 2;
                    if((is_mincb) && (cb_size > 8))
                    {
                        u4_max_bin_cnt++;
                    }
                    part_mode = -1;
                    TRACE_CABAC_CTXT("part_mode", ps_cabac->u4_range, IHEVC_CAB_PART_MODE);
                    do
                    {
                        bin = ihevcd_cabac_decode_bin(ps_cabac, ps_bitstrm,
                                                      ctxt_inc++);
                        part_mode++;
                    }while(--u4_max_bin_cnt && !bin);
                    /* If the last bin was zero, then increment part mode by 1 */
                    if(!bin)
                        part_mode++;
                }
            }
            AEV_TRACE("part_mode", part_mode, ps_cabac->u4_range);
        }
        else
        {
            part_mode = 0;
            intra_split_flag = 0;
        }
        ps_codec->s_parse.s_cu.i4_part_mode = part_mode;
        if((PRED_MODE_INTRA == ps_codec->s_parse.s_cu.i4_pred_mode) &&
                        (PART_NxN == ps_codec->s_parse.s_cu.i4_part_mode))
        {
            intra_split_flag = 1;
        }
        ps_codec->s_parse.s_cu.i4_part_mode = part_mode;
        ps_codec->s_parse.s_cu.i4_intra_split_flag = intra_split_flag;

        if(pred_mode == PRED_MODE_INTRA)
        {
            ps_codec->s_parse.i4_cu_pcm_flag = 0;
            ihevcd_parse_coding_unit_intra(ps_codec, x0, y0, log2_cb_size);
            pcm_flag = ps_codec->s_parse.i4_cu_pcm_flag;
        }
        else
        {
            /* Inter: parse one prediction unit per partition; the PU
             * geometry (offsets and sizes) follows the chosen part_mode. */
            if(part_mode == PART_2Nx2N)
            {
                pu_t *ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0, y0, cb_size, cb_size);
                ps_pu->b2_part_idx = 0;
            }
            else if(part_mode == PART_2NxN)
            {
                pu_t *ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0, y0, cb_size, cb_size / 2);
                ps_pu->b2_part_idx = 0;
                ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0, y0 + (cb_size / 2), cb_size, cb_size / 2);
                ps_pu->b2_part_idx = 1;
            }
            else if(part_mode == PART_Nx2N)
            {
                pu_t *ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0, y0, cb_size / 2, cb_size);
                ps_pu->b2_part_idx = 0;
                ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0 + (cb_size / 2), y0, cb_size / 2, cb_size);
                ps_pu->b2_part_idx = 1;
            }
            else if(part_mode == PART_2NxnU)
            {
                pu_t *ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0, y0, cb_size, cb_size / 4);
                ps_pu->b2_part_idx = 0;
                ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0, y0 + (cb_size / 4), cb_size, cb_size * 3 / 4);
                ps_pu->b2_part_idx = 1;
            }
            else if(part_mode == PART_2NxnD)
            {
                pu_t *ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0, y0, cb_size, cb_size * 3 / 4);
                ps_pu->b2_part_idx = 0;
                ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0, y0 + (cb_size * 3 / 4), cb_size, cb_size / 4);
                ps_pu->b2_part_idx = 1;
            }
            else if(part_mode == PART_nLx2N)
            {
                pu_t *ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0, y0, cb_size / 4, cb_size);
                ps_pu->b2_part_idx = 0;
                ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0 + (cb_size / 4), y0, cb_size * 3 / 4, cb_size);
                ps_pu->b2_part_idx = 1;
            }
            else if(part_mode == PART_nRx2N)
            {
                pu_t *ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0, y0, cb_size * 3 / 4, cb_size);
                ps_pu->b2_part_idx = 0;
                ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0 + (cb_size * 3 / 4), y0, cb_size / 4, cb_size);
                ps_pu->b2_part_idx = 1;
            }
            else
            { /* PART_NxN */
                pu_t *ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0, y0, cb_size / 2, cb_size / 2);
                ps_pu->b2_part_idx = 0;
                ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0 + (cb_size / 2), y0, cb_size / 2, cb_size / 2);
                ps_pu->b2_part_idx = 1;
                ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0, y0 + (cb_size / 2), cb_size / 2, cb_size / 2);
                ps_pu->b2_part_idx = 2;
                ps_pu = ps_codec->s_parse.ps_pu;
                ihevcd_parse_prediction_unit(ps_codec, x0 + (cb_size / 2), y0 + (cb_size / 2), cb_size / 2, cb_size / 2);
                ps_pu->b2_part_idx = 3;
            }
        }

        /* Residual: parse the transform tree unless PCM or the CU was
         * signalled as having no residual via rqt_root_cbf. */
        if(!pcm_flag)
        {
            WORD32 no_residual_syntax_flag = 0;
            pu_t *ps_pu;
            /* Since ps_pu is incremented for each PU parsed, decrement by 1 to
             *  access last decoded PU
             */
            ps_pu = ps_codec->s_parse.ps_pu - 1;
            if((PRED_MODE_INTRA != pred_mode) &&
                            (!((part_mode == PART_2Nx2N) && ps_pu->b1_merge_flag)))
            {
                TRACE_CABAC_CTXT("rqt_root_cbf", ps_cabac->u4_range, IHEVC_CAB_NORES_IDX);
                no_residual_syntax_flag = ihevcd_cabac_decode_bin(ps_cabac,
                                                                  ps_bitstrm,
                                                                  IHEVC_CAB_NORES_IDX);
                AEV_TRACE("rqt_root_cbf", no_residual_syntax_flag,
                          ps_cabac->u4_range);
                /* TODO: HACK FOR COMPLIANCE WITH HM REFERENCE DECODER */
                /*********************************************************/
                /* currently the HM decoder expects qtroot cbf instead of */
                /* no_residue_flag which has opposite meaning             */
                /* This will be fixed once the software / spec is fixed   */
                /*********************************************************/
                no_residual_syntax_flag = 1 - no_residual_syntax_flag;
            }

            if(!no_residual_syntax_flag)
            {
                ps_codec->s_parse.s_cu.i4_max_trafo_depth = (pred_mode == PRED_MODE_INTRA) ?
                                (ps_sps->i1_max_transform_hierarchy_depth_intra + intra_split_flag) :
                                (ps_sps->i1_max_transform_hierarchy_depth_inter);
                ihevcd_parse_transform_tree(ps_codec, x0, y0, x0, y0,
                                            log2_cb_size, 0, 0,
                                            ps_codec->s_parse.s_cu.ai4_intra_luma_pred_mode[0]);
            }
            else
            {
                /* No residual: emit a single zero-cbf TU for the CU. */
                WORD32 ctb_x_base;
                WORD32 ctb_y_base;

                ctb_x_base = ps_codec->s_parse.i4_ctb_x << ps_sps->i1_log2_ctb_size;
                ctb_y_base = ps_codec->s_parse.i4_ctb_y << ps_sps->i1_log2_ctb_size;

                ps_tu = ps_codec->s_parse.ps_tu;
                ps_tu->b1_cb_cbf = 0;
                ps_tu->b1_cr_cbf = 0;
                ps_tu->b1_y_cbf = 0;
                ps_tu->b4_pos_x = ((x0 - ctb_x_base) >> 2);
                ps_tu->b4_pos_y = ((y0 - ctb_y_base) >> 2);
                ps_tu->b1_transquant_bypass = 0;
                ps_tu->b3_size = (log2_cb_size - 2);
                ps_tu->b7_qp = ps_codec->s_parse.u4_qp;
                ps_tu->b3_chroma_intra_mode_idx = INTRA_PRED_CHROMA_IDX_NONE;
                ps_tu->b6_luma_intra_mode   = ps_codec->s_parse.s_cu.ai4_intra_luma_pred_mode[0];

                /* Set the first TU in CU flag */
                {
                    if((ps_codec->s_parse.s_cu.i4_pos_x << 3) == (ps_tu->b4_pos_x << 2) &&
                                    (ps_codec->s_parse.s_cu.i4_pos_y << 3) == (ps_tu->b4_pos_y << 2))
                    {
                        ps_tu->b1_first_tu_in_cu = 1;
                    }
                    else
                    {
                        ps_tu->b1_first_tu_in_cu = 0;
                    }
                }
                ps_codec->s_parse.ps_tu++;
                ps_codec->s_parse.s_cu.i4_tu_cnt++;
                ps_codec->s_parse.i4_pic_tu_idx++;
            }
        }
    }

    return ret;
}
| 0
|
147,276
|
static bool check_noproxy(const char *name, const char *no_proxy)
{
  /*
   * NO_PROXY holds a comma/space separated list of host names that must be
   * contacted without a proxy; a single "*" disables proxying for all hosts.
   */
  if(no_proxy && no_proxy[0]) {
    const char *delims = ", ";
    size_t list_len;
    size_t namelen;
    size_t start;
    size_t end = 0;
    char *bracket;

    if(strcasecompare("*", no_proxy))
      return TRUE;

    /* NO_PROXY was specified and it wasn't just an asterisk */
    list_len = strlen(no_proxy);

    if(name[0] == '[') {
      /* Bracketed IPv6 literal: compare only the part inside the brackets. */
      bracket = strchr(name, ']');
      if(!bracket)
        return FALSE;
      name++;
      namelen = bracket - name;
    }
    else
      namelen = strlen(name);

    for(start = 0; start < list_len; start = end + 1) {
      /* skip over any leading separator characters */
      while(start < list_len && strchr(delims, no_proxy[start]))
        ++start;
      if(start == list_len)
        break; /* only trailing separators were left, no more tokens */

      /* advance 'end' to just past this token */
      end = start;
      while(end < list_len && !strchr(delims, no_proxy[end]))
        ++end;

      /* To match previous behaviour, where it was necessary to specify
       * ".local.com" to prevent matching "notlocal.com", we will leave
       * the '.' off.
       */
      if(no_proxy[start] == '.')
        ++start;

      if((end - start) <= namelen) {
        /* Tail-match the host name against this pattern. */
        const char *tail = name + namelen - (end - start);
        if(strncasecompare(no_proxy + start, tail, end - start)) {
          /* Accept either an exact match, or a match on a full domain
           * component (preceding character is a dot). */
          if((end - start) == namelen || *(tail - 1) == '.')
            return TRUE;
        }
      }
    }
  }
  return FALSE;
}
| 0
|
446,708
|
virDomainDefPostParseCheckFailure(virDomainDefPtr def,
                                  unsigned int parseFlags,
                                  int ret)
{
    /* Record that post-parse processing failed for this definition;
     * any non-zero status counts as a failure. */
    if (ret != 0)
        def->postParseFailed = true;

    /* ret < 0 is a hard error (propagate), ret == 0 is success. */
    if (ret <= 0)
        return ret;

    /* ret > 0 is a soft failure; it is only tolerated when the caller
     * explicitly opted in via the parse flag, otherwise report failure. */
    if (!(parseFlags & VIR_DOMAIN_DEF_PARSE_ALLOW_POST_PARSE_FAIL))
        return -1;

    /* Tolerated soft failure: discard the queued error and succeed. */
    virResetLastError();
    return 0;
}
| 0
|
171,869
|
// Forwards freshly obtained sync credentials to the backing implementation
// (data_). Must be called on the thread this object was created on; the
// thread checker enforces that in debug builds.
void SyncManager::UpdateCredentials(const SyncCredentials& credentials) {
  DCHECK(thread_checker_.CalledOnValidThread());
  data_->UpdateCredentials(credentials);
}
| 0
|
111,463
|
/* Reads the value array of a directory entry into newly allocated memory,
 * clamping the element count to at most maxcount.
 *
 * On success *value receives the buffer (caller frees with _TIFFfree) and
 * *count the clamped element count; *value is set to 0 when the entry is
 * empty. Size sanity checks cap the array at 2GB in both the on-file type
 * and the destination type to avoid tmsize_t overflow on 32-bit systems.
 * Small values are stored inline in the entry's offset field (<=4 bytes
 * classic TIFF, <=8 bytes BigTIFF); larger values are read from the file
 * at the (possibly byte-swapped) offset.
 */
static enum TIFFReadDirEntryErr TIFFReadDirEntryArrayWithLimit(
    TIFF* tif, TIFFDirEntry* direntry, uint32* count, uint32 desttypesize,
    void** value, uint64 maxcount)
{
	int typesize;
	uint32 datasize;
	void* data;
        uint64 target_count64;
	typesize=TIFFDataWidth(direntry->tdir_type);

        /* Clamp the advertised count to the caller-imposed limit. */
        target_count64 = (direntry->tdir_count > maxcount) ?
                maxcount : direntry->tdir_count;

	if ((target_count64==0)||(typesize==0))
	{
		*value=0;
		return(TIFFReadDirEntryErrOk);
	}
        (void) desttypesize;

	/* 
	 * As a sanity check, make sure we have no more than a 2GB tag array 
	 * in either the current data type or the dest data type.  This also
	 * avoids problems with overflow of tmsize_t on 32bit systems.
	 */
	if ((uint64)(2147483647/typesize)<target_count64)
		return(TIFFReadDirEntryErrSizesan);
	if ((uint64)(2147483647/desttypesize)<target_count64)
		return(TIFFReadDirEntryErrSizesan);

	*count=(uint32)target_count64;
	datasize=(*count)*typesize;
	assert((tmsize_t)datasize>0);

	/* A memory-mapped file cannot contain more data than its size. */
	if( isMapped(tif) && datasize > (uint32)tif->tif_size )
		return TIFFReadDirEntryErrIo;

	if( !isMapped(tif) &&
		(((tif->tif_flags&TIFF_BIGTIFF) && datasize > 8) ||
		(!(tif->tif_flags&TIFF_BIGTIFF) && datasize > 4)) )
	{
		/* Out-of-line data on a non-mapped file: defer allocation to
		 * TIFFReadDirEntryDataAndRealloc, which grows the buffer as
		 * bytes actually arrive. */
		data = NULL;
	}
	else
	{
		data=_TIFFCheckMalloc(tif, *count, typesize, "ReadDirEntryArray");
		if (data==0)
			return(TIFFReadDirEntryErrAlloc);
	}
	if (!(tif->tif_flags&TIFF_BIGTIFF))
	{
		/* Classic TIFF: up to 4 bytes are stored inline in the entry. */
		if (datasize<=4)
			_TIFFmemcpy(data,&direntry->tdir_offset,datasize);
		else
		{
			enum TIFFReadDirEntryErr err;
			uint32 offset = direntry->tdir_offset.toff_long;
			if (tif->tif_flags&TIFF_SWAB)
				TIFFSwabLong(&offset);
			if( isMapped(tif) )
				err=TIFFReadDirEntryData(tif,(uint64)offset,(tmsize_t)datasize,data);
			else
				err=TIFFReadDirEntryDataAndRealloc(tif,(uint64)offset,(tmsize_t)datasize,&data);
			if (err!=TIFFReadDirEntryErrOk)
			{
				_TIFFfree(data);
				return(err);
			}
		}
	}
	else
	{
		/* BigTIFF: up to 8 bytes are stored inline in the entry. */
		if (datasize<=8)
			_TIFFmemcpy(data,&direntry->tdir_offset,datasize);
		else
		{
			enum TIFFReadDirEntryErr err;
			uint64 offset = direntry->tdir_offset.toff_long8;
			if (tif->tif_flags&TIFF_SWAB)
				TIFFSwabLong8(&offset);
			if( isMapped(tif) )
				err=TIFFReadDirEntryData(tif,(uint64)offset,(tmsize_t)datasize,data);
			else
				err=TIFFReadDirEntryDataAndRealloc(tif,(uint64)offset,(tmsize_t)datasize,&data);
			if (err!=TIFFReadDirEntryErrOk)
			{
				_TIFFfree(data);
				return(err);
			}
		}
	}
	*value=data;
	return(TIFFReadDirEntryErrOk);
}
| 0
|
244,397
|
// Implements console.time()/console.timeline(): records the start timestamp
// for the given label in the private time map and notifies the client.
static void timeFunction(const v8::FunctionCallbackInfo<v8::Value>& info, bool timelinePrefix)
{
    ConsoleHelper helper(info);
    V8InspectorClient* client = helper.ensureDebuggerClient();
    if (!client)
        return;

    String16 protocolTitle = helper.firstArgToString("default");
    if (timelinePrefix)
        protocolTitle = "Timeline '" + protocolTitle + "'";
    client->consoleTime(protocolTitle);

    v8::Local<v8::Map> timeMap;
    if (!helper.privateMap("V8Console#timeMap").ToLocal(&timeMap))
        return;
    helper.setDoubleOnMap(timeMap, protocolTitle, client->currentTimeMS());
}
| 0
|
72,850
|
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int hw_event;

	/* Translate the generic perf event into a hardware event number. */
	hw_event = armpmu->map_event(event);
	if (hw_event < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return hw_event;
	}

	/*
	 * No counter index is assigned until the event is actually placed
	 * onto hardware; -1 means "not decided yet". On SMP each core has
	 * its own PMU, so no clever allocation or constraint checking can
	 * happen here.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Reject mode-exclusion requests the hardware cannot honour.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EPERM;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)hw_event;

	if (!hwc->sample_period) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	/* Grouped events must be schedulable together. */
	if (event->group_leader != event) {
		if (validate_group(event))
			return -EINVAL;
	}

	return 0;
}
| 0
|
172,714
|
// Destructor. Must run on the UI thread, and all per-WebContents observers
// are expected to have been removed before destruction.
SpeechRecognitionManagerImpl::FrameDeletionObserver::~FrameDeletionObserver() {
  DCHECK_CURRENTLY_ON(BrowserThread::UI);
  DCHECK_EQ(0u, contents_observers_.size());
}
| 0
|
463,317
|
autoar_extractor_step_scan_toplevel (AutoarExtractor *self)
{
  /* Step 0: Scan all file names in the archive
   * We have to check whether the archive contains a top-level directory
   * before performing the extraction. We emit the "scanned" signal when
   * the checking is completed. */
  struct archive *a;
  struct archive_entry *entry;
  int r;

  g_debug ("autoar_extractor_step_scan_toplevel: called");

  /* First try reading the archive with format auto-detection; if that
   * fails, retry in libarchive's "raw" mode (single compressed file). */
  r = libarchive_create_read_object (FALSE, self, &a);
  if (r != ARCHIVE_OK) {
    archive_read_free (a);
    r = libarchive_create_read_object (TRUE, self, &a);
    if (r != ARCHIVE_OK) {
      if (self->error == NULL)
        self->error = autoar_common_g_error_new_a (a, self->source_basename);
      return;
    } else if (archive_filter_count (a) <= 1){
      /* If we only use raw format and filter count is one, libarchive will
       * not do anything except for just copying the source file. We do not
       * want this thing to happen because it does unnecesssary copying. */
      if (self->error == NULL)
        self->error = g_error_new (AUTOAR_EXTRACTOR_ERROR,
                                   NOT_AN_ARCHIVE_ERRNO,
                                   "\'%s\': %s",
                                   self->source_basename,
                                   "not an archive");
      return;
    }
    self->use_raw_format = TRUE;
  }

  /* Walk every entry, collecting destination GFiles and accumulating the
   * total file count and uncompressed size; entry data is skipped. */
  while ((r = archive_read_next_header (a, &entry)) == ARCHIVE_OK) {
    const char *pathname;
    g_autofree char *utf8_pathname = NULL;

    if (g_cancellable_is_cancelled (self->cancellable)) {
      archive_read_free (a);
      return;
    }

    /* Encrypted entries trigger a passphrase request up front. */
    if (archive_entry_is_encrypted (entry)) {
      autoar_extractor_request_passphrase (self);
    }

    if (self->use_raw_format) {
      /* Raw mode has no entry names; derive one from the source file. */
      pathname = autoar_common_get_basename_remove_extension (g_file_peek_path (self->source_file));
      g_debug ("autoar_extractor_step_scan_toplevel: %d: raw pathname = %s",
               self->total_files, pathname);
    } else {
      const char *symlink_pathname;
      const char *hardlink_pathname;

      pathname = archive_entry_pathname (entry);
      utf8_pathname = autoar_common_get_utf8_pathname (pathname);
      symlink_pathname = archive_entry_symlink (entry);
      hardlink_pathname = archive_entry_hardlink (entry);

      g_debug ("autoar_extractor_step_scan_toplevel: %d: pathname = %s%s%s%s%s%s%s",
               self->total_files, pathname,
               utf8_pathname ? " utf8 pathname = " : "",
               utf8_pathname ? utf8_pathname : "",
               symlink_pathname ? " symlink = " : "",
               symlink_pathname ? symlink_pathname : "",
               hardlink_pathname ? " hardlink = " : "",
               hardlink_pathname ? hardlink_pathname : "");
    }
    self->files_list =
      g_list_prepend (self->files_list,
                      g_file_get_child (self->output_file,
                                        utf8_pathname ? utf8_pathname : pathname));
    self->total_files++;
    self->total_size += archive_entry_size (entry);
    archive_read_data_skip (a);
  }

  /* An archive with no entries at all is reported as an error. */
  if (self->files_list == NULL) {
    if (self->error == NULL) {
      self->error = g_error_new (AUTOAR_EXTRACTOR_ERROR,
                                 EMPTY_ARCHIVE_ERRNO,
                                 "\'%s\': %s",
                                 self->source_basename,
                                 "empty archive");
    }
    archive_read_free (a);
    return;
  }

  /* The loop must end exactly at end-of-archive; anything else is a
   * read error. */
  if (r != ARCHIVE_EOF) {
    if (self->error == NULL) {
      self->error =
        autoar_common_g_error_new_a (a, self->source_basename);
    }
    archive_read_free (a);
    return;
  }

  /* If we are unable to determine the total size, set it to a positive
   * number to prevent strange percentage. */
  if (self->total_size <= 0)
    self->total_size = G_MAXUINT64;

  archive_read_free (a);

  g_debug ("autoar_extractor_step_scan_toplevel: files = %d",
           self->total_files);

  /* Entries were prepended for O(1) insertion; restore archive order. */
  self->files_list = g_list_reverse (self->files_list);

  autoar_extractor_signal_scanned (self);
}
| 0
|
515,479
|
// Builds the nghttp2 callback table and hands ownership of it to the
// |callbacks| smart pointer. The padding callback is only registered when
// the session was configured with a JS padding strategy.
Http2Session::Callbacks::Callbacks(bool kHasGetPaddingCallback) {
  nghttp2_session_callbacks* cb;
  CHECK_EQ(nghttp2_session_callbacks_new(&cb), 0);
  callbacks.reset(cb);

  nghttp2_session_callbacks_set_on_begin_headers_callback(
      cb, OnBeginHeadersCallback);
  nghttp2_session_callbacks_set_on_header_callback2(
      cb, OnHeaderCallback);
  nghttp2_session_callbacks_set_on_frame_recv_callback(
      cb, OnFrameReceive);
  nghttp2_session_callbacks_set_on_stream_close_callback(
      cb, OnStreamClose);
  nghttp2_session_callbacks_set_on_data_chunk_recv_callback(
      cb, OnDataChunkReceived);
  nghttp2_session_callbacks_set_on_frame_not_send_callback(
      cb, OnFrameNotSent);
  nghttp2_session_callbacks_set_on_invalid_header_callback2(
      cb, OnInvalidHeader);
  nghttp2_session_callbacks_set_error_callback(
      cb, OnNghttpError);
  nghttp2_session_callbacks_set_send_data_callback(
      cb, OnSendData);
  nghttp2_session_callbacks_set_on_invalid_frame_recv_callback(
      cb, OnInvalidFrame);
  nghttp2_session_callbacks_set_on_frame_send_callback(
      cb, OnFrameSent);

  if (kHasGetPaddingCallback) {
    nghttp2_session_callbacks_set_select_padding_callback(
        cb, OnSelectPadding);
  }
}
| 0
|
387,069
|
/*
 * 1/(n-1) record splitting: for CBC suites in SSL3/TLS1.0, write the first
 * byte as its own record before the rest (BEAST countermeasure). When
 * splitting does not apply, fall through to a plain write.
 */
static int ssl_write_split( ssl_context *ssl,
                            const unsigned char *buf, size_t len )
{
    int ret;

    if( len <= 1 ||
        ssl->split_done == SSL_CBC_RECORD_SPLITTING_DISABLED ||
        ssl->minor_ver > SSL_MINOR_VERSION_1 ||
        cipher_get_cipher_mode( &ssl->transform_out->cipher_ctx_enc )
                                                != POLARSSL_MODE_CBC )
    {
        return( ssl_write_real( ssl, buf, len ) );
    }

    /* First half of the split: send the leading byte alone. split_done
     * remembers our progress so a partial write can be resumed. */
    if( ssl->split_done == 0 )
    {
        ret = ssl_write_real( ssl, buf, 1 );
        if( ret <= 0 )
            return( ret );
        ssl->split_done = 1;
    }

    /* Second half: the remaining len - 1 bytes. */
    ret = ssl_write_real( ssl, buf + 1, len - 1 );
    if( ret <= 0 )
        return( ret );
    ssl->split_done = 0;

    /* Account for the leading byte written in the first record. */
    return( ret + 1 );
}
| 0
|
423,620
|
got_transfer_quota(isc_task_t *task, isc_event_t *event) {
	/*
	 * Called once a transfer quota slot is available: decide between
	 * AXFR/IXFR/SOA query, locate a TSIG key and DSCP value, then kick
	 * off the inbound transfer. Any failure is routed through
	 * zone_xfrdone() so the zone is removed from xfrin_in_progress.
	 */
	isc_result_t result = ISC_R_SUCCESS;
	dns_peer_t *peer = NULL;
	char master[ISC_SOCKADDR_FORMATSIZE];
	char source[ISC_SOCKADDR_FORMATSIZE];
	dns_rdatatype_t xfrtype;
	dns_zone_t *zone = event->ev_arg;
	isc_netaddr_t masterip;
	isc_sockaddr_t sourceaddr;
	isc_sockaddr_t masteraddr;
	isc_time_t now;
	const char *soa_before = "";
	isc_dscp_t dscp = -1;
	bool loaded;

	UNUSED(task);
	INSIST(task == zone->task);

	if (DNS_ZONE_FLAG(zone, DNS_ZONEFLG_EXITING)) {
		result = ISC_R_CANCELED;
		goto cleanup;
	}

	TIME_NOW(&now);

	isc_sockaddr_format(&zone->masteraddr, master, sizeof(master));

	/* Skip masters recently recorded as unreachable. */
	if (dns_zonemgr_unreachable(zone->zmgr, &zone->masteraddr,
				    &zone->sourceaddr, &now))
	{
		isc_sockaddr_format(&zone->sourceaddr, source, sizeof(source));
		dns_zone_log(zone, ISC_LOG_INFO,
			     "got_transfer_quota: skipping zone transfer as "
			     "master %s (source %s) is unreachable (cached)",
			     master, source);
		result = ISC_R_CANCELED;
		goto cleanup;
	}

	isc_netaddr_fromsockaddr(&masterip, &zone->masteraddr);
	(void)dns_peerlist_peerbyaddr(zone->view->peers, &masterip, &peer);

	if (DNS_ZONE_FLAG(zone, DNS_ZONEFLG_SOABEFOREAXFR))
		soa_before = "SOA before ";
	/*
	 * Decide whether we should request IXFR or AXFR.
	 */
	ZONEDB_LOCK(&zone->dblock, isc_rwlocktype_read);
	loaded = (zone->db != NULL);
	ZONEDB_UNLOCK(&zone->dblock, isc_rwlocktype_read);

	if (!loaded) {
		dns_zone_log(zone, ISC_LOG_DEBUG(1),
			     "no database exists yet, requesting AXFR of "
			     "initial version from %s", master);
		xfrtype = dns_rdatatype_axfr;
	} else if (DNS_ZONE_FLAG(zone, DNS_ZONEFLG_FORCEXFER)) {
		dns_zone_log(zone, ISC_LOG_DEBUG(1),
			     "forced reload, requesting AXFR of "
			     "initial version from %s", master);
		xfrtype = dns_rdatatype_axfr;
	} else if (DNS_ZONE_FLAG(zone, DNS_ZONEFLAG_NOIXFR)) {
		/* A previous IXFR against this master failed; fall back. */
		dns_zone_log(zone, ISC_LOG_DEBUG(1),
			     "retrying with AXFR from %s due to "
			     "previous IXFR failure", master);
		xfrtype = dns_rdatatype_axfr;
		LOCK_ZONE(zone);
		DNS_ZONE_CLRFLAG(zone, DNS_ZONEFLAG_NOIXFR);
		UNLOCK_ZONE(zone);
	} else {
		/* Per-peer request-ixfr setting overrides the zone default. */
		bool use_ixfr = true;
		if (peer != NULL)
			result = dns_peer_getrequestixfr(peer, &use_ixfr);
		if (peer == NULL || result != ISC_R_SUCCESS)
			use_ixfr = zone->requestixfr;
		if (use_ixfr == false) {
			dns_zone_log(zone, ISC_LOG_DEBUG(1),
				     "IXFR disabled, requesting %sAXFR from %s",
				     soa_before, master);
			if (DNS_ZONE_FLAG(zone, DNS_ZONEFLG_SOABEFOREAXFR))
				xfrtype = dns_rdatatype_soa;
			else
				xfrtype = dns_rdatatype_axfr;
		} else {
			dns_zone_log(zone, ISC_LOG_DEBUG(1),
				     "requesting IXFR from %s", master);
			xfrtype = dns_rdatatype_ixfr;
		}
	}

	/*
	 * Determine if we should attempt to sign the request with TSIG.
	 */
	result = ISC_R_NOTFOUND;
	/*
	 * First, look for a tsig key in the master statement, then
	 * try for a server key.
	 */
	if ((zone->masterkeynames != NULL) &&
	    (zone->masterkeynames[zone->curmaster] != NULL)) {
		dns_view_t *view = dns_zone_getview(zone);
		dns_name_t *keyname = zone->masterkeynames[zone->curmaster];
		result = dns_view_gettsig(view, keyname, &zone->tsigkey);
	}
	if (zone->tsigkey == NULL)
		result = dns_view_getpeertsig(zone->view, &masterip,
					      &zone->tsigkey);

	/* ISC_R_NOTFOUND just means an unsigned transfer; anything else
	 * is logged but does not abort the transfer attempt. */
	if (result != ISC_R_SUCCESS && result != ISC_R_NOTFOUND) {
		dns_zone_log(zone, ISC_LOG_ERROR,
			     "could not get TSIG key for zone transfer: %s",
			     isc_result_totext(result));
	}

	/* Pick the DSCP value: per-master first, then per-family default. */
	if (zone->masterdscps != NULL)
	    dscp = zone->masterdscps[zone->curmaster];

	LOCK_ZONE(zone);
	masteraddr = zone->masteraddr;
	sourceaddr = zone->sourceaddr;
	switch (isc_sockaddr_pf(&masteraddr)) {
	case PF_INET:
		if (dscp == -1)
			dscp = zone->xfrsource4dscp;
		break;
	case PF_INET6:
		if (dscp == -1)
			dscp = zone->xfrsource6dscp;
		break;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
	UNLOCK_ZONE(zone);
	INSIST(isc_sockaddr_pf(&masteraddr) == isc_sockaddr_pf(&sourceaddr));

	/* Start the inbound transfer; zone_xfrdone runs on completion. */
	result = dns_xfrin_create(zone, xfrtype, &masteraddr, &sourceaddr,
				  dscp, zone->tsigkey, zone->mctx,
				  zone->zmgr->timermgr, zone->zmgr->socketmgr,
				  zone->task, zone_xfrdone, &zone->xfr);

	/* Bump the per-type/per-family transfer-request statistics. */
	if (result == ISC_R_SUCCESS) {
		LOCK_ZONE(zone);
		if (xfrtype == dns_rdatatype_axfr) {
			if (isc_sockaddr_pf(&masteraddr) == PF_INET)
				inc_stats(zone, dns_zonestatscounter_axfrreqv4);
			else
				inc_stats(zone, dns_zonestatscounter_axfrreqv6);
		} else if (xfrtype == dns_rdatatype_ixfr) {
			if (isc_sockaddr_pf(&masteraddr) == PF_INET)
				inc_stats(zone, dns_zonestatscounter_ixfrreqv4);
			else
				inc_stats(zone, dns_zonestatscounter_ixfrreqv6);
		}
		UNLOCK_ZONE(zone);
	}

 cleanup:
	/*
	 * Any failure in this function is handled like a failed
	 * zone transfer.  This ensures that we get removed from
	 * zmgr->xfrin_in_progress.
	 */
	if (result != ISC_R_SUCCESS)
		zone_xfrdone(zone, result);

	isc_event_free(&event);
}
| 0
|
235,208
|
// Returns the ultimate (non-proxy) target of a v8::Proxy argument by
// unwrapping nested proxies until a plain object is reached.
void V8InjectedScriptHost::proxyTargetValueCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
    // Exactly one argument is expected and it must be a Proxy.
    if (info.Length() != 1 || !info[0]->IsProxy()) {
        NOTREACHED();
        return;
    }
    v8::Local<v8::Object> unwrapped = info[0].As<v8::Proxy>();
    while (unwrapped->IsProxy())
        unwrapped = v8::Local<v8::Proxy>::Cast(unwrapped)->GetTarget();
    info.GetReturnValue().Set(unwrapped);
}
| 0
|
240,570
|
// Registers |image| as a directly composited image, returning its cache key.
// Repeated adoptions of the same pixmap only bump a reference count; the
// first adoption paints the image into a shareable bitmap and ships it to
// the UI process.
int64_t LayerTreeHostQt::adoptImageBackingStore(Image* image)
{
    if (!image)
        return InvalidWebLayerID;

    QPixmap* nativePixmap = image->nativeImageForCurrentFrame();
    if (!nativePixmap)
        return InvalidWebLayerID;

    const int64_t imageKey = nativePixmap->cacheKey();

    // Already adopted: just increase the reference count.
    HashMap<int64_t, int>::iterator refIt = m_directlyCompositedImageRefCounts.find(imageKey);
    if (refIt != m_directlyCompositedImageRefCounts.end()) {
        ++(refIt->second);
        return imageKey;
    }

    RefPtr<ShareableBitmap> bitmap = ShareableBitmap::createShareable(image->size(), image->currentFrameHasAlpha() ? ShareableBitmap::SupportsAlpha : 0);
    {
        // Scoped so the graphics context is destroyed before the handle
        // is created.
        OwnPtr<WebCore::GraphicsContext> graphicsContext = bitmap->createGraphicsContext();
        graphicsContext->drawImage(image, ColorSpaceDeviceRGB, IntPoint::zero());
    }
    ShareableBitmap::Handle handle;
    bitmap->createHandle(handle);
    m_webPage->send(Messages::LayerTreeHostProxy::CreateDirectlyCompositedImage(imageKey, handle));
    m_directlyCompositedImageRefCounts.add(imageKey, 1);
    return imageKey;
}
| 0
|
34,865
|
/* Legacy wrapper: resolve an AAAA record through the global evdns base.
 * Returns 0 on success and -1 on failure. */
int evdns_resolve_ipv6(const char *name, int flags,
    evdns_callback_type callback, void *ptr) {
	if (evdns_base_resolve_ipv6(current_base, name, flags, callback, ptr))
		return 0;
	return -1;
}
| 0
|
489,487
|
/* Dumps a ChannelLayoutInfoBox ('chan') and its per-channel audio
 * descriptions as XML to the trace stream. */
GF_Err chan_box_dump(GF_Box *a, FILE * trace)
{
	GF_ChannelLayoutInfoBox *p = (GF_ChannelLayoutInfoBox *) a;
	u32 idx;

	gf_isom_box_dump_start(a, "ChannelLayoutInfoBox", trace);
	gf_fprintf(trace, "layout=\"%d\" bitmap=\"%d\">\n", p->layout_tag, p->bitmap);

	/* One element per audio channel description. */
	for (idx = 0; idx < p->num_audio_description; idx++) {
		GF_AudioChannelDescription *desc = &p->audio_descs[idx];
		gf_fprintf(trace, "<AudioChannelDescription label=\"%d\" flags=\"%08X\" coordinates=\"%f %f %f\"/>\n", desc->label, desc->flags, desc->coordinates[0], desc->coordinates[1], desc->coordinates[2]);
	}
	gf_isom_box_dump_done("ChannelLayoutInfoBox", a, trace);
	return GF_OK;
}
| 0
|
190,615
|
/* Allocates and initializes an spl_heap_object for class_type, optionally
 * cloning or sharing the heap of an existing object (orig).  Walks the
 * class hierarchy to pick the comparison function and object handlers for
 * SplHeap / SplMinHeap / SplMaxHeap / SplPriorityQueue, and resolves
 * user-overridden compare()/count() methods for inherited classes.
 * The new object is also returned through *obj. */
static zend_object_value spl_heap_object_new_ex(zend_class_entry *class_type, spl_heap_object **obj, zval *orig, int clone_orig TSRMLS_DC) /* {{{ */
{
	zend_object_value retval;
	spl_heap_object *intern;
	zend_class_entry *parent = class_type;
	int inherited = 0;

	intern = ecalloc(1, sizeof(spl_heap_object));
	/* Hand the freshly-allocated object back to the caller as well. */
	*obj = intern;
	ALLOC_INIT_ZVAL(intern->retval);

	zend_object_std_init(&intern->std, class_type TSRMLS_CC);
	object_properties_init(&intern->std, class_type);

	intern->flags = 0;
	intern->fptr_cmp = NULL;
	intern->debug_info = NULL;

	if (orig) {
		spl_heap_object *other = (spl_heap_object*)zend_object_store_get_object(orig TSRMLS_CC);
		intern->ce_get_iterator = other->ce_get_iterator;

		if (clone_orig) {
			int i;
			/* Deep-copy the heap structure, then add a reference to
			 * every contained zval so both objects can use them. */
			intern->heap = spl_ptr_heap_clone(other->heap TSRMLS_CC);
			for (i = 0; i < intern->heap->count; ++i) {
				if (intern->heap->elements[i]) {
					Z_ADDREF_P((zval *)intern->heap->elements[i]);
				}
			}
		} else {
			/* Share the underlying heap with the original object. */
			intern->heap = other->heap;
		}

		intern->flags = other->flags;
	} else {
		/* Fresh object: start with a max-heap comparison by default. */
		intern->heap = spl_ptr_heap_init(spl_ptr_heap_zval_max_cmp, spl_ptr_heap_zval_ctor, spl_ptr_heap_zval_dtor);
	}

	retval.handlers = &spl_handler_SplHeap;

	/* Walk up the hierarchy to find which SPL heap class we derive from;
	 * that determines the comparison function and object handlers. */
	while (parent) {
		if (parent == spl_ce_SplPriorityQueue) {
			intern->heap->cmp = spl_ptr_pqueue_zval_cmp;
			intern->flags = SPL_PQUEUE_EXTR_DATA;
			retval.handlers = &spl_handler_SplPriorityQueue;
			break;
		}

		if (parent == spl_ce_SplMinHeap) {
			intern->heap->cmp = spl_ptr_heap_zval_min_cmp;
			break;
		}

		if (parent == spl_ce_SplMaxHeap) {
			intern->heap->cmp = spl_ptr_heap_zval_max_cmp;
			break;
		}

		if (parent == spl_ce_SplHeap) {
			break;
		}

		/* Not a direct SPL heap class: remember we went through a
		 * user-defined subclass so overridden methods are honored. */
		parent = parent->parent;
		inherited = 1;
	}

	retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, spl_heap_object_free_storage, NULL TSRMLS_CC);

	if (!parent) { /* this must never happen */
		php_error_docref(NULL TSRMLS_CC, E_COMPILE_ERROR, "Internal compiler error, Class is not child of SplHeap");
	}

	if (inherited) {
		/* Only keep the method pointers when the subclass actually
		 * overrides compare()/count(); otherwise fall back to the
		 * built-in implementations. */
		zend_hash_find(&class_type->function_table, "compare", sizeof("compare"), (void **) &intern->fptr_cmp);
		if (intern->fptr_cmp->common.scope == parent) {
			intern->fptr_cmp = NULL;
		}
		zend_hash_find(&class_type->function_table, "count", sizeof("count"), (void **) &intern->fptr_count);
		if (intern->fptr_count->common.scope == parent) {
			intern->fptr_count = NULL;
		}
	}

	return retval;
}
/* }}} */
| 0
|
436,447
|
/* Picks the DualShock 4 light-bar color from the controller's assigned
 * device id.  No-op when no id has been assigned (device_id < 0). */
static void dualshock4_set_leds_from_id(struct sony_sc *sc)
{
	/* The first 4 color/index entries match what the PS4 assigns */
	static const u8 color_code[7][3] = {
		/* Blue   */	{ 0x00, 0x00, 0x40 },
		/* Red    */	{ 0x40, 0x00, 0x00 },
		/* Green  */	{ 0x00, 0x40, 0x00 },
		/* Pink   */	{ 0x20, 0x00, 0x20 },
		/* Orange */	{ 0x02, 0x01, 0x00 },
		/* Teal   */	{ 0x00, 0x01, 0x01 },
		/* White  */	{ 0x01, 0x01, 0x01 }
	};
	int id = sc->device_id;

	/* led_state must be able to hold one full RGB triple. */
	BUILD_BUG_ON(MAX_LEDS < ARRAY_SIZE(color_code[0]));

	if (id < 0)
		return;

	/* Wrap around the palette; derive the size from the table instead of
	 * hard-coding 7 so the two cannot drift apart. */
	id %= ARRAY_SIZE(color_code);
	memcpy(sc->led_state, color_code[id], sizeof(color_code[id]));
}
| 0
|
46,818
|
/* Allocates an AVFrame and fills it with default values.
 * Returns NULL on allocation failure. */
AVFrame *avcodec_alloc_frame(void)
{
    AVFrame *frame = av_mallocz(sizeof(*frame));

    if (!frame)
        return NULL;

FF_DISABLE_DEPRECATION_WARNINGS
    /* Deprecated initializer, still required for this legacy entry point. */
    avcodec_get_frame_defaults(frame);
FF_ENABLE_DEPRECATION_WARNINGS

    return frame;
}
| 0
|
492,457
|
// Inequality is defined as the logical negation of operator==.
bool operator!=(const DateTimeVal& other) const {
    return !(*this == other);
}
| 0
|
241,521
|
/* Handles the RFB SetPixelFormat client message: validates the requested
 * format, records the client's channel layout in vs->client_pf, rebuilds
 * the pixel-conversion function and forces a full display refresh.
 * Rejects palette (non-true-color) formats and unsupported depths by
 * terminating the client. */
static void set_pixel_format(VncState *vs,
                             int bits_per_pixel, int depth,
                             int big_endian_flag, int true_color_flag,
                             int red_max, int green_max, int blue_max,
                             int red_shift, int green_shift, int blue_shift)
{
    if (!true_color_flag) {
        vnc_client_error(vs);
        return;
    }

    /* Only whole-byte pixel sizes are supported. */
    switch (bits_per_pixel) {
    case 8:
    case 16:
    case 32:
        break;
    default:
        vnc_client_error(vs);
        return;
    }

    /* Record the layout of all three color channels; previously only the
     * red channel was stored, leaving the green/blue parameters unused and
     * the conversion tables built from stale green/blue values. */
    vs->client_pf.rmax = red_max;
    vs->client_pf.rbits = hweight_long(red_max);
    vs->client_pf.rshift = red_shift;
    vs->client_pf.gmax = green_max;
    vs->client_pf.gbits = hweight_long(green_max);
    vs->client_pf.gshift = green_shift;
    vs->client_pf.bmax = blue_max;
    vs->client_pf.bbits = hweight_long(blue_max);
    vs->client_pf.bshift = blue_shift;
    vs->client_pf.bytes_per_pixel = bits_per_pixel / 8;
    vs->client_pf.depth = bits_per_pixel == 32 ? 24 : bits_per_pixel;
    vs->client_be = big_endian_flag;

    set_pixel_conversion(vs);

    /* Repaint everything in the newly negotiated format. */
    graphic_hw_invalidate(NULL);
    graphic_hw_update(NULL);
}
| 0
|
353,007
|
// Builds the JS event object passed to IPC listeners.  When |frame| and
// |callback| are both present a native Event carrying the sync-reply
// callback is created; otherwise a plain event object suffices.  The
// sender and (when known) the source frame/process ids are attached.
v8::Local<v8::Object> CreateNativeEvent(
    v8::Isolate* isolate,
    v8::Local<v8::Object> sender,
    content::RenderFrameHost* frame,
    electron::mojom::ElectronBrowser::MessageSyncCallback callback) {
  const bool needs_reply = frame && callback;

  v8::Local<v8::Object> event;
  if (needs_reply) {
    gin::Handle<Event> native_event = Event::Create(isolate);
    native_event->SetCallback(std::move(callback));
    event = native_event.ToV8().As<v8::Object>();
  } else {
    // No need to create native event if we do not need to send reply.
    event = CreateEvent(isolate);
  }

  Dictionary dict(isolate, event);
  dict.Set("sender", sender);
  // Should always set frameId even when callback is null.
  if (frame) {
    dict.Set("frameId", frame->GetRoutingID());
    dict.Set("processId", frame->GetProcess()->GetID());
  }
  return event;
}
| 1
|
299,490
|
/* GAsyncReadyCallback for g_file_enumerator_next_files_async() while
 * building a directory's MIME list.  Consumes one batch of GFileInfos,
 * then either finishes the operation (empty batch) or requeues itself
 * for the next batch.  Honors cancellation by tearing the state down
 * without touching the results. */
mime_list_callback (GObject *source_object,
                    GAsyncResult *res,
                    gpointer user_data)
{
    MimeListState *state;
    NautilusDirectory *directory;
    GError *error;
    GList *files, *l;
    GFileInfo *info;

    state = user_data;
    directory = state->directory;

    if (g_cancellable_is_cancelled (state->cancellable))
    {
        /* Operation was cancelled. Bail out */
        directory->details->mime_list_in_progress = NULL;

        async_job_end (directory, "MIME list");
        nautilus_directory_async_state_changed (directory);

        mime_list_state_free (state);

        return;
    }

    /* Only the registered in-progress state may reach this point. */
    g_assert (directory->details->mime_list_in_progress != NULL);
    g_assert (directory->details->mime_list_in_progress == state);

    error = NULL;
    files = g_file_enumerator_next_files_finish (state->enumerator,
                                                 res, &error);

    /* Accumulate the MIME type of each file in this batch. */
    for (l = files; l != NULL; l = l->next)
    {
        info = l->data;
        mime_list_one (state, info);
        g_object_unref (info);
    }

    if (files == NULL)
    {
        /* Empty batch means enumeration is done (or failed: error != NULL). */
        mime_list_done (state, error != NULL);
        mime_list_state_free (state);
    }
    else
    {
        /* More entries may follow: schedule the next batch with this same
         * callback. */
        g_file_enumerator_next_files_async (state->enumerator,
                                            DIRECTORY_LOAD_ITEMS_PER_CALLBACK,
                                            G_PRIORITY_DEFAULT,
                                            state->cancellable,
                                            mime_list_callback,
                                            state);
    }

    /* The list itself is freed here; the element infos were unreffed above. */
    g_list_free (files);

    if (error)
    {
        g_error_free (error);
    }
}
| 0
|
416,139
|
/* put_params entry point for the PDF writer device.  Takes a heap
 * snapshot of the whole device so gdev_pdf_put_params_impl() can restore
 * state on failure, then frees the snapshot and propagates the result. */
gdev_pdf_put_params(gx_device * dev, gs_param_list * plist)
{
    gx_device_pdf *pdev = (gx_device_pdf *) dev;
    gs_memory_t *mem = gs_memory_stable(pdev->memory);
    gx_device_pdf *saved;
    int code;

    saved = gs_malloc(mem, sizeof(gx_device_pdf), 1, "saved gx_device_pdf");
    if (!saved)
        return_error(gs_error_VMerror);
    memcpy(saved, pdev, sizeof(gx_device_pdf));

    code = gdev_pdf_put_params_impl(dev, saved, plist);

    gs_free(mem, saved, sizeof(gx_device_pdf), 1, "saved gx_device_pdf");
    return code;
}
| 0
|
26,315
|
/* Decodes a generic region with template 0 (unoptimized path, used when
 * the adaptive template (AT) pixels are not in their nominal positions).
 * For every pixel a 16-bit context is built from already-decoded
 * neighbours plus the four AT pixels, and one bit is arithmetically
 * decoded with that context's statistics.
 * Returns 0 on success, -1 on a decode error. */
static int
jbig2_decode_generic_template0_unopt(Jbig2Ctx *ctx, Jbig2Segment *segment,
                                     const Jbig2GenericRegionParams *params,
                                     Jbig2ArithState *as, Jbig2Image *image,
                                     Jbig2ArithCx *GB_stats)
{
    const int GBW = image->width;
    const int GBH = image->height;
    uint32_t CONTEXT;
    int x, y;
    /* Must be int, not bool: jbig2_arith_decode() reports failure with a
     * negative value, which a bool would collapse to 0/1 and make the
     * error check below dead code. */
    int bit;

    for (y = 0; y < GBH; y++) {
        for (x = 0; x < GBW; x++) {
            /* Assemble the template-0 context (spec T.88, figure for GB
             * template 0): 4 pixels left on this row, 5 on row y-1,
             * 3 on row y-2, and the 4 AT pixels at gbat offsets. */
            CONTEXT = 0;
            CONTEXT |= jbig2_image_get_pixel(image, x - 1, y) << 0;
            CONTEXT |= jbig2_image_get_pixel(image, x - 2, y) << 1;
            CONTEXT |= jbig2_image_get_pixel(image, x - 3, y) << 2;
            CONTEXT |= jbig2_image_get_pixel(image, x - 4, y) << 3;
            CONTEXT |= jbig2_image_get_pixel(image, x + params->gbat[0], y + params->gbat[1]) << 4;
            CONTEXT |= jbig2_image_get_pixel(image, x + 2, y - 1) << 5;
            CONTEXT |= jbig2_image_get_pixel(image, x + 1, y - 1) << 6;
            CONTEXT |= jbig2_image_get_pixel(image, x + 0, y - 1) << 7;
            CONTEXT |= jbig2_image_get_pixel(image, x - 1, y - 1) << 8;
            CONTEXT |= jbig2_image_get_pixel(image, x - 2, y - 1) << 9;
            CONTEXT |= jbig2_image_get_pixel(image, x + params->gbat[2], y + params->gbat[3]) << 10;
            CONTEXT |= jbig2_image_get_pixel(image, x + params->gbat[4], y + params->gbat[5]) << 11;
            CONTEXT |= jbig2_image_get_pixel(image, x + 1, y - 2) << 12;
            CONTEXT |= jbig2_image_get_pixel(image, x + 0, y - 2) << 13;
            CONTEXT |= jbig2_image_get_pixel(image, x - 1, y - 2) << 14;
            CONTEXT |= jbig2_image_get_pixel(image, x + params->gbat[6], y + params->gbat[7]) << 15;
            bit = jbig2_arith_decode(as, &GB_stats[CONTEXT]);
            if (bit < 0)
                return -1;
            jbig2_image_set_pixel(image, x, y, bit);
        }
    }
    return 0;
}
| 0
|
437,884
|
// Frees the frame array.  delete[] is a no-op on null, so a Block with no
// frames needs no special-casing; m_frames is assumed to be new[]-allocated.
Block::~Block() { delete[] m_frames; }
| 0
|
262,214
|
/* Patches the MP attribute length field at stream offset 'sizep'.  The
 * two bytes used to encode the length itself are excluded from the
 * count. */
void bgp_packet_mpattr_end(struct stream *s, size_t sizep)
{
	size_t attrlen = stream_get_endp(s) - sizep - 2;

	stream_putw_at(s, sizep, attrlen);
}
| 0
|
341,304
|
/* Issues a KVM hypercall on s390 via DIAGNOSE 0x500: function number in
 * r1, parameters in r2/r3, result returned in r2. */
static long kvm_hypercall(unsigned long nr, unsigned long param1,
			  unsigned long param2)
{
	/* Pin each value to the register the hypercall ABI expects. */
	register ulong r_nr asm("1") = nr;
	register ulong r_param1 asm("2") = param1;
	register ulong r_param2 asm("3") = param2;
	/* retval deliberately shares r2 with r_param1; the "0" input
	 * constraint below ties r_param1 to output operand 0 so the
	 * compiler knows r2 is both consumed and produced. */
	register long retval asm("2");

	asm volatile ("diag 2,4,0x500"
		      : "=d" (retval)
		      : "d" (r_nr), "0" (r_param1), "r"(r_param2)
		      : "memory", "cc");
	return retval;
}
| 0
|
347,653
|
/*
 * Probe the area past the fixed (old 128-byte) inode body for an in-inode
 * extended-attribute block.  If the xattr magic word sits right after the
 * i_extra_isize fields, mark the inode as carrying in-inode xattrs and look
 * for inline data; otherwise record that there is no inline-data offset.
 */
static inline void ext4_iget_extra_inode(struct inode *inode,
					 struct ext4_inode *raw_inode,
					 struct ext4_inode_info *ei)
{
	/* Candidate location of the xattr magic, just past the extra fields. */
	__le32 *magic = (void *)raw_inode +
			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;

	/* The size check runs before *magic thanks to && short-circuiting,
	 * so the word is only read when it lies inside the on-disk inode.
	 * NOTE(review): this assumes i_extra_isize was already validated
	 * (range and alignment) by the caller -- confirm against ext4_iget();
	 * an unaligned i_extra_isize would make this an unaligned load. */
	if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
	    EXT4_INODE_SIZE(inode->i_sb) &&
	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
		ext4_find_inline_data_nolock(inode);
	} else
		EXT4_I(inode)->i_inline_off = 0;
}
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.