unique_id
int64 13
189k
| target
int64 0
1
| code
stringlengths 20
241k
| __index_level_0__
int64 0
18.9k
|
|---|---|---|---|
92,132
| 0
|
/* Predicate: does the given QP type designate QP0 (the SMI queue pair)? */
static int is_qp0(enum ib_qp_type qp_type)
{
    return (qp_type == IB_QPT_SMI) ? 1 : 0;
}
| 13,600
|
106,841
| 0
|
// Resolve an explicit logical-height Length into a border-box logical height.
// Returns -1 when |h| is auto or cannot be resolved (e.g. a percentage with
// no resolvable containing-block height); otherwise the resolved height
// converted to border-box terms.
LayoutUnit RenderBox::computeLogicalHeightUsing(const Length& h)
{
LayoutUnit logicalHeight = -1;
if (!h.isAuto()) {
if (h.isFixed())
logicalHeight = h.value();
else if (h.isPercent())
logicalHeight = computePercentageLogicalHeight(h);
// -1 doubles as the "unresolved" sentinel from the percentage path.
if (logicalHeight != -1) {
logicalHeight = computeBorderBoxLogicalHeight(logicalHeight);
return logicalHeight;
}
}
return logicalHeight;
}
| 13,601
|
150,352
| 0
|
// Construct a client-controlled shell surface at the origin and start
// observing display changes for the lifetime of this object.
ClientControlledShellSurface::ClientControlledShellSurface(Surface* surface,
bool can_minimize,
int container)
: ShellSurfaceBase(surface, gfx::Point(), true, can_minimize, container) {
// NOTE(review): assumes a matching RemoveObserver in the destructor —
// confirm against the rest of the class.
display::Screen::GetScreen()->AddObserver(this);
}
| 13,602
|
46,033
| 0
|
/*
 * Install the callback (and its opaque context pointer) used to log
 * miscellaneous GSS authentication errors.  Stores into file-scope state.
 */
void svcauth_gss_set_log_miscerr_func(
auth_gssapi_log_miscerr_func func,
caddr_t data)
{
log_miscerr = func;
log_miscerr_data = data;
}
| 13,603
|
57,571
| 0
|
/*
 * React to a filesystem error according to the mount's errors= policy:
 * record the error state (in memory and in the on-disk superblock), then
 * abort the journal, remount read-only, and/or panic as configured.
 */
static void ext4_handle_error(struct super_block *sb)
{
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
/* Mark the fs as containing errors, both in-core and on disk. */
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
/* Nothing more to do on an already read-only mount. */
if (sb->s_flags & MS_RDONLY)
return;
/* Unless errors=continue, abort the journal to stop further writes. */
if (!test_opt(sb, ERRORS_CONT)) {
journal_t *journal = EXT4_SB(sb)->s_journal;
EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
if (journal)
jbd2_journal_abort(journal, -EIO);
}
if (test_opt(sb, ERRORS_RO)) {
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
sb->s_flags |= MS_RDONLY;
}
/* Persist the updated superblock before any panic. */
ext4_commit_super(sb, 1);
if (test_opt(sb, ERRORS_PANIC))
panic("EXT4-fs (device %s): panic forced after error\n",
sb->s_id);
}
| 13,604
|
64,778
| 0
|
/*
 * Free an skcipher template instance: recover the skcipher_instance from
 * its embedded crypto_instance and dispatch to its own free callback.
 */
static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
struct skcipher_instance *skcipher =
container_of(inst, struct skcipher_instance, s.base);
skcipher->free(skcipher);
}
| 13,605
|
730
| 0
|
/*
 * Look up a resolvers section by its id string.
 * Returns the matching entry from the global dns_resolvers list, or NULL.
 */
struct dns_resolvers *find_resolvers_by_id(const char *id)
{
struct dns_resolvers *res;
list_for_each_entry(res, &dns_resolvers, list) {
if (!strcmp(res->id, id))
return res;
}
return NULL;
}
| 13,606
|
15,864
| 0
|
/*
 * TX mitigation timer callback: re-enable queue notifications and flush
 * any packets the guest queued while notifications were suppressed.
 */
static void virtio_net_tx_timer(void *opaque)
{
VirtIONetQueue *q = opaque;
VirtIONet *n = q->n;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
assert(vdev->vm_running);
q->tx_waiting = 0;
/* Just in case the driver is not ready on more */
if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
return;
}
virtio_queue_set_notification(q->tx_vq, 1);
virtio_net_flush_tx(q);
}
| 13,607
|
167,802
| 0
|
// Thin public-API accessor for the BlinkGenPropertyTrees runtime feature flag.
bool WebRuntimeFeatures::IsBlinkGenPropertyTreesEnabled() {
return RuntimeEnabledFeatures::BlinkGenPropertyTreesEnabled();
}
| 13,608
|
65,733
| 0
|
/*
 * Decode an NFSv4 attribute bitmap from the XDR stream into bmval[0..2].
 * Words beyond the first three are read (via READ_BUF) but discarded.
 * Uses the DECODE_HEAD/READ_BUF/DECODE_TAIL XDR helper macros, which
 * declare locals and jump to xdr_error on truncated input.
 */
nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval)
{
u32 bmlen;
DECODE_HEAD;
bmval[0] = 0;
bmval[1] = 0;
bmval[2] = 0;
READ_BUF(4);
bmlen = be32_to_cpup(p++);
/* Sanity cap so a hostile length cannot demand a huge buffer. */
if (bmlen > 1000)
goto xdr_error;
READ_BUF(bmlen << 2);
if (bmlen > 0)
bmval[0] = be32_to_cpup(p++);
if (bmlen > 1)
bmval[1] = be32_to_cpup(p++);
if (bmlen > 2)
bmval[2] = be32_to_cpup(p++);
DECODE_TAIL;
}
| 13,609
|
92,799
| 0
|
/*
 * sched_yield() handler for CFS: mark the current entity as the "skip"
 * buddy so the next pick prefers another task, after updating its
 * accounting (skipped for SCHED_BATCH tasks).
 */
static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
struct sched_entity *se = &curr->se;
/*
 * Are we the only task in the tree?
 */
if (unlikely(rq->nr_running == 1))
return;
clear_buddies(cfs_rq, se);
if (curr->policy != SCHED_BATCH) {
update_rq_clock(rq);
/*
 * Update run-time statistics of the 'current'.
 */
update_curr(cfs_rq);
/*
 * Tell update_rq_clock() that we've just updated,
 * so we don't do microscopic update in schedule()
 * and double the fastpath cost.
 */
rq_clock_skip_update(rq);
}
set_skip_buddy(se);
}
| 13,610
|
158,786
| 0
|
// Clear all recorded resource-load bookkeeping back to an empty state.
void Reset() {
resource_load_infos_.clear();
memory_cached_loaded_urls_.clear();
resource_is_associated_with_main_frame_.clear();
}
| 13,611
|
134,316
| 0
|
// Close |tab|. If the tab is already animating closed, forward the close to
// the model tab that now occupies the next slot (so repeated clicks on the
// close button keep closing successive tabs); otherwise close it by model
// index.
void TabStrip::CloseTab(Tab* tab, CloseTabSource source) {
if (tab->closing()) {
// Find which closing bucket the tab sits in; its key is the model index
// the tab had when it started closing.
for (TabsClosingMap::const_iterator i(tabs_closing_map_.begin());
i != tabs_closing_map_.end(); ++i) {
std::vector<Tab*>::const_iterator j =
std::find(i->second.begin(), i->second.end(), tab);
if (j != i->second.end()) {
if (i->first + 1 < GetModelCount())
controller_->CloseTab(i->first + 1, source);
return;
}
}
// A closing tab must be in the closing map.
NOTREACHED();
return;
}
int model_index = GetModelIndexOfTab(tab);
if (IsValidModelIndex(model_index))
controller_->CloseTab(model_index, source);
}
| 13,612
|
16,652
| 0
|
// Convert a public FileState handle into a pointer to its internal
// representation, via the FileStatePub overload of convertState().
// NOTE(review): the return value of the inner convertState() and |pub|
// itself are used unchecked — presumably the overload always sets |pub|
// on the paths reachable here; confirm against its definition.
ReadUserLogFileState::convertState(
const ReadUserLog::FileState &state,
const ReadUserLogFileState::FileState *&internal )
{
const ReadUserLogFileState::FileStatePub *pub;
convertState(state, pub);
internal = &(pub->internal);
return true;
}
| 13,613
|
82,649
| 0
|
/*
 * Copy a compat (32-bit layout) sigevent from user space into a native
 * struct sigevent.  The native struct is zeroed first so fields absent
 * from the compat layout are well-defined.
 * Returns 0 on success, -EFAULT if the user buffer is bad.
 */
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event)
{
memset(event, 0, sizeof(*event));
return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
__get_user(event->sigev_value.sival_int,
&u_event->sigev_value.sival_int) ||
__get_user(event->sigev_signo, &u_event->sigev_signo) ||
__get_user(event->sigev_notify, &u_event->sigev_notify) ||
__get_user(event->sigev_notify_thread_id,
&u_event->sigev_notify_thread_id))
? -EFAULT : 0;
}
| 13,614
|
146,592
| 0
|
// WebGL vertexAttrib2fv: validate the Float32Array has at least 2 elements,
// then forward to the GL and record the attrib's value type.
void WebGLRenderingContextBase::vertexAttrib2fv(
GLuint index,
MaybeShared<const DOMFloat32Array> v) {
if (isContextLost())
return;
if (!v.View() || v.View()->length() < 2) {
SynthesizeGLError(GL_INVALID_VALUE, "vertexAttrib2fv", "invalid array");
return;
}
ContextGL()->VertexAttrib2fv(index, v.View()->DataMaybeShared());
SetVertexAttribType(index, kFloat32ArrayType);
}
| 13,615
|
63,459
| 0
|
/* YARA module teardown hook: this module holds no state to release. */
int module_finalize(
    YR_MODULE* module)
{
  (void) module;  /* unused */
  return ERROR_SUCCESS;
}
| 13,616
|
61,071
| 0
|
/*
 * Compute the destination GFile for copying/moving |src| into |dest_dir|,
 * keeping the source's own name (NULL custom name).
 */
get_target_file (GFile *src,
GFile *dest_dir,
const char *dest_fs_type,
gboolean same_fs)
{
return get_target_file_with_custom_name (src, dest_dir, dest_fs_type, same_fs, NULL);
}
| 13,617
|
178,361
| 1
|
/*
 * Parse VPNv4 (MPLS VPN) NLRI from an UPDATE and feed each prefix to
 * bgp_update().  Prefix wire format: 1-byte bit length, 3-byte label,
 * 8-byte route distinguisher, then the IPv4 prefix bytes.
 *
 * NOTE(review): this block is textually corrupted — the `else if` below has
 * no matching `if`, the `#endif` has no opening `#if 0` before it, and two
 * statements appear after the loop's closing context.  It cannot compile
 * as shown; restore it from the upstream source before trusting any of
 * the bounds checks here.
 */
bgp_nlri_parse_vpnv4 (struct peer *peer, struct attr *attr,
struct bgp_nlri *packet)
{
u_char *pnt;
u_char *lim;
struct prefix p;
int psize;
int prefixlen;
u_int16_t type;
struct rd_as rd_as;
struct rd_ip rd_ip;
struct prefix_rd prd;
u_char *tagpnt;
/* Check peer status. */
if (peer->status != Established)
return 0;
/* Make prefix_rd */
prd.family = AF_UNSPEC;
prd.prefixlen = 64;
pnt = packet->nlri;
lim = pnt + packet->length;
for (; pnt < lim; pnt += psize)
{
/* Clear prefix structure. */
/* Fetch prefix length. */
prefixlen = *pnt++;
p.family = AF_INET;
psize = PSIZE (prefixlen);
/* 88 bits = 24-bit label + 64-bit RD minimum. */
if (prefixlen < 88)
{
zlog_err ("prefix length is less than 88: %d", prefixlen);
return -1;
}
/* Copyr label to prefix. */
tagpnt = pnt;;
/* Copy routing distinguisher to rd. */
memcpy (&prd.val, pnt + 3, 8);
else if (type == RD_TYPE_IP)
zlog_info ("prefix %ld:%s:%ld:%s/%d", label, inet_ntoa (rd_ip.ip),
rd_ip.val, inet_ntoa (p.u.prefix4), p.prefixlen);
#endif /* 0 */
/* NOTE(review): this bound check runs after pnt has already been read. */
if (pnt + psize > lim)
return -1;
if (attr)
bgp_update (peer, &p, attr, AFI_IP, SAFI_MPLS_VPN,
ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd, tagpnt, 0);
else
return -1;
}
p.prefixlen = prefixlen - 88;
memcpy (&p.u.prefix, pnt + 11, psize - 11);
#if 0
if (type == RD_TYPE_AS)
}
| 13,618
|
184,929
| 1
|
// BlueZ agent callback: the daemon is asking for a PIN code for this device.
// Records the reply callback and forwards the request to the pairing
// delegate, which will eventually answer via the stored callback.
void BluetoothDeviceChromeOS::RequestPinCode(
const dbus::ObjectPath& device_path,
const PinCodeCallback& callback) {
DCHECK(agent_.get());
DCHECK(device_path == object_path_);
VLOG(1) << object_path_.value() << ": RequestPinCode";
UMA_HISTOGRAM_ENUMERATION("Bluetooth.PairingMethod",
UMA_PAIRING_METHOD_REQUEST_PINCODE,
UMA_PAIRING_METHOD_COUNT);
DCHECK(pairing_delegate_);
// Only one PIN request may be outstanding at a time.
DCHECK(pincode_callback_.is_null());
pincode_callback_ = callback;
pairing_delegate_->RequestPinCode(this);
pairing_delegate_used_ = true;
}
| 13,619
|
99,010
| 0
|
// Create the offscreen GL context backing this WebGraphicsContext3D,
// optionally sharing with the WebView's compositor context.
// Returns false if GL one-off init, context creation, or ANGLE compiler
// creation fails.
bool WebGraphicsContext3DDefaultImpl::initialize(WebGraphicsContext3D::Attributes attributes, WebView* webView, bool renderDirectlyToWebView)
{
if (!gfx::GLContext::InitializeOneOff())
return false;
m_renderDirectlyToWebView = renderDirectlyToWebView;
gfx::GLContext* shareContext = 0;
if (!renderDirectlyToWebView) {
// Share resources with the view's context when one exists; otherwise
// fall back to non-direct rendering.
WebGraphicsContext3D* viewContext = webView->graphicsContext3D();
if (viewContext) {
WebGraphicsContext3DDefaultImpl* contextImpl = static_cast<WebGraphicsContext3DDefaultImpl*>(viewContext);
shareContext = contextImpl->m_glContext.get();
} else {
m_renderDirectlyToWebView = false;
}
}
m_glContext = WTF::adoptPtr(gfx::GLContext::CreateOffscreenGLContext(shareContext));
if (!m_glContext)
return false;
m_attributes = attributes;
// Direct rendering cannot antialias; clamp the requested attributes.
if (renderDirectlyToWebView)
m_attributes.antialias = false;
validateAttributes();
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);
if (!angleCreateCompilers()) {
angleDestroyCompilers();
return false;
}
m_initialized = true;
return true;
}
| 13,620
|
73,937
| 0
|
/*
 * Parse the Unix extension area of a level-0 LHA header: OS type,
 * timestamp, permissions, and uid/gid (the latter three are anchored to
 * the end of the area).  Silently ignores areas that are too short or
 * whose second byte is not zero.
 * NOTE(review): data[1] == 0x00 presumably identifies this extended-area
 * layout — confirm against the LHA level-0 spec.
 */
static void process_level0_unix_area(LHAFileHeader *header,
uint8_t *data, size_t data_len)
{
if (data_len < LEVEL_0_UNIX_EXTENDED_LEN || data[1] != 0x00) {
return;
}
header->os_type = data[0];
header->timestamp = lha_decode_uint32(data + 2);
header->unix_perms = lha_decode_uint16(data + data_len - 6);
header->unix_uid = lha_decode_uint16(data + data_len - 4);
header->unix_gid = lha_decode_uint16(data + data_len - 2);
header->extra_flags |= LHA_FILE_UNIX_PERMS | LHA_FILE_UNIX_UID_GID;
}
| 13,621
|
26,867
| 0
|
/*
 * follow_link for /proc/<pid>/{exe,cwd,root,fd/*}: resolve the link by
 * asking the proc entry's proc_get_link op to fill nd->path directly,
 * after checking the caller may snoop on the target task.
 * Always returns ERR_PTR(error); 0 wrapped in ERR_PTR signals success.
 */
static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
int error = -EACCES;
/* We don't need a base pointer in the /proc filesystem */
path_put(&nd->path);
/* Are we allowed to snoop on the tasks file descriptors? */
if (!proc_fd_access_allowed(inode))
goto out;
error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
out:
return ERR_PTR(error);
}
| 13,622
|
181,863
| 1
|
/*
 * Decode an unsigned field of width `prec` bits as a two's-complement
 * signed value: the low prec-1 bits contribute positively, the top bit
 * (bit prec-1) carries the negative weight.
 *
 * Fix: the original formed the masks with `1 << (prec - 1)` in plain int,
 * which is undefined behavior once prec-1 reaches the width of int
 * (C11 6.5.7).  Perform the shifts in the operand's unsigned type.
 */
static inline long decode_twos_comp(ulong c, int prec)
{
	long result;
	assert(prec >= 2);
	jas_eprintf("warning: support for signed data is untested\n");
	result = (long)(c & (((ulong)1 << (prec - 1)) - 1)) -
	    (long)(c & ((ulong)1 << (prec - 1)));
	return result;
}
| 13,623
|
142,312
| 0
|
// Forward character key events to the password-reuse detector; all other
// input event types are ignored.
void ChromePasswordManagerClient::OnInputEvent(
const blink::WebInputEvent& event) {
if (event.GetType() != blink::WebInputEvent::kChar)
return;
const blink::WebKeyboardEvent& key_event =
static_cast<const blink::WebKeyboardEvent&>(event);
password_reuse_detection_manager_.OnKeyPressed(key_event.text);
}
| 13,624
|
40,292
| 0
|
/*
 * sendmsg() for a connected PPPoE socket: build a PPPoE session frame
 * (header + user payload), prepend the Ethernet header, and queue it on
 * the bound device.  Returns the number of payload bytes sent, or a
 * negative errno.
 */
static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct sk_buff *skb;
struct sock *sk = sock->sk;
struct pppox_sock *po = pppox_sk(sk);
int error;
struct pppoe_hdr hdr;
struct pppoe_hdr *ph;
struct net_device *dev;
char *start;
lock_sock(sk);
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
error = -ENOTCONN;
goto end;
}
/* PPPoE v1, session stage (code 0), session id from the connection. */
hdr.ver = 1;
hdr.type = 1;
hdr.code = 0;
hdr.sid = po->num;
dev = po->pppoe_dev;
error = -EMSGSIZE;
if (total_len > (dev->mtu + dev->hard_header_len))
goto end;
skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
0, GFP_KERNEL);
if (!skb) {
error = -ENOMEM;
goto end;
}
/* Reserve space for headers. */
skb_reserve(skb, dev->hard_header_len);
skb_reset_network_header(skb);
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->protocol = cpu_to_be16(ETH_P_PPP_SES);
ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr));
start = (char *)&ph->tag[0];
/* Copy the user payload directly after the PPPoE header. */
error = memcpy_fromiovec(start, m->msg_iov, total_len);
if (error < 0) {
kfree_skb(skb);
goto end;
}
error = total_len;
dev_hard_header(skb, dev, ETH_P_PPP_SES,
po->pppoe_pa.remote, NULL, total_len);
memcpy(ph, &hdr, sizeof(struct pppoe_hdr));
ph->length = htons(total_len);
dev_queue_xmit(skb);
end:
release_sock(sk);
return error;
}
| 13,625
|
144,776
| 0
|
// Map a tab DiscardReason onto the corresponding StateChangeReason used
// for lifecycle-state reporting.
// NOTE(review): there is no default and no return after the switch — this
// relies on the switch exhaustively covering DiscardReason; an
// out-of-range value would fall off the end (UB). Confirm the compiler
// enforces enum coverage here.
StateChangeReason DiscardReasonToStateChangeReason(DiscardReason reason) {
switch (reason) {
case DiscardReason::kExternal:
return StateChangeReason::EXTENSION_INITIATED;
case DiscardReason::kProactive:
return StateChangeReason::BROWSER_INITIATED;
case DiscardReason::kUrgent:
return StateChangeReason::SYSTEM_MEMORY_PRESSURE;
}
}
| 13,626
|
159,690
| 0
|
// Record a UMA sample describing how an external-protocol launch was
// handled, derived from the block state and whether the "remember" checkbox
// was selected.  UNKNOWN must never reach this point.
void ExternalProtocolHandler::RecordHandleStateMetrics(bool checkbox_selected,
BlockState block_state) {
HandleState handle_state = DONT_LAUNCH;
switch (block_state) {
case DONT_BLOCK:
handle_state = checkbox_selected ? CHECKED_LAUNCH : LAUNCH;
break;
case BLOCK:
handle_state =
checkbox_selected ? CHECKED_DONT_LAUNCH_DEPRECATED : DONT_LAUNCH;
break;
case UNKNOWN:
NOTREACHED();
return;
}
// The deprecated bucket should be unreachable via the UI.
DCHECK_NE(CHECKED_DONT_LAUNCH_DEPRECATED, handle_state);
UMA_HISTOGRAM_ENUMERATION(kHandleStateMetric, handle_state,
HANDLE_STATE_LAST);
}
| 13,627
|
118,425
| 0
|
// Test hook: for URLs on the "crash" domain, deliberately perform the
// memory error named by the path (heap overflow/underflow, use-after-free)
// so AddressSanitizer bots can verify error reporting.  Intentionally
// buggy by design — do not "fix" the out-of-bounds accesses below.
NOINLINE static void MaybeTriggerAsanError(const GURL& url) {
static const char kCrashDomain[] = "crash";
static const char kHeapOverflow[] = "/heap-overflow";
static const char kHeapUnderflow[] = "/heap-underflow";
static const char kUseAfterFree[] = "/use-after-free";
static const int kArraySize = 5;
if (!url.DomainIs(kCrashDomain, sizeof(kCrashDomain) - 1))
return;
if (!url.has_path())
return;
scoped_ptr<int[]> array(new int[kArraySize]);
std::string crash_type(url.path());
int dummy = 0;
if (crash_type == kHeapOverflow) {
dummy = array[kArraySize];
} else if (crash_type == kHeapUnderflow ) {
dummy = array[-1];
} else if (crash_type == kUseAfterFree) {
int* dangling = array.get();
array.reset();
dummy = dangling[kArraySize / 2];
}
// Keep |dummy| alive so the faulty reads aren't optimized away.
base::debug::Alias(&dummy);
}
| 13,628
|
96,526
| 0
|
/*
 * Unit test: with the flow's alproto pre-set to HTTP, a single HTTP POST
 * chunk parsed to-server should trigger the "alert http" signature (sid 1).
 * Returns 1 on success, 0 on failure.
 */
static int AppLayerProtoDetectTest16(void)
{
int result = 0;
Flow *f = NULL;
HtpState *http_state = NULL;
uint8_t http_buf1[] = "POST /one HTTP/1.0\r\n"
"User-Agent: Mozilla/1.0\r\n"
"Cookie: hellocatch\r\n\r\n";
uint32_t http_buf1_len = sizeof(http_buf1) - 1;
TcpSession ssn;
Packet *p = NULL;
Signature *s = NULL;
ThreadVars tv;
DetectEngineThreadCtx *det_ctx = NULL;
DetectEngineCtx *de_ctx = NULL;
AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc();
memset(&tv, 0, sizeof(ThreadVars));
memset(&ssn, 0, sizeof(TcpSession));
/* Build an established to-server TCP packet/flow pair. */
p = UTHBuildPacket(NULL, 0, IPPROTO_TCP);
if (p == NULL) {
printf("packet setup failed: ");
goto end;
}
f = UTHBuildFlow(AF_INET, "1.1.1.1", "2.2.2.2", 1024, 80);
if (f == NULL) {
printf("flow setup failed: ");
goto end;
}
f->protoctx = &ssn;
f->proto = IPPROTO_TCP;
p->flow = f;
p->flowflags |= FLOW_PKT_TOSERVER;
p->flowflags |= FLOW_PKT_ESTABLISHED;
p->flags |= PKT_HAS_FLOW|PKT_STREAM_EST;
f->alproto = ALPROTO_HTTP;
StreamTcpInitConfig(TRUE);
/* Set up a detection engine with one HTTP rule. */
de_ctx = DetectEngineCtxInit();
if (de_ctx == NULL) {
goto end;
}
de_ctx->flags |= DE_QUIET;
s = de_ctx->sig_list = SigInit(de_ctx, "alert http any any -> any any "
"(msg:\"Test content option\"; "
"sid:1;)");
if (s == NULL) {
goto end;
}
SigGroupBuild(de_ctx);
DetectEngineThreadCtxInit(&tv, (void *)de_ctx, (void *)&det_ctx);
/* Feed the HTTP request to the app-layer parser. */
FLOWLOCK_WRLOCK(f);
int r = AppLayerParserParse(NULL, alp_tctx, f, ALPROTO_HTTP,
STREAM_TOSERVER, http_buf1, http_buf1_len);
if (r != 0) {
printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r);
FLOWLOCK_UNLOCK(f);
goto end;
}
FLOWLOCK_UNLOCK(f);
http_state = f->alstate;
if (http_state == NULL) {
printf("no http state: ");
goto end;
}
/* do detect */
SigMatchSignatures(&tv, de_ctx, det_ctx, p);
if (!PacketAlertCheck(p, 1)) {
printf("sig 1 didn't alert, but it should: ");
goto end;
}
result = 1;
end:
/* Tear everything down regardless of outcome. */
if (alp_tctx != NULL)
AppLayerParserThreadCtxFree(alp_tctx);
if (det_ctx != NULL)
DetectEngineThreadCtxDeinit(&tv, det_ctx);
if (de_ctx != NULL)
SigGroupCleanup(de_ctx);
if (de_ctx != NULL)
DetectEngineCtxFree(de_ctx);
StreamTcpFreeConfig(TRUE);
UTHFreePackets(&p, 1);
UTHFreeFlow(f);
return result;
}
| 13,629
|
165,239
| 0
|
// Toggle the voice-interaction session in response to a user gesture,
// after the common user-interaction gating checks pass.
void ArcVoiceInteractionFrameworkService::ToggleSessionFromUserInteraction() {
DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
if (!InitiateUserInteraction(true /* is_toggle */))
return;
mojom::VoiceInteractionFrameworkInstance* framework_instance =
ARC_GET_INSTANCE_FOR_METHOD(
arc_bridge_service_->voice_interaction_framework(),
ToggleVoiceInteractionSession);
DCHECK(framework_instance);
framework_instance->ToggleVoiceInteractionSession(IsHomescreenActive());
}
| 13,630
|
43,338
| 0
|
/*
 * Parse a Sigma/Foveon X3F file: walk the SECd directory at the end of the
 * file and extract image geometry, thumbnail, CAMF metadata, and PROP
 * key/value pairs (ISO, make/model, timestamps, exposure, ...).
 */
void CLASS parse_foveon()
{
int entries, img=0, off, len, tag, save, i, wide, high, pent, poff[256][2];
char name[64], value[64];
order = 0x4949; /* Little-endian */
fseek (ifp, 36, SEEK_SET);
flip = get4();
/* The directory offset lives in the last 4 bytes of the file. */
fseek (ifp, -4, SEEK_END);
fseek (ifp, get4(), SEEK_SET);
if (get4() != 0x64434553) return; /* SECd */
entries = (get4(),get4());
while (entries--) {
off = get4();
len = get4();
tag = get4();
save = ftell(ifp);
fseek (ifp, off, SEEK_SET);
/* Each section must start with "SEC" + its tag byte. */
if (get4() != (unsigned)(0x20434553 | (tag << 24))) return;
switch (tag) {
case 0x47414d49: /* IMAG */
case 0x32414d49: /* IMA2 */
fseek (ifp, 8, SEEK_CUR);
if (get4() == 30) { /* SIGMA SD15/SD1/DP* are unsupported */
is_foveon = 0;
return;
}
wide = get4();
high = get4();
/* Keep the largest image as the raw data. */
if (wide > raw_width && high > raw_height) {
raw_width = wide;
raw_height = high;
data_offset = off+24;
}
fseek (ifp, off+28, SEEK_SET);
/* A JPEG SOI marker here means an embedded JPEG thumbnail. */
if (fgetc(ifp) == 0xff && fgetc(ifp) == 0xd8
&& thumb_length < (unsigned)(len-28)) {
thumb_offset = off+28;
thumb_length = len-28;
write_thumb = &CLASS jpeg_thumb;
}
if (++img == 2 && !thumb_length) {
thumb_offset = off+24;
thumb_width = wide;
thumb_height = high;
write_thumb = &CLASS foveon_thumb;
}
break;
case 0x464d4143: /* CAMF */
meta_offset = off+24;
meta_length = len-28;
if (meta_length > 0x20000)
meta_length = 0x20000;
break;
case 0x504f5250: /* PROP */
pent = (get4(),get4());
fseek (ifp, 12, SEEK_CUR);
off += pent*8 + 24;
if ((unsigned) pent > 256) pent=256;
/* Fill poff linearly: pent (name,value) offset pairs. */
for (i=0; i < pent*2; i++)
poff[0][i] = off + get4()*2;
for (i=0; i < pent; i++) {
foveon_gets (poff[i][0], name, 64);
foveon_gets (poff[i][1], value, 64);
if (!strcmp (name, "ISO"))
iso_speed = atoi(value);
if (!strcmp (name, "CAMMANUF"))
strcpy (make, value);
if (!strcmp (name, "CAMMODEL"))
strcpy (model, value);
if (!strcmp (name, "WB_DESC"))
strcpy (model2, value);
if (!strcmp (name, "TIME"))
timestamp = atoi(value);
if (!strcmp (name, "EXPTIME"))
shutter = atoi(value) / 1000000.0;
if (!strcmp (name, "APERTURE"))
aperture = atof(value);
if (!strcmp (name, "FLENGTH"))
focal_len = atof(value);
}
#ifdef LOCALTIME
/* NOTE(review): "×tamp" looks like mojibake for "&timestamp"
   (HTML-entity mangling); restore from upstream dcraw. */
timestamp = mktime (gmtime (×tamp));
#endif
}
fseek (ifp, save, SEEK_SET);
}
is_foveon = 1;
}
| 13,631
|
99,632
| 0
|
// Complete a Reset(): drop pending output callbacks, notify the client, and
// resume decoding if input buffers arrived while resetting.  Re-posts
// itself while VA surfaces are still being recycled.
void VaapiVideoDecodeAccelerator::FinishReset() {
DCHECK_EQ(message_loop_, base::MessageLoop::current());
DVLOG(1) << "FinishReset";
base::AutoLock auto_lock(lock_);
if (state_ != kResetting) {
DCHECK(state_ == kDestroying || state_ == kUninitialized) << state_;
return; // We could've gotten destroyed already.
}
while (!pending_output_cbs_.empty())
pending_output_cbs_.pop();
if (awaiting_va_surfaces_recycle_) {
// Surfaces still outstanding; try again after they come back.
message_loop_->PostTask(FROM_HERE, base::Bind(
&VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
return;
}
num_stream_bufs_at_decoder_ = 0;
state_ = kIdle;
message_loop_->PostTask(FROM_HERE, base::Bind(
&Client::NotifyResetDone, client_));
if (!input_buffers_.empty()) {
state_ = kDecoding;
decoder_thread_task_runner_->PostTask(
FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::DecodeTask,
base::Unretained(this)));
}
DVLOG(1) << "Reset finished";
}
| 13,632
|
112,089
| 0
|
// Report whether the Nigori node says the user set an explicit sync
// passphrase.  Returns false (with NOTREACHED) if the Nigori node cannot
// be found.
bool SyncManager::SyncInternal::IsUsingExplicitPassphrase() {
ReadTransaction trans(FROM_HERE, &share_);
ReadNode node(&trans);
if (node.InitByTagLookup(kNigoriTag) != sync_api::BaseNode::INIT_OK) {
NOTREACHED();
return false;
}
return node.GetNigoriSpecifics().using_explicit_passphrase();
}
| 13,633
|
102,939
| 0
|
// Forward the vertical-tabs toggle to the owning Browser.
void DefaultTabHandler::ToggleUseVerticalTabs() {
delegate_->AsBrowser()->ToggleUseVerticalTabs();
}
| 13,634
|
185,498
| 1
|
// Construct the scheduler helper: create the task queue manager/selector,
// compute the quiescence mask (all queues except the two control queues),
// cache the well-known task runners, and configure the control queues'
// priorities, pump and wakeup policies.
// NOTE(review): the member-initializer list depends on declaration order —
// task_queue_manager_ must be declared after task_queue_selector_ for the
// .get() use below to be safe; confirm against the header.
SchedulerHelper::SchedulerHelper(
scoped_refptr<NestableSingleThreadTaskRunner> main_task_runner,
const char* tracing_category,
const char* disabled_by_default_tracing_category,
size_t total_task_queue_count)
: task_queue_selector_(new PrioritizingTaskQueueSelector()),
task_queue_manager_(
new TaskQueueManager(total_task_queue_count,
main_task_runner,
task_queue_selector_.get(),
disabled_by_default_tracing_category)),
quiescence_monitored_task_queue_mask_(
((1ull << total_task_queue_count) - 1ull) &
~(1ull << QueueId::CONTROL_TASK_QUEUE) &
~(1ull << QueueId::CONTROL_TASK_AFTER_WAKEUP_QUEUE)),
control_task_runner_(
task_queue_manager_->TaskRunnerForQueue(QueueId::CONTROL_TASK_QUEUE)),
control_after_wakeup_task_runner_(task_queue_manager_->TaskRunnerForQueue(
QueueId::CONTROL_TASK_AFTER_WAKEUP_QUEUE)),
default_task_runner_(
task_queue_manager_->TaskRunnerForQueue(QueueId::DEFAULT_TASK_QUEUE)),
time_source_(new TimeSource),
tracing_category_(tracing_category),
disabled_by_default_tracing_category_(
disabled_by_default_tracing_category) {
DCHECK_GE(total_task_queue_count,
static_cast<size_t>(QueueId::TASK_QUEUE_COUNT));
// Control queues run at control priority and never wake other queues.
task_queue_selector_->SetQueuePriority(
QueueId::CONTROL_TASK_QUEUE,
PrioritizingTaskQueueSelector::CONTROL_PRIORITY);
task_queue_manager_->SetWakeupPolicy(
QueueId::CONTROL_TASK_QUEUE,
TaskQueueManager::WakeupPolicy::DONT_WAKE_OTHER_QUEUES);
task_queue_selector_->SetQueuePriority(
QueueId::CONTROL_TASK_AFTER_WAKEUP_QUEUE,
PrioritizingTaskQueueSelector::CONTROL_PRIORITY);
task_queue_manager_->SetPumpPolicy(
QueueId::CONTROL_TASK_AFTER_WAKEUP_QUEUE,
TaskQueueManager::PumpPolicy::AFTER_WAKEUP);
task_queue_manager_->SetWakeupPolicy(
QueueId::CONTROL_TASK_AFTER_WAKEUP_QUEUE,
TaskQueueManager::WakeupPolicy::DONT_WAKE_OTHER_QUEUES);
for (size_t i = 0; i < TASK_QUEUE_COUNT; i++) {
task_queue_manager_->SetQueueName(
i, TaskQueueIdToString(static_cast<QueueId>(i)));
}
// TODO(skyostil): Increase this to 4 (crbug.com/444764).
task_queue_manager_->SetWorkBatchSize(1);
}
| 13,635
|
100,144
| 0
|
// Factory: build a string-kind clipboard/drag item of MIME type |type|
// holding |data|.
DataObjectItem* DataObjectItem::CreateFromString(const String& type,
const String& data) {
DataObjectItem* item =
MakeGarbageCollected<DataObjectItem>(kStringKind, type);
item->data_ = data;
return item;
}
| 13,636
|
43,456
| 0
|
/* Async-blkcipher init for AES-NI PCBC, wrapping the FPU-guarded driver. */
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
| 13,637
|
26,241
| 0
|
/*
 * Initialize a perf ring buffer: clamp the requested wakeup watermark to
 * the buffer size (defaulting to half the buffer), record writability,
 * and take the initial reference.
 */
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
long max_size = perf_data_size(rb);
if (watermark)
rb->watermark = min(max_size, watermark);
if (!rb->watermark)
rb->watermark = max_size / 2;
if (flags & RING_BUFFER_WRITABLE)
rb->writable = 1;
atomic_set(&rb->refcount, 1);
}
| 13,638
|
62,898
| 0
|
/*
 * CP14 load/store traps are not supported: inject an undefined-instruction
 * exception into the guest and resume (return 1 = handled in kernel).
 */
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
kvm_inject_undefined(vcpu);
return 1;
}
| 13,639
|
145,919
| 0
|
// Return the panel container window on the same root window as |panel|.
aura::Window* GetPanelContainer(aura::Window* panel) {
return Shell::GetContainer(panel->GetRootWindow(),
kShellWindowId_PanelContainer);
}
| 13,640
|
4,773
| 0
|
/*
 * NFKC-normalize a UCS-4 string: convert to UTF-8, normalize, and return
 * the result as newly allocated UCS-4 (caller frees).
 * NOTE(review): the UTF-8 conversion result `p` is not checked for NULL
 * before being passed on — confirm stringprep_ucs4_to_utf8 cannot fail
 * here, or that _g_utf8_normalize_wc tolerates NULL.
 */
stringprep_ucs4_nfkc_normalize (const uint32_t * str, ssize_t len)
{
char *p;
uint32_t *result_wc;
p = stringprep_ucs4_to_utf8 (str, len, 0, 0);
result_wc = _g_utf8_normalize_wc (p, -1, G_NORMALIZE_NFKC);
free (p);
return result_wc;
}
| 13,641
|
110,217
| 0
|
// Broker reply: the debug exception handler launch finished; forward the
// success flag to the renderer via the saved reply message.
void NaClProcessHost::OnDebugExceptionHandlerLaunchedByBroker(bool success) {
IPC::Message* reply = attach_debug_exception_handler_reply_msg_.release();
NaClProcessMsg_AttachDebugExceptionHandler::WriteReplyParams(reply, success);
Send(reply);
}
| 13,642
|
21,666
| 0
|
/*
 * Complete an AIO request: for sync kiocbs, hand the result straight to the
 * waiting task; otherwise append a completion event to the context's ring
 * buffer, signal the optional eventfd, drop the request reference, and wake
 * any waiters.  Returns the value of __aio_put_req (1 for sync kiocbs).
 * May be called from IRQ context; all ring manipulation is under ctx_lock.
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
struct kioctx *ctx = iocb->ki_ctx;
struct aio_ring_info *info;
struct aio_ring *ring;
struct io_event *event;
unsigned long flags;
unsigned long tail;
int ret;
/*
 * Special case handling for sync iocbs:
 * - events go directly into the iocb for fast handling
 * - the sync task with the iocb in its stack holds the single iocb
 * ref, no other paths have a way to get another ref
 * - the sync task helpfully left a reference to itself in the iocb
 */
if (is_sync_kiocb(iocb)) {
BUG_ON(iocb->ki_users != 1);
iocb->ki_user_data = res;
iocb->ki_users = 0;
wake_up_process(iocb->ki_obj.tsk);
return 1;
}
info = &ctx->ring_info;
/* add a completion event to the ring buffer.
 * must be done holding ctx->ctx_lock to prevent
 * other code from messing with the tail
 * pointer since we might be called from irq
 * context.
 */
spin_lock_irqsave(&ctx->ctx_lock, flags);
if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
list_del_init(&iocb->ki_run_list);
/*
 * cancelled requests don't get events, userland was given one
 * when the event got cancelled.
 */
if (kiocbIsCancelled(iocb))
goto put_rq;
ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
tail = info->tail;
event = aio_ring_event(info, tail, KM_IRQ0);
if (++tail >= info->nr)
tail = 0;
event->obj = (u64)(unsigned long)iocb->ki_obj.user;
event->data = iocb->ki_user_data;
event->res = res;
event->res2 = res2;
dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
res, res2);
/* after flagging the request as done, we
 * must never even look at it again
 */
smp_wmb(); /* make event visible before updating tail */
info->tail = tail;
ring->tail = tail;
put_aio_ring_event(event, KM_IRQ0);
kunmap_atomic(ring, KM_IRQ1);
pr_debug("added to ring %p at [%lu]\n", iocb, tail);
/*
 * Check if the user asked us to deliver the result through an
 * eventfd. The eventfd_signal() function is safe to be called
 * from IRQ context.
 */
if (iocb->ki_eventfd != NULL)
eventfd_signal(iocb->ki_eventfd, 1);
put_rq:
/* everything turned out well, dispose of the aiocb. */
ret = __aio_put_req(ctx, iocb);
/*
 * We have to order our ring_info tail store above and test
 * of the wait list below outside the wait lock. This is
 * like in wake_up_bit() where clearing a bit has to be
 * ordered with the unlocked test.
 */
smp_mb();
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
spin_unlock_irqrestore(&ctx->ctx_lock, flags);
return ret;
}
| 13,643
|
179,655
| 1
|
/*
 * Phonet datagram recvmsg(): dequeue one datagram, copy it to the caller's
 * iovec (flagging truncation), and optionally report the source address.
 *
 * Fix: the original wrote *addr_len before any datagram was received, so
 * a failed receive could report a valid address length while msg_name was
 * never filled, exposing uninitialized caller memory.  The length is now
 * set only together with the address copy, matching the kernel-wide
 * "set msg_namelen only when msg_name is filled" rule.
 */
static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
			struct msghdr *msg, size_t len, int noblock,
			int flags, int *addr_len)
{
	struct sk_buff *skb = NULL;
	struct sockaddr_pn sa;
	int rval = -EOPNOTSUPP;
	int copylen;

	if (flags & ~(MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_NOSIGNAL|
			MSG_CMSG_COMPAT))
		goto out_nofree;

	skb = skb_recv_datagram(sk, flags, noblock, &rval);
	if (skb == NULL)
		goto out_nofree;

	pn_skb_get_src_sockaddr(skb, &sa);

	/* Truncate to the user buffer, recording the loss in msg_flags. */
	copylen = skb->len;
	if (len < copylen) {
		msg->msg_flags |= MSG_TRUNC;
		copylen = len;
	}

	rval = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copylen);
	if (rval) {
		rval = -EFAULT;
		goto out;
	}

	rval = (flags & MSG_TRUNC) ? skb->len : copylen;

	/* Report the source address (and its length) only when the caller
	 * asked for it — never advertise a length for an unfilled name. */
	if (msg->msg_name != NULL) {
		memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
		*addr_len = sizeof(sa);
	}

out:
	skb_free_datagram(sk, skb);
out_nofree:
	return rval;
}
| 13,644
|
172,606
| 0
|
// Push stereo volume to the effect engine if it is enabled and declares a
// volume flag.  When this module is the volume controller, the engine may
// adjust the values; the possibly-modified volumes are written back to
// *left/*right.  Returns the command status (or the module's stored error).
status_t AudioFlinger::EffectModule::setVolume(uint32_t *left, uint32_t *right, bool controller)
{
Mutex::Autolock _l(mLock);
if (mStatus != NO_ERROR) {
return mStatus;
}
status_t status = NO_ERROR;
if (isProcessEnabled() &&
((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL ||
(mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND)) {
status_t cmdStatus;
uint32_t volume[2];
uint32_t *pVolume = NULL;
uint32_t size = sizeof(volume);
volume[0] = *left;
volume[1] = *right;
// A non-NULL reply buffer tells the engine we want its adjusted volume.
if (controller) {
pVolume = volume;
}
status = (*mEffectInterface)->command(mEffectInterface,
EFFECT_CMD_SET_VOLUME,
size,
volume,
&size,
pVolume);
if (controller && status == NO_ERROR && size == sizeof(volume)) {
*left = volume[0];
*right = volume[1];
}
}
return status;
}
| 13,645
|
50,037
| 0
|
/*
 * Invoke the client's open callback, if one was registered.
 * Returns the callback's status, or ARCHIVE_OK when no opener is set.
 */
client_open_proxy(struct archive_read_filter *self)
{
int r = ARCHIVE_OK;
if (self->archive->client.opener != NULL)
r = (self->archive->client.opener)(
(struct archive *)self->archive, self->data);
return (r);
}
| 13,646
|
147,195
| 0
|
// Generated V8 binding trampoline: time the getter and dispatch to the
// internal anyAttribute getter implementation.
void V8TestObject::AnyAttributeAttributeGetterCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_anyAttribute_Getter");
test_object_v8_internal::AnyAttributeAttributeGetter(info);
}
| 13,647
|
131,422
| 0
|
// Generated V8 binding trampoline: flip the sampling-profiler state around
// the call into the internal int32ArrayMethod implementation.
static void int32ArrayMethodMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMMethod");
TestObjectPythonV8Internal::int32ArrayMethodMethod(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 13,648
|
13,329
| 0
|
/*
 * Push a new transparency group buffer onto the pdf14 compositor stack and
 * initialize its backdrop (copied, color-managed, or cleared) according to
 * the group's isolated/knockout flags.
 *
 * Fix: the original dereferenced `buf` in the if_debug4m trace BEFORE the
 * buf == NULL check, so an allocation failure crashed instead of returning
 * gs_error_VMerror.  The NULL check now immediately follows allocation.
 */
pdf14_push_transparency_group(pdf14_ctx *ctx, gs_int_rect *rect, bool isolated,
                              bool knockout, byte alpha, byte shape,
                              gs_blend_mode_t blend_mode, bool idle, uint mask_id,
                              int numcomps, bool cm_back_drop,
                              cmm_profile_t *group_profile,
                              cmm_profile_t *tos_profile, gs_gstate *pgs,
                              gx_device *dev)
{
    pdf14_buf *tos = ctx->stack;
    pdf14_buf *buf, *backdrop;
    bool has_shape, has_tags;

    if_debug1m('v', ctx->memory,
               "[v]pdf14_push_transparency_group, idle = %d\n", idle);
    /* We are going to use the shape in the knockout computation. If previous
       buffer has a shape or if this is a knockout then we will have a shape here */
    has_shape = tos->has_shape || tos->knockout;
    /* If previous buffer has tags, then add tags here */
    has_tags = tos->has_tags;
    /* If the group is NOT isolated we add in the alpha_g plane. This enables
       recompositing to be performed ala art_pdf_recomposite_group_8 so that
       the backdrop is only included one time in the computation. */
    /* Order of buffer data is color data, followed by alpha channel, followed by
       shape (if present), then alpha_g (if present), then tags (if present) */
    buf = pdf14_buf_new(rect, has_tags, !isolated, has_shape, idle, numcomps + 1,
                        tos->num_spots, ctx->memory);
    if (buf == NULL)
        return_error(gs_error_VMerror);
    if_debug4m('v', ctx->memory,
               "[v]base buf: %d x %d, %d color channels, %d planes\n",
               buf->rect.q.x, buf->rect.q.y, buf->n_chan, buf->n_planes);
    buf->isolated = isolated;
    buf->knockout = knockout;
    buf->alpha = alpha;
    buf->shape = shape;
    buf->blend_mode = blend_mode;
    buf->mask_id = mask_id;
    buf->mask_stack = ctx->mask_stack; /* Save because the group rendering may
                                          set up another (nested) mask. */
    ctx->mask_stack = NULL; /* Clean the mask field for rendering this group.
                               See pdf14_pop_transparency_group how to handle it. */
    buf->saved = tos;
    ctx->stack = buf;
    if (buf->data == NULL)
        return 0;
    if (idle)
        return 0;
    backdrop = pdf14_find_backdrop_buf(ctx);
    if (backdrop == NULL) {
        /* No backdrop: start from fully transparent planes. */
        memset(buf->data, 0, buf->planestride * (buf->n_chan +
                                                 (buf->has_shape ? 1 : 0) +
                                                 (buf->has_alpha_g ? 1 : 0) +
                                                 (buf->has_tags ? 1 : 0)));
    } else {
        if (!buf->knockout) {
            if (!cm_back_drop) {
                pdf14_preserve_backdrop(buf, tos, false);
            } else {
                /* We must have an non-isolated group with a mismatch in color spaces.
                   In this case, we can't just copy the buffer but must CM it */
                pdf14_preserve_backdrop_cm(buf, group_profile, tos, tos_profile,
                                           ctx->memory, pgs, dev, false);
            }
        }
    }
    /* If knockout, we have to maintain a copy of the backdrop in case we are
       drawing nonisolated groups on top of the knockout group. */
    if (buf->knockout) {
        buf->backdrop = gs_alloc_bytes(ctx->memory, buf->planestride * buf->n_chan,
                                       "pdf14_push_transparency_group");
        if (buf->backdrop == NULL) {
            gs_free_object(ctx->memory, buf->backdrop, "pdf14_push_transparency_group");
            return gs_throw(gs_error_VMerror, "Knockout backdrop allocation failed");
        }
        if (buf->isolated) {
            /* We will have opaque backdrop for non-isolated compositing */
            memset(buf->backdrop, 0, buf->planestride * buf->n_chan);
        } else {
            /* Save knockout backdrop for non-isolated compositing */
            /* Note that we need to drill down through the non-isolated groups in our
               stack and make sure that we are not embedded in another knockout group */
            pdf14_buf *check = tos;
            pdf14_buf *child = NULL; /* Needed so we can get profile */
            cmm_profile_t *prev_knockout_profile;

            while (check != NULL) {
                if (check->isolated)
                    break;
                if (check->knockout) {
                    break;
                }
                child = check;
                check = check->saved;
            }
            /* Here we need to grab a back drop from a knockout parent group and
               potentially worry about color differences. */
            if (check == NULL) {
                prev_knockout_profile = tos_profile;
                check = tos;
            } else {
                if (child == NULL) {
                    prev_knockout_profile = tos_profile;
                } else {
                    prev_knockout_profile = child->parent_color_info_procs->icc_profile;
                }
            }
            if (!cm_back_drop) {
                pdf14_preserve_backdrop(buf, check, false);
            } else {
                /* We must have an non-isolated group with a mismatch in color spaces.
                   In this case, we can't just copy the buffer but must CM it */
                pdf14_preserve_backdrop_cm(buf, group_profile, check,
                                           prev_knockout_profile, ctx->memory, pgs,
                                           dev, false);
            }
            memcpy(buf->backdrop, buf->data, buf->planestride * buf->n_chan);
        }
#if RAW_DUMP
        /* Dump the current buffer to see what we have. */
        dump_raw_buffer(ctx->stack->rect.q.y-ctx->stack->rect.p.y,
                        ctx->stack->rowstride, buf->n_chan,
                        ctx->stack->planestride, ctx->stack->rowstride,
                        "KnockoutBackDrop", buf->backdrop);
        global_index++;
#endif
    } else {
        buf->backdrop = NULL;
    }
#if RAW_DUMP
    /* Dump the current buffer to see what we have. */
    dump_raw_buffer(ctx->stack->rect.q.y-ctx->stack->rect.p.y,
                    ctx->stack->rowstride, ctx->stack->n_planes,
                    ctx->stack->planestride, ctx->stack->rowstride,
                    "TransGroupPush", ctx->stack->data);
    global_index++;
#endif
    return 0;
}
| 13,649
|
35,373
| 0
|
/*
 * Module init for the SIT (IPv6-over-IPv4) tunnel driver.
 * Registers the per-network-namespace device first, then hooks the
 * IPPROTO_IPV6 tunnel handler; on handler failure the pernet
 * registration is rolled back and the error is propagated.
 */
static int __init sit_init(void)
{
    int err;

    printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");

    err = register_pernet_device(&sit_net_ops);
    if (err < 0)
        return err;

    err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
    if (err < 0) {
        printk(KERN_INFO "sit init: Can't add protocol\n");
        unregister_pernet_device(&sit_net_ops);
    }
    return err;
}
| 13,650
|
121,346
| 0
|
// Closes the undocked DevTools window by closing its hosting browser.
void CloseUndockedDevTools() {
chrome::CloseWindow(window_->browser());
}
| 13,651
|
78,442
| 0
|
/*
 * Return the well-known GPK transport key ("TEST KEYTEST KEY").
 * Only SC_AC_PRO with key reference 1 has a default; any other
 * request yields SC_ERROR_NO_DEFAULT_KEY.  The caller's buffer must
 * hold at least 16 bytes or SC_ERROR_BUFFER_TOO_SMALL is returned.
 * NOTE(review): the return type line is outside this chunk (static int
 * in upstream OpenSC) — confirm against the full file.
 */
gpk_get_default_key(sc_card_t *card, struct sc_cardctl_default_key *data)
{
    if (data->method != SC_AC_PRO || data->key_ref != 1)
        return SC_ERROR_NO_DEFAULT_KEY;
    if (data->len < 16)
        return SC_ERROR_BUFFER_TOO_SMALL;
    memcpy(data->key_data, "TEST KEYTEST KEY", 16);
    data->len = 16;
    return 0;
}
| 13,652
|
127,483
| 0
|
// Fancy (smooth) chroma upsampling is always disabled for this decoder.
inline bool doFancyUpsampling() { return false; }
| 13,653
|
48,910
| 0
|
/*
 * Public entry point for handing a received skb to the network stack.
 * Fires the receive tracepoint before delegating to the internal
 * receive path; tracepoint must come first so tracing sees every entry.
 */
int netif_receive_skb(struct sk_buff *skb)
{
trace_netif_receive_skb_entry(skb);
return netif_receive_skb_internal(skb);
}
| 13,654
|
63,538
| 0
|
/*
 * poll/select handler for a POSIX message queue file.
 * Reports POLLIN when at least one message is queued and POLLOUT while
 * the queue is below mq_maxmsg.  The queue counters are sampled under
 * info->lock to get a consistent snapshot.
 */
static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
    struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
    unsigned int mask = 0;

    poll_wait(filp, &info->wait_q, poll_tab);

    spin_lock(&info->lock);
    if (info->attr.mq_curmsgs)
        mask = POLLIN | POLLRDNORM;
    if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
        mask |= POLLOUT | POLLWRNORM;
    spin_unlock(&info->lock);

    return mask;
}
| 13,655
|
93,375
| 0
|
/*
 * Fill *storage with the device's 64-bit link statistics, preferring
 * the driver's native 64-bit callback, then the legacy 32-bit callback,
 * then the generic dev->stats counters.  Software drop counters kept by
 * the core are added on top of whatever the driver reported.
 * Returns storage for caller convenience.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage)
{
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_get_stats64) {
/* Zero first: drivers are only required to fill fields they track. */
memset(storage, 0, sizeof(*storage));
ops->ndo_get_stats64(dev, storage);
} else if (ops->ndo_get_stats) {
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
return storage;
}
| 13,656
|
134,986
| 0
|
// Constructs an update job for |group|'s manifest and registers this job
// as a service observer so it is notified of service shutdown.
// Member order mirrors the declaration order in the header; weak_factory_
// is last so its pointers are invalidated first on destruction.
AppCacheUpdateJob::AppCacheUpdateJob(AppCacheServiceImpl* service,
AppCacheGroup* group)
: service_(service),
manifest_url_(group->manifest_url()),
group_(group),
update_type_(UNKNOWN_TYPE),
internal_state_(FETCH_MANIFEST),
doing_full_update_check_(false),
master_entries_completed_(0),
url_fetches_completed_(0),
manifest_fetcher_(NULL),
manifest_has_valid_mime_type_(false),
stored_state_(UNSTORED),
storage_(service->storage()),
weak_factory_(this) {
service_->AddObserver(this);
}
| 13,657
|
105,206
| 0
|
// True when |node| is treated as an HTML block element for editing:
// a table cell (<td>/<th>) or any element recognized by
// isNonTableCellHTMLBlockElement().
static bool isHTMLBlockElement(const Node* node)
{
    if (node->hasTagName(tdTag) || node->hasTagName(thTag))
        return true;
    return isNonTableCellHTMLBlockElement(node);
}
| 13,658
|
117,683
| 0
|
// Signal handler: print a backtrace and exit immediately with the
// conventional 128+signal exit status. Uses _exit to avoid running
// atexit handlers from signal context.
static void DumpStackTraceSignalHandler(int signal) {
base::debug::StackTrace().PrintBacktrace();
_exit(128 + signal);
}
| 13,659
|
6,884
| 0
|
/*
 * Sample converter: parse an Accept-Language style header in the sample
 * and pick, from the semicolon-separated list of supported languages in
 * args[0], the entry with the highest q-value.  args[1], if a string,
 * supplies a fallback value when nothing matches.  The winning language
 * is returned in smp as a constant slice pointing into the configuration
 * string (hence SMP_F_CONST).  Returns nonzero iff a value was produced.
 */
static int sample_conv_q_prefered(const struct arg *args, struct sample *smp, void *private)
{
const char *al = smp->data.u.str.str;
const char *end = al + smp->data.u.str.len;
const char *token;
int toklen;
int qvalue;
const char *str;
const char *w;
int best_q = 0;
/* Mark the sample constant: the output points into the configuration
 * string, which must not be modified.
 */
smp->flags |= SMP_F_CONST;
smp->data.u.str.size = 0;
smp->data.u.str.str = "";
smp->data.u.str.len = 0;
/* Parse the accept language */
while (1) {
/* Jump spaces, quit if the end is detected. */
while (al < end && isspace((unsigned char)*al))
al++;
if (al >= end)
break;
/* Start of the first word. */
token = al;
/* Look for separator: isspace(), ',' or ';'. Next value if 0 length word. */
while (al < end && *al != ';' && *al != ',' && !isspace((unsigned char)*al))
al++;
if (al == token)
goto expect_comma;
/* Length of the token. */
toklen = al - token;
qvalue = 1000;
/* Check if the token exists in the list. If the token not exists,
 * jump to the next token.
 */
str = args[0].data.str.str;
w = str;
while (1) {
if (*str == ';' || *str == '\0') {
if (language_range_match(token, toklen, w, str-w))
goto look_for_q;
if (*str == '\0')
goto expect_comma;
w = str + 1;
}
str++;
}
goto expect_comma;
look_for_q:
/* Jump spaces, quit if the end is detected. */
while (al < end && isspace((unsigned char)*al))
al++;
if (al >= end)
goto process_value;
/* If ',' is found, process the result */
if (*al == ',')
goto process_value;
/* If the character is different from ';', look
 * for the end of the header part in best effort.
 */
if (*al != ';')
goto expect_comma;
/* Assumes that the char is ';', now expect "q=". */
al++;
/* Jump spaces, process value if the end is detected. */
while (al < end && isspace((unsigned char)*al))
al++;
if (al >= end)
goto process_value;
/* Expect 'q'. If no 'q', continue in best effort */
if (*al != 'q')
goto process_value;
al++;
/* Jump spaces, process value if the end is detected. */
while (al < end && isspace((unsigned char)*al))
al++;
if (al >= end)
goto process_value;
/* Expect '='. If no '=', continue in best effort */
if (*al != '=')
goto process_value;
al++;
/* Jump spaces, process value if the end is detected. */
while (al < end && isspace((unsigned char)*al))
al++;
if (al >= end)
goto process_value;
/* Parse the q value. */
qvalue = parse_qvalue(al, &al);
process_value:
/* If the new q value is the best q value, then store the associated
 * language in the response. If qvalue is the biggest value (1000),
 * break the process.
 */
if (qvalue > best_q) {
smp->data.u.str.str = (char *)w;
smp->data.u.str.len = str - w;
if (qvalue >= 1000)
break;
best_q = qvalue;
}
expect_comma:
/* Expect comma or end. If the end is detected, quit the loop. */
while (al < end && *al != ',')
al++;
if (al >= end)
break;
/* Comma is found, jump it and restart the analyzer. */
al++;
}
/* Set default value if required. */
if (smp->data.u.str.len == 0 && args[1].type == ARGT_STR) {
smp->data.u.str.str = args[1].data.str.str;
smp->data.u.str.len = args[1].data.str.len;
}
/* Return true only if a matching language was found. */
return smp->data.u.str.len != 0;
}
| 13,660
|
81,585
| 0
|
/* Stub used when function-trace filtering is not supported in this
 * configuration; always reports -ENODEV to the perf core. */
static int ftrace_function_set_filter(struct perf_event *event,
struct event_filter *filter)
{
return -ENODEV;
}
| 13,661
|
10,020
| 0
|
/*
 * Draw the background grid and the two coordinate axes into the display
 * bitmap.  Grid lines are spaced 64 scaled units apart and are only
 * drawn when that spacing is at least 2 pixels; lines are emitted in
 * both directions from the origin so panning keeps the grid anchored.
 * The axes are always drawn, on top of the grid.
 */
grid_status_draw_grid( GridStatus st )
{
int x_org = (int)st->x_origin;
int y_org = (int)st->y_origin;
double xy_incr = 64.0 * st->scale;
/* Skip the grid entirely when lines would be denser than 2 px apart. */
if ( xy_incr >= 2. )
{
double x2 = x_org;
double y2 = y_org;
for ( ; x2 < st->disp_width; x2 += xy_incr )
grFillVLine( st->disp_bitmap, (int)x2, 0,
st->disp_height, st->grid_color );
for ( x2 = x_org - xy_incr; (int)x2 >= 0; x2 -= xy_incr )
grFillVLine( st->disp_bitmap, (int)x2, 0,
st->disp_height, st->grid_color );
for ( ; y2 < st->disp_height; y2 += xy_incr )
grFillHLine( st->disp_bitmap, 0, (int)y2,
st->disp_width, st->grid_color );
for ( y2 = y_org - xy_incr; (int)y2 >= 0; y2 -= xy_incr )
grFillHLine( st->disp_bitmap, 0, (int)y2,
st->disp_width, st->grid_color );
}
grFillVLine( st->disp_bitmap, x_org, 0, st->disp_height, st->axis_color );
grFillHLine( st->disp_bitmap, 0, y_org, st->disp_width, st->axis_color );
}
| 13,662
|
148,099
| 0
|
// Generated V8 binding trampoline: times the call (disabled-by-default
// tracing) and forwards to the internal implementation.
void V8TestObject::VoidMethodEventTargetArgMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_voidMethodEventTargetArg");
test_object_v8_internal::VoidMethodEventTargetArgMethod(info);
}
| 13,663
|
75,919
| 0
|
/*
 * Check whether the daemon recorded in pid_file is still running.
 *
 * Returns 0 when no instance is running (no pidfile, or the pidfile was
 * stale and has been removed), and 1 when a process appears to be
 * running or when we cannot tell for sure.
 *
 * Fix: only treat the process as dead when kill(pid, 0) fails with
 * ESRCH.  The previous code removed the pidfile on *any* kill() error,
 * including EPERM, which occurs when the daemon is alive but owned by
 * another user — deleting its pidfile in that case is wrong.
 */
process_running(const char *pid_file)
{
    FILE *pidfile = fopen(pid_file, "r");
    pid_t pid = 0;
    int ret;

    /* No pidfile => nothing recorded as running. */
    if (!pidfile)
        return 0;

    ret = fscanf(pidfile, "%d", &pid);
    fclose(pidfile);
    if (ret != 1) {
        log_message(LOG_INFO, "Error reading pid file %s", pid_file);
        pid = 0;
        pidfile_rm(pid_file);
    }

    /* Unreadable pid - we don't know if it is running or not; play safe. */
    if (!pid)
        return 1;

    if (kill(pid, 0)) {
        /* Only ESRCH proves the process is gone; EPERM and friends
         * mean it exists but we may not signal it. */
        if (errno == ESRCH) {
            log_message(LOG_INFO, "Remove a zombie pid file %s", pid_file);
            pidfile_rm(pid_file);
            return 0;
        }
        return 1;
    }
    return 1;
}
| 13,664
|
176,417
| 0
|
// A2DP HAL entry point: request disconnection from |bd_addr|.
// Requires prior initialization (CHECK_BTAV_INIT) and posts the request
// to the BTIF thread rather than handling it on the caller's thread.
static bt_status_t disconnect(RawAddress* bd_addr) {
BTIF_TRACE_EVENT("%s", __func__);
CHECK_BTAV_INIT();
/* Switch to BTIF context */
return btif_transfer_context(btif_av_handle_event, BTIF_AV_DISCONNECT_REQ_EVT,
(char*)bd_addr, sizeof(RawAddress), NULL);
}
| 13,665
|
31,966
| 0
|
/*
 * Emit the read_format payload for an event into the output handle,
 * choosing the group or single-event encoding per the event's
 * read_format flags.  Timing totals are recomputed locally because we
 * may be in NMI context (see comment below).
 */
static void perf_output_read(struct perf_output_handle *handle,
struct perf_event *event)
{
u64 enabled = 0, running = 0, now;
u64 read_format = event->attr.read_format;
/*
 * compute total_time_enabled, total_time_running
 * based on snapshot values taken when the event
 * was last scheduled in.
 *
 * we cannot simply call update_context_time()
 * because of locking issues, as we are called in
 * NMI context
 */
if (read_format & PERF_FORMAT_TOTAL_TIMES)
calc_timer_values(event, &now, &enabled, &running);
if (event->attr.read_format & PERF_FORMAT_GROUP)
perf_output_read_group(handle, event, enabled, running);
else
perf_output_read_one(handle, event, enabled, running);
}
| 13,666
|
121,222
| 0
|
// Constraint-validation helper: true when the input participates in
// validation and its current value exceeds the type-specific maximum.
bool HTMLInputElement::rangeOverflow() const
{
return willValidate() && m_inputType->rangeOverflow(value());
}
| 13,667
|
149,289
| 0
|
// Commits the IndexedDB transaction identified by |transaction_id|.
// Empty transactions commit immediately; otherwise quota usage is
// queried first and the commit continues in OnGotUsageAndQuotaForCommit.
// Silently returns when the connection is gone or the id is unknown.
void DatabaseImpl::IDBThreadHelper::Commit(int64_t transaction_id) {
DCHECK(idb_thread_checker_.CalledOnValidThread());
if (!connection_->IsConnected())
return;
IndexedDBTransaction* transaction =
connection_->GetTransaction(transaction_id);
if (!transaction)
return;
if (transaction->size() == 0) {
connection_->database()->Commit(transaction);
return;
}
indexed_db_context_->quota_manager_proxy()->GetUsageAndQuota(
indexed_db_context_->TaskRunner(), origin_.GetURL(),
storage::kStorageTypeTemporary,
base::Bind(&IDBThreadHelper::OnGotUsageAndQuotaForCommit,
weak_factory_.GetWeakPtr(), transaction_id));
}
| 13,668
|
66,328
| 0
|
/* Materialize the carry flag (condition "below", JCC_B) into reg. */
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
gen_setcc1(s, JCC_B << 1, reg);
}
| 13,669
|
179,567
| 1
|
/*
 * Called when a station enters power-save: deschedule all of its
 * aggregation TIDs (and their access categories) and tell mac80211,
 * per TID, whether frames are still buffered so it can set the TIM bit.
 * Each TID's txq lock is held while the scheduling state is torn down.
 * NOTE(review): list_del on tid->list / ac->list assumes the entries
 * are on their queues whenever tid->sched / ac->sched is set — confirm
 * against the scheduling side before restructuring.
 */
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
struct ath_node *an)
{
struct ath_atx_tid *tid;
struct ath_atx_ac *ac;
struct ath_txq *txq;
bool buffered;
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
if (!tid->sched)
continue;
ac = tid->ac;
txq = ac->txq;
ath_txq_lock(sc, txq);
buffered = ath_tid_has_buffered(tid);
tid->sched = false;
list_del(&tid->list);
if (ac->sched) {
ac->sched = false;
list_del(&ac->list);
}
ath_txq_unlock(sc, txq);
/* Must be outside the txq lock: may call back into mac80211. */
ieee80211_sta_set_buffered(sta, tidno, buffered);
}
}
| 13,670
|
48,295
| 0
|
/*
 * Write a contiguous (interleaved) image buffer out as tiles.
 * Walks the image in tile-sized strips, copying each tile-wide slice of
 * the source buffer into a scratch tile (zero-padded at the right edge
 * when the image width is not a multiple of the tile width) and writing
 * it with TIFFWriteTile.  Returns 1 on success, 0 on allocation or
 * write failure.
 *
 * Fix: the scratch tile was allocated with a second TIFFTileSize(out)
 * call even though the result was already cached in `tilesize`; reuse
 * the cached value so the allocation and the memset can never disagree.
 */
DECLAREwriteFunc(writeBufferToContigTiles)
{
    uint32 imagew = TIFFScanlineSize(out);
    uint32 tilew = TIFFTileRowSize(out);
    int iskew = imagew - tilew;
    tsize_t tilesize = TIFFTileSize(out);
    tdata_t obuf;
    uint8* bufp = (uint8*) buf;
    uint32 tl, tw;
    uint32 row;

    (void) spp;

    obuf = _TIFFmalloc(tilesize);
    if (obuf == NULL)
        return 0;
    _TIFFmemset(obuf, 0, tilesize);
    (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl);
    (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw);
    for (row = 0; row < imagelength; row += tilelength) {
        uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl;
        uint32 colb = 0;
        uint32 col;
        for (col = 0; col < imagewidth; col += tw) {
            /*
             * Tile is clipped horizontally. Calculate
             * visible portion and skewing factors.
             */
            if (colb + tilew > imagew) {
                uint32 width = imagew - colb;
                int oskew = tilew - width;
                cpStripToTile(obuf, bufp + colb, nrow, width,
                    oskew, oskew + iskew);
            } else
                cpStripToTile(obuf, bufp + colb, nrow, tilew,
                    0, iskew);
            if (TIFFWriteTile(out, obuf, col, row, 0, 0) < 0) {
                TIFFError(TIFFFileName(out),
                    "Error, can't write tile at %lu %lu",
                    (unsigned long) col,
                    (unsigned long) row);
                _TIFFfree(obuf);
                return 0;
            }
            colb += tilew;
        }
        bufp += nrow * imagew;
    }
    _TIFFfree(obuf);
    return 1;
}
| 13,671
|
170,224
| 0
|
// Returns whether PAC scripts are denied access to full HTTPS URLs
// (path stripping enabled), derived from the default network context
// parameters. Inverted because the param expresses the "allow" side.
bool GetPacHttpsUrlStrippingEnabled() {
network::mojom::NetworkContextParamsPtr network_context_params =
g_browser_process->system_network_context_manager()
->CreateDefaultNetworkContextParams();
return !network_context_params->dangerously_allow_pac_access_to_secure_urls;
}
| 13,672
|
66,342
| 0
|
/* Raise #UD (invalid opcode) at the instruction being translated. */
static void gen_illegal_opcode(DisasContext *s)
{
gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base);
}
| 13,673
|
15,207
| 0
|
/*
 * Shared implementation for imagechar()/imagecharup()/imagestring()/
 * imagestringup().  mode selects the operation:
 *   0 = single char horizontal, 1 = single char vertical,
 *   2 = string horizontal,      3 = string vertical.
 *
 * Fix: mode 3 cast str[i] through plain char, so on platforms where
 * char is signed, bytes >= 0x80 produced negative glyph indexes (unlike
 * mode 2, which already casts via unsigned char).  Both string modes
 * now use the unsigned-char cast.
 */
static void php_imagechar(INTERNAL_FUNCTION_PARAMETERS, int mode)
{
    zval *IM;
    long SIZE, X, Y, COL;
    char *C;
    int C_len;
    gdImagePtr im;
    int ch = 0, col, x, y, size, i, l = 0;
    unsigned char *str = NULL;
    gdFontPtr font;

    if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rlllsl", &IM, &SIZE, &X, &Y, &C, &C_len, &COL) == FAILURE) {
        return;
    }
    ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
    col = COL;
    if (mode < 2) {
        /* Single-character modes only look at the first byte. */
        ch = (int)((unsigned char)*C);
    } else {
        str = (unsigned char *) estrndup(C, C_len);
        l = strlen((char *)str);
    }
    y = Y;
    x = X;
    size = SIZE;
    font = php_find_gd_font(size TSRMLS_CC);
    switch (mode) {
    case 0:
        gdImageChar(im, font, x, y, ch, col);
        break;
    case 1:
        php_gdimagecharup(im, font, x, y, ch, col);
        break;
    case 2:
        for (i = 0; (i < l); i++) {
            gdImageChar(im, font, x, y, (int) ((unsigned char) str[i]), col);
            x += font->w;
        }
        break;
    case 3: {
        for (i = 0; (i < l); i++) {
            /* php_gdimagecharup(im, font, x, y, (int) str[i], col); */
            gdImageCharUp(im, font, x, y, (int) ((unsigned char) str[i]), col);
            y -= font->w;
        }
        break;
    }
    }
    if (str) {
        efree(str);
    }
    RETURN_TRUE;
}
| 13,674
|
49,262
| 0
|
/*
 * ICMP error handler for TCP over IPv4.  Looks up the socket the ICMP
 * message refers to and reacts per ICMP type: PMTU discovery updates,
 * RTO backoff reversal for net/host-unreachable, redirect handling, and
 * connection teardown or soft errors for the rest.  Socket states
 * TIME_WAIT and NEW_SYN_RECV are dispatched to their own paths before
 * the socket lock is taken.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
struct inet_connection_sock *icsk;
struct tcp_sock *tp;
struct inet_sock *inet;
const int type = icmp_hdr(icmp_skb)->type;
const int code = icmp_hdr(icmp_skb)->code;
struct sock *sk;
struct sk_buff *skb;
struct request_sock *fastopen;
__u32 seq, snd_una;
__u32 remaining;
int err;
struct net *net = dev_net(icmp_skb->dev);
sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
th->dest, iph->saddr, ntohs(th->source),
inet_iif(icmp_skb));
if (!sk) {
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
return;
}
if (sk->sk_state == TCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
return;
}
seq = ntohl(th->seq);
if (sk->sk_state == TCP_NEW_SYN_RECV)
return tcp_req_err(sk, seq,
type == ICMP_PARAMETERPROB ||
type == ICMP_TIME_EXCEEDED ||
(type == ICMP_DEST_UNREACH &&
(code == ICMP_NET_UNREACH ||
code == ICMP_HOST_UNREACH)));
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
 * servers this needs to be solved differently.
 * We do take care of PMTU discovery (RFC1191) special case :
 * we can receive locally generated ICMP messages while socket is held.
 */
if (sock_owned_by_user(sk)) {
if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
}
if (sk->sk_state == TCP_CLOSE)
goto out;
if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
goto out;
}
icsk = inet_csk(sk);
tp = tcp_sk(sk);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
fastopen = tp->fastopen_rsk;
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
if (sk->sk_state != TCP_LISTEN &&
!between(seq, snd_una, tp->snd_nxt)) {
__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
switch (type) {
case ICMP_REDIRECT:
do_redirect(icmp_skb, sk);
goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
goto out;
case ICMP_PARAMETERPROB:
err = EPROTO;
break;
case ICMP_DEST_UNREACH:
if (code > NR_ICMP_UNREACH)
goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
/* We are not interested in TCP_LISTEN and open_requests
 * (SYN-ACKs send out by Linux are always <576bytes so
 * they should go through unfragmented).
 */
if (sk->sk_state == TCP_LISTEN)
goto out;
tp->mtu_info = info;
if (!sock_owned_by_user(sk)) {
tcp_v4_mtu_reduced(sk);
} else {
if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
sock_hold(sk);
}
goto out;
}
err = icmp_err_convert[code].errno;
/* check if icmp_skb allows revert of backoff
 * (see draft-zimmermann-tcp-lcd) */
if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
break;
if (seq != tp->snd_una || !icsk->icsk_retransmits ||
!icsk->icsk_backoff || fastopen)
break;
if (sock_owned_by_user(sk))
break;
icsk->icsk_backoff--;
icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
TCP_TIMEOUT_INIT;
icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
skb = tcp_write_queue_head(sk);
BUG_ON(!skb);
remaining = icsk->icsk_rto -
min(icsk->icsk_rto,
tcp_time_stamp - tcp_skb_timestamp(skb));
if (remaining) {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
remaining, TCP_RTO_MAX);
} else {
/* RTO revert clocked out retransmission.
 * Will retransmit now */
tcp_retransmit_timer(sk);
}
break;
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
default:
goto out;
}
switch (sk->sk_state) {
case TCP_SYN_SENT:
case TCP_SYN_RECV:
/* Only in fast or simultaneous open. If a fast open socket is
 * is already accepted it is treated as a connected one below.
 */
if (fastopen && !fastopen->sk)
break;
if (!sock_owned_by_user(sk)) {
sk->sk_err = err;
sk->sk_error_report(sk);
tcp_done(sk);
} else {
sk->sk_err_soft = err;
}
goto out;
}
/* If we've already connected we will keep trying
 * until we time out, or the user gives up.
 *
 * rfc1122 4.2.3.9 allows to consider as hard errors
 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
 * but it is obsoleted by pmtu discovery).
 *
 * Note, that in modern internet, where routing is unreliable
 * and in each dark corner broken firewalls sit, sending random
 * errors ordered by their masters even this two messages finally lose
 * their original sense (even Linux sends invalid PORT_UNREACHs)
 *
 * Now we are in compliance with RFCs.
 * --ANK (980905)
 */
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
sk->sk_err = err;
sk->sk_error_report(sk);
} else { /* Only an error on timeout */
sk->sk_err_soft = err;
}
out:
bh_unlock_sock(sk);
sock_put(sk);
}
| 13,675
|
180,220
| 1
|
/*
 * GlobIterator::count() - number of entries matched by the glob.
 *
 * Fix: guard against a NULL directory stream before dereferencing it in
 * php_stream_is().  intern->u.dir.dirp can be unset when the object was
 * not initialized through the normal constructor path, which previously
 * crashed here; now it falls through to the "lost glob state" error.
 */
SPL_METHOD(GlobIterator, count)
{
    spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    if (intern->u.dir.dirp && php_stream_is(intern->u.dir.dirp, &php_glob_stream_ops)) {
        RETURN_LONG(php_glob_stream_get_count(intern->u.dir.dirp, NULL));
    } else {
        /* should not happen */
        php_error_docref(NULL TSRMLS_CC, E_ERROR, "GlobIterator lost glob state");
    }
}
| 13,676
|
160,750
| 0
|
// Notifies the browser process that this frame is about to insert the
// document <body> element.
void RenderFrameImpl::WillInsertBody() {
Send(new FrameHostMsg_WillInsertBody(routing_id_,
render_view_->GetRoutingID()));
}
| 13,677
|
103,709
| 0
|
// Forwards a DevTools runtime property change (name/value pair) to the
// host over IPC, converting from WebString to UTF-8.
void DevToolsAgent::runtimePropertyChanged(
const WebKit::WebString& name,
const WebKit::WebString& value) {
Send(new DevToolsHostMsg_RuntimePropertyChanged(
routing_id(),
name.utf8(),
value.utf8()));
}
| 13,678
|
62,294
| 0
|
/*
 * Format a 4-byte node identifier as "xx:xx:xx:xx".
 * Rotates through four static buffers so up to four results can be
 * alive simultaneously (e.g. multiple calls in one printf); not
 * thread-safe.
 * NOTE(review): the return type line is not visible in this chunk —
 * upstream this returns const char *; confirm against the full file.
 */
format_nid(const u_char *data)
{
static char buf[4][sizeof("01:01:01:01")];
static int i = 0;
i = (i + 1) % 4;
snprintf(buf[i], sizeof(buf[i]), "%02x:%02x:%02x:%02x",
data[0], data[1], data[2], data[3]);
return buf[i];
}
| 13,679
|
58,148
| 0
|
/* Remove p from its runqueue: refresh the rq clock, record scheduler
 * accounting, then delegate to the task's scheduling class. */
static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
update_rq_clock(rq);
sched_info_dequeued(rq, p);
p->sched_class->dequeue_task(rq, p, flags);
}
| 13,680
|
160,845
| 0
|
// Wraps a non-null WebViewTestProxyBase; the proxy must outlive this
// client (raw pointer, no ownership taken).
WebViewTestClient::WebViewTestClient(
WebViewTestProxyBase* web_view_test_proxy_base)
: web_view_test_proxy_base_(web_view_test_proxy_base) {
DCHECK(web_view_test_proxy_base);
}
| 13,681
|
130,662
| 0
|
// Generated getter for the [CachedAttribute] "cachedDirtyableAttributeRaises"
// attribute.  Returns the value cached as a hidden V8 property while the
// native value is clean; otherwise recomputes it (which may throw) and
// refreshes the cache before returning.
static void cachedDirtyableAttributeRaisesAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info)
{
v8::Handle<v8::String> propertyName = v8AtomicString(info.GetIsolate(), "cachedDirtyableAttributeRaises");
TestObject* imp = V8TestObject::toNative(info.Holder());
if (!imp->isValueDirty()) {
v8::Handle<v8::Value> jsValue = V8HiddenValue::getHiddenValue(info.GetIsolate(), info.Holder(), propertyName);
if (!jsValue.IsEmpty()) {
v8SetReturnValue(info, jsValue);
return;
}
}
ExceptionState exceptionState(ExceptionState::GetterContext, "cachedDirtyableAttributeRaises", "TestObject", info.Holder(), info.GetIsolate());
ScriptValue jsValue = imp->cachedDirtyableAttributeRaises(exceptionState);
if (UNLIKELY(exceptionState.throwIfNeeded()))
return;
V8HiddenValue::setHiddenValue(info.GetIsolate(), info.Holder(), propertyName, jsValue.v8Value());
v8SetReturnValue(info, jsValue.v8Value());
}
| 13,682
|
158,469
| 0
|
// Starts observing |window_to_observe| immediately; the window must
// outlive this observer (or remove it before destruction).
explicit TestWindowObserver(aura::Window* window_to_observe)
: window_(window_to_observe) {
window_->AddObserver(this);
}
| 13,683
|
50,028
| 0
|
/*
 * Register the client "open" callback on a read archive.  Only valid
 * while the archive is still in the NEW state (enforced by the magic
 * check).  Always returns ARCHIVE_OK on success paths.
 * NOTE(review): the return type line (int in upstream libarchive) is
 * outside this chunk.
 */
archive_read_set_open_callback(struct archive *_a,
archive_open_callback *client_opener)
{
struct archive_read *a = (struct archive_read *)_a;
archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW,
"archive_read_set_open_callback");
a->client.opener = client_opener;
return ARCHIVE_OK;
}
| 13,684
|
120,183
| 0
|
// Updates the layer's fixed-position constraint, scheduling a commit
// only when the value actually changes.
void Layer::SetPositionConstraint(const LayerPositionConstraint& constraint) {
DCHECK(IsPropertyChangeAllowed());
if (position_constraint_ == constraint)
return;
position_constraint_ = constraint;
SetNeedsCommit();
}
| 13,685
|
139,728
| 0
|
// Completes a resource load: detaches the loader (or moves it to the
// non-blocking set for multipart first-parts), records navigation and
// resource timing (transfer sizes, finish time), notifies the context,
// and finalizes the resource unless more multipart parts follow.
void ResourceFetcher::HandleLoaderFinish(Resource* resource,
double finish_time,
LoaderFinishType type) {
DCHECK(resource);
ResourceLoader* loader = resource->Loader();
if (type == kDidFinishFirstPartInMultipart) {
MoveResourceLoaderToNonBlocking(loader);
} else {
RemoveResourceLoader(loader);
DCHECK(!non_blocking_loaders_.Contains(loader));
}
DCHECK(!loaders_.Contains(loader));
const int64_t encoded_data_length =
resource->GetResponse().EncodedDataLength();
if (resource->GetType() == Resource::kMainResource) {
DCHECK(navigation_timing_info_);
AddRedirectsToTimingInfo(resource, navigation_timing_info_.Get());
if (resource->GetResponse().IsHTTP()) {
PopulateTimingInfo(navigation_timing_info_.Get(), resource);
// -1 means "length unknown"; report it as zero bytes.
navigation_timing_info_->AddFinalTransferSize(
encoded_data_length == -1 ? 0 : encoded_data_length);
}
}
if (RefPtr<ResourceTimingInfo> info =
resource_timing_info_map_.Take(resource)) {
AddRedirectsToTimingInfo(resource, info.Get());
if (resource->GetResponse().IsHTTP() &&
resource->GetResponse().HttpStatusCode() < 400) {
PopulateTimingInfo(info.Get(), resource);
info->SetLoadFinishTime(finish_time);
info->AddFinalTransferSize(
encoded_data_length == -1 ? 0 : encoded_data_length);
if (resource->Options().request_initiator_context == kDocumentContext)
Context().AddResourceTiming(*info);
resource->ReportResourceTimingToClients(*info);
}
}
Context().DispatchDidFinishLoading(
resource->Identifier(), finish_time, encoded_data_length,
resource->GetResponse().DecodedBodyLength());
if (type == kDidFinishLoading)
resource->Finish(finish_time);
HandleLoadCompletion(resource);
}
| 13,686
|
8,788
| 0
|
/*
 * Build and link a GL program from the given vertex/fragment (and
 * optional geometry) shaders, then cache every uniform/attribute/UBO
 * location the renderer needs at draw time.  The VS (or GS) is
 * recompiled first if its interpolants were generated against a
 * different FS.  Returns the new program record (added to
 * ctx->sub->programs) or NULL on compile/link/alloc failure.
 * NOTE(review): several snprintf calls below use limits (10/14) smaller
 * than name[16]; large sampler/attrib indices would be truncated —
 * verify the index ranges before relying on the generated names.
 */
static struct vrend_linked_shader_program *add_shader_program(struct vrend_context *ctx,
struct vrend_shader *vs,
struct vrend_shader *fs,
struct vrend_shader *gs)
{
struct vrend_linked_shader_program *sprog = CALLOC_STRUCT(vrend_linked_shader_program);
char name[16];
int i;
GLuint prog_id;
GLint lret;
int id;
int last_shader;
if (!sprog)
return NULL;
/* need to rewrite VS code to add interpolation params */
if ((gs && gs->compiled_fs_id != fs->id) ||
(!gs && vs->compiled_fs_id != fs->id)) {
bool ret;
if (gs)
vrend_patch_vertex_shader_interpolants(gs->glsl_prog,
&gs->sel->sinfo,
&fs->sel->sinfo, true, fs->key.flatshade);
else
vrend_patch_vertex_shader_interpolants(vs->glsl_prog,
&vs->sel->sinfo,
&fs->sel->sinfo, false, fs->key.flatshade);
ret = vrend_compile_shader(ctx, gs ? gs : vs);
if (ret == false) {
glDeleteShader(gs ? gs->id : vs->id);
free(sprog);
return NULL;
}
if (gs)
gs->compiled_fs_id = fs->id;
else
vs->compiled_fs_id = fs->id;
}
prog_id = glCreateProgram();
glAttachShader(prog_id, vs->id);
if (gs) {
if (gs->id > 0)
glAttachShader(prog_id, gs->id);
set_stream_out_varyings(prog_id, &gs->sel->sinfo);
}
else
set_stream_out_varyings(prog_id, &vs->sel->sinfo);
glAttachShader(prog_id, fs->id);
/* Dual-source blending needs both outputs bound to color index 0. */
if (fs->sel->sinfo.num_outputs > 1) {
if (util_blend_state_is_dual(&ctx->sub->blend_state, 0)) {
glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
glBindFragDataLocationIndexed(prog_id, 0, 1, "fsout_c1");
sprog->dual_src_linked = true;
} else {
glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
glBindFragDataLocationIndexed(prog_id, 1, 0, "fsout_c1");
sprog->dual_src_linked = false;
}
} else
sprog->dual_src_linked = false;
if (vrend_state.have_vertex_attrib_binding) {
uint32_t mask = vs->sel->sinfo.attrib_input_mask;
while (mask) {
i = u_bit_scan(&mask);
snprintf(name, 10, "in_%d", i);
glBindAttribLocation(prog_id, i, name);
}
}
glLinkProgram(prog_id);
glGetProgramiv(prog_id, GL_LINK_STATUS, &lret);
if (lret == GL_FALSE) {
char infolog[65536];
int len;
glGetProgramInfoLog(prog_id, 65536, &len, infolog);
fprintf(stderr,"got error linking\n%s\n", infolog);
/* dump shaders */
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
fprintf(stderr,"vert shader: %d GLSL\n%s\n", vs->id, vs->glsl_prog);
if (gs)
fprintf(stderr,"geom shader: %d GLSL\n%s\n", gs->id, gs->glsl_prog);
fprintf(stderr,"frag shader: %d GLSL\n%s\n", fs->id, fs->glsl_prog);
glDeleteProgram(prog_id);
free(sprog);
return NULL;
}
sprog->ss[PIPE_SHADER_VERTEX] = vs;
sprog->ss[PIPE_SHADER_FRAGMENT] = fs;
sprog->ss[PIPE_SHADER_GEOMETRY] = gs;
list_add(&sprog->sl[PIPE_SHADER_VERTEX], &vs->programs);
list_add(&sprog->sl[PIPE_SHADER_FRAGMENT], &fs->programs);
if (gs)
list_add(&sprog->sl[PIPE_SHADER_GEOMETRY], &gs->programs);
last_shader = gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT;
sprog->id = prog_id;
list_addtail(&sprog->head, &ctx->sub->programs);
if (fs->key.pstipple_tex)
sprog->fs_stipple_loc = glGetUniformLocation(prog_id, "pstipple_sampler");
else
sprog->fs_stipple_loc = -1;
sprog->vs_ws_adjust_loc = glGetUniformLocation(prog_id, "winsys_adjust");
/* Cache sampler (and shadow-sampler mask/add) uniform locations. */
for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
if (sprog->ss[id]->sel->sinfo.samplers_used_mask) {
uint32_t mask = sprog->ss[id]->sel->sinfo.samplers_used_mask;
int nsamp = util_bitcount(sprog->ss[id]->sel->sinfo.samplers_used_mask);
int index;
sprog->shadow_samp_mask[id] = sprog->ss[id]->sel->sinfo.shadow_samp_mask;
if (sprog->ss[id]->sel->sinfo.shadow_samp_mask) {
sprog->shadow_samp_mask_locs[id] = calloc(nsamp, sizeof(uint32_t));
sprog->shadow_samp_add_locs[id] = calloc(nsamp, sizeof(uint32_t));
} else {
sprog->shadow_samp_mask_locs[id] = sprog->shadow_samp_add_locs[id] = NULL;
}
sprog->samp_locs[id] = calloc(nsamp, sizeof(uint32_t));
if (sprog->samp_locs[id]) {
const char *prefix = pipe_shader_to_prefix(id);
index = 0;
while(mask) {
i = u_bit_scan(&mask);
snprintf(name, 10, "%ssamp%d", prefix, i);
sprog->samp_locs[id][index] = glGetUniformLocation(prog_id, name);
if (sprog->ss[id]->sel->sinfo.shadow_samp_mask & (1 << i)) {
snprintf(name, 14, "%sshadmask%d", prefix, i);
sprog->shadow_samp_mask_locs[id][index] = glGetUniformLocation(prog_id, name);
snprintf(name, 14, "%sshadadd%d", prefix, i);
sprog->shadow_samp_add_locs[id][index] = glGetUniformLocation(prog_id, name);
}
index++;
}
}
} else {
sprog->samp_locs[id] = NULL;
sprog->shadow_samp_mask_locs[id] = NULL;
sprog->shadow_samp_add_locs[id] = NULL;
sprog->shadow_samp_mask[id] = 0;
}
sprog->samplers_used_mask[id] = sprog->ss[id]->sel->sinfo.samplers_used_mask;
}
/* Cache constant uniform locations per stage. */
for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
if (sprog->ss[id]->sel->sinfo.num_consts) {
sprog->const_locs[id] = calloc(sprog->ss[id]->sel->sinfo.num_consts, sizeof(uint32_t));
if (sprog->const_locs[id]) {
const char *prefix = pipe_shader_to_prefix(id);
for (i = 0; i < sprog->ss[id]->sel->sinfo.num_consts; i++) {
snprintf(name, 16, "%sconst0[%d]", prefix, i);
sprog->const_locs[id][i] = glGetUniformLocation(prog_id, name);
}
}
} else
sprog->const_locs[id] = NULL;
}
/* Without ARB_vertex_attrib_binding, query attrib locations by name. */
if (!vrend_state.have_vertex_attrib_binding) {
if (vs->sel->sinfo.num_inputs) {
sprog->attrib_locs = calloc(vs->sel->sinfo.num_inputs, sizeof(uint32_t));
if (sprog->attrib_locs) {
for (i = 0; i < vs->sel->sinfo.num_inputs; i++) {
snprintf(name, 10, "in_%d", i);
sprog->attrib_locs[i] = glGetAttribLocation(prog_id, name);
}
}
} else
sprog->attrib_locs = NULL;
}
/* Cache UBO block indices per stage (ubo numbering starts at 1). */
for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
if (sprog->ss[id]->sel->sinfo.num_ubos) {
const char *prefix = pipe_shader_to_prefix(id);
sprog->ubo_locs[id] = calloc(sprog->ss[id]->sel->sinfo.num_ubos, sizeof(uint32_t));
for (i = 0; i < sprog->ss[id]->sel->sinfo.num_ubos; i++) {
snprintf(name, 16, "%subo%d", prefix, i + 1);
sprog->ubo_locs[id][i] = glGetUniformBlockIndex(prog_id, name);
}
} else
sprog->ubo_locs[id] = NULL;
}
if (vs->sel->sinfo.num_ucp) {
for (i = 0; i < vs->sel->sinfo.num_ucp; i++) {
snprintf(name, 10, "clipp[%d]", i);
sprog->clip_locs[i] = glGetUniformLocation(prog_id, name);
}
}
return sprog;
}
| 13,687
|
55,283
| 0
|
/*
 * Probe for an EEPROM on the atl2 NIC.
 * Disables VPD access through the SPI flash controller if it is
 * enabled, then inspects the PCIe capability list: an upper byte pair
 * of 0x6C00 indicates the EEPROM is present.
 * Returns 0 when the EEPROM exists, 1 otherwise.
 */
static int atl2_check_eeprom_exist(struct atl2_hw *hw)
{
    u32 ctrl;

    ctrl = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
    if (ctrl & SPI_FLASH_CTRL_EN_VPD)
        ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL,
                       ctrl & ~SPI_FLASH_CTRL_EN_VPD);

    ctrl = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST);
    return ((ctrl & 0xFF00) == 0x6C00) ? 0 : 1;
}
| 13,688
|
49,080
| 0
|
/*
 * Report every BSS collected by the last escan to cfg80211.
 * Validates the scan-result record version first, then walks the list
 * entry by entry; stops on the first per-BSS error and returns it
 * (0 on full success, -EOPNOTSUPP on version mismatch).
 */
static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg)
{
struct brcmf_scan_results *bss_list;
struct brcmf_bss_info_le *bi = NULL; /* must be initialized */
s32 err = 0;
int i;
bss_list = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
if (bss_list->count != 0 &&
bss_list->version != BRCMF_BSS_INFO_VERSION) {
brcmf_err("Version %d != WL_BSS_INFO_VERSION\n",
bss_list->version);
return -EOPNOTSUPP;
}
brcmf_dbg(SCAN, "scanned AP count (%d)\n", bss_list->count);
for (i = 0; i < bss_list->count; i++) {
bi = next_bss_le(bss_list, bi);
err = brcmf_inform_single_bss(cfg, bi);
if (err)
break;
}
return err;
}
| 13,689
|
82,269
| 0
|
/*
 * Bind a struct file to a socket: create a pseudo dentry on the socket
 * mount, instantiate it with the socket's inode, and allocate the file
 * with socket_file_ops.  On failure the socket itself is released and
 * an ERR_PTR is returned; on success the file owns a reference and the
 * socket's private_data/back-pointer are wired up.
 */
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
{
struct qstr name = { .name = "" };
struct path path;
struct file *file;
/* Name the pseudo dentry after the caller's hint or the protocol. */
if (dname) {
name.name = dname;
name.len = strlen(name.name);
} else if (sock->sk) {
name.name = sock->sk->sk_prot_creator->name;
name.len = strlen(name.name);
}
path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
if (unlikely(!path.dentry)) {
sock_release(sock);
return ERR_PTR(-ENOMEM);
}
path.mnt = mntget(sock_mnt);
d_instantiate(path.dentry, SOCK_INODE(sock));
file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
&socket_file_ops);
if (IS_ERR(file)) {
/* drop dentry, keep inode for a bit */
ihold(d_inode(path.dentry));
path_put(&path);
/* ... and now kill it properly */
sock_release(sock);
return file;
}
sock->file = file;
file->f_flags = O_RDWR | (flags & O_NONBLOCK);
file->private_data = sock;
return file;
}
| 13,690
|
55,448
| 0
|
/*
 * sched_get_priority_max - return the maximum static priority usable
 * with the given scheduling policy, or -EINVAL for an unknown policy.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
    switch (policy) {
    case SCHED_FIFO:
    case SCHED_RR:
        /* Real-time policies: priorities run 1..MAX_USER_RT_PRIO-1. */
        return MAX_USER_RT_PRIO - 1;
    case SCHED_DEADLINE:
    case SCHED_NORMAL:
    case SCHED_BATCH:
    case SCHED_IDLE:
        /* Non-RT policies use nice levels; static priority is 0. */
        return 0;
    default:
        return -EINVAL;
    }
}
| 13,691
|
42,843
| 0
|
/* GtkTextBuffer "changed" handler: re-evaluate the comment toggle. */
static void on_comment_changed(GtkTextBuffer *buffer, gpointer user_data)
{
toggle_eb_comment();
}
| 13,692
|
3,041
| 0
|
/*
 * Compare two CIEBasedA color space dictionaries for equality.
 * Fetches the parameter dictionary (element 1) from each space array
 * and compares the full set of CIEBasedA keys; returns 1 when every
 * key matches, 0 on any mismatch or fetch failure.
 */
static int cieacompareproc(i_ctx_t *i_ctx_p, ref *space, ref *testspace)
{
    static const char * const keys[] = {
        "WhitePoint", "BlackPoint", "RangeA", "DecodeA",
        "MatrixA", "RangeLMN", "DecodeLMN", "MatrixMN"
    };
    ref CIEdict1, CIEdict2;
    unsigned int k;

    if (array_get(imemory, space, 1, &CIEdict1) < 0)
        return 0;
    if (array_get(imemory, testspace, 1, &CIEdict2) < 0)
        return 0;

    for (k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
        if (!comparedictkey(i_ctx_p, &CIEdict1, &CIEdict2, (char *)keys[k]))
            return 0;
    }
    return 1;
}
| 13,693
|
94,198
| 0
|
/*
 * Read the TV DAC (DAC2) adjustment values from the AtomBIOS
 * COMPASSIONATE_DATA table.
 *
 * Returns a freshly kzalloc'd struct radeon_encoder_tv_dac (caller must
 * kfree), or NULL if the table header cannot be parsed or the allocation
 * fails.
 */
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, CompassionateData);
	uint16_t data_offset;
	struct _COMPASSIONATE_DATA *dac_info;
	uint8_t frev, crev;
	uint8_t bg, dac;
	struct radeon_encoder_tv_dac *tv_dac = NULL;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		/* Table lives at data_offset inside the BIOS image. */
		dac_info = (struct _COMPASSIONATE_DATA *)
			(mode_info->atom_context->bios + data_offset);

		tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);

		if (!tv_dac)
			return NULL;

		/* Pack background level into bits 16..19 and DAC level into
		 * bits 20..23, one adjustment word per signal standard. */
		bg = dac_info->ucDAC2_CRT2_BG_Adjustment;
		dac = dac_info->ucDAC2_CRT2_DAC_Adjustment;
		tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);

		bg = dac_info->ucDAC2_PAL_BG_Adjustment;
		dac = dac_info->ucDAC2_PAL_DAC_Adjustment;
		tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);

		bg = dac_info->ucDAC2_NTSC_BG_Adjustment;
		dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
		tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);

		tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
	}
	return tv_dac;
}
| 13,694
|
188,006
| 1
|
/* Decode one Huffman codeword from the bitstream using the codebook's
 * packed decode table; returns the entry number, or -1 (as an unsigned
 * wrap) on end-of-packet / invalid codeword.
 *
 * The table layout depends on book->dec_method (node byte-width and leaf
 * word-width chosen at codebook setup).  Each node stores two children;
 * a set top bit marks a leaf.  Bits are only consumed (oggpack_adv) once
 * the walk terminates, so the stream position stays consistent on error.
 *
 * NOTE(review): `chase` indexes dec_table during the tree walk without
 * any bounds validation; a malformed codebook could presumably drive it
 * out of range — confirm that codebook setup guarantees every node index
 * stays inside the table.
 */
static inline ogg_uint32_t decode_packed_entry_number(codebook *book,
                                                      oggpack_buffer *b){
  ogg_uint32_t chase=0;
  int read=book->dec_maxlength;
  long lok = oggpack_look(b,read),i;

  /* Near end of packet, fall back to progressively shorter lookaheads. */
  while(lok<0 && read>1)
    lok = oggpack_look(b, --read);

  if(lok<0){
    oggpack_adv(b,1); /* force eop */
    return -1;
  }

  /* chase the tree with the bits we got */
  switch (book->dec_method)
  {
    case 0:
    {
      /* book->dec_nodeb==1, book->dec_leafw==1 */
      /* 8/8 - Used */
      unsigned char *t=(unsigned char *)book->dec_table;

      for(i=0;i<read;i++){
        chase=t[chase*2+((lok>>i)&1)];
        if(chase&0x80UL)break;  /* top bit set: reached a leaf */
      }
      chase&=0x7fUL;  /* strip the leaf marker */
      break;
    }
    case 1:
    {
      /* book->dec_nodeb==1, book->dec_leafw!=1 */
      /* 8/16 - Used by infile2 */
      unsigned char *t=(unsigned char *)book->dec_table;

      for(i=0;i<read;i++){
        int bit=(lok>>i)&1;
        int next=t[chase+bit];
        if(next&0x80){
          /* Leaf value spans two bytes; second byte's offset depends on
           * whether the sibling slot is itself a wide leaf. */
          chase= (next<<8) | t[chase+bit+1+(!bit || t[chase]&0x80)];
          break;
        }
        chase=next;
      }
      //chase&=0x7fffUL;
      chase&=~0x8000UL;
      break;
    }
    case 2:
    {
      /* book->dec_nodeb==2, book->dec_leafw==1 */
      /* 16/16 - Used */
      for(i=0;i<read;i++){
        chase=((ogg_uint16_t *)(book->dec_table))[chase*2+((lok>>i)&1)];
        if(chase&0x8000UL)break;
      }
      //chase&=0x7fffUL;
      chase&=~0x8000UL;
      break;
    }
    case 3:
    {
      /* book->dec_nodeb==2, book->dec_leafw!=1 */
      /* 16/32 - Used by infile2 */
      ogg_uint16_t *t=(ogg_uint16_t *)book->dec_table;

      for(i=0;i<read;i++){
        int bit=(lok>>i)&1;
        int next=t[chase+bit];
        if(next&0x8000){
          /* Leaf value spans two 16-bit words, same sibling logic as
           * the 8/16 case above. */
          chase= (next<<16) | t[chase+bit+1+(!bit || t[chase]&0x8000)];
          break;
        }
        chase=next;
      }
      //chase&=0x7fffffffUL;
      chase&=~0x80000000UL;
      break;
    }
    case 4:
    {
      //Output("32/32");
      for(i=0;i<read;i++){
        chase=((ogg_uint32_t *)(book->dec_table))[chase*2+((lok>>i)&1)];
        if(chase&0x80000000UL)break;
      }
      //chase&=0x7fffffffUL;
      chase&=~0x80000000UL;
      break;
    }
  }

  if(i<read){
    oggpack_adv(b,i+1);  /* consume exactly the codeword's bits */
    return chase;
  }

  /* Ran out of lookahead bits before hitting a leaf: invalid codeword. */
  oggpack_adv(b,read+1);
  return(-1);
}
| 13,695
|
149,867
| 0
|
// Removes |layer| from this host's bookkeeping: detaches its element from
// the mutator host (when it has one), clears any pending push-properties
// state, and drops it from the id -> layer map.
void LayerTreeHost::UnregisterLayer(Layer* layer) {
  DCHECK(LayerById(layer->id()));
  DCHECK(!in_paint_layer_contents_);

  const auto element_id = layer->element_id();
  if (element_id)
    mutator_host_->UnregisterElement(element_id, ElementListType::ACTIVE);

  RemoveLayerShouldPushProperties(layer);
  layer_id_map_.erase(layer->id());
}
| 13,696
|
91,117
| 0
|
/* Build a dummy IPv4 skb (IP header plus a minimal transport header) that
 * can be pushed through the routing engine for an RTM_GETROUTE lookup.
 *
 * @src, @dst:     addresses for the IP header (network byte order)
 * @ip_proto:      IPPROTO_UDP / IPPROTO_TCP / IPPROTO_ICMP selects which
 *                 transport header is appended; other values get none
 * @sport, @dport: ports for the UDP/TCP cases (network byte order)
 *
 * Returns the skb, or NULL on allocation failure; the caller owns it.
 */
static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
						   u8 ip_proto, __be16 sport,
						   __be16 dport)
{
	struct sk_buff *skb;
	struct iphdr *iph;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Reserve room for dummy headers, this skb can pass
	 * through good chunk of routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	iph = skb_put(skb, sizeof(struct iphdr));
	iph->protocol = ip_proto;
	iph->saddr = src;
	iph->daddr = dst;
	iph->version = 0x4;
	iph->frag_off = 0;
	iph->ihl = 0x5;	/* 5 * 4 = 20 bytes: header with no options */
	skb_set_transport_header(skb, skb->len);

	switch (iph->protocol) {
	case IPPROTO_UDP: {
		struct udphdr *udph;

		udph = skb_put_zero(skb, sizeof(struct udphdr));
		udph->source = sport;
		udph->dest = dport;
		udph->len = sizeof(struct udphdr);
		udph->check = 0;	/* checksum optional for IPv4 UDP */
		break;
	}
	case IPPROTO_TCP: {
		struct tcphdr *tcph;

		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
		tcph->source	= sport;
		tcph->dest	= dport;
		tcph->doff	= sizeof(struct tcphdr) / 4;
		tcph->rst = 1;	/* dummy segment; RST so nothing reacts to it */
		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
					    src, dst, 0);
		break;
	}
	case IPPROTO_ICMP: {
		struct icmphdr *icmph;

		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
		icmph->type = ICMP_ECHO;
		icmph->code = 0;
	}
	}

	return skb;
}
| 13,697
|
184,522
| 1
|
// Test helper: delivers an unknown-version invalidation for |type_name| to
// the client under test and expects the client to acknowledge the supplied
// handle on the mock invalidation client.
void FireInvalidateUnknownVersion(const char* type_name) {
  const invalidation::ObjectId object_id(
      ipc::invalidation::ObjectSource::CHROME_SYNC, type_name);
  invalidation::AckHandle ack_handle("fakedata");

  // The client must ack exactly this handle.
  EXPECT_CALL(mock_invalidation_client_, Acknowledge(ack_handle));

  client_.InvalidateUnknownVersion(&mock_invalidation_client_, object_id,
                                   ack_handle);
}
| 13,698
|
132,779
| 0
|
// Returns the renderer's statistics accumulator.  The object is owned by
// this renderer and remains valid for the renderer's lifetime.
ChromotingStats* PepperVideoRenderer3D::GetStats() {
  return &stats_;
}
| 13,699
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.