idx
int64 | func
string | target
int64 |
|---|---|---|
166,181
|
// Exercises storage lookup of a main-request fallback response.
// Seeds a cache (id 2) / group (id 1) holding two FALLBACK entries, both in
// the working set and mirrored into the database, then calls
// FindResponseForMainRequest() for kFallbackTestUrl.  The actual result
// checking happens asynchronously in Verify_BasicFindMainFallbackResponse,
// which is queued first via PushNextTask.
//
// |drop_from_working_set|: when true, the in-memory cache_/group_ references
// are released before the lookup, so the storage layer must answer from the
// database records inserted below.
void BasicFindMainFallbackResponse(bool drop_from_working_set) {
// Queue the verification step; it runs after the async find completes.
PushNextTask(base::BindOnce(
&AppCacheStorageImplTest::Verify_BasicFindMainFallbackResponse,
base::Unretained(this)));
// Working-set setup: two fallback entries with distinct response ids.
MakeCacheAndGroup(kManifestUrl, 2, 1, true);
cache_->AddEntry(kEntryUrl, AppCacheEntry(AppCacheEntry::FALLBACK, 1));
cache_->AddEntry(kEntryUrl2, AppCacheEntry(AppCacheEntry::FALLBACK, 2));
cache_->fallback_namespaces_.push_back(AppCacheNamespace(
APPCACHE_FALLBACK_NAMESPACE, kFallbackNamespace2, kEntryUrl2, false));
cache_->fallback_namespaces_.push_back(AppCacheNamespace(
APPCACHE_FALLBACK_NAMESPACE, kFallbackNamespace, kEntryUrl, false));
// Mirror the working-set cache into database records and persist them,
// skipping the default entry record.
AppCacheDatabase::CacheRecord cache_record;
std::vector<AppCacheDatabase::EntryRecord> entries;
std::vector<AppCacheDatabase::NamespaceRecord> intercepts;
std::vector<AppCacheDatabase::NamespaceRecord> fallbacks;
std::vector<AppCacheDatabase::OnlineWhiteListRecord> whitelists;
cache_->ToDatabaseRecords(group_.get(), &cache_record, &entries,
&intercepts, &fallbacks, &whitelists);
for (const auto& entry : entries) {
if (entry.url != kDefaultEntryUrl)
EXPECT_TRUE(database()->InsertEntry(&entry));
}
EXPECT_TRUE(database()->InsertNamespaceRecords(fallbacks));
EXPECT_TRUE(database()->InsertOnlineWhiteListRecords(whitelists));
if (drop_from_working_set) {
// Ensure we hold the last references before dropping them.
EXPECT_TRUE(cache_->HasOneRef());
cache_ = nullptr;
EXPECT_TRUE(group_->HasOneRef());
group_ = nullptr;
}
// Kick off the async lookup; verification continues in the queued task.
storage()->FindResponseForMainRequest(kFallbackTestUrl, GURL(), delegate());
EXPECT_NE(kFallbackTestUrl, delegate()->found_url_);
}
| 0
|
202,379
|
// Delegates to the browser window to show the Collected Cookies dialog for
// |tab_contents|.
void Browser::ShowCollectedCookiesDialog(TabContents *tab_contents) {
window()->ShowCollectedCookiesDialog(tab_contents);
}
| 0
|
284,948
|
void ImageBitmapFactories::ImageBitmapLoader::LoadBlobAsync(
void ImageBitmapFactories::ImageBitmapLoader::LoadBlobAsync(Blob* blob) {
loader_->Start(blob->GetBlobDataHandle());
}
| 0
|
154,435
|
/*
 * sysfs store handler for the K90 keyboard's "current_profile" attribute.
 * Parses a profile number (1..3) from @buf and sends a vendor-specific USB
 * control request to switch the device to that profile.
 *
 * Returns @count on success, -EINVAL for unparsable/out-of-range input, or
 * the usb_control_msg() error code on transfer failure.
 */
static ssize_t k90_store_current_profile(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
/* The USB device is the parent of the HID interface this attr hangs off. */
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
int profile;
if (kstrtoint(buf, 10, &profile))
return -EINVAL;
if (profile < 1 || profile > 3)
return -EINVAL;
/* Vendor request; the profile number travels in wValue, no data stage. */
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
K90_REQUEST_PROFILE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, profile, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (ret != 0) {
dev_warn(dev, "Failed to change current profile (error %d).\n",
ret);
return ret;
}
return count;
}
| 0
|
493,241
|
/*
 * transformIndexConstraints
 *		Build IndexStmts for the UNIQUE / PRIMARY KEY / EXCLUDE constraints
 *		collected in cxt->ixconstraints, drop redundant duplicate index
 *		specifications (always keeping the PRIMARY KEY index), and append
 *		the surviving IndexStmts to cxt->alist.
 */
transformIndexConstraints(CreateStmtContext *cxt)
{
IndexStmt *index;
List *indexlist = NIL;
List *finalindexlist = NIL;
ListCell *lc;
/*
 * Run through the constraints that need to generate an index. For PRIMARY
 * KEY, mark each column as NOT NULL and create an index. For UNIQUE or
 * EXCLUDE, create an index as for PRIMARY KEY, but do not insist on NOT
 * NULL.
 */
foreach(lc, cxt->ixconstraints)
{
Constraint *constraint = lfirst_node(Constraint, lc);
Assert(constraint->contype == CONSTR_PRIMARY ||
constraint->contype == CONSTR_UNIQUE ||
constraint->contype == CONSTR_EXCLUSION);
index = transformIndexConstraint(constraint, cxt);
indexlist = lappend(indexlist, index);
}
/*
 * Scan the index list and remove any redundant index specifications. This
 * can happen if, for instance, the user writes UNIQUE PRIMARY KEY. A
 * strict reading of SQL would suggest raising an error instead, but that
 * strikes me as too anal-retentive. - tgl 2001-02-14
 *
 * XXX in ALTER TABLE case, it'd be nice to look for duplicate
 * pre-existing indexes, too.
 */
if (cxt->pkey != NULL)
{
/* Make sure we keep the PKEY index in preference to others... */
finalindexlist = list_make1(cxt->pkey);
}
foreach(lc, indexlist)
{
bool keep = true;
ListCell *k;
index = lfirst(lc);
/* if it's pkey, it's already in finalindexlist */
if (index == cxt->pkey)
continue;
/* Compare against every index already accepted; merge if equivalent. */
foreach(k, finalindexlist)
{
IndexStmt *priorindex = lfirst(k);
if (equal(index->indexParams, priorindex->indexParams) &&
equal(index->indexIncludingParams, priorindex->indexIncludingParams) &&
equal(index->whereClause, priorindex->whereClause) &&
equal(index->excludeOpNames, priorindex->excludeOpNames) &&
strcmp(index->accessMethod, priorindex->accessMethod) == 0 &&
index->deferrable == priorindex->deferrable &&
index->initdeferred == priorindex->initdeferred)
{
/* A redundant UNIQUE makes the survivor unique. */
priorindex->unique |= index->unique;
/*
 * If the prior index is as yet unnamed, and this one is
 * named, then transfer the name to the prior index. This
 * ensures that if we have named and unnamed constraints,
 * we'll use (at least one of) the names for the index.
 */
if (priorindex->idxname == NULL)
priorindex->idxname = index->idxname;
keep = false;
break;
}
}
if (keep)
finalindexlist = lappend(finalindexlist, index);
}
/*
 * Now append all the IndexStmts to cxt->alist. If we generated an ALTER
 * TABLE SET NOT NULL statement to support a primary key, it's already in
 * cxt->alist.
 */
cxt->alist = list_concat(cxt->alist, finalindexlist);
}
| 0
|
397,912
|
/*
 * Module teardown: detach the privmsg / ctcp-action / nick-change signal
 * handlers that were previously registered for query-window tracking.
 */
void irc_queries_deinit(void)
{
signal_remove("event privmsg", (SIGNAL_FUNC) event_privmsg);
signal_remove("ctcp action", (SIGNAL_FUNC) ctcp_action);
signal_remove("event nick", (SIGNAL_FUNC) event_nick);
}
| 0
|
111,744
|
// Final fixup of the output ELF entry point once the loader's symbol
// offsets are known.  Returns early when xct_off is set, since elfout is
// not populated in that mode (see FIXME below).
//
// For big-endian PPC64 the entry is a function descriptor: the loader's
// "entry_descr" slot is patched to hold the absolute address of _start,
// and e_entry is pointed at the descriptor itself.  On all other targets
// e_entry points directly at _start.  Absolute addresses are formed as
// (symbol offset + sz_pack2 + base vaddr of the first program header).
void PackLinuxElf64::updateLoader(OutputFile * /*fo*/)
{
if (xct_off) {
return; // FIXME elfout has no values at all
}
upx_uint64_t const vbase = get_te64(&elfout.phdr[0].p_vaddr);
unsigned start = linker->getSymbolOffset("_start");
if (get_te16(&elfout.ehdr.e_machine)==Elf64_Ehdr::EM_PPC64
&& elfout.ehdr.e_ident[Elf64_Ehdr::EI_DATA]==Elf64_Ehdr::ELFDATA2MSB) {
unsigned descr = linker->getSymbolOffset("entry_descr");
// External relocation of PPC64 function descriptor.
upx_uint64_t dot_entry = start + sz_pack2 + vbase;
upx_byte *p = getLoader();
set_te64(&p[descr], dot_entry);
set_te64(&elfout.ehdr.e_entry, descr + sz_pack2 + vbase);
}
else {
set_te64(&elfout.ehdr.e_entry, start + sz_pack2 + vbase);
}
}
| 0
|
194,771
|
// Forwards the draw-and-present request to the proxy, which performs it on
// the compositor (CC) thread.
virtual void scheduleDrawAndPresent()
{
m_proxy->drawLayersAndPresentOnCCThread();
}
| 0
|
202,155
|
// Returns true if the "blacklist acknowledged" boolean pref has been set
// for |extension_id|.
bool ExtensionPrefs::IsBlacklistedExtensionAcknowledged(
const std::string& extension_id) {
return ReadExtensionPrefBoolean(extension_id, kPrefBlacklistAcknowledged);
}
| 0
|
196,978
|
// Main content-stream interpreter loop.
//
// Reads objects from |parser| until EOF: non-command objects are collected
// as operands (up to maxArgs; extras are reported and discarded), and each
// command object is executed via execOp() with the collected operands.
// Also handles optional command tracing (printCommands), per-command
// timing into the output device's profile hash (profileCommands), a
// periodic out->dump() every 20000 ops, command aborts, and an external
// abort-check callback polled at most every 10 ops.  The graphics state is
// bracketed by push/popStateGuard().  |topLevel| triggers a final dump if
// any ops ran since the last periodic one.
void Gfx::go(GBool topLevel) {
Object obj;
Object args[maxArgs];
int numArgs, i;
int lastAbortCheck;
pushStateGuard();
updateLevel = lastAbortCheck = 0;
numArgs = 0;
parser->getObj(&obj);
while (!obj.isEOF()) {
commandAborted = gFalse;
if (obj.isCmd()) {
if (printCommands) {
obj.print(stdout);
for (i = 0; i < numArgs; ++i) {
printf(" ");
args[i].print(stdout);
}
printf("\n");
fflush(stdout);
}
// Time the command even when profiling is off; the timer is cheap and
// only read in the profiling branch below.
GooTimer timer;
execOp(&obj, args, numArgs);
if (profileCommands) {
GooHash *hash;
hash = out->getProfileHash ();
if (hash) {
GooString *cmd_g;
ProfileData *data_p;
cmd_g = new GooString (obj.getCmd());
data_p = (ProfileData *)hash->lookup (cmd_g);
if (data_p == NULL) {
// First occurrence of this command: create its profile entry.
data_p = new ProfileData();
hash->add (cmd_g, data_p);
}
data_p->addElement(timer.getElapsed ());
}
}
// Release the command and its consumed operands.
obj.free();
for (i = 0; i < numArgs; ++i)
args[i].free();
numArgs = 0;
// Periodic flush so viewers can show incremental progress.
if (++updateLevel >= 20000) {
out->dump();
updateLevel = 0;
}
if (commandAborted) {
commandAborted = gFalse;
break;
}
// Poll the abort callback at most once per 10 executed ops.
if (abortCheckCbk) {
if (updateLevel - lastAbortCheck > 10) {
if ((*abortCheckCbk)(abortCheckCbkData)) {
break;
}
lastAbortCheck = updateLevel;
}
}
} else if (numArgs < maxArgs) {
args[numArgs++] = obj;
} else {
error(getPos(), "Too many args in content stream");
if (printCommands) {
printf("throwing away arg: ");
obj.print(stdout);
printf("\n");
fflush(stdout);
}
obj.free();
}
parser->getObj(&obj);
}
obj.free();
// Operands left with no command to consume them: report and release.
if (numArgs > 0) {
error(getPos(), "Leftover args in content stream");
if (printCommands) {
printf("%d leftovers:", numArgs);
for (i = 0; i < numArgs; ++i) {
printf(" ");
args[i].print(stdout);
}
printf("\n");
fflush(stdout);
}
for (i = 0; i < numArgs; ++i)
args[i].free();
}
popStateGuard();
if (topLevel && updateLevel > 0) {
out->dump();
}
}
| 0
|
202,239
|
// Notifies the scriptable document parser, when one exists, that all
// pending body stylesheets have finished loading.
void Document::DidRemoveAllPendingBodyStylesheets() {
if (ScriptableDocumentParser* parser = GetScriptableDocumentParser())
parser->DidLoadAllBodyStylesheets();
}
| 0
|
401,130
|
/*
 * Intentionally empty raster-operation handler: a blit with this ROP
 * transfers nothing.  All parameters are unused.
 */
static void cirrus_bitblt_rop_nop(CirrusVGAState *s,
uint32_t dstaddr, uint32_t srcaddr,
int dstpitch,int srcpitch,
int bltwidth,int bltheight)
{
}
| 0
|
196,813
|
// Deserializes a gfx::SizeF from the pickle as two consecutive floats,
// width first then height.  On failure |p| is left untouched and false is
// returned; on success both dimensions are stored and true is returned.
bool ParamTraits<gfx::SizeF>::Read(const Message* m,
                                   PickleIterator* iter,
                                   gfx::SizeF* p) {
  float width;
  if (!ParamTraits<float>::Read(m, iter, &width))
    return false;
  float height;
  if (!ParamTraits<float>::Read(m, iter, &height))
    return false;
  p->set_width(width);
  p->set_height(height);
  return true;
}
| 0
|
368,065
|
/*
 * Command handler for "denyall": extract the IP/subnet from the request,
 * convert to host byte order, and register a deny access restriction via
 * NCR_AddAccessRestriction().  The reply status is STT_SUCCESS on success
 * or STT_BADSUBNET if the restriction could not be added.
 */
handle_denyall(CMD_Request *rx_message, CMD_Reply *tx_message)
{
IPAddr ip;
int subnet_bits;
UTI_IPNetworkToHost(&rx_message->data.allow_deny.ip, &ip);
subnet_bits = ntohl(rx_message->data.allow_deny.subnet_bits);
if (NCR_AddAccessRestriction(&ip, subnet_bits, 0, 1)) {
tx_message->status = htons(STT_SUCCESS);
} else {
tx_message->status = htons(STT_BADSUBNET);
}
}
| 0
|
376,904
|
/*
 * Consistency-check one qcow2 L2 table.
 *
 * Reads the L2 table at @l2_offset from disk and, for every entry, bumps
 * the refcounts of the clusters it references via inc_refcounts(),
 * recording corruptions (bogus COPIED flags, misaligned offsets) in @res.
 * With CHECK_FRAG_INFO set, also accumulates allocation/fragmentation
 * statistics in res->bfi.
 *
 * Returns 0 on success, -EIO if the table itself could not be read.
 */
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
uint16_t *refcount_table, int refcount_table_size, int64_t l2_offset,
int flags)
{
BDRVQcowState *s = bs->opaque;
uint64_t *l2_table, l2_entry;
uint64_t next_contiguous_offset = 0;
int i, l2_size, nb_csectors;
/* Read L2 table from disk */
l2_size = s->l2_size * sizeof(uint64_t);
l2_table = g_malloc(l2_size);
if (bdrv_pread(bs->file, l2_offset, l2_table, l2_size) != l2_size)
goto fail;
/* Do the actual checks */
for(i = 0; i < s->l2_size; i++) {
l2_entry = be64_to_cpu(l2_table[i]);
switch (qcow2_get_cluster_type(l2_entry)) {
case QCOW2_CLUSTER_COMPRESSED:
/* Compressed clusters don't have QCOW_OFLAG_COPIED */
if (l2_entry & QCOW_OFLAG_COPIED) {
fprintf(stderr, "ERROR: cluster %" PRId64 ": "
"copied flag must never be set for compressed "
"clusters\n", l2_entry >> s->cluster_bits);
l2_entry &= ~QCOW_OFLAG_COPIED;
res->corruptions++;
}
/* Mark cluster as used */
nb_csectors = ((l2_entry >> s->csize_shift) &
s->csize_mask) + 1;
l2_entry &= s->cluster_offset_mask;
/* Compressed data occupies whole 512-byte sectors. */
inc_refcounts(bs, res, refcount_table, refcount_table_size,
l2_entry & ~511, nb_csectors * 512);
if (flags & CHECK_FRAG_INFO) {
res->bfi.allocated_clusters++;
res->bfi.compressed_clusters++;
/* Compressed clusters are fragmented by nature. Since they
 * take up sub-sector space but we only have sector granularity
 * I/O we need to re-read the same sectors even for adjacent
 * compressed clusters.
 */
res->bfi.fragmented_clusters++;
}
break;
case QCOW2_CLUSTER_ZERO:
/* Zero clusters without a backing offset reference nothing. */
if ((l2_entry & L2E_OFFSET_MASK) == 0) {
break;
}
/* fall through */
case QCOW2_CLUSTER_NORMAL:
{
uint64_t offset = l2_entry & L2E_OFFSET_MASK;
if (flags & CHECK_FRAG_INFO) {
res->bfi.allocated_clusters++;
/* Any gap from the previous cluster counts as fragmentation. */
if (next_contiguous_offset &&
offset != next_contiguous_offset) {
res->bfi.fragmented_clusters++;
}
next_contiguous_offset = offset + s->cluster_size;
}
/* Mark cluster as used */
inc_refcounts(bs, res, refcount_table,refcount_table_size,
offset, s->cluster_size);
/* Correct offsets are cluster aligned */
if (offset_into_cluster(s, offset)) {
fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
"properly aligned; L2 entry corrupted.\n", offset);
res->corruptions++;
}
break;
}
case QCOW2_CLUSTER_UNALLOCATED:
break;
default:
abort();
}
}
g_free(l2_table);
return 0;
fail:
fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
g_free(l2_table);
return -EIO;
}
| 0
|
251,053
|
// Asks the embedder (via ContentBrowserClient) to build the NavigationUIData
// for |navigation_handle|.  Only valid when browser-side navigation is
// enabled (DCHECKed).
std::unique_ptr<NavigationUIData> WebContentsImpl::GetNavigationUIData(
NavigationHandle* navigation_handle) {
DCHECK(IsBrowserSideNavigationEnabled());
return GetContentClient()->browser()->GetNavigationUIData(navigation_handle);
}
| 0
|
366,914
|
/*
 * Deliver one pending signal to the current task, or — when no signal is
 * pending and @syscall is set — arrange for the interrupted system call to
 * be restarted.
 *
 * Returns 1 if a signal handler was set up, 0 otherwise.  Only meaningful
 * when the trap came from user mode; kernel-mode entries return
 * immediately.  -ERESTART_RESTARTBLOCK needs the restart_syscall syscall:
 * in Thumb (or pure-EABI ARM) mode r7 is rewritten and the PC backed up,
 * while the OABI/compat path builds a 3-word trampoline (saved pc, swi,
 * ldr) on the user stack, since it cannot safely clobber r7.
 */
static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
{
struct k_sigaction ka;
siginfo_t info;
int signr;
/*
 * We want the common case to go fast, which
 * is why we may in certain cases get here from
 * kernel mode. Just return without doing anything
 * if so.
 */
if (!user_mode(regs))
return 0;
if (try_to_freeze())
goto no_signal;
single_step_clear(current);
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
/* Set up the user stack frame for the handler. */
handle_signal(signr, &ka, &info, oldset, regs, syscall);
single_step_set(current);
return 1;
}
no_signal:
/*
 * No signal to deliver to the process - restart the syscall.
 */
if (syscall) {
if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
if (thumb_mode(regs)) {
regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
regs->ARM_pc -= 2;
} else {
#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
regs->ARM_r7 = __NR_restart_syscall;
regs->ARM_pc -= 4;
#else
u32 __user *usp;
u32 swival = __NR_restart_syscall;
regs->ARM_sp -= 12;
usp = (u32 __user *)regs->ARM_sp;
/*
 * Either we supports OABI only, or we have
 * EABI with the OABI compat layer enabled.
 * In the later case we don't know if user
 * space is EABI or not, and if not we must
 * not clobber r7. Always using the OABI
 * syscall solves that issue and works for
 * all those cases.
 */
swival = swival - __NR_SYSCALL_BASE + __NR_OABI_SYSCALL_BASE;
put_user(regs->ARM_pc, &usp[0]);
/* swi __NR_restart_syscall */
put_user(0xef000000 | swival, &usp[1]);
/* ldr pc, [sp], #12 */
put_user(0xe49df00c, &usp[2]);
flush_icache_range((unsigned long)usp,
(unsigned long)(usp + 3));
regs->ARM_pc = regs->ARM_sp + 4;
#endif
}
}
/* Simple restarts: re-execute the original swi instruction. */
if (regs->ARM_r0 == -ERESTARTNOHAND ||
regs->ARM_r0 == -ERESTARTSYS ||
regs->ARM_r0 == -ERESTARTNOINTR) {
setup_syscall_restart(regs);
}
}
single_step_set(current);
return 0;
}
| 0
|
413,300
|
// Look up a section header by name in the cached section-header table.
// Returns the first Elf32_Shdr whose shstrtab name equals |name|, or 0
// when the table is absent or nothing matches.
Elf32_Shdr const *PackLinuxElf32::elf_find_section_name(
    char const *const name
) const
{
    if (!shdri) {  // no section headers loaded
        return 0;
    }
    Elf32_Shdr const *cursor = shdri;
    for (int remaining = e_shnum; remaining > 0; --remaining, ++cursor) {
        char const *const sec_name = &shstrtab[get_te32(&cursor->sh_name)];
        if (0 == strcmp(name, sec_name)) {
            return cursor;
        }
    }
    return 0;
}
| 0
|
270,980
|
/*
 * Lua module entry point: builds the module table by hand (instead of
 * luaL_register/luaL_newlib), installing each command from cmds[] plus the
 * _NAME/_VERSION/_COPYRIGHT/_DESCRIPTION metadata fields.  Leaves the
 * table on the stack and returns 1.
 *
 * NOTE(review): the loop iterates to array length minus one — presumably
 * cmds[] ends with a NULL sentinel entry; confirm against its definition.
 */
int luaopen_create(lua_State *L) {
int i;
/* Manually construct our module table instead of
 * relying on _register or _newlib */
lua_newtable(L);
for (i = 0; i < (sizeof(cmds)/sizeof(*cmds) - 1); i++) {
lua_pushcfunction(L, cmds[i].func);
lua_setfield(L, -2, cmds[i].name);
}
/* Add metadata */
lua_pushliteral(L, LUACMSGPACK_NAME);
lua_setfield(L, -2, "_NAME");
lua_pushliteral(L, LUACMSGPACK_VERSION);
lua_setfield(L, -2, "_VERSION");
lua_pushliteral(L, LUACMSGPACK_COPYRIGHT);
lua_setfield(L, -2, "_COPYRIGHT");
lua_pushliteral(L, LUACMSGPACK_DESCRIPTION);
lua_setfield(L, -2, "_DESCRIPTION");
return 1;
}
| 0
|
253,183
|
// Records the most recent vsync timebase and interval reported by the
// compositor.
void DelegatedFrameHost::OnUpdateVSyncParameters(base::TimeTicks timebase,
base::TimeDelta interval) {
vsync_timebase_ = timebase;
vsync_interval_ = interval;
}
| 0
|
336,788
|
/**
 * Initialize the ALAC encoder.
 *
 * Validates the input format (s16 samples, at most 2 channels), selects the
 * compression level and default Rice parameters, allocates and fills the
 * 'alac' extradata atom, validates the user-supplied min/max prediction
 * orders, allocates the coded frame, and sets up the LPC context.
 *
 * Returns 0 on success or a negative AVERROR; on any failure after partial
 * setup, alac_encode_close() is invoked to release what was allocated.
 */
static av_cold int alac_encode_init(AVCodecContext *avctx)
{
AlacEncodeContext *s = avctx->priv_data;
int ret;
uint8_t *alac_extradata;
avctx->frame_size = s->frame_size = DEFAULT_FRAME_SIZE;
if (avctx->sample_fmt != AV_SAMPLE_FMT_S16) {
av_log(avctx, AV_LOG_ERROR, "only pcm_s16 input samples are supported\n");
return -1;
}
/* TODO: Correctly implement multi-channel ALAC.
It is similar to multi-channel AAC, in that it has a series of
single-channel (SCE), channel-pair (CPE), and LFE elements. */
if (avctx->channels > 2) {
av_log(avctx, AV_LOG_ERROR, "only mono or stereo input is currently supported\n");
return AVERROR_PATCHWELCOME;
}
// Set default compression level
if (avctx->compression_level == FF_COMPRESSION_DEFAULT)
s->compression_level = 2;
else
s->compression_level = av_clip(avctx->compression_level, 0, 2);
// Initialize default Rice parameters
s->rc.history_mult = 40;
s->rc.initial_history = 10;
s->rc.k_modifier = 14;
s->rc.rice_modifier = 4;
s->max_coded_frame_size = get_max_frame_size(avctx->frame_size,
avctx->channels,
DEFAULT_SAMPLE_SIZE);
// FIXME: consider wasted_bytes
s->write_sample_size = DEFAULT_SAMPLE_SIZE + avctx->channels - 1;
avctx->extradata = av_mallocz(ALAC_EXTRADATA_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
if (!avctx->extradata) {
ret = AVERROR(ENOMEM);
goto error;
}
avctx->extradata_size = ALAC_EXTRADATA_SIZE;
// Fill the big-endian 'alac' extradata atom field by field.
alac_extradata = avctx->extradata;
AV_WB32(alac_extradata, ALAC_EXTRADATA_SIZE);
AV_WB32(alac_extradata+4, MKBETAG('a','l','a','c'));
AV_WB32(alac_extradata+12, avctx->frame_size);
AV_WB8 (alac_extradata+17, DEFAULT_SAMPLE_SIZE);
AV_WB8 (alac_extradata+21, avctx->channels);
AV_WB32(alac_extradata+24, s->max_coded_frame_size);
AV_WB32(alac_extradata+28,
avctx->sample_rate * avctx->channels * DEFAULT_SAMPLE_SIZE); // average bitrate
AV_WB32(alac_extradata+32, avctx->sample_rate);
// Set relevant extradata fields
if (s->compression_level > 0) {
AV_WB8(alac_extradata+18, s->rc.history_mult);
AV_WB8(alac_extradata+19, s->rc.initial_history);
AV_WB8(alac_extradata+20, s->rc.k_modifier);
}
// Prediction orders: user values are range-checked against the LPC limits.
s->min_prediction_order = DEFAULT_MIN_PRED_ORDER;
if (avctx->min_prediction_order >= 0) {
if (avctx->min_prediction_order < MIN_LPC_ORDER ||
avctx->min_prediction_order > ALAC_MAX_LPC_ORDER) {
av_log(avctx, AV_LOG_ERROR, "invalid min prediction order: %d\n",
avctx->min_prediction_order);
ret = AVERROR(EINVAL);
goto error;
}
s->min_prediction_order = avctx->min_prediction_order;
}
s->max_prediction_order = DEFAULT_MAX_PRED_ORDER;
if (avctx->max_prediction_order >= 0) {
if (avctx->max_prediction_order < MIN_LPC_ORDER ||
avctx->max_prediction_order > ALAC_MAX_LPC_ORDER) {
av_log(avctx, AV_LOG_ERROR, "invalid max prediction order: %d\n",
avctx->max_prediction_order);
ret = AVERROR(EINVAL);
goto error;
}
s->max_prediction_order = avctx->max_prediction_order;
}
if (s->max_prediction_order < s->min_prediction_order) {
av_log(avctx, AV_LOG_ERROR,
"invalid prediction orders: min=%d max=%d\n",
s->min_prediction_order, s->max_prediction_order);
ret = AVERROR(EINVAL);
goto error;
}
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
goto error;
}
s->avctx = avctx;
if ((ret = ff_lpc_init(&s->lpc_ctx, avctx->frame_size,
s->max_prediction_order,
FF_LPC_TYPE_LEVINSON)) < 0) {
goto error;
}
return 0;
error:
alac_encode_close(avctx);
return ret;
}
| 0
|
428,260
|
/*
 * Set SO_RCVLOWAT on a TCP socket.
 *
 * @val is clamped to half the receive buffer (half of tcp_rmem[2] when the
 * user has not locked the buffer size) and stored in sk_rcvlowat (minimum
 * 1).  tcp_data_ready() is then called in case queued data already crosses
 * the new watermark.  When the buffer size is not user-locked, sk_rcvbuf
 * and the window clamp are grown to cover twice the new watermark.
 * Always returns 0.
 */
int tcp_set_rcvlowat(struct sock *sk, int val)
{
int cap;
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
cap = sk->sk_rcvbuf >> 1;
else
cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
val = min(val, cap);
sk->sk_rcvlowat = val ? : 1;
/* Check if we need to signal EPOLLIN right now */
tcp_data_ready(sk);
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
return 0;
val <<= 1;
if (val > sk->sk_rcvbuf) {
sk->sk_rcvbuf = val;
tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
}
return 0;
}
| 0
|
286,268
|
/*
 * Read one configuration source (a file or a piped command) into ConfigTab
 * via Read_config(), exiting with an error message on parse failure.
 *
 * If the source is unreadable and not piped: optional sources just return;
 * a required source with no associated host prints an error and exits.
 *
 * NOTE(review): when the source is required, unreadable, and |host| is
 * non-NULL, control falls out of the if/else silently with no read attempt
 * and no error — confirm this is intentional for host-associated configs.
 */
process_config_source( const char* file, const char* name,
const char* host, int required )
{
int rval;
if( access( file, R_OK ) != 0 && !is_piped_command(file)) {
if( !required) { return; }
if( !host ) {
fprintf( stderr, "ERROR: Can't read %s %s\n",
name, file );
exit( 1 );
}
} else {
rval = Read_config( file, ConfigTab, TABLESIZE, EXPAND_LAZY,
false, extra_info );
if( rval < 0 ) {
fprintf( stderr,
"Configuration Error Line %d while reading %s %s\n",
ConfigLineNo, name, file );
exit( 1 );
}
}
}
| 0
|
212,570
|
/* Empty the vCPU's async page-fault GFN hash by writing the ~0 sentinel
 * into every slot of the power-of-two-sized table. */
static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int slot;

	for (slot = roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1; slot >= 0; slot--)
		vcpu->arch.apf.gfns[slot] = ~0;
}
| 0
|
348,471
|
/**
 * memory_failure - handle a hardware-reported memory corruption on a page
 * @pfn: page frame number of the corrupted page
 * @trapno: trap number passed through to user-space signalling
 * @flags: MF_* flags (e.g. MF_COUNT_INCREASED when the caller holds a ref)
 *
 * Marks the page HWPoison and accounts it, then walks through the special
 * cases — free buddy pages, free/poisoned hugepages, non-compound
 * high-order pages, THP (split first) — before locking the page, unmapping
 * it from user space, and finally dispatching on the saved page flags via
 * the error_states table (page_action).
 *
 * Returns 0 when the page was handled (or already poisoned), -ENXIO for an
 * invalid pfn, -EBUSY or other negatives when containment failed.
 */
int memory_failure(unsigned long pfn, int trapno, int flags)
{
struct page_state *ps;
struct page *p;
struct page *hpage;
struct page *orig_head;
int res;
unsigned int nr_pages;
unsigned long page_flags;
if (!sysctl_memory_failure_recovery)
panic("Memory failure from trap %d on page %lx", trapno, pfn);
if (!pfn_valid(pfn)) {
pr_err("Memory failure: %#lx: memory outside kernel control\n",
pfn);
return -ENXIO;
}
p = pfn_to_page(pfn);
orig_head = hpage = compound_head(p);
/* Already poisoned: nothing more to do. */
if (TestSetPageHWPoison(p)) {
pr_err("Memory failure: %#lx: already hardware poisoned\n",
pfn);
return 0;
}
/*
 * Currently errors on hugetlbfs pages are measured in hugepage units,
 * so nr_pages should be 1 << compound_order. OTOH when errors are on
 * transparent hugepages, they are supposed to be split and error
 * measurement is done in normal page units. So nr_pages should be one
 * in this case.
 */
if (PageHuge(p))
nr_pages = 1 << compound_order(hpage);
else /* normal page or thp */
nr_pages = 1;
num_poisoned_pages_add(nr_pages);
/*
 * We need/can do nothing about count=0 pages.
 * 1) it's a free page, and therefore in safe hand:
 * prep_new_page() will be the gate keeper.
 * 2) it's a free hugepage, which is also safe:
 * an affected hugepage will be dequeued from hugepage freelist,
 * so there's no concern about reusing it ever after.
 * 3) it's part of a non-compound high order page.
 * Implies some kernel user: cannot stop them from
 * R/W the page; let's pray that the page has been
 * used and will be freed some time later.
 * In fact it's dangerous to directly bump up page count from 0,
 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
 */
if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
if (is_free_buddy_page(p)) {
action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
return 0;
} else if (PageHuge(hpage)) {
/*
 * Check "filter hit" and "race with other subpage."
 */
lock_page(hpage);
if (PageHWPoison(hpage)) {
if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
|| (p != hpage && TestSetPageHWPoison(hpage))) {
num_poisoned_pages_sub(nr_pages);
unlock_page(hpage);
return 0;
}
}
set_page_hwpoison_huge_page(hpage);
res = dequeue_hwpoisoned_huge_page(hpage);
action_result(pfn, MF_MSG_FREE_HUGE,
res ? MF_IGNORED : MF_DELAYED);
unlock_page(hpage);
return res;
} else {
action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
return -EBUSY;
}
}
/* THP must be split into base pages before containment can proceed. */
if (!PageHuge(p) && PageTransHuge(hpage)) {
lock_page(hpage);
if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
unlock_page(hpage);
if (!PageAnon(hpage))
pr_err("Memory failure: %#lx: non anonymous thp\n",
pfn);
else
pr_err("Memory failure: %#lx: thp split failed\n",
pfn);
if (TestClearPageHWPoison(p))
num_poisoned_pages_sub(nr_pages);
put_hwpoison_page(p);
return -EBUSY;
}
unlock_page(hpage);
/* After the split, move the reference from the head to the raw page. */
get_hwpoison_page(p);
put_hwpoison_page(hpage);
VM_BUG_ON_PAGE(!page_count(p), p);
hpage = compound_head(p);
}
/*
 * We ignore non-LRU pages for good reasons.
 * - PG_locked is only well defined for LRU pages and a few others
 * - to avoid races with __SetPageLocked()
 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
 * The check (unnecessarily) ignores LRU pages being isolated and
 * walked by the page reclaim code, however that's not a big loss.
 */
if (!PageHuge(p)) {
if (!PageLRU(p))
shake_page(p, 0);
if (!PageLRU(p)) {
/*
 * shake_page could have turned it free.
 */
if (is_free_buddy_page(p)) {
if (flags & MF_COUNT_INCREASED)
action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
else
action_result(pfn, MF_MSG_BUDDY_2ND,
MF_DELAYED);
return 0;
}
}
}
lock_page(hpage);
/*
 * The page could have changed compound pages during the locking.
 * If this happens just bail out.
 */
if (PageCompound(p) && compound_head(p) != orig_head) {
action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
res = -EBUSY;
goto out;
}
/*
 * We use page flags to determine what action should be taken, but
 * the flags can be modified by the error containment action. One
 * example is an mlocked page, where PG_mlocked is cleared by
 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
 * correctly, we save a copy of the page flags at this time.
 */
page_flags = p->flags;
/*
 * unpoison always clear PG_hwpoison inside page lock
 */
if (!PageHWPoison(p)) {
pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
num_poisoned_pages_sub(nr_pages);
unlock_page(hpage);
put_hwpoison_page(hpage);
return 0;
}
if (hwpoison_filter(p)) {
if (TestClearPageHWPoison(p))
num_poisoned_pages_sub(nr_pages);
unlock_page(hpage);
put_hwpoison_page(hpage);
return 0;
}
if (!PageHuge(p) && !PageTransTail(p) && !PageLRU(p))
goto identify_page_state;
/*
 * For error on the tail page, we should set PG_hwpoison
 * on the head page to show that the hugepage is hwpoisoned
 */
if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
action_result(pfn, MF_MSG_POISONED_HUGE, MF_IGNORED);
unlock_page(hpage);
put_hwpoison_page(hpage);
return 0;
}
/*
 * Set PG_hwpoison on all pages in an error hugepage,
 * because containment is done in hugepage unit for now.
 * Since we have done TestSetPageHWPoison() for the head page with
 * page lock held, we can safely set PG_hwpoison bits on tail pages.
 */
if (PageHuge(p))
set_page_hwpoison_huge_page(hpage);
/*
 * It's very difficult to mess with pages currently under IO
 * and in many cases impossible, so we just avoid it here.
 */
wait_on_page_writeback(p);
/*
 * Now take care of user space mappings.
 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
 *
 * When the raw error page is thp tail page, hpage points to the raw
 * page after thp split.
 */
if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
!= SWAP_SUCCESS) {
action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
res = -EBUSY;
goto out;
}
/*
 * Torn down by someone else?
 */
if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
res = -EBUSY;
goto out;
}
identify_page_state:
res = -EBUSY;
/*
 * The first check uses the current page flags which may not have any
 * relevant information. The second check with the saved page flagss is
 * carried out only if the first check can't determine the page status.
 */
for (ps = error_states;; ps++)
if ((p->flags & ps->mask) == ps->res)
break;
page_flags |= (p->flags & (1UL << PG_dirty));
if (!ps->mask)
for (ps = error_states;; ps++)
if ((page_flags & ps->mask) == ps->res)
break;
res = page_action(ps, p, pfn);
out:
unlock_page(hpage);
return res;
}
| 1
|
294,698
|
/*
 * Convert a string of '0'/'1' characters into bytes, 8 characters per
 * byte, MSB first (str[i] maps to bit 7 of out[n]).  Stops at |outlen|
 * bytes, at the end of the string, or at the first character that is not
 * '0', '1', or ' '.  Returns the number of bytes written to |out|.
 *
 * NOTE(review): the `while (str[i]==' ') str++;` loop advances the base
 * pointer while `len` and `i` were computed against the original string —
 * the space-skipping logic looks fragile (len is not recomputed); confirm
 * the intended behaviour for inputs containing spaces.
 */
R_API int r_str_binstr2bin(const char *str, ut8 *out, int outlen) {
int n, i, j, k, ret, len;
len = strlen (str);
for (n = i = 0; i < len; i += 8) {
ret = 0;
while (str[i]==' ') {
str++;
}
/* Only full groups of 8 digits are decoded; a trailing partial
 * group yields a 0 byte (ret stays 0). */
if (i + 7 < len) {
for (k = 0, j = i + 7; j >= i; j--, k++) {
// INVERSE for (k=0,j=i; j<i+8; j++,k++) {
if (str[j] == ' ') {
//k--;
continue;
}
// printf ("---> j=%d (%c) (%02x)\n", j, str[j], str[j]);
if (str[j] == '1') {
ret|=1 << k;
} else if (str[j] != '0') {
return n;
}
}
}
// printf ("-======> %02x\n", ret);
out[n++] = ret;
if (n == outlen) {
return n;
}
}
return n;
}
| 0
|
106,696
|
/*
 * Lua continuation: query the SSH server for the authentication methods
 * available to |username| (stack slot 2), yielding through the userdata's
 * "filter" metafield while libssh2 reports LIBSSH2_ERROR_EAGAIN.
 *
 * Pushes a table of method-name strings, or the string "none_auth" when
 * the server already accepted the "none" method; raises an ssh_error
 * otherwise.  Returns 1 (number of Lua results).
 *
 * NOTE(review): libssh2_userauth_list() receives lua_rawlen(L, 2), the
 * byte length of the username argument, and the returned auth_list is
 * never released (the libssh2_free call is commented out) — possible
 * leak; confirm ownership of the list with the libssh2 API docs.
 */
static int userauth_list (lua_State *L, int status, lua_KContext ctx) {
char *auth_list = NULL;
struct ssh_userdata *state = NULL;
const char *username = luaL_checkstring(L, 2);
state = (struct ssh_userdata *) nseU_checkudata(L, 1, SSH2_UDATA, "ssh2");
assert(state->session != NULL);
while ((auth_list = libssh2_userauth_list(state->session, username, lua_rawlen(L, 2))) == NULL
&& libssh2_session_last_errno(state->session) == LIBSSH2_ERROR_EAGAIN) {
/* Non-blocking wait: re-enter this function after the filter yields. */
luaL_getmetafield(L, 1, "filter");
lua_pushvalue(L, 1);
assert(lua_status(L) == LUA_OK);
lua_callk(L, 1, 0, 0, userauth_list);
}
if (auth_list) {
/* Split the comma-separated list into an array-style Lua table. */
const char *auth = strtok(auth_list, ",");
lua_newtable(L);
do {
lua_pushstring(L, auth);
lua_rawseti(L, -2, lua_rawlen(L, -2) + 1);
}
while ((auth = strtok(NULL, ",")));
//libssh2_free(state->session, (void *)auth_list);
}
else if (libssh2_userauth_authenticated(state->session)) {
lua_pushliteral(L, "none_auth");
}
else {
return ssh_error(L, state->session, "userauth_list");
}
return 1;
}
| 0
|
404,716
|
/* tcmu device close callback — intentionally a no-op in this example
 * handler. */
static void foo_close(struct tcmu_device *dev)
{
/* not supported in this example */
}
| 0
|
474,600
|
/*
 * Block until an in-flight operation slot frees up.  Active-write
 * operations do not consume slots, so only non-active operations
 * (pseudo_op_type == false) are waited on.
 */
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
/* Only non-active operations use up in-flight slots */
mirror_wait_for_any_operation(s, false);
}
| 0
|
137,699
|
// Verifies retry behaviour when the upstream stream is reset between
// attempts while the downstream request is still streaming: the body is
// buffered before the reset, the trailers arrive while no upstream request
// exists, and the retry attempt must replay headers, buffered body, and
// trailers, then complete successfully on a 200 response.
TEST_F(RouterTest, RetryRequestDuringBodyTrailerBetweenAttempts) {
// Buffer decoded body data so it can be replayed on the retry.
Buffer::OwnedImpl decoding_buffer;
EXPECT_CALL(callbacks_, decodingBuffer()).WillRepeatedly(Return(&decoding_buffer));
EXPECT_CALL(callbacks_, addDecodedData(_, true))
.WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) { decoding_buffer.move(data); }));
NiceMock<Http::MockRequestEncoder> encoder1;
Http::ResponseDecoder* response_decoder = nullptr;
expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
Http::TestRequestHeaderMapImpl headers{
{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}, {"myheader", "present"}};
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, false);
// Stream part of the body, then reset the first upstream attempt.
const std::string body1("body1");
Buffer::OwnedImpl buf1(body1);
EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));
router_.decodeData(buf1, false);
router_.retry_state_->expectResetRetry();
encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
// Complete request while there is no upstream request.
Http::TestRequestTrailerMapImpl trailers{{"some", "trailer"}};
router_.decodeTrailers(trailers);
// The retry attempt must replay headers, body, and trailers.
NiceMock<Http::MockRequestEncoder> encoder2;
expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
EXPECT_CALL(encoder2, encodeHeaders(HeaderHasValueRef("myheader", "present"), false));
EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body1), false));
EXPECT_CALL(encoder2, encodeTrailers(HeaderMapEqualRef(&trailers)));
router_.retry_state_->callback_();
EXPECT_EQ(2U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
// Send successful response, verify success.
Http::ResponseHeaderMapPtr response_headers(
new Http::TestResponseHeaderMapImpl({{":status", "200"}}));
EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
.WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {
EXPECT_EQ(headers.Status()->value(), "200");
}));
response_decoder->decodeHeaders(std::move(response_headers), true);
EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
| 0
|
9,083
|
// Authorizer verification hook: forwards all arguments unchanged to the
// registered ms_deliver_verify_authorizer dispatcher and returns its
// verdict (with |isvalid| and |session_key| filled in by the callee).
bool SimpleMessenger::verify_authorizer(Connection *con, int peer_type,
int protocol, bufferlist& authorizer, bufferlist& authorizer_reply,
bool& isvalid,CryptoKey& session_key)
{
return ms_deliver_verify_authorizer(con, peer_type, protocol, authorizer, authorizer_reply, isvalid,session_key);
}
| 1
|
19,159
|
/*
 * NOTE(review): this span is macro-expansion residue — the bodies of
 * OpenSSL's DEFINE_LHASH_OF() / IMPLEMENT_LHASH_DOALL_ARG() macros,
 * flattened with the `##' token-paste operators still in place.  It is
 * not compilable as written and is kept byte-identical here.  Each stub
 * merely casts a typed LHASH_OF(type) pointer to OPENSSL_LHASH and
 * forwards to the corresponding OPENSSL_LH_* function.
 */
static ossl_inline unsigned long lh_ ## type ## _num_items ( LHASH_OF ( type ) * lh ) {
return OPENSSL_LH_num_items ( ( OPENSSL_LHASH * ) lh ) ;
}
static ossl_inline void lh_ ## type ## _node_stats_bio ( const LHASH_OF ( type ) * lh , BIO * out ) {
OPENSSL_LH_node_stats_bio ( ( const OPENSSL_LHASH * ) lh , out ) ;
}
static ossl_inline void lh_ ## type ## _node_usage_stats_bio ( const LHASH_OF ( type ) * lh , BIO * out ) {
OPENSSL_LH_node_usage_stats_bio ( ( const OPENSSL_LHASH * ) lh , out ) ;
}
static ossl_inline void lh_ ## type ## _stats_bio ( const LHASH_OF ( type ) * lh , BIO * out ) {
OPENSSL_LH_stats_bio ( ( const OPENSSL_LHASH * ) lh , out ) ;
}
static ossl_inline unsigned long lh_ ## type ## _get_down_load ( LHASH_OF ( type ) * lh ) {
return OPENSSL_LH_get_down_load ( ( OPENSSL_LHASH * ) lh ) ;
}
static ossl_inline void lh_ ## type ## _set_down_load ( LHASH_OF ( type ) * lh , unsigned long dl ) {
OPENSSL_LH_set_down_load ( ( OPENSSL_LHASH * ) lh , dl ) ;
}
static ossl_inline void lh_ ## type ## _doall ( LHASH_OF ( type ) * lh , void ( * doall ) ( type * ) ) {
OPENSSL_LH_doall ( ( OPENSSL_LHASH * ) lh , ( OPENSSL_LH_DOALL_FUNC ) doall ) ;
}
LHASH_OF ( type ) # define IMPLEMENT_LHASH_DOALL_ARG_CONST ( type , argtype ) int_implement_lhash_doall ( type , argtype , const type ) # define IMPLEMENT_LHASH_DOALL_ARG ( type , argtype ) int_implement_lhash_doall ( type , argtype , type ) # define int_implement_lhash_doall ( type , argtype , cbargtype ) static ossl_inline void lh_ ## type ## _doall_ ## argtype ( LHASH_OF ( type ) * lh , void ( * fn ) ( cbargtype * , argtype * ) , argtype * arg ) {
OPENSSL_LH_doall_arg ( ( OPENSSL_LHASH * ) lh , ( OPENSSL_LH_DOALL_FUNCARG ) fn , ( void * ) arg ) ;
}
LHASH_OF ( type ) DEFINE_LHASH_OF ( OPENSSL_STRING ) ;
# ifdef _MSC_VER # pragma warning ( push ) # pragma warning ( disable : 4090 ) # endif DEFINE_LHASH_OF ( OPENSSL_CSTRING )
| 0
|
135,211
|
/*
 * Decide whether the guest should receive an incoming frame.
 * Returns 1 to accept, 0 to drop.
 *
 * Checks in order: promiscuous mode accepts everything; VLAN-tagged frames
 * (TPID 0x8100) are matched against the per-VID bitmap; then the
 * destination MAC is filtered — multicast/broadcast via the
 * nobcast/nomulti/allmulti flags and the multicast half of the MAC table
 * (entries from first_multi onward), unicast via nouni/alluni, the
 * device's own MAC, and the unicast half of the table.
 */
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
static const uint8_t vlan[] = {0x81, 0x00};
uint8_t *ptr = (uint8_t *)buf;
int i;
if (n->promisc)
return 1;
/* Skip the virtio-net header to reach the Ethernet frame. */
ptr += n->host_hdr_len;
if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
int vid = lduw_be_p(ptr + 14) & 0xfff;
if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
return 0;
}
if (ptr[0] & 1) { // multicast
if (!memcmp(ptr, bcast, sizeof(bcast))) {
return !n->nobcast;
} else if (n->nomulti) {
return 0;
} else if (n->allmulti || n->mac_table.multi_overflow) {
return 1;
}
for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
return 1;
}
}
} else { // unicast
if (n->nouni) {
return 0;
} else if (n->alluni || n->mac_table.uni_overflow) {
return 1;
} else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
return 1;
}
for (i = 0; i < n->mac_table.first_multi; i++) {
if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
return 1;
}
}
}
return 0;
}
| 0
|
79,827
|
xfs_attr3_rmt_write_verify(
struct xfs_buf *bp)
{
/*
 * Write-time verifier for remote extended-attribute buffers: walks the
 * buffer one filesystem block (XFS_LBSIZE) at a time, validates each
 * block header, stamps the log item's LSN into it when one is attached,
 * and recomputes the per-block CRC before the buffer goes to disk.
 */
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_buf_log_item *bip = bp->b_fspriv;
char *ptr;
int len;
xfs_daddr_t bno;
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
ptr = bp->b_addr;
bno = bp->b_bn;
len = BBTOB(bp->b_length);
/* Buffer must span at least one fs block; the loop assumes whole blocks. */
ASSERT(len >= XFS_LBSIZE(mp));
while (len > 0) {
if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
/* Corrupt block header: flag the buffer and abort the write. */
xfs_buf_ioerror(bp, EFSCORRUPTED);
xfs_verifier_error(bp);
return;
}
if (bip) {
struct xfs_attr3_rmt_hdr *rmt;
/* Record the LSN of the log item so recovery can order replays. */
rmt = (struct xfs_attr3_rmt_hdr *)ptr;
rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
}
/* CRC must be computed last, after the LSN stamp above. */
xfs_update_cksum(ptr, XFS_LBSIZE(mp), XFS_ATTR3_RMT_CRC_OFF);
len -= XFS_LBSIZE(mp);
ptr += XFS_LBSIZE(mp);
bno += mp->m_bsize;
}
/* Holds only if the buffer length is an exact multiple of XFS_LBSIZE. */
ASSERT(len == 0);
}
| 0
|
366,743
|
/*
 * LSM hook: may the current task change the scheduling policy/parameters
 * of @p?  The capability check runs first; only when it succeeds is the
 * SELinux PROCESS__SETSCHED permission consulted.  Returns 0 on success
 * or a negative errno from either check.
 */
static int selinux_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp)
{
	int err = cap_task_setscheduler(p, policy, lp);

	if (err)
		return err;

	return current_has_perm(p, PROCESS__SETSCHED);
}
| 0
|
375,325
|
StartupXLOG(void)
{
XLogCtlInsert *Insert;
CheckPoint checkPoint;
bool wasShutdown;
bool reachedStopPoint = false;
bool haveBackupLabel = false;
XLogRecPtr RecPtr,
checkPointLoc,
EndOfLog;
XLogSegNo endLogSegNo;
TimeLineID PrevTimeLineID;
XLogRecord *record;
TransactionId oldestActiveXID;
bool backupEndRequired = false;
bool backupFromStandby = false;
DBState dbstate_at_startup;
XLogReaderState *xlogreader;
XLogPageReadPrivate private;
bool fast_promoted = false;
/*
* Read control file and check XLOG status looks valid.
*
* Note: in most control paths, *ControlFile is already valid and we need
* not do ReadControlFile() here, but might as well do it to be sure.
*/
ReadControlFile();
if (ControlFile->state < DB_SHUTDOWNED ||
ControlFile->state > DB_IN_PRODUCTION ||
!XRecOffIsValid(ControlFile->checkPoint))
ereport(FATAL,
(errmsg("control file contains invalid data")));
if (ControlFile->state == DB_SHUTDOWNED)
{
/* This is the expected case, so don't be chatty in standalone mode */
ereport(IsPostmasterEnvironment ? LOG : NOTICE,
(errmsg("database system was shut down at %s",
str_time(ControlFile->time))));
}
else if (ControlFile->state == DB_SHUTDOWNED_IN_RECOVERY)
ereport(LOG,
(errmsg("database system was shut down in recovery at %s",
str_time(ControlFile->time))));
else if (ControlFile->state == DB_SHUTDOWNING)
ereport(LOG,
(errmsg("database system shutdown was interrupted; last known up at %s",
str_time(ControlFile->time))));
else if (ControlFile->state == DB_IN_CRASH_RECOVERY)
ereport(LOG,
(errmsg("database system was interrupted while in recovery at %s",
str_time(ControlFile->time)),
errhint("This probably means that some data is corrupted and"
" you will have to use the last backup for recovery.")));
else if (ControlFile->state == DB_IN_ARCHIVE_RECOVERY)
ereport(LOG,
(errmsg("database system was interrupted while in recovery at log time %s",
str_time(ControlFile->checkPointCopy.time)),
errhint("If this has occurred more than once some data might be corrupted"
" and you might need to choose an earlier recovery target.")));
else if (ControlFile->state == DB_IN_PRODUCTION)
ereport(LOG,
(errmsg("database system was interrupted; last known up at %s",
str_time(ControlFile->time))));
/* This is just to allow attaching to startup process with a debugger */
#ifdef XLOG_REPLAY_DELAY
if (ControlFile->state != DB_SHUTDOWNED)
pg_usleep(60000000L);
#endif
/*
* Verify that pg_xlog and pg_xlog/archive_status exist. In cases where
* someone has performed a copy for PITR, these directories may have been
* excluded and need to be re-created.
*/
ValidateXLOGDirectoryStructure();
/*
* Clear out any old relcache cache files. This is *necessary* if we do
* any WAL replay, since that would probably result in the cache files
* being out of sync with database reality. In theory we could leave them
* in place if the database had been cleanly shut down, but it seems
* safest to just remove them always and let them be rebuilt during the
* first backend startup.
*/
RelationCacheInitFileRemove();
/*
* Initialize on the assumption we want to recover to the latest timeline
* that's active according to pg_control.
*/
if (ControlFile->minRecoveryPointTLI >
ControlFile->checkPointCopy.ThisTimeLineID)
recoveryTargetTLI = ControlFile->minRecoveryPointTLI;
else
recoveryTargetTLI = ControlFile->checkPointCopy.ThisTimeLineID;
/*
* Check for recovery control file, and if so set up state for offline
* recovery
*/
readRecoveryCommandFile();
/*
* Save archive_cleanup_command in shared memory so that other processes
* can see it.
*/
strlcpy(XLogCtl->archiveCleanupCommand,
archiveCleanupCommand ? archiveCleanupCommand : "",
sizeof(XLogCtl->archiveCleanupCommand));
if (ArchiveRecoveryRequested)
{
if (StandbyModeRequested)
ereport(LOG,
(errmsg("entering standby mode")));
else if (recoveryTarget == RECOVERY_TARGET_XID)
ereport(LOG,
(errmsg("starting point-in-time recovery to XID %u",
recoveryTargetXid)));
else if (recoveryTarget == RECOVERY_TARGET_TIME)
ereport(LOG,
(errmsg("starting point-in-time recovery to %s",
timestamptz_to_str(recoveryTargetTime))));
else if (recoveryTarget == RECOVERY_TARGET_NAME)
ereport(LOG,
(errmsg("starting point-in-time recovery to \"%s\"",
recoveryTargetName)));
else if (recoveryTarget == RECOVERY_TARGET_IMMEDIATE)
ereport(LOG,
(errmsg("starting point-in-time recovery to earliest consistent point")));
else
ereport(LOG,
(errmsg("starting archive recovery")));
}
/*
* Take ownership of the wakeup latch if we're going to sleep during
* recovery.
*/
if (StandbyModeRequested)
OwnLatch(&XLogCtl->recoveryWakeupLatch);
/* Set up XLOG reader facility */
MemSet(&private, 0, sizeof(XLogPageReadPrivate));
xlogreader = XLogReaderAllocate(&XLogPageRead, &private);
if (!xlogreader)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
errdetail("Failed while allocating an XLog reading processor.")));
xlogreader->system_identifier = ControlFile->system_identifier;
if (read_backup_label(&checkPointLoc, &backupEndRequired,
&backupFromStandby))
{
/*
* Archive recovery was requested, and thanks to the backup label
* file, we know how far we need to replay to reach consistency. Enter
* archive recovery directly.
*/
InArchiveRecovery = true;
if (StandbyModeRequested)
StandbyMode = true;
/*
* When a backup_label file is present, we want to roll forward from
* the checkpoint it identifies, rather than using pg_control.
*/
record = ReadCheckpointRecord(xlogreader, checkPointLoc, 0, true);
if (record != NULL)
{
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
ereport(DEBUG1,
(errmsg("checkpoint record is at %X/%X",
(uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
/*
* Make sure that REDO location exists. This may not be the case
* if there was a crash during an online backup, which left a
* backup_label around that references a WAL segment that's
* already been archived.
*/
if (checkPoint.redo < checkPointLoc)
{
if (!ReadRecord(xlogreader, checkPoint.redo, LOG, false))
ereport(FATAL,
(errmsg("could not find redo location referenced by checkpoint record"),
errhint("If you are not restoring from a backup, try removing the file \"%s/backup_label\".", DataDir)));
}
}
else
{
ereport(FATAL,
(errmsg("could not locate required checkpoint record"),
errhint("If you are not restoring from a backup, try removing the file \"%s/backup_label\".", DataDir)));
wasShutdown = false; /* keep compiler quiet */
}
/* set flag to delete it later */
haveBackupLabel = true;
}
else
{
/*
* It's possible that archive recovery was requested, but we don't
* know how far we need to replay the WAL before we reach consistency.
* This can happen for example if a base backup is taken from a
* running server using an atomic filesystem snapshot, without calling
* pg_start/stop_backup. Or if you just kill a running master server
* and put it into archive recovery by creating a recovery.conf file.
*
* Our strategy in that case is to perform crash recovery first,
* replaying all the WAL present in pg_xlog, and only enter archive
* recovery after that.
*
* But usually we already know how far we need to replay the WAL (up
* to minRecoveryPoint, up to backupEndPoint, or until we see an
* end-of-backup record), and we can enter archive recovery directly.
*/
if (ArchiveRecoveryRequested &&
(ControlFile->minRecoveryPoint != InvalidXLogRecPtr ||
ControlFile->backupEndRequired ||
ControlFile->backupEndPoint != InvalidXLogRecPtr ||
ControlFile->state == DB_SHUTDOWNED))
{
InArchiveRecovery = true;
if (StandbyModeRequested)
StandbyMode = true;
}
/*
* Get the last valid checkpoint record. If the latest one according
* to pg_control is broken, try the next-to-last one.
*/
checkPointLoc = ControlFile->checkPoint;
RedoStartLSN = ControlFile->checkPointCopy.redo;
record = ReadCheckpointRecord(xlogreader, checkPointLoc, 1, true);
if (record != NULL)
{
ereport(DEBUG1,
(errmsg("checkpoint record is at %X/%X",
(uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
}
else if (StandbyMode)
{
/*
* The last valid checkpoint record required for a streaming
* recovery exists in neither standby nor the primary.
*/
ereport(PANIC,
(errmsg("could not locate a valid checkpoint record")));
}
else
{
checkPointLoc = ControlFile->prevCheckPoint;
record = ReadCheckpointRecord(xlogreader, checkPointLoc, 2, true);
if (record != NULL)
{
ereport(LOG,
(errmsg("using previous checkpoint record at %X/%X",
(uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
ereport(PANIC,
(errmsg("could not locate a valid checkpoint record")));
}
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
}
/*
* If the location of the checkpoint record is not on the expected
* timeline in the history of the requested timeline, we cannot proceed:
* the backup is not part of the history of the requested timeline.
*/
Assert(expectedTLEs); /* was initialized by reading checkpoint
* record */
if (tliOfPointInHistory(checkPointLoc, expectedTLEs) !=
checkPoint.ThisTimeLineID)
{
XLogRecPtr switchpoint;
/*
* tliSwitchPoint will throw an error if the checkpoint's timeline is
* not in expectedTLEs at all.
*/
switchpoint = tliSwitchPoint(ControlFile->checkPointCopy.ThisTimeLineID, expectedTLEs, NULL);
ereport(FATAL,
(errmsg("requested timeline %u is not a child of this server's history",
recoveryTargetTLI),
errdetail("Latest checkpoint is at %X/%X on timeline %u, but in the history of the requested timeline, the server forked off from that timeline at %X/%X.",
(uint32) (ControlFile->checkPoint >> 32),
(uint32) ControlFile->checkPoint,
ControlFile->checkPointCopy.ThisTimeLineID,
(uint32) (switchpoint >> 32),
(uint32) switchpoint)));
}
/*
* The min recovery point should be part of the requested timeline's
* history, too.
*/
if (!XLogRecPtrIsInvalid(ControlFile->minRecoveryPoint) &&
tliOfPointInHistory(ControlFile->minRecoveryPoint - 1, expectedTLEs) !=
ControlFile->minRecoveryPointTLI)
ereport(FATAL,
(errmsg("requested timeline %u does not contain minimum recovery point %X/%X on timeline %u",
recoveryTargetTLI,
(uint32) (ControlFile->minRecoveryPoint >> 32),
(uint32) ControlFile->minRecoveryPoint,
ControlFile->minRecoveryPointTLI)));
LastRec = RecPtr = checkPointLoc;
ereport(DEBUG1,
(errmsg("redo record is at %X/%X; shutdown %s",
(uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
wasShutdown ? "TRUE" : "FALSE")));
ereport(DEBUG1,
(errmsg("next transaction ID: %u/%u; next OID: %u",
checkPoint.nextXidEpoch, checkPoint.nextXid,
checkPoint.nextOid)));
ereport(DEBUG1,
(errmsg("next MultiXactId: %u; next MultiXactOffset: %u",
checkPoint.nextMulti, checkPoint.nextMultiOffset)));
ereport(DEBUG1,
(errmsg("oldest unfrozen transaction ID: %u, in database %u",
checkPoint.oldestXid, checkPoint.oldestXidDB)));
ereport(DEBUG1,
(errmsg("oldest MultiXactId: %u, in database %u",
checkPoint.oldestMulti, checkPoint.oldestMultiDB)));
if (!TransactionIdIsNormal(checkPoint.nextXid))
ereport(PANIC,
(errmsg("invalid next transaction ID")));
/* initialize shared memory variables from the checkpoint record */
ShmemVariableCache->nextXid = checkPoint.nextXid;
ShmemVariableCache->nextOid = checkPoint.nextOid;
ShmemVariableCache->oidCount = 0;
MultiXactSetNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset);
SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
SetMultiXactIdLimit(checkPoint.oldestMulti, checkPoint.oldestMultiDB);
XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
XLogCtl->ckptXid = checkPoint.nextXid;
/*
* Initialize replication slots, before there's a chance to remove
* required resources.
*/
StartupReplicationSlots(checkPoint.redo);
/*
* Startup MultiXact. We need to do this early for two reasons: one
* is that we might try to access multixacts when we do tuple freezing,
* and the other is we need its state initialized because we attempt
* truncation during restartpoints.
*/
StartupMultiXact();
/*
* Initialize unlogged LSN. On a clean shutdown, it's restored from the
* control file. On recovery, all unlogged relations are blown away, so
* the unlogged LSN counter can be reset too.
*/
if (ControlFile->state == DB_SHUTDOWNED)
XLogCtl->unloggedLSN = ControlFile->unloggedLSN;
else
XLogCtl->unloggedLSN = 1;
/*
* We must replay WAL entries using the same TimeLineID they were created
* under, so temporarily adopt the TLI indicated by the checkpoint (see
* also xlog_redo()).
*/
ThisTimeLineID = checkPoint.ThisTimeLineID;
/*
* Copy any missing timeline history files between 'now' and the recovery
* target timeline from archive to pg_xlog. While we don't need those
* files ourselves - the history file of the recovery target timeline
* covers all the previous timelines in the history too - a cascading
* standby server might be interested in them. Or, if you archive the WAL
* from this server to a different archive than the master, it'd be good
* for all the history files to get archived there after failover, so that
* you can use one of the old timelines as a PITR target. Timeline history
* files are small, so it's better to copy them unnecessarily than not
* copy them and regret later.
*/
restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
lastFullPageWrites = checkPoint.fullPageWrites;
RedoRecPtr = XLogCtl->RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
if (RecPtr < checkPoint.redo)
ereport(PANIC,
(errmsg("invalid redo in checkpoint record")));
/*
* Check whether we need to force recovery from WAL. If it appears to
* have been a clean shutdown and we did not have a recovery.conf file,
* then assume no recovery needed.
*/
if (checkPoint.redo < RecPtr)
{
if (wasShutdown)
ereport(PANIC,
(errmsg("invalid redo record in shutdown checkpoint")));
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
InRecovery = true;
else if (ArchiveRecoveryRequested)
{
/* force recovery due to presence of recovery.conf */
InRecovery = true;
}
/* REDO */
if (InRecovery)
{
int rmid;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
/*
* Update pg_control to show that we are recovering and to show the
* selected checkpoint as the place we are starting from. We also mark
* pg_control with any minimum recovery stop point obtained from a
* backup history file.
*/
dbstate_at_startup = ControlFile->state;
if (InArchiveRecovery)
ControlFile->state = DB_IN_ARCHIVE_RECOVERY;
else
{
ereport(LOG,
(errmsg("database system was not properly shut down; "
"automatic recovery in progress")));
if (recoveryTargetTLI > ControlFile->checkPointCopy.ThisTimeLineID)
ereport(LOG,
(errmsg("crash recovery starts in timeline %u "
"and has target timeline %u",
ControlFile->checkPointCopy.ThisTimeLineID,
recoveryTargetTLI)));
ControlFile->state = DB_IN_CRASH_RECOVERY;
}
ControlFile->prevCheckPoint = ControlFile->checkPoint;
ControlFile->checkPoint = checkPointLoc;
ControlFile->checkPointCopy = checkPoint;
if (InArchiveRecovery)
{
/* initialize minRecoveryPoint if not set yet */
if (ControlFile->minRecoveryPoint < checkPoint.redo)
{
ControlFile->minRecoveryPoint = checkPoint.redo;
ControlFile->minRecoveryPointTLI = checkPoint.ThisTimeLineID;
}
}
/*
* Set backupStartPoint if we're starting recovery from a base backup.
*
* Set backupEndPoint and use minRecoveryPoint as the backup end
* location if we're starting recovery from a base backup which was
* taken from the standby. In this case, the database system status in
* pg_control must indicate DB_IN_ARCHIVE_RECOVERY. If not, which
* means that backup is corrupted, so we cancel recovery.
*/
if (haveBackupLabel)
{
ControlFile->backupStartPoint = checkPoint.redo;
ControlFile->backupEndRequired = backupEndRequired;
if (backupFromStandby)
{
if (dbstate_at_startup != DB_IN_ARCHIVE_RECOVERY)
ereport(FATAL,
(errmsg("backup_label contains data inconsistent with control file"),
errhint("This means that the backup is corrupted and you will "
"have to use another backup for recovery.")));
ControlFile->backupEndPoint = ControlFile->minRecoveryPoint;
}
}
ControlFile->time = (pg_time_t) time(NULL);
/* No need to hold ControlFileLock yet, we aren't up far enough */
UpdateControlFile();
/* initialize our local copy of minRecoveryPoint */
minRecoveryPoint = ControlFile->minRecoveryPoint;
minRecoveryPointTLI = ControlFile->minRecoveryPointTLI;
/*
* Reset pgstat data, because it may be invalid after recovery.
*/
pgstat_reset_all();
/*
* If there was a backup label file, it's done its job and the info
* has now been propagated into pg_control. We must get rid of the
* label file so that if we crash during recovery, we'll pick up at
* the latest recovery restartpoint instead of going all the way back
* to the backup start point. It seems prudent though to just rename
* the file out of the way rather than delete it completely.
*/
if (haveBackupLabel)
{
unlink(BACKUP_LABEL_OLD);
if (rename(BACKUP_LABEL_FILE, BACKUP_LABEL_OLD) != 0)
ereport(FATAL,
(errcode_for_file_access(),
errmsg("could not rename file \"%s\" to \"%s\": %m",
BACKUP_LABEL_FILE, BACKUP_LABEL_OLD)));
}
/* Check that the GUCs used to generate the WAL allow recovery */
CheckRequiredParameterValues();
/*
* We're in recovery, so unlogged relations may be trashed and must be
* reset. This should be done BEFORE allowing Hot Standby
* connections, so that read-only backends don't try to read whatever
* garbage is left over from before.
*/
ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP);
/*
* Likewise, delete any saved transaction snapshot files that got left
* behind by crashed backends.
*/
DeleteAllExportedSnapshotFiles();
/*
* Initialize for Hot Standby, if enabled. We won't let backends in
* yet, not until we've reached the min recovery point specified in
* control file and we've established a recovery snapshot from a
* running-xacts WAL record.
*/
if (ArchiveRecoveryRequested && EnableHotStandby)
{
TransactionId *xids;
int nxids;
ereport(DEBUG1,
(errmsg("initializing for hot standby")));
InitRecoveryTransactionEnvironment();
if (wasShutdown)
oldestActiveXID = PrescanPreparedTransactions(&xids, &nxids);
else
oldestActiveXID = checkPoint.oldestActiveXid;
Assert(TransactionIdIsValid(oldestActiveXID));
/* Tell procarray about the range of xids it has to deal with */
ProcArrayInitRecovery(ShmemVariableCache->nextXid);
/*
* Startup commit log and subtrans only. MultiXact has already
* been started up and other SLRUs are not maintained during
* recovery and need not be started yet.
*/
StartupCLOG();
StartupSUBTRANS(oldestActiveXID);
/*
* If we're beginning at a shutdown checkpoint, we know that
* nothing was running on the master at this point. So fake-up an
* empty running-xacts record and use that here and now. Recover
* additional standby state for prepared transactions.
*/
if (wasShutdown)
{
RunningTransactionsData running;
TransactionId latestCompletedXid;
/*
* Construct a RunningTransactions snapshot representing a
* shut down server, with only prepared transactions still
* alive. We're never overflowed at this point because all
* subxids are listed with their parent prepared transactions.
*/
running.xcnt = nxids;
running.subxcnt = 0;
running.subxid_overflow = false;
running.nextXid = checkPoint.nextXid;
running.oldestRunningXid = oldestActiveXID;
latestCompletedXid = checkPoint.nextXid;
TransactionIdRetreat(latestCompletedXid);
Assert(TransactionIdIsNormal(latestCompletedXid));
running.latestCompletedXid = latestCompletedXid;
running.xids = xids;
ProcArrayApplyRecoveryInfo(&running);
StandbyRecoverPreparedTransactions(false);
}
}
/* Initialize resource managers */
for (rmid = 0; rmid <= RM_MAX_ID; rmid++)
{
if (RmgrTable[rmid].rm_startup != NULL)
RmgrTable[rmid].rm_startup();
}
/*
* Initialize shared variables for tracking progress of WAL replay,
* as if we had just replayed the record before the REDO location.
*/
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->replayEndRecPtr = checkPoint.redo;
xlogctl->replayEndTLI = ThisTimeLineID;
xlogctl->lastReplayedEndRecPtr = checkPoint.redo;
xlogctl->lastReplayedTLI = ThisTimeLineID;
xlogctl->recoveryLastXTime = 0;
xlogctl->currentChunkStartTime = 0;
xlogctl->recoveryPause = false;
SpinLockRelease(&xlogctl->info_lck);
/* Also ensure XLogReceiptTime has a sane value */
XLogReceiptTime = GetCurrentTimestamp();
/*
* Let postmaster know we've started redo now, so that it can launch
* checkpointer to perform restartpoints. We don't bother during
* crash recovery as restartpoints can only be performed during
* archive recovery. And we'd like to keep crash recovery simple, to
* avoid introducing bugs that could affect you when recovering after
* crash.
*
* After this point, we can no longer assume that we're the only
* process in addition to postmaster! Also, fsync requests are
* subsequently to be handled by the checkpointer, not locally.
*/
if (ArchiveRecoveryRequested && IsUnderPostmaster)
{
PublishStartupProcessInformation();
SetForwardFsyncRequests();
SendPostmasterSignal(PMSIGNAL_RECOVERY_STARTED);
bgwriterLaunched = true;
}
/*
* Allow read-only connections immediately if we're consistent
* already.
*/
CheckRecoveryConsistency();
/*
* Find the first record that logically follows the checkpoint --- it
* might physically precede it, though.
*/
if (checkPoint.redo < RecPtr)
{
/* back up to find the record */
record = ReadRecord(xlogreader, checkPoint.redo, PANIC, false);
}
else
{
/* just have to read next record after CheckPoint */
record = ReadRecord(xlogreader, InvalidXLogRecPtr, LOG, false);
}
if (record != NULL)
{
ErrorContextCallback errcallback;
TimestampTz xtime;
InRedo = true;
ereport(LOG,
(errmsg("redo starts at %X/%X",
(uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
/*
* main redo apply loop
*/
do
{
bool switchedTLI = false;
#ifdef WAL_DEBUG
if (XLOG_DEBUG ||
(rmid == RM_XACT_ID && trace_recovery_messages <= DEBUG2) ||
(rmid != RM_XACT_ID && trace_recovery_messages <= DEBUG3))
{
StringInfoData buf;
initStringInfo(&buf);
appendStringInfo(&buf, "REDO @ %X/%X; LSN %X/%X: ",
(uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr,
(uint32) (EndRecPtr >> 32), (uint32) EndRecPtr);
xlog_outrec(&buf, record);
appendStringInfoString(&buf, " - ");
RmgrTable[record->xl_rmid].rm_desc(&buf,
record->xl_info,
XLogRecGetData(record));
elog(LOG, "%s", buf.data);
pfree(buf.data);
}
#endif
/* Handle interrupt signals of startup process */
HandleStartupProcInterrupts();
/*
* Pause WAL replay, if requested by a hot-standby session via
* SetRecoveryPause().
*
* Note that we intentionally don't take the info_lck spinlock
* here. We might therefore read a slightly stale value of
* the recoveryPause flag, but it can't be very stale (no
* worse than the last spinlock we did acquire). Since a
* pause request is a pretty asynchronous thing anyway,
* possibly responding to it one WAL record later than we
* otherwise would is a minor issue, so it doesn't seem worth
* adding another spinlock cycle to prevent that.
*/
if (xlogctl->recoveryPause)
recoveryPausesHere();
/*
* Have we reached our recovery target?
*/
if (recoveryStopsBefore(record))
{
reachedStopPoint = true; /* see below */
break;
}
/*
* If we've been asked to lag the master, wait on
* latch until enough time has passed.
*/
if (recoveryApplyDelay(record))
{
/*
* We test for paused recovery again here. If
* user sets delayed apply, it may be because
* they expect to pause recovery in case of
* problems, so we must test again here otherwise
* pausing during the delay-wait wouldn't work.
*/
if (xlogctl->recoveryPause)
recoveryPausesHere();
}
/* Setup error traceback support for ereport() */
errcallback.callback = rm_redo_error_callback;
errcallback.arg = (void *) record;
errcallback.previous = error_context_stack;
error_context_stack = &errcallback;
/*
* ShmemVariableCache->nextXid must be beyond record's xid.
*
* We don't expect anyone else to modify nextXid, hence we
* don't need to hold a lock while examining it. We still
* acquire the lock to modify it, though.
*/
if (TransactionIdFollowsOrEquals(record->xl_xid,
ShmemVariableCache->nextXid))
{
LWLockAcquire(XidGenLock, LW_EXCLUSIVE);
ShmemVariableCache->nextXid = record->xl_xid;
TransactionIdAdvance(ShmemVariableCache->nextXid);
LWLockRelease(XidGenLock);
}
/*
* Before replaying this record, check if this record causes
* the current timeline to change. The record is already
* considered to be part of the new timeline, so we update
* ThisTimeLineID before replaying it. That's important so
* that replayEndTLI, which is recorded as the minimum
* recovery point's TLI if recovery stops after this record,
* is set correctly.
*/
if (record->xl_rmid == RM_XLOG_ID)
{
TimeLineID newTLI = ThisTimeLineID;
TimeLineID prevTLI = ThisTimeLineID;
uint8 info = record->xl_info & ~XLR_INFO_MASK;
if (info == XLOG_CHECKPOINT_SHUTDOWN)
{
CheckPoint checkPoint;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
newTLI = checkPoint.ThisTimeLineID;
prevTLI = checkPoint.PrevTimeLineID;
}
else if (info == XLOG_END_OF_RECOVERY)
{
xl_end_of_recovery xlrec;
memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_end_of_recovery));
newTLI = xlrec.ThisTimeLineID;
prevTLI = xlrec.PrevTimeLineID;
}
if (newTLI != ThisTimeLineID)
{
/* Check that it's OK to switch to this TLI */
checkTimeLineSwitch(EndRecPtr, newTLI, prevTLI);
/* Following WAL records should be run with new TLI */
ThisTimeLineID = newTLI;
switchedTLI = true;
}
}
/*
* Update shared replayEndRecPtr before replaying this record,
* so that XLogFlush will update minRecoveryPoint correctly.
*/
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->replayEndRecPtr = EndRecPtr;
xlogctl->replayEndTLI = ThisTimeLineID;
SpinLockRelease(&xlogctl->info_lck);
/*
* If we are attempting to enter Hot Standby mode, process
* XIDs we see
*/
if (standbyState >= STANDBY_INITIALIZED &&
TransactionIdIsValid(record->xl_xid))
RecordKnownAssignedTransactionIds(record->xl_xid);
/* Now apply the WAL record itself */
RmgrTable[record->xl_rmid].rm_redo(EndRecPtr, record);
/* Pop the error context stack */
error_context_stack = errcallback.previous;
/*
* Update lastReplayedEndRecPtr after this record has been
* successfully replayed.
*/
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->lastReplayedEndRecPtr = EndRecPtr;
xlogctl->lastReplayedTLI = ThisTimeLineID;
SpinLockRelease(&xlogctl->info_lck);
/* Remember this record as the last-applied one */
LastRec = ReadRecPtr;
/* Allow read-only connections if we're consistent now */
CheckRecoveryConsistency();
/*
* If this record was a timeline switch, wake up any
* walsenders to notice that we are on a new timeline.
*/
if (switchedTLI && AllowCascadeReplication())
WalSndWakeup();
/* Exit loop if we reached inclusive recovery target */
if (recoveryStopsAfter(record))
{
reachedStopPoint = true;
break;
}
/* Else, try to fetch the next WAL record */
record = ReadRecord(xlogreader, InvalidXLogRecPtr, LOG, false);
} while (record != NULL);
/*
* end of main redo apply loop
*/
if (recoveryPauseAtTarget && reachedStopPoint)
{
SetRecoveryPause(true);
recoveryPausesHere();
}
ereport(LOG,
(errmsg("redo done at %X/%X",
(uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
xtime = GetLatestXTime();
if (xtime)
ereport(LOG,
(errmsg("last completed transaction was at log time %s",
timestamptz_to_str(xtime))));
InRedo = false;
}
else
{
/* there are no WAL records following the checkpoint */
ereport(LOG,
(errmsg("redo is not required")));
}
}
/*
* Kill WAL receiver, if it's still running, before we continue to write
* the startup checkpoint record. It will trump over the checkpoint and
* subsequent records if it's still alive when we start writing WAL.
*/
ShutdownWalRcv();
/*
* We don't need the latch anymore. It's not strictly necessary to disown
* it, but let's do it for the sake of tidiness.
*/
if (StandbyModeRequested)
DisownLatch(&XLogCtl->recoveryWakeupLatch);
/*
* We are now done reading the xlog from stream. Turn off streaming
* recovery to force fetching the files (which would be required at end of
* recovery, e.g., timeline history file) from archive or pg_xlog.
*/
StandbyMode = false;
/*
* Re-fetch the last valid or last applied record, so we can identify the
* exact endpoint of what we consider the valid portion of WAL.
*/
record = ReadRecord(xlogreader, LastRec, PANIC, false);
EndOfLog = EndRecPtr;
XLByteToPrevSeg(EndOfLog, endLogSegNo);
/*
* Complain if we did not roll forward far enough to render the backup
* dump consistent. Note: it is indeed okay to look at the local variable
* minRecoveryPoint here, even though ControlFile->minRecoveryPoint might
* be further ahead --- ControlFile->minRecoveryPoint cannot have been
* advanced beyond the WAL we processed.
*/
if (InRecovery &&
(EndOfLog < minRecoveryPoint ||
!XLogRecPtrIsInvalid(ControlFile->backupStartPoint)))
{
if (reachedStopPoint)
{
/* stopped because of stop request */
ereport(FATAL,
(errmsg("requested recovery stop point is before consistent recovery point")));
}
/*
* Ran off end of WAL before reaching end-of-backup WAL record, or
* minRecoveryPoint. That's usually a bad sign, indicating that you
* tried to recover from an online backup but never called
* pg_stop_backup(), or you didn't archive all the WAL up to that
* point. However, this also happens in crash recovery, if the system
* crashes while an online backup is in progress. We must not treat
* that as an error, or the database will refuse to start up.
*/
if (ArchiveRecoveryRequested || ControlFile->backupEndRequired)
{
if (ControlFile->backupEndRequired)
ereport(FATAL,
(errmsg("WAL ends before end of online backup"),
errhint("All WAL generated while online backup was taken must be available at recovery.")));
else if (!XLogRecPtrIsInvalid(ControlFile->backupStartPoint))
ereport(FATAL,
(errmsg("WAL ends before end of online backup"),
errhint("Online backup started with pg_start_backup() must be ended with pg_stop_backup(), and all WAL up to that point must be available at recovery.")));
else
ereport(FATAL,
(errmsg("WAL ends before consistent recovery point")));
}
}
/*
* Consider whether we need to assign a new timeline ID.
*
* If we are doing an archive recovery, we always assign a new ID. This
* handles a couple of issues. If we stopped short of the end of WAL
* during recovery, then we are clearly generating a new timeline and must
* assign it a unique new ID. Even if we ran to the end, modifying the
* current last segment is problematic because it may result in trying to
* overwrite an already-archived copy of that segment, and we encourage
* DBAs to make their archive_commands reject that. We can dodge the
* problem by making the new active segment have a new timeline ID.
*
* In a normal crash recovery, we can just extend the timeline we were in.
*/
PrevTimeLineID = ThisTimeLineID;
if (ArchiveRecoveryRequested)
{
char reason[200];
Assert(InArchiveRecovery);
ThisTimeLineID = findNewestTimeLine(recoveryTargetTLI) + 1;
ereport(LOG,
(errmsg("selected new timeline ID: %u", ThisTimeLineID)));
/*
* Create a comment for the history file to explain why and where
* timeline changed.
*/
if (recoveryTarget == RECOVERY_TARGET_XID)
snprintf(reason, sizeof(reason),
"%s transaction %u",
recoveryStopAfter ? "after" : "before",
recoveryStopXid);
else if (recoveryTarget == RECOVERY_TARGET_TIME)
snprintf(reason, sizeof(reason),
"%s %s\n",
recoveryStopAfter ? "after" : "before",
timestamptz_to_str(recoveryStopTime));
else if (recoveryTarget == RECOVERY_TARGET_NAME)
snprintf(reason, sizeof(reason),
"at restore point \"%s\"",
recoveryStopName);
else if (recoveryTarget == RECOVERY_TARGET_IMMEDIATE)
snprintf(reason, sizeof(reason), "reached consistency");
else
snprintf(reason, sizeof(reason), "no recovery target specified");
writeTimeLineHistory(ThisTimeLineID, recoveryTargetTLI,
EndRecPtr, reason);
}
/* Save the selected TimeLineID in shared memory, too */
XLogCtl->ThisTimeLineID = ThisTimeLineID;
XLogCtl->PrevTimeLineID = PrevTimeLineID;
/*
* We are now done reading the old WAL. Turn off archive fetching if it
* was active, and make a writable copy of the last WAL segment. (Note
* that we also have a copy of the last block of the old WAL in readBuf;
* we will use that below.)
*/
if (ArchiveRecoveryRequested)
exitArchiveRecovery(xlogreader->readPageTLI, endLogSegNo);
/*
* Prepare to write WAL starting at EndOfLog position, and init xlog
* buffer cache using the block containing the last record from the
* previous incarnation.
*/
openLogSegNo = endLogSegNo;
openLogFile = XLogFileOpen(openLogSegNo);
openLogOff = 0;
Insert = &XLogCtl->Insert;
Insert->PrevBytePos = XLogRecPtrToBytePos(LastRec);
Insert->CurrBytePos = XLogRecPtrToBytePos(EndOfLog);
/*
* Tricky point here: readBuf contains the *last* block that the LastRec
* record spans, not the one it starts in. The last block is indeed the
* one we want to use.
*/
if (EndOfLog % XLOG_BLCKSZ != 0)
{
char *page;
int len;
int firstIdx;
XLogRecPtr pageBeginPtr;
pageBeginPtr = EndOfLog - (EndOfLog % XLOG_BLCKSZ);
Assert(readOff == pageBeginPtr % XLogSegSize);
firstIdx = XLogRecPtrToBufIdx(EndOfLog);
/* Copy the valid part of the last block, and zero the rest */
page = &XLogCtl->pages[firstIdx * XLOG_BLCKSZ];
len = EndOfLog % XLOG_BLCKSZ;
memcpy(page, xlogreader->readBuf, len);
memset(page + len, 0, XLOG_BLCKSZ - len);
XLogCtl->xlblocks[firstIdx] = pageBeginPtr + XLOG_BLCKSZ;
XLogCtl->InitializedUpTo = pageBeginPtr + XLOG_BLCKSZ;
}
else
{
/*
* There is no partial block to copy. Just set InitializedUpTo,
* and let the first attempt to insert a log record to initialize
* the next buffer.
*/
XLogCtl->InitializedUpTo = EndOfLog;
}
LogwrtResult.Write = LogwrtResult.Flush = EndOfLog;
XLogCtl->LogwrtResult = LogwrtResult;
XLogCtl->LogwrtRqst.Write = EndOfLog;
XLogCtl->LogwrtRqst.Flush = EndOfLog;
/* Pre-scan prepared transactions to find out the range of XIDs present */
oldestActiveXID = PrescanPreparedTransactions(NULL, NULL);
/*
* Update full_page_writes in shared memory and write an XLOG_FPW_CHANGE
* record before resource manager writes cleanup WAL records or checkpoint
* record is written.
*/
Insert->fullPageWrites = lastFullPageWrites;
LocalSetXLogInsertAllowed();
UpdateFullPageWrites();
LocalXLogInsertAllowed = -1;
if (InRecovery)
{
int rmid;
/*
* Resource managers might need to write WAL records, eg, to record
* index cleanup actions. So temporarily enable XLogInsertAllowed in
* this process only.
*/
LocalSetXLogInsertAllowed();
/*
* Allow resource managers to do any required cleanup.
*/
for (rmid = 0; rmid <= RM_MAX_ID; rmid++)
{
if (RmgrTable[rmid].rm_cleanup != NULL)
RmgrTable[rmid].rm_cleanup();
}
/* Disallow XLogInsert again */
LocalXLogInsertAllowed = -1;
/*
* Perform a checkpoint to update all our recovery activity to disk.
*
* Note that we write a shutdown checkpoint rather than an on-line
* one. This is not particularly critical, but since we may be
* assigning a new TLI, using a shutdown checkpoint allows us to have
* the rule that TLI only changes in shutdown checkpoints, which
* allows some extra error checking in xlog_redo.
*
* In fast promotion, only create a lightweight end-of-recovery record
* instead of a full checkpoint. A checkpoint is requested later,
* after we're fully out of recovery mode and already accepting
* queries.
*/
if (bgwriterLaunched)
{
if (fast_promote)
{
checkPointLoc = ControlFile->prevCheckPoint;
/*
* Confirm the last checkpoint is available for us to recover
* from if we fail. Note that we don't check for the secondary
* checkpoint since that isn't available in most base backups.
*/
record = ReadCheckpointRecord(xlogreader, checkPointLoc, 1, false);
if (record != NULL)
{
fast_promoted = true;
/*
* Insert a special WAL record to mark the end of
* recovery, since we aren't doing a checkpoint. That
* means that the checkpointer process may likely be in
* the middle of a time-smoothed restartpoint and could
* continue to be for minutes after this. That sounds
* strange, but the effect is roughly the same and it
* would be stranger to try to come out of the
* restartpoint and then checkpoint. We request a
* checkpoint later anyway, just for safety.
*/
CreateEndOfRecoveryRecord();
}
}
if (!fast_promoted)
RequestCheckpoint(CHECKPOINT_END_OF_RECOVERY |
CHECKPOINT_IMMEDIATE |
CHECKPOINT_WAIT);
}
else
CreateCheckPoint(CHECKPOINT_END_OF_RECOVERY | CHECKPOINT_IMMEDIATE);
/*
* And finally, execute the recovery_end_command, if any.
*/
if (recoveryEndCommand)
ExecuteRecoveryCommand(recoveryEndCommand,
"recovery_end_command",
true);
}
/*
* Preallocate additional log files, if wanted.
*/
PreallocXlogFiles(EndOfLog);
/*
* Reset initial contents of unlogged relations. This has to be done
* AFTER recovery is complete so that any unlogged relations created
* during recovery also get picked up.
*/
if (InRecovery)
ResetUnloggedRelations(UNLOGGED_RELATION_INIT);
/*
* Okay, we're officially UP.
*/
InRecovery = false;
LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
ControlFile->state = DB_IN_PRODUCTION;
ControlFile->time = (pg_time_t) time(NULL);
UpdateControlFile();
LWLockRelease(ControlFileLock);
/* start the archive_timeout timer running */
XLogCtl->lastSegSwitchTime = (pg_time_t) time(NULL);
/* also initialize latestCompletedXid, to nextXid - 1 */
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
ShmemVariableCache->latestCompletedXid = ShmemVariableCache->nextXid;
TransactionIdRetreat(ShmemVariableCache->latestCompletedXid);
LWLockRelease(ProcArrayLock);
/*
* Start up the commit log and subtrans, if not already done for hot
* standby.
*/
if (standbyState == STANDBY_DISABLED)
{
StartupCLOG();
StartupSUBTRANS(oldestActiveXID);
}
/*
* Perform end of recovery actions for any SLRUs that need it.
*/
TrimCLOG();
TrimMultiXact();
/* Reload shared-memory state for prepared transactions */
RecoverPreparedTransactions();
/*
* Shutdown the recovery environment. This must occur after
* RecoverPreparedTransactions(), see notes for lock_twophase_recover()
*/
if (standbyState != STANDBY_DISABLED)
ShutdownRecoveryTransactionEnvironment();
/* Shut down xlogreader */
if (readFile >= 0)
{
close(readFile);
readFile = -1;
}
XLogReaderFree(xlogreader);
/*
* If any of the critical GUCs have changed, log them before we allow
* backends to write WAL.
*/
LocalSetXLogInsertAllowed();
XLogReportParameters();
/*
* All done. Allow backends to write WAL. (Although the bool flag is
* probably atomic in itself, we use the info_lck here to ensure that
* there are no race conditions concerning visibility of other recent
* updates to shared memory.)
*/
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->SharedRecoveryInProgress = false;
SpinLockRelease(&xlogctl->info_lck);
}
/*
* If there were cascading standby servers connected to us, nudge any wal
* sender processes to notice that we've been promoted.
*/
WalSndWakeup();
/*
* If this was a fast promotion, request an (online) checkpoint now. This
* isn't required for consistency, but the last restartpoint might be far
* back, and in case of a crash, recovering from it might take a longer
* than is appropriate now that we're not in standby mode anymore.
*/
if (fast_promoted)
RequestCheckpoint(CHECKPOINT_FORCE);
}
| 0
|
419,566
|
/*
 * Resolve a display name for the block device identified by
 * @major:@minor, honoring the user's naming options in @flags.
 *
 * Resolution order:
 *   1. persistent name (e.g. by-id/by-uuid), if requested and found;
 *   2. device-mapper pretty name, if pretty output is on and the
 *      device belongs to the device-mapper major;
 *   3. the plain kernel device name.
 *
 * Returns a pointer to the chosen name (ownership follows the helper
 * that produced it), or whatever get_devname() yields as a fallback.
 */
char *get_sa_devname(unsigned int major, unsigned int minor, unsigned int flags)
{
	char *name = NULL;

	/* Persistent names take precedence when the user asked for them. */
	if (DISPLAY_PERSIST_NAME_S(flags)) {
		name = get_persistent_name_from_pretty(get_devname(major, minor, TRUE));
		if (name)
			return name;
	}

	/* Pretty output: translate device-mapper devices to their dm names. */
	if (USE_PRETTY_OPTION(flags) && (major == dm_major)) {
		name = transform_devmapname(major, minor);
	}

	/* Fall back to the standard kernel-provided name. */
	if (!name) {
		name = get_devname(major, minor, USE_PRETTY_OPTION(flags));
	}

	return name;
}
| 0
|
270,484
|
/*
 * Parse a CIL userrange statement into an AST node.
 *
 * Accepted forms:
 *   (userrange user named_range)        - third token is a string
 *   (userrange user ((low) (high)))     - third token is an inline list
 *
 * @db            CIL database (checked for NULL only)
 * @parse_current parse-tree node for the userrange statement
 * @ast_node      AST node to populate
 *
 * Returns SEPOL_OK on success; on failure logs the offending node,
 * destroys any partially built userrange and returns an error code.
 */
int cil_gen_userrange(struct cil_db *db, struct cil_tree_node *parse_current, struct cil_tree_node *ast_node)
{
	enum cil_syntax syntax[] = {
		CIL_SYN_STRING,
		CIL_SYN_STRING,
		CIL_SYN_STRING | CIL_SYN_LIST,
		CIL_SYN_END
	};
	int syntax_len = sizeof(syntax)/sizeof(*syntax);
	struct cil_userrange *userrange = NULL;
	int rc = SEPOL_ERR;

	if (db == NULL || parse_current == NULL || ast_node == NULL) {
		goto exit;
	}

	rc = __cil_verify_syntax(parse_current, syntax, syntax_len);
	if (rc != SEPOL_OK) {
		goto exit;
	}

	cil_userrange_init(&userrange);
	userrange->user_str = parse_current->next->data;

	if (parse_current->next->next->cl_head != NULL) {
		/* Inline (anonymous) levelrange: fill it from the sub-list. */
		cil_levelrange_init(&userrange->range);
		rc = cil_fill_levelrange(parse_current->next->next->cl_head, userrange->range);
		if (rc != SEPOL_OK) {
			goto exit;
		}
	} else {
		/* Named levelrange: resolve the symbol later. */
		userrange->range_str = parse_current->next->next->data;
	}

	ast_node->data = userrange;
	ast_node->flavor = CIL_USERRANGE;

	return SEPOL_OK;

exit:
	cil_tree_log(parse_current, CIL_ERR, "Bad userrange declaration");
	cil_destroy_userrange(userrange);
	return rc;
}
| 0
|
517,674
|
/**
  Sys_var update hook for the maria_group_commit plugin variable.

  Switches the Aria transaction log between group-commit modes
  (NONE / HARD / SOFT).  The sequence is deliberate: tear down the
  old mode first, publish the new value and sync the log, and only
  then enable the new mode, so no commit falls into a gap between
  modes.  NOTE(review): comment below says the change happens under a
  global lock; that guarantee is established by the caller — confirm.

  @param thd      connection executing SET GLOBAL (unused)
  @param var      sys_var descriptor (unused)
  @param var_ptr  storage of the current value; updated in place
  @param save     checked new value produced by the check function
*/
static void update_maria_group_commit(MYSQL_THD thd,
                                      struct st_mysql_sys_var *var,
                                      void *var_ptr, const void *save)
{
  ulong value= (ulong)*((long *)var_ptr);   /* old mode, pre-update */
  DBUG_ENTER("update_maria_group_commit");
  DBUG_PRINT("enter", ("old value: %lu new value %lu rate %lu",
                       value, (ulong)(*(long *)save),
                       maria_group_commit_interval));
  /* old value: disable whatever group-commit mode was active */
  switch (value) {
  case TRANSLOG_GCOMMIT_NONE:
    break;
  case TRANSLOG_GCOMMIT_HARD:
    translog_hard_group_commit(FALSE);
    break;
  case TRANSLOG_GCOMMIT_SOFT:
    translog_soft_sync(FALSE);
    /* stop the periodic soft-sync service if one was running */
    if (maria_group_commit_interval)
      translog_soft_sync_end();
    break;
  default:
    DBUG_ASSERT(0); /* impossible */
  }
  /* publish the new value, then flush the log before switching modes */
  value= *(ulong *)var_ptr= (ulong)(*(long *)save);
  translog_sync();
  /* new value: enable the requested group-commit mode */
  switch (value) {
  case TRANSLOG_GCOMMIT_NONE:
    break;
  case TRANSLOG_GCOMMIT_HARD:
    translog_hard_group_commit(TRUE);
    break;
  case TRANSLOG_GCOMMIT_SOFT:
    translog_soft_sync(TRUE);
    /* variable change made under global lock so we can just read it */
    if (maria_group_commit_interval)
      translog_soft_sync_start();
    break;
  default:
    DBUG_ASSERT(0); /* impossible */
  }
  DBUG_VOID_RETURN;
}
| 0
|
55,427
|
/* Compile a compound assignment expression (exp->a OP= exp->b),
 * where `opcode` is the VM instruction for the binary operation.
 * The emit order is fixed: resolve the target, evaluate the value,
 * apply the operator, store back.
 * NOTE(review): the exact stack effects of cassignop1/cassignop2 are
 * defined elsewhere in this file — confirm before changing ordering. */
static void cassignop(JF, js_Ast *exp, int opcode)
{
	js_Ast *lhs = exp->a;               /* assignment target */
	js_Ast *rhs = exp->b;               /* value expression */
	cassignop1(J, F, lhs);              /* prepare the lvalue reference */
	cexp(J, F, rhs);                    /* evaluate the right-hand side */
	emitline(J, F, exp);                /* record source line for diagnostics */
	emit(J, F, opcode);                 /* apply the binary operation */
	cassignop2(J, F, lhs, 0);           /* store the result back into lhs */
}
| 0
|
335,003
|
/*
 * Decode one RPZA (QuickTime "road pizza") chunk from s->buf into
 * s->frame (RGB555, 16 bits per pixel).
 *
 * The stream is a sequence of opcodes, each applying to a run of 4x4
 * pixel blocks:
 *   0x80  skip blocks (keep previous frame contents)
 *   0xa0  fill blocks with a single color
 *   0xc0  fill blocks from 4 interpolated colors + 2-bit indices
 *   0x20  synthetic opcode: same as 0xc0 but colorA was already read
 *         as part of the MSB-clear escape sequence
 *   0x00  one block of 16 literal colors
 *
 * Bug fix: in the 16-literal-colors case the truncation check
 * `if (s->size - stream_ptr < 16)` had no statement body, so the
 * following `block_ptr = ...` assignment silently became its body.
 * That inverted the intent: on truncated input the loop still ran and
 * read past the end of s->buf, and on well-formed input block_ptr was
 * never initialized for this opcode. The check now returns early.
 */
static void rpza_decode_stream(RpzaContext *s)
{
    int width = s->avctx->width;
    int stride = s->frame.linesize[0] / 2;
    int row_inc = stride - 4;
    int stream_ptr = 0;
    int chunk_size;
    unsigned char opcode;
    int n_blocks;
    unsigned short colorA = 0, colorB;
    unsigned short color4[4];
    unsigned char index, idx;
    unsigned short ta, tb;
    unsigned short *pixels = (unsigned short *)s->frame.data[0];
    int row_ptr = 0;
    int pixel_ptr = 0;
    int block_ptr;
    int pixel_x, pixel_y;
    int total_blocks;

    /* First byte is always 0xe1. Warn if it's different */
    if (s->buf[stream_ptr] != 0xe1)
        av_log(s->avctx, AV_LOG_ERROR, "First chunk byte is 0x%02x instead of 0xe1\n",
               s->buf[stream_ptr]);

    /* Get chunk size, ignoring first byte */
    chunk_size = AV_RB32(&s->buf[stream_ptr]) & 0x00FFFFFF;
    stream_ptr += 4;

    /* If length mismatch use size from MOV file and try to decode anyway */
    if (chunk_size != s->size)
        av_log(s->avctx, AV_LOG_ERROR, "MOV chunk size != encoded chunk size; using MOV chunk size\n");
    chunk_size = s->size;

    /* Number of 4x4 blocks in frame. */
    total_blocks = ((s->avctx->width + 3) / 4) * ((s->avctx->height + 3) / 4);

    /* Process chunk data */
    while (stream_ptr < chunk_size) {
        opcode = s->buf[stream_ptr++]; /* Get opcode */
        n_blocks = (opcode & 0x1f) + 1; /* Extract block counter from opcode */

        /* If opcode MSbit is 0, we need more data to decide what to do */
        if ((opcode & 0x80) == 0) {
            colorA = (opcode << 8) | (s->buf[stream_ptr++]);
            opcode = 0;
            if ((s->buf[stream_ptr] & 0x80) != 0) {
                /* Must behave as opcode 110xxxxx, using colorA computed
                 * above. Use fake opcode 0x20 to enter switch block at
                 * the right place */
                opcode = 0x20;
                n_blocks = 1;
            }
        }

        switch (opcode & 0xe0) {

        /* Skip blocks */
        case 0x80:
            while (n_blocks--) {
                ADVANCE_BLOCK();
            }
            break;

        /* Fill blocks with one color */
        case 0xa0:
            colorA = AV_RB16 (&s->buf[stream_ptr]);
            stream_ptr += 2;
            while (n_blocks--) {
                block_ptr = row_ptr + pixel_ptr;
                for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                    for (pixel_x = 0; pixel_x < 4; pixel_x++){
                        pixels[block_ptr] = colorA;
                        block_ptr++;
                    }
                    block_ptr += row_inc;
                }
                ADVANCE_BLOCK();
            }
            break;

        /* Fill blocks with 4 colors */
        case 0xc0:
            colorA = AV_RB16 (&s->buf[stream_ptr]);
            stream_ptr += 2;
            /* fall through: 0x20 is 0xc0 with colorA pre-read */
        case 0x20:
            colorB = AV_RB16 (&s->buf[stream_ptr]);
            stream_ptr += 2;

            /* sort out the colors: [0]=B, [3]=A, [1]/[2] interpolated */
            color4[0] = colorB;
            color4[1] = 0;
            color4[2] = 0;
            color4[3] = colorA;

            /* red components */
            ta = (colorA >> 10) & 0x1F;
            tb = (colorB >> 10) & 0x1F;
            color4[1] |= ((11 * ta + 21 * tb) >> 5) << 10;
            color4[2] |= ((21 * ta + 11 * tb) >> 5) << 10;

            /* green components */
            ta = (colorA >> 5) & 0x1F;
            tb = (colorB >> 5) & 0x1F;
            color4[1] |= ((11 * ta + 21 * tb) >> 5) << 5;
            color4[2] |= ((21 * ta + 11 * tb) >> 5) << 5;

            /* blue components */
            ta = colorA & 0x1F;
            tb = colorB & 0x1F;
            color4[1] |= ((11 * ta + 21 * tb) >> 5);
            color4[2] |= ((21 * ta + 11 * tb) >> 5);

            while (n_blocks--) {
                block_ptr = row_ptr + pixel_ptr;
                for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                    index = s->buf[stream_ptr++];
                    for (pixel_x = 0; pixel_x < 4; pixel_x++){
                        idx = (index >> (2 * (3 - pixel_x))) & 0x03;
                        pixels[block_ptr] = color4[idx];
                        block_ptr++;
                    }
                    block_ptr += row_inc;
                }
                ADVANCE_BLOCK();
            }
            break;

        /* Fill block with 16 colors */
        case 0x00:
            /* Truncated chunk: not enough input left for the literal
             * colors of this block. Bail out instead of overreading. */
            if (s->size - stream_ptr < 16)
                return;
            block_ptr = row_ptr + pixel_ptr;
            for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                for (pixel_x = 0; pixel_x < 4; pixel_x++){
                    /* We already have color of upper left pixel */
                    if ((pixel_y != 0) || (pixel_x !=0)) {
                        colorA = AV_RB16 (&s->buf[stream_ptr]);
                        stream_ptr += 2;
                    }
                    pixels[block_ptr] = colorA;
                    block_ptr++;
                }
                block_ptr += row_inc;
            }
            ADVANCE_BLOCK();
            break;

        /* Unknown opcode */
        default:
            av_log(s->avctx, AV_LOG_ERROR, "Unknown opcode %d in rpza chunk."
                 " Skip remaining %d bytes of chunk data.\n", opcode,
                 chunk_size - stream_ptr);
        } /* Opcode switch */
    }
}
| 1
|
314,220
|
// Destructor: cancel any pending event and stop playback while holding
// the channel lock, then release the AudioTrack with the lock dropped.
// NOTE(review): clear() is deliberately outside the locked scope —
// presumably because AudioTrack teardown can block or call back into
// this object; confirm against AudioTrack/SoundPool locking rules.
SoundChannel::~SoundChannel()
{
    ALOGV("SoundChannel destructor %p", this);
    {
        Mutex::Autolock lock(&mLock);
        clearNextEvent();   // drop any queued follow-up event
        doStop_l();         // stop playback (lock already held, hence _l)
    }
    mAudioTrack.clear();    // release our strong reference to the track
}
| 0
|
424,603
|
/* Byte length of one UTF-16LE encoded character for the given code
 * point: values above the BMP (> 0xffff) require a surrogate pair
 * (4 bytes), everything else is a single 16-bit unit (2 bytes). */
utf16le_code_to_mbclen(OnigCodePoint code)
{
  if (code > 0xffff)
    return 4;

  return 2;
}
| 0
|
180,744
|
// Generated V8 binding: setter callback for TestObject.elementAttribute.
// Thin trampoline that opens a (disabled-by-default) runtime call timer
// scope and forwards the incoming value to the internal setter.
void V8TestObject::ElementAttributeAttributeSetterCallback(
    const v8::FunctionCallbackInfo<v8::Value>& info) {
  RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_elementAttribute_Setter");

  // For setter callbacks the value being assigned is the first argument.
  v8::Local<v8::Value> v8_value = info[0];

  test_object_v8_internal::ElementAttributeAttributeSetter(v8_value, info);
}
| 0
|
386,380
|
static int
xmlXPathNodeCollectAndTest(xmlXPathParserContextPtr ctxt,
xmlXPathStepOpPtr op,
xmlNodePtr * first, xmlNodePtr * last,
int toBool)
{
#define XP_TEST_HIT \
if (hasAxisRange != 0) { \
if (++pos == maxPos) { \
if (addNode(seq, cur) < 0) \
ctxt->error = XPATH_MEMORY_ERROR; \
goto axis_range_end; } \
} else { \
if (addNode(seq, cur) < 0) \
ctxt->error = XPATH_MEMORY_ERROR; \
if (breakOnFirstHit) goto first_hit; }
#define XP_TEST_HIT_NS \
if (hasAxisRange != 0) { \
if (++pos == maxPos) { \
hasNsNodes = 1; \
if (xmlXPathNodeSetAddNs(seq, xpctxt->node, (xmlNsPtr) cur) < 0) \
ctxt->error = XPATH_MEMORY_ERROR; \
goto axis_range_end; } \
} else { \
hasNsNodes = 1; \
if (xmlXPathNodeSetAddNs(seq, xpctxt->node, (xmlNsPtr) cur) < 0) \
ctxt->error = XPATH_MEMORY_ERROR; \
if (breakOnFirstHit) goto first_hit; }
xmlXPathAxisVal axis = (xmlXPathAxisVal) op->value;
xmlXPathTestVal test = (xmlXPathTestVal) op->value2;
xmlXPathTypeVal type = (xmlXPathTypeVal) op->value3;
const xmlChar *prefix = op->value4;
const xmlChar *name = op->value5;
const xmlChar *URI = NULL;
#ifdef DEBUG_STEP
int nbMatches = 0, prevMatches = 0;
#endif
int total = 0, hasNsNodes = 0;
/* The popped object holding the context nodes */
xmlXPathObjectPtr obj;
/* The set of context nodes for the node tests */
xmlNodeSetPtr contextSeq;
int contextIdx;
xmlNodePtr contextNode;
/* The final resulting node set wrt to all context nodes */
xmlNodeSetPtr outSeq;
/*
* The temporary resulting node set wrt 1 context node.
* Used to feed predicate evaluation.
*/
xmlNodeSetPtr seq;
xmlNodePtr cur;
/* First predicate operator */
xmlXPathStepOpPtr predOp;
int maxPos; /* The requested position() (when a "[n]" predicate) */
int hasPredicateRange, hasAxisRange, pos, size, newSize;
int breakOnFirstHit;
xmlXPathTraversalFunction next = NULL;
int (*addNode) (xmlNodeSetPtr, xmlNodePtr);
xmlXPathNodeSetMergeFunction mergeAndClear;
xmlNodePtr oldContextNode;
xmlXPathContextPtr xpctxt = ctxt->context;
CHECK_TYPE0(XPATH_NODESET);
obj = valuePop(ctxt);
/*
* Setup namespaces.
*/
if (prefix != NULL) {
URI = xmlXPathNsLookup(xpctxt, prefix);
if (URI == NULL) {
xmlXPathReleaseObject(xpctxt, obj);
XP_ERROR0(XPATH_UNDEF_PREFIX_ERROR);
}
}
/*
* Setup axis.
*
* MAYBE FUTURE TODO: merging optimizations:
* - If the nodes to be traversed wrt to the initial nodes and
* the current axis cannot overlap, then we could avoid searching
* for duplicates during the merge.
* But the question is how/when to evaluate if they cannot overlap.
* Example: if we know that for two initial nodes, the one is
* not in the ancestor-or-self axis of the other, then we could safely
* avoid a duplicate-aware merge, if the axis to be traversed is e.g.
* the descendant-or-self axis.
*/
mergeAndClear = xmlXPathNodeSetMergeAndClear;
switch (axis) {
case AXIS_ANCESTOR:
first = NULL;
next = xmlXPathNextAncestor;
break;
case AXIS_ANCESTOR_OR_SELF:
first = NULL;
next = xmlXPathNextAncestorOrSelf;
break;
case AXIS_ATTRIBUTE:
first = NULL;
last = NULL;
next = xmlXPathNextAttribute;
mergeAndClear = xmlXPathNodeSetMergeAndClearNoDupls;
break;
case AXIS_CHILD:
last = NULL;
if (((test == NODE_TEST_NAME) || (test == NODE_TEST_ALL)) &&
(type == NODE_TYPE_NODE))
{
/*
* Optimization if an element node type is 'element'.
*/
next = xmlXPathNextChildElement;
} else
next = xmlXPathNextChild;
mergeAndClear = xmlXPathNodeSetMergeAndClearNoDupls;
break;
case AXIS_DESCENDANT:
last = NULL;
next = xmlXPathNextDescendant;
break;
case AXIS_DESCENDANT_OR_SELF:
last = NULL;
next = xmlXPathNextDescendantOrSelf;
break;
case AXIS_FOLLOWING:
last = NULL;
next = xmlXPathNextFollowing;
break;
case AXIS_FOLLOWING_SIBLING:
last = NULL;
next = xmlXPathNextFollowingSibling;
break;
case AXIS_NAMESPACE:
first = NULL;
last = NULL;
next = (xmlXPathTraversalFunction) xmlXPathNextNamespace;
mergeAndClear = xmlXPathNodeSetMergeAndClearNoDupls;
break;
case AXIS_PARENT:
first = NULL;
next = xmlXPathNextParent;
break;
case AXIS_PRECEDING:
first = NULL;
next = xmlXPathNextPrecedingInternal;
break;
case AXIS_PRECEDING_SIBLING:
first = NULL;
next = xmlXPathNextPrecedingSibling;
break;
case AXIS_SELF:
first = NULL;
last = NULL;
next = xmlXPathNextSelf;
mergeAndClear = xmlXPathNodeSetMergeAndClearNoDupls;
break;
}
#ifdef DEBUG_STEP
xmlXPathDebugDumpStepAxis(op,
(obj->nodesetval != NULL) ? obj->nodesetval->nodeNr : 0);
#endif
if (next == NULL) {
xmlXPathReleaseObject(xpctxt, obj);
return(0);
}
contextSeq = obj->nodesetval;
if ((contextSeq == NULL) || (contextSeq->nodeNr <= 0)) {
xmlXPathReleaseObject(xpctxt, obj);
valuePush(ctxt, xmlXPathCacheWrapNodeSet(xpctxt, NULL));
return(0);
}
/*
* Predicate optimization ---------------------------------------------
* If this step has a last predicate, which contains a position(),
* then we'll optimize (although not exactly "position()", but only
* the short-hand form, i.e., "[n]".
*
* Example - expression "/foo[parent::bar][1]":
*
* COLLECT 'child' 'name' 'node' foo -- op (we are here)
* ROOT -- op->ch1
* PREDICATE -- op->ch2 (predOp)
* PREDICATE -- predOp->ch1 = [parent::bar]
* SORT
* COLLECT 'parent' 'name' 'node' bar
* NODE
* ELEM Object is a number : 1 -- predOp->ch2 = [1]
*
*/
maxPos = 0;
predOp = NULL;
hasPredicateRange = 0;
hasAxisRange = 0;
if (op->ch2 != -1) {
/*
* There's at least one predicate. 16 == XPATH_OP_PREDICATE
*/
predOp = &ctxt->comp->steps[op->ch2];
if (xmlXPathIsPositionalPredicate(ctxt, predOp, &maxPos)) {
if (predOp->ch1 != -1) {
/*
* Use the next inner predicate operator.
*/
predOp = &ctxt->comp->steps[predOp->ch1];
hasPredicateRange = 1;
} else {
/*
* There's no other predicate than the [n] predicate.
*/
predOp = NULL;
hasAxisRange = 1;
}
}
}
breakOnFirstHit = ((toBool) && (predOp == NULL)) ? 1 : 0;
/*
* Axis traversal -----------------------------------------------------
*/
/*
* 2.3 Node Tests
* - For the attribute axis, the principal node type is attribute.
* - For the namespace axis, the principal node type is namespace.
* - For other axes, the principal node type is element.
*
* A node test * is true for any node of the
* principal node type. For example, child::* will
* select all element children of the context node
*/
oldContextNode = xpctxt->node;
addNode = xmlXPathNodeSetAddUnique;
outSeq = NULL;
seq = NULL;
contextNode = NULL;
contextIdx = 0;
while (((contextIdx < contextSeq->nodeNr) || (contextNode != NULL)) &&
(ctxt->error == XPATH_EXPRESSION_OK)) {
xpctxt->node = contextSeq->nodeTab[contextIdx++];
if (seq == NULL) {
seq = xmlXPathNodeSetCreate(NULL);
if (seq == NULL) {
total = 0;
goto error;
}
}
/*
* Traverse the axis and test the nodes.
*/
pos = 0;
cur = NULL;
hasNsNodes = 0;
do {
cur = next(ctxt, cur);
if (cur == NULL)
break;
/*
* QUESTION TODO: What does the "first" and "last" stuff do?
*/
if ((first != NULL) && (*first != NULL)) {
if (*first == cur)
break;
if (((total % 256) == 0) &&
#ifdef XP_OPTIMIZED_NON_ELEM_COMPARISON
(xmlXPathCmpNodesExt(*first, cur) >= 0))
#else
(xmlXPathCmpNodes(*first, cur) >= 0))
#endif
{
break;
}
}
if ((last != NULL) && (*last != NULL)) {
if (*last == cur)
break;
if (((total % 256) == 0) &&
#ifdef XP_OPTIMIZED_NON_ELEM_COMPARISON
(xmlXPathCmpNodesExt(cur, *last) >= 0))
#else
(xmlXPathCmpNodes(cur, *last) >= 0))
#endif
{
break;
}
}
total++;
#ifdef DEBUG_STEP
xmlGenericError(xmlGenericErrorContext, " %s", cur->name);
#endif
switch (test) {
case NODE_TEST_NONE:
total = 0;
STRANGE
goto error;
case NODE_TEST_TYPE:
/*
* TODO: Don't we need to use
* xmlXPathNodeSetAddNs() for namespace nodes here?
* Surprisingly, some c14n tests fail, if we do this.
*/
if (type == NODE_TYPE_NODE) {
switch (cur->type) {
case XML_DOCUMENT_NODE:
case XML_HTML_DOCUMENT_NODE:
#ifdef LIBXML_DOCB_ENABLED
case XML_DOCB_DOCUMENT_NODE:
#endif
case XML_ELEMENT_NODE:
case XML_ATTRIBUTE_NODE:
case XML_PI_NODE:
case XML_COMMENT_NODE:
case XML_CDATA_SECTION_NODE:
case XML_TEXT_NODE:
case XML_NAMESPACE_DECL:
XP_TEST_HIT
break;
default:
break;
}
} else if (cur->type == type) {
if (cur->type == XML_NAMESPACE_DECL)
XP_TEST_HIT_NS
else
XP_TEST_HIT
} else if ((type == NODE_TYPE_TEXT) &&
(cur->type == XML_CDATA_SECTION_NODE))
{
XP_TEST_HIT
}
break;
case NODE_TEST_PI:
if ((cur->type == XML_PI_NODE) &&
((name == NULL) || xmlStrEqual(name, cur->name)))
{
XP_TEST_HIT
}
break;
case NODE_TEST_ALL:
if (axis == AXIS_ATTRIBUTE) {
if (cur->type == XML_ATTRIBUTE_NODE)
{
if (prefix == NULL)
{
XP_TEST_HIT
} else if ((cur->ns != NULL) &&
(xmlStrEqual(URI, cur->ns->href)))
{
XP_TEST_HIT
}
}
} else if (axis == AXIS_NAMESPACE) {
if (cur->type == XML_NAMESPACE_DECL)
{
XP_TEST_HIT_NS
}
} else {
if (cur->type == XML_ELEMENT_NODE) {
if (prefix == NULL)
{
XP_TEST_HIT
} else if ((cur->ns != NULL) &&
(xmlStrEqual(URI, cur->ns->href)))
{
XP_TEST_HIT
}
}
}
break;
case NODE_TEST_NS:{
TODO;
break;
}
case NODE_TEST_NAME:
if (axis == AXIS_ATTRIBUTE) {
if (cur->type != XML_ATTRIBUTE_NODE)
break;
} else if (axis == AXIS_NAMESPACE) {
if (cur->type != XML_NAMESPACE_DECL)
break;
} else {
if (cur->type != XML_ELEMENT_NODE)
break;
}
switch (cur->type) {
case XML_ELEMENT_NODE:
if (xmlStrEqual(name, cur->name)) {
if (prefix == NULL) {
if (cur->ns == NULL)
{
XP_TEST_HIT
}
} else {
if ((cur->ns != NULL) &&
(xmlStrEqual(URI, cur->ns->href)))
{
XP_TEST_HIT
}
}
}
break;
case XML_ATTRIBUTE_NODE:{
xmlAttrPtr attr = (xmlAttrPtr) cur;
if (xmlStrEqual(name, attr->name)) {
if (prefix == NULL) {
if ((attr->ns == NULL) ||
(attr->ns->prefix == NULL))
{
XP_TEST_HIT
}
} else {
if ((attr->ns != NULL) &&
(xmlStrEqual(URI,
attr->ns->href)))
{
XP_TEST_HIT
}
}
}
break;
}
case XML_NAMESPACE_DECL:
if (cur->type == XML_NAMESPACE_DECL) {
xmlNsPtr ns = (xmlNsPtr) cur;
if ((ns->prefix != NULL) && (name != NULL)
&& (xmlStrEqual(ns->prefix, name)))
{
XP_TEST_HIT_NS
}
}
break;
default:
break;
}
break;
} /* switch(test) */
} while ((cur != NULL) && (ctxt->error == XPATH_EXPRESSION_OK));
goto apply_predicates;
axis_range_end: /* ----------------------------------------------------- */
/*
* We have a "/foo[n]", and position() = n was reached.
* Note that we can have as well "/foo/::parent::foo[1]", so
* a duplicate-aware merge is still needed.
* Merge with the result.
*/
if (outSeq == NULL) {
outSeq = seq;
seq = NULL;
} else
outSeq = mergeAndClear(outSeq, seq, 0);
/*
* Break if only a true/false result was requested.
*/
if (toBool)
break;
continue;
first_hit: /* ---------------------------------------------------------- */
/*
* Break if only a true/false result was requested and
* no predicates existed and a node test succeeded.
*/
if (outSeq == NULL) {
outSeq = seq;
seq = NULL;
} else
outSeq = mergeAndClear(outSeq, seq, 0);
break;
#ifdef DEBUG_STEP
if (seq != NULL)
nbMatches += seq->nodeNr;
#endif
apply_predicates: /* --------------------------------------------------- */
if (ctxt->error != XPATH_EXPRESSION_OK)
goto error;
/*
* Apply predicates.
*/
if ((predOp != NULL) && (seq->nodeNr > 0)) {
/*
* E.g. when we have a "/foo[some expression][n]".
*/
/*
* QUESTION TODO: The old predicate evaluation took into
* account location-sets.
* (E.g. ctxt->value->type == XPATH_LOCATIONSET)
* Do we expect such a set here?
* All what I learned now from the evaluation semantics
* does not indicate that a location-set will be processed
* here, so this looks OK.
*/
/*
* Iterate over all predicates, starting with the outermost
* predicate.
* TODO: Problem: we cannot execute the inner predicates first
* since we cannot go back *up* the operator tree!
* Options we have:
* 1) Use of recursive functions (like is it currently done
* via xmlXPathCompOpEval())
* 2) Add a predicate evaluation information stack to the
* context struct
* 3) Change the way the operators are linked; we need a
* "parent" field on xmlXPathStepOp
*
* For the moment, I'll try to solve this with a recursive
* function: xmlXPathCompOpEvalPredicate().
*/
size = seq->nodeNr;
if (hasPredicateRange != 0)
newSize = xmlXPathCompOpEvalPositionalPredicate(ctxt,
predOp, seq, size, maxPos, maxPos, hasNsNodes);
else
newSize = xmlXPathCompOpEvalPredicate(ctxt,
predOp, seq, size, hasNsNodes);
if (ctxt->error != XPATH_EXPRESSION_OK) {
total = 0;
goto error;
}
/*
* Add the filtered set of nodes to the result node set.
*/
if (newSize == 0) {
/*
* The predicates filtered all nodes out.
*/
xmlXPathNodeSetClear(seq, hasNsNodes);
} else if (seq->nodeNr > 0) {
/*
* Add to result set.
*/
if (outSeq == NULL) {
if (size != newSize) {
/*
* We need to merge and clear here, since
* the sequence will contained NULLed entries.
*/
outSeq = mergeAndClear(NULL, seq, 1);
} else {
outSeq = seq;
seq = NULL;
}
} else
outSeq = mergeAndClear(outSeq, seq,
(size != newSize) ? 1: 0);
/*
* Break if only a true/false result was requested.
*/
if (toBool)
break;
}
} else if (seq->nodeNr > 0) {
/*
* Add to result set.
*/
if (outSeq == NULL) {
outSeq = seq;
seq = NULL;
} else {
outSeq = mergeAndClear(outSeq, seq, 0);
}
}
}
error:
if ((obj->boolval) && (obj->user != NULL)) {
/*
* QUESTION TODO: What does this do and why?
* TODO: Do we have to do this also for the "error"
* cleanup further down?
*/
ctxt->value->boolval = 1;
ctxt->value->user = obj->user;
obj->user = NULL;
obj->boolval = 0;
}
xmlXPathReleaseObject(xpctxt, obj);
/*
* Ensure we return at least an emtpy set.
*/
if (outSeq == NULL) {
if ((seq != NULL) && (seq->nodeNr == 0))
outSeq = seq;
else
outSeq = xmlXPathNodeSetCreate(NULL);
/* XXX what if xmlXPathNodeSetCreate returned NULL here? */
}
if ((seq != NULL) && (seq != outSeq)) {
xmlXPathFreeNodeSet(seq);
}
/*
* Hand over the result. Better to push the set also in
* case of errors.
*/
valuePush(ctxt, xmlXPathCacheWrapNodeSet(xpctxt, outSeq));
/*
* Reset the context node.
*/
xpctxt->node = oldContextNode;
#ifdef DEBUG_STEP
xmlGenericError(xmlGenericErrorContext,
"\nExamined %d nodes, found %d nodes at that step\n",
total, nbMatches);
#endif
| 0
|
28,877
|
/*
 * Unpack one 32-byte TrueSpeech frame from `input` into the decoder
 * state `dec`.
 *
 * The input words are byte-swapped into dec->buffer first and then
 * consumed as a bitstream. The sequence of get_bits() calls below IS
 * the frame layout — the read order must not be changed.
 */
static void truespeech_read_frame(TSContext *dec, const uint8_t *input)
{
    /* bit widths of the codebook indices for vector[0] .. vector[7] */
    static const uint8_t vector_bits[8] = { 5, 5, 4, 4, 4, 3, 3, 3 };
    GetBitContext gb;
    int i;

    dec->dsp.bswap_buf((uint32_t *)dec->buffer, (const uint32_t *)input, 8);
    init_get_bits(&gb, dec->buffer, 32 * 8);

    /* codebook vector entries, stored highest index first */
    for (i = 7; i >= 0; i--)
        dec->vector[i] = ts_codebook[i][get_bits(&gb, vector_bits[i])];

    dec->flag = get_bits1(&gb);

    dec->offset1[0] = get_bits(&gb, 4) << 4;
    dec->offset2[3] = get_bits(&gb, 7);
    dec->offset2[2] = get_bits(&gb, 7);
    dec->offset2[1] = get_bits(&gb, 7);
    dec->offset2[0] = get_bits(&gb, 7);

    dec->offset1[1] = get_bits(&gb, 4);
    dec->pulseval[1] = get_bits(&gb, 14);
    dec->pulseval[0] = get_bits(&gb, 14);

    /* high nibble of offset1[1] arrives after the first pulseval pair */
    dec->offset1[1] |= get_bits(&gb, 4) << 4;
    dec->pulseval[3] = get_bits(&gb, 14);
    dec->pulseval[2] = get_bits(&gb, 14);

    /* pulse positions/offsets, interleaved bit-by-bit with the low
     * nibble of offset1[0] */
    for (i = 0; i < 4; i++) {
        dec->offset1[0] |= get_bits1(&gb) << i;
        dec->pulsepos[i] = get_bits_long(&gb, 27);
        dec->pulseoff[i] = get_bits(&gb, 4);
    }
}
| 0
|
213,216
|
// Generated V8 binding: setter callback for TestObject.unsignedLongLongAttr.
// Tags the call for sampling profilers as DOM work, forwards to the
// internal setter, then restores the V8-execution sampling state.
static void unsignedLongLongAttrAttributeSetterCallback(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
    TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter");
    TestObjectV8Internal::unsignedLongLongAttrAttributeSetter(jsValue, info);
    TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 0
|
271,205
|
/*
 * mainloop: serve NBD requests on one client connection until the client
 * sends NBD_CMD_DISC (or a fatal protocol error occurs; err() does not
 * return). Performs the protocol negotiation first, then loops reading
 * request headers and dispatching on the command code.
 *
 * Returns 0 on clean shutdown.
 */
int mainloop(CLIENT *client) {
struct nbd_request request;
struct nbd_reply reply;
gboolean go_on=TRUE;
#ifdef DODBG
int i = 0;
#endif
/* Handshake: modern-style or old-style depending on how the client connected. */
negotiate(client->net, client, NULL, client->modern ? NEG_MODERN : (NEG_OLD | NEG_INIT));
DEBUG("Entering request loop!\n");
/* The reply magic/error fields are constant for successful replies;
 * ERROR() overwrites reply.error as needed per request. */
reply.magic = htonl(NBD_REPLY_MAGIC);
reply.error = 0;
while (go_on) {
char buf[BUFSIZE];
char* p;
size_t len;
size_t currlen;
size_t writelen;
uint16_t command;
#ifdef DODBG
i++;
printf("%d: ", i);
#endif
/* Read the fixed-size request header; mirror it to the transaction log if enabled. */
readit(client->net, &request, sizeof(request));
if (client->transactionlogfd != -1)
writeit(client->transactionlogfd, &request, sizeof(request));
/* Wire format is big-endian; convert offset, type and length to host order. */
request.from = ntohll(request.from);
request.type = ntohl(request.type);
command = request.type & NBD_CMD_MASK_COMMAND;
len = ntohl(request.len);
DEBUG("%s from %llu (%llu) len %u, ", getcommandname(command),
(unsigned long long)request.from,
(unsigned long long)request.from / 512, len);
if (request.magic != htonl(NBD_REQUEST_MAGIC))
err("Not enough magic.");
/* Echo the opaque client handle back in every reply. */
memcpy(reply.handle, request.handle, sizeof(reply.handle));
if ((command==NBD_CMD_WRITE) || (command==NBD_CMD_READ)) {
/* Reject requests whose offset+length wraps 64 bits or exceeds the export. */
if (request.from + len < request.from) { // 64 bit overflow!!
DEBUG("[Number too large!]");
ERROR(client, reply, EINVAL);
continue;
}
if (((off_t)request.from + len) > client->exportsize) {
DEBUG("[RANGE!]");
ERROR(client, reply, EINVAL);
continue;
}
/* Large transfers are processed in chunks of at most
 * BUFSIZE - sizeof(struct nbd_reply) bytes. */
currlen = len;
if (currlen > BUFSIZE - sizeof(struct nbd_reply)) {
currlen = BUFSIZE - sizeof(struct nbd_reply);
if(!logged_oversized) {
msg(LOG_DEBUG, "oversized request (this is not a problem)");
logged_oversized = true;
}
}
}
switch (command) {
case NBD_CMD_DISC:
/* Clean disconnect; tear down copy-on-write scratch state if present. */
msg(LOG_INFO, "Disconnect request received.");
if (client->server->flags & F_COPYONWRITE) {
if (client->difmap) g_free(client->difmap) ;
close(client->difffile);
unlink(client->difffilename);
free(client->difffilename);
}
go_on=FALSE;
continue;
case NBD_CMD_WRITE:
DEBUG("wr: net->buf, ");
/* Consume the payload chunk by chunk, writing each chunk to the export. */
while(len > 0) {
readit(client->net, buf, currlen);
DEBUG("buf->exp, ");
if ((client->server->flags & F_READONLY) ||
(client->server->flags & F_AUTOREADONLY)) {
DEBUG("[WRITE to READONLY!]");
ERROR(client, reply, EPERM);
/* Drain the rest of the payload so the stream stays in sync. */
consume(client->net, buf, len-currlen, BUFSIZE);
/* NOTE(review): this `continue` targets the inner while, not the
 * request loop, and len is not updated first — looks like it can
 * spin; confirm intended target before changing. */
continue;
}
if (expwrite(request.from, buf, currlen, client,
request.type & NBD_CMD_FLAG_FUA)) {
DEBUG("Write failed: %m" );
ERROR(client, reply, errno);
consume(client->net, buf, len-currlen, BUFSIZE);
continue;
}
len -= currlen;
request.from += currlen;
currlen = (len < BUFSIZE) ? len : BUFSIZE;
}
SEND(client->net, reply);
DEBUG("OK!\n");
continue;
case NBD_CMD_FLUSH:
DEBUG("fl: ");
if (expflush(client)) {
DEBUG("Flush failed: %m");
ERROR(client, reply, errno);
continue;
}
SEND(client->net, reply);
DEBUG("OK!\n");
continue;
case NBD_CMD_READ:
DEBUG("exp->buf, ");
/* Reply header is sent first, then the data follows in chunks. */
if (client->transactionlogfd != -1)
writeit(client->transactionlogfd, &reply, sizeof(reply));
writeit(client->net, &reply, sizeof(reply));
p = buf;
writelen = currlen;
while(len > 0) {
if (expread(request.from, p, currlen, client)) {
DEBUG("Read failed: %m");
ERROR(client, reply, errno);
/* NOTE(review): as in the write path, this continues the inner
 * loop with len unchanged — verify this cannot loop forever on a
 * persistent backend read error. */
continue;
}
DEBUG("buf->net, ");
writeit(client->net, buf, writelen);
len -= currlen;
request.from += currlen;
currlen = (len < BUFSIZE) ? len : BUFSIZE;
p = buf;
writelen = currlen;
}
DEBUG("OK!\n");
continue;
case NBD_CMD_TRIM:
/* The kernel module sets discard_zeroes_data == 0,
 * so it is okay to do nothing. */
if (exptrim(&request, client)) {
DEBUG("Trim failed: %m");
ERROR(client, reply, errno);
continue;
}
SEND(client->net, reply);
continue;
default:
/* Unknown commands are ignored rather than treated as fatal. */
DEBUG ("Ignoring unknown command\n");
continue;
}
}
return 0;
}
| 0
|
284,659
|
/*
 * Unit test: when two pattern-match signatures could apply to a TOCLIENT
 * stream, the FTP banner "220 " must win over "HTTP" for the sample
 * payload. Returns 1 on success, 0 on failure.
 */
static int AppLayerProtoDetectTest06(void)
{
AppLayerProtoDetectUnittestCtxBackup();
AppLayerProtoDetectSetup();
uint8_t l7data[] = "220 Welcome to the OISF FTP server\r\n";
const char *buf;
int r = 0;
Flow f;
AppProto pm_results[ALPROTO_MAX];
AppLayerProtoDetectThreadCtx *alpd_tctx;
memset(&f, 0x00, sizeof(f));
/* Zero-init results so no stale stack data can be read on failure paths. */
memset(pm_results, 0x00, sizeof(pm_results));
f.protomap = FlowGetProtoMapping(IPPROTO_TCP);
buf = "HTTP";
AppLayerProtoDetectPMRegisterPatternCS(IPPROTO_TCP, ALPROTO_HTTP, buf, 4, 0, STREAM_TOCLIENT);
buf = "220 ";
AppLayerProtoDetectPMRegisterPatternCS(IPPROTO_TCP, ALPROTO_FTP, buf, 4, 0, STREAM_TOCLIENT);
AppLayerProtoDetectPrepareState();
/* AppLayerProtoDetectGetCtxThread() should be called post AppLayerProtoDetectPrepareState(), since
 * it sets internal structures which depends on the above function. */
alpd_tctx = AppLayerProtoDetectGetCtxThread();
/* Both patterns were registered TOCLIENT, so the TOSERVER (index 0)
 * matcher must be empty and the TOCLIENT (index 1) matcher must hold
 * exactly two patterns, sorted FTP before HTTP. */
if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[0].max_pat_id != 0) {
printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[0].max_pat_id != 0\n");
goto end;
}
if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].max_pat_id != 2) {
printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].max_pat_id != 2\n");
goto end;
}
if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[0].map != NULL) {
printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[0].map != NULL\n");
goto end;
}
if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map == NULL) {
printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map != NULL\n");
goto end;
}
if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map[0]->alproto != ALPROTO_FTP) {
printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map[0].alproto != ALPROTO_FTP\n");
goto end;
}
if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map[1]->alproto != ALPROTO_HTTP) {
printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map[1].alproto != ALPROTO_HTTP\n");
goto end;
}
uint32_t cnt = AppLayerProtoDetectPMGetProto(alpd_tctx,
&f,
l7data, sizeof(l7data),
STREAM_TOCLIENT,
IPPROTO_TCP,
pm_results);
/* BUG FIX: the original condition used `&&`, so the test only failed when
 * BOTH the count and the detected protocol were wrong — a vacuous check.
 * We must fail if EITHER the match count differs from 1 OR the detected
 * protocol is not FTP. (|| short-circuits, so pm_results[0] is only read
 * when cnt == 1.) */
if (cnt != 1 || pm_results[0] != ALPROTO_FTP) {
printf("cnt != 1 || pm_results[0] != ALPROTO_FTP\n");
goto end;
}
r = 1;
end:
if (alpd_tctx != NULL)
AppLayerProtoDetectDestroyCtxThread(alpd_tctx);
AppLayerProtoDetectDeSetup();
AppLayerProtoDetectUnittestCtxRestore();
return r;
}
| 0
|
408,530
|
bytes_richcompare(PyBytesObject *a, PyBytesObject *b, int op)
{
int c;
Py_ssize_t len_a, len_b;
Py_ssize_t min_len;
PyObject *result;
int rc;
/* Make sure both arguments are strings. */
if (!(PyBytes_Check(a) && PyBytes_Check(b))) {
/* Under -b / -bb, (in)equality comparisons between bytes and str or
 * int raise BytesWarning, since such comparisons are always unequal
 * and usually indicate a Python 2 -> 3 porting bug. */
if (Py_BytesWarningFlag && (op == Py_EQ || op == Py_NE)) {
rc = PyObject_IsInstance((PyObject*)a,
(PyObject*)&PyUnicode_Type);
if (!rc)
rc = PyObject_IsInstance((PyObject*)b,
(PyObject*)&PyUnicode_Type);
if (rc < 0)
return NULL;
if (rc) {
if (PyErr_WarnEx(PyExc_BytesWarning,
"Comparison between bytes and string", 1))
return NULL;
}
else {
rc = PyObject_IsInstance((PyObject*)a,
(PyObject*)&PyLong_Type);
if (!rc)
rc = PyObject_IsInstance((PyObject*)b,
(PyObject*)&PyLong_Type);
if (rc < 0)
return NULL;
if (rc) {
if (PyErr_WarnEx(PyExc_BytesWarning,
"Comparison between bytes and int", 1))
return NULL;
}
}
}
/* Not our types: let the other operand's type try the comparison. */
result = Py_NotImplemented;
}
else if (a == b) {
/* Identity shortcut: an object always compares equal to itself. */
switch (op) {
case Py_EQ:
case Py_LE:
case Py_GE:
/* a string is equal to itself */
result = Py_True;
break;
case Py_NE:
case Py_LT:
case Py_GT:
result = Py_False;
break;
default:
PyErr_BadArgument();
return NULL;
}
}
else if (op == Py_EQ || op == Py_NE) {
/* Equality has a fast path (length check + memcmp). */
int eq = bytes_compare_eq(a, b);
eq ^= (op == Py_NE);
result = eq ? Py_True : Py_False;
}
else {
/* Ordering: compare the common prefix, then fall back to lengths.
 * The first-byte comparison avoids a memcmp call in the common
 * case where the strings differ immediately. */
len_a = Py_SIZE(a);
len_b = Py_SIZE(b);
min_len = Py_MIN(len_a, len_b);
if (min_len > 0) {
c = Py_CHARMASK(*a->ob_sval) - Py_CHARMASK(*b->ob_sval);
if (c == 0)
c = memcmp(a->ob_sval, b->ob_sval, min_len);
}
else
c = 0;
if (c == 0)
c = (len_a < len_b) ? -1 : (len_a > len_b) ? 1 : 0;
switch (op) {
case Py_LT: c = c < 0; break;
case Py_LE: c = c <= 0; break;
case Py_GT: c = c > 0; break;
case Py_GE: c = c >= 0; break;
default:
PyErr_BadArgument();
return NULL;
}
result = c ? Py_True : Py_False;
}
/* Return a new reference (Py_True/Py_False/Py_NotImplemented are shared). */
Py_INCREF(result);
return result;
}
| 0
|
164,220
|
/*
 * pcnet_transmit: walk the guest's transmit descriptor ring and send any
 * packets the guest has handed to the device. Multi-descriptor packets are
 * accumulated in s->buffer between the STP (start) and ENP (end) flags.
 * Loopback mode feeds the frame back into pcnet_receive instead of the net
 * backend.
 */
static void pcnet_transmit(PCNetState *s)
{
hwaddr xmit_cxda = 0;
int count = CSR_XMTRL(s)-1;
int add_crc = 0;
int bcnt;
/* xmit_pos == -1 means "no partially assembled packet". */
s->xmit_pos = -1;
if (!CSR_TXON(s)) {
s->csr[0] &= ~0x0008;
return;
}
s->tx_busy = 1;
txagain:
if (pcnet_tdte_poll(s)) {
struct pcnet_TMD tmd;
TMDLOAD(&tmd, PHYSADDR(s,CSR_CXDA(s)));
#ifdef PCNET_DEBUG_TMD
printf("  TMDLOAD 0x%08x\n", PHYSADDR(s,CSR_CXDA(s)));
PRINT_TMD(&tmd);
#endif
if (GET_FIELD(tmd.status, TMDS, STP)) {
/* First descriptor of a packet: reset the assembly buffer and
 * remember the descriptor address for the underflow error path. */
s->xmit_pos = 0;
xmit_cxda = PHYSADDR(s,CSR_CXDA(s));
if (BCR_SWSTYLE(s) != 1)
add_crc = GET_FIELD(tmd.status, TMDS, ADDFCS);
}
if (s->lnkst == 0 &&
(!CSR_LOOP(s) || (!CSR_INTL(s) && !BCR_TMAULOOP(s)))) {
/* Link down and not looped back internally: flag carrier loss. */
SET_FIELD(&tmd.misc, TMDM, LCAR, 1);
SET_FIELD(&tmd.status, TMDS, ERR, 1);
SET_FIELD(&tmd.status, TMDS, OWN, 0);
s->csr[0] |= 0xa000; /* ERR | CERR */
s->xmit_pos = -1;
goto txdone;
}
if (s->xmit_pos < 0) {
/* Descriptor without a preceding STP: nothing to assemble. */
goto txdone;
}
/* Buffer byte count is stored as a negative value in the descriptor. */
bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT);
/* if multi-tmd packet outsizes s->buffer then skip it silently.
 * Note: this is not what real hw does.
 * Last four bytes of s->buffer are used to store CRC FCS code.
 */
if (s->xmit_pos + bcnt > sizeof(s->buffer) - 4) {
s->xmit_pos = -1;
goto txdone;
}
s->phys_mem_read(s->dma_opaque, PHYSADDR(s, tmd.tbadr),
s->buffer + s->xmit_pos, bcnt, CSR_BSWP(s));
s->xmit_pos += bcnt;
if (!GET_FIELD(tmd.status, TMDS, ENP)) {
/* More descriptors to come for this packet. */
goto txdone;
}
#ifdef PCNET_DEBUG
printf("pcnet_transmit size=%d\n", s->xmit_pos);
#endif
if (CSR_LOOP(s)) {
/* Loopback: hand the frame straight to our own receive path. */
if (BCR_SWSTYLE(s) == 1)
add_crc = !GET_FIELD(tmd.status, TMDS, NOFCS);
s->looptest = add_crc ? PCNET_LOOPTEST_CRC : PCNET_LOOPTEST_NOCRC;
pcnet_receive(qemu_get_queue(s->nic), s->buffer, s->xmit_pos);
s->looptest = 0;
} else {
if (s->nic) {
qemu_send_packet(qemu_get_queue(s->nic), s->buffer,
s->xmit_pos);
}
}
s->csr[0] &= ~0x0008;   /* clear TDMD */
s->csr[4] |= 0x0004;    /* set TXSTRT */
s->xmit_pos = -1;
txdone:
/* Return descriptor ownership to the guest and raise TINT if enabled. */
SET_FIELD(&tmd.status, TMDS, OWN, 0);
TMDSTORE(&tmd, PHYSADDR(s,CSR_CXDA(s)));
if (!CSR_TOKINTD(s) || (CSR_LTINTEN(s) && GET_FIELD(tmd.status, TMDS, LTINT)))
s->csr[0] |= 0x0200;    /* set TINT */
/* Advance the ring counter (wraps back to the ring length). */
if (CSR_XMTRC(s)<=1)
CSR_XMTRC(s) = CSR_XMTRL(s);
else
CSR_XMTRC(s)--;
if (count--)
goto txagain;
} else
if (s->xmit_pos >= 0) {
/* Ran out of owned descriptors mid-packet: report buffer underflow. */
struct pcnet_TMD tmd;
TMDLOAD(&tmd, xmit_cxda);
SET_FIELD(&tmd.misc, TMDM, BUFF, 1);
SET_FIELD(&tmd.misc, TMDM, UFLO, 1);
SET_FIELD(&tmd.status, TMDS, ERR, 1);
SET_FIELD(&tmd.status, TMDS, OWN, 0);
TMDSTORE(&tmd, xmit_cxda);
s->csr[0] |= 0x0200;    /* set TINT */
if (!CSR_DXSUFLO(s)) {
s->csr[0] &= ~0x0010;
} else
if (count--)
goto txagain;
}
s->tx_busy = 0;
}
| 0
|
436,343
|
get_case_fold_codes_by_str(OnigCaseFoldType flag,
                           const OnigUChar* p, const OnigUChar* end,
                           OnigCaseFoldCodeItem items[])
{
  /* Number of pair entries in this encoding's case-fold table. */
  const int map_size = sizeof(CaseFoldMap) / sizeof(OnigPairCaseFoldCodes);

  /* Delegate to the generic map-driven lookup (extra map flags: none). */
  return onigenc_get_case_fold_codes_by_str_with_map(map_size, CaseFoldMap, 0,
                                                     flag, p, end, items);
}
| 0
|
295,771
|
dtdReset(DTD *p, const XML_Memory_Handling_Suite *ms) {
  /* Reset a DTD structure for reuse: release per-element default-attribute
   * arrays and the scaffold, clear all hash tables and pools, and restore
   * every scalar field to its initial state. Uses the caller-supplied memory
   * suite `ms` for all frees. */
  HASH_TABLE_ITER iter;
  hashTableIterInit(&iter, &(p->elementTypes));
  /* Default-attribute arrays are owned by the element-type entries and must
   * be freed individually before the table itself is cleared. */
  for (;;) {
    ELEMENT_TYPE *e = (ELEMENT_TYPE *)hashTableIterNext(&iter);
    if (! e)
      break;
    if (e->allocDefaultAtts != 0)
      ms->free_fcn(e->defaultAtts);
  }
  hashTableClear(&(p->generalEntities));
#ifdef XML_DTD
  p->paramEntityRead = XML_FALSE;
  hashTableClear(&(p->paramEntities));
#endif /* XML_DTD */
  hashTableClear(&(p->elementTypes));
  hashTableClear(&(p->attributeIds));
  hashTableClear(&(p->prefixes));
  /* Pools back all interned strings referenced by the tables above, so they
   * are cleared only after the tables. */
  poolClear(&(p->pool));
  poolClear(&(p->entityValuePool));
  p->defaultPrefix.name = NULL;
  p->defaultPrefix.binding = NULL;
  p->in_eldecl = XML_FALSE;
  /* Content-model scaffold is rebuilt from scratch on next use. */
  ms->free_fcn(p->scaffIndex);
  p->scaffIndex = NULL;
  ms->free_fcn(p->scaffold);
  p->scaffold = NULL;
  p->scaffLevel = 0;
  p->scaffSize = 0;
  p->scaffCount = 0;
  p->contentStringLen = 0;
  p->keepProcessing = XML_TRUE;
  p->hasParamEntityRefs = XML_FALSE;
  p->standalone = XML_FALSE;
}
| 0
|
281,538
|
// Propagates a change in this WebContents' loading state: resets transient
// load-progress bookkeeping when loading stops, informs the render manager,
// the delegate, and observers, and emits trace events plus the legacy
// LOAD_START/LOAD_STOP notifications.
void WebContentsImpl::LoadingStateChanged(bool to_different_document,
                                          bool due_to_interstitial,
                                          LoadNotificationDetails* details) {
  // An interstitial with a paused throbber suppresses loading-state updates
  // that did not originate from the interstitial itself.
  if (ShowingInterstitialPage() && interstitial_page_->pause_throbber() &&
      !due_to_interstitial) {
    return;
  }
  bool is_loading = IsLoading();
  if (!is_loading) {
    // Loading finished: clear per-load progress state.
    load_state_ = net::LoadStateWithParam(net::LOAD_STATE_IDLE,
                                          base::string16());
    load_state_host_.clear();
    upload_size_ = 0;
    upload_position_ = 0;
  }
  GetRenderManager()->SetIsLoading(is_loading);
  waiting_for_response_ = is_loading;
  is_load_to_different_document_ = to_different_document;
  if (delegate_)
    delegate_->LoadingStateChanged(this, to_different_document);
  NotifyNavigationStateChanged(INVALIDATE_TYPE_LOAD);
  std::string url = (details ? details->url.possibly_invalid_spec() : "NULL");
  if (is_loading) {
    // Async trace pair keyed on `this` brackets the whole load.
    TRACE_EVENT_ASYNC_BEGIN2("browser,navigation", "WebContentsImpl Loading",
                             this, "URL", url, "Main FrameTreeNode id",
                             GetFrameTree()->root()->frame_tree_node_id());
    for (auto& observer : observers_)
      observer.DidStartLoading();
  } else {
    TRACE_EVENT_ASYNC_END1("browser,navigation", "WebContentsImpl Loading",
                           this, "URL", url);
    for (auto& observer : observers_)
      observer.DidStopLoading();
  }
  // Legacy NotificationService broadcast, kept for remaining observers.
  int type = is_loading ? NOTIFICATION_LOAD_START : NOTIFICATION_LOAD_STOP;
  NotificationDetails det = NotificationService::NoDetails();
  if (details)
      det = Details<LoadNotificationDetails>(details);
  NotificationService::current()->Notify(
      type, Source<NavigationController>(&controller_), det);
}
| 0
|
478,883
|
const CImg<T>& save_off(const CImgList<tf>& primitives, const CImgList<tc>& colors,
                        std::FILE *const file) const {
  // Write this 3D object to an already-open OFF file.
  // Forwards to the internal worker; the trailing 0 selects its default
  // mode (see _save_off for the exact meaning of the last argument).
  return _save_off(primitives, colors, file, 0);
}
| 0
|
401,903
|
/* VM handler for DO_ICALL: invoke an internal (C-implemented) PHP function
 * whose call frame has already been pushed onto EX(call). Handles return
 * value placement, argument/frame cleanup and exception propagation. */
ZEND_VM_HOT_HANDLER(129, ZEND_DO_ICALL, ANY, ANY, SPEC(RETVAL))
{
	USE_OPLINE
	zend_execute_data *call = EX(call);
	zend_function *fbc = call->func;
	zval *ret;
	zval retval;

	SAVE_OPLINE();
	/* Pop the call frame off the pending-call chain and make it current
	 * for the duration of the handler invocation. */
	EX(call) = call->prev_execute_data;
	call->prev_execute_data = execute_data;
	EG(current_execute_data) = call;

	/* If the result is unused, direct the handler into a stack temporary. */
	ret = RETURN_VALUE_USED(opline) ? EX_VAR(opline->result.var) : &retval;
	ZVAL_NULL(ret);

	fbc->internal_function.handler(call, ret);

#if ZEND_DEBUG
	/* Debug builds verify the declared return type and reference-ness. */
	if (!EG(exception) && call->func) {
		ZEND_ASSERT(!(call->func->common.fn_flags & ZEND_ACC_HAS_RETURN_TYPE) ||
			zend_verify_internal_return_type(call->func, ret));
		ZEND_ASSERT((call->func->common.fn_flags & ZEND_ACC_RETURN_REFERENCE)
			? Z_ISREF_P(ret) : !Z_ISREF_P(ret));
	}
#endif

	EG(current_execute_data) = execute_data;
	zend_vm_stack_free_args(call);
	zend_vm_stack_free_call_frame(call);

	/* An unused return value must still be destroyed. */
	if (!RETURN_VALUE_USED(opline)) {
		i_zval_ptr_dtor(ret);
	}

	if (UNEXPECTED(EG(exception) != NULL)) {
		zend_rethrow_exception(execute_data);
		HANDLE_EXCEPTION();
	}
	ZEND_VM_SET_OPCODE(opline + 1);
	ZEND_VM_CONTINUE();
}
| 0
|
197,338
|
WebUI* WebContentsImpl::GetWebUI() const {
  // Prefer the committed WebUI; if none exists yet, fall back to the
  // pending one (which may itself be null).
  WebUI* web_ui = render_manager_.web_ui();
  if (!web_ui)
    web_ui = render_manager_.pending_web_ui();
  return web_ui;
}
| 0
|
98,320
|
static int setup_efi_info_memmap(struct boot_params *params,
unsigned long params_load_addr,
unsigned int efi_map_offset,
unsigned int efi_map_sz)
{
void *efi_map = (void *)params + efi_map_offset;
unsigned long efi_map_phys_addr = params_load_addr + efi_map_offset;
struct efi_info *ei = ¶ms->efi_info;
if (!efi_map_sz)
return 0;
efi_runtime_map_copy(efi_map, efi_map_sz);
ei->efi_memmap = efi_map_phys_addr & 0xffffffff;
ei->efi_memmap_hi = efi_map_phys_addr >> 32;
ei->efi_memmap_size = efi_map_sz;
return 0;
}
| 0
|
16,717
|
static inline int rsvp_class_to_filter_num ( int classnum ) {
switch ( classnum ) {
case RSVP_CLASS_SESSION : case RSVP_CLASS_HOP : case RSVP_CLASS_INTEGRITY : case RSVP_CLASS_TIME_VALUES : case RSVP_CLASS_ERROR : case RSVP_CLASS_SCOPE : case RSVP_CLASS_STYLE : case RSVP_CLASS_FLOWSPEC : case RSVP_CLASS_FILTER_SPEC : case RSVP_CLASS_SENDER_TEMPLATE : case RSVP_CLASS_SENDER_TSPEC : case RSVP_CLASS_ADSPEC : case RSVP_CLASS_POLICY : case RSVP_CLASS_CONFIRM : case RSVP_CLASS_LABEL : case RSVP_CLASS_LABEL_REQUEST : case RSVP_CLASS_HELLO : case RSVP_CLASS_EXPLICIT_ROUTE : case RSVP_CLASS_RECORD_ROUTE : case RSVP_CLASS_MESSAGE_ID : case RSVP_CLASS_MESSAGE_ID_ACK : case RSVP_CLASS_MESSAGE_ID_LIST : return classnum + RSVPF_OBJECT ;
break ;
case RSVP_CLASS_RECOVERY_LABEL : case RSVP_CLASS_UPSTREAM_LABEL : case RSVP_CLASS_LABEL_SET : case RSVP_CLASS_PROTECTION : return RSVPF_RECOVERY_LABEL + ( classnum - RSVP_CLASS_RECOVERY_LABEL ) ;
case RSVP_CLASS_SUGGESTED_LABEL : case RSVP_CLASS_ACCEPTABLE_LABEL_SET : case RSVP_CLASS_RESTART_CAP : return RSVPF_SUGGESTED_LABEL + ( classnum - RSVP_CLASS_SUGGESTED_LABEL ) ;
case RSVP_CLASS_LINK_CAP : return RSVPF_LINK_CAP ;
case RSVP_CLASS_DIFFSERV : return RSVPF_DIFFSERV ;
case RSVP_CLASS_CLASSTYPE : return RSVPF_DSTE ;
case RSVP_CLASS_NOTIFY_REQUEST : return RSVPF_NOTIFY_REQUEST ;
case RSVP_CLASS_ADMIN_STATUS : return RSVPF_ADMIN_STATUS ;
case RSVP_CLASS_LSP_ATTRIBUTES : return RSVPF_LSP_ATTRIBUTES ;
case RSVP_CLASS_ASSOCIATION : return RSVPF_ASSOCIATION ;
case RSVP_CLASS_CALL_ATTRIBUTES : return RSVPF_CALL_ATTRIBUTES ;
case RSVP_CLASS_SESSION_ATTRIBUTE : return RSVPF_SESSION_ATTRIBUTE ;
case RSVP_CLASS_GENERALIZED_UNI : return RSVPF_GENERALIZED_UNI ;
case RSVP_CLASS_CALL_ID : return RSVPF_CALL_ID ;
case RSVP_CLASS_3GPP2_OBJECT : return RSVPF_3GPP2_OBJECT ;
case RSVP_CLASS_DCLASS : return RSVPF_DCLASS ;
case RSVP_CLASS_LSP_TUNNEL_IF_ID : return RSVPF_LSP_TUNNEL_IF_ID ;
case RSVP_CLASS_EXCLUDE_ROUTE : return RSVPF_EXCLUDE_ROUTE ;
case RSVP_CLASS_JUNIPER_PROPERTIES : return RSVPF_JUNIPER ;
case RSVP_CLASS_VENDOR_PRIVATE_1 : case RSVP_CLASS_VENDOR_PRIVATE_2 : case RSVP_CLASS_VENDOR_PRIVATE_3 : case RSVP_CLASS_VENDOR_PRIVATE_4 : case RSVP_CLASS_VENDOR_PRIVATE_5 : case RSVP_CLASS_VENDOR_PRIVATE_6 : case RSVP_CLASS_VENDOR_PRIVATE_7 : case RSVP_CLASS_VENDOR_PRIVATE_8 : case RSVP_CLASS_VENDOR_PRIVATE_9 : case RSVP_CLASS_VENDOR_PRIVATE_10 : case RSVP_CLASS_VENDOR_PRIVATE_11 : case RSVP_CLASS_VENDOR_PRIVATE_12 : return RSVPF_PRIVATE_OBJ ;
default : return RSVPF_UNKNOWN_OBJ ;
}
}
| 0
|
302,281
|
/* Handle an SADB_ACQUIRE message from userspace. A key manager sends this
 * (with a non-zero errno) to report that it failed to negotiate keys for a
 * previously kernel-issued acquire; the matching larval xfrm state is then
 * moved to the error state so waiters can give up. */
static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
	struct net *net = sock_net(sk);
	struct xfrm_state *x;

	/* An ACQUIRE response must be a bare header (length in 64-bit words). */
	if (hdr->sadb_msg_len != sizeof(struct sadb_msg)/8)
		return -EOPNOTSUPP;

	/* Only error reports tied to a kernel-issued sequence are meaningful. */
	if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0)
		return 0;

	x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
	if (x == NULL)
		return 0;

	spin_lock_bh(&x->lock);
	/* Only larval (ACQ) states transition; anything else is left alone. */
	if (x->km.state == XFRM_STATE_ACQ)
		x->km.state = XFRM_STATE_ERROR;
	spin_unlock_bh(&x->lock);
	/* Drop the reference taken by xfrm_find_acq_byseq(). */
	xfrm_state_put(x);
	return 0;
}
| 0
|
370,096
|
/* Release a libxml node that is no longer referenced from PHP userland.
 * Document nodes are never freed here (their lifetime is managed by the
 * owning document object). Detached non-document nodes are freed together
 * with their children; attached nodes are merely unregistered, since the
 * tree still owns them. */
PHP_LIBXML_API void php_libxml_node_free_resource(xmlNodePtr node TSRMLS_DC)
{
	if (!node) {
		return;
	}
	switch (node->type) {
		case XML_DOCUMENT_NODE:
		case XML_HTML_DOCUMENT_NODE:
			break;
		default:
			/* A node with no parent (or a namespace decl) is not owned by any
			 * tree, so we are responsible for freeing it and its subtree. */
			if (node->parent == NULL || node->type == XML_NAMESPACE_DECL) {
				php_libxml_node_free_list((xmlNodePtr) node->children TSRMLS_CC);
				switch (node->type) {
					/* Skip property freeing for the following types */
					case XML_ATTRIBUTE_DECL:
					case XML_DTD_NODE:
					case XML_DOCUMENT_TYPE_NODE:
					case XML_ENTITY_DECL:
					case XML_ATTRIBUTE_NODE:
					case XML_NAMESPACE_DECL:
					case XML_TEXT_NODE:
						break;
					default:
						php_libxml_node_free_list((xmlNodePtr) node->properties TSRMLS_CC);
				}
				/* Unregister returns 0 when no other PHP object still holds the
				 * node; only then is it safe to sever the doc link and free. */
				if (php_libxml_unregister_node(node TSRMLS_CC) == 0) {
					node->doc = NULL;
				}
				php_libxml_node_free(node);
			} else {
				/* Still attached to a tree: just drop the PHP-side registration. */
				php_libxml_unregister_node(node TSRMLS_CC);
			}
	}
}
| 0
|
385,553
|
/* {{{ SoapServer::addFunction(mixed $functions)
 *
 * Registers functions the SOAP server will expose. Accepts:
 *   - an array of function-name strings,
 *   - a single function-name string, or
 *   - the SOAP_FUNCTIONS_ALL constant (exposes every defined function).
 * Function names are matched case-insensitively against EG(function_table).
 *
 * Fix over the original: the lowercased lookup key was leaked (emalloc
 * without efree) on the "non existent function" error returns in both the
 * array and single-string branches; the key is now freed before returning.
 */
PHP_METHOD(SoapServer, addFunction)
{
	soapServicePtr service;
	zval *function_name, *function_copy;
	HashPosition pos;

	SOAP_SERVER_BEGIN_CODE();

	FETCH_THIS_SERVICE(service);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &function_name) == FAILURE) {
		return;
	}

	/* TODO: could use zend_is_callable here */

	if (function_name->type == IS_ARRAY) {
		if (service->type == SOAP_FUNCTIONS) {
			zval **tmp_function, *function_copy;

			if (service->soap_functions.ft == NULL) {
				service->soap_functions.functions_all = FALSE;
				service->soap_functions.ft = emalloc(sizeof(HashTable));
				zend_hash_init(service->soap_functions.ft, zend_hash_num_elements(Z_ARRVAL_P(function_name)), NULL, ZVAL_PTR_DTOR, 0);
			}

			zend_hash_internal_pointer_reset_ex(Z_ARRVAL_P(function_name), &pos);
			while (zend_hash_get_current_data_ex(Z_ARRVAL_P(function_name), (void **)&tmp_function, &pos) != FAILURE) {
				char *key;
				int   key_len;
				zend_function *f;

				if (Z_TYPE_PP(tmp_function) != IS_STRING) {
					php_error_docref(NULL TSRMLS_CC, E_WARNING, "Tried to add a function that isn't a string");
					return;
				}

				/* Lowercase the name for the case-insensitive lookup. */
				key_len = Z_STRLEN_PP(tmp_function);
				key = emalloc(key_len + 1);
				zend_str_tolower_copy(key, Z_STRVAL_PP(tmp_function), key_len);

				if (zend_hash_find(EG(function_table), key, key_len+1, (void**)&f) == FAILURE) {
					/* Free the key before bailing out (was leaked before). */
					efree(key);
					php_error_docref(NULL TSRMLS_CC, E_WARNING, "Tried to add a non existent function '%s'", Z_STRVAL_PP(tmp_function));
					return;
				}

				/* Store the canonical (declared) name under the lowered key. */
				MAKE_STD_ZVAL(function_copy);
				ZVAL_STRING(function_copy, f->common.function_name, 1);
				zend_hash_update(service->soap_functions.ft, key, key_len+1, &function_copy, sizeof(zval *), NULL);

				efree(key);
				zend_hash_move_forward_ex(Z_ARRVAL_P(function_name), &pos);
			}
		}
	} else if (function_name->type == IS_STRING) {
		char *key;
		int   key_len;
		zend_function *f;

		key_len = Z_STRLEN_P(function_name);
		key = emalloc(key_len + 1);
		zend_str_tolower_copy(key, Z_STRVAL_P(function_name), key_len);

		if (zend_hash_find(EG(function_table), key, key_len+1, (void**)&f) == FAILURE) {
			/* Free the key before bailing out (was leaked before). */
			efree(key);
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "Tried to add a non existent function '%s'", Z_STRVAL_P(function_name));
			return;
		}
		if (service->soap_functions.ft == NULL) {
			service->soap_functions.functions_all = FALSE;
			service->soap_functions.ft = emalloc(sizeof(HashTable));
			zend_hash_init(service->soap_functions.ft, 0, NULL, ZVAL_PTR_DTOR, 0);
		}

		MAKE_STD_ZVAL(function_copy);
		ZVAL_STRING(function_copy, f->common.function_name, 1);
		zend_hash_update(service->soap_functions.ft, key, key_len+1, &function_copy, sizeof(zval *), NULL);
		efree(key);
	} else if (function_name->type == IS_LONG) {
		if (Z_LVAL_P(function_name) == SOAP_FUNCTIONS_ALL) {
			/* "All functions" mode replaces any explicit registration table. */
			if (service->soap_functions.ft != NULL) {
				zend_hash_destroy(service->soap_functions.ft);
				efree(service->soap_functions.ft);
				service->soap_functions.ft = NULL;
			}
			service->soap_functions.functions_all = TRUE;
		} else {
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid value passed");
			return;
		}
	}

	SOAP_SERVER_END_CODE();
}
| 0
|
19,250
|
/* Read a big-endian 32-bit value from a (possibly unaligned) byte buffer. */
uint32_t jbig2_get_uint32(const byte *bptr)
{
    /* Big-endian order: the high 16 bits come first. */
    const uint32_t hi = get_uint16(bptr);
    const uint32_t lo = get_uint16(bptr + 2);

    return (hi << 16) | lo;
}
| 0
|
147,864
|
/* MDIO bus read: issue a read command for (phy_id, reg) on the UniMAC MDIO
 * controller and wait for completion. Returns the 16-bit register value on
 * success, or a negative errno (from the wait callback, or -EIO on a read
 * failure the hardware flags). */
static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct unimac_mdio_priv *priv = bus->priv;
	int ret;
	u32 cmd;

	/* Prepare the read operation */
	cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
	unimac_mdio_writel(priv, cmd, MDIO_CMD);

	/* Start MDIO transaction */
	unimac_mdio_start(priv);

	/* Platform-provided wait hook (polling or interrupt driven). */
	ret = priv->wait_func(priv->wait_func_data);
	if (ret)
		return ret;

	cmd = unimac_mdio_readl(priv, MDIO_CMD);

	/* Some broken devices are known not to release the line during
	 * turn-around, e.g: Broadcom BCM53125 external switches, so check for
	 * that condition here and ignore the MDIO controller read failure
	 * indication.
	 */
	if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL))
		return -EIO;

	/* The register value occupies the low 16 bits of the command word. */
	return cmd & 0xffff;
}
| 0
|
128,454
|
/* Create and configure a slirp (user-mode networking) backend.
 * Derives network/mask/host/dhcp/dns addresses from the optional "vnetwork"
 * spec (dotted quad, optionally with /prefix or /netmask), validates the
 * user-supplied overrides against each other, then instantiates the slirp
 * stack and applies any queued host-forward / guest-forward / SMB configs.
 * Returns 0 on success, -1 on any validation or setup failure. */
static int net_slirp_init(NetClientState *peer, const char *model,
                          const char *name, int restricted,
                          const char *vnetwork, const char *vhost,
                          const char *vhostname, const char *tftp_export,
                          const char *bootfile, const char *vdhcp_start,
                          const char *vnameserver, const char *smb_export,
                          const char *vsmbserver, const char **dnssearch)
{
    /* default settings according to historic slirp */
    struct in_addr net  = { .s_addr = htonl(0x0a000200) }; /* 10.0.2.0 */
    struct in_addr mask = { .s_addr = htonl(0xffffff00) }; /* 255.255.255.0 */
    struct in_addr host = { .s_addr = htonl(0x0a000202) }; /* 10.0.2.2 */
    struct in_addr dhcp = { .s_addr = htonl(0x0a00020f) }; /* 10.0.2.15 */
    struct in_addr dns  = { .s_addr = htonl(0x0a000203) }; /* 10.0.2.3 */
#ifndef _WIN32
    struct in_addr smbsrv = { .s_addr = 0 };
#endif
    NetClientState *nc;
    SlirpState *s;
    char buf[20];
    uint32_t addr;
    int shift;
    char *end;
    struct slirp_config_str *config;

    if (!tftp_export) {
        tftp_export = legacy_tftp_prefix;
    }
    if (!bootfile) {
        bootfile = legacy_bootp_filename;
    }

    if (vnetwork) {
        if (get_str_sep(buf, sizeof(buf), &vnetwork, '/') < 0) {
            /* No '/' given: infer the netmask from the address class /
             * well-known private and test ranges. */
            if (!inet_aton(vnetwork, &net)) {
                return -1;
            }
            addr = ntohl(net.s_addr);
            if (!(addr & 0x80000000)) {
                mask.s_addr = htonl(0xff000000); /* class A */
            } else if ((addr & 0xfff00000) == 0xac100000) {
                mask.s_addr = htonl(0xfff00000); /* priv. 172.16.0.0/12 */
            } else if ((addr & 0xc0000000) == 0x80000000) {
                mask.s_addr = htonl(0xffff0000); /* class B */
            } else if ((addr & 0xffff0000) == 0xc0a80000) {
                mask.s_addr = htonl(0xffff0000); /* priv. 192.168.0.0/16 */
            } else if ((addr & 0xffff0000) == 0xc6120000) {
                mask.s_addr = htonl(0xfffe0000); /* tests 198.18.0.0/15 */
            } else if ((addr & 0xe0000000) == 0xe0000000) {
                mask.s_addr = htonl(0xffffff00); /* class C */
            } else {
                mask.s_addr = htonl(0xfffffff0); /* multicast/reserved */
            }
        } else {
            /* Explicit "/N" prefix length or "/a.b.c.d" netmask. */
            if (!inet_aton(buf, &net)) {
                return -1;
            }
            shift = strtol(vnetwork, &end, 10);
            if (*end != '\0') {
                if (!inet_aton(vnetwork, &mask)) {
                    return -1;
                }
            } else if (shift < 4 || shift > 32) {
                return -1;
            } else {
                mask.s_addr = htonl(0xffffffff << (32 - shift));
            }
        }
        /* Re-derive host (.2), dhcp-start (.15) and dns (.3) inside the
         * chosen subnet, preserving the historic host-part offsets. */
        net.s_addr &= mask.s_addr;
        host.s_addr = net.s_addr | (htonl(0x0202) & ~mask.s_addr);
        dhcp.s_addr = net.s_addr | (htonl(0x020f) & ~mask.s_addr);
        dns.s_addr  = net.s_addr | (htonl(0x0203) & ~mask.s_addr);
    }

    /* Validate user overrides: each must parse, lie within the subnet, and
     * not collide with the other special addresses. */
    if (vhost && !inet_aton(vhost, &host)) {
        return -1;
    }
    if ((host.s_addr & mask.s_addr) != net.s_addr) {
        return -1;
    }

    if (vdhcp_start && !inet_aton(vdhcp_start, &dhcp)) {
        return -1;
    }
    if ((dhcp.s_addr & mask.s_addr) != net.s_addr ||
        dhcp.s_addr == host.s_addr || dhcp.s_addr == dns.s_addr) {
        return -1;
    }

    if (vnameserver && !inet_aton(vnameserver, &dns)) {
        return -1;
    }
    if ((dns.s_addr & mask.s_addr) != net.s_addr ||
        dns.s_addr == host.s_addr) {
        return -1;
    }

#ifndef _WIN32
    if (vsmbserver && !inet_aton(vsmbserver, &smbsrv)) {
        return -1;
    }
#endif

    nc = qemu_new_net_client(&net_slirp_info, peer, model, name);

    snprintf(nc->info_str, sizeof(nc->info_str),
             "net=%s,restrict=%s", inet_ntoa(net),
             restricted ? "on" : "off");

    s = DO_UPCAST(SlirpState, nc, nc);

    s->slirp = slirp_init(restricted, net, mask, host, vhostname,
                          tftp_export, bootfile, dhcp, dns, dnssearch, s);
    QTAILQ_INSERT_TAIL(&slirp_stacks, s, entry);

    /* Apply forwarding rules queued before the stack existed. */
    for (config = slirp_configs; config; config = config->next) {
        if (config->flags & SLIRP_CFG_HOSTFWD) {
            if (slirp_hostfwd(s, config->str,
                              config->flags & SLIRP_CFG_LEGACY) < 0)
                goto error;
        } else {
            if (slirp_guestfwd(s, config->str,
                               config->flags & SLIRP_CFG_LEGACY) < 0)
                goto error;
        }
    }
#ifndef _WIN32
    if (!smb_export) {
        smb_export = legacy_smb_export;
    }
    if (smb_export) {
        if (slirp_smb(s, smb_export, smbsrv) < 0)
            goto error;
    }
#endif

    return 0;

error:
    qemu_del_net_client(nc);
    return -1;
}
| 0
|
499,837
|
// Render a rectangle on the stream as "[llx, lly, urx, ury]".
static void print_rect(std::ostream& out,
                       QPDFObjectHandle::Rectangle const& r)
{
    // Lower-left corner first, then the upper-right corner.
    out << "[" << r.llx << ", " << r.lly;
    out << ", " << r.urx << ", " << r.ury << "]";
}
| 0
|
18,217
|
/*
 * OCSP-stapling callback for s_server: build an OCSP request for the
 * server certificate, query the responder named in the certificate's AIA
 * extension (or the configured default responder), and staple the DER
 * response onto the TLS handshake.
 *
 * Returns SSL_TLSEXT_ERR_OK on success, SSL_TLSEXT_ERR_NOACK when no
 * response could be obtained, or SSL_TLSEXT_ERR_ALERT_FATAL on error.
 */
static int cert_status_cb(SSL *s, void *arg)
{
    tlsextstatusctx *srctx = arg;
    BIO *err = srctx->err;
    char *host, *port, *path;
    int use_ssl;
    unsigned char *rspder = NULL;
    int rspderlen;
    STACK_OF(OPENSSL_STRING) *aia = NULL;
    X509 *x = NULL;
    X509_STORE_CTX inctx;
    X509_OBJECT obj;
    OCSP_REQUEST *req = NULL;
    OCSP_RESPONSE *resp = NULL;
    OCSP_CERTID *id = NULL;
    STACK_OF(X509_EXTENSION) *exts;
    int ret = SSL_TLSEXT_ERR_NOACK;
    int i;
#if 0
    STACK_OF(OCSP_RESPID) *ids;
    SSL_get_tlsext_status_ids(s, &ids);
    BIO_printf(err, "cert_status: received %d ids\n", sk_OCSP_RESPID_num(ids));
#endif
    if (srctx->verbose)
        BIO_puts(err, "cert_status: callback called\n");
    /* Prefer the responder URL from the certificate's AIA extension;
     * fall back to the configured default responder. */
    x = SSL_get_certificate(s);
    aia = X509_get1_ocsp(x);
    if (aia) {
        if (!OCSP_parse_url(sk_OPENSSL_STRING_value(aia, 0),
                            &host, &port, &path, &use_ssl)) {
            BIO_puts(err, "cert_status: can't parse AIA URL\n");
            goto err;
        }
        if (srctx->verbose)
            BIO_printf(err, "cert_status: AIA URL: %s\n",
                       sk_OPENSSL_STRING_value(aia, 0));
    } else {
        if (!srctx->host) {
            BIO_puts(srctx->err,
                     "cert_status: no AIA and no default responder URL\n");
            goto done;
        }
        host = srctx->host;
        path = srctx->path;
        port = srctx->port;
        use_ssl = srctx->use_ssl;
    }
    /* Locate the issuer certificate in the context's trust store; it is
     * needed to compute the OCSP certificate ID. */
    if (!X509_STORE_CTX_init(&inctx,
                             SSL_CTX_get_cert_store(SSL_get_SSL_CTX(s)),
                             NULL, NULL))
        goto err;
    if (X509_STORE_get_by_subject(&inctx, X509_LU_X509,
                                  X509_get_issuer_name(x), &obj) <= 0) {
        BIO_puts(err, "cert_status: Can't retrieve issuer certificate.\n");
        X509_STORE_CTX_cleanup(&inctx);
        goto done;
    }
    req = OCSP_REQUEST_new();
    if (!req)
        goto err;
    id = OCSP_cert_to_id(NULL, x, obj.data.x509);
    X509_free(obj.data.x509);
    X509_STORE_CTX_cleanup(&inctx);
    if (!id)
        goto err;
    if (!OCSP_request_add0_id(req, id))
        goto err;
    /* Ownership of id transferred to req by add0; avoid double free. */
    id = NULL;
    /* Forward any status-request extensions sent by the client. */
    SSL_get_tlsext_status_exts(s, &exts);
    for (i = 0; i < sk_X509_EXTENSION_num(exts); i++) {
        X509_EXTENSION *ext = sk_X509_EXTENSION_value(exts, i);
        if (!OCSP_REQUEST_add_ext(req, ext, -1))
            goto err;
    }
    resp = process_responder(err, req, host, path, port, use_ssl, NULL,
                             srctx->timeout);
    if (!resp) {
        BIO_puts(err, "cert_status: error querying responder\n");
        goto done;
    }
    /* Staple the DER-encoded response onto the handshake. */
    rspderlen = i2d_OCSP_RESPONSE(resp, &rspder);
    if (rspderlen <= 0)
        goto err;
    SSL_set_tlsext_status_ocsp_resp(s, rspder, rspderlen);
    if (srctx->verbose) {
        BIO_puts(err, "cert_status: ocsp response sent:\n");
        OCSP_RESPONSE_print(err, resp, 2);
    }
    ret = SSL_TLSEXT_ERR_OK;
 done:
    if (ret != SSL_TLSEXT_ERR_OK)
        ERR_print_errors(err);
    if (aia) {
        /* host/path/port were allocated by OCSP_parse_url only in the
         * AIA branch; the default-responder strings are not ours. */
        OPENSSL_free(host);
        OPENSSL_free(path);
        OPENSSL_free(port);
        X509_email_free(aia);
    }
    if (id)
        OCSP_CERTID_free(id);
    if (req)
        OCSP_REQUEST_free(req);
    if (resp)
        OCSP_RESPONSE_free(resp);
    return ret;
 err:
    ret = SSL_TLSEXT_ERR_ALERT_FATAL;
    goto done;
}
#ifndef OPENSSL_NO_NEXTPROTONEG
/* State handed to next_proto_cb: the protocol list advertised via NPN. */
typedef struct tlsextnextprotoctx_st {
    unsigned char *data;
    unsigned int len;
} tlsextnextprotoctx;

/*
 * NPN advertisement callback: hand the preconfigured protocol list back
 * to the TLS stack. Always succeeds.
 */
static int next_proto_cb(SSL *s, const unsigned char **data,
                         unsigned int *len, void *arg)
{
    tlsextnextprotoctx *next_proto = arg;

    *data = next_proto->data;
    *len = next_proto->len;
    return SSL_TLSEXT_ERR_OK;
}
# endif # endif int MAIN ( int , char * * ) ;
# ifndef OPENSSL_NO_JPAKE static char * jpake_secret = NULL ;
# endif # ifndef OPENSSL_NO_SRP static srpsrvparm srp_callback_parm ;
# endif # ifndef OPENSSL_NO_SRTP static char * srtp_profiles = NULL ;
/*
 * s_server applet entry point: parse the command line, load keys and
 * certificates, build one (or, with SNI, two) SSL_CTX objects with the
 * requested protocol/cipher/verification settings, then run the accept
 * loop via do_server().  Returns 0 on clean shutdown, nonzero on error.
 */
# endif int MAIN ( int argc , char * argv [ ] ) {
X509_VERIFY_PARAM * vpm = NULL ;
int badarg = 0 ;
short port = PORT ;
char * CApath = NULL , * CAfile = NULL ;
unsigned char * context = NULL ;
char * dhfile = NULL ;
# ifndef OPENSSL_NO_ECDH char * named_curve = NULL ;
# endif int badop = 0 , bugs = 0 ;
int ret = 1 ;
/* Accumulates SSL_OP_* option bits selected on the command line. */
int off = 0 ;
int no_tmp_rsa = 0 , no_dhe = 0 , nocert = 0 ;
# ifndef OPENSSL_NO_ECDH int no_ecdhe = 0 ;
# endif int state = 0 ;
const SSL_METHOD * meth = NULL ;
int socket_type = SOCK_STREAM ;
ENGINE * e = NULL ;
char * inrand = NULL ;
int s_cert_format = FORMAT_PEM , s_key_format = FORMAT_PEM ;
char * passarg = NULL , * pass = NULL ;
char * dpassarg = NULL , * dpass = NULL ;
int s_dcert_format = FORMAT_PEM , s_dkey_format = FORMAT_PEM ;
X509 * s_cert = NULL , * s_dcert = NULL ;
EVP_PKEY * s_key = NULL , * s_dkey = NULL ;
int no_cache = 0 ;
/* Second key/cert pair and callback state used for SNI (-servername). */
# ifndef OPENSSL_NO_TLSEXT EVP_PKEY * s_key2 = NULL ;
X509 * s_cert2 = NULL ;
tlsextctx tlsextcbp = {
NULL , NULL , SSL_TLSEXT_ERR_ALERT_WARNING }
;
# ifndef OPENSSL_NO_NEXTPROTONEG const char * next_proto_neg_in = NULL ;
tlsextnextprotoctx next_proto ;
# endif # endif # ifndef OPENSSL_NO_PSK static char * psk_identity_hint = NULL ;
# endif # ifndef OPENSSL_NO_SRP char * srpuserseed = NULL ;
char * srp_verifier_file = NULL ;
/* Default to the version-flexible method; protocol flags narrow it. */
# endif meth = SSLv23_server_method ( ) ;
local_argc = argc ;
local_argv = argv ;
apps_startup ( ) ;
# ifdef MONOLITH s_server_init ( ) ;
# endif if ( bio_err == NULL ) bio_err = BIO_new_fp ( stderr , BIO_NOCLOSE ) ;
if ( ! load_config ( bio_err , NULL ) ) goto end ;
verify_depth = 0 ;
# ifdef FIONBIO s_nbio = 0 ;
# endif s_nbio_test = 0 ;
argc -- ;
argv ++ ;
/* ---- Command-line parsing: one option per iteration. ---- */
while ( argc >= 1 ) {
if ( ( strcmp ( * argv , "-port" ) == 0 ) || ( strcmp ( * argv , "-accept" ) == 0 ) ) {
if ( -- argc < 1 ) goto bad ;
if ( ! extract_port ( * ( ++ argv ) , & port ) ) goto bad ;
}
else if ( strcmp ( * argv , "-verify" ) == 0 ) {
s_server_verify = SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE ;
if ( -- argc < 1 ) goto bad ;
verify_depth = atoi ( * ( ++ argv ) ) ;
BIO_printf ( bio_err , "verify depth is %d\n" , verify_depth ) ;
}
else if ( strcmp ( * argv , "-Verify" ) == 0 ) {
/* -Verify additionally fails the handshake without a client cert. */
s_server_verify = SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT | SSL_VERIFY_CLIENT_ONCE ;
if ( -- argc < 1 ) goto bad ;
verify_depth = atoi ( * ( ++ argv ) ) ;
BIO_printf ( bio_err , "verify depth is %d, must return a certificate\n" , verify_depth ) ;
}
else if ( strcmp ( * argv , "-context" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
context = ( unsigned char * ) * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-cert" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
s_cert_file = * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-certform" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
s_cert_format = str2fmt ( * ( ++ argv ) ) ;
}
else if ( strcmp ( * argv , "-key" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
s_key_file = * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-keyform" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
s_key_format = str2fmt ( * ( ++ argv ) ) ;
}
else if ( strcmp ( * argv , "-pass" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
passarg = * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-dhparam" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
dhfile = * ( ++ argv ) ;
}
# ifndef OPENSSL_NO_ECDH else if ( strcmp ( * argv , "-named_curve" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
named_curve = * ( ++ argv ) ;
}
# endif else if ( strcmp ( * argv , "-dcertform" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
s_dcert_format = str2fmt ( * ( ++ argv ) ) ;
}
else if ( strcmp ( * argv , "-dcert" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
s_dcert_file = * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-dkeyform" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
s_dkey_format = str2fmt ( * ( ++ argv ) ) ;
}
else if ( strcmp ( * argv , "-dpass" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
dpassarg = * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-dkey" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
s_dkey_file = * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-nocert" ) == 0 ) {
nocert = 1 ;
}
else if ( strcmp ( * argv , "-CApath" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
CApath = * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-no_cache" ) == 0 ) no_cache = 1 ;
/* args_verify() consumes its own arguments and advances argv/argc. */
else if ( args_verify ( & argv , & argc , & badarg , bio_err , & vpm ) ) {
if ( badarg ) goto bad ;
continue ;
}
else if ( strcmp ( * argv , "-verify_return_error" ) == 0 ) verify_return_error = 1 ;
else if ( strcmp ( * argv , "-serverpref" ) == 0 ) {
off |= SSL_OP_CIPHER_SERVER_PREFERENCE ;
}
else if ( strcmp ( * argv , "-legacy_renegotiation" ) == 0 ) off |= SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION ;
else if ( strcmp ( * argv , "-cipher" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
cipher = * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-CAfile" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
CAfile = * ( ++ argv ) ;
}
# ifdef FIONBIO else if ( strcmp ( * argv , "-nbio" ) == 0 ) {
s_nbio = 1 ;
}
# endif else if ( strcmp ( * argv , "-nbio_test" ) == 0 ) {
# ifdef FIONBIO s_nbio = 1 ;
# endif s_nbio_test = 1 ;
}
else if ( strcmp ( * argv , "-debug" ) == 0 ) {
s_debug = 1 ;
}
# ifndef OPENSSL_NO_TLSEXT else if ( strcmp ( * argv , "-tlsextdebug" ) == 0 ) s_tlsextdebug = 1 ;
else if ( strcmp ( * argv , "-status" ) == 0 ) s_tlsextstatus = 1 ;
else if ( strcmp ( * argv , "-status_verbose" ) == 0 ) {
s_tlsextstatus = 1 ;
tlscstatp . verbose = 1 ;
}
else if ( ! strcmp ( * argv , "-status_timeout" ) ) {
s_tlsextstatus = 1 ;
if ( -- argc < 1 ) goto bad ;
tlscstatp . timeout = atoi ( * ( ++ argv ) ) ;
}
else if ( ! strcmp ( * argv , "-status_url" ) ) {
s_tlsextstatus = 1 ;
if ( -- argc < 1 ) goto bad ;
if ( ! OCSP_parse_url ( * ( ++ argv ) , & tlscstatp . host , & tlscstatp . port , & tlscstatp . path , & tlscstatp . use_ssl ) ) {
BIO_printf ( bio_err , "Error parsing URL\n" ) ;
goto bad ;
}
}
# endif else if ( strcmp ( * argv , "-msg" ) == 0 ) {
s_msg = 1 ;
}
else if ( strcmp ( * argv , "-hack" ) == 0 ) {
hack = 1 ;
}
else if ( strcmp ( * argv , "-state" ) == 0 ) {
state = 1 ;
}
else if ( strcmp ( * argv , "-crlf" ) == 0 ) {
s_crlf = 1 ;
}
else if ( strcmp ( * argv , "-quiet" ) == 0 ) {
s_quiet = 1 ;
}
else if ( strcmp ( * argv , "-bugs" ) == 0 ) {
bugs = 1 ;
}
else if ( strcmp ( * argv , "-no_tmp_rsa" ) == 0 ) {
no_tmp_rsa = 1 ;
}
else if ( strcmp ( * argv , "-no_dhe" ) == 0 ) {
no_dhe = 1 ;
}
# ifndef OPENSSL_NO_ECDH else if ( strcmp ( * argv , "-no_ecdhe" ) == 0 ) {
no_ecdhe = 1 ;
}
# endif # ifndef OPENSSL_NO_PSK else if ( strcmp ( * argv , "-psk_hint" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
psk_identity_hint = * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-psk" ) == 0 ) {
size_t i ;
if ( -- argc < 1 ) goto bad ;
psk_key = * ( ++ argv ) ;
/* Reject a PSK that is not entirely hexadecimal digits. */
for ( i = 0 ;
i < strlen ( psk_key ) ;
i ++ ) {
if ( isxdigit ( ( unsigned char ) psk_key [ i ] ) ) continue ;
BIO_printf ( bio_err , "Not a hex number '%s'\n" , * argv ) ;
goto bad ;
}
}
/* SRP requires TLS 1.0+, so both SRP options force the TLSv1 method. */
# endif # ifndef OPENSSL_NO_SRP else if ( strcmp ( * argv , "-srpvfile" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
srp_verifier_file = * ( ++ argv ) ;
meth = TLSv1_server_method ( ) ;
}
else if ( strcmp ( * argv , "-srpuserseed" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
srpuserseed = * ( ++ argv ) ;
meth = TLSv1_server_method ( ) ;
}
# endif else if ( strcmp ( * argv , "-www" ) == 0 ) {
www = 1 ;
}
else if ( strcmp ( * argv , "-WWW" ) == 0 ) {
www = 2 ;
}
else if ( strcmp ( * argv , "-HTTP" ) == 0 ) {
www = 3 ;
}
else if ( strcmp ( * argv , "-no_ssl2" ) == 0 ) {
off |= SSL_OP_NO_SSLv2 ;
}
else if ( strcmp ( * argv , "-no_ssl3" ) == 0 ) {
off |= SSL_OP_NO_SSLv3 ;
}
else if ( strcmp ( * argv , "-no_tls1" ) == 0 ) {
off |= SSL_OP_NO_TLSv1 ;
}
else if ( strcmp ( * argv , "-no_tls1_1" ) == 0 ) {
off |= SSL_OP_NO_TLSv1_1 ;
}
else if ( strcmp ( * argv , "-no_tls1_2" ) == 0 ) {
off |= SSL_OP_NO_TLSv1_2 ;
}
else if ( strcmp ( * argv , "-no_comp" ) == 0 ) {
off |= SSL_OP_NO_COMPRESSION ;
}
# ifndef OPENSSL_NO_TLSEXT else if ( strcmp ( * argv , "-no_ticket" ) == 0 ) {
off |= SSL_OP_NO_TICKET ;
}
# endif # ifndef OPENSSL_NO_SSL2 else if ( strcmp ( * argv , "-ssl2" ) == 0 ) {
meth = SSLv2_server_method ( ) ;
}
# endif # ifndef OPENSSL_NO_SSL3_METHOD else if ( strcmp ( * argv , "-ssl3" ) == 0 ) {
meth = SSLv3_server_method ( ) ;
}
# endif # ifndef OPENSSL_NO_TLS1 else if ( strcmp ( * argv , "-tls1" ) == 0 ) {
meth = TLSv1_server_method ( ) ;
}
else if ( strcmp ( * argv , "-tls1_1" ) == 0 ) {
meth = TLSv1_1_server_method ( ) ;
}
else if ( strcmp ( * argv , "-tls1_2" ) == 0 ) {
meth = TLSv1_2_server_method ( ) ;
}
# endif # ifndef OPENSSL_NO_DTLS1 else if ( strcmp ( * argv , "-dtls1" ) == 0 ) {
meth = DTLSv1_server_method ( ) ;
/* DTLS runs over UDP. */
socket_type = SOCK_DGRAM ;
}
else if ( strcmp ( * argv , "-timeout" ) == 0 ) enable_timeouts = 1 ;
else if ( strcmp ( * argv , "-mtu" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
socket_mtu = atol ( * ( ++ argv ) ) ;
}
else if ( strcmp ( * argv , "-chain" ) == 0 ) cert_chain = 1 ;
# endif else if ( strcmp ( * argv , "-id_prefix" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
session_id_prefix = * ( ++ argv ) ;
}
# ifndef OPENSSL_NO_ENGINE else if ( strcmp ( * argv , "-engine" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
engine_id = * ( ++ argv ) ;
}
# endif else if ( strcmp ( * argv , "-rand" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
inrand = * ( ++ argv ) ;
}
# ifndef OPENSSL_NO_TLSEXT else if ( strcmp ( * argv , "-servername" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
tlsextcbp . servername = * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-servername_fatal" ) == 0 ) {
tlsextcbp . extension_error = SSL_TLSEXT_ERR_ALERT_FATAL ;
}
else if ( strcmp ( * argv , "-cert2" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
s_cert_file2 = * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-key2" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
s_key_file2 = * ( ++ argv ) ;
}
# ifndef OPENSSL_NO_NEXTPROTONEG else if ( strcmp ( * argv , "-nextprotoneg" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
next_proto_neg_in = * ( ++ argv ) ;
}
# endif # endif # if ! defined ( OPENSSL_NO_JPAKE ) && ! defined ( OPENSSL_NO_PSK ) else if ( strcmp ( * argv , "-jpake" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
jpake_secret = * ( ++ argv ) ;
}
# endif # ifndef OPENSSL_NO_SRTP else if ( strcmp ( * argv , "-use_srtp" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
srtp_profiles = * ( ++ argv ) ;
}
# endif else if ( strcmp ( * argv , "-keymatexport" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
keymatexportlabel = * ( ++ argv ) ;
}
else if ( strcmp ( * argv , "-keymatexportlen" ) == 0 ) {
if ( -- argc < 1 ) goto bad ;
keymatexportlen = atoi ( * ( ++ argv ) ) ;
if ( keymatexportlen == 0 ) goto bad ;
}
else {
BIO_printf ( bio_err , "unknown option %s\n" , * argv ) ;
badop = 1 ;
break ;
}
argc -- ;
argv ++ ;
}
if ( badop ) {
bad : sv_usage ( ) ;
goto end ;
}
/* ---- Option sanity checks. ---- */
# ifndef OPENSSL_NO_DTLS1 if ( www && socket_type == SOCK_DGRAM ) {
BIO_printf ( bio_err , "Can't use -HTTP, -www or -WWW with DTLS\n" ) ;
goto end ;
}
/* J-PAKE is tunnelled through the PSK ciphersuites, so the two are
 * mutually exclusive and the cipher list is forced to "PSK". */
# endif # if ! defined ( OPENSSL_NO_JPAKE ) && ! defined ( OPENSSL_NO_PSK ) if ( jpake_secret ) {
if ( psk_key ) {
BIO_printf ( bio_err , "Can't use JPAKE and PSK together\n" ) ;
goto end ;
}
psk_identity = "JPAKE" ;
if ( cipher ) {
BIO_printf ( bio_err , "JPAKE sets cipher to PSK\n" ) ;
goto end ;
}
cipher = "PSK" ;
}
/* ---- Library initialization and key/certificate loading. ---- */
# endif SSL_load_error_strings ( ) ;
OpenSSL_add_ssl_algorithms ( ) ;
# ifndef OPENSSL_NO_ENGINE e = setup_engine ( bio_err , engine_id , 1 ) ;
# endif if ( ! app_passwd ( bio_err , passarg , dpassarg , & pass , & dpass ) ) {
BIO_printf ( bio_err , "Error getting password\n" ) ;
goto end ;
}
/* The key defaults to the certificate file (combined PEM). */
if ( s_key_file == NULL ) s_key_file = s_cert_file ;
# ifndef OPENSSL_NO_TLSEXT if ( s_key_file2 == NULL ) s_key_file2 = s_cert_file2 ;
# endif if ( nocert == 0 ) {
s_key = load_key ( bio_err , s_key_file , s_key_format , 0 , pass , e , "server certificate private key file" ) ;
if ( ! s_key ) {
ERR_print_errors ( bio_err ) ;
goto end ;
}
s_cert = load_cert ( bio_err , s_cert_file , s_cert_format , NULL , e , "server certificate file" ) ;
if ( ! s_cert ) {
ERR_print_errors ( bio_err ) ;
goto end ;
}
/* Second key/cert pair is only loaded when SNI is in use. */
# ifndef OPENSSL_NO_TLSEXT if ( tlsextcbp . servername ) {
s_key2 = load_key ( bio_err , s_key_file2 , s_key_format , 0 , pass , e , "second server certificate private key file" ) ;
if ( ! s_key2 ) {
ERR_print_errors ( bio_err ) ;
goto end ;
}
s_cert2 = load_cert ( bio_err , s_cert_file2 , s_cert_format , NULL , e , "second server certificate file" ) ;
if ( ! s_cert2 ) {
ERR_print_errors ( bio_err ) ;
goto end ;
}
}
# endif }
/* Parse the -nextprotoneg list into NPN wire format. */
# if ! defined ( OPENSSL_NO_TLSEXT ) && ! defined ( OPENSSL_NO_NEXTPROTONEG ) if ( next_proto_neg_in ) {
unsigned short len ;
next_proto . data = next_protos_parse ( & len , next_proto_neg_in ) ;
if ( next_proto . data == NULL ) goto end ;
next_proto . len = len ;
}
else {
next_proto . data = NULL ;
}
/* Optional additional (e.g. DSA alongside RSA) certificate pair. */
# endif if ( s_dcert_file ) {
if ( s_dkey_file == NULL ) s_dkey_file = s_dcert_file ;
s_dkey = load_key ( bio_err , s_dkey_file , s_dkey_format , 0 , dpass , e , "second certificate private key file" ) ;
if ( ! s_dkey ) {
ERR_print_errors ( bio_err ) ;
goto end ;
}
s_dcert = load_cert ( bio_err , s_dcert_file , s_dcert_format , NULL , e , "second server certificate file" ) ;
if ( ! s_dcert ) {
ERR_print_errors ( bio_err ) ;
goto end ;
}
}
/* ---- RNG seeding and output BIO selection. ---- */
if ( ! app_RAND_load_file ( NULL , bio_err , 1 ) && inrand == NULL && ! RAND_status ( ) ) {
BIO_printf ( bio_err , "warning, not much extra random data, consider using the -rand option\n" ) ;
}
if ( inrand != NULL ) BIO_printf ( bio_err , "%ld semi-random bytes loaded\n" , app_RAND_load_files ( inrand ) ) ;
if ( bio_s_out == NULL ) {
if ( s_quiet && ! s_debug && ! s_msg ) {
bio_s_out = BIO_new ( BIO_s_null ( ) ) ;
}
else {
if ( bio_s_out == NULL ) bio_s_out = BIO_new_fp ( stdout , BIO_NOCLOSE ) ;
}
}
/* -nocert clears every certificate/key file name. */
# if ! defined ( OPENSSL_NO_RSA ) || ! defined ( OPENSSL_NO_DSA ) || ! defined ( OPENSSL_NO_ECDSA ) if ( nocert ) # endif {
s_cert_file = NULL ;
s_key_file = NULL ;
s_dcert_file = NULL ;
s_dkey_file = NULL ;
# ifndef OPENSSL_NO_TLSEXT s_cert_file2 = NULL ;
s_key_file2 = NULL ;
# endif }
/* ---- Primary SSL_CTX construction and configuration. ---- */
ctx = SSL_CTX_new ( meth ) ;
if ( ctx == NULL ) {
ERR_print_errors ( bio_err ) ;
goto end ;
}
if ( session_id_prefix ) {
if ( strlen ( session_id_prefix ) >= 32 ) BIO_printf ( bio_err , "warning: id_prefix is too long, only one new session will be possible\n" ) ;
else if ( strlen ( session_id_prefix ) >= 16 ) BIO_printf ( bio_err , "warning: id_prefix is too long if you use SSLv2\n" ) ;
if ( ! SSL_CTX_set_generate_session_id ( ctx , generate_session_id ) ) {
BIO_printf ( bio_err , "error setting 'id_prefix'\n" ) ;
ERR_print_errors ( bio_err ) ;
goto end ;
}
BIO_printf ( bio_err , "id_prefix '%s' set.\n" , session_id_prefix ) ;
}
SSL_CTX_set_quiet_shutdown ( ctx , 1 ) ;
if ( bugs ) SSL_CTX_set_options ( ctx , SSL_OP_ALL ) ;
if ( hack ) SSL_CTX_set_options ( ctx , SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG ) ;
SSL_CTX_set_options ( ctx , off ) ;
if ( state ) SSL_CTX_set_info_callback ( ctx , apps_ssl_info_callback ) ;
if ( no_cache ) SSL_CTX_set_session_cache_mode ( ctx , SSL_SESS_CACHE_OFF ) ;
else SSL_CTX_sess_set_cache_size ( ctx , 128 ) ;
# ifndef OPENSSL_NO_SRTP if ( srtp_profiles != NULL ) SSL_CTX_set_tlsext_use_srtp ( ctx , srtp_profiles ) ;
# endif # if 0 if ( cipher == NULL ) cipher = getenv ( "SSL_CIPHER" ) ;
# endif # if 0 if ( s_cert_file == NULL ) {
BIO_printf ( bio_err , "You must specify a certificate file for the server to use\n" ) ;
goto end ;
}
/* Verify-location failures are reported but not fatal. */
# endif if ( ( ! SSL_CTX_load_verify_locations ( ctx , CAfile , CApath ) ) || ( ! SSL_CTX_set_default_verify_paths ( ctx ) ) ) {
ERR_print_errors ( bio_err ) ;
}
if ( vpm ) SSL_CTX_set1_param ( ctx , vpm ) ;
/* ---- Secondary SSL_CTX for SNI, mirroring the primary settings. ---- */
# ifndef OPENSSL_NO_TLSEXT if ( s_cert2 ) {
ctx2 = SSL_CTX_new ( meth ) ;
if ( ctx2 == NULL ) {
ERR_print_errors ( bio_err ) ;
goto end ;
}
}
if ( ctx2 ) {
BIO_printf ( bio_s_out , "Setting secondary ctx parameters\n" ) ;
if ( session_id_prefix ) {
if ( strlen ( session_id_prefix ) >= 32 ) BIO_printf ( bio_err , "warning: id_prefix is too long, only one new session will be possible\n" ) ;
else if ( strlen ( session_id_prefix ) >= 16 ) BIO_printf ( bio_err , "warning: id_prefix is too long if you use SSLv2\n" ) ;
if ( ! SSL_CTX_set_generate_session_id ( ctx2 , generate_session_id ) ) {
BIO_printf ( bio_err , "error setting 'id_prefix'\n" ) ;
ERR_print_errors ( bio_err ) ;
goto end ;
}
BIO_printf ( bio_err , "id_prefix '%s' set.\n" , session_id_prefix ) ;
}
SSL_CTX_set_quiet_shutdown ( ctx2 , 1 ) ;
if ( bugs ) SSL_CTX_set_options ( ctx2 , SSL_OP_ALL ) ;
if ( hack ) SSL_CTX_set_options ( ctx2 , SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG ) ;
SSL_CTX_set_options ( ctx2 , off ) ;
if ( state ) SSL_CTX_set_info_callback ( ctx2 , apps_ssl_info_callback ) ;
if ( no_cache ) SSL_CTX_set_session_cache_mode ( ctx2 , SSL_SESS_CACHE_OFF ) ;
else SSL_CTX_sess_set_cache_size ( ctx2 , 128 ) ;
if ( ( ! SSL_CTX_load_verify_locations ( ctx2 , CAfile , CApath ) ) || ( ! SSL_CTX_set_default_verify_paths ( ctx2 ) ) ) {
ERR_print_errors ( bio_err ) ;
}
if ( vpm ) SSL_CTX_set1_param ( ctx2 , vpm ) ;
}
# ifndef OPENSSL_NO_NEXTPROTONEG if ( next_proto . data ) SSL_CTX_set_next_protos_advertised_cb ( ctx , next_proto_cb , & next_proto ) ;
/* ---- Ephemeral DH parameters (file, cert file, or built-in 2048). ---- */
# endif # endif # ifndef OPENSSL_NO_DH if ( ! no_dhe ) {
DH * dh = NULL ;
if ( dhfile ) dh = load_dh_param ( dhfile ) ;
else if ( s_cert_file ) dh = load_dh_param ( s_cert_file ) ;
if ( dh != NULL ) {
BIO_printf ( bio_s_out , "Setting temp DH parameters\n" ) ;
}
else {
BIO_printf ( bio_s_out , "Using default temp DH parameters\n" ) ;
dh = get_dh2048 ( ) ;
if ( dh == NULL ) {
ERR_print_errors ( bio_err ) ;
goto end ;
}
}
( void ) BIO_flush ( bio_s_out ) ;
SSL_CTX_set_tmp_dh ( ctx , dh ) ;
# ifndef OPENSSL_NO_TLSEXT if ( ctx2 ) {
if ( ! dhfile ) {
DH * dh2 = load_dh_param ( s_cert_file2 ) ;
if ( dh2 != NULL ) {
BIO_printf ( bio_s_out , "Setting temp DH parameters\n" ) ;
( void ) BIO_flush ( bio_s_out ) ;
DH_free ( dh ) ;
dh = dh2 ;
}
}
SSL_CTX_set_tmp_dh ( ctx2 , dh ) ;
}
# endif DH_free ( dh ) ;
}
/* ---- Ephemeral ECDH key (named curve or default P-256). ---- */
# endif # ifndef OPENSSL_NO_ECDH if ( ! no_ecdhe ) {
EC_KEY * ecdh = NULL ;
if ( named_curve ) {
int nid = OBJ_sn2nid ( named_curve ) ;
if ( nid == 0 ) {
BIO_printf ( bio_err , "unknown curve name (%s)\n" , named_curve ) ;
goto end ;
}
ecdh = EC_KEY_new_by_curve_name ( nid ) ;
if ( ecdh == NULL ) {
BIO_printf ( bio_err , "unable to create curve (%s)\n" , named_curve ) ;
goto end ;
}
}
if ( ecdh != NULL ) {
BIO_printf ( bio_s_out , "Setting temp ECDH parameters\n" ) ;
}
else {
BIO_printf ( bio_s_out , "Using default temp ECDH parameters\n" ) ;
ecdh = EC_KEY_new_by_curve_name ( NID_X9_62_prime256v1 ) ;
if ( ecdh == NULL ) {
BIO_printf ( bio_err , "unable to create curve (nistp256)\n" ) ;
goto end ;
}
}
( void ) BIO_flush ( bio_s_out ) ;
SSL_CTX_set_tmp_ecdh ( ctx , ecdh ) ;
# ifndef OPENSSL_NO_TLSEXT if ( ctx2 ) SSL_CTX_set_tmp_ecdh ( ctx2 , ecdh ) ;
# endif EC_KEY_free ( ecdh ) ;
}
/* ---- Install certificates/keys into the context(s). ---- */
# endif if ( ! set_cert_key_stuff ( ctx , s_cert , s_key ) ) goto end ;
# ifndef OPENSSL_NO_TLSEXT if ( ctx2 && ! set_cert_key_stuff ( ctx2 , s_cert2 , s_key2 ) ) goto end ;
# endif if ( s_dcert != NULL ) {
if ( ! set_cert_key_stuff ( ctx , s_dcert , s_dkey ) ) goto end ;
}
/* Export-grade temp RSA key callback (legacy). */
# ifndef OPENSSL_NO_RSA # if 1 if ( ! no_tmp_rsa ) {
SSL_CTX_set_tmp_rsa_callback ( ctx , tmp_rsa_cb ) ;
# ifndef OPENSSL_NO_TLSEXT if ( ctx2 ) SSL_CTX_set_tmp_rsa_callback ( ctx2 , tmp_rsa_cb ) ;
# endif }
# else if ( ! no_tmp_rsa && SSL_CTX_need_tmp_RSA ( ctx ) ) {
RSA * rsa ;
BIO_printf ( bio_s_out , "Generating temp (512 bit) RSA key..." ) ;
BIO_flush ( bio_s_out ) ;
rsa = RSA_generate_key ( 512 , RSA_F4 , NULL ) ;
if ( ! SSL_CTX_set_tmp_rsa ( ctx , rsa ) ) {
ERR_print_errors ( bio_err ) ;
goto end ;
}
# ifndef OPENSSL_NO_TLSEXT if ( ctx2 ) {
if ( ! SSL_CTX_set_tmp_rsa ( ctx2 , rsa ) ) {
ERR_print_errors ( bio_err ) ;
goto end ;
}
}
# endif RSA_free ( rsa ) ;
BIO_printf ( bio_s_out , "\n" ) ;
}
/* ---- PSK server callback and identity hint. ---- */
# endif # endif # ifndef OPENSSL_NO_PSK # ifdef OPENSSL_NO_JPAKE if ( psk_key != NULL ) # else if ( psk_key != NULL || jpake_secret ) # endif {
if ( s_debug ) BIO_printf ( bio_s_out , "PSK key given or JPAKE in use, setting server callback\n" ) ;
SSL_CTX_set_psk_server_callback ( ctx , psk_server_cb ) ;
}
if ( ! SSL_CTX_use_psk_identity_hint ( ctx , psk_identity_hint ) ) {
BIO_printf ( bio_err , "error setting PSK identity hint to context\n" ) ;
ERR_print_errors ( bio_err ) ;
goto end ;
}
/* ---- Cipher list, verification mode, session-id context. ---- */
# endif if ( cipher != NULL ) {
if ( ! SSL_CTX_set_cipher_list ( ctx , cipher ) ) {
BIO_printf ( bio_err , "error setting cipher list\n" ) ;
ERR_print_errors ( bio_err ) ;
goto end ;
}
# ifndef OPENSSL_NO_TLSEXT if ( ctx2 && ! SSL_CTX_set_cipher_list ( ctx2 , cipher ) ) {
BIO_printf ( bio_err , "error setting cipher list\n" ) ;
ERR_print_errors ( bio_err ) ;
goto end ;
}
# endif }
SSL_CTX_set_verify ( ctx , s_server_verify , verify_callback ) ;
SSL_CTX_set_session_id_context ( ctx , ( void * ) & s_server_session_id_context , sizeof s_server_session_id_context ) ;
/* Cookie callbacks are only exercised by DTLS (SSL_OP_COOKIE_EXCHANGE). */
SSL_CTX_set_cookie_generate_cb ( ctx , generate_cookie_callback ) ;
SSL_CTX_set_cookie_verify_cb ( ctx , verify_cookie_callback ) ;
# ifndef OPENSSL_NO_TLSEXT if ( ctx2 ) {
SSL_CTX_set_verify ( ctx2 , s_server_verify , verify_callback ) ;
SSL_CTX_set_session_id_context ( ctx2 , ( void * ) & s_server_session_id_context , sizeof s_server_session_id_context ) ;
tlsextcbp . biodebug = bio_s_out ;
SSL_CTX_set_tlsext_servername_callback ( ctx2 , ssl_servername_cb ) ;
SSL_CTX_set_tlsext_servername_arg ( ctx2 , & tlsextcbp ) ;
SSL_CTX_set_tlsext_servername_callback ( ctx , ssl_servername_cb ) ;
SSL_CTX_set_tlsext_servername_arg ( ctx , & tlsextcbp ) ;
}
/* ---- SRP verifier database, or classic client-CA list. ---- */
# endif # ifndef OPENSSL_NO_SRP if ( srp_verifier_file != NULL ) {
srp_callback_parm . vb = SRP_VBASE_new ( srpuserseed ) ;
srp_callback_parm . user = NULL ;
srp_callback_parm . login = NULL ;
if ( ( ret = SRP_VBASE_init ( srp_callback_parm . vb , srp_verifier_file ) ) != SRP_NO_ERROR ) {
BIO_printf ( bio_err , "Cannot initialize SRP verifier file \"%s\":ret=%d\n" , srp_verifier_file , ret ) ;
goto end ;
}
SSL_CTX_set_verify ( ctx , SSL_VERIFY_NONE , verify_callback ) ;
SSL_CTX_set_srp_cb_arg ( ctx , & srp_callback_parm ) ;
SSL_CTX_set_srp_username_callback ( ctx , ssl_srp_server_param_cb ) ;
}
else # endif if ( CAfile != NULL ) {
SSL_CTX_set_client_CA_list ( ctx , SSL_load_client_CA_file ( CAfile ) ) ;
# ifndef OPENSSL_NO_TLSEXT if ( ctx2 ) SSL_CTX_set_client_CA_list ( ctx2 , SSL_load_client_CA_file ( CAfile ) ) ;
# endif }
/* ---- Accept loop: www_body serves HTTP-style tests, sv_body is raw. ---- */
BIO_printf ( bio_s_out , "ACCEPT\n" ) ;
( void ) BIO_flush ( bio_s_out ) ;
if ( www ) do_server ( port , socket_type , & accept_socket , www_body , context ) ;
else do_server ( port , socket_type , & accept_socket , sv_body , context ) ;
print_stats ( bio_s_out , ctx ) ;
ret = 0 ;
/* ---- Cleanup: free everything that may have been allocated. ---- */
end : if ( ctx != NULL ) SSL_CTX_free ( ctx ) ;
if ( s_cert ) X509_free ( s_cert ) ;
if ( s_dcert ) X509_free ( s_dcert ) ;
if ( s_key ) EVP_PKEY_free ( s_key ) ;
if ( s_dkey ) EVP_PKEY_free ( s_dkey ) ;
if ( pass ) OPENSSL_free ( pass ) ;
if ( dpass ) OPENSSL_free ( dpass ) ;
if ( vpm ) X509_VERIFY_PARAM_free ( vpm ) ;
# ifndef OPENSSL_NO_TLSEXT if ( tlscstatp . host ) OPENSSL_free ( tlscstatp . host ) ;
if ( tlscstatp . port ) OPENSSL_free ( tlscstatp . port ) ;
if ( tlscstatp . path ) OPENSSL_free ( tlscstatp . path ) ;
if ( ctx2 != NULL ) SSL_CTX_free ( ctx2 ) ;
if ( s_cert2 ) X509_free ( s_cert2 ) ;
if ( s_key2 ) EVP_PKEY_free ( s_key2 ) ;
# endif if ( bio_s_out != NULL ) {
BIO_free ( bio_s_out ) ;
bio_s_out = NULL ;
}
apps_shutdown ( ) ;
OPENSSL_EXIT ( ret ) ;
}
/*
 * Dump the session-cache and handshake statistics accumulated on
 * ssl_ctx to the given BIO, one counter per line.  Called after the
 * accept loop exits and interactively on the 'S' command in sv_body().
 */
static void print_stats ( BIO * bio , SSL_CTX * ssl_ctx ) {
BIO_printf ( bio , "%4ld items in the session cache\n" , SSL_CTX_sess_number ( ssl_ctx ) ) ;
BIO_printf ( bio , "%4ld client connects (SSL_connect())\n" , SSL_CTX_sess_connect ( ssl_ctx ) ) ;
BIO_printf ( bio , "%4ld client renegotiates (SSL_connect())\n" , SSL_CTX_sess_connect_renegotiate ( ssl_ctx ) ) ;
BIO_printf ( bio , "%4ld client connects that finished\n" , SSL_CTX_sess_connect_good ( ssl_ctx ) ) ;
BIO_printf ( bio , "%4ld server accepts (SSL_accept())\n" , SSL_CTX_sess_accept ( ssl_ctx ) ) ;
BIO_printf ( bio , "%4ld server renegotiates (SSL_accept())\n" , SSL_CTX_sess_accept_renegotiate ( ssl_ctx ) ) ;
BIO_printf ( bio , "%4ld server accepts that finished\n" , SSL_CTX_sess_accept_good ( ssl_ctx ) ) ;
BIO_printf ( bio , "%4ld session cache hits\n" , SSL_CTX_sess_hits ( ssl_ctx ) ) ;
BIO_printf ( bio , "%4ld session cache misses\n" , SSL_CTX_sess_misses ( ssl_ctx ) ) ;
BIO_printf ( bio , "%4ld session cache timeouts\n" , SSL_CTX_sess_timeouts ( ssl_ctx ) ) ;
BIO_printf ( bio , "%4ld callback cache hits\n" , SSL_CTX_sess_cb_hits ( ssl_ctx ) ) ;
BIO_printf ( bio , "%4ld cache full overflows (%ld allowed)\n" , SSL_CTX_sess_cache_full ( ssl_ctx ) , SSL_CTX_sess_get_cache_size ( ssl_ctx ) ) ;
}
/*
 * Per-connection handler for the plain (non -www) server mode, invoked
 * by do_server() with the accepted socket s.  Creates an SSL object on
 * the socket (datagram BIO for DTLS, stream BIO otherwise), then runs a
 * select() loop shuttling data between stdin and the TLS connection.
 * Single-character commands read from stdin ('q'/'Q' quit, 'r'/'R'
 * renegotiate, 'B' heartbeat, 'P' plaintext write, 'S' stats) are
 * interpreted unless -quiet was given.  Returns the status code passed
 * back to do_server(); >= 0 re-prints "ACCEPT".
 */
static int sv_body ( char * hostname , int s , unsigned char * context ) {
char * buf = NULL ;
fd_set readfds ;
int ret = 1 , width ;
int k , i ;
unsigned long l ;
SSL * con = NULL ;
BIO * sbio ;
# ifndef OPENSSL_NO_KRB5 KSSL_CTX * kctx ;
# endif struct timeval timeout ;
/* Platforms without a selectable stdin poll with a 1s timeout instead. */
# if defined ( OPENSSL_SYS_WINDOWS ) || defined ( OPENSSL_SYS_MSDOS ) || defined ( OPENSSL_SYS_NETWARE ) || defined ( OPENSSL_SYS_BEOS_R5 ) struct timeval tv ;
# else struct timeval * timeoutp ;
# endif if ( ( buf = OPENSSL_malloc ( bufsize ) ) == NULL ) {
BIO_printf ( bio_err , "out of memory\n" ) ;
goto err ;
}
# ifdef FIONBIO if ( s_nbio ) {
unsigned long sl = 1 ;
if ( ! s_quiet ) BIO_printf ( bio_err , "turning on non blocking io\n" ) ;
if ( BIO_socket_ioctl ( s , FIONBIO , & sl ) < 0 ) ERR_print_errors ( bio_err ) ;
}
/* ---- Build the SSL object and attach per-connection callbacks. ---- */
# endif if ( con == NULL ) {
con = SSL_new ( ctx ) ;
# ifndef OPENSSL_NO_TLSEXT if ( s_tlsextdebug ) {
SSL_set_tlsext_debug_callback ( con , tlsext_cb ) ;
SSL_set_tlsext_debug_arg ( con , bio_s_out ) ;
}
if ( s_tlsextstatus ) {
SSL_CTX_set_tlsext_status_cb ( ctx , cert_status_cb ) ;
tlscstatp . err = bio_err ;
SSL_CTX_set_tlsext_status_arg ( ctx , & tlscstatp ) ;
}
# endif # ifndef OPENSSL_NO_KRB5 if ( ( kctx = kssl_ctx_new ( ) ) != NULL ) {
SSL_set0_kssl_ctx ( con , kctx ) ;
kssl_ctx_setstring ( kctx , KSSL_SERVICE , KRB5SVC ) ;
kssl_ctx_setstring ( kctx , KSSL_KEYTAB , KRB5KEYTAB ) ;
}
# endif if ( context ) SSL_set_session_id_context ( con , context , strlen ( ( char * ) context ) ) ;
}
SSL_clear ( con ) ;
/* ---- Wrap the socket in a BIO: datagram for DTLS, stream otherwise. ---- */
# if 0 # ifdef TLSEXT_TYPE_opaque_prf_input SSL_set_tlsext_opaque_prf_input ( con , "Test server" , 11 ) ;
# endif # endif if ( SSL_version ( con ) == DTLS1_VERSION ) {
sbio = BIO_new_dgram ( s , BIO_NOCLOSE ) ;
if ( enable_timeouts ) {
timeout . tv_sec = 0 ;
timeout . tv_usec = DGRAM_RCV_TIMEOUT ;
BIO_ctrl ( sbio , BIO_CTRL_DGRAM_SET_RECV_TIMEOUT , 0 , & timeout ) ;
timeout . tv_sec = 0 ;
timeout . tv_usec = DGRAM_SND_TIMEOUT ;
BIO_ctrl ( sbio , BIO_CTRL_DGRAM_SET_SEND_TIMEOUT , 0 , & timeout ) ;
}
if ( socket_mtu ) {
if ( socket_mtu < DTLS_get_link_min_mtu ( con ) ) {
BIO_printf ( bio_err , "MTU too small. Must be at least %ld\n" , DTLS_get_link_min_mtu ( con ) ) ;
ret = - 1 ;
BIO_free ( sbio ) ;
goto err ;
}
SSL_set_options ( con , SSL_OP_NO_QUERY_MTU ) ;
if ( ! DTLS_set_link_mtu ( con , socket_mtu ) ) {
BIO_printf ( bio_err , "Failed to set MTU\n" ) ;
ret = - 1 ;
BIO_free ( sbio ) ;
goto err ;
}
}
else BIO_ctrl ( sbio , BIO_CTRL_DGRAM_MTU_DISCOVER , 0 , NULL ) ;
/* DTLS anti-DoS: require the cookie exchange before handshaking. */
SSL_set_options ( con , SSL_OP_COOKIE_EXCHANGE ) ;
}
else sbio = BIO_new_socket ( s , BIO_NOCLOSE ) ;
if ( s_nbio_test ) {
BIO * test ;
test = BIO_new ( BIO_f_nbio_test ( ) ) ;
sbio = BIO_push ( test , sbio ) ;
}
# ifndef OPENSSL_NO_JPAKE if ( jpake_secret ) jpake_server_auth ( bio_s_out , sbio , jpake_secret ) ;
# endif SSL_set_bio ( con , sbio , sbio ) ;
SSL_set_accept_state ( con ) ;
if ( s_debug ) {
SSL_set_debug ( con , 1 ) ;
BIO_set_callback ( SSL_get_rbio ( con ) , bio_dump_callback ) ;
BIO_set_callback_arg ( SSL_get_rbio ( con ) , ( char * ) bio_s_out ) ;
}
if ( s_msg ) {
SSL_set_msg_callback ( con , msg_cb ) ;
SSL_set_msg_callback_arg ( con , bio_s_out ) ;
}
# ifndef OPENSSL_NO_TLSEXT if ( s_tlsextdebug ) {
SSL_set_tlsext_debug_callback ( con , tlsext_cb ) ;
SSL_set_tlsext_debug_arg ( con , bio_s_out ) ;
}
/* ---- Main select() loop: multiplex stdin and the TLS socket. ---- */
# endif width = s + 1 ;
for ( ;
;
) {
int read_from_terminal ;
int read_from_sslcon ;
read_from_terminal = 0 ;
/* Buffered TLS data skips the select() entirely. */
read_from_sslcon = SSL_pending ( con ) ;
if ( ! read_from_sslcon ) {
FD_ZERO ( & readfds ) ;
# if ! defined ( OPENSSL_SYS_WINDOWS ) && ! defined ( OPENSSL_SYS_MSDOS ) && ! defined ( OPENSSL_SYS_NETWARE ) && ! defined ( OPENSSL_SYS_BEOS_R5 ) openssl_fdset ( fileno ( stdin ) , & readfds ) ;
# endif openssl_fdset ( s , & readfds ) ;
# if defined ( OPENSSL_SYS_WINDOWS ) || defined ( OPENSSL_SYS_MSDOS ) || defined ( OPENSSL_SYS_NETWARE ) tv . tv_sec = 1 ;
tv . tv_usec = 0 ;
i = select ( width , ( void * ) & readfds , NULL , NULL , & tv ) ;
if ( ( i < 0 ) || ( ! i && ! _kbhit ( ) ) ) continue ;
if ( _kbhit ( ) ) read_from_terminal = 1 ;
# elif defined ( OPENSSL_SYS_BEOS_R5 ) tv . tv_sec = 1 ;
tv . tv_usec = 0 ;
( void ) fcntl ( fileno ( stdin ) , F_SETFL , O_NONBLOCK ) ;
i = select ( width , ( void * ) & readfds , NULL , NULL , & tv ) ;
if ( ( i < 0 ) || ( ! i && read ( fileno ( stdin ) , buf , 0 ) < 0 ) ) continue ;
if ( read ( fileno ( stdin ) , buf , 0 ) >= 0 ) read_from_terminal = 1 ;
( void ) fcntl ( fileno ( stdin ) , F_SETFL , 0 ) ;
/* On POSIX, honour the pending DTLS retransmit timer as the timeout. */
# else if ( ( SSL_version ( con ) == DTLS1_VERSION ) && DTLSv1_get_timeout ( con , & timeout ) ) timeoutp = & timeout ;
else timeoutp = NULL ;
i = select ( width , ( void * ) & readfds , NULL , NULL , timeoutp ) ;
if ( ( SSL_version ( con ) == DTLS1_VERSION ) && DTLSv1_handle_timeout ( con ) > 0 ) {
BIO_printf ( bio_err , "TIMEOUT occured\n" ) ;
}
if ( i <= 0 ) continue ;
if ( FD_ISSET ( fileno ( stdin ) , & readfds ) ) read_from_terminal = 1 ;
# endif if ( FD_ISSET ( s , & readfds ) ) read_from_sslcon = 1 ;
}
/* ---- stdin -> TLS: optional CRLF translation, then command keys. ---- */
if ( read_from_terminal ) {
if ( s_crlf ) {
int j , lf_num ;
i = raw_read_stdin ( buf , bufsize / 2 ) ;
/* Expand each '\n' to "\r\n" in place, shifting right-to-left;
 * only half the buffer is read so the expansion always fits. */
lf_num = 0 ;
for ( j = 0 ;
j < i ;
j ++ ) if ( buf [ j ] == '\n' ) lf_num ++ ;
for ( j = i - 1 ;
j >= 0 ;
j -- ) {
buf [ j + lf_num ] = buf [ j ] ;
if ( buf [ j ] == '\n' ) {
lf_num -- ;
i ++ ;
buf [ j + lf_num ] = '\r' ;
}
}
assert ( lf_num == 0 ) ;
}
else i = raw_read_stdin ( buf , bufsize ) ;
if ( ! s_quiet ) {
if ( ( i <= 0 ) || ( buf [ 0 ] == 'Q' ) ) {
BIO_printf ( bio_s_out , "DONE\n" ) ;
SHUTDOWN ( s ) ;
close_accept_socket ( ) ;
/* -11 tells do_server to stop accepting entirely. */
ret = - 11 ;
goto err ;
}
if ( ( i <= 0 ) || ( buf [ 0 ] == 'q' ) ) {
BIO_printf ( bio_s_out , "DONE\n" ) ;
if ( SSL_version ( con ) != DTLS1_VERSION ) SHUTDOWN ( s ) ;
goto err ;
}
# ifndef OPENSSL_NO_HEARTBEATS if ( ( buf [ 0 ] == 'B' ) && ( ( buf [ 1 ] == '\n' ) || ( buf [ 1 ] == '\r' ) ) ) {
BIO_printf ( bio_err , "HEARTBEATING\n" ) ;
SSL_heartbeat ( con ) ;
i = 0 ;
continue ;
}
# endif if ( ( buf [ 0 ] == 'r' ) && ( ( buf [ 1 ] == '\n' ) || ( buf [ 1 ] == '\r' ) ) ) {
SSL_renegotiate ( con ) ;
i = SSL_do_handshake ( con ) ;
printf ( "SSL_do_handshake -> %d\n" , i ) ;
i = 0 ;
continue ;
}
/* 'R' renegotiates and also demands a client certificate. */
if ( ( buf [ 0 ] == 'R' ) && ( ( buf [ 1 ] == '\n' ) || ( buf [ 1 ] == '\r' ) ) ) {
SSL_set_verify ( con , SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE , NULL ) ;
SSL_renegotiate ( con ) ;
i = SSL_do_handshake ( con ) ;
printf ( "SSL_do_handshake -> %d\n" , i ) ;
i = 0 ;
continue ;
}
if ( buf [ 0 ] == 'P' ) {
static const char * str = "Lets print some clear text\n" ;
BIO_write ( SSL_get_wbio ( con ) , str , strlen ( str ) ) ;
}
if ( buf [ 0 ] == 'S' ) {
print_stats ( bio_s_out , SSL_get_SSL_CTX ( con ) ) ;
}
}
# ifdef CHARSET_EBCDIC ebcdic2ascii ( buf , buf , i ) ;
/* Write the buffer out, retrying partial/blocked writes. */
# endif l = k = 0 ;
for ( ;
;
) {
# ifdef RENEG {
static count = 0 ;
if ( ++ count == 100 ) {
count = 0 ;
SSL_renegotiate ( con ) ;
}
}
# endif k = SSL_write ( con , & ( buf [ l ] ) , ( unsigned int ) i ) ;
/* SRP renegotiation: re-resolve the user and retry the write. */
# ifndef OPENSSL_NO_SRP while ( SSL_get_error ( con , k ) == SSL_ERROR_WANT_X509_LOOKUP ) {
BIO_printf ( bio_s_out , "LOOKUP renego during write\n" ) ;
SRP_user_pwd_free ( srp_callback_parm . user ) ;
srp_callback_parm . user = SRP_VBASE_get1_by_user ( srp_callback_parm . vb , srp_callback_parm . login ) ;
if ( srp_callback_parm . user ) BIO_printf ( bio_s_out , "LOOKUP done %s\n" , srp_callback_parm . user -> info ) ;
else BIO_printf ( bio_s_out , "LOOKUP not successful\n" ) ;
k = SSL_write ( con , & ( buf [ l ] ) , ( unsigned int ) i ) ;
}
# endif switch ( SSL_get_error ( con , k ) ) {
case SSL_ERROR_NONE : break ;
case SSL_ERROR_WANT_WRITE : case SSL_ERROR_WANT_READ : case SSL_ERROR_WANT_X509_LOOKUP : BIO_printf ( bio_s_out , "Write BLOCK\n" ) ;
break ;
case SSL_ERROR_SYSCALL : case SSL_ERROR_SSL : BIO_printf ( bio_s_out , "ERROR\n" ) ;
ERR_print_errors ( bio_err ) ;
ret = 1 ;
goto err ;
case SSL_ERROR_ZERO_RETURN : BIO_printf ( bio_s_out , "DONE\n" ) ;
ret = 1 ;
goto err ;
}
if ( k > 0 ) {
l += k ;
i -= k ;
}
if ( i <= 0 ) break ;
}
}
/* ---- TLS -> stdout: finish the handshake first if still in progress. ---- */
if ( read_from_sslcon ) {
if ( ! SSL_is_init_finished ( con ) ) {
i = init_ssl_connection ( con ) ;
if ( i < 0 ) {
ret = 0 ;
goto err ;
}
else if ( i == 0 ) {
ret = 1 ;
goto err ;
}
}
else {
again : i = SSL_read ( con , ( char * ) buf , bufsize ) ;
/* SRP renegotiation: re-resolve the user and retry the read. */
# ifndef OPENSSL_NO_SRP while ( SSL_get_error ( con , i ) == SSL_ERROR_WANT_X509_LOOKUP ) {
BIO_printf ( bio_s_out , "LOOKUP renego during read\n" ) ;
SRP_user_pwd_free ( srp_callback_parm . user ) ;
srp_callback_parm . user = SRP_VBASE_get1_by_user ( srp_callback_parm . vb , srp_callback_parm . login ) ;
if ( srp_callback_parm . user ) BIO_printf ( bio_s_out , "LOOKUP done %s\n" , srp_callback_parm . user -> info ) ;
else BIO_printf ( bio_s_out , "LOOKUP not successful\n" ) ;
i = SSL_read ( con , ( char * ) buf , bufsize ) ;
}
# endif switch ( SSL_get_error ( con , i ) ) {
case SSL_ERROR_NONE : # ifdef CHARSET_EBCDIC ascii2ebcdic ( buf , buf , i ) ;
# endif raw_write_stdout ( buf , ( unsigned int ) i ) ;
if ( SSL_pending ( con ) ) goto again ;
break ;
case SSL_ERROR_WANT_WRITE : case SSL_ERROR_WANT_READ : BIO_printf ( bio_s_out , "Read BLOCK\n" ) ;
break ;
case SSL_ERROR_SYSCALL : case SSL_ERROR_SSL : BIO_printf ( bio_s_out , "ERROR\n" ) ;
ERR_print_errors ( bio_err ) ;
ret = 1 ;
goto err ;
case SSL_ERROR_ZERO_RETURN : BIO_printf ( bio_s_out , "DONE\n" ) ;
ret = 1 ;
goto err ;
}
}
}
}
/* ---- Teardown: unidirectional "quiet" shutdown, scrub the buffer. ---- */
err : if ( con != NULL ) {
BIO_printf ( bio_s_out , "shutting down SSL\n" ) ;
# if 1 SSL_set_shutdown ( con , SSL_SENT_SHUTDOWN | SSL_RECEIVED_SHUTDOWN ) ;
# else SSL_shutdown ( con ) ;
# endif SSL_free ( con ) ;
}
BIO_printf ( bio_s_out , "CONNECTION CLOSED\n" ) ;
if ( buf != NULL ) {
OPENSSL_cleanse ( buf , bufsize ) ;
OPENSSL_free ( buf ) ;
}
if ( ret >= 0 ) BIO_printf ( bio_s_out , "ACCEPT\n" ) ;
return ( ret ) ;
}
/*
 * Tear down the global listening socket used to accept incoming
 * connections.  Always logs; only shuts the socket down if it is open
 * (accept_socket >= 0).
 */
static void close_accept_socket ( void ) {
    BIO_printf ( bio_err , "shutdown accept socket\n" ) ;
    if ( accept_socket < 0 )
        return ;
    SHUTDOWN2 ( accept_socket ) ;
}
/*
 * Drive the server-side SSL/TLS handshake on 'con' and dump the results
 * (session, client certificate, shared/current cipher, NPN/SRTP/Kerberos
 * details, exported keying material) to bio_s_out.
 * Returns 1 on success or on a retriable non-blocking condition, 0 on a
 * fatal handshake failure.
 */
static int init_ssl_connection ( SSL * con ) {
int i ;
const char * str ;
X509 * peer ;
long verify_error ;
MS_STATIC char buf [ BUFSIZ ] ;
# ifndef OPENSSL_NO_KRB5 char * client_princ ;
# endif # if ! defined ( OPENSSL_NO_TLSEXT ) && ! defined ( OPENSSL_NO_NEXTPROTONEG ) const unsigned char * next_proto_neg ;
unsigned next_proto_neg_len ;
# endif unsigned char * exportedkeymat ;
i = SSL_accept ( con ) ;
/* SRP: when the handshake stalls on a user lookup, refresh
 * srp_callback_parm.user from the verifier database and retry the
 * accept until the lookup no longer blocks it. */
# ifndef OPENSSL_NO_SRP while ( i <= 0 && SSL_get_error ( con , i ) == SSL_ERROR_WANT_X509_LOOKUP ) {
BIO_printf ( bio_s_out , "LOOKUP during accept %s\n" , srp_callback_parm . login ) ;
SRP_user_pwd_free ( srp_callback_parm . user ) ;
srp_callback_parm . user = SRP_VBASE_get1_by_user ( srp_callback_parm . vb , srp_callback_parm . login ) ;
if ( srp_callback_parm . user ) BIO_printf ( bio_s_out , "LOOKUP done %s\n" , srp_callback_parm . user -> info ) ;
else BIO_printf ( bio_s_out , "LOOKUP not successful\n" ) ;
i = SSL_accept ( con ) ;
}
/* Handshake incomplete: retriable I/O means "try again later" (return 1),
 * anything else is reported as a fatal error (return 0). */
# endif if ( i <= 0 ) {
if ( BIO_sock_should_retry ( i ) ) {
BIO_printf ( bio_s_out , "DELAY\n" ) ;
return ( 1 ) ;
}
BIO_printf ( bio_err , "ERROR\n" ) ;
verify_error = SSL_get_verify_result ( con ) ;
if ( verify_error != X509_V_OK ) {
BIO_printf ( bio_err , "verify error:%s\n" , X509_verify_cert_error_string ( verify_error ) ) ;
}
else ERR_print_errors ( bio_err ) ;
return ( 0 ) ;
}
/* Handshake complete: dump session and peer credentials. */
PEM_write_bio_SSL_SESSION ( bio_s_out , SSL_get_session ( con ) ) ;
peer = SSL_get_peer_certificate ( con ) ;
if ( peer != NULL ) {
BIO_printf ( bio_s_out , "Client certificate\n" ) ;
PEM_write_bio_X509 ( bio_s_out , peer ) ;
X509_NAME_oneline ( X509_get_subject_name ( peer ) , buf , sizeof buf ) ;
BIO_printf ( bio_s_out , "subject=%s\n" , buf ) ;
X509_NAME_oneline ( X509_get_issuer_name ( peer ) , buf , sizeof buf ) ;
BIO_printf ( bio_s_out , "issuer=%s\n" , buf ) ;
X509_free ( peer ) ;
}
if ( SSL_get_shared_ciphers ( con , buf , sizeof buf ) != NULL ) BIO_printf ( bio_s_out , "Shared ciphers:%s\n" , buf ) ;
str = SSL_CIPHER_get_name ( SSL_get_current_cipher ( con ) ) ;
BIO_printf ( bio_s_out , "CIPHER is %s\n" , ( str != NULL ) ? str : "(NONE)" ) ;
/* Report the protocol chosen via NPN, if the extension was negotiated. */
# if ! defined ( OPENSSL_NO_TLSEXT ) && ! defined ( OPENSSL_NO_NEXTPROTONEG ) SSL_get0_next_proto_negotiated ( con , & next_proto_neg , & next_proto_neg_len ) ;
if ( next_proto_neg ) {
BIO_printf ( bio_s_out , "NEXTPROTO is " ) ;
BIO_write ( bio_s_out , next_proto_neg , next_proto_neg_len ) ;
BIO_printf ( bio_s_out , "\n" ) ;
}
# endif # ifndef OPENSSL_NO_SRTP {
SRTP_PROTECTION_PROFILE * srtp_profile = SSL_get_selected_srtp_profile ( con ) ;
if ( srtp_profile ) BIO_printf ( bio_s_out , "SRTP Extension negotiated, profile=%s\n" , srtp_profile -> name ) ;
}
# endif if ( SSL_cache_hit ( con ) ) BIO_printf ( bio_s_out , "Reused session-id\n" ) ;
if ( SSL_ctrl ( con , SSL_CTRL_GET_FLAGS , 0 , NULL ) & TLS1_FLAGS_TLS_PADDING_BUG ) BIO_printf ( bio_s_out , "Peer has incorrect TLSv1 block padding\n" ) ;
# ifndef OPENSSL_NO_KRB5 client_princ = kssl_ctx_get0_client_princ ( SSL_get0_kssl_ctx ( con ) ) ;
if ( client_princ != NULL ) {
BIO_printf ( bio_s_out , "Kerberos peer principal is %s\n" , client_princ ) ;
}
# endif BIO_printf ( bio_s_out , "Secure Renegotiation IS%s supported\n" , SSL_get_secure_renegotiation_support ( con ) ? "" : " NOT" ) ;
/* If a keying-material-exporter label was configured, export
 * keymatexportlen bytes under that label and print them as hex. */
if ( keymatexportlabel != NULL ) {
BIO_printf ( bio_s_out , "Keying material exporter:\n" ) ;
BIO_printf ( bio_s_out , " Label: '%s'\n" , keymatexportlabel ) ;
BIO_printf ( bio_s_out , " Length: %i bytes\n" , keymatexportlen ) ;
exportedkeymat = OPENSSL_malloc ( keymatexportlen ) ;
if ( exportedkeymat != NULL ) {
if ( ! SSL_export_keying_material ( con , exportedkeymat , keymatexportlen , keymatexportlabel , strlen ( keymatexportlabel ) , NULL , 0 , 0 ) ) {
BIO_printf ( bio_s_out , " Error\n" ) ;
}
else {
BIO_printf ( bio_s_out , " Keying material: " ) ;
for ( i = 0 ;
i < keymatexportlen ;
i ++ ) BIO_printf ( bio_s_out , "%02X" , exportedkeymat [ i ] ) ;
BIO_printf ( bio_s_out , "\n" ) ;
}
OPENSSL_free ( exportedkeymat ) ;
}
}
return ( 1 ) ;
}
# ifndef OPENSSL_NO_DH static DH * load_dh_param ( const char * dhfile ) {
DH * ret = NULL ;
BIO * bio ;
if ( ( bio = BIO_new_file ( dhfile , "r" ) ) == NULL ) goto err ;
ret = PEM_read_bio_DHparams ( bio , NULL , NULL , NULL ) ;
err : if ( bio != NULL ) BIO_free ( bio ) ;
return ( ret ) ;
}
/* File-scope Kerberos client-principal pointer (KRB5 builds only),
 * followed by load_CA(), which is compiled out via '#if 0': it would read
 * every PEM certificate from 'file' and register each as an acceptable
 * client CA on 'ctx'; returns 0 if the file cannot be opened, else 1. */
# endif # ifndef OPENSSL_NO_KRB5 char * client_princ ;
# endif # if 0 static int load_CA ( SSL_CTX * ctx , char * file ) {
FILE * in ;
X509 * x = NULL ;
if ( ( in = fopen ( file , "r" ) ) == NULL ) return ( 0 ) ;
for ( ;
;
) {
if ( PEM_read_X509 ( in , & x , NULL ) == NULL ) break ;
SSL_CTX_add_client_CA ( ctx , x ) ;
}
/* Free the last certificate left over from the terminating read. */
if ( x != NULL ) X509_free ( x ) ;
fclose ( in ) ;
return ( 1 ) ;
}
/*
 * Serve one HTTPS connection on socket 's' in the s_server "www" modes:
 *   www == 1: "GET " requests get an HTML status page (ciphers, session,
 *             client certificate);
 *   www == 2: "GET /stats " gets the status page, other "GET /<file>"
 *             requests are served from disk with a guessed Content-type;
 *   www == 3: "GET /<file>" requests are served as-is.
 * Returns 1 on normal completion, 0 on allocation failure, and may leave
 * ret negative on internal errors (ACCEPT is only logged when ret >= 0).
 */
# endif static int www_body ( char * hostname , int s , unsigned char * context ) {
char * buf = NULL ;
int ret = 1 ;
int i , j , k , dot ;
SSL * con ;
const SSL_CIPHER * c ;
BIO * io , * ssl_bio , * sbio ;
# ifndef OPENSSL_NO_KRB5 KSSL_CTX * kctx ;
# endif buf = OPENSSL_malloc ( bufsize ) ;
if ( buf == NULL ) return ( 0 ) ;
/* Build the BIO chain: buffered 'io' on top of an SSL filter BIO that
 * will own 'con' (BIO_CLOSE), which sits on the raw socket BIO. */
io = BIO_new ( BIO_f_buffer ( ) ) ;
ssl_bio = BIO_new ( BIO_f_ssl ( ) ) ;
if ( ( io == NULL ) || ( ssl_bio == NULL ) ) goto err ;
# ifdef FIONBIO if ( s_nbio ) {
unsigned long sl = 1 ;
if ( ! s_quiet ) BIO_printf ( bio_err , "turning on non blocking io\n" ) ;
if ( BIO_socket_ioctl ( s , FIONBIO , & sl ) < 0 ) ERR_print_errors ( bio_err ) ;
}
# endif if ( ! BIO_set_write_buffer_size ( io , bufsize ) ) goto err ;
if ( ( con = SSL_new ( ctx ) ) == NULL ) goto err ;
# ifndef OPENSSL_NO_TLSEXT if ( s_tlsextdebug ) {
SSL_set_tlsext_debug_callback ( con , tlsext_cb ) ;
SSL_set_tlsext_debug_arg ( con , bio_s_out ) ;
}
# endif # ifndef OPENSSL_NO_KRB5 if ( ( kctx = kssl_ctx_new ( ) ) != NULL ) {
kssl_ctx_setstring ( kctx , KSSL_SERVICE , KRB5SVC ) ;
kssl_ctx_setstring ( kctx , KSSL_KEYTAB , KRB5KEYTAB ) ;
}
# endif if ( context ) SSL_set_session_id_context ( con , context , strlen ( ( char * ) context ) ) ;
sbio = BIO_new_socket ( s , BIO_NOCLOSE ) ;
if ( s_nbio_test ) {
BIO * test ;
test = BIO_new ( BIO_f_nbio_test ( ) ) ;
sbio = BIO_push ( test , sbio ) ;
}
SSL_set_bio ( con , sbio , sbio ) ;
SSL_set_accept_state ( con ) ;
BIO_set_ssl ( ssl_bio , con , BIO_CLOSE ) ;
BIO_push ( io , ssl_bio ) ;
# ifdef CHARSET_EBCDIC io = BIO_push ( BIO_new ( BIO_f_ebcdic_filter ( ) ) , io ) ;
# endif if ( s_debug ) {
SSL_set_debug ( con , 1 ) ;
BIO_set_callback ( SSL_get_rbio ( con ) , bio_dump_callback ) ;
BIO_set_callback_arg ( SSL_get_rbio ( con ) , ( char * ) bio_s_out ) ;
}
if ( s_msg ) {
SSL_set_msg_callback ( con , msg_cb ) ;
SSL_set_msg_callback_arg ( con , bio_s_out ) ;
}
/* Main request loop: read one request line per iteration. */
for ( ;
;
) {
/* 'hack' mode: force an explicit accept plus an immediate
 * renegotiation before handling the request. */
if ( hack ) {
i = SSL_accept ( con ) ;
# ifndef OPENSSL_NO_SRP while ( i <= 0 && SSL_get_error ( con , i ) == SSL_ERROR_WANT_X509_LOOKUP ) {
BIO_printf ( bio_s_out , "LOOKUP during accept %s\n" , srp_callback_parm . login ) ;
SRP_user_pwd_free ( srp_callback_parm . user ) ;
srp_callback_parm . user = SRP_VBASE_get1_by_user ( srp_callback_parm . vb , srp_callback_parm . login ) ;
if ( srp_callback_parm . user ) BIO_printf ( bio_s_out , "LOOKUP done %s\n" , srp_callback_parm . user -> info ) ;
else BIO_printf ( bio_s_out , "LOOKUP not successful\n" ) ;
i = SSL_accept ( con ) ;
}
# endif switch ( SSL_get_error ( con , i ) ) {
case SSL_ERROR_NONE : break ;
case SSL_ERROR_WANT_WRITE : case SSL_ERROR_WANT_READ : case SSL_ERROR_WANT_X509_LOOKUP : continue ;
case SSL_ERROR_SYSCALL : case SSL_ERROR_SSL : case SSL_ERROR_ZERO_RETURN : ret = 1 ;
goto err ;
}
SSL_renegotiate ( con ) ;
SSL_write ( con , NULL , 0 ) ;
}
i = BIO_gets ( io , buf , bufsize - 1 ) ;
if ( i < 0 ) {
if ( ! BIO_should_retry ( io ) ) {
if ( ! s_quiet ) ERR_print_errors ( bio_err ) ;
goto err ;
}
else {
/* Retriable read: handle an SRP lookup stall, otherwise back off
 * briefly before retrying. */
BIO_printf ( bio_s_out , "read R BLOCK\n" ) ;
# ifndef OPENSSL_NO_SRP if ( BIO_should_io_special ( io ) && BIO_get_retry_reason ( io ) == BIO_RR_SSL_X509_LOOKUP ) {
BIO_printf ( bio_s_out , "LOOKUP renego during read\n" ) ;
SRP_user_pwd_free ( srp_callback_parm . user ) ;
srp_callback_parm . user = SRP_VBASE_get1_by_user ( srp_callback_parm . vb , srp_callback_parm . login ) ;
if ( srp_callback_parm . user ) BIO_printf ( bio_s_out , "LOOKUP done %s\n" , srp_callback_parm . user -> info ) ;
else BIO_printf ( bio_s_out , "LOOKUP not successful\n" ) ;
continue ;
}
# endif # if defined ( OPENSSL_SYS_NETWARE ) delay ( 1000 ) ;
# elif ! defined ( OPENSSL_SYS_MSDOS ) && ! defined ( __DJGPP__ ) sleep ( 1 ) ;
# endif continue ;
}
}
else if ( i == 0 ) {
/* EOF from peer: clean shutdown. */
ret = 1 ;
goto end ;
}
/* Status-page request: emit an HTML dump of the connection state. */
if ( ( ( www == 1 ) && ( strncmp ( "GET " , buf , 4 ) == 0 ) ) || ( ( www == 2 ) && ( strncmp ( "GET /stats " , buf , 11 ) == 0 ) ) ) {
char * p ;
X509 * peer ;
STACK_OF ( SSL_CIPHER ) * sk ;
static const char * space = " " ;
BIO_puts ( io , "HTTP/1.0 200 ok\r\nContent-type: text/html\r\n\r\n" ) ;
BIO_puts ( io , "<HTML><BODY BGCOLOR=\"#ffffff\">\n" ) ;
BIO_puts ( io , "<pre>\n" ) ;
BIO_puts ( io , "\n" ) ;
/* Echo the server's command line. */
for ( i = 0 ;
i < local_argc ;
i ++ ) {
BIO_puts ( io , local_argv [ i ] ) ;
BIO_write ( io , " " , 1 ) ;
}
BIO_puts ( io , "\n" ) ;
BIO_printf ( io , "Secure Renegotiation IS%s supported\n" , SSL_get_secure_renegotiation_support ( con ) ? "" : " NOT" ) ;
BIO_printf ( io , "Ciphers supported in s_server binary\n" ) ;
sk = SSL_get_ciphers ( con ) ;
j = sk_SSL_CIPHER_num ( sk ) ;
for ( i = 0 ;
i < j ;
i ++ ) {
c = sk_SSL_CIPHER_value ( sk , i ) ;
BIO_printf ( io , "%-11s:%-25s" , SSL_CIPHER_get_version ( c ) , SSL_CIPHER_get_name ( c ) ) ;
if ( ( ( ( i + 1 ) % 2 ) == 0 ) && ( i + 1 != j ) ) BIO_puts ( io , "\n" ) ;
}
BIO_puts ( io , "\n" ) ;
/* Pretty-print the colon-separated shared-cipher list, three columns
 * per row, padding each entry to a fixed width with 'space'. */
p = SSL_get_shared_ciphers ( con , buf , bufsize ) ;
if ( p != NULL ) {
BIO_printf ( io , "---\nCiphers common between both SSL end points:\n" ) ;
j = i = 0 ;
while ( * p ) {
if ( * p == ':' ) {
BIO_write ( io , space , 26 - j ) ;
i ++ ;
j = 0 ;
BIO_write ( io , ( ( i % 3 ) ? " " : "\n" ) , 1 ) ;
}
else {
BIO_write ( io , p , 1 ) ;
j ++ ;
}
p ++ ;
}
BIO_puts ( io , "\n" ) ;
}
BIO_printf ( io , ( SSL_cache_hit ( con ) ? "---\nReused, " : "---\nNew, " ) ) ;
c = SSL_get_current_cipher ( con ) ;
BIO_printf ( io , "%s, Cipher is %s\n" , SSL_CIPHER_get_version ( c ) , SSL_CIPHER_get_name ( c ) ) ;
SSL_SESSION_print ( io , SSL_get_session ( con ) ) ;
BIO_printf ( io , "---\n" ) ;
print_stats ( io , SSL_get_SSL_CTX ( con ) ) ;
BIO_printf ( io , "---\n" ) ;
peer = SSL_get_peer_certificate ( con ) ;
if ( peer != NULL ) {
BIO_printf ( io , "Client certificate\n" ) ;
X509_print ( io , peer ) ;
PEM_write_bio_X509 ( io , peer ) ;
}
else BIO_puts ( io , "no client certificate available\n" ) ;
BIO_puts ( io , "</BODY></HTML>\r\n\r\n" ) ;
break ;
}
/* File-serving request ("GET /<path> ..."). */
else if ( ( www == 2 || www == 3 ) && ( strncmp ( "GET /" , buf , 5 ) == 0 ) ) {
BIO * file ;
char * p , * e ;
static const char * text = "HTTP/1.0 200 ok\r\nContent-type: text/plain\r\n\r\n" ;
p = & ( buf [ 5 ] ) ;
/* Scan the path up to the first space, tracking a small state
 * machine ('dot') that detects a "/../" traversal sequence or a
 * trailing "..": dot ends up 3 or -1 exactly when such a sequence
 * was seen. */
dot = 1 ;
for ( e = p ;
* e != '\0' ;
e ++ ) {
if ( e [ 0 ] == ' ' ) break ;
switch ( dot ) {
case 1 : dot = ( e [ 0 ] == '.' ) ? 2 : 0 ;
break ;
case 2 : dot = ( e [ 0 ] == '.' ) ? 3 : 0 ;
break ;
case 3 : dot = ( e [ 0 ] == '/' ) ? - 1 : 0 ;
break ;
}
if ( dot == 0 ) dot = ( e [ 0 ] == '/' ) ? 1 : 0 ;
}
dot = ( dot == 3 ) || ( dot == - 1 ) ;
/* Reject requests with no space-terminated path, with a ".."
 * reference, or starting with an absolute '/'. */
if ( * e == '\0' ) {
BIO_puts ( io , text ) ;
BIO_printf ( io , "'%s' is an invalid file name\r\n" , p ) ;
break ;
}
* e = '\0' ;
if ( dot ) {
BIO_puts ( io , text ) ;
BIO_printf ( io , "'%s' contains '..' reference\r\n" , p ) ;
break ;
}
if ( * p == '/' ) {
BIO_puts ( io , text ) ;
BIO_printf ( io , "'%s' is an invalid path\r\n" , p ) ;
break ;
}
# if 0 if ( e [ - 1 ] == '/' ) strcat ( p , "index.html" ) ;
# endif if ( app_isdir ( p ) > 0 ) {
# if 0 strcat ( p , "/index.html" ) ;
# else BIO_puts ( io , text ) ;
BIO_printf ( io , "'%s' is a directory\r\n" , p ) ;
break ;
# endif }
if ( ( file = BIO_new_file ( p , "r" ) ) == NULL ) {
BIO_puts ( io , text ) ;
BIO_printf ( io , "Error opening '%s'\r\n" , p ) ;
ERR_print_errors ( io ) ;
break ;
}
if ( ! s_quiet ) BIO_printf ( bio_err , "FILE:%s\n" , p ) ;
/* www==2 guesses the Content-type from the file extension. */
if ( www == 2 ) {
i = strlen ( p ) ;
if ( ( ( i > 5 ) && ( strcmp ( & ( p [ i - 5 ] ) , ".html" ) == 0 ) ) || ( ( i > 4 ) && ( strcmp ( & ( p [ i - 4 ] ) , ".php" ) == 0 ) ) || ( ( i > 4 ) && ( strcmp ( & ( p [ i - 4 ] ) , ".htm" ) == 0 ) ) ) BIO_puts ( io , "HTTP/1.0 200 ok\r\nContent-type: text/html\r\n\r\n" ) ;
else BIO_puts ( io , "HTTP/1.0 200 ok\r\nContent-type: text/plain\r\n\r\n" ) ;
}
/* Stream the file, retrying partial writes until each chunk is
 * fully written; RENEG builds also trigger renegotiations. */
for ( ;
;
) {
i = BIO_read ( file , buf , bufsize ) ;
if ( i <= 0 ) break ;
# ifdef RENEG total_bytes += i ;
fprintf ( stderr , "%d\n" , i ) ;
if ( total_bytes > 3 * 1024 ) {
total_bytes = 0 ;
fprintf ( stderr , "RENEGOTIATE\n" ) ;
SSL_renegotiate ( con ) ;
}
# endif for ( j = 0 ;
j < i ;
) {
# ifdef RENEG {
static count = 0 ;
if ( ++ count == 13 ) {
SSL_renegotiate ( con ) ;
}
}
# endif k = BIO_write ( io , & ( buf [ j ] ) , i - j ) ;
if ( k <= 0 ) {
if ( ! BIO_should_retry ( io ) ) goto write_error ;
else {
BIO_printf ( bio_s_out , "rwrite W BLOCK\n" ) ;
}
}
else {
j += k ;
}
}
}
write_error : BIO_free ( file ) ;
break ;
}
}
/* Flush any buffered response data, retrying while the BIO says so. */
for ( ;
;
) {
i = ( int ) BIO_flush ( io ) ;
if ( i <= 0 ) {
if ( ! BIO_should_retry ( io ) ) break ;
}
else break ;
}
/* 'end' marks the shutdown as already complete on both sides so
 * BIO_free_all below does not attempt a full bidirectional shutdown. */
end : # if 1 SSL_set_shutdown ( con , SSL_SENT_SHUTDOWN | SSL_RECEIVED_SHUTDOWN ) ;
# else # endif err : if ( ret >= 0 ) BIO_printf ( bio_s_out , "ACCEPT\n" ) ;
if ( buf != NULL ) OPENSSL_free ( buf ) ;
if ( io != NULL ) BIO_free_all ( io ) ;
return ( ret ) ;
}
/*
 * Temporary-RSA callback for export ciphersuites: lazily generates a
 * keylength-bit RSA key on first use and caches it in a function-static
 * for all later calls.  Returns the cached key, or NULL if allocation or
 * generation failed.
 * NOTE(review): the static cache is written without locking — presumably
 * safe only because s_server is single-threaded; confirm before reuse.
 */
# ifndef OPENSSL_NO_RSA static RSA MS_CALLBACK * tmp_rsa_cb ( SSL * s , int is_export , int keylength ) {
BIGNUM * bn = NULL ;
static RSA * rsa_tmp = NULL ;
if ( ! rsa_tmp && ( ( bn = BN_new ( ) ) == NULL ) ) BIO_printf ( bio_err , "Allocation error in generating RSA key\n" ) ;
if ( ! rsa_tmp && bn ) {
if ( ! s_quiet ) {
BIO_printf ( bio_err , "Generating temp (%d bit) RSA key..." , keylength ) ;
( void ) BIO_flush ( bio_err ) ;
}
/* Generate with public exponent RSA_F4 (65537); on any failure drop
 * the partially built key so a later call can retry. */
if ( ! BN_set_word ( bn , RSA_F4 ) || ( ( rsa_tmp = RSA_new ( ) ) == NULL ) || ! RSA_generate_key_ex ( rsa_tmp , keylength , bn , NULL ) ) {
if ( rsa_tmp ) RSA_free ( rsa_tmp ) ;
rsa_tmp = NULL ;
}
if ( ! s_quiet ) {
BIO_printf ( bio_err , "\n" ) ;
( void ) BIO_flush ( bio_err ) ;
}
BN_free ( bn ) ;
}
return ( rsa_tmp ) ;
}
/*
 * Session-id generation callback: fills 'id' (*id_len bytes) with random
 * bytes, then overwrites the front with the configured session_id_prefix
 * (truncated to *id_len).  Retries up to MAX_SESSION_ID_ATTEMPTS times if
 * the resulting id already matches an existing session.
 * Returns 1 on success, 0 on RNG failure or if no unique id was found.
 */
# endif # define MAX_SESSION_ID_ATTEMPTS 10 static int generate_session_id ( const SSL * ssl , unsigned char * id , unsigned int * id_len ) {
unsigned int count = 0 ;
do {
if ( RAND_pseudo_bytes ( id , * id_len ) < 0 ) return 0 ;
/* Brand the id with the prefix so multiple servers can be told apart. */
memcpy ( id , session_id_prefix , ( strlen ( session_id_prefix ) < * id_len ) ? strlen ( session_id_prefix ) : * id_len ) ;
}
while ( SSL_has_matching_session_id ( ssl , id , * id_len ) && ( ++ count < MAX_SESSION_ID_ATTEMPTS ) ) ;
if ( count >= MAX_SESSION_ID_ATTEMPTS ) return 0 ;
return 1 ;
}
| 0
|
137,130
|
/* PHP binding for gdImagePaletteCopy(): copies the palette of the source
 * image resource onto the destination image resource.  Takes exactly two
 * image resources ("rr"); returns nothing. */
PHP_FUNCTION(imagepalettecopy)
{
	zval *dstim, *srcim;
	gdImagePtr dst, src;
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rr", &dstim, &srcim) == FAILURE) {
		return;
	}
	/* Resolve both resource handles to gd image pointers (errors out on
	 * invalid resources via ZEND_FETCH_RESOURCE). */
	ZEND_FETCH_RESOURCE(dst, gdImagePtr, &dstim, -1, "Image", le_gd);
	ZEND_FETCH_RESOURCE(src, gdImagePtr, &srcim, -1, "Image", le_gd);
	gdImagePaletteCopy(dst, src);
}
| 0
|
65,074
|
/* Reset the V3D GPU: prefer the dedicated reset controller when one was
 * provided, otherwise fall back to resetting via the bridge, then
 * reinitialize the hardware state. */
v3d_reset_v3d(struct v3d_dev *v3d)
{
	if (v3d->reset)
		reset_control_reset(v3d->reset);
	else
		v3d_reset_by_bridge(v3d);
	v3d_init_hw_state(v3d);
}
| 0
|
241,881
|
/*
 * Add principal 'aid' as a member of group 'gid' inside a single ubik
 * write transaction.  '*cid' is set to the caller's identity (WhoIsThis)
 * and used for the permission check.  Returns PRSUCCESS or a pr/ubik
 * error code; every failure path aborts the transaction via ABORT_WITH.
 */
addToGroup(struct rx_call *call, afs_int32 aid, afs_int32 gid, afs_int32 *cid)
{
    afs_int32 code;
    struct ubik_trans *tt;
    afs_int32 tempu;
    afs_int32 tempg;
    struct prentry tentry;
    struct prentry uentry;
    code = Initdb();
    if (code != PRSUCCESS)
	return code;
    /* The built-in "any user"/"auth user" groups and the anonymous user
     * may never gain explicit memberships. */
    if (gid == ANYUSERID || gid == AUTHUSERID)
	return PRPERM;
    if (aid == ANONYMOUSID)
	return PRPERM;
    code = ubik_BeginTrans(dbase, UBIK_WRITETRANS, &tt);
    if (code)
	return code;
    code = ubik_SetLock(tt, 1, 1, LOCKWRITE);
    if (code)
	ABORT_WITH(tt, code);
    code = read_DbHeader(tt);
    if (code)
	ABORT_WITH(tt, code);
    code = WhoIsThis(call, tt, cid);
    if (code)
	ABORT_WITH(tt, PRPERM);
    /* Look up and read the member's entry. */
    tempu = FindByID(tt, aid);
    if (!tempu)
	ABORT_WITH(tt, PRNOENT);
    memset(&uentry, 0, sizeof(uentry));
    code = pr_ReadEntry(tt, 0, tempu, &uentry);
    if (code != 0)
	ABORT_WITH(tt, code);
#if !defined(SUPERGROUPS)
    /* we don't allow groups as members of groups at present */
    if (uentry.flags & PRGRP)
	ABORT_WITH(tt, PRNOTUSER);
#endif
    /* Look up and read the group's entry; it must really be a group. */
    tempg = FindByID(tt, gid);
    if (!tempg)
	ABORT_WITH(tt, PRNOENT);
    code = pr_ReadEntry(tt, 0, tempg, &tentry);
    if (code != 0)
	ABORT_WITH(tt, code);
    /* make sure that this is a group */
    if (!(tentry.flags & PRGRP))
	ABORT_WITH(tt, PRNOTGROUP);
    /* Caller must have add-member rights on the group. */
    if (!AccessOK(tt, *cid, &tentry, PRP_ADD_MEM, PRP_ADD_ANY))
	ABORT_WITH(tt, PRPERM);
    /* Record the membership on both sides: in the group's entry ... */
    code = AddToEntry(tt, &tentry, tempg, aid);
    if (code != PRSUCCESS)
	ABORT_WITH(tt, code);
#if defined(SUPERGROUPS)
    if (uentry.flags & PRGRP)
	code = AddToSGEntry(tt, &uentry, tempu, gid);	/* mod group to be in sg */
    else
#endif
	/* now, modify the user's entry as well */
	code = AddToEntry(tt, &uentry, tempu, gid);
    if (code != PRSUCCESS)
	ABORT_WITH(tt, code);
    code = ubik_EndTrans(tt);
    if (code)
	return code;
    return PRSUCCESS;
}
| 0
|
51,571
|
/*
 * Cast a PATH to a POLYGON.  Only closed paths can be converted; an open
 * path raises an invalid-parameter error.  The polygon copies the path's
 * points verbatim and gets a freshly computed bounding box.
 */
path_poly(PG_FUNCTION_ARGS)
{
	PATH	   *path = PG_GETARG_PATH_P(0);
	POLYGON    *poly;
	int			size;
	int			i;
	/* This is not very consistent --- other similar cases return NULL ... */
	if (!path->closed)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("open path cannot be converted to polygon")));
	/*
	 * Never overflows: the old size fit in MaxAllocSize, and the new size is
	 * just a small constant larger.
	 */
	size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * path->npts;
	poly = (POLYGON *) palloc(size);
	SET_VARSIZE(poly, size);
	poly->npts = path->npts;
	/* Copy each vertex from the path into the polygon. */
	for (i = 0; i < path->npts; i++)
	{
		poly->p[i].x = path->p[i].x;
		poly->p[i].y = path->p[i].y;
	}
	make_bound_box(poly);
	PG_RETURN_POLYGON_P(poly);
}
| 0
|
439,826
|
/*
 * Issue a Get Sensor Thresholds command for sensor number 'sensor' at the
 * given target/lun/channel.  If the sensor lives behind a bridge, the
 * interface's target address/channel are temporarily retargeted and
 * restored after the exchange.  Returns the raw response (may be NULL on
 * transport failure).
 */
ipmi_sdr_get_sensor_thresholds(struct ipmi_intf *intf, uint8_t sensor,
			       uint8_t target, uint8_t lun, uint8_t channel)
{
	struct ipmi_rq req;
	struct ipmi_rs *rsp;
	uint8_t bridged_request = 0;
	uint32_t save_addr;
	uint32_t save_channel;
	/* Retarget the interface when the request must be bridged; remember
	 * the original addressing so it can be restored below. */
	if ( BRIDGE_TO_SENSOR(intf, target, channel) ) {
		bridged_request = 1;
		save_addr = intf->target_addr;
		intf->target_addr = target;
		save_channel = intf->target_channel;
		intf->target_channel = channel;
	}
	memset(&req, 0, sizeof (req));
	req.msg.netfn = IPMI_NETFN_SE;
	req.msg.lun = lun;
	req.msg.cmd = GET_SENSOR_THRESHOLDS;
	req.msg.data = &sensor;
	req.msg.data_len = sizeof (sensor);
	rsp = intf->sendrecv(intf, &req);
	/* Undo the temporary retargeting before returning. */
	if (bridged_request) {
		intf->target_addr = save_addr;
		intf->target_channel = save_channel;
	}
	return rsp;
}
| 0
|
487,822
|
/*
 * Insert the context's preallocation space into its locality group's
 * per-order list, kept sorted by pa_free (descending at the insertion
 * point), then discard preallocations if the list has grown past 8.
 * The list walk and insertion happen under lg_prealloc_lock.
 */
static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
{
	int order, added = 0, lg_prealloc_count = 1;
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;
	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
	/* Bucket by log2 of the free-block count, capped to the table size. */
	order = fls(pa->pa_free) - 1;
	if (order > PREALLOC_TB_SIZE - 1)
		/* The max size of hash table is PREALLOC_TB_SIZE */
		order = PREALLOC_TB_SIZE - 1;
	/* Add the prealloc space to lg */
	spin_lock(&lg->lg_prealloc_lock);
	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
				pa_inode_list,
				lockdep_is_held(&lg->lg_prealloc_lock)) {
		spin_lock(&tmp_pa->pa_lock);
		/* Skip entries already marked for deletion. */
		if (tmp_pa->pa_deleted) {
			spin_unlock(&tmp_pa->pa_lock);
			continue;
		}
		if (!added && pa->pa_free < tmp_pa->pa_free) {
			/* Add to the tail of the previous entry */
			list_add_tail_rcu(&pa->pa_inode_list,
						&tmp_pa->pa_inode_list);
			added = 1;
			/*
			 * we want to count the total
			 * number of entries in the list
			 */
		}
		spin_unlock(&tmp_pa->pa_lock);
		lg_prealloc_count++;
	}
	/* Smaller than everything seen (or list empty): append at the end. */
	if (!added)
		list_add_tail_rcu(&pa->pa_inode_list,
					&lg->lg_prealloc_list[order]);
	spin_unlock(&lg->lg_prealloc_lock);
	/* Now trim the list to be not more than 8 elements */
	if (lg_prealloc_count > 8) {
		ext4_mb_discard_lg_preallocations(sb, lg,
						  order, lg_prealloc_count);
		return;
	}
	return ;
}
| 0
|
168,070
|
/*
 * Bind a TrueType execution context to a face and (optionally) a size:
 * copies the size's function/instruction definitions, CVT, storage,
 * graphics state and code ranges into 'exec', then (re)allocates the
 * interpreter stack and glyph-instruction buffer from the face's
 * maxp limits.  Returns FT_Err_Ok or an allocation error.
 */
TT_Load_Context( TT_ExecContext  exec,
		       TT_Face         face,
		       TT_Size         size )
{
    FT_Int          i;
    FT_ULong        tmp;
    TT_MaxProfile*  maxp;
    FT_Error        error;
    exec->face = face;
    maxp       = &face->max_profile;
    exec->size = size;
    if ( size )
    {
      /* Mirror all per-size interpreter state into the context. */
      exec->numFDefs   = size->num_function_defs;
      exec->maxFDefs   = size->max_function_defs;
      exec->numIDefs   = size->num_instruction_defs;
      exec->maxIDefs   = size->max_instruction_defs;
      exec->FDefs      = size->function_defs;
      exec->IDefs      = size->instruction_defs;
      exec->pointSize  = size->point_size;
      exec->tt_metrics = size->ttmetrics;
      exec->metrics    = *size->metrics;
      exec->maxFunc    = size->max_func;
      exec->maxIns     = size->max_ins;
      for ( i = 0; i < TT_MAX_CODE_RANGES; i++ )
        exec->codeRangeTable[i] = size->codeRangeTable[i];
      /* set graphics state */
      exec->GS = size->GS;
      exec->cvtSize = size->cvt_size;
      exec->cvt     = size->cvt;
      exec->storeSize = size->storage_size;
      exec->storage   = size->storage;
      exec->twilight  = size->twilight;
      /* In case of multi-threading it can happen that the old size object */
      /* no longer exists, thus we must clear all glyph zone references.   */
      FT_ZERO( &exec->zp0 );
      exec->zp1 = exec->zp0;
      exec->zp2 = exec->zp0;
    }
    /* XXX: We reserve a little more elements on the stack to deal safely */
    /*      with broken fonts like arialbs, courbs, timesbs, etc.         */
    tmp = (FT_ULong)exec->stackSize;
    error = Update_Max( exec->memory,
                        &tmp,
                        sizeof ( FT_F26Dot6 ),
                        (void*)&exec->stack,
                        maxp->maxStackElements + 32 );
    exec->stackSize = (FT_Long)tmp;
    if ( error )
      return error;
    /* Grow the glyph instruction buffer to the maxp-declared maximum. */
    tmp = exec->glyphSize;
    error = Update_Max( exec->memory,
                        &tmp,
                        sizeof ( FT_Byte ),
                        (void*)&exec->glyphIns,
                        maxp->maxSizeOfInstructions );
    exec->glyphSize = (FT_UShort)tmp;
    if ( error )
      return error;
    /* Start with an empty points zone mirrored into all three zone refs. */
    exec->pts.n_points   = 0;
    exec->pts.n_contours = 0;
    exec->zp1 = exec->pts;
    exec->zp2 = exec->pts;
    exec->zp0 = exec->pts;
    exec->instruction_trap = FALSE;
    return FT_Err_Ok;
  }
| 0
|
153,423
|
/*
 * Destroy a TA crypto state: close both key objects (if still open),
 * unlink the state from the context's list, run the pending finalizer,
 * free the algorithm-specific context by operation class, and finally
 * free the state itself.
 */
static void cryp_state_free(struct user_ta_ctx *utc, struct tee_cryp_state *cs)
{
	struct tee_obj *o;
	if (tee_obj_get(utc, cs->key1, &o) == TEE_SUCCESS)
		tee_obj_close(utc, o);
	if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS)
		tee_obj_close(utc, o);
	TAILQ_REMOVE(&utc->cryp_states, cs, link);
	if (cs->ctx_finalize != NULL)
		cs->ctx_finalize(cs->ctx, cs->algo);
	/* Each operation class has its own context destructor. */
	switch (TEE_ALG_GET_CLASS(cs->algo)) {
	case TEE_OPERATION_CIPHER:
		crypto_cipher_free_ctx(cs->ctx, cs->algo);
		break;
	case TEE_OPERATION_AE:
		crypto_authenc_free_ctx(cs->ctx, cs->algo);
		break;
	case TEE_OPERATION_DIGEST:
		crypto_hash_free_ctx(cs->ctx, cs->algo);
		break;
	case TEE_OPERATION_MAC:
		crypto_mac_free_ctx(cs->ctx, cs->algo);
		break;
	default:
		/* Other classes must never have allocated a context. */
		assert(!cs->ctx);
	}
	free(cs);
}
| 0
|
428,001
|
/*
 * Build an iMIP email from the iCalendar object 'ical' and pipe it to a
 * local sendmail process.  For REPLY the recipient is the ORGANIZER and
 * the originator the ATTENDEE/VOTER; for all other methods the roles are
 * reversed.  Returns the sendmail waitpid status, or HTTP_UNAVAILABLE if
 * sendmail could not be started.
 */
static int imip_send(icalcomponent *ical)
{
    icalcomponent *comp;
    icalproperty *prop;
    icalproperty_method meth;
    icalcomponent_kind kind;
    const char *argv[8], *originator, *subject;
    FILE *sm;
    pid_t pid;
    int r;
    time_t t = time(NULL);
    char datestr[80];
    static unsigned send_count = 0;
    icalproperty_kind recip_kind;
    const char *(*get_recipient)(const icalproperty *);
    meth = icalcomponent_get_method(ical);
    comp = icalcomponent_get_first_real_component(ical);
    kind = icalcomponent_isa(comp);
    /* Determine Originator and Recipient(s) based on methond and component */
    /* NOTE: the '+ 7' offsets below presumably skip a leading "mailto:"
     * URI scheme on the property value — confirm against the callers. */
    if (meth == ICAL_METHOD_REPLY) {
	recip_kind = ICAL_ORGANIZER_PROPERTY;
	get_recipient = &icalproperty_get_organizer;
	if (kind == ICAL_VPOLL_COMPONENT) {
	    prop = icalcomponent_get_first_property(comp, ICAL_VOTER_PROPERTY);
	    originator = icalproperty_get_voter(prop) + 7;
	}
	else {
	    prop =
		icalcomponent_get_first_property(comp, ICAL_ATTENDEE_PROPERTY);
	    originator = icalproperty_get_attendee(prop) + 7;
	}
    }
    else {
	prop = icalcomponent_get_first_property(comp, ICAL_ORGANIZER_PROPERTY);
	originator = icalproperty_get_organizer(prop) + 7;
	if (kind == ICAL_VPOLL_COMPONENT) {
	    recip_kind = ICAL_VOTER_PROPERTY;
	    get_recipient = &icalproperty_get_voter;
	}
	else {
	    recip_kind = ICAL_ATTENDEE_PROPERTY;
	    get_recipient = &icalproperty_get_attendee;
	}
    }
    /* sendmail -f <originator> -i -N failure,delay -t : recipients are
     * taken from the To: headers written below. */
    argv[0] = "sendmail";
    argv[1] = "-f";
    argv[2] = originator;
    argv[3] = "-i";
    argv[4] = "-N";
    argv[5] = "failure,delay";
    argv[6] = "-t";
    argv[7] = NULL;
    pid = open_sendmail(argv, &sm);
    if (sm == NULL) return HTTP_UNAVAILABLE;
    /* Create iMIP message */
    fprintf(sm, "From: %s\r\n", originator);
    for (prop = icalcomponent_get_first_property(comp, recip_kind);
	 prop;
	 prop = icalcomponent_get_next_property(comp, recip_kind)) {
	fprintf(sm, "To: %s\r\n", get_recipient(prop) + 7);
    }
    /* Fall back to "<kind> <method>" when the component has no SUMMARY. */
    subject = icalcomponent_get_summary(comp);
    if (!subject) {
	fprintf(sm, "Subject: %s %s\r\n", icalcomponent_kind_to_string(kind),
		icalproperty_method_to_string(meth));
    }
    else fprintf(sm, "Subject: %s\r\n", subject);
    time_to_rfc822(t, datestr, sizeof(datestr));
    fprintf(sm, "Date: %s\r\n", datestr);
    /* Message-ID built from pid, timestamp and a per-process counter. */
    fprintf(sm, "Message-ID: <cmu-httpd-%u-%ld-%u@%s>\r\n",
	    getpid(), t, send_count++, config_servername);
    fprintf(sm, "Content-Type: text/calendar; charset=utf-8");
    fprintf(sm, "; method=%s; component=%s \r\n",
	    icalproperty_method_to_string(meth),
	    icalcomponent_kind_to_string(kind));
    fputs("Content-Disposition: inline\r\n", sm);
    fputs("MIME-Version: 1.0\r\n", sm);
    fputs("\r\n", sm);
    fputs(icalcomponent_as_ical_string(ical), sm);
    fclose(sm);
    /* Reap sendmail, retrying on interrupted waits. */
    while (waitpid(pid, &r, 0) < 0);
    return r;
}
| 0
|
334,255
|
/*
 * Perform a full system reset: synchronize vCPU state, run the machine's
 * own reset hook when it has one (otherwise reset all devices), emit a
 * QMP RESET event for externally triggered resets (nonzero 'reason'),
 * and resynchronize vCPUs afterwards.
 */
void qemu_system_reset(ShutdownCause reason)
{
    MachineClass *mc;
    mc = current_machine ? MACHINE_GET_CLASS(current_machine) : NULL;
    cpu_synchronize_all_states();
    if (mc && mc->reset) {
        mc->reset();
    } else {
        qemu_devices_reset();
    }
    if (reason) {
        /* TODO update event based on reason */
        qapi_event_send_reset(&error_abort);
    }
    cpu_synchronize_all_post_reset();
}
| 0
|
161,497
|
/*
 * Return the final component of 'path': the text after the last SEP
 * separator, or the whole string when no separator is present.
 */
static const char *pathbase(const char *path) {
    const char *last_sep = strrchr(path, SEP);
    if (last_sep == NULL)
        return path;
    return last_sep + 1;
}
| 0
|
111,384
|
/*
 * Free a cil_cats container: destroy both expression lists, then release
 * the struct itself.  A NULL argument is a no-op.
 */
void cil_destroy_cats(struct cil_cats *cats)
{
	if (cats != NULL) {
		cil_list_destroy(&cats->str_expr, CIL_TRUE);
		cil_list_destroy(&cats->datum_expr, CIL_FALSE);
		free(cats);
	}
}
| 0
|
252,493
|
// Hash an interface name into a non-negative histogram sample: takes the
// metric-name hash, masks it to the low 31 bits so the value fits a
// signed 32-bit Sample, and converts with a checked cast.
base::Histogram::Sample HashInterfaceNameToHistogramSample(
    base::StringPiece name) {
  return base::strict_cast<base::Histogram::Sample>(
      static_cast<int32_t>(base::HashMetricName(name) & 0x7fffffffull));
}
| 0
|
394,254
|
/*
 * Allocate one LCMS color transform per worker thread (thread limit from
 * the resource manager) so each thread can convert pixels independently.
 * Returns the transform array, or NULL on allocation/creation failure;
 * a partial set is torn down via DestroyTransformThreadSet.
 */
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
  const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
  const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
  const int intent,const cmsUInt32Number flags)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  /* Zero the array so a failed partial fill can be destroyed safely. */
  (void) ResetMagickMemory(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    transform[i]=cmsCreateTransformTHR(image,source_profile,source_type,
      target_profile,target_type,intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
| 0
|
238,103
|
// Generated V8 setter for TestObjectPython.arrayBufferAttribute: converts
// the incoming JS value to a native ArrayBuffer* (null when the value is
// not an ArrayBuffer) and stores it on the wrapped object.
static void arrayBufferAttributeAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
    TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
    V8TRYCATCH_VOID(ArrayBuffer*, cppValue, jsValue->IsArrayBuffer() ? V8ArrayBuffer::toNative(v8::Handle<v8::ArrayBuffer>::Cast(jsValue)) : 0);
    imp->setArrayBufferAttribute(WTF::getPtr(cppValue));
}
| 0
|
110,116
|
/*
 * Return the effective 'shiftwidth' for buffer 'buf': the 'sw' option
 * when nonzero, otherwise the tab width — with FEAT_VARTABS the tab stop
 * applying at column 'col', without it the plain 'ts' value ('col' is
 * then unused).
 */
    get_sw_value_col(buf_T *buf, colnr_T col UNUSED)
{
    return buf->b_p_sw ? buf->b_p_sw :
 #ifdef FEAT_VARTABS
	tabstop_at(col, buf->b_p_ts, buf->b_p_vts_array);
#else
	buf->b_p_ts;
#endif
}
| 0
|
427,508
|
/*
 * Format a time difference for logging: durations of 5 seconds or more
 * (or when millisecond logging is off) use readconf_printtime(); shorter
 * ones are rendered as "S.mmms".
 * Returns a pointer into a function-static buffer in the sub-5s case, so
 * the result must be consumed before the next call (not reentrant).
 */
string_timediff(struct timeval * diff)
{
static uschar buf[sizeof("0.000s")];

if (diff->tv_sec >= 5 || !LOGGING(millisec))
  return readconf_printtime((int)diff->tv_sec);

sprintf(CS buf, "%u.%03us", (uint)diff->tv_sec, (uint)diff->tv_usec/1000);
return buf;
}
| 0
|
322,397
|
/* Declare the assembler loop-filter entry points for each SIMD flavour
 * (yasm builds only). */
DECLARE_LOOP_FILTER(mmxext)
DECLARE_LOOP_FILTER(sse2)
DECLARE_LOOP_FILTER(ssse3)
DECLARE_LOOP_FILTER(sse4)
#endif /* HAVE_YASM */
/* Wire the six-tap luma MC functions (h6, v6, h6v6) for block size SIZE
 * into table slot IDX using the OPT-optimized implementations. */
#define VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel ## SIZE ## _h6_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel ## SIZE ## _v6_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel ## SIZE ## _h6v6_ ## OPT
/* Wire the four-tap and mixed-tap variants as well, then delegate the
 * pure six-tap slots to VP8_LUMA_MC_FUNC. */
#define VP8_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_epel_pixels_tab[IDX][0][1] = ff_put_vp8_epel ## SIZE ## _h4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel ## SIZE ## _v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][1] = ff_put_vp8_epel ## SIZE ## _h4v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel ## SIZE ## _h6v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel ## SIZE ## _h4v6_ ## OPT; \
    VP8_LUMA_MC_FUNC(IDX, SIZE, OPT)
/* Fill every fractional-position slot of the bilinear MC table with the
 * matching h/v/hv OPT implementation. */
#define VP8_BILINEAR_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_bilinear_pixels_tab[IDX][0][1] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][0][2] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT
av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
{
#if HAVE_YASM
int mm_flags = av_get_cpu_flags();
if (mm_flags & AV_CPU_FLAG_MMX) {
c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx;
c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
#if ARCH_X86_32
c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
c->vp8_idct_add = ff_vp8_idct_add_mmx;
c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx;
c->put_vp8_epel_pixels_tab[0][0][0] =
c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx;
#endif
c->put_vp8_epel_pixels_tab[1][0][0] =
c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx;
#if ARCH_X86_32
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx;
c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx;
c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx;
c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx;
c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx;
c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmx;
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx;
#endif
}
/* note that 4-tap width=16 functions are missing because w=16
* is only used for luma, and luma is always a copy or sixtap. */
if (mm_flags & AV_CPU_FLAG_MMXEXT) {
VP8_MC_FUNC(2, 4, mmxext);
VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
#if ARCH_X86_32
VP8_LUMA_MC_FUNC(0, 16, mmxext);
VP8_MC_FUNC(1, 8, mmxext);
VP8_BILINEAR_MC_FUNC(0, 16, mmxext);
VP8_BILINEAR_MC_FUNC(1, 8, mmxext);
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext;
c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext;
c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext;
c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext;
c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext;
c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext;
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext;
#endif
}
if (mm_flags & AV_CPU_FLAG_SSE) {
c->vp8_idct_add = ff_vp8_idct_add_sse;
c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse;
c->put_vp8_epel_pixels_tab[0][0][0] =
c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse;
}
if (mm_flags & (AV_CPU_FLAG_SSE2|AV_CPU_FLAG_SSE2SLOW)) {
VP8_LUMA_MC_FUNC(0, 16, sse2);
VP8_MC_FUNC(1, 8, sse2);
VP8_BILINEAR_MC_FUNC(0, 16, sse2);
VP8_BILINEAR_MC_FUNC(1, 8, sse2);
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2;
#if ARCH_X86_64 || HAVE_ALIGNED_STACK
c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2;
c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2;
c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_sse2;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2;
#endif
}
if (mm_flags & AV_CPU_FLAG_SSE2) {
c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2;
#if ARCH_X86_64 || HAVE_ALIGNED_STACK
c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2;
c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2;
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse2;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2;
#endif
}
if (mm_flags & AV_CPU_FLAG_SSSE3) {
VP8_LUMA_MC_FUNC(0, 16, ssse3);
VP8_MC_FUNC(1, 8, ssse3);
VP8_MC_FUNC(2, 4, ssse3);
VP8_BILINEAR_MC_FUNC(0, 16, ssse3);
VP8_BILINEAR_MC_FUNC(1, 8, ssse3);
VP8_BILINEAR_MC_FUNC(2, 4, ssse3);
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3;
#if ARCH_X86_64 || HAVE_ALIGNED_STACK
c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3;
c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3;
c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3;
c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3;
c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_ssse3;
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_ssse3;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_ssse3;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3;
#endif
}
if (mm_flags & AV_CPU_FLAG_SSE4) {
c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4;
#if ARCH_X86_64 || HAVE_ALIGNED_STACK
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse4;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse4;
#endif
}
#endif /* HAVE_YASM */
}
| 0
|
163,977
|
// Decides whether the cached "end line" (and the lines that follow it) can be
// reused after a partial inline layout, or whether they must be laid out again.
// Returns true when reuse is safe.
//
// Reuse is unsafe when either:
//  * pagination would give one of the trailing lines a different available
//    width at its hypothetical new position, or
//  * the block's height change shifts the trailing lines across a float, so a
//    float now ends inside the vertical range the lines will occupy.
bool RenderBlock::checkPaginationAndFloatsAtEndLine(LineLayoutState& layoutState)
{
    LayoutUnit lineDelta = logicalHeight() - layoutState.endLineLogicalTop();

    bool paginated = view()->layoutState() && view()->layoutState()->isPaginated();
    if (paginated && layoutState.flowThread()) {
        // Walk every line from the cached end line to the last root box and see
        // if its available width changes at the shifted position.
        for (RootInlineBox* lineBox = layoutState.endLine(); lineBox; lineBox = lineBox->nextRootBox()) {
            // Fixed: the original re-tested |paginated| inside this loop even
            // though the enclosing branch already guarantees it is true; the
            // redundant check has been removed (no behavior change).
            // Temporarily recompute the pagination strut at the new position,
            // then restore the old strut so this probe has no side effects.
            LayoutUnit oldPaginationStrut = lineBox->paginationStrut();
            lineDelta -= oldPaginationStrut;
            adjustLinePositionForPagination(lineBox, lineDelta, layoutState.flowThread());
            lineBox->setPaginationStrut(oldPaginationStrut);
            if (lineWidthForPaginatedLineChanged(lineBox, lineDelta, layoutState.flowThread()))
                return false;
        }
    }

    // No vertical shift, or no floats at all: nothing else can invalidate reuse.
    if (!lineDelta || !m_floatingObjects)
        return true;

    // Compute the vertical range [logicalTop, logicalBottom) the trailing lines
    // will sweep through when shifted by |lineDelta|.
    LayoutUnit logicalTop = min(logicalHeight(), layoutState.endLineLogicalTop());

    RootInlineBox* lastLine = layoutState.endLine();
    while (RootInlineBox* nextLine = lastLine->nextRootBox())
        lastLine = nextLine;

    LayoutUnit logicalBottom = lastLine->lineBottomWithLeading() + absoluteValue(lineDelta);

    // Reuse is invalid if any float ends inside that range.
    const FloatingObjectSet& floatingObjectSet = m_floatingObjects->set();
    FloatingObjectSetIterator end = floatingObjectSet.end();
    for (FloatingObjectSetIterator it = floatingObjectSet.begin(); it != end; ++it) {
        FloatingObject* floatingObject = *it;
        // Cache the bottom; the original computed it twice per float.
        LayoutUnit floatLogicalBottom = floatingObject->logicalBottom(isHorizontalWritingMode());
        if (floatLogicalBottom >= logicalTop && floatLogicalBottom < logicalBottom)
            return false;
    }

    return true;
}
| 0
|
141,356
|
// Shutdown hook invoked when the owning CLua interpreter is torn down;
// delegates to cleanup() to release this datum's state.  The CLua& argument
// is unused here — presumably required only to match the hook signature
// (TODO confirm against the caller).
void lua_datum::shutdown(CLua &)
{
    cleanup();
}
| 0
|
501,001
|
/* Allocate a new text-configuration ('txtc') ISO BMFF box.
 *
 * ISOM_DECL_BOX_ALLOC declares and allocates a local named `tmp` of type
 * GF_TextConfigBox with its box type set to GF_ISOM_BOX_TYPE_TXTC; the cast
 * below returns it through the generic GF_Box interface.  Returns NULL on
 * allocation failure (handled inside the macro — TODO confirm). */
GF_Box *txtc_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_TextConfigBox, GF_ISOM_BOX_TYPE_TXTC);
	return (GF_Box *)tmp;
}
| 0
|
130,524
|
/*
 * Return the "options" tracefs directory for the given trace array,
 * creating it on first use.  The result is cached in tr->options.
 * Returns NULL if the parent tracing directory is unavailable or the
 * directory cannot be created (a warning is printed in the latter case).
 */
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *parent;

	/* Fast path: reuse the cached directory. */
	if (tr->options)
		return tr->options;

	parent = tracing_get_dentry(tr);
	if (IS_ERR(parent))
		return NULL;

	tr->options = tracefs_create_dir("options", parent);
	if (tr->options)
		return tr->options;

	pr_warn("Could not create tracefs directory 'options'\n");
	return NULL;
}
| 0
|
301,850
|
/* Reposition a JasPer stream.
 *
 * Flushes or discards any buffered data as appropriate, resets the buffer
 * and EOF state, and then delegates the actual seek to the underlying
 * stream object's seek operation.  Returns the new position on success, or
 * -1 on failure. */
long jas_stream_seek(jas_stream_t *stream, long offset, int origin)
{
	long result;
	int mode = stream->bufmode_;

	/* The buffer cannot be in use for both reading and writing. */
	assert(!((mode & JAS_STREAM_RDBUF) && (mode & JAS_STREAM_WRBUF)));

	/* Reset the EOF indicator (since we may not be at the EOF anymore). */
	stream->flags_ &= ~JAS_STREAM_EOF;

	if (mode & JAS_STREAM_RDBUF) {
		/* A relative seek must account for buffered-but-unconsumed
		   input, which sits between the logical and physical positions. */
		if (origin == SEEK_CUR)
			offset -= stream->cnt_;
	} else if (mode & JAS_STREAM_WRBUF) {
		/* Pending output must reach the underlying object first. */
		if (jas_stream_flush(stream))
			return -1;
	}

	/* Discard the buffer contents and leave the stream unbuffered. */
	stream->cnt_ = 0;
	stream->ptr_ = stream->bufstart_;
	stream->bufmode_ &= ~(JAS_STREAM_RDBUF | JAS_STREAM_WRBUF);

	result = (*stream->ops_->seek_)(stream->obj_, offset, origin);
	return (result < 0) ? -1 : result;
}
| 0
|
377,671
|
/* Stop a qdisc watchdog: cancel its pending hrtimer and mark the
 * associated qdisc as no longer throttled. */
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	qdisc_unthrottled(wd->qdisc);
}
| 0
|
507,032
|
/*
 * Map a negotiated protocol version number to the corresponding
 * server-side SSL_METHOD.  SSLv2/SSLv3 entries are compiled in only when
 * the respective protocol is not disabled at build time.  Returns NULL
 * for an unrecognized version.
 */
static const SSL_METHOD *ssl23_get_server_method(int ver)
	{
	switch (ver)
		{
#ifndef OPENSSL_NO_SSL2
	case SSL2_VERSION:
		return(SSLv2_server_method());
#endif
#ifndef OPENSSL_NO_SSL3
	case SSL3_VERSION:
		return(SSLv3_server_method());
#endif
	case TLS1_VERSION:
		return(TLSv1_server_method());
	case TLS1_1_VERSION:
		return(TLSv1_1_server_method());
	case TLS1_2_VERSION:
		return(TLSv1_2_server_method());
	default:
		return(NULL);
		}
	}
| 0
|
287,935
|
// Constructs the DevTools DOM domain handler, registering under the DOM
// domain name.  host_ starts out null; it is assigned later (presumably when
// a renderer/frame host is attached — TODO confirm against the setter).
DOMHandler::DOMHandler()
    : DevToolsDomainHandler(DOM::Metainfo::domainName),
      host_(nullptr) {
}
| 1
|
42,107
|
/* Mix user-space data into the entropy pool `r`, in bounded chunks.
 *
 * Copies at most sizeof(buf) (64) bytes per iteration through a kernel
 * bounce buffer, mixes each chunk into the pool, and yields the CPU
 * between chunks so large writes do not hog it.  Returns 0 on success or
 * -EFAULT if the user buffer cannot be read.
 * (The return type is cut off at the top of this chunk — presumably
 * ssize_t/int; confirm in the full file.) */
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
	size_t bytes;
	__u32 buf[16];	/* 64-byte kernel bounce buffer */
	const char __user *p = buffer;

	while (count > 0) {
		bytes = min(count, sizeof(buf));
		if (copy_from_user(&buf, p, bytes))
			return -EFAULT;
		count -= bytes;
		p += bytes;
		mix_pool_bytes(r, buf, bytes);
		cond_resched();	/* be preemption-friendly on large writes */
	}
	return 0;
}
| 0
|
119,019
|
/*
 * SIGINT handler: report that the signal was handled and terminate with
 * success.
 *
 * Fixed: the original called printf() and exit(), neither of which is
 * async-signal-safe per POSIX (stdio may hold locks; exit() runs atexit
 * handlers and flushes stdio, which can deadlock or corrupt state if the
 * signal arrives mid-printf).  Use write() and _exit(), both of which are
 * on the async-signal-safe list.  The emitted message bytes are unchanged.
 */
static void sig_handler(const int sig) {
    static const char msg[] = "SIGINT handled.\n";
    (void)sig;  /* handler is installed for SIGINT only */
    /* Best effort; there is nothing safe to do if write() fails here. */
    if (write(STDOUT_FILENO, msg, sizeof(msg) - 1) == -1) {
        /* ignored */
    }
    _exit(EXIT_SUCCESS);
}
| 0
|
52,009
|
/*
 * Safe LZO1X decompressor (bounds-checked variant).
 *
 * Decompresses `in_len` bytes from `in` into `out`.  On entry *out_len is
 * the capacity of `out`; on return it is set to the number of bytes
 * actually produced (including on the error paths, so callers can see how
 * far decompression got).  Returns LZO_E_OK on success, or one of the
 * LZO_E_*_OVERRUN / LZO_E_ERROR / LZO_E_INPUT_NOT_CONSUMED codes.
 *
 * All input/output/lookbehind accesses are guarded by the NEED_IP/NEED_OP/
 * TEST_LB macros, which jump to the *_overrun labels at the bottom when a
 * bound would be exceeded (op_end/ip_end are consumed by those macros).
 * `state` tracks how many literals trailed the previous instruction
 * (0..3, or 4 for a long literal run), which disambiguates the t < 16
 * opcodes below.  NOTE(review): the macro semantics above are inferred
 * from their usage here — confirm against the lzodefs/lzo1x headers.
 */
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
			  unsigned char *out, size_t *out_len)
{
	unsigned char *op;		/* current output position */
	const unsigned char *ip;	/* current input position */
	size_t t, next;
	size_t state = 0;		/* literals after the previous instruction */
	const unsigned char *m_pos;	/* match source (lookbehind) pointer */
	const unsigned char * const ip_end = in + in_len;
	unsigned char * const op_end = out + *out_len;	/* used via HAVE_OP/NEED_OP */

	op = out;
	ip = in;

	/* The shortest valid stream is 3 bytes. */
	if (unlikely(in_len < 3))
		goto input_overrun;
	/* A first byte > 17 encodes an initial literal run of (byte - 17). */
	if (*ip > 17) {
		t = *ip++ - 17;
		if (t < 4) {
			next = t;
			goto match_next;
		}
		goto copy_literal_run;
	}

	/* Main instruction loop: one iteration per literal-run or match opcode. */
	for (;;) {
		t = *ip++;
		if (t < 16) {
			if (likely(state == 0)) {
				/* Literal run; t == 0 means an extended length
				 * (runs of 255 plus a final byte) follows. */
				if (unlikely(t == 0)) {
					while (unlikely(*ip == 0)) {
						t += 255;
						ip++;
						NEED_IP(1, 0);
					}
					t += 15 + *ip++;
				}
				t += 3;
copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
				/* Fast path: copy 16 bytes per step when both
				 * buffers have enough slack to over-copy. */
				if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
					const unsigned char *ie = ip + t;
					unsigned char *oe = op + t;
					do {
						COPY8(op, ip);
						op += 8;
						ip += 8;
						COPY8(op, ip);
						op += 8;
						ip += 8;
					} while (ip < ie);
					ip = ie;
					op = oe;
				} else
#endif
				{
					/* Byte-by-byte fallback with explicit
					 * bounds checks. */
					NEED_OP(t, 0);
					NEED_IP(t, 3);
					do {
						*op++ = *ip++;
					} while (--t > 0);
				}
				state = 4;	/* "long literal run" state */
				continue;
			} else if (state != 4) {
				/* Short match following 1..3 literals:
				 * copies exactly 2 bytes. */
				next = t & 3;
				m_pos = op - 1;
				m_pos -= t >> 2;
				m_pos -= *ip++ << 2;
				TEST_LB(m_pos);
				NEED_OP(2, 0);
				op[0] = m_pos[0];
				op[1] = m_pos[1];
				op += 2;
				goto match_next;
			} else {
				/* 3-byte match at a large offset, after a long
				 * literal run. */
				next = t & 3;
				m_pos = op - (1 + M2_MAX_OFFSET);
				m_pos -= t >> 2;
				m_pos -= *ip++ << 2;
				t = 3;
			}
		} else if (t >= 64) {
			/* M2 match: length 3..8, offset from opcode + 1 byte. */
			next = t & 3;
			m_pos = op - 1;
			m_pos -= (t >> 2) & 7;
			m_pos -= *ip++ << 3;
			t = (t >> 5) - 1 + (3 - 1);
		} else if (t >= 32) {
			/* M3 match: length in opcode (extended when the low
			 * 5 bits are 0), 16-bit little-endian offset follows. */
			t = (t & 31) + (3 - 1);
			if (unlikely(t == 2)) {
				while (unlikely(*ip == 0)) {
					t += 255;
					ip++;
					NEED_IP(1, 0);
				}
				t += 31 + *ip++;
				NEED_IP(2, 0);
			}
			m_pos = op - 1;
			next = get_unaligned_le16(ip);
			ip += 2;
			m_pos -= next >> 2;
			next &= 3;
		} else {
			/* M4 match: large offset; an offset of 0 is the
			 * end-of-stream marker. */
			m_pos = op;
			m_pos -= (t & 8) << 11;
			t = (t & 7) + (3 - 1);
			if (unlikely(t == 2)) {
				while (unlikely(*ip == 0)) {
					t += 255;
					ip++;
					NEED_IP(1, 0);
				}
				t += 7 + *ip++;
				NEED_IP(2, 0);
			}
			next = get_unaligned_le16(ip);
			ip += 2;
			m_pos -= next >> 2;
			next &= 3;
			if (m_pos == op)
				goto eof_found;
			m_pos -= 0x4000;
		}
		/* Copy the match: m_pos must lie inside already-written output. */
		TEST_LB(m_pos);
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
		/* Fast 8-byte copy only when source and destination are at
		 * least 8 bytes apart (otherwise the regions overlap). */
		if (op - m_pos >= 8) {
			unsigned char *oe = op + t;
			if (likely(HAVE_OP(t, 15))) {
				do {
					COPY8(op, m_pos);
					op += 8;
					m_pos += 8;
					COPY8(op, m_pos);
					op += 8;
					m_pos += 8;
				} while (op < oe);
				op = oe;
				/* Copy the trailing literals inline when the
				 * input has slack, then fetch the next opcode. */
				if (HAVE_IP(6, 0)) {
					state = next;
					COPY4(op, ip);
					op += next;
					ip += next;
					continue;
				}
			} else {
				NEED_OP(t, 0);
				do {
					*op++ = *m_pos++;
				} while (op < oe);
			}
		} else
#endif
		{
			/* Overlapping (or fallback) byte-wise match copy. */
			unsigned char *oe = op + t;
			NEED_OP(t, 0);
			op[0] = m_pos[0];
			op[1] = m_pos[1];
			op += 2;
			m_pos += 2;
			do {
				*op++ = *m_pos++;
			} while (op < oe);
		}
match_next:
		/* Copy the 0..3 literals that trail the match. */
		state = next;
		t = next;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
		if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
			COPY4(op, ip);
			op += t;
			ip += t;
		} else
#endif
		{
			NEED_IP(t, 3);
			NEED_OP(t, 0);
			while (t > 0) {
				*op++ = *ip++;
				t--;
			}
		}
	}

eof_found:
	/* End-of-stream marker reached: validate its length field and that
	 * the input was consumed exactly. */
	*out_len = op - out;
	return (t != 3 ? LZO_E_ERROR :
		ip == ip_end ? LZO_E_OK :
		ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN);

input_overrun:
	*out_len = op - out;
	return LZO_E_INPUT_OVERRUN;

output_overrun:
	*out_len = op - out;
	return LZO_E_OUTPUT_OVERRUN;

lookbehind_overrun:
	*out_len = op - out;
	return LZO_E_LOOKBEHIND_OVERRUN;
}
| 0
|
276,845
|
// Returns the BrowserContext associated with this navigation controller
// (non-owning; stored at construction).
BrowserContext* NavigationControllerImpl::GetBrowserContext() const {
  return browser_context_;
}
| 0
|
314,566
|
// Exposes this RenderViewImpl through its RenderWidget base interface
// (RenderViewImpl is-a RenderWidget, so it simply returns itself).
RenderWidget* RenderViewImpl::GetWidget() {
  return this;
}
| 0
|
516,365
|
// JS binding: returns the certificate's info-access data as a string,
// produced by GetInfoAccessString through a freshly allocated memory BIO.
// If the conversion fails (ToLocal returns false), the JS return value is
// left unset, i.e. the call yields `undefined`.
void X509Certificate::InfoAccess(const FunctionCallbackInfo<Value>& args) {
  Environment* env = Environment::GetCurrent(args);
  X509Certificate* cert;
  ASSIGN_OR_RETURN_UNWRAP(&cert, args.Holder());
  // Scratch memory BIO consumed by GetInfoAccessString; freed by BIOPointer.
  // NOTE(review): BIO_new can return nullptr on allocation failure —
  // presumably GetInfoAccessString tolerates that; confirm.
  BIOPointer bio(BIO_new(BIO_s_mem()));
  Local<Value> ret;
  if (GetInfoAccessString(env, bio, cert->get()).ToLocal(&ret))
    args.GetReturnValue().Set(ret);
}
| 0
|
125,266
|
/* Reports whether the named remote is backed by an OCI registry, i.e. its
 * configured URL starts with the "oci+" scheme prefix.  Also returns FALSE
 * when the local repo cannot be ensured or the remote URL cannot be read.
 * (The return type is cut off at the top of this chunk — presumably
 * gboolean; confirm in the full file.)  `url` is freed automatically via
 * g_autofree. */
flatpak_dir_get_remote_oci (FlatpakDir *self,
                            const char *remote_name)
{
  g_autofree char *url = NULL;

  if (!flatpak_dir_ensure_repo (self, NULL, NULL))
    return FALSE;

  if (!ostree_repo_remote_get_url (self->repo, remote_name, &url, NULL))
    return FALSE;

  return url && g_str_has_prefix (url, "oci+");
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.