| idx (int64) | func (string) | target (int64) |
|---|---|---|
464,199
|
nm_utils_kernel_cmdline_match_check(const char *const *proc_cmdline,
const char *const *patterns,
guint num_patterns,
GError ** error)
{
gboolean has_optional = FALSE;
gboolean has_any_optional = FALSE;
guint i;
for (i = 0; i < num_patterns; i++) {
const char *element = patterns[i];
gboolean is_inverted = FALSE;
gboolean is_mandatory = FALSE;
gboolean match;
const char *p;
_pattern_parse(element, &p, &is_inverted, &is_mandatory);
match = _kernel_cmdline_match(proc_cmdline, p);
if (is_inverted)
match = !match;
if (is_mandatory) {
if (!match) {
nm_utils_error_set(error,
NM_UTILS_ERROR_CONNECTION_AVAILABLE_TEMPORARY,
"device does not satisfy match.kernel-command-line property %s",
patterns[i]);
return FALSE;
}
} else {
has_any_optional = TRUE;
if (match)
has_optional = TRUE;
}
}
if (!has_optional && has_any_optional) {
nm_utils_error_set(error,
NM_UTILS_ERROR_CONNECTION_AVAILABLE_TEMPORARY,
"device does not satisfy any match.kernel-command-line property");
return FALSE;
}
return TRUE;
}
| 0
|
319,987
|
void cpu_exit(CPUState *cpu)
{
cpu->exit_request = 1;
/* Ensure cpu_exec will see the exit request after TCG has exited. */
smp_wmb();
cpu->tcg_exit_req = 1;
}
| 1
|
498,165
|
static enum PartMode decode_part_mode(thread_context* tctx,
enum PredMode pred_mode, int cLog2CbSize)
{
de265_image* img = tctx->img;
if (pred_mode == MODE_INTRA) {
logtrace(LogSlice,"# part_mode (INTRA)\n");
int bit = decode_CABAC_bit(&tctx->cabac_decoder, &tctx->ctx_model[CONTEXT_MODEL_PART_MODE]);
logtrace(LogSlice,"> %s\n",bit ? "2Nx2N" : "NxN");
logtrace(LogSymbols,"$1 part_mode=%d\n",bit ? PART_2Nx2N : PART_NxN);
return bit ? PART_2Nx2N : PART_NxN;
}
else {
const seq_parameter_set& sps = img->get_sps();
int bit0 = decode_CABAC_bit(&tctx->cabac_decoder, &tctx->ctx_model[CONTEXT_MODEL_PART_MODE+0]);
if (bit0) { logtrace(LogSymbols,"$1 part_mode=%d\n",PART_2Nx2N); return PART_2Nx2N; }
// CHECK_ME: code optimized and a bug fixed here; needs more verification!
int bit1 = decode_CABAC_bit(&tctx->cabac_decoder, &tctx->ctx_model[CONTEXT_MODEL_PART_MODE+1]);
if (cLog2CbSize > sps.Log2MinCbSizeY) {
if (!sps.amp_enabled_flag) {
logtrace(LogSymbols,"$1 part_mode=%d\n",bit1 ? PART_2NxN : PART_Nx2N);
return bit1 ? PART_2NxN : PART_Nx2N;
}
else {
int bit3 = decode_CABAC_bit(&tctx->cabac_decoder, &tctx->ctx_model[CONTEXT_MODEL_PART_MODE+3]);
if (bit3) {
logtrace(LogSymbols,"$1 part_mode=%d\n",bit1 ? PART_2NxN : PART_Nx2N);
return bit1 ? PART_2NxN : PART_Nx2N;
}
int bit4 = decode_CABAC_bypass(&tctx->cabac_decoder);
if ( bit1 && bit4) {
logtrace(LogSymbols,"$1 part_mode=%d\n",PART_2NxnD);
return PART_2NxnD;
}
if ( bit1 && !bit4) {
logtrace(LogSymbols,"$1 part_mode=%d\n",PART_2NxnU);
return PART_2NxnU;
}
if (!bit1 && !bit4) {
logtrace(LogSymbols,"$1 part_mode=%d\n",PART_nLx2N);
return PART_nLx2N;
}
if (!bit1 && bit4) {
logtrace(LogSymbols,"$1 part_mode=%d\n",PART_nRx2N);
return PART_nRx2N;
}
}
}
else {
// TODO, we could save one if here when first decoding the next bin and then
// check cLog2CbSize==3 when it is '0'
if (bit1) {
logtrace(LogSymbols,"$1 part_mode=%d\n",PART_2NxN);
return PART_2NxN;
}
if (cLog2CbSize==3) {
logtrace(LogSymbols,"$1 part_mode=%d\n",PART_Nx2N);
return PART_Nx2N;
}
else {
int bit2 = decode_CABAC_bit(&tctx->cabac_decoder, &tctx->ctx_model[CONTEXT_MODEL_PART_MODE+2]);
logtrace(LogSymbols,"$1 part_mode=%d\n",PART_NxN-bit2);
return (enum PartMode)((int)PART_NxN - bit2)/*bit2 ? PART_Nx2N : PART_NxN*/;
}
}
}
assert(false); // should never be reached
return PART_2Nx2N;
}
| 0
|
117,004
|
char *http_content_type(ci_request_t * req)
{
ci_headers_list_t *heads;
char *val;
if (!(heads = ci_http_response_headers(req))) {
/* Then maybe it is a reqmod request; try to get the request headers */
if (!(heads = ci_http_request_headers(req)))
return NULL;
}
if (!(val = ci_headers_value(heads, "Content-Type")))
return NULL;
return val;
}
| 0
|
491,816
|
parser_parse_continue_statement (parser_context_t *context_p) /**< context */
{
parser_stack_iterator_t iterator;
cbc_opcode_t opcode = CBC_JUMP_FORWARD;
lexer_next_token (context_p);
parser_stack_iterator_init (context_p, &iterator);
if (!(context_p->token.flags & LEXER_WAS_NEWLINE)
&& context_p->token.type == LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
parser_stack_iterator_t loop_iterator;
loop_iterator.current_p = NULL;
/* The label with the same name is searched on the stack. */
while (true)
{
uint8_t type = parser_stack_iterator_read_uint8 (&iterator);
if (type == PARSER_STATEMENT_START)
{
parser_raise_error (context_p, PARSER_ERR_INVALID_CONTINUE_LABEL);
}
/* Only those labels are checked which are labels of a loop. */
if (loop_iterator.current_p != NULL && type == PARSER_STATEMENT_LABEL)
{
parser_label_statement_t label_statement;
parser_stack_iterator_skip (&iterator, 1);
parser_stack_iterator_read (&iterator, &label_statement, sizeof (parser_label_statement_t));
if (lexer_current_is_literal (context_p, &label_statement.label_ident))
{
parser_loop_statement_t loop;
parser_stack_iterator_skip (&loop_iterator, 1);
parser_stack_iterator_read (&loop_iterator, &loop, sizeof (parser_loop_statement_t));
loop.branch_list_p = parser_emit_cbc_forward_branch_item (context_p,
(uint16_t) opcode,
loop.branch_list_p);
loop.branch_list_p->branch.offset |= CBC_HIGHEST_BIT_MASK;
parser_stack_iterator_write (&loop_iterator, &loop, sizeof (parser_loop_statement_t));
lexer_next_token (context_p);
return;
}
parser_stack_iterator_skip (&iterator, sizeof (parser_label_statement_t));
continue;
}
if (parser_statement_flags[type] & PARSER_STATM_CONTEXT_BREAK)
{
opcode = CBC_JUMP_FORWARD_EXIT_CONTEXT;
}
#if JERRY_ESNEXT
const bool is_private_scope = (type == PARSER_STATEMENT_PRIVATE_SCOPE
|| type == PARSER_STATEMENT_PRIVATE_CONTEXT);
#else /* !JERRY_ESNEXT */
const bool is_private_scope = false;
#endif /* !JERRY_ESNEXT */
if (parser_statement_flags[type] & PARSER_STATM_CONTINUE_TARGET)
{
loop_iterator = iterator;
}
else if (!is_private_scope)
{
loop_iterator.current_p = NULL;
}
parser_stack_iterator_skip (&iterator, parser_statement_length (type));
}
}
/* The first loop statement is searched. */
while (true)
{
uint8_t type = parser_stack_iterator_read_uint8 (&iterator);
if (type == PARSER_STATEMENT_START)
{
parser_raise_error (context_p, PARSER_ERR_INVALID_CONTINUE);
}
if (parser_statement_flags[type] & PARSER_STATM_CONTINUE_TARGET)
{
parser_loop_statement_t loop;
parser_stack_iterator_skip (&iterator, 1);
parser_stack_iterator_read (&iterator, &loop, sizeof (parser_loop_statement_t));
loop.branch_list_p = parser_emit_cbc_forward_branch_item (context_p,
(uint16_t) opcode,
loop.branch_list_p);
loop.branch_list_p->branch.offset |= CBC_HIGHEST_BIT_MASK;
parser_stack_iterator_write (&iterator, &loop, sizeof (parser_loop_statement_t));
return;
}
if (parser_statement_flags[type] & PARSER_STATM_CONTEXT_BREAK)
{
opcode = CBC_JUMP_FORWARD_EXIT_CONTEXT;
}
parser_stack_iterator_skip (&iterator, parser_statement_length (type));
}
} /* parser_parse_continue_statement */
| 0
|
26,212
|
int ff_msmpeg4_decode_motion(MpegEncContext *s, int *mx_ptr, int *my_ptr) {
MVTable *mv;
int code, mx, my;
mv = &ff_mv_tables[s->mv_table_index];
code = get_vlc2(&s->gb, mv->vlc.table, MV_VLC_BITS, 2);
if (code < 0) {
av_log(s->avctx, AV_LOG_ERROR, "illegal MV code at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
if (code == mv->n) {
mx = get_bits(&s->gb, 6);
my = get_bits(&s->gb, 6);
}
else {
mx = mv->table_mvx[code];
my = mv->table_mvy[code];
}
mx += *mx_ptr - 32;
my += *my_ptr - 32;
if (mx <= -64) mx += 64;
else if (mx >= 64) mx -= 64;
if (my <= -64) my += 64;
else if (my >= 64) my -= 64;
*mx_ptr = mx;
*my_ptr = my;
return 0;
}
| 0
|
212,965
|
static bool IsRequiredForInjection(UChar c) {
return (c == '\'' || c == '"' || c == '<' || c == '>');
}
| 0
|
124,722
|
FilterUtility::finalTimeout(const RouteEntry& route, Http::HeaderMap& request_headers,
bool insert_envoy_expected_request_timeout_ms, bool grpc_request,
bool per_try_timeout_hedging_enabled,
bool respect_expected_rq_timeout) {
// See if there is a user supplied timeout in a request header. If there is we take that.
// Otherwise if the request is gRPC and a maximum gRPC timeout is configured we use the timeout
// in the gRPC headers (or infinity when gRPC headers have no timeout), but cap that timeout to
// the configured maximum gRPC timeout (which may also be infinity, represented by a 0 value),
// or the default from the route config otherwise.
TimeoutData timeout;
if (grpc_request && route.maxGrpcTimeout()) {
const std::chrono::milliseconds max_grpc_timeout = route.maxGrpcTimeout().value();
std::chrono::milliseconds grpc_timeout = Grpc::Common::getGrpcTimeout(request_headers);
if (route.grpcTimeoutOffset()) {
// We only apply the offset if it won't result in grpc_timeout hitting 0 or below, as
// setting it to 0 means infinity and a negative timeout makes no sense.
const auto offset = *route.grpcTimeoutOffset();
if (offset < grpc_timeout) {
grpc_timeout -= offset;
}
}
// Cap gRPC timeout to the configured maximum considering that 0 means infinity.
if (max_grpc_timeout != std::chrono::milliseconds(0) &&
(grpc_timeout == std::chrono::milliseconds(0) || grpc_timeout > max_grpc_timeout)) {
grpc_timeout = max_grpc_timeout;
}
timeout.global_timeout_ = grpc_timeout;
} else {
timeout.global_timeout_ = route.timeout();
}
timeout.per_try_timeout_ = route.retryPolicy().perTryTimeout();
uint64_t header_timeout;
if (respect_expected_rq_timeout) {
// Check if there is timeout set by egress Envoy.
// If present, use that value as route timeout and don't override
// *x-envoy-expected-rq-timeout-ms* header. At this point *x-envoy-upstream-rq-timeout-ms*
// header should have been sanitized by egress Envoy.
Http::HeaderEntry* header_expected_timeout_entry =
request_headers.EnvoyExpectedRequestTimeoutMs();
if (header_expected_timeout_entry) {
trySetGlobalTimeout(header_expected_timeout_entry, timeout);
} else {
Http::HeaderEntry* header_timeout_entry = request_headers.EnvoyUpstreamRequestTimeoutMs();
if (trySetGlobalTimeout(header_timeout_entry, timeout)) {
request_headers.removeEnvoyUpstreamRequestTimeoutMs();
}
}
} else {
Http::HeaderEntry* header_timeout_entry = request_headers.EnvoyUpstreamRequestTimeoutMs();
if (trySetGlobalTimeout(header_timeout_entry, timeout)) {
request_headers.removeEnvoyUpstreamRequestTimeoutMs();
}
}
// See if there is a per try/retry timeout. If it's >= global we just ignore it.
Http::HeaderEntry* per_try_timeout_entry = request_headers.EnvoyUpstreamRequestPerTryTimeoutMs();
if (per_try_timeout_entry) {
if (absl::SimpleAtoi(per_try_timeout_entry->value().getStringView(), &header_timeout)) {
timeout.per_try_timeout_ = std::chrono::milliseconds(header_timeout);
}
request_headers.removeEnvoyUpstreamRequestPerTryTimeoutMs();
}
if (timeout.per_try_timeout_ >= timeout.global_timeout_) {
timeout.per_try_timeout_ = std::chrono::milliseconds(0);
}
// See if there is any timeout to write in the expected timeout header.
uint64_t expected_timeout = timeout.per_try_timeout_.count();
// Use the global timeout if no per try timeout was specified or if we're
// doing hedging when there are per try timeouts. Either of these scenarios
// mean that the upstream server can use the full global timeout.
if (per_try_timeout_hedging_enabled || expected_timeout == 0) {
expected_timeout = timeout.global_timeout_.count();
}
if (insert_envoy_expected_request_timeout_ms && expected_timeout > 0) {
request_headers.insertEnvoyExpectedRequestTimeoutMs().value(expected_timeout);
}
// If we've configured max_grpc_timeout, override the grpc-timeout header with
// the expected timeout. This ensures that the optional per try timeout is reflected
// in grpc-timeout, ensuring that the upstream gRPC server is aware of the actual timeout.
// If the expected timeout is 0 set no timeout, as Envoy treats 0 as infinite timeout.
if (grpc_request && route.maxGrpcTimeout() && expected_timeout != 0) {
Grpc::Common::toGrpcTimeout(std::chrono::milliseconds(expected_timeout),
request_headers.insertGrpcTimeout().value());
}
return timeout;
}
| 0
|
125,827
|
static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}
| 0
|
458,012
|
gdm_session_worker_handle_start_reauthentication (GdmDBusWorker *object,
GDBusMethodInvocation *invocation,
int pid_of_caller,
int uid_of_caller)
{
GdmSessionWorker *worker = GDM_SESSION_WORKER (object);
ReauthenticationRequest *request;
if (worker->priv->state != GDM_SESSION_WORKER_STATE_SESSION_STARTED) {
g_dbus_method_invocation_return_error (invocation,
GDM_SESSION_WORKER_ERROR,
GDM_SESSION_WORKER_ERROR_WRONG_STATE,
"Cannot reauthenticate while in state %s",
get_state_name (worker->priv->state));
return TRUE;
}
g_debug ("GdmSessionWorker: start reauthentication");
request = reauthentication_request_new (worker, pid_of_caller, uid_of_caller, invocation);
g_hash_table_replace (worker->priv->reauthentication_requests,
GINT_TO_POINTER (pid_of_caller),
request);
return TRUE;
}
| 0
|
269,020
|
inline void ArgMax(const RuntimeShape& input1_shape, const T1* input1_data,
const RuntimeShape& input2_shape, const T3* input2_data,
const RuntimeShape& output_shape, T2* output_data) {
// Drop shape of second input: not needed.
ArgMax(input1_shape, input1_data, input2_data, output_shape, output_data);
}
| 0
|
36,898
|
static gdImagePtr _gd2CreateFromFile (gdIOCtxPtr in, int *sx, int *sy, int *cs, int *vers, int *fmt, int *ncx, int *ncy, t_chunk_info ** cidx)
{
gdImagePtr im;
if (_gd2GetHeader (in, sx, sy, cs, vers, fmt, ncx, ncy, cidx) != 1) {
GD2_DBG(php_gd_error("Bad GD2 header"));
goto fail1;
}
if (gd2_truecolor(*fmt)) {
im = gdImageCreateTrueColor(*sx, *sy);
} else {
im = gdImageCreate(*sx, *sy);
}
if (im == NULL) {
GD2_DBG(php_gd_error("Could not create gdImage"));
goto fail1;
}
if (!_gdGetColors(in, im, (*vers) == 2)) {
GD2_DBG(php_gd_error("Could not read color palette"));
goto fail2;
}
GD2_DBG(php_gd_error("Image palette completed: %d colours", im->colorsTotal));
return im;
fail2:
gdImageDestroy(im);
return 0;
fail1:
return 0;
}
| 0
|
188,222
|
void QQuickWebViewExperimental::setContentWidth(qreal width)
{
Q_D(QQuickWebView);
ASSERT(d->flickProvider);
d->userDidOverrideContentWidth = true;
d->flickProvider->setContentWidth(width);
}
| 0
|
374,420
|
plpgsql_inline_handler(PG_FUNCTION_ARGS)
{
InlineCodeBlock *codeblock = (InlineCodeBlock *) DatumGetPointer(PG_GETARG_DATUM(0));
PLpgSQL_function *func;
FunctionCallInfoData fake_fcinfo;
FmgrInfo flinfo;
EState *simple_eval_estate;
Datum retval;
int rc;
Assert(IsA(codeblock, InlineCodeBlock));
/*
* Connect to SPI manager
*/
if ((rc = SPI_connect()) != SPI_OK_CONNECT)
elog(ERROR, "SPI_connect failed: %s", SPI_result_code_string(rc));
/* Compile the anonymous code block */
func = plpgsql_compile_inline(codeblock->source_text);
/* Mark the function as busy, just pro forma */
func->use_count++;
/*
* Set up a fake fcinfo with just enough info to satisfy
* plpgsql_exec_function(). In particular note that this sets things up
* with no arguments passed.
*/
MemSet(&fake_fcinfo, 0, sizeof(fake_fcinfo));
MemSet(&flinfo, 0, sizeof(flinfo));
fake_fcinfo.flinfo = &flinfo;
flinfo.fn_oid = InvalidOid;
flinfo.fn_mcxt = CurrentMemoryContext;
/* Create a private EState for simple-expression execution */
simple_eval_estate = CreateExecutorState();
/* And run the function */
PG_TRY();
{
retval = plpgsql_exec_function(func, &fake_fcinfo, simple_eval_estate);
}
PG_CATCH();
{
/*
* We need to clean up what would otherwise be long-lived resources
* accumulated by the failed DO block, principally cached plans for
* statements (which can be flushed with plpgsql_free_function_memory)
* and execution trees for simple expressions, which are in the
* private EState.
*
* Before releasing the private EState, we must clean up any
* simple_econtext_stack entries pointing into it, which we can do by
* invoking the subxact callback. (It will be called again later if
* some outer control level does a subtransaction abort, but no harm
* is done.) We cheat a bit knowing that plpgsql_subxact_cb does not
* pay attention to its parentSubid argument.
*/
plpgsql_subxact_cb(SUBXACT_EVENT_ABORT_SUB,
GetCurrentSubTransactionId(),
0, NULL);
/* Clean up the private EState */
FreeExecutorState(simple_eval_estate);
/* Function should now have no remaining use-counts ... */
func->use_count--;
Assert(func->use_count == 0);
/* ... so we can free subsidiary storage */
plpgsql_free_function_memory(func);
/* And propagate the error */
PG_RE_THROW();
}
PG_END_TRY();
/* Clean up the private EState */
FreeExecutorState(simple_eval_estate);
/* Function should now have no remaining use-counts ... */
func->use_count--;
Assert(func->use_count == 0);
/* ... so we can free subsidiary storage */
plpgsql_free_function_memory(func);
/*
* Disconnect from SPI manager
*/
if ((rc = SPI_finish()) != SPI_OK_FINISH)
elog(ERROR, "SPI_finish failed: %s", SPI_result_code_string(rc));
return retval;
}
| 0
|
422,419
|
e_ews_calendar_period_new (void)
{
return g_new0 (EEwsCalendarPeriod, 1);
}
| 0
|
365,743
|
g_random(char* data, int len)
{
#if defined(_WIN32)
int index;
srand(g_time1());
for (index = 0; index < len; index++)
{
data[index] = (char)rand(); /* rand returns a number between 0 and
RAND_MAX */
}
#else
int fd;
memset(data, 0x44, len);
fd = open("/dev/urandom", O_RDONLY);
if (fd == -1)
{
fd = open("/dev/random", O_RDONLY);
}
if (fd != -1)
{
if (read(fd, data, len) != len)
{
}
close(fd);
}
#endif
}
| 0
|
454,790
|
Value ExpressionIndexOfCP::evaluate(const Document& root, Variables* variables) const {
Value stringArg = _children[0]->evaluate(root, variables);
if (stringArg.nullish()) {
return Value(BSONNULL);
}
uassert(40093,
str::stream() << "$indexOfCP requires a string as the first argument, found: "
<< typeName(stringArg.getType()),
stringArg.getType() == String);
const std::string& input = stringArg.getString();
Value tokenArg = _children[1]->evaluate(root, variables);
uassert(40094,
str::stream() << "$indexOfCP requires a string as the second argument, found: "
<< typeName(tokenArg.getType()),
tokenArg.getType() == String);
const std::string& token = tokenArg.getString();
size_t startCodePointIndex = 0;
if (_children.size() > 2) {
Value startIndexArg = _children[2]->evaluate(root, variables);
uassertIfNotIntegralAndNonNegative(startIndexArg, getOpName(), "starting index");
startCodePointIndex = static_cast<size_t>(startIndexArg.coerceToInt());
}
// Compute the length (in code points) of the input, and convert 'startCodePointIndex' to a byte
// index.
size_t codePointLength = 0;
size_t startByteIndex = 0;
for (size_t byteIx = 0; byteIx < input.size(); ++codePointLength) {
if (codePointLength == startCodePointIndex) {
// We have determined the byte at which our search will start.
startByteIndex = byteIx;
}
uassert(40095,
"$indexOfCP found bad UTF-8 in the input",
!str::isUTF8ContinuationByte(input[byteIx]));
byteIx += getCodePointLength(input[byteIx]);
}
size_t endCodePointIndex = codePointLength;
if (_children.size() > 3) {
Value endIndexArg = _children[3]->evaluate(root, variables);
uassertIfNotIntegralAndNonNegative(endIndexArg, getOpName(), "ending index");
// Don't let 'endCodePointIndex' exceed the number of code points in the string.
endCodePointIndex =
std::min(codePointLength, static_cast<size_t>(endIndexArg.coerceToInt()));
}
if (startByteIndex == 0 && input.empty() && token.empty()) {
// If we are finding the index of "" in the string "", the below loop will not loop, so we
// need a special case for this.
return Value(0);
}
// We must keep track of which byte, and which code point, we are examining, being careful not
// to overflow either the length of the string or the ending code point.
size_t currentCodePointIndex = startCodePointIndex;
for (size_t byteIx = startByteIndex; currentCodePointIndex < endCodePointIndex;
++currentCodePointIndex) {
if (stringHasTokenAtIndex(byteIx, input, token)) {
return Value(static_cast<int>(currentCodePointIndex));
}
byteIx += getCodePointLength(input[byteIx]);
}
return Value(-1);
}
| 0
|
130,759
|
static void mspack_fmap_close(struct mspack_file *file)
{
struct mspack_handle *mspack_handle = (struct mspack_handle *)file;
if (!mspack_handle)
return;
if (mspack_handle->type == FILETYPE_FILENAME)
fclose(mspack_handle->f);
free(mspack_handle);
}
| 0
|
53,674
|
static inline void io_req_complete_state(struct io_kiocb *req, s32 res,
u32 cflags)
{
req->result = res;
req->cflags = cflags;
req->flags |= REQ_F_COMPLETE_INLINE;
}
| 0
|
36,617
|
static int rfcomm_create_dev(struct sock *sk, void __user *arg)
{
struct rfcomm_dev_req req;
struct rfcomm_dlc *dlc;
int id;
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
BT_DBG("sk %p dev_id %d flags 0x%x", sk, req.dev_id, req.flags);
if (req.flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN))
return -EPERM;
if (req.flags & (1 << RFCOMM_REUSE_DLC)) {
/* Socket must be connected */
if (sk->sk_state != BT_CONNECTED)
return -EBADFD;
dlc = rfcomm_pi(sk)->dlc;
rfcomm_dlc_hold(dlc);
} else {
dlc = rfcomm_dlc_alloc(GFP_KERNEL);
if (!dlc)
return -ENOMEM;
}
id = rfcomm_dev_add(&req, dlc);
if (id < 0) {
rfcomm_dlc_put(dlc);
return id;
}
if (req.flags & (1 << RFCOMM_REUSE_DLC)) {
/* DLC is now used by device.
* Socket must be disconnected */
sk->sk_state = BT_CLOSED;
}
return id;
}
| 0
|
390,246
|
bool operator>(tm& a, tm& b)
{
if (a.tm_year > b.tm_year)
return true;
if (a.tm_year == b.tm_year && a.tm_mon > b.tm_mon)
return true;
if (a.tm_year == b.tm_year && a.tm_mon == b.tm_mon && a.tm_mday >b.tm_mday)
return true;
if (a.tm_year == b.tm_year && a.tm_mon == b.tm_mon &&
a.tm_mday == b.tm_mday && a.tm_hour > b.tm_hour)
return true;
if (a.tm_year == b.tm_year && a.tm_mon == b.tm_mon &&
a.tm_mday == b.tm_mday && a.tm_hour == b.tm_hour &&
a.tm_min > b.tm_min)
return true;
if (a.tm_year == b.tm_year && a.tm_mon == b.tm_mon &&
a.tm_mday == b.tm_mday && a.tm_hour == b.tm_hour &&
a.tm_min == b.tm_min && a.tm_sec > b.tm_sec)
return true;
return false;
}
| 0
|
383,976
|
gnutls_x509_crt_get_proxy(gnutls_x509_crt_t cert,
unsigned int *critical,
int *pathlen,
char **policyLanguage,
char **policy, size_t * sizeof_policy)
{
int result;
gnutls_datum_t proxyCertInfo;
if (cert == NULL) {
gnutls_assert();
return GNUTLS_E_INVALID_REQUEST;
}
if ((result =
_gnutls_x509_crt_get_extension(cert, "1.3.6.1.5.5.7.1.14", 0,
&proxyCertInfo, critical)) < 0)
{
return result;
}
if (proxyCertInfo.size == 0 || proxyCertInfo.data == NULL) {
gnutls_assert();
return GNUTLS_E_REQUESTED_DATA_NOT_AVAILABLE;
}
result = gnutls_x509_ext_import_proxy(&proxyCertInfo, pathlen,
policyLanguage,
policy,
sizeof_policy);
_gnutls_free_datum(&proxyCertInfo);
if (result < 0) {
gnutls_assert();
return result;
}
return 0;
}
| 0
|
376,637
|
ReachabilityAnalyzer(HBasicBlock* entry_block,
int block_count,
HBasicBlock* dont_visit)
: visited_count_(0),
stack_(16, entry_block->zone()),
reachable_(block_count, entry_block->zone()),
dont_visit_(dont_visit) {
PushBlock(entry_block);
Analyze();
}
| 0
|
463,011
|
NamespaceString ns() const override {
// TODO get the ns from the parsed QueryRequest.
return NamespaceString(CommandHelpers::parseNsFromCommand(_dbName, _request.body));
}
| 0
|
99,244
|
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
int stats_cnt;
int ret = 0;
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list) {
stats_cnt = atomic_read(&device->dev_stats_ccnt);
if (!device->dev_stats_valid || stats_cnt == 0)
continue;
/*
* There is a LOAD-LOAD control dependency between the value of
* dev_stats_ccnt and updating the on-disk values which requires
* reading the in-memory counters. Such control dependencies
* require explicit read memory barriers.
*
* This memory barrier pairs with smp_mb__before_atomic in
* btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
* barrier implied by atomic_xchg in
* btrfs_dev_stats_read_and_reset
*/
smp_rmb();
ret = update_dev_stat_item(trans, device);
if (!ret)
atomic_sub(stats_cnt, &device->dev_stats_ccnt);
}
mutex_unlock(&fs_devices->device_list_mutex);
return ret;
}
| 0
|
502,289
|
bool is_attr_in_list(const char * const * attrs, const char *attr)
{
unsigned int i;
for (i = 0; attrs[i]; i++) {
if (ldb_attr_cmp(attrs[i], attr) == 0)
return true;
}
return false;
}
| 0
|
272,696
|
static handle_t *ext4_get_nojournal(void)
{
handle_t *handle = current->journal_info;
unsigned long ref_cnt = (unsigned long)handle;
BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
ref_cnt++;
handle = (handle_t *)ref_cnt;
current->journal_info = handle;
return handle;
}
| 0
|
69,653
|
md_process_line(MD_CTX* ctx, const MD_LINE_ANALYSIS** p_pivot_line, MD_LINE_ANALYSIS* line)
{
const MD_LINE_ANALYSIS* pivot_line = *p_pivot_line;
int ret = 0;
/* Blank line ends current leaf block. */
if(line->type == MD_LINE_BLANK) {
MD_CHECK(md_end_current_block(ctx));
*p_pivot_line = &md_dummy_blank_line;
return 0;
}
/* Some line types form a block on their own. */
if(line->type == MD_LINE_HR || line->type == MD_LINE_ATXHEADER) {
MD_CHECK(md_end_current_block(ctx));
/* Add our single-line block. */
MD_CHECK(md_start_new_block(ctx, line));
MD_CHECK(md_add_line_into_current_block(ctx, line));
MD_CHECK(md_end_current_block(ctx));
*p_pivot_line = &md_dummy_blank_line;
return 0;
}
/* MD_LINE_SETEXTUNDERLINE changes meaning of the current block and ends it. */
if(line->type == MD_LINE_SETEXTUNDERLINE) {
MD_ASSERT(ctx->current_block != NULL);
ctx->current_block->type = MD_BLOCK_H;
ctx->current_block->data = line->data;
ctx->current_block->flags |= MD_BLOCK_SETEXT_HEADER;
MD_CHECK(md_add_line_into_current_block(ctx, line));
MD_CHECK(md_end_current_block(ctx));
if(ctx->current_block == NULL) {
*p_pivot_line = &md_dummy_blank_line;
} else {
/* This happens if we have consumed all the body as link ref. defs.
* and downgraded the underline into start of a new paragraph block. */
line->type = MD_LINE_TEXT;
*p_pivot_line = line;
}
return 0;
}
/* MD_LINE_TABLEUNDERLINE changes meaning of the current block. */
if(line->type == MD_LINE_TABLEUNDERLINE) {
MD_ASSERT(ctx->current_block != NULL);
MD_ASSERT(ctx->current_block->n_lines == 1);
ctx->current_block->type = MD_BLOCK_TABLE;
ctx->current_block->data = line->data;
MD_ASSERT(pivot_line != &md_dummy_blank_line);
((MD_LINE_ANALYSIS*)pivot_line)->type = MD_LINE_TABLE;
MD_CHECK(md_add_line_into_current_block(ctx, line));
return 0;
}
/* The current block also ends if the line has different type. */
if(line->type != pivot_line->type)
MD_CHECK(md_end_current_block(ctx));
/* The current line may start a new block. */
if(ctx->current_block == NULL) {
MD_CHECK(md_start_new_block(ctx, line));
*p_pivot_line = line;
}
/* In all other cases the line is just a continuation of the current block. */
MD_CHECK(md_add_line_into_current_block(ctx, line));
abort:
return ret;
}
| 0
|
8,047
|
static PyObject *__pyx_f_17clickhouse_driver_14bufferedreader___pyx_unpickle_BufferedSocketReader__set_state(struct __pyx_obj_17clickhouse_driver_14bufferedreader_BufferedSocketReader *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("__pyx_unpickle_BufferedSocketReader__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[4])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(PyByteArray_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytearray", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->__pyx_base.buffer);
__Pyx_DECREF(__pyx_v___pyx_result->__pyx_base.buffer);
__pyx_v___pyx_result->__pyx_base.buffer = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->__pyx_base.current_buffer_size = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v___pyx_result->__pyx_base.position = __pyx_t_2;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->sock);
__Pyx_DECREF(__pyx_v___pyx_result->sock);
__pyx_v___pyx_result->sock = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[4])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_2 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_2 > 4) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_3 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_3 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_3) {
/* "(tree fragment)":14
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[4]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 4, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state):
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[4])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_BufferedSocketReader__set_state(<BufferedSocketReader> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_BufferedSocketReader__set_state(BufferedSocketReader __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.buffer = __pyx_state[0]; __pyx_result.current_buffer_size = __pyx_state[1]; __pyx_result.position = __pyx_state[2]; __pyx_result.sock = __pyx_state[3]
* if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("clickhouse_driver.bufferedreader.__pyx_unpickle_BufferedSocketReader__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
| 1
|
303,120
|
guard_get_guardfraction_bandwidth(guardfraction_bandwidth_t *guardfraction_bw,
int orig_bandwidth,
uint32_t guardfraction_percentage)
{
double guardfraction_fraction;
/* Turn the percentage into a fraction. */
tor_assert(guardfraction_percentage <= 100);
guardfraction_fraction = guardfraction_percentage / 100.0;
long guard_bw = tor_lround(guardfraction_fraction * orig_bandwidth);
tor_assert(guard_bw <= INT_MAX);
guardfraction_bw->guard_bw = (int) guard_bw;
guardfraction_bw->non_guard_bw = orig_bandwidth - (int) guard_bw;
}
| 0
|
234,280
|
AppCache::AppCache(AppCacheStorage* storage, int64_t cache_id)
: cache_id_(cache_id),
owning_group_(nullptr),
online_whitelist_all_(false),
is_complete_(false),
cache_size_(0),
padding_size_(0),
storage_(storage) {
storage_->working_set()->AddCache(this);
}
| 0
|
517,332
|
virtual bool mark_as_eliminated_processor(void *arg) { return 0; }
| 0
|
436,784
|
char *mksnpath(char *buf, size_t n, const char *fmt, ...)
{
va_list args;
unsigned len;
va_start(args, fmt);
len = vsnprintf(buf, n, fmt, args);
va_end(args);
if (len >= n) {
strlcpy(buf, bad_path, n);
return buf;
}
return (char *)cleanup_path(buf);
}
| 0
|
511,101
|
execute_command (command)
COMMAND *command;
{
struct fd_bitmap *bitmap;
int result;
current_fds_to_close = (struct fd_bitmap *)NULL;
bitmap = new_fd_bitmap (FD_BITMAP_DEFAULT_SIZE);
begin_unwind_frame ("execute-command");
add_unwind_protect (dispose_fd_bitmap, (char *)bitmap);
/* Just do the command, but not asynchronously. */
result = execute_command_internal (command, 0, NO_PIPE, NO_PIPE, bitmap);
dispose_fd_bitmap (bitmap);
discard_unwind_frame ("execute-command");
#if defined (PROCESS_SUBSTITUTION)
/* don't unlink fifos if we're in a shell function; wait until the function
returns. */
if (variable_context == 0)
unlink_fifo_list ();
#endif /* PROCESS_SUBSTITUTION */
QUIT;
return (result);
}
| 0
|
519,896
|
Diagnostics_area *get_stmt_da()
{ return m_stmt_da; }
| 0
|
46,597
|
static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
{
/*
* Note that as a general rule, the high half of the MSRs (bits in
* the control fields which may be 1) should be initialized by the
* intersection of the underlying hardware's MSR (i.e., features which
* can be supported) and the list of features we want to expose -
* because they are known to be properly supported in our code.
* Also, usually, the low half of the MSRs (bits which must be 1) can
* be set to 0, meaning that L1 may turn off any of these bits. The
* reason is that if one of these bits is necessary, it will appear
* in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
* fields of vmcs01 and vmcs02, will turn these bits off - and
* nested_vmx_exit_handled() will not pass related exits to L1.
* These rules have exceptions below.
*/
/* pin-based controls */
rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
vmx->nested.nested_vmx_pinbased_ctls_low,
vmx->nested.nested_vmx_pinbased_ctls_high);
vmx->nested.nested_vmx_pinbased_ctls_low |=
PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
vmx->nested.nested_vmx_pinbased_ctls_high &=
PIN_BASED_EXT_INTR_MASK |
PIN_BASED_NMI_EXITING |
PIN_BASED_VIRTUAL_NMIS;
vmx->nested.nested_vmx_pinbased_ctls_high |=
PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
PIN_BASED_VMX_PREEMPTION_TIMER;
if (kvm_vcpu_apicv_active(&vmx->vcpu))
vmx->nested.nested_vmx_pinbased_ctls_high |=
PIN_BASED_POSTED_INTR;
/* exit controls */
rdmsr(MSR_IA32_VMX_EXIT_CTLS,
vmx->nested.nested_vmx_exit_ctls_low,
vmx->nested.nested_vmx_exit_ctls_high);
vmx->nested.nested_vmx_exit_ctls_low =
VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
vmx->nested.nested_vmx_exit_ctls_high &=
#ifdef CONFIG_X86_64
VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
vmx->nested.nested_vmx_exit_ctls_high |=
VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
if (kvm_mpx_supported())
vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
/* We support free control of debug control saving. */
vmx->nested.nested_vmx_true_exit_ctls_low =
vmx->nested.nested_vmx_exit_ctls_low &
~VM_EXIT_SAVE_DEBUG_CONTROLS;
/* entry controls */
rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
vmx->nested.nested_vmx_entry_ctls_low,
vmx->nested.nested_vmx_entry_ctls_high);
vmx->nested.nested_vmx_entry_ctls_low =
VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
vmx->nested.nested_vmx_entry_ctls_high &=
#ifdef CONFIG_X86_64
VM_ENTRY_IA32E_MODE |
#endif
VM_ENTRY_LOAD_IA32_PAT;
vmx->nested.nested_vmx_entry_ctls_high |=
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
if (kvm_mpx_supported())
vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
/* We support free control of debug control loading. */
vmx->nested.nested_vmx_true_entry_ctls_low =
vmx->nested.nested_vmx_entry_ctls_low &
~VM_ENTRY_LOAD_DEBUG_CONTROLS;
/* cpu-based controls */
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
vmx->nested.nested_vmx_procbased_ctls_low,
vmx->nested.nested_vmx_procbased_ctls_high);
vmx->nested.nested_vmx_procbased_ctls_low =
CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
vmx->nested.nested_vmx_procbased_ctls_high &=
CPU_BASED_VIRTUAL_INTR_PENDING |
CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
/*
* We can allow some features even when not supported by the
* hardware. For example, L1 can specify an MSR bitmap - and we
* can use it to avoid exits to L1 - even when L0 runs L2
* without MSR bitmaps.
*/
vmx->nested.nested_vmx_procbased_ctls_high |=
CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
CPU_BASED_USE_MSR_BITMAPS;
/* We support free control of CR3 access interception. */
vmx->nested.nested_vmx_true_procbased_ctls_low =
vmx->nested.nested_vmx_procbased_ctls_low &
~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
/* secondary cpu-based controls */
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
vmx->nested.nested_vmx_secondary_ctls_low,
vmx->nested.nested_vmx_secondary_ctls_high);
vmx->nested.nested_vmx_secondary_ctls_low = 0;
vmx->nested.nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING |
SECONDARY_EXEC_XSAVES |
SECONDARY_EXEC_PCOMMIT;
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_ENABLE_EPT;
vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_INVEPT_BIT;
vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
/*
* For nested guests, we don't do anything specific
* for single context invalidation. Hence, only advertise
* support for global context invalidation.
*/
vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
} else
vmx->nested.nested_vmx_ept_caps = 0;
/*
* Old versions of KVM use the single-context version without
* checking for support, so declare that it is supported even
* though it is treated as global context. The alternative is
* not failing the single-context invvpid, and it is worse.
*/
if (enable_vpid)
vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
else
vmx->nested.nested_vmx_vpid_caps = 0;
if (enable_unrestricted_guest)
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_UNRESTRICTED_GUEST;
/* miscellaneous data */
rdmsr(MSR_IA32_VMX_MISC,
vmx->nested.nested_vmx_misc_low,
vmx->nested.nested_vmx_misc_high);
vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
vmx->nested.nested_vmx_misc_low |=
VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
VMX_MISC_ACTIVITY_HLT;
vmx->nested.nested_vmx_misc_high = 0;
}
| 0
|
429,212
|
void defer_leftover_input(__G)
__GDEF
{
if ((zoff_t)G.incnt > G.csize) {
/* (G.csize < MAXINT), we can safely cast it to int !! */
if (G.csize < 0L)
G.csize = 0L;
G.inptr_leftover = G.inptr + (int)G.csize;
G.incnt_leftover = G.incnt - (int)G.csize;
G.incnt = (int)G.csize;
} else
G.incnt_leftover = 0;
G.csize -= G.incnt;
} /* end function defer_leftover_input() */
| 0
|
123,549
|
t2p_sample_rgbaa_to_rgb(tdata_t data, uint32 samplecount)
{
uint32 i;
/* For the 3 first samples, there is overlap between source and
* destination, so use memmove().
* See http://bugzilla.maptools.org/show_bug.cgi?id=2577
*/
for(i = 0; i < 3 && i < samplecount; i++)
memmove((uint8*)data + i * 3, (uint8*)data + i * 4, 3);
for(; i < samplecount; i++)
memcpy((uint8*)data + i * 3, (uint8*)data + i * 4, 3);
return(i * 3);
}
| 0
|
230,540
|
static void Ins_MDRP( INS_ARG )
{
Int point;
TT_F26Dot6 distance,
org_dist;
point = (Int)args[0];
if ( BOUNDS( args[0], CUR.zp1.n_points ) ||
BOUNDS( CUR.GS.rp0, CUR.zp0.n_points) )
{
/* Current version of FreeType silently ignores this out of bounds error
* and drops the instruction, see bug #691121
CUR.error = TT_Err_Invalid_Reference; */
return;
}
/* XXX: Is there some undocumented feature while in the */
/* twilight zone? */
org_dist = CUR_Func_dualproj( CUR.zp1.org_x[point] -
CUR.zp0.org_x[CUR.GS.rp0],
CUR.zp1.org_y[point] -
CUR.zp0.org_y[CUR.GS.rp0] );
/* single width cutin test */
if ( ABS(org_dist) < CUR.GS.single_width_cutin )
{
if ( org_dist >= 0 )
org_dist = CUR.GS.single_width_value;
else
org_dist = -CUR.GS.single_width_value;
}
/* round flag */
if ( (CUR.opcode & 4) != 0 )
distance = CUR_Func_round( org_dist,
CUR.metrics.compensations[CUR.opcode & 3] );
else
distance = Round_None( EXEC_ARGS
org_dist,
CUR.metrics.compensations[CUR.opcode & 3] );
/* minimum distance flag */
if ( (CUR.opcode & 8) != 0 )
{
if ( org_dist >= 0 )
{
if ( distance < CUR.GS.minimum_distance )
distance = CUR.GS.minimum_distance;
}
else
{
if ( distance > -CUR.GS.minimum_distance )
distance = -CUR.GS.minimum_distance;
}
}
/* now move the point */
org_dist = CUR_Func_project( CUR.zp1.cur_x[point] -
CUR.zp0.cur_x[CUR.GS.rp0],
CUR.zp1.cur_y[point] -
CUR.zp0.cur_y[CUR.GS.rp0] );
CUR_Func_move( &CUR.zp1, point, distance - org_dist );
CUR.GS.rp1 = CUR.GS.rp0;
CUR.GS.rp2 = point;
if ( (CUR.opcode & 16) != 0 )
CUR.GS.rp0 = point;
}
| 0
|
16,460
|
static void e1000e_intrmgr_resume ( E1000ECore * core ) {
int i ;
e1000e_intmgr_timer_resume ( & core -> radv ) ;
e1000e_intmgr_timer_resume ( & core -> rdtr ) ;
e1000e_intmgr_timer_resume ( & core -> raid ) ;
e1000e_intmgr_timer_resume ( & core -> tidv ) ;
e1000e_intmgr_timer_resume ( & core -> tadv ) ;
e1000e_intmgr_timer_resume ( & core -> itr ) ;
for ( i = 0 ;
i < E1000E_MSIX_VEC_NUM ;
i ++ ) {
e1000e_intmgr_timer_resume ( & core -> eitr [ i ] ) ;
}
}
| 0
|
277,442
|
DeprecatedAcceleratorNotificationDelegate() {}
| 0
|
207,703
|
void TabHelper::DidNavigateMainFrame(
const content::LoadCommittedDetails& details,
const content::FrameNavigateParams& params) {
InvokeForContentRulesRegistries(
[this, &details, ¶ms](ContentRulesRegistry* registry) {
registry->DidNavigateMainFrame(web_contents(), details, params);
});
content::BrowserContext* context = web_contents()->GetBrowserContext();
ExtensionRegistry* registry = ExtensionRegistry::Get(context);
const ExtensionSet& enabled_extensions = registry->enabled_extensions();
if (util::IsNewBookmarkAppsEnabled()) {
Browser* browser = chrome::FindBrowserWithWebContents(web_contents());
if (browser && browser->is_app()) {
const Extension* extension = registry->GetExtensionById(
web_app::GetExtensionIdFromApplicationName(browser->app_name()),
ExtensionRegistry::EVERYTHING);
if (extension && AppLaunchInfo::GetFullLaunchURL(extension).is_valid())
SetExtensionApp(extension);
} else {
UpdateExtensionAppIcon(
enabled_extensions.GetExtensionOrAppByURL(params.url));
}
} else {
UpdateExtensionAppIcon(
enabled_extensions.GetExtensionOrAppByURL(params.url));
}
if (!details.is_in_page)
ExtensionActionAPI::Get(context)->ClearAllValuesForTab(web_contents());
}
| 0
|
493,586
|
EXPORTED int mailbox_open_irlnb(const char *name, struct mailbox **mailboxptr)
{
return mailbox_open_advanced(name,
LOCK_SHARED|LOCK_NONBLOCK,
/* cannot do nonblocking lock on index...why? */
LOCK_SHARED,
NULL, mailboxptr);
}
| 0
|
206,745
|
check_lock_length(u64 offset, u64 length)
{
return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
(length > ~offset)));
}
| 0
|
484,091
|
static int __maybe_unused i740fb_suspend(struct device *dev)
{
struct fb_info *info = dev_get_drvdata(dev);
struct i740fb_par *par = info->par;
console_lock();
mutex_lock(&(par->open_lock));
/* do nothing if framebuffer is not active */
if (par->ref_count == 0) {
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
}
fb_set_suspend(info, 1);
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
}
| 0
|
1,162
|
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) {
TM2Context *const l = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size & ~3;
AVFrame *const p = &l->pic;
int offset = TM2_HEADER_SIZE;
int i, t, ret;
uint8_t *swbuf;
swbuf = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!swbuf) {
av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
return AVERROR(ENOMEM);
}
p->reference = 1;
p->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
av_free(swbuf);
return ret;
}
l->dsp.bswap_buf((uint32_t *) swbuf, (const uint32_t *) buf, buf_size >> 2);
if ((ret = tm2_read_header(l, swbuf)) < 0) {
av_free(swbuf);
return ret;
}
for (i = 0; i < TM2_NUM_STREAMS; i++) {
if (offset >= buf_size) {
av_free(swbuf);
return AVERROR_INVALIDDATA;
}
t = tm2_read_stream(l, swbuf + offset, tm2_stream_order[i], buf_size - offset);
if (t < 0) {
av_free(swbuf);
return t;
}
offset += t;
}
p->key_frame = tm2_decode_blocks(l, p);
if (p->key_frame) p->pict_type = AV_PICTURE_TYPE_I;
else p->pict_type = AV_PICTURE_TYPE_P;
l->cur = !l->cur;
*got_frame = 1;
*(AVFrame *) data = l->pic;
av_free(swbuf);
return buf_size;
}
| 1
|
193,826
|
inline int Round(double x) {
return static_cast<int>(x + 0.5);
}
| 0
|
113,884
|
static void free_buffers(struct v4l2_loopback_device *dev)
{
MARK();
dprintk("freeing image@%p for dev:%p\n", dev ? dev->image : NULL, dev);
if (dev->image) {
vfree(dev->image);
dev->image = NULL;
}
if (dev->timeout_image) {
vfree(dev->timeout_image);
dev->timeout_image = NULL;
}
dev->imagesize = 0;
}
| 0
|
501,925
|
static struct security_descriptor *descr_handle_sd_flags(TALLOC_CTX *mem_ctx,
struct security_descriptor *new_sd,
struct security_descriptor *old_sd,
uint32_t sd_flags)
{
struct security_descriptor *final_sd;
/* if there is no control or control == 0 modify everything */
if (!sd_flags) {
return new_sd;
}
final_sd = talloc_zero(mem_ctx, struct security_descriptor);
final_sd->revision = SECURITY_DESCRIPTOR_REVISION_1;
final_sd->type = SEC_DESC_SELF_RELATIVE;
if (sd_flags & (SECINFO_OWNER)) {
if (new_sd->owner_sid) {
final_sd->owner_sid = talloc_memdup(mem_ctx, new_sd->owner_sid, sizeof(struct dom_sid));
}
final_sd->type |= new_sd->type & SEC_DESC_OWNER_DEFAULTED;
}
else if (old_sd) {
if (old_sd->owner_sid) {
final_sd->owner_sid = talloc_memdup(mem_ctx, old_sd->owner_sid, sizeof(struct dom_sid));
}
final_sd->type |= old_sd->type & SEC_DESC_OWNER_DEFAULTED;
}
if (sd_flags & (SECINFO_GROUP)) {
if (new_sd->group_sid) {
final_sd->group_sid = talloc_memdup(mem_ctx, new_sd->group_sid, sizeof(struct dom_sid));
}
final_sd->type |= new_sd->type & SEC_DESC_GROUP_DEFAULTED;
}
else if (old_sd) {
if (old_sd->group_sid) {
final_sd->group_sid = talloc_memdup(mem_ctx, old_sd->group_sid, sizeof(struct dom_sid));
}
final_sd->type |= old_sd->type & SEC_DESC_GROUP_DEFAULTED;
}
if (sd_flags & (SECINFO_SACL)) {
final_sd->sacl = security_acl_dup(mem_ctx,new_sd->sacl);
final_sd->type |= new_sd->type & (SEC_DESC_SACL_PRESENT |
SEC_DESC_SACL_DEFAULTED|SEC_DESC_SACL_AUTO_INHERIT_REQ |
SEC_DESC_SACL_AUTO_INHERITED|SEC_DESC_SACL_PROTECTED |
SEC_DESC_SERVER_SECURITY);
}
else if (old_sd && old_sd->sacl) {
final_sd->sacl = security_acl_dup(mem_ctx,old_sd->sacl);
final_sd->type |= old_sd->type & (SEC_DESC_SACL_PRESENT |
SEC_DESC_SACL_DEFAULTED|SEC_DESC_SACL_AUTO_INHERIT_REQ |
SEC_DESC_SACL_AUTO_INHERITED|SEC_DESC_SACL_PROTECTED |
SEC_DESC_SERVER_SECURITY);
}
if (sd_flags & (SECINFO_DACL)) {
final_sd->dacl = security_acl_dup(mem_ctx,new_sd->dacl);
final_sd->type |= new_sd->type & (SEC_DESC_DACL_PRESENT |
SEC_DESC_DACL_DEFAULTED|SEC_DESC_DACL_AUTO_INHERIT_REQ |
SEC_DESC_DACL_AUTO_INHERITED|SEC_DESC_DACL_PROTECTED |
SEC_DESC_DACL_TRUSTED);
}
else if (old_sd && old_sd->dacl) {
final_sd->dacl = security_acl_dup(mem_ctx,old_sd->dacl);
final_sd->type |= old_sd->type & (SEC_DESC_DACL_PRESENT |
SEC_DESC_DACL_DEFAULTED|SEC_DESC_DACL_AUTO_INHERIT_REQ |
SEC_DESC_DACL_AUTO_INHERITED|SEC_DESC_DACL_PROTECTED |
SEC_DESC_DACL_TRUSTED);
}
/* not so sure about this */
final_sd->type |= new_sd->type & SEC_DESC_RM_CONTROL_VALID;
return final_sd;
}
| 0
|
718
|
TEST_F(ProtocolHandlerRegistryTest, TestIsSameOrigin) {
ProtocolHandler ph1 = CreateProtocolHandler("mailto", GURL("http://test.com/%s"), "test1");
ProtocolHandler ph2 = CreateProtocolHandler("mailto", GURL("http://test.com/updated-url/%s"), "test2");
ProtocolHandler ph3 = CreateProtocolHandler("mailto", GURL("http://other.com/%s"), "test");
ASSERT_EQ(ph1.url().GetOrigin() == ph2.url().GetOrigin(), ph1.IsSameOrigin(ph2));
ASSERT_EQ(ph1.url().GetOrigin() == ph2.url().GetOrigin(), ph2.IsSameOrigin(ph1));
ASSERT_EQ(ph2.url().GetOrigin() == ph3.url().GetOrigin(), ph2.IsSameOrigin(ph3));
ASSERT_EQ(ph3.url().GetOrigin() == ph2.url().GetOrigin(), ph3.IsSameOrigin(ph2));
}
| 1
|
356,912
|
int filp_close(struct file *filp, fl_owner_t id)
{
int retval = 0;
if (!file_count(filp)) {
printk(KERN_ERR "VFS: Close: file count is 0\n");
return 0;
}
if (filp->f_op && filp->f_op->flush)
retval = filp->f_op->flush(filp, id);
dnotify_flush(filp, id);
locks_remove_posix(filp, id);
fput(filp);
return retval;
}
| 0
|
162,947
|
long VideoTrack::Seek(
if (status < 0)
return status;
if (rate <= 0)
return E_FILE_FORMAT_INVALID;
}
pos += size; // consume payload
assert(pos <= stop);
}
assert(pos == stop);
VideoTrack* const pTrack =
new (std::nothrow) VideoTrack(pSegment, element_start, element_size);
if (pTrack == NULL)
return -1; // generic error
const int status = info.Copy(pTrack->m_info);
if (status) { // error
delete pTrack;
return status;
}
pTrack->m_width = width;
pTrack->m_height = height;
pTrack->m_rate = rate;
pResult = pTrack;
return 0; // success
}
bool VideoTrack::VetEntry(const BlockEntry* pBlockEntry) const {
return Track::VetEntry(pBlockEntry) && pBlockEntry->GetBlock()->IsKey();
}
long VideoTrack::Seek(long long time_ns, const BlockEntry*& pResult) const {
const long status = GetFirst(pResult);
if (status < 0) // buffer underflow, etc
return status;
assert(pResult);
if (pResult->EOS())
return 0;
const Cluster* pCluster = pResult->GetCluster();
assert(pCluster);
assert(pCluster->GetIndex() >= 0);
if (time_ns <= pResult->GetBlock()->GetTime(pCluster))
return 0;
Cluster** const clusters = m_pSegment->m_clusters;
assert(clusters);
const long count = m_pSegment->GetCount(); // loaded only, not pre-loaded
assert(count > 0);
Cluster** const i = clusters + pCluster->GetIndex();
assert(i);
assert(*i == pCluster);
assert(pCluster->GetTime() <= time_ns);
Cluster** const j = clusters + count;
Cluster** lo = i;
Cluster** hi = j;
while (lo < hi) {
// INVARIANT:
//[i, lo) <= time_ns
//[lo, hi) ?
//[hi, j) > time_ns
Cluster** const mid = lo + (hi - lo) / 2;
assert(mid < hi);
pCluster = *mid;
assert(pCluster);
assert(pCluster->GetIndex() >= 0);
assert(pCluster->GetIndex() == long(mid - m_pSegment->m_clusters));
const long long t = pCluster->GetTime();
if (t <= time_ns)
lo = mid + 1;
else
hi = mid;
assert(lo <= hi);
}
assert(lo == hi);
assert(lo > i);
assert(lo <= j);
pCluster = *--lo;
assert(pCluster);
assert(pCluster->GetTime() <= time_ns);
pResult = pCluster->GetEntry(this, time_ns);
if ((pResult != 0) && !pResult->EOS()) // found a keyframe
return 0;
while (lo != i) {
pCluster = *--lo;
assert(pCluster);
assert(pCluster->GetTime() <= time_ns);
#if 0
pResult = pCluster->GetMaxKey(this);
#else
pResult = pCluster->GetEntry(this, time_ns);
#endif
if ((pResult != 0) && !pResult->EOS())
return 0;
}
// weird: we're on the first cluster, but no keyframe found
// should never happen but we must return something anyway
pResult = GetEOS();
return 0;
}
long long VideoTrack::GetWidth() const { return m_width; }
long long VideoTrack::GetHeight() const { return m_height; }
double VideoTrack::GetFrameRate() const { return m_rate; }
AudioTrack::AudioTrack(Segment* pSegment, long long element_start,
long long element_size)
: Track(pSegment, element_start, element_size) {}
long AudioTrack::Parse(Segment* pSegment, const Info& info,
long long element_start, long long element_size,
AudioTrack*& pResult) {
if (pResult)
return -1;
if (info.type != Track::kAudio)
return -1;
IMkvReader* const pReader = pSegment->m_pReader;
const Settings& s = info.settings;
assert(s.start >= 0);
assert(s.size >= 0);
long long pos = s.start;
assert(pos >= 0);
const long long stop = pos + s.size;
double rate = 8000.0; // MKV default
long long channels = 1;
long long bit_depth = 0;
while (pos < stop) {
long long id, size;
long status = ParseElementHeader(pReader, pos, stop, id, size);
if (status < 0) // error
return status;
if (id == 0x35) { // Sample Rate
status = UnserializeFloat(pReader, pos, size, rate);
if (status < 0)
return status;
if (rate <= 0)
return E_FILE_FORMAT_INVALID;
} else if (id == 0x1F) { // Channel Count
channels = UnserializeUInt(pReader, pos, size);
if (channels <= 0)
return E_FILE_FORMAT_INVALID;
} else if (id == 0x2264) { // Bit Depth
bit_depth = UnserializeUInt(pReader, pos, size);
if (bit_depth <= 0)
return E_FILE_FORMAT_INVALID;
}
pos += size; // consume payload
assert(pos <= stop);
}
assert(pos == stop);
AudioTrack* const pTrack =
new (std::nothrow) AudioTrack(pSegment, element_start, element_size);
if (pTrack == NULL)
return -1; // generic error
const int status = info.Copy(pTrack->m_info);
if (status) {
delete pTrack;
return status;
}
pTrack->m_rate = rate;
pTrack->m_channels = channels;
pTrack->m_bitDepth = bit_depth;
pResult = pTrack;
return 0; // success
}
| 0
|
355,812
|
resume_copy_required_values (gnutls_session_t session)
{
/* get the new random values */
memcpy (session->internals.resumed_security_parameters.
server_random,
session->security_parameters.server_random, TLS_RANDOM_SIZE);
memcpy (session->internals.resumed_security_parameters.
client_random,
session->security_parameters.client_random, TLS_RANDOM_SIZE);
/* keep the ciphersuite and compression
* That is because the client must see these in our
* hello message.
*/
memcpy (session->security_parameters.current_cipher_suite.
suite,
session->internals.resumed_security_parameters.
current_cipher_suite.suite, 2);
session->internals.compression_method =
session->internals.resumed_security_parameters.read_compression_algorithm;
/* or write_compression_algorithm
* they are the same
*/
session->security_parameters.entity =
session->internals.resumed_security_parameters.entity;
_gnutls_set_current_version (session,
session->internals.
resumed_security_parameters.version);
session->security_parameters.cert_type =
session->internals.resumed_security_parameters.cert_type;
memcpy (session->security_parameters.session_id,
session->internals.resumed_security_parameters.
session_id, sizeof (session->security_parameters.session_id));
session->security_parameters.session_id_size =
session->internals.resumed_security_parameters.session_id_size;
}
| 0
|
61,873
|
static int maybe_start_packet(vorb *f)
{
if (f->next_seg == -1) {
int x = get8(f);
if (f->eof) return FALSE; // EOF at page boundary is not an error!
if (0x4f != x ) return error(f, VORBIS_missing_capture_pattern);
if (0x67 != get8(f)) return error(f, VORBIS_missing_capture_pattern);
if (0x67 != get8(f)) return error(f, VORBIS_missing_capture_pattern);
if (0x53 != get8(f)) return error(f, VORBIS_missing_capture_pattern);
if (!start_page_no_capturepattern(f)) return FALSE;
if (f->page_flag & PAGEFLAG_continued_packet) {
// set up enough state that we can read this packet if we want,
// e.g. during recovery
f->last_seg = FALSE;
f->bytes_in_seg = 0;
return error(f, VORBIS_continued_packet_flag_invalid);
}
}
return start_packet(f);
}
| 0
|
277,731
|
NPError WebPluginDelegatePepper::Device3DDestroyBuffer(
NPDeviceContext3D* context,
int32 id) {
#if defined(ENABLE_GPU)
command_buffer_->DestroyTransferBuffer(id);
#endif // ENABLE_GPU
return NPERR_NO_ERROR;
}
| 0
|
447,929
|
_XimSetInnerICAttributes(
Xic ic,
XPointer top,
XIMArg *arg,
unsigned long mode)
{
XIMResourceList res;
int check;
if (!(res = _XimGetResourceListRec(ic->private.proto.ic_inner_resources,
ic->private.proto.ic_num_inner_resources, arg->name)))
return False;
check = _XimCheckICMode(res, mode);
if(check == XIM_CHECK_INVALID)
return True;
else if(check == XIM_CHECK_ERROR)
return False;
return _XimEncodeLocalICAttr(ic, res, top, arg, mode);
}
| 0
|
285,568
|
void TestTransactionConsumer::DidFinish(int result) {
state_ = DONE;
error_ = result;
if (--quit_counter_ == 0)
base::MessageLoop::current()->QuitWhenIdle();
}
| 0
|
133,513
|
DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
{
/* This should never happen and there is no way to handle it. */
instrumentation_begin();
pr_err("Unknown trap in Xen PV mode.");
BUG();
instrumentation_end();
}
| 0
|
438,084
|
long long VideoTrack::GetHeight() const { return m_height; }
| 0
|
115,684
|
gdImageBrushApply (gdImagePtr im, int x, int y)
{
int lx, ly;
int hy;
int hx;
int x1, y1, x2, y2;
int srcx, srcy;
if (!im->brush) {
return;
}
hy = gdImageSY (im->brush) / 2;
y1 = y - hy;
y2 = y1 + gdImageSY (im->brush);
hx = gdImageSX (im->brush) / 2;
x1 = x - hx;
x2 = x1 + gdImageSX (im->brush);
srcy = 0;
if (im->trueColor) {
if (im->brush->trueColor) {
for (ly = y1; (ly < y2); ly++) {
srcx = 0;
for (lx = x1; (lx < x2); lx++) {
int p;
p = gdImageGetTrueColorPixel (im->brush, srcx, srcy);
/* 2.0.9, Thomas Winzig: apply simple full transparency */
if (p != gdImageGetTransparent (im->brush)) {
gdImageSetPixel (im, lx, ly, p);
}
srcx++;
}
srcy++;
}
} else {
/* 2.0.12: Brush palette, image truecolor (thanks to Thorben Kundinger
for pointing out the issue) */
for (ly = y1; (ly < y2); ly++) {
srcx = 0;
for (lx = x1; (lx < x2); lx++) {
int p, tc;
p = gdImageGetPixel (im->brush, srcx, srcy);
tc = gdImageGetTrueColorPixel (im->brush, srcx, srcy);
/* 2.0.9, Thomas Winzig: apply simple full transparency */
if (p != gdImageGetTransparent (im->brush)) {
gdImageSetPixel (im, lx, ly, tc);
}
srcx++;
}
srcy++;
}
}
} else {
for (ly = y1; (ly < y2); ly++) {
srcx = 0;
for (lx = x1; (lx < x2); lx++) {
int p;
p = gdImageGetPixel (im->brush, srcx, srcy);
/* Allow for non-square brushes! */
if (p != gdImageGetTransparent (im->brush)) {
/* Truecolor brush. Very slow
on a palette destination. */
if (im->brush->trueColor) {
gdImageSetPixel (im, lx, ly,
gdImageColorResolveAlpha (im,
gdTrueColorGetRed
(p),
gdTrueColorGetGreen
(p),
gdTrueColorGetBlue
(p),
gdTrueColorGetAlpha
(p)));
} else {
gdImageSetPixel (im, lx, ly, im->brushColorMap[p]);
}
}
srcx++;
}
srcy++;
}
}
}
| 0
|
71,809
|
static void zynq_init(MachineState *machine)
{
ram_addr_t ram_size = machine->ram_size;
const char *cpu_model = machine->cpu_model;
const char *kernel_filename = machine->kernel_filename;
const char *kernel_cmdline = machine->kernel_cmdline;
const char *initrd_filename = machine->initrd_filename;
ObjectClass *cpu_oc;
ARMCPU *cpu;
MemoryRegion *address_space_mem = get_system_memory();
MemoryRegion *ext_ram = g_new(MemoryRegion, 1);
MemoryRegion *ocm_ram = g_new(MemoryRegion, 1);
DeviceState *dev;
SysBusDevice *busdev;
qemu_irq pic[64];
Error *err = NULL;
int n;
if (!cpu_model) {
cpu_model = "cortex-a9";
}
cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
cpu = ARM_CPU(object_new(object_class_get_name(cpu_oc)));
/* By default A9 CPUs have EL3 enabled. This board does not
* currently support EL3 so the CPU EL3 property is disabled before
* realization.
*/
if (object_property_find(OBJECT(cpu), "has_el3", NULL)) {
object_property_set_bool(OBJECT(cpu), false, "has_el3", &err);
if (err) {
error_report_err(err);
exit(1);
}
}
object_property_set_int(OBJECT(cpu), ZYNQ_BOARD_MIDR, "midr", &err);
if (err) {
error_report_err(err);
exit(1);
}
object_property_set_int(OBJECT(cpu), MPCORE_PERIPHBASE, "reset-cbar", &err);
if (err) {
error_report_err(err);
exit(1);
}
object_property_set_bool(OBJECT(cpu), true, "realized", &err);
if (err) {
error_report_err(err);
exit(1);
}
/* max 2GB ram */
if (ram_size > 0x80000000) {
ram_size = 0x80000000;
}
/* DDR remapped to address zero. */
memory_region_allocate_system_memory(ext_ram, NULL, "zynq.ext_ram",
ram_size);
memory_region_add_subregion(address_space_mem, 0, ext_ram);
/* 256K of on-chip memory */
memory_region_init_ram(ocm_ram, NULL, "zynq.ocm_ram", 256 << 10,
&error_abort);
vmstate_register_ram_global(ocm_ram);
memory_region_add_subregion(address_space_mem, 0xFFFC0000, ocm_ram);
DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);
/* AMD */
pflash_cfi02_register(0xe2000000, NULL, "zynq.pflash", FLASH_SIZE,
dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
FLASH_SECTOR_SIZE,
FLASH_SIZE/FLASH_SECTOR_SIZE, 1,
1, 0x0066, 0x0022, 0x0000, 0x0000, 0x0555, 0x2aa,
0);
dev = qdev_create(NULL, "xilinx,zynq_slcr");
qdev_init_nofail(dev);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xF8000000);
dev = qdev_create(NULL, "a9mpcore_priv");
qdev_prop_set_uint32(dev, "num-cpu", 1);
qdev_init_nofail(dev);
busdev = SYS_BUS_DEVICE(dev);
sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE);
sysbus_connect_irq(busdev, 0,
qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ));
for (n = 0; n < 64; n++) {
pic[n] = qdev_get_gpio_in(dev, n);
}
zynq_init_spi_flashes(0xE0006000, pic[58-IRQ_OFFSET], false);
zynq_init_spi_flashes(0xE0007000, pic[81-IRQ_OFFSET], false);
zynq_init_spi_flashes(0xE000D000, pic[51-IRQ_OFFSET], true);
sysbus_create_simple("xlnx,ps7-usb", 0xE0002000, pic[53-IRQ_OFFSET]);
sysbus_create_simple("xlnx,ps7-usb", 0xE0003000, pic[76-IRQ_OFFSET]);
sysbus_create_simple("cadence_uart", 0xE0000000, pic[59-IRQ_OFFSET]);
sysbus_create_simple("cadence_uart", 0xE0001000, pic[82-IRQ_OFFSET]);
sysbus_create_varargs("cadence_ttc", 0xF8001000,
pic[42-IRQ_OFFSET], pic[43-IRQ_OFFSET], pic[44-IRQ_OFFSET], NULL);
sysbus_create_varargs("cadence_ttc", 0xF8002000,
pic[69-IRQ_OFFSET], pic[70-IRQ_OFFSET], pic[71-IRQ_OFFSET], NULL);
gem_init(&nd_table[0], 0xE000B000, pic[54-IRQ_OFFSET]);
gem_init(&nd_table[1], 0xE000C000, pic[77-IRQ_OFFSET]);
dev = qdev_create(NULL, "generic-sdhci");
qdev_init_nofail(dev);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xE0100000);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[56-IRQ_OFFSET]);
dev = qdev_create(NULL, "generic-sdhci");
qdev_init_nofail(dev);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xE0101000);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[79-IRQ_OFFSET]);
dev = qdev_create(NULL, "pl330");
qdev_prop_set_uint8(dev, "num_chnls", 8);
qdev_prop_set_uint8(dev, "num_periph_req", 4);
qdev_prop_set_uint8(dev, "num_events", 16);
qdev_prop_set_uint8(dev, "data_width", 64);
qdev_prop_set_uint8(dev, "wr_cap", 8);
qdev_prop_set_uint8(dev, "wr_q_dep", 16);
qdev_prop_set_uint8(dev, "rd_cap", 8);
qdev_prop_set_uint8(dev, "rd_q_dep", 16);
qdev_prop_set_uint16(dev, "data_buffer_dep", 256);
qdev_init_nofail(dev);
busdev = SYS_BUS_DEVICE(dev);
sysbus_mmio_map(busdev, 0, 0xF8003000);
sysbus_connect_irq(busdev, 0, pic[45-IRQ_OFFSET]); /* abort irq line */
for (n = 0; n < 8; ++n) { /* event irqs */
sysbus_connect_irq(busdev, n + 1, pic[dma_irqs[n] - IRQ_OFFSET]);
}
zynq_binfo.ram_size = ram_size;
zynq_binfo.kernel_filename = kernel_filename;
zynq_binfo.kernel_cmdline = kernel_cmdline;
zynq_binfo.initrd_filename = initrd_filename;
zynq_binfo.nb_cpus = 1;
zynq_binfo.board_id = 0xd32;
zynq_binfo.loader_start = 0;
arm_load_kernel(ARM_CPU(first_cpu), &zynq_binfo);
}
| 1
|
326,401
|
static int replace_int_data_in_filename(char *buf, int buf_size, const char *filename, char placeholder, int64_t number)
{
const char *p;
char *q, buf1[20], c;
int nd, len, addchar_count;
int found_count = 0;
q = buf;
p = filename;
for (;;) {
c = *p;
if (c == '\0')
break;
if (c == '%' && *(p+1) == '%') // %%
addchar_count = 2;
else if (c == '%' && (av_isdigit(*(p+1)) || *(p+1) == placeholder)) {
nd = 0;
addchar_count = 1;
while (av_isdigit(*(p + addchar_count))) {
nd = nd * 10 + *(p + addchar_count) - '0';
addchar_count++;
}
if (*(p + addchar_count) == placeholder) {
len = snprintf(buf1, sizeof(buf1), "%0*"PRId64, (number < 0) ? nd : nd++, number);
if (len < 1) // returned error or empty buf1
goto fail;
if ((q - buf + len) > buf_size - 1)
goto fail;
memcpy(q, buf1, len);
q += len;
p += (addchar_count + 1);
addchar_count = 0;
found_count++;
}
} else
addchar_count = 1;
while (addchar_count--)
if ((q - buf) < buf_size - 1)
*q++ = *p++;
else
goto fail;
}
*q = '\0';
return found_count;
fail:
*q = '\0';
return -1;
}
| 0
|
248,078
|
xsltGetXIncludeDefault(void) {
return(xsltDoXIncludeDefault);
}
| 0
|
20,436
|
static void qio_channel_websock_unset_watch(QIOChannelWebsock *ioc) {
if (ioc->io_tag) {
g_source_remove(ioc->io_tag);
ioc->io_tag = 0;
}
}
| 0
|
329,936
|
static int uhci_broadcast_packet(UHCIState *s, USBPacket *p)
{
UHCIPort *port;
USBDevice *dev;
int i, ret;
#ifdef DEBUG_PACKET
{
const char *pidstr;
switch(p->pid) {
case USB_TOKEN_SETUP: pidstr = "SETUP"; break;
case USB_TOKEN_IN: pidstr = "IN"; break;
case USB_TOKEN_OUT: pidstr = "OUT"; break;
default: pidstr = "?"; break;
}
printf("frame %d: pid=%s addr=0x%02x ep=%d len=%d\n",
s->frnum, pidstr, p->devaddr, p->devep, p->len);
if (p->pid != USB_TOKEN_IN) {
printf(" data_out=");
for(i = 0; i < p->len; i++) {
printf(" %02x", p->data[i]);
}
printf("\n");
}
}
#endif
for(i = 0; i < NB_PORTS; i++) {
port = &s->ports[i];
dev = port->port.dev;
if (dev && (port->ctrl & UHCI_PORT_EN)) {
ret = dev->handle_packet(dev, p);
if (ret != USB_RET_NODEV) {
#ifdef DEBUG_PACKET
if (ret == USB_RET_ASYNC) {
printf("usb-uhci: Async packet\n");
} else {
printf(" ret=%d ", ret);
if (p->pid == USB_TOKEN_IN && ret > 0) {
printf("data_in=");
for(i = 0; i < ret; i++) {
printf(" %02x", p->data[i]);
}
}
printf("\n");
}
#endif
return ret;
}
}
}
return USB_RET_NODEV;
}
| 0
|
2,636
|
SPL_METHOD(SplFileInfo, getLinkTarget)
{
spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
int ret;
char buff[MAXPATHLEN];
zend_error_handling error_handling;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);
#if defined(PHP_WIN32) || HAVE_SYMLINK
if (intern->file_name == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty filename");
RETURN_FALSE;
} else if (!IS_ABSOLUTE_PATH(intern->file_name, intern->file_name_len)) {
char expanded_path[MAXPATHLEN];
if (!expand_filepath_with_mode(intern->file_name, expanded_path, NULL, 0, CWD_EXPAND TSRMLS_CC)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "No such file or directory");
RETURN_FALSE;
}
ret = php_sys_readlink(expanded_path, buff, MAXPATHLEN - 1);
} else {
ret = php_sys_readlink(intern->file_name, buff, MAXPATHLEN-1);
}
#else
ret = -1; /* always fail if not implemented */
#endif
if (ret == -1) {
zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Unable to read link %s, error: %s", intern->file_name, strerror(errno));
RETVAL_FALSE;
} else {
/* Append NULL to the end of the string */
buff[ret] = '\0';
RETVAL_STRINGL(buff, ret, 1);
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
}
| 1
|
474,206
|
static int reencrypt_recovery_by_passphrase(struct crypt_device *cd,
struct luks2_hdr *hdr,
int keyslot_old,
int keyslot_new,
const char *passphrase,
size_t passphrase_size)
{
int r;
crypt_reencrypt_info ri;
struct crypt_lock_handle *reencrypt_lock;
r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
if (r) {
if (r == -EBUSY)
log_err(cd, _("Reencryption in-progress. Cannot perform recovery."));
else
log_err(cd, _("Failed to get reencryption lock."));
return r;
}
if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) {
LUKS2_reencrypt_unlock(cd, reencrypt_lock);
return r;
}
ri = LUKS2_reencrypt_status(hdr);
if (ri == CRYPT_REENCRYPT_INVALID) {
LUKS2_reencrypt_unlock(cd, reencrypt_lock);
return -EINVAL;
}
if (ri == CRYPT_REENCRYPT_CRASH) {
r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot_old, keyslot_new,
passphrase, passphrase_size, 0, NULL);
if (r < 0)
log_err(cd, _("LUKS2 reencryption recovery failed."));
} else {
log_dbg(cd, "No LUKS2 reencryption recovery needed.");
r = 0;
}
LUKS2_reencrypt_unlock(cd, reencrypt_lock);
return r;
}
| 0
|
413,889
|
void Inspect::operator()(Error_Ptr error)
{
append_indentation();
append_token("@error", error);
append_mandatory_space();
error->message()->perform(this);
append_delimiter();
}
| 0
|
58,268
|
static void *yam_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (*pos < NR_PORTS) ? yam_devs[*pos] : NULL;
}
| 0
|
34,442
|
static inline void user_fpu_begin(void)
{
preempt_disable();
if (!user_has_fpu())
__thread_fpu_begin(current);
preempt_enable();
}
| 0
|
105,843
|
wiki_show_edit_page(HttpResponse *res, char *wikitext, char *page)
{
wiki_show_header(res, page, FALSE);
if (wikitext == NULL) wikitext = "";
http_response_printf(res, EDITFORM, page, wikitext);
wiki_show_footer(res);
http_response_send(res);
exit(0);
}
| 0
|
483,586
|
int dev_vprintk_emit(int level, const struct device *dev,
const char *fmt, va_list args)
{
char hdr[128];
size_t hdrlen;
hdrlen = create_syslog_header(dev, hdr, sizeof(hdr));
return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args);
}
| 0
|
244,823
|
void HTMLTextAreaElement::updateFocusAppearance(bool restorePreviousSelection)
{
if (!restorePreviousSelection || !hasCachedSelection()) {
setSelectionRange(0, 0);
} else
restoreCachedSelection();
if (document().frame())
document().frame()->selection().revealSelection();
}
| 0
|
405,839
|
mf_bitmap_to_of11(const struct mf_bitmap *fields)
{
const struct ofp11_wc_map *p;
uint32_t wc11 = 0;
for (p = ofp11_wc_map; p < &ofp11_wc_map[ARRAY_SIZE(ofp11_wc_map)]; p++) {
if (bitmap_is_set(fields->bm, p->mf)) {
wc11 |= p->wc11;
}
}
return htonl(wc11);
}
| 0
|
490,267
|
destroyODBCStmt(ODBCStmt *stmt)
{
ODBCStmt **stmtp;
assert(isValidStmt(stmt));
/* first set this object to invalid */
stmt->Type = 0;
/* remove this stmt from the dbc */
assert(stmt->Dbc);
/* search for stmt in linked list */
stmtp = &stmt->Dbc->FirstStmt;
while (*stmtp && *stmtp != stmt)
stmtp = &(*stmtp)->next;
/* stmtp points to location in list where stmt is found, or
* *stmtp is NULL in case it wasn't there (presumably not added
* yet) */
if (*stmtp) {
/* now remove it from the linked list */
*stmtp = stmt->next;
}
/* cleanup own managed data */
deleteODBCErrorList(&stmt->Error);
destroyODBCDesc(stmt->ImplParamDescr);
destroyODBCDesc(stmt->ImplRowDescr);
destroyODBCDesc(stmt->AutoApplParamDescr);
destroyODBCDesc(stmt->AutoApplRowDescr);
if (stmt->hdl)
mapi_close_handle(stmt->hdl);
free(stmt);
}
| 0
|
183,293
|
ofproto_port_set_queues(struct ofproto *ofproto, ofp_port_t ofp_port,
const struct ofproto_port_queue *queues,
size_t n_queues)
{
struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
if (!ofport) {
VLOG_WARN("%s: cannot set queues on nonexistent port %"PRIu32,
ofproto->name, ofp_port);
return ENODEV;
}
return (ofproto->ofproto_class->set_queues
? ofproto->ofproto_class->set_queues(ofport, queues, n_queues)
: EOPNOTSUPP);
}
| 0
|
439,396
|
static int cpia2_s_fmt_vid_cap(struct file *file, void *_fh,
struct v4l2_format *f)
{
struct camera_data *cam = video_drvdata(file);
int err, frame;
err = cpia2_try_fmt_vid_cap(file, _fh, f);
if(err != 0)
return err;
cam->pixelformat = f->fmt.pix.pixelformat;
/* NOTE: This should be set to 1 for MJPEG, but some apps don't handle
* the missing Huffman table properly. */
cam->params.compression.inhibit_htables = 0;
/*f->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG;*/
/* we set the video window to something smaller or equal to what
* is requested by the user???
*/
DBG("Requested width = %d, height = %d\n",
f->fmt.pix.width, f->fmt.pix.height);
if (f->fmt.pix.width != cam->width ||
f->fmt.pix.height != cam->height) {
cam->width = f->fmt.pix.width;
cam->height = f->fmt.pix.height;
cam->params.roi.width = f->fmt.pix.width;
cam->params.roi.height = f->fmt.pix.height;
cpia2_set_format(cam);
}
for (frame = 0; frame < cam->num_frames; ++frame) {
if (cam->buffers[frame].status == FRAME_READING)
if ((err = sync(cam, frame)) < 0)
return err;
cam->buffers[frame].status = FRAME_EMPTY;
}
return 0;
}
| 0
|
360,274
|
nautilus_file_invalidate_attributes (NautilusFile *file,
NautilusFileAttributes file_attributes)
{
/* Cancel possible in-progress loads of any of these attributes */
nautilus_directory_cancel_loading_file_attributes (file->details->directory,
file,
file_attributes);
/* Actually invalidate the values */
nautilus_file_invalidate_attributes_internal (file, file_attributes);
nautilus_directory_add_file_to_work_queue (file->details->directory, file);
/* Kick off I/O if necessary */
nautilus_directory_async_state_changed (file->details->directory);
}
| 0
|
464,239
|
SProcXChangeFeedbackControl(ClientPtr client)
{
REQUEST(xChangeFeedbackControlReq);
swaps(&stuff->length);
REQUEST_AT_LEAST_SIZE(xChangeFeedbackControlReq);
swapl(&stuff->mask);
return (ProcXChangeFeedbackControl(client));
}
| 0
|
452,005
|
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
int ret;
/* Record our major and minor device numbers. */
if (!single_major) {
ret = register_blkdev(0, rbd_dev->name);
if (ret < 0)
goto err_out_unlock;
rbd_dev->major = ret;
rbd_dev->minor = 0;
} else {
rbd_dev->major = rbd_major;
rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
}
/* Set up the blkdev mapping. */
ret = rbd_init_disk(rbd_dev);
if (ret)
goto err_out_blkdev;
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
if (ret)
goto err_out_disk;
set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
up_write(&rbd_dev->header_rwsem);
return 0;
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
if (!single_major)
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
up_write(&rbd_dev->header_rwsem);
return ret;
}
| 0
|
88,226
|
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
E1000State *s = qemu_get_nic_opaque(nc);
PCIDevice *d = PCI_DEVICE(s);
struct e1000_rx_desc desc;
dma_addr_t base;
unsigned int n, rdt;
uint32_t rdh_start;
uint16_t vlan_special = 0;
uint8_t vlan_status = 0;
uint8_t min_buf[MIN_BUF_SIZE];
struct iovec min_iov;
uint8_t *filter_buf = iov->iov_base;
size_t size = iov_size(iov, iovcnt);
size_t iov_ofs = 0;
size_t desc_offset;
size_t desc_size;
size_t total_size;
if (!e1000x_hw_rx_enabled(s->mac_reg)) {
return -1;
}
if (timer_pending(s->flush_queue_timer)) {
return 0;
}
/* Pad to minimum Ethernet frame length */
if (size < sizeof(min_buf)) {
iov_to_buf(iov, iovcnt, 0, min_buf, size);
memset(&min_buf[size], 0, sizeof(min_buf) - size);
min_iov.iov_base = filter_buf = min_buf;
min_iov.iov_len = size = sizeof(min_buf);
iovcnt = 1;
iov = &min_iov;
} else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
/* This is very unlikely, but may happen. */
iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
filter_buf = min_buf;
}
/* Discard oversized packets if !LPE and !SBP. */
if (e1000x_is_oversized(s->mac_reg, size)) {
return size;
}
if (!receive_filter(s, filter_buf, size)) {
return size;
}
if (e1000x_vlan_enabled(s->mac_reg) &&
e1000x_is_vlan_packet(filter_buf, le16_to_cpu(s->mac_reg[VET]))) {
vlan_special = cpu_to_le16(lduw_be_p(filter_buf + 14));
iov_ofs = 4;
if (filter_buf == iov->iov_base) {
memmove(filter_buf + 4, filter_buf, 12);
} else {
iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
while (iov->iov_len <= iov_ofs) {
iov_ofs -= iov->iov_len;
iov++;
}
}
vlan_status = E1000_RXD_STAT_VP;
size -= 4;
}
rdh_start = s->mac_reg[RDH];
desc_offset = 0;
total_size = size + e1000x_fcs_len(s->mac_reg);
if (!e1000_has_rxbufs(s, total_size)) {
e1000_receiver_overrun(s, total_size);
return -1;
}
do {
desc_size = total_size - desc_offset;
if (desc_size > s->rxbuf_size) {
desc_size = s->rxbuf_size;
}
base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
pci_dma_read(d, base, &desc, sizeof(desc));
desc.special = vlan_special;
desc.status |= (vlan_status | E1000_RXD_STAT_DD);
if (desc.buffer_addr) {
if (desc_offset < size) {
size_t iov_copy;
hwaddr ba = le64_to_cpu(desc.buffer_addr);
size_t copy_size = size - desc_offset;
if (copy_size > s->rxbuf_size) {
copy_size = s->rxbuf_size;
}
do {
iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
copy_size -= iov_copy;
ba += iov_copy;
iov_ofs += iov_copy;
if (iov_ofs == iov->iov_len) {
iov++;
iov_ofs = 0;
}
} while (copy_size);
}
desc_offset += desc_size;
desc.length = cpu_to_le16(desc_size);
if (desc_offset >= total_size) {
desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
} else {
/* Guest zeroing out status is not a hardware requirement.
Clear EOP in case guest didn't do it. */
desc.status &= ~E1000_RXD_STAT_EOP;
}
} else { // as per intel docs; skip descriptors with null buf addr
DBGOUT(RX, "Null RX descriptor!!\n");
}
pci_dma_write(d, base, &desc, sizeof(desc));
if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
s->mac_reg[RDH] = 0;
/* see comment in start_xmit; same here */
if (s->mac_reg[RDH] == rdh_start ||
rdh_start >= s->mac_reg[RDLEN] / sizeof(desc)) {
DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
e1000_receiver_overrun(s, total_size);
return -1;
}
} while (desc_offset < total_size);
e1000x_update_rx_total_stats(s->mac_reg, size, total_size);
n = E1000_ICS_RXT0;
if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
rdt += s->mac_reg[RDLEN] / sizeof(desc);
if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
s->rxbuf_min_shift)
n |= E1000_ICS_RXDMT0;
set_ics(s, 0, n);
return size;
}
| 0
|
129,014
|
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
unsigned hw_argc;
int ret;
struct dm_target *ti = m->ti;
static struct dm_arg _args[] = {
{0, 1024, "invalid number of hardware handler args"},
};
if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
return -EINVAL;
if (!hw_argc)
return 0;
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
request_module("scsi_dh_%s", m->hw_handler_name);
if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
ti->error = "unknown hardware handler type";
ret = -EINVAL;
goto fail;
}
if (hw_argc > 1) {
char *p;
int i, j, len = 4;
for (i = 0; i <= hw_argc - 2; i++)
len += strlen(as->argv[i]) + 1;
p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
if (!p) {
ti->error = "memory allocation failed";
ret = -ENOMEM;
goto fail;
}
j = sprintf(p, "%d", hw_argc - 1);
for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
j = sprintf(p, "%s", as->argv[i]);
}
dm_consume_args(as, hw_argc - 1);
return 0;
fail:
kfree(m->hw_handler_name);
m->hw_handler_name = NULL;
return ret;
}
| 0
|
340,119
|
static void discard_vq_data(VirtQueue *vq, VirtIODevice *vdev)
{
VirtQueueElement elem;
if (!virtio_queue_ready(vq)) {
return;
}
while (virtqueue_pop(vq, &elem)) {
virtqueue_push(vq, &elem, 0);
}
virtio_notify(vdev, vq);
}
| 0
|
414,105
|
work_city_populate (EContact *card,
gchar **values)
{
EContactAddress *contact_addr = getormakeEContactAddress (card, E_CONTACT_ADDRESS_WORK);
contact_addr->locality = g_strdup (values[0]);
e_contact_set (card, E_CONTACT_ADDRESS_WORK, contact_addr);
e_contact_address_free (contact_addr);
}
| 0
|
405,016
|
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
struct l2cap_ctrl control;
BT_DBG("chan %p", chan);
if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
return;
memset(&control, 0, sizeof(control));
control.sframe = 1;
control.super = L2CAP_SUPER_SREJ;
control.reqseq = chan->srej_list.tail;
l2cap_send_sframe(chan, &control);
}
| 0
|
417,338
|
void CiffComponent::add(AutoPtr component)
{
doAdd(component);
}
| 0
|
122,152
|
int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
loff_t offset, ssize_t len)
{
unsigned int max_blocks;
int ret = 0;
int ret2 = 0;
struct ext4_map_blocks map;
unsigned int credits, blkbits = inode->i_blkbits;
map.m_lblk = offset >> blkbits;
max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
/*
* This is somewhat ugly but the idea is clear: When transaction is
* reserved, everything goes into it. Otherwise we rather start several
* smaller transactions for conversion of each extent separately.
*/
if (handle) {
handle = ext4_journal_start_reserved(handle,
EXT4_HT_EXT_CONVERT);
if (IS_ERR(handle))
return PTR_ERR(handle);
credits = 0;
} else {
/*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, max_blocks);
}
while (ret >= 0 && ret < max_blocks) {
map.m_lblk += ret;
map.m_len = (max_blocks -= ret);
if (credits) {
handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
}
}
ret = ext4_map_blocks(handle, inode, &map,
EXT4_GET_BLOCKS_IO_CONVERT_EXT);
if (ret <= 0)
ext4_warning(inode->i_sb,
"inode #%lu: block %u: len %u: "
"ext4_ext_map_blocks returned %d",
inode->i_ino, map.m_lblk,
map.m_len, ret);
ext4_mark_inode_dirty(handle, inode);
if (credits)
ret2 = ext4_journal_stop(handle);
if (ret <= 0 || ret2)
break;
}
if (!credits)
ret2 = ext4_journal_stop(handle);
return ret > 0 ? ret2 : ret;
}
| 0
|
331,340
|
void exec_start_outgoing_migration(MigrationState *s, const char *command, Error **errp)
{
QIOChannel *ioc;
const char *argv[] = { "/bin/sh", "-c", command, NULL };
trace_migration_exec_outgoing(command);
ioc = QIO_CHANNEL(qio_channel_command_new_spawn(argv,
O_WRONLY,
errp));
if (!ioc) {
return;
}
migration_set_outgoing_channel(s, ioc);
object_unref(OBJECT(ioc));
}
| 1
|
6,365
|
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* fft_length = GetInput(context, node, kFftLengthTensor);
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
if (output->type != kTfLiteComplex64) {
context->ReportError(context,
"Type '%s' for output is not supported by rfft2d.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
// Resize the output tensor if the fft_length tensor is not constant.
// Otherwise, check if the output shape is correct.
if (!IsConstantTensor(fft_length)) {
TF_LITE_ENSURE_STATUS(ResizeOutputandTemporaryTensors(context, node));
} else {
int num_dims_output = NumDimensions(output);
const RuntimeShape output_shape = GetTensorShape(output);
TF_LITE_ENSURE_EQ(context, num_dims_output, NumDimensions(input));
TF_LITE_ENSURE(context, num_dims_output >= 2);
TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 2),
fft_length_data[0]);
TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 1),
fft_length_data[1] / 2 + 1);
}
return Rfft2dHelper(context, node);
}
| 1
|
176,610
|
static void classInitNative(JNIEnv* env, jclass clazz) {
int err;
hw_module_t* module;
jclass jniCallbackClass =
env->FindClass("com/android/bluetooth/btservice/JniCallbacks");
sJniCallbacksField = env->GetFieldID(clazz, "mJniCallbacks",
"Lcom/android/bluetooth/btservice/JniCallbacks;");
method_stateChangeCallback = env->GetMethodID(jniCallbackClass, "stateChangeCallback", "(I)V");
method_adapterPropertyChangedCallback = env->GetMethodID(jniCallbackClass,
"adapterPropertyChangedCallback",
"([I[[B)V");
method_discoveryStateChangeCallback = env->GetMethodID(jniCallbackClass,
"discoveryStateChangeCallback", "(I)V");
method_devicePropertyChangedCallback = env->GetMethodID(jniCallbackClass,
"devicePropertyChangedCallback",
"([B[I[[B)V");
method_deviceFoundCallback = env->GetMethodID(jniCallbackClass, "deviceFoundCallback", "([B)V");
method_pinRequestCallback = env->GetMethodID(jniCallbackClass, "pinRequestCallback",
"([B[BIZ)V");
method_sspRequestCallback = env->GetMethodID(jniCallbackClass, "sspRequestCallback",
"([B[BIII)V");
method_bondStateChangeCallback = env->GetMethodID(jniCallbackClass,
"bondStateChangeCallback", "(I[BI)V");
method_aclStateChangeCallback = env->GetMethodID(jniCallbackClass,
"aclStateChangeCallback", "(I[BI)V");
method_setWakeAlarm = env->GetMethodID(clazz, "setWakeAlarm", "(JZ)Z");
method_acquireWakeLock = env->GetMethodID(clazz, "acquireWakeLock", "(Ljava/lang/String;)Z");
method_releaseWakeLock = env->GetMethodID(clazz, "releaseWakeLock", "(Ljava/lang/String;)Z");
method_energyInfo = env->GetMethodID(clazz, "energyInfoCallback", "(IIJJJJ)V");
char value[PROPERTY_VALUE_MAX];
property_get("bluetooth.mock_stack", value, "");
const char *id = (strcmp(value, "1")? BT_STACK_MODULE_ID : BT_STACK_TEST_MODULE_ID);
err = hw_get_module(id, (hw_module_t const**)&module);
if (err == 0) {
hw_device_t* abstraction;
err = module->methods->open(module, id, &abstraction);
if (err == 0) {
bluetooth_module_t* btStack = (bluetooth_module_t *)abstraction;
sBluetoothInterface = btStack->get_bluetooth_interface();
} else {
ALOGE("Error while opening Bluetooth library");
}
} else {
ALOGE("No Bluetooth Library found");
}
}
| 0
|
217,666
|
void PluginServiceImpl::AddExtraPluginDir(const FilePath& path) {
plugin_list_->AddExtraPluginDir(path);
}
| 0
|
406,028
|
ofputil_put_ofp14_table_desc(const struct ofputil_table_desc *td,
struct ofpbuf *b, enum ofp_version version)
{
struct ofp14_table_desc *otd;
struct ofp14_table_mod_prop_vacancy *otv;
size_t start_otd;
start_otd = b->size;
ofpbuf_put_zeros(b, sizeof *otd);
ofpprop_put_u32(b, OFPTMPT14_EVICTION, td->eviction_flags);
otv = ofpbuf_put_zeros(b, sizeof *otv);
otv->type = htons(OFPTMPT14_VACANCY);
otv->length = htons(sizeof *otv);
otv->vacancy_down = td->table_vacancy.vacancy_down;
otv->vacancy_up = td->table_vacancy.vacancy_up;
otv->vacancy = td->table_vacancy.vacancy;
otd = ofpbuf_at_assert(b, start_otd, sizeof *otd);
otd->length = htons(b->size - start_otd);
otd->table_id = td->table_id;
otd->config = ofputil_encode_table_config(OFPUTIL_TABLE_MISS_DEFAULT,
td->eviction, td->vacancy,
version);
}
| 0
|
428,435
|
g_file_mount_mountable (GFile *file,
GMountMountFlags flags,
GMountOperation *mount_operation,
GCancellable *cancellable,
GAsyncReadyCallback callback,
gpointer user_data)
{
GFileIface *iface;
g_return_if_fail (G_IS_FILE (file));
iface = G_FILE_GET_IFACE (file);
if (iface->mount_mountable == NULL)
{
g_task_report_new_error (file, callback, user_data,
g_file_mount_mountable,
G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED,
_("Operation not supported"));
return;
}
(* iface->mount_mountable) (file,
flags,
mount_operation,
cancellable,
callback,
user_data);
}
| 0
|
440,398
|
PHP_METHOD(Phar, unlinkArchive)
{
char *fname, *error, *zname, *arch, *entry;
size_t fname_len;
int zname_len, arch_len, entry_len;
phar_archive_data *phar;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "p", &fname, &fname_len) == FAILURE) {
RETURN_FALSE;
}
if (ZEND_SIZE_T_INT_OVFL(fname_len)) {
RETURN_FALSE;
}
if (!fname_len) {
zend_throw_exception_ex(phar_ce_PharException, 0, "Unknown phar archive \"\"");
return;
}
if (FAILURE == phar_open_from_filename(fname, (int)fname_len, NULL, 0, REPORT_ERRORS, &phar, &error)) {
if (error) {
zend_throw_exception_ex(phar_ce_PharException, 0, "Unknown phar archive \"%s\": %s", fname, error);
efree(error);
} else {
zend_throw_exception_ex(phar_ce_PharException, 0, "Unknown phar archive \"%s\"", fname);
}
return;
}
zname = (char*)zend_get_executed_filename();
zname_len = (int)strlen(zname);
if (zname_len > 7 && !memcmp(zname, "phar://", 7) && SUCCESS == phar_split_fname(zname, zname_len, &arch, &arch_len, &entry, &entry_len, 2, 0)) {
if ((size_t)arch_len == fname_len && !memcmp(arch, fname, arch_len)) {
zend_throw_exception_ex(phar_ce_PharException, 0, "phar archive \"%s\" cannot be unlinked from within itself", fname);
efree(arch);
efree(entry);
return;
}
efree(arch);
efree(entry);
}
if (phar->is_persistent) {
zend_throw_exception_ex(phar_ce_PharException, 0, "phar archive \"%s\" is in phar.cache_list, cannot unlinkArchive()", fname);
return;
}
if (phar->refcount) {
zend_throw_exception_ex(phar_ce_PharException, 0, "phar archive \"%s\" has open file handles or objects. fclose() all file handles, and unset() all objects prior to calling unlinkArchive()", fname);
return;
}
fname = estrndup(phar->fname, phar->fname_len);
/* invalidate phar cache */
PHAR_G(last_phar) = NULL;
PHAR_G(last_phar_name) = PHAR_G(last_alias) = NULL;
phar_archive_delref(phar);
unlink(fname);
efree(fname);
RETURN_TRUE;
}
| 0
|
208,886
|
void PepperPlatformAudioInput::StartCaptureOnIOThread() {
DCHECK(io_message_loop_proxy_->BelongsToCurrentThread());
if (ipc_)
ipc_->RecordStream();
}
| 0
|
136,699
|
GF_Err hinf_box_write(GF_Box *s, GF_BitStream *bs)
{
// GF_HintInfoBox *ptr = (GF_HintInfoBox *)s;
if (!s) return GF_BAD_PARAM;
return gf_isom_box_write_header(s, bs);
}
| 0
|
440,490
|
static int vxlan_nl2flag(struct vxlan_config *conf, struct nlattr *tb[],
int attrtype, unsigned long mask, bool changelink,
bool changelink_supported,
struct netlink_ext_ack *extack)
{
unsigned long flags;
if (!tb[attrtype])
return 0;
if (changelink && !changelink_supported) {
vxlan_flag_attr_error(attrtype, extack);
return -EOPNOTSUPP;
}
if (vxlan_policy[attrtype].type == NLA_FLAG)
flags = conf->flags | mask;
else if (nla_get_u8(tb[attrtype]))
flags = conf->flags | mask;
else
flags = conf->flags & ~mask;
conf->flags = flags;
return 0;
}
| 0
|
73,887
|
void __fastcall TOwnConsole::WindowStateTimer(TObject * /*Sender*/)
{
DebugAssert(FConsoleWindow != NULL);
WINDOWPLACEMENT Placement;
memset(&Placement, 0, sizeof(Placement));
Placement.length = sizeof(Placement);
if (GetWindowPlacement(FConsoleWindow, &Placement))
{
bool Minimized = (Placement.showCmd == SW_SHOWMINIMIZED);
if (FMinimized != Minimized)
{
FMinimized = Minimized;
if (FMinimized && WinConfiguration->MinimizeToTray)
{
FTrayIcon->Visible = true;
ShowWindow(FConsoleWindow, SW_HIDE);
}
else
{
FTrayIcon->Visible = false;
ShowWindow(FConsoleWindow, SW_SHOW);
}
}
}
else
{
DebugFail();
}
}
| 0
|
401,355
|
static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
size_t max_data_size,
const __be32 **_xdr,
unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
unsigned int toklen = *_toklen, len, paddedlen;
/* there must be at least one tag and one length word */
if (toklen <= 8)
return -EINVAL;
_enter(",%zu,{%x,%x},%u",
max_data_size, ntohl(xdr[0]), ntohl(xdr[1]), toklen);
td->tag = ntohl(*xdr++);
len = ntohl(*xdr++);
toklen -= 8;
if (len > max_data_size)
return -EINVAL;
paddedlen = (len + 3) & ~3;
if (paddedlen > toklen)
return -EINVAL;
td->data_len = len;
if (len > 0) {
td->data = kmemdup(xdr, len, GFP_KERNEL);
if (!td->data)
return -ENOMEM;
toklen -= paddedlen;
xdr += paddedlen >> 2;
}
_debug("tag %x len %x", td->tag, td->data_len);
*_xdr = xdr;
*_toklen = toklen;
_leave(" = 0 [toklen=%u]", toklen);
return 0;
}
| 0
|
88,947
|
static gboolean id_match_value(gpointer key, gpointer value, gpointer user_data)
{
if (value == *(gpointer *)user_data) {
*(int *)user_data = (uintptr_t)key;
return true;
}
return false;
}
| 0
|