idx
int64 | func
string | target
int64 |
|---|---|---|
195,040
|
Status BuildXlaCompilationCache(DeviceBase* device, FunctionLibraryRuntime* flr,
const XlaPlatformInfo& platform_info,
XlaCompilationCache** cache) {
if (platform_info.xla_device_metadata()) {
*cache = new XlaCompilationCache(
platform_info.xla_device_metadata()->client(),
platform_info.xla_device_metadata()->jit_device_type());
return Status::OK();
}
auto platform =
se::MultiPlatformManager::PlatformWithId(platform_info.platform_id());
if (!platform.ok()) {
return platform.status();
}
StatusOr<xla::Compiler*> compiler_for_platform =
xla::Compiler::GetForPlatform(platform.ValueOrDie());
if (!compiler_for_platform.ok()) {
// In some rare cases (usually in unit tests with very small clusters) we
// may end up transforming an XLA cluster with at least one GPU operation
// (which would normally force the cluster to be compiled using XLA:GPU)
// into an XLA cluster with no GPU operations (i.e. containing only CPU
// operations). Such a cluster can fail compilation (in way that
// MarkForCompilation could not have detected) if the CPU JIT is not linked
// in.
//
// So bail out of _XlaCompile in this case, and let the executor handle the
// situation for us.
const Status& status = compiler_for_platform.status();
if (status.code() == error::NOT_FOUND) {
return errors::Unimplemented("Could not find compiler for platform ",
platform.ValueOrDie()->Name(), ": ",
status.ToString());
}
}
xla::LocalClientOptions client_options;
client_options.set_platform(platform.ValueOrDie());
client_options.set_intra_op_parallelism_threads(
device->tensorflow_cpu_worker_threads()->num_threads);
string allowed_gpus =
flr->config_proto()->gpu_options().visible_device_list();
TF_ASSIGN_OR_RETURN(absl::optional<std::set<int>> gpu_ids,
ParseVisibleDeviceList(allowed_gpus));
client_options.set_allowed_devices(gpu_ids);
auto client = xla::ClientLibrary::GetOrCreateLocalClient(client_options);
if (!client.ok()) {
return client.status();
}
const XlaOpRegistry::DeviceRegistration* registration;
if (!XlaOpRegistry::GetCompilationDevice(platform_info.device_type().type(),
®istration)) {
return errors::InvalidArgument("No JIT device registered for ",
platform_info.device_type().type());
}
*cache = new XlaCompilationCache(
client.ValueOrDie(), DeviceType(registration->compilation_device_name));
return Status::OK();
}
| 1
|
197,466
|
void RestoreTensor(OpKernelContext* context,
checkpoint::TensorSliceReader::OpenTableFunction open_func,
int preferred_shard, bool restore_slice, int restore_index) {
const Tensor& file_pattern_t = context->input(0);
{
const int64_t size = file_pattern_t.NumElements();
OP_REQUIRES(
context, size == 1,
errors::InvalidArgument(
"Input 0 (file_pattern) must be a string scalar; got a tensor of ",
size, "elements"));
}
const string& file_pattern = file_pattern_t.flat<tstring>()(0);
const Tensor& tensor_name_t = context->input(1);
const string& tensor_name = tensor_name_t.flat<tstring>()(restore_index);
// If we cannot find a cached reader we will allocate our own.
std::unique_ptr<checkpoint::TensorSliceReader> allocated_reader;
const checkpoint::TensorSliceReader* reader = nullptr;
if (context->slice_reader_cache()) {
reader = context->slice_reader_cache()->GetReader(file_pattern, open_func,
preferred_shard);
}
if (!reader) {
allocated_reader.reset(new checkpoint::TensorSliceReader(
file_pattern, open_func, preferred_shard));
reader = allocated_reader.get();
}
OP_REQUIRES_OK(context, CHECK_NOTNULL(reader)->status());
// Get the shape and type from the save file.
DataType type;
TensorShape saved_shape;
OP_REQUIRES(
context, reader->HasTensor(tensor_name, &saved_shape, &type),
errors::NotFound("Tensor name \"", tensor_name,
"\" not found in checkpoint files ", file_pattern));
OP_REQUIRES(
context, type == context->expected_output_dtype(restore_index),
errors::InvalidArgument("Expected to restore a tensor of type ",
DataTypeString(context->expected_output_dtype(0)),
", got a tensor of type ", DataTypeString(type),
" instead: tensor_name = ", tensor_name));
// Shape of the output and slice to load.
TensorShape output_shape(saved_shape);
TensorSlice slice_to_load(saved_shape.dims());
if (restore_slice) {
const tstring& shape_spec =
context->input(2).flat<tstring>()(restore_index);
if (!shape_spec.empty()) {
TensorShape parsed_shape;
OP_REQUIRES_OK(context, checkpoint::ParseShapeAndSlice(
shape_spec, &parsed_shape, &slice_to_load,
&output_shape));
OP_REQUIRES(
context, parsed_shape.IsSameSize(saved_shape),
errors::InvalidArgument(
"Shape in shape_and_slice spec does not match the shape in the "
"save file: ",
parsed_shape.DebugString(),
", save file shape: ", saved_shape.DebugString()));
}
}
Tensor* t = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(restore_index, output_shape, &t));
if (output_shape.num_elements() == 0) return;
#define READER_COPY(T) \
case DataTypeToEnum<T>::value: \
OP_REQUIRES(context, \
reader->CopySliceData(tensor_name, slice_to_load, \
t->flat<T>().data()), \
errors::InvalidArgument("Error copying slice data")); \
break;
switch (type) {
TF_CALL_SAVE_RESTORE_TYPES(READER_COPY)
default:
context->SetStatus(errors::Unimplemented(
"Restoring data type ", DataTypeString(type), " not yet supported"));
}
#undef READER_COPY
}
| 1
|
231,746
|
TEST_F(QuicUnencryptedServerTransportTest, TestEncryptedDataBeforeCFIN) {
getFakeHandshakeLayer()->allowZeroRttKeys();
// This should trigger derivation of keys.
recvClientHello();
StreamId streamId = 4;
recvEncryptedStream(streamId, *IOBuf::copyBuffer("hello"));
auto stream = server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_TRUE(stream->readBuffer.empty());
}
| 0
|
197,128
|
gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val)
{
int idx;
int type = nint(tree->car);
switch (type) {
case NODE_GVAR:
case NODE_ARG:
case NODE_LVAR:
case NODE_IVAR:
case NODE_CVAR:
case NODE_CONST:
case NODE_NIL:
case NODE_MASGN:
if (rhs) {
codegen(s, rhs, VAL);
pop();
sp = cursp();
}
break;
case NODE_COLON2:
case NODE_CALL:
case NODE_SCALL:
/* keep evaluation order */
break;
case NODE_NVAR:
codegen_error(s, "Can't assign to numbered parameter");
break;
default:
codegen_error(s, "unknown lhs");
break;
}
tree = tree->cdr;
switch (type) {
case NODE_GVAR:
gen_setxv(s, OP_SETGV, sp, nsym(tree), val);
break;
case NODE_ARG:
case NODE_LVAR:
idx = lv_idx(s, nsym(tree));
if (idx > 0) {
if (idx != sp) {
gen_move(s, idx, sp, val);
}
break;
}
else { /* upvar */
gen_setupvar(s, sp, nsym(tree));
}
break;
case NODE_IVAR:
gen_setxv(s, OP_SETIV, sp, nsym(tree), val);
break;
case NODE_CVAR:
gen_setxv(s, OP_SETCV, sp, nsym(tree), val);
break;
case NODE_CONST:
gen_setxv(s, OP_SETCONST, sp, nsym(tree), val);
break;
case NODE_COLON2:
if (sp) {
gen_move(s, cursp(), sp, 0);
}
sp = cursp();
push();
codegen(s, tree->car, VAL);
if (rhs) {
codegen(s, rhs, VAL); pop();
gen_move(s, sp, cursp(), 0);
}
pop_n(2);
idx = new_sym(s, nsym(tree->cdr));
genop_2(s, OP_SETMCNST, sp, idx);
break;
case NODE_CALL:
case NODE_SCALL:
{
int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0;
mrb_sym mid = nsym(tree->cdr->car);
top = cursp();
if (val || sp == cursp()) {
push(); /* room for retval */
}
call = cursp();
if (!tree->car) {
noself = 1;
push();
}
else {
codegen(s, tree->car, VAL); /* receiver */
}
if (safe) {
int recv = cursp()-1;
gen_move(s, cursp(), recv, 1);
skip = genjmp2_0(s, OP_JMPNIL, cursp(), val);
}
tree = tree->cdr->cdr->car;
if (tree) {
if (tree->car) { /* positional arguments */
n = gen_values(s, tree->car, VAL, (tree->cdr->car)?13:14);
if (n < 0) { /* variable length */
n = 15;
push();
}
}
if (tree->cdr->car) { /* keyword arguments */
if (n == 14) {
pop_n(n);
genop_2(s, OP_ARRAY, cursp(), n);
push();
n = 15;
}
gen_hash(s, tree->cdr->car->cdr, VAL, 0);
if (n < 14) {
n++;
}
else {
pop_n(2);
genop_2(s, OP_ARYPUSH, cursp(), 1);
}
push();
}
}
if (rhs) {
codegen(s, rhs, VAL);
pop();
}
else {
gen_move(s, cursp(), sp, 0);
}
if (val) {
gen_move(s, top, cursp(), 1);
}
if (n < 14) {
n++;
}
else {
pop();
genop_2(s, OP_ARYPUSH, cursp(), 1);
}
s->sp = call;
if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) {
genop_1(s, OP_SETIDX, cursp());
}
else {
genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n);
}
if (safe) {
dispatch(s, skip);
}
s->sp = top;
}
break;
case NODE_MASGN:
gen_vmassignment(s, tree->car, sp, val);
break;
/* splat without assignment */
case NODE_NIL:
break;
default:
codegen_error(s, "unknown lhs");
break;
}
if (val) push();
}
| 1
|
474,095
|
setup_tree(Node* node, regex_t* reg, int state, ScanEnv* env)
{
int type;
int r = 0;
restart:
type = NTYPE(node);
switch (type) {
case NT_LIST:
{
Node* prev = NULL_NODE;
do {
r = setup_tree(NCAR(node), reg, state, env);
if (IS_NOT_NULL(prev) && r == 0) {
r = next_setup(prev, NCAR(node), reg);
}
prev = NCAR(node);
} while (r == 0 && IS_NOT_NULL(node = NCDR(node)));
}
break;
case NT_ALT:
do {
r = setup_tree(NCAR(node), reg, (state | IN_ALT), env);
} while (r == 0 && IS_NOT_NULL(node = NCDR(node)));
break;
case NT_CCLASS:
break;
case NT_STR:
if (IS_IGNORECASE(reg->options) && !NSTRING_IS_RAW(node)) {
r = expand_case_fold_string(node, reg);
}
break;
case NT_CTYPE:
case NT_CANY:
break;
#ifdef USE_SUBEXP_CALL
case NT_CALL:
break;
#endif
case NT_BREF:
{
int i;
int* p;
Node** nodes = SCANENV_MEM_NODES(env);
BRefNode* br = NBREF(node);
p = BACKREFS_P(br);
for (i = 0; i < br->back_num; i++) {
if (p[i] > env->num_mem) return ONIGERR_INVALID_BACKREF;
BIT_STATUS_ON_AT(env->backrefed_mem, p[i]);
BIT_STATUS_ON_AT(env->bt_mem_start, p[i]);
#ifdef USE_BACKREF_WITH_LEVEL
if (IS_BACKREF_NEST_LEVEL(br)) {
BIT_STATUS_ON_AT(env->bt_mem_end, p[i]);
}
#endif
SET_ENCLOSE_STATUS(nodes[p[i]], NST_MEM_BACKREFED);
}
}
break;
case NT_QTFR:
{
OnigDistance d;
QtfrNode* qn = NQTFR(node);
Node* target = qn->target;
if ((state & IN_REPEAT) != 0) {
qn->state |= NST_IN_REPEAT;
}
if (IS_REPEAT_INFINITE(qn->upper) || qn->upper >= 1) {
r = get_min_match_length(target, &d, env);
if (r) break;
if (d == 0) {
qn->target_empty_info = NQ_TARGET_IS_EMPTY;
#ifdef USE_MONOMANIAC_CHECK_CAPTURES_IN_ENDLESS_REPEAT
r = quantifiers_memory_node_info(target);
if (r < 0) break;
if (r > 0) {
qn->target_empty_info = r;
}
#endif
#if 0
r = get_max_match_length(target, &d, env);
if (r == 0 && d == 0) {
/* ()* ==> ()?, ()+ ==> () */
qn->upper = 1;
if (qn->lower > 1) qn->lower = 1;
if (NTYPE(target) == NT_STR) {
qn->upper = qn->lower = 0; /* /(?:)+/ ==> // */
}
}
#endif
}
}
state |= IN_REPEAT;
if (qn->lower != qn->upper)
state |= IN_VAR_REPEAT;
r = setup_tree(target, reg, state, env);
if (r) break;
/* expand string */
#define EXPAND_STRING_MAX_LENGTH 100
if (NTYPE(target) == NT_STR) {
if (!IS_REPEAT_INFINITE(qn->lower) && qn->lower == qn->upper &&
qn->lower > 1 && qn->lower <= EXPAND_STRING_MAX_LENGTH) {
OnigDistance len = NSTRING_LEN(target);
StrNode* sn = NSTR(target);
if (len * qn->lower <= EXPAND_STRING_MAX_LENGTH) {
int i, n = qn->lower;
onig_node_conv_to_str_node(node, NSTR(target)->flag);
for (i = 0; i < n; i++) {
r = onig_node_str_cat(node, sn->s, sn->end);
if (r) break;
}
onig_node_free(target);
break; /* break case NT_QTFR: */
}
}
}
#ifdef USE_OP_PUSH_OR_JUMP_EXACT
if (qn->greedy && (qn->target_empty_info != 0)) {
if (NTYPE(target) == NT_QTFR) {
QtfrNode* tqn = NQTFR(target);
if (IS_NOT_NULL(tqn->head_exact)) {
qn->head_exact = tqn->head_exact;
tqn->head_exact = NULL;
}
}
else {
qn->head_exact = get_head_value_node(qn->target, 1, reg);
}
}
#endif
}
break;
case NT_ENCLOSE:
{
EncloseNode* en = NENCLOSE(node);
switch (en->type) {
case ENCLOSE_OPTION:
{
OnigOptionType options = reg->options;
reg->options = NENCLOSE(node)->option;
r = setup_tree(NENCLOSE(node)->target, reg, state, env);
reg->options = options;
}
break;
case ENCLOSE_MEMORY:
if ((state & (IN_ALT | IN_NOT | IN_VAR_REPEAT)) != 0) {
BIT_STATUS_ON_AT(env->bt_mem_start, en->regnum);
/* SET_ENCLOSE_STATUS(node, NST_MEM_IN_ALT_NOT); */
}
r = setup_tree(en->target, reg, state, env);
break;
case ENCLOSE_STOP_BACKTRACK:
{
Node* target = en->target;
r = setup_tree(target, reg, state, env);
if (NTYPE(target) == NT_QTFR) {
QtfrNode* tqn = NQTFR(target);
if (IS_REPEAT_INFINITE(tqn->upper) && tqn->lower <= 1 &&
tqn->greedy != 0) { /* (?>a*), a*+ etc... */
int qtype = NTYPE(tqn->target);
if (IS_NODE_TYPE_SIMPLE(qtype))
SET_ENCLOSE_STATUS(node, NST_STOP_BT_SIMPLE_REPEAT);
}
}
}
break;
}
}
break;
case NT_ANCHOR:
{
AnchorNode* an = NANCHOR(node);
switch (an->type) {
case ANCHOR_PREC_READ:
r = setup_tree(an->target, reg, state, env);
break;
case ANCHOR_PREC_READ_NOT:
r = setup_tree(an->target, reg, (state | IN_NOT), env);
break;
/* allowed node types in look-behind */
#define ALLOWED_TYPE_IN_LB \
( BIT_NT_LIST | BIT_NT_ALT | BIT_NT_STR | BIT_NT_CCLASS | BIT_NT_CTYPE | \
BIT_NT_CANY | BIT_NT_ANCHOR | BIT_NT_ENCLOSE | BIT_NT_QTFR | BIT_NT_CALL )
#define ALLOWED_ENCLOSE_IN_LB ( ENCLOSE_MEMORY )
#define ALLOWED_ENCLOSE_IN_LB_NOT 0
#define ALLOWED_ANCHOR_IN_LB \
( ANCHOR_LOOK_BEHIND | ANCHOR_BEGIN_LINE | ANCHOR_END_LINE | ANCHOR_BEGIN_BUF | ANCHOR_BEGIN_POSITION )
#define ALLOWED_ANCHOR_IN_LB_NOT \
( ANCHOR_LOOK_BEHIND | ANCHOR_LOOK_BEHIND_NOT | ANCHOR_BEGIN_LINE | ANCHOR_END_LINE | ANCHOR_BEGIN_BUF | ANCHOR_BEGIN_POSITION )
case ANCHOR_LOOK_BEHIND:
{
r = check_type_tree(an->target, ALLOWED_TYPE_IN_LB,
ALLOWED_ENCLOSE_IN_LB, ALLOWED_ANCHOR_IN_LB);
if (r < 0) return r;
if (r > 0) return ONIGERR_INVALID_LOOK_BEHIND_PATTERN;
r = setup_look_behind(node, reg, env);
if (r != 0) return r;
if (NTYPE(node) != NT_ANCHOR) goto restart;
r = setup_tree(an->target, reg, state, env);
}
break;
case ANCHOR_LOOK_BEHIND_NOT:
{
r = check_type_tree(an->target, ALLOWED_TYPE_IN_LB,
ALLOWED_ENCLOSE_IN_LB_NOT, ALLOWED_ANCHOR_IN_LB_NOT);
if (r < 0) return r;
if (r > 0) return ONIGERR_INVALID_LOOK_BEHIND_PATTERN;
r = setup_look_behind(node, reg, env);
if (r != 0) return r;
if (NTYPE(node) != NT_ANCHOR) goto restart;
r = setup_tree(an->target, reg, (state | IN_NOT), env);
}
break;
}
}
break;
default:
break;
}
return r;
}
| 0
|
275,962
|
unsigned uECC_curve_num_bits(uECC_Curve curve) {
return curve->num_bytes * 8;
}
| 0
|
404,735
|
struct file *fget_many(unsigned int fd, unsigned int refs)
{
return __fget(fd, FMODE_PATH, refs);
}
| 0
|
221,666
|
bool Socket::writeString(const char *line) //throw(std::exception)
{
int l = strlen(line);
return writeToSocket(line, l, 0, timeout);
}
| 0
|
450,398
|
static void zrle_write_u32(VncState *vs, uint32_t value)
{
vnc_write(vs, (uint8_t *)&value, 4);
}
| 0
|
338,160
|
bool WasmBinaryBuilder::maybeVisitArrayInit(Expression*& out, uint32_t code) {
if (code == BinaryConsts::ArrayInitStatic) {
auto heapType = getIndexedHeapType();
auto size = getU32LEB();
std::vector<Expression*> values(size);
for (size_t i = 0; i < size; i++) {
values[size - i - 1] = popNonVoidExpression();
}
out = Builder(wasm).makeArrayInit(heapType, values);
return true;
} else if (code == BinaryConsts::ArrayInit) {
auto heapType = getIndexedHeapType();
auto size = getU32LEB();
auto* rtt = popNonVoidExpression();
validateHeapTypeUsingChild(rtt, heapType);
std::vector<Expression*> values(size);
for (size_t i = 0; i < size; i++) {
values[size - i - 1] = popNonVoidExpression();
}
out = Builder(wasm).makeArrayInit(rtt, values);
return true;
}
return false;
}
| 0
|
312,460
|
mark_quickfix_ctx(qf_info_T *qi, int copyID)
{
int i;
int abort = FALSE;
typval_T *ctx;
callback_T *cb;
for (i = 0; i < LISTCOUNT && !abort; ++i)
{
ctx = qi->qf_lists[i].qf_ctx;
if (ctx != NULL && ctx->v_type != VAR_NUMBER
&& ctx->v_type != VAR_STRING && ctx->v_type != VAR_FLOAT)
abort = abort || set_ref_in_item(ctx, copyID, NULL, NULL);
cb = &qi->qf_lists[i].qf_qftf_cb;
abort = abort || set_ref_in_callback(cb, copyID);
}
return abort;
}
| 0
|
247,669
|
TEST_P(SslSocketTest, Ipv6San) {
const std::string client_ctx_yaml = R"EOF(
common_tls_context:
validation_context:
trusted_ca:
filename: "{{ test_rundir }}/test/config/integration/certs/upstreamcacert.pem"
match_typed_subject_alt_names:
- san_type: IP_ADDRESS
matcher:
exact: "::1"
)EOF";
const std::string server_ctx_yaml = R"EOF(
common_tls_context:
tls_certificates:
certificate_chain:
filename: "{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostcert.pem"
private_key:
filename: "{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostkey.pem"
)EOF";
TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());
testUtil(test_options);
}
| 0
|
328,908
|
R_API void r_bin_java_print_synthetic_attr_summary(RBinJavaAttrInfo *attr) {
if (attr == NULL) {
eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Synthetic.\n");
return;
}
printf ("Synthetic Attribute Information:\n");
printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
printf (" Attribute Length: %d\n", attr->length);
printf (" Attribute Index: %d\n", attr->info.source_file_attr.sourcefile_idx);
}
| 0
|
487,666
|
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
struct task_struct *p;
struct task_struct *group_leader = current->group_leader;
int err = -EINVAL;
if (!pid)
pid = group_leader->pid;
if (!pgid)
pgid = pid;
if (pgid < 0)
return -EINVAL;
/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
*/
write_lock_irq(&tasklist_lock);
err = -ESRCH;
p = find_task_by_pid(pid);
if (!p)
goto out;
err = -EINVAL;
if (!thread_group_leader(p))
goto out;
if (p->real_parent == group_leader) {
err = -EPERM;
if (task_session(p) != task_session(group_leader))
goto out;
err = -EACCES;
if (p->did_exec)
goto out;
} else {
err = -ESRCH;
if (p != group_leader)
goto out;
}
err = -EPERM;
if (p->signal->leader)
goto out;
if (pgid != pid) {
struct task_struct *g =
find_task_by_pid_type(PIDTYPE_PGID, pgid);
if (!g || task_session(g) != task_session(group_leader))
goto out;
}
err = security_task_setpgid(p, pgid);
if (err)
goto out;
if (process_group(p) != pgid) {
detach_pid(p, PIDTYPE_PGID);
p->signal->pgrp = pgid;
attach_pid(p, PIDTYPE_PGID, pgid);
}
err = 0;
out:
/* All paths lead to here, thus we are safe. -DaveM */
write_unlock_irq(&tasklist_lock);
return err;
}
| 0
|
309,870
|
reset_color_pair(NCURSES_SP_DCL0)
{
#ifdef USE_TERM_DRIVER
return CallDriver(SP_PARM, td_rescol);
#else
bool result = FALSE;
(void) SP_PARM;
if (orig_pair != 0) {
(void) NCURSES_PUTP2("orig_pair", orig_pair);
result = TRUE;
}
return result;
#endif
}
| 0
|
225,915
|
GF_Err fdsa_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem)
{
GF_HintSample *ptr = (GF_HintSample *)s;
switch(a->type) {
case GF_ISOM_BOX_TYPE_FDPA:
BOX_FIELD_LIST_ASSIGN(packetTable)
return GF_OK;
case GF_ISOM_BOX_TYPE_EXTR:
BOX_FIELD_ASSIGN(extra_data, GF_ExtraDataBox)
break;
}
return GF_OK;
| 0
|
509,568
|
int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize)
{
int error= 0;
ulonglong local_testflag= param->testflag;
bool optimize_done= !do_optimize, statistics_done= 0, full_repair_done= 0;
const char *old_proc_info= thd->proc_info;
char fixed_name[FN_REFLEN];
MARIA_SHARE *share= file->s;
ha_rows rows= file->state->records;
TRN *old_trn= file->trn;
my_bool locking= 0;
DBUG_ENTER("ha_maria::repair");
/*
Normally this method is entered with a properly opened table. If the
repair fails, it can be repeated with more elaborate options. Under
special circumstances it can happen that a repair fails so that it
closed the data file and cannot re-open it. In this case file->dfile
is set to -1. We must not try another repair without an open data
file. (Bug #25289)
*/
if (file->dfile.file == -1)
{
sql_print_information("Retrying repair of: '%s' failed. "
"Please try REPAIR EXTENDED or aria_chk",
table->s->path.str);
DBUG_RETURN(HA_ADMIN_FAILED);
}
/*
If transactions was not enabled for a transactional table then
file->s->status is not up to date. This is needed for repair_by_sort
to work
*/
if (share->base.born_transactional && !share->now_transactional)
_ma_copy_nontrans_state_information(file);
param->db_name= table->s->db.str;
param->table_name= table->alias.c_ptr();
param->tmpfile_createflag= O_RDWR | O_TRUNC;
param->using_global_keycache= 1;
param->thd= thd;
param->tmpdir= &mysql_tmpdir_list;
param->out_flag= 0;
share->state.dupp_key= MI_MAX_KEY;
strmov(fixed_name, share->open_file_name.str);
unmap_file(file);
/*
Don't lock tables if we have used LOCK TABLE or if we come from
enable_index()
*/
if (!thd->locked_tables_mode && ! (param->testflag & T_NO_LOCKS))
{
locking= 1;
if (maria_lock_database(file, table->s->tmp_table ? F_EXTRA_LCK : F_WRLCK))
{
_ma_check_print_error(param, ER_THD(thd, ER_CANT_LOCK), my_errno);
DBUG_RETURN(HA_ADMIN_FAILED);
}
}
if (!do_optimize ||
(((share->data_file_type == BLOCK_RECORD) ?
(share->state.changed & STATE_NOT_OPTIMIZED_ROWS) :
(file->state->del ||
share->state.split != file->state->records)) &&
(!(param->testflag & T_QUICK) ||
(share->state.changed & (STATE_NOT_OPTIMIZED_KEYS |
STATE_NOT_OPTIMIZED_ROWS)))))
{
ulonglong key_map= ((local_testflag & T_CREATE_MISSING_KEYS) ?
maria_get_mask_all_keys_active(share->base.keys) :
share->state.key_map);
ulonglong save_testflag= param->testflag;
if (maria_test_if_sort_rep(file, file->state->records, key_map, 0) &&
(local_testflag & T_REP_BY_SORT))
{
local_testflag |= T_STATISTICS;
param->testflag |= T_STATISTICS; // We get this for free
statistics_done= 1;
/* TODO: Remove BLOCK_RECORD test when parallel works with blocks */
if (THDVAR(thd,repair_threads) > 1 &&
share->data_file_type != BLOCK_RECORD)
{
char buf[40];
/* TODO: respect maria_repair_threads variable */
my_snprintf(buf, 40, "Repair with %d threads", my_count_bits(key_map));
thd_proc_info(thd, buf);
param->testflag|= T_REP_PARALLEL;
error= maria_repair_parallel(param, file, fixed_name,
MY_TEST(param->testflag & T_QUICK));
/* to reset proc_info, as it was pointing to local buffer */
thd_proc_info(thd, "Repair done");
}
else
{
thd_proc_info(thd, "Repair by sorting");
param->testflag|= T_REP_BY_SORT;
error= maria_repair_by_sort(param, file, fixed_name,
MY_TEST(param->testflag & T_QUICK));
}
if (error && file->create_unique_index_by_sort &&
share->state.dupp_key != MAX_KEY)
{
my_errno= HA_ERR_FOUND_DUPP_KEY;
print_keydup_error(table, &table->key_info[share->state.dupp_key],
MYF(0));
}
}
else
{
thd_proc_info(thd, "Repair with keycache");
param->testflag &= ~(T_REP_BY_SORT | T_REP_PARALLEL);
error= maria_repair(param, file, fixed_name,
MY_TEST(param->testflag & T_QUICK));
}
param->testflag= save_testflag | (param->testflag & T_RETRY_WITHOUT_QUICK);
optimize_done= 1;
/*
set full_repair_done if we re-wrote all rows and all keys
(and thus removed all transid's from the table
*/
full_repair_done= !MY_TEST(param->testflag & T_QUICK);
}
if (!error)
{
if ((local_testflag & T_SORT_INDEX) &&
(share->state.changed & STATE_NOT_SORTED_PAGES))
{
optimize_done= 1;
thd_proc_info(thd, "Sorting index");
error= maria_sort_index(param, file, fixed_name);
}
if (!error && !statistics_done && (local_testflag & T_STATISTICS))
{
if (share->state.changed & STATE_NOT_ANALYZED)
{
optimize_done= 1;
thd_proc_info(thd, "Analyzing");
error= maria_chk_key(param, file);
}
else
local_testflag &= ~T_STATISTICS; // Don't update statistics
}
}
thd_proc_info(thd, "Saving state");
if (full_repair_done && !error &&
!(param->testflag & T_NO_CREATE_RENAME_LSN))
{
/* Set trid (needed if the table was moved from another system) */
share->state.create_trid= trnman_get_min_safe_trid();
}
mysql_mutex_lock(&share->intern_lock);
if (!error)
{
if ((share->state.changed & STATE_CHANGED) || maria_is_crashed(file))
{
DBUG_PRINT("info", ("Resetting crashed state"));
share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED_FLAGS |
STATE_IN_REPAIR | STATE_MOVED);
file->update |= HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
}
/*
repair updates share->state.state. Ensure that file->state is up to date
*/
if (file->state != &share->state.state)
*file->state= share->state.state;
if (share->base.auto_key)
_ma_update_auto_increment_key(param, file, 1);
if (optimize_done)
error= maria_update_state_info(param, file,
UPDATE_TIME | UPDATE_OPEN_COUNT |
(local_testflag &
T_STATISTICS ? UPDATE_STAT : 0));
/* File is repaired; Mark the file as moved to this system */
(void) _ma_set_uuid(share, 0);
info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
HA_STATUS_CONST);
if (rows != file->state->records && !(param->testflag & T_VERY_SILENT))
{
char llbuff[22], llbuff2[22];
_ma_check_print_warning(param, "Number of rows changed from %s to %s",
llstr(rows, llbuff),
llstr(file->state->records, llbuff2));
}
}
else
{
maria_mark_crashed_on_repair(file);
file->update |= HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
maria_update_state_info(param, file, 0);
}
mysql_mutex_unlock(&share->intern_lock);
thd_proc_info(thd, old_proc_info);
thd_progress_end(thd); // Mark done
if (locking)
maria_lock_database(file, F_UNLCK);
/* Reset trn, that may have been set by repair */
if (old_trn && old_trn != file->trn)
_ma_set_trn_for_table(file, old_trn);
error= error ? HA_ADMIN_FAILED :
(optimize_done ?
(write_log_record_for_repair(param, file) ? HA_ADMIN_FAILED :
HA_ADMIN_OK) : HA_ADMIN_ALREADY_DONE);
DBUG_RETURN(error);
}
| 0
|
401,586
|
static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
struct crng_state *crng = NULL;
#ifdef CONFIG_NUMA
if (crng_node_pool)
crng = crng_node_pool[numa_node_id()];
if (crng == NULL)
#endif
crng = &primary_crng;
_crng_backtrack_protect(crng, tmp, used);
}
| 0
|
513,358
|
do_select(JOIN *join, Procedure *procedure)
{
int rc= 0;
enum_nested_loop_state error= NESTED_LOOP_OK;
DBUG_ENTER("do_select");
if (join->pushdown_query)
{
/* Select fields are in the temporary table */
join->fields= &join->tmp_fields_list1;
/* Setup HAVING to work with fields in temporary table */
join->set_items_ref_array(join->items1);
/* The storage engine will take care of the group by query result */
int res= join->pushdown_query->execute(join);
if (res)
DBUG_RETURN(res);
if (join->pushdown_query->store_data_in_temp_table)
{
JOIN_TAB *last_tab= join->join_tab + join->table_count -
join->exec_join_tab_cnt();
last_tab->next_select= end_send;
enum_nested_loop_state state= last_tab->aggr->end_send();
if (state >= NESTED_LOOP_OK)
state= sub_select(join, last_tab, true);
if (state < NESTED_LOOP_OK)
res= 1;
if (join->result->send_eof())
res= 1;
}
DBUG_RETURN(res);
}
join->procedure= procedure;
join->duplicate_rows= join->send_records=0;
if (join->only_const_tables() && !join->need_tmp)
{
Next_select_func end_select= setup_end_select_func(join, NULL);
/*
HAVING will be checked after processing aggregate functions,
But WHERE should checked here (we alredy have read tables).
Notice that make_join_select() splits all conditions in this case
into two groups exec_const_cond and outer_ref_cond.
If join->table_count == join->const_tables then it is
sufficient to check only the condition pseudo_bits_cond.
*/
DBUG_ASSERT(join->outer_ref_cond == NULL);
if (!join->pseudo_bits_cond || join->pseudo_bits_cond->val_int())
{
// HAVING will be checked by end_select
error= (*end_select)(join, 0, 0);
if (error >= NESTED_LOOP_OK)
error= (*end_select)(join, 0, 1);
/*
If we don't go through evaluate_join_record(), do the counting
here. join->send_records is increased on success in end_send(),
so we don't touch it here.
*/
join->join_examined_rows++;
DBUG_ASSERT(join->join_examined_rows <= 1);
}
else if (join->send_row_on_empty_set())
{
if (!join->having || join->having->val_int())
{
List<Item> *columns_list= (procedure ? &join->procedure_fields_list :
join->fields);
rc= join->result->send_data(*columns_list) > 0;
}
}
/*
An error can happen when evaluating the conds
(the join condition and piece of where clause
relevant to this join table).
*/
if (join->thd->is_error())
error= NESTED_LOOP_ERROR;
}
else
{
DBUG_EXECUTE_IF("show_explain_probe_do_select",
if (dbug_user_var_equals_int(join->thd,
"show_explain_probe_select_id",
join->select_lex->select_number))
dbug_serve_apcs(join->thd, 1);
);
JOIN_TAB *join_tab= join->join_tab +
(join->tables_list ? join->const_tables : 0);
if (join->outer_ref_cond && !join->outer_ref_cond->val_int())
error= NESTED_LOOP_NO_MORE_ROWS;
else
error= join->first_select(join,join_tab,0);
if (error >= NESTED_LOOP_OK && join->thd->killed != ABORT_QUERY)
error= join->first_select(join,join_tab,1);
}
join->thd->limit_found_rows= join->send_records - join->duplicate_rows;
if (error == NESTED_LOOP_NO_MORE_ROWS || join->thd->killed == ABORT_QUERY)
error= NESTED_LOOP_OK;
/*
For "order by with limit", we cannot rely on send_records, but need
to use the rowcount read originally into the join_tab applying the
filesort. There cannot be any post-filtering conditions, nor any
following join_tabs in this case, so this rowcount properly represents
the correct number of qualifying rows.
*/
if (join->order)
{
// Save # of found records prior to cleanup
JOIN_TAB *sort_tab;
JOIN_TAB *join_tab= join->join_tab;
uint const_tables= join->const_tables;
// Take record count from first non constant table or from last tmp table
if (join->aggr_tables > 0)
sort_tab= join_tab + join->top_join_tab_count + join->aggr_tables - 1;
else
{
DBUG_ASSERT(!join->only_const_tables());
sort_tab= join_tab + const_tables;
}
if (sort_tab->filesort &&
join->select_options & OPTION_FOUND_ROWS &&
sort_tab->filesort->sortorder &&
sort_tab->filesort->limit != HA_POS_ERROR)
{
join->thd->limit_found_rows= sort_tab->records;
}
}
{
/*
The following will unlock all cursors if the command wasn't an
update command
*/
join->join_free(); // Unlock all cursors
}
if (error == NESTED_LOOP_OK)
{
/*
Sic: this branch works even if rc != 0, e.g. when
send_data above returns an error.
*/
if (join->result->send_eof())
rc= 1; // Don't send error
DBUG_PRINT("info",("%ld records output", (long) join->send_records));
}
else
rc= -1;
#ifndef DBUG_OFF
if (rc)
{
DBUG_PRINT("error",("Error: do_select() failed"));
}
#endif
rc= join->thd->is_error() ? -1 : rc;
DBUG_RETURN(rc);
}
| 0
|
369,180
|
static inline void __io_queue_sqe(struct io_kiocb *req)
__must_hold(&req->ctx->uring_lock)
{
struct io_kiocb *linked_timeout;
int ret;
ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
if (req->flags & REQ_F_COMPLETE_INLINE) {
io_req_add_compl_list(req);
return;
}
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
* doesn't support non-blocking read/write attempts
*/
if (likely(!ret)) {
linked_timeout = io_prep_linked_timeout(req);
if (linked_timeout)
io_queue_linked_timeout(linked_timeout);
} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
io_queue_sqe_arm_apoll(req);
} else {
io_req_complete_failed(req, ret);
}
| 0
|
225,417
|
static int vidioc_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct v4l2_loopback_device *dev;
MARK();
dprintk("%d\n", type);
dev = v4l2loopback_getdevice(file);
switch (type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (dev->ready_for_capture > 0)
dev->ready_for_capture--;
return 0;
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
return 0;
default:
return -EINVAL;
}
return -EINVAL;
}
| 0
|
238,438
|
static const char *kernel_type_name(const struct btf* btf, u32 id)
{
return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}
| 0
|
513,028
|
int Arg_comparator::compare_e_time()
{
THD *thd= current_thd;
longlong val1= (*a)->val_time_packed(thd);
longlong val2= (*b)->val_time_packed(thd);
if ((*a)->null_value || (*b)->null_value)
return MY_TEST((*a)->null_value && (*b)->null_value);
return MY_TEST(val1 == val2);
}
| 0
|
283,749
|
static void zynq_slcr_reset_hold(Object *obj)
{
ZynqSLCRState *s = ZYNQ_SLCR(obj);
/* will disable all output clocks */
zynq_slcr_compute_clocks(s);
zynq_slcr_propagate_clocks(s);
}
| 0
|
437,404
|
vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
uint64_t features = msg->payload.u64;
uint64_t vhost_features = 0;
struct rte_vdpa_device *vdpa_dev;
int did = -1;
rte_vhost_driver_get_features(dev->ifname, &vhost_features);
if (features & ~vhost_features) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) received invalid negotiated features.\n",
dev->vid);
return RTE_VHOST_MSG_RESULT_ERR;
}
if (dev->flags & VIRTIO_DEV_RUNNING) {
if (dev->features == features)
return RTE_VHOST_MSG_RESULT_OK;
/*
* Error out if master tries to change features while device is
* in running state. The exception being VHOST_F_LOG_ALL, which
* is enabled when the live-migration starts.
*/
if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) features changed while device is running.\n",
dev->vid);
return RTE_VHOST_MSG_RESULT_ERR;
}
if (dev->notify_ops->features_changed)
dev->notify_ops->features_changed(dev->vid, features);
}
dev->features = features;
if (dev->features &
((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else {
dev->vhost_hlen = sizeof(struct virtio_net_hdr);
}
RTE_LOG(INFO, VHOST_CONFIG,
"negotiated Virtio features: 0x%" PRIx64 "\n", dev->features);
VHOST_LOG_DEBUG(VHOST_CONFIG,
"(%d) mergeable RX buffers %s, virtio 1 %s\n",
dev->vid,
(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
!(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
/*
* Remove all but first queue pair if MQ hasn't been
* negotiated. This is safe because the device is not
* running at this stage.
*/
while (dev->nr_vring > 2) {
struct vhost_virtqueue *vq;
vq = dev->virtqueue[--dev->nr_vring];
if (!vq)
continue;
dev->virtqueue[dev->nr_vring] = NULL;
cleanup_vq(vq, 1);
free_vq(dev, vq);
}
}
did = dev->vdpa_dev_id;
vdpa_dev = rte_vdpa_get_device(did);
if (vdpa_dev && vdpa_dev->ops->set_features)
vdpa_dev->ops->set_features(dev->vid);
return RTE_VHOST_MSG_RESULT_OK;
}
| 0
|
432,169
|
void address_space_destroy(AddressSpace *as)
{
MemoryRegion *root = as->root;
/* Flush out anything from MemoryListeners listening in on this */
memory_region_transaction_begin();
as->root = NULL;
memory_region_transaction_commit(root);
QTAILQ_REMOVE(&as->uc->address_spaces, as, address_spaces_link);
/* At this point, as->dispatch and as->current_map are dummy
* entries that the guest should never use. Wait for the old
* values to expire before freeing the data.
*/
as->root = root;
flatview_unref(as->current_map);
}
| 0
|
246,449
|
static RBinWasmCustomNameEntry *parse_custom_name_entry(RBinWasmObj *bin, ut64 bound) {
RBuffer *b = bin->buf;
RBinWasmCustomNameEntry *cust = R_NEW0 (RBinWasmCustomNameEntry);
if (!cust) {
return NULL;
}
cust->type = R_BIN_WASM_NAMETYPE_None;
size_t start = r_buf_tell (b);
if (!consume_u7_r (b, bound, &cust->type)) {
goto beach;
};
if (!consume_u32_r (b, bound, &cust->size)) {
goto beach;
};
switch (cust->type) {
case R_BIN_WASM_NAMETYPE_Module:
if (!consume_encoded_name_new (b, bound, NULL, &cust->mod_name)) {
goto beach;
}
break;
case R_BIN_WASM_NAMETYPE_Function:
cust->func = R_NEW0 (RBinWasmCustomNameFunctionNames);
if (!cust->func) {
goto beach;
}
cust->func->names = r_id_storage_new (0, UT32_MAX);
if (!cust->func->names) {
goto beach;
}
if (!parse_namemap (b, bound, cust->func->names, &cust->func->count)) {
goto beach;
}
break;
case R_BIN_WASM_NAMETYPE_Local:
cust->local = parse_custom_names_local (b, bound);
if (!cust->local) {
goto beach;
}
break;
default:
R_LOG_WARN ("[wasm] Halting custom name section parsing at unknown type 0x%x offset 0x%" PFMTSZx "\n", cust->type, start);
cust->type = R_BIN_WASM_NAMETYPE_None;
goto beach;
}
return cust;
beach:
wasm_custom_name_free (cust);
return NULL;
}
| 0
|
245,714
|
process_client_headers (struct conn_s *connptr, orderedmap hashofheaders)
{
static const char *skipheaders[] = {
"host",
"keep-alive",
"proxy-connection",
"te",
"trailers",
"upgrade"
};
int i;
size_t iter;
int ret = 0;
char *data, *header;
/*
* Don't send headers if there's already an error, if the request was
* a stats request, or if this was a CONNECT method (unless upstream
* http proxy is in use.)
*/
if (connptr->server_fd == -1 || connptr->show_stats
|| (connptr->connect_method && ! UPSTREAM_IS_HTTP(connptr))) {
log_message (LOG_INFO,
"Not sending client headers to remote machine");
return 0;
}
/*
* See if there is a "Content-Length" header. If so, again we need
* to do a bit of processing.
*/
connptr->content_length.client = get_content_length (hashofheaders);
/* Check whether client sends chunked data. */
if (connptr->content_length.client == -1 && is_chunked_transfer (hashofheaders))
connptr->content_length.client = -2;
/*
* See if there is a "Connection" header. If so, we need to do a bit
* of processing. :)
*/
remove_connection_headers (hashofheaders);
/*
* Delete the headers listed in the skipheaders list
*/
for (i = 0; i != (sizeof (skipheaders) / sizeof (char *)); i++) {
orderedmap_remove (hashofheaders, skipheaders[i]);
}
/* Send, or add the Via header */
ret = write_via_header (connptr->server_fd, hashofheaders,
connptr->protocol.major,
connptr->protocol.minor);
if (ret < 0) {
indicate_http_error (connptr, 503,
"Could not send data to remote server",
"detail",
"A network error occurred while "
"trying to write data to the remote web server.",
NULL);
goto PULL_CLIENT_DATA;
}
/*
* Output all the remaining headers to the remote machine.
*/
iter = 0;
while((iter = orderedmap_next(hashofheaders, iter, &data, &header))) {
if (!is_anonymous_enabled (config)
|| anonymous_search (config, data) > 0) {
ret =
write_message (connptr->server_fd,
"%s: %s\r\n", data, header);
if (ret < 0) {
indicate_http_error (connptr, 503,
"Could not send data to remote server",
"detail",
"A network error occurred while "
"trying to write data to the "
"remote web server.",
NULL);
goto PULL_CLIENT_DATA;
}
}
}
#if defined(XTINYPROXY_ENABLE)
if (config->add_xtinyproxy)
add_xtinyproxy_header (connptr);
#endif
/* Write the final "blank" line to signify the end of the headers */
if (safe_write (connptr->server_fd, "\r\n", 2) < 0)
return -1;
/*
* Spin here pulling the data from the client.
*/
PULL_CLIENT_DATA:
if (connptr->content_length.client > 0) {
ret = pull_client_data (connptr,
connptr->content_length.client, 1);
} else if (connptr->content_length.client == -2)
ret = pull_client_data_chunked (connptr);
return ret;
}
| 0
|
389,710
|
typval_compare(
typval_T *tv1, // first operand
typval_T *tv2, // second operand
exprtype_T type, // operator
int ic) // ignore case
{
varnumber_T n1, n2;
int res = 0;
int type_is = type == EXPR_IS || type == EXPR_ISNOT;
if (type_is && tv1->v_type != tv2->v_type)
{
// For "is" a different type always means FALSE, for "notis"
// it means TRUE.
n1 = (type == EXPR_ISNOT);
}
else if (((tv1->v_type == VAR_SPECIAL && tv1->vval.v_number == VVAL_NULL)
|| (tv2->v_type == VAR_SPECIAL
&& tv2->vval.v_number == VVAL_NULL))
&& tv1->v_type != tv2->v_type
&& (type == EXPR_EQUAL || type == EXPR_NEQUAL))
{
n1 = typval_compare_null(tv1, tv2);
if (n1 == MAYBE)
{
clear_tv(tv1);
return FAIL;
}
if (type == EXPR_NEQUAL)
n1 = !n1;
}
else if (tv1->v_type == VAR_BLOB || tv2->v_type == VAR_BLOB)
{
if (typval_compare_blob(tv1, tv2, type, &res) == FAIL)
{
clear_tv(tv1);
return FAIL;
}
n1 = res;
}
else if (tv1->v_type == VAR_LIST || tv2->v_type == VAR_LIST)
{
if (typval_compare_list(tv1, tv2, type, ic, &res) == FAIL)
{
clear_tv(tv1);
return FAIL;
}
n1 = res;
}
else if (tv1->v_type == VAR_DICT || tv2->v_type == VAR_DICT)
{
if (typval_compare_dict(tv1, tv2, type, ic, &res) == FAIL)
{
clear_tv(tv1);
return FAIL;
}
n1 = res;
}
else if (tv1->v_type == VAR_FUNC || tv2->v_type == VAR_FUNC
|| tv1->v_type == VAR_PARTIAL || tv2->v_type == VAR_PARTIAL)
{
if (typval_compare_func(tv1, tv2, type, ic, &res) == FAIL)
{
clear_tv(tv1);
return FAIL;
}
n1 = res;
}
#ifdef FEAT_FLOAT
// If one of the two variables is a float, compare as a float.
// When using "=~" or "!~", always compare as string.
else if ((tv1->v_type == VAR_FLOAT || tv2->v_type == VAR_FLOAT)
&& type != EXPR_MATCH && type != EXPR_NOMATCH)
{
float_T f1, f2;
int error = FALSE;
f1 = tv_get_float_chk(tv1, &error);
if (!error)
f2 = tv_get_float_chk(tv2, &error);
if (error)
{
clear_tv(tv1);
return FAIL;
}
n1 = FALSE;
switch (type)
{
case EXPR_IS:
case EXPR_EQUAL: n1 = (f1 == f2); break;
case EXPR_ISNOT:
case EXPR_NEQUAL: n1 = (f1 != f2); break;
case EXPR_GREATER: n1 = (f1 > f2); break;
case EXPR_GEQUAL: n1 = (f1 >= f2); break;
case EXPR_SMALLER: n1 = (f1 < f2); break;
case EXPR_SEQUAL: n1 = (f1 <= f2); break;
case EXPR_UNKNOWN:
case EXPR_MATCH:
default: break; // avoid gcc warning
}
}
#endif
// If one of the two variables is a number, compare as a number.
// When using "=~" or "!~", always compare as string.
else if ((tv1->v_type == VAR_NUMBER || tv2->v_type == VAR_NUMBER)
&& type != EXPR_MATCH && type != EXPR_NOMATCH)
{
int error = FALSE;
n1 = tv_get_number_chk(tv1, &error);
if (!error)
n2 = tv_get_number_chk(tv2, &error);
if (error)
{
clear_tv(tv1);
return FAIL;
}
switch (type)
{
case EXPR_IS:
case EXPR_EQUAL: n1 = (n1 == n2); break;
case EXPR_ISNOT:
case EXPR_NEQUAL: n1 = (n1 != n2); break;
case EXPR_GREATER: n1 = (n1 > n2); break;
case EXPR_GEQUAL: n1 = (n1 >= n2); break;
case EXPR_SMALLER: n1 = (n1 < n2); break;
case EXPR_SEQUAL: n1 = (n1 <= n2); break;
case EXPR_UNKNOWN:
case EXPR_MATCH:
default: break; // avoid gcc warning
}
}
else if (in_vim9script() && (tv1->v_type == VAR_BOOL
|| tv2->v_type == VAR_BOOL
|| (tv1->v_type == VAR_SPECIAL
&& tv2->v_type == VAR_SPECIAL)))
{
if (tv1->v_type != tv2->v_type)
{
semsg(_(e_cannot_compare_str_with_str),
vartype_name(tv1->v_type), vartype_name(tv2->v_type));
clear_tv(tv1);
return FAIL;
}
n1 = tv1->vval.v_number;
n2 = tv2->vval.v_number;
switch (type)
{
case EXPR_IS:
case EXPR_EQUAL: n1 = (n1 == n2); break;
case EXPR_ISNOT:
case EXPR_NEQUAL: n1 = (n1 != n2); break;
default:
semsg(_(e_invalid_operation_for_str),
vartype_name(tv1->v_type));
clear_tv(tv1);
return FAIL;
}
}
#ifdef FEAT_JOB_CHANNEL
else if (tv1->v_type == tv2->v_type
&& (tv1->v_type == VAR_CHANNEL || tv1->v_type == VAR_JOB)
&& (type == EXPR_NEQUAL || type == EXPR_EQUAL))
{
if (tv1->v_type == VAR_CHANNEL)
n1 = tv1->vval.v_channel == tv2->vval.v_channel;
else
n1 = tv1->vval.v_job == tv2->vval.v_job;
if (type == EXPR_NEQUAL)
n1 = !n1;
}
#endif
else
{
if (typval_compare_string(tv1, tv2, type, ic, &res) == FAIL)
{
clear_tv(tv1);
return FAIL;
}
n1 = res;
}
clear_tv(tv1);
if (in_vim9script())
{
tv1->v_type = VAR_BOOL;
tv1->vval.v_number = n1 ? VVAL_TRUE : VVAL_FALSE;
}
else
{
tv1->v_type = VAR_NUMBER;
tv1->vval.v_number = n1;
}
return OK;
}
| 0
|
269,507
|
static Image *ReadTIFFImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
#define ThrowTIFFException(severity,message) \
{ \
if (pixel_info != (MemoryInfo *) NULL) \
pixel_info=RelinquishVirtualMemory(pixel_info); \
if (quantum_info != (QuantumInfo *) NULL) \
quantum_info=DestroyQuantumInfo(quantum_info); \
TIFFClose(tiff); \
ThrowReaderException(severity,message); \
}
float
*chromaticity = (float *) NULL,
x_position,
y_position,
x_resolution,
y_resolution;
Image
*image;
int
tiff_status = 0;
MagickBooleanType
more_frames;
MagickSizeType
number_pixels;
MagickStatusType
status;
MemoryInfo
*pixel_info = (MemoryInfo *) NULL;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
ssize_t
i,
scanline_size,
y;
TIFF
*tiff;
TIFFMethodType
method;
uint16
compress_tag = 0,
bits_per_sample = 0,
endian = 0,
extra_samples = 0,
interlace = 0,
max_sample_value = 0,
min_sample_value = 0,
orientation = 0,
pages = 0,
photometric = 0,
*sample_info = NULL,
sample_format = 0,
samples_per_pixel = 0,
units = 0,
value = 0;
uint32
height,
rows_per_strip,
width;
unsigned char
*pixels;
void
*sans[8] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };
/*
Open image.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) SetMagickThreadValue(tiff_exception,exception);
tiff=TIFFClientOpen(image->filename,"rb",(thandle_t) image,TIFFReadBlob,
TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob,
TIFFUnmapBlob);
if (tiff == (TIFF *) NULL)
{
if (exception->severity == UndefinedException)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
image=DestroyImageList(image);
return((Image *) NULL);
}
if (exception->severity > ErrorException)
{
TIFFClose(tiff);
image=DestroyImageList(image);
return((Image *) NULL);
}
if (image_info->number_scenes != 0)
{
/*
Generate blank images for subimage specification (e.g. image.tif[4].
We need to check the number of directores because it is possible that
the subimage(s) are stored in the photoshop profile.
*/
if (image_info->scene < (size_t) TIFFNumberOfDirectories(tiff))
{
for (i=0; i < (ssize_t) image_info->scene; i++)
{
status=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse;
if (status == MagickFalse)
{
TIFFClose(tiff);
image=DestroyImageList(image);
return((Image *) NULL);
}
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
TIFFClose(tiff);
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
}
}
}
more_frames=MagickTrue;
do
{
/* TIFFPrintDirectory(tiff,stdout,MagickFalse); */
photometric=PHOTOMETRIC_RGB;
if ((TIFFGetField(tiff,TIFFTAG_IMAGEWIDTH,&width) != 1) ||
(TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&height) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_PHOTOMETRIC,&photometric,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_COMPRESSION,&compress_tag,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_PLANARCONFIG,&interlace,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL,&samples_per_pixel,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,&bits_per_sample,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLEFORMAT,&sample_format,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_MINSAMPLEVALUE,&min_sample_value,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_MAXSAMPLEVALUE,&max_sample_value,sans) != 1))
{
TIFFClose(tiff);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if (((sample_format != SAMPLEFORMAT_IEEEFP) || (bits_per_sample != 64)) &&
((bits_per_sample <= 0) || (bits_per_sample > 32)))
{
TIFFClose(tiff);
ThrowReaderException(CorruptImageError,"UnsupportedBitsPerPixel");
}
if (samples_per_pixel > MaxPixelChannels)
{
TIFFClose(tiff);
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
}
if (sample_format == SAMPLEFORMAT_IEEEFP)
(void) SetImageProperty(image,"quantum:format","floating-point",
exception);
switch (photometric)
{
case PHOTOMETRIC_MINISBLACK:
{
(void) SetImageProperty(image,"tiff:photometric","min-is-black",
exception);
break;
}
case PHOTOMETRIC_MINISWHITE:
{
(void) SetImageProperty(image,"tiff:photometric","min-is-white",
exception);
break;
}
case PHOTOMETRIC_PALETTE:
{
(void) SetImageProperty(image,"tiff:photometric","palette",exception);
break;
}
case PHOTOMETRIC_RGB:
{
(void) SetImageProperty(image,"tiff:photometric","RGB",exception);
break;
}
case PHOTOMETRIC_CIELAB:
{
(void) SetImageProperty(image,"tiff:photometric","CIELAB",exception);
break;
}
case PHOTOMETRIC_LOGL:
{
(void) SetImageProperty(image,"tiff:photometric","CIE Log2(L)",
exception);
break;
}
case PHOTOMETRIC_LOGLUV:
{
(void) SetImageProperty(image,"tiff:photometric","LOGLUV",exception);
break;
}
#if defined(PHOTOMETRIC_MASK)
case PHOTOMETRIC_MASK:
{
(void) SetImageProperty(image,"tiff:photometric","MASK",exception);
break;
}
#endif
case PHOTOMETRIC_SEPARATED:
{
(void) SetImageProperty(image,"tiff:photometric","separated",exception);
break;
}
case PHOTOMETRIC_YCBCR:
{
(void) SetImageProperty(image,"tiff:photometric","YCBCR",exception);
break;
}
default:
{
(void) SetImageProperty(image,"tiff:photometric","unknown",exception);
break;
}
}
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %ux%u",
(unsigned int) width,(unsigned int) height);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Interlace: %u",
interlace);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Bits per sample: %u",bits_per_sample);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Min sample value: %u",min_sample_value);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Max sample value: %u",max_sample_value);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Photometric "
"interpretation: %s",GetImageProperty(image,"tiff:photometric",
exception));
}
image->columns=(size_t) width;
image->rows=(size_t) height;
image->depth=(size_t) bits_per_sample;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Image depth: %.20g",
(double) image->depth);
image->endian=MSBEndian;
if (endian == FILLORDER_LSB2MSB)
image->endian=LSBEndian;
#if defined(MAGICKCORE_HAVE_TIFFISBIGENDIAN)
if (TIFFIsBigEndian(tiff) == 0)
{
(void) SetImageProperty(image,"tiff:endian","lsb",exception);
image->endian=LSBEndian;
}
else
{
(void) SetImageProperty(image,"tiff:endian","msb",exception);
image->endian=MSBEndian;
}
#endif
if ((photometric == PHOTOMETRIC_MINISBLACK) ||
(photometric == PHOTOMETRIC_MINISWHITE))
(void) SetImageColorspace(image,GRAYColorspace,exception);
if (photometric == PHOTOMETRIC_SEPARATED)
(void) SetImageColorspace(image,CMYKColorspace,exception);
if (photometric == PHOTOMETRIC_CIELAB)
(void) SetImageColorspace(image,LabColorspace,exception);
if ((photometric == PHOTOMETRIC_YCBCR) &&
(compress_tag != COMPRESSION_OJPEG) &&
(compress_tag != COMPRESSION_JPEG))
(void) SetImageColorspace(image,YCbCrColorspace,exception);
status=TIFFGetProfiles(tiff,image,exception);
if (status == MagickFalse)
{
TIFFClose(tiff);
return(DestroyImageList(image));
}
status=TIFFGetProperties(tiff,image,exception);
if (status == MagickFalse)
{
TIFFClose(tiff);
return(DestroyImageList(image));
}
TIFFGetEXIFProperties(tiff,image,image_info,exception);
TIFFGetGPSProperties(tiff,image,image_info,exception);
if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XRESOLUTION,&x_resolution,sans) == 1) &&
(TIFFGetFieldDefaulted(tiff,TIFFTAG_YRESOLUTION,&y_resolution,sans) == 1))
{
image->resolution.x=x_resolution;
image->resolution.y=y_resolution;
}
if (TIFFGetFieldDefaulted(tiff,TIFFTAG_RESOLUTIONUNIT,&units,sans,sans) == 1)
{
if (units == RESUNIT_INCH)
image->units=PixelsPerInchResolution;
if (units == RESUNIT_CENTIMETER)
image->units=PixelsPerCentimeterResolution;
}
if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XPOSITION,&x_position,sans) == 1) &&
(TIFFGetFieldDefaulted(tiff,TIFFTAG_YPOSITION,&y_position,sans) == 1))
{
image->page.x=CastDoubleToLong(ceil(x_position*
image->resolution.x-0.5));
image->page.y=CastDoubleToLong(ceil(y_position*
image->resolution.y-0.5));
}
if (TIFFGetFieldDefaulted(tiff,TIFFTAG_ORIENTATION,&orientation,sans) == 1)
image->orientation=(OrientationType) orientation;
if (TIFFGetField(tiff,TIFFTAG_WHITEPOINT,&chromaticity) == 1)
{
if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0))
{
image->chromaticity.white_point.x=chromaticity[0];
image->chromaticity.white_point.y=chromaticity[1];
}
}
if (TIFFGetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,&chromaticity) == 1)
{
if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0))
{
image->chromaticity.red_primary.x=chromaticity[0];
image->chromaticity.red_primary.y=chromaticity[1];
image->chromaticity.green_primary.x=chromaticity[2];
image->chromaticity.green_primary.y=chromaticity[3];
image->chromaticity.blue_primary.x=chromaticity[4];
image->chromaticity.blue_primary.y=chromaticity[5];
}
}
#if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919)
if ((compress_tag != COMPRESSION_NONE) &&
(TIFFIsCODECConfigured(compress_tag) == 0))
{
TIFFClose(tiff);
ThrowReaderException(CoderError,"CompressNotSupported");
}
#endif
switch (compress_tag)
{
case COMPRESSION_NONE: image->compression=NoCompression; break;
case COMPRESSION_CCITTFAX3: image->compression=FaxCompression; break;
case COMPRESSION_CCITTFAX4: image->compression=Group4Compression; break;
case COMPRESSION_JPEG:
{
image->compression=JPEGCompression;
#if defined(JPEG_SUPPORT)
{
char
sampling_factor[MagickPathExtent];
uint16
horizontal,
vertical;
tiff_status=TIFFGetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,&horizontal,
&vertical);
if (tiff_status == 1)
{
(void) FormatLocaleString(sampling_factor,MagickPathExtent,
"%dx%d",horizontal,vertical);
(void) SetImageProperty(image,"jpeg:sampling-factor",
sampling_factor,exception);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling Factors: %s",sampling_factor);
}
}
#endif
break;
}
case COMPRESSION_OJPEG: image->compression=JPEGCompression; break;
#if defined(COMPRESSION_LZMA)
case COMPRESSION_LZMA: image->compression=LZMACompression; break;
#endif
case COMPRESSION_LZW: image->compression=LZWCompression; break;
case COMPRESSION_DEFLATE: image->compression=ZipCompression; break;
case COMPRESSION_ADOBE_DEFLATE: image->compression=ZipCompression; break;
#if defined(COMPRESSION_WEBP)
case COMPRESSION_WEBP: image->compression=WebPCompression; break;
#endif
#if defined(COMPRESSION_ZSTD)
case COMPRESSION_ZSTD: image->compression=ZstdCompression; break;
#endif
default: image->compression=RLECompression; break;
}
quantum_info=(QuantumInfo *) NULL;
if ((photometric == PHOTOMETRIC_PALETTE) &&
(pow(2.0,1.0*bits_per_sample) <= MaxColormapSize))
{
size_t
colors;
colors=(size_t) GetQuantumRange(bits_per_sample)+1;
if (AcquireImageColormap(image,colors,exception) == MagickFalse)
{
TIFFClose(tiff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
}
value=(unsigned short) image->scene;
if (TIFFGetFieldDefaulted(tiff,TIFFTAG_PAGENUMBER,&value,&pages,sans) == 1)
image->scene=value;
if (image->storage_class == PseudoClass)
{
size_t
range;
uint16
*blue_colormap = (uint16 *) NULL,
*green_colormap = (uint16 *) NULL,
*red_colormap = (uint16 *) NULL;
/*
Initialize colormap.
*/
tiff_status=TIFFGetField(tiff,TIFFTAG_COLORMAP,&red_colormap,
&green_colormap,&blue_colormap);
if (tiff_status == 1)
{
if ((red_colormap != (uint16 *) NULL) &&
(green_colormap != (uint16 *) NULL) &&
(blue_colormap != (uint16 *) NULL))
{
range=255; /* might be old style 8-bit colormap */
for (i=0; i < (ssize_t) image->colors; i++)
if ((red_colormap[i] >= 256) || (green_colormap[i] >= 256) ||
(blue_colormap[i] >= 256))
{
range=65535;
break;
}
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=ClampToQuantum(((double)
QuantumRange*red_colormap[i])/range);
image->colormap[i].green=ClampToQuantum(((double)
QuantumRange*green_colormap[i])/range);
image->colormap[i].blue=ClampToQuantum(((double)
QuantumRange*blue_colormap[i])/range);
}
}
}
}
if (image_info->ping != MagickFalse)
{
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
goto next_tiff_frame;
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
{
TIFFClose(tiff);
return(DestroyImageList(image));
}
status=SetImageColorspace(image,image->colorspace,exception);
status&=ResetImagePixels(image,exception);
if (status == MagickFalse)
{
TIFFClose(tiff);
return(DestroyImageList(image));
}
/*
Allocate memory for the image and pixel buffer.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
if (sample_format == SAMPLEFORMAT_UINT)
status=SetQuantumFormat(image,quantum_info,UnsignedQuantumFormat);
if (sample_format == SAMPLEFORMAT_INT)
status=SetQuantumFormat(image,quantum_info,SignedQuantumFormat);
if (sample_format == SAMPLEFORMAT_IEEEFP)
status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
if (status == MagickFalse)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
status=MagickTrue;
switch (photometric)
{
case PHOTOMETRIC_MINISBLACK:
{
quantum_info->min_is_white=MagickFalse;
break;
}
case PHOTOMETRIC_MINISWHITE:
{
quantum_info->min_is_white=MagickTrue;
break;
}
default:
break;
}
extra_samples=0;
tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_EXTRASAMPLES,&extra_samples,
&sample_info,sans);
if (tiff_status == 1)
{
(void) SetImageProperty(image,"tiff:alpha","unspecified",exception);
if (extra_samples == 0)
{
if ((samples_per_pixel == 4) && (photometric == PHOTOMETRIC_RGB))
image->alpha_trait=BlendPixelTrait;
}
else
for (i=0; i < extra_samples; i++)
{
image->alpha_trait=BlendPixelTrait;
if (sample_info[i] == EXTRASAMPLE_ASSOCALPHA)
{
SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha);
(void) SetImageProperty(image,"tiff:alpha","associated",
exception);
}
else
if (sample_info[i] == EXTRASAMPLE_UNASSALPHA)
{
SetQuantumAlphaType(quantum_info,DisassociatedQuantumAlpha);
(void) SetImageProperty(image,"tiff:alpha","unassociated",
exception);
}
}
}
if (image->alpha_trait != UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
method=ReadGenericMethod;
rows_per_strip=(uint32) image->rows;
if (TIFFGetField(tiff,TIFFTAG_ROWSPERSTRIP,&rows_per_strip) == 1)
{
char
buffer[MagickPathExtent];
(void) FormatLocaleString(buffer,MagickPathExtent,"%u",
(unsigned int) rows_per_strip);
(void) SetImageProperty(image,"tiff:rows-per-strip",buffer,exception);
method=ReadStripMethod;
if (rows_per_strip > (uint32) image->rows)
rows_per_strip=(uint32) image->rows;
}
if (TIFFIsTiled(tiff) != MagickFalse)
{
uint32
columns,
rows;
if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) ||
(TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1))
ThrowTIFFException(CoderError,"ImageIsNotTiled");
if ((AcquireMagickResource(WidthResource,columns) == MagickFalse) ||
(AcquireMagickResource(HeightResource,rows) == MagickFalse))
ThrowTIFFException(ImageError,"WidthOrHeightExceedsLimit");
method=ReadTileMethod;
}
if ((photometric == PHOTOMETRIC_LOGLUV) ||
(compress_tag == COMPRESSION_CCITTFAX3))
method=ReadGenericMethod;
if (image->compression == JPEGCompression)
method=GetJPEGMethod(image,tiff,photometric,bits_per_sample,
samples_per_pixel);
quantum_info->endian=LSBEndian;
scanline_size=TIFFScanlineSize(tiff);
if (scanline_size <= 0)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
number_pixels=MagickMax((MagickSizeType) image->columns*samples_per_pixel*
pow(2.0,ceil(log(bits_per_sample)/log(2.0))),image->columns*
rows_per_strip);
if ((double) scanline_size > 1.5*number_pixels)
ThrowTIFFException(CorruptImageError,"CorruptImage");
number_pixels=MagickMax((MagickSizeType) scanline_size,number_pixels);
pixel_info=AcquireVirtualMemory(number_pixels,sizeof(uint32));
if (pixel_info == (MemoryInfo *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
(void) memset(pixels,0,number_pixels*sizeof(uint32));
quantum_type=GrayQuantum;
if (image->storage_class == PseudoClass)
quantum_type=IndexQuantum;
if (interlace != PLANARCONFIG_SEPARATE)
{
size_t
pad;
pad=(size_t) MagickMax((ssize_t) samples_per_pixel-1,0);
if (image->alpha_trait != UndefinedPixelTrait)
{
if (image->storage_class == PseudoClass)
quantum_type=IndexAlphaQuantum;
else
quantum_type=samples_per_pixel == 1 ? AlphaQuantum :
GrayAlphaQuantum;
}
if ((samples_per_pixel > 2) && (interlace != PLANARCONFIG_SEPARATE))
{
quantum_type=RGBQuantum;
pad=(size_t) MagickMax((size_t) samples_per_pixel-3,0);
if (image->alpha_trait != UndefinedPixelTrait)
{
quantum_type=RGBAQuantum;
pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0);
}
if (image->colorspace == CMYKColorspace)
{
quantum_type=CMYKQuantum;
pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0);
if (image->alpha_trait != UndefinedPixelTrait)
{
quantum_type=CMYKAQuantum;
pad=(size_t) MagickMax((size_t) samples_per_pixel-5,0);
}
}
status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >>
3));
if (status == MagickFalse)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
}
}
switch (method)
{
case ReadYCCKMethod:
{
/*
Convert YCC TIFF image.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
unsigned char
*p;
tiff_status=TIFFReadPixels(tiff,0,y,(char *) pixels);
if (tiff_status == -1)
break;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
p=pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelCyan(image,ScaleCharToQuantum(ClampYCC((double) *p+
(1.402*(double) *(p+2))-179.456)),q);
SetPixelMagenta(image,ScaleCharToQuantum(ClampYCC((double) *p-
(0.34414*(double) *(p+1))-(0.71414*(double ) *(p+2))+
135.45984)),q);
SetPixelYellow(image,ScaleCharToQuantum(ClampYCC((double) *p+
(1.772*(double) *(p+1))-226.816)),q);
SetPixelBlack(image,ScaleCharToQuantum((unsigned char) *(p+3)),q);
q+=GetPixelChannels(image);
p+=4;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case ReadStripMethod:
{
unsigned char
*p;
size_t
extent;
ssize_t
stride,
strip_id;
tsize_t
strip_size;
unsigned char
*strip_pixels;
/*
Convert stripped TIFF image.
*/
extent=(samples_per_pixel+1)*TIFFStripSize(tiff);
#if defined(TIFF_VERSION_BIG)
extent+=image->columns*sizeof(uint64);
#else
extent+=image->columns*sizeof(uint32);
#endif
strip_pixels=(unsigned char *) AcquireQuantumMemory(extent,
sizeof(*strip_pixels));
if (strip_pixels == (unsigned char *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(strip_pixels,0,extent*sizeof(*strip_pixels));
stride=TIFFVStripSize(tiff,1);
strip_id=0;
p=strip_pixels;
for (i=0; i < (ssize_t) samples_per_pixel; i++)
{
size_t
rows_remaining;
switch (i)
{
case 0: break;
case 1: quantum_type=GreenQuantum; break;
case 2: quantum_type=BlueQuantum; break;
case 3:
{
quantum_type=AlphaQuantum;
if (image->colorspace == CMYKColorspace)
quantum_type=BlackQuantum;
break;
}
case 4: quantum_type=AlphaQuantum; break;
default: break;
}
rows_remaining=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
if (rows_remaining == 0)
{
strip_size=TIFFReadEncodedStrip(tiff,strip_id,strip_pixels,
TIFFStripSize(tiff));
if (strip_size == -1)
break;
rows_remaining=rows_per_strip;
if ((y+rows_per_strip) > (ssize_t) image->rows)
rows_remaining=(rows_per_strip-(y+rows_per_strip-
image->rows));
p=strip_pixels;
strip_id++;
}
(void) ImportQuantumPixels(image,(CacheView *) NULL,
quantum_info,quantum_type,p,exception);
p+=stride;
rows_remaining--;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE))
break;
}
strip_pixels=(unsigned char *) RelinquishMagickMemory(strip_pixels);
break;
}
case ReadTileMethod:
{
unsigned char
*p;
size_t
extent;
uint32
columns,
rows;
unsigned char
*tile_pixels;
/*
Convert tiled TIFF image.
*/
if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) ||
(TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1))
ThrowTIFFException(CoderError,"ImageIsNotTiled");
number_pixels=(MagickSizeType) columns*rows;
if (HeapOverflowSanityCheck(rows,sizeof(*tile_pixels)) != MagickFalse)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
extent=4*MagickMax(rows*TIFFTileRowSize(tiff),TIFFTileSize(tiff));
#if defined(TIFF_VERSION_BIG)
extent+=image->columns*sizeof(uint64);
#else
extent+=image->columns*sizeof(uint32);
#endif
tile_pixels=(unsigned char *) AcquireQuantumMemory(extent,
sizeof(*tile_pixels));
if (tile_pixels == (unsigned char *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(tile_pixels,0,extent*sizeof(*tile_pixels));
for (i=0; i < (ssize_t) samples_per_pixel; i++)
{
switch (i)
{
case 0: break;
case 1: quantum_type=GreenQuantum; break;
case 2: quantum_type=BlueQuantum; break;
case 3:
{
quantum_type=AlphaQuantum;
if (image->colorspace == CMYKColorspace)
quantum_type=BlackQuantum;
break;
}
case 4: quantum_type=AlphaQuantum; break;
default: break;
}
for (y=0; y < (ssize_t) image->rows; y+=rows)
{
ssize_t
x;
size_t
rows_remaining;
rows_remaining=image->rows-y;
if ((ssize_t) (y+rows) < (ssize_t) image->rows)
rows_remaining=rows;
for (x=0; x < (ssize_t) image->columns; x+=columns)
{
size_t
columns_remaining,
row;
columns_remaining=image->columns-x;
if ((ssize_t) (x+columns) < (ssize_t) image->columns)
columns_remaining=columns;
tiff_status=TIFFReadTile(tiff,tile_pixels,(uint32) x,(uint32) y,
0,i);
if (tiff_status == -1)
break;
p=tile_pixels;
for (row=0; row < rows_remaining; row++)
{
Quantum
*magick_restrict q;
q=GetAuthenticPixels(image,x,y+row,columns_remaining,1,
exception);
if (q == (Quantum *) NULL)
break;
(void) ImportQuantumPixels(image,(CacheView *) NULL,
quantum_info,quantum_type,p,exception);
p+=TIFFTileRowSize(tiff);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
}
if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE))
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) i,
samples_per_pixel);
if (status == MagickFalse)
break;
}
}
tile_pixels=(unsigned char *) RelinquishMagickMemory(tile_pixels);
break;
}
case ReadGenericMethod:
default:
{
MemoryInfo
*generic_info = (MemoryInfo * ) NULL;
uint32
*p;
/*
Convert generic TIFF image.
*/
if (HeapOverflowSanityCheck(image->rows,sizeof(*pixels)) != MagickFalse)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
number_pixels=(MagickSizeType) image->columns*image->rows;
#if defined(TIFF_VERSION_BIG)
number_pixels+=image->columns*sizeof(uint64);
#else
number_pixels+=image->columns*sizeof(uint32);
#endif
generic_info=AcquireVirtualMemory(number_pixels,sizeof(uint32));
if (generic_info == (MemoryInfo *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
p=(uint32 *) GetVirtualMemoryBlob(generic_info);
tiff_status=TIFFReadRGBAImage(tiff,(uint32) image->columns,(uint32)
image->rows,(uint32 *) p,0);
if (tiff_status == -1)
{
generic_info=RelinquishVirtualMemory(generic_info);
break;
}
p+=(image->columns*image->rows)-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
q+=GetPixelChannels(image)*(image->columns-1);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
TIFFGetR(*p)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
TIFFGetG(*p)),q);
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
TIFFGetB(*p)),q);
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
TIFFGetA(*p)),q);
p--;
q-=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
generic_info=RelinquishVirtualMemory(generic_info);
break;
}
}
pixel_info=RelinquishVirtualMemory(pixel_info);
SetQuantumImageType(image,quantum_type);
next_tiff_frame:
if (quantum_info != (QuantumInfo *) NULL)
quantum_info=DestroyQuantumInfo(quantum_info);
if (tiff_status == -1)
{
status=MagickFalse;
break;
}
if (photometric == PHOTOMETRIC_CIELAB)
DecodeLabImage(image,exception);
if ((photometric == PHOTOMETRIC_LOGL) ||
(photometric == PHOTOMETRIC_MINISBLACK) ||
(photometric == PHOTOMETRIC_MINISWHITE))
{
image->type=GrayscaleType;
if (bits_per_sample == 1)
image->type=BilevelType;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
more_frames=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse;
if (more_frames != MagickFalse)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
status=MagickFalse;
break;
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,image->scene-1,
image->scene);
if (status == MagickFalse)
break;
}
} while ((status != MagickFalse) && (more_frames != MagickFalse));
TIFFClose(tiff);
if (status != MagickFalse)
TIFFReadPhotoshopLayers(image_info,image,exception);
if ((image_info->number_scenes != 0) &&
(image_info->scene >= GetImageListLength(image)))
status=MagickFalse;
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
}
| 0
|
210,271
|
sug_filltree(spellinfo_T *spin, slang_T *slang)
{
char_u *byts;
idx_T *idxs;
int depth;
idx_T arridx[MAXWLEN];
int curi[MAXWLEN];
char_u tword[MAXWLEN];
char_u tsalword[MAXWLEN];
int c;
idx_T n;
unsigned words_done = 0;
int wordcount[MAXWLEN];
// We use si_foldroot for the soundfolded trie.
spin->si_foldroot = wordtree_alloc(spin);
if (spin->si_foldroot == NULL)
return FAIL;
// let tree_add_word() know we're adding to the soundfolded tree
spin->si_sugtree = TRUE;
/*
* Go through the whole case-folded tree, soundfold each word and put it
* in the trie.
*/
byts = slang->sl_fbyts;
idxs = slang->sl_fidxs;
arridx[0] = 0;
curi[0] = 1;
wordcount[0] = 0;
depth = 0;
while (depth >= 0 && !got_int)
{
if (curi[depth] > byts[arridx[depth]])
{
// Done all bytes at this node, go up one level.
idxs[arridx[depth]] = wordcount[depth];
if (depth > 0)
wordcount[depth - 1] += wordcount[depth];
--depth;
line_breakcheck();
}
else
{
// Do one more byte at this node.
n = arridx[depth] + curi[depth];
++curi[depth];
c = byts[n];
if (c == 0)
{
// Sound-fold the word.
tword[depth] = NUL;
spell_soundfold(slang, tword, TRUE, tsalword);
// We use the "flags" field for the MSB of the wordnr,
// "region" for the LSB of the wordnr.
if (tree_add_word(spin, tsalword, spin->si_foldroot,
words_done >> 16, words_done & 0xffff,
0) == FAIL)
return FAIL;
++words_done;
++wordcount[depth];
// Reset the block count each time to avoid compression
// kicking in.
spin->si_blocks_cnt = 0;
// Skip over any other NUL bytes (same word with different
// flags). But don't go over the end.
while (n + 1 < slang->sl_fbyts_len && byts[n + 1] == 0)
{
++n;
++curi[depth];
}
}
else
{
// Normal char, go one level deeper.
tword[depth++] = c;
arridx[depth] = idxs[n];
curi[depth] = 1;
wordcount[depth] = 0;
}
}
}
smsg(_("Total number of words: %d"), words_done);
return OK;
}
| 1
|
250,686
|
std::string HttpFile::getMd5() const
{
return implPtr_->getMd5();
}
| 0
|
245,692
|
njs_array_length_set(njs_vm_t *vm, njs_value_t *value,
njs_object_prop_t *prev, njs_value_t *setval)
{
double num, idx;
int64_t prev_length;
uint32_t i, length;
njs_int_t ret;
njs_array_t *array, *keys;
array = njs_object_proto_lookup(njs_object(value), NJS_ARRAY, njs_array_t);
if (njs_slow_path(array == NULL)) {
return NJS_DECLINED;
}
ret = njs_value_to_number(vm, setval, &num);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
length = (uint32_t) njs_number_to_length(num);
if ((double) length != num) {
njs_range_error(vm, "Invalid array length");
return NJS_ERROR;
}
ret = njs_value_to_length(vm, &prev->value, &prev_length);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
keys = NULL;
if (length < prev_length) {
keys = njs_array_indices(vm, value);
if (njs_slow_path(keys == NULL)) {
return NJS_ERROR;
}
if (keys->length != 0) {
i = keys->length - 1;
do {
idx = njs_string_to_index(&keys->start[i]);
if (idx >= length) {
ret = njs_value_property_delete(vm, value, &keys->start[i],
NULL, 1);
if (njs_slow_path(ret == NJS_ERROR)) {
goto done;
}
}
} while (i-- != 0);
}
}
ret = njs_array_length_redefine(vm, value, length);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
ret = NJS_OK;
done:
if (keys != NULL) {
njs_array_destroy(vm, keys);
}
return ret;
}
| 0
|
291,764
|
static void destroy_con(struct rtrs_clt_con *con)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
clt_path->s.con[con->c.cid] = NULL;
mutex_destroy(&con->con_mutex);
kfree(con);
}
| 0
|
259,222
|
static int avif_add_stream(MOVContext *c, int item_id)
{
MOVStreamContext *sc;
AVStream *st;
int item_index = -1;
for (int i = 0; i < c->avif_info_size; i++)
if (c->avif_info[i].item_id == item_id) {
item_index = i;
break;
}
if (item_index < 0)
return AVERROR_INVALIDDATA;
st = avformat_new_stream(c->fc, NULL);
if (!st)
return AVERROR(ENOMEM);
st->id = c->fc->nb_streams;
sc = av_mallocz(sizeof(MOVStreamContext));
if (!sc)
return AVERROR(ENOMEM);
st->priv_data = sc;
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_AV1;
sc->ffindex = st->index;
c->trak_index = st->index;
st->avg_frame_rate.num = st->avg_frame_rate.den = 1;
st->time_base.num = st->time_base.den = 1;
st->nb_frames = 1;
sc->time_scale = 1;
sc = st->priv_data;
sc->pb = c->fc->pb;
sc->pb_is_copied = 1;
// Populate the necessary fields used by mov_build_index.
sc->stsc_count = 1;
sc->stsc_data = av_malloc_array(1, sizeof(*sc->stsc_data));
if (!sc->stsc_data)
return AVERROR(ENOMEM);
sc->stsc_data[0].first = 1;
sc->stsc_data[0].count = 1;
sc->stsc_data[0].id = 1;
sc->chunk_count = 1;
sc->chunk_offsets = av_malloc_array(1, sizeof(*sc->chunk_offsets));
if (!sc->chunk_offsets)
return AVERROR(ENOMEM);
sc->sample_count = 1;
sc->sample_sizes = av_malloc_array(1, sizeof(*sc->sample_sizes));
if (!sc->sample_sizes)
return AVERROR(ENOMEM);
sc->stts_count = 1;
sc->stts_data = av_malloc_array(1, sizeof(*sc->stts_data));
if (!sc->stts_data)
return AVERROR(ENOMEM);
sc->stts_data[0].count = 1;
// Not used for still images. But needed by mov_build_index.
sc->stts_data[0].duration = 0;
sc->sample_sizes[0] = c->avif_info[item_index].extent_length;
sc->chunk_offsets[0] = c->avif_info[item_index].extent_offset;
mov_build_index(c, st);
return 0;
}
| 0
|
424,531
|
static UINT video_control_on_close(IWTSVirtualChannelCallback* pChannelCallback)
{
free(pChannelCallback);
return CHANNEL_RC_OK;
}
| 0
|
244,301
|
GF_Err sbgp_box_read(GF_Box *s, GF_BitStream *bs)
{
u32 i;
GF_SampleGroupBox *ptr = (GF_SampleGroupBox *)s;
ISOM_DECREASE_SIZE(ptr, 8);
ptr->grouping_type = gf_bs_read_u32(bs);
if (ptr->version==1) {
ISOM_DECREASE_SIZE(ptr, 4);
ptr->grouping_type_parameter = gf_bs_read_u32(bs);
}
ptr->entry_count = gf_bs_read_u32(bs);
if (ptr->size < sizeof(GF_SampleGroupEntry)*ptr->entry_count || (u64)ptr->entry_count > (u64)SIZE_MAX/sizeof(GF_SampleGroupEntry))
return GF_ISOM_INVALID_FILE;
ptr->sample_entries = gf_malloc(sizeof(GF_SampleGroupEntry)*ptr->entry_count);
if (!ptr->sample_entries) return GF_OUT_OF_MEM;
for (i=0; i<ptr->entry_count; i++) {
ISOM_DECREASE_SIZE(ptr, 8);
ptr->sample_entries[i].sample_count = gf_bs_read_u32(bs);
ptr->sample_entries[i].group_description_index = gf_bs_read_u32(bs);
}
return GF_OK;
}
| 0
|
221,508
|
flatpak_run_add_app_info_args (FlatpakBwrap *bwrap,
GFile *app_files,
GFile *original_app_files,
GBytes *app_deploy_data,
const char *app_extensions,
GFile *runtime_files,
GFile *original_runtime_files,
GBytes *runtime_deploy_data,
const char *runtime_extensions,
const char *app_id,
const char *app_branch,
FlatpakDecomposed *runtime_ref,
GFile *app_id_dir,
FlatpakContext *final_app_context,
FlatpakContext *cmdline_context,
gboolean sandbox,
gboolean build,
gboolean devel,
char **app_info_path_out,
int instance_id_fd,
char **instance_id_host_dir_out,
GError **error)
{
g_autofree char *info_path = NULL;
g_autofree char *bwrapinfo_path = NULL;
int fd, fd2, fd3;
g_autoptr(GKeyFile) keyfile = NULL;
g_autofree char *runtime_path = NULL;
const char *group;
g_autofree char *instance_id = NULL;
glnx_autofd int lock_fd = -1;
g_autofree char *instance_id_host_dir = NULL;
g_autofree char *instance_id_sandbox_dir = NULL;
g_autofree char *instance_id_lock_file = NULL;
g_autofree char *arch = flatpak_decomposed_dup_arch (runtime_ref);
g_return_val_if_fail (app_id != NULL, FALSE);
instance_id = flatpak_instance_allocate_id (&instance_id_host_dir, &lock_fd);
if (instance_id == NULL)
return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Unable to allocate instance id"));
instance_id_sandbox_dir = g_strdup_printf ("/run/flatpak/.flatpak/%s", instance_id);
instance_id_lock_file = g_build_filename (instance_id_sandbox_dir, ".ref", NULL);
flatpak_bwrap_add_args (bwrap,
"--ro-bind",
instance_id_host_dir,
instance_id_sandbox_dir,
"--lock-file",
instance_id_lock_file,
NULL);
flatpak_bwrap_add_runtime_dir_member (bwrap, ".flatpak");
/* Keep the .ref lock held until we've started bwrap to avoid races */
flatpak_bwrap_add_noinherit_fd (bwrap, glnx_steal_fd (&lock_fd));
info_path = g_build_filename (instance_id_host_dir, "info", NULL);
keyfile = g_key_file_new ();
if (original_app_files)
group = FLATPAK_METADATA_GROUP_APPLICATION;
else
group = FLATPAK_METADATA_GROUP_RUNTIME;
g_key_file_set_string (keyfile, group, FLATPAK_METADATA_KEY_NAME, app_id);
g_key_file_set_string (keyfile, group, FLATPAK_METADATA_KEY_RUNTIME,
flatpak_decomposed_get_ref (runtime_ref));
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_INSTANCE_ID, instance_id);
if (app_id_dir)
{
g_autofree char *instance_path = g_file_get_path (app_id_dir);
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_INSTANCE_PATH, instance_path);
}
if (app_files)
{
g_autofree char *app_path = g_file_get_path (app_files);
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_APP_PATH, app_path);
}
if (original_app_files != NULL && original_app_files != app_files)
{
g_autofree char *app_path = g_file_get_path (original_app_files);
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_ORIGINAL_APP_PATH, app_path);
}
if (app_deploy_data)
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_APP_COMMIT, flatpak_deploy_data_get_commit (app_deploy_data));
if (app_extensions && *app_extensions != 0)
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_APP_EXTENSIONS, app_extensions);
runtime_path = g_file_get_path (runtime_files);
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_RUNTIME_PATH, runtime_path);
if (runtime_files != original_runtime_files)
{
g_autofree char *path = g_file_get_path (original_runtime_files);
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_ORIGINAL_RUNTIME_PATH, path);
}
if (runtime_deploy_data)
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_RUNTIME_COMMIT, flatpak_deploy_data_get_commit (runtime_deploy_data));
if (runtime_extensions && *runtime_extensions != 0)
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_RUNTIME_EXTENSIONS, runtime_extensions);
if (app_branch != NULL)
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_BRANCH, app_branch);
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_ARCH, arch);
g_key_file_set_string (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_FLATPAK_VERSION, PACKAGE_VERSION);
if ((final_app_context->sockets & FLATPAK_CONTEXT_SOCKET_SESSION_BUS) == 0)
g_key_file_set_boolean (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_SESSION_BUS_PROXY, TRUE);
if ((final_app_context->sockets & FLATPAK_CONTEXT_SOCKET_SYSTEM_BUS) == 0)
g_key_file_set_boolean (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_SYSTEM_BUS_PROXY, TRUE);
if (sandbox)
g_key_file_set_boolean (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_SANDBOX, TRUE);
if (build)
g_key_file_set_boolean (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_BUILD, TRUE);
if (devel)
g_key_file_set_boolean (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_DEVEL, TRUE);
if (cmdline_context)
{
g_autoptr(GPtrArray) cmdline_args = g_ptr_array_new_with_free_func (g_free);
flatpak_context_to_args (cmdline_context, cmdline_args);
if (cmdline_args->len > 0)
{
g_key_file_set_string_list (keyfile, FLATPAK_METADATA_GROUP_INSTANCE,
FLATPAK_METADATA_KEY_EXTRA_ARGS,
(const char * const *) cmdline_args->pdata,
cmdline_args->len);
}
}
flatpak_context_save_metadata (final_app_context, TRUE, keyfile);
if (!g_key_file_save_to_file (keyfile, info_path, error))
return FALSE;
/* We want to create a file on /.flatpak-info that the app cannot modify, which
we do by creating a read-only bind mount. This way one can openat()
/proc/$pid/root, and if that succeeds use openat via that to find the
unfakable .flatpak-info file. However, there is a tiny race in that if
you manage to open /proc/$pid/root, but then the pid dies, then
every mount but the root is unmounted in the namespace, so the
.flatpak-info will be empty. We fix this by first creating a real file
with the real info in, then bind-mounting on top of that, the same info.
This way even if the bind-mount is unmounted we can find the real data.
*/
fd = open (info_path, O_RDONLY);
if (fd == -1)
{
int errsv = errno;
g_set_error (error, G_IO_ERROR, g_io_error_from_errno (errsv),
_("Failed to open flatpak-info file: %s"), g_strerror (errsv));
return FALSE;
}
fd2 = open (info_path, O_RDONLY);
if (fd2 == -1)
{
close (fd);
int errsv = errno;
g_set_error (error, G_IO_ERROR, g_io_error_from_errno (errsv),
_("Failed to open flatpak-info file: %s"), g_strerror (errsv));
return FALSE;
}
flatpak_bwrap_add_args_data_fd (bwrap,
"--file", fd, "/.flatpak-info");
flatpak_bwrap_add_args_data_fd (bwrap,
"--ro-bind-data", fd2, "/.flatpak-info");
/* Tell the application that it's running under Flatpak in a generic way. */
flatpak_bwrap_add_args (bwrap,
"--setenv", "container", "flatpak",
NULL);
if (!flatpak_bwrap_add_args_data (bwrap,
"container-manager",
"flatpak\n", -1,
"/run/host/container-manager",
error))
return FALSE;
bwrapinfo_path = g_build_filename (instance_id_host_dir, "bwrapinfo.json", NULL);
fd3 = open (bwrapinfo_path, O_RDWR | O_CREAT, 0644);
if (fd3 == -1)
{
close (fd);
close (fd2);
int errsv = errno;
g_set_error (error, G_IO_ERROR, g_io_error_from_errno (errsv),
_("Failed to open bwrapinfo.json file: %s"), g_strerror (errsv));
return FALSE;
}
/* NOTE: It is important that this takes place after bwrapinfo.json is created,
otherwise start notifications in the portal may not work. */
if (instance_id_fd != -1)
{
gsize instance_id_position = 0;
gsize instance_id_size = strlen (instance_id);
while (instance_id_size > 0)
{
gssize bytes_written = write (instance_id_fd, instance_id + instance_id_position, instance_id_size);
if (G_UNLIKELY (bytes_written <= 0))
{
int errsv = bytes_written == -1 ? errno : ENOSPC;
if (errsv == EINTR)
continue;
close (fd);
close (fd2);
close (fd3);
g_set_error (error, G_IO_ERROR, g_io_error_from_errno (errsv),
_("Failed to write to instance id fd: %s"), g_strerror (errsv));
return FALSE;
}
instance_id_position += bytes_written;
instance_id_size -= bytes_written;
}
close (instance_id_fd);
}
flatpak_bwrap_add_args_data_fd (bwrap, "--info-fd", fd3, NULL);
if (app_info_path_out != NULL)
*app_info_path_out = g_strdup_printf ("/proc/self/fd/%d", fd);
if (instance_id_host_dir_out != NULL)
*instance_id_host_dir_out = g_steal_pointer (&instance_id_host_dir);
return TRUE;
}
| 0
|
291,794
|
static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
struct rtrs_clt_io_req *req,
struct rtrs_rbuf *rbuf, u32 off,
u32 imm, struct ib_send_wr *wr)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
enum ib_send_flags flags;
struct ib_sge sge;
if (!req->sg_size) {
rtrs_wrn(con->c.path,
"Doing RDMA Write failed, no data supplied\n");
return -EINVAL;
}
/* user data and user message in the first list element */
sge.addr = req->iu->dma_addr;
sge.length = req->sg_size;
sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
/*
* From time to time we have to post signalled sends,
* or send queue will fill up and only QP reset can help.
*/
flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
0 : IB_SEND_SIGNALED;
ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
req->iu->dma_addr,
req->sg_size, DMA_TO_DEVICE);
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
rbuf->rkey, rbuf->addr + off,
imm, flags, wr, NULL);
}
| 0
|
473,912
|
euckr_is_code_ctype(OnigCodePoint code, unsigned int ctype, OnigEncoding enc)
{
return onigenc_mb2_is_code_ctype(enc, code, ctype);
}
| 0
|
226,398
|
void def_parent_full_box_del(GF_Box *s)
{
if (s) gf_free(s);
}
| 0
|
279,916
|
set_old_sub(char_u *val)
{
vim_free(old_sub);
old_sub = val;
}
| 0
|
462,440
|
addSess(ptcplstn_t *pLstn, int sock, prop_t *peerName, prop_t *peerIP)
{
DEFiRet;
ptcpsess_t *pSess = NULL;
ptcpsrv_t *pSrv = pLstn->pSrv;
CHKmalloc(pSess = malloc(sizeof(ptcpsess_t)));
CHKmalloc(pSess->pMsg = malloc(iMaxLine));
pSess->pLstn = pLstn;
pSess->sock = sock;
pSess->bSuppOctetFram = pLstn->bSuppOctetFram;
pSess->bSPFramingFix = pLstn->bSPFramingFix;
pSess->inputState = eAtStrtFram;
pSess->iMsg = 0;
pSess->bzInitDone = 0;
pSess->bAtStrtOfFram = 1;
pSess->peerName = peerName;
pSess->peerIP = peerIP;
pSess->compressionMode = pLstn->pSrv->compressionMode;
/* add to start of server's listener list */
pSess->prev = NULL;
pthread_mutex_lock(&pSrv->mutSessLst);
pSess->next = pSrv->pSess;
if(pSrv->pSess != NULL)
pSrv->pSess->prev = pSess;
pSrv->pSess = pSess;
pthread_mutex_unlock(&pSrv->mutSessLst);
CHKiRet(addEPollSock(epolld_sess, pSess, sock, &pSess->epd));
finalize_it:
if(iRet != RS_RET_OK) {
if(pSess != NULL) {
if(pSess->pMsg != NULL)
free(pSess->pMsg);
free(pSess);
}
}
RETiRet;
}
| 0
|
332,385
|
op_insert(oparg_T *oap, long count1)
{
long ins_len, pre_textlen = 0;
char_u *firstline, *ins_text;
colnr_T ind_pre_col = 0, ind_post_col;
int ind_pre_vcol = 0, ind_post_vcol = 0;
struct block_def bd;
int i;
pos_T t1;
pos_T start_insert;
// offset when cursor was moved in insert mode
int offset = 0;
// edit() changes this - record it for OP_APPEND
bd.is_MAX = (curwin->w_curswant == MAXCOL);
// vis block is still marked. Get rid of it now.
curwin->w_cursor.lnum = oap->start.lnum;
update_screen(INVERTED);
if (oap->block_mode)
{
// When 'virtualedit' is used, need to insert the extra spaces before
// doing block_prep(). When only "block" is used, virtual edit is
// already disabled, but still need it when calling
// coladvance_force().
// coladvance_force() uses get_ve_flags() to get the 'virtualedit'
// state for the current window. To override that state, we need to
// set the window-local value of ve_flags rather than the global value.
if (curwin->w_cursor.coladd > 0)
{
int old_ve_flags = curwin->w_ve_flags;
if (u_save_cursor() == FAIL)
return;
curwin->w_ve_flags = VE_ALL;
coladvance_force(oap->op_type == OP_APPEND
? oap->end_vcol + 1 : getviscol());
if (oap->op_type == OP_APPEND)
--curwin->w_cursor.col;
curwin->w_ve_flags = old_ve_flags;
}
// Get the info about the block before entering the text
block_prep(oap, &bd, oap->start.lnum, TRUE);
// Get indent information
ind_pre_col = (colnr_T)getwhitecols_curline();
ind_pre_vcol = get_indent();
firstline = ml_get(oap->start.lnum) + bd.textcol;
if (oap->op_type == OP_APPEND)
firstline += bd.textlen;
pre_textlen = (long)STRLEN(firstline);
}
if (oap->op_type == OP_APPEND)
{
if (oap->block_mode && curwin->w_cursor.coladd == 0)
{
// Move the cursor to the character right of the block.
curwin->w_set_curswant = TRUE;
while (*ml_get_cursor() != NUL
&& (curwin->w_cursor.col < bd.textcol + bd.textlen))
++curwin->w_cursor.col;
if (bd.is_short && !bd.is_MAX)
{
// First line was too short, make it longer and adjust the
// values in "bd".
if (u_save_cursor() == FAIL)
return;
for (i = 0; i < bd.endspaces; ++i)
ins_char(' ');
bd.textlen += bd.endspaces;
}
}
else
{
curwin->w_cursor = oap->end;
check_cursor_col();
// Works just like an 'i'nsert on the next character.
if (!LINEEMPTY(curwin->w_cursor.lnum)
&& oap->start_vcol != oap->end_vcol)
inc_cursor();
}
}
t1 = oap->start;
start_insert = curwin->w_cursor;
(void)edit(NUL, FALSE, (linenr_T)count1);
// When a tab was inserted, and the characters in front of the tab
// have been converted to a tab as well, the column of the cursor
// might have actually been reduced, so need to adjust here.
if (t1.lnum == curbuf->b_op_start_orig.lnum
&& LT_POS(curbuf->b_op_start_orig, t1))
oap->start = curbuf->b_op_start_orig;
// If user has moved off this line, we don't know what to do, so do
// nothing.
// Also don't repeat the insert when Insert mode ended with CTRL-C.
if (curwin->w_cursor.lnum != oap->start.lnum || got_int)
return;
if (oap->block_mode)
{
struct block_def bd2;
int did_indent = FALSE;
size_t len;
int add;
// If indent kicked in, the firstline might have changed
// but only do that, if the indent actually increased.
ind_post_col = (colnr_T)getwhitecols_curline();
if (curbuf->b_op_start.col > ind_pre_col && ind_post_col > ind_pre_col)
{
bd.textcol += ind_post_col - ind_pre_col;
ind_post_vcol = get_indent();
bd.start_vcol += ind_post_vcol - ind_pre_vcol;
did_indent = TRUE;
}
// The user may have moved the cursor before inserting something, try
// to adjust the block for that. But only do it, if the difference
// does not come from indent kicking in.
if (oap->start.lnum == curbuf->b_op_start_orig.lnum
&& !bd.is_MAX && !did_indent)
{
int t = getviscol2(curbuf->b_op_start_orig.col,
curbuf->b_op_start_orig.coladd);
if (!bd.is_MAX)
{
if (oap->op_type == OP_INSERT
&& oap->start.col + oap->start.coladd
!= curbuf->b_op_start_orig.col
+ curbuf->b_op_start_orig.coladd)
{
oap->start.col = curbuf->b_op_start_orig.col;
pre_textlen -= t - oap->start_vcol;
oap->start_vcol = t;
}
else if (oap->op_type == OP_APPEND
&& oap->start.col + oap->start.coladd
>= curbuf->b_op_start_orig.col
+ curbuf->b_op_start_orig.coladd)
{
oap->start.col = curbuf->b_op_start_orig.col;
// reset pre_textlen to the value of OP_INSERT
pre_textlen += bd.textlen;
pre_textlen -= t - oap->start_vcol;
oap->start_vcol = t;
oap->op_type = OP_INSERT;
}
}
else if (bd.is_MAX && oap->op_type == OP_APPEND)
{
// reset pre_textlen to the value of OP_INSERT
pre_textlen += bd.textlen;
pre_textlen -= t - oap->start_vcol;
}
}
// Spaces and tabs in the indent may have changed to other spaces and
// tabs. Get the starting column again and correct the length.
// Don't do this when "$" used, end-of-line will have changed.
//
// if indent was added and the inserted text was after the indent,
// correct the selection for the new indent.
if (did_indent && bd.textcol - ind_post_col > 0)
{
oap->start.col += ind_post_col - ind_pre_col;
oap->start_vcol += ind_post_vcol - ind_pre_vcol;
oap->end.col += ind_post_col - ind_pre_col;
oap->end_vcol += ind_post_vcol - ind_pre_vcol;
}
block_prep(oap, &bd2, oap->start.lnum, TRUE);
if (did_indent && bd.textcol - ind_post_col > 0)
{
// undo for where "oap" is used below
oap->start.col -= ind_post_col - ind_pre_col;
oap->start_vcol -= ind_post_vcol - ind_pre_vcol;
oap->end.col -= ind_post_col - ind_pre_col;
oap->end_vcol -= ind_post_vcol - ind_pre_vcol;
}
if (!bd.is_MAX || bd2.textlen < bd.textlen)
{
if (oap->op_type == OP_APPEND)
{
pre_textlen += bd2.textlen - bd.textlen;
if (bd2.endspaces)
--bd2.textlen;
}
bd.textcol = bd2.textcol;
bd.textlen = bd2.textlen;
}
/*
* Subsequent calls to ml_get() flush the firstline data - take a
* copy of the required string.
*/
firstline = ml_get(oap->start.lnum);
len = STRLEN(firstline);
add = bd.textcol;
if (oap->op_type == OP_APPEND)
{
add += bd.textlen;
// account for pressing cursor in insert mode when '$' was used
if (bd.is_MAX
&& (start_insert.lnum == Insstart.lnum
&& start_insert.col > Insstart.col))
{
offset = (start_insert.col - Insstart.col);
add -= offset;
if (oap->end_vcol > offset)
oap->end_vcol -= (offset + 1);
else
// moved outside of the visual block, what to do?
return;
}
}
if ((size_t)add > len)
firstline += len; // short line, point to the NUL
else
firstline += add;
if (pre_textlen >= 0 && (ins_len =
(long)STRLEN(firstline) - pre_textlen - offset) > 0)
{
ins_text = vim_strnsave(firstline, ins_len);
if (ins_text != NULL)
{
// block handled here
if (u_save(oap->start.lnum,
(linenr_T)(oap->end.lnum + 1)) == OK)
block_insert(oap, ins_text, (oap->op_type == OP_INSERT),
&bd);
curwin->w_cursor.col = oap->start.col;
check_cursor();
vim_free(ins_text);
}
}
}
}
| 0
|
195,289
|
void Compute(OpKernelContext* context) override {
const Tensor& indices = context->input(0);
const Tensor& values = context->input(1);
const Tensor& shape = context->input(2);
const Tensor& weights = context->input(3);
bool use_weights = weights.NumElements() > 0;
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(indices.shape()),
errors::InvalidArgument(
"Input indices must be a 2-dimensional tensor. Got: ",
indices.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(values.shape()),
errors::InvalidArgument("Input values must be a vector. Got: ",
values.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(shape.shape()),
errors::InvalidArgument("Input shape must be a vector. Got: ",
shape.shape().DebugString()));
OP_REQUIRES(context,
values.shape().dim_size(0) == indices.shape().dim_size(0),
errors::InvalidArgument(
"Number of values must match first dimension of indices.",
"Got ", values.shape().dim_size(0),
" values, indices shape: ", indices.shape().DebugString()));
OP_REQUIRES(
context, shape.shape().dim_size(0) == indices.shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices.",
"Got ", shape.shape().dim_size(0),
" dimensions, indices shape: ", indices.shape().DebugString()));
OP_REQUIRES(context, shape.NumElements() > 0,
errors::InvalidArgument(
"The shape argument requires at least one element."));
if (use_weights) {
OP_REQUIRES(
context, weights.shape() == values.shape(),
errors::InvalidArgument(
"Weights and values must have the same shape. Weight shape: ",
weights.shape().DebugString(),
"; values shape: ", values.shape().DebugString()));
}
bool is_1d = shape.NumElements() == 1;
auto shape_vector = shape.flat<int64_t>();
int num_batches = is_1d ? 1 : shape_vector(0);
int num_values = values.NumElements();
const auto indices_values = indices.matrix<int64_t>();
const auto values_values = values.flat<T>();
const auto weight_values = weights.flat<W>();
auto per_batch_counts = BatchedMap<W>(num_batches);
T max_value = 0;
for (int idx = 0; idx < num_values; ++idx) {
int batch = is_1d ? 0 : indices_values(idx, 0);
if (batch >= num_batches) {
OP_REQUIRES(context, batch < num_batches,
errors::InvalidArgument(
"Indices value along the first dimension must be ",
"lower than the first index of the shape.", "Got ",
batch, " as batch and ", num_batches,
" as the first dimension of the shape."));
}
const auto& value = values_values(idx);
if (value >= 0 && (maxlength_ <= 0 || value < maxlength_)) {
if (binary_output_) {
per_batch_counts[batch][value] = 1;
} else if (use_weights) {
per_batch_counts[batch][value] += weight_values(idx);
} else {
per_batch_counts[batch][value]++;
}
if (value > max_value) {
max_value = value;
}
}
}
int num_output_values = GetOutputSize(max_value, maxlength_, minlength_);
OP_REQUIRES_OK(context, OutputSparse<W>(per_batch_counts, num_output_values,
is_1d, context));
}
| 1
|
391,667
|
static void defer_open_done(struct tevent_req *req)
{
struct defer_open_state *state = tevent_req_callback_data(
req, struct defer_open_state);
NTSTATUS status;
bool ret;
status = dbwrap_record_watch_recv(req, talloc_tos(), NULL);
TALLOC_FREE(req);
if (!NT_STATUS_IS_OK(status)) {
DEBUG(5, ("dbwrap_record_watch_recv returned %s\n",
nt_errstr(status)));
/*
* Even if it failed, retry anyway. TODO: We need a way to
* tell a re-scheduled open about that error.
*/
}
DEBUG(10, ("scheduling mid %llu\n", (unsigned long long)state->mid));
ret = schedule_deferred_open_message_smb(state->sconn, state->mid);
SMB_ASSERT(ret);
TALLOC_FREE(state);
}
| 0
|
261,893
|
njs_string_decode_uri(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
njs_index_t component)
{
u_char *dst;
int64_t size, length;
uint32_t cp;
njs_int_t ret;
njs_chb_t chain;
njs_uint_t i, n;
njs_bool_t percent;
njs_value_t *value;
const u_char *src, *p, *end;
const uint32_t *reserve;
njs_string_prop_t string;
njs_unicode_decode_t ctx;
u_char encode[4];
static const uint32_t reserve_uri[] = {
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
/* ?>=< ;:98 7654 3210 /.-, +*)( '&%$ #"! */
0xac009858, /* 1010 1100 0000 0000 1001 1000 0101 1000 */
/* _^]\ [ZYX WVUT SRQP ONML KJIH GFED CBA@ */
0x00000001, /* 0000 0000 0000 0000 0000 0000 0000 0001 */
/* ~}| {zyx wvut srqp onml kjih gfed cba` */
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
};
static const uint32_t reserve_uri_component[] = {
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
/* ?>=< ;:98 7654 3210 /.-, +*)( '&%$ #"! */
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
/* _^]\ [ZYX WVUT SRQP ONML KJIH GFED CBA@ */
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
/* ~}| {zyx wvut srqp onml kjih gfed cba` */
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
0x00000000, /* 0000 0000 0000 0000 0000 0000 0000 0000 */
};
static const int8_t hex[256]
njs_aligned(32) =
{
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1,
-1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
};
if (nargs < 2) {
vm->retval = njs_string_undefined;
return NJS_OK;
}
value = njs_argument(args, 1);
ret = njs_value_to_string(vm, value, value);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
reserve = component ? reserve_uri_component : reserve_uri;
njs_prefetch(reserve);
njs_prefetch(&hex['0']);
(void) njs_string_prop(&string, value);
length = 0;
src = string.start;
end = string.start + string.size;
njs_chb_init(&chain, vm->mem_pool);
njs_utf8_decode_init(&ctx);
while (src < end) {
percent = (src[0] == '%');
cp = njs_string_decode_uri_cp(hex, &src, end, 0);
if (njs_slow_path(cp > NJS_UNICODE_MAX_CODEPOINT)) {
goto uri_error;
}
if (!percent) {
length += 1;
dst = njs_chb_reserve(&chain, 4);
if (dst != NULL) {
njs_utf8_encode(dst, cp);
njs_chb_written(&chain, njs_utf8_size(cp));
}
continue;
}
if (cp < 0x80) {
if (njs_reserved(reserve, cp)) {
length += 3;
njs_chb_append(&chain, &src[-3], 3);
} else {
length += 1;
dst = njs_chb_reserve(&chain, 1);
if (dst != NULL) {
*dst = cp;
njs_chb_written(&chain, 1);
}
}
continue;
}
n = 1;
do {
n++;
} while (((cp << n) & 0x80));
if (njs_slow_path(n > 4)) {
goto uri_error;
}
encode[0] = cp;
for (i = 1; i < n; i++) {
cp = njs_string_decode_uri_cp(hex, &src, end, 1);
if (njs_slow_path(cp > NJS_UNICODE_MAX_CODEPOINT)) {
goto uri_error;
}
encode[i] = cp;
}
p = encode;
cp = njs_utf8_decode(&ctx, &p, p + n);
if (njs_slow_path(cp > NJS_UNICODE_MAX_CODEPOINT)) {
goto uri_error;
}
dst = njs_chb_reserve(&chain, 4);
if (dst != NULL) {
njs_utf8_encode(dst, cp);
njs_chb_written(&chain, njs_utf8_size(cp));
}
length += 1;
}
size = njs_chb_size(&chain);
if (njs_slow_path(size < 0)) {
njs_memory_error(vm);
return NJS_ERROR;
}
if (size == 0) {
/* GC: retain src. */
vm->retval = *value;
return NJS_OK;
}
dst = njs_string_alloc(vm, &vm->retval, size, length);
if (njs_slow_path(dst == NULL)) {
return NJS_ERROR;
}
njs_chb_join_to(&chain, dst);
njs_chb_destroy(&chain);
return NJS_OK;
uri_error:
njs_uri_error(vm, "malformed URI");
return NJS_ERROR;
}
| 0
|
459,003
|
HTTP_Clone(struct http *to, const struct http * const fm)
{
HTTP_Dup(to, fm);
to->vsl = fm->vsl;
to->ws = fm->ws;
}
| 0
|
256,138
|
void Compute(OpKernelContext* ctx) override {
const Tensor& a = ctx->input(0);
const Tensor& b = ctx->input(1);
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a.shape()),
errors::InvalidArgument("a is not a matrix"));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(b.shape()),
errors::InvalidArgument("b is not a matrix"));
const int m = transpose_a_ ? a.dim_size(1) : a.dim_size(0);
const int k = transpose_a_ ? a.dim_size(0) : a.dim_size(1);
const int n = transpose_b_ ? b.dim_size(0) : b.dim_size(1);
const int k2 = transpose_b_ ? b.dim_size(1) : b.dim_size(0);
OP_REQUIRES(ctx, k == k2,
errors::InvalidArgument(
"Matrix size incompatible: a: ", a.shape().DebugString(),
", b: ", b.shape().DebugString()));
OP_REQUIRES(ctx, m >= 0 && n >= 0 && k >= 0,
errors::InvalidArgument(
"Matrix dimensions cannot be negative: a: ",
a.shape().DebugString(), ", b: ", b.shape().DebugString()));
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({m, n}), &output));
// Return early if at least one of the output dimension size is 0.
if (m == 0 || n == 0) {
return;
}
if (k == 0) {
// If the inner dimension k in the matrix multiplication is zero, we fill
// the output with zeros.
functor::SetZeroFunctor<CPUDevice, float> f;
f(ctx->eigen_device<CPUDevice>(), output->flat<float>());
return;
}
auto out = output->matrix<float>();
std::unique_ptr<Tensor> a_float;
std::unique_ptr<Tensor> b_float;
if (!a_is_sparse_ && !b_is_sparse_) {
auto left = &a;
auto right = &b;
// TODO(agarwal): multi-thread the conversions from bfloat16 to float.
if (std::is_same<TL, bfloat16>::value) {
a_float.reset(new Tensor(DT_FLOAT, a.shape()));
BFloat16ToFloat(a.flat<bfloat16>().data(),
a_float->flat<float>().data(), a.NumElements());
left = a_float.get();
}
if (std::is_same<TR, bfloat16>::value) {
b_float.reset(new Tensor(DT_FLOAT, b.shape()));
BFloat16ToFloat(b.flat<bfloat16>().data(),
b_float->flat<float>().data(), b.NumElements());
right = b_float.get();
}
Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair;
dim_pair[0].first = transpose_a_ ? 0 : 1;
dim_pair[0].second = transpose_b_ ? 1 : 0;
out.device(ctx->template eigen_device<CPUDevice>()) =
left->matrix<float>().contract(right->matrix<float>(), dim_pair);
return;
}
auto left = &a;
auto right = &b;
bool transpose_output = false;
bool transpose_a = transpose_a_;
bool transpose_b = transpose_b_;
if (!a_is_sparse_) {
// Swap the order of multiplications using the identity:
// A * B = (B' * A')'.
std::swap(left, right);
std::swap(transpose_a, transpose_b);
transpose_a = !transpose_a;
transpose_b = !transpose_b;
transpose_output = !transpose_output;
}
std::unique_ptr<Tensor> right_tr;
if (transpose_b) {
// TODO(agarwal): avoid transposing the matrix here and directly handle
// transpose in CreateDenseSlices.
OP_REQUIRES(ctx, right->dim_size(0) != 0,
errors::InvalidArgument("b has an entry 0 in it's shape."));
OP_REQUIRES(ctx, right->dim_size(1) != 0,
errors::InvalidArgument("b has an entry 0 in it's shape."));
right_tr.reset(
new Tensor(right->dtype(),
TensorShape({right->dim_size(1), right->dim_size(0)})));
const auto perm = dsizes_10();
if (transpose_output) {
right_tr->matrix<TL>().device(ctx->template eigen_device<CPUDevice>()) =
right->matrix<TL>().shuffle(perm);
} else {
right_tr->matrix<TR>().device(ctx->template eigen_device<CPUDevice>()) =
right->matrix<TR>().shuffle(perm);
}
right = right_tr.get();
}
if (transpose_output) {
DoMatMul<TR, TL>::Compute(&this->cache_tr_, left->matrix<TR>(),
right->matrix<TL>(), transpose_a,
ctx->device()->tensorflow_cpu_worker_threads(),
transpose_output, &out);
} else {
DoMatMul<TL, TR>::Compute(&this->cache_nt_, left->matrix<TL>(),
right->matrix<TR>(), transpose_a,
ctx->device()->tensorflow_cpu_worker_threads(),
transpose_output, &out);
}
}
| 0
|
247,677
|
TEST_P(SslSocketTest, GetUriWithLocalUriSan) {
const std::string client_ctx_yaml = R"EOF(
common_tls_context:
tls_certificates:
certificate_chain:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem"
private_key:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem"
)EOF";
const std::string server_ctx_yaml = R"EOF(
common_tls_context:
tls_certificates:
certificate_chain:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem"
private_key:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_key.pem"
validation_context:
trusted_ca:
filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
)EOF";
TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());
testUtil(test_options.setExpectedLocalUri("spiffe://lyft.com/test-team")
.setExpectedSerialNumber(TEST_NO_SAN_CERT_SERIAL));
}
| 0
|
343,255
|
void dofeat(void)
{
# define FEAT "Extensions supported:" CRLF \
" UTF8" CRLF \
" EPRT" CRLF " IDLE" CRLF " MDTM" CRLF " SIZE" CRLF " MFMT" CRLF \
" REST STREAM" CRLF \
" MLST type*;size*;sizd*;modify*;UNIX.mode*;UNIX.uid*;UNIX.gid*;unique*;" CRLF \
" MLSD" CRLF \
" PRET"
# ifdef WITH_TLS
# define FEAT_TLS CRLF " AUTH TLS" CRLF " PBSZ" CRLF " PROT"
# else
# define FEAT_TLS ""
# endif
# ifdef DEBUG
# define FEAT_DEBUG CRLF " XDBG"
# else
# define FEAT_DEBUG ""
# endif
# ifdef WITH_VIRTUAL_CHROOT
# define FEAT_TVFS ""
# else
# define FEAT_TVFS CRLF " TVFS"
# endif
# define FEAT_PASV CRLF " PASV" CRLF " EPSV"
# ifdef MINIMAL
# define FEAT_ESTA ""
# define FEAT_ESTP ""
# else
# define FEAT_ESTA CRLF " ESTA"
# define FEAT_ESTP CRLF " ESTP"
# endif
char feat[] = FEAT FEAT_DEBUG FEAT_TLS FEAT_TVFS FEAT_ESTA FEAT_PASV FEAT_ESTP;
if (disallow_passive != 0) {
feat[sizeof FEAT FEAT_DEBUG FEAT_TLS FEAT_TVFS FEAT_ESTA - 1U] = 0;
}
# ifndef MINIMAL
else if (STORAGE_FAMILY(force_passive_ip) != 0) {
feat[sizeof FEAT FEAT_DEBUG FEAT_TLS FEAT_TVFS FEAT_ESTA FEAT_PASV - 1U] = 0;
}
# endif
addreply_noformat(0, feat);
addreply_noformat(211, "End.");
}
| 0
|
263,503
|
static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int len, err = 0;
struct bt_voice voice;
u32 phys;
int pkt_status;
BT_DBG("sk %p", sk);
if (level == SOL_SCO)
return sco_sock_getsockopt_old(sock, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
(u32 __user *)optval))
err = -EFAULT;
break;
case BT_VOICE:
voice.setting = sco_pi(sk)->setting;
len = min_t(unsigned int, len, sizeof(voice));
if (copy_to_user(optval, (char *)&voice, len))
err = -EFAULT;
break;
case BT_PHY:
if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
phys = hci_conn_get_phy(sco_pi(sk)->conn->hcon);
if (put_user(phys, (u32 __user *) optval))
err = -EFAULT;
break;
case BT_PKT_STATUS:
pkt_status = (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS);
if (put_user(pkt_status, (int __user *)optval))
err = -EFAULT;
break;
case BT_SNDMTU:
case BT_RCVMTU:
if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
if (put_user(sco_pi(sk)->conn->mtu, (u32 __user *)optval))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
| 0
|
204,438
|
WandPrivate void CLINoImageOperator(MagickCLI *cli_wand,
const char *option,const char *arg1n,const char *arg2n)
{
const char /* percent escaped versions of the args */
*arg1,
*arg2;
#define _image_info (cli_wand->wand.image_info)
#define _images (cli_wand->wand.images)
#define _exception (cli_wand->wand.exception)
#define _process_flags (cli_wand->process_flags)
#define _option_type ((CommandOptionFlags) cli_wand->command->flags)
#define IfNormalOp (*option=='-')
#define IfPlusOp (*option!='-')
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
if (cli_wand->wand.debug != MagickFalse)
(void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(),
"- NoImage Operator: %s \"%s\" \"%s\"", option,
arg1n != (char *) NULL ? arg1n : "",
arg2n != (char *) NULL ? arg2n : "");
arg1 = arg1n;
arg2 = arg2n;
/* Interpret Percent Escapes in Arguments - using first image */
if ( (((_process_flags & ProcessInterpretProperities) != 0 )
|| ((_option_type & AlwaysInterpretArgsFlag) != 0)
) && ((_option_type & NeverInterpretArgsFlag) == 0) ) {
/* Interpret Percent escapes in argument 1 */
if (arg1n != (char *) NULL) {
arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception);
if (arg1 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg1=arg1n; /* use the given argument as is */
}
}
if (arg2n != (char *) NULL) {
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg2=arg2n; /* use the given argument as is */
}
}
}
#undef _process_flags
#undef _option_type
do { /* break to exit code */
/*
No-op options (ignore these)
*/
if (LocaleCompare("noop",option+1) == 0) /* zero argument */
break;
if (LocaleCompare("sans",option+1) == 0) /* one argument */
break;
if (LocaleCompare("sans0",option+1) == 0) /* zero argument */
break;
if (LocaleCompare("sans1",option+1) == 0) /* one argument */
break;
if (LocaleCompare("sans2",option+1) == 0) /* two arguments */
break;
/*
Image Reading
*/
if ( ( LocaleCompare("read",option+1) == 0 ) ||
( LocaleCompare("--",option) == 0 ) ) {
/* Do Glob filename Expansion for 'arg1' then read all images.
*
* Expansion handles '@', '~', '*', and '?' meta-characters while ignoring
* (but attaching to the filenames in the generated argument list) any
* [...] read modifiers that may be present.
*
* For example: It will expand '*.gif[20x20]' into a list such as
* 'abc.gif[20x20]', 'foobar.gif[20x20]', 'xyzzy.gif[20x20]'
*
* NOTE: In IMv6 this was done globally across all images. This
* meant you could include IM options in '@filename' lists, but you
* could not include comments. Doing it only for image read makes
* it far more secure.
*
* Note: arguments do not have percent escapes expanded for security
* reasons.
*/
int argc;
char **argv;
ssize_t i;
argc = 1;
argv = (char **) &arg1;
/* Expand 'glob' expressions in the given filename.
Expansion handles any 'coder:' prefix, or read modifiers attached
to the filename, including them in the resulting expanded list.
*/
if (ExpandFilenames(&argc,&argv) == MagickFalse)
CLIWandExceptArgBreak(ResourceLimitError,"MemoryAllocationFailed",
option,GetExceptionMessage(errno));
/* loop over expanded filename list, and read then all in */
for (i=0; i < (ssize_t) argc; i++) {
Image *
new_images;
if (_image_info->ping != MagickFalse)
new_images=PingImages(_image_info,argv[i],_exception);
else
new_images=ReadImages(_image_info,argv[i],_exception);
AppendImageToList(&_images, new_images);
argv[i]=DestroyString(argv[i]);
}
argv=(char **) RelinquishMagickMemory(argv);
break;
}
/*
Image Writing
Note: Writing a empty image list is valid in specific cases
*/
if (LocaleCompare("write",option+1) == 0) {
/* Note: arguments do not have percent escapes expanded */
char
key[MagickPathExtent];
Image
*write_images;
ImageInfo
*write_info;
/* Need images, unless a "null:" output coder is used */
if ( _images == (Image *) NULL ) {
if ( LocaleCompare(arg1,"null:") == 0 )
break;
CLIWandExceptArgBreak(OptionError,"NoImagesForWrite",option,arg1);
}
(void) FormatLocaleString(key,MagickPathExtent,"cache:%s",arg1);
(void) DeleteImageRegistry(key);
write_images=CloneImageList(_images,_exception);
write_info=CloneImageInfo(_image_info);
(void) WriteImages(write_info,write_images,arg1,_exception);
write_info=DestroyImageInfo(write_info);
write_images=DestroyImageList(write_images);
break;
}
/*
Parenthesis and Brace operations
*/
if (LocaleCompare("(",option) == 0) {
/* stack 'push' images */
Stack
*node;
size_t
size;
size=0;
node=cli_wand->image_list_stack;
for ( ; node != (Stack *) NULL; node=node->next)
size++;
if ( size >= MAX_STACK_DEPTH )
CLIWandExceptionBreak(OptionError,"ParenthesisNestedTooDeeply",option);
node=(Stack *) AcquireMagickMemory(sizeof(*node));
if (node == (Stack *) NULL)
CLIWandExceptionBreak(ResourceLimitFatalError,
"MemoryAllocationFailed",option);
node->data = (void *)cli_wand->wand.images;
node->next = cli_wand->image_list_stack;
cli_wand->image_list_stack = node;
cli_wand->wand.images = NewImageList();
/* handle respect-parenthesis */
if (IsStringTrue(GetImageOption(cli_wand->wand.image_info,
"respect-parenthesis")) != MagickFalse)
option="{"; /* fall-thru so as to push image settings too */
else
break;
/* fall thru to operation */
}
if (LocaleCompare("{",option) == 0) {
/* stack 'push' of image_info settings */
Stack
*node;
size_t
size;
size=0;
node=cli_wand->image_info_stack;
for ( ; node != (Stack *) NULL; node=node->next)
size++;
if ( size >= MAX_STACK_DEPTH )
CLIWandExceptionBreak(OptionError,"CurlyBracesNestedTooDeeply",option);
node=(Stack *) AcquireMagickMemory(sizeof(*node));
if (node == (Stack *) NULL)
CLIWandExceptionBreak(ResourceLimitFatalError,
"MemoryAllocationFailed",option);
node->data = (void *)cli_wand->wand.image_info;
node->next = cli_wand->image_info_stack;
cli_wand->image_info_stack = node;
cli_wand->wand.image_info = CloneImageInfo(cli_wand->wand.image_info);
if (cli_wand->wand.image_info == (ImageInfo *) NULL) {
CLIWandException(ResourceLimitFatalError,"MemoryAllocationFailed",
option);
cli_wand->wand.image_info = (ImageInfo *)node->data;
node = (Stack *)RelinquishMagickMemory(node);
break;
}
break;
}
if (LocaleCompare(")",option) == 0) {
/* pop images from stack */
Stack
*node;
node = (Stack *)cli_wand->image_list_stack;
if ( node == (Stack *) NULL)
CLIWandExceptionBreak(OptionError,"UnbalancedParenthesis",option);
cli_wand->image_list_stack = node->next;
AppendImageToList((Image **)&node->data,cli_wand->wand.images);
cli_wand->wand.images= (Image *)node->data;
node = (Stack *)RelinquishMagickMemory(node);
/* handle respect-parenthesis - of the previous 'pushed' settings */
node = cli_wand->image_info_stack;
if ( node != (Stack *) NULL)
{
if (IsStringTrue(GetImageOption(
cli_wand->wand.image_info,"respect-parenthesis")) != MagickFalse)
option="}"; /* fall-thru so as to pop image settings too */
else
break;
}
else
break;
/* fall thru to next if */
}
if (LocaleCompare("}",option) == 0) {
/* pop image_info settings from stack */
Stack
*node;
node = (Stack *)cli_wand->image_info_stack;
if ( node == (Stack *) NULL)
CLIWandExceptionBreak(OptionError,"UnbalancedCurlyBraces",option);
cli_wand->image_info_stack = node->next;
(void) DestroyImageInfo(cli_wand->wand.image_info);
cli_wand->wand.image_info = (ImageInfo *)node->data;
node = (Stack *)RelinquishMagickMemory(node);
GetDrawInfo(cli_wand->wand.image_info, cli_wand->draw_info);
cli_wand->quantize_info=DestroyQuantizeInfo(cli_wand->quantize_info);
cli_wand->quantize_info=AcquireQuantizeInfo(cli_wand->wand.image_info);
break;
}
if (LocaleCompare("print",option+1) == 0)
{
(void) FormatLocaleFile(stdout,"%s",arg1);
break;
}
if (LocaleCompare("set",option+1) == 0)
{
/* Settings are applied to each image in memory in turn (if any).
While a option: only need to be applied once globally.
NOTE: rguments have not been automatically percent expaneded
*/
/* escape the 'key' once only, using first image. */
arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception);
if (arg1 == (char *) NULL)
CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure",
option);
if (LocaleNCompare(arg1,"registry:",9) == 0)
{
if (IfPlusOp)
{
(void) DeleteImageRegistry(arg1+9);
arg1=DestroyString((char *)arg1);
break;
}
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL) {
arg1=DestroyString((char *)arg1);
CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure",
option);
}
(void) SetImageRegistry(StringRegistryType,arg1+9,arg2,_exception);
arg1=DestroyString((char *)arg1);
arg2=DestroyString((char *)arg2);
break;
}
if (LocaleNCompare(arg1,"option:",7) == 0)
{
/* delete equivelent artifact from all images (if any) */
if (_images != (Image *) NULL)
{
MagickResetIterator(&cli_wand->wand);
while (MagickNextImage(&cli_wand->wand) != MagickFalse)
(void) DeleteImageArtifact(_images,arg1+7);
MagickResetIterator(&cli_wand->wand);
}
/* now set/delete the global option as needed */
/* FUTURE: make escapes in a global 'option:' delayed */
arg2=(char *) NULL;
if (IfNormalOp)
{
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL)
CLIWandExceptionBreak(OptionWarning,
"InterpretPropertyFailure",option);
}
(void) SetImageOption(_image_info,arg1+7,arg2);
arg1=DestroyString((char *)arg1);
arg2=DestroyString((char *)arg2);
break;
}
/* Set Artifacts/Properties/Attributes all images (required) */
if ( _images == (Image *) NULL )
CLIWandExceptArgBreak(OptionWarning,"NoImageForProperty",option,arg1);
MagickResetIterator(&cli_wand->wand);
while (MagickNextImage(&cli_wand->wand) != MagickFalse)
{
arg2=(char *) NULL;
if (IfNormalOp)
{
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL)
CLIWandExceptionBreak(OptionWarning,
"InterpretPropertyFailure",option);
}
if (LocaleNCompare(arg1,"artifact:",9) == 0)
(void) SetImageArtifact(_images,arg1+9,arg2);
else if (LocaleNCompare(arg1,"property:",9) == 0)
(void) SetImageProperty(_images,arg1+9,arg2,_exception);
else
(void) SetImageProperty(_images,arg1,arg2,_exception);
arg2=DestroyString((char *)arg2);
}
MagickResetIterator(&cli_wand->wand);
arg1=DestroyString((char *)arg1);
break;
}
if (LocaleCompare("clone",option+1) == 0) {
Image
*new_images;
if (*option == '+')
arg1=AcquireString("-1");
if (IsSceneGeometry(arg1,MagickFalse) == MagickFalse)
CLIWandExceptionBreak(OptionError,"InvalidArgument",option);
if ( cli_wand->image_list_stack == (Stack *) NULL)
CLIWandExceptionBreak(OptionError,"UnableToCloneImage",option);
new_images = (Image *)cli_wand->image_list_stack->data;
if (new_images == (Image *) NULL)
CLIWandExceptionBreak(OptionError,"UnableToCloneImage",option);
new_images=CloneImages(new_images,arg1,_exception);
if (new_images == (Image *) NULL)
CLIWandExceptionBreak(OptionError,"NoSuchImage",option);
AppendImageToList(&_images,new_images);
break;
}
/*
Informational Operations.
Note that these do not require either a cli-wand or images!
Though currently a cli-wand much be provided regardless.
*/
if (LocaleCompare("version",option+1) == 0)
{
ListMagickVersion(stdout);
break;
}
if (LocaleCompare("list",option+1) == 0) {
/*
FUTURE: This 'switch' should really be part of MagickCore
*/
ssize_t
list;
list=ParseCommandOption(MagickListOptions,MagickFalse,arg1);
if ( list < 0 ) {
CLIWandExceptionArg(OptionError,"UnrecognizedListType",option,arg1);
break;
}
switch (list)
{
case MagickCoderOptions:
{
(void) ListCoderInfo((FILE *) NULL,_exception);
break;
}
case MagickColorOptions:
{
(void) ListColorInfo((FILE *) NULL,_exception);
break;
}
case MagickConfigureOptions:
{
(void) ListConfigureInfo((FILE *) NULL,_exception);
break;
}
case MagickDelegateOptions:
{
(void) ListDelegateInfo((FILE *) NULL,_exception);
break;
}
case MagickFontOptions:
{
(void) ListTypeInfo((FILE *) NULL,_exception);
break;
}
case MagickFormatOptions:
(void) ListMagickInfo((FILE *) NULL,_exception);
break;
case MagickLocaleOptions:
(void) ListLocaleInfo((FILE *) NULL,_exception);
break;
case MagickLogOptions:
(void) ListLogInfo((FILE *) NULL,_exception);
break;
case MagickMagicOptions:
(void) ListMagicInfo((FILE *) NULL,_exception);
break;
case MagickMimeOptions:
(void) ListMimeInfo((FILE *) NULL,_exception);
break;
case MagickModuleOptions:
(void) ListModuleInfo((FILE *) NULL,_exception);
break;
case MagickPolicyOptions:
(void) ListPolicyInfo((FILE *) NULL,_exception);
break;
case MagickResourceOptions:
(void) ListMagickResourceInfo((FILE *) NULL,_exception);
break;
case MagickThresholdOptions:
(void) ListThresholdMaps((FILE *) NULL,_exception);
break;
default:
(void) ListCommandOptions((FILE *) NULL,(CommandOption) list,
_exception);
break;
}
break;
}
CLIWandException(OptionError,"UnrecognizedOption",option);
DisableMSCWarning(4127)
} while (0); /* break to exit code. */
RestoreMSCWarning
/* clean up percent escape interpreted strings */
if (arg1 != arg1n )
arg1=DestroyString((char *)arg1);
if (arg2 != arg2n )
arg2=DestroyString((char *)arg2);
#undef _image_info
#undef _images
#undef _exception
#undef IfNormalOp
#undef IfPlusOp
}
| 1
|
308,202
|
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
struct fastrpc_dma_buf_attachment *a = attachment->priv;
struct sg_table *table;
table = &a->sgt;
if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
return ERR_PTR(-ENOMEM);
return table;
}
| 0
|
369,128
|
__must_hold(&req->ctx->timeout_lock)
{
struct io_timeout_data *io = req->async_data;
if (hrtimer_try_to_cancel(&io->timer) != -1) {
if (status)
req_set_fail(req);
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
list_del_init(&req->timeout.list);
io_fill_cqe_req(req, status, 0);
io_put_req_deferred(req);
}
}
| 0
|
238,569
|
static int check_stack_access_for_ptr_arithmetic(
struct bpf_verifier_env *env,
int regno,
const struct bpf_reg_state *reg,
int off)
{
if (!tnum_is_const(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
regno, tn_buf, off);
return -EACCES;
}
if (off >= 0 || off < -MAX_BPF_STACK) {
verbose(env, "R%d stack pointer arithmetic goes out of range, "
"prohibited for !root; off=%d\n", regno, off);
return -EACCES;
}
return 0;
}
| 0
|
225,894
|
GF_Box *moov_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_MovieBox, GF_ISOM_BOX_TYPE_MOOV);
tmp->trackList = gf_list_new();
if (!tmp->trackList) {
gf_free(tmp);
return NULL;
}
return (GF_Box *)tmp;
}
| 0
|
226,257
|
GF_Err sdp_box_size(GF_Box *s)
{
GF_SDPBox *ptr = (GF_SDPBox *)s;
//don't count the NULL char!!!
if (ptr->sdpText)
ptr->size += strlen(ptr->sdpText);
return GF_OK;
}
| 0
|
473,996
|
left_adjust_char_head(const UChar* start, const UChar* s, const UChar* end, OnigEncoding enc ARG_UNUSED)
{
const UChar *p;
if (s <= start) return (UChar* )s;
p = s;
while (!utf8_islead(*p) && p > start) p--;
return (UChar* )p;
}
| 0
|
274,738
|
gchar *utf8_strncpy(gchar *dst, const gchar *src, gsize byte_len)
{
/* -1 for '\0' in buffer */
glong char_len = g_utf8_strlen(src, byte_len - 1);
return g_utf8_strncpy(dst, src, char_len);
}
| 0
|
318,766
|
drill_parse_header_is_metric(gerb_file_t *fd, drill_state_t *state,
gerbv_image_t *image, ssize_t file_line)
{
gerbv_drill_stats_t *stats = image->drill_stats;
char c, op[3];
dprintf(" %s(): entering\n", __FUNCTION__);
/* METRIC is not an actual M code but a command that is only
* acceptable within the header.
*
* The syntax is
* METRIC[,{TZ|LZ}][,{000.000|000.00|0000.00}]
*/
if (DRILL_HEADER != state->curr_section)
return 0;
switch (file_check_str(fd, "METRIC")) {
case -1:
gerbv_stats_printf(stats->error_list, GERBV_MESSAGE_ERROR, -1,
_("Unexpected EOF found while parsing \"%s\" string "
"in file \"%s\""), "METRIC", fd->filename);
return 0;
case 0:
return 0;
}
header_again:
if (',' != gerb_fgetc(fd)) {
gerb_ungetc(fd);
eat_line(fd);
} else {
/* Is it TZ, LZ, or zerofmt? */
switch (c = gerb_fgetc(fd)) {
case 'T':
case 'L':
if ('Z' != gerb_fgetc(fd))
goto header_junk;
if (c == 'L') {
dprintf (" %s(): Detected a file that probably has "
"trailing zero suppression\n", __FUNCTION__);
if (state->autod)
image->format->omit_zeros = GERBV_OMIT_ZEROS_TRAILING;
} else {
dprintf (" %s(): Detected a file that probably has "
"leading zero suppression\n", __FUNCTION__);
if (state->autod)
image->format->omit_zeros = GERBV_OMIT_ZEROS_LEADING;
}
if (state->autod) {
/* Default metric number format is 6-digit, 1 um
* resolution. The header number format (for T#C#
* definitions) is fixed to that, while the number
* format within the file can differ. */
state->header_number_format =
state->number_format = FMT_000_000;
state->decimals = 3;
}
if (',' == gerb_fgetc(fd))
/* Anticipate number format will follow */
goto header_again;
gerb_ungetc(fd);
break;
case '0':
if ('0' != gerb_fgetc(fd)
|| '0' != gerb_fgetc(fd))
goto header_junk;
/* We just parsed three 0s, the remainder options
so far are: .000 | .00 | 0.00 */
op[0] = gerb_fgetc(fd);
op[1] = gerb_fgetc(fd);
op[2] = '\0';
if (EOF == op[0]
|| EOF == op[1])
goto header_junk;
if (0 == strcmp(op, "0.")) {
/* expecting FMT_0000_00,
two trailing 0s must follow */
if ('0' != gerb_fgetc(fd)
|| '0' != gerb_fgetc(fd))
goto header_junk;
eat_line(fd);
if (state->autod) {
state->number_format = FMT_0000_00;
state->decimals = 2;
}
break;
}
if (0 != strcmp(op, ".0"))
goto header_junk;
/* Must be either FMT_000_000 or FMT_000_00, depending
* on whether one or two 0s are following */
if ('0' != gerb_fgetc(fd))
goto header_junk;
if ('0' == gerb_fgetc(fd)
&& state->autod) {
state->number_format = FMT_000_000;
state->decimals = 3;
} else {
gerb_ungetc(fd);
if (state->autod) {
state->number_format = FMT_000_00;
state->decimals = 2;
}
}
eat_line(fd);
break;
default:
header_junk:
gerb_ungetc(fd);
eat_line(fd);
gerbv_stats_printf(stats->error_list,
GERBV_MESSAGE_WARNING, -1,
_("Found junk after METRIC command "
"at line %ld in file \"%s\""),
file_line, fd->filename);
break;
}
}
state->unit = GERBV_UNIT_MM;
return 1;
} /* drill_parse_header_is_metric() */
| 0
|
459,215
|
static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
spin_lock(&tp->lock);
tp->deleting = true;
spin_unlock(&tp->lock);
}
| 0
|
313,547
|
int rose_rt_ioctl(unsigned int cmd, void __user *arg)
{
struct rose_route_struct rose_route;
struct net_device *dev;
int err;
switch (cmd) {
case SIOCADDRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
if (rose_dev_exists(&rose_route.address)) /* Can't add routes to ourself */
return -EINVAL;
if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
return -EINVAL;
if (rose_route.ndigis > AX25_MAX_DIGIS)
return -EINVAL;
err = rose_add_node(&rose_route, dev);
return err;
case SIOCDELRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
err = rose_del_node(&rose_route, dev);
return err;
case SIOCRSCLRRT:
return rose_clear_routes();
default:
return -EINVAL;
}
return 0;
}
| 0
|
489,137
|
static sctp_disposition_t sctp_sf_do_unexpected_init(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, sctp_cmd_seq_t *commands)
{
sctp_disposition_t retval;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *repl;
struct sctp_association *new_asoc;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
sctp_unrecognized_param_t *unk_param;
int len;
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*
* IG Section 2.11.2
* Furthermore, we require that the receiver of an INIT chunk MUST
* enforce these rules by silently discarding an arriving packet
* with an INIT chunk that is bundled with other chunks.
*/
if (!chunk->singleton)
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
* Tag.
*/
if (chunk->sctp_hdr->vtag != 0)
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
/* Make sure that the INIT chunk has a valid length.
* In this case, we generate a protocol violation since we have
* an association established.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
/* Tag the variable length parameters. */
chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
retval = SCTP_DISPOSITION_CONSUME;
} else {
retval = SCTP_DISPOSITION_NOMEM;
}
goto cleanup;
} else {
return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
commands);
}
}
/*
* Other parameters for the endpoint SHOULD be copied from the
* existing parameters of the association (e.g. number of
* outbound streams) into the INIT ACK and cookie.
* FIXME: We are copying parameters from the endpoint not the
* association.
*/
new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
if (!new_asoc)
goto nomem;
/* In the outbound INIT ACK the endpoint MUST copy its current
* Verification Tag and Peers Verification tag into a reserved
* place (local tie-tag and per tie-tag) within the state cookie.
*/
if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
sctp_source(chunk),
(sctp_init_chunk_t *)chunk->chunk_hdr,
GFP_ATOMIC))
goto nomem;
/* Make sure no new addresses are being added during the
* restart. Do not do this check for COOKIE-WAIT state,
* since there are no peer addresses to check against.
* Upon return an ABORT will have been sent if needed.
*/
if (!sctp_state(asoc, COOKIE_WAIT)) {
if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk,
commands)) {
retval = SCTP_DISPOSITION_CONSUME;
goto nomem_retval;
}
}
sctp_tietags_populate(new_asoc, asoc);
/* B) "Z" shall respond immediately with an INIT ACK chunk. */
/* If there are errors need to be reported for unknown parameters,
* make sure to reserve enough room in the INIT ACK for them.
*/
len = 0;
if (err_chunk) {
len = ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t);
}
if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
goto nomem;
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem;
/* If there are errors need to be reported for unknown parameters,
* include them in the outgoing INIT ACK as "Unrecognized parameter"
* parameter.
*/
if (err_chunk) {
/* Get the "Unrecognized parameter" parameter(s) out of the
* ERROR chunk generated by sctp_verify_init(). Since the
* error cause code for "unknown parameter" and the
* "Unrecognized parameter" type is the same, we can
* construct the parameters in INIT ACK by copying the
* ERROR causes over.
*/
unk_param = (sctp_unrecognized_param_t *)
((__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t));
/* Replace the cause code with the "Unrecognized parameter"
* parameter type.
*/
sctp_addto_chunk(repl, len, unk_param);
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/*
* Note: After sending out INIT ACK with the State Cookie parameter,
* "Z" MUST NOT allocate any resources for this new association.
* Otherwise, "Z" will be vulnerable to resource attacks.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
retval = SCTP_DISPOSITION_CONSUME;
return retval;
nomem:
retval = SCTP_DISPOSITION_NOMEM;
nomem_retval:
if (new_asoc)
sctp_association_free(new_asoc);
cleanup:
if (err_chunk)
sctp_chunk_free(err_chunk);
return retval;
}
| 0
|
384,779
|
getwhitecols(char_u *p)
{
return skipwhite(p) - p;
}
| 0
|
225,620
|
GF_Box *stsh_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_ShadowSyncBox, GF_ISOM_BOX_TYPE_STSH);
tmp->entries = gf_list_new();
if (!tmp->entries) {
gf_free(tmp);
return NULL;
}
return (GF_Box *)tmp;
}
| 0
|
379,322
|
parse_cmd_address(exarg_T *eap, char **errormsg, int silent)
{
int address_count = 1;
linenr_T lnum;
int need_check_cursor = FALSE;
int ret = FAIL;
// Repeat for all ',' or ';' separated addresses.
for (;;)
{
eap->line1 = eap->line2;
eap->line2 = default_address(eap);
eap->cmd = skipwhite(eap->cmd);
lnum = get_address(eap, &eap->cmd, eap->addr_type, eap->skip, silent,
eap->addr_count == 0, address_count++);
if (eap->cmd == NULL) // error detected
goto theend;
if (lnum == MAXLNUM)
{
if (*eap->cmd == '%') // '%' - all lines
{
++eap->cmd;
switch (eap->addr_type)
{
case ADDR_LINES:
case ADDR_OTHER:
eap->line1 = 1;
eap->line2 = curbuf->b_ml.ml_line_count;
break;
case ADDR_LOADED_BUFFERS:
{
buf_T *buf = firstbuf;
while (buf->b_next != NULL
&& buf->b_ml.ml_mfp == NULL)
buf = buf->b_next;
eap->line1 = buf->b_fnum;
buf = lastbuf;
while (buf->b_prev != NULL
&& buf->b_ml.ml_mfp == NULL)
buf = buf->b_prev;
eap->line2 = buf->b_fnum;
break;
}
case ADDR_BUFFERS:
eap->line1 = firstbuf->b_fnum;
eap->line2 = lastbuf->b_fnum;
break;
case ADDR_WINDOWS:
case ADDR_TABS:
if (IS_USER_CMDIDX(eap->cmdidx))
{
eap->line1 = 1;
eap->line2 = eap->addr_type == ADDR_WINDOWS
? LAST_WIN_NR : LAST_TAB_NR;
}
else
{
// there is no Vim command which uses '%' and
// ADDR_WINDOWS or ADDR_TABS
*errormsg = _(e_invalid_range);
goto theend;
}
break;
case ADDR_TABS_RELATIVE:
case ADDR_UNSIGNED:
case ADDR_QUICKFIX:
*errormsg = _(e_invalid_range);
goto theend;
case ADDR_ARGUMENTS:
if (ARGCOUNT == 0)
eap->line1 = eap->line2 = 0;
else
{
eap->line1 = 1;
eap->line2 = ARGCOUNT;
}
break;
case ADDR_QUICKFIX_VALID:
#ifdef FEAT_QUICKFIX
eap->line1 = 1;
eap->line2 = qf_get_valid_size(eap);
if (eap->line2 == 0)
eap->line2 = 1;
#endif
break;
case ADDR_NONE:
// Will give an error later if a range is found.
break;
}
++eap->addr_count;
}
else if (*eap->cmd == '*' && vim_strchr(p_cpo, CPO_STAR) == NULL)
{
pos_T *fp;
// '*' - visual area
if (eap->addr_type != ADDR_LINES)
{
*errormsg = _(e_invalid_range);
goto theend;
}
++eap->cmd;
if (!eap->skip)
{
fp = getmark('<', FALSE);
if (check_mark(fp) == FAIL)
goto theend;
eap->line1 = fp->lnum;
fp = getmark('>', FALSE);
if (check_mark(fp) == FAIL)
goto theend;
eap->line2 = fp->lnum;
++eap->addr_count;
}
}
}
else
eap->line2 = lnum;
eap->addr_count++;
if (*eap->cmd == ';')
{
if (!eap->skip)
{
curwin->w_cursor.lnum = eap->line2;
// Don't leave the cursor on an illegal line or column, but do
// accept zero as address, so 0;/PATTERN/ works correctly
// (where zero usually means to use the first line).
// Check the cursor position before returning.
if (eap->line2 > 0)
check_cursor();
else
check_cursor_col();
need_check_cursor = TRUE;
}
}
else if (*eap->cmd != ',')
break;
++eap->cmd;
}
// One address given: set start and end lines.
if (eap->addr_count == 1)
{
eap->line1 = eap->line2;
// ... but only implicit: really no address given
if (lnum == MAXLNUM)
eap->addr_count = 0;
}
ret = OK;
theend:
if (need_check_cursor)
check_cursor();
return ret;
}
| 0
|
336,552
|
RedCharDeviceVDIPort::RedCharDeviceVDIPort(RedsState *reds):
RedCharDevice(reds, nullptr, REDS_TOKENS_TO_SEND, REDS_NUM_INTERNAL_AGENT_MESSAGES)
{
priv->read_state = VDI_PORT_READ_STATE_READ_HEADER;
priv->receive_pos = (uint8_t *)&priv->vdi_chunk_header;
priv->receive_len = sizeof(priv->vdi_chunk_header);
RedCharDeviceVDIPort *dev = this;
agent_msg_filter_init(&dev->priv->write_filter, reds->config->agent_copypaste,
reds->config->agent_file_xfer,
reds_use_client_monitors_config(reds),
TRUE);
agent_msg_filter_init(&dev->priv->read_filter, reds->config->agent_copypaste,
reds->config->agent_file_xfer,
reds_use_client_monitors_config(reds),
TRUE);
}
| 0
|
498,081
|
static void print_header(void)
{
char *logo = NULL, *logo_link = NULL;
html("<table id='header'>\n");
html("<tr>\n");
if (ctx.repo && ctx.repo->logo && *ctx.repo->logo)
logo = ctx.repo->logo;
else
logo = ctx.cfg.logo;
if (ctx.repo && ctx.repo->logo_link && *ctx.repo->logo_link)
logo_link = ctx.repo->logo_link;
else
logo_link = ctx.cfg.logo_link;
if (logo && *logo) {
html("<td class='logo' rowspan='2'><a href='");
if (logo_link && *logo_link)
html_attr(logo_link);
else
html_attr(cgit_rooturl());
html("'><img src='");
html_attr(logo);
html("' alt='cgit logo'/></a></td>\n");
}
html("<td class='main'>");
if (ctx.repo) {
cgit_index_link("index", NULL, NULL, NULL, NULL, 0, 1);
html(" : ");
cgit_summary_link(ctx.repo->name, ctx.repo->name, NULL, NULL);
if (ctx.env.authenticated) {
html("</td><td class='form'>");
html("<form method='get' action=''>\n");
cgit_add_hidden_formfields(0, 1, ctx.qry.page);
html("<select name='h' onchange='this.form.submit();'>\n");
for_each_branch_ref(print_branch_option, ctx.qry.head);
if (ctx.repo->enable_remote_branches)
for_each_remote_ref(print_branch_option, ctx.qry.head);
html("</select> ");
html("<input type='submit' name='' value='switch'/>");
html("</form>");
}
} else
html_txt(ctx.cfg.root_title);
html("</td></tr>\n");
html("<tr><td class='sub'>");
if (ctx.repo) {
html_txt(ctx.repo->desc);
html("</td><td class='sub right'>");
html_txt(ctx.repo->owner);
} else {
if (ctx.cfg.root_desc)
html_txt(ctx.cfg.root_desc);
else if (ctx.cfg.index_info)
html_include(ctx.cfg.index_info);
}
html("</td></tr></table>\n");
}
| 0
|
437,407
|
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
struct vhost_vring_addr *addr = &vq->ring_addrs;
uint64_t len, expected_len;
if (vq_is_packed(dev)) {
len = sizeof(struct vring_packed_desc) * vq->size;
vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
vq->log_guest_addr = 0;
if (vq->desc_packed == NULL ||
len != sizeof(struct vring_packed_desc) *
vq->size) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to map desc_packed ring.\n",
dev->vid);
return dev;
}
dev = numa_realloc(dev, vq_index);
vq = dev->virtqueue[vq_index];
addr = &vq->ring_addrs;
len = sizeof(struct vring_packed_desc_event);
vq->driver_event = (struct vring_packed_desc_event *)
(uintptr_t)ring_addr_to_vva(dev,
vq, addr->avail_user_addr, &len);
if (vq->driver_event == NULL ||
len != sizeof(struct vring_packed_desc_event)) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to find driver area address.\n",
dev->vid);
return dev;
}
len = sizeof(struct vring_packed_desc_event);
vq->device_event = (struct vring_packed_desc_event *)
(uintptr_t)ring_addr_to_vva(dev,
vq, addr->used_user_addr, &len);
if (vq->device_event == NULL ||
len != sizeof(struct vring_packed_desc_event)) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to find device area address.\n",
dev->vid);
return dev;
}
vq->access_ok = 1;
return dev;
}
/* The addresses are converted from QEMU virtual to Vhost virtual. */
if (vq->desc && vq->avail && vq->used)
return dev;
len = sizeof(struct vring_desc) * vq->size;
vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->desc_user_addr, &len);
if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to map desc ring.\n",
dev->vid);
return dev;
}
dev = numa_realloc(dev, vq_index);
vq = dev->virtqueue[vq_index];
addr = &vq->ring_addrs;
len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
len += sizeof(uint16_t);
expected_len = len;
vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->avail_user_addr, &len);
if (vq->avail == 0 || len != expected_len) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to map avail ring.\n",
dev->vid);
return dev;
}
len = sizeof(struct vring_used) +
sizeof(struct vring_used_elem) * vq->size;
if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
len += sizeof(uint16_t);
expected_len = len;
vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->used_user_addr, &len);
if (vq->used == 0 || len != expected_len) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to map used ring.\n",
dev->vid);
return dev;
}
if (vq->last_used_idx != vq->used->idx) {
RTE_LOG(WARNING, VHOST_CONFIG,
"last_used_idx (%u) and vq->used->idx (%u) mismatches; "
"some packets maybe resent for Tx and dropped for Rx\n",
vq->last_used_idx, vq->used->idx);
vq->last_used_idx = vq->used->idx;
vq->last_avail_idx = vq->used->idx;
}
vq->log_guest_addr =
translate_log_addr(dev, vq, addr->log_guest_addr);
if (vq->log_guest_addr == 0) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to map log_guest_addr .\n",
dev->vid);
return dev;
}
vq->access_ok = 1;
VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
dev->vid, vq->desc);
VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
dev->vid, vq->avail);
VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
dev->vid, vq->used);
VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
dev->vid, vq->log_guest_addr);
return dev;
}
| 0
|
483,489
|
static int __init efisubsys_init(void)
{
int error;
if (!efi_enabled(EFI_BOOT))
return 0;
/*
* Since we process only one efi_runtime_service() at a time, an
* ordered workqueue (which creates only one execution context)
* should suffice all our needs.
*/
efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
if (!efi_rts_wq) {
pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return 0;
}
/* We register the efi directory at /sys/firmware/efi */
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
pr_err("efi: Firmware registration failed.\n");
return -ENOMEM;
}
error = generic_ops_register();
if (error)
goto err_put;
if (efi_enabled(EFI_RUNTIME_SERVICES))
efivar_ssdt_load();
error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
if (error) {
pr_err("efi: Sysfs attribute export failed with error %d.\n",
error);
goto err_unregister;
}
error = efi_runtime_map_init(efi_kobj);
if (error)
goto err_remove_group;
/* and the standard mountpoint for efivarfs */
error = sysfs_create_mount_point(efi_kobj, "efivars");
if (error) {
pr_err("efivars: Subsystem registration failed.\n");
goto err_remove_group;
}
return 0;
err_remove_group:
sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
generic_ops_unregister();
err_put:
kobject_put(efi_kobj);
return error;
}
| 0
|
96,946
|
bool decode(ArgumentDecoder* decoder, RetainPtr<CFDateRef>& result)
{
double absoluteTime;
if (!decoder->decodeDouble(absoluteTime))
return false;
result.adoptCF(CFDateCreate(0, absoluteTime));
return true;
}
| 0
|
513,314
|
pick_table_access_method(JOIN_TAB *tab)
{
switch (tab->type)
{
case JT_REF:
tab->read_first_record= join_read_always_key;
tab->read_record.read_record= join_read_next_same;
break;
case JT_REF_OR_NULL:
tab->read_first_record= join_read_always_key_or_null;
tab->read_record.read_record= join_read_next_same_or_null;
break;
case JT_CONST:
tab->read_first_record= join_read_const;
tab->read_record.read_record= join_no_more_records;
break;
case JT_EQ_REF:
tab->read_first_record= join_read_key;
tab->read_record.read_record= join_no_more_records;
break;
case JT_FT:
tab->read_first_record= join_ft_read_first;
tab->read_record.read_record= join_ft_read_next;
break;
case JT_SYSTEM:
tab->read_first_record= join_read_system;
tab->read_record.read_record= join_no_more_records;
break;
/* keep gcc happy */
default:
break;
}
}
| 0
|
301,443
|
static int vfswrap_fstat(vfs_handle_struct *handle, files_struct *fsp, SMB_STRUCT_STAT *sbuf)
{
int result;
START_PROFILE(syscall_fstat);
result = sys_fstat(fsp->fh->fd,
sbuf, lp_fake_dir_create_times(SNUM(handle->conn)));
END_PROFILE(syscall_fstat);
return result;
}
| 0
|
301,412
|
static ssize_t vfswrap_sendfile(vfs_handle_struct *handle, int tofd, files_struct *fromfsp, const DATA_BLOB *hdr,
off_t offset, size_t n)
{
ssize_t result;
START_PROFILE_BYTES(syscall_sendfile, n);
result = sys_sendfile(tofd, fromfsp->fh->fd, hdr, offset, n);
END_PROFILE(syscall_sendfile);
return result;
}
| 0
|
242,268
|
PamData(BareosSocket* UA_sock, const std::string& passwd)
: UA_sock_(UA_sock), passwd_(passwd)
{
}
| 0
|
270,920
|
::tensorflow::Status MakeSplits(
const Tensor& indices_in, const OpInputList& params_nested_splits_in,
SPLITS_TYPE num_params_dense_values,
std::vector<std::vector<SPLITS_TYPE>>* out_splits,
std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>>* value_slices,
SPLITS_TYPE* num_values) {
*num_values = 0;
value_slices->clear();
int num_splits = indices_in.dims() - 1 + params_nested_splits_in.size();
out_splits->assign(num_splits, {0});
// Get Eigen tensors.
const auto& indices = indices_in.flat<INDEX_TYPE>();
std::vector<ConstFlatType> params_nested_splits;
params_nested_splits.reserve(params_nested_splits_in.size());
for (const auto& splits_in : params_nested_splits_in) {
params_nested_splits.push_back(splits_in.flat<SPLITS_TYPE>());
}
TF_RETURN_IF_ERROR(
ValidateSplits(params_nested_splits, num_params_dense_values));
// Add `splits` that come from all but the last dimension of the dense
// Tensor `indices`. In particular, for each dimension D, we add a
// splits tensor whose values are:
// range(reduce_prod(splits.shape[:D]) + 1) * splits.shape[D+1]
// E.g., if indices.shape=[2, 3, 4] then we will add splits tensors:
// [0, 3, 6] # length=2+1, stride=3
// [0, 4, 8, 12, 16, 20, 24] # length=2*3+1, stride=4
int nrows = 1;
for (int dim = 0; dim < indices_in.dims() - 1; ++dim) {
nrows *= indices_in.dim_size(dim);
int row_length = indices_in.dim_size(dim + 1);
for (int i = 1; i < nrows + 1; ++i) {
out_splits->at(dim).push_back(i * row_length);
}
}
// Add `splits` that come from `params_nested_splits`. Starting with the
// outermost ragged dimension (i.e., the first `splits` tensor), we work
// our way in, finding the range of values that should be copied. As we
// go, we update the output `splits` for each dimension with the appropriate
// values. In particular, the *lengths* of the slices from `param_splits`
// should be copied to generate corresponding slice lengths in the output
// splits. E.g., if we are copying a ragged row with length 4, then we
// should add a new split point to out_splits that is 4 greater than the
// previous split point in out_splits.
for (int i = 0; i < indices.size(); ++i) {
int start = indices(i);
int limit = indices(i) + 1;
// Copy splits.
for (int dim = 0; dim < params_nested_splits.size(); ++dim) {
const auto& splits = params_nested_splits[dim];
int out_dim = dim + indices_in.dims() - 1;
if (out_dim >= 0) {
SPLITS_TYPE delta = out_splits->at(out_dim).back() - splits(start);
for (int j = start; j < limit; ++j) {
out_splits->at(out_dim).push_back(splits(j + 1) + delta);
}
}
start = splits(start);
limit = splits(limit);
}
if (limit != start) {
value_slices->emplace_back(start, limit);
*num_values += limit - start;
}
}
return ::tensorflow::Status::OK();
}
| 0
|
503,855
|
SCM_DEFINE (scm_close_fdes, "close-fdes", 1, 0, 0,
(SCM fd),
"A simple wrapper for the @code{close} system call.\n"
"Close file descriptor @var{fd}, which must be an integer.\n"
"Unlike close (@pxref{Ports and File Descriptors, close}),\n"
"the file descriptor will be closed even if a port is using it.\n"
"The return value is unspecified.")
#define FUNC_NAME s_scm_close_fdes
{
int c_fd;
int rv;
c_fd = scm_to_int (fd);
SCM_SYSCALL (rv = close (c_fd));
if (rv < 0)
SCM_SYSERROR;
return SCM_UNSPECIFIED;
}
| 0
|
261,423
|
static int decode_sao_offset_sign(thread_context* tctx)
{
logtrace(LogSlice,"# sao_offset_sign\n");
int value = decode_CABAC_bypass(&tctx->cabac_decoder);
logtrace(LogSymbols,"$1 sao_offset_sign=%d\n",value);
return value;
}
| 0
|
244,014
|
GF_Err trex_box_size(GF_Box *s)
{
GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s;
ptr->size += 20;
return GF_OK;
}
| 0
|
225,475
|
string SwapNodeNamesSwitchControlErrorMsg(absl::string_view node_name) {
return absl::Substitute(
"can't swap node name '$0' as it will become a Switch control dependency",
node_name);
}
| 0
|
225,441
|
static int vidioc_g_fmt_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct v4l2_loopback_device *dev;
MARK();
dev = v4l2loopback_getdevice(file);
if (!dev->ready_for_capture)
return -EINVAL;
fmt->fmt.pix = dev->pix_format;
MARK();
return 0;
}
| 0
|
383,304
|
gdImageLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag;
int wid;
int w, wstart;
int thick = im->thick;
/* 2.0.10: Nick Atty: clip to edges of drawing rectangle, return if no points need to be drawn */
if (!clip_1d(&x1,&y1,&x2,&y2,gdImageSX(im)) || !clip_1d(&y1,&x1,&y2,&x2,gdImageSY(im))) {
return;
}
dx = abs(x2 - x1);
dy = abs(y2 - y1);
if (dy <= dx) {
/* More-or-less horizontal. use wid for vertical stroke */
/* Doug Claar: watch out for NaN in atan2 (2.0.5) */
if ((dx == 0) && (dy == 0)) {
wid = 1;
} else {
wid = (int)(thick * cos (atan2 (dy, dx)));
if (wid == 0) {
wid = 1;
}
}
d = 2 * dy - dx;
incr1 = 2 * dy;
incr2 = 2 * (dy - dx);
if (x1 > x2) {
x = x2;
y = y2;
ydirflag = (-1);
xend = x1;
} else {
x = x1;
y = y1;
ydirflag = 1;
xend = x2;
}
/* Set up line thickness */
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel(im, x, w, color);
}
if (((y2 - y1) * ydirflag) > 0) {
while (x < xend) {
x++;
if (d < 0) {
d += incr1;
} else {
y++;
d += incr2;
}
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, x, w, color);
}
}
} else {
while (x < xend) {
x++;
if (d < 0) {
d += incr1;
} else {
y--;
d += incr2;
}
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, x, w, color);
}
}
}
} else {
/* More-or-less vertical. use wid for horizontal stroke */
wid = (int)(thick * sin (atan2 (dy, dx)));
if (wid == 0) {
wid = 1;
}
d = 2 * dx - dy;
incr1 = 2 * dx;
incr2 = 2 * (dx - dy);
if (y1 > y2) {
y = y2;
x = x2;
yend = y1;
xdirflag = (-1);
} else {
y = y1;
x = x1;
yend = y2;
xdirflag = 1;
}
/* Set up line thickness */
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, w, y, color);
}
if (((x2 - x1) * xdirflag) > 0) {
while (y < yend) {
y++;
if (d < 0) {
d += incr1;
} else {
x++;
d += incr2;
}
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, w, y, color);
}
}
} else {
while (y < yend) {
y++;
if (d < 0) {
d += incr1;
} else {
x--;
d += incr2;
}
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, w, y, color);
}
}
}
}
}
| 0
|
398,525
|
RZ_API void rz_bin_dwarf_debug_abbrev_free(RzBinDwarfDebugAbbrev *da) {
size_t i;
if (!da) {
return;
}
for (i = 0; i < da->count; i++) {
RZ_FREE(da->decls[i].defs);
}
RZ_FREE(da->decls);
free(da);
}
| 0
|
247,682
|
void setExpectedClientCertUri(const std::string& expected_client_cert_uri) {
expected_client_cert_uri_ = {expected_client_cert_uri};
}
| 0
|
401,511
|
static inline void timers_update_migration(void) { }
| 0
|
508,315
|
open_and_process_table(THD *thd, TABLE_LIST *tables, uint *counter, uint flags,
Prelocking_strategy *prelocking_strategy,
bool has_prelocking_list, Open_table_context *ot_ctx)
{
bool error= FALSE;
bool safe_to_ignore_table= FALSE;
LEX *lex= thd->lex;
DBUG_ENTER("open_and_process_table");
DEBUG_SYNC(thd, "open_and_process_table");
/*
Ignore placeholders for derived tables. After derived tables
processing, link to created temporary table will be put here.
If this is derived table for view then we still want to process
routines used by this view.
*/
if (tables->derived)
{
if (!tables->view)
{
if (!tables->is_derived())
tables->set_derived();
goto end;
}
/*
We restore view's name and database wiped out by derived tables
processing and fall back to standard open process in order to
obtain proper metadata locks and do other necessary steps like
stored routine processing.
*/
tables->db= tables->view_db.str;
tables->db_length= tables->view_db.length;
tables->table_name= tables->view_name.str;
tables->table_name_length= tables->view_name.length;
}
if (!tables->derived &&
is_infoschema_db(tables->db, tables->db_length))
{
/*
Check whether the information schema contains a table
whose name is tables->schema_table_name
*/
ST_SCHEMA_TABLE *schema_table= tables->schema_table;
if (!schema_table ||
(schema_table->hidden &&
((sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0 ||
/*
this check is used for show columns|keys from I_S hidden table
*/
lex->sql_command == SQLCOM_SHOW_FIELDS ||
lex->sql_command == SQLCOM_SHOW_KEYS)))
{
my_error(ER_UNKNOWN_TABLE, MYF(0),
tables->table_name, INFORMATION_SCHEMA_NAME.str);
DBUG_RETURN(1);
}
}
/*
If this TABLE_LIST object is a placeholder for an information_schema
table, create a temporary table to represent the information_schema
table in the query. Do not fill it yet - will be filled during
execution.
*/
if (tables->schema_table)
{
/*
If this information_schema table is merged into a mergeable
view, ignore it for now -- it will be filled when its respective
TABLE_LIST is processed. This code works only during re-execution.
*/
if (tables->view)
{
MDL_ticket *mdl_ticket;
/*
We still need to take a MDL lock on the merged view to protect
it from concurrent changes.
*/
if (!open_table_get_mdl_lock(thd, ot_ctx, &tables->mdl_request,
flags, &mdl_ticket) &&
mdl_ticket != NULL)
goto process_view_routines;
/* Fall-through to return error. */
}
else if (!mysql_schema_table(thd, lex, tables) &&
!check_and_update_table_version(thd, tables, tables->table->s))
{
goto end;
}
error= TRUE;
goto end;
}
DBUG_PRINT("tcache", ("opening table: '%s'.'%s' item: %p",
tables->db, tables->table_name, tables)); //psergey: invalid read of size 1 here
(*counter)++;
/*
Not a placeholder: must be a base/temporary table or a view. Let us open it.
*/
if (tables->table)
{
/*
If this TABLE_LIST object has an associated open TABLE object
(TABLE_LIST::table is not NULL), that TABLE object must be a pre-opened
temporary table.
*/
DBUG_ASSERT(is_temporary_table(tables));
}
else if (tables->open_type == OT_TEMPORARY_ONLY)
{
/*
OT_TEMPORARY_ONLY means that we are in CREATE TEMPORARY TABLE statement.
Also such table list element can't correspond to prelocking placeholder
or to underlying table of merge table.
So existing temporary table should have been preopened by this moment
and we can simply continue without trying to open temporary or base
table.
*/
DBUG_ASSERT(tables->open_strategy);
DBUG_ASSERT(!tables->prelocking_placeholder);
DBUG_ASSERT(!tables->parent_l);
DBUG_RETURN(0);
}
/* Not a placeholder: must be a base table or a view. Let us open it. */
if (tables->prelocking_placeholder)
{
/*
For the tables added by the pre-locking code, attempt to open
the table but fail silently if the table does not exist.
The real failure will occur when/if a statement attempts to use
that table.
*/
No_such_table_error_handler no_such_table_handler;
thd->push_internal_handler(&no_such_table_handler);
/*
We're opening a table from the prelocking list.
Since this table list element might have been added after pre-opening
of temporary tables we have to try to open temporary table for it.
We can't simply skip this table list element and postpone opening of
temporary table till the execution of substatement for several reasons:
- Temporary table can be a MERGE table with base underlying tables,
so its underlying tables has to be properly open and locked at
prelocking stage.
- Temporary table can be a MERGE table and we might be in PREPARE
phase for a prepared statement. In this case it is important to call
HA_ATTACH_CHILDREN for all merge children.
This is necessary because merge children remember "TABLE_SHARE ref type"
and "TABLE_SHARE def version" in the HA_ATTACH_CHILDREN operation.
If HA_ATTACH_CHILDREN is not called, these attributes are not set.
Then, during the first EXECUTE, those attributes need to be updated.
That would cause statement re-preparing (because changing those
attributes during EXECUTE is caught by THD::m_reprepare_observers).
The problem is that since those attributes are not set in merge
children, another round of PREPARE will not help.
*/
error= thd->open_temporary_table(tables);
if (!error && !tables->table)
error= open_table(thd, tables, ot_ctx);
thd->pop_internal_handler();
safe_to_ignore_table= no_such_table_handler.safely_trapped_errors();
}
else if (tables->parent_l && (thd->open_options & HA_OPEN_FOR_REPAIR))
{
/*
Also fail silently for underlying tables of a MERGE table if this
table is opened for CHECK/REPAIR TABLE statement. This is needed
to provide complete list of problematic underlying tables in
CHECK/REPAIR TABLE output.
*/
Repair_mrg_table_error_handler repair_mrg_table_handler;
thd->push_internal_handler(&repair_mrg_table_handler);
error= thd->open_temporary_table(tables);
if (!error && !tables->table)
error= open_table(thd, tables, ot_ctx);
thd->pop_internal_handler();
safe_to_ignore_table= repair_mrg_table_handler.safely_trapped_errors();
}
else
{
if (tables->parent_l)
{
/*
Even if we are opening table not from the prelocking list we
still might need to look for a temporary table if this table
list element corresponds to underlying table of a merge table.
*/
error= thd->open_temporary_table(tables);
}
if (!error && !tables->table)
error= open_table(thd, tables, ot_ctx);
}
if (error)
{
if (! ot_ctx->can_recover_from_failed_open() && safe_to_ignore_table)
{
DBUG_PRINT("info", ("open_table: ignoring table '%s'.'%s'",
tables->db, tables->alias));
error= FALSE;
}
goto end;
}
/*
We can't rely on simple check for TABLE_LIST::view to determine
that this is a view since during re-execution we might reopen
ordinary table in place of view and thus have TABLE_LIST::view
set from repvious execution and TABLE_LIST::table set from
current.
*/
if (!tables->table && tables->view)
{
/* VIEW placeholder */
(*counter)--;
/*
tables->next_global list consists of two parts:
1) Query tables and underlying tables of views.
2) Tables used by all stored routines that this statement invokes on
execution.
We need to know where the bound between these two parts is. If we've
just opened a view, which was the last table in part #1, and it
has added its base tables after itself, adjust the boundary pointer
accordingly.
*/
if (lex->query_tables_own_last == &(tables->next_global) &&
tables->view->query_tables)
lex->query_tables_own_last= tables->view->query_tables_last;
/*
Let us free memory used by 'sroutines' hash here since we never
call destructor for this LEX.
*/
my_hash_free(&tables->view->sroutines);
goto process_view_routines;
}
/*
Special types of open can succeed but still don't set
TABLE_LIST::table to anything.
*/
if (tables->open_strategy && !tables->table)
goto end;
error= extend_table_list(thd, tables, prelocking_strategy, has_prelocking_list);
if (error)
goto end;
/* Copy grant information from TABLE_LIST instance to TABLE one. */
tables->table->grant= tables->grant;
/* Check and update metadata version of a base table. */
error= check_and_update_table_version(thd, tables, tables->table->s);
if (error)
goto end;
/*
After opening a MERGE table add the children to the query list of
tables, so that they are opened too.
Note that placeholders don't have the handler open.
*/
/* MERGE tables need to access parent and child TABLE_LISTs. */
DBUG_ASSERT(tables->table->pos_in_table_list == tables);
/* Non-MERGE tables ignore this call. */
if (tables->table->file->extra(HA_EXTRA_ADD_CHILDREN_LIST))
{
error= TRUE;
goto end;
}
process_view_routines:
/*
Again we may need cache all routines used by this view and add
tables used by them to table list.
*/
if (tables->view &&
thd->locked_tables_mode <= LTM_LOCK_TABLES &&
! has_prelocking_list)
{
bool need_prelocking= FALSE;
TABLE_LIST **save_query_tables_last= lex->query_tables_last;
error= prelocking_strategy->handle_view(thd, lex, tables,
&need_prelocking);
if (need_prelocking && ! lex->requires_prelocking())
lex->mark_as_requiring_prelocking(save_query_tables_last);
if (error)
goto end;
}
end:
DBUG_RETURN(error);
}
| 0
|
261,214
|
int wm_SemInit(wm_Sem *s) {
*s = xSemaphoreCreateBinary();
xSemaphoreGive(*s);
return 0;
}
| 0
|
195,626
|
static size_t send_control_msg(VirtIOSerial *vser, void *buf, size_t len)
{
VirtQueueElement elem;
VirtQueue *vq;
vq = vser->c_ivq;
if (!virtio_queue_ready(vq)) {
return 0;
}
if (!virtqueue_pop(vq, &elem)) {
return 0;
}
memcpy(elem.in_sg[0].iov_base, buf, len);
virtqueue_push(vq, &elem, len);
virtio_notify(VIRTIO_DEVICE(vser), vq);
return len;
}
| 1
|
384,908
|
vim_FullName(
char_u *fname,
char_u *buf,
int len,
int force) // force expansion even when already absolute
{
int retval = OK;
int url;
*buf = NUL;
if (fname == NULL)
return FAIL;
url = path_with_url(fname);
if (!url)
retval = mch_FullName(fname, buf, len, force);
if (url || retval == FAIL)
{
// something failed; use the file name (truncate when too long)
vim_strncpy(buf, fname, len - 1);
}
#if defined(MSWIN)
slash_adjust(buf);
#endif
return retval;
}
| 0
|
244,172
|
void tfdt_box_del(GF_Box *s)
{
gf_free(s);
}
| 0
|
211,136
|
static RzDyldRebaseInfos *get_rebase_infos(RzDyldCache *cache) {
RzDyldRebaseInfos *result = RZ_NEW0(RzDyldRebaseInfos);
if (!result) {
return NULL;
}
if (!cache->hdr->slideInfoOffset || !cache->hdr->slideInfoSize) {
ut32 total_slide_infos = 0;
ut32 n_slide_infos[MAX_N_HDR];
ut32 i;
for (i = 0; i < cache->n_hdr && i < MAX_N_HDR; i++) {
ut64 hdr_offset = cache->hdr_offset[i];
if (!rz_buf_read_le32_at(cache->buf, 0x13c + hdr_offset, &n_slide_infos[i])) {
goto beach;
}
total_slide_infos += n_slide_infos[i];
}
if (!total_slide_infos) {
goto beach;
}
RzDyldRebaseInfosEntry *infos = RZ_NEWS0(RzDyldRebaseInfosEntry, total_slide_infos);
if (!infos) {
goto beach;
}
ut32 k = 0;
for (i = 0; i < cache->n_hdr && i < MAX_N_HDR; i++) {
ut64 hdr_offset = cache->hdr_offset[i];
if (!n_slide_infos[i]) {
continue;
}
ut32 sio;
if (!rz_buf_read_le32_at(cache->buf, 0x138 + hdr_offset, &sio)) {
continue;
}
ut64 slide_infos_offset = sio;
if (!slide_infos_offset) {
continue;
}
slide_infos_offset += hdr_offset;
ut32 j;
RzDyldRebaseInfo *prev_info = NULL;
for (j = 0; j < n_slide_infos[i]; j++) {
ut64 offset = slide_infos_offset + j * sizeof(cache_mapping_slide);
cache_mapping_slide entry;
if (rz_buf_fread_at(cache->buf, offset, (ut8 *)&entry, "6lii", 1) != sizeof(cache_mapping_slide)) {
break;
}
if (entry.slideInfoOffset && entry.slideInfoSize) {
infos[k].start = entry.fileOffset + hdr_offset;
infos[k].end = infos[k].start + entry.size;
ut64 slide = prev_info ? prev_info->slide : UT64_MAX;
infos[k].info = get_rebase_info(cache, entry.slideInfoOffset + hdr_offset, entry.slideInfoSize, entry.fileOffset + hdr_offset, slide);
prev_info = infos[k].info;
k++;
}
}
}
if (!k) {
free(infos);
goto beach;
}
if (k < total_slide_infos) {
RzDyldRebaseInfosEntry *pruned_infos = RZ_NEWS0(RzDyldRebaseInfosEntry, k);
if (!pruned_infos) {
free(infos);
goto beach;
}
memcpy(pruned_infos, infos, sizeof(RzDyldRebaseInfosEntry) * k);
free(infos);
infos = pruned_infos;
}
result->entries = infos;
result->length = k;
return result;
}
if (cache->hdr->mappingCount > 1) {
RzDyldRebaseInfosEntry *infos = RZ_NEWS0(RzDyldRebaseInfosEntry, 1);
if (!infos) {
goto beach;
}
infos[0].start = cache->maps[1].fileOffset;
infos[0].end = infos[0].start + cache->maps[1].size;
infos[0].info = get_rebase_info(cache, cache->hdr->slideInfoOffset, cache->hdr->slideInfoSize, infos[0].start, UT64_MAX);
result->entries = infos;
result->length = 1;
return result;
}
beach:
free(result);
return NULL;
}
| 1
|
224,572
|
Status QuantizeV2Shape(InferenceContext* c) {
int axis = -1;
Status s = c->GetAttr("axis", &axis);
if (!s.ok() && s.code() != error::NOT_FOUND) {
return s;
}
if (axis < -1) {
return errors::InvalidArgument("axis should be at least -1, got ", axis);
}
const int minmax_rank = (axis == -1) ? 0 : 1;
TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c));
ShapeHandle minmax;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), minmax_rank, &minmax));
if (axis != -1) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input));
DimensionHandle depth;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth));
}
c->set_output(1, minmax);
c->set_output(2, minmax);
return Status::OK();
}
| 0
|
343,269
|
char *skip_telnet_controls(const char *str)
{
if (str == NULL) {
return NULL;
}
while (*str != 0 && (unsigned char) *str >= 240U) {
str++;
}
return (char *) str;
}
| 0
|
259,613
|
bool HierarchicalBitmapRequester::isNextMCULineReady(void) const
{
#if ACCUSOFT_CODE
// MCUs can only be written if the smallest scale, which is written first,
// is ready.
return m_pSmallestScale->isNextMCULineReady();
#else
return false;
#endif
}
| 0
|
256,459
|
JANET_CORE_FN(cfun_array_remove,
"(array/remove arr at &opt n)",
"Remove up to `n` elements starting at index `at` in array `arr`. `at` can index from "
"the end of the array with a negative index, and `n` must be a non-negative integer. "
"By default, `n` is 1. "
"Returns the array.") {
janet_arity(argc, 2, 3);
JanetArray *array = janet_getarray(argv, 0);
int32_t at = janet_getinteger(argv, 1);
int32_t n = 1;
if (at < 0) {
at = array->count + at + 1;
}
if (at < 0 || at > array->count)
janet_panicf("removal index %d out of range [0,%d]", at, array->count);
if (argc == 3) {
n = janet_getinteger(argv, 2);
if (n < 0)
janet_panicf("expected non-negative integer for argument n, got %v", argv[2]);
}
if (at + n > array->count) {
n = array->count - at;
}
memmove(array->data + at,
array->data + at + n,
(array->count - at - n) * sizeof(Janet));
array->count -= n;
return argv[0];
}
| 0
|
282,988
|
LJ_NOINLINE void lj_err_callerv(lua_State *L, ErrMsg em, ...)
{
const char *msg;
va_list argp;
va_start(argp, em);
msg = lj_str_pushvf(L, err2msg(em), argp);
va_end(argp);
lj_err_callermsg(L, msg);
}
| 0
|
338,080
|
uint32_t WasmBinaryWriter::getFunctionIndex(Name name) const {
auto it = indexes.functionIndexes.find(name);
assert(it != indexes.functionIndexes.end());
return it->second;
}
| 0
|
201,913
|
set_fflags_platform(struct archive_write_disk *a, int fd, const char *name,
mode_t mode, unsigned long set, unsigned long clear)
{
int ret;
int myfd = fd;
int newflags, oldflags;
/*
* Linux has no define for the flags that are only settable by
* the root user. This code may seem a little complex, but
* there seem to be some Linux systems that lack these
* defines. (?) The code below degrades reasonably gracefully
* if sf_mask is incomplete.
*/
const int sf_mask = 0
#if defined(FS_IMMUTABLE_FL)
| FS_IMMUTABLE_FL
#elif defined(EXT2_IMMUTABLE_FL)
| EXT2_IMMUTABLE_FL
#endif
#if defined(FS_APPEND_FL)
| FS_APPEND_FL
#elif defined(EXT2_APPEND_FL)
| EXT2_APPEND_FL
#endif
#if defined(FS_JOURNAL_DATA_FL)
| FS_JOURNAL_DATA_FL
#endif
;
if (set == 0 && clear == 0)
return (ARCHIVE_OK);
/* Only regular files and dirs can have flags. */
if (!S_ISREG(mode) && !S_ISDIR(mode))
return (ARCHIVE_OK);
/* If we weren't given an fd, open it ourselves. */
if (myfd < 0) {
myfd = open(name, O_RDONLY | O_NONBLOCK | O_BINARY | O_CLOEXEC);
__archive_ensure_cloexec_flag(myfd);
}
if (myfd < 0)
return (ARCHIVE_OK);
/*
* XXX As above, this would be way simpler if we didn't have
* to read the current flags from disk. XXX
*/
ret = ARCHIVE_OK;
/* Read the current file flags. */
if (ioctl(myfd,
#ifdef FS_IOC_GETFLAGS
FS_IOC_GETFLAGS,
#else
EXT2_IOC_GETFLAGS,
#endif
&oldflags) < 0)
goto fail;
/* Try setting the flags as given. */
newflags = (oldflags & ~clear) | set;
if (ioctl(myfd,
#ifdef FS_IOC_SETFLAGS
FS_IOC_SETFLAGS,
#else
EXT2_IOC_SETFLAGS,
#endif
&newflags) >= 0)
goto cleanup;
if (errno != EPERM)
goto fail;
/* If we couldn't set all the flags, try again with a subset. */
newflags &= ~sf_mask;
oldflags &= sf_mask;
newflags |= oldflags;
if (ioctl(myfd,
#ifdef FS_IOC_SETFLAGS
FS_IOC_SETFLAGS,
#else
EXT2_IOC_SETFLAGS,
#endif
&newflags) >= 0)
goto cleanup;
/* We couldn't set the flags, so report the failure. */
fail:
archive_set_error(&a->archive, errno,
"Failed to set file flags");
ret = ARCHIVE_WARN;
cleanup:
if (fd < 0)
close(myfd);
return (ret);
}
| 1
|
234,772
|
static int chunk_profiles_filter(u64 chunk_type,
struct btrfs_balance_args *bargs)
{
chunk_type = chunk_to_extended(chunk_type) &
BTRFS_EXTENDED_PROFILE_MASK;
if (bargs->profiles & chunk_type)
return 0;
return 1;
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.