idx
int64 | func
string | target
int64 |
|---|---|---|
463,073
|
/*
 * Reset the TX DMA engine: restore the TX DMA registers to their
 * reset defaults, recompute interrupt masks, and clear the software
 * TX ring bookkeeping.
 */
static void sungem_reset_tx(SunGEMState *s)
{
    trace_sungem_tx_reset();

    /* XXX Do TXCFG */
    /* XXX Check value */
    /* NOTE(review): 0x90 / 0x118010 look like hardware reset values -- confirm
     * against the GEM datasheet. */
    s->txdmaregs[TXDMA_FSZ >> 2] = 0x90;
    s->txdmaregs[TXDMA_TXDONE >> 2] = 0;
    s->txdmaregs[TXDMA_KICK >> 2] = 0;
    s->txdmaregs[TXDMA_CFG >> 2] = 0x118010;

    /* Re-derive interrupt masks now that the registers changed. */
    sungem_update_masks(s);

    /* Forget any in-flight TX state. */
    s->tx_size = 0;
    s->tx_first_ctl = 0;
}
| 0
|
278,252
|
inindent(int extra)
{
    // Return TRUE when the cursor (allowing "extra" extra columns) lies
    // within the leading whitespace of the current line.
    char_u	*p;
    colnr_T	wcols;

    // Count the number of leading whitespace columns.
    for (wcols = 0, p = ml_get_curline(); VIM_ISWHITE(*p); ++p)
	++wcols;

    return (wcols >= curwin->w_cursor.col + extra) ? TRUE : FALSE;
}
| 0
|
224,723
|
/*
 * Allocate a new 'bxml' (binary XML) ISOBMFF box.
 * ISOM_DECL_BOX_ALLOC declares and initializes the local variable "tmp".
 */
GF_Box *bxml_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_BinaryXMLBox, GF_ISOM_BOX_TYPE_BXML);
	return (GF_Box *)tmp;
}
| 0
|
401,570
|
/*
 * Initialize a timer: run the debug-objects init hook, then perform the
 * actual initialization via do_init_timer() with the given callback,
 * flags, name and lockdep class key.
 */
void init_timer_key(struct timer_list *timer,
		    void (*func)(struct timer_list *), unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, func, flags, name, key);
}
| 0
|
240,265
|
free_yank(long n)
{
    // Free the first "n" lines stored in the current yank register, then
    // release (and NULL out) the line-pointer array itself.
    if (y_current->y_array != NULL)
    {
	long	idx = n;

	while (--idx >= 0)
	    vim_free(y_current->y_array[idx]);
	VIM_CLEAR(y_current->y_array);
    }
}
| 0
|
369,405
|
/*
 * Issue a write for an io_uring request.  Imports (or restores) the user
 * iovec, applies the NOWAIT policy for non-blocking issue, takes freeze
 * protection for regular files, and either completes the request or
 * arranges an async retry via the copy_iov path (-EAGAIN).
 */
static int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &req->rw.kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		/* Async data already holds the imported iter state. */
		struct io_async_rw *rw = req->async_data;

		s = &rw->s;
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_WRITE);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->result = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			goto copy_iov;

		/* file path doesn't support NOWAIT for non-direct_IO */
		if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
		    (req->flags & REQ_F_ISREG))
			goto copy_iov;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->result);
	if (unlikely(ret))
		goto out_free;

	/*
	 * Open-code file_start_write here to grab freeze protection,
	 * which will be released by another thread in
	 * io_complete_rw(). Fool lockdep by telling it the lock got
	 * released so that it doesn't complain about the held lock when
	 * we return to userspace.
	 */
	if (req->flags & REQ_F_ISREG) {
		sb_start_write(file_inode(req->file)->i_sb);
		__sb_writers_release(file_inode(req->file)->i_sb,
					SB_FREEZE_WRITE);
	}
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = call_write_iter(req->file, kiocb, &s->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, req, &s->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto copy_iov;
done:
		kiocb_done(req, ret2, issue_flags);
	} else {
copy_iov:
		/* Restore the iter and queue the request for async retry. */
		iov_iter_restore(&s->iter, &s->iter_state);
		ret = io_setup_async_rw(req, iovec, s, false);
		return ret ?: -EAGAIN;
	}
out_free:
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}
| 0
|
232,405
|
// Builds a SparseTensorSliceDataset from the "indices", "values" and
// "dense_shape" op inputs after validating shapes, emptiness consistency
// and batch-dimension ordering.  On success *output owns the new dataset.
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
  // Create a new SparseTensorSliceDatasetOp::Dataset, insert it in
  // the step container, and return it as the output.
  const Tensor* indices;
  OP_REQUIRES_OK(ctx, ctx->input("indices", &indices));
  const Tensor* values;
  OP_REQUIRES_OK(ctx, ctx->input("values", &values));
  const Tensor* dense_shape;
  OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape));

  OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()),
              errors::InvalidArgument(
                  "Input indices should be a matrix but received shape ",
                  indices->shape().DebugString()));

  const auto num_indices = indices->NumElements();
  const auto num_values = values->NumElements();
  if (num_indices == 0 || num_values == 0) {
    OP_REQUIRES(ctx, num_indices == num_values,
                errors::InvalidArgument(
                    "If indices or values are empty, the other one must also "
                    "be. Got indices of shape ",
                    indices->shape().DebugString(), " and values of shape ",
                    values->shape().DebugString()));
  }
  OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()),
              errors::InvalidArgument(
                  "Input values should be a vector but received shape ",
                  // BUG FIX: report the shape of `values` (the tensor being
                  // checked), not `indices`.
                  values->shape().DebugString()));
  OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()),
              errors::InvalidArgument(
                  "Input shape should be a vector but received shape ",
                  dense_shape->shape().DebugString()));

  // We currently ensure that `sparse_tensor` is ordered in the
  // batch dimension.
  // TODO(mrry): Investigate ways to avoid this unconditional check
  // if we can be sure that the sparse tensor was produced in an
  // appropriate order (e.g. by `tf.parse_example()` or a Dataset
  // that batches elements into rows of a SparseTensor).
  int64_t previous_batch_index = -1;
  for (int64_t i = 0; i < indices->dim_size(0); ++i) {
    int64_t next_batch_index = indices->matrix<int64>()(i, 0);
    OP_REQUIRES(
        ctx, next_batch_index >= previous_batch_index,
        errors::Unimplemented("The SparseTensor must be ordered in the batch "
                              "dimension; handling arbitrarily ordered input "
                              "is not currently supported."));
    previous_batch_index = next_batch_index;
  }
  gtl::InlinedVector<int64, 8> std_order(dense_shape->NumElements(), 0);
  sparse::SparseTensor tensor;
  OP_REQUIRES_OK(
      ctx, sparse::SparseTensor::Create(
               *indices, *values, TensorShape(dense_shape->vec<int64>()),
               std_order, &tensor));
  *output = new Dataset<T>(ctx, std::move(tensor));
}
| 0
|
223,375
|
/*
 * Emit code that reads the character type of the next code unit into TMP1
 * and advances STR_PTR.  Multi-unit UTF sequences are decoded as needed;
 * values outside the 256-entry ctypes table yield type 0.
 */
static void read_char8_type(compiler_common *common, jump_list **backtracks, BOOL negated)
{
/* Reads the character type into TMP1, updates STR_PTR. Does not check STR_END. */
DEFINE_COMPILER;
#if defined SUPPORT_UNICODE || PCRE2_CODE_UNIT_WIDTH != 8
struct sljit_jump *jump;
#endif
#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 8
struct sljit_jump *jump2;
#endif

SLJIT_UNUSED_ARG(backtracks);
SLJIT_UNUSED_ARG(negated);

/* TMP2 = current code unit; advance STR_PTR past it. */
OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), 0);
OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));

#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 8
if (common->utf)
  {
  /* The result of this read may be unused, but saves an "else" part. */
  OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes);
  /* ASCII (< 0x80): the table lookup above already produced the answer. */
  jump = CMP(SLJIT_LESS, TMP2, 0, SLJIT_IMM, 0x80);

  if (!negated)
    {
    if (common->invalid_utf)
      add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0));

    /* Inline decode of a two-byte UTF-8 sequence. */
    OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0));
    OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
    OP2(SLJIT_SUB, TMP2, 0, TMP2, 0, SLJIT_IMM, 0xc2);
    if (common->invalid_utf)
      add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, TMP2, 0, SLJIT_IMM, 0xe0 - 0xc2));

    OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 6);
    OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, TMP1, 0);
    OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x80);
    if (common->invalid_utf)
      add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, TMP2, 0, SLJIT_IMM, 0x40));

    /* Type is 0 unless the decoded value fits the 256-entry ctypes table. */
    OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 0);
    jump2 = CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, 255);
    OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes);
    JUMPHERE(jump2);
    }
  else if (common->invalid_utf)
    {
    add_jump(compiler, &common->utfreadchar_invalid, JUMP(SLJIT_FAST_CALL));
    OP1(SLJIT_MOV, TMP2, 0, TMP1, 0);
    add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, INVALID_UTF_CHAR));

    OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 0);
    jump2 = CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, 255);
    OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes);
    JUMPHERE(jump2);
    }
  else
    add_jump(compiler, &common->utfreadtype8, JUMP(SLJIT_FAST_CALL));

  JUMPHERE(jump);
  return;
  }
#endif /* SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 8 */

#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 32
if (common->invalid_utf && negated)
  add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, TMP2, 0, SLJIT_IMM, 0x110000));
#endif /* SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 32 */

#if PCRE2_CODE_UNIT_WIDTH != 8
/* The ctypes array contains only 256 values. */
OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, 0);
jump = CMP(SLJIT_GREATER, TMP2, 0, SLJIT_IMM, 255);
#endif /* PCRE2_CODE_UNIT_WIDTH != 8 */
OP1(SLJIT_MOV_U8, TMP1, 0, SLJIT_MEM1(TMP2), common->ctypes);
#if PCRE2_CODE_UNIT_WIDTH != 8
JUMPHERE(jump);
#endif /* PCRE2_CODE_UNIT_WIDTH != 8 */

#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 16
if (common->utf && negated)
  {
  /* Skip low surrogate if necessary. */
  if (!common->invalid_utf)
    {
    OP2(SLJIT_SUB, TMP2, 0, TMP2, 0, SLJIT_IMM, 0xd800);

    if (sljit_has_cpu_feature(SLJIT_HAS_CMOV) && !HAS_VIRTUAL_REGISTERS)
      {
      /* Branch-free skip using a conditional move. */
      OP2(SLJIT_ADD, RETURN_ADDR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
      OP2U(SLJIT_SUB | SLJIT_SET_LESS, TMP2, 0, SLJIT_IMM, 0x400);
      CMOV(SLJIT_LESS, STR_PTR, RETURN_ADDR, 0);
      }
    else
      {
      jump = CMP(SLJIT_GREATER_EQUAL, TMP2, 0, SLJIT_IMM, 0x400);
      OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
      JUMPHERE(jump);
      }
    return;
    }

  /* invalid_utf: validate the surrogate pair, backtracking on errors. */
  OP2(SLJIT_SUB, TMP2, 0, TMP2, 0, SLJIT_IMM, 0xd800);
  jump = CMP(SLJIT_GREATER_EQUAL, TMP2, 0, SLJIT_IMM, 0xe000 - 0xd800);
  add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, TMP2, 0, SLJIT_IMM, 0x400));
  add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, STR_PTR, 0, STR_END, 0));

  OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(0));
  OP2(SLJIT_ADD, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
  OP2(SLJIT_SUB, TMP2, 0, TMP2, 0, SLJIT_IMM, 0xdc00);
  add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, TMP2, 0, SLJIT_IMM, 0x400));
  JUMPHERE(jump);
  return;
  }
#endif /* SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH == 16 */
}
| 0
|
238,792
|
restore_search_patterns(void)
{
    // Undo one save_search_patterns() level; only restore when the
    // outermost level is left.  Frees the current patterns first so the
    // saved copies can take their place without leaking.
    if (--save_level == 0)
    {
	vim_free(spats[0].pat);
	spats[0] = saved_spats[0];
#if defined(FEAT_EVAL)
	set_vv_searchforward();
#endif
	vim_free(spats[1].pat);
	spats[1] = saved_spats[1];
	vim_free(mr_pattern);
	mr_pattern = saved_mr_pattern;
#ifdef FEAT_SEARCH_EXTRA
	last_idx = saved_spats_last_idx;
	set_no_hlsearch(saved_spats_no_hlsearch);
#endif
    }
}
| 0
|
489,218
|
static void hfsplus_cat_build_key_uni(hfsplus_btree_key *key, u32 parent,
				      struct hfsplus_unistr *name)
{
	/* Build a catalog b-tree key from a parent CNID and an
	 * already-encoded unicode name. */
	int len = be16_to_cpu(name->length);

	key->cat.parent = cpu_to_be32(parent);
	key->cat.name.length = cpu_to_be16(len);
	/* The name is stored as 16-bit units: two bytes per unit. */
	len *= 2;
	memcpy(key->cat.name.unicode, name->unicode, len);
	key->key_len = cpu_to_be16(6 + len);
}
| 0
|
463,206
|
/*
 * Fetch annotations matching the requested entries/attribs and deliver
 * each result through "callback" (with "rock").  Handles server, mailbox
 * and message scopes, proxying to backend servers when configured.
 * Returns 0 or an IMAP error code.
 */
EXPORTED int annotate_state_fetch(annotate_state_t *state,
                                  const strarray_t *entries, const strarray_t *attribs,
                                  annotate_fetch_cb_t callback, void *rock)
{
    int i;
    struct glob *g;
    const ptrarray_t *non_db_entries;
    const annotate_entrydesc_t *db_entry;
    int r = 0;

    init_internal();

    annotate_state_start(state);
    state->callback = callback;
    state->callback_rock = rock;

    /* Build list of attributes to fetch */
    for (i = 0 ; i < attribs->count ; i++)
    {
        const char *s = attribs->data[i];
        int attribcount;

        /*
         * TODO: this is bogus. The * and % wildcard characters applied
         * to attributes in the early drafts of the ANNOTATEMORE
         * extension, but not in later drafts where those characters are
         * actually illegal in attribute names.
         */
        g = glob_init(s, '.');

        for (attribcount = 0;
             annotation_attributes[attribcount].name;
             attribcount++) {
            if (GLOB_MATCH(g, annotation_attributes[attribcount].name)) {
                if (annotation_attributes[attribcount].entry & ATTRIB_DEPRECATED) {
                    if (strcmp(s, "*"))
                        syslog(LOG_WARNING, "annotatemore_fetch: client used "
                               "deprecated attribute \"%s\", ignoring",
                               annotation_attributes[attribcount].name);
                }
                else
                    state->attribs |= annotation_attributes[attribcount].entry;
            }
        }

        glob_free(&g);
    }

    /* No recognized attributes requested: nothing to do. */
    if (!state->attribs)
        goto out;

    /* Select the entry tables for the requested annotation scope. */
    if (state->which == ANNOTATION_SCOPE_SERVER) {
        non_db_entries = &server_entries;
        db_entry = &server_db_entry;
    }
    else if (state->which == ANNOTATION_SCOPE_MAILBOX) {
        non_db_entries = &mailbox_entries;
        db_entry = &mailbox_db_entry;
    }
    else if (state->which == ANNOTATION_SCOPE_MESSAGE) {
        non_db_entries = &message_entries;
        db_entry = &message_db_entry;
    }
    else {
        syslog(LOG_ERR, "IOERROR: unknown annotation scope %d", state->which);
        r = IMAP_INTERNAL;
        goto out;
    }

    /* Build a list of callbacks for fetching the annotations */
    for (i = 0 ; i < entries->count ; i++)
    {
        const char *s = entries->data[i];
        int j;
        int check_db = 0; /* should we check the db for this entry? */

        g = glob_init(s, '/');

        for (j = 0 ; j < non_db_entries->count ; j++) {
            const annotate_entrydesc_t *desc = non_db_entries->data[j];

            if (!desc->get)
                continue;

            if (GLOB_MATCH(g, desc->name)) {
                /* Add this entry to our list only if it
                   applies to our particular server type */
                if ((desc->proxytype != PROXY_ONLY)
                    || proxy_fetch_func)
                    _annotate_state_add_entry(state, desc, desc->name);
            }

            if (!strcmp(s, desc->name)) {
                /* exact match */
                if (desc->proxytype != PROXY_ONLY) {
                    state->orig_entry = entries;  /* proxy it */
                }
                break;
            }
        }

        if (j == non_db_entries->count) {
            /* no [exact] match */
            state->orig_entry = entries;  /* proxy it */
            check_db = 1;
        }

        /* Add the db entry to our list if only if it
           applies to our particular server type */
        if (check_db &&
            ((db_entry->proxytype != PROXY_ONLY) || proxy_fetch_func)) {
            /* Add the db entry to our list */
            _annotate_state_add_entry(state, db_entry, s);
        }

        glob_free(&g);
    }

    if (state->which == ANNOTATION_SCOPE_SERVER) {
        _annotate_fetch_entries(state, /*proxy_check*/1);
    }
    else if (state->which == ANNOTATION_SCOPE_MAILBOX) {

        if (state->entry_list || proxy_fetch_func) {
            if (proxy_fetch_func) {
                r = annotate_state_need_mbentry(state);
                if (r)
                    goto out;
                assert(state->mbentry);
            }

            if (proxy_fetch_func && state->orig_entry) {
                state->orig_mailbox = state->mbentry->name;
                state->orig_attribute = attribs;
            }

            _annotate_fetch_entries(state, /*proxy_check*/1);

            /* Proxy the fetch to each backend server only once, using
               server_table to remember which ones we already hit. */
            if (proxy_fetch_func && state->orig_entry && state->mbentry->server &&
                !hash_lookup(state->mbentry->server, &state->server_table)) {
                /* xxx ignoring result */
                proxy_fetch_func(state->mbentry->server, state->mbentry->ext_name,
                                 state->orig_entry, state->orig_attribute);
                hash_insert(state->mbentry->server, (void *)0xDEADBEEF, &state->server_table);
            }
        }
    }
    else if (state->which == ANNOTATION_SCOPE_MESSAGE) {
        _annotate_fetch_entries(state, /*proxy_check*/0);
    }

    /* Flush last cached entry in output_entryatt() */
    flush_entryatt(state);

out:
    annotate_state_finish(state);
    return r;
}
| 0
|
328,961
|
/*
 * Initialize a Java class binary object and parse "buf" (length "len"
 * bytes, mapped at "loadaddr").  Returns the result of
 * r_bin_java_load_bin(), or false when the constant-pool string pool
 * cannot be initialized.
 */
R_API int r_bin_java_new_bin(RBinJavaObj *bin, ut64 loadaddr, Sdb *kv, const ut8 *buf, ut64 len) {
	R_BIN_JAVA_GLOBAL_BIN = bin;	/* remember the active bin globally */
	if (!r_str_constpool_init (&bin->constpool)) {
		return false;
	}
	bin->lines.count = 0;
	bin->loadaddr = loadaddr;
	r_bin_java_get_java_null_cp ();
	bin->id = r_num_rand (UT32_MAX);	/* random object id */
	bin->kv = kv ? kv : sdb_new (NULL, NULL, 0);	/* caller's kv store or a fresh one */
	bin->AllJavaBinObjs = NULL;
	return r_bin_java_load_bin (bin, buf, len);
}
| 0
|
359,582
|
afi2str (afi_t afi)
{
  /* Map an address-family identifier to its printable name. */
  switch (afi)
    {
    case AFI_IP:
      return "AFI_IP";
    case AFI_IP6:
      return "AFI_IP6";
    default:
      return "Unknown AFI";
    }
}
| 0
|
409,499
|
f_terminalprops(typval_T *argvars UNUSED, typval_T *rettv)
{
    // "terminalprops()" function: return a Dict with the status of each
    // terminal property probed via terminal responses.  The Dict is empty
    // when FEAT_TERMRESPONSE is not compiled in.
# ifdef FEAT_TERMRESPONSE
    int i;
# endif

    if (rettv_dict_alloc(rettv) == FAIL)
	return;
# ifdef FEAT_TERMRESPONSE
    for (i = 0; i < TPR_COUNT; ++i)
    {
	char_u	value[2];

	// Each value is a single status character.
	value[0] = term_props[i].tpr_status;
	value[1] = NUL;
	dict_add_string(rettv->vval.v_dict, term_props[i].tpr_name, value);
    }
# endif
}
| 0
|
309,816
|
ClrToEOS(NCURSES_SP_DCLx NCURSES_CH_T blank)
{
    /* Clear from the current cursor position to the end of the screen,
     * both on the terminal (clr_eos capability) and in the in-memory copy
     * of the current screen, filling with "blank". */
    int row, col;

    row = SP_PARM->_cursrow;
    col = SP_PARM->_curscol;
    /* Clamp a not-yet-known cursor position to the origin. */
    if (row < 0)
	row = 0;
    if (col < 0)
	col = 0;

    UpdateAttrs(SP_PARM, blank);
    TPUTS_TRACE("clr_eos");
    NCURSES_SP_NAME(tputs) (NCURSES_SP_ARGx
			    clr_eos,
			    screen_lines(SP_PARM) - row,
			    NCURSES_SP_NAME(_nc_outch));

    /* Mirror the clear into the current-screen model: rest of this row,
     * then all following rows. */
    while (col < screen_columns(SP_PARM))
	CurScreen(SP_PARM)->_line[row].text[col++] = blank;

    for (row++; row < screen_lines(SP_PARM); row++) {
	for (col = 0; col < screen_columns(SP_PARM); col++)
	    CurScreen(SP_PARM)->_line[row].text[col] = blank;
    }
}
| 0
|
195,059
|
bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const {
if (!IsIdentity(node) && !IsIdentityN(node)) {
return true;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (!fetch_nodes_known_) {
// The output values of this node may be needed.
return false;
}
if (node.input_size() < 1) {
// Node lacks input, is invalid
return false;
}
const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
CHECK(input != nullptr) << "node = " << node.name()
<< " input = " << node.input(0);
// Don't remove Identity nodes corresponding to Variable reads or following
// Recv.
if (IsVariable(*input) || IsRecv(*input)) {
return false;
}
for (const auto& consumer : node_map_->GetOutputs(node.name())) {
if (node.input_size() > 1 && (IsRetval(*consumer) || IsMerge(*consumer))) {
return false;
}
if (IsSwitch(*input)) {
for (const string& consumer_input : consumer->input()) {
if (consumer_input == AsControlDependency(node.name())) {
return false;
}
}
}
}
return true;
}
| 1
|
226,966
|
/*
 * Callback for IRC numeric "353": list of nicks on a channel.
 * When a buffer exists for the channel, each nick is added/updated on it;
 * otherwise a colored nick list is built in "str_nicks" and printed on the
 * appropriate buffer.
 */
IRC_PROTOCOL_CALLBACK(353)
{
    char *pos_channel, *pos_nick, *pos_nick_orig, *pos_host, *nickname;
    char *prefixes, *str_nicks, *color;
    int args, i, length;
    struct t_irc_channel *ptr_channel;

    IRC_PROTOCOL_MIN_ARGS(5);

    /* the channel name is in argv[3] or argv[4] depending on the reply */
    if (irc_channel_is_channel (server, argv[3]))
    {
        pos_channel = argv[3];
        args = 4;
    }
    else
    {
        pos_channel = argv[4];
        args = 5;
    }

    IRC_PROTOCOL_MIN_ARGS(args + 1);

    ptr_channel = irc_channel_search (server, pos_channel);

    str_nicks = NULL;

    /*
     * for a channel without buffer, prepare a string that will be built
     * with nicks and colors (argc - args is the number of nicks)
     */
    if (!ptr_channel)
    {
        /*
         * prefix color (16) + nick color (16) + reset color (16) = 48 bytes
         * added for each nick
         */
        length = strlen (argv_eol[args]) + ((argc - args) * (16 + 16 + 16)) + 1;
        str_nicks = malloc (length);
        if (str_nicks)
            str_nicks[0] = '\0';
    }

    for (i = args; i < argc; i++)
    {
        /* the first nick in the list carries the leading ':' */
        pos_nick = (argv[i][0] == ':') ? argv[i] + 1 : argv[i];
        pos_nick_orig = pos_nick;

        /* skip and save prefix(es) */
        while (pos_nick[0]
               && (irc_server_get_prefix_char_index (server, pos_nick[0]) >= 0))
        {
            pos_nick++;
        }
        prefixes = (pos_nick > pos_nick_orig) ?
            weechat_strndup (pos_nick_orig, pos_nick - pos_nick_orig) : NULL;

        /* extract nick from host */
        pos_host = strchr (pos_nick, '!');
        if (pos_host)
        {
            nickname = weechat_strndup (pos_nick, pos_host - pos_nick);
            pos_host++;
        }
        else
            nickname = strdup (pos_nick);

        /* add or update nick on channel */
        if (nickname)
        {
            if (ptr_channel && ptr_channel->nicks)
            {
                if (!irc_nick_new (server, ptr_channel, nickname, pos_host,
                                   prefixes, 0, NULL, NULL))
                {
                    weechat_printf (
                        server->buffer,
                        _("%s%s: cannot create nick \"%s\" for channel \"%s\""),
                        weechat_prefix ("error"), IRC_PLUGIN_NAME, nickname,
                        ptr_channel->name);
                }
            }
            else if (!ptr_channel && str_nicks)
            {
                /* no channel buffer: append "prefix + colored nick" to the
                   display string */
                if (str_nicks[0])
                {
                    strcat (str_nicks, IRC_COLOR_RESET);
                    strcat (str_nicks, " ");
                }
                if (prefixes)
                {
                    strcat (str_nicks,
                            weechat_color (
                                irc_nick_get_prefix_color_name (server,
                                                                prefixes[0])));
                    strcat (str_nicks, prefixes);
                }
                if (weechat_config_boolean (irc_config_look_color_nicks_in_names))
                {
                    if (irc_server_strcasecmp (server, nickname, server->nick) == 0)
                        strcat (str_nicks, IRC_COLOR_CHAT_NICK_SELF);
                    else
                    {
                        color = irc_nick_find_color (nickname);
                        strcat (str_nicks, color);
                        if (color)
                            free (color);
                    }
                }
                else
                    strcat (str_nicks, IRC_COLOR_RESET);
                strcat (str_nicks, nickname);
            }
            free (nickname);
        }
        if (prefixes)
            free (prefixes);
    }

    /* no channel buffer: print the assembled nick list */
    if (!ptr_channel)
    {
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, "names", NULL),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            _("%sNicks %s%s%s: %s[%s%s%s]"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_CHANNEL,
            pos_channel,
            IRC_COLOR_RESET,
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            (str_nicks) ? str_nicks : "",
            IRC_COLOR_CHAT_DELIMITERS);
    }
    if (str_nicks)
        free (str_nicks);

    return WEECHAT_RC_OK;
}
| 0
|
317,207
|
/**
 * smack_fs_context_dup - duplicate the Smack mount options
 * @fc: destination filesystem context
 * @src_fc: source filesystem context
 *
 * Deep-copies each mount-option string that is set in the source.
 * Returns 0 on success or -ENOMEM on allocation failure.
 * NOTE(review): on a partial failure the already-duplicated strings are
 * left attached to fc->security -- presumably released by the fs_context
 * security teardown; confirm.
 */
static int smack_fs_context_dup(struct fs_context *fc,
				struct fs_context *src_fc)
{
	struct smack_mnt_opts *dst, *src = src_fc->security;

	if (!src)
		return 0;

	fc->security = kzalloc(sizeof(struct smack_mnt_opts), GFP_KERNEL);
	if (!fc->security)
		return -ENOMEM;
	dst = fc->security;

	if (src->fsdefault) {
		dst->fsdefault = kstrdup(src->fsdefault, GFP_KERNEL);
		if (!dst->fsdefault)
			return -ENOMEM;
	}
	if (src->fsfloor) {
		dst->fsfloor = kstrdup(src->fsfloor, GFP_KERNEL);
		if (!dst->fsfloor)
			return -ENOMEM;
	}
	if (src->fshat) {
		dst->fshat = kstrdup(src->fshat, GFP_KERNEL);
		if (!dst->fshat)
			return -ENOMEM;
	}
	if (src->fsroot) {
		dst->fsroot = kstrdup(src->fsroot, GFP_KERNEL);
		if (!dst->fsroot)
			return -ENOMEM;
	}
	if (src->fstransmute) {
		dst->fstransmute = kstrdup(src->fstransmute, GFP_KERNEL);
		if (!dst->fstransmute)
			return -ENOMEM;
	}
	return 0;
}
| 0
|
437,698
|
static int cx23888_ir_tx_write(struct v4l2_subdev *sd, u8 *buf, size_t count,
			       ssize_t *num)
{
	struct cx23888_ir_state *ir_state = to_state(sd);

	/* For now enable the Tx FIFO Service interrupt & pretend we did work */
	irqenable_tx(ir_state->dev, IRQEN_TSE);
	*num = count;
	return 0;
}
| 0
|
242,634
|
static void isor_reset_seq_list(GF_List *list)
{
	/* Drain the list, releasing each parameter-set entry and its payload. */
	while (gf_list_count(list)) {
		GF_NALUFFParam *param = gf_list_pop_back(list);
		gf_free(param->data);
		gf_free(param);
	}
}
| 0
|
484,745
|
static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	/* Copy the ethtool statistic names for the ETH_SS_STATS set. */
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       xennet_stats[i].name, ETH_GSTRING_LEN);
}
| 0
|
383,951
|
cdf_app_to_mime(const char *vbuf, const struct nv *nv)
{
	/* Look up the MIME type for an application name by case-insensitive
	 * substring match against the pattern table; NULL when not found. */
	const char *mime = NULL;
	char *saved_locale;
	size_t i;

	/* Save the current LC_CTYPE so it can be restored afterwards. */
	saved_locale = setlocale(LC_CTYPE, NULL);
	assert(saved_locale != NULL);
	saved_locale = strdup(saved_locale);
	assert(saved_locale != NULL);

	/* Do the matching in the "C" locale for predictable case folding. */
	(void)setlocale(LC_CTYPE, "C");
	for (i = 0; nv[i].pattern != NULL; i++) {
		if (strcasestr(vbuf, nv[i].pattern) != NULL) {
			mime = nv[i].mime;
			break;
		}
	}
	(void)setlocale(LC_CTYPE, saved_locale);
	free(saved_locale);
	return mime;
}
| 0
|
507,771
|
void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps)
{
    /* Expose the r and s components of the signature; either out-pointer
     * may be NULL when the caller only wants the other component. */
    if (ps != NULL)
        *ps = sig->s;
    if (pr != NULL)
        *pr = sig->r;
}
| 0
|
463,125
|
static void annotate_begin(annotate_db_t *d)
{
    /* Mark the annotation db handle (when present) as having an open
     * transaction. */
    if (d != NULL)
        d->in_txn = 1;
}
| 0
|
301,387
|
/*
 * Completion side of the copy-chunk request: report the number of bytes
 * copied (0 on failure) and release the tevent request.
 */
static NTSTATUS vfswrap_copy_chunk_recv(struct vfs_handle_struct *handle,
					struct tevent_req *req,
					off_t *copied)
{
	struct vfs_cc_state *vfs_cc_state = tevent_req_data(req,
							struct vfs_cc_state);
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		DEBUG(2, ("server side copy chunk failed: %s\n",
			  nt_errstr(status)));
		*copied = 0;
		tevent_req_received(req);
		return status;
	}

	*copied = vfs_cc_state->copied;
	DEBUG(10, ("server side copy chunk copied %lu\n",
		   (unsigned long)*copied));
	tevent_req_received(req);
	return NT_STATUS_OK;
}
| 0
|
247,152
|
/*
 * Run the filter session: start all worker threads, then (unless the
 * session is configured without a main-thread loop) run the scheduler on
 * the calling thread and wait for every thread to stop.
 * Returns the session run status.
 */
GF_Err gf_fs_run(GF_FilterSession *fsess)
{
	u32 i, nb_threads;
	assert(fsess);

	fsess->run_status = GF_OK;
	fsess->main_th.has_seen_eot = GF_FALSE;
	fsess->nb_threads_stopped = 0;

	/* Launch each configured worker thread on the scheduler proc. */
	nb_threads = gf_list_count(fsess->threads);
	for (i=0;i<nb_threads; i++) {
		GF_SessionThread *sess_th = gf_list_get(fsess->threads, i);
		gf_th_run(sess_th->th, (gf_thread_run) gf_fs_thread_proc, sess_th);
	}
	/* Caller drives the session externally: return immediately. */
	if (fsess->no_main_thread) return GF_OK;

	gf_fs_thread_proc(&fsess->main_th);

	//wait for all threads to be done
	while (nb_threads+1 != fsess->nb_threads_stopped) {
		gf_sleep(1);
	}

	return fsess->run_status;
}
| 0
|
249,952
|
/*
 * Canonicalize NAME into RESOLVED.  Thin wrapper that supplies the scratch
 * buffer required by realpath_stk(), which does the actual work.
 */
__realpath (const char *name, char *resolved)
{
#ifdef GCC_BOGUS_WRETURN_LOCAL_ADDR
  #warning "GCC might issue a bogus -Wreturn-local-addr warning here."
  #warning "See <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93644>."
#endif
  struct scratch_buffer rname_buffer;
  return realpath_stk (name, resolved, &rname_buffer);
}
| 0
|
248,267
|
/*
 * Add a new titled section "name"/"title" to the configuration, unless one
 * with the same name and title already exists.  Returns the new section,
 * or NULL when it already exists, the option is unknown, or cfg_setopt()
 * fails.
 */
DLLIMPORT cfg_t *cfg_addtsec(cfg_t *cfg, const char *name, const char *title)
{
	cfg_opt_t *opt;
	cfg_value_t *val;

	/* Refuse to create a duplicate titled section. */
	if (cfg_gettsec(cfg, name, title))
		return NULL;

	opt = cfg_getopt(cfg, name);
	if (!opt) {
		cfg_error(cfg, _("no such option '%s'"), name);
		return NULL;
	}

	val = cfg_setopt(cfg, opt, title);
	if (!val)
		return NULL;

	/* Inherit context from the parent configuration. */
	val->section->path = cfg->path; /* Remember global search path. */
	val->section->line = 1;
	val->section->errfunc = cfg->errfunc;

	return val->section;
}
| 0
|
450,324
|
/*
 * Final teardown of a VNC client: wait for pending encoding jobs, emit the
 * QMP disconnect event, release all per-client buffers, codec state and
 * notifiers, drop the channel references and free the VncState.
 */
void vnc_disconnect_finish(VncState *vs)
{
    int i;

    trace_vnc_client_disconnect_finish(vs, vs->ioc);

    vnc_jobs_join(vs); /* Wait encoding jobs */

    vnc_lock_output(vs);
    vnc_qmp_event(vs, QAPI_EVENT_VNC_DISCONNECTED);

    buffer_free(&vs->input);
    buffer_free(&vs->output);

    qapi_free_VncClientInfo(vs->info);

    /* Release per-encoding compression state. */
    vnc_zlib_clear(vs);
    vnc_tight_clear(vs);
    vnc_zrle_clear(vs);

#ifdef CONFIG_VNC_SASL
    vnc_sasl_client_cleanup(vs);
#endif /* CONFIG_VNC_SASL */
    audio_del(vs);
    qkbd_state_lift_all_keys(vs->vd->kbd);

    if (vs->mouse_mode_notifier.notify != NULL) {
        qemu_remove_mouse_mode_change_notifier(&vs->mouse_mode_notifier);
    }
    QTAILQ_REMOVE(&vs->vd->clients, vs, next);
    if (QTAILQ_EMPTY(&vs->vd->clients)) {
        /* last client gone */
        vnc_update_server_surface(vs->vd);
    }
    vnc_unlock_output(vs);

    qemu_mutex_destroy(&vs->output_mutex);
    if (vs->bh != NULL) {
        qemu_bh_delete(vs->bh);
    }
    buffer_free(&vs->jobs_buffer);

    for (i = 0; i < VNC_STAT_ROWS; ++i) {
        g_free(vs->lossy_rect[i]);
    }
    g_free(vs->lossy_rect);
    /* Drop the I/O channel references held by this client. */
    object_unref(OBJECT(vs->ioc));
    vs->ioc = NULL;
    object_unref(OBJECT(vs->sioc));
    vs->sioc = NULL;
    vs->magic = 0;   /* invalidate the state marker */
    g_free(vs->zrle);
    g_free(vs->tight);
    g_free(vs);
}
| 0
|
415,181
|
get_current_reader (void)
{
  /* Return the index of the active card reader (0) or -1 when no reader
     can be opened.  Only a single reader slot is supported for now.  */
  struct vreader_s *vr = &vreader_table[0];

  if (!vr->valid)
    {
      /* First use of this table entry: set it up.  */
      vr->slot = -1;
      vr->valid = 1;
    }

  if (vr->slot == -1)
    {
      vr->slot = apdu_open_reader (opt.reader_port);
      /* Still no slot: no reader is attached.  Invalidate the entry so a
         later call retries once a reader shows up.  */
      if (vr->slot == -1)
        vr->valid = 0;
    }

  return vr->valid ? 0 : -1;
}
| 0
|
198,399
|
/*
 * Handle the FTP PORT command: parse "h1,h2,h3,h4,p1,p2" from the client,
 * validate it, and record the data-connection address and port.
 *
 * BUG FIX: the argument comes from an untrusted client, but the sscanf()
 * result and the six values were never validated before being fed to
 * sprintf() into a fixed INET_ADDRSTRLEN buffer -- large or negative
 * numbers could overflow "addr".  Validate the conversion count and the
 * 0..255 range of every field, and build the string with snprintf().
 */
static void handle_PORT(ctrl_t *ctrl, char *str)
{
	int a, b, c, d, e, f;
	char addr[INET_ADDRSTRLEN];
	struct sockaddr_in sin;

	/* Tear down any previous data connection first. */
	if (ctrl->data_sd > 0) {
		uev_io_stop(&ctrl->data_watcher);
		close(ctrl->data_sd);
		ctrl->data_sd = -1;
	}

	/* Convert PORT command's argument to IP address + port */
	if (sscanf(str, "%d,%d,%d,%d,%d,%d", &a, &b, &c, &d, &e, &f) != 6 ||
	    a < 0 || a > 255 || b < 0 || b > 255 ||
	    c < 0 || c > 255 || d < 0 || d > 255 ||
	    e < 0 || e > 255 || f < 0 || f > 255) {
		ERR(0, "Invalid PORT command argument '%s'", str);
		send_msg(ctrl->sd, "500 Illegal PORT command.\r\n");
		return;
	}
	snprintf(addr, sizeof(addr), "%d.%d.%d.%d", a, b, c, d);

	/* Check IPv4 address using inet_aton(), throw away converted result */
	if (!inet_aton(addr, &(sin.sin_addr))) {
		ERR(0, "Invalid address '%s' given to PORT command", addr);
		send_msg(ctrl->sd, "500 Illegal PORT command.\r\n");
		return;
	}

	strlcpy(ctrl->data_address, addr, sizeof(ctrl->data_address));
	ctrl->data_port = e * 256 + f;

	DBG("Client PORT command accepted for %s:%d", ctrl->data_address, ctrl->data_port);
	send_msg(ctrl->sd, "200 PORT command successful.\r\n");
}
| 1
|
432,291
|
/*
 * Initialize a RAM-backed memory region of "size" bytes with the given
 * UC_PROT_* permissions and allocate its backing RAM block.  A region
 * without UC_PROT_WRITE is marked read-only.
 */
void memory_region_init_ram(struct uc_struct *uc,
                            MemoryRegion *mr,
                            uint64_t size,
                            uint32_t perms)
{
    memory_region_init(uc, mr, size);
    mr->ram = true;
    if (!(perms & UC_PROT_WRITE)) {
        mr->readonly = true;
    }
    mr->perms = perms;
    /* RAM terminates address resolution: accesses land here. */
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(uc, size, mr);
}
| 0
|
300,827
|
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	/* Size-checked wrapper: reject oversized payloads, then reuse the
	 * stream send path. */
	return (dsz > TIPC_MAX_USER_MSG_SIZE) ? -EMSGSIZE
					      : tipc_sendstream(sock, m, dsz);
}
| 0
|
373,637
|
/*
 * Add the package directory "fname" (of the form rtp/pack/name/start/name)
 * to 'runtimepath': it is inserted after the matching rtp entry, or before
 * the first "after" entry, or appended at the end; the package's own
 * "after" subdirectory, when present, is inserted at the proper spot too.
 * Returns OK or FAIL.
 */
add_pack_dir_to_rtp(char_u *fname)
{
    char_u	*p4, *p3, *p2, *p1, *p;
    char_u	*entry;
    char_u	*insp = NULL;
    int		c;
    char_u	*new_rtp;
    int		keep;
    size_t	oldlen;
    size_t	addlen;
    size_t	new_rtp_len;
    char_u	*afterdir = NULL;
    size_t	afterlen = 0;
    char_u	*after_insp = NULL;
    char_u	*ffname = NULL;
    size_t	fname_len;
    char_u	*buf = NULL;
    char_u	*rtp_ffname;
    int		match;
    int		retval = FAIL;

    // Remember the last four path separators in "fname".
    p4 = p3 = p2 = p1 = get_past_head(fname);
    for (p = p1; *p; MB_PTR_ADV(p))
	if (vim_ispathsep_nocolon(*p))
	{
	    p4 = p3; p3 = p2; p2 = p1; p1 = p;
	}

    // now we have:
    // rtp/pack/name/start/name
    //    p4   p3   p2    p1
    //
    // find the part up to "pack" in 'runtimepath'
    c = *++p4; // append pathsep in order to expand symlink
    *p4 = NUL;
    ffname = fix_fname(fname);
    *p4 = c;
    if (ffname == NULL)
	return FAIL;

    // Find "ffname" in "p_rtp", ignoring '/' vs '\' differences.
    // Also stop at the first "after" directory.
    fname_len = STRLEN(ffname);
    buf = alloc(MAXPATHL);
    if (buf == NULL)
	goto theend;
    for (entry = p_rtp; *entry != NUL; )
    {
	char_u *cur_entry = entry;

	copy_option_part(&entry, buf, MAXPATHL, ",");
	if (insp == NULL)
	{
	    add_pathsep(buf);
	    rtp_ffname = fix_fname(buf);
	    if (rtp_ffname == NULL)
		goto theend;
	    match = vim_fnamencmp(rtp_ffname, ffname, fname_len) == 0;
	    vim_free(rtp_ffname);
	    if (match)
		// Insert "ffname" after this entry (and comma).
		insp = entry;
	}

	if ((p = (char_u *)strstr((char *)buf, "after")) != NULL
		&& p > buf
		&& vim_ispathsep(p[-1])
		&& (vim_ispathsep(p[5]) || p[5] == NUL || p[5] == ','))
	{
	    if (insp == NULL)
		// Did not find "ffname" before the first "after" directory,
		// insert it before this entry.
		insp = cur_entry;
	    after_insp = cur_entry;
	    break;
	}
    }

    if (insp == NULL)
	// Both "fname" and "after" not found, append at the end.
	insp = p_rtp + STRLEN(p_rtp);

    // check if rtp/pack/name/start/name/after exists
    afterdir = concat_fnames(fname, (char_u *)"after", TRUE);
    if (afterdir != NULL && mch_isdir(afterdir))
	afterlen = STRLEN(afterdir) + 1; // add one for comma

    oldlen = STRLEN(p_rtp);
    addlen = STRLEN(fname) + 1; // add one for comma
    new_rtp = alloc(oldlen + addlen + afterlen + 1); // add one for NUL
    if (new_rtp == NULL)
	goto theend;

    // We now have 'rtp' parts: {keep}{keep_after}{rest}.
    // Create new_rtp, first: {keep},{fname}
    keep = (int)(insp - p_rtp);
    mch_memmove(new_rtp, p_rtp, keep);
    new_rtp_len = keep;
    if (*insp == NUL)
	new_rtp[new_rtp_len++] = ',';  // add comma before
    mch_memmove(new_rtp + new_rtp_len, fname, addlen - 1);
    new_rtp_len += addlen - 1;
    if (*insp != NUL)
	new_rtp[new_rtp_len++] = ',';  // add comma after

    if (afterlen > 0 && after_insp != NULL)
    {
	int keep_after = (int)(after_insp - p_rtp);

	// Add to new_rtp: {keep},{fname}{keep_after},{afterdir}
	mch_memmove(new_rtp + new_rtp_len, p_rtp + keep,
						keep_after - keep);
	new_rtp_len += keep_after - keep;
	mch_memmove(new_rtp + new_rtp_len, afterdir, afterlen - 1);
	new_rtp_len += afterlen - 1;
	new_rtp[new_rtp_len++] = ',';
	keep = keep_after;
    }

    if (p_rtp[keep] != NUL)
	// Append rest: {keep},{fname}{keep_after},{afterdir}{rest}
	mch_memmove(new_rtp + new_rtp_len, p_rtp + keep, oldlen - keep + 1);
    else
	new_rtp[new_rtp_len] = NUL;

    if (afterlen > 0 && after_insp == NULL)
    {
	// Append afterdir when "after" was not found:
	// {keep},{fname}{rest},{afterdir}
	STRCAT(new_rtp, ",");
	STRCAT(new_rtp, afterdir);
    }
    set_option_value((char_u *)"rtp", 0L, new_rtp, 0);
    vim_free(new_rtp);
    retval = OK;

theend:
    vim_free(buf);
    vim_free(ffname);
    vim_free(afterdir);
    return retval;
}
| 0
|
225,011
|
PQerrorMessage(const PGconn *conn)
{
if (!conn)
return libpq_gettext("connection pointer is NULL\n");
/*
* The errorMessage buffer might be marked "broken" due to having
* previously failed to allocate enough memory for the message. In that
* case, tell the application we ran out of memory.
*/
if (PQExpBufferBroken(&conn->errorMessage))
return libpq_gettext("out of memory\n");
return conn->errorMessage.data;
}
| 0
|
225,886
|
GF_Box *trik_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_TrickPlayBox, GF_ISOM_BOX_TYPE_TRIK);
return (GF_Box *)tmp;
| 0
|
225,455
|
// Replaces fanin (input edge) `from_fanin` of node `node_name` with
// `to_fanin`, keeping the cached fanout sets and max-regular-output-port
// bookkeeping consistent. Fails if either fanin or the node is invalid, if
// the update would create a self-loop, or if it would turn a Switch node
// into a control dependency. A same-fanin update is a no-op returning OK.
Status MutableGraphView::UpdateFanin(absl::string_view node_name,
                                     const TensorId& from_fanin,
                                     const TensorId& to_fanin) {
  // Builds a uniformly formatted error Status for this mutation.
  auto error_status = [node_name, from_fanin, to_fanin](absl::string_view msg) {
    string params =
        absl::Substitute("node_name='$0', from_fanin='$1', to_fanin='$2'",
                         node_name, from_fanin.ToString(), to_fanin.ToString());
    return MutationError("UpdateFanin", params, msg);
  };

  // Validate both fanins and resolve all three node names before mutating.
  TF_RETURN_IF_ERROR(CheckFaninIsValid(from_fanin, error_status));
  TF_RETURN_IF_ERROR(CheckFaninIsValid(to_fanin, error_status));
  NodeDef* node = GetNode(node_name);
  TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
  NodeDef* from_fanin_node = GetNode(from_fanin.node());
  TF_RETURN_IF_ERROR(
      CheckNodeExists(from_fanin.node(), from_fanin_node, error_status));
  NodeDef* to_fanin_node = GetNode(to_fanin.node());
  TF_RETURN_IF_ERROR(
      CheckNodeExists(to_fanin.node(), to_fanin_node, error_status));

  // When replacing a non control dependency fanin with a control dependency, or
  // vice versa, remove and add, so ports can be updated properly in fanout(s).
  bool to_fanin_is_control = IsTensorIdControlling(to_fanin);
  if (to_fanin_is_control && IsSwitch(*to_fanin_node)) {
    // Can't add Switch node as a control dependency.
    return error_status(
        absl::Substitute("can't update to fanin '$0' as it will become a "
                         "Switch control dependency",
                         to_fanin.ToString()));
  }
  if (node_name == from_fanin.node() || node_name == to_fanin.node()) {
    return error_status("can't update fanin to or from self");
  }

  if (from_fanin == to_fanin) {
    return Status::OK();
  }

  // Mixed regular/control update: implemented as remove-then-add so that
  // regular input ports are reshuffled correctly.
  bool from_fanin_is_control = IsTensorIdControlling(from_fanin);
  if (from_fanin_is_control || to_fanin_is_control) {
    bool modified = false;
    if (from_fanin_is_control) {
      modified |= RemoveControllingFaninInternal(node, from_fanin_node);
    } else {
      modified |= RemoveRegularFaninInternal(
          node, {from_fanin_node, from_fanin.index()});
    }
    if (modified) {
      AddFaninInternal(node, {to_fanin_node, to_fanin.index()});
    }
    return Status::OK();
  }

  // In place mutation of regular fanins, requires no shifting of ports.
  string to_fanin_string = TensorIdToString(to_fanin);
  const int num_regular_fanins =
      NumFanins(*node, /*include_controlling_nodes=*/false);
  bool modified = false;
  // Every occurrence of from_fanin is rewritten, not just the first one.
  for (int i = 0; i < num_regular_fanins; ++i) {
    if (ParseTensorName(node->input(i)) == from_fanin) {
      InputPort input(node, i);

      OutputPort from_fanin_port(from_fanin_node, from_fanin.index());
      fanouts()[from_fanin_port].erase(input);

      OutputPort to_fanin_port(to_fanin_node, to_fanin.index());
      fanouts()[to_fanin_port].insert(input);

      node->set_input(i, to_fanin_string);
      modified = true;
    }
  }

  // Dedup control dependencies and update max regular output ports.
  if (modified) {
    OutputPort from_fanin_port(from_fanin_node, from_fanin.index());
    UpdateMaxRegularOutputPortForRemovedFanin(
        {from_fanin_node, from_fanin.index()}, fanouts()[from_fanin_port]);
    if (max_regular_output_port()[to_fanin_node] < to_fanin.index()) {
      max_regular_output_port()[to_fanin_node] = to_fanin.index();
    }
    if (CanDedupControlWithRegularInput(*this, *to_fanin_node)) {
      RemoveControllingFaninInternal(node, to_fanin_node);
    }
  }

  return Status::OK();
}
| 0
|
224,203
|
// Stores an opaque caller-supplied context pointer; the constructor only
// holds the pointer (it is never dereferenced or freed here).
Subscriber(void *user) : user(user) {}
| 0
|
301,497
|
// Multi-byte variant: compute the edit-distance score (substitutions,
// deletions, insertions and adjacent swaps, each with its own SCORE_*
// weight) between "badword" and "goodword", abandoning any branch whose
// running score can no longer beat "limit".
// Returns the minimal score found, or SCORE_MAXMAX when every alternative
// exceeds "limit".
// NOTE(review): both words are copied into MAXWLEN-sized int arrays with no
// explicit length check here — presumably callers guarantee the bound;
// confirm against the call sites.
spell_edit_score_limit_w(
    slang_T	*slang,
    char_u	*badword,
    char_u	*goodword,
    int		limit)
{
    limitscore_T    stack[10];		// allow for over 3 * 2 edits
    int		stackidx;
    int		bi, gi;
    int		bi2, gi2;
    int		bc, gc;
    int		score;
    int		score_off;
    int		minscore;
    int		round;
    char_u	*p;
    int		wbadword[MAXWLEN];
    int		wgoodword[MAXWLEN];

    // Get the characters from the multi-byte strings and put them in an
    // int array for easy access.
    bi = 0;
    for (p = badword; *p != NUL; )
	wbadword[bi++] = mb_cptr2char_adv(&p);
    wbadword[bi++] = 0;
    gi = 0;
    for (p = goodword; *p != NUL; )
	wgoodword[gi++] = mb_cptr2char_adv(&p);
    wgoodword[gi++] = 0;

    // The idea is to go from start to end over the words.  So long as
    // characters are equal just continue, this always gives the lowest score.
    // When there is a difference try several alternatives.  Each alternative
    // increases "score" for the edit distance.  Some of the alternatives are
    // pushed unto a stack and tried later, some are tried right away.  At the
    // end of the word the score for one alternative is known.  The lowest
    // possible score is stored in "minscore".
    stackidx = 0;
    bi = 0;
    gi = 0;
    score = 0;
    minscore = limit + 1;

    for (;;)
    {
	// Skip over an equal part, score remains the same.
	for (;;)
	{
	    bc = wbadword[bi];
	    gc = wgoodword[gi];

	    if (bc != gc)	// stop at a char that's different
		break;
	    if (bc == NUL)	// both words end
	    {
		if (score < minscore)
		    minscore = score;
		goto pop;	// do next alternative
	    }
	    ++bi;
	    ++gi;
	}

	if (gc == NUL)    // goodword ends, delete badword chars
	{
	    do
	    {
		if ((score += SCORE_DEL) >= minscore)
		    goto pop;	    // do next alternative
	    } while (wbadword[++bi] != NUL);
	    minscore = score;
	}
	else if (bc == NUL)   // badword ends, insert badword chars
	{
	    do
	    {
		if ((score += SCORE_INS) >= minscore)
		    goto pop;	    // do next alternative
	    } while (wgoodword[++gi] != NUL);
	    minscore = score;
	}
	else			// both words continue
	{
	    // If not close to the limit, perform a change.  Only try changes
	    // that may lead to a lower score than "minscore".
	    // round 0: try deleting a char from badword
	    // round 1: try inserting a char in badword
	    for (round = 0; round <= 1; ++round)
	    {
		score_off = score + (round == 0 ? SCORE_DEL : SCORE_INS);
		if (score_off < minscore)
		{
		    if (score_off + SCORE_EDIT_MIN >= minscore)
		    {
			// Near the limit, rest of the words must match.  We
			// can check that right now, no need to push an item
			// onto the stack.
			bi2 = bi + 1 - round;
			gi2 = gi + round;
			while (wgoodword[gi2] == wbadword[bi2])
			{
			    if (wgoodword[gi2] == NUL)
			    {
				minscore = score_off;
				break;
			    }
			    ++bi2;
			    ++gi2;
			}
		    }
		    else
		    {
			// try deleting a character from badword later
			stack[stackidx].badi = bi + 1 - round;
			stack[stackidx].goodi = gi + round;
			stack[stackidx].score = score_off;
			++stackidx;
		    }
		}
	    }

	    if (score + SCORE_SWAP < minscore)
	    {
		// If swapping two characters makes a match then the
		// substitution is more expensive, thus there is no need to
		// try both.
		if (gc == wbadword[bi + 1] && bc == wgoodword[gi + 1])
		{
		    // Swap two characters, that is: skip them.
		    gi += 2;
		    bi += 2;
		    score += SCORE_SWAP;
		    continue;
		}
	    }

	    // Substitute one character for another which is the same
	    // thing as deleting a character from both goodword and badword.
	    // Use a better score when there is only a case difference.
	    if (SPELL_TOFOLD(bc) == SPELL_TOFOLD(gc))
		score += SCORE_ICASE;
	    else
	    {
		// For a similar character use SCORE_SIMILAR.
		if (slang != NULL
			&& slang->sl_has_map
			&& similar_chars(slang, gc, bc))
		    score += SCORE_SIMILAR;
		else
		    score += SCORE_SUBST;
	    }

	    if (score < minscore)
	    {
		// Do the substitution.
		++gi;
		++bi;
		continue;
	    }
	}
pop:
	// Get here to try the next alternative, pop it from the stack.
	if (stackidx == 0)		// stack is empty, finished
	    break;

	// pop an item from the stack
	--stackidx;
	gi = stack[stackidx].goodi;
	bi = stack[stackidx].badi;
	score = stack[stackidx].score;
    }

    // When the score goes over "limit" it may actually be much higher.
    // Return a very large number to avoid going below the limit when giving a
    // bonus.
    if (minscore > limit)
	return SCORE_MAXMAX;
    return minscore;
}
| 0
|
264,213
|
/*
 * Record the pixel format requested by the VNC client and rebuild the
 * server-to-client conversion state, then force a full screen refresh.
 * Palette (non true-colour) clients are rejected by terminating the session.
 */
static void set_pixel_format(VncState *vs,
                             int bits_per_pixel, int depth,
                             int big_endian_flag, int true_color_flag,
                             int red_max, int green_max, int blue_max,
                             int red_shift, int green_shift, int blue_shift)
{
    if (!true_color_flag) {
        vnc_client_error(vs);
        return;
    }

    /* Red channel description as requested by the client. */
    vs->client_pf.rmax   = red_max;
    vs->client_pf.rbits  = hweight_long(red_max);
    vs->client_pf.rshift = red_shift;
    vs->client_pf.rmask  = red_max << red_shift;

    /* Green channel. */
    vs->client_pf.gmax   = green_max;
    vs->client_pf.gbits  = hweight_long(green_max);
    vs->client_pf.gshift = green_shift;
    vs->client_pf.gmask  = green_max << green_shift;

    /* Blue channel. */
    vs->client_pf.bmax   = blue_max;
    vs->client_pf.bbits  = hweight_long(blue_max);
    vs->client_pf.bshift = blue_shift;
    vs->client_pf.bmask  = blue_max << blue_shift;

    /* Pixel geometry: a 32 bpp format carries 24 significant bits. */
    vs->client_pf.bits_per_pixel = bits_per_pixel;
    vs->client_pf.bytes_per_pixel = bits_per_pixel / 8;
    if (bits_per_pixel == 32) {
        vs->client_pf.depth = 24;
    } else {
        vs->client_pf.depth = bits_per_pixel;
    }
    vs->client_be = big_endian_flag;

    set_pixel_conversion(vs);

    /* Redraw everything so the new format takes effect immediately. */
    graphic_hw_invalidate(NULL);
    graphic_hw_update(NULL);
}
| 0
|
294,363
|
f_gt_p(VALUE x, VALUE y)
{
    /* Fall back to the generic '>' dispatch unless both are Fixnums. */
    if (!FIXNUM_P(x) || !FIXNUM_P(y))
	return rb_funcall(x, '>', 1, y);
    /* Fast path: compare the raw long values directly. */
    return f_boolcast(FIX2LONG(x) > FIX2LONG(y));
}
| 0
|
383,317
|
/*
 * Convert the image to grayscale in place using the Rec.601 luma weights
 * (0.299 R + 0.587 G + 0.114 B); the alpha channel is preserved.
 *
 * Returns 1 on success, 0 if src is NULL.
 *
 * Fix: the original fetched the pixel-access function from src *before*
 * checking src for NULL, dereferencing a possibly NULL pointer.
 */
int gdImageGrayScale(gdImagePtr src)
{
	int x, y;
	int r, g, b, a;
	int new_pxl, pxl;
	typedef int (*FuncPtr)(gdImagePtr, int, int);
	FuncPtr f;

	if (src == NULL) {
		return 0;
	}
	f = GET_PIXEL_FUNCTION(src);

	for (y = 0; y < src->sy; ++y) {
		for (x = 0; x < src->sx; ++x) {
			pxl = f(src, x, y);
			r = gdImageRed(src, pxl);
			g = gdImageGreen(src, pxl);
			b = gdImageBlue(src, pxl);
			a = gdImageAlpha(src, pxl);

			/* Rec.601 luma, applied to all three channels. */
			r = g = b = (int)(.299 * r + .587 * g + .114 * b);

			new_pxl = gdImageColorAllocateAlpha(src, r, g, b, a);
			if (new_pxl == -1) {
				/* Palette full: use the closest existing colour. */
				new_pxl = gdImageColorClosestAlpha(src, r, g, b, a);
			}
			/* y is always within [0, sy) here; the redundant
			 * bounds check from the original was dropped. */
			gdImageSetPixel(src, x, y, new_pxl);
		}
	}
	return 1;
}
| 0
|
253,537
|
/*
 * Translate an SMB2.1+ lease state (or a plain oplock) from the server into
 * the cifs caching flags on the inode, logging which rights were granted.
 * "message" collects an "R"/"H"/"W" summary of the granted lease bits for
 * the debug line below; 5 bytes holds up to "RHW\0" or "None\0".
 */
smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		       unsigned int epoch, bool *purge_cache)
{
	char message[5] = {0};
	unsigned int new_oplock = 0;

	/* Only the low byte carries the lease/oplock state. */
	oplock &= 0xFF;
	cinode->lease_granted = true;
	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
		return;

	/* Check if the server granted an oplock rather than a lease */
	if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
		return smb2_set_oplock_level(cinode, oplock, epoch,
					     purge_cache);

	if (oplock & SMB2_LEASE_READ_CACHING_HE) {
		new_oplock |= CIFS_CACHE_READ_FLG;
		strcat(message, "R");
	}
	if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
		new_oplock |= CIFS_CACHE_HANDLE_FLG;
		strcat(message, "H");
	}
	if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
		new_oplock |= CIFS_CACHE_WRITE_FLG;
		strcat(message, "W");
	}
	if (!new_oplock)
		strncpy(message, "None", sizeof(message));

	cinode->oplock = new_oplock;
	cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
		 &cinode->vfs_inode);
}
| 0
|
424,941
|
/*
 * Serialized wrapper around _iwl_trans_pcie_start_hw(): takes the transport
 * mutex for the duration of the hardware start and returns its result.
 */
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}
| 0
|
337,816
|
/*
 * Build a throw-away ("temporary") association for the endpoint, scoped by
 * the source address of the given chunk, and record that source address as
 * the peer. Returns NULL if the association could not be allocated.
 *
 * Note: "nodata" is also the success exit — on allocation failure asoc is
 * NULL and the same return path is used.
 * NOTE(review): the from_skb() return value is ignored here — presumably
 * the address was already validated upstream; confirm against callers.
 */
struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
					     struct sctp_chunk *chunk,
					     gfp_t gfp)
{
	struct sctp_association *asoc;
	enum sctp_scope scope;
	struct sk_buff *skb;

	/* Create the bare association.  */
	scope = sctp_scope(sctp_source(chunk));
	asoc = sctp_association_new(ep, ep->base.sk, scope, gfp);
	if (!asoc)
		goto nodata;
	/* Mark it temporary so it is not hashed/kept like a full assoc. */
	asoc->temp = 1;
	skb = chunk->skb;
	/* Create an entry for the source address of the packet.  */
	SCTP_INPUT_CB(skb)->af->from_skb(&asoc->c.peer_addr, skb, 1);

nodata:
	return asoc;
}
| 0
|
512,443
|
// Rebind m_string to the fixed internal buffer (binary charset), discarding
// any previously accumulated contents.
void reset_buffer()
{
  m_string.set(buffer, buffer_size, &my_charset_bin);
}
| 0
|
225,384
|
static struct v4l2_loopback_device *v4l2loopback_cd2dev(struct device *cd)
{
struct video_device *loopdev = to_video_device(cd);
struct v4l2loopback_private *ptr =
(struct v4l2loopback_private *)video_get_drvdata(loopdev);
int nr = ptr->device_nr;
return idr_find(&v4l2loopback_index_idr, nr);
}
| 0
|
244,342
|
/*
 * Child-box dispatcher for the metadata sample entry ('metx'): only the
 * optional text config box ('txtc') is captured; any other child type is
 * silently accepted and left to generic handling.
 */
GF_Err metx_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem)
{
	GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s;
	switch (a->type) {
	case GF_ISOM_BOX_TYPE_TXTC:
		//we allow the config box on metx
		BOX_FIELD_ASSIGN(config, GF_TextConfigBox)
		break;
	}
	return GF_OK;
}
| 0
|
221,161
|
void gf_odf_avc_cfg_del(GF_AVCConfig *cfg)
{
if (!cfg) return;
while (gf_list_count(cfg->sequenceParameterSets)) {
GF_NALUFFParam *sl = (GF_NALUFFParam *)gf_list_get(cfg->sequenceParameterSets, 0);
gf_list_rem(cfg->sequenceParameterSets, 0);
if (sl->data) gf_free(sl->data);
gf_free(sl);
}
gf_list_del(cfg->sequenceParameterSets);
cfg->sequenceParameterSets = NULL;
while (gf_list_count(cfg->pictureParameterSets)) {
GF_NALUFFParam *sl = (GF_NALUFFParam *)gf_list_get(cfg->pictureParameterSets, 0);
gf_list_rem(cfg->pictureParameterSets, 0);
if (sl->data) gf_free(sl->data);
gf_free(sl);
}
gf_list_del(cfg->pictureParameterSets);
cfg->pictureParameterSets = NULL;
if (cfg->sequenceParameterSetExtensions) {
while (gf_list_count(cfg->sequenceParameterSetExtensions)) {
GF_NALUFFParam *sl = (GF_NALUFFParam *)gf_list_get(cfg->sequenceParameterSetExtensions, 0);
gf_list_rem(cfg->sequenceParameterSetExtensions, 0);
if (sl->data) gf_free(sl->data);
gf_free(sl);
}
gf_list_del(cfg->sequenceParameterSetExtensions);
cfg->sequenceParameterSetExtensions = NULL;
}
gf_free(cfg);
}
| 0
|
500,052
|
/*
 * Validate the Kerberos 5 ticket carried in "indata" against this server's
 * keytab and, on success, record the client principal, session key and
 * ticket times in kssl_ctx / ttimes.  Returns 0 on success or a krb5 error
 * code; diagnostic text is placed in kssl_err.
 *
 * Fixes vs. original:
 *  - krb5server was read uninitialized at err: when an early failure jumped
 *    there before krb5_sname_to_principal() ran (undefined behavior);
 *  - the keytab entry leaked when krb5_decrypt_tkt_part() failed;
 *  - the final enc_part2 dereferences were unguarded even though the "bad
 *    ticket" branch above can leave enc_part2 NULL.
 *
 * NOTE(review): krb5context / krb5auth_context are function-static and
 * reused across calls, so this routine is not thread-safe — confirm callers
 * serialize it.
 */
kssl_sget_tkt( /* UPDATE */ KSSL_CTX *kssl_ctx,
        /* IN */ krb5_data *indata,
        /* OUT */ krb5_ticket_times *ttimes,
        /* OUT */ KSSL_ERR *kssl_err )
        {
        krb5_error_code krb5rc = KRB5KRB_ERR_GENERIC;
        static krb5_context krb5context = NULL;
        static krb5_auth_context krb5auth_context = NULL;
        krb5_ticket *krb5ticket = NULL;
        KRB5_TKTBODY *asn1ticket = NULL;
        const unsigned char *p;
        krb5_keytab krb5keytab = NULL;
        krb5_keytab_entry kt_entry;
        krb5_principal krb5server = NULL; /* FIX: was uninitialized; read at err: */
        krb5_rcache rcache = NULL;

        kssl_err_set(kssl_err, 0, "");

        if (!kssl_ctx)
                {
                kssl_err_set(kssl_err, SSL_R_KRB5_S_INIT,
                        "No kssl_ctx defined.\n");
                goto err;
                }

#ifdef KSSL_DEBUG
        printf("in kssl_sget_tkt(%s)\n", kstring(kssl_ctx->service_name));
#endif  /* KSSL_DEBUG */

        if (!krb5context && (krb5rc = krb5_init_context(&krb5context)))
                {
                kssl_err_set(kssl_err, SSL_R_KRB5_S_INIT,
                        "krb5_init_context() fails.\n");
                goto err;
                }

        /* Recycle the auth context from a previous call, if any. */
        if (krb5auth_context &&
            (krb5rc = krb5_auth_con_free(krb5context, krb5auth_context)))
                {
                kssl_err_set(kssl_err, SSL_R_KRB5_S_INIT,
                        "krb5_auth_con_free() fails.\n");
                goto err;
                }
        else krb5auth_context = NULL;
        if (!krb5auth_context &&
            (krb5rc = krb5_auth_con_init(krb5context, &krb5auth_context)))
                {
                kssl_err_set(kssl_err, SSL_R_KRB5_S_INIT,
                        "krb5_auth_con_init() fails.\n");
                goto err;
                }

        /* Ensure a replay cache exists for our service principal. */
        if ((krb5rc = krb5_auth_con_getrcache(krb5context, krb5auth_context,
                &rcache)))
                {
                kssl_err_set(kssl_err, SSL_R_KRB5_S_INIT,
                        "krb5_auth_con_getrcache() fails.\n");
                goto err;
                }

        if ((krb5rc = krb5_sname_to_principal(krb5context, NULL,
                (kssl_ctx->service_name)? kssl_ctx->service_name: KRB5SVC,
                KRB5_NT_SRV_HST, &krb5server)) != 0)
                {
                kssl_err_set(kssl_err, SSL_R_KRB5_S_INIT,
                        "krb5_sname_to_principal() fails.\n");
                goto err;
                }

        if (rcache == NULL)
                {
                if ((krb5rc = krb5_get_server_rcache(krb5context,
                        krb5_princ_component(krb5context, krb5server, 0),
                        &rcache)))
                        {
                        kssl_err_set(kssl_err, SSL_R_KRB5_S_INIT,
                                "krb5_get_server_rcache() fails.\n");
                        goto err;
                        }
                }

        if ((krb5rc = krb5_auth_con_setrcache(krb5context, krb5auth_context, rcache)))
                {
                kssl_err_set(kssl_err, SSL_R_KRB5_S_INIT,
                        "krb5_auth_con_setrcache() fails.\n");
                goto err;
                }

        /* kssl_ctx->keytab_file == NULL ==> use Kerberos default
        */
        if (kssl_ctx->keytab_file)
                {
                krb5rc = krb5_kt_resolve(krb5context, kssl_ctx->keytab_file,
                        &krb5keytab);
                if (krb5rc)
                        {
                        kssl_err_set(kssl_err, SSL_R_KRB5_S_INIT,
                                "krb5_kt_resolve() fails.\n");
                        goto err;
                        }
                }
        else
                {
                krb5rc = krb5_kt_default(krb5context,&krb5keytab);
                if (krb5rc)
                        {
                        kssl_err_set(kssl_err, SSL_R_KRB5_S_INIT,
                                "krb5_kt_default() fails.\n");
                        goto err;
                        }
                }

        /* Actual Kerberos5 krb5_recvauth() has initial conversation here
        ** o check KRB5_SENDAUTH_BADAUTHVERS
        **   unless KRB5_RECVAUTH_SKIP_VERSION
        ** o check KRB5_SENDAUTH_BADAPPLVERS
        ** o send "0" msg if all OK
        */

        /* 20010411 was using AP_REQ instead of true KerberosWrapper
        **
        ** if ((krb5rc = krb5_rd_req(krb5context, &krb5auth_context,
        **         &krb5in_data, krb5server, krb5keytab,
        **         &ap_option, &krb5ticket)) != 0)  { Error }
        */

        p = (unsigned char *)indata->data;
        if ((asn1ticket = (KRB5_TKTBODY *) d2i_KRB5_TICKET(NULL, &p,
                (long) indata->length)) == NULL)
                {
                BIO_snprintf(kssl_err->text, KSSL_ERR_MAX,
                        "d2i_KRB5_TICKET() ASN.1 decode failure.\n");
                kssl_err->reason = SSL_R_KRB5_S_RD_REQ;
                goto err;
                }

        /* Was:  krb5rc = krb5_decode_ticket(krb5in_data,&krb5ticket)) != 0) */
        if ((krb5rc = kssl_TKT2tkt(krb5context, asn1ticket, &krb5ticket,
                kssl_err)) != 0)
                {
                BIO_snprintf(kssl_err->text, KSSL_ERR_MAX,
                        "Error converting ASN.1 ticket to krb5_ticket.\n");
                kssl_err->reason = SSL_R_KRB5_S_RD_REQ;
                goto err;
                }

        if (! krb5_principal_compare(krb5context, krb5server,
                                                  krb5ticket->server))  {
                krb5rc = KRB5_PRINC_NOMATCH;
                BIO_snprintf(kssl_err->text, KSSL_ERR_MAX,
                        "server principal != ticket principal\n");
                kssl_err->reason = SSL_R_KRB5_S_RD_REQ;
                goto err;
                }
        if ((krb5rc = krb5_kt_get_entry(krb5context, krb5keytab,
                        krb5ticket->server, krb5ticket->enc_part.kvno,
                        krb5ticket->enc_part.enctype, &kt_entry)) != 0)  {
                BIO_snprintf(kssl_err->text, KSSL_ERR_MAX,
                        "krb5_kt_get_entry() fails with %x.\n", krb5rc);
                kssl_err->reason = SSL_R_KRB5_S_RD_REQ;
                goto err;
                }
        if ((krb5rc = krb5_decrypt_tkt_part(krb5context, &kt_entry.key,
                        krb5ticket)) != 0)  {
                BIO_snprintf(kssl_err->text, KSSL_ERR_MAX,
                        "krb5_decrypt_tkt_part() failed.\n");
                kssl_err->reason = SSL_R_KRB5_S_RD_REQ;
                /* FIX: release the keytab entry on this path too. */
                krb5_kt_free_entry(krb5context, &kt_entry);
                goto err;
                }
        else  {
                krb5_kt_free_entry(krb5context, &kt_entry);
#ifdef KSSL_DEBUG
                {
                int i; krb5_address **paddr = krb5ticket->enc_part2->caddrs;
                printf("Decrypted ticket fields:\n");
                printf("\tflags: %X, transit-type: %X",
                        krb5ticket->enc_part2->flags,
                        krb5ticket->enc_part2->transited.tr_type);
                print_krb5_data("\ttransit-data: ",
                        &(krb5ticket->enc_part2->transited.tr_contents));
                printf("\tcaddrs: %p, authdata: %p\n",
                        krb5ticket->enc_part2->caddrs,
                        krb5ticket->enc_part2->authorization_data);
                if (paddr)
                        {
                        printf("\tcaddrs:\n");
                        for (i=0; paddr[i] != NULL; i++)
                                {
                                krb5_data d;
                                d.length=paddr[i]->length;
                                d.data=paddr[i]->contents;
                                print_krb5_data("\t\tIP: ", &d);
                                }
                        }
                printf("\tstart/auth/end times: %d / %d / %d\n",
                        krb5ticket->enc_part2->times.starttime,
                        krb5ticket->enc_part2->times.authtime,
                        krb5ticket->enc_part2->times.endtime);
                }
#endif  /* KSSL_DEBUG */
                }

        krb5rc = KRB5_NO_TKT_SUPPLIED;
        if (!krb5ticket || !krb5ticket->enc_part2 ||
                !krb5ticket->enc_part2->client ||
                !krb5ticket->enc_part2->client->data ||
                !krb5ticket->enc_part2->session)
                {
                kssl_err_set(kssl_err, SSL_R_KRB5_S_BAD_TICKET,
                        "bad ticket from krb5_rd_req.\n");
                }
        else if (kssl_ctx_setprinc(kssl_ctx, KSSL_CLIENT,
                 &krb5ticket->enc_part2->client->realm,
                 krb5ticket->enc_part2->client->data,
                 krb5ticket->enc_part2->client->length))
                {
                kssl_err_set(kssl_err, SSL_R_KRB5_S_BAD_TICKET,
                        "kssl_ctx_setprinc() fails.\n");
                }
        else if (kssl_ctx_setkey(kssl_ctx, krb5ticket->enc_part2->session))
                {
                kssl_err_set(kssl_err, SSL_R_KRB5_S_BAD_TICKET,
                        "kssl_ctx_setkey() fails.\n");
                }
        else if (krb5ticket->enc_part2->flags & TKT_FLG_INVALID)
                {
                krb5rc = KRB5KRB_AP_ERR_TKT_INVALID;
                kssl_err_set(kssl_err, SSL_R_KRB5_S_BAD_TICKET,
                        "invalid ticket from krb5_rd_req.\n");
                }
        else    krb5rc = 0;

        /* FIX: only dereference the decrypted part when it is present;
        ** the "bad ticket" branch above can leave enc_part2 NULL.
        */
        if (krb5ticket && krb5ticket->enc_part2)
                {
                kssl_ctx->enctype = krb5ticket->enc_part.enctype;
                ttimes->authtime = krb5ticket->enc_part2->times.authtime;
                ttimes->starttime = krb5ticket->enc_part2->times.starttime;
                ttimes->endtime = krb5ticket->enc_part2->times.endtime;
                ttimes->renew_till = krb5ticket->enc_part2->times.renew_till;
                }

 err:
#ifdef KSSL_DEBUG
        kssl_ctx_show(kssl_ctx);
#endif  /* KSSL_DEBUG */

        if (asn1ticket) KRB5_TICKET_free((KRB5_TICKET *) asn1ticket);
        if (krb5keytab) krb5_kt_close(krb5context, krb5keytab);
        if (krb5ticket) krb5_free_ticket(krb5context, krb5ticket);
        if (krb5server) krb5_free_principal(krb5context, krb5server);
        return (krb5rc);
        }
| 0
|
312,434
|
// Position the cursor for a quickfix jump: either go to the recorded
// line/column (clamping the line to the buffer end, honoring virtual
// columns) or, when a search pattern is given, search for it from the top
// of the buffer and restore the cursor if the pattern is not found.
qf_jump_goto_line(
	linenr_T	qf_lnum,
	int		qf_col,
	char_u		qf_viscol,
	char_u		*qf_pattern)
{
    linenr_T		i;

    if (qf_pattern == NULL)
    {
	// Go to line with error, unless qf_lnum is 0.
	i = qf_lnum;
	if (i > 0)
	{
	    if (i > curbuf->b_ml.ml_line_count)
		i = curbuf->b_ml.ml_line_count;
	    curwin->w_cursor.lnum = i;
	}
	if (qf_col > 0)
	{
	    curwin->w_cursor.coladd = 0;
	    if (qf_viscol == TRUE)
		// Column is a screen (virtual) column, not a byte index.
		coladvance(qf_col - 1);
	    else
		curwin->w_cursor.col = qf_col - 1;
	    curwin->w_set_curswant = TRUE;
	    check_cursor();
	}
	else
	    beginline(BL_WHITE | BL_FIX);
    }
    else
    {
	pos_T save_cursor;

	// Move the cursor to the first line in the buffer
	save_cursor = curwin->w_cursor;
	curwin->w_cursor.lnum = 0;
	if (!do_search(NULL, '/', '/', qf_pattern, (long)1, SEARCH_KEEP, NULL))
	    curwin->w_cursor = save_cursor;
    }
}
| 0
|
318,778
|
// Read the two digits following an 'M' in an Excellon drill file, map them
// to a drill_m_code_t and bump the matching per-file statistics counter.
// Unknown codes push the two characters back onto the stream and return
// DRILL_M_UNKNOWN; EOF mid-code is reported as an error.
// NOTE(review): the dprintf "%ld" format assumes ssize_t is long — confirm
// on LLP64 targets.
drill_parse_M_code(gerb_file_t *fd, drill_state_t *state,
		gerbv_image_t *image, ssize_t file_line)
{
    gerbv_drill_stats_t *stats = image->drill_stats;
    drill_m_code_t m_code;
    char op[3];

    dprintf("---> entering %s() ...\n", __FUNCTION__);

    op[0] = gerb_fgetc(fd);
    op[1] = gerb_fgetc(fd);
    op[2] = '\0';

    if (op[0] == EOF
    ||  op[1] == EOF) {
	gerbv_stats_printf(stats->error_list, GERBV_MESSAGE_ERROR, -1,
		_("Unexpected EOF found while parsing M-code in file \"%s\""),
		fd->filename);

	return DRILL_M_UNKNOWN;
    }

    dprintf("  Compare M-code \"%s\" at line %ld\n", op, file_line);

    switch (m_code = atoi(op)) {
    case 0:
	/* atoi() return 0 in case of error, recheck string */
	if (0 != strncmp(op, "00", 2)) {
	    m_code = DRILL_M_UNKNOWN;
	    // Not a real M00: make both characters available again.
	    gerb_ungetc(fd);
	    gerb_ungetc(fd);
	    break;
	}
	stats->M00++;
	break;
    case 1:
	stats->M01++;
	break;
    case 18:
	stats->M18++;
	break;
    case 25:
	stats->M25++;
	break;
    case 30:
	stats->M30++;
	break;
    case 45:
	stats->M45++;
	break;
    case 47:
	stats->M47++;
	break;
    case 48:
	stats->M48++;
	break;
    case 71:
	stats->M71++;
	// M71/M72 (units) take the rest of the line as argument; skip it.
	eat_line(fd);
	break;
    case 72:
	stats->M72++;
	eat_line(fd);
	break;
    case 95:
	stats->M95++;
	break;
    case 97:
	stats->M97++;
	break;
    case 98:
	stats->M98++;
	break;

    default:
    case DRILL_M_UNKNOWN:
	break;
    }

    dprintf("<----  ...leaving %s()\n", __FUNCTION__);

    return m_code;
} /* drill_parse_M_code() */
| 0
|
336,541
|
/*
 * Wrap a freshly accepted client socket in a RedLinkInfo: configure the
 * socket (non-blocking and TCP_NODELAY are mandatory, keepalive/nosigpipe
 * are best-effort), create the RedStream, announce the connection event and
 * start the TLS/plain handshake state.  Returns NULL if the mandatory
 * socket options cannot be set; the caller keeps ownership of the fd then.
 */
static RedLinkInfo *reds_init_client_connection(RedsState *reds, int socket)
{
    RedLinkInfo *link;

    if (!red_socket_set_non_blocking(socket, TRUE)) {
        return NULL;
    }

    if (!red_socket_set_no_delay(socket, TRUE)) {
        return NULL;
    }

    /* Best-effort tuning; failures here are not fatal. */
    red_socket_set_keepalive(socket, TRUE, KEEPALIVE_TIMEOUT);
    red_socket_set_nosigpipe(socket, true);

    link = g_new0(RedLinkInfo, 1);
    link->reds = reds;
    link->stream = red_stream_new(reds, socket);

    /* gather info + send event */
    red_stream_push_channel_event(link->stream, SPICE_CHANNEL_EVENT_CONNECTED);

    openssl_init(link);

    return link;
}
| 0
|
274,868
|
// GREATER with a scalar-like right operand: the {1,1,1,1} tensor broadcasts
// against {1,1,1,4}, and the output keeps the larger shape.
TEST(ComparisonsTest, GreaterBroadcast) {
  ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
                          BuiltinOperator_GREATER);
  model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
  model.PopulateTensor<int>(model.input2(), {7});
  model.Invoke();
  // Element-wise: -1>7, 9>7, 7>7, 3>7.
  EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, false));
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
| 0
|
247,665
|
// Verify that binding the client to an explicit loopback source address
// (IPv4 or IPv6 depending on the test parameter) is preserved end-to-end:
// after the TLS connection is accepted, the server-side view of the remote
// address must equal the bound loopback address.
TEST_P(SslReadBufferLimitTest, TestBind) {
  std::string address_string = TestUtility::getIpv4Loopback();
  if (GetParam() == Network::Address::IpVersion::v4) {
    source_address_ = Network::Address::InstanceConstSharedPtr{
        new Network::Address::Ipv4Instance(address_string, 0, nullptr)};
  } else {
    address_string = "::1";
    source_address_ = Network::Address::InstanceConstSharedPtr{
        new Network::Address::Ipv6Instance(address_string, 0, nullptr)};
  }

  initialize();

  EXPECT_CALL(listener_callbacks_, onAccept_(_))
      .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void {
        server_connection_ = dispatcher_->createServerConnection(
            std::move(socket), server_ssl_socket_factory_->createTransportSocket(nullptr),
            stream_info_);
        server_connection_->addConnectionCallbacks(server_callbacks_);
        server_connection_->addReadFilter(read_filter_);
        // No ALPN is negotiated in this configuration.
        EXPECT_EQ("", server_connection_->nextProtocol());
      }));

  EXPECT_CALL(client_callbacks_, onEvent(Network::ConnectionEvent::Connected))
      .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); }));

  dispatcher_->run(Event::Dispatcher::RunType::Block);

  EXPECT_EQ(address_string,
            server_connection_->connectionInfoProvider().remoteAddress()->ip()->addressAsString());

  disconnect();
}
| 0
|
281,645
|
/*
 * Subtract a dark frame (16-bit binary PGM, "P5", dimensions matching the
 * raw image, maxval 65535) from the Bayer data, clamping at zero, and clear
 * the black-level fields since the dark frame now accounts for them.
 *
 * Fix: the per-row fread() result was previously ignored, so a truncated
 * dark-frame file subtracted uninitialized buffer contents; now a short
 * read stops the subtraction (and flags a bad dark-frame file when built
 * as part of LibRaw).
 */
void CLASS subtract (const char *fname)
{
  FILE *fp;
  int dim[3]={0,0,0}, comment=0, number=0, error=0, nd=0, c, row, col;
  ushort *pixel;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,0,2);
#endif

  if (!(fp = fopen (fname, "rb"))) {
#ifdef DCRAW_VERBOSE
    perror (fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_FILE;
#endif
    return;
  }
  /* Parse the PGM header: magic "P5", then width, height, maxval,
     skipping '#' comments. */
  if (fgetc(fp) != 'P' || fgetc(fp) != '5') error = 1;
  while (!error && nd < 3 && (c = fgetc(fp)) != EOF) {
    if (c == '#')  comment = 1;
    if (c == '\n') comment = 0;
    if (comment) continue;
    if (isdigit(c)) number = 1;
    if (number) {
      if (isdigit(c)) dim[nd] = dim[nd]*10 + c -'0';
      else if (isspace(c)) {
	number = 0;  nd++;
      } else error = 1;
    }
  }
  if (error || nd < 3) {
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("%s is not a valid PGM file!\n"), fname);
#endif
    fclose (fp);  return;
  } else if (dim[0] != width || dim[1] != height || dim[2] != 65535) {
#ifdef DCRAW_VERBOSE
    fprintf (stderr,_("%s has the wrong dimensions!\n"), fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_DIM;
#endif
    fclose (fp);  return;
  }
  pixel = (ushort *) calloc (width, sizeof *pixel);
  merror (pixel, "subtract()");
  for (row=0; row < height; row++) {
    if (fread (pixel, 2, width, fp) != (size_t) width) {
      /* Truncated dark frame: stop instead of using stale buffer data. */
#ifdef LIBRAW_LIBRARY_BUILD
      imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_FILE;
#endif
      break;
    }
    for (col=0; col < width; col++)
      BAYER(row,col) = MAX (BAYER(row,col) - ntohs(pixel[col]), 0);
  }
  free (pixel);
  fclose (fp);
  /* The dark frame replaces the static black level. */
  memset (cblack, 0, sizeof cblack);
  black = 0;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME,1,2);
#endif
}
| 0
|
437,320
|
// Walk the regex parse tree and reject any numbered (non-named) backref:
// returns 0 when the subtree is clean, or
// ONIGERR_NUMBERED_BACKREF_OR_CALL_NOT_ALLOWED on the first violation
// (used when named groups are present and numbered refs are disallowed).
numbered_ref_check(Node* node)
{
  int r = 0;
  switch (NODE_TYPE(node)) {
  case NODE_LIST:
  case NODE_ALT:
    // Check every alternative / list element until one fails.
    do {
      r = numbered_ref_check(NODE_CAR(node));
    } while (r == 0 && IS_NOT_NULL(node = NODE_CDR(node)));
    break;

  case NODE_ANCHOR:
    if (IS_NULL(NODE_BODY(node)))
      break;
    /* fall */
  case NODE_QUANT:
    r = numbered_ref_check(NODE_BODY(node));
    break;

  case NODE_ENCLOSURE:
    {
      EnclosureNode* en = ENCLOSURE_(node);

      r = numbered_ref_check(NODE_BODY(node));
      if (r != 0) return r;

      // (?(cond)then|else): both branches must be checked as well.
      if (en->type == ENCLOSURE_IF_ELSE) {
        if (IS_NOT_NULL(en->te.Then)) {
          r = numbered_ref_check(en->te.Then);
          if (r != 0) return r;
        }
        if (IS_NOT_NULL(en->te.Else)) {
          r = numbered_ref_check(en->te.Else);
          if (r != 0) return r;
        }
      }
    }
    break;

  case NODE_BACKREF:
    if (! NODE_IS_BY_NAME(node))
      return ONIGERR_NUMBERED_BACKREF_OR_CALL_NOT_ALLOWED;
    break;

  default:
    break;
  }

  return r;
}
| 0
|
294,700
|
c_find_ldom(int y, int m, double sg, int *rjd, int *ns)
{
    int d, rm, rd;

    /* Probe backward from the 31st until a valid day of this month is
     * found; that day is the last day of the month. */
    for (d = 31; d >= 2; d--)
	if (c_valid_civil_p(y, m, d, sg, &rm, &rd, rjd, ns))
	    return 1;
    return 0;
}
| 0
|
352,953
|
/* Normalize a telephone number by stripping ASCII spaces and hyphens; an
 * input that becomes empty is normalized to a single space.  Empty input is
 * rejected as invalid syntax. */
telephoneNumberNormalize(
	slap_mask_t usage,
	Syntax *syntax,
	MatchingRule *mr,
	struct berval *val,
	struct berval *normalized,
	void *ctx )
{
	char *out, *w;
	ber_len_t i;

	assert( SLAP_MR_IS_VALUE_OF_SYNTAX( usage ) != 0 );

	if ( BER_BVISEMPTY( val )) {
		BER_BVZERO( normalized );
		return LDAP_INVALID_SYNTAX;
	}

	/* The output can only shrink, so val->bv_len + 1 always suffices. */
	out = slap_sl_malloc( val->bv_len + 1, ctx );
	normalized->bv_val = out;

	w = out;
	for ( i = 0; i < val->bv_len; i++ ) {
		char ch = val->bv_val[i];

		if ( ASCII_SPACE( ch ) || ch == '-' )
			continue;
		*w++ = ch;
	}

	/* Everything was stripped: keep a single space as the value. */
	if ( w == out )
		*w++ = ' ';
	*w = '\0';
	normalized->bv_len = w - out;

	return LDAP_SUCCESS;
}
| 0
|
265,435
|
/*
 * Read (up to) "len" bytes of the SquashFS file "filename" into "buf",
 * reporting the number of bytes copied through "actread".  len == 0 means
 * "whole file".  Reading at a non-zero offset is not supported.  Symlinks
 * are resolved recursively; only regular files are loadable.
 *
 * Fixes vs. original:
 *  - data_buffer leaked when sqfs_disk_read() or sqfs_decompress() failed
 *    inside the data-block loop;
 *  - an empty directory left "ret" at 0 from sqfs_opendir(), which was then
 *    misread as "file found" and dereferenced a NULL dirs->entry; ret is
 *    now primed to -ENOENT before the lookup loop.
 */
int sqfs_read(const char *filename, void *buf, loff_t offset, loff_t len,
	      loff_t *actread)
{
	char *dir = NULL, *fragment_block, *datablock = NULL;
	char *fragment = NULL, *file = NULL, *resolved, *data;
	u64 start, n_blks, table_size, data_offset, table_offset, sparse_size;
	int ret, j, i_number, datablk_count = 0;
	struct squashfs_super_block *sblk = ctxt.sblk;
	struct squashfs_fragment_block_entry frag_entry;
	struct squashfs_file_info finfo = {0};
	struct squashfs_symlink_inode *symlink;
	struct fs_dir_stream *dirsp = NULL;
	struct squashfs_dir_stream *dirs;
	struct squashfs_lreg_inode *lreg;
	struct squashfs_base_inode *base;
	struct squashfs_reg_inode *reg;
	unsigned long dest_len;
	struct fs_dirent *dent;
	unsigned char *ipos;

	*actread = 0;

	if (offset) {
		/*
		 * TODO: implement reading at an offset in file
		 */
		printf("Error: reading at a specific offset in a squashfs file is not supported yet.\n");
		return -EINVAL;
	}

	/*
	 * sqfs_opendir will uncompress inode and directory tables, and will
	 * return a pointer to the directory that contains the requested file.
	 */
	sqfs_split_path(&file, &dir, filename);
	ret = sqfs_opendir(dir, &dirsp);
	if (ret) {
		goto out;
	}

	dirs = (struct squashfs_dir_stream *)dirsp;

	/*
	 * For now, only regular files are able to be loaded.  Prime ret so
	 * that an empty directory is reported as "not found" instead of
	 * being mistaken for a hit (fix).
	 */
	ret = -ENOENT;
	while (!sqfs_readdir(dirsp, &dent)) {
		ret = strcmp(dent->name, file);
		if (!ret)
			break;
		free(dirs->entry);
		dirs->entry = NULL;
	}

	if (ret) {
		printf("File not found.\n");
		*actread = 0;
		ret = -ENOENT;
		goto out;
	}

	i_number = dirs->dir_header->inode_number + dirs->entry->inode_offset;
	ipos = sqfs_find_inode(dirs->inode_table, i_number, sblk->inodes,
			       sblk->block_size);

	base = (struct squashfs_base_inode *)ipos;
	switch (get_unaligned_le16(&base->inode_type)) {
	case SQFS_REG_TYPE:
		reg = (struct squashfs_reg_inode *)ipos;
		datablk_count = sqfs_get_regfile_info(reg, &finfo, &frag_entry,
						      sblk->block_size);
		if (datablk_count < 0) {
			ret = -EINVAL;
			goto out;
		}
		memcpy(finfo.blk_sizes, ipos + sizeof(*reg),
		       datablk_count * sizeof(u32));
		break;
	case SQFS_LREG_TYPE:
		lreg = (struct squashfs_lreg_inode *)ipos;
		datablk_count = sqfs_get_lregfile_info(lreg, &finfo,
						       &frag_entry,
						       sblk->block_size);
		if (datablk_count < 0) {
			ret = -EINVAL;
			goto out;
		}
		memcpy(finfo.blk_sizes, ipos + sizeof(*lreg),
		       datablk_count * sizeof(u32));
		break;
	case SQFS_SYMLINK_TYPE:
	case SQFS_LSYMLINK_TYPE:
		/* Resolve the link target and restart the read on it. */
		symlink = (struct squashfs_symlink_inode *)ipos;
		resolved = sqfs_resolve_symlink(symlink, filename);
		ret = sqfs_read(resolved, buf, offset, len, actread);
		free(resolved);
		goto out;
	case SQFS_BLKDEV_TYPE:
	case SQFS_CHRDEV_TYPE:
	case SQFS_LBLKDEV_TYPE:
	case SQFS_LCHRDEV_TYPE:
	case SQFS_FIFO_TYPE:
	case SQFS_SOCKET_TYPE:
	case SQFS_LFIFO_TYPE:
	case SQFS_LSOCKET_TYPE:
	default:
		printf("Unsupported entry type\n");
		ret = -EINVAL;
		goto out;
	}

	/* If the user specifies a length, check its sanity */
	if (len) {
		if (len > finfo.size) {
			ret = -EINVAL;
			goto out;
		}

		finfo.size = len;
	} else {
		len = finfo.size;
	}

	if (datablk_count) {
		data_offset = finfo.start;
		datablock = malloc(get_unaligned_le32(&sblk->block_size));
		if (!datablock) {
			ret = -ENOMEM;
			goto out;
		}
	}

	for (j = 0; j < datablk_count; j++) {
		char *data_buffer;

		start = lldiv(data_offset, ctxt.cur_dev->blksz);
		table_size = SQFS_BLOCK_SIZE(finfo.blk_sizes[j]);
		table_offset = data_offset - (start * ctxt.cur_dev->blksz);
		n_blks = DIV_ROUND_UP(table_size + table_offset,
				      ctxt.cur_dev->blksz);

		/* Don't load any data for sparse blocks */
		if (finfo.blk_sizes[j] == 0) {
			n_blks = 0;
			table_offset = 0;
			data_buffer = NULL;
			data = NULL;
		} else {
			data_buffer = malloc_cache_aligned(n_blks * ctxt.cur_dev->blksz);
			if (!data_buffer) {
				ret = -ENOMEM;
				goto out;
			}

			ret = sqfs_disk_read(start, n_blks, data_buffer);
			if (ret < 0) {
				/*
				 * Possible causes: too many data blocks or too large
				 * SquashFS block size. Tip: re-compile the SquashFS
				 * image with mksquashfs's -b <block_size> option.
				 */
				printf("Error: too many data blocks to be read.\n");
				free(data_buffer); /* fix: don't leak on error */
				goto out;
			}

			data = data_buffer + table_offset;
		}

		/* Load the data */
		if (finfo.blk_sizes[j] == 0) {
			/* This is a sparse block */
			sparse_size = get_unaligned_le32(&sblk->block_size);
			if ((*actread + sparse_size) > len)
				sparse_size = len - *actread;
			memset(buf + *actread, 0, sparse_size);
			*actread += sparse_size;
		} else if (SQFS_COMPRESSED_BLOCK(finfo.blk_sizes[j])) {
			dest_len = get_unaligned_le32(&sblk->block_size);
			ret = sqfs_decompress(&ctxt, datablock, &dest_len,
					      data, table_size);
			if (ret) {
				free(data_buffer); /* fix: don't leak on error */
				goto out;
			}

			if ((*actread + dest_len) > len)
				dest_len = len - *actread;
			memcpy(buf + *actread, datablock, dest_len);
			*actread += dest_len;
		} else {
			if ((*actread + table_size) > len)
				table_size = len - *actread;
			memcpy(buf + *actread, data, table_size);
			*actread += table_size;
		}

		data_offset += table_size;
		free(data_buffer);
		if (*actread >= len)
			break;
	}

	/*
	 * There is no need to continue if the file is not fragmented.
	 */
	if (!finfo.frag) {
		ret = 0;
		goto out;
	}

	start = lldiv(frag_entry.start, ctxt.cur_dev->blksz);
	table_size = SQFS_BLOCK_SIZE(frag_entry.size);
	table_offset = frag_entry.start - (start * ctxt.cur_dev->blksz);
	n_blks = DIV_ROUND_UP(table_size + table_offset, ctxt.cur_dev->blksz);
	fragment = malloc_cache_aligned(n_blks * ctxt.cur_dev->blksz);
	if (!fragment) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sqfs_disk_read(start, n_blks, fragment);
	if (ret < 0)
		goto out;

	/* File compressed and fragmented */
	if (finfo.frag && finfo.comp) {
		dest_len = get_unaligned_le32(&sblk->block_size);
		fragment_block = malloc(dest_len);
		if (!fragment_block) {
			ret = -ENOMEM;
			goto out;
		}

		ret = sqfs_decompress(&ctxt, fragment_block, &dest_len,
				      (void *)fragment + table_offset,
				      frag_entry.size);
		if (ret) {
			free(fragment_block);
			goto out;
		}

		memcpy(buf + *actread, &fragment_block[finfo.offset], finfo.size - *actread);
		*actread = finfo.size;

		free(fragment_block);

	} else if (finfo.frag && !finfo.comp) {
		fragment_block = (void *)fragment + table_offset;

		memcpy(buf + *actread, &fragment_block[finfo.offset], finfo.size - *actread);
		*actread = finfo.size;
	}

out:
	free(fragment);
	free(datablock);
	free(file);
	free(dir);
	free(finfo.blk_sizes);
	sqfs_closedir(dirsp);

	return ret;
}
| 0
|
276,908
|
/*
 * do_i2c_read() - Handle the "i2c read" shell command.
 *
 * Usage: i2c read <chip> <devaddr>[.alen] <length> <memaddr>
 *
 * Reads @length bytes from I2C device @chip starting at register
 * @devaddr and stores them at memory address @memaddr.
 *
 * Return: 0 on success, CMD_RET_USAGE on malformed arguments, or the
 * value of i2c_report_err() when the bus transfer fails.
 */
static int do_i2c_read(struct cmd_tbl *cmdtp, int flag, int argc,
char *const argv[])
{
uint chip;
uint devaddr, length;
/*
 * NOTE(review): alen is declared uint but compared against -1 below;
 * -1 wraps to UINT_MAX, which the "alen > 3" check rejects first, so
 * the behavior is correct — but the signed/unsigned mix deserves a
 * second look (upstream uses int here). TODO confirm.
 */
uint alen;
u_char *memaddr;
int ret;
#if CONFIG_IS_ENABLED(DM_I2C)
struct udevice *dev;
#endif
if (argc != 5)
return CMD_RET_USAGE;
/*
 * I2C chip address
 */
chip = hextoul(argv[1], NULL);
/*
 * I2C data address within the chip. This can be 1 or
 * 2 bytes long. Some day it might be 3 bytes long :-).
 * An explicit length may be appended as "<addr>.<alen>".
 */
devaddr = hextoul(argv[2], NULL);
alen = get_alen(argv[2], DEFAULT_ADDR_LEN);
if (alen > 3)
return CMD_RET_USAGE;
/*
 * Length is the number of objects, not number of bytes.
 */
length = hextoul(argv[3], NULL);
/*
 * memaddr is the address where to store things in memory
 */
memaddr = (u_char *)hextoul(argv[4], NULL);
#if CONFIG_IS_ENABLED(DM_I2C)
/* Driver-model path: resolve the chip on the current bus first. */
ret = i2c_get_cur_bus_chip(chip, &dev);
if (!ret && alen != -1)
ret = i2c_set_chip_offset_len(dev, alen);
if (!ret)
ret = dm_i2c_read(dev, devaddr, memaddr, length);
#else
/* Legacy (non-DM) path. */
ret = i2c_read(chip, devaddr, alen, memaddr, length);
#endif
if (ret)
return i2c_report_err(ret, I2C_ERR_READ);
return 0;
}
| 0
|
293,779
|
/*
 * Build the RBinInfo descriptor for an Apple kernelcache binary.
 *
 * All fields describing the binary are hard-coded for the kernelcache
 * format (arm64 iOS XNU kernel image). Returns a freshly allocated
 * RBinInfo (caller owns it) or NULL on allocation failure.
 */
static RBinInfo *info(RBinFile *bf) {
	RBinInfo *ret = R_NEW0 (RBinInfo);
	if (!ret) {
		return NULL;
	}
	// bf->file can be NULL (e.g. binary loaded from a buffer);
	// strdup(NULL) is undefined behavior, so guard it.
	ret->file = bf->file ? strdup (bf->file) : NULL;
	ret->bclass = strdup ("kernelcache");
	ret->rclass = strdup ("ios");
	ret->os = strdup ("iOS");
	ret->arch = strdup ("arm"); // XXX
	ret->machine = strdup (ret->arch);
	ret->subsystem = strdup ("xnu");
	ret->type = strdup ("kernel-cache");
	ret->bits = 64;
	ret->has_va = true;
	ret->big_endian = false; // kernelcaches are little-endian
	ret->dbg_info = 0;
	return ret;
}
| 0
|
90,146
|
// Factory: hand back the stub implementation for tests, or the real
// implementation otherwise. Caller takes ownership of the returned object.
NetworkLibrary* NetworkLibrary::GetImpl(bool stub) {
  return stub ? static_cast<NetworkLibrary*>(new NetworkLibraryStubImpl())
              : static_cast<NetworkLibrary*>(new NetworkLibraryImpl());
}
| 0
|
317,118
|
/*
 * smack_inet_csk_clone - Copy the peer's Smack label onto a cloned socket.
 * @sk: the newly cloned sock
 * @req: the connection request it was cloned from
 *
 * If the request carries a peer secid, translate it to a smack_known
 * entry and record it as the packet label; otherwise clear the label.
 */
static void smack_inet_csk_clone(struct sock *sk,
				 const struct request_sock *req)
{
	struct socket_smack *ssp = sk->sk_security;

	ssp->smk_packet = req->peer_secid != 0 ?
		smack_from_secid(req->peer_secid) : NULL;
}
| 0
|
259,313
|
/*
 * find_prev_closest_index() - Search backwards through a saved array of
 * index entries for the closest keyframe whose (CTTS-adjusted) timestamp
 * is <= timestamp_pts.
 *
 * The old entries (e_old/nb_old) are temporarily swapped into the stream
 * so av_index_search_timestamp() can operate on them; the stream's real
 * index is restored before returning.
 *
 * On success *index is set (and, when ctts_data is given, *ctts_index /
 * *ctts_sample locate the matching CTTS entry). Returns 0 when a usable
 * entry was found, -1 otherwise.
 */
static int find_prev_closest_index(AVStream *st,
AVIndexEntry *e_old,
int nb_old,
MOVCtts* ctts_data,
int64_t ctts_count,
int64_t timestamp_pts,
int flag,
int64_t* index,
int64_t* ctts_index,
int64_t* ctts_sample)
{
MOVStreamContext *msc = st->priv_data;
FFStream *const sti = ffstream(st);
/* Remember the stream's live index so it can be restored at the end. */
AVIndexEntry *e_keep = sti->index_entries;
int nb_keep = sti->nb_index_entries;
int64_t i = 0;
int64_t index_ctts_count;
av_assert0(index);
// If dts_shift > 0, then all the index timestamps will have to be offset by
// at least dts_shift amount to obtain PTS.
// Hence we decrement the searched timestamp_pts by dts_shift to find the closest index element.
if (msc->dts_shift > 0) {
timestamp_pts -= msc->dts_shift;
}
/* Temporarily expose the old entries through the stream. */
sti->index_entries = e_old;
sti->nb_index_entries = nb_old;
*index = av_index_search_timestamp(st, timestamp_pts, flag | AVSEEK_FLAG_BACKWARD);
// Keep going backwards in the index entries until the timestamp is the same.
if (*index >= 0) {
for (i = *index; i > 0 && e_old[i].timestamp == e_old[i - 1].timestamp;
i--) {
if ((flag & AVSEEK_FLAG_ANY) ||
(e_old[i - 1].flags & AVINDEX_KEYFRAME)) {
*index = i - 1;
}
}
}
// If we have CTTS then refine the search, by searching backwards over PTS
// computed by adding corresponding CTTS durations to index timestamps.
if (ctts_data && *index >= 0) {
av_assert0(ctts_index);
av_assert0(ctts_sample);
// Find out the ctts_index for the found frame: walk the CTTS
// run-length entries forward, sample by sample, up to *index.
*ctts_index = 0;
*ctts_sample = 0;
for (index_ctts_count = 0; index_ctts_count < *index; index_ctts_count++) {
if (*ctts_index < ctts_count) {
(*ctts_sample)++;
if (ctts_data[*ctts_index].count == *ctts_sample) {
(*ctts_index)++;
*ctts_sample = 0;
}
}
}
/* Step backwards, keeping index and CTTS position in lock-step. */
while (*index >= 0 && (*ctts_index) >= 0 && (*ctts_index) < ctts_count) {
// Find a "key frame" with PTS <= timestamp_pts (So that we can decode B-frames correctly).
// No need to add dts_shift to the timestamp here because timestamp_pts has already been
// compensated by dts_shift above.
if ((e_old[*index].timestamp + ctts_data[*ctts_index].duration) <= timestamp_pts &&
(e_old[*index].flags & AVINDEX_KEYFRAME)) {
break;
}
(*index)--;
if (*ctts_sample == 0) {
(*ctts_index)--;
if (*ctts_index >= 0)
*ctts_sample = ctts_data[*ctts_index].count - 1;
} else {
(*ctts_sample)--;
}
}
}
/* restore AVStream state*/
sti->index_entries = e_keep;
sti->nb_index_entries = nb_keep;
return *index >= 0 ? 0 : -1;
}
| 0
|
344,738
|
set_nonblock(int fd)
{
int val;
val = fcntl(fd, F_GETFL);
if (val == -1) {
error("fcntl(%d, F_GETFL): %s", fd, strerror(errno));
return (-1);
}
if (val & O_NONBLOCK) {
debug3("fd %d is O_NONBLOCK", fd);
return (0);
}
debug2("fd %d setting O_NONBLOCK", fd);
val |= O_NONBLOCK;
if (fcntl(fd, F_SETFL, val) == -1) {
debug("fcntl(%d, F_SETFL, O_NONBLOCK): %s", fd,
strerror(errno));
return (-1);
}
return (0);
}
| 0
|
387,593
|
/*
 * get_ctl_id_hash - Compute a non-negative hash for a control element id.
 * @id: the control element identifier
 *
 * Folds iface, device, subdevice, the NUL-terminated (bounded) name and
 * index into a polynomial hash, then masks with LONG_MAX so the result
 * fits in a signed long range.
 */
static unsigned long get_ctl_id_hash(const struct snd_ctl_elem_id *id)
{
	unsigned long hash = id->iface;
	int pos;

	hash = MULTIPLIER * hash + id->device;
	hash = MULTIPLIER * hash + id->subdevice;
	for (pos = 0; pos < SNDRV_CTL_ELEM_ID_NAME_MAXLEN && id->name[pos]; pos++)
		hash = MULTIPLIER * hash + id->name[pos];
	hash = MULTIPLIER * hash + id->index;
	return hash & LONG_MAX;
}
| 0
|
513,361
|
add_key_part(DYNAMIC_ARRAY *keyuse_array, KEY_FIELD *key_field)
{
Field *field=key_field->field;
TABLE *form= field->table;
if (key_field->eq_func && !(key_field->optimize & KEY_OPTIMIZE_EXISTS))
{
for (uint key=0 ; key < form->s->keys ; key++)
{
if (!(form->keys_in_use_for_query.is_set(key)))
continue;
if (form->key_info[key].flags & (HA_FULLTEXT | HA_SPATIAL))
continue; // ToDo: ft-keys in non-ft queries. SerG
KEY *keyinfo= form->key_info+key;
uint key_parts= form->actual_n_key_parts(keyinfo);
for (uint part=0 ; part < key_parts ; part++)
{
if (field->eq(form->key_info[key].key_part[part].field) &&
field->can_optimize_keypart_ref(key_field->cond, key_field->val))
{
if (add_keyuse(keyuse_array, key_field, key, part))
return TRUE;
}
}
}
if (field->hash_join_is_possible() &&
(key_field->optimize & KEY_OPTIMIZE_EQ) &&
key_field->val->used_tables())
{
if (!field->can_optimize_hash_join(key_field->cond, key_field->val))
return false;
/*
If a key use is extracted from an equi-join predicate then it is
added not only as a key use for every index whose component can
be evalusted utilizing this key use, but also as a key use for
hash join. Such key uses are marked with a special key number.
*/
if (add_keyuse(keyuse_array, key_field, get_hash_join_key_no(), 0))
return TRUE;
}
}
return FALSE;
}
| 0
|
361,759
|
/*
 * em28xx_wait_until_ac97_features_equals - Poll the AC97 codec until its
 * feature register reads back as @expected_feat (and differs from the
 * powerdown register, i.e. the reads look sane).
 *
 * Polls every 50 ms for up to 2 seconds. Returns 0 on success, a negative
 * read error from em28xx_read_ac97(), or -ETIMEDOUT when the registers
 * never settle.
 */
static int em28xx_wait_until_ac97_features_equals(struct em28xx *dev,
						  int expected_feat)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(2000);
	int features, pwr;

	while (time_is_after_jiffies(deadline)) {
		features = em28xx_read_ac97(dev, AC97_RESET);
		if (features < 0)
			return features;

		pwr = em28xx_read_ac97(dev, AC97_POWERDOWN);
		if (pwr < 0)
			return pwr;

		/* Distinct register values show the reads are trustworthy. */
		if (features == expected_feat && features != pwr)
			return 0;

		msleep(50);
	}

	dev_warn(&dev->intf->dev, "AC97 registers access is not reliable !\n");
	return -ETIMEDOUT;
}
| 0
|
254,905
|
/**
 * Factory for GroupFromFirstDocumentTransformation.
 *
 * Constructs the transformation from the group key and the accumulator
 * expressions; ownership of the new object passes to the caller.
 *
 * NOTE(review): 'expCtx' is accepted but not used here — presumably kept
 * for signature symmetry with other factory functions; confirm against
 * callers before removing it.
 */
std::unique_ptr<GroupFromFirstDocumentTransformation> GroupFromFirstDocumentTransformation::create(
const intrusive_ptr<ExpressionContext>& expCtx,
const std::string& groupId,
vector<pair<std::string, intrusive_ptr<Expression>>> accumulatorExprs) {
return std::make_unique<GroupFromFirstDocumentTransformation>(groupId,
std::move(accumulatorExprs));
}
| 0
|
344,751
|
freeargs(arglist *args)
{
u_int i;
if (args->list != NULL) {
for (i = 0; i < args->num; i++)
free(args->list[i]);
free(args->list);
args->nalloc = args->num = 0;
args->list = NULL;
}
}
| 0
|
163,835
|
// Create a V8 exception value for the DOM exception code 'ec'.
//
// Simple ECMAScript error kinds (general/type/range/syntax/reference) are
// built directly from the sanitized message. Everything else becomes a
// DOMException wrapper object whose "stack" accessor and hidden value are
// wired to a companion Error so stack traces work.
// Returns v8Undefined() for non-positive codes, during V8 termination, or
// if wrapping fails.
v8::Handle<v8::Value> V8ThrowException::createDOMException(v8::Isolate* isolate, int ec, const String& sanitizedMessage, const String& unsanitizedMessage, const v8::Handle<v8::Object>& creationContext)
{
if (ec <= 0 || v8::V8::IsExecutionTerminating())
return v8Undefined();
// Only SecurityError may carry an unsanitized (cross-origin) message.
ASSERT(ec == SecurityError || unsanitizedMessage.isEmpty());
if (ec == V8GeneralError)
return V8ThrowException::createGeneralError(isolate, sanitizedMessage);
if (ec == V8TypeError)
return V8ThrowException::createTypeError(isolate, sanitizedMessage);
if (ec == V8RangeError)
return V8ThrowException::createRangeError(isolate, sanitizedMessage);
if (ec == V8SyntaxError)
return V8ThrowException::createSyntaxError(isolate, sanitizedMessage);
if (ec == V8ReferenceError)
return V8ThrowException::createReferenceError(isolate, sanitizedMessage);
// Fall back to the current context's global when the creation context's
// frame is inaccessible (detached or cross-origin).
v8::Handle<v8::Object> sanitizedCreationContext = creationContext;
// FIXME: Is the current context always the right choice?
Frame* frame = toFrameIfNotDetached(creationContext->CreationContext());
if (!frame || !BindingSecurity::shouldAllowAccessToFrame(isolate, frame, DoNotReportSecurityError))
sanitizedCreationContext = isolate->GetCurrentContext()->Global();
RefPtrWillBeRawPtr<DOMException> domException = DOMException::create(ec, sanitizedMessage, unsanitizedMessage);
v8::Handle<v8::Value> exception = toV8(domException.get(), sanitizedCreationContext, isolate);
if (exception.IsEmpty())
return v8Undefined();
// Attach an Error object so that the exception exposes a usable stack.
v8::Handle<v8::Value> error = v8::Exception::Error(v8String(isolate, domException->message()));
ASSERT(!error.IsEmpty());
ASSERT(exception->IsObject());
exception->ToObject(isolate)->SetAccessor(v8AtomicString(isolate, "stack"), domExceptionStackGetter, domExceptionStackSetter, error);
V8HiddenValue::setHiddenValue(isolate, exception->ToObject(isolate), V8HiddenValue::error(isolate), error);
return exception;
}
| 0
|
328,807
|
do_put(
int regname,
char_u *expr_result, // result for regname "=" when compiled
int dir, // BACKWARD for 'P', FORWARD for 'p'
long count,
int flags)
{
char_u *ptr;
char_u *newp, *oldp;
int yanklen;
int totlen = 0; // init for gcc
linenr_T lnum;
colnr_T col;
long i; // index in y_array[]
int y_type;
long y_size;
int oldlen;
long y_width = 0;
colnr_T vcol;
int delcount;
int incr = 0;
long j;
struct block_def bd;
char_u **y_array = NULL;
yankreg_T *y_current_used = NULL;
long nr_lines = 0;
pos_T new_cursor;
int indent;
int orig_indent = 0; // init for gcc
int indent_diff = 0; // init for gcc
int first_indent = TRUE;
int lendiff = 0;
pos_T old_pos;
char_u *insert_string = NULL;
int allocated = FALSE;
long cnt;
pos_T orig_start = curbuf->b_op_start;
pos_T orig_end = curbuf->b_op_end;
unsigned int cur_ve_flags = get_ve_flags();
#ifdef FEAT_CLIPBOARD
// Adjust register name for "unnamed" in 'clipboard'.
adjust_clip_reg(®name);
(void)may_get_selection(regname);
#endif
if (flags & PUT_FIXINDENT)
orig_indent = get_indent();
curbuf->b_op_start = curwin->w_cursor; // default for '[ mark
curbuf->b_op_end = curwin->w_cursor; // default for '] mark
// Using inserted text works differently, because the register includes
// special characters (newlines, etc.).
if (regname == '.')
{
if (VIsual_active)
stuffcharReadbuff(VIsual_mode);
(void)stuff_inserted((dir == FORWARD ? (count == -1 ? 'o' : 'a') :
(count == -1 ? 'O' : 'i')), count, FALSE);
// Putting the text is done later, so can't really move the cursor to
// the next character. Use "l" to simulate it.
if ((flags & PUT_CURSEND) && gchar_cursor() != NUL)
stuffcharReadbuff('l');
return;
}
// For special registers '%' (file name), '#' (alternate file name) and
// ':' (last command line), etc. we have to create a fake yank register.
// For compiled code "expr_result" holds the expression result.
if (regname == '=' && expr_result != NULL)
insert_string = expr_result;
else if (get_spec_reg(regname, &insert_string, &allocated, TRUE)
&& insert_string == NULL)
return;
// Autocommands may be executed when saving lines for undo. This might
// make "y_array" invalid, so we start undo now to avoid that.
if (u_save(curwin->w_cursor.lnum, curwin->w_cursor.lnum + 1) == FAIL)
goto end;
if (insert_string != NULL)
{
y_type = MCHAR;
#ifdef FEAT_EVAL
if (regname == '=')
{
// For the = register we need to split the string at NL
// characters.
// Loop twice: count the number of lines and save them.
for (;;)
{
y_size = 0;
ptr = insert_string;
while (ptr != NULL)
{
if (y_array != NULL)
y_array[y_size] = ptr;
++y_size;
ptr = vim_strchr(ptr, '\n');
if (ptr != NULL)
{
if (y_array != NULL)
*ptr = NUL;
++ptr;
// A trailing '\n' makes the register linewise.
if (*ptr == NUL)
{
y_type = MLINE;
break;
}
}
}
if (y_array != NULL)
break;
y_array = ALLOC_MULT(char_u *, y_size);
if (y_array == NULL)
goto end;
}
}
else
#endif
{
y_size = 1; // use fake one-line yank register
y_array = &insert_string;
}
}
else
{
get_yank_register(regname, FALSE);
y_type = y_current->y_type;
y_width = y_current->y_width;
y_size = y_current->y_size;
y_array = y_current->y_array;
y_current_used = y_current;
}
if (y_type == MLINE)
{
if (flags & PUT_LINE_SPLIT)
{
char_u *p;
// "p" or "P" in Visual mode: split the lines to put the text in
// between.
if (u_save_cursor() == FAIL)
goto end;
p = ml_get_cursor();
if (dir == FORWARD && *p != NUL)
MB_PTR_ADV(p);
ptr = vim_strsave(p);
if (ptr == NULL)
goto end;
ml_append(curwin->w_cursor.lnum, ptr, (colnr_T)0, FALSE);
vim_free(ptr);
oldp = ml_get_curline();
p = oldp + curwin->w_cursor.col;
if (dir == FORWARD && *p != NUL)
MB_PTR_ADV(p);
ptr = vim_strnsave(oldp, p - oldp);
if (ptr == NULL)
goto end;
ml_replace(curwin->w_cursor.lnum, ptr, FALSE);
++nr_lines;
dir = FORWARD;
}
if (flags & PUT_LINE_FORWARD)
{
// Must be "p" for a Visual block, put lines below the block.
curwin->w_cursor = curbuf->b_visual.vi_end;
dir = FORWARD;
}
curbuf->b_op_start = curwin->w_cursor; // default for '[ mark
curbuf->b_op_end = curwin->w_cursor; // default for '] mark
}
if (flags & PUT_LINE) // :put command or "p" in Visual line mode.
y_type = MLINE;
if (y_size == 0 || y_array == NULL)
{
semsg(_(e_nothing_in_register_str),
regname == 0 ? (char_u *)"\"" : transchar(regname));
goto end;
}
if (y_type == MBLOCK)
{
lnum = curwin->w_cursor.lnum + y_size + 1;
if (lnum > curbuf->b_ml.ml_line_count)
lnum = curbuf->b_ml.ml_line_count + 1;
if (u_save(curwin->w_cursor.lnum - 1, lnum) == FAIL)
goto end;
}
else if (y_type == MLINE)
{
lnum = curwin->w_cursor.lnum;
#ifdef FEAT_FOLDING
// Correct line number for closed fold. Don't move the cursor yet,
// u_save() uses it.
if (dir == BACKWARD)
(void)hasFolding(lnum, &lnum, NULL);
else
(void)hasFolding(lnum, NULL, &lnum);
#endif
if (dir == FORWARD)
++lnum;
// In an empty buffer the empty line is going to be replaced, include
// it in the saved lines.
if ((BUFEMPTY() ? u_save(0, 2) : u_save(lnum - 1, lnum)) == FAIL)
goto end;
#ifdef FEAT_FOLDING
if (dir == FORWARD)
curwin->w_cursor.lnum = lnum - 1;
else
curwin->w_cursor.lnum = lnum;
curbuf->b_op_start = curwin->w_cursor; // for mark_adjust()
#endif
}
else if (u_save_cursor() == FAIL)
goto end;
yanklen = (int)STRLEN(y_array[0]);
if (cur_ve_flags == VE_ALL && y_type == MCHAR)
{
if (gchar_cursor() == TAB)
{
int viscol = getviscol();
int ts = curbuf->b_p_ts;
// Don't need to insert spaces when "p" on the last position of a
// tab or "P" on the first position.
if (dir == FORWARD ?
#ifdef FEAT_VARTABS
tabstop_padding(viscol, ts, curbuf->b_p_vts_array) != 1
#else
ts - (viscol % ts) != 1
#endif
: curwin->w_cursor.coladd > 0)
coladvance_force(viscol);
else
curwin->w_cursor.coladd = 0;
}
else if (curwin->w_cursor.coladd > 0 || gchar_cursor() == NUL)
coladvance_force(getviscol() + (dir == FORWARD));
}
lnum = curwin->w_cursor.lnum;
col = curwin->w_cursor.col;
// Block mode
if (y_type == MBLOCK)
{
int c = gchar_cursor();
colnr_T endcol2 = 0;
if (dir == FORWARD && c != NUL)
{
if (cur_ve_flags == VE_ALL)
getvcol(curwin, &curwin->w_cursor, &col, NULL, &endcol2);
else
getvcol(curwin, &curwin->w_cursor, NULL, NULL, &col);
if (has_mbyte)
// move to start of next multi-byte character
curwin->w_cursor.col += (*mb_ptr2len)(ml_get_cursor());
else
if (c != TAB || cur_ve_flags != VE_ALL)
++curwin->w_cursor.col;
++col;
}
else
getvcol(curwin, &curwin->w_cursor, &col, NULL, &endcol2);
col += curwin->w_cursor.coladd;
if (cur_ve_flags == VE_ALL
&& (curwin->w_cursor.coladd > 0
|| endcol2 == curwin->w_cursor.col))
{
if (dir == FORWARD && c == NUL)
++col;
if (dir != FORWARD && c != NUL && curwin->w_cursor.coladd > 0)
++curwin->w_cursor.col;
if (c == TAB)
{
if (dir == BACKWARD && curwin->w_cursor.col)
curwin->w_cursor.col--;
if (dir == FORWARD && col - 1 == endcol2)
curwin->w_cursor.col++;
}
}
curwin->w_cursor.coladd = 0;
bd.textcol = 0;
for (i = 0; i < y_size; ++i)
{
int spaces = 0;
char shortline;
bd.startspaces = 0;
bd.endspaces = 0;
vcol = 0;
delcount = 0;
// add a new line
if (curwin->w_cursor.lnum > curbuf->b_ml.ml_line_count)
{
if (ml_append(curbuf->b_ml.ml_line_count, (char_u *)"",
(colnr_T)1, FALSE) == FAIL)
break;
++nr_lines;
}
// get the old line and advance to the position to insert at
oldp = ml_get_curline();
oldlen = (int)STRLEN(oldp);
for (ptr = oldp; vcol < col && *ptr; )
{
// Count a tab for what it's worth (if list mode not on)
incr = lbr_chartabsize_adv(oldp, &ptr, vcol);
vcol += incr;
}
bd.textcol = (colnr_T)(ptr - oldp);
shortline = (vcol < col) || (vcol == col && !*ptr) ;
if (vcol < col) // line too short, padd with spaces
bd.startspaces = col - vcol;
else if (vcol > col)
{
bd.endspaces = vcol - col;
bd.startspaces = incr - bd.endspaces;
--bd.textcol;
delcount = 1;
if (has_mbyte)
bd.textcol -= (*mb_head_off)(oldp, oldp + bd.textcol);
if (oldp[bd.textcol] != TAB)
{
// Only a Tab can be split into spaces. Other
// characters will have to be moved to after the
// block, causing misalignment.
delcount = 0;
bd.endspaces = 0;
}
}
yanklen = (int)STRLEN(y_array[i]);
if ((flags & PUT_BLOCK_INNER) == 0)
{
// calculate number of spaces required to fill right side of
// block
spaces = y_width + 1;
for (j = 0; j < yanklen; j++)
spaces -= lbr_chartabsize(NULL, &y_array[i][j], 0);
if (spaces < 0)
spaces = 0;
}
// Insert the new text.
// First check for multiplication overflow.
if (yanklen + spaces != 0
&& count > ((INT_MAX - (bd.startspaces + bd.endspaces))
/ (yanklen + spaces)))
{
emsg(_(e_resulting_text_too_long));
break;
}
totlen = count * (yanklen + spaces) + bd.startspaces + bd.endspaces;
newp = alloc(totlen + oldlen + 1);
if (newp == NULL)
break;
// copy part up to cursor to new line
ptr = newp;
mch_memmove(ptr, oldp, (size_t)bd.textcol);
ptr += bd.textcol;
// may insert some spaces before the new text
vim_memset(ptr, ' ', (size_t)bd.startspaces);
ptr += bd.startspaces;
// insert the new text
for (j = 0; j < count; ++j)
{
mch_memmove(ptr, y_array[i], (size_t)yanklen);
ptr += yanklen;
// insert block's trailing spaces only if there's text behind
if ((j < count - 1 || !shortline) && spaces)
{
vim_memset(ptr, ' ', (size_t)spaces);
ptr += spaces;
}
else
totlen -= spaces; // didn't use these spaces
}
// may insert some spaces after the new text
vim_memset(ptr, ' ', (size_t)bd.endspaces);
ptr += bd.endspaces;
// move the text after the cursor to the end of the line.
mch_memmove(ptr, oldp + bd.textcol + delcount,
(size_t)(oldlen - bd.textcol - delcount + 1));
ml_replace(curwin->w_cursor.lnum, newp, FALSE);
++curwin->w_cursor.lnum;
if (i == 0)
curwin->w_cursor.col += bd.startspaces;
}
changed_lines(lnum, 0, curwin->w_cursor.lnum, nr_lines);
// Set '[ mark.
curbuf->b_op_start = curwin->w_cursor;
curbuf->b_op_start.lnum = lnum;
// adjust '] mark
curbuf->b_op_end.lnum = curwin->w_cursor.lnum - 1;
curbuf->b_op_end.col = bd.textcol + totlen - 1;
curbuf->b_op_end.coladd = 0;
if (flags & PUT_CURSEND)
{
colnr_T len;
curwin->w_cursor = curbuf->b_op_end;
curwin->w_cursor.col++;
// in Insert mode we might be after the NUL, correct for that
len = (colnr_T)STRLEN(ml_get_curline());
if (curwin->w_cursor.col > len)
curwin->w_cursor.col = len;
}
else
curwin->w_cursor.lnum = lnum;
}
else
{
// Character or Line mode
if (y_type == MCHAR)
{
// if type is MCHAR, FORWARD is the same as BACKWARD on the next
// char
if (dir == FORWARD && gchar_cursor() != NUL)
{
if (has_mbyte)
{
int bytelen = (*mb_ptr2len)(ml_get_cursor());
// put it on the next of the multi-byte character.
col += bytelen;
if (yanklen)
{
curwin->w_cursor.col += bytelen;
curbuf->b_op_end.col += bytelen;
}
}
else
{
++col;
if (yanklen)
{
++curwin->w_cursor.col;
++curbuf->b_op_end.col;
}
}
}
curbuf->b_op_start = curwin->w_cursor;
}
// Line mode: BACKWARD is the same as FORWARD on the previous line
else if (dir == BACKWARD)
--lnum;
new_cursor = curwin->w_cursor;
// simple case: insert into one line at a time
if (y_type == MCHAR && y_size == 1)
{
linenr_T end_lnum = 0; // init for gcc
linenr_T start_lnum = lnum;
int first_byte_off = 0;
if (VIsual_active)
{
end_lnum = curbuf->b_visual.vi_end.lnum;
if (end_lnum < curbuf->b_visual.vi_start.lnum)
end_lnum = curbuf->b_visual.vi_start.lnum;
if (end_lnum > start_lnum)
{
pos_T pos;
// "col" is valid for the first line, in following lines
// the virtual column needs to be used. Matters for
// multi-byte characters.
pos.lnum = lnum;
pos.col = col;
pos.coladd = 0;
getvcol(curwin, &pos, NULL, &vcol, NULL);
}
}
if (count == 0 || yanklen == 0)
{
if (VIsual_active)
lnum = end_lnum;
}
else if (count > INT_MAX / yanklen)
// multiplication overflow
emsg(_(e_resulting_text_too_long));
else
{
totlen = count * yanklen;
do {
oldp = ml_get(lnum);
oldlen = (int)STRLEN(oldp);
if (lnum > start_lnum)
{
pos_T pos;
pos.lnum = lnum;
if (getvpos(&pos, vcol) == OK)
col = pos.col;
else
col = MAXCOL;
}
if (VIsual_active && col > oldlen)
{
lnum++;
continue;
}
newp = alloc(totlen + oldlen + 1);
if (newp == NULL)
goto end; // alloc() gave an error message
mch_memmove(newp, oldp, (size_t)col);
ptr = newp + col;
for (i = 0; i < count; ++i)
{
mch_memmove(ptr, y_array[0], (size_t)yanklen);
ptr += yanklen;
}
STRMOVE(ptr, oldp + col);
ml_replace(lnum, newp, FALSE);
// compute the byte offset for the last character
first_byte_off = mb_head_off(newp, ptr - 1);
// Place cursor on last putted char.
if (lnum == curwin->w_cursor.lnum)
{
// make sure curwin->w_virtcol is updated
changed_cline_bef_curs();
curwin->w_cursor.col += (colnr_T)(totlen - 1);
}
if (VIsual_active)
lnum++;
} while (VIsual_active && lnum <= end_lnum);
if (VIsual_active) // reset lnum to the last visual line
lnum--;
}
// put '] at the first byte of the last character
curbuf->b_op_end = curwin->w_cursor;
curbuf->b_op_end.col -= first_byte_off;
// For "CTRL-O p" in Insert mode, put cursor after last char
if (totlen && (restart_edit != 0 || (flags & PUT_CURSEND)))
++curwin->w_cursor.col;
else
curwin->w_cursor.col -= first_byte_off;
changed_bytes(lnum, col);
}
else
{
linenr_T new_lnum = new_cursor.lnum;
size_t len;
// Insert at least one line. When y_type is MCHAR, break the first
// line in two.
for (cnt = 1; cnt <= count; ++cnt)
{
i = 0;
if (y_type == MCHAR)
{
// Split the current line in two at the insert position.
// First insert y_array[size - 1] in front of second line.
// Then append y_array[0] to first line.
lnum = new_cursor.lnum;
ptr = ml_get(lnum) + col;
totlen = (int)STRLEN(y_array[y_size - 1]);
newp = alloc(STRLEN(ptr) + totlen + 1);
if (newp == NULL)
goto error;
STRCPY(newp, y_array[y_size - 1]);
STRCAT(newp, ptr);
// insert second line
ml_append(lnum, newp, (colnr_T)0, FALSE);
++new_lnum;
vim_free(newp);
oldp = ml_get(lnum);
newp = alloc(col + yanklen + 1);
if (newp == NULL)
goto error;
// copy first part of line
mch_memmove(newp, oldp, (size_t)col);
// append to first line
mch_memmove(newp + col, y_array[0], (size_t)(yanklen + 1));
ml_replace(lnum, newp, FALSE);
curwin->w_cursor.lnum = lnum;
i = 1;
}
for (; i < y_size; ++i)
{
if (y_type != MCHAR || i < y_size - 1)
{
if (ml_append(lnum, y_array[i], (colnr_T)0, FALSE)
== FAIL)
goto error;
new_lnum++;
}
lnum++;
++nr_lines;
if (flags & PUT_FIXINDENT)
{
old_pos = curwin->w_cursor;
curwin->w_cursor.lnum = lnum;
ptr = ml_get(lnum);
if (cnt == count && i == y_size - 1)
lendiff = (int)STRLEN(ptr);
if (*ptr == '#' && preprocs_left())
indent = 0; // Leave # lines at start
else
if (*ptr == NUL)
indent = 0; // Ignore empty lines
else if (first_indent)
{
indent_diff = orig_indent - get_indent();
indent = orig_indent;
first_indent = FALSE;
}
else if ((indent = get_indent() + indent_diff) < 0)
indent = 0;
(void)set_indent(indent, 0);
curwin->w_cursor = old_pos;
// remember how many chars were removed
if (cnt == count && i == y_size - 1)
lendiff -= (int)STRLEN(ml_get(lnum));
}
}
if (cnt == 1)
new_lnum = lnum;
}
error:
// Adjust marks.
if (y_type == MLINE)
{
curbuf->b_op_start.col = 0;
if (dir == FORWARD)
curbuf->b_op_start.lnum++;
}
// Skip mark_adjust when adding lines after the last one, there
// can't be marks there. But still needed in diff mode.
if (curbuf->b_op_start.lnum + (y_type == MCHAR) - 1 + nr_lines
< curbuf->b_ml.ml_line_count
#ifdef FEAT_DIFF
|| curwin->w_p_diff
#endif
)
mark_adjust(curbuf->b_op_start.lnum + (y_type == MCHAR),
(linenr_T)MAXLNUM, nr_lines, 0L);
// note changed text for displaying and folding
if (y_type == MCHAR)
changed_lines(curwin->w_cursor.lnum, col,
curwin->w_cursor.lnum + 1, nr_lines);
else
changed_lines(curbuf->b_op_start.lnum, 0,
curbuf->b_op_start.lnum, nr_lines);
if (y_current_used != NULL && (y_current_used != y_current
|| y_current->y_array != y_array))
{
// Something invoked through changed_lines() has changed the
// yank buffer, e.g. a GUI clipboard callback.
emsg(_(e_yank_register_changed_while_using_it));
goto end;
}
// Put the '] mark on the first byte of the last inserted character.
// Correct the length for change in indent.
curbuf->b_op_end.lnum = new_lnum;
len = STRLEN(y_array[y_size - 1]);
col = (colnr_T)len - lendiff;
if (col > 1)
{
curbuf->b_op_end.col = col - 1;
if (len > 0)
curbuf->b_op_end.col -= mb_head_off(y_array[y_size - 1],
y_array[y_size - 1] + len - 1);
}
else
curbuf->b_op_end.col = 0;
if (flags & PUT_CURSLINE)
{
// ":put": put cursor on last inserted line
curwin->w_cursor.lnum = lnum;
beginline(BL_WHITE | BL_FIX);
}
else if (flags & PUT_CURSEND)
{
// put cursor after inserted text
if (y_type == MLINE)
{
if (lnum >= curbuf->b_ml.ml_line_count)
curwin->w_cursor.lnum = curbuf->b_ml.ml_line_count;
else
curwin->w_cursor.lnum = lnum + 1;
curwin->w_cursor.col = 0;
}
else
{
curwin->w_cursor.lnum = new_lnum;
curwin->w_cursor.col = col;
curbuf->b_op_end = curwin->w_cursor;
if (col > 1)
curbuf->b_op_end.col = col - 1;
}
}
else if (y_type == MLINE)
{
// put cursor on first non-blank in first inserted line
curwin->w_cursor.col = 0;
if (dir == FORWARD)
++curwin->w_cursor.lnum;
beginline(BL_WHITE | BL_FIX);
}
else // put cursor on first inserted character
curwin->w_cursor = new_cursor;
}
}
msgmore(nr_lines);
curwin->w_set_curswant = TRUE;
end:
if (cmdmod.cmod_flags & CMOD_LOCKMARKS)
{
curbuf->b_op_start = orig_start;
curbuf->b_op_end = orig_end;
}
if (allocated)
vim_free(insert_string);
if (regname == '=')
vim_free(y_array);
VIsual_active = FALSE;
// If the cursor is past the end of the line put it at the end.
adjust_cursor_eol();
}
| 0
|
244,119
|
/*
 * Allocate a Compact Sample Group ('csgp') box.
 *
 * ISOM_DECL_BOX_ALLOC declares a local 'tmp' of the given struct type and
 * tags it with the box fourcc; presumably it returns NULL on allocation
 * failure inside the macro — confirm against the macro definition.
 */
GF_Box *csgp_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_CompactSampleGroupBox, GF_ISOM_BOX_TYPE_CSGP);
return (GF_Box *)tmp;
}
| 0
|
427,239
|
/*
 * retstat - Parse and generate code for a 'return' statement.
 * Determines how many values are returned (0, a fixed count, or
 * LUA_MULTRET for open calls/varargs), converts an eligible single call
 * into a tail call, and emits the OP_RETURN via luaK_ret.
 */
static void retstat (LexState *ls) {
/* stat -> RETURN [explist] [';'] */
FuncState *fs = ls->fs;
expdesc e;
int nret; /* number of values being returned */
int first = luaY_nvarstack(fs); /* first slot to be returned */
if (block_follow(ls, 1) || ls->t.token == ';')
nret = 0; /* return no values */
else {
nret = explist(ls, &e); /* optional return values */
if (hasmultret(e.k)) {
luaK_setmultret(fs, &e);
/* a lone call with no to-be-closed vars can become a tail call */
if (e.k == VCALL && nret == 1 && !fs->bl->insidetbc) { /* tail call? */
SET_OPCODE(getinstruction(fs,&e), OP_TAILCALL);
lua_assert(GETARG_A(getinstruction(fs,&e)) == luaY_nvarstack(fs));
}
nret = LUA_MULTRET; /* return all values */
}
else {
if (nret == 1) /* only one single value? */
first = luaK_exp2anyreg(fs, &e); /* can use original slot */
else { /* values must go to the top of the stack */
luaK_exp2nextreg(fs, &e);
lua_assert(nret == fs->freereg - first);
}
}
}
luaK_ret(fs, first, nret);
testnext(ls, ';'); /* skip optional semicolon */
}
| 0
|
310,318
|
connection_dirserv_add_networkstatus_bytes_to_outbuf(dir_connection_t *conn)
{
while (buf_datalen(conn->_base.outbuf) < DIRSERV_BUFFER_MIN) {
if (conn->cached_dir) {
int uncompressing = (conn->zlib_state != NULL);
int r = connection_dirserv_add_dir_bytes_to_outbuf(conn);
if (conn->dir_spool_src == DIR_SPOOL_NONE) {
/* add_dir_bytes thinks we're done with the cached_dir. But we
* may have more cached_dirs! */
conn->dir_spool_src = DIR_SPOOL_NETWORKSTATUS;
/* This bit is tricky. If we were uncompressing the last
* networkstatus, we may need to make a new zlib object to
* uncompress the next one. */
if (uncompressing && ! conn->zlib_state &&
conn->fingerprint_stack &&
smartlist_len(conn->fingerprint_stack)) {
conn->zlib_state = tor_zlib_new(0, ZLIB_METHOD);
}
}
if (r) return r;
} else if (conn->fingerprint_stack &&
smartlist_len(conn->fingerprint_stack)) {
/* Add another networkstatus; start serving it. */
char *fp = smartlist_pop_last(conn->fingerprint_stack);
cached_dir_t *d = lookup_cached_dir_by_fp(fp);
tor_free(fp);
if (d) {
++d->refcnt;
conn->cached_dir = d;
conn->cached_dir_offset = 0;
}
} else {
connection_dirserv_finish_spooling(conn);
smartlist_free(conn->fingerprint_stack);
conn->fingerprint_stack = NULL;
return 0;
}
}
return 0;
}
| 0
|
259,204
|
/*
 * Parse a 'pcmC' (PCM configuration) box: version(1) + flags(3) +
 * format_flags(1) [+ sample size]. A format_flags value of 1 marks the
 * stream's samples as little-endian; anything else leaves the default
 * (big-endian) interpretation.
 */
static int mov_read_pcmc(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    int fmt_flags;

    if (atom.size < 6) {
        av_log(c->fc, AV_LOG_ERROR, "Empty pcmC box\n");
        return AVERROR_INVALIDDATA;
    }

    avio_r8(pb);   /* version */
    avio_rb24(pb); /* flags */

    fmt_flags = avio_r8(pb);
    if (fmt_flags == 1) // indicates little-endian format. If not present, big-endian format is used
        set_last_stream_little_endian(c->fc);

    return 0;
}
| 0
|
512,311
|
/*
  Prepare scalar IN comparison via per-type cmp_item objects.
  When any argument compares as a string, first agree on a common
  collation across all arguments; then build one cmp_item per distinct
  comparison type. Returns true on error, false on success.
*/
bool Item_func_in::fix_for_scalar_comparison_using_cmp_items(THD *thd,
                                                             uint found_types)
{
  const bool has_string_result= (found_types & (1U << STRING_RESULT)) != 0;
  if (has_string_result &&
      agg_arg_charsets_for_comparison(cmp_collation, args, arg_count))
    return true;
  return make_unique_cmp_items(thd, cmp_collation.collation);
}
| 0
|
361,746
|
/*
 * em28xx_setup_xc3028 - Fill the xc2028/xc3028 tuner control structure.
 * @dev: the em28xx device
 * @ctl: control structure to initialize (zeroed first)
 *
 * Selects the demodulator IF configuration and firmware file appropriate
 * for the board model; boards not listed fall through to XC3028_FE_OREN538
 * with the default firmware.
 */
void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
{
memset(ctl, 0, sizeof(*ctl));
/* Defaults; overridden per board below. */
ctl->fname = XC2028_DEFAULT_FIRMWARE;
ctl->max_len = 64;
ctl->mts = em28xx_boards[dev->model].mts_firmware;
switch (dev->model) {
case EM2880_BOARD_EMPIRE_DUAL_TV:
case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
case EM2882_BOARD_TERRATEC_HYBRID_XS:
case EM2880_BOARD_TERRATEC_HYBRID_XS:
case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
case EM2881_BOARD_PINNACLE_HYBRID_PRO:
case EM2882_BOARD_ZOLID_HYBRID_TV_STICK:
ctl->demod = XC3028_FE_ZARLINK456;
break;
case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E:
ctl->demod = XC3028_FE_DEFAULT;
break;
case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
ctl->demod = XC3028_FE_DEFAULT;
ctl->fname = XC3028L_DEFAULT_FIRMWARE;
break;
case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
/* FIXME: Better to specify the needed IF */
ctl->demod = XC3028_FE_DEFAULT;
break;
case EM2883_BOARD_KWORLD_HYBRID_330U:
case EM2882_BOARD_DIKOM_DK300:
case EM2882_BOARD_KWORLD_VS_DVBT:
ctl->demod = XC3028_FE_CHINA;
ctl->fname = XC2028_DEFAULT_FIRMWARE;
break;
case EM2882_BOARD_EVGA_INDTUBE:
ctl->demod = XC3028_FE_CHINA;
ctl->fname = XC3028L_DEFAULT_FIRMWARE;
break;
default:
ctl->demod = XC3028_FE_OREN538;
}
}
| 0
|
289,283
|
/*
 * snd_pcm_hw_param_mask - Apply a mask constraint to one hw parameter and,
 * if that narrowed anything (rmask set), re-refine the remaining params.
 *
 * Returns 0 on success or a negative error from the mask application or
 * the refine step.
 */
static int snd_pcm_hw_param_mask(struct snd_pcm_substream *pcm,
				 struct snd_pcm_hw_params *params,
				 snd_pcm_hw_param_t var,
				 const struct snd_mask *val)
{
	int err = _snd_pcm_hw_param_mask(params, var, val);

	if (err < 0)
		return err;
	if (params->rmask) {
		err = snd_pcm_hw_refine(pcm, params);
		if (err < 0)
			return err;
	}
	return 0;
}
| 0
|
400,735
|
/*
 * copy_page_to_iter_pipe - "Copy" a page into a pipe-backed iov_iter by
 * grabbing a reference on @page and publishing it as a pipe buffer,
 * instead of copying bytes.
 *
 * If the page and offset continue the buffer at the pipe head, the
 * existing buffer is simply extended; otherwise a new slot is claimed.
 * Returns the number of bytes accounted (possibly clamped to i->count),
 * or 0 when there is nothing to do, sanity checks fail, or the pipe ring
 * is full.
 */
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
struct pipe_buffer *buf;
unsigned int p_tail = pipe->tail;
unsigned int p_mask = pipe->ring_size - 1;
unsigned int i_head = i->head;
size_t off;
if (unlikely(bytes > i->count))
bytes = i->count;
if (unlikely(!bytes))
return 0;
if (!sanity(i))
return 0;
off = i->iov_offset;
buf = &pipe->bufs[i_head & p_mask];
if (off) {
if (offset == off && buf->page == page) {
/* merge with the last one */
buf->len += bytes;
i->iov_offset += bytes;
goto out;
}
/* head buffer is partially used but can't merge: move to next slot */
i_head++;
buf = &pipe->bufs[i_head & p_mask];
}
if (pipe_full(i_head, p_tail, pipe->max_usage))
return 0;
/* Publish the page as a new pipe buffer; the reference taken here is
 * dropped by the pipe buffer ops when the buffer is released. */
buf->ops = &page_cache_pipe_buf_ops;
buf->flags = 0;
get_page(page);
buf->page = page;
buf->offset = offset;
buf->len = bytes;
pipe->head = i_head + 1;
i->iov_offset = offset + bytes;
i->head = i_head;
out:
i->count -= bytes;
return bytes;
}
| 0
|
96,963
|
// Map a CoreFoundation object to the internal CFType tag used by the
// serializer.  The sentinel tokenNullTypeRef() maps to Null; any type id
// outside the supported set asserts and yields Unknown.
static CFType typeFromCFTypeRef(CFTypeRef type)
{
    ASSERT(type);

    if (type == tokenNullTypeRef())
        return Null;

    CFTypeID typeID = CFGetTypeID(type);
    if (typeID == CFArrayGetTypeID())
        return CFArray;
    if (typeID == CFBooleanGetTypeID())
        return CFBoolean;
    if (typeID == CFDataGetTypeID())
        return CFData;
    if (typeID == CFDateGetTypeID())
        return CFDate;
    if (typeID == CFDictionaryGetTypeID())
        return CFDictionary;
    if (typeID == CFNullGetTypeID())
        return CFNull;
    if (typeID == CFNumberGetTypeID())
        return CFNumber;
    if (typeID == CFStringGetTypeID())
        return CFString;
    if (typeID == CFURLGetTypeID())
        return CFURL;
#if PLATFORM(MAC)
    // Security framework types are only serializable on macOS.
    if (typeID == SecCertificateGetTypeID())
        return SecCertificate;
    if (typeID == SecKeychainItemGetTypeID())
        return SecKeychainItem;
#endif
    ASSERT_NOT_REACHED();
    return Unknown;
}
| 0
|
279,906
|
/*
 * Make sure a preview window (or preview/info popup) is current before
 * showing a tag.  Reuses an existing preview window/popup when possible,
 * otherwise creates one.  Returns TRUE when a new window or popup was
 * created, FALSE when an existing window was entered (or the current
 * window already is the preview window).
 */
prepare_tagpreview(
    int		undo_sync,	// sync undo when leaving the window
    int		use_previewpopup, // use popup if 'previewpopup' set
    use_popup_T	use_popup)	// use other popup window
{
    win_T	*wp;

# ifdef FEAT_GUI
    need_mouse_correct = TRUE;
# endif

    /*
     * If there is already a preview window open, use that one.
     */
    if (!curwin->w_p_pvw)
    {
# ifdef FEAT_PROP_POPUP
	if (use_previewpopup && *p_pvp != NUL)
	{
	    wp = popup_find_preview_window();
	    if (wp != NULL)
		popup_set_wantpos_cursor(wp, wp->w_minwidth, NULL);
	}
	else if (use_popup != USEPOPUP_NONE)
	{
	    wp = popup_find_info_window();
	    if (wp != NULL)
	    {
		if (use_popup == USEPOPUP_NORMAL)
		    popup_show(wp);
		else
		    popup_hide(wp);
		// When the popup moves or resizes it may reveal part of
		// another window.  TODO: can this be done more efficiently?
		redraw_all_later(NOT_VALID);
	    }
	}
	else
# endif
	{
	    // fall back to the first window with 'previewwindow' set
	    FOR_ALL_WINDOWS(wp)
		if (wp->w_p_pvw)
		    break;
	}
	if (wp != NULL)
	    win_enter(wp, undo_sync);
	else
	{
	    /*
	     * There is no preview window open yet.  Create one.
	     */
# ifdef FEAT_PROP_POPUP
	    if ((use_previewpopup && *p_pvp != NUL)
		    || use_popup != USEPOPUP_NONE)
		return popup_create_preview_window(use_popup != USEPOPUP_NONE);
# endif
	    if (win_split(g_do_tagpreview > 0 ? g_do_tagpreview : 0, 0) == FAIL)
		return FALSE;
	    curwin->w_p_pvw = TRUE;
	    curwin->w_p_wfh = TRUE;
	    RESET_BINDING(curwin);	    // don't take over 'scrollbind'
					    // and 'cursorbind'
# ifdef FEAT_DIFF
	    curwin->w_p_diff = FALSE;	    // no 'diff'
# endif
# ifdef FEAT_FOLDING
	    curwin->w_p_fdc = 0;	    // no 'foldcolumn'
# endif
	    return TRUE;
	}
    }
    return FALSE;
}
| 0
|
175,697
|
// Default no-op hook; subclasses override to refresh cached system info.
virtual void UpdateSystemInfo() {}
| 0
|
450,342
|
/*
 * Handle an incoming VNC key event.
 *
 * Upper-case ASCII keysyms are folded to lower case when a graphic
 * console has focus (the keymap is indexed by the unshifted symbol),
 * then the keysym is translated to a scancode and forwarded.
 */
static void key_event(VncState *vs, int down, uint32_t sym)
{
    int lsym = sym;
    int keycode;

    if (lsym >= 'A' && lsym <= 'Z' && qemu_console_is_graphic(NULL)) {
        lsym += 'a' - 'A';
    }

    keycode = keysym2scancode(vs->vd->kbd_layout, lsym & 0xFFFF,
                              vs->vd->kbd, down) & SCANCODE_KEYMASK;
    trace_vnc_key_event_map(down, sym, keycode, code2name(keycode));
    do_key_event(vs, down, keycode, sym);
}
| 0
|
343,284
|
/*
 * Replace control characters in buf[0..len-1] with '_', in place.
 * NUL bytes and newlines are left untouched.  A zero length is a no-op.
 */
void stripctrl(char * const buf, size_t len)
{
    size_t i;

    for (i = len; i > (size_t) 0U; ) {
        i--;
        if (ISCTRLCODE(buf[i]) && buf[i] != 0 && buf[i] != '\n') {
            buf[i] = '_';
        }
    }
}
| 0
|
387,819
|
// Convenience overload: the 1-dimensional array klass of this instance
// klass.  With or_null set, returns NULL instead of allocating it.
Klass* InstanceKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}
| 0
|
292,134
|
// Resolve an invokedynamic call site for constant-pool entry @index.
//
// Resolution is racy by design: the ConstantPoolCacheEntry (cpce) is the
// single point of truth, so after the bootstrap specifier is resolved the
// cpce is re-checked in case another thread completed resolution first.
// A LinkageError from the dynamic-call resolution is recorded in the
// resolution-error table exactly once; the losing thread re-reads the
// cpce and either adopts the winner's method or re-throws its error.
void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHandle& pool, int index, TRAPS) {
  Symbol* method_name       = pool->name_ref_at(index);
  Symbol* method_signature  = pool->signature_ref_at(index);
  Klass* current_klass      = pool->pool_holder();

  // Resolve the bootstrap specifier (BSM + optional arguments).
  Handle bootstrap_specifier;
  // Check if CallSite has been bound already:
  ConstantPoolCacheEntry* cpce = pool->invokedynamic_cp_cache_entry_at(index);
  int pool_index = cpce->constant_pool_index();

  if (cpce->is_f1_null()) {
    if (cpce->indy_resolution_failed()) {
      // A previous attempt already failed; rethrow the recorded error.
      ConstantPool::throw_resolution_error(pool,
                                           ResolutionErrorTable::encode_cpcache_index(index),
                                           CHECK);
    }

    // The initial step in Call Site Specifier Resolution is to resolve the symbolic
    // reference to a method handle which will be the bootstrap method for a dynamic
    // call site. If resolution for the java.lang.invoke.MethodHandle for the bootstrap
    // method fails, then a MethodHandleInError is stored at the corresponding bootstrap
    // method's CP index for the CONSTANT_MethodHandle_info. So, there is no need to
    // set the indy_rf flag since any subsequent invokedynamic instruction which shares
    // this bootstrap method will encounter the resolution of MethodHandleInError.
    oop bsm_info = pool->resolve_bootstrap_specifier_at(pool_index, THREAD);
    Exceptions::wrap_dynamic_exception(CHECK);
    assert(bsm_info != NULL, "");
    // FIXME: Cache this once per BootstrapMethods entry, not once per CONSTANT_InvokeDynamic.
    bootstrap_specifier = Handle(THREAD, bsm_info);
  }
  if (!cpce->is_f1_null()) {
    // Another thread resolved the call site while we fetched the BSM;
    // adopt its result.
    methodHandle method(     THREAD, cpce->f1_as_method());
    Handle       appendix(   THREAD, cpce->appendix_if_resolved(pool));
    Handle       method_type(THREAD, cpce->method_type_if_resolved(pool));
    result.set_handle(method, appendix, method_type, THREAD);
    Exceptions::wrap_dynamic_exception(CHECK);
    return;
  }

  if (TraceMethodHandles) {
    ResourceMark rm(THREAD);
    tty->print_cr("resolve_invokedynamic #%d %s %s in %s",
                  ConstantPool::decode_invokedynamic_index(index),
                  method_name->as_C_string(), method_signature->as_C_string(),
                  current_klass->name()->as_C_string());
    tty->print("  BSM info: "); bootstrap_specifier->print();
  }

  resolve_dynamic_call(result, pool_index, bootstrap_specifier, method_name,
                       method_signature, current_klass, THREAD);
  if (HAS_PENDING_EXCEPTION && PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
    // Try to record the failure so later attempts rethrow the same error.
    int encoded_index = ResolutionErrorTable::encode_cpcache_index(index);
    bool recorded_res_status = cpce->save_and_throw_indy_exc(pool, pool_index,
                                                             encoded_index,
                                                             pool()->tag_at(pool_index),
                                                             CHECK);
    if (!recorded_res_status) {
      // Another thread got here just before we did. So, either use the method
      // that it resolved or throw the LinkageError exception that it threw.
      if (!cpce->is_f1_null()) {
        methodHandle method(     THREAD, cpce->f1_as_method());
        Handle       appendix(   THREAD, cpce->appendix_if_resolved(pool));
        Handle       method_type(THREAD, cpce->method_type_if_resolved(pool));
        result.set_handle(method, appendix, method_type, THREAD);
        Exceptions::wrap_dynamic_exception(CHECK);
      } else {
        assert(cpce->indy_resolution_failed(), "Resolution failure flag not set");
        ConstantPool::throw_resolution_error(pool, encoded_index, CHECK);
      }
      return;
    }
    assert(cpce->indy_resolution_failed(), "Resolution failure flag wasn't set");
  }
}
| 0
|
513,272
|
/*
 * GROUP BY end-handler for the join executor: fold the current row into
 * the unique-key temporary table.  A group key is built from the GROUP BY
 * fields; if a matching row exists its aggregate columns are updated in
 * place, otherwise a fresh row is written.  When the heap temp table
 * overflows, it is converted to an on-disk table and the write function
 * is switched to end_unique_update.  Returns a NESTED_LOOP_* status.
 */
end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
	   bool end_of_records)
{
  TABLE *const table= join_tab->table;
  ORDER   *group;
  int	  error;
  DBUG_ENTER("end_update");

  if (end_of_records)
    DBUG_RETURN(NESTED_LOOP_OK);

  join->found_records++;
  copy_fields(join_tab->tmp_table_param);	// Groups are copied twice.
  /* Make a key of group index */
  for (group=table->group ; group ; group=group->next)
  {
    Item *item= *group->item;
    if (group->fast_field_copier_setup != group->field)
    {
      // (Re)build the cached fast copier when the target field changed.
      DBUG_PRINT("info", ("new setup %p -> %p",
                          group->fast_field_copier_setup,
                          group->field));
      group->fast_field_copier_setup= group->field;
      group->fast_field_copier_func=
        item->setup_fast_field_copier(group->field);
    }
    item->save_org_in_field(group->field, group->fast_field_copier_func);
    /* Store in the used key if the field was 0 */
    if (item->maybe_null)
      group->buff[-1]= (char) group->field->is_null();
  }
  if (!table->file->ha_index_read_map(table->record[1],
                                      join_tab->tmp_table_param->group_buff,
                                      HA_WHOLE_KEY,
                                      HA_READ_KEY_EXACT))
  {						/* Update old record */
    restore_record(table,record[1]);
    update_tmptable_sum_func(join->sum_funcs,table);
    if ((error= table->file->ha_update_tmp_row(table->record[1],
                                               table->record[0])))
    {
      table->file->print_error(error,MYF(0));	/* purecov: inspected */
      DBUG_RETURN(NESTED_LOOP_ERROR);            /* purecov: inspected */
    }
    goto end;
  }

  // No existing group row: initialise aggregates and insert a new one.
  init_tmptable_sum_functions(join->sum_funcs);
  if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
    DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */
  if ((error= table->file->ha_write_tmp_row(table->record[0])))
  {
    // Heap table may be full; migrate it to disk and keep going.
    if (create_internal_tmp_table_from_heap(join->thd, table,
                                            join_tab->tmp_table_param->start_recinfo,
                                            &join_tab->tmp_table_param->recinfo,
                                            error, 0, NULL))
      DBUG_RETURN(NESTED_LOOP_ERROR);            // Not a table_is_full error
    /* Change method to update rows */
    if ((error= table->file->ha_index_init(0, 0)))
    {
      table->file->print_error(error, MYF(0));
      DBUG_RETURN(NESTED_LOOP_ERROR);
    }
    join_tab->aggr->set_write_func(end_unique_update);
  }
  join_tab->send_records++;
end:
  if (join->thd->check_killed())
  {
    join->thd->send_kill_message();
    DBUG_RETURN(NESTED_LOOP_KILLED);             /* purecov: inspected */
  }
  DBUG_RETURN(NESTED_LOOP_OK);
}
| 0
|
101,672
|
// IPC handler: forward a server-redirect notification from the web process
// to the history client.  Silently ignores unknown pages and empty URLs;
// the MESSAGE_CHECK macros kill the (potentially compromised) child
// process and return early on invalid frame IDs or malformed URLs.
void WebProcessProxy::didPerformServerRedirect(uint64_t pageID, const String& sourceURLString, const String& destinationURLString, uint64_t frameID)
{
    WebPageProxy* page = webPage(pageID);
    if (!page)
        return;

    if (sourceURLString.isEmpty() || destinationURLString.isEmpty())
        return;

    WebFrameProxy* frame = webFrame(frameID);
    MESSAGE_CHECK(frame);
    MESSAGE_CHECK(frame->page() == page);
    MESSAGE_CHECK_URL(sourceURLString);
    MESSAGE_CHECK_URL(destinationURLString);

    m_context->historyClient().didPerformServerRedirect(m_context.get(), page, sourceURLString, destinationURLString, frame);
}
| 0
|
450,815
|
/* Match PATTERN against the entries of DIRECTORY, appending matches to
   PGLOB.  Collected names are kept in a chain of `struct globnames'
   blocks, allocated on the stack (bounded by ALLOCA_USED) while small and
   on the heap afterwards; on success they are moved into
   pglob->gl_pathv, on failure every strdup'd name and heap block is
   released.  Returns 0, GLOB_NOMATCH, GLOB_NOSPACE or GLOB_ABORTED.  */
glob_in_dir (const char *pattern, const char *directory, int flags,
	     int (*errfunc) (const char *, int),
	     glob_t *pglob, size_t alloca_used)
{
  size_t dirlen = strlen (directory);
  void *stream = NULL;
# define GLOBNAMES_MEMBERS(nnames) \
    struct globnames *next; size_t count; char *name[nnames];
  struct globnames { GLOBNAMES_MEMBERS (FLEXIBLE_ARRAY_MEMBER) };
  struct { GLOBNAMES_MEMBERS (64) } init_names_buf;
  struct globnames *init_names = (struct globnames *) &init_names_buf;
  struct globnames *names = init_names;
  struct globnames *names_alloca = init_names;
  size_t nfound = 0;
  size_t cur = 0;
  int meta;
  int save;
  int result;

  alloca_used += sizeof init_names_buf;

  init_names->next = NULL;
  init_names->count = ((sizeof init_names_buf
                        - offsetof (struct globnames, name))
                       / sizeof init_names->name[0]);

  meta = __glob_pattern_type (pattern, !(flags & GLOB_NOESCAPE));
  if (meta == GLOBPAT_NONE && (flags & (GLOB_NOCHECK|GLOB_NOMAGIC)))
    {
      /* We need not do any tests.  The PATTERN contains no meta
         characters and we must not return an error therefore the
         result will always contain exactly one name.  */
      flags |= GLOB_NOCHECK;
    }
  else if (meta == GLOBPAT_NONE)
    {
      /* Literal name: probe it with lstat instead of scanning the
         directory.  */
      union
      {
        struct stat st;
        struct_stat64 st64;
      } ust;
      size_t patlen = strlen (pattern);
      size_t fullsize;
      bool alloca_fullname
        = (! size_add_wrapv (dirlen + 1, patlen + 1, &fullsize)
           && glob_use_alloca (alloca_used, fullsize));
      char *fullname;
      if (alloca_fullname)
        fullname = alloca_account (fullsize, alloca_used);
      else
        {
          fullname = malloc (fullsize);
          if (fullname == NULL)
            return GLOB_NOSPACE;
        }
      mempcpy (mempcpy (mempcpy (fullname, directory, dirlen),
                        "/", 1),
               pattern, patlen + 1);
      if (((__builtin_expect (flags & GLOB_ALTDIRFUNC, 0)
            ? (*pglob->gl_lstat) (fullname, &ust.st)
            : __lstat64 (fullname, &ust.st64))
           == 0)
          || errno == EOVERFLOW)
        /* We found this file to be existing.  Now tell the rest
           of the function to copy this name into the result.  */
        flags |= GLOB_NOCHECK;
      if (__glibc_unlikely (!alloca_fullname))
        free (fullname);
    }
  else
    {
      /* Pattern with meta characters: scan the directory.  */
      stream = (__builtin_expect (flags & GLOB_ALTDIRFUNC, 0)
                ? (*pglob->gl_opendir) (directory)
                : opendir (directory));
      if (stream == NULL)
        {
          if (errno != ENOTDIR
              && ((errfunc != NULL && (*errfunc) (directory, errno))
                  || (flags & GLOB_ERR)))
            return GLOB_ABORTED;
        }
      else
        {
          int fnm_flags = ((!(flags & GLOB_PERIOD) ? FNM_PERIOD : 0)
                           | ((flags & GLOB_NOESCAPE) ? FNM_NOESCAPE : 0));
          flags |= GLOB_MAGCHAR;

          while (1)
            {
              struct readdir_result d;
              {
                if (__builtin_expect (flags & GLOB_ALTDIRFUNC, 0))
                  d = convert_dirent (GL_READDIR (pglob, stream));
                else
                  {
#ifdef COMPILE_GLOB64
                    d = convert_dirent (__readdir (stream));
#else
                    d = convert_dirent64 (__readdir64 (stream));
#endif
                  }
              }
              if (d.name == NULL)
                break;

              /* If we shall match only directories use the information
                 provided by the dirent call if possible.  */
              if (flags & GLOB_ONLYDIR)
                switch (readdir_result_type (d))
                  {
                  case DT_DIR: case DT_LNK: case DT_UNKNOWN: break;
                  default: continue;
                  }

              if (fnmatch (pattern, d.name, fnm_flags) == 0)
                {
                  /* Current block full: chain a new, twice as large one.  */
                  if (cur == names->count)
                    {
                      struct globnames *newnames;
                      size_t count = names->count * 2;
                      size_t nameoff = offsetof (struct globnames, name);
                      size_t size = FLEXSIZEOF (struct globnames, name,
                                                count * sizeof (char *));
                      if ((SIZE_MAX - nameoff) / 2 / sizeof (char *)
                          < names->count)
                        goto memory_error;
                      if (glob_use_alloca (alloca_used, size))
                        newnames = names_alloca
                          = alloca_account (size, alloca_used);
                      else if ((newnames = malloc (size))
                               == NULL)
                        goto memory_error;
                      newnames->count = count;
                      newnames->next = names;
                      names = newnames;
                      cur = 0;
                    }
                  names->name[cur] = strdup (d.name);
                  if (names->name[cur] == NULL)
                    goto memory_error;
                  ++cur;
                  ++nfound;
                  if (SIZE_MAX - pglob->gl_offs <= nfound)
                    goto memory_error;
                }
            }
        }
    }

  if (nfound == 0 && (flags & GLOB_NOCHECK))
    {
      /* No match but GLOB_NOCHECK: return the pattern itself.  */
      size_t len = strlen (pattern);
      nfound = 1;
      names->name[cur] = malloc (len + 1);
      if (names->name[cur] == NULL)
        goto memory_error;
      *((char *) mempcpy (names->name[cur++], pattern, len)) = '\0';
    }

  result = GLOB_NOMATCH;
  if (nfound != 0)
    {
      char **new_gl_pathv;
      result = 0;

      if (SIZE_MAX / sizeof (char *) - pglob->gl_pathc
          < pglob->gl_offs + nfound + 1)
        goto memory_error;

      new_gl_pathv
        = realloc (pglob->gl_pathv,
                   (pglob->gl_pathc + pglob->gl_offs + nfound + 1)
                   * sizeof (char *));

      if (new_gl_pathv == NULL)
        {
        memory_error:
          /* Free every collected name and heap-allocated chain block.  */
          while (1)
            {
              struct globnames *old = names;
              for (size_t i = 0; i < cur; ++i)
                free (names->name[i]);
              names = names->next;
              /* NB: we will not leak memory here if we exit without
                 freeing the current block assigned to OLD.  At least
                 the very first block is always allocated on the stack
                 and this is the block assigned to OLD here.  */
              if (names == NULL)
                {
                  assert (old == init_names);
                  break;
                }
              cur = names->count;
              if (old == names_alloca)
                names_alloca = names;
              else
                free (old);
            }
          result = GLOB_NOSPACE;
        }
      else
        {
          /* Transfer ownership of the names into gl_pathv.  */
          while (1)
            {
              struct globnames *old = names;
              for (size_t i = 0; i < cur; ++i)
                new_gl_pathv[pglob->gl_offs + pglob->gl_pathc++]
                  = names->name[i];
              names = names->next;
              /* NB: we will not leak memory here if we exit without
                 freeing the current block assigned to OLD.  At least
                 the very first block is always allocated on the stack
                 and this is the block assigned to OLD here.  */
              if (names == NULL)
                {
                  assert (old == init_names);
                  break;
                }
              cur = names->count;
              if (old == names_alloca)
                names_alloca = names;
              else
                free (old);
            }

          pglob->gl_pathv = new_gl_pathv;

          pglob->gl_pathv[pglob->gl_offs + pglob->gl_pathc] = NULL;

          pglob->gl_flags = flags;
        }
    }

  if (stream != NULL)
    {
      /* Preserve errno across the directory-stream close.  */
      save = errno;
      if (__glibc_unlikely (flags & GLOB_ALTDIRFUNC))
        (*pglob->gl_closedir) (stream);
      else
        closedir (stream);
      __set_errno (save);
    }

  return result;
}
| 0
|
225,723
|
/*
 * Serialize an FD session group box ('segr') to the bitstream.
 *
 * Writes the box header, then for each session group its group-id list
 * followed by its channel list, matching the layout read by the parser.
 *
 * @param s   the box to write (FDSessionGroupBox)
 * @param bs  destination bitstream
 * @return GF_OK on success, GF_BAD_PARAM for a NULL box, or the error
 *         returned by the header writer.
 *
 * Note: the original snippet was missing the function's closing brace;
 * restored here so the unit is syntactically complete.
 */
GF_Err segr_box_write(GF_Box *s, GF_BitStream *bs)
{
	u32 i, k;
	GF_Err e;
	FDSessionGroupBox *ptr = (FDSessionGroupBox *) s;
	if (!s) return GF_BAD_PARAM;
	e = gf_isom_box_write_header(s, bs);
	if (e) return e;

	gf_bs_write_u16(bs, ptr->num_session_groups);
	for (i=0; i<ptr->num_session_groups; i++) {
		gf_bs_write_u8(bs, ptr->session_groups[i].nb_groups);
		for (k=0; k<ptr->session_groups[i].nb_groups; k++) {
			gf_bs_write_u32(bs, ptr->session_groups[i].group_ids[k]);
		}
		gf_bs_write_u16(bs, ptr->session_groups[i].nb_channels);
		for (k=0; k<ptr->session_groups[i].nb_channels; k++) {
			gf_bs_write_u32(bs, ptr->session_groups[i].channels[k]);
		}
	}
	return GF_OK;
}
| 0
|
297,216
|
/*
 * Scan an embedded JPEG thumbnail for its SOFn marker and record its
 * width/height in ImageInfo->Thumbnail.  Walks the JPEG marker stream
 * with explicit bounds checks against Thumbnail.size; returns TRUE when
 * dimensions were found, FALSE on non-JPEG data, truncated/corrupt
 * streams, or when SOS/EOI is reached before any SOFn marker.
 */
static int exif_scan_thumbnail(image_info_type *ImageInfo TSRMLS_DC)
{
	uchar           c, *data = (uchar*)ImageInfo->Thumbnail.data;
	int             n, marker;
	size_t          length=2, pos=0;
	jpeg_sof_info   sof_info;

	if (!data) {
		return FALSE; /* nothing to do here */
	}
	/* every JPEG stream starts with SOI followed by a marker prefix */
	if (memcmp(data, "\xFF\xD8\xFF", 3)) {
		if (!ImageInfo->Thumbnail.width && !ImageInfo->Thumbnail.height) {
			exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Thumbnail is not a JPEG image");
		}
		return FALSE;
	}
	for (;;) {
		/* advance past the previous segment's payload */
		pos += length;
		if (pos>=ImageInfo->Thumbnail.size)
			return FALSE;
		c = data[pos++];
		if (pos>=ImageInfo->Thumbnail.size)
			return FALSE;
		if (c != 0xFF) {
			return FALSE;
		}
		/* skip padding 0xFF bytes (at most 8) before the marker byte */
		n = 8;
		while ((c = data[pos++]) == 0xFF && n--) {
			if (pos+3>=ImageInfo->Thumbnail.size)
				return FALSE;
			/* +3 = pos++ of next check when reaching marker + 2 bytes for length */
		}
		if (c == 0xFF)
			return FALSE;
		marker = c;
		length = php_jpg_get16(data+pos);
		if (pos+length>=ImageInfo->Thumbnail.size) {
			return FALSE;
		}
#ifdef EXIF_DEBUG
		exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: process section(x%02X=%s) @ x%04X + x%04X", marker, exif_get_markername(marker), pos, length);
#endif
		switch (marker) {
			case M_SOF0:
			case M_SOF1:
			case M_SOF2:
			case M_SOF3:
			case M_SOF5:
			case M_SOF6:
			case M_SOF7:
			case M_SOF9:
			case M_SOF10:
			case M_SOF11:
			case M_SOF13:
			case M_SOF14:
			case M_SOF15:
				/* handle SOFn block */
				exif_process_SOFn(data+pos, marker, &sof_info);
				ImageInfo->Thumbnail.height   = sof_info.height;
				ImageInfo->Thumbnail.width    = sof_info.width;
#ifdef EXIF_DEBUG
				exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail: size: %d * %d", sof_info.width, sof_info.height);
#endif
				return TRUE;

			case M_SOS:
			case M_EOI:
				/* entropy-coded data or end of image: no SOFn will follow */
				exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Could not compute size of thumbnail");
				return FALSE;
				break;

			default:
				/* just skip */
				break;
		}
	}

	exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_WARNING, "Could not compute size of thumbnail");
	return FALSE;
}
| 0
|
317,304
|
/*
 * Slow-path audit-record emission for an inode permission check.
 * Packages the inode into LSM audit data and defers the actual record
 * generation to slow_avc_audit() with the current task's SID.
 */
static noinline int audit_inode_permission(struct inode *inode,
					   u32 perms, u32 audited, u32 denied,
					   int result)
{
	struct inode_security_struct *isec = selinux_inode(inode);
	struct common_audit_data ad = {
		.type = LSM_AUDIT_DATA_INODE,
		.u.inode = inode,
	};

	return slow_avc_audit(&selinux_state, current_sid(), isec->sid,
			      isec->sclass, perms, audited, denied, result,
			      &ad);
}
| 0
|
226,143
|
/*
 * Destructor for the MPEG-H audio configuration box ('mhaC').
 * Releases the stored configuration blob, then the box itself.
 *
 * Note: the original snippet was missing the function's closing brace;
 * restored here so the unit is syntactically complete.
 */
void mhac_box_del(GF_Box *s)
{
	GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s;
	if (ptr->mha_config) gf_free(ptr->mha_config);
	gf_free(s);
}
| 0
|
198,566
|
/**
 * Apply a byte-coded inflection rule to a decoded word form, in place.
 *
 * @param decoded       Buffer of capacity INDX_INFLBUF_SIZEMAX holding the
 *                      base form on entry and the inflected form on exit.
 * @param decoded_size  In/out: current length of the string in @decoded.
 * @param rule          NUL-terminated rule: bytes 1-4 switch insert/delete
 *                      mode and direction, bytes 11-19 move the edit
 *                      position, any other byte is a character to insert
 *                      or to verify-and-delete.
 * @return MOBI_SUCCESS, or MOBI_DATA_CORRUPT for a malformed rule.
 *
 * Fix: a crafted rule could decrement @pos below zero (delete step with
 * dir '<' at position 0, or a stale position reused by a later step); the
 * old checks (l < 0, d + l overrun) did not catch a negative @pos, so
 * decoded[-1] was read/written and memmove() started before the buffer.
 * Both edit branches now reject pos < 0 explicitly.
 */
MOBI_RET mobi_decode_infl(unsigned char *decoded, int *decoded_size, const unsigned char *rule) {
    int pos = *decoded_size;
    char mod = 'i';
    char dir = '<';
    char olddir;
    unsigned char c;
    while ((c = *rule++)) {
        if (c <= 4) {
            mod = (c <= 2) ? 'i' : 'd'; /* insert, delete */
            olddir = dir;
            dir = (c & 2) ? '<' : '>'; /* left, right */
            if (olddir != dir && olddir) {
                pos = (c & 2) ? *decoded_size : 0;
            }
        }
        else if (c > 10 && c < 20) {
            if (dir == '>') {
                pos = *decoded_size;
            }
            pos -= c - 10;
            dir = 0;
            if (pos < 0 || pos > *decoded_size) {
                debug_print("Position setting failed (%s)\n", decoded);
                return MOBI_DATA_CORRUPT;
            }
        }
        else {
            if (mod == 'i') {
                const unsigned char *s = decoded + pos;
                unsigned char *d = decoded + pos + 1;
                const int l = *decoded_size - pos;
                /* reject negative positions as well as buffer overruns */
                if (pos < 0 || l < 0 || d + l > decoded + INDX_INFLBUF_SIZEMAX) {
                    debug_print("Out of buffer in %s at pos: %i\n", decoded, pos);
                    return MOBI_DATA_CORRUPT;
                }
                memmove(d, s, (size_t) l);
                decoded[pos] = c;
                (*decoded_size)++;
                if (dir == '>') { pos++; }
            } else {
                if (dir == '<') { pos--; }
                const unsigned char *s = decoded + pos + 1;
                unsigned char *d = decoded + pos;
                const int l = *decoded_size - pos;
                /* a negative pos would read and write before the buffer */
                if (pos < 0 || l < 0 || d + l > decoded + INDX_INFLBUF_SIZEMAX) {
                    debug_print("Out of buffer in %s at pos: %i\n", decoded, pos);
                    return MOBI_DATA_CORRUPT;
                }
                if (decoded[pos] != c) {
                    debug_print("Character mismatch in %s at pos: %i (%c != %c)\n", decoded, pos, decoded[pos], c);
                    return MOBI_DATA_CORRUPT;
                }
                memmove(d, s, (size_t) l);
                (*decoded_size)--;
            }
        }
    }
    return MOBI_SUCCESS;
}
| 1
|
385,920
|
/* truncate64(2): truncate the file named by @path to @length bytes;
 * thin wrapper that delegates all checking and work to do_sys_truncate(). */
SYSCALL_DEFINE2(truncate64, const char __user *, path, loff_t, length)
{
	return do_sys_truncate(path, length);
}
| 0
|
231,636
|
// True while the drain timeout is pending, i.e. a graceful drain is in
// progress.
bool isDraining() {
  return drainTimeout_.isScheduled();
}
| 0
|
486,822
|
/*
 * Receive path of the Cadence GEM model: filter the incoming frame, pick
 * the target priority queue, then copy the payload into guest memory one
 * RX descriptor at a time, writing back descriptor status (SOF/EOF,
 * length, match flags) and raising the completion interrupt.
 *
 * Returns the consumed size on success, or -1 when the frame is dropped
 * (length error, oversize, or no descriptor/receive disabled).
 */
static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    CadenceGEMState *s = qemu_get_nic_opaque(nc);
    unsigned   rxbufsize, bytes_to_copy;
    unsigned   rxbuf_offset;
    uint8_t   *rxbuf_ptr;
    bool first_desc = true;
    int maf;
    int q = 0;

    /* Is this destination MAC address "for us" ? */
    maf = gem_mac_address_filter(s, buf);
    if (maf == GEM_RX_REJECT) {
        return size;  /* no, drop siliently b/c it's not an error */
    }

    /* Discard packets with receive length error enabled ? */
    if (s->regs[GEM_NWCFG] & GEM_NWCFG_LERR_DISC) {
        unsigned type_len;

        /* Fish the ethertype / length field out of the RX packet */
        type_len = buf[12] << 8 | buf[13];
        /* It is a length field, not an ethertype */
        if (type_len < 0x600) {
            if (size < type_len) {
                /* discard */
                return -1;
            }
        }
    }

    /*
     * Determine configured receive buffer offset (probably 0)
     */
    rxbuf_offset = (s->regs[GEM_NWCFG] & GEM_NWCFG_BUFF_OFST_M) >>
                   GEM_NWCFG_BUFF_OFST_S;

    /* The configure size of each receive buffer.  Determines how many
     * buffers needed to hold this packet.
     */
    rxbufsize = ((s->regs[GEM_DMACFG] & GEM_DMACFG_RBUFSZ_M) >>
                 GEM_DMACFG_RBUFSZ_S) * GEM_DMACFG_RBUFSZ_MUL;
    bytes_to_copy = size;

    /* Hardware allows a zero value here but warns against it. To avoid QEMU
     * indefinite loops we enforce a minimum value here
     */
    if (rxbufsize < GEM_DMACFG_RBUFSZ_MUL) {
        rxbufsize = GEM_DMACFG_RBUFSZ_MUL;
    }

    /* Pad to minimum length. Assume FCS field is stripped, logic
     * below will increment it to the real minimum of 64 when
     * not FCS stripping
     */
    if (size < 60) {
        size = 60;
    }

    /* Strip of FCS field ? (usually yes) */
    if (s->regs[GEM_NWCFG] & GEM_NWCFG_STRIP_FCS) {
        rxbuf_ptr = (void *)buf;
    } else {
        unsigned crc_val;

        if (size > MAX_FRAME_SIZE - sizeof(crc_val)) {
            size = MAX_FRAME_SIZE - sizeof(crc_val);
        }
        bytes_to_copy = size;
        /* The application wants the FCS field, which QEMU does not provide.
         * We must try and calculate one.
         */
        memcpy(s->rx_packet, buf, size);
        memset(s->rx_packet + size, 0, MAX_FRAME_SIZE - size);
        rxbuf_ptr = s->rx_packet;
        crc_val = cpu_to_le32(crc32(0, s->rx_packet, MAX(size, 60)));
        memcpy(s->rx_packet + size, &crc_val, sizeof(crc_val));

        bytes_to_copy += 4;
        size += 4;
    }

    DB_PRINT("config bufsize: %u packet size: %zd\n", rxbufsize, size);

    /* Find which queue we are targeting */
    q = get_queue_from_screen(s, rxbuf_ptr, rxbufsize);

    /* bounded by the descriptor buffer-length field; avoids overruns */
    if (size > gem_get_max_buf_len(s, false)) {
        qemu_log_mask(LOG_GUEST_ERROR, "rx frame too long\n");
        gem_set_isr(s, q, GEM_INT_AMBA_ERR);
        return -1;
    }

    while (bytes_to_copy) {
        hwaddr desc_addr;

        /* Do nothing if receive is not enabled. */
        if (!gem_can_receive(nc)) {
            return -1;
        }

        DB_PRINT("copy %" PRIu32 " bytes to 0x%" PRIx64 "\n",
                 MIN(bytes_to_copy, rxbufsize),
                 rx_desc_get_buffer(s, s->rx_desc[q]));

        /* Copy packet data to emulated DMA buffer */
        address_space_write(&s->dma_as, rx_desc_get_buffer(s, s->rx_desc[q]) +
                                                                 rxbuf_offset,
                            MEMTXATTRS_UNSPECIFIED, rxbuf_ptr,
                            MIN(bytes_to_copy, rxbufsize));
        rxbuf_ptr += MIN(bytes_to_copy, rxbufsize);
        bytes_to_copy -= MIN(bytes_to_copy, rxbufsize);

        rx_desc_clear_control(s->rx_desc[q]);

        /* Update the descriptor.  */
        if (first_desc) {
            rx_desc_set_sof(s->rx_desc[q]);
            first_desc = false;
        }
        if (bytes_to_copy == 0) {
            rx_desc_set_eof(s->rx_desc[q]);
            rx_desc_set_length(s->rx_desc[q], size);
        }
        rx_desc_set_ownership(s->rx_desc[q]);

        /* record how the address filter matched this frame */
        switch (maf) {
        case GEM_RX_PROMISCUOUS_ACCEPT:
            break;
        case GEM_RX_BROADCAST_ACCEPT:
            rx_desc_set_broadcast(s->rx_desc[q]);
            break;
        case GEM_RX_UNICAST_HASH_ACCEPT:
            rx_desc_set_unicast_hash(s->rx_desc[q]);
            break;
        case GEM_RX_MULTICAST_HASH_ACCEPT:
            rx_desc_set_multicast_hash(s->rx_desc[q]);
            break;
        case GEM_RX_REJECT:
            abort();
        default: /* SAR */
            rx_desc_set_sar(s->rx_desc[q], maf);
        }

        /* Descriptor write-back.  */
        desc_addr = gem_get_rx_desc_addr(s, q);
        address_space_write(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                            s->rx_desc[q],
                            sizeof(uint32_t) * gem_get_desc_len(s, true));

        /* Next descriptor */
        if (rx_desc_get_wrap(s->rx_desc[q])) {
            DB_PRINT("wrapping RX descriptor list\n");
            s->rx_desc_addr[q] = gem_get_rx_queue_base_addr(s, q);
        } else {
            DB_PRINT("incrementing RX descriptor list\n");
            s->rx_desc_addr[q] += 4 * gem_get_desc_len(s, true);
        }

        gem_get_rx_desc(s, q);
    }

    /* Count it */
    gem_receive_updatestats(s, buf, size);

    s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_FRMRCVD;
    gem_set_isr(s, q, GEM_INT_RXCMPL);

    /* Handle interrupt consequences */
    gem_update_int_status(s);

    return size;
}
| 0
|
244,114
|
/*
 * Allocate a video media header box ('vmhd').  The flags field is set to
 * 1 as mandated by ISO/IEC 14496-12 for this box.
 */
GF_Box *vmhd_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_VideoMediaHeaderBox, GF_ISOM_BOX_TYPE_VMHD);
	tmp->flags = 1;
	return (GF_Box *)tmp;
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.