idx
int64 | func
string | target
int64 |
|---|---|---|
230,296
|
njs_array_length_redefine(njs_vm_t *vm, njs_value_t *value, uint32_t length)
{
    njs_object_prop_t         *length_prop;
    static const njs_value_t  length_key = njs_string("length");

    /* Redefining "length" only makes sense on an array value. */
    if (njs_slow_path(!njs_is_array(value))) {
        njs_internal_error(vm, "njs_array_length_redefine() "
                               "applied to non-array");
        return NJS_ERROR;
    }

    /* Fetch (or create) the "length" own property slot. */
    length_prop = njs_object_property_add(vm, value,
                                          njs_value_arg(&length_key), 1);
    if (njs_slow_path(length_prop == NULL)) {
        njs_internal_error(vm, "njs_array_length_redefine() "
                               "cannot redefine \"length\"");
        return NJS_ERROR;
    }

    /* Array "length" is neither enumerable nor configurable. */
    length_prop->enumerable = 0;
    length_prop->configurable = 0;
    njs_value_number_set(&length_prop->value, length);

    return NJS_OK;
}
| 0
|
314,764
|
/*
 * Swap the two bytes of a 16-bit value (little-endian conversion helper
 * for big-endian hosts).
 */
uint16_t
_cdf_tole2(uint16_t sv)
{
    return (uint16_t)(((sv & 0x00ffU) << 8) | ((sv & 0xff00U) >> 8));
}
| 0
|
344,797
|
atoi_err(const char *nptr, int *val)
{
	const char *errmsg = NULL;
	long long parsed;

	/* NULL or empty input is reported the same way. */
	if (nptr == NULL || *nptr == '\0')
		return "missing";

	/* strtonum() constrains the value to [0, INT_MAX] and reports
	 * range/format problems through errmsg. */
	parsed = strtonum(nptr, 0, INT_MAX, &errmsg);
	if (errmsg == NULL)
		*val = (int)parsed;

	/* NULL on success, otherwise a short error description. */
	return errmsg;
}
| 0
|
356,702
|
// Statement#each(params..., rowCallback[, completeCallback]).
// When the two trailing arguments are both functions, the last one is the
// completion callback; everything before it is bound as query parameters.
Napi::Value Statement::Each(const Napi::CallbackInfo& info) {
    Napi::Env env = info.Env();
    Statement* stmt = this;

    int argc = info.Length();
    Napi::Function completed;
    // Peel off the optional completion callback from the argument list.
    if (argc >= 2 && info[argc - 1].IsFunction() && info[argc - 2].IsFunction()) {
        completed = info[--argc].As<Napi::Function>();
    }

    EachBaton* baton = stmt->Bind<EachBaton>(info, 0, argc);
    if (baton == NULL) {
        Napi::Error::New(env, "Data type is not supported").ThrowAsJavaScriptException();
        return env.Null();
    }

    baton->completed.Reset(completed, 1);
    stmt->Schedule(Work_BeginEach, baton);
    return info.This();
}
| 0
|
246,209
|
// Shape-inference function for RaggedCountSparseOutput: the op emits a
// SparseTensor triple (indices, values, dense_shape). The output rank is
// the rank of input(1) plus one extra ragged dimension; the number of
// values cannot be known at graph-construction time.
Status RaggedCountSparseOutputShapeFn(InferenceContext *c) {
int32_t rank = c->Rank(c->input(1));
if (rank != c->kUnknownRank) {
++rank; // Add the ragged dimension
}
// Statically unknown number of non-empty output entries.
auto nvals = c->UnknownDim();
c->set_output(0, c->Matrix(nvals, rank)); // out.indices
c->set_output(1, c->Vector(nvals)); // out.values
c->set_output(2, c->Vector(rank)); // out.dense_shape
return Status::OK();
}
| 0
|
328,935
|
/* Return a NUL-terminated heap copy of the attribute bytes that start at
 * `offset` inside `buf` (whose total length is `len`). Returns NULL when the
 * offset is out of range or when allocation fails; the caller owns the
 * returned buffer. */
R_API ut8 *r_bin_java_get_attr_buf(RBinJavaObj *bin, ut64 sz, const ut64 offset, const ut8 *buf, const ut64 len) {
	// Reject offsets beyond the buffer: the previous unchecked
	// "int pending = len - offset" underflowed (and truncated to int)
	// for offset > len, driving calloc/memcpy with a bogus size.
	if (offset > len) {
		return NULL;
	}
	const ut64 pending = len - offset;
	const ut8 *a_buf = buf + offset;
	// +1 keeps a trailing NUL byte after the copied attribute data.
	ut8 *attr_buf = (ut8 *) calloc (pending + 1, 1);
	if (!attr_buf) {
		eprintf ("Unable to allocate enough bytes (0x%04"PFMT64x
			") to read in the attribute.\n", sz);
		return attr_buf;
	}
	memcpy (attr_buf, a_buf, pending);
	return attr_buf;
}
| 0
|
512,485
|
bool Item_func_truth::val_bool()
{
bool val= args[0]->val_bool();
if (args[0]->null_value)
{
/*
NULL val IS {TRUE, FALSE} --> FALSE
NULL val IS NOT {TRUE, FALSE} --> TRUE
*/
return (! affirmative);
}
if (affirmative)
{
/* {TRUE, FALSE} val IS {TRUE, FALSE} value */
return (val == value);
}
/* {TRUE, FALSE} val IS NOT {TRUE, FALSE} value */
return (val != value);
}
| 0
|
231,729
|
// Regression test: a RST_STREAM for a (now closed / non-existent) peer
// unidirectional stream arriving in the same packet as other frames must
// not disturb delivery on an unrelated, server-initiated stream.
TEST_F(QuicServerTransportTest, ReceiveRstStreamNonExistentAndOtherFrame) {
StreamId clientUnidirectional = 0x02;
// Deliver reset on peer unidirectional stream to close the stream.
RstStreamFrame rstFrame(
clientUnidirectional, GenericApplicationErrorCode::UNKNOWN, 0);
ShortHeader header(
ProtectionType::KeyPhaseZero,
*server->getConn().serverConnectionId,
clientNextAppDataPacketNum++);
RegularQuicPacketBuilder builder(
server->getConn().udpSendPacketLen,
std::move(header),
0 /* largestAcked */);
builder.encodePacketHeader();
writeFrame(rstFrame, builder);
auto packet = packetToBuf(std::move(builder).buildPacket());
deliverData(std::move(packet));
// Second packet: the same (stale) RST_STREAM plus a STREAM frame with
// payload for a fresh bidirectional stream.
auto streamId =
server->createBidirectionalStream(false /* replaySafe */).value();
ShortHeader header2(
ProtectionType::KeyPhaseZero,
*server->getConn().serverConnectionId,
clientNextAppDataPacketNum++);
RegularQuicPacketBuilder builder2(
server->getConn().udpSendPacketLen,
std::move(header2),
0 /* largestAcked */);
builder2.encodePacketHeader();
writeFrame(rstFrame, builder2);
auto data = folly::IOBuf::copyBuffer("hello");
writeStreamFrameHeader(
builder2,
streamId,
0,
data->computeChainDataLength(),
data->computeChainDataLength(),
false,
folly::none /* skipLenHint */);
writeStreamFrameData(builder2, data->clone(), data->computeChainDataLength());
auto packetObject = std::move(builder2).buildPacket();
auto packet2 = packetToBuf(std::move(packetObject));
deliverData(std::move(packet2));
// The unrelated stream must still deliver its payload intact.
auto readData = server->read(streamId, 0);
ASSERT_TRUE(readData.hasValue());
ASSERT_NE(readData.value().first, nullptr);
EXPECT_TRUE(folly::IOBufEqualTo()(*readData.value().first, *data));
}
| 0
|
474,081
|
/*
 * ASCII-only case-insensitive comparison of at most `n` characters.
 * Returns 0 when equal (or both strings end together), a positive value
 * when s1 sorts after s2, negative otherwise. Stops at the first NUL.
 */
int
st_strncasecmp(const char *s1, const char *s2, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++) {
        unsigned int a = (unsigned char)s1[i];
        unsigned int b = (unsigned char)s2[i];

        /* A NUL in either string ends the comparison. */
        if (a == '\0' || b == '\0') {
            if (a == '\0' && b == '\0')
                return 0;
            return (a != '\0') ? 1 : -1;
        }

        /* Fold ASCII upper case to lower case before comparing. */
        if (a >= 'A' && a <= 'Z')
            a += 'a' - 'A';
        if (b >= 'A' && b <= 'Z')
            b += 'a' - 'A';

        if (a != b)
            return (a > b) ? 1 : -1;
    }
    return 0;
}
| 0
|
405,710
|
/* Read 16-bit PHY register `reg` from PHY `phy_id` over the EmacLite MDIO
 * bus. Returns the register value, or -ETIMEDOUT when the MDIO interface
 * does not become ready before or after the transaction.
 */
static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
struct net_local *lp = bus->priv;
u32 ctrl_reg;
u32 rc;
/* Wait until any previous MDIO transaction has completed. */
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
/* Write the PHY address, register number and set the OP bit in the
 * MDIO Address register. Set the Status bit in the MDIO Control
 * register to start a MDIO read transaction.
 */
ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
xemaclite_writel(XEL_MDIOADDR_OP_MASK |
((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
lp->base_addr + XEL_MDIOADDR_OFFSET);
xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
/* Wait for the read transaction to finish, then latch the result. */
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
"%s(phy_id=%i, reg=%x) == %x\n", __func__,
phy_id, reg, rc);
return rc;
}
| 0
|
196,691
|
/* Main process callback of the ISOBMFF demultiplexer filter.
 * Pulls data from the input pid (when demuxing a progressive/DASH source),
 * refreshes fragmented movie tables as bytes arrive, then dispatches up to
 * 50 samples per channel as filter packets. Returns GF_EOS when all
 * channels are done, GF_OK to be rescheduled, or an error. */
static GF_Err isoffin_process(GF_Filter *filter)
{
ISOMReader *read = gf_filter_get_udta(filter);
u32 i, count = gf_list_count(read->channels);
Bool is_active = GF_FALSE;
Bool in_is_eos = GF_FALSE;
Bool check_forced_end = GF_FALSE;
Bool has_new_data = GF_FALSE;
u64 min_offset_plus_one = 0;
u32 nb_forced_end=0;
if (read->in_error)
return read->in_error;
//input-driven mode: fetch and buffer packets arriving on the upstream pid
if (read->pid) {
Bool fetch_input = GF_TRUE;
//we failed at loading the init segment during a dash switch, retry
if (!read->is_partial_download && !read->mem_load_mode && (read->moov_not_loaded==2) ) {
isoffin_configure_pid(filter, read->pid, GF_FALSE);
if (read->moov_not_loaded) return GF_OK;
}
if (read->mem_load_mode==2) {
//stop fetching once the memory store is above its target size,
//unless a refill was explicitly requested
if (!read->force_fetch && read->mem_blob.size > read->mstore_size) {
fetch_input = GF_FALSE;
}
read->force_fetch = GF_FALSE;
}
while (fetch_input) {
GF_FilterPacket *pck = gf_filter_pid_get_packet(read->pid);
if (!pck) {
//we issued a seek, wait for the first packet to be received before fetching channels
//otherwise we could end up reading from the wrong cache
if (read->wait_for_source) {
//something went wrong during the seek request
if (gf_filter_pid_is_eos(read->pid))
return GF_EOS;
return GF_OK;
}
break;
}
read->wait_for_source = GF_FALSE;
if (read->mem_load_mode) {
u32 data_size;
const u8 *pck_data = gf_filter_pck_get_data(pck, &data_size);
isoffin_push_buffer(filter, read, pck_data, data_size);
}
//we just had a switch but init seg is not completely done: input packet is only a part of the init, drop it
else if (read->moov_not_loaded==2) {
gf_filter_pid_drop_packet(read->pid);
return GF_OK;
}
gf_filter_pid_drop_packet(read->pid);
has_new_data = GF_TRUE;
if (read->in_error)
return read->in_error;
}
if (gf_filter_pid_is_eos(read->pid)) {
read->input_loaded = GF_TRUE;
in_is_eos = GF_TRUE;
}
if (read->input_is_stop) {
read->input_loaded = GF_TRUE;
in_is_eos = GF_TRUE;
read->input_is_stop = GF_FALSE;
}
if (!read->frag_type && read->input_loaded) {
in_is_eos = GF_TRUE;
}
//segment is invalid, wait for eos on input an send eos on all channels
if (read->invalid_segment) {
if (!in_is_eos) return GF_OK;
read->invalid_segment = GF_FALSE;
for (i=0; i<count; i++) {
ISOMChannel *ch = gf_list_get(read->channels, i);
if (!ch->playing) {
continue;
}
if (!ch->eos_sent) {
ch->eos_sent = GF_TRUE;
gf_filter_pid_set_eos(ch->pid);
}
}
read->eos_signaled = GF_TRUE;
return GF_EOS;
}
} else if (read->extern_mov) {
//externally provided movie: no input pid, everything is available
in_is_eos = GF_TRUE;
read->input_loaded = GF_TRUE;
}
//moov was not parsed yet: finish setup now (unless still buffering in mem mode)
if (read->moov_not_loaded==1) {
if (read->mem_load_mode)
return GF_OK;
read->moov_not_loaded = GF_FALSE;
return isoffin_setup(filter, read);
}
//fragmented file being progressively received: refresh sample tables
if (read->refresh_fragmented) {
const GF_PropertyValue *prop;
if (in_is_eos) {
read->refresh_fragmented = GF_FALSE;
} else {
prop = gf_filter_pid_get_property(read->pid, GF_PROP_PID_FILE_CACHED);
if (prop && prop->value.boolean)
read->refresh_fragmented = GF_FALSE;
}
if (has_new_data) {
u64 bytesMissing=0;
GF_Err e;
const char *new_url = NULL;
prop = gf_filter_pid_get_property(read->pid, GF_PROP_PID_FILEPATH);
if (prop) new_url = prop->value.string;
e = gf_isom_refresh_fragmented(read->mov, &bytesMissing, new_url);
if (e && (e!= GF_ISOM_INCOMPLETE_FILE)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[IsoMedia] Failed to refresh current segment: %s\n", gf_error_to_string(e) ));
read->refresh_fragmented = GF_FALSE;
} else {
GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[IsoMedia] Refreshing current segment at UTC "LLU" - "LLU" bytes still missing - input is EOS %d\n", gf_net_get_utc(), bytesMissing, in_is_eos));
}
if (!read->refresh_fragmented && (e==GF_ISOM_INCOMPLETE_FILE)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_DASH, ("[IsoMedia] Incomplete Segment received - "LLU" bytes missing but EOF found\n", bytesMissing ));
}
#ifndef GPAC_DISABLE_LOG
if (gf_log_tool_level_on(GF_LOG_DASH, GF_LOG_DEBUG)) {
for (i=0; i<count; i++) {
ISOMChannel *ch = gf_list_get(read->channels, i);
GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[IsoMedia] refresh track %d fragment - cur sample %d - new sample count %d\n", ch->track, ch->sample_num, gf_isom_get_sample_count(ch->owner->mov, ch->track) ));
}
}
#endif
isor_check_producer_ref_time(read);
if (!read->frag_type)
read->refresh_fragmented = GF_FALSE;
}
}
//main dispatch loop: send up to 50 packets per playing channel
for (i=0; i<count; i++) {
u8 *data;
u32 nb_pck=50;
ISOMChannel *ch;
ch = gf_list_get(read->channels, i);
if (!ch->playing) {
nb_forced_end++;
continue;
}
//eos not sent on this channel, we are active
if (!ch->eos_sent)
is_active = GF_TRUE;
while (nb_pck) {
ch->sample_data_offset = 0;
if (!read->full_segment_flush && gf_filter_pid_would_block(ch->pid) )
break;
if (ch->item_id) {
isor_reader_get_sample_from_item(ch);
} else {
isor_reader_get_sample(ch);
}
//skip samples not matching the requested sample description index
if (read->stsd && (ch->last_sample_desc_index != read->stsd) && ch->sample) {
isor_reader_release_sample(ch);
continue;
}
if (ch->sample) {
u32 sample_dur;
u8 dep_flags;
u8 *subs_buf;
u32 subs_buf_size;
GF_FilterPacket *pck;
if (ch->needs_pid_reconfig) {
isor_update_channel_config(ch);
ch->needs_pid_reconfig = GF_FALSE;
}
//we have at least two samples, update GF_PROP_PID_HAS_SYNC if needed
if (ch->check_has_rap && (gf_isom_get_sample_count(ch->owner->mov, ch->track)>1) && (gf_isom_has_sync_points(ch->owner->mov, ch->track)==1)) {
ch->check_has_rap = GF_FALSE;
ch->has_rap = GF_TRUE;
gf_filter_pid_set_property(ch->pid, GF_PROP_PID_HAS_SYNC, &PROP_BOOL(ch->has_rap) );
}
//strip param sets from payload, trigger reconfig if needed
isor_reader_check_config(ch);
if (read->nodata) {
pck = gf_filter_pck_new_shared(ch->pid, NULL, ch->sample->dataLength, NULL);
if (!pck) return GF_OUT_OF_MEM;
} else {
pck = gf_filter_pck_new_alloc(ch->pid, ch->sample->dataLength, &data);
if (!pck) return GF_OUT_OF_MEM;
memcpy(data, ch->sample->data, ch->sample->dataLength);
}
gf_filter_pck_set_dts(pck, ch->dts);
gf_filter_pck_set_cts(pck, ch->cts);
if (ch->sample->IsRAP==-1) {
gf_filter_pck_set_sap(pck, GF_FILTER_SAP_1);
ch->redundant = 1;
} else {
gf_filter_pck_set_sap(pck, (GF_FilterSAPType) ch->sample->IsRAP);
}
if (ch->sap_3)
gf_filter_pck_set_sap(pck, GF_FILTER_SAP_3);
else if (ch->sap_4_type) {
gf_filter_pck_set_sap(pck, (ch->sap_4_type==GF_ISOM_SAMPLE_PREROLL) ? GF_FILTER_SAP_4_PROL : GF_FILTER_SAP_4);
gf_filter_pck_set_roll_info(pck, ch->roll);
}
sample_dur = ch->au_duration;
if (ch->sample->nb_pack)
sample_dur *= ch->sample->nb_pack;
gf_filter_pck_set_duration(pck, sample_dur);
gf_filter_pck_set_seek_flag(pck, ch->seek_flag);
//for now we only signal xPS mask for non-sap
if (ch->xps_mask && !gf_filter_pck_get_sap(pck) ) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_XPS_MASK, &PROP_UINT(ch->xps_mask) );
}
//pack ISOBMFF dependency bits (leading/dependsOn/dependedOn/redundant)
dep_flags = ch->isLeading;
dep_flags <<= 2;
dep_flags |= ch->dependsOn;
dep_flags <<= 2;
dep_flags |= ch->dependedOn;
dep_flags <<= 2;
dep_flags |= ch->redundant;
if (dep_flags)
gf_filter_pck_set_dependency_flags(pck, dep_flags);
gf_filter_pck_set_crypt_flags(pck, ch->pck_encrypted ? GF_FILTER_PCK_CRYPT : 0);
gf_filter_pck_set_seq_num(pck, ch->sample_num);
subs_buf = gf_isom_sample_get_subsamples_buffer(read->mov, ch->track, ch->sample_num, &subs_buf_size);
if (subs_buf) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_SUBS, &PROP_DATA_NO_COPY(subs_buf, subs_buf_size) );
}
if (ch->sai_buffer && ch->pck_encrypted) {
assert(ch->sai_buffer_size);
gf_filter_pck_set_property(pck, GF_PROP_PCK_CENC_SAI, &PROP_DATA(ch->sai_buffer, ch->sai_buffer_size) );
}
//signal fragment/segment boundaries when requested (sigfrag mode)
if (read->sigfrag) {
GF_ISOFragmentBoundaryInfo finfo;
if (gf_isom_sample_is_fragment_start(read->mov, ch->track, ch->sample_num, &finfo) ) {
u64 start=0;
u32 traf_start = finfo.seg_start_plus_one ? 2 : 1;
if (finfo.seg_start_plus_one)
gf_filter_pck_set_property(pck, GF_PROP_PCK_CUE_START, &PROP_BOOL(GF_TRUE));
gf_filter_pck_set_property(pck, GF_PROP_PCK_FRAG_START, &PROP_UINT(traf_start));
start = finfo.frag_start;
if (finfo.seg_start_plus_one) start = finfo.seg_start_plus_one-1;
gf_filter_pck_set_property(pck, GF_PROP_PCK_FRAG_RANGE, &PROP_FRAC64_INT(start, finfo.mdat_end));
if (finfo.moof_template) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_MOOF_TEMPLATE, &PROP_DATA((u8 *)finfo.moof_template, finfo.moof_template_size));
}
if (finfo.sidx_end) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_SIDX_RANGE, &PROP_FRAC64_INT(finfo.sidx_start , finfo.sidx_end));
}
if (read->seg_name_changed) {
const GF_PropertyValue *p = gf_filter_pid_get_property(read->pid, GF_PROP_PID_URL);
read->seg_name_changed = GF_FALSE;
if (p && p->value.string) {
gf_filter_pck_set_property(pck, GF_PROP_PID_URL, &PROP_STRING(p->value.string));
}
}
}
}
if (ch->sender_ntp) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_SENDER_NTP, &PROP_LONGUINT(ch->sender_ntp));
if (ch->ntp_at_server_ntp) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_RECEIVER_NTP, &PROP_LONGUINT(ch->ntp_at_server_ntp));
}
}
ch->eos_sent = GF_FALSE;
//this might not be the true end of stream
if ((ch->streamType==GF_STREAM_AUDIO) && (ch->sample_num == gf_isom_get_sample_count(read->mov, ch->track))) {
gf_filter_pck_set_property(pck, GF_PROP_PCK_END_RANGE, &PROP_BOOL(GF_TRUE));
}
gf_filter_pck_send(pck);
isor_reader_release_sample(ch);
ch->last_valid_sample_data_offset = ch->sample_data_offset;
nb_pck--;
} else if (ch->last_state==GF_EOS) {
if (ch->playing == 2) {
if (in_is_eos) {
ch->playing = GF_FALSE;
} else {
nb_forced_end++;
check_forced_end = GF_TRUE;
}
}
if (in_is_eos && !ch->eos_sent) {
void *tfrf;
const void *gf_isom_get_tfrf(GF_ISOFile *movie, u32 trackNumber);
ch->eos_sent = GF_TRUE;
read->eos_signaled = GF_TRUE;
tfrf = (void *) gf_isom_get_tfrf(read->mov, ch->track);
if (tfrf) {
gf_filter_pid_set_info_str(ch->pid, "smooth_tfrf", &PROP_POINTER(tfrf) );
ch->last_has_tfrf = GF_TRUE;
} else if (ch->last_has_tfrf) {
gf_filter_pid_set_info_str(ch->pid, "smooth_tfrf", NULL);
ch->last_has_tfrf = GF_FALSE;
}
gf_filter_pid_set_eos(ch->pid);
}
break;
} else {
read->force_fetch = GF_TRUE;
break;
}
}
if (!min_offset_plus_one || (min_offset_plus_one - 1 > ch->last_valid_sample_data_offset))
min_offset_plus_one = 1 + ch->last_valid_sample_data_offset;
}
//memory-load mode: purge buffered bytes below the lowest offset still needed
if (read->mem_load_mode && min_offset_plus_one) {
isoffin_purge_mem(read, min_offset_plus_one-1);
}
//we reached end of playback due to play range request, we must send eos - however for safety reason with DASH, we first need to cancel the input
if (read->pid && check_forced_end && (nb_forced_end==count)) {
//abort input
GF_FilterEvent evt;
GF_FEVT_INIT(evt, GF_FEVT_STOP, read->pid);
gf_filter_pid_send_event(read->pid, &evt);
}
if (!is_active) {
return GF_EOS;
}
//if (in_is_eos)
// gf_filter_ask_rt_reschedule(filter, 1);
return GF_OK;
}
| 1
|
300,803
|
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (!peer) {
		/* Local name: our own port reference and node address. */
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	} else {
		/* Peer name is only available while connected, or right
		 * after disconnect when peer == 2.
		 */
		if (!tipc_sk_connected(sk) &&
		    (peer != 2 || sk->sk_state != TIPC_DISCONNECTING))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	}

	addr->addrtype = TIPC_SOCKET_ADDR;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}
| 0
|
312,584
|
qf_find_last_entry_on_line(qfline_T *entry, int *errornr)
{
    // Walk forward while the next entry refers to the same file and line,
    // bumping *errornr for every step; stop early on interrupt (got_int).
    for (;;)
    {
	qfline_T *next = entry->qf_next;

	if (got_int
		|| next == NULL
		|| next->qf_fnum != entry->qf_fnum
		|| next->qf_lnum != entry->qf_lnum)
	    break;
	entry = next;
	++*errornr;
    }
    return entry;
}
| 0
|
195,801
|
*/
/* Expat end-element handler for WDDX deserialization: pops the finished
 * element from the parse stack and folds it into its parent container
 * (array/struct/object), handling base64 binary decoding, object
 * re-instantiation from PHP_CLASS_NAME_VAR and __wakeup() invocation. */
static void php_wddx_pop_element(void *user_data, const XML_Char *name)
{
st_entry *ent1, *ent2;
wddx_stack *stack = (wddx_stack *)user_data;
HashTable *target_hash;
zend_class_entry *pce;
zval obj;
/* OBJECTS_FIXME */
if (stack->top == 0) {
return;
}
/* Value-producing elements: fold the popped entry into its parent. */
if (!strcmp((char *)name, EL_STRING) || !strcmp((char *)name, EL_NUMBER) ||
!strcmp((char *)name, EL_BOOLEAN) || !strcmp((char *)name, EL_NULL) ||
!strcmp((char *)name, EL_ARRAY) || !strcmp((char *)name, EL_STRUCT) ||
!strcmp((char *)name, EL_RECORDSET) || !strcmp((char *)name, EL_BINARY) ||
!strcmp((char *)name, EL_DATETIME)) {
wddx_stack_top(stack, (void**)&ent1);
/* Entry never received content: discard it without dereferencing. */
if (Z_TYPE(ent1->data) == IS_UNDEF) {
if (stack->top > 1) {
stack->top--;
} else {
stack->done = 1;
}
efree(ent1);
return;
}
if (!strcmp((char *)name, EL_BINARY)) {
zend_string *new_str = php_base64_decode(
(unsigned char *)Z_STRVAL(ent1->data), Z_STRLEN(ent1->data));
zval_ptr_dtor(&ent1->data);
ZVAL_STR(&ent1->data, new_str);
}
/* Call __wakeup() method on the object. */
if (Z_TYPE(ent1->data) == IS_OBJECT) {
zval fname, retval;
ZVAL_STRING(&fname, "__wakeup");
call_user_function_ex(NULL, &ent1->data, &fname, &retval, 0, 0, 0, NULL);
zval_ptr_dtor(&fname);
zval_ptr_dtor(&retval);
}
if (stack->top > 1) {
stack->top--;
wddx_stack_top(stack, (void**)&ent2);
/* if non-existent field */
if (ent2->type == ST_FIELD && Z_ISUNDEF(ent2->data)) {
zval_ptr_dtor(&ent1->data);
efree(ent1);
return;
}
if (Z_TYPE(ent2->data) == IS_ARRAY || Z_TYPE(ent2->data) == IS_OBJECT) {
target_hash = HASH_OF(&ent2->data);
if (ent1->varname) {
/* Special variable carrying the class name of a serialized
 * object: re-instantiate the object from the struct's array. */
if (!strcmp(ent1->varname, PHP_CLASS_NAME_VAR) &&
Z_TYPE(ent1->data) == IS_STRING && Z_STRLEN(ent1->data) &&
ent2->type == ST_STRUCT && Z_TYPE(ent2->data) == IS_ARRAY) {
zend_bool incomplete_class = 0;
zend_str_tolower(Z_STRVAL(ent1->data), Z_STRLEN(ent1->data));
zend_string_forget_hash_val(Z_STR(ent1->data));
if ((pce = zend_hash_find_ptr(EG(class_table), Z_STR(ent1->data))) == NULL) {
incomplete_class = 1;
pce = PHP_IC_ENTRY;
}
/* Initialize target object */
object_init_ex(&obj, pce);
/* Merge current hashtable with object's default properties */
zend_hash_merge(Z_OBJPROP(obj),
Z_ARRVAL(ent2->data),
zval_add_ref, 0);
if (incomplete_class) {
php_store_class_name(&obj, Z_STRVAL(ent1->data), Z_STRLEN(ent1->data));
}
/* Clean up old array entry */
zval_ptr_dtor(&ent2->data);
/* Set stack entry to point to the newly created object */
ZVAL_COPY_VALUE(&ent2->data, &obj);
/* Clean up class name var entry */
zval_ptr_dtor(&ent1->data);
} else if (Z_TYPE(ent2->data) == IS_OBJECT) {
zend_class_entry *old_scope = EG(scope);
EG(scope) = Z_OBJCE(ent2->data);
add_property_zval(&ent2->data, ent1->varname, &ent1->data);
if Z_REFCOUNTED(ent1->data) Z_DELREF(ent1->data);
EG(scope) = old_scope;
} else {
zend_symtable_str_update(target_hash, ent1->varname, strlen(ent1->varname), &ent1->data);
}
efree(ent1->varname);
} else {
zend_hash_next_index_insert(target_hash, &ent1->data);
}
}
efree(ent1);
} else {
stack->done = 1;
}
} else if (!strcmp((char *)name, EL_VAR) && stack->varname) {
efree(stack->varname);
stack->varname = NULL;
} else if (!strcmp((char *)name, EL_FIELD)) {
st_entry *ent;
wddx_stack_top(stack, (void **)&ent);
efree(ent);
stack->top--;
}
/* NOTE(review): this snippet appears truncated — the function's closing
 * brace is not part of this block. */
| 1
|
512,405
|
/*
  Apply `transformer` to both operands of this IN-optimizer wrapper and
  finally to the wrapper itself.  The left operand is always transformed;
  the right operand is transformed only in "invisible" (MAX/MIN rewrite)
  mode, otherwise the subselect's left expression is re-linked to our
  (possibly replaced) left operand so the two stay consistent.
*/
Item *Item_in_optimizer::transform(THD *thd, Item_transformer transformer,
uchar *argument)
{
Item *new_item;
DBUG_ASSERT(fixed);
DBUG_ASSERT(!thd->stmt_arena->is_stmt_prepare());
DBUG_ASSERT(arg_count == 2);
/* Transform the left IN operand. */
new_item= (*args)->transform(thd, transformer, argument);
if (!new_item)
return 0;
/*
THD::change_item_tree() should be called only if the tree was
really transformed, i.e. when a new item has been created.
Otherwise we'll be allocating a lot of unnecessary memory for
change records at each execution.
*/
if ((*args) != new_item)
thd->change_item_tree(args, new_item);
if (invisible_mode())
{
/* MAX/MIN transformed => pass through */
new_item= args[1]->transform(thd, transformer, argument);
if (!new_item)
return 0;
if (args[1] != new_item)
thd->change_item_tree(args + 1, new_item);
}
else
{
/*
Transform the right IN operand which should be an Item_in_subselect or a
subclass of it. The left operand of the IN must be the same as the left
operand of this Item_in_optimizer, so in this case there is no further
transformation, we only make both operands the same.
TODO: is it the way it should be?
*/
DBUG_ASSERT((args[1])->type() == Item::SUBSELECT_ITEM &&
(((Item_subselect*)(args[1]))->substype() ==
Item_subselect::IN_SUBS ||
((Item_subselect*)(args[1]))->substype() ==
Item_subselect::ALL_SUBS ||
((Item_subselect*)(args[1]))->substype() ==
Item_subselect::ANY_SUBS));
Item_in_subselect *in_arg= (Item_in_subselect*)args[1];
thd->change_item_tree(&in_arg->left_expr, args[0]);
}
return (this->*transformer)(thd, argument);
}
| 0
|
294,407
|
date_s__valid_ordinal_p(int argc, VALUE *argv, VALUE klass)
{
    VALUE vy, vd, vsg;
    VALUE forwarded[3];

    /* Two mandatory arguments (year, day-of-year) plus an optional
     * calendar-reform start. */
    rb_scan_args(argc, argv, "21", &vy, &vd, &vsg);

    forwarded[0] = vy;
    forwarded[1] = vd;
    /* Default the reform start to the Gregorian epoch when omitted. */
    forwarded[2] = (argc < 3) ? DBL2NUM(GREGORIAN) : vsg;

    return valid_ordinal_sub(3, forwarded, klass, 1);
}
| 0
|
468,369
|
/*
 * Creates a new #GSocketClient with default options. The caller owns the
 * returned reference (presumably released with g_object_unref(), per the
 * usual GObject convention).
 */
g_socket_client_new (void)
{
return g_object_new (G_TYPE_SOCKET_CLIENT, NULL);
}
| 0
|
294,585
|
/* Date#monday? — true when the receiver's day-of-week index equals 1. */
d_lite_monday_p(VALUE self)
{
get_d1(self);
return f_boolcast(m_wday(dat) == 1);
}
| 0
|
512,737
|
/* Clone this item for the given THD when copying expression trees;
   delegates to the Item_hex_hybrid copy helper. */
Item *get_copy(THD *thd)
{ return get_item_copy<Item_hex_hybrid>(thd, this); }
| 0
|
353,173
|
// Intersect the current Splash clip region with the path taken from the
// graphics state. The second clipToPath() argument selects the fill rule
// (false here — presumably non-zero winding; confirm against the Splash API).
void SplashOutputDev::clip(GfxState *state) {
SplashPath path = convertPath(state, state->getPath(), true);
splash->clipToPath(&path, false);
}
| 0
|
364,243
|
/* Initialize a freshly allocated rds_incoming: one caller-held reference,
 * an unlinked list node, the owning connection and source address, no RDMA
 * cookie and a zeroed receive timestamp. */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
__be32 saddr)
{
atomic_set(&inc->i_refcount, 1);
INIT_LIST_HEAD(&inc->i_item);
inc->i_conn = conn;
inc->i_saddr = saddr;
inc->i_rdma_cookie = 0;
inc->i_rx_tstamp.tv_sec = 0;
inc->i_rx_tstamp.tv_usec = 0;
}
| 0
|
194,989
|
/* Decode one row of raw PSD channel samples from `pixels` into the image's
 * authentic pixel cache. Packet size (1/2/4 bytes per sample) follows the
 * image depth; depth==1 images are handled as packed bits, 8 pixels per
 * source byte. Returns the result of SyncAuthenticPixels on success,
 * MagickFalse when the pixel queue cannot be obtained. */
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const ssize_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
const unsigned char
*p;
IndexPacket
*indexes;
PixelPacket
*q;
ssize_t
x;
size_t
packet_size;
unsigned short
nibble;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
indexes=GetAuthenticIndexQueue(image);
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Read one sample: 8-bit, big-endian 16-bit, or big-endian float. */
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
if (packet_size == 2)
{
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x);
q++;
}
else
{
/* 1-bit depth: the sample byte packs up to 8 pixels, MSB first;
 * a set bit maps to 0 and a clear bit to QuantumRange. */
ssize_t
bit,
number_bits;
number_bits=(ssize_t) image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit=0; bit < number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
}
/* The inner loop advanced x one step too far unless the row ended. */
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
| 1
|
90,104
|
// Maps a cellular ActivationState enum value to its localized,
// human-readable string. (The previous version had an unreachable
// `break` after every `return` — dead code removed.)
std::string CellularNetwork::ActivationStateToString(
    ActivationState activation_state) {
  switch (activation_state) {
    case ACTIVATION_STATE_ACTIVATED:
      return l10n_util::GetStringUTF8(
          IDS_CHROMEOS_NETWORK_ACTIVATION_STATE_ACTIVATED);
    case ACTIVATION_STATE_ACTIVATING:
      return l10n_util::GetStringUTF8(
          IDS_CHROMEOS_NETWORK_ACTIVATION_STATE_ACTIVATING);
    case ACTIVATION_STATE_NOT_ACTIVATED:
      return l10n_util::GetStringUTF8(
          IDS_CHROMEOS_NETWORK_ACTIVATION_STATE_NOT_ACTIVATED);
    case ACTIVATION_STATE_PARTIALLY_ACTIVATED:
      return l10n_util::GetStringUTF8(
          IDS_CHROMEOS_NETWORK_ACTIVATION_STATE_PARTIALLY_ACTIVATED);
    default:
      // Covers ACTIVATION_STATE_UNKNOWN and any unexpected values.
      return l10n_util::GetStringUTF8(
          IDS_CHROMEOS_NETWORK_ACTIVATION_STATE_UNKNOWN);
  }
}
| 0
|
491,902
|
/* Completion callback for a readpages request: on a short read, shrink the
 * cached inode size (short read means EOF), invalidate cached attributes
 * (atime changed), mark every page uptodate or errored and unlock it, then
 * drop the file reference held by the request. */
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
int i;
size_t count = req->misc.read.in.size;
size_t num_read = req->out.args[0].size;
struct inode *inode = req->pages[0]->mapping->host;
/*
 * Short read means EOF. If file size is larger, truncate it
 */
if (!req->out.h.error && num_read < count) {
loff_t pos = page_offset(req->pages[0]) + num_read;
fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
}
fuse_invalidate_attr(inode); /* atime changed */
for (i = 0; i < req->num_pages; i++) {
struct page *page = req->pages[i];
if (!req->out.h.error)
SetPageUptodate(page);
else
SetPageError(page);
unlock_page(page);
}
if (req->ff)
fuse_file_put(req->ff);
}
| 0
|
389,756
|
/*
 * Append option string `s` to `optstr` (whose total capacity is `maxlen`
 * bytes, including the terminating NUL), separating entries with '\n'.
 * Returns 0 on success, 1 when the result would not fit.
 */
int addopt(char *optstr, int maxlen, const char *s)
{
	size_t n;
	size_t m;

	n = strlen(optstr);
	/* Bytes required: existing text, the '\n' separator when the
	 * buffer is non-empty, the new text and the NUL terminator.
	 * (The previous count omitted the separator byte, allowing a
	 * one-byte overflow when the result exactly filled the buffer.) */
	m = n + (n > 0 ? 1 : 0) + strlen(s) + 1;
	if (m > (size_t) maxlen) {
		return 1;
	}
	if (n > 0) {
		strcat(optstr, "\n");
	}
	strcat(optstr, s);
	return 0;
}
| 0
|
359,292
|
/* Deprecated "neighbor ... version (4|4-)" CLI command: BGP version
 * selection is no longer meaningful, so the command is accepted and
 * silently ignored. */
DEFUN_DEPRECATED (neighbor_version,
neighbor_version_cmd,
NEIGHBOR_CMD "version (4|4-)",
NEIGHBOR_STR
NEIGHBOR_ADDR_STR
"Set the BGP version to match a neighbor\n"
"Neighbor's BGP version\n")
{
return CMD_SUCCESS;
}
| 0
|
249,955
|
/* Return the canonical absolute path for NAME in a newly allocated buffer
 * (realpath with a NULL resolved buffer); NULL on failure. */
__canonicalize_file_name (const char *name)
{
return __realpath (name, NULL);
}
| 0
|
255,780
|
/* bsearch/qsort-style comparator ordering a sorted_pattern_t (lhs) against
 * an authz_rule_segment_t (rhs) by their segment strings. */
compare_node_rule_segment(const void *void_lhs,
const void *void_rhs)
{
const sorted_pattern_t *element = void_lhs;
const authz_rule_segment_t *segment = void_rhs;
return strcmp(element->node->segment.data, segment->pattern.data);
}
| 0
|
512,935
|
/*
  Apply `transformer` to every item in this condition's argument list and
  then to the Item_func part of the condition itself.  Replaced list items
  are registered through THD::change_item_tree() so they can be rolled
  back after execution.
*/
Item *Item_cond::transform(THD *thd, Item_transformer transformer, uchar *arg)
{
DBUG_ASSERT(!thd->stmt_arena->is_stmt_prepare());
List_iterator<Item> li(list);
Item *item;
while ((item= li++))
{
Item *new_item= item->transform(thd, transformer, arg);
if (!new_item)
return 0;
/*
THD::change_item_tree() should be called only if the tree was
really transformed, i.e. when a new item has been created.
Otherwise we'll be allocating a lot of unnecessary memory for
change records at each execution.
*/
if (new_item != item)
thd->change_item_tree(li.ref(), new_item);
}
return Item_func::transform(thd, transformer, arg);
}
| 0
|
199,918
|
/*
 * Move to the next (dir == FORWARD) or previous (dir == BACKWARD)
 * misspelled word in window "wp", wrapping around the buffer when
 * 'wrapscan' is set.  With "curline" set, only the cursor line is
 * examined (insert-mode completion / spellbadword()).
 * Returns the length of the bad word found and moves the cursor, or 0
 * when none is found; "*attrp" receives the word's highlight attribute
 * for forward searches.
 */
spell_move_to(
win_T *wp,
int dir, // FORWARD or BACKWARD
int allwords, // TRUE for "[s"/"]s", FALSE for "[S"/"]S"
int curline,
hlf_T *attrp) // return: attributes of bad word or NULL
// (only when "dir" is FORWARD)
{
linenr_T lnum;
pos_T found_pos;
int found_len = 0;
char_u *line;
char_u *p;
char_u *endp;
hlf_T attr;
int len;
#ifdef FEAT_SYN_HL
int has_syntax = syntax_present(wp);
#endif
int col;
int can_spell;
char_u *buf = NULL;
int buflen = 0;
int skip = 0;
int capcol = -1;
int found_one = FALSE;
int wrapped = FALSE;
if (no_spell_checking(wp))
return 0;
/*
 * Start looking for bad word at the start of the line, because we can't
 * start halfway a word, we don't know where it starts or ends.
 *
 * When searching backwards, we continue in the line to find the last
 * bad word (in the cursor line: before the cursor).
 *
 * We concatenate the start of the next line, so that wrapped words work
 * (e.g. "et<line-break>cetera"). Doesn't work when searching backwards
 * though...
 */
lnum = wp->w_cursor.lnum;
CLEAR_POS(&found_pos);
while (!got_int)
{
line = ml_get_buf(wp->w_buffer, lnum, FALSE);
len = (int)STRLEN(line);
// Grow the work buffer to hold this line plus a wrapped-word margin.
if (buflen < len + MAXWLEN + 2)
{
vim_free(buf);
buflen = len + MAXWLEN + 2;
buf = alloc(buflen);
if (buf == NULL)
break;
}
// In first line check first word for Capital.
if (lnum == 1)
capcol = 0;
// For checking first word with a capital skip white space.
if (capcol == 0)
capcol = getwhitecols(line);
else if (curline && wp == curwin)
{
// For spellbadword(): check if first word needs a capital.
col = getwhitecols(line);
if (check_need_cap(lnum, col))
capcol = col;
// Need to get the line again, may have looked at the previous
// one.
line = ml_get_buf(wp->w_buffer, lnum, FALSE);
}
// Copy the line into "buf" and append the start of the next line if
// possible.
STRCPY(buf, line);
if (lnum < wp->w_buffer->b_ml.ml_line_count)
spell_cat_line(buf + STRLEN(buf),
ml_get_buf(wp->w_buffer, lnum + 1, FALSE), MAXWLEN);
p = buf + skip;
endp = buf + len;
while (p < endp)
{
// When searching backward don't search after the cursor. Unless
// we wrapped around the end of the buffer.
if (dir == BACKWARD
&& lnum == wp->w_cursor.lnum
&& !wrapped
&& (colnr_T)(p - buf) >= wp->w_cursor.col)
break;
// start of word
attr = HLF_COUNT;
len = spell_check(wp, p, &attr, &capcol, FALSE);
if (attr != HLF_COUNT)
{
// We found a bad word. Check the attribute.
if (allwords || attr == HLF_SPB)
{
// When searching forward only accept a bad word after
// the cursor.
if (dir == BACKWARD
|| lnum != wp->w_cursor.lnum
|| (wrapped
|| (colnr_T)(curline ? p - buf + len
: p - buf)
> wp->w_cursor.col))
{
#ifdef FEAT_SYN_HL
if (has_syntax)
{
col = (int)(p - buf);
(void)syn_get_id(wp, lnum, (colnr_T)col,
FALSE, &can_spell, FALSE);
if (!can_spell)
attr = HLF_COUNT;
}
else
#endif
can_spell = TRUE;
if (can_spell)
{
found_one = TRUE;
found_pos.lnum = lnum;
found_pos.col = (int)(p - buf);
found_pos.coladd = 0;
if (dir == FORWARD)
{
// No need to search further.
wp->w_cursor = found_pos;
vim_free(buf);
if (attrp != NULL)
*attrp = attr;
return len;
}
else if (curline)
// Insert mode completion: put cursor after
// the bad word.
found_pos.col += len;
found_len = len;
}
}
else
found_one = TRUE;
}
}
// advance to character after the word
p += len;
capcol -= len;
}
if (dir == BACKWARD && found_pos.lnum != 0)
{
// Use the last match in the line (before the cursor).
wp->w_cursor = found_pos;
vim_free(buf);
return found_len;
}
if (curline)
break; // only check cursor line
// If we are back at the starting line and searched it again there
// is no match, give up.
if (lnum == wp->w_cursor.lnum && wrapped)
break;
// Advance to next line.
if (dir == BACKWARD)
{
if (lnum > 1)
--lnum;
else if (!p_ws)
break; // at first line and 'nowrapscan'
else
{
// Wrap around to the end of the buffer. May search the
// starting line again and accept the last match.
lnum = wp->w_buffer->b_ml.ml_line_count;
wrapped = TRUE;
if (!shortmess(SHM_SEARCH))
give_warning((char_u *)_(top_bot_msg), TRUE);
}
capcol = -1;
}
else
{
if (lnum < wp->w_buffer->b_ml.ml_line_count)
++lnum;
else if (!p_ws)
break; // at first line and 'nowrapscan'
else
{
// Wrap around to the start of the buffer. May search the
// starting line again and accept the first match.
lnum = 1;
wrapped = TRUE;
if (!shortmess(SHM_SEARCH))
give_warning((char_u *)_(bot_top_msg), TRUE);
}
// If we are back at the starting line and there is no match then
// give up.
if (lnum == wp->w_cursor.lnum && !found_one)
break;
// Skip the characters at the start of the next line that were
// included in a match crossing line boundaries.
if (attr == HLF_COUNT)
skip = (int)(p - endp);
else
skip = 0;
// Capcol skips over the inserted space.
--capcol;
// But after empty line check first word in next line
if (*skipwhite(line) == NUL)
capcol = 0;
}
line_breakcheck();
}
vim_free(buf);
return 0;
}
| 1
|
294,408
|
/* Return the virtual "start of Gregorian" value for a date object,
 * dispatching on its internal representation (simple vs. complex). */
m_virtual_sg(union DateData *x)
{
    /* Simple dates and complex dates store the value differently. */
    return simple_dat_p(x) ? s_virtual_sg(x) : c_virtual_sg(x);
}
| 0
|
512,981
|
// Return the effective fractional-seconds precision of this item when
// treated as a TIME value.
uint time_precision(THD *thd)
{
// For constant items the type handler can inspect the actual value;
// otherwise clamp the declared decimals to the supported maximum
// number of fractional digits.
return const_item() ? type_handler()->Item_time_precision(thd, this) :
MY_MIN(decimals, TIME_SECOND_PART_DIGITS);
}
| 0
|
229,141
|
/* Look up a virtio-serial port by name across all registered
 * virtio-serial devices.  Returns NULL when no port matches. */
static VirtIOSerialPort *find_port_by_name(char *name)
{
VirtIOSerial *vser;
QLIST_FOREACH(vser, &vserdevices.devices, next) {
VirtIOSerialPort *port;
QTAILQ_FOREACH(port, &vser->ports, next) {
/* Ports may be nameless; skip those. */
if (port->name && !strcmp(port->name, name)) {
return port;
}
}
}
return NULL;
}
| 0
|
335,419
|
/*
 * Fire the DirChangedPre autocommand before the current directory is
 * changed.  "acmd_fname" selects the autocmd pattern match; "new_dir" is
 * the directory about to become current.  With +eval the target directory
 * is exposed read-only to the autocommand via v:event.directory.
 */
trigger_DirChangedPre(char_u *acmd_fname, char_u *new_dir)
{
#ifdef FEAT_EVAL
dict_T *v_event;
save_v_event_T save_v_event;
v_event = get_v_event(&save_v_event);
(void)dict_add_string(v_event, "directory", new_dir);
dict_set_items_ro(v_event);
#endif
apply_autocmds(EVENT_DIRCHANGEDPRE, acmd_fname, new_dir, FALSE, curbuf);
#ifdef FEAT_EVAL
restore_v_event(v_event, &save_v_event);
#endif
}
| 0
|
336,646
|
/* Unregister a migration-target client from the global list and release
 * it together with any still-pending link messages. */
static void reds_mig_target_client_free(RedsState *reds, RedsMigTargetClient *mig_client)
{
reds->mig_target_clients = g_list_remove(reds->mig_target_clients, mig_client);
g_list_free_full(mig_client->pending_links, g_free);
g_free(mig_client);
}
| 0
|
402,622
|
/*
 * DER-encode a certificate Validity (notBefore/notAfter) structure into
 * "der", allocated from the cms arena.  Returns 0 on success, negative
 * on failure (error already logged via cmsreterr/generate_time).
 */
generate_validity(cms_context *cms, SECItem *der, time_t start, time_t end)
{
Validity validity;
int rc;
rc = generate_time(cms, &validity.start, start);
if (rc < 0)
return rc;
rc = generate_time(cms, &validity.end, end);
if (rc < 0)
return rc;
void *ret;
ret = SEC_ASN1EncodeItem(cms->arena, der, &validity, ValidityTemplate);
if (ret == NULL)
cmsreterr(-1, cms, "could not encode validity");
return 0;
}
| 0
|
404,705
|
/* Light-weight fd lookup that also accepts O_PATH ("raw") descriptors:
 * the 0 mask means no f_mode bits are required/rejected.  The returned
 * value encodes the file pointer plus flags as per __fget_light(). */
unsigned long __fdget_raw(unsigned int fd)
{
return __fget_light(fd, 0);
}
| 0
|
312,515
|
/*
 * "setqflist()" function: replace or modify the quickfix list.
 * In Vim9 script, validate the argument types up front, then delegate to
 * the shared quickfix/location-list setter.
 */
f_setqflist(typval_T *argvars, typval_T *rettv)
{
if (in_vim9script()
&& (check_for_list_arg(argvars, 0) == FAIL
|| check_for_opt_string_arg(argvars, 1) == FAIL
|| (argvars[1].v_type != VAR_UNKNOWN
&& check_for_opt_dict_arg(argvars, 2) == FAIL)))
return;
set_qf_ll_list(NULL, &argvars[0], &argvars[1], &argvars[2], rettv);
}
| 0
|
274,855
|
// Element-wise GREATER on quantized (uint8) inputs: the comparison is
// expected to behave as if carried out on the dequantized values.
TEST(ComparisonsTest, GreaterQuantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_GREATER);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 6, 5});
model.Invoke();
// 1>1=F, 9>2=T, 7>6=T, 3>5=F
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
}
| 0
|
234,862
|
/*
 * Write the in-memory state of @device back to its DEV_ITEM in the chunk
 * tree.  Returns 0 on success, -ENOMEM if no path could be allocated,
 * -ENOENT if the item is missing, or a negative errno from the search.
 */
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device)
{
int ret;
struct btrfs_path *path;
struct btrfs_root *root = device->fs_info->chunk_root;
struct btrfs_dev_item *dev_item;
struct extent_buffer *leaf;
struct btrfs_key key;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.type = BTRFS_DEV_ITEM_KEY;
key.offset = device->devid;
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0)
goto out;
/* ret > 0 means the exact key was not found. */
if (ret > 0) {
ret = -ENOENT;
goto out;
}
leaf = path->nodes[0];
dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
btrfs_set_device_id(leaf, dev_item, device->devid);
btrfs_set_device_type(leaf, dev_item, device->type);
btrfs_set_device_io_align(leaf, dev_item, device->io_align);
btrfs_set_device_io_width(leaf, dev_item, device->io_width);
btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
btrfs_set_device_total_bytes(leaf, dev_item,
btrfs_device_get_disk_total_bytes(device));
btrfs_set_device_bytes_used(leaf, dev_item,
btrfs_device_get_bytes_used(device));
btrfs_mark_buffer_dirty(leaf);
out:
btrfs_free_path(path);
return ret;
}
| 0
|
384,771
|
/*
 * Correct the cursor position so that it lies within the window with the
 * wanted amount of context ('scrolloff' or mouse-drag distance) above and
 * below, WITHOUT scrolling the text: the window stays put and only the
 * cursor line may be moved.
 */
cursor_correct(void)
{
int above = 0; // screen lines above topline
linenr_T topline;
int below = 0; // screen lines below botline
linenr_T botline;
int above_wanted, below_wanted;
linenr_T cln; // Cursor Line Number
int max_off;
long so = get_scrolloff_value();
/*
* How many lines we would like to have above/below the cursor depends on
* whether the first/last line of the file is on screen.
*/
above_wanted = so;
below_wanted = so;
if (mouse_dragging > 0)
{
above_wanted = mouse_dragging - 1;
below_wanted = mouse_dragging - 1;
}
if (curwin->w_topline == 1)
{
// First file line visible: no context needed above, allow more below.
above_wanted = 0;
max_off = curwin->w_height / 2;
if (below_wanted > max_off)
below_wanted = max_off;
}
validate_botline();
if (curwin->w_botline == curbuf->b_ml.ml_line_count + 1
&& mouse_dragging == 0)
{
// Last file line visible: no context needed below, allow more above.
below_wanted = 0;
max_off = (curwin->w_height - 1) / 2;
if (above_wanted > max_off)
above_wanted = max_off;
}
/*
* If there are sufficient file-lines above and below the cursor, we can
* return now.
*/
cln = curwin->w_cursor.lnum;
if (cln >= curwin->w_topline + above_wanted
&& cln < curwin->w_botline - below_wanted
#ifdef FEAT_FOLDING
&& !hasAnyFolding(curwin)
#endif
)
return;
/*
* Narrow down the area where the cursor can be put by taking lines from
* the top and the bottom until:
* - the desired context lines are found
* - the lines from the top is past the lines from the bottom
*/
topline = curwin->w_topline;
botline = curwin->w_botline - 1;
#ifdef FEAT_DIFF
// count filler lines as context
above = curwin->w_topfill;
below = curwin->w_filler_rows;
#endif
while ((above < above_wanted || below < below_wanted) && topline < botline)
{
if (below < below_wanted && (below <= above || above >= above_wanted))
{
#ifdef FEAT_FOLDING
if (hasFolding(botline, &botline, NULL))
++below;
else
#endif
below += plines(botline);
--botline;
}
if (above < above_wanted && (above < below || below >= below_wanted))
{
#ifdef FEAT_FOLDING
if (hasFolding(topline, NULL, &topline))
++above;
else
#endif
above += PLINES_NOFILL(topline);
#ifdef FEAT_DIFF
// Count filler lines below this line as context.
if (topline < botline)
above += diff_check_fill(curwin, topline + 1);
#endif
++topline;
}
}
if (topline == botline || botline == 0)
curwin->w_cursor.lnum = topline;
else if (topline > botline)
curwin->w_cursor.lnum = botline;
else
{
// Move the cursor just inside [topline, botline] if it fell outside.
if (cln < topline && curwin->w_topline > 1)
{
curwin->w_cursor.lnum = topline;
curwin->w_valid &=
~(VALID_WROW|VALID_WCOL|VALID_CHEIGHT|VALID_CROW);
}
if (cln > botline && curwin->w_botline <= curbuf->b_ml.ml_line_count)
{
curwin->w_cursor.lnum = botline;
curwin->w_valid &=
~(VALID_WROW|VALID_WCOL|VALID_CHEIGHT|VALID_CROW);
}
}
curwin->w_valid |= VALID_TOPLINE;
}
| 0
|
300,739
|
/*
 * Dump this socket's publications into a netlink skb, resuming after
 * *last_publ when a previous dump stopped mid-list.  Returns 0 when all
 * publications were emitted, -EPIPE if the resume point disappeared, or
 * the error from __tipc_nl_add_sk_publ with *last_publ updated so the
 * next invocation resumes at the failed entry.
 */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
struct netlink_callback *cb,
struct tipc_sock *tsk, u32 *last_publ)
{
int err;
struct publication *p;
if (*last_publ) {
/* Find the publication we stopped at last time. */
list_for_each_entry(p, &tsk->publications, binding_sock) {
if (p->key == *last_publ)
break;
}
/* NOTE(review): if the loop runs to completion, p aliases the list
* head cast to a publication, so this p->key read touches the head
* sentinel; it serves as the not-found test here but relies on that
* layout — confirm against upstream. */
if (p->key != *last_publ) {
/* We never set seq or call nl_dump_check_consistent()
* this means that setting prev_seq here will cause the
* consistence check to fail in the netlink callback
* handler. Resulting in the last NLMSG_DONE message
* having the NLM_F_DUMP_INTR flag set.
*/
cb->prev_seq = 1;
*last_publ = 0;
return -EPIPE;
}
} else {
p = list_first_entry(&tsk->publications, struct publication,
binding_sock);
}
list_for_each_entry_from(p, &tsk->publications, binding_sock) {
err = __tipc_nl_add_sk_publ(skb, cb, p);
if (err) {
*last_publ = p->key;
return err;
}
}
*last_publ = 0;
return 0;
}
| 0
|
299,983
|
/* Decode one Elo touchscreen packet: bytes 2..5 carry little-endian X/Y,
 * bytes 6..7 the pressure (valid when bit 7 of the status byte data[1]
 * is set), and status bits 0..2 encode touch-down / touch-up. */
static void elo_process_data(struct input_dev *input, const u8 *data, int size)
{
int press;
input_report_abs(input, ABS_X, (data[3] << 8) | data[2]);
input_report_abs(input, ABS_Y, (data[5] << 8) | data[4]);
press = 0;
if (data[1] & 0x80)
press = (data[7] << 8) | data[6];
input_report_abs(input, ABS_PRESSURE, press);
if (data[1] & 0x03) {
input_report_key(input, BTN_TOUCH, 1);
input_sync(input);
}
if (data[1] & 0x04)
input_report_key(input, BTN_TOUCH, 0);
input_sync(input);
}
| 0
|
246,717
|
/*
 * Append the raw file named by the global "raw_cat" to the output file
 * "inName", copying in 4 KiB chunks and printing progress to stderr.
 * Returns via mp4box_cleanup(): 1 on open failure, 0 otherwise.
 */
static u32 do_raw_cat()
{
	char chunk[4096];
	FILE *fin, *fout;
	s64 to_copy, done;
	fin = gf_fopen(raw_cat, "rb");
	if (!fin) return mp4box_cleanup(1);
	fout = gf_fopen(inName, "a+b");
	if (!fout) {
		gf_fclose(fin);
		return mp4box_cleanup(1);
	}
	gf_fseek(fin, 0, SEEK_END);
	to_copy = gf_ftell(fin);
	gf_fseek(fin, 0, SEEK_SET);
	done = 0;
	while (1) {
		u32 nb_bytes = (u32) gf_fread(chunk, 4096, fin);
		/* A zero-byte read (I/O error, or the file shrank while we
		 * were copying) previously spun this loop forever because
		 * "done" could never reach "to_copy"; bail out instead. */
		if (!nb_bytes) break;
		gf_fwrite(chunk, nb_bytes, fout);
		done += nb_bytes;
		fprintf(stderr, "Appending file %s - %02.2f done\r", raw_cat, 100.0*done/to_copy);
		if (done >= to_copy) break;
	}
	gf_fclose(fin);
	gf_fclose(fout);
	return mp4box_cleanup(0);
}
| 0
|
196,846
|
// Entry point for the Div op: fetch inputs/output, then dispatch on the
// output tensor's type (float/int32 plain division, uint8 quantized).
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
// NOTE(review): no visible guard here against a zero divisor for the
// kTfLiteInt32 path before dispatching — presumably EvalDiv or the
// kernel checks it; confirm, since integer division by zero is UB.
EvalDiv<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8) {
TF_LITE_ENSURE_OK(
context, EvalQuantized<kernel_type>(context, node, params, data, input1,
input2, output));
} else {
context->ReportError(
context,
"Div only supports FLOAT32, INT32 and quantized UINT8 now, got %d.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
| 1
|
248,312
|
/* Return the name of a configuration section, or NULL when the section
 * pointer itself is NULL. */
DLLIMPORT const char *cfg_name(cfg_t *cfg)
{
	return cfg ? cfg->name : NULL;
}
| 0
|
336,576
|
/* Linear lookup of "index" in a NULL-name-terminated EnumNames table.
 * Returns the matching entry's name, or NULL when not found (the
 * terminator's name field). */
static const char *get_index_name(const EnumNames names[], uint32_t index)
{
	for (; names->name != NULL; names++) {
		if (names->id == index)
			break;
	}
	return names->name;
}
| 0
|
462,571
|
// Extract the host component of a URL; returns "" when the URL cannot be
// parsed or has no authority part (e.g. "file:///path" or "mailto:").
std::string controller::get_hostname_from_url(const std::string& url) {
	xmlURIPtr uri = xmlParseURI(url.c_str());
	std::string hostname;
	if (uri) {
		// uri->server is NULL for URLs without a host; constructing a
		// std::string from a NULL char* is undefined behavior (crash).
		if (uri->server) {
			hostname = uri->server;
		}
		xmlFreeURI(uri);
	}
	return hostname;
}
| 0
|
272,358
|
/* Find a certificate whose subject matches "name", using the generic
 * callback-based search.  Errors out (via cnreterr) when no subject name
 * was supplied. */
find_named_certificate(cms_context *cms, char *name, CERTCertificate **cert)
{
if (!name)
cnreterr(-1, cms, "no subject name specified");
return find_certificate_by_callback(cms, match_subject, name, cert);
}
| 0
|
450,330
|
/* Emit "len" in the Tight "compact length" encoding: 7 payload bits per
 * byte, most-significant bit set on every byte except the last, at most
 * three bytes (22 bits of length). */
static void tight_send_compact_size(VncState *vs, size_t len)
{
    char encoded[3] = {0, 0, 0};
    int count = 0;
    int i;

    encoded[count++] = len & 0x7F;
    if (len > 0x7F) {
        encoded[count - 1] |= 0x80;           /* continuation marker */
        encoded[count++] = (len >> 7) & 0x7F;
        if (len > 0x3FFF) {
            encoded[count - 1] |= 0x80;       /* continuation marker */
            encoded[count++] = (len >> 14) & 0xFF;
        }
    }
    for (i = 0; i < count; i++) {
        vnc_write_u8(vs, encoded[i]);
    }
}
| 0
|
96,947
|
// Deserialize a CFNumberRef from the IPC stream: read the CFNumberType
// enum, then the raw byte payload, validating that the payload length is
// exactly the size implied by the type before constructing the number.
// Returns false on any decode or validation failure.
bool decode(ArgumentDecoder* decoder, RetainPtr<CFNumberRef>& result)
{
CFNumberType numberType;
if (!decoder->decodeEnum(numberType))
return false;
CoreIPC::DataReference dataReference;
if (!decoder->decode(dataReference))
return false;
// Reject unknown number types (size 0) and truncated/oversized payloads.
size_t neededBufferSize = sizeForNumberType(numberType);
if (!neededBufferSize || dataReference.size() != neededBufferSize)
return false;
ASSERT(dataReference.data());
CFNumberRef number = CFNumberCreate(0, numberType, dataReference.data());
result.adoptCF(number);
return true;
}
| 0
|
264,717
|
// Constant-fold a Size op whose single input shape is fully known:
// record a scalar tensor holding the element count in
// *shape_replacement_map.  Returns false (no replacement) when the shape
// is not fully defined, or when the count does not fit the op's INT32
// output type.
bool MaybeReplaceSizeOp(const Node* n,
const std::vector<PartialTensorShape>& input_shapes,
std::unordered_map<const Node*, std::vector<Tensor>>*
shape_replacement_map) {
CHECK_EQ(input_shapes.size(), 1);
if (!input_shapes[0].IsFullyDefined()) {
return false;
}
DataType op_type = n->output_type(0);
Tensor t(op_type, TensorShape({}));
int64_t size = input_shapes[0].num_elements();
if (op_type == DT_INT64) {
t.scalar<int64_t>()() = size;
} else {
CHECK(op_type == DT_INT32);
// Folding would bake in an overflowed value; leave the op alone so the
// overflow surfaces as the usual runtime error instead.
if (size > INT_MAX) {
VLOG(1) << "Node " << n->name() << " has input shape size " << size
<< " but type INT32 "
<< " so not replacing as constant: this will trigger a runtime "
"error later.";
return false;
}
t.scalar<int32>()() = static_cast<int32>(size);
}
shape_replacement_map->insert({n, {t}});
return true;
}
| 0
|
216,637
|
SCM_DEFINE (scm_mkdir, "mkdir", 1, 1, 0,
(SCM path, SCM mode),
"Create a new directory named by @var{path}. If @var{mode} is omitted\n"
"then the permissions of the directory file are set using the current\n"
"umask. Otherwise they are set to the decimal value specified with\n"
"@var{mode}. The return value is unspecified.")
#define FUNC_NAME s_scm_mkdir
{
int rv;
if (SCM_UNBNDP (mode))
{
/* Pass the full 0777 mode and let the kernel apply the process
umask itself.  The previous umask(0)/umask(mask) read-back
briefly cleared the umask, opening a window in which other
threads could create world-writable files (CVE-2016-8605);
mkdir(path, 0777) yields the identical 0777 & ~umask result
without that race. */
STRING_SYSCALL (path, c_path, rv = mkdir (c_path, 0777));
}
else
{
STRING_SYSCALL (path, c_path, rv = mkdir (c_path, scm_to_uint (mode)));
}
if (rv != 0)
SCM_SYSERROR;
return SCM_UNSPECIFIED;
}
| 1
|
384,193
|
/*
 * Clone the set's per-element expressions into expr_array[].  On any
 * failure every already-cloned expression is destroyed and -ENOMEM is
 * returned (allocation and clone failures are both reported as -ENOMEM).
 */
int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_expr *expr_array[])
{
struct nft_expr *expr;
int err, i, k;
for (i = 0; i < set->num_exprs; i++) {
expr = kzalloc(set->exprs[i]->ops->size, GFP_KERNEL_ACCOUNT);
if (!expr)
goto err_expr;
err = nft_expr_clone(expr, set->exprs[i]);
if (err < 0) {
kfree(expr);
goto err_expr;
}
expr_array[i] = expr;
}
return 0;
err_expr:
/* Unwind: destroy the clones made so far (index i itself was freed
* or never allocated). */
for (k = i - 1; k >= 0; k--)
nft_expr_destroy(ctx, expr_array[k]);
return -ENOMEM;
}
| 0
|
492,680
|
/*
 * Central DECSET/DECRESET handler.  Exactly one of the mode flags
 * applies per call: "set"/unset changes the mode, "save" stores the
 * current state in the dec_saved hash, "restore" reloads it from there.
 * Each supported mode is described by one row of the settings[] table:
 * a boolean flag, an integer flag, or a pointer flag to flip between
 * fvalue/tvalue, plus optional set/reset handler callbacks.  Modes the
 * widget deliberately ignores have all-NULL rows.  After the table is
 * applied, the switch below performs mode-specific side effects
 * (repaints, cursor homing, screen switches, debug traces).
 */
vte_sequence_handler_decset_internal(VteTerminal *terminal,
int setting,
gboolean restore,
gboolean save,
gboolean set)
{
gboolean recognized = FALSE;
gpointer p;
guint i;
struct {
int setting;
gboolean *bvalue;
gint *ivalue;
gpointer *pvalue;
gpointer fvalue;
gpointer tvalue;
VteTerminalSequenceHandler reset, set;
} settings[] = {
/* 1: Application/normal cursor keys. */
{1, NULL, &terminal->pvt->cursor_mode, NULL,
GINT_TO_POINTER(VTE_KEYMODE_NORMAL),
GINT_TO_POINTER(VTE_KEYMODE_APPLICATION),
NULL, NULL,},
/* 2: disallowed, we don't do VT52. */
{2, NULL, NULL, NULL, NULL, NULL, NULL, NULL,},
/* 3: disallowed, window size is set by user. */
{3, NULL, NULL, NULL, NULL, NULL, NULL, NULL,},
/* 4: Smooth scroll. */
{4, &terminal->pvt->smooth_scroll, NULL, NULL,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL,},
/* 5: Reverse video. */
{5, &terminal->pvt->screen->reverse_mode, NULL, NULL,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL,},
/* 6: Origin mode: when enabled, cursor positioning is
* relative to the scrolling region. */
{6, &terminal->pvt->screen->origin_mode, NULL, NULL,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL,},
/* 7: Wraparound mode. */
{7, &terminal->pvt->flags.am, NULL, NULL,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL,},
/* 8: disallowed, keyboard repeat is set by user. */
{8, NULL, NULL, NULL, NULL, NULL, NULL, NULL,},
/* 9: Send-coords-on-click. */
{9, NULL, &terminal->pvt->mouse_tracking_mode, NULL,
GINT_TO_POINTER(0),
GINT_TO_POINTER(MOUSE_TRACKING_SEND_XY_ON_CLICK),
NULL, NULL,},
/* 12: disallowed, cursor blinks is set by user. */
{12, NULL, NULL, NULL, NULL, NULL, NULL, NULL,},
/* 18: print form feed. */
/* 19: set print extent to full screen. */
/* 25: Cursor visible. */
{25, &terminal->pvt->cursor_visible, NULL, NULL,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL,},
/* 30/rxvt: disallowed, scrollbar visibility is set by user. */
{30, NULL, NULL, NULL, NULL, NULL, NULL, NULL,},
/* 35/rxvt: disallowed, fonts set by user. */
{35, NULL, NULL, NULL, NULL, NULL, NULL, NULL,},
/* 38: enter Tektronix mode. */
/* 40: disallowed, the user sizes dynamically. */
{40, NULL, NULL, NULL, NULL, NULL, NULL, NULL,},
/* 41: more(1) fix. */
/* 42: Enable NLS replacements. */
{42, &terminal->pvt->nrc_mode, NULL, NULL,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL,},
/* 44: Margin bell. */
{44, &terminal->pvt->margin_bell, NULL, NULL,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL,},
/* 47: Alternate screen. */
{47, NULL, NULL, (gpointer) &terminal->pvt->screen,
&terminal->pvt->normal_screen,
&terminal->pvt->alternate_screen,
NULL, NULL,},
/* 66: Keypad mode. */
{66, &terminal->pvt->keypad_mode, NULL, NULL,
GINT_TO_POINTER(VTE_KEYMODE_NORMAL),
GINT_TO_POINTER(VTE_KEYMODE_APPLICATION),
NULL, NULL,},
/* 67: disallowed, backspace key policy is set by user. */
{67, NULL, NULL, NULL, NULL, NULL, NULL, NULL,},
/* 1000: Send-coords-on-button. */
{1000, NULL, &terminal->pvt->mouse_tracking_mode, NULL,
GINT_TO_POINTER(0),
GINT_TO_POINTER(MOUSE_TRACKING_SEND_XY_ON_BUTTON),
NULL, NULL,},
/* 1001: Hilite tracking. */
{1001, NULL, &terminal->pvt->mouse_tracking_mode, NULL,
GINT_TO_POINTER(0),
GINT_TO_POINTER(MOUSE_TRACKING_HILITE_TRACKING),
NULL, NULL,},
/* 1002: Cell motion tracking. */
{1002, NULL, &terminal->pvt->mouse_tracking_mode, NULL,
GINT_TO_POINTER(0),
GINT_TO_POINTER(MOUSE_TRACKING_CELL_MOTION_TRACKING),
NULL, NULL,},
/* 1003: All motion tracking. */
{1003, NULL, &terminal->pvt->mouse_tracking_mode, NULL,
GINT_TO_POINTER(0),
GINT_TO_POINTER(MOUSE_TRACKING_ALL_MOTION_TRACKING),
NULL, NULL,},
/* 1010/rxvt: disallowed, scroll-on-output is set by user. */
{1010, NULL, NULL, NULL, NULL, NULL, NULL, NULL,},
/* 1011/rxvt: disallowed, scroll-on-keypress is set by user. */
{1011, NULL, NULL, NULL, NULL, NULL, NULL, NULL,},
/* 1035: disallowed, don't know what to do with it. */
{1035, NULL, NULL, NULL, NULL, NULL, NULL, NULL,},
/* 1036: Meta-sends-escape. */
{1036, &terminal->pvt->meta_sends_escape, NULL, NULL,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL,},
/* 1037: disallowed, delete key policy is set by user. */
{1037, NULL, NULL, NULL, NULL, NULL, NULL, NULL,},
/* 1047: Use alternate screen buffer. */
{1047, NULL, NULL, (gpointer) &terminal->pvt->screen,
&terminal->pvt->normal_screen,
&terminal->pvt->alternate_screen,
NULL, NULL,},
/* 1048: Save/restore cursor position. */
{1048, NULL, NULL, NULL,
NULL,
NULL,
vte_sequence_handler_rc,
vte_sequence_handler_sc,},
/* 1049: Use alternate screen buffer, saving the cursor
* position. */
{1049, NULL, NULL, (gpointer) &terminal->pvt->screen,
&terminal->pvt->normal_screen,
&terminal->pvt->alternate_screen,
vte_sequence_handler_rc,
vte_sequence_handler_sc,},
/* 1051: Sun function key mode. */
{1051, NULL, NULL, (gpointer) &terminal->pvt->sun_fkey_mode,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL},
/* 1052: HP function key mode. */
{1052, NULL, NULL, (gpointer) &terminal->pvt->hp_fkey_mode,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL},
/* 1060: Legacy function key mode. */
{1060, NULL, NULL, (gpointer) &terminal->pvt->legacy_fkey_mode,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL},
/* 1061: VT220 function key mode. */
{1061, NULL, NULL, (gpointer) &terminal->pvt->vt220_fkey_mode,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL},
/* 2004: Bracketed paste mode. */
{2004, &terminal->pvt->screen->bracketed_paste_mode, NULL, NULL,
GINT_TO_POINTER(FALSE),
GINT_TO_POINTER(TRUE),
NULL, NULL,},
};
/* Handle the setting. */
for (i = 0; i < G_N_ELEMENTS(settings); i++)
if (settings[i].setting == setting) {
recognized = TRUE;
/* Handle settings we want to ignore. */
if ((settings[i].fvalue == settings[i].tvalue) &&
(settings[i].set == NULL) &&
(settings[i].reset == NULL)) {
continue;
}
/* Read the old setting. */
if (restore) {
p = g_hash_table_lookup(terminal->pvt->dec_saved,
GINT_TO_POINTER(setting));
set = (p != NULL);
_vte_debug_print(VTE_DEBUG_PARSE,
"Setting %d was %s.\n",
setting, set ? "set" : "unset");
}
/* Save the current setting. */
if (save) {
if (settings[i].bvalue) {
set = *(settings[i].bvalue) != FALSE;
} else
if (settings[i].ivalue) {
set = *(settings[i].ivalue) ==
GPOINTER_TO_INT(settings[i].tvalue);
} else
if (settings[i].pvalue) {
set = *(settings[i].pvalue) ==
settings[i].tvalue;
}
_vte_debug_print(VTE_DEBUG_PARSE,
"Setting %d is %s, saving.\n",
setting, set ? "set" : "unset");
g_hash_table_insert(terminal->pvt->dec_saved,
GINT_TO_POINTER(setting),
GINT_TO_POINTER(set));
}
/* Change the current setting to match the new/saved value. */
if (!save) {
_vte_debug_print(VTE_DEBUG_PARSE,
"Setting %d to %s.\n",
setting, set ? "set" : "unset");
if (settings[i].set && set) {
settings[i].set (terminal, NULL);
}
if (settings[i].bvalue) {
*(settings[i].bvalue) = set;
} else
if (settings[i].ivalue) {
*(settings[i].ivalue) = set ?
GPOINTER_TO_INT(settings[i].tvalue) :
GPOINTER_TO_INT(settings[i].fvalue);
} else
if (settings[i].pvalue) {
*(settings[i].pvalue) = set ?
settings[i].tvalue :
settings[i].fvalue;
}
if (settings[i].reset && !set) {
settings[i].reset (terminal, NULL);
}
}
}
/* Do whatever's necessary when the setting changes. */
switch (setting) {
case 1:
_vte_debug_print(VTE_DEBUG_KEYBOARD, set ?
"Entering application cursor mode.\n" :
"Leaving application cursor mode.\n");
break;
#if 0 /* 3: disallowed, window size is set by user. */
case 3:
vte_terminal_emit_resize_window(terminal,
(set ? 132 : 80) *
terminal->char_width +
terminal->pvt->inner_border.left +
terminal->pvt->inner_border.right,
terminal->row_count *
terminal->char_height +
terminal->pvt->inner_border.top +
terminal->pvt->inner_border.bottom);
/* Request a resize and redraw. */
_vte_invalidate_all(terminal);
break;
#endif
case 5:
/* Repaint everything in reverse mode. */
_vte_invalidate_all(terminal);
break;
case 6:
/* Reposition the cursor in its new home position. */
terminal->pvt->screen->cursor_current.col = 0;
terminal->pvt->screen->cursor_current.row =
terminal->pvt->screen->insert_delta;
break;
case 47:
case 1047:
case 1049:
/* Clear the alternate screen if we're switching
* to it, and home the cursor. */
if (set) {
_vte_terminal_clear_screen (terminal);
_vte_terminal_home_cursor (terminal);
}
/* Reset scrollbars and repaint everything. */
gtk_adjustment_set_value(terminal->adjustment,
terminal->pvt->screen->scroll_delta);
vte_terminal_set_scrollback_lines(terminal,
terminal->pvt->scrollback_lines);
_vte_terminal_queue_contents_changed(terminal);
_vte_invalidate_all (terminal);
break;
case 9:
case 1000:
case 1001:
case 1002:
case 1003:
/* Make the pointer visible. */
_vte_terminal_set_pointer_visible(terminal, TRUE);
break;
case 66:
_vte_debug_print(VTE_DEBUG_KEYBOARD, set ?
"Entering application keypad mode.\n" :
"Leaving application keypad mode.\n");
break;
case 1051:
_vte_debug_print(VTE_DEBUG_KEYBOARD, set ?
"Entering Sun fkey mode.\n" :
"Leaving Sun fkey mode.\n");
break;
case 1052:
_vte_debug_print(VTE_DEBUG_KEYBOARD, set ?
"Entering HP fkey mode.\n" :
"Leaving HP fkey mode.\n");
break;
case 1060:
_vte_debug_print(VTE_DEBUG_KEYBOARD, set ?
"Entering Legacy fkey mode.\n" :
"Leaving Legacy fkey mode.\n");
break;
case 1061:
_vte_debug_print(VTE_DEBUG_KEYBOARD, set ?
"Entering VT220 fkey mode.\n" :
"Leaving VT220 fkey mode.\n");
break;
default:
break;
}
if (!recognized) {
_vte_debug_print (VTE_DEBUG_MISC,
"DECSET/DECRESET mode %d not recognized, ignoring.\n",
setting);
}
}
| 0
|
513,346
|
/*
 * Collect MATCH()-based (full-text) key usage from a WHERE condition into
 * keyuse_array.  Recognizes a bare MATCH() predicate, comparisons of the
 * form MATCH() > const / MATCH() >= const (and the mirrored const <
 * MATCH() forms), and recurses into AND conjunctions.  Returns TRUE on
 * out-of-memory from insert_dynamic, FALSE otherwise.
 */
add_ft_keys(DYNAMIC_ARRAY *keyuse_array,
JOIN_TAB *stat,COND *cond,table_map usable_tables)
{
Item_func_match *cond_func=NULL;
if (!cond)
return FALSE;
if (cond->type() == Item::FUNC_ITEM)
{
Item_func *func=(Item_func *)cond;
Item_func::Functype functype= func->functype();
if (functype == Item_func::FT_FUNC)
cond_func=(Item_func_match *)cond;
else if (func->argument_count() == 2)
{
/* MATCH(...) <cmp> const, in either argument order; only usable when
the comparison cannot be satisfied by a zero relevance. */
Item *arg0=(Item *)(func->arguments()[0]),
*arg1=(Item *)(func->arguments()[1]);
if (arg1->const_item() && arg1->cols() == 1 &&
arg0->type() == Item::FUNC_ITEM &&
((Item_func *) arg0)->functype() == Item_func::FT_FUNC &&
((functype == Item_func::GE_FUNC && arg1->val_real() > 0) ||
(functype == Item_func::GT_FUNC && arg1->val_real() >=0)))
cond_func= (Item_func_match *) arg0;
else if (arg0->const_item() && arg0->cols() == 1 &&
arg1->type() == Item::FUNC_ITEM &&
((Item_func *) arg1)->functype() == Item_func::FT_FUNC &&
((functype == Item_func::LE_FUNC && arg0->val_real() > 0) ||
(functype == Item_func::LT_FUNC && arg0->val_real() >=0)))
cond_func= (Item_func_match *) arg1;
}
}
else if (cond->type() == Item::COND_ITEM)
{
List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list());
/* Only AND lists are searched; an OR branch cannot force the match. */
if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
{
Item *item;
while ((item=li++))
{
if (add_ft_keys(keyuse_array,stat,item,usable_tables))
return TRUE;
}
}
}
if (!cond_func || cond_func->key == NO_SUCH_KEY ||
!(usable_tables & cond_func->table->map))
return FALSE;
KEYUSE keyuse;
keyuse.table= cond_func->table;
keyuse.val = cond_func;
keyuse.key = cond_func->key;
keyuse.keypart= FT_KEYPART;
keyuse.used_tables=cond_func->key_item()->used_tables();
keyuse.optimize= 0;
keyuse.keypart_map= 0;
keyuse.sj_pred_no= UINT_MAX;
return insert_dynamic(keyuse_array,(uchar*) &keyuse);
}
| 0
|
349,250
|
/*
 * Probe/convert a squashfs 1.0 superblock: verify magic and version,
 * translate the on-disk fields into the generic sBlk, and select the
 * v1 operations table and compressor.  Returns TRUE on success, -1 when
 * this is not a 1.0 filesystem.
 */
int read_super_1(squashfs_operations **s_ops, void *s)
{
squashfs_super_block_3 *sBlk_3 = s;
if(sBlk_3->s_magic != SQUASHFS_MAGIC || sBlk_3->s_major != 1 ||
sBlk_3->s_minor != 0)
return -1;
sBlk.s.s_magic = sBlk_3->s_magic;
sBlk.s.inodes = sBlk_3->inodes;
sBlk.s.mkfs_time = sBlk_3->mkfs_time;
sBlk.s.block_size = sBlk_3->block_size_1;
/* 1.0 has no fragments and no xattrs; mark those tables absent. */
sBlk.s.fragments = 0;
sBlk.s.block_log = sBlk_3->block_log;
sBlk.s.flags = sBlk_3->flags;
sBlk.s.s_major = sBlk_3->s_major;
sBlk.s.s_minor = sBlk_3->s_minor;
sBlk.s.root_inode = sBlk_3->root_inode;
sBlk.s.bytes_used = sBlk_3->bytes_used_2;
sBlk.s.inode_table_start = sBlk_3->inode_table_start_2;
sBlk.s.directory_table_start = sBlk_3->directory_table_start_2;
sBlk.s.fragment_table_start = SQUASHFS_INVALID_BLK;
sBlk.s.lookup_table_start = sBlk_3->lookup_table_start;
sBlk.no_uids = sBlk_3->no_uids;
sBlk.no_guids = sBlk_3->no_guids;
sBlk.uid_start = sBlk_3->uid_start_2;
sBlk.guid_start = sBlk_3->guid_start_2;
sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK;
*s_ops = &ops;
/*
* 1.x filesystems use gzip compression.
*/
comp = lookup_compressor("gzip");
return TRUE;
}
| 0
|
229,158
|
/* Mark a port id as present on this virtio-serial bus and notify the
 * guest with a PORT_ADD control event. */
static void add_port(VirtIOSerial *vser, uint32_t port_id)
{
mark_port_added(vser, port_id);
send_control_event(vser, port_id, VIRTIO_CONSOLE_PORT_ADD, 1);
}
| 0
|
272,341
|
/*
 * Locate the NSS token named by cms->tokenname and log in to it if it
 * requires authentication, using the configured password callback.
 * Returns 0 on success, -1 on any failure (with an error logged).
 */
unlock_nss_token(cms_context *cms)
{
/* NOTE(review): tokenname is never freed on any path below — this leaks
* unless resolve_token_name() returns static/borrowed storage; confirm
* its contract. */
char *tokenname = resolve_token_name(cms->tokenname);
dprintf("setting password function to %s", cms->func ? "cms->func" : "SECU_GetModulePassword");
PK11_SetPasswordFunc(cms->func ? cms->func : SECU_GetModulePassword);
PK11SlotList *slots = NULL;
slots = PK11_GetAllTokens(CKM_RSA_PKCS, PR_FALSE, PR_TRUE, cms);
if (!slots)
cnreterr(-1, cms, "could not get pk11 token list");
PK11SlotListElement *psle = NULL;
psle = PK11_GetFirstSafe(slots);
if (!psle) {
save_port_err() {
PK11_FreeSlotList(slots);
}
cnreterr(-1, cms, "could not get pk11 safe");
}
/* Walk the slot list looking for the token with the requested name. */
while (psle) {
if (!strcmp(tokenname, PK11_GetTokenName(psle->slot)))
break;
psle = PK11_GetNextSafe(slots, psle, PR_FALSE);
}
if (!psle) {
save_port_err() {
PK11_FreeSlotList(slots);
}
nssreterr(-1, "Could not find token \"%s\"", tokenname);
}
SECStatus status;
if (PK11_NeedLogin(psle->slot) &&
!PK11_IsLoggedIn(psle->slot, cms)) {
status = PK11_Authenticate(psle->slot, PR_TRUE, cms);
if (status != SECSuccess) {
save_port_err() {
int err = PORT_GetError();
PK11_DestroySlotListElement(slots, &psle);
PK11_FreeSlotList(slots);
cms->log(cms, LOG_ERR,
"authentication failed for token \"%s\": %s",
tokenname, PORT_ErrorToString(err));
}
return -1;
}
}
PK11_DestroySlotListElement(slots, &psle);
PK11_FreeSlotList(slots);
return 0;
}
| 0
|
222,832
|
// Remove and return the first node in queue_ (its ordering decides the
// priority).  Precondition: the queue must not be empty.
const NodeDef* pop() {
CHECK(!empty());
auto it = queue_.begin();
const NodeDef* n = it->first;
queue_.erase(it);
return n;
}
| 0
|
438,682
|
/* Send a message over an rpmsg endpoint with explicit src/dst addresses,
 * waiting for a transmit buffer to become available (wait == true). */
static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
u32 dst, void *data, int len)
{
struct rpmsg_device *rpdev = ept->rpdev;
return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
}
| 0
|
436,036
|
/*
 * Drain one rsrc node's pending put list: post a CQE for each tagged
 * resource (taking the uring lock under IOPOLL), release each resource
 * via do_put, then destroy the node and complete rsrc_data->done when
 * the data's refcount drops to zero.
 */
static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
{
struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
struct io_ring_ctx *ctx = rsrc_data->ctx;
struct io_rsrc_put *prsrc, *tmp;
list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
list_del(&prsrc->list);
if (prsrc->tag) {
/* IOPOLL rings require uring_lock to post completions. */
bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
io_ring_submit_lock(ctx, lock_ring);
spin_lock_irq(&ctx->completion_lock);
io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
ctx->cq_extra++;
io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
io_ring_submit_unlock(ctx, lock_ring);
}
rsrc_data->do_put(ctx, prsrc);
kfree(prsrc);
}
io_rsrc_node_destroy(ref_node);
if (atomic_dec_and_test(&rsrc_data->refs))
complete(&rsrc_data->done);
| 0
|
252,283
|
static int hufEncode // return: output size (in bits)
    (const long long *hcode, // i : encoding table
     const unsigned short *in, // i : uncompressed input buffer
     const int ni, // i : input buffer size (in shorts)
     int rlc, // i : rl code
     char *out) // o: compressed output buffer
{
    char *outStart = out;
    long long c = 0; // bits not yet written to out
    int lc = 0; // number of valid bits in c (LSB)

    //
    // Guard: in[0] is read unconditionally below, so an empty input
    // would be an out-of-bounds read.  Nothing to encode -> 0 bits.
    //
    if (ni <= 0) return 0;

    int s = in[0];
    int cs = 0;
    //
    // Loop on input values
    //
    for (int i = 1; i < ni; i++) {
        //
        // Count same values or send code
        //
        if (s == in[i] && cs < 255) {
            cs++;
        } else {
            sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
            cs = 0;
        }
        s = in[i];
    }
    //
    // Send remaining code
    //
    sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
    // Flush any bits still pending in the accumulator.
    if (lc) *out = (c << (8 - lc)) & 0xff;
    return (out - outStart) * 8 + lc;
}
| 0
|
245,710
|
/*
 * Read all HTTP headers from "fd" into "hashofheaders", folding
 * continuation (LWS) lines into the preceding header field.  Returns 0
 * once the terminating blank line is seen, -1 on read error, memory
 * failure, or when MAX_HEADERS is exceeded.
 */
static int get_all_headers (int fd, orderedmap hashofheaders)
{
char *line = NULL;
char *header = NULL;
int count;
char *tmp;
ssize_t linelen;
ssize_t len = 0;
unsigned int double_cgi = FALSE; /* boolean */
assert (fd >= 0);
assert (hashofheaders != NULL);
for (count = 0; count < MAX_HEADERS; count++) {
if ((linelen = readline (fd, &line)) <= 0) {
safefree (header);
safefree (line);
return -1;
}
/*
* If we received a CR LF or a non-continuation line, then add
* the accumulated header field, if any, to the hashmap, and
* reset it.
*/
if (CHECK_CRLF (line, linelen) || !CHECK_LWS (line, linelen)) {
if (!double_cgi
&& len > 0
&& add_header_to_connection (hashofheaders, header,
len) < 0) {
safefree (header);
safefree (line);
return -1;
}
len = 0;
}
/*
* If we received just a CR LF on a line, the headers are
* finished.
*/
if (CHECK_CRLF (line, linelen)) {
safefree (header);
safefree (line);
return 0;
}
/*
* BUG FIX: The following code detects a "Double CGI"
* situation so that we can handle the nonconforming system.
* This problem was found when accessing cgi.ebay.com, and it
* turns out to be a wider spread problem as well.
*
* If "Double CGI" is in effect, duplicate headers are
* ignored.
*
* FIXME: Might need to change this to a more robust check.
*/
if (linelen >= 5 && strncasecmp (line, "HTTP/", 5) == 0) {
double_cgi = TRUE;
}
/*
* Append the new line to the current header field.
*/
tmp = (char *) saferealloc (header, len + linelen);
if (tmp == NULL) {
safefree (header);
safefree (line);
return -1;
}
header = tmp;
memcpy (header + len, line, linelen);
len += linelen;
safefree (line);
}
/*
* If we get here, this means we reached MAX_HEADERS count.
* Bail out with error.
*/
safefree (header);
safefree (line);
return -1;
}
| 0
|
275,988
|
/* Zero every word of a big-integer (VLI) buffer of num_words words. */
uECC_VLI_API void uECC_vli_clear(uECC_word_t *vli, wordcount_t num_words) {
    wordcount_t remaining = num_words;
    while (remaining > 0) {
        vli[--remaining] = 0;
    }
}
| 0
|
235,252
|
/*
 * Torture test for RAW_WRITE_WRITEUNLOCK (SMBwriteunlock).
 *
 * Exercises the write-and-unlock SMB against a test file: a zero-length
 * write, a small write without/with a held lock, a large locked write,
 * an invalid file handle, and (when CAP_LARGE_FILES is negotiated) a
 * write near the 2^32 offset boundary on a sparse file. Written data is
 * read back and verified against the seeded pattern.
 *
 * Returns true on success; failures set ret=false and/or jump to "done"
 * so the handle, session and test directory are always cleaned up.
 */
static bool test_writeunlock(struct torture_context *tctx,
			     struct smbcli_state *cli)
{
	union smb_write io;
	NTSTATUS status;
	bool ret = true;
	int fnum;
	uint8_t *buf;
	const int maxsize = 90000;
	const char *fname = BASEDIR "\\test.txt";
	unsigned int seed = time(NULL);
	union smb_fileinfo finfo;

	buf = talloc_zero_array(tctx, uint8_t, maxsize);
	/* BUGFIX: the allocation result was previously used unchecked; a
	 * failed allocation would have dereferenced NULL further down. */
	if (buf == NULL) {
		torture_fail(tctx, "talloc_zero_array() failed");
	}

	if (!cli->transport->negotiate.lockread_supported) {
		torture_skip(tctx, "Server does not support writeunlock - skipping\n");
	}

	if (!torture_setup_dir(cli, BASEDIR)) {
		torture_fail(tctx, "failed to setup basedir");
	}

	torture_comment(tctx, "Testing RAW_WRITE_WRITEUNLOCK\n");
	io.generic.level = RAW_WRITE_WRITEUNLOCK;

	fnum = smbcli_open(cli->tree, fname, O_RDWR|O_CREAT, DENY_NONE);
	if (fnum == -1) {
		ret = false;
		torture_fail_goto(tctx, done, talloc_asprintf(tctx, "Failed to create %s - %s\n", fname, smbcli_errstr(cli->tree)));
	}

	torture_comment(tctx, "Trying zero write\n");
	io.writeunlock.in.file.fnum = fnum;
	io.writeunlock.in.count = 0;
	io.writeunlock.in.offset = 0;
	io.writeunlock.in.remaining = 0;
	io.writeunlock.in.data = buf;
	status = smb_raw_write(cli->tree, &io);
	CHECK_STATUS(status, NT_STATUS_OK);
	CHECK_VALUE(io.writeunlock.out.nwritten, io.writeunlock.in.count);

	setup_buffer(buf, seed, maxsize);

	torture_comment(tctx, "Trying small write\n");
	io.writeunlock.in.count = 9;
	io.writeunlock.in.offset = 4;
	io.writeunlock.in.data = buf;
	/* No byte-range lock is held yet, so the unlock part must fail. */
	status = smb_raw_write(cli->tree, &io);
	CHECK_STATUS(status, NT_STATUS_RANGE_NOT_LOCKED);
	if (smbcli_read(cli->tree, fnum, buf, 0, 13) != 13) {
		ret = false;
		torture_fail_goto(tctx, done, talloc_asprintf(tctx, "read failed at %s\n", __location__));
	}
	CHECK_BUFFER(buf+4, seed, 9);
	CHECK_VALUE(IVAL(buf,0), 0);

	setup_buffer(buf, seed, maxsize);
	/* Now take the lock first; the same write must then succeed. */
	smbcli_lock(cli->tree, fnum, io.writeunlock.in.offset, io.writeunlock.in.count,
		    0, WRITE_LOCK);
	status = smb_raw_write(cli->tree, &io);
	CHECK_STATUS(status, NT_STATUS_OK);
	CHECK_VALUE(io.writeunlock.out.nwritten, io.writeunlock.in.count);

	memset(buf, 0, maxsize);
	if (smbcli_read(cli->tree, fnum, buf, 0, 13) != 13) {
		ret = false;
		torture_fail_goto(tctx, done, talloc_asprintf(tctx, "read failed at %s\n", __location__));
	}
	CHECK_BUFFER(buf+4, seed, 9);
	CHECK_VALUE(IVAL(buf,0), 0);

	setup_buffer(buf, seed, maxsize);

	torture_comment(tctx, "Trying large write\n");
	io.writeunlock.in.count = 4000;
	io.writeunlock.in.offset = 0;
	io.writeunlock.in.data = buf;
	smbcli_lock(cli->tree, fnum, io.writeunlock.in.offset, io.writeunlock.in.count,
		    0, WRITE_LOCK);
	status = smb_raw_write(cli->tree, &io);
	CHECK_STATUS(status, NT_STATUS_OK);
	CHECK_VALUE(io.writeunlock.out.nwritten, 4000);
	/* The first write consumed (unlocked) the lock, so repeating fails. */
	status = smb_raw_write(cli->tree, &io);
	CHECK_STATUS(status, NT_STATUS_RANGE_NOT_LOCKED);

	memset(buf, 0, maxsize);
	if (smbcli_read(cli->tree, fnum, buf, 0, 4000) != 4000) {
		ret = false;
		torture_fail_goto(tctx, done, talloc_asprintf(tctx, "read failed at %s\n", __location__));
	}
	CHECK_BUFFER(buf, seed, 4000);

	torture_comment(tctx, "Trying bad fnum\n");
	io.writeunlock.in.file.fnum = fnum+1;
	io.writeunlock.in.count = 4000;
	io.writeunlock.in.offset = 0;
	io.writeunlock.in.data = buf;
	status = smb_raw_write(cli->tree, &io);
	CHECK_STATUS(status, NT_STATUS_INVALID_HANDLE);

	torture_comment(tctx, "Setting file as sparse\n");
	status = torture_set_sparse(cli->tree, fnum);
	CHECK_STATUS(status, NT_STATUS_OK);

	if (!(cli->transport->negotiate.capabilities & CAP_LARGE_FILES)) {
		torture_skip(tctx, "skipping large file tests - CAP_LARGE_FILES not set\n");
	}

	torture_comment(tctx, "Trying 2^32 offset\n");
	setup_buffer(buf, seed, maxsize);
	io.writeunlock.in.file.fnum = fnum;
	io.writeunlock.in.count = 4000;
	io.writeunlock.in.offset = 0xFFFFFFFF - 2000;
	io.writeunlock.in.data = buf;
	smbcli_lock(cli->tree, fnum, io.writeunlock.in.offset, io.writeunlock.in.count,
		    0, WRITE_LOCK);
	status = smb_raw_write(cli->tree, &io);
	CHECK_STATUS(status, NT_STATUS_OK);
	CHECK_VALUE(io.writeunlock.out.nwritten, 4000);
	CHECK_ALL_INFO(io.writeunlock.in.count + (uint64_t)io.writeunlock.in.offset, size);

	memset(buf, 0, maxsize);
	if (smbcli_read(cli->tree, fnum, buf, io.writeunlock.in.offset, 4000) != 4000) {
		ret = false;
		torture_fail_goto(tctx, done, talloc_asprintf(tctx, "read failed at %s\n", __location__));
	}
	CHECK_BUFFER(buf, seed, 4000);

done:
	smbcli_close(cli->tree, fnum);
	smb_raw_exit(cli->session);
	smbcli_deltree(cli->tree, BASEDIR);
	return ret;
}
| 0
|
385,919
|
/*
 * chown(2): change the owner and group of the file named by @filename.
 *
 * Thin wrapper around sys_fchownat(): AT_FDCWD makes relative paths
 * resolve against the caller's current working directory, and flags=0
 * requests default pathname resolution (no AT_* modifiers).
 */
SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
{
	return sys_fchownat(AT_FDCWD, filename, user, group, 0);
}
| 0
|
502,726
|
/*
 * Install an info callback on the SSL_CTX; it is simply stored in
 * ctx->info_callback. Passing a NULL cb clears any previous callback.
 * NOTE(review): no locking is performed here — presumably callers
 * configure the context before sharing it across threads (confirm).
 */
void SSL_CTX_set_info_callback(SSL_CTX *ctx,
                               void (*cb) (const SSL *ssl, int type, int val))
{
    ctx->info_callback = cb;
}
| 0
|
225,913
|
/*
 * Parse an audio sample entry box (mp4a and friends) from the bitstream.
 *
 * First peeks at the QTFF "version" field to disambiguate QuickTime v1
 * entries from MP4 v1 entries, then parses the generic audio sample
 * entry followed by its child boxes. For ENCA entries the original
 * format from the sinf box is propagated into the 3GPP config. If child
 * box parsing fails, a recovery pass scans the remaining payload for an
 * embedded ESDS box (workaround for malformed files).
 *
 * Returns GF_OK on success or a GF_Err code (GF_ISOM_INVALID_FILE,
 * GF_OUT_OF_MEM, or the child-parse error).
 */
GF_Err audio_sample_entry_box_read(GF_Box *s, GF_BitStream *bs)
{
	GF_MPEGAudioSampleEntryBox *ptr;
	char *data;
	u8 a, b, c, d;
	u32 i, size, v, nb_alnum;
	GF_Err e;
	u64 pos, start;
	ptr = (GF_MPEGAudioSampleEntryBox *)s;
	start = gf_bs_get_position(bs);
	/* Peek the 16-bit version field that follows the 8-byte reserved/
	 * data-reference-index prefix; non-zero means QTFF-style entry. */
	gf_bs_seek(bs, start + 8);
	v = gf_bs_read_u16(bs);
	if (v)
		ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_ON_NOEXT;
	//try to disambiguate QTFF v1 and MP4 v1 audio sample entries ...
	if (v==1) {
		//go to end of ISOM audio sample entry, skip 4 byte (box size field), read 4 bytes (box type) and check if this looks like a box
		gf_bs_seek(bs, start + 8 + 20 + 4);
		a = gf_bs_read_u8(bs);
		b = gf_bs_read_u8(bs);
		c = gf_bs_read_u8(bs);
		d = gf_bs_read_u8(bs);
		nb_alnum = 0;
		if (isalnum(a)) nb_alnum++;
		if (isalnum(b)) nb_alnum++;
		if (isalnum(c)) nb_alnum++;
		if (isalnum(d)) nb_alnum++;
		/* A mostly-alphanumeric 4CC at that offset looks like a real
		 * ISOBMFF child box, so treat the entry as non-QTFF. */
		if (nb_alnum>2) ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE;
	}
	gf_bs_seek(bs, start);
	e = gf_isom_audio_sample_entry_read((GF_AudioSampleEntryBox*)s, bs);
	if (e) return e;
	pos = gf_bs_get_position(bs);
	size = (u32) s->size;
	//when cookie is set on bs, always convert qtff-style mp4a to isobmff-style
	//since the conversion is done in addBox and we don't have the bitstream there (arg...), flag the box
	if (gf_bs_get_cookie(bs) & GF_ISOM_BS_COOKIE_QT_CONV) {
		ptr->qtff_mode |= GF_ISOM_AUDIO_QTFF_CONVERT_FLAG;
	}
	e = gf_isom_box_array_read(s, bs);
	if (!e) {
		if (s->type==GF_ISOM_BOX_TYPE_ENCA) {
			GF_ProtectionSchemeInfoBox *sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(s->child_boxes, GF_ISOM_BOX_TYPE_SINF);
			if (sinf && sinf->original_format) {
				u32 type = sinf->original_format->data_format;
				switch (type) {
				case GF_ISOM_SUBTYPE_3GP_AMR:
				case GF_ISOM_SUBTYPE_3GP_AMR_WB:
				case GF_ISOM_SUBTYPE_3GP_EVRC:
				case GF_ISOM_SUBTYPE_3GP_QCELP:
				case GF_ISOM_SUBTYPE_3GP_SMV:
					if (ptr->cfg_3gpp) ptr->cfg_3gpp->cfg.type = type;
					break;
				}
			}
		}
		return GF_OK;
	}
	if (size<8) return GF_ISOM_INVALID_FILE;
	/*hack for some weird files (possibly recorded with live.com tools, needs further investigations)*/
	gf_bs_seek(bs, pos);
	data = (char*)gf_malloc(sizeof(char) * size);
	if (!data) return GF_OUT_OF_MEM;
	gf_bs_read_data(bs, data, size);
	/* Scan the raw payload for an embedded ESDS box and re-parse it. */
	for (i=0; i<size-8; i++) {
		if (GF_4CC((u32)data[i+4], (u8)data[i+5], (u8)data[i+6], (u8)data[i+7]) == GF_ISOM_BOX_TYPE_ESDS) {
			GF_BitStream *mybs = gf_bs_new(data + i, size - i, GF_BITSTREAM_READ);
			if (ptr->esd) gf_isom_box_del_parent(&ptr->child_boxes, (GF_Box *)ptr->esd);
			ptr->esd = NULL;
			e = gf_isom_box_parse((GF_Box **)&ptr->esd, mybs);
			gf_bs_del(mybs);
			if ((e==GF_OK) && (ptr->esd->type == GF_ISOM_BOX_TYPE_ESDS)) {
				if (!ptr->child_boxes) ptr->child_boxes = gf_list_new();
				gf_list_add(ptr->child_boxes, ptr->esd);
			} else if (ptr->esd) {
				gf_isom_box_del((GF_Box *)ptr->esd);
				ptr->esd = NULL;
			}
			break;
		}
	}
	gf_free(data);
	return e;
}
| 0
|
512,819
|
/// Base-class default: this Item carries no cached decimal value, so there
/// is nothing to point at. Subclasses that hold a decimal are expected to
/// override and return a pointer to their stored value.
virtual const my_decimal *const_ptr_my_decimal() const { return NULL; }
| 0
|
356,170
|
/*
 * Recreate under dst the directory hierarchy that exists under src.
 *
 * Walks src component by component past the given prefix lengths,
 * temporarily NUL-terminating each component so it can be stat()ed;
 * for every component that is a directory, the corresponding dst
 * component is created (mkdir, EEXIST tolerated) with matching mode.
 * Both src and dst are modified in place during the walk and restored
 * before the next component is examined.
 *
 * NOTE(review): assumes src and dst share the same path layout after
 * their respective prefixes — confirm against callers.
 */
static void build_dirs(char *src, char *dst, size_t src_prefix_len, size_t dst_prefix_len) {
	char *p = src + src_prefix_len + 1;
	char *q = dst + dst_prefix_len + 1;
	char *r = dst + dst_prefix_len;
	struct stat s;
	bool last = false;
	*r = '\0';
	for (; !last; p++, q++) {
		if (*p == '\0') {
			last = true;
		}
		if (*p == '\0' || (*p == '/' && *(p - 1) != '/')) {
			// We found a new component of our src path.
			// Null-terminate it temporarily here so that we can work
			// with it.
			*p = '\0';
			if (stat(src, &s) == 0 && S_ISDIR(s.st_mode)) {
				// Null-terminate the dst path and undo its previous
				// termination.
				*q = '\0';
				*r = '/';
				r = q;
				if (mkdir(dst, 0700) != 0 && errno != EEXIST)
					errExit("mkdir");
				if (chmod(dst, s.st_mode) != 0)
					errExit("chmod");
			}
			if (!last) {
				// If we're not at the final terminating null, restore
				// the slash so that we can continue our traversal.
				*p = '/';
			}
		}
	}
}
| 0
|
256,139
|
// Shuffles (and possibly interleaves) a slice of the right-hand matrix into
// `buffer`, laid out in N-column rows, splitting the work across up to 8
// worker threads. Returns a heap-allocated BlockingCounter the caller must
// wait on (and delete) before reading `buffer`.
//
// NOTE(review): the lambda captures `mat` and local ints by reference; it is
// only safe because the caller blocks on the returned counter before these
// go out of scope — confirm all call sites do so.
inline BlockingCounter* SparseMatMul<TL, TR>::ShuffleMatrix(
    const typename SparseMatMul<TL, TR>::ConstMatrixMapR& mat,
    int slice_row_start, int slice_num_rows, int slice_col_start,
    int slice_num_cols, const int N,
    const DeviceBase::CpuWorkerThreads* thread_pool, MatrixR* buffer) {
  DCHECK_EQ(N % 2, 0);
  DCHECK_LE(kNumOperands * sizeof(float) / sizeof(TR), N);
  // Note(nikhilsarda): This heuristic is optimal in benchmarks as of
  // Jan 21, 2020.
  int num_threads = std::min(thread_pool->num_threads, 8);
  BlockingCounter* counter = new BlockingCounter(num_threads);
  DCHECK_EQ(N, buffer->dimension(1));
  // Copies output rows [s, e) of `buffer`; each output row s maps to an
  // (input row, N-wide column group) pair in the source slice.
  auto shuffle_work = [&mat, slice_row_start, slice_num_rows, slice_col_start,
                       slice_num_cols, N, buffer, counter](int s, int e) {
    const int row_start = s % slice_num_rows + slice_row_start;
    const int col_start = s / slice_num_rows * N + slice_col_start;
    auto* out_start = &(*buffer)(s, 0);
    const auto* input_start = &mat(row_start, col_start);
    const auto* input_end = &mat(slice_row_start + slice_num_rows - 1,
                                 slice_col_start + slice_num_cols - 1);
    const int mat_num_cols = mat.dimension(1);
    const int row_slice_size = slice_num_rows * mat_num_cols;

    // Output rows whose column group is fully N wide.
    const int aligned_end = slice_num_cols / N * slice_num_rows;
    const int e1 = std::min(e, aligned_end);
    while (s < e1) {
      CopyAndMayBeInterleave<TR>(out_start, input_start, N);
      out_start += N;
      input_start += mat_num_cols;
      if (input_start > input_end) {
        input_start = input_start - row_slice_size + N;
      }
      ++s;
    }
    // Remaining rows cover the final, partially-filled column group.
    int s1 = std::max(s, aligned_end);
    const int copy_num_cols = slice_num_cols % N;
    while (s1 < e) {
      CopyAndMayBeInterleave<TR>(out_start, input_start, copy_num_cols);
      out_start += N;
      input_start += mat_num_cols;
      ++s1;
    }
    if (counter) counter->DecrementCount();
  };

  int start = 0;
  int end = 0;
  int num_out_rows = (slice_num_cols + N - 1) / N * slice_num_rows;
  DCHECK_LE(num_out_rows, buffer->dimension(0));
  // Hand each thread an (approximately) even share of the output rows.
  for (int i = std::max(1, num_threads); i > 0; --i) {
    end = start + num_out_rows / i;
    thread_pool->workers->Schedule([=]() { shuffle_work(start, end); });
    num_out_rows -= (end - start);
    start = end;
  }
  return counter;
}
| 0
|
300,818
|
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *hdr = buf_msg(skb);
if (unlikely(msg_in_group(hdr)))
return READ_ONCE(sk->sk_rcvbuf);
if (unlikely(!msg_connected(hdr)))
return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
return READ_ONCE(sk->sk_rcvbuf);
return FLOWCTL_MSG_LIM;
}
| 0
|
379,671
|
/*
 * Detect a stack-variable (or stack-argument) access in the given opcode
 * and record it on the function.
 *
 * `reg` is the base register (SP or BP), `sign` is "+" or "-" giving the
 * direction of the offset in the ESIL expression, and `type` is the
 * variable kind (R_ANAL_VAR_KIND_SPV / _BPV). The offset is taken from
 * op->src[] when available, otherwise parsed out of the op's ESIL string.
 * Once an offset is known, an existing variable at that frame offset gets
 * an access record, or a new variable/argument is created (with name and
 * type guessed from function signature metadata when possible).
 * Offsets beyond `maxstackframe` are ignored as noise.
 */
static void extract_arg(RAnal *anal, RAnalFunction *fcn, RAnalOp *op, const char *reg, const char *sign, char type) {
	st64 ptr = 0;
	char *addr, *esil_buf = NULL;
	const st64 maxstackframe = 1024 * 8;
	r_return_if_fail (anal && fcn && op && reg);
	/* Preferred path: take the displacement straight from a source
	 * operand that uses `reg` as its base register. */
	size_t i;
	for (i = 0; i < R_ARRAY_SIZE (op->src); i++) {
		if (op->src[i] && op->src[i]->reg && op->src[i]->reg->name) {
			if (!strcmp (reg, op->src[i]->reg->name)) {
				st64 delta = op->src[i]->delta;
				if ((delta > 0 && *sign == '+') || (delta < 0 && *sign == '-')) {
					ptr = R_ABS (op->src[i]->delta);
					break;
				}
			}
		}
	}
	/* Fallback path: locate ",reg,sign," in the ESIL string and parse
	 * the hex/decimal constant immediately preceding it. */
	if (!ptr) {
		const char *op_esil = r_strbuf_get (&op->esil);
		if (!op_esil) {
			return;
		}
		esil_buf = strdup (op_esil);
		if (!esil_buf) {
			return;
		}
		r_strf_var (esilexpr, 64, ",%s,%s,", reg, sign);
		char *ptr_end = strstr (esil_buf, esilexpr);
		if (!ptr_end) {
			free (esil_buf);
			return;
		}
		*ptr_end = 0;
		addr = ptr_end;
		while ((addr[0] != '0' || addr[1] != 'x') && addr >= esil_buf + 1 && *addr != ',') {
			addr--;
		}
		if (strncmp (addr, "0x", 2)) {
			//XXX: This is a workaround for inconsistent esil
			if (!op->stackop && op->dst) {
				const char *sp = r_reg_get_name (anal->reg, R_REG_NAME_SP);
				const char *bp = r_reg_get_name (anal->reg, R_REG_NAME_BP);
				const char *rn = op->dst->reg ? op->dst->reg->name : NULL;
				if (rn && ((bp && !strcmp (bp, rn)) || (sp && !strcmp (sp, rn)))) {
					if (anal->verbose) {
						eprintf ("Warning: Analysis didn't fill op->stackop for instruction that alters stack at 0x%" PFMT64x ".\n", op->addr);
					}
					goto beach;
				}
			}
			if (*addr == ',') {
				addr++;
			}
			if (!op->stackop && op->type != R_ANAL_OP_TYPE_PUSH && op->type != R_ANAL_OP_TYPE_POP
				&& op->type != R_ANAL_OP_TYPE_RET && r_str_isnumber (addr)) {
				ptr = (st64)r_num_get (NULL, addr);
				if (ptr && op->src[0] && ptr == op->src[0]->imm) {
					goto beach;
				}
			} else if ((op->stackop == R_ANAL_STACK_SET) || (op->stackop == R_ANAL_STACK_GET)) {
				if (op->ptr % 4) {
					goto beach;
				}
				ptr = R_ABS (op->ptr);
			} else {
				goto beach;
			}
		} else {
			ptr = (st64)r_num_get (NULL, addr);
		}
	}
	if (anal->verbose && (!op->src[0] || !op->dst)) {
		eprintf ("Warning: Analysis didn't fill op->src/dst at 0x%" PFMT64x ".\n", op->addr);
	}
	int rw = (op->direction == R_ANAL_OP_DIR_WRITE) ? R_ANAL_VAR_ACCESS_TYPE_WRITE : R_ANAL_VAR_ACCESS_TYPE_READ;
	/* Positive offsets: above the frame base — may be an argument. */
	if (*sign == '+') {
		const bool isarg = type == R_ANAL_VAR_KIND_SPV ? ptr >= fcn->stack : ptr >= fcn->bp_off;
		const char *pfx = isarg ? ARGPREFIX : VARPREFIX;
		st64 frame_off;
		if (type == R_ANAL_VAR_KIND_SPV) {
			frame_off = ptr - fcn->stack;
		} else {
			frame_off = ptr - fcn->bp_off;
		}
		if (maxstackframe != 0 && (frame_off > maxstackframe || frame_off < -maxstackframe)) {
			goto beach;
		}
		RAnalVar *var = get_stack_var (fcn, frame_off);
		if (var) {
			r_anal_var_set_access (var, reg, op->addr, rw, ptr);
			goto beach;
		}
		char *varname = NULL, *vartype = NULL;
		/* For stack arguments, try to recover the real name/type from
		 * the function signature stored in sdb_types. */
		if (isarg) {
			const char *place = fcn->cc ? r_anal_cc_arg (anal, fcn->cc, ST32_MAX) : NULL;
			bool stack_rev = place ? !strcmp (place, "stack_rev") : false;
			char *fname = r_type_func_guess (anal->sdb_types, fcn->name);
			if (fname) {
				ut64 sum_sz = 0;
				size_t from, to, i;
				if (stack_rev) {
					const size_t cnt = r_type_func_args_count (anal->sdb_types, fname);
					from = cnt ? cnt - 1 : cnt;
					to = fcn->cc ? r_anal_cc_max_arg (anal, fcn->cc) : 0;
				} else {
					from = fcn->cc ? r_anal_cc_max_arg (anal, fcn->cc) : 0;
					to = r_type_func_args_count (anal->sdb_types, fname);
				}
				const int bytes = (fcn->bits ? fcn->bits : anal->bits) / 8;
				for (i = from; stack_rev ? i >= to : i < to; stack_rev ? i-- : i++) {
					char *tp = r_type_func_args_type (anal->sdb_types, fname, i);
					if (!tp) {
						break;
					}
					if (sum_sz == frame_off) {
						vartype = tp;
						varname = strdup (r_type_func_args_name (anal->sdb_types, fname, i));
						break;
					}
					ut64 bit_sz = r_type_get_bitsize (anal->sdb_types, tp);
					sum_sz += bit_sz ? bit_sz / 8 : bytes;
					sum_sz = R_ROUND (sum_sz, bytes);
					free (tp);
				}
				free (fname);
			}
		}
		if (!varname) {
			if (anal->opt.varname_stack) {
				varname = r_str_newf ("%s_%" PFMT64x "h", pfx, R_ABS (frame_off));
			} else {
				varname = r_anal_function_autoname_var (fcn, type, pfx, ptr);
			}
		}
		if (varname) {
#if 0
			if (isarg && frame_off > 48) {
				free (varname);
				goto beach;
			}
#endif
			RAnalVar *var = r_anal_function_set_var (fcn, frame_off, type, vartype, anal->bits / 8, isarg, varname);
			if (var) {
				r_anal_var_set_access (var, reg, op->addr, rw, ptr);
			}
			free (varname);
		}
		free (vartype);
	} else {
		/* Negative offsets: below the frame base — always a local. */
		st64 frame_off = -(ptr + fcn->bp_off);
		if (maxstackframe > 0 && (frame_off > maxstackframe || frame_off < -maxstackframe)) {
			goto beach;
		}
		RAnalVar *var = get_stack_var (fcn, frame_off);
		if (var) {
			r_anal_var_set_access (var, reg, op->addr, rw, -ptr);
			goto beach;
		}
		char *varname = anal->opt.varname_stack
			? r_str_newf ("%s_%" PFMT64x "h", VARPREFIX, R_ABS (frame_off))
			: r_anal_function_autoname_var (fcn, type, VARPREFIX, -ptr);
		if (varname) {
			RAnalVar *var = r_anal_function_set_var (fcn, frame_off, type, NULL, anal->bits / 8, false, varname);
			if (var) {
				r_anal_var_set_access (var, reg, op->addr, rw, -ptr);
			}
			free (varname);
		}
	}
beach:
	free (esil_buf);
}
| 0
|
246,454
|
/* Consume bytes from the buffer until the end-of-code marker `eoc` is
 * read (inclusive), staying within `bound`. Returns the number of bytes
 * consumed including the terminator, or 0 on invalid input / if the
 * marker is not found before the bound. `out` is currently unused. */
static size_t consume_init_expr_r(RBuffer *b, ut64 bound, ut8 eoc, void *out) {
	if (!b || bound >= r_buf_size (b) || r_buf_tell (b) > bound) {
		return 0;
	}
	size_t consumed = 1;
	ut8 byte = r_buf_read8 (b);
	while (byte != eoc) {
		if (r_buf_tell (b) > bound) {
			/* ran past the bound without seeing the terminator */
			return 0;
		}
		byte = r_buf_read8 (b);
		consumed++;
	}
	return consumed;
}
| 0
|
256,388
|
/* Allocate a bio_map_data large enough to hold a private copy of the
 * caller's iovec array, duplicate the iterator into it, and point the
 * copied iterator at the embedded iovecs. Returns NULL if the segment
 * count exceeds UIO_MAXIOV or the allocation fails. */
static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *map;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	map = kmalloc(struct_size(map, iov, data->nr_segs), gfp_mask);
	if (!map)
		return NULL;

	memcpy(map->iov, data->iov, data->nr_segs * sizeof(struct iovec));
	map->iter = *data;
	map->iter.iov = map->iov;
	return map;
}
| 0
|
335,426
|
/*
 * Isolate the Ex command name starting at eap->cmd, set eap->cmdidx to the
 * matching entry of cmdnames[] (or CMD_SIZE when not found) and return a
 * pointer to the character just after the command name.
 *
 * In Vim9 script, first recognize expressions/assignments ("var = expr",
 * "func(arg)", "[a, b]->Method()", "++nr", block starts, ...) via the
 * optional "lookup" callback; these map to CMD_eval / CMD_var / CMD_block /
 * CMD_increment / CMD_decrement without a table lookup.
 * "full" (when non-NULL) is set to TRUE if the full command name matched.
 */
find_ex_command(
	exarg_T	   *eap,
	int	   *full UNUSED,
	int	   (*lookup)(char_u *, size_t, int cmd, cctx_T *) UNUSED,
	cctx_T	   *cctx UNUSED)
{
    int		len;
    char_u	*p;
    int		i;

#ifndef FEAT_EVAL
    int		vim9 = FALSE;
#else
    int		vim9 = in_vim9script();

    /*
     * Recognize a Vim9 script function/method call and assignment:
     * "lvar = value", "lvar(arg)", "[1, 2 3]->Func()"
     */
    p = eap->cmd;
    if (lookup != NULL)
    {
	char_u *pskip = skip_option_env_lead(eap->cmd);

	if (vim_strchr((char_u *)"{('[\"@&$", *p) != NULL
	       || ((p = to_name_const_end(pskip)) > eap->cmd && *p != NUL)
	       || (p[0] == '0' && p[1] == 'z'))
	{
	    int oplen;
	    int heredoc;
	    char_u *swp;

	    if (*eap->cmd == '&'
		    || *eap->cmd == '$'
		    || (eap->cmd[0] == '@'
			&& (valid_yank_reg(eap->cmd[1], FALSE)
						      || eap->cmd[1] == '@')))
	    {
		if (*eap->cmd == '&')
		{
		    p = eap->cmd + 1;
		    if (STRNCMP("l:", p, 2) == 0 || STRNCMP("g:", p, 2) == 0)
			p += 2;
		    p = to_name_end(p, FALSE);
		}
		else if (*eap->cmd == '$')
		    p = to_name_end(eap->cmd + 1, FALSE);
		else
		    p = eap->cmd + 2;
		if (ends_excmd(*skipwhite(p)))
		{
		    // "&option <NL>", "$ENV <NL>" and "@r <NL>" are the start
		    // of an expression.
		    eap->cmdidx = CMD_eval;
		    return eap->cmd;
		}
		// "&option" can be followed by "->" or "=", check below
	    }

	    swp = skipwhite(p);

	    if (
		// "(..." is an expression.
		// "funcname(" is always a function call.
		*p == '('
		    || (p == eap->cmd
			? (
			    // "{..." is a dict expression or block start.
			    *eap->cmd == '{'
			    // "'string'->func()" is an expression.
			 || *eap->cmd == '\''
			    // '"string"->func()' is an expression.
			 || (eap->cmd[0] == '0' && eap->cmd[1] == 'z')
			    // '"string"->func()' is an expression.
			 || *eap->cmd == '"'
			    // "g:varname" is an expression.
			 || eap->cmd[1] == ':'
			    )
			    // "varname->func()" is an expression.
			: (*swp == '-' && swp[1] == '>')))
	    {
		if (*eap->cmd == '{' && ends_excmd(*skipwhite(eap->cmd + 1)))
		{
		    // "{" by itself is the start of a block.
		    eap->cmdidx = CMD_block;
		    return eap->cmd + 1;
		}
		eap->cmdidx = CMD_eval;
		return eap->cmd;
	    }

	    if ((p != eap->cmd && (
			    // "varname[]" is an expression.
			    *p == '['
			    // "varname.key" is an expression.
			 || (*p == '.'
				     && (ASCII_ISALPHA(p[1]) || p[1] == '_'))))
		    // g:[key] is an expression
		    || STRNCMP(eap->cmd, "g:[", 3) == 0)
	    {
		char_u	*after = eap->cmd;

		// When followed by "=" or "+=" then it is an assignment.
		// Skip over the whole thing, it can be:
		//	name.member = val
		//	name[a : b] = val
		//	name[idx] = val
		//	name[idx].member = val
		//	etc.
		eap->cmdidx = CMD_eval;
		++emsg_silent;
		if (skip_expr(&after, NULL) == OK)
		{
		    after = skipwhite(after);
		    if (*after == '=' || (*after != NUL && after[1] == '=')
			    || (after[0] == '.' && after[1] == '.'
							   && after[2] == '='))
			eap->cmdidx = CMD_var;
		}
		--emsg_silent;
		return eap->cmd;
	    }

	    // "[...]->Method()" is a list expression, but "[a, b] = Func()" is
	    // an assignment.
	    // If there is no line break inside the "[...]" then "p" is
	    // advanced to after the "]" by to_name_const_end(): check if a "="
	    // follows.
	    // If "[...]" has a line break "p" still points at the "[" and it
	    // can't be an assignment.
	    if (*eap->cmd == '[')
	    {
		char_u	    *eq;

		p = to_name_const_end(eap->cmd);
		if (p == eap->cmd && *p == '[')
		{
		    int count = 0;
		    int semicolon = FALSE;

		    p = skip_var_list(eap->cmd, TRUE, &count, &semicolon, TRUE);
		}
		eq = p;
		if (eq != NULL)
		{
		    eq = skipwhite(eq);
		    if (vim_strchr((char_u *)"+-*/%", *eq) != NULL)
			++eq;
		}
		if (p == NULL || p == eap->cmd || *eq != '=')
		{
		    eap->cmdidx = CMD_eval;
		    return eap->cmd;
		}
		if (p > eap->cmd && *eq == '=')
		{
		    eap->cmdidx = CMD_var;
		    return eap->cmd;
		}
	    }

	    // Recognize an assignment if we recognize the variable name:
	    // "g:var = expr"
	    // "@r = expr"
	    // "&opt = expr"
	    // "var = expr"  where "var" is a variable name or we are skipping
	    // (variable declaration might have been skipped).
	    // Not "redir => var" (when skipping).
	    oplen = assignment_len(skipwhite(p), &heredoc);
	    if (oplen > 0)
	    {
		if (((p - eap->cmd) > 2 && eap->cmd[1] == ':')
			|| *eap->cmd == '&'
			|| *eap->cmd == '$'
			|| *eap->cmd == '@'
			|| (eap->skip && IS_WHITE_OR_NUL(
						      *(skipwhite(p) + oplen)))
			|| lookup(eap->cmd, p - eap->cmd, TRUE, cctx) == OK)
		{
		    eap->cmdidx = CMD_var;
		    return eap->cmd;
		}
	    }

	    // Recognize using a type for a w:, b:, t: or g: variable:
	    // "w:varname: number = 123".
	    if (eap->cmd[1] == ':' && *p == ':')
	    {
		eap->cmdidx = CMD_eval;
		return eap->cmd;
	    }
	}

	// "g:", "s:" and "l:" are always assumed to be a variable, thus start
	// an expression.  A global/substitute/list command needs to use a
	// longer name.
	if (vim_strchr((char_u *)"gsl", *p) != NULL && p[1] == ':')
	{
	    eap->cmdidx = CMD_eval;
	    return eap->cmd;
	}

	// If it is an ID it might be a variable with an operator on the next
	// line, if the variable exists it can't be an Ex command.
	if (p > eap->cmd && ends_excmd(*skipwhite(p))
		&& (lookup(eap->cmd, p - eap->cmd, TRUE, cctx) == OK
		    || (ASCII_ISALPHA(eap->cmd[0]) && eap->cmd[1] == ':')))
	{
	    eap->cmdidx = CMD_eval;
	    return eap->cmd;
	}

	// Check for "++nr" and "--nr".
	if (p == eap->cmd && p[0] != NUL && p[0] == p[1]
		&& (*p == '+' || *p == '-'))
	{
	    eap->cmdidx = *p == '+' ? CMD_increment : CMD_decrement;
	    return eap->cmd + 2;
	}
    }
#endif

    /*
     * Isolate the command and search for it in the command table.
     */
    p = eap->cmd;
    if (one_letter_cmd(p, &eap->cmdidx))
    {
	++p;
    }
    else
    {
	while (ASCII_ISALPHA(*p))
	    ++p;
	// for python 3.x support ":py3", ":python3", ":py3file", etc.
	if (eap->cmd[0] == 'p' && eap->cmd[1] == 'y')
	{
	    while (ASCII_ISALNUM(*p))
		++p;
	}
	else if (*p == '9' && STRNCMP("vim9", eap->cmd, 4) == 0)
	{
	    // include "9" for "vim9*" commands; "vim9cmd" and "vim9script".
	    ++p;
	    while (ASCII_ISALPHA(*p))
		++p;
	}

	// check for non-alpha command
	if (p == eap->cmd && vim_strchr((char_u *)"@*!=><&~#}", *p) != NULL)
	    ++p;
	len = (int)(p - eap->cmd);
	// The "d" command can directly be followed by 'l' or 'p' flag, when
	// not in Vim9 script.
	if (!vim9 && *eap->cmd == 'd' && (p[-1] == 'l' || p[-1] == 'p'))
	{
	    // Check for ":dl", ":dell", etc. to ":deletel": that's
	    // :delete with the 'l' flag.  Same for 'p'.
	    for (i = 0; i < len; ++i)
		if (eap->cmd[i] != ((char_u *)"delete")[i])
		    break;
	    if (i == len - 1)
	    {
		--len;
		if (p[-1] == 'l')
		    eap->flags |= EXFLAG_LIST;
		else
		    eap->flags |= EXFLAG_PRINT;
	    }
	}

	if (ASCII_ISLOWER(eap->cmd[0]))
	{
	    int c1 = eap->cmd[0];
	    int c2 = len == 1 ? NUL : eap->cmd[1];

	    if (command_count != (int)CMD_SIZE)
	    {
		iemsg(_(e_command_table_needs_to_be_updated_run_make_cmdidxs));
		getout(1);
	    }

	    // Use a precomputed index for fast look-up in cmdnames[]
	    // taking into account the first 2 letters of eap->cmd.
	    eap->cmdidx = cmdidxs1[CharOrdLow(c1)];
	    if (ASCII_ISLOWER(c2))
		eap->cmdidx += cmdidxs2[CharOrdLow(c1)][CharOrdLow(c2)];
	}
	else if (ASCII_ISUPPER(eap->cmd[0]))
	    eap->cmdidx = CMD_Next;
	else
	    eap->cmdidx = CMD_bang;

	for ( ; (int)eap->cmdidx < (int)CMD_SIZE;
		eap->cmdidx = (cmdidx_T)((int)eap->cmdidx + 1))
	    if (STRNCMP(cmdnames[(int)eap->cmdidx].cmd_name, (char *)eap->cmd,
			(size_t)len) == 0)
	    {
#ifdef FEAT_EVAL
		if (full != NULL && cmdnames[eap->cmdidx].cmd_name[len] == NUL)
		    *full = TRUE;
#endif
		break;
	    }

	// :Print and :mode are not supported in Vim9 script.
	// Some commands cannot be shortened in Vim9 script.
	if (vim9 && eap->cmdidx != CMD_SIZE)
	{
	    if (eap->cmdidx == CMD_mode || eap->cmdidx == CMD_Print)
		eap->cmdidx = CMD_SIZE;
	    else if ((cmdnames[eap->cmdidx].cmd_argt & EX_WHOLE)
			  && len < (int)STRLEN(cmdnames[eap->cmdidx].cmd_name))
	    {
		semsg(_(e_command_cannot_be_shortened_str), eap->cmd);
		eap->cmdidx = CMD_SIZE;
	    }
	}

	// Do not recognize ":*" as the star command unless '*' is in
	// 'cpoptions'.
	if (eap->cmdidx == CMD_star && vim_strchr(p_cpo, CPO_STAR) == NULL)
	    p = eap->cmd;

	// Look for a user defined command as a last resort.  Let ":Print" be
	// overruled by a user defined command.
	if ((eap->cmdidx == CMD_SIZE || eap->cmdidx == CMD_Print)
		&& *eap->cmd >= 'A' && *eap->cmd <= 'Z')
	{
	    // User defined commands may contain digits.
	    while (ASCII_ISALNUM(*p))
		++p;
	    p = find_ucmd(eap, p, full, NULL, NULL);
	}
	if (p == NULL || p == eap->cmd)
	    eap->cmdidx = CMD_SIZE;
    }

    // ":fina" means ":finally" in legacy script, for backwards compatibility.
    if (eap->cmdidx == CMD_final && p - eap->cmd == 4 && !vim9)
	eap->cmdidx = CMD_finally;

#ifdef FEAT_EVAL
    // Vim9 script requires whitespace (or a few specific characters) right
    // after a recognized command name.
    if (eap->cmdidx < CMD_SIZE
	    && vim9
	    && !IS_WHITE_OR_NUL(*p) && *p != '\n' && *p != '!' && *p != '|'
	    && (eap->cmdidx < 0 ||
		(cmdnames[eap->cmdidx].cmd_argt & EX_NONWHITE_OK) == 0))
    {
	char_u *cmd = vim_strnsave(eap->cmd, p - eap->cmd);

	semsg(_(e_command_str_not_followed_by_white_space_str), cmd, eap->cmd);
	eap->cmdidx = CMD_SIZE;
	vim_free(cmd);
    }
#endif

    return p;
}
| 0
|
488,426
|
/*
 * Walk every PTE covering [addr, end) under @pmd and invoke @fn on each,
 * passing @data through. For the kernel mm (init_mm) the PTEs are
 * allocated/used without a page-table lock; for user mms the PTE page is
 * mapped and locked for the duration of the walk.
 *
 * Returns 0 on success, -ENOMEM if the PTE page cannot be allocated, or
 * the first non-zero value returned by @fn (which stops the walk).
 */
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pte_t *pte;
	int err;
	pgtable_t token;
	spinlock_t *uninitialized_var(ptl);

	pte = (mm == &init_mm) ?
		pte_alloc_kernel(pmd, addr) :
		pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;

	/* Huge PMDs must have been split by the caller. */
	BUG_ON(pmd_huge(*pmd));

	token = pmd_pgtable(*pmd);

	do {
		err = fn(pte, token, addr, data);
		if (err)
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (mm != &init_mm)
		pte_unmap_unlock(pte-1, ptl);
	return err;
}
| 0
|
278,253
|
/*
 * Return the indent (in columns) of line "lnum" in the current buffer,
 * computed with the buffer's 'tabstop' (and 'vartabstop' when compiled
 * with FEAT_VARTABS). The FALSE argument selects the default counting
 * mode of the underlying helpers.
 */
get_indent_lnum(linenr_T lnum)
{
#ifdef FEAT_VARTABS
    return get_indent_str_vtab(ml_get(lnum), (int)curbuf->b_p_ts,
						 curbuf->b_p_vts_array, FALSE);
#else
    return get_indent_str(ml_get(lnum), (int)curbuf->b_p_ts, FALSE);
#endif
}
| 0
|
484,740
|
/*
 * Consume the chain of RX responses for one packet from the shared ring.
 *
 * Validates each slot (offset/size bounds, grant reference, grant still
 * owned by us), optionally runs the queue's XDP program on single-slot
 * packets, and queues the per-slot skbs on @list for the caller to
 * assemble. Extras (including the XDP headroom hint) are gathered first
 * when the leading response announces them.
 *
 * Returns 0 on success or a negative errno; on error the ring consumer
 * index is advanced past the bad slots so the ring stays consistent.
 * A grant still held by the backend marks the device broken and disables
 * further use.
 */
static int xennet_get_responses(struct netfront_queue *queue,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list,
				bool *need_xdp_flush)
{
	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
	struct xen_netif_extra_info *extras = rinfo->extras;
	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
	struct device *dev = &queue->info->netdev->dev;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	int slots = 1;
	int err = 0;
	u32 verdict;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(queue, extras, rp);
		if (!err) {
			if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
				struct xen_netif_extra_info *xdp;

				xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
				rx->offset = xdp->u.xdp.headroom;
			}
		}
		/* xennet_get_extras() advanced the consumer index. */
		cons = queue->rx.rsp_cons;
	}

	for (;;) {
		/* Reject responses whose data would fall outside the page. */
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %u, size: %d\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(queue, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == INVALID_GRANT_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		if (!gnttab_end_foreign_access_ref(ref)) {
			dev_alert(dev,
				  "Grant still in use by backend domain\n");
			queue->info->broken = true;
			dev_alert(dev, "Disabled for further use\n");
			return -EINVAL;
		}

		gnttab_release_grant_reference(&queue->gref_rx_head, ref);

		rcu_read_lock();
		xdp_prog = rcu_dereference(queue->xdp_prog);
		if (xdp_prog) {
			if (!(rx->flags & XEN_NETRXF_more_data)) {
				/* currently only a single page contains data */
				verdict = xennet_run_xdp(queue,
					skb_frag_page(&skb_shinfo(skb)->frags[0]),
					rx, xdp_prog, &xdp, need_xdp_flush);
				if (verdict != XDP_PASS)
					err = -EINVAL;
			} else {
				/* drop the frame */
				err = -EINVAL;
			}
		}
		rcu_read_unlock();

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		/* Copy the response out of the shared ring before using it. */
		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
		rx = &rx_local;
		skb = xennet_get_rx_skb(queue, cons + slots);
		ref = xennet_get_rx_ref(queue, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		xennet_set_rx_rsp_cons(queue, cons + slots);

	return err;
}
| 0
|
443,154
|
/*
 * address_space_operations ->writepages for JFS: delegate to the generic
 * multi-page writeback helper, using jfs_get_block for block mapping.
 */
static int jfs_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, jfs_get_block);
}
| 0
|
252,367
|
/* Return MZ_TRUE if the archive entry at file_index is a directory.
 * An entry counts as a directory when its stored filename ends in '/',
 * or, failing that, when the DOS directory bit (0x10) is set in the
 * central-directory external attributes (most writers put DOS attributes
 * in the low 16 bits regardless of the creating OS). */
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  const mz_uint8 *hdr = mz_zip_reader_get_cdh(pZip, file_index);
  mz_uint name_len;

  if (!hdr) return MZ_FALSE;

  /* A trailing '/' in the stored name is the primary directory marker. */
  name_len = MZ_READ_LE16(hdr + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (name_len &&
      hdr[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + name_len - 1] == '/')
    return MZ_TRUE;

  /* FIXME: Remove this fallback? The filename check above may suffice. */
  return (MZ_READ_LE32(hdr + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS) & 0x10)
             ? MZ_TRUE
             : MZ_FALSE;
}
| 0
|
483,052
|
/*
 * seq_file ->stop for the cgroup1 pidlist: schedule delayed destruction
 * of the pidlist grabbed by ->start (keeping it cached briefly for
 * consecutive reads) and drop the pidlist_mutex taken in ->start.
 */
static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}
| 0
|
309,999
|
/*
 * Initialize xterm-style mouse support for the screen: record the mouse
 * type and fetch the "XM" terminfo capability used to enable/disable
 * mouse reporting, falling back to the standard xterm private-mode 1000
 * control sequence when the terminal description lacks it.
 */
init_xterm_mouse(SCREEN *sp)
{
    sp->_mouse_type = M_XTERM;
    sp->_mouse_xtermcap = NCURSES_SP_NAME(tigetstr) (NCURSES_SP_ARGx "XM");
    if (!VALID_STRING(sp->_mouse_xtermcap))
	sp->_mouse_xtermcap = "\033[?1000%?%p1%{1}%=%th%el%;";
}
| 0
|
261,445
|
/*
 * Decode the part_mode syntax element (HEVC CABAC) for a coding unit.
 *
 * For intra CUs a single context-coded bin selects 2Nx2N vs NxN. For
 * inter CUs the binarization depends on the CU size relative to the
 * minimum CB size and on whether asymmetric motion partitions (AMP)
 * are enabled in the SPS; the bins are consumed in the exact order
 * mandated by the spec, so the decode order below must not change.
 */
static enum PartMode decode_part_mode(thread_context* tctx,
                                      enum PredMode pred_mode, int cLog2CbSize)
{
  de265_image* img = tctx->img;

  if (pred_mode == MODE_INTRA) {
    logtrace(LogSlice,"# part_mode (INTRA)\n");

    int bit = decode_CABAC_bit(&tctx->cabac_decoder, &tctx->ctx_model[CONTEXT_MODEL_PART_MODE]);

    logtrace(LogSlice,"> %s\n",bit ? "2Nx2N" : "NxN");

    logtrace(LogSymbols,"$1 part_mode=%d\n",bit ? PART_2Nx2N : PART_NxN);
    return bit ? PART_2Nx2N : PART_NxN;
  }
  else {
    const seq_parameter_set& sps = img->get_sps();

    int bit0 = decode_CABAC_bit(&tctx->cabac_decoder, &tctx->ctx_model[CONTEXT_MODEL_PART_MODE+0]);
    if (bit0) { logtrace(LogSymbols,"$1 part_mode=%d\n",PART_2Nx2N); return PART_2Nx2N; }

    // CHECK_ME: I optimize code and fix bug here, need more VERIFY!
    int bit1 = decode_CABAC_bit(&tctx->cabac_decoder, &tctx->ctx_model[CONTEXT_MODEL_PART_MODE+1]);
    if (cLog2CbSize > sps.Log2MinCbSizeY) {
      if (!sps.amp_enabled_flag) {
        logtrace(LogSymbols,"$1 part_mode=%d\n",bit1 ? PART_2NxN : PART_Nx2N);
        return bit1 ? PART_2NxN : PART_Nx2N;
      }
      else {
        // AMP enabled: a third context bin selects symmetric vs asymmetric,
        // and a bypass bin picks the asymmetric variant.
        int bit3 = decode_CABAC_bit(&tctx->cabac_decoder, &tctx->ctx_model[CONTEXT_MODEL_PART_MODE+3]);
        if (bit3) {
          logtrace(LogSymbols,"$1 part_mode=%d\n",bit1 ? PART_2NxN : PART_Nx2N);
          return bit1 ? PART_2NxN : PART_Nx2N;
        }

        int bit4 = decode_CABAC_bypass(&tctx->cabac_decoder);
        if ( bit1 && bit4) {
          logtrace(LogSymbols,"$1 part_mode=%d\n",PART_2NxnD);
          return PART_2NxnD;
        }
        if ( bit1 && !bit4) {
          logtrace(LogSymbols,"$1 part_mode=%d\n",PART_2NxnU);
          return PART_2NxnU;
        }
        if (!bit1 && !bit4) {
          logtrace(LogSymbols,"$1 part_mode=%d\n",PART_nLx2N);
          return PART_nLx2N;
        }
        if (!bit1 &&  bit4) {
          logtrace(LogSymbols,"$1 part_mode=%d\n",PART_nRx2N);
          return PART_nRx2N;
        }
      }
    }
    else {
      // TODO, we could save one if here when first decoding the next bin and then
      // checkcLog2CbSize==3 when it is '0'
      if (bit1) {
        logtrace(LogSymbols,"$1 part_mode=%d\n",PART_2NxN);
        return PART_2NxN;
      }
      if (cLog2CbSize==3) {
        logtrace(LogSymbols,"$1 part_mode=%d\n",PART_Nx2N);
        return PART_Nx2N;
      }
      else {
        int bit2 = decode_CABAC_bit(&tctx->cabac_decoder, &tctx->ctx_model[CONTEXT_MODEL_PART_MODE+2]);
        logtrace(LogSymbols,"$1 part_mode=%d\n",PART_NxN-bit2);
        return (enum PartMode)((int)PART_NxN - bit2)/*bit2 ? PART_Nx2N : PART_NxN*/;
      }
    }
  }

  assert(false); // should never be reached
  return PART_2Nx2N;
}
| 0
|
101,700
|
// Push the current text-checker state to the web process, provided the
// IPC connection is in a state where messages can be delivered.
void WebProcessProxy::updateTextCheckerState()
{
    if (!canSendMessage())
        return;

    send(Messages::WebProcess::SetTextCheckerState(TextChecker::state()), 0);
}
| 0
|
222,886
|
// Return a memoized unknown-dimension handle for the triple
// (node, output index, dimension index).  The first request creates a
// fresh UnknownDim in the node's inference context and caches it, so all
// later requests for the same triple observe the identical handle.
DimensionHandle GetUnknownOutputDim(const NodeDef* node, int index,
                                    int dim_id) {
  const DimId key{node, index, dim_id};

  auto found = unknown_dims_.find(key);
  if (found == unknown_dims_.end()) {
    InferenceContext* ctx = GetContext(node);
    DimensionHandle fresh = ctx->UnknownDim();
    unknown_dims_[key] = fresh;
    return fresh;
  }
  return found->second;
}
| 0
|
313,738
|
/*
 * Normal-mode help command: open the help window via ex_help(), unless
 * checkclearopq() reports a pending operator/queued state (presumably it
 * clears that state and aborts the command -- confirm against its
 * definition).
 */
nv_help(cmdarg_T *cap)
{
    if (!checkclearopq(cap->oap))
	ex_help(NULL);
}
| 0
|
441,823
|
/*
 * Swapped-client entry point for XkbGetGeometry: byte-swap the request
 * fields into host order, then delegate to ProcXkbGetGeometry.
 */
SProcXkbGetGeometry(ClientPtr client)
{
    REQUEST(xkbGetGeometryReq);

    /* Swap the length first so REQUEST_SIZE_MATCH can validate the request
     * size before any other field is interpreted. */
    swaps(&stuff->length);
    REQUEST_SIZE_MATCH(xkbGetGeometryReq);
    swaps(&stuff->deviceSpec);
    swapl(&stuff->name);
    return ProcXkbGetGeometry(client);
}
| 0
|
289,316
|
/*
 * OSS-emulation vectored read: repeatedly attempt snd_pcm_kernel_readv(),
 * first recovering from XRUN/SUSPENDED (via DRAIN) or SETUP (via
 * re-prepare) states.  The loop retries only when the read fails with
 * -EPIPE or -ESTRPIPE; any other result (success or error) is returned
 * to the caller.
 */
snd_pcm_sframes_t snd_pcm_oss_readv3(struct snd_pcm_substream *substream, void **bufs, snd_pcm_uframes_t frames)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;
	while (1) {
		if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
		    runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
#ifdef OSS_DEBUG
			pcm_dbg(substream->pcm,
				"pcm_oss: readv: recovering from %s\n",
				runtime->status->state == SNDRV_PCM_STATE_XRUN ?
				"XRUN" : "SUSPEND");
#endif
			/* Drain to leave the XRUN/SUSPENDED state before retrying. */
			ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
			if (ret < 0)
				break;
		} else if (runtime->status->state == SNDRV_PCM_STATE_SETUP) {
			/* Stream configured but not prepared yet: prepare first. */
			ret = snd_pcm_oss_prepare(substream);
			if (ret < 0)
				break;
		}
		ret = snd_pcm_kernel_readv(substream, bufs, frames);
		/* Retry only on underrun/suspend; everything else is final. */
		if (ret != -EPIPE && ret != -ESTRPIPE)
			break;
	}
	return ret;
}
| 0
|
508,397
|
/*
  Pair the field and value lists positionally and, for every value item,
  walk it with Item::enchant_default_with_arg_processor, passing the
  corresponding field item as the processor argument.  Iteration stops
  when the value list is exhausted.
*/
void setup_defaults(THD *thd, List<Item> &fields, List<Item> &values)
{
  List_iterator<Item> field_it(fields);
  List_iterator<Item> value_it(values);
  Item *val;
  while ((val= value_it++))
  {
    Item *fld= field_it++;
    val->walk(&Item::enchant_default_with_arg_processor, false, fld);
  }
}
| 0
|
310,314
|
/* Check whether the address published by router <b>ri</b> is acceptable
 * for this directory server.  Return 0 if the address is fine (or if the
 * DirAllowPrivateAddresses option makes us accept anything), and -1 if
 * it is not a parseable IPv4 dotted-quad or is an internal/private IP. */
dirserv_router_has_valid_address(routerinfo_t *ri)
{
  struct in_addr iaddr;
  if (get_options()->DirAllowPrivateAddresses)
    return 0; /* whatever it is, we're fine with it */
  if (!tor_inet_aton(ri->address, &iaddr)) {
    log_info(LD_DIRSERV,"Router %s published non-IP address '%s'. Refusing.",
             router_describe(ri),
             ri->address);
    return -1;
  }
  if (is_internal_IP(ntohl(iaddr.s_addr), 0)) {
    log_info(LD_DIRSERV,
             "Router %s published internal IP address '%s'. Refusing.",
             router_describe(ri), ri->address);
    return -1; /* it's a private IP, we should reject it */
  }
  return 0;
}
| 0
|
259,599
|
// Fetch up to eight scan lines of component c from the largest scale and
// place them into the decoding-MCU slots reserved for this component,
// stopping early when the bottom of the image is reached.
void HierarchicalBitmapRequester::Pull8Lines(UBYTE c)
{
  ULONG line = m_pulY[c];
  //
  // Fill at most eight slots; each slot must still be vacant.
  for(int slot = 0;slot < 8;slot++) {
    if (line >= m_pulHeight[c])
      break;
    assert(m_ppDecodingMCU[slot | (c << 3)] == NULL);
    m_ppDecodingMCU[slot | (c << 3)] = m_pLargestScale->GetNextLine(c);
    line++;
  }
}
| 0
|
463,161
|
/*
 * Rename one mailbox's annotations: copy the per-folder annotation records
 * held in the global annotation db from (oldmailbox, olduserid) to
 * (newmailbox, newuserid) inside a single transaction.  Old records are
 * intentionally left behind here -- a later delete pass disposes of them.
 * The per-folder annotation database file itself is moved/linked separately
 * by mailbox_copy_files().  Returns 0 on success or a cyrusdb error code.
 */
EXPORTED int annotate_rename_mailbox(struct mailbox *oldmailbox,
                                     struct mailbox *newmailbox)
{
    /* rename one mailbox */
    char *olduserid = mboxname_to_userid(oldmailbox->name);
    char *newuserid = mboxname_to_userid(newmailbox->name);
    annotate_db_t *d = NULL;
    int r = 0;

    init_internal();

    /* rewrite any per-folder annotations from the global db */
    r = _annotate_getdb(NULL, 0, /*don't create*/0, &d);
    if (r == CYRUSDB_NOTFOUND) {
        /* no global database, must not be anything to rename */
        r = 0;
        goto done;
    }
    if (r) goto done;

    annotate_begin(d);

    /* copy here - delete will dispose of old records later */
    r = _annotate_rewrite(oldmailbox, 0, olduserid,
                          newmailbox, 0, newuserid,
                          /*copy*/1);
    if (r) goto done;

    r = annotate_commit(d);
    if (r) goto done;

    /*
     * The per-folder database got moved or linked by mailbox_copy_files().
     */

    /* cleanup runs on both success and failure paths */
done:
    annotate_putdb(&d);
    free(olduserid);
    free(newuserid);
    return r;
}
| 0
|
252,470
|
/* Extract archive entry `file_index` into a freshly allocated heap buffer
 * (allocated via the archive's m_pAlloc callback; caller frees with
 * m_pFree/mz_free).  With MZ_ZIP_FLAG_COMPRESSED_DATA the raw compressed
 * bytes are returned, otherwise the decompressed data.  On success *pSize
 * receives the buffer size; NULL is returned on any failure (bad index,
 * entry larger than 0x7FFFFFFF when size_t is 32-bit, allocation failure,
 * or extraction error). */
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;

  if (pSize) *pSize = 0;
  if (!p) return NULL;

  /* Sizes come from the 32-bit central-directory header fields. */
  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);

  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
  /* Reject allocations that would overflow a 32-bit size_t; the `(0, ...)`
   * comma form on MSVC presumably just silences the constant-conditional
   * warning. */
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
    return NULL;
  if (NULL ==
      (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
    return NULL;

  if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
                                    flags)) {
    /* Extraction failed: release the buffer so the caller never sees it. */
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    return NULL;
  }

  if (pSize) *pSize = (size_t)alloc_size;
  return pBuf;
}
| 0
|
195,238
|
/* Build a seccomp BPF filter for the sandboxed app and hand it to bubblewrap.
 *
 * The filter's default action is ALLOW; a fixed blocklist of syscalls
 * (plus perf/ptrace/personality when not in --devel mode) is made to fail
 * with EPERM, and socket() is restricted to an allowlist of address
 * families (all other families fail with EAFNOSUPPORT).  The compiled BPF
 * program is written to an anonymous tmpfile whose fd is passed to bwrap
 * via "--seccomp".  Returns TRUE on success, FALSE with @error set.
 */
setup_seccomp (FlatpakBwrap *bwrap,
               const char *arch,
               gulong allowed_personality,
               FlatpakRunFlags run_flags,
               GError **error)
{
  gboolean multiarch = (run_flags & FLATPAK_RUN_FLAG_MULTIARCH) != 0;
  gboolean devel = (run_flags & FLATPAK_RUN_FLAG_DEVEL) != 0;

  /* cleanup attribute guarantees seccomp_release() on every exit path. */
  __attribute__((cleanup (cleanup_seccomp))) scmp_filter_ctx seccomp = NULL;

  /**** BEGIN NOTE ON CODE SHARING
   *
   * There are today a number of different Linux container
   * implementations.  That will likely continue for long into the
   * future.  But we can still try to share code, and it's important
   * to do so because it affects what library and application writers
   * can do, and we should support code portability between different
   * container tools.
   *
   * This syscall blocklist is copied from linux-user-chroot, which was in turn
   * clearly influenced by the Sandstorm.io blocklist.
   *
   * If you make any changes here, I suggest sending the changes along
   * to other sandbox maintainers.  Using the libseccomp list is also
   * an appropriate venue:
   * https://groups.google.com/forum/#!forum/libseccomp
   *
   * A non-exhaustive list of links to container tooling that might
   * want to share this blocklist:
   *
   *  https://github.com/sandstorm-io/sandstorm
   *    in src/sandstorm/supervisor.c++
   *  https://github.com/flatpak/flatpak.git
   *    in common/flatpak-run.c
   *  https://git.gnome.org/browse/linux-user-chroot
   *    in src/setup-seccomp.c
   *
   **** END NOTE ON CODE SHARING
   */
  struct
  {
    int                  scall;
    struct scmp_arg_cmp *arg;
  } syscall_blocklist[] = {
    /* Block dmesg */
    {SCMP_SYS (syslog)},
    /* Useless old syscall */
    {SCMP_SYS (uselib)},
    /* Don't allow disabling accounting */
    {SCMP_SYS (acct)},
    /* 16-bit code is unnecessary in the sandbox, and modify_ldt is a
       historic source of interesting information leaks. */
    {SCMP_SYS (modify_ldt)},
    /* Don't allow reading current quota use */
    {SCMP_SYS (quotactl)},

    /* Don't allow access to the kernel keyring */
    {SCMP_SYS (add_key)},
    {SCMP_SYS (keyctl)},
    {SCMP_SYS (request_key)},

    /* Scary VM/NUMA ops */
    {SCMP_SYS (move_pages)},
    {SCMP_SYS (mbind)},
    {SCMP_SYS (get_mempolicy)},
    {SCMP_SYS (set_mempolicy)},
    {SCMP_SYS (migrate_pages)},

    /* Don't allow subnamespace setups: */
    {SCMP_SYS (unshare)},
    {SCMP_SYS (mount)},
    {SCMP_SYS (pivot_root)},
#if defined(__s390__) || defined(__s390x__) || defined(__CRIS__)
    /* Architectures with CONFIG_CLONE_BACKWARDS2: the child stack
     * and flags arguments are reversed so the flags come second */
    {SCMP_SYS (clone), &SCMP_A1 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#else
    /* Normally the flags come first */
    {SCMP_SYS (clone), &SCMP_A0 (SCMP_CMP_MASKED_EQ, CLONE_NEWUSER, CLONE_NEWUSER)},
#endif

    /* Don't allow faking input to the controlling tty (CVE-2017-5226) */
    {SCMP_SYS (ioctl), &SCMP_A1 (SCMP_CMP_MASKED_EQ, 0xFFFFFFFFu, (int) TIOCSTI)},
  };

  struct
  {
    int                  scall;
    struct scmp_arg_cmp *arg;
  } syscall_nondevel_blocklist[] = {
    /* Profiling operations; we expect these to be done by tools from outside
     * the sandbox.  In particular perf has been the source of many CVEs.
     */
    {SCMP_SYS (perf_event_open)},
    /* Don't allow you to switch to bsd emulation or whatnot */
    {SCMP_SYS (personality), &SCMP_A0 (SCMP_CMP_NE, allowed_personality)},
    {SCMP_SYS (ptrace)}
  };
  /* Blocklist all but unix, inet, inet6 and netlink */
  struct
  {
    int             family;
    FlatpakRunFlags flags_mask;
  } socket_family_allowlist[] = {
    /* NOTE: Keep in numerical order */
    { AF_UNSPEC, 0 },
    { AF_LOCAL, 0 },
    { AF_INET, 0 },
    { AF_INET6, 0 },
    { AF_NETLINK, 0 },
    { AF_CAN, FLATPAK_RUN_FLAG_CANBUS },
    { AF_BLUETOOTH, FLATPAK_RUN_FLAG_BLUETOOTH },
  };
  int last_allowed_family;
  int i, r;
  g_auto(GLnxTmpfile) seccomp_tmpf  = { 0, };

  seccomp = seccomp_init (SCMP_ACT_ALLOW);
  if (!seccomp)
    return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Initialize seccomp failed"));

  if (arch != NULL)
    {
      uint32_t arch_id = 0;
      const uint32_t *extra_arches = NULL;

      if (strcmp (arch, "i386") == 0)
        {
          arch_id = SCMP_ARCH_X86;
        }
      else if (strcmp (arch, "x86_64") == 0)
        {
          arch_id = SCMP_ARCH_X86_64;
          extra_arches = seccomp_x86_64_extra_arches;
        }
      else if (strcmp (arch, "arm") == 0)
        {
          arch_id = SCMP_ARCH_ARM;
        }
#ifdef SCMP_ARCH_AARCH64
      else if (strcmp (arch, "aarch64") == 0)
        {
          arch_id = SCMP_ARCH_AARCH64;
          extra_arches = seccomp_aarch64_extra_arches;
        }
#endif

      /* We only really need to handle arches on multiarch systems.
       * If only one arch is supported the default is fine */
      if (arch_id != 0)
        {
          /* This *adds* the target arch, instead of replacing the
             native one. This is not ideal, because we'd like to only
             allow the target arch, but we can't really disallow the
             native arch at this point, because then bubblewrap
             couldn't continue running. */
          r = seccomp_arch_add (seccomp, arch_id);
          if (r < 0 && r != -EEXIST)
            return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add architecture to seccomp filter"));

          if (multiarch && extra_arches != NULL)
            {
              for (i = 0; extra_arches[i] != 0; i++)
                {
                  r = seccomp_arch_add (seccomp, extra_arches[i]);
                  if (r < 0 && r != -EEXIST)
                    return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to add multiarch architecture to seccomp filter"));
                }
            }
        }
    }

  /* TODO: Should we filter the kernel keyring syscalls in some way?
   * We do want them to be used by desktop apps, but they could also perhaps
   * leak system stuff or secrets from other apps.
   */

  for (i = 0; i < G_N_ELEMENTS (syscall_blocklist); i++)
    {
      int scall = syscall_blocklist[i].scall;
      if (syscall_blocklist[i].arg)
        r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (EPERM), scall, 1, *syscall_blocklist[i].arg);
      else
        r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (EPERM), scall, 0);
      /* NOTE(review): only -EFAULT (syscall unknown to libseccomp) is
       * treated as fatal here; any other negative return from
       * seccomp_rule_add() is silently ignored.  Presumably a deliberate
       * best-effort choice, but worth confirming. */
      if (r < 0 && r == -EFAULT /* unknown syscall */)
        return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
    }

  if (!devel)
    {
      for (i = 0; i < G_N_ELEMENTS (syscall_nondevel_blocklist); i++)
        {
          int scall = syscall_nondevel_blocklist[i].scall;
          if (syscall_nondevel_blocklist[i].arg)
            r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (EPERM), scall, 1, *syscall_nondevel_blocklist[i].arg);
          else
            r = seccomp_rule_add (seccomp, SCMP_ACT_ERRNO (EPERM), scall, 0);

          /* NOTE(review): same -EFAULT-only error handling as above. */
          if (r < 0 && r == -EFAULT /* unknown syscall */)
            return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to block syscall %d"), scall);
        }
    }

  /* Socket filtering doesn't work on e.g. i386, so ignore failures here
   * However, we need to user seccomp_rule_add_exact to avoid libseccomp doing
   * something else: https://github.com/seccomp/libseccomp/issues/8 */
  last_allowed_family = -1;
  for (i = 0; i < G_N_ELEMENTS (socket_family_allowlist); i++)
    {
      int family = socket_family_allowlist[i].family;
      int disallowed;

      if (socket_family_allowlist[i].flags_mask != 0 &&
          (socket_family_allowlist[i].flags_mask & run_flags) != socket_family_allowlist[i].flags_mask)
        continue;

      for (disallowed = last_allowed_family + 1; disallowed < family; disallowed++)
        {
          /* Blocklist the in-between valid families */
          seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_EQ, disallowed));
        }
      last_allowed_family = family;
    }
  /* Blocklist the rest */
  seccomp_rule_add_exact (seccomp, SCMP_ACT_ERRNO (EAFNOSUPPORT), SCMP_SYS (socket), 1, SCMP_A0 (SCMP_CMP_GE, last_allowed_family + 1));

  /* Compile the filter to BPF in an anonymous tmpfile and pass the fd on. */
  if (!glnx_open_anonymous_tmpfile_full (O_RDWR | O_CLOEXEC, "/tmp", &seccomp_tmpf, error))
    return FALSE;

  if (seccomp_export_bpf (seccomp, seccomp_tmpf.fd) != 0)
    return flatpak_fail_error (error, FLATPAK_ERROR_SETUP_FAILED, _("Failed to export bpf"));

  lseek (seccomp_tmpf.fd, 0, SEEK_SET);

  flatpak_bwrap_add_args_data_fd (bwrap,
                                  "--seccomp", glnx_steal_fd (&seccomp_tmpf.fd), NULL);

  return TRUE;
}
| 1
|
226,124
|
/* Destructor for the ViewIdentifierBox ('vwid'): free each view's
 * view_refs array, then the views array, then the box itself. */
void vwid_box_del(GF_Box *s)
{
	u32 i;
	GF_ViewIdentifierBox *ptr = (GF_ViewIdentifierBox *) s;
	if (ptr->views) {
		for (i=0; i<ptr->num_views; i++) {
			if (ptr->views[i].view_refs)
				gf_free(ptr->views[i].view_refs);
		}
		gf_free(ptr->views);
	}
	gf_free(ptr);
| 0
|
513,098
|
/* Build the Field for the target table of CREATE ... SELECT; simply
   delegates to create_table_field_from_handler(). */
Field *create_field_for_create_select(TABLE *table)
{ return create_table_field_from_handler(table); }
| 0
|
289,300
|
/*
 * Return the OSS sample format of the currently active substream, or a
 * negative errno if no active substream can be obtained.
 */
static int snd_pcm_oss_get_format(struct snd_pcm_oss_file *pcm_oss_file)
{
	struct snd_pcm_substream *substream;
	int rc;

	rc = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream);
	if (rc < 0)
		return rc;

	return substream->runtime->oss.format;
}
| 0
|
447,058
|
void SshIo::SshImpl::writeRemote(const byte* data, size_t size, long from, long to)
{
if (protocol_ == pSftp) throw Error(1, "not support SFTP write access.");
//printf("ssh update size=%ld from=%ld to=%ld\n", (long)size, from, to);
assert(isMalloced_);
std::string tempFile = hostInfo_.Path + ".exiv2tmp";
std::string response;
std::stringstream ss;
// copy the head (byte 0 to byte fromByte) of original file to filepath.exiv2tmp
ss << "head -c " << from
<< " " << hostInfo_.Path
<< " > " << tempFile;
std::string cmd = ss.str();
if (ssh_->runCommand(cmd, &response) != 0) {
throw Error(1, "SSH: Unable to cope the head of file to temp");
}
// upload the data (the byte ranges which are different between the original
// file and the new file) to filepath.exiv2datatemp
if (ssh_->scp(hostInfo_.Path + ".exiv2datatemp", data, size) != 0) {
throw Error(1, "SSH: Unable to copy file");
}
// concatenate the filepath.exiv2datatemp to filepath.exiv2tmp
cmd = "cat " + hostInfo_.Path + ".exiv2datatemp >> " + tempFile;
if (ssh_->runCommand(cmd, &response) != 0) {
throw Error(1, "SSH: Unable to copy the rest");
}
// copy the tail (from byte toByte to the end of file) of original file to filepath.exiv2tmp
ss.str("");
ss << "tail -c+" << (to + 1)
<< " " << hostInfo_.Path
<< " >> " << tempFile;
cmd = ss.str();
if (ssh_->runCommand(cmd, &response) != 0) {
throw Error(1, "SSH: Unable to copy the rest");
}
// replace the original file with filepath.exiv2tmp
cmd = "mv " + tempFile + " " + hostInfo_.Path;
if (ssh_->runCommand(cmd, &response) != 0) {
throw Error(1, "SSH: Unable to copy the rest");
}
// remove filepath.exiv2datatemp
cmd = "rm " + hostInfo_.Path + ".exiv2datatemp";
if (ssh_->runCommand(cmd, &response) != 0) {
throw Error(1, "SSH: Unable to copy the rest");
}
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.