idx
int64 | func
string | target
int64 |
|---|---|---|
247,609
|
TEST_P(SslSocketTest, UpstreamNotReadySslSocket) {
Stats::TestUtil::TestStore stats_store;
NiceMock<LocalInfo::MockLocalInfo> local_info;
testing::NiceMock<Server::Configuration::MockTransportSocketFactoryContext> factory_context;
NiceMock<Init::MockManager> init_manager;
NiceMock<Event::MockDispatcher> dispatcher;
EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info));
EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store));
EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager));
EXPECT_CALL(factory_context, mainThreadDispatcher()).WillRepeatedly(ReturnRef(dispatcher));
envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context;
auto sds_secret_configs =
tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add();
sds_secret_configs->set_name("abc.com");
sds_secret_configs->mutable_sds_config();
auto client_cfg = std::make_unique<ClientContextConfigImpl>(tls_context, factory_context);
EXPECT_TRUE(client_cfg->tlsCertificates().empty());
EXPECT_FALSE(client_cfg->isReady());
ContextManagerImpl manager(time_system_);
ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager, stats_store);
auto transport_socket = client_ssl_socket_factory.createTransportSocket(nullptr);
EXPECT_EQ(EMPTY_STRING, transport_socket->protocol());
EXPECT_EQ(nullptr, transport_socket->ssl());
EXPECT_EQ(true, transport_socket->canFlushClose());
Buffer::OwnedImpl buffer;
Network::IoResult result = transport_socket->doRead(buffer);
EXPECT_EQ(Network::PostIoAction::Close, result.action_);
result = transport_socket->doWrite(buffer, true);
EXPECT_EQ(Network::PostIoAction::Close, result.action_);
EXPECT_EQ("TLS error: Secret is not supplied by SDS", transport_socket->failureReason());
}
| 0
|
90,139
|
std::string CellularNetwork::GetActivationStateString() const {
return ActivationStateToString(this->activation_state_);
}
| 0
|
441,806
|
SProcXkbGetNamedIndicator(ClientPtr client)
{
REQUEST(xkbGetNamedIndicatorReq);
swaps(&stuff->length);
REQUEST_SIZE_MATCH(xkbGetNamedIndicatorReq);
swaps(&stuff->deviceSpec);
swaps(&stuff->ledClass);
swaps(&stuff->ledID);
swapl(&stuff->indicator);
return ProcXkbGetNamedIndicator(client);
}
| 0
|
356,695
|
void Statement::Work_BeginRun(Baton* baton) {
STATEMENT_BEGIN(Run);
}
| 0
|
317,142
|
static void selinux_cred_getsecid(const struct cred *c, u32 *secid)
{
*secid = cred_sid(c);
}
| 0
|
197,517
|
static json_t * check_attestation_fido_u2f(json_t * j_params, unsigned char * credential_id, size_t credential_id_len, unsigned char * cert_x, size_t cert_x_len, unsigned char * cert_y, size_t cert_y_len, cbor_item_t * att_stmt, unsigned char * rpid_hash, size_t rpid_hash_len, const unsigned char * client_data) {
json_t * j_error = json_array(), * j_return;
cbor_item_t * key = NULL, * x5c = NULL, * sig = NULL, * att_cert = NULL;
int i, ret;
char * message = NULL;
gnutls_pubkey_t pubkey = NULL;
gnutls_x509_crt_t cert = NULL;
gnutls_datum_t cert_dat, data, signature, cert_issued_by;
unsigned char data_signed[200], client_data_hash[32], cert_export[32], cert_export_b64[64];
size_t data_signed_offset = 0, client_data_hash_len = 32, cert_export_len = 32, cert_export_b64_len = 0;
if (j_error != NULL) {
do {
if (gnutls_x509_crt_init(&cert)) {
json_array_append_new(j_error, json_string("check_attestation_fido_u2f - Error gnutls_x509_crt_init"));
break;
}
if (gnutls_pubkey_init(&pubkey)) {
json_array_append_new(j_error, json_string("check_attestation_fido_u2f - Error gnutls_pubkey_init"));
break;
}
// Step 1
if (att_stmt == NULL || !cbor_isa_map(att_stmt) || cbor_map_size(att_stmt) != 2) {
json_array_append_new(j_error, json_string("CBOR map value 'attStmt' invalid format"));
break;
}
for (i=0; i<2; i++) {
key = cbor_map_handle(att_stmt)[i].key;
if (cbor_isa_string(key)) {
if (0 == o_strncmp((const char *)cbor_string_handle(key), "x5c", MIN(o_strlen("x5c"), cbor_string_length(key)))) {
x5c = cbor_map_handle(att_stmt)[i].value;
} else if (0 == o_strncmp((const char *)cbor_string_handle(key), "sig", MIN(o_strlen("sig"), cbor_string_length(key)))) {
sig = cbor_map_handle(att_stmt)[i].value;
} else {
message = msprintf("attStmt map element %d key is not valid: '%.*s'", i, cbor_string_length(key), cbor_string_handle(key));
json_array_append_new(j_error, json_string(message));
o_free(message);
break;
}
} else {
message = msprintf("attStmt map element %d key is not a string", i);
json_array_append_new(j_error, json_string(message));
o_free(message);
break;
}
}
if (x5c == NULL || !cbor_isa_array(x5c) || cbor_array_size(x5c) != 1) {
json_array_append_new(j_error, json_string("CBOR map value 'x5c' invalid format"));
break;
}
att_cert = cbor_array_get(x5c, 0);
cert_dat.data = cbor_bytestring_handle(att_cert);
cert_dat.size = cbor_bytestring_length(att_cert);
if ((ret = gnutls_x509_crt_import(cert, &cert_dat, GNUTLS_X509_FMT_DER)) < 0) {
json_array_append_new(j_error, json_string("Error importing x509 certificate"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_fido_u2f - Error gnutls_pcert_import_x509_raw: %d", ret);
break;
}
if (json_object_get(j_params, "root-ca-list") != json_null() && validate_certificate_from_root(j_params, cert, x5c) != G_OK) {
json_array_append_new(j_error, json_string("Unrecognized certificate authority"));
if (gnutls_x509_crt_get_issuer_dn2(cert, &cert_issued_by) >= 0) {
message = msprintf("Unrecognized certificate autohority: %.*s", cert_issued_by.size, cert_issued_by.data);
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_fido_u2f - %s", message);
o_free(message);
gnutls_free(cert_issued_by.data);
} else {
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_fido_u2f - Unrecognized certificate autohority (unable to get issuer dn)");
}
break;
}
if ((ret = gnutls_pubkey_import_x509(pubkey, cert, 0)) < 0) {
json_array_append_new(j_error, json_string("Error importing x509 certificate"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_fido_u2f - Error gnutls_pubkey_import_x509: %d", ret);
break;
}
if ((ret = gnutls_x509_crt_get_key_id(cert, GNUTLS_KEYID_USE_SHA256, cert_export, &cert_export_len)) < 0) {
json_array_append_new(j_error, json_string("Error exporting x509 certificate"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_fido_u2f - Error gnutls_x509_crt_get_key_id: %d", ret);
break;
}
if (!o_base64_encode(cert_export, cert_export_len, cert_export_b64, &cert_export_b64_len)) {
json_array_append_new(j_error, json_string("Internal error"));
y_log_message(Y_LOG_LEVEL_DEBUG, "check_attestation_fido_u2f - Error o_base64_encode cert_export");
break;
}
if (!generate_digest_raw(digest_SHA256, client_data, o_strlen((char *)client_data), client_data_hash, &client_data_hash_len)) {
json_array_append_new(j_error, json_string("Internal error"));
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_fido_u2f - Error generate_digest_raw client_data");
break;
}
if (sig == NULL || !cbor_isa_bytestring(sig)) {
json_array_append_new(j_error, json_string("Error sig is not a bytestring"));
break;
}
// Build bytestring to verify signature
data_signed[0] = 0x0;
data_signed_offset = 1;
memcpy(data_signed+data_signed_offset, rpid_hash, rpid_hash_len);
data_signed_offset += rpid_hash_len;
memcpy(data_signed+data_signed_offset, client_data_hash, client_data_hash_len);
data_signed_offset+=client_data_hash_len;
memcpy(data_signed+data_signed_offset, credential_id, credential_id_len);
data_signed_offset+=credential_id_len;
data_signed[data_signed_offset] = 0x04;
data_signed_offset++;
memcpy(data_signed+data_signed_offset, cert_x, cert_x_len);
data_signed_offset+=cert_x_len;
memcpy(data_signed+data_signed_offset, cert_y, cert_y_len);
data_signed_offset+=cert_y_len;
// Let's verify sig over data_signed
data.data = data_signed;
data.size = data_signed_offset;
signature.data = cbor_bytestring_handle(sig);
signature.size = cbor_bytestring_length(sig);
if (gnutls_pubkey_verify_data2(pubkey, GNUTLS_SIGN_ECDSA_SHA256, 0, &data, &signature)) {
json_array_append_new(j_error, json_string("Invalid signature"));
}
} while (0);
if (json_array_size(j_error)) {
j_return = json_pack("{sisO}", "result", G_ERROR_PARAM, "error", j_error);
} else {
j_return = json_pack("{sis{ss%}}", "result", G_OK, "data", "certificate", cert_export_b64, cert_export_b64_len);
}
json_decref(j_error);
gnutls_pubkey_deinit(pubkey);
gnutls_x509_crt_deinit(cert);
if (att_cert != NULL) {
cbor_decref(&att_cert);
}
} else {
y_log_message(Y_LOG_LEVEL_ERROR, "check_attestation_fido_u2f - Error allocating resources for j_error");
j_return = json_pack("{si}", "result", G_ERROR);
}
return j_return;
}
| 1
|
101,696
|
void WebProcessProxy::removeMessageReceiver(CoreIPC::StringReference messageReceiverName, uint64_t destinationID)
{
m_messageReceiverMap.removeMessageReceiver(messageReceiverName, destinationID);
}
| 0
|
230,461
|
ns_input(void)
{
uint8_t flags = 0;
LOG_INFO("Received NS from ");
LOG_INFO_6ADDR(&UIP_IP_BUF->srcipaddr);
LOG_INFO_(" to ");
LOG_INFO_6ADDR(&UIP_IP_BUF->destipaddr);
LOG_INFO_(" with target address ");
LOG_INFO_6ADDR((uip_ipaddr_t *) (&UIP_ND6_NS_BUF->tgtipaddr));
LOG_INFO_("\n");
UIP_STAT(++uip_stat.nd6.recv);
#if UIP_CONF_IPV6_CHECKS
if((UIP_IP_BUF->ttl != UIP_ND6_HOP_LIMIT) ||
(uip_is_addr_mcast(&UIP_ND6_NS_BUF->tgtipaddr)) ||
(UIP_ICMP_BUF->icode != 0)) {
LOG_ERR("NS received is bad\n");
goto discard;
}
#endif /* UIP_CONF_IPV6_CHECKS */
/* Options processing */
nd6_opt_llao = NULL;
nd6_opt_offset = UIP_ND6_NS_LEN;
while(uip_l3_icmp_hdr_len + nd6_opt_offset + UIP_ND6_OPT_HDR_LEN < uip_len) {
#if UIP_CONF_IPV6_CHECKS
if(ND6_OPT_HDR_BUF(nd6_opt_offset)->len == 0) {
LOG_ERR("NS received is bad\n");
goto discard;
}
#endif /* UIP_CONF_IPV6_CHECKS */
switch (ND6_OPT_HDR_BUF(nd6_opt_offset)->type) {
case UIP_ND6_OPT_SLLAO:
if(uip_l3_icmp_hdr_len + nd6_opt_offset +
UIP_ND6_OPT_DATA_OFFSET + UIP_LLADDR_LEN > uip_len) {
LOG_ERR("Insufficient data for NS SLLAO option\n");
goto discard;
}
nd6_opt_llao = &uip_buf[uip_l3_icmp_hdr_len + nd6_opt_offset];
#if UIP_CONF_IPV6_CHECKS
/* There must be NO option in a DAD NS */
if(uip_is_addr_unspecified(&UIP_IP_BUF->srcipaddr)) {
LOG_ERR("NS received is bad\n");
goto discard;
} else {
#endif /*UIP_CONF_IPV6_CHECKS */
uip_lladdr_t lladdr_aligned;
extract_lladdr_from_llao_aligned(&lladdr_aligned);
nbr = uip_ds6_nbr_lookup(&UIP_IP_BUF->srcipaddr);
if(nbr == NULL) {
uip_ds6_nbr_add(&UIP_IP_BUF->srcipaddr, &lladdr_aligned,
0, NBR_STALE, NBR_TABLE_REASON_IPV6_ND, NULL);
} else {
const uip_lladdr_t *lladdr = uip_ds6_nbr_get_ll(nbr);
if(lladdr == NULL) {
goto discard;
}
if(memcmp(&nd6_opt_llao[UIP_ND6_OPT_DATA_OFFSET],
lladdr, UIP_LLADDR_LEN) != 0) {
if(uip_ds6_nbr_update_ll(&nbr,
(const uip_lladdr_t *)&lladdr_aligned)
< 0) {
/* failed to update the lladdr */
goto discard;
}
nbr->state = NBR_STALE;
} else {
if(nbr->state == NBR_INCOMPLETE) {
nbr->state = NBR_STALE;
}
}
}
#if UIP_CONF_IPV6_CHECKS
}
#endif /*UIP_CONF_IPV6_CHECKS */
break;
default:
LOG_WARN("ND option not supported in NS");
break;
}
nd6_opt_offset += (ND6_OPT_HDR_BUF(nd6_opt_offset)->len << 3);
}
addr = uip_ds6_addr_lookup(&UIP_ND6_NS_BUF->tgtipaddr);
if(addr != NULL) {
if(uip_is_addr_unspecified(&UIP_IP_BUF->srcipaddr)) {
/* DAD CASE */
#if UIP_ND6_DEF_MAXDADNS > 0
#if UIP_CONF_IPV6_CHECKS
if(!uip_is_addr_solicited_node(&UIP_IP_BUF->destipaddr)) {
LOG_ERR("NS received is bad\n");
goto discard;
}
#endif /* UIP_CONF_IPV6_CHECKS */
if(addr->state != ADDR_TENTATIVE) {
uip_create_linklocal_allnodes_mcast(&UIP_IP_BUF->destipaddr);
uip_ds6_select_src(&UIP_IP_BUF->srcipaddr, &UIP_IP_BUF->destipaddr);
flags = UIP_ND6_NA_FLAG_OVERRIDE;
goto create_na;
} else {
/** \todo if I sent a NS before him, I win */
uip_ds6_dad_failed(addr);
goto discard;
}
#else /* UIP_ND6_DEF_MAXDADNS > 0 */
goto discard; /* DAD CASE */
#endif /* UIP_ND6_DEF_MAXDADNS > 0 */
}
#if UIP_CONF_IPV6_CHECKS
if(uip_ds6_is_my_addr(&UIP_IP_BUF->srcipaddr)) {
/**
* \NOTE do we do something here? we both are using the same address.
* If we are doing dad, we could cancel it, though we should receive a
* NA in response of DAD NS we sent, hence DAD will fail anyway. If we
* were not doing DAD, it means there is a duplicate in the network!
*/
LOG_ERR("NS received is bad\n");
goto discard;
}
#endif /*UIP_CONF_IPV6_CHECKS */
/* Address resolution case */
if(uip_is_addr_solicited_node(&UIP_IP_BUF->destipaddr)) {
uip_ipaddr_copy(&UIP_IP_BUF->destipaddr, &UIP_IP_BUF->srcipaddr);
uip_ipaddr_copy(&UIP_IP_BUF->srcipaddr, &UIP_ND6_NS_BUF->tgtipaddr);
flags = UIP_ND6_NA_FLAG_SOLICITED | UIP_ND6_NA_FLAG_OVERRIDE;
goto create_na;
}
/* NUD CASE */
if(uip_ds6_addr_lookup(&UIP_IP_BUF->destipaddr) == addr) {
uip_ipaddr_copy(&UIP_IP_BUF->destipaddr, &UIP_IP_BUF->srcipaddr);
uip_ipaddr_copy(&UIP_IP_BUF->srcipaddr, &UIP_ND6_NS_BUF->tgtipaddr);
flags = UIP_ND6_NA_FLAG_SOLICITED | UIP_ND6_NA_FLAG_OVERRIDE;
goto create_na;
} else {
#if UIP_CONF_IPV6_CHECKS
LOG_ERR("NS received is bad\n");
goto discard;
#endif /* UIP_CONF_IPV6_CHECKS */
}
} else {
goto discard;
}
create_na:
/* If the node is a router it should set R flag in NAs */
#if UIP_CONF_ROUTER
flags = flags | UIP_ND6_NA_FLAG_ROUTER;
#endif
uipbuf_clear();
UIP_IP_BUF->vtc = 0x60;
UIP_IP_BUF->tcflow = 0;
UIP_IP_BUF->flow = 0;
uipbuf_set_len_field(UIP_IP_BUF, UIP_ICMPH_LEN + UIP_ND6_NA_LEN + UIP_ND6_OPT_LLAO_LEN);
UIP_IP_BUF->proto = UIP_PROTO_ICMP6;
UIP_IP_BUF->ttl = UIP_ND6_HOP_LIMIT;
UIP_ICMP_BUF->type = ICMP6_NA;
UIP_ICMP_BUF->icode = 0;
UIP_ND6_NA_BUF->flagsreserved = flags;
memcpy(&UIP_ND6_NA_BUF->tgtipaddr, &addr->ipaddr, sizeof(uip_ipaddr_t));
create_llao(&uip_buf[uip_l3_icmp_hdr_len + UIP_ND6_NA_LEN],
UIP_ND6_OPT_TLLAO);
UIP_ICMP_BUF->icmpchksum = 0;
UIP_ICMP_BUF->icmpchksum = ~uip_icmp6chksum();
uipbuf_set_len(UIP_IPH_LEN + UIP_ICMPH_LEN + UIP_ND6_NA_LEN + UIP_ND6_OPT_LLAO_LEN);
UIP_STAT(++uip_stat.nd6.sent);
LOG_INFO("Sending NA to ");
LOG_INFO_6ADDR(&UIP_IP_BUF->destipaddr);
LOG_INFO_(" from ");
LOG_INFO_6ADDR(&UIP_IP_BUF->srcipaddr);
LOG_INFO_(" with target address ");
LOG_INFO_6ADDR(&UIP_ND6_NA_BUF->tgtipaddr);
LOG_INFO_("\n");
return;
discard:
uipbuf_clear();
return;
}
| 0
|
387,808
|
void InstanceKlass::rewrite_class(TRAPS) {
assert(is_loaded(), "must be loaded");
if (is_rewritten()) {
assert(is_shared(), "rewriting an unshared class?");
return;
}
Rewriter::rewrite(this, CHECK);
set_rewritten();
}
| 0
|
384,113
|
raptor_free_xml_writer(raptor_xml_writer* xml_writer)
{
if(!xml_writer)
return;
if(xml_writer->nstack && xml_writer->my_nstack)
raptor_free_namespaces(xml_writer->nstack);
raptor_object_options_clear(&xml_writer->options);
RAPTOR_FREE(raptor_xml_writer, xml_writer);
}
| 0
|
351,178
|
static void copy_related (const char *inName, const char *outName,
const char *old_ext, const char *new_ext)
{
size_t name_len = strlen(inName);
const size_t old_len = strlen(old_ext);
const size_t new_len = strlen(new_ext);
char *in = malloc(name_len - old_len + new_len + 1);
strncpy(in, inName, (name_len - old_len));
strcpy(&in[(name_len - old_len)], new_ext);
FILE *inFile = fopen(in, "rb");
if (!inFile) {
free(in);
return;
}
name_len = strlen(outName);
char *out = malloc(name_len - old_len + new_len + 1);
if (!out) {
fprintf(stderr, "%s:%d: couldn't copy related file!\n",
__FILE__, __LINE__);
fclose(inFile);
free(in);
free(out);
return;
}
strncpy(out, outName, (name_len - old_len));
strcpy(&out[(name_len - old_len)], new_ext);
FILE *outFile = fopen(out, "wb");
int c;
while ((c = fgetc(inFile)) != EOF) {
fputc(c, outFile);
}
fclose(inFile);
fclose(outFile);
free(in);
free(out);
}
| 0
|
379,664
|
R_API void r_anal_function_delete_unused_vars(RAnalFunction *fcn) {
r_return_if_fail (fcn);
void **v;
RPVector *vars_clone = (RPVector *)r_vector_clone ((RVector *)&fcn->vars);
r_pvector_foreach (vars_clone, v) {
RAnalVar *var = *v;
if (r_vector_empty (&var->accesses)) {
r_anal_function_delete_var (fcn, var);
}
}
r_pvector_free (vars_clone);
}
| 0
|
275,497
|
njs_vm_value(njs_vm_t *vm, const njs_str_t *path, njs_value_t *retval)
{
u_char *start, *p, *end;
size_t size;
njs_int_t ret;
njs_value_t value, key;
start = path->start;
end = start + path->length;
njs_set_object(&value, &vm->global_object);
for ( ;; ) {
p = njs_strlchr(start, end, '.');
size = ((p != NULL) ? p : end) - start;
if (njs_slow_path(size == 0)) {
njs_type_error(vm, "empty path element");
return NJS_ERROR;
}
ret = njs_string_set(vm, &key, start, size);
if (njs_slow_path(ret != NJS_OK)) {
return NJS_ERROR;
}
ret = njs_value_property(vm, &value, &key, njs_value_arg(retval));
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
if (p == NULL) {
break;
}
start = p + 1;
value = *retval;
}
return NJS_OK;
}
| 0
|
427,200
|
static void whilestat (LexState *ls, int line) {
/* whilestat -> WHILE cond DO block END */
FuncState *fs = ls->fs;
int whileinit;
int condexit;
BlockCnt bl;
luaX_next(ls); /* skip WHILE */
whileinit = luaK_getlabel(fs);
condexit = cond(ls);
enterblock(fs, &bl, 1);
checknext(ls, TK_DO);
block(ls);
luaK_jumpto(fs, whileinit);
check_match(ls, TK_END, TK_WHILE, line);
leaveblock(fs);
luaK_patchtohere(fs, condexit); /* false conditions finish the loop */
}
| 0
|
196,834
|
Status SparseCountSparseOutputShapeFn(InferenceContext *c) {
auto rank = c->Dim(c->input(0), 1);
auto nvals = c->UnknownDim();
c->set_output(0, c->Matrix(nvals, rank)); // out.indices
c->set_output(1, c->Vector(nvals)); // out.values
c->set_output(2, c->Vector(rank)); // out.dense_shape
return Status::OK();
}
| 1
|
376,337
|
gpg_verify_sync (CamelCipherContext *context,
CamelMimePart *ipart,
GCancellable *cancellable,
GError **error)
{
CamelCipherContextClass *class;
CamelCipherValidity *validity;
const gchar *diagnostics = NULL;
struct _GpgCtx *gpg = NULL;
gchar *sigfile = NULL;
CamelContentType *ct;
CamelMimePart *sigpart;
CamelStream *istream = NULL, *canon_stream;
CamelMultipart *mps;
CamelStream *filter;
CamelMimeFilter *canon;
class = CAMEL_CIPHER_CONTEXT_GET_CLASS (context);
mps = (CamelMultipart *) camel_medium_get_content ((CamelMedium *) ipart);
ct = ((CamelDataWrapper *) mps)->mime_type;
/* Inline signature (using our fake mime type) or PGP/Mime signature */
if (camel_content_type_is (ct, "multipart", "signed")) {
/* PGP/Mime Signature */
const gchar *tmp;
tmp = camel_content_type_param (ct, "protocol");
if (!CAMEL_IS_MULTIPART_SIGNED (mps)
|| tmp == NULL
|| g_ascii_strcasecmp (tmp, class->sign_protocol) != 0) {
g_set_error (
error, CAMEL_ERROR, CAMEL_ERROR_GENERIC,
_("Cannot verify message signature: "
"Incorrect message format"));
return NULL;
}
if (!(istream = camel_multipart_signed_get_content_stream ((CamelMultipartSigned *) mps, NULL))) {
g_set_error (
error, CAMEL_ERROR, CAMEL_ERROR_GENERIC,
_("Cannot verify message signature: "
"Incorrect message format"));
return NULL;
}
if (!(sigpart = camel_multipart_get_part (mps, CAMEL_MULTIPART_SIGNED_SIGNATURE))) {
g_set_error (
error, CAMEL_ERROR, CAMEL_ERROR_GENERIC,
_("Cannot verify message signature: "
"Incorrect message format"));
g_object_unref (istream);
return NULL;
}
} else if (camel_content_type_is (ct, "application", "x-inlinepgp-signed")) {
/* Inline Signed */
CamelDataWrapper *content;
content = camel_medium_get_content ((CamelMedium *) ipart);
istream = camel_stream_mem_new ();
if (!camel_data_wrapper_decode_to_stream_sync (
content, istream, cancellable, error))
goto exception;
g_seekable_seek (
G_SEEKABLE (istream), 0, G_SEEK_SET, NULL, NULL);
sigpart = NULL;
} else {
/* Invalid Mimetype */
g_set_error (
error, CAMEL_ERROR, CAMEL_ERROR_GENERIC,
_("Cannot verify message signature: "
"Incorrect message format"));
return NULL;
}
/* Now start the real work of verifying the message */
#ifdef GPG_LOG
if (camel_debug_start ("gpg:sign")) {
gchar *name;
CamelStream *out;
name = g_strdup_printf ("camel-gpg.%d.verify.data", logid);
out = camel_stream_fs_new_with_name (name, O_CREAT | O_TRUNC | O_WRONLY, 0666);
if (out) {
printf ("Writing gpg verify data to '%s'\n", name);
camel_stream_write_to_stream (istream, out);
g_seekable_seek (
G_SEEKABLE (istream),
0, G_SEEK_SET, NULL, NULL);
g_object_unref (out);
}
g_free (name);
if (sigpart) {
name = g_strdup_printf ("camel-gpg.%d.verify.signature", logid++);
out = camel_stream_fs_new_with_name (name, O_CREAT | O_TRUNC | O_WRONLY, 0666);
if (out) {
printf ("Writing gpg verify signature to '%s'\n", name);
camel_data_wrapper_write_to_stream ((CamelDataWrapper *) sigpart, out);
g_object_unref (out);
}
g_free (name);
}
camel_debug_end ();
}
#endif
if (sigpart) {
sigfile = swrite (sigpart, cancellable, error);
if (sigfile == NULL) {
g_prefix_error (
error, _("Cannot verify message signature: "));
goto exception;
}
}
g_seekable_seek (G_SEEKABLE (istream), 0, G_SEEK_SET, NULL, NULL);
canon_stream = camel_stream_mem_new ();
/* strip trailing white-spaces */
filter = camel_stream_filter_new (canon_stream);
canon = camel_mime_filter_canon_new (CAMEL_MIME_FILTER_CANON_CRLF | CAMEL_MIME_FILTER_CANON_STRIP);
camel_stream_filter_add (CAMEL_STREAM_FILTER (filter), canon);
g_object_unref (canon);
camel_stream_write_to_stream (istream, filter, NULL, NULL);
g_object_unref (filter);
g_seekable_seek (G_SEEKABLE (istream), 0, G_SEEK_SET, NULL, NULL);
g_seekable_seek (G_SEEKABLE (canon_stream), 0, G_SEEK_SET, NULL, NULL);
gpg = gpg_ctx_new (context);
gpg_ctx_set_mode (gpg, GPG_CTX_MODE_VERIFY);
if (sigfile)
gpg_ctx_set_sigfile (gpg, sigfile);
gpg_ctx_set_istream (gpg, canon_stream);
if (!gpg_ctx_op_start (gpg, error))
goto exception;
while (!gpg_ctx_op_complete (gpg)) {
if (gpg_ctx_op_step (gpg, cancellable, error) == -1) {
gpg_ctx_op_cancel (gpg);
goto exception;
}
}
/* report error only when no data or didn't found signature */
if (gpg_ctx_op_wait (gpg) != 0 && (gpg->nodata || !gpg->hadsig)) {
const gchar *diagnostics;
diagnostics = gpg_ctx_get_diagnostics (gpg);
g_set_error (
error, CAMEL_ERROR, CAMEL_ERROR_GENERIC, "%s",
(diagnostics != NULL && *diagnostics != '\0') ?
diagnostics : _("Failed to execute gpg."));
goto exception;
}
validity = camel_cipher_validity_new ();
diagnostics = gpg_ctx_get_diagnostics (gpg);
camel_cipher_validity_set_description (validity, diagnostics);
if (gpg->validsig) {
if (gpg->trust == GPG_TRUST_UNDEFINED || gpg->trust == GPG_TRUST_NONE)
validity->sign.status = CAMEL_CIPHER_VALIDITY_SIGN_UNKNOWN;
else if (gpg->trust != GPG_TRUST_NEVER)
validity->sign.status = CAMEL_CIPHER_VALIDITY_SIGN_GOOD;
else
validity->sign.status = CAMEL_CIPHER_VALIDITY_SIGN_BAD;
} else if (gpg->nopubkey) {
validity->sign.status = CAMEL_CIPHER_VALIDITY_SIGN_NEED_PUBLIC_KEY;
} else {
validity->sign.status = CAMEL_CIPHER_VALIDITY_SIGN_BAD;
}
add_signers (validity, gpg->signers);
gpg_ctx_free (gpg);
if (sigfile) {
g_unlink (sigfile);
g_free (sigfile);
}
g_object_unref (istream);
g_object_unref (canon_stream);
return validity;
exception:
if (gpg != NULL)
gpg_ctx_free (gpg);
if (istream)
g_object_unref (istream);
if (sigfile) {
g_unlink (sigfile);
g_free (sigfile);
}
return NULL;
}
| 0
|
512,260
|
Item *Item_bool_rowready_func2::neg_transformer(THD *thd)
{
Item *item= negated_item(thd);
return item;
}
| 0
|
359,379
|
DEFUN (no_neighbor_capability_dynamic,
no_neighbor_capability_dynamic_cmd,
NO_NEIGHBOR_CMD2 "capability dynamic",
NO_STR
NEIGHBOR_STR
NEIGHBOR_ADDR_STR2
"Advertise capability to the peer\n"
"Advertise dynamic capability to this neighbor\n")
{
return peer_flag_unset_vty (vty, argv[0], PEER_FLAG_DYNAMIC_CAPABILITY);
}
| 0
|
214,364
|
void simplestring_addn(simplestring* target, const char* source, int add_len) {
if(target && source) {
if(!target->str) {
simplestring_init_str(target);
}
if(target->len + add_len + 1 > target->size) {
/* newsize is current length + new length */
int newsize = target->len + add_len + 1;
int incr = target->size * 2;
/* align to SIMPLESTRING_INCR increments */
newsize = newsize - (newsize % incr) + incr;
target->str = (char*)realloc(target->str, newsize);
target->size = target->str ? newsize : 0;
}
if(target->str) {
if(add_len) {
memcpy(target->str + target->len, source, add_len);
}
target->len += add_len;
target->str[target->len] = 0; /* null terminate */
}
}
}
| 1
|
336,803
|
lprn_bubble_flush(gx_device_printer * pdev, gp_file * fp, Bubble * bbl)
{
gx_device_lprn *const lprn = (gx_device_lprn *) pdev;
int i, j, bx;
byte *p;
int bx0 = bbl->brect.p.x / lprn->nBw;
int bx1 = (bbl->brect.q.x + lprn->nBw - 1) / lprn->nBw;
int bpl = gdev_mem_bytes_per_scan_line(pdev);
int x = bbl->brect.p.x * 8;
int y = bbl->brect.p.y;
int width = (bbl->brect.q.x - bbl->brect.p.x + 1) * 8;
int height = bbl->brect.q.y - bbl->brect.p.y + 1;
int maxY = lprn->BlockLine / lprn->nBh * lprn->nBh;
for (i = 0; i < height; i++) {
p = lprn->ImageBuf + ((i + y) % maxY) * bpl;
for (j = 0; j < width / 8; j++) {
if (lprn->NegativePrint)
*(lprn->TmpBuf + i * width / 8 + j) = ~*(p + j + bbl->brect.p.x);
else
*(lprn->TmpBuf + i * width / 8 + j) = *(p + j + bbl->brect.p.x);
}
}
(*lprn->image_out) (pdev, fp, x, y, width, height);
/* Initialize bubbleTbl */
for (bx = bx0; bx <= bx1; bx++) {
assert(lprn->bubbleTbl[bx] == bbl);
lprn->bubbleTbl[bx] = NULL;
}
bbl->next = lprn->freeBubbleList;
lprn->freeBubbleList = bbl;
}
| 0
|
508,869
|
static int find_keyword(Lex_input_stream *lip, uint len, bool function)
{
const char *tok= lip->get_tok_start();
SYMBOL *symbol= get_hash_symbol(tok, len, function);
if (symbol)
{
lip->yylval->symbol.symbol=symbol;
lip->yylval->symbol.str= (char*) tok;
lip->yylval->symbol.length=len;
if ((symbol->tok == NOT_SYM) &&
(lip->m_thd->variables.sql_mode & MODE_HIGH_NOT_PRECEDENCE))
return NOT2_SYM;
if ((symbol->tok == OR_OR_SYM) &&
!(lip->m_thd->variables.sql_mode & MODE_PIPES_AS_CONCAT))
return OR2_SYM;
return symbol->tok;
}
return 0;
}
| 0
|
224,167
|
std::size_t size() {
tensorflow::mutex_lock lock(mu_);
return map_.size();
}
| 0
|
233,813
|
int fmtutil_is_standard_iff_chunk(deark *c, struct de_iffctx *ictx,
u32 ct)
{
switch(ct) {
case CODE__c_:
case CODE_ANNO:
case CODE_AUTH:
case CODE_NAME:
case CODE_TEXT:
return 1;
}
return 0;
}
| 0
|
262,025
|
Proto_DumpRequest(ProtoRequest *req)
{
#if VGAUTH_PROTO_TRACE
printf("raw data: %s\n", req->rawData ? req->rawData : "<none>");
#endif
Debug("complete: %d\n", req->complete);
Debug("sequenceNumber: %d\n", req->sequenceNumber);
Log("requestType: %d(%s REQ)\n", req->reqType,
ProtoRequestTypeText(req->reqType));
switch (req->reqType) {
case PROTO_REQUEST_SESSION_REQ:
Debug("version #: %d\n", req->reqData.sessionReq.version);
Log("userName: '%s'\n", req->reqData.sessionReq.userName);
break;
case PROTO_REQUEST_CONN:
// no details
break;
case PROTO_REQUEST_ADDALIAS:
Log("userName: %s\n", req->reqData.addAlias.userName);
Log("addMapped: %d\n", req->reqData.addAlias.addMapped);
Debug("pemCert: %s\n", req->reqData.addAlias.pemCert);
if (req->reqData.addAlias.aliasInfo.type == SUBJECT_TYPE_NAMED) {
Log("Subject: %s\n", req->reqData.addAlias.aliasInfo.name);
} else if (req->reqData.addAlias.aliasInfo.type == SUBJECT_TYPE_ANY) {
Log("ANY Subject\n");
} else {
Warning("*** UNKNOWN Subject type ***\n");
}
Log("comment: %s\n", req->reqData.addAlias.aliasInfo.comment);
break;
case PROTO_REQUEST_REMOVEALIAS:
Log("userName: %s\n", req->reqData.removeAlias.userName);
Debug("pemCert: %s\n", req->reqData.removeAlias.pemCert);
if (req->reqData.removeAlias.subject.type == SUBJECT_TYPE_NAMED) {
Log("Subject: %s\n", req->reqData.removeAlias.subject.name);
} else if (req->reqData.removeAlias.subject.type == SUBJECT_TYPE_ANY) {
Log("ANY Subject\n");
} else {
Log("No Subject type specified (assuming removeAll case)\n");
}
break;
case PROTO_REQUEST_QUERYALIASES:
Log("userName: %s\n", req->reqData.queryAliases.userName);
break;
case PROTO_REQUEST_QUERYMAPPEDALIASES:
// no details
break;
case PROTO_REQUEST_CREATETICKET:
Log("userName '%s'\n", req->reqData.createTicket.userName);
break;
case PROTO_REQUEST_VALIDATETICKET:
Log("ticket '%s'\n", req->reqData.validateTicket.ticket);
break;
case PROTO_REQUEST_REVOKETICKET:
Log("ticket '%s'\n", req->reqData.revokeTicket.ticket);
break;
case PROTO_REQUEST_VALIDATE_SAML_BEARER_TOKEN:
Debug("token '%s'\n", req->reqData.validateSamlBToken.samlToken);
Log("username '%s'\n", req->reqData.validateSamlBToken.userName);
Log("validate Only '%s'\n",
req->reqData.validateSamlBToken.validateOnly ? "TRUE" : "FALSE");
break;
default:
Warning("Unknown request type -- no request specific data\n");
break;
}
}
| 0
|
516,252
|
static int virtio_net_tx_waiting_pre_load(void *opaque)
{
struct VirtIONetMigTmp *tmp = opaque;
/* Reuse the pointer setup from save */
virtio_net_tx_waiting_pre_save(opaque);
if (tmp->parent->curr_queues > tmp->parent->max_queues) {
error_report("virtio-net: curr_queues %x > max_queues %x",
tmp->parent->curr_queues, tmp->parent->max_queues);
return -EINVAL;
}
return 0; /* all good */
}
| 0
|
427,730
|
cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p)
{
size_t i;
for (i = 0; i < __arraycount(vn); i++)
if (vn[i].v == p)
return snprintf(buf, bufsiz, "%s", vn[i].n);
return snprintf(buf, bufsiz, "%#x", p);
}
| 0
|
484,814
|
static void xennet_bus_close(struct xenbus_device *dev)
{
int ret;
if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
return;
do {
xenbus_switch_state(dev, XenbusStateClosing);
ret = wait_event_timeout(module_wq,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosing ||
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosed ||
xenbus_read_driver_state(dev->otherend) ==
XenbusStateUnknown,
XENNET_TIMEOUT);
} while (!ret);
if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
return;
do {
xenbus_switch_state(dev, XenbusStateClosed);
ret = wait_event_timeout(module_wq,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosed ||
xenbus_read_driver_state(dev->otherend) ==
XenbusStateUnknown,
XENNET_TIMEOUT);
} while (!ret);
}
| 0
|
226,355
|
GF_Box *stri_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_SubTrackInformationBox, GF_ISOM_BOX_TYPE_STRI);
return (GF_Box *)tmp;
| 0
|
244,261
|
GF_Err trik_box_read(GF_Box *s,GF_BitStream *bs)
{
u32 i;
GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s;
ptr->entry_count = (u32) ptr->size;
if ((u64)ptr->entry_count > (u64)SIZE_MAX/sizeof(GF_TrickPlayBoxEntry)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid size %llu in trik\n", ptr->size));
return GF_ISOM_INVALID_FILE;
}
ptr->entries = (GF_TrickPlayBoxEntry *) gf_malloc(ptr->entry_count * sizeof(GF_TrickPlayBoxEntry) );
if (!ptr->entries) return GF_OUT_OF_MEM;
for (i=0; i< ptr->entry_count; i++) {
ptr->entries[i].pic_type = gf_bs_read_int(bs, 2);
ptr->entries[i].dependency_level = gf_bs_read_int(bs, 6);
}
return GF_OK;
}
| 0
|
401,585
|
static int proc_do_uuid(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
unsigned char buf[64], tmp_uuid[16], *uuid;
uuid = table->data;
if (!uuid) {
uuid = tmp_uuid;
generate_random_uuid(uuid);
} else {
static DEFINE_SPINLOCK(bootid_spinlock);
spin_lock(&bootid_spinlock);
if (!uuid[8])
generate_random_uuid(uuid);
spin_unlock(&bootid_spinlock);
}
sprintf(buf, "%pU", uuid);
fake_table.data = buf;
fake_table.maxlen = sizeof(buf);
return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
| 0
|
329,915
|
_cairo_image_spans (void *abstract_renderer,
int y, int height,
const cairo_half_open_span_t *spans,
unsigned num_spans)
{
cairo_image_span_renderer_t *r = abstract_renderer;
uint8_t *mask, *row;
int len;
if (num_spans == 0)
return CAIRO_STATUS_SUCCESS;
mask = r->u.mask.data + (y - r->u.mask.extents.y) * r->u.mask.stride;
mask += spans[0].x - r->u.mask.extents.x;
row = mask;
do {
len = spans[1].x - spans[0].x;
if (spans[0].coverage) {
*row++ = r->opacity * spans[0].coverage;
if (--len)
memset (row, row[-1], len);
}
row += len;
spans++;
} while (--num_spans > 1);
len = row - mask;
row = mask;
while (--height) {
mask += r->u.mask.stride;
memcpy (mask, row, len);
}
return CAIRO_STATUS_SUCCESS;
}
| 0
|
292,211
|
inbound_upart (server *serv, char *chan, char *ip, char *reason,
const message_tags_data *tags_data)
{
session *sess = find_channel (serv, chan);
if (sess)
{
if (*reason)
EMIT_SIGNAL_TIMESTAMP (XP_TE_UPARTREASON, sess, serv->nick, ip, chan,
reason, 0, tags_data->timestamp);
else
EMIT_SIGNAL_TIMESTAMP (XP_TE_UPART, sess, serv->nick, ip, chan, NULL,
0, tags_data->timestamp);
clear_channel (sess);
}
}
| 0
|
222,494
|
Status AddItem(const string& name, const NameInfoItem& item) {
if (!index_.insert({name, item}).second) {
return errors::InvalidArgument(
strings::StrCat("Duplicated ", item.is_func_arg ? "arg" : "ret",
" name: "),
name);
}
return Status::OK();
}
| 0
|
513,336
|
int join_read_key2(THD *thd, JOIN_TAB *tab, TABLE *table, TABLE_REF *table_ref)
{
int error;
if (!table->file->inited)
{
error= table->file->ha_index_init(table_ref->key, tab ? tab->sorted : TRUE);
if (error)
{
(void) report_error(table, error);
return 1;
}
}
/*
The following is needed when one makes ref (or eq_ref) access from row
comparisons: one must call row->bring_value() to get the new values.
*/
if (tab && tab->bush_children)
{
TABLE_LIST *emb_sj_nest= tab->bush_children->start->emb_sj_nest;
emb_sj_nest->sj_subq_pred->left_expr->bring_value();
}
/* TODO: Why don't we do "Late NULLs Filtering" here? */
if (cmp_buffer_with_ref(thd, table, table_ref) ||
(table->status & (STATUS_GARBAGE | STATUS_NO_PARENT | STATUS_NULL_ROW)))
{
if (table_ref->key_err)
{
table->status=STATUS_NOT_FOUND;
return -1;
}
/*
Moving away from the current record. Unlock the row
in the handler if it did not match the partial WHERE.
*/
if (tab && tab->ref.has_record && tab->ref.use_count == 0)
{
tab->read_record.table->file->unlock_row();
table_ref->has_record= FALSE;
}
error=table->file->ha_index_read_map(table->record[0],
table_ref->key_buff,
make_prev_keypart_map(table_ref->key_parts),
HA_READ_KEY_EXACT);
if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
if (! error)
{
table_ref->has_record= TRUE;
table_ref->use_count= 1;
}
}
else if (table->status == 0)
{
DBUG_ASSERT(table_ref->has_record);
table_ref->use_count++;
}
table->null_row=0;
return table->status ? -1 : 0;
}
| 0
|
300,823
|
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
sockptr_t ov, unsigned int ol)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_group_req mreq;
u32 value = 0;
int res = 0;
if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
return 0;
if (lvl != SOL_TIPC)
return -ENOPROTOOPT;
switch (opt) {
case TIPC_IMPORTANCE:
case TIPC_SRC_DROPPABLE:
case TIPC_DEST_DROPPABLE:
case TIPC_CONN_TIMEOUT:
case TIPC_NODELAY:
if (ol < sizeof(value))
return -EINVAL;
if (copy_from_sockptr(&value, ov, sizeof(u32)))
return -EFAULT;
break;
case TIPC_GROUP_JOIN:
if (ol < sizeof(mreq))
return -EINVAL;
if (copy_from_sockptr(&mreq, ov, sizeof(mreq)))
return -EFAULT;
break;
default:
if (!sockptr_is_null(ov) || ol)
return -EINVAL;
}
lock_sock(sk);
switch (opt) {
case TIPC_IMPORTANCE:
res = tsk_set_importance(sk, value);
break;
case TIPC_SRC_DROPPABLE:
if (sock->type != SOCK_STREAM)
tsk_set_unreliable(tsk, value);
else
res = -ENOPROTOOPT;
break;
case TIPC_DEST_DROPPABLE:
tsk_set_unreturnable(tsk, value);
break;
case TIPC_CONN_TIMEOUT:
tipc_sk(sk)->conn_timeout = value;
break;
case TIPC_MCAST_BROADCAST:
tsk->mc_method.rcast = false;
tsk->mc_method.mandatory = true;
break;
case TIPC_MCAST_REPLICAST:
tsk->mc_method.rcast = true;
tsk->mc_method.mandatory = true;
break;
case TIPC_GROUP_JOIN:
res = tipc_sk_join(tsk, &mreq);
break;
case TIPC_GROUP_LEAVE:
res = tipc_sk_leave(tsk);
break;
case TIPC_NODELAY:
tsk->nodelay = !!value;
tsk_set_nagle(tsk);
break;
default:
res = -EINVAL;
}
release_sock(sk);
return res;
}
| 0
|
225,457
|
Status MutableGraphView::CheckNodesCanBeDeleted(
const absl::flat_hash_set<string>& nodes_to_delete) {
std::vector<string> missing_nodes;
std::vector<string> nodes_with_fanouts;
for (const string& node_name_to_delete : nodes_to_delete) {
NodeDef* node = GetNode(node_name_to_delete);
if (node == nullptr) {
// Can't delete missing node.
missing_nodes.push_back(node_name_to_delete);
continue;
}
const int max_port = gtl::FindWithDefault(max_regular_output_port(), node,
Graph::kControlSlot);
for (int i = Graph::kControlSlot; i <= max_port; ++i) {
auto it = fanouts().find({node, i});
bool has_retained_fanout = false;
if (it != fanouts().end()) {
for (const auto& fanout : it->second) {
// Check if fanouts are of nodes to be deleted, and if so, they can be
// ignored, as they will be removed also.
if (!nodes_to_delete.contains(fanout.node->name())) {
// Removing node will leave graph in an invalid state.
has_retained_fanout = true;
break;
}
}
}
if (has_retained_fanout) {
nodes_with_fanouts.push_back(node_name_to_delete);
break;
}
}
}
// Error message can get quite long, so we only show the first 5 node names.
auto sort_and_sample = [](std::vector<string>* s) {
constexpr int kMaxNodeNames = 5;
std::sort(s->begin(), s->end());
if (s->size() > kMaxNodeNames) {
return absl::StrCat(
absl::StrJoin(s->begin(), s->begin() + kMaxNodeNames, ", "), ", ...");
}
return absl::StrJoin(*s, ", ");
};
if (!missing_nodes.empty()) {
VLOG(2) << absl::Substitute("Attempting to delete missing node(s) [$0].",
sort_and_sample(&missing_nodes));
}
if (!nodes_with_fanouts.empty()) {
std::vector<string> input_node_names(nodes_to_delete.begin(),
nodes_to_delete.end());
string params = absl::Substitute("nodes_to_delete={$0}",
sort_and_sample(&input_node_names));
string error_msg =
absl::Substitute("can't delete node(s) with retained fanouts(s) [$0]",
sort_and_sample(&nodes_with_fanouts));
return MutationError("DeleteNodes", params, error_msg);
}
return Status::OK();
}
| 0
|
482,473
|
_lou_getCharForDots(widechar d, const DisplayTableHeader *table) {
CharDotsMapping *cdPtr = getCharForDots(d, table);
if (cdPtr) return cdPtr->found;
return '\0';
}
| 0
|
369,270
|
static struct io_rsrc_node *io_rsrc_node_alloc(void)
{
struct io_rsrc_node *ref_node;
ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
if (!ref_node)
return NULL;
if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
0, GFP_KERNEL)) {
kfree(ref_node);
return NULL;
}
INIT_LIST_HEAD(&ref_node->node);
INIT_LIST_HEAD(&ref_node->rsrc_list);
ref_node->done = false;
return ref_node;
| 0
|
491,961
|
__acquires(&fc->lock)
{
struct fuse_inode *fi = get_fuse_inode(req->inode);
loff_t size = i_size_read(req->inode);
struct fuse_write_in *inarg = &req->misc.write.in;
if (!fc->connected)
goto out_free;
if (inarg->offset + PAGE_CACHE_SIZE <= size) {
inarg->size = PAGE_CACHE_SIZE;
} else if (inarg->offset < size) {
inarg->size = size & (PAGE_CACHE_SIZE - 1);
} else {
/* Got truncated off completely */
goto out_free;
}
req->in.args[1].size = inarg->size;
fi->writectr++;
fuse_request_send_background_locked(fc, req);
return;
out_free:
fuse_writepage_finish(fc, req);
spin_unlock(&fc->lock);
fuse_writepage_free(fc, req);
fuse_put_request(fc, req);
spin_lock(&fc->lock);
}
| 0
|
353,238
|
bool SplashOutputDev::maskedImageSrc(void *data, SplashColorPtr colorLine,
unsigned char *alphaLine) {
SplashOutMaskedImageData *imgData = (SplashOutMaskedImageData *)data;
unsigned char *p, *aq;
SplashColorPtr q, col;
GfxRGB rgb;
GfxGray gray;
#ifdef SPLASH_CMYK
GfxCMYK cmyk;
GfxColor deviceN;
#endif
unsigned char alpha;
unsigned char *maskPtr;
int maskBit;
int nComps, x;
if (imgData->y == imgData->height) {
return false;
}
if (!(p = imgData->imgStr->getLine())) {
return false;
}
nComps = imgData->colorMap->getNumPixelComps();
maskPtr = imgData->mask->getDataPtr() +
imgData->y * imgData->mask->getRowSize();
maskBit = 0x80;
for (x = 0, q = colorLine, aq = alphaLine;
x < imgData->width;
++x, p += nComps) {
alpha = (*maskPtr & maskBit) ? 0xff : 0x00;
if (!(maskBit >>= 1)) {
++maskPtr;
maskBit = 0x80;
}
if (imgData->lookup) {
switch (imgData->colorMode) {
case splashModeMono1:
case splashModeMono8:
*q++ = imgData->lookup[*p];
break;
case splashModeRGB8:
case splashModeBGR8:
col = &imgData->lookup[3 * *p];
*q++ = col[0];
*q++ = col[1];
*q++ = col[2];
break;
case splashModeXBGR8:
col = &imgData->lookup[4 * *p];
*q++ = col[0];
*q++ = col[1];
*q++ = col[2];
*q++ = 255;
break;
#ifdef SPLASH_CMYK
case splashModeCMYK8:
col = &imgData->lookup[4 * *p];
*q++ = col[0];
*q++ = col[1];
*q++ = col[2];
*q++ = col[3];
break;
case splashModeDeviceN8:
col = &imgData->lookup[(SPOT_NCOMPS+4) * *p];
for (int cp = 0; cp < SPOT_NCOMPS+4; cp++)
*q++ = col[cp];
break;
#endif
}
*aq++ = alpha;
} else {
switch (imgData->colorMode) {
case splashModeMono1:
case splashModeMono8:
imgData->colorMap->getGray(p, &gray);
*q++ = colToByte(gray);
break;
case splashModeXBGR8:
case splashModeRGB8:
case splashModeBGR8:
imgData->colorMap->getRGB(p, &rgb);
*q++ = colToByte(rgb.r);
*q++ = colToByte(rgb.g);
*q++ = colToByte(rgb.b);
if (imgData->colorMode == splashModeXBGR8) *q++ = 255;
break;
#ifdef SPLASH_CMYK
case splashModeCMYK8:
imgData->colorMap->getCMYK(p, &cmyk);
*q++ = colToByte(cmyk.c);
*q++ = colToByte(cmyk.m);
*q++ = colToByte(cmyk.y);
*q++ = colToByte(cmyk.k);
break;
case splashModeDeviceN8:
imgData->colorMap->getDeviceN(p, &deviceN);
for (int cp = 0; cp < SPOT_NCOMPS+4; cp++)
*q++ = colToByte(deviceN.c[cp]);
break;
#endif
}
*aq++ = alpha;
}
}
++imgData->y;
return true;
}
| 0
|
252,332
|
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return TINYEXR_ERROR_INVALID_HEADER;
}
return TINYEXR_SUCCESS;
}
| 0
|
369,233
|
static int io_msg_ring_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
if (unlikely(sqe->addr || sqe->ioprio || sqe->rw_flags ||
sqe->splice_fd_in || sqe->buf_index || sqe->personality))
return -EINVAL;
req->msg.user_data = READ_ONCE(sqe->off);
req->msg.len = READ_ONCE(sqe->len);
return 0;
}
| 0
|
313,806
|
add_to_showcmd(int c)
{
char_u *p;
int old_len;
int extra_len;
int overflow;
int i;
static int ignore[] =
{
#ifdef FEAT_GUI
K_VER_SCROLLBAR, K_HOR_SCROLLBAR,
K_LEFTMOUSE_NM, K_LEFTRELEASE_NM,
#endif
K_IGNORE, K_PS,
K_LEFTMOUSE, K_LEFTDRAG, K_LEFTRELEASE, K_MOUSEMOVE,
K_MIDDLEMOUSE, K_MIDDLEDRAG, K_MIDDLERELEASE,
K_RIGHTMOUSE, K_RIGHTDRAG, K_RIGHTRELEASE,
K_MOUSEDOWN, K_MOUSEUP, K_MOUSELEFT, K_MOUSERIGHT,
K_X1MOUSE, K_X1DRAG, K_X1RELEASE, K_X2MOUSE, K_X2DRAG, K_X2RELEASE,
K_CURSORHOLD,
0
};
if (!p_sc || msg_silent != 0)
return FALSE;
if (showcmd_visual)
{
showcmd_buf[0] = NUL;
showcmd_visual = FALSE;
}
// Ignore keys that are scrollbar updates and mouse clicks
if (IS_SPECIAL(c))
for (i = 0; ignore[i] != 0; ++i)
if (ignore[i] == c)
return FALSE;
p = transchar(c);
if (*p == ' ')
STRCPY(p, "<20>");
old_len = (int)STRLEN(showcmd_buf);
extra_len = (int)STRLEN(p);
overflow = old_len + extra_len - SHOWCMD_COLS;
if (overflow > 0)
mch_memmove(showcmd_buf, showcmd_buf + overflow,
old_len - overflow + 1);
STRCAT(showcmd_buf, p);
if (char_avail())
return FALSE;
display_showcmd();
return TRUE;
}
| 0
|
393,538
|
static SQInteger base_callee(HSQUIRRELVM v)
{
if(v->_callsstacksize > 1)
{
v->Push(v->_callsstack[v->_callsstacksize - 2]._closure);
return 1;
}
return sq_throwerror(v,_SC("no closure in the calls stack"));
}
| 0
|
317,167
|
static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
int addrlen)
{
int rc = 0;
if (sock->sk == NULL)
return 0;
if (sock->sk->sk_family != PF_INET &&
(!IS_ENABLED(CONFIG_IPV6) || sock->sk->sk_family != PF_INET6))
return 0;
if (addrlen < offsetofend(struct sockaddr, sa_family))
return 0;
if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) {
struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
struct smack_known *rsp = NULL;
if (addrlen < SIN6_LEN_RFC2133)
return 0;
if (__is_defined(SMACK_IPV6_SECMARK_LABELING))
rsp = smack_ipv6host_label(sip);
if (rsp != NULL) {
struct socket_smack *ssp = sock->sk->sk_security;
rc = smk_ipv6_check(ssp->smk_out, rsp, sip,
SMK_CONNECTING);
}
if (__is_defined(SMACK_IPV6_PORT_LABELING))
rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING);
return rc;
}
if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in))
return 0;
rc = smk_ipv4_check(sock->sk, (struct sockaddr_in *)sap);
return rc;
}
| 0
|
195,752
|
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
// Create a new SparseTensorSliceDatasetOp::Dataset, insert it in
// the step container, and return it as the output.
const Tensor* indices;
OP_REQUIRES_OK(ctx, ctx->input("indices", &indices));
const Tensor* values;
OP_REQUIRES_OK(ctx, ctx->input("values", &values));
const Tensor* dense_shape;
OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()),
errors::InvalidArgument(
"Input indices should be a matrix but received shape ",
indices->shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()),
errors::InvalidArgument(
"Input values should be a vector but received shape ",
indices->shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()),
errors::InvalidArgument(
"Input shape should be a vector but received shape ",
dense_shape->shape().DebugString()));
// We currently ensure that `sparse_tensor` is ordered in the
// batch dimension.
// TODO(mrry): Investigate ways to avoid this unconditional check
// if we can be sure that the sparse tensor was produced in an
// appropriate order (e.g. by `tf.parse_example()` or a Dataset
// that batches elements into rows of a SparseTensor).
int64_t previous_batch_index = -1;
for (int64_t i = 0; i < indices->dim_size(0); ++i) {
int64_t next_batch_index = indices->matrix<int64>()(i, 0);
OP_REQUIRES(
ctx, next_batch_index >= previous_batch_index,
errors::Unimplemented("The SparseTensor must be ordered in the batch "
"dimension; handling arbitrarily ordered input "
"is not currently supported."));
previous_batch_index = next_batch_index;
}
gtl::InlinedVector<int64, 8> std_order(dense_shape->NumElements(), 0);
sparse::SparseTensor tensor;
OP_REQUIRES_OK(
ctx, sparse::SparseTensor::Create(
*indices, *values, TensorShape(dense_shape->vec<int64>()),
std_order, &tensor));
*output = new Dataset<T>(ctx, std::move(tensor));
}
| 1
|
219,012
|
Status ConstantFolding::EvaluateNode(const NodeDef& node,
const TensorVector& inputs,
TensorVector* output) const {
return ::tensorflow::grappler::EvaluateNode(node, inputs, cpu_device_,
resource_mgr_.get(), output);
}
| 0
|
238,377
|
njs_function_prototype_call(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
njs_index_t unused)
{
njs_int_t ret;
njs_function_t *function;
const njs_value_t *this;
njs_native_frame_t *frame;
if (!njs_is_function(&args[0])) {
njs_type_error(vm, "\"this\" argument is not a function");
return NJS_ERROR;
}
if (nargs > 1) {
this = &args[1];
nargs -= 2;
} else {
this = (njs_value_t *) &njs_value_undefined;
nargs = 0;
}
frame = vm->top_frame;
/* Skip the "call" method frame. */
frame->skip = 1;
function = njs_function(&args[0]);
ret = njs_function_frame(vm, function, this, &args[2], nargs, 0);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
ret = njs_function_frame_invoke(vm, frame->retval);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
return NJS_DECLINED;
}
| 0
|
252,284
|
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
| 0
|
277,673
|
start_input_ppm (j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
{
ppm_source_ptr source = (ppm_source_ptr) sinfo;
int c;
unsigned int w, h, maxval;
boolean need_iobuffer, use_raw_buffer, need_rescale;
if (getc(source->pub.input_file) != 'P')
ERREXIT(cinfo, JERR_PPM_NOT);
c = getc(source->pub.input_file); /* subformat discriminator character */
/* detect unsupported variants (ie, PBM) before trying to read header */
switch (c) {
case '2': /* it's a text-format PGM file */
case '3': /* it's a text-format PPM file */
case '5': /* it's a raw-format PGM file */
case '6': /* it's a raw-format PPM file */
break;
default:
ERREXIT(cinfo, JERR_PPM_NOT);
break;
}
/* fetch the remaining header info */
w = read_pbm_integer(cinfo, source->pub.input_file, 65535);
h = read_pbm_integer(cinfo, source->pub.input_file, 65535);
maxval = read_pbm_integer(cinfo, source->pub.input_file, 65535);
if (w <= 0 || h <= 0 || maxval <= 0) /* error check */
ERREXIT(cinfo, JERR_PPM_NOT);
cinfo->data_precision = BITS_IN_JSAMPLE; /* we always rescale data to this */
cinfo->image_width = (JDIMENSION) w;
cinfo->image_height = (JDIMENSION) h;
source->maxval = maxval;
/* initialize flags to most common settings */
need_iobuffer = TRUE; /* do we need an I/O buffer? */
use_raw_buffer = FALSE; /* do we map input buffer onto I/O buffer? */
need_rescale = TRUE; /* do we need a rescale array? */
switch (c) {
case '2': /* it's a text-format PGM file */
cinfo->input_components = 1;
cinfo->in_color_space = JCS_GRAYSCALE;
TRACEMS2(cinfo, 1, JTRC_PGM_TEXT, w, h);
source->pub.get_pixel_rows = get_text_gray_row;
need_iobuffer = FALSE;
break;
case '3': /* it's a text-format PPM file */
cinfo->input_components = 3;
cinfo->in_color_space = JCS_RGB;
TRACEMS2(cinfo, 1, JTRC_PPM_TEXT, w, h);
source->pub.get_pixel_rows = get_text_rgb_row;
need_iobuffer = FALSE;
break;
case '5': /* it's a raw-format PGM file */
cinfo->input_components = 1;
cinfo->in_color_space = JCS_GRAYSCALE;
TRACEMS2(cinfo, 1, JTRC_PGM, w, h);
if (maxval > 255) {
source->pub.get_pixel_rows = get_word_gray_row;
} else if (maxval == MAXJSAMPLE && sizeof(JSAMPLE) == sizeof(U_CHAR)) {
source->pub.get_pixel_rows = get_raw_row;
use_raw_buffer = TRUE;
need_rescale = FALSE;
} else {
source->pub.get_pixel_rows = get_scaled_gray_row;
}
break;
case '6': /* it's a raw-format PPM file */
cinfo->input_components = 3;
cinfo->in_color_space = JCS_RGB;
TRACEMS2(cinfo, 1, JTRC_PPM, w, h);
if (maxval > 255) {
source->pub.get_pixel_rows = get_word_rgb_row;
} else if (maxval == MAXJSAMPLE && sizeof(JSAMPLE) == sizeof(U_CHAR)) {
source->pub.get_pixel_rows = get_raw_row;
use_raw_buffer = TRUE;
need_rescale = FALSE;
} else {
source->pub.get_pixel_rows = get_scaled_rgb_row;
}
break;
}
/* Allocate space for I/O buffer: 1 or 3 bytes or words/pixel. */
if (need_iobuffer) {
source->buffer_width = (size_t) w * cinfo->input_components *
((maxval<=255) ? sizeof(U_CHAR) : (2*sizeof(U_CHAR)));
source->iobuffer = (U_CHAR *)
(*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE,
source->buffer_width);
}
/* Create compressor input buffer. */
if (use_raw_buffer) {
/* For unscaled raw-input case, we can just map it onto the I/O buffer. */
/* Synthesize a JSAMPARRAY pointer structure */
source->pixrow = (JSAMPROW) source->iobuffer;
source->pub.buffer = & source->pixrow;
source->pub.buffer_height = 1;
} else {
/* Need to translate anyway, so make a separate sample buffer. */
source->pub.buffer = (*cinfo->mem->alloc_sarray)
((j_common_ptr) cinfo, JPOOL_IMAGE,
(JDIMENSION) w * cinfo->input_components, (JDIMENSION) 1);
source->pub.buffer_height = 1;
}
/* Compute the rescaling array if required. */
if (need_rescale) {
INT32 val, half_maxval;
/* On 16-bit-int machines we have to be careful of maxval = 65535 */
source->rescale = (JSAMPLE *)
(*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE,
(size_t) (((long) maxval + 1L) * sizeof(JSAMPLE)));
half_maxval = maxval / 2;
for (val = 0; val <= (INT32) maxval; val++) {
/* The multiplication here must be done in 32 bits to avoid overflow */
source->rescale[val] = (JSAMPLE) ((val*MAXJSAMPLE + half_maxval)/maxval);
}
}
}
| 0
|
455,393
|
xfs_inode_ag_iterator_flags(
struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags,
void *args),
int flags,
void *args,
int iter_flags)
{
struct xfs_perag *pag;
int error = 0;
int last_error = 0;
xfs_agnumber_t ag;
ag = 0;
while ((pag = xfs_perag_get(mp, ag))) {
ag = pag->pag_agno + 1;
error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
iter_flags);
xfs_perag_put(pag);
if (error) {
last_error = error;
if (error == -EFSCORRUPTED)
break;
}
}
return last_error;
}
| 0
|
336,593
|
void RedCharDeviceVDIPort::send_tokens_to_client(RedCharDeviceClientOpaque *opaque, uint32_t tokens)
{
RedClient *client = (RedClient *) opaque;
client->get_main()->push_agent_tokens(tokens);
}
| 0
|
253,613
|
smb2_can_echo(struct TCP_Server_Info *server)
{
return server->echoes;
}
| 0
|
197,247
|
Status ShapeRefiner::InferShapesForFunctionSubNode(
const Node* node, InferenceContext* outer_context) {
TF_RETURN_IF_ERROR(AddNodeInternal(node, outer_context));
InferenceContext* node_context = CHECK_NOTNULL(GetContext(node));
if (StringPiece(node->type_string()) == kArgOp) {
// Handle special node: function input.
// Shapes for these nodes are provided in the outer inference
// context.
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node->def()), "index", &index));
if (index < 0 || outer_context->num_inputs() <= index) {
return errors::Internal(
"Function instantiation included invalid input index: ", index,
" not in [0, ", outer_context->num_inputs(), ").");
}
// TODO(b/134547156): TEMPORARY WORKAROUND. If input shape handle is not set
// in outer context, set _Arg node output shape to unknown.
if (outer_context->input(index).SameHandle(ShapeHandle())) {
VLOG(1) << "Function instantiation has undefined input shape at "
<< "index: " << index << " in the outer inference context.";
node_context->set_output(0, node_context->UnknownShape());
} else {
node_context->set_output(0, outer_context->input(index));
}
auto* resource = outer_context->input_handle_shapes_and_types(index);
if (resource) {
node_context->set_output_handle_shapes_and_types(0, *resource);
}
} else if (StringPiece(node->type_string()) == kRetvalOp) {
// Handle special node: function output.
// Shapes inferred for these nodes go into the outer inference
// context.
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node->def()), "index", &index));
if (index < 0 || outer_context->num_outputs() <= index) {
return errors::Internal(
"Function instantiation included invalid output index: ", index,
" not in [0, ", outer_context->num_outputs(), ").");
}
// outer_context outlives node_context, therefore we need to create
// a new shape handle owned by outer_context instead.
ShapeHandle handle;
TensorShapeProto proto;
node_context->ShapeHandleToProto(node_context->input(0), &proto);
TF_RETURN_IF_ERROR(outer_context->MakeShapeFromShapeProto(proto, &handle));
outer_context->set_output(index, handle);
auto* resource = node_context->input_handle_shapes_and_types(0);
if (resource) {
outer_context->set_output_handle_shapes_and_types(index, *resource);
}
}
return Status::OK();
}
| 1
|
313,823
|
nv_open(cmdarg_T *cap)
{
#ifdef FEAT_DIFF
// "do" is ":diffget"
if (cap->oap->op_type == OP_DELETE && cap->cmdchar == 'o')
{
clearop(cap->oap);
nv_diffgetput(FALSE, cap->opcount);
}
else
#endif
if (VIsual_active) // switch start and end of visual
v_swap_corners(cap->cmdchar);
#ifdef FEAT_JOB_CHANNEL
else if (bt_prompt(curbuf))
clearopbeep(cap->oap);
#endif
else
n_opencmd(cap);
}
| 0
|
476,143
|
void composite_disconnect(struct usb_gadget *gadget)
{
usb_gadget_vbus_draw(gadget, 0);
__composite_disconnect(gadget);
}
| 0
|
402,657
|
handle_unlock_token(context *ctx, struct pollfd *pollfd, socklen_t size)
{
struct msghdr msg;
struct iovec iov;
ssize_t n;
int rc = cms_context_alloc(&ctx->cms);
if (rc < 0) {
send_response(ctx, ctx->backup_cms, pollfd, rc);
return;
}
steal_from_cms(ctx->backup_cms, ctx->cms);
char *buffer = malloc(size);
if (!buffer) {
oom:
ctx->cms->log(ctx->cms, ctx->priority|LOG_ERR,
"unable to allocate memory: %m");
exit(1);
}
memset(&msg, '\0', sizeof(msg));
iov.iov_base = buffer;
iov.iov_len = size;
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
n = recvmsg(pollfd->fd, &msg, MSG_WAITALL);
pesignd_string *tn = (pesignd_string *)buffer;
if (n < (long long)sizeof(tn->size)) {
malformed:
ctx->cms->log(ctx->cms, ctx->priority|LOG_ERR,
"unlock-token: invalid data");
ctx->cms->log(ctx->cms, ctx->priority|LOG_ERR,
"possible exploit attempt. closing.");
close(pollfd->fd);
return;
}
n -= sizeof(tn->size);
if ((size_t)n < tn->size)
goto malformed;
n -= tn->size;
if (tn->value[tn->size - 1] != '\0')
goto malformed;
pesignd_string *tp = pesignd_string_next(tn);
if ((size_t)n < sizeof(tp->size))
goto malformed;
n -= sizeof(tp->size);
if ((size_t)n < tp->size)
goto malformed;
n -= tp->size;
if (tn->value[tn->size - 1] != '\0')
goto malformed;
if (n != 0)
goto malformed;
ctx->cms->log(ctx->cms, ctx->priority|LOG_NOTICE,
"unlocking token \"%s\"", tn->value);
/* authenticating with nss frees this ... best API ever. */
ctx->cms->tokenname = PORT_ArenaStrdup(ctx->cms->arena,
(char *)tn->value);
if (!ctx->cms->tokenname)
goto oom;
char *pin = (char *)tp->value;
if (!pin)
goto oom;
secuPWData pwdata;
memset(&pwdata, 0, sizeof(pwdata));
pwdata.source = pwdata.orig_source = PW_PLAINTEXT;
pwdata.data = pin;
cms_set_pw_callback(ctx->cms, get_password_passthrough);
cms_set_pw_data(ctx->cms, &pwdata);
rc = unlock_nss_token(ctx->cms);
cms_set_pw_callback(ctx->cms, get_password_fail);
cms_set_pw_data(ctx->cms, NULL);
if (rc == -1)
ctx->cms->log(ctx->cms, ctx->priority|LOG_ERR,
"could not find token \"%s\"", tn->value);
else if (rc == 0) {
ctx->cms->log(ctx->cms, ctx->priority|LOG_NOTICE,
"authentication succeeded for token \"%s\"",
tn->value);
rc = add_token_to_authenticated_list(ctx, tn->value);
if (rc < 0)
ctx->cms->log(ctx->cms, ctx->priority|LOG_ERR,
"couldn't add token to internal list: %m");
}
send_response(ctx, ctx->cms, pollfd, rc);
free(buffer);
hide_stolen_goods_from_cms(ctx->cms, ctx->backup_cms);
cms_context_fini(ctx->cms);
}
| 0
|
313,746
|
nv_ctrlh(cmdarg_T *cap)
{
if (VIsual_active && VIsual_select)
{
cap->cmdchar = 'x'; // BS key behaves like 'x' in Select mode
v_visop(cap);
}
else
nv_left(cap);
}
| 0
|
449,299
|
nv_screengo(oparg_T *oap, int dir, long dist)
{
int linelen = linetabsize(ml_get_curline());
int retval = OK;
int atend = FALSE;
int n;
int col_off1; // margin offset for first screen line
int col_off2; // margin offset for wrapped screen line
int width1; // text width for first screen line
int width2; // text width for wrapped screen line
oap->motion_type = MCHAR;
oap->inclusive = (curwin->w_curswant == MAXCOL);
col_off1 = curwin_col_off();
col_off2 = col_off1 - curwin_col_off2();
width1 = curwin->w_width - col_off1;
width2 = curwin->w_width - col_off2;
if (width2 == 0)
width2 = 1; // avoid divide by zero
if (curwin->w_width != 0)
{
/*
* Instead of sticking at the last character of the buffer line we
* try to stick in the last column of the screen.
*/
if (curwin->w_curswant == MAXCOL)
{
atend = TRUE;
validate_virtcol();
if (width1 <= 0)
curwin->w_curswant = 0;
else
{
curwin->w_curswant = width1 - 1;
if (curwin->w_virtcol > curwin->w_curswant)
curwin->w_curswant += ((curwin->w_virtcol
- curwin->w_curswant - 1) / width2 + 1) * width2;
}
}
else
{
if (linelen > width1)
n = ((linelen - width1 - 1) / width2 + 1) * width2 + width1;
else
n = width1;
if (curwin->w_curswant >= (colnr_T)n)
curwin->w_curswant = n - 1;
}
while (dist--)
{
if (dir == BACKWARD)
{
if ((long)curwin->w_curswant >= width1
#ifdef FEAT_FOLDING
&& !hasFolding(curwin->w_cursor.lnum, NULL, NULL)
#endif
)
// Move back within the line. This can give a negative value
// for w_curswant if width1 < width2 (with cpoptions+=n),
// which will get clipped to column 0.
curwin->w_curswant -= width2;
else
{
// to previous line
#ifdef FEAT_FOLDING
// Move to the start of a closed fold. Don't do that when
// 'foldopen' contains "all": it will open in a moment.
if (!(fdo_flags & FDO_ALL))
(void)hasFolding(curwin->w_cursor.lnum,
&curwin->w_cursor.lnum, NULL);
#endif
if (curwin->w_cursor.lnum == 1)
{
retval = FAIL;
break;
}
--curwin->w_cursor.lnum;
linelen = linetabsize(ml_get_curline());
if (linelen > width1)
curwin->w_curswant += (((linelen - width1 - 1) / width2)
+ 1) * width2;
}
}
else // dir == FORWARD
{
if (linelen > width1)
n = ((linelen - width1 - 1) / width2 + 1) * width2 + width1;
else
n = width1;
if (curwin->w_curswant + width2 < (colnr_T)n
#ifdef FEAT_FOLDING
&& !hasFolding(curwin->w_cursor.lnum, NULL, NULL)
#endif
)
// move forward within line
curwin->w_curswant += width2;
else
{
// to next line
#ifdef FEAT_FOLDING
// Move to the end of a closed fold.
(void)hasFolding(curwin->w_cursor.lnum, NULL,
&curwin->w_cursor.lnum);
#endif
if (curwin->w_cursor.lnum == curbuf->b_ml.ml_line_count)
{
retval = FAIL;
break;
}
curwin->w_cursor.lnum++;
curwin->w_curswant %= width2;
// Check if the cursor has moved below the number display
// when width1 < width2 (with cpoptions+=n). Subtract width2
// to get a negative value for w_curswant, which will get
// clipped to column 0.
if (curwin->w_curswant >= width1)
curwin->w_curswant -= width2;
linelen = linetabsize(ml_get_curline());
}
}
}
}
if (virtual_active() && atend)
coladvance(MAXCOL);
else
coladvance(curwin->w_curswant);
if (curwin->w_cursor.col > 0 && curwin->w_p_wrap)
{
colnr_T virtcol;
int c;
/*
* Check for landing on a character that got split at the end of the
* last line. We want to advance a screenline, not end up in the same
* screenline or move two screenlines.
*/
validate_virtcol();
virtcol = curwin->w_virtcol;
#if defined(FEAT_LINEBREAK)
if (virtcol > (colnr_T)width1 && *get_showbreak_value(curwin) != NUL)
virtcol -= vim_strsize(get_showbreak_value(curwin));
#endif
c = (*mb_ptr2char)(ml_get_cursor());
if (dir == FORWARD && virtcol < curwin->w_curswant
&& (curwin->w_curswant <= (colnr_T)width1)
&& !vim_isprintc(c) && c > 255)
oneright();
if (virtcol > curwin->w_curswant
&& (curwin->w_curswant < (colnr_T)width1
? (curwin->w_curswant > (colnr_T)width1 / 2)
: ((curwin->w_curswant - width1) % width2
> (colnr_T)width2 / 2)))
--curwin->w_cursor.col;
}
if (atend)
curwin->w_curswant = MAXCOL; // stick in the last column
return retval;
}
| 0
|
238,403
|
njs_function_capture_global_closures(njs_vm_t *vm, njs_function_t *function)
{
void *start, *end;
uint32_t n;
njs_value_t *value, **refs, **global;
njs_index_t *indexes, index;
njs_native_frame_t *native;
njs_function_lambda_t *lambda;
lambda = function->u.lambda;
if (lambda->nclosures == 0) {
return NJS_OK;
}
native = vm->top_frame;
while (native->previous->function != NULL) {
native = native->previous;
}
start = native;
end = native->free;
indexes = lambda->closures;
refs = njs_function_closures(function);
global = vm->levels[NJS_LEVEL_GLOBAL];
n = lambda->nclosures;
while (n > 0) {
n--;
index = indexes[n];
switch (njs_scope_index_type(index)) {
case NJS_LEVEL_LOCAL:
value = njs_function_closure_value(vm, native->local, index,
start, end);
break;
case NJS_LEVEL_GLOBAL:
value = njs_function_closure_value(vm, global, index, start, end);
break;
default:
njs_type_error(vm, "unexpected value type for closure \"%uD\"",
njs_scope_index_type(index));
return NJS_ERROR;
}
if (njs_slow_path(value == NULL)) {
return NJS_ERROR;
}
refs[n] = value;
}
function->closure_copied = 1;
return NJS_OK;
}
| 0
|
437,708
|
int cx23888_ir_probe(struct cx23885_dev *dev)
{
struct cx23888_ir_state *state;
struct v4l2_subdev *sd;
struct v4l2_subdev_ir_parameters default_params;
int ret;
state = kzalloc(sizeof(struct cx23888_ir_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
spin_lock_init(&state->rx_kfifo_lock);
if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE,
GFP_KERNEL)) {
kfree(state);
return -ENOMEM;
}
state->dev = dev;
sd = &state->sd;
v4l2_subdev_init(sd, &cx23888_ir_controller_ops);
v4l2_set_subdevdata(sd, state);
/* FIXME - fix the formatting of dev->v4l2_dev.name and use it */
snprintf(sd->name, sizeof(sd->name), "%s/888-ir", dev->name);
sd->grp_id = CX23885_HW_888_IR;
ret = v4l2_device_register_subdev(&dev->v4l2_dev, sd);
if (ret == 0) {
/*
* Ensure no interrupts arrive from '888 specific conditions,
* since we ignore them in this driver to have commonality with
* similar IR controller cores.
*/
cx23888_ir_write4(dev, CX23888_IR_IRQEN_REG, 0);
mutex_init(&state->rx_params_lock);
default_params = default_rx_params;
v4l2_subdev_call(sd, ir, rx_s_parameters, &default_params);
mutex_init(&state->tx_params_lock);
default_params = default_tx_params;
v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
} else {
kfifo_free(&state->rx_kfifo);
}
return ret;
}
| 0
|
389,745
|
check_for_opt_string_or_number_arg(typval_T *args, int idx)
{
return (args[idx].v_type == VAR_UNKNOWN
|| check_for_string_or_number_arg(args, idx) != FAIL);
}
| 0
|
275,526
|
njs_vm_object_alloc(njs_vm_t *vm, njs_value_t *retval, ...)
{
va_list args;
njs_int_t ret;
njs_value_t *name, *value;
njs_object_t *object;
njs_object_prop_t *prop;
njs_lvlhsh_query_t lhq;
object = njs_object_alloc(vm);
if (njs_slow_path(object == NULL)) {
return NJS_ERROR;
}
ret = NJS_ERROR;
va_start(args, retval);
for ( ;; ) {
name = va_arg(args, njs_value_t *);
if (name == NULL) {
break;
}
value = va_arg(args, njs_value_t *);
if (value == NULL) {
njs_type_error(vm, "missed value for a key");
goto done;
}
if (njs_slow_path(!njs_is_string(name))) {
njs_type_error(vm, "prop name is not a string");
goto done;
}
lhq.replace = 0;
lhq.pool = vm->mem_pool;
lhq.proto = &njs_object_hash_proto;
njs_string_get(name, &lhq.key);
lhq.key_hash = njs_djb_hash(lhq.key.start, lhq.key.length);
prop = njs_object_prop_alloc(vm, name, value, 1);
if (njs_slow_path(prop == NULL)) {
goto done;
}
lhq.value = prop;
ret = njs_lvlhsh_insert(&object->hash, &lhq);
if (njs_slow_path(ret != NJS_OK)) {
njs_internal_error(vm, NULL);
goto done;
}
}
ret = NJS_OK;
njs_set_object(retval, object);
done:
va_end(args);
return ret;
}
| 0
|
244,170
|
GF_Box *ssix_box_new()
{
ISOM_DECL_BOX_ALLOC(GF_SubsegmentIndexBox, GF_ISOM_BOX_TYPE_SSIX);
return (GF_Box *)tmp;
}
| 0
|
482,550
|
passFindCharacters(const FileInfo *file, widechar *instructions, int end,
widechar **characters, int *length) {
int IC = 0;
int lookback = 0;
*characters = NULL;
*length = 0;
while (IC < end) {
widechar instruction = instructions[IC];
switch (instruction) {
case pass_string:
case pass_dots: {
int count = instructions[IC + 1];
IC += 2;
if (count > lookback) {
*characters = &instructions[IC + lookback];
*length = count - lookback;
return 1;
} else {
lookback -= count;
}
IC += count;
continue;
}
case pass_attributes:
IC += 7;
if (instructions[IC - 2] == instructions[IC - 1] &&
instructions[IC - 1] <= lookback) {
lookback -= instructions[IC - 1];
continue;
}
goto NO_CHARACTERS;
case pass_swap:
IC += 2;
/* fall through */
case pass_groupstart:
case pass_groupend:
case pass_groupreplace:
IC += 3;
NO_CHARACTERS : { return 1; }
case pass_eq:
case pass_lt:
case pass_gt:
case pass_lteq:
case pass_gteq:
IC += 3;
continue;
case pass_lookback:
lookback += instructions[IC + 1];
IC += 2;
continue;
case pass_not:
case pass_startReplace:
case pass_endReplace:
case pass_first:
case pass_last:
case pass_copy:
case pass_omit:
case pass_plus:
case pass_hyphen:
IC += 1;
continue;
case pass_endTest:
goto NO_CHARACTERS;
default:
compileError(file, "unhandled test suboperand: \\x%02x", instruction);
return 0;
}
}
goto NO_CHARACTERS;
}
| 0
|
310,025
|
_nc_screen_init(void)
{
NCURSES_SP_NAME(_nc_screen_init) (CURRENT_SCREEN);
}
| 0
|
343,286
|
static int dlmap_init(DLHandler * const dlhandler, const int clientfd,
void * const tls_clientfd, const int xferfd,
const char * const name, const int f,
void * const tls_fd, const off_t restartat,
const int ascii_mode, const unsigned long bandwidth)
{
if (ascii_mode > 0) {
#ifdef WITHOUT_ASCII
addreply_noformat(450, MSG_ASCII_MODE_UNSUPPORTED);
return -1;
#else
addreply_noformat(0, MSG_ASCII_MODE_WARNING);
#endif
}
if (dlhandler_init(dlhandler, clientfd, tls_clientfd, xferfd, name, f,
tls_fd, restartat, ascii_mode, bandwidth) != 0) {
return -1;
}
dlhandler->min_chunk_size = DL_MIN_CHUNK_SIZE;
if (ascii_mode > 0) {
dlhandler->default_chunk_size = dlhandler->max_chunk_size =
DL_DEFAULT_CHUNK_SIZE_ASCII;
} else {
dlhandler->max_chunk_size = DL_MAX_CHUNK_SIZE;
if (bandwidth <= 0UL) {
dlhandler->default_chunk_size = dlhandler->max_chunk_size;
} else {
dlhandler->default_chunk_size = DL_DEFAULT_CHUNK_SIZE;
}
}
dlhandler->chunk_size = dlhandler->default_chunk_size;
dlhandler->dlmap_size =
(DL_DLMAP_SIZE + page_size - (size_t) 1U) & ~(page_size - (size_t) 1U);
dlhandler->cur_pos = restartat;
dlhandler->dlmap_pos = (off_t) 0;
dlhandler->dlmap_fdpos = (off_t) -1;
dlhandler->sizeof_map = (size_t) 0U;
dlhandler->map_data = NULL;
dlhandler->sizeof_map = dlhandler->dlmap_size;
dlhandler->map = malloc(dlhandler->sizeof_map);
if (dlhandler->map == NULL) {
die_mem();
}
return 0;
}
| 0
|
338,156
|
void WasmBinaryBuilder::pushExpression(Expression* curr) {
auto type = curr->type;
if (type.isTuple()) {
// Store tuple to local and push individual extracted values
Builder builder(wasm);
// Non-nullable types require special handling as they cannot be stored to
// a local.
std::vector<Type> finalTypes;
if (!wasm.features.hasGCNNLocals()) {
for (auto t : type) {
if (t.isNonNullable()) {
t = Type(t.getHeapType(), Nullable);
}
finalTypes.push_back(t);
}
}
auto nullableType = Type(Tuple(finalTypes));
requireFunctionContext("pushExpression-tuple");
Index tuple = builder.addVar(currFunction, nullableType);
expressionStack.push_back(builder.makeLocalSet(tuple, curr));
for (Index i = 0; i < nullableType.size(); ++i) {
Expression* value =
builder.makeTupleExtract(builder.makeLocalGet(tuple, nullableType), i);
if (nullableType[i] != type[i]) {
// We modified this to be nullable; undo that.
value = builder.makeRefAs(RefAsNonNull, value);
}
expressionStack.push_back(value);
}
} else {
expressionStack.push_back(curr);
}
}
| 0
|
326,095
|
regpiece(int *flagp)
{
char_u *ret;
int op;
char_u *next;
int flags;
long minval;
long maxval;
ret = regatom(&flags);
if (ret == NULL)
return NULL;
op = peekchr();
if (re_multi_type(op) == NOT_MULTI)
{
*flagp = flags;
return ret;
}
// default flags
*flagp = (WORST | SPSTART | (flags & (HASNL | HASLOOKBH)));
skipchr();
switch (op)
{
case Magic('*'):
if (flags & SIMPLE)
reginsert(STAR, ret);
else
{
// Emit x* as (x&|), where & means "self".
reginsert(BRANCH, ret); // Either x
regoptail(ret, regnode(BACK)); // and loop
regoptail(ret, ret); // back
regtail(ret, regnode(BRANCH)); // or
regtail(ret, regnode(NOTHING)); // null.
}
break;
case Magic('+'):
if (flags & SIMPLE)
reginsert(PLUS, ret);
else
{
// Emit x+ as x(&|), where & means "self".
next = regnode(BRANCH); // Either
regtail(ret, next);
regtail(regnode(BACK), ret); // loop back
regtail(next, regnode(BRANCH)); // or
regtail(ret, regnode(NOTHING)); // null.
}
*flagp = (WORST | HASWIDTH | (flags & (HASNL | HASLOOKBH)));
break;
case Magic('@'):
{
int lop = END;
long nr;
nr = getdecchrs();
switch (no_Magic(getchr()))
{
case '=': lop = MATCH; break; // \@=
case '!': lop = NOMATCH; break; // \@!
case '>': lop = SUBPAT; break; // \@>
case '<': switch (no_Magic(getchr()))
{
case '=': lop = BEHIND; break; // \@<=
case '!': lop = NOBEHIND; break; // \@<!
}
}
if (lop == END)
EMSG2_RET_NULL(_(e_invalid_character_after_str_at),
reg_magic == MAGIC_ALL);
// Look behind must match with behind_pos.
if (lop == BEHIND || lop == NOBEHIND)
{
regtail(ret, regnode(BHPOS));
*flagp |= HASLOOKBH;
}
regtail(ret, regnode(END)); // operand ends
if (lop == BEHIND || lop == NOBEHIND)
{
if (nr < 0)
nr = 0; // no limit is same as zero limit
reginsert_nr(lop, nr, ret);
}
else
reginsert(lop, ret);
break;
}
case Magic('?'):
case Magic('='):
// Emit x= as (x|)
reginsert(BRANCH, ret); // Either x
regtail(ret, regnode(BRANCH)); // or
next = regnode(NOTHING); // null.
regtail(ret, next);
regoptail(ret, next);
break;
case Magic('{'):
if (!read_limits(&minval, &maxval))
return NULL;
if (flags & SIMPLE)
{
reginsert(BRACE_SIMPLE, ret);
reginsert_limits(BRACE_LIMITS, minval, maxval, ret);
}
else
{
if (num_complex_braces >= 10)
EMSG2_RET_NULL(_(e_too_many_complex_str_curly),
reg_magic == MAGIC_ALL);
reginsert(BRACE_COMPLEX + num_complex_braces, ret);
regoptail(ret, regnode(BACK));
regoptail(ret, ret);
reginsert_limits(BRACE_LIMITS, minval, maxval, ret);
++num_complex_braces;
}
if (minval > 0 && maxval > 0)
*flagp = (HASWIDTH | (flags & (HASNL | HASLOOKBH)));
break;
}
if (re_multi_type(peekchr()) != NOT_MULTI)
{
// Can't have a multi follow a multi.
if (peekchr() == Magic('*'))
EMSG2_RET_NULL(_(e_nested_str), reg_magic >= MAGIC_ON);
EMSG3_RET_NULL(_(e_nested_str_chr), reg_magic == MAGIC_ALL,
no_Magic(peekchr()));
}
return ret;
}
| 0
|
432,283
|
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
hwaddr addr, uint64_t size)
{
MemoryRegionSection ret = { .mr = NULL };
MemoryRegion *root;
AddressSpace *as;
AddrRange range;
FlatView *view;
FlatRange *fr;
addr += mr->addr;
for (root = mr; root->container; ) {
root = root->container;
addr += root->addr;
}
as = memory_region_to_address_space(root);
if (!as) {
return ret;
}
range = addrrange_make(int128_make64(addr), int128_make64(size));
view = address_space_to_flatview(as);
fr = flatview_lookup(view, range);
if (!fr) {
return ret;
}
while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
--fr;
}
ret.mr = fr->mr;
ret.fv = view;
range = addrrange_intersection(range, fr->addr);
ret.offset_within_region = fr->offset_in_region;
ret.offset_within_region += int128_get64(int128_sub(range.start,
fr->addr.start));
ret.size = range.size;
ret.offset_within_address_space = int128_get64(range.start);
ret.readonly = fr->readonly;
return ret;
}
| 0
|
243,983
|
GF_Err sdtp_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox*)s;
/*out-of-order sdtp, assume no padding at the end*/
if (!ptr->sampleCount) ptr->sampleCount = (u32) ptr->size;
else if (ptr->sampleCount > (u32) ptr->size) return GF_ISOM_INVALID_FILE;
ptr->sample_info = (u8 *) gf_malloc(sizeof(u8)*ptr->sampleCount);
if (!ptr->sample_info) return GF_OUT_OF_MEM;
ptr->sample_alloc = ptr->sampleCount;
gf_bs_read_data(bs, (char*)ptr->sample_info, ptr->sampleCount);
ISOM_DECREASE_SIZE(ptr, ptr->sampleCount);
return GF_OK;
}
| 0
|
294,685
|
rt_complete_frags(VALUE klass, VALUE hash)
{
static VALUE tab = Qnil;
int g;
long e;
VALUE k, a, d;
if (NIL_P(tab)) {
tab = f_frozen_ary(11,
f_frozen_ary(2,
sym("time"),
f_frozen_ary(3,
sym("hour"),
sym("min"),
sym("sec"))),
f_frozen_ary(2,
Qnil,
f_frozen_ary(1,
sym("jd"))),
f_frozen_ary(2,
sym("ordinal"),
f_frozen_ary(5,
sym("year"),
sym("yday"),
sym("hour"),
sym("min"),
sym("sec"))),
f_frozen_ary(2,
sym("civil"),
f_frozen_ary(6,
sym("year"),
sym("mon"),
sym("mday"),
sym("hour"),
sym("min"),
sym("sec"))),
f_frozen_ary(2,
sym("commercial"),
f_frozen_ary(6,
sym("cwyear"),
sym("cweek"),
sym("cwday"),
sym("hour"),
sym("min"),
sym("sec"))),
f_frozen_ary(2,
sym("wday"),
f_frozen_ary(4,
sym("wday"),
sym("hour"),
sym("min"),
sym("sec"))),
f_frozen_ary(2,
sym("wnum0"),
f_frozen_ary(6,
sym("year"),
sym("wnum0"),
sym("wday"),
sym("hour"),
sym("min"),
sym("sec"))),
f_frozen_ary(2,
sym("wnum1"),
f_frozen_ary(6,
sym("year"),
sym("wnum1"),
sym("wday"),
sym("hour"),
sym("min"),
sym("sec"))),
f_frozen_ary(2,
Qnil,
f_frozen_ary(6,
sym("cwyear"),
sym("cweek"),
sym("wday"),
sym("hour"),
sym("min"),
sym("sec"))),
f_frozen_ary(2,
Qnil,
f_frozen_ary(6,
sym("year"),
sym("wnum0"),
sym("cwday"),
sym("hour"),
sym("min"),
sym("sec"))),
f_frozen_ary(2,
Qnil,
f_frozen_ary(6,
sym("year"),
sym("wnum1"),
sym("cwday"),
sym("hour"),
sym("min"),
sym("sec"))));
rb_gc_register_mark_object(tab);
}
{
long i, eno = 0, idx = 0;
for (i = 0; i < RARRAY_LEN(tab); i++) {
VALUE x, a;
x = RARRAY_AREF(tab, i);
a = RARRAY_AREF(x, 1);
{
long j, n = 0;
for (j = 0; j < RARRAY_LEN(a); j++)
if (!NIL_P(ref_hash0(RARRAY_AREF(a, j))))
n++;
if (n > eno) {
eno = n;
idx = i;
}
}
}
if (eno == 0)
g = 0;
else {
g = 1;
k = RARRAY_AREF(RARRAY_AREF(tab, idx), 0);
a = RARRAY_AREF(RARRAY_AREF(tab, idx), 1);
e = eno;
}
}
d = Qnil;
if (g && !NIL_P(k) && (RARRAY_LEN(a) - e)) {
if (k == sym("ordinal")) {
if (NIL_P(ref_hash("year"))) {
if (NIL_P(d))
d = date_s_today(0, (VALUE *)0, cDate);
set_hash("year", d_lite_year(d));
}
if (NIL_P(ref_hash("yday")))
set_hash("yday", INT2FIX(1));
}
else if (k == sym("civil")) {
long i;
for (i = 0; i < RARRAY_LEN(a); i++) {
VALUE e = RARRAY_AREF(a, i);
if (!NIL_P(ref_hash0(e)))
break;
if (NIL_P(d))
d = date_s_today(0, (VALUE *)0, cDate);
set_hash0(e, rb_funcall(d, SYM2ID(e), 0));
}
if (NIL_P(ref_hash("mon")))
set_hash("mon", INT2FIX(1));
if (NIL_P(ref_hash("mday")))
set_hash("mday", INT2FIX(1));
}
else if (k == sym("commercial")) {
long i;
for (i = 0; i < RARRAY_LEN(a); i++) {
VALUE e = RARRAY_AREF(a, i);
if (!NIL_P(ref_hash0(e)))
break;
if (NIL_P(d))
d = date_s_today(0, (VALUE *)0, cDate);
set_hash0(e, rb_funcall(d, SYM2ID(e), 0));
}
if (NIL_P(ref_hash("cweek")))
set_hash("cweek", INT2FIX(1));
if (NIL_P(ref_hash("cwday")))
set_hash("cwday", INT2FIX(1));
}
else if (k == sym("wday")) {
if (NIL_P(d))
d = date_s_today(0, (VALUE *)0, cDate);
set_hash("jd", d_lite_jd(f_add(f_sub(d,
d_lite_wday(d)),
ref_hash("wday"))));
}
else if (k == sym("wnum0")) {
long i;
for (i = 0; i < RARRAY_LEN(a); i++) {
VALUE e = RARRAY_AREF(a, i);
if (!NIL_P(ref_hash0(e)))
break;
if (NIL_P(d))
d = date_s_today(0, (VALUE *)0, cDate);
set_hash0(e, rb_funcall(d, SYM2ID(e), 0));
}
if (NIL_P(ref_hash("wnum0")))
set_hash("wnum0", INT2FIX(0));
if (NIL_P(ref_hash("wday")))
set_hash("wday", INT2FIX(0));
}
else if (k == sym("wnum1")) {
long i;
for (i = 0; i < RARRAY_LEN(a); i++) {
VALUE e = RARRAY_AREF(a, i);
if (!NIL_P(ref_hash0(e)))
break;
if (NIL_P(d))
d = date_s_today(0, (VALUE *)0, cDate);
set_hash0(e, rb_funcall(d, SYM2ID(e), 0));
}
if (NIL_P(ref_hash("wnum1")))
set_hash("wnum1", INT2FIX(0));
if (NIL_P(ref_hash("wday")))
set_hash("wday", INT2FIX(1));
}
}
if (g && k == sym("time")) {
if (f_le_p(klass, cDateTime)) {
if (NIL_P(d))
d = date_s_today(0, (VALUE *)0, cDate);
if (NIL_P(ref_hash("jd")))
set_hash("jd", d_lite_jd(d));
}
}
if (NIL_P(ref_hash("hour")))
set_hash("hour", INT2FIX(0));
if (NIL_P(ref_hash("min")))
set_hash("min", INT2FIX(0));
if (NIL_P(ref_hash("sec")))
set_hash("sec", INT2FIX(0));
else if (f_gt_p(ref_hash("sec"), INT2FIX(59)))
set_hash("sec", INT2FIX(59));
return hash;
}
| 0
|
224,172
|
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (tuple[index].has_value()) {
return errors::InvalidArgument("The tensor for index '", index,
"' for key '", key.scalar<int64_t>()(),
"' was already initialized '",
dtypes_.size(), "'.");
}
return Status::OK();
}
| 0
|
404,719
|
int receive_fd(struct file *file, unsigned int o_flags)
{
return __receive_fd(file, NULL, o_flags);
}
| 0
|
226,375
|
GF_Err unkn_box_size(GF_Box *s)
{
GF_UnknownBox *ptr = (GF_UnknownBox *)s;
if (ptr->dataSize && ptr->data) {
ptr->size += ptr->dataSize;
}
return GF_OK;
}
| 0
|
221,123
|
void DecodePngV2(OpKernelContext* context, StringPiece input) {
int channel_bits = (data_type_ == DataType::DT_UINT8) ? 8 : 16;
png::DecodeContext decode;
OP_REQUIRES(
context, png::CommonInitDecode(input, channels_, channel_bits, &decode),
errors::InvalidArgument("Invalid PNG. Failed to initialize decoder."));
// Verify that width and height are not too large:
// - verify width and height don't overflow int.
// - width can later be multiplied by channels_ and sizeof(uint16), so
// verify single dimension is not too large.
// - verify when width and height are multiplied together, there are a few
// bits to spare as well.
const int width = static_cast<int>(decode.width);
const int height = static_cast<int>(decode.height);
const int64_t total_size =
static_cast<int64_t>(width) * static_cast<int64_t>(height);
if (width != static_cast<int64_t>(decode.width) || width <= 0 ||
width >= (1LL << 27) || height != static_cast<int64_t>(decode.height) ||
height <= 0 || height >= (1LL << 27) || total_size >= (1LL << 29)) {
OP_REQUIRES(context, false,
errors::InvalidArgument("PNG size too large for int: ",
decode.width, " by ", decode.height));
}
Tensor* output = nullptr;
Status status;
// By the existing API, we support decoding PNG with `DecodeGif` op.
// We need to make sure to return 4-D shapes when using `DecodeGif`.
if (op_type_ == "DecodeGif") {
status = context->allocate_output(
0, TensorShape({1, height, width, decode.channels}), &output);
} else {
status = context->allocate_output(
0, TensorShape({height, width, decode.channels}), &output);
}
if (op_type_ == "DecodeBmp") {
// TODO(b/171060723): Only DecodeBmp as op_type_ is not acceptable here
// because currently `decode_(jpeg|png|gif)` ops can decode any one of
// jpeg, png or gif but not bmp. Similarly, `decode_bmp` cannot decode
// anything but bmp formats. This behavior needs to be revisited. For more
// details, please refer to the bug.
OP_REQUIRES(context, false,
errors::InvalidArgument(
"Trying to decode PNG format using DecodeBmp op. Use "
"`decode_png` or `decode_image` instead."));
} else if (op_type_ == "DecodeAndCropJpeg") {
OP_REQUIRES(context, false,
errors::InvalidArgument(
"DecodeAndCropJpeg operation can run on JPEG only, but "
"detected PNG."));
}
if (!status.ok()) png::CommonFreeDecode(&decode);
OP_REQUIRES_OK(context, status);
if (data_type_ == DataType::DT_UINT8) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint8>().data()),
decode.channels * width * sizeof(uint8), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_UINT16) {
OP_REQUIRES(
context,
png::CommonFinishDecode(
reinterpret_cast<png_bytep>(output->flat<uint16>().data()),
decode.channels * width * sizeof(uint16), &decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
} else if (data_type_ == DataType::DT_FLOAT) {
// `png::CommonFinishDecode` does not support `float`. First allocate
// uint16 buffer for the image and decode in uint16 (lossless). Wrap the
// buffer in `unique_ptr` so that we don't forget to delete the buffer.
std::unique_ptr<uint16[]> buffer(
new uint16[height * width * decode.channels]);
OP_REQUIRES(
context,
png::CommonFinishDecode(reinterpret_cast<png_bytep>(buffer.get()),
decode.channels * width * sizeof(uint16),
&decode),
errors::InvalidArgument("Invalid PNG data, size ", input.size()));
// Convert uint16 image data to desired data type.
// Use eigen threadpooling to speed up the copy operation.
const auto& device = context->eigen_device<Eigen::ThreadPoolDevice>();
TTypes<uint16, 3>::UnalignedConstTensor buf(buffer.get(), height, width,
decode.channels);
float scale = 1. / std::numeric_limits<uint16>::max();
// Fill output tensor with desired dtype.
output->tensor<float, 3>().device(device) = buf.cast<float>() * scale;
}
}
| 0
|
198,662
|
ex_copy(linenr_T line1, linenr_T line2, linenr_T n)
{
linenr_T count;
char_u *p;
count = line2 - line1 + 1;
if ((cmdmod.cmod_flags & CMOD_LOCKMARKS) == 0)
{
curbuf->b_op_start.lnum = n + 1;
curbuf->b_op_end.lnum = n + count;
curbuf->b_op_start.col = curbuf->b_op_end.col = 0;
}
/*
* there are three situations:
* 1. destination is above line1
* 2. destination is between line1 and line2
* 3. destination is below line2
*
* n = destination (when starting)
* curwin->w_cursor.lnum = destination (while copying)
* line1 = start of source (while copying)
* line2 = end of source (while copying)
*/
if (u_save(n, n + 1) == FAIL)
return;
curwin->w_cursor.lnum = n;
while (line1 <= line2)
{
// need to use vim_strsave() because the line will be unlocked within
// ml_append()
p = vim_strsave(ml_get(line1));
if (p != NULL)
{
ml_append(curwin->w_cursor.lnum, p, (colnr_T)0, FALSE);
vim_free(p);
}
// situation 2: skip already copied lines
if (line1 == n)
line1 = curwin->w_cursor.lnum;
++line1;
if (curwin->w_cursor.lnum < line1)
++line1;
if (curwin->w_cursor.lnum < line2)
++line2;
++curwin->w_cursor.lnum;
}
appended_lines_mark(n, count);
msgmore((long)count);
}
| 1
|
366,257
|
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
LIST_HEAD(tmp_list);
struct mount *p;
if (how & UMOUNT_PROPAGATE)
propagate_mount_unlock(mnt);
/* Gather the mounts to umount */
for (p = mnt; p; p = next_mnt(p, mnt)) {
p->mnt.mnt_flags |= MNT_UMOUNT;
list_move(&p->mnt_list, &tmp_list);
}
/* Hide the mounts from mnt_mounts */
list_for_each_entry(p, &tmp_list, mnt_list) {
list_del_init(&p->mnt_child);
}
/* Add propogated mounts to the tmp_list */
if (how & UMOUNT_PROPAGATE)
propagate_umount(&tmp_list);
while (!list_empty(&tmp_list)) {
struct mnt_namespace *ns;
bool disconnect;
p = list_first_entry(&tmp_list, struct mount, mnt_list);
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
ns = p->mnt_ns;
if (ns) {
ns->mounts--;
__touch_mnt_namespace(ns);
}
p->mnt_ns = NULL;
if (how & UMOUNT_SYNC)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
disconnect = disconnect_mount(p, how);
if (mnt_has_parent(p)) {
mnt_add_count(p->mnt_parent, -1);
if (!disconnect) {
/* Don't forget about p */
list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
} else {
umount_mnt(p);
}
}
change_mnt_propagation(p, MS_PRIVATE);
if (disconnect)
hlist_add_head(&p->mnt_umount, &unmounted);
}
}
| 0
|
234,725
|
int btrfs_uuid_scan_kthread(void *data)
{
struct btrfs_fs_info *fs_info = data;
struct btrfs_root *root = fs_info->tree_root;
struct btrfs_key key;
struct btrfs_path *path = NULL;
int ret = 0;
struct extent_buffer *eb;
int slot;
struct btrfs_root_item root_item;
u32 item_size;
struct btrfs_trans_handle *trans = NULL;
bool closing = false;
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
key.objectid = 0;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = 0;
while (1) {
if (btrfs_fs_closing(fs_info)) {
closing = true;
break;
}
ret = btrfs_search_forward(root, &key, path,
BTRFS_OLDEST_GENERATION);
if (ret) {
if (ret > 0)
ret = 0;
break;
}
if (key.type != BTRFS_ROOT_ITEM_KEY ||
(key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
key.objectid != BTRFS_FS_TREE_OBJECTID) ||
key.objectid > BTRFS_LAST_FREE_OBJECTID)
goto skip;
eb = path->nodes[0];
slot = path->slots[0];
item_size = btrfs_item_size_nr(eb, slot);
if (item_size < sizeof(root_item))
goto skip;
read_extent_buffer(eb, &root_item,
btrfs_item_ptr_offset(eb, slot),
(int)sizeof(root_item));
if (btrfs_root_refs(&root_item) == 0)
goto skip;
if (!btrfs_is_empty_uuid(root_item.uuid) ||
!btrfs_is_empty_uuid(root_item.received_uuid)) {
if (trans)
goto update_tree;
btrfs_release_path(path);
/*
* 1 - subvol uuid item
* 1 - received_subvol uuid item
*/
trans = btrfs_start_transaction(fs_info->uuid_root, 2);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
break;
}
continue;
} else {
goto skip;
}
update_tree:
btrfs_release_path(path);
if (!btrfs_is_empty_uuid(root_item.uuid)) {
ret = btrfs_uuid_tree_add(trans, root_item.uuid,
BTRFS_UUID_KEY_SUBVOL,
key.objectid);
if (ret < 0) {
btrfs_warn(fs_info, "uuid_tree_add failed %d",
ret);
break;
}
}
if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
ret = btrfs_uuid_tree_add(trans,
root_item.received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
key.objectid);
if (ret < 0) {
btrfs_warn(fs_info, "uuid_tree_add failed %d",
ret);
break;
}
}
skip:
btrfs_release_path(path);
if (trans) {
ret = btrfs_end_transaction(trans);
trans = NULL;
if (ret)
break;
}
if (key.offset < (u64)-1) {
key.offset++;
} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
key.offset = 0;
key.type = BTRFS_ROOT_ITEM_KEY;
} else if (key.objectid < (u64)-1) {
key.offset = 0;
key.type = BTRFS_ROOT_ITEM_KEY;
key.objectid++;
} else {
break;
}
cond_resched();
}
out:
btrfs_free_path(path);
if (trans && !IS_ERR(trans))
btrfs_end_transaction(trans);
if (ret)
btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
else if (!closing)
set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
up(&fs_info->uuid_tree_rescan_sem);
return 0;
}
| 0
|
275,484
|
njs_vm_value_buffer_set(njs_vm_t *vm, njs_value_t *value, const u_char *start,
uint32_t size)
{
return njs_buffer_set(vm, value, start, size);
}
| 0
|
401,534
|
static void process_random_ready_list(void)
{
unsigned long flags;
struct random_ready_callback *rdy, *tmp;
spin_lock_irqsave(&random_ready_list_lock, flags);
list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
struct module *owner = rdy->owner;
list_del_init(&rdy->list);
rdy->func(rdy);
module_put(owner);
}
spin_unlock_irqrestore(&random_ready_list_lock, flags);
}
| 0
|
247,150
|
void gf_fs_print_all_connections(GF_FilterSession *session, char *filter_name, void (*print_fn)(FILE *output, GF_SysPrintArgFlags flags, const char *fmt, ...) )
{
Bool found = GF_FALSE;
GF_List *done;
u32 i, j, count;
u32 llev = gf_log_get_tool_level(GF_LOG_FILTER);
gf_log_set_tool_level(GF_LOG_FILTER, GF_LOG_INFO);
//load JS to inspect its connections
if (filter_name && strstr(filter_name, ".js")) {
gf_fs_print_jsf_connection(session, filter_name, NULL, print_fn);
gf_log_set_tool_level(GF_LOG_FILTER, llev);
return;
}
done = gf_list_new();
count = gf_list_count(session->links);
for (i=0; i<count; i++) {
const GF_FilterRegDesc *src = gf_list_get(session->links, i);
if (filter_name && strcmp(src->freg->name, filter_name))
continue;
if (!src->nb_edges) {
if (print_fn)
print_fn(stderr, 1, "%s: no sources\n", src->freg->name);
else {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("%s: no sources\n", src->freg->name));
}
continue;
}
found = GF_TRUE;
if (print_fn)
print_fn(stderr, 1, "%s sources:", src->freg->name);
else {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("%s sources:", src->freg->name));
}
for (j=0; j<src->nb_edges; j++) {
if (gf_list_find(done, (void *) src->edges[j].src_reg->freg->name)<0) {
if (print_fn)
print_fn(stderr, 0, " %s", src->edges[j].src_reg->freg->name);
else {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, (" %s", src->edges[j].src_reg->freg->name));
}
gf_list_add(done, (void *) src->edges[j].src_reg->freg->name);
}
}
if (print_fn)
print_fn(stderr, 0, "\n");
else {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("\n"));
}
gf_list_reset(done);
}
if (found && filter_name) {
if (print_fn)
print_fn(stderr, 1, "%s sinks:", filter_name);
else {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("%s sinks:", filter_name));
}
count = gf_list_count(session->links);
for (i=0; i<count; i++) {
const GF_FilterRegDesc *src = gf_list_get(session->links, i);
if (!strcmp(src->freg->name, filter_name)) {
if (!(src->freg->flags & GF_FS_REG_EXPLICIT_ONLY) || !(src->freg->flags & GF_FS_REG_ALLOW_CYCLIC))
continue;
}
for (j=0; j<src->nb_edges; j++) {
if (strcmp(src->edges[j].src_reg->freg->name, filter_name)) continue;
if (gf_list_find(done, (void *) src->freg->name)<0) {
if (print_fn)
print_fn(stderr, 0, " %s", src->freg->name);
else {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, (" %s", src->freg->name));
}
gf_list_add(done, (void *) src->freg->name);
}
}
gf_list_reset(done);
}
if (print_fn)
print_fn(stderr, 1, " \n");
else {
GF_LOG(GF_LOG_INFO, GF_LOG_APP, (" \n"));
}
}
if (!found && filter_name) {
GF_Err e = GF_OK;
GF_Filter *f = gf_fs_load_filter(session, filter_name, &e);
if (f) {
gf_fs_print_jsf_connection(session, filter_name, f, print_fn);
}
else if (print_fn)
print_fn(stderr, 1, "%s filter not found\n", filter_name);
else {
GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("%s filter not found\n", filter_name));
}
}
gf_list_del(done);
gf_log_set_tool_level(GF_LOG_FILTER, llev);
}
| 0
|
383,370
|
gdImageFilledArc (gdImagePtr im, int cx, int cy, int w, int h, int s, int e, int color, int style)
{
gdPoint pts[3];
int i;
int lx = 0, ly = 0;
int fx = 0, fy = 0;
int w2, h2;
w2 = w / 2;
h2 = h / 2;
while (e < s)
{
e += 360;
}
for (i = s; (i <= e); i++)
{
int x, y;
x = ((long) gdCosT[i % 360] * (long) w2 / 1024) + cx;
y = ((long) gdSinT[i % 360] * (long) h2 / 1024) + cy;
if (i != s)
{
if (!(style & gdChord))
{
if (style & gdNoFill)
{
gdImageLine (im, lx, ly, x, y, color);
}
else
{
/* This is expensive! */
pts[0].x = lx;
pts[0].y = ly;
pts[1].x = x;
pts[1].y = y;
pts[2].x = cx;
pts[2].y = cy;
gdImageFilledPolygon (im, pts, 3, color);
}
}
}
else
{
fx = x;
fy = y;
}
lx = x;
ly = y;
}
if (style & gdChord)
{
if (style & gdNoFill)
{
if (style & gdEdged)
{
gdImageLine (im, cx, cy, lx, ly, color);
gdImageLine (im, cx, cy, fx, fy, color);
}
gdImageLine (im, fx, fy, lx, ly, color);
}
else
{
pts[0].x = fx;
pts[0].y = fy;
pts[1].x = lx;
pts[1].y = ly;
pts[2].x = cx;
pts[2].y = cy;
gdImageFilledPolygon (im, pts, 3, color);
}
}
else
{
if (style & gdNoFill)
{
if (style & gdEdged)
{
gdImageLine (im, cx, cy, lx, ly, color);
gdImageLine (im, cx, cy, fx, fy, color);
}
}
}
}
| 0
|
333,064
|
nfa_regcomp(char_u *expr, int re_flags)
{
nfa_regprog_T *prog = NULL;
size_t prog_size;
int *postfix;
if (expr == NULL)
return NULL;
#ifdef DEBUG
nfa_regengine.expr = expr;
#endif
nfa_re_flags = re_flags;
init_class_tab();
if (nfa_regcomp_start(expr, re_flags) == FAIL)
return NULL;
// Build postfix form of the regexp. Needed to build the NFA
// (and count its size).
postfix = re2post();
if (postfix == NULL)
goto fail; // Cascaded (syntax?) error
/*
* In order to build the NFA, we parse the input regexp twice:
* 1. first pass to count size (so we can allocate space)
* 2. second to emit code
*/
#ifdef ENABLE_LOG
{
FILE *f = fopen(NFA_REGEXP_RUN_LOG, "a");
if (f != NULL)
{
fprintf(f, "\n*****************************\n\n\n\n\tCompiling regexp \"%s\"... hold on !\n", expr);
fclose(f);
}
}
#endif
/*
* PASS 1
* Count number of NFA states in "nstate". Do not build the NFA.
*/
post2nfa(postfix, post_ptr, TRUE);
// allocate the regprog with space for the compiled regexp
prog_size = sizeof(nfa_regprog_T) + sizeof(nfa_state_T) * (nstate - 1);
prog = alloc(prog_size);
if (prog == NULL)
goto fail;
state_ptr = prog->state;
prog->re_in_use = FALSE;
/*
* PASS 2
* Build the NFA
*/
prog->start = post2nfa(postfix, post_ptr, FALSE);
if (prog->start == NULL)
goto fail;
prog->regflags = regflags;
prog->engine = &nfa_regengine;
prog->nstate = nstate;
prog->has_zend = rex.nfa_has_zend;
prog->has_backref = rex.nfa_has_backref;
prog->nsubexp = regnpar;
nfa_postprocess(prog);
prog->reganch = nfa_get_reganch(prog->start, 0);
prog->regstart = nfa_get_regstart(prog->start, 0);
prog->match_text = nfa_get_match_text(prog->start);
#ifdef ENABLE_LOG
nfa_postfix_dump(expr, OK);
nfa_dump(prog);
#endif
#ifdef FEAT_SYN_HL
// Remember whether this pattern has any \z specials in it.
prog->reghasz = re_has_z;
#endif
prog->pattern = vim_strsave(expr);
#ifdef DEBUG
nfa_regengine.expr = NULL;
#endif
out:
VIM_CLEAR(post_start);
post_ptr = post_end = NULL;
state_ptr = NULL;
return (regprog_T *)prog;
fail:
VIM_CLEAR(prog);
#ifdef ENABLE_LOG
nfa_postfix_dump(expr, FAIL);
#endif
#ifdef DEBUG
nfa_regengine.expr = NULL;
#endif
goto out;
}
| 0
|
357,677
|
SQClass::SQClass(SQSharedState *ss,SQClass *base)
{
_base = base;
_typetag = 0;
_hook = NULL;
_udsize = 0;
_locked = false;
_constructoridx = -1;
if(_base) {
_constructoridx = _base->_constructoridx;
_udsize = _base->_udsize;
_defaultvalues.copy(base->_defaultvalues);
_methods.copy(base->_methods);
_COPY_VECTOR(_metamethods,base->_metamethods,MT_LAST);
__ObjAddRef(_base);
}
_members = base?base->_members->Clone() : SQTable::Create(ss,0);
__ObjAddRef(_members);
INIT_CHAIN();
ADD_TO_CHAIN(&_sharedstate->_gc_chain, this);
}
| 0
|
317,325
|
static inline u16 inode_mode_to_security_class(umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFSOCK:
return SECCLASS_SOCK_FILE;
case S_IFLNK:
return SECCLASS_LNK_FILE;
case S_IFREG:
return SECCLASS_FILE;
case S_IFBLK:
return SECCLASS_BLK_FILE;
case S_IFDIR:
return SECCLASS_DIR;
case S_IFCHR:
return SECCLASS_CHR_FILE;
case S_IFIFO:
return SECCLASS_FIFO_FILE;
}
return SECCLASS_FILE;
}
| 0
|
263,297
|
off_t _q_iosend(FILE *outfp, FILE *infp, off_t nbytes) {
if (nbytes == 0) return 0;
unsigned char buf[QIOSEND_CHUNK_SIZE];
off_t total = 0; // total size sent
while (total < nbytes) {
size_t chunksize; // this time sending size
if (nbytes - total <= sizeof(buf)) chunksize = nbytes - total;
else chunksize = sizeof(buf);
// read
size_t rsize = fread(buf, 1, chunksize, infp);
if (rsize == 0) break;
DEBUG("read %zu", rsize);
// write
size_t wsize = fwrite(buf, 1, rsize, outfp);
if (wsize == 0) break;
DEBUG("write %zu", wsize);
total += wsize;
if (rsize != wsize) {
DEBUG("size mismatch. read:%zu, write:%zu", rsize, wsize);
break;
}
}
if (total > 0) return total;
return -1;
}
| 0
|
393,497
|
static SQInteger closure_acall(HSQUIRRELVM v)
{
return _closure_acall(v,SQTrue);
}
| 0
|
384,298
|
gs_heap_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
{
gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
gs_malloc_block_t *bp;
gs_memory_type_ptr_t pstype;
struct_proc_finalize((*finalize));
if_debug3m('a', mem, "[a-]gs_free(%s) 0x%lx(%u)\n",
client_name_string(cname), (ulong) ptr,
(ptr == 0 ? 0 : ((gs_malloc_block_t *) ptr)[-1].size));
if (ptr == 0)
return;
pstype = ((gs_malloc_block_t *) ptr)[-1].type;
finalize = pstype->finalize;
if (finalize != 0) {
if_debug3m('u', mem, "[u]finalizing %s 0x%lx (%s)\n",
struct_type_name_string(pstype),
(ulong) ptr, client_name_string(cname));
(*finalize) (mem, ptr);
}
if (mmem->monitor)
gx_monitor_enter(mmem->monitor); /* Exclusive access */
/* Previously, we used to search through every allocated block to find
* the block we are freeing. This gives us safety in that an attempt to
* free an unallocated block will not go wrong. This does radically
* slow down frees though, so we replace it with this simpler code; we
* now assume that the block is valid, and hence avoid the search.
*/
#if 1
bp = &((gs_malloc_block_t *)ptr)[-1];
if (bp->prev)
bp->prev->next = bp->next;
if (bp->next)
bp->next->prev = bp->prev;
if (bp == mmem->allocated) {
mmem->allocated = bp->next;
mmem->allocated->prev = NULL;
}
mmem->used -= bp->size + sizeof(gs_malloc_block_t);
if (mmem->monitor)
gx_monitor_leave(mmem->monitor); /* Done with exclusive access */
gs_alloc_fill(bp, gs_alloc_fill_free,
bp->size + sizeof(gs_malloc_block_t));
free(bp);
#else
bp = mmem->allocated; /* If 'finalize' releases a memory,
this function could be called recursively and
change mmem->allocated. */
if (ptr == bp + 1) {
mmem->allocated = bp->next;
mmem->used -= bp->size + sizeof(gs_malloc_block_t);
if (mmem->allocated)
mmem->allocated->prev = 0;
if (mmem->monitor)
gx_monitor_leave(mmem->monitor); /* Done with exclusive access */
gs_alloc_fill(bp, gs_alloc_fill_free,
bp->size + sizeof(gs_malloc_block_t));
free(bp);
} else {
gs_malloc_block_t *np;
/*
* bp == 0 at this point is an error, but we'd rather have an
* error message than an invalid access.
*/
if (bp) {
for (; (np = bp->next) != 0; bp = np) {
if (ptr == np + 1) {
bp->next = np->next;
if (np->next)
np->next->prev = bp;
mmem->used -= np->size + sizeof(gs_malloc_block_t);
if (mmem->monitor)
gx_monitor_leave(mmem->monitor); /* Done with exclusive access */
gs_alloc_fill(np, gs_alloc_fill_free,
np->size + sizeof(gs_malloc_block_t));
free(np);
return;
}
}
}
if (mmem->monitor)
gx_monitor_leave(mmem->monitor); /* Done with exclusive access */
lprintf2("%s: free 0x%lx not found!\n",
client_name_string(cname), (ulong) ptr);
free((char *)((gs_malloc_block_t *) ptr - 1));
}
#endif
}
| 0
|
90,176
|
virtual void RemoveNetworkObserver(const std::string& service_path,
NetworkObserver* observer) {
DCHECK(observer);
DCHECK(service_path.size());
NetworkObserverMap::iterator map_iter =
network_observers_.find(service_path);
if (map_iter != network_observers_.end()) {
map_iter->second->RemoveObserver(observer);
if (!map_iter->second->size()) {
delete map_iter->second;
network_observers_.erase(map_iter++);
}
}
}
| 0
|
376,335
|
gpg_ctx_set_ostream (struct _GpgCtx *gpg,
CamelStream *ostream)
{
g_object_ref (ostream);
if (gpg->ostream)
g_object_unref (gpg->ostream);
gpg->ostream = ostream;
gpg->seen_eof1 = FALSE;
}
| 0
|
196,893
|
void DefaultCertValidator::updateDigestForSessionId(bssl::ScopedEVP_MD_CTX& md,
uint8_t hash_buffer[EVP_MAX_MD_SIZE],
unsigned hash_length) {
int rc;
// Hash all the settings that affect whether the server will allow/accept
// the client connection. This ensures that the client is always validated against
// the correct settings, even if session resumption across different listeners
// is enabled.
if (ca_cert_ != nullptr) {
rc = X509_digest(ca_cert_.get(), EVP_sha256(), hash_buffer, &hash_length);
RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));
RELEASE_ASSERT(hash_length == SHA256_DIGEST_LENGTH,
fmt::format("invalid SHA256 hash length {}", hash_length));
rc = EVP_DigestUpdate(md.get(), hash_buffer, hash_length);
RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));
}
for (const auto& hash : verify_certificate_hash_list_) {
rc = EVP_DigestUpdate(md.get(), hash.data(),
hash.size() *
sizeof(std::remove_reference<decltype(hash)>::type::value_type));
RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));
}
for (const auto& hash : verify_certificate_spki_list_) {
rc = EVP_DigestUpdate(md.get(), hash.data(),
hash.size() *
sizeof(std::remove_reference<decltype(hash)>::type::value_type));
RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));
}
}
| 1
|
226,181
|
GF_Err strk_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_SubTrackBox *ptr = (GF_SubTrackBox *)s;
e = gf_isom_box_array_read(s, bs);
if (e) return e;
if (!ptr->info) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing SubTrackInformationBox\n"));
return GF_ISOM_INVALID_FILE;
}
return GF_OK;
| 0
|
231,061
|
BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
const void * const pvItemToQueue,
TickType_t xTicksToWait,
const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
{
configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
}
#endif
/*lint -save -e904 This function relaxes the coding standard somewhat to
* allow return statements within the function itself. This is done in the
* interest of execution time efficiency. */
for( ; ; )
{
taskENTER_CRITICAL();
{
/* Is there room on the queue now? The running task must be the
* highest priority task wanting to access the queue. If the head item
* in the queue is to be overwritten then it does not matter if the
* queue is full. */
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
traceQUEUE_SEND( pxQueue );
#if ( configUSE_QUEUE_SETS == 1 )
{
const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
if( pxQueue->pxQueueSetContainer != NULL )
{
if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
{
/* Do not notify the queue set as an existing item
* was overwritten in the queue so the number of items
* in the queue has not changed. */
mtCOVERAGE_TEST_MARKER();
}
else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
{
/* The queue is a member of a queue set, and posting
* to the queue set caused a higher priority task to
* unblock. A context switch is required. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
/* If there was a task waiting for data to arrive on the
* queue then unblock it now. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The unblocked task has a priority higher than
* our own so yield immediately. Yes it is ok to
* do this from within the critical section - the
* kernel takes care of that. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else if( xYieldRequired != pdFALSE )
{
/* This path is a special case that will only get
* executed if the task was holding multiple mutexes
* and the mutexes were given back in an order that is
* different to that in which they were taken. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
}
#else /* configUSE_QUEUE_SETS */
{
xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
/* If there was a task waiting for data to arrive on the
* queue then unblock it now. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The unblocked task has a priority higher than
* our own so yield immediately. Yes it is ok to do
* this from within the critical section - the kernel
* takes care of that. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else if( xYieldRequired != pdFALSE )
{
/* This path is a special case that will only get
* executed if the task was holding multiple mutexes and
* the mutexes were given back in an order that is
* different to that in which they were taken. */
queueYIELD_IF_USING_PREEMPTION();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_QUEUE_SETS */
taskEXIT_CRITICAL();
return pdPASS;
}
else
{
if( xTicksToWait == ( TickType_t ) 0 )
{
/* The queue was full and no block time is specified (or
* the block time has expired) so leave now. */
taskEXIT_CRITICAL();
/* Return to the original privilege level before exiting
* the function. */
traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL;
}
else if( xEntryTimeSet == pdFALSE )
{
/* The queue was full and a block time was specified so
* configure the timeout structure. */
vTaskInternalSetTimeOutState( &xTimeOut );
xEntryTimeSet = pdTRUE;
}
else
{
/* Entry time was already set. */
mtCOVERAGE_TEST_MARKER();
}
}
}
taskEXIT_CRITICAL();
/* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */
vTaskSuspendAll();
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
if( prvIsQueueFull( pxQueue ) != pdFALSE )
{
traceBLOCKING_ON_QUEUE_SEND( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
/* Unlocking the queue means queue events can effect the
* event list. It is possible that interrupts occurring now
* remove this task from the event list again - but as the
* scheduler is suspended the task will go onto the pending
* ready last instead of the actual ready list. */
prvUnlockQueue( pxQueue );
/* Resuming the scheduler will move tasks from the pending
* ready list into the ready list - so it is feasible that this
* task is already in a ready list before it yields - in which
* case the yield will not cause a context switch unless there
* is also a higher priority task in the pending ready list. */
if( xTaskResumeAll() == pdFALSE )
{
portYIELD_WITHIN_API();
}
}
else
{
/* Try again. */
prvUnlockQueue( pxQueue );
( void ) xTaskResumeAll();
}
}
else
{
/* The timeout has expired. */
prvUnlockQueue( pxQueue );
( void ) xTaskResumeAll();
traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL;
}
} /*lint -restore */
}
| 0
|
384,679
|
check_string (uint32_t option, char *buf, uint32_t len, uint32_t maxlen,
const char *name)
{
if (len > NBD_MAX_STRING || len > maxlen) {
nbdkit_error ("%s: %s too long", name_of_nbd_opt (option), name);
return -1;
}
if (strnlen (buf, len) != len) {
nbdkit_error ("%s: %s may not include NUL bytes",
name_of_nbd_opt (option), name);
return -1;
}
/* TODO: Check for valid UTF-8? */
return 0;
}
| 0
|
509,527
|
int ha_maria::multi_range_read_explain_info(uint mrr_mode, char *str,
size_t size)
{
return ds_mrr.dsmrr_explain_info(mrr_mode, str, size);
}
| 0
|
508,385
|
void close_thread_table(THD *thd, TABLE **table_ptr)
{
TABLE *table= *table_ptr;
DBUG_ENTER("close_thread_table");
DBUG_PRINT("tcache", ("table: '%s'.'%s' %p", table->s->db.str,
table->s->table_name.str, table));
DBUG_ASSERT(!table->file->keyread_enabled());
DBUG_ASSERT(!table->file || table->file->inited == handler::NONE);
/*
The metadata lock must be released after giving back
the table to the table cache.
*/
DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE,
table->s->db.str,
table->s->table_name.str,
MDL_SHARED));
table->mdl_ticket= NULL;
if (table->file)
{
table->file->update_global_table_stats();
table->file->update_global_index_stats();
}
mysql_mutex_lock(&thd->LOCK_thd_data);
*table_ptr=table->next;
mysql_mutex_unlock(&thd->LOCK_thd_data);
if (! table->needs_reopen())
{
/* Avoid having MERGE tables with attached children in table cache. */
table->file->extra(HA_EXTRA_DETACH_CHILDREN);
/* Free memory and reset for next loop. */
free_field_buffers_larger_than(table, MAX_TDC_BLOB_SIZE);
table->file->ha_reset();
}
/*
Do this *before* entering the TABLE_SHARE::tdc.LOCK_table_share
critical section.
*/
MYSQL_UNBIND_TABLE(table->file);
tc_release_table(table);
DBUG_VOID_RETURN;
}
| 0
|
483,491
|
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
u64 size = md->num_pages << EFI_PAGE_SHIFT;
u64 end = md->phys_addr + size;
return end;
}
| 0
|
432,727
|
static void util_set_pen(wmfAPI * API, wmfDC * dc)
{
wmf_magick_t
*ddata = WMF_MAGICK_GetData(API);
wmfPen
*pen = 0;
double
pen_width,
pixel_width;
unsigned int
pen_style;
pen = WMF_DC_PEN(dc);
pen_width = (WMF_PEN_WIDTH(pen) + WMF_PEN_HEIGHT(pen)) / 2;
/* Pixel width is inverse of pixel scale */
pixel_width = (((double) 1 / (ddata->scale_x)) +
((double) 1 / (ddata->scale_y))) / 2;
/* Don't allow pen_width to be much less than pixel_width in order
to avoid dissapearing or spider-web lines */
pen_width = MagickMax(pen_width, pixel_width*0.8);
pen_style = (unsigned int) WMF_PEN_STYLE(pen);
/* Pen style specified? */
if (pen_style == PS_NULL)
{
draw_stroke_color_string(WmfDrawingWand,"none");
return;
}
DrawSetStrokeAntialias(WmfDrawingWand, MagickTrue );
DrawSetStrokeWidth(WmfDrawingWand, (unsigned long) MagickMax(0.0, pen_width));
{
LineCap
linecap;
switch ((unsigned int) WMF_PEN_ENDCAP(pen))
{
case PS_ENDCAP_SQUARE:
linecap = SquareCap;
break;
case PS_ENDCAP_ROUND:
linecap = RoundCap;
break;
case PS_ENDCAP_FLAT:
default:
linecap = ButtCap;
break;
}
DrawSetStrokeLineCap(WmfDrawingWand, linecap);
}
{
LineJoin
linejoin;
switch ((unsigned int) WMF_PEN_JOIN(pen))
{
case PS_JOIN_BEVEL:
linejoin = BevelJoin;
break;
case PS_JOIN_ROUND:
linejoin = RoundJoin;
break;
case PS_JOIN_MITER:
default:
linejoin = MiterJoin;
break;
}
DrawSetStrokeLineJoin(WmfDrawingWand,linejoin);
}
{
double
dasharray[7];
switch (pen_style)
{
case PS_DASH: /* ------- */
{
/* Pattern 18,7 */
dasharray[0] = pixel_width * 18;
dasharray[1] = pixel_width * 7;
dasharray[2] = 0;
DrawSetStrokeAntialias(WmfDrawingWand,MagickFalse);
(void) DrawSetStrokeDashArray(WmfDrawingWand,2,dasharray);
break;
}
case PS_ALTERNATE:
case PS_DOT: /* ....... */
{
/* Pattern 3,3 */
dasharray[0] = pixel_width * 3;
dasharray[1] = pixel_width * 3;
dasharray[2] = 0;
DrawSetStrokeAntialias(WmfDrawingWand,MagickFalse);
(void) DrawSetStrokeDashArray(WmfDrawingWand,2,dasharray);
break;
}
case PS_DASHDOT: /* _._._._ */
{
/* Pattern 9,6,3,6 */
dasharray[0] = pixel_width * 9;
dasharray[1] = pixel_width * 6;
dasharray[2] = pixel_width * 3;
dasharray[3] = pixel_width * 6;
dasharray[4] = 0;
DrawSetStrokeAntialias(WmfDrawingWand,MagickFalse);
(void) DrawSetStrokeDashArray(WmfDrawingWand,4,dasharray);
break;
}
case PS_DASHDOTDOT: /* _.._.._ */
{
/* Pattern 9,3,3,3,3,3 */
dasharray[0] = pixel_width * 9;
dasharray[1] = pixel_width * 3;
dasharray[2] = pixel_width * 3;
dasharray[3] = pixel_width * 3;
dasharray[4] = pixel_width * 3;
dasharray[5] = pixel_width * 3;
dasharray[6] = 0;
DrawSetStrokeAntialias(WmfDrawingWand,MagickFalse);
(void) DrawSetStrokeDashArray(WmfDrawingWand,6,dasharray);
break;
}
case PS_INSIDEFRAME: /* There is nothing to do in this case... */
case PS_SOLID:
default:
{
(void) DrawSetStrokeDashArray(WmfDrawingWand,0,(double *) NULL);
break;
}
}
}
draw_stroke_color_rgb(API,WMF_PEN_COLOR(pen));
}
| 0
|
275,950
|
static void XYcZ_addC(uECC_word_t * X1,
uECC_word_t * Y1,
uECC_word_t * X2,
uECC_word_t * Y2,
uECC_Curve curve) {
/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
uECC_word_t t5[uECC_MAX_WORDS];
uECC_word_t t6[uECC_MAX_WORDS];
uECC_word_t t7[uECC_MAX_WORDS];
wordcount_t num_words = curve->num_words;
uECC_vli_modSub(t5, X2, X1, curve->p, num_words); /* t5 = x2 - x1 */
uECC_vli_modSquare_fast(t5, t5, curve); /* t5 = (x2 - x1)^2 = A */
uECC_vli_modMult_fast(X1, X1, t5, curve); /* t1 = x1*A = B */
uECC_vli_modMult_fast(X2, X2, t5, curve); /* t3 = x2*A = C */
uECC_vli_modAdd(t5, Y2, Y1, curve->p, num_words); /* t5 = y2 + y1 */
uECC_vli_modSub(Y2, Y2, Y1, curve->p, num_words); /* t4 = y2 - y1 */
uECC_vli_modSub(t6, X2, X1, curve->p, num_words); /* t6 = C - B */
uECC_vli_modMult_fast(Y1, Y1, t6, curve); /* t2 = y1 * (C - B) = E */
uECC_vli_modAdd(t6, X1, X2, curve->p, num_words); /* t6 = B + C */
uECC_vli_modSquare_fast(X2, Y2, curve); /* t3 = (y2 - y1)^2 = D */
uECC_vli_modSub(X2, X2, t6, curve->p, num_words); /* t3 = D - (B + C) = x3 */
uECC_vli_modSub(t7, X1, X2, curve->p, num_words); /* t7 = B - x3 */
uECC_vli_modMult_fast(Y2, Y2, t7, curve); /* t4 = (y2 - y1)*(B - x3) */
uECC_vli_modSub(Y2, Y2, Y1, curve->p, num_words); /* t4 = (y2 - y1)*(B - x3) - E = y3 */
uECC_vli_modSquare_fast(t7, t5, curve); /* t7 = (y2 + y1)^2 = F */
uECC_vli_modSub(t7, t7, t6, curve->p, num_words); /* t7 = F - (B + C) = x3' */
uECC_vli_modSub(t6, t7, X1, curve->p, num_words); /* t6 = x3' - B */
uECC_vli_modMult_fast(t6, t6, t5, curve); /* t6 = (y2+y1)*(x3' - B) */
uECC_vli_modSub(Y1, t6, Y1, curve->p, num_words); /* t2 = (y2+y1)*(x3' - B) - E = y3' */
uECC_vli_set(X1, t7, num_words);
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.