idx
int64 | func
string | target
int64 |
|---|---|---|
126,386
|
// Re-runs symbolic shape inference for a function call node:
//  1. looks up the previously registered GrapplerFunctionItem for the callee,
//  2. forwards the caller's input shapes (and resource handle shapes/dtypes)
//     onto the function body's _Arg nodes,
//  3. replaces statically-known inputs with Const nodes,
//  4. rewrites _Retval outputs to Identity so the body can be inferred,
//  5. runs GraphProperties on the body and copies the resulting output
//     shapes/values back into the call node's InferenceContext.
// Returns a non-OK Status if the function was never registered or the
// caller's inputs are malformed.
Status UpdateFunction(const NodeDef* function_node) {
NameAttrList function;
TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(*function_node, &function));
// The callee must have been registered with this refiner beforehand.
auto it = fun_to_grappler_function_item_.find(function.name());
if (it == fun_to_grappler_function_item_.end()) {
return errors::InvalidArgument(
function.name(),
" was not previously added to SymbolicShapeRefiner.");
}
const absl::optional<GrapplerFunctionItem>& maybe_grappler_function_item =
it->second;
// An empty optional means instantiation failed earlier; fall back to
// marking every output of the call node as unknown instead of erroring.
if (!maybe_grappler_function_item.has_value()) {
VLOG(3) << "Skip failed to instantiate function call: function_name="
<< function.name();
auto* ctx = GetNodeContext(function_node);
auto* ic = ctx->inference_context.get();
for (int i = 0; i < ic->num_outputs(); ++i) {
TF_RETURN_IF_ERROR(SetUnknownShape(function_node, i));
}
return Status::OK();
}
// Copy (not reference) so that changes we make here (e.g., replacing
// _Arg with Const and _Retval with Identity) don't affect one in
// fun_to_grappler_function_item_.
GrapplerFunctionItem grappler_function_item = *maybe_grappler_function_item;
MutableGraphView gv(&grappler_function_item.graph);
// Forward shapes from function input nodes to argument nodes.
for (int i = 0, end = grappler_function_item.inputs().size(); i < end;
++i) {
auto& fun_input = grappler_function_item.input(i);
NodeDef* fun_node = gv.GetNode(fun_input.node_name);
const TensorId input_tensor = ParseTensorName(function_node->input(i));
if (IsControlInput(input_tensor)) {
return errors::FailedPrecondition(
"Function inputs should not contain control nodes.");
}
const NodeDef* input_node = graph_.GetNode(input_tensor.node());
if (input_node == nullptr) {
return errors::FailedPrecondition(input_tensor.node(),
" was not found in the graph.");
}
InferenceContext* input_ic = GetContext(input_node);
if (input_ic == nullptr) {
return errors::FailedPrecondition(
"Inference context has not been created for ", input_tensor.node());
}
int output_port_num = input_tensor.index();
AttrValue attr_output_shape;
TensorShapeProto proto;
const auto handle = input_ic->output(output_port_num);
input_ic->ShapeHandleToProto(handle, &proto);
// There may be dim.size < -1 in SymbolicShapeRefiner. Change those to -1.
NormalizeShapeForOutput(&proto);
// _Arg op's output shape uses _output_shapes attr.
AttrValue output_attr;
output_attr.mutable_list()->add_shape()->Swap(&proto);
(*fun_node->mutable_attr())["_output_shapes"] = output_attr;
// If dtype is DT_RESOURCE, ops that read _Arg op use _handle_dtypes and
// _handle_shapes attr for its shapes and dtypes.
if (fun_input.data_type == DT_RESOURCE) {
auto* shapes_and_types =
input_ic->output_handle_shapes_and_types(output_port_num);
if (shapes_and_types != nullptr && !shapes_and_types->empty()) {
AttrValue dtype_attr;
AttrValue shape_attr;
for (const auto& shape_and_type : *shapes_and_types) {
const auto& dtype = shape_and_type.dtype;
const auto& shape_handle = shape_and_type.shape;
dtype_attr.mutable_list()->add_type(dtype);
input_ic->ShapeHandleToProto(
shape_handle, shape_attr.mutable_list()->add_shape());
}
(*fun_node->mutable_attr())["_handle_dtypes"] = dtype_attr;
(*fun_node->mutable_attr())["_handle_shapes"] = shape_attr;
} else {
// Note that we do not return error here, even if the input node does
// not have shapes_and_types. Within the function, we cannot infer the
// output shape of the DT_RESOURCE input; hence, potentially unknown
// shapes/dims in the function output shapes.
VLOG(2)
<< "A function node (" << function_node->name()
<< ") has input with DT_RESOURCE, but the input node does not "
<< "have shapes_and_types information: \n"
<< "function_node: " << function_node->ShortDebugString() << "\n"
<< "function input: " << i
<< ", input node's output: " << output_port_num << "\n"
<< "input node: " << input_node->ShortDebugString();
}
}
}
// ReplaceInputWithConst() may break GraphView's internal node mapping
// structure; hence, we separately build node name to NodeDef* map, for the
// output nodes (before GraphView becomes invalid). Note that we use string,
// not string_view.
absl::flat_hash_map<std::string, NodeDef*> output_nodes;
for (const auto& output_arg : grappler_function_item.outputs()) {
output_nodes[output_arg.node_name] = gv.GetNode(output_arg.node_name);
}
// Replace input nodes with Consts, if values are known. Note that
// we don't check exceptions here as it's done in the above loop.
auto* ctx = GetNodeContext(function_node);
auto* ic = ctx->inference_context.get();
// Iterate backwards: ReplaceInputWithConst() shifts input indices, so
// working from the highest index down keeps index i valid.
for (int i = grappler_function_item.inputs().size() - 1; i >= 0; --i) {
const string& input = function_node->input(i);
const string node_name = NodeName(input);
const NodeDef* input_node = graph_.GetNode(node_name);
if (IsConstant(*input_node)) {
TF_CHECK_OK(
ReplaceInputWithConst(*input_node, i, &grappler_function_item));
} else if (static_cast<int>(ctx->input_tensor_protos.size()) > i &&
ctx->input_tensor_protos[i] != nullptr) {
NodeDef const_input_node = MakeConstNodeDefFromTensorProto(
ic, *ctx->input_tensor_protos[i], ctx->input_types[i]);
TF_CHECK_OK(ReplaceInputWithConst(const_input_node, i,
&grappler_function_item));
} else if (static_cast<int>(ic->input_tensors_as_shapes().size()) > i &&
IsShapeFullyDefinedIntegerVectorOrScalar(
ic, ic->input(i), ic->input_tensors_as_shapes()[i],
ctx->input_types[i])) {
// We have fully defined input_tensors_as_shapes for this input; use it
// as a const input to the function node.
NodeDef const_input_node = MakeConstNodeDefFromShape(
ic, ic->input(i), ic->input_tensors_as_shapes()[i],
ctx->input_types[i]);
TF_CHECK_OK(ReplaceInputWithConst(const_input_node, i,
&grappler_function_item));
}
}
// node_name to NodeDef* map in GraphView gv can be broken due to
// ReplaceInputWithConst(). gv should not be used after this.
// Replace output _Retval nodes with Identity nodes. _Retval is a system op
// without outputs and registered shape function.
for (const auto& output_arg : grappler_function_item.outputs()) {
NodeDef* output_node = output_nodes[output_arg.node_name];
DCHECK_EQ(output_node->op(), "_Retval");
output_node->set_op("Identity");
output_node->mutable_attr()->erase("index");
}
// Perform inference on function body.
GraphProperties gp(grappler_function_item);
TF_RETURN_IF_ERROR(gp.InferStatically(
/*assume_valid_feeds=*/true,
/*aggressive_shape_inference=*/aggressive_shape_inference_,
/*include_tensor_values=*/true));
// Add return nodes for output shapes.
int output = 0;
ctx->output_tensors_as_shapes.resize(grappler_function_item.output_size());
ctx->output_tensor_protos.resize(grappler_function_item.output_size(),
nullptr);
for (auto const& out_arg : grappler_function_item.outputs()) {
// It is guaranteed that output_tensors does not contain any control
// inputs, so port_id >= 0.
TensorId out_tensor = ParseTensorName(out_arg.node_name);
if (output_nodes.count(out_tensor.node()) <= 0) {
return errors::FailedPrecondition(
"Unable to find return function_node ", out_tensor.node(), " for ",
function_node->name());
}
const NodeDef* retnode = output_nodes[out_tensor.node()];
auto output_properties = gp.GetOutputProperties(retnode->name());
int output_properties_size = output_properties.size();
if (out_tensor.index() >= output_properties_size) {
return errors::InvalidArgument(
out_tensor.ToString(), " has invalid position ", out_tensor.index(),
" (output_properties.size() = ", output_properties.size(), ").");
}
auto& outprop = output_properties[out_tensor.index()];
TensorShapeProto shape = outprop.shape();
NormalizeShapeForOutput(&shape);
ShapeHandle out;
TF_RETURN_IF_ERROR(ic->MakeShapeFromShapeProto(shape, &out));
ic->set_output(output, out);
if (outprop.has_value()) {
// Forward tensor value to output_tensors_as_shape.
MaybeTensorProtoToShape(ic, outprop.value(),
&ctx->output_tensors_as_shapes[output]);
// Keep the proto alive in const_tensors_to_propagate_ so the raw
// pointer stored below stays valid.
const_tensors_to_propagate_.push_back(outprop.value());
ctx->output_tensor_protos[output] = &const_tensors_to_propagate_.back();
}
output++;
}
return Status::OK();
}
| 0
|
41,492
|
/*
 * Seek handler for a /proc mem-style file.  Supports only absolute
 * (orig == 0, SEEK_SET) and relative (orig == 1, SEEK_CUR) seeks; any
 * other whence value fails with -EINVAL.  Returns the new file position.
 */
loff_t mem_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t new_pos;

	if (orig == 0)
		new_pos = offset;
	else if (orig == 1)
		new_pos = file->f_pos + offset;
	else
		return -EINVAL;

	file->f_pos = new_pos;
	/* Tell the syscall return path not to treat a negative
	 * position as an errno. */
	force_successful_syscall_return();
	return file->f_pos;
}
| 0
|
100,999
|
/*
 * Resolve the symbol references of a (selinuxuser ...) CIL statement:
 * the user name must resolve to a CIL_USER datum, and the optional level
 * range is resolved either by name (range_str) or, when the range was
 * given inline, by resolving the anonymous levelrange directly.
 * Returns SEPOL_OK on success, SEPOL_ERR (or the failing resolver's rc)
 * otherwise.
 */
int cil_resolve_selinuxuser(struct cil_tree_node *current, void *extra_args)
{
struct cil_selinuxuser *selinuxuser = current->data;
struct cil_symtab_datum *user_datum = NULL;
struct cil_symtab_datum *lvlrange_datum = NULL;
struct cil_tree_node *user_node = NULL;
int rc = SEPOL_ERR;
rc = cil_resolve_name(current, selinuxuser->user_str, CIL_SYM_USERS, extra_args, &user_datum);
if (rc != SEPOL_OK) {
goto exit;
}
/* The name may resolve to some other symbol kind; reject non-users. */
user_node = NODE(user_datum);
if (user_node->flavor != CIL_USER) {
cil_log(CIL_ERR, "Selinuxuser must be a user: %s\n", user_datum->fqn);
rc = SEPOL_ERR;
goto exit;
}
selinuxuser->user = (struct cil_user*)user_datum;
if (selinuxuser->range_str != NULL) {
rc = cil_resolve_name(current, selinuxuser->range_str, CIL_SYM_LEVELRANGES, extra_args, &lvlrange_datum);
if (rc != SEPOL_OK) {
goto exit;
}
selinuxuser->range = (struct cil_levelrange*)lvlrange_datum;
/* This could still be an anonymous levelrange even if range_str is set, if range_str is a param_str*/
if (selinuxuser->range->datum.name == NULL) {
rc = cil_resolve_levelrange(current, selinuxuser->range, extra_args);
if (rc != SEPOL_OK) {
goto exit;
}
}
} else if (selinuxuser->range != NULL) {
/* Range was declared inline (anonymous); resolve it in place. */
rc = cil_resolve_levelrange(current, selinuxuser->range, extra_args);
if (rc != SEPOL_OK) {
goto exit;
}
}
rc = SEPOL_OK;
exit:
return rc;
}
| 0
|
242,907
|
/*
 * Fill 'attrib' with special-use flags for a mailbox when listing:
 * "\Inbox" for the user's own INBOX (only when the command is XLIST),
 * or the stored "/specialuse" annotation for sub-mailboxes of the INBOX.
 * Mailboxes outside the current user's INBOX hierarchy get nothing.
 */
static void specialuse_flags(const mbentry_t *mbentry, struct buf *attrib,
int isxlist)
{
if (!mbentry) return;
char *inbox = mboxname_user_mbox(imapd_userid, NULL);
int inboxlen = strlen(inbox);
/* doesn't match inbox, not xlistable */
if (strncmp(mbentry->name, inbox, inboxlen)) {
free(inbox);
return;
}
/* inbox - only print if command is XLIST */
if (mbentry->name[inboxlen] == '\0') {
if (isxlist) buf_init_ro_cstr(attrib, "\\Inbox");
}
/* subdir */
else if (mbentry->name[inboxlen] == '.') {
/* check if there's a special use flag set */
annotatemore_lookup(mbentry->name, "/specialuse", imapd_userid, attrib);
}
free(inbox);
/* otherwise it's actually another user who matches for
* the substr. Ok to just print nothing */
}
| 0
|
84,461
|
/*
 * No-op variant of the leaf-cfs_rq list assertion.
 * NOTE(review): presumably the checking version is compiled in under a
 * debug config elsewhere in the file — confirm against the full source.
 */
static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
/* Intentionally empty: assertion disabled in this configuration. */
}
| 0
|
293,777
|
/*
 * Tear down the WebSocket extension for an interpreter: unregister the
 * user object type and retract the "WebSocket" package provision
 * (NULL initProc marks it removed).  Always returns JSI_OK.
 */
static Jsi_RC Jsi_DoneWebSocket(Jsi_Interp *interp)
{
Jsi_UserObjUnregister(interp, &websockobject);
Jsi_PkgProvide(interp, "WebSocket", -1, NULL);
return JSI_OK;
}
| 0
|
130,661
|
/*
 * rsprintf - printf-style append to the global return_buffer.
 *
 * Formats the arguments into a local scratch buffer, grows return_buffer
 * in 100000-byte steps when needed, and appends the formatted text,
 * keeping strlen_retbuf in sync.
 *
 * Fix: use vsnprintf instead of vsprintf so that formatted output longer
 * than the 10000-byte scratch buffer is truncated instead of overflowing
 * the stack.
 */
void rsprintf(const char *format, ...) {
   va_list argptr;
   char str[10000];

   va_start(argptr, format);
   /* Bounded write: vsprintf here could smash the stack on long output. */
   vsnprintf(str, sizeof(str), format, argptr);
   va_end(argptr);

   /* Grow the global buffer if the appended text would not fit. */
   if (strlen_retbuf + (int) strlen(str) + 1 >= return_buffer_size) {
      return_buffer = xrealloc(return_buffer, return_buffer_size + 100000);
      memset(return_buffer + return_buffer_size, 0, 100000);
      return_buffer_size += 100000;
   }

   strcpy(return_buffer + strlen_retbuf, str);
   strlen_retbuf += strlen(str);
}
| 0
|
15,338
|
/*
 * Finish the current attachment part of the stream.
 *
 * MAYBE state: the part turned out not to be an attachment, so any
 * buffered bytes are flushed back into the output stream.  YES state:
 * the part is finalized via astream_part_finish() and the number of
 * bytes it contributed to the output is computed.  Returns >0 when data
 * was produced, 0 when none, -1 on finish failure (*error_r set by the
 * callee).  The part state is always reset afterwards.
 */
static int astream_end_of_part(struct attachment_istream *astream,
			       const char **error_r)
{
	struct attachment_istream_part *part = &astream->part;
	size_t size_before;
	int ret = 0;

	switch (part->state) {
	case MAIL_ATTACHMENT_STATE_NO:
		break;
	case MAIL_ATTACHMENT_STATE_MAYBE:
		/* Not an attachment after all: replay the buffered bytes. */
		if (part->part_buf != NULL) {
			stream_add_data(astream, part->part_buf->data,
					part->part_buf->used);
			ret = part->part_buf->used > 0 ? 1 : 0;
		}
		break;
	case MAIL_ATTACHMENT_STATE_YES:
		size_before = astream->istream.pos - astream->istream.skip;
		if (astream_part_finish(astream, error_r) < 0)
			ret = -1;
		else
			ret = astream->istream.pos - astream->istream.skip -
				size_before;
		break;
	}

	part->state = MAIL_ATTACHMENT_STATE_NO;
	astream_part_reset(astream);
	return ret;
}
| 0
|
475,667
|
/*
 * Insert a catch-all element into an nftables set.
 *
 * Scans the existing catch-all list; if an element is already active in
 * the next generation, returns -EEXIST and hands its extension back via
 * *pext.  Otherwise allocates a new list entry referencing elem->priv
 * and appends it with RCU-safe list insertion.  Returns 0 on success or
 * -ENOMEM on allocation failure.
 */
static int nft_setelem_catchall_insert(const struct net *net,
struct nft_set *set,
const struct nft_set_elem *elem,
struct nft_set_ext **pext)
{
struct nft_set_elem_catchall *catchall;
u8 genmask = nft_genmask_next(net);
struct nft_set_ext *ext;
list_for_each_entry(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
if (nft_set_elem_active(ext, genmask)) {
/* Duplicate active catch-all: report it to the caller. */
*pext = ext;
return -EEXIST;
}
}
catchall = kmalloc(sizeof(*catchall), GFP_KERNEL);
if (!catchall)
return -ENOMEM;
catchall->elem = elem->priv;
list_add_tail_rcu(&catchall->list, &set->catchall_list);
return 0;
}
| 0
|
102,572
|
/*
 * Encode an NFSv4 RENEW compound request (a single RENEW op) into the
 * RPC send buffer.  Returns the status of encoding the RENEW operation.
 */
static int nfs4_xdr_enc_renew(struct rpc_rqst *req, __be32 *p, struct nfs_client *clp)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
.nops = 1,
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
return encode_renew(&xdr, clp);
}
| 0
|
100,682
|
/* Return the compile-time miniz version string. */
const char *mz_version(void)
{
    return MZ_VERSION;
}
| 0
|
97,452
|
/* Set the RR type field of the given resource record.
 * NOTE(review): the return type is on a line outside this chunk —
 * confirm it is void in the full source before relying on it. */
ldns_rr_set_type(ldns_rr *rr, ldns_rr_type rr_type)
{
rr->_rr_type = rr_type;
}
| 0
|
187,656
|
// Return the bitmap at position idx, or NULL when idx is out of range.
// idx is unsigned (Guint), so the single `idx < size` check covers both
// the lower and upper bound.
JBIG2Bitmap *getBitmap(Guint idx) { return (idx < size) ? bitmaps[idx] : NULL; }
| 0
|
53,109
|
/*
 * Legacy hash_desc compatibility wrapper: fetch the shash_desc stored in
 * the tfm context and finalize the digest into 'out'.
 */
static int shash_compat_final(struct hash_desc *hdesc, u8 *out)
{
struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
return crypto_shash_final(*descp, out);
}
| 0
|
364,729
|
/*
 * Check whether the client has supplied the whole request body; if so,
 * intern the buffered bytes, reset the request buffer, and hand the data
 * to the special-URL finisher.  Returns 1 when the request was consumed
 * (successfully or with a 500 error), 0 when more body bytes are needed.
 */
readFinished(HTTPConnectionPtr client)
{
HTTPRequestPtr request = client->request;
if(client->reqlen - client->reqbegin >= client->bodylen) {
AtomPtr data;
data = internAtomN(client->reqbuf + client->reqbegin,
client->reqlen - client->reqbegin);
/* Buffer fully consumed: reset offsets before dispatching. */
client->reqbegin = 0;
client->reqlen = 0;
if(data == NULL) {
do_log(L_ERROR, "Couldn't allocate data.\n");
httpClientError(request, 500,
internAtom("Couldn't allocate data"));
return 1;
}
httpSpecialDoSideFinish(data, request);
return 1;
}
return 0;
}
| 0
|
257,820
|
/*
 * Serialize a matrix stage: the nine 3x3 matrix coefficients followed by
 * the three offset terms (zeros when no offset is present), all encoded
 * as 15.16 fixed-point numbers.  Returns FALSE on the first write error.
 */
static cmsBool WriteMatrix(struct _cms_typehandler_struct* self,
                           cmsIOHANDLER* io, cmsStage* mpe)
{
    _cmsStageMatrixData* m = (_cmsStageMatrixData*) mpe->Data;
    int i;

    /* 3x3 matrix coefficients, row-major. */
    for (i = 0; i < 9; i++) {
        if (!_cmsWrite15Fixed16Number(io, m->Double[i])) return FALSE;
    }

    /* Offset vector; a missing offset is written as three zeros. */
    for (i = 0; i < 3; i++) {
        if (!_cmsWrite15Fixed16Number(io, m->Offset != NULL ? m->Offset[i] : 0))
            return FALSE;
    }

    return TRUE;

    cmsUNUSED_PARAMETER(self);
}
| 0
|
431,454
|
// Command handler for dropUser: parses and validates the command,
// serializes on the authz data mutex, audits the drop, removes the user's
// privilege document, and invalidates the cached user entry.  Throws via
// uassert on any failure; returns true on success.
bool run(OperationContext* opCtx,
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
UserName userName;
Status status = auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName);
uassertStatusOK(status);
ServiceContext* serviceContext = opCtx->getClient()->getServiceContext();
// Serialize all authz metadata mutations on this service context.
stdx::lock_guard<stdx::mutex> lk(getAuthzDataMutex(serviceContext));
AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
status = requireWritableAuthSchema28SCRAM(opCtx, authzManager);
uassertStatusOK(status);
// Audit before the write so the attempt is recorded even if it fails.
audit::logDropUser(Client::getCurrent(), userName);
long long nMatched;
status = removePrivilegeDocuments(opCtx,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< userName.getUser()
<< AuthorizationManager::USER_DB_FIELD_NAME
<< userName.getDB()),
&nMatched);
// Must invalidate even on bad status - what if the write succeeded but the GLE failed?
authzManager->invalidateUserByName(userName);
uassertStatusOK(status);
if (nMatched == 0) {
uasserted(ErrorCodes::UserNotFound,
str::stream() << "User '" << userName.getFullName() << "' not found");
}
return true;
}
| 0
|
378,659
|
/*
 * Default VFS disk_free implementation: pass the query straight through
 * to the system-level sys_disk_free() for this connection.
 */
static uint64_t vfswrap_disk_free(vfs_handle_struct *handle, const char *path, bool small_query, uint64_t *bsize,
				  uint64_t *dfree, uint64_t *dsize)
{
	return sys_disk_free(handle->conn, path, small_query, bsize, dfree,
			     dsize);
}
| 0
|
236,074
|
// Records a metrics count when an interrupted download has no associated
// WebContents; downloads that do have a WebContents are not counted here.
void DownloadManagerImpl::DownloadInterrupted(
download::DownloadItemImpl* download) {
WebContents* web_contents = DownloadItemUtils::GetWebContents(download);
if (!web_contents) {
download::RecordDownloadCountWithSource(
download::INTERRUPTED_WITHOUT_WEBCONTENTS, download->download_source());
}
}
| 0
|
493,640
|
/*
 * Dispatch an ioctl request to the filesystem implementation.  Sets the
 * per-call private_data before dispatch, and returns -ENOSYS when the
 * filesystem does not implement ioctl.
 */
int fuse_fs_ioctl(struct fuse_fs *fs, const char *path, int cmd, void *arg,
struct fuse_file_info *fi, unsigned int flags, void *data)
{
fuse_get_context()->private_data = fs->user_data;
if (fs->op.ioctl) {
/*
if (fs->debug)
fprintf(stderr, "ioctl[%llu] 0x%x flags: 0x%x\n",
(unsigned long long) fi->fh, cmd, flags);
*/
return fs->op.ioctl(path, cmd, arg, fi, flags, data);
} else
return -ENOSYS;
}
| 0
|
102,249
|
/*
 * Create and populate the "builtins" module: readies the filter/map/zip
 * types, creates the module, installs every built-in singleton and type
 * into its dict via SETBUILTIN, and sets __debug__ from the interpreter's
 * optimization level.  Returns the module, or NULL on failure.
 */
_PyBuiltin_Init(void)
{
PyObject *mod, *dict, *debug;
const _PyCoreConfig *config = &_PyInterpreterState_GET_UNSAFE()->core_config;
if (PyType_Ready(&PyFilter_Type) < 0 ||
PyType_Ready(&PyMap_Type) < 0 ||
PyType_Ready(&PyZip_Type) < 0)
return NULL;
mod = _PyModule_CreateInitialized(&builtinsmodule, PYTHON_API_VERSION);
if (mod == NULL)
return NULL;
dict = PyModule_GetDict(mod);
#ifdef Py_TRACE_REFS
/* "builtins" exposes a number of statically allocated objects
* that, before this code was added in 2.3, never showed up in
* the list of "all objects" maintained by Py_TRACE_REFS. As a
* result, programs leaking references to None and False (etc)
* couldn't be diagnosed by examining sys.getobjects(0).
*/
#define ADD_TO_ALL(OBJECT) _Py_AddToAllObjects((PyObject *)(OBJECT), 0)
#else
#define ADD_TO_ALL(OBJECT) (void)0
#endif
/* Insert NAME into the module dict, bailing out of the whole init on
 * failure; also registers the object for ref tracing when enabled. */
#define SETBUILTIN(NAME, OBJECT) \
if (PyDict_SetItemString(dict, NAME, (PyObject *)OBJECT) < 0) \
return NULL; \
ADD_TO_ALL(OBJECT)
SETBUILTIN("None", Py_None);
SETBUILTIN("Ellipsis", Py_Ellipsis);
SETBUILTIN("NotImplemented", Py_NotImplemented);
SETBUILTIN("False", Py_False);
SETBUILTIN("True", Py_True);
SETBUILTIN("bool", &PyBool_Type);
SETBUILTIN("memoryview", &PyMemoryView_Type);
SETBUILTIN("bytearray", &PyByteArray_Type);
SETBUILTIN("bytes", &PyBytes_Type);
SETBUILTIN("classmethod", &PyClassMethod_Type);
SETBUILTIN("complex", &PyComplex_Type);
SETBUILTIN("dict", &PyDict_Type);
SETBUILTIN("enumerate", &PyEnum_Type);
SETBUILTIN("filter", &PyFilter_Type);
SETBUILTIN("float", &PyFloat_Type);
SETBUILTIN("frozenset", &PyFrozenSet_Type);
SETBUILTIN("property", &PyProperty_Type);
SETBUILTIN("int", &PyLong_Type);
SETBUILTIN("list", &PyList_Type);
SETBUILTIN("map", &PyMap_Type);
SETBUILTIN("object", &PyBaseObject_Type);
SETBUILTIN("range", &PyRange_Type);
SETBUILTIN("reversed", &PyReversed_Type);
SETBUILTIN("set", &PySet_Type);
SETBUILTIN("slice", &PySlice_Type);
SETBUILTIN("staticmethod", &PyStaticMethod_Type);
SETBUILTIN("str", &PyUnicode_Type);
SETBUILTIN("super", &PySuper_Type);
SETBUILTIN("tuple", &PyTuple_Type);
SETBUILTIN("type", &PyType_Type);
SETBUILTIN("zip", &PyZip_Type);
/* __debug__ is true exactly when the interpreter runs unoptimized. */
debug = PyBool_FromLong(config->optimization_level == 0);
if (PyDict_SetItemString(dict, "__debug__", debug) < 0) {
Py_DECREF(debug);
return NULL;
}
Py_DECREF(debug);
return mod;
#undef ADD_TO_ALL
#undef SETBUILTIN
}
| 0
|
335,858
|
/*
 * Codec init for the animated-WebP encoder: run the common libwebp init,
 * then create the WebPAnimEncoder with default options sized to the
 * codec context.  Returns 0 on success, AVERROR(EINVAL) if the encoder
 * cannot be created, or the common-init error code.
 */
static av_cold int libwebp_anim_encode_init(AVCodecContext *avctx)
{
int ret = ff_libwebp_encode_init_common(avctx);
if (!ret) {
LibWebPAnimContext *s = avctx->priv_data;
WebPAnimEncoderOptions enc_options;
WebPAnimEncoderOptionsInit(&enc_options);
// TODO(urvang): Expose some options on command-line perhaps.
s->enc = WebPAnimEncoderNew(avctx->width, avctx->height, &enc_options);
if (!s->enc)
return AVERROR(EINVAL);
// -1 marks "no previous frame yet" for pts tracking.
s->prev_frame_pts = -1;
s->done = 0;
}
return ret;
}
| 0
|
127,822
|
/*
 * Compare the first string_length bytes of 'data' against 'string'.
 *
 * Returns string_length (as int) when data is at least string_length
 * bytes long and the prefixes match byte-for-byte; returns 0 otherwise.
 * A zero-length string always yields 0, matching the original loop.
 *
 * Uses memcmp instead of a hand-rolled byte loop: identical observable
 * behavior, simpler, and typically faster.
 */
int _yr_scan_compare(
    uint8_t* data,
    size_t data_size,
    uint8_t* string,
    size_t string_length)
{
  if (data_size < string_length)
    return 0;

  return (memcmp(data, string, string_length) == 0) ? (int) string_length : 0;
}
| 0
|
183,060
|
// Generated V8 binding thunk: unwrap the receiver to its C++ TestObject
// and forward the call to measureAsVoidMethod().
static void MeasureAsVoidMethodMethod(const v8::FunctionCallbackInfo<v8::Value>& info) {
TestObject* impl = V8TestObject::ToImpl(info.Holder());
impl->measureAsVoidMethod();
}
| 0
|
33,896
|
/*
 * Dentry revalidation for /proc/sys entries: bail out of RCU-walk mode
 * (can't take the needed references there), otherwise the dentry is valid
 * exactly when its sysctl table is not being unregistered.
 */
static int proc_sys_revalidate(struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
return !PROC_I(d_inode(dentry))->sysctl->unregistering;
}
| 0
|
463,920
|
/*
 * Drive the BearSSL handshake state machine for one connection.
 *
 * Steps through connect states 1..3, waiting on the socket (or returning
 * immediately with *done=FALSE in nonblocking mode) whenever the TLS
 * layer needs I/O.  On completion, installs the BearSSL recv/send
 * callbacks, marks the connection established, and sets *done=TRUE.
 * Returns CURLE_OK or the first error encountered.
 */
static CURLcode bearssl_connect_common(struct Curl_easy *data,
struct connectdata *conn,
int sockindex,
bool nonblocking,
bool *done)
{
CURLcode ret;
struct ssl_connect_data *connssl = &conn->ssl[sockindex];
curl_socket_t sockfd = conn->sock[sockindex];
timediff_t timeout_ms;
int what;
/* check if the connection has already been established */
if(ssl_connection_complete == connssl->state) {
*done = TRUE;
return CURLE_OK;
}
if(ssl_connect_1 == connssl->connecting_state) {
ret = bearssl_connect_step1(data, conn, sockindex);
if(ret)
return ret;
}
/* Loop step2 until the handshake advances past the I/O-bound states. */
while(ssl_connect_2 == connssl->connecting_state ||
ssl_connect_2_reading == connssl->connecting_state ||
ssl_connect_2_writing == connssl->connecting_state) {
/* check allowed time left */
timeout_ms = Curl_timeleft(data, NULL, TRUE);
if(timeout_ms < 0) {
/* no need to continue if time already is up */
failf(data, "SSL connection timeout");
return CURLE_OPERATION_TIMEDOUT;
}
/* if ssl is expecting something, check if it's available. */
if(ssl_connect_2_reading == connssl->connecting_state ||
ssl_connect_2_writing == connssl->connecting_state) {
curl_socket_t writefd = ssl_connect_2_writing ==
connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
curl_socket_t readfd = ssl_connect_2_reading ==
connssl->connecting_state?sockfd:CURL_SOCKET_BAD;
what = Curl_socket_check(readfd, CURL_SOCKET_BAD, writefd,
nonblocking?0:timeout_ms);
if(what < 0) {
/* fatal error */
failf(data, "select/poll on SSL socket, errno: %d", SOCKERRNO);
return CURLE_SSL_CONNECT_ERROR;
}
else if(0 == what) {
if(nonblocking) {
*done = FALSE;
return CURLE_OK;
}
else {
/* timeout */
failf(data, "SSL connection timeout");
return CURLE_OPERATION_TIMEDOUT;
}
}
/* socket is readable or writable */
}
/* Run transaction, and return to the caller if it failed or if this
* connection is done nonblocking and this loop would execute again. This
* permits the owner of a multi handle to abort a connection attempt
* before step2 has completed while ensuring that a client using select()
* or epoll() will always have a valid fdset to wait on.
*/
ret = bearssl_connect_step2(data, conn, sockindex);
if(ret || (nonblocking &&
(ssl_connect_2 == connssl->connecting_state ||
ssl_connect_2_reading == connssl->connecting_state ||
ssl_connect_2_writing == connssl->connecting_state)))
return ret;
}
if(ssl_connect_3 == connssl->connecting_state) {
ret = bearssl_connect_step3(data, conn, sockindex);
if(ret)
return ret;
}
if(ssl_connect_done == connssl->connecting_state) {
/* Handshake finished: wire up the TLS I/O callbacks. */
connssl->state = ssl_connection_complete;
conn->recv[sockindex] = bearssl_recv;
conn->send[sockindex] = bearssl_send;
*done = TRUE;
}
else
*done = FALSE;
/* Reset our connect state machine */
connssl->connecting_state = ssl_connect_1;
return CURLE_OK;
}
| 0
|
24,236
|
/*
 * Replace the first occurrence of search_str inside ds_str with
 * replace_str.  Returns 0 when a replacement was made, 1 when
 * search_str does not occur in ds_str.
 */
static int replace(DYNAMIC_STRING *ds_str,
                   const char *search_str, ulong search_len,
                   const char *replace_str, ulong replace_len)
{
  DYNAMIC_STRING ds_tmp;
  const char *start = strstr(ds_str->str, search_str);

  if (!start)
    return 1;

  /* Rebuild the string as prefix + replacement + suffix. */
  init_dynamic_string_checked(&ds_tmp, "", ds_str->length + replace_len, 256);
  dynstr_append_mem_checked(&ds_tmp, ds_str->str, start - ds_str->str);
  dynstr_append_mem_checked(&ds_tmp, replace_str, replace_len);
  dynstr_append_checked(&ds_tmp, start + search_len);
  dynstr_set_checked(ds_str, ds_tmp.str);
  dynstr_free(&ds_tmp);
  return 0;
}
| 0
|
487,929
|
/*
 * pcm_reader length callback: report the sample length recorded by the
 * underlying WAV reader.
 */
static int64_t wav_get_length(pcm_reader_t *reader)
{
    wav_reader_t *wav = (wav_reader_t *)reader;
    return wav->length;
}
| 0
|
223,986
|
/*
 * Tear down an RPC service thread: drop it from its pool's thread count
 * and list (unless it was already picked as a shutdown victim), free the
 * request structure, and release the thread's reference on the server.
 */
svc_exit_thread(struct svc_rqst *rqstp)
{
struct svc_serv *serv = rqstp->rq_server;
struct svc_pool *pool = rqstp->rq_pool;
spin_lock_bh(&pool->sp_lock);
pool->sp_nrthreads--;
/* If RQ_VICTIM was already set, the list removal was done elsewhere. */
if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
list_del_rcu(&rqstp->rq_all);
spin_unlock_bh(&pool->sp_lock);
svc_rqst_free(rqstp);
/* Release the server */
if (serv)
svc_destroy(serv);
}
| 0
|
381,534
|
typedef void (*TransactEntryFunc_t) (HttpTransact::State* s);
inline bool
is_response_body_precluded(HTTPStatus status_code, int method)
{
////////////////////////////////////////////////////////
// the spec says about message body the following: //
// All responses to the HEAD request method MUST NOT //
// include a message-body, even though the presence //
// of entity-header fields might lead one to believe //
// they do. All 1xx (informational), 204 (no content),//
// and 304 (not modified) responses MUST NOT include //
// a message-body. //
////////////////////////////////////////////////////////
if (((status_code != HTTP_STATUS_OK) &&
((status_code == HTTP_STATUS_NOT_MODIFIED) ||
((status_code<HTTP_STATUS_OK) && (status_code>= HTTP_STATUS_CONTINUE)) ||
(status_code == 204))) || (method == HTTP_WKSIDX_HEAD)) {
return true;
} else {
| 0
|
32,161
|
/*
 * Return true when the dentry's cached type marks it as a regular file.
 */
static inline bool d_is_reg(const struct dentry *dentry)
{
return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE;
}
| 0
|
295,087
|
/*
 * AST constructor for an Index slice node.  Allocates the node from the
 * arena (so it is freed with the arena, not individually) and stores the
 * mandatory value expression.  Returns NULL with an exception set when
 * value is missing or allocation fails.
 */
Index(expr_ty value, PyArena *arena)
{
slice_ty p;
if (!value) {
PyErr_SetString(PyExc_ValueError,
"field value is required for Index");
return NULL;
}
p = (slice_ty)PyArena_Malloc(arena, sizeof(*p));
if (!p)
return NULL;
p->kind = Index_kind;
p->v.Index.value = value;
return p;
}
| 0
|
237,803
|
// Flushes cached mask/enable state to the GL driver when marked dirty.
// Depth/stencil masks and tests are forced off when the bound framebuffer
// lacks the corresponding attachment; the alpha color mask likewise
// depends on the framebuffer having an alpha channel.
void GLES2DecoderImpl::ApplyDirtyState() {
if (state_dirty_) {
glColorMask(
mask_red_, mask_green_, mask_blue_,
mask_alpha_ && BoundFramebufferHasColorAttachmentWithAlpha());
bool have_depth = BoundFramebufferHasDepthAttachment();
glDepthMask(mask_depth_ && have_depth);
EnableDisable(GL_DEPTH_TEST, enable_depth_test_ && have_depth);
bool have_stencil = BoundFramebufferHasStencilAttachment();
glStencilMaskSeparate(GL_FRONT, have_stencil ? mask_stencil_front_ : 0);
glStencilMaskSeparate(GL_BACK, have_stencil ? mask_stencil_back_ : 0);
EnableDisable(GL_STENCIL_TEST, enable_stencil_test_ && have_stencil);
EnableDisable(GL_CULL_FACE, enable_cull_face_);
EnableDisable(GL_SCISSOR_TEST, enable_scissor_test_);
EnableDisable(GL_BLEND, enable_blend_);
state_dirty_ = false;
}
}
| 0
|
170,223
|
// Computes a pointer into image_data's pixel buffer for the pixel at
// 'location' (plugin coordinates), returning it via *region along with
// the row stride.  *region is set to nullptr when the image is null, the
// buffer is missing, or the offset location falls outside the plugin
// rect.
void PDFiumEngine::GetRegion(const pp::Point& location,
pp::ImageData* image_data,
void** region,
int* stride) const {
if (image_data->is_null()) {
DCHECK(plugin_size_.IsEmpty());
*stride = 0;
*region = nullptr;
return;
}
char* buffer = static_cast<char*>(image_data->data());
*stride = image_data->stride();
pp::Point offset_location = location + page_offset_;
if (!buffer ||
!pp::Rect(page_offset_, plugin_size_).Contains(offset_location)) {
*region = nullptr;
return;
}
// NOTE(review): the row offset uses location.y() while the column offset
// adds page_offset_.x() — the asymmetry looks intentional (the buffer
// presumably already accounts for the vertical offset) but should be
// confirmed against the callers.  4 = bytes per BGRA pixel.
buffer += location.y() * (*stride);
buffer += (location.x() + page_offset_.x()) * 4;
*region = buffer;
}
| 0
|
501,392
|
/*
 * Allocation-counting tests for the zstd compression APIs: runs the
 * simple, streaming, and multithreaded (one-shot and streaming) code
 * paths at levels 1-6 with a counting custom allocator, displaying the
 * malloc statistics for each.  'part' selects how many of the four test
 * groups run (a group runs when part <= its index).  Only active at
 * display level >= 3.  Always returns 0.
 */
static int FUZ_mallocTests_internal(unsigned seed, double compressibility, unsigned part,
void* inBuffer, size_t inSize, void* outBuffer, size_t outSize)
{
/* test only played in verbose mode, as they are long */
if (g_displayLevel<3) return 0;
/* Create compressible noise */
if (!inBuffer || !outBuffer) {
DISPLAY("Not enough memory, aborting\n");
exit(1);
}
RDG_genBuffer(inBuffer, inSize, compressibility, 0. /*auto*/, seed);
/* simple compression tests */
if (part <= 1)
{ int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
mallocCounter_t malcount = INIT_MALLOC_COUNTER;
ZSTD_customMem const cMem = { FUZ_mallocDebug, FUZ_freeDebug, &malcount };
ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cMem);
CHECK_Z( ZSTD_compressCCtx(cctx, outBuffer, outSize, inBuffer, inSize, compressionLevel) );
ZSTD_freeCCtx(cctx);
DISPLAYLEVEL(3, "compressCCtx level %i : ", compressionLevel);
FUZ_displayMallocStats(malcount);
} }
/* streaming compression tests */
if (part <= 2)
{ int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
mallocCounter_t malcount = INIT_MALLOC_COUNTER;
ZSTD_customMem const cMem = { FUZ_mallocDebug, FUZ_freeDebug, &malcount };
ZSTD_CCtx* const cstream = ZSTD_createCStream_advanced(cMem);
ZSTD_outBuffer out = { outBuffer, outSize, 0 };
ZSTD_inBuffer in = { inBuffer, inSize, 0 };
CHECK_Z( ZSTD_initCStream(cstream, compressionLevel) );
CHECK_Z( ZSTD_compressStream(cstream, &out, &in) );
CHECK_Z( ZSTD_endStream(cstream, &out) );
ZSTD_freeCStream(cstream);
DISPLAYLEVEL(3, "compressStream level %i : ", compressionLevel);
FUZ_displayMallocStats(malcount);
} }
/* advanced MT API test */
if (part <= 3)
{ U32 nbThreads;
for (nbThreads=1; nbThreads<=4; nbThreads++) {
int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
mallocCounter_t malcount = INIT_MALLOC_COUNTER;
ZSTD_customMem const cMem = { FUZ_mallocDebug, FUZ_freeDebug, &malcount };
ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cMem);
ZSTD_outBuffer out = { outBuffer, outSize, 0 };
ZSTD_inBuffer in = { inBuffer, inSize, 0 };
CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, (U32)compressionLevel) );
CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_p_nbWorkers, nbThreads) );
/* Drain until compress_generic reports completion (returns 0). */
while ( ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end) ) {}
ZSTD_freeCCtx(cctx);
DISPLAYLEVEL(3, "compress_generic,-T%u,end level %i : ",
nbThreads, compressionLevel);
FUZ_displayMallocStats(malcount);
} } }
/* advanced MT streaming API test */
if (part <= 4)
{ U32 nbThreads;
for (nbThreads=1; nbThreads<=4; nbThreads++) {
int compressionLevel;
for (compressionLevel=1; compressionLevel<=6; compressionLevel++) {
mallocCounter_t malcount = INIT_MALLOC_COUNTER;
ZSTD_customMem const cMem = { FUZ_mallocDebug, FUZ_freeDebug, &malcount };
ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cMem);
ZSTD_outBuffer out = { outBuffer, outSize, 0 };
ZSTD_inBuffer in = { inBuffer, inSize, 0 };
CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, (U32)compressionLevel) );
CHECK_Z( ZSTD_CCtx_setParameter(cctx, ZSTD_p_nbWorkers, nbThreads) );
CHECK_Z( ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_continue) );
while ( ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end) ) {}
ZSTD_freeCCtx(cctx);
DISPLAYLEVEL(3, "compress_generic,-T%u,continue level %i : ",
nbThreads, compressionLevel);
FUZ_displayMallocStats(malcount);
} } }
return 0;
}
| 0
|
474,484
|
/*
 * Validate the "segments" section of a LUKS2 JSON header.
 *
 * Checks that every segment object carries the mandatory type/offset/size
 * fields, that offset and size are 512-byte aligned, that backup segments
 * (if any) come strictly after all regular segments, that regular segments
 * form a gap-free sequence of non-overlapping intervals, and that crypt
 * segments reference valid digests.
 *
 * Returns 0 when the section is valid, 1 on any validation failure.
 */
static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
{
 json_object *jobj_segments, *jobj_digests, *jobj_offset, *jobj_size, *jobj_type, *jobj_flags, *jobj;
 uint64_t offset, size;
 int i, r, count, first_backup = -1;
 struct interval *intervals = NULL;
 if (!json_object_object_get_ex(hdr_jobj, "segments", &jobj_segments)) {
  log_dbg(cd, "Missing segments section.");
  return 1;
 }
 count = json_object_object_length(jobj_segments);
 if (count < 1) {
  log_dbg(cd, "Empty segments section.");
  return 1;
 }
 /* digests should already be validated */
 if (!json_object_object_get_ex(hdr_jobj, "digests", &jobj_digests))
  return 1;
 json_object_object_foreach(jobj_segments, key, val) {
  /* segment keys must be plain decimal numbers ("0", "1", ...) */
  if (!numbered(cd, "Segment", key))
   return 1;
  /* those fields are mandatory for all segment types */
  if (!(jobj_type = json_contains(cd, val, key, "Segment", "type", json_type_string)) ||
      !(jobj_offset = json_contains(cd, val, key, "Segment", "offset", json_type_string)) ||
      !(jobj_size = json_contains(cd, val, key, "Segment", "size", json_type_string)))
   return 1;
  /* offset is stored as a decimal string; parse it into 'offset' */
  if (!numbered(cd, "offset", json_object_get_string(jobj_offset)) ||
      !json_str_to_uint64(jobj_offset, &offset))
   return 1;
  /* size "dynamic" means whole device starting at 'offset' */
  if (strcmp(json_object_get_string(jobj_size), "dynamic")) {
   /* explicit size: must parse and be non-zero */
   if (!numbered(cd, "size", json_object_get_string(jobj_size)) ||
       !json_str_to_uint64(jobj_size, &size) || !size)
    return 1;
  } else
   size = 0;
  /* all device-mapper devices are aligned to 512 sector size */
  if (MISALIGNED_512(offset)) {
   log_dbg(cd, "Offset field has to be aligned to sector size: %" PRIu32, SECTOR_SIZE);
   return 1;
  }
  if (MISALIGNED_512(size)) {
   log_dbg(cd, "Size field has to be aligned to sector size: %" PRIu32, SECTOR_SIZE);
   return 1;
  }
  /* flags array is optional and must contain strings */
  if (json_object_object_get_ex(val, "flags", NULL)) {
   if (!(jobj_flags = json_contains(cd, val, key, "Segment", "flags", json_type_array)))
    return 1;
   for (i = 0; i < (int) json_object_array_length(jobj_flags); i++)
    if (!json_object_is_type(json_object_array_get_idx(jobj_flags, i), json_type_string))
     return 1;
  }
  /* track the lowest key that holds a backup segment */
  i = atoi(key);
  if (json_segment_is_backup(val)) {
   if (first_backup < 0 || i < first_backup)
    first_backup = i;
  } else {
   /* regular segments must all precede backup segments */
   if ((first_backup >= 0) && i >= first_backup) {
    log_dbg(cd, "Regular segment at %d is behind backup segment at %d", i, first_backup);
    return 1;
   }
  }
  /* crypt */
  if (!strcmp(json_object_get_string(jobj_type), "crypt") &&
      hdr_validate_crypt_segment(cd, val, key, jobj_digests, offset, size))
   return 1;
 }
 /* first_backup == 0 means every segment is a backup segment */
 if (first_backup == 0) {
  log_dbg(cd, "No regular segment.");
  return 1;
 }
 /* avoid needlessly large allocation when first backup segment is invalid */
 if (first_backup >= count) {
  log_dbg(cd, "Gap between last regular segment and backup segment at key %d.", first_backup);
  return 1;
 }
 /* no backup segments at all: every key is a regular segment */
 if (first_backup < 0)
  first_backup = count;
 /* overflow guard before the multiply; on overflow intervals stays NULL
  * and the allocation-failure path below reports the error */
 if ((size_t)first_backup < SIZE_MAX / sizeof(*intervals))
  intervals = malloc(first_backup * sizeof(*intervals));
 if (!intervals) {
  log_dbg(cd, "Not enough memory.");
  return 1;
 }
 /* collect [offset, length) of every regular segment; size 0 ("dynamic")
  * is treated as extending to the end of the device (UINT64_MAX) */
 for (i = 0; i < first_backup; i++) {
  jobj = json_segments_get_segment(jobj_segments, i);
  if (!jobj) {
   log_dbg(cd, "Gap at key %d in segments object.", i);
   free(intervals);
   return 1;
  }
  intervals[i].offset = json_segment_get_offset(jobj, 0);
  intervals[i].length = json_segment_get_size(jobj, 0) ?: UINT64_MAX;
 }
 r = !validate_segment_intervals(cd, first_backup, intervals);
 free(intervals);
 if (r)
  return 1;
 /* remaining keys (backup segments) must also be contiguous */
 for (; i < count; i++) {
  if (!json_segments_get_segment(jobj_segments, i)) {
   log_dbg(cd, "Gap at key %d in segments object.", i);
   return 1;
  }
 }
 return 0;
}
| 0
|
367,909
|
/*
 * Rebuild CPU register state from a user-space sigcontext during sigreturn.
 * Writes the restored d0 value through *_d0 for the caller.
 * Returns 0 on success, non-zero if any user-space access failed
 * (the caller treats that as a corrupt signal frame).
 */
static int restore_sigcontext(struct pt_regs *regs,
			       struct sigcontext __user *sc, long *_d0)
{
 unsigned int err = 0;
 /* discard the lazily-held FPU state; it is reloaded below from the
  * frame's fpucontext if one was saved */
 if (is_using_fpu(current))
  fpu_kill_state(current);
 /* copy one register field from the user frame, accumulating faults in err */
 #define COPY(x) err |= __get_user(regs->x, &sc->x)
 COPY(d1); COPY(d2); COPY(d3);
 COPY(a0); COPY(a1); COPY(a2); COPY(a3);
 COPY(e0); COPY(e1); COPY(e2); COPY(e3);
 COPY(e4); COPY(e5); COPY(e6); COPY(e7);
 COPY(lar); COPY(lir);
 COPY(mdr); COPY(mdrq);
 COPY(mcvf); COPY(mcrl); COPY(mcrh);
 COPY(sp); COPY(pc);
 #undef COPY
 {
  unsigned int tmpflags;
  /* only user-modifiable EPSW bits may come from the frame; the
   * kernel-controlled bits of regs->epsw are preserved */
 #ifndef CONFIG_MN10300_USING_JTAG
 #define USER_EPSW (EPSW_FLAG_Z | EPSW_FLAG_N | EPSW_FLAG_C | EPSW_FLAG_V | \
		    EPSW_T | EPSW_nAR)
 #else
 #define USER_EPSW (EPSW_FLAG_Z | EPSW_FLAG_N | EPSW_FLAG_C | EPSW_FLAG_V | \
		    EPSW_nAR)
 #endif
  err |= __get_user(tmpflags, &sc->epsw);
  regs->epsw = (regs->epsw & ~USER_EPSW) |
    (tmpflags & USER_EPSW);
  regs->orig_d0 = -1;  /* disable syscall checks */
 }
 {
  struct fpucontext *buf;
  /* optional FPU state: pointer is NULL when none was saved */
  err |= __get_user(buf, &sc->fpucontext);
  if (buf) {
   /* validate the user pointer before touching the FPU block */
   if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
    goto badframe;
   err |= fpu_restore_sigcontext(buf);
  }
 }
 err |= __get_user(*_d0, &sc->d0);
 return err;
badframe:
 return 1;
}
| 0
|
331,384
|
/*
 * Decide whether a new transfer of [offset, offset+len) can be merged
 * into the RDMA transfer currently being accumulated.
 *
 * Returns 1 when the request extends the current chunk sequentially and
 * stays within the current RAM block and chunk; 0 otherwise.
 *
 * Fix: the original computed 'block', 'host_addr' and 'chunk_end' from
 * rdma->current_index / rdma->current_chunk BEFORE validating that those
 * indices are non-negative, so a negative index caused an out-of-bounds
 * array access.  All validity checks now run before any dereference.
 */
static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma,
                    uint64_t offset, uint64_t len)
{
    RDMALocalBlock *block;
    uint8_t *host_addr;
    uint8_t *chunk_end;

    /* No block/chunk selected yet: nothing to merge into. */
    if (rdma->current_index < 0) {
        return 0;
    }

    if (rdma->current_chunk < 0) {
        return 0;
    }

    /* Nothing accumulated yet. */
    if (rdma->current_length == 0) {
        return 0;
    }

    /* Indices are valid now; it is safe to look up the block. */
    block = &(rdma->local_ram_blocks.block[rdma->current_index]);
    host_addr = block->local_host_addr + (offset - block->offset);
    chunk_end = ram_chunk_end(block, rdma->current_chunk);

    /*
     * Only merge into chunk sequentially.
     */
    if (offset != (rdma->current_addr + rdma->current_length)) {
        return 0;
    }

    /* Request must lie entirely inside the current RAM block... */
    if (offset < block->offset) {
        return 0;
    }

    if ((offset + len) > (block->offset + block->length)) {
        return 0;
    }

    /* ...and must not spill past the end of the current chunk. */
    if ((host_addr + len) > chunk_end) {
        return 0;
    }

    return 1;
}
| 0
|
489,917
|
/*
 * Locate the PIV applet on the card.
 *
 * First asks the default application for its template and matches the
 * returned PIX against the known piv_aids table; failing that, tries to
 * SELECT each known AID explicitly (optionally constrained to the card
 * type forced via the conf file).  On success returns the index into
 * piv_aids and fills 'aid_file' from the returned FCI; otherwise returns
 * SC_ERROR_NO_CARD_SUPPORT or a transmit error.
 *
 * Fix: the card-type conditional around LOG_FUNC_RETURN had two
 * byte-identical branches (both returned 'i'); the dead conditional is
 * removed without changing behavior.
 */
static int piv_find_aid(sc_card_t * card, sc_file_t *aid_file)
{
	sc_apdu_t apdu;
	u8 rbuf[SC_MAX_APDU_BUFFER_SIZE];
	int r,i;
	const u8 *tag;
	size_t taglen;
	const u8 *pix;
	size_t pixlen;
	size_t resplen = sizeof(rbuf);

	SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);
	/* first see if the default application will return a template
	 * that we know about.
	 */
	r = piv_select_aid(card, piv_aids[0].value, piv_aids[0].len_short, rbuf, &resplen);
	if (r >= 0 && resplen > 2 ) {
		/* look for an application template (0x61) carrying a PIX (0x4F) */
		tag = sc_asn1_find_tag(card->ctx, rbuf, resplen, 0x61, &taglen);
		if (tag != NULL) {
			pix = sc_asn1_find_tag(card->ctx, tag, taglen, 0x4F, &pixlen);
			if (pix != NULL ) {
				sc_log(card->ctx, "found PIX");

				/* early cards returned full AID, rather then just the pix */
				for (i = 0; piv_aids[i].len_long != 0; i++) {
					if ((pixlen >= 6 && memcmp(pix, piv_aids[i].value + 5,
							piv_aids[i].len_long - 5 ) == 0)
						 || ((pixlen >=  piv_aids[i].len_short &&
							memcmp(pix, piv_aids[i].value,
							piv_aids[i].len_short) == 0))) {
						/* both former card-type branches returned i */
						LOG_FUNC_RETURN(card->ctx, i);
					}
				}
			}
		}
	}

	/* for testing, we can force the use of a specific AID
	 * by using the card= parameter in conf file
	 */
	for (i = 0; piv_aids[i].len_long != 0; i++) {
		/* honor a forced card type: skip non-matching AIDs */
		if (card->type > SC_CARD_TYPE_PIV_II_BASE &&
		    card->type < SC_CARD_TYPE_PIV_II_BASE+1000 &&
		    card->type != piv_aids[i].enumtag) {
			continue;
		}
		sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0xA4, 0x04, 0x00);
		apdu.lc = piv_aids[i].len_long;
		apdu.data = piv_aids[i].value;
		apdu.datalen = apdu.lc;
		apdu.resp = rbuf;
		apdu.resplen = sizeof(rbuf);
		apdu.le = 256;

		r = sc_transmit_apdu(card, &apdu);
		LOG_TEST_RET(card->ctx, r, "APDU transmit failed");
		r = sc_check_sw(card, apdu.sw1, apdu.sw2);
		if (r) {
			/* SELECT failed: fatal only if this AID matches the forced type */
			if (card->type != 0 && card->type == piv_aids[i].enumtag)
				LOG_FUNC_RETURN(card->ctx, (r < 0)? r: i);
			continue;
		}

		if ( apdu.resplen == 0 && r == 0) {
			/* could be the MSU card */
			continue; /* other cards will return a FCI */
		}

		/* response must be an FCI template (0x6f) with a sane length */
		if (apdu.resp[0] != 0x6f || apdu.resp[1] > apdu.resplen - 2 )
			SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_NO_CARD_SUPPORT);

		card->ops->process_fci(card, aid_file, apdu.resp+2, apdu.resp[1]);

		LOG_FUNC_RETURN(card->ctx, i);
	}

	LOG_FUNC_RETURN(card->ctx, SC_ERROR_NO_CARD_SUPPORT);
}
| 0
|
296,266
|
/*
 * Release a pathname previously obtained with getname().
 * When auditing is active the name is handed to the audit subsystem,
 * which becomes responsible for freeing it; otherwise it goes straight
 * back to the names cache.
 */
void putname(const char *name)
{
	if (likely(audit_dummy_context()))
		__putname(name);
	else
		audit_putname(name);
}
| 0
|
65,675
|
/*
 * Boot-time init: register the raw6 per-network-namespace operations.
 * Returns the status from register_pernet_subsys().
 */
int __init raw6_proc_init(void)
{
	int ret;

	ret = register_pernet_subsys(&raw6_net_ops);
	return ret;
}
| 0
|
169,599
|
/*
 * Enable the sub-effect owned by this context within the shared LVM
 * bundle instance: fetch the current control parameters, turn on the
 * operating mode matching pContext->EffectType, and write them back.
 * Returns 0 on success, -EINVAL if the LVM engine rejects either call.
 */
int LvmEffect_enable(EffectContext *pContext){
    LVM_ControlParams_t ActiveParams;                     /* Current control Parameters */
    LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS;          /* Function call status */

    /* Get the current settings */
    LvmStatus = LVM_GetControlParameters(pContext->pBundledContext->hInstance,
                                         &ActiveParams);
    LVM_ERROR_CHECK(LvmStatus, "LVM_GetControlParameters", "LvmEffect_enable")
    if (LvmStatus != LVM_SUCCESS) return -EINVAL;

    switch (pContext->EffectType) {
    case LVM_BASS_BOOST:
        ALOGV("\tLvmEffect_enable : Enabling LVM_BASS_BOOST");
        ActiveParams.BE_OperatingMode = LVM_BE_ON;
        break;
    case LVM_VIRTUALIZER:
        ALOGV("\tLvmEffect_enable : Enabling LVM_VIRTUALIZER");
        ActiveParams.VirtualizerOperatingMode = LVM_MODE_ON;
        break;
    case LVM_EQUALIZER:
        ALOGV("\tLvmEffect_enable : Enabling LVM_EQUALIZER");
        ActiveParams.EQNB_OperatingMode = LVM_EQNB_ON;
        break;
    case LVM_VOLUME:
        /* volume has no dedicated operating-mode switch */
        ALOGV("\tLvmEffect_enable : Enabling LVM_VOLUME");
        break;
    default:
        break;
    }

    LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, &ActiveParams);
    LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "LvmEffect_enable")
    if (LvmStatus != LVM_SUCCESS) return -EINVAL;
    return 0;
}
| 0
|
74,029
|
/* ethtool get_regs_len hook: the register dump size is a fixed constant
 * for all tg3 devices, so the device argument is unused. */
static int tg3_get_regs_len(struct net_device *dev)
{
	(void)dev;
	return TG3_REG_BLK_SIZE;
}
| 0
|
269,734
|
/*
 * Create the keyboard serio port, wire up the shared i8042 interrupt and
 * enable the port.  Each failure step unwinds exactly what was set up
 * before it.  Returns 0 on success, negative errno on failure.
 */
static int __init i8042_setup_kbd(void)
{
	int error;

	error = i8042_create_kbd_port();
	if (error)
		return error;

	error = request_irq(I8042_KBD_IRQ, i8042_interrupt, IRQF_SHARED,
			    "i8042", i8042_platform_device);
	if (error) {
		i8042_free_kbd_port();
		return error;
	}

	error = i8042_enable_kbd_port();
	if (error) {
		free_irq(I8042_KBD_IRQ, i8042_platform_device);
		i8042_free_kbd_port();
		return error;
	}

	i8042_kbd_irq_registered = true;
	return 0;
}
| 0
|
510,836
|
// Queue an incoming DATA chunk for this stream.  A null data pointer marks
// end-of-stream; anything arriving after EOS has been seen is dropped.
inline void Http2Stream::AddChunk(const uint8_t* data, size_t len) {
  CHECK(!this->IsDestroyed());
  // Record when the first byte of the stream arrived (statistics).
  if (this->statistics_.first_byte == 0)
    this->statistics_.first_byte = uv_hrtime();
  // Ignore data received after end-of-stream.
  if (flags_ & NGHTTP2_STREAM_FLAG_EOS)
    return;
  char* buf = nullptr;
  if (data == nullptr) {
    // Null data signals end-of-stream; queue an empty terminating buffer.
    flags_ |= NGHTTP2_STREAM_FLAG_EOS;
  } else if (len > 0) {
    // Copy the payload; ownership passes to the queued buffer.
    buf = Malloc<char>(len);
    memcpy(buf, data, len);
  }
  data_chunks_.emplace(uv_buf_init(buf, len));
}
| 0
|
60,929
|
Jsi_OptionsCustomPrint(void* clientData, Jsi_Interp *interp, const char *name, void *rec, int offset)
{
    /* Custom option printer: return the Jsi_Value* stored 'offset' bytes
     * into the option record.  clientData, interp and name are unused. */
    char *base = (char*)rec;
    return *(Jsi_Value **)(base + offset);
}
| 0
|
434,967
|
/*
 * Check that 'buf' contains the synthetic test gradient used by the
 * codec tests: red ramps with x, green ramps with y, blue with x+y
 * (all modulo 256).  TJFLAG_BOTTOMUP flips the row order.  When
 * 'gray2rgb' is set the image came from a grayscale source, so every
 * color channel must equal the gray ramp 'b'.
 * Returns 1 if every pixel matches, 0 on the first mismatch.
 */
static int cmpBitmap(unsigned char *buf, int width, int pitch, int height,
                     int pf, int flags, int gray2rgb)
{
  /* per-format channel byte offsets and pixel size from the tj tables */
  int roffset = tjRedOffset[pf];
  int goffset = tjGreenOffset[pf];
  int boffset = tjBlueOffset[pf];
  int aoffset = tjAlphaOffset[pf];
  int ps = tjPixelSize[pf];
  int i, j;

  for (j = 0; j < height; j++) {
    /* bottom-up layouts store row 0 last */
    int row = (flags & TJFLAG_BOTTOMUP) ? height - j - 1 : j;

    for (i = 0; i < width; i++) {
      /* expected gradient values for this pixel */
      unsigned char r = (i * 256 / width) % 256;
      unsigned char g = (j * 256 / height) % 256;
      unsigned char b = (j * 256 / height + i * 256 / width) % 256;

      if (pf == TJPF_GRAY) {
        /* single-channel: compare against the gray ramp */
        if (buf[row * pitch + i * ps] != b)
          return 0;
      } else if (pf == TJPF_CMYK) {
        /* convert CMYK back to RGB before comparing */
        unsigned char rf, gf, bf;

        cmyk_to_rgb(buf[row * pitch + i * ps + 0],
                    buf[row * pitch + i * ps + 1],
                    buf[row * pitch + i * ps + 2],
                    buf[row * pitch + i * ps + 3], &rf, &gf, &bf);
        if (gray2rgb) {
          if (rf != b || gf != b || bf != b)
            return 0;
        } else if (rf != r || gf != g || bf != b) return 0;
      } else {
        /* packed RGB(A): compare each channel at its table offset */
        if (gray2rgb) {
          if (buf[row * pitch + i * ps + roffset] != b ||
              buf[row * pitch + i * ps + goffset] != b ||
              buf[row * pitch + i * ps + boffset] != b)
            return 0;
        } else if (buf[row * pitch + i * ps + roffset] != r ||
                   buf[row * pitch + i * ps + goffset] != g ||
                   buf[row * pitch + i * ps + boffset] != b)
          return 0;
        /* alpha (when present) must be fully opaque */
        if (aoffset >= 0 && buf[row * pitch + i * ps + aoffset] != 0xFF)
          return 0;
      }
    }
  }
  return 1;
}
| 0
|
296,588
|
circle_contain(PG_FUNCTION_ARGS)
{
	CIRCLE	   *circle1 = PG_GETARG_CIRCLE_P(0);
	CIRCLE	   *circle2 = PG_GETARG_CIRCLE_P(1);

	/* circle1 contains circle2 when the distance between centers plus
	 * circle2's radius does not exceed circle1's radius (fuzzy compare). */
	double		reach = point_dt(&circle1->center, &circle2->center) + circle2->radius;

	PG_RETURN_BOOL(FPle(reach, circle1->radius));
}
| 0
|
361,051
|
/*
 * Marshal a stat structure into the SMB_QUERY_FILE_UNIX_BASIC wire
 * format at 'pdata'.  Layout (offsets advance with pdata):
 *   size, allocated size, ctime/atime/mtime, uid, gid, file type,
 *   major/minor device, inode, permissions, link count.
 * Returns the pointer just past the marshalled data.
 */
static char *store_file_unix_basic(connection_struct *conn,
				char *pdata,
				files_struct *fsp,
				const SMB_STRUCT_STAT *psbuf)
{
	DEBUG(10,("store_file_unix_basic: SMB_QUERY_FILE_UNIX_BASIC\n"));
	DEBUG(4,("store_file_unix_basic: st_mode=%o\n",(int)psbuf->st_ex_mode));

	SOFF_T(pdata,0,get_file_size_stat(psbuf));             /* File size 64 Bit */
	pdata += 8;

	SOFF_T(pdata,0,SMB_VFS_GET_ALLOC_SIZE(conn,fsp,psbuf)); /* Number of bytes used on disk - 64 Bit */
	pdata += 8;

	put_long_date_timespec(TIMESTAMP_SET_NT_OR_BETTER, pdata, psbuf->st_ex_ctime);       /* Change Time 64 Bit */
	put_long_date_timespec(TIMESTAMP_SET_NT_OR_BETTER ,pdata+8, psbuf->st_ex_atime);     /* Last access time 64 Bit */
	put_long_date_timespec(TIMESTAMP_SET_NT_OR_BETTER, pdata+16, psbuf->st_ex_mtime);    /* Last modification time 64 Bit */
	pdata += 24;

	/* ids are written as 64-bit little-endian: low 32 bits value, high 32 zero */
	SIVAL(pdata,0,psbuf->st_ex_uid);               /* user id for the owner */
	SIVAL(pdata,4,0);
	pdata += 8;

	SIVAL(pdata,0,psbuf->st_ex_gid);               /* group id of owner */
	SIVAL(pdata,4,0);
	pdata += 8;

	SIVAL(pdata,0,unix_filetype(psbuf->st_ex_mode));
	pdata += 4;

	SIVAL(pdata,0,unix_dev_major(psbuf->st_ex_rdev));   /* Major device number if type is device */
	SIVAL(pdata,4,0);
	pdata += 8;

	SIVAL(pdata,0,unix_dev_minor(psbuf->st_ex_rdev));   /* Minor device number if type is device */
	SIVAL(pdata,4,0);
	pdata += 8;

	SINO_T_VAL(pdata,0,(SMB_INO_T)psbuf->st_ex_ino);   /* inode number */
	pdata += 8;

	SIVAL(pdata,0, unix_perms_to_wire(psbuf->st_ex_mode));     /* Standard UNIX file permissions */
	SIVAL(pdata,4,0);
	pdata += 8;

	SIVAL(pdata,0,psbuf->st_ex_nlink);             /* number of hard links */
	SIVAL(pdata,4,0);
	pdata += 8;

	return pdata;
}
| 0
|
412,196
|
/* Drop one reference from the extended-attribute inode's refcount;
 * returns the status of the underlying update. */
static int ext4_xattr_inode_dec_ref(handle_t *handle, struct inode *ea_inode)
{
	const int delta = -1;

	return ext4_xattr_inode_update_ref(handle, ea_inode, delta);
}
| 0
|
84,651
|
/*
 * Per-CPU SMM relocation callback: compute the CPU's permanent SMBASE
 * and hand off to the platform relocation handler (plus STM setup when
 * enabled).  Runs inside the SMM stub with the parameters passed via
 * the smm_module_params argument.
 */
static void asmlinkage smm_do_relocation(void *arg)
{
	const struct smm_module_params *p;
	const struct smm_runtime *runtime;
	int cpu;
	uintptr_t curr_smbase;
	uintptr_t perm_smbase;

	p = arg;
	runtime = p->runtime;
	cpu = p->cpu;
	curr_smbase = runtime->smbase;

	/* refuse to relocate a CPU number outside the configured range */
	if (cpu >= CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT,
		       "Invalid CPU number assigned in SMM stub: %d\n", cpu);
		return;
	}

	/*
	 * The permanent handler runs with all cpus concurrently. Precalculate
	 * the location of the new SMBASE. If using SMM modules then this
	 * calculation needs to match that of the module loader.
	 */
#if CONFIG(X86_SMM_LOADER_VERSION2)
	/* loader v2 knows each CPU's SMBASE directly */
	perm_smbase = smm_get_cpu_smbase(cpu);
	mp_state.perm_smbase = perm_smbase;
	if (!perm_smbase) {
		printk(BIOS_ERR, "%s: bad SMBASE for CPU %d\n", __func__, cpu);
		return;
	}
#else
	/* legacy layout: CPUs stack downward from the shared permanent base */
	perm_smbase = mp_state.perm_smbase;
	perm_smbase -= cpu * runtime->save_state_size;
#endif

	/* Setup code checks this callback for validity. */
	printk(BIOS_INFO, "%s : curr_smbase 0x%x perm_smbase 0x%x, cpu = %d\n",
	       __func__, (int)curr_smbase, (int)perm_smbase, cpu);
	mp_state.ops.relocation_handler(cpu, curr_smbase, perm_smbase);

	if (CONFIG(STM)) {
		uintptr_t mseg;

		/* MSEG sits at the top of the permanent SMM region */
		mseg = mp_state.perm_smbase +
			(mp_state.perm_smsize - CONFIG_MSEG_SIZE);

		stm_setup(mseg, p->cpu,
			  perm_smbase,
			  mp_state.perm_smbase,
			  runtime->start32_offset);
	}
}
| 0
|
510,957
|
/*
 * Produce a DER-encoded ECDSA signature of 'dgst' into 'sig', writing the
 * encoded length through 'siglen'.  'type' is ignored; kinv/r are optional
 * precomputed values forwarded to ECDSA_do_sign_ex.
 * Returns 1 on success, 0 on failure (with *siglen set to 0).
 */
int ossl_ecdsa_sign(int type, const unsigned char *dgst, int dlen,
                    unsigned char *sig, unsigned int *siglen,
                    const BIGNUM *kinv, const BIGNUM *r, EC_KEY *eckey)
{
    ECDSA_SIG *sig_obj;
    int ok = 0;

    /* Mix the digest into the PRNG state before signing. */
    RAND_seed(dgst, dlen);
    sig_obj = ECDSA_do_sign_ex(dgst, dlen, kinv, r, eckey);
    if (sig_obj != NULL) {
        /* DER-encode the (r, s) pair directly into the caller's buffer. */
        *siglen = i2d_ECDSA_SIG(sig_obj, &sig);
        ECDSA_SIG_free(sig_obj);
        ok = 1;
    } else {
        *siglen = 0;
    }
    return ok;
}
| 0
|
396,832
|
/*
 * Write 'len' bytes from 'buf' to file descriptor 'fd', retrying on
 * short writes.  Thread cancellation is disabled for the duration so a
 * cancel cannot leave a partial write.  Returns the number of bytes
 * written, or -1 on the first write(2) error.  On ECOS/NOSYS targets
 * output is not supported and 'len' is returned unchanged.
 */
STATIC int GC_write(int fd, const char *buf, size_t len)
{
#   if defined(ECOS) || defined(NOSYS)
#     ifdef ECOS
        /* FIXME: This seems to be defined nowhere at present. */
        /* _Jv_diag_write(buf, len); */
#     else
        /* No writing. */
#     endif
      return len;
#   else
      int bytes_written = 0;
      int result;
      IF_CANCEL(int cancel_state;)

      DISABLE_CANCEL(cancel_state);
      /* loop until the whole buffer is out or write() fails */
      while ((size_t)bytes_written < len) {
#        ifdef GC_SOLARIS_THREADS
             /* raw syscall avoids the threads library's write wrapper */
             result = syscall(SYS_write, fd, buf + bytes_written,
                              len - bytes_written);
#        else
             result = write(fd, buf + bytes_written, len - bytes_written);
#        endif
         if (-1 == result) {
             RESTORE_CANCEL(cancel_state);
             return(result);
         }
         bytes_written += result;
      }
      RESTORE_CANCEL(cancel_state);
      return(bytes_written);
#   endif
}
| 0
|
71,322
|
// Client-side connect: establishes the TCP connection and then (unless the
// socket was explicitly left unencrypted) runs the SSL handshake, reporting
// the combined result to 'callback'.  Must be called on a fresh socket.
void AsyncSSLSocket::connect(
    ConnectCallback* callback,
    const folly::SocketAddress& address,
    std::chrono::milliseconds connectTimeout,
    std::chrono::milliseconds totalConnectTimeout,
    const OptionMap& options,
    const folly::SocketAddress& bindAddr) noexcept {
  assert(!server_);
  assert(state_ == StateEnum::UNINIT);
  assert(sslState_ == STATE_UNINIT || sslState_ == STATE_UNENCRYPTED);

  noTransparentTls_ = true;
  totalConnectTimeout_ = totalConnectTimeout;

  // Wrap the user's callback so the handshake runs after TCP connect; the
  // connector covers the whole connect+handshake within totalConnectTimeout.
  ConnectCallback* effectiveCallback = callback;
  if (sslState_ != STATE_UNENCRYPTED) {
    effectiveCallback =
        new AsyncSSLSocketConnector(this, callback, totalConnectTimeout);
  }

  AsyncSocket::connect(
      effectiveCallback, address, int(connectTimeout.count()), options,
      bindAddr);
}
| 0
|
120,730
|
// For each channel, pick the ADPCM coefficient pair that best predicts the
// first few decoded samples of the block, and derive the initial delta
// (quantizer step) from that predictor's average error.
void MSADPCM::choosePredictorForBlock(const int16_t *decoded)
{
	// number of probe samples used to score each predictor
	const int kPredictorSampleLength = 3;

	int channelCount = m_track->f.channelCount;

	for (int c=0; c<channelCount; c++)
	{
		int bestPredictorIndex = 0;
		int bestPredictorError = std::numeric_limits<int>::max();
		for (int k=0; k<m_numCoefficients; k++)
		{
			int a0 = m_coefficients[k][0];
			int a1 = m_coefficients[k][1];

			// sum |actual - predicted| over the probe samples; samples are
			// interleaved, so stride by channelCount. Prediction uses the two
			// previous samples weighted by a0/a1 in 8.8 fixed point (>> 8).
			int currentPredictorError = 0;
			for (int i=2; i<2+kPredictorSampleLength; i++)
			{
				int error = std::abs(decoded[i*channelCount + c] -
					((a0 * decoded[(i-1)*channelCount + c] +
					a1 * decoded[(i-2)*channelCount + c]) >> 8));
				currentPredictorError += error;
			}

			// scaled average error for this coefficient pair
			currentPredictorError /= 4 * kPredictorSampleLength;

			if (currentPredictorError < bestPredictorError)
			{
				bestPredictorError = currentPredictorError;
				bestPredictorIndex = k;
			}

			// a perfect predictor cannot be beaten; stop early
			if (!currentPredictorError)
				break;
		}

		// clamp delta to the format's minimum quantizer step
		if (bestPredictorError < 16)
			bestPredictorError = 16;

		m_state[c].predictorIndex = bestPredictorIndex;
		m_state[c].delta = bestPredictorError;
	}
}
| 0
|
386,162
|
/* {{{ proto bool SplObjectStorage::valid()
   Iterator interface: returns whether the internal hash position still
   points at an element of the storage. Takes no arguments. */
SPL_METHOD(SplObjectStorage, valid)
{
	spl_SplObjectStorage *intern = (spl_SplObjectStorage*)zend_object_store_get_object(getThis() TSRMLS_CC);

	/* reject any arguments passed by the caller */
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_BOOL(zend_hash_has_more_elements_ex(&intern->storage, &intern->pos) == SUCCESS);
} /* }}} */
| 0
|
134,823
|
/*
 * Parse exactly four hexadecimal digits (as found in a JSON \uXXXX escape)
 * into a 16-bit value, most significant digit first.  Accepts upper and
 * lower case.  Returns 0 on any non-hex character — note that this is
 * indistinguishable from a valid "0000" input, matching the original
 * contract.
 */
static unsigned parse_hex4(const unsigned char * const input)
{
    unsigned int value = 0;
    size_t pos;

    for (pos = 0; pos < 4; pos++)
    {
        unsigned char c = input[pos];
        unsigned int digit;

        if ((c >= '0') && (c <= '9'))
        {
            digit = (unsigned int)(c - '0');
        }
        else if ((c >= 'A') && (c <= 'F'))
        {
            digit = (unsigned int)(c - 'A') + 10;
        }
        else if ((c >= 'a') && (c <= 'f'))
        {
            digit = (unsigned int)(c - 'a') + 10;
        }
        else
        {
            /* invalid digit */
            return 0;
        }

        /* fold in the nibble, shifting previous digits up */
        value = (value << 4) | digit;
    }

    return value;
}
| 0
|
278,574
|
// Parse the 'stts' (time-to-sample) box: validate the header, bound the
// table size against overflow and the overall sample-table budget, then
// read and byte-swap the entries.  May be called at most once.
status_t SampleTable::setTimeToSampleParams(
        off64_t data_offset, size_t data_size) {
    // reject a second call or a box too small to hold version+count
    if (mHasTimeToSample || data_size < 8) {
        return ERROR_MALFORMED;
    }

    uint8_t header[8];
    if (mDataSource->readAt(
                data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
        return ERROR_IO;
    }

    // first 4 bytes are version/flags and must be zero
    if (U32_AT(header) != 0) {
        return ERROR_MALFORMED;
    }

    mTimeToSampleCount = U32_AT(&header[4]);
    // guard the multiplication below (2 uint32 words per entry)
    if (mTimeToSampleCount > UINT32_MAX / (2 * sizeof(uint32_t))) {
        ALOGE("Time-to-sample table size too large.");
        return ERROR_OUT_OF_RANGE;
    }

    // enforce the cumulative cap across all sample-table boxes
    uint64_t allocSize = (uint64_t)mTimeToSampleCount * 2 * sizeof(uint32_t);
    mTotalSize += allocSize;
    if (mTotalSize > kMaxTotalSize) {
        ALOGE("Time-to-sample table size would make sample table too large.\n"
              "    Requested time-to-sample table size = %llu\n"
              "    Eventual sample table size >= %llu\n"
              "    Allowed sample table size = %llu\n",
              (unsigned long long)allocSize,
              (unsigned long long)mTotalSize,
              (unsigned long long)kMaxTotalSize);
        return ERROR_OUT_OF_RANGE;
    }

    mTimeToSample = new (std::nothrow) uint32_t[mTimeToSampleCount * 2];
    if (!mTimeToSample) {
        ALOGE("Cannot allocate time-to-sample table with %llu entries.",
                (unsigned long long)mTimeToSampleCount);
        return ERROR_OUT_OF_RANGE;
    }

    if (mDataSource->readAt(data_offset + 8, mTimeToSample,
            (size_t)allocSize) < (ssize_t)allocSize) {
        ALOGE("Incomplete data read for time-to-sample table.");
        return ERROR_IO;
    }

    // entries are stored big-endian on disk; convert to host order
    for (size_t i = 0; i < mTimeToSampleCount * 2; ++i) {
        mTimeToSample[i] = ntohl(mTimeToSample[i]);
    }

    mHasTimeToSample = true;
    return OK;
}
| 0
|
147,862
|
invoke_NPN_SetProperty(PluginInstance *plugin, NPObject *npobj, NPIdentifier propertyName,
					   const NPVariant *value)
{
  /* Forward an NPN_SetProperty call across the RPC boundary and relay the
   * browser's boolean result.  Returns false on any RPC failure. */
  npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), false);

  int rpc_error = rpc_method_invoke(g_rpc_connection,
									RPC_METHOD_NPN_SET_PROPERTY,
									RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin,
									RPC_TYPE_NP_OBJECT, npobj,
									RPC_TYPE_NP_IDENTIFIER, &propertyName,
									RPC_TYPE_NP_VARIANT, value,
									RPC_TYPE_INVALID);
  if (rpc_error != RPC_ERROR_NO_ERROR) {
	npw_perror("NPN_SetProperty() invoke", rpc_error);
	return false;
  }

  uint32_t result;
  rpc_error = rpc_method_wait_for_reply(g_rpc_connection,
										RPC_TYPE_UINT32, &result,
										RPC_TYPE_INVALID);
  if (rpc_error != RPC_ERROR_NO_ERROR) {
	npw_perror("NPN_SetProperty() wait for reply", rpc_error);
	return false;
  }

  return result;
}
| 0
|
419,052
|
sigend_bfd(__attribute__ ((unused)) void *v,
	   __attribute__ ((unused)) int sig)
{
	/* Signal handler for orderly shutdown: if the event loop exists,
	 * queue a terminate event on it. */
	if (!master)
		return;
	thread_add_terminate_event(master);
}
| 0
|
166,440
|
// Controller error callback (may arrive on a non-IO thread): bounce the
// error to DoHandleError on the IO thread.  make_scoped_refptr keeps the
// controller alive until the posted task runs.
void AudioRendererHost::OnError(media::AudioOutputController* controller,
                                int error_code) {
  BrowserThread::PostTask(
      BrowserThread::IO,
      FROM_HERE,
      NewRunnableMethod(this,
                        &AudioRendererHost::DoHandleError,
                        make_scoped_refptr(controller),
                        error_code));
}
| 0
|
439,699
|
// Fill the current path with an axial (linear) gradient: build a transient
// axial pattern for the shading and delegate to the shared univariate fill.
bool SplashOutputDev::axialShadedFill(GfxState *state, GfxAxialShading *shading, double tMin, double tMax) {
  SplashAxialPattern *axialPattern = new SplashAxialPattern(colorMode, state, shading);
  const bool result = univariateShadedFill(state, axialPattern, tMin, tMax);
  delete axialPattern;
  return result;
}
| 0
|
42,828
|
/*
 * Release the per-core "extra register" slot this event was holding.
 * Finds the matching extra-reg descriptor for the event's config, drops
 * one reference on the shared per-core allocation under the core lock,
 * and clears cpuc->percore_used once no references remain on any slot.
 */
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct extra_reg *er;
	struct intel_percore *pc;
	struct er_account *era;
	struct hw_perf_event *hwc = &event->hw;
	int i, allref;

	/* nothing to release if this CPU never took a per-core slot */
	if (!cpuc->percore_used)
		return;

	for (er = x86_pmu.extra_regs; er->msr; er++) {
		/* only the descriptor matching this event's config applies */
		if (er->event != (hwc->config & er->config_mask))
			continue;

		pc = cpuc->per_core;
		raw_spin_lock(&pc->lock);
		/* drop our reference on the slot holding this exact
		 * (extra_config, extra_reg) allocation */
		for (i = 0; i < MAX_EXTRA_REGS; i++) {
			era = &pc->regs[i];
			if (era->ref > 0 &&
			    era->extra_config == hwc->extra_config &&
			    era->extra_reg == er->msr) {
				era->ref--;
				hwc->extra_alloc = 0;
				break;
			}
		}
		/* if no slot holds any reference, the core is free again */
		allref = 0;
		for (i = 0; i < MAX_EXTRA_REGS; i++)
			allref += pc->regs[i].ref;
		if (allref == 0)
			cpuc->percore_used = 0;
		raw_spin_unlock(&pc->lock);
		break;
	}
}
| 0
|
342,070
|
/*
 * Realize a CFI-compliant AMD/Fujitsu-style (CFI02) parallel flash device:
 * allocate the ROM-device memory region, optionally preload its contents
 * from the backing block device, set up the (possibly mirrored) mappings,
 * and fill in the hardcoded CFI query table.
 */
static void pflash_cfi02_realize(DeviceState *dev, Error **errp)
{
    pflash_t *pfl = CFI_PFLASH02(dev);
    uint32_t chip_len;
    int ret;
    Error *local_err = NULL;

    /* total chip size = uniform sector size times number of sectors */
    chip_len = pfl->sector_len * pfl->nb_blocs;
    /* XXX: to be fixed */
#if 0
    if (total_len != (8 * 1024 * 1024) && total_len != (16 * 1024 * 1024) &&
        total_len != (32 * 1024 * 1024) && total_len != (64 * 1024 * 1024))
        return NULL;
#endif

    /* ROM-device region: reads hit RAM directly, writes go through the
     * command state machine ops (endianness-specific) */
    memory_region_init_rom_device(&pfl->orig_mem, OBJECT(pfl), pfl->be ?
                                  &pflash_cfi02_ops_be : &pflash_cfi02_ops_le,
                                  pfl, pfl->name, chip_len, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    vmstate_register_ram(&pfl->orig_mem, DEVICE(pfl));
    pfl->storage = memory_region_get_ram_ptr(&pfl->orig_mem);
    pfl->chip_len = chip_len;
    if (pfl->bs) {
        /* read the initial flash content */
        ret = bdrv_read(pfl->bs, 0, pfl->storage, chip_len >> 9);
        if (ret < 0) {
            vmstate_unregister_ram(&pfl->orig_mem, DEVICE(pfl));
            error_setg(errp, "failed to read the initial flash content");
            return;
        }
    }

    pflash_setup_mappings(pfl);
    pfl->rom_mode = 1;
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &pfl->mem);

    /* device is read-only iff its backing image is */
    if (pfl->bs) {
        pfl->ro = bdrv_is_read_only(pfl->bs);
    } else {
        pfl->ro = 0;
    }

    /* timer models erase/program completion delays */
    pfl->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pflash_timer, pfl);
    pfl->wcycle = 0;
    pfl->cmd = 0;
    pfl->status = 0;
    /* Hardcoded CFI table (mostly from SG29 Spansion flash) */
    pfl->cfi_len = 0x52;
    /* Standard "QRY" string */
    pfl->cfi_table[0x10] = 'Q';
    pfl->cfi_table[0x11] = 'R';
    pfl->cfi_table[0x12] = 'Y';
    /* Command set (AMD/Fujitsu) */
    pfl->cfi_table[0x13] = 0x02;
    pfl->cfi_table[0x14] = 0x00;
    /* Primary extended table address */
    pfl->cfi_table[0x15] = 0x31;
    pfl->cfi_table[0x16] = 0x00;
    /* Alternate command set (none) */
    pfl->cfi_table[0x17] = 0x00;
    pfl->cfi_table[0x18] = 0x00;
    /* Alternate extended table (none) */
    pfl->cfi_table[0x19] = 0x00;
    pfl->cfi_table[0x1A] = 0x00;
    /* Vcc min */
    pfl->cfi_table[0x1B] = 0x27;
    /* Vcc max */
    pfl->cfi_table[0x1C] = 0x36;
    /* Vpp min (no Vpp pin) */
    pfl->cfi_table[0x1D] = 0x00;
    /* Vpp max (no Vpp pin) */
    pfl->cfi_table[0x1E] = 0x00;
    /* Reserved */
    pfl->cfi_table[0x1F] = 0x07;
    /* Timeout for min size buffer write (NA) */
    pfl->cfi_table[0x20] = 0x00;
    /* Typical timeout for block erase (512 ms) */
    pfl->cfi_table[0x21] = 0x09;
    /* Typical timeout for full chip erase (4096 ms) */
    pfl->cfi_table[0x22] = 0x0C;
    /* Reserved */
    pfl->cfi_table[0x23] = 0x01;
    /* Max timeout for buffer write (NA) */
    pfl->cfi_table[0x24] = 0x00;
    /* Max timeout for block erase */
    pfl->cfi_table[0x25] = 0x0A;
    /* Max timeout for chip erase */
    pfl->cfi_table[0x26] = 0x0D;
    /* Device size */
    pfl->cfi_table[0x27] = ctz32(chip_len);
    /* Flash device interface (8 & 16 bits) */
    pfl->cfi_table[0x28] = 0x02;
    pfl->cfi_table[0x29] = 0x00;
    /* Max number of bytes in multi-bytes write */
    /* XXX: disable buffered write as it's not supported */
    //    pfl->cfi_table[0x2A] = 0x05;
    pfl->cfi_table[0x2A] = 0x00;
    pfl->cfi_table[0x2B] = 0x00;
    /* Number of erase block regions (uniform) */
    pfl->cfi_table[0x2C] = 0x01;
    /* Erase block region 1 */
    pfl->cfi_table[0x2D] = pfl->nb_blocs - 1;
    pfl->cfi_table[0x2E] = (pfl->nb_blocs - 1) >> 8;
    pfl->cfi_table[0x2F] = pfl->sector_len >> 8;
    pfl->cfi_table[0x30] = pfl->sector_len >> 16;
    /* Extended */
    pfl->cfi_table[0x31] = 'P';
    pfl->cfi_table[0x32] = 'R';
    pfl->cfi_table[0x33] = 'I';
    pfl->cfi_table[0x34] = '1';
    pfl->cfi_table[0x35] = '0';
    pfl->cfi_table[0x36] = 0x00;
    pfl->cfi_table[0x37] = 0x00;
    pfl->cfi_table[0x38] = 0x00;
    pfl->cfi_table[0x39] = 0x00;
    pfl->cfi_table[0x3a] = 0x00;
    pfl->cfi_table[0x3b] = 0x00;
    pfl->cfi_table[0x3c] = 0x00;
}
| 0
|
61,337
|
/*
 * Add an NT_AUXV note covering the process's saved auxiliary vector.
 * The vector is a sequence of (key, value) pairs; count entries up to
 * and including the AT_NULL terminator pair.
 */
static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
	int i;

	for (i = 2; auxv[i - 2] != AT_NULL; i += 2)
		;
	fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
| 0
|
192,158
|
// Cache the context of a new autofill query (form, field, id, bounds) and
// precompute which extra popup entries should be offered by asking the
// manager.  NOTE(review): assumes manager_ outlives this call and that the
// Should* queries are side-effect free — confirm against AutofillManager.
void AutofillExternalDelegate::OnQuery(int query_id,
                                       const FormData& form,
                                       const FormFieldData& field,
                                       const gfx::RectF& element_bounds) {
  query_form_ = form;
  query_field_ = field;
  query_id_ = query_id;
  element_bounds_ = element_bounds;
  should_show_scan_credit_card_ =
      manager_->ShouldShowScanCreditCard(query_form_, query_field_);
  popup_type_ = manager_->GetPopupType(query_form_, query_field_);
  should_show_cc_signin_promo_ =
      manager_->ShouldShowCreditCardSigninPromo(query_form_, query_field_);
  should_show_cards_from_account_option_ =
      manager_->ShouldShowCardsFromAccountOption(query_form_, query_field_);
}
| 0
|
253,434
|
// Dispatch the payment request to the service-worker payment app.  Apps not
// yet installed are installed and invoked in one step; already-installed
// apps are invoked by registration id.  Both paths complete through
// OnPaymentAppInvoked, and a processing spinner is shown while in flight.
void ServiceWorkerPaymentInstrument::InvokePaymentApp(Delegate* delegate) {
  delegate_ = delegate;

  if (needs_installation_) {
    // Just-in-time install path: pass the discovered web-app metadata
    // (name, icon, service-worker URL/scope) along with the event data.
    content::PaymentAppProvider::GetInstance()->InstallAndInvokePaymentApp(
        web_contents_, CreatePaymentRequestEventData(),
        installable_web_app_info_->name,
        installable_web_app_info_->icon == nullptr
            ? SkBitmap()
            : *(installable_web_app_info_->icon),
        installable_web_app_info_->sw_js_url,
        installable_web_app_info_->sw_scope,
        installable_web_app_info_->sw_use_cache, installable_enabled_method_,
        base::BindOnce(&ServiceWorkerPaymentInstrument::OnPaymentAppInvoked,
                       weak_ptr_factory_.GetWeakPtr()));
  } else {
    // Already-installed path: invoke by stored registration id.
    content::PaymentAppProvider::GetInstance()->InvokePaymentApp(
        browser_context_, stored_payment_app_info_->registration_id,
        CreatePaymentRequestEventData(),
        base::BindOnce(&ServiceWorkerPaymentInstrument::OnPaymentAppInvoked,
                       weak_ptr_factory_.GetWeakPtr()));
  }

  payment_request_delegate_->ShowProcessingSpinner();
}
| 0
|
451,280
|
// Invoke 'cb' on each header in insertion order, stopping early as soon
// as the callback returns Iterate::Break.
void HeaderMapImpl::iterate(HeaderMap::ConstIterateCb cb) const {
  for (auto it = headers_.begin(); it != headers_.end(); ++it) {
    if (cb(*it) == HeaderMap::Iterate::Break) {
      break;
    }
  }
}
| 0
|
173,094
|
// Destructor: tear down in dependency order — detach from the stream and
// compositor layer, stop the frame pipeline and audio renderer, log the
// destruction, then deregister from the delegate.  Must run on the thread
// that created the player.
WebMediaPlayerMS::~WebMediaPlayerMS() {
  DVLOG(1) << __func__;
  DCHECK(thread_checker_.CalledOnValidThread());

  if (!web_stream_.IsNull())
    web_stream_.RemoveObserver(this);

  // Detach the compositor layer from the client before tearing it down.
  get_client()->SetCcLayer(nullptr);

  if (video_layer_) {
    DCHECK(!surface_layer_for_video_enabled_);
    video_layer_->StopUsingProvider();
  }

  // The frame deliverer lives on the IO thread; delete it there.
  if (frame_deliverer_)
    io_task_runner_->DeleteSoon(FROM_HERE, frame_deliverer_.release());

  if (compositor_)
    compositor_->StopUsingProvider();

  if (video_frame_provider_)
    video_frame_provider_->Stop();

  if (audio_renderer_)
    audio_renderer_->Stop();
  media_log_->AddEvent(
      media_log_->CreateEvent(media::MediaLogEvent::WEBMEDIAPLAYER_DESTROYED));

  delegate_->PlayerGone(delegate_id_);
  delegate_->RemoveObserver(delegate_id_);
}
| 0
|
15,895
|
/* Composite tvbuff offset hook: offsets are already flat, so the running
 * counter is passed through unchanged. */
static guint composite_offset(const tvbuff_t *tvb _U_, const guint counter)
{
    return counter;
}
| 0
|
241,934
|
/* Xinerama wrapper for RenderSetPictureFilter: resolve the client-visible
 * picture to its per-screen counterparts and replay the request on each
 * screen (back to front), stopping at the first failure. */
PanoramiXRenderSetPictureFilter (ClientPtr client)
{
    REQUEST(xRenderSetPictureFilterReq);
    int result = Success, j;
    PanoramiXRes *pict;
    REQUEST_AT_LEAST_SIZE(xRenderSetPictureFilterReq);
    VERIFY_XIN_PICTURE(pict, stuff->picture, client, DixWriteAccess);

    FOR_NSCREENS_BACKWARD(j) {
	/* rewrite the request to target this screen's picture id */
	stuff->picture = pict->info[j].id;
	result = (*PanoramiXSaveRenderVector[X_RenderSetPictureFilter]) (client);
	if(result != Success) break;
    }

    return result;
}
| 0
|
351,237
|
/* Emulated-async zero for the file backend: perform the zero synchronously,
 * then immediately fire the completion callback.
 *
 * NOTE(review): when file_synch_zero fails this returns false WITHOUT
 * invoking cb, so the caller must treat a false return as "command not
 * queued" or its in-flight accounting will leak — confirm against callers.
 * NOTE(review): the callback is handed &dummy (always 0) as its error
 * pointer, so per-command error propagation is not possible here — verify
 * this matches the completion-callback contract used elsewhere. */
file_asynch_zero (struct rw *rw, struct command *command,
                  nbd_completion_callback cb, bool allocate)
{
  int dummy = 0;

  if (!file_synch_zero (rw, command->offset, command->slice.len, allocate))
    return false;
  /* a callback returning -1 is a fatal, unrecoverable error */
  if (cb.callback (cb.user_data, &dummy) == -1) {
    perror (rw->name);
    exit (EXIT_FAILURE);
  }
  return true;
}
| 1
|
418,583
|
/* Return the process-wide singleton for this ACL strategy. */
ACLSNMPCommunityStrategy::Instance()
{
    return &Instance_;
}
| 0
|
226,339
|
// React to display metric changes: after the base class processes the
// notification, restart the delayed resize only when the device scale
// factor changed on the display this window currently occupies.
void DesktopWindowTreeHostX11::OnDisplayMetricsChanged(
    const display::Display& display,
    uint32_t changed_metrics) {
  aura::WindowTreeHost::OnDisplayMetricsChanged(display, changed_metrics);

  if ((changed_metrics & DISPLAY_METRIC_DEVICE_SCALE_FACTOR) == 0)
    return;

  const auto nearest_display_id =
      display::Screen::GetScreen()->GetDisplayNearestWindow(window()).id();
  if (nearest_display_id == display.id())
    RestartDelayedResizeTask();
}
| 0
|
94,219
|
/*
 * VM-exit handler for the interrupt-window exit: the guest is now able to
 * accept interrupts.  Clears the "exit on interrupt window" execution
 * control and requests event re-injection.  Returns 1 to resume the guest,
 * or 0 to return to userspace when userspace (with no in-kernel irqchip)
 * asked to be notified and no interrupt is actually pending.
 */
static int handle_interrupt_window(struct kvm_vcpu *vcpu)
{
	u32 cpu_based_vm_exec_control;

	/* clear pending irq */
	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);

	/* re-evaluate pending events before re-entering the guest */
	kvm_make_request(KVM_REQ_EVENT, vcpu);

	++vcpu->stat.irq_window_exits;

	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (!irqchip_in_kernel(vcpu->kvm) &&
	    vcpu->run->request_interrupt_window &&
	    !kvm_cpu_has_interrupt(vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}
	return 1;
}
| 0
|
113,000
|
/*
 * Read and validate the JPEG 2000 codestream header.
 *
 * Creates the decoder's private image, runs the validation and
 * header-reading procedure lists, then copies the header information
 * into a freshly allocated output image (*p_image).
 * Returns OPJ_TRUE on success, OPJ_FALSE on any failure.  Failures that
 * occur before the header is fully read release the private image; the
 * last two failure paths deliberately keep it (matching the original),
 * since it is still needed for decoding.
 */
OPJ_BOOL opj_j2k_read_header(opj_stream_private_t *p_stream,
                             opj_j2k_t* p_j2k,
                             opj_image_t** p_image,
                             opj_event_mgr_t* p_manager)
{
    /* preconditions */
    assert(p_j2k != 00);
    assert(p_stream != 00);
    assert(p_manager != 00);

    /* create an empty image header */
    p_j2k->m_private_image = opj_image_create0();
    if (!p_j2k->m_private_image) {
        return OPJ_FALSE;
    }

    /* customization of the validation */
    if (!opj_j2k_setup_decoding_validation(p_j2k, p_manager)) {
        goto fail;
    }

    /* validation of the parameters codec */
    if (!opj_j2k_exec(p_j2k, p_j2k->m_validation_list, p_stream, p_manager)) {
        goto fail;
    }

    /* customization of the encoding */
    if (!opj_j2k_setup_header_reading(p_j2k, p_manager)) {
        goto fail;
    }

    /* read header */
    if (!opj_j2k_exec(p_j2k, p_j2k->m_procedure_list, p_stream, p_manager)) {
        goto fail;
    }

    *p_image = opj_image_create0();
    if (!(*p_image)) {
        return OPJ_FALSE;
    }

    /* Copy codestream image information to the output image */
    opj_copy_image_header(p_j2k->m_private_image, *p_image);

    /* Allocate and initialize some elements of codestream index */
    if (!opj_j2k_allocate_tile_element_cstr_index(p_j2k)) {
        return OPJ_FALSE;
    }

    return OPJ_TRUE;

fail:
    /* shared cleanup for pre-header failures */
    opj_image_destroy(p_j2k->m_private_image);
    p_j2k->m_private_image = NULL;
    return OPJ_FALSE;
}
| 0
|
378,857
|
/*
 * APR "other child" maintenance callback for a mod_wsgi daemon process.
 *
 * Called by Apache's maintenance loop with a 'reason' describing what
 * happened to the watched child process. Depending on the reason this
 * either restarts the daemon process, deregisters it, or just logs the
 * event. 'data' is the WSGIDaemonProcess registered with
 * apr_proc_other_child_register (see the unregister calls below);
 * 'status' is the wait status, only meaningful for APR_OC_REASON_DEATH.
 */
static void wsgi_manage_process(int reason, void *data, apr_wait_t status)
{
WSGIDaemonProcess *daemon = data;
switch (reason) {
/* Child daemon process has died. */
case APR_OC_REASON_DEATH: {
int mpm_state;
int stopping;
/*
* Determine if Apache is being shutdown or not and
* if it is not being shutdown, we will need to
* restart the child daemon process that has died.
* If MPM doesn't support query assume that child
* daemon process shouldn't be restarted. Both
* prefork and worker MPMs support this query so
* should always be okay.
*/
stopping = 1;
if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state) == APR_SUCCESS
&& mpm_state != AP_MPMQ_STOPPING) {
stopping = 0;
}
if (!stopping) {
ap_log_error(APLOG_MARK, APLOG_INFO, 0,
wsgi_server, "mod_wsgi (pid=%d): "
"Process '%s' has died, deregister and "
"restart it.", daemon->process.pid,
daemon->group->name);
/* Log how the child exited: normal exit vs killed by signal. */
if (WIFEXITED(status)) {
ap_log_error(APLOG_MARK, APLOG_INFO, 0,
wsgi_server, "mod_wsgi (pid=%d): "
"Process '%s' terminated normally, exit code %d",
daemon->process.pid, daemon->group->name,
WEXITSTATUS(status));
}
else if (WIFSIGNALED(status)) {
ap_log_error(APLOG_MARK, APLOG_INFO, 0,
wsgi_server, "mod_wsgi (pid=%d): "
"Process '%s' terminated by signal %d",
daemon->process.pid, daemon->group->name,
WTERMSIG(status));
}
}
else {
ap_log_error(APLOG_MARK, APLOG_INFO, 0,
wsgi_server, "mod_wsgi (pid=%d): "
"Process '%s' has died but server is "
"being stopped, deregister it.",
daemon->process.pid, daemon->group->name);
}
/* Deregister existing process so we stop watching it. */
apr_proc_other_child_unregister(daemon);
/* Now restart process if not shutting down. */
/* NOTE: the restart is done after unregistering so the new process
 * gets a fresh registration rather than reusing the stale one. */
if (!stopping)
wsgi_start_process(wsgi_parent_pool, daemon);
break;
}
/* Apache is being restarted or shutdown. */
case APR_OC_REASON_RESTART: {
ap_log_error(APLOG_MARK, APLOG_INFO, 0,
wsgi_server, "mod_wsgi (pid=%d): "
"Process '%s' to be deregistered, as server is "
"restarting or being shutdown.",
daemon->process.pid, daemon->group->name);
/* Deregister existing process so we stop watching it. */
apr_proc_other_child_unregister(daemon);
break;
}
/* Child daemon process vanished. */
case APR_OC_REASON_LOST: {
ap_log_error(APLOG_MARK, APLOG_INFO, 0,
wsgi_server, "mod_wsgi (pid=%d): "
"Process '%s' appears to have been lost, "
"deregister and restart it.",
daemon->process.pid, daemon->group->name);
/* Deregister existing process so we stop watching it. */
apr_proc_other_child_unregister(daemon);
/* Restart the child daemon process that has died. */
wsgi_start_process(wsgi_parent_pool, daemon);
break;
}
/* Call to unregister the process. */
case APR_OC_REASON_UNREGISTER: {
/* Nothing to do at present. */
ap_log_error(APLOG_MARK, APLOG_INFO, 0,
wsgi_server, "mod_wsgi (pid=%d): "
"Process '%s' has been deregistered and will "
"no longer be monitored.", daemon->process.pid,
daemon->group->name);
break;
}
default: {
/* Unknown reason codes are logged but otherwise ignored. */
ap_log_error(APLOG_MARK, APLOG_INFO, 0,
wsgi_server, "mod_wsgi (pid=%d): "
"Process '%s' targeted by unexpected event %d.",
daemon->process.pid, daemon->group->name, reason);
}
}
}
| 0
|
12,189
|
/*
 * Top-level HEVC decode API call.
 *
 * Decodes (at most) one access unit from the input bitstream buffer, or,
 * when the codec is in flush mode, drains one decoded picture from the
 * display queue without consuming any input.
 *
 * @param ps_codec_obj  codec handle wrapper; pv_codec_handle is the codec_t.
 * @param pv_api_ip     ivd_video_decode_ip_t input arguments.
 * @param pv_api_op     ivd_video_decode_op_t output arguments (cleared here,
 *                      except for the caller-set u4_size field).
 * @return IV_SUCCESS / IV_FAIL (error details in ps_dec_op->u4_error_code),
 *         or an internal IHEVCD_ERROR_T from the job-queue path.
 */
WORD32 ihevcd_decode(iv_obj_t *ps_codec_obj, void *pv_api_ip, void *pv_api_op)
{
WORD32 ret = IV_SUCCESS;
codec_t *ps_codec = (codec_t *)(ps_codec_obj->pv_codec_handle);
ivd_video_decode_ip_t *ps_dec_ip;
ivd_video_decode_op_t *ps_dec_op;
WORD32 proc_idx = 0;
WORD32 prev_proc_idx = 0;
/* Initialize error code */
ps_codec->i4_error_code = 0;
ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip;
ps_dec_op = (ivd_video_decode_op_t *)pv_api_op;
/* Clear the output structure but preserve the caller-set size field. */
{
UWORD32 u4_size = ps_dec_op->u4_size;
memset(ps_dec_op, 0, sizeof(ivd_video_decode_op_t));
ps_dec_op->u4_size = u4_size; //Restore size field
}
/* Decode may only be called after init has completed. */
if(ps_codec->i4_init_done != 1)
{
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IHEVCD_INIT_NOT_DONE;
return IV_FAIL;
}
/* Hard cap on the number of decoded frames (build-time limit). */
if(ps_codec->u4_pic_cnt >= NUM_FRAMES_LIMIT)
{
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IHEVCD_NUM_FRAMES_LIMIT_REACHED;
return IV_FAIL;
}
/* If reset flag is set, flush the existing buffers */
if(ps_codec->i4_reset_flag)
{
ps_codec->i4_flush_mode = 1;
}
/*Data memory barries instruction,so that bitstream write by the application is complete*/
/* In case the decoder is not in flush mode check for input buffer validity */
if(0 == ps_codec->i4_flush_mode)
{
if(ps_dec_ip->pv_stream_buffer == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL;
return IV_FAIL;
}
/* Too little input to even contain a start code: consume and fail. */
if(ps_dec_ip->u4_num_Bytes <= MIN_START_CODE_LEN)
{
if((WORD32)ps_dec_ip->u4_num_Bytes > 0)
ps_dec_op->u4_num_bytes_consumed = ps_dec_ip->u4_num_Bytes;
else
ps_dec_op->u4_num_bytes_consumed = 0;
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV;
return IV_FAIL;
}
}
#ifdef APPLY_CONCEALMENT
{
WORD32 num_mbs;
num_mbs = (ps_codec->i4_wd * ps_codec->i4_ht + 255) >> 8;
/* Reset MB Count at the beginning of every process call */
ps_codec->mb_count = 0;
memset(ps_codec->mb_map, 0, ((num_mbs + 7) >> 3));
}
#endif
/* In non-shared mode (and not header-only mode) the caller must supply
 * valid output buffers; validate pointers and sizes up front. */
if(0 == ps_codec->i4_share_disp_buf && ps_codec->i4_header_mode == 0)
{
UWORD32 i;
if(ps_dec_ip->s_out_buffer.u4_num_bufs == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS;
return IV_FAIL;
}
for(i = 0; i < ps_dec_ip->s_out_buffer.u4_num_bufs; i++)
{
if(ps_dec_ip->s_out_buffer.pu1_bufs[i] == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->s_out_buffer.u4_min_out_buf_size[i] == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUF_SIZE;
return IV_FAIL;
}
}
}
ps_codec->ps_out_buffer = &ps_dec_ip->s_out_buffer;
ps_codec->u4_ts = ps_dec_ip->u4_ts;
/* Flush mode: emit one pending picture from the display manager without
 * consuming any bitstream. Returns IV_FAIL once the queue is empty. */
if(ps_codec->i4_flush_mode)
{
ps_dec_op->u4_pic_wd = ps_codec->i4_disp_wd;
ps_dec_op->u4_pic_ht = ps_codec->i4_disp_ht;
ps_dec_op->u4_new_seq = 0;
ps_codec->ps_disp_buf = (pic_buf_t *)ihevc_disp_mgr_get(
(disp_mgr_t *)ps_codec->pv_disp_buf_mgr, &ps_codec->i4_disp_buf_id);
/* In case of non-shared mode, then convert/copy the frame to output buffer */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
if((ps_codec->ps_disp_buf)
&& ((0 == ps_codec->i4_share_disp_buf)
|| (IV_YUV_420P
== ps_codec->e_chroma_fmt)))
{
process_ctxt_t *ps_proc = &ps_codec->as_process[prev_proc_idx];
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
/* Set remaining number of rows to be processed */
ret = ihevcd_fmt_conv(ps_codec, &ps_codec->as_process[prev_proc_idx],
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], 0,
ps_codec->i4_disp_ht);
ihevc_buf_mgr_release((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->i4_disp_buf_id, BUF_MGR_DISP);
}
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
/* Watermark the output with the codec logo (no-op in builds where
 * INSERT_LOGO expands to nothing). */
if(1 == ps_dec_op->u4_output_present)
{
WORD32 xpos = ps_codec->i4_disp_wd - 32 - LOGO_WD;
WORD32 ypos = ps_codec->i4_disp_ht - 32 - LOGO_HT;
if(ypos < 0)
ypos = 0;
if(xpos < 0)
xpos = 0;
INSERT_LOGO(ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], ps_codec->i4_disp_strd,
xpos,
ypos,
ps_codec->e_chroma_fmt,
ps_codec->i4_disp_wd,
ps_codec->i4_disp_ht);
}
if(NULL == ps_codec->ps_disp_buf)
{
/* If in flush mode and there are no more buffers to flush,
* check for the reset flag and reset the decoder */
if(ps_codec->i4_reset_flag)
{
ihevcd_init(ps_codec);
}
return (IV_FAIL);
}
return (IV_SUCCESS);
}
/* In case of shared mode, check if there is a free buffer for reconstruction */
if((0 == ps_codec->i4_header_mode) && (1 == ps_codec->i4_share_disp_buf))
{
WORD32 buf_status;
buf_status = 1;
if(ps_codec->pv_pic_buf_mgr)
buf_status = ihevc_buf_mgr_check_free((buf_mgr_t *)ps_codec->pv_pic_buf_mgr);
/* If there is no free buffer, then return with an error code */
if(0 == buf_status)
{
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
return IV_FAIL;
}
}
/* Set up per-call parse state before entering the NAL loop. */
ps_codec->i4_bytes_remaining = ps_dec_ip->u4_num_Bytes;
ps_codec->pu1_inp_bitsbuf = (UWORD8 *)ps_dec_ip->pv_stream_buffer;
ps_codec->s_parse.i4_end_of_frame = 0;
ps_codec->i4_pic_present = 0;
ps_codec->i4_slice_error = 0;
ps_codec->ps_disp_buf = NULL;
if(ps_codec->i4_num_cores > 1)
{
ithread_set_affinity(0);
}
/* Main loop: extract one NAL unit per iteration (start-code search +
 * emulation-prevention removal) and decode it, until the input is
 * exhausted or an end-of-frame/error condition breaks out. */
while(MIN_START_CODE_LEN < ps_codec->i4_bytes_remaining)
{
WORD32 nal_len;
WORD32 nal_ofst;
WORD32 bits_len;
/* If a previous slice was in error, keep skipping until the parse
 * position reaches the start of the next slice. */
if(ps_codec->i4_slice_error)
{
slice_header_t *ps_slice_hdr_next = ps_codec->s_parse.ps_slice_hdr_base + (ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1));
WORD32 next_slice_addr = ps_slice_hdr_next->i2_ctb_x +
ps_slice_hdr_next->i2_ctb_y * ps_codec->s_parse.ps_sps->i2_pic_wd_in_ctb;
if(ps_codec->s_parse.i4_next_ctb_indx == next_slice_addr)
ps_codec->i4_slice_error = 0;
}
/* Use the dynamically sized bitstream buffer once it exists (post-SPS),
 * otherwise fall back to the static one. */
if(ps_codec->pu1_bitsbuf_dynamic)
{
ps_codec->pu1_bitsbuf = ps_codec->pu1_bitsbuf_dynamic;
ps_codec->u4_bitsbuf_size = ps_codec->u4_bitsbuf_size_dynamic;
}
else
{
ps_codec->pu1_bitsbuf = ps_codec->pu1_bitsbuf_static;
ps_codec->u4_bitsbuf_size = ps_codec->u4_bitsbuf_size_static;
}
nal_ofst = ihevcd_nal_search_start_code(ps_codec->pu1_inp_bitsbuf,
ps_codec->i4_bytes_remaining);
ps_codec->i4_nal_ofst = nal_ofst;
{
WORD32 bytes_remaining = ps_codec->i4_bytes_remaining - nal_ofst;
bytes_remaining = MIN((UWORD32)bytes_remaining, ps_codec->u4_bitsbuf_size);
ihevcd_nal_remv_emuln_bytes(ps_codec->pu1_inp_bitsbuf + nal_ofst,
ps_codec->pu1_bitsbuf,
bytes_remaining,
&nal_len, &bits_len);
/* Decoder may read upto 8 extra bytes at the end of frame */
/* These are not used, but still set them to zero to avoid uninitialized reads */
if(bits_len < (WORD32)(ps_codec->u4_bitsbuf_size - 8))
{
memset(ps_codec->pu1_bitsbuf + bits_len, 0, 2 * sizeof(UWORD32));
}
}
/* This may be used to update the offsets for tiles and entropy sync row offsets */
ps_codec->i4_num_emln_bytes = nal_len - bits_len;
ps_codec->i4_nal_len = nal_len;
ihevcd_bits_init(&ps_codec->s_parse.s_bitstrm, ps_codec->pu1_bitsbuf,
bits_len);
ret = ihevcd_nal_unit(ps_codec);
/* If the frame is incomplete and
* the bytes remaining is zero or a header is received,
* complete the frame treating it to be in error */
if(ps_codec->i4_pic_present &&
(ps_codec->s_parse.i4_next_ctb_indx != ps_codec->s_parse.ps_sps->i4_pic_size_in_ctb))
{
if((ps_codec->i4_bytes_remaining - (nal_len + nal_ofst) <= MIN_START_CODE_LEN) ||
(ps_codec->i4_header_in_slice_mode))
{
slice_header_t *ps_slice_hdr_next;
/* Fabricate a terminating slice header one past the current one so
 * concealment can run to the bottom of the picture. */
ps_codec->s_parse.i4_cur_slice_idx--;
if(ps_codec->s_parse.i4_cur_slice_idx < 0)
ps_codec->s_parse.i4_cur_slice_idx = 0;
ps_slice_hdr_next = ps_codec->s_parse.ps_slice_hdr_base + ((ps_codec->s_parse.i4_cur_slice_idx + 1) & (MAX_SLICE_HDR_CNT - 1));
ps_slice_hdr_next->i2_ctb_x = 0;
ps_slice_hdr_next->i2_ctb_y = ps_codec->s_parse.ps_sps->i2_pic_ht_in_ctb;
ps_codec->i4_slice_error = 1;
continue;
}
}
if(IHEVCD_IGNORE_SLICE == ret)
{
ps_codec->pu1_inp_bitsbuf += (nal_ofst + nal_len);
ps_codec->i4_bytes_remaining -= (nal_ofst + nal_len);
continue;
}
if((IVD_RES_CHANGED == ret) ||
(IHEVCD_UNSUPPORTED_DIMENSIONS == ret))
{
break;
}
/* Update bytes remaining and bytes consumed and input bitstream pointer */
/* Do not consume the NAL in the following cases */
/* Slice header reached during header decode mode */
/* TODO: Next picture's slice reached */
if(ret != IHEVCD_SLICE_IN_HEADER_MODE)
{
if((0 == ps_codec->i4_slice_error) ||
(ps_codec->i4_bytes_remaining - (nal_len + nal_ofst) <= MIN_START_CODE_LEN))
{
ps_codec->pu1_inp_bitsbuf += (nal_ofst + nal_len);
ps_codec->i4_bytes_remaining -= (nal_ofst + nal_len);
}
if(ret != IHEVCD_SUCCESS)
break;
if(ps_codec->s_parse.i4_end_of_frame)
break;
}
else
{
ret = IHEVCD_SUCCESS;
break;
}
/* Allocate dynamic bitstream buffer once SPS is decoded */
if((ps_codec->u4_allocate_dynamic_done == 0) && ps_codec->i4_sps_done)
{
/* NOTE(review): this inner 'ret' shadows the outer one intentionally,
 * so an allocation failure here does not clobber the NAL result. */
WORD32 ret;
ret = ihevcd_allocate_dynamic_bufs(ps_codec);
if(ret != IV_SUCCESS)
{
/* Free any dynamic buffers that are allocated */
ihevcd_free_dynamic_bufs(ps_codec);
ps_codec->i4_error_code = IVD_MEM_ALLOC_FAILED;
ps_dec_op->u4_error_code |= 1 << IVD_FATALERROR;
ps_dec_op->u4_error_code |= IVD_MEM_ALLOC_FAILED;
return IV_FAIL;
}
}
BREAK_AFTER_SLICE_NAL();
}
/* A failure before the very first picture is decoded is fatal for the call. */
if((ps_codec->u4_pic_cnt == 0) && (ret != IHEVCD_SUCCESS))
{
ps_codec->i4_error_code = ret;
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
return IV_FAIL;
}
/* Post-picture bookkeeping: format conversion, reference/display buffer
 * status updates and worker-thread synchronization. */
if(1 == ps_codec->i4_pic_present)
{
WORD32 i;
sps_t *ps_sps = ps_codec->s_parse.ps_sps;
ps_codec->i4_first_pic_done = 1;
/*TODO temporary fix: end_of_frame is checked before adding format conversion to job queue */
if(ps_codec->i4_num_cores > 1 && ps_codec->s_parse.i4_end_of_frame)
{
/* Add job queue for format conversion / frame copy for each ctb row */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
process_ctxt_t *ps_proc;
/* i4_num_cores - 1 contexts are currently being used by other threads */
ps_proc = &ps_codec->as_process[ps_codec->i4_num_cores - 1];
if((ps_codec->ps_disp_buf) &&
((0 == ps_codec->i4_share_disp_buf) || (IV_YUV_420P == ps_codec->e_chroma_fmt)))
{
/* If format conversion jobs were not issued in pic_init() add them here */
if((0 == ps_codec->u4_enable_fmt_conv_ahead) ||
(ps_codec->i4_disp_buf_id == ps_proc->i4_cur_pic_buf_id))
for(i = 0; i < ps_sps->i2_pic_ht_in_ctb; i++)
{
proc_job_t s_job;
IHEVCD_ERROR_T ret;
s_job.i4_cmd = CMD_FMTCONV;
s_job.i2_ctb_cnt = 0;
s_job.i2_ctb_x = 0;
s_job.i2_ctb_y = i;
s_job.i2_slice_idx = 0;
s_job.i4_tu_coeff_data_ofst = 0;
ret = ihevcd_jobq_queue((jobq_t *)ps_codec->s_parse.pv_proc_jobq,
&s_job, sizeof(proc_job_t), 1);
if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS)
return (WORD32)ret;
}
}
/* Reached end of frame : Signal terminate */
/* The terminate flag is checked only after all the jobs are dequeued */
ret = ihevcd_jobq_terminate((jobq_t *)ps_codec->s_parse.pv_proc_jobq);
/* This thread also drains the job queue alongside the workers. */
while(1)
{
IHEVCD_ERROR_T ret;
proc_job_t s_job;
process_ctxt_t *ps_proc;
/* i4_num_cores - 1 contexts are currently being used by other threads */
ps_proc = &ps_codec->as_process[ps_codec->i4_num_cores - 1];
ret = ihevcd_jobq_dequeue((jobq_t *)ps_proc->pv_proc_jobq, &s_job,
sizeof(proc_job_t), 1);
if((IHEVCD_ERROR_T)IHEVCD_SUCCESS != ret)
break;
ps_proc->i4_ctb_cnt = s_job.i2_ctb_cnt;
ps_proc->i4_ctb_x = s_job.i2_ctb_x;
ps_proc->i4_ctb_y = s_job.i2_ctb_y;
ps_proc->i4_cur_slice_idx = s_job.i2_slice_idx;
if(CMD_PROCESS == s_job.i4_cmd)
{
ihevcd_init_proc_ctxt(ps_proc, s_job.i4_tu_coeff_data_ofst);
ihevcd_process(ps_proc);
}
else if(CMD_FMTCONV == s_job.i4_cmd)
{
sps_t *ps_sps = ps_codec->s_parse.ps_sps;
WORD32 num_rows = 1 << ps_sps->i1_log2_ctb_size;
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
/* Clamp the last CTB row to the display height. */
num_rows = MIN(num_rows, (ps_codec->i4_disp_ht - (s_job.i2_ctb_y << ps_sps->i1_log2_ctb_size)));
if(num_rows < 0)
num_rows = 0;
ihevcd_fmt_conv(ps_codec, ps_proc,
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2],
s_job.i2_ctb_y << ps_sps->i1_log2_ctb_size,
num_rows);
}
}
}
/* In case of non-shared mode and while running in single core mode, then convert/copy the frame to output buffer */
/* Only if the codec is in non-shared mode or in shared mode but needs 420P output */
else if((ps_codec->ps_disp_buf) && ((0 == ps_codec->i4_share_disp_buf) ||
(IV_YUV_420P == ps_codec->e_chroma_fmt)) &&
(ps_codec->s_parse.i4_end_of_frame))
{
process_ctxt_t *ps_proc = &ps_codec->as_process[proc_idx];
/* Set remaining number of rows to be processed */
ps_codec->s_fmt_conv.i4_num_rows = ps_codec->i4_disp_ht
- ps_codec->s_fmt_conv.i4_cur_row;
if(0 == ps_proc->i4_init_done)
{
ihevcd_init_proc_ctxt(ps_proc, 0);
}
if(ps_codec->s_fmt_conv.i4_num_rows < 0)
ps_codec->s_fmt_conv.i4_num_rows = 0;
ret = ihevcd_fmt_conv(ps_codec, ps_proc,
ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2],
ps_codec->s_fmt_conv.i4_cur_row,
ps_codec->s_fmt_conv.i4_num_rows);
ps_codec->s_fmt_conv.i4_cur_row += ps_codec->s_fmt_conv.i4_num_rows;
}
DEBUG_DUMP_MV_MAP(ps_codec);
/* Mark MV Buf as needed for reference */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_mv_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_mv_bank_buf_id,
BUF_MGR_REF);
/* Mark pic buf as needed for reference */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id,
BUF_MGR_REF);
/* Mark pic buf as needed for display */
ihevc_buf_mgr_set_status((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id,
BUF_MGR_DISP);
/* Insert the current picture as short term reference */
ihevc_dpb_mgr_insert_ref((dpb_mgr_t *)ps_codec->pv_dpb_mgr,
ps_codec->as_process[proc_idx].ps_cur_pic,
ps_codec->as_process[proc_idx].i4_cur_pic_buf_id);
/* If a frame was displayed (in non-shared mode), then release it from display manager */
if((0 == ps_codec->i4_share_disp_buf) && (ps_codec->ps_disp_buf))
ihevc_buf_mgr_release((buf_mgr_t *)ps_codec->pv_pic_buf_mgr,
ps_codec->i4_disp_buf_id, BUF_MGR_DISP);
/* Wait for threads */
for(i = 0; i < (ps_codec->i4_num_cores - 1); i++)
{
if(ps_codec->ai4_process_thread_created[i])
{
ithread_join(ps_codec->apv_process_thread_handle[i], NULL);
ps_codec->ai4_process_thread_created[i] = 0;
}
}
DEBUG_VALIDATE_PADDED_REGION(&ps_codec->as_process[proc_idx]);
if(ps_codec->u4_pic_cnt > 0)
{
DEBUG_DUMP_PIC_PU(ps_codec);
}
DEBUG_DUMP_PIC_BUFFERS(ps_codec);
/* Increment the number of pictures decoded */
ps_codec->u4_pic_cnt++;
}
ihevcd_fill_outargs(ps_codec, ps_dec_ip, ps_dec_op);
/* Watermark the output with the codec logo (no-op in builds where
 * INSERT_LOGO expands to nothing). */
if(1 == ps_dec_op->u4_output_present)
{
WORD32 xpos = ps_codec->i4_disp_wd - 32 - LOGO_WD;
WORD32 ypos = ps_codec->i4_disp_ht - 32 - LOGO_HT;
if(ypos < 0)
ypos = 0;
if(xpos < 0)
xpos = 0;
INSERT_LOGO(ps_dec_ip->s_out_buffer.pu1_bufs[0],
ps_dec_ip->s_out_buffer.pu1_bufs[1],
ps_dec_ip->s_out_buffer.pu1_bufs[2], ps_codec->i4_disp_strd,
xpos,
ypos,
ps_codec->e_chroma_fmt,
ps_codec->i4_disp_wd,
ps_codec->i4_disp_ht);
}
return ret;
}
| 1
|
448,909
|
/*
 * Finish the current XPS page: close its markup and relationship part,
 * hand the page to the standard output machinery, and roll over the
 * output file when one-file-per-page output is configured.
 * Returns 0 (or a positive code from the device open) on success, a
 * negative error code otherwise.
 */
xps_output_page(gx_device *dev, int num_copies, int flush)
{
    gx_device_xps *const xps = (gx_device_xps *)dev;
    gx_device_vector *const vdev = (gx_device_vector *)dev;
    int code;

    /* Terminate the page markup. */
    write_str_to_current_page(xps, "</Canvas></FixedPage>");

    /* Close the per-page relationship xml, if any was opened. */
    if (xps->relationship_count > 0)
    {
        code = close_page_relationship(xps);
        if (code < 0)
            return gs_rethrow_code(code);
        xps->relationship_count = 0; /* Reset for next page */
    }

    xps->page_count++;

    if (gp_ferror(xps->file))
        return gs_throw_code(gs_error_ioerror);

    code = gx_finish_output_page(dev, num_copies, flush);
    if (code < 0)
        return code;

    /* Check if we need to change the output file for separate
       pages. NB not sure if this will work correctly. */
    if (gx_outputfile_is_separate_pages(vdev->fname, dev->memory)) {
        code = xps_close_device(dev);
        if (code < 0)
            return code;
        code = xps_open_device(dev);
    }

    if_debug1m('_', dev->memory, "xps_output_page - page=%d\n", xps->page_count);

    vdev->in_page = false;
    return code;
}
| 0
|
387,435
|
/*
 * Answer a ntpq read-variables request for system variables.
 *
 * Parses the requested variable list from the control packet; if any
 * variables were named, emits exactly those (built-in codes first, then
 * extension variables), otherwise emits the default variable set.
 * Responds with CERR_UNKNOWNVAR if an unknown name is requested.
 */
read_sysvars(void)
{
const struct ctl_var *v;
struct ctl_var *kv;
u_int n;
u_int gotvar;
const u_char *cs;
char * valuep;
const char * pch;
u_char *wants;
size_t wants_count;
/*
* Wants system variables. Figure out which he wants
* and give them to him.
*/
rpkt.status = htons(ctlsysstatus());
if (res_authokay)
ctl_sys_num_events = 0;
/* One flag slot per built-in code (0..CS_MAXCODE) plus one per
 * extension variable, laid out back to back. */
wants_count = CS_MAXCODE + 1 + count_var(ext_sys_var);
wants = emalloc_zero(wants_count);
gotvar = 0;
while (NULL != (v = ctl_getitem(sys_var, &valuep))) {
if (!(EOV & v->flags)) {
/* Known built-in variable: flag it by its code. */
INSIST(v->code < wants_count);
wants[v->code] = 1;
gotvar = 1;
} else {
/* Not a built-in: re-parse the same token against the
 * extension variable table. */
v = ctl_getitem(ext_sys_var, &valuep);
INSIST(v != NULL);
if (EOV & v->flags) {
ctl_error(CERR_UNKNOWNVAR);
free(wants);
return;
}
/* Extension flags live after the built-in codes. */
n = v->code + CS_MAXCODE + 1;
INSIST(n < wants_count);
wants[n] = 1;
gotvar = 1;
}
}
if (gotvar) {
/* Emit only the explicitly requested variables. */
for (n = 1; n <= CS_MAXCODE; n++)
if (wants[n])
ctl_putsys(n);
for (n = 0; n + CS_MAXCODE + 1 < wants_count; n++)
if (wants[n + CS_MAXCODE + 1]) {
pch = ext_sys_var[n].text;
ctl_putdata(pch, strlen(pch), 0);
}
} else {
/* No names given: emit the default set. */
for (cs = def_sys_var; *cs != 0; cs++)
ctl_putsys((int)*cs);
for (kv = ext_sys_var; kv && !(EOV & kv->flags); kv++)
if (DEF & kv->flags)
ctl_putdata(kv->text, strlen(kv->text),
0);
}
free(wants);
ctl_flushpkt(0);
}
| 0
|
361,901
|
/**
 * Removes a channel link, both in memory and in the database.
 *
 * @param c Channel whose link is removed.
 * @param l The linked channel to remove, or NULL to remove all of c's links.
 *
 * Temporary channels are not persisted, so no database rows are touched
 * when either end of the link is temporary.
 */
void Server::removeLink(Channel *c, Channel *l) {
	c->unlink(l);

	// BUGFIX: 'l' may legitimately be NULL (remove-all-links case handled
	// in the else branch below); the old code dereferenced it here
	// unconditionally.
	if (c->bTemporary || (l && l->bTemporary))
		return;

	TransactionHolder th;
	QSqlQuery &query = *th.qsqQuery;

	if (l) {
		// Links are stored symmetrically; delete both directions.
		SQLPREP("DELETE FROM `%1channel_links` WHERE `server_id` = ? AND `channel_id` = ? AND `link_id` = ?");
		query.addBindValue(iServerNum);
		query.addBindValue(c->iId);
		query.addBindValue(l->iId);
		SQLEXEC();

		query.addBindValue(iServerNum);
		query.addBindValue(l->iId);
		query.addBindValue(c->iId);
		SQLEXEC();
	} else {
		// No target given: wipe every link row that mentions the channel.
		SQLPREP("DELETE FROM `%1channel_links` WHERE `server_id` = ? AND (`channel_id` = ? OR `link_id` = ?)");
		query.addBindValue(iServerNum);
		query.addBindValue(c->iId);
		query.addBindValue(c->iId);
		SQLEXEC();
	}
}
| 0
|
83,441
|
/*
 * Computes the shared-memory size needed for the backend array:
 * one Backend slot per potential postmaster child. mul_size()
 * checks for overflow.
 */
ShmemBackendArraySize(void)
{
return mul_size(MaxLivePostmasterChildren(), sizeof(Backend));
}
| 0
|
89,227
|
// Formats the current UTC time as an HTTP-date (RFC 7231 IMF-fixdate),
// e.g. "Tue, 15 Nov 1994 08:12:31 GMT".
std::string HttpDate() {
  const std::time_t now = std::time(nullptr);
  // std::gmtime returns a pointer to shared static storage, so copy the
  // result immediately.
  const std::tm utc = *std::gmtime(&now);
  std::ostringstream out;
  // Day/month names must be the English abbreviations regardless of the
  // process locale.
  out.imbue(std::locale::classic());
  out << std::put_time(&utc, "%a, %d %b %Y %H:%M:%S GMT");
  return out.str();
}
| 0
|
44,640
|
/* Destructor for an ident_entry: drops every reference the entry owns. */
static void ie_destructor(void *arg)
{
	struct ident_entry *entry = arg;

	mem_deref(entry->content.publish);
	mem_deref(entry->content.accept);
	mem_deref(entry->ident);
}
| 0
|
484,281
|
/*
 * ":redrawstatus" command: force a redraw of the status line(s) of the
 * current buffer, or of all windows when "!" was used.
 */
ex_redrawstatus(exarg_T *eap UNUSED)
{
    int		save_RedrawingDisabled = RedrawingDisabled;
    int		save_p_lz = p_lz;

    // Temporarily allow redrawing and disable 'lazyredraw'.
    RedrawingDisabled = 0;
    p_lz = FALSE;

    if (eap->forceit)
	status_redraw_all();
    else
	status_redraw_curbuf();
    update_screen(VIsual_active ? UPD_INVERTED : 0);

    RedrawingDisabled = save_RedrawingDisabled;
    p_lz = save_p_lz;
    out_flush();
}
| 0
|
25,029
|
/**
 * Decodes one DXA video packet.
 *
 * A packet optionally starts with a 'CMAP' chunk (4-byte tag + 256*3
 * palette bytes), followed by a frame chunk: 'NULL' (repeat previous
 * frame) or a chunk whose 5th byte selects the compression mode, with
 * zlib-compressed payload from offset 9 for most modes.
 *
 * Returns the number of bytes consumed (the full packet) on success,
 * or a negative AVERROR on failure.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    DxaDecContext *const c = avctx->priv_data;
    uint8_t *outptr, *srcptr, *tmpptr;
    unsigned long dsize;
    int i, j, compr, ret;
    int stride;
    int orig_buf_size = buf_size;
    int pc = 0;

    /* BUGFIX: the 4-byte chunk tag below was read without checking that
     * the packet actually contains it. */
    if (buf_size < 4) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    if (buf[0] == 'C' && buf[1] == 'M' && buf[2] == 'A' && buf[3] == 'P') {
        int r, g, b;

        /* BUGFIX: need the tag, a full 256*3 palette, and at least the
         * 4-byte tag of the following frame chunk. */
        if (buf_size < 4 + 768 + 4) {
            av_log(avctx, AV_LOG_ERROR, "packet too small\n");
            return AVERROR_INVALIDDATA;
        }
        buf += 4;
        for (i = 0; i < 256; i++) {
            r = *buf++;
            g = *buf++;
            b = *buf++;
            c->pal[i] = (r << 16) | (g << 8) | b;
        }
        pc = 1;
        buf_size -= 768 + 4;
    }

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
    frame->palette_has_changed = pc;

    outptr = frame->data[0];
    srcptr = c->decomp_buf;
    tmpptr = c->prev.data[0];
    stride = frame->linesize[0];

    if (buf[0] == 'N' && buf[1] == 'U' && buf[2] == 'L' && buf[3] == 'L')
        compr = -1;
    else {
        /* BUGFIX: the compression byte and the compressed payload start
         * (offset 9) must fit in the remaining chunk. */
        if (buf_size < 9) {
            av_log(avctx, AV_LOG_ERROR, "packet too small\n");
            return AVERROR_INVALIDDATA;
        }
        compr = buf[4];
    }

    dsize = c->dsize;
    if ((compr != 4 && compr != -1) &&
        uncompress(c->decomp_buf, &dsize, buf + 9, buf_size - 9) != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Uncompress failed!\n");
        return AVERROR_UNKNOWN;
    }
    switch (compr) {
    case -1: /* 'NULL' chunk: repeat the previous frame (black if none). */
        frame->key_frame = 0;
        frame->pict_type = AV_PICTURE_TYPE_P;
        if (c->prev.data[0])
            memcpy(frame->data[0], c->prev.data[0], frame->linesize[0] * avctx->height);
        else {
            memset(frame->data[0], 0, frame->linesize[0] * avctx->height);
            frame->key_frame = 1;
            frame->pict_type = AV_PICTURE_TYPE_I;
        }
        break;
    case 2:
    case 3:
    case 4:
    case 5:
        /* Odd modes XOR against the previous frame (delta); even modes
         * carry the raw frame. */
        frame->key_frame = !(compr & 1);
        frame->pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
        for (j = 0; j < avctx->height; j++) {
            if (compr & 1) {
                for (i = 0; i < avctx->width; i++)
                    outptr[i] = srcptr[i] ^ tmpptr[i];
                tmpptr += stride;
            } else
                memcpy(outptr, srcptr, avctx->width);
            outptr += stride;
            srcptr += avctx->width;
        }
        break;
    case 12:
    case 13:
        frame->key_frame = 0;
        frame->pict_type = AV_PICTURE_TYPE_P;
        decode_13(avctx, c, frame->data[0], frame->linesize[0], srcptr, c->prev.data[0]);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown/unsupported compression type %d\n", buf[4]);
        return AVERROR_INVALIDDATA;
    }

    /* Keep a reference to this frame for the next delta. */
    av_frame_unref(&c->prev);
    if ((ret = av_frame_ref(&c->prev, frame)) < 0)
        return ret;

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return orig_buf_size;
}
| 0
|
100,361
|
/* Enables modem-status interrupts on the port. */
static void multi_enable_ms(struct sb_uart_port *port)
{
	struct mp_port *mp = (struct mp_port *)port;

	/* Set the MSI bit in the cached IER and push it to the chip. */
	mp->ier |= UART_IER_MSI;
	serial_out(mp, UART_IER, mp->ier);
}
| 0
|
413,531
|
/* List destructor for queued completion messages. */
static void _slurmctld_free_comp_msg_list(void *x)
{
	slurm_msg_t *msg = (slurm_msg_t *) x;

	if (!msg)
		return;

	/* Batch launch payloads have a dedicated deallocator; free the
	 * payload first and clear it so the generic free doesn't touch it. */
	if (msg->msg_type == REQUEST_BATCH_JOB_LAUNCH) {
		slurm_free_job_launch_msg(msg->data);
		msg->data = NULL;
	}
	slurm_free_comp_msg_list(msg);
}
| 0
|
18,075
|
/* Dissects a PER-encoded IA5String constrained to 1..64 characters. */
static int dissect_h245_IA5String_SIZE_1_64(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)
{
    return dissect_per_IA5String(tvb, offset, actx, tree, hf_index, 1, 64, FALSE);
}
| 0
|
50,531
|
static void __exit bfq_exit(void)
{
elv_unregister(&iosched_bfq_mq);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
blkcg_policy_unregister(&blkcg_policy_bfq);
#endif
bfq_slab_kill();
| 0
|
344,304
|
/*
 * Transits the DFA from state 's' reading input at *pp, handling
 * multibyte operators.
 *
 * If no multibyte operator of state 's' can match at *pp, performs an
 * ordinary single-byte transition. Otherwise consumes up to the longest
 * multibyte/collating-element match, accumulating follow positions, and
 * returns the index of the resulting state. *pp is advanced past the
 * consumed bytes.
 *
 * NOTE(review): relies on the file-level buffers buf_begin/inputwcs being
 * in sync with *pp; behavior for out-of-range offsets is not defensible
 * from this function alone — verify against the callers.
 */
transit_state (struct dfa *d, int s, unsigned char const **pp)
{
int s1;
int mbclen; /* The length of current input multibyte character. */
int maxlen = 0;
int i, j;
int *match_lens = NULL;
int nelem = d->states[s].mbps.nelem; /* Just a alias. */
position_set follows;
unsigned char const *p1 = *pp;
wchar_t wc;
if (nelem > 0)
/* This state has (a) multibyte operator(s).
We check whether each of them can match or not. */
{
/* Note: caller must free the return value of this function. */
match_lens = check_matching_with_multibyte_ops(d, s, *pp - buf_begin);
for (i = 0; i < nelem; i++)
/* Search the operator which match the longest string,
in this state. */
{
if (match_lens[i] > maxlen)
maxlen = match_lens[i];
}
}
if (nelem == 0 || maxlen == 0)
/* This state has no multibyte operator which can match.
We need to check only one single byte character. */
{
status_transit_state rs;
rs = transit_state_singlebyte(d, s, *pp, &s1);
/* We must update the pointer if state transition succeeded. */
if (rs == TRANSIT_STATE_DONE)
++*pp;
free(match_lens);
return s1;
}
/* This state has some operators which can match a multibyte character. */
alloc_position_set(&follows, d->nleaves);
/* `maxlen' may be longer than the length of a character, because it may
not be a character but a (multi character) collating element.
We enumerate all of the positions which `s' can reach by consuming
`maxlen' bytes. */
transit_state_consume_1char(d, s, pp, match_lens, &mbclen, &follows);
wc = inputwcs[*pp - mbclen - buf_begin];
s1 = state_index(d, &follows, wchar_context (wc));
realloc_trans_if_necessary(d, s1);
/* Keep consuming one character at a time until `maxlen' bytes have
 * been eaten, merging in follow positions of operators whose match
 * length ends exactly here. */
while (*pp - p1 < maxlen)
{
transit_state_consume_1char(d, s1, pp, NULL, &mbclen, &follows);
for (i = 0; i < nelem ; i++)
{
if (match_lens[i] == *pp - p1)
for (j = 0;
j < d->follows[d->states[s1].mbps.elems[i].index].nelem; j++)
insert(d->follows[d->states[s1].mbps.elems[i].index].elems[j],
&follows);
}
wc = inputwcs[*pp - mbclen - buf_begin];
s1 = state_index(d, &follows, wchar_context (wc));
realloc_trans_if_necessary(d, s1);
}
free(match_lens);
free(follows.elems);
return s1;
}
| 1
|
131,768
|
/* Persists the delta between two digested summaries under
 * summaries/<from>-<to>.delta in the repo, skipping the write when an
 * identically sized copy already exists. Returns TRUE on success. */
flatpak_repo_save_digested_summary_delta (OstreeRepo *repo,
const char *from_digest,
const char *to_digest,
GBytes *delta,
GCancellable *cancellable,
GError **error)
{
  int repo_dfd = ostree_repo_get_dfd (repo);
  g_autofree char *filename = g_strconcat (from_digest, "-", to_digest, ".delta", NULL);
  g_autofree char *path = NULL;
  struct stat stbuf;

  /* Make sure the summaries/ subdirectory exists. */
  if (!glnx_shutil_mkdir_p_at (repo_dfd, "summaries",
                               0775,
                               cancellable,
                               error))
    return FALSE;

  path = g_build_filename ("summaries", filename, NULL);

  /* Check for pre-existing copy of same size and avoid re-writing it */
  if (fstatat (repo_dfd, path, &stbuf, 0) == 0 &&
      stbuf.st_size == g_bytes_get_size (delta))
    {
      g_debug ("Reusing digested summary-diff for %s", filename);
      return TRUE;
    }

  if (!glnx_file_replace_contents_at (repo_dfd, path,
                                      g_bytes_get_data (delta, NULL),
                                      g_bytes_get_size (delta),
                                      ostree_repo_get_disable_fsync (repo) ? GLNX_FILE_REPLACE_NODATASYNC : GLNX_FILE_REPLACE_DATASYNC_NEW,
                                      cancellable, error))
    return FALSE;

  g_debug ("Wrote digested summary delta at %s", path);
  return TRUE;
}
| 0
|
510,517
|
/*
  Constructor used by Item_field & Item_*_ref.

  Records both the original (orig_*) and the effective db/table/field
  names; the item's printable name defaults to the unqualified field
  name.
*/
Item_ident::Item_ident(Name_resolution_context *context_arg,
                       const char *db_name_arg, const char *table_name_arg,
                       const char *field_name_arg)
  :orig_db_name(db_name_arg), orig_table_name(table_name_arg),
   orig_field_name(field_name_arg), context(context_arg),
   db_name(db_name_arg), table_name(table_name_arg),
   field_name(field_name_arg),
   alias_name_used(FALSE), cached_field_index(NO_CACHED_FIELD_INDEX),
   cached_table(0), depended_from(0), can_be_depended(TRUE)
{
  name = (char*) field_name_arg;
  // A missing field name yields an empty (zero-length) item name.
  name_length = !name ? 0 : strlen(name);
}
| 0
|
457,358
|
/* Enumeration callback: collects the handle of every object whose
 * attributes satisfy the match template. Always returns true so the
 * enumeration keeps going. */
sink_if_match (p11_index *index,
               index_object *obj,
               CK_ATTRIBUTE *match,
               CK_ULONG count,
               void *data)
{
	index_bucket *handles = data;

	if (p11_attrs_matchn (obj->attrs, match, count))
		bucket_push (handles, obj->handle);

	return true;
}
| 0
|
348,919
|
/* Closes the plugin side of a connection: gives the plugin a chance to
 * free its per-connection handle, then clears the stored handle. */
plugin_close (struct backend *b, struct connection *conn)
{
  struct backend_plugin *bp = container_of (b, struct backend_plugin, backend);

  assert (connection_get_handle (conn, 0));

  debug ("close");

  /* The .close callback is optional. */
  if (bp->plugin.close)
    bp->plugin.close (connection_get_handle (conn, 0));

  backend_set_handle (b, conn, NULL);
}
| 1
|
16,979
|
/* Display-invalidate callback for the VMware SVGA device. */
static void vmsvga_invalidate_display(void *opaque)
{
    struct vmsvga_state_s *s = opaque;

    if (s->enable) {
        /* SVGA mode active: repaint on the next refresh. */
        s->invalidated = 1;
    } else {
        /* SVGA disabled: defer to the plain VGA emulation. */
        s->vga.hw_ops->invalidate(&s->vga);
    }
}
| 0
|
305,307
|
/* Returns GF_TRUE if the box may appear at file level, i.e. its registry
 * lists "file" (or the wildcard "*") among its allowed parents. */
Bool gf_isom_box_is_file_level(GF_Box *s)
{
	const char *parents;

	if (!s || !s->registry)
		return GF_FALSE;

	parents = s->registry->parents_4cc;
	if (strstr(parents, "file") || strstr(parents, "*"))
		return GF_TRUE;
	return GF_FALSE;
}
| 0
|
339,696
|
/*
 * BIOS port write handler: this port is not implemented, so just log
 * the value that the guest wrote.
 */
static void vmsvga_bios_write(void *opaque, uint32_t address, uint32_t data)
{
printf("%s: what are we supposed to do with (%08x)?\n",
__FUNCTION__, data);
}
| 0
|
354,788
|
/*
 * Stub file_operations handler for bad inodes: every operation,
 * including release, fails with -EIO.
 */
static int bad_file_release(struct inode *inode, struct file *filp)
{
return -EIO;
}
| 0
|
105,563
|
/*
 * Writes 'buf_len' bytes of 'buf' into a CoolKey object at 'offset',
 * splitting the transfer into APDU-sized chunks.
 *
 * Each chunk is sent with a WRITE_OBJECT APDU carrying the object id,
 * current offset and chunk length, authenticated with 'nonce'.
 * Returns the number of bytes written (buf_len on full success since
 * the loop only exits normally when 'left' reaches 0) or a negative
 * SC error code from the failing APDU exchange.
 *
 * NOTE(review): assumes card->max_send_size is large enough that
 * subtracting the parameter header and nonce does not wrap; the MIN with
 * COOLKEY_MAX_CHUNK_SIZE caps the result either way — confirm against
 * the card driver's initialization.
 */
static int coolkey_write_object(sc_card_t *card, unsigned long object_id,
size_t offset, const u8 *buf, size_t buf_len, const u8 *nonce, size_t nonce_size)
{
coolkey_write_object_param_t params;
size_t operation_len;
size_t left = buf_len;
int r;
size_t max_operation_len;
/* set limit for the card's maximum send size and short write */
max_operation_len = MIN(COOLKEY_MAX_CHUNK_SIZE, (card->max_send_size - sizeof(coolkey_read_object_param_t) - nonce_size));
ulong2bebytes(&params.head.object_id[0], object_id);
do {
/* Refresh the offset field for each chunk; the object id stays fixed. */
ulong2bebytes(&params.head.offset[0], offset);
operation_len = MIN(left, max_operation_len);
params.head.length = operation_len;
memcpy(params.buf, buf, operation_len);
r = coolkey_apdu_io(card, COOLKEY_CLASS, COOLKEY_INS_WRITE_OBJECT, 0, 0,
(u8 *)&params, sizeof(params.head)+operation_len, NULL, 0, nonce, nonce_size);
if (r < 0) {
goto fail;
}
buf += operation_len;
offset += operation_len;
left -= operation_len;
} while (left != 0);
return buf_len - left;
fail:
return r;
}
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.