idx
int64 | func
string | target
int64 |
|---|---|---|
427,807
|
/* Dump the guest's GHCB (Guest-Hypervisor Communication Block) contents
 * for debugging.  Gated by the dump_invalid_vmcb module parameter since
 * the GHCB carries guest state that should not be logged by default. */
static void dump_ghcb(struct vcpu_svm *svm)
{
	struct ghcb *ghcb = svm->ghcb;
	unsigned int nbits;

	/* Re-use the dump_invalid_vmcb module parameter */
	if (!dump_invalid_vmcb) {
		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
		return;
	}

	nbits = sizeof(ghcb->save.valid_bitmap) * 8;	/* bitmap size in bits */

	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
	/* "%*pb" renders the valid bitmap with a field width of nbits */
	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
}
| 0
|
389,760
|
/* Parse the program's command line into a newly allocated cmdopts_t.
 *
 * Exits the process on allocation failure; on a usage error the options
 * object is destroyed and badusage() is invoked.  Informational flags
 * (--help / --version / --list-*-formats) skip output-format validation.
 * The returned object must be released with cmdopts_destroy(). */
cmdopts_t *cmdopts_parse(int argc, char **argv)
{
	/* Numeric tags for the options, referenced by the table below. */
	enum {
		CMDOPT_HELP = 0,
		CMDOPT_VERBOSE,
		CMDOPT_QUIET,
		CMDOPT_INFILE,
		CMDOPT_INFMT,
		CMDOPT_INOPT,
		CMDOPT_OUTFILE,
		CMDOPT_OUTFMT,
		CMDOPT_OUTOPT,
		CMDOPT_VERSION,
		CMDOPT_DEBUG,
		CMDOPT_CMPTNO,
		CMDOPT_SRGB,
		CMDOPT_MAXMEM,
		CMDOPT_LIST_ENABLED_CODECS,
		CMDOPT_LIST_ALL_CODECS,
		CMDOPT_ENABLE_FORMAT,
		CMDOPT_ENABLE_ALL_FORMATS,
	};
	/* Option-name table; several options have two spellings (long and
	 * short).  Terminated by the {-1, 0, 0} sentinel. */
	static const jas_opt_t cmdoptions[] = {
		{CMDOPT_HELP, "help", 0},
		{CMDOPT_VERBOSE, "verbose", 0},
		{CMDOPT_QUIET, "quiet", 0},
		{CMDOPT_QUIET, "q", 0},
		{CMDOPT_INFILE, "input", JAS_OPT_HASARG},
		{CMDOPT_INFILE, "f", JAS_OPT_HASARG},
		{CMDOPT_INFMT, "input-format", JAS_OPT_HASARG},
		{CMDOPT_INFMT, "t", JAS_OPT_HASARG},
		{CMDOPT_INOPT, "input-option", JAS_OPT_HASARG},
		{CMDOPT_INOPT, "o", JAS_OPT_HASARG},
		{CMDOPT_OUTFILE, "output", JAS_OPT_HASARG},
		{CMDOPT_OUTFILE, "F", JAS_OPT_HASARG},
		{CMDOPT_OUTFMT, "output-format", JAS_OPT_HASARG},
		{CMDOPT_OUTFMT, "T", JAS_OPT_HASARG},
		{CMDOPT_OUTOPT, "output-option", JAS_OPT_HASARG},
		{CMDOPT_OUTOPT, "O", JAS_OPT_HASARG},
		{CMDOPT_VERSION, "version", 0},
		{CMDOPT_DEBUG, "debug-level", JAS_OPT_HASARG},
		{CMDOPT_CMPTNO, "cmptno", JAS_OPT_HASARG},
		{CMDOPT_SRGB, "force-srgb", 0},
		{CMDOPT_SRGB, "S", 0},
		{CMDOPT_MAXMEM, "memory-limit", JAS_OPT_HASARG},
		{CMDOPT_LIST_ENABLED_CODECS, "list-enabled-formats", 0},
		{CMDOPT_LIST_ALL_CODECS, "list-all-formats", 0},
		{CMDOPT_ENABLE_FORMAT, "enable-format", JAS_OPT_HASARG},
		{CMDOPT_ENABLE_ALL_FORMATS, "enable-all-formats", 0},
		{-1, 0, 0}
	};
	cmdopts_t *cmdopts;
	int c;

	if (!(cmdopts = malloc(sizeof(cmdopts_t)))) {
		fprintf(stderr, "error: insufficient memory\n");
		exit(EXIT_FAILURE);
	}

	/* Defaults: no files, formats unresolved (-1), default memory cap. */
	cmdopts->infile = 0;
	cmdopts->infmt = -1;
	cmdopts->infmt_str = 0;
	cmdopts->inopts = 0;
	cmdopts->inoptsbuf[0] = '\0';
	cmdopts->outfile = 0;
	cmdopts->outfmt = -1;
	cmdopts->outfmt_str = 0;
	cmdopts->outopts = 0;
	cmdopts->outoptsbuf[0] = '\0';
	cmdopts->verbose = 0;
	cmdopts->version = 0;
	cmdopts->cmptno = -1;
	cmdopts->debug = 0;
	cmdopts->srgb = 0;
	cmdopts->list_codecs = 0;
	cmdopts->list_codecs_all = 0;
	cmdopts->help = 0;
	cmdopts->max_mem = get_default_max_mem_usage();
	cmdopts->enable_format = 0;
	cmdopts->enable_all_formats = 0;

	while ((c = jas_getopt(argc, argv, cmdoptions)) != EOF) {
		switch (c) {
		case CMDOPT_HELP:
			cmdopts->help = 1;
			break;
		case CMDOPT_VERBOSE:
			cmdopts->verbose = 1;
			break;
		case CMDOPT_QUIET:
			cmdopts->verbose = -1;
			break;
		case CMDOPT_VERSION:
			cmdopts->version = 1;
			break;
		case CMDOPT_LIST_ENABLED_CODECS:
			cmdopts->list_codecs = 1;
			cmdopts->list_codecs_all = 0;
			break;
		case CMDOPT_LIST_ALL_CODECS:
			cmdopts->list_codecs = 1;
			cmdopts->list_codecs_all = 1;
			break;
		case CMDOPT_DEBUG:
			cmdopts->debug = atoi(jas_optarg);
			break;
		case CMDOPT_INFILE:
			cmdopts->infile = jas_optarg;
			break;
		case CMDOPT_INFMT:
			cmdopts->infmt_str= jas_optarg;
			break;
		case CMDOPT_INOPT:
			/* Options accumulate into the fixed-size inoptsbuf. */
			addopt(cmdopts->inoptsbuf, OPTSMAX, jas_optarg);
			cmdopts->inopts = cmdopts->inoptsbuf;
			break;
		case CMDOPT_OUTFILE:
			cmdopts->outfile = jas_optarg;
			break;
		case CMDOPT_OUTFMT:
			cmdopts->outfmt_str = jas_optarg;
			break;
		case CMDOPT_OUTOPT:
			addopt(cmdopts->outoptsbuf, OPTSMAX, jas_optarg);
			cmdopts->outopts = cmdopts->outoptsbuf;
			break;
		case CMDOPT_CMPTNO:
			cmdopts->cmptno = atoi(jas_optarg);
			break;
		case CMDOPT_SRGB:
			cmdopts->srgb = 1;
			break;
		case CMDOPT_MAXMEM:
			cmdopts->max_mem = strtoull(jas_optarg, 0, 10);
			break;
		case CMDOPT_ENABLE_FORMAT:
			cmdopts->enable_format = jas_optarg;
			break;
		case CMDOPT_ENABLE_ALL_FORMATS:
			cmdopts->enable_all_formats = 1;
			break;
		default:
			/* Unknown option: clean up and bail out. */
			cmdopts_destroy(cmdopts);
			badusage();
			break;
		}
	}

	/* Leftover positional arguments are warned about, not errors. */
	while (jas_optind < argc) {
		fprintf(stderr,
		  "warning: ignoring bogus command line argument %s\n",
		  argv[jas_optind]);
		++jas_optind;
	}

	/* Purely informational invocations need no output format. */
	if (cmdopts->version || cmdopts->list_codecs || cmdopts->help) {
		goto done;
	}

	/* The output format can come from --output-format or be inferred
	 * from the output file name later; with neither, give up. */
	if (!cmdopts->outfmt_str && !cmdopts->outfile) {
		fprintf(stderr, "error: cannot determine output format\n");
		cmdopts_destroy(cmdopts);
		badusage();
	}

done:
	return cmdopts;
}
| 0
|
274,841
|
// Verifies GREATER_EQUAL on quantized int8 inputs with a broadcast scalar
// RHS across several input shapes (1-D through 4-D).  The same six LHS
// values are compared against 8 for every shape.
TEST(ComparisonsTest, QuantizedInt8GreaterEqualWithBroadcast) {
  const float kMin = -127.f;
  const float kMax = 127.f;
  std::vector<std::vector<int>> test_shapes = {
      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
  // size_t avoids the signed/unsigned comparison against size().
  for (size_t i = 0; i < test_shapes.size(); ++i) {
    ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
                            {TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
                            BuiltinOperator_GREATER_EQUAL);
    model.QuantizeAndPopulate<int8_t>(model.input1(), {20, -2, -71, 8, 11, 20});
    // Scalar RHS is broadcast against every element of input1.
    model.QuantizeAndPopulate<int8_t>(model.input2(), {8});
    model.Invoke();
    EXPECT_THAT(model.GetOutput(),
                ElementsAre(true, false, false, true, true, true))
        << "With shape number " << i;
  }
}
| 0
|
247,655
|
// Records the expiration time (notAfter) the test expects to observe on
// the peer certificate; returns *this for fluent chaining.
TestUtilOptions& setExpectedExpirationTimePeerCert(const std::string& expected_expiration) {
  expected_expiration_peer_cert_ = expected_expiration;
  return *this;
}
| 0
|
247,572
|
// The server pins two acceptable client-cert SPKI hashes (san_dns /
// san_uri) without configuring a CA.  The client presents an unrelated
// certificate (no_san), so the handshake must fail with
// ssl.fail_verify_cert_hash -- and keep failing even when the client
// allows renegotiation.
TEST_P(SslSocketTest, FailedClientCertificateSpkiVerificationNoCAWrongClientCertificate) {
  envoy::config::listener::v3::Listener listener;
  envoy::config::listener::v3::FilterChain* filter_chain = listener.add_filter_chains();
  envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context;
  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* server_cert =
      tls_context.mutable_common_tls_context()->add_tls_certificates();
  server_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(
      "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem"));
  server_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(
      "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_key.pem"));
  envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext*
      server_validation_ctx =
          tls_context.mutable_common_tls_context()->mutable_validation_context();
  // Only these two SPKIs are acceptable; the client cert below matches neither.
  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_DNS_CERT_SPKI);
  server_validation_ctx->add_verify_certificate_spki(TEST_SAN_URI_CERT_SPKI);
  updateFilterChain(tls_context, *filter_chain);
  envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client;
  envoy::extensions::transport_sockets::tls::v3::TlsCertificate* client_cert =
      client.mutable_common_tls_context()->add_tls_certificates();
  client_cert->mutable_certificate_chain()->set_filename(TestEnvironment::substitute(
      "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem"));
  client_cert->mutable_private_key()->set_filename(TestEnvironment::substitute(
      "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem"));
  TestUtilOptionsV2 test_options(listener, client, false, GetParam());
  testUtilV2(test_options.setExpectedServerStats("ssl.fail_verify_cert_hash")
                 .setExpectedTransportFailureReasonContains("SSLV3_ALERT_CERTIFICATE_UNKNOWN"));
  // Fails even with client renegotiation.
  client.set_allow_renegotiation(true);
  testUtilV2(test_options);
}
| 0
|
221,470
|
flatpak_run_get_pulse_machine_id (void)
{
static const char * const machine_ids[] =
{
"/etc/machine-id",
"/var/lib/dbus/machine-id",
};
gsize i;
for (i = 0; i < G_N_ELEMENTS (machine_ids); i++)
{
g_autofree char *ret = NULL;
if (g_file_get_contents (machine_ids[i], &ret, NULL, NULL))
{
gsize j;
g_strstrip (ret);
for (j = 0; ret[j] != '\0'; j++)
{
if (!g_ascii_isxdigit (ret[j]))
break;
}
if (ret[0] != '\0' && ret[j] == '\0')
return g_steal_pointer (&ret);
}
}
return g_strdup (g_get_host_name ());
}
| 0
|
247,161
|
/* Post a task (task_fun) to the session scheduler, optionally bound to a
 * filter and pid.
 * - In direct mode (or when force_direct_call is set) on the main thread,
 *   the task is executed inline instead of queued, unless it requests a
 *   requeue.
 * - Tasks bound to a filter are appended to that filter's task list; if
 *   the filter had no scheduled/pending task, the task is also "notified"
 *   onto the session's main-thread or secondary task list.
 * is_configure forces main-thread scheduling for filters registered with
 * GF_FS_REG_CONFIGURE_MAIN_THREAD. */
void gf_fs_post_task_ex(GF_FilterSession *fsess, gf_fs_task_callback task_fun, GF_Filter *filter, GF_FilterPid *pid, const char *log_name, void *udta, Bool is_configure, Bool force_direct_call)
{
	GF_FSTask *task;
	Bool force_main_thread = GF_FALSE;
	Bool notified = GF_FALSE;

	assert(fsess);
	assert(task_fun);

	//only flatten calls if in main thread (we still have some broken filters using threading that could trigger tasks)
	if ((force_direct_call || fsess->direct_mode)
		&& (!filter || !filter->in_process)
		&& fsess->tasks_in_process
		&& (gf_th_id()==fsess->main_th.th_id)
	) {
		/* Direct (inline) execution path: build a stack task and run it. */
		GF_FSTask atask;
		u64 task_time = gf_sys_clock_high_res();
		memset(&atask, 0, sizeof(GF_FSTask));
		atask.filter = filter;
		atask.pid = pid;
		atask.run_task = task_fun;
		atask.log_name = log_name;
		atask.udta = udta;
		GF_LOG(GF_LOG_DEBUG, GF_LOG_SCHEDULER, ("Thread 0 task#%d %p executing Filter %s::%s (%d tasks pending)\n", fsess->main_th.nb_tasks, &atask, filter ? filter->name : "none", log_name, fsess->tasks_pending));
		if (filter)
			filter->scheduled_for_next_task = GF_TRUE;
		task_fun(&atask);
		//the task may swap its filter; account time on whichever it ended with
		filter = atask.filter;
		if (filter) {
			filter->time_process += gf_sys_clock_high_res() - task_time;
			filter->scheduled_for_next_task = GF_FALSE;
			filter->nb_tasks_done++;
		}
		if (!atask.requeue_request)
			return;
		//asked to requeue the task, post it
	}

	/*this was a gf_filter_process_task request but direct call could not be done or requeue is requested.
	process_task_queued was incremented by caller without checking for existing process task
	- If the task was not treated, dec / inc will give the same state, undo process_task_queued increment
	- If the task was requeued, dec will undo the increment done when requeuing the task in gf_filter_check_pending_tasks

	In both cases, inc will redo the same logic as in gf_filter_post_process_task_internal, not creating task if gf_filter_process_task is
	already scheduled for the filter

	We must use safe_int_dec/safe_int_inc here for multi thread cases - cf issue #1778
	*/
	if (force_direct_call) {
		assert(filter);
		safe_int_dec(&filter->process_task_queued);
		if (safe_int_inc(&filter->process_task_queued) > 1) {
			return;
		}
	}

	/* Reuse a task object from the reservoir when possible. */
	task = gf_fq_pop(fsess->tasks_reservoir);
	if (!task) {
		GF_SAFEALLOC(task, GF_FSTask);
		if (!task) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_SCHEDULER, ("No more memory to post new task\n"));
			return;
		}
	}
	task->filter = filter;
	task->pid = pid;
	task->run_task = task_fun;
	task->log_name = log_name;
	task->udta = udta;

	if (filter && is_configure) {
		if (filter->freg->flags & GF_FS_REG_CONFIGURE_MAIN_THREAD)
			force_main_thread = GF_TRUE;
	}

	if (filter) {
		gf_mx_p(filter->tasks_mx);

		//no tasks and not scheduled
		if (! filter->scheduled_for_next_task && !gf_fq_count(filter->tasks)) {
			notified = task->notified = GF_TRUE;

			if (!force_main_thread)
				force_main_thread = (filter->main_thread_forced || (filter->freg->flags & GF_FS_REG_MAIN_THREAD)) ? GF_TRUE : GF_FALSE;
		} else if (force_main_thread) {
			/* Filter already has work scheduled: cannot retarget it. */
			force_main_thread = GF_FALSE;
			if (filter->process_th_id && (fsess->main_th.th_id != filter->process_th_id)) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_SCHEDULER, ("Cannot post task to main thread, filter is already scheduled\n"));
			}
		}
		if (!force_main_thread)
			task->blocking = (filter->freg->flags & GF_FS_REG_BLOCKING) ? GF_TRUE : GF_FALSE;

		gf_fq_add(filter->tasks, task);
		gf_mx_v(filter->tasks_mx);

		GF_LOG(GF_LOG_DEBUG, GF_LOG_SCHEDULER, ("Thread %u Posted task %p Filter %s::%s (%d (%d) pending, %d process tasks) on %s task list\n", gf_th_id(), task, filter->name, task->log_name, fsess->tasks_pending, gf_fq_count(filter->tasks), filter->process_task_queued, task->notified ? (force_main_thread ? "main" : "secondary") : "filter"));
	} else {
		task->notified = notified = GF_TRUE;
		GF_LOG(GF_LOG_DEBUG, GF_LOG_SCHEDULER, ("Thread %u Posted filter-less task %s (%d pending) on secondary task list\n", gf_th_id(), task->log_name, fsess->tasks_pending));
	}

	//WARNING, do not use task->notified since the task may have been posted to the filter task list and may already have been swapped
	//with a different value !
	if (notified) {
#ifdef CHECK_TASK_LIST_INTEGRITY
		check_task_list(fsess->main_thread_tasks, task);
		check_task_list(fsess->tasks, task);
		check_task_list(fsess->tasks_reservoir, task);
#endif
		assert(task->run_task);
		if (filter) {
			GF_LOG(GF_LOG_DEBUG, GF_LOG_SCHEDULER, ("Thread %u posting filter task, scheduled_for_next_task %d\n", gf_th_id(), filter->scheduled_for_next_task));
			assert(!filter->scheduled_for_next_task);
		}

		//notify/count tasks posted on the main task or regular task lists
		safe_int_inc(&fsess->tasks_pending);
		if (filter && force_main_thread) {
			gf_fq_add(fsess->main_thread_tasks, task);
			gf_fs_sema_io(fsess, GF_TRUE, GF_TRUE);
		} else {
			assert(task->run_task);
			gf_fq_add(fsess->tasks, task);
			gf_fs_sema_io(fsess, GF_TRUE, GF_FALSE);
		}
	}
}
| 0
|
244,214
|
/* Destructor for the ProgressiveDownloadBox ('pdin'): frees the rate and
 * time arrays, then the box itself.  NULL-safe. */
void pdin_box_del(GF_Box *s)
{
	GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox*)s;
	if (ptr == NULL) return;
	if (ptr->rates) gf_free(ptr->rates);
	if (ptr->times) gf_free(ptr->times);
	gf_free(ptr);
}
| 0
|
246,479
|
/* Parse a WebAssembly resizable-limits structure from the buffer.
 * Reads flags (u7), initial (u32) and -- when the flags bit is set --
 * maximum (u32), stopping at 'bound'.
 * Returns the number of bytes consumed, or 0 on failure. */
static size_t consume_limits_r(RBuffer *b, ut64 bound, struct r_bin_wasm_resizable_limits_t *out) {
	r_return_val_if_fail (b && out, 0);
	/* 'out' is already guaranteed non-NULL above; the old redundant
	 * !out test here was dropped. */
	if (bound >= r_buf_size (b) || r_buf_tell (b) > bound) {
		return 0;
	}
	/* Keep the start position in 64 bits: the old ut32/int arithmetic
	 * truncated positions in buffers larger than 4GB. */
	ut64 start = r_buf_tell (b);
	if (!consume_u7_r (b, bound, &out->flags)) {
		return 0;
	}
	if (!consume_u32_r (b, bound, &out->initial)) {
		return 0;
	}
	/* 'maximum' is only present when the flags bit is set. */
	if (out->flags && !consume_u32_r (b, bound, &out->maximum)) {
		return 0;
	}
	ut64 end = r_buf_tell (b);
	return (end > start)? (size_t)(end - start): 0;
}
| 0
|
218,808
|
/*
  XNoticeWidget() pops up a modal "Notice" window showing 'reason' (and an
  optional 'description') with a single Dismiss button.  It returns when
  the user dismisses it (button, Return/Enter, or WM delete) or after an
  8-second timeout.
*/
MagickExport void XNoticeWidget(Display *display,XWindows *windows,
  const char *reason,const char *description)
{
#define DismissButtonText  "Dismiss"
#define Timeout  8

  const char
    *text;

  int
    x,
    y;

  Status
    status;

  time_t
    timer;

  unsigned int
    height,
    width;

  size_t
    state;

  XEvent
    event;

  XFontStruct
    *font_info;

  XTextProperty
    window_name;

  XWidgetInfo
    dismiss_info;

  XWindowChanges
    window_changes;

  /*
    Determine Notice widget attributes.
  */
  assert(display != (Display *) NULL);
  assert(windows != (XWindows *) NULL);
  assert(reason != (char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",reason);
  XDelay(display,SuspendTime << 3);  /* avoid surprise with delay */
  XSetCursorState(display,windows,MagickTrue);
  XCheckRefreshWindows(display,windows);
  font_info=windows->widget.font_info;
  /* widget must be wide enough for the button and the longest text line */
  width=WidgetTextWidth(font_info,DismissButtonText);
  text=GetLocaleExceptionMessage(XServerError,reason);
  if (text != (char *) NULL)
    if (WidgetTextWidth(font_info,(char *) text) > width)
      width=WidgetTextWidth(font_info,(char *) text);
  if (description != (char *) NULL)
    {
      text=GetLocaleExceptionMessage(XServerError,description);
      if (text != (char *) NULL)
        if (WidgetTextWidth(font_info,(char *) text) > width)
          width=WidgetTextWidth(font_info,(char *) text);
    }
  height=(unsigned int) (font_info->ascent+font_info->descent);
  /*
    Position Notice widget.
  */
  windows->widget.width=width+4*QuantumMargin;
  windows->widget.min_width=width+QuantumMargin;
  if (windows->widget.width < windows->widget.min_width)
    windows->widget.width=windows->widget.min_width;
  windows->widget.height=(unsigned int) (12*height);
  windows->widget.min_height=(unsigned int) (7*height);
  if (windows->widget.height < windows->widget.min_height)
    windows->widget.height=windows->widget.min_height;
  XConstrainWindowPosition(display,&windows->widget);
  /*
    Map Notice widget.
  */
  (void) CopyMagickString(windows->widget.name,"Notice",MaxTextExtent);
  status=XStringListToTextProperty(&windows->widget.name,1,&window_name);
  if (status != False)
    {
      XSetWMName(display,windows->widget.id,&window_name);
      XSetWMIconName(display,windows->widget.id,&window_name);
      (void) XFree((void *) window_name.value);
    }
  window_changes.width=(int) windows->widget.width;
  window_changes.height=(int) windows->widget.height;
  window_changes.x=windows->widget.x;
  window_changes.y=windows->widget.y;
  (void) XReconfigureWMWindow(display,windows->widget.id,windows->widget.screen,
    (unsigned int) (CWWidth | CWHeight | CWX | CWY),&window_changes);
  (void) XMapRaised(display,windows->widget.id);
  windows->widget.mapped=MagickFalse;
  (void) XBell(display,0);
  /*
    Respond to X events.
  */
  timer=GetMagickTime()+Timeout;  /* auto-dismiss deadline */
  state=UpdateConfigurationState;
  do
  {
    if (GetMagickTime() > timer)
      break;
    if (state & UpdateConfigurationState)
      {
        /*
          Initialize Dismiss button information.
        */
        XGetWidgetInfo(DismissButtonText,&dismiss_info);
        dismiss_info.width=(unsigned int) QuantumMargin+
          WidgetTextWidth(font_info,DismissButtonText);
        dismiss_info.height=(unsigned int) ((3*height) >> 1);
        dismiss_info.x=(int)
          ((windows->widget.width >> 1)-(dismiss_info.width >> 1));
        dismiss_info.y=(int)
          (windows->widget.height-(dismiss_info.height << 1));
        state&=(~UpdateConfigurationState);
      }
    if (state & RedrawWidgetState)
      {
        /*
          Redraw Notice widget.
        */
        width=WidgetTextWidth(font_info,(char *) reason);
        x=(int) ((windows->widget.width >> 1)-(width >> 1));
        y=(int) ((windows->widget.height >> 1)-(height << 1));
        (void) XDrawString(display,windows->widget.id,
          windows->widget.annotate_context,x,y,(char *) reason,Extent(reason));
        if (description != (char *) NULL)
          {
            width=WidgetTextWidth(font_info,(char *) description);
            x=(int) ((windows->widget.width >> 1)-(width >> 1));
            y+=height;
            (void) XDrawString(display,windows->widget.id,
              windows->widget.annotate_context,x,y,(char *) description,
              Extent(description));
          }
        XDrawBeveledButton(display,&windows->widget,&dismiss_info);
        XHighlightWidget(display,&windows->widget,BorderOffset,BorderOffset);
        state&=(~RedrawWidgetState);
      }
    /*
      Wait for next event.
    */
    if (XCheckIfEvent(display,&event,XScreenEvent,(char *) windows) == MagickFalse)
      {
        /*
          Do not block if delay > 0.
        */
        XDelay(display,SuspendTime << 2);
        continue;
      }
    switch (event.type)
    {
      case ButtonPress:
      {
        if (MatteIsActive(dismiss_info,event.xbutton))
          {
            /*
              User pressed Dismiss button.
            */
            dismiss_info.raised=MagickFalse;
            XDrawBeveledButton(display,&windows->widget,&dismiss_info);
            break;
          }
        break;
      }
      case ButtonRelease:
      {
        if (windows->widget.mapped == MagickFalse)
          break;
        if (dismiss_info.raised == MagickFalse)
          {
            /* release over the pressed button completes the dismiss */
            if (event.xbutton.window == windows->widget.id)
              if (MatteIsActive(dismiss_info,event.xbutton))
                state|=ExitState;
            dismiss_info.raised=MagickTrue;
            XDrawBeveledButton(display,&windows->widget,&dismiss_info);
          }
        break;
      }
      case ClientMessage:
      {
        /*
          If client window delete message, exit.
        */
        if (event.xclient.message_type != windows->wm_protocols)
          break;
        if (*event.xclient.data.l == (int) windows->wm_take_focus)
          {
            (void) XSetInputFocus(display,event.xclient.window,RevertToParent,
              (Time) event.xclient.data.l[1]);
            break;
          }
        if (*event.xclient.data.l != (int) windows->wm_delete_window)
          break;
        if (event.xclient.window == windows->widget.id)
          {
            state|=ExitState;
            break;
          }
        break;
      }
      case ConfigureNotify:
      {
        /*
          Update widget configuration.
        */
        if (event.xconfigure.window != windows->widget.id)
          break;
        if ((event.xconfigure.width == (int) windows->widget.width) &&
            (event.xconfigure.height == (int) windows->widget.height))
          break;
        windows->widget.width=(unsigned int)
          MagickMax(event.xconfigure.width,(int) windows->widget.min_width);
        windows->widget.height=(unsigned int)
          MagickMax(event.xconfigure.height,(int) windows->widget.min_height);
        state|=UpdateConfigurationState;
        break;
      }
      case EnterNotify:
      {
        if (event.xcrossing.window != windows->widget.id)
          break;
        state&=(~InactiveWidgetState);
        break;
      }
      case Expose:
      {
        if (event.xexpose.window != windows->widget.id)
          break;
        if (event.xexpose.count != 0)
          break;
        state|=RedrawWidgetState;
        break;
      }
      case KeyPress:
      {
        static char
          command[MaxTextExtent];

        static KeySym
          key_symbol;

        /*
          Respond to a user key press.
        */
        if (event.xkey.window != windows->widget.id)
          break;
        (void) XLookupString((XKeyEvent *) &event.xkey,command,
          (int) sizeof(command),&key_symbol,(XComposeStatus *) NULL);
        if ((key_symbol == XK_Return) || (key_symbol == XK_KP_Enter))
          {
            /* Return/Enter acts as Dismiss */
            dismiss_info.raised=MagickFalse;
            XDrawBeveledButton(display,&windows->widget,&dismiss_info);
            state|=ExitState;
            break;
          }
        break;
      }
      case LeaveNotify:
      {
        if (event.xcrossing.window != windows->widget.id)
          break;
        state|=InactiveWidgetState;
        break;
      }
      case MotionNotify:
      {
        /*
          Discard pending button motion events.
        */
        while (XCheckMaskEvent(display,ButtonMotionMask,&event)) ;
        if (state & InactiveWidgetState)
          break;
        if (dismiss_info.raised == MatteIsActive(dismiss_info,event.xmotion))
          {
            /*
              Dismiss button status changed.
            */
            dismiss_info.raised=
              dismiss_info.raised == MagickFalse ? MagickTrue : MagickFalse;
            XDrawBeveledButton(display,&windows->widget,&dismiss_info);
            break;
          }
        break;
      }
      default:
        break;
    }
  } while ((state & ExitState) == 0);
  XSetCursorState(display,windows,MagickFalse);
  (void) XWithdrawWindow(display,windows->widget.id,windows->widget.screen);
  XCheckRefreshWindows(display,windows);
}
| 0
|
225,048
|
/*
 * PQconnectdbParams
 *
 * Synchronously open a connection described by the keyword/value arrays:
 * start it with PQconnectStartParams and, unless that already failed
 * outright, drive it to completion with connectDBComplete.  The result
 * may be NULL (presumably out-of-memory in PQconnectStartParams);
 * otherwise callers must still check PQstatus on the returned conn.
 */
PQconnectdbParams(const char *const *keywords,
                  const char *const *values,
                  int expand_dbname)
{
	PGconn *conn = PQconnectStartParams(keywords, values, expand_dbname);

	if (conn && conn->status != CONNECTION_BAD)
		(void) connectDBComplete(conn);

	return conn;
}
| 0
|
218,740
|
/*
 * Read a fixed-length string from the PSD blob into p.  When the image is
 * not big-endian, the bytes are reversed in place so the caller always
 * sees the string in file (big-endian) order.  Returns the byte count
 * actually read; the reversal only happens on a complete read.
 */
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *q;

      /*
        Reverse the buffer with a plain temporary swap; the previous
        XOR-swap trick was harder to read and breaks if the two pointers
        ever alias.
      */
      q=p+length;
      for(--q; p < q; ++p, --q)
      {
        char t = *p;
        *p = *q;
        *q = t;
      }
    }
  return(count);
}
| 0
|
244,138
|
/* Child-box callback for the TrackGroupBox ('trgr'): appends each child
 * (a track-group-type entry) to the groups list.
 * NOTE(review): BOX_FIELD_LIST_ASSIGN presumably handles the is_rem
 * (removal) case and list allocation -- confirm against the macro. */
GF_Err trgr_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem)
{
	GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s;
	BOX_FIELD_LIST_ASSIGN(groups)
	return gf_list_add(ptr->groups, a);
}
| 0
|
452,987
|
/* Netlink dump callback for the dup-netdev expression: emit the source
 * register holding the output device.  Returns 0 on success, -1 when the
 * attribute does not fit in the message. */
static int nft_dup_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	struct nft_dup_netdev *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
		return -1;

	return 0;
}
| 0
|
223,442
|
static SLJIT_INLINE void compile_control_verb_backtrackingpath(compiler_common *common, struct backtrack_common *current)
{
DEFINE_COMPILER;
PCRE2_UCHAR opcode = *current->cc;
struct sljit_label *loop;
struct sljit_jump *jump;
if (opcode == OP_THEN || opcode == OP_THEN_ARG)
{
if (common->then_trap != NULL)
{
SLJIT_ASSERT(common->control_head_ptr != 0);
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, type_then_trap);
OP1(SLJIT_MOV, TMP2, 0, SLJIT_IMM, common->then_trap->start);
jump = JUMP(SLJIT_JUMP);
loop = LABEL();
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(STACK_TOP), STACK(0));
JUMPHERE(jump);
CMPTO(SLJIT_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), STACK(1), TMP1, 0, loop);
CMPTO(SLJIT_NOT_EQUAL, SLJIT_MEM1(STACK_TOP), STACK(2), TMP2, 0, loop);
add_jump(compiler, &common->then_trap->quit, JUMP(SLJIT_JUMP));
return;
}
else if (!common->local_quit_available && common->in_positive_assertion)
{
add_jump(compiler, &common->positive_assertion_quit, JUMP(SLJIT_JUMP));
return;
}
}
if (common->local_quit_available)
{
/* Abort match with a fail. */
if (common->quit_label == NULL)
add_jump(compiler, &common->quit, JUMP(SLJIT_JUMP));
else
JUMPTO(SLJIT_JUMP, common->quit_label);
return;
}
if (opcode == OP_SKIP_ARG)
{
SLJIT_ASSERT(common->control_head_ptr != 0 && TMP1 == SLJIT_R0 && STR_PTR == SLJIT_R1);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr);
OP1(SLJIT_MOV, SLJIT_R1, 0, SLJIT_IMM, (sljit_sw)(current->cc + 2));
sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_ARGS2(W, W, W), SLJIT_IMM, SLJIT_FUNC_ADDR(do_search_mark));
OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_R0, 0);
add_jump(compiler, &common->reset_match, CMP(SLJIT_NOT_EQUAL, SLJIT_R0, 0, SLJIT_IMM, 0));
return;
}
if (opcode == OP_SKIP)
OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(STACK_TOP), STACK(0));
else
OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_IMM, 0);
add_jump(compiler, &common->reset_match, JUMP(SLJIT_JUMP));
}
| 0
|
513,182
|
/* Return a pointer to the char* slot of a session (thread-local) plugin
 * system variable, identified by its offset in the per-THD variable
 * block; the 'true' requests the session copy. */
static char *mysql_sys_var_char(THD* thd, int offset)
{
  return (char *) intern_sys_var_ptr(thd, offset, true);
}
| 0
|
427,177
|
/*
** Parse and generate code for a numeric 'for' loop:
**   fornum -> NAME = exp,exp[,exp] forbody
** Three internal "(for state)" locals (index, limit, step) are declared
** ahead of the user-visible variable 'varname'; all occupy consecutive
** registers starting at 'base'.  A missing step defaults to constant 1.
*/
static void fornum (LexState *ls, TString *varname, int line) {
  /* fornum -> NAME = exp,exp[,exp] forbody */
  FuncState *fs = ls->fs;
  int base = fs->freereg;
  new_localvarliteral(ls, "(for state)");
  new_localvarliteral(ls, "(for state)");
  new_localvarliteral(ls, "(for state)");
  new_localvar(ls, varname);
  checknext(ls, '=');
  exp1(ls);  /* initial value */
  checknext(ls, ',');
  exp1(ls);  /* limit */
  if (testnext(ls, ','))
    exp1(ls);  /* optional step */
  else {  /* default step = 1 */
    luaK_int(fs, fs->freereg, 1);
    luaK_reserveregs(fs, 1);
  }
  adjustlocalvars(ls, 3);  /* control variables */
  forbody(ls, base, line, 1, 0);
}
| 0
|
225,034
|
/*
 * PQconnectdb
 *
 * Synchronously open a connection described by the conninfo string:
 * start it with PQconnectStart and, unless that already failed, drive it
 * to completion with connectDBComplete.  The result may be NULL
 * (presumably out-of-memory in PQconnectStart); otherwise callers must
 * still check PQstatus on the returned conn.
 */
PQconnectdb(const char *conninfo)
{
	PGconn *conn = PQconnectStart(conninfo);

	if (conn && conn->status != CONNECTION_BAD)
		(void) connectDBComplete(conn);

	return conn;
}
| 0
|
484,714
|
MOBIBuffer * mobi_buffer_init_null(unsigned char *data, const size_t len) {
MOBIBuffer *buf = malloc(sizeof(MOBIBuffer));
if (buf == NULL) {
debug_print("%s", "Buffer allocation failed\n");
return NULL;
}
buf->data = data;
buf->offset = 0;
buf->maxlen = len;
buf->error = MOBI_SUCCESS;
return buf;
}
| 0
|
482,495
|
/* Read one character from a big-endian, little-endian or ASCII-8 file and
 * return it as a 16/32-bit value (or EOF).  The first two bytes of the
 * file are sniffed to detect a BOM or plain ASCII.
 *
 * Cleanups over the previous version: the stray empty statement after the
 * error break was removed, the unreachable break-after-return statements
 * were dropped, and the nested if now has explicit braces. */
getAChar(FileInfo *file) {
	/* Read a big endian, little endian or ASCII 8 file and convert it to
	 * 16- or 32-bit unsigned integers */
	int ch1 = 0, ch2 = 0;
	widechar character;
	if (file->encoding == ascii8) {
		if (file->status == 2) {
			/* second sniffed byte was plain ASCII: hand it back now */
			file->status++;
			return file->checkencoding[1];
		}
	}
	while ((ch1 = fgetc(file->in)) != EOF) {
		if (file->status < 2) file->checkencoding[file->status] = ch1;
		file->status++;
		if (file->status == 2) {
			/* both sniff bytes read: decide the encoding */
			if (file->checkencoding[0] == 0xfe && file->checkencoding[1] == 0xff)
				file->encoding = bigEndian;
			else if (file->checkencoding[0] == 0xff && file->checkencoding[1] == 0xfe)
				file->encoding = littleEndian;
			else if (file->checkencoding[0] < 128 && file->checkencoding[1] < 128) {
				file->encoding = ascii8;
				return file->checkencoding[0];
			} else {
				compileError(file,
						"encoding is neither big-endian, little-endian nor ASCII 8.");
				ch1 = EOF;
				break;
			}
			continue;
		}
		switch (file->encoding) {
		case noEncoding:
			break;
		case ascii8:
			return ch1;
		case bigEndian:
			ch2 = fgetc(file->in);
			if (ch2 == EOF) break;
			character = (widechar)(ch1 << 8) | ch2;
			return (int)character;
		case littleEndian:
			ch2 = fgetc(file->in);
			if (ch2 == EOF) break;
			character = (widechar)(ch2 << 8) | ch1;
			return (int)character;
		}
		if (ch1 == EOF || ch2 == EOF) break;
	}
	return EOF;
}
| 0
|
294,483
|
/* GC mark callback for Date objects: mark the VALUEs embedded in the
 * union, which differ between the simple and complex representations. */
d_lite_gc_mark(void *ptr)
{
    union DateData *dat = ptr;

    if (simple_dat_p(dat))
	rb_gc_mark(dat->s.nth);
    else {
	/* complex dates also carry the sub-second fraction VALUE */
	rb_gc_mark(dat->c.nth);
	rb_gc_mark(dat->c.sf);
    }
}
| 0
|
336,150
|
/* Configure an ip6gre tunnel device from its parameters: device
 * addresses, the flowi6 template used for transmit, point-to-point
 * capability flags, header lengths and -- when set_mtu is nonzero and the
 * remote is routable -- the device MTU derived from the underlying
 * device. */
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	int t_hlen;

	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	}

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;
	fl6->flowi6_proto = IPPROTO_GRE;

	/* unless inheriting from the inner packet, take tclass/flowlabel
	 * from the configured flowinfo */
	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT &&
	    p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->tun_hlen = gre_calc_hlen(t->parms.o_flags);

	t->hlen = t->encap_hlen + t->tun_hlen;

	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, strict);

		if (!rt)
			return;

		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len +
					       t_hlen;

			if (set_mtu) {
				/* leave room for tunnel headers, the
				 * optional encap-limit option and, for
				 * Ethernet tunnels, the inner MAC header */
				dev->mtu = rt->dst.dev->mtu - t_hlen;
				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
					dev->mtu -= 8;
				if (dev->type == ARPHRD_ETHER)
					dev->mtu -= ETH_HLEN;

				if (dev->mtu < IPV6_MIN_MTU)
					dev->mtu = IPV6_MIN_MTU;
			}
		}
		ip6_rt_put(rt);
	}
}
| 0
|
345,136
|
/* ioctl entry point for the PXA3xx graphics controller: RESET (performed
 * under the spinlock) and WAIT_IDLE are supported; 'arg' is unused. */
pxa3xx_gcu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned long flags;
	struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);

	switch (cmd) {
	case PXA3XX_GCU_IOCTL_RESET:
		spin_lock_irqsave(&priv->spinlock, flags);
		pxa3xx_gcu_reset(priv);
		spin_unlock_irqrestore(&priv->spinlock, flags);
		return 0;

	case PXA3XX_GCU_IOCTL_WAIT_IDLE:
		return pxa3xx_gcu_wait_idle(priv);
	}
	/* NOTE(review): unknown ioctls conventionally return -ENOTTY;
	 * -ENOSYS is kept here to preserve the existing ABI -- confirm
	 * before changing. */
	return -ENOSYS;
}
| 0
|
436,154
|
static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
__acquires(&req->ctx->completion_lock)
{
struct io_ring_ctx *ctx = req->ctx;
if (unlikely(req->task->flags & PF_EXITING))
WRITE_ONCE(poll->canceled, true);
if (!req->result && !READ_ONCE(poll->canceled)) {
struct poll_table_struct pt = { ._key = poll->events };
req->result = vfs_poll(req->file, &pt) & poll->events;
}
spin_lock_irq(&ctx->completion_lock);
if (!req->result && !READ_ONCE(poll->canceled)) {
add_wait_queue(poll->head, &poll->wait);
return true;
}
return false;
| 0
|
262,079
|
// Default-constructed key acts as a sentinel: both ids set to -1 ("unset").
InstanceFeatureDimKey() : instance(-1), feature_dim(-1) {}
| 0
|
221,674
|
// Construct a TCP (AF_INET, SOCK_STREAM) socket.  On failure errno is
// stashed in s_errno and the remaining state is left untouched; on
// success the address structures are zeroed and TCP_NODELAY is enabled.
Socket::Socket() {
    sck = socket(AF_INET, SOCK_STREAM, 0);
    if (sck < 0) {
        s_errno = errno;
    } else {
        memset(&my_adr, 0, sizeof my_adr);
        memset(&peer_adr, 0, sizeof peer_adr);
        my_adr.sin_family = AF_INET;
        peer_adr.sin_family = AF_INET;
        peer_adr_length = sizeof(struct sockaddr_in);
        int f = 1;
        // The previous "if (sck > 0)" guard was redundant (sck >= 0 is
        // guaranteed in this branch) and wrongly skipped the option when
        // the descriptor happened to be 0, which is a valid fd.
        setsockopt(sck, IPPROTO_TCP, TCP_NODELAY, &f, sizeof(int));
        my_port = 0;
        chunkError = false;
#ifdef __SSLMITM
        ssl = NULL;
        ctx = NULL;
        isssl = false;
        issslserver = false;
#else
        isssl = false;
#endif
    }
}
| 0
|
231,054
|
/* Place the calling task on the queue's waiting-to-receive event list if
 * the queue is currently empty.  Kernel-internal helper (see the comments
 * below for its special calling requirements); the task does not actually
 * block until the scheduler is unlocked. */
void vQueueWaitForMessageRestricted( QueueHandle_t xQueue,
                                     TickType_t xTicksToWait,
                                     const BaseType_t xWaitIndefinitely )
{
    Queue_t * const pxQueue = xQueue;

    /* This function should not be called by application code hence the
     * 'Restricted' in its name.  It is not part of the public API.  It is
     * designed for use by kernel code, and has special calling requirements.
     * It can result in vListInsert() being called on a list that can only
     * possibly ever have one item in it, so the list will be fast, but even
     * so it should be called with the scheduler locked and not from a critical
     * section. */

    /* Only do anything if there are no messages in the queue.  This function
     * will not actually cause the task to block, just place it on a blocked
     * list.  It will not block until the scheduler is unlocked - at which
     * time a yield will be performed.  If an item is added to the queue while
     * the queue is locked, and the calling task blocks on the queue, then the
     * calling task will be immediately unblocked when the queue is unlocked. */
    prvLockQueue( pxQueue );

    if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
    {
        /* There is nothing in the queue, block for the specified period. */
        vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    prvUnlockQueue( pxQueue );
}
| 0
|
482,558
|
/* Parse a pass-name: the maximal run of ASCII letters starting at
 * *passLinepos.  The name is copied into *name and *passLinepos is
 * advanced past it.  Always returns 1.
 *
 * Fixes over the previous version:
 *  - the old do/while dereferenced passLine->chars[*passLinepos] before
 *    checking the position against passLine->length, reading one element
 *    out of bounds when invoked at end of line;
 *  - characters are only stored while they fit in the fixed-size
 *    name->chars array, so an over-long name can no longer overflow it
 *    (excess letters are still consumed, just not stored).
 */
passGetName(CharsString *passLine, int *passLinepos, CharsString *name) {
	const size_t maxName = sizeof(name->chars) / sizeof(name->chars[0]);
	name->length = 0;
	// a name is a sequence of characters in the ranges 'a'..'z' and 'A'..'Z'
	while (*passLinepos < passLine->length) {
		widechar c = passLine->chars[*passLinepos];
		if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'))) break;
		if ((size_t)name->length < maxName) name->chars[name->length++] = c;
		(*passLinepos)++;
	}
	return 1;
}
| 0
|
200,781
|
cvtchar(register const char *sp)
/* convert a character (possibly written as a termcap-style escape such as
 * \072 or ^X) to a terminfo push (%'c' or %{n}); returns the number of
 * input characters consumed */
{
    unsigned char c = 0;
    int len;

    switch (*sp) {
    case '\\':
	switch (*++sp) {
	case '\'':
	case '$':
	case '\\':
	case '%':
	    c = UChar(*sp);
	    len = 2;
	    break;
	case '\0':
	    c = '\\';
	    len = 1;
	    break;
	case '0':
	case '1':
	case '2':
	case '3':
	    len = 1;
	    while (isdigit(UChar(*sp))) {
		c = UChar(8 * c + (*sp++ - '0'));
		len++;
	    }
	    break;
	default:
	    c = UChar(*sp);
	    len = (c != '\0') ? 2 : 1;
	    break;
	}
	break;
    case '^':
	len = 2;
	c = UChar(*++sp);
	if (c == '\0') {
	    /* Trailing '^' at end of string: the old code read the NUL,
	     * claimed two characters consumed and let the caller advance
	     * past the terminator.  Consume only the '^' itself. */
	    len = 1;
	} else if (c == '?') {
	    c = 127;
	} else {
	    c &= 0x1f;
	}
	break;
    default:
	c = UChar(*sp);
	len = (c != '\0') ? 1 : 0;
    }
    if (isgraph(c) && c != ',' && c != '\'' && c != '\\' && c != ':') {
	dp = save_string(dp, "%\'");
	dp = save_char(dp, c);
	dp = save_char(dp, '\'');
    } else if (c != '\0') {
	dp = save_string(dp, "%{");
	if (c > 99)
	    dp = save_char(dp, c / 100 + '0');
	if (c > 9)
	    dp = save_char(dp, ((int) (c / 10)) % 10 + '0');
	dp = save_char(dp, c % 10 + '0');
	dp = save_char(dp, '}');
    }
    return len;
}
| 1
|
225,863
|
/* Allocate a new SDP hint box.  ISOM_DECL_BOX_ALLOC presumably declares and
 * zero-initialises a local `tmp` of the given type with the given box type
 * code -- confirm against the macro definition. */
GF_Box *sdp_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_SDPBox, GF_ISOM_BOX_TYPE_SDP);
	return (GF_Box *)tmp;
}
| 0
|
294,441
|
/* Return the hour component of this Date/DateTime as a Fixnum.
 * get_d1 presumably unpacks the internal representation into `dat`. */
d_lite_hour(VALUE self)
{
    get_d1(self);
    return INT2FIX(m_hour(dat));
}
| 0
|
261,448
|
/* Map a luma sample position (x,y) to the raster-scan address of the CTB
 * (coding tree block) that contains it. */
LIBDE265_INLINE static int luma_pos_to_ctbAddrRS(const seq_parameter_set* sps, int x,int y)
{
  const int log2CtbSize = sps->Log2CtbSizeY;
  const int col = x >> log2CtbSize;   // CTB column
  const int row = y >> log2CtbSize;   // CTB row
  return row * sps->PicWidthInCtbsY + col;
}
| 0
|
450,380
|
/* Thin wrapper: append one byte to the client's output buffer.  Exists so
 * the ZRLE encoder can use a uniform zrle_write_* naming scheme. */
static void zrle_write_u8(VncState *vs, uint8_t value)
{
    vnc_write_u8(vs, value);
}
| 0
|
473,966
|
/* Return 1 if the UTF-16LE code unit at p is a newline, 0 otherwise.
 * Little-endian: the low byte comes first, so U+000A is {0x0a, 0x00}. */
utf16le_is_mbc_newline(const UChar* p, const UChar* end,
		       OnigEncoding enc ARG_UNUSED)
{
  if (p + 1 < end) {   /* need a full 2-byte code unit */
    if (*p == 0x0a && *(p+1) == 0x00)   /* U+000A LINE FEED */
      return 1;
#ifdef USE_UNICODE_ALL_LINE_TERMINATORS
    if ((
#ifndef USE_CRNL_AS_LINE_TERMINATOR
	 *p == 0x0d ||   /* U+000D CARRIAGE RETURN */
#endif
	 *p == 0x85) && *(p+1) == 0x00)   /* U+0085 NEXT LINE */
      return 1;
    /* U+2028 LINE SEPARATOR / U+2029 PARAGRAPH SEPARATOR */
    if (*(p+1) == 0x20 && (*p == 0x29 || *p == 0x28))
      return 1;
#endif
  }
  return 0;
}
| 0
|
234,249
|
/* Print VALUE, formatted for NUM_BYTES of significance, followed by a
 * space.  Formatting is delegated to dwarf_vmatoa_1. */
print_dwarf_vma (dwarf_vma value, unsigned num_bytes)
{
  printf ("%s ", dwarf_vmatoa_1 (NULL, value, num_bytes));
}
| 0
|
512,362
|
// Packed-TIME evaluation is not meaningful for this item type: assert in
// debug builds and return 0 in release builds.
longlong val_time_packed(THD *thd)
{
  DBUG_ASSERT(0);
  return 0;
}
| 0
|
384,813
|
/*
 * Put up a GUI file (or directory) selector dialog and return the chosen
 * path in allocated memory, or NULL when cancelled / unavailable.
 * Remembers the last used directory across calls.  In console mode this
 * only raises an error and returns NULL.
 */
do_browse(
    int flags,          // BROWSE_SAVE and BROWSE_DIR
    char_u *title,      // title for the window
    char_u *dflt,       // default file name (may include directory)
    char_u *ext,        // extension added
    char_u *initdir,    // initial directory, NULL for current dir or
                        // when using path from "dflt"
    char_u *filter,     // file name filter
    buf_T *buf)         // buffer to read/write for
{
    char_u *fname;
    static char_u *last_dir = NULL;    // last used directory
    char_u *tofree = NULL;
    int save_cmod_flags = cmdmod.cmod_flags;

    // Must turn off browse to avoid that autocommands will get the
    // flag too!
    cmdmod.cmod_flags &= ~CMOD_BROWSE;

    // Supply a default dialog title when the caller gave none.
    if (title == NULL || *title == NUL)
    {
	if (flags & BROWSE_DIR)
	    title = (char_u *)_("Select Directory dialog");
	else if (flags & BROWSE_SAVE)
	    title = (char_u *)_("Save File dialog");
	else
	    title = (char_u *)_("Open File dialog");
    }

    // When no directory specified, use default file name, default dir, buffer
    // dir, last dir or current dir
    if ((initdir == NULL || *initdir == NUL) && dflt != NULL && *dflt != NUL)
    {
	if (mch_isdir(dflt))		// default file name is a directory
	{
	    initdir = dflt;
	    dflt = NULL;
	}
	else if (gettail(dflt) != dflt)	// default file name includes a path
	{
	    tofree = vim_strsave(dflt);
	    if (tofree != NULL)
	    {
		initdir = tofree;
		*gettail(initdir) = NUL;
		dflt = gettail(dflt);
	    }
	}
    }

    if (initdir == NULL || *initdir == NUL)
    {
	// When 'browsedir' is a directory, use it
	if (STRCMP(p_bsdir, "last") != 0
		&& STRCMP(p_bsdir, "buffer") != 0
		&& STRCMP(p_bsdir, "current") != 0
		&& mch_isdir(p_bsdir))
	    initdir = p_bsdir;
	// When saving or 'browsedir' is "buffer", use buffer fname
	else if (((flags & BROWSE_SAVE) || *p_bsdir == 'b')
		&& buf != NULL && buf->b_ffname != NULL)
	{
	    // NOTE(review): the condition above checks buf->b_ffname but the
	    // code below reads curbuf->b_ffname -- presumably intentional
	    // upstream behavior; confirm before changing.
	    if (dflt == NULL || *dflt == NUL)
		dflt = gettail(curbuf->b_ffname);
	    tofree = vim_strsave(curbuf->b_ffname);
	    if (tofree != NULL)
	    {
		initdir = tofree;
		*gettail(initdir) = NUL;
	    }
	}
	// When 'browsedir' is "last", use dir from last browse
	else if (*p_bsdir == 'l')
	    initdir = last_dir;
	// When 'browsedir is "current", use current directory.  This is the
	// default already, leave initdir empty.
    }

# ifdef FEAT_GUI
    if (gui.in_use)		// when this changes, also adjust f_has()!
    {
	// Fall back to b:browsefilter / g:browsefilter / the built-in
	// default when no filter was passed in.
	if (filter == NULL
#  ifdef FEAT_EVAL
		&& (filter = get_var_value((char_u *)"b:browsefilter")) == NULL
		&& (filter = get_var_value((char_u *)"g:browsefilter")) == NULL
#  endif
	)
	    filter = BROWSE_FILTER_DEFAULT;
	if (flags & BROWSE_DIR)
	{
#  if defined(FEAT_GUI_GTK) || defined(MSWIN)
	    // For systems that have a directory dialog.
	    fname = gui_mch_browsedir(title, initdir);
#  else
	    // Generic solution for selecting a directory: select a file and
	    // remove the file name.
	    fname = gui_mch_browse(0, title, dflt, ext, initdir, (char_u *)"");
#  endif
#  if !defined(FEAT_GUI_GTK)
	    // Win32 adds a dummy file name, others return an arbitrary file
	    // name.  GTK+ 2 returns only the directory,
	    if (fname != NULL && *fname != NUL && !mch_isdir(fname))
	    {
		// Remove the file name.
		char_u *tail = gettail_sep(fname);

		if (tail == fname)
		    *tail++ = '.';	// use current dir
		*tail = NUL;
	    }
#  endif
	}
	else
	    fname = gui_mch_browse(flags & BROWSE_SAVE,
			 title, dflt, ext, initdir, (char_u *)_(filter));

	// We hang around in the dialog for a while, the user might do some
	// things to our files.  The Win32 dialog allows deleting or renaming
	// a file, check timestamps.
	need_check_timestamps = TRUE;
	did_check_timestamps = FALSE;
    }
    else
# endif
    {
	// TODO: non-GUI file selector here
	emsg(_(e_sorry_no_file_browser_in_console_mode));
	fname = NULL;
    }

    // keep the directory for next time
    if (fname != NULL)
    {
	vim_free(last_dir);
	last_dir = vim_strsave(fname);
	if (last_dir != NULL && !(flags & BROWSE_DIR))
	{
	    *gettail(last_dir) = NUL;
	    if (*last_dir == NUL)
	    {
		// filename only returned, must be in current dir
		vim_free(last_dir);
		last_dir = alloc(MAXPATHL);
		if (last_dir != NULL)
		    mch_dirname(last_dir, MAXPATHL);
	    }
	}
    }

    vim_free(tofree);
    cmdmod.cmod_flags = save_cmod_flags;

    return fname;
}
| 0
|
238,411
|
/* Verify that an access of `size` bytes at `reg` + `off` is properly
 * aligned for the pointer's type.  Packet pointers get special handling
 * (NET_IP_ALIGN); everything else goes through the generic check with a
 * human-readable description used in verifier error messages. */
static int check_ptr_alignment(struct bpf_verifier_env *env,
			       const struct bpf_reg_state *reg, int off,
			       int size, bool strict_alignment_once)
{
	bool strict = env->strict_alignment || strict_alignment_once;
	const char *pointer_desc = "";

	switch (reg->type) {
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
		/* Special case, because of NET_IP_ALIGN. Given metadata sits
		 * right in front, treat it the very same way.
		 */
		return check_pkt_ptr_alignment(env, reg, off, size, strict);
	case PTR_TO_FLOW_KEYS:
		pointer_desc = "flow keys ";
		break;
	case PTR_TO_MAP_KEY:
		pointer_desc = "key ";
		break;
	case PTR_TO_MAP_VALUE:
		pointer_desc = "value ";
		break;
	case PTR_TO_CTX:
		pointer_desc = "context ";
		break;
	case PTR_TO_STACK:
		pointer_desc = "stack ";
		/* The stack spill tracking logic in check_stack_write_fixed_off()
		 * and check_stack_read_fixed_off() relies on stack accesses being
		 * aligned.
		 */
		strict = true;
		break;
	case PTR_TO_SOCKET:
		pointer_desc = "sock ";
		break;
	case PTR_TO_SOCK_COMMON:
		pointer_desc = "sock_common ";
		break;
	case PTR_TO_TCP_SOCK:
		pointer_desc = "tcp_sock ";
		break;
	case PTR_TO_XDP_SOCK:
		pointer_desc = "xdp_sock ";
		break;
	default:
		break;
	}
	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
					   strict);
}
| 0
|
512,276
|
// Verify this item produces exactly `c` columns.  On mismatch, raise
// ER_OPERAND_COLUMNS and return true (error); otherwise return false.
bool check_cols(uint c)
{
  if (cols() == c)
    return false;
  my_error(ER_OPERAND_COLUMNS, MYF(0), c);
  return true;
}
| 0
|
242,969
|
/* Return the number of bytes still available in the current outgoing
 * datagram, or MBEDTLS_ERR_SSL_INTERNAL_ERROR if the write index has
 * somehow exceeded the datagram capacity (should never happen). */
static int ssl_get_remaining_space_in_datagram( mbedtls_ssl_context const *ssl )
{
    size_t const used  = ssl->out_left;
    size_t const limit = ssl_get_maximum_datagram_size( ssl );

    /* Sanity check: the write index must never exceed what a single
     * datagram can carry. */
    if( used > limit )
        return( MBEDTLS_ERR_SSL_INTERNAL_ERROR );

    return( (int) ( limit - used ) );
}
| 0
|
291,826
|
/* Initialise a path iterator for the given client session, selecting the
 * path-picking callback that matches the configured multipath policy. */
static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt)
{
	INIT_LIST_HEAD(&it->skip_list);
	it->clt = clt;
	it->i = 0;

	switch (clt->mp_policy) {
	case MP_POLICY_RR:
		it->next_path = get_next_path_rr;
		break;
	case MP_POLICY_MIN_INFLIGHT:
		it->next_path = get_next_path_min_inflight;
		break;
	default:
		/* any other policy value selects minimum-latency */
		it->next_path = get_next_path_min_latency;
		break;
	}
}
| 0
|
244,094
|
/* Destructor for the 'paen' box: the box owns no nested allocations, so
 * freeing the structure itself is sufficient. */
void paen_box_del(GF_Box *s)
{
	gf_free(s);
}
| 0
|
265,424
|
// Build a GroupInfo from a comment string: every "[name]" tag found in the
// comment is extracted and the names are joined with '-' to form the group
// name; lineNo is recorded verbatim.
//
// Improvements: the regex is now constructed once (static const) instead of
// on every call, and a dead `groupName.clear()` on a variable that was about
// to go out of scope has been removed.
static GroupInfo createGroup(std::string comment,int lineNo)
{
    GroupInfo groupInfo;
    std::string finalGroupName;
    // matches "[...]" non-greedily; capture group 1 is the tag content
    static const boost::regex kGroupTag("\\[(.*?)\\]");
    boost::match_results<std::string::const_iterator> match;

    while (boost::regex_search(comment, match, kGroupTag)) {
        const std::string groupName = match[1].str();
        if (finalGroupName.empty()) {
            finalGroupName = groupName;
        } else {
            finalGroupName = finalGroupName + "-" + groupName;
        }
        comment = match.suffix();   // continue after this tag
    }

    groupInfo.commentString = finalGroupName;
    groupInfo.lineNo = lineNo;
    return groupInfo;
}
| 0
|
226,338
|
/* Serialise a 'udta' (user data) box: write the box header, then each
 * child-box array recorded in the UDTA's map list.
 * NOTE(review): the closing brace of this function is missing from this
 * chunk -- presumably lost in extraction. */
GF_Err udta_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	u32 i;
	GF_UserDataMap *map;
	GF_UserDataBox *ptr = (GF_UserDataBox *)s;
	e = gf_isom_box_write_header(s, bs);
	if (e) return e;
	i=0;
	while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) {
		//warning: here we are not passing the actual "parent" of the list
		//but the UDTA box. The parent itself is not an box, we don't care about it
		e = gf_isom_box_array_write(s, map->boxes, bs);
		if (e) return e;
	}
	return GF_OK;
|
310,092
|
/* Print tic's option summary to stderr and exit with failure status.
 * The DATA macro just appends a newline to each literal so the table
 * concatenates into one static string. */
usage(void)
{
#define DATA(s) s "\n"
    static const char options_string[] =
    {
	DATA("Options:")
	DATA("  -0         format translation output all capabilities on one line")
	DATA("  -1         format translation output one capability per line")
#if NCURSES_XNAMES
	DATA("  -a         retain commented-out capabilities (sets -x also)")
#endif
	DATA("  -C         translate entries to termcap source form")
	DATA("  -D         print list of tic's database locations (first must be writable)")
	DATA("  -c         check only, validate input without compiling or translating")
	DATA("  -e<names>  translate/compile only entries named by comma-separated list")
	DATA("  -f         format complex strings for readability")
	DATA("  -G         format %{number} to %'char'")
	DATA("  -g         format %'char' to %{number}")
	DATA("  -I         translate entries to terminfo source form")
	DATA("  -K         translate entries to termcap source form with BSD syntax")
	DATA("  -L         translate entries to full terminfo source form")
	DATA("  -N         disable smart defaults for source translation")
	DATA("  -o<dir>    set output directory for compiled entry writes")
	DATA("  -Q[n]      dump compiled description")
	DATA("  -q    brief listing, removes headers")
	DATA("  -R<name>   restrict translation to given terminfo/termcap version")
	DATA("  -r         force resolution of all use entries in source translation")
	DATA("  -s         print summary statistics")
	DATA("  -T         remove size-restrictions on compiled description")
#if NCURSES_XNAMES
	DATA("  -t         suppress commented-out capabilities")
#endif
	DATA("  -U         suppress post-processing of entries")
	DATA("  -V         print version")
	DATA("  -W         wrap long strings according to -w[n] option")
	DATA("  -v[n]      set verbosity level")
	DATA("  -w[n]      set format width for translation output")
#if NCURSES_XNAMES
	DATA("  -x         treat unknown capabilities as user-defined")
#endif
	DATA("")
	DATA("Parameters:")
	DATA("  <file>     file to translate or compile")
    };
#undef DATA

    fprintf(stderr, "Usage: %s %s\n", _nc_progname, usage_string);
    fputs(options_string, stderr);
    ExitProgram(EXIT_FAILURE);
}
| 0
|
261,937
|
/* Base64-encode src into dst using the standard alphabet; padding behavior
 * is delegated to njs_encode_base64_core (final argument presumably enables
 * '=' padding -- confirm against the core routine). */
njs_encode_base64(njs_str_t *dst, const njs_str_t *src)
{
    static u_char   basis64[] =
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

    njs_encode_base64_core(dst, src, basis64, 1);
}
| 0
|
231,025
|
/* ISR-safe "give" for semaphores built on zero-item-size queues.  Increments
 * the semaphore count if space remains and reports (via
 * pxHigherPriorityTaskWoken) whether a context switch is required; never
 * wakes a task directly. */
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
                              BaseType_t * const pxHigherPriorityTaskWoken )
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t * const pxQueue = xQueue;

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
     * item size is 0.  Don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */

    configASSERT( pxQueue );

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
     * if the item size is not 0. */
    configASSERT( pxQueue->uxItemSize == 0 );

    /* Normally a mutex would not be given from an interrupt, especially if
     * there is a mutex holder, as priority inheritance makes no sense for an
     * interrupts, only tasks. */
    configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) );

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority.  Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority.  FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* When the queue is used to implement a semaphore no data is ever
         * moved through the queue but it is still valid to see if the queue 'has
         * space'. */
        if( uxMessagesWaiting < pxQueue->uxLength )
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR( pxQueue );

            /* A task can only have an inherited priority if it is a mutex
             * holder - and if there is a mutex holder then the mutex cannot be
             * given from an ISR.  As this is the ISR version of the function it
             * can be assumed there is no mutex holder and no need to determine if
             * priority disinheritance is needed.  Simply increase the count of
             * messages (semaphores) available. */
            pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;

            /* The event list is not altered if the queue is locked.  This will
             * be done when the queue is unlocked later. */
            if( cTxLock == queueUNLOCKED )
            {
                #if ( configUSE_QUEUE_SETS == 1 )
                {
                    if( pxQueue->pxQueueSetContainer != NULL )
                    {
                        if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
                        {
                            /* The semaphore is a member of a queue set, and
                             * posting to the queue set caused a higher priority
                             * task to unblock.  A context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                        {
                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                            {
                                /* The task waiting has a higher priority so
                                 * record that a context switch is required. */
                                if( pxHigherPriorityTaskWoken != NULL )
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                                else
                                {
                                    mtCOVERAGE_TEST_MARKER();
                                }
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                    {
                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                        {
                            /* The task waiting has a higher priority so record that a
                             * context switch is required. */
                            if( pxHigherPriorityTaskWoken != NULL )
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                configASSERT( cTxLock != queueINT8_MAX );

                pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
| 0
|
223,406
|
/* Try the range-based character class optimization first; if it does not
 * apply, fall back to the per-character optimization.  May destroy TMP1. */
static BOOL optimize_class(compiler_common *common, const sljit_u8 *bits, BOOL nclass, BOOL invert, jump_list **backtracks)
{
/* May destroy TMP1. */
if (optimize_class_ranges(common, bits, nclass, invert, backtracks))
  return TRUE;
return optimize_class_chars(common, bits, nclass, invert, backtracks);
}
| 0
|
234,729
|
/* Kernel-thread body: relocate the block group passed in `data` to repair an
 * I/O failure on a zoned filesystem.  Takes the balance exclusive-op slot
 * and reclaim_bgs_lock; re-looks-up the block group in case it vanished
 * while the thread was being scheduled. */
static int relocating_repair_kthread(void *data)
{
	struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 target;
	int ret = 0;

	target = cache->start;
	/* drop the reference taken by the caller; we re-lookup below */
	btrfs_put_block_group(cache);

	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
		btrfs_info(fs_info,
			   "zoned: skip relocating block group %llu to repair: EBUSY",
			   target);
		return -EBUSY;
	}

	mutex_lock(&fs_info->reclaim_bgs_lock);

	/* Ensure block group still exists */
	cache = btrfs_lookup_block_group(fs_info, target);
	if (!cache)
		goto out;

	if (!cache->relocating_repair)
		goto out;

	ret = btrfs_may_alloc_data_chunk(fs_info, target);
	if (ret < 0)
		goto out;

	btrfs_info(fs_info,
		   "zoned: relocating block group %llu to repair IO failure",
		   target);
	ret = btrfs_relocate_chunk(fs_info, target);

out:
	if (cache)
		btrfs_put_block_group(cache);
	mutex_unlock(&fs_info->reclaim_bgs_lock);
	btrfs_exclop_finish(fs_info);

	return ret;
}
| 0
|
220,423
|
/* Return the number of elements in the array `self` as an mruby integer. */
mrb_ary_size(mrb_state *mrb, mrb_value self)
{
  struct RArray *a = mrb_ary_ptr(self);

  return mrb_int_value(mrb, ARY_LEN(a));
}
| 0
|
512,593
|
// Base default: this item can be evaluated as an expression.  Presumably
// overridden to return false by item types that cannot -- confirm against
// the subclasses.
virtual bool is_evaluable_expression() const { return true; }
| 0
|
224,201
|
/* Remove `subscriber` from every topic it is subscribed to and clear its
 * subscription list.  Triggered topics are drained first when mayFlush is
 * set (it makes no sense to flush while closing a socket, so callers pass
 * false in that case). */
void unsubscribeAll(Subscriber *subscriber, bool mayFlush = true) {
    if (!subscriber) {
        return;
    }

    for (Topic *topic : subscriber->subscriptions) {
        /* Drain pending messages before detaching from a triggered topic,
         * unless the caller asked us not to flush. */
        if (topic->triggered && mayFlush) {
            drain();
        }

        /* Detach from the topic and prune now-empty branches. */
        topic->subs.erase(subscriber);
        trimTree(topic);
    }

    subscriber->subscriptions.clear();
}
| 0
|
512,347
|
// Resolve the handler for this item's date/time field type via the global
// field-type -> handler lookup.
const Type_handler *type_handler() const
{
  return Type_handler::get_handler_by_field_type(date_time_field_type);
}
| 0
|
338,095
|
// Decode a wasm resizable-limits record into the out-parameters.  The flags
// byte selects: presence of a maximum, shared memory, and 32- vs 64-bit
// index type; the size fields are read with the matching LEB width.
void WasmBinaryBuilder::getResizableLimits(Address& initial,
                                           Address& max,
                                           bool& shared,
                                           Type& indexType,
                                           Address defaultIfNoMax) {
  const auto limitFlags = getU32LEB();
  const bool hasMaximum = (limitFlags & BinaryConsts::HasMaximum) != 0;
  const bool sharedFlag = (limitFlags & BinaryConsts::IsShared) != 0;
  const bool memory64 = (limitFlags & BinaryConsts::Is64) != 0;

  // Initial size is always present; its width follows the index type.
  initial = memory64 ? getU64LEB() : getU32LEB();

  // The spec requires shared memories to declare a maximum.
  if (sharedFlag && !hasMaximum) {
    throwError("shared memory must have max size");
  }

  shared = sharedFlag;
  indexType = memory64 ? Type::i64 : Type::i32;

  if (hasMaximum) {
    max = memory64 ? getU64LEB() : getU32LEB();
  } else {
    max = defaultIfNoMax;
  }
}
| 0
|
240,268
|
/* Record `yreg` as the most recently used yank register. */
    set_y_previous(yankreg_T *yreg)
{
    y_previous = yreg;
}
| 0
|
206,625
|
/*
 * Write the start tag for `element`, emitting any namespace declarations
 * not already in scope, the element's attributes (in canonical order) and
 * an xml:lang attribute when present.  Returns 0 on success, non-zero on
 * error (all partially-built declaration strings are freed on error).
 *
 * Fixes vs. the previous version:
 *  - the duplicate-namespace check for attributes compared against
 *    element->attributes[j] (the inner loop index) instead of
 *    element->attributes[i]; with enough attributes this under-counted
 *    declarations and could overflow nspace_declarations (CVE-2017-18926);
 *  - the xml:lang buffer allocation is now checked for failure.
 */
raptor_xml_writer_start_element_common(raptor_xml_writer* xml_writer,
                                       raptor_xml_element* element,
                                       int auto_empty)
{
  raptor_iostream* iostr = xml_writer->iostr;
  raptor_namespace_stack *nstack = xml_writer->nstack;
  int depth = xml_writer->depth;
  int auto_indent = XML_WRITER_AUTO_INDENT(xml_writer);
  struct nsd *nspace_declarations = NULL;
  size_t nspace_declarations_count = 0;
  unsigned int i;

  /* max is 1 per element and 1 for each attribute + size of declared */
  if(nstack) {
    int nspace_max_count = element->attribute_count+1;
    if(element->declared_nspaces)
      nspace_max_count += raptor_sequence_size(element->declared_nspaces);
    if(element->xml_language)
      nspace_max_count++;

    nspace_declarations = RAPTOR_CALLOC(struct nsd*, nspace_max_count,
                                        sizeof(struct nsd));
    if(!nspace_declarations)
      return 1;
  }

  /* Declare the element's own namespace if it is not yet in scope */
  if(element->name->nspace) {
    if(nstack && !raptor_namespaces_namespace_in_scope(nstack, element->name->nspace)) {
      nspace_declarations[0].declaration=
        raptor_namespace_format_as_xml(element->name->nspace,
                                       &nspace_declarations[0].length);
      if(!nspace_declarations[0].declaration)
        goto error;
      nspace_declarations[0].nspace = element->name->nspace;
      nspace_declarations_count++;
    }
  }

  if(nstack && element->attributes) {
    for(i = 0; i < element->attribute_count; i++) {
      /* qname */
      if(element->attributes[i]->nspace) {
        /* Check if we need a namespace declaration attribute */
        if(nstack &&
           !raptor_namespaces_namespace_in_scope(nstack, element->attributes[i]->nspace) && element->attributes[i]->nspace != element->name->nspace) {
          /* not in scope and not same as element (so already going to be declared)*/
          unsigned int j;
          int declare_me = 1;

          /* check it wasn't an earlier declaration too.
           * FIX: compare against attribute i (the one being considered),
           * not attribute j -- the old code indexed with the inner loop
           * variable, breaking duplicate detection. */
          for(j = 0; j < nspace_declarations_count; j++)
            if(nspace_declarations[j].nspace == element->attributes[i]->nspace) {
              declare_me = 0;
              break;
            }

          if(declare_me) {
            nspace_declarations[nspace_declarations_count].declaration=
              raptor_namespace_format_as_xml(element->attributes[i]->nspace,
                                             &nspace_declarations[nspace_declarations_count].length);
            if(!nspace_declarations[nspace_declarations_count].declaration)
              goto error;
            nspace_declarations[nspace_declarations_count].nspace = element->attributes[i]->nspace;
            nspace_declarations_count++;
          }
        }
      }

      /* Add the attribute + value */
      nspace_declarations[nspace_declarations_count].declaration=
        raptor_qname_format_as_xml(element->attributes[i],
                                   &nspace_declarations[nspace_declarations_count].length);
      if(!nspace_declarations[nspace_declarations_count].declaration)
        goto error;
      nspace_declarations[nspace_declarations_count].nspace = NULL;
      nspace_declarations_count++;
    }
  }

  /* Namespaces declared explicitly on the element */
  if(nstack && element->declared_nspaces &&
     raptor_sequence_size(element->declared_nspaces) > 0) {
    for(i = 0; i< (unsigned int)raptor_sequence_size(element->declared_nspaces); i++) {
      raptor_namespace* nspace = (raptor_namespace*)raptor_sequence_get_at(element->declared_nspaces, i);
      unsigned int j;
      int declare_me = 1;

      /* check it wasn't an earlier declaration too */
      for(j = 0; j < nspace_declarations_count; j++)
        if(nspace_declarations[j].nspace == nspace) {
          declare_me = 0;
          break;
        }

      if(declare_me) {
        nspace_declarations[nspace_declarations_count].declaration=
          raptor_namespace_format_as_xml(nspace,
                                         &nspace_declarations[nspace_declarations_count].length);
        if(!nspace_declarations[nspace_declarations_count].declaration)
          goto error;
        nspace_declarations[nspace_declarations_count].nspace = nspace;
        nspace_declarations_count++;
      }
    }
  }

  /* Build the xml:lang="..." attribute string if a language is set */
  if(nstack && element->xml_language) {
    size_t lang_len = strlen(RAPTOR_GOOD_CAST(char*, element->xml_language));
#define XML_LANG_PREFIX_LEN 10
    size_t buf_length = XML_LANG_PREFIX_LEN + lang_len + 1;
    unsigned char* buffer = RAPTOR_MALLOC(unsigned char*, buf_length + 1);
    const char quote = '\"';
    unsigned char* p;

    /* FIX: check the allocation before writing through the pointer */
    if(!buffer)
      goto error;

    memcpy(buffer, "xml:lang=\"", XML_LANG_PREFIX_LEN);
    p = buffer + XML_LANG_PREFIX_LEN;
    p += raptor_xml_escape_string(xml_writer->world,
                                  element->xml_language, lang_len,
                                  p, buf_length, quote);
    *p++ = quote;
    *p = '\0';

    nspace_declarations[nspace_declarations_count].declaration = buffer;
    nspace_declarations[nspace_declarations_count].length = buf_length;
    nspace_declarations[nspace_declarations_count].nspace = NULL;
    nspace_declarations_count++;
  }

  raptor_iostream_write_byte('<', iostr);

  if(element->name->nspace && element->name->nspace->prefix_length > 0) {
    raptor_iostream_counted_string_write((const char*)element->name->nspace->prefix,
                                         element->name->nspace->prefix_length,
                                         iostr);
    raptor_iostream_write_byte(':', iostr);
  }

  raptor_iostream_counted_string_write((const char*)element->name->local_name,
                                       element->name->local_name_length,
                                       iostr);

  /* declare namespaces and attributes */
  if(nspace_declarations_count) {
    int need_indent = 0;

    /* sort them into the canonical order */
    qsort((void*)nspace_declarations,
          nspace_declarations_count, sizeof(struct nsd),
          raptor_xml_writer_nsd_compare);

    /* declare namespaces first */
    for(i = 0; i < nspace_declarations_count; i++) {
      if(!nspace_declarations[i].nspace)
        continue;

      if(auto_indent && need_indent) {
        /* indent attributes */
        raptor_xml_writer_newline(xml_writer);
        xml_writer->depth++;
        raptor_xml_writer_indent(xml_writer);
        xml_writer->depth--;
      }
      raptor_iostream_write_byte(' ', iostr);
      raptor_iostream_counted_string_write((const char*)nspace_declarations[i].declaration,
                                           nspace_declarations[i].length,
                                           iostr);
      RAPTOR_FREE(char*, nspace_declarations[i].declaration);
      nspace_declarations[i].declaration = NULL;

      need_indent = 1;

      if(raptor_namespace_stack_start_namespace(nstack,
                                                (raptor_namespace*)nspace_declarations[i].nspace,
                                                depth))
        goto error;
    }

    /* declare attributes */
    for(i = 0; i < nspace_declarations_count; i++) {
      if(nspace_declarations[i].nspace)
        continue;

      if(auto_indent && need_indent) {
        /* indent attributes */
        raptor_xml_writer_newline(xml_writer);
        xml_writer->depth++;
        raptor_xml_writer_indent(xml_writer);
        xml_writer->depth--;
      }
      raptor_iostream_write_byte(' ', iostr);
      raptor_iostream_counted_string_write((const char*)nspace_declarations[i].declaration,
                                           nspace_declarations[i].length,
                                           iostr);

      need_indent = 1;

      RAPTOR_FREE(char*, nspace_declarations[i].declaration);
      nspace_declarations[i].declaration = NULL;
    }
  }

  if(!auto_empty)
    raptor_iostream_write_byte('>', iostr);

  if(nstack)
    RAPTOR_FREE(stringarray, nspace_declarations);

  return 0;

  /* Clean up nspace_declarations on error */
  error:

  for(i = 0; i < nspace_declarations_count; i++) {
    if(nspace_declarations[i].declaration)
      RAPTOR_FREE(char*, nspace_declarations[i].declaration);
  }

  RAPTOR_FREE(stringarray, nspace_declarations);

  return 1;
}
| 1
|
369,334
|
/* Issue an madvise(2) request prepared into req->madvise.  Always punts to
 * the blocking path (returns -EAGAIN when called non-blocking) because
 * do_madvise() may sleep; compiled out to -EOPNOTSUPP when the kernel lacks
 * advise syscalls or an MMU. */
static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	struct io_madvise *ma = &req->madvise;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
	if (ret < 0)
		req_set_fail(req);
	io_req_complete(req, ret);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
| 0
|
224,165
|
// Pop the tuple stored under `key` from the staging map and emit the tensors
// selected by `indices` as the op's outputs.  Fails with InvalidArgument if
// the retrieved tuple size does not match the requested indices count.
void Compute(OpKernelContext* ctx) override {
  StagingMap<Ordered>* map = nullptr;
  OP_REQUIRES_OK(ctx, GetStagingMap(ctx, def(), &map));
  // Drop our reference to the map when this scope exits.
  core::ScopedUnref scope(map);

  typename StagingMap<Ordered>::Tuple tuple;

  const Tensor* key_tensor;
  const Tensor* indices_tensor;

  OP_REQUIRES_OK(ctx, ctx->input("key", &key_tensor));
  OP_REQUIRES_OK(ctx, ctx->input("indices", &indices_tensor));
  OP_REQUIRES_OK(ctx, map->get(key_tensor, indices_tensor, &tuple));

  OP_REQUIRES(
      ctx, tuple.size() == indices_tensor->NumElements(),
      errors::InvalidArgument("output/indices size mismatch: ", tuple.size(),
                              " vs. ", indices_tensor->NumElements()));

  for (std::size_t i = 0; i < tuple.size(); ++i) {
    ctx->set_output(i, tuple[i]);
  }
}
| 0
|
238,465
|
/* True when reg is a scalar whose value range has at least one known bound
 * (i.e. not completely unknown in both signed and unsigned domains). */
static bool register_is_bounded(struct bpf_reg_state *reg)
{
	return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
}
| 0
|
508,388
|
/*
  Assign table numbers/maps to all leaf tables of the query, set up merged
  derived tables and JTBM semi-join materialization tables, and precompute
  NATURAL/USING join row types.

  On the first optimization of a select the leaf list is built (or restored
  from the prepared-statement copy); on re-execution the saved per-table
  numbers/maps are reinstated instead.

  Returns false on success, true on error (an error has been reported).
*/
bool setup_tables(THD *thd, Name_resolution_context *context,
                  List<TABLE_LIST> *from_clause, TABLE_LIST *tables,
                  List<TABLE_LIST> &leaves, bool select_insert,
                  bool full_table_list)
{
  uint tablenr= 0;
  List_iterator<TABLE_LIST> ti(leaves);
  TABLE_LIST *table_list;
  DBUG_ENTER("setup_tables");

  DBUG_ASSERT ((select_insert && !tables->next_name_resolution_table) || !tables ||
               (context->table_list && context->first_name_resolution_table));
  /*
    this is used for INSERT ... SELECT.
    For select we setup tables except first (and its underlying tables)
  */
  TABLE_LIST *first_select_table= (select_insert ?
                                   tables->next_local:
                                   0);
  SELECT_LEX *select_lex= select_insert ? &thd->lex->select_lex :
                                          thd->lex->current_select;
  if (select_lex->first_cond_optimization)
  {
    leaves.empty();
    if (select_lex->prep_leaf_list_state != SELECT_LEX::SAVED)
    {
      make_leaves_list(thd, leaves, tables, full_table_list, first_select_table);
      select_lex->prep_leaf_list_state= SELECT_LEX::READY;
      select_lex->leaf_tables_exec.empty();
    }
    else
    {
      List_iterator_fast <TABLE_LIST> ti(select_lex->leaf_tables_prep);
      while ((table_list= ti++))
        leaves.push_back(table_list, thd->mem_root);
    }

    while ((table_list= ti++))
    {
      TABLE *table= table_list->table;
      if (table)
        table->pos_in_table_list= table_list;
      if (first_select_table &&
          table_list->top_table() == first_select_table)
      {
        /* new counting for SELECT of INSERT ... SELECT command */
        first_select_table= 0;
        thd->lex->select_lex.insert_tables= tablenr;
        tablenr= 0;
      }
      if(table_list->jtbm_subselect)
      {
        table_list->jtbm_table_no= tablenr;
      }
      else if (table)
      {
        table->pos_in_table_list= table_list;
        setup_table_map(table, table_list, tablenr);

        if (table_list->process_index_hints(table))
          DBUG_RETURN(1);
      }
      tablenr++;
    }
    if (tablenr > MAX_TABLES)
    {
      my_error(ER_TOO_MANY_TABLES,MYF(0), static_cast<int>(MAX_TABLES));
      DBUG_RETURN(1);
    }
  }
  else
  {
    /* Re-execution: restore the numbers/maps saved from the first run. */
    List_iterator_fast <TABLE_LIST> ti(select_lex->leaf_tables_exec);
    select_lex->leaf_tables.empty();
    while ((table_list= ti++))
    {
      if(table_list->jtbm_subselect)
      {
        table_list->jtbm_table_no= table_list->tablenr_exec;
      }
      else
      {
        table_list->table->tablenr= table_list->tablenr_exec;
        table_list->table->map= table_list->map_exec;
        table_list->table->maybe_null= table_list->maybe_null_exec;
        table_list->table->pos_in_table_list= table_list;
        if (table_list->process_index_hints(table_list->table))
          DBUG_RETURN(1);
      }
      select_lex->leaf_tables.push_back(table_list);
    }
  }

  for (table_list= tables;
       table_list;
       table_list= table_list->next_local)
  {
    if (table_list->merge_underlying_list)
    {
      DBUG_ASSERT(table_list->is_merged_derived());
      Query_arena *arena, backup;
      arena= thd->activate_stmt_arena_if_needed(&backup);
      bool res;
      res= table_list->setup_underlying(thd);
      if (arena)
        thd->restore_active_arena(arena, &backup);
      if (res)
        DBUG_RETURN(1);
    }

    if (table_list->jtbm_subselect)
    {
      Item *item= table_list->jtbm_subselect->optimizer;
      if (!table_list->jtbm_subselect->optimizer->fixed &&
          table_list->jtbm_subselect->optimizer->fix_fields(thd, &item))
      {
        my_error(ER_TOO_MANY_TABLES,MYF(0), static_cast<int>(MAX_TABLES)); /* psergey-todo: WHY ER_TOO_MANY_TABLES ???*/
        DBUG_RETURN(1);
      }
      DBUG_ASSERT(item == table_list->jtbm_subselect->optimizer);
    }
  }

  /* Precompute and store the row types of NATURAL/USING joins. */
  if (setup_natural_join_row_types(thd, from_clause, context))
    DBUG_RETURN(1);

  DBUG_RETURN(0);
}
| 0
|
230,301
|
/*
 * Array.prototype.sort(comparefn).  Collects defined, non-undefined values
 * into a slot array (fast path reads a fast-array's storage directly; the
 * slow path grows the slot array geometrically while reading properties),
 * sorts the slots, then writes them back followed by the collected
 * undefined values; on the slow path indices beyond the kept values are
 * deleted.  Returns the sorted `this` in vm->retval.
 */
njs_array_prototype_sort(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs,
    njs_index_t unused)
{
    int64_t                i, und, len, nlen, length;
    njs_int_t              ret, fast_path;
    njs_array_t            *array;
    njs_value_t            *this, *comparefn, *start, *strings;
    njs_array_sort_ctx_t   ctx;
    njs_array_sort_slot_t  *p, *end, *slots, *nslots;

    comparefn = njs_arg(args, nargs, 1);

    if (njs_is_defined(comparefn)) {
        if (njs_slow_path(!njs_is_function(comparefn))) {
            njs_type_error(vm, "comparefn must be callable or undefined");
            return NJS_ERROR;
        }

        ctx.function = njs_function(comparefn);

    } else {
        ctx.function = NULL;
    }

    this = njs_argument(args, 0);

    ret = njs_value_to_object(vm, this);
    if (njs_slow_path(ret != NJS_OK)) {
        return ret;
    }

    ret = njs_value_length(vm, this, &length);
    if (njs_slow_path(ret != NJS_OK)) {
        return ret;
    }

    /* Arrays of fewer than two elements are already sorted. */
    if (njs_slow_path(length < 2)) {
        vm->retval = *this;
        return NJS_OK;
    }

    slots = NULL;
    ctx.vm = vm;
    ctx.strings.separate = 0;
    ctx.strings.pointer = 0;
    ctx.exception = 0;

    fast_path = njs_is_fast_array(this);

    if (njs_fast_path(fast_path)) {
        array = njs_array(this);
        start = array->start;

        slots = njs_mp_alloc(vm->mem_pool,
                             sizeof(njs_array_sort_slot_t) * length);
        if (njs_slow_path(slots == NULL)) {
            return NJS_ERROR;
        }

        und = 0;
        p = slots;

        for (i = 0; i < length; i++) {
            /* A hole forces the generic property-based path. */
            if (njs_slow_path(!njs_is_valid(&start[i]))) {
                fast_path = 0;
                njs_mp_free(vm->mem_pool, slots);
                slots = NULL;
                goto slow_path;
            }

            if (njs_slow_path(njs_is_undefined(&start[i]))) {
                und++;
                continue;
            }

            p->value = start[i];
            p->pos = i;
            p->str = NULL;
            p++;
        }

        len = p - slots;

    } else {

slow_path:

        und = 0;
        p = NULL;
        end = NULL;

        for (i = 0; i < length; i++) {
            /* Grow the slot array geometrically (at least 8, at most length). */
            if (p >= end) {
                nlen = njs_min(njs_max((p - slots) * 2, 8), length);
                nslots = njs_mp_alloc(vm->mem_pool,
                                      sizeof(njs_array_sort_slot_t) * nlen);
                if (njs_slow_path(nslots == NULL)) {
                    njs_memory_error(vm);
                    return NJS_ERROR;
                }

                if (slots != NULL) {
                    p = (void *) njs_cpymem(nslots, slots,
                              sizeof(njs_array_sort_slot_t) * (p - slots));
                    njs_mp_free(vm->mem_pool, slots);

                } else {
                    p = nslots;
                }

                slots = nslots;
                end = slots + nlen;
            }

            ret = njs_value_property_i64(vm, this, i, &p->value);
            if (njs_slow_path(ret == NJS_ERROR)) {
                ret = NJS_ERROR;
                goto exception;
            }

            if (ret == NJS_DECLINED) {
                continue;
            }

            if (njs_is_undefined(&p->value)) {
                und++;
                continue;
            }

            p->pos = i;
            p->str = NULL;
            p++;
        }

        len = p - slots;
    }

    strings = njs_arr_init(vm->mem_pool, &ctx.strings, NULL, len + 1,
                           sizeof(njs_value_t));
    if (njs_slow_path(strings == NULL)) {
        ret = NJS_ERROR;
        goto exception;
    }

    njs_qsort(slots, len, sizeof(njs_array_sort_slot_t), njs_array_compare,
              &ctx);

    if (ctx.exception) {
        ret = NJS_ERROR;
        goto exception;
    }

    if (njs_fast_path(fast_path && njs_is_fast_array(this))) {
        array = njs_array(this);
        start = array->start;

        for (i = 0; i < len; i++) {
            start[i] = slots[i].value;
        }

        for (i = len; und-- > 0; i++) {
            start[i] = njs_value_undefined;
        }

    } else {
        /* Write back only slots that moved, then undefineds, then delete
         * the remaining trailing indices. */
        for (i = 0; i < len; i++) {
            if (slots[i].pos != i) {
                ret = njs_value_property_i64_set(vm, this, i, &slots[i].value);
                if (njs_slow_path(ret == NJS_ERROR)) {
                    goto exception;
                }
            }
        }

        for (i = len; und-- > 0; i++) {
            ret = njs_value_property_i64_set(vm, this, i,
                                         njs_value_arg(&njs_value_undefined));
            if (njs_slow_path(ret == NJS_ERROR)) {
                goto exception;
            }
        }

        for (; i < length; i++) {
            ret = njs_value_property_i64_delete(vm, this, i, NULL);
            if (njs_slow_path(ret == NJS_ERROR)) {
                goto exception;
            }
        }
    }

    vm->retval = *this;

    ret = NJS_OK;

exception:

    if (slots != NULL) {
        njs_mp_free(vm->mem_pool, slots);
    }

    njs_arr_destroy(&ctx.strings);

    return ret;
}
| 0
|
427,727
|
/* Read compound-file sector `id` into buf + offs.  Rejects sector ids whose
 * byte position would overflow size_t; `len` must equal the sector size. */
cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len,
    const cdf_header_t *h, cdf_secid_t id)
{
	size_t sector_size = CDF_SEC_SIZE(h);

	/* Guard the multiplication inside CDF_SEC_POS against overflow. */
	if (CAST(size_t, id) > SIZE_T_MAX / sector_size)
		return -1;

	size_t pos = CDF_SEC_POS(h, id);
	assert(sector_size == len);
	return cdf_read(info, CAST(off_t, pos),
	    RCAST(char *, buf) + offs, len);
}
| 0
|
310,012
|
/*
 * Character-output callback handed to tputs().  When an interrupt is
 * pending, bypass stdio and write the byte straight to the terminal so it
 * is not stranded in an unflushed buffer; otherwise go through putc().
 */
TPUTS_PROTO(outc, c)
{
    int result;

    if (!interrupted) {
	result = putc(c, stdout);
    } else {
	char byte = (char) c;
	/* Report EOF only if the raw write itself fails. */
	result = (write(STDOUT_FILENO, &byte, (size_t) 1) == -1) ? EOF : c;
    }
    TPUTS_RETURN(result);
}
| 0
|
267,920
|
/*
 * Convert a 5GS mobile-identity GUTI (wire representation) into the
 * internal NAS GUTI structure.  The PLMN id and AMF id are copied
 * verbatim; the M-TMSI arrives big-endian and is stored in host order.
 */
void ogs_nas_5gs_mobile_identity_guti_to_nas_guti(
    ogs_nas_5gs_mobile_identity_guti_t *mobile_identity_guti,
    ogs_nas_5gs_guti_t *nas_guti)
{
    const ogs_nas_5gs_mobile_identity_guti_t *src = mobile_identity_guti;
    ogs_nas_5gs_guti_t *dst = nas_guti;

    ogs_assert(src);
    ogs_assert(dst);

    /* Clear the destination first so unused/padding fields are zero. */
    memset(dst, 0, sizeof(*dst));

    dst->m_tmsi = be32toh(src->m_tmsi);
    memcpy(&dst->nas_plmn_id, &src->nas_plmn_id, OGS_PLMN_ID_LEN);
    memcpy(&dst->amf_id, &src->amf_id, sizeof(ogs_amf_id_t));
}
| 0
|
219,955
|
/*
 * REST endpoint: fetch one client module instance by name.
 * Responds 200 with the module JSON on success, 404 when the module does
 * not exist, 500 on any internal error.  Always continues the callback
 * chain.
 */
int callback_glewlwyd_get_client_module (const struct _u_request * request, struct _u_response * response, void * client_data) {
  struct config_elements * config = (struct config_elements *)client_data;
  json_t * j_module = get_client_module(config, u_map_get(request->map_url, "name"));

  if (check_result_value(j_module, G_ERROR_NOT_FOUND)) {
    response->status = 404;
  } else if (check_result_value(j_module, G_OK)) {
    ulfius_set_json_body_response(response, 200, json_object_get(j_module, "module"));
  } else {
    y_log_message(Y_LOG_LEVEL_ERROR, "callback_glewlwyd_get_client_module - Error get_client_module");
    response->status = 500;
  }
  json_decref(j_module);
  return U_CALLBACK_CONTINUE;
}
| 0
|
359,638
|
/* CLI handler for "bgp deterministic-med": sets the deterministic-MED
   flag on the BGP instance bound to this vty session.  The flag is
   consulted elsewhere by best-path selection.  */
DEFUN (bgp_deterministic_med,
       bgp_deterministic_med_cmd,
       "bgp deterministic-med",
       "BGP specific commands\n"
       "Pick the best-MED path among paths advertised from the neighboring AS\n")
{
  struct bgp *bgp;

  /* The BGP instance is stashed in the vty context by the enclosing node. */
  bgp = vty->index;
  bgp_flag_set (bgp, BGP_FLAG_DETERMINISTIC_MED);
  return CMD_SUCCESS;
}
| 0
|
238,435
|
/*
 * After a successful packet-pointer comparison (e.g. "if (r2 > pkt_end)"),
 * propagate the proven readable range to every register in every frame
 * that shares dst_reg's id.
 *
 * @vstate:           verifier state whose frames are updated
 * @dst_reg:          the packet pointer that was compared
 * @type:             packet pointer register type to match
 * @range_right_open: true when the branch proves the half-open range
 *                    [pkt, pkt+off) rather than the closed [pkt, pkt+off]
 *
 * Example: after "r2 = r3; r2 += 8; if (r2 > pkt_end) goto err", register
 * r3 (same id as r2, off=0) can be marked with range 8 (or 8-1 for the
 * right-open variant), making bytes [r3, r3+range) safe to access.
 */
static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
				   struct bpf_reg_state *dst_reg,
				   enum bpf_reg_type type,
				   bool range_right_open)
{
	int proven_range;
	int frame;

	/* A negative offset proves nothing; neither does off == 0 when the
	 * range is right-open (it would be empty).
	 */
	if (dst_reg->off < 0 ||
	    (dst_reg->off == 0 && range_right_open))
		return;

	/* Refuse when arithmetic could have wrapped: ptr + (1<<63) may
	 * compare below pkt_end only because it also wrapped below pkt.
	 */
	if (dst_reg->umax_value > MAX_PACKET_OFF ||
	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
		return;

	proven_range = dst_reg->off;
	if (range_right_open)
		proven_range++;

	/* dst_reg->off is known < MAX_PACKET_OFF, so it fits in a u16.
	 * Registers with a matching id share the same max_value, so their
	 * own fixed offsets cannot defeat the range check.
	 */
	for (frame = 0; frame <= vstate->curframe; frame++)
		__find_good_pkt_pointers(vstate->frame[frame], dst_reg, type,
					 proven_range);
}
| 0
|
412,106
|
/*
 * Allocate a fresh rrset (key plus data) in the given region, keyed on
 * the root name "." with the requested type and class.  The data block is
 * marked primary-noglue trust and insecure security.
 * Returns NULL (after logging) on allocation failure.
 */
new_rrset(struct regional* region, uint16_t rrtype, uint16_t rrclass)
{
	struct ub_packed_rrset_key* key;
	struct packed_rrset_data* data;

	key = regional_alloc_zero(region, sizeof(*key));
	if(!key) {
		log_err("out of memory");
		return NULL;
	}
	key->entry.key = key;

	data = regional_alloc_zero(region, sizeof(*data));
	if(!data) {
		log_err("out of memory");
		return NULL;
	}
	data->trust = rrset_trust_prim_noglue;
	data->security = sec_status_insecure;
	key->entry.data = data;

	/* The root dname is a single zero byte. */
	key->rk.dname = regional_alloc_zero(region, 1);
	if(!key->rk.dname) {
		log_err("out of memory");
		return NULL;
	}
	key->rk.dname_len = 1;
	key->rk.type = htons(rrtype);
	key->rk.rrset_class = htons(rrclass);
	return key;
}
| 0
|
366,170
|
/*
 * fc_mount - obtain the filesystem tree for a context and wrap it in a
 * vfsmount.  vfs_get_tree() leaves s_umount held for write on success;
 * release it before creating the mount.  Returns an ERR_PTR on failure.
 */
struct vfsmount *fc_mount(struct fs_context *fc)
{
	int ret;

	ret = vfs_get_tree(fc);
	if (ret)
		return ERR_PTR(ret);

	up_write(&fc->root->d_sb->s_umount);
	return vfs_create_mount(fc);
}
| 0
|
229,163
|
/*
 * Mark the host side of a virtio-serial port as connected and notify the
 * guest with a PORT_OPEN control event.  Opening an already-open port is
 * a harmless no-op.  Always returns 0.
 */
int virtio_serial_open(VirtIOSerialPort *port)
{
    if (!port->host_connected) {
        port->host_connected = true;
        /* Tell the guest the host end of this port is now open. */
        send_control_event(port->vser, port->id, VIRTIO_CONSOLE_PORT_OPEN, 1);
    }
    return 0;
}
| 0
|
289,304
|
/*
 * poll() handler for the OSS PCM emulation device.
 *
 * Reports EPOLLOUT|EPOLLWRNORM when the playback substream can accept
 * more data, and EPOLLIN|EPOLLRDNORM when the capture substream has data
 * available.  Additionally, a capture stream that was armed through the
 * OSS trigger mechanism but is not yet running is started here, so that a
 * poll for input actually gets the stream going.
 */
static __poll_t snd_pcm_oss_poll(struct file *file, poll_table * wait)
{
	struct snd_pcm_oss_file *pcm_oss_file;
	__poll_t mask;
	struct snd_pcm_substream *psubstream = NULL, *csubstream = NULL;

	pcm_oss_file = file->private_data;
	psubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
	csubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
	mask = 0;
	if (psubstream != NULL) {
		struct snd_pcm_runtime *runtime = psubstream->runtime;
		poll_wait(file, &runtime->sleep, wait);
		/* Writable unless draining, or running with no free space. */
		snd_pcm_stream_lock_irq(psubstream);
		if (runtime->status->state != SNDRV_PCM_STATE_DRAINING &&
		    (runtime->status->state != SNDRV_PCM_STATE_RUNNING ||
		     snd_pcm_oss_playback_ready(psubstream)))
			mask |= EPOLLOUT | EPOLLWRNORM;
		snd_pcm_stream_unlock_irq(psubstream);
	}
	if (csubstream != NULL) {
		struct snd_pcm_runtime *runtime = csubstream->runtime;
		snd_pcm_state_t ostate;
		poll_wait(file, &runtime->sleep, wait);
		snd_pcm_stream_lock_irq(csubstream);
		/* Snapshot the stream state while holding the stream lock. */
		ostate = runtime->status->state;
		if (ostate != SNDRV_PCM_STATE_RUNNING ||
		    snd_pcm_oss_capture_ready(csubstream))
			mask |= EPOLLIN | EPOLLRDNORM;
		snd_pcm_stream_unlock_irq(csubstream);
		if (ostate != SNDRV_PCM_STATE_RUNNING && runtime->oss.trigger) {
			/* Trigger was armed but the stream never started:
			 * start capture now, outside the stream lock, via a
			 * temporary file struct carrying only the capture
			 * substream.
			 */
			struct snd_pcm_oss_file ofile;
			memset(&ofile, 0, sizeof(ofile));
			ofile.streams[SNDRV_PCM_STREAM_CAPTURE] = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
			runtime->oss.trigger = 0;
			snd_pcm_oss_set_trigger(&ofile, PCM_ENABLE_INPUT);
		}
	}
	return mask;
}
| 0
|
384,786
|
/*
 * "exepath()" function: resolve the full path of an executable.
 * Leaves a NULL string in the return value when the executable cannot be
 * found (mch_can_exe() then does not set the pointer).
 */
f_exepath(typval_T *argvars, typval_T *rettv)
{
    char_u *path = NULL;

    /* Vim9 script demands a non-empty string argument. */
    if (in_vim9script() && check_for_nonempty_string_arg(argvars, 0) == FAIL)
	return;

    (void)mch_can_exe(tv_get_string(&argvars[0]), &path, TRUE);
    rettv->v_type = VAR_STRING;
    rettv->vval.v_string = path;
}
| 0
|
473,951
|
/*
 * Return the encoded byte length of `code` in this encoding: 1 for ASCII,
 * otherwise 2..4 depending on which high-byte lanes carry the 0x80 marker.
 * Returns 0 for values above 32 bits and an error code when no lane is
 * marked (the code point cannot be encoded).
 */
code_to_mbclen(OnigCodePoint code, OnigEncoding enc ARG_UNUSED)
{
  if (ONIGENC_IS_CODE_ASCII(code)) return 1;
  if (code > 0xffffffff) return 0;

  /* Each extra byte is signalled by the high bit of its byte lane. */
  if ((code & 0xff000000) >= 0x80000000) return 4;
  if ((code & 0xff0000) >= 0x800000) return 3;
  if ((code & 0xff00) >= 0x8000) return 2;

  return ONIGERR_INVALID_CODE_POINT_VALUE;
}
| 0
|
251,946
|
// Optimized binary op for the "fivefold" broadcast pattern, where the two
// input shapes decompose into interleaved broadcast / non-broadcast runs
// [y0][y1][y2][y3][y4].  `elementwise_f` processes a contiguous span of y4
// elements from both inputs; `scalar_broadcast_f` handles the degenerate
// y4 == 1 case by broadcasting a single input1 element over y3 elements of
// input2.  Which operand plays the "input1" role depends on the broadcast
// category (see the switch below).
inline void BinaryBroadcastFiveFold(const ArithmeticParams& unswitched_params,
                                    const RuntimeShape& unswitched_input1_shape,
                                    const T* unswitched_input1_data,
                                    const RuntimeShape& unswitched_input2_shape,
                                    const T* unswitched_input2_data,
                                    const RuntimeShape& output_shape,
                                    T* output_data, ElementwiseF elementwise_f,
                                    ScalarBroadcastF scalar_broadcast_f) {
  // Build a parameter block with the two inputs' quantization roles swapped,
  // so the loops below can always treat input1 as the broadcast-fast operand.
  ArithmeticParams switched_params = unswitched_params;
  switched_params.input1_offset = unswitched_params.input2_offset;
  switched_params.input1_multiplier = unswitched_params.input2_multiplier;
  switched_params.input1_shift = unswitched_params.input2_shift;
  switched_params.input2_offset = unswitched_params.input1_offset;
  switched_params.input2_multiplier = unswitched_params.input1_multiplier;
  switched_params.input2_shift = unswitched_params.input1_shift;

  const bool use_unswitched =
      unswitched_params.broadcast_category ==
      tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;

  const ArithmeticParams& params =
      use_unswitched ? unswitched_params : switched_params;
  const T* input1_data =
      use_unswitched ? unswitched_input1_data : unswitched_input2_data;
  const T* input2_data =
      use_unswitched ? unswitched_input2_data : unswitched_input1_data;

  // Fivefold nested loops. The second input resets its position for each
  // iteration of the second loop. The first input resets its position at the
  // beginning of the fourth loop. The innermost loop is an elementwise add of
  // sections of the arrays.
  T* output_data_ptr = output_data;
  const T* input1_data_ptr = input1_data;
  const T* input2_data_reset = input2_data;
  // In the fivefold pattern, y0, y2 and y4 are not broadcast, and so shared
  // between input shapes. y3 for input 1 is always broadcast, and so the
  // dimension there is 1, whereas optionally y1 might be broadcast for
  // input 2. Put another way, input1.shape.FlatSize = y0 * y1 * y2 * y4,
  // input2.shape.FlatSize = y0 * y2 * y3 * y4.
  int y0 = params.broadcast_shape[0];
  int y1 = params.broadcast_shape[1];
  int y2 = params.broadcast_shape[2];
  int y3 = params.broadcast_shape[3];
  int y4 = params.broadcast_shape[4];
  if (y4 > 1) {
    // General fivefold pattern, with y4 > 1 so there is a non-broadcast inner
    // dimension.
    for (int i0 = 0; i0 < y0; ++i0) {
      const T* input2_data_ptr = nullptr;
      for (int i1 = 0; i1 < y1; ++i1) {
        input2_data_ptr = input2_data_reset;
        for (int i2 = 0; i2 < y2; ++i2) {
          for (int i3 = 0; i3 < y3; ++i3) {
            elementwise_f(y4, params, input1_data_ptr, input2_data_ptr,
                          output_data_ptr);
            input2_data_ptr += y4;
            output_data_ptr += y4;
          }
          // We have broadcast y4 of input1 data y3 times, and now move on.
          input1_data_ptr += y4;
        }
      }
      // We have broadcast y2*y3*y4 of input2 data y1 times, and now move on.
      input2_data_reset = input2_data_ptr;
    }
  } else if (input1_data_ptr != nullptr) {
    // Special case of y4 == 1, in which the innermost loop is a single
    // element and can be combined with the next (y3) as an inner broadcast.
    //
    // Note that this handles the case of pure scalar broadcast when
    // y0 == y1 == y2 == 1. With low overhead it handles cases such as scalar
    // broadcast with batch (as y2 > 1).
    //
    // NOTE The process is the same as the above general case except
    // simplified for y4 == 1 and the loop over y3 is contained within the
    // AddScalarBroadcast function.
    for (int i0 = 0; i0 < y0; ++i0) {
      const T* input2_data_ptr = nullptr;
      for (int i1 = 0; i1 < y1; ++i1) {
        input2_data_ptr = input2_data_reset;
        for (int i2 = 0; i2 < y2; ++i2) {
          scalar_broadcast_f(y3, params, *input1_data_ptr, input2_data_ptr,
                             output_data_ptr);
          input2_data_ptr += y3;
          output_data_ptr += y3;
          input1_data_ptr += 1;
        }
      }
      input2_data_reset = input2_data_ptr;
    }
  }
}
| 0
|
317,000
|
/*
 * Parse one mount parameter and record it in the fs_context's security
 * blob.  Returns the negative error from fs_parse() when the parameter is
 * not recognized, a negative error from selinux_add_opt() on failure, or
 * 1 when the parameter was consumed (its string ownership moved to us).
 */
static int selinux_fs_context_parse_param(struct fs_context *fc,
					  struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;
	int rc;

	opt = fs_parse(fc, selinux_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	rc = selinux_add_opt(opt, param->string, &fc->security);
	if (rc)
		return rc;

	/* Option consumed: the string now belongs to the security blob. */
	param->string = NULL;
	return 1;
}
| 0
|
225,508
|
// Exchanges the fanout sets stored under `from_port` and `to_port`.
// The iterators are the results of prior lookups (may equal end()).  When
// both keys are present their values are swapped in place; when only one
// is present its value is moved under the other key and the old entry is
// erased; when neither is present this is a no-op.
void SwapFanoutsMapValues(FanoutsMap* fanouts,
                          const MutableGraphView::OutputPort& from_port,
                          const FanoutsMap::iterator& from_fanouts,
                          const MutableGraphView::OutputPort& to_port,
                          const FanoutsMap::iterator& to_fanouts) {
  const bool has_from = from_fanouts != fanouts->end();
  const bool has_to = to_fanouts != fanouts->end();
  if (has_from && has_to) {
    std::swap(from_fanouts->second, to_fanouts->second);
    return;
  }
  if (has_from) {
    fanouts->emplace(to_port, std::move(from_fanouts->second));
    fanouts->erase(from_port);
  } else if (has_to) {
    fanouts->emplace(from_port, std::move(to_fanouts->second));
    fanouts->erase(to_port);
  }
}
| 0
|
234,185
|
/* Display one DWARF-5 .debug_loclists location list that starts at OFFSET
   within SECTION, advancing *START_PTR past it.  When *VSTART_PTR is
   non-NULL a parallel location-views list is consumed through it.
   BASE_ADDRESS seeds DW_LLE_offset_pair entries and is updated in place by
   DW_LLE_base_address / DW_LLE_base_addressx entries.  HAS_FRAME_BASE
   tells us whether the owning DIE supplied DW_AT_frame_base, so we can
   flag expressions that need one.  */
display_loclists_list (struct dwarf_section * section,
		       unsigned char ** start_ptr,
		       unsigned int debug_info_entry,
		       dwarf_vma offset,
		       dwarf_vma base_address,
		       unsigned char ** vstart_ptr,
		       int has_frame_base)
{
  unsigned char * start = *start_ptr;
  unsigned char * vstart = *vstart_ptr;
  unsigned char * section_end = section->start + section->size;
  dwarf_vma cu_offset;
  unsigned int pointer_size;
  unsigned int offset_size;
  unsigned int dwarf_version;

  /* Initialize it due to a false compiler warning.  */
  dwarf_vma begin = -1, vbegin = -1;
  dwarf_vma end = -1, vend = -1;
  dwarf_vma length;
  int need_frame_base;

  if (debug_info_entry >= num_debug_info_entries)
    {
      warn (_("No debug information available for "
	      "loclists lists of entry: %u\n"),
	    debug_info_entry);
      return;
    }

  /* CU-level parameters control how addresses and expressions decode.  */
  cu_offset = debug_information [debug_info_entry].cu_offset;
  pointer_size = debug_information [debug_info_entry].pointer_size;
  offset_size = debug_information [debug_info_entry].offset_size;
  dwarf_version = debug_information [debug_info_entry].dwarf_version;

  if (pointer_size < 2 || pointer_size > 8)
    {
      warn (_("Invalid pointer size (%d) in debug info for entry %d\n"),
	    pointer_size, debug_info_entry);
      return;
    }

  while (1)
    {
      dwarf_vma off = offset + (start - *start_ptr);
      enum dwarf_location_list_entry_type llet;

      if (start + 1 > section_end)
	{
	  warn (_("Location list starting at offset 0x%lx is not terminated.\n"),
		(unsigned long) offset);
	  break;
	}
      printf (" ");
      print_dwarf_vma (off, 4);

      SAFE_BYTE_GET_AND_INC (llet, start, 1, section_end);

      /* Address-carrying entry kinds have a matching view pair in the
	 locviews list, when one was supplied.  */
      if (vstart && (llet == DW_LLE_offset_pair
		     || llet == DW_LLE_start_end
		     || llet == DW_LLE_start_length))
	{
	  off = offset + (vstart - *start_ptr);

	  READ_ULEB (vbegin, vstart, section_end);
	  print_dwarf_view (vbegin, pointer_size, 1);

	  READ_ULEB (vend, vstart, section_end);
	  print_dwarf_view (vend, pointer_size, 1);

	  printf (_("views at %8.8lx for:\n %*s "),
		  (unsigned long) off, 8, "");
	}

      switch (llet)
	{
	case DW_LLE_end_of_list:
	  printf (_("<End of list>\n"));
	  break;

	case DW_LLE_base_addressx:
	  /* Base address given as an index into .debug_addr.  */
	  READ_ULEB (base_address, start, section_end);
	  print_dwarf_vma (base_address, pointer_size);
	  printf (_("(index into .debug_addr) "));
	  base_address = fetch_indexed_addr (base_address, pointer_size);
	  print_dwarf_vma (base_address, pointer_size);
	  printf (_("(base address)\n"));
	  break;

	case DW_LLE_startx_endx:
	  /* Both endpoints are .debug_addr indices.  */
	  READ_ULEB (begin, start, section_end);
	  begin = fetch_indexed_addr (begin, pointer_size);
	  READ_ULEB (end, start, section_end);
	  end = fetch_indexed_addr (end, pointer_size);
	  break;

	case DW_LLE_startx_length:
	  READ_ULEB (begin, start, section_end);
	  begin = fetch_indexed_addr (begin, pointer_size);
	  READ_ULEB (end, start, section_end);
	  end += begin;
	  break;

	case DW_LLE_default_location:
	  begin = end = 0;
	  break;

	case DW_LLE_offset_pair:
	  /* Offsets are relative to the current base address.  */
	  READ_ULEB (begin, start, section_end);
	  begin += base_address;
	  READ_ULEB (end, start, section_end);
	  end += base_address;
	  break;

	case DW_LLE_base_address:
	  SAFE_BYTE_GET_AND_INC (base_address, start, pointer_size,
				 section_end);
	  print_dwarf_vma (base_address, pointer_size);
	  printf (_("(base address)\n"));
	  break;

	case DW_LLE_start_end:
	  SAFE_BYTE_GET_AND_INC (begin, start, pointer_size, section_end);
	  SAFE_BYTE_GET_AND_INC (end, start, pointer_size, section_end);
	  break;

	case DW_LLE_start_length:
	  SAFE_BYTE_GET_AND_INC (begin, start, pointer_size, section_end);
	  READ_ULEB (end, start, section_end);
	  end += begin;
	  break;

#ifdef DW_LLE_view_pair
	case DW_LLE_view_pair:
	  if (vstart)
	    printf (_("View pair entry in loclist with locviews attribute\n"));
	  READ_ULEB (vbegin, start, section_end);
	  print_dwarf_view (vbegin, pointer_size, 1);
	  READ_ULEB (vend, start, section_end);
	  print_dwarf_view (vend, pointer_size, 1);
	  printf (_("views for:\n"));
	  continue;
#endif

	default:
	  error (_("Invalid location list entry type %d\n"), llet);
	  return;
	}

      if (llet == DW_LLE_end_of_list)
	break;

      /* Base-address entries carry no expression; go read the next entry.  */
      if (llet == DW_LLE_base_address
	  || llet == DW_LLE_base_addressx)
	continue;

      if (start == section_end)
	{
	  warn (_("Location list starting at offset 0x%lx is not terminated.\n"),
		(unsigned long) offset);
	  break;
	}
      READ_ULEB (length, start, section_end);

      if (length > (size_t) (section_end - start))
	{
	  warn (_("Location list starting at offset 0x%lx is not terminated.\n"),
		(unsigned long) offset);
	  break;
	}

      print_dwarf_vma (begin, pointer_size);
      print_dwarf_vma (end, pointer_size);

      putchar ('(');
      need_frame_base = decode_location_expression (start,
						    pointer_size,
						    offset_size,
						    dwarf_version,
						    length,
						    cu_offset, section);
      putchar (')');

      if (need_frame_base && !has_frame_base)
	printf (_(" [without DW_AT_frame_base]"));

      if (begin == end && vbegin == vend)
	fputs (_(" (start == end)"), stdout);
      else if (begin > end || (begin == end && vbegin > vend))
	fputs (_(" (start > end)"), stdout);

      putchar ('\n');

      start += length;
      /* Reset so a stray trailing view pair can be detected below.  */
      vbegin = vend = -1;
    }

  if (vbegin != vm1 || vend != vm1)
    printf (_("Trailing view pair not used in a range"));

  *start_ptr = start;
  *vstart_ptr = vstart;
}
| 0
|
291,796
|
/*
 * Allocate and initialise one client path (session) towards a server.
 *
 * @clt:            owning client session (session name is copied from it)
 * @path:           source/destination addresses; src may be NULL
 * @con_num:        number of IRQ-mode data connections
 * @nr_poll_queues: number of poll-mode connections
 *
 * Allocates the path object, its connection array (con_num + poll queues
 * + 1 extra connection for user messages), statistics, and the per-CPU
 * multipath-skip list heads.  Returns the new path or an ERR_PTR; all
 * partial allocations are unwound on failure.
 */
static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
					const struct rtrs_addr *path,
					size_t con_num, u32 nr_poll_queues)
{
	struct rtrs_clt_path *clt_path;
	int err = -ENOMEM;
	int cpu;
	size_t total_con;

	clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL);
	if (!clt_path)
		goto err;

	/*
	 * irqmode and poll
	 * +1: Extra connection for user messages
	 */
	total_con = con_num + nr_poll_queues + 1;
	clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con),
				  GFP_KERNEL);
	if (!clt_path->s.con)
		goto err_free_path;

	clt_path->s.con_num = total_con;
	clt_path->s.irq_con_num = con_num + 1;

	clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL);
	if (!clt_path->stats)
		goto err_free_con;

	mutex_init(&clt_path->init_mutex);
	uuid_gen(&clt_path->s.uuid);
	memcpy(&clt_path->s.dst_addr, path->dst,
	       rdma_addr_size((struct sockaddr *)path->dst));

	/*
	 * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
	 * checks the sa_family to be non-zero. If user passed src_addr=NULL
	 * the sess->src_addr will contain only zeros, which is then fine.
	 */
	if (path->src)
		memcpy(&clt_path->s.src_addr, path->src,
		       rdma_addr_size((struct sockaddr *)path->src));
	strscpy(clt_path->s.sessname, clt->sessname,
		sizeof(clt_path->s.sessname));
	clt_path->clt = clt;
	clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS;
	init_waitqueue_head(&clt_path->state_wq);
	clt_path->state = RTRS_CLT_CONNECTING;
	atomic_set(&clt_path->connected_cnt, 0);
	INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
	INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
	rtrs_clt_init_hb(clt_path);

	/* Per-CPU list heads consulted by the multipath "skip" policy. */
	clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry));
	if (!clt_path->mp_skip_entry)
		goto err_free_stats;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu));

	err = rtrs_clt_init_stats(clt_path->stats);
	if (err)
		goto err_free_percpu;

	return clt_path;

	/* Error unwinding in reverse order of allocation. */
err_free_percpu:
	free_percpu(clt_path->mp_skip_entry);
err_free_stats:
	kfree(clt_path->stats);
err_free_con:
	kfree(clt_path->s.con);
err_free_path:
	kfree(clt_path);
err:
	return ERR_PTR(err);
}
| 0
|
405,332
|
/*
 * Scan one policy hash chain (under RCU) for a policy matching @fl with
 * the given type/family/direction/interface id, and weigh it against the
 * current best candidate @prefer.
 *
 * The chain is ordered by ascending priority, so the walk stops as soon
 * as an entry's priority exceeds the best one seen.  On equal priority
 * the earlier-inserted policy (smaller ->pos) wins.
 *
 * Returns the winning policy, @prefer if it remains best, NULL when no
 * match is found, or an ERR_PTR for a hard matching error.
 */
__xfrm_policy_eval_candidates(struct hlist_head *chain,
			      struct xfrm_policy *prefer,
			      const struct flowi *fl,
			      u8 type, u16 family, int dir, u32 if_id)
{
	u32 priority = prefer ? prefer->priority : ~0u;
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry_rcu(pol, chain, bydst) {
		int err;

		/* Chain is priority-sorted: nothing further can win. */
		if (pol->priority > priority)
			break;

		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
		if (err) {
			/* -ESRCH simply means "no match"; keep looking. */
			if (err != -ESRCH)
				return ERR_PTR(err);

			continue;
		}

		if (prefer) {
			/* matches. Is it older than *prefer? */
			if (pol->priority == priority &&
			    prefer->pos < pol->pos)
				return prefer;
		}

		return pol;
	}

	return NULL;
}
| 0
|
310,075
|
/*
 * Decide whether a screen-clearing operation would leave cells identical
 * to 'ch', i.e. whether we may use clear/erase instead of writing blanks.
 *
 * Without back_color_erase, clearing produces the terminal's default
 * colors, so when colors are active we require default-color mode with
 * default fg/bg, and any color pair on 'ch' must itself resolve to the
 * defaults.
 */
can_clear_with(NCURSES_SP_DCLx ARG_CH_T ch)
{
    if (!back_color_erase && SP_PARM->_coloron) {
#if NCURSES_EXT_FUNCS
	int pair;

	if (!SP_PARM->_default_color)
	    return FALSE;
	if (!(isDefaultColor(SP_PARM->_default_fg) &&
	      isDefaultColor(SP_PARM->_default_bg)))
	    return FALSE;
	if ((pair = GetPair(CHDEREF(ch))) != 0) {
	    NCURSES_COLOR_T fg, bg;
	    /* A nonzero pair is acceptable only if it maps to defaults. */
	    if (NCURSES_SP_NAME(pair_content) (NCURSES_SP_ARGx
					       (short) pair,
					       &fg, &bg) == ERR
		|| !(isDefaultColor(fg) && isDefaultColor(bg))) {
		return FALSE;
	    }
	}
#else
	/* Without extended functions: any color at all disqualifies. */
	if (AttrOfD(ch) & A_COLOR)
	    return FALSE;
#endif
    }
    /* Finally the cell must be blank with only "blank" attributes set. */
    return (ISBLANK(CHDEREF(ch)) &&
	    (AttrOfD(ch) & ~(NONBLANK_ATTR | A_COLOR)) == BLANK_ATTR);
}
| 0
|
474,440
|
/*
 * ObjectIsSequence()
 * Reports whether 'object' is any kind of sequence object: an HMAC
 * sequence, a hash sequence, or an event sequence.
 * Returns TRUE when any of the three sequence attributes is SET.
 */
ObjectIsSequence(
    OBJECT *object // IN: handle to be checked
)
{
    pAssert(object != NULL);
    return (object->attributes.hmacSeq == SET
            || object->attributes.hashSeq == SET
            || object->attributes.eventSeq == SET);
}
| 0
|
211,832
|
doit (struct query *z, int state)
{
char key[257];
char misc[20], header[12];
char *buf = 0, *cached = 0;
const char *whichserver = 0;
unsigned int rcode = 0;
unsigned int posanswers = 0;
unsigned int len = 0, cachedlen = 0;
uint16 numanswers = 0;
uint16 numauthority = 0;
unsigned int posauthority = 0;
uint16 numglue = 0;
unsigned int posglue = 0;
unsigned int pos = 0, pos2 = 0;
uint16 datalen = 0;
char *control = 0, *d = 0;
const char *dtype = 0;
unsigned int dlen = 0;
int flagout = 0, flagcname = 0;
int flagreferral = 0, flagsoa = 0;
int i = 0, j = 0, k = 0, p = 0, q = 0;
uint32 ttl = 0, soattl = 0, cnamettl = 0;
errno = error_io;
if (state == 1)
goto HAVEPACKET;
if (state == -1)
{
if (debug_level > 1)
log_servfail (z->name[z->level]);
goto SERVFAIL;
}
NEWNAME:
if (++z->loop == 100)
goto DIE;
d = z->name[z->level];
dtype = z->level ? DNS_T_A : z->type;
dlen = dns_domain_length (d);
if (globalip (d, misc))
{
if (z->level)
{
for (k = 0; k < 64; k += 4)
{
if (byte_equal (z->servers[z->level - 1] + k, 4, "\0\0\0\0"))
{
byte_copy (z->servers[z->level - 1] + k, 4, misc);
break;
}
}
goto LOWERLEVEL;
}
if (!rqa (z))
goto DIE;
if (typematch (DNS_T_A, dtype))
{
if (!response_rstart (d, DNS_T_A, 655360))
goto DIE;
if (!response_addbytes (misc, 4))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
}
cleanup (z);
return 1;
}
if (dns_domain_equal (d, "\0011\0010\0010\003127\7in-addr\4arpa\0"))
{
if (z->level)
goto LOWERLEVEL;
if (!rqa (z))
goto DIE;
if (typematch (DNS_T_PTR, dtype))
{
if (!response_rstart (d, DNS_T_PTR, 655360))
goto DIE;
if (!response_addname ("\011localhost\0"))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
}
cleanup (z);
if (debug_level > 2)
log_stats ();
return 1;
}
if (dlen <= 255)
{
byte_copy (key, 2, DNS_T_ANY);
byte_copy (key + 2, dlen, d);
case_lowerb (key + 2, dlen);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached)
{
if (debug_level > 2)
log_cachednxdomain (d);
goto NXDOMAIN;
}
byte_copy (key, 2, DNS_T_CNAME);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached)
{
if (typematch (DNS_T_CNAME, dtype))
{
if (debug_level > 2)
log_cachedanswer (d, DNS_T_CNAME);
if (!rqa (z))
goto DIE;
if (!response_cname (z->name[0], cached, ttl))
goto DIE;
cleanup (z);
return 1;
}
if (debug_level > 2)
log_cachedcname (d, cached);
if (!dns_domain_copy (&cname, cached))
goto DIE;
goto CNAME;
}
if (typematch (DNS_T_NS, dtype))
{
byte_copy (key, 2, DNS_T_NS);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && (cachedlen || byte_diff (dtype, 2, DNS_T_ANY)))
{
if (debug_level > 2)
log_cachedanswer (d, DNS_T_NS);
if (!rqa (z))
goto DIE;
pos = 0;
while ((pos=dns_packet_getname (cached, cachedlen, pos, &t2)))
{
if (!response_rstart (d, DNS_T_NS, ttl))
goto DIE;
if (!response_addname (t2))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
}
cleanup (z);
return 1;
}
}
if (typematch (DNS_T_PTR, dtype))
{
byte_copy (key, 2, DNS_T_PTR);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && (cachedlen || byte_diff(dtype, 2, DNS_T_ANY)))
{
if (debug_level > 2)
log_cachedanswer (d, DNS_T_PTR);
if (!rqa (z))
goto DIE;
pos = 0;
while ((pos=dns_packet_getname (cached, cachedlen, pos, &t2)))
{
if (!response_rstart (d, DNS_T_PTR, ttl))
goto DIE;
if (!response_addname (t2))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
}
cleanup(z);
return 1;
}
}
if (typematch (DNS_T_MX, dtype))
{
byte_copy (key, 2, DNS_T_MX);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && (cachedlen || byte_diff (dtype, 2, DNS_T_ANY)))
{
if (debug_level > 2)
log_cachedanswer (d, DNS_T_MX);
if (!rqa (z))
goto DIE;
pos = 0;
while ((pos=dns_packet_copy (cached, cachedlen, pos, misc, 2)))
{
pos = dns_packet_getname (cached, cachedlen, pos, &t2);
if (!pos)
break;
if (!response_rstart (d, DNS_T_MX, ttl))
goto DIE;
if (!response_addbytes (misc, 2))
goto DIE;
if (!response_addname (t2))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
}
cleanup (z);
return 1;
}
}
if (typematch (DNS_T_A, dtype))
{
byte_copy (key,2,DNS_T_A);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && (cachedlen || byte_diff (dtype, 2, DNS_T_ANY)))
{
if (z->level)
{
if (debug_level > 2)
log_cachedanswer (d, DNS_T_A);
while (cachedlen >= 4)
{
for (k = 0; k < 64; k += 4)
{
if (byte_equal (z->servers[z->level - 1] + k,
4, "\0\0\0\0"))
{
byte_copy (z->servers[z->level - 1] + k,
4, cached);
break;
}
}
cached += 4;
cachedlen -= 4;
}
goto LOWERLEVEL;
}
if (debug_level > 2)
log_cachedanswer (d, DNS_T_A);
if (!rqa (z))
goto DIE;
while (cachedlen >= 4)
{
if (!response_rstart (d, DNS_T_A, ttl))
goto DIE;
if (!response_addbytes (cached, 4))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
cached += 4;
cachedlen -= 4;
}
cleanup (z);
return 1;
}
}
if (!typematch (DNS_T_ANY, dtype)
&& !typematch (DNS_T_AXFR, dtype)
&& !typematch (DNS_T_CNAME, dtype)
&& !typematch (DNS_T_NS, dtype)
&& !typematch (DNS_T_PTR, dtype)
&& !typematch (DNS_T_A, dtype)
&& !typematch (DNS_T_MX, dtype))
{
byte_copy (key, 2, dtype);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && (cachedlen || byte_diff (dtype, 2, DNS_T_ANY)))
{
if (debug_level > 2)
log_cachedanswer (d, dtype);
if (!rqa (z))
goto DIE;
while (cachedlen >= 2)
{
uint16_unpack_big (cached, &datalen);
cached += 2;
cachedlen -= 2;
if (datalen > cachedlen)
goto DIE;
if (!response_rstart (d, dtype, ttl))
goto DIE;
if (!response_addbytes (cached, datalen))
goto DIE;
response_rfinish (RESPONSE_ANSWER);
cached += datalen;
cachedlen -= datalen;
}
cleanup (z);
return 1;
}
}
}
for (;;)
{
if (roots (z->servers[z->level], d))
{
for (j = 0; j < QUERY_MAXNS; ++j)
dns_domain_free (&z->ns[z->level][j]);
z->control[z->level] = d;
break;
}
if (!flagforwardonly && (z->level < 2))
{
if (dlen < 255)
{
byte_copy (key,2,DNS_T_NS);
byte_copy (key + 2,dlen,d);
case_lowerb (key + 2,dlen);
cached = cache_get (key, dlen + 2, &cachedlen, &ttl);
if (cached && cachedlen)
{
z->control[z->level] = d;
byte_zero (z->servers[z->level],64);
for (j = 0; j < QUERY_MAXNS; ++j)
dns_domain_free (&z->ns[z->level][j]);
j = pos = 0;
pos = dns_packet_getname (cached, cachedlen, pos, &t1);
while (pos)
{
if (debug_level > 2)
log_cachedns (d, t1);
if (j < QUERY_MAXNS)
if (!dns_domain_copy (&z->ns[z->level][j++], t1))
goto DIE;
pos = dns_packet_getname (cached, cachedlen, pos, &t1);
}
break;
}
}
}
if (!*d)
goto DIE;
j = 1 + (unsigned int) (unsigned char) *d;
dlen -= j;
d += j;
}
HAVENS:
for (j = 0; j < QUERY_MAXNS; ++j)
{
if (z->ns[z->level][j])
{
if (z->level + 1 < QUERY_MAXLEVEL)
{
int dc = dns_domain_copy (&z->name[z->level + 1],
z->ns[z->level][j]);
if (!dc)
goto DIE;
dns_domain_free (&z->ns[z->level][j]);
++z->level;
goto NEWNAME;
}
dns_domain_free (&z->ns[z->level][j]);
}
}
for (j = 0; j < 64; j += 4)
if (byte_diff (z->servers[z->level] + j, 4, "\0\0\0\0"))
break;
if (j == 64)
goto SERVFAIL;
dns_sortip (z->servers[z->level], 64);
if (z->level)
{
if (debug_level > 2)
log_tx (z->name[z->level], DNS_T_A,
z->control[z->level], z->servers[z->level],z->level);
if (dns_transmit_start (&z->dt, z->servers[z->level], flagforwardonly,
z->name[z->level], DNS_T_A,z->localip) == -1)
goto DIE;
}
else
{
if (debug_level > 2)
log_tx (z->name[0], z->type, z->control[0], z->servers[0], 0);
if (dns_transmit_start (&z->dt, z->servers[0], flagforwardonly,
z->name[0], z->type, z->localip) == -1)
goto DIE;
}
return 0;
LOWERLEVEL:
dns_domain_free (&z->name[z->level]);
for (j = 0; j < QUERY_MAXNS; ++j)
dns_domain_free (&z->ns[z->level][j]);
--z->level;
goto HAVENS;
HAVEPACKET:
if (++z->loop == 100)
goto DIE;
buf = z->dt.packet;
len = z->dt.packetlen;
whichserver = z->dt.servers + 4 * z->dt.curserver;
control = z->control[z->level];
d = z->name[z->level];
dtype = z->level ? DNS_T_A : z->type;
if (!(pos = dns_packet_copy (buf, len, 0, header, 12)))
goto DIE;
if (!(pos = dns_packet_skipname (buf, len, pos)))
goto DIE;
pos += 4;
posanswers = pos;
uint16_unpack_big (header + 6, &numanswers);
uint16_unpack_big (header + 8, &numauthority);
uint16_unpack_big (header + 10, &numglue);
rcode = header[3] & 15;
if (rcode && (rcode != 3))
goto DIE; /* impossible; see irrelevant() */
flagsoa = soattl = cnamettl = 0;
flagout = flagcname = flagreferral = 0;
for (j = 0; j < numanswers; ++j)
{
pos = dns_packet_getname (buf, len, pos, &t1);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
if (dns_domain_equal (t1, d))
{
if (byte_equal (header + 2, 2, DNS_C_IN))
{
/* should always be true */
if (typematch (header, dtype))
flagout = 1;
else if (typematch (header, DNS_T_CNAME))
{
if (!dns_packet_getname (buf, len, pos, &cname))
goto DIE;
flagcname = 1;
cnamettl = ttlget (header + 4);
}
}
}
uint16_unpack_big (header + 8, &datalen);
pos += datalen;
}
posauthority = pos;
for (j = 0; j < numauthority; ++j)
{
pos = dns_packet_getname (buf, len, pos, &t1);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
if (typematch (header, DNS_T_SOA))
{
flagsoa = 1;
soattl = ttlget (header + 4);
if (soattl > 3600)
soattl = 3600;
}
else if (typematch (header, DNS_T_NS))
{
flagreferral = 1;
if (!dns_domain_copy (&referral, t1))
goto DIE;
}
uint16_unpack_big (header + 8, &datalen);
pos += datalen;
}
posglue = pos;
if (!flagcname && !rcode && !flagout && flagreferral && !flagsoa)
{
if (dns_domain_equal (referral, control)
|| !dns_domain_suffix (referral, control))
{
if (debug_level > 2)
log_lame (whichserver, control, referral);
byte_zero (whichserver, 4);
goto HAVENS;
}
}
if (records)
{
alloc_free (records);
records = 0;
}
k = numanswers + numauthority + numglue;
records = (unsigned int *) alloc (k * sizeof (unsigned int));
if (!records)
goto DIE;
pos = posanswers;
for (j = 0; j < k; ++j)
{
records[j] = pos;
pos = dns_packet_getname (buf, len, pos, &t1);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
uint16_unpack_big (header + 8, &datalen);
pos += datalen;
}
i = j = k;
while (j > 1)
{
if (i > 1)
{
--i;
pos = records[i - 1];
}
else
{
pos = records[j - 1];
records[j - 1] = records[i - 1];
--j;
}
q = i;
while ((p = q * 2) < j)
{
if (!smaller (buf, len, records[p], records[p - 1]))
++p;
records[q - 1] = records[p - 1];
q = p;
}
if (p == j)
{
records[q - 1] = records[p - 1];
q = p;
}
while ((q > i) && smaller (buf, len, records[(p = q/2) - 1], pos))
{
records[q - 1] = records[p - 1];
q = p;
}
records[q - 1] = pos;
}
i = 0;
while (i < k)
{
char type[2];
if (!(pos = dns_packet_getname (buf, len, records[i], &t1)))
goto DIE;
if (!(pos = dns_packet_copy (buf, len, pos, header, 10)))
goto DIE;
ttl = ttlget (header + 4);
byte_copy (type, 2, header);
if (byte_diff (header + 2, 2, DNS_C_IN))
{
++i;
continue;
}
for (j = i + 1; j < k; ++j)
{
pos = dns_packet_getname (buf, len, records[j], &t2);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
if (!dns_domain_equal (t1, t2))
break;
if (byte_diff (header, 2, type))
break;
if (byte_diff (header + 2, 2, DNS_C_IN))
break;
}
if (!dns_domain_suffix (t1, control))
{
i = j;
continue;
}
if (!roots_same (t1, control))
{
i = j;
continue;
}
if (byte_equal (type, 2, DNS_T_ANY))
;
else if (byte_equal(type, 2, DNS_T_AXFR))
;
else if (byte_equal (type, 2, DNS_T_SOA))
{
while (i < j)
{
pos = dns_packet_skipname (buf, len, records[i]);
if (!pos)
goto DIE;
pos = dns_packet_getname (buf, len, pos + 10, &t2);
if (!pos)
goto DIE;
pos = dns_packet_getname (buf, len, pos, &t3);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, misc, 20);
if (!pos)
goto DIE;
if (records[i] < posauthority && debug_level > 2)
log_rrsoa (whichserver, t1, t2, t3, misc, ttl);
++i;
}
}
else if (byte_equal (type, 2, DNS_T_CNAME))
{
pos = dns_packet_skipname (buf, len, records[j - 1]);
if (!pos)
goto DIE;
pos = dns_packet_getname (buf, len, pos + 10, &t2);
if (!pos)
goto DIE;
if (debug_level > 2)
log_rrcname (whichserver, t1, t2, ttl);
cachegeneric (DNS_T_CNAME, t1, t2, dns_domain_length (t2), ttl);
}
else if (byte_equal (type, 2, DNS_T_PTR))
{
save_start ();
while (i < j)
{
pos = dns_packet_skipname (buf, len, records[i]);
if (!pos)
goto DIE;
pos = dns_packet_getname (buf, len, pos + 10, &t2);
if (!pos)
goto DIE;
if (debug_level > 2)
log_rrptr (whichserver, t1, t2, ttl);
save_data (t2, dns_domain_length (t2));
++i;
}
save_finish (DNS_T_PTR, t1, ttl);
}
else if (byte_equal (type, 2, DNS_T_NS))
{
save_start ();
while (i < j)
{
pos = dns_packet_skipname (buf, len, records[i]);
if (!pos)
goto DIE;
pos = dns_packet_getname (buf, len, pos + 10, &t2);
if (!pos)
goto DIE;
if (debug_level > 2)
log_rrns (whichserver, t1, t2, ttl);
save_data (t2, dns_domain_length (t2));
++i;
}
save_finish (DNS_T_NS, t1, ttl);
}
else if (byte_equal (type, 2, DNS_T_MX))
{
save_start ();
while (i < j)
{
pos = dns_packet_skipname (buf, len, records[i]);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos + 10, misc, 2);
if (!pos)
goto DIE;
pos = dns_packet_getname (buf, len, pos, &t2);
if (!pos)
goto DIE;
if (debug_level > 2)
log_rrmx (whichserver, t1, t2, misc, ttl);
save_data (misc, 2);
save_data (t2, dns_domain_length (t2));
++i;
}
save_finish (DNS_T_MX, t1, ttl);
}
else if (byte_equal (type, 2, DNS_T_A))
{
save_start ();
while (i < j)
{
pos = dns_packet_skipname (buf, len, records[i]);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
if (byte_equal (header + 8, 2, "\0\4"))
{
pos = dns_packet_copy (buf, len, pos, header, 4);
if (!pos)
goto DIE;
save_data (header, 4);
if (debug_level > 2)
log_rr (whichserver, t1, DNS_T_A, header, 4, ttl);
}
++i;
}
save_finish (DNS_T_A, t1, ttl);
}
else
{
save_start ();
while (i < j)
{
pos = dns_packet_skipname (buf, len, records[i]);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
uint16_unpack_big (header + 8, &datalen);
if (datalen > len - pos)
goto DIE;
save_data (header + 8, 2);
save_data (buf + pos, datalen);
if (debug_level > 2)
log_rr (whichserver, t1, type, buf + pos, datalen, ttl);
++i;
}
save_finish (type, t1, ttl);
}
i = j;
}
alloc_free (records);
records = 0;
if (flagcname)
{
ttl = cnamettl;
CNAME:
if (!z->level)
{
if (z->alias[QUERY_MAXALIAS - 1])
goto DIE;
for (j = QUERY_MAXALIAS - 1; j > 0; --j)
z->alias[j] = z->alias[j - 1];
for (j = QUERY_MAXALIAS - 1; j > 0; --j)
z->aliasttl[j] = z->aliasttl[j - 1];
z->alias[0] = z->name[0];
z->aliasttl[0] = ttl;
z->name[0] = 0;
}
if (!dns_domain_copy (&z->name[z->level], cname))
goto DIE;
goto NEWNAME;
}
if (rcode == 3)
{
if (debug_level > 2)
log_nxdomain (whichserver, d, soattl);
cachegeneric (DNS_T_ANY, d, "", 0, soattl);
NXDOMAIN:
if (z->level)
goto LOWERLEVEL;
if (!rqa (z))
goto DIE;
response_nxdomain ();
cleanup (z);
return 1;
}
if (!flagout && flagsoa)
if (byte_diff (DNS_T_ANY, 2, dtype))
if (byte_diff (DNS_T_AXFR, 2, dtype))
if (byte_diff (DNS_T_CNAME, 2, dtype))
{
save_start ();
save_finish (dtype, d, soattl);
if (debug_level > 2)
log_nodata (whichserver, d, dtype, soattl);
}
if (debug_level > 2)
log_stats ();
if (flagout || flagsoa || !flagreferral)
{
if (z->level)
{
pos = posanswers;
for (j = 0; j < numanswers; ++j)
{
pos = dns_packet_getname (buf, len, pos, &t1);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
uint16_unpack_big (header + 8, &datalen);
if (dns_domain_equal (t1, d))
if (typematch (header, DNS_T_A))
if (byte_equal (header + 2, 2, DNS_C_IN))
/* should always be true */
if (datalen == 4)
for (k = 0; k < 64; k += 4)
{
if (byte_equal (z->servers[z->level - 1]
+ k, 4, "\0\0\0\0"))
{
if (!dns_packet_copy (buf, len, pos,
z->servers[z->level - 1] + k, 4))
goto DIE;
break;
}
}
pos += datalen;
}
goto LOWERLEVEL;
}
if (!rqa (z))
goto DIE;
pos = posanswers;
for (j = 0; j < numanswers; ++j)
{
pos = dns_packet_getname (buf, len, pos, &t1);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
ttl = ttlget (header + 4);
uint16_unpack_big (header + 8, &datalen);
if (dns_domain_equal (t1, d))
{
if (byte_equal (header + 2, 2, DNS_C_IN))
{ /* should always be true */
if (typematch (header, dtype))
{
if (!response_rstart (t1, header, ttl))
goto DIE;
if (typematch (header, DNS_T_NS)
|| typematch (header, DNS_T_CNAME)
|| typematch (header, DNS_T_PTR))
{
if (!dns_packet_getname (buf, len, pos, &t2))
goto DIE;
if (!response_addname (t2))
goto DIE;
}
else if (typematch (header, DNS_T_MX))
{
pos2 = dns_packet_copy (buf, len, pos, misc, 2);
if (!pos2)
goto DIE;
if (!response_addbytes (misc, 2))
goto DIE;
if (!dns_packet_getname (buf, len, pos2, &t2))
goto DIE;
if (!response_addname (t2))
goto DIE;
}
else if (typematch (header, DNS_T_SOA))
{
pos2 = dns_packet_getname (buf, len, pos, &t2);
if (!pos2)
goto DIE;
if (!response_addname (t2))
goto DIE;
pos2 = dns_packet_getname (buf, len, pos2, &t3);
if (!pos2)
goto DIE;
if (!response_addname (t3))
goto DIE;
pos2 = dns_packet_copy (buf, len, pos2, misc, 20);
if (!pos2)
goto DIE;
if (!response_addbytes (misc, 20))
goto DIE;
}
else
{
if (pos + datalen > len)
goto DIE;
if (!response_addbytes (buf + pos, datalen))
goto DIE;
}
response_rfinish(RESPONSE_ANSWER);
}
}
}
pos += datalen;
}
cleanup (z);
return 1;
}
if (!dns_domain_suffix (d, referral))
goto DIE;
control = d + dns_domain_suffixpos (d, referral);
z->control[z->level] = control;
byte_zero (z->servers[z->level], 64);
for (j = 0; j < QUERY_MAXNS; ++j)
dns_domain_free (&z->ns[z->level][j]);
k = 0;
pos = posauthority;
for (j = 0; j < numauthority; ++j)
{
pos = dns_packet_getname (buf, len, pos, &t1);
if (!pos)
goto DIE;
pos = dns_packet_copy (buf, len, pos, header, 10);
if (!pos)
goto DIE;
uint16_unpack_big (header + 8, &datalen);
if (dns_domain_equal (referral, t1)) /* should always be true */
if (typematch (header, DNS_T_NS)) /* should always be true */
/* should always be true */
if (byte_equal (header + 2, 2, DNS_C_IN))
if (k < QUERY_MAXNS)
if (!dns_packet_getname (buf, len, pos,
&z->ns[z->level][k++]))
goto DIE;
pos += datalen;
}
goto HAVENS;
SERVFAIL:
if (z->level)
goto LOWERLEVEL;
if (!rqa (z))
goto DIE;
response_servfail ();
cleanup (z);
return 1;
DIE:
cleanup (z);
if (records)
{
alloc_free (records);
records = 0;
}
return -1;
}
| 1
|
333,051
|
pim_equal(nfa_pim_T *one, nfa_pim_T *two)
{
    /* A postponed invisible match (pim) counts as "unused" when the pointer
     * is NULL or its result field still holds NFA_PIM_UNUSED. */
    if (one == NULL || one->result == NFA_PIM_UNUSED)
	/* "one" is unused: the two are equal exactly when "two" is unused */
	return two == NULL || two->result == NFA_PIM_UNUSED;
    if (two == NULL || two->result == NFA_PIM_UNUSED)
	return FALSE;	/* "one" is in use, "two" is not: not equal */
    /* Both are in use: they must reference the same NFA state ... */
    if (one->state->id != two->state->id)
	return FALSE;
    /* ... and record the same end position.  With multi-line regex state
     * the position is (lnum, col); otherwise it is a plain text pointer. */
    if (REG_MULTI)
	return one->end.pos.lnum == two->end.pos.lnum
	    && one->end.pos.col == two->end.pos.col;
    return one->end.ptr == two->end.ptr;
}
| 0
|
387,623
|
/*
 * Copy the user-supplied enumerated-control name strings into the kernel.
 *
 * The userspace buffer address and length come from ue->info; the buffer is
 * duplicated with vmemdup_user() and, after validation, attached to the
 * element as ue->priv_data.  Returns 0 on success or a negative errno.
 */
static int snd_ctl_elem_init_enum_names(struct user_element *ue)
{
	char *names, *p;
	size_t buf_len, name_len;
	unsigned int i;
	const uintptr_t user_ptrval = ue->info.value.enumerated.names_ptr;

	buf_len = ue->info.value.enumerated.names_length;
	/* cap the total buffer at 64 KiB to bound kernel memory use */
	if (buf_len > 64 * 1024)
		return -EINVAL;
	/* account the allocation against the per-card user-control budget */
	if (check_user_elem_overflow(ue->card, buf_len))
		return -ENOMEM;
	names = vmemdup_user((const void __user *)user_ptrval, buf_len);
	if (IS_ERR(names))
		return PTR_ERR(names);
	/* check that there are enough valid names */
	p = names;
	for (i = 0; i < ue->info.value.enumerated.items; ++i) {
		name_len = strnlen(p, buf_len);
		/*
		 * Reject an empty name, a name of 64+ chars, or a name that
		 * runs to the end of the remaining buffer without a NUL
		 * terminator (name_len == buf_len).
		 */
		if (name_len == 0 || name_len >= 64 || name_len == buf_len) {
			kvfree(names);
			return -EINVAL;
		}
		/* advance past the string and its NUL */
		p += name_len + 1;
		buf_len -= name_len + 1;
	}
	ue->priv_data = names;
	/* the userspace pointer must not leak back out through the info */
	ue->info.value.enumerated.names_ptr = 0;
	// increment the allocation size; decremented again at private_free.
	ue->card->user_ctl_alloc_size += ue->info.value.enumerated.names_length;
	return 0;
}
| 0
|
331,780
|
/*!
    \internal
    Installs \a s as the engine's current painter state by assigning it to
    the base-class QPaintEngine::state pointer.  Only the pointer is stored;
    no copy is made.
*/
void QPaintEngineEx::setState(QPainterState *s)
{
    // Qualified name writes the inherited QPaintEngine member directly.
    QPaintEngine::state = s;
}
| 0
|
462,528
|
std::string quote_empty(const std::string& input) {
    // Render an empty string as a visible pair of single quotes ("''") so it
    // does not vanish when displayed; non-empty input is returned unchanged.
    return input.empty() ? std::string("''") : input;
}
| 0
|
225,081
|
PQuser(const PGconn *conn)
{
	/* Accessor for the connection's user name; tolerate a NULL handle
	 * so callers need not pre-check the connection pointer. */
	return conn ? conn->pguser : NULL;
}
| 0
|
412,128
|
dnsc_shared_secrets_delkeyfunc(void *k, void* ATTR_UNUSED(arg))
{
	/* Cache key destructor: tear down the rwlock embedded in the entry,
	 * then release the key structure itself. */
	struct shared_secret_cache_key* key =
		(struct shared_secret_cache_key*)k;
	lock_rw_destroy(&key->entry.lock);
	free(key);
}
| 0
|
208,535
|
/**
 * Return a (cloned) list of the QNX binary's fixup records as relocations.
 * The caller owns the returned list; NULL on failure.
 */
static RzList *relocs(RzBinFile *bf) {
	rz_return_val_if_fail(bf && bf->o, NULL);
	QnxObj *qo = bf->o->bin_obj;
	// bin_obj can legitimately be NULL (e.g. when loading failed);
	// the original code dereferenced it unconditionally.
	if (!qo) {
		return NULL;
	}
	return rz_list_clone(qo->fixups);
}
| 1
|
387,572
|
/*
 * read(2) handler for the ALSA control device: copy queued element-change
 * events to userspace.  Blocks while the event queue is empty unless
 * O_NONBLOCK is set or some data has already been copied.  Returns the
 * number of bytes copied, or a negative errno when nothing was copied.
 */
static ssize_t snd_ctl_read(struct file *file, char __user *buffer,
			    size_t count, loff_t * offset)
{
	struct snd_ctl_file *ctl;
	int err = 0;
	ssize_t result = 0;

	ctl = file->private_data;
	if (snd_BUG_ON(!ctl || !ctl->card))
		return -ENXIO;
	/* events are only queued for clients that subscribed to them */
	if (!ctl->subscribed)
		return -EBADFD;
	/* the buffer must hold at least one whole event */
	if (count < sizeof(struct snd_ctl_event))
		return -EINVAL;
	spin_lock_irq(&ctl->read_lock);
	while (count >= sizeof(struct snd_ctl_event)) {
		struct snd_ctl_event ev;
		struct snd_kctl_event *kev;
		while (list_empty(&ctl->events)) {
			wait_queue_entry_t wait;
			/* don't sleep when non-blocking, or when we already
			 * have events to hand back */
			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
				err = -EAGAIN;
				goto __end_lock;
			}
			init_waitqueue_entry(&wait, current);
			add_wait_queue(&ctl->change_sleep, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			/* drop the lock while sleeping for new events */
			spin_unlock_irq(&ctl->read_lock);
			schedule();
			remove_wait_queue(&ctl->change_sleep, &wait);
			if (ctl->card->shutdown)
				return -ENODEV;
			if (signal_pending(current))
				return -ERESTARTSYS;
			spin_lock_irq(&ctl->read_lock);
		}
		/* dequeue one kernel event and snapshot it while locked */
		kev = snd_kctl_event(ctl->events.next);
		ev.type = SNDRV_CTL_EVENT_ELEM;
		ev.data.elem.mask = kev->mask;
		ev.data.elem.id = kev->id;
		list_del(&kev->list);
		/* copy_to_user() may fault/sleep, so release the spinlock */
		spin_unlock_irq(&ctl->read_lock);
		kfree(kev);
		if (copy_to_user(buffer, &ev, sizeof(struct snd_ctl_event))) {
			err = -EFAULT;
			goto __end;
		}
		spin_lock_irq(&ctl->read_lock);
		buffer += sizeof(struct snd_ctl_event);
		count -= sizeof(struct snd_ctl_event);
		result += sizeof(struct snd_ctl_event);
	}
      __end_lock:
	spin_unlock_irq(&ctl->read_lock);
      __end:
	/* report bytes copied if any; otherwise the last error seen */
	return result > 0 ? result : err;
}
| 0
|
241,313
|
/*
 * Bootstrap the core class hierarchy (BasicObject, Object, Module, Class)
 * and register their primitive methods.  Order matters: the four classes
 * are created with boot_defclass() before metaclasses exist, then their
 * class pointers and metaclasses are fixed up afterwards.
 */
mrb_init_class(mrb_state *mrb)
{
  struct RClass *bob;           /* BasicObject */
  struct RClass *obj;           /* Object */
  struct RClass *mod;           /* Module */
  struct RClass *cls;           /* Class */

  /* boot class hierarchy */
  bob = boot_defclass(mrb, 0);
  obj = boot_defclass(mrb, bob); mrb->object_class = obj;
  mod = boot_defclass(mrb, obj); mrb->module_class = mod;/* obj -> mod */
  cls = boot_defclass(mrb, mod); mrb->class_class = cls; /* obj -> cls */
  /* fix-up loose ends: every boot class's class is Class itself */
  bob->c = obj->c = mod->c = cls->c = cls;
  make_metaclass(mrb, bob);
  make_metaclass(mrb, obj);
  make_metaclass(mrb, mod);
  make_metaclass(mrb, cls);
  /* name basic classes (register them as constants) */
  mrb_define_const_id(mrb, bob, MRB_SYM(BasicObject), mrb_obj_value(bob));
  mrb_define_const_id(mrb, obj, MRB_SYM(Object), mrb_obj_value(obj));
  mrb_define_const_id(mrb, obj, MRB_SYM(Module), mrb_obj_value(mod));
  mrb_define_const_id(mrb, obj, MRB_SYM(Class), mrb_obj_value(cls));
  /* name each classes */
  mrb_class_name_class(mrb, NULL, bob, MRB_SYM(BasicObject));
  mrb_class_name_class(mrb, NULL, obj, MRB_SYM(Object)); /* 15.2.1 */
  mrb_class_name_class(mrb, NULL, mod, MRB_SYM(Module)); /* 15.2.2 */
  mrb_class_name_class(mrb, NULL, cls, MRB_SYM(Class)); /* 15.2.3 */
  mrb->proc_class = mrb_define_class(mrb, "Proc", mrb->object_class); /* 15.2.17 */
  MRB_SET_INSTANCE_TT(mrb->proc_class, MRB_TT_PROC);
  MRB_SET_INSTANCE_TT(cls, MRB_TT_CLASS);
  /* BasicObject primitives */
  mrb_define_method(mrb, bob, "initialize", mrb_do_nothing, MRB_ARGS_NONE());
  mrb_define_method(mrb, bob, "!", mrb_bob_not, MRB_ARGS_NONE());
  mrb_define_method(mrb, bob, "==", mrb_obj_equal_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.1 */
  mrb_define_method(mrb, bob, "__id__", mrb_obj_id_m, MRB_ARGS_NONE()); /* 15.3.1.3.4 */
  mrb_define_method(mrb, bob, "__send__", mrb_f_send, MRB_ARGS_REQ(1)|MRB_ARGS_REST()|MRB_ARGS_BLOCK()); /* 15.3.1.3.5 */
  mrb_define_method(mrb, bob, "equal?", mrb_obj_equal_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.11 */
  mrb_define_method(mrb, bob, "instance_eval", mrb_obj_instance_eval, MRB_ARGS_OPT(1)|MRB_ARGS_BLOCK()); /* 15.3.1.3.18 */
  mrb_define_method(mrb, bob, "singleton_method_added", mrb_do_nothing, MRB_ARGS_REQ(1));
  /* Class methods */
  mrb_define_class_method(mrb, cls, "new", mrb_class_new_class, MRB_ARGS_OPT(1)|MRB_ARGS_BLOCK());
  mrb_define_method(mrb, cls, "allocate", mrb_instance_alloc, MRB_ARGS_NONE());
  mrb_define_method(mrb, cls, "superclass", mrb_class_superclass, MRB_ARGS_NONE()); /* 15.2.3.3.4 */
  mrb_define_method(mrb, cls, "initialize", mrb_class_initialize, MRB_ARGS_OPT(1)); /* 15.2.3.3.1 */
  mrb_define_method(mrb, cls, "inherited", mrb_do_nothing, MRB_ARGS_REQ(1));
  init_class_new(mrb, cls);
  /* Module methods */
  MRB_SET_INSTANCE_TT(mod, MRB_TT_MODULE);
  mrb_define_method(mrb, mod, "extend_object", mrb_mod_extend_object, MRB_ARGS_REQ(1)); /* 15.2.2.4.25 */
  mrb_define_method(mrb, mod, "extended", mrb_do_nothing, MRB_ARGS_REQ(1)); /* 15.2.2.4.26 */
  mrb_define_method(mrb, mod, "prepended", mrb_do_nothing, MRB_ARGS_REQ(1));
  mrb_define_method(mrb, mod, "prepend_features", mrb_mod_prepend_features, MRB_ARGS_REQ(1));
  mrb_define_method(mrb, mod, "include?", mrb_mod_include_p, MRB_ARGS_REQ(1)); /* 15.2.2.4.28 */
  mrb_define_method(mrb, mod, "append_features", mrb_mod_append_features, MRB_ARGS_REQ(1)); /* 15.2.2.4.10 */
  mrb_define_method(mrb, mod, "class_eval", mrb_mod_module_eval, MRB_ARGS_ANY()); /* 15.2.2.4.15 */
  mrb_define_method(mrb, mod, "included", mrb_do_nothing, MRB_ARGS_REQ(1)); /* 15.2.2.4.29 */
  mrb_define_method(mrb, mod, "initialize", mrb_mod_initialize, MRB_ARGS_NONE()); /* 15.2.2.4.31 */
  mrb_define_method(mrb, mod, "module_eval", mrb_mod_module_eval, MRB_ARGS_ANY()); /* 15.2.2.4.35 */
  mrb_define_method(mrb, mod, "module_function", mrb_mod_module_function, MRB_ARGS_ANY());
  mrb_define_method(mrb, mod, "private", mrb_mod_dummy_visibility, MRB_ARGS_ANY()); /* 15.2.2.4.36 */
  mrb_define_method(mrb, mod, "protected", mrb_mod_dummy_visibility, MRB_ARGS_ANY()); /* 15.2.2.4.37 */
  mrb_define_method(mrb, mod, "public", mrb_mod_dummy_visibility, MRB_ARGS_ANY()); /* 15.2.2.4.38 */
  mrb_define_method(mrb, mod, "attr_reader", mrb_mod_attr_reader, MRB_ARGS_ANY()); /* 15.2.2.4.13 */
  mrb_define_method(mrb, mod, "attr_writer", mrb_mod_attr_writer, MRB_ARGS_ANY()); /* 15.2.2.4.14 */
  mrb_define_method(mrb, mod, "to_s", mrb_mod_to_s, MRB_ARGS_NONE());
  mrb_define_method(mrb, mod, "inspect", mrb_mod_to_s, MRB_ARGS_NONE());
  mrb_define_method(mrb, mod, "alias_method", mrb_mod_alias, MRB_ARGS_ANY()); /* 15.2.2.4.8 */
  mrb_define_method(mrb, mod, "ancestors", mrb_mod_ancestors, MRB_ARGS_NONE()); /* 15.2.2.4.9 */
  mrb_define_method(mrb, mod, "undef_method", mrb_mod_undef, MRB_ARGS_ANY()); /* 15.2.2.4.41 */
  mrb_define_method(mrb, mod, "const_defined?", mrb_mod_const_defined, MRB_ARGS_ARG(1,1)); /* 15.2.2.4.20 */
  mrb_define_method(mrb, mod, "const_get", mrb_mod_const_get, MRB_ARGS_REQ(1)); /* 15.2.2.4.21 */
  mrb_define_method(mrb, mod, "const_set", mrb_mod_const_set, MRB_ARGS_REQ(2)); /* 15.2.2.4.23 */
  mrb_define_method(mrb, mod, "remove_const", mrb_mod_remove_const, MRB_ARGS_REQ(1)); /* 15.2.2.4.40 */
  mrb_define_method(mrb, mod, "const_missing", mrb_mod_const_missing, MRB_ARGS_REQ(1));
  mrb_define_method(mrb, mod, "method_defined?", mrb_mod_method_defined, MRB_ARGS_REQ(1)); /* 15.2.2.4.34 */
  mrb_define_method(mrb, mod, "define_method", mod_define_method, MRB_ARGS_ARG(1,1));
  mrb_define_method(mrb, mod, "===", mrb_mod_eqq, MRB_ARGS_REQ(1)); /* 15.2.2.4.7 */
  mrb_define_method(mrb, mod, "dup", mrb_mod_dup, MRB_ARGS_NONE());
  mrb_define_method(mrb, bob, "method_added", mrb_do_nothing, MRB_ARGS_REQ(1));
  /* class-only behaviour is removed from Class (module-only methods) */
  mrb_undef_method(mrb, cls, "append_features");
  mrb_undef_method(mrb, cls, "prepend_features");
  mrb_undef_method(mrb, cls, "extend_object");
  mrb_undef_method(mrb, cls, "module_function");
  /* the top-level "main" object */
  mrb->top_self = MRB_OBJ_ALLOC(mrb, MRB_TT_OBJECT, mrb->object_class);
  mrb_define_singleton_method(mrb, mrb->top_self, "inspect", inspect_main, MRB_ARGS_NONE());
  mrb_define_singleton_method(mrb, mrb->top_self, "to_s", inspect_main, MRB_ARGS_NONE());
  mrb_define_singleton_method(mrb, mrb->top_self, "define_method", top_define_method, MRB_ARGS_ARG(1,1));
}
| 0
|
506,437
|
/*
 * Compute the RPA user-response digest into digest[MD5_RESULTLEN].
 *
 * The digest is MD5 over, in this exact order:
 *   pwd_md5 . 48 zero bytes . username . service . realm .
 *   user_challenge . service_challenge . service_timestamp . pwd_md5
 * The input order must not change: it defines the wire-compatible value.
 */
static void rpa_user_response(struct rpa_auth_request *request,
			      unsigned char digest[STATIC_ARRAY MD5_RESULTLEN])
{
	struct md5_context ctx;
	unsigned char z[48];

	/* 48-byte block of zeroes mixed in after the password hash */
	memset(z, 0, sizeof(z));
	md5_init(&ctx);
	md5_update(&ctx, request->pwd_md5, sizeof(request->pwd_md5));
	md5_update(&ctx, z, sizeof(z));
	md5_update(&ctx, request->username_ucs2be, request->username_len);
	md5_update(&ctx, request->service_ucs2be, request->service_len);
	md5_update(&ctx, request->realm_ucs2be, request->realm_len);
	md5_update(&ctx, request->user_challenge, request->user_challenge_len);
	md5_update(&ctx, request->service_challenge, RPA_SCHALLENGE_LEN);
	md5_update(&ctx, request->service_timestamp, RPA_TIMESTAMP_LEN);
	/* password hash is mixed in a second time at the end */
	md5_update(&ctx, request->pwd_md5, sizeof(request->pwd_md5));
	md5_final(&ctx, digest);
}
| 0
|
512,847
|
/**
  Return the "real" type handler for this item.
  Should not be called: Item_blob is used for SHOW purposes only, so this
  asserts in debug builds and falls back to the VARCHAR handler in release
  builds to stay safe.
*/
const Type_handler *real_type_handler() const
{
  // Should not be called, Item_blob is used for SHOW purposes only.
  DBUG_ASSERT(0);
  return &type_handler_varchar;
}
| 0
|
489,220
|
/*
 * Clear "count" bits starting at "offset" in the HFS+ allocation bitmap,
 * marking those blocks free.  The bitmap lives in the alloc_file's page
 * cache; bits are stored big-endian in 32-bit words.  Returns 0 on
 * success, -2 when the requested range lies outside the volume.
 */
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
		return -2;

	/* serialize all bitmap access through the alloc file's mutex */
	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	/* NOTE(review): read_mapping_page() can return ERR_PTR on I/O
	 * failure; that is not checked here before kmap() — confirm
	 * whether callers/upper layers guarantee the page is cached. */
	page = read_mapping_page(mapping, pnr, NULL);
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;

	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;
		/* keep the leading i bits, clear the trailing j bits */
		mask = 0xffffffffU << j;
		if (j > count) {
			/* range ends inside this same word: also keep the
			 * bits after it */
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		/* crossed a page boundary: flush this page, map the next */
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		/* keep only the bits after the cleared range */
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	/* every bit cleared above frees one block */
	HFSPLUS_SB(sb).free_blocks += len;
	sb->s_dirt = 1;
	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);

	return 0;
}
| 0
|
482,493
|
lou_readCharFromFile(const char *fileName, int *mode) {
	/* Read a character from a file, whether big-endian, little-endian or
	 * ASCII8.
	 *
	 * Protocol: call with *mode == 1 to (re)open fileName; subsequent
	 * calls with *mode == 0 return successive characters.  *mode is set
	 * back to 1 when the file is closed (EOF) or could not be opened.
	 * Returns the next character, EOF at end/error, or 0 for a NULL name.
	 *
	 * NOTE: state is kept in a function-local static FileInfo, so only
	 * one file can be read at a time and this is not reentrant. */
	int ch;
	static FileInfo file;
	if (fileName == NULL) return 0;
	if (*mode == 1) {
		/* open request: reset the per-file state */
		*mode = 0;
		file.fileName = fileName;
		file.encoding = noEncoding;
		file.status = 0;
		file.lineNumber = 0;
		if (!(file.in = fopen(file.fileName, "r"))) {
			_lou_logMessage(LOU_LOG_ERROR, "Cannot open file '%s'", file.fileName);
			*mode = 1;
			return EOF;
		}
	}
	if (file.in == NULL) {
		/* no file currently open: signal the caller to reopen */
		*mode = 1;
		return EOF;
	}
	ch = getAChar(&file);
	if (ch == EOF) {
		/* close on EOF and flag the state for the next open */
		fclose(file.in);
		file.in = NULL;
		*mode = 1;
	}
	return ch;
}
| 0
|
447,051
|
// Materialize the data referenced by orgPath (stdin or a data: URI) into a
// freshly named temporary file and return that file's path.
// Throws Error on a tty stdin, stdin mode failures, or bad base64 payloads.
std::string XPathIo::writeDataToFile(const std::string& orgPath) {
    Protocol prot = fileProtocol(orgPath);

    // generating the name for temp file.
    std::time_t timestamp = std::time(NULL);
    std::stringstream ss;
    ss << timestamp << XPathIo::TEMP_FILE_EXT;
    std::string path = ss.str();
    std::ofstream fs(path.c_str(), std::ios::out | std::ios::binary | std::ios::trunc);

    if (prot == pStdin) {
        // refuse interactive stdin: there is no piped data to read
        if (isatty(fileno(stdin)))
            throw Error(53);
#if defined(_MSC_VER) || defined(__MINGW__)
        // convert stdin to binary
        if (_setmode(_fileno(stdin), _O_BINARY) == -1)
            throw Error(54);
#endif
        // read stdin and write to the temp file.
        char readBuf[100*1024];
        std::streamsize readBufSize = 0;
        do {
            std::cin.read(readBuf, sizeof(readBuf));
            readBufSize = std::cin.gcount();
            if (readBufSize > 0) {
                fs.write (readBuf, readBufSize);
            }
        } while(readBufSize);
    } else if (prot == pDataUri) {
        // read data uri and write to the temp file.
        size_t base64Pos = orgPath.find("base64,");
        if (base64Pos == std::string::npos)
            throw Error(1, "No base64 data");
        std::string data = orgPath.substr(base64Pos+7);
        // Use a std::string as the decode buffer so it is released even on
        // the throw below (the previous raw new[] leaked on that path).
        std::string decodeData(data.length(), '\0');
        long size = base64decode(data.c_str(), &decodeData[0], data.length());
        if (size > 0)
            fs.write(decodeData.data(), size);
        else
            throw Error(1, "Unable to decode base 64.");
    }
    fs.close();
    return path;
}
| 0
|
500,045
|
/*
 * calloc() work-alike backed by OPENSSL_malloc: allocate nmemb elements of
 * "size" bytes each and zero them.  Returns NULL on allocation failure or
 * when nmemb*size would overflow size_t (the original code multiplied
 * unchecked, which could yield an undersized buffer).
 */
static void* kssl_calloc(size_t nmemb, size_t size)
	{
	void* p;
	/* reject requests whose byte count wraps around size_t */
	if (size != 0 && nmemb > ((size_t)-1) / size)
		return NULL;
	p=OPENSSL_malloc(nmemb*size);
	if (p){
		memset(p, 0, nmemb*size);
	}
	return p;
	}
| 0
|
223,426
|
static void do_utfpeakcharback_invalid(compiler_common *common)
{
/* Peak a character back. Does not modify STR_PTR.
   Emits JIT code (sljit) for the UTF peek-back helper that also validates
   the sequence; on malformed input the emitted code yields
   INVALID_UTF_CHAR instead of a decoded character.  The exact emission
   order below defines the generated machine code — do not reorder. */
DEFINE_COMPILER;
struct sljit_jump *jump;
struct sljit_jump *exit_invalid[3];

sljit_emit_fast_enter(compiler, RETURN_ADDR, 0);

/* fast path: values >= 0xe000 need no surrogate combination */
jump = CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0xe000);

OP2(SLJIT_ADD, TMP2, 0, TMP2, 0, SLJIT_IMM, IN_UCHARS(1));
/* below 0xdc00: not a low surrogate — invalid here */
exit_invalid[0] = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0xdc00);
/* no preceding unit available to pair with */
exit_invalid[1] = CMP(SLJIT_GREATER_EQUAL, TMP2, 0, STR_PTR, 0);

/* load the previous code unit and check it is a high surrogate
   (0xd800..0xdbff), then combine the pair into a code point */
OP1(MOV_UCHAR, TMP2, 0, SLJIT_MEM1(STR_PTR), IN_UCHARS(-2));
OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 0x10000 - 0xdc00);
OP2(SLJIT_SUB, TMP2, 0, TMP2, 0, SLJIT_IMM, 0xd800);
exit_invalid[2] = CMP(SLJIT_GREATER_EQUAL, TMP2, 0, SLJIT_IMM, 0x400);
OP2(SLJIT_SHL, TMP2, 0, TMP2, 0, SLJIT_IMM, 10);
OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, TMP2, 0);

JUMPHERE(jump);
OP_SRC(SLJIT_FAST_RETURN, RETURN_ADDR, 0);

/* all invalid paths converge: return the replacement character */
JUMPHERE(exit_invalid[0]);
JUMPHERE(exit_invalid[1]);
JUMPHERE(exit_invalid[2]);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_IMM, INVALID_UTF_CHAR);
OP_SRC(SLJIT_FAST_RETURN, RETURN_ADDR, 0);
}
| 0
|
366,318
|
/*
 * Resolve the userspace mount-point path and hand the actual mount work
 * to path_mount().  Returns 0 on success or a negative errno.
 */
long do_mount(const char *dev_name, const char __user *dir_name,
	      const char *type_page, unsigned long flags, void *data_page)
{
	struct path path;
	int err;

	/* look up the target directory, following symlinks */
	err = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
	if (err)
		return err;

	err = path_mount(dev_name, &path, type_page, flags, data_page);
	/* drop the reference taken by the path lookup */
	path_put(&path);
	return err;
}
| 0
|
195,073
|
// Decode a PNG image held in `input` into the op's output tensor.
// Output dtype follows data_type_ (uint8, uint16, or float scaled to [0,1]);
// DecodeGif callers get a 4-D [1,H,W,C] shape, others get 3-D [H,W,C].
// Raises InvalidArgument for oversized images, wrong-op usage
// (DecodeBmp/DecodeAndCropJpeg), or malformed PNG data.
void DecodePngV2(OpKernelContext* context, StringPiece input) {
  // uint8 output decodes at 8 bits/channel; uint16 and float at 16.
  int channel_bits = (data_type_ == DataType::DT_UINT8) ? 8 : 16;
  png::DecodeContext decode;
  OP_REQUIRES(
      context, png::CommonInitDecode(input, channels_, channel_bits, &decode),
      errors::InvalidArgument("Invalid PNG. Failed to initialize decoder."));

  // Verify that width and height are not too large:
  // - verify width and height don't overflow int.
  // - width can later be multiplied by channels_ and sizeof(uint16), so
  //   verify single dimension is not too large.
  // - verify when width and height are multiplied together, there are a few
  //   bits to spare as well.
  const int width = static_cast<int>(decode.width);
  const int height = static_cast<int>(decode.height);
  const int64_t total_size =
      static_cast<int64_t>(width) * static_cast<int64_t>(height);
  if (width != static_cast<int64_t>(decode.width) || width <= 0 ||
      width >= (1LL << 27) || height != static_cast<int64_t>(decode.height) ||
      height <= 0 || height >= (1LL << 27) || total_size >= (1LL << 29)) {
    // decoder state must be freed before bailing out
    png::CommonFreeDecode(&decode);
    OP_REQUIRES(context, false,
                errors::InvalidArgument("PNG size too large for int: ",
                                        decode.width, " by ", decode.height));
  }

  Tensor* output = nullptr;
  Status status;
  // By the existing API, we support decoding PNG with `DecodeGif` op.
  // We need to make sure to return 4-D shapes when using `DecodeGif`.
  if (op_type_ == "DecodeGif") {
    status = context->allocate_output(
        0, TensorShape({1, height, width, decode.channels}), &output);
  } else {
    status = context->allocate_output(
        0, TensorShape({height, width, decode.channels}), &output);
  }

  if (op_type_ == "DecodeBmp") {
    // TODO(b/171060723): Only DecodeBmp as op_type_ is not acceptable here
    // because currently `decode_(jpeg|png|gif)` ops can decode any one of
    // jpeg, png or gif but not bmp. Similarly, `decode_bmp` cannot decode
    // anything but bmp formats. This behavior needs to be revisited. For more
    // details, please refer to the bug.
    OP_REQUIRES(context, false,
                errors::InvalidArgument(
                    "Trying to decode PNG format using DecodeBmp op. Use "
                    "`decode_png` or `decode_image` instead."));
  } else if (op_type_ == "DecodeAndCropJpeg") {
    OP_REQUIRES(context, false,
                errors::InvalidArgument(
                    "DecodeAndCropJpeg operation can run on JPEG only, but "
                    "detected PNG."));
  }

  // release decoder state if output allocation failed
  if (!status.ok()) png::CommonFreeDecode(&decode);
  OP_REQUIRES_OK(context, status);

  if (data_type_ == DataType::DT_UINT8) {
    OP_REQUIRES(
        context,
        png::CommonFinishDecode(
            reinterpret_cast<png_bytep>(output->flat<uint8>().data()),
            decode.channels * width * sizeof(uint8), &decode),
        errors::InvalidArgument("Invalid PNG data, size ", input.size()));
  } else if (data_type_ == DataType::DT_UINT16) {
    OP_REQUIRES(
        context,
        png::CommonFinishDecode(
            reinterpret_cast<png_bytep>(output->flat<uint16>().data()),
            decode.channels * width * sizeof(uint16), &decode),
        errors::InvalidArgument("Invalid PNG data, size ", input.size()));
  } else if (data_type_ == DataType::DT_FLOAT) {
    // `png::CommonFinishDecode` does not support `float`. First allocate
    // uint16 buffer for the image and decode in uint16 (lossless). Wrap the
    // buffer in `unique_ptr` so that we don't forget to delete the buffer.
    std::unique_ptr<uint16[]> buffer(
        new uint16[height * width * decode.channels]);
    OP_REQUIRES(
        context,
        png::CommonFinishDecode(reinterpret_cast<png_bytep>(buffer.get()),
                                decode.channels * width * sizeof(uint16),
                                &decode),
        errors::InvalidArgument("Invalid PNG data, size ", input.size()));

    // Convert uint16 image data to desired data type.
    // Use eigen threadpooling to speed up the copy operation.
    const auto& device = context->eigen_device<Eigen::ThreadPoolDevice>();
    TTypes<uint16, 3>::UnalignedConstTensor buf(buffer.get(), height, width,
                                                decode.channels);
    // scale 16-bit samples into [0, 1]
    float scale = 1. / std::numeric_limits<uint16>::max();
    // Fill output tensor with desired dtype.
    output->tensor<float, 3>().device(device) = buf.cast<float>() * scale;
  }
}
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.