problem
stringlengths 26
131k
| labels
class label 2
classes |
|---|---|
How to extract 3 integers from "v%d.%d.%d" string format? : <p>I'm trying to extract 3 numbers out of the semantic version string which has the following format: <code>"v%d.%d.%d"</code></p>
<p>Here's my example code:</p>
<pre><code>std::string myVersion = "v3.49.1";
int versionMajor, versionMinor, versionPatch;
getVersionInfo(myVersion, versionMajor, versionMinor, versionPatch);
std::cout << versionMajor << " " << versionMinor << " " << versionPatch << '\n';
</code></pre>
<p>The expected result:</p>
<pre><code>3 49 1
</code></pre>
<p>How can I design the function <code>getVersionInfo()</code>?</p>
<p>What would be the most elegant solution?</p>
| 0debug
|
static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
{
MpegTSWrite *ts = s->priv_data;
uint8_t data[SECTION_LENGTH], *q, *desc_length_ptr, *program_info_length_ptr;
int val, stream_type, i, err = 0;
q = data;
put16(&q, 0xe000 | service->pcr_pid);
program_info_length_ptr = q;
q += 2;
val = 0xf000 | (q - program_info_length_ptr - 2);
program_info_length_ptr[0] = val >> 8;
program_info_length_ptr[1] = val;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
MpegTSWriteStream *ts_st = st->priv_data;
AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
if (q - data > SECTION_LENGTH - 32) {
err = 1;
break;
}
switch (st->codec->codec_id) {
case AV_CODEC_ID_MPEG1VIDEO:
case AV_CODEC_ID_MPEG2VIDEO:
stream_type = STREAM_TYPE_VIDEO_MPEG2;
break;
case AV_CODEC_ID_MPEG4:
stream_type = STREAM_TYPE_VIDEO_MPEG4;
break;
case AV_CODEC_ID_H264:
stream_type = STREAM_TYPE_VIDEO_H264;
break;
case AV_CODEC_ID_HEVC:
stream_type = STREAM_TYPE_VIDEO_HEVC;
break;
case AV_CODEC_ID_CAVS:
stream_type = STREAM_TYPE_VIDEO_CAVS;
break;
case AV_CODEC_ID_DIRAC:
stream_type = STREAM_TYPE_VIDEO_DIRAC;
break;
case AV_CODEC_ID_VC1:
stream_type = STREAM_TYPE_VIDEO_VC1;
break;
case AV_CODEC_ID_MP2:
case AV_CODEC_ID_MP3:
stream_type = STREAM_TYPE_AUDIO_MPEG1;
break;
case AV_CODEC_ID_AAC:
stream_type = (ts->flags & MPEGTS_FLAG_AAC_LATM)
? STREAM_TYPE_AUDIO_AAC_LATM
: STREAM_TYPE_AUDIO_AAC;
break;
case AV_CODEC_ID_AAC_LATM:
stream_type = STREAM_TYPE_AUDIO_AAC_LATM;
break;
case AV_CODEC_ID_AC3:
stream_type = STREAM_TYPE_AUDIO_AC3;
break;
case AV_CODEC_ID_DTS:
stream_type = STREAM_TYPE_AUDIO_DTS;
break;
case AV_CODEC_ID_TRUEHD:
stream_type = STREAM_TYPE_AUDIO_TRUEHD;
break;
case AV_CODEC_ID_OPUS:
stream_type = STREAM_TYPE_PRIVATE_DATA;
break;
default:
stream_type = STREAM_TYPE_PRIVATE_DATA;
break;
}
*q++ = stream_type;
put16(&q, 0xe000 | ts_st->pid);
desc_length_ptr = q;
q += 2;
switch (st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
if (st->codec->codec_id==AV_CODEC_ID_EAC3) {
*q++=0x7a;
*q++=1;
*q++=0;
}
if (st->codec->codec_id==AV_CODEC_ID_S302M) {
*q++ = 0x05;
*q++ = 4;
*q++ = 'B';
*q++ = 'S';
*q++ = 'S';
*q++ = 'D';
}
if (st->codec->codec_id==AV_CODEC_ID_OPUS) {
if (q - data > SECTION_LENGTH - 6 - 4) {
err = 1;
break;
}
*q++ = 0x05;
*q++ = 4;
*q++ = 'O';
*q++ = 'p';
*q++ = 'u';
*q++ = 's';
*q++ = 0x7f;
*q++ = 2;
*q++ = 0x80;
if (st->codec->extradata && st->codec->extradata_size >= 19) {
if (st->codec->extradata[18] == 0 && st->codec->channels <= 2) {
*q++ = st->codec->channels;
} else if (st->codec->extradata[18] == 1 && st->codec->channels <= 8 &&
st->codec->extradata_size >= 21 + st->codec->channels) {
static const uint8_t coupled_stream_counts[9] = {
1, 0, 1, 1, 2, 2, 2, 3, 3
};
static const uint8_t channel_map_a[8][8] = {
{0},
{0, 1},
{0, 2, 1},
{0, 1, 2, 3},
{0, 4, 1, 2, 3},
{0, 4, 1, 2, 3, 5},
{0, 4, 1, 2, 3, 5, 6},
{0, 6, 1, 2, 3, 4, 5, 7},
};
static const uint8_t channel_map_b[8][8] = {
{0},
{0, 1},
{0, 1, 2},
{0, 1, 2, 3},
{0, 1, 2, 3, 4},
{0, 1, 2, 3, 4, 5},
{0, 1, 2, 3, 4, 5, 6},
{0, 1, 2, 3, 4, 5, 6, 7},
};
if (st->codec->extradata[19] == st->codec->channels - coupled_stream_counts[st->codec->channels] &&
st->codec->extradata[20] == coupled_stream_counts[st->codec->channels] &&
memcmp(&st->codec->extradata[21], channel_map_a[st->codec->channels], st->codec->channels) == 0) {
*q++ = st->codec->channels;
} else if (st->codec->channels >= 2 && st->codec->extradata[19] == st->codec->channels &&
st->codec->extradata[20] == 0 &&
memcmp(&st->codec->extradata[21], channel_map_b[st->codec->channels], st->codec->channels) == 0) {
*q++ = st->codec->channels | 0x80;
} else {
av_log(s, AV_LOG_ERROR, "Unsupported Opus Vorbis-style channel mapping");
*q++ = 0xff;
}
} else {
av_log(s, AV_LOG_ERROR, "Unsupported Opus channel mapping for family %d", st->codec->extradata[18]);
*q++ = 0xff;
}
} else if (st->codec->channels <= 2) {
*q++ = st->codec->channels;
} else {
av_log(s, AV_LOG_ERROR, "Unsupported Opus channel mapping");
*q++ = 0xff;
}
}
if (lang) {
char *p;
char *next = lang->value;
uint8_t *len_ptr;
*q++ = 0x0a;
len_ptr = q++;
*len_ptr = 0;
for (p = lang->value; next && *len_ptr < 255 / 4 * 4; p = next + 1) {
if (q - data > SECTION_LENGTH - 4) {
err = 1;
break;
}
next = strchr(p, ',');
if (strlen(p) != 3 && (!next || next != p + 3))
continue;
*q++ = *p++;
*q++ = *p++;
*q++ = *p++;
if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
*q++ = 0x01;
else if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
*q++ = 0x02;
else if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
*q++ = 0x03;
else
*q++ = 0;
*len_ptr += 4;
}
if (*len_ptr == 0)
q -= 2;
}
break;
case AVMEDIA_TYPE_SUBTITLE:
{
const char default_language[] = "und";
const char *language = lang && strlen(lang->value) >= 3 ? lang->value : default_language;
if (st->codec->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
uint8_t *len_ptr;
int extradata_copied = 0;
*q++ = 0x59;
len_ptr = q++;
while (strlen(language) >= 3) {
if (sizeof(data) - (q - data) < 8) {
err = 1;
break;
}
*q++ = *language++;
*q++ = *language++;
*q++ = *language++;
if (*language != '\0')
language++;
if (st->codec->extradata_size - extradata_copied >= 5) {
*q++ = st->codec->extradata[extradata_copied + 4];
memcpy(q, st->codec->extradata + extradata_copied, 4);
extradata_copied += 5;
q += 4;
} else {
*q++ = (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED) ? 0x20 : 0x10;
if ((st->codec->extradata_size == 4) && (extradata_copied == 0)) {
memcpy(q, st->codec->extradata, 4);
extradata_copied += 4;
q += 4;
} else {
put16(&q, 1);
put16(&q, 1);
}
}
}
*len_ptr = q - len_ptr - 1;
} else if (st->codec->codec_id == AV_CODEC_ID_DVB_TELETEXT) {
uint8_t *len_ptr = NULL;
int extradata_copied = 0;
*q++ = 0x56;
len_ptr = q++;
while (strlen(language) >= 3 && q - data < sizeof(data) - 6) {
*q++ = *language++;
*q++ = *language++;
*q++ = *language++;
if (*language != '\0')
language++;
if (st->codec->extradata_size - 1 > extradata_copied) {
memcpy(q, st->codec->extradata + extradata_copied, 2);
extradata_copied += 2;
q += 2;
} else {
*q++ = 0x08;
*q++ = 0x00;
}
}
*len_ptr = q - len_ptr - 1;
}
}
break;
case AVMEDIA_TYPE_VIDEO:
if (stream_type == STREAM_TYPE_VIDEO_DIRAC) {
*q++ = 0x05;
*q++ = 4;
*q++ = 'd';
*q++ = 'r';
*q++ = 'a';
*q++ = 'c';
} else if (stream_type == STREAM_TYPE_VIDEO_VC1) {
*q++ = 0x05;
*q++ = 4;
*q++ = 'V';
*q++ = 'C';
*q++ = '-';
*q++ = '1';
}
break;
case AVMEDIA_TYPE_DATA:
if (st->codec->codec_id == AV_CODEC_ID_SMPTE_KLV) {
*q++ = 0x05;
*q++ = 4;
*q++ = 'K';
*q++ = 'L';
*q++ = 'V';
*q++ = 'A';
}
break;
}
val = 0xf000 | (q - desc_length_ptr - 2);
desc_length_ptr[0] = val >> 8;
desc_length_ptr[1] = val;
}
if (err)
av_log(s, AV_LOG_ERROR,
"The PMT section cannot fit stream %d and all following streams.\n"
"Try reducing the number of languages in the audio streams "
"or the total number of streams.\n", i);
mpegts_write_section1(&service->pmt, PMT_TID, service->sid, ts->tables_version, 0, 0,
data, q - data);
return 0;
}
| 1threat
|
static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options, char **export,
Error **errp)
{
SocketAddress *saddr;
if (qdict_haskey(options, "path") == qdict_haskey(options, "host")) {
if (qdict_haskey(options, "path")) {
error_setg(errp, "path and host may not be used at the same time.");
} else {
error_setg(errp, "one of path and host must be specified.");
}
return NULL;
}
saddr = g_new0(SocketAddress, 1);
if (qdict_haskey(options, "path")) {
UnixSocketAddress *q_unix;
saddr->type = SOCKET_ADDRESS_KIND_UNIX;
q_unix = saddr->u.q_unix.data = g_new0(UnixSocketAddress, 1);
q_unix->path = g_strdup(qdict_get_str(options, "path"));
qdict_del(options, "path");
} else {
InetSocketAddress *inet;
saddr->type = SOCKET_ADDRESS_KIND_INET;
inet = saddr->u.inet.data = g_new0(InetSocketAddress, 1);
inet->host = g_strdup(qdict_get_str(options, "host"));
if (!qdict_get_try_str(options, "port")) {
inet->port = g_strdup_printf("%d", NBD_DEFAULT_PORT);
} else {
inet->port = g_strdup(qdict_get_str(options, "port"));
}
qdict_del(options, "host");
qdict_del(options, "port");
}
s->client.is_unix = saddr->type == SOCKET_ADDRESS_KIND_UNIX;
*export = g_strdup(qdict_get_try_str(options, "export"));
if (*export) {
qdict_del(options, "export");
}
return saddr;
}
| 1threat
|
Android device camera crash issue in Android version > 6.0 : <p>In some of the android devices which have android version > 6.0, when i access the camera, device camera gets opened and when i capture and accept the pic, camera is crashing(loads the camera again) and i have camera access permission . what can be done? any idea?</p>
| 0debug
|
Angular: Get data from one page to another : [example][1]
I want to know how to add inputted data from one page to another page like in the picture above with angular.
I have a home page where I have an input field and a button that leads to a second page. On the home page I want to add a name in the input field and on the second page I want that inputted name to appear. How can I do that?
[1]: https://i.stack.imgur.com/Nb8zX.png
| 0debug
|
How to use post steps with Jenkins pipeline on multiple agents? : <p>When using the Jenkins pipeline where each stage runs on a different agent, it is <a href="https://github.com/jenkinsci/pipeline-model-definition-plugin/wiki/Controlling-your-build-environment#3-i-like-to-drive-stick-get-out-of-my-way" rel="noreferrer">good practice</a> to use <code>agent none</code> at the beginning:</p>
<pre><code>pipeline {
agent none
stages {
stage('Checkout') {
agent { label 'master' }
steps { script { currentBuild.result = 'SUCCESS' } }
}
stage('Build') {
agent { label 'someagent' }
steps { bat "exit 1" }
}
}
post {
always {
step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "test@test.com", sendToIndividuals: true])
}
}
}
</code></pre>
<p>But doing this leads to <code>Required context class hudson.FilePath is missing</code> error message when the email should go out:</p>
<pre><code>[Pipeline] { (Declarative: Post Actions)
[Pipeline] step
Required context class hudson.FilePath is missing
Perhaps you forgot to surround the code with a step that provides this, such as: node
[Pipeline] error
[Pipeline] }
</code></pre>
<p>When I change from <code>agent none</code> to <code>agent any</code>, it works fine.</p>
<p>How can I get the <code>post</code> step to work without using <code>agent any</code>? </p>
| 0debug
|
Why is macOS system printing quicker than Chrome or lpr? : <p>I noticed that printing the exact same pdf file to the exact same printer does not always take the same amount of time:</p>
<ul>
<li>Printing from macOS preview's default printing dialog is very fast.</li>
<li>Printing from Chrome browser's built-in print dialog is slower. The printer pauses for a moment after starting the print job.</li>
<li>Printing from the command line with <code>lpr</code> has the same effect as printing from Chrome's built-in print dialog.</li>
<li>However, switching to the system printing dialog withing Chrome makes things fast again.</li>
</ul>
<p>My goal is to make printing from the command line with <code>lpr</code> as smooth as from the system's dialog. What could actually make the difference?</p>
<p><code>lpr -o landscape /path/to/my/file.pdf</code> is my current command. The generated output is as expected, just too slow.</p>
| 0debug
|
UsedRange in vba macro : Instead of UsedRange, how do I copy the exact cell range? Thanks
Sub export_excel_to_word()
Set obj = CreateObject("Word.Application")
obj.Visible = True
Set newObj = obj.Documents.Add
ActiveSheet.UsedRange.Copy
newObj.Range.Paste
Application.CutCopyMode = False
obj.Activate
newObj.SaveAs Filename:=Application.ActiveWorkbook.Path & "\" & ActiveSheet.Name
End Sub
| 0debug
|
Can I dynamically updated the marker location google map by using below code : <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.2/jquery.min.js"></script>
<script src="https://maps.googleapis.com/maps/api/js?key=hgter...&libraries=places" type="text/javascript">
</script> <script>
var map = new google.maps.Map(document.getElementById('map-canvas'), {
center:{
lat: 12.971599,
lng: 77.594563
},
zoom:15
});
var marker = new google.maps.Marker({
position: {
lat: 12.971599,
lng: 77.594563
},
map: map,
draggable: true
});
var searchBox = new
google.maps.places.SearchBox(document.getElementById('searchmap'));
google.maps.event.addListener(searchBox,'places_changed',function(){
var places = searchBox.getPlaces();
var bounds = new google.maps.LatLngBounds();
var i,place;
for(i=0; place=places[i]; i++){
bounds.extend(place.geometry.location);
marker.setPosition(place.geometry.location);
}
map.fitBounds(bounds);
map.setZoom(15);
});
//here Latitude and Longitude value gets updated dynamically as the users drag
// and drop marker
google.maps.event.addListener(marker,'position_changed',function(){
var lat = marker.getPosition().lat();
var lng = marker.getPosition().lng();
$('#lat').val(lat);
$('#lng').val(lng);
});
</script>
| 0debug
|
static void test_pxe_e1000(void)
{
test_pxe_one("-device e1000,netdev=" NETNAME, false);
}
| 1threat
|
def dog_age(h_age):
if h_age < 0:
exit()
elif h_age <= 2:
d_age = h_age * 10.5
else:
d_age = 21 + (h_age - 2)*4
return d_age
| 0debug
|
static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
{
const int mb_x= s->mb_x;
const int mb_y= s->mb_y;
int i;
#if 0
if (s->interlaced_dct) {
dct_linesize = s->linesize * 2;
dct_offset = s->linesize;
} else {
dct_linesize = s->linesize;
dct_offset = s->linesize * 8;
}
#endif
if (s->mb_intra) {
UINT8 *ptr;
int wrap;
wrap = s->linesize;
ptr = s->new_picture[0] + (mb_y * 16 * wrap) + mb_x * 16;
get_pixels(s->block[0], ptr , wrap);
get_pixels(s->block[1], ptr + 8, wrap);
get_pixels(s->block[2], ptr + 8 * wrap , wrap);
get_pixels(s->block[3], ptr + 8 * wrap + 8, wrap);
wrap >>=1;
ptr = s->new_picture[1] + (mb_y * 8 * wrap) + mb_x * 8;
get_pixels(s->block[4], ptr, wrap);
ptr = s->new_picture[2] + (mb_y * 8 * wrap) + mb_x * 8;
get_pixels(s->block[5], ptr, wrap);
}else{
op_pixels_func *op_pix;
qpel_mc_func *op_qpix;
UINT8 *dest_y, *dest_cb, *dest_cr;
UINT8 *ptr;
int wrap;
dest_y = s->current_picture[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
dest_cb = s->current_picture[1] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;
dest_cr = s->current_picture[2] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;
if ((!s->no_rounding) || s->pict_type==B_TYPE){
op_pix = put_pixels_tab;
op_qpix= qpel_mc_rnd_tab;
}else{
op_pix = put_no_rnd_pixels_tab;
op_qpix= qpel_mc_no_rnd_tab;
}
if (s->mv_dir & MV_DIR_FORWARD) {
MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix, op_qpix);
if ((!s->no_rounding) || s->pict_type==B_TYPE)
op_pix = avg_pixels_tab;
else
op_pix = avg_no_rnd_pixels_tab;
}
if (s->mv_dir & MV_DIR_BACKWARD) {
MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix, op_qpix);
}
wrap = s->linesize;
ptr = s->new_picture[0] + (mb_y * 16 * wrap) + mb_x * 16;
diff_pixels(s->block[0], ptr , dest_y , wrap);
diff_pixels(s->block[1], ptr + 8, dest_y + 8, wrap);
diff_pixels(s->block[2], ptr + 8 * wrap , dest_y + 8 * wrap , wrap);
diff_pixels(s->block[3], ptr + 8 * wrap + 8, dest_y + 8 * wrap + 8, wrap);
wrap >>=1;
ptr = s->new_picture[1] + (mb_y * 8 * wrap) + mb_x * 8;
diff_pixels(s->block[4], ptr, dest_cb, wrap);
ptr = s->new_picture[2] + (mb_y * 8 * wrap) + mb_x * 8;
diff_pixels(s->block[5], ptr, dest_cr, wrap);
}
#if 0
{
float adap_parm;
adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_width*mb_y+mb_x] + 1.0) /
((s->mb_var[s->mb_width*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d",
(s->mb_type[s->mb_width*mb_y+mb_x] > 0) ? 'I' : 'P',
s->qscale, adap_parm, s->qscale*adap_parm,
s->mb_var[s->mb_width*mb_y+mb_x], s->avg_mb_var);
}
#endif
if (s->h263_pred && s->msmpeg4_version!=2) {
h263_dc_scale(s);
} else if (s->h263_aic) {
s->y_dc_scale = 2*s->qscale;
s->c_dc_scale = 2*s->qscale;
} else {
s->y_dc_scale = 8;
s->c_dc_scale = 8;
}
if(s->out_format==FMT_MJPEG){
for(i=0;i<6;i++) {
int overflow;
s->block_last_index[i] = dct_quantize(s, s->block[i], i, 8, &overflow);
if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
}
}else{
for(i=0;i<6;i++) {
int overflow;
s->block_last_index[i] = dct_quantize(s, s->block[i], i, s->qscale, &overflow);
if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
}
}
switch(s->out_format) {
case FMT_MPEG1:
mpeg1_encode_mb(s, s->block, motion_x, motion_y);
break;
case FMT_H263:
if (s->h263_msmpeg4)
msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
else if(s->h263_pred)
mpeg4_encode_mb(s, s->block, motion_x, motion_y);
else
h263_encode_mb(s, s->block, motion_x, motion_y);
break;
case FMT_MJPEG:
mjpeg_encode_mb(s, s->block);
break;
}
}
| 1threat
|
How can I create a Docker image for Solaris+Java (With JAVA_HOME)? : <p>I've tried though, but seems docker isn't supported yet. Still if any ways of doing so is available, let me know.</p>
| 0debug
|
If dictionary value exists multiple times in list of dicts, delete specific dict from list : <p>I have a function that returns a list of dictionaries like this:</p>
<pre><code>[{'Status': 'Deleted', 'Name': "My First Test"}, {'Status': 'Modified', 'Name': "My First Test"}]
</code></pre>
<p>As you can see, "My First Test" is in there twice. Normally this wouldn't be an issue, however, based on what I know about what's happening on the back-end, the only dict that I <em>actually</em> want is the "Modified" dict.</p>
<p>Essentially, I'm looking for a way to say "if dict['Status'] == 'Modified' and dict['Status'] == 'Deleted' for the same Name, delete the one with the 'Deleted' status."</p>
| 0debug
|
static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr,
target_ulong msrm, int keep_msrh)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
#if defined(TARGET_PPC64)
if (msr_is_64bit(env, msr)) {
nip = (uint64_t)nip;
msr &= (uint64_t)msrm;
} else {
nip = (uint32_t)nip;
msr = (uint32_t)(msr & msrm);
if (keep_msrh) {
msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
}
}
#else
nip = (uint32_t)nip;
msr &= (uint32_t)msrm;
#endif
env->nip = nip & ~((target_ulong)0x00000003);
hreg_store_msr(env, msr, 1);
#if defined(DEBUG_OP)
cpu_dump_rfi(env->nip, env->msr);
#endif
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
| 1threat
|
Navigation in iOS : <p>How to navigate from one ViewController to Another in iOS.As i was previously working on Android It was intent. I am not able to see proper documentation for iOS using swift. So please help me how to navigate from one view controller to another on a button click ! (I know button click func)</p>
| 0debug
|
float64 uint64_to_float64( uint64 a STATUS_PARAM )
{
if ( a == 0 ) return 0;
return normalizeRoundAndPackFloat64( 0, 0x43C, a STATUS_VAR );
}
| 1threat
|
Why do we use concrete clases in java? : <p><strong>why do we use concrete classes in java?</strong>
I've tried a lot to search a proper reason,but all i found is comparison between abstract class and concrete class.
i want to know in what kind of conditions we need to create concrete classes.</p>
| 0debug
|
How to print the 1398th prime number in python? : <p>hey guys i'm newbie please tell me how to make this code work to print the 1398th prime number.
<a href="https://code.sololearn.com/c1a3lvSiIF9w/?ref=app" rel="nofollow noreferrer">https://code.sololearn.com/c1a3lvSiIF9w/?ref=app</a></p>
<pre><code>#newbie
def IsPrime(X):
IsPrime = 1
for i in range(2):
if i < X:
if X % i == 0:
IsPrime = 0
if IsPrime == 0:
print ("X Is Prime")
else:
print ("X Is Not Prime")
else:
i = i + 1
else:
if IsPrime == 0:
print ("X Is Prime")
else:
print ("X Is Not Prime")
#newbie
for i in range(2):
C = 0
if C < 1398:
if IsPrime(i) == 1:
C = C + 1
i = i + 1
else:
i = i + 1
else:
print (i - 1)
#newbie
</code></pre>
| 0debug
|
static void encode_scale_factors(AVCodecContext *avctx, AACEncContext *s,
SingleChannelElement *sce)
{
int off = sce->sf_idx[0], diff;
int i, w;
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
for (i = 0; i < sce->ics.max_sfb; i++) {
if (!sce->zeroes[w*16 + i]) {
diff = sce->sf_idx[w*16 + i] - off + SCALE_DIFF_ZERO;
if (diff < 0 || diff > 120)
av_log(avctx, AV_LOG_ERROR, "Scalefactor difference is too big to be coded\n");
off = sce->sf_idx[w*16 + i];
put_bits(&s->pb, ff_aac_scalefactor_bits[diff], ff_aac_scalefactor_code[diff]);
}
}
}
}
| 1threat
|
static int create_header32(DumpState *s)
{
int ret = 0;
DiskDumpHeader32 *dh = NULL;
KdumpSubHeader32 *kh = NULL;
size_t size;
int endian = s->dump_info.d_endian;
uint32_t block_size;
uint32_t sub_hdr_size;
uint32_t bitmap_blocks;
uint32_t status = 0;
uint64_t offset_note;
size = sizeof(DiskDumpHeader32);
dh = g_malloc0(size);
strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
dh->header_version = cpu_convert_to_target32(6, endian);
block_size = s->page_size;
dh->block_size = cpu_convert_to_target32(block_size, endian);
sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
dh->sub_hdr_size = cpu_convert_to_target32(sub_hdr_size, endian);
dh->max_mapnr = cpu_convert_to_target32(MIN(s->max_mapnr, UINT_MAX),
endian);
dh->nr_cpus = cpu_convert_to_target32(s->nr_cpus, endian);
bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
dh->bitmap_blocks = cpu_convert_to_target32(bitmap_blocks, endian);
strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
status |= DUMP_DH_COMPRESSED_ZLIB;
}
#ifdef CONFIG_LZO
if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
status |= DUMP_DH_COMPRESSED_LZO;
}
#endif
#ifdef CONFIG_SNAPPY
if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
status |= DUMP_DH_COMPRESSED_SNAPPY;
}
#endif
dh->status = cpu_convert_to_target32(status, endian);
if (write_buffer(s->fd, 0, dh, size) < 0) {
dump_error(s, "dump: failed to write disk dump header.\n");
ret = -1;
goto out;
}
size = sizeof(KdumpSubHeader32);
kh = g_malloc0(size);
kh->max_mapnr_64 = cpu_convert_to_target64(s->max_mapnr, endian);
kh->phys_base = cpu_convert_to_target32(PHYS_BASE, endian);
kh->dump_level = cpu_convert_to_target32(DUMP_LEVEL, endian);
offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
kh->offset_note = cpu_convert_to_target64(offset_note, endian);
kh->note_size = cpu_convert_to_target32(s->note_size, endian);
if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
block_size, kh, size) < 0) {
dump_error(s, "dump: failed to write kdump sub header.\n");
ret = -1;
goto out;
}
s->note_buf = g_malloc0(s->note_size);
s->note_buf_offset = 0;
if (write_elf32_notes(buf_write_note, s) < 0) {
ret = -1;
goto out;
}
if (write_buffer(s->fd, offset_note, s->note_buf,
s->note_size) < 0) {
dump_error(s, "dump: failed to write notes");
ret = -1;
goto out;
}
s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
block_size;
s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
block_size;
out:
g_free(dh);
g_free(kh);
g_free(s->note_buf);
return ret;
}
| 1threat
|
struct omap_gpmc_s *omap_gpmc_init(target_phys_addr_t base, qemu_irq irq)
{
struct omap_gpmc_s *s = (struct omap_gpmc_s *)
g_malloc0(sizeof(struct omap_gpmc_s));
memory_region_init_io(&s->iomem, &omap_gpmc_ops, s, "omap-gpmc", 0x1000);
memory_region_add_subregion(get_system_memory(), base, &s->iomem);
omap_gpmc_reset(s);
return s;
}
| 1threat
|
How is cross_val_score calculated in sklearn? : <p>Is it mean square error? The <a href="http://scikit-learn.org/stable/modules/cross_validation.html#computing-cross-validated-metrics" rel="nofollow noreferrer">documentation</a> doesn't give much detail.</p>
| 0debug
|
How to run multiple mix tasks in one command : <p>I have multiple <code>mix</code> tasks to run in succession. With other build tools, it is possible to run the tasks with a single statement, which saves any startup overhead after the first task. How can this be done with Elixir's <code>mix</code> command?</p>
| 0debug
|
PHP: $this is not available in non object context : I have a class with the following structure:
/**
* @property int ticket_id
*/
class Test {
public function __construct( $ticket_id ) {
$this->ticket_id = $ticket_id;
$this->register();
}
/**
* Register all hooks
*/
public function register(): void {
add_action( 'wp_ajax_test', array( $this, 'test' ) );
}
public function test(): void {
require_once('test.php');
}
}
In my `test.php` I've tried using my parameter `$this->ticket_id` but I got an error that `$this` is not available in non object context. Why? I thought I can use it inside any required file too.
| 0debug
|
Can different classes be operated in python? Like datetime added date? : <p>Can different classes be operated in python?
Like datetime added/reduced date: datetime.datetime(2018,07,18) - datetime.date(2018,07,18) ?</p>
| 0debug
|
static int jpeg2000_decode_packet(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile, int *tp_index,
Jpeg2000CodingStyle *codsty,
Jpeg2000ResLevel *rlevel, int precno,
int layno, uint8_t *expn, int numgbits)
{
int bandno, cblkno, ret, nb_code_blocks;
int cwsno;
if (bytestream2_get_bytes_left(&s->g) == 0 && s->bit_index == 8) {
if (*tp_index < FF_ARRAY_ELEMS(tile->tile_part) - 1) {
s->g = tile->tile_part[++(*tp_index)].tpg;
}
}
if (bytestream2_peek_be32(&s->g) == 0xFF910004)
bytestream2_skip(&s->g, 6);
if (!(ret = get_bits(s, 1))) {
jpeg2000_flush(s);
return 0;
} else if (ret < 0)
return ret;
for (bandno = 0; bandno < rlevel->nbands; bandno++) {
Jpeg2000Band *band = rlevel->band + bandno;
Jpeg2000Prec *prec = band->prec + precno;
if (band->coord[0][0] == band->coord[0][1] ||
band->coord[1][0] == band->coord[1][1])
continue;
nb_code_blocks = prec->nb_codeblocks_height *
prec->nb_codeblocks_width;
for (cblkno = 0; cblkno < nb_code_blocks; cblkno++) {
Jpeg2000Cblk *cblk = prec->cblk + cblkno;
int incl, newpasses, llen;
if (cblk->npasses)
incl = get_bits(s, 1);
else
incl = tag_tree_decode(s, prec->cblkincl + cblkno, layno + 1) == layno;
if (!incl)
continue;
else if (incl < 0)
return incl;
if (!cblk->npasses) {
int v = expn[bandno] + numgbits - 1 -
tag_tree_decode(s, prec->zerobits + cblkno, 100);
if (v < 0) {
av_log(s->avctx, AV_LOG_ERROR,
"nonzerobits %d invalid\n", v);
return AVERROR_INVALIDDATA;
}
cblk->nonzerobits = v;
}
if ((newpasses = getnpasses(s)) < 0)
return newpasses;
av_assert2(newpasses > 0);
if (cblk->npasses + newpasses >= JPEG2000_MAX_PASSES) {
avpriv_request_sample(s->avctx, "Too many passes\n");
return AVERROR_PATCHWELCOME;
}
if ((llen = getlblockinc(s)) < 0)
return llen;
if (cblk->lblock + llen + av_log2(newpasses) > 16) {
avpriv_request_sample(s->avctx,
"Block with length beyond 16 bits\n");
return AVERROR_PATCHWELCOME;
}
cblk->lblock += llen;
cblk->nb_lengthinc = 0;
cblk->nb_terminationsinc = 0;
do {
int newpasses1 = 0;
while (newpasses1 < newpasses) {
newpasses1 ++;
if (needs_termination(codsty->cblk_style, cblk->npasses + newpasses1 - 1)) {
cblk->nb_terminationsinc ++;
break;
}
}
if ((ret = get_bits(s, av_log2(newpasses1) + cblk->lblock)) < 0)
return ret;
if (ret > sizeof(cblk->data)) {
avpriv_request_sample(s->avctx,
"Block with lengthinc greater than %"SIZE_SPECIFIER"",
sizeof(cblk->data));
return AVERROR_PATCHWELCOME;
}
cblk->lengthinc[cblk->nb_lengthinc++] = ret;
cblk->npasses += newpasses1;
newpasses -= newpasses1;
} while(newpasses);
}
}
jpeg2000_flush(s);
if (codsty->csty & JPEG2000_CSTY_EPH) {
if (bytestream2_peek_be16(&s->g) == JPEG2000_EPH)
bytestream2_skip(&s->g, 2);
else
av_log(s->avctx, AV_LOG_ERROR, "EPH marker not found.\n");
}
for (bandno = 0; bandno < rlevel->nbands; bandno++) {
Jpeg2000Band *band = rlevel->band + bandno;
Jpeg2000Prec *prec = band->prec + precno;
nb_code_blocks = prec->nb_codeblocks_height * prec->nb_codeblocks_width;
for (cblkno = 0; cblkno < nb_code_blocks; cblkno++) {
Jpeg2000Cblk *cblk = prec->cblk + cblkno;
for (cwsno = 0; cwsno < cblk->nb_lengthinc; cwsno ++) {
if ( bytestream2_get_bytes_left(&s->g) < cblk->lengthinc[cwsno]
|| sizeof(cblk->data) < cblk->length + cblk->lengthinc[cwsno] + 4
) {
av_log(s->avctx, AV_LOG_ERROR,
"Block length %"PRIu16" or lengthinc %d is too large, left %d\n",
cblk->length, cblk->lengthinc[cwsno], bytestream2_get_bytes_left(&s->g));
return AVERROR_INVALIDDATA;
}
bytestream2_get_bufferu(&s->g, cblk->data + cblk->length, cblk->lengthinc[cwsno]);
cblk->length += cblk->lengthinc[cwsno];
cblk->lengthinc[cwsno] = 0;
if (cblk->nb_terminationsinc) {
cblk->nb_terminationsinc--;
cblk->nb_terminations++;
cblk->data[cblk->length++] = 0xFF;
cblk->data[cblk->length++] = 0xFF;
cblk->data_start[cblk->nb_terminations] = cblk->length;
}
}
}
}
return 0;
}
| 1threat
|
Typescript compile error: error TS1109: Expression expected : <p>I have this very simple typescript file:</p>
<pre><code>export default const VERSION: number = 2016030600;
</code></pre>
<p>Then I run:</p>
<pre><code>tsc version.ts
</code></pre>
<p>typescript compiler (Version 1.8.7, OS X) complains:</p>
<pre><code>version.ts(1,16): error TS1109: Expression expected.
</code></pre>
<p>How can I fix this error? Thank you.</p>
| 0debug
|
.NET Core console application, how to configure appSettings per environment? : <p>I have a .NET Core 1.0.0 console application and two environments. I need to be able to use <code>appSettings.dev.json</code> and <code>appSettings.test.json</code> based on environment variables I set at run time. This seems to be quite straight forward for ASP.NET Core web applications, via dependency injection and IHostingEnvironment and the EnvironmentName env. variable, however how should I wire things up for the console application (besides writing my own custom code that uses <code>Microsoft.Framework.Configuration.EnvironmentVariables</code>)? </p>
<p>Thank you. </p>
| 0debug
|
How to create a map like the one that's there on ikman.lk : <p>I want to know how to create a map like the one that is there on <a href="http://ikman.lk/" rel="nofollow">ikman.lk</a></p>
| 0debug
|
static int vnc_client_io_error(VncState *vs, int ret, int last_errno)
{
if (ret == 0 || ret == -1) {
if (ret == -1) {
switch (last_errno) {
case EINTR:
case EAGAIN:
#ifdef _WIN32
case WSAEWOULDBLOCK:
#endif
return 0;
default:
break;
}
}
VNC_DEBUG("Closing down client sock %d %d\n", ret, ret < 0 ? last_errno : 0);
qemu_set_fd_handler2(vs->csock, NULL, NULL, NULL, NULL);
closesocket(vs->csock);
qemu_del_timer(vs->timer);
qemu_free_timer(vs->timer);
if (vs->input.buffer) qemu_free(vs->input.buffer);
if (vs->output.buffer) qemu_free(vs->output.buffer);
#ifdef CONFIG_VNC_TLS
if (vs->tls_session) {
gnutls_deinit(vs->tls_session);
vs->tls_session = NULL;
}
#endif
audio_del(vs);
VncState *p, *parent = NULL;
for (p = vs->vd->clients; p != NULL; p = p->next) {
if (p == vs) {
if (parent)
parent->next = p->next;
else
vs->vd->clients = p->next;
break;
}
parent = p;
}
if (!vs->vd->clients)
dcl->idle = 1;
qemu_free(vs->old_data);
qemu_free(vs);
return 0;
}
return ret;
}
| 1threat
|
int blk_get_max_transfer_length(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
if (bs) {
return bs->bl.max_transfer_length;
} else {
return 0;
}
}
| 1threat
|
document.location = 'http://evil.com?username=' + user_input;
| 1threat
|
static void nbd_restart_write(void *opaque)
{
BlockDriverState *bs = opaque;
qemu_coroutine_enter(nbd_get_client_session(bs)->send_coroutine, NULL);
}
| 1threat
|
NSURLConnection finished with error - code -1022 : <p>Guys
i try to learn webview it can't load give me error like: </p>
<blockquote>
<p>NSURLConnection finished with error - code -1022</p>
</blockquote>
<pre><code>- (void)viewDidLoad {
[super viewDidLoad];
NSString *urlString = @"http://www.sourcefreeze.com";
NSURL *url = [NSURL URLWithString:urlString];
NSURLRequest *request = [NSURLRequest requestWithURL:url];
[_webView loadRequest:request];
}
</code></pre>
| 0debug
|
merge multiple rows having some identity to a new one that have the sum of a culumn thats is distinct between thems : i have table like this :
id name qt
0 mm 4
1 mm 5
2 xx 8
i want update it or get new table that will have
id name qt
0 mm 9 (sum of the two or multiple some identical )
1 xx 8
| 0debug
|
are updated health checks causing App Engine deployment to fail? : <p>we updated our google app engine health checks from the legacy version to the new version using and now our deployments are failing. Nothing else on the project has changed. We tested the default settings and then extended checks just in case. </p>
<p>This is the error:
<code>ERROR: (gcloud.app.deploy) Error Response: [4] Your deployment has failed to become healthy in the allotted time and therefore was rolled back. If you believe this was an error, try adjusting the 'app_start_timeout_sec' setting in the 'readiness_check' section.
</code></p>
<p>This is our app.yaml:</p>
<pre><code>liveness_check:
check_interval_sec: 120
timeout_sec: 40
failure_threshold: 5
success_threshold: 5
initial_delay_sec: 500
readiness_check:
check_interval_sec: 120
timeout_sec: 40
failure_threshold: 5
success_threshold: 5
app_start_timeout_sec: 1500
</code></pre>
<p>Unfortunately, no matter the configuration, both the readiness and liveness checks are throwing 404s.</p>
<p>What could be causing the problem? and how can we debug this?
Is it possible to rollback to the legacy health checks?</p>
| 0debug
|
static int pci_qdev_init(DeviceState *qdev)
{
PCIDevice *pci_dev = (PCIDevice *)qdev;
PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
PCIBus *bus;
int rc;
bool is_default_rom;
if (pc->is_express) {
pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
bus = PCI_BUS(qdev_get_parent_bus(qdev));
pci_dev = do_pci_register_device(pci_dev, bus,
object_get_typename(OBJECT(qdev)),
pci_dev->devfn);
if (pci_dev == NULL)
return -1;
if (pc->init) {
rc = pc->init(pci_dev);
if (rc != 0) {
do_pci_unregister_device(pci_dev);
return rc;
}
}
is_default_rom = false;
if (pci_dev->romfile == NULL && pc->romfile != NULL) {
pci_dev->romfile = g_strdup(pc->romfile);
is_default_rom = true;
}
pci_add_option_rom(pci_dev, is_default_rom);
return 0;
}
| 1threat
|
static void body(uint32_t ABCD[4], uint32_t X[16])
{
int i av_unused;
uint32_t t;
uint32_t a = ABCD[3];
uint32_t b = ABCD[2];
uint32_t c = ABCD[1];
uint32_t d = ABCD[0];
#if HAVE_BIGENDIAN
for (i = 0; i < 16; i++)
X[i] = av_bswap32(X[i]);
#endif
#if CONFIG_SMALL
for (i = 0; i < 64; i++) {
CORE(i, a, b, c, d);
t = d;
d = c;
c = b;
b = a;
a = t;
}
#else
#define CORE2(i) \
CORE( i, a,b,c,d); CORE((i+1),d,a,b,c); \
CORE((i+2),c,d,a,b); CORE((i+3),b,c,d,a)
#define CORE4(i) CORE2(i); CORE2((i+4)); CORE2((i+8)); CORE2((i+12))
CORE4(0); CORE4(16); CORE4(32); CORE4(48);
#endif
ABCD[0] += d;
ABCD[1] += c;
ABCD[2] += b;
ABCD[3] += a;
}
| 1threat
|
Access already opened Excel and Internet Explorer to get data : <p>there
I'm trying to make a Robotic Process Automation(called RPA) using python.
There are two windows; one is excel and the other is web.</p>
<p>The Procedure is:</p>
<ol>
<li>Both excel and web must be opened before run the code(It never be changed, there are no alternatives)</li>
<li>the data on the web is copied and pasted to excel file.</li>
<li>done!</li>
</ol>
<p>It looks easy... but, selenium cannot access to already opened web and cannot access to already opened excel file.</p>
<p>of course, it could be easy to access to new web and new or load excel.</p>
<p>It doesn't matter selenium and are not used. </p>
<p>Does somebody know how to solve it??</p>
<p>Thank you.</p>
| 0debug
|
gdb_handlesig (CPUState *env, int sig)
{
GDBState *s;
char buf[256];
int n;
s = gdbserver_state;
if (gdbserver_fd < 0 || s->fd < 0)
return sig;
cpu_single_step(env, 0);
tb_flush(env);
if (sig != 0)
{
snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
put_packet(s, buf);
}
if (s->fd < 0)
return sig;
sig = 0;
s->state = RS_IDLE;
s->running_state = 0;
while (s->running_state == 0) {
n = read (s->fd, buf, 256);
if (n > 0)
{
int i;
for (i = 0; i < n; i++)
gdb_read_byte (s, buf[i]);
}
else if (n == 0 || errno != EAGAIN)
{
return sig;
}
}
sig = s->signal;
s->signal = 0;
return sig;
}
| 1threat
|
Firebase: getting value of a child without knowing it's key : How can I get the value of a child element without knowing the key of that child element? Example data structure below;
->cars
-->3282jasjd893j: doors
-->819idid82jkdf: windows
-->fjf842jr8448r: audi
I need to get the values "doors", "windows", "audi", without knowing the keys.
Any advice will be greatly appreciated!
| 0debug
|
static int update_offset(RTMPContext *rt, int size)
{
int old_flv_size;
if (rt->flv_off < rt->flv_size) {
old_flv_size = rt->flv_size;
rt->flv_size += size + 15;
} else {
old_flv_size = 0;
rt->flv_size = size + 15;
rt->flv_off = 0;
}
return old_flv_size;
}
| 1threat
|
C | where's wrong on my program? : [Code link](https://ideone.com/p8QLiB)
Where's wrong?
Why the result r same?
If added a `fflush` in `while` or out of `while`, the result also same.
Plz help me, tell what reason.
Thanks!
| 0debug
|
static void ecc_reset(void *opaque)
{
ECCState *s = opaque;
int i;
s->regs[ECC_MER] &= (ECC_MER_VER | ECC_MER_IMPL);
s->regs[ECC_MER] |= ECC_MER_MRR;
s->regs[ECC_MDR] = 0x20;
s->regs[ECC_MFSR] = 0;
s->regs[ECC_VCR] = 0;
s->regs[ECC_MFAR0] = 0x07c00000;
s->regs[ECC_MFAR1] = 0;
s->regs[ECC_DR] = 0;
s->regs[ECC_ECR0] = 0;
s->regs[ECC_ECR1] = 0;
for (i = 1; i < ECC_NREGS; i++)
s->regs[i] = 0;
}
| 1threat
|
How to set the default of a JSONField to empty list in Django and django-jsonfield? : <h1>Question</h1>
<p>What is the best way to set a <a href="https://github.com/bradjasper/django-jsonfield" rel="noreferrer"><code>JSONField</code></a> to have the default value of a new list in <code>django</code>?</p>
<h2>Context</h2>
<p>There is a model where one of the fields is a list of items. In the case where there are no items set, the model should have an empty list.</p>
<h2>Current solution</h2>
<pre><code>from django.models import Model
class MyModel(Model):
the_list_field = JSONField(default=[])
</code></pre>
<p>Is this the best way to do it? Should it be switched to use <code>list</code> instead?</p>
<p>Thanks!</p>
| 0debug
|
static int mpeg4_decode_sprite_trajectory(Mpeg4DecContext *ctx, GetBitContext *gb)
{
MpegEncContext *s = &ctx->m;
int a = 2 << s->sprite_warping_accuracy;
int rho = 3 - s->sprite_warping_accuracy;
int r = 16 / a;
int alpha = 0;
int beta = 0;
int w = s->width;
int h = s->height;
int min_ab, i, w2, h2, w3, h3;
int sprite_ref[4][2];
int virtual_ref[2][2];
const int vop_ref[4][2] = { { 0, 0 }, { s->width, 0 },
{ 0, s->height }, { s->width, s->height } };
int d[4][2] = { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } };
if (w <= 0 || h <= 0)
return AVERROR_INVALIDDATA;
for (i = 0; i < ctx->num_sprite_warping_points; i++) {
int length;
int x = 0, y = 0;
length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3);
if (length)
x = get_xbits(gb, length);
if (!(ctx->divx_version == 500 && ctx->divx_build == 413))
skip_bits1(gb);
length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3);
if (length)
y = get_xbits(gb, length);
skip_bits1(gb);
ctx->sprite_traj[i][0] = d[i][0] = x;
ctx->sprite_traj[i][1] = d[i][1] = y;
}
for (; i < 4; i++)
ctx->sprite_traj[i][0] = ctx->sprite_traj[i][1] = 0;
while ((1 << alpha) < w)
alpha++;
while ((1 << beta) < h)
beta++;
w2 = 1 << alpha;
h2 = 1 << beta;
if (ctx->divx_version == 500 && ctx->divx_build == 413) {
sprite_ref[0][0] = a * vop_ref[0][0] + d[0][0];
sprite_ref[0][1] = a * vop_ref[0][1] + d[0][1];
sprite_ref[1][0] = a * vop_ref[1][0] + d[0][0] + d[1][0];
sprite_ref[1][1] = a * vop_ref[1][1] + d[0][1] + d[1][1];
sprite_ref[2][0] = a * vop_ref[2][0] + d[0][0] + d[2][0];
sprite_ref[2][1] = a * vop_ref[2][1] + d[0][1] + d[2][1];
} else {
sprite_ref[0][0] = (a >> 1) * (2 * vop_ref[0][0] + d[0][0]);
sprite_ref[0][1] = (a >> 1) * (2 * vop_ref[0][1] + d[0][1]);
sprite_ref[1][0] = (a >> 1) * (2 * vop_ref[1][0] + d[0][0] + d[1][0]);
sprite_ref[1][1] = (a >> 1) * (2 * vop_ref[1][1] + d[0][1] + d[1][1]);
sprite_ref[2][0] = (a >> 1) * (2 * vop_ref[2][0] + d[0][0] + d[2][0]);
sprite_ref[2][1] = (a >> 1) * (2 * vop_ref[2][1] + d[0][1] + d[2][1]);
}
virtual_ref[0][0] = 16 * (vop_ref[0][0] + w2) +
ROUNDED_DIV(((w - w2) *
(r * sprite_ref[0][0] - 16 * vop_ref[0][0]) +
w2 * (r * sprite_ref[1][0] - 16 * vop_ref[1][0])), w);
virtual_ref[0][1] = 16 * vop_ref[0][1] +
ROUNDED_DIV(((w - w2) *
(r * sprite_ref[0][1] - 16 * vop_ref[0][1]) +
w2 * (r * sprite_ref[1][1] - 16 * vop_ref[1][1])), w);
virtual_ref[1][0] = 16 * vop_ref[0][0] +
ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][0] - 16 * vop_ref[0][0]) +
h2 * (r * sprite_ref[2][0] - 16 * vop_ref[2][0])), h);
virtual_ref[1][1] = 16 * (vop_ref[0][1] + h2) +
ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][1] - 16 * vop_ref[0][1]) +
h2 * (r * sprite_ref[2][1] - 16 * vop_ref[2][1])), h);
switch (ctx->num_sprite_warping_points) {
case 0:
s->sprite_offset[0][0] =
s->sprite_offset[0][1] =
s->sprite_offset[1][0] =
s->sprite_offset[1][1] = 0;
s->sprite_delta[0][0] = a;
s->sprite_delta[0][1] =
s->sprite_delta[1][0] = 0;
s->sprite_delta[1][1] = a;
ctx->sprite_shift[0] =
ctx->sprite_shift[1] = 0;
break;
case 1:
s->sprite_offset[0][0] = sprite_ref[0][0] - a * vop_ref[0][0];
s->sprite_offset[0][1] = sprite_ref[0][1] - a * vop_ref[0][1];
s->sprite_offset[1][0] = ((sprite_ref[0][0] >> 1) | (sprite_ref[0][0] & 1)) -
a * (vop_ref[0][0] / 2);
s->sprite_offset[1][1] = ((sprite_ref[0][1] >> 1) | (sprite_ref[0][1] & 1)) -
a * (vop_ref[0][1] / 2);
s->sprite_delta[0][0] = a;
s->sprite_delta[0][1] =
s->sprite_delta[1][0] = 0;
s->sprite_delta[1][1] = a;
ctx->sprite_shift[0] =
ctx->sprite_shift[1] = 0;
break;
case 2:
s->sprite_offset[0][0] = (sprite_ref[0][0] << (alpha + rho)) +
(-r * sprite_ref[0][0] + virtual_ref[0][0]) *
(-vop_ref[0][0]) +
(r * sprite_ref[0][1] - virtual_ref[0][1]) *
(-vop_ref[0][1]) + (1 << (alpha + rho - 1));
s->sprite_offset[0][1] = (sprite_ref[0][1] << (alpha + rho)) +
(-r * sprite_ref[0][1] + virtual_ref[0][1]) *
(-vop_ref[0][0]) +
(-r * sprite_ref[0][0] + virtual_ref[0][0]) *
(-vop_ref[0][1]) + (1 << (alpha + rho - 1));
s->sprite_offset[1][0] = ((-r * sprite_ref[0][0] + virtual_ref[0][0]) *
(-2 * vop_ref[0][0] + 1) +
(r * sprite_ref[0][1] - virtual_ref[0][1]) *
(-2 * vop_ref[0][1] + 1) + 2 * w2 * r *
sprite_ref[0][0] - 16 * w2 + (1 << (alpha + rho + 1)));
s->sprite_offset[1][1] = ((-r * sprite_ref[0][1] + virtual_ref[0][1]) *
(-2 * vop_ref[0][0] + 1) +
(-r * sprite_ref[0][0] + virtual_ref[0][0]) *
(-2 * vop_ref[0][1] + 1) + 2 * w2 * r *
sprite_ref[0][1] - 16 * w2 + (1 << (alpha + rho + 1)));
s->sprite_delta[0][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]);
s->sprite_delta[0][1] = (+r * sprite_ref[0][1] - virtual_ref[0][1]);
s->sprite_delta[1][0] = (-r * sprite_ref[0][1] + virtual_ref[0][1]);
s->sprite_delta[1][1] = (-r * sprite_ref[0][0] + virtual_ref[0][0]);
ctx->sprite_shift[0] = alpha + rho;
ctx->sprite_shift[1] = alpha + rho + 2;
break;
case 3:
min_ab = FFMIN(alpha, beta);
w3 = w2 >> min_ab;
h3 = h2 >> min_ab;
s->sprite_offset[0][0] = (sprite_ref[0][0] << (alpha + beta + rho - min_ab)) +
(-r * sprite_ref[0][0] + virtual_ref[0][0]) *
h3 * (-vop_ref[0][0]) +
(-r * sprite_ref[0][0] + virtual_ref[1][0]) *
w3 * (-vop_ref[0][1]) +
(1 << (alpha + beta + rho - min_ab - 1));
s->sprite_offset[0][1] = (sprite_ref[0][1] << (alpha + beta + rho - min_ab)) +
(-r * sprite_ref[0][1] + virtual_ref[0][1]) *
h3 * (-vop_ref[0][0]) +
(-r * sprite_ref[0][1] + virtual_ref[1][1]) *
w3 * (-vop_ref[0][1]) +
(1 << (alpha + beta + rho - min_ab - 1));
s->sprite_offset[1][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]) *
h3 * (-2 * vop_ref[0][0] + 1) +
(-r * sprite_ref[0][0] + virtual_ref[1][0]) *
w3 * (-2 * vop_ref[0][1] + 1) + 2 * w2 * h3 *
r * sprite_ref[0][0] - 16 * w2 * h3 +
(1 << (alpha + beta + rho - min_ab + 1));
s->sprite_offset[1][1] = (-r * sprite_ref[0][1] + virtual_ref[0][1]) *
h3 * (-2 * vop_ref[0][0] + 1) +
(-r * sprite_ref[0][1] + virtual_ref[1][1]) *
w3 * (-2 * vop_ref[0][1] + 1) + 2 * w2 * h3 *
r * sprite_ref[0][1] - 16 * w2 * h3 +
(1 << (alpha + beta + rho - min_ab + 1));
s->sprite_delta[0][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]) * h3;
s->sprite_delta[0][1] = (-r * sprite_ref[0][0] + virtual_ref[1][0]) * w3;
s->sprite_delta[1][0] = (-r * sprite_ref[0][1] + virtual_ref[0][1]) * h3;
s->sprite_delta[1][1] = (-r * sprite_ref[0][1] + virtual_ref[1][1]) * w3;
ctx->sprite_shift[0] = alpha + beta + rho - min_ab;
ctx->sprite_shift[1] = alpha + beta + rho - min_ab + 2;
break;
}
if (s->sprite_delta[0][0] == a << ctx->sprite_shift[0] &&
s->sprite_delta[0][1] == 0 &&
s->sprite_delta[1][0] == 0 &&
s->sprite_delta[1][1] == a << ctx->sprite_shift[0]) {
s->sprite_offset[0][0] >>= ctx->sprite_shift[0];
s->sprite_offset[0][1] >>= ctx->sprite_shift[0];
s->sprite_offset[1][0] >>= ctx->sprite_shift[1];
s->sprite_offset[1][1] >>= ctx->sprite_shift[1];
s->sprite_delta[0][0] = a;
s->sprite_delta[0][1] = 0;
s->sprite_delta[1][0] = 0;
s->sprite_delta[1][1] = a;
ctx->sprite_shift[0] = 0;
ctx->sprite_shift[1] = 0;
s->real_sprite_warping_points = 1;
} else {
int shift_y = 16 - ctx->sprite_shift[0];
int shift_c = 16 - ctx->sprite_shift[1];
for (i = 0; i < 2; i++) {
s->sprite_offset[0][i] <<= shift_y;
s->sprite_offset[1][i] <<= shift_c;
s->sprite_delta[0][i] <<= shift_y;
s->sprite_delta[1][i] <<= shift_y;
ctx->sprite_shift[i] = 16;
}
s->real_sprite_warping_points = ctx->num_sprite_warping_points;
}
return 0;
}
| 1threat
|
Python - Get output of all prints : <p>I wrote a python script that has a lot of data prints to the user.
I now need to save all of the print statements to a single txt/csv file.</p>
<p>I wonder what is the fastest and most efficient way to achieve this.
I could do this for each row at the time but prefer to this once for the entire script.</p>
| 0debug
|
static int usb_msd_initfn_storage(USBDevice *dev)
{
MSDState *s = DO_UPCAST(MSDState, dev, dev);
BlockDriverState *bs = s->conf.bs;
SCSIDevice *scsi_dev;
Error *err = NULL;
if (!bs) {
error_report("drive property not set");
return -1;
}
blkconf_serial(&s->conf, &dev->serial);
bdrv_detach_dev(bs, &s->dev.qdev);
s->conf.bs = NULL;
usb_desc_create_serial(dev);
usb_desc_init(dev);
scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev),
&usb_msd_scsi_info_storage, NULL);
scsi_dev = scsi_bus_legacy_add_drive(&s->bus, bs, 0, !!s->removable,
s->conf.bootindex, dev->serial,
&err);
if (!scsi_dev) {
return -1;
}
s->bus.qbus.allow_hotplug = 0;
usb_msd_handle_reset(dev);
if (bdrv_key_required(bs)) {
if (cur_mon) {
monitor_read_bdrv_key_start(cur_mon, bs, usb_msd_password_cb, s);
s->dev.auto_attach = 0;
} else {
autostart = 0;
}
}
return 0;
}
| 1threat
|
google fonts + webpack : <p>I am new to webpack 2.2 ; I would like to know the best way to integrate a Google font within my project.</p>
<p>I am using the Webpack HTML plugin to generate an <code>index.html</code> from a template. So for the moment I hard-coded the Google font CSS directly in a <code><script></code> tag but I do not really like this 'solution' since it does not really use webpack at all:</p>
<pre><code><!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<link href="https://fonts.googleapis.com/css?family=Love+Ya+Like+A+Sister" rel="stylesheet">
<body>
<div id='app'/>
</body>
</html>
</code></pre>
| 0debug
|
AWS API Gateway error: API Gateway does not have permission to assume the provided role as S3 proxy : <p>There are similar questions but they have answers that I have tried.
I'm not sure what I could be doing wrong but any help would appreciated.</p>
<p>Test details:
<a href="https://i.stack.imgur.com/MtkuV.png" rel="noreferrer"><img src="https://i.stack.imgur.com/MtkuV.png" alt="enter image description here"></a></p>
<p>The Error from a method-execution test; PUT request:</p>
<pre><code>Execution log for request test-request
Mon Oct 16 10:13:47 UTC 2017 : Starting execution for request: test-invoke-request
Mon Oct 16 10:13:47 UTC 2017 : HTTP Method: PUT, Resource Path: /pop-data-xmlz/test.xml
Mon Oct 16 10:13:47 UTC 2017 : Method request path: {item=test.xml, folder=pop-data-xmlz}
Mon Oct 16 10:13:47 UTC 2017 : Method request query string: {}
Mon Oct 16 10:13:47 UTC 2017 : Method request headers: {Content-Type=application/xml}
Mon Oct 16 10:13:47 UTC 2017 : Method request body before transformations: <test>
test string
</test>
Mon Oct 16 10:13:47 UTC 2017 : Request validation succeeded for content type application/json
Mon Oct 16 10:13:47 UTC 2017 : Execution failed due to configuration error: API Gateway does not have permission to assume the provided role
Mon Oct 16 10:13:47 UTC 2017 : Method completed with status: 500
</code></pre>
<p>I am following the API Gateway To S3 tutorial (<a href="http://docs.aws.amazon.com/apigateway/latest/developerguide/integrating-api-with-aws-services-s3.html" rel="noreferrer">http://docs.aws.amazon.com/apigateway/latest/developerguide/integrating-api-with-aws-services-s3.html</a>) and attempting to execute a PUT request.</p>
<p>The API Gateway is in us-east-1 and the S3 bucket in us-east-2.</p>
<p>The created Role: <strong>APIGatewayProxyCustom</strong></p>
<p>A policy (pop-date-ingest) is attached that allows PUT request to S3 buckets.
<a href="https://i.stack.imgur.com/Rygj7.png" rel="noreferrer"><img src="https://i.stack.imgur.com/Rygj7.png" alt="enter image description here"></a></p>
<p>The Role has a trust relationship set:
<a href="https://i.stack.imgur.com/7lZST.png" rel="noreferrer"><img src="https://i.stack.imgur.com/7lZST.png" alt="enter image description here"></a></p>
| 0debug
|
Insert Image in the password protected PDF file Objective C : <p>The insertion of images is easy in the PDF files in iOS. But is there any way to insert image in password protected PDF.</p>
| 0debug
|
static inline void range_dec_normalize(APEContext *ctx)
{
while (ctx->rc.range <= BOTTOM_VALUE) {
ctx->rc.buffer <<= 8;
if(ctx->ptr < ctx->data_end)
ctx->rc.buffer += *ctx->ptr;
ctx->ptr++;
ctx->rc.low = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
ctx->rc.range <<= 8;
}
}
| 1threat
|
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
TranslationBlock **last_tb, int *tb_exit,
SyncClocks *sc)
{
uintptr_t ret;
if (unlikely(atomic_read(&cpu->exit_request))) {
return;
}
trace_exec_tb(tb, tb->pc);
ret = cpu_tb_exec(cpu, tb);
*last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
*tb_exit = ret & TB_EXIT_MASK;
switch (*tb_exit) {
case TB_EXIT_REQUESTED:
smp_rmb();
*last_tb = NULL;
break;
case TB_EXIT_ICOUNT_EXPIRED:
{
#ifdef CONFIG_USER_ONLY
abort();
#else
int insns_left = cpu->icount_decr.u32;
if (cpu->icount_extra && insns_left >= 0) {
cpu->icount_extra += insns_left;
insns_left = MIN(0xffff, cpu->icount_extra);
cpu->icount_extra -= insns_left;
cpu->icount_decr.u16.low = insns_left;
} else {
if (insns_left > 0) {
cpu_exec_nocache(cpu, insns_left, *last_tb, false);
align_clocks(sc, cpu);
}
cpu->exception_index = EXCP_INTERRUPT;
*last_tb = NULL;
cpu_loop_exit(cpu);
}
break;
#endif
}
default:
break;
}
}
| 1threat
|
Making a Snackbar Without a View? : <p>I want to show a snackbar as soon as the user opens the Google Maps activity, but the thing is that there's no views in the activity to use as the first parameter of the activity (in the <code>findViewById()</code> of <code>Snackbar.make()</code>). What do I put there?
Here's the java class code:</p>
<pre><code>public class MapsActivity extends FragmentActivity implements OnMapReadyCallback {
private GoogleMap mMap;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_maps);
// Obtain the SupportMapFragment and get notified when the map is ready to be used.
SupportMapFragment mapFragment = (SupportMapFragment) getSupportFragmentManager()
.findFragmentById(R.id.map);
mapFragment.getMapAsync(this);
}
@Override
public void onMapReady(GoogleMap googleMap) {
mMap = googleMap;
mMap.setBuildingsEnabled(true);
mMap.getUiSettings().setZoomControlsEnabled(true);
float cameraZoom = 17;
LatLng location = new LatLng(43.404032, -80.478184);
mMap.addMarker(new MarkerOptions().position(location).title("49 McIntyre Place #18, Kitchener, ON N2R 1G3"));
CameraUpdateFactory.newLatLngZoom(location, cameraZoom);
Snackbar.make(findViewById(/*WHAT DO I PUT HERE?*/), "Click the pin for more options", Snackbar.LENGTH_LONG).show();
}
}
</code></pre>
<p>Also, here is the activity xml code:</p>
<pre><code><fragment xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:map="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:id="@+id/map"
android:name="com.google.android.gms.maps.SupportMapFragment"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context="ca.davesautoservice.davesautoservice.MapsActivity" />
</code></pre>
<p>And lastly, here's the stacktrace error:</p>
<pre><code>08-03 11:42:21.333 3901-3901/? E/AndroidRuntime: FATAL EXCEPTION: main
Process: ca.davesautoservice.davesautoservice, PID: 3901
java.lang.NullPointerException
at android.support.design.widget.Snackbar.<init>(Snackbar.java:183)
at android.support.design.widget.Snackbar.make(Snackbar.java:215)
at ca.davesautoservice.davesautoservice.MapsActivity.onMapReady(MapsActivity.java:48)
at com.google.android.gms.maps.SupportMapFragment$zza$1.zza(Unknown Source)
at com.google.android.gms.maps.internal.zzo$zza.onTransact(Unknown Source)
at android.os.Binder.transact(Binder.java:361)
at xz.a(:com.google.android.gms.DynamiteModulesB:82)
at maps.ad.u$5.run(Unknown Source)
at android.os.Handler.handleCallback(Handler.java:808)
at android.os.Handler.dispatchMessage(Handler.java:103)
at android.os.Looper.loop(Looper.java:193)
at android.app.ActivityThread.main(ActivityThread.java:5333)
at java.lang.reflect.Method.invokeNative(Native Method)
at java.lang.reflect.Method.invoke(Method.java:515)
at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:828)
at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:644)
at dalvik.system.NativeStart.main(Native Method)
</code></pre>
<p>Thanks for the help! :)</p>
| 0debug
|
react-native TextInput how to throttle onBlur function : in my react native project,
<TextInput onBlur={(e) => this.handleBlurCheck(e,...otherParams) }/>
I want to throttle the function `handleBlurCheck`,but in rn ,I don't know how to realize.
and I need `event`
| 0debug
|
static void v9fs_walk(void *opaque)
{
int name_idx;
V9fsQID *qids = NULL;
int i, err = 0;
V9fsPath dpath, path;
uint16_t nwnames;
struct stat stbuf;
size_t offset = 7;
int32_t fid, newfid;
V9fsString *wnames = NULL;
V9fsFidState *fidp;
V9fsFidState *newfidp = NULL;
V9fsPDU *pdu = opaque;
V9fsState *s = pdu->s;
offset += pdu_unmarshal(pdu, offset, "ddw", &fid,
&newfid, &nwnames);
trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames);
if (nwnames && nwnames <= P9_MAXWELEM) {
wnames = g_malloc0(sizeof(wnames[0]) * nwnames);
qids = g_malloc0(sizeof(qids[0]) * nwnames);
for (i = 0; i < nwnames; i++) {
offset += pdu_unmarshal(pdu, offset, "s", &wnames[i]);
}
} else if (nwnames > P9_MAXWELEM) {
err = -EINVAL;
goto out_nofid;
}
fidp = get_fid(pdu, fid);
if (fidp == NULL) {
err = -ENOENT;
goto out_nofid;
}
v9fs_path_init(&dpath);
v9fs_path_init(&path);
v9fs_path_copy(&dpath, &fidp->path);
v9fs_path_copy(&path, &fidp->path);
for (name_idx = 0; name_idx < nwnames; name_idx++) {
err = v9fs_co_name_to_path(pdu, &dpath, wnames[name_idx].data, &path);
if (err < 0) {
goto out;
}
err = v9fs_co_lstat(pdu, &path, &stbuf);
if (err < 0) {
goto out;
}
stat_to_qid(&stbuf, &qids[name_idx]);
v9fs_path_copy(&dpath, &path);
}
if (fid == newfid) {
BUG_ON(fidp->fid_type != P9_FID_NONE);
v9fs_path_copy(&fidp->path, &path);
} else {
newfidp = alloc_fid(s, newfid);
if (newfidp == NULL) {
err = -EINVAL;
goto out;
}
newfidp->uid = fidp->uid;
v9fs_path_copy(&newfidp->path, &path);
}
err = v9fs_walk_marshal(pdu, nwnames, qids);
trace_v9fs_walk_return(pdu->tag, pdu->id, nwnames, qids);
out:
put_fid(pdu, fidp);
if (newfidp) {
put_fid(pdu, newfidp);
}
v9fs_path_free(&dpath);
v9fs_path_free(&path);
out_nofid:
complete_pdu(s, pdu, err);
if (nwnames && nwnames <= P9_MAXWELEM) {
for (name_idx = 0; name_idx < nwnames; name_idx++) {
v9fs_string_free(&wnames[name_idx]);
}
g_free(wnames);
g_free(qids);
}
return;
}
| 1threat
|
static int iscsi_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
{
return 0;
}
| 1threat
|
Using ccache when building inside of docker : <p>I am working on moving the build for a C++ project into a docker image. The image will be built and pushed by a Jenkins job. Prior to docker, I made heavy use of ccache to speed up my builds on Jenkins, especially in the case of builds where very little changed. The trouble with docker is that the build now runs in an isolated environment, so I can no longer benefit from ccache. Is there a way to build inside of an ephemeral container while still taking advantage of ccache?</p>
| 0debug
|
static void gen_lq(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
int ra, rd;
TCGv EA;
if (unlikely(ctx->mem_idx == 0)) {
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
ra = rA(ctx->opcode);
rd = rD(ctx->opcode);
if (unlikely((rd & 1) || rd == ra)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
if (unlikely(ctx->le_mode)) {
gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
return;
}
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x0F);
gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
gen_addr_add(ctx, EA, EA, 8);
gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
tcg_temp_free(EA);
#endif
}
| 1threat
|
static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
int *data_size, AVPacket *avpkt)
{
WMAVoiceContext *s = ctx->priv_data;
GetBitContext *gb = &s->gb;
int size, res, pos;
if (*data_size < 480 * sizeof(float)) {
av_log(ctx, AV_LOG_ERROR,
"Output buffer too small (%d given - %zu needed)\n",
*data_size, 480 * sizeof(float));
return -1;
}
for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align);
if (!size) {
*data_size = 0;
return 0;
}
init_get_bits(&s->gb, avpkt->data, size << 3);
if (size == ctx->block_align) {
if ((res = parse_packet_header(s)) < 0)
return res;
if (s->spillover_nbits > 0) {
if (s->sframe_cache_size > 0) {
int cnt = get_bits_count(gb);
copy_bits(&s->pb, avpkt->data, size, gb, s->spillover_nbits);
flush_put_bits(&s->pb);
s->sframe_cache_size += s->spillover_nbits;
if ((res = synth_superframe(ctx, data, data_size)) == 0 &&
*data_size > 0) {
cnt += s->spillover_nbits;
s->skip_bits_next = cnt & 7;
return cnt >> 3;
} else
skip_bits_long (gb, s->spillover_nbits - cnt +
get_bits_count(gb));
} else
skip_bits_long(gb, s->spillover_nbits);
}
} else if (s->skip_bits_next)
skip_bits(gb, s->skip_bits_next);
s->sframe_cache_size = 0;
s->skip_bits_next = 0;
pos = get_bits_left(gb);
if ((res = synth_superframe(ctx, data, data_size)) < 0) {
return res;
} else if (*data_size > 0) {
int cnt = get_bits_count(gb);
s->skip_bits_next = cnt & 7;
return cnt >> 3;
} else if ((s->sframe_cache_size = pos) > 0) {
init_get_bits(gb, avpkt->data, size << 3);
skip_bits_long(gb, (size << 3) - pos);
assert(get_bits_left(gb) == pos);
init_put_bits(&s->pb, s->sframe_cache, SFRAME_CACHE_MAXSIZE);
copy_bits(&s->pb, avpkt->data, size, gb, s->sframe_cache_size);
}
return size;
}
| 1threat
|
Run elastic search as root user : <p>Im getting below error when i tried to start elastic search 5.0 with command <code>./elasticsearch</code> and getting below error.</p>
<pre><code>[2016-11-23T13:44:09,507][WARN ][o.e.b.ElasticsearchUncaughtExceptionHandler] [] uncaught exception in thread [main]
org.elasticsearch.bootstrap.StartupException: java.lang.RuntimeException: can not run elasticsearch as root
at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:116) ~[elasticsearch-5.0.1.jar:5.0.1]
at org.elasticsearch.bootstrap.Elasticsearch.execute(Elasticsearch.java:103) ~[elasticsearch-5.0.1.jar:5.0.1]
at org.elasticsearch.cli.SettingCommand.execute(SettingCommand.java:54) ~[elasticsearch-5.0.1.jar:5.0.1]
at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:96) ~[elasticsearch-5.0.1.jar:5.0.1]
at org.elasticsearch.cli.Command.main(Command.java:62) ~[elasticsearch-5.0.1.jar:5.0.1]
at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:80) ~[elasticsearch-5.0.1.jar:5.0.1]
at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:73) ~[elasticsearch-5.0.1.jar:5.0.1]
Caused by: java.lang.RuntimeException: can not run elasticsearch as root
at org.elasticsearch.bootstrap.Bootstrap.initializeNatives(Bootstrap.java:96) ~[elasticsearch-5.0.1.jar:5.0.1]
at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:155) ~[elasticsearch-5.0.1.jar:5.0.1]
at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:286) ~[elasticsearch-5.0.1.jar:5.0.1]
at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:112) ~[elasticsearch-5.0.1.jar:5.0.1]
... 6 more
</code></pre>
<p>I tried to start by switching to other user and tried <code>sudo ./elasticsearch</code> and got the same error.
How to start elastic search as root user ?</p>
| 0debug
|
static int qemu_rbd_parsename(const char *filename,
char *pool, int pool_len,
char *snap, int snap_len,
char *name, int name_len,
char *conf, int conf_len,
Error **errp)
{
const char *start;
char *p, *buf;
int ret = 0;
char *found_str;
Error *local_err = NULL;
if (!strstart(filename, "rbd:", &start)) {
error_setg(errp, "File name must start with 'rbd:'");
return -EINVAL;
}
buf = g_strdup(start);
p = buf;
*snap = '\0';
*conf = '\0';
found_str = qemu_rbd_next_tok(pool_len, p,
'/', "pool name", &p, &local_err);
if (local_err) {
goto done;
}
if (!p) {
ret = -EINVAL;
error_setg(errp, "Pool name is required");
goto done;
}
qemu_rbd_unescape(found_str);
g_strlcpy(pool, found_str, pool_len);
if (strchr(p, '@')) {
found_str = qemu_rbd_next_tok(name_len, p,
'@', "object name", &p, &local_err);
if (local_err) {
goto done;
}
qemu_rbd_unescape(found_str);
g_strlcpy(name, found_str, name_len);
found_str = qemu_rbd_next_tok(snap_len, p,
':', "snap name", &p, &local_err);
if (local_err) {
goto done;
}
qemu_rbd_unescape(found_str);
g_strlcpy(snap, found_str, snap_len);
} else {
found_str = qemu_rbd_next_tok(name_len, p,
':', "object name", &p, &local_err);
if (local_err) {
goto done;
}
qemu_rbd_unescape(found_str);
g_strlcpy(name, found_str, name_len);
}
if (!p) {
goto done;
}
found_str = qemu_rbd_next_tok(conf_len, p,
'\0', "configuration", &p, &local_err);
if (local_err) {
goto done;
}
g_strlcpy(conf, found_str, conf_len);
done:
if (local_err) {
ret = -EINVAL;
error_propagate(errp, local_err);
}
g_free(buf);
return ret;
}
| 1threat
|
def find_rect_num(n):
return n*(n + 1)
| 0debug
|
Android Studio: startActivity(i); shows error : I'm newbie to AndroidStudio(java) and I followed tutorial on YouTube.
But for me it shows an error in "startActivity(i);".
Error: startActivity (android.content.Intent) in Activity cannot be applied.
Here's code:
public class LoadingScreen extends AppCompatActivity {
private static int SplashInterval = 2000;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.loading_screen);
new Handler().postDelayed(new Runnable(){
@Override
public void run(){
//TODO Auto-generated method stub
Intent i = new Intent(LoadingScreen.this, MainActivity.class);
startActivity(i);
this.finish();
}
private void finish(){
//TODO Auto-generated method stub
}
},SplashInterval);
};
}
| 0debug
|
How to get an array into a set of pairs using a for loop and JavaScript map/reduce functions : <p>[1,2,3,4,5,6] should be changed to a set containing {1,2},{2,3},{3,4},{4,5},{5,6}. How can I convert the array into a set of pairs, both with a normal for loop and with JavaScript's map and reduce functions? Thanks in advance.</p>
| 0debug
|
/* Instantiate the channel-reordering helpers for every supported sample
 * width; the 50/51/71 suffixes name the 5.0, 5.1 and 7.1 output layouts. */
REORDER_OUT_50(int8, int8_t)
REORDER_OUT_51(int8, int8_t)
REORDER_OUT_71(int8, int8_t)
REORDER_OUT_50(int16, int16_t)
REORDER_OUT_51(int16, int16_t)
REORDER_OUT_71(int16, int16_t)
REORDER_OUT_50(int32, int32_t)
REORDER_OUT_51(int32, int32_t)
REORDER_OUT_71(int32, int32_t)
REORDER_OUT_50(f32, float)
REORDER_OUT_51(f32, float)
REORDER_OUT_71(f32, float)
/* Internal tags for the four sample-width classes handled below. */
#define FORMAT_I8 0
#define FORMAT_I16 1
#define FORMAT_I32 2
#define FORMAT_F32 3
/* Select the reorder helper matching the local variable `format` for the
 * given layout suffix; expands to a switch statement. */
#define PICK_REORDER(layout)\
switch(format) {\
case FORMAT_I8: s->reorder_func = alsa_reorder_int8_out_ ##layout; break;\
case FORMAT_I16: s->reorder_func = alsa_reorder_int16_out_ ##layout; break;\
case FORMAT_I32: s->reorder_func = alsa_reorder_int32_out_ ##layout; break;\
case FORMAT_F32: s->reorder_func = alsa_reorder_f32_out_ ##layout; break;\
}
/*
 * Choose a channel-reorder callback for the given PCM codec id and channel
 * layout, storing it in s->reorder_func.
 *
 * Returns 0 when no reordering is needed or a helper was found,
 * AVERROR(ENOSYS) when the combination is unsupported or `out` is false
 * (only output reordering is implemented here).
 */
static av_cold int find_reorder_func(AlsaData *s, int codec_id, int64_t layout, int out)
{
int format;
if (!out)
return AVERROR(ENOSYS);
/* These layouts need no reordering. */
if (layout == AV_CH_LAYOUT_QUAD || layout == AV_CH_LAYOUT_2_2)
return 0;
/* Map the PCM codec id onto a sample-width class. */
switch (codec_id) {
case CODEC_ID_PCM_S8:
case CODEC_ID_PCM_U8:
case CODEC_ID_PCM_ALAW:
case CODEC_ID_PCM_MULAW: format = FORMAT_I8; break;
case CODEC_ID_PCM_S16LE:
case CODEC_ID_PCM_S16BE:
case CODEC_ID_PCM_U16LE:
case CODEC_ID_PCM_U16BE: format = FORMAT_I16; break;
case CODEC_ID_PCM_S32LE:
case CODEC_ID_PCM_S32BE:
case CODEC_ID_PCM_U32LE:
case CODEC_ID_PCM_U32BE: format = FORMAT_I32; break;
case CODEC_ID_PCM_F32LE:
case CODEC_ID_PCM_F32BE: format = FORMAT_F32; break;
default: return AVERROR(ENOSYS);
}
/* NOTE(review): if none of the layouts below match, the return value
 * depends on the pre-existing s->reorder_func — confirm it starts NULL. */
if (layout == AV_CH_LAYOUT_5POINT0_BACK || layout == AV_CH_LAYOUT_5POINT0)
PICK_REORDER(50)
else if (layout == AV_CH_LAYOUT_5POINT1_BACK || layout == AV_CH_LAYOUT_5POINT1)
PICK_REORDER(51)
else if (layout == AV_CH_LAYOUT_7POINT1)
PICK_REORDER(71)
return s->reorder_func ? 0 : AVERROR(ENOSYS);
}
| 1threat
|
/*
 * Save the border pixels of the just-decoded macroblock into
 * h->top_borders / h->left_border so neighbouring macroblocks can read
 * the pre-deblocking values later.
 *
 * src_y/src_cb/src_cr point at the macroblock's luma and chroma planes;
 * linesize/uvlinesize are the respective strides. `simple` selects the
 * fast path without MBAFF/grayscale handling.
 * NOTE(review): the MBAFF offsets below assume field/frame macroblock
 * pairing as flagged by MB_MBAFF — confirm against the caller.
 */
static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple){
MpegEncContext * const s = &h->s;
int i;
int step = 1;
int offset = 1;
int uvoffset= 1;
int top_idx = 1;
int skiplast= 0;
/* Step back one row so row 0 of each plane is the border row above. */
src_y -= linesize;
src_cb -= uvlinesize;
src_cr -= uvlinesize;
if(!simple && FRAME_MBAFF){
if(s->mb_y&1){
/* Bottom macroblock of an MBAFF pair. */
offset = MB_MBAFF ? 1 : 17;
uvoffset= MB_MBAFF ? 1 : 9;
if(!MB_MBAFF){
/* Copy the bottom rows into the top-border cache in 8-byte chunks. */
*(uint64_t*)(h->top_borders[0][s->mb_x]+ 0)= *(uint64_t*)(src_y + 15*linesize);
*(uint64_t*)(h->top_borders[0][s->mb_x]+ 8)= *(uint64_t*)(src_y +8+15*linesize);
if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
*(uint64_t*)(h->top_borders[0][s->mb_x]+16)= *(uint64_t*)(src_cb+7*uvlinesize);
*(uint64_t*)(h->top_borders[0][s->mb_x]+24)= *(uint64_t*)(src_cr+7*uvlinesize);
}
}
}else{
if(!MB_MBAFF){
h->left_border[0]= h->top_borders[0][s->mb_x][15];
if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
h->left_border[34 ]= h->top_borders[0][s->mb_x][16+7 ];
h->left_border[34+18]= h->top_borders[0][s->mb_x][16+8+7];
}
skiplast= 1;
}
offset =
uvoffset=
top_idx = MB_MBAFF ? 0 : 1;
}
/* Interleave rows when the pair is coded as fields. */
step= MB_MBAFF ? 2 : 1;
}
/* Left border: rightmost column of the macroblock (luma). */
h->left_border[offset]= h->top_borders[top_idx][s->mb_x][15];
for(i=1; i<17 - skiplast; i++){
h->left_border[offset+i*step]= src_y[15+i* linesize];
}
/* Top border: bottom luma row, copied in two 8-byte chunks. */
*(uint64_t*)(h->top_borders[top_idx][s->mb_x]+0)= *(uint64_t*)(src_y + 16*linesize);
*(uint64_t*)(h->top_borders[top_idx][s->mb_x]+8)= *(uint64_t*)(src_y +8+16*linesize);
if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
/* Chroma left borders (Cb at +34, Cr at +34+18) and bottom rows. */
h->left_border[uvoffset+34 ]= h->top_borders[top_idx][s->mb_x][16+7];
h->left_border[uvoffset+34+18]= h->top_borders[top_idx][s->mb_x][24+7];
for(i=1; i<9 - skiplast; i++){
h->left_border[uvoffset+34 +i*step]= src_cb[7+i*uvlinesize];
h->left_border[uvoffset+34+18+i*step]= src_cr[7+i*uvlinesize];
}
*(uint64_t*)(h->top_borders[top_idx][s->mb_x]+16)= *(uint64_t*)(src_cb+8*uvlinesize);
*(uint64_t*)(h->top_borders[top_idx][s->mb_x]+24)= *(uint64_t*)(src_cr+8*uvlinesize);
}
}
| 1threat
|
/*
 * Print a probe report for one stream of fmt_ctx: codec identification,
 * type-specific parameters (video geometry / audio format), timing
 * information and metadata, bracketed by probe_object_header/footer.
 */
static void show_stream(AVFormatContext *fmt_ctx, int stream_idx)
{
AVStream *stream = fmt_ctx->streams[stream_idx];
AVCodecContext *dec_ctx;
AVCodec *dec;
const char *profile;
char val_str[128];
AVRational display_aspect_ratio;
probe_object_header("stream");
probe_int("index", stream->index);
if ((dec_ctx = stream->codec)) {
/* Codec may be unopened; fall back to "unknown" names. */
if ((dec = dec_ctx->codec)) {
probe_str("codec_name", dec->name);
probe_str("codec_long_name", dec->long_name);
} else {
probe_str("codec_name", "unknown");
}
probe_str("codec_type", media_type_string(dec_ctx->codec_type));
probe_str("codec_time_base",
rational_string(val_str, sizeof(val_str),
"/", &dec_ctx->time_base));
/* FourCC both as printable string and as raw tag value. */
av_get_codec_tag_string(val_str, sizeof(val_str), dec_ctx->codec_tag);
probe_str("codec_tag_string", val_str);
probe_str("codec_tag", tag_string(val_str, sizeof(val_str),
dec_ctx->codec_tag));
if (dec && (profile = av_get_profile_name(dec, dec_ctx->profile)))
probe_str("profile", profile);
switch (dec_ctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
probe_int("width", dec_ctx->width);
probe_int("height", dec_ctx->height);
probe_int("has_b_frames", dec_ctx->has_b_frames);
if (dec_ctx->sample_aspect_ratio.num) {
probe_str("sample_aspect_ratio",
rational_string(val_str, sizeof(val_str), ":",
&dec_ctx->sample_aspect_ratio));
/* DAR = (width * SAR.num) / (height * SAR.den), reduced. */
av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
dec_ctx->width * dec_ctx->sample_aspect_ratio.num,
dec_ctx->height * dec_ctx->sample_aspect_ratio.den,
1024*1024);
probe_str("display_aspect_ratio",
rational_string(val_str, sizeof(val_str), ":",
&display_aspect_ratio));
}
probe_str("pix_fmt",
dec_ctx->pix_fmt != PIX_FMT_NONE ? av_pix_fmt_descriptors[dec_ctx->pix_fmt].name
: "unknown");
probe_int("level", dec_ctx->level);
break;
case AVMEDIA_TYPE_AUDIO:
probe_str("sample_rate",
value_string(val_str, sizeof(val_str),
dec_ctx->sample_rate,
unit_hertz_str));
probe_int("channels", dec_ctx->channels);
probe_int("bits_per_sample",
av_get_bits_per_sample(dec_ctx->codec_id));
break;
}
} else {
probe_str("codec_type", "unknown");
}
/* Stream id is only meaningful for containers that advertise it. */
if (fmt_ctx->iformat->flags & AVFMT_SHOW_IDS)
probe_int("id", stream->id);
probe_str("r_frame_rate",
rational_string(val_str, sizeof(val_str), "/",
&stream->r_frame_rate));
probe_str("avg_frame_rate",
rational_string(val_str, sizeof(val_str), "/",
&stream->avg_frame_rate));
probe_str("time_base",
rational_string(val_str, sizeof(val_str), "/",
&stream->time_base));
probe_str("start_time",
time_value_string(val_str, sizeof(val_str),
stream->start_time, &stream->time_base));
probe_str("duration",
time_value_string(val_str, sizeof(val_str),
stream->duration, &stream->time_base));
if (stream->nb_frames)
probe_int("nb_frames", stream->nb_frames);
probe_dict(stream->metadata, "tags");
probe_object_footer("stream");
}
| 1threat
|
Python - How to easily convert string with list lay-out to list? : <p>My code required a (possible nested) list as variable. However, using <code>sys.argv</code> to input a command line argument reads the input as a string.</p>
<p>So given the example input <code>[[1,[2],3,4],5]</code>, what easy ways are there to convert this input from <code>str</code> to a <code>list</code>?</p>
<p>Thanks in advance.</p>
| 0debug
|
/*
 * Read one VLC-coded value from the bitstream.
 *
 * Code 0 acts as an escape: a 3-bit length field follows, then that many
 * (+1) raw bits give the value. All other codes decode to code-1.
 * When `flag` is set the value is additionally mapped through
 * vlc_stage3_values, with larger codes carrying (value >> 2) extra raw bits.
 */
static int qdm2_get_vlc (GetBitContext *gb, VLC *vlc, int flag, int depth)
{
    int value = get_vlc2(gb, vlc->table, vlc->bits, depth);

    if (value == 0) {
        /* Escape code: explicit value follows in the bitstream. */
        value = get_bits (gb, get_bits (gb, 3) + 1);
    } else {
        value--;
    }

    if (flag) {
        /* Stage-3 remapping with optional extra mantissa bits. */
        int mapped = vlc_stage3_values[value];
        if ((value & ~3) > 0)
            mapped += get_bits (gb, (value >> 2));
        value = mapped;
    }

    return value;
}
| 1threat
|
/*
 * Decode one VP3/Theora frame from `buf` into an AVFrame written through
 * `data`. Parses the frame header, (re)initializes the dequantizer and
 * loop filter on quality changes, manages the golden/last/current frame
 * buffers, runs the unpack stages, renders and filters the frame.
 *
 * Returns buf_size on success, -1 on error.
 */
static int vp3_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
const uint8_t *buf, int buf_size)
{
Vp3DecodeContext *s = avctx->priv_data;
GetBitContext gb;
/* Debug-only frame counter; static, so shared by all decoder instances. */
static int counter = 0;
int i;
init_get_bits(&gb, buf, buf_size * 8);
/* In Theora, a set first bit marks a header packet, not frame data. */
if (s->theora && get_bits1(&gb))
{
av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
return -1;
}
s->keyframe = !get_bits1(&gb);
if (!s->theora)
skip_bits(&gb, 1);
s->last_quality_index = s->quality_index;
/* Read up to 3 quality indices (multiple only for Theora >= 3.2). */
s->nqis=0;
do{
s->qis[s->nqis++]= get_bits(&gb, 6);
} while(s->theora >= 0x030200 && s->nqis<3 && get_bits1(&gb));
s->quality_index= s->qis[0];
if (s->avctx->debug & FF_DEBUG_PICT_INFO)
av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
s->keyframe?"key":"", counter, s->quality_index);
counter++;
/* Quant tables and filter limits depend on the quality index. */
if (s->quality_index != s->last_quality_index) {
init_dequantizer(s);
init_loop_filter(s);
}
if (s->keyframe) {
if (!s->theora)
{
skip_bits(&gb, 4);
skip_bits(&gb, 4);
if (s->version)
{
s->version = get_bits(&gb, 5);
if (counter == 1)
av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
}
}
if (s->version || s->theora)
{
if (get_bits1(&gb))
av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
skip_bits(&gb, 2);
}
/* A keyframe becomes the new golden frame; release old buffers,
 * taking care when last and golden alias the same buffer. */
if (s->last_frame.data[0] == s->golden_frame.data[0]) {
if (s->golden_frame.data[0])
avctx->release_buffer(avctx, &s->golden_frame);
s->last_frame= s->golden_frame;
} else {
if (s->golden_frame.data[0])
avctx->release_buffer(avctx, &s->golden_frame);
if (s->last_frame.data[0])
avctx->release_buffer(avctx, &s->last_frame);
}
s->golden_frame.reference = 3;
if(avctx->get_buffer(avctx, &s->golden_frame) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
return -1;
}
/* The keyframe is decoded directly into the golden frame. */
s->current_frame= s->golden_frame;
/* Pixel address tables are built lazily on the first keyframe. */
if (!s->pixel_addresses_inited)
{
if (!s->flipped_image)
vp3_calculate_pixel_addresses(s);
else
theora_calculate_pixel_addresses(s);
s->pixel_addresses_inited = 1;
}
} else {
s->current_frame.reference = 3;
/* An inter frame before any keyframe has nothing to predict from. */
if (!s->pixel_addresses_inited) {
av_log(s->avctx, AV_LOG_ERROR, "vp3: first frame not a keyframe\n");
return -1;
}
if(avctx->get_buffer(avctx, &s->current_frame) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
return -1;
}
}
s->current_frame.qscale_table= s->qscale_table;
s->current_frame.qstride= 0;
{START_TIMER
init_frame(s, &gb);
STOP_TIMER("init_frame")}
#if KEYFRAMES_ONLY
/* Debug mode: copy the golden frame instead of decoding inter frames. */
if (!s->keyframe) {
memcpy(s->current_frame.data[0], s->golden_frame.data[0],
s->current_frame.linesize[0] * s->height);
memcpy(s->current_frame.data[1], s->golden_frame.data[1],
s->current_frame.linesize[1] * s->height / 2);
memcpy(s->current_frame.data[2], s->golden_frame.data[2],
s->current_frame.linesize[2] * s->height / 2);
} else {
#endif
/* Bitstream unpack stages, in decode order. */
{START_TIMER
if (unpack_superblocks(s, &gb)){
av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
return -1;
}
STOP_TIMER("unpack_superblocks")}
{START_TIMER
if (unpack_modes(s, &gb)){
av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
return -1;
}
STOP_TIMER("unpack_modes")}
{START_TIMER
if (unpack_vectors(s, &gb)){
av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
return -1;
}
STOP_TIMER("unpack_vectors")}
{START_TIMER
if (unpack_dct_coeffs(s, &gb)){
av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
return -1;
}
STOP_TIMER("unpack_dct_coeffs")}
{START_TIMER
/* Undo DC prediction for luma, then both chroma planes. */
reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);
if ((avctx->flags & CODEC_FLAG_GRAY) == 0) {
reverse_dc_prediction(s, s->fragment_start[1],
s->fragment_width / 2, s->fragment_height / 2);
reverse_dc_prediction(s, s->fragment_start[2],
s->fragment_width / 2, s->fragment_height / 2);
}
STOP_TIMER("reverse_dc_prediction")}
{START_TIMER
for (i = 0; i < s->macroblock_height; i++)
render_slice(s, i);
STOP_TIMER("render_fragments")}
{START_TIMER
apply_loop_filter(s);
STOP_TIMER("apply_loop_filter")}
#if KEYFRAMES_ONLY
}
#endif
/* Hand the decoded frame to the caller and rotate the references. */
*data_size=sizeof(AVFrame);
*(AVFrame*)data= s->current_frame;
if ((s->last_frame.data[0]) &&
(s->last_frame.data[0] != s->golden_frame.data[0]))
avctx->release_buffer(avctx, &s->last_frame);
s->last_frame= s->current_frame;
s->current_frame.data[0]= NULL;
return buf_size;
}
| 1threat
|
/*
 * Emit TCG for an Alpha conditional move:
 *   rc = condition(ra) ? (islit ? lit : rb) : rc
 * implemented by branching over the move when `inv_cond` (the inverted
 * condition) holds on ra.
 *
 * mask: test only bit 0 of ra (CMOVLBS/CMOVLBC-style) instead of the
 * whole register.
 */
static always_inline void gen_cmov (TCGCond inv_cond,
int ra, int rb, int rc,
int islit, uint8_t lit, int mask)
{
int l1;
/* Register 31 is the zero register: a write to it is a no-op. */
if (unlikely(rc == 31))
return;
l1 = gen_new_label();
if (ra != 31) {
if (mask) {
/* Condition is evaluated on bit 0 of ra only. */
TCGv tmp = tcg_temp_new(TCG_TYPE_I64);
tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
tcg_temp_free(tmp);
} else
tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
} else {
/* ra == 31 reads as constant zero. */
TCGv tmp = tcg_const_i64(0);
tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
tcg_temp_free(tmp);
}
if (islit)
tcg_gen_movi_i64(cpu_ir[rc], lit);
else
tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
gen_set_label(l1);
}
| 1threat
|
Kotlin: Suppress unused Property? : <p>My source code is as follows:</p>
<p>There are warnings : Property '****' is never used.</p>
<p>I added "@Suppress("UNUSED_PARAMETER")", "@Suppress("UNUSED_PROPERTY_GETTER")", "@Suppress("UNUSED_PROPERTY_SETTER")",<br>
however, none of them work.</p>
<p>How can I suppress this warning?</p>
<p><a href="https://i.stack.imgur.com/hU8X9.png" rel="noreferrer"><img src="https://i.stack.imgur.com/hU8X9.png" alt="enter image description here"></a></p>
| 0debug
|
How to use Python Pandas Stylers for coloring an entire row based on a given column? : <p>I've been trying to print out a Pandas dataframe to html and have specific entire rows highlighted if the value of one specific column's value for that row is over a threshold. I've looked through the Pandas Styler Slicing and tried to vary the highlight_max function for such a use, but seem to be failing miserably; if I try, say, to replace the is_max with a check for whether a given row's value is above said threshold (e.g., something like </p>
<pre><code>is_x = df['column_name'] >= threshold
</code></pre>
<p>), it isn't apparent how to properly pass such a thing or what to return.</p>
<p>I've also tried to simply define it elsewhere using df.loc, but that hasn't worked too well either.</p>
<p>Another concern also came up: If I drop that column (currently the criterion) afterwards, will the styling still hold? I am wondering if a df.loc would prevent such a thing from being a problem.</p>
| 0debug
|
/*
 * Clear all entries in a bucket chain; caller must hold the bucket lock.
 * Entries are packed, so the first NULL pointer marks the end of the
 * used slots. Readers are excluded via the head bucket's seqlock.
 */
static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *bucket;
    int slot;

    seqlock_write_begin(&head->sequence);
    for (bucket = head; bucket != NULL; bucket = bucket->next) {
        for (slot = 0; slot < QHT_BUCKET_ENTRIES; slot++) {
            if (bucket->pointers[slot] == NULL) {
                goto done;
            }
            bucket->hashes[slot] = 0;
            atomic_set(&bucket->pointers[slot], NULL);
        }
    }
done:
    seqlock_write_end(&head->sequence);
}
| 1threat
|
static void set_year_20xx(void)
{
cmos_write(RTC_REG_B, cmos_read(RTC_REG_B) & ~REG_B_DM);
cmos_write(RTC_REG_A, 0x76);
cmos_write(RTC_YEAR, 0x11);
cmos_write(RTC_CENTURY, 0x20);
cmos_write(RTC_MONTH, 0x02);
cmos_write(RTC_DAY_OF_MONTH, 0x02);
cmos_write(RTC_HOURS, 0x02);
cmos_write(RTC_MINUTES, 0x04);
cmos_write(RTC_SECONDS, 0x58);
cmos_write(RTC_REG_A, 0x26);
g_assert_cmpint(cmos_read(RTC_HOURS), ==, 0x02);
g_assert_cmpint(cmos_read(RTC_MINUTES), ==, 0x04);
g_assert_cmpint(cmos_read(RTC_SECONDS), >=, 0x58);
g_assert_cmpint(cmos_read(RTC_DAY_OF_MONTH), ==, 0x02);
g_assert_cmpint(cmos_read(RTC_MONTH), ==, 0x02);
g_assert_cmpint(cmos_read(RTC_YEAR), ==, 0x11);
g_assert_cmpint(cmos_read(RTC_CENTURY), ==, 0x20);
cmos_write(RTC_REG_A, 0x76);
cmos_write(RTC_YEAR, 0x80);
cmos_write(RTC_REG_A, 0x26);
g_assert_cmpint(cmos_read(RTC_HOURS), ==, 0x02);
g_assert_cmpint(cmos_read(RTC_MINUTES), ==, 0x04);
g_assert_cmpint(cmos_read(RTC_SECONDS), >=, 0x58);
g_assert_cmpint(cmos_read(RTC_DAY_OF_MONTH), ==, 0x02);
g_assert_cmpint(cmos_read(RTC_MONTH), ==, 0x02);
g_assert_cmpint(cmos_read(RTC_YEAR), ==, 0x80);
g_assert_cmpint(cmos_read(RTC_CENTURY), ==, 0x20);
cmos_write(RTC_REG_A, 0x76);
cmos_write(RTC_YEAR, 0x11);
cmos_write(RTC_REG_A, 0x26);
g_assert_cmpint(cmos_read(RTC_HOURS), ==, 0x02);
g_assert_cmpint(cmos_read(RTC_MINUTES), ==, 0x04);
g_assert_cmpint(cmos_read(RTC_SECONDS), >=, 0x58);
g_assert_cmpint(cmos_read(RTC_DAY_OF_MONTH), ==, 0x02);
g_assert_cmpint(cmos_read(RTC_MONTH), ==, 0x02);
g_assert_cmpint(cmos_read(RTC_YEAR), ==, 0x11);
g_assert_cmpint(cmos_read(RTC_CENTURY), ==, 0x20);
| 1threat
|
Flask Bad Request Error; ocurring because of multiple POST requests : <pre><code> <form method = "POST">
<div class=" col-lg-4 col-lg-4 col-lg-4 col-lg-4">
<div class="box">
<input type="text" name="image-url" placeholder="Image URL Link"style="color:black" required="required" value = "new"/>
<textarea cols=80 rows=4 style="color:black" name = "description" placeholder="Place your description here" value = "new"></textarea>
<button type="submit">Upload</button>
</div>
</div>
</form>
{% for i in range(amount_of_images) %}
<div class=" col-lg-4 col-lg-4 col-lg-4 col-lg-4">
<div class="box">
<img src="{{image[i]}}" alt="view" width = "300" height = "300"/>
<form method = "POST">
<textarea cols=80 rows=4 style="color:black" name = "update-description" value = "update">{{description[i]}}</textarea>
<button type="submit">Update Description</button>
</form>
</code></pre>
<p>Above is my HTML/Jinja Code</p>
<pre><code>@app.route("/gallery-manager", methods = ["GET", "POST"])
def gallery_manager():
if request.method == "POST":
if(request.form["image-url"] and request.form["description"]) is not None:
model.add_new_image(request.form["image-url"], request.form["description"])
id, image, description, amount_of_images = model.get_gallery()
return render_template("gallery-manager.html", image = image, description = description, amount_of_images = amount_of_images, id = id)
if request.form['update-description'] is not None:
print("hello")
id, image, description, amount_of_images = model.get_gallery()
return render_template("gallery-manager.html", image = image, description = description, amount_of_images = amount_of_images, id = id)
</code></pre>
<p>Above is my Python/Flask Code...</p>
<p>The issue, when I click on the update-description submit button, aka the second POST being handled in the html code, I get a 400 error</p>
<pre><code>Bad Request
The browser (or proxy) sent a request that this server could not understand.
</code></pre>
<p>I realized that this error occurs when one of the POST fields are empty and can not be found. I understand that this happens because when I click the second POST submit button, it runs through the first POST check (request.form["image-url"] etc and finds that it is satisfied therefore wanting to run that code, but cant because update-description is still empty. How do I avoid this.</p>
<p>In other words how do I handle multiple POST methods.</p>
<p>Thank you,</p>
| 0debug
|
/* PCI-bus iteration callback: stores the visited device into the
 * QPCIDevice* slot passed via `data` (the last match wins). */
static void e1000e_pci_foreach_callback(QPCIDevice *dev, int devfn, void *data)
{
*(QPCIDevice **) data = dev;
}
| 1threat
|
Difference between Thymeleaf include and replace? : <p>What is the difference between the two Thymeleaf attributes: <code>th:include</code> and <code>th:replace</code>?</p>
| 0debug
|
/*
 * Handle one control message from the guest on the virtio-serial control
 * queue. `buf` holds a struct virtio_console_control in guest byte order;
 * fields are converted before use. Messages for unknown ports are ignored,
 * except DEVICE_READY which is not port-specific.
 */
static void handle_control_message(VirtIOSerial *vser, void *buf)
{
struct VirtIOSerialPort *port;
struct virtio_console_control cpkt, *gcpkt;
uint8_t *buffer;
size_t buffer_len;
gcpkt = buf;
/* Convert the guest-endian header fields. */
cpkt.event = lduw_p(&gcpkt->event);
cpkt.value = lduw_p(&gcpkt->value);
port = find_port_by_id(vser, ldl_p(&gcpkt->id));
if (!port && cpkt.event != VIRTIO_CONSOLE_DEVICE_READY)
return;
switch(cpkt.event) {
case VIRTIO_CONSOLE_DEVICE_READY:
if (!cpkt.value) {
error_report("virtio-serial-bus: Guest failure in adding device %s\n",
vser->bus->qbus.name);
break;
}
/* Guest driver is up: announce every existing port to it. */
QTAILQ_FOREACH(port, &vser->ports, next) {
send_control_event(port, VIRTIO_CONSOLE_PORT_ADD, 1);
}
break;
case VIRTIO_CONSOLE_PORT_READY:
if (!cpkt.value) {
error_report("virtio-serial-bus: Guest failure in adding port %u for device %s\n",
port->id, vser->bus->qbus.name);
break;
}
/* Tell the guest about the port's role, name and host state. */
if (port->is_console) {
send_control_event(port, VIRTIO_CONSOLE_CONSOLE_PORT, 1);
}
if (port->name) {
/* PORT_NAME carries the NUL-terminated name after the header. */
stw_p(&cpkt.event, VIRTIO_CONSOLE_PORT_NAME);
stw_p(&cpkt.value, 1);
buffer_len = sizeof(cpkt) + strlen(port->name) + 1;
buffer = qemu_malloc(buffer_len);
memcpy(buffer, &cpkt, sizeof(cpkt));
memcpy(buffer + sizeof(cpkt), port->name, strlen(port->name));
buffer[buffer_len - 1] = 0;
send_control_msg(port, buffer, buffer_len);
qemu_free(buffer);
}
if (port->host_connected) {
send_control_event(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
}
if (port->info->guest_ready) {
port->info->guest_ready(port);
}
break;
case VIRTIO_CONSOLE_PORT_OPEN:
/* Guest opened/closed the port; notify the backend if it cares. */
port->guest_connected = cpkt.value;
if (cpkt.value && port->info->guest_open) {
port->info->guest_open(port);
}
if (!cpkt.value && port->info->guest_close) {
port->info->guest_close(port);
}
break;
}
}
| 1threat
|
C# Winform: Inline datagridview edit : <p>initially my <code>datagridview</code> would look like below </p>
<pre><code>ID Name City Action
------ ------ ---- ------
1 Mitch Kolkata Edit
2 Simon Delhi Edit
3 Poly Madras Edit
</code></pre>
<p>all data will be in read only format. so user can not change but when user click on edit button then a textbox will be placed in name column and a city <code>dropdown</code> will be placed on city column on a row whose edit button will be clicked by user.</p>
<p>when user click on edit button then edit button text will be change to Save and when user click on Save button then save button text will be change to Edit. so guide me how to achieve in line edit functionality when working with <code>datagridview</code>. thanks</p>
| 0debug
|
SSH connections keep dropping out due to inactivity : <p>My SSH connections keep dropping out due to inactivity in the EC2 hosts. I have tried to put these options [1] in <code>/etc/ssh/ssh_config</code> in the server and in the client, but the connections keep dropping out.</p>
<pre><code>ServerAliveInterval 15
ServerAliveCountMax 3
</code></pre>
<p>How do I keep the connections alive?</p>
| 0debug
|
/*
 * Post one RDMA write of `length` bytes from RAM block `current_index`
 * at guest address `current_addr` to the peer.
 *
 * Handles on-demand chunk registration (when not pinning all memory),
 * all-zero chunk compression (sends an RDMA_CONTROL_COMPRESS message
 * instead of the data), waiting for in-flight writes to the same chunk,
 * and retrying when the send queue is full.
 *
 * Returns 0 on success, 1 when the chunk was sent as a compress message,
 * negative errno on failure.
 *
 * Fix: repaired mangled source text — "®" restored to "&reg" and
 * "®_result_idx" to "&reg_result_idx" in the registration exchange
 * (the file had HTML-entity corruption that broke compilation).
 */
static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma,
int current_index, uint64_t current_addr,
uint64_t length)
{
struct ibv_sge sge;
struct ibv_send_wr send_wr = { 0 };
struct ibv_send_wr *bad_wr;
int reg_result_idx, ret, count = 0;
uint64_t chunk, chunks;
uint8_t *chunk_start, *chunk_end;
RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]);
RDMARegister reg;
RDMARegisterResult *reg_result;
RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT };
RDMAControlHeader head = { .len = sizeof(RDMARegister),
.type = RDMA_CONTROL_REGISTER_REQUEST,
.repeat = 1,
};
retry:
/* Translate the guest address into our local mapping. */
sge.addr = (uint64_t)(block->local_host_addr +
(current_addr - block->offset));
sge.length = length;
chunk = ram_chunk_index(block->local_host_addr, (uint8_t *) sge.addr);
chunk_start = ram_chunk_start(block, chunk);
/* Number of additional chunks the transfer spans beyond the first. */
if (block->is_ram_block) {
chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT);
if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
chunks--;
}
} else {
chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT);
if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
chunks--;
}
}
DDPRINTF("Writing %" PRIu64 " chunks, (%" PRIu64 " MB)\n",
chunks + 1, (chunks + 1) * (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024);
chunk_end = ram_chunk_end(block, chunk + chunks);
if (!rdma->pin_all) {
#ifdef RDMA_UNREGISTRATION_EXAMPLE
qemu_rdma_unregister_waiting(rdma);
#endif
}
/* Never clobber a chunk that still has a write in flight. */
while (test_bit(chunk, block->transit_bitmap)) {
(void)count;
DDPRINTF("(%d) Not clobbering: block: %d chunk %" PRIu64
" current %" PRIu64 " len %" PRIu64 " %d %d\n",
count++, current_index, chunk,
sge.addr, length, rdma->nb_sent, block->nb_chunks);
ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE);
if (ret < 0) {
fprintf(stderr, "Failed to Wait for previous write to complete "
"block %d chunk %" PRIu64
" current %" PRIu64 " len %" PRIu64 " %d\n",
current_index, chunk, sge.addr, length, rdma->nb_sent);
return ret;
}
}
if (!rdma->pin_all || !block->is_ram_block) {
if (!block->remote_keys[chunk]) {
/*
 * The destination hasn't registered this chunk yet. If the
 * whole chunk is zero, skip the transfer entirely and tell
 * the peer to treat it as a zero page.
 */
if (can_use_buffer_find_nonzero_offset((void *)sge.addr, length)
&& buffer_find_nonzero_offset((void *)sge.addr,
length) == length) {
RDMACompress comp = {
.offset = current_addr,
.value = 0,
.block_idx = current_index,
.length = length,
};
head.len = sizeof(comp);
head.type = RDMA_CONTROL_COMPRESS;
DDPRINTF("Entire chunk is zero, sending compress: %"
PRIu64 " for %d "
"bytes, index: %d, offset: %" PRId64 "...\n",
chunk, sge.length, current_index, current_addr);
compress_to_network(&comp);
ret = qemu_rdma_exchange_send(rdma, &head,
(uint8_t *) &comp, NULL, NULL, NULL);
if (ret < 0) {
return -EIO;
}
acct_update_position(f, sge.length, true);
return 1;
}
/* Ask the destination to register the chunk and return its rkey. */
reg.current_index = current_index;
if (block->is_ram_block) {
reg.key.current_addr = current_addr;
} else {
reg.key.chunk = chunk;
}
reg.chunks = chunks;
DDPRINTF("Sending registration request chunk %" PRIu64 " for %d "
"bytes, index: %d, offset: %" PRId64 "...\n",
chunk, sge.length, current_index, current_addr);
register_to_network(&reg);
ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
&resp, &reg_result_idx, NULL);
if (ret < 0) {
return ret;
}
/* Register the chunk locally to obtain our lkey. */
if (qemu_rdma_register_and_get_keys(rdma, block,
(uint8_t *) sge.addr,
&sge.lkey, NULL, chunk,
chunk_start, chunk_end)) {
fprintf(stderr, "cannot get lkey!\n");
return -EINVAL;
}
reg_result = (RDMARegisterResult *)
rdma->wr_data[reg_result_idx].control_curr;
network_to_result(reg_result);
DDPRINTF("Received registration result:"
" my key: %x their key %x, chunk %" PRIu64 "\n",
block->remote_keys[chunk], reg_result->rkey, chunk);
block->remote_keys[chunk] = reg_result->rkey;
block->remote_host_addr = reg_result->host_addr;
} else {
/* Chunk already registered remotely; only need the local lkey. */
if (qemu_rdma_register_and_get_keys(rdma, block,
(uint8_t *)sge.addr,
&sge.lkey, NULL, chunk,
chunk_start, chunk_end)) {
fprintf(stderr, "cannot get lkey!\n");
return -EINVAL;
}
}
send_wr.wr.rdma.rkey = block->remote_keys[chunk];
} else {
/* pin_all: the whole block shares one remote key. */
send_wr.wr.rdma.rkey = block->remote_rkey;
if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *)sge.addr,
&sge.lkey, NULL, chunk,
chunk_start, chunk_end)) {
fprintf(stderr, "cannot get lkey!\n");
return -EINVAL;
}
}
/* Encode block/chunk into the work request id for completion matching. */
send_wr.wr_id = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE,
current_index, chunk);
send_wr.opcode = IBV_WR_RDMA_WRITE;
send_wr.send_flags = IBV_SEND_SIGNALED;
send_wr.sg_list = &sge;
send_wr.num_sge = 1;
send_wr.wr.rdma.remote_addr = block->remote_host_addr +
(current_addr - block->offset);
DDDPRINTF("Posting chunk: %" PRIu64 ", addr: %lx"
" remote: %lx, bytes %" PRIu32 "\n",
chunk, sge.addr, send_wr.wr.rdma.remote_addr,
sge.length);
ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);
if (ret == ENOMEM) {
/* Send queue full: drain one completion and retry from the top. */
DDPRINTF("send queue is full. wait a little....\n");
ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE);
if (ret < 0) {
fprintf(stderr, "rdma migration: failed to make "
"room in full send queue! %d\n", ret);
return ret;
}
goto retry;
} else if (ret > 0) {
perror("rdma migration: post rdma write failed");
return -ret;
}
set_bit(chunk, block->transit_bitmap);
acct_update_position(f, sge.length, false);
rdma->total_writes++;
return 0;
}
| 1threat
|
How can I get the location every 5 minutes whether the app is in the background or foreground? : How can I get it?
-(void)updateLocation {
[self performSelector:@selector(updateLocation) withObject:nil afterDelay:300];
[self.locationTracker updateLocationToServer];
}
but its not working every time
| 0debug
|
// SECURITY: SQL built by string concatenation from input_string — classic
// SQL injection. Real code should use a parameterized query, e.g.:
//   connection.query('SELECT * FROM users WHERE username = ?', [input_string])
connection.query('SELECT * FROM users WHERE username = ' + input_string)
| 1threat
|
/*
 * Visitor callback: produce the next int64 value from the visitor's input
 * string. The string is lazily parsed into a list of ranges; each call
 * yields the current cursor value and advances it by one.
 */
static void parse_type_int64(Visitor *v, const char *name, int64_t *obj,
Error **errp)
{
StringInputVisitor *siv = to_siv(v);
if (!siv->string) {
error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null",
"integer");
return;
}
/* Parse the input string into siv->ranges on first use. */
if (parse_str(siv, name, errp) < 0) {
return;
}
if (!siv->ranges) {
goto error;
}
/* Position the cursor at the start of the first range if not yet done. */
if (!siv->cur_range) {
Range *r;
siv->cur_range = g_list_first(siv->ranges);
if (!siv->cur_range) {
goto error;
}
r = siv->cur_range->data;
if (!r) {
goto error;
}
siv->cur = r->begin;
}
/* NOTE(review): cur is incremented without checking the range's end or
 * advancing cur_range to the next range — confirm multi-range iteration
 * is handled elsewhere. */
*obj = siv->cur;
siv->cur++;
return;
error:
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null",
"an int64 value or range");
}
| 1threat
|
Converting Negative integer number into positive using if_abs : <p>After prompting a user to enter an integer and printing the absolute value of it, how do you convert a negative input integer into positive using if statements.</p>
| 0debug
|
How to mock a SharedPreferences using Mockito : <p>I have just read about Unit Instrumented Testing in Android and I wonder how I can mock a SharedPreferences without any SharedPreferencesHelper class on it like <a href="https://github.com/googlesamples/android-testing/blob/master/unit/BasicSample/app/src/test/java/com/example/android/testing/unittesting/BasicSample/SharedPreferencesHelperTest.java" rel="noreferrer">here</a></p>
<p>My code is:</p>
<pre><code>public class Auth {
private static SharedPreferences loggedUserData = null;
public static String getValidToken(Context context)
{
initLoggedUserPreferences(context);
String token = loggedUserData.getString(Constants.USER_TOKEN,null);
return token;
}
public static String getLoggedUser(Context context)
{
initLoggedUserPreferences(context);
String user = loggedUserData.getString(Constants.LOGGED_USERNAME,null);
return user;
}
public static void setUserCredentials(Context context, String username, String token)
{
initLoggedUserPreferences(context);
loggedUserData.edit().putString(Constants.LOGGED_USERNAME, username).commit();
loggedUserData.edit().putString(Constants.USER_TOKEN,token).commit();
}
public static HashMap<String, String> setHeaders(String username, String password)
{
HashMap<String, String> headers = new HashMap<String, String>();
String auth = username + ":" + password;
String encoding = Base64.encodeToString(auth.getBytes(), Base64.DEFAULT);
headers.put("Authorization", "Basic " + encoding);
return headers;
}
public static void deleteToken(Context context)
{
initLoggedUserPreferences(context);
loggedUserData.edit().remove(Constants.LOGGED_USERNAME).commit();
loggedUserData.edit().remove(Constants.USER_TOKEN).commit();
}
public static HashMap<String, String> setHeadersWithToken(String token) {
HashMap<String, String> headers = new HashMap<String, String>();
headers.put("Authorization","Token "+token);
return headers;
}
private static SharedPreferences initLoggedUserPreferences(Context context)
{
if(loggedUserData == null)
loggedUserData = context.getSharedPreferences(Constants.LOGGED_USER_PREFERENCES,0);
return loggedUserData;
}}
</code></pre>
<p>Is is possible to mock SharedPreferences without creating other class on it?</p>
| 0debug
|
Java minus sign and hypen difference in regex : <p>I am checking file names with a regex.</p>
<p>File names can be format of </p>
<blockquote>
<p>customer name - company name</p>
</blockquote>
<p>I am using this regex:</p>
<pre><code>private static final Pattern fileRegex = Pattern.compile("^[a-zA-Z0-9_\\-\\.\\s\\,\\[\\]()\\{\\}]+$");
</code></pre>
<p>But hypen sign (minus sign) is not working and it is acting like a dash.
I am not sure maybe it is because of IntelliJ idea settings.
how can I add minus sign to this regex?</p>
<p>For example this format must be valid:</p>
<blockquote>
<p>test - test1 − test2</p>
</blockquote>
<p>In here first one is just simple dash and the second one is minus sign.</p>
| 0debug
|
def bell_number(n):
    """Return the n-th Bell number using the Bell triangle.

    Each row of the triangle starts with the last entry of the previous
    row; every other entry is the sum of the entry to its left and the
    entry above-left.  B(n) is the first entry of row n.
    """
    triangle = [[0] * (n + 1) for _ in range(n + 1)]
    triangle[0][0] = 1
    for row in range(1, n + 1):
        # New row begins with the previous row's last value.
        triangle[row][0] = triangle[row - 1][row - 1]
        for col in range(1, row + 1):
            triangle[row][col] = triangle[row - 1][col - 1] + triangle[row][col - 1]
    return triangle[n][0]
| 0debug
|
ReactNative Flatlist - RenderItem not working : <p>So I'm trying to use React Native's FlatList renderItem property, but something very strange is happening.</p>
<p>The <code>data</code> property is set to an array which has elements which are not undefined, but then, in the <code>renderItem</code> function, it gives me an error saying that the argument of the function is undefined, unless I call the argument <code>item</code>.</p>
<p>Here's my code:</p>
<pre><code>export default class Profile extends React.Component {
onLearnMore = (user) => {
this.props.navigation.navigate('UserDetail', user)
}
render() {
return (
<List>
<FlatList
data={data.users}
renderItem={( {item} ) => {
console.log(item)
return (<ListItem
roundAvatar
title={`${item.fName} ${item.lName}`}
onPress={() => this.onLearnMore(item)}
/>)
}}
/>
</List>
)
}
}
</code></pre>
<p>If I swapped <code>{item}</code> with <code>{userData}</code>, then <code>userData</code> would be undefined later in the function. Does anyone know why this happens?</p>
| 0debug
|
In R plotly subplot graph, how to show only one legend? : <p>I have a basic subplot with two graphs, both have a legend by default, but I want to see only one of them.</p>
<p>I tried this :</p>
<pre><code>require(plotly)
p1 <- plot_ly(data=iris,x=~Sepal.Length,y=~Sepal.Width,split=~Species) %>% layout(showlegend = FALSE)
p2 <- plot_ly(data=iris,x=~Sepal.Length,y=~Sepal.Width,split=~Species) %>% layout(showlegend = TRUE)
subplot(p1,p2)
subplot(p2,p1)
</code></pre>
<p>But it doesn't work: it seems as if only the first plot's showlegend attribute is honored, so if I start with p1 I get no legend, and if I start with p2 I get two legends. </p>
<p>Any ideas ?</p>
| 0debug
|
/*
 * Core of QEMU's user-mode SIGSEGV handling: decide whether a faulting
 * guest access can be resolved quietly (code-page unprotect or soft-MMU
 * fill) or must be turned into a guest exception.
 *
 * Returns 1 when the fault was handled (or a guest exception was raised),
 * 0 when the signal should be passed on to the host's default handling.
 */
static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    /* The signal may arrive on any thread; adopt the currently running
     * CPU's environment if one is set. */
    if (cpu_single_env) {
        env = cpu_single_env;
    }
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* A write to a page that was write-protected (presumably because it
     * holds translated code): unprotect it and retry the access. */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX);
    if (ret < 0) {
        /* Not an MMU fault at all — let the host deal with the signal. */
        return 0;
    }
    if (ret == 0) {
        /* MMU fault satisfied without raising a guest exception. */
        return 1;
    }
    /* Genuine guest fault: if pc lies inside translated code, recover
     * precise guest CPU state before delivering the exception. */
    tb = tb_find_pc(pc);
    if (tb) {
        cpu_restore_state(tb, env, pc);
    }
    /* Restore the pre-signal mask before leaving the handler context. */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* exception_action() presumably does not return normally (it jumps
     * back into the CPU loop) — TODO confirm; the return below is for
     * completeness. */
    exception_action(env);
    return 1;
}
| 1threat
|
How to send request in Alamofire 4.0 only with parameters and body using POST method in Swift : I am using it like this, but in this case I need to call the API with both parameters and a body. Please help me... Thanks in advance
Alamofire.request(postUrl, method: .post, parameters: params, encoding: CustomPostEncoding(), headers: nil).validate().responseJSON{ response in
switch response.result
{
case .success:
MBProgressHUD.hide(for: self.view, animated: true)
if let val = response.result.value
{
let json = JSON(val)
print(json)
}
case .failure(let error):
print(error)
}
}
| 0debug
|
Detect available memory inside of a WebView : <p>I'm building an app that contains a <code>WebView</code> that runs some JavaScript code. That JavaScript code is quite allocation heavy and can require a lot of memory.</p>
<p>Sometimes, the amount of required memory exceeds the amount JavaScript can require and crashes the Chromium process of the WebView which crashes my app.</p>
<p>I listen to <code>onTrimMemory</code> in my application - but it is never called in this scenario on devices with more than 1GB of memory. (Not even with <code>TRIM_MEMORY_RUNNING_LOW</code>).</p>
<p>Is there any way I could detect my WebView is running low on memory and either kill it or let it know (so it can free memory)? </p>
<p>I've tried polling <code>performance.memory</code> but it did not work. The following script crashes the WebView if executed in it:</p>
<pre><code>var a = [];
var kek = () => {
var b = [];
for(var i = 0; i < 1024 * 1024 * 2; i++) b.push(Math.random());
return b;
}
var ival = setInterval(() => {
let m = performance.memory;
if(m.jsHeapSizeLimit - m.usedJSHeapSize < 1e5) {
console.log("Memory limited")
} else {
a.push(kek());
}
});
</code></pre>
<p>Is there any way to detect memory is about to run out so I can handle it gracefully without the app crashing? </p>
| 0debug
|
File upload not working in oops php : public function upload($file=array(),$where){
    // Expects $file to be one entry of $_FILES
    // (keys: name, type, tmp_name, error, size).
    if (file_exists($this->src.$file['name']))
    {
        $data['error'] = "Sorry, file already exists.";
    }
    // NOTE(review): $file["name"]["size"] indexes into the *name string*;
    // the uploaded size lives at $file["size"], so this check never fires.
    elseif($file["name"]["size"] > 500000) {
        $data['error'] = "Sorry, your file is too large.";
    }
    elseif(is_array($file))
    {
        if(in_array($file['type'],$this->type))
        {
            $filePath = $this->src.$file['name'];
            // NOTE(review): the temp path is $file['tmp_name'], not
            // $file['name']['tmp_name'] — this is why the file is never
            // moved into the destination folder.
            $file_Check = move_uploaded_file($file['name']['tmp_name'], $filePath);
            if($file_Check )
            {
                // NOTE(review): die() aborts here, so the update call
                // below can never run even on success.
                print_r($file); die();
                $name = $file['name'];
                //$this->update(array('photo'=>$name,'candi_id'=>$where));
            }
            $data['error'] = 'File has been uploaded';
        }
        else
        {
            $data['error'] = 'File formet was not supported';
        }
    }
    else
    {
        $data['error'] = 'No File was uploaded...';
    }
    return $data;
}
output:
The file path shows correctly and the function also seems to run fine, but the file does not move to the folder. Why?
| 0debug
|
/*
 * Return the device-reported block size, read from the cached virtio-blk
 * configuration (blk_cfg is presumably populated during device setup —
 * confirm it is initialized before this is called).
 */
int virtio_get_block_size(void)
{
    return blk_cfg.blk_size;
}
| 1threat
|
/*
 * Finish visiting a list: dispatch to the concrete visitor's end_list
 * callback. Callers must not pass in an already-set error (asserted);
 * the callback reports any failure through errp.
 */
void visit_end_list(Visitor *v, Error **errp)
{
    assert(!error_is_set(errp));
    v->end_list(v, errp);
}
| 1threat
|
/*
 * Map a device register address to its channel index: each channel's
 * register window is 8 KiB (1 << 13), so the channel number is simply
 * addr / 0x2000.
 */
static inline int fs_channel(target_phys_addr_t addr)
{
    return addr >> 13;
}
| 1threat
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.