problem
stringlengths 26
131k
| labels
class label 2
classes |
|---|---|
/*
 * Warn (once per connection) that the SSH server cannot honour flush
 * requests, and tell the user what is needed to get fsync support.
 * Subsequent calls are no-ops thanks to the one-shot flag in 's'.
 */
static void unsafe_flush_warning(BDRVSSHState *s, const char *what)
{
    /* Only emit the warning the first time around. */
    if (s->unsafe_flush_warning) {
        return;
    }

    error_report("warning: ssh server %s does not support fsync",
                 s->inet->host);
    if (what) {
        error_report("to support fsync, you need %s", what);
    }
    s->unsafe_flush_warning = true;
}
| 1threat
|
/*
 * Perform a Cirrus BitBLT copy within video RAM and, when the blit is a
 * plain forward/backward source copy whose source and destination
 * rectangles are both fully on-screen, mirror the copy to the display
 * console via qemu_console_copy() so the backend can move pixels
 * instead of redrawing.
 *
 * 'dst' and 'src' are byte offsets of the blit corners in video RAM;
 * 'w' is the blit width in bytes and 'h' the height in lines.
 */
static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
{
    int sx = 0, sy = 0;
    int dx = 0, dy = 0;
    int depth = 0;
    int notify = 0;
    /* Only a straight source copy can be forwarded to the console. */
    if (*s->cirrus_rop == cirrus_bitblt_rop_fwd_src ||
        *s->cirrus_rop == cirrus_bitblt_rop_bkwd_src) {
        int width, height;
        /* NOTE(review): depth is bytes per pixel; assumes bpp >= 8 so
         * depth != 0 before the divisions below -- confirm upstream. */
        depth = s->vga.get_bpp(&s->vga) / 8;
        s->vga.get_resolution(&s->vga, &width, &height);
        /* Convert byte offsets into pixel coordinates. */
        sx = (src % ABS(s->cirrus_blt_srcpitch)) / depth;
        sy = (src / ABS(s->cirrus_blt_srcpitch));
        dx = (dst % ABS(s->cirrus_blt_dstpitch)) / depth;
        dy = (dst / ABS(s->cirrus_blt_dstpitch));
        w /= depth;
        /* A backward blit addresses the bottom-right corner; rebase the
         * coordinates to the top-left corner of each rectangle. */
        if (s->cirrus_blt_dstpitch < 0) {
            sx -= (s->cirrus_blt_width / depth) - 1;
            dx -= (s->cirrus_blt_width / depth) - 1;
            sy -= s->cirrus_blt_height - 1;
            dy -= s->cirrus_blt_height - 1;
        }
        /* Notify the console only if both rectangles are fully visible. */
        if (sx >= 0 && sy >= 0 && dx >= 0 && dy >= 0 &&
            (sx + w) <= width && (sy + h) <= height &&
            (dx + w) <= width && (dy + h) <= height) {
            notify = 1;
        }
    }
    /* Bring the displayed surface up to date before copying from it. */
    if (notify)
        graphic_hw_update(s->vga.con);
    /* Do the actual blit in guest video memory. */
    (*s->cirrus_rop) (s, s->vga.vram_ptr +
                      (s->cirrus_blt_dstaddr & s->cirrus_addr_mask),
                      s->vga.vram_ptr +
                      (s->cirrus_blt_srcaddr & s->cirrus_addr_mask),
                      s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch,
                      s->cirrus_blt_width, s->cirrus_blt_height);
    if (notify) {
        qemu_console_copy(s->vga.con,
                          sx, sy, dx, dy,
                          s->cirrus_blt_width / depth,
                          s->cirrus_blt_height);
    }
    /* Mark the destination region dirty so it gets redrawn if needed. */
    cirrus_invalidate_region(s, s->cirrus_blt_dstaddr,
                             s->cirrus_blt_dstpitch, s->cirrus_blt_width,
                             s->cirrus_blt_height);
}
| 1threat
|
Index match within range : At first it looked like an easy one; however, I'm stuck trying to find a way to solve it. The idea is to find C3, which matches C1 and falls in the given range C2. Basically, B 40 should return -0.15. Any suggestions?
[1]:https://imgur.com/a/9m1w1PZ "case"
| 0debug
|
Random Number generator with skewness : <p>I'm looking for a way to generate a random number with:</p>
<ul>
<li>A lower and upper boundary</li>
<li>An average not in the middle of the specified range</li>
<li>An adjustable standard deviation</li>
</ul>
<p>In Java. </p>
<p>More specifically: I want to generate a long list of numbers with a minimum of 0, maximum of 40 and an average of +- 5, and i'd like to be able to adjust how far around the average the numbers are.</p>
| 0debug
|
Please help me understanding i am new with coding : public static void main(String[] args) {
Scanner sc=new Scanner(System.in);
String A=sc.next();
String B=sc.next();
System.out.println(A.length()+B.length());
System.out.println(A.compareTo(B)>0?"Yes":"No");
System.out.println(capitalizeFirstLetter(A) + " " + capitalizeFirstLetter(B));
}
public static String capitalizeFirstLetter(String original) {
if (original == null || original.length() == 0) {
return original;
}
return original.substring(0, 1).toUpperCase() + original.substring(1);
}
I am not understanding it; please help me understand this.
| 0debug
|
Random row value display in random place without duplicate value mysql, php : <p>In a mysql table I have some 30 rows. want to display 1 random row data in 3 different places in a same page without duplicate the row.</p>
<p>Can any one help me regarding this?</p>
| 0debug
|
/*
 * Visit an int64 value, dispatching to the visitor's dedicated
 * type_int64 callback when available and falling back to the generic
 * type_int callback otherwise.  Does nothing if an error is already set.
 */
void visit_type_int64(Visitor *v, int64_t *obj, const char *name, Error **errp)
{
    if (error_is_set(errp)) {
        return;
    }
    if (v->type_int64) {
        v->type_int64(v, obj, name, errp);
        return;
    }
    v->type_int(v, obj, name, errp);
}
| 1threat
|
list all globally installed modules with one command in ubuntu : <p>I'm working on <strong><code>ubuntu 14.04</code></strong>, Is there any way to print all global modules (installed using <strong><code>npm</code></strong>) to the command line. How can I do this?</p>
| 0debug
|
/*
 * Translate a MIPS virtual address into a physical address and access
 * permissions for the current translation regime.
 *
 * Returns TLBRET_MATCH on success (with *physical and *prot filled in),
 * TLBRET_BADADDR for addresses not accessible in the current privilege
 * mode, or whatever the TLB lookup (map_address) returns for mapped
 * segments.  'mmu_idx' selects user/supervisor/kernel mode; 'rw' and
 * 'access_type' are forwarded to the TLB lookup.
 */
static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
                                 int *prot, target_ulong real_address,
                                 int rw, int access_type, int mmu_idx)
{
    /* Privilege level implied by the MMU index. */
    int user_mode = mmu_idx == MIPS_HFLAG_UM;
    int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
    int kernel_mode = !user_mode && !supervisor_mode;
#if defined(TARGET_MIPS64)
    /* 64-bit segment enable bits from CP0 Status. */
    int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
    int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
    int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
#endif
    int ret = TLBRET_MATCH;
    /* 'address' may be rebased below to fold the KVM guest layout into
     * the architectural segment layout. */
    target_ulong address = real_address;
    /* 32-bit segment boundaries. */
#define USEG_LIMIT 0x7FFFFFFFUL
#define KSEG0_BASE 0x80000000UL
#define KSEG1_BASE 0xA0000000UL
#define KSEG2_BASE 0xC0000000UL
#define KSEG3_BASE 0xE0000000UL
#define KVM_KSEG0_BASE 0x40000000UL
#define KVM_KSEG2_BASE 0x60000000UL
    if (kvm_enabled()) {
        /* KVM guests run with kernel segments relocated into useg;
         * translate them back to the architectural kseg0/kseg2. */
        if (real_address >= KVM_KSEG0_BASE) {
            if (real_address < KVM_KSEG2_BASE) {
                address += KSEG0_BASE - KVM_KSEG0_BASE;
            } else if (real_address <= USEG_LIMIT) {
                address += KSEG2_BASE - KVM_KSEG2_BASE;
            }
        }
    }
    if (address <= USEG_LIMIT) {
        /* useg: user segment, normally TLB mapped. */
        if (env->CP0_Status & (1 << CP0St_ERL)) {
            /* With Status.ERL set useg is treated as unmapped. */
            *physical = address & 0xFFFFFFFF;
            *prot = PAGE_READ | PAGE_WRITE;
        } else {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        }
#if defined(TARGET_MIPS64)
    } else if (address < 0x4000000000000000ULL) {
        /* xuseg: 64-bit user segment, mapped when Status.UX is set. */
        if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0x8000000000000000ULL) {
        /* xsseg: 64-bit supervisor segment, mapped when Status.SX is set. */
        if ((supervisor_mode || kernel_mode) &&
            SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xC000000000000000ULL) {
        /* xkphys: unmapped kernel windows onto physical memory. */
        if (kernel_mode && KX &&
            (address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
            *physical = address & env->PAMask;
            *prot = PAGE_READ | PAGE_WRITE;
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xFFFFFFFF80000000ULL) {
        /* xkseg: 64-bit kernel segment, TLB mapped. */
        if (kernel_mode && KX &&
            address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
#endif
    } else if (address < (int32_t)KSEG1_BASE) {
        /* kseg0: unmapped kernel segment (cached). */
        if (kernel_mode) {
            *physical = address - (int32_t)KSEG0_BASE;
            *prot = PAGE_READ | PAGE_WRITE;
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < (int32_t)KSEG2_BASE) {
        /* kseg1: unmapped kernel segment (uncached). */
        if (kernel_mode) {
            *physical = address - (int32_t)KSEG1_BASE;
            *prot = PAGE_READ | PAGE_WRITE;
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < (int32_t)KSEG3_BASE) {
        /* kseg2 (sseg): TLB mapped, supervisor or kernel access. */
        if (supervisor_mode || kernel_mode) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else {
        /* kseg3: TLB mapped, kernel only. */
        if (kernel_mode) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    }
    return ret;
}
| 1threat
|
/*
 * Tear down a null-output AVIOContext created for size accounting.
 * Flushes pending data so the byte count is final, frees both the
 * dynamic buffer state and the context, and returns the total number
 * of bytes that were "written".
 */
int ffio_close_null_buf(AVIOContext *s)
{
    DynBuffer *buf = s->opaque;
    int written;

    /* Flush first so any buffered bytes are counted in buf->size. */
    avio_flush(s);
    written = buf->size;

    av_free(buf);
    av_free(s);

    return written;
}
| 1threat
|
php using for to loop words without any space between them : <p>How can I achieve making words loops without any space between them using <em>for</em></p>
<p>Here is my code: </p>
<pre><code>function a($var) {
for ($i = 0; $i < ; $i++)
{
echo "a";
}
}
a(3);
</code></pre>
<p>I want it to make loops like: </p>
<pre><code>a(3);
output: aaa
</code></pre>
<p>but I get an error saying </p>
<pre><code>syntax error, unexpected ';'
</code></pre>
| 0debug
|
/*
 * Append a copy of 'str' to the completion list, silently dropping the
 * entry once the fixed-size table is full.
 */
static void add_completion(const char *str)
{
    if (nb_completions >= NB_COMPLETIONS_MAX) {
        return;
    }
    completions[nb_completions++] = qemu_strdup(str);
}
| 1threat
|
def nCr_mod_p(n, r, p):
    """Return C(n, r) mod p, built row by row via Pascal's rule.

    Uses O(r) space: ``row[j]`` holds C(i, j) mod p after processing
    row ``i``.  Exploits symmetry C(n, r) == C(n, n - r) to keep the
    row short.
    """
    # Work with the smaller of r and n - r.
    r = min(r, n - r)
    # row[j] starts as C(0, j): 1 for j == 0, else 0.
    row = [1] + [0] * r
    for i in range(1, n + 1):
        # Update in place from the right so row[j - 1] is still the
        # previous row's value when we read it.
        for j in range(min(i, r), 0, -1):
            row[j] = (row[j] + row[j - 1]) % p
    return row[r]
| 0debug
|
SQL vs Cassandra Data type mappings : <p>I am mapping some data types from SQL server to cassandra, such as int to bigint, real to float, varchar to text. Where can I get the mappings from SQL server to cassandra?</p>
| 0debug
|
Php result from php my admin : I have a table where I want to put my songs in. I don't get the result from the database, the database is connected doesn't say it is unable to connect. My page is also blank
$db = mysqli_connect("$host:$port", $user, $pass) or die("null");
mysqli_select_db($db, $database) or die("Unable to select database");
mysqli_query("SET NAMES utf8", $db);
mysqli_query( "SET CHARACTER SET utf8", $db );
$h.= "";
$h.= "<form><table class='table table-striped table-hover'>";
$h.= " <tr>";
$h.= " <th>#</th>";
$h.= " <th>Home</th>";
$h.= " <th></th>";
$h.= " </tr>";
$sql = mysqli_query("SELECT * FROM songs");
$res = mysqli_result($sql);
for($i=0;$i<$res->num_rows);$i++){
$h.= " <td>".($res,$i,"id")."</td>";
$h.= " <td>".($res,$i,"song")."</td>";
}
| 0debug
|
How do i change the name of my project in Android Studio? : <p>I am creating a project which has to be named as "Emergency Helper". When i started creating the project,I named it as "Emeregency Helper". I want to rename it as "Emergency Helper".Kindly help!</p>
| 0debug
|
I have tag issue on AndroidMainfest.xml file : <p>I am developing an uber app, so I need to connect my app through the internet while I am doing that I saw there are tag issues on my XML file and I tried to fix it changing tags but it's not working.</p>
<pre><code><?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
package="com.example.accident">
<!--
The ACCESS_COARSE/FINE_LOCATION permissions are not required to use
Google Maps Android API v2, but you must specify either coarse or fine
location permissions for the 'MyLocation' functionality.
-->
<uses-permission android:name="android.permission.ACCESS_FINE_LOCATION" />
<user-permission android:name="android.permission.ACCESS_COARSE_LOCATION"/>
<user-permission android:name="android.permission.INTERNET'/>
<application
android:allowBackup="true"
android:icon="@mipmap/ic_launcher"
android:label="@string/app_name"
android:roundIcon="@mipmap/ic_launcher_round"
android:supportsRtl="true"
android:theme="@style/AppTheme">
<!--
The API key for Google Maps-based APIs is defined as a string resource.
(See the file "res/values/google_maps_api.xml").
Note that the API key is linked to the encryption key used to sign the APK.
You need a different API key for each encryption key, including the release key that is used to
sign the APK for publishing.
You can define the keys for the debug and release targets in src/debug/ and src/release/.
-->
<meta-data
android:name="com.google.android.geo.API_KEY"
android:value="@string/google_maps_key" />
<activity
android:name=".CivilianMapsActivity"
android:label="@string/title_activity_civilian_maps"> </activity>
<activity android:name=".CivilianLoginRegisterActivity" />
<activity android:name=".PolicemanLoginRegisterActivity" />
<activity android:name=".WelcomeActivity" />
<activity android:name=".MainActivity"
tools:ignore="WrongManifestParent">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
</manifest>
</code></pre>
<p>I need to fix those. Also these two lines not working too. is it because of tags issue?</p>
<pre><code><user-permission android:name="android.permission.ACCESS_COARSE_LOCATION"/>
<user-permission android:name="android.permission.INTERNET'/>
</code></pre>
| 0debug
|
# Use a parameterized query: concatenating product_input into the SQL
# string allows SQL injection if the value comes from untrusted input.
db.execute('SELECT * FROM products WHERE product_id = ?', (product_input,))
| 1threat
|
create 9 patch images for background images : <p>I have worked out to create 9 patch images for background images.i have struggled to create the nine patch images in making the lines at top and left portion of images.can any one guide on this?</p>
<p>see this link</p>
<p><a href="https://www.google.co.in/search?q=background+images&biw=1366&bih=641&noj=1&tbm=isch&imgil=N8KsG6ve1vbn8M%253A%253B7yPe8a2Anf4ArM%253Bhttp%25253A%25252F%25252Fwww.planwallpaper.com%25252Fbackground-images&source=iu&pf=m&fir=N8KsG6ve1vbn8M%253A%252C7yPe8a2Anf4ArM%252C_&usg=__xv3d9hZA9_uE8q0ivwIJW43bZJQ%3D&ved=0ahUKEwjJpOWLvdvKAhXPC44KHXdvBT8QyjcILw&ei=tOKxVsn6CM-XuAT33pX4Aw#imgrc=N8KsG6ve1vbn8M%3A&usg=__xv3d9hZA9_uE8q0ivwIJW43bZJQ%3D" rel="nofollow">https://www.google.co.in/search?q=background+images&biw=1366&bih=641&noj=1&tbm=isch&imgil=N8KsG6ve1vbn8M%253A%253B7yPe8a2Anf4ArM%253Bhttp%25253A%25252F%25252Fwww.planwallpaper.com%25252Fbackground-images&source=iu&pf=m&fir=N8KsG6ve1vbn8M%253A%252C7yPe8a2Anf4ArM%252C_&usg=__xv3d9hZA9_uE8q0ivwIJW43bZJQ%3D&ved=0ahUKEwjJpOWLvdvKAhXPC44KHXdvBT8QyjcILw&ei=tOKxVsn6CM-XuAT33pX4Aw#imgrc=N8KsG6ve1vbn8M%3A&usg=__xv3d9hZA9_uE8q0ivwIJW43bZJQ%3D</a></p>
<p>i would like to convert this type of image into 9 patch images..how to make stretchable lines on left and top of the image </p>
| 0debug
|
/*
 * Free the dynamically allocated region and alias arrays of a
 * PortioList.  NOTE(review): the pointers are not reset to NULL, so the
 * list must not be reused without being re-initialised.
 */
void portio_list_destroy(PortioList *piolist)
{
    g_free(piolist->regions);
    g_free(piolist->aliases);
}
| 1threat
|
Code Igniter Error On Windows Server : Type: Exception
Message: Session: Configured save path 'C:\Windows\TEMP' is not writable by the PHP process.
Hi , I have an Error Like this:
Can anyone tell me:these error are appearing on website codeigniter http://camelianmultiservices.co.in/index.php
| 0debug
|
Python dict with values as tuples to pandas DataFrame : <p>I have the following dict:</p>
<pre><code>td = {'q1':(111,222), 'q2':(333,444)}
</code></pre>
<p>I would like to convert it to a dataframe that looks like this:</p>
<pre><code>Query Value1 Value2
q1 111 222
q2 333 444
</code></pre>
<p>I have tried the following:</p>
<pre><code>df = pd.DataFrame(td.items())
</code></pre>
<p>The result looks like this:</p>
<pre><code> 0 1
0 q1 (111,222)
1 q2 (333,444)
</code></pre>
<p>If it wasn't entirely obvious, I am new to python and pandas. How can I get a dictionary with values as tuples to behave as separate columns in a dataframe?</p>
<p>My end goal is to display percent difference between value1 and value2. </p>
| 0debug
|
/*
 * Copy the "feature" fields -- device attachment, I/O limits and
 * throttling state, geometry, error handling policy, I/O status and
 * dirty tracking -- from bs_src into bs_dest.  These are properties of
 * the logical block device rather than the underlying image, so they
 * must travel with the device when two BlockDriverState contents are
 * swapped.  NOTE(review): this list must be kept in sync with the
 * BlockDriverState definition; a field missing here would silently be
 * left behind on a swap.
 */
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    bs_dest->open_flags = bs_src->open_flags;
    /* Guest device hookup. */
    bs_dest->dev_ops = bs_src->dev_ops;
    bs_dest->dev_opaque = bs_src->dev_opaque;
    bs_dest->dev = bs_src->dev;
    bs_dest->buffer_alignment = bs_src->buffer_alignment;
    bs_dest->copy_on_read = bs_src->copy_on_read;
    bs_dest->enable_write_cache = bs_src->enable_write_cache;
    /* I/O throttling state. */
    bs_dest->slice_time = bs_src->slice_time;
    bs_dest->slice_start = bs_src->slice_start;
    bs_dest->slice_end = bs_src->slice_end;
    bs_dest->io_limits = bs_src->io_limits;
    bs_dest->io_base = bs_src->io_base;
    bs_dest->throttled_reqs = bs_src->throttled_reqs;
    bs_dest->block_timer = bs_src->block_timer;
    bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
    /* Disk geometry. */
    bs_dest->cyls = bs_src->cyls;
    bs_dest->heads = bs_src->heads;
    bs_dest->secs = bs_src->secs;
    bs_dest->translation = bs_src->translation;
    /* Error handling policy and I/O status reporting. */
    bs_dest->on_read_error = bs_src->on_read_error;
    bs_dest->on_write_error = bs_src->on_write_error;
    bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
    bs_dest->iostatus = bs_src->iostatus;
    /* Dirty bitmap tracking. */
    bs_dest->dirty_count = bs_src->dirty_count;
    bs_dest->dirty_bitmap = bs_src->dirty_bitmap;
    /* Job and reference bookkeeping. */
    bs_dest->in_use = bs_src->in_use;
    bs_dest->job = bs_src->job;
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->list = bs_src->list;
}
| 1threat
|
can't Javascript ajax response stored in global variable : i can not access the global variable which store some ajax response data, when I console log the object itself, it give me all the details, but when i console log the arrays in that object, it returns me []. i have set the ajax request to be synchronous.
Please take a look this http://codepen.io/stanleyyylau/pen/reKrdG
//let's append content
console.log(resultObject);
console.log(resultObject.logo);
for(var ii=0; ii<channelArr.length;ii++){
resultObject.append(resultObject.logo[ii],resultObject.url[ii],resultObject.namee[ii],resultObject.statuss[ii]);
}
| 0debug
|
/*
 * Install PPC AltiVec-optimised HuffYUV DSP routines.  Only takes
 * effect when the build has AltiVec support on a big-endian target and
 * the runtime CPU actually advertises AltiVec; otherwise the context is
 * left with its default (C) implementations.
 */
av_cold void ff_huffyuvdsp_init_ppc(HuffYUVDSPContext *c)
{
#if HAVE_ALTIVEC && HAVE_BIGENDIAN
    /* Runtime check: bail out if the CPU lacks AltiVec. */
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;
    c->add_bytes = add_bytes_altivec;
#endif
}
| 1threat
|
What does TensorFlow's `conv2d_transpose()` operation do? : <p>The documentation for the <code>conv2d_transpose()</code> operation does not clearly explain what it does:</p>
<blockquote>
<p>The transpose of conv2d.</p>
<p>This operation is sometimes called "deconvolution" after
<a href="http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf" rel="noreferrer">Deconvolutional Networks</a>, but is actually the transpose (gradient) of
conv2d rather than an actual deconvolution.</p>
</blockquote>
<p>I went through the paper that the doc points to, but it did not help.</p>
<p>What does this operation do and what are examples of why you would want to use it?</p>
| 0debug
|
Clang and GCC disagree on legality of direct initialization with conversion operator : <p>The latest version of clang (3.9) rejects this code on the second line of <code>f</code>; the latest version of gcc (6.2) accepts it:</p>
<pre><code>struct Y {
Y();
Y(const Y&);
Y(Y&&);
};
struct X {
operator const Y();
};
void f() {
X x;
Y y(x);
}
</code></pre>
<p>If any of these changes are made, clang will then accept the code:</p>
<ul>
<li>Remove <code>Y</code>'s move constructor</li>
<li>Remove <code>const</code> from the conversion operator</li>
<li>Replace <code>Y y(x)</code> with <code>Y y = x</code></li>
</ul>
<p>Is the original example legal? Which compiler is wrong? After checking the sections about conversion functions and overload resolution in the standard I have not been able to find a clear answer.</p>
| 0debug
|
How to write images into Amazon S3 from a AWS EC2 deployed java app : <p>I have deployed a java application(hosted on tomcat) on AWS EC2. And I want to write some image files to AWS S3 storage by using this application.
Is there any proper way to do this.</p>
| 0debug
|
How can i get started on this method? : <p>when one number divides another without leaving a remainder the first number is called a factor of the second. A number n is said to be perfect if the sum of the factors which are less than n are equal to n.For
example, 28 is a perfect number because the sum of its factors which are less than 28(1, 2, 4, 7, and 14) equals 28. Write a boolean method named
isPerfect that has one integer parameter n, which returns
true if n is perfect or false if n is not perfect</p>
| 0debug
|
/*
 * Implement the RTAS "get-time-of-day" call for sPAPR guests.
 *
 * Return register layout:
 *   rets[0]    status (0 = success, -3 = parameter error)
 *   rets[1..6] year / month / day / hour / minute / second
 *   rets[7]    nanoseconds (always 0 here)
 */
static void rtas_get_time_of_day(sPAPREnvironment *spapr,
                                 uint32_t token, uint32_t nargs,
                                 target_ulong args,
                                 uint32_t nret, target_ulong rets)
{
    struct tm tm;
    /* The call must provide exactly 8 return slots. */
    if (nret != 8) {
        rtas_st(rets, 0, -3);
        return;
    }
    /* Host time adjusted by the guest's RTC offset. */
    qemu_get_timedate(&tm, spapr->rtc_offset);
    rtas_st(rets, 0, 0);
    rtas_st(rets, 1, tm.tm_year + 1900); /* struct tm years are 1900-based */
    rtas_st(rets, 2, tm.tm_mon + 1);     /* struct tm months are 0-based */
    rtas_st(rets, 3, tm.tm_mday);
    rtas_st(rets, 4, tm.tm_hour);
    rtas_st(rets, 5, tm.tm_min);
    rtas_st(rets, 6, tm.tm_sec);
    rtas_st(rets, 7, 0);                 /* nanoseconds, not tracked */
}
| 1threat
|
/*
 * Initialise the VMware SVGA device: allocate the scratch register
 * area and command FIFO backing memory, bring up the common VGA core,
 * register migration state, and attach the graphical console and VGA
 * BIOS ROM.
 */
static void vmsvga_init(struct vmsvga_state_s *s, int vga_ram_size)
{
    s->scratch_size = SVGA_SCRATCH_SIZE;
    /* Scratch registers are 32-bit wide, hence the factor of 4. */
    s->scratch = qemu_malloc(s->scratch_size * 4);
    vmsvga_reset(s);
    /* Command FIFO lives in guest-visible RAM. */
    s->fifo_size = SVGA_FIFO_SIZE;
    s->fifo_offset = qemu_ram_alloc(s->fifo_size);
    s->fifo_ptr = qemu_get_ram_ptr(s->fifo_offset);
    /* Standard VGA core setup plus migration registration. */
    vga_common_init(&s->vga, vga_ram_size);
    vga_init(&s->vga);
    vmstate_register(0, &vmstate_vga_common, &s->vga);
    s->vga.ds = graphic_console_init(vmsvga_update_display,
                                     vmsvga_invalidate_display,
                                     vmsvga_screen_dump,
                                     vmsvga_text_update, s);
    vga_init_vbe(&s->vga);
    rom_add_vga(VGABIOS_FILENAME);
}
| 1threat
|
Too many React Context providers : <p>New to react here and trying to wrap my head round the new Context API (I haven't looked into Redux etc. yet).</p>
<p>Seems I can do much of what I need to do, but I'm going to end up with lots and lots of providers, all needing a tag to wrap my main app.</p>
<p>I'm going to have a provider for Auth, one for theming, one for chat messages (vis Pusher.com) etc. Also using React Router is another wrapper element.</p>
<p>Am I going to have to end up with this (and many more)....</p>
<pre><code><BrowserRouter>
<AuthProvider>
<ThemeProvider>
<ChatProvider>
<App />
</ChatProvider>
</ThemeProvider>
</AuthProvider>
</BrowserRouter>
</code></pre>
<p>Or is there a better way?</p>
| 0debug
|
Array size redefinition : So basically, according to the definition of an array, we cannot change the array size. But if I am adding an element to the same array by shifting the other elements to the right, the array size is going to increase. So how is this possible?
| 0debug
|
Hello world example for py2exe giving strange error : <p>I have a file named "mini.py" with the following contents:</p>
<pre><code>print("hello worlds")
</code></pre>
<p>If I run <code>pip install py2exe --upgrade</code> I get:</p>
<pre><code>Requirement already up-to-date: py2exe in c:\python37\lib\site-packages (0.9.2.2)
</code></pre>
<p>If I run <code>build_exe mini.py</code> I get:</p>
<pre><code>Traceback (most recent call last):
File "c:\python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Python37\Scripts\build_exe.exe\__main__.py", line 9, in <module>
File "c:\python37\lib\site-packages\py2exe\build_exe.py", line 141, in main
builder.analyze()
File "c:\python37\lib\site-packages\py2exe\runtime.py", line 160, in analyze
self.mf.import_hook(modname)
File "c:\python37\lib\site-packages\py2exe\mf3.py", line 120, in import_hook
module = self._gcd_import(name)
File "c:\python37\lib\site-packages\py2exe\mf3.py", line 274, in _gcd_import
return self._find_and_load(name)
File "c:\python37\lib\site-packages\py2exe\mf3.py", line 357, in _find_and_load
self._scan_code(module.__code__, module)
File "c:\python37\lib\site-packages\py2exe\mf3.py", line 388, in _scan_code
for what, args in self._scan_opcodes(code):
File "c:\python37\lib\site-packages\py2exe\mf3.py", line 417, in _scan_opcodes
yield "store", (names[oparg],)
IndexError: tuple index out of range
</code></pre>
<p>Anyone has any idea what could be causing this?</p>
| 0debug
|
/*
 * Create an OMAP MMC/SD host controller, map its 0x800-byte register
 * window at 'base' in 'sysmem', and attach an SD card backed by 'blk'.
 * Returns the newly allocated controller state (owned by the caller's
 * machine model; never freed).
 */
struct omap_mmc_s *omap_mmc_init(hwaddr base,
                                 MemoryRegion *sysmem,
                                 BlockBackend *blk,
                                 qemu_irq irq, qemu_irq dma[], omap_clk clk)
{
    struct omap_mmc_s *s = (struct omap_mmc_s *)
        g_malloc0(sizeof(struct omap_mmc_s));
    s->irq = irq;
    s->dma = dma;
    s->clk = clk;
    /* Rev 1 controller with a single data line. */
    s->lines = 1;
    s->rev = 1;
    omap_mmc_reset(s);
    memory_region_init_io(&s->iomem, NULL, &omap_mmc_ops, s, "omap.mmc", 0x800);
    memory_region_add_subregion(sysmem, base, &s->iomem);
    /* Instantiate the SD card model (non-SPI mode). */
    s->card = sd_init(blk, false);
    if (s->card == NULL) {
        /* NOTE(review): exits without any diagnostic -- consider
         * reporting an error message before exit(1). */
        exit(1);
    }
    return s;
}
| 1threat
|
Can we declare a object inside the if-else statement? : <p>This code is the main function for the implementation of queues using arrays also using template class. </p>
<pre><code>int main(){
int choice, n;
cout<<"Enter 1 for integer 2 for double\n";
cin >> choice;
cout<<"Enter the size of queue\n";
cin>>n;
if (choice == 1)
queue<int> obj(n);
else
queue<double> obj(n);
for(;;){
cout<<"1:Insertrear 2:Deletefront 3:Display 4:Exit\n";
cin >> choice;
switch(choice){
case 1:obj.insertRear();break;
case 2:obj.deleteFront(); break;
case 3:obj.display();break;
default: return 0;
}
}
return 0;
}
</code></pre>
<p>The thing that I could not understand is, why I got error <code>'obj' was not declared in this scope</code> in the line below the <code>switch</code> statement.</p>
<p>Any help will be well appreciated.</p>
| 0debug
|
/*
 * Emit one deinterlaced output frame from the yadif filter.  When
 * 'is_second' is set this produces the second field of the current
 * input frame (field-rate output), allocating a fresh output buffer
 * and interpolating its timestamp.
 */
static void return_frame(AVFilterContext *ctx, int is_second)
{
    YADIFContext *yadif = ctx->priv;
    AVFilterLink *link= ctx->outputs[0];
    int tff;
    /* Field order: take it from the stream unless overridden by the
     * 'parity' option; default to top-field-first when unknown. */
    if (yadif->parity == -1) {
        tff = yadif->cur->video->interlaced ?
              yadif->cur->video->top_field_first : 1;
    } else {
        tff = yadif->parity^1;
    }
    /* The second field needs its own output buffer; the first field's
     * buffer is set up by the caller. */
    if (is_second) {
        yadif->out = ff_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE |
                                         AV_PERM_REUSE, link->w, link->h);
        avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
        yadif->out->video->interlaced = 0;
    }
    if (!yadif->csp)
        yadif->csp = &av_pix_fmt_descriptors[link->format];
    /* Switch to the 16-bit filter for high bit-depth pixel formats. */
    if (yadif->csp->comp[0].depth_minus1 / 8 == 1)
        yadif->filter_line = filter_line_c_16bit;
    filter(ctx, yadif->out, tff ^ !is_second, tff);
    if (is_second) {
        int64_t cur_pts = yadif->cur->pts;
        int64_t next_pts = yadif->next->pts;
        /* Timestamp of the second field sits between the current and
         * next frame.  NOTE(review): cur_pts + next_pts looks like it
         * relies on a pre-scaled timebase rather than averaging --
         * confirm against the filter's timestamp handling. */
        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
            yadif->out->pts = cur_pts + next_pts;
        } else {
            yadif->out->pts = AV_NOPTS_VALUE;
        }
        ff_start_frame(ctx->outputs[0], yadif->out);
    }
    ff_draw_slice(ctx->outputs[0], 0, link->h, 1);
    ff_end_frame(ctx->outputs[0]);
    /* In field-rate mode (mode bit 0) a second field is still pending
     * after emitting the first one. */
    yadif->frame_pending = (yadif->mode&1) && !is_second;
}
| 1threat
|
Irfanview resize image with same width and height without quality lost : Can you help me please?
I use irfanview batch for resize my image and i have one problem.
My problem is i want to resize all image with 1840 width and height, without no ratio and no quality lost after process.
I explain me, example if i have one image with 1700 * 1700 resize to 1840* 1840 no problem quality is ok beacause quality after resize is lowest.
But the problem with this case:if i have one image with 1920*1200 1200 is height. here if i execute this command
"C:\irfanview\i_view32.exe" "C:\photo\C0692B\*.jpg" /resize=(1840, 1840) /resample /convert="C:\photo\C0692B\test\*.jpg"
I lost quality for height i dont want resize height beacause is lowest than resize size.
Before commande
[http://www.hostingpics.net/viewer.php?id=714806photo2.jpg][1]
After:
[http://www.hostingpics.net/viewer.php?id=445086photo2.jpg][1]
For this example, i want to center image and add blanck for obtain image 1840 *1840 withotu quality lost.
It's possible?
Thanks.
| 0debug
|
/*
 * Walk the ARM long-descriptor (LPAE / AArch64) translation tables to
 * translate 'address' for the regime selected by 'mmu_idx'.
 *
 * Returns false on success, filling in *phys_ptr, *txattrs, *prot and
 * *page_size_ptr.  Returns true on a fault and encodes the long-format
 * fault status (fault type and lookup level) into *fsr.
 */
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               int access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr, uint32_t *fsr)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    MMUFaultType fault_type = translation_fault;
    uint32_t level = 1;
    uint32_t epd = 0;
    int32_t t0sz, t1sz;
    uint32_t tg;
    uint64_t ttbr;
    int ttbr_select;
    hwaddr descaddr, descmask;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride = 9;      /* bits resolved per level (4K granule) */
    int32_t va_size = 32;
    int inputsize;
    int32_t tbi = 0;         /* top-byte-ignore: 8 when enabled, else 0 */
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid = true;
    /* Determine address size and top-byte-ignore for AArch64 regimes;
     * TTBR1 is only meaningful at EL0/EL1. */
    if (arm_el_is_aa64(env, el)) {
        va_size = 64;
        if (el > 1) {
            if (mmu_idx != ARMMMUIdx_S2NS) {
                tbi = extract64(tcr->raw_tcr, 20, 1);
            }
        } else {
            /* TBI is selected per half of the address space at EL0/1. */
            if (extract64(address, 55, 1)) {
                tbi = extract64(tcr->raw_tcr, 38, 1);
            } else {
                tbi = extract64(tcr->raw_tcr, 37, 1);
            }
        }
        tbi *= 8;
        if (el > 1) {
            ttbr1_valid = false;
        }
    } else {
        if (el == 2) {
            ttbr1_valid = false;
        }
    }
    /* Extract and clamp T0SZ (size of the TTBR0 region). */
    if (va_size == 64) {
        t0sz = extract32(tcr->raw_tcr, 0, 6);
        t0sz = MIN(t0sz, 39);
        t0sz = MAX(t0sz, 16);
    } else if (mmu_idx != ARMMMUIdx_S2NS) {
        t0sz = extract32(tcr->raw_tcr, 0, 3);
    } else {
        /* Stage 2 on AArch32: T0SZ is a signed 4-bit field and must be
         * consistently sign-extended (VTCR.S). */
        bool sext = extract32(tcr->raw_tcr, 4, 1);
        bool sign = extract32(tcr->raw_tcr, 3, 1);
        t0sz = sextract32(tcr->raw_tcr, 0, 4);
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] missmatch\n");
        }
    }
    t1sz = extract32(tcr->raw_tcr, 16, 6);
    if (va_size == 64) {
        t1sz = MIN(t1sz, 39);
        t1sz = MAX(t1sz, 16);
    }
    /* Select TTBR0 or TTBR1 based on which half of the (possibly
     * TBI-shortened) address space the address falls into. */
    if (t0sz && !extract64(address, va_size - t0sz, t0sz - tbi)) {
        ttbr_select = 0;
    } else if (ttbr1_valid && t1sz &&
               !extract64(~address, va_size - t1sz, t1sz - tbi)) {
        ttbr_select = 1;
    } else if (!t0sz) {
        ttbr_select = 0;
    } else if (!t1sz && ttbr1_valid) {
        ttbr_select = 1;
    } else {
        /* Address outside both regions: translation fault. */
        fault_type = translation_fault;
        goto do_fault;
    }
    /* Fetch the chosen TTBR, walk-disable bit, input size and granule
     * (stride 9/11/13 for 4K/16K/64K pages). */
    if (ttbr_select == 0) {
        ttbr = regime_ttbr(env, mmu_idx, 0);
        if (el < 2) {
            epd = extract32(tcr->raw_tcr, 7, 1);
        }
        inputsize = va_size - t0sz;
        tg = extract32(tcr->raw_tcr, 14, 2);
        if (tg == 1) {
            stride = 13;
        }
        if (tg == 2) {
            stride = 11;
        }
    } else {
        assert(ttbr1_valid);
        ttbr = regime_ttbr(env, mmu_idx, 1);
        epd = extract32(tcr->raw_tcr, 23, 1);
        inputsize = va_size - t1sz;
        /* Note: TTBR1's TG field uses a different encoding. */
        tg = extract32(tcr->raw_tcr, 30, 2);
        if (tg == 3) {
            stride = 13;
        }
        if (tg == 1) {
            stride = 11;
        }
    }
    /* EPD set means table walks for this TTBR are disabled. */
    if (epd) {
        goto do_fault;
    }
    /* Compute the starting lookup level. */
    if (mmu_idx != ARMMMUIdx_S2NS) {
        level = 4 - (inputsize - 4) / stride;
    } else {
        /* Stage 2: the start level comes from VTCR.SL0 and must be
         * checked for validity against the input size and granule. */
        int startlevel = extract32(tcr->raw_tcr, 6, 2);
        bool ok;
        if (va_size == 32 || stride == 9) {
            level = 2 - startlevel;
        } else {
            level = 3 - startlevel;
        }
        ok = check_s2_startlevel(cpu, va_size == 64, level,
                                 inputsize, stride);
        if (!ok) {
            level = va_size == 64 ? 0 : 1;
            fault_type = translation_fault;
            goto do_fault;
        }
    }
    /* Drop high bits outside the configured input range. */
    if (va_size != inputsize) {
        address &= (1ULL << inputsize) - 1;
    }
    descmask = (1ULL << (stride + 3)) - 1;
    /* Base of the first-level table from the TTBR, aligned down to the
     * size implied by the start level. */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~((1ULL << (inputsize - (stride * (4 - level)))) - 1);
    /* Secure walks start with NSTable clear; non-secure with it set. */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    /* Descend the table levels until a block or page descriptor. */
    for (;;) {
        uint64_t descriptor;
        bool nstable;
        descaddr |= (address >> (stride * (4 - level))) & descmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable);
        /* Invalid descriptor, or a block where only pages are legal. */
        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            goto do_fault;
        }
        descaddr = descriptor & 0xfffffff000ULL;
        if ((descriptor & 2) && (level < 3)) {
            /* Table descriptor: accumulate hierarchical attributes and
             * recurse into the next level. */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            continue;
        }
        /* Block or page descriptor: compute the output address and
         * merge descriptor attributes with the table hierarchy's. */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);
        attrs |= extract32(tableattrs, 0, 2) << 11;  /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5;   /* APTable[1] */
        /* APTable[0] clears the writable AP bit. */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        attrs |= nstable << 3;
        break;
    }
    /* Access flag must be set, else an access fault. */
    fault_type = access_fault;
    if ((attrs & (1 << 8)) == 0) {
        goto do_fault;
    }
    ap = extract32(attrs, 4, 2);
    ns = extract32(attrs, 3, 1);
    xn = extract32(attrs, 12, 1);
    pxn = extract32(attrs, 11, 1);
    *prot = get_S1prot(env, mmu_idx, va_size == 64, ap, ns, xn, pxn);
    /* Check the requested access against the computed permissions. */
    fault_type = permission_fault;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }
    if (ns) {
        /* NS walks can only produce non-secure memory accesses. */
        txattrs->secure = false;
    }
    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;
do_fault:
    /* Long-format FSR: bit 9 flags the LPAE format. */
    *fsr = (1 << 9) | (fault_type << 2) | level;
    return true;
}
| 1threat
|
Realm & React Native - Best practice to implement auto-updates? : <p>What are the best practices/patterns make realm a reactive datasource in a react native app? Especially for <a href="https://medium.com/@dan_abramov/smart-and-dumb-components-7ca2f9a7c7d0#.drlaug899">presentational and container components pattern</a>? </p>
<p>Here is an example which I'd like to make reactive: <a href="https://gist.github.com/Thorbenandresen/fc4bb5fb0ef6554d3ce5">Realm with React Native</a></p>
<p>The <a href="https://realm.io/docs/react-native/latest/#change-events">docs on auto-updates/change-events</a> are a bit thin and the <a href="https://realm.io/docs/react-native/latest/#examples">official example</a> does not make use of this feature (to my knowledge).</p>
| 0debug
|
Automatically fill in the login form for facebook.com : <p>How to automatically fill in the Facebook login form: username and password using c# windows form application.</p>
<p>I want to make the work automatic with a single button and open my Facebook page into my text field or something else. Less work, less time. </p>
| 0debug
|
/*
 * Decode one Shorten frame.
 *
 * Incoming packet bytes are accumulated in s->bitstream until at least
 * s->max_framesize bytes are available; until then the input is consumed
 * and no output is produced.  The first decoded chunk parses the stream
 * header (magic, version, channel/block parameters, embedded WAVE header);
 * subsequent chunks dispatch on a per-block function code (FN_*).
 *
 * Returns the number of input bytes consumed on success, -1 on error.
 * *data_size is set to the number of bytes written to the int16 output
 * buffer (0 if no complete set of channels was finished).
 */
static int shorten_decode_frame(AVCodecContext *avctx,
                                void *data, int *data_size,
                                uint8_t *buf, int buf_size)
{
    ShortenContext *s = avctx->priv_data;
    int i, input_buf_size = 0;
    int16_t *samples = data;

    /* Lazily create the internal bitstream buffer on the first call. */
    if(s->max_framesize == 0){
        s->max_framesize= 1024;
        s->bitstream= av_fast_realloc(s->bitstream, &s->allocated_bitstream_size, s->max_framesize);
    }

    /* Append the packet to the internal buffer; return early (input
     * consumed) until a full frame's worth of data is available. */
    if(1 && s->max_framesize){
        buf_size= FFMIN(buf_size, s->max_framesize - s->bitstream_size);
        input_buf_size= buf_size;
        /* Compact buffered data to the front if appending would overflow
         * the allocation. */
        if(s->bitstream_index + s->bitstream_size + buf_size > s->allocated_bitstream_size){
            memmove(s->bitstream, &s->bitstream[s->bitstream_index], s->bitstream_size);
            s->bitstream_index=0;
        }
        memcpy(&s->bitstream[s->bitstream_index + s->bitstream_size], buf, buf_size);
        buf= &s->bitstream[s->bitstream_index];
        buf_size += s->bitstream_size;
        s->bitstream_size= buf_size;
        if(buf_size < s->max_framesize){
            return input_buf_size;
        }
    }

    init_get_bits(&s->gb, buf, buf_size*8);
    /* Re-skip the sub-byte bit position left over from the previous call. */
    get_bits(&s->gb, s->bitindex);

    if (!s->blocksize)
    {
        /* blocksize == 0 means the stream header has not been parsed yet. */
        int maxnlpc = 0;
        if (get_bits_long(&s->gb, 32) != bswap_32(ff_get_fourcc("ajkg"))) {
            av_log(s->avctx, AV_LOG_ERROR, "missing shorten magic 'ajkg'\n");
            return -1;
        }
        s->lpcqoffset = 0;
        s->blocksize = DEFAULT_BLOCK_SIZE;
        s->channels = 1;
        s->nmean = -1;
        s->version = get_bits(&s->gb, 8);
        s->internal_ftype = get_uint(s, TYPESIZE);
        s->channels = get_uint(s, CHANSIZE);
        if (s->channels > MAX_CHANNELS) {
            av_log(s->avctx, AV_LOG_ERROR, "too many channels: %d\n", s->channels);
            return -1;
        }
        /* Version > 0 streams carry explicit block/LPC/mean parameters plus
         * a skippable byte region. */
        if (s->version > 0) {
            int skip_bytes;
            s->blocksize = get_uint(s, av_log2(DEFAULT_BLOCK_SIZE));
            maxnlpc = get_uint(s, LPCQSIZE);
            s->nmean = get_uint(s, 0);
            skip_bytes = get_uint(s, NSKIPSIZE);
            for (i=0; i<skip_bytes; i++) {
                skip_bits(&s->gb, 8);
            }
        }
        /* History ("wrap") samples must cover the largest LPC order. */
        s->nwrap = FFMAX(NWRAP, maxnlpc);
        allocate_buffers(s);
        init_offset(s);
        if (s->version > 1)
            s->lpcqoffset = V2LPCQOFFSET;
        /* The original file header is stored verbatim and must come first. */
        if (get_ur_golomb_shorten(&s->gb, FNSIZE) != FN_VERBATIM) {
            av_log(s->avctx, AV_LOG_ERROR, "missing verbatim section at begining of stream\n");
            return -1;
        }
        s->header_size = get_ur_golomb_shorten(&s->gb, VERBATIM_CKSIZE_SIZE);
        if (s->header_size >= OUT_BUFFER_SIZE || s->header_size < CANONICAL_HEADER_SIZE) {
            av_log(s->avctx, AV_LOG_ERROR, "header is wrong size: %d\n", s->header_size);
            return -1;
        }
        for (i=0; i<s->header_size; i++)
            s->header[i] = (char)get_ur_golomb_shorten(&s->gb, VERBATIM_BYTE_SIZE);
        if (decode_wave_header(avctx, s->header, s->header_size) < 0)
            return -1;
        s->cur_chan = 0;
        s->bitshift = 0;
    }
    else
    {
        /* Header already parsed: decode one function block. */
        int cmd;
        int len;
        cmd = get_ur_golomb_shorten(&s->gb, FNSIZE);
        switch (cmd) {
        case FN_ZERO:
        case FN_DIFF0:
        case FN_DIFF1:
        case FN_DIFF2:
        case FN_DIFF3:
        case FN_QLPC:
            {
                /* Audio block for one channel. */
                int residual_size = 0;
                int channel = s->cur_chan;
                int32_t coffset;
                if (cmd != FN_ZERO) {
                    residual_size = get_ur_golomb_shorten(&s->gb, ENERGYSIZE);
                    /* Version 0 streams code the residual one bit smaller. */
                    if (s->version == 0)
                        residual_size--;
                }
                /* coffset = running mean of the last nmean block offsets
                 * (or the single stored offset when nmean == 0). */
                if (s->nmean == 0)
                    coffset = s->offset[channel][0];
                else {
                    int32_t sum = (s->version < 2) ? 0 : s->nmean / 2;
                    for (i=0; i<s->nmean; i++)
                        sum += s->offset[channel][i];
                    coffset = sum / s->nmean;
                    /* NOTE(review): FFMIN(1, s->bitshift) shifts by at most
                     * one bit regardless of bitshift — confirm intended. */
                    if (s->version >= 2)
                        coffset >>= FFMIN(1, s->bitshift);
                }
                /* Reconstruct samples: fixed-order difference predictors or
                 * quantized LPC, each plus a Rice-coded residual. */
                switch (cmd) {
                case FN_ZERO:
                    for (i=0; i<s->blocksize; i++)
                        s->decoded[channel][i] = 0;
                    break;
                case FN_DIFF0:
                    for (i=0; i<s->blocksize; i++)
                        s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + coffset;
                    break;
                case FN_DIFF1:
                    for (i=0; i<s->blocksize; i++)
                        s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + s->decoded[channel][i - 1];
                    break;
                case FN_DIFF2:
                    for (i=0; i<s->blocksize; i++)
                        s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + 2*s->decoded[channel][i-1]
                                                                                              - s->decoded[channel][i-2];
                    break;
                case FN_DIFF3:
                    for (i=0; i<s->blocksize; i++)
                        s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + 3*s->decoded[channel][i-1]
                                                                                              - 3*s->decoded[channel][i-2]
                                                                                              + s->decoded[channel][i-3];
                    break;
                case FN_QLPC:
                    {
                        int pred_order = get_ur_golomb_shorten(&s->gb, LPCQSIZE);
                        /* LPC runs on offset-free history samples. */
                        for (i=0; i<pred_order; i++)
                            s->decoded[channel][i - pred_order] -= coffset;
                        decode_subframe_lpc(s, channel, residual_size, pred_order);
                        if (coffset != 0)
                            for (i=0; i < s->blocksize; i++)
                                s->decoded[channel][i] += coffset;
                    }
                }
                /* Push this block's mean into the offset history. */
                if (s->nmean > 0) {
                    int32_t sum = (s->version < 2) ? 0 : s->blocksize / 2;
                    for (i=0; i<s->blocksize; i++)
                        sum += s->decoded[channel][i];
                    for (i=1; i<s->nmean; i++)
                        s->offset[channel][i-1] = s->offset[channel][i];
                    if (s->version < 2)
                        s->offset[channel][s->nmean - 1] = sum / s->blocksize;
                    else
                        s->offset[channel][s->nmean - 1] = (sum / s->blocksize) << s->bitshift;
                }
                /* Copy the tail of this block in front of the buffer as
                 * history for the next block's predictor. */
                for (i=-s->nwrap; i<0; i++)
                    s->decoded[channel][i] = s->decoded[channel][i + s->blocksize];
                fix_bitshift(s, s->decoded[channel]);
                s->cur_chan++;
                /* Only emit output once every channel of the frame is done. */
                if (s->cur_chan == s->channels) {
                    samples = interleave_buffer(samples, s->channels, s->blocksize, s->decoded);
                    s->cur_chan = 0;
                    goto frame_done;
                }
                break;
            }
            break;
        case FN_VERBATIM:
            /* Raw bytes embedded in the stream; skipped, not output. */
            len = get_ur_golomb_shorten(&s->gb, VERBATIM_CKSIZE_SIZE);
            while (len--) {
                get_ur_golomb_shorten(&s->gb, VERBATIM_BYTE_SIZE);
            }
            break;
        case FN_BITSHIFT:
            s->bitshift = get_ur_golomb_shorten(&s->gb, BITSHIFTSIZE);
            break;
        case FN_BLOCKSIZE:
            s->blocksize = get_uint(s, av_log2(s->blocksize));
            break;
        case FN_QUIT:
            /* End of stream: consume the rest of the buffer. */
            return buf_size;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "unknown shorten function %d\n", cmd);
            return -1;
            break;
        }
    }
frame_done:
    *data_size = (int8_t *)samples - (int8_t *)data;
    /* Remember the sub-byte bit position for the next call. */
    s->bitindex = get_bits_count(&s->gb) - 8*((get_bits_count(&s->gb))/8);
    i= (get_bits_count(&s->gb))/8;
    if (i > buf_size) {
        av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", i - buf_size);
        s->bitstream_size=0;
        s->bitstream_index=0;
        return -1;
    }
    /* Advance the internal buffer past the bytes just decoded. */
    if (s->bitstream_size) {
        s->bitstream_index += i;
        s->bitstream_size -= i;
        return input_buf_size;
    } else
        return i;
}
| 1threat
|
How to validate a textbox for a filepath entry? : <p>the cases are </p>
<ul>
<li>compulsory '\' char at the first</li>
<li>followed by alphanumeric </li>
<li>compulsory '\' char at the last</li>
</ul>
<p>eg:\abc\bvc\</p>
<p>\abc4\abc3\abc2\abc1\</p>
| 0debug
|
/*
 * Decode one CineForm HD (CFHD) frame.
 *
 * The bitstream is a sequence of 16-bit tag / 16-bit value pairs that set
 * frame geometry and per-band parameters, interleaved with a raw lowpass
 * coefficient payload and run-length coded highpass payloads.  Once all
 * subbands are read, a three-level inverse wavelet transform reconstructs
 * each plane into the output frame.
 *
 * Returns avpkt->size on success and sets *got_frame; returns a negative
 * AVERROR on failure.
 */
static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
                       AVPacket *avpkt)
{
    CFHDContext *s = avctx->priv_data;
    GetByteContext gb;
    ThreadFrame frame = { .f = data };
    AVFrame *pic = data;
    int ret = 0, i, j, planes, plane, got_buffer = 0;
    int16_t *coeff_data;

    s->coded_format = AV_PIX_FMT_YUV422P10;
    init_frame_defaults(s);
    planes = av_pix_fmt_count_planes(s->coded_format);

    bytestream2_init(&gb, avpkt->data, avpkt->size);

    /* Tag/value parsing loop.  Note: the local 'data' below shadows the
     * 'void *data' function parameter for the rest of the loop body. */
    while (bytestream2_get_bytes_left(&gb) > 4) {
        uint16_t tagu = bytestream2_get_be16(&gb);
        int16_t tag = (int16_t)tagu;
        int8_t tag8 = (int8_t)(tagu >> 8);
        uint16_t abstag = abs(tag);
        int8_t abs_tag8 = abs(tag8);
        uint16_t data = bytestream2_get_be16(&gb);
        if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
            /* 24-bit "large length" chunk marker; length is logged only. */
            av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data);
        } else if (tag == 20) {
            av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data);
            s->coded_width = data;
        } else if (tag == 21) {
            av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data);
            s->coded_height = data;
        } else if (tag == 101) {
            av_log(avctx, AV_LOG_DEBUG, "Bits per component: %"PRIu16"\n", data);
            s->bpc = data;
        } else if (tag == 12) {
            av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data);
            s->channel_cnt = data;
            if (data > 4) {
                av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data);
                ret = AVERROR_PATCHWELCOME;
                break;
            }
        } else if (tag == 14) {
            av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data);
            if (data != SUBBAND_COUNT) {
                av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data);
                ret = AVERROR_PATCHWELCOME;
                break;
            }
        } else if (tag == 62) {
            /* Start of a new channel: reset per-plane decoding state. */
            s->channel_num = data;
            av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data);
            if (s->channel_num >= planes) {
                av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n");
                ret = AVERROR(EINVAL);
                break;
            }
            init_plane_defaults(s);
        } else if (tag == 48) {
            /* Subband number; wrapping back to 1 implies the next DWT level. */
            if (s->subband_num != 0 && data == 1)
                s->level++;
            av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data);
            s->subband_num = data;
            if (s->level >= DWT_LEVELS) {
                av_log(avctx, AV_LOG_ERROR, "Invalid level\n");
                ret = AVERROR(EINVAL);
                break;
            }
            if (s->subband_num > 3) {
                av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n");
                ret = AVERROR(EINVAL);
                break;
            }
        } else if (tag == 51) {
            av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data);
            s->subband_num_actual = data;
            if (s->subband_num_actual >= 10) {
                av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n");
                ret = AVERROR(EINVAL);
                break;
            }
        } else if (tag == 35)
            av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data);
        else if (tag == 53) {
            s->quantisation = data;
            av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data);
        } else if (tag == 109) {
            /* Three 3-bit per-level prescale shifts packed into one value. */
            s->prescale_shift[0] = (data >> 0) & 0x7;
            s->prescale_shift[1] = (data >> 3) & 0x7;
            s->prescale_shift[2] = (data >> 6) & 0x7;
            av_log(avctx, AV_LOG_DEBUG, "Prescale shift (VC-5): %x\n", data);
        } else if (tag == 27) {
            s->plane[s->channel_num].band[0][0].width = data;
            s->plane[s->channel_num].band[0][0].stride = data;
            av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
            if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_width) {
                av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
                ret = AVERROR(EINVAL);
                break;
            }
        } else if (tag == 28) {
            s->plane[s->channel_num].band[0][0].height = data;
            av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
            /* NOTE(review): upper bound compares against .height, which was
             * just set to data, so that half of the check is vacuous —
             * should this be .a_height (as the width check uses a_width)? */
            if (data < 3 || data > s->plane[s->channel_num].band[0][0].height) {
                av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
                ret = AVERROR(EINVAL);
                break;
            }
        } else if (tag == 1)
            av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
        else if (tag == 10) {
            if (data != 0) {
                avpriv_report_missing_feature(avctx, "Transform type of %"PRIu16, data);
                ret = AVERROR_PATCHWELCOME;
                break;
            }
            av_log(avctx, AV_LOG_DEBUG, "Transform-type? %"PRIu16"\n", data);
        } else if (abstag >= 0x4000 && abstag <= 0x40ff) {
            /* Small chunk: value is its length in 32-bit words; skip it. */
            av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required");
            bytestream2_skipu(&gb, data * 4);
        } else if (tag == 23) {
            av_log(avctx, AV_LOG_DEBUG, "Skip frame\n");
            avpriv_report_missing_feature(avctx, "Skip frame");
            ret = AVERROR_PATCHWELCOME;
            break;
        } else if (tag == 2) {
            /* Embedded header: 'data' tag/value pairs, logged and discarded. */
            av_log(avctx, AV_LOG_DEBUG, "tag=2 header - skipping %i tag/value pairs\n", data);
            if (data > bytestream2_get_bytes_left(&gb) / 4) {
                av_log(avctx, AV_LOG_ERROR, "too many tag/value pairs (%d)\n", data);
                ret = AVERROR_INVALIDDATA;
                break;
            }
            for (i = 0; i < data; i++) {
                uint16_t tag2 = bytestream2_get_be16(&gb);
                uint16_t val2 = bytestream2_get_be16(&gb);
                av_log(avctx, AV_LOG_DEBUG, "Tag/Value = %x %x\n", tag2, val2);
            }
        } else if (tag == 41) {
            s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
            s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
            av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
            if (data < 3) {
                av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
                ret = AVERROR(EINVAL);
                break;
            }
        } else if (tag == 42) {
            s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
            av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
            if (data < 3) {
                av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
                ret = AVERROR(EINVAL);
                break;
            }
        } else if (tag == 49) {
            /* Alternate encodings of the highpass dimensions. */
            s->plane[s->channel_num].band[s->level][s->subband_num].width = data;
            s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
            av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data);
            if (data < 3) {
                av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n");
                ret = AVERROR(EINVAL);
                break;
            }
        } else if (tag == 50) {
            s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
            av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data);
            if (data < 3) {
                av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n");
                ret = AVERROR(EINVAL);
                break;
            }
        } else if (tag == 71) {
            s->codebook = data;
            av_log(avctx, AV_LOG_DEBUG, "Codebook %i\n", s->codebook);
        } else if (tag == 72) {
            s->codebook = data;
            av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook);
        } else if (tag == 70) {
            av_log(avctx, AV_LOG_DEBUG, "Subsampling or bit-depth flag? %i\n", data);
            s->bpc = data;
            if (!(s->bpc == 10 || s->bpc == 12)) {
                av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n");
                ret = AVERROR(EINVAL);
                break;
            }
        } else if (tag == 84) {
            av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
            if (data == 1)
                s->coded_format = AV_PIX_FMT_YUV422P10;
            else if (data == 3)
                s->coded_format = AV_PIX_FMT_GBRP12;
            else if (data == 4)
                s->coded_format = AV_PIX_FMT_GBRAP12;
            else {
                avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data);
                ret = AVERROR_PATCHWELCOME;
                break;
            }
            planes = av_pix_fmt_count_planes(s->coded_format);
        } else
            av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);

        /* End-of-header marker: (re)allocate coefficient buffers if the
         * geometry changed and get an output frame from the thread pool. */
        if (tag == 4 && data == 0x1a4a && s->coded_width && s->coded_height &&
            s->coded_format != AV_PIX_FMT_NONE) {
            if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
                s->a_format != s->coded_format) {
                free_buffers(avctx);
                if ((ret = alloc_buffers(avctx)) < 0) {
                    free_buffers(avctx);
                    return ret;
                }
            }
            ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height);
            if (ret < 0)
                return ret;
            frame.f->width =
            frame.f->height = 0;
            if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
                return ret;
            s->coded_width = 0;
            s->coded_height = 0;
            s->coded_format = AV_PIX_FMT_NONE;
            got_buffer = 1;
        }
        coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];

        /* Lowpass coefficient payload: raw big-endian 16-bit values. */
        if (tag == 4 && data == 0xf0f && s->a_width && s->a_height) {
            int lowpass_height = s->plane[s->channel_num].band[0][0].height;
            int lowpass_width = s->plane[s->channel_num].band[0][0].width;
            int lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
            int lowpass_a_width = s->plane[s->channel_num].band[0][0].a_width;
            if (!got_buffer) {
                av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
                ret = AVERROR(EINVAL);
                goto end;
            }
            if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
                lowpass_a_width * lowpass_a_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) {
                av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n");
                ret = AVERROR(EINVAL);
                goto end;
            }
            av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width);
            for (i = 0; i < lowpass_height; i++) {
                for (j = 0; j < lowpass_width; j++)
                    coeff_data[j] = bytestream2_get_be16u(&gb);
                coeff_data += lowpass_width;
            }
            /* Re-align the byte reader to a 4-byte boundary. */
            bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR);
            /* Duplicate the last row for odd heights so the vertical filter
             * always has an even number of rows. */
            if (lowpass_height & 1) {
                memcpy(&coeff_data[lowpass_height * lowpass_width],
                       &coeff_data[(lowpass_height - 1) * lowpass_width],
                       lowpass_width * sizeof(*coeff_data));
            }
            av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
        }

        /* Highpass coefficient payload: RL-VLC coded, dequantised on read. */
        if (tag == 55 && s->subband_num_actual != 255 && s->a_width && s->a_height) {
            int highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height;
            int highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width;
            int highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
            int highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height;
            int highpass_stride = s->plane[s->channel_num].band[s->level][s->subband_num].stride;
            int expected = highpass_height * highpass_stride;
            int a_expected = highpass_a_height * highpass_a_width;
            int level, run, coeff;
            int count = 0, bytes;
            if (!got_buffer) {
                av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
                ret = AVERROR(EINVAL);
                goto end;
            }
            if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < expected) {
                av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
                ret = AVERROR(EINVAL);
                goto end;
            }
            av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected);
            init_get_bits(&s->gb, gb.buffer, bytestream2_get_bytes_left(&gb) * 8);
            {
                OPEN_READER(re, &s->gb);
                /* Two codebooks with different escape codes; both emit
                 * (level, run) pairs expanded into 'run' equal coefficients. */
                if (!s->codebook) {
                    while (1) {
                        UPDATE_CACHE(re, &s->gb);
                        GET_RL_VLC(level, run, re, &s->gb, s->table_9_rl_vlc,
                                   VLC_BITS, 3, 1);
                        /* escape codeword */
                        if (level == 64)
                            break;
                        count += run;
                        if (count > expected)
                            break;
                        coeff = dequant_and_decompand(level, s->quantisation);
                        for (i = 0; i < run; i++)
                            *coeff_data++ = coeff;
                    }
                } else {
                    while (1) {
                        UPDATE_CACHE(re, &s->gb);
                        GET_RL_VLC(level, run, re, &s->gb, s->table_18_rl_vlc,
                                   VLC_BITS, 3, 1);
                        /* escape codeword */
                        if (level == 255 && run == 2)
                            break;
                        count += run;
                        if (count > expected)
                            break;
                        coeff = dequant_and_decompand(level, s->quantisation);
                        for (i = 0; i < run; i++)
                            *coeff_data++ = coeff;
                    }
                }
                CLOSE_READER(re, &s->gb);
            }
            if (count > expected) {
                av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n");
                ret = AVERROR(EINVAL);
                goto end;
            }
            /* Resynchronise the byte reader past the bits just consumed. */
            bytes = FFALIGN(FF_CEIL_RSHIFT(get_bits_count(&s->gb), 3), 4);
            if (bytes > bytestream2_get_bytes_left(&gb)) {
                av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n");
                ret = AVERROR(EINVAL);
                goto end;
            } else
                bytestream2_seek(&gb, bytes, SEEK_CUR);
            av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected);
            s->codebook = 0;
            /* Duplicate the last row for odd heights (see lowpass above). */
            if (highpass_height & 1) {
                memcpy(&coeff_data[highpass_height * highpass_stride],
                       &coeff_data[(highpass_height - 1) * highpass_stride],
                       highpass_stride * sizeof(*coeff_data));
            }
        }
    }

    /* All tags consumed: sanity-check that a complete header was seen. */
    if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE ||
        s->coded_width || s->coded_height || s->coded_format != AV_PIX_FMT_NONE) {
        av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n");
        ret = AVERROR(EINVAL);
        goto end;
    }
    if (!got_buffer) {
        av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
        ret = AVERROR(EINVAL);
        goto end;
    }

    /* Inverse wavelet reconstruction, one plane at a time. */
    planes = av_pix_fmt_count_planes(avctx->pix_fmt);
    for (plane = 0; plane < planes && !ret; plane++) {
        /* Level 1: combine subbands 0-3 back into a quarter-size image. */
        int lowpass_height = s->plane[plane].band[0][0].height;
        int lowpass_width = s->plane[plane].band[0][0].width;
        int highpass_stride = s->plane[plane].band[0][1].stride;
        /* Planes 1 and 2 are swapped between coded and output order. */
        int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
        int16_t *low, *high, *output, *dst;
        if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
            !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width) {
            av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
            ret = AVERROR(EINVAL);
            goto end;
        }
        av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
        low = s->plane[plane].subband[0];
        high = s->plane[plane].subband[2];
        output = s->plane[plane].l_h[0];
        for (i = 0; i < lowpass_width; i++) {
            vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
            low++;
            high++;
            output++;
        }
        low = s->plane[plane].subband[1];
        high = s->plane[plane].subband[3];
        output = s->plane[plane].l_h[1];
        for (i = 0; i < lowpass_width; i++) {
            vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
            low++;
            high++;
            output++;
        }
        low = s->plane[plane].l_h[0];
        high = s->plane[plane].l_h[1];
        /* Horizontal pass writes the level-1 result back into subband[0],
         * which becomes the lowpass input of the next level. */
        output = s->plane[plane].subband[0];
        for (i = 0; i < lowpass_height * 2; i++) {
            horiz_filter(output, low, high, lowpass_width);
            low += lowpass_width;
            high += lowpass_width;
            output += lowpass_width * 2;
        }
        if (s->bpc == 12) {
            output = s->plane[plane].subband[0];
            for (i = 0; i < lowpass_height * 2; i++) {
                for (j = 0; j < lowpass_width * 2; j++)
                    output[j] <<= 2;
                output += lowpass_width * 2;
            }
        }

        /* Level 2: combine with subbands 4-6. */
        lowpass_height = s->plane[plane].band[1][1].height;
        lowpass_width = s->plane[plane].band[1][1].width;
        highpass_stride = s->plane[plane].band[1][1].stride;
        if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
            !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width) {
            av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
            ret = AVERROR(EINVAL);
            goto end;
        }
        av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
        low = s->plane[plane].subband[0];
        high = s->plane[plane].subband[5];
        output = s->plane[plane].l_h[3];
        for (i = 0; i < lowpass_width; i++) {
            vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
            low++;
            high++;
            output++;
        }
        low = s->plane[plane].subband[4];
        high = s->plane[plane].subband[6];
        output = s->plane[plane].l_h[4];
        for (i = 0; i < lowpass_width; i++) {
            vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
            low++;
            high++;
            output++;
        }
        low = s->plane[plane].l_h[3];
        high = s->plane[plane].l_h[4];
        output = s->plane[plane].subband[0];
        for (i = 0; i < lowpass_height * 2; i++) {
            horiz_filter(output, low, high, lowpass_width);
            low += lowpass_width;
            high += lowpass_width;
            output += lowpass_width * 2;
        }
        output = s->plane[plane].subband[0];
        for (i = 0; i < lowpass_height * 2; i++) {
            for (j = 0; j < lowpass_width * 2; j++)
                output[j] <<= 2;
            output += lowpass_width * 2;
        }

        /* Level 3: combine with subbands 7-9, clipping straight into the
         * output picture. */
        lowpass_height = s->plane[plane].band[2][1].height;
        lowpass_width = s->plane[plane].band[2][1].width;
        highpass_stride = s->plane[plane].band[2][1].stride;
        if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width ||
            !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width) {
            av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
            ret = AVERROR(EINVAL);
            goto end;
        }
        av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
        low = s->plane[plane].subband[0];
        high = s->plane[plane].subband[8];
        output = s->plane[plane].l_h[6];
        for (i = 0; i < lowpass_width; i++) {
            vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
            low++;
            high++;
            output++;
        }
        low = s->plane[plane].subband[7];
        high = s->plane[plane].subband[9];
        output = s->plane[plane].l_h[7];
        for (i = 0; i < lowpass_width; i++) {
            vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
            low++;
            high++;
            output++;
        }
        dst = (int16_t *)pic->data[act_plane];
        low = s->plane[plane].l_h[6];
        high = s->plane[plane].l_h[7];
        for (i = 0; i < lowpass_height * 2; i++) {
            horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
            low += lowpass_width;
            high += lowpass_width;
            dst += pic->linesize[act_plane] / 2;
        }
    }

end:
    if (ret < 0)
        return ret;

    *got_frame = 1;
    return avpkt->size;
}
| 1threat
|
static void test_visitor_in_fail_list(TestInputVisitorData *data,
                                      const void *unused)
{
    /* Visit only two of three list elements, then close the list early. */
    Visitor *v = visitor_input_test_init(data, "[ 1, 2, 3 ]");
    int64_t value = -1;

    visit_start_list(v, NULL, NULL, 0, &error_abort);

    visit_type_int(v, NULL, &value, &error_abort);
    g_assert_cmpint(value, ==, 1);

    visit_type_int(v, NULL, &value, &error_abort);
    g_assert_cmpint(value, ==, 2);

    /* The third element is deliberately left unvisited. */
    visit_end_list(v, NULL);
}
| 1threat
|
static void test_wait_event_notifier(void)
{
    EventNotifierTestData td = { .n = 0, .active = 1 };

    event_notifier_init(&td.e, false);
    aio_set_event_notifier(ctx, &td.e, event_ready_cb, event_active_cb);

    /* First poll after registration: the handler has not run (n == 0). */
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(td.n, ==, 0);
    g_assert_cmpint(td.active, ==, 1);

    /* Signal the notifier: exactly one poll fires the handler once. */
    event_notifier_set(&td.e);
    g_assert(aio_poll(ctx, false));
    g_assert_cmpint(td.n, ==, 1);
    g_assert_cmpint(td.active, ==, 0);

    /* Notifier already consumed: polling again makes no progress. */
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(td.n, ==, 1);
    g_assert_cmpint(td.active, ==, 0);

    /* Deregister and verify the handler can no longer be invoked. */
    aio_set_event_notifier(ctx, &td.e, NULL, NULL);
    g_assert(!aio_poll(ctx, false));
    g_assert_cmpint(td.n, ==, 1);

    event_notifier_cleanup(&td.e);
}
| 1threat
|
How polymer increases efficiency of webpage and better than other framework like AngularJS : <p>I was going through documentation of polymer project but didn't get to know how it can increase UX and efficiency of website.</p>
| 0debug
|
/*
 * If the page at p is entirely zero, emit a compressed zero-page record
 * (header plus a single zero byte) and account for the bytes sent.
 *
 * Returns 1 when a zero page was saved, -1 when the page is not zero
 * and must be sent by other means.
 */
static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                          uint8_t *p)
{
    if (!is_zero_range(p, TARGET_PAGE_SIZE)) {
        return -1;
    }

    rs->zero_pages++;
    rs->bytes_transferred +=
        save_page_header(rs, block, offset | RAM_SAVE_FLAG_COMPRESS);
    qemu_put_byte(rs->f, 0);
    rs->bytes_transferred += 1;
    return 1;
}
| 1threat
|
/* NetClientState cleanup hook: drop the device's back-reference to the
 * NIC being torn down. */
static void lan9118_cleanup(NetClientState *nc)
{
    ((lan9118_state *)qemu_get_nic_opaque(nc))->nic = NULL;
}
| 1threat
|
How to use an encrypted private key with golang ssh : <p>I would appreciate pointers because I cannot work out how to decrypt an encrypted key in order to use it with golang ssh. I'm attempting to mash together two other sources of code (including <a href="http://kukuruku.co/hub/golang/ssh-commands-execution-on-hundreds-of-servers-via-go" rel="noreferrer">this</a> one) but unable to get this to work.</p>
<p>I think I'm getting to a DER but need to marshall this back to PEM in order to use it with crypto/ssh</p>
<pre><code>-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,D7C72273BE168626E5B2D1BC72E56326
...
-----END RSA PRIVATE KEY-----
</code></pre>
<p>I read it:</p>
<pre><code>key, err := ioutil.ReadFile(privateKey)
if err != nil {
log.Fatalf("Unable to read private key: %v", err)
}
</code></pre>
<p>With an unencrypted (!) key, I can then:</p>
<pre><code>signer, err := ssh.ParsePrivateKey(key)
if err != nil {
log.Fatalf("Unable to parse private key: %v", err)
}
config := &ssh.ClientConfig{
User: username,
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
}
</code></pre>
<p>And this would work.</p>
<p>I reused some code that I think gets me the decrypted PEM as a DER:</p>
<pre><code>func decrypt(key []byte, password []byte) []byte {
block, rest := pem.Decode(key)
if len(rest) > 0 {
log.Fatalf("Extra data included in key")
}
der, err := x509.DecryptPEMBlock(block, password)
if err != nil {
log.Fatalf("Decrypt failed: %v", err)
}
return der
}
</code></pre>
<p>But, how do I get from the DER to a signer?</p>
<p>Or, what's the best way to solve this?</p>
| 0debug
|
How to import Azure BlobService in python? : <p>We are able to import azure.storage, but not access the BlobService attribute</p>
<p>The documentation says to use the following import statement:</p>
<pre><code>from azure.storage import BlobService
</code></pre>
<p>But that get's the following error:</p>
<pre><code>ImportError: cannot import name BlobService
</code></pre>
<p>We tried the following:</p>
<pre><code>import azure.storage
...
foo = azure.storage.BlobService(...)
</code></pre>
<p>But that received the following error:</p>
<pre><code>AttributeError: ‘module’ object has no attribute ‘BlobService’
</code></pre>
<p>We also tried all of the above with "azure.storage.blob" instead of "azure.storage"</p>
<p>We tried updating azure-storage package but it is up to date (version 0.30.0)</p>
<p>We also tried uninstalling azure-storage and installing the entire azure package, but we got the same results. We tried installing them with both pip and conda, but same results both times.</p>
<p>I am aware that the output suggests that this version of azure.storage has no BlobService attribute, but the documentation clearly states to import from there.</p>
<p><a href="https://azure.microsoft.com/en-us/documentation/articles/machine-learning-data-science-create-features-blob/" rel="noreferrer">https://azure.microsoft.com/en-us/documentation/articles/machine-learning-data-science-create-features-blob/</a></p>
| 0debug
|
Why does an AWS application load balancer require two subnets? : <p>When trying to create an application load balancer on aws (up until now I was using the classic ones just fine), I get the following message:</p>
<p><code>At least two subnets must be specified</code></p>
<p>Why would an ALB require me to specify two subnets ? Is there any way to create it and use it to LB between instances in a single subnet ?</p>
| 0debug
|
How to convert all String's to lower case in a collection of type HashSet <String>? : <p>I am not sure of best way to convert all Strings in a collection to lowercase. Any thoughts?</p>
<pre><code> private Set<String> email;
if(userEmail instanceof Collection) {
this.email = new HashSet<String>((Collection<String>) userEmail);
model.put("userEmail", this.email); //need to convert this to lower case
}
</code></pre>
<p>Thanks in advance :-)</p>
| 0debug
|
PHPDocumentor 2 and PHP 7 with opcache issues in Doctrine : <p>Hopefully someone here knows a thing or 2 about this.</p>
<p><b>Short Question</b></p>
<p>I am running into an error using phpdoc on the command line, installed via pear on PHP 7.0.2. The error is:</p>
<pre><code>#> phpdoc
PHP Fatal error: Uncaught Doctrine\Common\Annotations\AnnotationException:
You have to enable opcache.load_comments=1 or zend_optimizerplus.load_comments=1.
in /usr/local/php5-7.0.2-20160108-102134/lib/php/phpDocumentor/vendor/doctrine/annotations/lib/Doctrine/Common/Annotations/AnnotationException.php:193
</code></pre>
<p>How do I fix this error?</p>
<p><b>Details</b></p>
<p>Opcache is enabled and <code>opcache.load_comments=1</code> is in my opcache.ini file, verified by using the commands: <code>php -i | grep "Opcode"</code> and <code>php -i | grep "opcache"</code> respectively. Within that .ini file I can verify that changes are loaded by checking enable and disable opcache via that file.</p>
<p>With that said, if I have <code>opcache.load_comments=1</code> in my .ini file, why am I still getting this error?</p>
<p>Thanks!</p>
| 0debug
|
/*
 * Parse an MPEG-4 AudioSpecificConfig from buf into c.
 *
 * Fills in the audio object type, sampling rate, channel configuration,
 * and any SBR extension — either signalled explicitly (AOT_SBR/AOT_PS up
 * front) or implicitly via the 0x2b7 backward-compatible sync extension
 * scanned at the end of the config.
 *
 * Returns the bit index at which the object-type-specific configuration
 * starts, or -1 on error (currently only ALS config parse failure).
 */
int ff_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    int specific_config_bitindex;
    init_get_bits(&gb, buf, buf_size*8);
    c->object_type = get_object_type(&gb);
    c->sample_rate = get_sample_rate(&gb, &c->sampling_index);
    c->chan_config = get_bits(&gb, 4);
    if (c->chan_config < FF_ARRAY_ELEMS(ff_mpeg4audio_channels))
        c->channels = ff_mpeg4audio_channels[c->chan_config];
    /* -1 = SBR presence unknown (implicit signalling may set it below). */
    c->sbr = -1;
    /* Explicit SBR/PS signalling: the real object type follows the
     * extension sample rate. */
    if (c->object_type == AOT_SBR || (c->object_type == AOT_PS &&
        !(show_bits(&gb, 3) & 0x03 && !(show_bits(&gb, 9) & 0x3F)))) {
        c->ext_object_type = AOT_SBR;
        c->sbr = 1;
        c->ext_sample_rate = get_sample_rate(&gb, &c->ext_sampling_index);
        c->object_type = get_object_type(&gb);
        if (c->object_type == AOT_ER_BSAC)
            c->ext_chan_config = get_bits(&gb, 4);
    } else {
        c->ext_object_type = AOT_NULL;
        c->ext_sample_rate = 0;
    }
    specific_config_bitindex = get_bits_count(&gb);
    if (c->object_type == AOT_ALS) {
        skip_bits(&gb, 5);
        /* Resync to the "\0ALS" marker if it is not already next. */
        if (show_bits_long(&gb, 24) != MKBETAG('\0','A','L','S'))
            skip_bits_long(&gb, 24);
        specific_config_bitindex = get_bits_count(&gb);
        if (parse_config_ALS(&gb, c))
            return -1;
    }
    /* Implicit SBR signalling: scan the remaining bits for the 0x2b7
     * extension sync word (needs at least 16 bits to be meaningful). */
    if (c->ext_object_type != AOT_SBR) {
        int bits_left = buf_size*8 - get_bits_count(&gb);
        for (; bits_left > 15; bits_left--) {
            if (show_bits(&gb, 11) == 0x2b7) {
                get_bits(&gb, 11);
                c->ext_object_type = get_object_type(&gb);
                if (c->ext_object_type == AOT_SBR && (c->sbr = get_bits1(&gb)) == 1)
                    c->ext_sample_rate = get_sample_rate(&gb, &c->ext_sampling_index);
                break;
            } else
                get_bits1(&gb);
        }
    }
    return specific_config_bitindex;
}
| 1threat
|
I have this error in Qt Creator : when I create a console program in Qt Creator, I can't run it from my system terminal.
I am using Manjaro Linux.
And I can't create GUI programs because of this error:
home/ramigamal/Programs/qt/Tools/QtCreator/lib/qtcreator/plugins/QtProject/libClangCodeModel.so: Cannot load library /home/ramigamal/Programs/qt/Tools/QtCreator/lib/qtcreator/plugins/QtProject/libClangCodeModel.so: (libtinfo.so.5: cannot open shared object file: No such file or directory)[enter image description here][1]
[1]: http://i.stack.imgur.com/N6MCv.png
| 0debug
|
How to create a table with clickable hyperlink in pandas & Jupyter Notebook : <p><code>print('http://google.com')</code> outputs a clickable url.</p>
<p>How do I get clickable URLs for <code>pd.DataFrame(['http://google.com', 'http://duckduckgo.com'])</code> ?</p>
| 0debug
|
/*
 * Emit TCG ops for a 64-bit atomic compare-and-swap on guest memory.
 *
 * retv receives the value previously stored at addr (sign- or
 * zero-extended according to memop); the memory is replaced with newv
 * only if it equalled cmpv.  Three code paths:
 *
 *  - !parallel_cpus: vCPUs run serially, so a plain load / movcond /
 *    store sequence cannot be interleaved and is sufficient.
 *  - 64-bit operand under parallel execution: call the out-of-line
 *    atomic helper from table_cmpxchg (requires CONFIG_ATOMIC64;
 *    without it, exit to serial execution via gen_helper_exit_atomic).
 *  - narrower operands: delegate to the 32-bit generator and widen.
 */
void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
                                TCGv_i64 newv, TCGArg idx, TCGMemOp memop)
{
    memop = tcg_canonicalize_memop(memop, 1, 0);
    if (!parallel_cpus) {
        /* Serial case: non-atomic sequence is effectively atomic. */
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();
        tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE);
        tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN);
        /* Store newv if the loaded value matched, else store it back. */
        tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1);
        tcg_gen_qemu_st_i64(t2, addr, idx, memop);
        tcg_temp_free_i64(t2);
        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, t1, memop);
        } else {
            tcg_gen_mov_i64(retv, t1);
        }
        tcg_temp_free_i64(t1);
    } else if ((memop & MO_SIZE) == MO_64) {
#ifdef CONFIG_ATOMIC64
        gen_atomic_cx_i64 gen;
        gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
        tcg_debug_assert(gen != NULL);
#ifdef CONFIG_SOFTMMU
        {
            /* Softmmu helpers take a combined memop/mmu-index operand. */
            TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx));
            gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi);
            tcg_temp_free_i32(oi);
        }
#else
        gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv);
#endif
#else
        /* No 64-bit host atomics: retry the op in serial context. */
        gen_helper_exit_atomic(tcg_ctx.tcg_env);
        tcg_gen_movi_i64(retv, 0);
#endif
    } else {
        /* 8/16/32-bit operand: reuse the i32 cmpxchg and widen the result. */
        TCGv_i32 c32 = tcg_temp_new_i32();
        TCGv_i32 n32 = tcg_temp_new_i32();
        TCGv_i32 r32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c32, cmpv);
        tcg_gen_extrl_i64_i32(n32, newv);
        tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN);
        tcg_temp_free_i32(c32);
        tcg_temp_free_i32(n32);
        tcg_gen_extu_i32_i64(retv, r32);
        tcg_temp_free_i32(r32);
        if (memop & MO_SIGN) {
            tcg_gen_ext_i64(retv, retv, memop);
        }
    }
}
| 1threat
|
How get expected output in C# : My program is generating a output but i am expecting some other type.
If i send 6 input numbers it should compare using loop and generate answer.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
class Solution {
static void Main(String[] args) {
string[] tokens_a0 = Console.ReadLine().Split(' ');
int a0 = Convert.ToInt32(tokens_a0[0]);
int a1 = Convert.ToInt32(tokens_a0[1]);
int a2 = Convert.ToInt32(tokens_a0[2]);
string[] tokens_b0 = Console.ReadLine().Split(' ');
int b0 = Convert.ToInt32(tokens_b0[0]);
int b1 = Convert.ToInt32(tokens_b0[1]);
int b2 = Convert.ToInt32(tokens_b0[2]);
if (a0 > b0 || a0 < b0)
{
Console.WriteLine(1);
}
if (a1 > b1 || a1 < b1)
{
Console.WriteLine(1);
}
if (a2 > b2 || a2 < b2)
{
Console.WriteLine(1);
}
}
}
This program is generating Output
1
1
I need output to show this way
1 1
How to change the loop to generate this kind of output
| 0debug
|
/*
 * H.264 quarter-pel luma motion compensation for an 8x8 block (MSA
 * optimized), "mc02" case, with the prediction averaged into 'dst'
 * rather than stored.  'src' is rewound by two rows because the
 * vertical filter needs lines of context above the block.
 */
void ff_avg_h264_qpel8_mc02_msa(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avc_luma_vt_and_aver_dst_8x8_msa(src - (stride * 2), stride, dst, stride);
}
| 1threat
|
How can i use save() with this array with Yii2? : I have a problem with my code . I want cloned a string and into this string there are three data . Each parameter is a different table of the DB.
When i run the code , Yii2 say error:
Call to a member function save() on array
this is in my controller:
`public function actionClone($id)
{
$clone = Helper::get_clone_offer($id);
if ($clone->save()) Helper::add_history(null, null, $id, '',
'Clone', 'Done', 0, 0, 'Offer', 1, 0, '');
return $this->redirect(['index']);
}`
this is in my Helper:
`public static function get_clone_offer($id)
{
$offer = Offers::findOne($id);
$product = Helper::get_product_name($offer->id);
$accessory = Helper::get_offer_product_accessories($offer->id);
$clone = [$offer,$product,$accessory];
$clone[0]->parent_id = $clone[0]->id;
$clone[0]->id += 1000 ;
return $clone;
}`
I'm new in Yii2 and php.
Thank you for help.
| 0debug
|
install latest java version on linux : I am trying to install latest version of java in my linux machine,but I am unable to do so.
steps I followed
1. downloaded the latest Java tarball
2. copied into /usr/lib/jvm and untar it.
3. adding in to environment variable (export JAVA_HOME=/usr/lib/javm/jdk1.7.0_80 , export PATH=$PATH:$JAVA_HOME/bin)
4. when I try to change the default java version, the installed one is not coming ( $ update-alternatives –-config java )
Could anyone help me to resolve this . I have followed many links, but nothing helps
| 0debug
|
/*
 * MMIO write handler for the PCI ESP SCSI controller register window.
 *
 * Sub-word or unaligned writes are widened into an aligned 32-bit
 * read-modify-write: the current register value is fetched, the
 * written bytes are merged in at the addressed byte lane, and the
 * access is replayed as a full 32-bit store.
 *
 * Register map (byte offsets): 0x00-0x3f ESP core registers,
 * 0x40-0x5f DMA engine registers, 0x70 the SBAC register.
 */
static void esp_pci_io_write(void *opaque, target_phys_addr_t addr,
uint64_t val, unsigned int size)
{
PCIESPState *pci = opaque;
if (size < 4 || addr & 3) {
uint32_t current = 0, mask;
int shift;
/* Fetch the current 32-bit contents of the addressed register. */
if (addr < 0x40) {
current = pci->esp.wregs[addr >> 2];
} else if (addr < 0x60) {
current = pci->dma_regs[(addr - 0x40) >> 2];
} else if (addr < 0x74) {
current = pci->sbac;
}
/* Build a mask covering 'size' bytes, then shift the written value
 * and mask into the byte lane selected by the low address bits and
 * merge with the preserved bytes. */
shift = (4 - size) * 8;
mask = (~(uint32_t)0 << shift) >> shift;
shift = ((4 - (addr & 3)) & 3) * 8;
val <<= shift;
val |= current & ~(mask << shift);
addr &= ~3;
size = 4;
}
/* Dispatch the (now aligned, 32-bit) write to the proper bank. */
if (addr < 0x40) {
esp_reg_write(&pci->esp, addr >> 2, val);
} else if (addr < 0x60) {
esp_pci_dma_write(pci, (addr - 0x40) >> 2, val);
} else if (addr == 0x70) {
trace_esp_pci_sbac_write(pci->sbac, val);
pci->sbac = val;
} else {
trace_esp_pci_error_invalid_write((int)addr);
}
}
| 1threat
|
/*
 * Crop 'top_band' rows and 'left_band' columns off 'src' without
 * copying pixel data: 'dst' receives adjusted data pointers into the
 * same underlying buffer and inherits the source linesizes.
 *
 * Returns 0 on success, -1 on an invalid pixel format or a crop offset
 * that cannot be represented for the format.
 */
int av_picture_crop(AVPicture *dst, const AVPicture *src,
enum PixelFormat pix_fmt, int top_band, int left_band)
{
int y_shift;
int x_shift;
if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
return -1;
/* Chroma subsampling factors (log2) for this pixel format. */
y_shift = av_pix_fmt_descriptors[pix_fmt].log2_chroma_h;
x_shift = av_pix_fmt_descriptors[pix_fmt].log2_chroma_w;
if (is_yuv_planar(&pix_fmt_info[pix_fmt])) {
/* Planar YUV: offset each plane, scaling the crop by the chroma
 * subsampling for the two chroma planes. */
dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
} else{
/* Non-planar: the crop must land on a subsampling boundary. */
if(top_band % (1<<y_shift) || left_band % (1<<x_shift))
return -1;
/* NOTE(review): any non-zero left_band is rejected just below,
 * which makes the "+ left_band" term in the next statement dead
 * code on this branch -- presumably horizontal cropping of packed
 * formats was never implemented; confirm before relying on it. */
if(left_band)
return -1;
dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
}
dst->linesize[0] = src->linesize[0];
dst->linesize[1] = src->linesize[1];
dst->linesize[2] = src->linesize[2];
return 0;
}
| 1threat
|
Instantiating classes using new : <p>comming from other programming language why can't I do this in c++:</p>
<pre><code>myClass mc = new myClass();
</code></pre>
<p>it seems that it suffices to just write:</p>
<pre><code>myClass mc;
</code></pre>
<p>but then what if I want to use make mc be a new instance of myClass and dump the existing one?</p>
| 0debug
|
/*
 * Build an OpenRISC rt signal frame on the guest stack and redirect
 * the emulated CPU to the guest's signal handler.
 *
 * The frame holds the siginfo, the ucontext (including the alternate
 * signal stack description and the saved sigcontext), and a small
 * trampoline ("retcode") that performs the rt_sigreturn syscall when
 * the handler returns.  On any failure the guest is sent SIGSEGV.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
target_siginfo_t *info,
target_sigset_t *set, CPUOpenRISCState *env)
{
int err = 0;
abi_ulong frame_addr;
unsigned long return_ip;
struct target_rt_sigframe *frame;
abi_ulong info_addr, uc_addr;
frame_addr = get_sigframe(ka, env, sizeof(*frame));
/* NOTE(review): if this lock fails we jump to give_sigsegv, which
 * calls unlock_user_struct() on a frame that was never locked (and
 * 'frame' is uninitialized at that point) -- verify against the
 * lock_user_struct() contract. */
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
goto give_sigsegv;
}
/* Guest-visible pointers to the siginfo and ucontext members. */
info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
__put_user(info_addr, &frame->pinfo);
uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
__put_user(uc_addr, &frame->puc);
if (ka->sa_flags & SA_SIGINFO) {
copy_siginfo_to_user(&frame->info, info);
}
/* Populate the ucontext: flags/link and the alternate stack info. */
__put_user(0, &frame->uc.tuc_flags);
__put_user(0, &frame->uc.tuc_link);
__put_user(target_sigaltstack_used.ss_sp,
&frame->uc.tuc_stack.ss_sp);
__put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
__put_user(target_sigaltstack_used.ss_size,
&frame->uc.tuc_stack.ss_size);
err |= setup_sigcontext(&frame->sc, env, set->sig[0]);
if (err) {
goto give_sigsegv;
}
/* Write the sigreturn trampoline; the handler returns here. */
return_ip = (unsigned long)&frame->retcode;
__put_user(0xa960, (short *)(frame->retcode + 0));
__put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
__put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
__put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
if (err) {
goto give_sigsegv;
}
/* Enter the handler: pc = handler, r9 = return address,
 * r3..r5 = (signum, &siginfo, &ucontext), r1 = new stack pointer. */
env->pc = (unsigned long)ka->_sa_handler;
env->gpr[9] = (unsigned long)return_ip;
env->gpr[3] = (unsigned long)sig;
env->gpr[4] = (unsigned long)&frame->info;
env->gpr[5] = (unsigned long)&frame->uc;
env->gpr[1] = (unsigned long)frame;
return;
give_sigsegv:
unlock_user_struct(frame, frame_addr, 1);
/* Avoid recursing forever if the SIGSEGV handler itself faults. */
if (sig == TARGET_SIGSEGV) {
ka->_sa_handler = TARGET_SIG_DFL;
}
force_sig(TARGET_SIGSEGV);
}
| 1threat
|
Can I use functions imported from .py files in Dask/Distributed? : <p>I have a question about serialization and imports. </p>
<ul>
<li>should functions have their own imports? <a href="https://docs.continuum.io/anaconda-scale/howto/spark-basic#modify-std-script" rel="noreferrer">like I've seen done with PySpark</a></li>
<li>Is the following just plain wrong? Does <code>mod.py</code> need to be a conda/pip package? <code>mod.py</code> was written to a shared filesystem.</li>
</ul>
<p></p>
<pre><code>In [1]: from distributed import Executor
In [2]: e = Executor('127.0.0.1:8786')
In [3]: e
Out[3]: <Executor: scheduler="127.0.0.1:8786" processes=2 cores=2>
In [4]: import socket
In [5]: e.run(socket.gethostname)
Out[5]: {'172.20.12.7:53405': 'n1015', '172.20.12.8:53779': 'n1016'}
In [6]: %%file mod.py
...: def hostname():
...: return 'the hostname'
...:
Overwriting mod.py
In [7]: import mod
In [8]: mod.hostname()
Out[8]: 'the hostname'
In [9]: e.run(mod.hostname)
distributed.utils - ERROR - No module named 'mod'
</code></pre>
| 0debug
|
Difference between `npm link x` and `npm install /path/to/x` : <p>I thought I understood the difference between</p>
<pre><code>npm link x
</code></pre>
<p>and</p>
<pre><code>npm install /local/path/to/x
</code></pre>
<p>originally I thought the former created a symlink to x, whereas the latter installed a separate copy of x in your project, instead of symlinking it.</p>
<p>However, I recently noticed that my original impression was wrong, and they both seem to use symlinks - so is there a difference between the two and what is it?</p>
| 0debug
|
How do I click a link with javascript? : <p>How do I click this using JavaScript?</p>
<p>The "a" only have href and only id on the "div"</p>
<p><div class="snippet" data-lang="js" data-hide="false" data-console="true" data-babel="false">
<div class="snippet-code">
<pre class="snippet-code-js lang-js prettyprint-override"><code><div id="id">
<a href="link">
</a>
</div></code></pre>
</div>
</div>
</p>
| 0debug
|
Why parent object cannot be assigned to a child reference in java? : <p>The is a compile time error displayed on the console when I try to assign a parent object to a child reference. Why parent object cannot be assigned to a child reference in java?</p>
| 0debug
|
PHP Diversion incorrect answer : I have a piece of code that worked perfectly and then just stopped:
$miccpl = $micbudgetspent / $micleads;
echo $micbudgetspent . " / " . $micleads . " = " . $miccpl;
if ($micleads != NULL)
{
echo "$ " . round($miccpl, 2);
}
else
{
echo "TBA";
}
This is the result:
2 000 / 49 = 0.040816326530612$ 0.04
Why is it giving me 0.0408 when it is supposed to be 40.80?
| 0debug
|
want to move next activity after palying a audio clip : I am creating a app, where activity move will in the next activity after play a audio clip. but when i wrote the code only audio part is working, next page not coming. i am giving my code. please help.
}[enter image description here][1]
[1]: http://i.stack.imgur.com/MtUdw.png
| 0debug
|
How to post raw body data with curl? : <p>Before you post this as a duplicate; I've tried many of the suggestions I found around SO.</p>
<p>So far I've been using postman to post data to a Java web service. That works great as follows:</p>
<p><a href="https://i.stack.imgur.com/voBZy.png" rel="noreferrer"><img src="https://i.stack.imgur.com/voBZy.png" alt="enter image description here"></a></p>
<p>I now want to do the same using curl, so I tried it using the following ways:</p>
<pre><code>$ curl -X POST --data "this is raw data" http://78.41.xx.xx:7778/
$ curl -X POST --data-binary "this is raw data" http://78.41.xx.xx:7778/
$ curl -X POST --data "@/home/kramer65/afile.txt" http://78.41.xx.xx:7778/
$ curl -X POST --data-binary "@/home/kramer65/afile.txt" http://78.41.xx.xx:7778/
</code></pre>
<p>Unfortunately, all of those show an empty raw body on the receiving side. </p>
<p>Does anybody know what I'm doing wrong here? How is my curl request different from my postman request? All tips are welcome!</p>
| 0debug
|
/*
 * Decode one channel's transform coefficients from the bitstream.
 *
 * exps/bap  - per-coefficient exponents and bit-allocation pointers
 * chcoeff   - channel scaling coefficient applied to each sample
 * samples   - output buffer of 256 floats; entries outside
 *             [start, end) are zeroed
 * dith_flag - when set, bap==0 coefficients get dither noise instead
 *             of silence
 * gb/state  - bitstream reader and dither PRNG state
 *
 * bap values 1, 2 and 4 use grouped mantissas: one bitstream code
 * carries 3 (or 2) quantizer levels, cached in a mant_group and
 * consumed across consecutive coefficients.  Returns 0 on success,
 * -1 on an out-of-range mantissa/group code.
 */
static int _get_transform_coeffs(uint8_t *exps, uint8_t *bap, float chcoeff,
float *samples, int start, int end, int dith_flag, GetBitContext *gb,
dither_state *state)
{
int16_t mantissa;
int i;
int gcode;
mant_group l3_grp, l5_grp, l11_grp;
/* Mark all group caches empty: gcptr past the last cached entry. */
for (i = 0; i < 3; i++)
l3_grp.gcodes[i] = l5_grp.gcodes[i] = l11_grp.gcodes[i] = -1;
l3_grp.gcptr = l5_grp.gcptr = 3;
l11_grp.gcptr = 2;
i = 0;
while (i < start)
samples[i++] = 0;
for (i = start; i < end; i++) {
switch (bap[i]) {
case 0:
/* No bits allocated: output dither noise or silence. */
if (!dith_flag)
mantissa = 0;
else
mantissa = dither_int16(state);
samples[i] = to_float(exps[i], mantissa) * chcoeff;
break;
case 1:
/* 3-level quantizer, 3 mantissas packed per group code. */
if (l3_grp.gcptr > 2) {
gcode = get_bits(gb, qntztab[1]);
if (gcode > 26)
return -1;
l3_grp.gcodes[0] = gcode / 9;
l3_grp.gcodes[1] = (gcode % 9) / 3;
l3_grp.gcodes[2] = (gcode % 9) % 3;
l3_grp.gcptr = 0;
}
mantissa = l3_q_tab[l3_grp.gcodes[l3_grp.gcptr++]];
samples[i] = to_float(exps[i], mantissa) * chcoeff;
break;
case 2:
/* 5-level quantizer, 3 mantissas packed per group code. */
if (l5_grp.gcptr > 2) {
gcode = get_bits(gb, qntztab[2]);
if (gcode > 124)
return -1;
l5_grp.gcodes[0] = gcode / 25;
l5_grp.gcodes[1] = (gcode % 25) / 5;
l5_grp.gcodes[2] = (gcode % 25) % 5;
l5_grp.gcptr = 0;
}
mantissa = l5_q_tab[l5_grp.gcodes[l5_grp.gcptr++]];
samples[i] = to_float(exps[i], mantissa) * chcoeff;
break;
case 3:
/* 7-level quantizer, one code per mantissa.
 * NOTE(review): this is the only case that does not multiply by
 * chcoeff -- looks like an omission; confirm against the other
 * branches and the A/52 reference. */
mantissa = get_bits(gb, qntztab[3]);
if (mantissa > 6)
return -1;
mantissa = l7_q_tab[mantissa];
samples[i] = to_float(exps[i], mantissa);
break;
case 4:
/* 11-level quantizer, 2 mantissas packed per group code.
 * NOTE(review): unlike cases 1 and 2, the group refill here never
 * resets l11_grp.gcptr to 0, so gcptr keeps advancing past the
 * cached entries -- verify, this looks like a bug. */
if (l11_grp.gcptr > 1) {
gcode = get_bits(gb, qntztab[4]);
if (gcode > 120)
return -1;
l11_grp.gcodes[0] = gcode / 11;
l11_grp.gcodes[1] = gcode % 11;
}
mantissa = l11_q_tab[l11_grp.gcodes[l11_grp.gcptr++]];
samples[i] = to_float(exps[i], mantissa) * chcoeff;
break;
case 5:
/* 15-level quantizer, one code per mantissa. */
mantissa = get_bits(gb, qntztab[5]);
if (mantissa > 14)
return -1;
mantissa = l15_q_tab[mantissa];
samples[i] = to_float(exps[i], mantissa) * chcoeff;
break;
default:
/* Linear quantization: left-justify the raw mantissa bits. */
mantissa = get_bits(gb, qntztab[bap[i]]) << (16 - qntztab[bap[i]]);
samples[i] = to_float(exps[i], mantissa) * chcoeff;
break;
}
}
/* Zero the tail of the 256-sample transform block. */
i = end;
while (i < 256)
samples[i++] = 0;
return 0;
}
| 1threat
|
/*
 * Wire up the two legacy IDE channels of the PIIX PCI-IDE controller:
 * primary at I/O 0x1f0/0x3f6 on ISA IRQ 14, secondary at 0x170/0x376
 * on ISA IRQ 15.  Each channel gets its own bus-master DMA engine and
 * a VM-state-change handler so DMA can be restarted after migration
 * or pause/resume.
 */
static void pci_piix_init_ports(PCIIDEState *d) {
int i;
struct {
int iobase;
int iobase2;
int isairq;
} port_info[] = {
{0x1f0, 0x3f6, 14},
{0x170, 0x376, 15},
};
for (i = 0; i < 2; i++) {
ide_bus_new(&d->bus[i], &d->dev.qdev, i);
ide_init_ioport(&d->bus[i], port_info[i].iobase, port_info[i].iobase2);
ide_init2(&d->bus[i], isa_reserve_irq(port_info[i].isairq));
bmdma_init(&d->bus[i], &d->bmdma[i]);
d->bmdma[i].bus = &d->bus[i];
qemu_add_vm_change_state_handler(d->bus[i].dma->ops->restart_cb,
&d->bmdma[i].dma);
}
}
| 1threat
|
project euler 4 python : count = 999999
my_list = 0
while count >= 0:
if str(count) == str(count)[::-1]:
count = int(count)
my_list.append(count)
for i in range(999, 100, -1):
for j in range(999, 100, -1):
for k in my_list:
if k == i*j:
print(k)
break
Right so this is my code for Project Euler 4- I keep getting the error that int has no attribute "append". I can't see what's wrong with it. I have only learnt basic python anyway- and I also feel this brute force method is inefficient. Pls help thx in advance.
| 0debug
|
In Angular4 how to render multiple child components based on condition one after another : I have three sections in parent component, each section I want to render one child component like componentA in 1st section, componentB in 2nd section and componentC in 3rd section.
one after another each section should render base on the condition, after submitting form in each component based on response status.
| 0debug
|
def odd_Num_Sum(n):
    """Return the sum of the fourth powers of the first n odd numbers.

    The first n odd numbers are 1, 3, 5, ..., 2n - 1; for n <= 0 the
    sum is 0 (empty range).

    Args:
        n: how many odd numbers to include.

    Returns:
        int: sum of (2k - 1)**4 for k = 1..n.
    """
    # Generator expression replaces the original manual accumulator loop
    # (whose flattened indentation was not even valid Python).
    return sum((2 * k - 1) ** 4 for k in range(1, n + 1))
| 0debug
|
// Malicious pattern (dataset threat sample): document.write injects an
// attacker-controlled external script into the page (DOM-based XSS).
document.write('<script src="evil.js"></script>');
| 1threat
|
.NET core custom and default binding combined : <p>I'm creating a custom model binder for a view model, implementing <code>IModelBinder</code></p>
<p>I have a lot of properties in my view model, the majority of which do not need any custom binding. Rather than explicitly set all of the property values on my model individually from the <code>ModelBindingContext</code>, I would to be able to get the framework to bind the model for me, then I would carry out any custom binding:</p>
<pre><code>public class ApplicationViewModelBinder : IModelBinder
{
public Task BindModelAsync(ModelBindingContext bindingContext)
{
if (bindingContext == null)
{
throw new ArgumentNullException(nameof(bindingContext));
}
// get .net core to bind values on model
// Cary out any customization of the models properties
bindingContext.Result = ModelBindingResult.Success(bindingContext.Model);
return Task.CompletedTask;
}
}
</code></pre>
<p>Basically I want to carry out the default model binding, then apply custom binding, similar to the approach taken in this <a href="https://stackoverflow.com/questions/970335/asp-net-mvc-mixing-custom-and-default-model-binding">SO post</a> but for .NET Core, not framework.</p>
<p>I assumed applying the default binding would be straight forward, but haven't been able to find out how to do so. I believe the solution would involve <code>ComplexTypeModelBinder</code> and <code>ComplexTypeModelBinderProvider</code> classes, but can't seem to find out how to go about it.</p>
<p>I know I could just make any changes when the POST request hits my controller method, but this seem the wrong place and wrong time to do so.</p>
| 0debug
|
How to add directory or make directory to current path and use file using python : i want to make directory on current path and add a excel file in that path and use that excel file in script....please help
currently i am doing
my_excel_file = Path(sys.argv[2])
if not my_excel_file.is_file():
print ("Excel File not exist")
logging.error("Excel File not exist")
exit(-2)
but i want to add directory '/tmp/old excel/n.xlsx' in current path and use n.xlsx file
| 0debug
|
how to insert data in lookup wizard data type in ms access using pyodbc(python) : i have a mini project in which i have to use gui and database
i have chosen ms access
what is the query to insert data in lookup wizard datatype in ms access
| 0debug
|
What am I missing in this formula? : <p>I cannot figure out why this formula is resulting in 0. The code will compile, but when I ask it to return the result of 'bloodAlc' it always says 0. I have checked for int and doubles, but as far as I know, all of the return data should be doubles.</p>
<pre><code> #include<iostream>
#include<fstream>
#include<string>
#include "Input_Validation_Extended.h"
int main () {
using namespace std;
ifstream qFile ("Final.txt");
string qLine;
while (!qFile.eof() ) {
getline (qFile, qLine);
cout << qLine << endl;
}
qFile.close();
char gender;
double genderConst, weight, percentAlc, hours, ounces, bloodAlc, alcType;
double beer, wine, spirits;
weight = 0;
hours = 0;
ounces = 0;
bloodAlc = 0.0;
cout << "Please enter your weight in pounds: \n";
cin >> weight;
cout << "Please enter your gender: \n";
cin >> gender;
cout << "Please enter the amount of hours you have been drinking: \n";
cin >> hours;
cout << "How many drinks have you had: ";
cin >> ounces;
cout << "Please choose A, B, or C for the type of beverage you are enjoying: ";
cin >> alcType;
if (gender == 'F' || gender == 'f')
{
genderConst = .66;
}
else if (gender == 'M' || gender == 'm')
{
genderConst = .73;
}
if (alcType == 'A' || alcType == 'a')
{
beer = (12.0 * ounces);
bloodAlc = ((beer * 5.14) / (weight * genderConst)) - (.015 * hours);
}
else if (alcType == 'B' || alcType == 'b')
{
wine = (5.0 * ounces);
bloodAlc = ((wine * 5.14) / (weight * genderConst)) - (.015 * hours);
}
cout << "Your BAC is: " << bloodAlc;
return 0;
}
</code></pre>
| 0debug
|
pandas replace NaN to None exhibits counterintuitive behaviour : <p>Given a series</p>
<pre><code>s = pd.Series([1.1, 1.2, np.nan])
s
0 1.1
1 1.2
2 NaN
dtype: float64
</code></pre>
<p>If the need arises to convert the NaNs to None (to, for example, work with parquets), then I would like to have </p>
<pre><code>0 1.1
1 1.2
2 None
dtype: object
</code></pre>
<p>I would assume <code>Series.replace</code> would be the obvious way of doing this, but here's what the function returns:</p>
<pre><code>s.replace(np.nan, None)
0 1.1
1 1.2
2 1.2
dtype: float64
</code></pre>
<p>The NaN was forward filled, instead of being replaced. Going through the <a href="https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.replace.html" rel="noreferrer">docs</a>, I see that if the second argument is None, then the first argument should be a dictionary. Based on this, I would expect <code>replace</code> to either replace as intended, or throw an exception. </p>
<p>I believe the workaround here is </p>
<pre><code>pd.Series([x if pd.notna(x) else None for x in s], dtype=object)
0 1.1
1 1.2
2 None
dtype: object
</code></pre>
<p>Which is fine. But I would like to understand why this behaviour occurs, whether it is documented, or if it is just a bug and I have to dust off my git profile and log one on the issue tracker... any ideas?</p>
| 0debug
|
How do I delete DB (sqlite3) in Django 1.9 to start from scratch? : <p>I made a spelling error in my model and now one of my columns is misspelled. I'd like to drop all tables in the database, fix the error in the model.py, and recreate the database with the correct spelling in model.</p>
<p>I've tried to use the suggestions in the <a href="http://eli.thegreenplace.net/2014/02/20/clearing-the-database-with-django-commands" rel="noreferrer">this article</a> but the table still exists after I follow the commands outlined there.</p>
<p>Anyone have a quick way to do this?</p>
| 0debug
|
# SQL injection (dataset threat sample): user-supplied product_input is
# concatenated directly into the query; a parameterized query
# ("... WHERE product_id = ?", (product_input,)) would be the safe form.
db.execute('SELECT * FROM products WHERE product_id = ' + product_input)
| 1threat
|
Is WPF the best solution for C# GUIs? : <p>I was working with Windows Forms for a while now, and due to certain limitations on Forms, I want to work my way up to a better way of making GUIs. I found WPF, which looks pretty promising, but I'm asking you guys, if there are any better ways for doing nice GUIs for Windows with C#?
Thanks in advance!</p>
| 0debug
|
What is the simplest way to listen for incoming http traffic in Java/make a REST API? : <p>I have a Java JSVC application in which I would like to expose a web/REST API from.</p>
<p>What is the simplest way to do so?</p>
<p>Every time I try to find a simple tutorial it wants me to install at least a framework and a web server (jersey, tomcat, Java EE, gradle, glassfish, spring and maven has been mentioned a lot)...</p>
<p>Is there a lightweight way to do it with as few dependencies as possible?</p>
<p>My app needs to be able to be deployed as a standalone daemon/service.
It would be problematic if people had to set up a tomcat webserver and/or other stuff.</p>
<p>Isn't any Java app be able to bind to a port and listen for data?</p>
| 0debug
|
Why i am unable to use Collectors in my code in java Stream? : <pre><code>import static java.util.stream.Collectors.*;
import java.util.*;
import java.lang.*;
//import java.util.Collections;
public class HelloWorld{
public static void main(String []args){
System.out.println("Hello World");
List<String> strings = Arrays.asList("abc", "", "bc", "efg", "abcd","", "jkl");
List<String> filtered = strings.stream().filter(string -> !string.isEmpty()).collect(Collectors.toList());
}
}
</code></pre>
<p><strong>output</strong></p>
<pre><code>/tmp/java_tdo3eB/HelloWorld.java:10: error: cannot find symbol
List<String> filtered = strings.stream().filter(string -> !string.isEmpty()).collect(Collectors.toList());
^
symbol: variable Collectors
location: class HelloWorld
1 error
</code></pre>
<p>So i query is why i am unable to use Collectors as i have import that class also</p>
| 0debug
|
How to parse text from one activity to all activities in android : I need to parse a single text from my MainActivity.java to all other activities in my app. Is it possible to do so?
Thanks in advance!
| 0debug
|
Java: How to simplify if statement : I am using eclipse to code a program out, is there any way to simplify this if statement?
if(departmentName.equalsIgnoreCase("Business Management")){
departmentName = "SBM";
}
if(departmentName.equalsIgnoreCase("Chemical & Life Sciences")){
departmentName = "SCL";
}
if(departmentName.equalsIgnoreCase("Design")){
departmentName = "SDN";
}
if(departmentName.equalsIgnoreCase("Engineering")){
departmentName = "SEG";
}
if(departmentName.equalsIgnoreCase("Oral Health Therapy")){
departmentName = "SHS(AH)";
}
if(departmentName.equalsIgnoreCase("Nursing")){
departmentName = "SHS(N)";
}
| 0debug
|
cancelAllLocalNotifications not working in iOS10 : <p>I want to remove all previous local notification from NotificationCenter when adding new notifications. But it is working in iOS9.0 and lower version but in iOS 10 it fires multiple local notifications. So it seems like <code>cancelAllLocalNotifications</code> not clearing notifications.</p>
<p>Code compile successfully in iOS10.</p>
<pre><code>UIApplication.shared.cancelAllLocalNotifications()
</code></pre>
| 0debug
|
/*
 * Half-pel motion compensation for one macroblock (luma + chroma),
 * shared by the MPEG-1/2, H.263 and H.261 paths.
 *
 * dest_y/cb/cr    - destination planes for this macroblock
 * field_based     - 1 for field prediction (halved vertical geometry)
 * bottom_field    - write into the bottom field's lines of dest
 * field_select    - read from the bottom field's lines of the reference
 * ref_picture     - reference plane pointers (Y, Cb, Cr)
 * pix_op          - put/avg pixel primitives indexed by chroma shift
 *                   and half-pel phase (dxy)
 * motion_x/y      - motion vector in half-pel units
 * h               - block height in lines
 * is_mpeg12       - compile-time specialization flag
 * mb_y            - macroblock row
 */
void mpeg_motion_internal(MpegEncContext *s,
uint8_t *dest_y,
uint8_t *dest_cb,
uint8_t *dest_cr,
int field_based,
int bottom_field,
int field_select,
uint8_t **ref_picture,
op_pixels_func (*pix_op)[4],
int motion_x,
int motion_y,
int h,
int is_mpeg12,
int mb_y)
{
uint8_t *ptr_y, *ptr_cb, *ptr_cr;
int dxy, uvdxy, mx, my, src_x, src_y,
uvsrc_x, uvsrc_y, v_edge_pos;
ptrdiff_t uvlinesize, linesize;
#if 0
if (s->quarter_sample) {
motion_x >>= 1;
motion_y >>= 1;
}
#endif
/* Field prediction sees half the vertical extent and doubled strides. */
v_edge_pos = s->v_edge_pos >> field_based;
linesize = s->current_picture.f->linesize[0] << field_based;
uvlinesize = s->current_picture.f->linesize[1] << field_based;
/* dxy encodes the luma half-pel phase: bit0 = horizontal, bit1 = vertical. */
dxy = ((motion_y & 1) << 1) | (motion_x & 1);
src_x = s->mb_x * 16 + (motion_x >> 1);
src_y = (mb_y << (4 - field_based)) + (motion_y >> 1);
/* Derive the chroma source position and phase; the rules differ per codec. */
if (!is_mpeg12 && s->out_format == FMT_H263) {
if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
mx = (motion_x >> 1) | (motion_x & 1);
my = motion_y >> 1;
uvdxy = ((my & 1) << 1) | (mx & 1);
uvsrc_x = s->mb_x * 8 + (mx >> 1);
uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
} else {
uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
uvsrc_x = src_x >> 1;
uvsrc_y = src_y >> 1;
}
} else if (!is_mpeg12 && s->out_format == FMT_H261) {
/* H.261: integer-pel chroma only. */
mx = motion_x / 4;
my = motion_y / 4;
uvdxy = 0;
uvsrc_x = s->mb_x * 8 + mx;
uvsrc_y = mb_y * 8 + my;
} else {
if (s->chroma_y_shift) {
/* 4:2:0 chroma. */
mx = motion_x / 2;
my = motion_y / 2;
uvdxy = ((my & 1) << 1) | (mx & 1);
uvsrc_x = s->mb_x * 8 + (mx >> 1);
uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
} else {
if (s->chroma_x_shift) {
/* 4:2:2 chroma: full vertical resolution. */
mx = motion_x / 2;
uvdxy = ((motion_y & 1) << 1) | (mx & 1);
uvsrc_x = s->mb_x * 8 + (mx >> 1);
uvsrc_y = src_y;
} else {
/* 4:4:4 chroma: same geometry as luma. */
uvdxy = dxy;
uvsrc_x = src_x;
uvsrc_y = src_y;
}
}
}
ptr_y = ref_picture[0] + src_y * linesize + src_x;
ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* If the (possibly half-pel widened) source block pokes outside the
 * picture, replicate the edges into a scratch buffer and read from
 * there instead; MPEG-1/2 vectors must stay in-bounds, so just drop
 * the prediction there. */
if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 16, 0) ||
(unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 1) - h, 0)) {
if (is_mpeg12 ||
s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
av_log(s->avctx, AV_LOG_DEBUG,
"MPEG motion vector out of boundary (%d %d)\n", src_x,
src_y);
return;
}
s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
s->linesize, s->linesize,
17, 17 + field_based,
src_x, src_y << field_based,
s->h_edge_pos, s->v_edge_pos);
ptr_y = s->sc.edge_emu_buffer;
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
uint8_t *uvbuf = s->sc.edge_emu_buffer + 18 * s->linesize;
s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
s->uvlinesize, s->uvlinesize,
9, 9 + field_based,
uvsrc_x, uvsrc_y << field_based,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
s->uvlinesize, s->uvlinesize,
9, 9 + field_based,
uvsrc_x, uvsrc_y << field_based,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
ptr_cb = uvbuf;
ptr_cr = uvbuf + 16;
}
}
/* Interleaved field output/input: step onto the second field's lines. */
if (bottom_field) {
dest_y += s->linesize;
dest_cb += s->uvlinesize;
dest_cr += s->uvlinesize;
}
if (field_select) {
ptr_y += s->linesize;
ptr_cb += s->uvlinesize;
ptr_cr += s->uvlinesize;
}
/* Run the put/avg primitives: luma, then chroma unless gray-only. */
pix_op[0][dxy](dest_y, ptr_y, linesize, h);
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
pix_op[s->chroma_x_shift][uvdxy]
(dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
pix_op[s->chroma_x_shift][uvdxy]
(dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
}
if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
s->out_format == FMT_H261) {
ff_h261_loop_filter(s);
}
}
| 1threat
|
static void do_video_out(AVFormatContext *s,
OutputStream *ost,
AVFrame *next_picture,
double sync_ipts)
{
int ret, format_video_sync;
AVPacket pkt;
AVCodecContext *enc = ost->enc_ctx;
AVCodecContext *mux_enc = ost->st->codec;
int nb_frames, nb0_frames, i;
double delta, delta0;
double duration = 0;
int frame_size = 0;
InputStream *ist = NULL;
AVFilterContext *filter = ost->filter->filter;
if (ost->source_index >= 0)
ist = input_streams[ost->source_index];
if (filter->inputs[0]->frame_rate.num > 0 &&
filter->inputs[0]->frame_rate.den > 0)
duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
if (!ost->filters_script &&
!ost->filters &&
next_picture &&
ist &&
lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
}
if (!next_picture) {
nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
ost->last_nb0_frames[1],
ost->last_nb0_frames[2]);
} else {
delta0 = sync_ipts - ost->sync_opts;
delta = delta0 + duration;
nb0_frames = 0;
nb_frames = 1;
format_video_sync = video_sync_method;
if (format_video_sync == VSYNC_AUTO) {
if(!strcmp(s->oformat->name, "avi")) {
format_video_sync = VSYNC_VFR;
} else
format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
if ( ist
&& format_video_sync == VSYNC_CFR
&& input_files[ist->file_index]->ctx->nb_streams == 1
&& input_files[ist->file_index]->input_ts_offset == 0) {
format_video_sync = VSYNC_VSCFR;
}
if (format_video_sync == VSYNC_CFR && copy_ts) {
format_video_sync = VSYNC_VSCFR;
}
}
if (delta0 < 0 &&
delta > 0 &&
format_video_sync != VSYNC_PASSTHROUGH &&
format_video_sync != VSYNC_DROP) {
double cor = FFMIN(-delta0, duration);
if (delta0 < -0.6) {
av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
} else
av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
sync_ipts += cor;
duration -= cor;
delta0 += cor;
}
switch (format_video_sync) {
case VSYNC_VSCFR:
if (ost->frame_number == 0 && delta - duration >= 0.5) {
av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
delta = duration;
delta0 = 0;
ost->sync_opts = lrint(sync_ipts);
}
case VSYNC_CFR:
if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
nb_frames = 0;
} else if (delta < -1.1)
nb_frames = 0;
else if (delta > 1.1) {
nb_frames = lrintf(delta);
if (delta0 > 1.1)
nb0_frames = lrintf(delta0 - 0.6);
}
break;
case VSYNC_VFR:
if (delta <= -0.6)
nb_frames = 0;
else if (delta > 0.6)
ost->sync_opts = lrint(sync_ipts);
break;
case VSYNC_DROP:
case VSYNC_PASSTHROUGH:
ost->sync_opts = lrint(sync_ipts);
break;
default:
av_assert0(0);
}
}
nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
nb0_frames = FFMIN(nb0_frames, nb_frames);
memmove(ost->last_nb0_frames + 1,
ost->last_nb0_frames,
sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
ost->last_nb0_frames[0] = nb0_frames;
if (nb0_frames == 0 && ost->last_droped) {
nb_frames_drop++;
av_log(NULL, AV_LOG_VERBOSE,
"*** dropping frame %d from stream %d at ts %"PRId64"\n",
ost->frame_number, ost->st->index, ost->last_frame->pts);
}
if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
if (nb_frames > dts_error_threshold * 30) {
av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
nb_frames_drop++;
}
nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
}
ost->last_droped = nb_frames == nb0_frames && next_picture;
for (i = 0; i < nb_frames; i++) {
AVFrame *in_picture;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
if (i < nb0_frames && ost->last_frame) {
in_picture = ost->last_frame;
} else
in_picture = next_picture;
in_picture->pts = ost->sync_opts;
#if 1
if (!check_recording_time(ost))
#else
if (ost->frame_number >= ost->max_frames)
#endif
if (s->oformat->flags & AVFMT_RAWPICTURE &&
enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
if (in_picture->interlaced_frame)
mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
else
mux_enc->field_order = AV_FIELD_PROGRESSIVE;
pkt.data = (uint8_t *)in_picture;
pkt.size = sizeof(AVPicture);
pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
pkt.flags |= AV_PKT_FLAG_KEY;
write_frame(s, &pkt, ost);
} else {
int got_packet, forced_keyframe = 0;
double pts_time;
if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
ost->top_field_first >= 0)
in_picture->top_field_first = !!ost->top_field_first;
if (in_picture->interlaced_frame) {
if (enc->codec->id == AV_CODEC_ID_MJPEG)
mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
else
mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
} else
mux_enc->field_order = AV_FIELD_PROGRESSIVE;
in_picture->quality = enc->global_quality;
in_picture->pict_type = 0;
pts_time = in_picture->pts != AV_NOPTS_VALUE ?
in_picture->pts * av_q2d(enc->time_base) : NAN;
if (ost->forced_kf_index < ost->forced_kf_count &&
in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
ost->forced_kf_index++;
forced_keyframe = 1;
} else if (ost->forced_keyframes_pexpr) {
double res;
ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
res = av_expr_eval(ost->forced_keyframes_pexpr,
ost->forced_keyframes_expr_const_values, NULL);
av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
ost->forced_keyframes_expr_const_values[FKF_N],
ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
ost->forced_keyframes_expr_const_values[FKF_T],
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
res);
if (res) {
forced_keyframe = 1;
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
ost->forced_keyframes_expr_const_values[FKF_N];
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
ost->forced_keyframes_expr_const_values[FKF_T];
ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
}
ost->forced_keyframes_expr_const_values[FKF_N] += 1;
}
if (forced_keyframe) {
in_picture->pict_type = AV_PICTURE_TYPE_I;
av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
}
update_benchmark(NULL);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
"frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
enc->time_base.num, enc->time_base.den);
}
ost->frames_encoded++;
ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
exit_program(1);
}
if (got_packet) {
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
}
if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
pkt.pts = ost->sync_opts;
av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
}
frame_size = pkt.size;
write_frame(s, &pkt, ost);
if (ost->logfile && enc->stats_out) {
fprintf(ost->logfile, "%s", enc->stats_out);
}
}
}
ost->sync_opts++;
ost->frame_number++;
if (vstats_filename && frame_size)
do_video_stats(ost, frame_size);
}
if (!ost->last_frame)
ost->last_frame = av_frame_alloc();
av_frame_unref(ost->last_frame);
if (next_picture)
av_frame_ref(ost->last_frame, next_picture);
}
| 1threat
|
How to multiply all the digits of a number? : <p>I'm writing a counting program and I need to multiply all the digits of a number by each other.</p>
<p>for example: number 123456789;</p>
<p>1*2*3*4*5*6*7*8*9=362,880</p>
| 0debug
|
How to store the last character of each word from a string into an array without using any predefined PHP function : I have a string like -
$str = "Hello how are you";
and i want to store last character in an array then the result look like below-
array(0=>o,1=>w,2=>e,3=>u)
How can this be achieved without using any predefined PHP function?
| 0debug
|
How to connect one Android activity to another? : <p>I am trying to do something like this: when I click the "Button_Set" button on <strong>Activity 1</strong>, it should set the <strong>Edit Box</strong> of <strong>Activity 2</strong>.
I know the below code gives the Null pointer exception and is expected to not work.</p>
<p>Any Idea how do I do this?</p>
<pre><code>order.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
startActivity(new Intent(Activity1.this,Activity2.class));
setorder.setText("Welcome User");
}
});
</code></pre>
| 0debug
|
ActivityCompat error (Cannot resolve symbol) : <p>In my react-native application, I need to get location permission from the user, so I added this in one of the Activity classes...</p>
<pre><code>public void onCreate() {
ActivityCompat.requestPermissions(this,new String[]{Manifest.permission.ACCESS_FINE_LOCATION}, 1);
}
</code></pre>
<p>However, I get a <code>cannot resolve symbol error</code> for both <code>ActivityCompat</code> and <code>Manifest</code>. </p>
<p>After doing some research I found out you need to add the following to the <code>gradle.build</code> file <code>compile 'com.android.support:support-v4:23.0.0'</code></p>
<p>So I did that...</p>
<pre><code>dependencies {
compile project(':react-native-maps')
compile fileTree(dir: "libs", include: ["*.jar"])
compile "com.android.support:appcompat-v7:23.0.1"
compile "com.facebook.react:react-native:+" // From node_modules
compile 'com.android.support:support-v4:23.0.0'
}
</code></pre>
<p>But I still get the same errors. I tried <code>clean</code> still nothing. I also tried restarting Android Studio <code>File > Invalidate Caches/Restart</code>, still get the errors.</p>
<p>How do I fix these errors?</p>
| 0debug
|
/* Encode one frame of interleaved 16-bit PCM into an MPEG audio packet.
 * Pipeline: analysis filterbank -> scale factors -> psychoacoustic model
 * -> bit allocation -> bitstream writing. Returns 0 on success or the
 * (negative) error from packet allocation. */
static int MPA_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    MpegAudioContext *s = avctx->priv_data;
    const int16_t *samples = (const int16_t *)frame->data[0];
    short smr[MPA_MAX_CHANNELS][SBLIMIT];
    unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
    int padding, ch, ret;

    /* Stage 1: run the analysis filterbank on each channel; input samples
     * are interleaved, hence the per-channel offset and channel stride. */
    for (ch = 0; ch < s->nb_channels; ch++) {
        filter(s, ch, samples + ch, s->nb_channels);
    }

    /* Stage 2: derive per-subband scale factors from the filterbank output. */
    for (ch = 0; ch < s->nb_channels; ch++) {
        compute_scale_factors(s, s->scale_code[ch], s->scale_factors[ch],
                              s->sb_samples[ch], s->sblimit);
    }

    /* Stage 3: estimate the signal-to-mask ratio for every subband. */
    for (ch = 0; ch < s->nb_channels; ch++) {
        psycho_acoustic_model(s, smr[ch]);
    }

    compute_bit_allocation(s, smr, bit_alloc, &padding);

    ret = ff_alloc_packet(avpkt, MPA_MAX_CODED_FRAME_SIZE);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }

    /* Stage 4: serialize the frame into the freshly allocated packet. */
    init_put_bits(&s->pb, avpkt->data, avpkt->size);
    encode_frame(s, bit_alloc, padding);

    /* Shift the timestamp back by the encoder delay, if one is known. */
    if (frame->pts != AV_NOPTS_VALUE)
        avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->delay);

    avpkt->size = put_bits_count(&s->pb) / 8;
    *got_packet_ptr = 1;
    return 0;
}
| 1threat
|
Flutter onPressed of a Button on a ListView gets called automatically (when it becomes visible). Is this a bug? : <p>As the title mentions <code>onPressed</code> of a Button on a ListView gets called automatically</p>
<p>I currently have something like this (this is a rough sketch of the code).
Basically, each row contains a button, and whenever a button becomes visible on the screen its onClick is called. I am not sure whether this is a bug in Flutter or I am doing something wrong — any suggestions?</p>
<pre><code>class ModelEmployeeRow extends StatelessWidget
{
dynamic getInviteButton(String text, {var lambda,var borderRadius,var height})
{
final skillTextStyle = baseTextStyle.copyWith(
color: Colors.white,//const Color(0xffb6b2df),
fontSize: 11.0,
fontWeight: FontWeight.w200
);
var container = new Container(
alignment: Alignment.center,
margin:EdgeInsets.fromLTRB(0.0,0.0,100.0,0.0),
padding:EdgeInsets.fromLTRB(0.0,5.0,0.0,5.0),
decoration: new BoxDecoration(
borderRadius: new BorderRadius.all(new Radius.circular(4.0)),
color: Colors.green
),
child: new Text(text, style:skillTextStyle),
);
var button = new FlatButton(
onPressed: inviteClicked(employee),
child: container
);
return button;
}
@override
Widget build(BuildContext context)
{
var stacked = new Stack(
children: <Widget>
[
//mainContainer,
getInviteButton("Test"),
employeeThumbnail,
],
);
return new Container(
child: stacked,
);
}
}
</code></pre>
<p>and the list view side is this</p>
<pre><code> var emplyeeListView = new ListView.builder(
itemCount: employeeListShared.length,
padding: new EdgeInsets.symmetric(vertical: 16.0),
itemBuilder: (context, index) {
return new ModelEmployeeRow(employeeListShared[index]);
},
);
</code></pre>
<p>Now when ever a row gets visible the onclick of the button is called.</p>
| 0debug
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.