instruction stringlengths 0 30k β |
|---|
My mnemonic for your reference:
`strptime` = str points to time = str -> time.
`strftime` = str from time = str <- time. |
OK, here's what's wrong with your code:
1. after assigning the srcObject of a video element you have to call play
notes:
in the setInterval function you get the image data in your let statement but you havent drawn anything to the canvas yet, although it would be available in the second iteration
you could do with some clarification that after invoking face(4) the `then` function is actually part of the getUserMedia, it confused me for a while
it's probably the most elaborate way to perform a `Math.floor` I have ever seen
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-js -->
'use strict';
// Webcam average-colour demo: mirrors the camera onto a canvas and sets
// the page background to the mean RGB of every r-th pixel of each frame.
var timer;
var stream;

// Stop button: cancel the sampling loop and release the camera tracks.
document.querySelector('input[value=stop]').onclick = e => {
    clearInterval(timer);
    if (stream) stream.getTracks().forEach(track => track.stop());
};

try {
    const w = window,
        d = document,
        ng = navigator,
        id = e => d.getElementById(e),
        cn = id('lh'),                        // canvas
        i = id('i'),                          // video element
        o = id('o'),                          // img mirroring the canvas
        cx = cn.getContext('2d', {
            willReadFrequently: true
        }),
        // face(r): start the camera and sample every r-th pixel.
        face = r => ng.mediaDevices.getUserMedia({
            video: { width: 100, height: 100 }
        }).then(s => {
            stream = s;
            i.srcObject = s;
            i.play();                         // required after assigning srcObject
            i.onloadedmetadata = e => {
                timer = setInterval(() => {
                    let c = 0,
                        k = -4,
                        h = cn.height = i.videoHeight,
                        w = cn.width = i.videoWidth,
                        R = 0, G = 0, B = 0;
                    // Draw the current frame BEFORE reading pixels; the
                    // original called getImageData first and therefore
                    // sampled the previous tick's frame (blank on tick 1).
                    cx.drawImage(i, 0, 0, w, h);
                    const dt = cx.getImageData(0, 0, w, h),
                        io = dt.data,
                        dl = io.length;
                    o.src = cn.toDataURL('image/webp');
                    while ((k += r * 4) < dl) {
                        ++c;
                        R += io[k];
                        G += io[k + 1];
                        B += io[k + 2];
                    }
                    // Integer-average each channel directly; the original
                    // built "R=~~(R/c)" strings and ran them through eval.
                    R = Math.floor(R / c);
                    G = Math.floor(G / c);
                    B = Math.floor(B / c);
                    d.body.style.background = `rgb(${R},${G},${B})`;
                }, 0);                        // -1 clamps to 0 anyway; be explicit
            };
        });
    face(4);
} catch (e) {
    alert(e);
}
<!-- language: lang-css -->
canvas {
border:1px solid lightgray;
}
video {
border:1px solid lightgray;
}
img {
border:1px solid lightgray;
}
input {
font-size:16px;
padding:5px;
}
<!-- language: lang-html -->
<canvas id=lh></canvas>
<video id=i></video>
<img id=o>
<input type=button value=stop>
<!-- end snippet -->
I added a stop feature so it can be turned off without reloading the page.
I'm afraid it won't actually run on the Stack Overflow website; I think they must have webcam access turned off and won't allow video to play.
I've created a much neater version for anyone who visits this page at a later date; it needs a canvas element and its derived 2d context.
// Grab the webcam, mirror each frame onto the external canvas/ctx, and
// tint the page background with the frame's average colour.
const mediaStream = await navigator.mediaDevices.getUserMedia({ video: true });
const player = document.createElement('video');
player.srcObject = mediaStream;
player.play();

(function sampleFrame() {
    ctx.drawImage(player, 0, 0, canvas.width, canvas.height);
    const frame = ctx.getImageData(0, 0, canvas.width, canvas.height);
    const byteCount = frame.data.length;
    let red = 0;
    let green = 0;
    let blue = 0;
    // Each pixel is 4 bytes (RGBA); the average over byteCount/4 pixels
    // is accumulated per-sample as value / byteCount * 4.
    for (let offset = 0; offset < byteCount; offset += 4) {
        red += frame.data[offset] / byteCount * 4;
        green += frame.data[offset + 1] / byteCount * 4;
        blue += frame.data[offset + 2] / byteCount * 4;
    }
    document.body.style.background = `rgb(${red},${green},${blue})`;
    if (abort) return;
    requestAnimationFrame(sampleFrame);
})();
I thought this was a nice little project, so I've added a working example. I don't think many of the sites that allow code generation allow video; anyway, here's what all the fuss is about.
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-js -->
// Demo driver: animate the balls, then colour the page background with
// the average of every stride-th pixel of the canvas.
var ctx = canvas.getContext('2d');
var abort = false;

const drawBalls = ballsmod(10);
const framerate = 20;
quit.onclick = () => { abort = true; };

(function tick() {
    ctx.clearRect(0, 0, canvas.width, canvas.height);
    drawBalls();
    const snapshot = ctx.getImageData(0, 0, canvas.width, canvas.height);
    const total = snapshot.data.length;
    const stride = 4;                  // sample every 4th pixel
    let r = 0;
    let g = 0;
    let b = 0;
    // Per-sample averaging: value / total * 4 * stride, summed.
    for (let i = 0; i < total; i += 4 * stride) {
        r += snapshot.data[i] / total * 4 * stride;
        g += snapshot.data[i + 1] / total * 4 * stride;
        b += snapshot.data[i + 2] / total * 4 * stride;
    }
    document.body.style.background = `rgb(${r},${g},${b})`;
    if (abort) return;
    setTimeout(tick, 1000 / framerate);
})();
// Factory: creates `num` randomly-placed bouncing balls on the global
// canvas and returns an update() function that advances and draws them.
function ballsmod(num) {
    var cols = ['blue', 'green', 'red', 'yellow', 'lightblue', 'lightgray'];
    var balls = [];
    for (var i = 0; i < num; i++) balls.push(ball());

    // Advance and repaint every ball (this is what the caller gets back).
    function update() {
        balls.forEach(update_ball);
    }

    // Random float in [offset, offset + size); offset defaults to 0.
    function rnd(size, offset) { return Math.random() * size + (offset || 0); }

    // Build one ball with random colour, radius, position and velocity.
    function ball() {
        // Math.floor, not parseInt: parseInt coerces the number through a
        // string and is not meant for numeric truncation.
        var col = cols[Math.floor(rnd(cols.length))];
        var r = rnd(20, 20);
        var x = rnd(canvas.width - 2 * r, r);
        var y = rnd(canvas.height - 2 * r, r);
        var dx = rnd(20, -10);
        var dy = rnd(20, -10);
        return { x, y, r, col, dx, dy, update };
    }

    // Move one ball, bounce it off the canvas edges, then draw it.
    function update_ball(ball) {
        ball.x += ball.dx;
        ball.y += ball.dy;
        if (ball.x - ball.r < 0 || ball.x + ball.r > canvas.width) {
            ball.dx *= -1;
            ball.x += ball.dx;
        }
        if (ball.y - ball.r < 0 || ball.y + ball.r > canvas.height) {
            ball.dy *= -1;
            ball.y += ball.dy;
        }
        ctx.beginPath();
        ctx.arc(ball.x, ball.y, ball.r, 0, 2 * Math.PI, false);
        ctx.fillStyle = ball.col;
        ctx.fill();
    }

    return update;
}
<!-- language: lang-css -->
body {
text-align:center;
}
canvas {
border:1px solid lightgray;
}
input {
font-size:16px;
padding:7px;
margin-left:10px;
vertical-align:top;
}
<!-- language: lang-html -->
<input id=quit type=button value=stop>
<canvas id=canvas></canvas>
<!-- end snippet -->
[1]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Bitwise_NOT
[2]: https://stackoverflow.com/questions/22003491/animating-canvas-to-look-like-tv-noise
|
Since all of your `search_in` functions are the same except for the `pee` parameter, there is no reason to even use a switch-case statement; instead you can just rely on overload resolution to call the correct function for you, like:
```
// Dispatch to the right search_in overload for the type of `pee`.
// The using-declarations pull every namespace's search_in into scope so
// ordinary overload resolution (not a switch) selects the matching one.
template <typename T>
void analyze(vegetable& place, const std::vector<std::uint8_t>& dee, T& pee, int ole, std::uint16_t cee)
{
using beta::search_in;
using gemma::search_in;
using zeta::search_in;
search_in(dee, pee, ole, cee);
}
```
You can see this working with your example code in this [live example][1].
[1]: https://godbolt.org/z/KbY7v6WPK |
How can I edit the "wake-word-detection notebook" on Coursera so it fits my own word? |
|machine-learning|deep-learning|google-colaboratory|recurrent-neural-network|keyword-spotting| |
null |
I removed this extension called "stylelint-plus" from vs code and this solved my issue.
If this helped you solve the issue, please vote up. :)
[1]: https://i.stack.imgur.com/yPqPZ.png |
My question is similar to the question found in https://stackoverflow.com/questions/75783581/define-typescript-interface-with-using-constant-names-as-keys but is a little bit different.
I start off with the same type of object whose keys are constants
```js
const KEYS = {
KEY1: 'hello',
KEY2: 'world'
} as const;
```
But instead of strings, I have objects as values. So in my code I want to know what *type* of object is in that key. (All of the object types are the same). So for example, I really want something like:
```js
const KEYS : {const: string} {
KEY1: 'hello',
KEY2: 'world'
} as const;
```
Is there some way to do this? I keep getting error ts2353 because the const doesn't appear properly. If possible, I would like a 1 line solution (aka, what do I need to replace `const: string` with to make this work) because I have almost 200 keys, so I'd prefer not rewriting each const individually as a type.
**Edit:**
I don't understand why are people are downvoting. I'm new to typescript, so if I'm asking the question wrong, please let me know instead of just downvoting because that's not helpful.
According to jcalz from below, it sounds like I want something like an "index signature"? I tried a few different types and nothing seems to work so I'm not sure what I'm doing wrong. |
Use : href='../../project/css/style.css' |
{"Voters":[{"Id":19369453,"DisplayName":"Piranavan R"}],"DeleteType":1} |
<html>
<title>
test
</title>
<!-- NOTE(review): <title> should be placed inside <head>. -->
<head>
<style>
/* Caption text positioned over the background image. */
.header {
position: absolute;
font-family: 'Lucida Sans', 'Lucida Sans Regular', 'Lucida Grande', 'Lucida Sans Unicode', Geneva, Verdana, sans-serif;
color:bisque;
top:100px;
left:300px;
}
body {
position: absolute;
background-image: url(pexels2.jpg);
height: 50%;
width: 50%;
background-size: cover;
}
/* Image box meant to animate on hover (2s transition). */
.container1 {
position: absolute;
height: 330px;
width: 330px;
left: 100px;
top: 250px;
background-size: contain;
transition: 2s;
}
.container1:hover {
/* NOTE(review): scaleY() takes a unitless number, so scaleY(-20px) is
   invalid and the declaration is dropped — hence no hover effect.
   translateY(-20px) is presumably what was intended. */
transform: scaleY(-20px);
}
</style>
</head>
<body>
<div class="container1">
<img src="pexels-pick2.jpg" width="330px" height="330px">
</div>
</body>
</html>
I think the box should go up when I hover over it, but it doesn't. |
CSS :hover effect is not working in my code |
This does not provide any information on why my first attempt was not working, but I randomly tried some things, one of them being doing both the bundle upload and the release creation in one edit, and that worked ¯\\\_(ツ)\_/¯. Here is the adjusted code for anyone encountering the same issue:
```
import os
from googleapiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials

# Identifier of the app on Google Play.
app_id = "com.mycompany.myapp.app"
print(f"App ID set to {app_id}.")

# Path to the signed Android App Bundle to upload.
aab_file_path = "/output/com.mycompany.myapp.app-Signed.aab"
print(f"Path to bundle set to {aab_file_path}.")

# Path to the service-account key used to authenticate.
service_account_file_path = "/service-account-key.json"
print(f"Path to service account key set to {service_account_file_path}.")

# Build credentials and the androidpublisher API client.
credentials = ServiceAccountCredentials.from_json_keyfile_name(
    service_account_file_path, ["https://www.googleapis.com/auth/androidpublisher"]
)
service = build("androidpublisher", "v3", credentials=credentials)

# A single edit must cover BOTH the bundle upload and the track update:
# doing them in separate edits was the original source of failure.
print("Creating an edit for bundle upload...")
request = service.edits().insert(packageName=app_id)
response = request.execute()
edit_id = response["id"]
print(f"Edit ID for bundle upload is {edit_id}.")

# Upload the bundle. media_body accepts a file path directly, so the
# previous `with open(...)` block (whose handle was never used) is gone.
request = service.edits().bundles().upload(
    packageName=app_id,
    editId=edit_id,
    media_body=aab_file_path,
    media_mime_type="application/octet-stream",
)
print("Uploading bundle...")
response = request.execute()
version_code = response.get("versionCode")
print(f"Bundle version code is {version_code}.")

# Describe a draft release on the internal track for the new version.
release = {
    "versionCodes": [version_code],
    "releaseNotes": [
        {
            "language": "en_GB",
            "text": "Minor Improvements",
        }
    ],
    "status": "draft",
}
track = {
    "track": "internal",
    "releases": [release],
}

# Attach the release to the internal track, then commit the whole edit.
request = service.edits().tracks().update(
    packageName=app_id, editId=edit_id, track="internal", body=track
)
response = request.execute()

request = service.edits().commit(packageName=app_id, editId=edit_id)
response = request.execute()
``` |
{"Voters":[{"Id":1745001,"DisplayName":"Ed Morton"}]} |
You'd need to type-erase the deferred async operations. Or write out the entire composed operation so the internal type discrepancies don't leak out to `co_composed`.
Both aren't without overhead, so perhaps you might flip the visitation inside out so the composed operation is never variant to begin with:
// Per-transport implementation: composes the async send for ONE concrete
// Connection type, so the coroutine body never sees the std::variant and
// the internal operation types stay consistent.
template <typename Connection, typename CompletionToken> auto non_awaitable_func_impl(Connection& con, CompletionToken&& token) {
return asio::async_initiate<CompletionToken, Sig>(
asio::experimental::co_composed<Sig>([](auto state, Connection& con) -> void {
auto [ec] = co_await con.send(as_tuple(asio::deferred));
co_yield state.complete(ec);
}),
token, con);
}
// Visit the variant first, then defer to the typed implementation.
// FIX: the result of std::visit must be returned — without it the
// function deduces void and tokens that produce a value (use_future,
// use_awaitable, deferred) lose their result.
template <typename CompletionToken> auto non_awaitable_func(connection& con, CompletionToken&& token) {
    return std::visit(
        [&token](auto& con) { return non_awaitable_func_impl(con, std::forward<CompletionToken>(token)); },
        con);
}
This works. You can combine the two if you don't mind readability:
// Combined form: visit the variant and initiate the composed operation
// inside the visitor, so each lambda instantiation is specialised for
// one transport type. The std::visit result is returned, so value-
// producing tokens (use_future, use_awaitable) work as expected.
template <typename CompletionToken> auto non_awaitable_func(connection& con, CompletionToken&& token) {
return std::visit(
[&token](auto& con) {
return asio::async_initiate<CompletionToken, Sig>(
asio::experimental::co_composed<Sig>([&con](auto state) -> void {
auto [ec] = co_await con.send(as_tuple(asio::deferred));
co_yield state.complete(ec);
}),
token);
},
con);
}
See it **[Live On Coliru](https://coliru.stacked-crooked.com/a/12346f9a8a2cc25f)** (or [Godbolt](https://godbolt.org/z/Tx5vvcM63))
```c++
// Self-contained demo: a std::variant-based connection whose async send
// is composed per concrete transport type, so co_composed never sees
// the variant.
#pragma GCC diagnostic ignored "-Wmismatched-new-delete"
#include <chrono>
#include <iostream>
#include <boost/asio.hpp>
#include <boost/asio/experimental/co_composed.hpp>
using namespace std::chrono_literals;
namespace asio = boost::asio;
using error_code = boost::system::error_code;
// Common completion signature shared by all send operations.
using Sig = void(error_code);
// Fake TCP transport: "sends" by waiting on a 1s timer, completes OK.
struct tcp {
template <asio::completion_token_for<Sig> Token> auto send(Token&& token) {
// pseudo implementation
auto tim = std::make_unique<asio::steady_timer>(exe_, 1s);
return tim->async_wait(consign(std::forward<Token>(token), std::move(tim)));
}
asio::any_io_executor exe_;
};
// Fake TLS transport: completes immediately with bad_message.
struct tls {
template <asio::completion_token_for<Sig> Token> auto send(Token&& token) {
return dispatch( // pseudo implementation
append(std::forward<Token>(token), make_error_code(boost::system::errc::bad_message)));
}
asio::any_io_executor exe_;
};
using connection = std::variant<tcp, tls>;
// The visit happens OUTSIDE the coroutine; each instantiation of the
// visitor lambda composes the operation for one concrete transport.
template <asio::completion_token_for<Sig> Token> auto non_awaitable_func(connection& con, Token&& token) {
return std::visit(
[&token](auto& con) {
return asio::async_initiate<Token, Sig>(
asio::experimental::co_composed<Sig>([&con](auto state) -> void {
auto [ec] = co_await con.send(as_tuple(asio::deferred));
co_return state.complete(ec);
}),
token);
},
con);
}
int main() {
asio::io_context ioc;
connection
con1 = tls{ioc.get_executor()},
con2 = tcp{ioc.get_executor()};
// Callback tokens: print each connection's completion error.
non_awaitable_func(con1, [&](error_code ec) { std::cout << "cb1:" << ec.message() << std::endl; });
non_awaitable_func(con2, [&](error_code ec) { std::cout << "cb2:" << ec.message() << std::endl; });
ioc.run();
}
```
Note that it is important to let ADL find the correct `make_error_code` overload.
Prints:
cb1:Bad message
cb2:Success
## UPDATE: Promise!
I had a brainwave. Another experimental type, `asio::experimental::promise<>` which, like `std::promise`, apparently does some type erasure internally, yet, unlike `std::future` also can be `await`-transformed in Asio coroutines.
And indeed it works:
// Variant-agnostic composed send: experimental::use_promise type-erases
// the per-transport operation, so the co_composed body can await either
// variant alternative through one common promise type.
template <asio::completion_token_for<Sig> Token> //
auto async_send(connection& con, Token&& token) {
return asio::async_initiate<Token, Sig>(
boost::asio::experimental::co_composed<Sig>([&con](auto /*state*/) -> void {
auto [ec] = co_await std::visit(
[](auto& c) { return c.send(asio::as_tuple(asio::experimental::use_promise)); }, con);
co_return {ec};
}),
token);
}
Here's a way more complete test program:
**[Live On Coliru](https://coliru.stacked-crooked.com/a/7d0be2c3d4ba00d1)** or [Godbolt](https://godbolt.org/z/ofo189Kxz)
```c++
// Extended demo: async_send composes the operation once and visits the
// variant inside the coroutine, using experimental::promise as a
// type-erasing, awaitable intermediary. Exercises callback, future and
// awaitable completion tokens.
#pragma GCC diagnostic ignored "-Wmismatched-new-delete"
#include <boost/asio.hpp>
#include <boost/asio/experimental/co_composed.hpp>
#include <boost/asio/experimental/promise.hpp>
#include <boost/asio/experimental/use_coro.hpp>
#include <boost/asio/experimental/use_promise.hpp>
#include <boost/core/demangle.hpp>
#include <chrono>
#include <iostream>
#include <syncstream>
using namespace std::chrono_literals;
namespace asio = boost::asio;
using error_code = boost::system::error_code;
// Common completion signature shared by all send operations.
using Sig = void(error_code);
// Thread-safe logging helper (output may come from pool threads).
static inline auto out() { return std::osyncstream(std::clog); }
// Fake TCP transport: completes successfully after a 1s timer.
struct tcp {
template <asio::completion_token_for<Sig> Token> //
auto send(Token&& token) {
// pseudo implementation
auto tim = std::make_unique<asio::steady_timer>(exe_, 1s);
return tim->async_wait(consign(std::forward<Token>(token), std::move(tim)));
}
asio::any_io_executor exe_;
};
// Fake TLS transport: completes immediately with bad_message.
struct tls {
template <asio::completion_token_for<Sig> Token> //
auto send(Token&& token) {
return dispatch( // pseudo implementation
append(std::forward<Token>(token), make_error_code(boost::system::errc::bad_message)));
}
asio::any_io_executor exe_;
};
using connection = std::variant<tcp, tls>;
// use_promise erases the transport-specific operation type so both
// variant alternatives yield the same awaitable promise type.
template <asio::completion_token_for<Sig> Token> //
auto async_send(connection& con, Token&& token) {
return asio::async_initiate<Token, Sig>(
boost::asio::experimental::co_composed<Sig>([&con](auto /*state*/) -> void {
auto [ec] = co_await std::visit(
[](auto& c) { return c.send(asio::as_tuple(asio::experimental::use_promise)); }, con);
co_return {ec};
}),
token);
}
// Runtime type of the active variant alternative (used for log labels).
template <class V> // HT: https://stackoverflow.com/a/53697591/85371
std::type_info const& var_type(V const& v) {
return std::visit([](auto&& x) -> decltype(auto) { return typeid(x); }, v);
}
int main() {
asio::thread_pool ioc(1);
connection
con1 = tls{ioc.get_executor()},
con2 = tcp{ioc.get_executor()};
{ // callback
async_send(con1, [&](error_code ec) { out() << "cb1:" << ec.message() << std::endl; });
async_send(con2, [&](error_code ec) { out() << "cb2:" << ec.message() << std::endl; });
}
{ // use_future
auto f1 = async_send(con1, as_tuple(asio::use_future));
auto f2 = async_send(con2, as_tuple(asio::use_future));
out() << "f1: " << std::get<0>(f1.get()).message() << std::endl;
out() << "f2: " << std::get<0>(f2.get()).message() << std::endl;
try {
// Without as_tuple, a failure surfaces as a thrown system_error.
async_send(con1, asio::use_future).get();
} catch (boost::system::system_error const& se) {
out() << "alternatively: " << se.code().message() << std::endl;
}
}
{ // use_awaitable
for (connection& con : {std::ref(con1), std::ref(con2)}) {
auto name = "coro-" + boost::core::demangle(var_type(con).name());
co_spawn(
ioc,
[&con, name]() -> asio::awaitable<void> {
auto [ec_defer] = co_await async_send(con, as_tuple(asio::deferred));
auto [ec_aw] = co_await async_send(con, as_tuple(asio::use_awaitable));
out() << name << ": " << ec_defer.message() << "/" << ec_aw.message() << std::endl;
co_await async_send(con, asio::deferred); // will throw
},
// Completion handler: surface any exception from the coroutine.
[name](std::exception_ptr e) {
try {
if (e)
std::rethrow_exception(e);
} catch (boost::system::system_error const& se) {
out() << name << " threw " << se.code().message() << std::endl;
}
});
}
}
ioc.join();
}
```
Printing e.g.
cb1:Bad message
f1: Bad message
cb2:Success
f2: Success
alternatively: Bad message
coro-tls: Bad message/Bad message
coro-tls threw Bad message
coro-tcp: Success/Success |
I doubt your program is "crashing" at that particular line of code, because you have exception handling there, which would catch any error thrown in that method.
The only notable difference between the old and the new variant, is that the new one is asynchronous. And somewhere in the callstack that leads to this particular method to be called, you probably don't properly `await` the result of the asynchronous method. Ie you have something like this
// Illustrative pseudocode: a synchronous Main eventually reaches async
// methodB without awaiting it, so the process can exit before methodC's
// HTTP call completes.
public static void Main(string[] args) {
...
methodA();
}
// Synchronous caller: invoking methodB() here discards the Task, which
// is where the asynchronous context is lost.
void methodA() {
...
methodB();
}
async Task methodB() {
...
await methodC();
}
// The try/catch here would swallow any exception — so the "crash" is
// really the process ending with the Task still pending.
async Task<...> methodC() {
try {
HttpResponseMessage responseMessage = await client.GetAsync(requestUrl);
...
return ...
} catch (Exception ex) {
...
}
}
So when you now call `methodA()` from a synchronous context, it calls `methodB()` without awaiting its result. That's perfectly valid (even though it will generate a warning in Visual Studio), but at this point you lose your asynchronous context. This means that `methodA()` will finish *before* `methodB()`, and this will bubble up the call stack to the `Main()` method, which will also reach its end before `methodB()` is finished.
And when the `Main` method ends, the program terminates. Regardless of whether there are still asynchronous operations pending or not.
This probably happened when you refactored `JiraRequest(string api)` from being synchronous to asynchronous.
You can try to attach a debugger to your program and setting breakpoints
1. directly before `await client.GetAsync(requestUrl)`
2. directly after `await client.GetAsync(requestUrl)`
3. at the very last statement of your `Main` method
I assume your code will hit breakpoints 1 and 3, but not 2. If that's the case, your program isn't crashing but terminating normally, because the callstack is empty.
To resolve that issue, make sure
* all methods in that callstack are `async`
* all async methods are properly `await`ed
|
null |
I'm attempting to display a list of movies on a website using Jinja2 (a template engine for Python) and Bootstrap (a front-end framework). However, I'm having difficulty getting the movie cards to display correctly. When trying to display the movie cards using Jinja2 and Bootstrap, the cards aren't being displayed as expected. I'm facing difficulties in correctly displaying the background image of the card, as well as ensuring that the movie information is displayed clearly and in an organized way.
```
<!--{% extends 'base.html' %}
{% block conteudo %}
<h2 style="text-align: center;">Teste de filmes</h2>
<hr>
<ul class="list-group">
{% for filme in filmes %}
<li>{{filme.title}}</li>
<p>{{ filme.overview }}</p>
<p>Release Date: {{ filme.release_date }}</p>
<p>Vote Average: {{ filme.vote_average }}</p>
<p>Vote Count: {{ filme.vote_count }}</p>
<hr>
{% endfor %}
</ul>
{% endblock conteudo %}-->
{# Movie list: one Bootstrap card per film, intended three per row. #}
{% extends 'base.html' %}
{% block conteudo %}
<h2 style="text-align:center;">Lista de Filmes</h2>
<hr>
<div class="row">
{% for filme in filmes %}
<div class="col-md-4">
<div class="card" style="width: 18rem;">
{# NOTE(review): presumably backdrop_path can be null/empty for some
   films, which would produce a broken image — verify against the API
   response. #}
<img src="http://image.tmdb.org/t/p/w500{{filme.backdrop_path}}" class="card-img-top" alt="...">
<div class="card-body">
<h5 class="card-title">{{filme.title}}</h5>
<p class="card-text">{{filme.overview}}</p>
<hr>
<h4>Nota mΓ©dia<span class="badge bg-secondary">{{filme.vote_average}}</span></h4>
</div>
</div>
</div>
{# NOTE(review): .col-md-4 columns already wrap three per row inside a
   single .row, so this manual row split is redundant and can leave an
   empty trailing row when the count is a multiple of 3. #}
{% if loop.index % 3 == 0 %}
</div><div class="row">
{% endif %}
{% endfor %}
</div>
{% endblock %}
```
Checking if the URL of the movie's background image is correct and accessible.
Ensuring that all Bootstrap classes are being applied correctly.
Verifying that the movies variable is being passed correctly to the template.
Any help or suggestions would be greatly appreciated! Thank you!
[click to see project image][1]
[1]: https://i.stack.imgur.com/1rPAV.png |
Instead of an array you can use a collection type like **List<BClass>**. This way, you can dynamically add BClass instances at runtime without worrying about the initial size of the array. This is how to do it:
```
using System.Collections.Generic;

/// <summary>Holds an author's details plus a growable list of titles.</summary>
public class ClassName
{
    public int titleNum { get; set; }

    public string author { get; set; }

    // Initialised inline so the list is never null — equivalent to the
    // assignment the explicit parameterless constructor performed.
    public List<BClass> Titles { get; set; } = new List<BClass>();
}
```
Here is how to add items to the collection:
```
// Same construction expressed with an object initialiser.
var a = new ClassName
{
    titleNum = 1,
    author = "Author Name",
};
a.Titles.Add(new BClass { titleNum = 1, Title = "Title 1" });
a.Titles.Add(new BClass { titleNum = 2, Title = "Title 2" });
``` |
Can someone explain me why I get this error
> INSERT statement conflicted with the FOREIGN KEY constraint "FK_ArticleTag_Tags_ArticleId". The conflict occurred in database "Blog", table "dbo.Tags", column 'TagId'"
```
// Blog article with a many-to-many relationship to Tag via the
// EF-generated ArticleTag join table.
public class Article
{
public Article()
{
Comments = new HashSet<Comment>();
Tags = new HashSet<Tag>();
}
[Key]
public int ArticleId { get; set; }
public int? CategoryId { get; set; }
[StringLength(30)]
public string ArticleName { get; set; } = null!;
public string? ArticleDescription { get; set; }
public bool Visibility { get; set; }
[ForeignKey("CategoryId")]
[InverseProperty("Articles")]
public virtual Category Category { get; set; }
[InverseProperty("Article")]
public virtual ICollection<Comment> Comments { get; set; }
// NOTE(review): a [ForeignKey] attribute on a many-to-many collection
// navigation appears to mis-map the join table — "TagId" here looks like
// it makes EF point ArticleTag.ArticleId at dbo.Tags, which matches the
// FK_ArticleTag_Tags_ArticleId conflict in the error. Removing the
// attribute and letting EF infer the join keys should be verified.
[ForeignKey("TagId")]
[InverseProperty("Articles")]
public virtual ICollection<Tag> Tags { get; set; }
}
// Tag entity on the other side of the Article many-to-many relation.
public class Tag
{
public Tag()
{
Articles = new HashSet<Article>();
}
[Key]
public int TagId { get; set; }
[Required]
[StringLength(50)]
public string Title { get; set; }
// NOTE(review): [ForeignKey("ArticleId")] on a many-to-many collection
// navigation appears to contribute to the join-table mis-mapping that
// the INSERT error reports — verify whether removing it (and the
// matching attribute on Article.Tags) lets EF infer the keys correctly.
[ForeignKey("ArticleId")]
[InverseProperty("Tags")]
public virtual ICollection<Article>? Articles { get; set; }
}
```
After migration, with 50 articles and 20 tags, I cannot add a new row to (autogenerated) `ArticleTag` table where `ArticleId` is greater than 20.
I have no idea what this is about, can someone explain to me what I'm doing wrong? |
Hello Stack Overflow community,
I am encountering a peculiar issue with my PyTorch model where the presence of an initialized but unused FeedForward Network (FFN) affects the model's accuracy. Specifically, when the FFN is initialized in my CRS_A class but not used in the forward pass, my model's accuracy is higher compared to when I completely remove (or comment out) the FFN initialization. The FFN is defined as follows in my model's constructor:
# Cross-attention block followed by a small Conv1d projection head.
# NOTE(review): self.ffn is constructed but never used in forward().
# Its parameter initialisation still consumes draws from the global RNG,
# which shifts the initial weights of the later layers under a fixed
# seed — the accuracy difference is most likely this seeding side-effect
# rather than regularisation. TODO confirm by re-seeding per layer.
class CRS_A(nn.Module):
def __init__(self, modal_x, modal_y, hid_dim=128, d_ff=512, dropout_rate=0.1):
super(CRS_A, self).__init__()
self.cross_attention = CrossAttention(modal_y, modal_x, hid_dim)
# Unused in forward(); see the note above.
self.ffn = nn.Sequential(
nn.Conv1d(modal_x, d_ff, kernel_size=1),
nn.GELU(),
nn.Dropout(dropout_rate),
nn.Conv1d(d_ff, 128, kernel_size=1),
nn.Dropout(dropout_rate),
)
self.norm = nn.LayerNorm(modal_x)
self.linear1 = nn.Conv1d(1024, 512, kernel_size=1)
self.linear2 = nn.Conv1d(512, 300, kernel_size=1)
self.dropout1 = nn.Dropout(0.1)
self.dropout2 = nn.Dropout(0.1)
# Residual cross-attention, then norm; permute to (B, C, T) for Conv1d.
def forward(self, x, y, adj):
x = x + self.cross_attention(y, x, adj) #torch.Size([5, 67, 1024])
x = self.norm(x).permute(0, 2, 1)
x = self.dropout1(F.gelu(self.linear1(x))) #torch.Size([5, 512, 67])
x_e = self.dropout2(F.gelu(self.linear2(x))) #torch.Size([5, 300, 67])
return x_e, x
As you can see, the self.ffn is not used in the forward pass. Despite this, removing or commenting out the FFN's initialization leads to a noticeable drop in accuracy.
Could this be due to some form of implicit regularization, or is there another explanation for this behavior? Has anyone encountered a similar situation, and how did you address it? Any insights or explanations would be greatly appreciated. |
There are two ways to filter documents that match a kNN query [pre-filters and post-filters][1]: You can choose the one that suits your use case.
1. pre-filtering β filter is applied during the approximate kNN search to ensure that `k` matching documents are returned.
2. post-filtering β filter is applied after the approximate kNN search completes, which results in fewer than `k` results, even when there are enough matching documents.
Yes, the first approach can be slow and the second approach can have low recall. You can increase the speed by decreasing the `num_candidates` value, and you can tune the search relevancy by increasing `num_candidates`. It's a trade-off between search speed and relevancy.
### Pre-filtering
Pre-filtering is supported through the filter parameter **inside** of the kNN query. It creates a logical `OR` condition between `lexical search` and `kNN search`.
### Post-filtering
Post-filtering is supported through the filter parameter **outside** of the kNN query. It creates a logical `AND` condition between `lexical search` and `kNN search`.
---
> Continue to read for more details and an example.
separated kNN search and lexical search results
#match query
#1 hits.
GET collection-with-embeddings/_search
{
"query": {
"match": {
"text": "GLOSSARY"
}
}
}
#kNN query
#908 hits
GET collection-with-embeddings/_search
{
"knn": {
"field": "text_embedding.predicted_value",
"query_vector_builder": {
"text_embedding": {
"model_id": "sentence-transformers__msmarco-minilm-l-12-v3",
"model_text": "how do I cook my pasta?"
}
},
"k": 1000,
"num_candidates": 1000
},
"_source": [
"id",
"text"
]
}
\*Β post-filtering (lexical filter outside of the kNN)
#hits: 908
GET collection-with-embeddings/_search
{
"query": {
"term": {
"text": "GLOSSARY"
}
},
"knn": {
"field": "text_embedding.predicted_value",
"query_vector_builder": {
"text_embedding": {
"model_id": "sentence-transformers__msmarco-minilm-l-12-v3",
"model_text": "how do I cook my pasta?"
}
},
"k": 1000,
"num_candidates": 1000
},
"_source": [
"id",
"text"
]
}
> The way vector and lexical matches are combined is through a
> disjunction (i.e., a logical OR condition) where the score of each
> document is computed using Convex Combination, i.e. the weighted sum
> of its vector and lexical scores, as we saw earlier.
> https://opster.com/guides/elasticsearch/operations/elasticsearch-hybrid-search/#Hybrid-search-with-dense-models
\* pre-filtering (lexical filter inside of the kNN)
#hits 1
GET collection-with-embeddings/_search
{
"knn": {
"field": "text_embedding.predicted_value",
"query_vector_builder": {
"text_embedding": {
"model_id": "sentence-transformers__msmarco-minilm-l-12-v3",
"model_text": "how do I cook my pasta?"
}
},
"filter": {
"match": {
"text": "GLOSSARY"
}
},
"k": 1000,
"num_candidates": 1000
},
"_source": [
"id",
"text"
]
}
[1]: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-knn-query.html#knn-query-filtering |
The easiest change is to simply listen for the `input` (or `change` if you prefer) event, and then toggle a class on the `<body>` element based on the checked/unchecked state of the `<input>` as below, with explanatory comments in the code:
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-js -->
// Keep the page's alternate colour scheme in sync with the checkbox:
// the 'alt-color' class is present on <body> exactly while the first
// .toggle__check input is checked. (Use querySelectorAll + a loop if
// you have several toggles.)
const toggleCheck = document.querySelector('.toggle__check');

function syncBodyColour(evt) {
    // classList.toggle(name, force) adds the class when `force` is true
    // and removes it when false; evt.currentTarget is the element the
    // handler was bound to (the <input>), whose `checked` is a Boolean.
    const isChecked = true === evt.currentTarget.checked;
    document.body.classList.toggle('alt-color', isChecked);
}

// Fire on every 'input' event (use 'change' if you prefer).
toggleCheck.addEventListener('input', syncBodyColour);
<!-- language: lang-css -->
:root {
/*========== Colors ==========*/
/*Color mode HSL(hue, saturation, lightness)*/
/* purely as an aside, I've changed your color syntax to use
CSS Colors Level 4 syntax, in which the original
"hsl(234, 12%, 35%) can be written as a space-separated
list of values hsl(<hue> <saturation> <lightness>), and may
include opacity: hsl(<hue> <saturation> <lightness> / <alpha>)
without having to specify hsla(); while I've deliberately
added the 'deg' unit to the initial value, that's implied if
the unit is absent and not necessary to specify; but it is
a personal preference of my own. */
--line-color: hsl(234deg 12% 35%);
--active-color: hsl(234deg 100% 98%);
--inactive-color: hsl(234deg 20% 68%);
--body-color: hsl(189deg 49% 87%);
--background-colour: hsl(189deg 84% 14%);
}
* {
box-sizing: border-box;
}
body {
height: 100vh;
margin: 0;
display: grid;
place-items: center;
background-color: var(--body-color);
/* here we add a transition, in order that the background-color
transitions between different colour-values: */
transition: background-color 300ms linear;
}
/* here we specify a different background-color for the
<body> element if it has the class-name of 'alt-color': */
body.alt-color {
/* as an aside, I strongly suggest that you use the same
spelling of "colour" throughout your CSS; it's up to
you how you spell it (color, colour, or even kolour)
would all be perfectly valid but switching between
spellings (above you have '--body-color' and here you
have '--background-colour') increases the likelihood
of unexpected errors/failures: */
background-color: var(--background-colour);
}
.toggle__content {
display: grid;
row-gap: 1.5rem;
}
.toggle__label {
cursor: pointer;
padding-block: .5rem;
}
.toggle__check {
display: none;
}
.toggle__rail {
position: relative;
width: 52px;
height: 4px;
background-color: var(--line-color);
border-radius: 2rem;
}
.toggle__circle {
display: block;
width: 24px;
height: 24px;
background-color: var(--body-color);
box-shadow: inset 0 0 0 4px var(--inactive-color);
border-radius: 50%;
position: absolute;
left: 0;
top: 0;
bottom: 0;
margin: auto 0;
transition: transform .4s, box-shadow .4s;
z-index: 2;
}
.toggle__border {
position: absolute;
width: 32px;
height: 32px;
background-color: var(--body-color);
border-radius: 50%;
left: -4px;
top: 0;
bottom: 0;
margin: auto 0;
transition: transform .4s;
}
/* Toggle animation effects */
.toggle__check:checked~.toggle__rail .toggle__circle {
transform: translateX(28px);
box-shadow: inset 0 0 0 12px var(--active-color);
}
.toggle__check:checked~.toggle__rail .toggle__border {
transform: translateX(28px);
}
<!-- language: lang-html -->
<div class="toggle__content">
<label class="toggle__label">
<input type="checkbox" class="toggle__check">
<div class="toggle__rail">
<span class="toggle__circle"></span>
<span class="toggle__border"></span>
</div>
</label>
</div>
<!-- end snippet -->
|
I would like to set one culling mask per eye using the XRCamera3D (or alternative) in Godot. |
How do you set a separate culling mask per eye using XRCamera3D in Godot
|godot|virtual-reality|openxr| |
Not much to add to the title :
I have a time series with values every 5 min. I want to upsample it to have values every 10 seconds (the last known value is repeated) so that I can combine these values with values from another time series providing values every 10 s.
How do I do that, please? Is that even possible?
Would it be easier with PromQL in Prometheus ? |
How to upsample a time series using Flux (or InfluxQL if easier)?
|prometheus|influxdb|promql|influxql|flux-influxdb| |
I've been using dynamic imports with ssr set to false as a workaround for several shadcn components. I also think modals should be dynamic in general.
Eg:
const Modal = dynamic(()=>import("./pathToFile/Modal"), {ssr: false, loading: ()=> <AnyPlaceHolder />}) |
[enter image description here](https://i.stack.imgur.com/rWmbM.png) Why, after installing Node.js, does an error like that occur when I run various JavaScript commands? It seems it's not because the code has an error, but because Node.js won't connect to the server.
[enter image description here](https://i.stack.imgur.com/PPc9i.png)When I tried to check whether node.js was connected to localhost or not, it turned out it wasn't connected, so how do I get node.js to connect to 127.0.0.1:300? |
ReferenceError: document is not defined(javascript) |
|javascript|node.js|installation|localhost|visual-studio-code-server| |
null |
|java|javabeans|method-reference|propertychangelistener| |
I am having issues with my SQL statement below. The NOT LIKE and OR statement is producing FALSE statements, in that it is producing the output when it should not, since I am using a NOT LIKE. When I put the statement as a stand-alone (without the OR condition), it works as intended.
For example, I am still seeing 'automation' in my ld.lead_name column.
Any help would be greatly appreciated! I can't figure out why this is not working...
SQL Statement
SELECT
ld.status,
ld.lead_name
FROM
DATAWAREHOUSE.SFDC_STAGING.SFDC_LEAD AS ld
WHERE
ld.status <> 'Open'
AND (
ld.lead_name NOT LIKE '%test%'
OR ld.lead_name NOT LIKE '%t3st%'
OR ld.lead_name NOT LIKE '%auto%'
OR ld.lead_name NOT LIKE '%autoXmation%'
OR ld.lead_name NOT LIKE 'automation%'
)
;
|
SQL in Snowflake - using NOT LIKE and OR statement together producing FALSE output |
|sql|snowflake-cloud-data-platform| |
Entity Framework Core 8 dbcontext - can't some rows in many to many relationship |
|entity-framework-core| |
I have a HTTP server and now want to handle POST requests with multiple content types and unknown values. Specifically I want to be able to get the values from the POST request body, both if it's a raw JSON (`application/json`) and also if it is sent from a form (`application/x-www-form-urlencoded` or even `multipart/form-data`). Also the values are not known in advance, so I can't use a predefined struct to save the data into.
Is there some way to handle both cases simultaneously?
I know that for form values, you use `r.ParseForm()` (or `r.Form` and `r.PostForm`) to get the data and for JSON you would read `r.Body` where `r` is the request object. So if I want to handle both form and raw JSON, do I have to check for the content type and then use the appropriate methods or is there some better way? |
Handling both JSON and form values in POST request body with unknown values in Golang |
|json|go|http|request|http-post| |
I have a MERN stack application which I deployed to Render (server side as web service and front end as static).
In my application, I have a profile where users can upload avatar image. I store an image as a URL string in my MongoDB [Atlas_][1] in User model.
Images are saved on the server in uploads folder. The image upload works fine and image fetches successfully, from both deployment and local environment. But after a while I start to get Internal server error when fetching same image.
What is the reason and how to solve this issue? Or is there a better way of storing images in MongoDB Atlas without using cloud or GridFs?
Here's how I set user avatar
```
/**
 * Express handler: saves the uploaded avatar image to the local `uploads`
 * folder and stores the generated file name on the user document.
 *
 * Expects `req.user.id` (set by auth middleware) and `req.file.buffer`
 * (set by an upload middleware such as multer with memory storage —
 * presumably; confirm against the route setup).
 *
 * Responds 401 (no auth), 404 (unknown user), 400 (no file),
 * 200 on success, 500 on unexpected failure.
 *
 * NOTE(review): files written to the local disk on platforms like Render
 * are ephemeral — they disappear on redeploy/restart, which would explain
 * avatars that work at first and later return errors.
 */
const setUserAvatar = async (req, res) => {
    try {
        if (!req.user || !req.user.id) {
            return res.status(401).json({ message: 'Unauthorized' });
        }
        const user = await User.findById(req.user.id);
        if (!user) {
            return res.status(404).json({ message: 'User not found' });
        }
        if (!req.file) {
            return res.status(400).json({ message: 'No file uploaded' });
        }
        // Unique-per-upload name; Date.now() avoids clobbering a concurrent upload.
        const fileName = `avatar_${user.id}_${Date.now()}.png`;
        const filePath = path.join(__dirname, '..', 'uploads', fileName);
        // Use the promise-based API instead of fs.writeFileSync: a synchronous
        // write inside an async handler blocks the whole event loop.
        await fs.promises.writeFile(filePath, req.file.buffer);
        user.avatar = { data: fileName };
        await user.save();
        res.status(200).json({ message: 'Avatar uploaded successfully' });
    } catch (error) {
        console.error('Error uploading avatar', error);
        res.status(500).json({ message: 'Internal Server Error' });
    }
};
```
And get:
```
/**
 * Express handler: streams back the avatar file recorded on the user document.
 *
 * Fixes relative to the original:
 *  - `user` was dereferenced (`user.avatar.data === null`) before the
 *    null-check on `user`, so an unknown user crashed into the catch block;
 *  - the guard `if (!user || !user.avatar.data)` was inverted — it sent the
 *    missing value instead of returning 404;
 *  - `image/*` is not a valid response Content-Type; files are saved with a
 *    `.png` extension by setUserAvatar, so `image/png` is used.
 *
 * Responds 404 when the user or avatar is missing, 200 with the image bytes
 * on success, 500 on unexpected failure (including a missing file on disk).
 */
const getUserAvatar = async (req, res) => {
    const userId = req.user.id;
    try {
        const user = await User.findById(userId);
        // Check the user first, then the stored file name, before touching disk.
        if (!user || !user.avatar || !user.avatar.data) {
            return res.status(404).json({ message: 'No avatar found' });
        }
        const filePath = path.join(__dirname, '..', 'uploads', user.avatar.data);
        // Promise-based read: avoid blocking the event loop with readFileSync.
        const avatarData = await fs.promises.readFile(filePath);
        res.setHeader('Content-Type', 'image/png');
        res.send(avatarData);
    } catch (error) {
        console.error('Error fetching avatar', error);
        res.status(500).json({ message: 'Internal Server Error' });
    }
};
```
How I handle avatar change on front end:
```
/**
 * Uploads the selected file as the user's avatar, then refreshes the user
 * data and shows a toast describing the outcome. Redirects to /login when
 * no auth token is stored.
 */
const handleAvatarChange = async (e) => {
    const token = localStorage.getItem('token');
    // Guard clause: without a token there is nothing to upload.
    if (!token) {
        console.log('No token found');
        navigate('/login');
        return;
    }
    try {
        const formData = new FormData();
        formData.append('avatar', e.target.files[0]);
        const response = await fetch('RENDERLINK/avatar', {
            method: 'POST',
            headers: { Authorization: `Bearer ${token}` },
            body: formData,
            credentials: 'include',
        });
        switch (response.status) {
            case 200:
                getUser();
                toast.success('Avatar changed successfully');
                break;
            case 400:
                toast.error('No file selected');
                break;
            default:
                toast.error('Error changing avatar');
        }
    } catch (error) {
        toast.error('Oops! Something went wrong. Please try again later.');
    }
};
```
Fetching user data including avatar:
```
/**
 * Fetches the profile data, populates the user state, and (when an avatar is
 * recorded) fetches the avatar image and swaps in an object URL for it.
 * Redirects to /login when no auth token is stored.
 *
 * Fixes relative to the original:
 *  - `setLoading(false)` was only reached when the user had an avatar, so the
 *    loading state never cleared for users without one — it now runs after
 *    the profile has been processed in either case;
 *  - the avatar response is checked with `.ok` before calling `.blob()`, so
 *    an error body is never turned into an image URL.
 */
const getUser = async () => {
    const token = localStorage.getItem('token');
    if (token) {
        try {
            const response = await fetch('RENDERLINK/profile', {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    'Authorization': `Bearer ${token}`,
                },
                credentials: 'include',
            });
            if (response.ok) {
                const data = await response.json();
                setUser({ username: data.username, email: data.email, avatar: data.avatar.data });
                if (data.avatar.data !== null) {
                    // Fetch and set the avatar image
                    const avatarResponse = await fetch('RENDERLINK/get_avatar', {
                        headers: {
                            'Authorization': `Bearer ${token}`,
                        },
                        credentials: 'include',
                    });
                    // Only build an object URL from a successful response.
                    if (avatarResponse.ok) {
                        const avatarBlob = await avatarResponse.blob();
                        const avatarUrl = URL.createObjectURL(avatarBlob);
                        setUser((prevUser) => ({ ...prevUser, avatar: avatarUrl }));
                    }
                }
                // Clear the loading state whether or not an avatar exists.
                setLoading(false);
            } else if (response.status === 500) {
                toast.error('Oops! Something went wrong. Please try again later.');
            }
        } catch (error) {
            toast.error('Oops! Something went wrong. Please try again later.');
        }
    } else {
        console.log('No token found');
        navigate('/login');
    }
};
```
[1]: https://en.wikipedia.org/wiki/MongoDB#MongoDB_Atlas
|
I'm trying to set `AccessKeys` in `appsettings.Development.json` to point to my OneDrive folder. Since some team members have their OneDrive at different folders, I was trying to use Windows environment variables like this:
"AppSettings": {
"AccessKeys": "%OneDrive%\\project123\\keys\\",
}
However, the above attempt is not working since the file is not being found.
internal XDocument ReturnFileContent(string filename)
{
string documentPath = _configuration["AppSettings:AccessKeys"];
string xmlFilePath = Path.Combine(documentPath, filename);
var aux = File.Exists(xmlFilePath); <- always false
//...
} |
Unfortunately, without using hacks or installing external fonts, the options are limited. Windows 10's native font rendering system may not fully support color fonts like Noto Color Emoji, leading to rendering issues.
One potential workaround could be to use Unicode characters for the flags instead of relying on specific fonts. However, this approach may not provide the same visual fidelity as using emoji flags.
Another option could be to explore whether the application or platform you're using supports custom emoji rendering methods that might work better on Windows 10.
Ultimately, until Windows improves its support for color fonts or provides better compatibility with fonts like Noto Color Emoji, achieving consistent and reliable rendering of emoji flags might be challenging without resorting to external fonts or workarounds. |
I found a way, using:
- integer pixel coordinates, which are much more intuitive and convenient, at least for what I'm doing here
- and considering the "pixel size"
|
For my future self who lands here, a better solution is to use parse_obj of pydantic v2. You can use MyModel.parse_obj(my_dict) to generate a model from a dictionary. ([documentation][1])
**Example**:
from pydantic import BaseModel, parse_obj_as
from typing import List
class TimeSlot(BaseModel):
from_: str
to: str
class DayActiveHours(BaseModel):
days: List[str]
time_slots: List[TimeSlot]
# Example nested dictionary
nested_data = {
"days": ["Monday", "Tuesday"],
"time_slots": [
{"from_": "08:00", "to": "12:00"},
{"from_": "13:00", "to": "17:00"}
]
}
# Parse the nested dictionary into Pydantic models
active_hours = DayActiveHours.parse_obj(nested_data)
# Now, you can access the parsed values using the Pydantic model attributes
print(active_hours.days) # Output: ["Monday", "Tuesday"]
print(active_hours.time_slots) # Output: [TimeSlot(from_='08:00', to='12:00'), TimeSlot(from_='13:00', to='17:00')]
Refer the solution here (pydantic v2) : [https://stackoverflow.com/questions/55762673/how-to-parse-list-of-models-with-pydantic][2]
[1]: https://pydantic-docs.helpmanual.io/usage/models/#helper-functions
[2]: https://stackoverflow.com/questions/55762673/how-to-parse-list-of-models-with-pydantic |
I'm trying to implement the AndroidPDFViewer in a Jetpack Compose Android app. I get this error "Received status code 401 from server: Unauthorized". I already added to the settings.gradle.kts file " maven { url = uri("https://jitpack.io") }" and in the build.gradle.kts file is the implementation " implementation("com.github.barteksc: android-pdf-viewer:1.6.0")
"
I'm trying to get the PDFViewer implemented, it doesn't give me an error. |
android-pdf-viewer Received status code 401 from server: Unauthorized |
|android|android-jetpack-compose|pdf-viewer| |
null |
## `NavigableSet`
The [`NavigableSet`][1], and its predecessor, `SortedSet`, define a contract for a distinct (no duplicates allowed) collection of objects maintained in a certain order.
The [`TreeSet`][2] class bundled with Java implements this interface. For concurrency, use [`ConcurrentSkipListSet`][3].
You said:
> 'Appointment' objects which have the following attributes: 'patient' 'date' 'type'
record Appointment ( String patient , LocalDate date , String type ) {}
Define a [`Comparator`][4] to control the sorting. We pass a method reference to access the member field by which we want to sort. In our example that is the `LocalDate` field named `date`.
```java
Comparator < Appointment > comparator = Comparator.comparing( Appointment :: date );
```
Track our appointments. Specify the comparator to use automatically as elements added to this set.
```java
NavigableSet < Appointment > appointments = new TreeSet <>( comparator );
```
Sample data.
```java
appointments.addAll(
List.of(
new Appointment( "Alice" , LocalDate.of( 2025 , Month.APRIL , 23 ) , "trim" ) ,
new Appointment( "Bob" , LocalDate.of( 2025 , Month.JANUARY , 23 ) , "dye" ) ,
new Appointment( "Carol" , LocalDate.of( 2025 , Month.MARCH , 23 ) , "set" )
)
);
```
Add another element.
```java
appointments.add(
new Appointment( "Davis" , LocalDate.of( 2025 , Month.FEBRUARY , 23 ) , "cut" )
);
```
See results.
```java
System.out.println( "appointments = " + appointments );
```
>appointments = [Appointment[patient=Bob, date=2025-01-23, type=dye], Appointment[patient=Davis, date=2025-02-23, type=cut], Appointment[patient=Carol, date=2025-03-23, type=set], Appointment[patient=Alice, date=2025-04-23, type=trim]]
Sure enough, the `Appointment` objects are kept in sorted order, arranged by the date.
You might want a secondary sort, to order any multiple appointments for the same date.
```java
Comparator < Appointment > comparator =
Comparator
.comparing( Appointment :: date )
.thenComparing( Appointment :: patient );
```
## Sequenced collections
In Java 21+, we have [sequenced collections][5], adding more interfaces to the [*Java Collections Framework*][6]. These interfaces include:
- [`SequencedCollection`][7]
- [`SequencedSet`][8]
[`NavigableSet`][1], `SortedSet`, `TreeSet`, and `ConcurrentSkipListSet` all extend/implement those two new interfaces.
SequencedCollection < Appointment > appointments = new TreeSet <>( comparator );
[![class hierarchy diagram by Stuart Marks of Oracle Corp.][9]][9]
[1]: https://docs.oracle.com/en%2Fjava%2Fjavase%2F22%2Fdocs%2Fapi%2F%2F/java.base/java/util/NavigableSet.html
[2]: https://docs.oracle.com/en%2Fjava%2Fjavase%2F22%2Fdocs%2Fapi%2F%2F/java.base/java/util/TreeSet.html
[3]: https://docs.oracle.com/en%2Fjava%2Fjavase%2F22%2Fdocs%2Fapi%2F%2F/java.base/java/util/concurrent/ConcurrentSkipListSet.html
[4]: https://docs.oracle.com/en%2Fjava%2Fjavase%2F22%2Fdocs%2Fapi%2F%2F/java.base/java/util/Comparator.html
[5]: https://openjdk.org/jeps/431
[6]: https://en.wikipedia.org/wiki/Java_collections_framework
[7]: https://docs.oracle.com/en%2Fjava%2Fjavase%2F22%2Fdocs%2Fapi%2F%2F/java.base/java/util/SequencedCollection.html
[8]: https://docs.oracle.com/en%2Fjava%2Fjavase%2F22%2Fdocs%2Fapi%2F%2F/java.base/java/util/SequencedSet.html
[9]: https://i.stack.imgur.com/d8z7Q.png |
|javascript|html|css| |
null |
Setting `options(seededlda_threads = 1)` gives reproducible results:
(It is unclear in the documentation how one can set a seed for each of the sub-processes when multi-threading so, as r2evans suggests in the comments, it may be worth raising this as a [Github issue](https://github.com/koheiw/seededlda/issues).)
``` r
library(quanteda)
library(seededlda)
options(seededlda_threads = 1)
corp <- data_corpus_moviereviews
toks <- tokens(corp, remove_punct = TRUE, remove_symbols = TRUE,
remove_numbers = TRUE, remove_url = TRUE)
dfmt <- dfm(toks) |>
dfm_remove(stopwords("en")) |>
dfm_remove("*@*") |>
dfm_trim(max_docfreq = 0.1, docfreq_type = "prop")
set.seed(42)
lda_seq <- textmodel_lda(dfmt, k = 5, gamma = 0.5,
batch_size = 0.01, auto_iter = TRUE,
verbose = FALSE)
x <- terms(lda_seq)
set.seed(42)
lda_seq <- textmodel_lda(dfmt, k = 5, gamma = 0.5,
batch_size = 0.01, auto_iter = TRUE,
verbose = FALSE)
y <- terms(lda_seq)
waldo::compare(x, y)
#> β No differences
```
<sup>Created on 2024-03-30 with [reprex v2.1.0](https://reprex.tidyverse.org)</sup> |
I have to use Xcode 12.4 with macOS 10.15 Catalina for a while still for development.
**Is it possible to use `notarytool` instead of `altool`?**
I have copied `notarytool` from Xcode 13.4 Developer Tools to my `usr/local/bin` folder.
When I run in shell:
xcrun notarytool submit myApp.zip
I get this error:
> xcrun: error: unable execute utility "/usr/local/bin/notarytool"
> because it requires a newer version of macOS.
----------
Without a solution, I have those 2 options:
As a last resort, I could notarize with the web API:
[Submitting software for notarization over the web][1]
Or boot into my macOS 12 Monterey partition each time.
[1]: https://developer.apple.com/documentation/notaryapi/submitting_software_for_notarization_over_the_web |
I have the code below and I expect to first have `hi` in my console then the `error` and at the end `why`, but the result is: `hi`, then `why` and the last one is `error`, so I'm wondering, why is this happening?
code:
const test = new Promise((resolve, reject) => {
console.log("hi");
throw new Error("error");
})
test.finally(() => {
console.log("why")
})
|
why promise has a weird precedence for errors in js? |
|javascript|promise|throw| |
null |
{"Voters":[{"Id":19369453,"DisplayName":"Piranavan R"}]} |
Use : href='../../css/style.css' |
Required to access the view from different source database.
I am using Azure Data Studio with a SQL Database project. First I exported the source database into a dacpac, and in the new project created a database reference pointing to the source dacpac.
```
CREATE VIEW [dbo].[v_activitypointer] AS
SELECT * FROM [$(dvdbname)].[dbo].[ap_partitioned];
GO
```
It works with the above statement using \* (all columns), and the project builds and deploys successfully. Instead of all columns, I need only a few; when I change it to specify a column, it fails with the SQL71561: SqlComputed column error.
```
CREATE VIEW [dbo].[v_activitypointer] AS
SELECT [ucode] FROM [$(dvdbname)].[dbo].[ap_partitioned];
GO
```
```
c:\dbt\cicdtest\v_activitypointer.sql(2,13,2,13):
Build error SQL71561: SqlView: [dbo].[v_activitypointer] has an unresolved reference to object [$(dvdbname)].[dbo].[ap_partitioned].[ucode]. [c:dbtcicdtestcicdtest.sqlproj]
c:dbtcicdtestv_activitypointer.sql(2,13,2,13):
Build error SQL71561: SqlComputedColumn: [dbo].[v_activitypointer].[ucode] has an unresolved reference to object [$(dvdbname]).[dbo].[ap_partitioned].[ucode]. [c:dbtcicdtestcicdtest.sqlproj]
stdout: 0 Warning(s)
stdout: 2 Error(s)
```
Here's .sqlproj file
```
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build">
<Sdk Name="Microsoft.Build.Sql" Version="0.1.12-preview" />
<PropertyGroup>
<Name>cicdtest</Name>
<ProjectGuid>{25E6C2C6-1C07-4516-BDC0-06E5AF0DCE07}</ProjectGuid>
<DSP>Microsoft.Data.Tools.Schema.Sql.SqlServerlessDatabaseSchemaProvider</DSP>
<ModelCollation>1033, CI</ModelCollation>
<VerificationExtract>false</VerificationExtract>
<VerifyExtendedTransactSQLObjectName>False</VerifyExtendedTransactSQLObjectName>
</PropertyGroup>
<ItemGroup>
<SqlCmdVariable Include="dvdbname">
<Value>$(SqlCmdVar__1)</Value>
<DefaultValue>dataverse_uunq6705</DefaultValue>
</SqlCmdVariable>
</ItemGroup>
<ItemGroup>
<ArtifactReference Include="..\dataverse_uunq6705.dacpac">
<SuppressMissingDependenciesErrors>False</SuppressMissingDependenciesErrors>
<DatabaseVariableLiteralValue>dataverse_uunq6705</DatabaseVariableLiteralValue>
</ArtifactReference>
</ItemGroup>
<Target Name="BeforeBuild">
<Delete Files="$(BaseIntermediateOutputPath)\project.assets.json" />
</Target>
</Project>
```
i tried turning off the VerificationExtract & VerifyExtendedTransactSQLObjectName but no use |
I don't really know what you mean with
> I have found out that it's possible to do this without using ajax, using jquery with node...
but I think you're misunderstanding AJAX requests.
An AJAX request (Asynchronous Javascript and XML) is an asynchronous request from the browser (client) to the server. 99% usage for AJAX requests is to handle data (retrieving, posting, removing, ...) in the background of an application, but it **does not directly result in a page transition**. Read [this][1] question for more detailed explanations.
When making an AJAX request, anything besides the handling of the data will be done in the EJS/HTML file.
Let's look at a small example, using the express framework:
`someView.ejs`
``` html
<!DOCTYPE html>
<html>
<head>
<!-- set up html page -->
</head>
<body>
<button onclick="getData()">Get some Data</button>
<div id="content">
<p>Hi! My name is Dave</p>
<p>Today I am 25 years old</p>
</div>
<script>
function getData() {
fetch('/data/get', {method: 'GET', headers: {'Content-Type':'application/json'} })
.then( raw_data => raw_data.json() )
.then( data => {
document.getElementById("content").innerHTML =
`<p>Hi! My name is ${data.name}</p>` +
`<p>Today I am ${data.age} years old</p>`
})
.catch( err => console.log(err) )
}
</script>
</body>
<html>
```
`main.js`
``` js
// imports and app configuration
app.get('/data/get', (req, res) => {
// get data from database/memory, internally from the server, etc. We will use mock data
const data = {name: "Johnny", age: 52};
res.send(data);
})
```
The page will render with a button and the text inside the `<div>`
<!-- begin snippet: js hide: true console: false babel: false -->
<!-- language: lang-html -->
<button onclick="getData()">Get some Data</button>
<div id="content">
<p>Hi! My name is Dave</p>
<p>Today I am 25 years old</p>
</div>
<!-- end snippet -->
Now, if you click on the button (not here on the code snippet, as we don't have a nodejs server here), the browser will make a fetch request to the server. On the server we handle the data, which in our case consists of the name and age of Johnny. Any kind of response, be it `res.send(...)` or `res.render(...)`, will not make the page of the browser load that data. Instead, you will get the data from the fetch call, either inside a `then()` function (like in the example) or if using async/await syntax it would be something like `let raw_data = await fetch(...).catch(...)`. There you can then further work with the data, like in our example, we change the innerHTML of the div. So after pressing the button it would look like this:
<!-- begin snippet: js hide: true console: false babel: false -->
<!-- language: lang-html -->
<button onclick="getData()">Get some Data</button>
<div id="content">
<p>Hi! My name is Johnny</p>
<p>Today I am 52 years old</p>
</div>
<!-- end snippet -->
In most use cases, you want to then change the DOM based on the data retrieved, like we did. But obviously, you shouldn't hard-code the HTML as I have in this example. Rather, you would create a partial to render with a set of data.
`person.ejs`
``` html
<p>Hi! My name is <%= person.name %><</p>
<p>Today I am <%= person.age %> years old</p>
```
Now you need to get this file in `someView.ejs` to render it. The easiest way probably is to do another fetch request on the file:
```
fetch('/views/partials/person.ejs')
.then( raw_data => raw_data.text())
.then( data => {
const html = ejs.render(data, {name: 'Brian', age: 40});
document.getElementById('content').innerHTML = html;
})
```
But, to use the ejs variable on the client side, you need to download either `ejs.js` or `ejs.min.js` from [the github][2] and add it like this `<script src="path/to/file/ejs.min.js"></script>` in the `<head>` (For more help on this, check out [this][3] question or ask me in the comments).
All that's left is combining all the things I explained here together. Good luck!
[1]: https://stackoverflow.com/questions/2130239/what-exactly-is-ajax-request-is-it-different-from-servlet-request
[2]: https://github.com/mde/ejs/releases
[3]: https://stackoverflow.com/questions/41001619/client-side-and-server-side-rendering-of-ejs-template |
My Next.js app is structured as follows:
```
app
βββ (auth)
β βββ layout.tsx
β βββ login
β β βββ page.tsx
β βββ signup
β βββ page.tsx
βββ (dashboard)
β βββ dashboard
β β βββ page.tsx
β βββ layout.tsx
βββ (home)
β βββ about
β β βββ page.tsx
β βββ layout.tsx
β βββ page.tsx
β βββ pricing
β βββ page.tsx
βββ amplifyconfiguration.json
βββ aws-exports.js
βββ favicon.ico
βββ globals.css
```
I don't include a main layout.tsx at the /app level because I want each sub-route (home, dashboard, auth) to have their own layout.
I want to include AWS Amplify in my app and it requires to put this code
```typescript
import { Amplify } from 'aws-amplify';
import config from './amplifyconfiguration.json';
Amplify.configure(config);
```
At the entry point of my app.
I'm not sure where that would be in this setup, how can I make sure this code runs on initialization?
Thanks |
Configuring AWS Amplify in Next.js 14 App Router with no main layout.tsx |
|amazon-web-services|next.js|aws-amplify|next.js13|app-router| |
I am plotting some centered signal distribution and I observe peaks that I would like to automatically detect as well as the range.
Here is a snippet of my data with the plot result.
```
x <- c(-505, -492.278481012658, -479.556962025316, -466.835443037975,
-454.113924050633, -441.392405063291, -428.670886075949, -415.949367088608,
-403.227848101266, -390.506329113924, -377.784810126582, -365.063291139241,
-352.341772151899, -339.620253164557, -326.898734177215, -314.177215189873,
-301.455696202532, -288.73417721519, -276.012658227848, -263.291139240506,
-250.569620253165, -237.848101265823, -225.126582278481, -212.405063291139,
-199.683544303797, -186.962025316456, -174.240506329114, -161.518987341772,
-148.79746835443, -136.075949367089, -123.354430379747, -110.632911392405,
-97.9113924050633, -85.1898734177215, -72.4683544303797, -59.746835443038,
-47.0253164556962, -34.3037974683544, -21.5822784810126, -8.86075949367086,
3.86075949367091, 16.5822784810126, 29.3037974683544, 42.0253164556963,
54.746835443038, 67.4683544303798, 80.1898734177215, 92.9113924050633,
105.632911392405, 118.354430379747, 131.075949367089, 143.79746835443,
156.518987341772, 169.240506329114, 181.962025316456, 194.683544303797,
207.405063291139, 220.126582278481, 232.848101265823, 245.569620253165,
258.291139240506, 271.012658227848, 283.73417721519, 296.455696202532,
309.177215189873, 321.898734177215, 334.620253164557, 347.341772151899,
360.063291139241, 372.784810126582, 385.506329113924, 398.227848101266,
410.949367088608, 423.670886075949, 436.392405063291, 449.113924050633,
461.835443037975, 474.556962025316, 487.278481012658, 500)
y <- c(1.08962117485998, 1.04114060875037, 0.996776905965624, 0.960646929830632,
0.93686754367026, 0.929555610809385, 0.94282799457288, 0.980801558285618,
1.04759316527247, 1.14729099315313, 1.27932537816759, 1.43335917393974,
1.5978253344836, 1.76115681381319, 1.91178656594254, 2.03814754488565,
2.12867270465656, 2.17179499926927, 2.15634540312282, 2.08322610260535,
1.96729487385438, 1.82418687657192, 1.66953727045998, 1.51898121522055,
1.38815387055563, 1.29269039616724, 1.24822595175736, 1.2690426113437,
1.3536928278639, 1.49058091162332, 1.66794203721677, 1.87401137923908,
2.09702411228505, 2.32521541094951, 2.54682044982726, 2.75007440351314,
2.92421034174006, 3.06404525918371, 3.16633736840561, 3.22784683098126,
3.24533380848613, 3.21555846249569, 3.13528095458543, 3.00126144633082,
2.81026468113987, 2.56361890762165, 2.27577932458706, 1.96354702910295,
1.6437231182362, 1.33310868905365, 1.04850483862219, 0.806712664008658,
0.624533262279933, 0.518563841282694, 0.493168255653025, 0.533748662550571,
0.624076105373563, 0.747921627520233, 0.889056272388814, 1.03125108337753,
1.15827710388463, 1.25390537730833, 1.30264258298335, 1.30220153657616,
1.26171800857497, 1.19070441506743, 1.09867317214123, 0.995136695884031,
0.889607402383517, 0.791597707727357, 0.710620028003226, 0.655184811066718,
0.625845182720423, 0.619359642721832, 0.632463321306988, 0.661891348711933,
0.704378855172708, 0.756660970925356, 0.81547282620592, 0.877549551250441
)
test <- as.data.frame(cbind(x, y))
ggplot(test, aes(x = x,y = y)) +
geom_line()+
theme_bw()
```
I am currently testing the `pracma` package, which has a `findpeaks()` function. It looks like the function manages to find the right peak summits; however, the positions of the peaks are odd, and I don't see yet how to transform them so I can plot vlines on the above plot. See my result:
```
library(pracma)
peaks <- as.data.frame(pracma::findpeaks(y, npeaks = 3))
ggplot(test, aes(x = x,y = y)) +
geom_line()+
theme_bw()+
sapply(peaks$V2, function(xint) geom_vline(aes(xintercept = xint)))
```
I would understand if the position of peaks was recorded from 0:N positions in the vector but the distance between peaks don't make sense to me.
What am I missing?
|
Find peak positions & plot vline in ggplot |
|r|ggplot2| |
Not having compiled the code with the -g option may explain this. (Locally, VSCode takes care of this, but you have to configure your build process with the Java debug flag or equivalent configuration.)
I have these tokens defined in my `lex` file:
(?xi:
ADC|AND|ASL|BIT|BRK|CLC|CLD|CLI|CLV|CMP|CPX|
DEY|EOR|INC|INX|INY|JMP|JSR|LDA|LDX|LDY|LSR|
NOP|ORA|PHA|PHP|PLA|PLP|ROL|ROR|RTI|RTS|SBC|
SEC|SED|SEI|STA|STX|STY|TAX|TAY|TSX|TXA|TXS|
TYA|CPY|DEC|DEX
) {
yylval.str = strdup(yytext);
for(char *ptr = yylval.str; *ptr = tolower(*ptr); ptr++);
return MNEMONIC;
}
[\(\)=Aa#XxYy,:\+\-\<\>] {
return *yytext;
}
\$[0-9a-fA-F]{4} {
yylval.str = strdup(yytext);
return ABSOLUTE;
}
\$[0-9a-fA-F]{2} {
yylval.str = strdup(yytext);
return ZEROPAGE;
}
and this is how I parse them in `bison`:
struct addr_offset {
char *str;
int offset;
};
%union {
char *str;
int number;
struct addr_offset *ao;
}
%type<str> MNEMONIC
%type<str> ABSOLUTE
%type<ao> zp
%token ZEROPAGE
expression:
MNEMONIC { statement(0, $1, NULL, "i"); }
| MNEMONIC zp { statement(5, $1, $2, }
;
zp:
ZEROPAGE { $$->str = strdup($1); }
| '>' ABSOLUTE { $$->str = strdup($2); }
| '<' ABSOLUTE { $$->str = strdup($2); }
;
Weird thing is, if I add the last two parts to the `zp` rule, the `MNEMONIC` is not read correctly in the `expression` rule.
|
{"OriginalQuestionIds":[69396320,32936215],"Voters":[{"Id":15358800,"DisplayName":"Bhargav - Retarded Skills"},{"Id":11985743,"DisplayName":"Xiddoc"},{"Id":1431720,"DisplayName":"Robert"}]} |
Run the following command in your project directory: `ng build --prod`. This will create a folder called "dist" in your project directory containing the production-ready files, which you can then upload to your webserver.
more details: [https://angular.io/guide/deployment][1]
[1]: https://angular.io/guide/deployment |
I am not sure what is wrong. I have a basic understanding of git, but I am not an expert.
In my remote branch, I have a file that has a bunch of changes, etc in it.
In my local branch, this file is completely empty.
When I open up github, and the file, I notice it has something like this, so I believe something is out of sync:
<<<<<<< HEAD:my_repo/category_1/run_daily_jobs/summary_daily_job.kjb
<xloc>1328</xloc>
<yloc>80</yloc>
=======
<xloc>1280</xloc>
<yloc>128</yloc>
>>>>>>> 44abcxyzbunchofvalues:my_repo/summary_daily_job.kjb
Based on the comments, here is what I have done:
1. Went to github
2. Found the file
3. Googled "how to read a merge conflict in github" and edited the file appropriately.
4. Ran "git pull origin/my_branch"
5. For some reason, the file is still blank though when I open it in the software I am using (Pentaho). Maybe this is a software issue at this point though? |
|git|github|pentaho| |
I have a weather app that refreshes asynchronously. While I've never seen it crash myself, I do see about a crash per day in Apple's reports, and not from a specific device. The app does have a good amount of users and it refreshes every few minutes, but I have no idea what percentage of them send reports to Apple, so I don't really know how rare the crash really is. I've tried a few things, like making sure the AsyncDownloader class that creates the data task does not get destroyed, etc. There are 2 kinds of reported crashes; the most common is at this code:
```objc
-(void)startDownload
{
NSURLRequest *request = [NSURLRequest requestWithURL:[NSURL URLWithString:fileURL] cachePolicy:NSURLRequestUseProtocolCachePolicy timeoutInterval:12];
NSURLSession *session = [NSURLSession sessionWithConfiguration:[NSURLSessionConfiguration defaultSessionConfiguration]];
if (!session || !request || ![session respondsToSelector:@selector(dataTaskWithRequest:completionHandler:)])
return;
// Stack trace points to line below crashing
self.dataTask = [session dataTaskWithRequest:request completionHandler:^(NSData *data, NSURLResponse *response, NSError *error) {...}
// ...
}
```
The "defensive" `if` is a sanity check as the crash stack trace looks like this:
[![Stack trace][1]][1]
That `self.dataTask` is just `@property NSURLSessionDataTask *dataTask;`.
Any ideas on what to look into or try in order to avoid this?
It seems quite rare overall, so I am wondering if it's a case of the app getting killed by the system or something like that which causes an unclean termination. I would welcome any suggestion though.
Edit: Couldn't find how to get Xcode to show me more info from the crash dump, but of course I could just open it with a text editor and here is all the info:
```
Exception Type: EXC_CRASH (SIGABRT)
Exception Codes: 0x0000000000000000, 0x0000000000000000
Triggered by Thread: 9
Last Exception Backtrace:
0 CoreFoundation 0x1ca108cb4 __exceptionPreprocess + 164 (NSException.m:202)
1 libobjc.A.dylib 0x1c32243d0 objc_exception_throw + 60 (objc-exception.mm:356)
2 CoreFoundation 0x1ca27dab8 -[NSObject(NSObject) doesNotRecognizeSelector:] + 136 (NSObject.m:140)
3 CoreFoundation 0x1ca11f0e8 ___forwarding___ + 1592 (NSForwarding.m:3578)
4 CoreFoundation 0x1ca185900 _CF_forwarding_prep_0 + 96 (:-1)
5 App 0x1009667cc -[AsyncDownloader startDownload] + 396 (AsyncDownloader.m:79)
```
Now, going to the thread that triggered this:
```
Thread 9 name:
Thread 9 Crashed:
0 libsystem_kernel.dylib 0x0000000208743558 __pthread_kill + 8 (:-1)
1 libsystem_pthread.dylib 0x0000000229411118 pthread_kill + 268 (pthread.c:1670)
2 libsystem_c.dylib 0x00000001d162b178 abort + 180 (abort.c:118)
3 libc++abi.dylib 0x000000022934fbf8 abort_message + 132 (:-1)
4 libc++abi.dylib 0x000000022933f444 demangling_terminate_handler() + 348 (:-1)
5 libobjc.A.dylib 0x00000001c3229ea4 _objc_terminate() + 144 (objc-exception.mm:498)
6 libc++abi.dylib 0x000000022934efbc std::__terminate(void (*)()) + 16 (:-1)
7 libc++abi.dylib 0x000000022934ef60 std::terminate() + 56 (:-1)
8 libdispatch.dylib 0x00000001d15caec0 _dispatch_client_callout + 40 (object.m:563)
9 libdispatch.dylib 0x00000001d15ce330 _dispatch_continuation_pop + 504 (queue.c:306)
10 libdispatch.dylib 0x00000001d15e1908 _dispatch_source_invoke + 1588 (source.c:961)
11 libdispatch.dylib 0x00000001d15cde6c _dispatch_queue_override_invoke + 500 (queue.c:0)
12 libdispatch.dylib 0x00000001d15dc944 _dispatch_root_queue_drain + 396 (queue.c:7051)
13 libdispatch.dylib 0x00000001d15dd158 _dispatch_worker_thread2 + 164 (queue.c:7119)
14 libsystem_pthread.dylib 0x000000022940ada0 _pthread_wqthread + 228 (pthread.c:2631)
15 libsystem_pthread.dylib 0x000000022940ab7c start_wqthread + 8 (:-1)
Thread 9 crashed with ARM Thread State (64-bit):
x0: 0x0000000000000000 x1: 0x0000000000000000 x2: 0x0000000000000000 x3: 0x0000000000000000
x4: 0x0000000229353647 x5: 0x000000016fa2acb0 x6: 0x000000000000006e x7: 0x0000000000002700
x8: 0xc5115eb4a9cc1e34 x9: 0xc5115eb5c66eae34 x10: 0x0000000000000200 x11: 0x000000000000000b
x12: 0x000000000000000b x13: 0x00000000001ff800 x14: 0x00000000000007fb x15: 0x000000009002802e
x16: 0x0000000000000148 x17: 0x000000016fa2b000 x18: 0x0000000000000000 x19: 0x0000000000000006
x20: 0x000000000001e10f x21: 0x000000016fa2b0e0 x22: 0x0000000000000110 x23: 0x0000000000000000
x24: 0x0000000000000000 x25: 0x000000016fa2b0e0 x26: 0x0000000000030008 x27: 0x0000000282e3d180
x28: 0x0000000220904c80 fp: 0x000000016fa2ac20 lr: 0x0000000229411118
sp: 0x000000016fa2ac00 pc: 0x0000000208743558 cpsr: 0x40001000
esr: 0x56000080 Address size fault
```
[1]: https://i.stack.imgur.com/xdIew.png |
Your code contains a lot of useless declarations. We should not try to force things to work by adding a lot of height/min-height/max-height, etc.
<!-- begin snippet: js hide: false console: false babel: false -->
<!-- language: lang-css -->
body {
margin: 0;
}
.page {
height: 100vh; /* full page*/
display: grid;
grid-template-rows: min-content minmax(0,1fr);
}
.quiz-grid {
height: 100%; /* 100% .content height (.content needs nothing)*/
display: grid;
grid-template-columns: minmax(0, 1fr) auto minmax(0, 1fr);
grid-template-rows: minmax(0, 1fr) min-content;
grid-template-areas:
"left main right"
"footer footer footer";
}
.quiz-cell-main {
grid-area: main;
border: 1px red solid;
}
.scroll-container {
background-color: azure;
height: 100%; /* 100% height of .quiz-cell-main */
overflow-y: auto; /* add scrollbar */
}
.quiz-cell-footer {
grid-area: footer;
place-self: center;
}
/*.quiz-cell-left {
grid-area: left;
min-height: 0;
}
.quiz-cell-right {
grid-area: right;
min-height: 0;
}
*/
<!-- language: lang-html -->
<div class="page">
<div class="header">Header</div>
<div class="content">
<div class="quiz-grid">
<div class="quiz-cell-main">
<div class="scroll-container">
<div class="scroll-content">
<h1>something</h1>
<h1>else</h1>
<h1>alice</h1>
<h1>cat</h1>
<h1>or dog</h1>
<h1>now</h1>
<h1>world</h1>
<h1>something</h1>
<h1>else</h1>
<h1>alice</h1>
<h1>cat</h1>
<h1>or dog</h1>
<h1>now</h1>
<h1>world</h1>
<h1>something</h1>
<h1>else</h1>
<h1>alice</h1>
<h1>cat</h1>
<h1>or dog</h1>
<h1>now</h1>
<h1>world</h1>
<h1>something</h1>
<h1>else</h1>
<h1>alice</h1>
<h1>cat</h1>
<h1>or dog</h1>
<h1>now</h1>
<h1>world</h1>
</div>
</div>
</div>
<div class="quiz-cell-footer">
footer
</div>
</div>
</div>
</div>
<!-- end snippet -->
Using flexbox you will need almost the same code structure
<!-- begin snippet: js hide: false console: false babel: false -->
<!-- language: lang-css -->
body {
margin: 0;
}
.page {
height: 100vh; /* full page*/
display: flex;
flex-direction: column;
}
.content {
flex: 1;
min-height: 0;
}
.quiz-grid {
height: 100%;
display: grid;
grid-template-columns: minmax(0, 1fr) auto minmax(0, 1fr);
grid-template-rows: minmax(0, 1fr) min-content;
grid-template-areas:
"left main right"
"footer footer footer";
}
.quiz-cell-main {
grid-area: main;
border: 1px red solid;
}
.scroll-container {
background-color: azure;
height: 100%;
overflow-y: auto;
}
.quiz-cell-footer {
grid-area: footer;
place-self: center;
}
/*.quiz-cell-left {
grid-area: left;
min-height: 0;
}
.quiz-cell-right {
grid-area: right;
min-height: 0;
}
*/
<!-- language: lang-html -->
<div class="page">
<div class="header">Header</div>
<div class="content">
<div class="quiz-grid">
<div class="quiz-cell-main">
<div class="scroll-container">
<div class="scroll-content">
<h1>something</h1>
<h1>else</h1>
<h1>alice</h1>
<h1>cat</h1>
<h1>or dog</h1>
<h1>now</h1>
<h1>world</h1>
<h1>something</h1>
<h1>else</h1>
<h1>alice</h1>
<h1>cat</h1>
<h1>or dog</h1>
<h1>now</h1>
<h1>world</h1>
<h1>something</h1>
<h1>else</h1>
<h1>alice</h1>
<h1>cat</h1>
<h1>or dog</h1>
<h1>now</h1>
<h1>world</h1>
<h1>something</h1>
<h1>else</h1>
<h1>alice</h1>
<h1>cat</h1>
<h1>or dog</h1>
<h1>now</h1>
<h1>world</h1>
</div>
</div>
</div>
<div class="quiz-cell-footer">
footer
</div>
</div>
</div>
</div>
<!-- end snippet -->
|
I need to create multiple queries with various weightages and properties.
The simplified version of couple of queries is this
```
SELECT
Emp_Id,
(30 * ISNULL(BMI,0) +
(20 * ISNULL(Height, 0) +
(10 * ISNULL(Eyesight, 0))
FROM
MyTable1
WHERE
Category = 'Fighter'
SELECT
Emp_Id,
(10 * ISNULL(BMI,0) +
(10 * ISNULL(Height,0) +
(20 * ISNULL(Skill,0) +
(40 * ISNULL(Eyesight,0))
FROM
MyTable1
WHERE
Category = 'Sniper'
```
There are 100s of queries with different weightages and properties. So I wanted to create a table with weightages and properties, then create dynamic query which would be executed since it will be much easier to maintain.
This is my code so far:
```
/* Dummy Table Creation */
DECLARE @DummyWeightageTable TABLE (Category varchar(50), Fieldname varchar(50), Weightage real)
INSERT INTO @DummyWeightageTable
VALUES ('Sniper', 'Eyesight', 40),
('Sniper', 'BMI', 10),
('Sniper', 'Height', 10),
('Sniper', 'Skill', 20),
('Fighter', 'Eyesight', 10),
('Fighter', 'BMI', 30),
('Fighter', 'Height', 20)
/* Actual Functionality */
DECLARE @sql VARCHAR(MAX)
DECLARE @delta VARCHAR(MAX)
DECLARE @TempTableVariable TABLE (Fieldname varchar(50), Weightage real)
INSERT INTO @TempTableVariable
SELECT Fieldname, Weightage
FROM @DummyWeightageTable
WHERE Category = 'Sniper'
SET @sql = 'SELECT Emp_Id,'
/*Do below step for all rows*/
SELECT @delta = '(', Weightage, ' * ISNULL(', Fieldname, ',0) +'
FROM @TempTableVariable
SET @sql = @sql + @delta + '0) from MyDataTable1'
EXEC sp_executesql @sql;
TRUNCATE @TempTableVariable
INSERT INTO @TempTableVariable
SELECT Fieldname, Weightage
FROM @DummyWeightageTable
WHERE Category = 'Fighter'
SET @sql = 'SELECT Emp_Id,'
/*Do below step for all rows*/
SELECT @delta = '(', Weightage, ' * ISNULL(', Fieldname, ',0) +'
FROM @TempTableVariable
SET @sql = @sql + @delta + '0) from MyDataTable1'
EXEC sp_executesql @sql;
```
However SQL Server doesn't allow arrays. So I am getting an error when I try to populate variable `@delta`
> Msg 141, Level 15, State 1, Line 15
> A SELECT statement that assigns a value to a variable must not be combined with data-retrieval operations.
I feel there must be some workaround for this but I couldn't find it. |
here's my code:
```
#include <iostream>
template <typename T>
struct Foo
{
public:
T DataMember;
};
template<>
struct Foo<int>
{
public:
void bar()
{
std::cout << DataMember;
}
};
```
When I try to compile it, it gives error C2065: 'DataMember': undeclared identifier.
What I want to do is use template members in its specialization.
I tried a lot of things and googled the problem for hours, but everything I find is either examples that don't use template members or other questions related to C++ templates, but not the one I need. |
Why can't I use template members in its specialization, or did I do something wrong? |
|c++|c++14|template-specialization|c++-templates| |
null |
{"Voters":[{"Id":14732669,"DisplayName":"ray"},{"Id":17562044,"DisplayName":"Sunderam Dubey"},{"Id":272109,"DisplayName":"David Makogon"}]} |
For me it worked via setting the diacritical mark between brackets and double quotes:
First, the declaration of index properties still seems necessary; otherwise the diacritical mark will simply appear as a subscript. E.g. if I want to have A' instead of A:
```
declare_index_properties (A, [postsuperscript])$
A["'"];
```
A could have other indices, for example:
```
declare_index_properties (A, [postsubscript,postsubscript,postsuperscript])$
A[g,u,"'"];
```
(for more fancy stuff see the help for declare_index_properties).
However, I didn't find a way to add the mark above a variable.
|
I am using Polars (`{ version = "0.38.3", features = ["lazy", "streaming", "parquet", "fmt", "polars-io", "json"] }`) with Rust (`v1.77.0`) to process a large dataset (larger than available memory) inside a Docker container. The Docker container's memory is intentionally limited to 6GB using `--memory=20gb` and `--shm-size=20gb`. I am encountering an out of memory error while performing calculations on the dataset.
Here's an overview of my workflow:
1- Load the dataset from a Parquet file using scan_parquet to create a LazyDataframe.
2- Perform transformations on the dataframe, which is unnesting.
3- Write the resulting data to disk as a Parquet file using sink_parquet.
Here is a code snippet that demonstrates the relevant parts of my Rust code:
```rust
use jemallocator::Jemalloc;
use polars::{
prelude::*,
};
use std::time::Instant;
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;
fn main() {
let now = Instant::now();
let mut lf = LazyFrame::scan_parquet(
"./dataset.parquet",
ScanArgsParquet {
low_memory: true,
..Default::default()
},
)
.unwrap()
.with_streaming(true);
lf = lf.unnest(["fields"]);
let query_plan = lf.clone().explain(true).unwrap();
println!("{}", query_plan);
lf.sink_parquet("./result.parquet".into(), Default::default())
.unwrap();
let elapsed = now.elapsed();
println!("Elapsed: {:.2?}", elapsed);
}
```
Despite using LazyFrame and enabling low_memory mode in ScanArgsParquet, I still encounter an out of memory error during the execution of the code.
I have tried the following:
- Using the jemallocator crate as the global allocator.
- Enabling streaming mode using with_streaming(true) for the LazyFrame operations.
- Using the `low_memory: true` in the scan_parquet function.
The printed plan indicates that every operation should be run in the streaming engine:
```
--- STREAMING
UNNEST by:[fields]
Parquet SCAN ./resources/dataset.parquet
PROJECT */2 COLUMNS --- END STREAMING
DF []; PROJECT */0 COLUMNS; SELECTION: "None"
```
However, I am still running into memory issues when processing the large dataset (Parquet file size = 20GB).
My questions are:
- Why am I getting the OOM error while everything indicates it is using the streaming engine?
- Is there another way to leverage disk-based processing or chunking the data to handle datasets larger than memory?
Any guidance or suggestions on how to resolve this issue would be greatly appreciated. Thank you in advance! |
null |
I am very new to Power BI, and I need to dynamically calculate with DAX the distinct count of clients that have spent more than zero in the analyzed period.
In the example below, if I have the store in the report, I expect zero clients for store A and 1 for store B. I am not able to have the total spend in the table because it changes depending on what I have in the report. Any recommendation?
|Client| USD|Store|
|------|-----|-----|
|50411542|1000|A|
|50411542|500|A|
|50411542|-2000|A|
|50411542|1000|B| |
Measure in Power BI to distinct count the number of clients with a total spend within the context > zero |
|powerbi|dax|measure| |
Using `aggregate` with a [function for geometric mean](https://stackoverflow.com/a/25555105/6574038). We can get the year by `substr`ing from the 1st to the 4th character.
> gm_mean <- function(x, na.rm=TRUE) {
+ exp(sum(log(x[x > 0]), na.rm=na.rm)/length(x))
+ }
> aggregate(. ~ cbind(year=substr(dat$Quarter, 1, 4)), dat, gm_mean)[-2]
year A B
1 2021 0.04374784 0.06324555
2 2022 0.04671022 0.02059767
3 2023 0.05024815 0.02296827
To get `%` display we can use `sprintf` with the appropriate format string.
> aggregate(. ~ cbind(year=substr(dat$Quarter, 1, 4)), dat,
+ \(x) sprintf("%.1f%%", gm_mean(x)*100))[-2]
year A B
1 2021 4.4% 6.3%
2 2022 4.7% 2.1%
3 2023 5.0% 2.3%
----
***Data***
dat <- structure(list(Quarter = structure(c(18717, 18808, 18900, 18992,
19082, 19173, 19265, 19357, 19447, 19538, 19630, 19722), class = "Date"),
A = c(0.043, 0.044, 0.044, 0.044, 0.044, 0.046, 0.048, 0.049,
0.05, 0.05, 0.05, 0.051), B = c(-0.002, -0.001, 0.002, 0.008,
0.015, 0.02, 0.024, 0.025, 0.025, 0.023, 0.022, 0.022)), row.names = c(NA,
-12L), class = "data.frame") |
you can use PrimeVue config options
import PrimeVue, { PrimeVueConfiguration } from 'primevue/config';
app.use(PrimeVue, {
locale:{ matchAll: "جميع الشروط" }
} as PrimeVueConfiguration);
alternatively you may modify only a portion of it like this:
import PrimeVue, { PrimeVueConfiguration, defaultOptions } from 'primevue/config';
app.use(PrimeVue, {
locale: {
...defaultOptions.locale,
matchAll: "جميع الشروط" }
} as PrimeVueConfiguration);
I hope this helps! Let me know if you have any other questions.
|
Having:
- a csv file with lots of columns.
- PERMISSIVE mode and column name for corrupt records set.
Is there any way to find out **which specific cell causes a row to be marked as corrupt**?
With default approach I can only see (in corrupt column) input row without any hint which cell was problematic within this row.
(I cannot use Databricks in case of any suggestions related to this platform) |
Which cell makes input row a corrupt row when reading csv file with Apache Spark |
|csv|apache-spark|schema| |
The parent widget contains a Scaffold and a `bottomNavigationBar`. The widget accepts a body, according to the navigation.
I tried the implementation of flutter itself: Overlay Portal (1 screen);
I tried packages from pub.dev:
flutter_portal (2 screen);
dropdown_button 2 (3 screen)
Who has encountered this problem and knows where to dig? (I played with context, but without success)



I tried to use the height margins of the navigation bar
I tried to convey a different context |
Need dynamic query creation in SQL Server with Array like implementation |
I have been working on a project that uses a **`database`**. It makes the user choose from the commands in the program. The program makes you add your **password, E-mail, user id, and birthday**, and every user has his own **user id**. He can make a **new user**, **delete password**, **delete user**, **change password and change user id**.
Every option has its own `command`, but there a problem appeared to me while I was working.
I made an input variable and named it **(Command option)** that prints a message which tells the user to write the command he wants to use. I also made an if condition which checks whether the variable **(Command option)** == "N" or "n" to create a new user, and I made an else if condition that, if the variable **(Command option)** isn't equal to "N" or "n", prints
("Sorry, we don't have this command"),
[My code](https://i.stack.imgur.com/V9FuT.png)
but the problem is that if I put the condition to make a new user before the condition to print, the print doesn't work even if the variable **(Command option)** isn't equal to "N" or "n"; and if I do the opposite and put the condition to print before the condition to make a new user, the condition to make a new user doesn't work even if the variable **(Command option)** equals "N" or "n". So, what's the problem?
I have been working on a project that uses a **`database`**. It makes the user choose from the commands in the program. The program makes you add your **password, E-mail, user id, and birthday**, and every user has his own **user id**. He can make a **new user**, **delete password**, **delete user**, **change password and change user id**.
Every option has its own `command`, but there a problem appeared to me while I was working.
I made an input variable and named it **(Command option)** that prints a message which tells the user to write the command he wants to use. I also made an if condition which checks whether the variable **(Command option)** == "N" or "n" to create a new user, and I made an else if condition that, if the variable **(Command option)** isn't equal to "N" or "n", prints
("Sorry, we don't have this command"),
[My code](https://i.stack.imgur.com/V9FuT.png)
but the problem is that if I put the condition to make a new user before the condition to print, the print doesn't work even if the variable **(Command option)** isn't equal to "N" or "n"; and if I do the opposite and put the condition to print before the condition to make a new user, the condition to make a new user doesn't work even if the variable **(Command option)** equals "N" or "n". So, what's the problem? |
Why if condition work but else if condition doesn't work? |