instruction stringlengths 0 30k ⌀ |
|---|
#Step 02
-- Company (corporate) customers: one row per business client.
-- NOTE(review): cust_email VARCHAR(20) is tight for real e-mail addresses;
-- consider widening (e.g. VARCHAR(254)) -- confirm against application data.
CREATE TABLE company_customer
(
cust_ID INT,                                     -- PK (declared below); assigned by the application, not AUTO_INCREMENT
address_num VARCHAR (6),                         -- house/unit number of the billing address
street VARCHAR(20),
city VARCHAR(20),
cust_contact_num VARCHAR(12) UNIQUE NOT NULL,    -- primary phone; must be unique
cust_alt_contact VARCHAR(12),                    -- optional secondary phone
cust_email VARCHAR(20) UNIQUE NOT NULL,
company_name VARCHAR(20) NOT NULL,
company_reg_num VARCHAR(25) UNIQUE NOT NULL,     -- company registration number
CONSTRAINT ccus_pk PRIMARY KEY (cust_ID)
);
#Step 03
-- Individual (personal) customers: one row per private client.
CREATE TABLE individual_customer (
    cust_ID          INT,
    address_num      VARCHAR(6),
    street           VARCHAR(20),
    city             VARCHAR(20),
    cust_contact_num VARCHAR(12) UNIQUE NOT NULL,  -- primary phone; must be unique
    cust_alt_contact VARCHAR(12),                  -- optional secondary phone
    cust_email       VARCHAR(20) UNIQUE NOT NULL,
    cust_nic         VARCHAR(13) UNIQUE NOT NULL,  -- national identity card number
    c_fname          VARCHAR(15) NOT NULL,
    c_lname          VARCHAR(15) NOT NULL,
    CONSTRAINT icus_pk PRIMARY KEY (cust_ID)
);
#Step 04
-- Construction/renovation projects.
-- NOTE(review): proj_icusID_fk and proj_ccusID_fk both constrain the SAME
-- column (cust_ID) against two different parent tables. A foreign key is
-- checked independently, so any non-NULL cust_ID must exist in
-- individual_customer AND company_customer at the same time -- that is why
-- the INSERT below only works with foreign key checks disabled. The usual
-- fix is a single "customer" supertype table that both subtypes reference,
-- or two separate nullable FK columns (one per customer kind).
CREATE TABLE project
(
project_ID INTEGER NOT NULL,
cust_ID INTEGER,                           -- owning customer (see note above)
site_ad_num VARCHAR (6)NOT NULL,
site_street VARCHAR(20) NOT NULL,
site_city VARCHAR(20) NOT NULL,
project_type VARCHAR(15) NOT NULL,         -- e.g. 'CONSTRUCTION' / 'RENOVATION'
proj_start_date DATE NOT NULL,
estimated_completion_date DATE NOT NULL,
actual_completion_date DATE,               -- NULL until the project completes
current_progress VARCHAR(15)NOT NULL,      -- e.g. 'Ongoing', 'On hold', 'Complete'
specific_requirements VARCHAR (40),
CONSTRAINT prog_ID_pk PRIMARY KEY (project_ID),
CONSTRAINT proj_icusID_fk FOREIGN KEY (cust_ID) REFERENCES individual_customer(cust_ID),
CONSTRAINT proj_ccusID_fk FOREIGN KEY (cust_ID) REFERENCES company_customer(cust_ID)
);
-- Seed data for project.
-- NOTE(review): with the schema as declared, every non-NULL cust_ID below
-- must be present in BOTH individual_customer AND company_customer (two FK
-- constraints on the same column), which is why this INSERT fails unless
-- foreign key checks are disabled. Fix the schema (customer supertype or two
-- FK columns) rather than disabling the checks.
INSERT INTO project
(project_ID,cust_ID,site_ad_num, site_street,site_city ,project_type, proj_start_date, estimated_completion_date,actual_completion_date, current_progress, specific_requirements)
VALUES
(301, 100, '123', 'Beach Road', 'Galle', 'CONSTRUCTION', '2020-04-06', '2020-05-17', null, 'On hold', 'Need additional labour.'),
(302,700,'99/7','Dehiwala Road', 'Maharagama','CONSTRUCTION','2020-08-09','2021-10-11',null,'On hold', 'Payment issue.'),
(303, 305, '456', 'Hillside Avenue', 'Galle', 'RENOVATION', '2021-01-01', '2021-12-12', null, 'Ongoing', null),
(304,205, '67J', 'Temple Road', 'Kottawa', 'CONSTRUCTION', '2021-02-03','2021-08-10', NULL, 'Ongoing', null),
(305, 205, '789', 'Beachfront Road', 'Negombo', 'RENOVATION', '2022-01-02', '2023-09-10', NULL, 'Ongoing',null),
(306,505,'67A','Bangalawatta Road', 'Kandy','CONSTRUCTION','2022-01-23','2023-09-26', '2023-08-29','Complete', null),
(307, 500,'21A', 'Coastal Road', 'Batticaloa', 'RENOVATION', '2023-01-02', '2023-09-05', '2023-10-12', 'Complete', null),
(308,705, '123B', 'Lake View Street', 'Colombo', 'CONSTRUCTION', '2023-01-01', '2023-12-30','2023-12-15', 'Complete', null),
(309, 105, '987/23', 'Hilltop Drive', 'Kottawa','RENOVATION', '2023-09-02', '2024-12-12', NULL, 'Ongoing', null),
(310,400, '45', 'Dabahen Road','Kandy', 'CONSTRUCTION', '2024-01-01','2024-03-30','2024-03-29', 'Complete', null),
-- NOTE(review): 'governent' in the remark below looks like a typo for
-- 'government' -- confirm before correcting the stored data.
(311, 500, '345B', 'Hill Road', 'Colombo', 'RENOVATION', '2024-01-03', '2025-01-02', null, 'On hold', 'Awaiting governent approval.'),
(312,600,'24/56','Samagi Road', 'Homagama','CONSTRUCTION','2024-03-03','2025-01-20',null,'Ongoing',null),
(313,600,'123A','Sinha Road','Dehiwala','RENOVATION','2024-01-30','2025-05-09',null, 'Ongoing',null),
(314,500,'34C','Barnse Road', 'Colombo','RENOVATION','2024-01-01','2024-03-20','2024-03-19', 'Complete',null),
(315,205,'209','Kingsley Road','Colombo','RENOVATION','2024-01-20','2024-04-01','2024-04-02','Complete',null);
If I disable the foreign key check, the INSERT runs; I want it to run without unticking that box. |
I am working on one of my projects and I have a problem.
I am trying to create a modal, but when I make it, it disappears below 992 px, which is one of Bootstrap's breakpoints. I made the modal like one of the examples in the docs. Thank you in advance! I have tried many solutions, including CSS changing the class modal to position fixed, but without success. :( If you want to see the whole concept of the project you can go to https://github.com/dicheto/Dicheto-IT/tree/main/Project-Marchideo
Modal code:
```
<div class="modal" id="MyProfile" tabindex="-1" aria-labelledby="MyProfile"
aria-hidden="true">
<div class="modal-dialog modal-dialog-centered-fix ">
<div class="modal-content modal-MyProfile-fix">
<div class="modal-header">
<h5 class="modal-title text-center pop-up-header"
id="MyProfileLabel">Моят профил</h5>
<button type="button" class="btn-close" data-bs-dismiss="modal"
aria-label="Close"></button>
</div>
<div class="modal-body">
<h3>Лични данни</h3>
<div class="mx-1">
<form class="input-MyProfile needs-validation was-validated"
novalidate="">
<input type="text" class="form-control"
id="MyProfile-Validation01" placeholder="Име"
value="Иван" required>
<input type="text" class="form-control"
id="MyProfile-Validation02" placeholder="Фамилия"
value="Иванов" required>
<input type="number" class="form-control"
id="MyProfile-Validation03" placeholder="Телефон"
required>
<input type="text" class="form-control"
id="MyProfile-Validation04" placeholder="Имейл"
value="Ivan.Ivanov@icloud.com" required>
</form>
</div>
<h3>Изтриване на профила</h3>
<div class="mb-1 mx-1" style="width: 100%;">
<p>Ако изтриете профила си, всички лични данни ще бъдат
премахнати. Изтриването е необратим
процес.</p>
<a href="" class="btn-razgledai">Изтрий</a>
</div>
<h3>Предпочитан начин за връзка</h3>
<div class="mb-1 mx-1" style="width: 100%;">
<p><ion-icon name="checkmark-outline"></ion-icon> по
телефона</p>
<p><ion-icon name="checkmark-outline"></ion-icon> по Имейл
</p>
</div>
<h3>Свързване</h3>
<div class="mb-1 mx-1 row" style="width: 100%;">
<div class="col-6">
<p class="profile-facebook"><ion-icon
class="profile-facebook"
name="logo-facebook"></ion-icon> Facebook
</p>
<p class="profile-google"><ion-icon
class="profile-google"
name="logo-google"></ion-icon>
Google</p>
</div>
<div class="col-6 text-end">
<p class="profile-facebook">Свържи</p>
<p class="profile-google"><ion-icon
class="profile-google"
name="trash-bin-outline"></ion-icon></p>
</div>
</div>
</div>
<div class="modal-footer justify-content-center">
<button type="button" class="btn-razgledai text-center"
data-bs-dismiss="modal">Запази</button>
</div>
</div>
</div>
</div>
```
CSS changing the class "modal"
Modifing the browser z-index
Activating and deactivating things in the inspector |
How to fix: the modal disappears below 992px? |
|twitter-bootstrap|bootstrap-5| |
null |
I am making a simple chat app website in Socket.io. But emit method not working in client.
When I register an `on` handler and emit the event from the client, the handler function is never called.
here is client html body code :
```
<body>
<ul id="messages"></ul>
<form id="form" action="">
<input id="input" autocomplete="off" /><button>Send</button>
</form>
<script src="/socket.io/socket.io.min.js"></script>
<script>
const socket = io();
socket.on("chat", (msg) => {
console.log(msg);
});
socket.emit('chat', 'hello');
</script>
</body>
```
and this server file content:
```
// Minimal Socket.IO chat server.
//
// Fix: the original registered no "chat" listener, so messages emitted by the
// client were silently dropped and nothing was ever sent back. The connection
// handler now relays every "chat" message to all connected clients (including
// the sender), which makes the client-side socket.on("chat", ...) callback fire.
const express = require("express");
const { createServer } = require("node:http");
const { join } = require("node:path");
const { Server } = require("socket.io");
const app = express();
const server = createServer(app);
const io = new Server(server);
// Serve the chat page.
app.get("/", (req, res) => {
  res.sendFile(join(__dirname, "index.html"));
});
io.on("connection", (socket) => {
  console.log("a user connected");
  // Relay each incoming chat message to every connected client.
  socket.on("chat", (msg) => {
    io.emit("chat", msg);
  });
});
server.listen(4001, () => {
  console.log("server running at http://localhost:4001");
});
``` |
Socket.io event emitted from the client is never received back by the client itself |
|javascript|node.js|socket.io|socket.io-client| |
I have a mixin that is shared between two models. In it, I have a method that takes a dictionary and creates an SQLAlchemy object, this is raising a warning. The two models are very similar (all the same except for one column that has a relationship to a different table. However, all the column names have the same names.
Here is the mixin:
```
class ConstructionBase:
    """Mixin with the columns and dict-loading logic shared by Assembly and Test.

    NOTE(review): the SAWarning discussed alongside this code is emitted when
    relationship attributes are assigned while the instance is not yet in a
    session -- presumably adding the object to the session before calling
    from_dict() would silence it; confirm against the SQLAlchemy session docs.
    """

    # Surrogate primary key.
    id: Mapped[int] = mapped_column(db.Integer, autoincrement=True, primary_key=True)
    # Raw measurement payload (opaque bytes).
    data: Mapped[bytes] = mapped_column(db.LargeBinary, unique=False, nullable=True)
    measurement_date: Mapped[datetime] = mapped_column(db.DateTime, nullable=True)

    def from_dict(self, data: dict) -> None:
        """Populate relationship attributes from a dict of lookup keys.

        Recognised keys: 'module' and 'component' (serial numbers resolved via
        get_by_sn) and 'type' (a name resolved against the concrete model's own
        type class). Keys that are absent are simply skipped.
        """
        if 'module' in data:
            self.module = Module.get_by_sn(data['module'])
        if 'component' in data:
            self.component = Component.get_by_sn(data['component'])
        if 'type' in data:
            # Resolve the 'type' relationship's target class on the concrete
            # subclass, so the same mixin works for Assembly (AssemblyType)
            # and Test (TestType) without duplication.
            TypeClass = type(self).type.property.mapper.class_
            self.type = TypeClass.get_by_name(data['type'])
And my two models:
```
class Assembly(db.Model, UtilityMixin, PaginatedAPIMixin, TimeStampMixin, MTDdbSyncMixin, ConstructionBase):
    """Assembly record; shares columns and from_dict() with Test via ConstructionBase."""
    __tablename__ = "assembly"
    # FKs are nullable so a row can exist before being linked; CASCADE removes
    # assembly rows when their module/component is deleted.
    module_id: Mapped[int] = mapped_column(db.Integer, db.ForeignKey('module.id', ondelete="CASCADE"), nullable=True)
    component_id: Mapped[int] = mapped_column(db.Integer, db.ForeignKey('component.id', ondelete="CASCADE"), nullable=True)
    type_id: Mapped[int] = mapped_column(db.Integer, db.ForeignKey('assembly_type.id'), nullable=False)
    #Relationships
    module: Mapped["Module"] = relationship(back_populates="assembly")
    # NOTE(review): annotated as List["Component"] here but as a scalar
    # "Component" on Test -- confirm which cardinality is intended; the shared
    # from_dict() assigns a single object to self.component.
    component: Mapped[List["Component"]] = relationship(back_populates="assembly")
    type: Mapped["AssemblyType"] = relationship(back_populates="assembly")
class Test(db.Model, UtilityMixin, PaginatedAPIMixin, TimeStampMixin, MTDdbSyncMixin, ConstructionBase):
    """Test record; mirrors Assembly but points at test_type instead of assembly_type."""
    __tablename__ = "test"
    # FKs are nullable so a row can exist before being linked; CASCADE removes
    # test rows when their module/component is deleted.
    module_id: Mapped[int] = mapped_column(db.Integer, db.ForeignKey('module.id', ondelete="CASCADE"), nullable=True)
    component_id: Mapped[int] = mapped_column(db.Integer, db.ForeignKey('component.id', ondelete="CASCADE"), nullable=True)
    type_id: Mapped[int] = mapped_column(db.Integer, db.ForeignKey('test_type.id'), nullable=False)
    #Relationships
    module: Mapped["Module"] = relationship(back_populates="test")
    component: Mapped["Component"] = relationship(back_populates="test")
    type: Mapped["TestType"] = relationship(back_populates="test")
```
With this mixin I can do something like
```
fake_assembly = Assembly()
fake_assembly.from_dict({'module':'mod_sn','component':'comp_sn','type':'the type'})
```
However when I do that I get this warning:
```
/home/application/models.py:656: SAWarning: Object of type <Assembly> not in session, add operation along 'Module.assembly' will not proceed (This warning originated from the Session 'autoflush' process, which was invoked automatically in response to a user-initiated operation.)
return Component.query.filter_by(serial_number=serial_number).first()
```
But the object works, when I print it out it is `<Assembly None>` which is wrong but when I do `fake_assembly.module` it gives the correct module object. And I can add it to the session and commit it just fine. Also, it only throws the error for objects that have relationships to other tables.
In regards to the warning I have been unable to find anything useful, so I have come here to try to learn about it. Maybe the way I am currently trying to do things is bad practice. Any insight or help is greatly appreciated! |
I aim to select full name of employees that either have no boss or their boss lives on a street that contains letter 'o' or letter 'u'. Then, I want to list them in descending order by full name.
The problem comes in the ordering: two queries that I think are equivalent give different answers.
When I introduce in MySQL Workbench the following command:
```
select concat(surnames,', ',name) as 'Full Name', street
from employee
where boss is null or (boss is not null and (street like'%u%' or steet like '%o%'))
order by concat(surnames,', ',name) desc;
```
By this command I get the answer I want, that is:
Full Name Street
Suárez García, Juan Juan Valdés 25
Sarasola Goñi, Vanesa Austria
Requena Santos, Pilar Alicante 3
Puertas Elorza, Marta Lope de Vega 3
Piedra Trujillo, Ramón Madre Vedruna 21
Narváez Alonso, Alba Vara de Rey 22
Gómez de la Sierra, Francisco Loscertales 9
Chávarri Díez, Lorea
Arrieta Alcorta, Kepa Urbieta 33
Álvarez González, Ana Graus 19
But when I change the ordering by another that looks the same to me:
```
-- NOTE(review): 'FullName' in single quotes is a string LITERAL, not a column
-- reference -- ORDER BY a constant imposes no ordering, so rows come back in
-- whatever order the engine happens to produce. Order by the expression itself
-- or the alias in backticks: ORDER BY `Full Name` DESC.
-- Also: "steet" in the WHERE clause looks like a typo for "street".
select concat(surnames,', ',name) as 'Full Name', street
from employee
where boss is null or (boss is not null and (street like'%u%' or steet like '%o%'))
order by 'FullName' desc;
```
I get a wrong answer that looks like:
Full Name Street
Suárez García, Juan Juan Valdés 25
Puertas Elorza, Marta Lope de Vega 3
Chávarri Díez, Lorea
Narváez Alonso, Alba Vara de Rey 22
Gómez de la Sierra, Francisco Loscertales 9
Piedra Trujillo, Ramón Madre Vedruna 21
Sarasola Goñi, Vanesa Austria
Requena Santos, Pilar Alicante 3
Álvarez González, Ana Graus 19
Arrieta Alcorta, Kepa Urbieta 33
Can somebody tell me what's going on here?
|
import kivy
kivy.require('1.11.1')
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
class LoginScreen(GridLayout):
    """Two-column login form: a label/input pair for user name and password."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.cols = 2
        # Each call adds a caption label plus its text field, in grid order.
        self.username = self._labeled_input('User Name', multiline=False)
        self.password = self._labeled_input('Password', password=True, multiline=False)

    def _labeled_input(self, caption, **field_opts):
        """Add a Label followed by a TextInput and return the input widget."""
        self.add_widget(Label(text=caption))
        field = TextInput(**field_opts)
        self.add_widget(field)
        return field

class MyApp(App):
    """Application entry point; the root widget is the login form."""

    def build(self):
        return LoginScreen()

if __name__ == '__main__':
    MyApp().run()
Then I got this message:
```
Can’t exec “aclocal”: No such file or directory at /usr/share/autoconf/Autom4te/FileUtils.pm line 326.
autoreconf: failed to run aclocal: No such file or directory
STDERR:
# Command failed: /usr/bin/python -m pythonforandroid.toolchain create — dist_name=myapp — bootstrap=sdl2 — requirements=python3,kivy — arch armeabi-v7a — copy-libs — color=always — storage-dir=”/home/js/PycharmProjects/anotherkivytest/.buildozer/android/platform/build” — ndk-api=21
# ENVIRONMENT:
# CLUTTER_IM_MODULE = ‘ibus’
# LS_COLORS = ‘rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.zst=01;31:*.tzst=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.wim=01;31:*.swm=01;31:*.dwm=01;31:*.esd=01;31:*.jpg=01;35:*.jpeg=01;35:*.mjpg=01;35:*.mjpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:’
# LESSCLOSE = ‘/usr/bin/lesspipe %s %s’
# LANG = ‘pt_BR.UTF-8’
# GDM_LANG = ‘pt_BR’
# DISPLAY = ‘:0’
# GTK_OVERLAY_SCROLLING = ‘1’
# COLORTERM = ‘truecolor’
# XDG_VTNR = ‘7’
# SSH_AUTH_SOCK = ‘/run/user/1000/keyring/ssh’
# MANDATORY_PATH = ‘/usr/share/gconf/cinnamon.mandatory.path’
# XDG_SESSION_ID = ‘c2’
# XDG_GREETER_DATA_DIR = ‘/var/lib/lightdm-data/js’
# USER = ‘js’
# DESKTOP_SESSION = ‘cinnamon’
# QT4_IM_MODULE = ‘ibus’
# GNOME_TERMINAL_SCREEN = ‘/org/gnome/Terminal/screen/41d14b2e_8140_419f_8f11_36ec747c2d25’
# DEFAULTS_PATH = ‘/usr/share/gconf/cinnamon.default.path’
# QT_QPA_PLATFORMTHEME = ‘qt5ct’
# PWD = ‘/home/js/PycharmProjects/anotherkivytest’
# HOME = ‘/home/js’
# SSH_AGENT_PID = ‘1424’
# QT_ACCESSIBILITY = ‘1’
# XDG_SESSION_TYPE = ‘x11’
# XDG_DATA_DIRS = ‘/usr/share/cinnamon:/usr/share/gnome:/home/js/.local/share/flatpak/exports/share:/var/lib/flatpak/exports/share:/usr/local/share:/usr/share:/var/lib/snapd/desktop’
# XDG_SESSION_DESKTOP = ‘cinnamon’
# GJS_DEBUG_OUTPUT = ‘stderr’
# GTK_MODULES = ‘gail:atk-bridge’
# TERM = ‘xterm-256color’
# SHELL = ‘/bin/bash’
# VTE_VERSION = ‘5202’
# XDG_SEAT_PATH = ‘/org/freedesktop/DisplayManager/Seat0’
# QT_IM_MODULE = ‘ibus’
# XMODIFIERS = ‘@im=ibus’
# XDG_CURRENT_DESKTOP = ‘X-Cinnamon’
# GPG_AGENT_INFO = ‘/run/user/1000/gnupg/S.gpg-agent:0:1’
# GNOME_TERMINAL_SERVICE = ‘:1.228’
# XDG_SEAT = ‘seat0’
# SHLVL = ‘1’
# LANGUAGE = ‘pt_BR:pt:en’
# GDMSESSION = ‘cinnamon’
# GNOME_DESKTOP_SESSION_ID = ‘this-is-deprecated’
# LOGNAME = ‘js’
# DBUS_SESSION_BUS_ADDRESS = ‘unix:path=/run/user/1000/bus’
# XDG_RUNTIME_DIR = ‘/run/user/1000’
# XAUTHORITY = ‘/home/js/.Xauthority’
# XDG_SESSION_PATH = ‘/org/freedesktop/DisplayManager/Session0’
# XDG_CONFIG_DIRS = ‘/etc/xdg/xdg-cinnamon:/etc/xdg’
# PATH = ‘/home/js/.buildozer/android/platform/apache-ant-1.9.4/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin’
# CINNAMON_VERSION = ‘4.0.8’
# GJS_DEBUG_TOPICS = ‘JS ERROR;JS LOG’
# SESSION_MANAGER = ‘local/js:@/tmp/.ICE-unix/1356,unix/js:/tmp/.ICE-unix/1356’
# LESSOPEN = ‘| /usr/bin/lesspipe %s’
# GTK_IM_MODULE = ‘ibus’
# _ = ‘/usr/local/bin/buildozer’
# OLDPWD = ‘/home/js/PycharmProjects’
# PACKAGES_PATH = ‘/home/js/.buildozer/android/packages’
# ANDROIDSDK = ‘/home/js/.buildozer/android/platform/android-sdk’
# ANDROIDNDK = ‘/home/js/.buildozer/android/platform/android-ndk-r17c’
# ANDROIDAPI = ‘27’
# ANDROIDMINAPI = ‘21’
#
# Buildozer failed to execute the last command
# The error might be hidden in the log above this error
# Please read the full log, and search for it before
# raising an issue with buildozer itself.
# In case of a bug report, please add a full log with log_level = 2
```
|
I have a FIFO implementation in Verilog that is based on this article: http://www.sunburst-design.com/papers/CummingsSNUG2002SJ_FIFO1.pdf
Whilst using this FIFO as a CDC FIFO, with the read side being clocked at 100Mhz and the write side at 8Mhz I ran into a timing issue. Please note that the issue is not reproducible in simulation whether it is behavioral, post-synthesis or post-implementation. It can only be see in the timing reports given by the Vivado tool.
Here are snippets of code that are of interest for the question:
```verilog
// Asynchronous (clock-domain-crossing) FIFO wrapper in the style of the
// Cummings SNUG2002 design, extended with a request/acknowledge handshake
// that freezes a snapshot of the read pointer (rptr_save) while the write
// clock domain captures it.
//
// NOTE(review): rptr_save is a multi-bit bus crossing rclk -> wclk. Even with
// the handshake holding it stable, static timing analysis still sees an
// ordinary cross-clock path on every bit -- presumably a set_false_path or
// set_max_delay constraint on this path (or gray-coding the pointer as in the
// original paper) is what Vivado expects; confirm against the project's XDC.
module CDC_FIFO #(
parameter DSIZE = 28,   // data word width
parameter ASIZE = 10    // address width (depth = 2**ASIZE)
)(
output [DSIZE-1:0] rdata,
output wfull,
output rempty,
output r_almost_empty,
input [DSIZE-1:0] wdata,
output [ASIZE:0] wr_diff,
input winc,
input wclk,
input wrst_n,
input rinc,
input rclk,
input rrst_n
);
wire [ASIZE-1:0] waddr, raddr;
wire [ASIZE:0] wptr, rptr, wq2_rptr, rq2_wptr;
// Handshake FSM states (read-clock domain).
localparam AWAIT_RINC = 0;
localparam AWAIT_W_TO_R = 1;
// Snapshot of rptr, held stable while the write domain synchronises it.
reg [ASIZE:0] rptr_save = 0;
reg [1:0] state = AWAIT_RINC;
reg r_to_w = 0;   // request: "rptr_save is valid, capture it"
wire w_to_r;      // acknowledge back from the write domain (via sync_r2w)
// Request/acknowledge FSM: on a read increment, latch rptr into rptr_save and
// raise r_to_w; then hold everything until the write domain acknowledges.
always @(posedge rclk) begin
case (state)
AWAIT_RINC: begin
if (rinc && !w_to_r) begin
state <= AWAIT_W_TO_R;
r_to_w <= 1;
rptr_save <= rptr;
end
else begin
r_to_w <= 0;
end
end
AWAIT_W_TO_R: begin
if (w_to_r) begin
state <= AWAIT_RINC;
r_to_w <= 0;
end
end
default: ;
endcase
end
// Carries the frozen read-pointer snapshot into the write clock domain.
sync_r2w #(.ADDRSIZE(ASIZE)) sync_r2w
(
.r_to_w(r_to_w),
.w_to_r(w_to_r),
.wq2_rptr(wq2_rptr),
.rptr(rptr_save),
.wclk(wclk),
.wrst_n(wrst_n)
);
// Other instantiations of other modules
// NOTE(review): the trailing ';' after endmodule is non-standard Verilog and
// is rejected by some tools (e.g. iverilog) -- remove it.
endmodule;
```
```
// Read-to-write domain synchroniser: a two-stage pipeline for the read
// pointer plus a single-flop path for the handshake acknowledge.
//
// NOTE(review): rptr here is a plain binary snapshot, not gray-coded; the
// design relies on the upstream handshake keeping rptr stable while the two
// wclk stages sample it -- confirm that assumption holds under all resets.
module sync_r2w #(
parameter ADDRSIZE = 4
)(
output reg w_to_r = 0,          // acknowledge back to the read domain
input r_to_w,                   // request from the read domain
output [ADDRSIZE:0] wq2_rptr,   // read pointer, synchronised into wclk
input [ADDRSIZE:0] rptr,        // frozen read-pointer snapshot (rptr_save)
input wclk, wrst_n
);
reg [ADDRSIZE:0] wq1_rptr = 0;      // first synchroniser stage
reg [ADDRSIZE:0] wq2_rptr_reg = 0;  // second synchroniser stage
assign wq2_rptr = wq2_rptr_reg;
// Shift rptr through the two-stage pipeline, but only while the handshake
// (request or acknowledge) is active, so the captured value cannot change
// mid-transfer.
always @(posedge wclk)
begin
if (!wrst_n)
begin
{ wq2_rptr_reg,wq1_rptr} <= 0;
end
else
begin
if (w_to_r || r_to_w) begin
{ wq2_rptr_reg,wq1_rptr} <= {wq1_rptr,rptr};
end
end
end
// One-cycle-delayed acknowledge of the request.
// NOTE(review): r_to_w originates in the rclk domain but is sampled here by a
// single flop -- a standard two-flop synchroniser on this control bit would be
// the conventional treatment; confirm intent.
always @(posedge wclk) begin
if (!wrst_n) begin
w_to_r <= 0;
end
else begin
w_to_r <= r_to_w;
end
end
endmodule
```
The issue arises when the read pointer has to be passed to the write domain through the sync_r2w module. So I have added a state machine to CDC_FIFO, to initiate a sort of handshake between the two, so to be sure that the rptr_save holds its value long enough for the write side to process it. However it seems that this has not fixed my issue.
Here is a photo of the critical path that is currently failing (one of the bits of rptr_save)
[![enter image description here][1]][1]
What exactly is the problem in my logic that still does not fix the timing? It seems to me that the current FSM basically waits for the write side to assert that it has indeed taken the rptr_save value and then "switches off" the handshake condition to prevent further updates.
Thanks
[1]: https://i.stack.imgur.com/Pi0IW.png |
Livewire component JS script Uncaught SyntaxError: Unexpected token |
|javascript|laravel|laravel-livewire| |
Your code has compile errors when I try to run with the version of `iverilog` that I have installed (10.3).
However, the code produces the expected output when I run with the Cadence and Synopsys simulators:
result_matrix[0] = 0.000000
result_matrix[1] = 0.000000
result_matrix[2] = 0.000000
result_matrix[3] = 0.000000
result_matrix[0] = 7.000000
result_matrix[1] = 10.000000
result_matrix[2] = 15.000000
result_matrix[3] = 22.000000
result_matrix[0] = 7.000000
result_matrix[1] = 10.000000
result_matrix[2] = 15.000000
result_matrix[3] = 22.000000
It is a bug in the version of `iverilog` you are using. Try your code on [EDA Playground][1].
---
It would be helpful to show the `$time` and hierarchy `%m` in your `$display` statements.
[1]: https://www.edaplayground.com/ |
`tqdm(position=1, disable=not is_main_process)` solves this problem.
- `Position=1` can ensure that PDSH prints a progress bar.
- `not is_main_process` can ensure only one process can print a progress bar, and this is a common trick used in tqdm.
The reason for this is that PDSH uses a line buffer, and it doesn't wrap when the program only prints a progress bar. Using `position=1` allows you to use TQDM's own mechanism to get around this problem. |
projection and EPSG mismatch when buffering a geometry |
How can I fix this?
all that occurs when I installed the firebase package
```
FAILURE: Build failed with an exception.
* What went wrong:
Execution failed for task ':cloud_firestore:generateDebugRFile'.
> Could not resolve all files for configuration ':cloud_firestore:debugCompileClasspath'.
> Failed to transform firebase-auth-interop-19.0.2.aar (com.google.firebase:firebase-auth-interop:19.0.2) to match attributes {artifactType=android-symbol-with-package-name, org.gradle.status=release}.
> Could not download firebase-auth-interop-19.0.2.aar (com.google.firebase:firebase-auth-interop:19.0.2)
> Could not get resource 'https://dl.google.com/dl/android/maven2/com/google/firebase/firebase-auth-interop/19.0.2/firebase-auth-interop-19.0.2.aar'.
> Could not GET 'https://dl.google.com/dl/android/maven2/com/google/firebase/firebase-auth-interop/19.0.2/firebase-auth-interop-19.0.2.aar'.
> No such host is known (dl.google.com)
```
I tried to upgrade the dependencies
flutter pub upgrade
also I tried to change `compilerSdkVersion` to 33
but it doesn't work
some info in `app/build.gradle`
```
apply plugin: 'com.android.application'
apply plugin: 'kotlin-android'
apply plugin: 'com.google.gms.google-services'
apply from: "$flutterRoot/packages/flutter_tools/gradle/flutter.gradle"
android {
compileSdkVersion 33
ndkVersion flutter.ndkVersion
compileOptions {
sourceCompatibility JavaVersion.VERSION_1_8
targetCompatibility JavaVersion.VERSION_1_8
}
kotlinOptions {
jvmTarget = '1.8'
}
sourceSets {
main.java.srcDirs += 'src/main/kotlin'
}
minSdkVersion 19
targetSdkVersion flutter.targetSdkVersion
versionCode flutterVersionCode.toInteger()
versionName flutterVersionName
}
}
dependencies {
implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version"
}
```
and the dependencies in `android/build.gradle`
```
dependencies {
classpath 'com.android.tools.build:gradle:7.1.2'
// classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
// classpath 'com.google.gms:google-services:4.3.8'
// classpath 'com.android.tools.build:gradle:3.5.0'
classpath 'com.google.gms:google-services:4.3.2'
classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:1.3.41"
}
}
```
That is it if you have any solution please
|
<!-- language-all: sh -->
[zett42](https://stackoverflow.com/users/7571258/zett42) has provided the crucial pointer:
From the [`ConvertFrom-StringData`](https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.utility/convertfrom-stringdata) help topic (emphasis added), as of PowerShell 7.4.x:
> `ConvertFrom-StringData` supports escape character sequences that are allowed by conventional machine translation tools. That is, the **cmdlet can interpret backslashes (`\`) *as escape characters* in the string data** [...] You **can also preserve a literal backslash in your results by escaping it with a preceding backslash, like this: `\\`**. Unescaped backslash characters, such as those that are commonly used in file paths, can render as illegal escape sequences in your results.
Therefore, assuming that all `\` characters in your input file are meant to be interpreted _literally_:
```
# Read the whole file as ONE multiline string (-Raw), escape every literal
# backslash so ConvertFrom-StringData does not treat it as an escape prefix,
# then pull the FileLocation entry from the resulting hashtable.
# Fix: the original snippet was missing the opening parenthesis before
# Get-Content, leaving the ')' before .FileLocation unbalanced.
((Get-Content -Raw C:\<ConfigFolder>\basic.conf).Replace('\', '\\') |
    ConvertFrom-StringData).FileLocation
```
Note the use of `-Raw` with [`Get-Content`](https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.management/get-content) to ensure that the file content is read _in full_, as a single, (typically) _multiline_ string, which in turn ensures that `ConvertFrom-StringData` outputs a _single_ `[hashtable]` instance.
---
Given that this escaping need is both somewhat unexpected and inconvenient, it would be helpful if `ConvertFrom-StringData` itself supported _verbatim_ (literal) parsing as an _opt-in_:
* [GitHub issue #20418](https://github.com/PowerShell/PowerShell/issues/20418) asks for just that, by way of a future `-Raw` switch. This enhancement has been green-lit, but is yet to be implemented. |
GNU sed offers a simpler way to do it,
```
sed '0,/word/{//d}
0,/word/{//d}
0,/word/{//d}
'
```
or even, since there's only the one search,
```
sed '0,/word/{//d}; 0,//{//d}; 0,//{//d}'
```
|
I am trying to telnet a device while getting IP address from a file but getting the attached error. I am attaching code from both files as images.[myswitches.py](https://i.stack.imgur.com/AQ5Y9.png)[test.py](https://i.stack.imgur.com/LDHPZ.png)
```
root@UbuntuDockerGuest-1:~# ./Test.py
Enter your Telnet Username: david
Password:
Configuring Switch 192.168.122.71
Traceback (most recent call last):
File "./Test.py", line 15, in <module>
tn = telnetlib.Telnet(HOST)
File "/usr/lib/python2.7/telnetlib.py", line 211, in __init__
self.open(host, port, timeout)
File "/usr/lib/python2.7/telnetlib.py", line 227, in open
self.sock = socket.create_connection((host, port), timeout)
File "/usr/lib/python2.7/socket.py", line 557, in create_connection
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
socket.gaierror: [Errno -2] Name or service not known
``` |
Telnet function in Python |
|python|python-2.7| |
null |
{"OriginalQuestionIds":[5039875],"Voters":[{"Id":3959875,"DisplayName":"wOxxOm","BindingReason":{"GoldTagBadge":"google-chrome-extension"}}]} |
I'm developing the following Modal in a [Storybook](https://storybook.js.org/) story:
```
import React from 'react';
import type { Meta, StoryObj } from '@storybook/react';
import { Modal } from '@repo/ui/Modal';
import Title from '@repo/ui/Title';
import Field from '@repo/ui/Field';
import { DialogHeader } from '@repo/ui/radix/dialog';
type ItemModalProps = {
avatarUrl: string;
maxWidth?: 'sm' | 'md' | 'lg';
isOpen: boolean;
hasFullImage?: boolean;
onCloseModal?: () => void;
};
const ItemModal: React.FC<ItemModalProps> = ({
avatarUrl,
maxWidth = 'sm',
isOpen,
hasFullImage,
onCloseModal = () => {},
}) => (
<Modal
isOpen={isOpen}
onCloseModal={onCloseModal}
hasFullImage={hasFullImage}
maxWidth={maxWidth}
>
<DialogHeader className="relative h-60">
<img src={avatarUrl} className="absolute h-full w-full object-cover" />
</DialogHeader>
<div className="flex flex-col items-center space-y-4 p-6">
<Title>Test</Title>
<div className="grid grid-cols-2 gap-x-10 gap-y-2">
<Field label="Name" text="Name" />
</div>
</div>
</Modal>
);
const meta: Meta<typeof ItemModal> = {
title: 'Example/Modal',
component: ItemModal,
};
export default meta;
type Story = StoryObj<typeof ItemModal>;
export const DefaultModal: Story = {
args: {
avatarUrl: 'https://placehold.co/100',
isOpen: true,
hasFullImage: true,
onCloseModal: () => {},
},
};
```
How to change the code so that `onCloseModal` makes `isOpen` to become `false`? |
Updates to pgsodium encrypted values don't use specified key_id |
|postgresql|supabase|libsodium| |
null |
I know this is about advantages and not disadvantages, but a lock disadvantage is that it does not work well with async await. That is because the thread might be changed between before and after the await: you are not allowed to enter the lock on one thread and leave it on another. When this happens the lock will throw an exception. SemaphoreSlim does not have this issue.
https://stackoverflow.com/a/75781414 |
This problem happens when 'Player' goes in and out of sightRadius of 'Enemy' multiple times.
here is the bug code
```
// Scans a sphere of sightRadius around this enemy for a collider tagged
// "Player"; caches it in attackTarget and reports whether one was found.
bool FoundPlayer()
{
    // OverlapSphereNonAlloc fills the pre-allocated 'colliders' buffer and
    // returns how many entries are valid; slots past numColliders may still
    // hold stale colliders from earlier frames, which is why the loop below
    // must stop at numColliders.
    int numColliders = Physics.OverlapSphereNonAlloc(transform.position, sightRadius, colliders);
    Debug.Log(numColliders);
    for (int i = 0; i < numColliders; i++)
    {
        var target = colliders[i];
        if (target.CompareTag("Player"))
        {
            attackTarget = target.gameObject;
            return true;
        }
    }
    // No player in range this frame; clear the cached target.
    attackTarget = null;
    return false;
}
```
When Player is in the sightRadius of enemy, I get log: '3'; the enemy chase my player
[enter image description here](https://i.stack.imgur.com/qWPgA.png)
When Player is out of the sightRadius of enemy, I get log: '2';
[enter image description here](https://i.stack.imgur.com/uzR9l.png)
After I control the Player to go in and out of sightRadius several times.
[enter image description here](https://i.stack.imgur.com/cf3KG.png)
I get log '2' . And my enemy will not chase my Player anymore. But if I replay in the editor, Physics.OverlapSphere work well again.
This problem happens both in unity 2020 and 2022.
My code used OverlapSphereNonAlloc but I met the same problem, trying both OverlapSphereNonAlloc and OverlapSphere.
Here is the full code of enemy
```
using UnityEngine;
using UnityEngine.AI;
enum EnemyStatus
{
GURAD, // վ��
PATROL, // Ѳ�߹�
CHASE, DEAD
}
/// <summary>
/// Simple enemy AI. The enemy either guards its spawn point or patrols around
/// it until a GameObject tagged "Player" enters <c>sightRadius</c>, at which
/// point it switches to CHASE and follows the target via a NavMeshAgent.
/// State transitions are re-evaluated every frame in Update().
/// </summary>
[RequireComponent(typeof(NavMeshAgent))]
[RequireComponent(typeof(BoxCollider))]
public class EnemyController : MonoBehaviour
{
// Current FSM state (GURAD / PATROL / CHASE / DEAD).
private EnemyStatus enemyStates;
private NavMeshAgent agent;
private Animator animator;
[Header("Basic Settings")]
// Detection radius used by the overlap-sphere query in FoundPlayer().
public float sightRadius;
// The player currently being chased; null when no player is in sight.
private GameObject attackTarget;
// The enemy inspects at most 10 colliders per frame (fixed buffer size for the non-alloc query).
const int maxColliders = 10;
// Reusable hit buffer for Physics.OverlapSphereNonAlloc (avoids per-frame allocations).
private Collider[] colliders = new Collider[maxColliders];
// Base speed cached from agent.speed in Awake(); patrol uses half of it, chase uses the full value.
private float speed;
// If true the enemy stands guard (GURAD) when idle; otherwise it patrols (PATROL).
public bool isGuard;
[Header("Patrol State")]
// Half-extent of the square area (centered on the spawn point) from which random waypoints are drawn.
public float patrolRange;
// Current patrol destination.
private Vector3 wayPoint;
// Spawn position; patrol waypoints are sampled around this point.
private Vector3 centerPosition;
// Animator bool: walking (patrol) animation.
bool isWalk;
// Animator bool: entry condition of the Chase animator layer.
bool isChase;
// Animator bool: true while actively following the attack target.
bool isFollow;
/// <summary>Caches components and initializes the patrol center/waypoint to the spawn position.</summary>
private void Awake()
{
agent = GetComponent<NavMeshAgent>();
animator = GetComponent<Animator>();
speed = agent.speed;
wayPoint = centerPosition = transform.position;
}
/// <summary>Picks the idle state based on the inspector flag.</summary>
private void Start()
{
if (isGuard)
{
enemyStates = EnemyStatus.GURAD;
}
else
{
enemyStates = EnemyStatus.PATROL;
}
}
private void Update()
{
SwitchStates();
SwitchAnimation();
}
/// <summary>Pushes the FSM flags into the Animator parameters.</summary>
void SwitchAnimation()
{
animator.SetBool("Walk", isWalk);
animator.SetBool("ChaseState", isChase);
animator.SetBool("Follow", isFollow);
}
/// <summary>
/// Per-frame state machine: promotes to CHASE whenever a player is in sight,
/// then runs the behavior of the current state.
/// </summary>
private void SwitchStates()
{
if (FoundPlayer())
{
enemyStates = EnemyStatus.CHASE;
}
switch(enemyStates)
{
case EnemyStatus.GURAD:
break;
case EnemyStatus.PATROL:
// Setting isWalk unconditionally here was wrong: the enemy would keep
// playing the walk animation after reaching the waypoint.
// isWalk = true;
isChase = false;
isFollow = false;
// Arrived (within stoppingDistance): pick a new waypoint and walk there at half speed.
if (Vector3.Distance(transform.position, wayPoint) <= agent.stoppingDistance )
{
wayPoint = GetNewWayPoint();
agent.destination = wayPoint;
agent.speed = speed * 0.5f;
isWalk = false;
}
else
{
isWalk = true;
}
break;
case EnemyStatus.CHASE:
//TODO: back to gurad or patrol if player far away
//TODO: attack player
//TODO: attack animator
isWalk = false;
isChase = true;
if (FoundPlayer())
{
//TODO: chase player
agent.destination = attackTarget.transform.position;
agent.speed = speed;
isFollow = true;
}
else
{
isFollow = false;
// Stop at the current position when the target is lost.
agent.destination = transform.position;
}
break;
case EnemyStatus.DEAD:
break;
}
}
/// <summary>
/// Scans <c>sightRadius</c> for a collider tagged "Player". Sets
/// <c>attackTarget</c> on success and clears it otherwise.
/// NOTE(review): no LayerMask is passed, so the query collects ALL nearby
/// colliders, including this enemy's own. Because the buffer holds at most
/// <c>maxColliders</c> (10) hits, the Player can be silently dropped once
/// more than 10 colliders overlap the sphere — a plausible cause of the
/// "stops detecting after a while" symptom; confirm by logging the hits.
/// </summary>
bool FoundPlayer()
{
int numColliders = Physics.OverlapSphereNonAlloc(transform.position, sightRadius, colliders);
Debug.Log(numColliders);
for (int i = 0; i < numColliders; i++)
{
var target = colliders[i];
if (target.CompareTag("Player"))
{
attackTarget = target.gameObject;
return true;
}
}
attackTarget = null;
return false;
}
/// <summary>
/// Returns a random reachable patrol point around <c>centerPosition</c>.
/// </summary>
Vector3 GetNewWayPoint ()
{
Vector3 wayPoint = new Vector3(centerPosition.x + Random.Range(-patrolRange, patrolRange), centerPosition.y, centerPosition.z + Random.Range(-patrolRange, patrolRange));
NavMeshHit hit;
// Sample the nearest reachable point on the NavMesh for the enemy to go to.
// If no point can be found, SamplePosition returns false and the enemy stays put until the next sample next frame.
wayPoint = NavMesh.SamplePosition(wayPoint, out hit, patrolRange, 1) ? hit.position : transform.position;
return wayPoint;
}
/// <summary>Editor-only visualization of the sight radius and patrol area.</summary>
private void OnDrawGizmosSelected()
{
Gizmos.color = Color.blue;
Gizmos.DrawWireSphere(transform.position, sightRadius);
Vector3 size = new Vector3(patrolRange, patrolRange, patrolRange);
Gizmos.DrawWireCube(centerPosition, size);
}
}
```
I tried different APIs and different versions of Unity.
Physics.OverlapSphere couldn't detect my ‘Player’ |
|unity-game-engine|game-development| |
null |
the input is:

need output as:

Is this possible using just Excel formulas, or is a Python script required?
If a script is needed, can you help me write it?
Thanks.
my script:
My logic is to read each DataFrame column and compare its values from 1 to 20; if a value matches, write it out, otherwise write a blank.
```python
import pandas as pd
df = pd.read_excel(r"C:\Users\my\scripts\test-file.xlsx")
print(df)
for column in df.columns[0:]:
print(df[column])
``` |
|python|pandas| |
{"Voters":[{"Id":6036253,"DisplayName":"Matt Haberland"}]} |
As you can read in the [documentation of `list.sort`][1]
> This method modifies the sequence in place for economy of space when sorting a large sequence. To remind users that it operates by side effect, it does not return the sorted sequence (use sorted() to explicitly request a new sorted list instance).
[1]: https://docs.python.org/3/library/stdtypes.html#list.sort |
I have a Flutter app where I am using the two_dimensional_scrollables TableView widget to display a complex spreadsheet style display.
For the Flutter web app, running this in Chrome on Windows 10 I can't get the horizontal scroll to work. The vertical scroll works fine using the mouse wheel.
In the top level MaterialApp class, I have the 'scrollBehaviour' property set to:
/// Custom scroll behavior that enables drag-to-scroll with both touch and
/// mouse pointers (by default, MaterialScrollBehavior only allows touch drags).
class GdScrollBehavior extends MaterialScrollBehavior {
// Override behavior methods and getters like dragDevices.
@override
Set<PointerDeviceKind> get dragDevices => {
PointerDeviceKind.touch,
PointerDeviceKind.mouse,
// Add other PointerDeviceKind values (stylus, trackpad, ...) as needed.
};
}
I suspect that this problem is something to do with setting up a Gesture behaviour in TableView to have a horizontal drag recognized as a 'scroll horizontally' instruction, but the widget docs are not too clear on how to set this up.
Any suggestions?
|
Flutter two_dimensional_scrollables Web app Chrome - cannot get horizontal scroll to work? |
|flutter| |
I am prevented from inserting data into a column because of a foreign key violation. However, the foreign key is a valid reference |
|mysql| |
null |
# Confusion about getting to the bottom of a jq parse.
I'm learning; I appreciate your help.
Two arrays here, both with the same type of output result — just integer values (spaced); the same applies for string values.
Ex: 1 2 3 4 5
A multi-nested weather dump:
I am trying to understand how to return just an index ([0...]) value and/or an array of the values to sort — in JSON and/or bash. I've tried, and yes, I have read the man page; I'm loving jq, I'm just stuck.
#
#Query 1
drp=$(cat "$HOME/..." | jq '.DailyForecasts[].Day.RainProbability | .')
"Day": {
"Icon": 2,
"IconPhrase": "Mostly sunny",
"HasPrecipitation": false,
"ShortPhrase": "Mostly sunny and breezy",
"LongPhrase": "Mostly sunny and breezy",
"PrecipitationProbability": 1,
"ThunderstormProbability": 0,
"RainProbability": 1,
"SnowProbability": 0,
"IceProbability": 0,
"Wind": {
"Speed": {
"Value": 15,
"Unit": "mi/h",
"UnitType": 9
...
}
#
#Query 2
I've tried to map, etc., a number of ways (e.g. going for the Grass value).
apg=$(cat "$HOME/..." | jq '.DailyForecasts[].AirAndPollen | .[1] | .Value | .')
"AirAndPollen": [
{
"Name": "AirQuality",
"Value": 44,
"Category": "Good",
"CategoryValue": 1,
"Type": "Ozone"
},
{
"Name": "Grass",
"Value": 12,
"Category": "Moderate",
"CategoryValue": 2
},
...
]
|
JQ JSON - Values to Array |
|json|bash|jq| |
If your XXXWidget and XXXWidgetIntents both access privacy APIs, you need to add two `PrivacyInfo.xcprivacy` files for them.
I also checked the widget targets when adding "PrivacyInfo.xcprivacy". As the screenshot shown below:
![screenshot][1]
Or you can check the "Build Phases - Copy Bundle Resources" of Widget & WidgetIntents to see if the `PrivacyInfo.xcprivacy` file is included.
[![enter image description here][2]][2]
I'm not quite sure if this is necessary (i.e., adding "PrivacyInfo.xcprivacy" as a resource file), but I don't get the warning email anymore. Hope it helps.
[1]: https://i.stack.imgur.com/8xYhj.png
[2]: https://i.stack.imgur.com/9txjA.png |
```
from keras.models import Sequential
from keras.layers.core import Dense, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
`
`
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-35-e0ce7722f83a> in <cell line: 2>()
1 from keras.models import Sequential
----> 2 from keras.layers.core import Dense, Flatten
3 from keras.layers.convolutional import Conv2D, MaxPooling2D
ModuleNotFoundError: No module named 'keras.layers.core'
---------------------------------------------------------------------------
NOTE: If your import is failing due to a missing package, you can
manually install dependencies using either !pip or !apt.
To view examples of installing some common dependencies, click the
"Open Examples" button below.
```
use Chat GPT the answer is
```
The error indicates that the module keras.layers.core cannot be found, suggesting an issue with the Keras installation or compatibility.
As of Keras version 2.7.0 (the latest stable version as of my last update), the module organization has changed, and keras.layers.core is no longer used. Instead, core layers like Dense and Flatten are directly available under keras.layers.
```
|
SAWarning when using a Mixin to make SQLAlchemy Objects |
|python-3.x|sqlalchemy| |
You shouldn't call setState in the middle of the component's rendering process. This will cause the component to [become impure][1]. [Strict mode][2] helps you catch this bug early by causing the component to render twice, highlighting the inconsistency. What happened was:
> Component first render -> react see useState("init") -> reach the if
> statement -> setState("goBack") -> the component re-render before
> reaching the `useEffect` -> rerender -> state is "goBack" -> run the effect
> -> navigate(-1) -> Strict mode makes the component run the second time -> navigate(-1) again.
You can use this to run with and without StrictMode to see the detail:
// Minimal repro component: logs the render/effect ordering to show how
// calling setState during render, combined with StrictMode's deliberate
// double-invocation, makes navigate(-1) fire twice. navigate is commented
// out so the log sequence can be observed without actually leaving the page.
const GoBack = () => {
// const navigate = useNavigate();
const [state, setState] = useState("init");
console.log("before setState:", state);
// Anti-pattern on purpose: setState during render makes the component
// impure and schedules an immediate re-render before effects run.
if (state === "init") {
console.log("setState");
setState("goBack");
}
console.log("after setstate:", state);
useEffect(() => {
console.log("inside useEffect:", state);
if (state === "goBack") {
console.log("navigate(-1)", state);
// navigate(-1);
}
}, [state]);
return null;
};
Move the if check into the `useEffect` and the problem is gone. If your intend was to prevent users from seeing anything on the screen then you can use [useLayoutEffect][3]
[1]: https://react.dev/learn/keeping-components-pure
[2]: https://react.dev/reference/react/StrictMode#fixing-bugs-found-by-double-rendering-in-development
[3]: https://react.dev/reference/react/useLayoutEffect |
I grabbed an image and added a title (`MUPPETRY`) and description (`PUPPETRY`) in **Adobe Lightroom** using the metadata panel like this and then exported as JPEG:
[![enter image description here][1]][1]
In case you are wondering, I chose silly names for near certain uniqueness.
---
It seems that **Lightroom** puts the title and caption in the IPTC section. You can tell that by using:
exiftool -G0 export.jpg | grep UPPETRY
[EXIF] Image Description : PUPPETRY
[IPTC] Object Name : MUPPETRY
[IPTC] Caption-Abstract : PUPPETRY
[XMP] Title : MUPPETRY
[XMP] Description : PUPPETRY
On Windows, use `FINDSTR` instead of `grep`.
You can extract them with `exiftool` like this:
exiftool -IPTC:Caption-Abstract -IPTC:ObjectName export.jpg
Caption-Abstract : PUPPETRY
Object Name : MUPPETRY
Or, more succinctly:
exiftool -Title -Description export.jpg
Title : MUPPETRY
Description : PUPPETRY
Or, if you just want the value in a `bash` variable, use the `-short` option like this:
title=$(exiftool -s3 -Title export.jpg)
echo $title
MUPPETRY
---
If you want to see IPTC metadata with **ImageMagick**, use this to discover all the data:
magick export.jpg IPTCTEXT:-
1#90#City="%G"
2#0="�"
2#5#Image Name="MUPPETRY"
2#120#Caption="PUPPETRY"
You can then see the field numbers of the ones you want and extract just those:
identify -format "%[IPTC:2:5]" export.jpg
MUPPETRY
identify -format "%[IPTC:2:120]" export.jpg
PUPPETRY
[1]: https://i.stack.imgur.com/0mHLK.jpg |
I am new to sendmail, installed the s-nail package on RHEL 8 VM hosted on AWS. I configured the sendmail.mc file but still unable to get mail.
Command used,
echo "First mail" | mail -v -s "Important mail" jadeallice302@gmail.com
It shows an error like below, it looks like mail is accepted for delivery but connection got timed out.
> Mar 31 10:35:47 ip-172-31-8-189 sendmail[15076]: 42VAZlCI015076:
> to=jadeallice302@gmail.com, ctladdr=ec2-user (1000/1000),
> delay=00:00:00, xdelay=00:00:00, mailer=relay, pri=30130,
> relay=[127.0.0.1] [127.0.0.1], dsn=2.0.0, stat=Sent (42VAZlxs015077
> Message accepted for delivery)
>
> Mar 31 10:36:47 ip-172-31-8-189 sendmail[15079]: 42VAZlxs015077:
> to=<jadeallice302@gmail.com>,
> ctladdr=<ec2-user@ip-172-31-8-189.ap-south-1.compute.internal>
> (1000/1000), delay=00:01:00, xdelay=00:01:00, mailer=relay,
> pri=120469, relay=smtp.gmail.com. [142.250.4.108], dsn=4.0.0,
> stat=Deferred: Connection timed out with smtp.gmail.com.
|
I updated my ElectronJs version from 27.3.8 to 28.0.0 and encountered an issue where my application no longer launches. The project initializes successfully (creates app), but the application does not proceed further. On Windows, nothing happens in the terminal, while on Linux, the following errors occur:
```bash
1. /snap/core20/current/lib/x86_64-linux-gnu/libstdc++.so.6: version 'GLIBCXX_3.4.29' not found (required by /lib/x86_64-linux-gnu/libproxy.so.1)
2. Failed to load module: /home/admin-msk/snap/code/common/.cache/gio-modules/libgiolibproxy.so
```
My operating systems are Windows and Linux. I have also checked the breakpoints in version 28 of Electron and did not find any changes that could impact my application.
I have verified the dependencies and ensured compatibility with the new Electron version. I have also checked the configuration of the project and confirmed that it matches the requirements of the new Electron version. Additionally, I have attempted to roll back to the previous stable version of Electron and gradually introduce changes to pinpoint the issue.
I expected the application to launch successfully after updating to ElectronJs version 28.0.0 without any major issues.
The project initializes without errors, but the application fails to launch as expected on both Windows and Linux, displaying the mentioned errors.
**How can I resolve this issue and successfully launch the application after updating ElectronJs?** |
Issue with launching application after updating ElectronJs to version 28.0.0 on Windows and Linux |
|linux|windows|electron|electron-builder| |
null |
Somehow Netbeans prints German warnings to the console when building a project. Likely because my laptop is set to German, but why it is exactly I am not sure. The terminal is in English, the Java JVM locale is set to English and the netbeans.conf is also set to English.
* JVM Local > java -Duser.language=en -Duser.region=US
* netbeans.conf > netbeans_default_options="... -J-Duser.language=en -J-Duser.region=US"
But still I get the following console warnings in German, which is annoying when Googling the meaning of these warnings:
```
Systemmodulpfad nicht zusammen mit -source 11 festgelegt
io/ost/finance/io/TransactionWriterForDone.java:[43,58] Nicht-varargs-Aufruf von varargs-Methode mit ungenauem Argumenttyp für den letzten Parameter.
Führen Sie für einen varargs-Aufruf eine Umwandlung mit Cast in java.lang.Object aus
Führen Sie für einen Nicht-varargs-Aufruf eine Umwandlung mit Cast in java.lang.Object[] aus, um diese Warnung zu unterdrücken
```
Anybody has an answer? |
How do I set isOpen false in the following Storybook story? |
|reactjs|typescript|storybook| |
In react 17 they tried to hide the fact that they were doing a double render in strict mode by temporarily overwriting the `console.log` function to be a no-op during that second render. So it is rendering twice, you just can't log it out. [This caused confusion and was eventually removed.][1]
In the following code i have added a variable for counting the number of renders. You'll notice that on every click, the count increases by 2.
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-html -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Document</title>
<script
src="https://unpkg.com/react@17/umd/react.development.js"
crossorigin
></script>
<script
src="https://unpkg.com/react-dom@17/umd/react-dom.development.js"
crossorigin
></script>
<script src="https://unpkg.com/babel-standalone@6/babel.min.js"></script>
<script type="text/babel">
console.log(React.version, ReactDOM.version);
let renderCount = 0;
// Demo component: increments the module-level renderCount on every render
// and logs it, proving that React 17 StrictMode renders twice per update
// (even though the second render's console output used to be suppressed).
class App extends React.Component {
state = {
count: 0,
};
render() {
// Count every invocation of render(), including StrictMode's second pass.
renderCount++;
console.log("render", renderCount);
return (
<div>
<button
onClick={() => {
this.setState({
count: this.state.count + 1,
});
}}
>
click
</button>
<div>count {this.state.count}</div>
</div>
);
}
}
ReactDOM.render(
<React.StrictMode>
<App />
</React.StrictMode>,
document.getElementById("root")
);
</script>
</head>
<body>
<div id="root"></div>
</body>
</html>
<!-- end snippet -->
[1]: https://github.com/facebook/react/issues/21783 |
I have a table like below.
I want to have a cell where I can get the number depending on the current month.
For example, today is 31/3/2024. I would like the cell to be 4
Tomorrow when it is 1/4/2024. I would like it to automatically update to 3425
I don't want to use app script
| Date | Number |
| -------- | -------------- |
| 3/2024| 4|
| 4/2024| 3425|
| 5/2024| 6|
| 6/2024| 8|
| 7/2024| 456|
| 8/2024| 2343|
| 9/2024| 1234534|
| 10/2024| 546|
| 11/2024| 6|
| 12/2024| 234|
| 1/2025| 5634|
| 2/2025| 3454|
|
Look up a value using month of the date |
|google-sheets|google-sheets-formula| |
There are a couple of things here. I don't think `sox` supports "tailing" a file, but I know `mplayer` does. However, in order to have better control over the pipeline, using `gstreamer` might be the way to go, as it has a parallel event stream built into its effects pipeline.
If you want to stick with `sox`, I would first get rid of the redundant second invocation of `sox`, e.g.:
rtl_fm -M wbfm -f 88.1M -d 0 -s 22050k -l 310 |
sox -ts16 -r8k -c1 -V1 - some_file.mp3
And in order to play the stream while transcoding it, you could multiplex it with `tee`, e.g.:
rtl_fm -M wbfm -f 88.1M -d 0 -s 22050k -l 310 |
tee >(sox -ts16 -r8k -c1 -V1 - some_file.mp3) |
play -ts16 -r8k -c1 -
Or if you want them to be separate processes:
<!-- language: shell -->
# Save stream to a file
rtl_fm -M wbfm -f 88.1M -d 0 -s 22050k -l 310 > some_file.s16
# Encode stream
sox -ts16 -r8k -c1 -V1 some_file.s16 some_file.mp3
# Start playing the file at 10 seconds in
tail -c+$((8000 * 10)) -f some_file.s16 |
play -ts16 -r8k -c1 - |
Working bilingual, Shortcut Ctrl+' used to work for me for years in both languages (English and Hebrew), but not in 2016. Is there a way to make Ctrl+, (the second language equivalent of Ctrl+') do the same copy-from-previous-record action?
This is an all Access feature - not limited to a specific field or row or table or form or application.
Thanks! |
If all parameters are set to make it completely transparent, making it impossible to see, will Unity still render it? If yes, are there built-in Unity functions to prevent this? |
Does Unity render invisible material? |
|unity-game-engine|gpu|render| |
I suggest to follow the steps described in the official documentation when creating/using areas: [Areas in ASP.NET Core][1]
When following the documentation and adding an area you see the following `ScaffoldingReadMe.txt`:
```
Scaffolding has generated all the files and added the required dependencies.
However the Application's Startup code may require additional changes for things to work end to end.
Add the following code to the Configure method in your Application's Startup class if not already done:
app.UseEndpoints(endpoints =>
{
endpoints.MapControllerRoute(
name : "areas",
pattern : "{area:exists}/{controller=Home}/{action=Index}/{id?}"
);
});
```
This is exactly what intended to make the routing process work correctly. In your case use the following code:
``` c#
app.UseRouting();
app.UseAuthorization();
app.MapControllerRoute(
name: "areas",
pattern: "{area:exists}/{controller=Home}/{action=Index}/{id?}"
);
app.MapControllerRoute(
name: "default",
pattern: "{controller=Home}/{action=Index}/{id?}");
app.Run();
```
The pattern `areas` described above is generic and will be used for another areas too.
Don't forget to use correct [Area folder structure][2] too.
[1]: https://learn.microsoft.com/en-us/aspnet/core/mvc/controllers/areas?view=aspnetcore-8.0
[2]: https://learn.microsoft.com/en-us/aspnet/core/mvc/controllers/areas?view=aspnetcore-8.0#area-folder-structure |
`list.sort()` does not return any value. `print(list.sort())` prints `None` because that is the value that represents no value. |
If you're writing something that does "the same thing, with just one thing changing at each step", that's a loop. You don't use separate `if` statements. Not even "when you're lazy": being lazy is an _excellent_ property to have when you're a programmer, because it means you want to do as little work as possible. Of course, in this case that means "why am I even doing this, [`npm install marked`](https://www.npmjs.com/package/marked), oh look I'm done", but even if you insist on implementing a markdown parser yourself (because sometimes you just want to write code to see if you can do it) you don't use a sequence of `if` statements, it takes more time to write, and takes more time to maintain/update.
However, even if you _do_ use `if` statements, resolve them either such that you handle "the largest thing first", to ensure there's no fall-through, _or_ with if-else statements, so there's no fall-though. (And based on your question about whether to use a switch: why stop there? Why not just use a mapping object with `#` sequences as keys instead so the lookup runs in O(1)?)
However, you don't need any of this, because what you're really doing is simple text matching, so you can use the best tool in the toolset for that: you can trivially get both the `#` sequence and "remaining text" with a dead simple regex, and then generate the replacement HTML [using the captured data](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace#replacement):
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-js -->
/**
 * Converts a markdown document to HTML one line at a time.
 * Splits on newlines, delegates to the multi-line pass, and re-joins.
 */
function markdownToHTML(doc) {
  const sourceLines = doc.split(`\n`);
  const htmlLines = convertMultiLineMD(sourceLines);
  return htmlLines.join(`\n`);
}

/**
 * Pass for multi-line constructs (tables, lists, fenced code, ...).
 * Intentionally a pass-through for now: it only delegates to the
 * inline converter, reserving a hook for future multi-line handling.
 */
function convertMultiLineMD(lines) {
  return convertInlineMD(lines);
}

/**
 * Pass for single-line (inline) markdown. Currently converts headings:
 * one or more '#' characters, required whitespace, then the heading text.
 * Trailing whitespace is captured separately so it is excluded from the text.
 */
function convertInlineMD(lines) {
  const headingPattern = /^(#+)\s+(.+?)(\s+)?$/;
  return lines.map((rawLine) => {
    const converted = rawLine.replace(headingPattern, (_, hashes, text) => {
      const level = hashes.length;
      return `<h${level}>${text}</h${level}>`;
    });
    // Bare-text <p> wrapping, bold, italic, etc. would follow here.
    return converted;
  });
}
// And a simple test based on what you indicated:
const docs = [`## he#llo\nthere\n# yooo `, `# he#llo\nthere\n## yooo`];
docs.forEach((doc, i) => console.log(`[doc ${i + 1}]\n`, markdownToHTML(doc)));
<!-- end snippet -->
However, this is also a naive approach to writing a transpiler, and will have dismal runtime performance compared to writing a DFA based on the markdown grammar (the "markup language specification" grammar, i.e. the rules that say which tokens can follow which other tokens), where you run through your document by tracking what kind of token we're dealing with, and convert on the fly as we pass token terminations.
(This is, in fact, how regular expressions work: they generate a DFA from the pattern you specify, then run the input through that DFA about as fast as can be implemented)
That's wildly beyond the scope of this answer, but worth digging into if you're doing this just to see if you can do it: anyone can write code "that works" but is extremely inefficient, so that's not an exercise that's going to improve your skill as a programmer. |
Set Netbeans Console to English |
|java|netbeans|locale| |
I am trying to create a structure `Student` which contains q substructures named `Course`. Each `Course` is a structure with a credit and point `int` values.
How do I set up my `Student` structure to have an integer number of `Course` structures within it? Thanks
```
struct Student{
int q;
Course course[q];
};
struct Course{
int credit;
int point;
};
```
I tried this but VSC is telling me it is wrong.
Edit error:
```
#include <iostream>
#include <math.h>
#include <stdlib.h>
#include <vector>
using namespace std;
struct Course{
string name;
int credit;
int point;
};
typedef struct Student{
vector<Course> courses;
};
```
Error text!
'Course': undeclared identifiercpp(C2065)
'std::vector': 'Course' is not a valid template type argument for parameter '_Ty'cpp(C2923)
'std::vector': too few template argumentscpp(C2976)
class std::vector<Course>
|
There is no `name` argument in `list()`. You need to give space id to `parent` as `"spaces/{space}"`.
Use [API Reference][1] whenever in doubt.
[1]: https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces.messages/list |
I have been tinkering around with the {waterfalls} package lately, which makes it easy to create and customize waterfall plots by leveraging {ggplot2} and most of its logic under the hood. The plots can easily be converted to {plotly} via the `ggplotly()` function afterwards.
If requested, the main function `waterfalls::waterfall()` calculates an overall sum on the fly, so there is no need to do the math manually. I am only keeping the last row from your sample data for my second, more advanced example on manual label placement.
```r
library(plotly)
## sample data
df = data.frame(
Variable = c("Group 1", "A", "B", "C", "D", "E", "Group 2")
, Value = c(10, 2, 2, 2, 1, 1, 0)
, Color = c("blue", rep("green", 5L), "yellow")
)
## convert column 'Variable' to `factor`
df$Variable = factor(
df$Variable
, levels = unique(df$Variable)
)
```
The following code snippet creates a basic waterfall plot. As mentioned earlier, there is no need for manual calculations as long as `calc_total = TRUE`, so the last row of data is discarded before generating the plot.
```
## discard row with overall sum
sbs = utils::head(
df
, n = -1L
)
## generate waterfall plot
p0 = waterfalls::waterfall(
sbs
, rect_text_labels = paste0("$", sbs$Value)
, calc_total = TRUE
, total_axis_text = "Group 2"
, total_rect_text = paste0("$", sum(sbs$Value))
, total_rect_color = "yellow"
, total_rect_text_color = "black"
, total_rect_border_color = "transparent"
, fill_colours = sbs$Color
, fill_by_sign = FALSE
, rect_border = "transparent"
, draw_axis.x = "front"
) +
labs(
x = NULL
, y = NULL
) +
scale_y_continuous(
labels = scales::dollar_format()
, expand = c(0, 0)
) +
theme_minimal()
ggplotly(p0)
```
[![base][1]][1]
Let us also explore a second, more advanced example where the aim is to place labels at the top of each bar as shown in your example. Although there is an argument 'put_rect_text_outside_when_value_below' that would allow placing labels outside the boxes, it does not seem to integrate with 'calc_total' – at least not with {waterfalls} package version `‘1.0.0’`. Instead, I am using `ggplot2::geom_text()` for finer control over customized text annotations.
```r
## create custom labels
df$Text = ifelse(
df$Variable == "Group 2"
, cumsum(df$Value)
, df$Value
)
## add offset for label placement (+5% of total)
df$Position = cumsum(df$Value) +
0.05 * sum(df$Value)
## generate advanced waterfall plot
p1 = waterfalls::waterfall(
sbs
# "turn off" text labels
, rect_text_labels = rep("", nrow(sbs))
, calc_total = TRUE
, total_axis_text = "Group 2"
# "turn off" total text label
, total_rect_text = ""
, total_rect_color = "yellow"
, total_rect_border_color = "transparent"
, fill_colours = sbs$Color
, fill_by_sign = FALSE
, rect_border = "transparent"
, draw_axis.x = "front"
) +
geom_text(
aes(
x = Variable
, y = Position
, label = paste0("$", Text)
)
, data = df
, inherit.aes = FALSE
) +
labs(
x = NULL
, y = NULL
) +
scale_y_continuous(
labels = scales::dollar_format()
) +
theme_minimal()
ggplotly(p1)
```
[![advanced][2]][2]
[1]: https://i.stack.imgur.com/VqISs.png
[2]: https://i.stack.imgur.com/A6rNI.png |
|glsl|shader| |
The way FTP works is to have a constant connection on a control channel, port 21, and to set up and tear down connections to pass data over, on some random port decided by the server or the client.
In PORT (active) mode the client decides what port it will open; it tells the server and the server connects to the client. This isn't very good these days because most clients are likely behind some NAT device that won't forward the connection on.
In PASV (passive) mode the server decides what port to open, and the client connects to it. This is much more compatible with the majority of client-side NAT devices, and the range of ports the server will open is typically configured by the network admin responsible for the server, who is also responsible for the firewall on the server's network (so they can configure the forwarding rules to work out).
You're using `quote` a lot in your log - this sends a raw command to the server but the client won't necessarily take any further interest in it - just because you `quote PASV` to request the server go to passive mode doesn't mean it will switch the client over to knowing it has to operate in passively so even when the server does switch to expecting a connection, the client won't connect.
If you're using the Windows FTP command-line client with a server that requires passive connections, I'm afraid you're sunk — the client doesn't support passive operation. I recommend seeking an alternative client with a command-line version that does support passive mode.
**The error is probably caused by the way MetalLB's IP Adress pool is configured.**
In your current configuration, MetalLB automatically assigns an external IP address from the range provided in the IPAddressPool. The service is accessible from within the cluster. The connection failed because the VM itself has a route for that IP range, but your local device does not have a route for the IP range that MetalLB uses.
**To resolve your issue :**
- Verify whether the IP range listed in the IPAddressPool is a publicly routable range. Private IP ranges cannot be used for external access. Update the IPAddressPool with a public IP range supplied by your cloud provider or hosting environment if the present range is private.
- Verify again that the firewall rules on the virtual machines permit traffic on port 80.
**This solution should be working for your on-premises or cloud-based Kubernetes cluster since you aren't using Minikube or any similar product.**
Refer to this [gitlink](https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/baremetal.md) for more information
|
I am using node server for the backend. Connection to Cassandra is done using cassandra-driver nodejs.
Connection is done as follows:
const client = new cassandra.Client({
contactPoints: ['h1', 'h2'],
localDataCenter: 'datacenter1',
keyspace: 'ks1'
});
1. In contactPoints, do I just need to add 'seed' nodes or can I can add any nodes from the datacenter?
2. How to connect to multiple datacenter from the same nodejs backend service? Do I need to run separate backend service for each datacenter?
3. Any recommended way for setting backend server such that bandwidth can be minimized between Cassandra nodes and backend server
|
Apache Cassandra Node Driver Connection |
The `sessionStorage` is a browser API that is not available in the server-side rendering (SSR) environment of Next.js. This is why you're getting the `ReferenceError: sessionStorage is not defined` error.
To access the `sessionStorage` in Next.js, you need to use the useEffect hook and check if `window` and `window.sessionStorage` are available before attempting to access `sessionStorage`
```js
"use client";
import { useEffect, useState } from "react";
import { useRouter } from "next/navigation";
import { valJwt } from "@/libs/jwtSec";
// Higher-order component: wraps a page component and renders it only when a
// JWT stored in sessionStorage validates; invalid sessions are redirected to "/".
export default function isAuth(Component: any) {
return function IsAuth(props: any) {
const [token, setToken] = useState<string | null>(null);
const router = useRouter();
// Read the token on the client only: sessionStorage does not exist during
// Next.js server-side rendering, so access it inside an effect, guarded
// by a `window` check.
useEffect(() => {
if (typeof window !== "undefined") {
const tok_ses = window.sessionStorage.getItem("token_test");
if (tok_ses) {
setToken(tok_ses);
}
}
}, []);
// Re-validate whenever the token changes; bounce invalid sessions to "/".
useEffect(() => {
if (token) {
const auth = valJwt(token);
if (!auth) {
router.push("/");
}
}
}, [token, router]);
// Render nothing until a token has been read (this also covers the SSR
// pass and the case where no token exists in sessionStorage).
if (!token) {
return null;
}
return <Component {...props} />;
};
}
``` |
I know the question was asked 5 years ago. Answering, in case someone else runs into it.
We noticed this problem when we changed httpRuntime targetFramework from 4.6 to 4.8 in the web.config.
## How to address this issue?
Add the following to your web.config:
```
<configuration>
<appSettings>
<add key="AppContext.SetSwitch:Switch.System.IdentityModel.DisableCngCertificates" value="true" />
</appSettings>
</configuration>
```
## What is going on?
This exception is thrown from the RsaEncryptionCookieTransform.Encode method:
https://referencesource.microsoft.com/#System.IdentityModel/System/IdentityModel/RsaEncryptionCookieTransform.cs,303
```
RSACryptoServiceProvider provider = encryptionKey as RSACryptoServiceProvider;
if ( provider == null )
{
throw DiagnosticUtility.ThrowHelperInvalidOperation( SR.GetString( SR.ID6041 ) );
}
```
[On line 72][1] This class calls [X509Util.EnsureAndGetPrivateRSAKey][2], which calls [CngLightup.GetRSAPrivateKey][3] when DisableCngCertificates is false.
[CngLightup.GetRSAPrivateKey][3] creates [RCACng][4], which is derived from RSA, but not [RSACryptoServiceProvider][5]
Thanks,
--Vladimir
[1]: https://referencesource.microsoft.com/#System.IdentityModel/System/IdentityModel/RsaEncryptionCookieTransform.cs,72
[2]: https://referencesource.microsoft.com/#System.IdentityModel/System/IdentityModel/X509Util.cs,44
[3]: https://referencesource.microsoft.com/#System.Core/System/Security/Cryptography/X509Certificates/RSACertificateExtensions.cs,89
[4]: https://learn.microsoft.com/en-us/dotnet/api/system.security.cryptography.rsacng
[5]: https://learn.microsoft.com/en-us/dotnet/api/system.security.cryptography.rsacryptoserviceprovider |
I have this app script code that first deletes any existing graphs then I want it to put the x-axis as xRange and the series as yValues (To find my ranges, I used some other functions but they work correctly and get my wanted ranges) (I want the graph on the Graph spreadsheet)
```
function createLineGraph(){
var ss = SpreadsheetApp.getActiveSpreadsheet();
var attendance = ss.getSheetByName("Attendance");
var graph = ss.getSheetByName("Graph");
// Delete any existing line charts in the sheet
var chts=graph.getCharts();
for(var i=0;i<chts.length;i++){
graph.removeChart(chts[i]);
}
var xRange = attendance.getRange("B1:"+getColumnLetter(findTotalAttendanceColumn(attendance)-1)+"1");
var yValues = attendance.getRange("B"+findTotalAttendanceRow(attendance)+":"+getColumnLetter(findTotalAttendanceColumn(attendance)-1)+findTotalAttendanceRow(attendance));
}
```
For example: If the xRange was "hello","bye","idk" and the yValues were 2,8,5, then the line graph would look like this:
[![enter image description here][1]][1]
[1]: https://i.stack.imgur.com/mxdCx.png
Could someone please tell me how to do it? Thanks |
To access your data, you simply need to access the `0` (string, not number) key of the returned object, so you could just do:
```
this.job = job['0'];
```
---
I notice you have nested subscribes, which are not desirable, because without a reference to the subscription object, you have no way to unsubscribe. This means that when you leave your component, the subscription is still active, possibly running your logic after the component has been destroyed.
There are a few ways to deal with this, but I'll mention what I find to be the simplest code. The idea is to define an observable that emits exactly the data your view needs, in this case, the `Job`. Then, instead of explicitly subscribing in the component's controller, we can leverage the `async` pipe in the template which will subscribe to the observable and also unsubscribe when the component is destroyed.
Here's an example:
```typescript
export class JobDetailComponent {
job$: Observable<Job> = this.activeRoute.params.pipe(
switchMap(params => this.apiService.getJob(params.id)),
map(response => response['0']),
);
constructor(
private apiService: ApiService,
private activeRoute: ActivatedRoute
) { }
}
```
You'll notice in the above code we simply declare the `job$` observable. We start with the "active route params observable" and pipe it to the "get job observable".
If you're not familiar with `switchMap`, it simply takes the incoming value and maps it to an observable (*in this case the call to `apiService.getJob()`*). The emissions from this "inner observable" are then emitted.
We then use the `map` operator to transform the full shape into the value at the `"0"` key of the object.
Now that we have an `Observable<Job>`, we can simply use the async pipe in the template:
```html
<div *ngIf="job$ | async as job">
{{ job.name }} ({{ job.id }})
</div>
``` |
def function1(dd:pd.DataFrame):
if dd.query("col3==1").pipe(len)>1:
dd.loc[dd.query("col3==1").index[1]:,'col3']=0
return dd
df1=a.reset_index()
df1.columns=['col1','col2','col3']
df1.groupby(['col1',df1.col3.eq(0).cumsum()|df1.col3.diff().gt(0).cumsum()],as_index=0,group_keys=0).apply(function1)
col1 col2 col3
0 15891 2018-07-28 1
1 15891 2018-08-28 0
2 16063 2018-11-28 0
3 16063 2018-12-28 0
4 16063 2019-01-28 0
5 16063 2019-02-28 0
6 16063 2019-03-28 0
7 16063 2019-04-28 0
8 16063 2019-05-28 0
9 16203 2018-12-28 0
10 16203 2019-01-28 1
11 16203 2019-02-28 0
12 16203 2019-03-28 0
13 16203 2019-04-28 0
14 16203 2019-05-28 0
15 16502 2018-09-28 0
16 16502 2018-10-28 1
17 16502 2018-11-28 0
18 16502 2018-12-28 0
19 16502 2019-01-28 0 |
You may try a modified version of the previous awk script here.
```sh
cat parse.awk
BEGIN { OFS="\t"; FS="=" }
/^[[:blank:]]*public / {++n}
NF==2 && /^[[:blank:]]*String | pac *=/ {
gsub(/^[[:blank:]]*("|new +)|[()";]+$/, "", $2)
if ($1 ~ / (server|pac)/)
col1[n] = $2
else if ($1 ~ / method/)
col2[n] = $2
}
END {
for (i=1; i<=n; ++i)
print col1[i], col2[i]
}
```
Then use it as:
```sh
awk -f parse.awk file
AAA retrieveA
BBB retrieveB
CCC
retrieveD
EEE retrieveE
FFFFF retrieveF
GGGGGG getG
HHHHHHH getH
``` |
|node.js|cassandra|cassandra-driver| |
Actually there seems to be a misconception about ```cv2.imread()```, cv2 reads the image in BGR format Yes, **but** when prompted to save the image or showing it using ```cv2.imshow``` it correctly renders them, the problem is in matplotlib partially. Because cv2 uses BGR color format and matplotlib uses RGB color format.
So what you are essentially doing is:
1. Load an image in BGR
2. Convert the image into RGB
3. Save it as RGB.
4. Load it again as BGR
5. Viewing it inverted with matplotlib.
A better solution would be to remove ```image3``` as below :
```Python
from matplotlib import pyplot as plt
import cv2
image1 = cv2.imread('rgb.png')
image2 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
cv2.imwrite('test_img_bgr_cv.png', image2)
plt.figure(figsize=(10, 10))
plt.subplot(1, 3, 1)
plt.imshow(image1)
plt.title('Image 1')
plt.figure(figsize=(10, 10))
plt.subplot(1, 3, 2)
plt.imshow(image2)
plt.title('Image 1')
plt.show()
```
----------
**EDIT:**
Regarding ```cv2.imwrite``` then use ```Image.open()``` although I believe it would add necessary complications, you can use the following :
```python
import cv2
from PIL import Image
image1 = cv2.imread('rgb.png')
image2 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
cv2.imwrite('test_img_bgr_cv.png', image2)
im = Image.open('test_img_bgr_cv.png') # Load the Image normally
b, g, r = im.split() # Split the color channel, change to b, g, r, a in case of need for Alpha.
im = Image.merge("RGB", (r, g, b)) # Reorder the color channels the way intended.
# .show() is for "testing" only
im.show()
```
The other one is more simple just flip the return of the ```cv2.imread()``` :
```Python
import cv2
from PIL import Image
image = cv2.imread('rgb.png')[:, :, ::-1] # The slicing here is done to avoid the height and width of image just reorder them.
img = Image.fromarray(image)
img.show()
```
P.S.: Please be more accurate regarding what you are asking about.
|
Just now i found the answer how to get form data/values from modal: via table `Livewire` object. Use `$table->getLivewire()->getMountedTableBulkActionForm()->getState()`.
Somewhere in your Filament Resource:
<?php
use Filament\Forms\Components\TextInput;
use Filament\Tables\Actions\BulkAction;
use Filament\Tables\Table;
use Illuminate\Support\Collection;
public static function table(Table $table): Table
{
return $table
->bulkActions([
BulkAction::make('my_bulk_action')
->form(function (Collection $records) {
return [
TextInput::make('name')
];
})
->action(function (Collection $records) use ($table) {
$data = $table->getLivewire()->getMountedTableBulkActionForm()->getState();
// will return TextInput value in $data['name']
});
]);
}
*Hope that helps.* |
# IDEA
## introduction
### IMPORTANT NOTIFICATION
THE EXPLANATION IS NOT VERY CLEAR, AND I EXPECT YOU TO GUESS. GUESSING IS GOOD FOR YOUR BRAIN.
BELIEVE YOURSELF. Your GUESS, if you think is LOGICAL, is RIGHT.
In fact they are just simple ideas, with little guide then you will also understand.
So you are expected to directly understand what `order`, `label`, `start` is.
If you still feel unsure, scroll down and see explanation.
### better function's procedure
#### example (they will looks like C#)
``` C#
class ExpClass1{
int a;
int b;
int c;
int d;
virtual (int,int) F(int a,int b) {
this.a=a;
this.b=b;
this.c=a+b;
this.d=a-b;
return (c,d);
}
}
```
Normally, computer just do as it says, linear. But is that the best?
For example, for "d=a-b", it doesn't need the result of c; it just needs the values of a and b.
In short, any action can be done once all of its parameters are available.
A better function (guess it):
``` C#
(int,int) F(int a,int b) {
order start,[SetA,SetB,GainC,GainD],end;
label SetA: {this.a=a;}
label SetB: {this.b=b;}
label GainC: {this.c=a+b;}
label GainD: {this.d=a-b;}
label End: {return (c,d);}
}
```
Grammar is casual. And don't worry about the code. The labels are expected to be automatically added by the language.
Notice that the label is important.
More complex procedure will be discussed later <del>(by you)</del>
<del>logical spaghetti</del>
### What does order do
Tell computer that don't worry about the order of some procedures.
And also tell us that some procedures are not related.
"decoupling between procedures"
"deeper work of logic"
### extend it
the same, extra works just needed to insert with label and order
``` C#
class ExpClass2:ExpClass1{
int e;
override (int,int) F(int a,int b) {
order [GainC,GainD],GainE,End;
label GainE:{this.e=c*d;}
}
}
```
ExpClass2.F is called "extensional function for ExpClass1.F"
### multiply extend it
You (and computer) just realized that 2 functions can be combined without conflict.
``` C#
class ExpClass3:ExpClass1{
double e;
override (int,int) F(int a,int b) {
order [GainC,GainD],GainE,End;
label GainE:{this.e=c/d;}
}
}
// always as C++ virtual class
class ExpClass4:ExpClass2,ExpClass3
{
double f;
override (int,int) F(int a,int b) {
order [ExpClass2::GainE,ExpClass3::GainE],GainF,End;
label GainF:{this.f=ExpClass2::e+ExpClass3::e;}
}
}
```
perfect( believe ).
### What does extension do
Feel it by your self
Break big hard block into toy bricks, which is programs should be.
### Example
It is hard to find an example that really requires this to solve,<br>
because normally normal combine (like python's Super()) is enough.
But when you really need this, you will feel its power.
Create an abstract class `SearchTree` and `Node`. (good design) (for instance, imagine Delete(key))<br>
Then create a abstract subclass `NodeLinked` that provides ability of `LinkedList` and O(n) enum.<br>
Then create a abstract subclass `TreeWithCache` that provides a cache to fast access last searched value<br>
Then combine these 2 together as `TreeWithCacheAndLinked`, which provides fast access of value near the cached<br>
Then create a subclass `RBTree` of `SearchTree` that provides Red-Black tree structure<br>
Then combine all together and feel its power.
### more talk
#### what does it do
decoupling between procedure
#### what else can it do
protogenetic multithreading
less pain for some situation
``` C#
class FA{
void FA(); void FB();
virtual void FC(){
FA();
FCBetweenFAAndFB(); //Toooo strange but necessary
FB();
}
virtual void FCBetweenFAAndFB(){
}
}
```
One engine (I forgot) uses about 10 labels to sort update procedures.
### Problems (<del>disclaimer</del>)
#### Conflict: it must be caused by bad design (1k word omitted)
Not related extend function normally should have no conflict.
If they both modify return, or they both cause conflict side-effect, then they may shouldn't be used together.
Notice that `a+=b` and `a+=c` has no conflict, they can be considered to be used for side-effect.
#### More chars and spaghetti: the cost of more abstract
In other sight, they just provide more possibility, logically, and be expected to don't ruin original codes.
(image it is the same as writing annotation)
#### Implement
They can(in theory) be implemented by multithreading, but can also be implemented by just compiling them into normal, linear one.
#### <del>poor english, poor expression</del>
I believe you can understand what I says
When I try to explain them more clearly, I feel my brain changes state.<br>
The state of that I write this is the same as that I come up with the idea.<br>
And the state of that I explain this is that I learn this.
I think the state of "mind-blowing" is not only good, but also important.<br>
So I eventually decided to keep this version (and make it more abstract), and give further explanation later
### better function's process - Explained
Any action should be done only, and exactly, when all of its parameters are available.<br>
Then, we can consider the requirement order and try to make use of it.
``` C#
class ExpClass1{
int a;
int b;
int c;
int d;
virtual (int,int) F(int a,int b) {
order start,[SetA,SetB,GainC,GainD],end;
label SetA: {this.a=a;}
label SetB: {this.b=b;}
label GainC: {this.c=a+b;}
label GainD: {this.d=a-b;}
label End: {return (c,d);}
}
}
```
Each "label" marks a piece of code.<br>
After all the previous label to be done, code of this label should be done.<br>
And after it, the label is considered as done.
"order" explain the order for code to run.<br>
the basic order is (defined label A,B,C,D):<br>
`order B,D;`<br>
`order C,D;`<br>
which means D should be done after B and C to be done.<br>
then, `order A,[B,C],D` means after A to be done, B and C should be done.<br>
and after B and C to be done, D should be done.<br>
`start` is the label of start (maybe it can just be omitted).
### attempt to explain how this solves multiple inheritance
assume A; B,C inherit A; D inherit B,C
Multiple inheritance is not only for code reuse, but also SHOULD be logical (e.g. Liskov substitution principle),which MEANS no conflict in code.<br>
For override function, this MEANS B,C added code have no conflict, and can be combined together.<br>
Image 2 flow graph for B,C. One linear, one flowed by requirement<br>
With the previous function structure, the computer and you (JUST) don't know how to combine 2 big hard blocks.<br>
But now, every extensional function is no longer a single function but a plug-in, and label of main function is socket. Then it is free to add multiple plug-in into main function
## implement
1. implement ordered code to fit all situation
2. manage linear procedure code into ordered
3. manage ordered code into linear procedure
This part is not implemented, and needs more discussion.<br>
Valuable ideas from reply will be added into body (also citation).
### situation
- functional
- procedure
if
loop
continue/break
? goto
### possible way
2 way to implement
1: for each Label, track its reference count; once a label is done, decrement the count of each label that follows it; when a label's count reaches 0, do it
2: for each Label, mark its state(Undone, Doing, Done), try to do its previous label, and done this.
#### way 1
``` C#
class LabelCode{
CodeSnippet LabeledCode;
///which should be done after this
LabelCode[] Afters;
int Count;
///manager will set Count=Afters.Length for each, then do start.TryDo
void TryDo(Codes codes){
Count-=1;
if(Count>0) return;
codes.AddSnippet(LabeledCode);
foreach(var i in Afters) i.TryDo(codes);
}
}
```
#### way 2
``` C#
class LabelCode{
CodeSnippet LabeledCode;
///which should be done before this
LabelCode[] Befores;
DoneState state;//
enum DoneState{
Undone,
Doing,
Done
}
///manager will set all LabelCode's state to Undone, then invoke Do for each
void Do(Codes codes){
if(i.state==Doing) {
throw new exception();// really?
}
if(i.state==Done) return;
state=Doing;
foreach(var i in Befores) i.Do(codes);
codes.AddSnippet(LabeledCode);
state=DoneState.Done;
}
}
```
### situation : if
#### for way 1
as a special label that only TryDo for 1 branch, then another branch won't done due to lack count-1
#### for way 2
???
possible: DoneState.DontDo and spread this
### situation : loop
#### example
``` C#
int i=2;
int a,b;
while(i<100){
i+=1;//loop 1 by 1
a+=i;//sole thread, because a+=i,a+=j don't have order requirement
b*=i;//sole thread
}
```
#### possible problem
We may need an $\infty$ number of labels
#### Easy way
consider loop block as a block like function, and invoke it repeatedly
But it can't apply the best multithreading
#### possible way
give each label a subscript, and express the loop as n, n+1
#### for way 1
???
#### for way 2
???
|
No module named 'keras.layers.core' |
```
from keras.models import Sequential
from keras.layers.core import Dense, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
```
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-35-e0ce7722f83a> in <cell line: 2>()
1 from keras.models import Sequential
----> 2 from keras.layers.core import Dense, Flatten
3 from keras.layers.convolutional import Conv2D, MaxPooling2D
ModuleNotFoundError: No module named 'keras.layers.core'
---------------------------------------------------------------------------
NOTE: If your import is failing due to a missing package, you can
manually install dependencies using either !pip or !apt.
To view examples of installing some common dependencies, click the
"Open Examples" button below.
use Chat GPT the answer is
> The error indicates that the module keras.layers.core cannot be found, suggesting an issue with the Keras installation or compatibility.
>
> As of Keras version 2.7.0 (the latest stable version as of my last update), the module organization has changed, and keras.layers.core is no longer used. Instead, core layers like Dense and Flatten are directly available under keras.layers.
|
The error:
"org.springframework.mail.MailSendException: Mail server connection failed. Failed messages: jakarta.mail.MessagingException: Could not convert socket to TLS;\n nested exception is:\n\tjava.net.SocketTimeoutException: Read timed out; message exception details (1) are:\r\nFailed message 1:\r\njakarta.mail.MessagingException: Could not convert socket to TLS;\n nested exception
I have no antivirus on my pc and try some things i've here on stack overflow but the program isnt working yet. I will show some snippets of my code:
aplications.yml:
spring:
mail:
host: email-smtp.us-east-1.amazonaws.com
port: 587
username: ses-smtp-user.20240330-201531
password:
properties:
mail:
smtp:
auth: true
starttls:
enable: true
connectiontimeout: 5000
timeout: 300
writetimeout: 5000
ssl:
protocols: TLSv1.2
server:
port: 8081
-Djavax:
net:
debug=all:
My service class:
@Service
public class EmailService {
@Autowired
private JavaMailSender emailSender;
public void sendEmail(Email email) throws MessagingException {
MimeMessage message = emailSender.createMimeMessage();
MimeMessageHelper mimeMessageHelper = new MimeMessageHelper(message, true);
mimeMessageHelper.setFrom(email.getFrom());
mimeMessageHelper.setTo(email.getTo());
mimeMessageHelper.setSubject(email.getSubject());
mimeMessageHelper.setText(email.getText());
Properties props = new Properties();
props.put("mail.smtp.ssl.protocols", "TLSv1.2");
emailSender.send(message);
}
}
Can someone please help me?
I tried to start the program with debug
-Djavax:
net:
debug=all:
but it doesn't work. I tried to add props in the service class but that didn't work. I also commented out the java.security line #jdk.tls.disabledAlgorithms=SSLv3, TLSv1, TLSv1.1, DTLSv1.0, RC4, DES, \
# MD5withRSA, DH keySize < 1024, EC keySize < 224, 3DES_EDE_CBC, anon, NULL, \
#
it still doesn't work
Why my mail service api spring isnt working? |
|java|spring|amazon-web-services|jakarta-mail| |
null |
I found a workaround shared by Damian from AppDynamics support that helped me adjust the log levels for both the Proxy and the Watchdog in AppDynamics. Here's a summary of the steps:
- **Proxy Log Level:** The `log4j2.xml` file controls this. You can find it within the appdynamics_bindeps module. For example, in my WSL setup, it's located at `/home/wsl/.pyenv/versions/3.11.6/lib/python3.11/site-packages/appdynamics_bindeps/proxy/conf/logging/log4j2.xml`. In the Docker image python:3.9, the path is `/usr/local/lib/python3.9/site-packages/appdynamics_bindeps/proxy/conf/logging/log4j2.xml`. Modify the <AsyncLogger> level within the <Loggers> section to one of the following: debug, info, warn, error, or fatal.
- **Watch Dog Log Level:** This can be adjusted in the `proxy.py` file found within the appdynamics Python module. For example, in my WSL setup, it's located at `/home/wsl/.pyenv/versions/3.11.6/lib/python3.11/site-packages/appdynamics/scripts/pyagent/commands/proxy.py`. In the Docker image python:3.9, the path is `/usr/local/lib/python3.9/site-packages/appdynamics/scripts/pyagent/commands/proxy.py`. You will need to hardcode the log level in the configure_proxy_logger and configure_watchdog_logger functions by changing the level variable.
## My versions
```bash
$ pip freeze | grep appdynamics
appdynamics==24.2.0.6567
appdynamics-bindeps-linux-x64==24.2.0
appdynamics-proxysupport-linux-x64==11.68.3
```
## Original files
### log4j2.xml
```
<Loggers>
<!-- Modify each <AsyncLogger> level as needed -->
<AsyncLogger name="com.singularity" level="info" additivity="false">
<AppenderRef ref="Default"/>
<AppenderRef ref="RESTAppender"/>
<AppenderRef ref="Console"/>
</AsyncLogger>
</Loggers>
```
### proxy.py
```python
def configure_proxy_logger(debug):
logger = logging.getLogger('appdynamics.proxy')
level = logging.DEBUG if debug else logging.INFO
pass
def configure_watchdog_logger(debug):
logger = logging.getLogger('appdynamics.proxy')
level = logging.DEBUG if debug else logging.INFO
pass
```
## My Script to create environment variables to log4j2.xml and proxy.py
### update_appdynamics_log_level.sh
```bash
#!/bin/sh
# Check if PYENV_ROOT is not set
if [ -z "$PYENV_ROOT" ]; then
# If PYENV_ROOT is not set, then set it to the default value
export PYENV_ROOT="/usr/local/lib"
echo "PYENV_ROOT was not set. Setting it to default: $PYENV_ROOT"
else
echo "PYENV_ROOT is already set to: $PYENV_ROOT"
fi
echo "=========================== log4j2 - appdynamics_bindeps module ========================="
# Find the appdynamics_bindeps directory
APP_APPD_BINDEPS_DIR=$(find "$PYENV_ROOT" -type d -name "appdynamics_bindeps" -print -quit)
if [ -z "$APP_APPD_BINDEPS_DIR" ]; then
echo "Error: appdynamics_bindeps directory not found."
exit 1
fi
echo "Found appdynamics_bindeps directory at $APP_APPD_BINDEPS_DIR"
# Find the log4j2.xml file within the appdynamics_bindeps directory
APP_LOG4J2_FILE=$(find "$APP_APPD_BINDEPS_DIR" -type f -name "log4j2.xml" -print -quit)
if [ -z "$APP_LOG4J2_FILE" ]; then
echo "Error: log4j2.xml file not found within the appdynamics_bindeps directory."
exit 1
fi
echo "Found log4j2.xml file at $APP_LOG4J2_FILE"
# Modify the log level in the log4j2.xml file
echo "Modifying log level in log4j2.xml file"
sed -i 's/level="info"/level="${env:APP_APPD_LOG4J2_LOG_LEVEL:-info}"/g' "$APP_LOG4J2_FILE"
echo "log4j2.xml file modified successfully."
echo "=========================== watchdog - appdynamics module ==============================="
# Find the appdynamics directory
APP_APPD_DIR=$(find "$PYENV_ROOT" -type d -name "appdynamics" -print -quit)
if [ -z "$APP_APPD_DIR" ]; then
echo "Error: appdynamics directory not found."
exit 1
fi
echo "Found appdynamics directory at $APP_APPD_DIR"
# Find the proxy.py file within the appdynamics directory
APP_PROXY_PY_FILE=$(find "$APP_APPD_DIR" -type f -name "proxy.py" -print -quit)
if [ -z "$APP_PROXY_PY_FILE" ]; then
echo "Error: proxy.py file not found within the appdynamics directory."
exit 1
fi
echo "Found proxy.py file at $APP_PROXY_PY_FILE"
# Modify the log level in the proxy.py file
echo "Modifying log level in proxy.py file"
sed -i 's/logging.DEBUG if debug else logging.INFO/os.getenv("APP_APPD_WATCHDOG_LOG_LEVEL", "info").upper()/g' "$APP_PROXY_PY_FILE"
echo "proxy.py file modified successfully."
```
## Dockerfile
Dockerfile to run pyagent with FastAPI and run this script
```dockerfile
# Use a specific version of the python image
FROM python:3.9
# Set the working directory in the container
WORKDIR /app
# First, copy only the requirements file and install dependencies to leverage Docker cache
COPY requirements.txt ./
RUN python3 -m pip install --no-cache-dir -r requirements.txt
# Now copy the rest of the application to the container
COPY . .
# Make the update_log4j2.sh and update_watchdog.sh scripts executable and run them
RUN chmod +x update_appdynamics_log_level.sh && \
./update_appdynamics_log_level.sh
# Set environment variables
ENV APP_APPD_LOG4J2_LOG_LEVEL="warn" \
APP_APPD_WATCHDOG_LOG_LEVEL="warn"
EXPOSE 8000
# Command to run the FastAPI application with pyagent
CMD ["pyagent", "run", "uvicorn", "main:app", "--proxy-headers", "--host","0.0.0.0", "--port","8000"]
```
## Files changed by the script
### log4j2.xml
```xml
<Loggers>
<!-- Modify each <AsyncLogger> level as needed -->
<AsyncLogger name="com.singularity" level="${env:APP_APPD_LOG4J2_LOG_LEVEL:-info}" additivity="false">
<AppenderRef ref="Default"/>
<AppenderRef ref="RESTAppender"/>
<AppenderRef ref="Console"/>
</AsyncLogger>
</Loggers>
```
### proxy.py
```python
def configure_proxy_logger(debug):
logger = logging.getLogger('appdynamics.proxy')
level = os.getenv("APP_APPD_WATCHDOG_LOG_LEVEL", "info").upper()
pass
def configure_watchdog_logger(debug):
logger = logging.getLogger('appdynamics.proxy')
level = os.getenv("APP_APPD_WATCHDOG_LOG_LEVEL", "info").upper()
pass
```
## Warning
Please note, these paths and methods may vary based on your AppDynamics version and environment setup. Always backup files before making changes and be aware that updates to AppDynamics may overwrite your customizations.
I hope this helps! |