instruction stringlengths 0 30k β |
|---|
|if-statement| |
null |
You have to use a `\U` + 8-hexadecimal code instead of `\u` + 4-hexadecimal code. You'll also need an appropriate font, so it may or may not show up below on your system.
```py
import unicodedata as ud
mystr = 'ABC'
trans_dict = {'A':'\U00011D72','B':'\U00011D73','C':'\U00011D74'}
mytable = mystr.maketrans(trans_dict)
mystr_translated = mystr.translate(mytable)
for c in mystr_translated:
print(c, ud.name(c))
```
Output:
```
GUNJALA GONDI LETTER KHA
GUNJALA GONDI LETTER TA
GUNJALA GONDI LETTER THA
``` |
I'm struggling to edit a JTable with a picture on it.
So basically, I can edit the table ONLY if I edit the picture on it.
I can't edit anything if I am not editing the column with the picture on it:
trying to debug but the listener isn't doing the query on the server.
here the code
```
public class VisualizzaprodottiPanel extends JPanel {
// Attributi
private JTable jtable;
private ListaProdottiTableModel tableModel;
private Set<Integer> righeModificate = new HashSet<>(); // Set per tenere traccia delle righe modificate
// Costruttore
public VisualizzaprodottiPanel() {
setLayout(new BorderLayout());
// Creazione del modello della tabella e inizializzazione dei dati
IArticoloDAO articoloDAO = ArticoloDAO.getInstance();
List<Prodotto> prodotti = articoloDAO.findAll();
tableModel = new ListaProdottiTableModel(prodotti, articoloDAO);
// Creazione della tabella con il modello
JTable tabellaProdotti = new JTable(tableModel);
tabellaProdotti.setRowHeight(100); // Altezza della riga
tabellaProdotti.setShowGrid(false); // Nasconde le linee di griglia
tabellaProdotti.setIntercellSpacing(new Dimension(0, 0)); // Nessuno spazio tra le celle
JButton modifica = new JButton("Modifica");
Map<Integer, String> imagePathMap = new HashMap<>();
modifica.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
DbOperationExecutor executor = new DbOperationExecutor(); //conn database
for (int row : righeModificate) {
Prodotto p = new Prodotto();
p.setId(prodotti.get(row).getId()); //gestire dopo la roba degli id
p.setNome(tabellaProdotti.getValueAt(row,0).toString());
p.setTipoProdotto(IArticolo.TIPO_PRODOTTO.valueOf(tabellaProdotti.getValueAt(row,1).toString()));
p.setDisponibilita(Integer.valueOf(tabellaProdotti.getValueAt(row,2).toString()));
p.setPrezzo(Float.valueOf(tabellaProdotti.getValueAt(row,3).toString()));
p.setFoto(imagePathMap.get(row));
p.setDescrizione(tabellaProdotti.getValueAt(row,5).toString());
articoloDAO.updateProdotto(p);
}
// Pulisci il set delle righe modificate dopo l'aggiornamento nel database
righeModificate.clear();
}
});
tabellaProdotti.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseClicked(java.awt.event.MouseEvent evt) {
int column = tabellaProdotti.columnAtPoint(evt.getPoint());
if (column == 4) { // Controlla se il click Γ¨ sulla colonna delle immagini
int row = tabellaProdotti.rowAtPoint(evt.getPoint());
JFileChooser fileChooser = new JFileChooser();
fileChooser.setCurrentDirectory(new File(System.getProperty("user.home")));
int result = fileChooser.showOpenDialog(VisualizzaprodottiPanel.this);
if (result == JFileChooser.APPROVE_OPTION) {
File selectedFile = fileChooser.getSelectedFile();
String imagePath = selectedFile.getAbsolutePath();
imagePath = imagePath.replace("\\", "\\\\");
// Aggiorna il percorso dell'immagine nel modello della tabella
tableModel.setValueAt(imagePath, row, column);
// Aggiungi la riga modificate al set
righeModificate.add(row);
imagePathMap.put(row, imagePath);
}
}
}
});
JScrollPane scrollPane = new JScrollPane(tabellaProdotti);
add(scrollPane, BorderLayout.CENTER);
add(modifica, BorderLayout.SOUTH);
}
}
```
The Table Model that I'm working with (I don't think the problem is there):
```
public class ListaProdottiTableModel extends AbstractTableModel {
private List<Prodotto> prodotti = new ArrayList<>();
private IArticoloDAO articoloDAO;
public ListaProdottiTableModel(List<Prodotto> prodotti, IArticoloDAO articoloDAO) {
this.prodotti = prodotti;
this.articoloDAO = articoloDAO;
}
@Override
public int getRowCount() {
return prodotti.size();
}
@Override
public int getColumnCount() {
return 6;
}
@Override
public Object getValueAt(int rowIndex, int columnIndex) {
Prodotto prodotto = prodotti.get(rowIndex);
switch (columnIndex) {
case 0: return prodotto.getNome();
case 1: return prodotto.getTipoProdotto();
case 2: return prodotto.getDisponibilita();
case 3: return prodotto.getPrezzo();
case 4:
String imagePath = prodotto.getFoto();
if (imagePath != null && !imagePath.isEmpty()) {
ImageIcon imageIcon = new ImageIcon(imagePath);
Image image = imageIcon.getImage();
Image scaledImage = image.getScaledInstance(100, 100, Image.SCALE_SMOOTH);
return new ImageIcon(scaledImage);
}else {
return null;
}
case 5: return prodotto.getDescrizione();
default:return null;
}
}
@Override
public void setValueAt(Object aValue, int rowIndex, int columnIndex) {
Prodotto prodotto = prodotti.get(rowIndex);
switch (columnIndex) {
case 0: prodotto.setNome((String) aValue); break;
case 1: // Converte la stringa in enum
try {
IArticolo.TIPO_PRODOTTO tipoProdotto = IArticolo.TIPO_PRODOTTO.valueOf((String) aValue);
prodotto.setTipoProdotto(tipoProdotto);
} catch (IllegalArgumentException e) {
// Gestisce il caso in cui la stringa non corrisponda a nessun valore dell'enum
// Potresti mostrare un messaggio di errore o gestire il caso in un altro modo appropriato
System.out.println("Valore non valido per il tipo di prodotto.");
}
break;
case 2: try {
int disponibilita = Integer.parseInt((String) aValue);
prodotto.setDisponibilita(disponibilita);
} catch (NumberFormatException e) {
System.out.println("Errore durante la conversione della disponibilitΓ in intero.");
}
break;
case 3: prodotto.setPrezzo((float) aValue); break;
case 4: prodotto.setFoto((String) aValue); break;
case 5: prodotto.setDescrizione((String) aValue); break;
}
// Aggiorna il prodotto nel database
//articoloDAO.updateProdotto(prodotto);
// Aggiorna la tabella
fireTableRowsUpdated(rowIndex, rowIndex);
}
@Override
public Class<?> getColumnClass(int columnIndex) {
if (columnIndex == 4) {
return ImageIcon.class; // Immagine
} else {
return super.getColumnClass(columnIndex);
}
}
@Override
public boolean isCellEditable(int rowIndex, int columnIndex) {
return true;
}
}
```
Maybe the problem is in the mouseListener, but I can't solve it. |
Issue editing a JTable with pictures |
|java|jtable| |
null |
I have producer and consumer configuration
Producer
@Bean
public KafkaTemplate<String, BeltEventDescription> kafkaBeltEventDescriptionTemplate(final ProducerFactory<String, BeltEventDescription> producerFactory) {
return new KafkaTemplate<>(producerFactory);
}
with config properties
@Bean
public ProducerFactory<String, BeltEventDescription> producerBeltEventDescriptionFactory() {
return new DefaultKafkaProducerFactory<>(Map.of(
ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress,
ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class,
ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class
));
}
Consumer
@Bean
public KafkaConsumer<String, String> kafkaStringConsumer() {
return new KafkaConsumer<>(consumerFactory(beltGroupId).getConfigurationProperties());
}
with config props
private ConsumerFactory<String, BeltEvent> consumerFactory(final String groupId) {
return new DefaultKafkaConsumerFactory<>(Map.of(
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress,
ConsumerConfig.GROUP_ID_CONFIG, groupId,
ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class,
ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class,
ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 1000,
ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10000,
ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, ("SchedulerCoordinator-" + UUID.randomUUID()))
);
}
but when I try sending message or listening, I don't know what fails exactly, I get the message
> Listener failed
> No method found for class java.lang.String
What's wrong? |
No method found for class java.lang.String in Kafka |
|java|apache-kafka|json-deserialization| |
I have a google sheet with values on it. Think it like this:
| header1 | col1 | header 3 | col2 |
| -------- | -------------- | --------- | --------
| First | | row | |
| Second | | row | |
I will have another data that will come and fill the 2nd and 4th column by respectively.
So what I want is to use append_row with a specific column name, because after each process (my code) I want to immediately add the data to my Google Sheet.
--------
WHAT I DO FOR NOW (I want to change this logic):
I have 2 columns like this. So what I have done before after my code completes (all data is ready now) I was adding those data with worksheet update like this (I am using gspread):
```python
headers = worksheet.row_values(1)
col1_index = headers.index('col1') + 1
col2_index = headers.index('col2') + 1
for item in result:
col1_list.append(item['col1'])
col2_list.append(item['col2'])
col1_transposed = [[item] for item in col1_list]
col2_transposed = [[item] for item in col2_list]
col1_range = '{}2:{}{}'.format(chr(65 + col1_index - 1), chr(65 + col1_index - 1),
len(col1_list) + 1)
col2_range = '{}2:{}{}'.format(chr(65 + col2_index - 1), chr(65 + col2_index - 1),
len(col2_list) + 1)
worksheet.update(col1_range, col1_transposed)
worksheet.update(col2_range, col2_transposed)
```
But now I want to say like I want to append my data row by row to specific columns. After each process I will have a data like this
{'col1': 'value1', 'col2': 'value2'}
and value1 will be on the col1 column and value2 will be on the col2 column in the first row.
After I will have the same thing from the code:
{'col1': 'value3', 'col2': 'value4'}
The result I would like to see:
| header1 | col1 | header 3 | col2 |
| -------- | -------------- | --------- | --------
| First | value1 | row | value2 |
| Second | value 3| row | value4 |
|
It is impossible to achieve using a single Lua pattern, but you can chain a few of them:
```
local s = "/<\\/>//b\\\\/c" -- 4 payloads here (the second one is empty)
for x in s
:gsub("/", "/\1") -- make every payload non-empty by prepending string.char(1)
:gsub("\\(.)", "\2%1") -- replace magic backslashes with string.char(2)
:gsub("%f[/\2]/", "\0") -- replace non-escaped slashes with string.char(0)
:gsub("[\1\2]", "") -- remove temporary symbols string.char(1) and string.char(2)
:gmatch"%z(%Z*)" -- split by string.char(0)
do
print(x)
end
```
Output:
```
</>
b\
c
``` |
We are [splitting][1] the files into the component.vue, styles.scss and template.html, something like:
<script setup lang="ts">
// Common elements
import Category from '@/components/Elements/Category/Category.vue';
// COMPOSABLES
const localePath = useLocalePath();
// DATA
const session = ref(getSession());
</script>
<template src="./CustomTable.html" />
<style lang="scss" src="./custom-table.scss" />
But even that `Category`, `localePath` and `session` are used on `./CustomTable.html` the linter provides this issues:
3:8 warning 'Category' is defined but never used @typescript-eslint/no-unused-vars
6:7 warning 'localePath' is assigned a value but never used @typescript-eslint/no-unused-vars
9:7 warning 'session' is assigned a value but never used @typescript-eslint/no-unused-vars
I know we could use `eslint-disable`
<script setup lang="ts">
/* eslint-disable @typescript-eslint/no-unused-vars */
// Common elements
import Category from '@/components/Elements/Category/Category.vue';
// COMPOSABLES
const localePath = useLocalePath();
// DATA
const session = ref(getSession());
/* eslint-enable @typescript-eslint/no-unused-vars */
</script>
<template src="./CustomTable.html" />
<style lang="scss" src="./custom-table.scss" />
But that could lead to truly un-used variables to be ignored
Is there any way to make the linter to search for usages in the linked template file? else what do you suggest we could do about it?
[1]: https://mokkapps.de/vue-tips/split-your-sfc-into-multiple-files |
When splitting SFC components in multiple lint provides false positives |
|nuxt.js|eslint|typescript-eslint|false-positive|no-unused-vars| |
**FluentD 1.16.3** and **Fluent-bit 1.8.11**
I have the following lines in my container log file */var/log/containers/*.log*
**When a ldap user credentials fail (bad user/password)**
024-03-28T16:09:23.048182266Z [28/Mar/2024:16:08:58.721901994 +0000] conn=13532278 op=0 BIND dn="uid=f_lastname,ou=People,dc=inf.team" method=128 version=3
2024-03-28T16:09:23.048186511Z [28/Mar/2024:16:08:58.724475049 +0000] conn=13532278 op=0 RESULT err=49 tag=97 nentries=0 wtime=0.045728791 optime=0.002580362 etime=0.048307196 - Invalid credentials
**When user's password is reset in LDAP**
time: 20240328011425
dn: uid=utest_ignore,ou=People,dc=inf.team
result: 0
changetype: modify
replace: userPassword
userPassword:: e1NTSEE1ZCJ9VkRsSy9xMmlyVExacjJVT0pVMCtOUFg4bWVneVFRYlMvY1k3T1B
ab2wzbUs1RWZMS3RtN0dyOTNBhYlleTk2UFhBd01WQjVgYhUxZTcyWWR4QnlPSDdxN2RibTlLaGNv
-
replace: modifiersName
modifiersName: cn=ldag_mgr
-
replace: modifyTimestamp
modifyTimestamp: 20240328011433Z
-
[28/Mar/2024:00:46:51.830952828 +0000] - DEBUG - NS7bitAttr - preop_modify - MODIFY begin
I tried the fluent-bit Multiline parser configuration, but that's not giving me the desired output, I need. It says, that it basically concatenates/clubs all (multiple) lines into one object as string value (as per examples shown in this URL).
https://docs.fluentbit.io/manual/v/1.8/administration/configuring-fluent-bit/multiline-parsing - not very straight forward.
In **Fluentd**, there's a multiline parser which looks promising, but I'm not getting the desired output either.
https://docs.fluentd.org/parser/multiline
Tried the following conf file for catching the data (when user's password is reset in LDAP):
<parse>
@type multiline
format_firstline /^time: (?<time>[^ ]+)\n/
format1 /^dn: (?<dn>[^ ]+)\n/
format2 /^result: (?<result>[^ ]+)\n/
format3 /^changetype: (?<changetype>[^ ]+)\n)/
.... so on ...
</parse>
but I'm getting errors that my above conf file is incorrect.
**Desired output I would like, using FluentD configuration is:** I want only the **first 5 lines from "time:" line** (I don't care about other lines after 5th line).
{"time": "20240328011425",
"dn": "uid=utest_ignore,ou=People,dc=inf.team",
"result": "0",
"changetype": "modify",
"replace": "userPassword"
}
For the case of: **When a ldap user credentials fail (bad user/password)** I want to **cherry pick only few fields from those 2 log lines** above, resulting into a JSON blob. i.e.
{
"conn": "13532278",
"op": "0",
"dn": "uid=f_lastname,ou=People,dc=inf.team",
"result": "Invalid credentials"
}
|
It does not have a preconditioner and it's not supposed to. I mixed it up with HPCG, which does incorporate a preconditioner. |
@helder-sepulveda's answer has a minor bug - validation will fail when all variables are set to `"false"`
Here's a working example:
```terraform
variable "sc1_default" {
default = "true"
}
variable "sc2_default" {
default = "false"
}
variable "sc3_default" {
default = "false"
}
variable "sc4_default" {
default = "true"
}
provider "null" {}
locals {
joined_scs = join("", [var.sc1_default, var.sc2_default, var.sc3_default, var.sc4_default])
scs_are_valid = replace(local.joined_scs, "false", "") == "true"
}
resource "null_resource" "validation" {
lifecycle {
precondition {
condition = local.scs_are_valid
error_message = "One and only one SC should be set to true."
}
}
}
```
Running `terraform plan`:
Planning failed. Terraform encountered an error while generating this plan.
β·
β Error: Resource precondition failed
β
β on main.tf line 27, in resource "null_resource" "validation":
β 27: condition = local.scs_are_valid
β βββββββββββββββββ
β β local.scs_are_valid is false
β
β One and only one SC should be set to true. |
I have created a 10k-line dictionary for a program I am making; some of the entries have duplicate keys because of the way the data needs to be presented.
The fix for this is to turn the "value" into a list, removing the duplicate keys.
I was wondering if there is an automated way to do this? (PyCharm)
for example :
dict = {
"A": "Red",
"A": "Blue",
"A": "Green",
"B": "Yellow",
"C": "Black"
}
wanted output :
dict = {
"A": ["Red","Blue","Green"],
"B": "Yellow",
"C": "Black"
}
I've tried ChatGPT and manual labor :D
I'm looking for a smarter way to do this and to learn new approaches.
|
I am trying to disable the builtin `kill` in the bash running inside the container, and I would like to know if it is possible to run a command after the container start running, or if it is possible to run `/bash/bash` with some specific parameter to disable a builtin function.
I have a `Dockerfile` with the following content:
```
...
ENTRYPOINT [ "/entrypoint.sh" ]
CMD [ "/bin/bash" ]
```
in the `entrypoint.sh`, I have the following:
```
#!/bin/bash
exec "$@"
```
What I know is possible to do to disable `kill` is run the command `enable -n kill` and the builtin will be disabled. But this only works if I run the command inside the container (after `exec` when I docker run).
Is there some way to disable the kill builtin in the entrypoint or in the Dockerfile? |
How to disable a bash builtin inside a docker container |
|bash|docker| |
I am trying to install GMP (c++) for Windows using MSYS2. I made sure that when I installed msys2 that I installed gcc and set the PATH to C:\\msys64\\mingw64\\bin. I restarted my computer to check if GCC is working (gcc --version in the cmd works), but when I try to install GMP it doesn't work.
When I try to do ./configure --enable-cxx this happens
```
MY-DESKTOP-NAME MSYS /c/GMP/gmp-6.3.0
$ ./configure --enable-cxx
configure: loading site script /etc/config.site
checking build system type... x86_64-pc-msys
checking host system type... x86_64-pc-msys
checking for a BSD-compatible install... /usr/bin/install -c
checking whether build environment is sane... yes
checking for a thread-safe mkdir -p... /usr/bin/mkdir -p
checking for gawk... gawk
checking whether make sets $(MAKE)... no
checking whether make supports nested variables... no
checking whether to enable maintainer-specific portions of Makefiles... no
checking ABI=64
checking compiler gcc -O2 -pedantic -fomit-frame-pointer -m64 ... no
checking compiler cc -O ... no
checking ABI=x32
checking compiler gcc -O2 -pedantic -fomit-frame-pointer -mx32 ... no
checking compiler cc ... no
checking ABI=32
checking compiler gcc -m32 -O2 -pedantic -fomit-frame-pointer ... no
checking compiler gcc -O2 -pedantic -fomit-frame-pointer ... no
checking compiler icc -no-gcc ... no
checking compiler cc -O ... no
configure: error: could not find a working compiler, see config.log for details
```
Here is the config.log file
```
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by GNU MP configure 6.3.0, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ ./configure --enable-cxx
## ---------
## Platform.
## ---------
hostname = MY-DESKTOP-NAME
uname -m = x86_64
uname -r = 3.4.10.x86_64
uname -s = MSYS_NT-10.0-22631
uname -v = 2023-12-22 10:06 UTC
/usr/bin/uname -p = unknown
/bin/uname -X = unknown
/bin/arch = x86_64
/usr/bin/arch -k = unknown
/usr/convex/getsysinfo = unknown
/usr/bin/hostinfo = unknown
/bin/machine = unknown
/usr/bin/oslevel = unknown
/bin/universe = unknown
PATH: /usr/local/bin
PATH: /usr/bin
PATH: /bin
PATH: /opt/bin
PATH: /c/Windows/System32
PATH: /c/Windows
PATH: /c/Windows/System32/Wbem
PATH: /c/Windows/System32/WindowsPowerShell/v1.0/
PATH: /usr/bin/site_perl
PATH: /usr/bin/vendor_perl
PATH: /usr/bin/core_perl
## -----------
## Core tests.
## -----------
configure:2885: loading site script /etc/config.site
| # This file is in public domain.
| # Original author: Karlson2k (Evgeny Grin)
| # Written for MSys2 to help running 'configure' scripts
|
| # Defaults for MSys2/MinGW64-targeted programs
|
| # Set proper selfname on bash and fallback to default name on other shells
| test -n "${BASH_SOURCE}" 2\>/dev/null && config_site_me="${BASH_SOURCE\[0\]##\*/}" || config_site_me=config.site
|
| # Set default 'host' to speedup configure
| if test -z "$build_alias"; then
| build_alias="${MSYSTEM_CHOST}" && \
| ${as_echo-echo} "$config_site_me:${as_lineno-$LINENO}: default build_alias set to $build_alias" \>&5
| fi
|
| # Set default 'prefix'
| if ( test -z "$prefix" || test "x$prefix" = "xNONE" ) && \
| ( test -z "$exec_prefix" || test "x$exec_prefix" = "xNONE" ); then
| prefix="${MSYSTEM_PREFIX}" && \
| ${as_echo-echo} "$config_site_me:${as_lineno-$LINENO}: default prefix set to $prefix" \>&5
| fi
config.site:13: default build_alias set to x86_64-pc-msys
config.site:20: default prefix set to /usr
configure:3056: checking build system type
configure:3070: result: x86_64-pc-msys
configure:3090: checking host system type
configure:3103: result: x86_64-pc-msys
configure:3140: checking for a BSD-compatible install
configure:3208: result: /usr/bin/install -c
configure:3219: checking whether build environment is sane
configure:3274: result: yes
configure:3425: checking for a thread-safe mkdir -p
configure:3464: result: /usr/bin/mkdir -p
configure:3471: checking for gawk
configure:3487: found /usr/bin/gawk
configure:3498: result: gawk
configure:3509: checking whether make sets $(MAKE)
configure:3535: result: no
configure:3560: checking whether make supports nested variables
configure:3577: result: no
configure:3706: checking whether to enable maintainer-specific portions of Makefiles
configure:3715: result: no
User:
ABI=
CC=
CFLAGS=(unset)
CPPFLAGS=(unset)
MPN_PATH=
GMP:
abilist=64 x32 32
cclist=gcc icc cc
configure:5911: gcc 2\>&1 | grep xlc \>/dev/null
configure:5914: $? = 1
configure:5968: checking compiler gcc -O2 -pedantic -fomit-frame-pointer -m64
Test compile:
configure:5982: gcc -O2 -pedantic -fomit-frame-pointer -m64 conftest.c \>&5
./configure: line 5983: gcc: command not found
configure:5985: $? = 127
failed program was:
int main () { return 0; }
configure:7072: result: no
configure:5889: cc -c conftest.c \>&5
./configure: line 5890: cc: command not found
configure:5892: $? = 127
configure:5911: cc 2\>&1 | grep xlc \>/dev/null
configure:5914: $? = 1
configure:5968: checking compiler cc -O
Test compile:
configure:5982: cc -O conftest.c \>&5
./configure: line 5983: cc: command not found
configure:5985: $? = 127
failed program was:
int main () { return 0; }
configure:7072: result: no
configure:5911: gcc 2\>&1 | grep xlc \>/dev/null
configure:5914: $? = 1
configure:5968: checking compiler gcc -O2 -pedantic -fomit-frame-pointer -mx32
Test compile:
configure:5982: gcc -O2 -pedantic -fomit-frame-pointer -mx32 conftest.c \>&5
./configure: line 5983: gcc: command not found
configure:5985: $? = 127
failed program was:
int main () { return 0; }
configure:7072: result: no
configure:5889: cc -c conftest.c \>&5
./configure: line 5890: cc: command not found
configure:5892: $? = 127
configure:5911: cc 2\>&1 | grep xlc \>/dev/null
configure:5914: $? = 1
configure:5968: checking compiler cc
Test compile:
configure:5982: cc conftest.c \>&5
./configure: line 5983: cc: command not found
configure:5985: $? = 127
failed program was:
int main () { return 0; }
configure:7072: result: no
configure:5911: gcc 2\>&1 | grep xlc \>/dev/null
configure:5914: $? = 1
configure:5968: checking compiler gcc -m32 -O2 -pedantic -fomit-frame-pointer
Test compile:
configure:5982: gcc -m32 -O2 -pedantic -fomit-frame-pointer conftest.c \>&5
./configure: line 5983: gcc: command not found
configure:5985: $? = 127
failed program was:
int main () { return 0; }
configure:7072: result: no
configure:5968: checking compiler gcc -O2 -pedantic -fomit-frame-pointer
Test compile:
configure:5982: gcc -O2 -pedantic -fomit-frame-pointer conftest.c \>&5
./configure: line 5983: gcc: command not found
configure:5985: $? = 127
failed program was:
int main () { return 0; }
configure:7072: result: no
configure:5889: icc -c conftest.c \>&5
./configure: line 5890: icc: command not found
configure:5892: $? = 127
configure:5911: icc 2\>&1 | grep xlc \>/dev/null
configure:5914: $? = 1
configure:5968: checking compiler icc -no-gcc
Test compile:
configure:5982: icc -no-gcc conftest.c \>&5
./configure: line 5983: icc: command not found
configure:5985: $? = 127
failed program was:
int main () { return 0; }
configure:7072: result: no
configure:5889: cc -c conftest.c \>&5
./configure: line 5890: cc: command not found
configure:5892: $? = 127
configure:5911: cc 2\>&1 | grep xlc \>/dev/null
configure:5914: $? = 1
configure:5968: checking compiler cc -O
Test compile:
configure:5982: cc -O conftest.c \>&5
./configure: line 5983: cc: command not found
configure:5985: $? = 127
failed program was:
int main () { return 0; }
configure:7072: result: no
configure:7310: error: could not find a working compiler, see config.log for details
## ----------------
## Cache variables.
## ----------------
ac_cv_build=x86_64-pc-msys
ac_cv_env_ABI_set=
ac_cv_env_ABI_value=
ac_cv_env_CCC_set=
ac_cv_env_CCC_value=
ac_cv_env_CC_FOR_BUILD_set=
ac_cv_env_CC_FOR_BUILD_value=
ac_cv_env_CC_set=
ac_cv_env_CC_value=
ac_cv_env_CFLAGS_set=
ac_cv_env_CFLAGS_value=
ac_cv_env_CPPFLAGS_set=
ac_cv_env_CPPFLAGS_value=
ac_cv_env_CPP_FOR_BUILD_set=
ac_cv_env_CPP_FOR_BUILD_value=
ac_cv_env_CPP_set=
ac_cv_env_CPP_value=
ac_cv_env_CXXCPP_set=
ac_cv_env_CXXCPP_value=
ac_cv_env_CXXFLAGS_set=
ac_cv_env_CXXFLAGS_value=
ac_cv_env_CXX_set=
ac_cv_env_CXX_value=
ac_cv_env_LDFLAGS_set=
ac_cv_env_LDFLAGS_value=
ac_cv_env_LIBS_set=
ac_cv_env_LIBS_value=
ac_cv_env_LT_SYS_LIBRARY_PATH_set=
ac_cv_env_LT_SYS_LIBRARY_PATH_value=
ac_cv_env_M4_set=
ac_cv_env_M4_value=
ac_cv_env_YACC_set=
ac_cv_env_YACC_value=
ac_cv_env_YFLAGS_set=
ac_cv_env_YFLAGS_value=
ac_cv_env_build_alias_set=
ac_cv_env_build_alias_value=
ac_cv_env_host_alias_set=
ac_cv_env_host_alias_value=
ac_cv_env_target_alias_set=
ac_cv_env_target_alias_value=
ac_cv_host=x86_64-pc-msys
ac_cv_path_install='/usr/bin/install -c'
ac_cv_path_mkdir=/usr/bin/mkdir
ac_cv_prog_AWK=gawk
ac_cv_prog_make_make_set=no
am_cv_make_support_nested_variables=no
## -----------------
## Output variables.
## -----------------
ABI=''
ACLOCAL='${SHELL} /c/GMP/gmp-6.3.0/missing aclocal-1.15'
AMTAR='$${TAR-tar}'
AM_BACKSLASH=''
AM_DEFAULT_V='1'
AM_DEFAULT_VERBOSITY='1'
AM_V='1'
AR=''
AS=''
ASMFLAGS=''
AUTOCONF='${SHELL} /c/GMP/gmp-6.3.0/missing autoconf'
AUTOHEADER='${SHELL} /c/GMP/gmp-6.3.0/missing autoheader'
AUTOMAKE='${SHELL} /c/GMP/gmp-6.3.0/missing automake-1.15'
AWK='gawk'
CALLING_CONVENTIONS_OBJS='x86call.lo x86check$U.lo'
CC=''
CCAS=''
CC_FOR_BUILD=''
CFLAGS=''
CPP=''
CPPFLAGS=''
CPP_FOR_BUILD=''
CXX=''
CXXCPP=''
CXXFLAGS=''
CYGPATH_W='cygpath -w'
DEFN_LONG_LONG_LIMB=''
DEFS=''
DLLTOOL=''
DSYMUTIL=''
DUMPBIN=''
ECHO_C=''
ECHO_N='-n'
ECHO_T=''
EGREP=''
ENABLE_STATIC_FALSE=''
ENABLE_STATIC_TRUE=''
EXEEXT=''
EXEEXT_FOR_BUILD=''
FGREP=''
GMP_LDFLAGS=''
GMP_LIMB_BITS=''
GMP_NAIL_BITS='0'
GREP=''
HAVE_CLOCK_01=''
HAVE_CPUTIME_01=''
HAVE_GETRUSAGE_01=''
HAVE_GETTIMEOFDAY_01=''
HAVE_HOST_CPU_FAMILY_power='0'
HAVE_HOST_CPU_FAMILY_powerpc='0'
HAVE_SIGACTION_01=''
HAVE_SIGALTSTACK_01=''
HAVE_SIGSTACK_01=''
HAVE_STACK_T_01=''
HAVE_SYS_RESOURCE_H_01=''
INSTALL_DATA='${INSTALL} -m 644'
INSTALL_PROGRAM='${INSTALL}'
INSTALL_SCRIPT='${INSTALL}'
INSTALL_STRIP_PROGRAM='$(install_sh) -c -s'
LD=''
LDFLAGS=''
LEX=''
LEXLIB=''
LEX_OUTPUT_ROOT=''
LIBCURSES=''
LIBGMPXX_LDFLAGS=''
LIBGMP_DLL=''
LIBGMP_LDFLAGS=''
LIBM=''
LIBM_FOR_BUILD=''
LIBOBJS=''
LIBREADLINE=''
LIBS=''
LIBTOOL=''
LIPO=''
LN_S=''
LTLIBOBJS=''
LT_SYS_LIBRARY_PATH=''
M4=''
MAINT='#'
MAINTAINER_MODE_FALSE=''
MAINTAINER_MODE_TRUE='#'
MAKEINFO='${SHELL} /c/GMP/gmp-6.3.0/missing makeinfo'
MANIFEST_TOOL=''
MKDIR_P='/usr/bin/mkdir -p'
NM=''
NMEDIT=''
OBJDUMP=''
OBJEXT=''
OTOOL64=''
OTOOL=''
PACKAGE='gmp'
PACKAGE_BUGREPORT='gmp-bugs@gmplib.org (see https://gmplib.org/manual/Reporting-Bugs.html)'
PACKAGE_NAME='GNU MP'
PACKAGE_STRING='GNU MP 6.3.0'
PACKAGE_TARNAME='gmp'
PACKAGE_URL='http://www.gnu.org/software/gmp/'
PACKAGE_VERSION='6.3.0'
PATH_SEPARATOR=':'
RANLIB=''
SED=''
SET_MAKE='MAKE=make'
SHELL='/bin/sh'
SPEED_CYCLECOUNTER_OBJ='pentium.lo'
STRIP=''
TAL_OBJECT=''
TUNE_LIBS=''
TUNE_SQR_OBJ=''
U_FOR_BUILD=''
VERSION='6.3.0'
WANT_CXX_FALSE=''
WANT_CXX_TRUE=''
WITH_READLINE_01=''
YACC=''
YFLAGS=''
ac_ct_AR=''
ac_ct_CC=''
ac_ct_CXX=''
ac_ct_DUMPBIN=''
am__EXEEXT_FALSE=''
am__EXEEXT_TRUE=''
am__isrc=''
am__leading_dot='.'
am__tar='$${TAR-tar} chof - "$$tardir"'
am__untar='$${TAR-tar} xf -'
bindir='${exec_prefix}/bin'
build='x86_64-pc-msys'
build_alias='x86_64-pc-msys'
build_cpu='x86_64'
build_os='msys'
build_vendor='pc'
datadir='${datarootdir}'
datarootdir='${prefix}/share'
docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
dvidir='${docdir}'
exec_prefix='NONE'
gmp_srclinks=''
host='x86_64-pc-msys'
host_alias=''
host_cpu='x86_64'
host_os='msys'
host_vendor='pc'
htmldir='${docdir}'
includedir='${prefix}/include'
infodir='${datarootdir}/info'
install_sh='${SHELL} /c/GMP/gmp-6.3.0/install-sh'
libdir='${exec_prefix}/lib'
libexecdir='${exec_prefix}/libexec'
localedir='${datarootdir}/locale'
localstatedir='${prefix}/var'
mandir='${datarootdir}/man'
mkdir_p='$(MKDIR_P)'
mpn_objects=''
mpn_objs_in_libgmp=''
oldincludedir='/usr/include'
pdfdir='${docdir}'
prefix='/usr'
program_transform_name='s,x,x,'
psdir='${docdir}'
sbindir='${exec_prefix}/sbin'
sharedstatedir='${prefix}/com'
sysconfdir='${prefix}/etc'
target_alias=''
## -----------
## confdefs.h.
## -----------
/\* confdefs.h \*/
#define PACKAGE_NAME "GNU MP"
#define PACKAGE_TARNAME "gmp"
#define PACKAGE_VERSION "6.3.0"
#define PACKAGE_STRING "GNU MP 6.3.0"
#define PACKAGE_BUGREPORT "gmp-bugs@gmplib.org (see https://gmplib.org/manual/Reporting-Bugs.html)"
#define PACKAGE_URL "http://www.gnu.org/software/gmp/"
#define PACKAGE "gmp"
#define VERSION "6.3.0"
#define WANT_FFT 1
#define HAVE_HOST_CPU_x86_64 1
#define HOST_DOS64 1
configure: exit 1
``` |
GMP Windows installation "configure: error: could not find a working compiler" |
|c++|gcc|gmp|msys| |
null |
I have a code that works like this:
template<size_t size>
struct myClass {
public:
std::array<uint8_t, size> bytes;
bool operator==(const myClass<size>& o) const {
for (size_t i{0}; i < size; ++i) {
if (bytes[i] != o.bytes[i]) return false;
}
return true;
}
uint8_t& operator[] (size_t ind) {
return bytes[ind];
}
const uint8_t& operator[] (size_t ind) const {
return bytes[ind]; // function body is identical, can I write only once?
}
};
int main() {
myClass<4> my{23,24,55,26};
my.bytes[2] = 24;
cout << std::dec << static_cast<int32_t>(my[2]) << endl;
my[2] = 44;
cout << static_cast<int32_t>(my[2]) << endl;
uint8_t *p1 = &my[2];
cout << static_cast<int32_t>(*p1) << endl;
const myClass<4> my2{23,24,55,26};
const uint8_t *p2 = &my2[2];
cout << static_cast<int32_t>(*p2++) << endl;
cout << static_cast<int32_t>(*p2) << endl;
return 0;}
My question is that, the `uint8_t& operator[](size_t ind){}` and `const uint8_t& operator[](size_t ind) const {}` have identical function body of `return bytes[ind];`. Therefore, I need to write identical code for two times. Is there a method that I only write the function body for once, and also make the code work? |
I use SharedPreferences in my project to store user information, but I have a problem: the program encounters an error and does not load at all, even though as far as I can tell there is no problem with my code. The application does not come up, and no error text is shown that I could share with you; the application just closes and cannot be continued.
How can I fix it?
This is the code of the class where I store the information:
public class UserManager {
private SharedPreferences sharedPreferences;
public UserManager(Context context) {
sharedPreferences = context.getSharedPreferences("user_information", Context.MODE_PRIVATE);
}
public void saveUserInformation(String fullName, String email, String gender) {
@SuppressLint("CommitPrefEdits") SharedPreferences.Editor editor = sharedPreferences.edit();
editor.putString("full_name", fullName);
editor.putString("email", email);
editor.putString("gender", gender);
editor.apply();
}
public String getFullName() {
return sharedPreferences.getString("full_name", "");
}
public String getEmail() {
return sharedPreferences.getString("email", "");
}
public String getGender() {
return sharedPreferences.getString("gender", "");
}
}
And this is the code of the main class that I receive the information after saving it:
public class MainActivity extends AppCompatActivity {
private UserManager userManager;
private String gender = "";
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
userManager = new UserManager(this);
TextInputEditText fullNameEt = findViewById(R.id.et_main_fullName);
fullNameEt.setText(userManager.getFullName());
TextInputEditText emailEt = findViewById(R.id.et_main_email);
emailEt.setText(userManager.getEmail());
RadioGroup genderRadioGroup = findViewById(R.id.radioGroup_main_gender);
genderRadioGroup.setOnCheckedChangeListener(new RadioGroup.OnCheckedChangeListener() {
@Override
public void onCheckedChanged(RadioGroup group, int checkedId) {
if (checkedId == R.id.btn_main_male) {
gender = "male";
} else {
gender = "female";
}
}
});
gender = userManager.getGender();
if (gender.equalsIgnoreCase("male")) {
genderRadioGroup.check(R.id.btn_main_male);
} else if (gender.equalsIgnoreCase("female")) {
genderRadioGroup.check(R.id.btn_main_female);
}
View saveBtn = findViewById(R.id.btn_main_save);
saveBtn.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
userManager.saveUserInformation(fullNameEt.getText().toString(),
emailEt.getText().toString(),
gender);
}
});
}
} |
I'm currently working on a Maven project where I would like to have 2 Docker containers: one launches all the tests and the other compiles the project if all the tests succeed.
The problem is that both containers launch the prod Dockerfile.
So my question is: why, after pointing each service to its own Dockerfile in the docker-compose file, do they both start from the prod one?
docker-compose.yml:
```docker
version: '3.8'
services:
db:
container_name: db
image: mysql
command: --default-authentication-plugin=mysql_native_password
restart: always
environment:
MYSQL_DATABASE: cuisine
MYSQL_ROOT_PASSWORD: example
healthcheck:
test: ["CMD", "mysqladmin" ,"ping", "-h", "localhost"]
timeout: 20s
retries: 10
ports:
- "3306:3306"
volumes:
- ./docker/mysql-dump/cuisine:/docker-entrypoint-initdb.d
- mysql:/var/lib/mysql
adminer:
image: adminer
restart: always
ports:
- 8080:8080
test:
container_name: java-test
image: spring-boot
build:
context: .
dockerfile: docker/test/Dockerfile
ports:
- "8081:8081"
- "5005:5005"
depends_on:
db:
condition: service_healthy
volumes:
- ${APPLICATION_ROOT_FOLDER}:/usr/src/mymaven
- ${MAVEN_SETTINGS_FOLDER}:/root/.m2
java:
container_name: java
image: spring-boot
build:
context: .
dockerfile: docker/prod/Dockerfile
ports:
- "8082:8082"
- "5006:5006"
depends_on:
db:
condition: service_healthy
volumes:
- ${APPLICATION_ROOT_FOLDER}:/usr/src/mymaven
- ${MAVEN_SETTINGS_FOLDER}:/root/.m2
volumes:
mysql:
``` |
I'm making an app in Swift that connects to a web service. I want to make requests, such as login. After logging in, I want to navigate to HomeView, but the view never switches.
I have these files:
`
MainView:
struct MainView: View {
@ObservedObject var viewModel = MainViewViewModel()
var body: some View {
if viewModel.savedStudent != nil {
HomeView()
} else {
LoginView()
}
}
}
`
MainViewViewModel:
`
class MainViewViewModel: ObservableObject {
@Published var savedStudent: Student?
init() {
loadSavedStudent()
}
func loadSavedStudent() {
if let savedStudentData = UserDefaults.standard.data(forKey: "student") {
if let savedStudent = try? JSONDecoder().decode(Student.self, from: savedStudentData) {
self.savedStudent = savedStudent
} else {
print("Error al decodificar el estudiante.")
}
} else {
print("No existe ningun estudiante logueado.")
}
}
}
`
`
LoginViewViewModel:
class APIFetchHandler: ObservableObject {
@Published var legajo = ""
@Published var password = ""
@Published var errorMessage = ""
static let sharedInstance = APIFetchHandler()
func postAPIData() {
guard self.validate() else {
return
}
let url = "https://webservice.frvm.utn.edu.ar/autogestion/login";
let headers: HTTPHeaders = [
"nick": legajo,
"password": password
]
AF.request(url, method: .post, parameters: nil, encoding: JSONEncoding.default, headers: headers).responseData { response in
switch response.result {
case .success(let data):
do {
// GUARDA EL ESTUDIANTE EN UserDefaults
let student = try JSONDecoder().decode(Student.self, from: data)
if let encodedStudent = try? JSONEncoder().encode(student) {
UserDefaults.standard.set(encodedStudent, forKey: "student")
UserDefaults.standard.synchronize()
print("Student saved to UserDefaults")
self.errorMessage = ""
} else {
print("Error encoding student object")
}
// PRINTEAR UserDefaults PARA VER SI SE GUARDO CORRECTAMENTE
if let savedStudentData = UserDefaults.standard.data(forKey: "student") {
if let savedStudent = try? JSONDecoder().decode(Student.self, from: savedStudentData) {
print(savedStudent)
} else {
print("Error decoding student object")
}
} else {
print("No student data found in UserDefaults")
}
} catch {
if let statusCode = response.response?.statusCode, statusCode == 401 {
print("Credenciales incorrectas.")
self.errorMessage = "Legajo/Contraseña incorrectos."
}
return
}
case .failure(let error):
print("------------ ERROR ------------")
print(error)
print("------------ ERROR ------------")
}
}
}
private func validate() -> Bool {
guard !legajo.trimmingCharacters(in: .whitespaces).isEmpty,
!password.trimmingCharacters(in: .whitespaces).isEmpty else {
self.errorMessage = "Llenar todos los campos, por favor."
return false
}
return true
}
}
`
`
LoginView:
struct LoginView: View {
@StateObject var viewModel = APIFetchHandler()
var body: some View {
VStack {
Text("UTN - FRVM")
.font(.title)
Text("Iniciar sesión")
Form {
if !viewModel.errorMessage.isEmpty {
Text(viewModel.errorMessage)
.foregroundStyle(.red)
}
TextField("Legajo", text: $viewModel.legajo)
.textFieldStyle(DefaultTextFieldStyle())
SecureField("Password", text: $viewModel.password)
.textFieldStyle(DefaultTextFieldStyle())
Button {
viewModel.postAPIData()
} label: {
ZStack {
RoundedRectangle(cornerRadius: 10)
Text("Iniciar sesión")
.foregroundColor(.white)
.bold()
}
}
.padding()
}
}
}
}` |
Navigate after logged in with webservice |
|swift|swiftui| |
null |
In your `ProductModel` schema the `category` property has a `name` property given to it. That suggests you want `category` to be an Object with a `name` property. I pressume that was not your intention. You need to remove that so that `category` is just a property with a value of type `ObjectId`.
````js
const ProductModel = new Schema({
//...
category: {
name: String, //< delete this
type: Schema.Types.ObjectId,
ref: 'Category',
},
})
```` |
hope you are all doing well :)
I'm having a bit of a problem with my Dart app. The app is a to-do list creator where the user can create several lists and add items to each one of them.
With this in mind, I started adding functionalities and from a simple, single-list app, now I have created a more complete thing where the user can reorder, edit and delete items and the same for lists.
I then organized the lists in a tab system because I think it looks good. So, for the last part of v1.0.0, I tried to add the "Edit list" functionality, which allows the user to delete a list or edit its name.
Thing is, when I hit "Save" a black screen appears and makes me have to restart the app. The worst thing is that the code actually works, because the name of the lists actually changes, but the black screen is unavoidable.
Here are the two widgets the "Edit list" dialog box uses:
```
//delete or edit list
void confirmDeleteList(String listName) {
try {
setState(() {
db.toDoLists.remove(listName);
tabNames.remove(listName);
if (_tabController.animation!.status == AnimationStatus.completed) {
if (!_isTabControllerDisposed) {
_tabController.dispose(); // Dispose of the old TabController
_isTabControllerDisposed = true;
}
_tabController = TabController(length: db.toDoLists.keys.length, vsync: this);
_isTabControllerDisposed = false;
if (_tabController.length - 1 < _tabController.previousIndex) {
_tabController.index = _tabController.length - 2;
} else {
_tabController.index = _tabController.previousIndex;
}
_currentList = tabNames[_tabController.index]; // Set _currentList to the current tab
}
});
db.updateDatabase();
} catch (e) {
print('Error in confirmDeleteList: $e');
}
}
void deleteOrEditList(String listName) {
try {
showDialog(
context: context,
builder: (context) {
// Create a new TextEditingController for the DeleteListBox
final deleteBoxController = TextEditingController();
return DeleteListBox(
controller: deleteBoxController,
onSaved: () async {
if (deleteBoxController.text.isNotEmpty) {
setState(() {
// Remove the old list and add a new one with the updated name
var items = db.toDoLists.remove(listName);
db.toDoLists[deleteBoxController.text] = items!;
// Update tabNames
tabNames.remove(listName);
tabNames.add(deleteBoxController.text);
if (_tabController.animation!.status == AnimationStatus.completed) {
if (!_isTabControllerDisposed) {
_tabController.dispose(); // Dispose of the old TabController
_isTabControllerDisposed = true;
}
_tabController = TabController(length: db.toDoLists.keys.length, vsync: this);
_isTabControllerDisposed = false;
_tabController.index = tabNames.indexOf(deleteBoxController.text); // Set the renamed tab as the current tab
_currentList = deleteBoxController.text; // Set _currentList to the renamed tab
}
});
db.updateDatabase();
deleteBoxController.clear();
}
Navigator.of(context).pop(); // Close the dialog before disposing the TabController
},
onSlided: () {
confirmDeleteList(listName);
Navigator.of(context).pop(); // Close the dialog before disposing the TabController
return Future.value();
},
tabName: listName,
);
},
);
} catch (e) {
print('Error in deleteList: $e');
}
}
```
Since I'm kinda new to programming, I don't know if this block of code is enough to determine the cause of the problem. I've been fighting with this for two days now and I'm starting to get a little frustrated.
I can, of course, provide the rest of the code if necessary, just let me know :)
Thank you in advance!! |
Flutter + Dart: Editing name of a tab shows up a black screen |
|android|flutter|dart| |
null |
My question may look a bit silly — I am a newbie in R and I don't have much experience. Is there a straightforward way to export a colored table to Excel?
I created this matrix using ggplot [colored Table/matrix](https://i.stack.imgur.com/RiUjJ.png)
How can I extract an editable table into excel with same colors to adjust?
Thank you so much in advance
|
I created this matrix using ggplot [colored Table/matrix](https://i.stack.imgur.com/RiUjJ.png)
How can I extract an editable table into excel with same colors to adjust?
Could my question look a bit silly, I am a newbie in R, I don't have much experience. Is there a straightforward way to extract a colored table into Excel?
Thank you so much in advance
|
Pending more details from the poster, here is an example that has a `MainThreadClass` running on the main thread of a program and an `asyncio` event loop running on another thread. When `MainThreadClass.event` is called, it sends a message to the event loop via an `asyncio.Queue` and `asyncio.run_coroutine_threadsafe`. From there, the `asyncio` event loop is continuously listening for messages on the queue and processes the message, printing it to the console.
```python
import asyncio
from threading import Thread
class MainThreadClass:
def __init__(self):
# Create an `asyncio.Queue` so that the main thread can send messages
# to the `asyncio` event loop via this queue and `asyncio.run_coroutine_threadsafe`
self._async_queue = asyncio.Queue()
self._async_event_loop = asyncio.new_event_loop()
def event(self, some_incoming_data):
# You can't directly use `put_nowait` here because `asyncio.Queue` methods
# not threadsafe
asyncio.run_coroutine_threadsafe(self._async_queue.put(some_incoming_data), self._async_event_loop)
async def listen_to_queue(queue: asyncio.Queue):
while True:
message = await queue.get()
print(f"async event loop received: {message}")
# ... do something with `message` ...
async def async_main(queue: asyncio.Queue):
await asyncio.gather(listen_to_queue(queue))
def start_asyncio_event_loop(loop: asyncio.AbstractEventLoop) -> None:
"""Starts the given `asyncio` loop on whatever the current thread is"""
asyncio.set_event_loop(loop)
loop.run_forever()
def run_event_loop(queue: asyncio.Queue, loop: asyncio.AbstractEventLoop) -> None:
"""Runs the given `asyncio` loop on a separate thread, passing the `AsyncInbox`
to the event loop for any other thread to send messages to the event loop.
"""
thread = Thread(target=start_asyncio_event_loop, args=(loop,))
thread.start()
asyncio.run_coroutine_threadsafe(async_main(queue), loop=loop)
if __name__ == "__main__":
main_thread_class = MainThreadClass()
async_queue = main_thread_class._async_queue
async_event_loop = main_thread_class._async_event_loop
run_event_loop(queue=async_queue, loop=async_event_loop)
main_thread_class.event("testing")
```
This program prints the following to the console:
```
async event loop received: testing
``` |
FluentD / Fluent-Bit: Concatenate multiple lines of log files and generate one JSON record for all key-value from each line |
|elasticsearch|parsing|multiline|fluentd|fluent-bit| |
This would pull out the decimal day duration between two rows in your specific format
[![enter image description here][1]][1]
let Source = Excel.CurrentWorkbook(){[Name="Table1"]}[Content],
#"Convert" = Table.TransformColumns(Source,{{"Column1",
each let a=_,
b = Text.Replace(a,"Days","*1+"),
c = Text.Replace(b,"Day","*1+"),
d = Text.Replace(c,"Hours","*1/24+"),
e = Text.Replace(d,"Hour","*1/24+"),
f = Text.Replace(e,"Minutes","*1/24/60+"),
g = Text.Replace(f,"Minute","*1/24/60+"),
h = Text.Replace(g,"Seconds","*1/24/60/60+"),
i = Text.Replace(h,"Second","*1/24/60/60+"),
j = i &"0"
in Expression.Evaluate(j), type number}}),
#"Added Custom" = Table.AddColumn(#"Convert", "Duration", each List.Max(#"Convert"[Column1])-List.Min(#"Convert"[Column1])),
#"CleanUp" = Table.FirstN(#"Added Custom",1),
#"Removed Columns" = Table.RemoveColumns(CleanUp,{"Column1"})
in #"Removed Columns"
[1]: https://i.stack.imgur.com/3WD1w.png |
With go 1.22+, I'd use `slices.Reverse()`:
```go
package main
import (
"fmt"
"slices"
)
func main() {
s := []int{5, 2, 6, 3, 1, 4}
slices.Reverse(s)
fmt.Println(s)
}
```
https://go.dev/play/p/he4pn_5ySq0 |
My issue was caused by using an enum in the class
late final RelationshipStatus _status;
Caused an issue:
Map<String, dynamic> toJson() => {
'_status': _status,
};
Solution:
Map<String, dynamic> toJson() => {
'_status': _status.name
}; |
In our company, we have a private repo on Github where we have our internal nuget packages.
On my laptop, on another API solution, I can restore the nuget packages using my github credentials.
I'm configuring the github actions to check the PR and deploy the API but when I want to restore nugets, I have an 403 error:
> Your request could not be authenticated by the GitHub Packages
> service. Please ensure your access token is valid and has the
> appropriate scopes configured.
I have read many documentations and posts on Stack overflow explaining how to use a PAT. But this is not the solution I want to use. I don't want the worklow linked to my account. If I'm not there anymore in the company I want the workflow to continue to work.
So I decided to use the Github App, I'm able to generate the token.
- uses: actions/create-github-app-token@v1
id: app-token
with:
app-id: ${{ vars.APP_ID }}
private-key: ${{ secrets.PRIVATE_KEY }}
Then I add the nuget source using the token
- name: Restore .NET project Dependencies
run: dotnet nuget update source SKDotNetPackages --source "https://nuget.pkg.github.com/sidekick-interactive/index.json" --username ${{ github.event.pull_request.user.login }} --password ${{ steps.app-token.outputs.token }} --store-password-in-clear-text
but when I restore the nugets using:
- name: Restore .NET project Dependencies
run: find . -name '*.csproj' -exec dotnet restore {} \;
it generates a 403 error.
Do you have any idea how to restore nuget from private github feed using Github App and no PAT?
|
If you want to get table of today forecast you can use this example:
```py
import pandas as pd
import requests
from bs4 import BeautifulSoup
headers = {"User-Agent": "Mozilla/5.0"}
url = "https://weather.com/en-IN/weather/today/l/a0e0a5a98f7825e44d5b44b26d6f3c2e76a8d70e0426d099bff73e764af3087a"
soup = BeautifulSoup(requests.get(url, headers=headers).content, "html.parser")
today_forecast = []
for a in soup.select(".TodayWeatherCard--TableWrapper--globn a"):
today_forecast.append(
t.get_text(strip=True, separator=" ") for t in a.find_all(recursive=False)
)
df = pd.DataFrame(
today_forecast, columns=["Time of day", "Degrees", "Text", "Chance of rain"]
)
print(df)
```
Prints:
```none
Time of day Degrees Text Chance of rain
0 Morning 11 ° Partly Cloudy --
1 Afternoon 20 ° Partly Cloudy --
2 Evening 14 ° Partly Cloudy Night Rain Chance of Rain 3%
3 Overnight 10 ° Cloudy Rain Chance of Rain 5%
``` |
i found a way, using:
- integer pixel coordinates which is much more intuitive and convenient at least for what i'm doing here
- and considering the "pixel size"
i wanted to paste my code but the crazy blocking rules pretending my code is not "formatted" correctly prohibits it! ctrl-k does not work ... so i wonder how people can post anything here anymore?
|
|javascript|vue.js|vuejs3|vue-i18n|matter.js| |
In one iteration over the input array you can:
* Collect the unique values in their order of first appearance
* Count how many you have of each
(Both can be captured by using a hash map that maintains insertion order, like for instance a dict in Python 3.6+, a Map in JavaScript, or a LinkedHashMap in Java)
With this information you can produce the sorted output.
Here is a runnable implementation with an execution of your example input:
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-js -->
function sort(data) {
const map = new Map;
for (const value of data) {
const count = map.get(value) ?? 0; // 0 is default
map.set(value, count + 1);
}
const result = [];
for (let [value, count] of map) {
while (count-- > 0) result.push(value);
}
return result;
}
const data = [1,2,1,30,1,1,2,1,40,30,1,40,2, 50, 40, 50, 30];
console.log(sort(data));
<!-- end snippet -->
|
So I'm having a problem when building my Flutter app in release mode for flutter.
When I build my Flutter app in debug mode it works fine and I can use my app in any emulator without any problem. But when I click edit scheme and try to run it in release mode the build fails and I get cloud_firestore module not found.
The reason I'm trying to build for release mode is that when I submitted my app for the app store it got rejected almost immediately and the reason was that the app crashed when launching.
Information you should know:
Mac Version:
[](https://i.stack.imgur.com/ZAMyP.png)
App works fine in debug mode
XCode version: v14.2
Flutter doctor: No errors
Cocoapods version: 1.15.2
Flutter version: 3.19.5 (Stable version)
Dart version: 3.3.3
Podfile content:
```
platform :ios, '12.0'
ENV['COCOAPODS_DISABLE_STATS'] = 'true'
project 'Runner', {
'Debug' => :debug,
'Profile' => :release,
'Release' => :release,
}
def flutter_root
generated_xcode_build_settings_path = File.expand_path(File.join('..', 'Flutter', 'Generated.xcconfig'), __FILE__)
unless File.exist?(generated_xcode_build_settings_path)
raise "#{generated_xcode_build_settings_path} must exist. If you're running pod install manually, make sure flutter pub get is executed first"
end
File.foreach(generated_xcode_build_settings_path) do |line|
matches = line.match(/FLUTTER_ROOT\=(.*)/)
return matches[1].strip if matches
end
raise "FLUTTER_ROOT not found in #{generated_xcode_build_settings_path}. Try deleting Generated.xcconfig, then run flutter pub get"
end
require File.expand_path(File.join('packages', 'flutter_tools', 'bin', 'podhelper'), flutter_root)
flutter_ios_podfile_setup
target 'Runner' do
use_frameworks!
use_modular_headers!
flutter_install_all_ios_pods File.dirname(File.realpath(__FILE__))
target 'RunnerTests' do
inherit! :search_paths
end
end
post_install do |installer|
installer.pods_project.targets.each do |target|
flutter_additional_ios_build_settings(target)
end
end
```
Pubspec.yaml content:
```
name: lifenavigator
version: 1.0.0+1
publish_to: none
description: A new Flutter project.
environment:
sdk: '>=3.2.3 <4.0.0'
dependencies:
cupertino_icons: ^1.0.2
get: 4.6.6
flutter:
sdk: flutter
google_fonts: 6.1.0
firebase_core: ^2.27.0
firebase_auth: ^4.17.8
font_awesome_flutter: ^10.7.0
convex_bottom_bar: ^3.2.0
cloud_firestore: ^4.15.8
firebase_messaging: ^14.7.19
google_sign_in: ^6.2.1
flutter_facebook_auth: ^6.1.1
flutter_dotenv: ^5.1.0
dio: ^5.4.1
url_launcher: ^6.2.5
flutter_launcher_icons: ^0.13.1
google_mobile_ads: ^5.0.0
dev_dependencies:
flutter_lints: ^3.0.2
flutter_test:
sdk: flutter
flutter_launcher_icons: "^0.13.1"
flutter_launcher_icons:
android: "launcher_icon"
ios: true
image_path: "assets/applogo.png"
flutter:
uses-material-design: true
assets:
- assets/
- .env
```
Solutions I tried:
I tried cleaning the previous builds and building again.
I tried `Flutter upgrade` and `pod repo update` and `flutter pub get` and `pod install`
I tried adding cloudFirestore in Podfile as `pod 'FirebaseCore', :modular_headers => true` (trigggered a new error)
I was trying to build my application for release mode but the build failed and I got module "cloud_firestore" not found |
you can use PrimeVue config options
import PrimeVue, { PrimeVueConfiguration } from 'primevue/config';
app.use(PrimeVue, {
locale:{ matchAll: "جميع الشروط", .... }
} as PrimeVueConfiguration);
alternatively you may modify only a portion of it like this:
import PrimeVue, { PrimeVueConfiguration, defaultOptions } from 'primevue/config';
app.use(PrimeVue, {
locale: {
...defaultOptions.locale,
matchAll: "جميع الشروط" }
} as PrimeVueConfiguration);
I hope this helps! Let me know if you have any other questions.
|
I am having issues with my SQL statement below. The NOT LIKE and OR statement is producing FALSE statements, in that it is producing the output when it should not, since I am using a NOT LIKE. When I put the statement as a stand-alone (without the OR condition), it works as intended.
For example, I am still seeing 'automation' in my ld.lead_name column.
Any help would be greatly appreciated! I can't figure out why this is not working...
SQL Statement
SELECT
ld.status,
ld.lead_name
FROM
DATAWAREHOUSE.SFDC_STAGING.SFDC_LEAD AS ld
WHERE
ld.status <> 'Open'
AND (
ld.lead_name NOT LIKE '%test%'
OR ld.lead_name NOT LIKE '%t3st%'
OR ld.lead_name NOT LIKE '%auto%'
OR ld.lead_name NOT LIKE '%autoXmation%'
OR ld.lead_name NOT LIKE 'automation%'
)
;
|
I created `person` table, then inserted 2 rows into it as shown below:
```sql
CREATE TABLE person (
id INTEGER,
name VARCHAR(20)
);
INSERT INTO person (id, name)
VALUES (1, 'John'), (2, 'David');
```
Then, I created `my_func()` which returns `person` table as shown below:
```sql
CREATE FUNCTION my_func() RETURNS TABLE(id INTEGER, name VARCHAR)
AS $$
BEGIN
RETURN QUERY SELECT id, name FROM person;
END; -- ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑
$$ LANGUAGE plpgsql;
```
Finally, calling `my_func()` got the same error as shown below:
```sql
postgres=# SELECT my_func();
ERROR: column reference "id" is ambiguous
LINE 1: SELECT id, name FROM person
^
DETAIL: It could refer to either a PL/pgSQL variable or a table column.
QUERY: SELECT id, name FROM person
CONTEXT: PL/pgSQL function my_func() line 3 at RETURN QUERY
```
So, I set the table name `person` with `.` just before `id` and `name` as shown below:
```sql
CREATE FUNCTION my_func() RETURNS TABLE(id INTEGER, name VARCHAR(20))
AS $$
BEGIN
RETURN QUERY SELECT person.id, person.name FROM person;
END; -- ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑
$$ LANGUAGE plpgsql;
```
Finally, I could call `my_func()` without error as shown below. *Omitting the `FROM` clause from the [SELECT statement][1] gets [the error][2]:
```sql
postgres=# SELECT my_func();
my_func
-----------
(1,John)
(2,David)
(2 rows)
```
[1]: https://www.postgresql.org/docs/current/sql-select.html
[2]: https://stackoverflow.com/questions/19975755/missing-from-clause-entry-for-a-table/77810473#77810473 |
**Monster script** (it is connected to 2DSprite in Monster.tscn)
```
extends Sprite2D
var monster = preload("res://Monster.tscn")
func _process(delta):
if Input.is_action_just_pressed("ui_accept"):
var random_right = randi() % 10 + 1
var random_left = randi() % 10 + 1
var random_up = randi() % 10 + 1
var random_down = randi() % 10 + 1
if random_right <= 5:
var instance = monster.instantiate()
get_parent().add_child(instance)
instance.position.x = position.x + 80
instance.position.y = position.y
if random_left <= 5:
var instance = monster.instantiate()
get_parent().add_child(instance)
instance.position.x = position.x - 80
instance.position.y = position.y
if random_up <= 5:
var instance = monster.instantiate()
get_parent().add_child(instance)
instance.position.x = position.x
instance.position.y = position.y + 80
if random_down <= 5:
var instance = monster.instantiate()
add_child(instance)
instance.position.x = position.x
instance.position.y = position.y - 80
```
**How it works**
(https://i.stack.imgur.com/upgc3.png)
I checked if everything is connected how it should be. My only guess to why it is not working properly is because it takes position.x and position.y from the starting monster and I don't know how to change that so please help. |
I have a query that I was asked to modify.
The original field is: Startdate with a MM/DD/YYYY format.
1/1/2020
In the source table, startdate is a string field.
I was asked to change the format to YYYY-MM-DD (mod_startdate1), but will need a 00:00:00 timestamp included.
2020-01-01
The best I can get to is mod_startdate2, but note that the minute field is replicating the month field.
2020-01-01 00:01:00
query used for the screenshot.
select startdate, from_unixtime(unix_timestamp(startdate,'mm/dd/yyyy'), 'yyyy-mm-dd') as mod_startdate1, from_unixtime(unix_timestamp(startdate,'mm/dd/yyyy'), 'yyyy-mm-dd HH:mm:ss') as mod_startdate2
from datamart_core.fbp_baseline limit 50
--returns date format, returns timestamp, but the month is also populating as the minutes
HIVE SQL format[enter image description here](https://i.stack.imgur.com/6Qb3J.jpg)
I'd appreciate any ideas to help resolve my query. Thank you.
I've found multiple repositories, but have not found 'the' solution yet |
> Will this impact the integrity of data?
**No**. This exception was triggered in a `ReadStage` thread - This type of thread is responsible for local reads, which don't modify the dataset in any way.
> And is there something I can do to avoid the error e.g. resize some param on Cassandra.yaml?
**Yes**. I would start by finding the root cause and addressing it, rather than changing configuration. I can think of likely 2 scenarios where this exception would be triggered:
1. The client scanned through a **large partition** in a single query (exceeding ~128 MiB). To validate this you can verify what's the max partition uncompressed size by running the following:
1. Cassandra 4.1.X and above:
```nodetool tablestats -s compacted_partition_maximum_bytes -t 1```
2. Previous versions:
`nodetool tablestats | grep "Compacted partition maximum bytes" | awk '{print $5}' | sort -n | tail -1`
If you see a partition over 128MiB, then it may be necessary to check if there is a query reading whole partitions in the correspondent table. And if there is one, rethink the data model in order to control partition size. One common solution to this problem is to bucket partitions by time or other arbitrary fields that can split the partitions in a balanced way.
2. A client is issuing a **range scan**. This includes read queries that read multiple partitions, such as queries that need `ALLOW FILTERING` and don't filter by partition key, and it's usually very expensive in Cassandra. Generally you'll be able to catch those in ***debug.log*** through **slow query logs**. If this is the case, I strongly recommend to consider [modeling a table for each of those queries][1] so that all reads are single-partition reads and the database performance scales well with the workload.
------------------
Finally, the quick configuration fix (in Cassandra 4.X) is to edit the following parameters in ***cassandra.yaml*** and restart nodes to apply changes:
`internode_application_send_queue_reserve_endpoint_capacity_in_bytes` - defaults to 134217728
`internode_application_receive_queue_reserve_endpoint_capacity_in_bytes` - defaults to 134217728
Feel free to check the official documentation on internode messaging [here][2].
[1]: https://cassandra.apache.org/doc/stable/cassandra/data_modeling/data_modeling_rdbms.html#query-first-design
[2]: https://cassandra.apache.org/doc/4.0/cassandra/new/messaging.html#resource-limits-on-queued-messages |
can I override c++ operator[] with only one function? |
|c++|constants|overriding|operator-keyword| |
I have written a pipeline which reads streaming data from Pub/Sub, aggregates the results, and writes the result to BigQuery.
The pipeline is working fine and writing the aggregated result to the BigQuery table.
But I am not able to understand how it's calculating the result.
Below is my code of apache-beam:
```
import json
import os
import typing
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.io.gcp.internal.clients import bigquery
from apache_beam.transforms.sql import SqlTransform
table_spec1 = bigquery.TableReference(
projectId=<PROJECT_ID>,
datasetId='training',
tableId='dflow_agg')
SCHEMA = {
"fields": [
{
"name": 'Name',
"type": "STRING",
},
{
"name": 'avg_sal',
"type": "FLOAT64"
},
{
"name": 'window_start',
"type": "STRING",
"mode": "NULLABLE"
},
{
"name": 'window_end',
"type": "STRING",
"mode": "NULLABLE"
}
]
}
pipeline_options = PipelineOptions( streaming=True)
class ProcessWords(beam.DoFn):
def process(self, ele):
yield eval(ele)
def run():
with beam.Pipeline(options=pipeline_options) as p:
out= (
p
| "Read from Pub/Sub subscription" >> beam.io.ReadFromPubSub(subscription="projects/<PROJECT_ID>/subscriptions/Test-sub")
| "Decode and parse Json" >> beam.Map(lambda element: element.decode("utf-8"))
|"Formatting " >> beam.ParDo(ProcessWords()) #.with_output_types(CommonLog)
| "Create beam Row" >> beam.Map(lambda x: beam.Row(Name=str(x[0]),Stream=str(x[1]),Salary=int(x[2])))
|"window" >> beam.WindowInto(beam.window.FixedWindows(30))
| SqlTransform(
"""
SELECT
Name,
AVG(Salary) AS avg_sal
FROM PCOLLECTION
GROUP BY Name
""")
| "Assemble Dictionary" >> beam.Map(
lambda row,
window=beam.DoFn.WindowParam: {
"Name": row.Name,
"avg_sal": row.avg_sal,
"window_start": window.start.to_rfc3339(),
"window_end": window.end.to_rfc3339()
})
| "Write to BigQuery" >> beam.io.WriteToBigQuery(
table=table_spec1,
dataset='training',
schema=SCHEMA,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
)
# |beam.MapTuple(lambda N,S,M : {"Name":N,"Stream":S,"Marks":M})
# | beam.Map(print)
)
# p.run()
if __name__ == '__main__':
run()
```
I have a python code, which publishes msg to a PubSub topic with a sleep time of 5 seconds.
Below is my publisher code.
```
# Test publisher: sends 101 random (name, stream, salary) tuples to the
# Pub/Sub topic, one message every 5 seconds.
import time
import random
from google.cloud import pubsub_v1

# NOTE(review): "<PROJECT_ID>" is a placeholder string to replace.
project_id = "<PROJECT_ID>"
topic_name = "Test"

publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_name)

i = 0
while i < 101:
    # Candidate values; one of each list is picked at random per message.
    name = ['A', 'B', 'C']
    sal = [10, 12, 11, 23, 77, 54, 23, 21, 4, 9, 5, 22, 19]
    stream = ["Stream1", "Stream2", "Stream3"]
    # Payload is the repr of a tuple, e.g. "('A', 'Stream2', 12)".
    data = '{}'.format((random.choice(name), random.choice(stream), random.choice(sal)))
    # Data must be a bytestring
    data = data.encode("utf-8")
    future = publisher.publish(topic_path, data)
    print(data)
    # Block until the publish is acknowledged before pausing.
    future.result()
    time.sleep(5)
    i += 1
```
I have printed the msgs which are published to the topic and below are 20 msg from beginning.
```
b"('B', 'Stream2', 23)"
b"('A', 'Stream3', 77)"
b"('C', 'Stream2', 10)"
b"('B', 'Stream2', 10)"
b"('B', 'Stream3', 10)"
b"('B', 'Stream2', 19)"
b"('C', 'Stream1', 11)"
b"('C', 'Stream2', 22)"
b"('A', 'Stream2', 12)"
b"('B', 'Stream1', 11)"
b"('A', 'Stream2', 23)"
b"('C', 'Stream3', 23)"
b"('A', 'Stream2', 4)"
b"('C', 'Stream2', 22)"
b"('B', 'Stream2', 4)"
b"('C', 'Stream2', 10)"
b"('B', 'Stream3', 11)"
b"('C', 'Stream2', 10)"
b"('C', 'Stream2', 22)"
b"('A', 'Stream2', 19)"
```
Bigquery results when ordering by Window start time looks like this:
select * from training.dflow_agg
order by window_start
[Bigquery Table data](https://i.stack.imgur.com/jfoSo.png)
Now, I want to understand, how the avg is calculated.
Case1:
If window size is 60 sec, then the first window will contain element
```
b"('B', 'Stream2', 23)"
b"('A', 'Stream3', 77)"
b"('C', 'Stream2', 10)"
b"('B', 'Stream2', 10)"
b"('B', 'Stream3', 10)"
b"('B', 'Stream2', 19)"
```
**So, The Avg of A will be 77-> This is correct,as there is only one A and Avg of one element is same.**
**Avg of B should be (23+10+10+19)/4 = 62/4 = 15.5 (divide by 4, because there are 4 B's in that window)**
**Why is C not in the same window start and window end time as A and B, given that C was also published within the 30-second window? (See the Pub/Sub output above.)**
Can anyone explain this output to me?
Thanks in advance.
Everything above is what I have tried.
Can anyone explain the output of apache-beam streaming pipeline with Fixed Window of 60 seconds? |
|python|google-cloud-platform|google-cloud-dataflow|apache-beam| |
null |
I am having this problem. Basically I want to parse an integer string into an int** where the sub int arrays are two elements long. The first index is the digit and the second is the power of ten associated with the digit.
For example 12345 would give [[1, 4], [2, 3], [3, 2], [4, 1], [5, 0]]
Here is the code:
```c
#include <stdlib.h>

/* Returns the number of characters in the NUL-terminated string str. */
int ft_strlen(char *str)
{
    int count;

    count = 0;
    while (*str++)
        count++;
    return (count);
}

/*
** Parses a digit string into a len-sized array of int[2] pairs where
** pair[0] is the digit value and pair[1] is its power of ten, e.g.
** "12345" -> {{1,4},{2,3},{3,2},{4,1},{5,0}}.
** Returns NULL on allocation failure (rows allocated so far are freed).
*/
int **parray(char *str)
{
    int **ret;
    int len;
    int i;

    len = ft_strlen(str);
    /*
    ** BUG FIX: the outer table stores `len` POINTERS, so it needs
    ** sizeof(int *) * len bytes, not sizeof(int) * len. On 64-bit
    ** platforms sizeof(int) == 4 while sizeof(int *) == 8, so the old
    ** code allocated half the required space and the ret[i] stores ran
    ** past the buffer, corrupting the first entries — the garbage
    ** values observed for strings longer than 4 digits.
    */
    ret = (int **)malloc(sizeof(int *) * len);
    if (ret == NULL)
        return (NULL);
    i = 0;
    while (i < len)
    {
        ret[i] = (int *)malloc(sizeof(int) * 2);
        if (ret[i] == NULL)
        {
            /* Free rows already allocated so a mid-way failure
            ** does not leak memory. */
            while (i > 0)
                free(ret[--i]);
            free(ret);
            return (NULL);
        }
        ret[i][0] = str[i] - '0';   /* digit value */
        ret[i][1] = len - 1 - i;    /* power of ten for this position */
        i++;
    }
    return (ret);
}
```
This then get tested in a main function as follows:
```c
#include <stdio.h>
int main()
{
char *str = "1255555555555555";
int**ret = parray(str);
int i = 0;
while (i < ft_strlen(str))
{
printf("%d ", ret[i][0]);
printf("%d ", ret[i][1]);
printf("\n");
i++;
}
}
```
This for some reason prints out:
```
29791280 0
29791408 0
5 13
5 12
5 11
5 10
5 9
5 8
5 7
5 6
5 5
5 4
5 3
5 2
5 1
5 0
```
Note that the program works fine with strings of 4 digits or fewer. The first two non-working values also change every time you run the program. I'm assuming there is some overflow somewhere, but I don't know where or how. I'm fairly new to C, so please excuse me if this is a silly mistake.
Mallocing int* inside of int** gives unexpected integer values in the first and sometimes second allocation |
|c++|c|memory-management|malloc| |
I don't really know how I would fix this problem, but my past experience with tkinter has told me that it might be a problem with the font. You can use the command `xfd -fa "[your font here]"` to see all possible emojis/symbols for your font. I believe you have to have the emoji pack for Ubuntu installed.
[1]: https://simpleit.rocks/linux/showing-emojis-in-ubuntu-linux/ |
The modal is designed to appear when a required field has been left empty (a few input fields are exempt). It shows up, yes, but after clicking OK it won't hide.
I did some troubleshooting but nothing really changed. Any advice or help would be appreciated.
<!--Required Fields modal -->
<!-- NOTE(review): this overlay carries BOTH the `hidden` and `flex`
     Tailwind classes. Which one wins (display:none vs display:flex)
     depends on rule order in the generated stylesheet — if the modal
     refuses to disappear after `hidden` is re-added by the script,
     this class conflict is the likely cause; toggle `flex` together
     with `hidden` instead of leaving it permanently on the element. -->
<div id="requiredFieldsModal" class="hidden fixed flex top-0 left-0 w-full h-full items-center justify-center bg-black bg-opacity-50">
    <div class="bg-white p-5 rounded-lg text-center">
        <h2 class="mb-4">Please fill in all required fields.</h2>
        <!-- Clicking OK should re-hide the modal (handled in the script). -->
        <button id="confirmFill" class="mr-2 px-4 py-2 bg-yellow-400 hover:bg-yellow-500 text-white rounded">OK</button>
    </div>
</div>
<script>
// Client-side required-field validation: blocks form submission and
// shows the modal when any required input is empty.
document.addEventListener('DOMContentLoaded', () => {
    const form = document.querySelector('form');
    const inputs = form.querySelectorAll('input');
    const requiredFieldsModal = document.querySelector('#requiredFieldsModal');
    const confirmFill = document.querySelector('#confirmFill');

    form.addEventListener('submit', (event) => {
        console.log('Form submitted'); // Add this line to check if the code is being triggered
        let hasEmptyField = false;
        // Every input is required except email, contactNumber and
        // endOfEmployment.
        inputs.forEach((input) => {
            if (input.value.trim() === '' && input.name !== 'email' && input.name !== 'contactNumber' && input.name !== 'endOfEmployment') {
                hasEmptyField = true;
            }
        });
        if (hasEmptyField) {
            // Cancel the submit and reveal the modal overlay.
            event.preventDefault();
            requiredFieldsModal.classList.remove('hidden');
        }
    });

    // OK button: hide the modal again by restoring the `hidden` class.
    // NOTE(review): this only re-adds `hidden`; if the modal still shows,
    // check whether another display class on the element (e.g. `flex`)
    // wins over `hidden` in the compiled stylesheet.
    confirmFill.addEventListener('click', () => {
        requiredFieldsModal.classList.add('hidden');
    });
});
</script>
{"Voters":[{"Id":1773237,"DisplayName":"Hhovhann"}]} |
I've got a solution for you which required 2 basic changes
1. Convert the SVG to a .svg image URL.
2. Add a `::before` to the anchor tag and add the SVG to its `content`
Example:
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-css -->
p {
max-width: 140px;
}
a {
position: relative;
display: flex;
align-items: center;
justify-content: center;
}
.a::before {
content: url('https://img.cdn4dd.com/s/media/photosV2/1954ba82-d91f-43a2-8838-767fd2ad386f-retina-large.svg');
display: block;
width: auto;
height: auto;
}
<!-- language: lang-html -->
<p>
This is some text that includes a link
<a class="a" href="https://example.com">
<span>
(example)
</span>
</a>
</p>
<!-- end snippet -->
|
I don't 100% understand what you are after.
``<nobr>`` can keep content together:
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-css -->
p {
width: 140px
}
<!-- language: lang-html -->
<p>This is some text that includes a link
<nobr>(
<a href="https://example.com">
<svg style="height: 1em; color: green;" viewbox="0 0 10 10" fill="currentColor"><circle cx="5" cy="5" r="5" /></svg>example
</nobr>
</a>)</p>
<!-- end snippet -->
|
I need to set up a remote clocking system for my employees as they have the flexibility to come in whenever they want. This system should be capable of remotely storing employees' arrival and departure times using iris and fingerprint recognition.
My questions are as follows:
1. What hardware do you recommend for this system?
2. How can I register users with their biometric information?
3. I want to have two or three remote devices acting as clients, along with an administrator interface to control all devices. How can I set up this architecture?
I would really appreciate any help you can provide. As a Laravel developer, I have skills in web development, but I am new to the field of biometrics and setting up remote clocking systems.
Thank you in advance for your assistance!
Best regards,
Ben Becker
In a high school of commerce, I found two electronic devices equipped with fingerprint readers and cameras for facial recognition. I believe that the cameras can be considered as users. And from these devices, authentication can be performed. |
Setting Up a Remote Employee Clocking System with Biometric Recognition |
|laravel|client-server|system|communication| |
null |
Doesn't Spring MVC's ResponseEntity&lt;Resource&gt; accept an InputStreamResource?
I have created an ASP.NET Core 6 Web API in C# with Docker support and it worked perfectly when I try to run from Visual Studio. When I deploy the docker build image to docker desktop app, I get an error:
> <sup>2024-03-29 17:50:53 info: Microsoft.Hosting.Lifetime[14]
> 2024-03-29 17:50:53 Now listening on: http://[::]:8080
> 2024-03-29 17:50:53 info: Microsoft.Hosting.Lifetime[0]
> 2024-03-29 17:50:53 Application started. Press Ctrl+C to shut down.
> 2024-03-29 17:50:53 info: Microsoft.Hosting.Lifetime[0]
> 2024-03-29 17:50:53 Hosting environment: Development
> 2024-03-29 17:50:53 info: Microsoft.Hosting.Lifetime[0]
> 2024-03-29 17:50:53 Content root path: /app/
> 2024-03-29 17:50:59 warn: Microsoft.AspNetCore.HttpsPolicy.HttpsRedirectionMiddleware[3]
> 2024-03-29 17:50:59 Failed to determine the https port for redirect.
> 2024-03-29 17:50:59 info: WebApplication Controllers.HealthController[0]
> 2024-03-29 17:50:59 Getting token Async
> 2024-03-29 17:50:59 info: WebApplication Controllers.HealthController[0]
> 2024-03-29 17:50:59 Use Key Vault True
> 2024-03-29 17:50:59 info: WebApplication Controllers.HealthController[0]
> 2024-03-29 17:50:59 Keyvault : : XXXXX
> 2024-03-29 17:50:59 info: WebApplication Controllers.HealthController[0]
> 2024-03-29 17:50:59 Keyvault URL : : https://XXXXX.vault.azure.net
> 2024-03-29 17:50:59 info: WebApplication Controllers.HealthController[0]
> 2024-03-29 17:50:59 Creating Secret client
> 2024-03-29 17:50:59 info: WebApplication Controllers.HealthController[0]
> 2024-03-29 17:50:59 Getting Secret
> 2024-03-29 17:51:02 fail: WebApplication Controllers.HealthController[0]
> 2024-03-29 17:51:02 Health Controller : InteractiveBrowserCredential authentication failed: Persistence check failed. Inspect inner exception for details
> 2024-03-29 17:51:02 Azure.Identity.AuthenticationFailedException: InteractiveBrowserCredential authentication failed: Persistence check failed. Inspect inner exception for details
> 2024-03-29 17:51:02 ---> Microsoft.Identity.Client.Extensions.Msal.MsalCachePersistenceException: Persistence check failed. Inspect inner exception for details
> 2024-03-29 17:51:02 ---> System.DllNotFoundException: Unable to load shared library 'libsecret-1.so.0' or one of its dependencies. In order to help diagnose loading problems, consider setting the LD_DEBUG environment variable: liblibsecret-1.so.0: cannot open shared object file: No such file or directory
> 2024-03-29 17:51:02 at Microsoft.Identity.Client.Extensions.Msal.Libsecret.secret_schema_new(String name, Int32 flags, String attribute1, Int32 attribute1Type, String attribute2, Int32 attribute2Type, IntPtr end)
> 2024-03-29 17:51:02 at Microsoft.Identity.Client.Extensions.Msal.LinuxKeyringAccessor.GetLibsecretSchema()
> 2024-03-29 17:51:02 at Microsoft.Identity.Client.Extensions.Msal.LinuxKeyringAccessor.Write(Byte[] data)
> 2024-03-29 17:51:02 at Microsoft.Identity.Client.Extensions.Msal.Storage.VerifyPersistence()
> 2024-03-29 17:51:02 --- End of inner exception stack trace ---
> 2024-03-29 17:51:02 at Microsoft.Identity.Client.Extensions.Msal.Storage.VerifyPersistence()
> 2024-03-29 17:51:02 at Microsoft.Identity.Client.Extensions.Msal.MsalCacheHelper.VerifyPersistence()
> 2024-03-29 17:51:02 at Azure.Identity.MsalCacheHelperWrapper.VerifyPersistence()
> 2024-03-29 17:51:02 at Azure.Identity.TokenCache.GetCacheHelperAsync(Boolean async, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Identity.TokenCache.GetCacheHelperAsync(Boolean async, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Identity.TokenCache.RegisterCache(Boolean async, ITokenCache tokenCache, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Identity.MsalClientBase GetClientAsync(Boolean enableCae, Boolean async, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Identity.MsalPublicClient.AcquireTokenInteractiveCoreAsync(String[] scopes, String claims, Prompt prompt, String loginHint, String tenantId, Boolean enableCae, BrowserCustomizationOptions browserOptions, Boolean async, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Identity.MsalPublicClient.AcquireTokenInteractiveAsync(String[] scopes, String claims, Prompt prompt, String loginHint, String tenantId, Boolean enableCae, BrowserCustomizationOptions browserOptions, Boolean async, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Identity.InteractiveBrowserCredential.GetTokenViaBrowserLoginAsync(TokenRequestContext context, Boolean async, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Identity.InteractiveBrowserCredential.GetTokenImplAsync(Boolean async, TokenRequestContext requestContext, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 --- End of inner exception stack trace ---
> 2024-03-29 17:51:02 at Azure.Identity.CredentialDiagnosticScope.FailWrapAndThrow(Exception ex, String additionalMessage, Boolean isCredentialUnavailable)
> 2024-03-29 17:51:02 at Azure.Identity.InteractiveBrowserCredential.GetTokenImplAsync(Boolean async, TokenRequestContext requestContext, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Identity.InteractiveBrowserCredential.GetTokenAsync(TokenRequestContext requestContext, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Identity.DefaultAzureCredential.GetTokenFromSourcesAsync(TokenCredential[] sources, TokenRequestContext requestContext, Boolean async, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Identity.DefaultAzureCredential.GetTokenImplAsync(Boolean async, TokenRequestContext requestContext, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Identity.CredentialDiagnosticScope.FailWrapAndThrow(Exception ex, String additionalMessage, Boolean isCredentialUnavailable)
> 2024-03-29 17:51:02 at Azure.Identity.DefaultAzureCredential.GetTokenImplAsync(Boolean async, TokenRequestContext requestContext, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Identity.DefaultAzureCredential.GetTokenAsync(TokenRequestContext requestContext, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Core.Pipeline.BearerTokenAuthenticationPolicy.AccessTokenCache.GetHeaderValueFromCredentialAsync(TokenRequestContext context, Boolean async, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Core.Pipeline.BearerTokenAuthenticationPolicy.AccessTokenCache.GetHeaderValueAsync(HttpMessage message, TokenRequestContext context, Boolean async)
> 2024-03-29 17:51:02 at Azure.Core.Pipeline.BearerTokenAuthenticationPolicy.AccessTokenCache.GetHeaderValueAsync(HttpMessage message, TokenRequestContext context, Boolean async)
> 2024-03-29 17:51:02 at Azure.Core.Pipeline.BearerTokenAuthenticationPolicy.AuthenticateAndAuthorizeRequestAsync(HttpMessage message, TokenRequestContext context)
> 2024-03-29 17:51:02 at Azure.Security.KeyVault.ChallengeBasedAuthenticationPolicy.AuthorizeRequestOnChallengeAsyncInternal(HttpMessage message, Boolean async)
> 2024-03-29 17:51:02 at Azure.Core.Pipeline.BearerTokenAuthenticationPolicy.ProcessAsync(HttpMessage message, ReadOnlyMemory 1 pipeline, Boolean async)
> 2024-03-29 17:51:02 at Azure.Core.Pipeline.RedirectPolicy.ProcessAsync(HttpMessage message, ReadOnlyMemory 1 pipeline, Boolean async)
> 2024-03-29 17:51:02 at Azure.Core.Pipeline.RetryPolicy.ProcessAsync(HttpMessage message, ReadOnlyMemory 1 pipeline, Boolean async)
> 2024-03-29 17:51:02 at Azure.Core.Pipeline.RetryPolicy.ProcessAsync(HttpMessage message, ReadOnlyMemory 1 pipeline, Boolean async)
> 2024-03-29 17:51:02 at Azure.Core.Pipeline.HttpPipeline.SendRequestAsync(Request request, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Security.KeyVault.KeyVaultPipeline.SendRequestAsync(Request request, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at Azure.Security.KeyVault.KeyVaultPipeline.SendRequestAsync[TResult](RequestMethod method, Func 1 resultFactory, CancellationToken cancellationToken, String[] path)
> 2024-03-29 17:51:02 at Azure.Security.KeyVault.Secrets.SecretClient.GetSecretAsync(String name, String version, CancellationToken cancellationToken)
> 2024-03-29 17:51:02 at WebApplication Controllers.AdministratorControllerBase.GetSpnSecretAsync(String secretKey) in /src/WebApplication1/Controllers/AdministratorControllerBase.cs:line 43
> 2024-03-29 17:51:02 at WebApplication Controllers.AdministratorControllerBase.GetTokenAsync() in /src/WebApplication1/Controllers/AdministratorControllerBase.cs:line 59
> 2024-03-29 17:51:02 at WebApplication Controllers.HealthController.Get() in /src/WebApplication1/Controllers/HealthController.cs:line 30
> 2024-03-29 17:51:10 info: WebApplication1 Controllers.WeatherForecastController[0]
> 2024-03-29 17:51:10 Use Key Vault True
> 2024-03-29 17:51:10 info: WebApplication Controllers.WeatherForecastController[0]
> 2024-03-29 17:51:10 Keyvault : : XXXXX
> 2024-03-29 17:51:10 info: WebApplication Controllers.WeatherForecastController[0]
> 2024-03-29 17:51:10 Keyvault URL : : https://XXXX.vault.azure.net
> 2024-03-29 17:51:10 info: WebApplication Controllers.WeatherForecastController[0]
> 2024-03-29 17:51:10 Creating Secret client
> 2024-03-29 17:51:10 info: WebApplication Controllers.WeatherForecastController[0]
> 2024-03-29 17:51:10 Getting Secret
> 2024-03-29 19:13:04 info: Microsoft.Hosting.Lifetime[0]
> 2024-03-29 19:13:04 Application is shutting down...
> </sup>
I also tried to run the project with .NET 7 and used the latest packages and followed the checked on the
https://github.com/Azure/azure-sdk-for-net/issues/28120
This is my code:
    /// <summary>
    /// Reads the secret named <paramref name="secretKey"/> from the Azure
    /// Key Vault whose name is taken from the KEYVAULT environment variable.
    /// </summary>
    /// <param name="secretKey">Name of the secret to fetch.</param>
    /// <returns>The secret's string value.</returns>
    /// <remarks>
    /// NOTE(review): includeInteractiveCredentials: true lets
    /// DefaultAzureCredential fall back to an interactive browser login,
    /// which cannot succeed inside a headless container — the logged
    /// InteractiveBrowserCredential / libsecret persistence failure points
    /// at this fallback. Confirm and prefer a non-interactive credential
    /// (environment variables or managed identity) when containerized.
    /// </remarks>
    protected static async Task<string> GetSpnSecretAsync(string secretKey)
    {
        var keyVaultName = Environment.GetEnvironmentVariable("KEYVAULT");
        var keyVaultUrl = $"https://{keyVaultName}.vault.azure.net";
        // Credential chain; interactive browser enabled as a last resort.
        var credential = new DefaultAzureCredential(includeInteractiveCredentials: true);
        var client = new SecretClient(vaultUri: new Uri(keyVaultUrl), credential: credential);
        var secret = await client.GetSecretAsync(secretKey);
        var secretValue = secret.Value.Value;
        return secretValue;
    }
|
Unable to connect to Azure Keyvault when I deploy ASP.NET Core 6 Web API (C#) , the docker image to docker desktop app |
I no longer see this issue you mention on the emulator or preview. The `.padding(start=` should add padding to the start. The `paddingFromBaseline(top=` should add padding from Baseline (ie) you should see top padding getting added. This is the output of your code that I see
[![enter image description here][1]][1]
> I'm under the impression that the order in which Modifier methods are called doesn't matter so the extra padding shouldn't be there.
No. The order of the Modifier methods MATTER and you should be concise when you chain your Modifier methods. [Refer the official doc](https://developer.android.com/develop/ui/compose/modifiers#order-modifier-matters)
[1]: https://i.stack.imgur.com/NNSYl.png |
when I clicked on publish in vs 2022, it gives a dialog to choose specifications of my release when i click on show all settings the ef migrations gives this error.
> Entity Framework Migrations
> X dotnet tool install dotnet-ef
> The tool package could not be restored. Tool 'dotnet-ef' failed to install. This failure may have been caused by:
> * You are attempting to install a preview release and did not use the --version option to specify the versi
> * A package by this name was found, but it was not a .NET tool.
> * The required NuGet feed cannot be accessed, perhaps because of an Internet connection problem.
[dotnet ef issue image](https://github.com/dotnet/EntityFramework.Docs/assets/20419743/9ec12b37-fbde-4dc9-8f79-ae2b30552457)
I am trying to publish an asp core mvc `(target dotnet version is 6)`.
I tried the cli to publish `dotnet publish -p:PublishProfile=FolderProfile` give me this error code `MSB4236`, I found the file that is not found located in `C:\Program Files\dotnet\sdk\6.0.419\Sdks` however when I execute that command seems it can't find that location and it says that it couldn't find the file in `C:\ProgramFiles\dotnet\sdk\6.0.419`, so i manually copied the content of `Skds` to its parent directory `..\sdk\6.0.419` then I rerun the command again as result it throws this error `MSB4044 The "ProcessFrameworkReferences" task was not given a value for the required parameter "RuntimeGraphPath".`
then I tried the `vs 2022` wizard to publish it gives the error:
> Entity Framework Migrations
> dotnet tool restore
> Package "dotnet-ef" failed to restore, due to Microsoft. DotNet. ToolPackage. ToolPackageException: The tool package could not be restored. at Microsoft. DotNet. Tools. Tool.Install. ProjectRestorer. Restore (FilePath project, PackageLocation packageLocation, String verbosity) at Microsoft.DotNet. Tool Package. Tool Packageinstaller.Install Package To External Managed Location (PackageLocation packageLocation, Packageld packageld, VersionRar at Microsoft.DotNet. Tools. Tool. Restore. Tool RestoreCommand.Install Packages (ToolManifest Package package, Nullable`1 configFile) Restore failed
[dotnet ef issue 2 image](https://github.com/dotnet/EntityFramework.Docs/assets/20419743/94cfee6a-b7ad-41db-9440-c69531310372)
I tried to install it manually using cli:
`dotnet tool install --global dotnet-ef`
`dotnet tool install --local dotnet-ef`
It says `dotnet-ef is already installed` but when I tried the same command in other pc it says `dotnet-ef is installed or updated` no matter how many times I ran the command.
I forget to mention that I did not find the `.Net Sdk` in `visual studio installer individual components`
this is an image from an other pc that has the `.net sdk`:
[dotnet ef issue 3 image](https://github.com/dotnet/EntityFramework.Docs/assets/20419743/92d04ec4-04fa-4abb-ae45-4cf285a36895)
and I tried these answers:
[cannot install dotnet-ef tool on windows 10](https://stackoverflow.com/questions/58764322/cannot-install-dotnet-ef-tool-on-windows-10), also this answer [install dotnet-ef tool](https://stackoverflow.com/questions/71583816/install-dotnet-ef-tool) I did not find `.Net Sdk` as mentioned above.
I did not tried this answer yet [67272316](https://stackoverflow.com/questions/67272316/dotnet-tool-install-global-dotnet-ef) because I am away from the pc that has the issue (Monday I'll try it).
> [my gh issue](https://github.com/dotnet/EntityFramework.Docs/issues/4698). |
VS Community 2022 cannot install dotnet-ef when i try to publish |
|c#|.net|visual-studio|asp.net-core| |
null |
I have problem in this sample code.
```
#include <stdio.h>
#include <stdlib.h>
int main()
{
int i = 1, j = 2 ;
int **ipp = NULL;
int *ip1 = &i, *ip2= &j ;
ipp = &ip1;
// both below lines cause segmentation fault
// *ipp = ip1 ;
// **ipp = 3;
printf("ipp: %p %p %d \n",ipp,*ipp , **ipp);
printf("ip1: %p %d \n",ip1,*ip1 );
printf("ip2: %p %d \n",ip2,*ip2);
printf("i: %d \n",i);
printf("j: %d \n",j);
return 0 ;
}
```
"I would really appreciate it if someone could guide me."
|
change the value of double pointer with indirection operator cause segmentation fault |
|c|pointers|segmentation-fault|double-pointer|multiple-indirection| |
null |
If you are using VS Code, you can put a breakpoint after `rdpcap()`, then run the script via Python debugger. In the VS Code debug panel you should be able to expand the object and find readable information. |
I'm trying to load a VGG19 checkpoint offline from a local file instead of the regular PyTorch method (which downloads it online) and ran into problems.
So basically I'm doing this:
https://pytorch.org/tutorials/advanced/neural_style_tutorial.html
and instead of
cnn = models.vgg19(pretrained=True).features.to(device).eval()
Which works well with the rest, I want to work from a local *.pth file (the same, 'vgg19-dcbb9e9d.pth', put in specific folder) then I tried using this method :
checkpoint = torch.load('models/vgg19-dcbb9e9d.pth')
cnn = models.vgg19()
cnn.load_state_dict(checkpoint)
cnn.eval()
but then got an error
---> 32 raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))
33
34 model.add_module(name, layer)
RuntimeError: Unrecognized layer: Sequential
Basically the model wasn't loaded or read correctly, as it seems it didn't find the layers the code is looking for.
Is there something I'm missing ? |
Using your code I was able to reproduce the problem.
You are running into a well-known, decades old, "_Won't Fix_" bug in Delphi. The workaround is to set the `Form.AutoSize` property to `False` at design time:

This then lets the form be maximized correctly both at runtime and design time:

I'm guessing the arm-chair commenters are running at standard DPI. |
# Option #1
We can do that with a [lifecycle precondition][1] in a null_resource
A couple of things before we dive into the code:
- Your code has two `sc1_default` variables I'm assuming that the second one was a typo and it what we need there is `sc1_default`
- For additional validation use `type` in the variables, it's a good practice makes the code more readable and if someone accidentally passes the wrong type the code fails gracefully.
see sample code below
``` lang-hcl
variable "sc1_default" {
type = bool
default = "false"
}
variable "sc2_default" {
type = bool
default = "false"
}
variable "sc3_default" {
type = bool
default = "true"
}
variable "sc4_default" {
type = bool
default = "true"
}
resource "null_resource" "validation" {
lifecycle {
precondition {
condition = (
(var.sc1_default ? 1 : 0) +
(var.sc2_default ? 1 : 0) +
(var.sc3_default ? 1 : 0) +
(var.sc4_default ? 1 : 0)
) < 2
error_message = "Only one sc can be true"
}
}
}
```
You can see I set the `sc3_default` and `sc4_default` both to true just to trigger the error ...
The condition is the core of this validation: we are just adding up all the true values with the help of the shorthand if syntax `(var.sc_default ? 1 : 0)`, and the total should be less than two. I'm assuming that all false is OK, but if not you can change that logic to check that exactly one is true.
A terraform plan on that code will error out with the following message:
``` lang-txt
Planning failed. Terraform encountered an error while generating this plan.
β·
β Error: Resource precondition failed
β
β on main.tf line 22, in resource "null_resource" "validation":
β 22: condition = (
β 23: (var.sc1_default ? 1 : 0) +
β 24: (var.sc2_default ? 1 : 0) +
β 25: (var.sc3_default ? 1 : 0) +
β 26: (var.sc4_default ? 1 : 0)
β 27: ) < 2
β βββββββββββββββββ
β β var.sc1_default is "false"
β β var.sc2_default is "false"
β β var.sc3_default is "true"
β β var.sc4_default is "true"
β
β Only one sc can be true
```
____
# Option #2
If you can change the input variables we could reduce it to just one with a `list(bool)` the code will be smaller and the validation would be right on the variable
``` lang-hcl
variable "sc_default" {
type = list(bool)
description = "list of sc default values"
default = ["false", "false", "false", "false"]
validation {
condition = length(var.sc_default) == 4
error_message = "Four defaults expected"
}
validation {
condition = sum([for x in var.sc_default : x ? 1 : 0]) < 2
error_message = "Only one sc can be true"
}
}
```
[1]: https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle#custom-condition-checks |
I'm trying to create a monster that spreads in every direction infinitely but for some reason it's just making a "+" symbol |
|2d|godot|gdscript| |
null |
This will correctly infer keys in the return value, you have to add params etc, but this should be a good start
```typescript
// Setting names grouped by the shape of the value they resolve to.
type StringSetting = 'outputPath' | 'fileName'
type StringSettingWithParam = 'emailAddress' | 'homeDirectory'
type ArrayOfStringsSetting = 'inputPaths'
type Settings = StringSetting | StringSettingWithParam | ArrayOfStringsSetting

// Argument shape: every requested key maps to a { setting } descriptor.
type GetSettingsArgs<T> = {
    [key in keyof T]: { setting: Settings }
}

// Result shape: the same keys, each resolved to a string value.
type GetSettingsReturnValue<T> = {
    [key in keyof T]: string
}

// Resolves each requested setting. The generic T is inferred from the
// argument object literal, so the return value is typed with exactly the
// keys the caller passed in.
function getSettings<T>(args: GetSettingsArgs<T>): GetSettingsReturnValue<T> {
    const resolved = Object.keys(args).reduce((acc, key) => {
        acc[key] = "somesettings"
        return acc
    }, {} as Record<string, string>)
    return resolved as GetSettingsReturnValue<T>
}

const settings = getSettings({
    a: { setting: "outputPath" }
})
const aSettings = settings.a;
``` |
The problem I am trying to solve is that the loop I wrote to read all `NetCDF` files nested in multiple folders is not able to perform the `CDO` operations as I expect for individual files. Below is a brief description of what I am looking for.
I have a directory on my Linux machine — let's name it MYDIR_SST — with a number of folders labeled by year (1982, 1983, 1984, ..., 2022). Within each sub-folder there are other folders named according to the months (01, 02, 03, ..., 12), and several NetCDF daily data files inside each of these folders, as follows:
MYDIR_SST[directory]
--1982[first nested folder]
------01[second nested folder]
------02[second nested folder]
------03[second nested folder]
------12[second nested folder]
--1982[first nested folder]
------01[second nested folder]
------02[second nested folder]
------12[second nested folder]
~/MYDIR_SST/1982/01 19820101120000-UKMO-L4_GHRSST-SSTfnd-OSTIA-GLOB_REP-v02.0-fv02.0.nc 19820102120000-UKMO-L4_GHRSST-SSTfnd-OSTIA-GLOB_REP-v02.0-fv02.0.nc 19820103120000-UKMO-L4_GHRSST-SSTfnd-OSTIA-GLOB_REP-v02.0-fv02.0.nc 19820104120000-UKMO-L4_GHRSST-SSTfnd-OSTIA-GLOB_REP-v02.0-fv02.0.nc
~/MYDIR_SST/1983/01 19830101120000-UKMO-L4_GHRSST-SSTfnd-OSTIA-GLOB_REP-v02.0-fv02.0.nc 19830102120000-UKMO-L4_GHRSST-SSTfnd-OSTIA-GLOB_REP-v02.0-fv02.0.nc 19830103120000-UKMO-L4_GHRSST-SSTfnd-OSTIA-GLOB_REP-v02.0-fv02.0.nc 19830104120000-UKMO-L4_GHRSST-SSTfnd-OSTIA-GLOB_REP-v02.0-fv02.0.nc
I want to perform 2 operations on each file in all the folders. The operations are defined below as I am doing for Netcdf files in folder ~/MYDIR_SST/1982/01.
```
cdo mergetime *.nc outfile.nc;
cdo ymonmean outfile.nc L4_GHRSST-SSTfnd_1982_01.nc
```
I want to save FINAL OUTPUTS AS BELOW FORMAT;
**L4_GHRSST-SSTfnd_YYYY_MM.nc**
And save all L4_GHRSST-SSTfnd_1982_01.nc, L4_GHRSST-SSTfnd_1982_02.nc,...,L4_GHRSST-SSTfnd_2022_01.nc,..., L4_GHRSST-SSTfnd_2022_12.nc in a different folder named monmean in MYDIR_SST (~/MYDIR_SST/monmean). Can someone help me create a shell script which can perform the mentioned process?
pattern should be, L4_GHRSST-SSTfnd_1982_01.nc, L4_GHRSST-SSTfnd_1982_02.nc, etc.
```
#!/bin/sh
set -xv #debugging

# Derive one pattern per group of daily NetCDF files (everything before
# "-L4_") and run `cdo mergetime` on each group.
# NOTE(review): the for-loop glob ends in "/" so it lists DIRECTORIES
# (YYYY/MM), whose names contain no "-L4_" — the sed substitution is a
# no-op on them, so the derived "patterns" are directory paths, not file
# prefixes. The glob should likely target the .nc files themselves,
# e.g. GLO_SST_L4_OBS_CP/*/*/*.nc — confirm against the data layout.
for file in GLO_SST_L4_OBS_CP/*/*/
do
    echo "$file"
done | sed 's/-L4_.*//' | sort -u | while read -r pattern
do
    # Merge every file sharing this prefix into one time series.
    cdo mergetime "${pattern}"* "${pattern}_mergetime.nc"
done
echo 'JOB DONE'
``` |
Best list implementation for ordering objects by field in Java? |
Assuming your data frame is called `bird_df` after you've read it in with `read.csv()`, you can subset the data to only contain rows where `Order1` is equal to Passeriformes, Charadriiformes, Psittaciformes, or Struthioniformes using:
```r
subset_bird_df <- bird_df[bird_df$Order1 %in% c("Passeriformes", "Charadriiformes", "Psittaciformes", "Struthioniformes"),]
``` |
An extension on tuples would have to conform tuples of any number of elements to the protocol. Your use case only makes sense with 3-tuples, so a tuple extension isn't very suitable.
You should keep your `Brio` struct, and create a result builder for `Brio`s,
```
/// Result builder that lets a list of `Brio` values be written
/// declaratively, with each row expressed as a bare
/// `(String, String, String)` tuple or a `Divider()`.
@resultBuilder
enum BrioBuilder {
    /// Lifts a 3-tuple expression into a `Brio` via `Brio.init(_:_:_:)`.
    static func buildExpression(_ expression: (String, String, String)) -> Brio {
        let (c, t, i) = expression
        return Brio(c, t, i)
    }

    /// Maps a `Divider()` expression to the `Brio.divider` sentinel.
    static func buildExpression(_ expression: Divider) -> Brio {
        Brio.divider
    }

    /// Collects the block's expressions into the final `[Brio]` array.
    static func buildBlock(_ components: Brio...) -> [Brio] {
        components
    }
}

/// Entry point: evaluates a `BrioBuilder` closure and returns its rows.
func buildBrios(@BrioBuilder block: () -> [Brio]) -> [Brio] {
    block()
}
```
Then you can achieve a usage like this:
```
// noms will be of type [Brio]
let noms = buildBrios {
("CA", "Cats", "cat.fill")
("DO", "Dogs", "fido.circle")
("DO", "Horsies", "equine.circle")
Divider()
("DEV", "Dev reload", "link")
}
```
You cannot use the `.divider` syntax in a result builder (See [here][1]). You can write `Brio.divider`, or use a separate struct (I have borrowed the `Divider` from SwiftUI here).
You can extend this further by implementing `buildArray`, `buildIf`, etc in the result builder. This would allow you to use loops and if statements in the result builder closure. For more information, see the [Swift Evolution proposal][2].
[1]: https://stackoverflow.com/q/70224957/5133585
[2]: https://github.com/apple/swift-evolution/blob/main/proposals/0289-result-builders.md |