instruction stringlengths 0 30k ⌀ |
|---|
|swiper.js|gsap|react-swiper| |
null |
I am using cytoscape.js to build my nice networks. I would like to know if there is any way to export the styles and the elements of the network into one file so that users of my web-app are able to load it into their own Cytoscape desktop for further exploration and modification.
I know I can get the elements with: `cy.json().elements` and styles with: `cy.style().json()`.
But I can't figure out how these can be merged into a file that is recognisable from Cytoscape Desktop.
|
I am trying to help my niece with her Mercedes Sprinter W903 2000 model.
The car is facing a well known "Start Error" fault, which is caused by a fault in the immobilizer unit.
I have found an immobilizer emulator which works with the CR2 ECU in the car, but in order to get this to work, I need to overwrite some data in the ST95P08 EEPROM in the ECU.
ST95P08 datasheet link: ST95P08 Datasheet(PDF) - STMicroelectronics
The EEPROM has been desoldered and wired up to my Arduino Uno.
Using the following code, I am able to read the content of all 1024 addresses in the EEPROM:
` #include <SPI.h>
#define CS_PIN 10 // Chip select pin (S)
#define MOSI_PIN 11 // Serial Data input (D)
#define MISO_PIN 12 // Serial data output (Q)
#define SCK_PIN 13 // Serial clock
#define W_PIN 7 // Write Protect (W)
#define HOLD_PIN 6 // Hold
// One-time hardware setup: configure the control pins, put the ST95P08
// into a safe read-only state, and open the SPI bus.
void setup() {
// Set the pin modes for the control lines we drive manually
pinMode(CS_PIN, OUTPUT);
pinMode(W_PIN, OUTPUT);
pinMode(HOLD_PIN, OUTPUT);
// MOSI/MISO/SCK are configured by SPI.begin(); these stay disabled
// pinMode(MOSI_PIN, OUTPUT);
// pinMode(MISO_PIN, INPUT);
// pinMode(SCK_PIN, OUTPUT);
// Start the SPI library
SPI.begin();
// Deselect the EEPROM (S / chip select is active-low)
digitalWrite(CS_PIN, HIGH);
// Only readmode
digitalWrite(W_PIN, LOW); // Enable write protect (FOR READING)
digitalWrite(HOLD_PIN, HIGH); // Disable HOLD
Serial.begin(9600); // Init serial comm
// Adjust SPI settings for EEPROM: 2 MHz, MSB-first, SPI mode 0
SPI.beginTransaction(SPISettings(2000000, MSBFIRST, SPI_MODE0));
}
// Dump the whole device once over the serial console, then halt.
void loop() {
// Read from address 0x00 to 0x3FF (for a 1 KByte EEPROM)
for (unsigned int address = 0; address < 1024; address++) {
// NOTE(review): make sure readEEPROM accepts the full 10-bit address —
// an 8-bit parameter would silently truncate every address >= 0x100,
// so only the first 256 cells would really be read here. Confirm.
byte data = readEEPROM(address);
Serial.print("Address 0x");
if (address < 0x10) Serial.print("0"); // Print leading zero for addresses less than 0x10
Serial.print(address, HEX); // Print the address in HEX
Serial.print(": 0x");
Serial.println(data, HEX); // Print the data in HEX
delay(50); // Short delay to not overwhelm the serial monitor
}
while(1); // Stop after reading the entire EEPROM
}
// Read one byte from the ST95P08.
// Fixes vs. original:
//  * The parameter was declared 'byte', silently truncating every address
//    >= 0x100, so only the first 256 of the 1024 cells were reachable.
//  * Per the ST95P08 datasheet, the device takes a SINGLE address byte;
//    the two high address bits A9:A8 are carried in bits 4:3 of the
//    instruction byte itself.
byte readEEPROM(unsigned int address) {
  digitalWrite(CS_PIN, LOW); // Select the EEPROM
  // READ opcode 0x03 with A9:A8 folded into instruction bits 4:3
  SPI.transfer(0x03 | ((address >> 5) & 0x18));
  SPI.transfer(address & 0xFF); // Low 8 address bits
  byte result = SPI.transfer(0x00); // Dummy transfer clocks the data out
  digitalWrite(CS_PIN, HIGH); // Deselect the EEPROM
  return result;
}
// Write one byte to the ST95P08.
// Fixes vs. original:
//  * The ST95P08 takes ONE address byte, with A9:A8 carried in bits 4:3
//    of the instruction byte. Sending two address bytes (as before) makes
//    the chip consume the low address byte in the data phase.
//  * Every WRITE must be preceded by WREN (0x06) to set the Write Enable
//    Latch, otherwise the write is silently ignored.
// NOTE(review): setup() drives W_PIN LOW (write-protect asserted) in this
// read-only sketch — W must be HIGH before writes can succeed; confirm
// the pin state before calling this.
void writeEEPROM(unsigned int address, byte data) {
  // Arm the Write Enable Latch; it takes effect when S (CS) rises.
  digitalWrite(CS_PIN, LOW);
  SPI.transfer(0x06); // WREN opcode
  digitalWrite(CS_PIN, HIGH);

  digitalWrite(CS_PIN, LOW);
  // WRITE opcode 0x02 with A9:A8 folded into instruction bits 4:3
  SPI.transfer(0x02 | ((address >> 5) & 0x18));
  SPI.transfer(address & 0xFF); // Low 8 address bits
  SPI.transfer(data);
  digitalWrite(CS_PIN, HIGH); // Rising S starts the internal write cycle
  // Wait out the internal write cycle (t_W, max ~10 ms per datasheet)
  delay(10);
}
This works fine, and the output (shortened) looks like this:
`
Address 0x00: 0x61
Address 0x01: 0xFF
Address 0x02: 0xFF
Address 0x03: 0x71
Address 0x04: 0xFF
Address 0x05: 0x16
Address 0x06: 0x16
Address 0x07: 0x30
Address 0x08: 0x39
`
The five addresses which need new data are: 0x1D0, 0x1D1, 0x1D2, 0x1D3, 0x1D4
When the data of these addresses are read, i get:
`
Address 0x1D0: 0xFF
Address 0x1D1: 0xFF
Address 0x1D2: 0xFF
Address 0x1D3: 0xFF
Address 0x1D4: 0xFF
`
I try to write to these addresses using the following code:
`
#include <SPI.h>
#define CS_PIN 10 // Chip select pin (S)
#define W_PIN 7 // Write Protect (W)
#define HOLD_PIN 6 // Hold
bool isWritingMode = false; // Set to 'true' for writing, 'false' for reading
// Function Prototypes
void writeEnable();
void writeEEPROM(unsigned int address, byte data);
byte readEEPROM(unsigned int address);
byte readStatusRegister();
void waitForWriteCompletion();
// One-time setup: configure control pins (write-protect released), open
// SPI, then run the one-shot write/read-back test.
void setup() {
pinMode(CS_PIN, OUTPUT);
// Drive W high before switching the pin to output so the write-protect
// line never glitches low
digitalWrite(W_PIN, HIGH);
pinMode(W_PIN, OUTPUT);
pinMode(HOLD_PIN, OUTPUT);
digitalWrite(HOLD_PIN, HIGH); // Disable HOLD
digitalWrite(CS_PIN, HIGH); // Ensure EEPROM is not selected
SPI.begin();
Serial.begin(115200);
// 2 MHz, MSB-first, SPI mode 0
SPI.beginTransaction(SPISettings(2000000, MSBFIRST, SPI_MODE0));
// Test WREN-able, write "0x01" to addr 0x01 (current value: 0xFF)
testWriteEnableLatchAndWrite();
// Earlier experiments, kept for reference:
// checkBlockProtection();
// if (isWritingMode) {
// // Disable write protection for writing
// digitalWrite(W_PIN, HIGH);
// // digitalWrite(W_PIN, LOW);
// } else {
// // Enable write protection for reading
// // digitalWrite(W_PIN, HIGH);
// digitalWrite(W_PIN, LOW);
// }
}
void loop() {
// Intentionally empty — the one-shot test runs from setup().
}
void testWriteEnableLatchAndWrite() {
writeEEPROM(0x1D0, 0x02);
waitForWriteCompletion();
delay(100);
byte readValue1 = readEEPROM(0x1D0);
Serial.print("Read back value from address 0x1D0: 0x");
Serial.println(readValue1, HEX);
writeEEPROM(0x1D1, 0x02);
waitForWriteCompletion();
delay(100);
byte readValue2 = readEEPROM(0x1D1);
Serial.print("Read back value from address 0x1D1: 0x");
Serial.println(readValue2, HEX);
writeEEPROM(0x1D2, 0x00);
waitForWriteCompletion();
delay(100);
byte readValue3 = readEEPROM(0x1D2);
Serial.print("Read back value from address 0x1D2: 0x");
Serial.println(readValue3, HEX);
writeEEPROM(0x1D4, 0xFF);
waitForWriteCompletion();
delay(100);
byte readValue4 = readEEPROM(0x1D4);
Serial.print("Read back value from address 0x1D4: 0x");
Serial.println(readValue4, HEX);
}
// Fetch the ST95P08 status register via the RDSR (0x05) instruction.
byte readStatusRegister() {
  digitalWrite(CS_PIN, LOW);  // select the device
  SPI.transfer(0x05);         // RDSR opcode
  // A dummy byte clocks the register contents out on Q
  const byte statusByte = SPI.transfer(0x00);
  digitalWrite(CS_PIN, HIGH); // release the device
  return statusByte;
}
// Decode and print the block-protection (BP1:BP0) bits of the status
// register. Status layout (ST95 family): bit0 WIP, bit1 WEL, bit2 BP0,
// bit3 BP1.
// Fixes vs. original:
//  * the 'default:' branch contained a bare English sentence with no
//    '//', which did not compile;
//  * the comment claimed the BP bits sit in "b3 and b4" — the mask 0x0C
//    actually (and correctly) extracts bits 2 and 3.
void checkBlockProtection() {
  byte status = readStatusRegister();
  Serial.print("Status Register: 0x");
  Serial.println(status, HEX);
  // BP1:BP0 live in status bits 3:2
  byte bpBits = (status & 0x0C) >> 2;
  Serial.print("Block Protection Bits (BP1 BP0): ");
  Serial.println(bpBits, BIN);
  switch(bpBits) {
    case 0b00:
      Serial.println("No block protection.");
      break;
    case 0b01:
      Serial.println("Upper quarter write-protected.");
      break;
    case 0b10:
      Serial.println("Upper half write-protected.");
      break;
    case 0b11:
      Serial.println("All memory write-protected.");
      break;
    // No default: bpBits is a 2-bit value, so the four cases are exhaustive.
  }
}
// Read one byte from the ST95P08.
// Fix: per the ST95P08 datasheet the device takes a SINGLE address byte,
// with the two high address bits A9:A8 carried in bits 4:3 of the
// instruction byte. Sending two address bytes (as before) made the chip
// treat the second byte as part of the data phase, so reads came back
// from the wrong cell.
byte readEEPROM(unsigned int address) {
  digitalWrite(CS_PIN, LOW);
  SPI.transfer(0x03 | ((address >> 5) & 0x18)); // READ + A9:A8 in bits 4:3
  SPI.transfer(address & 0xFF);                 // low 8 address bits
  byte result = SPI.transfer(0x00);             // clock out the data byte
  digitalWrite(CS_PIN, HIGH);
  return result;
}
// Issue WREN (0x06) to set the Write Enable Latch.
// Fix: removed the 5 ms delay that was taken while S (CS) was still low —
// the WEL is latched on the rising edge of S, so the delay only slowed
// every write down without adding anything.
void writeEnable() {
  digitalWrite(CS_PIN, LOW);
  SPI.transfer(0x06);         // WREN opcode
  digitalWrite(CS_PIN, HIGH); // WEL takes effect when S rises
}
void waitForWriteCompletion() {
Serial.println("Waiting for write to complete...");
byte status;
do {
status = readStatusRegister();
if (status & 0x01) { // If WIP bit is set, write is still in progress
Serial.println("Write in progress...");
}
delay(10); // Short delay before checking again
} while (status & 0x01); // Loop until WIP bit clears
Serial.println("Write operation completed.");
}
bool checkWELCleared() {
byte status = readStatusRegister();
if (status & 0x02) { // If WEL bit is set, then it's not cleared yet
Serial.println("WEL bit is not cleared yet.");
return false;
} else {
Serial.println("WEL bit cleared.");
return true;
}
}
// Check that the Write Enable Latch (WEL, status bit 1) is set, printing
// the raw status register for debugging. Returns true when WEL is set.
// Fix (consistency): the original duplicated the whole RDSR transaction
// inline; it now reuses readStatusRegister() like the rest of the sketch.
bool checkWriteEnable() {
  byte status = readStatusRegister();
  // Print the status register value for debugging
  Serial.print("Status register: 0x");
  Serial.println(status, HEX);
  bool isWELSet = (status & 0x02) != 0; // 0x02 corresponds to the WEL bit
  if (isWELSet) {
    Serial.println("WEL bit is set. EEPROM is ready for write operation.");
  } else {
    Serial.println("WEL bit is not set. EEPROM is not ready for write operation.");
  }
  return isWELSet;
}
void writeEEPROM(unsigned int address, byte data) {
// Serial.println("Enable EEPROM write...");
writeEnable(); // Ensure the EEPROM is write-enabled
delay(100); // Delay after sending WREN, ensure EEPROM has enough time to set WEL
// Serial.println("Checking if WEL bit is set...");
if (!checkWriteEnable()) {
Serial.println("Write Enable Latch not set! Exiting..");
return; // Exit if WEL not set
}
// Serial.println("Setting CS pin low...");
digitalWrite(CS_PIN, LOW);
// Serial.println("Sending WRITE instruction code...");
SPI.transfer(0x02); // WRITE instruction code
// Serial.println("Sending MSB of address...");
SPI.transfer((address >> 8) & 0xFF); // MSB of the address
// Serial.println("Sending LSB of address...");
SPI.transfer(address & 0xFF); // LSB of the address
// Serial.print("Sending data to address: 0x");
// Serial.print(address, HEX);
// Serial.print(" -> Data: 0x");
// Serial.println(data, HEX);
SPI.transfer(data);
// Serial.println("Setting CS pin HIGH...");
digitalWrite(CS_PIN, HIGH);
delay(100);
// delay(10); // Wait for the write to complete
// Serial.println("Waiting for write completion...");
// waitForWriteCompletion();
}
` |
SPI - R/W to ST95P08 EEPROM |
|arduino|spi|eeprom| |
null |
I don't think that's directly possible. You can certainly import the elements, but I don't know of a way to import the styles from cytoscape.js into Cytoscape desktop.
You can do the other way by exporting to CX from the Desktop and then reading that into cytoscape.js, though. This might be worth posting a feature request, though. We're just going through the Cytoscape 3.10 planning process right now...
|
We are trying to export all current User Rights from our production environments following the guide posted here:
https://community.sap.com/t5/crm-and-cx-blogs-by-sap/sap-commerce-export-only-usergroup-employee-access-rights/ba-p/13488710
This works fine in 3 out of 6 environments and takes less than 5 minutes. In the other environments the export takes several hours only for it to fail with the following error:
> line 3 at main script: error executing code line at 3 : Entity not found ( pk = <PK always different> name = 'de.hybris.platform.persistence.core_User' type code = '4' db table = 'users')
This does also not happen on any Test/QA/Dev environments either. Any idea what could be the issue here?
Adding new search restrictions like filtering for the current users ID only. Removing search restrictions to increase the data received from the export (which counteracts the purpose of the restrictions).
Running the export on a single node instead of load balancer. Clearing caches before exporting. Always results in the same error. |
Getting Entity not found when exporting User Rights from production only |
Well, the short answer is that "it depends". If the network is heavily partitioned, then the individual partitions are laid out in parallel. If it is a single partition, then most of the force directed algorithms don't parallelize well, unfortunately.
|
I have a button called EXPORT and now I need to create a shortcut to export file to excel. The short cut is Ctrl+Alt+E. When I press the shortcut it have to call the function "onCommandExecution" which will check the ID condition and execute the function "onExportToExcel". I have been made debugging an I the ID was not called in the function. In other words the function onCommandExecution has not been called. The problem have to be either in view file or in manifest file but I see that both are correct!Does someone have a solution please?
Thank you
//Controller.js file
onCommandExecution: function(oEvent) {
var mSId = oEvent.getSource().getId();
if (mSId === "CE_EXPORT") {
this.onExportToExcel();
}
},
onExportToExcel: function() {
var mSId = oEvent.getSource().getId();
var oSettings, oSheet, aProducts;
var aCols = [];
aCols = this.createColumnConfig();
aProducts = this.byId("messageTable").getModel("oMessageModel").getProperty('/');
var oDate = new Date();
var regex = new RegExp(",", "g");
var aDateArr = oDate.toISOString().split("T")[0].split("-");
var sDate = aDateArr.join().replace(regex, "");
var aTimeArr = oDate.toISOString().split("T")[1].split(":");
var sSeconds = oDate.toISOString().split("T")[1].split(":")[2].split(".")[0];
var sTime = aTimeArr[0] + aTimeArr[1] + sSeconds;
oSettings = {
workbook: {
columns: aCols
},
dataSource: aProducts,
fileName: "export_" + sDate + sTime
};
if (mSId === "CE_EXPORT") {
oSheet = new Spreadsheet(oSettings);
oSheet.build()
.then(function() {
MessageToast.show(this.getOwnerComponent().getModel("i18n").getResourceBundle().getText("excelDownloadSuccessful"));
})
.finally(function() {
oSheet.destroy();
});
}
},
<View.xml file>
<Page>
<footer>
<OverflowToolbar >
<Button icon="sap-icon://excel-attachment" text="{i18n>exportToExcelBtn}" press="onExportToExcel" tooltip="{i18n>exportToExcelBtnTooltip}"/>
</OverflowToolbar>
</footer>
<dependents>
<core:CommandExecution id="CE_EXPORT" command="Export" enabled="true" execute="onCommandExecution" />
</dependents>
</Page>
<manifest.json file>
"sap.ui5": {
"rootView": {
"viewName": "com.volkswagen.ifdb.cc.sa.view.Main",
"type": "XML"
},
"dependencies": {
"minUI5Version": "1.65.0",
"libs": {
"sap.m": {},
"sap.ui.comp": {},
"sap.ui.core": {},
"sap.ui.layout": {},
"sap.ushell": {}
}
},
"contentDensities": {
"compact": true,
"cozy": true
},
"commands": {
"Export":{
"shortcut": "Ctrl+Alt+E"
}
},
|
How do you add an AltRoot for Windows using `p4 client` non-interactively? |
|windows|perforce| |
Having done this wrong several times, I'll contribute a solution to SAFELY and RELIABLY copy a table from one remote db to another. There's a lot that can go wrong between the dump and restore. For clarity, some additional criteria in this solution:
- **Copy only one table**
- **Does not delete anything in either source/dest database**
- **Makes sure the id sequence resumes in the to_table, instead of resetting to 1**
- **Avoids `drop table` or `--clean` mistakes from hasty copy-paste**
- Separates dump and restore into two different steps
- Allows flexibility in customizing the to_table (different indexes, etc)
- Both databases are remote
- Each database has a different hostname, port, username, pass
Prerequisites: get `pg_dump`, `pg_restore`, `psql` matching the remote db version
```sh
# Figure out which database version is running
# to use the pg_dump, pg_restore with the version.
# Run the query:
# select version() # PostgreSQL 14.10
# Then install the matching version
# brew tap homebrew/versions  # no longer needed; versioned formulae now live in homebrew/core
brew search postgresql@
brew install postgresql@14
# Later we can switch back
brew install postgresql@16
```
Export a table from the remote db, **including all large objects** in the table
```sh
# Dump from 10.0.1.123:1234
#
# -Fc Uses "format custom" optimized for pg_restore
# -b include all large objects, i.e. blobs, bytea, etc
# -U username
# -h hostname
# -p port
# -a only include table data and large objects
# -t table name
# PGPASSWORD is the supported env var to pass in a password
PGPASSWORD="FROM-DB-PASSWORD" pg_dump -Fc -b -U FROM-DB-USERNAME -h 10.0.1.123 -p 1234 -a -t from_table from_db_name > from_table.dump
# Get the last id sequence for restore later
psql -h 10.0.1.123 -p 1234 -d from_db_name -U FROM-DB-USERNAME -W -c "select * from from_table_name_id_seq;"
# last_value == 9999
```
Import the table into another remote db
```sh
# NO CLEAN, NO DROP/DELETE
#
# Safely create a table with a different name for now.
# This helps avoid copy-paste errors accidentally
# importing back to or deleting things in from_db.
psql -h 10.0.1.456 -p 4567 -d to_db_name -U TO-DB-USERNAME -W -c "create table to_table (id bigserial not null primary key, . . . );"
# Restore to 10.0.1.456:4567
#
# -U username
# -h hostname
# -p port
# -a only include table data and large objects
# -t table name
# -d database name
PGPASSWORD="TO-DB_PASSWORD" pg_restore -h 10.0.1.456 -p 4567 -d to_db_name -U TO-DB-USERNAME -a -t to_table_name from_table.dump
# Restore the id sequence we got from the last export step above.
psql -h 10.0.1.456 -p 4567 -d to_db_name -U TO-DB-USERNAME -W -c "alter sequence to_table_name_id_seq restart with 9999;"
# Rename the table to match the from_table_name
psql -h 10.0.1.456 -p 4567 -d to_db_name -U TO-DB-USERNAME -W -c "alter table to_table_name rename to name_matching_from_table_name;"
# Cleanup
rm from_table.dump
``` |
I'm trying out deferrable views and have this code in the component of a normal (NgModule-based) component.
@defer (on interaction) {
<app-defer></app-defer>
}@placeholder {
<p>click to load</p>
}
The DeferComponent in the `@defer` block is standalone, but it is included in the initial bundle. The placeholder is shown until it is clicked, but no new content is loaded in after that. However, when I make the parent component standalone, the `@defer` block works as expected and the DeferComponent is not loaded in until the placeholder is clicked.
Is this expected? I'm confused about this because the [documentation](https://angular.io/guide/defer#behavior-with-ngmodule) from Angular seems to indicate that only the components in the `@defer` block should be standalone in order for this to work . |
PATCH Request Body empty after converting Symfony HttpFoundation to PSR7 |
|symfony|guzzle|psr-7| |
I need to add the chart in excel. From table data ,need a chart to be drawn in the excel.I have add xlsxAdd to append chart in excel, but its throwing **Missing helper: \"xlsxAdd\**
error. So the table is already there in the Excel file, but the chart is not rendered using these Handlebars helpers. Is this the correct way to add a chart in Excel?
```
<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Report</title>
<link rel="stylesheet" href="/stylesheets/style.css" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.9.4/Chart.min.js"></script>
</head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
table {
width: 100%
}
td {
text-align: center
}
</style>
<body>
<div>
<img src="https://icicipruamcdev.azureedge.net/static/media/logo.86f7e0e9.svg" style="height: 150px;">
</div>
<canvas id="canvas" style="width:90vw"></canvas>
<script>
// Builds a Chart.js 2.x line chart from the NAV table on the page.
// Fix: the original dataset object declared pointBackgroundColor and
// borderColor TWICE each; in an object literal the later duplicate
// silently wins, so the first values ('blue' in both cases) were dead
// code. The duplicates are removed, keeping the values that actually
// took effect.
document.addEventListener('DOMContentLoaded', function() {
  // Pull labels (dates) and NAV values out of the rendered table.
  // NOTE(review): this looks up id "report-table", but the <table> in the
  // markup below has no id attribute — confirm, otherwise getElementById
  // returns null and querySelectorAll throws.
  const table = document.getElementById('report-table');
  const rows = table.querySelectorAll('tr');
  const labels = [];
  const navData = [];
  rows.forEach((row, index) => {
    if (index > 0) { // Skip header row
      const cells = row.querySelectorAll('td');
      labels.push(cells[1].textContent); // Assuming date is in the second column
      navData.push(parseFloat(cells[0].textContent)); // Assuming nav is in the first column
    }
  });
  // Chart data (duplicate keys removed; effective values kept)
  const chartData = {
    labels: labels,
    datasets: [{
      label: "NAV_HISTORY",
      type: 'line',
      data: navData,
      backgroundColor: 'transparent',
      borderColor: 'navy',
      borderWidth: 1,
      lineTension: 0,
      pointBackgroundColor: 'transparent',
      pointRadius: 3
    }]
  };
  // Chart initialization (yAxes/xAxes arrays are the Chart.js 2.x syntax)
  const ctx = document.getElementById('canvas').getContext('2d');
  const chart = new Chart(ctx, {
    type: 'line',
    data: chartData,
    options: {
      responsive: false,
      animation: false,
      maintainAspectRatio: false,
      devicePixelRatio: 1,
      scales: {
        yAxes: [{
          ticks: {
            beginAtZero: true,
            stepSize: 20
          }
        }],
        xAxes: [{
          ticks: {
            autoSkip: true,
            maxTicksLimit: 8,
            maxRotation: 0,
            minRotation: 0
          }
        }]
      }
    }
  });
});
</script>
<div><br/></div>
<div class="invoice-box">
<!-- Assuming you have a table structure like this -->
<table>
<thead>
<tr>
<th>NAV</th>
<th>Date</th>
</tr>
</thead>
<tbody>
{{#each data}}
<tr>
<td>{{this.Name}}</td>
<td>{{this.Amount}}</td>
</tr>
{{/each}}
</tbody>
</table>
<!-- Now, let's add the xlsxAdd functionality -->
{{#xlsxAdd "xl/drawings/drawing1.xml" "xdr:wsDr.twoCellAnchor"}}
{{#each data}}
<row>
<c t="inlineStr"><is><t>{{this.Name}}</t></is></c>
<c><v>{{this.Amount}}</v></c>
</row>
{{/each}}
{{/xlsxAdd}}
</div>
</body>
</html>
|
How to add the chart in excel using handlebars? |
|javascript|excel|charts|handlebars.js|handlebarshelper| |
I have a sidebar that's scrollable if the viewports height gets too low.
The bottom entry is always visible because it's set to sticky.
When you click on the bottom element, you get content that's sometimes wider than the navbar.
That content needs to overflow the sidebar, while the sidebar needs to stay scrollable, but I can only seem to get one of the requirements.
Either the scroll is lost, if I set the overflow to visible or the overflowing text gets clipped.
The HTML tree should stay in a way that makes sense for accessibility and the keeps the functionality (button on bottom visible and on top of menu items if viewport small, sidebar fixed and sticky bottom button).
```
/* scrollable sidebar */
.sidebar__nav {
overflow: auto;
}
.stickyBottom {
position: sticky;
z-index: 1;
}
.menuWithTextoverflow {
position: absolute;
width: max-content;
z-index: 3;
}
```
here's a codesandbox if it helps:
https://codesandbox.io/p/live/5db8cacb-81da-4638-ac87-ceb476b0322d
|
How to get x-overflowing text on a y-scrollable container |
|html|css|position|overflow| |
null |
It seems no matter what model I'm trying to build I get the following error on the last line:
```model <- jags.model(paste0(tmpdir,'model.bug'), data = list(), n.chains = num_cores)
Compiling model graph
Resolving undeclared variables
Allocating nodes
Deleting model
Error in jags.model(paste0(tmpdir, "model.bug"), data = list(), n.chains = num_cores) :
RUNTIME ERROR:
Compilation error on line 143.
Unknown variable CVD.p
Either supply values for this variable with the data
or define it on the left hand side of a relation.
```
I get this error on rjags and Windows jags. The strange thing is, as far as I can tell, the exact same code worked a year ago or so (on Windows JAGS). The error on Windows:
```C:\Users\Stacey Cherny\tmp\jags>jags-terminal ".\script_1.R"
Welcome to JAGS 4.3.1 on Wed Mar 6 15:17:58 2024
JAGS is free software and comes with ABSOLUTELY NO WARRANTY
Loading module: basemod: ok
Loading module: bugs: ok
Reading data file post_params.R
Compiling model graph
Resolving undeclared variables
Allocating nodes
RUNTIME ERROR:
Compilation error on line 143.
Dimension mismatch taking subset of CVD.p
Deleting model
C:\Users\Stacey Cherny\tmp\jags>
```
I'm really at a loss, given this code worked at one point (different variables, but built the same way, using code someone else wrote). I'm hoping that I'm just doing something stupid and someone can point that out to me easily!
I've deleted variables from my model and every time I get the same error on the very last variable.
Thanks in advance for any insights and help anyone can offer.
model.bug:
```
model {
# Every node below follows the same template: a normal (continuous) or
# Bernoulli (binary) likelihood whose regression coefficients are chosen,
# via dcat index variables (*.M*), from discrete grids supplied in the
# data. The grid matrices (<var>.p and prec.<var>.p) pair candidate
# values (odd columns) with their probabilities (even columns).
# NOTE(review): every *.p / prec.*.p matrix referenced here must be
# present in the data file — a compile-time "Unknown variable CVD.p"
# means CVD.p was missing (or mis-dimensioned) in post_params.R; confirm
# the data dump was produced completely.

# --- AGE: continuous, intercept-only ---
AGE ~ dnorm(mu.AGE,prec.AGE);
mu.AGE<- AGE.c0;
AGE.M0 ~ dcat(AGE.p[ ,2]);
AGE.c0 <-AGE.p[AGE.M0,1];
prec.AGE.M ~ dcat(prec.AGE.p[ ,2]);
prec.AGE <- prec.AGE.p[prec.AGE.M,1];
# --- ECW: continuous, regressed on AGE, LYMPabs, SIRI ---
ECW ~ dnorm(mu.ECW,prec.ECW);
mu.ECW<- ECW.c0+ECW.c1*AGE+ECW.c2*LYMPabs+ECW.c3*SIRI;
ECW.M0 ~ dcat(ECW.p[ ,2]);
ECW.c0 <-ECW.p[ECW.M0,1];
ECW.M1 ~ dcat(ECW.p[ ,4]);
ECW.c1 <-ECW.p[ECW.M1,3];
ECW.M2 ~ dcat(ECW.p[ ,6]);
ECW.c2 <-ECW.p[ECW.M2,5];
ECW.M3 ~ dcat(ECW.p[ ,8]);
ECW.c3 <-ECW.p[ECW.M3,7];
prec.ECW.M ~ dcat(prec.ECW.p[ ,2]);
prec.ECW <- prec.ECW.p[prec.ECW.M,1];
# --- GDF15_LnLn: continuous, regressed on AGE, ECW, D2TM ---
GDF15_LnLn ~ dnorm(mu.GDF15_LnLn,prec.GDF15_LnLn);
mu.GDF15_LnLn<- GDF15_LnLn.c0+GDF15_LnLn.c1*AGE+GDF15_LnLn.c2*ECW+GDF15_LnLn.c3*D2TM;
GDF15_LnLn.M0 ~ dcat(GDF15_LnLn.p[ ,2]);
GDF15_LnLn.c0 <-GDF15_LnLn.p[GDF15_LnLn.M0,1];
GDF15_LnLn.M1 ~ dcat(GDF15_LnLn.p[ ,4]);
GDF15_LnLn.c1 <-GDF15_LnLn.p[GDF15_LnLn.M1,3];
GDF15_LnLn.M2 ~ dcat(GDF15_LnLn.p[ ,6]);
GDF15_LnLn.c2 <-GDF15_LnLn.p[GDF15_LnLn.M2,5];
GDF15_LnLn.M3 ~ dcat(GDF15_LnLn.p[ ,8]);
GDF15_LnLn.c3 <-GDF15_LnLn.p[GDF15_LnLn.M3,7];
prec.GDF15_LnLn.M ~ dcat(prec.GDF15_LnLn.p[ ,2]);
prec.GDF15_LnLn <- prec.GDF15_LnLn.p[prec.GDF15_LnLn.M,1];
# --- LA_ratio_Ln: continuous, regressed on AGE ---
LA_ratio_Ln ~ dnorm(mu.LA_ratio_Ln,prec.LA_ratio_Ln);
mu.LA_ratio_Ln<- LA_ratio_Ln.c0+LA_ratio_Ln.c1*AGE;
LA_ratio_Ln.M0 ~ dcat(LA_ratio_Ln.p[ ,2]);
LA_ratio_Ln.c0 <-LA_ratio_Ln.p[LA_ratio_Ln.M0,1];
LA_ratio_Ln.M1 ~ dcat(LA_ratio_Ln.p[ ,4]);
LA_ratio_Ln.c1 <-LA_ratio_Ln.p[LA_ratio_Ln.M1,3];
prec.LA_ratio_Ln.M ~ dcat(prec.LA_ratio_Ln.p[ ,2]);
prec.LA_ratio_Ln <- prec.LA_ratio_Ln.p[prec.LA_ratio_Ln.M,1];
# --- LYMPabs: continuous, intercept-only ---
LYMPabs ~ dnorm(mu.LYMPabs,prec.LYMPabs);
mu.LYMPabs<- LYMPabs.c0;
LYMPabs.M0 ~ dcat(LYMPabs.p[ ,2]);
LYMPabs.c0 <-LYMPabs.p[LYMPabs.M0,1];
prec.LYMPabs.M ~ dcat(prec.LYMPabs.p[ ,2]);
prec.LYMPabs <- prec.LYMPabs.p[prec.LYMPabs.M,1];
# --- SIRI: continuous, intercept-only ---
SIRI ~ dnorm(mu.SIRI,prec.SIRI);
mu.SIRI<- SIRI.c0;
SIRI.M0 ~ dcat(SIRI.p[ ,2]);
SIRI.c0 <-SIRI.p[SIRI.M0,1];
prec.SIRI.M ~ dcat(prec.SIRI.p[ ,2]);
prec.SIRI <- prec.SIRI.p[prec.SIRI.M,1];
# --- D2TM: binary, logistic on AGE, HLD, HTM ---
D2TM ~ dbern(p8);
logit(p8)<- D2TM.c0+D2TM.c1*AGE+D2TM.c2*HLD+D2TM.c3*HTM;
D2TM.M0 ~ dcat(D2TM.p[ ,2]);
D2TM.c0 <-D2TM.p[D2TM.M0,1];
D2TM.M1 ~ dcat(D2TM.p[ ,4]);
D2TM.c1 <-D2TM.p[D2TM.M1,3];
D2TM.M2 ~ dcat(D2TM.p[ ,6]);
D2TM.c2 <-D2TM.p[D2TM.M2,5];
D2TM.M3 ~ dcat(D2TM.p[ ,8]);
D2TM.c3 <-D2TM.p[D2TM.M3,7];
# --- HLD: binary, logistic on AGE, LYMPabs, HTM ---
HLD ~ dbern(p9);
logit(p9)<- HLD.c0+HLD.c1*AGE+HLD.c2*LYMPabs+HLD.c3*HTM;
HLD.M0 ~ dcat(HLD.p[ ,2]);
HLD.c0 <-HLD.p[HLD.M0,1];
HLD.M1 ~ dcat(HLD.p[ ,4]);
HLD.c1 <-HLD.p[HLD.M1,3];
HLD.M2 ~ dcat(HLD.p[ ,6]);
HLD.c2 <-HLD.p[HLD.M2,5];
HLD.M3 ~ dcat(HLD.p[ ,8]);
HLD.c3 <-HLD.p[HLD.M3,7];
# --- HTM: binary, logistic on AGE, ECW, LA_ratio_Ln ---
HTM ~ dbern(p10);
logit(p10)<- HTM.c0+HTM.c1*AGE+HTM.c2*ECW+HTM.c3*LA_ratio_Ln;
HTM.M0 ~ dcat(HTM.p[ ,2]);
HTM.c0 <-HTM.p[HTM.M0,1];
HTM.M1 ~ dcat(HTM.p[ ,4]);
HTM.c1 <-HTM.p[HTM.M1,3];
HTM.M2 ~ dcat(HTM.p[ ,6]);
HTM.c2 <-HTM.p[HTM.M2,5];
HTM.M3 ~ dcat(HTM.p[ ,8]);
HTM.c3 <-HTM.p[HTM.M3,7];
# --- CVD: binary outcome, logistic on ECW, GDF15_LnLn, D2TM, HLD, HTM ---
# CVD.p needs 12 columns (6 coefficient/probability pairs); a narrower
# matrix in the data would also explain a "Dimension mismatch taking
# subset of CVD.p" error.
CVD ~ dbern(p11);
logit(p11)<- CVD.c0+CVD.c1*ECW+CVD.c2*GDF15_LnLn+CVD.c3*D2TM+CVD.c4*HLD+CVD.c5*HTM;
CVD.M0 ~ dcat(CVD.p[ ,2]);
CVD.c0 <-CVD.p[CVD.M0,1];
CVD.M1 ~ dcat(CVD.p[ ,4]);
CVD.c1 <-CVD.p[CVD.M1,3];
CVD.M2 ~ dcat(CVD.p[ ,6]);
CVD.c2 <-CVD.p[CVD.M2,5];
CVD.M3 ~ dcat(CVD.p[ ,8]);
CVD.c3 <-CVD.p[CVD.M3,7];
CVD.M4 ~ dcat(CVD.p[ ,10]);
CVD.c4 <-CVD.p[CVD.M4,9];
CVD.M5 ~ dcat(CVD.p[ ,12]);
CVD.c5 <-CVD.p[CVD.M5,11];
}
```
script_1.R:
```
# JAGS batch script: compile model.bug against the data in post_params.R,
# run one chain, and monitor every node of interest (thinned by 10).
model in model.bug
data in post_params.R
compile, nchains(1)
parameters in init_1.R, chain(1)
initialize
# Burn-in
update 10000, by(1000)
monitor AGE, thin(10)
monitor ECW, thin(10)
monitor GDF15_LnLn, thin(10)
monitor LA_ratio_Ln, thin(10)
monitor LYMPabs, thin(10)
monitor SIRI, thin(10)
monitor D2TM, thin(10)
monitor HLD, thin(10)
monitor HTM, thin(10)
monitor CVD, thin(10)
# Sampling run; write CODA output files with the "out_1" stem
update 10780, by(1078)
coda *, stem("out_1")
```
init_1.R
```
".RNG.name" <-"base::Mersenne-Twister"
".RNG.seed" <- 1
```
|
null |
In order to add multiple related projects to the same IntelliJ IDEA project, you'll need to add them as modules.
1. In your client project, open the other project(s) from the menu File | New | Module from Existing Sources.
2. Select the directory in where one of the other projects (for example, Service Model A) is located and click Open.
3. In the dialog that opens, select Create module from existing sources.
4. Select the relevant model, like Gradle or Maven, if applicable.
Check the [docs](
https://www.jetbrains.com/help/idea/creating-and-managing-modules.html#import-module-from-sources) or watch [this video](https://youtu.be/WAjGGd9LED4?feature=shared&t=434) for more details.
|
For me it was enough to set `clipToOutline = true` on the webview as recommended by the [documentation on `AndroidView`][1] and [this comment][2] on the google issue tracker.
```kotlin
WebView(context).apply {
clipToOutline = true
}
```
[1]: https://github.com/androidx/androidx/blob/42d844f7f349384761bca5dad6adaed43a8ae677/compose/ui/ui/src/androidMain/kotlin/androidx/compose/ui/viewinterop/AndroidView.android.kt#L175
[2]: https://issuetracker.google.com/issues/174233728#comment5 |
I follow warnings and fix types in bind layout call.
I found on https://www.w3.org/TR/webgpu/:
```
enum GPUSamplerBindingType {
"filtering",
"non-filtering",
"comparison",
};
```
Also 'float' for texture.
Correct way looks :
```js
{
binding: 3,
visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
texture: {
sampleType: 'float',
},
},
{
binding: 4,
visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
sampler: {
type: 'filtering',
},
},
```
In shader i use `input.shadowPos.xy` like vec2 to satisfait textureSample.
```
let textureColor = textureSample(meshTexture, meshSampler, input.shadowPos.xy);
```
And works fine !
|
What is the purpose of the keyword "by" (property delegates) when declaring a MutableState<T> with remember? |
|android|kotlin|android-studio|android-jetpack-compose|delegates| |
Thanks! I added something to it so that all sublabels are displayed
/**
 * Logs every Gmail user label, then flags each label whose name contains
 * a "/" (i.e. a label with a sub-label path component).
 */
function getAllLabels() {
  const labelNames = [];
  GmailApp.getUserLabels().forEach(function(label) {
    Logger.log("label: " + label.getName());
    labelNames.push(label.getName());
  });
  labelNames.forEach(function(name) {
    if (name.indexOf('/') > 0) {
      Logger.log(name + ' has a subLabel');
    }
  });
}
// Recursively log sub-labels beneath `label`, prefixing each name with
// its parent path.
// NOTE(review): GmailLabel does not document a getLabels() method in the
// GmailApp API — confirm this actually runs; nested labels are normally
// returned flat by GmailApp.getUserLabels() with "/" in their names.
function processSubLabels(label, prefix) {
  var subLabels = label.getLabels();
  for (var i = 0; i < subLabels.length; i++) {
    // Build the full "parent/child" path for logging and recursion
    var subLabelName = prefix + "/" + subLabels[i].getName();
    Logger.log("subLabel: " + subLabelName);
    processSubLabels(subLabels[i], subLabelName);
  }
}
I'm currently creating a HTML5 music editing program.
I started with recording audio.
I figured out how to get access on microphon and so on.
Code from "Recorder.js" helped me a lot.
But instead of writing into an .wave file I want to create an temporary audiobuffer.
I got the Float32Array's from the inputtbuffer in "onaudioprocess" event and saved them all together in one Float32Array.
Now I have an Array with values, let's say from 3 Seconds of recording.
Now I want to create an Audiobuffer to play the sound I recorded (saved in that array).
I read the [Web Audio API Specification][1] but didn't find any way to create a new AudioBuffer from an existing Float32Array.
Maybe I'm doing the recording wrong or I'm simply blind in reading.
Every other Questions I read were linking on Recorder.js, but I don't want to save a file first and then load it again.
Anyone knowing?
Would be awesome.
[1]: https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html |
|html|web-audio-api|audio-recording| |
I seem to have solved it. The login app sends a POST request to a server side API that accepts the user input and then the php code checks if the password is valid.
Here is the the php that receives the user input POST then gives the user input to the function that does the validation:
````````````````````````````````````````````````````````````````````````
if ($_SERVER['REQUEST_METHOD'] === 'POST') {
$raw_data = file_get_contents('php://input'); // Get raw data from request body
// Check if data is in JSON format (optional)
if (strpos($raw_data, '{') !== false && strpos($raw_data, '}') !== false) {
$data = json_decode($raw_data, true); // Decode JSON data
if (isset($data['username']) && isset($data['password'])) {
// Access username and password from decoded data
$username = $data['username'];
$password = $data['password'];
````````````````````````````````````````````````````````````````````````
here is the verification logic used:
````````````````````````````````````````````````````````````````````````
/**
 * Validate a username/password pair against the user table.
 *
 * Looks the user up by username and checks the supplied plain-text
 * password against the stored hash with password_verify().
 *
 * @param string $username
 * @param string $password plain-text password as entered by the user
 * @return bool true when the user exists and the password matches
 */
function user_login($username, $password)
{
    global $CFG, $DB, $USER;

    $user = $DB->get_record('user', array('username' => $username));
    if (!$user) {
        return false; // unknown username
    }

    // password_verify() handles the salt/algorithm embedded in the hash,
    // so hashes are never compared directly.
    return password_verify($password, $user->password);
}
```````````````````````````````````````````````````````````````````````` |
I'm trying to use a custom API provided by a client. It's a login authentication API, and when I test the URL in testing tools it works fine, but when I call it in my code I receive this error: "Access to XMLHttpRequest at 'api url' from origin 'http://localhost:5174' has been blocked by CORS policy: Response to preflight request doesn't pass access control check: No 'Access-Control-Allow-Origin' header is present on the requested resource."
"A cross-origin resource sharing (CORS) request was blocked because of invalid or missing response headers of the request or the associated preflight request ."
I have tried using Chrome extensions, but it's the same issue. I've never worked with custom APIs before, so I'm not sure whether it's an issue on the server side, with the API provider, or in my code — and how do I check?
API request blocked by CORS policy |
|reactjs|api|cors|postman|swagger| |
null |
If you are looking to have a single producer of some data and multiple "consumers" of that data, then the following code uses `Condition` instances with notification. For demo purposes the producer only produces 10 pieces of data and we have 3 consumers. This will keep the output to a reasonable length:
**Update Using Linked List**
```python
import threading
class Node:
    """One link in a singly-linked list of produced data items."""

    def __init__(self, data, previous_node):
        # How many consumers have processed this node so far.
        self.consumed_count = 0
        self.next = None
        self.data = data
        # Append this node after the previous tail, if any.
        if previous_node:
            previous_node.next = self

    def __repr__(self):
        return f'[data: {repr(self.data)}, consumed_count: {self.consumed_count}, next: {self.next}]'
class data_provider:
def __init__(self, num_consumers) -> None:
self.num_consumers = num_consumers
self.lock = threading.Lock()
self.condition = threading.Condition()
self.running = True
# To simplify the code, the first node in the list is a dummy:
self.linked_list = Node(None, None)
data_generator_thread = threading.Thread(target=self.data_generator)
data_generator_thread.start()
def data_generator(self):
import time
last_node = self.linked_list
for cnt in range(1, 11) : # Reduced count for demo purposes
# For demo purposes let's introduce a pause:
time.sleep(.5)
last_node = Node({'Name': 'Data', 'Count': cnt}, last_node)
with self.condition:
self.condition.notify_all()
print('Done producing')
# Let consumers know that no more data will be coming:
with self.condition:
self.running = False
self.condition.notify_all()
def remove_consumed_nodes(self):
with self.lock:
# Remove completely consumed links except for the last one:
prev_node = self.linked_list.next
node = prev_node.next
while node and node.consumed_count == self.num_consumers:
prev_node = node
node = node.next
self.linked_list.next = prev_node
N_PRINTERS = 3 # The number of printer threads:
obj = data_provider(N_PRINTERS)
def printing(id):
last_node = obj.linked_list
while True:
with obj.condition:
obj.condition.wait_for(
lambda: not obj.running or last_node.next
)
if not last_node.next:
return
last_node = last_node.next
while True:
print(id, ':', last_node.data)
with obj.lock:
last_node.consumed_count += 1
if not last_node.next:
break
last_node = last_node.next
obj.remove_consumed_nodes()
printer_threads = []
for i in range(N_PRINTERS):
thread = threading.Thread(target=printing, args=(i,))
thread.start()
printer_threads.append(thread)
for thread in printer_threads:
thread.join()
print('End')
print(obj.linked_list)
```
Prints:
```lang-None
1 : {'Name': 'Data', 'Count': 1}
0 : {'Name': 'Data', 'Count': 1}
2 : {'Name': 'Data', 'Count': 1}
0 : {'Name': 'Data', 'Count': 2}
1 : {'Name': 'Data', 'Count': 2}
2 : {'Name': 'Data', 'Count': 2}
0 : {'Name': 'Data', 'Count': 3}
2 : {'Name': 'Data', 'Count': 3}
1 : {'Name': 'Data', 'Count': 3}
0 : {'Name': 'Data', 'Count': 4}
1 : {'Name': 'Data', 'Count': 4}
2 : {'Name': 'Data', 'Count': 4}
0 : {'Name': 'Data', 'Count': 5}
1 : {'Name': 'Data', 'Count': 5}
2 : {'Name': 'Data', 'Count': 5}
0 : {'Name': 'Data', 'Count': 6}
2 : {'Name': 'Data', 'Count': 6}
1 : {'Name': 'Data', 'Count': 6}
0 : {'Name': 'Data', 'Count': 7}
1 : {'Name': 'Data', 'Count': 7}
2 : {'Name': 'Data', 'Count': 7}
2 : {'Name': 'Data', 'Count': 8}
0 : {'Name': 'Data', 'Count': 8}
1 : {'Name': 'Data', 'Count': 8}
2 : {'Name': 'Data', 'Count': 9}
0 : {'Name': 'Data', 'Count': 9}
1 : {'Name': 'Data', 'Count': 9}
Done producing
2 : {'Name': 'Data', 'Count': 10}
0 : {'Name': 'Data', 'Count': 10}
1 : {'Name': 'Data', 'Count': 10}
End
[data: None, consumed_count: 0, next: [data: {'Name': 'Data', 'Count': 10}, consumed_count: 3, next: None]]
```
**Reusable MultiConsumerProducer Class**
The above code can be re-engineered for improved reusability.
```python
import threading
from typing import Iterable, List, Any
class MultiConsumerProducer:
class Node:
def __init__(self, data: Any, previous_node: 'Node'):
self._consumed_count = 0
self._next = None
self._data = data
if previous_node:
previous_node._next = self
@property
def data(self) -> Any:
return self._data
def __repr__(self):
return f'[_data: {repr(self._data)}, _consumed_count: {self._consumed_count}, _next: {self._next}]'
def __init__(self, num_consumers: int, data_collection: Iterable) -> None:
self._num_consumers = num_consumers
self._lock = threading.Lock()
self._condition = threading.Condition()
self._running = True
# To simplify the code, the first node in the list is a dummy:
self._linked_list = MultiConsumerProducer.Node(None, None)
threading.Thread(target=self._data_generator, args=(data_collection,), daemon=True).start()
def print_nodes(self) -> None:
"""Print linked list of nodes."""
print(self._linked_list)
def _data_generator(self, data_collection):
"""Generate nodes."""
last_node = self._linked_list
for data in data_collection:
last_node = MultiConsumerProducer.Node(data, last_node)
with self._condition:
self._condition.notify_all()
self._running = False
with self._condition:
self._condition.notify_all()
def get_next_nodes(self, last_node_processed: Node=None) -> List[Node]:
"""Get next list of ready nodes."""
last_node = last_node_processed or self._linked_list
with self._condition:
self._condition.wait_for(
lambda: not self._running or last_node._next
)
if not last_node._next:
return []
nodes = []
last_node = last_node._next
while True:
nodes.append(last_node)
if not last_node._next:
return nodes
last_node = last_node._next
def consumed_node(self, node: Node) -> None:
"""Show node has been consumed."""
with self._lock:
node._consumed_count += 1
if node._consumed_count == self._num_consumers:
# Remove completely consumed links except for the last one:
prev_node = self._linked_list._next
node = prev_node._next
while node and node._consumed_count == self._num_consumers:
prev_node = node
node = node._next
self._linked_list._next = prev_node
##############################################################
def producer():
import time
for cnt in range(1, 11) : # Reduced count for demo purposes
# For demo purposes let's introduce a pause:
time.sleep(.5)
yield {'Name': 'Data', 'Count': cnt}
print('Done producing')
N_PRINTERS = 3 # The number of printer threads:
obj = MultiConsumerProducer(N_PRINTERS, producer())
def printing(id):
last_node_processed = None
while (nodes := obj.get_next_nodes(last_node_processed)):
for last_node_processed in nodes:
print(id, ':', last_node_processed.data)
obj.consumed_node(last_node_processed)
printer_threads = []
for i in range(N_PRINTERS):
thread = threading.Thread(target=printing, args=(i,))
thread.start()
printer_threads.append(thread)
for thread in printer_threads:
thread.join()
print('End')
print('\nNodes:')
obj.print_nodes()
```
**One More Time**
The following function generates an abstract base class that uses queues for delivering work and supports producer/consumers running in either threads or processes
```python
def generate_multi_consumer_producer(n_consumers, use_multiprocessing: bool=False, queue_size=0):
"""Generate an abstract base for single producer multiple consumers.
n_consumers: The number of consumers.
use_multiprocessing: True to use producer/consumers that run in child processes
otherwise child threads are used.
queue_size: If producing is faster than consumption, you can specify
a positive value for queue_size to prevent the queues from continuously
growing."""
from abc import ABC, abstractmethod
from typing import List, Iterable
if use_multiprocessing:
from multiprocessing import Process as BaseClass, JoinableQueue as QueueType
else:
from threading import Thread as BaseClass
from queue import Queue as QueueType
class MultiConsumerProducer(BaseClass, ABC):
def __init__(self, n_consumers: int=n_consumers, queue_size=queue_size) -> None:
super().__init__()
self._n_consumers = n_consumers
self._queues = [QueueType(queue_size) for _ in range(n_consumers)]
self.start()
def run(self):
# Start the consumers:
for consumer_id in range(self._n_consumers):
BaseClass(
target=self._consumer,
args=(consumer_id, self._queues[consumer_id]),
daemon=True
).start()
# Produce the data
for data in self.produce():
for queue in self._queues:
queue.put(data)
# Wait for all work to be completed
for queue in self._queues:
queue.join()
def _consumer(self, consumer_id: int, queue: QueueType):
while True:
data = queue.get()
try:
self.consume(consumer_id, data)
except Exception as e:
print(f'Exception in consumer {consumer_id}: {e}')
finally:
queue.task_done()
@abstractmethod
def produce(self):
"""This should be a generator function."""
pass
@abstractmethod
def consume(self, consumer_id: int, data: object) -> None:
pass
return MultiConsumerProducer
```
The usage is:
```python
def consumer_0(consumer_id, n):
print(f'id {consumer_id}: {n} ** 1 = {n}')
def consumer_1(consumer_id, n):
print(f'id {consumer_id}: {n} ** 2 = {n ** 2}')
def consumer_2(consumer_id, n):
print(f'id {consumer_id}: {n} ** 3 = {n ** 3}')
MultiConsumerProducer = generate_multi_consumer_producer(3, use_multiprocessing=True)
class MyMultiConsumerProducer(MultiConsumerProducer):
"""An example that uses 3 different consumers."""
consumers = [consumer_0, consumer_1, consumer_2]
def produce(self):
import time
for n in range(1, 11) : # Reduced count for demo purposes
# For demo purposes let's introduce a pause:
time.sleep(.5)
yield n
print('Done producing', flush=True)
def consume(self, consumer_id, data):
self.consumers[consumer_id](consumer_id, data)
if __name__ == '__main__':
p = MyMultiConsumerProducer(3)
# Wait for all work to complete:
p.join()
```
Prints:
```lang-None
id 0: 1 ** 1 = 1
id 1: 1 ** 2 = 1
id 2: 1 ** 3 = 1
id 0: 2 ** 1 = 2
id 2: 2 ** 3 = 8
id 1: 2 ** 2 = 4
id 0: 3 ** 1 = 3
id 2: 3 ** 3 = 27
id 1: 3 ** 2 = 9
id 2: 4 ** 3 = 64
id 1: 4 ** 2 = 16
id 0: 4 ** 1 = 4
id 0: 5 ** 1 = 5
id 1: 5 ** 2 = 25
id 2: 5 ** 3 = 125
id 0: 6 ** 1 = 6
id 1: 6 ** 2 = 36
id 2: 6 ** 3 = 216
id 0: 7 ** 1 = 7
id 1: 7 ** 2 = 49
id 2: 7 ** 3 = 343
id 0: 8 ** 1 = 8
id 1: 8 ** 2 = 64
id 2: 8 ** 3 = 512
id 0: 9 ** 1 = 9
id 2: 9 ** 3 = 729
id 1: 9 ** 2 = 81
Done producing
id 2: 10 ** 3 = 1000
id 1: 10 ** 2 = 100
id 0: 10 ** 1 = 10
``` |
|docker-compose|logstash|elk| |
I'm currently working on implementing a loader animation on my website, and I've noticed that there's a transparent background on my image. Is there a way to override this transparent background using CSS? It seems that setting a background color is not having any effect in covering up the transparency.
For instance, consider the image of a Pokeball below, which has a transparent background.
[![enter image description here][1]][1]
Below is my sample code on how I animate my loader.
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-css -->
body {
min-height: 100vh;
display: grid;
place-items: center;
.loader {
position: fixed;
top: 0;
left: 0;
width: 100vh;
height: 100vh;
width: 100%;
height: 100%;
display: flex;
justify-content: center;
align-items: center;
background-color: #f7f9fb;
transform: opacity 0.75s, visiblity 0.75s;
img {
width: 300px;
height: 300px;
animation: loading 2s ease infinite;
}
@keyframes loading {
from {
transform: rotate(0turn);
}
to {
transform: rotate(1turn);
}
}
}
.loader-hidden {
opacity: 0;
visibility: hidden;
}
}
<!-- language: lang-html -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link rel="preconnect" href="https://fonts.googleapis.com" />
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
<link
href="https://fonts.googleapis.com/css2?family=Roboto:ital,wght@0,100;0,300;0,400;0,500;0,700;0,900;1,100;1,300;1,400;1,500;1,700;1,900&display=swap"
rel="stylesheet"
/>
<link rel="stylesheet" href="dist/style.css" />
<script src="https://cdn.jsdelivr.net/npm/axios/dist/axios.min.js"></script>
<title>Loader</title>
</head>
<body>
<div class="loader">
<img src="https://i.postimg.cc/NFzNjN86/pokemon-ball.png" alt="pokeball" />
</div>
<script src="/app/js/app.js"></script>
</body>
</html>
<!-- end snippet -->
[1]: https://i.stack.imgur.com/gzWWx.png |
The problem you're having is that the [generic constraint](https://www.typescriptlang.org/docs/handbook/2/generics.html#generic-constraints) `ObjectWithSchemaId extends { schemaId: TSchemaId }` is not an *inference site* for the `TSchemaId` type parameter. Such functionality was suggested at [microsoft/TypeScript#7234](https://github.com/microsoft/TypeScript/issues/7234) but it was never implemented, since you can usually get the desired behavior in other ways. That means there's nowhere at all for TypeScript to infer `TSchemaId` from, so it falls back to *its* constraint of `SchemaId`, and everything follows from that.
---
You'll need to refactor if you want to change how it works. You could either just accept `TObjectWithSchemaId` and compute `TSchemaId` from it as an [indexed access type](https://www.typescriptlang.org/docs/handbook/2/indexed-access-types.html)
function inferObjectSchemaId<
TObjectWithSchemaId extends { schemaId: SchemaId },
>(objectWithSchemaId: TObjectWithSchemaId): TObjectWithSchemaId["schemaId"] {
return objectWithSchemaId.schemaId
}
or just accept `TSchemaId` and compute an appropriate type for `objectWithSchemaId` from it:
function inferObjectSchemaId<
TSchemaId extends SchemaId,
>(objectWithSchemaId: { schemaId: TSchemaId, [k: string]: any }): TSchemaId {
return objectWithSchemaId.schemaId
}
or take some other approach that doesn't involve trying to infer from constraints.
[Playground link to code](https://www.typescriptlang.org/play?#code/KYOwrgtgBAygxgC2BAhgSQCZQN4CgqwDCAEgKICyAggPoCMUAvFAOQwkU23MA0+RZVagCZGLNgJpCeuAL65cAMzAg4AFwCWAexBR1IBcABO8JKkwAeAConk6LMAAeq0BgDOsRLcwA+ABSvPMwwALihrQLsASlDw0zscPkNgVTBDHQC4zFl5EBQIYFcABxQ4YCgAeRAyvAIlFQ1tXX0jcoAjACtgNRsg8z4CSzbOtQB1dVUEHvjHZxA3HCgMrxCPTKwZXgI-TQ6u1TGJqcwYob2DyYjMaLDT0fGLtYBtACIloOeAXQSCAiSUtKgO2G+3uRwwADo3nY+HI+HBtK5VIt1PQmHoDMZLhhfGDweIOHRIj9iSTST8APTk1bLPHsQS0OEIpGudQiNHNQy3VRg3zYRZY0K4-H0qAyImU6lBWkSOjZXC5fJFEplSwAd003ygdTUWh06Jau26WL6xNiyygMxc7jBmyg20NIMOAoWUOOYRtUEeAGtQojDHoAOYfUIoEAAT1F1zNQU1v2SqR0QLOoKxkKxMPkBHhIERyNRTQxPKFdM4RLJ5ZJEuLMoZWaZyLZBYNwJ5fNdK2rBPoYqgVdTws48hkQA) |
Creating the query builder shows an error; I can't figure it out. What is happening?
```
async findAllMetaData(q: any, offset: number = 0, limit: number = 10): Promise<ProductMetaDataEntity[]> {
const qb = this.productMetaDataReposistory
.createQueryBuilder('pm')
.leftJoinAndSelect('pm.metaKey', 'metaKey')
.take(limit)
.skip(offset);
qb.orderBy('pm.created_at', 'DESC');
const metaData = await qb.getMany();
return metaData;
}
@Entity('product_metadata')
export class ProductMetaDataEntity {
@PrimaryGeneratedColumn({ name: 'id' })
id: number;
@Column({ name: 'meta_value' })
metaValue: string;
@Column({ name: 'meta_order', nullable: true })
metaOrder: number;
@ManyToOne(type => MetaKeyEntity, metaKey => metaKey.id, { eager: true, cascade: false })
@JoinColumn({ name: 'metakey_id' })
metaKey: MetaKeyEntity;
}
```
query builder showing error
TypeError: Cannot read properties of undefined (reading 'databaseName')
and after joining somehow by this
```
async findAllMetaData(q: any, offset: number = 0, limit: number = 10): Promise<ProductMetaDataEntity[]> {
const qb = this.productMetaDataReposistory
.createQueryBuilder('pm')
.leftJoin('pm.metaKey', 'metaKey')
.take(limit)
.skip(offset);
qb.orderBy('pm.created_at', 'DESC');
const metaData = await qb.getMany();
return metaData;
}
```
i am getting only this data (no metakey details inside)
``` {
"id": 165,
"metaValue": "metavalue",
"metaOrder": 1,
"createdAt": "2023-12-14T12:17:52.071Z",
"updatedAt": "2023-12-14T12:17:52.071Z"
},
expectation is
{
"id": 1,
"metaValue": "test meta value",
"metaOrder": null,
"createdAt": "2024-01-08T11:22:37.796Z",
"updatedAt": "2024-01-08T11:22:37.796Z",
"metaKey": {
"id": 1,
"name": "Test",
"image": "",
"slug": "Test",
"type": "test",
"createdAt": "2024-01-08T11:21:55.017Z",
"updatedAt": "2024-01-08T11:21:55.017Z"
}
},
```
The expectation is that the metaKey data is included as well.
I am trying to figure out the best way to loop multiple array values together.
Sample input:
`a = (1 2 3 4)
b = ('A' 'B' 'C')
c = ('x' 'y')`
Now I want to create a combination of commands like following.
```
command A x 1
command A y 1
command A x 2
command A y 2
command A X 3
....
command C x 3
command C y 3
command C x 4
command C y 4
```
Please help. I was trying with the following, but not sure if that works. Please note, I am a beginner in coding/bash, facing similar requirement at work.
```
#!/bin/bash
a = (1 2 3 4)
b = ('A' 'B' 'C')
c = ('x' 'y')
for aa in a; do
for bb in b; do
for cc in c; do
command $(aa[@]) $(bb[@]) $(cc[@])
done
done
done
```
|
what is the best way to loop multiple arrays in bash? |
|bash| |
null |
null |
null |
null |
null |
null |
As written in comments and other answers, use `CEntityManager::EBroadcastTypes` as the enum type, and use `CEntityManager::FAll` etc. as enum values.
This compiles for me without warnings:
```C++
// EntityManager.h
#include <vector>
#include <string>
class CEntityManager
{
public:
enum EBroadcastTypes
{
FAll,
FVehicle,
FProjectile,
FDynamic, // Any Entity that can movement on update (ie. Vehicle and Shells)
FStatic, // Any Entity that never moves (ie. Scenery)
};
struct BroadcastFilter
{
EBroadcastTypes type;
std::vector<std::string> channels;
};
std::vector<BroadcastFilter> m_BroadcastFilters;
std::vector<std::string> GetChannelsOfFilter(EBroadcastTypes Type)
{
for (const auto& BroadcastFilter : m_BroadcastFilters)
{
if (BroadcastFilter.type == Type)
{
return BroadcastFilter.channels;
}
}
}
};
```
```C++
// CRayCast.cpp
#include "EntityManager.h"
struct CRay {};
typedef float TFloat32;
typedef int HitResult;
HitResult RayCast(CRay, CEntityManager::EBroadcastTypes Type, TFloat32) {
return Type == CEntityManager::FAll ? 5 : 6;
}
```
Compile command: `g++ -std=c++11 -c -O2 -W -Wall CRayCast.cpp`
|
For version <5 you can use
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-js -->
const isMutating = !!useIsMutating({ mutationKey: [YOUR_MUTATION_KEY] });
<!-- end snippet -->
|
Currently, I am using Tanstack Query V5 in my vite-react project.
Whenever I try to use `invalidateQueries` it throws an error `TypeError: Cannot read from private field`
What I want is: when the mutation for invoice deletion triggers, it should refetch the invoice list again.
main.tsx
```
import React from 'react'
import ReactDOM from 'react-dom/client'
import { BrowserRouter as Router } from 'react-router-dom'
import { QueryClient, QueryClientProvider } from '@tanstack/react-query'
import { ReactQueryDevtools } from '@tanstack/react-query-devtools'
const queryClient = new QueryClient()
import App from './App.tsx'
import { DashboardProvider } from './context/Dashboard.context'
import { Toaster } from '@/components/ui/toaster'
import './index.css'
ReactDOM.createRoot(document.getElementById('root')!).render(
<React.StrictMode>
<Router>
<QueryClientProvider client={queryClient}>
<DashboardProvider>
<App />
<Toaster />
</DashboardProvider>
<ReactQueryDevtools initialIsOpen={false} />
</QueryClientProvider>
</Router>
</React.StrictMode>
)
```
Here is my code for useQuery
```
const { isFetching, data } = useQuery({
queryKey: ['invoiceList', page, limit],
queryFn: async () => {
const { data } = await apiRequest.get('/client/invoiceList', {
params: { page, limit },
})
return data
},
})
```
Here is my code for useMutation & invalidateQueries
```
const { invalidateQueries } = useQueryClient()
const { mutate, isPending } = useMutation({
mutationFn: async () => {
return await apiRequest.delete('/client/deleteInvoice', {
params: { id: invoice?.id },
})
},
onSuccess: () => {
toast({
variant: 'success',
title: 'Invoice deleted successfully',
description: `Invoice ${invoice?.reference} has been deleted successfully`,
})
invalidateQueries({ queryKey: ['invoiceList'] })
},
onError: (error) => {
console.log(error?.message)
handleError(error, toast)
},
})
```
Here is the complete error -
```
TypeError: Cannot read from private field
at __accessCheck (chunk-TIUEEL27.js?v=ae984db9:41:11)
at __privateGet (chunk-TIUEEL27.js?v=ae984db9:44:3)
at chunk-PF574KJM.js?v=ae984db9:1620:7
at Object.batch (chunk-PF574KJM.js?v=ae984db9:499:16)
at invalidateQueries (chunk-PF574KJM.js?v=ae984db9:1619:26)
at onClick (index.tsx:81:15)
at HTMLUnknownElement.callCallback2 (chunk-EL3PUDHE.js?v=ae984db9:3674:22)
at Object.invokeGuardedCallbackDev (chunk-EL3PUDHE.js?v=ae984db9:3699:24)
at invokeGuardedCallback (chunk-EL3PUDHE.js?v=ae984db9:3733:39)
at invokeGuardedCallbackAndCatchFirstError (chunk-EL3PUDHE.js?v=ae984db9:3736:33)
```
Initially, I thought this error was occurring for the version. So I update the version from 5.14.2 to ^5.26.3. But the problem remains unsolved. |
WSO2 MI 4.2.0
I am using WSO2 Micro Integrator version 4.2.0 and a rollover policy based on a time period for the log files. I'm trying to delete rollover files older than 58 days, meaning that I want to keep ~58 days of logs, with the following configuration (as recommended on https://apim.docs.wso2.com/en/latest/administer/logging-and-monitoring/logging/managing-log-growth/):
`appender.CARBON_LOGFILE.strategy.action.type = Delete
appender.CARBON_LOGFILE.strategy.action.basepath = ${sys:carbon.home}/repository/logs/
appender.CARBON_LOGFILE.strategy.action.maxdepth = 1
appender.CARBON_LOGFILE.strategy.action.condition.type = IfLastModified
appender.CARBON_LOGFILE.strategy.action.condition.age = 58D
appender.CARBON_LOGFILE.strategy.action.PathConditions.type = IfFileName
appender.CARBON_LOGFILE.strategy.action.PathConditions.glob = wso2carbon-`
Here is an image of all the configurations I have in log4j.properties for the carbon_logfile:
[log4j config](https://i.stack.imgur.com/Q18l6.png)
But the configuration seems to have no effect on wso2carbon log rotation or on restart of the service.
The service has 60 files with the pattern wso2carbon-* and increasing each day.
[files](https://i.stack.imgur.com/KyuY5.png)
Has anyone come across any similar issue?
Is there something wrong in the configuration for the delete action to be applied?
Get old log files to be deleted by the log4j wso2 mi policie. |
I had the same problem. The thing with gc.collect() and cuda.empty_cache() is that these methods don't remove the model from your GPU; they just clean the cache.
So you need to delete your model from Cuda memory after each trial and probably clean the cache as well, without doing this every trial a new model will remain on your Cuda device.
So I did these steps at the end of each trial:
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: py -->
# Release the model's memory after each trial: `del` drops the Python
# reference, gc.collect() actually frees the now-unreferenced object, and
# empty_cache() then returns the freed blocks from PyTorch's caching
# allocator back to the GPU.
del model
gc.collect()  # must be *called* -- a bare `gc.collect` is a no-op expression
torch.cuda.empty_cache()
<!-- end snippet -->
|
I tried installing Superset by following the steps provided in https://artifacthub.io/packages/helm/superset/superset.
Postgresql and redis managed to install correctly.
When i am running `helm install my-superset superset/superset --version 0.12.6` it takes a lot of time and returns
```
Error: INSTALLATION FAILED: failed post-install: 1 error occurred:
* timed out waiting for the condition
```
and some pods are in error status
```
C:\Users\Comarch>kubectl get pods
NAME READY STATUS RESTARTS AGE
my-redis-master-0 1/1 Running 0 11m
my-redis-replicas-0 1/1 Running 0 11m
my-postgresql-0 1/1 Running 0 11m
my-redis-replicas-1 1/1 Running 0 11m
my-redis-replicas-2 1/1 Running 0 10m
my-superset-postgresql-0 1/1 Running 0 7m4s
my-superset-init-db-mhqpn 0/1 Error 0 7m4s
my-superset-redis-master-0 1/1 Running 0 7m4s
my-superset-init-db-s8g6z 0/1 Error 0 6m34s
my-superset-init-db-6kngd 0/1 Error 0 6m8s
my-superset-init-db-tjvtf 0/1 Error 0 5m23s
my-superset-init-db-27x6z 0/1 Error 0 3m57s
my-superset-init-db-tndb6 0/1 Error 0 72s
my-superset-7449574d54-fvlgf 0/1 Running 1 (63s ago) 7m4s
my-superset-worker-85b4c79494-fqg8c 0/1 CrashLoopBackOff 6 (14s ago) 7m4s
```
Logs from k3s
```
I0226 14:08:35.678505 1224 scope.go:117] "RemoveContainer" containerID="e364b25e0b28f773a7bcd14d8f3092ddb9663b9a80e37f82b1a52551e74feba4"
E0226 14:08:35.680011 1224 pod_workers.go:1298] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"superset\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=superset pod=my-superset-worker-85b4c79494-fqg8c_default(102794ea-195f-414a-a218-ec474173736c)\"" pod="default/my-superset-worker-85b4c79494-fqg8c" podUID="102794ea-195f-414a-a218-ec474173736c"
```
Logs from docker
```
time="2024-02-26T14:09:32.963550813Z" level=error msg="collecting stats for 41bce38aca76aea4d7f668ca38619991711f7210030e187799f424a5ed46d697: no metrics received"
time="2024-02-26T14:09:33.218075523Z" level=error msg="loading cgroup for 106958" error="cgroups: cannot find cgroup mount destination"
```
There are no errors in the logs of individual pods. There is only a warning about SECRET_KEY
```
--------------------------------------------------------------------------------
WARNING
--------------------------------------------------------------------------------
A Default SECRET_KEY was detected, please use superset_config.py to override it.
Use a strong complex alphanumeric string and use a tool to help you generate
a sufficiently random sequence, ex: openssl rand -base64 42
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
Refusing to start due to insecure SECRET_KEY
[2024-02-26 14:16:31 +0000] [634] [INFO] Worker exiting (pid: 634)
Loaded your LOCAL configuration at [/app/pythonpath/superset_config.py]
[2024-02-26 14:16:32 +0000] [8] [ERROR] Worker (pid:634) exited with code 1
[2024-02-26 14:16:32 +0000] [8] [ERROR] Worker (pid:634) exited with code 1.
[2024-02-26 14:16:32 +0000] [659] [INFO] Booting worker with pid: 659
```
```
Redis version: 18.16.1
PostgreSQL version: 14.2.3
Helm version: 3.14.1
```
I tried installing on different versions of redis and postgresql and with my_values.yaml. The result is always the same. I will add that I am not familiar with Kubernetes.
I will be grateful for any help. |
|postgresql|query-optimization| |
I am investigating the decorator pattern in Kotlin and would like to encrypt/decrypt data class fields with a cryptography decorator.
One thing I do not understand is that the decorator approach requires an instance of the class being decorated, which means the data class fields I wish to encrypt will already contain plaintext before the decorator has an opportunity to encrypt any values.
I cannot find any examples of how to implement encryption/decryption using the decorator pattern for class constructor fields. Is it possible to achieve my desired implementation?
i am attempting to create this cryptography decorator to allow me to persist plaintext data as encrypted strings in my current android application room local database.
i have achieved the desired result using a room type converter for my custom data type field as shown below, however i would also like an alternative approach of employing a decorator class combined with kotlin delegation.
`class EncryptedStringConverter : KoinComponent {
private val cryptographer: Cryptographer by inject()
@TypeConverter
fun fromEncryptedStringToString(encryptedString: EncryptedString): String = encrypt(encryptedString.plaintText)
@TypeConverter
fun fromPlaintextToEncryptedString(plaintext: String): EncryptedString = decrypt(plaintext)
private fun encrypt(plaintext: String): String = cryptographer.conceal(plainText = plaintext)
private fun decrypt(encryptedString: String): EncryptedString = EncryptedString(cryptographer.reveal(encryptedString))
}
`
`
data class EncryptedString(val plaintText: String)
`
i have achieved the desired result using a room type converter for my custom data type field as shown below, however i would also like an alternative approach of employing a decorator class combined with kotlin delegation.
`class EncryptedStringConverter : KoinComponent {
private val cryptographer: Cryptographer by inject()
@TypeConverter
fun fromEncryptedStringToString(encryptedString: EncryptedString): String = encrypt(encryptedString.plaintText)
@TypeConverter
fun fromPlaintextToEncryptedString(plaintext: String): EncryptedString = decrypt(plaintext)
private fun encrypt(plaintext: String): String = cryptographer.conceal(plainText = plaintext)
private fun decrypt(encryptedString: String): EncryptedString = EncryptedString(cryptographer.reveal(encryptedString))
}
`
`
data class EncryptedString(val plaintText: String)
`
@Entity(
tableName = "user_table"
)
data class UserDO(
var firstName: EncryptedString,
var lastName: EncryptedString,
var email: EncryptedString,
var companyName: EncryptedString,
) |
Put desired color to fill transparent areas of an image in CSS |
|javascript|html|css| |
Is there any way to setup a type checker to validate that SQL queries are using the right columns from the right tables and the right dialect? |
Is there anyway to setup VSCode to check that SQL queries are valid |
|postgresql|sqlite|visual-studio-code| |
Because the audio source is not loaded directly on the audio element, and rather on the children nodes called `source`, you can't access it via `document.querySelector('audio').src`.
The right way to do it is to get the child element and access the source from there, but since Puppeteer is a middle man between Node and the DOM, you have to use eval to get the child element, and using Vanilla JavaScript, you can get the same effect by using what fiddlingaway suggested:
document.querySelectorAll("audio > source[type='audio/ogg']")[0].src |
Thought this might be helpful for others. While adding a signing SSH key in GitHub:
In the upper-right corner of any page, click your profile photo,
then click **Settings --> SSH and GPG keys --> New SSH key**.
Select the type of key as **signing** —
there are **authentication** and **signing** options.
# Problem
The real problem is that the Xcode version does not match up with your CocoaPods version. Whether the command is pod init or pod install, you'll face the same error as the original question shared.
# Solutions
I've faced this issue multiple times, so I'll be sharing all the different scenarios that I've faced in the past couple of years.
## First
Open the .xcodeproj. Navigate to **Project Navigator > Click the root project, check the right-hand panel > Find Identity and type**.
Over there, change the project format to any version lower than the latest version.
## Second
You need to first run the:
gem update xcodeproj
If the update operation does not fix your problem, then you have to run:
gem uninstall xcodeproj
and then:
gem install xcodeproj
## Third
I've another Mac machine that I didn't update anything on the terminal for a long time. In that machine, the solution was different than the solutions listed above. I've tried all of them but didn't work for me. My only solution was the uninstall and reinstall the cocoapods itself. First run:
sudo gem uninstall cocoapods
then run:
sudo gem install cocoapods
I hope this answer will collect all the possible solutions for the case. If you've experienced an alternative scenario please let me know in the comments below.
|
Because the audio source is not loaded directly on the audio element, and rather on the children nodes called `source`, you can't access it via `document.querySelector('audio').src`.
The right way to do it is to get the child element and access the source from there, but since Puppeteer is a middle man between Node and the DOM, you have to use `eval` to get the child element, and using Vanilla JavaScript, you can get the same effect by using what fiddlingaway suggested:
document.querySelectorAll("audio > source[type='audio/ogg']")[0].src |
try to use " import { http } from 'msw' " instead
https://mswjs.io/docs/migrations/1.x-to-2.x/ |
The newest MSW version has deprecated rest in favor of http.
The solution then should be to update your import to:
import { http } from 'msw';
From there you can follow the migration guides at this link: https://mswjs.io/docs/migrations/1.x-to-2.x/ to ensure you're properly setting up your mocks. |
The ggplot function expand_limits(x = 0) produces very pleasing spacing for many graphs (in a loop) but I'd rather not have the zero label. Does anyone know how to remove the zero label only ? The labels also need to be integer so I've tried to modify Joshua Cook's function:
integer_breaks <- function(n = 5, ...) {
fxn <- function(x) {
breaks <- floor(pretty(x, n, ...))
names(breaks) <- attr(breaks, "labels")
breaks
}
return(fxn)
}
to no avail. A minimal example is:
xxx <- c(1, 2, 4, 1, 1, 4, 2, 4, 1, 1, 3, 3, 4 )
yyy <- c(11, 22, 64, 45, 76, 47, 23, 44, 65, 86, 87, 83, 56 )
data <- data.frame(xxx, yyy)
p <- ggplot(data = data, aes(x = data[ , 1], y = data[ , 2]), group = data[ , 1]) + geom_count(color = "blue") + expand_limits(x = 0) + scale_x_continuous(breaks = integer_breaks())
p
I don't want the zero on the x-axis but I don't know in advance how many other x-axis values there will be. An alternative would be to produce a space on the left-hand side of the graph in a similar way to that given by expand limits - but the width of this would have to relate to the number of x-axis values.
|
R ggplot2: Is it possible to remove the zero label after using expand_limits(x = 0)? |
|ggplot2|integer|zero|x-axis| |
I press the send-message key and everything works normally; I press the stop key, again normal. But app.stop does not stop, and I get an error when sending a message again.
Code;
```
from customtkinter import *
import customtkinter as ctk
from threading import Thread
import time, os, asyncio
from pyrogram import Client
from pyrogram.enums import MessageMediaType
run = True
path = os.getcwd()
app = Client(path + "\\users\\" + "user")
async def tg_bot():
global run
global app # Reference the global app variable
s = 0
await app.start()
async for ii in app.get_chat_history(-1002070231241):
if ii.media == MessageMediaType.VIDEO:
if run:
s = s + 1
await app.copy_message(-1002142940185, -1002070231241, ii.id, caption="")
print(s, "sended.")
await asyncio.sleep(1)
await app.stop()
def start():
global run
run = True
def don(dongu, _e):
try:
dongu.run_until_complete(tg_bot())
finally:
dongu.close()
loop = asyncio.get_event_loop()
event = asyncio.Event()
oto = Thread(target=don, args=(loop, event))
oto.daemon = True
oto.start()
def stop():
global run
run = False
global app
app.stop
root = ctk.CTk()
root.geometry("300x300")
basla = CTkButton(root, text="message send", command=start)
basla.place(x=75, y=100)
dur = CTkButton(root, text="stop", command=stop)
dur.place(x=75, y=200)
root.mainloop()
```
Video;
https://drive.google.com/file/d/1tTuNKZ_WR9KK7WOjXbdzP3-M_TC4l6yk/view?usp=drive_link |
TypeError: Cannot read from private field in Tanstack React Query |
|javascript|reactjs|react-query|tanstackreact-query| |
The problem: I had the button click inside a form tag on my page, and the button type was not set, so it defaulted to submit. Every time it was clicked, it tried posting and performing my async web call followed by page navigation. It then became confused and I got intermittent results.
<button class="btn btn-primary mt-3" @onclick="DoAsync" type="button">Click me</button>
|
In regex, you have to escape a slash with a backslash (`/` -> `\/`)
So in your case:
```
\/log[^\/].*
^ ^
added backslashes
```
[link to regex101.com](https://regex101.com/r/tru7sf/1) |
I haven't had time to check it on my code, but from my analysis it is only generating the random image name once: because the server is running as a singleton, it only called the function once even though the handler keeps running. I think you can refactor and create a helper function for getting the randomImgName in a different file, like
randomImgNameGenerator.js
```
function randomImgNameGenerator(){
const randomImageName = (bytes = 32) => crypto.randomBytes(bytes).toString('hex')
return randomImageName
}
```
And call it inside the callback in multer. See if it helps.
I am working with SpringBatch and JPARepository.
I have 2 Connections DB2 & Postgres.
I need to switch the DataSources inside a JpaRepository
So, to hold the DataSource Context, I followed the link : [AbstractRoutingDataSource][1]
So here below is the code
public class ClientDatabaseContextHolder {
private static ThreadLocal<ClientDatabase> CONTEXT
= new ThreadLocal<>();
public static void set(ClientDatabase clientDatabase) {
Assert.notNull(clientDatabase, "clientDatabase cannot be null");
CONTEXT.set(clientDatabase);
}
public static ClientDatabase getClientDatabase() {
return CONTEXT.get();
}
public static void clear() {
CONTEXT.remove();
}
}
Then, In my RepositoryItemReader I want to set to My 1st DataSource Client
@Component
public class ClientReader extends RepositoryItemReader<Client> {
public ClientReader(final ClientModelsRepository clientRepository) {
DataSourceContextHolder.set("client"); // <===== SET HERE MY 1st Datasource
this.setRepository(clientRepository);
this.setMethodName("findAllWithFriends");
this.setSort(new HashMap<>());
this.setPageSize(5000);
}
}
Then In my JPARepository, I would like to change the DataSource to the 2nd.
@Transactional(Transactional.TxType.NOT_SUPPORTED)
default Page<Client> findAllWithFriends(final Pageable pageable) {
final Page<Client> clients = this.findAll(pageable);
if (clients.hasContent()) {
DataSourceContextHolder.set("friends"); // <== I WANT TO CHANGE THE DATASOURCE THERE
final List<Friend> friends = AppContext.getBean(FriendRepository.class).findByClientCodeIn(clientStringList);
clients.getContent().forEach(c -> { c.setFriends(friends); });
}
return clients;
}
My question is how to change the datasource when reading a chunk. I don't think it is possible to set 2 different datasources and methods in the ClientReader. I have also tried to set the 2nd datasource inside the FriendRepository.class, but it doesn't work either. I don't want to do it in the Processor because I want to get all matching friends for the 5000 Client codes. Do you have any ideas?
[1]: https://www.baeldung.com/spring-abstract-routing-data-source |
I have a Next.js website where I've created a component named TestMe.tsx. This component uses Next.js Link and Image components, and my project is using Next.js version 14.1.0 . I want to generate a build of this component so that I can reuse it in other Next.js projects as an Npm package.
I prefer to configure the next.config.mjs file to achieve this. How can I set up my next.config.mjs file to build TestMe.tsx as a standalone package that can be published on npm and easily reused in other Next.js projects?
I tried the following configuration but it didn't work:
const nextConfig = {
webpack(config, { isServer }) {
if (!isServer) {
config.externals.push("/src/components/TestMe/index.tsx");
config.output.library = "TestMe";
config.output.libraryTarget = "umd";
config.output.publicPath = "auto";
}
return config;
},
experimental: {
externalDir: true,
},
};
export default nextConfig;
please help me! thanks. |
create reusable package from component in next.js |
|javascript|reactjs|typescript|webpack|next.js| |
I had both of these conditions inside an if statement to check whether x was even or odd, but !(x&1) seemed to execute the body of the if in the case of even x, while x&1==0 didn't.
I expected both to give 0, considering that 1&0 is 0 and that 1 in a 32- or 64-bit representation is 000..01; if, say, x is something like 10010101100 (even), then their bit-wise AND should yield 0. Hence, I'm still not sure why !(x&1) works. Please correct me if I am wrong anywhere.
Thank you. |
why do x&1==0 and !(x&1) not yield the same results in an if statement in c++? |
|c++17| |
null |
So I have a navbar, and in this navbar I have the logo, the options and then the button. Whenever I apply padding to the button it pushes everything else to the left? Any advice cause I have never had this happen before. This happens with other stuff as well, so if I give the logo any padding it also pushes the items around. Why is this happening because I watched some tutorials just to be sure and I have no clue what I am doing wrong. Also just wanted to add that align-items: center also isn't working.
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-css -->
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
html {
font-size: 10px;
}
.container {
height: auto;
width: 100%;
/* overflow: hidden; */
}
.section1 {
background: rgb(180, 135, 238);
background: linear-gradient(90deg, rgba(180, 135, 238, 1) 0%, rgba(148, 187, 233, 1) 100%);
}
.logo {
font-size: 4rem;
font-family: "Protest Revolution", sans-serif;
font-weight: 400;
font-style: normal;
color: white;
margin-bottom: 10px;
margin-left: -10px;
}
.navbar {
width: 100%;
font-family: 'Montserrat', sans-serif;
background: rgb(180, 135, 238);
background: linear-gradient(90deg, rgba(180, 135, 238, 1) 0%, rgba(148, 187, 233, 1) 100%);
display: flex;
justify-content: space-between;
align-items: center;
padding: 25px 40px;
position: sticky;
top: 0;
z-index: 99999;
}
.nav-buttons li {
font-weight: 500;
font-size: 1.7rem;
display: inline-block;
padding: 20px;
list-style: none;
}
.nav-buttons li a {
color: white;
text-decoration: none;
}
.navbar .sign-up {
color: white;
font-size: 1.5rem;
text-decoration: none;
border: 2px solid white;
border-radius: 5px;
padding: 8px 50px 8px 50px;
}
<!-- language: lang-html -->
<div class="container">
<section class=section1>
<div class="navbar">
<label class="logo">appy</label>
<ul class="nav-buttons">
<li><a href="#">Home</a></li>
<li><a href="#">About</a></li>
<li><a href="#">Pricing</a></li>
<li><a href="#">Features</a></li>
<li><a href="#">FAQ</a></li>
</ul>
<a href="#" class="sign-up">SIGN UP</a>
</div>
<!-- end snippet -->
|
How to implement encryption for kotlin data class fields using decorator pattern |
|kotlin|decorator| |
null |
I'm very new to jboss.
I want to extract jboss metrics from my application using jmx_exporter. I'm working with version 5.2.0 of jboss and centos 7.
The available docs are for jboss 7, so I didn't find any help in the docs or in the examples on the internet.
I just want to extract some specific jboss metrics, not all of them.
Here is an exemple of what I did:
```
---
lowercaseOutputName: true
lowercaseOutputLabelNames: true
whitelistObjectNames:
- "jboss.jca:*"
rules:
- pattern: 'jboss.jca<name=(.*),type=null,attribute=ConnectionCount>'
name: jboss_jca_managedconnectionpool_connectioncount
help: JBoss JCA ManagedConnectionPool Connection Count
labels:
name: $1
type: GAUGE
```
But it is not giving a result.
Can someone help, please? |
extracting jboss metrics using jmx_exporter and jboss 5.2.0 version |
|jboss|jmx-exporter| |
null |
I am trying to generate dynamically a sidebar using the template coreui with angular 17
The error ExpressionChangedAfterItHasBeenCheckedError
```
ERROR Error: NG0100: ExpressionChangedAfterItHasBeenCheckedError:
Expression has changed after it was checked. Previous value: 'undefined'. Current value: 'disabled'. Expression location: \_SidebarNavLinkComponent component It seems like the view has been created after its parent and its children have been dirty checked. Has it been created in a change detection hook? Find more at https://angular.io/errors/NG0100
at throwErrorIfNoChangesMode (core.mjs:11912:11)
at bindingUpdated (core.mjs:17587:17)
at ɵɵproperty (core.mjs:20396:9)
at SidebarNavLinkComponent_Template (coreui-angular.mjs:13043:14)
at executeTemplate (core.mjs:12263:9)
at refreshView (core.mjs:13490:13)
at detectChangesInView (core.mjs:13714:9)
at detectChangesInViewIfAttached (core.mjs:13677:5)
at detectChangesInComponent (core.mjs:13666:5)
at detectChangesInChildComponents (core.mjs:13727:9)
```
I'm trying to update the coreui sidebar dynamically.
Here is my view layout.component.html
```
<!--sidebar-->
<c-sidebar #sidebar="cSidebar"
class="d-print-none sidebar sidebar-fixed"
id="sidebar"
visible>
<c-sidebar-brand [brandFull]="{
src: 'assets/img/brand/Success-logo.svg',
width: 200,
height: 46,
alt: 'Success sarl Logo'
}"
[brandNarrow]="{
src: 'assets/img/brand/coreui-signet-white.svg',
width: 46,
height: 46,
alt: 'Success sarl Logo'
}"
routerLink="./" />
<ng-scrollbar pointerEventsMethod="scrollbar">
<c-sidebar-nav [navItems]="navItems"
dropdownMode="close" />
</ng-scrollbar>
<c-sidebar-toggler *ngIf="!sidebar.narrow"
toggle="unfoldable"
cSidebarToggle="sidebar" />
</c-sidebar>
<!--main-->
<div class="wrapper d-flex flex-column min-vh-100 bg-light dark:bg-transparent">
<!--app-header-->
<app-default-header class="mb-4 d-print-none header header-sticky" position="sticky" sidebarId="sidebar" />
<!--app-body-->
<div class="body flex-grow-1 px-3">
<c-container breakpoint="lg" class="h-auto">
<router-outlet />
</c-container>
</div>
<!--app footer-->
<app-default-footer />
</div>
```
Here is my layout.component.ts
```
export class UserLayoutComponent implements OnInit {
public navItem?: Observable<any>;
public navItems: any;
public userAuthenticated = false;
public showView: any;
constructor(private _authService: AuthService,
private _repository: RepositoryService,
private route: ActivatedRoute,
private _router: Router,
private sideBarGenerateService: SideBarGenerateService,
) {
this._authService.loginChanged.subscribe(userAuthenticated => {
this.userAuthenticated = userAuthenticated;
})
}
ngOnInit(): void {
this._authService.isAuthenticated().then(userAuthenticated => {
this.userAuthenticated = userAuthenticated;
});
this._authService.checkUserClaims(Constants.adminClaims).then( res => {
if (!res) {
let user = this._authService.getUserInfo();
this._repository.getData(`api/v1/UserHasContrat/users/${this.route.snapshot.queryParams['filter']}`).subscribe((response: ApiHttpResponse) => {
if (!(response.body?.every((elem: any) => (user.profile.sub).includes(elem?.userId)))) {
this._router.navigate(['/dashboard']);
}
});
}
});
setTimeout(() => {
this.navItems = this.sideBarGenerateService.getSideBar();
},0)
}
getSideBar = () => {
//Get claims of user
this.userClaims = this._authService.getUserInfo()['profile']['role'];
return navItems.filter(this.isToBeDisplayed);
}
//Filter items to display
isToBeDisplayed = (value: any) => {
if (value.hasOwnProperty('children') && value.children.length > 0) {
value.children = value.children.filter((currentValue: any) => {
if (currentValue?.attributes) {
if ((currentValue.attributes['role'].every((elem: string) => this.userClaims.includes(elem)))) {
return currentValue;
}
}
})
return value;
}
else {
return value;
}
}
}
```
Please help me check this code. I can't get a result without the ExpressionChangedAfterItHasBeenCheckedError.
|
Angular 17 user-layout.component.ts:70 ERROR Error: NG0100 |