instruction
stringlengths
0
30k
I am experiencing the following problem. When I setup an ADC with DMA for 5 channels, I get lower readings than the expected ones. We have PCBs of the same batch in 3 countries, but only the ones tested in China are failing. At all times the ADC readings read lower than the expected values for all the ADC channels, but the PCBs at the other countries return the expected reading in all channels. All those PCBs are identical and they are driving small motors, have a keypad, have some LEDs UI, and a UART debugging port were we connect a USB to TTL UART cable. Here is the ADC configuration with DMA (source code attached too): `[CubeMX](https://i.stack.imgur.com/P1fOj.png)` `[CubeMX](https://i.stack.imgur.com/CxRQa.png)` `[schematic](https://i.stack.imgur.com/z33wz.png)` `[schematic](https://i.stack.imgur.com/MyD57.png)` `[schematic](https://i.stack.imgur.com/RaEIj.png)` We have measured the reference voltage and all the expected voltages on ADC input pins (some are fixed and known) and all looking good too. On the other hand, when I sample only one ADC channel in polling mode, I do not get this problem, the voltages read as expected. For example: the readings of a healthy ADC reading at 10-bit resolution should read something like 914 raw value which will correspond to 3652mV for the battery voltage, as opposed to the lower reading which will be something like 837 raw value which corresponds to 3346mV for the battery voltage. ```c /* USER CODE BEGIN Header */ /** ****************************************************************************** * @file adc.c * @brief This file provides code for the configuration * of the ADC instances. ****************************************************************************** * @attention * * Copyright (c) 2023 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. 
* If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ /* USER CODE END Header */ /* Includes ------------------------------------------------------------------*/ #include "adc.h" /* USER CODE BEGIN 0 */ /* USER CODE END 0 */ ADC_HandleTypeDef hadc1; DMA_HandleTypeDef hdma_adc1; /* ADC1 init function */ void MX_ADC1_Init(void) { /* USER CODE BEGIN ADC1_Init 0 */ /* USER CODE END ADC1_Init 0 */ ADC_ChannelConfTypeDef sConfig = {0}; /* USER CODE BEGIN ADC1_Init 1 */ /* USER CODE END ADC1_Init 1 */ /** Configure the global features of the ADC (Clock, Resolution, Data Alignment and number of conversion) */ hadc1.Instance = ADC1; hadc1.Init.ClockPrescaler = ADC_CLOCK_ASYNC_DIV10; hadc1.Init.Resolution = ADC_RESOLUTION_10B; hadc1.Init.DataAlign = ADC_DATAALIGN_RIGHT; hadc1.Init.ScanConvMode = ADC_SCAN_ENABLE; hadc1.Init.EOCSelection = ADC_EOC_SEQ_CONV; hadc1.Init.LowPowerAutoWait = ENABLE; hadc1.Init.LowPowerAutoPowerOff = ENABLE; hadc1.Init.ContinuousConvMode = ENABLE; hadc1.Init.NbrOfConversion = 5; hadc1.Init.DiscontinuousConvMode = DISABLE; hadc1.Init.ExternalTrigConv = ADC_SOFTWARE_START; hadc1.Init.ExternalTrigConvEdge = ADC_EXTERNALTRIGCONVEDGE_NONE; hadc1.Init.DMAContinuousRequests = ENABLE; hadc1.Init.Overrun = ADC_OVR_DATA_PRESERVED; hadc1.Init.SamplingTimeCommon1 = ADC_SAMPLETIME_160CYCLES_5; hadc1.Init.SamplingTimeCommon2 = ADC_SAMPLETIME_160CYCLES_5; hadc1.Init.OversamplingMode = DISABLE; hadc1.Init.TriggerFrequencyMode = ADC_TRIGGER_FREQ_LOW; if (HAL_ADC_Init(&hadc1) != HAL_OK) { Error_Handler(); } /** Configure Regular Channel */ sConfig.Channel = ADC_CHANNEL_0; sConfig.Rank = ADC_REGULAR_RANK_1; sConfig.SamplingTime = ADC_SAMPLINGTIME_COMMON_1; if (HAL_ADC_ConfigChannel(&hadc1, &sConfig) != HAL_OK) { Error_Handler(); } /** Configure Regular Channel */ sConfig.Channel = ADC_CHANNEL_1; sConfig.Rank = ADC_REGULAR_RANK_2; if (HAL_ADC_ConfigChannel(&hadc1, 
&sConfig) != HAL_OK) { Error_Handler(); } /** Configure Regular Channel */ sConfig.Channel = ADC_CHANNEL_2; sConfig.Rank = ADC_REGULAR_RANK_3; if (HAL_ADC_ConfigChannel(&hadc1, &sConfig) != HAL_OK) { Error_Handler(); } /** Configure Regular Channel */ sConfig.Channel = ADC_CHANNEL_3; sConfig.Rank = ADC_REGULAR_RANK_4; if (HAL_ADC_ConfigChannel(&hadc1, &sConfig) != HAL_OK) { Error_Handler(); } /** Configure Regular Channel */ sConfig.Channel = ADC_CHANNEL_TEMPSENSOR; sConfig.Rank = ADC_REGULAR_RANK_5; if (HAL_ADC_ConfigChannel(&hadc1, &sConfig) != HAL_OK) { Error_Handler(); } /* USER CODE BEGIN ADC1_Init 2 */ /* USER CODE END ADC1_Init 2 */ } void HAL_ADC_MspInit(ADC_HandleTypeDef* adcHandle) { GPIO_InitTypeDef GPIO_InitStruct = {0}; RCC_PeriphCLKInitTypeDef PeriphClkInit = {0}; if(adcHandle->Instance==ADC1) { /* USER CODE BEGIN ADC1_MspInit 0 */ /* USER CODE END ADC1_MspInit 0 */ /** Initializes the peripherals clocks */ PeriphClkInit.PeriphClockSelection = RCC_PERIPHCLK_ADC; PeriphClkInit.AdcClockSelection = RCC_ADCCLKSOURCE_SYSCLK; if (HAL_RCCEx_PeriphCLKConfig(&PeriphClkInit) != HAL_OK) { Error_Handler(); } /* ADC1 clock enable */ __HAL_RCC_ADC_CLK_ENABLE(); __HAL_RCC_GPIOA_CLK_ENABLE(); /**ADC1 GPIO Configuration PA0 ------> ADC1_IN0 PA1 ------> ADC1_IN1 PA2 ------> ADC1_IN2 PA3 ------> ADC1_IN3 */ GPIO_InitStruct.Pin = V_BAT_IN_Pin|I_BAT_IN_Pin|VARIANT_ID_Pin|BOARD_ID_Pin; GPIO_InitStruct.Mode = GPIO_MODE_ANALOG; GPIO_InitStruct.Pull = GPIO_NOPULL; HAL_GPIO_Init(GPIOA, &GPIO_InitStruct); /* ADC1 DMA Init */ /* ADC1 Init */ hdma_adc1.Instance = DMA1_Channel1; hdma_adc1.Init.Request = DMA_REQUEST_ADC1; hdma_adc1.Init.Direction = DMA_PERIPH_TO_MEMORY; hdma_adc1.Init.PeriphInc = DMA_PINC_DISABLE; hdma_adc1.Init.MemInc = DMA_MINC_ENABLE; hdma_adc1.Init.PeriphDataAlignment = DMA_PDATAALIGN_HALFWORD; hdma_adc1.Init.MemDataAlignment = DMA_MDATAALIGN_HALFWORD; hdma_adc1.Init.Mode = DMA_CIRCULAR; hdma_adc1.Init.Priority = DMA_PRIORITY_LOW; if (HAL_DMA_Init(&hdma_adc1) 
!= HAL_OK) { Error_Handler(); } __HAL_LINKDMA(adcHandle,DMA_Handle,hdma_adc1); /* ADC1 interrupt Init */ HAL_NVIC_SetPriority(ADC1_IRQn, 0, 0); HAL_NVIC_EnableIRQ(ADC1_IRQn); /* USER CODE BEGIN ADC1_MspInit 1 */ /* USER CODE END ADC1_MspInit 1 */ } } void HAL_ADC_MspDeInit(ADC_HandleTypeDef* adcHandle) { if(adcHandle->Instance==ADC1) { /* USER CODE BEGIN ADC1_MspDeInit 0 */ /* USER CODE END ADC1_MspDeInit 0 */ /* Peripheral clock disable */ __HAL_RCC_ADC_CLK_DISABLE(); /**ADC1 GPIO Configuration PA0 ------> ADC1_IN0 PA1 ------> ADC1_IN1 PA2 ------> ADC1_IN2 PA3 ------> ADC1_IN3 */ HAL_GPIO_DeInit(GPIOA, V_BAT_IN_Pin|I_BAT_IN_Pin|VARIANT_ID_Pin|BOARD_ID_Pin); /* ADC1 DMA DeInit */ HAL_DMA_DeInit(adcHandle->DMA_Handle); /* ADC1 interrupt Deinit */ HAL_NVIC_DisableIRQ(ADC1_IRQn); /* USER CODE BEGIN ADC1_MspDeInit 1 */ /* USER CODE END ADC1_MspDeInit 1 */ } } /* USER CODE BEGIN 1 */ /* USER CODE END 1 */ /* USER CODE BEGIN Header */ /** ****************************************************************************** * @file dma.c * @brief This file provides code for the configuration * of all the requested memory to memory DMA transfers. ****************************************************************************** * @attention * * Copyright (c) 2024 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. 
* ****************************************************************************** */ /* USER CODE END Header */ /* Includes ------------------------------------------------------------------*/ #include "dma.h" /* USER CODE BEGIN 0 */ /* USER CODE END 0 */ /*----------------------------------------------------------------------------*/ /* Configure DMA */ /*----------------------------------------------------------------------------*/ /* USER CODE BEGIN 1 */ /* USER CODE END 1 */ /** * Enable DMA controller clock */ void MX_DMA_Init(void) { /* DMA controller clock enable */ __HAL_RCC_DMA1_CLK_ENABLE(); /* DMA interrupt init */ /* DMA1_Channel1_IRQn interrupt configuration */ HAL_NVIC_SetPriority(DMA1_Channel1_IRQn, 0, 0); HAL_NVIC_EnableIRQ(DMA1_Channel1_IRQn); } /* USER CODE BEGIN 2 */ /* USER CODE END 2 */ /* USER CODE BEGIN Header */ /** ****************************************************************************** * @file BxADC.c * @brief Analogue interface with HAL and data conversions * ****************************************************************************** * @attention * * Copyright Statement: * Copyright (c) Bboxx Ltd 2023 * The copyright in this document, which contains information of a proprietary and confidential nature, * is vested in Bboxx Limited. The content of this document may not be used for purposes other * than that for which it has been supplied and may not be reproduced, either wholly or in part, * nor may it be used by, or its contents divulged to, any other person who so ever without written permission * of Bboxx Limited. 
* ****************************************************************************** */ /* USER CODE END Header */ //---------------------------------------------------------------------------------------------------------------------- // Includes //---------------------------------------------------------------------------------------------------------------------- #include <stdbool.h> #include "adc.h" #include "stm32g0xx_hal_adc.h" #include "main.h" #include "Bboxx.h" #include "BxMotor.h" #include "BxADC.h" #include "BxMessage.h" #include "BxSerial.h" //---------------------------------------------------------------------------------------------------------------------- // Defines //---------------------------------------------------------------------------------------------------------------------- typedef enum { V_BAT, I_BAT, VARIANT_ID, BOARD_ID, TEMPERATURE, NUM_OF_ADC_CH } ADC_CHS; //---------------------------------------------------------------------------------------------------------------------- // Variables //---------------------------------------------------------------------------------------------------------------------- static uint16_t ADCreadings[NUM_OF_ADC_CH]; static uint16_t ADCoffset; static uint16_t ADCref = ADCREF; static uint16_t ADCpsuConv = ((ADCREF*(ADCPSUR1+ADCPSUR2))/(ADCMAX*ADCPSUR1)) * RESULTMAG; static uint16_t ADCiConv = (ADCREF/(ADCIGAIN*ADCISENSER)); extern ADC_HandleTypeDef hadc1; //---------------------------------------------------------------------------------------------------------------------- // Functions //---------------------------------------------------------------------------------------------------------------------- void ADC_Initialise(void) /** * @brief Initialises all ADC * @param None * @retval None */ { HAL_GPIO_WritePin(En3v3A_GPIO_Port, En3v3A_Pin, GPIO_PIN_RESET); // Turn On current amp power HAL_GPIO_WritePin(En3v3B_GPIO_Port, En3v3B_Pin, GPIO_PIN_RESET); // Turn On IDs (+Motor) power 
HAL_ADC_Start_DMA(&hadc1, (uint16_t*)ADCreadings, sizeof(ADCreadings)/sizeof(ADCreadings[V_BAT])); // start ADC conversion return; } void ADC_Calibrate(void) /** * @brief Calibrates ADC * @param None * @retval None */ { HAL_Delay(2000); static bool firstTime = true; // Run once while (firstTime) { // Calibrate to a known voltage such as the battery uint16_t volt = ADC_Get_Voltage(); if (volt > (uint16_t)3750u) { firstTime = false; // Stop if battery voltage is reached ADCoffset = ADCreadings[I_BAT]; // Save the run mode current offset Set_String_Tx_Buffer("calibration done "); Message_16_Bit_Number(volt); Set_String_Tx_Buffer(" mV and "); Message_16_Bit_Number(ADCref); Set_String_Tx_Buffer(" mV"); Set_String_Tx_Buffer(NEWLINE); } else { ADCref++; // Increase gain via adding error to the reference voltage } } } uint16_t ADC_Remove_Offset(uint16_t RawValue) /** * @brief Removes the offset from the ADC circuit * @param None * @retval None */ { if(RawValue < ADCoffset) RawValue =0; else RawValue -= ADCoffset; return RawValue; } uint16_t ADC_Convert_Voltage(uint16_t RawValue) /** * @brief Converts the Raw ADC data into mM * @param None * @retval None */ { uint32_t Result =0; RawValue =ADC_Remove_Offset(RawValue); ADCpsuConv = ((ADCref*(ADCPSUR1+ADCPSUR2))/(ADCMAX*ADCPSUR1)) * RESULTMAG; Result =(ADCpsuConv * RawValue); // scale result Result =(Result>>ADCVARDIVIDE); return (uint16_t) Result; } uint16_t ADC_Convert_Current(uint16_t RawValue) /** * @brief Converts the Raw ADC data into mA * @param None * @retval None */ { uint32_t Result =0; RawValue =ADC_Remove_Offset(RawValue); ADCiConv = (ADCref/(ADCIGAIN*ADCISENSER)); Result = (ADCiConv * RawValue); Result =(Result>>ADCVARDIVIDE); return (uint16_t) Result; } uint16_t ADC_Convert_ID(uint16_t RawValue) /** * @brief Converts the Raw ADC data into An ID voltage 10*units * @param None * @retval None */ { uint16_t Result16 =0; // For Faster calculation uint32_t Result32 =0; // For higher resolution RawValue 
=ADC_Remove_Offset(RawValue); // Needs 32bit calculation Result32 =(ADCref * RawValue); // scale result Result16 =(uint16_t)(Result32>>ADCVARDIVIDE); // mV Result16 += 50; // add 50mV for rounding up Result16 = Result16/100; // Scale for 10*units return (uint8_t) Result16; } uint16_t ADC_Get_Voltage(void) /** * @brief returns the value of the PSU voltage (mV) * @param None * @retval None */ { return ADC_Convert_Voltage(ADCreadings[V_BAT]); } uint16_t ADC_Get_Current(void) /** * @brief returns the current current value (mA) * @param None * @retval None */ { return ADC_Convert_Current(ADCreadings[I_BAT]); } uint8_t ADC_Get_Variant(void) /** * @brief returns the Variant as defined by R68//R70 in 10*units * @param None * @retval None */ { return ADC_Convert_ID(ADCreadings[VARIANT_ID]); } uint8_t ADC_Get_BoardID(void) /** * @brief returns the ID as defined by R67//R71 in 10*units * @param None * @retval None */ { return ADC_Convert_ID(ADCreadings[BOARD_ID]); } uint16_t ADC_Get_Temperature(void) /** * @brief returns the Temp in 100*C * @param None * @retval None */ { return ADCreadings[TEMPERATURE]; } void ADC_Print_Raw(void) /** * @brief Prints the Raw ADC data in serial terminal * @param None * @retval None */ { Set_String_Tx_Buffer("ADCref== "); Message_16_Bit_Number(ADCref); Set_String_Tx_Buffer(NEWLINE); Set_String_Tx_Buffer("ADCpsuConv== "); Message_16_Bit_Number(ADCpsuConv); Set_String_Tx_Buffer(NEWLINE); Set_String_Tx_Buffer("ADCiConv== "); Message_16_Bit_Number(ADCiConv); Set_String_Tx_Buffer(NEWLINE); Set_String_Tx_Buffer("ADCoffset== "); Message_16_Bit_Number(ADCoffset); Set_String_Tx_Buffer(NEWLINE); Set_String_Tx_Buffer("ADCvoltageRAW== "); Message_16_Bit_Number(ADCreadings[V_BAT]); Set_String_Tx_Buffer("\t\tADCvoltage== "); Message_16_Bit_Number_Formatted(ADC_Get_Voltage(), BareNumber); Set_String_Tx_Buffer("mV"); Set_String_Tx_Buffer(NEWLINE); Set_String_Tx_Buffer("ADCcurrentRAW== "); Message_16_Bit_Number(ADCreadings[I_BAT]); 
Set_String_Tx_Buffer("\t\tADCcurrentRAW== "); Message_16_Bit_Number_Formatted(ADC_Get_Current(), BareNumber); Set_String_Tx_Buffer("mA"); Set_String_Tx_Buffer(NEWLINE); Set_String_Tx_Buffer("ADCvariantRAW== "); Message_16_Bit_Number(ADCreadings[VARIANT_ID]); Set_String_Tx_Buffer("\t\tADCvariant== "); Message_16_Bit_Number_Formatted(ADC_Get_Variant(), BareNumber); Set_String_Tx_Buffer(NEWLINE); Set_String_Tx_Buffer("ADCboardIDRAW== "); Message_16_Bit_Number(ADCreadings[BOARD_ID]); Set_String_Tx_Buffer("\t\tADCboardID== "); Message_16_Bit_Number_Formatted(ADC_Get_BoardID(), BareNumber); Set_String_Tx_Buffer(NEWLINE); Set_String_Tx_Buffer("Temperature== "); Message_16_Bit_Number_Formatted(ADC_Get_Temperature(), BareNumber); Set_String_Tx_Buffer(NEWLINE); Set_String_Tx_Buffer("En3v3A== "); Message_8_Bit_Number(HAL_GPIO_ReadPin(En3v3A_GPIO_Port, En3v3A_Pin)); Set_String_Tx_Buffer(NEWLINE); Set_String_Tx_Buffer("En3v3B== "); Message_8_Bit_Number(HAL_GPIO_ReadPin(En3v3B_GPIO_Port, En3v3B_Pin)); Set_String_Tx_Buffer(NEWLINE); } void HAL_ADC_ConvCpltCallback(ADC_HandleTypeDef *hadc) { //ADC_Print_Raw(); } //---------------------------------------------------------------------------------------------------------------------- //---------------------------------------------------< end of file >---------------------------------------------------- //---------------------------------------------------------------------------------------------------------------------- ```
STM32 ADC DMA low raw/Voltage readings
|stm32|dma|adc|
null
I want to create a very simple, low load application, and first time decide to use Quarkus framework. May be it sounds strange, but I want to use Sqlite for database, and authentication by an ip address. App will run in secured LAN, users are only trusted peoples, others are blocked in firewall. In database I have a table, which contains mapping users and theirs ip addresses. After reading the docs, I understand that I need to implement my HttpAuthenticationMechanism, that takes user by ip address from that table, and if record in database exists, than return SecurityIdentity (like in io.quarkus.vertx.http.security.HeaderAuthenticator by header). Problem is that Sqlite seems does not support reactive mode. I wrote this implementation: class IpAddressAuthenticationMechanism : HttpAuthenticationMechanism { @Inject lateinit var entityManager: EntityManager override fun authenticate( exchange: RoutingContext?, identityProviderManager: IdentityProviderManager? ): Uni<SecurityIdentity> { val ip = exchange!!.request().remoteAddress().hostAddress() val criteriaBuilder: CriteriaBuilder = entityManager.getCriteriaBuilder() val criteriaQuery = criteriaBuilder.createQuery(UserAuth::class.java) val root: Root<UserAuth> = criteriaQuery.from(UserAuth::class.java) val data = criteriaQuery.select(root).where(criteriaBuilder.equal(root.get<String>("ip"), ip)) return Uni.createFrom().nullItem() } ... } It not finished code, because it fails on `val criteriaBuilder: CriteriaBuilder = entityManager.getCriteriaBuilder()` with exception: io.quarkus.runtime.BlockingOperationNotAllowedException: You have attempted to perform a blocking operation on a IO thread. This is not allowed, as blocking the IO thread will cause major performance issues with your application. If you want to perform blocking EntityManager operations make sure you are doing it from a worker thread. 
at io.quarkus.hibernate.orm.runtime.session.TransactionScopedSession.checkBlocking(TransactionScopedSession.java:116) at io.quarkus.hibernate.orm.runtime.session.TransactionScopedSession.getCriteriaBuilder(TransactionScopedSession.java:537) at org.hibernate.engine.spi.SessionLazyDelegator.getCriteriaBuilder(SessionLazyDelegator.java:699) at org.hibernate.engine.spi.SessionLazyDelegator.getCriteriaBuilder(SessionLazyDelegator.java:67) at org.hibernate.Session_OpdLahisOZ9nWRPXMsEFQmQU03A_Synthetic_ClientProxy.getCriteriaBuilder(Unknown Source) at org.myapp.services.IpAddressAuthenticationMechanism.authenticate(IpAddressAuthenticationMechanism.kt:33) ... It is possible to query Sqlite database to get users in this place?
BlockingOperationNotAllowedException in HttpAuthenticationMechanism
|quarkus|
This represents an array of pointers to `char`, in other words, strings. The way you recognize this is that the array syntax `[]` is demoted to a pointer, so this is the same as `char **`, a pointer to pointers of `char`s.
I think best would be just to set the `left` and `top` using `%` units. I did the same for the `width` as well. <!-- begin snippet: js hide: false console: true babel: false --> <!-- language: lang-js --> window.addEventListener("load", function() { document.getElementById('map1').onclick = function(event) { const rect = event.target.getBoundingClientRect(); const x = event.clientX - rect.left; const y = event.clientY - rect.top; addMarker(x, y, rect.width, rect.height); } }); function resize() { document.getElementById('map1').style.width = ((Math.random() * 50) + 50) + "%" } function addMarker(x, y, w, h) { var i = new Image(); i.src = 'https://i.postimg.cc/7h8KpVyG/icon-exclamation-triangle-32.png'; i.style.position = "absolute"; i.onload = function() { var yOffset = y; var xOffset = x; console.log() // Apply offsets for CSS margins etc. yOffset -= i.height / 2; xOffset -= i.width / 2; // Set image insertion coordinates // i.style.top = yOffset + 'px'; // i.style.left = xOffset + 'px'; i.style.top = yOffset / h * 100 + '%'; i.style.left = xOffset / w * 100 + '%'; // optionally: i.style.width = i.width / w * 100 + '%' } // Append the image to the DOM document.getElementById('map1').appendChild(i); } <!-- language: lang-css --> .responsive { position: relative; max-width: 1200px; width: 100%; height: auto; border: 1px solid red; } .responsive #myImg { width: 100%; } <!-- language: lang-html --> <div class="container"> <div style="height: auto; padding: 3px 0 3px 0;"> <div class="card"> <div class="card_content"> <h1>Map PNG Testing <button onclick="resize()">toggle size</button></h1> <div id="map1" class="responsive"> <img src="https://i.postimg.cc/8c4zqZrt/skwh.png" id="myImg"> </div> </div> </div> </div> </div> <!-- end snippet -->
I have two columns, column 1 has product, column 2 has sales. I want to sort the table by sales descending. Total sales value/4 will give me the quarter value. I want to then create a group based on this quarter value. Name the groups Q1, Q2, Q3, Q4 and omit any rows with zero sales. Q1 then would be my highest selling products by value, Q2 the second highest, etc. How can I do this in SQL? Sample table - [![enter image description here](https://i.stack.imgur.com/FvjN1.jpg)](https://i.stack.imgur.com/FvjN1.jpg) Expected output - [![enter image description here](https://i.stack.imgur.com/qCBFh.jpg)](https://i.stack.imgur.com/qCBFh.jpg) I tried a case when statement, but unlike the sample I have 1 million rows of data that need to be grouped, hence hardcoding numbers in a case when does not help; I need some expert SQL help here!
SQL - Split data into quantiles based on sum
|postgresql|
|langchain|chatgpt-api|llama-index|gpt-index|
null
Pair combinations of array column values in PySpark
I am trying to execute pig latin script to find TF-IDF value for book dataset My input file bookdataset.txt contains the following lines<br> Document 1: "The quick brown fox jumps over the lazy dog."<br> Document 2: "A quick brown dog jumps over the lazy fox."<br> Document 3: "The lazy cat sleeps all day."<br> Document 4: "A lazy dog is a happy dog."<br> And my pig script is as follows<br> -- Load the book dataset<br> documents = LOAD 'input/book_dataset.txt' USING TextLoader AS (line:chararray); -- Tokenize the documents into words<br> tokenized_documents = FOREACH documents GENERATE FLATTEN(TOKENIZE(REPLACE(LOWER(line), '[^a-zA-Z0-9\\s]', ''))) AS word, line; -- Compute the term frequency (TF) for each word in each document<br> word_counts = GROUP tokenized_documents BY (word, line);<br> word_tf = FOREACH word_counts GENERATE group.word AS word, group.line AS line, COUNT(tokenized_documents) AS tf; -- Compute the document frequency (DF) for each word<br> word_df = FOREACH (GROUP word_tf BY word) GENERATE group AS word, COUNT(word_tf) AS df; -- Compute the number of unique documents<br> num_documents = DISTINCT tokenized_documents.line;<br> num_documents_count = FOREACH (GROUP num_documents ALL) GENERATE COUNT(num_documents) AS num_docs; -- Compute the inverse document frequency (IDF) for each word<br> word_idf = FOREACH word_df GENERATE word, LOG((double)(num_docs.$0) / (double)df) AS idf; -- Join TF and IDF to calculate TF-IDF for each word in each document<br> word_tf_idf = JOIN word_tf BY word LEFT OUTER, word_idf BY word;<br> tf_idf = FOREACH word_tf_idf GENERATE word_tf::line AS document, word_tf::word AS word, word_tf::tf * word_idf::idf AS tf_idf; -- Group TF-IDF values by document and store the result<br> grouped_tf_idf = GROUP tf_idf BY document;<br> final_tf_idf = FOREACH grouped_tf_idf GENERATE group AS document, tf_idf; -- Store the result<br> STORE final_tf_idf INTO 'output/tf_idf_values' USING PigStorage(); When i try to execute above script it 
gives the following error on the following line <br> num_documents = DISTINCT tokenized_documents.line;<br> I guess the error is with the use of the DISTINCT operator<br> **ERROR org.apache.pig.tools.grunt.Grunt - ERROR 1200: <line 20, column 44> mismatched input '.' expecting SEMI_COLON**<br> Please resolve the issue
Apache PIG ERROR org.apache.pig.tools.grunt.Grunt - ERROR 1200: <line 20, column 44> mismatched input '.' expecting SEMI_COLON
|apache-pig|
I need your help, I've checked all the documentation but I can't find the solution. I have a simple HTML navigation. In CSS, I have a fade in and out animation. The fade in works perfectly, but the fade out does not work. I've tried a million things, it just won't work. ``` <header class="mobile"> <div class="logo"> <img src="/media/yogism_logo_header.svg" alt="Logo" /> </div> <nav class="mobile-nav"> <div class="hamburger-menu" id="hamburger-menu"> <img src="/media/hamburger_nav.svg" alt="Menu" id="menu-icon" /> </div> <ul> <li><a href="#">Features</a></li> <li><a href="#">About Us</a></li> <li><a href="#">Testimonial</a></li> <li><a href="#">Pricing</a></li> <li><a href="#">Blog</a></li> </ul> </nav> </header> ``` ``` .mobile-nav { display: block; width: 100%; background-color: var(--neutral-1000); z-index: 1000; position: absolute; top: 0; left: 0; } .mobile-nav.open { animation: slideDown 0.3s ease forwards; } .mobile-nav.close { animation: slideUp 0.3s ease forwards; } @keyframes slideDown { from { transform: translateY(-100%); } to { transform: translateY(0); } } @keyframes slideUp { from { transform: translateY(0); } to { transform: translateY(-100%); } } ``` ``` document.addEventListener("DOMContentLoaded", function () { const hamburgerMenu = document.getElementById("hamburger-menu"); const mobileNav = document.querySelector(".mobile-nav"); hamburgerMenu.addEventListener("click", function () { if (mobileNav.classList.contains("open")) { mobileNav.classList.remove("open"); mobileNav.classList.add("close"); } else { mobileNav.classList.remove("close"); mobileNav.classList.add("open"); } const menuIcon = document.getElementById("menu-icon"); menuIcon.src = mobileNav.classList.contains("open") ? "/media/cross_nav.svg" : "/media/hamburger_nav.svg"; }); }); ``` The class does toggle. 
When the page is just being loaded the first time, there is no class: ``` class="mobile-nav" ``` When I click it once, it opens with the animation and becomes this: ``` class="mobile-nav open" ``` When I click it again, the navigation just disappears without an animation, and the HTML becomes this: ``` class="mobile-nav close" ``` When I then click it again, it opens again WITH the animation: ``` class="mobile-nav open" ``` Can anyone help on this Easter Sunday?
You should add those assets into the `pubspec.yaml` file like below flutter: assets: - images/example.png - assets/example.mp3 - after adding those assets, run the `flutter clean` command and then run `flutter pub get`
When you define an alias, you can use either single-quotes, as if it's a string literal, or you can use back-ticks, as if it's an identifier. But when you refer to the alias later in the query, you must treat it as an identifier. order by 'FullName' desc; This orders by the constant string value `'FullName'`, not the alias. If you want to refer to the _identifier_, use back-ticks: order by `FullName` desc; This page in the manual talks about this difference: https://dev.mysql.com/doc/refman/8.0/en/problems-with-alias.html
Another approach worth considering would be writing a template function for all possible maps: template<typename K, typename V> V getValueAtIndex(map<K, V> &mapRef, int index) { for (const auto& entry : mapRef) { if (index == 0) { return entry.second; } index--; } V defaultValue; return defaultValue; } I assumed here that you know that value is in map. The `defaultValue` there is basically to prevent compiler warning. Function could be modified to take some actual default value and return it in case index is negative or bigger than map size but I don't think it's useful in this case. Here is some example usage of above function: map<string, int> fruitsCount; fruitsCount["apple"] = 5; // index 0 fruitsCount["banana"] = 3; // index 1 fruitsCount["cherry"] = 7; // index 2 auto middleValue = getValueAtIndex(fruitsCount, 1); // check index 1 cout << "middleValue: " << middleValue << endl; // prints middleValue: 3 Of course you need to remember that map is sorted by key, so if I would change the order of fruitsCount initialization the result would still be the same.
I read a little bit everywhere that the "in" operator have a time complexity of O(n), yet I used it in a code and idk why but it's acting as if it have the time complexity O(1) So I was solving a problem on leetcode and ended up having this algorithm ``` class Solution(object): def twoSum(self, nums, target): """ :type nums: List[int] :type target: int :rtype: List[int] """ i=0 n=len(nums) for i in range (0, n-1): if (target-nums[i]) in nums: for j in range (i+1, n): if nums[i]+nums[j]==target: return[i, j] ``` and I ended up having runtime pretty similar to code like these that involve hashmaps: ``` class Solution(object): def twoSum(self, nums, target): """ :type nums: List[int] :type target: int :rtype: List[int] """ hash_map = {} for i in range(len(nums)): complement = target - nums[i] if complement in hash_map.keys(): return [i, hash_map[complement]] hash_map[nums[i]] = i ``` It's weird because I read everywhere that the time complexity of the in operator for list is O(n), so I assumed this line `if (target-nums[i]) in nums:` would make my code equivalent to ``` class Solution(object): def twoSum(self, nums, target): self=list() n=len(nums) for i in range(n-1): for j in range(i+1,n): if nums[i]+nums[j]==target: return [i,j] ``` yet it has the runtime of a code using hashmaps, and uses the memory of the code using the same list so O(1), anyone could explain that to me please?
null
No need for a long process: simply go to your config/admin.php and set this to `true`. If it is not there, add it: /* |-------------------------------------------------------------------------- | Access via `https` |-------------------------------------------------------------------------- | | If your page is going to be accessed via https, set it to `true`. | */ 'https' => env('ADMIN_HTTPS', true),
I have declared a vector of a class I created like this: typedef Eigen::Vector<Trade, Dynamic> VectorT; And I am trying to sum a rolling window of trade.amount. Before switching from std::vector to Eigen::Vector I did this for (size_t i = 30; i < trades.size(); ++i) { double cum_buy_qty = 0; for (size_t k = i - 30; k <= i; ++k) { cum_buy_qty += trades[k].buyQuantity; }} Now I am slicing the vector like that: for (size_t i = 30; i < trades.rows(); ++i) { VectorT slice = trades(seq(i - 30, i)); } But I don't know how I could sum the attribute buyQuantity for each Trade in my new slice vector. Any idea please?
How to sum attributes of a Eigen vector of class
|c++|eigen|
Thank you for reaching out. My name is Conar and I'm with DocuSign Developer Support. The DocuSign sender cannot control notifications for the recipient. This is by design. The recipient can opt out of notifications on their end. Please confirm if this case can be closed. Best regards, Conar | DocuSign Developer Support
Use optString, optBoolean and so on: ``` java for (int i = 0; i < data.length(); i++) { JSONObject jsonObject = data.getJSONObject(i); jsonObject.optString("employer_name", "default_value"); } ``` check [documentation][1] of JSONObject. For your other question, there are libraries like Gson for turning a jsonString to a class, [check this guide][2] and this other [SO question][3] [1]: https://www.javadoc.io/static/org.json/json/20171018/index.html?org/json/JSONObject.html [2]: https://mkyong.com/java/how-do-convert-java-object-to-from-json-format-gson-api/ [3]: https://stackoverflow.com/a/28957612/17234028
Finally, I solved the issue, as Paypal is very concerned about its webhooks security, I can't run my webhook locally with Postman, the solution was to download and set up **ngrok** and change my webhook URL to the URL **ngrok** generated for me and add "/paypal/subscriptions/webhook" at the end of the URL something like this: `https://ngrok-url/paypal/subscriptions/webhook` on "Sandbox Webhooks"
The predefined variable [Build.CronSchedule.DisplayName][1] can be used for condition decision. An example is also given in the link. For the given case: schedules: - cron: '0 16 * * *' displayName: 'schedule 1' branches: include: - master always: True - cron: '0 4 * * *' displayName: 'schedule 2' branches: include: - master always: True ... jobs: - job: job x condition: eq(variables['Build.CronSchedule.DisplayName'], 'schedule 1') ... - job: job y condition: eq(variables['Build.CronSchedule.DisplayName'], 'schedule 2') [1]: https://learn.microsoft.com/en-us/azure/devops/pipelines/yaml-schema/schedules-cron?view=azure-pipelines#buildcronscheduledisplayname-variable
Will the following work? And if so, will it provide a significant performance improvement. My AppUser object includes: public class AppUser { public int Id { get; private set; } // lots of other properties public List<Tag>? Tags { get; set; } } The total number of tags is presently 22 and is unlikely to grow beyond 200. So I read all of them in and cache them for any call where I need the tag(s). Would it work to have a singleton service that creates a `DbContext` on first use and keeps that DbContext for the life of my application. And this is a DbContext with tracking on. And on startup it reads in all the tags. As follows: async Task Startup() { dbContext = await TrackingDbFactory.CreateDbContextAsync(); tags = await dbContext.Tags.ToListAsync(); } Then when I need to read in an AppUser, I do: async Task<AppUser?> GetUserAsync(int id){ return await dbConect.Include(u => u.Tags).FirstOrDefaultAsync(u => u.Id == id); } In the above case, will it re-read the AppUser.Tasks from the database? Or will the DbContext use the tags list it read in earlier and re-use those already read in objects? I do know it will need to read the AppUserTags join table. But not also reading the Tags table again would be a performance improvement. And I have 3 other list properties I would do this for, so the total performance savings would be decent. This seems to work, but I don’t know Entity Framework well enough to test this thoroughly. So, will this work consistently, not re-reading the `Tags` table?
It seems simplest to me that you just use `preg_replace()` to add an email domain to any string that doesn't have it, then join the results with a comma and a space. Code: ([Demo][1]) $recipients = [ "kumar", "ram@test.com", "ravi", "rob@example.com" ]; echo implode( ', ', preg_replace( '/^[^@]+$/', '$0@test.com', $recipients ) ); Result: kumar@test.com, ram@test.com, ravi@test.com, rob@example.com [1]: https://3v4l.org/ABCI9
null
The `ksh` language is pretty neat and I'm glad somebody is taking an interest in the underused features. (Language?! Quite so, it is a fully functional and fairly powerful language that runs slow as... but that's another story.) I'll give you this example and I'll fill in an explanation about what I am doing after. #!/usr/local/bin/ksh # This will do a show and tell using the typeset -T feature # of ksh # Sat Mar 30 01:01:35 AM EDT 2024 # typeset -T TheTime_T=( typeset -S skew=0 function get { now=$( date +%s ) (( .sh.value=now+skew )) (( skew+=1 )) } ) typeset -T Upper_T=( TheTime_T now typeset one=11 typeset two=2U typeset countU=0 typeset start="Upper" function initialize { typeset -S countS=0 # static typeset countI=0 # instance (( _.countU+=1 )) (( countS+=1 )) countI=$(( countI+1 )) echo "init of Upper: ${!_} S=${countS} I=${countI} U=${_.countU}" } function setStart { echo "Upper:setStart ${_.now} $@" } function endStart { echo "Upper:endStart ${_.now} $@" } ) typeset -T Middle_T=( Upper_T _ typeset middleVal="middle value" typeset start=middle typeset two="middle" function initialize { echo "init of Middle: ${!_}" .sh.type.Upper_T.initialize ${!_} } function endStart { echo "Middle:endStart $@" _.two="midEnd" } ) typeset -T Lower_T=( Middle_T _ typeset one=1L typeset start="lower" function initialize { echo "init of Lower: ${!_}" .sh.type.Upper_T.initialize ${!_} } function endStart { echo "Lower:endStart $@" echo "Ending the start process in mv=${_.middleVal} t=${_.two} ${_.one}" } ) Upper_T uu uu.initialize toStart uu.setStart hownow uu.endStart then Middle_T mm mm.initialize inMiddle mm.setStart middleStart mm.endStart middleStartThen Lower_T ll=( middleVal="lower val" ) ll.initialize TowardsEnd ll.setStart startingLower ll.endStart endingLower When run with ksh2020 (which is better than I remembered) this resulted in: $ ./tryT.sh init of Upper: uu S=1 I=1 U=1 Upper:setStart 1711826692 hownow Upper:endStart 1711826693 then init of Middle: mm init of 
Upper: Upper_T S=2 I=1 U=1 Upper:setStart 1711826694 middleStart Middle:endStart middleStartThen init of Lower: ll init of Upper: Upper_T S=3 I=1 U=2 Upper:setStart 1711826695 startingLower Lower:endStart endingLower Ending the start process in mv=lower val t=midEnd lower I think it is explained in the man page that `Name_T _ ` means inheritance. Class names start with caps and end with `_T`. You don't have to but it's a good convention. Variables that are prefixed by `_.` are class variables. By way of explanation, I created the `TheTime_T` class to give the idea of a get function. "At the office," we have a `now` built-in that gives Unix seconds since 1970 but for show and tell, I created this function that counts up each time it is called so we give the idea that time is progressing. You can see the three classes `Upper_T`, `Middle_T` and `Lower_T`, each is based upon the one above, inheriting functions (aka, methods) and variables (properties). I'm not sure if it is in the man pages but `_` is the current object so declaring `_` to be of type `Upper_T` says that `Middle_T` is derived from `Upper_T`. The `Upper_T.initialize` function has two count variables, one is declared with the `-S`, static. As always, static means shared between instances so the `countS` gets incremented for all instances `countI` only gets incremented for each instance. Why is `countI` incremented differently? I'll leave that as an exercise for the student (I don't know, that's what works). A bit more about the syntax, - You can see `_.two="midEnd"`. (I did not need the quotes but they don't hurt.) That sets the two variable in that `Middle_T` instance. - Observe the `.sh.type.Upper_T.initialize ${!_}` This is calling the initialize function in that class. There's should be a way to do it with something like `_._.initialize` but it does not work. - We see the echo of `init of Upper: ${!_}`. When this was run as a part of a variable of type `Upper_T`, this printed the variable name. 
When run when called directly from a lower class, it displayed the class name. I put in a bunch of goofy variable manipulations that don't serve any purpose and are not particularly illustrative but maybe they will help getting the idea. My colleague *loved* ksh to distraction and wrote massive programs with great complicated classes. I wish he had loved Python that much so the code would be written in a language that is actually controlled and generally agreed upon rather than just what happens to work. Questions?
I would like to write an Update query. I need to update target_table field in tblnames1 based on tblenames2. **tblNames1**: |filename | Description | target table |----|-----|------------------------------------------- | /app/data/shared/mbs/test.yaml | select * from test.fn_hierarchy_prod_group(1); | |/app/data/shared/nkm/test1.yaml | select *from run_update_query | | /app/data/shared/nkm/test5.yaml | select *from func_datad_addr_1 | |/app/data/shared/nkm/test2.yaml | INSERT INTO a_base(evnt_nbr,triggering_evnt,)SELECT evnt_nbr,triggering_evnt FROM delim;| **tblNames2**: |ID | Description | target table |----|-----|---------------------- | 1082 | test.fn_hierarchy_prod_group | dba.l,dba.z | 1091 | func_datad_addr | dba.n | 1099 | fn_hierarchy_customer | dba.m | 1100 | run_update_query | dba.j Output - The query should return and update target table column in tblNames1 using 2 ways - i) by comparing description field of tablenames2 into tablenames1 and populate target table of tblnames2 into tblnames 1 ii) If its insert statement then populate the target after insert into tablename, pattern is not fixed though: |filename |Description| target table |--|--|---------------------------------------------------------- | /app/data/shared/mbs/test.yaml | select * from test.fn_hierarchy_prod_group(1); | dba.l | /app/data/shared/mbs/test.yaml | select * from test.fn_hierarchy_prod_group(1); | dba.z |/app/data/shared/nkm/test1.yaml | select *from run_update_query |dba.j | /app/data/shared/nkm/test5.yaml | select *from func_datad_addr_1 | |/app/data/shared/nkm/test2.yaml | INSERT INTO a_base(evnt_nbr,triggering_evnt,)SELECT evnt_nbr,triggering_evnt FROM delim;|a.base
CSS navigation fade in / fade out on click animation does not work
|javascript|html|css|
Lists are horribly inefficient, as are objects: they store FP numbers as objects, and lists contain pointers to these objects. This prevents all optimizations and causes a very bad memory access pattern. Not to mention objects are reference counted and protected by the Global Interpreter Lock (GIL). Any code involving CPython objects in a hot loop is doomed to be very slow, especially when this loop is interpreted. Such a thing happens for `min_list` and `min_nd`. Also Numpy cannot operate on lists directly so it converts them to an array. This conversion is generally much slower than any basic Numpy array computation (which is done again and again in `np.min(a)`, hence the bad performance). As for `np.min` vs `b.min`, they are both equivalent, though the first can be a bit slower on small arrays because of the function fetch overhead of CPython and a different path taken in Numpy. Numpy is optimized for large arrays. On small arrays you will see many overheads which are hard to understand without delving into the code of the Numpy implementation (since such overheads are clearly implementation dependent). On my machine (i5-9600KF CPU with CPython 3.8.1 and Numpy 1.24.4), the former takes 4.5 µs and the latter 2.5 µs, so a very small time. This is typically the overhead of Numpy (i.e. 1-10 µs per call). With a 100x bigger array, I get 80 µs versus 83 µs (+/- 2 µs), so the two are statistically not different. It also shows **most of the time spent in your small array benchmark is just pure overhead** (~60% to ~80%). If you want to reduce these overheads, then you should use tools like Cython/Numba capable of compiling (specialized) Python code, or just not use Python in the first place but a native language like C/C++/Rust.
Not really, you don't have a move here. In fact, you are constructing a new std::string object when doing `+"FOO"`, which means you are doing memory allocation. Which means it can throw.
I'm encountering an issue with passing the "size" parameter from my JavaScript function to my Django backend. Here's the relevant code snippet from my Django view: ```python def get_product(request, slug): try: product = Product.objects.get(slug=slug) if request.GET.get('size'): size = request.GET.get('size') print(size) return render(request, "core/product_detail.html") except Exception as e: print(e) ``` And here's the JavaScript function: ```javascript function get_correct_price(size) { console.log(size); window.location.href = window.location.pathname + `?size=${size}`; } ``` In my HTML template, I'm calling the JavaScript function with the size parameter: ```html <button onclick="get_correct_price('{{ s.name }}')">{{ s.name }}</button> ``` However, when I click the button, the size parameter doesn't seem to be passed to the backend. I've verified that the JavaScript function is being called, and the size parameter is logged correctly in the console. Could you please help me identify why the size parameter is not being received in the backend? Any insights or suggestions would be greatly appreciated. Thank you! 
urls.py: ```python path("products/",product_list_view,name="product_list"), path("product/<pid>/",product_detail_view,name="product_detail"), path("product/<slug>/",get_product,name="product_detail"), ``` In my models.py: ```python class Size(models.Model): name=models.CharField(max_length=100) code= models.CharField(max_length=50,blank=True,null=True) price = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True) def __str__(self): return self.name class Product(models.Model): pid=ShortUUIDField(length=10,max_length=100,prefix="prd",alphabet="abcdef") user=models.ForeignKey(CustomUser, on_delete=models.SET_NULL ,null=True) cagtegory=models.ForeignKey(Category, on_delete=models.SET_NULL ,null=True,related_name="category") vendor=models.ForeignKey(Vendor, on_delete=models.SET_NULL,null=True,related_name="product") color=models.ManyToManyField(Color,blank=True) size=models.ManyToManyField(Size,blank=True) title=models.CharField(max_length=100,default="Apple") image=models.ImageField(upload_to=user_directory_path,default="product.jpg") description=RichTextUploadingField(null=True, blank=True,default="This is a product") price = models.DecimalField(max_digits=10, decimal_places=2, default=1.99) old_price = models.DecimalField(max_digits=10, decimal_places=2, default=2.99) specifications=RichTextUploadingField(null=True, blank=True) tags=TaggableManager(blank=True) product_status=models.CharField(choices=STATUS, max_length=10,default="In_review") status=models.BooleanField(default=True) in_stock=models.BooleanField(default=True) featured=models.BooleanField(default=False) digital=models.BooleanField(default=False) sku=ShortUUIDField(length=10,max_length=100,prefix="sku",alphabet="abcdef") date=models.DateTimeField(auto_now_add=True) updated=models.DateTimeField(null=True,blank=True) class Meta: verbose_name_plural="Products" def product_image(self): return mark_safe('<img src="%s" width="50" height="50"/>'%(self.image.url)) def __str__(self): return 
self.title def get_percentage(self): new_price=((self.old_price-self.price)/self.old_price)*100 return new_price ``` In short: I simply want that ```http://127.0.0.1:8000/product/prdeeffeafaec/?size=15inch``` from the url it should print ```15inch``` this
For me it was because I had [_date] in my php code. The code worked well until recently. Changed it to [date] and it started working again.
I write fastapi sqlmodel sqlalchemy project. I got type error in PyCharm: ``` expected type 'Literal["*"] | QueryableAttribute', got 'list[Account] | None' instead ``` error was caused by this code: ``` from sqlalchemy.orm import selectinload class CRUDCounterPartyCommon( CRUDBase[CounterParty, CounterpartyCommonCreateUpdateSchema, CounterpartyCommonCreateUpdateSchema] ): async def get_by_uuid(self, db: AsyncSession, *, _uuid: uuid.UUID) -> Optional[CounterParty]: counterparty = select(CounterParty).where(CounterParty.id == _uuid).options(selectinload(CounterParty.accounts)) ``` Warning was caused by selectinload. What should I do to solve this problem? Update mypy says: error: Argument 1 to "selectinload" has incompatible type "list[Account] | None"; expected "Literal['*'] | QueryableAttribute[Any]" [arg-type]
def function1(dd:pd.DataFrame): col1=dd.test_result.ne("fail").cumsum() ss1=dd.assign(col1=col1).loc[dd.test_result.eq("fail")].col1.value_counts(ascending=False) ends_with_pass = 'yes' if ss1.size and (col1.eq(ss1.index[0])&col1.shift(-1).eq(ss1.index[0]+1)).any() else 'no' return dd.iloc[[0]].assign(fails_in_a_row=ss1.max(),ends_with_pass=ends_with_pass).drop(columns='test_result') df1.groupby(['bike','test_type'],as_index=False,group_keys=False).apply(function1).query("fails_in_a_row>0") : | bike | test_type | fails_in_a_row | ends_with_pass | |------|-----------|----------------|----------------| | b | fast | 2 | yes | | b | slow | 2 | yes | | c | fast | 2 | no | | c | slow | 2 | no |
Permutation/Combination problems are always a tough nut to crack. Given a Rock paper scissors game of N turns played by 2 people (round win : 1 point, round draw : 0) Player 1 sequence has been provided. **Player 2 needs to win without playing same move twice in a row. Find Number of ways to win** (with Mod 1000000007 to prevent integer overflow). **(Win condition - Player 2 has more round wins than Player 1)** (R- Rock, P- Paper, S-Scissor) e.g. 3 rounds - RRR Player 2 can win by PRP, PSP, RPR Answer : 3 e.g. 2 rounds - RP Player 2 can win by PS, RS (PP not allowed because consecutive same moves cannot be played by Player 2) Answer : 2 Is it a true Permutation problem or does the no consecutive same moves condition make it more of a DP problem? (I remember that every Permutation/Combination can be a DP but every DP cannot be a Perm/Comb) PS - This is not part of any active programming contest. It was a part of an interview round in past and had a time limit of 30 min. Bruteforce trying to generate each string would not work. Total ways to win would be sum of the number of ways to win by 1 round, 2 rounds, ... N rounds. Without the constraint of no two consecutive moves same, it would be much simpler. For N round wins - 1 way For N-1 round wins - N ways (for N-1 wins and 1 draw) For N-2 round wins - N*N-1 ways (for N-2 wins and 2 draws) + N ways (for N-1 wins and 1 loss) Ans = (1) + (N) + (N*N-1 + N) ... But as soon as the constraint arrives, the sequence of Players 1 moves matter. Now with the constraint For N round wins - it may not be possible if the player 1 sequence has even 1 character repeated in sequence e.g. PRR (cannot win 3 rounds) Was unable to identify how the number of was for each count of round wins would be.
Let's stick to the statement: > The `Index_ID` is not unique over the complete dataset, it is only unique per year and month. This means that `Previous_Index_ID` points to an `Index_ID` at some fixed past period. Based on the data provided, I assume that `Previous_Index_ID` is the `Index_ID` for the previous calendar month. To number groups, I'll assume that: - data are sorted by `Year, Month` - indexes of data records are unique integers The plan is to join the indices of the corresponding previous records to the current ones; then replace group roots with unique ids and pointers to previous rows with the corresponding data in the records they're pointing at: ``` year, month = divmod(df['Month'], 12) year += df['Year'] month += 1 index_id = pd.MultiIndex.from_arrays([year, month, df['Index_ID']]) root = df.index.min() - 1 # group starting point marker df = ( df # join the indices of the previous records as group_id .join(pd.Series(df.index, index_id, name='group_id'), on=['Year','Month','Previous_Index_ID']) .fillna({'group_id': root}) .astype({'group_id': int}) ) for current, previous in df['group_id'].items(): df.loc[current, 'group_id'] = (current if previous == root else df.at[previous, 'group_id']) ``` Note: in the for-loop, we can replace `... = current if ...` with some other supplier of unique group ids if needed, for example: ``` group_id = 0 for current, ... df.loc[current, ...] = (group_id:=group_id+1) if ... # or from itertools import count group_id = count(1) for current, ... df.loc[current, ...] = next(group_id) if ... ```
I am trying to Get a response from the OpenAi assistants API using a Django project. I implement a model class inside models.py as such: from django.db import models from typing_extensions import override from openai import AssistantEventHandler from django.http import HttpResponse # Create your models here. class Eventhandler(models.Model,AssistantEventHandler): class meta: managed = False @override def on_text_created(self, text) -> None: print(f"\nassistant > ", end="", flush=True) @override def on_text_delta(self, delta, snapshot): print(delta.value, end="", flush=True) def on_tool_call_created(self, tool_call): print(f"\nassistant > {tool_call.type}\n", flush=True) def on_tool_call_delta(self, delta, snapshot): if delta.type == 'code_interpreter': if delta.code_interpreter.input: print(delta.code_interpreter.input, end="", flush=True) if delta.code_interpreter.outputs: print(f"\n\noutput >", flush=True) for output in delta.code_interpreter.outputs: if output.type == "logs": print(f"\n{output.logs}", flush=True) Now, **what is important** is that the Eventhandler class sends the response back to the client! I am a getting an exception of type: ValueError: *The view theassistantcallerapp.views.callerview didn't return an HttpResponse object. It returned None instead.* Now, **callerview is the view inside views.py**. It maps to the URL that will start the GET request to the OpenAi Assistants API. This implementation I am using is for streaming responses. Now, here is the view insides views.py and **NOTICE HOW THE EVENTHANDLER CLASS IS USED** #inside views.py # this is callerview(request, paramm): thread = client.beta.threads.create() message = client.beta.threads.messages.create( thread_id=thread.id,role="user", content=otro) myeventhandler = Eventhandler() <-----------NOTICE HERE!!!!!!!!!!!!! with client.beta.threads.runs.create_and_stream( thread_id=thread.id, assistant_id="asst_dddfffggg", instructions="Please address the user as Donkey Kong. 
The user has a premium account.", event_handler=myeventhandler, <-----------NOTICE HERE!!!!!!!!!!!!! ) as stream: stream.until_done() How can I make the class Eventhandler inside models.py return the response from the OpenAi Assistants API ?
For my Windows 11 PC Windows doesn't get a snapshot and notices updates to the directory, but maybe not reliably. #include <Windows.h> #include <iostream> using namespace std; int main() { constexpr wchar_t const *FILENAME = L"file.bin"; DeleteFileW( FILENAME ); WIN32_FIND_DATAW fdw; HANDLE hFind = FindFirstFileW( L"*", &fdw ); auto ret = [] { return GetLastError() == ERROR_FILE_NOT_FOUND ? EXIT_SUCCESS : EXIT_FAILURE; }; if( hFind == INVALID_HANDLE_VALUE ) return ret(); HANDLE hFile = CreateFileW( FILENAME, GENERIC_READ | GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL ); if( hFile == INVALID_HANDLE_VALUE ) return EXIT_FAILURE; do wcout << fdw.cFileName << endl; while( FindNextFileW( hFind, &fdw ) ); return ret(); }
You can adjust your code with `saveData` method to manage the process of staging the data into Snowflake creating a INSERT INTO statement and executing the operation asynchronously. Setting the `PreparedStatement` parameters can be more efficient. ``` public Mono<Void> saveData(List<Map<String, Object>> dataMapList, String jdbcUrl, String username, String password, String targetObject) { return Mono.fromCallable(() -> { HikariDataSource dataSource = getOrCreateDataSource(jdbcUrl, username, password); try (Connection conn = dataSource.getConnection()) { if (dataMapList == null || dataMapList.isEmpty()) { return Mono.empty(); } // snowflake using COPY INTO command String copyIntoStatement = buildCopyIntoStatement(targetObject, dataMapList); try (PreparedStatement pstmt = conn.prepareStatement(copyIntoStatement)) { IntStream.range(0, dataMapList.size()) .forEach(i -> setPreparedStatementParams(pstmt, dataMapList.get(i), i)); pstmt.executeBatch(); } return Mono.empty(); // success } catch (Exception ex) { throw new RuntimeException("Error while saving data: " + ex.getMessage(), ex); } }).subscribeOn(Schedulers.boundedElastic()).then(); } private String buildInsertIntoStatement(String targetObject, List<Map<String, Object>> dataMapList) { Map<String, Object> firstDataMap = dataMapList.get(0); String columnNames = firstDataMap.keySet().stream() .collect(Collectors.joining(", ")); String valuesPlaceholders = IntStream.range(0, dataMapList.size()) .mapToObj(i -> "(" + firstDataMap.keySet().stream() .map(columnName -> "?") .collect(Collectors.joining(", ")) + ")") .collect(Collectors.joining(", ")); return "INSERT INTO " + targetObject + " (" + columnNames + ") VALUES " + valuesPlaceholders; } private void setPreparedStatementParams(PreparedStatement pstmt, Map<String, Object> data, int rowIndex) { int parameterIndex = rowIndex * data.size() + 1; data.values().forEach(value -> { try { pstmt.setObject(parameterIndex++, value); } catch (Exception ex) { throw new 
RuntimeException("Error while setting prepared statement parameter: " + ex.getMessage(), ex); } }); } ```
As the error message _implies_, `head` etc. shouldn't be indented, they should be on the same level as `doctype html` (which makes sense, since the `doctype` tag doesn't hold the rest of the document). ``` doctype html head meta(charset='UTF-8') title This Is Pugjs Page body .container This Is Container ``` is correct.
You should not violate the rules stated by the React documentation, because React (when used as intended) makes certain guarantees on how it will behave. React doesn't make guarantees on how it would behave if used differently. The internal implementation of React may change fundamentally without notice. React will keep their guarantees, but the actual behavior when used outside of the rules may change completely. ### About your example If you use a `useState` but never call its `setState`, the value behaves effectively in the same way as if you would use a `useRef`. But ... 1. Your `x`, including the manually changed property, would be overwritten as soon as `setX` is called. 2. Your changed `x` would never be displayed, if not something else (e.g. the change of your `y`) triggers a re-render (that's why your `onClick` version doesn't work). This introduces an unwanted dependency between unrelated things. 3. You are not following the rules and standards, so other people may be confused by your code. Your `onClick` handler effects the `x` in the same as the `useEffect`, the difference is, that the `y` is not changed, so there is no reason for React to re-render. Try to call `setY` after changing the `x`, and the new value of `x` should also become visible. ### Conclusion That said, you basically have the exact behavior as with a `useRef`, so you should use a `useRef` if you want this behavior. If you use `useState`, you should use it as intended.
My Android app uses Fragments and includes Google signin authorization via Firebase. After signing in and starting a Fragment transaction, the following appears, overlaying the Fragment underneath it: [![enter image description here][1]][1] It's as if the gray screen was a Dialog window. When I touch the screen, the gray view disappears: [![enter image description here][2]][2] Has anyone here seen this problem or know what's causing it? [1]: https://i.stack.imgur.com/BW3MF.jpg [2]: https://i.stack.imgur.com/3WYsQ.jpg
I know existence of a `throw` inside of a `promise`, change the state of that specific promise to `rejected` and the result will be the `error` message, but when I type this `throw` inside of a `setTimeout` located inside of a `promise`, that `throw` doesn't modify the state and result of that `promise`, at first I thought maybe there is something wrong with `setTimeout`, but I tried `resolve()` and `reject()` inside that `setTimeout` and I received the expected result. so I wonder why this happen with a `throw` located in `setTimeout` inside of a `promise`? code: const test = new Promise((resolve, reject) => { setTimeout(() => { throw new Error("error"); }, 2000) }); //output after 2 sec: [[PromiseState]]: pending //[[PromiseResult]]: undefined; <!--end-snippet--> const test = new Promise((resolve, reject) => { setTimeout(() => { resolve("success"); }, 2000) }); //output after 2 sec:[[PromiseState]]: fulfilled //[[PromiseResult]]: success
Why does a throw inside of a setTimeout, located inside a promise, not change the state and result of the output promise?
|javascript|promise|settimeout|throw|
I have Prettier formatting correctly, but only when pressing `Ctrl+shift+p` and selecting `"Format Document With..."` -> `"Prettier - Code formatter"`. When selecting `"Format Document"`, it **does nothing**... There is a third option, "Format Document (Forced)"; this works and formats the file, but it seems to indicate some kind of problem because it refers to the word **Forced**. Now I want to format when saving, but that doesn't work either, seemingly because "Format Document" is not working. What configuration is still missing, or what wrong configuration needs fixing? In user settings: "editor.defaultFormatter": "esbenp.prettier-vscode", "editor.formatOnSaveMode": "file", "editor.formatOnSave": true, added this one to test but it didn't change anything "prettier.requireConfig": true
I have to handle xml documents that are big enough (up to 1GB) and parse them with python. I am using the [`iterparse()`][1] function (SAX style parsing). My concern is the following, imagine you have an xml like this <?xml version="1.0" encoding="UTF-8" ?> <families> <family> <name>Simpson</name> <members> <name>Homer</name> <name>Marge</name> <name>Bart</name> </members> </family> <family> <name>Griffin</name> <members> <name>Peter</name> <name>Brian</name> <name>Meg</name> </members> </family> </families> The problem is, of course to know when I am getting a family name (as Simpsons) and when I am getting the name of one of that family member (for example Homer) What I have been doing so far is to use "switches" which will tell me if I am inside a "members" tag or not, the code will look like this import xml.etree.cElementTree as ET __author__ = 'moriano' file_path = "test.xml" context = ET.iterparse(file_path, events=("start", "end")) # turn it into an iterator context = iter(context) on_members_tag = False for event, elem in context: tag = elem.tag value = elem.text if value : value = value.encode('utf-8').strip() if event == 'start' : if tag == "members" : on_members_tag = True elif tag == 'name' : if on_members_tag : print "The member of the family is %s" % value else : print "The family is %s " % value if event == 'end' and tag =='members' : on_members_tag = False elem.clear() And this works fine as the output is The family is Simpson The member of the family is Homer The member of the family is Marge The member of the family is Bart The family is Griffin The member of the family is Peter The member of the family is Brian The member of the family is Meg My concern is that with this (simple) example i had to create an extra variable to know in which tag i was (on_members_tag) imagine with the true xml examples that I have to handle, they have more nested tags. 
Also note that this is a very reduced example, so you can assume that I may be facing an XML with more tags, more inner tags, and trying to get different tag names, attributes and so on. So the question is: am I doing something horribly stupid here? I feel like there must be a more elegant solution to this. [1]: https://web.archive.org/web/20201111201837/http://effbot.org/zone/element-iterparse.htm
null
I'm trying to visualize the data coming through the API with Sigma.JS, but I can't always get results. When I tried to visualize the dynamic incoming data without mock data by writing a simple API with Flask, it worked, but this data is slightly different from the real json data of the project, I can't integrate the new incoming data, although I took custom rendering as an example, I couldn't solve it. I will be leaving the json data I will be using below. In the nodes section, the json data is divided into two groups according to the y index in the form of attacker and destination, think of them as SIEM logs. ip is available in the id section, attackType in the label section ``` <!doctype html> <html lang="en"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1"> <title>Sigma.js Example</title> <script src="https://cdnjs.cloudflare.com/ajax/libs/sigma.js/2.4.0/sigma.min.js"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/graphology/0.25.4/graphology.umd.min.js"></script> <style> #container { width: 100%; height: 100vh; background: white; } </style> </head> <body> <div id="container"></div> <script> const graph = new graphology.Graph(); function shuffleArray(array) { for (let i = array.length - 1; i > 0; i--) { const j = Math.floor(Math.random() * (i + 1)); [array[i], array[j]] = [array[j], array[i]]; } } async function fetchAttackTypes() { try { const response = await fetch('http://127.0.0.1:5000/data'); const data = await response.json(); return data.attackTypes; } catch (error) { console.error('Error:', error); return []; } } async function renderGraph() { const attackTypes = await fetchAttackTypes(); shuffleArray(attackTypes); attackTypes.forEach((type, index) => { const angle = index * (2 * Math.PI / attackTypes.length); const x = 200 * Math.cos(angle); const y = 200 * Math.sin(angle); graph.addNode(type, { label: type, x, y, size: 10, color: '#00f' }); }); attackTypes.forEach(type => { const 
otherNodes = attackTypes.filter(otherType => otherType !== type); const hasConnections = graph.neighbors(type).length > 0; if (!hasConnections) { const randomIndex = Math.floor(Math.random() * otherNodes.length); const randomType = otherNodes[randomIndex]; graph.addEdge(type, randomType, { color: '#f00' }); } }); const sigmaInstance = new Sigma(graph, document.getElementById("container")); sigmaInstance.on('overNode', function(e) { e.data.node.color = '#f00'; e.data.node.size *= 2; sigmaInstance.refresh(); }); sigmaInstance.on('outNode', function(e) { e.data.node.color = '#00f'; e.data.node.size /= 2; sigmaInstance.refresh(); }); } renderGraph(); </script> </body> </html> ``` nodes.json ``` "nodes": [ { "color": "blue", "id": "59.166.0.9", "label": Exploit, "size": 10, "x": 0, "y": 0 }, { "color": "blue", "id": "149.171.126.1", "label": Fuzzing, "size": 10, "x": 0, "y": 0 }, { "color": "blue", "id": "59.166.0.2", "label": NaN, "size": 10, "x": 0, "y": 1 }, { "color": "blue", "id": "149.171.126.7", "label": NaN, "size": 10, "x": 0, "y": 1 }, { "color": "blue", "id": "59.166.0.9", "label": NaN, "size": 10, "x": 0, "y": 2 }, { "color": "blue", "id": "149.171.126.4", "label": NaN, "size": 10, "x": 0, "y": 2 }, { "color": "blue", "id": "59.166.0.0", "label": NaN, "size": 10, "x": 0, "y": 3 }, { "color": "blue", "id": "149.171.126.3", "label": NaN, "size": 10, "x": 0, "y": 3 } ] ``` edges.json ``` "edges": [ { "color": "purple", "id": "e1_1000", "size": 5, "source": "175.45.176.1", "target": "149.171.126.10" }, { "color": "purple", "id": "e1_1001", "size": 5, "source": "175.45.176.1", "target": "149.171.126.10" }, { "color": "purple", "id": "e1_1002", "size": 5, "source": "175.45.176.0", "target": "149.171.126.14" }, { "color": "purple", "id": "e1_1003", "size": 5, "source": "149.171.126.18", "target": "175.45.176.3" }, { "color": "purple", "id": "e1_1004", "size": 5, "source": "149.171.126.18", "target": "175.45.176.3" }, ] ``` [Image of the code I wrote while it is 
running](https://i.stack.imgur.com/LiH6X.jpg) [I need to show the incoming data](https://i.stack.imgur.com/pa3sv.png)
Sigma.JS custom rendering
|javascript|json|flask|visualization|sigma.js|
null
I have this code that tries to retrieve all data from a sheet, including metadata: $response = $this->spreadsheet_service->spreadsheets->get($this->spreadsheet_id, ['ranges' => $sheet_name ]); $sheetData = $response->getSheets(); foreach ($sheetData as $sheet) { echo "sheet:"; var_dump($sheet); $data = $sheet->getData(); echo "sheet Data:"; var_dump($data); die; } The dump of `$sheet` shows me an object with `rowCount = 996`, which means it's not empty. But the dump of `$data` shows: sheet Data:array(0) { } If I use `$this->spreadsheet_service->spreadsheets_values`, I can get all the values correctly, but I need a way to get metadata too, like each cell's last update time and coordinates. Maybe my API credentials cannot access metadata, and that's why the code above won't work. Is there a permission I need to grant to my API user? Or what else could be the problem? Thanks
You can save `d[a]` to a constant, check that, and reuse it: ``` const entry = d[a]; if (entry.prop) { entry.prop++; // OK } ``` [Playground link][1] That works even though `a` is not constant because `entry` is, so TypeScript's flow analysis can see that `entry.prop` has been narrowed. [1]: https://www.typescriptlang.org/play?target=2&module=0&ssl=11&ssc=1&pln=15&pc=1#code/JYOwLgpgTgZghgYwgAgCLAWZBvAsAKGSOQG0BrALmQGcwpQBzAXSr0OI4AcoB7TgfiogArgFsARtADcBDgF8Z+OQQIIeIWsgAmVdJmQBeHApX4ANhCxxDyAETBIo24tXrNEcFACeNrSThMisAwyAAUHnReAHTcfACUOLLEEd4xvJwA1BlSyAD0ucgA8gDSBMr4QA
In my production Next.js application using Clerk for authentication, I am unable to access my authenticated session, even though the user is successfully created in Clerk and even successfully synced to my database using a webhook. It works in localhost but in my server-side logs, I see this error > ⨯ node_modules/@clerk/nextjs/dist/esm/server/createGetAuth.js (26:12) @ eval ⨯ Error: Clerk: auth() was called but Clerk can't detect usage of authMiddleware(). Please ensure the following: - authMiddleware() is used in your Next.js Middleware. - Your Middleware matcher is configured to match this route or page. - If you are using the src directory, make sure the Middleware file is inside of it. I am not using /src and I have my middleware.ts file in the root directory of my project, alongside the app directory. I am also running on Node v21.7.1. This is my middleware.ts: ``` import { authMiddleware } from "@clerk/nextjs"; export default authMiddleware({ publicRoutes: ["/", "/sign-in", "/sign-up", "/api/webhooks(.*)", "/api/send(.*)", "/api/notification-test", "/api/upload-set(.*)"], }); export const config = { matcher: ["/((?!.+\\.[\\w]+$|_next).*)", "/"], }; ```
Clerk auth session not working in production
|next.js|middleware|clerk|
I am the admin of my company's website and I have run into a problem. I have a carousel on each category page; on my computer the rendering is perfect, but on other computers all the images are displayed, as you can see: [On my computer](https://i.stack.imgur.com/WrhSe.png) [Other computers](https://i.stack.imgur.com/8Wt3q.jpg) https://galit.co.il/%d7%9e%d7%99%d7%9d/ The problem is that I don't know how to fix it, because I can't reproduce it on my admin computer. I tried modifying it with Elementor, but I don't know how to solve it.
How to solve rendering problem on my wordpress website
|wordpress|
null
The observation is not enabled on Spring Kafka components by default. See `KafkaTemplate.setObservationEnabled(true)`. More in docs: https://docs.spring.io/spring-kafka/reference/3.1-SNAPSHOT/kafka/micrometer.html#observation
Both your implementations use a stack. The first uses the callstack, while the other uses an explicit stack. There are several differences that impact performance: 1. The call stack is managed implicitly, using compiled code (CPython), which gives an advantage over a stack that must be managed with Python code (`append`, `pop`) 2. The second implementation does not translate the recursive pattern 1-to-1. It populates the stack in *advance* for what needs to be visited after the first child's subtree has been visited, meaning the stack footprint could be larger than in the recursive version. Besides the overhead of the `reversed` iterator, we need to take into account that memory allocation also costs time. We cannot do much about the first point, so it is not expected to do better with a stack-based iterative implementation. Still, to tackle the second point, we could try to write an iterative version that more closely mimics the recursive version: ``` def itr_dfs2(graph, start): visited = set() stack = deque() current = iter([start]) try: while True: for node in current: if node not in visited: visited.add(node) stack.append(current) current = iter(graph[node]) break else: current = stack.pop() except IndexError: pass ``` Here the stack consists of iterators, not nodes. This more closely resembles the recursive implementation, where the state of the `for` loop iterations are part of the stack frame, and these loops get resumed one a recursive call comes back to it. To time your code it is better to make use of `timeit` and take the minimum of several measurements (using `timeit.repeat`) so to exclude a bit the effect of other load on the machine it runs on. 
So I changed the time measuring code to this: ``` import timeit n = 10000 r = 5 t1 = min(timeit.repeat(lambda: rec_dfs(graph, 'A'), repeat=r, number=n)) t2 = min(timeit.repeat(lambda: itr_dfs(graph, 'A'), repeat=r, number=n)) t3 = min(timeit.repeat(lambda: itr_dfs2(graph, 'A'), repeat=r, number=n)) print(t1) print(t2) print(t3) ``` The results I got showed that `t3` was often (but not always) somewhere between `t1` and `t2`.
How to return HTTP Get request response from models class in Django project
|python|django|openai-api|openai-assistants-api|
I was thinking of starting a new project: I want to create a website where we can draw an image in Paint and then, using that image, generate a Lottie animation. For that, I wanted to know how the JSON for a Lottie animation is generated. What is the logic behind Lottie animations, and how can I give slight motion to images? Are there any APIs I can use? If anyone can help, we can collaborate and create a website or an Android application for this.
How to create a lottie animation
|android|web|lottie|image-generation|dynamic-image-generation|
null
I am making a java script in which I need to occasionally change the headers of a specific request, however the token I am passing vs what i need it to be is not changing. (from what I can see) (sorry for the terrible explanation, I don't even know what I'm doing to be honest.) Example: `var abc23token = token1` shortly after, in the beginning of the script ``` var headers = { headers: { 'Authorization': abc23token, // SHOULD change 'Content-Type': 'application/json' // doesnt change } } ``` Then, wayyy later in the script `abc23token = token2` however, the request still sends as token1. token1 and token2 ARE variables, shouldn't matter as its just a string like this ``` var token1 = "OT...Q" var token2 = "OD...yE" ``` do i have to redefine headers? should I just ditch the idea of using a variable for tokens and use a plain string for abc23token? please note that i don't use javascript very much and I am very much a noob at this, and I apologize if this question is completely stupid or if its inefficient to do it this way.
Variable inside a Variable, not updating
|javascript|node.js|
null
I am recently trying to self-study OS and playing with the xv6 OS for teaching. The version I am using is the x86 one from [GitHub](https://github.com/mit-pdos/xv6-public). What I've been doing is try to use 2-level paging when initiating the system. For that purpose, I created a page table and a page directory in main.c as follows: ``` __attribute__((__aligned__(PGSIZE))) pde_t entrypgdir[NPDENTRIES]; __attribute__((__aligned__(PGSIZE))) pte_t entrypgtable[NPTENTRIES]; ``` Then in entry.S, I have got some assembly codes to initialize these two arrays as follows: ``` # The xv6 kernel starts executing in this file. This file is linked with # the kernel C code, so it can refer to kernel symbols such as main(). # The boot block (bootasm.S and bootmain.c) jumps to entry below. # Multiboot header, for multiboot boot loaders like GNU Grub. # http://www.gnu.org/software/grub/manual/multiboot/multiboot.html # # Using GRUB 2, you can boot xv6 from a file stored in a # Linux file system by copying kernel or kernelmemfs to /boot # and then adding this menu entry: # # menuentry "xv6" { # insmod ext2 # set root='(hd0,msdos1)' # set kernel='/boot/kernel' # echo "Loading ${kernel}..." # multiboot ${kernel} ${kernel} # boot # } #include "asm.h" #include "memlayout.h" #include "mmu.h" #include "param.h" # Multiboot header. Data to direct multiboot loader. .p2align 2 .text .globl multiboot_header multiboot_header: #define magic 0x1badb002 #define flags 0 .long magic .long flags .long (-magic-flags) # By convention, the _start symbol specifies the ELF entry point. # Since we haven't set up virtual memory yet, our entry point is # the physical address of 'entry'. .globl _start _start = V2P_WO(entry) # Entering xv6 on boot processor, with paging off. 
.globl entry entry: # Turn on page size extension for 4Mbyte pages // I comment the following to not use bigger pages #movl %cr4, %eax #orl $(CR4_PSE), %eax #movl %eax, %cr4 # Set page directory //My assembly code to initialize a page table starts #set up the first page table page xor %esi, %esi 1: movl %esi, %eax shll $12, %eax orl $(PTE_P|PTE_W), %eax movl $(V2P_WO(entrypgtable)), %edi movl %esi, %ebx shll $2, %ebx addl %ebx, %edi movl %eax, (%edi) incl %esi cmpl $1024, %esi jb 1b # Set page directory movl $0, %esi movl %esi, %ebx shll $2, %ebx movl $(V2P_WO(entrypgdir)), %edi addl %ebx, %edi movl $(V2P_WO(entrypgtable)), (%edi) orl $(PTE_P | PTE_W), (%edi) movl $512, %esi movl %esi, %ebx shll $2, %ebx movl $(V2P_WO(entrypgdir)), %edi addl %ebx, %edi movl $(V2P_WO(entrypgtable)), (%edi) orl $(PTE_P | PTE_W), (%edi) //My assembly code to initialize a page table ends movl $(V2P_WO(entrypgdir)), %eax movl %eax, %cr3 # Turn on paging. movl %cr0, %eax orl $(CR0_PG|CR0_WP), %eax movl %eax, %cr0 # Set up the stack pointer. movl $(stack + KSTACKSIZE), %esp # Jump to main(), and switch to executing at # high addresses. The indirect call is needed because # the assembler produces a PC-relative instruction # for a direct jump. mov $main, %eax jmp *%eax .comm stack, KSTACKSIZE ``` As you can see, the basic idea is simple: the original xv6 system uses 4mb big pages when initializing the system, so that they only need the page directory to map the kernel code to the first physical page. I disable that option and use 4kb pages, then I create a new page table accordingly, then put the new page table into the page directory array. 
If my understanding is correct, the whole process should be equivalent to the following C code: ``` void init_entrypgdir(void) { for (int i = 0; i < NPTENTRIES; i++) { entrypgtable[i] = (i << 12) | PTE_P | PTE_W; } entrypgdir[0] = (V2P(entrypgtable)) | PTE_P | PTE_W; entrypgdir[KERNBASE>>PDXSHIFT] = (V2P(entrypgtable)) | PTE_P | PTE_W; } ``` Now comes the problem: what if I want to get rid of self-written assembly code and use the above C code to initialize xv6? Is there a way to replace my assembly code with my C code and still successfully initialize the OS? So far, I have tried - .globl init_entrypgdir then call init_entrypgdir then ret - mimic how the original system called main but replace main with init_entrypgdir: mov $init_entrypgdir, %eax jmp *%eax Well, at least no error occurred, but it doesn't seem to work, and xv6 freezes on startup. I have to admit I am not a pro in assembly, maybe not even in C. Any help would be deeply appreciated!
How to call a C language function from x86 assembly code?
|c|assembly|x86|qemu|xv6|
null
I want to send events and messages of different object types using Kafka topics. So I have producer configuration class in which I create the beans of the class `KafkaTemplate` and `ProducerFactory`: @Bean public KafkaTemplate<String, BeltMessage> kafkaBeltMessageTemplate(final ProducerFactory<String, BeltMessage> producerFactory) { return new KafkaTemplate<>(producerFactory); } @Bean public KafkaTemplate<String, String> kafkaStringTemplate(final ProducerFactory<String, String> producerFactory) { return new KafkaTemplate<>(producerFactory); } @Bean public KafkaTemplate<String, BeltEventDescription> kafkaBeltEventDescriptionTemplate(final ProducerFactory<String, BeltEventDescription> producerFactory) { return new KafkaTemplate<>(producerFactory); } @Bean public KafkaTemplate<String, BeltItemEvent> kafkaBeltItemEventTemplate(final ProducerFactory<String, BeltItemEvent> producerFactory) { return new KafkaTemplate<>(producerFactory); } @Bean public ProducerFactory<String, BeltMessage> producerBeltMessageFactory() { return new DefaultKafkaProducerFactory<>(Map.of( ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress, ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class, ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class )); } @Bean public ProducerFactory<String, String> producerStringFactory() { return new DefaultKafkaProducerFactory<>(Map.of( ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress, ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class, ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class )); } @Bean public ProducerFactory<String, BeltEventDescription> producerBeltEventDescriptionFactory() { return new DefaultKafkaProducerFactory<>(Map.of( ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress, ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class, ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class )); } @Bean public ProducerFactory<String, 
BeltItemEvent> producerBeltItemEventFactory() { return new DefaultKafkaProducerFactory<>(Map.of( ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress, ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class, ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class )); } Now, it's the same implementation for everyone of them and it's just code duplicated for each bean separately, the only change is the template parameter. Is there a way to create a generic bean like `KafkaTemplate<String, Object>`? I tried to abstract out an interface from `BeltItemEvent` and `BeltEventDescription`, but then I got an error in runtime because it could not be serialized by Kafka. > `Servlet.service()` for servlet [`dispatcherServlet]` in context with path [] threw exception [Request processing failed: `org.apache.kafka.common.errors.SerializationException`: Can't convert value of class `com.example.events.BeltEventDescription` to class `org.apache.kafka.common.serialization.StringSerializer` specified in `value.serializer`] with root cause > `java.lang.ClassCastException`: class `com.example.events.BeltEventDescription` cannot be cast to class `java.lang.String` (`com.example.events.BeltEventDescription` is in unnamed module of loader 'app'; `java.lang.String` is in module `java.base` of loader 'bootstrap') But there is a lot of code duplication in case I want to create new events that declare the various contracts I might need in the future.
Customizing Dawn Theme and in multicolumn I have added bullet text but am not able to change icons before the text. Font Awesome Icons are not visible. I have added CDN of font awesome. I am looking for a similar format as icons text (Need one column)only. I have added this code but not showing any icons. <div class="multicolumn-card__info"> {%- if block.settings.title != blank -%} <h3 class="inline-richtext">{{ block.settings.title }}</h3> {%- endif -%} {%- if block.settings.text != blank -%} <div class="rte"> {{ block.settings.text | replace: '<ul>', '<ul class="fa-ul">' | replace: '<li>', '<li><i class="fa-li fas fa-check"></i>' }} </div> {%- endif -%} {%- if block.settings.link_label != blank -%} <a class="link animate-arrow" {% if block.settings.link == blank %} role="link" aria-disabled="true" {% else %} href="{{ block.settings.link }}" {% endif %} > {{- block.settings.link_label | escape -}} <span class="icon-wrap">&nbsp;{% render 'icon-arrow' %}</span> </a> {%- endif -%} </div> [![enter image description here][1]][1] [1]: https://i.stack.imgur.com/spkIh.png Appreciate all the help.
Shopify Dawn Theme - Add Icons in place of Bullet points in multicolumn layout
|shopify|shopify-app|shopify-api|shopify-template|shopify-storefront-api|
I imported tensorflow as: ```import tensorflow as tf ``` and got this error: ``` Traceback (most recent call last): File "C:\Users\USER\Downloads\import_tensorflow.py", line 1, in <module> import tensorflow as tf File "C:\Users\USER\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\__init__.py", line 24, in <module> from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\USER\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\python\__init__.py", line 49, in <module> from tensorflow.python import pywrap_tensorflow File "C:\Users\USER\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\python\pywrap_tensorflow.py", line 58, in <module> from tensorflow.python.pywrap_tensorflow_internal import * File "C:\Users\USER\AppData\Local\Programs\Python\Python312\Lib\site-packages\tensorflow\python\pywrap_tensorflow_internal.py", line 114 def TFE_ContextOptionsSetAsync(arg1, async): ^^^^^ SyntaxError: invalid syntax ``` The results of ```pip list``` and ```pip show``` show that I have tensorflow installed on my computer... pip list: ```tensorflow 1.8.0 ``` pip show tensorflow: ```Name: tensorflow Version: 1.8.0 Summary: TensorFlow helps the tensors flow Home-page: https://www.tensorflow.org/ Author: Google Inc. Author-email: opensource@google.com License: Apache 2.0 ```
How to solve Syntax error when importing tensorflow 1.8.0?
|python|tensorflow|pip|module|python-import|
In the C++ standard, `max_size()` is defined as > *Returns*: `distance(begin(), end())` for the largest possible container. \- [[container.reqmts] `max_size`](https://eel.is/c++draft/container.reqmts#lib:max_size,containers) Initially, this looks good, and there is no *Preconditions* paragraph. However, `std::distance` returns `(last - first)` ([[iterator.operations] p5](https://eel.is/c++draft/iterator.operations#5)) and this is possibly undefined behavior for random access iterators (note the *Preconditions* in [[iterator.requirements] `b - a`](http://eel.is/c++draft/iterator.requirements#tab:randomaccessiterator-row-7)). What is the standard actually trying to say here? 1. `max_size()` has no preconditions, so while it isn't *explicitly* stated, the largest possible container is required to have a `max_size()` so that `distance(begin(), end())` is well-defined. 2. `max_size()` is missing a *Preconditions* paragraph, and it's possible for `max_size()` to be undefined behavior if `end() - begin()` and by proxy, `std::distance(begin(), end())` is UB. 3. Something else. The underlying question is: > Does the absence of a *Preconditions* paragraph impose requirements on the implementation, or is it an editorial mistake? There might not be a clear answer to this. `std::distance` is defined in terms of `(last - first)`, which can be undefined behavior, but `std::distance` also doesn't have a precondition which requires that the iterator difference is representable. It's not plausible that `&b - &a` is UB but `std::distance(&a, &b)` is well-defined. Based on this, I think it's an editorial mistake, `max_size()` is missing a precondition, and can be undefined. However, I may be missing something here.
<!-- begin snippet: js hide: true console: false babel: null --> <!-- language: lang-js --> "use strict"; window.addEventListener('load', onLoaded, false); // called when the window has finished loading // - attaches a click handler to all buttons. // buttons are selected with the css selector 'button' // 'button.someClass' would select all 'button' elements // that had the class 'someClass' function onLoaded(evt) { let buttons = document.querySelectorAll('button'); // this code just does the same thing as the line below // buttons.forEach( // function(btn) // { // btn.addEventListener('click', onCopyBtnClick, false); // } // ); buttons.forEach( btn => btn.addEventListener('click', onCopyBtnClick, false) ); } function onCopyBtnClick(evt) { // get reference to btn that fired the event let btn = this; // the textarea may not be the element immediately following // the button. In the code below, there's actually a // text-node between the two walk through the list of // siblings until we get to the first text-area after the button. let tmp = btn.nextSibling; while (tmp.nodeName != 'TEXTAREA') tmp = tmp.nextSibling; let targetTextarea = tmp; // select and copy the text targetTextarea.select(); document.execCommand('copy'); } <!-- language: lang-html --> <button>COPY</button> <textarea rows='1' cols='25'>Test123 </textarea> <button>COPY</button> <textarea rows='1' cols='25'>Test456 </textarea> <!-- end snippet -->
|razor|dependency-injection|blazor|inversion-of-control|
Using Pycharm Community Edition 2023.3.2 with Micropython 1.4.3-2023.3 installed and enabled in Pycharm. I want to utilize the 2 core multithreading on a Raspberry Pi Pico W using the experimental "_thread" library. When I try to use a method from the module, Pycharm says there is no reference to the method, even though it can find _thread.pyi: [_thread imported properly but methods](https://i.stack.imgur.com/rsAUQ.png) When I dot search for any available methods only these 4 dunder methods show up: [enter image description here](https://i.stack.imgur.com/Yfwch.png) I think it's just Pycharm's referencing that is the problem, and the Pico W would still execute the code, but it would be nice to have some kind of reference to double check what I'm doing is in line with the syntax. I have tried repairing the IDE up to invalidating the caches and restarting. I have also redownloaded and reinstalled the same Micropython version on 2 different devices with the same version of Pycharm. I don't know any further steps of troubleshooting this.
Micropython: _thread module imported but not showing methods