text
stringlengths
1
2.12k
source
dict
python, python-3.x, console, ssh, curses If it failed the first time why do you think it won't fail after you wait for .05? What is the cause of the problem? elif KEY_PRESS == 10 and self.position == 1: self.status = 0 self.action = 1 What do any of those numbers mean? Use constants or enums instead of plain numbers to convey such information. There is much more to cover but I think fixing problems I pointed out will be a good starting point. To sum it up: Meaningful names: names should convey the meaning, it should be clear what information fields contain, what functions do, what classes are for by looking at their names Project structure: split your code into files to make it less crowded and ideally less interdependent Exceptions handling: don't silence exceptions, always show what exceptions you're expecting for in a try block Magical numbers: don't leave undocumented numbers in your code, it's impossible to know what a particular number means Good luck!
{ "domain": "codereview.stackexchange", "id": 43973, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, console, ssh, curses", "url": null }
c, multithreading, sorting, radix-sort, c89 Title: Fast portable, parallel MSD radix sort for unsigned keys in C89 Question: Now I have this portable, parallel MSD radix sort for unsigned keys. It exhibits linear speedup on small values of \$P\$ and has a running time of $$ \Theta(N/P + P), $$ where \$P\$ is the number of processors available. What comes to portability, my implementation runs on Windows, Linux and macOSX. The entire project lives in GitHub; it contains the files for Visual Studio 2022 and a funky Makefile for *nix. My code looks like this: #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #ifdef _WIN32 #include <windows.h> #elif defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) #include <limits.h> #include <pthread.h> #include <sys/time.h> #include <unistd.h> #else #error "Unsupported platform." #endif #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #define BUCKETS 256 static const size_t BITS_PER_BUCKET = 8; static const size_t BUCKET_MASK = 0xff; static const size_t MERGESORT_THRESHOLD = 4096; static const size_t INSERTION_SORT_THRESHOLD = 16; static const size_t THREAD_THRESHOLD = 65536; /****************************************************************************** * Array list data structure. * ******************************************************************************/ typedef struct { void** data; size_t size; } array_t; static void array_t_init(array_t* array, size_t capacity) { array->size = 0; array->data = malloc(capacity * sizeof(void*)); } static void array_t_add(array_t* array, void* datum) { array->data[array->size++] = datum; } static void* array_t_get(array_t* array, size_t index) { return array->data[index]; } static void array_t_shuffle(array_t* array) { size_t i; size_t j; void* temp; srand(time(NULL));
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 srand(time(NULL)); for (i = 0; i != array->size - 1; ++i) { j = i + rand() % (array->size - i); temp = array->data[i]; array->data[i] = array->data[j]; array->data[j] = temp; } } static size_t array_t_size(array_t* array) { return array->size; } static void array_t_destruct(array_t* array) { free(array->data); } /****************************************************************************** * Thread-specific data structures. * ******************************************************************************/ typedef struct { size_t local_bucket_size_map[BUCKETS]; unsigned* source; size_t recursion_deph; size_t from_index; size_t to_index; } bucket_size_counter_thread_data; typedef struct { unsigned* source; unsigned* target; size_t* start_index_map; size_t* processed_map; size_t recursion_depth; size_t from_index; size_t to_index; } bucket_inserter_thread_data; typedef struct { unsigned* source; unsigned* target; size_t threads; size_t recursion_depth; size_t from_index; size_t to_index; } task; /****************************************************************************** * End of data structures. * ******************************************************************************/ size_t get_number_of_cpus() { #ifdef _WIN32 SYSTEM_INFO system_info; GetSystemInfo(&system_info); return (size_t) system_info.dwNumberOfProcessors; #elif defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) return (size_t) sysconf(_SC_NPROCESSORS_ONLN); #endif } static size_t get_bucket_index(unsigned datum, size_t recursion_depth) { size_t bit_shift = CHAR_BIT * sizeof(unsigned) - (recursion_depth + 1) * BITS_PER_BUCKET; return (((size_t) datum) >> bit_shift) & BUCKET_MASK; }
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 return (((size_t) datum) >> bit_shift) & BUCKET_MASK; } static void parallel_radix_sort_impl(unsigned* source, unsigned* target, size_t threads, size_t recursion_depth, size_t from_index, size_t to_index); static void radix_sort_impl_no_threads(unsigned* source, unsigned* target, size_t recursion_depth, size_t from_index, size_t to_index); static void process_bucket_size_counter_thread( bucket_size_counter_thread_data* data) { size_t i; memset(data->local_bucket_size_map, 0, BUCKETS * sizeof(size_t)); for (i = data->from_index; i != data->to_index; ++i) { data->local_bucket_size_map[ get_bucket_index( data->source[i], data->recursion_deph)]++; } } static void process_bucket_inserter_thread(bucket_inserter_thread_data* data) { size_t bucket_index; size_t i; unsigned datum; for (i = data->from_index; i != data->to_index; ++i) { datum = data->source[i]; bucket_index = get_bucket_index(datum, data->recursion_depth); data->target[data->start_index_map[bucket_index] + data->processed_map[bucket_index]++] = datum; } } static void process_sorter_thread(array_t* data) { size_t i; task* t; for (i = 0; i != array_t_size(data); ++i) { t = array_t_get(data, i);
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 for (i = 0; i != array_t_size(data); ++i) { t = array_t_get(data, i); if (t->threads > 1) { parallel_radix_sort_impl(t->source, t->target, t->threads, t->recursion_depth, t->from_index, t->to_index); } else { radix_sort_impl_no_threads(t->source, t->target, t->recursion_depth, t->from_index, t->to_index); } } } #ifdef _WIN32 static DWORD WINAPI count_bucket_sizes_thread_func_win(LPVOID parameter) { bucket_size_counter_thread_data* thread_data = (bucket_size_counter_thread_data*) parameter; process_bucket_size_counter_thread(thread_data); return 0; } #elif defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) static void* count_bucket_sizes_thread_func_pthreads(void* parameter) { bucket_size_counter_thread_data* thread_data = (bucket_size_counter_thread_data*) parameter; process_bucket_size_counter_thread(thread_data); return NULL; } #endif #ifdef _WIN32 static DWORD WINAPI insert_to_buckets_thread_func_win(LPVOID parameter) { bucket_inserter_thread_data* thread_data = (bucket_inserter_thread_data*) parameter; process_bucket_inserter_thread(thread_data); return 0; } #elif defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) static void* insert_to_buckets_thread_func_pthreads(void* parameter) { bucket_inserter_thread_data* thread_data = (bucket_inserter_thread_data*) parameter; process_bucket_inserter_thread(thread_data); return NULL; } #endif #ifdef _WIN32
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 process_bucket_inserter_thread(thread_data); return NULL; } #endif #ifdef _WIN32 static DWORD WINAPI sort_buckets_thread_func_win(LPVOID parameter) { array_t* thread_data = (array_t*) parameter; process_sorter_thread(thread_data); return 0; } #elif defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) static void* sort_buckets_thread_func_pthreads(void* parameter) { array_t* thread_data = (array_t*) parameter; process_sorter_thread(thread_data); return NULL; } #endif static void insertion_sort(unsigned* data, size_t length) { size_t i; signed long j; unsigned datum; for (i = 1; i != length; ++i) { datum = data[i]; j = i - 1; while (j >= 0 && data[j] > datum) { data[j + 1] = data[j]; --j; } data[j + 1] = datum; } } static void merge(unsigned* source, unsigned* target, size_t left_index, size_t left_bound, size_t right_bound) { size_t right_index = left_bound; size_t target_index = left_index; while (left_index < left_bound && right_index < right_bound) { target[target_index++] = source[left_index] < source[right_index] ? source[left_index++] : source[right_index++]; } memcpy(target + target_index, source + left_index, sizeof(unsigned) * (left_bound - left_index)); memcpy(target + target_index, source + right_index, sizeof(unsigned) * (right_bound - right_index)); } static void radix_sort_mergesort(unsigned* source, unsigned* target, size_t recursion_depth, size_t from_index, size_t to_index) { unsigned* s; unsigned* t; unsigned* temp; int even;
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 unsigned* s; unsigned* t; unsigned* temp; int even; size_t i; size_t left_bound; size_t left_index; size_t offset = from_index; size_t passes = 0; size_t range_length; size_t right_bound; size_t runs; size_t run_index; size_t run_width; range_length = to_index - from_index; s = source; t = target; runs = range_length / INSERTION_SORT_THRESHOLD; for (i = 0; i != runs; ++i) { insertion_sort(source + offset, INSERTION_SORT_THRESHOLD); offset += INSERTION_SORT_THRESHOLD; } if (range_length % INSERTION_SORT_THRESHOLD != 0) { /* Sort the rightmost run that is smaller than */ /* INSERTION_SORT_THRESHOLD. */ insertion_sort(source + offset, to_index - offset); runs++; } run_width = INSERTION_SORT_THRESHOLD; while (runs != 1) { passes++; run_index = 0; for (; run_index < runs - 1; run_index += 2) { left_index = from_index + run_index * run_width; left_bound = left_index + run_width; right_bound = MIN(left_bound + run_width, to_index); merge(s, t, left_index, left_bound, right_bound); } if (run_index < runs) { memcpy(t + from_index + run_index * run_width, s + from_index + run_index * run_width, sizeof(unsigned) * (range_length - run_index * run_width)); } runs = (runs / 2) + (runs % 2 == 0 ? 0 : 1); temp = s; s = t; t = temp; run_width *= 2; } even = (passes % 2 == 0) ? 1 : 0;
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 even = (passes % 2 == 0) ? 1 : 0; if (recursion_depth % 2 == 1) { if (even == 1) { memcpy(target + from_index, /* Destination */ source + from_index, /* Source */ sizeof(unsigned) * (to_index - from_index)); } } else { /* Here, recursion_depth % 2 == 0 holds: */ if (even == 0) { memcpy(source + from_index, /* Destination */ target + from_index, /* Source */ sizeof(unsigned) * (to_index - from_index)); } } } static void radix_sort_impl_no_threads(unsigned* source, unsigned* target, size_t recursion_depth, size_t from_index, size_t to_index) { size_t bucket_key; size_t i; size_t bucket_size_map[BUCKETS]; size_t processed_map[BUCKETS]; size_t range_length; size_t start_index_map[BUCKETS]; unsigned datum; range_length = to_index - from_index; if (range_length <= MERGESORT_THRESHOLD) { radix_sort_mergesort(source, target, recursion_depth, from_index, to_index); return; } memset(bucket_size_map, 0, BUCKETS * sizeof(size_t)); memset(start_index_map, 0, BUCKETS * sizeof(size_t)); memset(processed_map, 0, BUCKETS * sizeof(size_t)); /* Compute the size of each bucket: */ for (i = from_index; i != to_index; i++) { bucket_size_map[get_bucket_index(source[i], recursion_depth)]++; } /* Initialize thee start index map: */ start_index_map[0] = from_index; for (i = 1; i != BUCKETS; ++i) { start_index_map[i] = start_index_map[i - 1] + bucket_size_map[i - 1]; }
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 /* Insert the data from 'source' into their */ /* respective position in 'target': */ for (i = from_index; i != to_index; ++i) { datum = source[i]; bucket_key = get_bucket_index(datum, recursion_depth); target[start_index_map[bucket_key] + processed_map[bucket_key]++] = datum; } if (recursion_depth == sizeof(unsigned) - 1) { memcpy(source + from_index, /* Destination */ target + from_index, /* Source */ sizeof(unsigned) * (to_index - from_index)); /* There is nowhere to recur, return. */ return; } for (i = 0; i != BUCKETS; ++i) { if (bucket_size_map[i] != 0) { radix_sort_impl_no_threads(target, source, recursion_depth + 1, start_index_map[i], start_index_map[i] + bucket_size_map[i]); } } } void radix_sort(unsigned* data, size_t length) { unsigned* buffer; if (length < 2) { return; } buffer = malloc(sizeof(unsigned) * length); radix_sort_impl_no_threads(data, buffer, 0, 0, length); free(buffer); }
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 static void parallel_radix_sort_impl(unsigned* source, unsigned* target, size_t threads, size_t recursion_depth, size_t from_index, size_t to_index) { size_t bucket_key; size_t f; size_t i; size_t idx; size_t j; size_t list_index; size_t number_of_nonempty_buckets; size_t optimal_subrange_length; size_t packed; size_t range_length; size_t spawn_degree; size_t start; size_t subrange_length; size_t sz; size_t sz2; size_t tmp; size_t* partial_bucket_size_map; size_t* thread_count_map; size_t bucket_size_map[BUCKETS] = { 0 }; size_t start_index_map[BUCKETS]; size_t** processed_map; bucket_size_counter_thread_data* bucket_size_counter_threads_data; bucket_inserter_thread_data* bucket_inserter_threads_data; array_t array_of_task_arrays; array_t bucket_index_list_array; array_t non_empty_bucket_indices; array_t* arr2; task* t; #ifdef _WIN32 HANDLE windows_thread_handle; HANDLE* win_thread_handles; #elif defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) pthread_t pthread_handle; pthread_t* unix_thread_ids; #endif range_length = to_index - from_index; if (range_length <= MERGESORT_THRESHOLD) { radix_sort_mergesort(source, target, recursion_depth, from_index, to_index); return; } if (threads < 2) { radix_sort_impl_no_threads(source, target, recursion_depth, from_index, to_index); return; }
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 bucket_size_counter_threads_data = malloc(threads * sizeof(*bucket_size_counter_threads_data)); start = from_index; subrange_length = range_length / threads; #ifdef _WIN32 win_thread_handles = malloc(threads * sizeof(HANDLE)); #elif defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) unix_thread_ids = malloc(threads * sizeof(pthread_t)); #endif for (i = 0; i != threads - 1; ++i) { bucket_size_counter_threads_data[i].source = source; bucket_size_counter_threads_data[i].recursion_deph = recursion_depth; bucket_size_counter_threads_data[i].from_index = start; bucket_size_counter_threads_data[i].to_index = start += subrange_length; memset(&(bucket_size_counter_threads_data[i] .local_bucket_size_map), 0, BUCKETS * sizeof(size_t)); #ifdef _WIN32 win_thread_handles[i] = CreateThread(NULL, 0, count_bucket_sizes_thread_func_win, &bucket_size_counter_threads_data[i], 0, NULL); #elif defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) pthread_create(&pthread_handle, NULL, count_bucket_sizes_thread_func_pthreads, &bucket_size_counter_threads_data[i]); unix_thread_ids[i] = pthread_handle; #endif } /* Process the rightmost bucket in THIS thread. No need to spawn */ /* any more. */ bucket_size_counter_threads_data[threads - 1].source = source; bucket_size_counter_threads_data[threads - 1].recursion_deph = recursion_depth; bucket_size_counter_threads_data[threads - 1].from_index = start; bucket_size_counter_threads_data[threads - 1].to_index = to_index;
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 memset(&(bucket_size_counter_threads_data[threads - 1] .local_bucket_size_map), 0, BUCKETS * sizeof(size_t)); /* Run the rightmost thread routine in THIS thread. */ /* No need to span another thread: */ process_bucket_size_counter_thread( &bucket_size_counter_threads_data[threads - 1]); /* Wait for all the bucket counters: */ for (i = 0; i != threads - 1; ++i) { #ifdef _WIN32 WaitForSingleObject(win_thread_handles[i], INFINITE); #elif defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) pthread_join(unix_thread_ids[i], NULL); #endif } /* Build the global bucket size map for the entire sorting range: */ for (i = 0; i != threads; ++i) { for (j = 0; j != BUCKETS; ++j) { bucket_size_map[j] += bucket_size_counter_threads_data[i].local_bucket_size_map[j]; } } number_of_nonempty_buckets = 0; for (i = 0; i != BUCKETS; ++i) { if (bucket_size_map[i] != 0) { number_of_nonempty_buckets++; } } spawn_degree = MIN(number_of_nonempty_buckets, threads); /* Prepare the starting indices of each bucket: */ start_index_map[0] = from_index; for (i = 1; i != BUCKETS; ++i) { start_index_map[i] = start_index_map[i - 1] + bucket_size_map[i - 1]; } processed_map = malloc(spawn_degree * sizeof(size_t*)); for (i = 0; i != spawn_degree; ++i) { processed_map[i] = calloc(BUCKETS, sizeof(size_t)); } /* Make the preprocessed_map of each thread independent of the other. */ for (i = 1; i != spawn_degree; ++i) { partial_bucket_size_map = (bucket_size_counter_threads_data[i - 1].local_bucket_size_map); for (j = 0; j != BUCKETS; ++j) { processed_map[i][j] = processed_map[i - 1][j] + partial_bucket_size_map[j]; } }
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 start = from_index; bucket_inserter_threads_data = malloc(spawn_degree * sizeof(bucket_inserter_thread_data)); for (i = 0; i != spawn_degree - 1; ++i) { bucket_inserter_threads_data[i].start_index_map = start_index_map; bucket_inserter_threads_data[i].processed_map = processed_map[i]; bucket_inserter_threads_data[i].source = source; bucket_inserter_threads_data[i].target = target; bucket_inserter_threads_data[i].recursion_depth = recursion_depth; bucket_inserter_threads_data[i].from_index = start; bucket_inserter_threads_data[i].to_index = start += subrange_length; #ifdef _WIN32 win_thread_handles[i] = CreateThread(NULL, 0, insert_to_buckets_thread_func_win, &bucket_inserter_threads_data[i], 0, NULL); #elif defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) pthread_create(&pthread_handle, NULL, insert_to_buckets_thread_func_pthreads, &bucket_inserter_threads_data[i]); unix_thread_ids[i] = pthread_handle; #endif } /* Process the rightmost bucket in THIS thread. No need to spawn */ /* any more. */ bucket_inserter_threads_data[spawn_degree - 1].start_index_map = start_index_map; bucket_inserter_threads_data[spawn_degree - 1].processed_map = processed_map[spawn_degree - 1]; bucket_inserter_threads_data[spawn_degree - 1].source = source; bucket_inserter_threads_data[spawn_degree - 1].target = target; bucket_inserter_threads_data[spawn_degree - 1].recursion_depth = recursion_depth; bucket_inserter_threads_data[spawn_degree - 1].from_index = start; bucket_inserter_threads_data[spawn_degree - 1].to_index = to_index;
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 process_bucket_inserter_thread( &bucket_inserter_threads_data[spawn_degree - 1]); /* Wait for all the bucket inserters: */ for (i = 0; i != spawn_degree - 1; ++i) { #ifdef _WIN32 WaitForSingleObject(win_thread_handles[i], INFINITE); #elif defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) pthread_join(unix_thread_ids[i], NULL); #endif } free(bucket_size_counter_threads_data); free(bucket_inserter_threads_data); for (i = 0; i != spawn_degree; ++i) { free(processed_map[i]); } free(processed_map); if (recursion_depth == sizeof(unsigned) - 1) { /* Nowhere to recur. */ return; } array_t_init(&bucket_index_list_array, spawn_degree); for (i = 0; i != spawn_degree; ++i) { array_t* bucket_key_array = malloc(sizeof(array_t)); array_t_init(bucket_key_array, number_of_nonempty_buckets); array_t_add(&bucket_index_list_array, bucket_key_array); } thread_count_map = calloc(spawn_degree, sizeof(size_t)); for (i = 0; i != spawn_degree; ++i) { thread_count_map[i] = threads / spawn_degree; } for (i = 0; i != threads % spawn_degree; ++i) { ++thread_count_map[i]; } array_t_init(&non_empty_bucket_indices, number_of_nonempty_buckets); for (bucket_key = 0; bucket_key != BUCKETS; ++bucket_key) { if (bucket_size_map[bucket_key] != 0) { array_t_add(&non_empty_bucket_indices, (void*) bucket_key); } } array_t_shuffle(&non_empty_bucket_indices); f = 0; j = 0; list_index = 0; optimal_subrange_length = range_length / spawn_degree; packed = 0; sz = array_t_size(&non_empty_bucket_indices); while (j != sz) { size_t bucket_key = (size_t) array_t_get(&non_empty_bucket_indices, j++); tmp = bucket_size_map[bucket_key]; packed += tmp;
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 tmp = bucket_size_map[bucket_key]; packed += tmp; if (packed >= optimal_subrange_length || j == array_t_size(&non_empty_bucket_indices)) { packed = 0; for (i = f; i != j; ++i) { size_t bucket_key = (size_t) array_t_get(&non_empty_bucket_indices, i); array_t* arr = array_t_get(&bucket_index_list_array, list_index); array_t_add(arr, (void*) bucket_key); } ++list_index; f = j; } } array_t_init(&array_of_task_arrays, spawn_degree); for (i = 0; i != spawn_degree; ++i) { array_t* task_array = malloc(sizeof(array_t)); array_t_init(task_array, BUCKETS); arr2 = (array_t*) array_t_get(&bucket_index_list_array, i); sz = array_t_size(arr2); for (idx = 0; idx != sz; ++idx) { bucket_key = (size_t) array_t_get(arr2, idx); t = malloc(sizeof(task)); t->source = target; t->target = source; t->threads = thread_count_map[i]; t->recursion_depth = recursion_depth + 1; t->from_index = start_index_map[bucket_key]; t->to_index = start_index_map[bucket_key] + bucket_size_map[bucket_key]; array_t_add(task_array, t); } array_t_add(&array_of_task_arrays, task_array); } for (i = 0; i != spawn_degree - 1; ++i) { array_t* task_array = array_t_get(&array_of_task_arrays, i); #ifdef _WIN32 win_thread_handles[i] = CreateThread(NULL, 0, sort_buckets_thread_func_win, task_array, 0, NULL); #elif defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 #elif defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) pthread_create(&pthread_handle, NULL, sort_buckets_thread_func_pthreads, task_array); unix_thread_ids[i] = pthread_handle; #endif } /* Sort the rightmost thread in THIS thread. */ /* No need to spawn one more thread. */ process_sorter_thread( array_t_get( &array_of_task_arrays, spawn_degree - 1)); for (i = 0; i != spawn_degree - 1; ++i) { #ifdef _WIN32 WaitForSingleObject(win_thread_handles[i], INFINITE); #elif defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) pthread_join(unix_thread_ids[i], NULL); #endif } #ifdef _WIN32 free(win_thread_handles); #elif defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) free(unix_thread_ids); #endif sz = array_t_size(&array_of_task_arrays); for (i = 0; i != sz; ++i) { array_t* task_array = array_t_get(&array_of_task_arrays, i); sz2 = array_t_size(task_array); for (j = 0; j != sz2; ++j) { free(array_t_get(task_array, j)); } array_t_destruct(task_array); free(task_array); } sz = array_t_size(&bucket_index_list_array); for (i = 0; i != sz; ++i) { array_t* array = array_t_get(&bucket_index_list_array, i); array_t_destruct(array); free(array); } free(thread_count_map); array_t_destruct(&array_of_task_arrays); array_t_destruct(&bucket_index_list_array); array_t_destruct(&non_empty_bucket_indices); } static void bitwise_radix_sort_impl(unsigned* data, size_t bucket_length, size_t bit_index) { size_t size_of_left_bucket; size_t size_of_right_bucket; unsigned bit_is_on; unsigned datum; unsigned mask; unsigned temp;
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 unsigned bit_is_on; unsigned datum; unsigned mask; unsigned temp; if (bucket_length < 2) { /* Trivially sorted. */ return; } size_of_left_bucket = 0; size_of_right_bucket = 0; mask = 1U << bit_index; /* Bucketize the current range: */ while (size_of_left_bucket + size_of_right_bucket < bucket_length) { datum = data[size_of_left_bucket]; bit_is_on = datum & mask; if (bit_is_on) { /* Kick the datum to the right 1-bucket: */ temp = data[bucket_length - size_of_right_bucket - 1]; data[bucket_length - size_of_right_bucket - 1] = datum; data[size_of_left_bucket] = temp; size_of_right_bucket++; } else { /* Omit the datum: */ size_of_left_bucket++; } } /* Any bits to proceed? */ if (bit_index > 0) { /* Sort the 0-bucket of this recursion level: */ bitwise_radix_sort_impl(data, size_of_left_bucket, bit_index - 1); /* Sort the 1-bucket of this recursion level: */ bitwise_radix_sort_impl(data + size_of_left_bucket, size_of_right_bucket, bit_index - 1); } } void bitwise_radix_sort(unsigned* data, size_t length) { bitwise_radix_sort_impl(data, length, sizeof(unsigned) * CHAR_BIT - 1); } void parallel_radix_sort(unsigned* data, size_t length) { unsigned* buffer; size_t threads; if (length < 2) { return; } buffer = malloc(sizeof(unsigned) * length); threads = get_number_of_cpus(); threads = MIN(threads, length / THREAD_THRESHOLD); parallel_radix_sort_impl(data, buffer, threads, 0, 0, length); free(buffer); }
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, multithreading, sorting, radix-sort, c89 Typical output Number of sorting threads: 8 Number of keys to sort: 50000000 Created the arrays in 2938 milliseconds. qsort in 11015 milliseconds. bitwise_radix_sort in 6391 milliseconds. radix_sort in 1500 milliseconds. parallel_radix_sort in 422 milliseconds. Algorithms agree: 1 array1 is sorted: 1 array2 is sorted: 1 array3 is sorted: 1 array4 is sorted: 1 The above benchmark was run on a quad-core CPU. Critique request As always, I would like to hear anything that comes to mind. Answer: The function parallel_radix_sort_impl() is over 400 lines of code. There must be some way to break this up into multiple functions. This is way too complex, and too difficult to understand and maintain. Back in 1989 this might have been accepted because of processor speed but it isn't acceptable now. One of the differences between ANSI C in 1989 and K&R C in 1972 was that you could define variables where necessary rather than only at the top of the logic block. There is no reason to have all of your variables defined at the top of the function. As mentioned in another answer there are newer C standards that might be more appropriate. C99 allows you to declare for loop control variables in the for loop. This would decrease the number of lines of code. Anything that decreases the number of lines of code decreases the number of faults in the code. In 1991 I spent a year converting K&R C to ANSI C. The company I worked for felt it was necessary to convert the code. There are good reasons to follow newer standards.
{ "domain": "codereview.stackexchange", "id": 43974, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, multithreading, sorting, radix-sort, c89", "url": null }
c, integer, assembly Title: Detecting arithmetic overflow in C with NASM Question: This snippet is about detecting whether the carry flag is set or cleared. I work on Mac OSX, so my snippet supports Mac only. First, we need a routine that does the job: func.s: global _read_carry_flag _read_carry_flag: mov al, 0 jnc end mov al, 1 end: ret (Try nasm2 -f macho64 func.s for compiling into an object file.) main.c: #include <stdbool.h> #include <stdint.h> #include <stdio.h> #define A (2 * 1000 * 1000 * 1000) #define B (1 * 1000 * 1000 * 1000) extern bool read_carry_flag(); int main(int argc, char* argv[]) { int32_t a = A; int32_t b = B; int32_t ret = a + a + b; printf("%d\n", read_carry_flag()); a = A; b = 1; ret = a + b; printf("%d\n", read_carry_flag()); return 0; } (Try gcc -o prog main.c func.o for obtaining a process image.) I would like to hear about possible improvements/extensions to the idea. Answer: Wrong flag I believe you should be looking at the overflow flag instead of the carry flag, since all of your operands are signed values. On x86, the overflow flag is set if signed addition overflows. The carry flag is set if unsigned addition overflows. Not reliable As @Edward pointed out, it doesn't seem reliable to use this kind of function because you never know how the compiler is going to rearrange your code. Even without the compiler rearranging your code, the results could be confusing. From your own example: ret = a + a + b; overflow = read_overflow_flag(); Here, you are only detecting overflow over the second of the two additions. If the first addition overflowed but the second didn't, you wouldn't catch it. In other words, the assembly might look like this: add %ecx, %edx, %edx // ret = a + a <- overflow not detected here add %ecx, %ecx, %ebx // ret = ret + b seto %al // overflow = overflow flag
{ "domain": "codereview.stackexchange", "id": 43975, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, integer, assembly", "url": null }
c, integer, assembly To do it correctly, I would suggest you use a function that adds two numbers together and updates a cumulative overflow: // If the add overflows, 1 will be added to *pOverflow. int32_t add32_with_overflow(int32_t x, int32_t y, int *pOverflow); int32_t ret = 0; int overflow = 0; ret = add32_with_overflow(a, a, &overflow); ret = add32_with_overflow(ret, b, &overflow); printf("Overflow = %d\n", overflow);
{ "domain": "codereview.stackexchange", "id": 43975, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, integer, assembly", "url": null }
c++, beginner, object-oriented Title: A simple console-based banking system Question: I am an amateur. I have been coding for 3 months now. If you have any comments,suggestion or critics about my code please feel free to speak up(i am urging you to). I want to be a really good engineer. This is my header file: #ifndef BANKSYS_H #define BANKSYS_H #include <iostream> #include <string> using namespace std; class bank_application { public: void register_acc(); void deposit(); void withdraw(); void sign_in(); void menu(); private: string account_holder{}; double account_balance{}; long int account_num{}; }; void bank_application::menu() { int option_pick{}; cout << "OPTIONS \n 1> Withdraw \n 2> deposit " << endl; cin >> option_pick; switch (option_pick) { case 1: { withdraw(); break; } case 2: { deposit(); break; } } } void bank_application::register_acc() { cout << "Type in your full name " << endl; getline(cin >> std::ws, account_holder); cout << "Type in your account number " << endl; cin >> account_num; cout << "registration succesful " << endl; } void bank_application::sign_in() { string full_name{}; long int acc{}; cout << "SIGN IN FORM : " << endl; cout << "please sign into your account : type in your full name " << endl; getline(cin >> std::ws, full_name); cout << "please type in your account number " << endl; cin >> acc; if ((full_name == account_holder) && (acc == account_num)) { cout << "log in succesful ..........." << endl; } else { cout << "log in failed " << endl; EXIT_FAILURE; } } void bank_application::deposit() { long int deposit_amount{}; cout << "how much do you want to deposit " << endl; cin >> deposit_amount; account_balance += deposit_amount;
{ "domain": "codereview.stackexchange", "id": 43976, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, beginner, object-oriented", "url": null }
c++, beginner, object-oriented cout << "you have succesfully deposited " << deposit_amount << "$" << "\n " << "account balance is " << account_balance << endl; } void bank_application::withdraw() { long int withdrawal_amount{}; cout << "type in the amount you want to withdraw " << endl; cin >> withdrawal_amount; if (account_balance < withdrawal_amount) cout << "operation cancelled " << endl; else account_balance -= withdrawal_amount; cout << "you have succesfully withdrawn " << withdrawal_amount << "\n " << "account balance is " << account_balance << endl; } #endif This is my main cpp file: #include <iostream> #include <string> #include "banksys.h" using namespace std; int main() { bank_application b_application; cout << "welcome to starcity bank , please register " << endl; b_application.register_acc(); cout << "now please sign in " << endl; b_application.sign_in(); char reply{}; while (true) { b_application.menu(); cout << "Are you done [Y/N] " << endl; cin >> reply; if ((reply == 'Y') || (reply == 'y')) continue; else break; } } Answer: General Observations Not bad for a beginner, but definitely needs improvement. The functions are generally properly sized and simple (good thing). The include guards are good, I prefer this style over #pragma once. The object oriented design needs some work. There should be multiple classes, one of the classes should be account. The indentation in the switch statement in bank_application::menu() is questionable. The vertical spacing needs work, there are too many blank lines, and blank lines are missing where they should be such as between these 2 functions. void bank_application::menu() { int option_pick{}; cout << "OPTIONS \n 1> Withdraw \n 2> deposit " << endl; cin >> option_pick; switch (option_pick) { case 1: { withdraw();
{ "domain": "codereview.stackexchange", "id": 43976, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, beginner, object-oriented", "url": null }
c++, beginner, object-oriented switch (option_pick) { case 1: { withdraw(); break; } case 2: { deposit(); break; } } } void bank_application::register_acc() { cout << "Type in your full name " << endl; getline(cin >> std::ws, account_holder); cout << "Type in your account number " << endl; cin >> account_num; cout << "registration succesful " << endl; }
{ "domain": "codereview.stackexchange", "id": 43976, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, beginner, object-oriented", "url": null }
c++, beginner, object-oriented Avoid using namespace std; If you are coding professionally you probably should get out of the habit of using the using namespace std; statement. The code will more clearly define where cout and other identifiers are coming from (std::cin, std::cout). As you start using namespaces in your code it is better to identify where each function comes from because there may be function name collisions from different namespaces. The identifier cout you may override within your own classes, and you may override the operator << in your own classes as well. This stack overflow question discusses this in more detail. You should definitely avoid putting using namespace std; in a header file. Prefer C++ Container Classes The 3 arrays declared in the class bank_application are using the old C programming style arrays. The C++ Standard Library (STL) provides many container classes that would be preferable, such as std::array and std::vector. In this particular case std::vector would probably be better. File Organization Unlike Java and C#, C++ has header files and source files. There are libraries such as the boost libraries that do put all the code in the header files, but generally in C++ data definitions such as classes, structs and enums are defined in header files, and the executable code is in C++ source files. The code is organized this way to improve build times and to reduce link time errors. The function definitions (not the declarations) should be in a file probably called banksys.cpp. The file banksys.h should only contain the class declaration. This organization also allows bugs in the code to be corrected without forcing recompilation of the source files that include the header file. Prefer '\n' over std::endl For performance reasons output lines are generally terminated with '\n', std::endl performs a file flush after output, which requires a system call. System calls are generally expensive time wise.
{ "domain": "codereview.stackexchange", "id": 43976, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, beginner, object-oriented", "url": null }
rust, concurrency Title: Approximate concurrent counter in Rust Question: I am reading chapter 29 of OS: the three easy pieces, which is about concurrent data structures. The first example these is an approximate counter. This data structure increments numbers by using a global Mutex and several local Mutexes with local counters. When a local counter hits a threshold, it grabs the global mutex and flushes its local counter number to the global counter. This chapter shows code in C language. Since I'm practising Rust language, I implemented the concurrent data structure in Rust. When I see my code, I feel that there must be room to improve my code quality and performance. Notes: The book shows a graph that approximate counter scales very well, but in my test code, performance of my approximate counter was not good. Because mutex is used in the original C code, I used mutex in the Rust code. but the rust programming language book in the official site, shows an example of using std::sync::mpsc module. Would it be better to use message rather than mutex to avoid mistakes made by humans? Original code in OS book: typedef struct __counter_t { int global; // global count pthread_mutex_t glock; // global lock int local[NUMCPUS]; // per-CPU count pthread_mutex_t llock[NUMCPUS]; // ... and locks int threshold; // update frequency } counter_t; // init: record threshold, init locks, init values // of all local counts and global count void init(counter_t *c, int threshold) { c->threshold = threshold; c->global = 0; pthread_mutex_init(&c->glock, NULL); int i; for (i = 0; i < NUMCPUS; i++) { c->local[i] = 0; pthread_mutex_init(&c->llock[i], NULL); } }
{ "domain": "codereview.stackexchange", "id": 43977, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "rust, concurrency", "url": null }
rust, concurrency // update: usually, just grab local lock and update // local amount; once local count has risen ’threshold’, // grab global lock and transfer local values to it void update(counter_t *c, int threadID, int amt) { int cpu = threadID % NUMCPUS; pthread_mutex_lock(&c->llock[cpu]); c->local[cpu] += amt; if (c->local[cpu] >= c->threshold) { // transfer to global (assumes amt>0) pthread_mutex_lock(&c->glock); c->global += c->local[cpu]; pthread_mutex_unlock(&c->glock); c->local[cpu] = 0; } pthread_mutex_unlock(&c->llock[cpu]); } // get: just return global amount (approximate) int get(counter_t *c) { pthread_mutex_lock(&c->glock); int val = c->global; pthread_mutex_unlock(&c->glock); return val; // only approximate! } My Counter traits in counter.rs module. use std::fmt; use std::sync::Mutex; pub struct Counter { value: Mutex<i32> } impl Counter { pub fn new() -> Self { Counter { value: Mutex::new(0)} } pub fn test_and_increment(&self) -> i32 { let mut value = self.value.lock().unwrap(); *value += 1; if *value >= 250 { let old = *value; *value = 0; return old; } else { return 0; } } pub fn get(&self) -> i32 { *(self.value.lock().unwrap()) } pub fn add(&self, value: i32) { *(self.value.lock().unwrap()) += value; } } impl fmt::Display for Counter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", *self.value.lock().unwrap()) } } pub struct ApproximateCounter { value: Counter, local_counters: [Counter; 4] } impl ApproximateCounter { pub fn new() -> Self { ApproximateCounter { value: Counter::new(), local_counters: [Counter::new(), Counter::new(), Counter::new(), Counter::new()] } }
{ "domain": "codereview.stackexchange", "id": 43977, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "rust, concurrency", "url": null }
rust, concurrency pub fn increment(&self, i: usize) { let local_value = self.local_counters[i].test_and_increment(); if local_value > 0 { self.value.add(local_value); } } pub fn get(&self) -> i32 { self.value.get() } } main.rs use std::time::Instant; use std::sync::Arc; use std::thread; mod counter; const NUM_TO_LOOP: i32 = 1000000000; fn main() { { let now = Instant::now(); let counter = counter::Counter::new(); let mut result = 0; for _ in 0..(NUM_TO_LOOP) { result += counter.test_and_increment(); } let elapsed = now.elapsed(); println!("{}", result); println!("[one thread + thread-safe counter] Elapsed: {:.2?}", elapsed); } { let now = Instant::now(); let counter = Arc::new(counter::ApproximateCounter::new()); let mut threads = Vec::new(); for i in 0..4 { let c_counter = counter.clone(); threads.push(thread::spawn(move || { for _ in 0..(NUM_TO_LOOP / 4) { c_counter.increment(i); } })); } for thread in threads { thread.join().unwrap(); } println!("{}", counter.get()); let elapsed = now.elapsed(); println!("[four threads + ApproximateCounter]: Elapsed: {:.2?}", elapsed) } } Result of running main.rs: 1000000000 [one thread + thread-safe counter] Elapsed: 146.02s 1000000000 [four threads + ApproximateCounter]: Elapsed: 104.38s After following the answer by @Matthieu M. I got following result. Using Mutex without false sharing 1000000000 [one thread + thread-safe counter] Elapsed: 144.44s 1000000000 [four threads + ApproximateCounter]: Elapsed: 77.16s Using Atomic with false sharing 999999813 [one thread + thread-safe counter] Elapsed: 35.47s 999999060 [four threads + ApproximateCounter]: Elapsed: 36.40s
{ "domain": "codereview.stackexchange", "id": 43977, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "rust, concurrency", "url": null }
rust, concurrency Using atomic without false sharing 999999813 [one thread + thread-safe counter] Elapsed: 46.65s 999999060 [four threads + ApproximateCounter]: Elapsed: 28.29s Still need to figure out why Atomic do not give 1000000000 in one thread test. Answer: Your code is looking pretty good, overall. Well done. Embrace Everything is an Expression In Rust, nearly everything is an expression. In particular, an if is an expression, a block is an expression, etc... As a result, it is superfluous to use a return statement as the last statement of a function: the function body will return the value of the function "block" expression anyway, for example: impl Counter { pub fn test_and_increment(&self) -> i32 { let mut value = self.value.lock().unwrap(); *value += 1; if *value >= 250 { let old = *value; *value = 0; old } else { 0 } } } mem is Full of Goodies The core::mem (or std::mem) module is full of simple functions to perform swaps/exchanges of values: replace, swap, take, ... In this case, both replace and take would be appropriate, with the former being more explicit: take(x) is replace(x, Default::default()) but not everybody may immediately realize that the default is 0. impl Counter { pub fn test_and_increment(&self) -> i32 { let mut value = self.value.lock().unwrap(); *value += 1; if *value >= 250 { mem::replace(&mut *value, 0) } else { 0 } } } Implement Default when appropriate When a struct can be constructed without any argument, it should implement the Default trait. You can do so manually: impl Default for Counter { fn default() -> Self { Self::new() } } Although if no special logic is necessary, it's simpler to just derive it: #[derive(Default)] struct Counter { ... }
{ "domain": "codereview.stackexchange", "id": 43977, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "rust, concurrency", "url": null }
rust, concurrency The derive will simply use the Default value for each field. The same applies to ApproximateCounter. Derive more Speaking of derive, it is generally good form to derive a number of useful properties. The available ones in the standard library are: #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] struct Counter { ... } Each deriving the trait of the same name. Now, they may not all be appropriate -- or even possible -- however I would recommend always implementing Debug at the very least, and Clone whenever possible as well. Do Not Repeat Yourself Your implementation of Display reaches deep inside the value, when there's a perfectly good getter that already does the job: impl fmt::Display for Counter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.get()) } } Use Clippy! You can run Clippy with cargo clippy. Formally, Clippy is a linter, but I like to think of it as a mentor: it would have highlighted that you should implement Default for example. It contains lots of simple lints to make your code simpler, and more idiomatic, and it's constantly updated to keep up with new standard library functions or new language features. Use the Rust Playground The Rust Playground allows easily trying out small snippets of code. You can build the code (the default), and on the right you'll find tools such as cargo fmt (to reformat your code) and cargo clippy (to lint errors). It's also very handy to share your code with others, as people can immediately start tinkering. Perf Hint: Atomics & Alignment. The goal of the exercise was to use Mutex, however it is not here the best tool for the job: an AtomicI32 would be equally suited, and yield better performance. For example, here is an atomic-based implementation (also available on the playground): #[derive(Debug, Default)] pub struct Counter { value: AtomicI32, } impl Counter { pub fn new() -> Self { Self::default() }
{ "domain": "codereview.stackexchange", "id": 43977, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "rust, concurrency", "url": null }
rust, concurrency impl Counter { pub fn new() -> Self { Self::default() } pub fn test_and_increment(&self) -> i32 { let value = self.value.fetch_add(1, Ordering::Relaxed); if value >= 250 { self.value.swap(0, Ordering::Relaxed) } else { 0 } } pub fn get(&self) -> i32 { self.value.load(Ordering::Relaxed) } pub fn add(&self, value: i32) { self.value.fetch_add(value, Ordering::Relaxed); } } This should be quite faster. Another avenue of improvement is alignment. CPUs in general operate at the cache-line level, which is often 64 bytes. This means that if two values are within the same 64 bytes, even though they are independent, writing to one requires exclusive access to both. This leads to a "ping-pong" if one core writes to one in a loop and another core writes to the other in a loop in parallel. This is called false sharing. Intel CPUs are worse, in this regard, in that they often grab 2 cache lines at a time, instead of 1, and thus requires values to be in separate 128 bytes bins. It can be beneficial to pad structures, or force their alignment, to avoid this. This is as simple as using repr with the align directive: #[derive(Debug, Default)] #[repr(align(128))] pub struct Counter { value: AtomicI32, }
{ "domain": "codereview.stackexchange", "id": 43977, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "rust, concurrency", "url": null }
python, python-3.x, time-limit-exceeded, dynamic-programming, palindrome Title: Making my DP algorithm faster - longest palindromic substring Question: The following code is my solution to a LeetCode question - find the longest palindromic substring. My code is 100% correct, it passed once but it took too long, and in most of the reruns I hit a "time limit exceeded" error. I would appreciate any suggestions on why it's taking too long and how to improve the performance. class Solution: def longestPalindrome(self, s: str) -> str: n = len(s) # edge cases if n == 1: return s dp = [[0 for _ in range(n)] for _ in range(n)] for i in range(n): dp[i][i] = 1 substring = s[0] # my dp is bottom-up and only works with the matrix cells to the right of the # diagonal. I start from the very last row. for row in range(n-2,-1,-1): for col in range(row+1, n): if s[row] == s[col]: if col - row == 1 or dp[row+1][col-1]: dp[row][col] = 1 if len(s[row:col+1]) > len(substring): substring = s[row:col+1] return substring This is the last version of my code. Previous versions had slightly different implementations, for example: I assigned dp[i][i] = 1 while initializing the dp. It looked like this: dp = [[1 if (c==r) else 0 for c in range(n)] for r in range(n)]. But thinking about this at the assembly level, it was adding an extra few lines for the conditional for every iteration, which adds unnecessary overhead. For comparing the length of the new palindrome, I used a maxLen variable, and then replaced it with what I have now: I thought that finding the length of a string (which is readily available) might take less time than incrementing the variable maxLen. Although I'm not sure if that's correct.
{ "domain": "codereview.stackexchange", "id": 43978, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, time-limit-exceeded, dynamic-programming, palindrome", "url": null }
python, python-3.x, time-limit-exceeded, dynamic-programming, palindrome Answer: Beware of the cost of string slicing This piece of code creates an unnecessary string slice: if len(s[row:col+1]) > len(substring): substring = s[row:col+1] To create a string slice, Python has to allocate memory for the slice and copy memory content. This is equivalent without an unnecessary string slice: if col + 1 - row > len(substring): substring = s[row:col+1] This change alone seems to improve the performance by more than 10%, greatly increasing the success rate. Related to this, creating substring is also unnecessary. Although in this case slices are only created when a longer substring is found, if this happens many times across all the test cases, the performance difference can be noticeable. Instead of creating the longest substring using a slice, you could track the start and end points: longest_start = 0 longest_end = 1 for row in range(n-2, -1, -1): for col in range(row+1, n): if s[row] == s[col]: if col - row == 1 or dp[row+1][col-1]: dp[row][col] = 1 if col + 1 - row > longest_end - longest_start: longest_start = row longest_end = col + 1 return s[longest_start:longest_end] This seems to boost the performance by another 10%+. Minor things A simpler way to initialize dp: dp = [[0] * n for _ in range(n)] And the special treatment for the edge case of n == 1 is unnecessary, the implementation handles that case correctly.
{ "domain": "codereview.stackexchange", "id": 43978, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, time-limit-exceeded, dynamic-programming, palindrome", "url": null }
c++, object-oriented, database Title: Database buffer class Question: I designed the following DataBaseBuffer class, to be used by different threads that generate queries and need to send them to the database. It was inspired by producer–consumer problem solution. #include <nanodbc/nanodbc.h> #include <queue> #include <string> #include <mutex> #include <thread> #include <algorithm> class DataBaseBuffer { public: DataBaseBuffer(std::string& _connection_string_); void run(); // thread to send queries to the database void push(std::string& _query); nanodbc::connection conn_; // connection to db private: void send_queries(); nanodbc::connection conn_; // connection to db std::string& connection_string_; // save connection incase of reconnection std::queue<std::string> buffer_; std::queue<std::string> buffer_secondary_; std::mutex mutex_; }; DataBaseBuffer::DataBasBuffer(std::string& _connection_string) : connection_string_(_connection_string) {} void DataBaseBuffer::run() { conn_.connect(connection_string_); while (true) { send_queries(); std::this_thread::sleep_for(std::chrono::milliseconds(1)); } } void DataBaseBuffer::send_queries() { { std::lock_guard<std::mutex> lock(mutex_); std::swap(buffer_, buffer_secondary_); // move queries from main buffer to the secondary } while (!buffer_secondary_.empty()) { nanodbc::execute(conn_, buffer_secondary_.front()); buffer_secondary_.pop(); } } void DataBaseBuffer::push(std::string& _query) { { std::lock_guard<std::mutex> lock(mutex_); buffer_.push(_query); } } Are there any flaws in this design that could affect the smooth running of the program?
{ "domain": "codereview.stackexchange", "id": 43979, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, object-oriented, database", "url": null }
c++, object-oriented, database Are there any flaws in this design that could affect the smooth running of the program? Answer: Separate concerns Your class does two things: it interacts with the database and it implements a thread-safe queue. It would be much better to separate those two things. Create a class ThreadSafeQueue that implements thread-safe access to the queue. Then DatabaseBuffer can in turn use that, so its implementation is simplified: class ThreadSafeQueue { public: void push(const std::string& item); std::string pop(); private: std::queue<std::string> queue_; std::mutex mutex_; }; class DatabaseBuffer { public: /* same public functions as before */ ... private: void send_queries(); nanodbc::connection conn_; std::string& connection_string_; ThreadSafeQueue queue_; }; Pass strings by const reference where appropriate If you pass a parameter by pointer or reference, but the parameter should not be changed, then make sure it is const as well. This allows the compiler to produce an error if you do accidentally write to the parameter, and it also allows the compiler to generate more optimal code. Use a condition variable to sleep until the queue is non-empty Repeatedly checking if the queue is non-empty in a loop is problematic, even if you call sleep_for(). Sleeping for too long obviously causes delays in the processing of queries, but sleeping too short will still use a bit of CPU time, and more importantly it will prevent the CPU from going into a deeper sleep mode, and thus it will keep using more power than necessary. The proper way to handle this is to use a condition variable that you can wait for to be notified by another thread. Here is an example of how ThreadSafeQueue could be implemented: class ThreadSafeQueue { public: void push(const std::string& item) { { std::lock_guard lock(mutex_); queue_.push(item); } cv_.notify_one(); }
{ "domain": "codereview.stackexchange", "id": 43979, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, object-oriented, database", "url": null }
c++, object-oriented, database cv_.notify_one(); } std::string pop() { std::unique_lock lock(mutex_); cv_.wait(lock, [this]{return !queue_.empty();}); auto item = queue_.front(); queue_.pop(); return item; } private: std::queue<std::string> queue_; std::mutex mutex_; std::condition_variable cv_; }; This will cause the thread calling pop() to check if the queue is empty, and if so it will sleep until it gets notified by another thread. Some things to think about: You can change it to return a whole std::queue<std::string> instead of just one item, if you think that is important for efficiency. One issue is that if the producer stops, pop() might then wait indefinitely for an item to be added that never comes. Think of a way to safely terminate the consumer. You can make ThreadSafeQueue a template so it can store things other than std::strings. What about query results? Your code only executes queries, but it doesn't look at the result at all. Even an INSERT statement could fail, so at least you'd want some status result to be returned, but if you have a SELECT query then you also want the data returned by the database to be stored somewhere. There are various ways to deal with this. I'll present one here that uses std::async() instead of a queue runner: class DatabaseBuffer { public: ... std::future<nanodbc::result> push(const std::string& query); private: nanodbc::connection conn_; std::string& connection_string_; std::mutex mutex_; }; std::future<nanodbc::result> DatabaseBuffer::push(const std::string& query) { return std::async(std::launch::async, [&](std::string query){ std::lock_guard lock(mutex_); return nanodbc::execute(conn_, query); }, query); }
{ "domain": "codereview.stackexchange", "id": 43979, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, object-oriented, database", "url": null }
c++, object-oriented, database The push() function calls std::async(), which in turn will launch a thread that runs a function that locks the mutex and executes the query. While this happens, a std::future<> is immediately returned to the caller. The caller can then do other things, like launch more queries, before checking the future. Getting the result from the future will block until the thread running the async function has finished. So to use it: DatabaseBuffer db(...); auto future = db.push("SELECT name from users;"); // Do something else ... // Show the result for (auto result = future.get(); result.next();) { std::cout << result.get<int>(0) << '\n'; } You don't have to use std::async() to be able to use std::futures though; you can still have your own queue runner, but use std::promise objects to return a std::future to the caller that you can then set to the result of a query.
{ "domain": "codereview.stackexchange", "id": 43979, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, object-oriented, database", "url": null }
python, matrix, numpy Title: Pad a ragged multidimensional array to rectangular shape Question: For some machine learning purpose, I need to work with sequences with different lengths. To be able to process efficiently those sequences, I need to process them in batches of size size_batch. A batch typically has 4 dimensions and I want to convert it to a numpy's ndarray with 4 dimensions. For each sequence, I need to pad with some defined pad_value such that each element has the same size: the maximal size. For example, with 3 dimensional input: [[[0, 1, 2], [3], [4, 5]], [[6]], [[7, 8], [9]]] desired output for pad_value -1 is: [[[0, 1, 2], [3, -1, -1], [4, 5, -1]], [[6, -1, -1], [-1, -1, -1], [-1, -1, -1]] [[7, 8, -1], [9, -1, -1], [-1, -1, -1]]] which has shape (3, 3, 3). For this problem, one can assume that there are no empty lists in the input. Here is the solution I came up with: import numpy as np import itertools as it from typing import List def pad(array: List, pad_value: np.int32, dtype: type = np.int32) -> np.ndarray: """ Pads a nested list to the max shape and fill empty values with pad_value :param array: high dimensional list to be padded :param pad_value: value appended to :param dtype: type of the output :return: padded copy of param array """ # Get max shape def get_max_shape(arr, ax=0, dims=[]): try: if ax >= len(dims): dims.append(len(arr)) else: dims[ax] = max(dims[ax], len(arr)) for i in arr: get_max_shape(i, ax+1, dims) except TypeError: # On non iterable / lengthless objects (leaves) pass return dims dims = get_max_shape(array) # Pad values def get_item(arr, idx): while True: i, *idx = idx arr = arr[i] if not idx: break return arr
{ "domain": "codereview.stackexchange", "id": 43980, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, matrix, numpy", "url": null }
python, matrix, numpy r = np.zeros(dims, dtype=dtype) + pad_value for idx in it.product(*map(range, dims)): # idx run though all possible tuple of indices that might # contain a value in array try: r[idx] = get_item(array, idx) except IndexError: continue return r It does not feel really pythonic but does the job. Is there any better way to do it I should know ? I think I might be able to improve its speed by doing smart breaks in the last loop but I haven't dug much yet. Answer: nested methods Why do you nest the get_max_shape etcetera in the pad? There is no need to do this. get_max_shape Here you use recursion and a global variable. A simpler way would be to have a generator that recursively runs through the array, and yields the level and length of that part, and then another function to aggregate this results. That way you can avoid passing def get_dimensions(array, level=0): yield level, len(array) try: for row in array: yield from get_dimensions(row, level + 1) except TypeError: #not an iterable pass [(0, 3), (1, 3), (2, 3), (2, 1), (2, 2), (1, 1), (2, 1), (1, 2), (2, 2), (2, 1)] The aggregation can be very simple using collections.defaultdict: def get_max_shape(array): dimensions = defaultdict(int) for level, length in get_dimensions(array): dimensions[level] = max(dimensions[level], length) return [value for _, value in sorted(dimensions.items())] [3, 3, 3]
{ "domain": "codereview.stackexchange", "id": 43980, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, matrix, numpy", "url": null }
python, matrix, numpy [3, 3, 3] creating the result Instead of r = np.zeros(dims, dtype=dtype) + pad_value you can use np.full You iterate over all possible indices, and check whether it is present in the original array. Depening on how "full" the original array is, this can save some time. It also allows you to do this without your custom get_item method to get the element at the nested index def iterate_nested_array(array, index=()): try: for idx, row in enumerate(array): yield from iterate_nested_array(row, (*index, idx)) except TypeError: # final level for idx, item in enumerate(array): yield (*index, idx), item [((0, 0, 0), 0), ((0, 0, 1), 1), ((0, 0, 2), 2), ((0, 1, 0), 3), ((0, 2, 0), 4), ((0, 2, 1), 5), ((1, 0, 0), 6), ((2, 0, 0), 7), ((2, 0, 1), 8), ((2, 1, 0), 9)] slice an even better way, as suggested by@hpaulj uses slices: def iterate_nested_array(array, index=()): try: for idx, row in enumerate(array): yield from iterate_nested_array(row, (*index, idx)) except TypeError: # final level yield (*index, slice(len(array))), array [((0, 0, slice(None, 3, None)), [0, 1, 2]), ((0, 1, slice(None, 1, None)), [3]), ((0, 2, slice(None, 2, None)), [4, 5]), ((1, 0, slice(None, 1, None)), [6]), ((2, 0, slice(None, 2, None)), [7, 8]), ((2, 1, slice(None, 1, None)), [9])] padding def pad(array, fill_value): dimensions = get_max_shape(array) result = np.full(dimensions, fill_value) for index, value in iterate_nested_array(array): result[index] = value return result array([[[ 0, 1, 2], [ 3, -1, -1], [ 4, 5, -1]], [[ 6, -1, -1], [-1, -1, -1], [-1, -1, -1]], [[ 7, 8, -1], [ 9, -1, -1], [-1, -1, -1]]])
{ "domain": "codereview.stackexchange", "id": 43980, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, matrix, numpy", "url": null }
rust, numerical-methods Title: Runge-Kutta 4 implementation in Rust Question: This is a toy problem. I want to try using Rust for numerical modeling, but I found zero tutorials for this kind of stuff. I also just started learning Rust yesterday, so please bear with me. Usually when I'm doing an ODE solver, I create a grid array, then values array, define initial conditions and other parameters, then use a loop to find the solution. Of course, I also need to write the result to a file, so I can use it later. Now, I found it surprisingly hard to do any of these tasks in Rust. I eventually managed, but it took me most of the day to write this simple program. Could you please check my code and point me to any possible improvements? Including the array creation, the loop, and file output. // Program to find f(t) = cos(t) through ODE // df/dt = g // dg/dt = -f use std::fs; use std::io::Write; fn main() { //make directory let dirname = "results"; fs::create_dir(&dirname).expect("Error creating directory"); //initiate the grid const NT:usize = 1000; let dt = 0.05; //initiate the time array let t: [f64; NT] = core::array::from_fn(|i| i as f64 * dt); //initiate exact solution array let fe: [f64; NT] = core::array::from_fn(|i| (i as f64 * dt).cos()); //initiate numerical solution array //initial condition let f0 = 1.0; let g0 = 0.0; let mut f = [f0; NT]; let mut g = [g0; NT]; //for RK4 scheme let (mut k1, mut k2, mut k3, mut k4): (f64, f64, f64, f64); let (mut l1, mut l2, mut l3, mut l4): (f64, f64, f64, f64);
{ "domain": "codereview.stackexchange", "id": 43981, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "rust, numerical-methods", "url": null }
rust, numerical-methods for i in 1..NT { // RK4 scheme k1 = g[i-1]; l1 = -f[i-1]; k2 = k1 + l1*dt/2.0; l2 = l1 - k1*dt/2.0; k3 = k1 + l2*dt/2.0; l3 = l1 - k2*dt/2.0; k4 = k1 + l3*dt; l4 = l1 - k3*dt; // next values f[i] = -l1 + (k1 + 2.0*k2 + 2.0*k3 + k4)*dt/6.0; g[i] = k1 + (l1 + 2.0*l2 + 2.0*l3 + l4)*dt/6.0; } let filepath = format!("{}/{}",dirname,"RK4.dat"); fs::File::create(&filepath).expect("Error creating file"); let mut myfile = fs::OpenOptions::new() .write(true) .append(true) .open(filepath) .unwrap(); let mut iline: String; for j in 0..NT { iline = format!("{} {} {}",t[j], fe[j], f[j]); writeln!(myfile, "{}", iline).expect("Error writing to file"); } } This is the plot of RK4.dat, which shows that the program output is correct: Answer: Use Rustfmt Rust ships with a formatter, use it. It'll help keep your code readable and consistent with other code. Invoke it via cargo fmt or rustfmt path/to/file.rs. Use Clippy Rust also ships with Clippy, which contains a whole bunch lints to catch redundant, incorrect, or otherwise smelly code that doesn't make sense to have in rustc. I ran it on your code and got warning: the borrowed expression implements the required traits --> src/main.rs:10:20 | 10 | fs::create_dir(&dirname).expect("Error creating directory"); | ^^^^^^^^ help: change this to: `dirname` | = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#needless_borrow = note: `#[warn(clippy::needless_borrow)]` on by default warning: `playground` (bin "playground") generated 1 warning
{ "domain": "codereview.stackexchange", "id": 43981, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "rust, numerical-methods", "url": null }
rust, numerical-methods warning: `playground` (bin "playground") generated 1 warning Note that string literals are just an &'static str not a String, so they implement Copy, meaning that borrowing it is entirely useless here. Avoid mixing core and std imports For consistency's sake, unless you are writing code that needs to know about the distinction get all your imports from std, not core. In this case, It's using core::array::from_fn that you need to fix. Use snake_case instead of runonsentencecase Just a readability thing, rust uses snake case for variable names. For example, let myfile = /* stuff */ should be let my_file = /* stuff */. File systems are hard File systems are racy, complicated, platform-specific, and come with extremely awkward path handling. Rust does its best to provide tools to overcome these challenges, but they can be tricky to figure out for the uninitiated. Use PathBuf to build paths For any operation on paths more complicated than hardcoding, pull out std::path to handle the details for you. In particular, this will handle things like proper canonicalization and path separator differences for you. In your case that probably looks like this: let mut filepath = PathBuf::from("results"); fs::create_dir(&filepath).expect("Error creating directory"); filepath.push("RK4.dat"); fs::File::create(&filepath).expect("Error creating file"); // and so on
{ "domain": "codereview.stackexchange", "id": 43981, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "rust, numerical-methods", "url": null }
rust, numerical-methods Handle results directory already existing fs::create_dir returns an error if the desired directory already exists, meaning your code unconditionally panics if run twice in the same directory. You can handle this by checking the returned error and continuing if it's caused by the directory already existing, or by using fs::create_dir_all, which considers the directory already existing being a success condition. File::create doesn't do what you think it does And even if it did, you've still opened yourself up to a race condition. File::create(path) works just like let file = fs::OpenOptions::new() .write(true) .truncate(true) .create(true) .open(path); This means your code creates/truncates and then opens the file, closes the file, and then immediately opens it in append mode without ensuring that it actually still exists. That isn't ideal- depending on whether you want to append or truncate the file either add .create(true) to your OpenOptions and drop File::create entirely or just use the value returned from File::create. Inline your formatting when using writeln! The writeln! macro supports the same formatting options that all rust's other formatting macros do, meaning the write loop should be: for j in 0..NT { writeln!(myfile, "{} {} {}", t[j], fe[j], f[j]).expect("Error writing to file"); } Flush writers before dropping them Any time use you use an io::Write based object, you should flush it before dropping. This ensures data won't be silently lost if it can't be flushed successfully when it's being dropped. // all your other file-based stuff here myfile.flush().expect("failed to flush data to file");
{ "domain": "codereview.stackexchange", "id": 43981, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "rust, numerical-methods", "url": null }
rust, numerical-methods Avoid panicking because of user error in application code You should reserve panicking for instances when you the writer screw up, not for when you the user screw up. In your case, that means avoiding expect and unwrap calls near file system operations. I would recommend simply propagating the errors up through main, see the rust book's chapter on working with Results for a good explanation about that. Consider buffering file writes Not mandatory, but small numbers of large writes tend to be much faster when working with files then large numbers of small writes, so it may be wise to wrap the file in a BufWriter. Final code and thoughts // Program to find f(t) = cos(t) through ODE // df/dt = g // dg/dt = -f use std::fs; use std::io::{self, Write}; use std::path::PathBuf; fn main() -> io::Result<()> { //initiate the grid const NT: usize = 1000; let dt = 0.05; //initiate the time array let t: [f64; NT] = std::array::from_fn(|i| i as f64 * dt); //initiate exact solution array let fe: [f64; NT] = std::array::from_fn(|i| (i as f64 * dt).cos()); //initiate numerical solution array //initial condition let f0 = 1.0; let g0 = 0.0; let mut f = [f0; NT]; let mut g = [g0; NT]; //for RK4 scheme let (mut k1, mut k2, mut k3, mut k4): (f64, f64, f64, f64); let (mut l1, mut l2, mut l3, mut l4): (f64, f64, f64, f64); for i in 1..NT { // RK4 scheme k1 = g[i - 1]; l1 = -f[i - 1]; k2 = k1 + l1 * dt / 2.0; l2 = l1 - k1 * dt / 2.0; k3 = k1 + l2 * dt / 2.0; l3 = l1 - k2 * dt / 2.0; k4 = k1 + l3 * dt; l4 = l1 - k3 * dt; // next values f[i] = -l1 + (k1 + 2.0 * k2 + 2.0 * k3 + k4) * dt / 6.0; g[i] = k1 + (l1 + 2.0 * l2 + 2.0 * l3 + l4) * dt / 6.0; }
{ "domain": "codereview.stackexchange", "id": 43981, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "rust, numerical-methods", "url": null }
rust, numerical-methods let mut file_path = PathBuf::from("results"); fs::create_dir_all(&file_path)?; //note that the `?` postfix operator is for error propagation, see the previously linked book chapter for details file_path.push("RK4.dat"); let mut my_file = fs::File::create(&file_path)?; for j in 0..NT { writeln!(my_file, "{} {} {}", t[j], fe[j], f[j])?; } my_file.flush()?; Ok(()) } Playground Your code is pretty good overall, especially considering you've only been using rust for a day- the issues primarily come from the fact that file systems suck and code examples love to avoid proper error handling. Anyway, since you're interested in numerical modeling, here are a few crates that might be of interest: The num crate- a collection of utilities including big ints, complex and rational numbers, and abstractions over numerical operations and primitive types Plotters- a drawing library primarily designed for creating graphs, charts, and the like. Nalgebra- a general purpose linear algebra library. Also of note are rapier and salva, which are physics and fluid simulation libraries, respectively, and are created by the same organization. Ndarray- general purpose numpy style multidimensional array. Rayon- simple, safe, and fast parallel iterators. Not really specific to numerical modeling, but can greatly accelerate the processing of large quantities of data. Anyway, welcome to rust, I hope you enjoy your stay.
{ "domain": "codereview.stackexchange", "id": 43981, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "rust, numerical-methods", "url": null }
python, python-3.x, google-api Title: Python function that takes a response from Google Ads API and sorts / aggregates it Question: I am working on this function here and it produces the desired output. I just want to make sure I'm going about things in a smart way. It effectively just sorts through the data, with consideration to the fact that campaigns may be created by different firms. Then it aggregates the data. from collections import defaultdict
{ "domain": "codereview.stackexchange", "id": 43982, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, google-api", "url": null }
python, python-3.x, google-api def aggregate_ads(ad_data, labels=list(), default_advertiser='internal'): # Creates dicts to hold data, structured to provide code-readability ads_data = defaultdict( lambda: defaultdict( lambda: defaultdict(int) )) # Lowercases all labels labels = map(str.lower, labels) # Sorts each instance into its channel and adds for adgroup in ad_data: # Cleans and standardizes campaign name campaign_name = adgroup['campaign']['name'] campaign_name = campaign_name.replace('-', ' ').replace('_', ' ').lower() # Handling where ad_group type is not provided if not adgroup['ad_group'].get('type_'): adgroup['ad_group']['type_'] = 'MIXED' # Collects channel and metrics channel = adgroup['ad_group']['type_'] metrics = dict( impressions= int(adgroup['metrics']['impressions']), clicks = int(adgroup['metrics']['clicks']), # Converts cost in microns to usd cost = round(int(adgroup['metrics']['cost_micros'])/1000000, 2), ) # Checks for labels in campaign name and defaults to specified default advertisers = set(labels).intersection(campaign_name.split()) if not advertisers: advertisers.add(default_advertiser) # Adds the variables to ads_data for advertiser in advertisers: ads_data[advertiser][channel]['impressions'] += metrics['impressions'] ads_data[advertiser][channel]['clicks'] += metrics['clicks'] ads_data[advertiser][channel]['cost'] += metrics['cost'] # Converts into regular dict on return return dict( (advertiser, dict((ad_type, dict(metrics)) for ad_type, metrics in ad_data.items())) for advertiser, ad_data in ads_data.items())
{ "domain": "codereview.stackexchange", "id": 43982, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, google-api", "url": null }
python, python-3.x, google-api Here's my output: {'internal': {'DISPLAY_STANDARD': {'clicks': 163, 'cost': 11.8, 'impressions': 6785}, 'MIXED': {'clicks': 6, 'cost': 0.1, 'impressions': 434}, 'SEARCH_STANDARD': {'clicks': 2, 'cost': 5.89, 'impressions': 151}}, 'play': {'MIXED': {'clicks': 5, 'cost': 0.05, 'impressions': 242}}}
{ "domain": "codereview.stackexchange", "id": 43982, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, google-api", "url": null }
python, python-3.x, google-api and some sample input: example = [ {'campaign': {'resource_name': 'blahblahbah', 'serving_status': 'SERVING', 'name': 'Google Play Market-USA/Canada-2022-08', 'start_date': '2022-07-20', 'end_date': '2037-12-30'}, 'ad_group': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'type_': 'MIXED'}, 'metrics': {'clicks': '5', 'cost_micros': '54238', 'impressions': '242'}, 'ad_group_ad': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'ad': {'resource_name': 'blahblahbah'}}, 'segments': {'date': '2022-10-11'}}, {'campaign': {'resource_name': 'blahblahbah', 'serving_status': 'SERVING', 'name': 'Google Play Market-USA/Canada-2022-08', 'start_date': '2022-07-20', 'end_date': '2037-12-30'}, 'ad_group': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'type_': 'MIXED'}, 'metrics': {'clicks': '3', 'cost_micros': '53943', 'impressions': '217'}, 'ad_group_ad': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'ad': {'resource_name': 'blahblahbah'}}, 'segments': {'date': '2022-10-11'}}, {'campaign': {'resource_name': 'blahblahbah', 'serving_status': 'SERVING', 'name': 'Google Play Market-USA/Canada-2022-08', 'start_date': '2022-07-20', 'end_date': '2037-12-30'}, 'ad_group': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'type_': 'MIXED'}, 'metrics': {'clicks': '3', 'cost_micros': '53943', 'impressions': '217'}, 'ad_group_ad': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'ad': {'resource_name': 'blahblahbah'}}, 'segments': {'date': '2022-10-11'}}, {'campaign': {'resource_name': 'blahblahbah', 'serving_status': 'SERVING', 'name': 'Display-Global-Desktop-202208', 'start_date': '2022-07-21', 'end_date': '2037-12-30'}, 'ad_group': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'type_': 'DISPLAY_STANDARD'}, 'metrics': {'clicks': '95', 'cost_micros': '6036546', 'impressions': '4186'},
{ "domain": "codereview.stackexchange", "id": 43982, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, google-api", "url": null }
python, python-3.x, google-api 'metrics': {'clicks': '95', 'cost_micros': '6036546', 'impressions': '4186'}, 'ad_group_ad': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'ad': {'resource_name': 'blahblahbah'}}, 'segments': {'date': '2022-10-11'}}, {'campaign': {'resource_name': 'blahblahbah', 'serving_status': 'SERVING', 'name': 'Search-USA/NOTES', 'start_date': '2022-08-30', 'end_date': '2037-12-30'}, 'ad_group': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'type_': 'SEARCH_STANDARD'}, 'metrics': {'clicks': '2', 'cost_micros': '5890000', 'impressions': '151'}, 'ad_group_ad': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'ad': {'resource_name': 'blahblahbah'}}, 'segments': {'date': '2022-10-11'}}, {'campaign': {'resource_name': 'blahblahbah', 'serving_status': 'SERVING', 'name': 'Display-Global--Desktop-Files', 'start_date': '2022-09-02', 'end_date': '2037-12-30'}, 'ad_group': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'type_': 'DISPLAY_STANDARD'}, 'metrics': {'clicks': '68', 'cost_micros': '5757098', 'impressions': '2599'}, 'ad_group_ad': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'ad': {'resource_name': 'blahblahbah'}}, 'segments': {'date': '2022-10-11'}} ]
{ "domain": "codereview.stackexchange", "id": 43982, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, google-api", "url": null }
python, python-3.x, google-api labels = ['play'] # In reality, this is partner labels they'd put in the campaign name aggregate_ads(example, labels) Answer: You have a critical bug. You assign a map to labels, but this is never materialised and so is consumed and subsequently looks like an empty collection. I'm going to suggest that you throw away most of your implementation and replace it with Pandas, which is well-suited to your case. The entry point will be putting your example code through pd.json_normalize, which will give you a data frame that has six rows for your example data. Replacing your dict generators with Pandas to_dict, and performing the sum in a vectorised manner instead of in a loop, your code could look like the following: from pprint import pprint import pandas as pd def aggregate_ads(ad_data: dict, labels: set[str], default_advertiser: str = 'internal') -> pd.DataFrame: df = pd.json_normalize(ad_data).astype({ 'metrics.clicks': int, 'metrics.cost_micros': int, 'metrics.impressions': int, }) df['channel'] = df['ad_group.type_'].fillna('MIXED') df['metrics.cost'] = df['metrics.cost_micros'] / 1e6 campaign_fragments = ( df['campaign.name'] .str.replace('-', ' ').str.replace('_', ' ').str.lower().str.split() .apply(set).apply(labels.__and__) ) campaign_fragments[campaign_fragments == set()] = default_advertiser df['advertisers'] = campaign_fragments return ( df.explode('advertisers') .groupby(['advertisers', 'channel']) ['metrics.impressions', 'metrics.clicks', 'metrics.cost'] .sum() .rename(columns={ 'metrics.impressions': 'impressions', 'metrics.clicks': 'clicks', 'metrics.cost': 'cost', }) ) def ads_to_json(groups: pd.DataFrame) -> dict: inners = groups.groupby(level=0).apply( lambda df: df.droplevel(0).to_dict('index')) return inners.to_dict()
{ "domain": "codereview.stackexchange", "id": 43982, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, google-api", "url": null }
python, python-3.x, google-api def test() -> None: example = [ {'campaign': {'resource_name': 'blahblahbah', 'serving_status': 'SERVING', 'name': 'Google Play Market-USA/Canada-2022-08', 'start_date': '2022-07-20', 'end_date': '2037-12-30'}, 'ad_group': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'type_': 'MIXED'}, 'metrics': {'clicks': '5', 'cost_micros': '54238', 'impressions': '242'}, 'ad_group_ad': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'ad': {'resource_name': 'blahblahbah'}}, 'segments': {'date': '2022-10-11'}}, {'campaign': {'resource_name': 'blahblahbah', 'serving_status': 'SERVING', 'name': 'Google Play Market-USA/Canada-2022-08', 'start_date': '2022-07-20', 'end_date': '2037-12-30'}, 'ad_group': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'type_': 'MIXED'}, 'metrics': {'clicks': '3', 'cost_micros': '53943', 'impressions': '217'}, 'ad_group_ad': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'ad': {'resource_name': 'blahblahbah'}}, 'segments': {'date': '2022-10-11'}}, {'campaign': {'resource_name': 'blahblahbah', 'serving_status': 'SERVING', 'name': 'Google Play Market-USA/Canada-2022-08', 'start_date': '2022-07-20', 'end_date': '2037-12-30'}, 'ad_group': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'type_': 'MIXED'}, 'metrics': {'clicks': '3', 'cost_micros': '53943', 'impressions': '217'},
{ "domain": "codereview.stackexchange", "id": 43982, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, google-api", "url": null }
python, python-3.x, google-api 'metrics': {'clicks': '3', 'cost_micros': '53943', 'impressions': '217'}, 'ad_group_ad': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'ad': {'resource_name': 'blahblahbah'}}, 'segments': {'date': '2022-10-11'}}, {'campaign': {'resource_name': 'blahblahbah', 'serving_status': 'SERVING', 'name': 'Display-Global-Desktop-202208', 'start_date': '2022-07-21', 'end_date': '2037-12-30'}, 'ad_group': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'type_': 'DISPLAY_STANDARD'}, 'metrics': {'clicks': '95', 'cost_micros': '6036546', 'impressions': '4186'}, 'ad_group_ad': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'ad': {'resource_name': 'blahblahbah'}}, 'segments': {'date': '2022-10-11'}}, {'campaign': {'resource_name': 'blahblahbah', 'serving_status': 'SERVING', 'name': 'Search-USA/NOTES', 'start_date': '2022-08-30', 'end_date': '2037-12-30'}, 'ad_group': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'type_': 'SEARCH_STANDARD'}, 'metrics': {'clicks': '2', 'cost_micros': '5890000', 'impressions': '151'}, 'ad_group_ad': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'ad': {'resource_name': 'blahblahbah'}}, 'segments': {'date': '2022-10-11'}}, {'campaign': {'resource_name': 'blahblahbah', 'serving_status': 'SERVING', 'name': 'Display-Global--Desktop-Files', 'start_date': '2022-09-02', 'end_date': '2037-12-30'},
{ "domain": "codereview.stackexchange", "id": 43982, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, google-api", "url": null }
python, python-3.x, google-api 'start_date': '2022-09-02', 'end_date': '2037-12-30'}, 'ad_group': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'type_': 'DISPLAY_STANDARD'}, 'metrics': {'clicks': '68', 'cost_micros': '5757098', 'impressions': '2599'}, 'ad_group_ad': {'resource_name': 'blahblahbah', 'status': 'ENABLED', 'ad': {'resource_name': 'blahblahbah'}}, 'segments': {'date': '2022-10-11'}} ]
{ "domain": "codereview.stackexchange", "id": 43982, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, google-api", "url": null }
python, python-3.x, google-api grouped = aggregate_ads(example, labels={'play'}) js = ads_to_json(grouped) pprint(js) if __name__ == '__main__': test()
{ "domain": "codereview.stackexchange", "id": 43982, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x, google-api", "url": null }
javascript Title: Pattern for javascript string replacement Question: Does the following look like acceptable code to replace some escaped regex characters? export function parseString(s: string): string { let r : string; if ( s[0] === 'r' || s[0] === 'R' ) { r = s.substring(1); // no escape } else { r = s.substring(1, s.length - 1); r= r.replace('\\n', '\n'); r= r.replace('\\r', '\r'); r= r.replace('\"', '"'); r= r.replace('\'', '\''); r= r.replace('\\a', '\a'); r= r.replace('\\b', '\b'); r= r.replace('\\r', '\r'); r= r.replace('\\t', '\v'); r= r.replace('\\\\', '\\'); r= r.replace('\\?', '?'); r= r.replace('\\`', '\`'); const reg = /\\x../gi; const regReseult = r.matchAll(reg); let iter = regReseult.next(); while (!iter.done) { r=r.replace(iter.value[0], escape_hex(iter.value[0])); iter = regReseult.next(); } } return r; } function escape_hex( s: string ) : string { let ss= s.toLowerCase(); if ( ss[0]!== '\\' || ss[1]!== 'x') { throw new Error('Invalid hexadecimal escape sequence'); } if ( ss.length !== 4) { throw new Error('Invalid hexadecimal escape sequence'); } const reg = /[0-9a-f][0-9a-f]/i; if ( ss.match(reg) === null ) { throw new Error('Invalid hexadecimal escape sequence'); } ss= ss.replace('\\x', '0x'); return String.fromCharCode(_parseInt(ss)); } Or, would a better pattern be to capture the various cases as a single regex such as \\[nr"'abrt\\?`] and then do a substitution based on the capture char? What might be the cleanest way to write the above function? The goal of the above function is to parse a literal string as defined in BigQuery string-literals, which may accept a string in various forms such as "hello", 'hello', and r'hello\nthere'.
{ "domain": "codereview.stackexchange", "id": 43983, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "javascript", "url": null }
javascript Answer: I think you can refactor code this way to avoid these many lines of replace statements like this, const replaceWith = { '\\n': '\n', '\\r': '\r', '\"' : '\'' }; // you can always add more replacement here for (const key in replaceWith) { r = r.replace(key, replaceWith[key]); }
{ "domain": "codereview.stackexchange", "id": 43983, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "javascript", "url": null }
c, file, posix Title: Opening a directory as a file descriptor creating it if it does not exist Question: Variables: conf of type struct Conf whose details are not relevant to this review stores the configuration data being read. str is a string containing the path of the directory the app uses to save data. Code: bool exist = 0; if ((mkdir(str, 0755) && !(exist = errno == EEXIST)) || (Dir = open(str, O_DIRECTORY)) == -1) { perror(str); return errno; } if (exist) { const int fd = openat(Dir, ".Conf", O_RDONLY); if (fd != -1) { const ssize_t r = read(fd, &conf, sizeof(struct Conf)); if (r < sizeof(struct Conf)) r != -1 ? (void)memset(&conf, 0, r) : perror(".Conf"); if (close(fd)) perror(".Conf"); } else if (errno != ENOENT) perror(".Conf"); } The issue is I am not sure if this is the cleanest way to accomplish this task. The program relies on the mkdir system call failing to signal that the directory already exists. Is it good practice to expect a system call to fail? Another issue is creating a bool variable to set somewhere and use as a condition elsewhere, was what I thought of as a quick hack to achieve the correct logic. What is a cleaner way to achieve this? Answer: I had to add a lot of declarations to get this anywhere near compilable: struct Conf { char a; unsigned char b; }; #define _POSIX_C_SOURCE 200809L #include <stdbool.h> #include <stdio.h> #include <errno.h> #include <sys/stat.h> #include <string.h> #include <fcntl.h> #include <unistd.h> int read_conf(const char *str, struct Conf conf) { int Dir; Beware of reading from file directly into a structure like this - any small change to structure layout (e.g. using different a compiler) will cause data corruption. This line is hard to read, with its mix of conditionals and side-effects: if ((mkdir(str, 0755) && !(exist = errno == EEXIST)) || (Dir = open(str, O_DIRECTORY)) == -1) {
{ "domain": "codereview.stackexchange", "id": 43984, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, file, posix", "url": null }
c, file, posix if ((mkdir(str, 0755) && !(exist = errno == EEXIST)) || (Dir = open(str, O_DIRECTORY)) == -1) { I recommend encapsulating this in a function that is similar to open(str, O_CREAT) (but creating a directory rather than a plain file). Even if it's only used once, it makes the code much clearer to understand. In any case, we need to close Dir once we're finished with it, so we're leaking the descriptor. The if (exist) test seems pointless, as if the directory is newly-created, openat() will quickly fail with ENOENT. Saving a single system-call is unlikely to provide a measurable benefit, so go with the simpler code. It's usually simpler to deal with error cases first - if we can return early, that reduces indentation in subsequent code, and therefore reduces cognitive load. There's some more confusing code: if (r < sizeof(struct Conf)) r != -1 ? (void)memset(conf, 0, r) : perror(".Conf"); Much clearer if we unwrap that: if (r < 0) { perror(".Conf"); goto err; } const size_t ur = (size_t)r; if (ur < sizeof conf) { memset(&conf, 0, ur); goto err; } ⋮ int retval; err: retval = errno; close(fd); return retval; It still doesn't make sense - why do we overwrite just the part of the config we read? And is all-bytes-zero appropriate for a struct Conf? We should be continuing the read, like this: static bool read_n(int fd, char *dest, size_t count) { while (count) { ssize_t bytes_read = read(fd, dest, count); if (bytes_read <= 0) { return false; } dest += bytes_read; count -= (size_t)bytes_read; } return true; } And probably better to keep a static const struct Conf default_config around, for the purpose of overwriting. Passing a struct Conf by value means that we are modifying the wrong structure. Pass it as a pointer, so we can write to the caller's version.
{ "domain": "codereview.stackexchange", "id": 43984, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, file, posix", "url": null }
c, file, posix Naming is inconsistent, with a mix of lower-case and PascalCase names. Stick with one convention (preferably the platform's usual convention) to avoid making life difficult for maintainers. Names such as str carry no useful information. Prefer names that convey the purpose of variables, rather than their type. Since exist is a boolean, prefer to initialise with false rather than 0. Prefer to use sizeof on objects rather than types. That makes it easier to see that the correct size is being used in calls such as read(fd, &conf, sizeof conf). Improved code static int open_or_mkdir(const char *dirname) { /* try creating it (harmless if it already exists) */ mkdir(dirname, 0755); /* open could still fail, if mkdir failed or another process intervenes */ return open(dirname, O_DIRECTORY); } static bool read_n(int fd, void *dest, size_t count) { char *p = dest; while (count) { ssize_t bytes_read = read(fd, p, count); if (bytes_read == 0) { /* this will have to do to indicate file-too-short */ errno = EPIPE; } if (bytes_read <= 0) { return false; } p += bytes_read; count -= (size_t)bytes_read; } return true; } int read_conf(const char *str, struct Conf *conf) { int dir = open_or_mkdir(str); if (dir < 0) { perror(str); return errno; } const int fd = openat(dir, ".Conf", O_RDONLY); close(dir); if (fd < 0) { if (errno == ENOENT) { /* fine, and normal */ return 0; } perror(".Conf"); return errno; } errno = 0; read_n(fd, conf, sizeof *conf); if (close(fd)) { perror(".Conf"); } return errno; }
{ "domain": "codereview.stackexchange", "id": 43984, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, file, posix", "url": null }
python, file-system Title: Automate the Boring Stuff Chapter 10 - Selective Copy by file extension Question: The project outline: Write a program that walks through a folder tree and searches for files with a certain file extension (such as .pdf or .jpg). Copy these files from whatever location they are in to a new folder. My solution: import os, shutil from pathlib import Path def copy_extension(basedir, newdir, extension): for foldername, subfolders, filenames in os.walk(basedir): for filename in filenames: if not filename.endswith(extension): continue file_path = Path(foldername) / Path(filename) destination = newdir / filename if destination.exists(): new_destination = copy_increment(destination) shutil.copy(file_path, new_destination) else: shutil.copy(file_path, destination) def copy_increment(destination): marker = 0 stem, extension = os.path.splitext(destination) while destination.exists(): marker += 1 destination = Path(f"{stem} ({marker}){extension}") return destination def main(): while True: user_search = input("Please enter a folder to search: ") basedir = Path(user_search) if not basedir.is_dir(): print("This path does not exist.") continue else: user_destination = input("Please enter a the director of the new folder: ") newdir = Path(user_destination) extension = input("Extension of files to copy: ") copy_extension(basedir, newdir, extension) if __name__ == '__main__': main()
{ "domain": "codereview.stackexchange", "id": 43985, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, file-system", "url": null }
python, file-system if __name__ == '__main__': main() Answer: Old style -vs- new style You are mixing old-style os. operations and the newer pathlib.Path functions. Pick one ... specifically the newer version. For instance, os.walk(), and subsequent filtering (such as by extension) can easily be replaced with just Path.rglob(): source = Path(source_path) for src in source.rglob("*.txt"): # process the `src` file, which will be a "something.txt" file Similarly, if destination was a Path: stem, extension = os.path.splitext(destination) ... destination = Path(f"{stem} ({marker}){extension}") using the newer pathlib becomes a one-line statement: destination = destination.with_stem(f"{stem} ({marker})") Complexity As it stands, your script could suffer an \$O(N^2)\$ time complexity slowdown. Consider dozens of directories, each with dozens of sub-directories, each with dozens of sub-sub-directories, each with a README.txt file. Your script finds ... the first README.txt file, and copies it. the second README.txt file, notes it already exists, so writes README (1).txt. the third README.txt file, notes it already exists, notes README (1).txt exists, so writes README (2).txt the fourth README.txt file, notes it already exists, notes README (1).txt exists, notes README (2).txt exists, so writes README (3).txt With N README.txt files, .exists() is called for README.txt N times, for README (1).txt N-1 times, for README (2).txt N-2 times, for README (3).txt N-3 times, for README (4).txt N-4 times, ... ... for a total of \$N (N - 1) / 2\$ calls. If you maintained a counter for each source filename, you wouldn't need to start at 0 and check for existence of each variant of the filename. You could simply start at the correct marker value. Reworked code: import shutil from pathlib import Path from collections import Counter def selective_copy(destination: Path, source: Path, pattern: str) -> None: occurrences = Counter()
{ "domain": "codereview.stackexchange", "id": 43985, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, file-system", "url": null }
python, file-system for src in source.rglob(pattern): count = occurrences[src.name] stem = src.stem dst = destination / src.name if count: dst = dst.with_stem(f"{stem} ({count})") while dst.exists(): count += 1 dst = dst.with_stem(f"{stem} ({count})") shutil.copy(src, dst) occurrences[src.name] = count + 1 if __name__ == '__main__': selective_copy(Path("to"), Path("from"), "*.txt") Of course, replace the main code with your querying the user for the source, destination and pattern (or extension, to which you'd add the "*." prefix).
{ "domain": "codereview.stackexchange", "id": 43985, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, file-system", "url": null }
c#, parsing, .net Title: Struct that parses full name into first name, middle name, last name and suffix Question: The struct below parses full name into first name, middle name, last name and suffix. Perhaps Builder pattern is more appropriate here? I remember there was a principle referring to the number of constructor parameters which Builder solves. record vs struct? Records inherit from IEquatable. https://sharplab.io/#v2:CYLg1APgAgzABAJwKYGMD2DhwB4FgBQA3gXHAJYB2ALnAIYDcBAvgUA= Is it a good idea to replace Parse with TryParse pattern? Code /// <summary> /// Represents a person's name. /// </summary> public struct FullName : IEquatable<FullName>, IFormattable { /// <summary> /// Initializes a new instance of the <see cref="FullName" /> struct. /// </summary> /// <param name="firstName">The given name.</param> /// <param name="lastName">The family name.</param> /// <param name="middleName">The middle name.</param> /// <param name="suffix">The suffix.</param> public FullName(string? firstName, string? lastName = default, string? middleName = default, string? suffix = default) { FirstName = firstName; MiddleName = middleName; LastName = lastName; Suffix = suffix; } /// <summary> /// Gets or sets the person's first name. /// </summary> public string? FirstName { get; set; } /// <summary> /// Gets or sets the middle name. /// </summary> public string? MiddleName { get; set; } /// <summary> /// Gets or sets the person's last name. /// </summary> public string? LastName { get; set; } /// <summary> /// Gets or sets the suffix. (e.g. PhD, Jr, etc) /// </summary> public string? Suffix { get; set; }
{ "domain": "codereview.stackexchange", "id": 43986, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c#, parsing, .net", "url": null }
c#, parsing, .net /// <summary> /// Parses the specified text. /// </summary> /// <param name="text">The text.</param> public static FullName Parse(string text) { if (string.IsNullOrEmpty(text)) { return new FullName(); } var parts = text.Split(' '); string? Get(int idx) { return idx >= parts.Length ? null : parts[idx]; } switch (parts.Length) { case 1: case 2: return new FullName(Get(0), Get(1)); case 3: var last = Get(2); return IsSuffix(last) ? new FullName(Get(0), Get(1), default, last) : new FullName(Get(0), Get(2), Get(1)); case 4: return new FullName(Get(0), Get(2), Get(1), Get(3)); } return new FullName(); } #region IFormattable /// <summary>Converts to string.</summary> /// <returns>A <see cref="System.String" /> that represents this instance.</returns> public override string ToString() { return string.Join(" ", FirstName, MiddleName, LastName, Suffix).Replace(" ", " ").Trim(); } /// <summary> /// Converts to string. /// </summary> /// <param name="format">The format.</param> /// <returns> /// A <see cref="System.String" /> that represents this instance. /// </returns> public string ToString(string format) { return ToString(string.Concat("{0:", string.IsNullOrEmpty(format) ? "G" : format, '}'), Formatter); }
{ "domain": "codereview.stackexchange", "id": 43986, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c#, parsing, .net", "url": null }
c#, parsing, .net /// <summary> /// Converts to string. /// </summary> /// <param name="format">The format.</param> /// <param name="formatProvider">The format provider.</param> /// <returns> /// A <see cref="System.String" /> that represents this instance. /// </returns> public string ToString(string? format, IFormatProvider? formatProvider) { return string.Format(formatProvider ?? Formatter, string.IsNullOrEmpty(format) ? "{0:G}" : format, this); } public static implicit operator string(FullName obj) { return obj.ToString(); } public static explicit operator FullName(string obj) { return Parse(obj); } #endregion IFormattable #region IEquatable /// <summary> /// Determines if the specified names are equal. /// </summary> /// <param name="a">The first name.</param> /// <param name="b">The other name.</param> /// <param name="comparison">The comparison (defaults to <see cref="StringComparison.InvariantCultureIgnoreCase" />).</param> public static bool Equals(FullName a, FullName b, StringComparison comparison = StringComparison.InvariantCultureIgnoreCase) { return string.Equals(a.FirstName, b.FirstName, comparison) && string.Equals(a.MiddleName, b.MiddleName, comparison) && string.Equals(a.LastName, b.LastName, comparison) && string.Equals(a.Suffix, b.Suffix, comparison); } public static bool operator ==(FullName x, FullName y) { return Equals(x, y); } public static bool operator !=(FullName x, FullName y) { return !Equals(x, y); }
{ "domain": "codereview.stackexchange", "id": 43986, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c#, parsing, .net", "url": null }
c#, parsing, .net public static bool operator !=(FullName x, FullName y) { return !Equals(x, y); } /// <summary> /// Indicates whether the current object is equal to another object of the same type. /// </summary> /// <param name="other">An object to compare with this object.</param> /// <returns> /// true if the current object is equal to the <paramref name="other">other</paramref> parameter; otherwise, false. /// </returns> public bool Equals(FullName other) { return Equals(this, other); } /// <summary> /// Determines whether the specified <see cref="System.Object" />, is equal to this instance. /// </summary> /// <param name="obj">The <see cref="System.Object" /> to compare with this instance.</param> /// <returns> /// <c>true</c> if the specified <see cref="System.Object" /> is equal to this instance; otherwise, <c>false</c>. /// </returns> public override bool Equals(object? obj) { return obj is FullName name && Equals(this, name); } public override int GetHashCode() { return FirstName?.GetHashCode() ?? 0 ^ MiddleName?.GetHashCode() ?? 0 ^ LastName?.GetHashCode() ?? 0 ^ Suffix?.GetHashCode() ?? 0; } #endregion IEquatable #region Backing Members internal static bool IsSuffix(string text) { // NOTE: A suffix is a string that ends with a period (.) // or has a uppercase beyond the first character. if (string.IsNullOrEmpty(text)) { return false; } if (text[^1] == '.') { return true; } for (var i = 1; i < text.Length; i++) { var c = text[i]; if (char.IsLetter(c) && char.IsUpper(c)) { return true; } } return text.Length is 3 or 4 || IsAllCaps(text); }
{ "domain": "codereview.stackexchange", "id": 43986, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c#, parsing, .net", "url": null }
c#, parsing, .net return text.Length is 3 or 4 || IsAllCaps(text); } internal static bool IsAllCaps(string text) { return text.All(c => !char.IsLetter(c) || !char.IsLower(c)); } private static NameFormatter? _formatter; private static NameFormatter Formatter => _formatter ??= new NameFormatter(); #endregion Backing Members } internal class NameFormatter : ICustomFormatter, IFormatProvider { public string Format(string? format, object? arg, IFormatProvider? formatProvider) { if (arg is FullName name) { var n = format.Length; var builder = new StringBuilder(); for (var i = 0; i < n; i++) { char c; switch (c = format[i]) { default: builder.Append(c); break; case 'G': builder.Append(name.ToString()); break; case 'S': builder.Append(string.Join(" ", name.FirstName, name.LastName).Trim()); break; case '1': case 'f': case 'F': builder.Append(name.FirstName); break; case '2': case 'm': case 'M': builder.Append(name.MiddleName); break; case '3': case 'l': case 'L': builder.Append(name.LastName); break; case '\\': /* Escape */ builder.Append(i + 1 < n ? format[++i] : c); break; } } return builder.ToString().Trim(); } return GetFallbackFormat(format, arg); }
{ "domain": "codereview.stackexchange", "id": 43986, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c#, parsing, .net", "url": null }
c#, parsing, .net return GetFallbackFormat(format, arg); } public object? GetFormat(Type? formatType) { return formatType == typeof(ICustomFormatter) ? this : null; } private string? GetFallbackFormat(string format, object? arg) { if (arg is IFormattable formattable) { return formattable.ToString(format, CultureInfo.CurrentCulture); } return arg != null ? arg.ToString() : string.Empty; } } Unit tests public sealed class FullNameTests { [Theory] [InlineData("Иван Иванов", "Иван", default, "Иванов")] [InlineData("Анна-Мария Драганова Иванова", "Анна-Мария", "Драганова", "Иванова")] public void Build_ShouldReturnCorrectUsernamePassword_WhenGivenFullName( string fullName, string expectedFirstName, string expectedMiddleName, string expectedLastName) { // Arrange // Act var actual = FullName.Parse(fullName); // Assert actual.FirstName.Should().Be(expectedFirstName); actual.MiddleName.Should().Be(expectedMiddleName); actual.LastName.Should().Be(expectedLastName); } } Answer: Lots of questions here. /// <param name="firstName">The given name.</param> /// <param name="lastName">The family name.</param> /// <param name="middleName">The middle name.</param> /// <param name="suffix">The suffix.</param> Why are you expecting first-last-middle? Why would someone not put their middle name in the middle? public string? FirstName { get; set; } For the name properties, why do they have public setters? Are you expecting external classes to change someone's name? I would make them private or protected: public string? FirstName { get; protected set; }, etc. for (var i = 1; i < text.Length; i++) { var c = text[i]; if (char.IsLetter(c) && char.IsUpper(c)) { return true; } }
{ "domain": "codereview.stackexchange", "id": 43986, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c#, parsing, .net", "url": null }
c#, parsing, .net For checking if a name is a suffix or not, why do you keep looking past the second letter? If someone's name were Betsy DeVos (American politician) then the last name DeVos would trigger as a suffix because you keep looking for a capital letter. I would flip the check here, if any letter is NOT capital then it's false (not a suffix). If you check all the letters and don't return false; then you should return true. if (string.IsNullOrEmpty(text)) { return new FullName(); } Why would you allow an empty name? You're in Parse(), not TryParse(), so (IMO) it's okay to throw here. I'd do an invalid argument exception. Speaking of which, I think you could easily implement a TryParse as well, if you throw from Parse: public static bool TryParse(string text, out FullName name) { name = new FullName(); try { name = Parse(text); return true; } catch { return false; } } anything that's not valid causes Parse to fail, which would cause TryParse to return false. Since it's returning false the out name is assumed to be invalid. return text.Length is 3 or 4 || IsAllCaps(text); Back on the IsSuffix check, why does it matter about the length? I think your code would take Lana Del Rey (American singer) and treat her last name as a suffix because it's three letters long. switch (parts.Length) { // ... case 4: return new FullName(Get(0), Get(2), Get(1), Get(3)); } Last comment here, George H. W. Bush (former American president) has two middle names, which would trigger Bush to get treated as a suffix, both for being 4 letters long and for being the fourth argument in the name. And, because of the first-last-middle scheme, his name would get scrambled. This is also not mentioning people like José María Álvarez del Manzano y López del Hierro (Spanish politician), whose paternal surname is Álvarez del Manzano.
{ "domain": "codereview.stackexchange", "id": 43986, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c#, parsing, .net", "url": null }
c#, parsing, .net Ultimately, if you've got the ability to enforce first-last-middle, then I'd argue to enforce a comma to separate the suffix, like Charles Philip Arthur George, III or Saul Goodman, Esq.
{ "domain": "codereview.stackexchange", "id": 43986, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c#, parsing, .net", "url": null }
python, programming-challenge, palindrome Title: Leetcode, longest palindromic substring Question: Here's a link Given a string s, return the longest palindromic substring in s. A string is called a palindrome string if the reverse of that string is the same as the original string. Example 1: Input: s = "babad" Output: "bab" Explanation: "aba" is also a valid answer. Example 2: Input: s = "cbbd" Output: "bb" from collections import defaultdict def is_palindrome(s): return s == s[::-1] def solve(s): seen = defaultdict(list) found = '' for i, char in enumerate(s): if seen[char]: for j in seen[char]: ss = s[j : i + 1] if is_palindrome(ss): found = max(ss, found, key=len) break seen[char].append(i) if not found: found = s[0] return found Answer: Well done optimization over a brute-force O(N^2) solution. It is still O(N^2) of course but will run much faster, especially for short strings of random characters. If you're interested in a linear solution, here it is (although it's not something you can realistically invent in 5 minutes). Some interviewers might complain about naming: found sounds like a boolean name for whether or not a palindrome was found, s and ss don't say anything at all. It may take some time to come up with a fitting name, but if you inform the interviewer why you're taking your time, it will be a big plus: naming is very important in production code. Also, you didn't specify the constraints so this solution breaks if the string is empty. Make sure to check all the corner cases and constraints during a real interview, it is very important. Being able to find those corner cases is one of the main things such interviews are meant to check.
{ "domain": "codereview.stackexchange", "id": 43987, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, programming-challenge, palindrome", "url": null }
beginner, rust, image, fractals Title: Creating an image of the Mandelbrot set in Rust Question: I'm in the process of familiarizing myself with Rust. In order to get some practice, I decided to make a program that generates images of the Mandelbrot set. The function I use to generate the image is included below. I'm specifically interested in feedback regarding my use (or lack thereof) of Rust idioms, as well as my interaction with the borrowing system. Notice that I had to clone the PathBuf as I have two uses of it, and the call to save borrows the value. Of course, however, any and all feedback is appreciated as well. use image::{ImageBuffer, Rgb}; use num::Complex; use std::path::PathBuf; pub fn generate_image(width: u32, height: u32, iterations: u32, zoom: f32, out: PathBuf) { let to_imaginary_domain = |x: u32, y: u32| -> (f32, f32) { let re: f32 = x as f32 - width as f32 / 2.0; let im: f32 = y as f32 - height as f32 / 2.0; (re / zoom, im / zoom) }; println!("Generating {} x {} image of the Mandelbrot set...", width, height); let img = ImageBuffer::from_fn(width, height, |px, py| { let (x, y) = to_imaginary_domain(px, py); let c = Complex::<f32> { re: x, im: y }; let mut z = Complex::<f32> { re: 0.0, im: 0.0 }; for _i in 0..iterations { z = z * z + c; if z.norm() >= 2.0 { return Rgb::<u8>([0x00, 0x00, 0x00]); } } Rgb::<u8>([0xFF, 0xFF, 0xFF]) }); match img.save(out.clone()) { Ok(_) => { println!("Successfully saved image to {:#?}.", out.as_os_str()); }, Err(error) => { panic!("Failed to save the image: {:#?}", error); } }; } Answer: Disclaimer: I'm not an experienced Rust developer. Note for other reviewers/people who want to test the code Here are the dependencies you can use: [dependencies] image = "0.24.4" num = "0.4.0"
{ "domain": "codereview.stackexchange", "id": 43988, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "beginner, rust, image, fractals", "url": null }
beginner, rust, image, fractals and here are parameters that worked fine for me: width: 1000 height: 1000 iterations: 100 zoom: 300 Clippy clippy is a really nice tool to catch mistakes and improve your code. In your case, there is not much to say: a few things about integer types (but you are a bit stuck with the ImageBuffer::from_fn types) a few things about Path and PathBuffer which may answer your initial question. I ended up with: /// # Panics /// /// Will panic if image is not saved pub fn generate_image(width: u32, height: u32, iterations: u32, zoom: f32, out: &Path) { ... match img.save(out) { Ok(_) => { println!("Successfully saved image to {:#?}.", out.as_os_str()); } Err(error) => { panic!("Failed to save the image: {:#?}", error); } }; Splitting the logic in functions Having a to_imaginary_domain function is nice but it is a bit surprising to me that it does not return a complex number. let to_imaginary_domain = |x: u32, y: u32| -> Complex<f32> { let re: f32 = x as f32 - width as f32 / 2.0; let im: f32 = y as f32 - height as f32 / 2.0; Complex::<f32> { re: re / zoom, im: im / zoom, } }; The mathematical operation probably deserves to be in a dedicated function as well. #[must_use] pub fn mandelbrot_func_diverges(c: Complex<f32>, iterations: u32) -> bool { let mut z = Complex::<f32> { re: 0.0, im: 0.0 }; for _i in 0..iterations { z = z * z + c; if z.norm() >= 2.0 { return true; } } false } Then, in the from_fn call, we just have: let img = ImageBuffer::from_fn(width, height, |px, py| { if mandelbrot_func_diverges(to_imaginary_domain(px, py), iterations) { Rgb::<u8>([0x00, 0x00, 0x00]) } else { Rgb::<u8>([0xFF, 0xFF, 0xFF]) } });
{ "domain": "codereview.stackexchange", "id": 43988, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "beginner, rust, image, fractals", "url": null }
beginner, rust, image, fractals If it was for me, I'd also rewrite the generate_image so that it returns the ImageBuffer instead of dealing with saving it, but it makes things slightly more verbose. #[must_use] pub fn generate_image( width: u32, height: u32, iterations: u32, zoom: f32, ) -> ImageBuffer<Rgb<u8>, Vec<u8>> { More ideas In order to make the output somehow better: you could center the image around a different point you could get the number of iterations before divergence and use this number to get a gradient of color
{ "domain": "codereview.stackexchange", "id": 43988, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "beginner, rust, image, fractals", "url": null }
python Title: Automate the Boring Stuff Chapter 10 - Deleting Unneeded Files Question: The project outline: It’s not uncommon for a few unneeded but humongous files or folders to take up the bulk of the space on your hard drive. If you’re trying to free up room on your computer, you’ll get the most bang for your buck by deleting the most massive of the unwanted files. But first you have to find them. Write a program that walks through a folder tree and searches for exceptionally large files or folders—say, ones that have a file size of more than 100MB. (Remember that to get a file’s size, you can use os.path.getsize() from the os module.) Print these files with their absolute path to the screen. My solution: import send2trash from pathlib import Path def main(): while True: basedir = Path(input("Please enter a folder to search: ")) if not basedir.is_dir(): print("This path does not exist.") continue else: for filename in basedir.rglob("*"): if filename.stat().st_size > 100000000: print(f"(unknown) is {filename.stat().st_size} bytes, sending to trash...") send2trash.send2trash(filename) if __name__ == '__main__': main() Answer: Write a program that walks through a folder tree and searches for exceptionally large files ... Your code only scans the single folder. It doesn't walk a folder tree. ... or folders That is perhaps the trickiest part, and the problem statement is very unclear, perhaps intentionally. The size of a folder, as reported by stat(), is usually very small. The task - likely - is to sum the sizes of files in the entire tree under a given folder, and report it if necessary. But first you have to find them. The task is to find those files. Don't send2trash.send2trash them right away. In any case, os.walk is your friend.
{ "domain": "codereview.stackexchange", "id": 43989, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python", "url": null }
c++, performance, beginner, time-limit-exceeded Title: Print the count of input numbers less than each of mutiple values "q" Question: I was solving a question in a competition I joined that asked for a program that calculates how many numbers are less than \$q\$. I solved the question but didn't get the full mark because the code was too slow. The input is 4 lines, the 1st contains \$n\$ (the length of the array), the second contains the elements of the array, the third contains how many numbers it wants the program to find out how many numbers are less than it and the fourth line contains the numbers. I remember some of the variables can be as big as \$10^9\$. the output is the number of numbers less than q with a space after it. there can be multiple "\$q\$"s. here is the code I entered during the competition: #include <iostream> using namespace std; int main() { int n,Q; cin>>n; int x[n]; for(int i=0;i<n;i++){ cin>>x[i]; } cin>>Q; for(int i=0;i<Q;i++){ int q,count=0; cin>>q; for(int j=0;j<n;j++){ if(q>x[j]){ count++; } } cout<<count<<' '; } } I have qualified for the next round and decided to revisit the question as revision so I came up with this code, unfortunately I don't know how to test how fast it is: #include <iostream> #include <bits/stdc++.h> using namespace std; int main() { int n,Q,q; cin>>n; int x[n]; for(int i=0;i<n;i++){ cin>>x[i]; } sort(x,x+n); cin>>Q; for(int i=0;i<Q;i++){ cin>>q; for(int j=0;j<n;j++){ if(q<=x[j]){ cout<<j<<' '; j+=n; } } } } Answer: Stylistically you should change j += n to a break for exiting the loop and make the query variable local. #include <iostream> using namespace std;
{ "domain": "codereview.stackexchange", "id": 43990, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, performance, beginner, time-limit-exceeded", "url": null }
c++, performance, beginner, time-limit-exceeded int main() { int n,Q; cin>>n; int x[n]; for(int i=0;i<n;i++){ cin>>x[i]; } sort(x,x+n); cin>>Q; for(int i=0;i<Q;i++){ int q; cin>>q; for(int j=0;j<n;j++){ if(q<=x[j]){ cout<<j<<' '; break; } } } } Your code is still \$O(n*q)\$ since it may still read every element of the array. It is better to use binary search, which takes logarithmic time. It now takes \$O(n \log n + q \log n)\$ time. #include <iostream> #include <algorithm> using namespace std; int main() { int n,Q; cin>>n; int x[n]; for(auto &num : x) cin>>num; sort(x,x+n); cin>>Q; for(int i=0;i<Q;i++){ int q; cin>>q; /* get the index of the first number that is not less than q, or n if none exists */ int ans = lower_bound(x, x+n, q) - x; cout << ans << endl; } }
{ "domain": "codereview.stackexchange", "id": 43990, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, performance, beginner, time-limit-exceeded", "url": null }
python, classes, singleton Title: Alpha finance trader singleton Question: I used to write singleton in Python, which actually Alpha only need to initialize once and the trading bot will run forever. For example in main.py: from time import sleep import asyncio from collections import deque from typing import Dict, Union, Any, List import aiohttp from lib.utils import Config from pprint import pprint from logs.logger import logger from binance.websocket.spot.websocket_client import SpotWebsocketClient as Client from binance.um_futures import UMFutures from redisUtil import redisConsumer from bybit.usdt_perpetual import USDT_Pepertual class Alpha(object): def __init__(self) -> None: self._config = Config() self.um_futures_client = UMFutures() self.my_client = Client() self.strategy_name = self._config.get_config_value("strategy_name") self.reference_window = self._config.get_config_value("reference_window") self.stop_loss = self._config.get_config_value("stop_loss") self.bet_size = self._config.get_config_value("bet_size") self.symbol = self._config.get_config_value("symbol") self.period = self._config.get_config_value("period") self.redis_client_consumer = redisConsumer(host=self._config.get_config_value("host"), port=self._config.get_config_value("port"), password=self._config.get_config_value("password")) self.subscription_list = self.update_subscription_list() self.current_close_price = 0 # for storing the close price in a fixed length list self.close_price_list = deque([], maxlen=self.reference_window + 1) # for storing the funding rate in a fixed length list self.funding_rate_list = deque([], maxlen=self.reference_window + 1) self.bybit_client = USDT_Pepertual() self.__main_logger = logger("logging")
{ "domain": "codereview.stackexchange", "id": 43991, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, classes, singleton", "url": null }
python, classes, singleton self.bybit_client = USDT_Pepertual() self.__main_logger = logger("logging") def update_subscription_list(self) -> List[dict]: subscription_list = self.redis_client_consumer.get_subscript(strategy_name=self.strategy_name) return subscription_list def subscribe_binance_websocket_kline(self): self.my_client.start() self.my_client.kline(symbol=self.symbol, id=1, interval=self.period, callback=self.message_handler) def message_handler(self, message: dict) -> None: if message: if message.get('k', {}).get('x'): close_price = float(message.get('k', {}).get('c')) self.executor(close_price=close_price) def generate_trading_signal(self) -> List[str]: signal_list = [] # delete because contain private trading logic, return list := ["OPEN_LONG", "CLOSE_SHORT]/ ["OPEN_SHORT"] return signal_list def get_funding_rate(self) -> str: funding_rate = self.um_futures_client.mark_price(symbol=self.symbol).get("lastFundingRate") return funding_rate def store_data(self, close_price: float) -> None: funding_rate = self.get_funding_rate() self.close_price_list.append(float(close_price)) self.funding_rate_list.append(float(funding_rate)) def executor(self, close_price: float) -> None: self.store_data(close_price=close_price) if len(self.close_price_list) == self.close_price_list.maxlen: signal_list = self.generate_trading_signal() if signal_list: self.trade(signal_list=signal_list)
{ "domain": "codereview.stackexchange", "id": 43991, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, classes, singleton", "url": null }
python, classes, singleton def _dispatch_request(self, signal: str) -> Dict[str, Union[str, Dict[str, Any]]]: match signal: case "OPEN_LONG": request_detail = self.bybit_client.open_market_long() case "OPEN_SHORT": request_detail = self.bybit_client.open_market_short() case "CLOSE_LONG": request_detail = self.bybit_client.close_market_long() case "CLOSE_SHORT": request_detail = self.bybit_client.close_market_short() return request_detail def add_size_to_client_list(self, request_dict: dict, position_dict_list: list) -> list: current_price = self.close_price_list[-1] reduce_only = request_dict.get("body", {}).get("reduce_only") if reduce_only == False: for subscription in position_dict_list: lot_size = int( (float(subscription["invest_amount"]) / current_price * self.bet_size / 100) * 1000) / 1000 subscription.update({"size": lot_size}) return position_dict_list def _dispatch_order_pre_condition(self, request_dict: dict, request_detail: dict) -> bool: reduce_only = request_dict.get("body", {}).get("reduce_only") if isinstance(reduce_only, bool): side = request_dict["body"]["side"] if reduce_only: if side == "Buy": return float(request_detail.get("size", 0)) < 0 elif side == "Sell": return float(request_detail.get("size", 0)) > 0 else: return float(request_detail.get("size", -1)) == 0
{ "domain": "codereview.stackexchange", "id": 43991, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, classes, singleton", "url": null }
python, classes, singleton def _order_list_filtration(self, request_dict: dict, position_dict_list: list) -> List[dict]: filtered_order_list = [request_detail for request_detail in position_dict_list if self._dispatch_order_pre_condition(request_dict=request_dict, request_detail=request_detail)] final_order_list = self.add_size_to_client_list(request_dict=request_dict, position_dict_list=filtered_order_list) return final_order_list def run(self) -> None: try: self.subscribe_binance_websocket_kline() while True: self.update_subscription_list() sleep(5) except BaseException as e: self.__main_logger.error(e) async def execute_task(self, request_dict: dict, subscription_list: list = None): if subscription_list is None: subscription_list = self.subscription_list async with aiohttp.ClientSession() as session: all_response = await asyncio.gather(*[self.bybit_client.fetch( session=session, subscription_detail=request_detail, request_dict=request_dict ) for request_detail in subscription_list]) return all_response def logging_log(self, ret_msgs: list) -> None: for msg_dict in ret_msgs: logger(api_key=msg_dict.get("API_key")).info(msg=msg_dict.get("log_info"))
{ "domain": "codereview.stackexchange", "id": 43991, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, classes, singleton", "url": null }
python, classes, singleton def trade(self, signal_list: list) -> None: for signal in signal_list: request_dict = self._dispatch_request(signal=signal) # gather all client pre-order position status, with or without position position_dict_list = asyncio.run(self.execute_task(request_dict=self.bybit_client.my_position())) pprint(f"{position_dict_list=}") # if client have long position & signal is LONG, ignore it etc final_order_list = self._order_list_filtration(request_dict=request_dict, position_dict_list=position_dict_list) pprint(f"{final_order_list=}") # execute the LONG/SHORT order, return a list of response message ret_msgs = asyncio.run(self.execute_task(request_dict=request_dict, subscription_list=final_order_list)) # pprint(request_dict) pprint(f"{ret_msgs=}") # logging self.logging_log(ret_msgs=ret_msgs) if __name__ == "__main__": alpha = Alpha() alpha.run() However, after I read something about singleton pattern in Python: Python programmers almost never implement the Singleton Pattern. (Gang of Four book) How should I improve the code in order to avoid the singleton pattern and handle a lot of original instance variables and object instances? Or anything else I could improve in this script? The purpose of this script is to execute a trading bot while receiving data, analyzing signals, and executing trades. Answer: You're not using the singleton pattern since there is no logic that prevents you from constructing 2 objects of that type. Creating and using only one object is completely fine. Notable examples are: app = Flask(__name__) and window = tkinter.Tk() Classes are not only a way to create multiple entities of the same type, but also an encapsulation mechanism.
{ "domain": "codereview.stackexchange", "id": 43991, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, classes, singleton", "url": null }
python, classes, singleton Your class does too many things. You should split it into smaller ones following single-responsibility principle. Things to make separate classes for: Logger Signal Manager ... funding_rate = self.um_futures_client.mark_price(symbol=self.symbol).get("lastFundingRate") return funding_rate This can be shortened to one string by not declaring a name and straight up returning: return self.um_futures_client.mark_price(symbol=self.symbol).get("lastFundingRate") This problem is present at least 4 times in the code, it just loads the brain of the reader with extra names to keep track of for no reason. Also um_futures_client, funding_rate_list and get_funding_rate() are never actually used by your logic and can be deleted. if subscription_list is None: subscription_list = self.subscription_list This can be avoided by writing an overload for your method that doesn't take subscription_list parameter: async def execute_task(self, request_dict: dict): execute_task(request_dict=dict, subscription_list=self.subscription_list) All your comments explain what the code does instead of explaining why. If the code is well written we can figure out what it does just by looking at it, this type of comments is redundant. You use __ and _ prefixes for some of your fields and methods. These are used as access modifiers, __a means that it can only be used inside this class and _b means that it can be used in all of its subclasses, but not from outside. You use them seemingly randomly which is confusing. Thumbs up for typing annotations and if __name__ == "__main__": Good luck! P.S. The book you are referring to came out almost 30 years ago when python was in version 1.0. I'd recommend this site if you want to learn the topic.
{ "domain": "codereview.stackexchange", "id": 43991, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, classes, singleton", "url": null }
python, csv Title: Read a large compressed CSV file and aggregate/process rows by field Question: I have as input a potentially large CSV file (gzip compressed) with a known structure. I don't know in advance the size of this file, but let's say it can't fit in memory. The rows in this CSV are ordered like the following: key1, …other fields key1, key1, key1, key2, key2, key3, key4, key4, ⋮ They are ordered by the value in the first column (let's call it key), but it is unknown how many rows are there for each distinct key. I need to scan the whole file and process only the first N rows matching each key (there could be more than N rows for some of the keys). These N rows per key can be processed in memory. I came up with this code, but I don't like it very much. It is a bit messy: import gzip def process_rows(key rows): print(f'Processed rows for key {key}') def main(file_path, N=1000): with gzip.GzipFile(filename=file_path) as file: curr_key = None rows_to_process = [] for line in file: line = line.decode().strip() if len(line) == 0: continue fields = line.split(',') [key, field2, field3] = fields if curr_key is not None: if curr_key != key or (len(rows_to_process) > 0 and len(rows_to_process) % N == 0): process_rows(key, rows_to_process) # Find next key if needed while curr_key == key: line = next(file, None) if line is None: return # End of file, exit line = line.decode().strip() if len(line) < 1: continue fields = line.split(',') [key, field2, field3] = fields print('Found next key', key)
{ "domain": "codereview.stackexchange", "id": 43992, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, csv", "url": null }
python, csv print('Found next key', key) # Reset rows to process rows_to_process = [] curr_key = key rows_to_process.append([key, field2, field3]) # Flush trailing data if (len(rows_to_process) > 0): process_rows(key, rows_to_process) Is there a cleaner way to do this? Answer: Minor niggles Testing len() > 0 is over-wordy. If we want to test whether a string or list is non-empty, its truthiness directly indicates that (see below, if not line:). Structure and design There's a lot in main(), and it doesn't lend itself to unit-testing very well. I would split its responsibilities, probably splitting into a generator that emits each line as an array (or more likely, a tuple) of fields, and a consumer that batches your N rows: def rows(input): ''' Generator function yielding the first three fields of each line of input. ''' for line in input: line = line.decode().strip() if not line: continue fields = line.split(',') # assumes no quoted ',' in fields yield fields[0:2] For the grouping of up to N lines, we can take advantage of the standard library functions in itertools to eliminate most of the logic you wrote. Specifically, itertools.groupby() to get an iterator for each group of rows, and itertools.islice() to take the first N elements from each of those: import gzip import itertools import operator def main(file_path, N=1000, func=process_rows): groups = itertools.groupby(rows(gzip.GzipFile(filename=file_path)), operator.itemgetter(0)) for (key, values) in groups: func(key, itertools.islice(values, N)) I tested the code with a modified process function: def process_rows(key, rows): print(f'Processed {len(list(rows))} row(s) for key {key}') and this input: alpha,0,1 alpha,0,2 alpha,0,3 alpha,1,4 beta,2,6 gamma,3,0 gamma,4,0 gamma,5,0 gamma,6,0
{ "domain": "codereview.stackexchange", "id": 43992, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, csv", "url": null }
python, csv The output is: Processed 3 row(s) for key alpha Processed 1 row(s) for key beta Processed 3 row(s) for key gamma
{ "domain": "codereview.stackexchange", "id": 43992, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, csv", "url": null }
javascript, reinventing-the-wheel, ecmascript-6, animation, timer Title: Creating custom setTimeout function using requestAnimationFrame loop Question: I created a function using requestAnimationFrame loop that works pretty much like the plain JS setTimeout function (except that a custom starting time can be optionally given for the duration, and it returns the finish time). I wonder whether this is an ideal way of writing it. In particular, I had some issues with self-references while defining the main object, so I had to define them separately. (Maybe it would make more sense to create a class, but I'd like to keep it as simple as possible.) const DT = { // initiate RTOloop ("setTimeout" via RAF loop) // actually defined below, because of self-reference setRTO: undefined, // RTOloop actually defined below, because of self-reference RTOloop: undefined, // start time for setRTOLoop RTOstart: undefined, // duration for setRTOLoop RTOduration: undefined, // callback function for setRTOLoop RTOcallback: undefined, // now: gets the appropriate version of "performance.now()" // normally available in all modern browsers, but it can resort to the still precise Date() // see https://developer.mozilla.org/en-US/docs/Web/API/Performance/now now: function() { let performance = window.performance || {}; performance.now = (function() { return ( performance.now || performance.webkitNow || performance.msNow || performance.oNow || performance.mozNow || function() { return new Date().getTime(); } ); })(); return performance.now(); } }; // add the RAF Time-Out (RTO) initiation function definition DT.setRTO = function(callback, duration, start = DT.now()) { DT.RTOstart = start; DT.RTOduration = duration; DT.RTOcallback = callback; DT.RTOloop(start); };
{ "domain": "codereview.stackexchange", "id": 43993, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "javascript, reinventing-the-wheel, ecmascript-6, animation, timer", "url": null }
javascript, reinventing-the-wheel, ecmascript-6, animation, timer // add the RAF setRTO function definition DT.RTOloop = function(timestamp) { if (timestamp - DT.RTOstart < DT.RTOduration) { requestAnimationFrame((RAFtimestamp) => DT.RTOloop(RAFtimestamp)); } else { DT.RTOcallback(timestamp, DT.RTOstart); DT.RTOcallback = undefined; DT.RTOstart = undefined; DT.RTOduration = undefined; } }; Object.seal(DT); // example use DT.setRTO((stamp, start) => console.log(stamp - start), 500); (Edit: This function is intended to be called from within an initial RAF callback, hence the timing is expected to be in sync with repaints.) Answer: I must point out that I can see no usefulness in this code at all. Its acts like a timer with a randomized 16.66...ms error and provides no frame sync at all. requestAnimationFrame does not sync with display frames, rather it tells the compositor to hold visual changes made in the rAF callback (stored in back buf) until safe to move to display buffer (during VSync). Most browsers these days also do this with setTimeout and setInterval Back to the review. General points Better to implement DT via a factory rather than flat code (see rewrite) Not sure why you define setRTO and RTOLoop outside the object's (DT) declaration. Good to see Object.seal though my preference is to use Object.freeze. This forces the exposed object to act more like an interface, allowing only getters and setters to modify state and ensure object integrity. Avoid indirect calls as they are just a waste of source code characters and CPU cycles. Eg requestAnimationFrame((RAFtimestamp) => DT.RTOloop(RAFtimestamp)); can be requestAnimationFrame(DT.RTOloop); You are not guarding your object's state. callback is not vetted as a function and thus can throw in your code. start and duration are not checked to be numbers and within a valid range. Always ensure your object has a valid state.
{ "domain": "codereview.stackexchange", "id": 43993, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "javascript, reinventing-the-wheel, ecmascript-6, animation, timer", "url": null }
javascript, reinventing-the-wheel, ecmascript-6, animation, timer Undefined unwanted behaviour There is nothing preventing a new call to setRTO starting a new rAF loop while an existing loop is running. This could cause a serious resource drain as more and more loops wait to terminate. You need to ensure existing loops will terminate when expected, or prevent new loops starting before the current loop has ended. The rewrite (below) uses the semaphore running within DT's closure to prevent more than one loop running at a time. Use modern JS. At first I thought you were aiming for legacy browser support, however you have modern syntax (arrow function in DT.RTOLoop and default parameter in DT.setRTO) which negates all of the old style code, and I question the need to define performance.now, if the browser supports arrow function, performance.now is a given. Modern JS points. More arrow functions. Object function shorthand declaration. { now(){} } rather than { now: function(){} } ?? rather than ||. Eg performance = window.performance ?? {} Rewrite Rewrite creates a single instance of DT via a IIFE using a factory pattern. DT is simplified to one property setRTO The behaviour is vetted against invalid parameters and will do nothing if not given good data. Will not start a new timer until existing one (if any) has completed.
{ "domain": "codereview.stackexchange", "id": 43993, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "javascript, reinventing-the-wheel, ecmascript-6, animation, timer", "url": null }
javascript, reinventing-the-wheel, ecmascript-6, animation, timer const DT = (() => { const isNum = val => !isNaN(val) && val < Infinity; // Special case Infinity is not a number var start, duration, callback, running; function onFrame(time) { time - start < duration ? requestAnimationFrame(onFrame) : (running = false, callback(time, start)); } return Object.freeze({ setRTO(cb, duration_, start_ = performance.now()) { if (cb instanceof Function && isNum(duration_) && isNum(start_) && !running) { callback = cb; start = start_; duration = duration_; running = true; onFrame(start); } else { // todo Fail code here } } }); })(); DT.setRTO((stamp, start) => console.log(stamp - start), 500); DT.setRTO((stamp, start) => console.log(stamp - start), 100); // this timer will not start
{ "domain": "codereview.stackexchange", "id": 43993, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "javascript, reinventing-the-wheel, ecmascript-6, animation, timer", "url": null }
javascript, html Title: Random music plays when one stops Question: I have this script and I want to ask if it's good/optimal and if it's not how should I change it? What it's doing is: when the website loads, a track from an array of music starts to play once the user clicks (the click prevents play() from being blocked because the user didn't interact yet), and when a track stops a new one plays. In the future I will have many songs in the array. I thought maybe I could change it so it selects from a folder (like a physical folder in Windows) is this possible? console.logs for debugging var music = document.getElementById("BackgroundM"); var source = document.getElementById("source"); var musicArray = [ "/music/Cruel.mp3", "/music/Ragnarok.mp3", "/music/Berserk.mp3", "/music/Wrath.mp3", "/music/Immaculate.mp3", "/music/BassSlut.mp3" ] music.volume = 0.05; var musicSelect = function() { if(music.paused) { var rand = Math.floor(Math.random() * musicArray.length) musicArray[rand] source.src = musicArray[rand] music.load(); music.play(); console.log("Playing " + source.src) } } document.addEventListener('click', function() { console.log(music.paused) musicSelect() }) music.addEventListener('pause', function(){ console.log("Music is paused") musicSelect() }) Answer: I saw that you were advised to bring this question over here from Stack Overflow and I think I understand what you are asking: Is the code optimal and can it be improved i.e. made faster due to the concern that you will have a much bigger music selection than in the current array. What you are currently doing is: storing the file paths of music tracks in an array (to use as the music src) creating a random number within the limits of the array length selecting the music track using that random number as the index of the array
{ "domain": "codereview.stackexchange", "id": 43994, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "javascript, html", "url": null }
javascript, html As it is, that is pretty 'optimal' arguably, it may be quicker to access data from an object literal musicStore = {0: "/music/Cruel.mp3"} but, unless you have a phenomenal amount of entries in the array / store then the speed difference will be unnoticeable. As for your future aspirations for this app and the possibilities of selecting a random file from a directory (if this was what you meant) then no! It is not possible. JavaScript has no access to the file system on the client or server. The closest you can get to the file system is through an HTML input type="file" element which gives the user the ability to select a file, or selection of files from their local HDD, which will not achieve what you want.
{ "domain": "codereview.stackexchange", "id": 43994, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "javascript, html", "url": null }
python, performance, algorithm, physics, markov-chain Title: Monte Carlo simulation for the harmonic oscillator Question: Is there any improvement that can be made to the following code, written to simulate the harmonic oscillator in the path integral formulation with Monte Carlo methods? #STRAIGHT-LINE INITIALIZATION def cold_path(N): return np.zeros(N) #RANDOM PATH INITIALIZATION def hot_path(N): return np.random.uniform(-1,1,N) #MONTE CARLO SIMULATION def Metropolis_HO(start,N,eta,delta,ntimes): #Set path at initial step if start=='cold': path=cold_path(N) elif start=='hot': path=hot_path(N) else: raise Exception('Choose either hot or cold starting path configuration.') #Initialize arrays of observables obs1=np.zeros(ntimes) obs2=np.zeros(ntimes) #Useful constants c1=1./eta c2=(1./eta+eta/2.) #Iterate loop on all sites for i in range(ntimes): for j in range(N): for repeat in range(3): #Set y as j-th point on path y=path[j] #Propose modification y_p=np.random.uniform(y-delta,y+delta) #Calculate accept probability force=path[(j+1)%N]+path[(j-1)] p_ratio=c1*y_p*force-c2*(y_p**2)-c1*y*force+c2*(y**2) #Accept-reject if np.random.rand()<min(np.exp(p_ratio),1): path[j]=y_p #Average of y^2 on the path obs1[i]=np.average(path**2) #Average of Delta y^2 on the path temp=0. for k in range(N): temp+=(path[k]-path[(k+1)%N])**2 obs2[i]=temp/N #Get rid of non-equilibrium states and decorrelate n_corr=1 n_term=10000 obs1=obs1[n_term:ntimes:n_corr] obs2=obs2[n_term:ntimes:n_corr] return obs1,obs2
{ "domain": "codereview.stackexchange", "id": 43995, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, algorithm, physics, markov-chain", "url": null }
python, performance, algorithm, physics, markov-chain Answer: I don't think there's a lot of value in the "path start" mechanism. Just pass in a starting path vector, and assume N to be the size of this vector. Don't capitalise method names in Python. This form of comment: #MONTE CARLO SIMULATION def Metropolis_HO(start,N,eta,delta,ntimes): should actually be a docstring: def metropolis_ho(path: np.ndarray, eta: float, delta: float, ntimes: int) -> tuple[ np.ndarray, np.ndarray, ]: """Monte Carlo Simulation""" Add PEP484 typehints to your signatures (example above). obs1=np.zeros should actually use np.empty(). This: c1=1./eta c2=(1./eta+eta/2.) is really just c1 = 1/eta c2 = c1 + eta/2 repeat is unused, so name it _ (the convention for unused loop variables). This: p_ratio=c1*y_p*force-c2*(y_p**2)-c1*y*force+c2*(y**2) is clearer as p_ratio = c1*force*(y_p - y) + c2*(y*y - y_p*y_p) In this expression, the min is not necessary: if np.random.rand()<min(np.exp(p_ratio),1): because rand() itself ranges from 0 through 1, and so an exp producing a value above 1 will not make the behaviour any different. This expression: obs1[i]=np.average(path**2) can be re-expressed as a self-dot product which might help marginally with speed; it will look like obs1[i] = np.dot(path, path)/n Avoid this loop: temp=0. for k in range(N): temp+=(path[k]-path[(k+1)%N])**2 obs2[i]=temp/N Instead, use the same self-dot product trick, but on a roll()ed array: diff = path - np.roll(path, -1) obs2[i] = np.dot(diff, diff)/n Your non-equilibrium filter is trouble. It arbitrarily starts the output at element 10,000 when that should be parametric (especially for callers that use a small value of ntimes). Since n_corr is always 1, delete it. And since the slice always terminates at the end of the array, remove that, too. That leaves us with n_term = equilibrium_start obs1 = obs1[n_term:] obs2 = obs2[n_term:]
{ "domain": "codereview.stackexchange", "id": 43995, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, algorithm, physics, markov-chain", "url": null }
python, performance, algorithm, physics, markov-chain Add tests, at least for regression. This ties in with another important concept: even though your algorithm relies on random behaviour, it should be repeatable based on a seed. Best to pass in the newer Numpy random generator interface. If you can turn the loop-pair of for j in range(n): / for _ in range(3): inside out so that j becomes the innermost index, then this can be further vectorised. If not, you're stuck. Suggested import numpy as np from numpy.random import default_rng, Generator def metropolis_ho( path: np.ndarray, rand: Generator, eta: float, delta: float, ntimes: int, equilibrium_start: int = 10_000, ) -> tuple[ np.ndarray, # observables 1 np.ndarray, # observables 2 ]: """Monte Carlo Simulation""" n = len(path) # Initialize arrays of observables obs1 = np.empty(ntimes) obs2 = np.empty(ntimes) # Useful constants c1 = 1/eta c2 = c1 + eta/2 # Iterate loop on all sites for i in range(ntimes): for j in range(n): for _ in range(3): # Set y as j-th point on path y = path[j] # Propose modification y_p = rand.uniform(y - delta, y + delta) # Calculate accept probability force = path[(j + 1) % n] + path[j - 1] p_ratio = c1*force*(y_p - y) + c2*(y*y - y_p*y_p) # Accept-reject if rand.random() < np.exp(p_ratio): path[j] = y_p # Average of y^2 on the path obs1[i] = np.dot(path, path)/n # Average of Delta y^2 on the path diff = path - np.roll(path, -1) obs2[i] = np.dot(diff, diff)/n # Get rid of non-equilibrium states and decorrelate n_term = equilibrium_start obs1 = obs1[n_term:] obs2 = obs2[n_term:] return obs1, obs2 def main() -> None: hot = True n = 400 rand: Generator = default_rng(seed=0)
{ "domain": "codereview.stackexchange", "id": 43995, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, algorithm, physics, markov-chain", "url": null }
python, performance, algorithm, physics, markov-chain def main() -> None: hot = True n = 400 rand: Generator = default_rng(seed=0) if hot: start = rand.uniform(low=-1, high=1, size=n) else: start = np.zeros(n) obs1, obs2 = metropolis_ho( path=start, rand=rand, eta=0.6, delta=0.1, ntimes=50, equilibrium_start=10, ) assert np.allclose( obs1, np.array([ 0.33481635, 0.33975680, 0.33848696, 0.33565933, 0.34104504, 0.33577587, 0.34060038, 0.34019111, 0.34048678, 0.34476147, 0.34417650, 0.34058942, 0.34307716, 0.34851236, 0.33542469, 0.33176036, 0.32263985, 0.33208625, 0.33240874, 0.32467590, 0.32252395, 0.32424555, 0.32694504, 0.33374541, 0.32667225, 0.32566617, 0.31967787, 0.32302223, 0.31925758, 0.32326829, 0.32998249, 0.33500381, 0.34054321, 0.34033330, 0.33718049, 0.33962281, 0.33585350, 0.34389458, 0.34816599, 0.34695869, ]), ) assert np.allclose( obs2, np.array([ 0.60471689, 0.60145567, 0.59598397, 0.57394761, 0.58472186, 0.56454233, 0.56965071, 0.55589532, 0.55379822, 0.54761523, 0.5447842 , 0.54175528, 0.53238427, 0.53223468, 0.51024062, 0.48941357, 0.47810786, 0.50182631, 0.48789695, 0.47479243, 0.46272811, 0.45823329, 0.45743116, 0.46647511, 0.45989627, 0.46653626, 0.45287477, 0.44861666, 0.43814114, 0.44599284, 0.44905000, 0.46638112, 0.46607791, 0.45834453, 0.44513739, 0.44357120, 0.43453803, 0.43544248, 0.44287501, 0.42685933, ]), ) if __name__ == '__main__': main()
{ "domain": "codereview.stackexchange", "id": 43995, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, algorithm, physics, markov-chain", "url": null }
python Title: Automate the Boring Stuff Chapter 10 - Deleting Unneeded Files Alternative Question: This is another solution to a previously asked question. The project outline: It’s not uncommon for a few unneeded but humongous files or folders to take up the bulk of the space on your hard drive. If you’re trying to free up room on your computer, you’ll get the most bang for your buck by deleting the most massive of the unwanted files. But first you have to find them. Write a program that walks through a folder tree and searches for exceptionally large files or folders—say, ones that have a file size of more than 100MB. (Remember that to get a file’s size, you can use os.path.getsize() from the os module.) Print these files with their absolute path to the screen. My solution: import send2trash, os import pyinputplus as pyip def get_size(basedir, find_size): large_folders = {} large_files = {} for folder, subfolders, filenames in os.walk(basedir): folder_size = 0 for filename in filenames: filepath = os.path.join(folder, filename) folder_size += os.path.getsize(filepath) file_size = os.path.getsize(filepath) if file_size > find_size: large_files[filepath] = file_size if folder_size > find_size: large_folders[folder] = folder_size return large_folders, large_files
{ "domain": "codereview.stackexchange", "id": 43996, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python", "url": null }
python def main(): while True: basedir = input("Please enter a folder to search: ") if not os.path.exists(basedir): print("This path does not exist.") continue else: find_size = pyip.inputInt(prompt="Enter the size in bytes of the folders and files to file: ") large_folders, large_files = get_size(basedir, find_size) print("Large folders:") for folder, size in large_folders.items(): print(f"{folder} is {size} bytes") print("Large files:") for file, size in large_files.items(): print(f"{file} is {size} bytes") while True: deletedir = input("Enter the directory of the file or folder you want to delete, or enter nothing to choose another folder to search: ") if not deletedir: break elif not os.path.exists(deletedir): print("This path does not exist.") continue else: print("Deleting path...") send2trash.send2trash(deletedir) continue if __name__ == '__main__': main() Answer: Nothing serious here, you improved it a lot since the last time. for folder, subfolders, filenames in os.walk(basedir): can be for folder, _, filenames in os.walk(basedir): since you never use subfolders, there is no need to load the reader with yet another name. if not os.path.exists(basedir): print("This path does not exist.") continue else: else is redundant and adds an extra level of nesting. if not deletedir: break elif not os.path.exists(deletedir): Again no need to write elif, just if will fit. continue else:
{ "domain": "codereview.stackexchange", "id": 43996, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python", "url": null }
python Again no need to write elif, just if will fit. continue else: Same story. Try adding blank lines after control statements like break and continue to see where the execution can end prematurely. Function get_size() doesn't get any size, it gets dicts of large files and folders and should be named accordingly, e.g. get_large_files(). find_size, again, doesn't tell what the name is for; it could be min_size. Overall pretty clean code, keep it up!
{ "domain": "codereview.stackexchange", "id": 43996, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python", "url": null }