text stringlengths 1 2.12k | source dict |
|---|---|
c++, c++11, homework
/**
 * @brief Recursively sorts arr[low..high] in place using QuickSort.
 * @param arr  Array being sorted
 * @param low  First index of the range to sort
 * @param high Last index of the range to sort
 */
void quick_sort_helper(int* arr, int low, int high) {
    if (low >= high) {
        return; // ranges of zero or one element are already sorted
    }
    // Place the pivot into its final position, then sort each side of it.
    int pivot_pos = partition(arr, low, high);
    quick_sort_helper(arr, low, pivot_pos - 1);
    quick_sort_helper(arr, pivot_pos + 1, high);
}
/**
 * @brief Sorts an entire array in place using the QuickSort algorithm.
 * @param arr  The array to be sorted
 * @param size Number of elements in arr
 */
void quick_sort(int* arr, int size) {
    // Delegate to the recursive helper over the full index range [0, size-1].
    quick_sort_helper(arr, 0, size - 1);
}
/**
 * @brief Prints an integer array to stdout in the form "[a, b, c]".
 * @param arr  Array of integers to print
 * @param size Number of elements in arr (an empty array prints "[]")
 */
void print_arr(int *arr, int size) {
    std::cout << "[";
    const char* separator = ""; // nothing before the first element
    for (int i = 0; i < size; i++) {
        std::cout << separator << arr[i];
        separator = ", "; // every subsequent element is preceded by ", "
    }
    std::cout << "]" << std::endl;
}
/**
 * @brief Checks whether the array is in non-decreasing (sorted) order.
 *
 * Compares each adjacent pair once; empty and one-element arrays are
 * reported as sorted because the loop body never executes.
 * @param arr Array of integers
 * @param size Number of elements in arr
 * @return true if arr[0..size-1] is non-decreasing, false otherwise
 */
bool sorted(int *arr, int size) {
    for (int i = 1; i < size; i++) {
        if (arr[i] < arr[i - 1]) { // an element smaller than its predecessor breaks the order
            return false;
        }
    }
    return true;
} | {
"domain": "codereview.stackexchange",
"id": 44766,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, c++11, homework",
"url": null
} |
c++, c++11, homework
/**
* @brief Measures the execution time of a sorting algorithm on arrays of different sizes.
* @param sorting_function The sorting function to be measured.
*/
void measure_sort(void (*sorting_function)(int*, int)) {
int sizes[] = {10, 100, 1000, 10000, 100000}; // sizes of the array
int const MAX = 100000;
int const SMALL = 10;
std::random_device rd; // a seed source for the random number engine
std::mt19937 gen(rd()); // mersenne_twister_engine seeded with rd()
std::uniform_int_distribution<> distrib(1, MAX);
for (auto i = 0; i < 5; i++) {
int* arr = new int[sizes[i]];
for(auto j = 0; j < sizes[i]; j++) { //fill array with random numbers
arr[j] = distrib(gen);
}
if (sizes[i] == SMALL) { //print og array before sorting
std::cout << "\n[Original]: "; // << std::setw(2);
print_arr(arr, sizes[i]);
}
//{
/**
* @note Measure execution time
* @typedef std::chrono::high_resolution_clock::time_point as clock for better readability
* @typedef std::chrono::microseconds as ms for better readability
*/
//}
typedef std::chrono::high_resolution_clock::time_point clock;
typedef std::chrono::microseconds ms; | {
"domain": "codereview.stackexchange",
"id": 44766,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, c++11, homework",
"url": null
} |
c++, c++11, homework
clock start = std::chrono::high_resolution_clock::now();
sorting_function(arr, sizes[i]);
clock end = std::chrono::high_resolution_clock::now();
ms duration = std::chrono::duration_cast<ms>(end - start);
long long durationCount = duration.count();
if(sizes[i] == SMALL) {
std::string const SPACE = " "; //width const to align output
std::cout << std::setw(4) << "[Sorted]:" << SPACE;
print_arr(arr, sizes[i]);
std::cout << std::endl << std::endl;
}
int const SIZE_W = 9;
int const TIME_W = 8;
int const W = 6;
std::cout << std::left << std::setw(SIZE_W) << "[size]: " << std::setw(W+1) << sizes[i] << std::left <<std::setw(TIME_W) << "[time]: " << std::setw(W) << durationCount << " [ms]" << std::endl;
// Clean up dynamically allocated memory
delete[] arr;
}
}
/**
* @brief Brains of the program, handles the logic
* @return void-type
*/
void run() {
/** @note srand seed */
std::cout << std::endl;
std::cout << "Measuring Sorting Algorithms" << std::endl;
std::cout << "\n[***** [Merge Sort] *****]" << std::endl;
measure_sort(merge_sort);
std::cout << "\n[***** [Quick Sort] *****]" << std::endl;
measure_sort(quick_sort);
std::cout << "\n[***** [Heap Sort] *****]" << std::endl;
measure_sort(heap_sort);
std::cout << std::endl;
}
/**
* @brief Main function of the program, calls run()
* @return EXIT_SUCCESS upon successful execution
*/
int main() {
std::srand(static_cast<unsigned int>(std::time(nullptr)));
run();
return EXIT_SUCCESS;
}
Please excuse some typos and spelling errors, English is not my first language and I'm really trying my best. Oh, and also, I am aware that the typedef statements are sort of useless especially when you give them a name like ms, in my mind it seemed right and I thought it improved readability. | {
"domain": "codereview.stackexchange",
"id": 44766,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, c++11, homework",
"url": null
} |
c++, c++11, homework
Answer: I will mainly review your algorithms (except the quicksort, though). Namely, you can go faster with them.
Advice 1 - heapify
In your heapify, when you sift down an element, you make 3 assignments in std::swap, so we have \$3n\$ assignments to sift \$n\$ times. You can do better: \$n + 1\$ assignments, and here is how:
/**
 * @brief Sifts arr[index] down in a binary max-heap using roughly n + 1
 *        assignments instead of 3 per level: the value being sifted is held
 *        in 'target' and written back exactly once, at its final slot,
 *        while larger children are promoted upward along the way.
 * @param arr       Array holding the heap
 * @param index     Index of the element to sift down
 * @param heap_size Number of valid elements in the heap
 */
void coderodde_sift_down(int* arr, int index, int heap_size) {
    int left_child_index = index * 2 + 1;
    int right_child_index = left_child_index + 1;
    int maximum_index = index; // index of the largest of {target, children}
    int target = arr[index];   // value being sifted; written back only once
    while (true) {
        // Compare children against 'target' (not arr[index]: that slot may
        // already hold a child value promoted on an earlier iteration).
        if (left_child_index < heap_size && arr[left_child_index] > target) {
            maximum_index = left_child_index;
        }
        if (maximum_index == index) {
            // Left child did not win; the right child only needs to beat target.
            if (right_child_index < heap_size && arr[right_child_index] > target) {
                maximum_index = right_child_index;
            }
        }
        else if (right_child_index < heap_size && arr[right_child_index] > arr[left_child_index]) {
            // Left child won; take the right child instead if it is even larger.
            maximum_index = right_child_index;
        }
        if (maximum_index == index) {
            arr[maximum_index] = target; // final resting place found
            return;
        }
        arr[index] = arr[maximum_index]; // promote the larger child one level up
        index = maximum_index;
        left_child_index = index * 2 + 1;
        right_child_index = left_child_index + 1;
    }
} | {
"domain": "codereview.stackexchange",
"id": 44766,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, c++11, homework",
"url": null
} |
c++, c++11, homework
Advice 2 - Mergesort
As most novices do while implementing mergesort, you keep allocating memory for left and right runs at each recursion level worth \$n\$ ints. (In total of \$\Theta(n \log n)\$ worth memory allocations.)
One trick you could do is to allocate (only once) a buffer array with exactly the same content as the input array, and keep alternating their roles: at the bottom recursion level, you take two adjacent runs from a source array and merge them into a target array. Then, at the next recursion level, you swap the roles of the two arrays and keep doing that until you merge two topmost runs (covering the entire array) from the source array to the target array. (Using recursion magic, we can ensure that at that point the target array is the input array we want to sort; not the buffer.)
(The entire program for running all your sorts and my mergesort + heapsort is behind this link.)
Advice 3 - sorted
You have defined the sorted function, yet you don't use it. Since the data on each run is random, to me it seems sensible to deploy sorted to make sure that algorithms don't fail. | {
"domain": "codereview.stackexchange",
"id": 44766,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, c++11, homework",
"url": null
} |
c++, mathematics, embedded
Title: Arcsine function for a real-time control program
Question: I have been developing control software for a three-phase induction motor. The main task of this C++ code is to control the torque of the motor. In one part of the algorithm (The control algorithm is executed with a period of 100 µs.), I need to calculate the arcsine function. Here is my implementation. I would appreciate your assessment.
Math.h
#include <cstdint>
/**
 * @brief Holder for the lookup-table based arcsine approximation.
 */
class Math {
public:
    /**
     * @brief Function calculates arcsine of a given argument.
     * @param x argument, \f$x\in\left<-1, 1\right>\f$
     * @return arcsine value of x
     */
    static float arcsine(float x);
private:
    // Number of samples in the lookup table (asin(i/100) for i = 0..100).
    static const uint16_t kArcSineLutSize = 101;
    static const float arcsine_lut[kArcSineLutSize];
}; // class definitions must end with a semicolon (missing in the original)
Math.cpp
/**
 * @brief Arcsine via a lookup table plus linear interpolation.
 *
 * Uses the identity arcsin(-x) = -arcsin(x) so the table only needs to
 * cover [0, 1].
 * NOTE(review): for x == 1.0 (or -1.0), index becomes 100 and the
 * interpolation reads arcsine_lut[101], one past the end of the
 * 101-entry table — confirm the caller excludes exactly ±1 or clamp
 * the index.
 * @param x argument in [-1, 1]
 * @return interpolated arcsine of x, in radians
 */
float Math::arcsine(float x)
{
    bool neg_table_value = false;
    if (x < 0) {
        // arcsin(-x) = -arcsin(x)
        x = -x;
        neg_table_value = true;
    }
    // Table is sampled at steps of 0.01, so x*100 selects the lower sample.
    uint16_t index = static_cast<uint16_t>(x * 100.0);
    // Interpolate between the two surrounding entries; (100.0*x - index)
    // is the fractional position of x within the step.
    float tmp =
        (arcsine_lut[index + 1] - arcsine_lut[index]) * (100.0 * x - index) +
        arcsine_lut[index];
    if (neg_table_value) {
        return -tmp;
    } else {
        return tmp;
    }
} | {
"domain": "codereview.stackexchange",
"id": 44767,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, mathematics, embedded",
"url": null
} |
c++, mathematics, embedded
// Lookup table for Math::arcsine: entry i holds asin(i / 100) rounded to
// 4 decimal places; the final entry is asin(1) = pi/2 (1.5708).
const float Math::arcsine_lut[Math::kArcSineLutSize] = {
    0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0701, 0.0801,
    0.0901, 0.1002, 0.1102, 0.1203, 0.1304, 0.1405, 0.1506, 0.1607, 0.1708,
    0.1810, 0.1912, 0.2014, 0.2116, 0.2218, 0.2321, 0.2424, 0.2527, 0.2630,
    0.2734, 0.2838, 0.2942, 0.3047, 0.3152, 0.3257, 0.3363, 0.3469, 0.3576,
    0.3683, 0.3790, 0.3898, 0.4006, 0.4115, 0.4225, 0.4334, 0.4445, 0.4556,
    0.4668, 0.4780, 0.4893, 0.5007, 0.5121, 0.5236, 0.5352, 0.5469, 0.5586,
    0.5704, 0.5824, 0.5944, 0.6065, 0.6187, 0.6311, 0.6435, 0.6561, 0.6687,
    0.6816, 0.6945, 0.7076, 0.7208, 0.7342, 0.7478, 0.7615, 0.7754, 0.7895,
    0.8038, 0.8183, 0.8331, 0.8481, 0.8633, 0.8788, 0.8947, 0.9108, 0.9273,
    0.9442, 0.9614, 0.9791, 0.9973, 1.0160, 1.0353, 1.0552, 1.0759, 1.0973,
    1.1198, 1.1433, 1.1681, 1.1944, 1.2226, 1.2532, 1.2870, 1.3252, 1.3705,
    1.4293, 1.5708};
Answer: Use a namespace instead of a class
There is no reason to use a class if you only have static member functions and variables. Consider creating a namespace instead:
namespace Math {
static float arcsine(float x);
} | {
"domain": "codereview.stackexchange",
"id": 44767,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, mathematics, embedded",
"url": null
} |
c++, mathematics, embedded
You don't even have to declare the private static members in the header file this way.
What are the requirements?
Why are there only 100 entries in the table? Why is every value only specified up to 4 decimals? This seems very arbitrary. The fewer points you have in your table and the less precise the points in the table are, the bigger the error is compared to the real arcsine. So you should ask yourself: what is the error budget?
A quick check of your functions versus std::asin() reveals that near 0, you have errors of about 0.06%, which is probably fine, but near ±1 you have an error of almost 2.4%. If that is not good enough for your purpose, you probably need to increase the number of points, use more precision for each point, and/or use something a bit more sophisticated than linear interpolation.
I also have to wonder why you are using a look-up table instead of just using std::asin(). If your CPU is not fast enough to do this calculation every 100 µs, then it's a good approach. If it's fast enough but it doesn't come with a standard library that contains an arcsine function, then you could consider implementing your own. I would then recommend using Chebyshev polynomials.
Calculate the table at compile-time
Instead of hardcoding the values in the look-up table, you can have the compiler generate the table for you. For example:
// Builds the arcsine lookup table at compile time (consteval requires C++20),
// replacing the hand-maintained array of constants.
// NOTE(review): this generates N entries ending at asin((N-1)/N), whereas the
// original table had 101 entries ending at asin(1) — confirm the interpolation
// that reads lut[index + 1] stays in bounds with this table size.
template<std::size_t N>
consteval auto generate_arcsine_lut() {
    std::array<float, N> values;
    for (std::size_t i = 0; i < N; ++i) {
        values[i] = std::asin(i / static_cast<float>(N)); // sample asin at i/N
    }
    return values;
}
static const auto arcsine_lut = generate_arcsine_lut<100>();
See this on godbolt.org. Note however that std::asin() is not a constexpr function, and while GCC is fine with this, Clang does not want to compile it. The way around this is to implement your own implementation of asin() (as mentioned above).
Do you really need the arcsine? | {
"domain": "codereview.stackexchange",
"id": 44767,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, mathematics, embedded",
"url": null
} |
c++, mathematics, embedded
The main task of this C++ software is to control the torque of the motor. In a part of the control algorithm (the control algorithm is executed with 100μs period.) I need to calculate the arcsine function.
You might have some mathematical formulas with an angle in them, but in the end you are not interested in that angle, just in how much you have to energize each coil of the motor. So probably you have some values coming in (from Hall sensors or some rotary encoder?), from which you create a vector of the current direction the axis is pointing in. If you just want to rotate that vector a little bit, then instead of calculating the angle, adding something to the angle, and converting that back into a vector, you can just multiply the first vector with a rotation matrix. The matrix is constant, so once precalculated you can just apply it to a vector using a few multiplication and additions.
If you want to vary the rotation you apply a lot, but if the rotation angle is always very small, then you can use the small-angle approximations of sin and cos to calculate that matrix very cheaply. Of course, keep your error budget in mind. | {
"domain": "codereview.stackexchange",
"id": 44767,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, mathematics, embedded",
"url": null
} |
python, enum
Title: How to refactor function with string inputs to select a "mode" to avoid magic strings?
Question: See code from this question about the advantages of Enums.
def print_my_string(my_string: str, mode: Literal['l', 'u', 'c']):
    """Print ``my_string`` transformed according to ``mode``.

    'l' lowercases, 'u' uppercases, 'c' capitalizes; any other mode
    raises ``ValueError``.
    """
    transforms = {
        'l': str.lower,
        'u': str.upper,
        'c': str.capitalize,
    }
    if mode not in transforms:
        raise ValueError("Unrecognised mode")
    print(transforms[mode](my_string))
There is a single function which has multiple behaviors depending on mode flag that has been passed in. It is possible to communicate to users via the documentation, exception, and source code, that valid modes are 'l', 'u', and 'c'. But in this code these are essentially magic strings.
Code like this is used without issue all over the place, see scipy least_squares. Nonetheless I'm trying to understand better or best practices.
One improvement would be to define constants within the module.
LOWER_MODE = 'l'
UPPER_MODE = 'u'
CAPITALIZE_MODE = 'c'
def print_my_string(my_string: str, mode: Literal['l', 'u', 'c']):
if mode == LOWER_MODE :
print(my_string.lower())
elif mode == UPPER_MODE:
print(my_string.upper())
elif mode == CAPITALIZE_MODE:
print(my_string.capitalize())
else:
raise ValueError("Unrecognised mode")
However, I often see Enums come up as a solution to this problem.
from enum import Enum
class StringMode(Enum):
LOWER_MODE = 'l'
UPPER_MODE = 'u'
CAPITALIZE_MODE = 'c'
def print_my_string(my_string: str, mode: StringMode):
if mode == StringMode.LOWER_MODE :
print(my_string.lower())
elif mode == StringMode.UPPER_MODE:
print(my_string.upper())
elif mode == StringMode.CAPITALIZE_MODE:
print(my_string.capitalize())
else:
raise ValueError("Unrecognised mode") | {
"domain": "codereview.stackexchange",
"id": 44768,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, enum",
"url": null
} |
python, enum
Somehow this seems nice, helpful for IDEs/code completion, etc. But there is one major downside it has for me (and which has me scratching my head when trying to use Enums to replace magic strings). If print_my_string is a public facing method then the user can't use this method without ALSO importing, understanding, and using the StringMode enum. I don't want to burden the user with this. I want to maintain the non-magic enum handling of options on the back end, but allow the users to continue to pass regular documented strings.
checks like 'l' == StringMode.LOWER do not work. Instead I have to do 'l' == StringMode.LOWER.value. This isn't great because if I'm using print_my_string internally I now can't use mode == StringMode.LOWER unless I check against BOTH StringMode.LOWER and StringMode.LOWER.value which just complicates things.
Is there a nice way to handle this that avoids using magic strings on the backend by using Enums, but allows users to pass in simple string literals on the front-end?
Right now the second method I showed using hard coded constants is feeling more attractive. But it somehow feels like what I'm doing here is almost exactly what enums are meant for, I just can't see quite how to make it work.
Answer: Use an enum mixed with the str type -- this allows your users to use either the string literal, or the enum member, and allows you to do direct comparisons in your code:
class StringMode(str, Enum):  # or StrEnum in Python 3.11+
    """String-mixin enum: members compare equal to their plain string values,
    so callers may pass either 'l' or StringMode.LOWER interchangeably."""
    LOWER = 'l'
    UPPER = 'u'
    CAPITALIZE = 'c'
def print_my_string(my_string, mode):
if mode == 'l':
print(my_string.lower())
elif mode == 'u':
print(my_string.upper())
elif mode == 'c':
print(my_string.capitalize())
else:
raise ValueError("Unrecognised mode") | {
"domain": "codereview.stackexchange",
"id": 44768,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, enum",
"url": null
} |
python, enum
This allows for backwards-compatibility, and forward progress. As an old user of your code, I don't need to change a thing; as a new user, I would do something like:
from jager import print_my_string, StringMode as SM
print_my_string('See how easy?', SM.LOWER) | {
"domain": "codereview.stackexchange",
"id": 44768,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, enum",
"url": null
} |
java, validation, rest, spring
Title: Search/Query API cross parameter check
Question: Background
I was working on a coding challenge as part of an interview process. I had to create a REST API where the user can report sensor data and query derived metrics based on several filter criteria.
Ingest
In the sensor data ingest handler I could simply annotate the expected request body entity with validation rules.
/**
 * Request body for the sensor-data ingest endpoint. Lombok's @Data/@Builder
 * generate accessors and a builder; the bean-validation annotations are
 * enforced when the controller parameter is marked @Valid.
 */
@Data
@Builder
public class SensorIngestRequest {
    // Timestamp layout shared by the validation message and the JSON (de)serializer
    final static String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss";

    // When the observation was taken; parsed from a GMT string in DATE_FORMAT
    @NotNull(message = "must be provided in the following format: " + DATE_FORMAT)
    @JsonFormat(shape = JsonFormat.Shape.STRING, pattern = DATE_FORMAT, timezone = "GMT")
    Date observedAt;

    // Identifier of the reporting device
    @NotNull(message = "must be provided and it should be a hyphened UUID v4")
    UUID deviceId;

    // Raw measured value reported by the sensor
    @NotNull(message = "must be provided")
    Integer measuredValue;

    // Unit string for measuredValue; must be non-empty
    @NotEmpty(message = "must be provided and should not be empty")
    String measureUnit;
}
then I could use @Valid to enforce validation
@PostMapping("/{sensor-name}")
ResponseEntity<?> ingestSensorData(
@PathVariable(name="sensor-name") String sensorName,
@Valid @RequestBody SensorIngestRequest ingestRequest) {
//Compute
var serviceModel = ingestService.ingest(sensorName, ingestRequest);
//Generate Response << not important from the question perspective
//Calculate surrounding days for observed at
var calendar = Calendar.getInstance();
calendar.setTime(serviceModel.getObservedAt());
calendar.set(Calendar.HOUR_OF_DAY, 0);
var from = calendar.getTime();
calendar.add(Calendar.DATE, 1);
var till = calendar.getTime(); | {
"domain": "codereview.stackexchange",
"id": 44769,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, validation, rest, spring",
"url": null
} |
java, validation, rest, spring
//Generate links << not important from the question perspective
var linkSingle = createLinkForGetMetric(sensorName, from, till, Optional.of(ingestRequest.getDeviceId()), LinkRelation.of(RELATION_NAME_FOR_SINGLE_DEVICE_METRIC));
var linkAll = createLinkForGetMetric(sensorName, from, till, Optional.empty(), LinkRelation.of(RELATION_NAME_FOR_ALL_DEVICES_METRIC));
var responseModel = EntityModel.of(serviceModel, linkSingle, linkAll);
return ResponseEntity
.created(responseModel.getRequiredLink(RELATION_NAME_FOR_SINGLE_DEVICE_METRIC).toUri())
.body(responseModel);
}
If the validation fails then it throws a MethodArgumentNotValidException against which I can define a ControllerAdvice
/**
 * Centralised handler that turns bean-validation failures into a 400
 * response carrying a Problem body.
 */
@ControllerAdvice
public class ValidationExceptionAdvice {
    // Rendered as: "'<field>' parameter <message>"
    final static String ERROR_FORMAT = "'%s' parameter %s";

    /**
     * Handles failed @Valid request-body binding, collecting every field
     * error into a human-readable list.
     */
    @ExceptionHandler(MethodArgumentNotValidException.class)
    public ResponseEntity<Problem> handleMANVE(MethodArgumentNotValidException ex) {
        var errors = ex.getBindingResult().getAllErrors().stream()
                .map(error -> String.format(ERROR_FORMAT,((FieldError) error).getField(), error.getDefaultMessage()))
                .sorted() //Make errors' ordering consistent
                .collect(Collectors.toList());
        return getResponseEntity(errors);
    }

    // Builds the shared 400 Problem response from a list of error strings.
    ResponseEntity<Problem> getResponseEntity(List<String> errors) {
        return ResponseEntity
                .status(HttpStatus.BAD_REQUEST)
                .body(Problem.create()
                        .withTitle("Invalid data has been provided, please correct it")
                        .withDetail(errors.toString()));
    }
}
Query
In the query/search handler I anticipate two dates (a from and a to). These could be used to narrow the date interval against which the metric calculation should be performed. A sample request looks like this
GET /api/v1/sensors/windSpeed/metrics/avg?from=2023-05-22&till=2023-05-23 | {
"domain": "codereview.stackexchange",
"id": 44769,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, validation, rest, spring",
"url": null
} |
java, validation, rest, spring
@GetMapping("/{sensor-name}/metrics/{metric-type}")
ResponseEntity<?> getSensorMetricForADateRangeForADevice(
@PathVariable(name="sensor-name") String sensorName,
@PathVariable(name="metric-type") String metricType,
@RequestParam("from") @Valid @DateTimeFormat(pattern="yyyy-MM-dd") Date from,
@RequestParam("till") @Valid @DateTimeFormat(pattern="yyyy-MM-dd") Date till,
@RequestParam(name="device-id",required = false) Optional<UUID> deviceId) {
//Extra input data validation
if (!"avg".equalsIgnoreCase(metricType)) {
throw createCVE("metric-type", 1, metricType, String.class, "currently supports only 'avg'");
}
if (from.compareTo(till) > 0) {
throw createCVE("till", 3, till, Date.class,"should be greater than from");
}
//Generate Response
//... << omitted for the sake of brevity
return ResponseEntity.ok().body(EntityModel.of(serviceModel, linkSingle, linkAll));
}
As you can see I had to compare from and till to make sure that from is smaller than till. I have found it pretty hard to throw a hand-crafted MethodArgumentNotValidException. So, I've decided to use ConstraintViolationException instead. Well, that is also not an easy peasy task.
<T> ConstraintViolationException createCVE(
String parameterName,
int parameterIndex,
T parameter,
Class<T> parameterType,
String violationMessage) {
//NOTE: here the method name does not matter
final var propertyPath = PathImpl.createPathFromString("");
propertyPath.addParameterNode(parameterName, parameterIndex); | {
"domain": "codereview.stackexchange",
"id": 44769,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, validation, rest, spring",
"url": null
} |
java, validation, rest, spring
var constraintViolation = ConstraintViolationImpl.forParameterValidation(
violationMessage,
Collections.emptyMap(),
Collections.emptyMap(),
violationMessage,
parameterType,
parameter,
null,
parameter,
propertyPath,
null,
null,
null);
return new ConstraintViolationException(Set.of(constraintViolation));
}
Quite frankly I don't like approach either. I had to write another ExceptionHandler as well to create a proper Problem response.
/**
 * Maps a ConstraintViolationException (thrown by the hand-crafted
 * cross-parameter checks) onto the same Problem-style 400 response
 * used for bean-validation failures.
 */
@ExceptionHandler(ConstraintViolationException.class)
public ResponseEntity<Problem> handleCVE(ConstraintViolationException ex) {
    var errors = ex.getConstraintViolations().stream()
            .map(error -> String.format(ERROR_FORMAT, error.getPropertyPath(), error.getMessage()))
            .collect(Collectors.toList());
    return getResponseEntity(errors);
}
Question
Is there any better way to perform custom cross parameter check for GET request?
Answer: The first thing I noticed is the use of the deprecated Date class instead of the LocalDate class that has been introduced with Java 8. The code presented and the flow of the program are both correct, but some simplification can be introduced with the explicit use of the BindingResult class that is not directly used. Starting from the signature of the search method described in the question:
@GetMapping("/{sensor-name}/metrics/{metric-type}")
ResponseEntity<?> getSensorMetricForADateRangeForADevice(
@PathVariable(name="sensor-name") String sensorName,
@PathVariable(name="metric-type") String metricType,
@RequestParam("from") @Valid @DateTimeFormat(pattern="yyyy-MM-dd") Date from,
@RequestParam("till") @Valid @DateTimeFormat(pattern="yyyy-MM-dd") Date till) | {
"domain": "codereview.stackexchange",
"id": 44769,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, validation, rest, spring",
"url": null
} |
java, validation, rest, spring
With a new helper class containing both the Date from and Date till fields and with the BindingResult class can be rewritten like below:
/**
 * Binding helper that groups the 'from' and 'till' query parameters so both
 * can be validated together via a single @Valid plus a BindingResult.
 */
@Data
public class LocalDateChecker {
    // Start of the date range, bound from ?from=yyyy-MM-dd
    @NotNull
    @DateTimeFormat(pattern="yyyy-MM-dd")
    private LocalDate from;

    // End of the date range, bound from ?till=yyyy-MM-dd
    @NotNull
    @DateTimeFormat(pattern="yyyy-MM-dd")
    private LocalDate till;
}
@RestController
public class SensorMetricController {
@GetMapping("/api/v1/sensors/{sensor-name}/metrics/{metric-type}")
public ResponseEntity getSensorMetricForADateRangeForADevice(
@PathVariable(name = "sensor-name") String sensorName,
@PathVariable(name = "metric-type") String metricType,
@Valid LocalDateChecker localDateChecker,
BindingResult bindingResult)
}
The advantage is that the bindingResult contains all the validation errors and relative messages erasing the propagation of the validation code outside of the relative controller. A possible way to rewrite the controller is below:
@RestController
public class SensorMetricController {
@GetMapping("/api/v1/sensors/{sensor-name}/metrics/{metric-type}")
public ResponseEntity getSensorMetricForADateRangeForADevice(
@PathVariable(name = "sensor-name") String sensorName,
@PathVariable(name = "metric-type") String metricType,
@Valid LocalDateChecker localDateChecker,
BindingResult bindingResult) {
//I'm writing the first error, but all errors could be reported
// in one time
if (bindingResult.hasErrors()) {
FieldError fieldError = bindingResult.getFieldErrors().get(0);
throw new MetricException(fieldError.getField() + " " + fieldError.getDefaultMessage());
}
if (!"avg".equalsIgnoreCase(metricType)) {
throw new MetricException("metric type currently supports only 'avg'");
} | {
"domain": "codereview.stackexchange",
"id": 44769,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, validation, rest, spring",
"url": null
} |
java, validation, rest, spring
if (localDateChecker.getFrom().isAfter(localDateChecker.getTill())) {
throw new MetricException(("till shoud be greater or equal to from"));
}
return new ResponseEntity(List.of(
sensorName,
metricType,
localDateChecker.getFrom(),
localDateChecker.getTill()
), HttpStatus.OK);
}
}
Because the generic user has to understand just which is the absent or malformed field not generating a correct output the only important thing is the propagation of a new generated MetricException exception containing the error message to the ControllerAdvice that will communicate the bad request error to the user:
/** Runtime exception carrying a user-facing validation message for metric requests. */
public class MetricException extends RuntimeException {
    public MetricException(String message) {
        super(message);
    }
}
/** Translates MetricException into an HTTP 400 whose body is the exception message. */
@ControllerAdvice
public class SensorMetricAdvice {
    @ResponseBody
    @ExceptionHandler(MetricException.class)
    @ResponseStatus(HttpStatus.BAD_REQUEST)
    String metricExceptionHandler(MetricException ex) {
        return ex.getMessage();
    }
}
Substantially all the original code presented in the question is absolutely correct, but with help of spring classes its size can be reduced, the code I wrote it's based on the validating-form-input (adapted to a rest controller) and Building REST services with Spring. | {
"domain": "codereview.stackexchange",
"id": 44769,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, validation, rest, spring",
"url": null
} |
brainfuck
Title: Add two values in brainfuck
Question: I'm currently learning brainfuck as a challenge for golfing, and I'd like a code review to make sure I'm understanding things properly. For starters, here's my snippet:
[
The purpose of this program is to add two
predetermined values together and print the result.
For this program the values 15 and 20 have been
chosen to add together which should give a
result of 35 and print # at the end.
]
+++++ Iterate 5 times
[
>+++ Add 3 to cell one
>++++ Add 4 to cell two
<<- Subtract 1 from cell zero
]
Currently cell one is 15 and cell two is 20
The resulting output should be 35
>[ Move to cell one and iterate until cell one is zero
>+ Add 1 to cell two
<- Subtract 1 from cell one
]
>. Print cell two
The objective of this snippet is to take two predetermined values 15 and 20, add them together (resulting in 35), and print the result to the console. Since brainfuck works with ASCII values, the octothorp should be printed to the console at the end:
#
I'm sure there are shorter ways to do this, but I'm looking for feedback on how well I've documented what it's doing, the formatting/style of the breakdown, and the overall approach to the problem.
Answer: Shorter ways
I'm sure there are shorter ways to do this
What makes you so sure?
There is no shorter way to shorten the moving of one cell to another. I doubt there are any shorter and more efficient ways to setup the numbers 15 and 20. See https://esolangs.org/wiki/Brainfuck_constants for how to setup any particular number in Brainfuck, but when setting up multiple numbers at the same time it is much faster and performant to not set them up individually. For more on this topic, see one of my previous Brainfuck answers
Documentation | {
"domain": "codereview.stackexchange",
"id": 44770,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "brainfuck",
"url": null
} |
brainfuck
Documentation
I'm looking for feedback on how well I've documented what it's doing, the formatting/style of the breakdown, and the overall approach to the problem.
I think it is very well-documented. I like that you are having a section of documentation at the start, that is very common and a very safe thing to do, as all values are zero the code inside (you did have two print instructions inside) will never run.
For those who are used to reading brainfuck (believe it or not, we do exist), it's not necessary to write the details about iteration and how much you are adding and subtracting at each cell.
I normally structure my brainfuck programs in three sections: Setup, logic, output.
The only documentation needed in the beginning would be something like "Setup the values 0 15 20 on the tape".
The documentation of what you are doing is also very well-documented, but the [>+<-] pattern is very common in Brainfuck so it would be enough to document "Move value at cell 2 to cell 1". It's very clear that you are moving values (again, for those of us who are used to reading brainfuck), but which values are being moved is the interesting part here for the one who reads the documentation.
Approach to the problem
You have a very clear and very solid approach to the problem.
Next steps...
Adding hard-coded values is quite easy. How about letting the user input numbers and adding them together? That would force you to convert between actual values and ASCII characters.
See also my brainfuck questions | {
"domain": "codereview.stackexchange",
"id": 44770,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "brainfuck",
"url": null
} |
python, beginner, validation
Title: Pythonic way for validating and categorizing user input
Question: In multiple parts of my program, user input needs to be validated and categorized. As these "validation trees" can get pretty unwieldy, I've started splitting them out in different functions, for example like this:
from enum import Enum, auto
class InputCat(Enum):
INVALID = auto()
HELP = auto()
SELECT = auto()
GOTO_PAGE = auto()
GOTO_PLAYER = auto()
def _goto_player(user_input: str) -> InputCat:
"""Checks if string is of type GOTO_PLAYER."""
for char in user_input[1:]:
if not (char.isalpha() or char in {' ', '-'}):
print("Invalid input: if character after '>' is alpha, all other characters must be alpha, ' ' or '-'.")
return InputCat.INVALID
return InputCat.GOTO_PLAYER
def _goto_page(user_input: str) -> InputCat:
"""Checks if string is of type GOTO_PAGE."""
for char in user_input[1:]:
if not char.isnumeric():
print("Invalid input: if character after '>' is numeric, all other characters must be numeric too.")
return InputCat.INVALID
return InputCat.GOTO_PAGE
def _goto(user_input: str) -> InputCat:
"""Checks if string is of type GOTO_PAGE or GOTO_PLAYER."""
if len(user_input) == 1:
print("Invalid input: need more input after '>'.")
return InputCat.INVALID
if user_input[1].isnumeric():
return _goto_page(user_input)
elif user_input[1].isalpha():
return _goto_player(user_input)
else:
print("Invalid input: character after '>' must be alphanumeric.")
return InputCat.INVALID
def _select(user_input: str) -> InputCat:
"""Checks if string is of type SELECT."""
for char in user_input:
if not char.isnumeric():
print("Invalid input: if first character is numeric, all other characters must be numeric too.")
return InputCat.INVALID
return InputCat.SELECT | {
"domain": "codereview.stackexchange",
"id": 44771,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, beginner, validation",
"url": null
} |
python, beginner, validation
def _help(user_input: str) -> InputCat:
"""Checks if string is of type HELP."""
if len(user_input) > 1:
print("Invalid input: when using '?', no other characters are allowed.")
return InputCat.INVALID
return InputCat.HELP
def get_category(user_input: str) -> InputCat:
"""Checks if string is of type HELP, SELECT, GOTO_PAGE, GOTO_PLAYER or INVALID."""
if not user_input:
print('Invalid input: no input.')
return InputCat.INVALID
if user_input[0] == '?':
return _help(user_input)
elif user_input[0].isnumeric():
return _select(user_input)
elif user_input[0] == '>':
return _goto(user_input)
else:
print("Invalid input: first char must be alphanumeric, '>' or '?'.")
return InputCat.INVALID
I was wondering if this is a readable and 'pythonic' way of doing this? If not, what would be better alternatives? | {
"domain": "codereview.stackexchange",
"id": 44771,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, beginner, validation",
"url": null
} |
python, beginner, validation
Answer: Don't print promiscuously. You've broken your problem down into several
small functions, which is a good general instinct. But you've undermined those
functions by polluting them with side effects -- printing in this case.
Whenever feasible, prefer program designs that rely mostly on data-oriented
functions (take data as input and return data as output, without causing side
effects). Try to consolidate your program's necessary side effects in just a
few places -- for example, in a top-level main() function (or some
equivalent) that coordinates the interaction with the user.
Don't over-engineer. You have described a simple usage pattern: ? for
help; N for select; >N for goto-page, and >S for goto-player (where N
is an integer and S is a string of letters). Validating that kind of input
could be done reasonably in various ways, but none of them require so much
scaffolding. The small demo below uses (INPUT_CAT, REGEX) pairs. For more
complex input scenarios, you could use
callables (functions or classes) instead of regexes.
Resist the temptation for fine-grained dialogue with users. My impression
is that you want to give the user specific feedback when their input is
invalid. For example, if the user enters ?foo, instead of providing a
general error message (eg, "Invalid input") you say something specific
("Invalid input: when using '?', no other characters are allowed."). That
specificity requires more coding on your part and reading/understanding on the
part of users as you pepper them with different flavors of invalid-reply
messages. But is it really worth it? I would suggest that the answer is no.
Instead of fussing with all of those details, just provide clear documentation
in your usage/help text. If a user provides bad input, tell them so in a
general way and, optionally, remind them how to view the usage text.
When feasible, let a data structure drive the algorithm rather than
logic. Your current implementation is based heavily on conditional | {
"domain": "codereview.stackexchange",
"id": 44771,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, beginner, validation",
"url": null
} |
python, beginner, validation
logic. Your current implementation is based heavily on conditional
logic. With the right data structure (INPUT_RGXS in the demo below),
the need for most of that logic disappears.
import sys
import re
from enum import Enum, auto | {
"domain": "codereview.stackexchange",
"id": 44771,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, beginner, validation",
"url": null
} |
python, beginner, validation
class InputCat(Enum):
HELP = auto()
SELECT = auto()
GOTO_PAGE = auto()
GOTO_PLAYER = auto()
INVALID = auto()
INPUT_RGXS = (
(InputCat.HELP, re.compile(r'^\?$')),
(InputCat.SELECT, re.compile(r'^\d+$')),
(InputCat.GOTO_PAGE, re.compile(r'^>\d+$')),
(InputCat.GOTO_PLAYER, re.compile(r'^>\w+$')), # Adjust as desired.
)
def main(args):
# Printing occurs only at the program's outer edges.
for user_input in args:
ic = get_input_category(user_input)
if ic == InputCat.INVALID:
print(f'Invalid reply: {user_input!r}')
else:
print(ic)
def get_input_category(user_input: str) -> InputCat:
# A data-oriented function.
for ic, rgx in INPUT_RGXS:
if rgx.search(user_input):
return ic
return InputCat.INVALID
if __name__ == '__main__':
main(sys.argv[1:]) | {
"domain": "codereview.stackexchange",
"id": 44771,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, beginner, validation",
"url": null
} |
c++11, pointers
Title: dynamic_pointer_cast for std::unique_ptr
Question: dynamic_pointer_cast is only implemented for std::shared_ptr. I need the same functionality for unique pointers.
The wrinkle is that dynamic_casting a pointer could fail (yield nullptr), so what do we want to happen then? I decided that in that case I would like the original pointer to remain unchanged.
I have implemented the following:
template< typename T, typename S >
inline std::unique_ptr<T> dynamic_pointer_cast(std::unique_ptr<S>&& ptr_)
{
T* const converted_ptr = dynamic_cast<T*>(ptr_.get());
if (!converted_ptr)
// cast failed, leave input untouched, return nullptr
return nullptr;
// cast succeeded, clear input, return casted ptr
ptr_.release();
return std::unique_ptr<T>(converted_ptr);
}
Testing code:
#include <memory>
int main(int argc, char **argv)
{
std::unique_ptr<Base> basePtr = std::make_unique<Derived1>();
// this should fail, basePtr should remain non-empty, return should be empty
auto deriv2Ptr = dynamic_pointer_cast<Derived2>(std::move(basePtr));
// this should succeed, basePtr should become empty, return should be non-empty
auto deriv1Ptr = dynamic_pointer_cast<Derived1>(std::move(basePtr));
return 0;
}
Is this safe? Does the interface make sense? For the latter question: I decided to take an R-value ref so users have to write std::move, denoting pointer will be emptied. But then it may not if the cast fails... Normal ref is the other option, but then it's less clear at the call site that the unique_ptr will likely be cleared. | {
"domain": "codereview.stackexchange",
"id": 44772,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++11, pointers",
"url": null
} |
c++11, pointers
Answer: The dynamic-cast function
I'm pretty sure <memory> needs to be included before the template function's definition (GCC certainly thinks so).
Passing as an rvalue-reference seems exactly the right choice to me - it shows that the function is at least potentially a sink. Obviously if an actual rvalue is bound to that reference, and the cast fails, the pointed-to object is destroyed. But I don't think that will surprise anyone.
I believe it can safely be declared noexcept.
I would probably write the return nullptr as return {} to emphasise that the return type isn't a raw pointer.
We could turn the condition around so that we test the positive case; that allows us to declare and initialise converted_ptr in the condition:
template<typename T, typename S>
std::unique_ptr<T> dynamic_pointer_cast(std::unique_ptr<S>&& p) noexcept
{
if (T* const converted = dynamic_cast<T*>(p.get())) {
// cast succeeded; clear input
p.release();
return std::unique_ptr<T>{converted};
}
// cast failed; leave input untouched
return {};
}
Or, always create a (possibly-null) unique pointer, and use its bool conversion as test:
template<typename T, typename S>
std::unique_ptr<T> dynamic_pointer_cast(std::unique_ptr<S>&& p) noexcept
{
auto converted = std::unique_ptr<T>{dynamic_cast<T*>(p.get())};
if (converted) {
p.release(); // no longer owns the pointer
}
return converted;
} | {
"domain": "codereview.stackexchange",
"id": 44772,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++11, pointers",
"url": null
} |
c++11, pointers
I think this last version is the simplest and clearest.
As another answer suggested, consider moving (not copying, if we want noexcept) the deleter into the new unique-pointer. We can use get_deleter() to access it by reference; it probably makes sense to swap the new and old pointers' deleters:
#include <memory>
template<typename T, typename S, typename Deleter>
auto dynamic_pointer_cast(std::unique_ptr<S, Deleter>&& p) noexcept
{
auto converted = std::unique_ptr<T, Deleter>{dynamic_cast<T*>(p.get())};
if (converted) {
std::swap(converted.get_deleter(), p.get_deleter());
p.release(); // no longer owns the pointer
}
return converted;
}
The test program
This wouldn't compile, due to the lack of definitions for Base, Derived1 and Derived2. I took a guess at:
struct Base{ virtual ~Base()=default; };
struct Derived1 : Base{};
struct Derived2 : Base{};
The test program doesn't use its command-line arguments, so prefer int main() instead of int main(int argc, char **argv).
It always returns 0 (success), rather than actually checking the expectations written in the comments. I fixed that before refactoring the function, to give more confidence:
int main()
{
std::unique_ptr<Base> b = std::make_unique<Derived1>();
// this should fail: b should remain non-empty, return should be empty
auto d2 = dynamic_pointer_cast<Derived2>(std::move(b));
// this should succeed: b should become empty, return should be non-empty
auto d1 = dynamic_pointer_cast<Derived1>(std::move(b));
return d2 || !d1 || b;
} | {
"domain": "codereview.stackexchange",
"id": 44772,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++11, pointers",
"url": null
} |
python, performance, object-oriented, programming-challenge, simulation
Title: Python Logic Gate Circuit Simulator
Question: I have recently become interested in simulating logic circuits because of a electronics class at school. I have created a functioning simulator for logic gate circuits in Python. My implementation works, but I was wondering if it could be optimized or improved in any way? The project code is too big to fit in this question but is located at https://github.com/barrettkwise/circuitsim. Please let me know of any improvements that can be made.
print("thanks")
print("for")
print("your help!")
Answer: Note: In the future, you should probably post a snippet of your code that you have the most doubts about or that is typical of your code, then provide a link to the rest. All good though!
Overall, your code is pretty good. It seems to work well, and it's well-formatted; I especially like your liberal use of type hints. I don't understand the reasoning behind the .line system, but that's beside the point.
In gateobject.py and simulator.py, you eval arbitrary user text. This is bad practice (though in this case, the fact that self.type is uppercased minimizes the potential security risk). If the user inputs an invalid gate, it will give an ugly error like this:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<string>", line 1, in <module>
NameError: name 'BADGATE' is not defined
Instead, try using a dictionary (like operators = {'OR': OR, 'AND': AND...}); then, if self.type not in operators, you can handle the error from there (print an error message and exit, raise a custom error, etc.). This will make your code (a bit) cleaner, error handling/messages (somewhat) nicer, and your program (slightly) faster.
You could also make a helper function (like def eval_op(op: str, arg1: str, arg2: str) -> str) that handles that for you. | {
"domain": "codereview.stackexchange",
"id": 44773,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, object-oriented, programming-challenge, simulation",
"url": null
} |
python, performance, object-oriented, programming-challenge, simulation
In several places, you import other files using extremely short names like s, ps or g. Generally, you want to avoid this, as it can lead to confusion and overlaps with variables in your code. The names aren't particularly long, so you could leave them as-is.
You often use enumerate, which is good! However, you specify enumerate(x 0), when the default is to start at 0 anyways. You can just leave it at enumerate(x).
In simulator.py:15-16, you count the variable p_level from 0 to len(self.circuit), incrementing it only at the end of each loop. This is better expressed with for p_level in range(0, len(self.circuit))
On simulator.py:18-21, you use the names curr_ids and curr_vals for ids and vlaues that are, according to your comment, not current. True, they are the ones you are currently working with, but you might want to rename them to avoid confusion.
On simulator.py:49-50, you do gate.out.value = eval(gate.type)(gate.in1.value, gate.in2.value), and again later without the second argument. You should probably separate this into a method on Gate, such as setOutput() or evalInputs().
On simulator.py:53, you eval(gate.type)(...) even though you know that gate.type == "NOT", and can just do NOT(...).
Throughout the whole program, you use "1" and "0" to denote true and false. If instead you replaced those with True and False, it will do two things: first, it would simplify your logicops.py functions (i.e. NOT(a: bool) -> bool: !a). Second, it would probably make your program run somewhat faster and/or use somewhat less space, since it's not dealing with strings, just booleans.
All in all, I would recommend you look for simpler ways to represent your data. You seem to be handy with the concepts, and effective at implementing them; you just need to simplify your code with builtins like for and bool. Please give me feedback on my feedback, and happy programming! | {
"domain": "codereview.stackexchange",
"id": 44773,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, object-oriented, programming-challenge, simulation",
"url": null
} |
python, performance, pandas, complexity, iterator
Title: Finding highly correlated variables in a dataframe by evaluating its correlation matrix's values
Question: I read data from Excel into a Pandas DataFrame, so that every column represents a different variable, and every row represents a different sample. I made the function below to identify potential highly correlated variables within a DataFrame, with 'high correlation' being determined by the given "threshold" input when calling the function.
import pandas as pd
def find_highly_correlated_variables(dataframe, threshold):
'''
Parameters
----------
dataframe : pandas.DataFrame
threshold : float, representing minimal absolute value for correlation between variables to be selected
Output
------
string : reading how no highly correlated variables have been found, if none have been found
list : containing pair(s) of highly correlated variables if one or more have been found
'''
# Initialization of variables and lists to work with.
df = dataframe
th = threshold
column_names = list(df.columns.values)
highly_correlated_indices = []
highly_correlated_variables = []
# Correlation matrix is created, so that correlation values can be accessed easily.
correlation_array = df.corr().values.tolist()
for i_column, column in enumerate(correlation_array):
for i_element, element in enumerate(column):
if (abs(element) >= th) & (abs(element) != 1.0):
# Prevent duplicate information from being added.
if [i_column, i_element] not in highly_correlated_indices:
highly_correlated_indices.append([i_element, i_column])
# 'Translate' element and column indices into the variable names.
for indices in highly_correlated_indices:
highly_correlated_variables.append([column_names[indices[0]], column_names[indices[1]]]) | {
"domain": "codereview.stackexchange",
"id": 44774,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, pandas, complexity, iterator",
"url": null
} |
python, performance, pandas, complexity, iterator
if len(highly_correlated_indices) == 0:
print("No highly correlated variables found.")
else:
return highly_correlated_variables
I know that nested for loops are not ideal with regard to time complexity, so I tried to solve it using the 'zip' function and somehow do it as follows:
for index, (column, element) in enumerate(zip(correlation_array, column)
Though I got stuck in trying to make such a solution work.
For that reason, I was quite curious whether it would be possible to improve that part of the code so that it speeds up the process compared to what it is now.
I wouldn't mind hearing other tips for improvement of course (e.g. maybe some parts can be more compact), so please don't hesitate to share such thoughts with me.
Answer:
nested [interpreted] for loops are not ideal with regard to time complexity
You meant regarding "time elapsed".
The big-Oh
complexity was determined by the point we'd computed the
covariance.
But profiling
will reveal that time spent in interpreted bytecode
tends to dominate time spent in numpy's compiled C code,
as you were observing.
highly_correlated_indices = []
...
for i_column, column in ...:
for i_element, element in ...:
if (abs(element) >= th) & (abs(element) != 1.0):
if [i_column, i_element] not in highly_correlated_indices:
highly_correlated_indices.append(...)
This looks quadratic at first blush.
But it's worse, it's cubic.
Rather than a list you wanted a set there,
so the in test could complete in O(1) constant time.
You are right that, instead of the interpreter examining one
value at a time, it would be desirable to do a
vectorized broadcast across the matrix.
Here is one approach:
import numpy as np
import pandas as pd
rng = np.random.default_rng(1)
df = pd.DataFrame(rng.random((8, 3)))
c = df.corr().abs()
th = .4
z = c[(c != 1) & (c > th)] | {
"domain": "codereview.stackexchange",
"id": 44774,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, pandas, complexity, iterator",
"url": null
} |
python, performance, pandas, complexity, iterator
>>> np.round(z, 2)
0 1 2
0 NaN NaN NaN
1 NaN NaN 0.44
2 NaN 0.44 NaN
I found it convenient to round to two places,
but clearly you don't have to.
At this point you can readily iterate
over the positive columns:
>>> np.round(z.sum(axis=0), 2)
0 0.00
1 0.44
2 0.44 | {
"domain": "codereview.stackexchange",
"id": 44774,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, pandas, complexity, iterator",
"url": null
} |
linux, sh
Title: Shell script to download multiple files from different URL's using for loop to download and verify files on each invocation
Question: The expected behavior of the script is to download a file from a URL if the file is not present in the current directory and compare the md5 checksum of the file against the md5 checksum from the server. If the file is present then perform the verification again and download the file if the verification fails. As I have a poor internet connection I attempt the download and verification a second time if the initial verification fails. If verification fails on the second attempt the script exits and logs a fatal error.
The script is called with two inputs. The first input is a file that contains the full URL with filename to download. One file per line. The second input is a file that contains the expected md5checksum and the filename. Expected format of checksum file list "checksum" "filename" without the quotes.
Currently the script works as expected. As I'm in the process of teaching myself bash/sh scripting. Is there a cleaner way I can implement the expected behavior. I am aware of the continue option for wget. I choose not to use it as it felt "too easy" and not in the spirit of learning sh scripting.
The two sourced files can be found at my forks of the original repos https://github.com/CJ-Systems/log4sh and https://github.com/CJ-Systems/shlib/blob/master/functions/shlib_ansi
#!/bin/sh
#
# load log4sh
if [ -r ./lib/log4sh ]; then
. ./lib/log4sh
else
echo "ERROR: could not load (log4sh)" >&2
exit 1
fi
# load shlib_ansi
if [ -r ./lib/shlib_ansi ]; then
. ./lib/shlib_ansi
else
echo "ERROR: could not load (shlib_ansi)" >&2
exit 1
fi
# Download single file
downloadfile() {
local fn=$1
log DEBUG "${shlib_ansi_red}Downloading $bn ${shlib_ansi_none}"
wget --quiet --show-progress --no-use-server-timestamps $fn -O $bn
} | {
"domain": "codereview.stackexchange",
"id": 44775,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "linux, sh",
"url": null
} |
linux, sh
# Compare md5 checksums
verifyfile() {
local check_sum=$1
exp_md5=$(cat $check_sum | grep -i $bn | cut -d' ' -f1)
md5_local=$(md5sum $bn | cut -d' ' -f1)
if [ $md5_local = $exp_md5 ]
then
log DEBUG "Checksum valid for $bn"
return 0
else
log DEBUG "Checksum invalid for $bn"
return 1
fi
} | {
"domain": "codereview.stackexchange",
"id": 44775,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "linux, sh",
"url": null
} |
linux, sh
# Download all files in file_list
download() {
local file_list=$1
local check_sums=$2
for f in $(cat $file_list)
do
# Get name file of file to download
bn=$(basename $f)
# If file not present attempt to download
if [ ! -f $bn ]
then
log INFO "Downloading $bn"
downloadfile $f
if verifyfile $check_sums
then
log DEBUG "Verification of $bn successful"
else
log WARN "Verification of $bn failed. Retrying download"
downloadfile $f
if ! verifyfile $check_sums
then
log FATAL "Failed to verify $bn download unsuccessful"
exit
else
log DEBUG "Verification successful"
fi
fi
# Local copy of file exists verify checksum redownload if necessary
else
log INFO "File $bn already downloaded. Verifying checksum"
if ! verifyfile $check_sums
then
log WARN "Verification of $bn failed. Redownloading"
downloadfile $f
if verifyfile $check_sums
then
log INFO "Redownload and verification of $bn successful"
else
log WARN "Verification of $bn failed. Retrying download"
downloadfile $f
if ! verifyfile $check_sums
then
log FATAL "Failed to verify $bn redownload unsuccessful"
exit
fi
fi
else log INFO "Verification of $bn successful"
fi
fi
done;
}
shlib_ansi_init auto
download $1 $2 | {
"domain": "codereview.stackexchange",
"id": 44775,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "linux, sh",
"url": null
} |
linux, sh
Answer: I don't see any value in testing files for readability before sourcing them. Just attempt, and test whether we were successful:
die()
{
echo "$@" >&2
exit 1
}
. ./lib/log4sh || die "ERROR: could not load (log4sh)"
. ./lib/shlib_ansi || die "ERROR: could not load (shlib_ansi)"
Lots of variable expansions are missing quotes - always use quote:
wget --quiet --show-progress --no-use-server-timestamps "$fn" -O "$bn"
downloadfile "$f"
We're using variables ($bn) to communicate values into functions - prefer to use function arguments for this:
# Download single URL ($1) and save it to local file ($2)
downloadfile() {
log DEBUG "${shlib_ansi_red}Downloading $b2 ${shlib_ansi_none}"
wget --quiet --show-progress --no-use-server-timestamps $1 -O $2
}
In verifyfile we should be able to use md5sum -c to directly check instead of using [:
# true if checksum record line in $1 matches MD5 of file $2
verifyfile()
{
if awk -F ' [* ]' -vfile="$2" '$2==file' "$1" | md5sum --status --check 2>/dev/null
then
log DEBUG "Checksum valid for $bn"
return 0
else
log DEBUG "Checksum invalid for $bn"
return 1
fi
}
In download, we have to store the entire contents of the list, which might be large, due to $(cat $file_list). It's probably better to read from this file, which will greatly reduce our memory requirement.
There's a lot of repetition in download. If we treat a non-existent file the same as a checksum-failed file, we can simplify a lot:
If file not present, download it.
Test checksum. If successful, we're done.
Re-download.
Test checksum. | {
"domain": "codereview.stackexchange",
"id": 44775,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "linux, sh",
"url": null
} |
linux, sh
That makes for a much simpler function:
download()
{
file_list=$1
check_sums=$2
result=true
while read -r f
do
# Get name file of file to download
bn=$(basename "$f")
# If file not present attempt to download
if ! [ -f "$bn" ]
then
log INFO "Downloading $bn"
downloadfile "$f" "$bn"
fi
if verifyfile "$check_sums" "$bn"
then
log DEBUG "Verification of $bn successful"
continue;
fi
log WARN "Verification of $bn failed. Retrying download"
if downloadfile "$f" "$bn" && verifyfile "$check_sums" "$bn"
then
log DEBUG "Verification of $bn successful"
fi
log FATAL "Failed to verify $bn download unsuccessful"
result=false
done <"$file_list"
"$result"
} | {
"domain": "codereview.stackexchange",
"id": 44775,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "linux, sh",
"url": null
} |
linux, sh
For reference, here's what Shellcheck reports:
285297.sh:22:5: warning: In POSIX sh, 'local' is undefined. [SC3043]
285297.sh:22:14: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:24:61: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:24:68: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:29:5: warning: In POSIX sh, 'local' is undefined. [SC3043]
285297.sh:29:21: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:30:19: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:30:40: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:31:24: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:32:10: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:32:23: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:44:5: warning: In POSIX sh, 'local' is undefined. [SC3043]
285297.sh:44:21: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:45:5: warning: In POSIX sh, 'local' is undefined. [SC3043]
285297.sh:45:22: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:46:14: note: To read lines rather than words, pipe/redirect to a 'while read' loop. [SC2013]
285297.sh:46:20: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:49:23: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:51:19: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:54:26: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:55:27: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:60:30: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:61:33: note: Double quote to prevent globbing and word splitting. [SC2086] | {
"domain": "codereview.stackexchange",
"id": 44775,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "linux, sh",
"url": null
} |
linux, sh
285297.sh:61:33: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:72:29: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:75:30: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:76:31: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:81:34: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:82:37: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:94:10: note: Double quote to prevent globbing and word splitting. [SC2086]
285297.sh:94:13: note: Double quote to prevent globbing and word splitting. [SC2086] | {
"domain": "codereview.stackexchange",
"id": 44775,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "linux, sh",
"url": null
} |
c++, memory-management, pointers
Title: Copying allocated data into std::map in a smart way
Question: I come across a problem and I solved it. The solution works but I have some feelings that there is something wrong with my solution/code.
To be clear, let's assume that cars on the race track transmit
their velocity periodically. I want to store only the final velocity data of each car in a std::map where the key represents the car id and the value represents the velocity. However, velocity is not an integer but an allocated memory that belongs to some other code and it is deleted automatically after reading. So, when a car transmits its velocity data I want to copy allocated data and insert it into a map with a key.
While inserting (key1, data1) pair into the std::map, if there is no element with "key1" then the code should simply copy data and insert
it into the std::map. However, if there is an element with a key "key1" then the code should delete allocated memory in std::map and copy the final data into the map.
I solved my problem using a shared pointer to allocate/deallocate memory easily as below. However, I think this solution is complex and
I believe that there is an easier solution. Do you have any idea?
#include <cstring>
#include <iostream>
#include <map>
#include <memory>
#include <tuple>
#include <vector>
/**
 * @brief Owns a deep copy of a raw byte buffer.
 *
 * Bug fixed: the original stored a raw `new[]` pointer and was implicitly
 * copyable, so copying a Packet produced two objects that both delete[]
 * the same buffer — undefined behaviour (double free).  Holding the bytes
 * in a std::vector gives correct copy/move semantics and exception safety
 * for free (Rule of Zero), and removes the need for memcpy/delete.
 */
class Packet
{
public:
    /**
     * @brief Deep-copies `length` bytes out of `data`.
     * @param data   Source buffer; the caller keeps ownership and may
     *               free it immediately after this constructor returns.
     * @param length Number of bytes to copy.
     */
    Packet(const char* data, int length)
        : mData(data, data + length)
    {
    }

    /// @return Number of payload bytes stored (backward-compatible addition).
    int length() const { return static_cast<int>(mData.size()); }

private:
    std::vector<char> mData; // owned copy of the payload bytes
};
/**
 * @brief Composite ordered key for the packet map.
 *
 * Bug fixed: the constructor accepted codeType3 but never stored it, so
 * keys differing only in the third component compared equivalent and the
 * later insert was silently dropped by std::map.  All three components
 * now participate in a lexicographic ordering via std::tie, which also
 * guarantees a correct strict weak ordering.
 */
class Key
{
public:
    Key(int codeType1, int codeType2, int codeType3) :
        mCodeType1{codeType1}, mCodeType2{codeType2}, mCodeType3{codeType3} {}

    /// @brief Strict weak ordering required by std::map.
    inline bool operator<(const Key& rhs) const
    {
        return std::tie(mCodeType1, mCodeType2, mCodeType3) <
               std::tie(rhs.mCodeType1, rhs.mCodeType2, rhs.mCodeType3);
    }

private:
    int mCodeType1 = 0;
    int mCodeType2 = 0;
    int mCodeType3 = 0;
};
"domain": "codereview.stackexchange",
"id": 44776,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, memory-management, pointers",
"url": null
} |
c++, memory-management, pointers
return false;
}
private:
int mCodeType1 = 0;
int mCodeType2 = 0;
};
class Value
{
public:
Value(char* data, int length) : sharedPacketPtr{new Packet(data, length)} {}
private:
std::shared_ptr<Packet> sharedPacketPtr;
};
// Demonstration driver: simulates three incoming transmissions and stores
// them keyed by (codeType1, codeType2, codeType3).
int main()
{
    std::map<Key, Value> myMap;
    const int dataLength = 1000; // const is just for demonstration
    //data 1,2,3 comes from somewhere else (I can't change data type)
    char* data1 = new char[dataLength];
    char* data2 = new char[dataLength];
    char* data3 = new char[dataLength];
    myMap.insert(std::make_pair(Key{1, 1, 1}, Value(data1, dataLength)));
    // NOTE: std::map::insert never overwrites — this second insert with an
    // equal key is a no-op, so data2's copy is discarded immediately.
    myMap.insert(std::make_pair(Key{1, 1, 1}, Value(data2, dataLength)));
    myMap.insert(std::make_pair(Key{3, 3, 3}, Value(data3, dataLength)));
    // data 1,2,3 delete themselves (no need to worry)
    delete [] data1;
    delete [] data2;
    delete [] data3;
    return 0;
}
Answer: If a Packet is copied, then we'll have two objects with the same value of mData. This will mean that we delete[] this pointer twice, which causes Undefined Behaviour. Instead of this class, can't we just use a std::vector, which will manage memory properly with no work on our part? That looks much easier:
// Answer's simplified Packet: std::vector owns the bytes, so copy/move/
// destruction are all correct with zero special members (Rule of Zero).
struct Packet
{
    std::vector<char> mData; // owned copy of the payload

    // Deep-copies length bytes from data; caller keeps the source buffer.
    Packet(char const* data, std::size_t length)
        : mData{data, data+length}
    {}
};
That quietly fixes the above issue (and also the misspelling of std::memcpy, which is no longer needed).
I don't see why we need three arguments to construct a Key, given the third one is ignored.
Key::operator<() looks like it could simply be defined = default:
auto operator<=>(const Key& rhs) const = default;
There's no real benefit from the extra level of indirection introduced by the std::shared_ptr. That would be useful only if we intended to share ownership of the objects in the map. Even if we did, creating a class should be unnecessary - just use plain old std::make_shared(). | {
"domain": "codereview.stackexchange",
"id": 44776,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, memory-management, pointers",
"url": null
} |
python, performance, pandas, cuda
Title: Applying cointegration function from statsmodels on a large dataframe
Question: I need to apply the coint function from the statsmodels library to 207 times series with 1397 points each, two by two.
Currently, it takes between 35-40 minutes on my computer with an Intel 24 Cores CPU, last generation.
I tried to use Cython, or Data processing hacks from this article but I get the exact same processing time.
Here is the code to reproduce it:
# Data generation (no improvement needed)
df_timeseries = pd.DataFrame(np.random.uniform(low=2.25, high=2784.07,size=(1397, 207)))
# Downcasting to float 16 (could be unsigned too)
df_timeseries[df_timeseries.columns] = df_timeseries[df_timeseries.columns].astype(np.float16)
The cointegration function requires two time series, so I build a permutation of time series data frame column names:
from more_itertools import distinct_permutations as idp
# Shape: 42642 rows, 2 columns
df_permut = pd.DataFrame(idp(df_timeseries.columns, 2), columns=['ts1', 'ts2'])
Then, I apply the coint function to the permutation dataframe and extract only the p-value from the return (coint function returns coint_t, pvalue, crit_value):
import statsmodels.tsa.stattools as st
df_permut["pvalue"] = df_permut.apply(
lambda x:
[*st.coint(
df_timeseries[x['ts1']].values,
df_timeseries[x['ts2']].values
)][1], axis=1) | {
"domain": "codereview.stackexchange",
"id": 44777,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, pandas, cuda",
"url": null
} |
python, performance, pandas, cuda
This last part takes about 40 mins to run. I know that statsmodels is quite optimized and there is no chance I can improve the coint method (I did try by extracting the code and removing the numerous unwanted checks, but the linear regression runs from statsmodels take most of the time).
I don't believe there are many places for improvement in my code, as the coint method is the most resource-greedy part.
How to drastically improve the speed? If there is no way, is that a path to move the coint method to GPU?
Answer: Your code is short, clear, and time consuming.
How to drastically improve the speed? | {
"domain": "codereview.stackexchange",
"id": 44777,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, pandas, cuda",
"url": null
} |
python, performance, pandas, cuda
You need to compute fewer figures.
For N time series you perform O(N^2) cointegration tests.
The OP does not describe the use case nor the observed data pattern.
Fortunately you do know what pattern the results showed,
since you patiently waited 40 minutes for the exhaustive comparison.
break symmetry: 2x
Currently for each TS pair (a, b) you also test (b, a).
Consult your corpus of results to see if that's really necessary.
Many pairs, of course, will show no cointegration, in either direction,
showing no "hit".
Is there anything you know about each time series
that lets you predict in which direction we might see a hit?
For example, given a time series of Close prices you're
likely to also know its corresponding volume, volatility,
vertical segment, and which market it trades on.
Given hits from your results corpus,
there's an opportunity to do K-means clustering
to identify predictive features.
autolag: 10x
By default we optimize an information criterion: AIC.
Running coint with autolag=None skips that,
so it runs an order of magnitude quicker.
Consult your results corpus to see if AIC produces
an important difference in the filtering result.
common sense
Your results corpus might show for example that
a high volume stock will only forecast the price
of a low volume stock, or vice versa.
Or that only stocks within the same vertical
have predictive power for one another.
Prune your search space accordingly.
simple features
The quadratic search is killing performance.
You want to be able to process each time series
exactly once to extract relevant features,
and then perform simple all-pairs comparisons.
One candidate is
Change Point Detection.
Sometimes an exogenous news event will affect a vertical
market, inducing immediate or delayed effects in
small or large players.
Set parameter K to a conveniently small number, like 1.
Identify the K most likely Change Points in each time series,
sort them, and scan the sorted list of timestamps to identify | {
"domain": "codereview.stackexchange",
"id": 44777,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, pandas, cuda",
"url": null
} |
python, performance, pandas, cuda
sort them, and scan the sorted list of timestamps to identify
candidate pairs of time series to analyze. | {
"domain": "codereview.stackexchange",
"id": 44777,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, pandas, cuda",
"url": null
} |
python, performance, pandas, cuda
tl;dr:
Prune the search space.
There is knowledge, from the real world and from your results corpus,
that you can bring into this problem so you don't perform
a quadratic number of expensive analyses. | {
"domain": "codereview.stackexchange",
"id": 44777,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, pandas, cuda",
"url": null
} |
python, performance, parsing, json, ip-address
Title: Filtering a large (50gb+) JSON lines file matching CIDR's
Question: I'm trying to speed up a Python script that reads a large log file (JSON lines, 50gb+) and filter out results that match 1 of 2000 CIDR ranges.
Logfile
20 million lines
{"ip":"xxx.xxx.xxx.xxx","timestamp":"2017-05-27T04:00:35-04:00","data":{},"error":"EOF","error_component":"banner"}
{"ip":"xxx.xxx.xxx.xxx","timestamp":"2017-05-27T04:00:35-04:00","data":{"banner":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","ehlo":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","starttls":"500 Unknown command\r\n"},"error":"Bad return code for STARTTLS","error_component":"starttls"}
{"ip":"xxx.xxx.xxx.xxx","timestamp":"2017-05-27T04:00:35-04:00","data":{},"error":"EOF","error_component":"banner"}
{"ip":"xxx.xxx.xxx.xxx","timestamp":"2017-05-27T04:00:35-04:00","data":{"banner":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","ehlo":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","starttls":"502 No such command\r\n"},"error":"Bad return code for STARTTLS","error_component":"starttls"}
CIDR file
2,000 lines
86.80.0.0/12
77.160.0.0/12
188.200.0.0/13
178.224.0.0/13
84.24.0.0/13
Script
import sys
import json
from netaddr import all_matching_cidrs  # explicit import instead of wildcard

# Python 2 hack: allow mixing str/unicode in print() without
# UnicodeDecodeError.  (This script targets Python 2 / PyPy — it also
# uses reload() and unicode(), which do not exist in Python 3.)
reload(sys)
sys.setdefaultencoding('utf-8')

filename = 'results.json'
filename = unicode(filename, 'utf-8')
cidr_filename = 'cidr.txt'
rowcount = 0   # every log line read
count = 0      # matching lines that carried a banner

# Load CIDR ranges (one "a.b.c.d/bits" string per line)
with open(cidr_filename, 'r') as f:
    cidr = [line.strip() for line in f]

# Load JSON line by line
with open(filename) as f:
    for line in f:
        output = json.loads(line)
        rowcount += 1
        # Match if IP is in CIDR ranges
        if all_matching_cidrs(output['ip'], cidr):
            if 'banner' in output['data']:
                print(output['ip'] + '\t' + output['data']['banner'])
                # NOTE(review): counts only matches WITH a banner — the
                # original indentation was lost in transcription; confirm
                # whether bannerless matches should also be counted.
                count += 1

# Bug fixed: the originals were plain string literals containing
# '{rowcount}'/'{count}', which printed the placeholders verbatim
# (f-strings do not exist in Python 2).  Use str.format() instead.
print('---------------------------------------')
print('LINES: {}'.format(rowcount))
print('RESULTS: {}'.format(count))
print('---------------------------------------')
"domain": "codereview.stackexchange",
"id": 44778,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, parsing, json, ip-address",
"url": null
} |
python, performance, parsing, json, ip-address
Current results
Parsing an example set of 100,000 rows takes now 8 minutes using:
Pypy
MacBook Pro with 2.8 GHz Intel Core i7, 16Gb RAM, SSD
Parsing the complete set of 20,000,000 rows would take a staggering 26 hours.
---------------------------------------
LINES: 100000
RESULTS: 1243
---------------------------------------
real 7m57.739s
user 7m52.127s
sys 0m4.177s
The bottleneck is the number of CIDR ranges to search within, when I run an example set of 100,000 row against 1 CIDR range it takes only 1.2 seconds.
---------------------------------------
LINES: 100000
RESULTS: 4
---------------------------------------
real 0m1.201s
user 0m1.095s
sys 0m0.090s
Is there a faster way of accomplishing this? Would Multithreading/Multiprocessing speed things up? Any help or other feedback would be much appreciated!
Things I've done:
Using Pypy, this is 9x(!) faster than Python 2.7 for this job.
Tried using Tim Bray's Widefinder but couldn't make it work as it focuses on regex searches IMHO.
UPDATE
rolfl's solution brought my times to parse 20,344,457 rows from ±26 hours to 4.5 minutes!
---------------------------------------
LINES: 20344457
RESULTS: 130863
---------------------------------------
real 4m27.661s
user 3m55.171s
sys 0m26.793s
TemporalWolf's advice to cProfile my code showed that indeed json.loads() was a bottleneck:
ncalls tottime percall cumtime percall filename:lineno(function)
16607394 131.960 0.000 131.960 0.000 {_pypyjson.loads}
Following his advice to slice the IP address natively instead of loading each line as JSON it was 2.5x times faster!
---------------------------------------
LINES: 20344457
RESULTS: 130863
---------------------------------------
real 1m40.548s
user 1m13.885s
sys 0m22.664s
Answer:
Is there a faster way of accomplishing this? Would Multithreading/Multiprocessing speed things up? Any help or other feedback would be much appreciated! | {
"domain": "codereview.stackexchange",
"id": 44778,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, parsing, json, ip-address",
"url": null
} |
python, performance, parsing, json, ip-address
No, for the most part, multi-threading will make no difference for you. At some point the bottleneck should be the IO speed of reading 50GB of file content, and not the speed of the processing. You also need to read the file sequentially (I presume) to get the output in the same order as the input.
But, fundamentally, the solution should not need to have multi-threaded execution in order to improve the performance.
Learning how to measure performance of various parts of your code is an important skill. For the moment, it may be as simple as timing this code:
# Load JSON line by line
with open(filename) as f:
for line in f:
output = json.loads(line)
rowcount+=1
i.e. convert each line from JSON, and count the lines.... how fast is that? I would expect that the whole program should be just as fast when the IP CIDR lookups work fast too.
Your performance issue is almost certainly related to this line here:
if all_matching_cidrs(output['ip'], cidr): | {
"domain": "codereview.stackexchange",
"id": 44778,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, parsing, json, ip-address",
"url": null
} |
python, performance, parsing, json, ip-address
Your timings already support this.... it takes 1 second to search all records in 1 CIRD, but significantly longer for 2000 CIDRs...
So, you have a performance problem in the order of \$O(mn)\$ where \$m\$ is the number of rows in the file, and \$n\$ is the number of CIDRs.
You can't improve the performance related to the number of rows in the files, but you can improve the cost of the CIDR lookups. What if it was a fixed-cost to check all CIDR matches? Then your overall performance becomes \$O(m)\$ and does not depend on the number of CIDR records.
You can do this by preprocessing the CIDR data in to a structure that allows a fixed-cost lookup.
The structure I would use is a binary tree consisting of nodes representing each bit in the CIDR specs. Each leaf node represents a CIDR to include. I.e. you preprocess the CIDRs in to the tree that at most has 32 levels (for a /32 CIDR).
Then, for the lookup, you take your IP from the JSON, convert it in to an integer, and start shifting bits from the most significant. For each bit, you start descending the CIDR tree, and if you can descend the tree until you hit a leaf node, then you have found a matching CIDR. At most, this will be 32 iterations down the tree, but for the most part, CIDR's seldom are that specific. So, let's assume at most a /24 CIDR, meaning that you reduce your lookups to at most 24 descents, instead of as many as 2000 complete checks.
It comes down to the algorithm.
Update - example lookup
Note, I hacked together this tree for supporting faster lookups of IPs in a number of CIDR ranges. Python is not my primary language, so inspect it carefully, and adjust as needed. Specifically, I have used some naive mecheanisms for parsing IP addresses in to integers. Use dedicated libraries to do that instead.
You can see it running on ideone: https://ideone.com/cd0O2I
def parseIPPart(ipx, shift):
    """Parse one dotted-quad component and shift it into bit position.

    Returns 0 for a missing or unparseable component.

    Bug fixed: int() raises ValueError for a non-numeric string (and
    TypeError for None); the original caught only TypeError, so an input
    like "8.x.0.0" crashed instead of treating the bad octet as 0.
    """
    try:
        return int(ipx) << shift
    except (TypeError, ValueError):
        return 0
"domain": "codereview.stackexchange",
"id": 44778,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, parsing, json, ip-address",
"url": null
} |
python, performance, parsing, json, ip-address
def parseIP(ipString):
    """Convert a dotted-quad IPv4 string into a 32-bit integer.

    Each octet is shifted into place (24, 16, 8, 0 bits); zip() quietly
    ignores any components beyond the fourth.
    """
    total = 0
    for octet, shift in zip(ipString.split("."), range(24, -1, -8)):
        total += parseIPPart(octet, shift)
    return total
def parseCIDR(cidr):
    """Split an "a.b.c.d/bits" string into (32-bit address, prefix length).

    Falls back to /32 (a single host) when the prefix length is not a
    valid integer.

    Bug fixed: int() raises ValueError (not TypeError) for a non-numeric
    prefix, so "1.2.3.4/xx" crashed instead of defaulting to 32 as the
    except clause intended.
    """
    addrString, bitsString = cidr.split('/')
    try:
        bits = int(bitsString)
    except (TypeError, ValueError):
        bits = 32
    addr = parseIP(addrString)
    return addr, bits
class CIDRTree:
    """Binary trie over IPv4 address bits for fast CIDR-membership tests.

    Each tree level corresponds to one address bit, most significant
    first.  A node with ``leaf`` True means "some inserted CIDR covers
    every address below this prefix", so a lookup costs at most 32 node
    descents regardless of how many CIDRs were inserted.
    """

    class CIDRNode:
        def __init__(self, depth):
            # depth: bit index this node was created for (-1 for the root)
            self.depth = depth
            self.isset = None   # child taken when the current bit is 1
            self.unset = None   # child taken when the current bit is 0
            self.leaf = False   # True => a CIDR ends exactly at this prefix

    def __init__(self):
        self.root = CIDRTree.CIDRNode(-1)

    def addCIDR(self, cidr):
        """Insert one "a.b.c.d/bits" range into the trie."""
        ip, bits = parseCIDR(cidr)
        node = self.root
        # Walk (creating as needed) one node per prefix bit, MSB first.
        for b in range(bits):
            if node.leaf:
                # Previous bigger CIDR covers this subnet
                return
            mask = 1 << (31 - b)
            if (ip & mask) != 0:
                if node.isset is None:
                    node.isset = CIDRTree.CIDRNode(b)
                kid = node.isset
            else:
                if node.unset is None:
                    node.unset = CIDRTree.CIDRNode(b)
                kid = node.unset
            node = kid
        # node is now a representation of the leaf that comes from this CIDR.
        # Clear out any more specific CIDRs that are no longer relevant
        # (this CIDR includes a previous CIDR)
        node.isset = None
        node.unset = None
        node.leaf = True
        #print("Added CIDR ", ip, " and bits ", bits)

    def matches(self, ipString):
        """Return True if ipString lies inside any inserted CIDR."""
        ip = parseIP(ipString)
        node = self.root
        shift = 0
        # Descend one bit at a time: reaching a leaf means a CIDR matched;
        # falling off the tree (node becomes None) means no CIDR covers it.
        while node is not None and not node.leaf:
            shift += 1
            mask = 1 << (32 - shift)
            val = (ip & mask) != 0
            node = node.isset if val else node.unset
        return node is not None and node.leaf
"domain": "codereview.stackexchange",
"id": 44778,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, parsing, json, ip-address",
"url": null
} |
python, performance, parsing, json, ip-address
return node is not None and node.leaf
if __name__ == "__main__":
    # Smoke test: one /8 and one /24 range, then three probe addresses.
    tree = CIDRTree()
    tree.addCIDR("8.0.0.0/8")
    tree.addCIDR("9.8.7.0/24")
    print("Tree matches 8.8.8.8:", tree.matches("8.8.8.8"))
    print("Tree matches 9.9.9.9:", tree.matches("9.9.9.9"))
    print("Tree matches 9.8.7.6:", tree.matches("9.8.7.6"))
"domain": "codereview.stackexchange",
"id": 44778,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, parsing, json, ip-address",
"url": null
} |
javascript, performance
Title: String iteration algorithms performance in jQuery Terminal
Question: I have a JavaScript library jQuery Terminal and I have internal formatting that is used to change colors and style of the text that look like this:
[[b;red;green]this is bold red text with green background]
I have a main function that is used to process strings with formatting. The main function used by other functions is interate_formatting and functions that use it are split_equal and substring.
$.terminal = {
// ... | {
"domain": "codereview.stackexchange",
"id": 44779,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, performance",
"url": null
} |
javascript, performance
// ---------------------------------------------------------------------
// :: split text into lines with equal length so each line can be
// :: rendered separately (text formatting can be longer then a line).
// ---------------------------------------------------------------------
split_equal: function split_equal(str, length, options) {
if (typeof options === 'boolean') {
options = {
keepWords: options
};
}
var settings = $.extend({
trim: false,
keepWords: false
}, options);
var prev_format = '';
var result = [];
var array = $.terminal.normalize(str).split(/\n/g);
var have_formatting = $.terminal.have_formatting(str);
for (var i = 0, len = array.length; i < len; ++i) {
if (array[i] === '') {
result.push('');
continue;
}
var line = array[i];
var get_next_character = make_next_char_fun(line);
var first_index = 0;
var output;
var line_length = line.length;
var last_bracket = /\[\[[^\]]+\](?:[^\][]|\\\])+\]$/.test(line);
var leading_spaces = /^( |\s)/.test(line);
if (!have_formatting && line_length < length) {
result.push(line);
continue;
}
$.terminal.iterate_formatting(line, function callback(data) {
var chr, substring;
if (data.length >= length || data.last ||
(data.length === length - 1 &&
strlen(line[data.index + 1]) === 2)) {
var can_break = false;
// TODO: this need work
if (settings.keepWords && data.space !== -1) {
// replace html entities with characters
var stripped = text(line).substring(data.space_count); | {
"domain": "codereview.stackexchange",
"id": 44779,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, performance",
"url": null
} |
javascript, performance
var stripped = text(line).substring(data.space_count);
// real length, not counting formatting
stripped = stripped.slice(0, length).replace(/\s+$/, '');
var text_len = strlen(stripped);
if (space_re.test(stripped) || text_len < length) {
can_break = true;
}
}
// if words is true we split at last space and make next loop
// continue where the space where located
var after_index = data.index + data.size;
if (last_bracket) {
after_index += 1;
}
var new_index;
if (settings.keepWords && data.space !== -1 &&
after_index !== line_length && can_break) {
output = line.slice(first_index, data.space);
new_index = data.space - 1;
} else {
substring = line.slice(data.index);
chr = get_next_character(substring);
output = line.slice(first_index, data.index) + chr;
if (data.last && last_bracket && chr !== ']') {
output += ']';
}
new_index = data.index + chr.length - 1;
}
if (settings.trim || settings.keepWords) {
output = output.replace(/( |\s)+$/g, '');
if (!leading_spaces) {
output = output.replace(/^( |\s)+/g, '');
}
}
first_index = (new_index || data.index) + 1;
if (prev_format) {
var closed_formatting = /^[^\]]*\]/.test(output); | {
"domain": "codereview.stackexchange",
"id": 44779,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, performance",
"url": null
} |
javascript, performance
var closed_formatting = /^[^\]]*\]/.test(output);
output = prev_format + output;
if (closed_formatting) {
prev_format = '';
}
}
var matched = output.match(format_re);
if (matched) {
var last = matched[matched.length - 1];
if (last[last.length - 1] !== ']') {
prev_format = last.match(format_begin_re)[1];
output += ']';
} else if (format_end_re.test(output)) {
output = output.replace(format_end_re, '');
prev_format = last.match(format_begin_re)[1];
}
}
result.push(output);
// modify loop by returing new data
return {index: new_index, length: 0, space: -1};
}
});
}
return result;
},
// ---------------------------------------------------------------------
// :: formatting aware substring function
// ---------------------------------------------------------------------
substring: function substring(string, start_index, end_index) {
var chars = $.terminal.split_characters(string);
if (!chars.slice(start_index, end_index).length) {
return '';
}
if (!$.terminal.have_formatting(string)) {
return chars.slice(start_index, end_index).join('');
}
var start = 0;
var end;
var start_formatting = '';
var end_formatting = '';
var prev_index;
var offset = 1;
$.terminal.iterate_formatting(string, function callback(data) {
if (start_index && data.count === start_index + 1) {
start = data.index; | {
"domain": "codereview.stackexchange",
"id": 44779,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, performance",
"url": null
} |
javascript, performance
if (start_index && data.count === start_index + 1) {
start = data.index;
if (data.formatting) {
start_formatting = data.formatting;
}
}
if (end_index && data.count === end_index) {
end_formatting = data.formatting;
prev_index = data.index;
offset = data.size;
}
if (data.count === end_index + 1) {
end = data.index;
if (data.formatting) {
end = prev_index + offset;
}
}
});
if (start_index && !start) {
return '';
}
if (end === undefined) {
end = string.length;
}
string = start_formatting + string.slice(start, end);
if (end_formatting) {
string = string.replace(/(\[\[^\]]+)?\]$/, '');
string += ']';
}
return string;
},
// ---------------------------------------------------------------------
// :: helper function used by substring and split_equal it loop over
// :: string and execute callback with text count and other data
// ---------------------------------------------------------------------
iterate_formatting: function iterate_formatting(string, callback) {
function is_any_space(str) {
return str === ' ' || str === '\t' || str === '\n';
}
// ----------------------------------------------------------------
function is_space(i) {
if (!have_entities) {
return is_any_space(string[i - 1]);
}
return string.slice(i - 6, i) === ' ' ||
is_any_space(string[i - 1]);
}
// ----------------------------------------------------------------
function match_entity(index) {
if (!have_entities) {
return null;
} | {
"domain": "codereview.stackexchange",
"id": 44779,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, performance",
"url": null
} |
javascript, performance
if (!have_entities) {
return null;
}
return string.slice(index).match(entity_re);
}
// ----------------------------------------------------------------
function is_open_formatting(i) {
return string[i] === '[' && string[i + 1] === '[';
}
// ----------------------------------------------------------------
function is_escape_bracket(i) {
return string[i - 1] !== '\\' && string[i] === '\\' &&
string[i + 1] === ']';
}
// ----------------------------------------------------------------
function is_bracket(i) {
return string[i] === ']' || string[i] === '[';
}
// ----------------------------------------------------------------
function is_text(i) {
return (not_formatting && !opening &&
((string[i] !== ']' && !closing_formatting) ||
!have_formatting)) || (in_text && !formatting);
}
// ----------------------------------------------------------------
// :: function will skip to next character in main loop
// :: TODO: improve performance of emoji regex and check whole
// :: string it's complex string if not use simple function
// ----------------------------------------------------------------
var get_next_character = make_next_char_fun(string);
function next_iteration() {
var char = get_next_character(substring);
if (char.length > 1 && $.terminal.length(substring) > 1) {
return char.length - 1;
}
return 0;
}
// ----------------------------------------------------------------
function is_next_space() {
return (is_space(i) && (not_formatting || opening)) &&
(space === -1 && prev_space !== i || space !== -1);
} | {
"domain": "codereview.stackexchange",
"id": 44779,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, performance",
"url": null
} |
javascript, performance
(space === -1 && prev_space !== i || space !== -1);
}
// ----------------------------------------------------------------
// :: last iteration or one before closing formatting
// ----------------------------------------------------------------
var last = false;
function is_last() {
if (i === string.length - 1 && !last) {
last = true;
} else {
last = formatting && !!substring.match(/^.]$/);
}
return last;
}
// ----------------------------------------------------------------
var have_formatting = $.terminal.have_formatting(string);
var have_entities = entity_re.test(string);
var formatting = '';
var in_text = false;
var count = 0;
var match;
var space = -1;
var space_count = -1;
var prev_space;
var length = 0;
var offset = 0;
var re_ent = /(&[^;]+);$/;
for (var i = 0; i < string.length; i++) {
var substring = string.slice(i);
var closing_formatting = false;
match = substring.match(format_start_re);
if (match) {
formatting = match[1];
in_text = false;
} else if (formatting) {
if (string[i] === ']') {
closing_formatting = in_text;
if (in_text) {
formatting = '';
in_text = false;
} else {
in_text = true;
}
}
} else {
in_text = true;
}
var not_formatting = (formatting && in_text) || !formatting;
var opening = is_open_formatting(i);
if (is_next_space()) {
space = i;
space_count = count;
} | {
"domain": "codereview.stackexchange",
"id": 44779,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, performance",
"url": null
} |
javascript, performance
space = i;
space_count = count;
}
var braket = is_bracket(i);
offset = 0;
if (not_formatting) {
// treat entity as one character
if (string[i] === '&') {
match = match_entity(i);
if (match) {
i += match[1].length - 2; // 2 because continue adds 1 to i
continue;
}
++count;
++length;
} else if (is_escape_bracket(i)) {
// escape \] and \\ counts as one character
++count;
++length;
offset = 1;
i += 1;
} else if (!braket || !have_formatting || (in_text && !formatting)) {
++count;
++length;
}
}
if (is_text(i)) {
if (strlen(string[i]) === 2) {
length++;
}
var char = get_next_character(substring);
var size = char.length;
// begining of enity that we've skipped, we are at the end
if (char === ';') {
match = string.slice(0, i + 1).match(re_ent);
if (match) {
offset = match[1].length;
size = offset + 1;
}
}
var data = {
last: is_last(),
count: count,
index: i - offset,
formatting: formatting,
length: length,
text: in_text,
size: size,
space: space,
space_count: space_count
};
var ret = callback(data);
if (ret === false) { | {
"domain": "codereview.stackexchange",
"id": 44779,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, performance",
"url": null
} |
javascript, performance
};
var ret = callback(data);
if (ret === false) {
break;
} else if (ret) {
if (ret.count !== undefined) {
count = ret.count;
}
if (ret.length !== undefined) {
length = ret.length;
}
if (ret.space !== undefined) {
prev_space = space;
space = ret.space;
}
if (ret.index !== undefined) {
i = ret.index;
continue;
}
}
} else if (i === string.length - 1 && !last) {
// last iteration, if formatting have last bracket,
// from formatting, then last iteration
// was already called (in if) #550
callback({
last: true,
count: count + 1,
index: i,
formatting: formatting,
length: 0,
text: in_text,
space: space
});
}
// handle emoji, suroggate pairs and combine characters
if (in_text) {
i += next_iteration();
}
}
},
// ...
}; | {
"domain": "codereview.stackexchange",
"id": 44779,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, performance",
"url": null
} |
javascript, performance
I need some advice on how to optimize this code. The code works fine for short strings, but as the strings get longer it becomes really slow, and when I have an animation with 0 delay it is visible that the library slows down when it needs to process longer strings.
split_equal function split the string into lines with optional whitespace wrapping.
substring is slower when it needs to return characters from the end of the string.
I need general advice on how to optimize this code to make it faster.
I was thinking maybe of splitting the string into individual formatting and text between and process which may be faster because I can find the right position in the string without the need to iterate over the whole string.
I know that there is a lot of code but maybe someone can help.
The issue of performance is tracked on GitHub: https://github.com/jcubic/jquery.terminal/issues/820
NOTE I was suggested to ask here.
Answer: The linked bug gave a good repro case, so I ran the profiler.
Apologies for the wide image, but it shows what's taking the most time in a slow call to split_equal. The leaf nodes on this graph are mostly next_char calling test_re, definitions below:
function starts_with(match) {
return match && match.index === 0;
}
function make_re_fn(re) {
return function test_re(string) {
var m = string.match(re);
if (starts_with(m)) {
return m[1];
}
};
} | {
"domain": "codereview.stackexchange",
"id": 44779,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, performance",
"url": null
} |
javascript, performance
function make_next_char_fun(string) {
var tests = [];
[
entity_re,
emoji_re,
combine_chr_re
].forEach(function(re) {
if (re.test(string)) {
tests.push(make_re_fn(re));
}
});
if (astral_symbols_re.test(string)) {
tests.push(function test_astral(string) {
var m1 = string.match(astral_symbols_re);
if (starts_with(m1)) {
var m2 = string.match(combine_chr_re);
if (m2 && m2.index === 1) {
return string.slice(0, 3);
}
return m1[1];
}
});
}
return function next_char(string) {
for (var i = 0; i < tests.length; ++i) {
var test = tests[i];
var ret = test(string);
if (ret) {
return ret;
}
}
return string[0];
};
}
It does seem that test_re is getting longer and longer strings with more iterations, which is possibly the most important thing to fix (do you really need to process thousands of characters each char you print?).
But I'll note what you're doing with the regex and these big input strings is:
Match the entire string against a regex
Check if the match is at the start of the string
If so, return the first match group
Performance-wise you'll probably be better served using a regex starting with ^, which might not be correct in all usages here, but I copied emoji_re and a thousand characters of text into a benchmarking tool and it became an order of magnitude faster when I added the caret. | {
"domain": "codereview.stackexchange",
"id": 44779,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, performance",
"url": null
} |
python, performance
Title: Computation of average speed in Python
Question: someone can help me with this code? I'm trying to compute the average speed every 100 m, starting from a list of spans that contains [distance, time, avg speed].
This is what I've tried to do, and seems to work pretty well (can you confirm?). The problem is that the code is a little bit bulky, and I would like to streamline it if possible.
# list as example
list=[[230,20,11.5],[80,4,20],[300,15,20],[20,1,20],[400,80,5],]
current_dist=0
span_speeds=[]
remaining_time=list[0][1]
current_dist=list[0][0]
speed=list[0][2]
for element in list[1:]:
while current_dist >= 100:
span_speeds.append(speed)
current_dist=current_dist-100
remaining_time=remaining_time-100/speed
while current_dist < 100:
length,time,speed=element
new_dist=current_dist+length
if new_dist<100:
remaining_time=remaining_time+time
current_dist=new_dist
break
tot_time=remaining_time+(100-current_dist)/speed
span_speeds.append(100/tot_time)
remaining_time=time-(100-current_dist)/speed
current_dist=new_dist-100
continue
# computation about the last span
while current_dist >= 100:
span_speeds.append(speed)
current_dist=current_dist-100
remaining_time=remaining_time-100/speed
These are the results that I'm seeing:
span_speeds = [11.5, 11.5, 16.370106761565832, 20.0, 20, 20, 20.0, 6.451612903225806, 5, 5, 5]
Answer: Checking your work: implied total time. Total distance and time in the
input example are 1030 and 120. Your code's output speeds can be used to compute
total time, along the lines sketched below. Unfortunately, that calculation
produces 105 rather than 120, so we know that something is off in your speeds.
speeds = [... your results ...] | {
"domain": "codereview.stackexchange",
"id": 44780,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance",
"url": null
} |
python, performance
last = len(speeds) - 1
tot_time = sum(
(30 if i == last else 100) / s # time = dist / speed
for i, s in enumerate(speeds)
) # 105 seconds
Checking your work: expected partitioning. We can also examine the example
input distances and quickly figure out how the partitioning into distance
chunks of 100 should play out, as shown in the table below. Notice the last
row: it implies that the last four average speeds should be the same -- namely,
the speed associated with 400 in the input data. That speed is 5.
Unfortunately, your output contains only three values of 5 at the end. I did
not bother to figure out where your code went wrong, but my guess is that it
occurred when you needed to merge 3 distances to achieve 100 (10 + 20 + 70).
Input distances | Partitions into 100s
--------------------------------------
230 | 100 100 30
80 | 70 10
300 | 90 100 100 10
20 | 20
400 | 70 100 100 100 30
Your speeds (rounded for display here):
11.5, 11.5, 16.4, 20, 20, 20, 20, 6.5, 5, 5, 5 | {
"domain": "codereview.stackexchange",
"id": 44780,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance",
"url": null
} |
python, performance
Your speeds (rounded for display here):
11.5, 11.5, 16.4, 20, 20, 20, 20, 6.5, 5, 5, 5
Very quick code review. Your code isn't in functions (it should be). Your
code has a cramped, hard-to-read layout (it should add spaces around
operators/etc and include blank lines to separate the code into meaningful
sections). Your code relies on primitive data objects -- a list of triples --
and thus forces the reader to interpret and remember list-index numbers (it
should use data objects that are readable and self-documenting).
How I might start a rewrite. Begin with a meaningful object to represent
your data. You have distance-time data representing travel
segments/chunks/spans of a moving object (not sure whether there is a proper
physics term for this). Vocabulary aside, you might define an object holding
distance and time, and then leave speed as a derived attribute. Both for
computing the speed and for other parts of your algorithm, it might be useful
to include a property indicating whether the span is empty (zero time or
distance). For example:
from dataclasses import dataclass
@dataclass
class Span:
dist: float
time: float
@property
def speed(self):
return None if self.empty else self.dist / self.time
@property
def empty(self):
return self.time == 0
Next steps. With that data-object defined, you might add some useful
behaviors to it, such as the ability for one span to merge with all/part of
another (up to some needed amount of distance) or the ability to take a span
and a distance-limit and split it apart into two new spans (the second one
might be empty).
class Span:
...
def merge_with(self, other, needed = float('inf')):
# Compute distance we are adding.
dist = min(other.dist, needed)
ratio = dist / other.dist
# Add to self.
self.dist += dist
self.time += other.time * ratio
# Subtract from other.
... | {
"domain": "codereview.stackexchange",
"id": 44780,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance",
"url": null
} |
python, performance
# Subtract from other.
...
@classmethod
def split(cls, s, limit):
remainder = s.dist - limit
if remainder > 0:
ratio = remainder / s.dist
return (
cls(limit, s.time * (1 - ratio)),
cls(remainder, s.time * ratio),
)
else:
return (s, cls(0, 0))
With those building blocks in place, writing a function to convert the
raw-triples into spans is not necessarily easy, but the resulting code would be
a lot more readable. | {
"domain": "codereview.stackexchange",
"id": 44780,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance",
"url": null
} |
python, python-3.x, fizzbuzz
Title: 1, 2, Fizz, 4, Buzz: FizzBuzz
Question: I just recently started working on python (interesting language, actually), and, as I always do when I learn a new language (with the exception of BrainFuck, that's too hard), I write a FizzBuzz program.
I notice there is no error-checking, so my program would have problems with incorrect input, but please ignore that as I haven't gotten to that part yet.
Code:
def getString(num, fizz, buzz):
if num % (fizz * buzz) == 0:
return "FizzBuzz"
elif num % fizz == 0:
return "Fizz"
elif num % buzz == 0:
return "Buzz"
else:
return num
def fizzbuzz(maxNum, fizz, buzz):
num = 1;
while num <= maxNum:
print(getString(num, fizz, buzz))
num += 1
fizz = int(input("Enter the number to Fizz: "))
buzz = int(input("Enter the number to Buzz: "))
maxNum = int(input("Enter the maximum number: "))
fizzbuzz(maxNum, fizz, buzz)
The code will take a fizz, buzz, and a maxNum as input, and will output, all the fizzing and buzzing the code is doing.
Concerns:
Does it follow Python conventions?
Answer:
getString and maxNum should be get_string and max_num, by PEP 8 recommendation.
getString sometimes returns a number, and sometimes returns a string. This violates the expectation set up by the function name. Also, a function that is indecisive about its return type is harder to work with.
num = 1; ← No semicolons in Python, please.
The fizzbuzz loop is written idiomatically as for num in range(1, maxNum + 1): … | {
"domain": "codereview.stackexchange",
"id": 44781,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, fizzbuzz",
"url": null
} |
python, python-3.x, image, machine-learning, opencv
Title: Remove hot-spots from picture without touching edges
Question: In the picture below there are some regions which are very bright (i.e. more white). Some bright regions are wide and some are narrow or thin. The red box covers one such wide bright spot, and blue box covers one thin bright spot. Thin bright spots are called edges and wide bright spots are called hot-spots.
I want to remove all the hot-spots from the image (i.e. make them black), but no edge should be removed.
I've written Python code (using OpenCV) to remove all hot-spots but not the edges.
My code:
import cv2
import numpy as np
# Load the image
image1 = cv2.imread('orange.jpg', cv2.IMREAD_GRAYSCALE)
original_image = image1
# Otsu's thresholding
_, image2 = cv2.threshold(image1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Erosion
kernel = np.ones((5, 5), np.uint8)
image3 = cv2.erode(image2, kernel, iterations=1)
# Define the threshold distance K
K = 2
# Create the circular mask
mask = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * K, 2 * K))
# Iterate over image1 pixels and generate the final image
final_image = np.copy(image1)
for y in range(image1.shape[0]):
for x in range(image1.shape[1]):
# Check if any illuminated pixel exists within K distance in image3
if image2[y, x] > 0:
neighborhood = image3[max(y - K, 0):min(y + K + 1, image3.shape[0]),
max(x - K, 0):min(x + K + 1, image3.shape[1])]
if np.sum(neighborhood) > 0:
final_image[y, x] = 0
# Display the original and final image
cv2.imshow('Original', original_image)
cv2.imshow('Final Image', final_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output:
My question:
How can I reduce the computational complexity of my code? My code's complexity is O(n²) because of the two nested for loops; how can I make it O(n) or O(n log n)? | {
"domain": "codereview.stackexchange",
"id": 44782,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, image, machine-learning, opencv",
"url": null
} |
python, python-3.x, image, machine-learning, opencv
Answer: np.sum(neighborhood) is computing the mean filter with a rectangular neighborhood (sum filter, really, but since you compare to 0, the scaling doesn’t matter). This filter can be applied in OpenCV with cv.blur.
You then compare to 0 for pixels where image2 is larger than 0. So you can combine the result of the two full-image comparisons with the logical AND operator, and use the result to index into the output image.
I think the following code is equivalent to your loop (not tested, use with care):
final_image[
(cv.blur(image3, [2*K+1, 2*K+1]) > 0) &
(image2 > 0)
] = 0
Note that the complexity of you code is O(n), for n pixels in the image. The double loop iterates once over all pixels. The complexity of the line I wrote here is still O(n). The difference is that the loop over all pixels in OpenCV is written in a compiled language, which is much faster than the Python interpreter.
Note also that the code in the OP computes the sum over the neighborhood only for some pixels, the code in this answer does it for all pixels. But in the OP, each sum is O(K²), whereas here it is O(1), independent of the size of the neighborhood. It is computed with only 3 additions per pixel. So even if we compute the filter at many more pixels, we’re computing it with fewer operations overall (even when ignoring the extra overhead of the Python interpreter).
As I commented on your (now deleted) post over at Stack Overflow, you are using names well established in the field of image processing, and giving them a different meaning. This makes communication difficult. An edge is a sharp transition in intensity, which typically marks the boundary of an object in the image. Referring to a line as an edge is highly confusing.
"domain": "codereview.stackexchange",
"id": 44782,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, image, machine-learning, opencv",
"url": null
} |
python, python-3.x, image, machine-learning, opencv
Now for the code review itself:
image1, image2, image3 are not meaningful names. Other variables holding images have good names. image1 seems to be an alias for original_image, which is not used.
People will complain about variable names K and x, y, because variable names are supposed to be long and descriptive. I personally like x, y for loops over a coordinate system, it is very clear and any other names will be less clear IMO. K could be threshold_distance, for example.
Another typical comment here would be to put your code in a reusable function, scripts are easy to type but much less flexible.
I would also suggest adding more vertical space, a blank line between code blocks can increase readability significantly. | {
"domain": "codereview.stackexchange",
"id": 44782,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, image, machine-learning, opencv",
"url": null
} |
c++, c++20, locking, atomic
Title: Lock Guard Atomic alternative
Question: I've recently written a Vulkan library for creating 2D applications with ease. The catch was I need std::lock_guard for my window resize event to resize resources related to the window on a secondary render thread (separate from the main GLFW window thread).
While testing on an AMD machine (running on an AMD APU 5600G) std::lock_guard would crash my program. I thought it was strange and tried it again using a blank console program and the same issue happened. Presumably due to issues with memory ordering with caching hits when attempting to lock mutexes on (some?) AMD platforms. The solution was to use std::atomic to force standardized memory ordering.
The end result was my own custom atomic_lock and atomic_mutex. This should work similarly to std::lock_guard except with an escape timeout (in milliseconds):
#pragma once
#ifndef ATOMIC_LOCK
#define ATOMIC_LOCK
#include <mutex>
struct atomic_mutex {
public:
std::atomic_bool signal;
std::mutex lock;
};
template<bool wait = false, size_t timeout = 100>
class _NODISCARD_LOCK atomic_lock {
private:
std::atomic_bool signal;
public:
atomic_mutex& lock;
~atomic_lock() noexcept { ForceUnlock(); }
atomic_lock(const atomic_lock&) = delete;
atomic_lock& operator=(const atomic_lock&) = delete;
explicit atomic_lock(atomic_mutex& lock) : lock(lock) {
signal = static_cast<bool>(lock.signal); | {
"domain": "codereview.stackexchange",
"id": 44783,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, c++20, locking, atomic",
"url": null
} |
c++, c++20, locking, atomic
if constexpr(wait) {
std::chrono::time_point<std::chrono::system_clock> now = std::chrono::system_clock::now();
std::chrono::milliseconds startTime = duration_cast<std::chrono::milliseconds>(now.time_since_epoch());
std::chrono::milliseconds endTime = duration_cast<std::chrono::milliseconds>(now.time_since_epoch());
while (signal) {
endTime = duration_cast<std::chrono::milliseconds>(now.time_since_epoch());
if (endTime.count() - startTime.count() >= static_cast<long long>(timeout))
break;
signal = static_cast<bool>(lock.signal);
}
}
lock.signal = !signal;
if (signal == false)
lock.lock.lock();
}
bool AcquiredLock() { return !signal; }
void ForceUnlock() {
if (!signal) {
signal = true;
lock.signal = false;
lock.lock.unlock();
}
}
};
#endif
atomic_lock takes in an atomic_mutex (struct with atomic_bool & mutex, see above) and attempts to acquire the mutex before the timeout. Then provides a function with the acquired lock state and another to unlock if needed. Then will unlock itself on destruct (out-of-scope).
Usage Pattern:
atomic_mutex mylock; // default constructors for std::atomic_bool and std::mutex.
void function(atomic_mutex& mutex) {
atomic_lock alock(mutex);
// atomic_lock<true, 500> alock(mutex) // template params optional.
// Do some multi-threaded work...
}
Answer: Your standard library is probably fine
std::lock_guard would crash my program. | {
"domain": "codereview.stackexchange",
"id": 44783,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, c++20, locking, atomic",
"url": null
} |
c++, c++20, locking, atomic
Answer: Your standard library is probably fine
std::lock_guard would crash my program.
std::lock_guard is a rather simple class that just calls lock() on the std::mutex you pass it in its constructor, and calls unlock() on it in its destructor. It is almost certainly a programming error if your use of it causes a crash. Possible errors could be that you didn't pass it a valid mutex object, or perhaps it was already locked in the same thread and you try to lock it again.
If calling lock()/unlock() on a mutex would cause a problem, then I would also expect your class to have a problem since you just call those manually.
Presumably due to issues with memory ordering with caching hits when attempting to lock mutexes on (some?) AMD platforms.
This is very unlikely. Even if there is a mistake I would not expect this to cause a crash on lock/unlock, but rather cause a race condition that will rarely trigger.
The interface is not very safe
Constructing an atomic_lock may or may not actually lock the atomic_mutex object. The caller should immediately call AquiredLock() to check whether it was actually locked. Your example usage pattern doesn't even show that. This is in contrast with the default behavior of std::lock_guard, which ensures the mutex is locked.
signal is not updated atomically
Another issue is that while signal is a std::atomic_bool, your class does not read and write lock.signal and call lock.lock() in an atomic way. Consider two threads constructing an atomic_lock object for the same atomic_mutex object at the same time:
Thread 1 Thread 2
signal = lock.signal; // false
signal = lock.signal; // false
lock.signal = !signal; // true
lock.signal = !signal; // true
lock.lock.lock();
lock.lock.lock(); | {
"domain": "codereview.stackexchange",
"id": 44783,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, c++20, locking, atomic",
"url": null
} |
c++, c++20, locking, atomic
This causes both threads to immediately lock the mutex, even though it was supposed to be such that one thread would wait for the other to unlock.
atomic_lock::signal should just be a regular bool
I don't think you are supposed to access the same atomic_lock object from multiple threads. Certainly, ForceUnlock() is not thread-safe. That means there is no reason for atomic_lock::signal to be a std::atomic_bool, it could just be a regular bool.
Consider sleeping instead of spinning
The while-loop spins until the mutex is unlocked or some time has elapsed. However, until that happens it uses 100% CPU. This wastes energy for no good reason, and even worse, if you would run this on a computer with only one core, and another thread has locked the mutex, but now your thread has a timeslice, then you can check lock.signal all you want but it will never change until your timeslice ends.
I could tell you to use std::this_thread::yield() or std::this_thread::sleep_for() in your while-loop, but you shouldn't have to do this at all; you should be using std::lock_guard and make sure it doesn't crash.
Also note that std::chrono::system_clock is not the right clock to use; it can suffer from jumps forwards and backwards, for example due to NTP updates, or to daylight savings time kicking in. You should have used std::chrono::steady_clock instead.
Consider using std::timed_mutex
Basically, your class implements a mutex with a timeout. There is already a type for that in the standard library: std::timed_mutex. | {
"domain": "codereview.stackexchange",
"id": 44783,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, c++20, locking, atomic",
"url": null
} |
c#, beginner, sorting, playing-cards, shuffle
Title: Deck of cards with shuffle and sort functionality
Question: Going through Head First C#'s Chapter 8: Enums and Collections, I learned about List<T>, as well as IComparable<T> and IComparer<T>. One exercise near the end asked to make a program to draw cards at random, then sort them.
I wanted to go the extra mile and make a fully usable, realistic card deck (their solution just picked 5 values at random from 2 enums, which could result in drawing the same card twice).
Any and all advice is welcome, albeit please understand that I have yet to learn LINQ in a coming chapter, so if you suggest to use it for something please at least explain why/advantages over vanilla C#.
I'm not including the using statement, they are the default ones that Visual Studio adds when creating a C# console project.
Kind
enum Kind
{
Ace,
Two,
Three,
Four,
Five,
Six,
Seven,
Eight,
Nine,
Ten,
Jack,
Queen,
King,
}
Suit
enum Suit
{
Clubs,
Diamonds,
Hearts,
Spades,
}
Card
class Card : IComparable<Card>
{
public Kind Kind;
public Suit Suit;
public Card(Kind kind, Suit suit)
{
Kind = kind;
Suit = suit;
}
public int CompareTo(Card other)
{
if (Suit > other.Suit)
{
return 1;
}
if (Suit < other.Suit)
{
return -1;
}
return Kind > other.Kind ? 1 : -1;
}
public override string ToString()
{
return $"{Kind} of {Suit}";
}
}
CardDeck
class CardDeck
{
public List<Card> Cards;
public CardDeck()
{
Cards = new List<Card>();
int numSuits = Enum.GetNames(typeof(Suit)).Length;
int numKinds = Enum.GetNames(typeof(Kind)).Length;
for (int suit = 0; suit < numSuits; suit++)
{
for (int kind = 0; kind < numKinds; kind++)
{
Cards.Add(new Card((Kind)kind, (Suit)suit));
}
}
} | {
"domain": "codereview.stackexchange",
"id": 44784,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c#, beginner, sorting, playing-cards, shuffle",
"url": null
} |
c#, beginner, sorting, playing-cards, shuffle
public int CountCardsInDeck => Cards.Count;
public Card DrawTopCard()
{
Card drawnCard = Cards[0];
Cards.RemoveAt(0);
return drawnCard;
}
public Card DrawBottomCard()
{
int lastCardIndex = CountCardsInDeck - 1;
Card drawnCard = Cards[lastCardIndex];
Cards.RemoveAt(lastCardIndex);
return drawnCard;
}
public Card DrawRandomCard()
{
Random random = new Random();
int randomCardIndex = random.Next(CountCardsInDeck);
Card drawnCard = Cards[randomCardIndex];
Cards.RemoveAt(randomCardIndex);
return drawnCard;
}
public void AddCardOnTop(Card card)
{
if (!Cards.Contains(card))
{
Cards[0] = card;
return;
}
throw new InvalidOperationException($"Deck already contains card {card}.");
}
public void AddCardOnBottom(Card card)
{
if (!Cards.Contains(card))
{
Cards.Add(card);
return;
}
throw new InvalidOperationException($"Deck already contains card {card}.");
}
public void AddCardAtRandom(Card card)
{
if (!Cards.Contains(card))
{
Random random = new Random();
Cards[random.Next(CountCardsInDeck)] = card;
return;
}
throw new InvalidOperationException($"Deck already contains card {card}.");
}
public void Shuffle()
{
// Fisher-Yates shuffle method
Random random = new Random();
int n = CountCardsInDeck;
while (n > 1)
{
n--;
int k = random.Next(n + 1);
Card randomCard = Cards[k];
Cards[k] = Cards[n];
Cards[n] = randomCard;
}
}
public void Sort() => Cards.Sort();
public void Sort(IComparer<Card> comparer) => Cards.Sort(comparer); | {
"domain": "codereview.stackexchange",
"id": 44784,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c#, beginner, sorting, playing-cards, shuffle",
"url": null
} |
c#, beginner, sorting, playing-cards, shuffle
public void Sort(IComparer<Card> comparer) => Cards.Sort(comparer);
public void WriteToConsole()
{
foreach (Card card in Cards)
{
Console.WriteLine(card);
}
}
}
CardOrderMethod
enum CardOrderMethod
{
SuitThenKind,
KindThenSuit,
}
CardSorter
class CardSorter : IComparer<Card>
{
public CardOrderMethod SortBy = CardOrderMethod.SuitThenKind;
public int Compare(Card x, Card y)
{
if (SortBy == CardOrderMethod.SuitThenKind)
{
if (x.Suit > y.Suit)
{
return 1;
}
if (x.Suit < y.Suit)
{
return -1;
}
return x.Kind > y.Kind ? 1 : -1;
}
if (SortBy == CardOrderMethod.KindThenSuit)
{
if (x.Kind > y.Kind)
{
return 1;
}
if (x.Kind < y.Kind)
{
return -1;
}
return x.Suit > y.Suit ? 1 : -1;
}
throw new NotImplementedException($"CardOrderMethod {SortBy} is not implemented.");
}
}
Program
class Program
{
static void Main(string[] args)
{
CardDeck cardDeck = new CardDeck();
cardDeck.Shuffle();
Console.WriteLine("---Shuffled deck---");
cardDeck.WriteToConsole();
CardSorter sorter = new CardSorter
{
SortBy = CardOrderMethod.SuitThenKind
};
cardDeck.Sort(sorter);
Console.WriteLine("---Sorted deck: SuitThenKind---");
cardDeck.WriteToConsole();
cardDeck.Shuffle();
sorter.SortBy = CardOrderMethod.KindThenSuit;
cardDeck.Sort(sorter);
Console.WriteLine("---Sorted deck: Kind Then Suit---");
cardDeck.WriteToConsole();
// Keep console open until a key is pressed
Console.ReadKey();
}
}
Answer: enum Kind
{
...
Queen,
King,
Joker // ???
} | {
"domain": "codereview.stackexchange",
"id": 44784,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c#, beginner, sorting, playing-cards, shuffle",
"url": null
} |
c#, beginner, sorting, playing-cards, shuffle
Answer: enum Kind
{
...
Queen,
King,
Joker // ???
}
Jokers is the joker. You're not considering jokers
class Card : IComparable<Card>
{
...
I'm not convinced that there is a default comparison for cards? It could be misunderstood by consumers. On the other hand a deck of cards is always sorted by Suit and then Kind, but the comparison of cards is highly context dependent - dependent on the rules of the game. See further below.
public int CompareTo(Card other)
{
if (Suit > other.Suit)
{
return 1;
}
if (Suit < other.Suit)
{
return -1;
}
return Kind > other.Kind ? 1 : -1;
}
can be simplified to:
public int CompareTo(Card other)
{
if (other == null) return 1;
if (Suit != other.Suit)
return Suit.CompareTo(other.Suit);
return Kind.CompareTo(other.Kind);
}
CardDeck is maybe a little verbose. IMO Deck is sufficient.
Cards = new List<Card>();
int numSuits = Enum.GetNames(typeof(Suit)).Length;
int numKinds = Enum.GetNames(typeof(Kind)).Length;
for (int suit = 0; suit < numSuits; suit++)
{
for (int kind = 0; kind < numKinds; kind++)
{
Cards.Add(new Card((Kind)kind, (Suit)suit));
}
}
There is a simpler way to do this:
Cards = new List<Card>();
foreach (Suit suit in Enum.GetValues(typeof(Suit)))
{
foreach (Kind kind in Enum.GetValues(typeof(Kind)))
{
Cards.Add(new Card(kind, suit));
}
}
CountCardsInDeck again: Count is sufficient. What should it else count if not cards in the deck?
public Card DrawTopCard()
{
Card drawnCard = Cards[0];
Cards.RemoveAt(0);
return drawnCard;
}
public Card DrawBottomCard()
{
int lastCardIndex = CountCardsInDeck - 1;
Card drawnCard = Cards[lastCardIndex];
Cards.RemoveAt(lastCardIndex);
return drawnCard;
} | {
"domain": "codereview.stackexchange",
"id": 44784,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c#, beginner, sorting, playing-cards, shuffle",
"url": null
} |
c#, beginner, sorting, playing-cards, shuffle
public Card DrawRandomCard()
{
Random random = new Random();
int randomCardIndex = random.Next(CountCardsInDeck);
Card drawnCard = Cards[randomCardIndex];
Cards.RemoveAt(randomCardIndex);
return drawnCard;
}
This can be simplified:
public Card DrawCardAt(int index)
{
if (index < 0 || index >= Count)
throw new ArgumentOutOfRangeException(nameof(index));
Card card = Cards[index];
Cards.RemoveAt(index);
return card;
}
public Card DrawTopCard()
{
return DrawCardAt(0);
}
public Card DrawBottomCard()
{
return DrawCardAt(Count - 1);
}
public Card DrawRandomCard()
{
Random random = new Random();
int index = random.Next(Count);
return DrawCardAt(index);
}
public void AddCardOnTop(Card card)
{
if (!Cards.Contains(card))
{
Cards[0] = card;
return;
}
throw new InvalidOperationException($"Deck already contains card {card}.");
}
A cleaner way to make precautions:
public void AddCardOnTop(Card card)
{
if (Cards.Contains(card))
throw new InvalidOperationException($"Deck already contains card {card}.");
//Cards[0] = card;
Cards.Insert(0, card);
}
You replace the existing first card with a new one. Is that what you want? If so, the method should be called SetTop() or ReplaceTop(). My guess is that you actually want to insert instead? The same could be said about AddCardOnBottom() and AddCardAtRandom().
public void WriteToConsole()
{
foreach (Card card in Cards)
{
Console.WriteLine(card);
}
}
Keep the UI out of your models. You could override ToString() and produce a line-string instead.
class CardSorter strictly speaking it is not a sorter but a comparer. | {
"domain": "codereview.stackexchange",
"id": 44784,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c#, beginner, sorting, playing-cards, shuffle",
"url": null
} |
c#, beginner, sorting, playing-cards, shuffle
class CardSorter strictly speaking it is not a sorter but a comparer.
As discussed above, I think your comparer may be somewhat redundant, because the comparison of cards depends on the game rules. I'm not a huge fan of card games, but I can't imagine games or situations where a deck of cards should be sorted differently than just by Suit and then Kind. But if you insist, you should consider other sort types:
enum CardSortType
{
KindOnly, // HH: In some games only the kind matters
SuitOnly, // HH: I can't imagine any games where this is used??
SuitThenKind,
KindThenSuit,
}
class CardSorter : IComparer<Card>
{
public CardSorter(CardSortType sortBy = CardSortType.SuitThenKind)
{
SortBy = sortBy;
}
public CardSortType SortBy { get; } // HH: Make it readonly
public int Compare(Card x, Card y)
{
switch (SortBy)
{
case CardSortType.KindOnly:
return x.Kind.CompareTo(y.Kind);
case CardSortType.SuitOnly:
return x.Suit.CompareTo(y.Suit);
case CardSortType.SuitThenKind:
if (x.Suit != y.Suit) return x.Suit.CompareTo(y.Suit);
return x.Kind.CompareTo(y.Kind);
case CardSortType.KindThenSuit:
if (x.Kind != y.Kind) return x.Kind.CompareTo(y.Kind);
return x.Suit.CompareTo(y.Suit);
default:
throw new NotImplementedException($"CardOrderMethod {SortBy} is not implemented.");
}
}
}
In the above, I suggest a simpler comparison. | {
"domain": "codereview.stackexchange",
"id": 44784,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c#, beginner, sorting, playing-cards, shuffle",
"url": null
} |
c++, design-patterns, pointers
Title: Implementing The Decorator Design Pattern in C++
Question: I recently tried to implement the decorator design pattern in C++. Here is the code in full:
#include <algorithm>
#include <iostream>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
//Interface for items of food
class Food {
public:
virtual float Price() const = 0; //The price of the food in some currency
virtual std::vector<std::string> IngredientsList() const = 0;
virtual ~Food() = default;
};
//An example of an item of food (concrete component)
class Pizza : public Food {
public:
float Price() const override {return _base_cost;};
std::vector<std::string> IngredientsList() const override {return {"Dough"};}
private:
float _base_cost{3.0}; //The cost of a pizza with no toppings
};
//Interface for decorators
class IngredientsDecorator : public Food {
public:
explicit IngredientsDecorator(Food* food) : _food{food} {}
virtual float Price() const override {return _food->Price();}
virtual std::vector<std::string> IngredientsList() const override {return _food->IngredientsList();}
protected:
Food* _food;
};
//An example of a decorator
class MozzarellaDecorator : public IngredientsDecorator {
public:
explicit MozzarellaDecorator(std::shared_ptr<Food> food)
: IngredientsDecorator{food.get()} {}
float Price() const override {return IngredientsDecorator::Price() + _cost;};
std::vector<std::string> IngredientsList() const override {
auto AllIngredients = IngredientsDecorator::IngredientsList();
auto MozzarellaIngredients = Ingredients();
AllIngredients.insert(AllIngredients.end(), MozzarellaIngredients.begin(), MozzarellaIngredients.end());
return AllIngredients;
}
private:
float _cost{0.50};
std::vector<std::string> Ingredients() const {return {"Mozzarella"};};
};
//An order consists of a number of food items | {
"domain": "codereview.stackexchange",
"id": 44785,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns, pointers",
"url": null
} |
c++, design-patterns, pointers
//An order consists of a number of food items
class Order {
public:
void AddToOrder(std::shared_ptr<Food> food) {
_food.push_back(food.get());
}
void ClearOrder() {_food.clear();}
float Price() const {
return std::accumulate(_food.begin(),_food.end(),0.f, [](float a, Food* b) -> float {return a + b->Price();});
}
std::vector<std::string> IngredientsList() const {
std::vector<std::string> all_ingredients;
std::for_each(_food.begin(),
_food.end(),
[&all_ingredients](Food* f) {
auto curr_ingredients = f->IngredientsList();
all_ingredients.insert(all_ingredients.end(), curr_ingredients.begin(), curr_ingredients.end());
});
return all_ingredients;
}
void PrintOrderInformation() const {
std::cout<<Price()<<'\n';
auto ingredients{IngredientsList()};
for(const auto& e : ingredients) {
std::cout<<e<<'\n';
}
std::cout<<"\n-------------\n";
};
private:
std::vector<Food*> _food;
};
int main()
{
//Create an empty order
Order my_order;
//Add a mozzarella pizza
auto pizza_base = std::make_shared<Pizza>();
auto mozzarella_pizza = std::make_shared<MozzarellaDecorator>(pizza_base);
my_order.AddToOrder(mozzarella_pizza);
my_order.PrintOrderInformation();
my_order.ClearOrder();
//New order for a pizza with double mozzarella
auto double_mozzarella_pizza = std::make_shared<MozzarellaDecorator>(mozzarella_pizza);
my_order.AddToOrder(double_mozzarella_pizza);
my_order.PrintOrderInformation();
my_order.ClearOrder();
}
My questions are the following: | {
"domain": "codereview.stackexchange",
"id": 44785,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns, pointers",
"url": null
} |
c++, design-patterns, pointers
}
My questions are the following:
Have I implemented the decorator pattern correctly?
Have I used smart pointers correctly? I'm not 100% sure whether the functions should
take a smart pointer or a raw pointer.
Is there a way to compose decorators in a constructor? Specifically, is there a way I can write the following
auto base_pizza = std::make_shared<Pizza>();
auto single_mozzarella = std::make_shared<MozzarellaDecorator>(base_pizza);
as something like this instead?
auto single_mozzarella = std::make_shared<MozzarellaDecorator>(std::make_shared<Pizza>());
Answer: The goal of design patterns
The goal of design patterns and principles is so we create more flexible and maintainable code. In particular, I would say they are there so we keep the act of programming itself \$O(1)\$. Consider for example why the decorator pattern was created: without it, if you would want to extend a class with some extra functionality, you might create a derived class:
class Pizza: Food {…};
class PizzaWithExtraMozzarella: Pizza {…};
class PizzaWithExtraAnchovies: Pizza {…};
The problem is now: what if I want a pizza with anchovies and double mozzarella? Do I really want to add another class for that, like the following?
class PizzaWithAnchoviesAndDoubleMozzarella: Pizza {…}; | {
"domain": "codereview.stackexchange",
"id": 44785,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns, pointers",
"url": null
} |
c++, design-patterns, pointers
Because that way, if you have \$N\$ ingredients to choose from, you'd need at least \$N^2\$ extra classes just so you can get any combination of one extra ingredient, and that doesn't even cover adding the same ingredient twice.
The decorator pattern allows you to avoid the \$N^2\$ explosion.
However, if we have to write a separate decorator class for each of the \$N\$ possible ingredients, we still end up having to write \$O(N)\$ classes. That's still not great! This is why it would be much better to just have a mutable list of ingredients, with an AddIngredient() function we can call to add more ingredients. This function could either be added to the base class Food itself, or we can create one decorator that provides that functionality:
class ExtraIngredientsDecorator: public Food {
public:
explicit ExtraIngredientsDecorator(std::unique_ptr<Food> food):
_food{std::move(food)} {}
virtual float Price() const override {
return _food->Price() + _extra_cost;
}
virtual std::vector<std::string> IngredientsList() const override {
auto AllIngredients = _food->IngredientsList();
AllIngredients.insert(AllIngredients.end(),
_extra_ingredients.begin(), _extra_ingredients.end());
return AllIngredients;
}
void AddIngredient(std::string ingredient, float cost) {
_extra_ingredients.push_back(ingredient);
_extra_cost += cost;
}
private:
std::unique_ptr<Food> _food;
float _extra_cost{};
std::vector<std::string> _extra_ingredients;
};
Because now you can write:
auto pizza_with_extras = ExtraIngredientsDecorator(std::make_unique<Pizza>());
pizza_with_extras.AddIngredient("anchovies", 0.40);
pizza_with_extras.AddIngredient("mozzarella", 0.50);
pizza_with_extras.AddIngredient("mozzarella", 0.50); | {
"domain": "codereview.stackexchange",
"id": 44785,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns, pointers",
"url": null
} |
c++, design-patterns, pointers
Prefer std::unique_ptr if possible
You used both std::shared_ptr and raw pointers in your code. The former does atomic reference counting, which might be unnecessary, and the latter is not very safe. While std::shared_ptr works in your example because every object is immutable, if you have mutable data (like in my ExtraIngredientsDecorator), then with std::shared_ptr being used behind the scenes it becomes hard to understand what happens when copies are made of objects. For example, will other copies get modified as well when you call something like AddIngredient() on one of them?
std::unique_ptr has none of these issues. It's clear that there can be only one owner. I've shown it in the example above. The only issue is that you might have to explicitly std::move() objects if they were not temporaries, like for example:
auto pizza_base = std::make_unique<Pizza>();
auto pizza_with_extras = ExtraIngredientsDecorator(std::move(pizza_base));
Also turn your raw pointers into std::unique_ptr; this avoids the possibility of dangling pointers. So:
class Order {
public:
void AddToOrder(std::unique_ptr<Food> food) {
_food.push_back(std::move(food));
}
…
private:
std::vector<std::unique_ptr<Food>> _food;
};
And then a full usage example would be:
Order my_order;
// Add a mozzarella pizza
{
auto pizza_base = std::make_unique<Pizza>();
auto pizza_with_extras =
std::make_unique<ExtraIngredientsDecorator>(std::move(pizza_base));
pizza_with_extras->AddIngredient("mozzarella", 0.50);
my_order.AddToOrder(std::move(pizza_with_extras));
}
my_order.PrintOrderInformation(); | {
"domain": "codereview.stackexchange",
"id": 44785,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns, pointers",
"url": null
} |
c++, design-patterns, pointers
my_order.PrintOrderInformation();
The above code is safe, even though pizza_base and pizza_with_extras went out of scope before PrintOrderInformation() was called.
Composing decorators
You can indeed compose decorators the way you wrote it. It's the same when using unique pointers. You could even write something like:
Order my_order;
my_order.AddToOrder(
std::make_unique<ExtraIngredientsDecorator>(
std::make_unique<Pizza>()
)
); | {
"domain": "codereview.stackexchange",
"id": 44785,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns, pointers",
"url": null
} |
vba, excel
Title: Efficiently creating an array from a filtered sheet
Question: [edit] I realised why everyone does this with loops; my plan was to use .SpecialCells(xlCellTypeVisible).EntireRow.Address to return the row(s) of the results of the results of the filter, but that returns a string. Strings have a limit of 256 characters, so with even moderately fragmented data some will be truncated. However, I think this approach is still more efficient than grabbing the table contents row-by-row.
I recently needed to make an array from a filtered sheet. Because the filtered results will be in multiple areas, the code I found online all referred back to the sheet in loops. I believe I have come up with a much more efficient approach, which grabs all the table in one go, then grabs the information relating to the rows that are visible. It does not have to go row-by-row to grab the data, nor does it have to redim the array after its initial size is set.
Some operations could be combined (sacrificing readability for efficiency) but otherwise I think this is a faster and neater way than any I've yet seen.
I am particularly interested in any efficiency improvements, and any vulnerabilities which could/should be dealt with. Thanks in advance!
Function ArrayFromFilter(Table As Range) As Variant
Dim FullData() As Variant, Trimdata() As Variant, FilterZones() As String, Filters() As String, FilterRows() As Long
Dim numZones As Long, curZone As Long, curRow As Long, curCol As Long, FirstRow As Long
Dim numItems As Long, i As Long, j As Long
Dim Output As String | {
"domain": "codereview.stackexchange",
"id": 44786,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
vba, excel
FirstRow = Table.Rows(1).Row - 1 'this is an offset used later
'grab the entire unfiltered table - faster to process it in VBA than to cycle through on the sheet
FullData() = Table
'grab the result row numbers by area
numZones = Table.SpecialCells(xlCellTypeVisible).Areas.Count
ReDim Filters(1 To numZones)
For curZone = 1 To numZones
Filters(curZone) = Table.SpecialCells(xlCellTypeVisible).Areas(curZone).EntireRow.Address
Next
ReDim FilterZones(1 To numZones, 1 To 2)
ReDim FilterRows(1 To numZones, 1 To 2)
'split the zones into start and end rows
For i = LBound(Filters()) To UBound(Filters())
'first split each zone into its first and last rows
FilterZones(i, 1) = Left(Filters(i), InStr(Filters(i), ":") - 1)
FilterZones(i, 2) = Right(Filters(i), InStr(Filters(i), ":") + 1)
'now take just the row from each cell and convert to a number, then remove the offset of the start of the data
FilterRows(i, 1) = (CDbl(Split(FilterZones(i, 1), "$")(1)) - FirstRow)
FilterRows(i, 2) = (CDbl(Split(FilterZones(i, 2), "$")(1)) - FirstRow)
'ta-da we have an array with the first and last row of each zone inside the overall filtered data - now work out how many items there are
numItems = numItems + (FilterRows(i, 2) - FilterRows(i, 1)) + 1
Next
'go through the table, moving only the useful bits to the trimmed data
ReDim Trimdata(1 To numItems, 1 To UBound(FullData, 2))
curRow = 1
For i = 1 To numZones
For j = FilterRows(i, 1) To FilterRows(i, 2)
For curCol = 1 To UBound(FullData, 2)
Trimdata(curRow, curCol) = FullData(j, curCol)
Next
curRow = curRow + 1
Next
Next | {
"domain": "codereview.stackexchange",
"id": 44786,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
vba, excel
'uncomment this block for testing to show that the data has been grabbed
' For i = 1 To UBound(Trimdata(), 1)
' For j = 1 To UBound(Trimdata(), 2)
' If j = 1 Then
' Output = Trimdata(i, j)
' Else
' Output = Output & ", " & Trimdata(i, j)
' End If
' Next
' Debug.Print Output
' Next
ArrayFromFilter = Trimdata
End Function
Answer: Use of error handlers with SpecialCells: Using SpecialCells in VBA can potentially throw errors, particularly Error 1004 when no cells of the specified type are found. It's good practice to have error handling in place for these instances. An example implementation might look like this:
On Error Resume Next
Set rngSpecial = Table.SpecialCells(xlCellTypeVisible)
If Err.Number <> 0 Then
Err.Clear
' Handle error or exit function
End If
On Error GoTo 0
Storing the result of SpecialCells in a variable: The SpecialCells method is called multiple times in this code. This method can be resource intensive, particularly with larger ranges. Storing the result in a variable and then using that variable would improve efficiency. Example:
Dim rngSpecial As Range
Set rngSpecial = Table.SpecialCells(xlCellTypeVisible)
Now, you can use rngSpecial instead of calling Table.SpecialCells(xlCellTypeVisible) each time.
Misleading variable name - Trimdata: The variable name Trimdata can be misleading because in programming contexts, "trim" is often used to refer to removing white spaces from the start and/or end of strings. Here, a more accurate name might be FilteredData or VisibleData to better reflect that this array contains only the visible (i.e., unfiltered) data from the range.
Simpler calculation of row numbers: The calculation for the number of rows in a range can be simplified by dividing the total count of cells in the range by the count of columns. This can be done as follows:
numRows = Table.Count / Table.Columns.Count | {
"domain": "codereview.stackexchange",
"id": 44786,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
vba, excel
numRows = Table.Count / Table.Columns.Count
Remember that this will give you the total number of rows, including those hidden by filters. If you only want the number of visible rows, you would need to perform this operation on the range returned by SpecialCells(xlCellTypeVisible).
' This function accepts a range in Excel (the "Table") as input and returns a filtered array that excludes any hidden or non-visible cells in the range.
Function ArrayFromFilteredRange(Table As Range) As Variant
' Declare a Range object to hold the target cells
Dim Target As Range
' In case of any error in the following operations, program execution will resume at the next statement
On Error Resume Next
' Assign to "Target" only the visible cells from "Table"
Set Target = Table.SpecialCells(xlCellTypeVisible)
' Resets the error handling to its default behavior
On Error GoTo 0
' If no visible cells were found, return an empty array and exit the function
If Target Is Nothing Then
ArrayFromFilteredRange = Array()
Debug.Print "ArrayFromFilteredRange:", "Target Is Nothing"
Exit Function
End If
' Declare a variable to hold the number of columns in the target range
Dim columnCount As Long
columnCount = Target.Columns.Count
' Declare a variable to hold the number of rows in the target range
Dim rowCount As Long
rowCount = Target.Count / columnCount
' Declare an array to hold the data from the target range
Dim Result As Variant
' Set the dimensions of the Result array to match the size of the target range
ReDim Result(1 To rowCount, 1 To columnCount)
' Declare variables to assist in navigating the target range and populating the Result array
Dim Area As Range
Dim AreaData As Variant
Dim c As Long, r As Long, row As Long | {
"domain": "codereview.stackexchange",
"id": 44786,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
vba, excel
' Loop through each area of the target range
For Each Area In Target.Areas
' Loop through each row in the current area
For row = 1 To Area.Rows.Count
' Increment the row count for the Result array
r = r + 1
' Extract the values from the current area
AreaData = Area.Value
' Loop through each column in the current area
For c = 1 To columnCount
' Assign the current cell's value to the corresponding element in the Result array
Result(r, c) = AreaData(row, c)
Next
Next
Next
' Return the Result array
ArrayFromFilteredRange = Result
End Function
Addendum
I overlooked the whole point of the OP's code. He postulates that it is more efficient to load all the data into a variable and loop over it, as opposed to iterating over each area's values.
' This function calculates the relative row index of an area within a given target range.
' It takes the target range and area index as inputs and returns the relative row index.
Function GetAreaRelativeRowIndex(Target As Range, AreaIndex As Long) As Long
' Get the row number of the first cell in the given area within the target range
' Subtract the row number of the first cell in the target range to calculate the relative row index
' Adding 1 at the end accounts for the 1-based index system in VBA
GetAreaRelativeRowIndex = Target.Areas(AreaIndex).Row - Target.Row + 1
End Function
' This function accepts a range in Excel (the "Table") as input and returns a filtered array
' that excludes any hidden or non-visible cells in the range.
Function FilteredRangeData(Table As Range) As Variant | {
"domain": "codereview.stackexchange",
"id": 44786,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
vba, excel
' Declare a Range object to hold the target cells
Dim Target As Range
' On Error Resume Next allows the program to continue with the next line of code
' even if there is an error
On Error Resume Next
' Assign to "Target" only the visible cells from "Table"
' .SpecialCells(xlCellTypeVisible) allows to target only cells that are visible
Set Target = Table.SpecialCells(xlCellTypeVisible)
' Resets the error handling to its default behavior
On Error GoTo 0
' If no visible cells were found or if Table is a single cell,
' return an empty array and exit the function
If Target Is Nothing Or Table.Count = 1 Then
Dim EmtpyResult(1 To 1, 1 To 1) ' Define a 1x1 array
FilteredRangeData = EmtpyResult ' Set function result as the empty array
Debug.Print "FilteredRangeData:", "Target Is Nothing" ' Output message for debugging
Exit Function ' Exit the function
End If
' Declare a variable to hold the number of columns in the target range
Dim columnCount As Long
columnCount = Target.Columns.Count | {
"domain": "codereview.stackexchange",
"id": 44786,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
vba, excel
' Declare a variable to hold the number of rows in the target range
' This is calculated by dividing the total count of cells in the target by the column count
Dim rowCount As Long
rowCount = Target.Count / columnCount
' Declare an array to hold the data from the target range
Dim Result As Variant
' Set the dimensions of the Result array to match the size of the target range
ReDim Result(1 To rowCount, 1 To columnCount)
' Declare a variable to store all values in the table
Dim AllValues As Variant
AllValues = Table.Value
Dim AreaIndex As Long
Dim AreaRowCount As Long
Dim AreaRelativeRowIndex As Long
Dim Count As Long
Dim r As Long
Dim c As Long
' Loop through all the areas in the target range
For AreaIndex = 1 To Target.Areas.Count
' Get the relative row index of the current area in the target range
AreaRelativeRowIndex = GetAreaRelativeRowIndex(Target, AreaIndex)
' Get the number of rows in the current area
AreaRowCount = Target.Areas(AreaIndex).Rows.Count
' Loop through all the rows in the current area
For r = AreaRelativeRowIndex To AreaRelativeRowIndex + AreaRowCount - 1
' Increment the count
Count = Count + 1
' Loop through all the columns
For c = 1 To columnCount
' Assign the cell value in AllValues to the corresponding cell in Result
Result(Count, c) = AllValues(r, c)
Next c
Next r
Next AreaIndex
' Return the Result array
FilteredRangeData = Result
End Function | {
"domain": "codereview.stackexchange",
"id": 44786,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
python, embedded, raspberry-pi
Title: Raspberry Pi/PySimpleGUI based resistance test system
Question: Hardware: Raspberry Pi 3B+, Elecrow touchscreen, DFR0660 Barcode Scanner, ADS1115 ADC
I would like any bad practices/possible failure points pointed out specifically in the loop(), read_adc(), and catheter_test() functions. The write_to_report() and measure_single_catheter() functions were removed. The system is fully functional.
import sys
from time import sleep
import RPi.GPIO as GPIO
from os import system
from datetime import datetime
from fpdf import FPDF
from fpdf.enums import XPos, YPos
import pyautogui
import pygame
from itertools import chain
import subprocess
from hw_init import *
from cath_test_init import *
from gui_init import *
sys.path.append('/usr/lib/python39.zip')
# !/usr/bin python3
# VERBOSE LOGGING FOR TROUBLESHOOTING
if sys.argv[1] == 'log':
logfile_date = datetime.now().strftime("%m_%d_%Y")
logfile_name = 'logfile' + logfile_date + '.txt'
sys.stdout = open(logfile_name, 'w')
def log_print(string):
logfile_now = datetime.now().strftime("%m_%d_%Y_%H:%M:%S")
print(logfile_now + "\t" + string)
class PDF(FPDF):
# Page footer
def footer(self):
self.set_y(-40)
self.set_font('Times', 'I', 12)
# Page number {nb} comes from alias_nb_page
self.cell(0, 10, 'Model Selected: ' + MODEL_SELECTED +
' Sofware Version:' + SOFTWARE_VERSION,
new_x=XPos.LMARGIN, new_y=YPos.NEXT, align='L')
self.cell(0, 10, 'Page ' + str(self.page_no()) + '/{nb}',
new_x=XPos.RIGHT, new_y=YPos.TOP, align='C') | {
"domain": "codereview.stackexchange",
"id": 44787,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, embedded, raspberry-pi",
"url": null
} |
python, embedded, raspberry-pi
# AUDIO SETUP
FAIL_SOUND = 'fail.mp3'
PASS_SOUND = 'pass.mp3'
PLAY_SOUND = True
pygame.init()
pygame.mixer.init()
'''Loading and playing sounds in order to load dependencies of library'''
pygame.mixer.music.load(FAIL_SOUND)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
pygame.mixer.music.load(PASS_SOUND)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
def get_ip():
ip = subprocess.run(['hostname', '-I'],
stdout=subprocess.PIPE).stdout.decode('utf-8')
return ip
def terminate_script():
window.close()
sys.exit("Terminated at admin request")
def reboot_system():
system('sudo reboot')
def shutdown_system():
system('sudo shutdown -h now')
def set_bias_mux_to_low_range():
log_print("set_bias_mux_to_low_range() called")
GPIO.output(A_BIAS_MUX, GPIO.LOW)
log_print("set_bias_mux_to_low_range() returning")
def set_bias_mux_to_hi_range():
log_print("set_bias_mux_to_hi_range() called")
GPIO.output(A_BIAS_MUX, GPIO.HIGH)
log_print("set_bias_mux_to_hi_range() returning")
def set_dut_mux_to_input_res():
log_print("set_dut_mux_to_input_res() called")
GPIO.output(A_DUT_MUX, GPIO.LOW)
GPIO.output(B_DUT_MUX, GPIO.LOW)
log_print("set_dut_mux_to_input_res() returning")
def set_dut_mux_to_output_res():
log_print("set_dut_mux_to_output_res() called")
GPIO.output(A_DUT_MUX, GPIO.HIGH)
GPIO.output(B_DUT_MUX, GPIO.LOW)
log_print("set_dut_mux_to_output_res() returning")
def reset_mouse_position():
# log_print("reset_mouse_position() called")
pyautogui.moveTo(700, 160)
# log_print("reset_mouse_position() returning")
def no_blank_screen():
log_print("no_blank_screen() called")
cmd_list = ['xset s noblank', 'xset -dpms', 'xset s off']
for command in cmd_list:
system(command)
log_print("no_blank_screen() returning") | {
"domain": "codereview.stackexchange",
"id": 44787,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, embedded, raspberry-pi",
"url": null
} |
python, embedded, raspberry-pi
def audio_feedback(local_result):
log_print("audio_feedback(%s) called" % local_result)
global PLAY_SOUND
if local_result == 'FAIL':
pygame.mixer.music.load(FAIL_SOUND)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
elif local_result == 'PASS':
pygame.mixer.music.load(PASS_SOUND)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
else:
print('result is invalid. Cant play audio')
if PLAY_SOUND:
PLAY_SOUND = False
log_print("audio_feedback(%s) returning" % local_result) | {
"domain": "codereview.stackexchange",
"id": 44787,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, embedded, raspberry-pi",
"url": null
} |
python, embedded, raspberry-pi
def gui_frame_msngr_update(frame_to_show,
new_current_process_message='No message'):
user_messager_window.update(new_current_process_message)
current_frame_visibility = [frame1.visible, frame2.visible, frame3.visible,
frame4.visible, frame5.visible,
frame6.visible]
frame_update = [False, True, False, False, False, False, False, False,
False]
if frame_to_show == 1:
frame_update = [True, False, False, False, False, False, False, False,
False]
elif frame_to_show == 2:
frame_update = [False, True, False, False, False, False, False, False,
False]
elif frame_to_show == 3:
reset_mouse_position()
frame_update = [False, False, True, False, False, False, False, False,
False]
keypad_message_box.update(new_current_process_message)
elif frame_to_show == 4:
frame_update = [False, False, False, True, False, False, False, False,
False]
pass_test_text_box.update(new_current_process_message)
elif frame_to_show == 5:
frame_update = [False, False, False, False, True, False, False, False,
False]
fail_test_text_box.update(new_current_process_message)
elif frame_to_show == 6:
frame_update = [False, False, False, False, False, True, False, False,
False]
elif frame_to_show == 7:
frame_update = [False, False, False, False, False, False, True, False,
False]
elif frame_to_show == 8:
frame_update = [False, False, False, False, False, False, False, True,
False]
frame8_message_box.update(new_current_process_message)
elif frame_to_show == 9:
frame_update = [False, False, False, False, False, False, False, False,
True] | {
"domain": "codereview.stackexchange",
"id": 44787,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, embedded, raspberry-pi",
"url": null
} |
python, embedded, raspberry-pi
True]
frame9_ip_add_box.update(new_current_process_message)
if not (current_frame_visibility == frame_update):
frame1.update(visible=frame_update[0])
frame2.update(visible=frame_update[1])
frame3.update(visible=frame_update[2])
frame4.update(visible=frame_update[3])
frame5.update(visible=frame_update[4])
frame6.update(visible=frame_update[5])
frame7.update(visible=frame_update[6])
frame8.update(visible=frame_update[7])
frame9.update(visible=frame_update[8])
window.refresh() | {
"domain": "codereview.stackexchange",
"id": 44787,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, embedded, raspberry-pi",
"url": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.