text
stringlengths
1
2.12k
source
dict
python, python-3.x def order_num(): prompt=prompts[6] return pyip.inputInt(prompt) choosed_bread=bread_order() choosed_protein=protein_order() choosed_cheese=cheese_order() choosed_add_ons=add_ons_order() ordered_num=order_num() bread_price=bread_type[choosed_bread] protein_price=protein_type[choosed_protein] cheese_price=cheese_type[choosed_cheese] add_on_price=add_on_type[choosed_add_ons] total_price = ordered_num*(bread_price+protein_price+cheese_price+add_on_price) print("Order Summary:") print('Bread type: %s \t %f', choosed_bread,bread_price) print('Protein type: %s \t %f', choosed_protein,protein_price) print('Cheese type: %s \t %f', choosed_cheese,cheese_price) print('Add-on type: %s \t %f', choosed_add_ons,add_on_price) print('Total price: \t %f', total_price) However when I run the code until where it asks for cheese, the later prompt already appears (the bold line below). What bread do you want? 1. Wheat 2. White 3. Sourdough 1 What protein do you want? 1. Chicken 2. Turkey 3. Ham 4. Tofu 1 Do you want cheese in your sandwich? yes What type of cheese do you want? **Do you want some add ons to your sandwich?** 1. Cheddar 2. Swiss 3. Mozzarella 4. No cheese I somehow avoided this problem by searching through the internet, learning from others' code, but I am just curious why this happens and how I can avoid this in the future. I believe this must be related to how Python actually runs, so it might help me in further understanding Python as well. Also, below is my current code; I would like some feedback on how I can further improve this code. import pyinputplus as pyip
{ "domain": "codereview.stackexchange", "id": 44315, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x", "url": null }
python, python-3.x def main(): menu={'Bread':{"Wheat":2.2,"White":1.2,"Sourdough":4.6}, 'Protein':{"Chicken":3.3,"Turkey":5.9,"Ham":4.9,"Tofu":1.2}, 'Cheese':{"Cheddar":2.2,"Swiss":1.2,"Mozzarella":4.6,"No cheese":0.0}, 'Add_ons':{"Mayo":1.0,"Mustard":0.8,"Lettuce":1.4,"Tomato":1.6,"No add-ons":0.0}} prompts=['What bread do you want?\n', 'What protein do you want?\n', ['Do you want cheese in your sandwich?(y/n)\n', 'What type of cheese do you want?\n'], ['Do you want some add ons to your sandwich?(y/n)\n', 'What add-ons do you want?\n'], 'How many of this sandwich do you want?\n'] menu_type_list=list(menu.keys()) orders=[] for i in range(len(menu_type_list)): if i <= 1: orders.append(pyip.inputMenu(list(menu[menu_type_list[i]]), prompts[i], numbered='True')) elif i <= 3: if pyip.inputYesNo(prompt=prompts[i][0],yesVal='y',noVal='n')=='y' : orders.append(pyip.inputMenu(list(menu[menu_type_list[i]]), prompts[i][1], numbered='True')) else: orders.append(list(menu[menu_type_list[i]].keys())[-1]) order_qty=pyip.inputInt(prompt=prompts[-1],default=1,greaterThan=0) total_price=0 for i in range(len(orders)): total_price+=menu[menu_type_list[i]][orders[i]] print("Order Summary:") print(('Item type \t Items \t Price').expandtabs(15)) for i in range(len(orders)): print(('%s type: \t %s \t %.2f' % (menu_type_list[i], orders[i], menu[menu_type_list[i]][orders[i]])) .expandtabs(15)) print('Number of sandwich: %d'%(order_qty)) print(('Total price: \t \t %.2f' % (order_qty*total_price)).expandtabs(15))
{ "domain": "codereview.stackexchange", "id": 44315, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x", "url": null }
python, python-3.x print(('Total price: \t \t %.2f' % (order_qty*total_price)).expandtabs(15)) if __name__ == '__main__' : main()
{ "domain": "codereview.stackexchange", "id": 44315, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x", "url": null }
python, python-3.x Thanks in advance! All help will be much appreciated! Answer: That's a subtle bug. prompts=[..., 'What type of cheese do you want?\n' 'Do you want some add ons to your sandwich?\n', ...] Notice that a comma is missing - because of that, this is now a single (multiline) string, instead of two strings. https://docs.python.org/3/reference/lexical_analysis.html#string-literal-concatenation I'd highly recommend getting rid of prompts altogether. Either go for global constants: PROMPT_BREAD = 'What bread do you want?\n' ... def bread_order(): bread_choice=bread_type.keys() return pyip.inputMenu(list(bread_choice),prompt=PROMPT_BREAD,numbered=True) or for local ones: def bread_order(): prompt = 'What bread do you want?\n' bread_choice=bread_type.keys() return pyip.inputMenu(list(bread_choice),prompt=prompt,numbered=True)
{ "domain": "codereview.stackexchange", "id": 44315, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, python-3.x", "url": null }
javascript, jquery, jquery-ui Title: JQuery fixed header that hides on scroll down and shows on scroll up Question: I am currently working on a site product where I have to create a site-header that is always fixed on top of the viewport but when I scroll down it hides, and again when I scroll up, it becomes visible again. Actually, I made it happen somehow but I think the JQuery I used can be simpler and currently has some unnecessary variations in it. I used the JQuery from a reference available on internet. Here is my code. HTML <header class="site-header"> <div class="inner"> </div> </header> CSS .site-header { position: fixed; top: 0; z-index: 1; width: 100%; height: 60px; background: #fff; transition: transform .25s;} .hidden {transform:translateY(-100%)} JQuery jQuery( function( $ ) { var didScroll; var lastScrollTop = 0; var delta = 5; var navbarHeight = $('.site-header').outerHeight(); $(window).scroll(function(event){ didScroll = true; }); setInterval(function() { if (didScroll) { hasScrolled(); didScroll = false; } }, 250); function hasScrolled() { var st = $(this).scrollTop(); // Make scroll more than delta if(Math.abs(lastScrollTop - st) <= delta) return; // If scrolled down and past the site-header, add class .hidden. if (st > lastScrollTop && st > navbarHeight){ // Scroll Down $('.site-header').addClass('hidden'); } else { // Scroll Up if(st + $(window).height() < $(document).height()) { $('.site-header').removeClass('hidden'); } } lastScrollTop = st; } } ); Now, all these things together are functional and I have a header that is fixed; when I scroll down, it disappears and when I scroll back up, it becomes visible again - as I wanted. But, since I am not very familiar with JQuery, I am not sure whether this JQuery could be simpler. I think it would be great if I can achieve the same results with a smaller and simpler JQuery.
{ "domain": "codereview.stackexchange", "id": 44316, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "javascript, jquery, jquery-ui", "url": null }
javascript, jquery, jquery-ui Answer: First improvement I saw is about that always true condition: if (st + $(window).height() < $(document).height()) The amount of pixel scrolled + the viewport height simply cannot be greater than the document height. ;) Knowing this... You end up with a simple if/else condition to add or remove the hidden class. So instead of it, you can use toggleClass and use the condition result as the second argument. function hasScrolled() { var st = $(this).scrollTop(); // Make scroll more than delta if (Math.abs(lastScrollTop - st) <= delta) return; $(".site-header").toggleClass("hidden", st > lastScrollTop && st > navbarHeight); lastScrollTop = st; } Second is about the interval... I think it's useless to run the hasScrolled function every 250ms when this does the same visual effect: $(window).scroll(hasScrolled); Last: do not use var!. let or const are better. Read here I left the delta in there, but for 5 pixels, I think it's useless too. Maybe there is a use-case for a bigger value... Here is the improved code that I suggest: jQuery(function ($) { let lastScrollTop = 0; const delta = 5; const navbarHeight = $(".site-header").outerHeight(); const header = $(".site-header") $(window).scroll(hasScrolled); function hasScrolled() { const st = $(this).scrollTop(); if (Math.abs(lastScrollTop - st) <= delta) return; header.toggleClass("hidden", st > lastScrollTop && st > navbarHeight); lastScrollTop = st; } }); Without delta: jQuery(function ($) { let lastScrollTop = 0; const navbarHeight = $(".site-header").outerHeight(); const header = $(".site-header") $(window).scroll(hasScrolled); function hasScrolled() { const st = $(this).scrollTop(); header.toggleClass("hidden", st > lastScrollTop && st > navbarHeight); lastScrollTop = st; } });
{ "domain": "codereview.stackexchange", "id": 44316, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "javascript, jquery, jquery-ui", "url": null }
c, array, vectors, mpi Title: Compute lp-norm of a vector in parallel with MPI Question: I'm trying to solve the following exercise: Compute in parallel with MPI the lp norm of a vector of a predefined size N allocated on the heap. Your implementation must work also when the number of processes p does not divide the number of elements N. Here's my code. p is the number of ranks, while norm_p describes the mathematical norm I wish to compute. I have a question about the allocation on the heap. Even if the array vec is communicated to every process with an MPI_Bcast, then I do not need to call free(vec) for every rank, right? Indeed, if I do so I have runtime errors of the type (pointer being freed was not allocated), but I'd like also to understand what is the best way to take care of this situation. #include <math.h> #include <mpi.h> #include <stdio.h> #include <stdlib.h> // Compute the lp-norm of a vector, p != infinity #define N 12 int main(int argc, char *argv[]) { int rank, p, global_sum, norm_p; int local_size, local_a, local_b; int local_sum = 0; int *vec; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &p); if (rank == 0) { printf("Choose the norm you want: p=0,... \n"); scanf("%d", &norm_p); vec = (int *)malloc(sizeof(int) * N); for (int i = 0; i < N; i++) vec[i] = i * i; } MPI_Bcast(vec, N, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&norm_p, 1, MPI_INT, 0, MPI_COMM_WORLD); local_size = N / p; int rem = N % p; if (rank < rem) { local_a = rank * (local_size + 1); local_b = local_a + local_size; } else { local_a = rank * local_size + rem; local_b = local_a + (local_size - 1); } printf("On rank %d I'm computing the sum from %d to %d \n", rank, local_a, local_b); for (int i = local_a; i <= local_b; ++i) { local_sum += fabs(pow(vec[i], norm_p)); } printf("On rank %d the computed local sum is %d \n", rank, local_sum);
{ "domain": "codereview.stackexchange", "id": 44317, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, array, vectors, mpi", "url": null }
c, array, vectors, mpi MPI_Reduce(&local_sum, &global_sum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0) { printf("On rank 0 I'm showing the norm of the vector: %lf \n", pow(global_sum, 1. / norm_p)); free(vec); } MPI_Finalize(); return 0; } Answer: Your assignment is ambiguous. You can indeed create the vector on one process and broadcast it, but that is bad MPI practice for two reasons: it's a sequential bottleneck, and it means that process zero needs to have as much memory as all other processes combined. I have a cluster with more than 100 thousand cores. Do you think that's realistic? More importantly, you are thinking in shared memory terms. You only allocate vec on process zero. MPI is distributed memory: each process has its own memory. So each process needs to allocate its own instance of vec. There is no memory that they all share. You have set N very small. Set it to something realistic, like at least a few million. You will then see that your code bombs because of the problem in the previous paragraph. Also: for small N the communication cost of MPI completely overwhelms the computation of the norm. Also also: if you insist on a small N, it would be fun to test your code if you have more processes than the value of N.
{ "domain": "codereview.stackexchange", "id": 44317, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, array, vectors, mpi", "url": null }
go, io, pointers, iteration, reflection Title: Readlines abstraction for Golang Question: I've just started learning Golang and was solving some problems which required me to read lines of text from a file. I decided to abstract away the reading part so that I can use Go's range keyword to iterate over lines of any "file". Here is my attempt: import ( "bufio" "fmt" "io" "io/fs" "os" "reflect" ) func ReadLines[F string | *fs.File](fileOrName F) chan string { ch := make(chan string, 10) var file io.Reader switch fon := reflect.ValueOf(fileOrName); fon.Kind() { case reflect.String: if _file, err := os.Open(fon.String()); err != nil { panic(fmt.Sprintf("Could not open file: '%s'. Error: %v", fileOrName, err)) } else { file = _file //goland:noinspection GoUnhandledErrorResult defer _file.Close() } case reflect.Pointer: file = *(*fs.File)(fon.UnsafePointer()) } fileScanner := bufio.NewScanner(file) go func() { defer close(ch) for fileScanner.Scan() { ch <- fileScanner.Text() } }() return ch } In particular, I would like some feedback on the following: Pointers: My knowledge on how pointers work comes from C++ and C, so I would like to know if my code has an accent that may still be influenced by my C background, or if I've done it right. Generics and reflect package: I was hoping Golang supported multiple dispatch, but from my reading, Go is a structurally typed language, so multiple dispatch doesn't work. This is why I used generics to emulate the behavior. I also use reflection to determine the type of the parameter passed in. Any other feedback is appreciated. Golang version: 1.19.4
{ "domain": "codereview.stackexchange", "id": 44318, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "go, io, pointers, iteration, reflection", "url": null }
go, io, pointers, iteration, reflection Any other feedback is appreciated. Golang version: 1.19.4 Answer: A few problems stand out right away in your solution. One is that you can't write a union of types with | in the type list of a generic function. Another is that fs.File is an interface type, and pointers to interfaces are seldom needed or used in Go. Another is that defer _file.Close() will fire as soon as ReadLines returns, while the goroutine is still trying to read from the file. (You want to close the file only after you're done reading from it, which is when the goroutine finishes.) It would be simpler, more general, and more idiomatic to write your function in terms of io.Reader, requiring: callers with filenames to do their own os.Open (and their own Close); callers reading from strings to wrap them with strings.NewReader(s) before calling your function; callers reading from []byte slices to wrap them with bytes.NewReader(s); callers with fs.FS objects to call Open on them; etc. Note that fs.File already is an io.Reader. // ReadLines produces a channel of the lines scanned from r. // It also produces an error-returning function. // The caller may call that after consuming the channel // in order to discover any error // that may have been encountered during the scan. func ReadLines(ctx context.Context, r io.Reader) (<-chan string, func() error) { var ( ch = make(chan string) sc = bufio.NewScanner(r) err error ) go func() { defer close(ch) for sc.Scan() { line := sc.Text() select { case <-ctx.Done(): err = ctx.Err() return case ch <- line: } } err = sc.Err() }() return ch, func() error { return err } } Things to note in this code:
{ "domain": "codereview.stackexchange", "id": 44318, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "go, io, pointers, iteration, reflection", "url": null }
go, io, pointers, iteration, reflection Things to note in this code: The function takes a context.Context, which is a good practice when you're spawning a long-lived goroutine. It gives a clean and idiomatic way to clean up resources when the caller needs to impose a timeout or otherwise cancel. The function returns a <-chan, not a chan, because the caller should be allowed only to read from it, not write to it. The function also returns a func() error that produces any error that may have been encountered in the goroutine. The channel is unbuffered. A rule of thumb in Go is that channels should be unbuffered or have a buffer size of 1. Any other number is usually either a premature optimization or simply unnecessary. In this case, writing to the unbuffered channel will cause this goroutine to block until some other goroutine is ready to read from the channel, which will wake this goroutine back up, and that's just fine. A select statement chooses the first channel operation that can proceed: either detecting the context object's cancellation, or sending the next line on the output.
{ "domain": "codereview.stackexchange", "id": 44318, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "go, io, pointers, iteration, reflection", "url": null }
go, io, pointers, iteration, reflection I'll point out that all of this is exactly how iter.Lines behaves (in my "go-generics" module), except that the output channel and the deferred error-reading function are encapsulated in the iter.Of[string] interface. The same caveat mentioned there applies here: bufio.Scanner imposes a maximum line length that can cause panics when exceeded. You can work around this by using your own buffer, see Scanner.Buffer. If you're dead set on doing type dispatch, you don't need generics or unsafe pointers. This should work: func ReadLines(ctx context.Context, arg any) (<-chan string, func() error) { var ( r io.Reader c io.Closer ) switch arg := arg.(type) { case io.Reader: r = arg case string: // presumably a filename f, err := os.Open(arg) if err != nil { // Note, callers must now be prepared to get back a nil channel. return nil, func() error { return err } } r, c = f, f default: return nil, func() error { fmt.Errorf("unsupported arg type %T", arg) } } // The rest is mostly like my first version above, // except that there is now also an io.Closer // to make sure we close anything we opened, // but not until we're done reading from it. var ( ch = make(chan string) sc = bufio.NewScanner(r) err error ) go func() { defer close(ch) if c != nil { defer c.Close() } for sc.Scan() { line := sc.Text() select { case <-ctx.Done(): err = ctx.Err() return case ch <- line: } } err = sc.Err() }() return ch, func() error { return err } } ```
{ "domain": "codereview.stackexchange", "id": 44318, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "go, io, pointers, iteration, reflection", "url": null }
performance, rust, unicode, lexical-analysis Title: supporting unicode in rust lexer Question: I am working on a lexer in rust. It has been through many different iterations, going from iterating over a Vec<char> to std::str::Chars to std::str::CharIndices, from pushing chars onto a String to taking a slice of a &str. i'm trying to speed up my implementations, so today i decided to ditch the Chars and instead have been experimenting with a &[u8] based approach. The problem is, not all characters are ascii, and this makes indexing the &[u8] quite hard. I've figured out a solution with u8::leading_ones but this still doesn't solve the problem of implementing take_while. Right now it only works with ascii chars/u8s, but doesn't support unicode. The reason is, to check a predicate against a unicode char, i need to: get the character width using the leading ones of the first character take a slice of the &str ok, now what? The &str slice mentioned in step 2 can be implemented in take_while using &self.string[s..self.pos] however, this is still a &str and not a char. I could just get the character using .chars().next() however this seems too hacky and weird to me. The "WET" alternative would be to clone the code from take_while for every time i want to parse a token, and use a hacky approach when i actually want to check if a unicode character matches a predicate (e.g: parsing an identifier) Any ideas? struct Cursor<'a> { bytes: &'a [u8], string: &'a str, pos: usize } impl<'a> Cursor<'a> { pub fn new(string: &'a str) -> Self { Self { bytes: string.as_bytes(), string, pos: 0 } } pub fn next_char(&mut self) -> Option<&'a str> { let start = self.pos; if start >= self.string.len() { return None; } self.pos += self.bytes[start].leading_ones() as usize; if self.pos == start { self.pos += 1; } Some(&self.string[start..self.pos]) }
{ "domain": "codereview.stackexchange", "id": 44319, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "performance, rust, unicode, lexical-analysis", "url": null }
performance, rust, unicode, lexical-analysis pub fn take(&mut self, amount: usize) -> Option<&'a str> { let start = self.pos; for _ in 0..amount { let s = self.pos; if self.pos >= self.string.len() { return None; } self.pos += self.bytes[self.pos].leading_ones() as usize; if self.pos == s { self.pos += 1; } } Some(&self.string[start..self.pos]) } pub fn take_while<F: FnMut(u8) -> bool>(&mut self, mut pred: F) -> &'a str { let start = self.pos; loop { if self.pos >= self.string.len() - 1 { self.pos += 1; break } let s = self.pos; self.pos += self.bytes[self.pos].leading_ones() as usize; if self.pos == s { self.pos += 1; if !pred(self.bytes[self.pos]) { break } } } &self.string[start..self.pos] } } fn main() { let mut cursor: Cursor<'_> = Cursor::new("123NowTheIntegerLiteralIsOver"); println!("{}", cursor.take_while(|x| x.is_ascii_digit())); } (and yes, Cursor will impl Iterator in future, only in this case will yield &str or a possible Token type rather than char) Answer: struct Cursor<'a> { bytes: &'a [u8], string: &'a str, pos: usize } Having both bytes and string is redundant because they are both the same thing. In terms of memory layout &str and &[u8] are exactly the same. I think you do this so that you can slice the string: &self.string[start..self.pos] Both this doesn't do what you probably want. In particular, slicing a str does a runtime check for the validity of the slice indexes which rather defeats the purpose of operating at the u8 level. You probably want: unsafe { std::str::from_utf8_unchecked(&self.bytes[start..self.pos]) }
{ "domain": "codereview.stackexchange", "id": 44319, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "performance, rust, unicode, lexical-analysis", "url": null }
performance, rust, unicode, lexical-analysis This is unsafe because it is up to you to ensure the correctness of the slice indexes. let s = self.pos; self.pos += self.bytes[self.pos].leading_ones() as usize; if self.pos == s { self.pos += 1; } I'd do: self.pos += self.bytes[self.pos].leading_ones().max(1) as usize; In general you can create a reusable function for moving forward in the stream: fn move_forward(&mut self) -> bool { if self.pos >= self.string.len() { return false; } self.pos += self.bytes[self.pos].leading_ones().max(1) as usize; true } Then you can use it to get rid of most of the repetition in your functions:
{ "domain": "codereview.stackexchange", "id": 44319, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "performance, rust, unicode, lexical-analysis", "url": null }
c++, ai, visitor-pattern Title: Visitor Pattern to handle human and AI responses Question: Let's suppose there is a method of an Inputter class that queries many questions. The Human inputter would have to answer all those questions, but an AI inputter will do so without the questions being displayed to the screen. Some goes for command classes that contains the Inputter* as data member. We don't want separate methods for the two different types of inputters just because the AI will decide its own answers and does not need the questions displayed on the screen. So I've decided to use the Visitor Pattern to handle dealing with the two types of inputters. #include <iostream> #include <vector> #include <ctime> class Visitor; class Human; class AI; class Inputter { protected: std::string name; public: Inputter (const std::string& n) : name(n) { } virtual ~Inputter() = default; std::string getName() const { return name; } void dine(); virtual void accept (Visitor&) = 0; }; class Visitor { public: virtual void visit (Human*) = 0; virtual void visit (AI*) = 0; }; class Human : public Inputter { using Inputter::Inputter; void accept (Visitor& visitor) override { visitor.visit(this); } }; class AI : public Inputter { using Inputter::Inputter; void accept (Visitor& visitor) override { visitor.visit(this); } }; // Use separate Visitor classes for each query to follow the open-closed principle. class MealVisitor : public Visitor { const std::vector<std::string> mealOptions; std::string mealChosen; public: MealVisitor (const std::vector<std::string>& m) : mealOptions(m) { } std::string getMealChosen() const { return mealChosen; } private: void visit (Human*) override; void visit (AI*) override; }; class TipVisitor : public Visitor { int tipGiven; public: int getTipGiven() const { return tipGiven; } private: void visit (Human*) override; void visit (AI*) override; };
{ "domain": "codereview.stackexchange", "id": 44320, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, ai, visitor-pattern", "url": null }
c++, ai, visitor-pattern class Command { protected: Inputter* inputter; public: Command (Inputter* i) : inputter(i) { } virtual void execute() const = 0; }; class PlayCommand : public Command { struct PlayCommandVisitor : Visitor { bool yes; }; struct BreakVisitor : PlayCommandVisitor { void visit (Human*) override; void visit (AI*) override; }; struct ResumeVisitor : PlayCommandVisitor { void visit (Human*) override; void visit (AI*) override; }; using Command::Command; public: void execute() const override; private: template <typename VisitorType> bool getYesOrNo() const; }; void Inputter::dine() { std::cout << name << " enters the restaurant.\n"; MealVisitor mealVisitor({"steak", "spaghetti", "burger"}); accept(mealVisitor); const std::string meal = mealVisitor.getMealChosen(); std::cout << "20 minutes later, " << name << " has eaten the " << meal << ".\n"; TipVisitor tipVisitor; accept(tipVisitor); const int tipGiven = tipVisitor.getTipGiven(); std::cout << name << " gives " << "$" << tipGiven << " tip.\n\n"; } void MealVisitor::visit (Human*) { int choiceNumber = 0; std::cout << "Choose a meal:\n"; for (const std::string& meal : mealOptions) std::cout << ++choiceNumber << ". " << meal << '\n'; while (true) { std::cin >> choiceNumber; if (1 <= choiceNumber && choiceNumber <= (int)mealOptions.size()) break; std::cout << "Please choose one of the above options.\n"; } mealChosen = mealOptions[choiceNumber - 1]; } void MealVisitor::visit (AI*) { // No presentation with std::cout shall be given. mealChosen = mealOptions[std::rand() & mealOptions.size() - 1]; } void TipVisitor::visit (Human* human) { std::cout << "How much will " << human->getName() << " tip? \n"; std::cin >> tipGiven; }
{ "domain": "codereview.stackexchange", "id": 44320, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, ai, visitor-pattern", "url": null }
c++, ai, visitor-pattern void TipVisitor::visit (AI*) { // Do not query the AI. Just let the AI do its part. tipGiven = std::rand() % 10 + 5; } void PlayCommand::execute() const { std::cout << inputter->getName() << " has been playing for 30 minutes.\n"; const bool stop = getYesOrNo<BreakVisitor>(); if (stop) { std::cout << inputter->getName() << " has decided to take a break from playing.\n"; std::cout << inputter->getName() << " has taken a break for 10 minutes.\n"; const bool resume = getYesOrNo<ResumeVisitor>(); if (resume) std::cout << inputter->getName() << " resumes playing.\n"; else std::cout << inputter->getName() << " does not want to play anymore.\n"; } else std::cout << inputter->getName() << " continues to play.\n"; } template <typename VisitorType> bool PlayCommand::getYesOrNo() const { VisitorType visitor; inputter->accept(visitor); return visitor.yes; } bool ask_yn (const std::string& question) { while (true) { std::cout << question << " (y/n) "; char yesNo; if (std::cin >> yesNo) { switch (yesNo) { case 'y': case 'Y': return true; case 'n': case 'N': return false; } } else { // Input failed. Assume no. std::cout << '\n'; return false; } } } void PlayCommand::BreakVisitor::visit (Human* human) { yes = ask_yn("Does " + human->getName() + " want to stop take a break?"); } void PlayCommand::BreakVisitor::visit (AI*) { yes = std::rand() % 5; } void PlayCommand::ResumeVisitor::visit (Human* human) { yes = ask_yn("Does " + human->getName() + " want to resume playing?"); } void PlayCommand::ResumeVisitor::visit (AI*) { yes = std::rand() % 2; }
{ "domain": "codereview.stackexchange", "id": 44320, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, ai, visitor-pattern", "url": null }
c++, ai, visitor-pattern void PlayCommand::ResumeVisitor::visit (AI*) { yes = std::rand() % 2; } int main() { std::srand(std::time(nullptr)); Human human("Bob"); AI ai("Max"); human.dine(); ai.dine(); const PlayCommand playCommand1(&human), playCommand2(&ai); playCommand1.execute(); playCommand2.execute(); } Sample output: Bob enters the restaurant. Choose a meal: 1. steak 2. spaghetti 3. burger 2 20 minutes later, Bob has eaten the spaghetti. How much will Bob tip? 10 Bob gives $10 tip. Max enters the restaurant. 20 minutes later, Max has eaten the steak. Max gives $13 tip. Bob has been playing for 30 minutes. Does Bob want to stop take a break? (y/n) y Bob has decided to take a break from playing. Bob has taken a break for 10 minutes. Does Bob want to resume playing? (y/n) n Bob does not want to play anymore. Max has been playing for 30 minutes. Max has decided to take a break from playing. Max has taken a break for 10 minutes. Max resumes playing. Answer: Use C++'s random number facilities Avoid C's srand() and rand() functions, they are notoriously bad, and C++ has much better ways to deal with random numbers. Even if it doesn't really matter for the code you posted, it is better to teach yourself to do it correctly. See this example for a quick start. Prefer to pass references instead of pointers When you have to pass a pointer to a function and it should never be nullptr, pass a reference instead. So for example: class Visitor { public: virtual void visit (Human&) = 0; virtual void visit (AI&) = 0; }; And: class Command { protected: Inputter& inputter; public: Command (Inputter& i) : inputter(i) { } virtual void execute() const = 0; };
{ "domain": "codereview.stackexchange", "id": 44320, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, ai, visitor-pattern", "url": null }
c++, ai, visitor-pattern Do you need the visitor pattern? Given your history of questions on Code Review, I sense a pattern (pun intended): you are trying to write code that excercises each design pattern out there. While it might be good to know about design patterns so you know how to implement them when you really need them, you should not contort your code to use a certain pattern if it doesn't need it, otherwise you're doing the equivalent of using a hammer to put in screws. Consider that instead of creating a Visitor class, and deriving a MealVisitor and TipVisitor from it, you could just add virtual member functions chooseMeal() and chooseTip() to Inputter, which Human and AI could implement however they want. So then you have code like: void Inputter::dine() { std::cout << name << " enters the restaurant.\n"; const std::string meal = chooseMeal({"steak", "spaghetti", "burger"}); std::cout << "20 minutes later, " << name << " has eaten the " << meal << ".\n"; const int tipGiven = chooseTip(); std::cout << name << " gives " << "$" << tipGiven << " tip.\n\n"; } … std::string Human::chooseMeal(const std::vector<std::string>& mealOptions) { std::size_t choiceNumber = 0; … return mealOptions[choiceNumber - 1]; } std::string AI::chooseMeal(const std::vector<std::string>& mealOptions) { return mealOptions[std::rand() & mealOptions.size() - 1]; }
{ "domain": "codereview.stackexchange", "id": 44320, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, ai, visitor-pattern", "url": null }
c++, ai, visitor-pattern Note how dine() is now much more compact, and you didn't have to create Visitor and derived classes. So the visitor pattern was overkill here and just made the code needlessly complicated. Of course the visitor pattern is useful, and its greatest strength is that it allows you to remove the implementation of chooseMeal() and chooseTip() from Human and AI, making those classes simpler. It allows you to add behavior later, like choosing a payment method, without having to modify Inputter and its derived classes. But at that point you should ask yourself: should dine() even be a member function of Inputter? Why not create a DineVisitor? Inheritance vs. generic programming Instead of using inheritance and virtual member functions, you can use generic programming in C++ to get similar results. For example, you can write: template<typename T> void dine(T& inputter) { std::cout << inputter.name << " enters the restaurant.\n"; const std::string meal = chooseMeal(inputter, {"steak", "spaghetti", "burger"}); … } std::string chooseMeal(Human&, const std::vector<std::string>& mealOptions) { std::size_t choiceNumber = 0; … return mealOptions[choiceNumber - 1]; } std::string chooseMeal(AI&, const std::vector<std::string>& mealOptions) { return mealOptions[std::rand() & mealOptions.size()]; } And then write: Human human("Bob"); AI ai("Max"); dine(human); dine(ai); Whereas with your code, dine() only needed to know about the base class, the problem here is that dine() only works on the derived classes. If you want to create a vector of Inputters, then since C++17 you can use std::variant, and write: std::vector<std::variant<Human, AI>> inputters = { Human("Bob"), AI("Max"), Human("Alice"), … }; for (auto& inputter: inputters) { std::visit(dine, inputter); }
{ "domain": "codereview.stackexchange", "id": 44320, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, ai, visitor-pattern", "url": null }
c++, ai, visitor-pattern for (auto& inputter: inputters) { std::visit(dine, inputter); } Note that Human and AI don't need to derive from any base class when using generics. Avoid casting where possible It is better to avoid casting, as incorrect casting might cause subtle problems. While it doesn't matter in your code, it is better to do it correctly so you learn the correct habit, and don't make a mistake when it does matter. In particular, you are casting mealOptions.size() to int using a C-style cast. You should already prefer to use static_cast<>() here, as it is safer than C-style casts, but it is even better to avoid it entirely by making choiceNumber a std::size_t instead. Review of your update As for the version of PlayCommand that uses std::variant, you can make a std::variant of references, but you have to use std::reference_wrapper. But you don't need std::variant in the visitor itself, see below. You can avoid storing both a reference to Inputter and a variant by using std::visit() to call .name() on the inputter, or just one that returns a pointer to the base class. If you are learning design patterns from a book that uses Java to show how to implement the patterns, they have to work around some limitations of that language. In C++ you can have free functions, so instead of trying to create a class hierarchy, consider creating a stand-alone function instead: static bool queryBreak(Human& human) { return ask_yn("Does " + human.getName() + " want to stop take a break?"); }
{ "domain": "codereview.stackexchange", "id": 44320, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, ai, visitor-pattern", "url": null }
c++, ai, visitor-pattern static bool queryBreak(AI&) { return std::rand() % 2; } … void play(auto& inputter) { std::cout << inputter.getName() << " has been playing for 30 minutes.\n"; const bool stop = queryBreak(inputter); if (stop) { std::cout << inputter.getName() << " has decided to take a break from playing.\n"; std::cout << inputter.getName() << " has taken a break for 10 minutes.\n"; const bool resume = queryResume(inputter); if (resume) std::cout << inputter.getName() << " resumes playing.\n"; else std::cout << inputter.getName() << " does not want to play anymore.\n"; } else std::cout << inputter.getName() << " continues to play.\n"; } Note how play() still fits the definition of a visitor. One drawback is that play() is now a template. This can be avoided by passing it a variant of references or pointers as a parameter. To get a pointer to Inputter, you can use a visitor. I also show here how to use std::reference_wrapper: using InputterReferenceVariant = std::variant< std::reference_wrapper<Human>, std::reference_wrapper<AI> >; void play(InputterReferenceVariant inputterVariant) { Inputter& inputter = std::visit([](auto& v) -> Inputter& { return v; }, inputterVariant); std::cout << inputter.getName() << " has been playing for 30 minutes.\n"; const bool stop = std::visit(queryBreak, inputterVariant); … } … Human human("Bob"); AI ai("Max"); play(human); play(ai);
{ "domain": "codereview.stackexchange", "id": 44320, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, ai, visitor-pattern", "url": null }
c, bitset Title: Something like std::bitset in C
{ "domain": "codereview.stackexchange", "id": 44321, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, bitset", "url": null }
c, bitset Question: I am looking for opinion on this bitset implementation. It only has 4 functions and they are set - set a bit/clear - clear a bit/check - test a bit /zero - zero out all bits. There is another one as well but that isn't meant to be used by the users of bitset. I have used macro to make the underlying type that bitset uses generic as well as the size. I am very new to macros for generic programming. I have been using them here and there in C++ but it had templates and constexpr so there was never really a need. So I want the emphasis of this review to be on use of macros with a little bit on the actual implementation on the side. First is some test code that shows how bitset can be used. #include "cim_generic_bitset.h" #include <stdio.h> enum { n_bits = 150 }; //CIM_GENERIC_BITSET_DECL(state, uint8_t, CIM_BITSET_OF_SIZE(uint8_t, n_bits)); CIM_GENERIC_BITSET_DEF(state, uint8_t, CIM_BITSET_OF_SIZE(uint8_t, n_bits)); int main() { uint8_t state[CIM_BITSET_OF_SIZE(uint8_t, n_bits)]; cim_state_bitset_zero(state); cim_state_bitset_set(state, 0); cim_state_bitset_set(state,22); cim_state_bitset_set(state, 49); cim_state_bitset_set(state, 93); cim_state_bitset_set(state, 141); cim_state_bitset_set(state, 149); assert(cim_state_bitset_check(state, 0)); assert(cim_state_bitset_check(state, 22)); assert(cim_state_bitset_check(state, 49)); assert(cim_state_bitset_check(state, 93)); assert(cim_state_bitset_check(state, 141)); assert(cim_state_bitset_check(state, 149)); for (int i = 0; i < n_bits; i++) { if (i != 0 && i != 22 && i != 49 && i != 93 && i != 141 && i != 149) assert(!cim_state_bitset_check(state, i)); } for (int i = 0; i < n_bits; i++) cim_state_bitset_clear(state, i); assert(!cim_state_bitset_check(state, 0)); assert(!cim_state_bitset_check(state, 22)); assert(!cim_state_bitset_check(state, 49));
{ "domain": "codereview.stackexchange", "id": 44321, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, bitset", "url": null }
c, bitset assert(!cim_state_bitset_check(state, 22)); assert(!cim_state_bitset_check(state, 49)); assert(!cim_state_bitset_check(state, 93)); assert(!cim_state_bitset_check(state, 141)); assert(!cim_state_bitset_check(state, 149)); return 0; }
{ "domain": "codereview.stackexchange", "id": 44321, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, bitset", "url": null }
c, bitset This is the bitset implementation. #pragma once #include <inttypes.h> #include <stdbool.h> #include <string.h> #include <assert.h> typedef struct cim_byte_bit_index { size_t byte; size_t bit; } cim_byte_bit_index; //1 extra sizeof(type) will be allocated if n_bits / sizeof(type) happens to be a whole number //but idk how to implement a constexpr ceil soo #define CIM_BITSET_OF_SIZE(type, n_bits) (size_t)( (n_bits / (sizeof(type) * 8)) + 1) #define CIM_GENERIC_BITSET_ZERO_DECL(name, type, size)\ void cim_##name##_bitset_zero(type* bitfield); #define CIM_GENERIC_BITSET_ZERO_DEF(name, type, size)\ void cim_##name##_bitset_zero(type* bitfield)\ {\ memset(bitfield, 0, size * sizeof(type));\ } #define CIM_GENERIC_BITSET_GET_BIT_BYTE_INDEX_DECL(name, type, size)\ cim_byte_bit_index cim_get_##name##_byte_bit_index(size_t field_size, size_t index); #define CIM_GENERIC_BITSET_GET_BIT_BYTE_INDEX_DEF(name, type, size)\ cim_byte_bit_index cim_get_##name##_byte_bit_index(size_t field_size, size_t index)\ {\ size_t word_size = sizeof(type) * 8;\ size_t which_byte = (size_t)(index / (float)word_size);\ size_t which_bit = (size_t)(index - (which_byte * word_size));\ cim_byte_bit_index byte_bit_index;\ byte_bit_index.byte = which_byte;\ byte_bit_index.bit = which_bit;\ return byte_bit_index;\ } #define CIM_GENERIC_BITSET_SET_BIT_DECL(name, type, size)\ void cim_##name##_bitset_set(type* bitfield, size_t index); #define CIM_GENERIC_BITSET_SET_BIT_DEF(name, type, size)\ void cim_##name##_bitset_set(type* bitfield, size_t index)\ {\ assert(index < size * sizeof(type) * 8 && "Bitset index out of range");\ cim_byte_bit_index byte_bit_index = cim_get_##name##_byte_bit_index(size, index);\ bitfield[byte_bit_index.byte] |= 1 << byte_bit_index.bit;\ } #define CIM_GENERIC_BITSET_CLEAR_BIT_DECL(name, type, size)\ void cim_##name##_bitset_clear(type* bitfield, size_t index);
{ "domain": "codereview.stackexchange", "id": 44321, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, bitset", "url": null }
c, bitset void cim_##name##_bitset_clear(type* bitfield, size_t index); #define CIM_GENERIC_BITSET_CLEAR_BIT_DEF(name, type, size)\ void cim_##name##_bitset_clear(type* bitfield, size_t index)\ {\ assert(index < size * sizeof(type) * 8 && "Bitset index out of range");\ cim_byte_bit_index byte_bit_index = cim_get_##name##_byte_bit_index(size, index);\ bitfield[byte_bit_index.byte] &= ~(1 << byte_bit_index.bit);\ } #define CIM_GENERIC_BITSET_CHECK_BIT_DECL(name, type, size)\ bool cim_##name##_bitset_check(type* bitfield, size_t index); #define CIM_GENERIC_BITSET_CHECK_BIT_DEF(name, type, size)\ bool cim_##name##_bitset_check(type* bitfield, size_t index)\ {\ assert(index < size * sizeof(type) * 8 && "Bitset index out of range");\ cim_byte_bit_index byte_bit_index = cim_get_##name##_byte_bit_index(size, index);\ return bitfield[byte_bit_index.byte] & (1 << byte_bit_index.bit);\ } #define CIM_GENERIC_BITSET_DECL(name, type, size)\ CIM_GENERIC_BITSET_GET_BIT_BYTE_INDEX_DECL(name, type, size)\ CIM_GENERIC_BITSET_SET_BIT_DECL(name, type, size)\ CIM_GENERIC_BITSET_CHECK_BIT_DECL(name, type, size)\ CIM_GENERIC_BITSET_ZERO_DECL(name, type, size)\ CIM_GENERIC_BITSET_CLEAR_BIT_DECL(name, type, size) #define CIM_GENERIC_BITSET_DEF(name, type, size)\ CIM_GENERIC_BITSET_GET_BIT_BYTE_INDEX_DEF(name, type, size)\ CIM_GENERIC_BITSET_SET_BIT_DEF(name, type, size)\ CIM_GENERIC_BITSET_CHECK_BIT_DEF(name, type, size)\ CIM_GENERIC_BITSET_ZERO_DEF(name, type, size)\ CIM_GENERIC_BITSET_CLEAR_BIT_DEF(name, type, size)
{ "domain": "codereview.stackexchange", "id": 44321, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, bitset", "url": null }
c, bitset Answer: size_t word_size = sizeof(type) * 8;\ That magic number 8 looks like it ought to be CHAR_BIT. The same number crops up in other places where it looks like CHAR_BIT is intended. //1 extra sizeof(type) will be allocated if n_bits / sizeof(type) happens to be a whole number //but idk how to implement a constexpr ceil soo #define CIM_BITSET_OF_SIZE(type, n_bits) (size_t)( (n_bits / (sizeof(type) * 8)) + 1) Oops - we need to ensure precedence rules in the expansion, by parenthesising (n_bits). We should probably avoid the over-allocation by adjusting before we divide: #define CIM_BITSET_OF_SIZE(type, n_bits) \ ((size_t)((n_bits) - 1) / sizeof (type) / CHAR_BIT) + 1) #define CIM_GENERIC_BITSET_GET_BIT_BYTE_INDEX_DEF(name, type, size)\ cim_byte_bit_index cim_get_##name##_byte_bit_index(size_t field_size, size_t index)\ {\ size_t word_size = sizeof(type) * 8;\ That should probably be a static const Instead of having separate ⋯_DECL() and ⋯_DEF() macros, perhaps just define functions with file scope (i.e. static)? That will increase opportunities for inlining, too.
{ "domain": "codereview.stackexchange", "id": 44321, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, bitset", "url": null }
typescript, angular-2+ Title: Check if string starts with vowel Question: I am using a template for a text inputs and feedback messages to simplify my html code. This passes in variable string called label and populates the placeholder value for this input, and also uses the same string for the invalid-feedback message, but for the latter I want the correct a or an depending on if the label starts with a vowel or not. I currently use the registerOnTouched() function in text-input-component.ts as this is being called when the inputs are clicked/typed into to correct the feedback messages but I feel like there is a better solution/place to do this check. Maybe something like a ternary operator or a pipe in the template? And theoretically I could omit all the vowels except E from my check but that isn't future proof. Form: <app-text-input [formControl]="$any(loginForm.controls['email'])" [label]="'Email'" [type]="'email'"></app-text-input> <app-text-input [formControl]="$any(loginForm.controls['password'])" [label]="'Password'" [type]="'password'"></app-text-input> Text-Input Template CSS: <div class="form-group"> <input [class.is-invalid]="control.touched && control.invalid" type={{type}} class="form-control" [formControl]="control" placeholder={{label}} > <div *ngIf="control.errors?.['required']" class="invalid-feedback">Please enter a{{labelAfterVowelCheck}}</div> </div> Text-Input Template TypeScript: import { Component, Input, Self } from '@angular/core'; import { ControlValueAccessor, FormControl, NgControl, PatternValidator } from '@angular/forms'; @Component({ selector: 'app-text-input', templateUrl: './text-input.component.html', styleUrls: ['./text-input.component.css'] }) export class TextInputComponent implements ControlValueAccessor { @Input() label?: string; labelAfterVowelCheck = ''; @Input() type: string = 'text';
{ "domain": "codereview.stackexchange", "id": 44322, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "typescript, angular-2+", "url": null }
typescript, angular-2+ constructor(@Self() public ngControl: NgControl) { this.ngControl.valueAccessor = this; } writeValue(obj: any): void { } registerOnChange(fn: any): void { } registerOnTouched(fn: any): void { this.labelAfterVowelCheck = this.label!; if(["A","E","I","O","U","a","e","i","o","u"].some(vowel => this.label?.startsWith(vowel))){ this.labelAfterVowelCheck = 'n ' + this.label; } else{ this.labelAfterVowelCheck = ' ' + this.label; } } get control(): FormControl { return this.ngControl.control as FormControl; } } Answer: Just a couple ideas: I might separate out the "validation" into its own function, isValid here, but you can create a better name. ie. registerOnTouched(fn: any): void { this.labelAfterVowelCheck = this.label!; if(isValid(this.label)){ this.labelAfterVowelCheck = 'n ' + this.label; } else{ this.labelAfterVowelCheck = ' ' + this.label; } } With this change, it's easier to see that you don't need the first assignment (as it will be redone in the line below below). And as you hint at, a ternary dries it up a little: registerOnTouched(fn: any): void { this.labelAfterVowelCheck = (isValid(this.label) ? 'n ' : ' ') + this.label; } The validation itself can be simplified with a regular expression (although I get the sense this code is temporary). ie. /^[aeiou]/i.exec(label) Hope this helps!
{ "domain": "codereview.stackexchange", "id": 44322, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "typescript, angular-2+", "url": null }
search, elisp Title: `nov.el` function to search the entire `epub` ebook Question: nov.el does not provide an easy function to search the whole of the ebook content. Every search performed on buffer only applies to the "document" or chapter of the book on display at that time. I wrote my own search function with reference to this answer below: (defun nov-search (pattern) "Search whole ebook in nov." (interactive "sEnter search pattern: ") (let ((doc-size (length nov-documents)) (version nov-epub-version) (doc-index 1) result) (while (< doc-index (1- doc-size)) (seq-let (id &rest path) (aref nov-documents doc-index) ;; prepare temp buffer for search (with-temp-buffer (cond ((and (version< version "3.0") (eq id nov-toc-id)) (insert (nov-ncx-to-html path))) (t (insert (nov-slurp path)))) (nov-render-html) (goto-char (point-min)) ;; search function (while (search-forward pattern nil t) (push (list (concat (file-name-base path) ": " (replace-regexp-in-string "\n" " " (thing-at-point 'sentence))) doc-index (point)) result)) ;; iterate to next document (setq doc-index (1+ doc-index))))) (seq-let (doc pos) (alist-get (completing-read "Jump to: " (reverse result)) result nil nil #'string=) (nov-goto-document doc) (goto-char pos) )))
{ "domain": "codereview.stackexchange", "id": 44323, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "search, elisp", "url": null }
search, elisp The function generally does what is expected, but response is relatively slow - for a 10MB ebook, a simple search term takes more than 10 seconds to generate results. I would like to seek help in this review to improve the efficiency of the search, possibly (1) by the implementation of some form of indexing beforehand, or (2) through a refactoring of the above search function to make use of existing libraries more suited for the task and (3) point out algorithmic design issues that affect response speed in the code above, if there is any.
{ "domain": "codereview.stackexchange", "id": 44323, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "search, elisp", "url": null }
search, elisp Answer: One thing you could do to speed up the search, is to search for a match first in the raw html, and if you find a match, only then render it using (nov-render-html) to obtain the correct positions. The replace newline looks redundant when using sentence in thing-at-point. In order to get the correct position of the match in the buffer, you have to use nov-render-html first, otherwise you would search in the raw html file and the position would be off (test it by commenting out that line). Therefore, I guess you can not use any other tool, and this will be the fastest solution (except that you are not skipping rendering images, as in my original answer). You could try how much the following version speeds up things: (defun nov-search (pattern) (interactive "sEnter search pattern: ") (let ((version nov-epub-version) (index 1) results) (while (< index (1- (length nov-documents))) (seq-let (id &rest path) (aref nov-documents index) (let (;; HACK: this should be looked up in the manifest (imagep (seq-find (lambda (item) (string-match-p (car item) path)) image-type-file-name-regexps)) ;; NOTE: allows resolving image references correctly (default-directory (file-name-directory path))) (unless imagep (with-temp-buffer (if (and (version< version "3.0") (eq id nov-toc-id)) (insert (nov-ncx-to-html path)) (insert (nov-slurp path))) (goto-char (point-min)) (when (search-forward pattern nil t) (nov-render-html) (goto-char (point-min)) (while (search-forward pattern nil t) (push (list (format "%d %s" index (replace-regexp-in-string "\n" " " (thing-at-point 'line)))
{ "domain": "codereview.stackexchange", "id": 44323, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "search, elisp", "url": null }
search, elisp (thing-at-point 'line))) index (point)) results))))) (setq index (1+ index))))) ;; (print results))) (seq-let (index point) (alist-get (completing-read "Jump to: " (reverse results)) results nil nil #'string=) (nov-goto-document index) (goto-char point))))
{ "domain": "codereview.stackexchange", "id": 44323, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "search, elisp", "url": null }
powershell Title: Fetch specific data from nested JSON-file, then append it to a CSV-file Question: I have written a PowerShell-script, which reads specific data-pieces out of a given JSON-file. Then reassembles the read data. Finally prepends the data to a existing CSV-file by adding it to the top of the file. My script: $srcFile = ".\output copy.json" $targetFile = ".\demo1.csv" # Exit the script, if one of the files is missing. if (!(Test-Path $srcFile)) { Write-Warning "${srcFile} is missing." return } elseif (!(Test-Path $targetFile)) { Write-Warning "${targetFile} is missing." return } # Read JSON-Report in. $json = Get-Content -Path $srcFile | ConvertFrom-Json # Fetch the relevant data from the nested JSON-structure. $tests = $json.stats.tests $passed = $json.stats.passes $failures = $json.stats.failures # Date-format (YYYY-MM-DDThh:mm:ss) not usable. Has to be converted. $dateSegms = $json.stats.start.Split("T")[0].Split("-") # Becomes: DD.MM.YYYY $dateValid = $dateSegms[2] + "." + $dateSegms[1] + "." + $dateSegms[0] $newEntry = "${tests};${passed};${failures};${dateValid}" # Read the existing CSV-file in. $existingCsv = Get-Content -Path $targetFile # The entries have to be ordered descending. $sUpdatedCsv = $newEntry + " " + $existingCsv $aUpdatedCsv = $sUpdatedCsv.Split(" ") # Remove the content of the CSV-file. Clear-Content $targetFile # Write the updated data to the CSV-file. foreach ($line in $aUpdatedCsv) { Add-Content -Path $targetFile "${line}" } # Delete the JSON-report. Remove-Item -Path $srcFile I'm not the most experienced PowerShell-programmer and would like to know where and how my code could be improved. Especially concerning my date-conversion. Are there better ways in PowerShell to convert a date in the given format "YYYY-MM-DDThh:mm:ss" to the needed format "DD.MM.YYYY"? The script produces the needed results. But are there flaws in my code? All comments and answers concerning my approach welcomed.
{ "domain": "codereview.stackexchange", "id": 44324, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "powershell", "url": null }
powershell Answer: Date conversion # Date-format (YYYY-MM-DDThh:mm:ss) not usable. Has to be converted. $dateSegms = $json.stats.start.Split("T")[0].Split("-") # Becomes: DD.MM.YYYY $dateValid = $dateSegms[2] + "." + $dateSegms[1] + "." + $dateSegms[0] That can be written simpler as $startDate = [Datetime]::ParseExact($json.stats.start, "yyyy-MM-ddTHH:mm:ss") $dateValid = $startDate.ToString("dd.MM.yyyy") Functions Your whole script could be separated into some suggestively named functions, yielding a top-level structure like: $testResultFilename = ".\output copy.json" $targetFilename = ".\demo1.csv" $newEntry = Read-Results $testResultFilename Append-Entry -Entry $newEntry -TargetFile $targetFilename Remove-Item -Path $testResultFilename Where, for example Read-Results would contain all code to reliable convert the contents of the test results to the new entry line: function Read-Results($srcFile) { if (!(Test-Path $srcFile)) { throw "${srcFile} is missing." } $json = Get-Content -Path $srcFile | ConvertFrom-Json $tests = $json.stats.tests $passed = $json.stats.passes $failures = $json.stats.failures $startDate = [Datetime]::ParseExact($json.stats.start, "yyyy-MM-ddTHH:mm:ss") $dateValid = $startDate.ToString("dd.MM.yyyy") return "${tests};${passed};${failures};${dateValid}" }
{ "domain": "codereview.stackexchange", "id": 44324, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "powershell", "url": null }
f# Title: Returning different elements between a Hashtable and list in F# Question: I have two collections: A list of strings A hashtable let properties = [ "First"; "Second" ] let hash = System.Collections.Hashtable () hash.Add("First", "Blub") hash.Add("Third", "Blob") My goal is to find the key which is not in the properties list. My solution is so far: // Casting my hashtable to a sequence to work with it easier let keys:seq<string> = hash.Keys |> Seq.cast // Casting the sequence to a set to get the difference in the end let setAll = Set.ofSeq keys // Getting all elements which are also in the properties list let validKeySeq = Seq.filter (fun elem -> List.contains elem properties) keys // Casting the sequence to a set to get the difference in the following line let validKeys = Set.ofSeq validKeySeq // Getting the element which is not in the list Set.difference setAll validKeys The above code works but is there a clearer, more concise or more idiomatic way of getting the element which is not in the list? Answer: The most direct way would be with the Seq.except function, which returns a new sequence that omits any elements that appear in the other parameter. table.Keys |> Seq.cast<string> |> Seq.except properties
{ "domain": "codereview.stackexchange", "id": 44325, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "f#", "url": null }
f# I've named the hash table table rather than hash, because a Hashtable is a type of table that uses hashes, rather than a hash itself (I call this out, because I see 'hash' used like this quite often, and it's always confusing). Seq.cast is still needed, because we are working with an untyped collection, but removes the complexity in the rest of the implementation: it would be nice to trap the error this throws when there is invalid input and qualify it. It's fine to give nice names to each step of a computation as you have done (good practise, even), but using a pipeline like this is idiomatic F#. Note that your implementation uses a linear-scan to check each key of the table against the list of properties; using Seq.except leaves the choice of method up to the BCL, but it most likely builds a HashSet from Properties and queries against that, which will generally give much better performance (closer to linear rather than quadratic). Note also that your implementation can be simplified by inverting the filter: you are selecting those keys that are also in properties, but you can just as well filter for keys that are not properties (with the same cost). let differentKeys = Seq.filter (fun elem -> not (List.contains elem properties)) keys Let's package up the implementation nicely in a function, and add some inline documentation (///) /// Enumerates all keys of the given table that are not in the sequence of properties let nonPropertyKeys (table:IDictionary) properties = table.Keys |> Seq.cast<string> |> Seq.except properties
{ "domain": "codereview.stackexchange", "id": 44325, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "f#", "url": null }
f# We can leave F# to infer the type of properties (or qualify it as seq<string> if you like) and return type (also seq<string>). Note also that this function doesn't expect a HashTable specifically: it will accept any IDictionary, which may or may not be a useful generalisation for your scenario. Generally you should avoid older types like Hashtable, and use newer (generic) versions, e.g. Dictionary<TKey, TValue> (and IReadOnlyDictionary<TKey, TValue>), and consider using a standard F# (immutable) collection like map<'key, 'value>. I assume your exact scenario doesn't permit this, however. If we want to trap the InvalidCastException for an invalid input table (thrown when any key is not a string), then we need to actualise the result before returning. let nonPropertyKeys (table:IDictionary) (properties:seq<string>) = try table.Keys |> Seq.cast<string> |> Seq.except properties |> Seq.toList with | :? InvalidCastException as ice -> raise (ArgumentException(nameof(table), "Table must only have string keys")) This may be important for avoiding confusion, and makes the method more consistent in terms of failure condition with your original code (which also forces evaluation). Alternatively, I note while your code produces a list, your comment mentions returning 'the key' that is not in properties, which you could achieve with the Seq.exactlyOne method: this will raise an exception if there are no elements in a collection, or if there is more than one element. It has a non-raising brother Seq.tryExactlyOne that returns an option 'a. Either of these would also force the evaluation of the original sequences.
{ "domain": "codereview.stackexchange", "id": 44325, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "f#", "url": null }
c++, stream, sdl Title: SDL Stream for C++ Question: Looking at the SDL library. Looking to load images from file into an SDL_Surface using the SDL2 Image API to do this I need to implement the SDL_RWops object class in terms of a C++ stream. The reason I want to use SDL_RWops and use a C++ stream is so that I can do a tiny amount of wrapping and use the standard C++ operator<< to load a file. ThorSDLStream.h #ifndef THORSANVIL_UI_THOR_SDL_STREAM_H #define THORSANVIL_UI_THOR_SDL_STREAM_H #include <SDL.h> #include <SDL_image.h> struct ThorSDLStreamRead: public SDL_RWops { std::istream& stream; ThorSDLStreamRead(std::istream& stream); }; struct ThorSDLStreamWrite: public SDL_RWops { std::ostream& stream; ThorSDLStreamWrite(std::ostream& stream); }; } #endif ThorSDLStream.cpp #include "Surface.h" #include <SDL_image.h> extern "C" { Sint64 streamSizeThors(SDL_RWops*); Sint64 streamSeekThorRead(SDL_RWops*, Sint64, int); Sint64 streamSeekThorWrite(SDL_RWops*, Sint64, int); size_t streamReadThor(SDL_RWops*, void*, size_t, size_t); size_t streamReadThorBad(SDL_RWops*, void*, size_t, size_t); size_t streamWriteThor(SDL_RWops*, const void*, size_t, size_t); size_t streamWriteThorBad(SDL_RWops*, const void*, size_t, size_t); int streamCloseThor(SDL_RWops*); } std::ios_base::seekdir convertSDLDirectionThor(int dir) { switch (dir) { case RW_SEEK_SET: return std::ios_base::beg; case RW_SEEK_CUR: return std::ios_base::cur; case RW_SEEK_END: return std::ios_base::end; } return std::ios_base::beg; }
{ "domain": "codereview.stackexchange", "id": 44326, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, stream, sdl", "url": null }
c++, stream, sdl /* * The SDL_RWops Seek calls this function. * This is used to reset the stream to the clean state when things * go wrong (such as when trying to discover file format). * * The result is the offset into the file after the seek is done. * * The code use TELL => SEEK(CUR, 0) */ Sint64 streamSeekThorRead(SDL_RWops* input, Sint64 dist, int dir) { ThorSDLStreamRead* data = reinterpret_cast<ThorSDLStreamRead*>(input); data->stream.clear(); std::ios_base::seekdir direction = convertSDLDirectionThor(dir); data->stream.seekg(dist, direction); return data->stream ? static_cast<Sint64>(data->stream.tellg()) : -1; } /* * Like streamSeekThorRead but uses write stream. */ Sint64 streamSeekThorWrite(SDL_RWops* input, Sint64 dist, int dir) { ThorSDLStreamWrite* data = reinterpret_cast<ThorSDLStreamWrite*>(input); data->stream.clear(); std::ios_base::seekdir direction = convertSDLDirectionThor(dir); data->stream.seekp(dist, direction); return data->stream ? static_cast<Sint64>(data->stream.tellp()) : -1; } /* * Reads `num` objects of size `size` from the stream. * Return -1 on failure or the number of objects read. */ size_t streamReadThor(SDL_RWops* input, void* dst, size_t size, size_t num) { ThorSDLStreamRead* data = reinterpret_cast<ThorSDLStreamRead*>(input); data->stream.read(reinterpret_cast<char*>(dst), num * size); return data->stream ? data->stream.gcount() / size : -1; } /* * Write `num` objects of size `size` to the stream. * Return -1 on failure or the number of objects written. */ size_t streamWriteThor(SDL_RWops* input, const void* src, size_t size, size_t num) { ThorSDLStreamWrite* data = reinterpret_cast<ThorSDLStreamWrite*>(input); auto before = data->stream.tellp(); data->stream.write(reinterpret_cast<char const*>(src), num * size); return data->stream ? (data->stream.tellp() - before) / size : -1; }
{ "domain": "codereview.stackexchange", "id": 44326, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, stream, sdl", "url": null }
c++, stream, sdl /* Functionality that does not make sense for this implementation */ int streamCloseThor(SDL_RWops*) {return 0;} Sint64 streamSizeThor(SDL_RWops*) {return -1;} size_t streamReadThorBad(SDL_RWops*, void*, size_t, size_t) {return -1;} size_t streamWriteThorBad(SDL_RWops*, const void*, size_t, size_t) {return -1;} // Set up a read object. ThorSDLStreamRead::ThorSDLStreamRead(std::istream& stream) : stream(stream) { seek = streamSeekThorRead; read = streamReadThor; type = SDL_RWOPS_UNKNOWN; close = streamCloseThor; write = streamWriteThorBad; size = streamSizeThor; } // Set up a write object. ThorSDLStreamWrite::ThorSDLStreamWrite(std::ostream& stream) : stream(stream) { seek = streamSeekThorWrite; write = streamWriteThor; type = SDL_RWOPS_UNKNOWN; close = streamCloseThor; read = streamReadThorBad; size = streamSizeThor; } I may do a code review of the Surface object later but basically its a wrapper around SDL_Surface. But to give context to the above I will show the usage in the Surface class. // will be called from `operator>>` std::istream& Surface::loadFromStream(std::istream& stream) { if (stream) { try { ThorSDLStreamRead streamWrapper(stream); SDL_Surface* newSurface = IMG_Load_RW(&streamWrapper, 0); SDL::Surface tmp(newSurface, "Failed to load image from stream"); std::swap(surface, tmp); } catch (std::exception const& e) { stream.setstate(std::ios::failbit); } } return stream; }
{ "domain": "codereview.stackexchange", "id": 44326, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, stream, sdl", "url": null }
c++, stream, sdl // Will be called from `operator<<` std::ostream& SurfaceToPNG::saveToStream(std::ostream& stream) const { if (stream) { ThorSDLStreamWrite streamWrapper(stream); int status = IMG_SavePNG_RW(surface.surface, &streamWrapper, 0); if (status != 0) { stream.setstate(std::ios::failbit); } } return stream; }; // Will be called from `operator<<` std::ostream& SurfaceToJPG::saveToStream(std::ostream& stream) const { if (stream) { ThorSDLStreamWrite streamWrapper(stream); int status = IMG_SaveJPG_RW(surface.surface, &streamWrapper, 0, quality); if (status != 0) { stream.setstate(std::ios::failbit); } } return stream; } Answer: Use static_cast<>() to cast from base to derived You can use static_cast<>() to cast from a pointer to base class to a pointer to derived class. This is much safer, because it will give an error if you try to cast to an unrelated class, whereas reinterpret_cast<>() will just let that happen. Make the C functions static All the functions marked extern "C" only need to be visible inside ThorSDLStream.cpp, so make them static, or put them inside an anonymous namespace. This avoids conflicts in the global namespace. In fact, you don't have to put Thor in their names if you do this. Naming It's confusing to see streamWriteThor(SDL_RWops* input, …). Are you writing to an input? Just name the first parameter output. No need to query for the number of bytes read/written If read() and write() on a stream succeed, they will have read or written exactly the number of bytes that you requested, otherwise the conversion of data->stream to bool will result in false.
{ "domain": "codereview.stackexchange", "id": 44326, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, stream, sdl", "url": null }
c#, error-handling, csv Title: CSV to list, map to database Question: I have a code block that imports CSV to list and map to a database. And there are few validations and I've to return error messages based on those validations. Right now all those logic is handled by multiple if else conditions. The code is working but I don't think it's the right approach. Is there any way I can replace these conditions with something clean? if (fileCSV == null && fileCSV.ContentLength == 0) { importModel.Error = "Error1"; return importModel; } else { List<ImportModel> mappings = _importService.ImportCSVToList<ImportModel>(fileCSV); if (mappings.Count > 0) { IEnumerable<ImportModel> duplicates = mappings.GroupBy(x => x.ProductSku).SelectMany(g => g.Skip(1)).ToList(); if (duplicates.Any()) { importModel.Error = "Error2"; return importModel; } else { var products = _productService.GetProducts(productSkuList).ToList(); if (!importModel.InvalidSkuList.Any()) { bool isImported = _productService.Import(mappings); if (!isImported) { importModel.Error = "Error3"; } } else { return importModel; } } } else { importModel.Error = "Error4"; return importModel; } } Answer: Generally speaking whenever you are facing the problem like the above one what you can do is to perform an assessment against your current flow control and/or try to replace some part of your logic to reduce code complexity. The former one tries to logically reduce the complexity while the latter one mechanically. Assessment Iterate through the different branches and try to describe each of them with simple words Try to visualize the nestedness by using indented lines Try to simplfy on it
{ "domain": "codereview.stackexchange", "id": 44327, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c#, error-handling, csv", "url": null }
c#, error-handling, csv By combining multiple branches into a single one By removing unnecessary branches By splitting up the whole logic into smaller chucks Before When entity is present When entity's id is even When entity's parent is X When entity's parent is Y Otherwise Otherwise Otherwise After When entity is not present or entity's id is odd When entity's parent is one of [X, Y] Otherwise Replacement In C# you have a couple of options. Just to name a few: Ternary conditional operator If you two branches with simple logic both returns with something then replace it with ternary conditional operator From if(condition) { return X(); } else { return Y(); } To return condition ? X() or Y(); Null coalescing operator A special case of the previous one is when you want to return X is it not null otherwise Y as a fallback value From var x = X(); if (x != null) { return x; } else { return Y(); } To return X() ?? Y(); Early exit If you use the if-else structure to perform early exiting in the if branch then simply get rid of the else block From if (parameter is null) { return -1; } else { //The core logic } To if (parameter is null) { return -1; } //The core logic Switch statement/expression If you have a couple of else if blocks to handle different cases then prefer switch instead From if (x == "A") { return A(); } else if(x == "B") { return B(); } ... else { return Fallback(); } To switch (x) { case "A": return A(); case "B": return B(); ... default: return Fallback(); } Or return x switch { "A" => A(), "B" => B(), ... _: => Fallback() }; Applying these to your code //I assume you wanted to check OR not AND in your original code if (fileCSV?.ContentLength == 0) { importModel.Error = "Error1"; return importModel; } List<ImportModel> mappings = _importService.ImportCSVToList<ImportModel>(fileCSV); if (mappings.Count == 0) { importModel.Error = "Error4"; return importModel; }
{ "domain": "codereview.stackexchange", "id": 44327, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c#, error-handling, csv", "url": null }
c#, error-handling, csv IEnumerable<ImportModel> duplicates = mappings.GroupBy(x => x.ProductSku).SelectMany(g => g.Skip(1)).ToList(); if (duplicates.Any()) { importModel.Error = "Error2"; return importModel; } //It seems like the products is unused, so this statement is unnecessary //var products = _productService.GetProducts(productSkuList).ToList(); if (!importModel.InvalidSkuList.Any()) { importModel.Error = _productService.Import(mappings) ? importModel.Error : "Error3"; } return importModel; As I noted in the code I assumed that you wanted to write fileCSV == null || fileCSV.ContentLength == 0 in your outer most if statement, because with AND it does not make any sense.
{ "domain": "codereview.stackexchange", "id": 44327, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c#, error-handling, csv", "url": null }
beginner, c Title: Write a program to remove trailing blanks and tabs from each line of input, and to delete entirely blank lines Question: Continuing to slowly progress through The C Programming Language by Brian Kernighan and Dennis Ritchie. The code I came up with below is for Exercise 1-18 - Write a program to remove trailing blanks and tabs from each line of input and to delete entirely blank lines. Feedback is much appreciated. In particular, I would like to know: How would you go about keeping track of the dynamic changing size of curLine and increasing its size should the current line eclipse 1000 bytes. The charCount output for each line in the output seems to be 1 less than what is displayed in Vim for character count. Why is that? How can I ensure that the character count output in my code is not off by one? // Exercise 1-18. Write a program to remove trailing blanks and tabs from each line of input, and to delete entirely blank lines. #include <stdio.h> int main() { int c; int charCount = 0; char curLine[1000];
{ "domain": "codereview.stackexchange", "id": 44328, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "beginner, c", "url": null }
beginner, c #include <stdio.h> int main() { int c; int charCount = 0; char curLine[1000]; while ((c = getchar()) != EOF) { curLine[charCount] = c; if (c == '\n') { // Skip entirely blank lines if (charCount > 1) { // reduce charCount for trailing blanks and tabs ahead of printing int temp = charCount; for (int i = temp; curLine[i] == ' ' || curLine[i] == '\t' || curLine[i] == '\n'; i--) { charCount--; } // print the line for (int i = 0; i < charCount + 1; i++) { printf("%c", curLine[i]); } // Printing charCount to ensure trailing spaces, tabs, and entirely blank lines are removed printf("\nChar count after triming line: %i\n", charCount); } charCount = 0; } else { charCount++; curLine[charCount] = c; } } // If last line ends on a space instead of new line if (charCount > 0) { for (int i = 0; i < charCount; i++) { printf("%c", curLine[i]); } // Printing charCount to ensure trailing spaces, tabs, and entirely blank lines are removed printf("\n%i\n", charCount); } } Updates Edited code in response to vnp comment. Answer: How would you go about keeping track of the dynamic changing size of curLine and increasing its size should the current line eclipse 1000 bytes.
{ "domain": "codereview.stackexchange", "id": 44328, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "beginner, c", "url": null }
beginner, c One can code for that, yet consider growing an input buffer to any size allows a nefarious user to overwhelm memory resources. Often, it is simply reasonable to limit input length to some large value and treat huge lines as foolish input or an attack and detect overlong lines. Consider BUFSIZ from <stdio.h>. "BUFSIZ which expands to an integer constant expression that is the size of the buffer used by the setbuf function;" "The value of the macro BUFSIZ shall be at least 256." C17dr 7.21. Sample: char curLine[BUFSIZ]; while ((c = getchar()) != EOF) { if (charCount >= BUFSIZ - 1) { fprintf(stderr, "Overly long line ignored.\n"); while ((c = getchar()) != EOF && c != '\n') { ; } curLine[charCount] = c; charCount = 0; continue; } ... The charCount output for each line in the output seems to be 1 less than what is displayed in Vim for character count. Why is that? How can I ensure that the character count output in my code is not off by one? charCount fails to include the '\n' in its summation. Other int vs. char OP correctly uses int ch instead of char ch to store the typically 257 different possible responses from getchar(). Code re-use Since processing a line inside the loop is like processing the line after the loop, consider a helper function. It appears the char count after trimming line includes the '\n'. void process_line(int length, const char *line) { // Only process and print line if not empty and not just a \n if (length > 0 && line[0] != '\n') { bool eol_found = false; if (line[length-1] == '\n') { length--; eol_found = true; } // remove trailing blanks and tabs while (length > 0 && (line[length-1] == ' ' || line[length-1] == '\t')) { length--; } // %.*s prints a character array up to the supplied length or a null character printf("%.*s\n", length, line); printf("char count after trimming line: %i\n", length + eol_found); } }
{ "domain": "codereview.stackexchange", "id": 44328, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "beginner, c", "url": null }
beginner, c Simplification In this sample code, curLine[] does not become a string as a null character is not appended. Instead charCount is used to control print width. int c; int charCount = 0; char curLine[BUFSIZ]; while ((ch = getchar()) != EOF) { if (charCount >= BUFSIZ) { ; // As above error handling } curLine[charCount++] = c; if (c == '\n') { process_line(charCount, curLine); charCount = 0; } } process_line(charCount, curLine);
{ "domain": "codereview.stackexchange", "id": 44328, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "beginner, c", "url": null }
c++, stream, sdl Title: SDL Stream for C++ follow up Question: Initial review requested here: The big thing I had missed was that read/write either work successfully (and all requested data is read/written) or it fails the flags are set appropriately. For write this is easy. It work and we can return num or it fails and we return -1. For read it is slightly more complex. A read that gets data but hits eof() has still worked (but my original function would have returned -1 indicating failure). So a read fails if the eof() is true before the read is tried or one of the following is true bad() or fail() or gcount() == 0 after the read is done. /* * If we read and reach eof() that is still a good read (as long as data is read) * So this test does not check for eof() as a failure. * * Note: if eof() is true then the next call to streamReadThor() * will result in a failure, as it test to see if the stream * is good before a read. */ bool streamHadGoodRead(std::istream& stream) { return !stream.bad() && !stream.fail() && stream.gcount() != 0; } /* * Reads `num` objects of size `size` from the stream. * Return -1 on failure or the number of objects read. */ size_t streamReadThor(SDL_RWops* input, void* dst, size_t size, size_t num) { ThorSDLStreamRead* data = static_cast<ThorSDLStreamRead*>(input); if (!data->stream) { return -1; } data->stream.read(reinterpret_cast<char*>(dst), num * size); return streamHadGoodRead(data->stream) ? data->stream.gcount() / size : -1; } /* * Write `num` objects of size `size` to the stream. * Return -1 on failure or the number of objects written. */ size_t streamWriteThor(SDL_RWops* output, const void* src, size_t size, size_t num) { ThorSDLStreamWrite* data = static_cast<ThorSDLStreamWrite*>(output); if (!data->stream) { return -1; } data->stream.write(reinterpret_cast<char const*>(src), num * size); return data->stream ? num : -1; }
{ "domain": "codereview.stackexchange", "id": 44329, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, stream, sdl", "url": null }
c++, stream, sdl Answer: Make sure the comments match the code. In streamHadGoodRead() the comments talk about eof(), but the code doesn't contain any eof(). Furthermore, the code is still not correct. Read the documentation of std::ifstream::read() carefully: you'll notice that on reaching the end of the file, it will set both eofbit and failbit. I also checked the documentation for SDL_RWops: apparently, read() should return 0 on error or EOF, not -1. That also means that you don't need to special case for gcount() == 0. Also, write() should return the number of objects that were fully written, and not -1 on error. /* * Read `num` objects of size `size` from the stream. * Return the number of objects successfully read, * which might be less than num if an error occurred or EOF has been reached. */ size_t streamReadThor(SDL_RWops* input, void* dst, size_t size, size_t num) { auto& stream = static_cast<ThorSDLStreamRead*>(input)->stream; stream.read(reinterpret_cast<char*>(dst), num * size); return stream.gcount() / size; } /* * Write `num` objects of size `size` to the stream. * Return the number of objects successfully written, * which might be less than num if an error occurred. */ size_t streamWriteThor(SDL_RWops* output, const void* src, size_t size, size_t num) { auto& stream = static_cast<ThorSDLStreamWrite*>(output)->stream; if (!stream) return 0; auto before = stream.tellp(); stream.write(reinterpret_cast<char const*>(src), num * size); return (stream.tellp() - before) / size; } Note that std::ostream::write() does not set failbit on error, so tellp() would still be valid after a write error. However, if failbit was set because of any other action on the stream, then tellp() cannot be used, so we still need the check if the stream is good before doing anything.
{ "domain": "codereview.stackexchange", "id": 44329, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c++, stream, sdl", "url": null }
c Title: Finding the sum of 2^x and all powers of 8 less than 2^x Question: Is there a more efficient way, without a loop for example, to find a the end value for n for a specific x? #include <stdio.h> int main(void) { const int x = 13; int n; for (int c = 0, p = 0; p < x; ++p) { n = c+(1<<p); if (p%3 == 0) c = n; } printf("%d\n", n); } The first few values of this function should be: 1 3 5 9 25 41 73 201 329 585 1609 2633 4681 Answer: c should not exist, and calculating n before your p%3 check is not a good idea. Further, don't p%3 at all; instead, just increment p by 3 instead of 1. In Python this would look like (verified) x = 13 c = sum(1<<p for p in range(0, x, 3)) | 1<<x print(c) # 12873 In C, since you're using integer math, don't add: instead, use binary-or. I didn't compile this: const int x = 13; unsigned n = 1 << x; for (int p = 0; p < n; p += 3) n |= 1u << p; A more exotic algorithm that completes in O(log(n)) instead of O(n) time is: x = 13 power = 3 total = 1 limit = 1<<x while total < limit: total |= total << power power <<= 1 total = (total & (limit-1)) | limit But given that your problem scale is tiny, this is premature optimisation and I doubt that it would be worth it. At 64-bit integer sizes the difference will be imperceptible. Speaking of 64 bits: depending on the size of your int, you can just pre-populate a constant integer and then mask it without a loop, as in x = 13 limit = 1 << x full64 = 0x9249_2492_4924_9249 print(full64 & (limit - 1) | limit) In C, something like const int x = 13; const uint64_t limit = 1ull << x; const uint64_t full = 0x9249249249249249; const uint64_t n = full & (limit - 1) | limit; or even, in octal, const int x = 13; const uint64_t limit = 1ull << x; const uint64_t full = 01111111111111111111111; const uint64_t n = full & (limit - 1) | limit;
{ "domain": "codereview.stackexchange", "id": 44330, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c", "url": null }
c, gcc Title: 32x32x32 units octree, that supports 15-bit data units Question: The idea is to have an octree that stores a 32x32x32 region of 15 bit values more efficiently than a 32x32x32 array, because if all 8 leaf nodes of a given branch are the same, the branch does not need to be allocated and can just be stored as a single value instead of 8+. This recursively means that if all values are the same in the octree, only one object needs to be stored. Each node is a 16 bit object, where the MSB determines if the remaining 15 bits are an pointer (relative to the start of the octree) to a set of 8 subnodes, or a value. The provided code is a basic implementation of this idea, but does not include more advanced features such as detecting and merging branches whose values are all identical, or multiple parents referencing a single child set if they are the same (i.e. an octree DAG). The Set function resizes the octree via realloc when more space is needed, to the smallest power of 2 large enough to contain all the nodes. One issue is then a full octree would have 65536 allocated objects but only 37448 will ever be needed at any point (excluding the root node0). A recommendation for a better reallocation scheme, along with general advice for improvement, would be heavily appreciated <3. A valid all-0 octree could be initialized as follows: Octree *o0 = memset(malloc(sizeof(Octree)), 0, sizeof(Octree));
{ "domain": "codereview.stackexchange", "id": 44331, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, gcc", "url": null }
c, gcc The code: typedef union Node8 { uint64_t y[2], c[2]; uint32_t z[2][2], b[4]; uint16_t x[2][2][2], a[8]; } Node8; typedef struct Octree { uint64_t *data; // data* fields are reserved for future use uint16_t datasize, nodesize; uint8_t dataalloc, nodealloc; uint16_t node0; // root node Node8 node[]; // node[-1] == node0 } Octree; uint16_t *Get(Octree *const o, const uint8_t x, const uint8_t z, const uint8_t y, uint_fast8_t *const p) { uint16_t *n = &o->node0; uint_fast8_t i = 4; while (*n&0x8000 && (n = &o->node[*n&0x7FFF].x[y>>i&1][z>>i&1][x>>i&1], i)) --i; if (p) *p = i; return n; } Octree *Set(Octree *o, const uint8_t x, const uint8_t z, const uint8_t y, const uint16_t v) { uint_fast8_t i; uint16_t *n = Get(o, x, z, y, &i); if (i) { const uint16_t p = *n; const unsigned size = o->nodesize+i; if (size >= 1U<<o->nodealloc) { const ptrdiff_t off = n-(uint16_t *)o->node; n = (uint16_t *)(o = realloc(o, sizeof(Octree)+(sizeof(Node8)<<(o->nodealloc = 32-__builtin_clz(size)))))->node+off; } do { *n = o->nodesize|0x8000; Node8 *const s = &o->node[o->nodesize++]; n = &s->x[y>>i&1][z>>i&1][x>>i&1]; s->a[1] = s->a[0] = p; // copy 8 objects in 3 steps instead of 8 s->b[1] = s->b[0]; s->c[1] = s->c[0]; } while (i--); } *n = v; return o; } void Iter(const Octree *const o) { const uint16_t *node[6] = {&o->node0}; int_fast8_t index[6] = {0}; for (uint_fast8_t i = 0;;) { const int_fast8_t j = index[i]--; if (j == -1) { if (i--) continue; break; } const uint16_t n = node[i][j]; if (n&0x8000 && i < 5) { node[++i] = o->node[n&0x7FFF].a; index[i] = 7; continue; } printf("%d\n", n); } }
{ "domain": "codereview.stackexchange", "id": 44331, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, gcc", "url": null }
c, gcc Answer: Make your code more readable Your code is written in a very compact style, making it hard for someone not familiar with it to understand it. Avoid multiple statements per line, avoid assignments inside expressions, and don't hesitate to create helper functions even for small amounts of code if it helps readability. Consider: static bool entry_is_pointer(uint16_t entry) { return entry & 0x8000; } static Node8 *entry_to_node(Octree *o, uint16_t entry) { return &o->node[entry & 0x7fff]; } static uint16_t *descend(Node8 *node, uint8_t x, uint8_t z, uint8_t y, fast_uint8_t level) { return &node.x[(x >> level) & 1][(y >> level) & 1][(z >> level) & 1]; } uint16_t *Get(Octree *octree, uint8_t x, uint8_t z, uint8_t y, uint_fast8_t *levelptr) { uint16_t *entry = &octree->node0; uint_fast8_t level = 5; while (entry_is_pointer(*entry)) { --level; Node8 *node = entry_to_node(octree, *entry); entry = descend(node, x, y, z, level); } if (levelptr) *levelptr = level; return entry; }
{ "domain": "codereview.stackexchange", "id": 44331, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, gcc", "url": null }
c, gcc The binary generated by the compiler should still be exactly the same. Use const in the right place I see you made the parameters const, but not in the most useful way. Ideally, you pass a const Octree* to Get(), and get a const uint16_t* back. This will allow the compiler to better optimize code for callers that query the tree but don't want to modify it. Of course, you are using Get() inside Set(), so that is a bit problematic; either you have to duplicate the code to get to an entry but return a non-const pointer, or you have to cast the constness away inside Set(). The latter is safe though, because you already have a non-const pointer to an Octree there. Missing error checking Calls to malloc() and realloc() can fail and return NULL. You should check for that. If you really cannot do anything about it, call abort() or another function to terminate the program. Also note that if realloc() fails, the original allocation is still valid, so o = realloc(o, …) will then leak memory. Optimizing for space or bandwidth efficiency? Disregarding your allocation strategy for now, consider that most CPUs nowadays read 64-byte cache lines at a time. This means 32 16-bit entries in one go, whereas a Node8 is only 8 16-bit entries, or 16 bytes. That means that if while descending the octree the nodes are not consecutive in memory (likely), you are wasting 75% of the memory bandwidth. Instead of using an octree, you could use a different tree structure, where first you store 32 planes indexed by x, then 32 lines indexed by y, and then 32 values indexed by z. So you only need to fetch at most 3 cache lines to get to the desired value. You still get to make "nodes" in this tree have the same value everywhere, but they are then either a whole plane or a whole line. Depending on what you store in the tree, that might be more or less efficient when it comes to the size of the resulting tree. Check if v is the same as already present
{ "domain": "codereview.stackexchange", "id": 44331, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, gcc", "url": null }
c, gcc Check if v is the same as already present In Set(), you never check if v has the same value as is already present at the given location. If so, you can return without having to allocate new nodes. Allocation strategy
{ "domain": "codereview.stackexchange", "id": 44331, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, gcc", "url": null }
c, gcc One issue is then a full octree would have 65536 allocated objects but only 37448 will ever be needed at any point (excluding the root node0). Your code is more complex than necessary. If you are in Set() and i != 0, then you know that you need to allocate exactly i new nodes. Of course, calling realloc() every time you just need to add a few is not efficient, so indeed you can add more in one go. Doubling the size every time gives \$O(1)\$ amortized cost for allocation. But since you know you don't need more than 37448 nodes, just cap it to that number? Also, just keep a simple counter of how many nodes that are used and how many nodes you have allocated. That avoids all the bit shifting and the non-standard _builtin_clz().
{ "domain": "codereview.stackexchange", "id": 44331, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "c, gcc", "url": null }
ruby-on-rails Title: Find items by their properties Question: Given a map of items to their attributes: OPTIONS = { paper: %w[A4_paper countable universal], pencil: %w[trackable universal trackable], chalk: %w[stationery countable trackable A4_paper], }.freeze I need to return the items that have a particular attribute. For example, fetch_type_values('countable') should return ["paper", "chalk"]. Here's my working implementation: def fetch_type_values(option) OPTIONS.map { |key, value| key if value.include?(option.to_s) }.compact.map(&:to_s) end I feel that this is a bit difficult to read; How can I refactor/simplify it to make it more readable? Answer: I would use select since that is what you are really doing def fetch_type_values(option) OPTIONS.select { |_key, value| value.include?(option.to_s) }.map(&:to_s) end I also wouldn't stringify the result. I would keep the options as symbols since that is your internal representation. I would even make the attributes symbols. i.e. OPTIONS = { paper: %i[A4_paper countable universal], pencil: %i[trackable universal trackable], chalk: %i[stationery countable trackable A4_paper], }.freeze
{ "domain": "codereview.stackexchange", "id": 44332, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "ruby-on-rails", "url": null }
python, performance, pathfinding Title: Optimizing Python BFS Code Question: g = input().split() N, M = int(g[0]), int(g[1]) high = 2 ** 32 - 1 map = [[(high) for i in range(N)] for i in range(N)] for i in range(M): g = input().split() left, right, value = int(g[0])-1, int(g[1])-1, int(g[2]) map[left][right] = min(value, map[left][right]) map[right][left] = min(value, map[right][left]) # END INPUT queue = [0] costs = [(high) for i in range(N)] costs[0] = 0 #camefrom = [-1 for i in range(N)] while len(queue) > 0: current = queue.pop() cost = costs[current] for i in range(N): #if i == current: # continue if costs[current] >= costs[i]: continue g = cost + map[current][i] if g < costs[i]: queue.append(i) costs[i] = g print('\n'.join([("-1" if i == high else str(i)) for i in costs])) While trying to translate my Java solution (that did work) to Python for this problem I've realized that it keeps reaching the time limit. Otherwise, it works perfectly fine, getting maybe half the answers right. I've looked at other answers and I just can't figure out how their code is so much more efficient than mine. Any ideas for optimization? Answer: current = queue.pop() What this actually does, which perhaps you did not notice, is remove the last element of the list. So the queue is really a stack, and instead of BFS this algorithm is really DFS. To make the queue work as a queue, you can use pop(0). That makes a difference: DFS is more likely to do unnecessary redundant work in this scenario. Consider some node near the start. It has just been discovered with some suboptimal distance. DFS then immediately dives into all its neighbours and so on and updates all of them with suboptimal distances as well, which means that they'll get put on the queue (stack, really) again and again every time that they're updated with some new-but-still-suboptimal cost.
{ "domain": "codereview.stackexchange", "id": 44333, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, pathfinding", "url": null }
scheme Title: Performant Text File Reading in Scheme Question: I have been experimenting with methods to read large text files into a Scheme program as a single string in a performant manner. Development has been in R7RS Scheme, specifically Chibi-Scheme. After trying many approaches, some quite complex and involved, the best performing have been quite simple. Following are three examples of such along with some example usages and timing results. (import (scheme base) (scheme time)) ;; Use with-input-from-file. ;; Return the contents of a text file as a single string, ;; #\newlines included. (define (file->string1 path) (let* ((start-time (current-second))) (let ((lst (with-input-from-file path (lambda () (let ((p (current-input-port))) (let loop ((ch (read-char p)) (acc '())) (if (eof-object? ch) acc (loop (read-char p) (cons ch acc))))))))) (let ((result (list->string (reverse lst))) (duration (- (current-second) start-time))) (display "file->string1 complete in ") (display duration) (display " seconds.\n") result)))) ;; Use call-with-input-file. ;; Return the contents of a text file as a single string, ;; #\newlines included. (define (file->string2 path) (let ((start-time (current-second)) (lst (call-with-input-file path (lambda (p) (let loop ((ch (read-char p)) (acc '())) (if (eof-object? ch) acc (loop (read-char p) (cons ch acc)))))))) (let ((result (list->string (reverse lst))) (duration (- (current-second) start-time))) (display "file->string2 complete in ") (display duration) (display " seconds.\n") result)))
{ "domain": "codereview.stackexchange", "id": 44334, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "scheme", "url": null }
scheme ;; Use an output string to collect the data read. ;; Return the contents of a text file as a single string, ;; #\newlines included. (define (file->string3 path) (let* ((start-time (current-second)) (result (call-with-input-file path (lambda (p) (let ((out (open-output-string))) (let loop () (cond ((eof-object? (peek-char p)) (get-output-string out)) (else (write-char (read-char p) out) (loop)))))))) (duration (- (current-second) start-time))) (display "file->string3 complete in ") (display duration) (display " seconds.\n") result)) ;; Example usage and results loading "War and Peace" from the ;; Project Gutenberg. ;; https://www.gutenberg.org/cache/epub/2600/pg2600.txt ;; ;; ➜ david in schemacs on branch: (main) ! rlwrap chibi-scheme ;; > (load "file-ops.scm") ;; > (define wap1 (file->string1 "war-and-peace.txt")) ;; file->string1 complete in 0.6330661773681641 seconds. ;; > (define wap2 (file->string2 "war-and-peace.txt")) ;; file->string2 complete in 5.0067901611328125e-06 seconds. ;; > (define wap3 (file->string3 "war-and-peace.txt")) ;; file->string3 complete in 0.48236799240112305 seconds. ;; > (string-length wap1) ;; 3227709 ;; > (string-length wap2) ;; 3227709 ;; > (string-length wap3) ;; 3227709 ;; > (string=? wap1 wap2 wap3) ;; #t
{ "domain": "codereview.stackexchange", "id": 44334, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "scheme", "url": null }
scheme As a test case, I have used a copy of "War and Peace" obtained from Project Gutenberg as the file to read. It consists of 3,227,709 characters, including some preamble and final comments from Project Gutenberg. All three procedures produce identical results, but the second consistently reports a much faster execution time. Timing values close to those above have been reported on repeated runs. Using a copy of "Moby Dick" as the file to read yields a similar timing relationship between the procedures. I can't see any reason for the difference in timings for the second procedure. Are the results bogus for some reason? Is one procedure preferable to the others for stylistic reasons? Answer: Thank you for this very nice investigation into timings. With all due respect, I don't view those figures as performant at all. More on that later. file->string1 uses let* to assign start-time, and then (with confusing indentation) computes the read result. file->string2 uses a single let for two assignments. It is allowed to do them in either order. The optimizer probably noticed that one of them had more dependencies to resolve and should be scheduled first, possibly exploiting overlap with computation of the other expression. Which took ~ five microseconds to evaluate. tl;dr: Benchmarking is hard to get right. Sweat the details. Let's take a step back and critique this from a higher level. I confess I'm not happy with the whole business of reading more than 3 million characters and examining each one to see "is this EOF?", "is this one EOF?". Surely scheme offers a POSIX bulk read primitive? Something like with-input-from-file, and then use read-string with char-set:full, or maybe :designated or even :standard or :printing. If not, I recommend using FFI foreign function interface to call the chapter 2 C read() function directly.
{ "domain": "codereview.stackexchange", "id": 44334, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "scheme", "url": null }
scheme Let's switch gears for a moment to python, which no one would accuse of being a "fast" language. from pathlib import Path from time import time import requests temp = Path("/tmp") def read_war_and_peace(url="https://www.gutenberg.org/ebooks/2600.txt.utf-8"): cache = temp / Path(url).name if not cache.exists(): resp = requests.get(url) cache.write_text(resp.text) print(cache.stat().st_size) with open(cache) as fin: return fin.read() if __name__ == "__main__": t0 = time() print(len(read_war_and_peace()), time() - t0) There's two things going on here. We read 3_359_372 binary bytes, and then we decode them to 3_227_489 code points (characters) of UTF8 text. Doing that with fin.read() was trivial to specify. Observed performance? On a 2.2 GHz (2015) MacBook Air I observed 16 msec elapsed time, or ~ 202 MiB / sec. Relative to evaluation of bytecode, almost all of that time was devoted to C code. Scheme compilers are quite sophisticated, so scheme applications can reasonably be competitive with Rust or C if they are using appropriate data structures that let the compiler shine. I submit that the three functions we see here are Fine but are not Performant. The 30x haircut seems like more than we'd want to accept. And if we wish to complete in less than 482 msec, we should operate on large chunks rather than character-at-a-time.
{ "domain": "codereview.stackexchange", "id": 44334, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "scheme", "url": null }
python, performance, time-limit-exceeded Title: Dijkstra's Algorithm + an additional cost to start at each node Question: I am trying to solve the 2009 Canadian Computing Competition Senior #4. The question gives you a map (graph) of different cities, and the cost of transporting something between each city. There are also a few select cities that sell pencils, at a specific cost. You are then given a destination city, and asked to find the cheapest place to buy a pencil from, taking into account the cost of the pencil, and the transportation cost from the pencil seller to the destination city (if the destination city sells pencils, the transportation cost is 0). # Cities: $$1 \le N \le 5,000$$ # Trade Routes: $$1 \le T \le 5,000,000$$ Route costs: $$0 \le R_{i,j} \le 10,000$$ # Pencils: $$1 \le K \le N$$ Pencil costs: $$0 \le P_i \le 10,000$$ I used Dijkstra's algorithm to find the smallest transportation cost from each city to the destination, and then added it to the cost of a pencil from that city, and then printed out the minimum. On the judge at DMOJ, I got the first three test cases correct, however, I exceed the time limit of five seconds on the next three test cases. So, I'm interested in knowing if there are any ways I can further optimize my code to save time? I have already tried using a 2D array instead of a dictionary to store the trade routes, but it makes taking input longer, and I don't know how to make Dijkstra's algorithm work better on a 2D array instead of a dictionary. The reason for slowness could also be due to the act of input() taking too long, which I saw in a discussion about solving the question in C++, but I'm not sure if that also applies to Python. 
# https://dmoj.ca/problem/ccc09s4 from heapq import heappop, heappush n = int(input()) trade_routes = {i: {} for i in range(1, n + 1)} pencil_cities = {} t = int(input()) for _ in range(t): data = list(map(int, input().split())) trade_routes[data[0]][data[1]] = int(data[2]) trade_routes[data[1]][data[0]] = int(data[2])
{ "domain": "codereview.stackexchange", "id": 44335, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, time-limit-exceeded", "url": null }
python, performance, time-limit-exceeded k = int(input()) pencil_costs = {} for _ in range(k): data = list(map(str, input().split())) pencil_costs[int(data[0])] = int(data[1]) d = input() def shortest_path_weighted(graph, start): """ (dict, str, str) -> int, list Returns the cost and path of the shortest path on a weighted graph """ distances = [100000000 for _ in range(len(graph) + 1)] # cost from start to each node distances[start] = 0 visited = set() visited.add(start) queue = [(distances[start], start)] while len(queue) > 0: (cost, target) = heappop(queue) if target not in visited: visited.add(target) for neighbor in graph[target]: if neighbor not in visited: temp_distance = distances[target] + graph[target][neighbor] if temp_distance < distances[neighbor]: distances[neighbor] = temp_distance heappush(queue, (temp_distance, neighbor)) return distances smallest_cost = 99999999 travel_costs = shortest_path_weighted(trade_routes, int(d)) for pencil_cost in pencil_costs: if travel_costs[pencil_cost] + pencil_costs[pencil_cost] < smallest_cost: smallest_cost = travel_costs[pencil_cost] + pencil_costs[pencil_cost] print(smallest_cost) """ Sample Test Case 3 3 1 2 4 2 3 2 1 3 3 3 1 14 2 8 3 3 1 -- 6 """ ````
{ "domain": "codereview.stackexchange", "id": 44335, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, time-limit-exceeded", "url": null }
python, performance, time-limit-exceeded print(smallest_cost) """ Sample Test Case 3 3 1 2 4 2 3 2 1 3 3 3 1 14 2 8 3 3 1 -- 6 """ ```` Answer: Code Review While I understand you're looking for performance improvements, this is Code Review, so let's start with seeing if we can improve the code first -- including minor performance improvements -- before we concentrate on algorithmic improvements. Structure Code should be organized. With Python, the convention is import statements, then function definitions, then mainline code. You've got a function definition embedded in the mainline code, which is really quite jarring when it comes to reading the code. Move the function up to just below the import statement. Only one function? There could be many more! Functions for: input number of cities inputting the trade route data inputting the pencil cost data determining smallest manufacturing cost + transportation cost Naming While n is often used as a stand-in for a counting number, it doesn't tell us what it is the count of. True, in programming contest problem statements, they often give things like n is the number of cities, and t is the number of trade routes, and so on. But look at your code: nowhere is there even a comment which suggests k is the number of pencil manufacturing cities. How is a reader supposed to determine this, unless they also have the problem statement. Code should be self documenting, so I would change: n → num_cities t → num_trade_routes k → num_pencil_manufacturers d → destination_city
{ "domain": "codereview.stackexchange", "id": 44335, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, time-limit-exceeded", "url": null }
python, performance, time-limit-exceeded n → num_cities t → num_trade_routes k → num_pencil_manufacturers d → destination_city Note that while Python is an "interpreted language", using longer names won't actually slow the program down significantly. There is a "compile" step which happens when the Python source code is read by the interpreter, which ensures the syntax is correct and so on, but it also interns the variable names into tokens, which means variable lookup times will be near constant regardless of variable name length. Heterogeneous variables data = list(map(int, input().split())) trade_routes[data[0]][data[1]] = int(data[2]) trade_routes[data[1]][data[0]] = int(data[2])
{ "domain": "codereview.stackexchange", "id": 44335, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, time-limit-exceeded", "url": null }
python, performance, time-limit-exceeded Continuing with naming, what is data? What is data[0]? What is data[2]? The code doesn't give us a clue! data is a variable which holds a list of ... data items. Specifically, it holds two cities and a route cost, so it needs to be called ... uhm, two_cities_and_route_cost? Yuck! The problem here is there are 2 different types of data in that list -- cities and cost -- so there isn't a good name for that. This means they shouldn't be stored inside one variable. The code has a few other problems: there is no check that data actually contains 3 values. It could hold 5, and two would be silently forgotten. It could hold only one or two, and the next statement would actually generate an IndexError exception, but that is too late; the error would be flagged on the wrong statement, and the programmer is forced to trace through the code to discover where the problem actually started. The .split() function has an optional maxsplit parameter, which could be used to ensure you don't end up with more than 3 values. While that helps, it doesn't go far enough. For this issue Python has "tuple unpacking". Assigning a list of data into a tuple ensures that the list has exactly the correct number of items. It also gives names to the values, solving the earlier point. Finally, the int(...) in int(data[2]) is redundant, as map(int, ...) has already converted the value to an int. city1, city2, cost = map(int, input().split()) trade_routes[city1][city2] = cost trade_routes[city2][city1] = cost
{ "domain": "codereview.stackexchange", "id": 44335, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, time-limit-exceeded", "url": null }
python, performance, time-limit-exceeded This code is much clearer, and safer. input() is .split() into an unknown number of terms. Then map(int, ...) returns an iterable which will convert each term in succession into an integer ... but nothing has happened yet. Previously you used list(...) to force the iteration. Now, we simply assign the iterable to a tuple, which causes tuple-unpacking. The tuple has three elements, so the iterable MUST resolve itself into 3 values for the assignment to succeed. If more or less than three values are given, this statement itself raises the exception ... not usage of a data variable at some (much) later point in the code. The heterogeneous data variable has been eliminated AND we've given names to each of the values. Win-win! The same should be applied to the pencil costs input. Incorrect Documentation The only thing worse than no documentation is incorrect documentation. def shortest_path_weighted(graph, start): """ (dict, str, str) -> int, list Returns the cost and path of the shortest path on a weighted graph """ ... return distances The """docstring""" seems to suggest the function takes 3 parameters (a dict and two strings) and returns an integer (the cost?) and a list (the path through the graph?). The function signature only has 2 parameters, and when it is called it is a passed trade_routes, int(d) ... the second argument which is clearly an int; there is no string in sight! Use type hints, and use a linter which actually checks the type hints. Initialization visited = set() visited.add(start) Why two statements, when one will do? visited = {start} Magic numbers What is 100000000, and where did it come from? How about 99999999? Use named constants where possible. MAX_CITIES = 5_000 MAX_ROUTE_SEGMENT_COST = 10_000 MAX_PENCIL_COST = 10_000 MAX_TRANSPORT_COST = MAX_CITIES * MAX_ROUTE_SEGMENT_COST MAX_TOTAL_COST = MAX_PENCIL_COST + MAX_TRANSPORT_COST
{ "domain": "codereview.stackexchange", "id": 44335, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, time-limit-exceeded", "url": null }
python, performance, time-limit-exceeded Or don't worry about that exact maximum value, and simply use math.inf. Busy Work if target not in visited: visited.add(target) Why two statements? visited is a set; adding target to visited if it is already in the set is a no-op. You could just write: visited.add(target) Bug/inefficiency? Or are you supposed to skip the for neighbor in graph[target]: loop if target is visited? In which case, you want to keep the test, but move the loop into the body of the if statement. Algorithmic Improvements Consider the test case: $$N = 5000$$ $$C_{i,j} = j - i, 1 \le i \lt j \le 5000$$ $$K = 1$$ $$P_2 = 10$$ $$D = 1$$ 5000 cities, all in a straight line, with fully connected trade routes, and pencils only manufactured in the second city. Question: do you really need to calculate the minimum distance to every other city? No. As soon as you've expanded your minimum distance to include the last pencil manufacturing city (in this case, the only one), you can stop computing the transportation costs ... in this case ignoring the remaining 4998 cities, and 4998*4997/2 trade routes, which would be a considerable time savings. Let's add another pencil manufacturer, \$P_{4000} = 4\$. Well, now we have to continue exploring a bit further. How much? After \$C_{1,8} = 7\$, a pencil costing 4 won't have a net cost lower than the 11 from City #2, so we don't have to explore until we reach all of the pencil cities ... just until there are no pencils cheaper than the cheapest total cost less the transportation distance we've explored to. That's a complicated stopping condition, but if it terminates the search before exploring the entire network, it should be worthwhile. But we can make things even simpler by changing the problem slightly: Only one pencil manufacturing city: city #0 Manufacturing cost is 0 \$R_{0,i} = P_i, 1 \le i \le K\$
{ "domain": "codereview.stackexchange", "id": 44335, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, time-limit-exceeded", "url": null }
python, performance, time-limit-exceeded Only one pencil manufacturing city: city #0 Manufacturing cost is 0 \$R_{0,i} = P_i, 1 \le i \le K\$ We've added a route with a cost equal to the manufacture cost of the pencil at each pencil manufacturing city. Now the problem is merely the shortest weighted path from city #0 to the destination city: $$R_{0,D}$$ and early termination should be trivial.
{ "domain": "codereview.stackexchange", "id": 44335, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "python, performance, time-limit-exceeded", "url": null }
javascript, css, react.js, typescript Title: How to write repetitive styled components in a cleaner way Question: I'm using Styled Components for styling and there are many icons defined in the project so in the style file we have this code: my-component.styles.ts import { ReactComponent as CloseIcon } from 'svg/close.svg'; import { ReactComponent as OpenIcon } from 'svg/open.svg'; import { ReactComponent as DeleteIcon } from 'svg/delete.svg'; import { ReactComponent as CheckIcon } from 'svg/check.svg'; ... export const StyledCloseIcon = styled(CloseIcon)` width: 20px; fill: white; `; export const StyledOpenIcon = styled(OpenIcon)` width: 20px; fill: white; `; export const StyledDeleteIcon = styled(DeleteIcon)` width: 20px; fill: white; `; export const StyledCheckIcon = styled(CheckIcon)` width: 20px; fill: white; `; ... As it can be seen above, all icons are using the same styling. And in another component they are used: import { StyledCloseIcon, StyledOpenIcon, StyledDeleteIcon, StyledCheckIcon } from './my-component.styles'; and then: <StyledCloseIcon /> Is there a way to write it in a shorter way? Answer: You can declare a new variable like "sameStyles" const sameStyles = ` width: 20px; fill: white; `; and pass it in as a template literal expression export const StyledCloseIcon = styled(CloseIcon)` ${sameStyles} `; export const StyledOpenIcon = styled(OpenIcon)` ${sameStyles} `;
{ "domain": "codereview.stackexchange", "id": 44336, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "javascript, css, react.js, typescript", "url": null }
graph, react.js, typescript Title: A graph interface for Wikipedia Question: I'm building a graph-based interface to explore Wikipedia, but I'm not really familiar with TypeScript/React (especially state management), so I really feel like I'm just Frankensteining things together, which makes me feel pretty uncomfortable. I've found many things in my code so far that have made me facepalm. I'd appreciate any feedback on state management, modularity, style, implementation, or anything else! If anyone wants to take a look at the entire source-code: https://github.com/lee-janice/wikigraph Otherwise, here's the core bit: App.tsx import "./styles/App.css"; import WikiGraph from "./components/wikigraph"; import { useEffect, useState } from "react"; const NEO4J_DB = String(process.env.REACT_APP_NEO4J_DB); const NEO4J_URI = String(process.env.REACT_APP_NEO4J_URI); const NEO4J_USER = String(process.env.REACT_APP_NEO4J_USER); const NEO4J_PASSWORD = String(process.env.REACT_APP_NEO4J_PASSWORD); function App() { // set initial theme and keep track of dark mode state const [darkMode, setDarkMode] = useState(window.matchMedia("(prefers-color-scheme: dark)").matches); // handle change in dark mode toggle useEffect(() => { if (darkMode) { document.body.classList.add("dark"); document.body.classList.remove("light"); } else { document.body.classList.add("light"); document.body.classList.remove("dark"); } }, [darkMode]);
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript return ( <> <header> <h1> <strong>WikiGraph</strong> </h1> <p className="subtitle">A graph-based approach to exploring the depths of Wikipedia</p> </header> <div className="App"> {/* graph visualization */} <WikiGraph containerId={"vis"} serverDatabase={NEO4J_DB} serverURI={NEO4J_URI} serverUser={NEO4J_USER} serverPassword={NEO4J_PASSWORD} darkMode={darkMode} /> {/* light/dark mode toggle */} <label id="theme-toggle"> <input type="checkbox" checked={darkMode} onChange={() => setDarkMode(!darkMode)} /> Dark mode </label> </div> </> ); } export default App; wikigraph.tsx import { useEffect, useRef, useState } from "react"; import NeoVis, { NeoVisEvents } from "neovis.js/dist/neovis.js"; import ContextMenu, { ContextMenuState, ContextMenuType } from "./contextMenu"; import NavBar, { NavTab } from "./sidebar/navbar"; import UserManual from "./sidebar/userManual"; import About from "./sidebar/about"; import WikipediaSummaries, { WikiSummary } from "./sidebar/wikipediaSummaries"; import styled from "styled-components"; import { createConfig } from "../util/neo4jConfig"; const StyledCanvas = styled.div` height: ${(props) => (props.theme.expanded ? "100%;" : "80%;")} width: ${(props) => (props.theme.expanded ? "100%;" : "60%;")} top: ${(props) => (props.theme.expanded ? "0px;" : "inherit;")} left: ${(props) => (props.theme.expanded ? "0px;" : "inherit;")} z-index: ${(props) => (props.theme.expanded ? "100000;" : "100;")} position: fixed;
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript @media (max-width: 1100px) { height: ${(props) => (props.theme.expanded ? "100%;" : "55%;")} width: ${(props) => (props.theme.expanded ? "100%;" : "90%;")} } `; StyledCanvas.defaultProps = { theme: { expanded: false, }, }; /* https://www.w3schools.com/howto/howto_css_fixed_sidebar.asp */ const StyledSidebar = styled.div` height: 100%; width: 33%; padding-top: 20px; top: 0; right: 0; position: fixed; /* stay in place on scroll */ z-index: 100; overflow-x: hidden; /* disable horizontal scroll */ border-left: 1px solid var(--borderColor); background-color: var(--primaryBackgroundColor); @media (max-width: 1100px) { height: 100%; width: 100%; top: 80%; display: block; position: absolute; z-index: 10000; border-left: none; border-top: 1px solid var(--borderColor); } `; // TODO: figure out how to import this from vis.js export type IdType = string | number; interface Props { containerId: string; serverDatabase: string; serverURI: string; serverUser: string; serverPassword: string; darkMode: boolean; } const WikiGraph: React.FC<Props> = ({ containerId, serverDatabase, serverURI, serverUser, serverPassword, darkMode, }) => { // keep vis object in state const [vis, setVis] = useState<NeoVis | null>(null); const [visIsExpanded, setVisIsExpanded] = useState(false); // keep track of selected nodes and labels // TODO: combine into one object const [selection, setSelection] = useState<IdType[]>([]); const [selectionLabels, setSelectionLabels] = useState([""]); // keep track of summaries // TODO: combine into one object const [summaries, setSummaries] = useState<WikiSummary[]>([]); const [currentSummary, setCurrentSummary] = useState<WikiSummary | null>(null);
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript // keep track of search bar input const [input, setInput] = useState(""); // keep track of nav bar tab state const [currentNavTab, setCurrentNavTab] = useState<NavTab>(NavTab.Home); // keep track of whether the context menu is open or closed const [contextMenuState, setContextMenuState] = useState<ContextMenuState>({ open: false, type: ContextMenuType.Canvas, mobile: window.innerWidth < 1100, x: 0, y: 0, }); window.onresize = () => { if (window.innerWidth < 1100) { if (!contextMenuState.mobile) { setContextMenuState({ ...contextMenuState, mobile: true }); } } else { if (contextMenuState.mobile) { setContextMenuState({ ...contextMenuState, mobile: false }); } } }; // get reference to selection so that we can use the current value in the vis event listeners // otherwise, the value lags behind const selectionRef = useRef(selection); // so that we only register event listeners once const completionRef = useRef(false); // ----- initialize visualization and neovis object ----- useEffect(() => { const vis = createConfig(containerId, serverDatabase, serverURI, serverUser, serverPassword); vis.render(); setVis(vis); // create event listeners once the visualization is rendered vis?.registerOnEvent(NeoVisEvents.CompletionEvent, (e) => { if (!completionRef.current) { completionRef.current = true; const updateSelectionState = (nodeIds: IdType[]) => { // update selection setSelection(nodeIds); selectionRef.current = nodeIds;
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript // update selection labels var labels = vis.nodes .get() .filter((node: any) => (nodeIds ? nodeIds.includes(node.id) : "")) .map(({ label }: { label?: any }) => { return label; }); setSelectionLabels(labels); }; // 1. listener for "select" vis.network?.on("select", (e) => { var nodeIds = vis.network?.getSelectedNodes(); if (nodeIds) { updateSelectionState(nodeIds); } }); // 2. listener for "click" vis.network?.on("click", (click) => { setContextMenuState({ open: false, type: ContextMenuType.Canvas, mobile: window.innerWidth < 1100, x: 0, y: 0, }); }); // 3. listener for "double click" vis.network?.on("doubleClick", (click) => { // if there's a node under the cursor, update visualization with its links if (click.nodes.length > 0) { const nodeId = click.nodes[0]; var cypher = `MATCH (p1: Page)-[l: LINKS_TO]-(p2: Page) WHERE ID(p1) = ${nodeId} RETURN p1, l, p2`; vis?.updateWithCypher(cypher); } }); // 4. listener for "right click" vis.network?.on("oncontext", (click) => { click.event.preventDefault();
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript // TODO: figure out why click.nodes is not accurate on right click // get adjusted coordinates to place the context menu var rect = click.event.target.getBoundingClientRect(); let correctedX = click.event.x - rect.x; let correctedY = click.event.y - rect.y; var type = ContextMenuType.Canvas; // check if there's a node under the cursor var nodeId = vis.network?.getNodeAt({ x: correctedX, y: correctedY }); if (nodeId) { // select node that was right-clicked if (selectionRef.current) { vis.network?.selectNodes([...selectionRef.current, nodeId]); } else { vis.network?.selectNodes([nodeId]); } // update selection state const nodeIds = vis.network?.getSelectedNodes(); if (nodeIds) { updateSelectionState(nodeIds); nodeIds.length > 1 ? (type = ContextMenuType.Nodes) : (type = ContextMenuType.Node); } } else { type = ContextMenuType.Canvas; } setContextMenuState({ open: true, type: type, mobile: window.screen.width < 1100, x: correctedX, y: correctedY, }); }); } }); }, [containerId, serverDatabase, serverURI, serverUser, serverPassword]);
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript // ----- execute cypher query when user inputs search, update visualization ----- const createNewGraph = () => { // TODO: replace this with something that does not open the DB up to an injection attack var cypher = 'CALL { MATCH (p:Page) WHERE apoc.text.levenshteinSimilarity(p.title, "' + input + '") > 0.65 RETURN p.title as title ORDER BY apoc.text.levenshteinSimilarity(p.title, "' + input + '") DESC LIMIT 1 } MATCH (p1:Page)-[l:LINKS_TO]-(p2:Page) WHERE p1.title = title RETURN p1, l, p2'; // TODO: only render if the query returns > 0 nodes, otherwise tell user no nodes were found vis?.renderWithCypher(cypher); vis?.network?.moveTo({ position: { x: 0, y: 0 } }); }; const addToGraph = () => { var cypher = 'CALL { MATCH (p:Page) WHERE apoc.text.levenshteinSimilarity(p.title, "' + input + '") > 0.65 RETURN p.title as title ORDER BY apoc.text.levenshteinSimilarity(p.title, "' + input + '") DESC LIMIT 1 } MATCH (p1:Page)-[l:LINKS_TO]-(p2:Page) WHERE p1.title = title RETURN p1, l, p2'; vis?.updateWithCypher(cypher); vis?.network?.moveTo({ position: { x: 0, y: 0 } }); };
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript return ( <> {/* graph visualization */} <StyledCanvas theme={{ expanded: visIsExpanded }} id="canvas"> <div id={containerId} /> <img src={ visIsExpanded ? darkMode ? "icons/collapse-white.png" : "icons/collapse.png" : darkMode ? "icons/expand-white.png" : "icons/expand.png" } alt={visIsExpanded ? "Collapse visualization button" : "Expand visualization button"} className="vis-expand-button" onClick={() => setVisIsExpanded(!visIsExpanded)} /> {contextMenuState.mobile && ( <img src={ contextMenuState.open ? darkMode ? "icons/close-white.png" : "icons/close.png" : darkMode ? "icons/kebab-white.png" : "icons/kebab.png" } alt={visIsExpanded ? "Collapse visualization button" : "Expand visualization button"} className="mobile-context-button" onClick={() => { var type; if (selection.length === 0) { type = ContextMenuType.Canvas; } else if (selection.length === 1) { type = ContextMenuType.Node; } else { type = ContextMenuType.Nodes; }
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript type = ContextMenuType.Nodes; } setContextMenuState({ ...contextMenuState, open: !contextMenuState.open, type: type }); }} /> )} <input type="submit" value="Stabilize" id="stabilize-button" onClick={() => { vis?.stabilize(); }} /> <input type="submit" value="Center" id="center-button" onClick={() => vis?.network?.fit()} /> <ContextMenu vis={vis} darkMode={darkMode} state={contextMenuState} setState={setContextMenuState} selection={selection} setSelection={setSelection} selectionLabels={selectionLabels} setSelectionLabels={setSelectionLabels} summaries={summaries} setSummaries={setSummaries} setCurrentSummary={setCurrentSummary} /> </StyledCanvas> {/* sidebar */} <StyledSidebar className="sidebar"> <NavBar currentNavTab={currentNavTab} setCurrentNavTab={setCurrentNavTab} /> {currentNavTab === NavTab.Home && ( <> <WikipediaSummaries summaries={summaries} setSummaries={setSummaries} currentSummary={currentSummary} setCurrentSummary={setCurrentSummary} /> <div className="search-bar"> Search for a Wikipedia article: <br /> <input
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript <br /> <input type="search" placeholder="Article title" onChange={(e) => setInput(e.target.value)} /> <br /> <input type="submit" value="Create new graph" onClick={createNewGraph} /> <input type="submit" value="Add to graph" onClick={addToGraph} /> </div> </> )} {currentNavTab === NavTab.About && <About />} {currentNavTab === NavTab.UserManual && <UserManual />} </StyledSidebar> </> ); };
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript
export default WikiGraph;

Answer: Took me a while to find the time to write a proper review.

General

I like your project and what you are trying to achieve with it. In order to keep it maintainable and understandable you might want to consider the following recommendations:

Reduce nesting

I noticed places where the so-called arrow anti-pattern is noticeable. Try to reduce nesting; this can be done by checking for values not being present rather than checking for values being present. Consider this:

// ✅
const do = () => {
  if (value === null) {
    return;
  }
  if (valueTwo === null) {
    return;
  }
  // Do some stuff with value and valueTwo as it is now present
}

// vs.

// ❌
const dont = () => {
  if (value) {
     if (valueTwo) {
        // work on value and valueTwo
     }
  }
}

Avoid direct DOM manipulation

To set the dark mode class you can use a style sheet and directly set it at the outermost div you have inside the App component. Something like this:

const [darkMode, setDarkMode] = useState(window.matchMedia("(prefers-color-scheme: dark)").matches);

return <div className={darkMode ? "dark" : "light"}>{/* ... */}</div>
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript Avoid direct DOM manipulation, as it can have negative side effects on your React experience and ultimately that's what React is for, keeping your UI up-to-date with the Application state of the App. WikiGraph.tsx As I already pointed out in the comments, the WikiGraph.tsx file is doing a lot of things. So I'd start to extract things out of there. After thinking about it, you might want to start to factor out general stuff without external state management libraries. If you are still curious about it though, I'd highly recommend to you and try the getting-started of Redux: https://redux.js.org/introduction/getting-started From the naming, my assumption of this component is, that it is responsible for rendering the graph and only the graph. Given that, let's have a look at the currentNavTab and setCurrentNavTab state first. Sidebar To get rid of the sidebar state we could extract a separate component let's call it Sidebar which could look something along those lines: export const SideBar = (/* introduce your own props */) => { const [currentNavTab, setCurrentNavTab] = useState<NavTab>(NavTab.Home); return (<StyledSidebar className="sidebar"> <NavBar currentNavTab={currentNavTab} setCurrentNavTab={setCurrentNavTab} /> {currentNavTab === NavTab.Home && ( <> <WikipediaSummaries summaries={summaries} setSummaries={setSummaries} currentSummary={currentSummary} setCurrentSummary={setCurrentSummary} /> <div className="search-bar"> Search for a Wikipedia article: <br /> <input type="search" placeholder="Article title" onChange={(e) => setInput(e.target.value)} /> <br /> <input type="submit" value="Create new graph" onClick={createNewGraph} />
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript <input type="submit" value="Create new graph" onClick={createNewGraph} /> <input type="submit" value="Add to graph" onClick={addToGraph} /> </div> </> )} {currentNavTab === NavTab.About && <About />} {currentNavTab === NavTab.UserManual && <UserManual />} </StyledSidebar>); }
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript
The problem here is in general, that the sidebar is tightly coupled with the graph component, therefore some methods are missing here. A simple solution would be to pass those methods via props to this component, which I think would be fine at this point in time - but in the future you might want to re-evaluate this decision. Example, but not complete, interface:

interface SideBarProps {
    summaries: WikiSummary[];
    currentSummary: WikiSummary | null;
    setSummaries: (e: WikiSummary | null) => void;
    // ... and so on
}

What to do with the vis object

For now, it might be sufficient to extract it out of the component, by something like this:

// vis.ts
const createVis = () => createConfig(containerId, serverDatabase, serverURI, serverUser, serverPassword);

let _vis: NeoVis | undefined;

const getOrCreateVis = () => _vis === undefined ? createVis() : _vis;

export getOrCreateVis;

Hint: At a later point in time you can consider moving it into a global state-management library like Redux, where your business logic is then held in reducers.

This would move the responsibility of creating the Vis object into the getOrCreateVis method. You might want to extend/split/refactor this method to your needs. Thus it can be used like this:

// wikigraph.tsx omitting not relevant code
const WikiGraph = (/*...*/) => {
    useEffect(() => {
        getOrCreateVis().registerOnEvent(NeoVisEvents.CompletionEvent, (e) => {/**..*/});
    });
}

From there it is desirable to extract the registerOnEvent method as well and just pass the methods you want to register. I'd scribble this like this:

createVis({
   select: () => { /* do your stuff on the select event */ },
   click: () => {/* ... */}
});
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript
Events

The events like select and so on look a bit odd to me, especially the select event. That's because I'd assume that the event would return the selected nodes inside the event object, rather than checking the global vis object and asking it for the selected nodes. In an ideal world I'd expect something like this:

const select = (e) => updateSelectionState(e);

Assuming that those calls are valid, the above registration call could look like this:

getOrCreateVis({
    select: updateSelectionState,
    /* ... more events like: */
    doubleClick: (click) => {
        if (click.nodes.length === 0) {
            return;
        }

        const nodeId = click.nodes[0];
        const cypher = `MATCH (p1: Page)-[l: LINKS_TO]-(p2: Page) WHERE ID(p1) = ${nodeId} RETURN p1, l, p2`;
        vis?.updateWithCypher(cypher);
    }
});

CreateNewGraph & addToGraph

Those methods are basically static and only depend on the input. Pull them out of the component.

// anywhere outside of wikigraph
const createNewGraph = (input) => {
    // TODO: replace this with something that does not open the DB up to an injection attack
    var cypher =
        'CALL { MATCH (p:Page) WHERE apoc.text.levenshteinSimilarity(p.title, "' +
        input +
        '") > 0.65 RETURN p.title as title ORDER BY apoc.text.levenshteinSimilarity(p.title, "' +
        input +
        '") DESC LIMIT 1 } MATCH (p1:Page)-[l:LINKS_TO]-(p2:Page) WHERE p1.title = title RETURN p1, l, p2';
    // TODO: only render if the query returns > 0 nodes, otherwise tell user no nodes were found
    vis?.renderWithCypher(cypher);
    vis?.network?.moveTo({ position: { x: 0, y: 0 } });
};
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
graph, react.js, typescript const addToGraph = (input) => { var cypher = 'CALL { MATCH (p:Page) WHERE apoc.text.levenshteinSimilarity(p.title, "' + input + '") > 0.65 RETURN p.title as title ORDER BY apoc.text.levenshteinSimilarity(p.title, "' + input + '") DESC LIMIT 1 } MATCH (p1:Page)-[l:LINKS_TO]-(p2:Page) WHERE p1.title = title RETURN p1, l, p2'; getOrCreateVis().updateWithCypher(cypher); getOrCreateVis().network?.moveTo({ position: { x: 0, y: 0 } }); }; // inside wikigraph const onCreate = () => createNewGraph(input); const onAdd = () => addToGraph(input); /* Use our newly created sidebar component and simply pass on the props ... */ <SideBar create={onCreate} add={onAdd} /> Summary I left out the Redux (or basically any other global state management library) part on purpose, because once you get the hang of pulling out common logic, you should be pretty fast in adapting it to such a state management framework. That being said, don't be too scared to pull things out of the component. In my eyes, working on that, helps you to enhance your code and adapt to other things.
{ "domain": "codereview.stackexchange", "id": 44337, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "graph, react.js, typescript", "url": null }
performance, cuda Title: CUDA-Kernel for a Dense-Sparse matrix multiplication Question: i have been working on a very big project for some time. Within this project, I wrote my own CUDA-kernels to do various operations. One of them is to perform a sparse affine transformation to a list of sparse inputs. Basically my input is a list of sparse vectors which are always either 1 or 0. I know for a fact that I can have at most 32 ones in a single vector. v1 = [0, 0, 0, 1, 0, 0, 1, ...] v2 = [1, 0, 0, 0, 0, 0, 1, ...] ... My idea now was to wrap all these vectors into a sparse format like: 3 2 4 2 ... ----------- 1 2 3 2 5 4 4 4 9 . 5 . . . 8 . Its basically a matrix. The first row coresponds to the amount of non-zero entries. The values below are the indices of the non-zero entries. Now when performing the matrix-vector multiplication, all I have to do for each output element is look at the input matrix, get the weights at the given indices and add them up. So far so good. I wrote the following kernel: __global__ void sparse_affine_kernel( const float* __restrict__ mat, const unsigned int* __restrict__ inp_col_indices, const unsigned int inp_col_max_entries, const float* __restrict__ bia, float* __restrict__ res, const unsigned int m, const unsigned int n, const unsigned int lda, const unsigned int ldc){ // clang-format on // compute which output value we are looking at int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; // skip out of bounds if (col >= n || row >= m) return; // get the offset at which we look into our sparse input int offset = col * (inp_col_max_entries + 1); // check how many values we are going to read int count = inp_col_indices[offset]; // track the sum float sum = bia[row];
{ "domain": "codereview.stackexchange", "id": 44338, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "performance, cuda", "url": null }
performance, cuda
    // track the sum
    float sum = bia[row];

    // start at offset + 1 (offset contains the amount of values to read)
    for (int i = offset + 1; i < offset + 1 + count; i++) {
        // get the sparse index (set row of the input)
        auto b_row = inp_col_indices[i];
        // get the corresponding weight
        auto wgt = mat[MATRIX_INDEX(lda, row, b_row)];
        sum += wgt;
    }

    res[MATRIX_INDEX(ldc, row, col)] = sum;
};

Now the code should be somewhat straightforward. I'd like to know the following things:

Do you see any concrete way of improving this somewhat straightforward operation?

Is there anything directly related to CUDA which I could use to improve the performance of this code? Maybe using shared memory? I tried using some shared-memory some time ago and simply remembered that I wasn't able to improve the performance of the code.

I am very happy for a review and optimization-ideas for my code :)

Greetings Finn

Answer: Thank you for offering this for review. I understand you're primarily interested in performance. But I confess I found the code a little on the opaque side and not quite ready to invite lots of folks to collaborate on it.

Your introductory paragraphs, outside of the code artifact, were very clear and helpful. I am reading the signature. It could be improved. Starting with a URL pointing to data structure documentation, similar to your opening paragraphs. The code artifact should be self-describing.

The terse nomenclature of the signature leaves me with several questions. I do not know what "bia", "lda", & "ldc" mean. For example, when reading them, should I mentally pronounce it "bias"? linear discriminant array? linked data column? IDK. Google offered no relevant abbreviation expansions.
{ "domain": "codereview.stackexchange", "id": 44338, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "performance, cuda", "url": null }
performance, cuda Please spell the 5th argument result. Abbreviating its single use helps no one. It was clear enough, but identifiers in a public API have a higher documentation burden than locals. Consider adhering to the convention where input args tend to appear near beginning of signature and outputs near the end. The 1st two lines of code use lovely identifiers and are wonderfully clear, thank you. I am skeptical about the out-of-bounds return. Maybe it is conventional and the right thing to do. In other languages I would expect an exception to be raised. Here, I don't see so much as an errno or error counter being affected. We consult a pair of block globals and a thread global. I am concerned that a subset of threads will win, higher threads will lose, and we've just offered the app developer the gift of a silent Heisenbug. In particular, from a DbC perspective, it does not appear to me that "caller was incorrect" if we're out of bounds. So responsibility is still with the library routine to fulfill the contract. That might be "set an error flag or side-effect the matrix", but that's not what we see implemented. Consider eliding the track the sum comment, as it doesn't add anything beyond what the well-chosen identifier is telling us. I found b_row slightly puzzling. Maybe it could be bit_row? But it seems to be used where a column might be expected. Rather than wgt, please just call it weight, and then we probably don't need the comment to explain it. I imagine that MATRIX_INDEX is a macro with a few adds and multiplies (or shifts), but you did not include it. I was hoping it would help me to better understand the lda / ldc distinction. Overall? This is simple enough code, but it's not a code base I would want to assign or accept maintenance tasks for, not yet.
{ "domain": "codereview.stackexchange", "id": 44338, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "performance, cuda", "url": null }
go, io Title: Capture stdout output of a function call Question: Are there any major flaws to implement this test helper function? I know that the preferred way (refactoring) would simply make the function to be tested return its value and delegate printing to another part, but sometimes one has to test what is there. import ( "io" "os" ) // captureStdout calls a function f and returns its stdout side-effect as string func captureStdout(f func()) string { // return to original state afterwards // note: defer evaluates (and saves) function ARGUMENT values at definition // time, so the original value of os.Stdout is preserved before it is changed // further into this function. defer func(orig *os.File) { os.Stdout = orig }(os.Stdout) r, w, _ := os.Pipe() os.Stdout = w f() w.Close() out, _ := io.ReadAll(r) return string(out) } Motivational full example main.go package main import "fmt" func main() { fmt.Println("Hello, World!") } main_test.go package main import ( "io" "os" "testing" ) func Test_main(t *testing.T) { want := "Hello, World!\n" got := captureStdout(main) if got != want { t.Errorf("main() = %v, want %v", got, want) } } // captureStdout calls a function f and returns its stdout side-effect as string func captureStdout(f func()) string { // return to original state afterwards // note: defer evaluates (and saves) function ARGUMENT values at definition // time, so the original value of os.Stdout is preserved before it is // changed further into this function. defer func(orig *os.File) { os.Stdout = orig }(os.Stdout) r, w, _ := os.Pipe() os.Stdout = w f() w.Close() out, _ := io.ReadAll(r) return string(out) } Answer: The Go testing package and tools have built-in facilities for this: From the documentation of testing:
{ "domain": "codereview.stackexchange", "id": 44339, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "go, io", "url": null }
go, io The package also runs and verifies example code. Example functions may include a concluding line comment that begins with "Output:" and is compared with the standard output of the function when the tests are run. (The comparison ignores leading and trailing space.) These are examples of an example: func ExampleHello() { fmt.Println("hello") // Output: hello } This works suitably for testing output against a static, known result of printable text, like in your example.
{ "domain": "codereview.stackexchange", "id": 44339, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "go, io", "url": null }
android, kotlin, kotlin-compose Title: Jetpack Compose: Length-Units Converter Question: I have made a length-units converter with Jetpack Compose. Here's the source-code: class MainActivity : ComponentActivity() { override fun onCreate(savedInstanceState: Bundle?) { super.onCreate(savedInstanceState) setContent { LengthConverterTheme { Surface( modifier = Modifier.fillMaxSize(), color = MaterialTheme.colors.background ) { MainUI() } } } } } fun convertInput(fromUnit: Units, toUnit: Units, fromValue: Double): Double { var lengthMeter = 0.0 when (fromUnit) { Units.meter -> lengthMeter = fromValue * 1.0 Units.kilometer -> lengthMeter = fromValue * 1000.0 Units.feet -> lengthMeter = fromValue * 0.3048 Units.yard -> lengthMeter = fromValue * 0.9144 Units.miles -> lengthMeter = fromValue * 1609.34 } var resultVal = 0.0 when (toUnit) { Units.meter -> resultVal = lengthMeter Units.kilometer -> resultVal = lengthMeter / 1000.0 Units.feet -> resultVal = lengthMeter * 3.28084 Units.yard -> resultVal = lengthMeter * 1.09361 Units.miles -> { resultVal = lengthMeter * 0.000621371 } } return resultVal } @Composable fun MainUI() { val context = LocalContext.current var isSelectedFrom by remember { mutableStateOf(Units.meter) } var isSelectedTo by remember { mutableStateOf(Units.meter) } var userInput by remember { mutableStateOf("0.0") } var currentResult by remember { mutableStateOf(0.0) }
{ "domain": "codereview.stackexchange", "id": 44340, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "android, kotlin, kotlin-compose", "url": null }
android, kotlin, kotlin-compose Column(modifier = Modifier .fillMaxWidth() .padding( top = 15.dp, start = 25.dp, end = 25.dp )) { TextField(value = userInput, modifier = Modifier .fillMaxWidth() .padding(top = 10.dp), placeholder = { Text("Enter value to convert") }, colors = TextFieldDefaults.textFieldColors( backgroundColor = Color.White, textColor = Color.Black), onValueChange = { if (it.isNotEmpty()) { userInput = it currentResult = convertInput(isSelectedFrom, isSelectedTo, it.toDouble()) } }) UnitPicker(title = "Convert from: ", currentlySelected = isSelectedFrom) { isSelectedFrom = it currentResult = convertInput(isSelectedFrom, isSelectedTo, userInput.toDouble()) } UnitPicker(title = "Convert to: ", currentlySelected = isSelectedTo) { isSelectedTo = it currentResult = convertInput(isSelectedFrom, isSelectedTo, userInput.toDouble()) } Text("Result: ${currentResult.toString()}", modifier = Modifier.padding(top = 25.dp), fontSize = 20.sp, fontWeight = FontWeight.Bold) } }
{ "domain": "codereview.stackexchange", "id": 44340, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "android, kotlin, kotlin-compose", "url": null }
android, kotlin, kotlin-compose
@Composable
fun UnitPicker(title: String, currentlySelected: Units, setUnit: (Units) -> Unit) {
    Text("Convert from: ", fontSize = 20.sp, fontWeight = FontWeight.Bold)
    Units.values().forEach {
        Row(
            Modifier.fillMaxWidth(),
            horizontalArrangement = Arrangement.Start,
            verticalAlignment = Alignment.CenterVertically) {
            Text(it.name.replaceFirstChar { if (it.isLowerCase()) it.titlecase(Locale.ROOT) else it.toString() })
            var isSelected = it == currentlySelected
            RadioButton(selected = isSelected, onClick = { setUnit(it) })
        }
    }
    Divider()
}

enum class Units {
    meter, kilometer, feet, yard, miles
}

Could the central algorithm (within the function 'convertInput') be improved? Is there a more elegant solution?

What should be modified to accomplish more Kotlin-idiomatic code?

Looking forward to reading your answers and comments!

Answer: Regarding making this more idiomatic Kotlin code you can take advantage of direct value assignment like so:

val lengthMeter = when (fromUnit) {
        Units.Meter -> fromValue * 1.0
        Units.Kilometer -> fromValue * 1000.0
        Units.Feet -> fromValue * 0.3048
        Units.Yard -> fromValue * 0.9144
        Units.Miles -> fromValue * 1609.34
    }

Likewise you can return from a function in this way:

return when (toUnit) {
        Units.Meter -> lengthMeter
        Units.Kilometer -> lengthMeter / 1000.0
        Units.Feet -> lengthMeter * 3.28084
        Units.Yard -> lengthMeter * 1.09361
        Units.Miles -> lengthMeter * 0.000621371
    }

The complete function then looks like this:

fun convertInput(fromUnit: Units, toUnit: Units, fromValue: Double):Double {
    val lengthMeter = when (fromUnit) {
        Units.Meter -> fromValue * 1.0
        Units.Kilometer -> fromValue * 1000.0
        Units.Feet -> fromValue * 0.3048
        Units.Yard -> fromValue * 0.9144
        Units.Miles -> fromValue * 1609.34
    }
{ "domain": "codereview.stackexchange", "id": 44340, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "android, kotlin, kotlin-compose", "url": null }
android, kotlin, kotlin-compose return when (toUnit) { Units.Meter -> lengthMeter Units.Kilometer -> lengthMeter / 1000.0 Units.Feet -> lengthMeter * 3.28084 Units.Yard -> lengthMeter * 1.09361 Units.Miles -> lengthMeter * 0.000621371 } } Also. The naming convention for enum values in Kotlin is to use uppercase as below enum class Units { Meter, Kilometer, Feet, Yard, Miles }
{ "domain": "codereview.stackexchange", "id": 44340, "lm_label": null, "lm_name": null, "lm_q1_score": null, "lm_q1q2_score": null, "lm_q2_score": null, "openwebmath_perplexity": null, "openwebmath_score": null, "tags": "android, kotlin, kotlin-compose", "url": null }