text stringlengths 1 2.12k | source dict |
|---|---|
python, file
def sf_show_notice(path_1, path_2, sf_errorname): # Win10toast used
toaster.show_toast('Sync Successfully',
'The Files in "' + path_1 + '" and "' + path_2 + '" are Synchronized',
icon_path=mf_data_path + r'Movefile.ico',
duration=10,
threaded=False)
if len(sf_errorname) > 0:
toaster.show_toast("Couldn't sync files",
sf_errorname + sf_label_text_dic['can_not_move_notice'][language_number],
icon_path=mf_data_path + r'Movefile.ico',
duration=10,
threaded=False)
def get_task(barroot): # shitty block here, help me! (baroot is a root showing progress)
all_files_1 = scan_items(path1)[1]
all_files_2 = scan_items(path2)[1]
sync_tasks = []
pass_folder_rpaths = []
task_number = 0
for pass_folder in pass_folder_paths.split(','):
if pass_folder.startswith(path1):
pass_folder_rpaths.append(pass_folder.replace(path1, path1.split('\\')[-1]))
elif pass_folder:
pass_folder_rpaths.append(pass_folder.replace(path2, path2.split('\\')[-1]))
file_info_1 = {} # 存储文件1的信息:(哈希值, 大小, 修改时间)
for file1 in all_files_1:
file1_path = path1 + file1
file_info_1[file1] = (filehash(file1_path), os.path.getsize(file1_path), os.path.getmtime(file1_path))
for file2 in all_files_2:
file2_path = path2 + file2
if file2 in all_files_1:
file1 = file2
file1_path = path1 + file1
file_info = file_info_1[file1]
file2_info = (filehash(file2_path), os.path.getsize(file2_path), os.path.getmtime(file2_path))
if file_info == file2_info:
continue | {
"domain": "codereview.stackexchange",
"id": 44919,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, file",
"url": null
} |
python, file
if file_info == file2_info:
continue
if single_sync and file_info[0] == file2_info[0]:
continue
new_file, old_file = file1, file2
new_file_path, old_file_path = file1_path, file2_path
new_file_rpath = path1.split('\\')[-1] + file1
old_file_rpath = path2.split('\\')[-1] + file2
if int(os.stat(new_file_path).st_mtime) < int(os.stat(old_file_path).st_mtime):
if single_sync:
continue
old_file, new_file = new_file, old_file
new_file_path, old_file_path = old_file_path, new_file_path
new_file_rpath, old_file_rpath = old_file_path, new_file_rpath
if any(pfolder.startswith(old_file_rpath) for pfolder in pass_folder_rpaths) or any(
old_file.endswith(pfile) for pfile in pass_item_rpath.split(',')):
continue
task_number += 1
barroot.set_label1(sf_label_text_dic['main_progress_label'][language_number] + file1.split('\\')[-1])
sync_tasks.append([new_file_path, old_file_path, False])
else:
new_file_rpath = path2.split('\\')[-1] + file2
if any(pfolder.startswith(new_file_rpath) for pfolder in pass_folder_rpaths):
continue
task_number += 1
barroot.set_label1(sf_label_text_dic['main_progress_label'][language_number] + file2.split('\\')[-1])
barroot.progress_root.update_idletasks()
sync_tasks.append([file2_path, path1 + file2, True])
if not single_sync:
for file1 in all_files_1:
if file1 not in all_files_2:
file1_path = path1 + file1
newfile1_rpath = path2.split('\\')[-1] + file1 | {
"domain": "codereview.stackexchange",
"id": 44919,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, file",
"url": null
} |
python, file
if any(pfolder.startswith(newfile1_rpath) for pfolder in pass_folder_rpaths):
continue
task_number += 1
barroot.set_label1(
sf_label_text_dic['main_progress_label'][language_number] + file1.split('\\')[-1])
barroot.progress_root.update_idletasks()
sync_tasks.append([file1_path, path2 + file1, True])
return sync_tasks
def synchronize_files(baroot, task):
baroot.set_label2(sf_label_text_dic["current_file_label1"][language_number] + task[0].split('\\')[-1])
new_file_path, old_file_path, create_folder = task
if create_folder:
try:
sf_creat_folder(old_file_path)
except:
pass
try:
shutil.copy2(new_file_path, old_file_path)
except:
return new_file_path
return None
def run_sync_tasks(baroot):
sf_errorname = ''
baroot.main_progress_bar['value'] = 0
baroot.progress_root.update_idletasks()
tasks = get_task(baroot)
baroot.main_progress_bar['maximum'] = len(tasks)
baroot.set_label1(
f'{sf_label_text_dic["main_progress_label1"][language_number][0]}{str(baroot.main_progress_bar["value"])}/{str(len(tasks))} {sf_label_text_dic["main_progress_label1"][language_number][1]}')
with ThreadPoolExecutor() as executor:
futures = [executor.submit(synchronize_files, baroot, task) for task in tasks]
for future in as_completed(futures):
result = future.result()
if result:
sf_errorname += result + ' , ' | {
"domain": "codereview.stackexchange",
"id": 44919,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, file",
"url": null
} |
python, file
baroot.main_progress_bar['value'] += 1
baroot.set_label1(
f'{sf_label_text_dic["main_progress_label1"][language_number][0]}{str(baroot.main_progress_bar["value"])}/{str(len(tasks))} {sf_label_text_dic["main_progress_label1"][language_number][1]}')
baroot.progress_root.update_idletasks()
baroot.progress_root.withdraw()
path_name_1 = path1.split('\\')[-1]
if area_name:
path_name_1 = area_name
try:
sf_show_notice(path_name_1, path2.split('\\')[-1], sf_errorname)
except:
pass
finally:
baroot.progress_root.withdraw()
global sync_bar_root, sync_bar_root_task
sync_bar_root = ProgressBar('Movefile -Syncfile Progress',
sf_label_text_dic["main_progress_label2"][language_number],
sf_label_text_dic["current_file_label"][language_number],
language_number)
sync_bar_root_task = threading.Thread(target=lambda: sync_bar_root.launch(), daemon=True)
sync_bar_root_task.start()
while not sync_bar_root.initialization_done:
time.sleep(0.01)
run_tasks = threading.Thread(target=lambda: run_sync_tasks(sync_bar_root), daemon=True)
run_tasks.start()
And the progress bar class:
class ProgressBar:
def __init__(self, title, label1, label2, lang_num):
self.initialization_done = False
from LT_Dic import progress_root_label_dic
self.title = title
self.label1 = label1
self.label2 = label2
self.label_dic = progress_root_label_dic
self.lang_num = lang_num
self.main_progress_label = None
self.main_progress_bar = None
self.current_file_label = None
self.show_running_bar = None
self.progress_root = None
self.roll_bar = None | {
"domain": "codereview.stackexchange",
"id": 44919,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, file",
"url": null
} |
python, file
def set_label1(self, content):
self.main_progress_label['text'] = content
def set_label2(self, content):
self.current_file_label['text'] = content
def launch(self):
self.progress_root = tk.Tk()
self.progress_root.title(self.title)
self.progress_root.geometry('420x115')
self.progress_root.iconbitmap(mf_data_path + r'Movefile.ico')
self.main_progress_label = ttk.Label(self.progress_root, text=self.label1)
self.main_progress_label.grid(row=0, column=0, padx=10, pady=5, sticky='SW')
self.main_progress_bar = ttk.Progressbar(self.progress_root)
self.main_progress_bar.grid(row=1, column=0, padx=10, pady=0, ipadx=150, sticky='W')
self.current_file_label = ttk.Label(self.progress_root, text=self.label2)
self.current_file_label.grid(row=2, column=0, padx=10, pady=5, sticky='SW')
self.show_running_bar = ttk.Progressbar(self.progress_root, mode='indeterminate')
self.show_running_bar.grid(row=3, column=0, padx=10, pady=0, ipadx=150, sticky='W')
self.progress_root.protocol('WM_DELETE_WINDOW', lambda: self.sync_bar_on_exit())
self.roll_bar = threading.Thread(target=self.show_running, daemon=True)
self.roll_bar.start()
self.initialization_done = True
self.progress_root.mainloop()
def show_running(self):
self.show_running_bar.start(10)
def sync_bar_on_exit(self):
if tkinter.messagebox.askyesno(title='Syncfile', message=self.label_dic['confirm_exit_text'][self.lang_num]):
self.progress_root.withdraw()
self.roll_bar.join()
return True
else:
return False
def progress_root_destruction(self):
self.progress_root.quit()
self.progress_root.destroy() | {
"domain": "codereview.stackexchange",
"id": 44919,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, file",
"url": null
} |
python, file
And the label part here, if you want~
sf_label_text_dic = {
'main_progress_label': ['扫描文件中... 发现文件:', 'Scanning items... Found item:'],
'main_progress_label1': [['总进度:', '已完成'], ['Progress:', 'Completed']],
'main_progress_label2': ['扫描文件中...', 'Scanning items...'],
'current_file_label': ['等待中...', 'Waiting...'],
'current_file_label1': ['同步中文件:', 'File in process:'],
'exit_sync': ['''文件正在同步中,
确定中断同步进程并退出?''', '''Synchronization is in progress,
Are you sure to interrupt the process and exit?'''],
'can_not_move_notice': ["""
无法被移动,请在关闭文件或移除重名文件后重试""", """
Couldn't be moved, Please try again after closing the file
or removing the duplicate file """]
}
progress_root_label_dic = {
'confirm_exit_text': ['''文件正在复制中,
确定中断进程并退出?''', '''The file is currently being copied,
Are you sure to interrupt the process and exit?''']
}
It sucks, as you can see......
The main problem is how to improve the efficiency of the get_task() function, because it always takes a long time to find out the files needed to be synchronized.
Probably we don't have to compare the hash of the files at all? Some advice here too.
So, help me write a more efficient code to implement the functions contained in the original code, including single-direction sync option, ability to keep spicific files unchangeable, and keep new files only when syncing, and so on you can see from my original code...
And of course, make the structure of the new code looks more professional if you can~
please~
Tip: you can see the whole program in github: https://github.com/HNRobert/Movefile, if you are interested in the program, you can give me some more suggestions there! | {
"domain": "codereview.stackexchange",
"id": 44919,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, file",
"url": null
} |
python, file
Answer: Python libraries
The first problem I see is that there is a lot of code that can be replaced with built-in Python functions.
os.walk()
You can replace the entire scan_items() function with os.walk(). The function walks down through a directory, giving lists of files and folders in each directory found. It is used like this:
for current_directory, folders, files in os.walk(path1):
# current_directory is the current directory location of the walk
# folders is a list of directories in the current directory
# files is a list of the files in the current directory
If you want to control which files and folders are skipped (keeping them unchanged), you can edit the lists in-place. Anything removed from the folders list will be skipped by os.walk(). Use folders[:] = edited_folder_list instead of folders = edited_folder_list. The latter has no effect on os.walk().
filecmp.cmpfiles()
The other library you should look into for get_task() is filecmp, which has a set of functions for comparing files. Specifically, filecmp.cmpfiles() is probably what you want to use instead of hashing (see more comments on that in the next section).
matching, mismatching, errors = filecmp.cmpfiles(path_1, path_2, file_list, shallow=False)
This call will take a list of files (file_list) and compare the versions in path_1 and path_2, sorting the files into three lists:
matching: files that match according to the shallow criteria.
mismatching: files that are different.
errors: files that could not be compared due to errors such as not existing in one of the directories. | {
"domain": "codereview.stackexchange",
"id": 44919,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, file",
"url": null
} |
python, file
The shallow parameter controls whether the whole of both files are examined for differences (shallow=False) or whether only size, type, and modification time are compared (shallow=True, which is the default).
File hashing
I would bet that the main reason that synchronizing files is taking so long is that the program calculate the hash of every file it encounters. Specifically this line in get_task():
file_info_1[file1] = (filehash(file1_path), os.path.getsize(file1_path), os.path.getmtime(file1_path))
and the corresponding line for file2. Getting the hash of a file requires reading the entire file. Doing this for large directory trees will be slow because every file has to be read up to three times: once for file1, once for file2, and once when copying on to the other. Plus, when the files being compared are not on separate networked computers, computing a hash is just overhead plus a small chance of a hash collision.
When shallow=False, filecmp.cmpfiles() will compare the two files byte-for-byte to see if they are the same. When shallow=True, only size, type, and modification date are used to judge if two files are the same. The latter is much faster, but possibly not as accurate. But, this possible inaccuracy has a very small probability of occurring unless the user is doing weird things to their file system. If you modify a file, its modification date and (most likely) its size are going to change. That's usually enough of a clue to determine when a file needs to be synchronized. | {
"domain": "codereview.stackexchange",
"id": 44919,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, file",
"url": null
} |
c++, design-patterns
Title: A Model View Controller Pattern implementation. Do I get it right?
Question: I know there has been many questions on the Model-View-Controller pattern, but this question is a follow up on a previous question I asked regarding the implementation of a download manager:
Get files downloaded asynchronously after double clicking on list item (C++)
One of the reviewers suggested that I implement (besides the threading aspect, which was the core of the question) the whole thing using an MVC pattern. As I had never done that before but was extremely interested in finally learning about this, I decided to address improvements to the code by first tackling the MVC implementation. I have been extremely impressed by the quality of the answers I received to my first question. So would like to progress in that way.
Now again I know they are many posts/questions about this topic, but I am particularly interested in the implementation I came up with.
First, the issue with everything that is out there is that there are in fact, many different solutions provided to the problem, and most articles insist on the fact that there's no "definite" right way of implementing this pattern.
As I have no experience, I have tried to learn how this is done by studying Chrome's source code. The code base is too complicated for someone of my level but I have been basing my observations on the very first version published so the complexity remains acceptable: | {
"domain": "codereview.stackexchange",
"id": 44920,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns",
"url": null
} |
c++, design-patterns
https://github.com/chromium/chromium/tree/3.0.195.25
I have been mostly looking at the ItemShelfDialog which is a view that implements a table view and a table model. In the case of this implementation, this is would be a list of URLs (and the table model would essentially be a list of URLs).
I did my best to re-implement as well as I could a simplified version of that code (see below).
I did a diagram to show (if I got it right) the relations between the components. Whereas (in Google's implementation) the colors represent the files in which the different classes would be implemented (e.g. class TableModel and class ToableModelObserver are defined in the same file)
https://github.com/chromium/chromium/blob/3.0.195.25/app/table_model.h
Here is my understanding of a possible "correct" implementation:
The model is the central piece. It holds well... the model (the data).
The view (TableView) observes the model (I am the table model observer).
The "controller" (not named controller in this particular example, but I understand the class ShelfItemDialog acts as the controller here) observes the view (I am the table view observer).
The ShelfItemDialog (the wrapper view) holds the model and the view as member variables (it is itself a view so that it can be placed into the UI framework). When it is created, it creates the model creates the view, set itself (the controller) as the view's observer, and set the view as the model's observer.
Use cases:
At creation, the model is loaded (Reload()), which causes the view to be "signaled" by the model through OnModelChange(). The view can now display the list of items from the table (a list of files, in my case).
The view receives events (double click) which causes the "controller" to be "signaled" through OnDoubleClick. The controller can then download the files (that was my original goal - design as an exercise, a program that downloads files). OnDoubleClick not implemented in the code below.
Questions: | {
"domain": "codereview.stackexchange",
"id": 44920,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns",
"url": null
} |
c++, design-patterns
Questions:
Would like any general comments about the code itself of course (disclaimer: it's not because I based this on Google's code - also rather old - that mine is good. I may have incorrectly interpreted the original author's intent).
More importantly, I want to understand if I got the pattern right (in terms of how it should be coded) and, if not, what I need to fix.
Since the View has access to the model, can it queries the data from the model to display a list of say files (like in my case) or should it maintain its own internal std::vector<std::pair<int, std::string>> where the int would be an index in the model's data, and string the name of the file as displayed in the view?
If a user deletes an item from the file list (say delete key), the view will receive this event. But then should the view call the model to say, "hey remove item at index X from your model data" or should the view call the controller (RemoveItemByIndex(X))that in turn will call the model (RemoveItemByIndex(X)), which in turn will call the view (update your view, the model changed). Looking at Google's code:
void TableView::OnKeyDown(unsigned short virtual_keycode) {
if (!ignore_listview_change_ && table_view_observer_) {
table_view_observer_->OnKeyDown(virtual_keycode);
}
}
It calls the observer (the controller), which would then process the key (which can be a del key). Is the latter then the correct way? (though in Google's impl, OnKeyDown() is not overwritten - but irrelevant - it might as well had been). In general, should every event received by the view always be sent to the controller and never directly to the model? I know stupid question, since it would break the MVC pattern in the first place, yet, I am curious to see if this would sometimes be ok.
The code:
#include <vector>
#include <memory>
#include <string>
#include <iostream>
// MODELS | {
"domain": "codereview.stackexchange",
"id": 44920,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns",
"url": null
} |
c++, design-patterns
// MODELS
class TableModelObserver { // table_model_observer
public:
virtual void OnModelChanged() = 0;
//virtual void OnItemsChanged(int start, int length) = 0;
//virtual void OnItemsAdded(int start, int length) = 0;
//virtual void OnItemsRemoved(int start, int length) = 0;
};
class TableModel { // table_model
public:
struct Group {
std::wstring title;
int id;
};
typedef std::vector<Group> Groups;
virtual void SetObserver(TableModelObserver* observer) = 0;
};
class PossibleURLModel : public TableModel {
public:
PossibleURLModel() {
}
void Reload() {
// create some fake results
struct SomeTableResultType {
std::wstring filename;
std::wstring title;
};
std::vector<SomeTableResultType> results = {{L"path/foo.bin", L"foo"}, {L"path/bar.bin", L"bar"}};
results_.resize(results.size());
for (size_t i = 0; i < results.size(); ++i) {
results_[i].filename = results[i].filename;
results_[i].title = results[i].title;
results_[i].index = i;
}
if (observer_)
observer_->OnModelChanged();
}
virtual void SetObserver(TableModelObserver* observer) {
observer_ = observer;
}
struct Result {
std::wstring filename;
std::wstring title;
size_t index{0};
};
// Results we are showing
std::vector<Result> results_;
// Our observer
TableModelObserver* observer_;
};
// VIEWS
class View {
};
class TableViewObserver {
public:
//virtual void OnSelectionChanged() = 0;
virtual void OnDoubleClick() = 0;
};
class TableView : public View, TableModelObserver {
public:
TableView(TableModel* model)
: model_(model) {
model_->SetObserver(this); // the view is observing the model. If the model changes, table view shall be notified
} | {
"domain": "codereview.stackexchange",
"id": 44920,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns",
"url": null
} |
c++, design-patterns
void SetObserver(TableViewObserver* observer) {
table_view_observer_ = observer;
}
virtual void OnModelChanged() { std::cerr << "TableView::OnModelChanged()" << std::endl; }
TableModel* model_;
TableViewObserver* table_view_observer_{nullptr};
};
// CONTROLLER
class ShelfItemDialog : public View, TableViewObserver { // shelf_item_dialog
public:
ShelfItemDialog() {
url_table_model_.reset(new PossibleURLModel());
url_table_ = new TableView(url_table_model_.get());
url_table_->SetObserver(this);
}
~ShelfItemDialog() = default;
void Show() {
url_table_model_->Reload();
}
void OnDoubleClick() {
//int selection = url_table_->FirstSelectedRow();
//if (selection >= 0 && selection < url_table_model_->RowCount()) {
// OnSelectionChanged();
// PerformModelChange();
//}
}
TableView* url_table_;
std::unique_ptr<PossibleURLModel> url_table_model_;
};
int main() {
ShelfItemDialog a;
a.Show();
return 0;
}
Answer: Answers to your questions
Would like any general comments about the code itself […]
See below.
More importantly, I want to understand if I got the pattern right (in terms of how it should be coded) and, if not, what I need to fix.
As you already mentioned, there is no one right way to implement the pattern. It's just a guide to show you how to separate a system into a model, view and controller, so that you reduce coupling between those components, and make it easier to swap out say, a GUI view, with a command line view, without having to modify your model.
You already noticed that the controller is in the ShelfItemDialog(), which is also the view. This often happens in GUI code. However, as long as the controller's action doesn't directly change the view, but lets the view only update based on changes to the model, it is fine. | {
"domain": "codereview.stackexchange",
"id": 44920,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns",
"url": null
} |
c++, design-patterns
Since the View has access to the model, can it queries the data from the model to display a list of say files (like in my case) or should it maintain its own internal std::vector<std::pair<int, std::string>> where the int would be an index in the model's data, and string the name of the file as displayed in the view?
It's perfectly fine to query the data from the model. This avoids double bookkeeping and all the issues associated with it. Of course it depends on the situation. If your model is stored on a slow tape drive, you might want your view to cache the data in RAM.
If a user deletes an item from the file list (say delete key), the view will receive this event.
You will have registered a handler for delete key press events. That handler is part of the controller in the MVC pattern. Don't let the fact that your GUI library doesn't explicitly mark things as being "view" or "controller" confuse you.
Again, because the view and the controller are often closely related, you will implement them together. Sometimes that's just better than to force them to be completely separated in your code.
Overuse of classes
You don't need classes for everything in C++. There are some classes which are unnecessary in your code. For example, class View is used as a base class, but the base itself is never used. It can't be used anyway, since View is an empty class. I would just remove it entirely.
The Observer classes are have a use, but there are other ways to do this that don't rely on classes, and are a bit more flexible. You can use std::function<> to hold references to functions you can call. For example:
class TableModel {
public:
…
std::function<void()> onModelChanged;
};
class PossibleURLModel : public TableModel {
…
void Reload() {
…
if (onModelChanged)
onModelChanged();
}
};
class TableView {
public:
TableView(TableModel* model)
: model_(model) {
model_->onModelChanged = [&]{ OnModelChanged(); };
} | {
"domain": "codereview.stackexchange",
"id": 44920,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns",
"url": null
} |
c++, design-patterns
void OnModelChanged() {
…
}
…
};
Note how there is no longer a need for a TableModelObserver class in the above code. Also, the function you register for onModelChanged can be anything; it doesn't have to be calling a member function of TableView, it could call a member function of another class or call a free function as well. This improves the decoupling between components (which is one of the reasons for having the model-view-controller pattern in the first place).
In most GUI libraries, you can register multiple callbacks for a single event, using signals and signal slots. A commonly used library to implement this is libsigc++.
Unnecessary heap allocations
Often you can avoid using new and delete in C++; containers and smart pointers will take care of that for you. But even better than using a smart pointer is to avoid needing any kind of pointer. In class ShelfItemDialog you allocate memory for a PossibleURLModel using std::unique_ptr, and for a TableModel using a raw pointer. The latter is a memory leak, since there is no corresponding delete. But you could just have stored these objects by value:
class ShelfItemDialog : TableViewObserver {
public:
ShelfItemDialog() {
url_table_.SetObserver(this);
}
void Show() {
url_table_model_.Reload();
}
…
PossibleURLModel url_table_model_;
TableView url_table_{&url_table_model_};
}; | {
"domain": "codereview.stackexchange",
"id": 44920,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, design-patterns",
"url": null
} |
performance, c
Title: Calculating standard deviation
Question: I am getting about 215 MB/s throughput on this implementation of Welford's algorithm. My hard drive is rated for 400 MB/s reads, so I am wondering how I can optimise this. I assume the bottleneck is in getline or atof as 400 MB/s throughput gives my 4 GHz processor 640 cycles per double to work with, which should be more than enough for the computational part of the loop. Are there faster alternatives?
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
int main(int argc, char **argv)
{
if (argc != 1) {
printf("Usage: %s < data\n", argv[0]);
return EXIT_FAILURE;
}
size_t count = 0;
double mean = 0;
double m2 = 0;
char *line = NULL;
size_t len = 0;
ssize_t nread;
while ((nread = getline(&line, &len, stdin)) != -1) {
double newValue = atof(line);
count++;
double delta = newValue - mean;
mean += delta / count;
m2 += delta * (newValue - mean);
}
printf("%lf\n%lf\n", mean, sqrt(m2 / count));
free(line);
return 0;
}
Answer: Avoid intermediate copies
By using getline() you make a copy of some characters into a temporary buffer, then you call atof() on that. There is some overhead in that. You could consider doing this instead:
while(scanf("%lf ", &newValue) == 1) {
…
} | {
"domain": "codereview.stackexchange",
"id": 44921,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, c",
"url": null
} |
performance, c
Which may or may not be faster. Note that getline() also allocates heap memory for the buffer, which probably doesn't matter much since it will only do it a few times at most, but it could be avoided by using a fixed-size array; after all, numbers are only so big.
Use a better floating point number parser
The standard library might not have an optimal algorithm to convert strings to floating point values, and apart from that it might take into account locales, which adds some overhead. In recent years some advances have been made in string-to-float algorithms, and some (standard) libraries also have functions to parse numbers without bothering with locales. The C++ library for example has gotten std::from_chars(). You might also want to look at external libaries, like Daniel Lemire's fast_double_parser.
Use multiple threads
Even if you can make the parsing so fast that your program is purely I/O bound, you might still get a benefit from making it multi-threaded. That's because SSDs nowadays can work faster if they receive multiple I/O requests to different parts of the SSD.
Missing error checking
Your should check after the while-loop that feof(stdin) == true, to verify that you actually read all the way to the end of the file. If it's false, that means an error occured, in which case you should not print out an incorrect standard deviation, but print an error message to stderr and return EXIT_FAILURE.
Note that parsing a number might fail as well (maybe the file containing the data was corrupted for example, or it's not a file with numbers at all), so it would be better if you could check for those kinds of errors as well, although it will come at some cost to performance. | {
"domain": "codereview.stackexchange",
"id": 44921,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, c",
"url": null
} |
python, performance, numpy, pandas
Title: Is this the right implementation for Linear Programming (puLP) on python?
Question: I have created a LP function to help maximize a set of features. My first time playing with this library and also conducting LP.
Variables:
Number of features => X
Number of Categories => Y
Problem function:
Maximize the Z(s) given changes in X and Y. If I add more features (X) from specific categories or the pool of categories (Y) then Z should be at its max.
Constraints:
feature can come from specific category though it does not have to
feature may have a specific threshold though it does not have to
the number of features in total regardless of the categories must be 5
Code:
import pandas as pd
import numpy as np
from pulp import *
import random | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
data = [{'category': 'category 1',
'item_title': 'item 1',
'feature 1': 10.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 8.0,
'feature 17': 0.0},
{'category': 'category 1',
'item_title': 'item 2',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 10.0,
'feature 8': 30.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 9.0,
'feature 17': 0.0},
{'category': 'category 1',
'item_title': 'item 3',
'feature 1': 0.0,
'feature 2': 22.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 7.0,
'feature 17': 0.0},
{'category': 'category 1',
'item_title': 'item 4',
'feature 1': 0.0,
'feature 2': 36.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 18.0,
'feature 17': 0.0},
{'category': 'category 1',
'item_title': 'item 5',
'feature 1': 0.0,
'feature 2': 54.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 5.0, | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 5.0,
'feature 16': 32.0,
'feature 17': 0.0},
{'category': 'category 1',
'item_title': 'item 6',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 20.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 15.0,
'feature 17': 0.0},
{'category': 'category 1',
'item_title': 'item 7',
'feature 1': 2.0,
'feature 2': 0.0,
'feature 3': 4.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 10.0,
'feature 17': 0.0},
{'category': 'category 1',
'item_title': 'item 8',
'feature 1': 8.0,
'feature 2': 0.0,
'feature 3': 2.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 20.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 1',
'item_title': 'item 9',
'feature 1': 0.0,
'feature 2': 19.0,
'feature 3': 0.0,
'feature 4': 8.0,
'feature 5': 0.0,
'feature 6': 8.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 5.0,
'feature 14': 0.0,
'feature 15': 5.0,
'feature 16': 5.0,
'feature 17': 0.0},
{'category': 'category 2',
'item_title': 'item 10',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0, | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 55.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 5.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 2',
'item_title': 'item 11',
'feature 1': 0.0,
'feature 2': 89.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 35.0,
'feature 12': 9.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 2',
'item_title': 'item 12',
'feature 1': 0.0,
'feature 2': 12.0,
'feature 3': 0.0,
'feature 4': 7.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 27.0,
'feature 12': 50.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 2',
'item_title': 'item 13',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 9.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 37.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 2',
'item_title': 'item 14',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 110.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 29.0,
'feature 12': 6.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 2',
'item_title': 'item 15',
'feature 1': 0.0,
'feature 2': 5.0,
'feature 3': 0.0, | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
'item_title': 'item 15',
'feature 1': 0.0,
'feature 2': 5.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 8.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 43.0,
'feature 12': 0.0,
'feature 13': 6.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 3.0,
'feature 17': 0.0},
{'category': 'category 3',
'item_title': 'item 16',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 64.0,
'feature 5': 12.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 52.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 3',
'item_title': 'item 17',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 66.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 8.0},
{'category': 'category 3',
'item_title': 'item 18',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 8.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 18.0},
{'category': 'category 3',
'item_title': 'item 19',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 1.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 4.0}, | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 4.0},
{'category': 'category 3',
'item_title': 'item 20',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 9.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 5.0,
'feature 16': 0.0,
'feature 17': 4.0},
{'category': 'category 3',
'item_title': 'item 21',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 90.0,
'feature 5': 2.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 62.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 3',
'item_title': 'item 22',
'feature 1': 0.0,
'feature 2': 17.0,
'feature 3': 0.0,
'feature 4': 19.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 42.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 3',
'item_title': 'item 23',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 4.0,
'feature 5': 2.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 19.0},
{'category': 'category 3',
'item_title': 'item 24',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 45.0,
'feature 5': 20.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0, | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 3',
'item_title': 'item 25',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 18.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 25.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 4',
'item_title': 'item 26',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 6.0,
'feature 14': 0.0,
'feature 15': 6.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 4',
'item_title': 'item 27',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 80.0,
'feature 15': 0.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 4',
'item_title': 'item 28',
'feature 1': 90.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 40.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 4',
'item_title': 'item 29',
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0, | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
'feature 1': 0.0,
'feature 2': 0.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 10.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 7.0,
'feature 16': 0.0,
'feature 17': 0.0},
{'category': 'category 4',
'item_title': 'item 30',
'feature 1': 0.0,
'feature 2': 10.0,
'feature 3': 0.0,
'feature 4': 0.0,
'feature 5': 0.0,
'feature 6': 0.0,
'feature 7': 0.0,
'feature 8': 0.0,
'feature 9': 0.0,
'feature 10': 0.0,
'feature 11': 0.0,
'feature 12': 0.0,
'feature 13': 0.0,
'feature 14': 0.0,
'feature 15': 9.0,
'feature 16': 0.0,
'feature 17': 0.0}] | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
df = pd.DataFrame(data)
input_features = [{'variable':'feature 1', 'sum_threshold':100, 'dType':"Integer", "constrained_Group":"category 1"},
{'variable':'feature 2', 'sum_threshold':49, 'dType':"Integer", "constrained_Group":"category 2"},
{'variable':'feature 8', 'sum_threshold':66, 'dType':"Integer", "constrained_Group":"category 3"},
]
categories = list(set(df.category)) # categories in data
problem = LpProblem("Best Features", LpMaximize) # initialise problem
indexes_for_categories = [] #to store the indexes of all categories that are used in input_features
# Loop through list of dictionary to store conditions/constraints in problem
for dict_ in input_features:
# Create index of items which will help to extract final features at the end
items = df.index.tolist()
# Create problem variables as dict - index of data frame and the column from desired variable
problem_var = dict(zip(items, np.array(df[dict_['variable']].tolist())))
# Need to create unique names for x so that pulp does not run into error of duplicates
X = LpVariable.dicts(f"x_{random.uniform(1,7)}", indices=items, lowBound=0, upBound=1, cat=dict_['dType'], indexStart=[])
# problem to solve. Maximize the sum of chosen variables
problem += lpSum( [X[i] * problem_var[i] for i in items])
# if category is applied, must apply constraint - max sum must only be within this category
if dict_['constrained_Group'] is not None:
constrained_df = df[df['category'].str.contains(dict_['constrained_Group'])].fillna(0)
constrained_df_items = constrained_df.index.tolist()
constrained_df_problem_var = dict(zip(constrained_df_items, np.array(constrained_df[dict_['variable']].tolist())))
problem += lpSum( [X[i] * constrained_df_problem_var[i] for i in constrained_df_items]) | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
# if threshold provided when category is provided, max must be within this threshold
if dict_['sum_threshold'] is not None:
problem += lpSum([X[i] * constrained_df_problem_var[i] for i in constrained_df_items]) <= dict_['sum_threshold']
# Range of indexes from categories selected - to be used if all input features explicitly state a category. This will be the sample from which to select all 6 items.
category_index = np.arange(constrained_df.index.min(),constrained_df.index.max()).tolist()
indexes_for_categories.append(category_index)
# if no category is provided
else:
# if threshold is provided when no category is provided solution must be within this threshold
if dict_['sum_threshold'] is not None:
problem += lpSum([X[i] * problem_var[i] for i in items]) <= dict_['sum_threshold']
# if all input features (list of dicts) all have categories, then need to constrain total number of items (5) to just those in the categories selected. If not then select best 5 from total pool of items.
only_constrained_gs = [dict_['constrained_Group'] for dict_ in input_features if dict_['constrained_Group'] != None ]
if len(only_constrained_gs) == len(input_features):
sample_to_choose_from = np.concatenate(indexes_for_categories)
problem += lpSum( [X[i] for i in sample_to_choose_from] ) == 5
else:
problem += lpSum( [X[i] for i in items] ) == 5
# solve problem
problem.solve()
# store variables and extract indexes to then extract from original data
variables = []
values = []
for v in problem.variables():
variable = v.name
value = v.varValue
variables.append(variable)
values.append(value)
values = np.array(values).astype(int)
items_list = pd.DataFrame(np.array([variables,values]).T,columns = ['Variable','Optimal Value'])
items_list['Optimal Value'] = items_list['Optimal Value'].astype(int)
items_list_opt = items_list[items_list['Optimal Value']!=0] | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
res_df = []
for dict_ in input_features:
index_pos = np.array([int(i) for i in items_list_opt["Variable"].str.split('_').str[-1].tolist()])
items_attribute_vals = df[dict_['variable']].loc[index_pos].astype(int)
items_names = df['item_title'].loc[index_pos] #.astype(int)
result_optimize = pd.concat([items_names, items_attribute_vals], axis=1).T
res_df.append(result_optimize)
df[df.index.isin(pd.concat(res_df, axis=1).T.drop_duplicates(subset="item_title").index)]
The current output:
category item_title feature 1 feature 2 feature 3 feature 4 feature 5 feature 6 feature 7 feature 8 feature 9 feature 10 feature 11 feature 12 feature 13 feature 14 feature 15 feature 16 feature 17
0 category 1 item 1 10.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 8.0 0.0
1 category 1 item 2 0.0 0.0 0.0 0.0 0.0 0.0 10.0 30.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 9.0 0.0
10 category 2 item 11 0.0 89.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 35.0 9.0 0.0 0.0 0.0 0.0 0.0
11 category 2 item 12 0.0 12.0 0.0 7.0 0.0 0.0 0.0 0.0 0.0 0.0 27.0 50.0 0.0 0.0 0.0 0.0 0.0
20 category 3 item 21 0.0 0.0 0.0 90.0 2.0 0.0 0.0 62.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
I built a quick codesandbox here if you wish to test it out.
Is this the right implementation for the problem I am trying to solve? Would appreciate some guidance on this code implementation. | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
Answer: Don't import *.
Move your data to a CSV.
Your constraint construction seems a little too generic - you've written for cases that will never actually be evaluated. If you want to keep it that way, you need to write functions with well-defined parameters. I've demonstrated the opposite and only showed the code for constraints that are currently applied.
Don't list(set) in this context.
Best Features in its current format is going to be ignored and an underscore added, so you might as well do that yourself.
random.uniform is... appalling on many dimensions. Think about your actual data and what index values would make your variable names meaningful and unique. In this case it will be based on item and feature.
lpSum is not needed when the variables are in a dataframe. Even if you did want to use the pulp utility, it would be lpDot and not lpSum.
lowBound=0, upBound=1, cat=dict_['dType'] is equivalent to just cat=pulp.LpBinary.
It's critically important that you check the status after a solve. Otherwise you're outputting garbage if the solve fails.
Your feature matrix is highly sparse, and I don't entirely understand why the zeros are still observed in your output. I demonstrate a way to only optimize for the non-zero values.
If I understand your code correctly, it could instead look like
import pandas as pd
import numpy as np
import pulp
# Load dataframe from CSV, moving category and item to index and removing redundant prefixes
df = pd.read_csv('features.csv')
df['category'] = df.category.str.removeprefix('category ')
df['item_title'] = df['item_title'].str.removeprefix('item ')
df.set_index(['category', 'item_title'], inplace=True)
df.index.names = 'category', 'item'
df.columns = df.columns.str.removeprefix('feature ')
df.columns.name = 'feature' | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
# Convert from a sparse rectangular frame to a dense triple-indexed column series,
# and only keep input features of interest
inputs = df[['1', '2', '8']].replace(0, np.nan).stack()
print('Input feature values:')
print(inputs.to_string(), end='\n\n')
# assignment variable names (omitting category as it is not needed for uniqueness)
var_names = (
'i'
+ inputs.index.get_level_values('item')
+ '_f'
+ inputs.index.get_level_values('feature')
).to_series(index=inputs.index)
# binary assignments, again only for non-zero feature positions
assigns = var_names.apply(pulp.LpVariable, cat=pulp.LpBinary)
print('Assignment variables:')
print(assigns.to_string(), end='\n\n')
problem = pulp.LpProblem('best_features', pulp.LpMaximize)
problem.objective = assigns.dot(inputs)
problem.addConstraint(name='total', constraint=assigns.sum() == 5)
# For each input feature, and its constraint parameters
for (feature, feature_group), category, sum_threshold in zip(
assigns.groupby(level='feature'),
('1', '2', '3'),
(100, 49, 66),
):
item = slice(None) # all items within the category
constrained_assigns = feature_group.loc[(category, item)]
constrained_vals = inputs.loc[(category, item, feature)]
# The dot product of the assignments and feature values is given an upper bound
problem.addConstraint(
name=f'sum_f{feature}_c{category}',
constraint=constrained_assigns.dot(constrained_vals.values) <= sum_threshold,
)
print(problem)
problem.solve()
assert problem.status == pulp.LpStatusOptimal
print('Assigned features:')
outputs = inputs[assigns.apply(pulp.LpVariable.value) > 0.5]
print(outputs.to_string()) | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
Input feature values:
category item feature
1 1 1 10.0
2 8 30.0
3 2 22.0
4 2 36.0
5 2 54.0
7 1 2.0
8 1 8.0
9 2 19.0
2 11 2 89.0
12 2 12.0
15 2 5.0
3 16 8 52.0
21 8 62.0
22 2 17.0
8 42.0
25 8 25.0
4 28 1 90.0
30 2 10.0
Assignment variables:
category item feature
1 1 1 i1_f1
2 8 i2_f8
3 2 i3_f2
4 2 i4_f2
5 2 i5_f2
7 1 i7_f1
8 1 i8_f1
9 2 i9_f2
2 11 2 i11_f2
12 2 i12_f2
15 2 i15_f2
3 16 8 i16_f8
21 8 i21_f8
22 2 i22_f2
8 i22_f8
25 8 i25_f8
4 28 1 i28_f1
30 2 i30_f2
best_features:
MAXIMIZE
89.0*i11_f2 + 12.0*i12_f2 + 5.0*i15_f2 + 52.0*i16_f8 + 10.0*i1_f1 + 62.0*i21_f8 + 17.0*i22_f2 + 42.0*i22_f8 + 25.0*i25_f8 + 90.0*i28_f1 + 30.0*i2_f8 + 10.0*i30_f2 + 22.0*i3_f2 + 36.0*i4_f2 + 54.0*i5_f2 + 2.0*i7_f1 + 8.0*i8_f1 + 19.0*i9_f2 + 0.0
SUBJECT TO
total: i11_f2 + i12_f2 + i15_f2 + i16_f8 + i1_f1 + i21_f8 + i22_f2 + i22_f8
+ i25_f8 + i28_f1 + i2_f8 + i30_f2 + i3_f2 + i4_f2 + i5_f2 + i7_f1 + i8_f1
+ i9_f2 = 5
sum_f1_c1: 10 i1_f1 + 2 i7_f1 + 8 i8_f1 <= 100
sum_f2_c2: 89 i11_f2 + 12 i12_f2 + 5 i15_f2 <= 49
sum_f8_c3: 52 i16_f8 + 62 i21_f8 + 42 i22_f8 + 25 i25_f8 <= 66 | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
python, performance, numpy, pandas
sum_f8_c3: 52 i16_f8 + 62 i21_f8 + 42 i22_f8 + 25 i25_f8 <= 66
VARIABLES
0 <= i11_f2 <= 1 Integer
0 <= i12_f2 <= 1 Integer
0 <= i15_f2 <= 1 Integer
0 <= i16_f8 <= 1 Integer
0 <= i1_f1 <= 1 Integer
0 <= i21_f8 <= 1 Integer
0 <= i22_f2 <= 1 Integer
0 <= i22_f8 <= 1 Integer
0 <= i25_f8 <= 1 Integer
0 <= i28_f1 <= 1 Integer
0 <= i2_f8 <= 1 Integer
0 <= i30_f2 <= 1 Integer
0 <= i3_f2 <= 1 Integer
0 <= i4_f2 <= 1 Integer
0 <= i5_f2 <= 1 Integer
0 <= i7_f1 <= 1 Integer
0 <= i8_f1 <= 1 Integer
0 <= i9_f2 <= 1 Integer
Welcome to the CBC MILP Solver
Version: 2.10.3
Build Date: Dec 15 2019
At line 2 NAME MODEL
At line 3 ROWS
At line 9 COLUMNS
At line 92 RHS
At line 97 BOUNDS
At line 116 ENDATA
Problem MODEL has 4 rows, 18 columns and 28 elements
Coin0008I MODEL read with 0 errors
Result - Optimal solution found
Objective value: 272.00000000
Enumerated nodes: 0
Total iterations: 0
Time (CPU seconds): 0.00
Time (Wallclock seconds): 0.00
Option for printingOptions changed from normal to all
Total time (CPU seconds): 0.00 (Wallclock seconds): 0.00
Assigned features:
category item feature
1 2 8 30.0
4 2 36.0
5 2 54.0
3 21 8 62.0
4 28 1 90.0
You ask:
I wanted to add the constraint of making certain items from categories be a certain amount of the total variables. Example:, category 1 == 3 and the last 2 come from other categories.
Add this constraint:
problem.addConstraint(name='cat_1', constraint=assigns.loc['1'].sum() == 3) | {
"domain": "codereview.stackexchange",
"id": 44922,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, numpy, pandas",
"url": null
} |
c++, performance
Title: Suggestions on performance
Question: I have the following piece of code that is called by a program many thousands of times as part of a Monte-Carlo simulation. Using gprof, I see that 38% of the time was spent on this function. Are there any obvious areas of improvement in this code?
double StudentTCopula::uniform_to_default_time_student(double u, const
std::vector<double>& times, const std::vector<double>& values)
{
if (u == 0.0)
return 99999.0;
if (u == 1.0)
return 0.0;
size_t num_points = times.size();
size_t index = 0;
for (size_t i{1};i<num_points;++i){
if (u <= values[i - 1] && u > values[i]){
index = i;
break;
}
}
double tau = 0.0;
if (index == num_points + 1) {
auto t1 = times[num_points - 1];
auto q1 = values[num_points - 1];
auto t2 = times[num_points];
auto q2 = values[num_points];
auto lam = log(q1 / q2) / (t2 - t1);
tau = t2 - log(u / q2) / lam;
} else if (index == 0){
auto t1 = times.back();
auto q1 = values.back();
auto t2 = times[index];
auto q2 = values[index];
tau = (t1 * log(q2 / u) + t2 * log(u / q1)) / log(q2 / q1);
} else {
auto t1 = times[index - 1];
auto q1 = values[index - 1];
auto t2 = times[index];
auto q2 = values[index];
tau = (t1 * log(q2 / u) + t2 * log(u / q1)) / log(q2 / q1);
}
return tau;
}
I have tried removing the intermediate variables and directly generating a long return expression with the calculations, which did not affect gprof results.
Thank you for your time.
Answer:
Comparing floating point representations for equality has its own risks - consider
if (u <= 0 + EPS)
return ‹BIG_VALUE›;
if (1 <= u + EPS)
return 0;
For suitable values of ‹BIG_VALUE›(there should be a better name) and EPS (possibly 0). | {
"domain": "codereview.stackexchange",
"id": 44923,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, performance",
"url": null
} |
c++, performance
For suitable values of ‹BIG_VALUE›(there should be a better name) and EPS (possibly 0).
Most of the handling for index != num_points + 1 is common. Don't repeat yourself:
} else {
auto t1 = (index == 0) ? times.back() : times[index - 1];
auto q1 = (index == 0) ? values.back() : values[index - 1];
auto t2 = times[index];
auto q2 = values[index];
tau = (t1 * log(q2 / u) + t2 * log(u / q1)) / log(q2 / q1);
}
Just before the end of uniform_to_default_time_student(), code indentation is slightly inconsistent.
G. Sliepen: use log(a/b)=log(a)−log(b) to avoid the division:
auto log_u = log(u),
log_q1 = log(q1),
log_q2 = log(q2);
tau = (t1 * (log_q2 - log_u) + t2 * (log_u - log_q1)) / (log_q2 - log_q1);
(implementation imperfections by greybeard)
I have no idea whether substituting two more divisions is useful:
if (index == num_points + 1) {
auto t1 = times[num_points - 1];
auto q1 = values[num_points - 1];
auto t2 = times[num_points];
auto _q2 = 1 / values[num_points];
auto _lam = (t2 - t1) / log(q1 * _q2);
tau = t2 - log(u * _q2) * _lam;
} | {
"domain": "codereview.stackexchange",
"id": 44923,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, performance",
"url": null
} |
beginner, rust
Title: Print Hadamard matrix of order N
Question: Problem statement: The \$n\$-by-\$n\$ Hadamard matrix \$H(n)\$ is a boolean matrix with the remarkable property that any two rows differ in exactly \$\frac{\text{n}}{\text{2}}\$ bits. \$H(1)\$ is a \$1\$-by-\$1\$ matrix with the single entry true, and for \$n > 1\$, \$H(2n)\$ is obtained by aligning four copies of \$H(n)\$ in a large square, and then inverting all of the entries in the lower right n-by-n copy. Write a program that takes one command-line argument n and prints \$H(n)\$. Assume that n is a power of \$2\$.
This is one of my self-imposed challenges in Rust to become better at it. The problem was taken from Sedgewick Exercise 1.4.29.
Here is my code:
use clap::Parser;
use std::ops::RangeFrom;
use std::process::exit;
const VALID_ORDERS: RangeFrom<usize> = 1..;
#[derive(Debug, Parser)]
struct Arguments {
#[arg(index = 1)]
order: usize,
}
fn main() {
let arguments = Arguments::parse();
let order: usize = arguments.order;
let hadamard_matrix = match create_hadamard_matrix(order) {
Ok(hadamard_matrix) => hadamard_matrix,
Err(error) => {
eprintln!("{}", error);
exit(1);
}
};
for v in hadamard_matrix {
for i in v {
if i {
print!("* ");
} else {
print!(" ");
}
}
println!();
}
}
fn create_hadamard_matrix(order: usize) -> Result<Vec<Vec<bool>>, String> {
if !VALID_ORDERS.contains(&order) {
return Err(format!("Order must be at least {}.", VALID_ORDERS.start));
}
if !is_power_of_two(order) {
return Err("Order must be a power of 2.".to_string());
}
let mut hadamard_matrix = vec![vec![false; order]; order];
hadamard_matrix[0][0] = true;
let mut k: usize = 1; | {
"domain": "codereview.stackexchange",
"id": 44924,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, rust",
"url": null
} |
beginner, rust
let mut k: usize = 1;
while k < order {
for i in 0..k {
for j in 0..k {
hadamard_matrix[i + k][j] = hadamard_matrix[i][j];
hadamard_matrix[i][j + k] = hadamard_matrix[i][j];
hadamard_matrix[i + k][j + k] = !hadamard_matrix[i][j];
}
}
k += k;
}
Ok(hadamard_matrix)
}
/// Return `true` if `order` is a power of two.
///
/// Delegates to the standard library's `usize::is_power_of_two`.
/// This also fixes the original loop's behaviour for `order == 0`:
/// there `k` stayed at 1 and `order - k` underflowed (panicking in
/// debug builds); the stdlib version simply returns `false`.
fn is_power_of_two(order: usize) -> bool {
    order.is_power_of_two()
}
Is there any way that I can improve my code?
Answer:
fn is_power_of_two(order: usize) -> bool {
let mut k: usize = 1;
while k <= order / 2 {
k *= 2;
}
order - k == 0
}
That looks like a way to do it, I'm not 100% sure because it is a bit subtle, but in any case you don't need to write it: there is a built-in is_power_of_two for usize (and other integer types) already. | {
"domain": "codereview.stackexchange",
"id": 44924,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, rust",
"url": null
} |
python, java
Title: Format an English-language count of items
Question: I started reading Robert Martin's Clean Code. I'm trying to "translate" all his examples into Python, so I can understand them better since my knowledge of Python is greater than that of Java.
This class formats a count in into a natural English sentence:
Java original code of the book
public class GuessStatisticsMessage {
private String number;
private String verb;
private String pluralModifier;
public String make(char candidate, int count) {
createPluralDependentMessageParts(count);
return String.format(
"There %s %s %s%s",
verb, number, candidate, pluralModifier);
}
private void createPluralDependentMessageParts(int count) {
if (count == 0) {
thereAreNoLetters();
} else if (count == 1) {
thereIsOneLetter();
} else {
thereAreManyLetters(count);
}
}
private void thereAreManyLetters(int count) {
number = Integer.toString(count);
verb = "are";
pluralModifier = "s";
}
private void thereIsOneLetter() {
number = "1";
verb = "is";
pluralModifier = "";
}
private void thereAreNoLetters() {
number = "no";
verb = "are";
pluralModifier = "s";
}
}
My Python version
class GuessStatsMessage:
    def __init__(self, candidate, count):
        """Store the item name (candidate) and how many were found (count)."""
        self.candidate = candidate
        self.count = count
        # Message parts filled in later by __create_pluraldependant_message_parts.
        self.__number = self.__verb = self.__plural_modifier = ''
    def make_message(self):
        """Compose and print the count sentence, e.g. 'There are 10 Foos'."""
        self.__create_pluraldependant_message_parts()
        # NOTE(review): the f-string ends with a trailing space — kept as-is.
        guess_message = (f'There {self.__verb} '
                         f'{self.__number} '
                         f'{self.candidate}{self.__plural_modifier} ')
        print(guess_message)
"domain": "codereview.stackexchange",
"id": 44925,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, java",
"url": null
} |
python, java
def __create_pluraldependant_message_parts(self):
if self.count == 0:
self.__there_are_no_letters()
elif self.count == 1:
self.__there_is_one_letter()
else:
self.__there_are_many_letters()
def __there_are_no_letters(self):
self.__number = 'no'
self.__verb = 'are'
self.__plural_modifier = 's'
def __there_is_one_letter(self):
self.__number = '1'
self.__verb = 'is'
self.__plural_modifier = ''
def __there_are_many_letters(self):
self.__number = str(self.count)
self.__verb = 'are'
self.__plural_modifier = 's'
It should be used as follows:
message = GuessStatsMessage('Foo', 10)
message.make_message()
# output: There are 10 Foos
Answer: IMO, the python implementation reads like Java, and isn't Pythonic. How can we improve it?
This isn't a class
It looks like one, and it has a class definition, but it's really two functions, one of which is __init__. The other "private" methods are just if statements.
Let's refactor this as a single function:
def make_message(candidate, count):
    """Return an English sentence counting *candidate* items.

    Examples: 'There are no Foos', 'There is 1 Foo', 'There are 3 Foos'.
    """
    if not count:
        verb, number, plural = 'are', 'no', 's'
    elif count == 1:
        verb, number, plural = 'is', count, ''
    else:
        verb, number, plural = 'are', count, 's'
    return f'There {verb} {number} {candidate}{plural}'
Now you can simply do:
print(make_message('Foo', 2))
There are 2 Foos
Or
msg = make_message('Bar', 3)
print(msg)
There are 3 Bars | {
"domain": "codereview.stackexchange",
"id": 44925,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, java",
"url": null
} |
python, java
Or
msg = make_message('Bar', 3)
print(msg)
There are 3 Bars
Now, the number, verb and plural names aren't able to be accessed by anything outside the function, not that we needed the privacy model anyways. There are also fewer methods to read and maintain, your eye isn't jumping between code blocks.
The naming is short and concise, and the code is pretty easy to reason about.
We've also used the if not count idiom, where a 0 behaves like False and nonzero values act like True.
Lastly, the tuple unpacking used for number, verb, plural is a very common technique for defining multiple variables inline.
Docstrings and Type Hints
We could further enhance the readability of this function by adding a docstring and type annotations for our variables:
def make_message(candidate: str, count: int) -> str:
"""Returns a message for a number of objects with the name candidate"""
... | {
"domain": "codereview.stackexchange",
"id": 44925,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, java",
"url": null
} |
c++, multithreading, thread-safety
Title: C++: algorithm that uses fixed-size buffer of data that are produced in stream, faster than the algorithm speed; modified version
Question: This is, I hope, an improved version of this code, limiting atomic usage (following @G.Sliepen's advice) (NB: I'm limited to, at most, C++17):
#include <algorithm>
#include <array>
#include <atomic>
#include <cassert>
#include <cmath>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <thread>
// #define LOG
// #define USEACTIVESLEEP
#ifdef USEACTIVESLEEP
#define MAYBEUNSUSED(var) static_cast<void>(var)
// functions to sleep for short period of times
// active wait but thread sleep has a too large overhead to allow for short
// delays
namespace {
// Iterations per nanosec
double gk_ItPerns;
void EstimateItPerns() noexcept {
auto start = std::chrono::steady_clock::now();
constexpr std::size_t NbIt{1000000};
for (std::size_t i = 0; i < NbIt; ++i) {
volatile std::size_t DoNotOptimize = 0;
MAYBEUNSUSED(DoNotOptimize);
}
auto end = std::chrono::steady_clock::now();
auto delay =
std::chrono::duration_cast<std::chrono::nanoseconds>(end - start)
.count();
gk_ItPerns = static_cast<double>(NbIt) / static_cast<double>(delay);
}
void ActiveSleep(double i_ns) noexcept {
std::size_t NbIt = static_cast<std::size_t>(i_ns * gk_ItPerns);
for (std::size_t i = 0; i < NbIt; ++i) {
volatile std::size_t DoNotOptimize = 0;
MAYBEUNSUSED(DoNotOptimize);
}
}
} // namespace
#endif
class CAsyncAlgo {
public:
using Data_t = std::size_t;
private:
static constexpr std::size_t mNbData = 1024;
std::size_t mWorkingIndex = 1;
std::size_t mBufferIndex = 0;
// type of data buffer
using DataBuffer_t = std::array<Data_t, mNbData>;
std::size_t mIndex = 0;
bool mHasData = false;
std::array<DataBuffer_t, 2> mSamples; | {
"domain": "codereview.stackexchange",
"id": 44926,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, multithreading, thread-safety",
"url": null
} |
c++, multithreading, thread-safety
std::size_t mIndex = 0;
bool mHasData = false;
std::array<DataBuffer_t, 2> mSamples;
// Mutex for condition_variable and atomics
std::mutex mMutex;
// Condition variable used to wake up the working thread
std::condition_variable mWakeUp;
// To stop the worker
std::atomic<bool> mStop{false};
// Is an Algo instance running?
std::atomic<bool> mBusy{false};
// 1- Can an Algo instance be launched (for testing spurious wake-up)?
// not atomic because always accessed inside critical section
bool mReady{false};
// working thread
std::thread Worker;
// WorkLoad internals
// previous seen max value in buffer
Data_t mMaxVal = 0;
// number of processed data
Data_t mProcessed = 0; | {
"domain": "codereview.stackexchange",
"id": 44926,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, multithreading, thread-safety",
"url": null
} |
c++, multithreading, thread-safety
private:
    // True once shutdown has been requested (set by the destructor).
    bool Stop() const noexcept {
        // 2- no synch needed?
        // NOTE(review): relaxed load is OK here only because mStop is also
        // re-checked under mMutex in WaitForJob — confirm before reusing.
        return (mStop.load(std::memory_order_relaxed));
    }
bool Ready() const noexcept { return (mReady); }
    // Worker-side: mark this thread idle, then block until SubmitJob()
    // signals a new buffer (mReady) or the destructor requests stop.
    void WaitForJob() {
        std::unique_lock<std::mutex> lock(mMutex);
#ifdef LOG
        std::cout << "entering waiting state " << std::boolalpha << mStop
                  << std::endl;
#endif
        // 3- std::memory_order_relaxed possible as locking mMutex will prevent
        // it to be reordered before the previous Workload call
        mBusy.store(false, std::memory_order_relaxed);
        assert(lock.owns_lock());
        // Predicate guards against spurious wake-ups: proceed only on a real
        // job (Ready) or a shutdown request (Stop).
        mWakeUp.wait(lock, [this]() -> bool { return (Stop() || Ready()); });
        assert(lock.owns_lock());
        assert(mBusy || Stop());
        // Consume the "job pending" flag so the next wait blocks again.
        mReady = false;
#ifdef LOG
        std::cout << "waked up " << std::this_thread::get_id() << std::endl;
#endif
    }
// Check if the working buffer is holding increasing successive integers
// from some point max value must be strictly greater than the one of the
// previous call {5,6,7,3,4} is valid if previous greatest value is strictly
// smaller than 7 {5,6,7,2,4} is invalid smallest value must also be
// strictly greater than mMaxVal as buffers do not overlap
    // Verify the working buffer holds consecutive integers (possibly rotated
    // once) and that all values are strictly greater than those of the
    // previous buffer; then record the new max and the processed count.
    void WorkLoad() {
        // Defaults for the unrotated case: last element is the max,
        // first element is the min.
        Data_t Max = mSamples[mWorkingIndex][mNbData - 1];
        Data_t Min = mSamples[mWorkingIndex][0];
        for (std::size_t i = 1; i < mNbData; ++i) {
            if (mSamples[mWorkingIndex][i] !=
                mSamples[mWorkingIndex][i - 1] + 1) {
                // Found the single wrap point of a rotated sequence.
                assert(mSamples[mWorkingIndex][i - 1] ==
                       (mSamples[mWorkingIndex][i] + mNbData - 1));
                Max = mSamples[mWorkingIndex][i - 1];
                Min = mSamples[mWorkingIndex][i];
            }
        }
        // Buffers must not overlap the previously seen range.
        assert(Max > mMaxVal);
        assert(Min > mMaxVal);
        mMaxVal = Max;
        mProcessed += mNbData;
    }
void MainLoop() {
while (!Stop()) {
WaitForJob(); | {
"domain": "codereview.stackexchange",
"id": 44926,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, multithreading, thread-safety",
"url": null
} |
c++, multithreading, thread-safety
}
    // Worker-thread entry point: sleep until a job (or stop) is signalled,
    // then process the submitted buffer; exit when Stop() is requested.
    void MainLoop() {
        while (!Stop()) {
            WaitForJob();
            // A stop request may be what woke us; bail out before touching
            // the buffers.
            if (Stop()) {
                return;
            }
            WorkLoad();
        }
} | {
"domain": "codereview.stackexchange",
"id": 44926,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, multithreading, thread-safety",
"url": null
} |
c++, multithreading, thread-safety
public:
CAsyncAlgo() : Worker([this]() mutable -> void { MainLoop(); }) {}
void Push(Data_t const Sample, std::size_t) {
// writing one sample in current circular buffer
mSamples[mBufferIndex][mIndex] = Sample;
mIndex = (mIndex + 1) % mNbData;
if (mIndex == 0) {
// buffer is full
mHasData = true;
}
}
bool IsReady() {
if (mHasData && (mBusy.load(std::memory_order_acquire) == false)) {
return true;
}
return false;
}
    // Producer-side: hand the full buffer to the worker by swapping the
    // buffer roles under the mutex, then wake the worker thread.
    void SubmitJob() {
#ifdef LOG
        std::cout << "SubmitJob" << std::endl;
#endif
        {
            std::lock_guard<std::mutex> lock(mMutex);
            mReady = true;
            // 4- std::memory_order_relaxed because no synch needed, read only
            // by this thread
            mBusy.store(true, std::memory_order_relaxed);
            // Swap fill/work buffers and reset the fill state.
            std::swap(mWorkingIndex, mBufferIndex);
            mIndex = 0;
            mHasData = false;
        }
        // Notify outside the lock to avoid waking the worker into a held mutex.
        mWakeUp.notify_one();
    }
    // Placeholder for a synchronous per-sample computation (currently a no-op).
    void Run(double const, double &) const {
        // NOP
    }
"domain": "codereview.stackexchange",
"id": 44926,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, multithreading, thread-safety",
"url": null
} |
c++, multithreading, thread-safety
    // destructor
    ///\details finishing computation and releasing resources: requests the
    /// worker to stop, wakes it, and joins the worker thread
    ///\todo explicitly "close" computation before the end of life of the
    /// object
    ~CAsyncAlgo() {
        {
#ifdef LOG
            std::cout << "closing" << std::endl;
#endif
            // Set the stop flag under the mutex so a waiting worker cannot
            // miss it between its predicate check and its wait.
            std::lock_guard<std::mutex> lock(mMutex);
            // 5- std::memory_order_relaxed: on unlocking may synchronise with
            // the lock in wait in this case, the worker will see true
            mStop.store(true, std::memory_order_relaxed);
        }
        mWakeUp.notify_one();
        if (Worker.joinable()) {
#ifdef LOG
            std::cout << "waiting for last run" << std::endl;
#endif
            Worker.join();
#ifdef LOG
            std::cout << "finished" << std::endl;
#endif
            std::cout << "Processed " << GetNbProcessed() << " data"
                      << std::endl;
        }
    }
std::size_t GetNbProcessed() { return mProcessed; }
};
static constexpr std::size_t NbSamples = 1000000;
int main() {
CAsyncAlgo Algo;
std::cout << std::this_thread::get_id() << std::endl; | {
"domain": "codereview.stackexchange",
"id": 44926,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, multithreading, thread-safety",
"url": null
} |
c++, multithreading, thread-safety
int main() {
CAsyncAlgo Algo;
std::cout << std::this_thread::get_id() << std::endl;
#ifdef USEACTIVESLEEP
EstimateItPerns();
std::size_t acc{0};
#endif
for (std::size_t i = 0; i < NbSamples; ++i) {
#ifdef USEACTIVESLEEP
double period = 10000.; // ns
// manage data production frequency
auto start = std::chrono::steady_clock::now();
#endif
CAsyncAlgo::Data_t data =
static_cast<CAsyncAlgo::Data_t>(i + 1); // 0 is reserved
Algo.Push(data, i);
#ifdef USEACTIVESLEEP
auto end = std::chrono::steady_clock::now();
// no more synchro needed as only this thread is designed to launch a
// new computation
if (static_cast<double>(
std::chrono::duration_cast<std::chrono::nanoseconds>(end -
start)
.count()) < period) {
ActiveSleep(
period -
static_cast<double>(
std::chrono::duration_cast<std::chrono::nanoseconds>(end -
start)
.count()));
}
end = std::chrono::steady_clock::now();
acc = acc + static_cast<std::size_t>(
std::chrono::duration_cast<std::chrono::microseconds>(
end - start)
.count());
#endif
if (Algo.IsReady()) {
#ifdef LOG
#ifdef USEACTIVESLEEP
std::cout << "Ready " << i << " "
<< static_cast<double>(acc) /
(static_cast<double>(i) + 1.)
<< " us/Sample" << std::endl;
#endif
#endif
Algo.SubmitJob();
}
double res;
Algo.Run(3.14, res);
}
#ifdef USEACTIVESLEEP
std::cout << static_cast<double>(acc) / NbSamples << "us/Sample"
<< std::endl;
#endif | {
"domain": "codereview.stackexchange",
"id": 44926,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, multithreading, thread-safety",
"url": null
} |
c++, multithreading, thread-safety
return 0;
}
Live
One atomic has been replaced by a simple bool as it was only accessed in critical sections.
The others were kept because they are accessed outside critical sections, but with less constrained memory orders since, once again, when used inside such sections, the mutex provides enough synchronization and ordering constraints.
Separating buffers management from workflow would still have to be done.
Yet I'd just like to have feedback on remaining comments (1 to 5)
Answer: A large part of my answer to your previous version still applies. I'll revisit some of it below, but first lets start with the changes and the remaining questions.
Can we remove all atomics?
I already mentioned that you should avoid mixing mutexes and atomic variables. That still applies. You have two atomic variables left. Inside WaitForJob() and SubmitJob(), those variables are accessed with the mutex held. There are a few places where these atomic variables are read while not holding a mutex. We can avoid needing those reads by having WaitForJob() return the value of mStop, and by creating a TrySubmitJob() that itself check mReady:
class CAsyncAlgo {
…
std::mutex mMutex;
std::condition_variable mWakeUp;
bool mStop{false};
bool mBusy{false};
…
bool WaitForJob() {
std::unique_lock<std::mutex> lock(mMutex);
mBusy = false;
mWakeUp.wait(lock, [&]{ return mStop || mReady; });
mReady = false;
return !mStop;
}
void TrySubmitJob() {
{
std::lock_guard<std::mutex> lock(mMutex);
if (!mHasData || mBusy)
return;
mReady = true;
mBusy = true
std::swap(mWorkingIndex, mBufferIndex);
mIndex = 0;
mHasData = false;
}
mWakeUp.notify_one();
}
void MainLoop() {
while (WaitForJob())
WorkLoad();
}
…
}; | {
"domain": "codereview.stackexchange",
"id": 44926,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, multithreading, thread-safety",
"url": null
} |
c++, multithreading, thread-safety
void MainLoop() {
while (WaitForJob())
WorkLoad();
}
…
};
Note how MainLoop() has simplified a lot. WaitForJob() does less atomic operations now, so it's actually more efficient.
The only issue is that TrySubmitJob() will always lock the mutex, while if (Algo.IsReady()) Algo.SubmitJob() avoided locking the mutex if the consumer was still busy. So if you are going to call this every time you Push() one number to the buffer, then it probably is worth it to keep mBusy as an atomic variable. However, its value quickly diminishes if you do a lot more work between calls to TrySubmitJob().
Questions in the comments
// 1- Can an Algo instance be launched (for testing spurious wake-up)?
I don't know what you mean by this.
// 2- no synch needed?
return (mStop.load(std::memory_order_relaxed));
It's probably fine here because you check it in WaitForJob() with the mutex held as well, and it's only ever set with a held mutex, so that will ensure things will be synchronized. For the two calls of Stop() that are done without the mutex held: the first one is just an unnecessary check (it looks like an optimization, but because it is mostly false for most of the time your program runs, it's actually a pessimization), the second will be guaranteed to see true if WaitForJob() saw that it was true. As mentioned above, it's better if you make this non-atomic.
// 3- std::memory_order_relaxed possible as locking mMutex will prevent
// it to be reordered before the previous Workload call
mBusy.store(false, std::memory_order_relaxed);
This is not true. Locking the mutex is a std::memory_order_acquire action. From cppreference.com:
memory_order_acquire: A load operation with this memory order performs the acquire operation on the affected memory location: no reads or writes in the current thread can be reordered before this load. All writes in other threads that release the same atomic variable are visible in the current thread […] | {
"domain": "codereview.stackexchange",
"id": 44926,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, multithreading, thread-safety",
"url": null
} |
c++, multithreading, thread-safety
Note the emphasis. The write to mBusy cannot be reordered before the acquire action on the mutex. But whatever WorkLoad() is doing has nothing to do with the mutex nor mBusy. Reads and writes from WorkLoad() could be reordered to after the locking of the mutex (at least as seen from other threads).
It will still be OK in your code: at worst SubmitJob() is called too early, but since it takes the mutex as well, it will at least not run before WaitForJob() has released its mutex.
// 4- std::memory_order_relaxed because no synch needed, read only
// by this thread
mBusy.store(true, std::memory_order_relaxed);
One of the assert() calls in WaitForJob() reads mBusy, so it's not "read only by this thread" unless you compile with assertions disabled (this might thus be a Heisenbug). Apart from that it's fine.
// 5- std::memory_order_relaxed: on unlocking may synchronise with
// the lock in wait in this case, the worker will see true
mStop.store(true, std::memory_order_relaxed);
See comment 2.
Using atomics correctly is tricky, especially if you want to have the most relaxed memory ordering possible. Your intution might fail you. What works fine on one architecture might not work on another. I think your reasoning is incorrect in some cases, and that might lead to bugs. I personally would stick to using release/acquire semantics, or just purely relying on mutexes instead of atomics, as that is easier to understand.
Separate the data structure from the workers
Again, the class CSyncAlgo is too complicated, making it harder to understand, and making it less flexible. I strongly suggest you split things up. Create a class that just manages the buffers, then you can have two threads that access an object of that class. Consider being able to write:
int main() {
DoubleBuffer<std::size_t, 1024> dblbuf; | {
"domain": "codereview.stackexchange",
"id": 44926,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, multithreading, thread-safety",
"url": null
} |
c++, multithreading, thread-safety
std::thread producer([&]{
for (…) {
…
dblbuf.Push(data, i);
dblbuf.TrySubmit();
…
}
dblbuf.Stop();
});
std::thread consumer([&]{
while (dblbuf.WaitForJob()) {
for (…) {
…
auto sample = dblBuf.Pop(i);
…
}
}
});
consumer.join();
producer.join();
}
Consider using at least three buffers for better performance
The way your code works now, it checks for every sample written if it can call SubmitJob(). Even if you do this with an atomic variable, it's still one atomic operation per sample. It would be better if you would not have to do this per-sample check. If you have three buffers, then the producer can write a full buffer, and then swap to another buffer. This way, you only need a check for one full buffer. | {
"domain": "codereview.stackexchange",
"id": 44926,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, multithreading, thread-safety",
"url": null
} |
c++, converting
Title: fast convert string_view to floating point
Question: The code makes following assumptions:
std::string_view is not null-terminated
there are no exponents, all floating point inputs are in the way 123.123
limiting digits before and after decimal point
<ctype.h> free. No locale. Uses own isspace() and isdigits() | {
"domain": "codereview.stackexchange",
"id": 44927,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, converting",
"url": null
} |
c++, converting
#include <cstdint>
#include <string_view>
#include <limits>
// based on http://www.jbox.dk/sanos/source/lib/strtod.c.html
namespace to_fp_impl_{
namespace{
constexpr char decimal_point = '.';
// Locale-free whitespace test: only space and tab count.
auto isspace(char c){
    return (c == ' ') || (c == '\t');
}
// ASCII digit test, avoiding <ctype.h> and any locale dependency.
auto isdigit(char c){
    return ('0' <= c) && (c <= '9');
}
// Fold one decimal digit character into the running value:
// number <- number * 10 + digit.
template<typename FP>
void add_char(FP &number, char c){
    auto const digit = c - '0';
    number = number * 10 + digit;
}
}
// Parse result: the converted value plus a validity flag.
template<typename FP>
struct ResultFP{
    FP num;          // parsed value; 0 when parsing failed
    bool ok = true;  // false when the input could not be parsed
    // Canonical error result.
    static auto err(){
        return ResultFP{ 0, false };
    }
};
} // namespace to_fp_impl_
template<
typename FP,
uint8_t digits = std::numeric_limits<FP>::digits10,
uint8_t decimals = digits
>
auto to_fp(std::string_view const str){
static_assert(
std::is_floating_point_v<FP> &&
digits <= std::numeric_limits<FP>::digits10 &&
decimals <= std::numeric_limits<FP>::digits10
);
// -----
using namespace to_fp_impl_;
using Result = ResultFP<FP>;
auto it = std::begin(str);
auto end = std::end(str);
// -----
// Skip leading whitespace
while (it != end && isspace(*it))
++it;
// -----
// Handle optional sign
int8_t sign = +1;
if (it != end){
if (*it == '-'){
sign = -1;
++it;
}else if (*it == '+'){
// sign = +1;
++it;
}
}
// -----
FP number = 0.0;
uint8_t total_digits = 0;
// Process digits
{
uint8_t num_digits = 0;
while (it != end && isdigit(*it)){
if (++num_digits > digits)
return Result::err();
add_char(number, *it);
++it;
}
total_digits += num_digits;
}
// ----- | {
"domain": "codereview.stackexchange",
"id": 44927,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, converting",
"url": null
} |
c++, converting
++it;
}
total_digits += num_digits;
}
// -----
// Process decimal part
if (it != end && *it == decimal_point){
++it;
FP exp = 1;
uint8_t num_digits = 0;
while (it != end && isdigit(*it)){
if (++num_digits > decimals)
return Result::err();
add_char(number, *it);
++it;
exp *= 0.1;
}
total_digits += num_digits;
number *= exp;
}
// -----
return Result{ number * sign, total_digits != 0 };
}
// Like to_fp(), but collapse parse failures to a caller-supplied default.
template<
    typename FP,
    uint8_t digits = std::numeric_limits<FP>::digits10,
    uint8_t decimals = digits
>
auto to_fp_def(std::string_view const str, FP def = 0){
    auto const result = to_fp<FP, digits, decimals>(str);
    if (result.ok)
        return result.num;
    return def;
}
// Convenience wrapper: parse a double, returning def on failure.
template<
    uint8_t digits = std::numeric_limits<double>::digits10,
    uint8_t decimals = digits
>
auto to_double_def(std::string_view const str, double def = 0){
    return to_fp_def<double, digits, decimals>(str, def);
}
// Convenience wrapper: parse a float, returning def on failure.
template<
    uint8_t digits = std::numeric_limits<float>::digits10,
    uint8_t decimals = digits
>
auto to_float_def(std::string_view const str, float def = 0){
    return to_fp_def<float, digits, decimals>(str, def);
}
#include <cstdio>
int main(){
auto _ = [](const char *s){
const char *mask = ">%32.15f<\n";
constexpr double def = -0.111111111111111;
printf(mask, to_double_def(s, def));
};
_(" ." ); //error
_(" -." ); //error
_(" +." ); //error
_(" .0" );
_(" -.0" );
_(" +.0" );
_(" 0." );
_(" -0." );
_(" +0." );
_(" 0" );
_(" -0" );
_(" +0" ); | {
"domain": "codereview.stackexchange",
"id": 44927,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, converting",
"url": null
} |
c++, converting
_(" 0" );
_(" -0" );
_(" +0" );
_(" 100.1" );
_("-100.1" );
_("+100.1" );
_(" 123456789012345" );
_(" 123456789012345." );
_(" .123456789012345" );
_(" 123456789012345.123456789012345" ); // OK, but truncated
_(" -123456789012345" );
_(" -123456789012345." );
_(" -.123456789012345" );
_(" -123456789012345.123456789012345" ); // OK, but truncated
_(" 1234567890123456." ); //error, too long
_(" .1234567890123456" ); //error, too long
_(" 1234567890123456.1234567890123456" ); //error, too long
} | {
"domain": "codereview.stackexchange",
"id": 44927,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, converting",
"url": null
} |
c++, converting
Result:
> -0.111111111111111<
> -0.111111111111111<
> -0.111111111111111<
> 0.000000000000000<
> -0.000000000000000<
> 0.000000000000000<
> 0.000000000000000<
> -0.000000000000000<
> 0.000000000000000<
> 0.000000000000000<
> -0.000000000000000<
> 0.000000000000000<
> 100.100000000000009<
> -100.100000000000009<
> 100.100000000000009<
> 123456789012345.000000000000000<
> 123456789012345.000000000000000<
> 0.123456789012345<
> 123456789012345.250000000000000<
>-123456789012345.000000000000000<
>-123456789012345.000000000000000<
> -0.123456789012345<
>-123456789012345.250000000000000<
> -0.111111111111111<
> -0.111111111111111<
> -0.111111111111111<
Benchmark:
the code is 4.6 x faster than std::strtod(), but slower than std::from_chars()
https://quick-bench.com/q/cHVL5PW9m4WSp6OvPnrF-ApCrG4
Answer: hoist out of loop
Consider comparing num_digits to decimals
at the very end,
so you bench quicker.
FP accumulated error
This expression concerns me:
exp *= 0.1;
We're supposed to be able to roundtrip any
expression between FP and string, losslessly.
Yet this is an inexact representation of 1/10th.
Consider incrementing an integer and then
computing the exp multiplier afresh each time
from that integer.
EDIT
Actually, it turns out that this is pretty terrible:
number *= exp;
Why?
Because exp is inexact.
It is 10 ** -N, e.g. 10 ** -6.
A much better approach would be
to assign it 10 ** N, e.g. a million, and compute
number /= exp;
In this case we're dividing an exact quantity,
an integer (many integers fit fine within a double),
by another exact quantity: number / exp.
Only then do we round-off to 53 bits.
In contrast, the OP code suffers from a pair of
rounding operations: | {
"domain": "codereview.stackexchange",
"id": 44927,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, converting",
"url": null
} |
c++, converting
infinite repeating binary fraction for 10 ** -N
division
Truncating twice, to 53 bits, leads to worse errors.
Try it and see.
Good examples are 0.3 and 0.7.
We don't want to obtain a result
like 0.30000000000000004
or 0.7000000000000001 -- we'd much
rather be able to roundtrip back and forth
without such noise creeping in. | {
"domain": "codereview.stackexchange",
"id": 44927,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, converting",
"url": null
} |
c++, converting
There is a subtler difficulty with this computation.
The spec
requires rounding toward an even number in some cases.
Let's back up a moment.
A 64-bit double doesn't quite represent a number.
Rather, it represents an interval on the real number line,
bounded by adjacent bit patterns.
Here, "adjacent" pretty much corresponds to
C cast punning where you treat the significand
bits as an int and use the ++ increment operator.
While the real number line is infinitely divisible,
FP space consists of a finite number of adjacent intervals
(plus NaNs, infinities, and that whole -0.0 thing).
So the double 1.0 covers 1 through 1+ϵ,
and 2.0 covers 2 through 2 plus a slightly bigger epsilon.
If our division result falls within that ϵ interval,
then we can roundtrip back and forth all day long
without noise, going from string to double to string to double.
Many numbers are amenable to this, such as 0.2 and even 0.3.
They are infinite repeating binary fractions
which we truncate to 53 bits and obtain a perfectly nice result.
The fraction goes on infinitely but it lands squarely
within an interval.
The truncation can be viewed as appending an
infinite number of zeros on the end.
For a number that appears to land right on an
interval boundary, when rounding to 53 bits we're
required to round to even.
Kind of a tricky requirement.
Again, this is like having infinite zeros to the right.
Here is an example ASCII string which illustrates this:
"0.8750000000582075".
The correct to_fp() result would be
0.8750000000582076 rather than
0.8750000000582075.
Once we see that ...76 ending in the double,
we can roundtrip to string to double to string
all day long and it remains ...76.
The underlying representation for ...75, and for ...76, is
0x1.c00000007ffffp-1, while for ...77 it is
0x1.c000000080000p-1. | {
"domain": "codereview.stackexchange",
"id": 44927,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, converting",
"url": null
} |
python, performance, tree, search, numba
Title: Optimizing a node search method given a 2-D spatial point
Question: I have a tree-like structure called grid. I have designed it as a structured numpy array. Each element of grid is a tree-node. Each node itself is a structured numpy array, with fields that describe its bounding box (xmin, xmax, ymin, ymax) in 2-D space. Each node has an ID which is basically the index of that node in grid. Each node has a field parent which is the ID of its parent. Each node has a field called children which is a numpy array of integers containing the IDs of its children. nChildren obviously denotes number of children that node has (this tree is not a strict binary/quadtree). Root node has parent ID = -1 and -99999 is just a flag for when I want to return an integer instead of None.
Given below is a function, whose arguments are (r, z), a spatial-point in 2-D space, c_index which is the node we start with, and of course, the whole grid object. Task is to find the smallest node given (r, z) and a starting c_index that contains the point (speaking of that, if anybody has an idea why we use squares of the xmin and xmax when checking if the point is in the cell, please tell me).
I have done profiling of the function. Without using Numba-JIT with nopython=True, the function takes around ~75 seconds for around ~180K calls. With Numba-JIT with nopython=True, it takes around ~14 seconds for around the same number of calls. That is good and all, but I desire a bit more performance as you can tell it is called an obscenely large number of times. The problem is that these results are for a test run with a small number of parameters than I will be actually using. When the codebase will be actually deployed, this function will be called probably around a million times, so times add up.
Here is the function:
def locate_photon_cell_mirror(r, z, c_index, grid):
NMAX = 1000000
found = False
cout_index = c_index
abs_z = np.abs(z) | {
"domain": "codereview.stackexchange",
"id": 44928,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, tree, search, numba",
"url": null
} |
python, performance, tree, search, numba
for j in range(NMAX):
cout = grid[cout_index]
if (cout['xmin']**2 <= r and
cout['xmax']**2 >= r and
cout['ymin'] <= abs_z and
cout['ymax'] >= abs_z):
if (cout['nChildren'] == 0):
found = True
return cout_index, found
flag = True
for i in range(cout['nChildren']):
child_cell = grid[cout['children'][i]]
if (child_cell['xmin']**2 <= r and
child_cell['xmax']**2 >= r and
child_cell['ymin'] <= abs_z and
child_cell['ymax'] >= abs_z):
cout_index = cout['children'][i]
flag = False
break
if (flag):
cout_index = -999999
return cout_index, found
else:
cout_parent = cout['parent']
if cout_parent != -1:
cout_index = cout_parent
else:
cout_index = -999999
return cout_index, found
cout_index = -999999
return cout_index, found
As Numba-JIT is not enough for me, I'm looking for a faster algorithm to achieve this. If I understand it correctly, the problem is basically to do this: given a point in a plane and a rectangle, what is the smallest rectangle that contains the point? (as all child nodes will be part of the parent node, as is the case in 2-D space-partitioning trees).
Answer: flags
I appreciate the clarity of this, thank you.
found = True
return cout_index, found | {
"domain": "codereview.stackexchange",
"id": 44928,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, tree, search, numba",
"url": null
} |
python, performance, tree, search, numba
That is, I know exactly what the return tuple means.
It's much clearer than return cout_index, True.
Consider creating a
namedtuple
so you can concisely return ... , found=True).
That way things are very clear to the caller, as well,
for example during a debugging session.
Here is the only other place we assign that variable:
found = False
There are three returns.
Maybe we don't need that variable,
and could have three literal False values in return statements?
This is not a terrific name:
flag = True
It has a related meaning.
Maybe it wants to claim the "found" name?
Or maybe we could come up with a more informative identifier.
The whole for i loop is really begging for Extract Helper.
Then each boolean would operate within its own scope,
reducing the coupling which prevented me from immediately
grasping Author's Intent behind these loops.
sentinel
Please define a
symbolic name
for the -999999 sentinel you use in a few places.
Consider defining another name for -1,
which is used with cout_parent.
extra parens
Extra ( ) parentheses are Good in those 4-line conditionals.
Thank you for avoiding \ backwhack line continuations in that way.
But this isn't C.
Avoid saying things like this:
if (cout['nChildren'] == 0):
if (flag):
A simple if flag: suffices, and is definitely preferable.
I know, I know, "it's a lotta
rules!"
But hey, there's no need to remember all of them.
That's what the machine is for.
Just run "$ black *.py"
and you're done, it's all fixed up yet still means the same.
The whole idea is to prevent trivial spelling nits like
this from even surfacing during code review. | {
"domain": "codereview.stackexchange",
"id": 44928,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, tree, search, numba",
"url": null
} |
python, performance, tree, search, numba
range for bbox
First, you're storing {xmin, xmax} values
but you might find it more convenient to store the square
of those values, so you square just once.
Or compute r_sqrt just once on the way in,
similar to z_abs.
Second, you're storing a pair of numbers which are only
used in comparisons.
You might find it more convenient to store
cout['x_range'] = range(xmin**2, xmax**2)
so that later you can just ask if r in cout['x_range'].
(I know, there's a closed vs half-open interval detail,
but we can finesse that.)
It would be helpful for the source code to cite a reference,
perhaps a URL, so we understand where the (r, z)
terminology came from.
Initially I was mentally reading that as (radius, theta),
until I saw it doesn't apply here.
caching, discretizing
You didn't describe what distributions we
see over r and z values.
Let's imagine that both range over the unit interval,
and that some values are more common than others,
perhaps due to a normal distribution.
I'm going to assume
(r, z) completely specifies a grid location since c_index is just a starting hint,
there's a limited number of cell mirror locations, maybe a thousand, and
values are high-resolution, say six or nine decimal places.
Given that, you might want to decorate a helper with
@lru_cache.
The idea would be to discretize values to something
coarser but "good enough", say three or four places,
and call the decorated helper with those values.
Fix r and z for the moment.
Then a call with (r + 2ϵ, z) might return index 7.
And a subsequent (r + ϵ, z) call would enjoy
a cache hit, also returning 7, without nested loops.
You know the spacing between mirror cell locations
much better than I do, and better than that LRU utility.
Feel free to implement your own simple caching approach,
which would be free to not cache (r, z) values
that are "too close to the border",
forcing careful evaluation of which cell to return. | {
"domain": "codereview.stackexchange",
"id": 44928,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, tree, search, numba",
"url": null
} |
python, performance, tree, search, numba
datastructure, Voronoi
There are plenty of
Voronoi diagram
implementations available on
pypi.
You might use Voronoi cell boundaries to map (r, z) to nearest cell.
Or brute force it.
I hear that memory is cheap.
Allocate a numpy array,
impose a sufficiently fine discrete grid on your space,
and exhaustively calculate "nearest mirror cell" for every point.
This can be cheap to do: iterate over each mirror,
fill in "I am nearest!" for some limited radius of grid points,
then go to next mirror and repeat, overwriting in the case
where that mirror turns out to be nearest.
datastructure, spatial index
Why does this even need to be your problem?
PostGIS
and similar products have already solved it.
Use postgres queries to rapidly identify containing shapes.
Note that a 2-D index is very different
from having a pair of indexes,
one on the r column and another on the z column.
Consider annotating those shapes with a size or area attribute,
so you can rapidly identify the smallest containing shape. | {
"domain": "codereview.stackexchange",
"id": 44928,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, tree, search, numba",
"url": null
} |
pandas, matplotlib
Title: groupby in pandas and plot | {
"domain": "codereview.stackexchange",
"id": 44929,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "pandas, matplotlib",
"url": null
} |
pandas, matplotlib
Question: I have a csv file that looks like this:
,age,department,education,recruitment_type,job_level,rating,awards,certifications,salary,gender,entry_date,satisfied
0,28,HR,Postgraduate,Referral,5,2.0,1,0,78075.0,Male,2019-02-01,1
1,50,Technology,Postgraduate,Recruitment Agency,3,5.0,2,1,38177.1,Male,2017-01-17,0
2,43,Technology,Undergraduate,Referral,4,1.0,2,0,59143.5,Female,2012-08-27,1
3,44,Sales,Postgraduate,On-Campus,2,3.0,0,0,26824.5,Female,2017-07-25,1
4,33,HR,Undergraduate,Recruitment Agency,2,1.0,5,0,26824.5,Male,2019-05-17,1
5,40,Purchasing,Undergraduate,Walk-in,3,3.0,7,1,38177.1,Male,2004-04-22,1
6,26,Purchasing,Undergraduate,Referral,5,5.0,2,0,78075.0,Male,2019-12-10,1
7,25,Technology,Undergraduate,Recruitment Agency,1,1.0,4,0,21668.4,Female,2017-03-18,0
8,35,HR,Postgraduate,Referral,3,4.0,0,0,38177.1,Female,2015-04-02,1
9,45,Technology,Postgraduate,Referral,3,3.0,9,0,38177.1,Female,2004-03-19,0
10,31,Marketing,Undergraduate,Walk-in,4,4.0,6,0,59143.5,Male,2009-01-24,1
11,43,Technology,Postgraduate,Recruitment Agency,2,1.0,9,1,26824.5,Male,2016-03-10,1
12,28,Technology,Undergraduate,On-Campus,3,4.0,0,0,38177.1,Female,2013-04-24,0
13,48,Purchasing,Postgraduate,Referral,3,4.0,8,0,38177.1,Male,2010-07-25,1
14,52,Purchasing,Postgraduate,Recruitment Agency,5,1.0,7,0,78075.0,Male,2018-02-07,1
15,50,Purchasing,Undergraduate,Recruitment Agency,5,5.0,6,0,78075.0,Male,2014-04-24,1
16,34,Marketing,Postgraduate,On-Campus,1,4.0,9,0,21668.4,Male,2014-12-10,0
17,24,Purchasing,Undergraduate,Recruitment Agency,4,4.0,6,0,59143.5,Female,2018-02-18,1
18,54,HR,Postgraduate,On-Campus,1,5.0,4,0,21668.4,Female,2014-05-07,1
19,25,Sales,Undergraduate,Recruitment Agency,5,4.0,4,0,78075.0,Male,2012-02-15,1
20,35,HR,Undergraduate,On-Campus,2,4.0,4,0,26824.5,Female,2008-01-15,1
21,50,HR,Postgraduate,Referral,5,4.0,0,0,78075.0,Male,2015-04-13,1
22,34,Purchasing,Postgraduate,Referral,4,2.0,7,1,59143.5,Male,2013-07-02,1 | {
"domain": "codereview.stackexchange",
"id": 44929,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "pandas, matplotlib",
"url": null
} |
pandas, matplotlib
22,34,Purchasing,Postgraduate,Referral,4,2.0,7,1,59143.5,Male,2013-07-02,1
23,37,Sales,Undergraduate,Recruitment Agency,5,5.0,0,1,78075.0,Male,2016-03-22,1
24,31,Sales,Postgraduate,Walk-in,4,4.0,3,1,59143.5,Female,2006-09-05,1
25,53,Sales,Postgraduate,Walk-in,4,5.0,8,1,59143.5,Female,2005-10-08,1
26,45,Marketing,Undergraduate,Walk-in,4,3.0,8,0,59143.5,Male,2008-01-08,1
27,40,Purchasing,Undergraduate,Walk-in,4,3.0,4,1,59143.5,Female,2005-11-19,0 | {
"domain": "codereview.stackexchange",
"id": 44929,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "pandas, matplotlib",
"url": null
} |
pandas, matplotlib
The question that should be answered is how many people are recruited per department as a function of time. This should be shown in a line chart.
This was my solution:
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("employees_satisfaction_transformed.csv", index_col=0)
recruitment_groups = df.groupby("recruitment_type")
campus = recruitment_groups.get_group("On-Campus")["entry_date"]
walk_in = recruitment_groups.get_group("Walk-in")["entry_date"]
referral = recruitment_groups.get_group("Referral")["entry_date"]
agency = recruitment_groups.get_group("Recruitment Agency")["entry_date"]
campus = campus.sort_values().reset_index()
campus['index'] = campus.index
walk_in = walk_in.sort_values().reset_index()
walk_in['index'] = walk_in.index
referral = referral.sort_values().reset_index()
referral['index'] = referral.index
agency = agency.sort_values().reset_index()
agency['index'] = agency.index
plt.plot(campus['entry_date'], campus['index'], label="campus")
plt.plot(walk_in['entry_date'], walk_in['index'], label="walk_in")
plt.plot(referral['entry_date'], referral['index'], label="referral")
plt.plot(agency['entry_date'], agency['index'], label="agency")
plt.legend(loc='best')
plt.show()
I'm sort of new to pandas so any critique is welcome.
Answer: break out helpers
You serially assign these four variables:
campus
walk_in
referral
agency
This code is just
crying out to you
to define a helper method and then iterate
over those four columns.
Which would include the whole sort / reset thing.
There's an opportunity for a for loop
to do some plotting, but that's a separate item.
define function
You created a bunch of top-level global variables. To reduce
coupling
define them within a function,
perhaps def main():,
so they go out of scope once the function exits
and then they won't pollute the global namespace. | {
"domain": "codereview.stackexchange",
"id": 44929,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "pandas, matplotlib",
"url": null
} |
beginner, c
Title: CS50 Week 2: Converting vowels to numbers and outputting new string
Question: Problem: User inputs a word or phrase as command-line argument and a new word is printed with some vowels replaced by numbers.
Question: How do I improve my code design and style? Is it possible to not have to repeat x[i] == '…' in my if statement?
#include <cs50.h>
#include <stdio.h>
#include <string.h>
string replace(string x);
int main(int argc, string argv[])
{
if (argc != 2) // if user does not input 2 words, then print 'ERROR' message
{
printf("ERROR\n");
return 1;
}
else
{
replace(argv[1]);
printf("\n");
}
}
string replace(string x)
{
for(int i = 0, c = strlen(x); i < c; i++) //for each character 'i', do the following until the position of character is less than the length of the string
{
if (x[i] == 'a' || x[i] == 'e' || x[i] == 'i' || x[i] == 'o')
{
switch (x[i])
{
case 'a':
printf("6");
break;
case 'e':
printf("3");
break;
case 'i':
printf("1");
break;
case 'o':
printf("0");
break;
default:
printf("%s\n", x);
}
}
else //if not a vowel as above, print same character
{
printf("%c", x[i]);
}
}
return 0;
}
Answer: We don't need to forward-declare replace() if we define it before main(). That saves having to keep the two signatures maintained in parallel. As a general guide, put main() last in your program, and the lowest-level functions first. | {
"domain": "codereview.stackexchange",
"id": 44930,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, c",
"url": null
} |
beginner, c
We can improve the error handling:
#include <stdlib.h>
fprintf(stderr, "Usage: %s <string>\n", argv[0]);
return EXIT_FAILURE;
replace() returns a string but we don't use that. I would expect something like
printf("%s\n", replace(argv[1]));
Because the replace() function has two responsibilities (transforming the input and printing it), it's less reusable than a function that just does one thing.
We have a type mismatch in int c = strlen(x). The function returns a size_t, so we should make c the same type. Instead of using strlen(), consider using a pointer that advances until it reaches a null character.
We don't need if/else around the switch. Use the default: label for the non-matching letters.
Modified code
Applying my improvements (and using ordinary char* instead of CS50 string):
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
char *replace(char *const s)
{
for (char *p = s; *p; ++p) {
switch (*p) {
case 'a':
*p = '6';
break;
case 'e':
*p = '3';
break;
case 'i':
*p = '1';
break;
case 'o':
*p = '0';
break;
/* default: no change */
}
}
return s;
}
int main(int argc, char *argv[])
{
if (argc != 2)
{
fprintf(stderr, "Usage: %s <string>\n", argv[0]);
return EXIT_FAILURE;
} else {
printf("%s\n", replace(argv[1]));
}
} | {
"domain": "codereview.stackexchange",
"id": 44930,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, c",
"url": null
} |
c++, unit-testing, c++17, http, url-routing
Title: HTTP routing with Mongoose
Question: I'm trying to extend the Mongoose RESTful server example with URL routing.
This code relies on the Mongoose library available here.
This is heavily inspired by the routes class from this question but I've tried to use a multimap to store routes and make lookup more efficient, as well as support Mongoose's wildcard matches.
I've also added some unit tests (and intend to add more), but have had to rely on checking a side effect to determine test success. Is there a better way to do this?
CMakeLists.txt
cmake_minimum_required(VERSION 3.14)
project ("Mongoose_Routes")
set(CMAKE_CXX_STANDARD 17)
add_compile_options(-Werror -Wall -Wextra)
add_executable (Mongoose_Routes "main.cpp" "routes.cpp" "routes.h" "mongoose/mongoose.c")
include(FetchContent)
FetchContent_Declare(
googletest
URL https://github.com/google/googletest/archive/refs/tags/release-1.12.1.zip
)
# For Windows: Prevent overriding the parent project's compiler/linker settings
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
FetchContent_MakeAvailable(googletest)
add_executable(UnitTests "tests/routes_test.cpp" "routes.cpp" "routes.h" "mongoose/mongoose.c")
target_link_libraries(UnitTests PUBLIC gtest_main)
Routes
#ifndef _ROUTES_H_INCLUDED_
#define _ROUTES_H_INCLUDED_
#include <functional>
#include <map>
#include <string>
extern "C"
{
#include "mongoose/mongoose.h"
}
namespace Routes
{
enum class HTTPMethod
{
NONE,
GET,
POST,
PUT,
FIRST = GET,
LAST = PUT
};
enum class RouteStatus
{
SUCCESS,
E_NOROUTE,
E_NOMETHOD
};
typedef std::function<void(struct mg_connection*, struct mg_http_message*, void*)> RouteCallback; | {
"domain": "codereview.stackexchange",
"id": 44931,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, unit-testing, c++17, http, url-routing",
"url": null
} |
c++, unit-testing, c++17, http, url-routing
class RouteManager
{
public:
RouteStatus GetRouteCallback(mg_str path, HTTPMethod method, RouteCallback& callback) const;
bool SetRoute(std::string path, HTTPMethod method, std::function<void(struct mg_connection*, struct mg_http_message*, void*)> callback);
private:
typedef std::pair<std::string, HTTPMethod> RouteID;
struct RouteCompare
{
bool operator()(const RouteID& lhs, const RouteID& rhs) const
{
if (lhs.second == rhs.second)
{
size_t i = 0, j = 0, ni = 0, nj = 0;
std::string a = lhs.first, b = rhs.first;
while (i < a.size() || j < b.size())
{
if (i < a.size() && j < b.size() && (a[i] == '?' || b[j] == a[i]))
{
i++; j++;
}
else if (i < a.size() && (a[i] == '*' || a[i] == '#'))
{
ni = i++; nj = j + 1;
}
else if (nj > 0 && nj <= b.size() && (a[ni] == '#' || b[j] != '/'))
{
i = ni; j = nj;
}
else
{
if (i == j) return a[i] < b[j];
else return i < j;
}
}
return false;
}
else return lhs.second < rhs.second;
}
};
std::multimap<RouteID, RouteCallback, RouteCompare> routes;
};
HTTPMethod ParseHTTPMethod(mg_str method);
}
#endif // _ROUTES_H_INCLUDED_
#include "routes.h" | {
"domain": "codereview.stackexchange",
"id": 44931,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, unit-testing, c++17, http, url-routing",
"url": null
} |
c++, unit-testing, c++17, http, url-routing
HTTPMethod ParseHTTPMethod(mg_str method);
}
#endif // _ROUTES_H_INCLUDED_
#include "routes.h"
//----------------------------------------------------------------
//
//----------------------------------------------------------------
Routes::RouteStatus Routes::RouteManager::GetRouteCallback(mg_str path, HTTPMethod method, RouteCallback& callback) const
{
auto route = RouteID{ std::string(path.ptr, path.len), method };
auto lower = routes.lower_bound(route);
if (lower == routes.end() || !mg_match(path, mg_str(lower->first.first.c_str()), NULL)) return RouteStatus::E_NOROUTE;
else if (lower->first.second != method) return RouteStatus::E_NOMETHOD;
// Want the longest (and therefore lexicographically largest) path which matches
auto upper = routes.upper_bound(route);
while (std::next(lower) != upper) { lower = std::next(lower); }
callback = lower->second;
return RouteStatus::SUCCESS;
}
//----------------------------------------------------------------
//
//----------------------------------------------------------------
bool Routes::RouteManager::SetRoute(std::string path, HTTPMethod method,
std::function<void(struct mg_connection*, struct mg_http_message*, void*)> callback)
{
if (path.empty()) return false;
if (method < HTTPMethod::FIRST || method > HTTPMethod::LAST) return false;
if (callback == nullptr) return false;
routes.emplace(RouteID{ path, method }, callback);
return true;
}
//----------------------------------------------------------------
//
//----------------------------------------------------------------
Routes::HTTPMethod Routes::ParseHTTPMethod(mg_str m)
{
Routes::HTTPMethod method = Routes::HTTPMethod::NONE;
if (mg_vcmp(&m, "GET") == 0) method = Routes::HTTPMethod::GET;
else if (mg_vcmp(&m, "POST") == 0) method = Routes::HTTPMethod::POST;
else if (mg_vcmp(&m, "PUT") == 0) method = Routes::HTTPMethod::PUT;
return method;
}
Main
#include <memory> | {
"domain": "codereview.stackexchange",
"id": 44931,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, unit-testing, c++17, http, url-routing",
"url": null
} |
c++, unit-testing, c++17, http, url-routing
return method;
}
Main
#include <memory>
#include "routes.h"
//----------------------------------------------------------------
//
//----------------------------------------------------------------
void fn(struct mg_connection* c, int ev, void* ev_data, void* fn_data)
{
if (ev == MG_EV_HTTP_MSG)
{
Routes::RouteManager* pRouteManager = static_cast<Routes::RouteManager*>(fn_data);
struct mg_http_message* hm = (struct mg_http_message*)ev_data;
Routes::RouteCallback callback;
auto status = pRouteManager->GetRouteCallback(hm->uri, Routes::ParseHTTPMethod(hm->method), callback);
if (status == Routes::RouteStatus::SUCCESS)
{
(callback)(c, hm, fn_data);
}
else if (status == Routes::RouteStatus::E_NOMETHOD) mg_http_reply(c, 405, NULL, "");
else if (status == Routes::RouteStatus::E_NOROUTE) mg_http_reply(c, 404, NULL, "");
}
}
//----------------------------------------------------------------
//
//----------------------------------------------------------------
int main()
{
struct mg_mgr mgr;
mg_mgr_init(&mgr);
auto pRouteManager = std::make_unique<Routes::RouteManager>();
pRouteManager->SetRoute("/foo/#", Routes::HTTPMethod::GET,
[](struct mg_connection* c, struct mg_http_message*, void*)
{
mg_http_reply(c, 200, NULL, "GET");
});
pRouteManager->SetRoute("/foo/#", Routes::HTTPMethod::PUT,
[](struct mg_connection* c, struct mg_http_message*, void*)
{
mg_http_reply(c, 200, NULL, "PUT");
});
pRouteManager->SetRoute("/foo/bar", Routes::HTTPMethod::GET,
[](struct mg_connection* c, struct mg_http_message*, void*)
{
mg_http_reply(c, 200, NULL, "BAR");
});
if (mg_http_listen(&mgr, "localhost:8000", fn, pRouteManager.get()) == NULL)
{
perror("Failed to bind Webserver");
return EXIT_FAILURE;
} | {
"domain": "codereview.stackexchange",
"id": 44931,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, unit-testing, c++17, http, url-routing",
"url": null
} |
c++, unit-testing, c++17, http, url-routing
while (true)
{
mg_mgr_poll(&mgr, 100);
}
mg_mgr_free(&mgr);
exit(EXIT_SUCCESS);
}
Tests
#include <gtest/gtest.h>
#include <memory>
#include "../routes.h"
class RouteTest : public ::testing::Test
{
protected:
std::unique_ptr<Routes::RouteManager> pRouteManager;
void SetUp() override
{
pRouteManager = std::make_unique<Routes::RouteManager>();
}
};
TEST_F(RouteTest, Empty)
{
Routes::RouteCallback callback;
auto status = pRouteManager->GetRouteCallback(mg_str("/foo/bar"), Routes::HTTPMethod::GET, callback);
EXPECT_EQ(status, Routes::RouteStatus::E_NOROUTE);
}
TEST_F(RouteTest, GETSimple)
{
bool called = false;
pRouteManager->SetRoute("/foo/bar", Routes::HTTPMethod::GET,
[&called](struct mg_connection*, struct mg_http_message*, void*)
{
called = true;
});
pRouteManager->SetRoute("/foo/baz", Routes::HTTPMethod::GET,
[&called](struct mg_connection*, struct mg_http_message*, void*)
{
called = false;
});
Routes::RouteCallback callback;
auto status = pRouteManager->GetRouteCallback(mg_str("/foo/bar"), Routes::HTTPMethod::GET, callback);
if (status == Routes::RouteStatus::SUCCESS)
{
(callback)(nullptr, nullptr, nullptr);
EXPECT_TRUE(called);
}
EXPECT_EQ(status, Routes::RouteStatus::SUCCESS);
}
Edit: This implementation has bugs ("/foo/bar" vs "/foo/#" depends on which route is added first). A second test is required to determine which of the matching routes is largest.
Answer: Unnecessary use of smart pointers
You use std::unique_ptr twice, but both times it is unnecessary. In main(), you can just allocate a Route::RoutesManager on the stack:
int main() {
…
Routes::RouteManager routeManager;
routeManager.setRoute(…);
…
if (mg_http_listen(&mgr, "localhost:8000", fn, &routeManager) == NULL) {
…
}
…
} | {
"domain": "codereview.stackexchange",
"id": 44931,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, unit-testing, c++17, http, url-routing",
"url": null
} |
c++, unit-testing, c++17, http, url-routing
And in the test fixture you also can store it by value:
class RouteTest : public ::testing::Test
{
protected:
Routes::RouteManager routeManager;
};
TEST_F(RouteTest, Empty)
{
…
auto status = routeManager.GetRouteCallback(…);
…
}
Make RouteCompare::operator() readable and test it
The function RouteCompare::operator() is completely unreadable. Lots of one or two-letter variable names and no comments. I would also add lots of tests to ensure the comparisons work as expected, and especially test edge cases and weird uses of wildcards.
Create a class RouteID
Instead of using std::pair, I would make RouteID a proper class. That way, you can give names to the two fields, so you no longer have to use .first and .second, which require you to remember the correct order. Also, you can then move the comparison operator into class RouteID, so you no longer have to explicitly specify it when declaring the std::multimap.
Simplify GetRouteCallback()
You can use equal_range() instead of using both lower_bound() and upper_bound(), and make use of the fact that you can call std::prev() on the end() iterator, if you know that the lower bound was not equal to end():
auto [lower, upper] = routes.equal_range(route);
if (lower == routes.end() || …)
return RouteStatus::E_…;
callback = std::prev(upper)->second;
Using side-effects during testing
I've also added some unit tests (and intend to add more), but have had to rely on checking a side effect to determine test success. Is there a better way to do this?
You can make the callback return a value. Of course, just because your tests needed a bool called is not a good reason to have your callbacks return a bool. But I don't think it's necessary; it's perfectly fine to capture a reference to some variable in the callback function. No global variables were used, everything was local to the unit test. | {
"domain": "codereview.stackexchange",
"id": 44931,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, unit-testing, c++17, http, url-routing",
"url": null
} |
python
Title: Cleaning up a large python class for readability and becoming more pythonic/OO
Question: I have a class that takes in two strings via the __init___ method and then does a bunch of string manipulation to return numerous other strings via instance methods. Some instance methods rely on other instance methods. Anyways, I feel like this could be made more pythonic and OO, but I'm quite bad at more advanced OO topics, like using @property, @staticmethod, @classmethod, etc.
ACTIVE_DATASETS = active.datasets
class FileMetadata:
def __init__(self, object_bucket: str, object_location: str):
self.object_bucket = object_bucket
self.object_location = object_location
self.date = self._date()
self.source_bucket = self._source_bucket()
self.source_object_location = self._source_object_location()
self.source_object_path_prefix = self._source_object_path_prefix()
self.source_object_filename = self._source_object_filename()
self.source_object_extension = self._source_object_extension()
self.destination_bucket = self._destination_bucket()
self.destination_path_prefix = self._destination_path_prefix()
self.destination_object_location = self._destination_object_location()
self.datasets = self._get_datasets()
def _match_inbound_csv_to_dataset(
self,
):
datasets = []
for dataset in ACTIVE_DATASETS:
if re.match(rf"{dataset.file_match_regex}", self.source_object_filename):
datasets.append(dataset.dataset_name)
if len(datasets) == 0:
raise MatchingDatasetNotFound(
f"No matching dataset found for inbound file: {self.source_object_filename}"
)
if len(datasets) > 1:
raise MultipleMatchingDatasetsFound(
f"Multiple matching datasets found for inbound file: {self.source_object_filename}"
) | {
"domain": "codereview.stackexchange",
"id": 44932,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
if len(datasets) == 1:
print(
f"Matching dataset {datasets[0]} found for inbound file: {self.source_object_filename}"
)
return datasets
def _get_datasets(self):
if self.source_object_extension == "xlsx":
return None
else:
return self._match_inbound_csv_to_dataset()
def _date(self) -> str:
return datetime.today().strftime("%Y-%m-%d")
def _source_bucket(self) -> str:
return self.object_bucket
def _source_object_location(self) -> str:
return self.object_location
def _source_object_path_prefix(self) -> str:
"Used for filtering objects not dropped in the proper folder"
split = self.object_location.split("/")[:-1]
path = "/".join(split) + "/"
return path
def _source_object_filename(self) -> str:
return self.object_location.split("/")[-1]
def _source_object_extension(self) -> str:
split = self.source_object_filename.split(".")
if len(split) == 1:
return None
return self.source_object_filename.split(".")[-1]
def _destination_bucket(self) -> str:
return os.environ.get(
"EMPASSION_INGEST_BUCKET_NAME", "dev-ingest"
)
def _destination_path_prefix(self) -> str:
return f"{self.source_bucket}/{self.date}"
def _destination_object_location(self) -> str:
return f"{self.source_bucket}/{self.date}/{self.source_object_filename}"
it's called and used like so:
class Input(BaseModel):
object_bucket: str
object_location: str
class Output(BaseModel):
source_bucket: str
source_object_location: str
source_object_path_prefix: str
source_object_filename: str
source_object_extension: str
destination_bucket: str
destination_path_prefix: str
destination_object_location: str
datasets: List[str] | {
"domain": "codereview.stackexchange",
"id": 44932,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
@http
def main(request):
resp = Input(**request.get_json())
file_metadata = FileMetadata(resp.object_bucket, resp.object_location)
output = Output(
source_bucket=file_metadata.source_bucket,
source_object_location=file_metadata.source_object_location,
source_object_path_prefix=file_metadata.source_object_path_prefix,
source_object_filename=file_metadata.source_object_filename,
source_object_extension=file_metadata.source_object_extension,
destination_bucket=file_metadata.destination_bucket,
destination_path_prefix=file_metadata.destination_path_prefix,
destination_object_location=file_metadata.destination_object_location,
datasets=file_metadata.datasets,
)
return json.dumps(output.model_dump()), 200
Answer: Getters
You define a lot of "private" getters like self.date = self._date() which returns datetime.today.... Let's use a few @propertys for that
from pathlib import Path
class FileMetadata:
def __init__(self, object_bucket: str, object_location: str):
self.object_bucket = object_bucket
self.object_location = object_location
self.source_bucket = object_bucket
# this has some nice convenience features
p = Path(self.object_location)
# like p.parent, p.name, and p.suffix
self.source_object_path_prefix = f"{p.parent}/"
self.source_object_filename = p.name
self.source_object_extension = p.suffix
self.destination_bucket = os.environ.get(
"EMPASSION_INGEST_BUCKET_NAME", "dev-ingest"
)
self.datasets = self._get_datasets()
@property
def date(self):
# do you care if this script runs past midnight?
if not getattr(self, '_date', None):
self._date = datetime.today().strftime("%Y-%m-%d")
return self._date
@property
def source_object_location(self):
return self.object_location | {
"domain": "codereview.stackexchange",
"id": 44932,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
@property
def source_object_location(self):
return self.object_location
@property
def destination_path_prefix(self):
return f"{self.source_bucket}/{self.date}"
@property
def destination_object_location(self):
return (f"{self.destination_path_prefix}/{self.source_object_filename}"
)
def _get_datasets(self):
if self.source_object_extension == "xlsx":
return None
else:
return self._match_inbound_csv_to_dataset()
Now we have way fewer methods. While we're here, I've utilized pathlib.Path to take advantage of some convenience functions.
Repeated regexes
It might be useful to cache repeated regexes here:
class FileMetadata:
dataset_cache = {}
def __init__(self, ...):
~snip~
def _match_inbound_csv_to_dataset(
self,
):
datasets = []
for dataset in ACTIVE_DATASETS:
name, regex = dataset.name, dataset.file_match_regex
# pre-compile the regex
if name not in self.dataset_cache:
r = re.compile(regex)
self.dataset_cache[name] = r
else:
r = self.dataset_cache[name]
# use the compiled regex to match
if r.match(self.source_object_filename):
datasets.append(name)
Testing if a list is empty
Don't test if len(lst) == 0, use if lst:
if not datasets:
raise MatchingDatasetNotFound(
f"No matching dataset found for inbound file: {self.source_object_filename}"
)
self.datasets
I don't like the naming here, since you require only one dataset to match. I might be inclined to call it self.dataset, but it looks like it needs to be a list so I'll leave it alone. Let's use some unpacking to check the length of the list:
def _match_inbound_csv_to_dataset(self):
~snip~
dataset, *others = datasets | {
"domain": "codereview.stackexchange",
"id": 44932,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
if others:
raise MultipleMatchingDatasetsFound(
f"Multiple matching datasets found for inbound file: {self.source_object_filename}"
)
print(
f"Matching dataset {dataset} found for inbound file: {self.source_object_filename}"
)
return [dataset]
cached_property
If you are on python 3.8+ you have access to functools.cached_property which helps simplify the date property:
from functools import cached_property
class FileMetadata:
~snip~
@cached_property
def date(self):
return datetime.today().strftime("%Y-%m-%d") | {
"domain": "codereview.stackexchange",
"id": 44932,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
functional-programming, clojure, lisp
Title: My implementation of Clojure's assoc-in
Question: This is my implementation of Clojure's assoc-in function. I am looking for tips on making it more idiomatic and in general, better.
(defn my-assoc-in
[m [& ks] v]
(let [sorted-keys (reverse ks)]
(loop [curr-key (first sorted-keys)
rem-keys (rest sorted-keys)
curr-map (get-in m (reverse rem-keys))
acc-val (assoc curr-map curr-key v)]
(if (empty? rem-keys)
acc-val
(let [next-key (first rem-keys)
next-rem (rest rem-keys)
next-map (get-in m next-rem)
next-val (assoc next-map next-key acc-val)]
(recur next-key next-rem next-map next-val))))))
Answer: curr-map (get-in m (reverse rem-keys))
That seems expensive (linear in number of keys).
At this point, isn't (reverse rem-keys) simply
(rest ks) ? That's available in O(1) rather than O(N) time.
acc-val (assoc curr-map curr-key v)]
(if (empty? rem-keys)
acc-val
(let ...
We unconditionally create an assoc result.
But we only use it in the base case where there's no more keys.
Prefer to use that expression directly, eliding the acc-val symbol:
(if (empty? rem-keys)
(assoc curr-map curr-key v)
(let ...
I get that you were shooting for parallel structure
with the four let variables.
The next-val down there is very nice
and should be retained since it reads smoothly.
BTW, even if you keep all eight identifiers,
there's an opportunity to rename to curr-rem
and curr-val if you want to call out the parallel structure. | {
"domain": "codereview.stackexchange",
"id": 44933,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "functional-programming, clojure, lisp",
"url": null
} |
php
Title: How can I make my image upload more secure?
Question: I gathered a lot of information in order to try to make my code as secure as possible, and I would like to know what I can do to improve more its security. (Sorry for my english if it's bad)
What did I do:
Only logged-in users will be able to upload images.
If a user has uploaded more than 10 images in the last hour, they will have to answer a captcha for new uploads.
I check for errors during the upload.
I verify the file extension and mime type (using pathinfo and finfo).
I check if the tmp_name exists.
I limit the maximum file size.
Using getimagesize, I get the width and height to set maximum and minimum values and verify the mime type again.
Images will be stored in a folder outside the public_html directory.
All images will be converted to webp format.
Images will be displayed on a subdomain.
The folder and image files will have permissions set to 644.
Images will have two generated names: one for the .webp file and another name that will be used to identify the image in the database.
My implementation:
<?
//Allows only logged in users to upload images
if($ta_logado == 0){
die('{"error":"Você precisa estar logado"}');
}
//If the user has sent more than 10 images in the last hour, request captcha
if(lastHourUploadImage() > 10){
if (isset($_POST['g-recaptcha-response'])) {
$captcha_data = empty($_POST['g-recaptcha-response']) ? NULL : filter_input(INPUT_POST, 'g-recaptcha-response', FILTER_SANITIZE_STRING);
}else{
die('{"error":"desafio-captcha-pendente"}');
}
$ip_user_valid = filter_input(INPUT_SERVER, 'HTTP_CF_CONNECTING_IP', FILTER_VALIDATE_IP); | {
"domain": "codereview.stackexchange",
"id": 44934,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "php",
"url": null
} |
php
$ip_user_valid = filter_input(INPUT_SERVER, 'HTTP_CF_CONNECTING_IP', FILTER_VALIDATE_IP);
if(!filter_var($ip_user_valid, FILTER_VALIDATE_IP)){
$ip_user_valid = '0.0.0.0';
}
$resposta = file_get_contents("https://www.google.com/recaptcha/api/siteverify?secret=SECRET_KEY&response=".$captcha_data."&remoteip=".$ip_user_valid);
$resposta = json_decode($resposta, true)['success'];
if($resposta === false){
die('{"error":"Captcha invalida"}');
}
}
// Check if upload is not empty
if(empty($_FILES['upload'])){
die('{"error":"Faça upload da imagem"}');
}
// Check if upload is not empty
if (empty($_FILES['upload']['tmp_name'])) {
die('{"error":"Faça upload da imagem"}');
}
// Check for upload errors
if ($_FILES['upload']['error'] > 0) {
die('{"error":"Houve algum erro durante o upload da imagem"}');
}
$extensoesPermitidas = array('jpg', 'jpeg', 'png');
$mimePermitidos = array('image/jpeg', 'image/png');
$tamanhoMaximo = 2 * 1024 * 1024; // 2MB
// Check file extension
$extensao = pathinfo($_FILES['upload']['name'], PATHINFO_EXTENSION);
if (!in_array(strtolower($extensao), $extensoesPermitidas)) {
die('{"error":"Extensão de arquivo não permitida."}');
}
// Check file mime
$tipoArquivo = getMimeType($_FILES['upload']['tmp_name']);
if (!in_array($tipoArquivo, $mimePermitidos)) {
die('{"error":"Tipo de arquivo não permitido."}');
}
// Check if tmp file exists
if (!file_exists($_FILES['upload']['tmp_name'])){
die('{"error":"Imagem não encontrada no servidor."}');
}
// Check file size
if ($_FILES['upload']['size'] > $tamanhoMaximo) {
die('{"error":"O tamanho do arquivo excedeu o limite permitido."}');
}
//Check the image once more, but with GD now
$verifyimg2 = getimagesize($_FILES['upload']['tmp_name']);
if (!$verifyimg2){
die('{"error":"Imagem invalida"}');
}
//Width and height
$largura = $verifyimg2[0];
$altura = $verifyimg2[1]; | {
"domain": "codereview.stackexchange",
"id": 44934,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "php",
"url": null
} |
php
//Width and height
$largura = $verifyimg2[0];
$altura = $verifyimg2[1];
// Check minimum height and width
if ($largura < 100 || $altura < 100){
die('{"status": "error", "text": "Esse arquivo e muito pequeno tente enviar em dimensões superiores a 100px"}');
}
// Check maximum height and width
if ($largura > 2000 || $altura > 2000){
die('{"status": "error", "text": "Esse arquivo e muito grande tente enviar em dimensões inferiores a 2000px"}');
}
// Verify File Mime with GD
if (!in_array($verifyimg2['mime'], $mimePermitidos)) {
die('{"error":"Tipo de arquivo não permitido."}');
}
// Working the image with GD
if ($verifyimg2['mime'] == "image/jpeg") {
$image_create = imagecreatefromjpeg($_FILES['upload']['tmp_name']);
}
if ($verifyimg2['mime'] == "image/png") {
$image_create = imagecreatefrompng($_FILES['upload']['tmp_name']);
imagepalettetotruecolor($image_create);
imagealphablending($image_create, true);
imagesavealpha($image_create, true);
}
if(!isset($image_create)){
die('{"error":"Erro desconhecido."}');
}
//If tipo_upload is avatar, save in different folder
if(isset($_POST['tipo_upload']) && $_POST['tipo_upload'] == 'avatar'){
$path = createDirectoryIfNotExist('../upload/avatar_uploaded');
$is_avatar = 1;
}else{
$path = createDirectoryIfNotExist('../upload/images_uploaded');
$is_avatar = 0;
}
// If the image is an avatar, we will leave it with a size of 140px and if it is not, we will do nothing
if($is_avatar == 1){
// Cria uma nova imagem com o novo tamanho
$resizedImage = imagecreatetruecolor(140, 140);
// Redimensiona a imagem original para a nova imagem
imagecopyresampled(
$resizedImage, // Imagem de destino
$image_create, // Imagem original
0, 0, // Coordenadas da imagem de destino
0, 0, // Coordenadas da imagem original
140, 140, // Novo tamanho
$largura, $altura // Tamanho original
);
$image_create = $resizedImage; | {
"domain": "codereview.stackexchange",
"id": 44934,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "php",
"url": null
} |
php
$image_create = $resizedImage;
imagedestroy($resizedImage);
}
//Create file names
$name_save = uniqid('IMG-'.$id_user_logado.'-', true) . '_' . str_shuffle(implode(range('e', 'q'))).'.webp';
$slug_name = uniqid('IMG-'.$id_user_logado.'-', true) . '_' . str_shuffle(implode(range('e', 'q')));
// Convert image to WebP
$success = imagewebp($image_create, $path.'/'.$name_save, 80);
// Free memory
imagedestroy($image_create);
if ($success) {
// Set file permissions
chmod($path.'/'.$name_save, 0644);
//Send to the database
sql($db_user, "INSERT INTO images_upload(user_id, slug_name, name_save, data_send, is_avatar) VALUES (?, ?, ?, NOW(), ?)", array($id_user_logado, $slug_name, $name_save, $is_avatar), "fake");
die('{"success":"http://images.localhost/media/'.$slug_name.'"}');
} else {
die('{"error":"Erro ao concluir processamento do arquivo."}');
}
function createDirectoryIfNotExist($baseDir){
// get day and month
list($year, $month) = explode('-', date('y-m-d'));
// Create the path
$targetDir = $baseDir . '/' . $year . '/' . $month;
// Check the path
if (!is_dir($targetDir)) {
if (!mkdir($targetDir, 0644, true)) {
die('{"error":"Não foi possivel seguir com sua solicitação contate um administrador"}');
}
}
return $targetDir;
}
function lastHourUploadImage(){
global $db_user, $id_user_logado;
return sql($db_user, "SELECT * FROM images_upload WHERE user_id = ? AND data_send >= DATE_SUB(NOW(), INTERVAL 1 HOUR)", array($id_user_logado), "count");
}
function getMimeType($filename) {
$finfo = finfo_open(FILEINFO_MIME_TYPE);
$mimeType = finfo_file($finfo, $filename);
finfo_close($finfo);
return $mimeType;
}
How do I show the image
<?
//Dados banco de dados usuarios
define("HOST", 'localhost');
define("USER2", 'root');
define("PASS2", '');
define("NAME2", 'admin_account'); | {
"domain": "codereview.stackexchange",
"id": 44934,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "php",
"url": null
} |
php
//Conectamos ao banco de dados dos usuarios
try {
$db_user = new PDO("mysql:host=".HOST.";dbname=".NAME2.";charset=utf8mb4", "".USER2."", "".PASS2."");
$db_user->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);
$db_user->setAttribute(PDO::ATTR_DEFAULT_FETCH_MODE, PDO::FETCH_ASSOC);
$db_user->setAttribute(PDO::ATTR_EMULATE_PREPARES, false);
}catch(PDOException $e2) {
die("Erro ao conectar no banco de dados");
}
// Função basica para gerenciamento PDO
function sql($db_atual, $q, $params, $return) {
try {
//prepara a querry
$stmt = $db_atual->prepare($q);
//executa a query
$stmt->execute($params);
// retorna no formato solicitado pela variavel
if ($return == "rows") {
return $stmt->fetch();
}elseif ($return == "rowsall") {
return $stmt->fetchAll();
}elseif ($return == "count") {
return $stmt->rowCount();
}elseif( $return == "lastid"){
return $db_atual->lastInsertId();
}
}catch(PDOException $e) {
echo "DataBase Erro: ".$e->getMessage();
error_log($e->getMessage());
die();
}catch(Exception $e) {
echo "Erro Geral: ".$e->getMessage();
error_log($e->getMessage());
die();
}
}
if(!isset($_GET['image'])){
die('Informe a imagem');
}
$buscar_image = sql($db_user, "SELECT * FROM images_upload WHERE slug_name = ?", array($_GET['image']), "rows");
if(!isset($buscar_image['id'])){
die('ERROR - IMAGEM NÃO ENCONTRADA');
}
if($buscar_image['is_avatar'] == 1){
$info_path = 'avatar_uploaded';
}else{
$info_path = 'images_uploaded';
}
list($ano, $mes) = explode('-', date('y-m-d', strtotime($buscar_image['data_send'])));
$path = '../upload/'.$info_path.'/'.$ano.'/'.$mes.'/'.$buscar_image['name_save']; | {
"domain": "codereview.stackexchange",
"id": 44934,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "php",
"url": null
} |
php
$path = '../upload/'.$info_path.'/'.$ano.'/'.$mes.'/'.$buscar_image['name_save'];
header("Content-Type: image/webp");
header('X-Content-Type-Options: nosniff');
readfile($path);
exit;
Answer: Security
If you are worried about security (and you should), then you should not connect to the database with the root user. The Mysql root user has extended rights like reading other databases, reading arbitrary files on the file system, and even writing to some designated directories (or worse if you have explicitly tweaked configuration).
In case of vulnerability in your code (more specifically a SQL injection) this could escalate to takeover of your server, in addition to exfiltration of your database.
At first glance, I think this is the biggest oversight. A hacker will be going for the low hanging fruit, and an attack will not necessarily take place where you expect.
Create a user that has access to that database only, and do not grant more rights than necessary for your purpose. Usually you just need to read from and write to tables, you don't need to change schema or FILE privilege.
Speaking of SQL injection, you've done what has to be done.
Unchecked checks
Some stuff is redundant eg:
// Check if upload is not empty
if(empty($_FILES['upload'])){
die('{"error":"Faça upload da imagem"}');
}
// Check if upload is not empty
if (empty($_FILES['upload']['tmp_name'])) {
die('{"error":"Faça upload da imagem"}');
}
It should be sufficient to use the is_uploaded_file function and then there is no conceivable reason why the tmp_name attribute would be missing. Even if that happened, an exception would be raised and the code would simply stop.
And you also do this further down:
// Check if tmp file exists
if (!file_exists($_FILES['upload']['tmp_name'])){
die('{"error":"Imagem não encontrada no servidor."}');
} | {
"domain": "codereview.stackexchange",
"id": 44934,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "php",
"url": null
} |
php
Which is another unneeded check. The file must exist if the previous checks have gone through.
Make sure you have read the PHP docs on file uploads, which would be a good starting point to understand the possible pitfalls.
This at line 93:
// Verify File Mime with GD
if (!in_array($verifyimg2['mime'], $mimePermitidos)) {
die('{"error":"Tipo de arquivo não permitido."}');
}
is a repetition of lines 58-60:
if (!in_array($tipoArquivo, $mimePermitidos)) {
die('{"error":"Tipo de arquivo não permitido."}');
} | {
"domain": "codereview.stackexchange",
"id": 44934,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "php",
"url": null
} |
php
As far as I can tell, you are still processing the same image. It seems to me that you've added so many checks that you are losing track of the process.
This code could be simplified a bit.
It would surely be useful to split your code in small functions. For example, write a dedicated function to check images. Then the redundant code would stand out better.
File uploads are dangerous if not done right. Probably the most important measure is to restrict the file extension: if an attacker can upload a .php file (a webshell) to your server then you have a problem. Some webserver configuration can still mitigate this problem, for example I believe Nginx won't run files it does not own. Other pitfalls to avoid include directory traversal attacks, which can happen if you rely on the file name without sufficient sanitization.
I believe the code is safe enough at first glance. I haven't run it though, and this is nothing more than visual analysis.
File size and disposal
You're restricting the file size to 2 MB, which is low by today's standards. Pictures from digital cameras or smartphones typically weigh a couple of MB. Many of your users will have to tweak settings or edit the picture on their own before they can even upload it to your site. Consider raising the limit a bit. Since you are not using the move_uploaded_file function but building a new image, I suggest you erase the temporary file when you're done with it. Although the temp directory should get flushed at the next reboot, some servers do not reboot frequently. It's conceivable that the temp directory could fill up under heavy usage.
Misc
To determine that the user is logged in you rely on this:
$ta_logado == 0
It's not known how and when this variable is set, and whether it will always be in sync. I would rather expect a reference to a session object here. | {
"domain": "codereview.stackexchange",
"id": 44934,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "php",
"url": null
} |
python, numpy, cython
Title: Fast complex absolute argmax in Cython
Question: I'm thinking I implemented it optimally, but somehow it's much slower than what should be much slower, np.argmax(np.abs(x)). Where am I off?
Code rationale & results
Mathematically, abs is sqrt(real**2 + imag**2), but argmax(abs(x)) == argmax(abs(x)**2), so no need for square root
np.abs(x) also allocates and writes an array. Instead I overwrite a single value, current_abs2, which should eliminate allocation and only leave writing
Argmax logic should be identical to NumPy's (I've not checked but only one best way to do it?)
Views (R, I) are for... I don't recall, saw somewhere
So savings are in dropping sqrt and len(x)-sized allocation. Yet it's much slower...
%timeit np.argmax(np.abs(x))
%timeit abs_argmax(x.real, x.imag)
409 µs ± 2.33 µs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)
3.09 ms ± 14.9 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
Here's the generated C code, just the function; the whole _optimized.c is 26000 lines.
The following Numba achieves 108 µs, very satisfactory, though I'm interested in why Cython fails.
Code
import cython
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef int abs_argmax(double[:] re, double[:] im):
# initialize variables
cdef Py_ssize_t N = re.shape[0]
cdef double[:] R = re # view
cdef double[:] I = im # view
cdef Py_ssize_t i = 0
cdef int max_idx = 0
cdef double current_max = 0
cdef double current_abs2 = 0
# main loop
while i < N:
current_abs2 = R[i]**2 + I[i]**2
if current_abs2 > current_max:
max_idx = i
current_max = current_abs2
i += 1
# return
return max_idx
Setup & execution
I use python setup.py build_ext --inplace, setup.py shown at bottom. Then,
import numpy as np
from _optimized import abs_argmax
x = np.random.randn(100000) + 1j*np.random.randn(100000)
%timeit np.argmax(np.abs(x))
%timeit abs_argmax(x.real, x.imag) | {
"domain": "codereview.stackexchange",
"id": 44935,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, numpy, cython",
"url": null
} |
python, numpy, cython
setup.py (I forget the rationale, just took certain recommendations)
from distutils import _msvccompiler
_msvccompiler.PLAT_TO_VCVARS['win-amd64'] = 'amd64'
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy as np
setup(
ext_modules=cythonize(Extension("_optimized", ["_optimized.pyx"]),
language_level=3),
include_dirs=[np.get_include()],
)
Environment
Windows 11, i7-13700HX CPU, Python 3.11.4, Cython 3.0.0, setuptools 68.0.0, numpy 1.24.4
Answer: In a generated code (slightly edited for readability)
__pyx_v_current_abs2 = (
pow((*((double *) ((__pyx_v_R.data + __pyx_t_2 * __pyx_v_R.strides[0]) ))), 2.0) +
pow((*((double *) ((__pyx_v_I.data + __pyx_t_3 * __pyx_v_I.strides[0]) ))), 2.0)
);
I do not like calls to pow. Apparently, Cython is not smart enough, and transpiles ** 2 into a function call, rather than a simple multiplication. Try to help it:
current_abs2 = R[i]*R[i] + I[i]*I[i]
and see what happens. As for the rest — failed branch predictions, missed vectorization, etc. — we may only theorize.
"domain": "codereview.stackexchange",
"id": 44935,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, numpy, cython",
"url": null
} |
c++, reinventing-the-wheel, vectors
Title: C++ std::vector Implementation
Question: Implementation of the vector class in C++ for learning purposes. Tried to implement interesting parts from the API shown on cppreference.com, but some elements like custom allocator support are missing.
template <typename T>
class Vector {
public:
using value_type = T;
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
using reference = value_type&;
using const_reference = const value_type&;
using pointer = value_type*;
// Iterator support
using iterator = pointer;
using const_iterator = const pointer;
Vector(): data{nullptr}, m_size{0}, m_capacity{0} {}
explicit Vector(size_type capacity) : Vector() {
resize(capacity);
}
Vector(size_type count, const_reference value): Vector() {
resize(count, value);
}
template<typename InputIt>
Vector(InputIt first, InputIt last) : Vector() {
assign(first, last);
}
Vector(std::initializer_list<T> init) : Vector() {
assign(init.begin(), init.end());
}
Vector(const Vector& other): Vector() {
assignRange(other.begin(), other.end());
}
Vector& operator=(const Vector& other) {
Vector temp = other;
swap(temp);
return *this;
}
Vector(Vector&& other) noexcept : Vector() {
swap(other);
}
Vector& operator=(Vector&& other) noexcept {
swap(other);
return *this;
}
~Vector() {
clear();
deallocate(data);
}
size_type size() const noexcept {
return m_size;
}
reference operator[](size_type index) {
return data[index];
}
void resize(size_type count, const_reference value = T()) {
if (count < m_size) {
for (size_type i = count; i < m_size; i++) {
data[i].~T();
} | {
"domain": "codereview.stackexchange",
"id": 44936,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, reinventing-the-wheel, vectors",
"url": null
} |
c++, reinventing-the-wheel, vectors
} else if (count > m_size) {
if (count > m_capacity) {
reserve(count);
}
for (size_type i = m_size; i < count; i++) {
new (data + i) T(value);
}
}
m_size = count;
}
void reserve(size_type count) {
if (m_capacity < count) {
pointer newData = allocate(count);
if (m_size > 0) {
std::uninitialized_move(data, data + m_size, newData);
}
deallocate(data);
data = newData;
m_capacity = count;
}
}
void reserve_if_needed() {
if (m_size == m_capacity) {
if (m_capacity == 0) {
reserve(1);
} else {
reserve(m_capacity * 2);
}
}
}
void push_back(const_reference item) {
reserve_if_needed();
data[m_size++] = item;
}
void pop_back() {
// The standard says pop_back() on an empty vector is
// undefined behavior, so this check is possibly unnecessary
// since implementations can technically do whatever
// in case of undefined behavior?
if (m_size > 0) {
data[m_size - 1].~T();
m_size -= 1;
}
}
template<typename... Args>
void emplace_back(Args&&... args) {
reserve_if_needed();
new (data + m_size) T(std::forward<Args>(args)...);
m_size++;
}
void shrink_to_fit() {
if (m_capacity > m_size) {
pointer new_data = allocate(m_size);
if (m_size > 0) {
std::uninitialized_move(data, data + m_size, new_data);
}
deallocate(data);
data = new_data;
m_capacity = m_size;
}
}
void swap (Vector& other) noexcept {
using std::swap; | {
"domain": "codereview.stackexchange",
"id": 44936,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, reinventing-the-wheel, vectors",
"url": null
} |
c++, reinventing-the-wheel, vectors
void swap (Vector& other) noexcept {
using std::swap;
swap(data, other.data);
swap(m_size, other.m_size);
swap(m_capacity, other.m_capacity);
}
// Iterator support
iterator begin() const {
return data;
}
iterator end() const {
return data + m_size;
}
iterator insert(iterator pos, const_reference item) {
return insert(pos, 1, item);
}
iterator insert(iterator pos, size_type count, const_reference item) {
size_type index = pos - data;
size_type remaining = m_size - index;
if (m_capacity < m_size + count) {
reserve(m_size + count);
}
std::uninitialized_move(data + index, data + m_size, data + index + count);
std::uninitialized_fill(data + index, data + index + count, item);
m_size += count;
return data + index;
}
iterator erase(iterator pos) {
return erase(pos, pos + 1);
}
iterator erase(iterator first, iterator last) {
size_type n_elements = last - first;
size_type index = first - data;
for (size_type i = index; i < index + n_elements; i++) {
data[i].~T();
}
std::move(data + index + n_elements, data + m_size, data + index);
m_size -= n_elements;
return first;
}
template<typename InputIt>
void assign(InputIt first, InputIt last) {
assignRange(first, last);
}
void clear() noexcept {
for (std::size_t i = 0; i < m_size; i++) {
data[i].~T();
}
m_size = 0;
}
template<typename InputIt>
void assignRange(InputIt first, InputIt last) {
clear();
resize(std::distance(first, last));
std::uninitialized_copy(first, last, data);
}
void assign(size_type count, const_reference value) {
Vector temp(count, value);
swap(temp);
} | {
"domain": "codereview.stackexchange",
"id": 44936,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, reinventing-the-wheel, vectors",
"url": null
} |
c++, reinventing-the-wheel, vectors
private:
pointer data = nullptr;
size_type m_size;
size_type m_capacity;
pointer allocate(size_type count) {
return static_cast<pointer>(::operator new(count * sizeof(value_type)));
}
void deallocate(pointer p) {
if (p != nullptr) {
::operator delete(p);
}
}
};
Answer: As vector is such a common thing we review here I have written up a series of articles about implementing a vector.
Overview
Very good.
I found one major bug in push_back(). You have potential memory leaks in reserve() and shrink_to_fit() that are easy to fix. You can simplify your assignment operator (currently you have copy and move versions): they can be combined into a single version that works for both.
Minor comments about some missing functionality that is in std::vector.
Code review
Sure you can have empty vectors.
Vector(): data{nullptr}, m_size{0}, m_capacity{0} {}
But I am wondering if the best strategy is not to simply always allocate capacity for minimal size. How often is a vector allocated but not used?
But pointed out by @chrysante below, the std::vector default constructor is noexcept so can't have any memory allocation (as that can potentially throw). So if you want to go that route you can mark this default constructor noexcept.
Vector() noexcept
: data{nullptr}
, m_size{0}
, m_capacity{0}
{}
One comment on style. As with variable declarations in code blocks, it's nice to have member initialization in the constructor one per line (it's easier to read). You are not trying to save vertical space.
Slight deviation from the interface of std::vector! Sure you can do it. But it will confuse people. Also I use it to simplify things below. I have the same constructor in my class but mine is private so it can only be used internally.
explicit Vector(size_type capacity) : Vector() {
resize(capacity);
} | {
"domain": "codereview.stackexchange",
"id": 44936,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, reinventing-the-wheel, vectors",
"url": null
} |
c++, reinventing-the-wheel, vectors
You can simplify these two assignments into a single method:
Vector& operator=(const Vector& other) {
Vector temp = other;
swap(temp);
return *this;
}
Vector& operator=(Vector&& other) noexcept {
swap(other);
return *this;
}
Easy to write as:
// Notice the parameter is value.
// If passed a RValue it is moved into the parameter.
// If passed an LValue it is copied.
// So you get the same effect with less code.
Vector& operator=(Vector other) noexcept
{
swap(other);
return *this;
}
This is good:
reference operator[](size_type index) {
return data[index];
}
But what about accesses to a const Vector? Just because you can't modify does not mean you can't use the operator[] on it.
const_reference operator[](size_type index) const
{
return data[index];
}
While we are here: Why is there no at() method?
void resize(size_type count, const_reference value = T()) {
// Not valid here ^^^^^^
// That should be in the header only
This is fine. But std::vector destroys them from back to front.
Just like how it deletes elements during destruction. This is to mimic the behavior of the C-style array (objects are destroyed in reverse order of creation).
if (count < m_size) {
for (size_type i = count; i < m_size; i++) {
data[i].~T();
}
} else if (count > m_size) {
if (count > m_capacity) {
reserve(count);
}
for (size_type i = m_size; i < count; i++) {
new (data + i) T(value);
}
}
m_size = count;
}
void reserve(size_type count) {
if (m_capacity < count) {
pointer newData = allocate(count); | {
"domain": "codereview.stackexchange",
"id": 44936,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, reinventing-the-wheel, vectors",
"url": null
} |
c++, reinventing-the-wheel, vectors
// Not sure why you need to check against 0 here.
// Copying zero data is a NOOP so it would not be more expensive.
// If I had to guess, this is actually a pessimization as your
// code has an extra branch.
if (m_size > 0) {
// A new function to me.
// That's really cool.
std::uninitialized_move(data, data + m_size, newData);
}
deallocate(data);
data = newData;
m_capacity = count;
}
}
The one thing here I would watch is that you have a potential leak.
It's hard to spot. But if the type T does not support move construction then the compiler will use the copy constructor during the std::uninitialized_move. If one of the copy constructors fails (i.e. throws) then you will leave this function needing to clean up newData. Though your function does provide the strong exception guarantee.
You can make this simpler by re-using the Vector :-)
void reserve(size_type count) {
if (m_capacity < count) {
Vector temp(count); // Use your vector with pre-reserved size.
// Remember that Vector is a friend of Vector
// So you can reach into the other class and mess with
// its members (just remember to unit test).
std::uninitialized_move(data, data + m_size, temp.data);
temp.m_size = m_size;
swap(temp);
}
}
This is broken. You are pushing into uninitialized memory so you need to construct the object in place.
void push_back(const_reference item) {
reserve_if_needed();
data[m_size++] = item;
// Should be this.
new (std::addressof(data[m_size++])) T(item);
} | {
"domain": "codereview.stackexchange",
"id": 44936,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, reinventing-the-wheel, vectors",
"url": null
} |
c++, reinventing-the-wheel, vectors
// Should be this.
new (std::addressof(data[m_size++])) T(item);
}
What about pushing an RVALUE?
Replace the above with:
// Note: Pass by value.
// RVALUE are moved into item then moved to container.
// LVALUE are copied into item then moved to the container.
void push_back(T item) {
reserve_if_needed();
new (std::addressof(data[m_size++])) T(std::move(item));
}
if (m_size > 0) {
// Why not swap the next two lines?
// This would make the line to call the destructor
// simpler and easier to read as you don't need the -1
data[m_size - 1].~T();
m_size -= 1;
}
Same issue as reserve()
void shrink_to_fit() {
if (m_capacity > m_size) {
pointer new_data = allocate(m_size);
if (m_size > 0) {
std::uninitialized_move(data, data + m_size, new_data);
}
deallocate(data);
data = new_data;
m_capacity = m_size;
}
}
Same solution:
void shrink_to_fit() {
if (m_capacity > m_size) {
Vector temp(m_size); // Vector with reserved capacity
std::uninitialized_move(data, data + m_size, temp.data);
temp.m_size = m_size;
swap(temp);
}
}
Sure standard iterators:
// Iterator support
iterator begin() const {
return data;
}
iterator end() const {
return data + m_size;
} | {
"domain": "codereview.stackexchange",
"id": 44936,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, reinventing-the-wheel, vectors",
"url": null
} |
c++, reinventing-the-wheel, vectors
iterator end() const {
return data + m_size;
}
But what about const iterators or reverse iterators or const reverse iterators? Also when calling begin() on a const object you will get a const_iterator.
iterator begin() { // normal begin
const_iterator begin() const { // begin on const object
const_iterator cbegin() const { // explicitly asking for const
reverse_iterator rbegin() { // normal rbegin
const_reverse_iterator rbegin() const { // rbegin on const object
const_reverse_iterator crbegin() const { // explicitly asking for const reverse
etc
To be similar to C-style arrays (and the C++ standard idiom that objects are destroyed in reverse order of creation), you should destroy the members in reverse order.
void clear() noexcept {
for (std::size_t i = 0; i < m_size; i++) {
data[i].~T();
}
m_size = 0;
}
No need to check for null pointers here!
void deallocate(pointer p) {
if (p != nullptr) {
::operator delete(p);
}
}
Questions:
What is the slight deviation from the standard interface in the explicit Vector(size_type capacity)? Is it that the param is named 'capacity' and not 'count'?
If you look at the standard (I link to a non standard but reputable source) std::vector::vector you will see there is no constructor that takes a "capacity" (ie. you have allocated space but zero size). There is one that takes a size and fills it with values (but you have that one).
When you say "T()" should be in the header only, do you mean the template declaration?
No. Default Parameters should be defined in the class header file only. They are not part of the declaration:
class Vector
{
void resize(size_type count, const_reference value = T());
// This is good.
// Reading the interface you see you can have a default value.
}; | {
"domain": "codereview.stackexchange",
"id": 44936,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, reinventing-the-wheel, vectors",
"url": null
} |
c++, reinventing-the-wheel, vectors
// Don't define the default parameters here.
// It should generate an error if you do this.
void Vector::resize(size_type count, const_reference value)
{
// STUFF
}
How does constructing the item in place in push_back() resolve the uninitialized memory issue?
This is a problem because:
// This code uses the assignment operator.
data[m_size++] = item;
// The assignment operator assumes that the object
// referenced on the left hand side has already been constructed
// but in your case that is not true; this is uninitialized memory.
// So you are using assignment to uninitialized memory
// which could be anything and thus with non trivial T
// will cause an issue.
This is solved by constructing in place:
// Should be this.
new (std::addressof(data[m_size++])) T(item);
The constructor assumes the memory has not been constructed before. When you pass the address to placement operator new, it will not allocate space but simply use the pointer provided as the location, and then call the constructor for the type T to correctly initialize the memory.
do you have any references or documentation on the "pass by value" idiom for avoiding the two assignment operators?
Nope. There are lots of questions on Stackoverflow that go over this though. Should be simple to find.
And does the SFINAE approach involve checking std::is_nothrow_move_constructible or something in overload resolution? I should look into how this is done in some compiler implementation
Yep. | {
"domain": "codereview.stackexchange",
"id": 44936,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, reinventing-the-wheel, vectors",
"url": null
} |
php
Title: Is overloading a database object a bad practice?
Question: I created the below class as a convenient way to create instances of PDO that I then inject into the classes that need access to my database. Instead of doing $this->db->pdo in subsequent classes, I overloaded the call method, but I've been reading in some posts that it's a bad practice. Would it be better to just create a static method that returns PDO that I then inject into my classes, or is there another better way of doing this? (Is there some kind of design pattern similar to this?) Thanks.
<?php
namespace ShipExplorer\Database;
use PDO;
class Database
{
protected $pdo;
function __construct($config = ['host' => 'localhost', 'user' => 'root', 'password' => '', 'dbname' => ''], $pdoOptions = [])
{
$dsn = "mysql:host={$config['host']};dbname={$config['dbname']}";
$this->pdo = new PDO($dsn, $config['user'], $config['password'], $pdoOptions);
}
function __call($name, $arguments)
{
$result = $this->pdo->$name(...$arguments);
if ($result) {
return $result;
}
}
}
Answer: I have to be careful here and stick to objective arguments.
Let's first get one thing out of the way. Many people argue that PDO is a complete database class in itself that doesn't need another class to make it accessible. I understand their point, but I don't fully agree with it. Sometimes it can be useful to add something to the PDO class. However, if you do, you better have a good reason for it.
More about overloading
Overloading makes your code more difficult to understand, especially in more complicated cases. I admit that in your case this is not a real problem.
People who use an Integrated Development Environment (IDE) often find that the tools, which help them write code, don't work well with overloading.
You don't check, in __call(), whether a method exists, which means you can get errors that are more difficult to debug. | {
"domain": "codereview.stackexchange",
"id": 44937,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "php",
"url": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.