text stringlengths 1 2.12k | source dict |
|---|---|
python
5-state,3.138042382334295,443,k,k1,k2,scaling_factor9_6,2386448359.562344±19494372901467.324,2.2797663839686257±8.99213073838378,0.00014840645650715523±1.2177908738987473,9.108420018222319e-09±1.720118310283389e-11
5-state,1.5030255742867429,443,k,k1,k2,scaling_factor5_2,4120363589.170806±35342036476688.13,4.820419295200315±33.97683403933,0.00015426414264951838±1.3288314349534769,4.700186018169461e-09±9.988778188425362e-12
5-state,1.1791747642993826,443,k,k1,k2,scaling_factor2_9,16015799.719901312±459587190.82078475,407223.47455762303±142981362319.22458,0.0658662751744219±2.1169942760134686,2.378018226778522e-09±7.525996407008208e-12 | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
And here is my code to extract data from the above csv
import re
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import approx_fprime
sample_list2=['WT_2017','L273A_2019','L273A_2020','I272A_2017','I272A_2019','I272A_2020','ILAA_2017','ILAA_2019','ILAA_2020']
mw=42000
def convert_concentration(conc):
return float(((conc/mw)*1000000000))
def filter_data(input_line):
temp_solutions=[]
temp_uncertanties=[]
temp_variable_names=[]
temp_concentrations=[]
temp_fit_names=[input_line[0]]
for items in input_line:
variable_names=re.search('k\d*|scaling_factor(\d+_\d+)*',items)
if re.search('\u00B1',items) is not None:
temp_solutions.append(items.split('\u00B1')[0])
temp_uncertanties.append(items.split('\u00B1')[1])
if variable_names is not None:
temp_variable_names.append(variable_names.group(0))
if variable_names.group(1) is not None:
temp_concentrations.append(convert_concentration(float(variable_names.group(1).replace('_','.'))))
return [temp_fit_names,temp_variable_names,temp_solutions,temp_uncertanties],temp_concentrations | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
def import_data():
count=-1
samples=[]
mega_list=[]
total_concentrations=[]
per_line_1=[]
per_line_2=[]
per_line_3=[]
per_line_4=[]
per_line_5=[]
per_line_6=[]
per_line_7=[]
per_line_8=[]
per_line_9=[]
temp_concentrations_1=[]
temp_concentrations_2=[]
temp_concentrations_3=[]
temp_concentrations_4=[]
temp_concentrations_5=[]
temp_concentrations_6=[]
temp_concentrations_7=[]
temp_concentrations_8=[]
temp_concentrations_9=[]
with open('scaled_uncertanty_fits.txt') as data_file:
for lines in data_file:
line=lines.split(',')
sample_names=(line[0]).strip()
if sample_names in sample_list2:
samples.append(sample_names)
count+=1
if re.search('Open-Closed',sample_names) is not None and samples[count] == 'WT_2017':
per_line_temp,concentration=filter_data(line)
temp_concentrations_1.append(concentration)
per_line_1.append(per_line_temp)
if re.search('Monomer-Closed',sample_names) is not None and samples[count] == 'L273A_2019':
per_line_temp,concentration=filter_data(line)
temp_concentrations_2.append(concentration)
per_line_2.append(per_line_temp)
if re.search('Monomer-Closed',sample_names) is not None and samples[count] == 'L273A_2020':
per_line_temp,concentration=filter_data(line)
temp_concentrations_3.append(concentration)
per_line_3.append(per_line_temp)
if re.search('Monomer-Closed',sample_names) is not None and samples[count] == 'I272A_2017':
per_line_temp,concentration=filter_data(line)
temp_concentrations_4.append(concentration)
per_line_4.append(per_line_temp) | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
per_line_4.append(per_line_temp)
if re.search('Monomer-Closed',sample_names) is not None and samples[count] == 'I272A_2019':
per_line_temp,concentration=filter_data(line)
temp_concentrations_5.append(concentration)
per_line_5.append(per_line_temp)
if re.search('Monomer-Closed',sample_names) is not None and samples[count] == 'I272A_2020':
per_line_temp,concentration=filter_data(line)
temp_concentrations_6.append(concentration)
per_line_6.append(per_line_temp)
if re.search('3-state',sample_names) is not None and samples[count] == 'ILAA_2017':
per_line_temp,concentration=filter_data(line)
temp_concentrations_7.append(concentration)
per_line_7.append(per_line_temp)
if re.search('3-state',sample_names) is not None and samples[count] == 'ILAA_2019':
per_line_temp,concentration=filter_data(line)
temp_concentrations_8.append(concentration)
per_line_8.append(per_line_temp)
if re.search('3-state',sample_names) is not None and samples[count] == 'ILAA_2020':
per_line_temp,concentration=filter_data(line)
temp_concentrations_9.append(concentration)
per_line_9.append(per_line_temp)
total_concentrations.append(list(dict.fromkeys([item for sublist in temp_concentrations_1 for item in sublist])))
mega_list.append(per_line_1)
total_concentrations.append(list(dict.fromkeys([item for sublist in temp_concentrations_2 for item in sublist])))
mega_list.append(per_line_2)
total_concentrations.append(list(dict.fromkeys([item for sublist in temp_concentrations_3 for item in sublist])))
mega_list.append(per_line_3)
total_concentrations.append(list(dict.fromkeys([item for sublist in temp_concentrations_4 for item in sublist]))) | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
mega_list.append(per_line_4)
total_concentrations.append(list(dict.fromkeys([item for sublist in temp_concentrations_5 for item in sublist])))
mega_list.append(per_line_5)
total_concentrations.append(list(dict.fromkeys([item for sublist in temp_concentrations_6 for item in sublist])))
mega_list.append(per_line_6)
total_concentrations.append(list(dict.fromkeys([item for sublist in temp_concentrations_7 for item in sublist])))
mega_list.append(per_line_7)
total_concentrations.append(list(dict.fromkeys([item for sublist in temp_concentrations_8 for item in sublist])))
mega_list.append(per_line_8)
total_concentrations.append(list(dict.fromkeys([item for sublist in temp_concentrations_9 for item in sublist])))
mega_list.append(per_line_9)
return mega_list,total_concentrations | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
def open_closed(k,conc):
open_concentration=(k*conc)/(k+1)
closed_concentration=conc/(k+1)
monomer_concentration=0
return np.array([open_concentration/conc,closed_concentration/conc,monomer_concentration/conc])
def monomer_closed(k,conc):
open_concentration=0
monomer_concentration=(np.sqrt((8*k*conc)+k**2)-k)/4
closed_concentration=(-np.sqrt(k)*np.sqrt((8*conc)+k)+k+(4*conc))/4
return np.array([open_concentration/conc,closed_concentration/conc,monomer_concentration/conc])
def three_state(k0,k1,conc):
monomer_concentration=((np.sqrt(k0)*np.sqrt(k1))*(np.sqrt((8*conc*(k1+1))+(k0*k1)))-(k0*k1))/(4*(k1+1))
open_concentration=(k1*((-np.sqrt(k0)*np.sqrt(k1))*(np.sqrt((8*conc*(k1+1))+(k0*k1)))+(k0*k1)+(4*conc*(k1+1))))/(4*((k1+1)**2))
closed_concentration=((-np.sqrt(k0)*np.sqrt(k1))*(np.sqrt((8*conc*(k1+1))+(k0*k1)))+(k0*k1)+(4*conc*(k1+1)))/(4*((k1+1)**2))
return np.array([open_concentration/conc,closed_concentration/conc,monomer_concentration/conc]) | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
mega_list,total_concentrations=import_data()
for items,concentrations,name in zip(mega_list,total_concentrations,sample_list2):
concen_individuals=[]
populations_monomer_individuals=[]
populations_closed_individuals=[]
populations_open_individuals=[]
populations_monomer_errors=[]
populations_closed_errors=[]
populations_open_errors=[]
h=1e-8
for i in items:
concen=[]
populations_monomer=[]
populations_closed=[]
populations_open=[]
monomer_errors=[]
closed_errors=[]
open_errors=[]
sample_name,variable_names,solutions,uncertanties=i
if re.search('Scale(s)*',sample_name[0]) is not None:
k_value=float(solutions[variable_names.index('k')])
k_error=float(uncertanties[variable_names.index('k')])
if re.search('Open-Closed',sample_name[0]) is not None:
for io in concentrations:
fo,fc,fm=open_closed(k_value,io)
populations_open.append(fo)
populations_closed.append(fc)
populations_monomer.append(fm)
concen.append(io)
dk_fo,dk_fc,dk_fm=((open_closed(k_value+h,io)-open_closed(k_value,io))/h)
open_errors.append((k_error**2*dk_fo**2))
closed_errors.append((k_error**2*dk_fc**2))
monomer_errors.append((k_error**2*dk_fm**2))
if re.search('Monomer-Closed',sample_name[0]) is not None:
for io in concentrations:
fo,fc,fm=monomer_closed(k_value,io)
populations_open.append(fo)
populations_closed.append(fc)
populations_monomer.append(fm)
concen.append(io)
dk_fo,dk_fc,dk_fm=((monomer_closed(k_value+h,io)-monomer_closed(k_value,io))/h)
open_errors.append((k_error**2*dk_fo**2)) | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
open_errors.append((k_error**2*dk_fo**2))
closed_errors.append((k_error**2*dk_fc**2))
monomer_errors.append((k_error**2*dk_fm**2))
if re.search('3-state',sample_name[0]) is not None:
k1_value=float(solutions[variable_names.index('k1')])
k1_error=float(uncertanties[variable_names.index('k1')])
for io in concentrations:
fo,fc,fm=three_state(k_value,k1_value,io)
populations_open.append(fo)
populations_closed.append(fc)
populations_monomer.append(fm)
concen.append(io)
dk_fo,dk_fc,dk_fm=((three_state(k_value+h,k1_value,io)-three_state(k_value,k1_value,io))/h)
dk1_fo,dk1_fc,dk1_fm=((three_state(k_value,k1_value+h,io)-three_state(k_value,k1_value,io))/h)
open_errors.append((k_error**2*dk_fo**2)+(k1_error**2+dk_fo**2))
closed_errors.append((k_error**2*dk_fc**2)+(k1_error**2+dk_fc**2))
monomer_errors.append((k_error**2*dk_fm**2)+(k1_error**2+dk_fm**2))
if re.search('Individual',sample_name[0]) is not None:
plt.scatter(concen,populations_monomer,c='b',label='Monomer Individual Scale')
plt.scatter(concen,populations_open,c='g',label='Open Individual Scale')
plt.scatter(concen,populations_closed,c='r',label='Closed Individual Scale')
plt.errorbar(concen,populations_monomer,yerr=monomer_errors,linestyle='None',ecolor='b')
plt.errorbar(concen,populations_open,yerr=open_errors,linestyle='None',ecolor='g')
plt.errorbar(concen,populations_closed,yerr=closed_errors,linestyle='None',ecolor='r')
else: | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
else:
plt.scatter(concen,populations_monomer,c='b',label='Monomer Single Scale',marker='^')
plt.scatter(concen,populations_open,c='g',label='Open Single Scale',marker='^')
plt.scatter(concen,populations_closed,c='r',label='Closed Single Scale',marker='^')
plt.errorbar(concen,populations_monomer,yerr=monomer_errors,linestyle='None',ecolor='b')
plt.errorbar(concen,populations_open,yerr=open_errors,linestyle='None',ecolor='g')
plt.errorbar(concen,populations_closed,yerr=closed_errors,linestyle='None',ecolor='r')
else:
k_value=float(solutions[variable_names.index('k')])
k_error=float(uncertanties[variable_names.index('k')])
for entries in variable_names:
if re.search('scaling_factor(\d+_\d+)',entries) is not None:
io=convert_concentration(float(((re.search('scaling_factor(\d+_\d+)',entries)).group(1)).replace('_','.')))
if re.search('Open-Closed',sample_name[0]) is not None:
fo,fc,fm=open_closed(k_value,io)
dk_fo,dk_fc,dk_fm=((open_closed(k_value+h,io)-open_closed(k_value,io))/h)
populations_open_errors.append((k_error**2*dk_fo**2))
populations_closed_errors.append((k_error**2*dk_fc**2))
populations_monomer_errors.append((k_error**2*dk_fm**2))
if re.search('Monomer-Closed',sample_name[0]) is not None:
fo,fc,fm=monomer_closed(k_value,io)
dk_fo,dk_fc,dk_fm=((monomer_closed(k_value+h,io)-monomer_closed(k_value,io))/h)
populations_open_errors.append((k_error**2*dk_fo**2))
populations_closed_errors.append((k_error**2*dk_fc**2))
populations_monomer_errors.append((k_error**2*dk_fm**2))
if re.search('3-state',sample_name[0]) is not None: | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
if re.search('3-state',sample_name[0]) is not None:
k1_value=float(solutions[variable_names.index('k1')])
k1_error=float(uncertanties[variable_names.index('k1')])
fo,fc,fm=three_state(k_value,k1_value,io)
dk_fo,dk_fc,dk_fm=((three_state(k_value+h,k1_value,io)-three_state(k_value,k1_value,io))/h)
dk1_fo,dk1_fc,dk1_fm=((three_state(k_value,k1_value+h,io)-three_state(k_value,k1_value,io))/h)
populations_open_errors.append((k_error**2*dk_fo**2)+(k1_error**2+dk_fo**2))
populations_closed_errors.append((k_error**2*dk_fc**2)+(k1_error**2+dk_fc**2))
populations_monomer_errors.append((k_error**2*dk_fm**2)+(k1_error**2+dk_fm**2)) | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
populations_open_individuals.append(fo)
populations_closed_individuals.append(fc)
populations_monomer_individuals.append(fm)
concen_individuals.append(io)
print(populations_closed_errors)
plt.scatter(concen_individuals,populations_monomer_individuals,c='b',label='Monomer Individual Fit',marker='X')
plt.scatter(concen_individuals,populations_open_individuals,c='g',label='Open Individual Fit',marker='X')
plt.scatter(concen_individuals,populations_closed_individuals,c='r',label='Closed Individual Fit',marker='X')
a=plt.errorbar(concen_individuals,populations_monomer_individuals,yerr=populations_monomer_errors,linestyle='None',ecolor='b')
b=plt.errorbar(concen_individuals,populations_open_individuals,yerr=populations_open_errors,linestyle='None',ecolor='g')
c=plt.errorbar(concen_individuals,populations_closed_individuals,yerr=populations_closed_errors,linestyle='None',ecolor='r')
a[-1][0].set_linestyle('--')
b[-1][0].set_linestyle('--')
c[-1][0].set_linestyle('--')
plt.title(name)
plt.legend()
plt.xlabel('Concentrations(nM)')
plt.ylabel('Fractional Populations')
plt.ylim((-0.1,1.1))
plt.show()
Answer:
the only way I could get this to work is....bad.
Yes. This is what Pandas was made for, and should replace basically everything that you've done in your current program (plotting aside; that part is vaguely OK).
The only big challenge is that your input file format is truly cursed. It has jagged rows, +/- separators, and what look to be dataset titles interspersed with real data.
The following approach can be used to normalize the data:
import pandas as pd
MAX_VARS = 20
MW = 42_000 | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
MAX_VARS = 20
MW = 42_000
def load(filename: str = 'scaled_uncertanty_fits.txt') -> pd.DataFrame:
df = pd.read_csv(
filename,
sep=',|±',
skiprows=1, # skip the original headers - they aren't wide enough
names=[
'Sample', 'RedChi2', 'DoF', *range(3*MAX_VARS),
]
)
# Forward-fill the dataset name (e.g. WT_2017)
is_dataset = df.iloc[:, 1].isna()
df.insert(loc=0, column='dataset', value=df.loc[is_dataset, 'Sample'])
df['dataset'] = df['dataset'].ffill()
return df[~is_dataset]
def normalize_vars(df: pd.DataFrame) -> pd.DataFrame:
"""Normalize variable-solution-uncertainty triples"""
var_offset = 4
var_cols = df.iloc[:, var_offset:]
meta_names = pd.Index(name='varmeta', data=['Variable', 'Solution', 'Uncertainty'])
rectangular = pd.DataFrame(
index=df.index,
columns=pd.MultiIndex.from_product((
meta_names,
pd.RangeIndex(name='varno', start=0, stop=MAX_VARS),
))
)
n_vars = var_cols.notna().sum(axis=1)//3
for row_vars, group in var_cols.groupby(n_vars):
source = group.iloc[:, :row_vars]
source.columns = pd.MultiIndex.from_product(
(('Variable',), range(row_vars)),
names=('varmeta', 'varno'),
)
rectangular.loc[group.index, source.columns] = source
source = group.iloc[:, row_vars: row_vars*2]
source.columns = pd.MultiIndex.from_product(
(('Solution',), range(row_vars)),
names=('varmeta', 'varno'),
)
rectangular.loc[group.index, source.columns] = source
source = group.iloc[:, row_vars*2: row_vars*3]
source.columns = pd.MultiIndex.from_product(
(('Uncertainty',), range(row_vars)),
names=('varmeta', 'varno'),
)
rectangular.loc[group.index, source.columns] = source | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
long = rectangular.stack(level='varno')
normalized = pd.merge(
left=df[['dataset', 'Sample', 'RedChi2', 'DoF']], right=long[meta_names],
left_index=True, right_on=long.index.get_level_values(0),
).drop('key_0', axis=1).set_index(long.index)
return normalized.astype({
'Solution': float, 'Uncertainty': float,
})
def main() -> None:
df = load()
df = normalize_vars(df)
# ...
if __name__ == '__main__':
main()
From here, everything is "easy". For instance: want to convert concentrations, only for scaling_factor variables, only for the WT_2017 dataset and sample Open-Closed?
converted = df.loc[
df['Variable'].str.startswith('scaling_factor')
& (df['dataset'] == 'WT_2017')
& (df['Sample'] == 'Open-Closed'),
['Solution', 'Uncertainty'],
] / MW * 1e9
Solution Uncertainty
varno
22 1 220.922794 1.177406e-07
25 1 173.364558 4.268723e-08
28 1 0.000000 0.000000e+00
Here is an example demonstrating some of the logic that you wrote for concentrations and errors, with Pandas:
import numpy as np
import pandas as pd
MAX_VARS = 20
MW = 42_000
def load(filename: str = 'scaled_uncertanty_fits.txt') -> pd.DataFrame:
df = pd.read_csv(
filename,
sep=',|±',
skiprows=1, # skip the original headers - they aren't wide enough
names=[
'Sample', 'RedChi2', 'DoF', *range(3*MAX_VARS),
]
)
df.index.name = 'csv_index'
# Forward-fill the dataset name (e.g. WT_2017)
is_dataset = df.iloc[:, 1].isna()
df.insert(loc=0, column='dataset', value=df.loc[is_dataset, 'Sample'])
df['dataset'].ffill(inplace=True)
return df[~is_dataset] | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
def normalize_vars(df: pd.DataFrame) -> pd.DataFrame:
"""Normalize variable-solution-uncertainty triples"""
var_offset = 4
var_cols = df.iloc[:, var_offset:]
meta_names = pd.Index(name='varmeta', data=['Variable', 'Solution', 'Uncertainty'])
rectangular = pd.DataFrame(
index=df.index,
columns=pd.MultiIndex.from_product((
meta_names,
pd.RangeIndex(name='varno', start=0, stop=MAX_VARS),
))
)
n_vars = var_cols.notna().sum(axis=1)//3
for row_vars, group in var_cols.groupby(n_vars):
source = group.iloc[:, :row_vars]
source.columns = pd.MultiIndex.from_product(
(('Variable',), range(row_vars)),
names=('varmeta', 'varno'),
)
rectangular.loc[group.index, source.columns] = source
source = group.iloc[:, row_vars: row_vars*2]
source.columns = pd.MultiIndex.from_product(
(('Solution',), range(row_vars)),
names=('varmeta', 'varno'),
)
rectangular.loc[group.index, source.columns] = source
source = group.iloc[:, row_vars*2: row_vars*3]
source.columns = pd.MultiIndex.from_product(
(('Uncertainty',), range(row_vars)),
names=('varmeta', 'varno'),
)
rectangular.loc[group.index, source.columns] = source
long = rectangular.stack(level='varno')
normalized = (
pd.merge(left=df[['dataset', 'Sample']], right=long[meta_names], on='csv_index')
.set_index(['dataset', 'Sample', 'Variable'], append=True)
.astype({'Solution': float, 'Uncertainty': float})
.unstack('Variable')
)
return normalized | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
python
return normalized
def extract_factors(df: pd.DataFrame):
names = df.columns[df.columns.get_loc('Solution')].droplevel(0)
values = (
names.get_level_values('Variable')
.to_series(name='factor', index=names)
.str.replace('_', '.')
.str.extract(r'(\d+\.\d+)$', expand=False)
.dropna()
.astype(float)
)
return values
def main() -> None:
df = load()
df = normalize_vars(df)
# For all datasets and Sample ~ Open-Closed*
open_closed = df[df.index.get_level_values('Sample').str.contains('Open-Closed')]
# io is converted concentrations from the number embedded in the scaling_factornnn names
factors = extract_factors(df)
io = conc = factors * 1e9/MW
k = open_closed.loc[:, ('Solution', 'k')]
k_error = open_closed.loc[:, ('Uncertainty', 'k')]
fo = k/(k + 1)
fc = 1/(k + 1)
fm = 0
h = 1e-8
kh = k + h
dk_fo = kh/(kh + 1)
dk_fc = 1/(kh + 1)
dk_fm = 0
open_errors = k_error**2 * dk_fo**2
closed_errors = k_error**2 * dk_fc**2
monomer_errors = k_error**2 * dk_fm**2
if __name__ == '__main__':
main() | {
"domain": "codereview.stackexchange",
"id": 44996,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python",
"url": null
} |
beginner, rust
Title: Rust Elo rating
Question: I have just started learning Rust and wanted some feedback on a simple program for calculating Elo ratings.
All of the following code was in a single file, but I imagine that this is the part that would be put into a library to be called by another program (see the next section).
enum Outcome {
Win,
Loss,
Draw,
}
const WIN: f64 = 1.0;
const LOSS: f64 = 0.0;
const DRAW: f64 = 0.5;
// Returns the new ratings of two players of rating `r1` and `r2` given an outcome
fn elo(r1: i32, r2: i32, k: f64, outcome: Outcome) -> (i32, i32) {
// Converting to float here to allow integer input in API
let mut r1 = r1 as f64;
let mut r2 = r2 as f64;
// Probabilities of r1 and r2 winning
let p1 = prob(r1, r2);
let p2 = prob(r2, r1);
match outcome {
Outcome::Win => {
r1 += k * (WIN - p1);
r2 += k * (LOSS - p2);
}
Outcome::Loss => {
r1 += k * (LOSS - p1);
r2 += k * (WIN - p2);
}
Outcome::Draw => {
r1 += k * (DRAW - p1);
r2 += k * (DRAW - p2);
}
}
(r1 as i32, r2 as i32)
}
// Returns the probability that a player with rating r1 beats a player with rating r2
fn prob(r1: f64, r2: f64) -> f64 {
let diff = (r2 - r1) / 400.0;
1.0 / (1.0 + f64::powf(10.0, diff))
}
And this is how I expect it would be used:
fn main() {
// User sets their own k value
const K: f64 = 30.0;
// The ratings of the two players
let r1 = 1600;
let r2 = 1200;
let (r1_new, r2_new) = elo(r1, r2, K, Outcome::Win);
let r1_diff = r1_new - r1;
let r2_diff = r2_new - r2;
println!("Player 1 ({}) vs Player 2 ({})", r1, r2);
println!(
"Player 1 rating changed from {} to {} ({:+})",
r1, r1_new, r1_diff
);
println!(
"Player 2 rating changed from {} to {} ({:+})",
r2, r2_new, r2_diff
);
} | {
"domain": "codereview.stackexchange",
"id": 44997,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, rust",
"url": null
} |
beginner, rust
Any feedback on style, performance, layout etc. is appreciated, as I am a very new programmer.
Answer:
prob doesn't compute the probability of winning. If r1 == r2, the function returns 0.5 Do you think that equally rated players have a 50% probability of winning?
What this function computes is, in full accordance with Elo system, an expected result. Call it so.
Once you compute the expected result r for player 1, the expected result for player 2 is 1 - r. There is no need to call the expensive powf for a second time.
It would be much simpler to pass numerical constants (WIN, LOSS, DRAW) as an actual result instead of Outcome. Or at least convert Outcome into an actual result in the beginning of elo, and recompute the ratings as
match outcome {
Outcome::Win => { actual = WIN; }
Outcome::Draw => { actual = DRAW; }
Outcome::Loss => { actual = LOSS; }
}
expected = elo_expected_score(r1, r2);
r1 += k * (actual - expected);
r2 += k * (expected - actual);
(Disclaimer: my Rust is a read-only. I am sure there is something more elegant and less error-prone than the matching above). | {
"domain": "codereview.stackexchange",
"id": 44997,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, rust",
"url": null
} |
recursion, homework, scala
Title: Pascal triangle algorithm is good but fails
Question: I have created a function which returns the number that corresponds to the given row and column from the pascal triangle. I have used the formula for this:
n! / r! * (r-n)!
The code:
def pascal(c: Int, r: Int): Int = {
def factorial(number: Int): Int = {
def loop(adder: Int, num: Int): Int = {
if( num == 0) adder
else loop(adder * num, num - 1)
}
loop(1,number)
}
factorial(r) / (factorial(c) * factorial(r-c));
}
I received 180 points out of 190 for this function because it returns an arithmetic exception for a given case (they don't give me the case). How could I improve this code and how can I make it better? Most importantly, how can I spot the bug?
Answer: The code appears to be correct, though the explanatory formula you posted is wrong.
Your factorial function is more complex than it needs to be. Furthermore, adder is a poor variable name, since the result is multiplicative rather than additive. The traditional recursive definition is:
def factorial(n: Int): Int = {
if (n == 0) 1
else n * factorial(n - 1)
}
The main problem with your approach is that you work with large numbers in both the numerator and the denominator. If your program is generally working, then your problem is probably arithmetic overflow. An Int would only handle values up to 2^31 - 1 correctly.
Therefore, your best bet is to use an entirely different way to construct Pascal's Triangle, as suggested by @janos. | {
"domain": "codereview.stackexchange",
"id": 44998,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "recursion, homework, scala",
"url": null
} |
python, performance, python-3.x, pygame
Title: How can I get my CPU Utilization Down? PYGAME PROJECT
Question: I get 25% CPU usage on this simple project. I hope it's okay if I just post the whole thing. It probably something to do with the rendering code. It's definitely the project though, my fans get loud, then I exit the program, and quiet again.
#python3 -OO main.py
import pygame
import json
import sys
import os
#OS
xMax = 800
yMax = 640
xWin = 1366 - xMax #pins window to top right of the display
yWin = 0
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d, %d" % (xWin, yWin)
#PYGAME
pygame.init()
pygame.font.init()
pygame.display.set_caption("Mountain of Doom")
screen = pygame.display.set_mode((xMax, yMax))
font = pygame.font.SysFont(None, 30)
with open('saves/save.json') as save_file:
data = json.load(save_file)
tileSize = 16
tile_images = {
1: pygame.image.load("resources/graphics/sprites/grass.png"),
#ABOUT 10 MORE CALLS
}
with open("resources/maps/demo.json", "r") as json_file:
json_data = json.load(json_file)
tileMap = json_data["tileMap"]
class Background:
def __init__(self, tile_images, tileSize, tileMap):
self.tile_images = tile_images
self.tileSize = tileSize
self.tileMap = tileMap
def draw(self, screen):
background = pygame.Surface((len(self.tileMap[0]) * self.tileSize, len(self.tileMap) * self.tileSize))
for row in range(len(self.tileMap)):
for col in range(len(self.tileMap[0])):
tile_id = self.tileMap[row][col]
if tile_id in self.tile_images:
tile_image = self.tile_images[tile_id]
background.blit(tile_image, (col * self.tileSize, row * self.tileSize))
screen.blit(background, (0, 0)) | {
"domain": "codereview.stackexchange",
"id": 44999,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, python-3.x, pygame",
"url": null
} |
python, performance, python-3.x, pygame
class Player:
def __init__(self, position, player_path):
self.position = position
self.image = pygame.image.load(player_path)
self.speed = 16
def move(self, keys):
x, y = self.position
speed = self.speed
if keys[pygame.K_UP] and y > 0:
y -= speed
elif keys[pygame.K_DOWN]:
y += speed
elif keys[pygame.K_LEFT] and x > 0:
x -= speed
elif keys[pygame.K_RIGHT]:
x += speed
self.position = (x, y)
def draw(self, screen):
screen.blit(self.image, (self.position))
def update_data(player):
data['player_x'] = player.position[0]
data['player_y'] = player.position[1]
with open('saves/save.json', 'w') as save_file:
json.dump(data, save_file)
def main():
background = Background(tile_images, tileSize, tileMap)
player_path = "resources/graphics/sprites/player1.png"
position = (data['player_x'], data['player_y'])
player = Player(position, player_path)
clock = pygame.time.Clock()
while True:
clock.tick(60)
mbreak = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
mbreak = True
keys = pygame.key.get_pressed()
if keys[pygame.K_ESCAPE]:
mbreak = True
background.draw(screen)
player.move(keys)
player.draw(screen)
text = f'{player.position[0]}, {player.position[1]}'
img = font.render(text, True, (0xFFFFFFFF))
screen.blit(img, (20, 20))
pygame.display.update()
if mbreak:
break
update_data(player)
pygame.quit()
sys.exit()
if __name__ == "__main__":
main() | {
"domain": "codereview.stackexchange",
"id": 44999,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, python-3.x, pygame",
"url": null
} |
python, performance, python-3.x, pygame
Answer: FROM:
tile_images = {
1: pygame.image.load("resources/graphics/sprites/grass.png"),
2: pygame.image.load("resources/graphics/sprites/water.png"),
3: pygame.image.load("resources/graphics/sprites/tree.png"),
4: pygame.image.load("resources/graphics/sprites/bones.png"),
5: pygame.image.load("resources/graphics/sprites/rocks.png"),
6: pygame.image.load("resources/graphics/sprites/brick1.png"),
7: pygame.image.load("resources/graphics/sprites/brick2.png"),
8: pygame.image.load("resources/graphics/sprites/tree2.png")
}
TO:
tile_images = {
1: pygame.image.load("resources/graphics/sprites/grass.png").convert_alpha(),
2: pygame.image.load("resources/graphics/sprites/water.png").convert_alpha(),
3: pygame.image.load("resources/graphics/sprites/tree.png").convert_alpha(),
4: pygame.image.load("resources/graphics/sprites/bones.png").convert_alpha(),
5: pygame.image.load("resources/graphics/sprites/rocks.png").convert_alpha(),
6: pygame.image.load("resources/graphics/sprites/brick1.png").convert_alpha(),
7: pygame.image.load("resources/graphics/sprites/brick2.png").convert_alpha(),
8: pygame.image.load("resources/graphics/sprites/tree2.png").convert_alpha()
}
This got my CPU usage down from 25%-26% to 7%-8%. | {
"domain": "codereview.stackexchange",
"id": 44999,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, python-3.x, pygame",
"url": null
} |
python, algorithm, binary-search
Title: Optimizing the Egg Drop Problem implemented with Python
Question: I've implemented the Egg Drop Problem using Python. As described in the code, I'm trying to find the highest floor of a 102-story building from which I could drop an egg without it breaking.
The approach I took here is by using a binary search method, where I initialize the upper bound as the building height, the lower bound as 0, and the middle floor as half the building height. Depending on whether the egg breaks or not, I adjust the upper or lower bounds, and calculate a new middle floor for the next iteration, i.e., O(log(N)).
I know there are dynamic programming techniques to optimize this further which (a) reduce the number of eggs used or (b) minimize the number of drops. Can someone provide insights, resources or suggestions?
# The Empire State Building is 102 stories high. A man wanted to know the highest floor from
# which he could drop an egg without the egg breaking. He proposed to drop an egg from the
# top floor. If it broke, he would go down a floor, and try it again. He would do this until
# the egg did not break. At worst, this method requires 102 eggs. Implement a method that at
# worst uses seven eggs.
height = 102
def EggDrop(floor):
eggs = 7
upper, middle, lower = height, height // 2, 0
while eggs > 0:
if middle > floor:
upper = middle - 1
eggs = eggs - 1
elif middle < floor:
lower = middle + 1
else:
if middle + 1 > floor:
print(f"The highest floor we can drop an egg from is {middle}, with {eggs} eggs left over.")
break
middle = (upper + lower) // 2
for i in range(height + 1):
EggDrop(i)
Answer: conventional spelling
def EggDrop(floor): | {
"domain": "codereview.stackexchange",
"id": 45000,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, algorithm, binary-search",
"url": null
} |
python, algorithm, binary-search
for i in range(height + 1):
EggDrop(i)
Answer: conventional spelling
def EggDrop(floor):
When writing an English sentence,
yore bedder aff riting korrekt ledders
than surprising the reader with non-standard spellings.
Will the reader understand the meaning? Yes.
But you could do a better job of conveying what you want understood.
Here, you and pep-8
are telling me that this is SomeClass,
which is a bit jarring coming on the heels of def
which introduces some_function.
Worse, down below the EggDrop(i) call appears to be
invoking a class constructor and discarding the resulting class instance.
In a small program the Gentle Reader will remember these odd aspects
and eventually realize it's a function call.
But in a program with many classes and functions written by diverse authors,
things can quickly grow unmanageable.
Please write python code the way the rest of the python community does
when you wish to collaborate with others.
return values
The biggest critique of this code is we evaluate a complex loop
for side effects (for printing a result) rather than returning a result.
This makes it harder to read, since it's only way down in the middle + 1 > floor
clause that we get to the punch line which reveals the function's purpose.
Consider adopting any one of these signatures:
def egg_drop(target_floor: int) -> int:
def egg_drop(target_floor: int) -> tuple[int, int]:
def egg_drop(target_floor: int) -> str:
The middle one would return (middle, eggs),
while the last would return the same formatted string you're printing now.
All of them would be
unit testable,
with the first two being especially convenient to test.
(Yes, the OP code could be tested by redirecting sys.stdout to a buffer,
but there's no need to design a Public API that is gratuitously hard to test.
In general, side effects complicate things and make testing harder.) | {
"domain": "codereview.stackexchange",
"id": 45000,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, algorithm, binary-search",
"url": null
} |
python, algorithm, binary-search
The "problem setup" comments at top of source file
were very nice and I thank you for them.
write """docstrings"""
I suggested renaming your formal parameter to something like target_floor,
the floor number we binary search for,
because I found the current signature initially misleading.
I thought I understood your meaning, of "floor we took the
elevator to and performed a drop experiment from", and soon
found that interpretation was dead wrong.
Putting a """docstring""" below the signature gives you an opportunity
to describe the meaning of outputs and inputs, even if you
retain the floor name.
Always take the opportunity to write at least one sentence,
since the signature usually won't spell out everything of interest.
Optionally go on with more description if you feel there's more to say.
invariants
There are some important relationships among your loop variables.
Write them down in # comments.
Clearly each time we enter the while, middle will
be in the range lower .. upper.
You could have made that more clear by using a single
assignment after the while, rather than a pair of assignments.
The intent is that upper > lower, which is a little tricky to prove.
It is OK to have middle == lower, and it wouldn't
hurt to mention that in a comment.
We intend that target_floor shall be in the range lower .. upper,
but we neglected to check that upon function entry.
European floors would be numbered 0 .. 101, while
American floor numbers range from 1 .. 102.
Computer scientists like zero-origin, even if they don't live in Europe.
It would be helpful to spell out what floor numbers mean in this context,
to avoid an OBOB.
Then write the occasional unit test to verify. | {
"domain": "codereview.stackexchange",
"id": 45000,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, algorithm, binary-search",
"url": null
} |
c, binary-search
Title: K&R exercise 3-1
Question:
int binsearch(int x, int v[], int n)
{
int low, high, mid;
low = 0;
high = n-1;
while (low <= high)
{
mid = (low+high)/2;
if (x < v[mid])
high = mid -1;
else if (x > v[mid])
low = mid + 1;
else
return mid; /* found match */
}
return -1; /* no match */
}
Exercise 3-1. Our binary search makes two tests inside the loop, when
one would suffice (at the price of more tests outside.) Write a
version with only one test inside the loop and measure the difference
in run-time.
This is the code I wrote :
int binsearch(int x, int v[], int n)
{
int lh[2] = {0, n-1}, a[2] = {-1, 1}, mid;
while (lh[0] <= lh[1])
{
mid = (lh[1] + lh[0])/2;
if (x != v[mid])
lh[x < v[mid]] = mid + a[x > v[mid]];
else
return mid;
}
return -1;
}
But they have about the same run-time? Why didn't removing one of the if statements reduce it?
Answer: The performance is expected to be pretty much the same as the original.
In the most common case (x != v[mid]) the first code computes:
x < v[mid]
x > v[mid] (half the time, when the first condition is false)
Either high = mid -1 or low = mid + 1
The second code computes:
x != v[mid]
x < v[mid] and x > v[mid] for indexing lh and a respectively.
That hasn't reduced the complexity; in fact it's slightly increased it.
I think that what the exercise is looking for is that you can find the smallest value greater than or equal to x. Then after the loop, test whether that element actually is x.
I wouldn't expect to see a significant performance difference there either, but such a version would more reusable. It's often useful in binary search to locate the index where the sought element would be¹, so it makes sense to build search from such a version, and when we have that position, to test whether the value actually is present at that position. | {
"domain": "codereview.stackexchange",
"id": 45001,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, binary-search",
"url": null
} |
c, binary-search
A subtlety that's present in both versions looks like this:
mid = (low+high)/2;
When low and high get large, their sum can be too big to represent in an int, and we get Undefined Behaviour. This means that literally anything is allowed to happen, and that's not a good thing.
The way to avoid this, given that we know low and high are both positive, is to compute mid by adding half the difference:
mid = low + (high - low) / 2;
That's mathematically the same, but computationally very different (in particular, no overflow can occur).
¹ Frequently we want to insert a value if it's not already there - perhaps if we're building a cache, or counting occurrences. | {
"domain": "codereview.stackexchange",
"id": 45001,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, binary-search",
"url": null
} |
c++, recursion, time-limit-exceeded, iteration, knapsack-problem
Title: Knock down tall buildings
Question: I'm trying to solve the following problem:
As you know Bob likes to destroy the buildings.
There are N buildings in a city, ith building has ai floors.
Bob can do the following operation.
Choose a building, and destroy its uppermost floor with cost h. Where h is building's height before removing the floor.
You can do this operation any number of times but total cost should be less than or equal to K.
Since you don't like tall buildings you want to decrease their heights.
You have to minimise the maximum height of buildings and report this height (height of tallest among them after operations).
(Use long long data type instead of int to avoid overflow errors)
Input Format
T
N1 K1
a1 a2 a3 ..... aN1
N2 K2
a1 a2 a3 ..... aN2 .
.
.
NT KT
a1 a2 a3 ..... aNT
Constraints
1<=T<=1000 Number of test cases
1<=N<=100000 Number of buildings
1<=K<=10^15
0<=ai<=1000000
Sum of N over all test cases is at most 200000.
Output Format
ans1
ans2
ans3
.
.
.
ansT
Sample Input 0
3
5 23
1 3 2 4 5
5 3000000
0 1000000 2 5 99999
3 9
3 3 3
Sample Output 0
2
999997
2
Here we can only use the iostream library, so all the other functions needs to be written by me. In that case, for the T iterations in a while loop, we are first taking in a an array followed by merge sorting the algorithm before proceeding to the helper function.
In the helper function we are trying to introduce recursion with the fact that as long as K is greater than the tallest building, it is going to reduce K with the height of the tallest building and reduce the top floor by 1.
In case the K is greater than equal to the height of the tallest building, it reduces the height of the tallest building by 1, and sorts the array of building heights and updates the value of accordingly. Finally it return the height of the tallest building if K falls below the same.
My code is:
#include <iostream>
using namespace std; | {
"domain": "codereview.stackexchange",
"id": 45002,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, recursion, time-limit-exceeded, iteration, knapsack-problem",
"url": null
} |
c++, recursion, time-limit-exceeded, iteration, knapsack-problem
void merge(long long array[], int const left, int const mid,
int const right)
{
int const subArrayOne = mid - left + 1;
int const subArrayTwo = right - mid;
auto *leftArray = new int[subArrayOne],
*rightArray = new int[subArrayTwo];
for (auto i = 0; i < subArrayOne; i++)
leftArray[i] = array[left + i];
for (auto j = 0; j < subArrayTwo; j++)
rightArray[j] = array[mid + 1 + j];
auto indexOfSubArrayOne = 0, indexOfSubArrayTwo = 0;
int indexOfMergedArray = left;
while (indexOfSubArrayOne < subArrayOne
&& indexOfSubArrayTwo < subArrayTwo) {
if (leftArray[indexOfSubArrayOne]
<= rightArray[indexOfSubArrayTwo]) {
array[indexOfMergedArray]
= leftArray[indexOfSubArrayOne];
indexOfSubArrayOne++;
}
else {
array[indexOfMergedArray]
= rightArray[indexOfSubArrayTwo];
indexOfSubArrayTwo++;
}
indexOfMergedArray++;
}
while (indexOfSubArrayOne < subArrayOne) {
array[indexOfMergedArray]
= leftArray[indexOfSubArrayOne];
indexOfSubArrayOne++;
indexOfMergedArray++;
}
while (indexOfSubArrayTwo < subArrayTwo) {
array[indexOfMergedArray]
= rightArray[indexOfSubArrayTwo];
indexOfSubArrayTwo++;
indexOfMergedArray++;
}
delete[] leftArray;
delete[] rightArray;
}
void mergeSort(long long array[], int const begin, int const end)
{
if (begin >= end)
return;
int mid = begin + (end - begin) / 2;
mergeSort(array, begin, mid);
mergeSort(array, mid + 1, end);
merge(array, begin, mid, end);
} | {
"domain": "codereview.stackexchange",
"id": 45002,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, recursion, time-limit-exceeded, iteration, knapsack-problem",
"url": null
} |
c++, recursion, time-limit-exceeded, iteration, knapsack-problem
long long minimizeMaxHeight(long long heights[], int N, int K){
if(K<0) return heights[N-1]+1;
if(K==0) return heights[N-1];
if(K>0){
if(heights[N-1]>=heights[N-2]){
heights[N-1] = heights[N-1]-1;
return minimizeMaxHeight(heights, N, K-heights[N-1]);
}
else{
mergeSort(heights, 0, N - 1);
return minimizeMaxHeight(heights, N, K);
}
}
return 0;
}
long long solve(long long heights[],int n,int k){
//Recursive solution
if(heights[n-1]<=k){
heights[n-1]-=1;
mergeSort(heights,0,n-1);
return solve(heights,n,k-heights[n-1]);
}
return heights[n-1];
}
int main() {
int T;
std::cin >> T;
while (T--) {
int N;
long long K;
std::cin >> N >> K;
long long heights[N];
for (int i = 0; i < N; ++i) {
std::cin >> heights[i];
}mergeSort(heights, 0, N - 1);
long long ans = solve(heights, N, K);
std::cout << ans << std::endl;
}
return 0;
}
The above is the recursive approach and there's another iterative approach which I'm trying:
#include <iostream>
using namespace std;
int main() {
int T;
std::cin >> T;
while (T--) {
int N;
long long K;
std::cin >> N >> K;
long long total[1000001] = {0};
long long heights[N];
for (int i = 0; i < N; ++i) {
std::cin >> heights[i];
total[heights[i]]++;
}
long long ans = 0;
for(int i = 1000000; i>0;){
if(total[i]==0) {
i--;
continue;
}
else if(total[i]>0&&K>=i){
total[i]--;
total[i-1]++;
K=K-i;
}
else if(total[i]>0&&K<i) {
ans = i;
break;
}
}
std::cout << ans << std::endl;
}
return 0;
} | {
"domain": "codereview.stackexchange",
"id": 45002,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, recursion, time-limit-exceeded, iteration, knapsack-problem",
"url": null
} |
c++, recursion, time-limit-exceeded, iteration, knapsack-problem
For the recursive approach the code run only for one test case and for the iterative approach the code fails for two test cases.
For all the failed test cases in both the recursive and the iterative approach the issue is: Time Limit Exceeded
Answer: Algorithm
You remove one floor from one building at a time. The complexity of the (iterative) code is therefore proportional to the total amount of floors, which is prohibitive. Better think this way:
Let's say the tallest building has a height \$H_0\$, and there are \$N_0\$ of them. The next tallest building has a height \$H_1\$, and there are \$N_1\$ of such buildings. To equalize heights, you have to remove \$H_0 - H_1\$ floors by the cost of
\$C = (H_1 + 1) + (H_1 + 2) + ... + H_0 = (H_0 - H_1)*(H_0 + H_1 + 1)/2\$
(do you see an arithmetic progression?) from \$N_0\$ building, giving the total cost of \$T = C * N_0\$. If you cannot afford this, you are done. If you can, you end up with \$N_0 + N_1\$ buildings of height \$H_1\$. Adjust the balance: \$K = K - T\$, and keep going.
To be really effective, you need a bit more elaborate data structure, a (sorted) list of pairs height, number.
Review
using namespace std is a very poor practice. Besides, you use std:: prefix anyway.
There is no need for heights array. You never use it. One long long variable is just enough.
More whitespaces please. total[i]>0&&K>=i is barely readable. Consider (total[i] > 0) && (K >= i)
There is no need to test for total[i] > 0 in the else/else if branches. | {
"domain": "codereview.stackexchange",
"id": 45002,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, recursion, time-limit-exceeded, iteration, knapsack-problem",
"url": null
} |
recursion, time-limit-exceeded, dynamic-programming, c++20, memoization
Title: k-dice Ways to get a target value
Question: I'm trying to solve the following problem:
You have a k-dice.
A k-dice is a dice which have k-faces and each face have value written from 1 to k.
Eg. A 6-dice is the normal dice we use while playing games.
For a given N, you have to calculate the number of ways you can throw this dice so that we get sum equal to N.
Since number of ways can be large you have to calculate ways mod 998244353
Refer to samples for better understanding.
(Use long long data type instead of int to avoid overflow errors)
Input Format
T
N1 K1
N2 K2
N3 K3
.
.
.
NT KT
Constraints
N, K and T are integers
1<=T<=100
1<=N<=100 (20% points)
1<=N<=10^18 (100% points)
2<=k<=20
Output Format
ans1
ans2
ans3
.
.
ansT
Sample Input 0
3
5 3
7 4
8 10
Sample Output 0
13
56
128
Explanation 0
In first test case
1+1+1+1+1=5
1+1+1+2=5
1+1+2+1=5
1+2+1+1=5
2+1+1+1=5
2+2+1=5
2+1+2=5
1+2+2=5
3+1+1=5
1+3+1=5
1+1+3=5
3+2=5
2+3=5
So there are 13 ways.
Here we can only use the iostream library, so all the other functions needs to be written by me. In that case, for the T iterations in a while loop, we are first taking in a an array followed by merge sorting the algorithm before proceeding to the helper function.
In the helper function we are trying to introduce recursion with the fact that as long as N is greater than the total sum, we find a way to add different numbers among the K possible numbers on the dice.
As a base case if N turns 0, there happens to be just one way to return it, thus we define it as our base case. Finally to make our code efficient, we ended up memoizing our code in order to store the earlier values, and in case the value is already stored, it'll directly return that without passing the entire recursion.
My code is:
#include <iostream>
using namespace std; | {
"domain": "codereview.stackexchange",
"id": 45003,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "recursion, time-limit-exceeded, dynamic-programming, c++20, memoization",
"url": null
} |
recursion, time-limit-exceeded, dynamic-programming, c++20, memoization
long long countWays(int N, int K, long long dp[]) {
if (N == 0) return 1;
if (dp[N] != 0) return dp[N];
long long ways = 0;
for (int j = 1; j <= min(N, K); j++) {
ways = (ways + countWays(N - j, K, dp)) % 998244353;
}
return dp[N] = ways;
}
int main() {
int T;
cin >> T;
while (T--) {
int N, K;
cin >> N >> K;
long long dp[N + 1] = {0};
long long ways = countWays(N, K, dp);
cout << ways << endl;
}
return 0;
}
While the code works find for 3/6 test cases, the other 3 test cases end up giving a segmentation fault.
I believe the segmentation fault is because of the constraints as the value which is coming as an input is of the order 10^18.
One such example of the segmentation fault is:
This is precisely the case of space complexity error.
The code works precisely fine, but the segmentation fault is cause of exceeding the space complexity. Secondly, I performed another approach as suggested by @JustinChang in the answers:
#include <iostream>
using namespace std;
long long countWays(const long long N, const int K) {
// for convenience
//const int K1 = (K + 1);
// the number of ways for a sum of i is stored in dp[i % K1]
// initialize from 0 to K inclusive: 1, 1, 2, 4, ...
long long dp[21] = {1, 1};
for (int i = 2; i < (K + 1); ++i)
dp[i] = 2 * dp[i - 1];
// cycle, filling each slot with the sum of the other slots
for (long long i = (K + 1); i <= N; ++i) {
// int curr_i = i % (K + 1);
// int prev_i = (i - 1) % (K + 1);
dp[i % (K + 1)] = (2 * dp[(i - 1) % (K + 1)] - dp[i % (K + 1)]) % 998244353;
}
// because C++ can have negative remainders
return (dp[N % (K + 1)] + 998244353) % 998244353;
}
int main() {
int T;
cin >> T; | {
"domain": "codereview.stackexchange",
"id": 45003,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "recursion, time-limit-exceeded, dynamic-programming, c++20, memoization",
"url": null
} |
recursion, time-limit-exceeded, dynamic-programming, c++20, memoization
int main() {
int T;
cin >> T;
while (T--) {
int N, K;
cin >> N >> K;
//long long dp[N + 1] = {0};
long long ways = countWays(N, K);
cout << ways << endl;
}
return 0;
}
Here the code works fine in terms of space complexity, but exceeds the time complexity for larger inputs (of the order 10^18). The allowed time limit is just 2 seconds as it always is.
Your code did not pass this test case.
Input (stdin)
100000000 10
705702493 12
27395703283 5
Your Output (stdout)
362391665
Expected Output
362391665
837869791
564958105
Compiler Message
Terminated due to timeout
Answer: Note that % can be negative in C++. The recurrence relation can also be simplified:
// only works for i >= K + 1
dp[i] = 2 * dp[i - 1] - dp[i - K - 1]
Other than that, I agree the main issue is that dp can exceed available memory, as it can be up to 8 * 10^18 bytes, or 8 exabytes. Additionally, the recursive calls create a lot of stack frames. Instead, you can iteratively work your way up, keeping only the last K+1 numbers at any given moment. Since K <= 20, you can declare a fixed-size array of length 21 (but if the sizes were more variable, I would use malloc):
long long countWays(long long N, int K) {
// for convenience
int K1 = (K + 1);
// the number of ways for a sum of i is stored in dp[i % K1]
// initialize from 0 to K inclusive: 1, 1, 2, 4, ...
long long dp[21] = { 1, 1 };
for (int i = 2; i < K1; ++i)
dp[i] = 2 * dp[i - 1];
// cycle, filling each slot with the sum of the other slots
for (long long i = K1; i <= N; ++i) {
int curr_i = i % K1;
int prev_i = (i - 1) % K1;
dp[curr_i] = (2 * dp[prev_i] - dp[curr_i]) % 998244353;
}
// because C++ can have negative remainders
return (dp[N % K1] + 998244353) % 998244353;
} | {
"domain": "codereview.stackexchange",
"id": 45003,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "recursion, time-limit-exceeded, dynamic-programming, c++20, memoization",
"url": null
} |
recursion, time-limit-exceeded, dynamic-programming, c++20, memoization
// because C++ can have negative remainders
return (dp[N % K1] + 998244353) % 998244353;
}
I was able to compute the provided cases in less than 5 minutes. Hope that helps.
EDIT: Let dp[0], dp[1], ... dp[K-1] be the first K terms. If you treat this like a column matrix and multiply by a matrix Q, you can turn it into dp[1], dp[2], ..., dp[K]. By repeating this multiplication N - K + 1 times, you get the final answer. So now the question is: how do you quickly compute Q^(N - K + 1)? The solution is a dynamic programming technique called exponentiation by squares. You will need to put it all together:
#include <iostream>
using namespace std;
// malloc but (q)uit on errors
void *malloc_q(size_t s)
{
void *p = malloc(s);
if (p == NULL) exit(EXIT_FAILURE);
return p;
}
// matrix of long longs
class MatrixLL
{
public:
int m, n; // rows, cols
long long **v; // values
// returns an uninitialized matrix
MatrixLL(int m, int n)
{
long long **v = (long long **) malloc_q(m * sizeof(long long *));
for (int i = 0; i < m; ++i)
v[i] = (long long *) malloc_q(n * sizeof(long long));
this->m = m;
this->n = n;
this->v = v;
}
// copy constructor
MatrixLL(const MatrixLL &mat) : MatrixLL(mat.m, mat.n)
{
for (int i = 0; i < mat.m; ++i)
for (int j = 0; j < mat.n; ++j)
v[i][j] = mat.v[i][j];
}
};
// computes (a * b mod M), assuming a.n == b.m
MatrixLL timesMod(const MatrixLL &a, const MatrixLL &b, long long M)
{
MatrixLL dst(a.m, b.n);
for (int i = 0; i < a.m; ++i)
{
for (int j = 0; j < b.n; ++j)
{
long long inner = 0; // sum(a[i][.] * b[.][j])
for (int k = 0; k < a.n; ++k)
inner = (inner + a.v[i][k] * b.v[k][j]) % M;
dst.v[i][j] = inner;
}
}
return dst;
} | {
"domain": "codereview.stackexchange",
"id": 45003,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "recursion, time-limit-exceeded, dynamic-programming, c++20, memoization",
"url": null
} |
recursion, time-limit-exceeded, dynamic-programming, c++20, memoization
// computes (base^exp mod M), assuming exp > 0
MatrixLL powerMod(const MatrixLL &base, long long exp, long long M)
{
MatrixLL result(base);
--exp; // result already has 1 power of base
MatrixLL term(base); // answer is (result * term^exp)
while (exp > 0)
{
if (exp % 2 == 1)
result = timesMod(term, result, M);
term = timesMod(term, term, M);
exp /= 2;
}
return result;
}
// countWays but O(K^3 * log(N)) time, O(K^2) space
long long countWaysFancy(long long N, int K, long long M)
{
// initialize starting matrix
MatrixLL start(K, 1);
start.v[0][0] = 1;
start.v[1][0] = 1;
for (int i = 2; i < K; ++i)
start.v[i][0] = (2 * start.v[i-1][0]) % M;
// if no transformations are needed, you're done
if (N < K)
return start.v[N][0];
// initialize transform matrix Q
MatrixLL transform(K, K);
for (int i = 0; i < K - 1; ++i)
for (int j = 0; j < K; ++j)
transform.v[i][j] = 0;
for (int i = 0; i < K; ++i)
transform.v[K - 1][i] = 1;
for (int i = 0; i < K - 1; ++i)
transform.v[i][i + 1] = 1;
// compute the final transform with quick exponentiation
MatrixLL finalform = powerMod(transform, N - K + 1, M);
MatrixLL resultMat = timesMod(finalform, start, M);
return resultMat.v[K - 1][0];
}
// test all provided cases
int main()
{
cout << countWaysFancy(5, 3, 998244353) << "\n";
cout << countWaysFancy(7, 4, 998244353) << "\n";
cout << countWaysFancy(8, 10, 998244353) << "\n";
cout << countWaysFancy(100000000, 10, 998244353) << "\n";
cout << countWaysFancy(705702493, 12, 998244353) << "\n";
cout << countWaysFancy(27395703283, 5, 998244353) << "\n";
return 0;
}
This completes the cases in < 1 second. If you are wondering what Q looks like:
// Q(K = 3)
0 1 0
0 0 1
1 1 1
// Q(K = 5)
0, 1, 0, 0, 0
0, 0, 1, 0, 0
0, 0, 0, 1, 0
0, 0, 0, 0, 1
1, 1, 1, 1, 1 | {
"domain": "codereview.stackexchange",
"id": 45003,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "recursion, time-limit-exceeded, dynamic-programming, c++20, memoization",
"url": null
} |
performance, beginner, unit-testing, rust
Title: Simulate M random walkers N times and visualize relationship between walker number and grid size
Question: Problem statement: Suppose that n random walkers, starting in the center of an n-by-n grid, move one step at a time, choosing to go left, right, up, or down with equal probability at each step. Write a program to help formulate and test a hypothesis about the number of steps taken before all cells are touched.
This is one of my self-imposed challenges in Rust to become better at it. The problem was taken from Sedgewick Exercise 1.4.36.
Here is my code:
use clap::Parser;
use plotly::common::Title;
use plotly::layout::{Axis, Layout};
use plotly::{HeatMap, Plot};
use rand::rngs::ThreadRng;
use rand::Rng;
#[derive(Debug, Parser)]
struct Arguments {
#[arg(index = 1)]
number_of_trials: usize,
#[arg(index = 2)]
walker_number_range_start: usize,
#[arg(index = 3)]
walker_number_range_end: usize,
#[arg(index = 4)]
walker_number_range_step: usize,
#[arg(index = 5)]
grid_size_range_start: usize,
#[arg(index = 6)]
grid_size_range_end: usize,
#[arg(index = 7)]
grid_size_range_step: usize,
#[arg(index = 8)]
grid_type: String,
}
fn main() -> Result<(), String> {
let arguments = Arguments::parse();
let number_of_trials = arguments.number_of_trials;
let walker_number_range_start = arguments.walker_number_range_start;
let walker_number_range_end = arguments.walker_number_range_end;
let walker_number_range_step = arguments.walker_number_range_step;
let grid_size_range_start = arguments.grid_size_range_start;
let grid_size_range_end = arguments.grid_size_range_end;
let grid_size_range_step = arguments.grid_size_range_step;
let grid_type = arguments.grid_type;
let mut rng = rand::thread_rng(); | {
"domain": "codereview.stackexchange",
"id": 45004,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, beginner, unit-testing, rust",
"url": null
} |
performance, beginner, unit-testing, rust
let mut rng = rand::thread_rng();
let walker_number_range = (
walker_number_range_start,
walker_number_range_end,
walker_number_range_step,
);
let grid_size_range = (
grid_size_range_start,
grid_size_range_end,
grid_size_range_step,
);
let heat_map = run_parametric_experiment(
number_of_trials,
walker_number_range,
grid_size_range,
&grid_type,
&mut rng,
)?;
let heat_map_row_indices: Vec<usize> = (walker_number_range.0..walker_number_range.1)
.step_by(walker_number_range.2)
.collect();
let heat_map_column_indices: Vec<usize> = (grid_size_range.0..grid_size_range.1)
.step_by(grid_size_range.2)
.collect();
let mut plot = Plot::new();
let trace = HeatMap::new(heat_map_row_indices, heat_map_column_indices, heat_map);
let layout = Layout::new()
.x_axis(Axis::new().title(Title::from("Grid Size")))
.y_axis(Axis::new().title(Title::from("Walker Number")));
plot.add_trace(trace);
plot.set_layout(layout);
plot.show();
Ok(())
}
#[derive(Clone, Debug)]
struct Walker {
x: usize,
y: usize,
grid_size: usize,
grid_type: String,
}
impl Walker {
fn new(x: usize, y: usize, grid_size: usize, grid_type: &str) -> Result<Self, String> {
if grid_size < 1 {
return Err("Grid size must be at least 1".to_string());
}
if x >= grid_size {
return Err(format!(
"X coordinate: {} greater than or equal to grid size: {}",
x, grid_size
));
} else if y >= grid_size {
return Err(format!(
"Y coordinate: {} greater than or equal to grid size: {}",
y, grid_size
));
}
if grid_type != "plane" && grid_type != "torus" {
return Err("Defined grid types are plane and torus".to_string());
} | {
"domain": "codereview.stackexchange",
"id": 45004,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, beginner, unit-testing, rust",
"url": null
} |
performance, beginner, unit-testing, rust
Ok(Walker {
x,
y,
grid_size,
grid_type: grid_type.to_string(),
})
}
fn get_x_coordinate(&self) -> usize {
self.x
}
fn get_y_coordinate(&self) -> usize {
self.y
}
fn walk_on_plane(&mut self, rng: &mut ThreadRng) {
let r = rng.gen_range(0.0..1.0); | {
"domain": "codereview.stackexchange",
"id": 45004,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, beginner, unit-testing, rust",
"url": null
} |
performance, beginner, unit-testing, rust
// Unit case
if self.grid_size == 1 {
}
// Corner cases
else if self.x == 0 && self.y == 0 {
if r < 1.0 / 2.0 {
self.x += 1;
} else {
self.y += 1;
}
} else if self.x == 0 && self.y == self.grid_size - 1 {
if r < 1.0 / 2.0 {
self.x += 1;
} else {
self.y -= 1;
}
} else if self.x == self.grid_size - 1 && self.y == self.grid_size - 1 {
if r < 1.0 / 2.0 {
self.x -= 1;
} else {
self.y -= 1;
}
} else if self.x == self.grid_size - 1 && self.y == 0 {
if r < 1.0 / 2.0 {
self.x -= 1;
} else {
self.y += 1;
}
}
// Edge cases
else if self.x == 0 {
if r < 1.0 / 3.0 {
self.x += 1;
} else if r < 2.0 / 3.0 {
self.y += 1;
} else {
self.y -= 1;
}
} else if self.x == self.grid_size - 1 {
if r < 1.0 / 3.0 {
self.x -= 1;
} else if r < 2.0 / 3.0 {
self.y += 1;
} else {
self.y -= 1;
}
} else if self.y == 0 {
if r < 1.0 / 3.0 {
self.x += 1;
} else if r < 2.0 / 3.0 {
self.x -= 1;
} else {
self.y += 1;
}
} else if self.y == self.grid_size - 1 {
if r < 1.0 / 3.0 {
self.x += 1;
} else if r < 2.0 / 3.0 {
self.x -= 1;
} else {
self.y -= 1;
}
// Regular case
} else if r < 1.0 / 4.0 {
self.x += 1;
} else if r < 2.0 / 4.0 {
self.x -= 1;
} else if r < 3.0 / 4.0 {
self.y += 1; | {
"domain": "codereview.stackexchange",
"id": 45004,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, beginner, unit-testing, rust",
"url": null
} |
performance, beginner, unit-testing, rust
self.x -= 1;
} else if r < 3.0 / 4.0 {
self.y += 1;
} else {
self.y -= 1;
}
} | {
"domain": "codereview.stackexchange",
"id": 45004,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, beginner, unit-testing, rust",
"url": null
} |
performance, beginner, unit-testing, rust
fn walk_on_torus(&mut self, rng: &mut ThreadRng) {
let r = rng.gen_range(0.0..1.0);
if r < 1.0 / 4.0 {
self.x = (((self.x + 1) % self.grid_size) + self.grid_size) % self.grid_size;
} else if r < 2.0 / 4.0 {
if self.x == 0 {
self.x = self.grid_size - 1;
} else {
self.x = (((self.x - 1) % self.grid_size) + self.grid_size) % self.grid_size;
}
} else if r < 3.0 / 4.0 {
self.y = (((self.y + 1) % self.grid_size) + self.grid_size) % self.grid_size;
} else if self.y == 0 {
self.y = self.grid_size - 1;
} else {
self.y = (((self.y - 1) % self.grid_size) + self.grid_size) % self.grid_size;
}
}
fn walk(&mut self, rng: &mut ThreadRng) {
if self.grid_type == "plane" {
self.walk_on_plane(rng);
} else if self.grid_type == "torus" {
self.walk_on_torus(rng);
}
}
}
fn simulate_n_walkers_1_time(
walker_number: usize,
grid_size: usize,
grid_type: &str,
rng: &mut ThreadRng,
) -> Result<usize, String> {
if walker_number < 1 {
return Err("Number of walkers must be at least 1".to_string());
}
let x: usize = grid_size / 2;
let y: usize = grid_size / 2;
let mut walkers = vec![Walker::new(x, y, grid_size, grid_type)?; walker_number];
let mut grid = vec![vec![false; grid_size]; grid_size];
grid[x][y] = true;
let total_grid_cell_number: usize = grid_size * grid_size;
let mut walked_grid_cell_number: usize = 1;
let mut number_of_steps: usize = 0;
while walked_grid_cell_number < total_grid_cell_number {
number_of_steps += 1;
for walker in walkers.iter_mut().take(walker_number) {
walker.walk(rng);
let x_coordinate = walker.get_x_coordinate();
let y_coordinate = walker.get_y_coordinate(); | {
"domain": "codereview.stackexchange",
"id": 45004,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, beginner, unit-testing, rust",
"url": null
} |
performance, beginner, unit-testing, rust
if !grid[x_coordinate][y_coordinate] {
grid[x_coordinate][y_coordinate] = true;
walked_grid_cell_number += 1;
}
}
}
Ok(number_of_steps)
}
fn simulate_m_walkers_n_times(
number_of_trials: usize,
walker_number: usize,
grid_size: usize,
grid_type: &str,
rng: &mut ThreadRng,
) -> Result<usize, String> {
if number_of_trials < 1 {
return Err("Number of trials must be at least 1".to_string());
}
let mut number_of_steps = 0;
for _ in 0..number_of_trials {
number_of_steps += simulate_n_walkers_1_time(walker_number, grid_size, grid_type, rng)?;
}
Ok(number_of_steps / number_of_trials)
}
fn run_parametric_experiment(
number_of_trials: usize,
walker_number_range: (usize, usize, usize),
grid_size_range: (usize, usize, usize),
grid_type: &str,
rng: &mut ThreadRng,
) -> Result<Vec<Vec<usize>>, String> {
let number_of_walker_numbers =
(walker_number_range.1 - walker_number_range.0) / walker_number_range.2;
let number_of_grid_sizes = (grid_size_range.1 - grid_size_range.0) / grid_size_range.2;
let mut heat_map = vec![vec![0; number_of_walker_numbers]; number_of_grid_sizes];
let walker_number_iterator_start = walker_number_range.0;
let walker_number_iterator_end =
number_of_walker_numbers * walker_number_range.2 + walker_number_range.0;
let walker_number_iterator =
(walker_number_iterator_start..walker_number_iterator_end).step_by(walker_number_range.2);
let grid_size_iterator_start = grid_size_range.0;
let grid_size_iterator_end = number_of_grid_sizes * grid_size_range.2 + grid_size_range.0;
let grid_size_iterator =
(grid_size_iterator_start..grid_size_iterator_end).step_by(grid_size_range.2); | {
"domain": "codereview.stackexchange",
"id": 45004,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, beginner, unit-testing, rust",
"url": null
} |
performance, beginner, unit-testing, rust
for (i, walker_number) in walker_number_iterator.enumerate() {
for (j, grid_number) in grid_size_iterator.clone().enumerate() {
heat_map[i][j] = simulate_m_walkers_n_times(
number_of_trials,
walker_number,
grid_number,
grid_type,
rng,
)?;
}
}
Ok(heat_map)
}
Example input:
cargo run --release 1000 11 111 10 11 111 10 torus
Example output:
Is there any way that I can improve my code?
Answer: fn get_x_coordinate(&self) -> usize {
self.x
}
fn get_y_coordinate(&self) -> usize {
self.y
}
This code breaks C-GETTERS:
With a few exceptions, the get_ prefix is not used for getters in Rust code.
The get naming is used only when there is a single and obvious thing that could reasonably be gotten by a getter. For example Cell::get accesses the content of a Cell.
When dealing with coordinates, it's useful to have a Point2 or Vec2 type. You can implement Add, Sub and other useful traits for that struct.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
struct Point {
x: i32,
y: i32,
}
impl Add<Point> for Point {
type Output = Self;
fn add(self, rhs: Point) -> Self::Output {
Point { x: self.x + rhs.x, y: self.y + rhs.y }
}
}
impl Sub<Point> for Point {
type Output = Self;
fn sub(self, rhs: Point) -> Self::Output {
Point { x: self.x + rhs.x, y: self.y + rhs.y }
}
}
With the aforementioned type, you can define possible directions to walk:
const NORTH: Point = Point { x: 0, y: -1 };
const EAST: Point = Point { x: 1, y: 0 };
const SOUTH: Point = Point { x: 0, y: 1 };
const WEST: Point = Point { x: -1, y: 0 };
const CARDINAL_DIRECTIONS: [Point; 4] = [NORTH, EAST, SOUTH, WEST]; | {
"domain": "codereview.stackexchange",
"id": 45004,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, beginner, unit-testing, rust",
"url": null
} |
performance, beginner, unit-testing, rust
const CARDINAL_DIRECTIONS: [Point; 4] = [NORTH, EAST, SOUTH, WEST];
You can also define a translate method that returns None when a Point is translated off the board and Some(Point) when it's on the board:
pub const fn translate(p1: Point, p2: Point) -> Option<Point> {
let result = p1 + p2;
if result.x < 0 || result.y < 0 || result.x > self.max.x || result > self.max.y {
None
} else {
Some(result)
}
}
Combined with rand::seq::SliceRandom::choose, you can condense your walk_on_plane method to this:
fn walk_on_plane(&mut self, rng: &mut ThreadRng) {
loop {
let delta = *CARDINAL_DIRECTIONS.choose(rng).expect("we can safely unwrap, CARDINAL_DIRECTIONS is not empty");
if let Some(next_position) = translate(self.position, delta) {
self.position = next_position
}
}
}
I don't think you need Walker at all. What you could do is manage your walkers as a Vec<Point> on your board. You could have a trait Grid like so, with specific implementations defining how to walk on it:
trait Grid {
fn walk_once(&mut self, position: Point) -> Point;
fn walkers(&mut self) -> &mut Vec<Point>;
fn iterate(&mut self) {
*self.walkers() =
self.walkers()
.into_iter()
.map(|p| self.walk_once(*p))
.collect::<Vec<_>>()
}
}
struct Euclidian {
walkers: Vec<Point>,
min: Point,
max: Point,
rng: Box<dyn RngCore>,
}
impl Grid for Euclidian {
fn walk_once(&mut self, position: Point) -> Point {
loop {
let delta = *CARDINAL_DIRECTIONS.choose(&mut self.rng).expect("we can safely unwrap, CARDINAL_DIRECTIONS is not empty");
if let Some(next_position) = translate(position, delta) {
return next_position;
}
}
}
fn walkers(&mut self) -> &mut Vec<Point> {
&mut self.walkers
}
} | {
"domain": "codereview.stackexchange",
"id": 45004,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, beginner, unit-testing, rust",
"url": null
} |
performance, beginner, unit-testing, rust
fn walkers(&mut self) -> &mut Vec<Point> {
&mut self.walkers
}
}
String as an Error type is marginal at best. Follow C-GOOD-ERR. Generally, it's a good idea to make your errors enums and have them be Error + Send + Sync + 'static. When naming errors, keep in mind C-WORD-ORDER. Please note that, as discussed, the Walker type is flawed in other ways, this is just to demonstrate an example of error handling.
#[derive(Debug)]
enum InitWalkerError {
GridSizeIsZero,
XOutOfBounds { actual: usize, expected: usize },
YOutOfBounds { actual: usize, expected: usize },
UnknownGrid { actual: String, expected: Vec<String> },
}
impl Display for InitWalkerError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
InitWalkerError::GridSizeIsZero => write!(f, "grid size is zero"),
InitWalkerError::XOutOfBounds { expected, actual } => write!(f, "x should be smaller or equal to {}, was {}", expected, actual),
InitWalkerError::YOutOfBounds { expected, actual } => write!(f, "y should be smaller or equal to {}, was {}", expected, actual),
InitWalkerError::UnknownGrid { expected, actual } => write!(f, "grid should be one of {:?}, was \"{}\"", expected, actual),
}
}
}
impl Error for InitWalkerError {} | {
"domain": "codereview.stackexchange",
"id": 45004,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, beginner, unit-testing, rust",
"url": null
} |
performance, beginner, unit-testing, rust
impl Error for InitWalkerError {}
impl Walker {
fn new(x: usize, y: usize, grid_size: usize, grid_type: &str) -> Result<Self, InitWalkerError> {
if grid_size < 1 {
Err(InitWalkerError::GridSizeIsZero)
} else if x >= grid_size {
Err(InitWalkerError::XOutOfBounds { expected: grid_size, actual: x })
} else if y >= grid_size {
Err(InitWalkerError::YOutOfBounds { expected: grid_size, actual: y })
} else if grid_type != "plane" && grid_type != "torus" {
Err(InitWalkerError::UnknownGrid { expected: vec!["plane".to_string(), "torus".to_string()], actual: grid_type.to_string() })
} else {
Ok(Walker {
...
})
}
}
...
}
There are probably more things to discuss, but here are some of my thoughts. Please excuse me for being a bit all over the place with this, I hope it's still helpful. I haven't ran the example code, so it might have some issues, but I hope you get the gist. | {
"domain": "codereview.stackexchange",
"id": 45004,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, beginner, unit-testing, rust",
"url": null
} |
javascript, search
Title: JavaScript search algorithm to find a town in a particular state
Question: I built a function in JavaScript which executes a search algorithm to find a town in a particular state. The dataset, referenced by the variable townsAndStates is an array of object pairs, each of which has a town and a state, and only returns the first few results. An example of an object in this array is {"state": "Rhode Island", "town": "Quahog"}.
The search algorithm is quite simple:
Check if the search input is a town and a state, OR just the name of a single town or a single state.
If it's the former, then filter through the dataset finding both a matching town name AND state name.
If it's the latter, then filter through the dataset finding either a matching town name or a matching state name.
Is there a way to make this function perform better and improve the readability of the code?
Here is the function:
const searchTownsAndStates = (input) => {
let townsAndStates = getRawTownsAndStates();
let regex = /(?<town>[A-Z ]+),(?<state>[A-Z ]*)/gi;
if (regex.test(input)) {
const [town, state] = input.split(",");
return townsAndStates
.filter((row) => {
return row.town.includes(town) && row.state.includes(state?.trim(" "));
})
.slice(0, 5);
} else {
return townsAndStates
.filter((row) => {
return row.town.includes(input) || row.state.includes(input);
})
.slice(0, 5);
}
}; | {
"domain": "codereview.stackexchange",
"id": 45005,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, search",
"url": null
} |
javascript, search
Answer: It seems a bit odd that you've defined named capturing groups in your regular expression but then you don't use them because you only use it with the test method, and retrieve the different groups through a subsequent use of split. You could leverage these groups to retrieve the particular town and state values from your search query, since you're already matching your regular expression against it anyway.
(Thanks for using them though, because I didn't recognise the syntax and learned something by looking it up)
There's also some work you could do up front. Currently, you're retrieving your search index and constructing your regular expression fresh each time your function is called. If your search index might change from call to call, then that makes sense, but since retrieving it isn't asynchronous I'm guessing it's a static object so you could move it outside your function and cache the result. You can also call trim the state part just once instead of each time you check if your query matches against an entry in your search index.
As a minor nitpick, I'd recommend using const instead of let for variables with values that never change. This can lower the cognitive load when reading your code, since once you've seen how a variable was defined you typically don't have to keep in mind whether or not it's been modified since then. Really it is a question of code style, but I find it a useful strategy for keeping code clear and readable, and it is a common linting rule.
I think the most impactful way this function could be improved, though, is by not using filter and then slice in order to limit the number of results you're returning. Using this approach means you're still checking your search query against every entry in your index, even if you hit your maximum number of results to return very early on in your search. Instead, you can use a loop to fill up an array of results only until you hit the maximum, at which point you can break out of the loop. | {
"domain": "codereview.stackexchange",
"id": 45005,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, search",
"url": null
} |
javascript, search
Since you don't seem to have any sort of relevance score you're sorting by, just finding the first results based on the order of entries in your search index, you don't need to match against the entire index before you decide which results to return.
Because your strategy for collecting results is the same for both matching strategies, I think it would be useful to move that branch in your code so it directly affects just the matching strategy. To do that, I've defined an isMatch function that changes based on the structure of the input, and then gets called the same way regardless when the results are being collected. Reducing how deeply nested your code gets can also help with reducing mental load when reading it, and with this approach it's clearer how much the form of the input affects the way the function works - once the matching strategy has been determined, the input's format isn't checked anymore.
I've added a few JSDoc comments, mainly because I'm predominantly a TypeScript developer and I find type definitions useful. But documentation in general is also helpful, and you may like to extend these comments with descriptions, particularly on your searchTownsAndStates function itself if it's likely to be made available elsewhere in your codebase and/or called by other developers. For example, you could describe how the input argument is expected to be structured, or you could use the @example tag to provide an example of how this function could be called.
JSDoc will get picked up automatically by a lot of IDEs, such as VS code, and will provide hover tooltips. Including type information can also help your IDE provide more useful autocomplete suggestions. | {
"domain": "codereview.stackexchange",
"id": 45005,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, search",
"url": null
} |
javascript, search
I've also pulled the magic number 5 out into an optional parameter maxResults, but if this shouldn't be configurable on the function you could have it as a const variable instead. Even if it never changes, defining magic numbers like this as configuration higher up can make it easier to see at a glance how something behaves, rather than having it defined within the implementation itself.
/**
* @typedef {Object} TownAndStateIndexEntry
*
* @property {string} town
* @property {string} state
*/ | {
"domain": "codereview.stackexchange",
"id": 45005,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, search",
"url": null
} |
javascript, search
/**
* @type {Array<TownAndStateIndexEntry>}
*/
const townsAndStates = getRawTownsAndStates();
const queryPattern = /(?<town>[A-Z ]+),(?<state>[A-Z ]*)/gi;
/**
* @param {string} input
* @param {number} maxResults
*/
const searchTownsAndStates = (input, maxResults = 5) => {
const groups = input.match(queryPattern).groups;
const town = groups?.town;
const state = groups?.state?.trim(" ");
// Determine match strategy based on query structure
const isMatch = groups
? (row) => row.town.includes(town) && row.state.includes(state)
: (row) => row.town.includes(input) || row.state.includes(input);
// Collect results
const results = [];
for (const row of townsAndStates) {
if (isMatch(row)) {
results.push(row);
}
if (results.length >= maxResults) {
break;
}
}
return results;
};
``` | {
"domain": "codereview.stackexchange",
"id": 45005,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, search",
"url": null
} |
beginner, rust
Title: Another pig latinize rust implementation
Question: Yes, I know this exercise has been posted before, but I'm posting it again because I want feedback on why certain parts of my code aren't idiomatic. I'm new to rust, and my code works, but as I said, I'd like to have parts pointed out that could be implemented another way, and why my way is "wrong". Thx in advance!
I haven't given naming that much of a thought, many variables could probably be named better. I could also extract a lot of parts into their own functions, however I'm more gunning for advice on which kind of patterns are applicable to my solution, rather than how I could modularize my code more by simple extraction. I'm also not looking for "This is the best solution for this problem", I'm looking for "this part is 'wrong/unsafe/stupid' because of X, here is how to do it better".
pub fn pig_latinize(text: String) -> String {
let allowed_specials = ['.', ',', '?', '!'];
text.split_whitespace()
.map(|word_with_special_char| {
let annoying_character = allowed_specials
.iter()
.any(|special| word_with_special_char.ends_with(special.to_string().as_str()));
let mut suffix = "";
let word = if annoying_character {
let (word_without_special_char, special_char) = word_with_special_char
.split_at(word_with_special_char.len() - 1);
suffix = special_char;
word_without_special_char
} else {
word_with_special_char
}
.to_string();
pig_latin_single_word(word) + suffix
})
.collect::<Vec<_>>()
.join(" ")
} | {
"domain": "codereview.stackexchange",
"id": 45006,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, rust",
"url": null
} |
beginner, rust
fn pig_latin_single_word(word: String) -> String {
let mut word = word.clone();
let vowels = "aeiou".to_string();
let first_letter = word.remove(0);
let is_uppercase = first_letter.is_uppercase();
let mut lower_first_letter = first_letter.to_lowercase().to_string();
if vowels.contains(lower_first_letter.as_str()) {
word.insert(0, first_letter.clone());
lower_first_letter = "h".to_string();
}
word.push_str(format!("-{}ay", lower_first_letter).as_str());
if is_uppercase {
let first_letter = word.remove(0);
word.insert(0, first_letter.to_string().to_uppercase().remove(0));
}
word
}
Tests:
#[cfg(test)]
mod tests {
use super::*; | {
"domain": "codereview.stackexchange",
"id": 45006,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, rust",
"url": null
} |
beginner, rust
Tests:
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_pig_latin() {
let input = "This is a very complicated sentence. It is actually more than one sentence.".to_string();
let expected = "His-tay is-hay a-hay ery-vay omplicated-cay entence-say. It-hay is-hay actually-hay ore-may han-tay one-hay entence-say.".to_string();
let actual = pig_latinize(input);
assert_eq!(actual, expected);
}
#[test]
fn test_pig_latin_multiple_words() {
let input = "This is a very complicated sentence".to_string();
let expected = "His-tay is-hay a-hay ery-vay omplicated-cay entence-say".to_string();
let actual = pig_latinize(input);
assert_eq!(actual, expected);
}
#[test]
fn test_pig_latin_small1() {
let input = "This".to_string();
let expected = "His-tay".to_string();
let actual = pig_latin_single_word(input);
assert_eq!(actual, expected);
}
#[test]
fn test_pig_latin_small2() {
let input = "this".to_string();
let expected = "his-tay".to_string();
let actual = pig_latin_single_word(input);
assert_eq!(actual, expected);
}
#[test]
fn test_pig_latin_small3() {
let input = "one".to_string();
let expected = "one-hay".to_string();
let actual = pig_latin_single_word(input);
assert_eq!(actual, expected);
}
}
``` | {
"domain": "codereview.stackexchange",
"id": 45006,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, rust",
"url": null
} |
beginner, rust
Answer: The two things that jump out at me are:
Use &str Instead of String for Input Parameters
All other stringy types should convert to this efficiently. If you use String, you’ll often need to clone. Since cloning is expensive, Rust deliberately requires you to do it explicitly, and there will be a lot of .to_string() calls around. These are code smells.
If you were editing a String in place, or could move instead of copying, there would be a reason to prefer text: mut String to text: &str. As it is, though, you’re only forcing yourself to make extra copies of your test strings on the heap. Instead of,
let input = "this".to_string();
let expected = "his-tay".to_string();
let actual = pig_latin_single_word(input);
assert_eq!(actual, expected);
You should be able to write something like:
assert_eq!(&pig_latin_single_word("this"), "his-tay"); | {
"domain": "codereview.stackexchange",
"id": 45006,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, rust",
"url": null
} |
beginner, rust
You should be able to write something like:
assert_eq!(&pig_latin_single_word("this"), "his-tay");
Use Helper Functions
The implementation of the closure within .map looks idiomatic in its structure, but it’s long enough that you might want to move it to a nested helper function.
You likely want to split the input string into slices of consecutive alphabetic or non-alphabetic characters, and call pig_latin_single_word only on the alphabetic chunks, without making a deep copy of them. You can then take advantage of the fact that every alphabetic chunk is followed by a non-alphabetic chunk, and vice versa, because of how you’re splitting them.
Consider Building From Slices
A Pig-Latin string will be composed entirely of slices of the original string, plus a few const strings such as "ay" That is, if your original text is "this!", the Pig-Latin output ("his-tay!") will be &text[1..4], followed by "-". followed by &text[0..1], followed by "ay", followed by &text[4..5]. Each of these slices either has the same lifetime as text, or 'static lifetime.
One approach you might therefore take is to write a while loop, which starts with an empty String and builds the output string by appending each chunk in order. This is similar to how you would write an efficient Pig-Latin function in C++.
You might also build an iterator or Vec<&str> over the chunks of the Pig-Latin string. Write a function that turns an input chunk into one to four output chunks, and call it with .flat_map(). Once you have the iterator over output chunks, you could then .fold() over the output chunks, appending each one to a String initialized to String::new(). You don’t need to get that complicated to solve this problem, but it’s a handy technique to have in your toolbox. It comes in especially handy when you’re passing the chunks to a buffered I/O function. | {
"domain": "codereview.stackexchange",
"id": 45006,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, rust",
"url": null
} |
c
Title: K&R "The C Programming Language" exercise 1-13: histograms
Question:
Exercise 1-13. Write a program to print a histogram of the lengths of
words in its input. It is easy to draw the histogram with the bars
horizontal; a vertical orientation is more challenging.
The code I wrote:
#include <stdio.h>
#define MAXWLEN 35 /* assuming no word longer than 35 characters will appear in the input */
#define IN 0
#define OUT 1
int main()
{
int c, len, longest, state, chart[MAXWLEN];
for (int i = 0; i < MAXWLEN; ++i)
chart[i] = 0;
len = 0; /* length of the current word */
longest = 0; /* the longest word appearing in the input */
state = OUT; /* the state of being inside or outside a word */
while ((c = getchar()) != EOF)
{
if (c == ' ' || c == '\t' || c == '\n')
{
if (state == IN)
if (longest < ++chart[len-1])
longest = chart[len-1];
state = OUT;
len = 0;
}
else
{
state = IN;
++len;
}
}
char q[2][3] = {"", " "};
for (int i = longest; i; --i)
for (int j = (c = putchar('\n')) - c; j < MAXWLEN; ++j)
(i <= chart[j]) ? printf(" * ") : printf("%s", q[chart[j] > 0]);
putchar('\n');
for (int i = 1; i <= MAXWLEN; ++i)
if(chart[i-1])
printf("%2d ", i);
putchar('\n');
return 0;
}
Answer:
for (int j = (c = putchar('\n')) - c; is too clever to be good. You save one line of vertical space by the price of a reviewer's mental effort. Don't do this. Be explicit:
putchar('\n');
for (int j = 0; ....)
is much more readable.
Ditto for
(i <= chart[j]) ? printf(" * ") : printf("%s", q[chart[j] > 0]); | {
"domain": "codereview.stackexchange",
"id": 45007,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c",
"url": null
} |
c
As said in the comment, more functions please. There are clearly init_chart, fill_chart, and print_chart (every loop deserves to be a function).
getword is also nice to have. It lets you avoid a IN/OUT state machine.
c == ' ' || c == '\t' || c == '\n' is insufficient. At least there is \v (vertical tab) and \f (formfeed). Prefer isspace (don't forget to #include <ctype.h>. (I know that it the K&R times it was not available)
putchar may fail. Be prepared. | {
"domain": "codereview.stackexchange",
"id": 45007,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c",
"url": null
} |
java, console
Title: Simple vocabulary trainer based on flashcards
Question: Motivation
When I was young, I had to learn english in school and I was really bad. My father told me to write words on little cards, with a word on the one side and its foreign counterpart on the backside. These flashcards were in a box with 5 categories: 5 is very bad and 1 is very well trained. When i guessed a word correctly, it got moved one category up, and if not it got moved down (for example from 4 to 5).
I suddenly got interested in learning english and make notes in a csv about unknown words. I then searched for a simple vocabulary trainer that deals with csv and tried also anki but there was no program that I like, so I tried to make a simple cli tool for that.
What do you think about my code? And especially: Is my way of file handling state of art? I have to admit that I've done a lot without security features to keep that program small and simple.
About a code
I maybe later want to write a GUI program with Swing so I tried to write code that can later easily be adapted for the event handlers etc. The structure is a simple one: Objects of TrainingSession read a csv file and allows a user to be trained in a given category. When a session is over, the file gets properly updated. It maintaines a list of objects of Flashcard for that purpose. This class represents a flashcard and some additional information like example sentences, if given.
Latest Version (not to be covered here)
https://login.yoursecurecloud.de/d/2b731dee455b49d19913/
Main.java
import java.util.Scanner;
public class Main {
private static Scanner scanner = new Scanner(System.in);
public static void main(String[] args) {
System.out.print("Category: ");
int input = Integer.parseInt(scanner.nextLine());
TrainingSession session = new TrainingSession(input);
session.train();
}
} | {
"domain": "codereview.stackexchange",
"id": 45008,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, console",
"url": null
} |
java, console
TrainingSession.java
import java.util.List;
import java.util.ArrayList;
import java.util.Random;
import java.util.Scanner;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths; | {
"domain": "codereview.stackexchange",
"id": 45008,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, console",
"url": null
} |
java, console
public class TrainingSession {
private static Scanner scanner = new Scanner(System.in);
private static Random random = new Random();
private List<Flashcard> flashcards;
private List<Flashcard> otherFlashcards;
private String headlines;
public TrainingSession(int categoryToTrain) {
List<String> lines = null;
try {
lines = Files.readAllLines(new File("./flashcards.csv").toPath());
} catch (IOException e) {
e.printStackTrace();
}
// first line are headlines
headlines = lines.get(0);
lines.remove(0);
flashcards = new ArrayList<>();
otherFlashcards = new ArrayList<>();
for (String line : lines) {
String[] parts = line.split(";");
int categoryOfLine = Integer.parseInt(parts[4]);
parts[1] = parts[1].replace(", ", ",");
List<String> words = List.of(parts[1].split(","));
if (categoryOfLine == categoryToTrain)
flashcards.add(new Flashcard(parts[0], words, parts[2], parts[3], Integer.parseInt(parts[4])));
else
otherFlashcards.add(new Flashcard(parts[0], words, parts[2], parts[3], Integer.parseInt(parts[4])));
}
}
public void train() {
while (flashcards.size() > 0) {
Flashcard current = flashcards.get(random.nextInt(flashcards.size()));
System.out.println(current.getWord() + " (" + current.getWordClass() + ")");
if (!current.getExampleSentence().isEmpty())
System.out.println("Example Sentence: " + current.getExampleSentence());
System.out.print("Input: ");
String input = scanner.nextLine();
if (current.guess(input)) {
System.out.println("Correct!");
} else {
System.out.println("Wrong!");
}
flashcards.remove(current); | {
"domain": "codereview.stackexchange",
"id": 45008,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, console",
"url": null
} |
java, console
System.out.println("Wrong!");
}
flashcards.remove(current);
otherFlashcards.add(current);
System.out.println("Correct words: " + current.getWordsToLearn() + "\n");
}
// saveProgress
StringBuilder fileContent = new StringBuilder();
fileContent.append(headlines + "\n");
for (Flashcard flashcard : otherFlashcards) {
fileContent.append(flashcard.toCsvString() + "\n");
}
try {
Files.write(Paths.get("./flashcards.csv"), fileContent.toString().getBytes());
} catch (IOException e) {
e.printStackTrace();
}
}
} | {
"domain": "codereview.stackexchange",
"id": 45008,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, console",
"url": null
} |
java, console
Flashcard.java
import java.util.List;
public class Flashcard {
private String word;
private List<String> wordsToLearn;
private String wordClass;
private String exampleSentence;
private int category; // from 1 to 5. 1 = well trained, 5 = newly added for learning
public Flashcard(String word, List<String> wordsToLearn, String wordClass, String exampleSentence, int category) {
this.word = word;
this.wordsToLearn = wordsToLearn;
this.wordClass = wordClass;
this.exampleSentence = exampleSentence;
this.category = category;
}
public String getWord() {
return word;
}
public String getWordsToLearn() {
return String.join(", ", wordsToLearn);
}
public String getWordClass() {
return wordClass;
}
public String getExampleSentence() {
return exampleSentence;
}
public int getCategory() {
return category;
}
public boolean guess(String guess) {
boolean right = StaticFunctions.equalsOneOf(guess, wordsToLearn, true);
if (right && category > 1) {
category--;
} else {
if (category < 5) {
category++;
}
}
return right;
}
public String toCsvString() {
StringBuilder sb = new StringBuilder();
sb.append(word + ";");
sb.append(getWordsToLearn() + ";");
sb.append(wordClass + ";");
sb.append(exampleSentence + ";");
sb.append(category);
return sb.toString();
}
}
StaticFunctions.java
import java.util.List;
import java.util.ArrayList;
public abstract class StaticFunctions {
public static boolean equalsOneOf(String str, List<String> strList, boolean ignoreCase) {
for (String strFromList : strList) {
if (strFromList.equals(str)) {
return true;
}
}
return false;
}
} | {
"domain": "codereview.stackexchange",
"id": 45008,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, console",
"url": null
} |
java, console
flashcards.csv
Wort;Lernwort;Wortart;Beispielsatz;Kategorie
run;laufen, rennen;Verb;They run into the forrest.;3
home;Haus;Substantiv;I go home.;4
Answer: Overall looks and works well. Nice job!
Some minor things that could be improved...
Flashcard.java
Looks like everything except for the category field could be declared as final
Avoid string concatenation in a StringBuilder
Main.java
The scanner can be declared as final
StaticFunctions.java
Looks like the ArrayList class can be removed from the imports
Also looks like parameter ignoreCase isn't used
TrainingSession.java
Looks like the fields could be made final
Avoid string concatenation in a StringBuilder | {
"domain": "codereview.stackexchange",
"id": 45008,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, console",
"url": null
} |
python, hash-map, iteration
Title: Fast unpack of SAM flag bitfield to named symbols
Question: def sam_flag_explainer(flag:int):
"""
Intepret sam flag and make a flag-explained-dictionary.
"""
try:
# Convert sam flags to binary format, like 0101001
# flag is a integer.
flag_binary = "{0:b}".format(flag)
except Exception as err:
if int(flag) == flag:
flag = int(flag)
flag_binary = "{0:b}".format(flag)
else:
print(f"{flag=}")
raise ValueError from err
flag_template = {
"read_paired":False,
"read_mapped_in_proper_pair":False,
"read_unmapped":False,
"mate_unmapped":False,
"read_reverse_strand":False,
"mate_reverse_strand":False,
"first_in_pair":False,
"second_in_pair":False,
"not_primary_alignment":False,
"read_fails_platform/vendor_quality_checks":False,
"read_is_PCR_or_optical_duplicate":False,
"supplementary_alignment":False,
}
for k, f in zip(flag_template.keys(), flag_binary[::-1]): # should intepret the flag from back.
if int(f) == 1:
flag_template[k] = True
return flag_template
The code is the above.
Schematically, this function does:
Convert integer into binary number -> Read it from the back -> Modify the value of the key based on binary number element.
This function is called a lot of times in my program.
And this function is slow and it accounts for a large percentage of the overall execution time.
How could I make this faster? | {
"domain": "codereview.stackexchange",
"id": 45009,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, hash-map, iteration",
"url": null
} |
python, hash-map, iteration
Answer: I would prefer an implementation that is simple, built-in, type-safe, and explicit (the original implementation is arguably explicit, but not simple or type-safe; and whereas it does use built-ins it doesn't necessarily take advantage of the right ones).
This is faster, but if it isn't fast enough, you'd be in diminishing returns and need to re-evaluate your choice of using Python. Justin Chang's brute-force cache method does technically work and is quite fast, but is also higher-complexity, has less structure, and takes up 32 kB of memory; it's up to you to choose which trade-offs are more important.
__slots__ has at least three advantages - it improves class performance; it rejects arbitrary member assignment; and it gets the class halfway to type safety (defining which members are there, but not their types). Whereas it does require duplication of member names, as Matthieu M has suggested this can be reused to form _fields_ in runtime.
A note about type safety: sadly, the ctypes machinery doesn't really produce type-safe structs in a strict sense unless you go out of your way and add a hint for every member as e.g. read_paired: int. It's unclear if this is of value to you.
Also, having a / in a member name is not a great idea (even though it does technically work with ctypes), so I have replaced this with another underscore.
import ctypes
import struct
class SAMFlags(ctypes.LittleEndianStructure):
__slots__ = (
'read_paired',
'read_mapped_in_proper_pair',
'read_unmapped',
'mate_unmapped',
'read_reverse_strand',
'mate_reverse_strand',
'first_in_pair',
'second_in_pair',
'not_primary_alignment',
'read_fails_platform_vendor_quality_checks',
'read_is_PCR_or_optical_duplicate',
'supplementary_alignment',
)
_fields_ = [(name, ctypes.c_uint8, 1) for name in __slots__] | {
"domain": "codereview.stackexchange",
"id": 45009,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, hash-map, iteration",
"url": null
} |
python, hash-map, iteration
_fields_ = [(name, ctypes.c_uint8, 1) for name in __slots__]
def __init__(self, flags: int) -> None:
struct.pack_into('<H', self, 0, flags)
With this quick hack to do timing:
measures = []
for method in (SAMFlags, sam_flag_explainer_op):
for bits_set in range(13):
flags = (1 << bits_set) - 1
def run():
return method(flags)
for _ in range(1_000):
measures.append([
method.__name__,
bits_set,
timeit.timeit(stmt=run, number=100)/100,
])
df = pd.DataFrame(
data=measures,
columns=['method', 'bits', 'time']
)
seaborn.lineplot(data=df, x='bits', y='time', hue='method')
plt.show()
we get:
Unsurprisingly, the original method exhibits a clear O(n) in the number of bits set in the flag. | {
"domain": "codereview.stackexchange",
"id": 45009,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, hash-map, iteration",
"url": null
} |
c#, asp.net, authentication, session
Title: logging in using cookie authentication/authorization in asp.net C#
Question: How safe is Cookie based Authentication/Authorization in ASP.NET C#? Take a look at the example below, don't worry about password hashing, All this code does is that it takes a username and a password and lets a user login. After they login, I have multiple pages that use the role for authorization purposes. is this a secure/semi-secure method, or sufficient enough to prevent attacks and exploits against a web application? would you change anything in this code?
public async Task<IActionResult> OnPostAsync()
{
var user = await _context.UsersTableTest.FirstOrDefaultAsync(u => u.UserName == Username);
if (user != null && user.PasswordHash == Password)
{
var claims = new List<Claim>
{
new Claim(ClaimTypes.Name, user.UserName),
new Claim("UserDepartment", user.UserDepartment) // Assuming you have a Department property in the user model
};
var claimsIdentity = new ClaimsIdentity(
claims, CookieAuthenticationDefaults.AuthenticationScheme);
var authProperties = new AuthenticationProperties
{
// Set additional properties if needed
};
await HttpContext.SignInAsync(
CookieAuthenticationDefaults.AuthenticationScheme,
new ClaimsPrincipal(claimsIdentity),
authProperties);
return RedirectToPage("/Index"); // Redirect to a protected page
}
else
{
ModelState.AddModelError("", "Invalid login attempt.");
return Page();
}
}
Answer: var user = await _context.UsersTableTest.FirstOrDefaultAsync(u => u.UserName == Username); | {
"domain": "codereview.stackexchange",
"id": 45010,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c#, asp.net, authentication, session",
"url": null
} |
c#, asp.net, authentication, session
Answer: var user = await _context.UsersTableTest.FirstOrDefaultAsync(u => u.UserName == Username);
Please bear in mind that == operator are case sensitive. Depending on the database setup it might be okay to use like this. I just wanted to highlight under some circumstances it might return null due to case sensitivity.
if (user != null && user.PasswordHash == Password)
You have a giant if-else block inside your method. Both of them returns an IActionResult. So, either move the code from the else block to the same level as the if and then delete the empty else block. Or you can reverse the condition and guard only the error path, something like this
if (user == null || user.PasswordHash != Password)
{
ModelState.AddModelError("", "Invalid login attempt.");
return Page();
}
var claims = new List<Claim>
{
new Claim(ClaimTypes.Name, user.UserName),
new Claim("UserDepartment", user.UserDepartment)
};
...
return RedirectToPage("/Index");
BTW1: where do you define/receive the Password?
BTW2: why don't you pass the password hash as well to the linq query?
var claims = ...
var claimsIdentity = ...
var claimsPrincipal = ...
I haven't used the built-in authentication feature of ASP.NET for a while but I think you could rely on it (HttpContext.AuthenticateAsync) for this simple use case rather than hand crafting every single piece.
await HttpContext.SignInAsync(
You might need to set the Principal to the HttpContext as well (HttpContext.User = claimsPrincipal; prior to the call of SignInAsync. | {
"domain": "codereview.stackexchange",
"id": 45010,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c#, asp.net, authentication, session",
"url": null
} |
verilog, fpga, system-verilog
Title: Clock frequency meter module
Question: The module measures input clocks. It requires some reference clock. There can be from one to five input clocks to measure it. Output values are usual unsigned ones. As expected, it should be reset before starting. If it is used in Vivado, the user can choose the number of ports to show, depending on the number of input clocks to measure.
Example for Vivado block diagram:
Accuracy for 125MHz, 150MHz, 50MHz, 100MHz:
Top module:
`timescale 1ns / 1ps
module clock_meter_top #
(
parameter integer CLK_MHZ = 50,
parameter integer CLK_NUM = 1,
localparam integer MSR_CLK_VAL_WIDTH = 32
)
(
input logic clk,
input logic a_rst_n,
input logic msr_clk_0,
input logic msr_clk_1,
input logic msr_clk_2,
input logic msr_clk_3,
input logic msr_clk_4,
output logic [MSR_CLK_VAL_WIDTH - 1 : 0 ] msr_clk_val_0,
output logic [MSR_CLK_VAL_WIDTH - 1 : 0 ] msr_clk_val_1,
output logic [MSR_CLK_VAL_WIDTH - 1 : 0 ] msr_clk_val_2,
output logic [MSR_CLK_VAL_WIDTH - 1 : 0 ] msr_clk_val_3,
output logic [MSR_CLK_VAL_WIDTH - 1 : 0 ] msr_clk_val_4
);
logic msr_clk [CLK_NUM - 1: 0];
logic [MSR_CLK_VAL_WIDTH - 1 : 0 ] msr_clk_val [CLK_NUM - 1: 0]; | {
"domain": "codereview.stackexchange",
"id": 45011,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "verilog, fpga, system-verilog",
"url": null
} |
verilog, fpga, system-verilog
genvar i;
generate
always_comb
begin
msr_clk[0] = msr_clk_0;
msr_clk_val_0 = msr_clk_val[0];
end
if (CLK_NUM > 1)
begin
always_comb
begin
msr_clk[1] = msr_clk_1;
msr_clk_val_1 = msr_clk_val[1];
end
end
if (CLK_NUM > 2)
begin
always_comb
begin
msr_clk[2] = msr_clk_2;
msr_clk_val_2 = msr_clk_val[2];
end
end
if (CLK_NUM > 3)
begin
always_comb
begin
msr_clk[3] = msr_clk_3;
msr_clk_val_3 = msr_clk_val[3];
end
end
if (CLK_NUM > 4)
begin
always_comb
begin
msr_clk[4] = msr_clk_4;
msr_clk_val_4 = msr_clk_val[4];
end
end
for (i = 0; i < CLK_NUM; i = i + 1)
begin : gen_clock_meters
clock_meter #
(
.CLK_MHZ (CLK_MHZ)
)
clock_meter_inst
(
.clk_i (clk ),
.a_rst_n_i (a_rst_n ),
.msr_clk_i (msr_clk[i] ),
.msr_clk_val_o (msr_clk_val[i])
);
end
endgenerate
endmodule
Module:
`timescale 1ns / 1ps | {
"domain": "codereview.stackexchange",
"id": 45011,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "verilog, fpga, system-verilog",
"url": null
} |
verilog, fpga, system-verilog
endmodule
Module:
`timescale 1ns / 1ps
module clock_meter #
(
parameter integer CLK_MHZ = 100,
localparam integer MSR_CLK_VAL_WIDTH = 32
)
(
input logic clk_i,
input logic a_rst_n_i,
input logic msr_clk_i,
output logic [MSR_CLK_VAL_WIDTH - 1 : 0] msr_clk_val_o
);
localparam integer REF_CNT_NUM = 10000000;
localparam integer REF_CNT_WIDTH = $clog2(REF_CNT_NUM);
localparam integer DECIM_CLK_MHZ = CLK_MHZ / 10;
logic [REF_CNT_WIDTH - 1 : 0] ref_cnt;
logic [MSR_CLK_VAL_WIDTH - 1 : 0] msr_cnt;
logic [MSR_CLK_VAL_WIDTH - 1 : 0] msr_clk_val;
logic msr_cnt_rst;
logic [MSR_CLK_VAL_WIDTH - 1 : 0] gray_msr_cnt;
logic [MSR_CLK_VAL_WIDTH - 1 : 0] res_msr_cnt;
integer i; | {
"domain": "codereview.stackexchange",
"id": 45011,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "verilog, fpga, system-verilog",
"url": null
} |
verilog, fpga, system-verilog
always_ff @(posedge clk_i)
begin
if (a_rst_n_i == 1'b0)
begin
ref_cnt <= 'h0;
end
else
begin
if (ref_cnt == (REF_CNT_NUM - 1))
begin
ref_cnt <= 'h0;
end
else
begin
ref_cnt <= ref_cnt + 1'b1;
end
end
end
always_ff @(posedge clk_i)
begin
if (a_rst_n_i == 1'b0)
begin
msr_cnt_rst <= 'h0;
end
else
begin
if (ref_cnt == (REF_CNT_NUM - 1))
begin
msr_cnt_rst <= 'h1;
end
else
begin
msr_cnt_rst <= 1'b0;
end
end
end
always_ff @(posedge msr_clk_i)
begin
if (a_rst_n_i == 1'b0)
begin
gray_msr_cnt <= 'h0;
msr_cnt <= 'h0;
end
else
begin
if (msr_cnt_rst == 1'b1)
begin
msr_cnt <= 'h0;
gray_msr_cnt <= 'h0;
end
else
begin
msr_cnt <= msr_cnt + 1'b1;
for(i = 0; i < MSR_CLK_VAL_WIDTH - 1; i = i + 1)
begin
gray_msr_cnt[i] <= msr_cnt[i+1] ^ msr_cnt[i];
end
gray_msr_cnt[MSR_CLK_VAL_WIDTH - 1] <= msr_cnt[MSR_CLK_VAL_WIDTH - 1];
end
end
end
always_ff @(posedge clk_i)
begin
if (a_rst_n_i == 1'b0)
begin
res_msr_cnt <= 'h0;
end
else if (ref_cnt == (REF_CNT_NUM - 1))
begin
for(i = 0; i < MSR_CLK_VAL_WIDTH; i = i + 1)
res_msr_cnt[i] <= ^(gray_msr_cnt >> i);
end
end
always_ff @(posedge clk_i)
begin
if (a_rst_n_i == 1'b0)
begin
msr_clk_val <= 'h0;
end
else
begin
msr_clk_val <= DECIM_CLK_MHZ * res_msr_cnt; | {
"domain": "codereview.stackexchange",
"id": 45011,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "verilog, fpga, system-verilog",
"url": null
} |
verilog, fpga, system-verilog
end
else
begin
msr_clk_val <= DECIM_CLK_MHZ * res_msr_cnt;
end
end
always_comb
begin
msr_clk_val_o = msr_clk_val;
end | {
"domain": "codereview.stackexchange",
"id": 45011,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "verilog, fpga, system-verilog",
"url": null
} |
verilog, fpga, system-verilog
endmodule
TB:
`timescale 1ps / 1ps
module clock_meter_tb;
localparam integer MSR_CLK_VAL_WIDTH = 32;
localparam integer CLOCK_100_00_PS = 10000; //ps == 10MHz
localparam integer CLOCK_184_32_PS = 5425; //ps == 184.32Hz
localparam integer CLOCK_368_64_PS = 2712; //ps == 368.64MHz
localparam integer TEST_ITER_NUM = 1000000000;
reg clk_100_00 = 'b0;
reg a_rst_n = 'b0;
reg msr_clk_184_32 = 'b0;
reg msr_clk_368_64 = 'b0;
wire [MSR_CLK_VAL_WIDTH - 1 : 0] msr_clk_val_184_32;
wire [MSR_CLK_VAL_WIDTH - 1 : 0] msr_clk_val_368_64;
clock_meter #
(
.CLK_MHZ (100)
)
clock_meter_dut_184_32
(
.clk_i (clk_100_00 ),
.a_rst_n_i (a_rst_n ),
.msr_clk_i (msr_clk_184_32 ),
.msr_clk_val_o (msr_clk_val_184_32)
);
clock_meter #
(
.CLK_MHZ (100)
)
clock_meter_dut_368_64
(
.clk_i (clk_100_00 ),
.a_rst_n_i (a_rst_n ),
.msr_clk_i (msr_clk_368_64 ),
.msr_clk_val_o (msr_clk_val_368_64)
);
always
begin
#(CLOCK_100_00_PS / 2) clk_100_00 = !clk_100_00;
end
always
begin
#(CLOCK_184_32_PS / 2) msr_clk_184_32 = !msr_clk_184_32;
end
always
begin
#(CLOCK_368_64_PS / 2) msr_clk_368_64 = !msr_clk_368_64;
end
initial
begin
a_rst_n <= 'b0;
@(posedge clk_100_00);
a_rst_n <= 'b1;
@(posedge clk_100_00);
repeat(TEST_ITER_NUM)
begin
@(posedge clk_100_00);
end
$stop();
end
endmodule | {
"domain": "codereview.stackexchange",
"id": 45011,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "verilog, fpga, system-verilog",
"url": null
} |
verilog, fpga, system-verilog
endmodule
Answer: Generally speaking, the code has consistent layout, is easy to understand, uses descriptive variable names and makes good use of parameters for constant values.
You have a good separation between sequential logic and combinational logic, and you take advantage of the additional automatic code check offered by the SystemVerilog always_ff and always_comb keywords. These keywords also convey design intent well.
However, I get compile errors with 2 different simulators due to the integer i declaration in the clock_meter module. Perhaps your simulator is more forgiving. Regardless, to make your code more portable, I recommend declaring the iterator variable locally within each for loop.
You can simplify the for loop iterator using the increment operator, ++.
Change all:
i = i + 1
to
i++
As an example, for loop, change:
for(i = 0; i < MSR_CLK_VAL_WIDTH - 1; i = i + 1)
to:
for (int i = 0; i < (MSR_CLK_VAL_WIDTH - 1); i++)
I also added an extra set of parentheses to avoid potential operator precedence issues and to make the code easier to read.
Use underscores to make large integer values easier to read:
localparam integer TEST_ITER_NUM = 1000000000;
to:
localparam integer TEST_ITER_NUM = 1_000_000_000;
The `timescale compiler directive is not needed for the clock_meter module since there are no delays specified. Also, it can be confusing if you use multiple different time units: 1ps vs. 1ns. It is better to stick to one.
In the testbench (clock_meter_tb), consider replacing the reg types (4-state) with bit types (2-state) for driving the DUT input ports. It is rare to drive the inputs with x or z, and 2-state has simulation performance benefits. bit types default to 0, so there is no need to initialize them (but this is a preference)
bit clk_100_00 ;
bit a_rst_n ;
bit msr_clk_184_32;
bit msr_clk_368_64; | {
"domain": "codereview.stackexchange",
"id": 45011,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "verilog, fpga, system-verilog",
"url": null
} |
java, event-handling, statistics, gui, javafx
Title: A JavaFX program to find out the mouse refresh rate
Question: (See the next version here.)
Intro
I have this tiny JavaFX program that shows a 800x600 pixel canvas. You are supposed to drag the mouse within the canvas. When you are done dragging, the program will print in the standard output the number of frames per second.
For example,
my system prints
196 frames in 1566 milliseconds, refresh rate of 125.15964240102171 Hz.
Code
com.github.coderodde.javafx.mouseupdaterate.MouseUpdateRateFinder.java:
package com.github.coderodde.javafx.mouseupdaterate;
import javafx.application.Application;
import javafx.event.EventHandler;
import javafx.event.EventType;
import javafx.scene.Group;
import javafx.scene.Scene;
import javafx.scene.canvas.Canvas;
import javafx.scene.input.MouseEvent;
import javafx.stage.Stage;
public final class MouseUpdateRateFinder extends Application {
private static final int SCREEN_WIDTH = 800;
private static final int SCREEN_HEIGHT = 600;
public static void main(String[] args) {
launch(args);
}
@Override
public void start(Stage stage) throws Exception {
Group root = new Group();
Canvas canvas = new Canvas(SCREEN_WIDTH, SCREEN_HEIGHT);
canvas.addEventHandler(MouseEvent.ANY, new MyMouseListener());
root.getChildren().add(canvas);
Scene scene = new Scene(root);
stage.setScene(scene);
stage.show();
}
private static final class MyMouseListener implements EventHandler<MouseEvent> { | {
"domain": "codereview.stackexchange",
"id": 45012,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, event-handling, statistics, gui, javafx",
"url": null
} |
java, event-handling, statistics, gui, javafx
private long mouseButtonEventStart;
private int frameCounter;
@Override
public void handle(MouseEvent t) {
EventType<? extends MouseEvent> eventType = t.getEventType();
if (eventType.equals(MouseEvent.MOUSE_PRESSED)) {
mouseButtonEventStart = System.currentTimeMillis();
frameCounter = 0;
} else if (eventType.equals(MouseEvent.MOUSE_RELEASED)) {
long duration = System.currentTimeMillis()
- mouseButtonEventStart;
System.out.println(
frameCounter
+ " frames in "
+ duration
+ " milliseconds, refresh rate of "
+ computeRefreshRate(duration, frameCounter)
+ " Hz.");
} else if (eventType.equals(MouseEvent.MOUSE_DRAGGED)) {
frameCounter++;
}
}
private double computeRefreshRate(long duration, int frameCounter) {
double t = ((double) duration) / 1000.0;
return ((double) frameCounter) / t;
}
}
}
Critique request
Is there any room for improvement? Please tell me anything. | {
"domain": "codereview.stackexchange",
"id": 45012,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, event-handling, statistics, gui, javafx",
"url": null
} |
java, event-handling, statistics, gui, javafx
Critique request
Is there any room for improvement? Please tell me anything.
Answer: Your start doesn't actually throw any checked exceptions, so remove throw Exception (don't worry; this doesn't break override compatibility).
Writing an "everything handler" that then switches on event type is an anti-pattern; just register three separate handlers. You can use the new(ish) Java method reference sugar for this purpose. It's not strictly necessary to have a separate class but it's probably a good idea to keep it out of separation of concerns.
Might as well use nanoTime() instead of currentTimeMillis() for higher precision.
Avoid repeated-concatenation formatting; instead just use printf. When you do this, you should limit floating-point precision in your output.
Suggested
package com.github.coderodde.javafx.mouseupdaterate;
import javafx.application.Application;
import javafx.scene.Group;
import javafx.scene.Scene;
import javafx.scene.canvas.Canvas;
import javafx.scene.input.MouseEvent;
import javafx.stage.Stage;
public final class MouseUpdateRateFinder extends Application {
private static final int SCREEN_WIDTH = 800;
private static final int SCREEN_HEIGHT = 600;
public static void main(String[] args) {
launch(args);
}
@Override
public void start(Stage stage) {
Group root = new Group();
MyMouseListener listener = new MyMouseListener();
Canvas canvas = new Canvas(SCREEN_WIDTH, SCREEN_HEIGHT);
canvas.addEventHandler(MouseEvent.MOUSE_PRESSED, listener::handlePressed);
canvas.addEventHandler(MouseEvent.MOUSE_DRAGGED, listener::handleDragged);
canvas.addEventHandler(MouseEvent.MOUSE_RELEASED, listener::handleReleased);
root.getChildren().add(canvas);
stage.setScene(new Scene(root));
stage.show();
}
private static final class MyMouseListener {
private long dragStartNs;
private int frameCounter; | {
"domain": "codereview.stackexchange",
"id": 45012,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, event-handling, statistics, gui, javafx",
"url": null
} |
java, event-handling, statistics, gui, javafx
public void handlePressed(MouseEvent t) {
dragStartNs = System.nanoTime();
frameCounter = 0;
}
public void handleDragged(MouseEvent t) {
frameCounter++;
}
public void handleReleased(MouseEvent t) {
double duration = 1e-9*(System.nanoTime() - dragStartNs);
System.out.printf(
"%d frames in %.2f s, refresh rate of %.1f Hz%n",
frameCounter, duration, frameCounter / duration);
}
}
}
Output
155 frames in 1.61 s, refresh rate of 96.0 Hz
184 frames in 1.46 s, refresh rate of 126.2 Hz | {
"domain": "codereview.stackexchange",
"id": 45012,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, event-handling, statistics, gui, javafx",
"url": null
} |
programming-challenge, haskell, trie
Title: Autocomplete system with prefix tree
Question: I am quite new to Haskell, and this problem is from dailycodingproblem.com:
Implement an autocomplete system.
That is, given a query string s and a set of all possible query strings,
return all strings in the set that have s as a prefix.
For example, given the query string de and the set of strings [dog, deer, deal], return [deer, deal].
Hint: Try preprocessing the dictionary into a more efficient data structure to speed up queries.
import Data.Map.Strict (Map)
import qualified Data.Map.Strict as Map
import Data.HashSet (HashSet)
import qualified Data.HashSet as HashSet
data Trie = Trie {children :: Map Char Trie, wordsWithPrefix :: HashSet String} deriving Show
emptyTrie :: Trie
emptyTrie = Trie Map.empty HashSet.empty
insertWord :: Trie -> String -> Trie
insertWord trie [] = trie
insertWord (Trie childs wwp) (x:xs) = Trie newChildren newWordsWithPrefix
where
childTrie = Map.findWithDefault emptyTrie x childs
newChildTrie = insertWord childTrie xs
newChildren = Map.insert x newChildTrie childs
newWordsWithPrefix = HashSet.insert (x:xs) wwp
searchPrefix :: Trie -> String -> HashSet String
searchPrefix trie [] = wordsWithPrefix trie
searchPrefix trie (x:xs) =
case Map.lookup x (children trie) of
Nothing -> HashSet.empty
Just child -> HashSet.map (x:) (searchPrefix child xs)
makeTrie :: [String] -> Trie
makeTrie wordSet = foldl insertWord emptyTrie wordSet
autocomplete :: [String] -> String -> [String]
autocomplete wordSet s = HashSet.toList $ searchPrefix (makeTrie wordSet) s
It works, but I'm not sure about the performance and less sure that it aligns with best practices. | {
"domain": "codereview.stackexchange",
"id": 45013,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "programming-challenge, haskell, trie",
"url": null
} |
programming-challenge, haskell, trie
It works, but I'm not sure about the performance and less sure that it aligns with best practices.
Answer: My first instinct here was to not respond, on the basis that there's not much to add. I'm not about to set up benchmarking to measure the performance, and the code is fine idiomatic haskell. Specifically, there's just small set of functions that do obvious things and combine in obvious ways. What's not to like?
Except...
The nature of a Trie is that the data is encoded in the data-structure's structure. So why are you storing it in duplicate in the wordsWithPrefix field? Our instinct is to store things in just one way; whether it's more efficient or not, it avoids conflicts between the different storage systems.
Of course it's not obvious else we should be doing it, and there may be performance advantages to your strategy. Do we want a separate Leaf String constructor for when there's only one suffix going forward from a node? How do we distinguish the Tries of ["abc"] and ["ab", "abc"]? Of course we could (and really, we probably should) just use something from the tries package, but if we're going to roll our own we should first try to have as few moving parts as possible. I came up with
data Trie = Occupied (Map Char Trie) | UnOccupied (Map Char Trie)
Take a moment to notice a semantic detail implicit in this declaration: emptyTrie is obviously UnOccupied Map.empty, which is different from Occupied Map.empty; therefore insertWord trie [] is not trie!
Of course having written that, I immediately abstracted out the Char in favor of data Trie a = ..., and after working with it for a minute I switched to the less-pretty but more-user-friendly
data Trie a = Trie { children :: Map Char (Trie a), occupied :: Bool } | {
"domain": "codereview.stackexchange",
"id": 45013,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "programming-challenge, haskell, trie",
"url": null
} |
programming-challenge, haskell, trie
insertWord is doing most of your work, so let's focus on that for a moment. Using {-# LANGUAGE NamedFieldPuns #-} and record syntax, I get
insertWord :: Trie a -> [a] -> Trie a
insertWord trie [] = trie{occupied=True}
insertWord trie@Trie{children} (x:xs) = trie{children=newChildren}
where childTrie = Map.findWithDefault emptyTrie x children
newChildTrie = insertWord childTrie xs
newChildren = Map.insert x newChildTrie children
What else do I find as I go?
searchPrefix should probably return a List; the conversion will need to happen someplace and doing it here makes some stuff simpler.
Using mempty instead of the various things it can stand for saves the need to changes stuff as your implementation changes, and is often more succinct.
Many data structures have a singleton option to go with empty; might as well give Trie one too.
Many data structures have a toList and fromList; you can use all of these interchangeably with import GHC.Exts (fromList, toList). Might as well implement IsList (Trie a) instead of makeTrie.
The presence of emptyTrie is suggestive of the Monoid class. Implement Semigroup and Monoid for Trie.
The payoff of implementing all this stuff for Trie is, in addition to making your data type more generally useful, that you can now implement insertWord and searchPrefix succinctly in terms of class operations.
Rename emptyTrie and insertWord to empty and insert for consistency with Map and other comparable data structures.
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE TypeFamilies #-}
module Main where
import Data.List (sort)
import Data.Map.Strict (Map)
import qualified Data.Map.Strict as Map
import GHC.Exts (IsList(Item), fromList, toList)
data Trie a = Trie { children :: Map a (Trie a), occupied :: Bool } deriving Show
empty :: Trie a -- We could skip this in favor of mempty, but `Map` has its own empty which doesn't require `Ord`, so we'll follow their lead.
empty = Trie Map.empty False | {
"domain": "codereview.stackexchange",
"id": 45013,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "programming-challenge, haskell, trie",
"url": null
} |
programming-challenge, haskell, trie
singleton :: [a] -> Trie a
singleton [] = empty{occupied=True}
singleton (a:as) = empty{children = Map.singleton a $ singleton as}
instance (Ord a) => Semigroup (Trie a) where
t1 <> t2 = Trie {children = Map.unionWith (<>) (children t1) (children t2), occupied = occupied t1 || occupied t2}
instance (Ord a) => Monoid (Trie a) where mempty = empty
instance (Ord a) => IsList (Trie a) where
type Item (Trie a) = [a]
fromList = foldMap singleton
toList Trie{children, occupied} = [ [] | occupied] -- this syntax isn't well documented, but works fine and is kinda common.
++ (concatMap accumulate . toList . fmap toList $ children)
where accumulate (a, as) = (a:) <$> as -- Pretty sure we could replace this with some trainwreck based on `curry (:)`, but let's not.
insert :: (Ord a) => Trie a -> [a] -> Trie a
insert trie as = trie <> singleton as
search :: (Ord a) => Trie a -> [a] -> [[a]]
search trie [] = toList trie
search Trie{children} (x:xs) =
case Map.lookup x children of
Nothing -> mempty
Just child -> (x:) <$> search child xs
autocomplete :: [String] -> String -> [String]
autocomplete wordSet = search (fromList wordSet)
main :: IO ()
main = do
let ws = ["dog", "deer", "deal"]
let res = autocomplete ws "de"
print res
print $ sort res == sort ["deer", "deal"] | {
"domain": "codereview.stackexchange",
"id": 45013,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "programming-challenge, haskell, trie",
"url": null
} |
c++, asynchronous, thread-safety, c++20, boost
Title: C++ Readers-Writer Lock using Asio's Strands
Question: The locking mechanism is implemented in the class RW_Lock.
The saved_async_completion_handler structure is used in RW_Lock to enqueue completion handlers that can't be called right away.
The rest of the code completes a test-scenario program for the lock. A group of threads run the main io_context where the lock and test logic reside. An independent thread runs another context used for printing to the console. Readers and writers (that do nothing but wait a certain amount of time) are posted periodically.
Motivation: Well, I had a multi threaded server with some data structures that were heavily read, but written every once in a while and I needed to lock them while writing. All I found was the usual mutex-based approach, which blocks threads waiting. I thought that would be bad for performance and implemented this version using Asio's strand.
(Btw, if there already is a solution for that, could you point me at it?)
I think it works just fine. But synchronization is usually tricky to get right. Maybe I missed something? Is there anything considered bad practice? What can be improved?
The code and updates can be found in
GitHub
#include <thread>
#include <vector>
#include <queue>
#include <asio.hpp>
// only for debug
#include <iostream>
#include <iomanip>
#include <string>
// The io_context used for printing
asio::io_context cout_io_ctx; | {
"domain": "codereview.stackexchange",
"id": 45014,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, asynchronous, thread-safety, c++20, boost",
"url": null
} |
c++, asynchronous, thread-safety, c++20, boost
// The io_context used for printing
asio::io_context cout_io_ctx;
// Holds a completion handler, it's associated executor and a work_guard
// Provides constructors that automatically create the work guard
// The executor is obtained by calling `asio::get_associated_executor`
template <class Signature,
class Executor = asio::any_completion_executor,
class CompletionHandler = asio::any_completion_handler<Signature>>
struct saved_async_completion_handler
{
using signature = Signature;
using executor_type = Executor;
using work_guard_type = asio::executor_work_guard<executor_type>;
using completion_handler_type = CompletionHandler;
saved_async_completion_handler(executor_type ex, completion_handler_type && handler)
: m_ex(asio::get_associated_executor(handler, ex))
, m_handler(std::forward<completion_handler_type>(handler))
, m_work_guard(asio::make_work_guard(m_ex))
{}
template <class Handler>
saved_async_completion_handler(Handler && handler)
: m_ex(asio::get_associated_executor(handler))
, m_handler(std::forward<Handler>(handler))
, m_work_guard(asio::make_work_guard(m_ex))
{}
executor_type m_ex;
completion_handler_type m_handler;
work_guard_type m_work_guard;
};
template <asio::execution::executor Executor = asio::any_io_executor>
class RW_Lock : asio::noncopyable
{
public: // Type definitions
using executor_type = Executor;
// RAII
struct Read_Lock : asio::noncopyable
{
Read_Lock(RW_Lock & rw_lock) : m_rw_lock(&rw_lock) { m_rw_lock->begin_read(); }
Read_Lock(Read_Lock && other) : m_rw_lock(other.m_rw_lock) { other.m_rw_lock = nullptr; }
~Read_Lock(){ if (m_rw_lock) m_rw_lock->end_read(); }
RW_Lock * m_rw_lock;
}; | {
"domain": "codereview.stackexchange",
"id": 45014,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, asynchronous, thread-safety, c++20, boost",
"url": null
} |
c++, asynchronous, thread-safety, c++20, boost
RW_Lock * m_rw_lock;
};
// RAII
struct Write_Lock : asio::noncopyable
{
Write_Lock(RW_Lock & rw_lock) : m_rw_lock(&rw_lock) { m_rw_lock->begin_write(); }
Write_Lock(Write_Lock && other) : m_rw_lock(other.m_rw_lock) { other.m_rw_lock = nullptr; }
~Write_Lock(){ if (m_rw_lock) m_rw_lock->end_write(); }
RW_Lock * m_rw_lock;
};
// Completion signatures for the interface async functions
using Read_Signature = void(Read_Lock);
using Write_Signature = void(Write_Lock);
public: // Constructor
RW_Lock(executor_type executor)
: m_lock_strand(executor)
{ }
public: // Interface
template <asio::completion_token_for<Read_Signature> CompletionToken>
auto
async_get_read_lock(CompletionToken && completion_token)
{
auto initiation = [&](asio::completion_handler_for<Read_Signature> auto && completion_handler)
{
auto f = [
&,
completion_handler = std::forward<decltype(completion_handler)>(completion_handler)
]() mutable
{
auto completion_executor = asio::get_associated_executor(completion_handler);
static_assert(!std::same_as<decltype(completion_executor), asio::system_executor>, "system executor not allowed. Bind executor to your completion handler");
if (readers_allowed())
{
asio::post(completion_executor,
[
completion_handler = std::forward<decltype(completion_handler)>(completion_handler),
rl = Read_Lock{*this}
] () mutable
{ completion_handler(std::move(rl)); });
}
else
{
m_pending_readers.push( { completion_executor, std::forward<decltype(completion_handler)>(completion_handler) } ); | {
"domain": "codereview.stackexchange",
"id": 45014,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, asynchronous, thread-safety, c++20, boost",
"url": null
} |
c++, asynchronous, thread-safety, c++20, boost
print_debug_info("enqueue_reader");
}
};
asio::dispatch(m_lock_strand, std::move(f));
};
return asio::async_initiate<CompletionToken, Read_Signature>
(std::move(initiation), completion_token);
}
template <asio::completion_token_for<Write_Signature> CompletionToken>
auto
async_get_write_lock(CompletionToken && completion_token)
{
auto initiation = [&](asio::completion_handler_for<Write_Signature> auto && completion_handler)
{
auto f = [
&,
completion_handler = std::forward<decltype(completion_handler)>(completion_handler)
]() mutable
{
auto completion_executor = asio::get_associated_executor(completion_handler);
static_assert(!std::same_as<decltype(completion_executor), asio::system_executor>, "system executor not allowed. Bind executor to your completion handler");
if (writers_allowed())
{
asio::post(completion_executor,
[
completion_handler = std::forward<decltype(completion_handler)>(completion_handler),
wlock = Write_Lock{*this}
] () mutable
{ completion_handler(std::move(wlock)); });
}
else
{
m_pending_writers.push( { completion_executor, std::forward<decltype(completion_handler)>(completion_handler) } );
print_debug_info("enqueue_writer");
}
};
asio::dispatch(m_lock_strand, std::move(f));
};
return asio::async_initiate<CompletionToken, Write_Signature>
(std::move(initiation), completion_token);
} | {
"domain": "codereview.stackexchange",
"id": 45014,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, asynchronous, thread-safety, c++20, boost",
"url": null
} |
c++, asynchronous, thread-safety, c++20, boost
public: // for debugging
auto get_active_reader_count() const { return m_reader_count; }
auto get_active_writer_count() const { return m_writer_count; }
auto get_queued_reader_count() const { return m_pending_readers.size(); }
auto get_queued_writer_count() const { return m_pending_writers.size(); }
void print_debug_info(std::string tag)
{
// This function only gets called from the lock strand
// but it dispatches the printing to a different context
// with a dedicated thread.
//
// This is done so that printing does not affect (much)
// the lock strand performance
//
// It also means we must take copies of changing values
// before dispatching to the printing thread
// Get elapsed time since the first time this function is called
static const auto start_time = std::chrono::steady_clock::now();
auto current_time = std::chrono::steady_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::duration<double>>(current_time - start_time);
// Copy lock counters
auto ar = get_active_reader_count();
auto aw = get_active_writer_count();
auto qr = get_queued_reader_count();
auto qw = get_queued_writer_count(); | {
"domain": "codereview.stackexchange",
"id": 45014,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, asynchronous, thread-safety, c++20, boost",
"url": null
} |
c++, asynchronous, thread-safety, c++20, boost
// NOTE: lock counters are copied again here, but this being a debug feature,
// the nicer looks and readability are preferred over inserting all function calls
// into the lambda capture or moving all copies there.
// Also, the compiler should be able to take care of that optimization by itself.
asio::dispatch(cout_io_ctx, [=, tag = std::move(tag)](){
std::cout << std::setw(16) << std::setprecision(10) << elapsed << " "
<< std::setw(16) << ("<" + tag + ">") << "\t"
<< "active r/w: " << ar << " / " << aw << "\t"
<< "queued r/w: " << qr << " / " << qw << std::endl;
});
}
private: // auxiliary functions
// Determine if readers can run now or should be enqueued
// Returns false if there are writers running or queued
bool readers_allowed() const
{
return m_writer_count == 0 && m_pending_writers.empty();
}
// Determine if writers can run now or should be enqueued
// Returns false if there are readers running
bool writers_allowed() const
{
return m_reader_count == 0 && m_writer_count == 0;
}
// Dispatches pending writers and readers, in that order
// Only dispatches readers/writers when allowed (functions above)
void dispatch_pending()
{
while (writers_allowed() && !m_pending_writers.empty())
{
auto [ex, handler, guard] = std::move(m_pending_writers.front());
m_pending_writers.pop();
print_debug_info("dequeue_writer");
asio::dispatch(ex, [h = std::move(handler), wl = Write_Lock{*this}]() mutable { h(std::move(wl)); });
guard.reset();
}
while (readers_allowed() && !m_pending_readers.empty())
{
auto [ex, handler, guard] = std::move(m_pending_readers.front());
m_pending_readers.pop();
print_debug_info("dequeue_reader"); | {
"domain": "codereview.stackexchange",
"id": 45014,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, asynchronous, thread-safety, c++20, boost",
"url": null
} |
c++, asynchronous, thread-safety, c++20, boost
print_debug_info("dequeue_reader");
asio::dispatch(ex, [h = std::move(handler), rl = Read_Lock{*this}]() mutable { h(std::move(rl)); });
guard.reset();
}
}
void begin_read()
{
// always called from the lock strand
++m_reader_count;
print_debug_info("begin_read");
}
void begin_write()
{
// always called from the lock strand
++m_writer_count;
print_debug_info("begin_write");
}
void end_read()
{
// this will be called without synchronization
// dispatch to lock strand
asio::dispatch(m_lock_strand, [&](){
--m_reader_count;
print_debug_info("end_read");
dispatch_pending();
});
}
void end_write()
{
// this will be called without synchronization
// dispatch to lock strand
asio::dispatch(m_lock_strand, [&](){
--m_writer_count;
print_debug_info("end_write");
dispatch_pending();
});
}
private: // members
asio::strand<executor_type> m_lock_strand;
int m_reader_count{};
int m_writer_count{};
std::queue<saved_async_completion_handler<Read_Signature>> m_pending_readers;
std::queue<saved_async_completion_handler<Write_Signature>> m_pending_writers;
};
// a fake reader that just waits
asio::awaitable<void> reader(asio::any_io_executor executor, RW_Lock<> & lock)
{
auto rl = co_await lock.async_get_read_lock(asio::use_awaitable);
// simulate busy reader that blocks the thread
// std::this_thread::sleep_for(std::chrono::milliseconds{200});
// simulate lightweight reader that doesn't hog the CPU
asio::steady_timer t{executor};
t.expires_after(std::chrono::milliseconds{200});
co_await t.async_wait(asio::use_awaitable);
} | {
"domain": "codereview.stackexchange",
"id": 45014,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, asynchronous, thread-safety, c++20, boost",
"url": null
} |
c++, asynchronous, thread-safety, c++20, boost
// a fake writer that just waits
asio::awaitable<void> writer(asio::any_io_executor executor, RW_Lock<> & lock)
{
auto wl = co_await lock.async_get_write_lock(asio::use_awaitable);
// simulate busy writer that blocks the thread
// std::this_thread::sleep_for(std::chrono::milliseconds{750});
// simulate lightweight writer that doesn't hog the CPU
asio::steady_timer t{executor};
t.expires_after(std::chrono::milliseconds{750});
co_await t.async_wait(asio::use_awaitable);
}
// a coroutine that posts new readers periodically
asio::awaitable<void> post_readers(asio::any_io_executor executor, RW_Lock<> & lock)
{
asio::steady_timer t{executor};
while (true)
{
t.expires_after(std::chrono::milliseconds{50});
co_await t.async_wait(asio::use_awaitable);
asio::co_spawn(executor, reader(executor, lock), asio::detached);
}
}
// a coroutine that posts new writers periodically
asio::awaitable<void> post_writers(asio::any_io_executor executor, RW_Lock<> & lock)
{
asio::steady_timer t{executor};
while (true)
{
t.cancel();
t.expires_after(std::chrono::milliseconds{1500});
co_await t.async_wait(asio::use_awaitable);
asio::co_spawn(executor, writer(executor, lock), asio::detached);
}
}
int main()
{
asio::io_context io_ctx;
RW_Lock<> lock{io_ctx.get_executor()};
// spawn coroutines that periodically spawn readers and writers
asio::co_spawn(io_ctx, post_readers(io_ctx.get_executor(), lock), asio::detached);
asio::co_spawn(io_ctx, post_writers(io_ctx.get_executor(), lock), asio::detached);
// prevent the printing thread from terminating when there is nothing to print
auto cout_work_guard = asio::make_work_guard(cout_io_ctx);
// start the printing thread
std::jthread cout_thread{ [&](){ cout_io_ctx.run(); } }; | {
"domain": "codereview.stackexchange",
"id": 45014,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, asynchronous, thread-safety, c++20, boost",
"url": null
} |
c++, asynchronous, thread-safety, c++20, boost
// run the main io_context in multiple threads
std::vector<std::jthread> threads;
threads.resize(3);
for (auto & x : threads)
x = std::jthread{ [&io_ctx](){ io_ctx.run(); } };
io_ctx.run();
cout_work_guard.reset();
}
Answer: Thread safety issues
The classes Read_Lock and Write_Lock are public, and have public constructors. Someone could mistake this for being the public API, and think these classes can be used like std::shared_lock and std::unique_lock.
If you do that, and if you have multiple threads calling io_ctx.run(), then you can have multiple threads calling member functions of your class RW_Lock. Then multiple threads could end up calling begin_read() at the same time. Even this line:
++m_reader_count; | {
"domain": "codereview.stackexchange",
"id": 45014,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, asynchronous, thread-safety, c++20, boost",
"url": null
} |
c++, asynchronous, thread-safety, c++20, boost
Is not thread-safe: internally, it has to load the value of m_reader_count in a register, increment it, and then write it back to memory. Thus it is not done in an atomic way. To make operations on an int atomic, just use std::atomic<int>. However, m_pending_readers is much harder to make atomic. You can use a std::mutex to protect against multiple threads modifying the state of a RW_Lock object.
As you mentioned, this was not intentional, and the user should use async_get_read_lock() and async_get_write_lock() instead. You can enforce that by making the constructors of Read_Lock and Write_Lock private, and only give RW_Lock access to them.
Do you really need this?
One of the advantages of coroutines or an io_context that is only running on one thread is that you don't have thread safety issues, as only one function is executing at any given time. So you don't need any locks in those situations.
If you do have multiple threads running, then you can use a regular std::shared_mutex to get a read-write lock. The only drawback is that you shouldn't co_await while holding a lock, because depending on how the io_context schedules things, this might cause a deadlock. If you do need to await something while holding a lock, I see two options:
Unlock the mutex while co_awaiting something. You of course have to assume something else can modify the shared resource you wanted to lock.
Spawn a task on a separate, single-threaded context to do things that modify the shared resource.
Simplify the interface
Many of Boost Asio's functions that need an executor to work allow you to pass objects that have a get_executor() function, so you don't have to manually call that yourself. Consider making it so you can write:
asio::io_context io_ctx;
RW_Lock<> lock{io_ctx};
asio::co_spawn(io_ctx, post_readers(io_ctx, lock), asio::detached);
asio::co_spawn(io_ctx, post_writers(io_ctx, lock), asio::detached); | {
"domain": "codereview.stackexchange",
"id": 45014,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, asynchronous, thread-safety, c++20, boost",
"url": null
} |
c++, asynchronous, thread-safety, c++20, boost
You might also want to add a get_executor() member function to class RW_Lock.
Prior art
There are non-Boost libraries that build on C++20's coroutines, some of them provide async reader-writer locks. For example, Josh Baldwin's libcoro has a coro::shared_mutex. The interface seems similar. You could look at its source code to see how the implementation is done.
Performance
One big issue with your implementation is that whenever you lock the RW_Lock, even if there is no contention, it will post a task to the m_lock_strand. That is not a trivial operation. If you have multiple threads running the strands, then I think this might also involve cross-thread synchronization to happen behind the scenes. Ideally, you only use m_lock_strand if there is contention.
Consider how std::mutex works: in the uncontended case, it is just a single atomic compare-exchange operation to lock the mutex. Only if this detects that the mutex was already locked, then a futex call is made to suspend the thread until some other thread unlocks the mutex. Using atomic operations in coroutines is safe, so you could take the same approach: have an atomic variable to keep track of whether the RW_Lock is locked, and use atomics to update this variable, and only in case of contention post a task to m_lock_strand to suspend the coroutine until another strand releases the lock. | {
"domain": "codereview.stackexchange",
"id": 45014,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, asynchronous, thread-safety, c++20, boost",
"url": null
} |
json, rust
Title: Update JSON object in outer object or insert a default one
Question: The below code is part of a program that, among other things, configures a Chromium profile:
#[cfg(target_family = "unix")]
use home::home_dir;
use rocket::serde::json::serde_json::Map;
use rocket::serde::json::{serde_json, Value};
use serde::Deserialize;
#[cfg(target_family = "windows")]
use std::env::var;
use std::fmt::{Debug, Display, Formatter};
use std::fs::{read_to_string, OpenOptions};
use std::io::Write;
use std::path::{Path, PathBuf};
use subprocess::{Popen, PopenConfig, Redirection};
#[cfg(target_family = "unix")]
const CHROMIUM_DEFAULT_PREFERENCES: &str = ".config/chromium/Default/Preferences";
#[cfg(target_family = "windows")]
const CHROMIUM_DEFAULT_PREFERENCES: &str = r"Google\Chrome\User Data\Default";
#[cfg(target_family = "unix")]
pub fn chromium_default_preferences() -> Option<PathBuf> {
home_dir().map(|home| home.join(CHROMIUM_DEFAULT_PREFERENCES))
}
#[cfg(target_family = "windows")]
pub fn chromium_default_preferences() -> Option<PathBuf> {
var("%LOCALAPPDATA%")
.map(PathBuf::from)
.map(|home| home.join(CHROMIUM_DEFAULT_PREFERENCES))
.ok()
}
#[allow(clippy::enum_variant_names)]
#[derive(Debug)]
pub enum Error {
SerdeError(serde_json::Error),
IoError(std::io::Error),
ChromiumDefaultPreferencesNotFound,
NotAJsonObject(&'static str),
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::SerdeError(error) => <serde_json::Error as Display>::fmt(error, f),
Self::IoError(error) => <std::io::Error as Display>::fmt(error, f),
Self::ChromiumDefaultPreferencesNotFound => {
write!(f, "Chrome / Chromium default preferences not found")
}
Self::NotAJsonObject(key) => write!(f, "not a JSON object: {key}"),
}
}
} | {
"domain": "codereview.stackexchange",
"id": 45015,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "json, rust",
"url": null
} |
json, rust
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Self::SerdeError(error) => Some(error),
Self::IoError(error) => Some(error),
_ => None,
}
}
}
impl From<std::io::Error> for Error {
fn from(error: std::io::Error) -> Self {
Self::IoError(error)
}
}
impl From<serde_json::Error> for Error {
fn from(error: serde_json::Error) -> Self {
Self::SerdeError(error)
}
}
#[derive(Debug, Deserialize, Eq, PartialEq)]
pub struct Config {
url: String,
}
impl Config {
#[must_use]
pub fn url(&self) -> &str {
self.url.as_str()
}
/// Applies the configuration to the system
/// # Errors
/// Returns an [`digsigctl::config::Error`] if the configuration could not be applied
pub fn apply(&self) -> Result<(), anyhow::Error> {
self.update()?;
reload()?;
Ok(())
}
fn update(&self) -> Result<(), Error> {
let filename =
chromium_default_preferences().ok_or(Error::ChromiumDefaultPreferencesNotFound)?;
let mut value = load(&filename)?;
let preferences = value
.as_object_mut()
.ok_or(Error::NotAJsonObject("preferences"))?;
if let Some(session) = preferences.get_mut("session") {
let session = session
.as_object_mut()
.ok_or(Error::NotAJsonObject("session"))?;
session.insert("startup_urls".to_string(), vec![self.url.clone()].into());
session.insert("restore_on_startup".to_string(), 4.into());
} else {
let mut session = Map::new();
session.insert("startup_urls".to_string(), vec![self.url.clone()].into());
session.insert("restore_on_startup".to_string(), 4.into());
preferences.insert("session".to_string(), Value::Object(session));
}
save(&filename, &value)
}
} | {
"domain": "codereview.stackexchange",
"id": 45015,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "json, rust",
"url": null
} |
json, rust
save(&filename, &value)
}
}
fn reload() -> subprocess::Result<Popen> {
Popen::create(
&["systemctl", "restart", "chromium.service"],
PopenConfig {
stdout: Redirection::None,
detached: false,
..Default::default()
},
)
}
fn load(filename: impl AsRef<Path>) -> Result<Value, Error> {
Ok(serde_json::from_str::<Value>(&read_to_string(filename)?)?)
}
fn save(filename: impl AsRef<Path>, value: &Value) -> Result<(), Error> {
Ok(OpenOptions::new()
.write(true)
.create(true)
.truncate(true)
.open(filename)?
.write_all(serde_json::to_string(value)?.as_bytes())?)
}
My biggest issue with the above code is the Config::update() method.
I have duplicate lines that set the session keys I want to override.
I don't know how to resolve this, since the else case handles the case in which the object does not exist in the outer container and thus needs to be created and inserted.
Therefore I need to move the newly created object into the container when all is done, at which point I can no longer have references to it.
So I cannot write something like
...
let mut map = Map::new();
let mut session = if let Some(session) = preferences.get_mut("session") {
session
.as_object_mut()
.ok_or(Error::NotAJsonObject("session"))?
} else {
preferences.insert("session".to_string(), Value::Object(map));
&mut map
};
session.insert("startup_urls".to_string(), vec![self.url.clone()].into());
session.insert("restore_on_startup".to_string(), 4.into());
... | {
"domain": "codereview.stackexchange",
"id": 45015,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "json, rust",
"url": null
} |
json, rust
error[E0382]: borrow of moved value: `map`
--> src/config.rs:112:13
|
104 | let mut map = Map::new();
| ------- move occurs because `map` has type `rocket::serde::json::serde_json::Map<std::string::String, rocket::serde::json::Value>`, which does not implement the `Copy` trait
...
111 | preferences.insert("session".to_string(), Value::Object(map));
| --- value moved here
112 | &mut map
| ^^^^^^^^ value borrowed here after move
Is there another way to de-duplicate those insert statements?
Update
I found a possible solution to de-duplicate the insert statements:
fn update(&self) -> Result<(), Error> {
let filename =
chromium_default_preferences().ok_or(Error::ChromiumDefaultPreferencesNotFound)?;
let mut value = load(&filename)?;
let preferences = value
.as_object_mut()
.ok_or(Error::NotAJsonObject("preferences"))?;
let default_session = Map::from_iter([
(
"startup_urls".to_string(),
Value::Array(vec![Value::String(self.url.clone())]),
),
("restore_on_startup".to_string(), Value::Number(4.into())),
]);
if let Some(session) = preferences.get_mut("session") {
session
.as_object_mut()
.ok_or(Error::NotAJsonObject("session"))?
.extend(default_session);
} else {
preferences.insert("session".to_string(), Value::Object(default_session));
}
save(&filename, &value)
}
If you have other improvements to suggest, they'll be much appreciated. | {
"domain": "codereview.stackexchange",
"id": 45015,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "json, rust",
"url": null
} |
json, rust
If you have other improvements to suggest, they'll be much appreciated.
Answer: After some more refactoring I handled the Chromium config updating as follows:
use super::error::Error;
use rocket::serde::json::serde_json::Map;
use rocket::serde::json::{serde_json, Value};
use std::convert::Into;
use std::fs::{read_to_string, OpenOptions};
use std::io::Write;
use std::path::Path;
use std::string::ToString;
/// Manage "Preferences" file of Chrome / Chromium webbrowsers
pub struct ChromiumPreferences(Value);
impl ChromiumPreferences {
/// Load preferences from the given file
///
/// # Errors
/// Returns an `[digsigctl::config::error::Error]` if the file could not be read or deserialized
pub fn load(filename: impl AsRef<Path>) -> Result<Self, Error> {
Ok(Self(serde_json::from_str::<Value>(&read_to_string(
filename,
)?)?))
}
/// Saves preferences to the given file
///
/// # Errors
/// Returns an `[digsigctl::config::error::Error]` if the file could not be written or serialized
pub fn save(&self, filename: impl AsRef<Path>) -> Result<(), Error> {
Ok(OpenOptions::new()
.write(true)
.create(true)
.truncate(true)
.open(filename)?
.write_all(serde_json::to_string(&self.0)?.as_bytes())?)
}
/// Updates the _session_ object or initializes it, if it is not present
///
/// # Errors
/// Returns an `[digsigctl::config::error::Error]` if the preferences file is corrupted
pub fn update_or_init_session(&mut self, url: &str) -> Result<(), Error> {
self.update_or_insert(
"session",
&[
(
"startup_urls".to_string(),
Value::Array(vec![Value::String(url.to_string())]),
),
("restore_on_startup".to_string(), Value::Number(4.into())),
],
)
} | {
"domain": "codereview.stackexchange",
"id": 45015,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "json, rust",
"url": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.