file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
ImpConcat-Recall.py | #!/usr/bin/env python
# coding: utf-8
# # fMRI Data Loading and Normalization in Python
# **V.0.2 - Beta, [Contributions](#contributions)**
#
# ### Goal of this script
# 1. load the fMRI data into python
# - 3 recall runs
# 2. create an average brain mask from multiple runs
# - ses01_brain (3 recall runs)
# 3. trim TRs from the beginning AND end of each run (and apply this trimming to the confounds as well)
# - save volume as _trimTRs.nii.gz
# 4. apply a high-pass filter and z-score the data
# - save volume as _trim_norm.nii.gz
# 5. concatenate runs to make one time series
# - recall, in a standardized order
# In[10]:
ipynby=0 #python notebook or not july 2
if ipynby==0:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-subject', type=str)
args = parser.parse_args()
print(args.subject)
subject=args.subject
if ipynby==1:
subject=2
# In[11]:
sub ='sub-0'+str(subject)
subS = str(int(subject))
ses = 'ses-01'
task='recall'
n_trunc=3 # Number of volumes to trim/truncate
print(sub)
#one thing about n_trunc=if set to 100, moves mstr back 2 and resets to zero.
#here you can see a nice hrf peaking around the time of movie onset bc score screen was ~ 4 s before
# In[12]:
#import packages
import warnings
import sys
if not sys.warnoptions:
warnings.simplefilter("ignore")
import time
import os
import shutil
import numpy as np
import pandas as pd
import nibabel as nib
from nilearn.input_data import NiftiMasker, MultiNiftiMasker
from nilearn.masking import intersect_masks
from nilearn.datasets import load_mni152_template
from nilearn import image
from nilearn.plotting import plot_roi
from nilearn.plotting import plot_anat
from nilearn.plotting import plot_epi
from nilearn.image.image import mean_img
from nilearn.image import resample_to_img
from scipy import stats
from sklearn import preprocessing
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
import scipy.io
from nipype.workflows.fmri.fsl.preprocess import create_susan_smooth
if ipynby==1:
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('autosave', '30')
# In[13]:
# load some helper functions and import data / settings
import ss_utils
from ss_utils import load_ss_epi_data, load_ss_mask, mask_data, load_data
from ss_utils import ss_dir, ss_bids_dir, ss_TR, ss_hrf_lag, run_names, n_runs
print('TASK:', task)
firstrun=1
lastrun=3
n_runs_recall = lastrun-firstrun+1
bold_dir=ss_bids_dir + 'derivatives/fmriprep/%s/%s/func/' % (sub, ses)
anat_dir=ss_bids_dir + '%s/%s/anat/' % (sub, ses)
anat_fmriprep_dir=ss_bids_dir + 'derivatives/fmriprep/%s//anat/' % sub
out_dir= ss_bids_dir + 'derivatives/firstlevel/%s/' % sub
mask_fold = ss_bids_dir + 'derivatives/firstlevel/%s/masks/' % sub
ses1_dir=ss_bids_dir + 'derivatives/fmriprep/%s/ses-01/func/' % sub
#load g_o
ss_tngs=9
analysis_dir=ss_dir+'analysis/'
nonfmri_dir=ss_dir+'data/nonfmri/%s/' %subS
mat_fname=nonfmri_dir+'g_o.mat'
mat_contents = scipy.io.loadmat(mat_fname)
g_o = mat_contents['g_o']
mat_fname=nonfmri_dir+'MSMEhr.mat'
mat_contents = scipy.io.loadmat(mat_fname)
RSTR = mat_contents['RSTR'] #movie start TRs
RETR = mat_contents['RETR'] #movie end TRs
hrshiftval=5
fwhmval=5
print('bids dir = %s' % (ss_bids_dir))
print('anat dir = %s' % (anat_dir))
print('subject dir = %s' % (bold_dir))
print('output dir = %s' % (out_dir))
print('number of recall runs = %d' % (n_runs_recall))
print('number of games = %d' % (ss_tngs))
print('TR = %s seconds' % (ss_TR))
print('trim %d volumes from each run' % (n_trunc))
print('Game order = %s' % (g_o))
print('Recall start times = %s' % (RSTR))
print('Recall end times = %s' % (RETR))
# In[14]:
#Select confounds and trim volumes from confounds file
#Choose the desired confounds from the confounds_regressors.tsv file from fmriprep, trim the columns corresponding to trimmed volumes, and save as a .txt file.
starttime = time.time()
confounds=[]
confounds_all=[]
mc_all=[]
ntr=[]
ntr=np.zeros((n_runs_recall,1))
for r in range(firstrun,lastrun+1):
fname='_ses-01_task-recall_run-0%i_desc-confounds_regressors.tsv' % (r)
confounds = pd.read_csv(ses1_dir + sub + fname, sep='\t', header=(0))
confounds_selected=confounds[['trans_x','trans_y','trans_z','rot_x','rot_y','rot_z','framewise_displacement','a_comp_cor_00','a_comp_cor_01','a_comp_cor_02','a_comp_cor_03','a_comp_cor_04','a_comp_cor_05']][n_trunc:]
confounds_selected=pd.DataFrame(confounds_selected)
confounds_selected.to_csv(out_dir + 'ses-01/' + sub + '_ses-01_task-recall_run-0%i_confounds_selected.txt' % r, index=False, sep='\t', mode='w')
if 0==firstrun:
ntr[r]=confounds_selected.shape[0]
if 1==firstrun:
ntr[r-1]=confounds_selected.shape[0]
if r==firstrun:
confounds_all=confounds_selected
else:
confounds_all=np.vstack([confounds_all,confounds_selected])
print(confounds_selected.shape[0])
print(ntr)
print(sum(ntr[0]))
# In[15]:
mask_imgs=[]
for run in range(firstrun,lastrun+1):
mask_name = ses1_dir + sub + '_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz' % run
mask_imgs.append(mask_name)
template = load_mni152_template()
i=np.eye(3)*3
template =image.resample_img(template, target_affine=i)
# intersect 3 view brain masks
avg_mask=intersect_masks(mask_imgs, threshold=0.5, connected=True)
avg_mask = resample_to_img(avg_mask, template)
thresha=avg_mask.dataobj>-10000
thresh=avg_mask.dataobj>0.5
avg_mask.dataobj[thresha] = 0
avg_mask.dataobj[thresh] = 1
if ipynby==1:
crange=1
plt.figure(figsize=(16,10))
this_img = avg_mask.dataobj[50,:,:];
plt.imshow(this_img,cmap="viridis",vmin=0,vmax=crange,origin='lower',interpolation='none',aspect="auto")
cbar = plt.colorbar()
dimsize=avg_mask.header.get_zooms()
affine_mat = avg_mask.affine
print(affine_mat)
coords = np.where(avg_mask.get_fdata())
# In[16]:
#plot average brain????
t1_file = anat_fmriprep_dir + sub + '_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz'
print(t1_file)
t1_img = image.load_img(t1_file)
t1_img = resample_to_img(t1_img, template)
if ipynby==1:
plot_roi(avg_mask, bg_img=t1_img)
# Save the mask
output_name_mask = mask_fold + '%s_%s_brain.nii.gz' % (sub, ses)
'''hdr = avg_mask.header # get a handle for the .nii file's header
hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2]))
nib.save(avg_mask, output_name_mask)'''
# In[17]:
def mod_smooth(in_file, mask_file, fwhm, smooth_type):
|
# In[18]:
#truncate first n_trunc TRs
#confounds_trunc=confounds_selected[3:end]
epi_trunc=[]
#https://github.com/INCF/BrainImagingPipelines/blob/master/bips/workflows/gablab/wips/scripts/modular_nodes.py
print('Number of runs to concatenate:', n_runs_recall)
for run in range(firstrun,lastrun+1):#lastrun+1
out_smooth=(out_dir + 'ses-01/' + '%s_ses-01_task-recall9_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval))
if os.path.exists(out_smooth):
proceeeeed=[]
epi_data=nib.load(out_smooth)
epi_data=resample_to_img(epi_data, template)# JWA, August 25 change
epi=epi_data.get_fdata()
#truncate
epi_trunc =np.zeros((epi_data.shape[0], epi_data.shape[1], epi_data.shape[2], epi_data.shape[3]-n_trunc))
epi_trunc[:, :, :, :] = epi[:,:,:,n_trunc:]
print(epi_data.shape, ' ', epi_trunc.shape)
dimsize=epi_data.header.get_zooms()
#print(dimsize)
orig_dimsize=dimsize
affine_mat = epi_data.affine # What is the orientation of the data
print(affine_mat)
else:
epi_file=ses1_dir + sub + '_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz' % run
epi_data=nib.load(epi_file)
epi_data=resample_to_img(epi_data, template)# JWA, August 25 change
epi=epi_data.get_fdata()
#truncate
epi_trunc =np.zeros((epi_data.shape[0], epi_data.shape[1], epi_data.shape[2], epi_data.shape[3]-n_trunc))
epi_trunc[:, :, :, :] = epi[:,:,:,n_trunc:]
print(epi_data.shape, ' ', epi_trunc.shape)
dimsize=epi_data.header.get_zooms()
#print(dimsize)
orig_dimsize=dimsize
affine_mat = epi_data.affine # What is the orientation of the data
print(affine_mat)
# Save the volume
output_name = (out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%dTRs.nii.gz' % (sub, run, n_trunc))
bold_nii = nib.Nifti1Image(epi_trunc, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2], dimsize[3]))
nib.save(bold_nii, output_name)
# smooth with susan
smoothed_file = mod_smooth(output_name,output_name_mask,fwhmval, 'susan')
#move file
in_smooth=(out_dir+'susan_smooth/smooth/mapflow/_smooth0/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%dTRs_smooth.nii.gz' % (sub, run, n_trunc))
#out_smooth=(out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval))
os.rename(in_smooth,out_smooth)
# ## Load fMRI data <a id="load_fmri"></a>
# #### Get voxels from an ROI
#
# We will extract BOLD data, only for voxels in a mask, by executing the following sequence of steps:
# 1. load whole brain fMRI data (for a given subject and a given run)
# 2. load the desired mask
# 3. use `NiftiMasker` to sub-select mask voxels from the whole brain data
# - `NiftiMasker` is a function from nilearn. Here's <a href="https://nilearn.github.io/auto_examples/04_manipulating_images/plot_mask_computation.html">an example</a> about how to use it, and here's the official <a href="https://nilearn.github.io/modules/generated/nilearn.input_data.NiftiMasker.html">documentation</a>.
# ## Apply mask to truncated dataset
# In[19]:
epi_mask_data_all=[]
for run in range(firstrun,lastrun+1):# Load the fMRI data
print('now on run:', run)
epi_masker= NiftiMasker(mask_img=avg_mask, high_pass=1/128,
standardize=True, # Are you going to zscore the data across time?
t_r=ss_TR,
memory='nilearn_cache', # Caches the mask in the directory given as a string here so that it is easier to load and retrieve
memory_level=1, # How much memory will you cache?
verbose=0)
epi_file=out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval)
#confound_file= bold_dir + '%s_confounds_selected_r0%i.txt' % (sub, run) #uncommented from Lizzie
confound_file= bold_dir + 'sub-%s_ses-01_task-recall_run-%s_desc-confounds_regressors.tsv' %(sub,run)
if run==firstrun:
if 0==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[0:int(sum(ntr[run])),:]) #commented from Lizzie
if 1==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[0:int(sum(ntr[run-1])),:]) #commented from Lizzie
epi_mask_data_all=epi_mask_data
nTR_all=epi_mask_data.shape[0]
else:
if 0==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[int(sum(ntr[0:run])):int(sum(ntr[0:run+1])),:])
if 1==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[int(sum(ntr[0:run-1])):int(sum(ntr[0:run])),:])
epi_mask_data_all=np.vstack([epi_mask_data_all,epi_mask_data])
nTR_all=np.vstack([nTR_all,epi_mask_data.shape[0]])
print('Saving trimmed and normalized volume for run',run)
affine_mat = avg_mask.affine #should be the same as the epi data
avg_mask.shape
coords = np.where(avg_mask.get_fdata())
bold_vol=[]
bold_vol=np.zeros((avg_mask.shape[0], avg_mask.shape[1], avg_mask.shape[2], epi_mask_data.shape[0]))
bold_vol[coords[0], coords[1], coords[2], :] = epi_mask_data.T
print('epi_mask_data shape:', bold_vol.shape)
output_name = (out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_norm.nii.gz' % (sub, run, n_trunc))
bold_nii = nib.Nifti1Image(bold_vol, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
hdr.set_zooms((orig_dimsize[0], orig_dimsize[1], orig_dimsize[2], orig_dimsize[3]))
nib.save(bold_nii, output_name)
print(epi_mask_data_all.shape)
# In[20]:
# concatenate volumes
segs = {}
runs = {}
for game in range(ss_tngs):
#identify game in the order it was shown
g_num=g_o[0,game]
seg=np.arange(RSTR[game,0]-1,RETR[game,0]-2,1) #Note the difference of -2
seg=seg+hrshiftval #shift for hrf
#determine which run it was in
run=np.ceil((game+1)/3)
if firstrun==1: #most subjects
if run==2:
seg=seg+nTR_all[0]
if run==3:
seg=seg+nTR_all[0]+nTR_all[1]
print(seg)
print(len(seg))
#if seg: #most subjects
segs[g_num]=seg
runs[g_num]=run
# In[21]:
# re-order concatenated volumes
for g_num in range(1,1+ss_tngs):
runv=np.repeat(runs[g_num],len(segs[g_num]))
if g_num==1:
epi_mask_event=epi_mask_data_all[segs[g_num].astype(int),:]
confounds_event=confounds_all[segs[g_num].astype(int),:]
run_event=runv
print(epi_mask_event.shape)
print(confounds_event.shape)
print(run_event.shape)
else:
epi_mask_event=np.concatenate([epi_mask_event,epi_mask_data_all[segs[g_num].astype(int),:]],axis=0)
confounds_event=np.concatenate([confounds_event,confounds_all[segs[g_num].astype(int),:]],axis=0)
run_event=np.concatenate([run_event,runv],axis=0)
print(epi_mask_event.shape)
print(confounds_event.shape)
print(run_event.shape)
# In[22]:
print(run_event)
# In[23]:
#estimate autocorrelation
'''
def estimated_autocorrelation(x):
"""
http://stackoverflow.com/q/14297012/190597
http://en.wikipedia.org/wiki/Autocorrelation#Estimation
"""
n = len(x)
variance = x.var()
x = x-x.mean()
r = np.correlate(x, x, mode = 'full')[-n:]
assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))
result = r/(variance*(np.arange(n, 0, -1)))
return result
voxel_id=17891
a1=estimated_autocorrelation(epi_mask_hrf[:, voxel_id])
print(a1.shape)
pts=20
if ipynby==1:
f, ax = plt.subplots(1,1, figsize=(14,5))
ax.plot(a1[0:pts])'''
# In[24]:
#z-score
epi_mask_event2=epi_mask_event
print(epi_mask_event2.shape)
# In[25]:
#save confounds_hrf2 as .mat
mat_fname=nonfmri_dir+'confounds_recall.mat'
scipy.io.savemat(mat_fname,{'nTR_all': nTR_all,'confounds_event': confounds_event,'run_event': run_event})
# In[26]:
bold_vol_event=[]
bold_vol_event=np.zeros((avg_mask.shape[0], avg_mask.shape[1], avg_mask.shape[2], epi_mask_event2.shape[0]))
bold_vol_event[coords[0], coords[1], coords[2], :] = epi_mask_event2.T
print(bold_vol_event.shape)
print(avg_mask.shape)
# In[27]:
# Save the concatenated volumes, event file
output_name = out_dir + '%s_task-recall_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_norm_event.nii.gz' % (sub, n_trunc)
print(output_name)
bold_nii = nib.Nifti1Image(bold_vol_event, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
print(orig_dimsize)
hdr.set_zooms((orig_dimsize[0], orig_dimsize[1], orig_dimsize[2], orig_dimsize[3]))
nib.save(bold_nii, output_name)
print('Volume saved')
endtime = time.time()
print(endtime - starttime)
# ## Plot voxels across runs in game order
# In[28]:
if ipynby==1:
n_vox=2000
plt.figure(figsize=(10,16))
this_img = epi_mask_event2[:,:n_vox];
this_img=this_img.T;#must transform because of how Python reshapes
tn='Event-%s' %sub;
plt.imshow(this_img,cmap='gray',origin='lower',interpolation='none',aspect="auto")
plt.title(tn)
# In[29]:
if ipynby==1:
for run in range(1,n_runs_recall+1):
func_name=out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_norm.nii.gz' % (sub, run, n_trunc)
#print(image.load_img(func_name).shape)
vol_num=round(image.load_img(func_name).shape[3]/2)
middle_vol = image.index_img(func_name,vol_num)
#mean_func = mean_img(func_name)
print('Plotting middle volumed for run',run)
plot_epi(middle_vol)
# ### 3.1. Plot a voxel time-series <a id="plot_voxel"></a>
# In[30]:
# Plot value of voxel_id through time
if ipynby==1:
voxel_id = 2000
f, ax = plt.subplots(1,1, figsize=(14,5))
ax.plot(epi_mask_event2[0:200, voxel_id])
ax.set_title('Voxel time series, voxel id = %d' % voxel_id)
ax.set_xlabel('TR (ordered by game #)')
ax.set_ylabel('Voxel Intensity-normed')
# ## Check mean and standard deviation of normalized data
# In[31]:
if ipynby==1:
x_mean = np.mean(epi_mask_event2, axis=0)
x_std = np.std(epi_mask_event2, axis=0, dtype=np.float64)
print('the mean of 1st few time points:\n', x_mean[0:50])
print('')
print('the std of 1st few time points:\n', x_std[0:50])
print('')
print(np.shape(x_mean))
print(np.shape(x_std))
print('')
print(np.amin(x_mean), np.amax(x_mean))
print(np.amin(x_std), np.amax(x_std))
# print(x_std)
f, axes = plt.subplots(1, 2, figsize = (14,4))
n_bins = 20
axes[0].hist(x_mean, bins = n_bins)
axes[0].set_title('distribution of means')
axes[0].set_xlabel('mean values')
axes[0].set_ylabel('counts')
axes[0].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[1].hist(x_std, bins = n_bins)
axes[1].set_title('distribution of stds')
axes[1].set_xlabel('std values')
axes[1].set_ylabel('counts')
axes[1].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[1].get_xaxis().get_major_formatter().set_useOffset(False)
plt.show()
# In[ ]:
| import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import os
if smooth_type == 'susan':
if fwhm == 0:
return in_file
smooth = create_susan_smooth()
smooth.base_dir = out_dir#os.getcwd()
smooth.inputs.inputnode.fwhm = fwhm
smooth.inputs.inputnode.mask_file = mask_file
smooth.inputs.inputnode.in_files = in_file
#smooth.outputs.outputnode.smoothed_files='/jukebox/norman/jantony/surprisesuspense/data/bids/Norman/Antony/ss/derivatives/firstlevel/sub-02/ses-01/sub-02_ses-01_task-recall_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold_trim3TRs_smooth.nii.gz'
res = smooth.run()
smoothed_file=[] #smoothed_file = res.outputs.outputnode.smoothed_files
return smoothed_file | identifier_body |
ImpConcat-Recall.py | #!/usr/bin/env python
# coding: utf-8
# # fMRI Data Loading and Normalization in Python
# **V.0.2 - Beta, [Contributions](#contributions)**
#
# ### Goal of this script
# 1. load the fMRI data into python
# - 3 recall runs
# 2. create an average brain mask from multiple runs
# - ses01_brain (3 recall runs)
# 3. trim TRs from the beginning AND end of each run (and apply this trimming to the confounds as well)
# - save volume as _trimTRs.nii.gz
# 4. apply a high-pass filter and z-score the data
# - save volume as _trim_norm.nii.gz
# 5. concatenate runs to make one time series
# - recall, in a standardized order
# In[10]:
ipynby=0 #python notebook or not july 2
if ipynby==0:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-subject', type=str)
args = parser.parse_args()
print(args.subject)
subject=args.subject
if ipynby==1:
subject=2
# In[11]:
sub ='sub-0'+str(subject)
subS = str(int(subject))
ses = 'ses-01'
task='recall'
n_trunc=3 # Number of volumes to trim/truncate
print(sub)
#one thing about n_trunc=if set to 100, moves mstr back 2 and resets to zero.
#here you can see a nice hrf peaking around the time of movie onset bc score screen was ~ 4 s before
# In[12]:
#import packages
import warnings
import sys
if not sys.warnoptions:
warnings.simplefilter("ignore")
import time
import os
import shutil
import numpy as np
import pandas as pd
import nibabel as nib
from nilearn.input_data import NiftiMasker, MultiNiftiMasker
from nilearn.masking import intersect_masks
from nilearn.datasets import load_mni152_template
from nilearn import image
from nilearn.plotting import plot_roi
from nilearn.plotting import plot_anat
from nilearn.plotting import plot_epi
from nilearn.image.image import mean_img
from nilearn.image import resample_to_img
from scipy import stats
from sklearn import preprocessing
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
import scipy.io
from nipype.workflows.fmri.fsl.preprocess import create_susan_smooth
if ipynby==1:
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('autosave', '30')
# In[13]:
# load some helper functions and import data / settings
import ss_utils
from ss_utils import load_ss_epi_data, load_ss_mask, mask_data, load_data
from ss_utils import ss_dir, ss_bids_dir, ss_TR, ss_hrf_lag, run_names, n_runs
print('TASK:', task)
firstrun=1
lastrun=3
n_runs_recall = lastrun-firstrun+1
bold_dir=ss_bids_dir + 'derivatives/fmriprep/%s/%s/func/' % (sub, ses)
anat_dir=ss_bids_dir + '%s/%s/anat/' % (sub, ses)
anat_fmriprep_dir=ss_bids_dir + 'derivatives/fmriprep/%s//anat/' % sub
out_dir= ss_bids_dir + 'derivatives/firstlevel/%s/' % sub
mask_fold = ss_bids_dir + 'derivatives/firstlevel/%s/masks/' % sub
ses1_dir=ss_bids_dir + 'derivatives/fmriprep/%s/ses-01/func/' % sub
#load g_o
ss_tngs=9
analysis_dir=ss_dir+'analysis/'
nonfmri_dir=ss_dir+'data/nonfmri/%s/' %subS
mat_fname=nonfmri_dir+'g_o.mat'
mat_contents = scipy.io.loadmat(mat_fname)
g_o = mat_contents['g_o']
mat_fname=nonfmri_dir+'MSMEhr.mat'
mat_contents = scipy.io.loadmat(mat_fname)
RSTR = mat_contents['RSTR'] #movie start TRs
RETR = mat_contents['RETR'] #movie end TRs
hrshiftval=5
fwhmval=5
print('bids dir = %s' % (ss_bids_dir))
print('anat dir = %s' % (anat_dir))
print('subject dir = %s' % (bold_dir))
print('output dir = %s' % (out_dir))
print('number of recall runs = %d' % (n_runs_recall))
print('number of games = %d' % (ss_tngs))
print('TR = %s seconds' % (ss_TR))
print('trim %d volumes from each run' % (n_trunc))
print('Game order = %s' % (g_o))
print('Recall start times = %s' % (RSTR))
print('Recall end times = %s' % (RETR))
# In[14]:
#Select confounds and trim volumes from confounds file
#Choose the desired confounds from the confounds_regressors.tsv file from fmriprep, trim the columns corresponding to trimmed volumes, and save as a .txt file.
starttime = time.time()
confounds=[]
confounds_all=[]
mc_all=[]
ntr=[]
ntr=np.zeros((n_runs_recall,1))
for r in range(firstrun,lastrun+1):
fname='_ses-01_task-recall_run-0%i_desc-confounds_regressors.tsv' % (r)
confounds = pd.read_csv(ses1_dir + sub + fname, sep='\t', header=(0))
confounds_selected=confounds[['trans_x','trans_y','trans_z','rot_x','rot_y','rot_z','framewise_displacement','a_comp_cor_00','a_comp_cor_01','a_comp_cor_02','a_comp_cor_03','a_comp_cor_04','a_comp_cor_05']][n_trunc:]
confounds_selected=pd.DataFrame(confounds_selected)
confounds_selected.to_csv(out_dir + 'ses-01/' + sub + '_ses-01_task-recall_run-0%i_confounds_selected.txt' % r, index=False, sep='\t', mode='w')
if 0==firstrun:
ntr[r]=confounds_selected.shape[0]
if 1==firstrun:
ntr[r-1]=confounds_selected.shape[0]
if r==firstrun:
confounds_all=confounds_selected
else:
confounds_all=np.vstack([confounds_all,confounds_selected])
print(confounds_selected.shape[0])
print(ntr)
print(sum(ntr[0]))
# In[15]:
mask_imgs=[]
for run in range(firstrun,lastrun+1):
mask_name = ses1_dir + sub + '_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz' % run
mask_imgs.append(mask_name)
template = load_mni152_template()
i=np.eye(3)*3
template =image.resample_img(template, target_affine=i)
# intersect 3 view brain masks
avg_mask=intersect_masks(mask_imgs, threshold=0.5, connected=True)
avg_mask = resample_to_img(avg_mask, template)
thresha=avg_mask.dataobj>-10000
thresh=avg_mask.dataobj>0.5
avg_mask.dataobj[thresha] = 0
avg_mask.dataobj[thresh] = 1
if ipynby==1:
crange=1
plt.figure(figsize=(16,10))
this_img = avg_mask.dataobj[50,:,:];
plt.imshow(this_img,cmap="viridis",vmin=0,vmax=crange,origin='lower',interpolation='none',aspect="auto")
cbar = plt.colorbar()
dimsize=avg_mask.header.get_zooms()
affine_mat = avg_mask.affine
print(affine_mat)
coords = np.where(avg_mask.get_fdata())
# In[16]:
#plot average brain????
t1_file = anat_fmriprep_dir + sub + '_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz'
print(t1_file)
t1_img = image.load_img(t1_file)
t1_img = resample_to_img(t1_img, template)
if ipynby==1:
plot_roi(avg_mask, bg_img=t1_img)
# Save the mask
output_name_mask = mask_fold + '%s_%s_brain.nii.gz' % (sub, ses)
'''hdr = avg_mask.header # get a handle for the .nii file's header
hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2]))
nib.save(avg_mask, output_name_mask)'''
# In[17]:
def mod_smooth(in_file, mask_file, fwhm, smooth_type):
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import os
if smooth_type == 'susan':
if fwhm == 0:
return in_file
smooth = create_susan_smooth()
smooth.base_dir = out_dir#os.getcwd()
smooth.inputs.inputnode.fwhm = fwhm
smooth.inputs.inputnode.mask_file = mask_file
smooth.inputs.inputnode.in_files = in_file
#smooth.outputs.outputnode.smoothed_files='/jukebox/norman/jantony/surprisesuspense/data/bids/Norman/Antony/ss/derivatives/firstlevel/sub-02/ses-01/sub-02_ses-01_task-recall_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold_trim3TRs_smooth.nii.gz'
res = smooth.run()
smoothed_file=[] #smoothed_file = res.outputs.outputnode.smoothed_files
return smoothed_file
# In[18]:
#truncate first n_trunc TRs
#confounds_trunc=confounds_selected[3:end]
epi_trunc=[]
#https://github.com/INCF/BrainImagingPipelines/blob/master/bips/workflows/gablab/wips/scripts/modular_nodes.py
print('Number of runs to concatenate:', n_runs_recall)
for run in range(firstrun,lastrun+1):#lastrun+1
out_smooth=(out_dir + 'ses-01/' + '%s_ses-01_task-recall9_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval))
if os.path.exists(out_smooth):
proceeeeed=[]
epi_data=nib.load(out_smooth)
epi_data=resample_to_img(epi_data, template)# JWA, August 25 change
epi=epi_data.get_fdata()
#truncate
epi_trunc =np.zeros((epi_data.shape[0], epi_data.shape[1], epi_data.shape[2], epi_data.shape[3]-n_trunc))
epi_trunc[:, :, :, :] = epi[:,:,:,n_trunc:]
print(epi_data.shape, ' ', epi_trunc.shape)
dimsize=epi_data.header.get_zooms()
#print(dimsize)
orig_dimsize=dimsize
affine_mat = epi_data.affine # What is the orientation of the data
print(affine_mat)
else:
epi_file=ses1_dir + sub + '_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz' % run
epi_data=nib.load(epi_file)
epi_data=resample_to_img(epi_data, template)# JWA, August 25 change
epi=epi_data.get_fdata()
#truncate
epi_trunc =np.zeros((epi_data.shape[0], epi_data.shape[1], epi_data.shape[2], epi_data.shape[3]-n_trunc))
epi_trunc[:, :, :, :] = epi[:,:,:,n_trunc:]
print(epi_data.shape, ' ', epi_trunc.shape)
dimsize=epi_data.header.get_zooms()
#print(dimsize)
orig_dimsize=dimsize
affine_mat = epi_data.affine # What is the orientation of the data
print(affine_mat)
# Save the volume
output_name = (out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%dTRs.nii.gz' % (sub, run, n_trunc))
bold_nii = nib.Nifti1Image(epi_trunc, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2], dimsize[3]))
nib.save(bold_nii, output_name)
# smooth with susan
smoothed_file = mod_smooth(output_name,output_name_mask,fwhmval, 'susan')
#move file
in_smooth=(out_dir+'susan_smooth/smooth/mapflow/_smooth0/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%dTRs_smooth.nii.gz' % (sub, run, n_trunc))
#out_smooth=(out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval))
os.rename(in_smooth,out_smooth)
# ## Load fMRI data <a id="load_fmri"></a>
# #### Get voxels from an ROI
#
# We will extract BOLD data, only for voxels in a mask, by executing the following sequence of steps:
# 1. load whole brain fMRI data (for a given subject and a given run)
# 2. load the desired mask
# 3. use `NiftiMasker` to sub-select mask voxels from the whole brain data
# - `NiftiMasker` is a function from nilearn. Here's <a href="https://nilearn.github.io/auto_examples/04_manipulating_images/plot_mask_computation.html">an example</a> about how to use it, and here's the official <a href="https://nilearn.github.io/modules/generated/nilearn.input_data.NiftiMasker.html">documentation</a>.
# ## Apply mask to truncated dataset
# In[19]:
epi_mask_data_all=[]
for run in range(firstrun,lastrun+1):# Load the fMRI data
print('now on run:', run)
epi_masker= NiftiMasker(mask_img=avg_mask, high_pass=1/128,
standardize=True, # Are you going to zscore the data across time?
t_r=ss_TR,
memory='nilearn_cache', # Caches the mask in the directory given as a string here so that it is easier to load and retrieve
memory_level=1, # How much memory will you cache?
verbose=0)
epi_file=out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval)
#confound_file= bold_dir + '%s_confounds_selected_r0%i.txt' % (sub, run) #uncommented from Lizzie
confound_file= bold_dir + 'sub-%s_ses-01_task-recall_run-%s_desc-confounds_regressors.tsv' %(sub,run)
if run==firstrun:
if 0==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[0:int(sum(ntr[run])),:]) #commented from Lizzie
if 1==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[0:int(sum(ntr[run-1])),:]) #commented from Lizzie
epi_mask_data_all=epi_mask_data
nTR_all=epi_mask_data.shape[0]
else:
if 0==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[int(sum(ntr[0:run])):int(sum(ntr[0:run+1])),:])
if 1==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[int(sum(ntr[0:run-1])):int(sum(ntr[0:run])),:])
epi_mask_data_all=np.vstack([epi_mask_data_all,epi_mask_data])
nTR_all=np.vstack([nTR_all,epi_mask_data.shape[0]])
print('Saving trimmed and normalized volume for run',run)
affine_mat = avg_mask.affine #should be the same as the epi data | coords = np.where(avg_mask.get_fdata())
bold_vol=[]
bold_vol=np.zeros((avg_mask.shape[0], avg_mask.shape[1], avg_mask.shape[2], epi_mask_data.shape[0]))
bold_vol[coords[0], coords[1], coords[2], :] = epi_mask_data.T
print('epi_mask_data shape:', bold_vol.shape)
output_name = (out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_norm.nii.gz' % (sub, run, n_trunc))
bold_nii = nib.Nifti1Image(bold_vol, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
hdr.set_zooms((orig_dimsize[0], orig_dimsize[1], orig_dimsize[2], orig_dimsize[3]))
nib.save(bold_nii, output_name)
print(epi_mask_data_all.shape)
# In[20]:
# concatenate volumes
segs = {}
runs = {}
for game in range(ss_tngs):
#identify game in the order it was shown
g_num=g_o[0,game]
seg=np.arange(RSTR[game,0]-1,RETR[game,0]-2,1) #Note the difference of -2
seg=seg+hrshiftval #shift for hrf
#determine which run it was in
run=np.ceil((game+1)/3)
if firstrun==1: #most subjects
if run==2:
seg=seg+nTR_all[0]
if run==3:
seg=seg+nTR_all[0]+nTR_all[1]
print(seg)
print(len(seg))
#if seg: #most subjects
segs[g_num]=seg
runs[g_num]=run
# In[21]:
# re-order concatenated volumes
for g_num in range(1,1+ss_tngs):
runv=np.repeat(runs[g_num],len(segs[g_num]))
if g_num==1:
epi_mask_event=epi_mask_data_all[segs[g_num].astype(int),:]
confounds_event=confounds_all[segs[g_num].astype(int),:]
run_event=runv
print(epi_mask_event.shape)
print(confounds_event.shape)
print(run_event.shape)
else:
epi_mask_event=np.concatenate([epi_mask_event,epi_mask_data_all[segs[g_num].astype(int),:]],axis=0)
confounds_event=np.concatenate([confounds_event,confounds_all[segs[g_num].astype(int),:]],axis=0)
run_event=np.concatenate([run_event,runv],axis=0)
print(epi_mask_event.shape)
print(confounds_event.shape)
print(run_event.shape)
# In[22]:
print(run_event)
# In[23]:
#estimate autocorrelation
'''
def estimated_autocorrelation(x):
"""
http://stackoverflow.com/q/14297012/190597
http://en.wikipedia.org/wiki/Autocorrelation#Estimation
"""
n = len(x)
variance = x.var()
x = x-x.mean()
r = np.correlate(x, x, mode = 'full')[-n:]
assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))
result = r/(variance*(np.arange(n, 0, -1)))
return result
voxel_id=17891
a1=estimated_autocorrelation(epi_mask_hrf[:, voxel_id])
print(a1.shape)
pts=20
if ipynby==1:
f, ax = plt.subplots(1,1, figsize=(14,5))
ax.plot(a1[0:pts])'''
# In[24]:
#z-score
epi_mask_event2=epi_mask_event
print(epi_mask_event2.shape)
# In[25]:
#save confounds_hrf2 as .mat
mat_fname=nonfmri_dir+'confounds_recall.mat'
scipy.io.savemat(mat_fname,{'nTR_all': nTR_all,'confounds_event': confounds_event,'run_event': run_event})
# In[26]:
bold_vol_event=[]
bold_vol_event=np.zeros((avg_mask.shape[0], avg_mask.shape[1], avg_mask.shape[2], epi_mask_event2.shape[0]))
bold_vol_event[coords[0], coords[1], coords[2], :] = epi_mask_event2.T
print(bold_vol_event.shape)
print(avg_mask.shape)
# In[27]:
# Save the concatenated volumes, event file
output_name = out_dir + '%s_task-recall_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_norm_event.nii.gz' % (sub, n_trunc)
print(output_name)
bold_nii = nib.Nifti1Image(bold_vol_event, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
print(orig_dimsize)
hdr.set_zooms((orig_dimsize[0], orig_dimsize[1], orig_dimsize[2], orig_dimsize[3]))
nib.save(bold_nii, output_name)
print('Volume saved')
endtime = time.time()
print(endtime - starttime)
# ## Plot voxels across runs in game order
# In[28]:
if ipynby==1:
n_vox=2000
plt.figure(figsize=(10,16))
this_img = epi_mask_event2[:,:n_vox];
this_img=this_img.T;#must transform because of how Python reshapes
tn='Event-%s' %sub;
plt.imshow(this_img,cmap='gray',origin='lower',interpolation='none',aspect="auto")
plt.title(tn)
# In[29]:
if ipynby==1:
for run in range(1,n_runs_recall+1):
func_name=out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_norm.nii.gz' % (sub, run, n_trunc)
#print(image.load_img(func_name).shape)
vol_num=round(image.load_img(func_name).shape[3]/2)
middle_vol = image.index_img(func_name,vol_num)
#mean_func = mean_img(func_name)
print('Plotting middle volumed for run',run)
plot_epi(middle_vol)
# ### 3.1. Plot a voxel time-series <a id="plot_voxel"></a>
# In[30]:
# Plot value of voxel_id through time
if ipynby==1:
voxel_id = 2000
f, ax = plt.subplots(1,1, figsize=(14,5))
ax.plot(epi_mask_event2[0:200, voxel_id])
ax.set_title('Voxel time series, voxel id = %d' % voxel_id)
ax.set_xlabel('TR (ordered by game #)')
ax.set_ylabel('Voxel Intensity-normed')
# ## Check mean and standard deviation of normalized data
# In[31]:
if ipynby==1:
x_mean = np.mean(epi_mask_event2, axis=0)
x_std = np.std(epi_mask_event2, axis=0, dtype=np.float64)
print('the mean of 1st few time points:\n', x_mean[0:50])
print('')
print('the std of 1st few time points:\n', x_std[0:50])
print('')
print(np.shape(x_mean))
print(np.shape(x_std))
print('')
print(np.amin(x_mean), np.amax(x_mean))
print(np.amin(x_std), np.amax(x_std))
# print(x_std)
f, axes = plt.subplots(1, 2, figsize = (14,4))
n_bins = 20
axes[0].hist(x_mean, bins = n_bins)
axes[0].set_title('distribution of means')
axes[0].set_xlabel('mean values')
axes[0].set_ylabel('counts')
axes[0].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[1].hist(x_std, bins = n_bins)
axes[1].set_title('distribution of stds')
axes[1].set_xlabel('std values')
axes[1].set_ylabel('counts')
axes[1].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[1].get_xaxis().get_major_formatter().set_useOffset(False)
plt.show()
# In[ ]: | avg_mask.shape | random_line_split |
ImpConcat-Recall.py | #!/usr/bin/env python
# coding: utf-8
# # fMRI Data Loading and Normalization in Python
# **V.0.2 - Beta, [Contributions](#contributions)**
#
# ### Goal of this script
# 1. load the fMRI data into python
# - 3 recall runs
# 2. create an average brain mask from multiple runs
# - ses01_brain (3 recall runs)
# 3. trim TRs from the beginning AND end of each run (and apply this trimming to the confounds as well)
# - save volume as _trimTRs.nii.gz
# 4. apply a high-pass filter and z-score the data
# - save volume as _trim_norm.nii.gz
# 5. concatenate runs to make one time series
# - recall, in a standardized order
# In[10]:
ipynby=0 #python notebook or not july 2
if ipynby==0:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-subject', type=str)
args = parser.parse_args()
print(args.subject)
subject=args.subject
if ipynby==1:
subject=2
# In[11]:
sub ='sub-0'+str(subject)
subS = str(int(subject))
ses = 'ses-01'
task='recall'
n_trunc=3 # Number of volumes to trim/truncate
print(sub)
#one thing about n_trunc=if set to 100, moves mstr back 2 and resets to zero.
#here you can see a nice hrf peaking around the time of movie onset bc score screen was ~ 4 s before
# In[12]:
#import packages
import warnings
import sys
if not sys.warnoptions:
warnings.simplefilter("ignore")
import time
import os
import shutil
import numpy as np
import pandas as pd
import nibabel as nib
from nilearn.input_data import NiftiMasker, MultiNiftiMasker
from nilearn.masking import intersect_masks
from nilearn.datasets import load_mni152_template
from nilearn import image
from nilearn.plotting import plot_roi
from nilearn.plotting import plot_anat
from nilearn.plotting import plot_epi
from nilearn.image.image import mean_img
from nilearn.image import resample_to_img
from scipy import stats
from sklearn import preprocessing
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
import scipy.io
from nipype.workflows.fmri.fsl.preprocess import create_susan_smooth
if ipynby==1:
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('autosave', '30')
# In[13]:
# load some helper functions and import data / settings
import ss_utils
from ss_utils import load_ss_epi_data, load_ss_mask, mask_data, load_data
from ss_utils import ss_dir, ss_bids_dir, ss_TR, ss_hrf_lag, run_names, n_runs
print('TASK:', task)
firstrun=1
lastrun=3
n_runs_recall = lastrun-firstrun+1
bold_dir=ss_bids_dir + 'derivatives/fmriprep/%s/%s/func/' % (sub, ses)
anat_dir=ss_bids_dir + '%s/%s/anat/' % (sub, ses)
anat_fmriprep_dir=ss_bids_dir + 'derivatives/fmriprep/%s//anat/' % sub
out_dir= ss_bids_dir + 'derivatives/firstlevel/%s/' % sub
mask_fold = ss_bids_dir + 'derivatives/firstlevel/%s/masks/' % sub
ses1_dir=ss_bids_dir + 'derivatives/fmriprep/%s/ses-01/func/' % sub
#load g_o
ss_tngs=9
analysis_dir=ss_dir+'analysis/'
nonfmri_dir=ss_dir+'data/nonfmri/%s/' %subS
mat_fname=nonfmri_dir+'g_o.mat'
mat_contents = scipy.io.loadmat(mat_fname)
g_o = mat_contents['g_o']
mat_fname=nonfmri_dir+'MSMEhr.mat'
mat_contents = scipy.io.loadmat(mat_fname)
RSTR = mat_contents['RSTR'] #movie start TRs
RETR = mat_contents['RETR'] #movie end TRs
hrshiftval=5
fwhmval=5
print('bids dir = %s' % (ss_bids_dir))
print('anat dir = %s' % (anat_dir))
print('subject dir = %s' % (bold_dir))
print('output dir = %s' % (out_dir))
print('number of recall runs = %d' % (n_runs_recall))
print('number of games = %d' % (ss_tngs))
print('TR = %s seconds' % (ss_TR))
print('trim %d volumes from each run' % (n_trunc))
print('Game order = %s' % (g_o))
print('Recall start times = %s' % (RSTR))
print('Recall end times = %s' % (RETR))
# In[14]:
#Select confounds and trim volumes from confounds file
#Choose the desired confounds from the confounds_regressors.tsv file from fmriprep, trim the columns corresponding to trimmed volumes, and save as a .txt file.
starttime = time.time()
confounds=[]
confounds_all=[]
mc_all=[]
ntr=[]
ntr=np.zeros((n_runs_recall,1))
for r in range(firstrun,lastrun+1):
fname='_ses-01_task-recall_run-0%i_desc-confounds_regressors.tsv' % (r)
confounds = pd.read_csv(ses1_dir + sub + fname, sep='\t', header=(0))
confounds_selected=confounds[['trans_x','trans_y','trans_z','rot_x','rot_y','rot_z','framewise_displacement','a_comp_cor_00','a_comp_cor_01','a_comp_cor_02','a_comp_cor_03','a_comp_cor_04','a_comp_cor_05']][n_trunc:]
confounds_selected=pd.DataFrame(confounds_selected)
confounds_selected.to_csv(out_dir + 'ses-01/' + sub + '_ses-01_task-recall_run-0%i_confounds_selected.txt' % r, index=False, sep='\t', mode='w')
if 0==firstrun:
ntr[r]=confounds_selected.shape[0]
if 1==firstrun:
ntr[r-1]=confounds_selected.shape[0]
if r==firstrun:
confounds_all=confounds_selected
else:
confounds_all=np.vstack([confounds_all,confounds_selected])
print(confounds_selected.shape[0])
print(ntr)
print(sum(ntr[0]))
# In[15]:
mask_imgs=[]
for run in range(firstrun,lastrun+1):
mask_name = ses1_dir + sub + '_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz' % run
mask_imgs.append(mask_name)
template = load_mni152_template()
i=np.eye(3)*3
template =image.resample_img(template, target_affine=i)
# intersect 3 view brain masks
avg_mask=intersect_masks(mask_imgs, threshold=0.5, connected=True)
avg_mask = resample_to_img(avg_mask, template)
thresha=avg_mask.dataobj>-10000
thresh=avg_mask.dataobj>0.5
avg_mask.dataobj[thresha] = 0
avg_mask.dataobj[thresh] = 1
if ipynby==1:
crange=1
plt.figure(figsize=(16,10))
this_img = avg_mask.dataobj[50,:,:];
plt.imshow(this_img,cmap="viridis",vmin=0,vmax=crange,origin='lower',interpolation='none',aspect="auto")
cbar = plt.colorbar()
dimsize=avg_mask.header.get_zooms()
affine_mat = avg_mask.affine
print(affine_mat)
coords = np.where(avg_mask.get_fdata())
# In[16]:
#plot average brain????
t1_file = anat_fmriprep_dir + sub + '_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz'
print(t1_file)
t1_img = image.load_img(t1_file)
t1_img = resample_to_img(t1_img, template)
if ipynby==1:
plot_roi(avg_mask, bg_img=t1_img)
# Save the mask
output_name_mask = mask_fold + '%s_%s_brain.nii.gz' % (sub, ses)
'''hdr = avg_mask.header # get a handle for the .nii file's header
hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2]))
nib.save(avg_mask, output_name_mask)'''
# In[17]:
def | (in_file, mask_file, fwhm, smooth_type):
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import os
if smooth_type == 'susan':
if fwhm == 0:
return in_file
smooth = create_susan_smooth()
smooth.base_dir = out_dir#os.getcwd()
smooth.inputs.inputnode.fwhm = fwhm
smooth.inputs.inputnode.mask_file = mask_file
smooth.inputs.inputnode.in_files = in_file
#smooth.outputs.outputnode.smoothed_files='/jukebox/norman/jantony/surprisesuspense/data/bids/Norman/Antony/ss/derivatives/firstlevel/sub-02/ses-01/sub-02_ses-01_task-recall_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold_trim3TRs_smooth.nii.gz'
res = smooth.run()
smoothed_file=[] #smoothed_file = res.outputs.outputnode.smoothed_files
return smoothed_file
# In[18]:
#truncate first n_trunc TRs
#confounds_trunc=confounds_selected[3:end]
epi_trunc=[]
#https://github.com/INCF/BrainImagingPipelines/blob/master/bips/workflows/gablab/wips/scripts/modular_nodes.py
print('Number of runs to concatenate:', n_runs_recall)
for run in range(firstrun,lastrun+1):#lastrun+1
out_smooth=(out_dir + 'ses-01/' + '%s_ses-01_task-recall9_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval))
if os.path.exists(out_smooth):
proceeeeed=[]
epi_data=nib.load(out_smooth)
epi_data=resample_to_img(epi_data, template)# JWA, August 25 change
epi=epi_data.get_fdata()
#truncate
epi_trunc =np.zeros((epi_data.shape[0], epi_data.shape[1], epi_data.shape[2], epi_data.shape[3]-n_trunc))
epi_trunc[:, :, :, :] = epi[:,:,:,n_trunc:]
print(epi_data.shape, ' ', epi_trunc.shape)
dimsize=epi_data.header.get_zooms()
#print(dimsize)
orig_dimsize=dimsize
affine_mat = epi_data.affine # What is the orientation of the data
print(affine_mat)
else:
epi_file=ses1_dir + sub + '_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz' % run
epi_data=nib.load(epi_file)
epi_data=resample_to_img(epi_data, template)# JWA, August 25 change
epi=epi_data.get_fdata()
#truncate
epi_trunc =np.zeros((epi_data.shape[0], epi_data.shape[1], epi_data.shape[2], epi_data.shape[3]-n_trunc))
epi_trunc[:, :, :, :] = epi[:,:,:,n_trunc:]
print(epi_data.shape, ' ', epi_trunc.shape)
dimsize=epi_data.header.get_zooms()
#print(dimsize)
orig_dimsize=dimsize
affine_mat = epi_data.affine # What is the orientation of the data
print(affine_mat)
# Save the volume
output_name = (out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%dTRs.nii.gz' % (sub, run, n_trunc))
bold_nii = nib.Nifti1Image(epi_trunc, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2], dimsize[3]))
nib.save(bold_nii, output_name)
# smooth with susan
smoothed_file = mod_smooth(output_name,output_name_mask,fwhmval, 'susan')
#move file
in_smooth=(out_dir+'susan_smooth/smooth/mapflow/_smooth0/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%dTRs_smooth.nii.gz' % (sub, run, n_trunc))
#out_smooth=(out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval))
os.rename(in_smooth,out_smooth)
# ## Load fMRI data <a id="load_fmri"></a>
# #### Get voxels from an ROI
#
# We will extract BOLD data, only for voxels in a mask, by executing the following sequence of steps:
# 1. load whole brain fMRI data (for a given subject and a given run)
# 2. load the desired mask
# 3. use `NiftiMasker` to sub-select mask voxels from the whole brain data
# - `NiftiMasker` is a function from nilearn. Here's <a href="https://nilearn.github.io/auto_examples/04_manipulating_images/plot_mask_computation.html">an example</a> about how to use it, and here's the official <a href="https://nilearn.github.io/modules/generated/nilearn.input_data.NiftiMasker.html">documentation</a>.
# ## Apply mask to truncated dataset
# In[19]:
epi_mask_data_all=[]
for run in range(firstrun,lastrun+1):# Load the fMRI data
print('now on run:', run)
epi_masker= NiftiMasker(mask_img=avg_mask, high_pass=1/128,
standardize=True, # Are you going to zscore the data across time?
t_r=ss_TR,
memory='nilearn_cache', # Caches the mask in the directory given as a string here so that it is easier to load and retrieve
memory_level=1, # How much memory will you cache?
verbose=0)
epi_file=out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval)
#confound_file= bold_dir + '%s_confounds_selected_r0%i.txt' % (sub, run) #uncommented from Lizzie
confound_file= bold_dir + 'sub-%s_ses-01_task-recall_run-%s_desc-confounds_regressors.tsv' %(sub,run)
if run==firstrun:
if 0==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[0:int(sum(ntr[run])),:]) #commented from Lizzie
if 1==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[0:int(sum(ntr[run-1])),:]) #commented from Lizzie
epi_mask_data_all=epi_mask_data
nTR_all=epi_mask_data.shape[0]
else:
if 0==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[int(sum(ntr[0:run])):int(sum(ntr[0:run+1])),:])
if 1==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[int(sum(ntr[0:run-1])):int(sum(ntr[0:run])),:])
epi_mask_data_all=np.vstack([epi_mask_data_all,epi_mask_data])
nTR_all=np.vstack([nTR_all,epi_mask_data.shape[0]])
print('Saving trimmed and normalized volume for run',run)
affine_mat = avg_mask.affine #should be the same as the epi data
avg_mask.shape
coords = np.where(avg_mask.get_fdata())
bold_vol=[]
bold_vol=np.zeros((avg_mask.shape[0], avg_mask.shape[1], avg_mask.shape[2], epi_mask_data.shape[0]))
bold_vol[coords[0], coords[1], coords[2], :] = epi_mask_data.T
print('epi_mask_data shape:', bold_vol.shape)
output_name = (out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_norm.nii.gz' % (sub, run, n_trunc))
bold_nii = nib.Nifti1Image(bold_vol, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
hdr.set_zooms((orig_dimsize[0], orig_dimsize[1], orig_dimsize[2], orig_dimsize[3]))
nib.save(bold_nii, output_name)
print(epi_mask_data_all.shape)
# In[20]:
# concatenate volumes
segs = {}
runs = {}
for game in range(ss_tngs):
#identify game in the order it was shown
g_num=g_o[0,game]
seg=np.arange(RSTR[game,0]-1,RETR[game,0]-2,1) #Note the difference of -2
seg=seg+hrshiftval #shift for hrf
#determine which run it was in
run=np.ceil((game+1)/3)
if firstrun==1: #most subjects
if run==2:
seg=seg+nTR_all[0]
if run==3:
seg=seg+nTR_all[0]+nTR_all[1]
print(seg)
print(len(seg))
#if seg: #most subjects
segs[g_num]=seg
runs[g_num]=run
# In[21]:
# re-order concatenated volumes
for g_num in range(1,1+ss_tngs):
runv=np.repeat(runs[g_num],len(segs[g_num]))
if g_num==1:
epi_mask_event=epi_mask_data_all[segs[g_num].astype(int),:]
confounds_event=confounds_all[segs[g_num].astype(int),:]
run_event=runv
print(epi_mask_event.shape)
print(confounds_event.shape)
print(run_event.shape)
else:
epi_mask_event=np.concatenate([epi_mask_event,epi_mask_data_all[segs[g_num].astype(int),:]],axis=0)
confounds_event=np.concatenate([confounds_event,confounds_all[segs[g_num].astype(int),:]],axis=0)
run_event=np.concatenate([run_event,runv],axis=0)
print(epi_mask_event.shape)
print(confounds_event.shape)
print(run_event.shape)
# In[22]:
print(run_event)
# In[23]:
#estimate autocorrelation
'''
def estimated_autocorrelation(x):
"""
http://stackoverflow.com/q/14297012/190597
http://en.wikipedia.org/wiki/Autocorrelation#Estimation
"""
n = len(x)
variance = x.var()
x = x-x.mean()
r = np.correlate(x, x, mode = 'full')[-n:]
assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))
result = r/(variance*(np.arange(n, 0, -1)))
return result
voxel_id=17891
a1=estimated_autocorrelation(epi_mask_hrf[:, voxel_id])
print(a1.shape)
pts=20
if ipynby==1:
f, ax = plt.subplots(1,1, figsize=(14,5))
ax.plot(a1[0:pts])'''
# In[24]:
#z-score
epi_mask_event2=epi_mask_event
print(epi_mask_event2.shape)
# In[25]:
#save confounds_hrf2 as .mat
mat_fname=nonfmri_dir+'confounds_recall.mat'
scipy.io.savemat(mat_fname,{'nTR_all': nTR_all,'confounds_event': confounds_event,'run_event': run_event})
# In[26]:
bold_vol_event=[]
bold_vol_event=np.zeros((avg_mask.shape[0], avg_mask.shape[1], avg_mask.shape[2], epi_mask_event2.shape[0]))
bold_vol_event[coords[0], coords[1], coords[2], :] = epi_mask_event2.T
print(bold_vol_event.shape)
print(avg_mask.shape)
# In[27]:
# Save the concatenated volumes, event file
output_name = out_dir + '%s_task-recall_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_norm_event.nii.gz' % (sub, n_trunc)
print(output_name)
bold_nii = nib.Nifti1Image(bold_vol_event, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
print(orig_dimsize)
hdr.set_zooms((orig_dimsize[0], orig_dimsize[1], orig_dimsize[2], orig_dimsize[3]))
nib.save(bold_nii, output_name)
print('Volume saved')
endtime = time.time()
print(endtime - starttime)
# ## Plot voxels across runs in game order
# In[28]:
if ipynby==1:
n_vox=2000
plt.figure(figsize=(10,16))
this_img = epi_mask_event2[:,:n_vox];
this_img=this_img.T;#must transform because of how Python reshapes
tn='Event-%s' %sub;
plt.imshow(this_img,cmap='gray',origin='lower',interpolation='none',aspect="auto")
plt.title(tn)
# In[29]:
if ipynby==1:
for run in range(1,n_runs_recall+1):
func_name=out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_norm.nii.gz' % (sub, run, n_trunc)
#print(image.load_img(func_name).shape)
vol_num=round(image.load_img(func_name).shape[3]/2)
middle_vol = image.index_img(func_name,vol_num)
#mean_func = mean_img(func_name)
print('Plotting middle volumed for run',run)
plot_epi(middle_vol)
# ### 3.1. Plot a voxel time-series <a id="plot_voxel"></a>
# In[30]:
# Plot value of voxel_id through time
if ipynby==1:
voxel_id = 2000
f, ax = plt.subplots(1,1, figsize=(14,5))
ax.plot(epi_mask_event2[0:200, voxel_id])
ax.set_title('Voxel time series, voxel id = %d' % voxel_id)
ax.set_xlabel('TR (ordered by game #)')
ax.set_ylabel('Voxel Intensity-normed')
# ## Check mean and standard deviation of normalized data
# In[31]:
if ipynby==1:
x_mean = np.mean(epi_mask_event2, axis=0)
x_std = np.std(epi_mask_event2, axis=0, dtype=np.float64)
print('the mean of 1st few time points:\n', x_mean[0:50])
print('')
print('the std of 1st few time points:\n', x_std[0:50])
print('')
print(np.shape(x_mean))
print(np.shape(x_std))
print('')
print(np.amin(x_mean), np.amax(x_mean))
print(np.amin(x_std), np.amax(x_std))
# print(x_std)
f, axes = plt.subplots(1, 2, figsize = (14,4))
n_bins = 20
axes[0].hist(x_mean, bins = n_bins)
axes[0].set_title('distribution of means')
axes[0].set_xlabel('mean values')
axes[0].set_ylabel('counts')
axes[0].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[1].hist(x_std, bins = n_bins)
axes[1].set_title('distribution of stds')
axes[1].set_xlabel('std values')
axes[1].set_ylabel('counts')
axes[1].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[1].get_xaxis().get_major_formatter().set_useOffset(False)
plt.show()
# In[ ]:
| mod_smooth | identifier_name |
ImpConcat-Recall.py | #!/usr/bin/env python
# coding: utf-8
# # fMRI Data Loading and Normalization in Python
# **V.0.2 - Beta, [Contributions](#contributions)**
#
# ### Goal of this script
# 1. load the fMRI data into python
# - 3 recall runs
# 2. create an average brain mask from multiple runs
# - ses01_brain (3 recall runs)
# 3. trim TRs from the beginning AND end of each run (and apply this trimming to the confounds as well)
# - save volume as _trimTRs.nii.gz
# 4. apply a high-pass filter and z-score the data
# - save volume as _trim_norm.nii.gz
# 5. concatenate runs to make one time series
# - recall, in a standardized order
# In[10]:
ipynby=0 #python notebook or not july 2
if ipynby==0:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-subject', type=str)
args = parser.parse_args()
print(args.subject)
subject=args.subject
if ipynby==1:
subject=2
# In[11]:
sub ='sub-0'+str(subject)
subS = str(int(subject))
ses = 'ses-01'
task='recall'
n_trunc=3 # Number of volumes to trim/truncate
print(sub)
#one thing about n_trunc=if set to 100, moves mstr back 2 and resets to zero.
#here you can see a nice hrf peaking around the time of movie onset bc score screen was ~ 4 s before
# In[12]:
#import packages
import warnings
import sys
if not sys.warnoptions:
warnings.simplefilter("ignore")
import time
import os
import shutil
import numpy as np
import pandas as pd
import nibabel as nib
from nilearn.input_data import NiftiMasker, MultiNiftiMasker
from nilearn.masking import intersect_masks
from nilearn.datasets import load_mni152_template
from nilearn import image
from nilearn.plotting import plot_roi
from nilearn.plotting import plot_anat
from nilearn.plotting import plot_epi
from nilearn.image.image import mean_img
from nilearn.image import resample_to_img
from scipy import stats
from sklearn import preprocessing
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
import scipy.io
from nipype.workflows.fmri.fsl.preprocess import create_susan_smooth
if ipynby==1:
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('autosave', '30')
# In[13]:
# load some helper functions and import data / settings
import ss_utils
from ss_utils import load_ss_epi_data, load_ss_mask, mask_data, load_data
from ss_utils import ss_dir, ss_bids_dir, ss_TR, ss_hrf_lag, run_names, n_runs
print('TASK:', task)
firstrun=1
lastrun=3
n_runs_recall = lastrun-firstrun+1
bold_dir=ss_bids_dir + 'derivatives/fmriprep/%s/%s/func/' % (sub, ses)
anat_dir=ss_bids_dir + '%s/%s/anat/' % (sub, ses)
anat_fmriprep_dir=ss_bids_dir + 'derivatives/fmriprep/%s//anat/' % sub
out_dir= ss_bids_dir + 'derivatives/firstlevel/%s/' % sub
mask_fold = ss_bids_dir + 'derivatives/firstlevel/%s/masks/' % sub
ses1_dir=ss_bids_dir + 'derivatives/fmriprep/%s/ses-01/func/' % sub
#load g_o
ss_tngs=9
analysis_dir=ss_dir+'analysis/'
nonfmri_dir=ss_dir+'data/nonfmri/%s/' %subS
mat_fname=nonfmri_dir+'g_o.mat'
mat_contents = scipy.io.loadmat(mat_fname)
g_o = mat_contents['g_o']
mat_fname=nonfmri_dir+'MSMEhr.mat'
mat_contents = scipy.io.loadmat(mat_fname)
RSTR = mat_contents['RSTR'] #movie start TRs
RETR = mat_contents['RETR'] #movie end TRs
hrshiftval=5
fwhmval=5
print('bids dir = %s' % (ss_bids_dir))
print('anat dir = %s' % (anat_dir))
print('subject dir = %s' % (bold_dir))
print('output dir = %s' % (out_dir))
print('number of recall runs = %d' % (n_runs_recall))
print('number of games = %d' % (ss_tngs))
print('TR = %s seconds' % (ss_TR))
print('trim %d volumes from each run' % (n_trunc))
print('Game order = %s' % (g_o))
print('Recall start times = %s' % (RSTR))
print('Recall end times = %s' % (RETR))
# In[14]:
#Select confounds and trim volumes from confounds file
#Choose the desired confounds from the confounds_regressors.tsv file from fmriprep, trim the columns corresponding to trimmed volumes, and save as a .txt file.
starttime = time.time()
confounds=[]
confounds_all=[]
mc_all=[]
ntr=[]
ntr=np.zeros((n_runs_recall,1))
for r in range(firstrun,lastrun+1):
fname='_ses-01_task-recall_run-0%i_desc-confounds_regressors.tsv' % (r)
confounds = pd.read_csv(ses1_dir + sub + fname, sep='\t', header=(0))
confounds_selected=confounds[['trans_x','trans_y','trans_z','rot_x','rot_y','rot_z','framewise_displacement','a_comp_cor_00','a_comp_cor_01','a_comp_cor_02','a_comp_cor_03','a_comp_cor_04','a_comp_cor_05']][n_trunc:]
confounds_selected=pd.DataFrame(confounds_selected)
confounds_selected.to_csv(out_dir + 'ses-01/' + sub + '_ses-01_task-recall_run-0%i_confounds_selected.txt' % r, index=False, sep='\t', mode='w')
if 0==firstrun:
ntr[r]=confounds_selected.shape[0]
if 1==firstrun:
ntr[r-1]=confounds_selected.shape[0]
if r==firstrun:
|
else:
confounds_all=np.vstack([confounds_all,confounds_selected])
print(confounds_selected.shape[0])
print(ntr)
print(sum(ntr[0]))
# In[15]:
mask_imgs=[]
for run in range(firstrun,lastrun+1):
mask_name = ses1_dir + sub + '_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz' % run
mask_imgs.append(mask_name)
template = load_mni152_template()
i=np.eye(3)*3
template =image.resample_img(template, target_affine=i)
# intersect 3 view brain masks
avg_mask=intersect_masks(mask_imgs, threshold=0.5, connected=True)
avg_mask = resample_to_img(avg_mask, template)
thresha=avg_mask.dataobj>-10000
thresh=avg_mask.dataobj>0.5
avg_mask.dataobj[thresha] = 0
avg_mask.dataobj[thresh] = 1
if ipynby==1:
crange=1
plt.figure(figsize=(16,10))
this_img = avg_mask.dataobj[50,:,:];
plt.imshow(this_img,cmap="viridis",vmin=0,vmax=crange,origin='lower',interpolation='none',aspect="auto")
cbar = plt.colorbar()
dimsize=avg_mask.header.get_zooms()
affine_mat = avg_mask.affine
print(affine_mat)
coords = np.where(avg_mask.get_fdata())
# In[16]:
#plot average brain????
t1_file = anat_fmriprep_dir + sub + '_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz'
print(t1_file)
t1_img = image.load_img(t1_file)
t1_img = resample_to_img(t1_img, template)
if ipynby==1:
plot_roi(avg_mask, bg_img=t1_img)
# Save the mask
output_name_mask = mask_fold + '%s_%s_brain.nii.gz' % (sub, ses)
'''hdr = avg_mask.header # get a handle for the .nii file's header
hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2]))
nib.save(avg_mask, output_name_mask)'''
# In[17]:
def mod_smooth(in_file, mask_file, fwhm, smooth_type):
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import os
if smooth_type == 'susan':
if fwhm == 0:
return in_file
smooth = create_susan_smooth()
smooth.base_dir = out_dir#os.getcwd()
smooth.inputs.inputnode.fwhm = fwhm
smooth.inputs.inputnode.mask_file = mask_file
smooth.inputs.inputnode.in_files = in_file
#smooth.outputs.outputnode.smoothed_files='/jukebox/norman/jantony/surprisesuspense/data/bids/Norman/Antony/ss/derivatives/firstlevel/sub-02/ses-01/sub-02_ses-01_task-recall_run-01_space-MNI152NLin2009cAsym_desc-preproc_bold_trim3TRs_smooth.nii.gz'
res = smooth.run()
smoothed_file=[] #smoothed_file = res.outputs.outputnode.smoothed_files
return smoothed_file
# In[18]:
#truncate first n_trunc TRs
#confounds_trunc=confounds_selected[3:end]
epi_trunc=[]
#https://github.com/INCF/BrainImagingPipelines/blob/master/bips/workflows/gablab/wips/scripts/modular_nodes.py
print('Number of runs to concatenate:', n_runs_recall)
for run in range(firstrun,lastrun+1):#lastrun+1
out_smooth=(out_dir + 'ses-01/' + '%s_ses-01_task-recall9_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval))
if os.path.exists(out_smooth):
proceeeeed=[]
epi_data=nib.load(out_smooth)
epi_data=resample_to_img(epi_data, template)# JWA, August 25 change
epi=epi_data.get_fdata()
#truncate
epi_trunc =np.zeros((epi_data.shape[0], epi_data.shape[1], epi_data.shape[2], epi_data.shape[3]-n_trunc))
epi_trunc[:, :, :, :] = epi[:,:,:,n_trunc:]
print(epi_data.shape, ' ', epi_trunc.shape)
dimsize=epi_data.header.get_zooms()
#print(dimsize)
orig_dimsize=dimsize
affine_mat = epi_data.affine # What is the orientation of the data
print(affine_mat)
else:
epi_file=ses1_dir + sub + '_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz' % run
epi_data=nib.load(epi_file)
epi_data=resample_to_img(epi_data, template)# JWA, August 25 change
epi=epi_data.get_fdata()
#truncate
epi_trunc =np.zeros((epi_data.shape[0], epi_data.shape[1], epi_data.shape[2], epi_data.shape[3]-n_trunc))
epi_trunc[:, :, :, :] = epi[:,:,:,n_trunc:]
print(epi_data.shape, ' ', epi_trunc.shape)
dimsize=epi_data.header.get_zooms()
#print(dimsize)
orig_dimsize=dimsize
affine_mat = epi_data.affine # What is the orientation of the data
print(affine_mat)
# Save the volume
output_name = (out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%dTRs.nii.gz' % (sub, run, n_trunc))
bold_nii = nib.Nifti1Image(epi_trunc, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2], dimsize[3]))
nib.save(bold_nii, output_name)
# smooth with susan
smoothed_file = mod_smooth(output_name,output_name_mask,fwhmval, 'susan')
#move file
in_smooth=(out_dir+'susan_smooth/smooth/mapflow/_smooth0/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%dTRs_smooth.nii.gz' % (sub, run, n_trunc))
#out_smooth=(out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval))
os.rename(in_smooth,out_smooth)
# ## Load fMRI data <a id="load_fmri"></a>
# #### Get voxels from an ROI
#
# We will extract BOLD data, only for voxels in a mask, by executing the following sequence of steps:
# 1. load whole brain fMRI data (for a given subject and a given run)
# 2. load the desired mask
# 3. use `NiftiMasker` to sub-select mask voxels from the whole brain data
# - `NiftiMasker` is a function from nilearn. Here's <a href="https://nilearn.github.io/auto_examples/04_manipulating_images/plot_mask_computation.html">an example</a> about how to use it, and here's the official <a href="https://nilearn.github.io/modules/generated/nilearn.input_data.NiftiMasker.html">documentation</a>.
# ## Apply mask to truncated dataset
# In[19]:
epi_mask_data_all=[]
for run in range(firstrun,lastrun+1):# Load the fMRI data
print('now on run:', run)
epi_masker= NiftiMasker(mask_img=avg_mask, high_pass=1/128,
standardize=True, # Are you going to zscore the data across time?
t_r=ss_TR,
memory='nilearn_cache', # Caches the mask in the directory given as a string here so that it is easier to load and retrieve
memory_level=1, # How much memory will you cache?
verbose=0)
epi_file=out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_smooth%d.nii.gz' % (sub, run, n_trunc,fwhmval)
#confound_file= bold_dir + '%s_confounds_selected_r0%i.txt' % (sub, run) #uncommented from Lizzie
confound_file= bold_dir + 'sub-%s_ses-01_task-recall_run-%s_desc-confounds_regressors.tsv' %(sub,run)
if run==firstrun:
if 0==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[0:int(sum(ntr[run])),:]) #commented from Lizzie
if 1==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[0:int(sum(ntr[run-1])),:]) #commented from Lizzie
epi_mask_data_all=epi_mask_data
nTR_all=epi_mask_data.shape[0]
else:
if 0==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[int(sum(ntr[0:run])):int(sum(ntr[0:run+1])),:])
if 1==firstrun:
epi_mask_data = epi_masker.fit_transform(epi_file,confounds=confounds_all[int(sum(ntr[0:run-1])):int(sum(ntr[0:run])),:])
epi_mask_data_all=np.vstack([epi_mask_data_all,epi_mask_data])
nTR_all=np.vstack([nTR_all,epi_mask_data.shape[0]])
print('Saving trimmed and normalized volume for run',run)
affine_mat = avg_mask.affine #should be the same as the epi data
avg_mask.shape
coords = np.where(avg_mask.get_fdata())
bold_vol=[]
bold_vol=np.zeros((avg_mask.shape[0], avg_mask.shape[1], avg_mask.shape[2], epi_mask_data.shape[0]))
bold_vol[coords[0], coords[1], coords[2], :] = epi_mask_data.T
print('epi_mask_data shape:', bold_vol.shape)
output_name = (out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_norm.nii.gz' % (sub, run, n_trunc))
bold_nii = nib.Nifti1Image(bold_vol, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
hdr.set_zooms((orig_dimsize[0], orig_dimsize[1], orig_dimsize[2], orig_dimsize[3]))
nib.save(bold_nii, output_name)
print(epi_mask_data_all.shape)
# In[20]:
# concatenate volumes
segs = {}
runs = {}
for game in range(ss_tngs):
#identify game in the order it was shown
g_num=g_o[0,game]
seg=np.arange(RSTR[game,0]-1,RETR[game,0]-2,1) #Note the difference of -2
seg=seg+hrshiftval #shift for hrf
#determine which run it was in
run=np.ceil((game+1)/3)
if firstrun==1: #most subjects
if run==2:
seg=seg+nTR_all[0]
if run==3:
seg=seg+nTR_all[0]+nTR_all[1]
print(seg)
print(len(seg))
#if seg: #most subjects
segs[g_num]=seg
runs[g_num]=run
# In[21]:
# re-order concatenated volumes
for g_num in range(1,1+ss_tngs):
runv=np.repeat(runs[g_num],len(segs[g_num]))
if g_num==1:
epi_mask_event=epi_mask_data_all[segs[g_num].astype(int),:]
confounds_event=confounds_all[segs[g_num].astype(int),:]
run_event=runv
print(epi_mask_event.shape)
print(confounds_event.shape)
print(run_event.shape)
else:
epi_mask_event=np.concatenate([epi_mask_event,epi_mask_data_all[segs[g_num].astype(int),:]],axis=0)
confounds_event=np.concatenate([confounds_event,confounds_all[segs[g_num].astype(int),:]],axis=0)
run_event=np.concatenate([run_event,runv],axis=0)
print(epi_mask_event.shape)
print(confounds_event.shape)
print(run_event.shape)
# In[22]:
print(run_event)
# In[23]:
#estimate autocorrelation
'''
def estimated_autocorrelation(x):
"""
http://stackoverflow.com/q/14297012/190597
http://en.wikipedia.org/wiki/Autocorrelation#Estimation
"""
n = len(x)
variance = x.var()
x = x-x.mean()
r = np.correlate(x, x, mode = 'full')[-n:]
assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))
result = r/(variance*(np.arange(n, 0, -1)))
return result
voxel_id=17891
a1=estimated_autocorrelation(epi_mask_hrf[:, voxel_id])
print(a1.shape)
pts=20
if ipynby==1:
f, ax = plt.subplots(1,1, figsize=(14,5))
ax.plot(a1[0:pts])'''
# In[24]:
#z-score
epi_mask_event2=epi_mask_event
print(epi_mask_event2.shape)
# In[25]:
#save confounds_hrf2 as .mat
mat_fname=nonfmri_dir+'confounds_recall.mat'
scipy.io.savemat(mat_fname,{'nTR_all': nTR_all,'confounds_event': confounds_event,'run_event': run_event})
# In[26]:
bold_vol_event=[]
bold_vol_event=np.zeros((avg_mask.shape[0], avg_mask.shape[1], avg_mask.shape[2], epi_mask_event2.shape[0]))
bold_vol_event[coords[0], coords[1], coords[2], :] = epi_mask_event2.T
print(bold_vol_event.shape)
print(avg_mask.shape)
# In[27]:
# Save the concatenated volumes, event file
output_name = out_dir + '%s_task-recall_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_norm_event.nii.gz' % (sub, n_trunc)
print(output_name)
bold_nii = nib.Nifti1Image(bold_vol_event, affine_mat)
hdr = bold_nii.header # get a handle for the .nii file's header
print(orig_dimsize)
hdr.set_zooms((orig_dimsize[0], orig_dimsize[1], orig_dimsize[2], orig_dimsize[3]))
nib.save(bold_nii, output_name)
print('Volume saved')
endtime = time.time()
print(endtime - starttime)
# ## Plot voxels across runs in game order
# In[28]:
if ipynby==1:
n_vox=2000
plt.figure(figsize=(10,16))
this_img = epi_mask_event2[:,:n_vox];
this_img=this_img.T;#must transform because of how Python reshapes
tn='Event-%s' %sub;
plt.imshow(this_img,cmap='gray',origin='lower',interpolation='none',aspect="auto")
plt.title(tn)
# In[29]:
if ipynby==1:
for run in range(1,n_runs_recall+1):
func_name=out_dir + 'ses-01/' + '%s_ses-01_task-recall_run-0%i_space-MNI152NLin2009cAsym_desc-preproc_bold_trim%d_norm.nii.gz' % (sub, run, n_trunc)
#print(image.load_img(func_name).shape)
vol_num=round(image.load_img(func_name).shape[3]/2)
middle_vol = image.index_img(func_name,vol_num)
#mean_func = mean_img(func_name)
print('Plotting middle volumed for run',run)
plot_epi(middle_vol)
# ### 3.1. Plot a voxel time-series <a id="plot_voxel"></a>
# In[30]:
# Plot value of voxel_id through time
if ipynby==1:
voxel_id = 2000
f, ax = plt.subplots(1,1, figsize=(14,5))
ax.plot(epi_mask_event2[0:200, voxel_id])
ax.set_title('Voxel time series, voxel id = %d' % voxel_id)
ax.set_xlabel('TR (ordered by game #)')
ax.set_ylabel('Voxel Intensity-normed')
# ## Check mean and standard deviation of normalized data
# In[31]:
if ipynby==1:
x_mean = np.mean(epi_mask_event2, axis=0)
x_std = np.std(epi_mask_event2, axis=0, dtype=np.float64)
print('the mean of 1st few time points:\n', x_mean[0:50])
print('')
print('the std of 1st few time points:\n', x_std[0:50])
print('')
print(np.shape(x_mean))
print(np.shape(x_std))
print('')
print(np.amin(x_mean), np.amax(x_mean))
print(np.amin(x_std), np.amax(x_std))
# print(x_std)
f, axes = plt.subplots(1, 2, figsize = (14,4))
n_bins = 20
axes[0].hist(x_mean, bins = n_bins)
axes[0].set_title('distribution of means')
axes[0].set_xlabel('mean values')
axes[0].set_ylabel('counts')
axes[0].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[1].hist(x_std, bins = n_bins)
axes[1].set_title('distribution of stds')
axes[1].set_xlabel('std values')
axes[1].set_ylabel('counts')
axes[1].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[1].get_xaxis().get_major_formatter().set_useOffset(False)
plt.show()
# In[ ]:
| confounds_all=confounds_selected | conditional_block |
test_unihan.py | """Tests for unihan data download and processing."""
import logging
import os
import shutil
import pytest
from unihan_etl import __version__, constants, process
from unihan_etl._compat import PY2
from unihan_etl.process import DEFAULT_OPTIONS, UNIHAN_ZIP_PATH, Packager, zip_has_files
from unihan_etl.test import assert_dict_contains_subset
from unihan_etl.util import merge_dict
log = logging.getLogger(__name__)
def test_zip_has_files(mock_zip):
assert zip_has_files(['Unihan_Readings.txt'], mock_zip)
assert not zip_has_files(['Unihan_Cats.txt'], mock_zip)
def test_has_valid_zip(tmpdir, mock_zip):
if os.path.isfile(UNIHAN_ZIP_PATH):
assert process.has_valid_zip(UNIHAN_ZIP_PATH)
else:
assert not process.has_valid_zip(UNIHAN_ZIP_PATH)
assert process.has_valid_zip(mock_zip.filename)
bad_zip = tmpdir.join('corrupt.zip')
bad_zip.write('moo')
assert not process.has_valid_zip(str(bad_zip))
def test_in_fields():
columns = ['hey', 'kDefinition', 'kWhat']
result = process.in_fields('kDefinition', columns)
assert result
def test_filter_manifest():
expected = {
'Unihan_Variants.txt': [
'kSemanticVariant',
'kSimplifiedVariant',
'kSpecializedSemanticVariant',
'kTraditionalVariant',
'kZVariant',
]
}
result = process.filter_manifest(['Unihan_Variants.txt'])
assert set(result) == set(expected)
def test_get_files():
fields = ['kKorean', 'kRSUnicode']
expected = ['Unihan_Readings.txt', 'Unihan_RadicalStrokeCounts.txt']
result = process.get_files(fields)
assert set(result) == set(expected)
def test_download(tmpdir, mock_zip, mock_zip_file, mock_zip_filename):
dest_filepath = tmpdir.join('data', mock_zip_filename)
process.download(str(mock_zip_file), str(dest_filepath), shutil.copy)
result = os.path.dirname(str(dest_filepath.join('data')))
assert result, "Creates data directory if doesn't exist."
def test_download_mock(tmpdir, mock_zip, mock_zip_file, mock_test_dir, test_options):
data_path = tmpdir.join('data')
dest_path = data_path.join('data', 'hey.zip')
def urlretrieve(url, filename, url_retrieve, reporthook=None):
mock_zip_file.copy(dest_path)
p = Packager(
merge_dict(
test_options.copy,
{
'fields': ['kDefinition'],
'zip_path': str(dest_path),
'work_dir': str(mock_test_dir.join('downloads')),
'destination': str(data_path.join('unihan.csv')),
},
)
)
p.download(urlretrieve_fn=urlretrieve)
assert os.path.exists(str(dest_path))
p.export()
def test_export_format(tmpdir, mock_zip, mock_zip_file, mock_test_dir, test_options):
data_path = tmpdir.join('data')
dest_path = data_path.join('data', 'hey.zip')
def urlretrieve(url, filename, url_retrieve, reporthook=None):
mock_zip_file.copy(dest_path)
p = Packager(
merge_dict(
test_options.copy,
{
'fields': ['kDefinition'],
'zip_path': str(dest_path),
'work_dir': str(mock_test_dir.join('downloads')),
'destination': str(data_path.join('unihan.{ext}')),
'format': 'json',
},
)
)
p.download(urlretrieve_fn=urlretrieve)
assert os.path.exists(str(dest_path))
p.export()
assert str(data_path.join('unihan.json')) == p.options['destination']
assert os.path.exists(p.options['destination'])
def test_extract_zip(mock_zip, mock_zip_file, tmpdir):
zf = process.extract_zip(str(mock_zip_file), str(tmpdir))
assert len(zf.infolist()) == 1
assert zf.infolist()[0].file_size == 218
assert zf.infolist()[0].filename == "Unihan_Readings.txt"
def test_normalize_only_output_requested_columns(normalized_data, columns):
items = normalized_data
in_columns = ['kDefinition', 'kCantonese']
for v in items:
assert set(columns) == set(v.keys())
items = process.listify(items, in_columns)
not_in_columns = []
# columns not selected in normalize must not be in result.
for v in items[0]:
if v not in columns:
not_in_columns.append(v)
else:
|
assert [] == not_in_columns, "normalize filters columns not specified."
assert set(in_columns).issubset(
set(columns)
), "normalize returns correct columns specified + ucn and char."
def test_normalize_simple_data_format(fixture_dir):
"""normalize turns data into simple data format (SDF)."""
csv_files = [
os.path.join(fixture_dir, 'Unihan_DictionaryLikeData.txt'),
os.path.join(fixture_dir, 'Unihan_Readings.txt'),
]
columns = (
'kTotalStrokes',
'kPhonetic',
'kCantonese',
'kDefinition',
) + constants.INDEX_FIELDS
data = process.load_data(files=csv_files)
items = process.normalize(data, columns)
items = process.listify(items, columns)
header = items[0]
assert header == columns
rows = items[1:] # NOQA
def test_flatten_fields():
single_dataset = {'Unihan_Readings.txt': ['kCantonese', 'kDefinition', 'kHangul']}
expected = ['kCantonese', 'kDefinition', 'kHangul']
results = process.get_fields(single_dataset)
assert expected == results
datasets = {
'Unihan_NumericValues.txt': [
'kAccountingNumeric',
'kOtherNumeric',
'kPrimaryNumeric',
],
'Unihan_OtherMappings.txt': ['kBigFive', 'kCCCII', 'kCNS1986'],
}
expected = [
'kAccountingNumeric',
'kOtherNumeric',
'kPrimaryNumeric',
'kBigFive',
'kCCCII',
'kCNS1986',
]
results = process.get_fields(datasets)
assert set(expected) == set(results)
def test_pick_files(mock_zip_file):
"""Pick a white list of files to build from."""
files = ['Unihan_Readings.txt', 'Unihan_Variants.txt']
options = {'input_files': files, 'zip_path': str(mock_zip_file)}
b = process.Packager(options)
result = b.options['input_files']
expected = files
assert result == expected, 'Returns only the files picked.'
def test_raise_error_unknown_field():
"""Throw error if picking unknown field."""
options = {'fields': ['kHello']}
with pytest.raises(KeyError) as excinfo:
process.Packager(options)
excinfo.match('Field ([a-zA-Z].*) not found in file list.')
def test_raise_error_unknown_file():
"""Throw error if picking unknown file."""
options = {'input_files': ['Sparta.lol']}
with pytest.raises(KeyError) as excinfo:
process.Packager(options)
excinfo.match(r'File ([a-zA-Z_\.\'].*) not found in file list.')
def test_raise_error_unknown_field_filtered_files():
"""Throw error field not in file list, when files specified."""
files = ['Unihan_Variants.txt']
options = {'input_files': files, 'fields': ['kDefinition']}
with pytest.raises(KeyError) as excinfo:
process.Packager(options)
excinfo.match('Field ([a-zA-Z].*) not found in file list.')
def test_set_reduce_files_automatically_when_only_field_specified():
"""Picks file automatically if none specified and fields are."""
fields = (
constants.UNIHAN_MANIFEST['Unihan_Readings.txt']
+ constants.UNIHAN_MANIFEST['Unihan_Variants.txt']
)
options = {'fields': fields}
b = process.Packager(options)
expected = ['Unihan_Readings.txt', 'Unihan_Variants.txt']
results = b.options['input_files']
assert set(expected) == set(results)
def test_set_reduce_fields_automatically_when_only_files_specified():
"""Picks only necessary files when fields specified."""
files = ['Unihan_Readings.txt', 'Unihan_Variants.txt']
options = {'input_files': files}
b = process.Packager(options)
results = process.get_fields(process.filter_manifest(files))
expected = b.options['fields']
assert set(expected) == set(results), 'Returns only the fields for files picked.'
def test_no_args():
"""Works without arguments."""
assert DEFAULT_OPTIONS == Packager.from_cli([]).options
def test_cli_plus_defaults(mock_zip_file):
"""Test CLI args + defaults."""
option_subset = {'zip_path': str(mock_zip_file)}
result = Packager.from_cli(['-z', str(mock_zip_file)]).options
assert_dict_contains_subset(option_subset, result)
option_subset = {'fields': ['kDefinition']}
result = Packager.from_cli(['-f', 'kDefinition']).options
assert_dict_contains_subset(option_subset, result)
option_subset = {'fields': ['kDefinition', 'kXerox']}
result = Packager.from_cli(['-f', 'kDefinition', 'kXerox']).options
assert_dict_contains_subset(
option_subset, result, msg="fields -f allows multiple fields."
)
option_subset = {'fields': ['kDefinition', 'kXerox'], 'destination': 'data/ha.csv'}
result = Packager.from_cli(
['-f', 'kDefinition', 'kXerox', '-d', 'data/ha.csv']
).options
assert_dict_contains_subset(
option_subset, result, msg="fields -f allows additional arguments."
)
result = Packager.from_cli(['--format', 'json']).options
option_subset = {'format': 'json'}
assert_dict_contains_subset(option_subset, result, msg="format argument works")
def test_cli_exit_emessage_to_stderr():
"""Sends exception .message to stderr on exit."""
# SystemExit print's to stdout by default
with pytest.raises(SystemExit) as excinfo:
Packager.from_cli(['-d', 'data/output.csv', '-f', 'sdfa'])
excinfo.match('Field sdfa not found in file list.')
@pytest.mark.parametrize('flag', ['-v', '--version'])
def test_cli_version(capsys, flag):
with pytest.raises(SystemExit):
Packager.from_cli([flag])
captured = capsys.readouterr()
if PY2: # todo: why does python 2.x return -v in error?
assert __version__ in captured.err
else:
assert __version__ in captured.out
| in_columns.append(v) | conditional_block |
test_unihan.py | """Tests for unihan data download and processing."""
import logging
import os
import shutil
import pytest
from unihan_etl import __version__, constants, process
from unihan_etl._compat import PY2
from unihan_etl.process import DEFAULT_OPTIONS, UNIHAN_ZIP_PATH, Packager, zip_has_files
from unihan_etl.test import assert_dict_contains_subset
from unihan_etl.util import merge_dict
log = logging.getLogger(__name__)
def test_zip_has_files(mock_zip):
assert zip_has_files(['Unihan_Readings.txt'], mock_zip)
assert not zip_has_files(['Unihan_Cats.txt'], mock_zip)
def test_has_valid_zip(tmpdir, mock_zip):
if os.path.isfile(UNIHAN_ZIP_PATH):
assert process.has_valid_zip(UNIHAN_ZIP_PATH)
else:
assert not process.has_valid_zip(UNIHAN_ZIP_PATH)
assert process.has_valid_zip(mock_zip.filename)
bad_zip = tmpdir.join('corrupt.zip')
bad_zip.write('moo')
assert not process.has_valid_zip(str(bad_zip))
def test_in_fields():
columns = ['hey', 'kDefinition', 'kWhat']
result = process.in_fields('kDefinition', columns)
assert result
def test_filter_manifest():
expected = {
'Unihan_Variants.txt': [
'kSemanticVariant',
'kSimplifiedVariant',
'kSpecializedSemanticVariant',
'kTraditionalVariant',
'kZVariant',
]
}
result = process.filter_manifest(['Unihan_Variants.txt'])
assert set(result) == set(expected)
def test_get_files():
fields = ['kKorean', 'kRSUnicode']
expected = ['Unihan_Readings.txt', 'Unihan_RadicalStrokeCounts.txt']
result = process.get_files(fields)
assert set(result) == set(expected)
def test_download(tmpdir, mock_zip, mock_zip_file, mock_zip_filename):
dest_filepath = tmpdir.join('data', mock_zip_filename)
process.download(str(mock_zip_file), str(dest_filepath), shutil.copy)
result = os.path.dirname(str(dest_filepath.join('data')))
assert result, "Creates data directory if doesn't exist."
def test_download_mock(tmpdir, mock_zip, mock_zip_file, mock_test_dir, test_options):
data_path = tmpdir.join('data')
dest_path = data_path.join('data', 'hey.zip')
def urlretrieve(url, filename, url_retrieve, reporthook=None):
mock_zip_file.copy(dest_path)
p = Packager(
merge_dict(
test_options.copy,
{
'fields': ['kDefinition'],
'zip_path': str(dest_path),
'work_dir': str(mock_test_dir.join('downloads')),
'destination': str(data_path.join('unihan.csv')),
},
)
)
p.download(urlretrieve_fn=urlretrieve)
assert os.path.exists(str(dest_path))
p.export()
def test_export_format(tmpdir, mock_zip, mock_zip_file, mock_test_dir, test_options):
data_path = tmpdir.join('data')
dest_path = data_path.join('data', 'hey.zip')
def urlretrieve(url, filename, url_retrieve, reporthook=None):
mock_zip_file.copy(dest_path)
p = Packager(
merge_dict(
test_options.copy,
{
'fields': ['kDefinition'],
'zip_path': str(dest_path),
'work_dir': str(mock_test_dir.join('downloads')),
'destination': str(data_path.join('unihan.{ext}')),
'format': 'json',
},
)
)
p.download(urlretrieve_fn=urlretrieve)
assert os.path.exists(str(dest_path))
p.export()
assert str(data_path.join('unihan.json')) == p.options['destination']
assert os.path.exists(p.options['destination'])
def test_extract_zip(mock_zip, mock_zip_file, tmpdir):
zf = process.extract_zip(str(mock_zip_file), str(tmpdir))
assert len(zf.infolist()) == 1
assert zf.infolist()[0].file_size == 218
assert zf.infolist()[0].filename == "Unihan_Readings.txt"
def | (normalized_data, columns):
items = normalized_data
in_columns = ['kDefinition', 'kCantonese']
for v in items:
assert set(columns) == set(v.keys())
items = process.listify(items, in_columns)
not_in_columns = []
# columns not selected in normalize must not be in result.
for v in items[0]:
if v not in columns:
not_in_columns.append(v)
else:
in_columns.append(v)
assert [] == not_in_columns, "normalize filters columns not specified."
assert set(in_columns).issubset(
set(columns)
), "normalize returns correct columns specified + ucn and char."
def test_normalize_simple_data_format(fixture_dir):
"""normalize turns data into simple data format (SDF)."""
csv_files = [
os.path.join(fixture_dir, 'Unihan_DictionaryLikeData.txt'),
os.path.join(fixture_dir, 'Unihan_Readings.txt'),
]
columns = (
'kTotalStrokes',
'kPhonetic',
'kCantonese',
'kDefinition',
) + constants.INDEX_FIELDS
data = process.load_data(files=csv_files)
items = process.normalize(data, columns)
items = process.listify(items, columns)
header = items[0]
assert header == columns
rows = items[1:] # NOQA
def test_flatten_fields():
single_dataset = {'Unihan_Readings.txt': ['kCantonese', 'kDefinition', 'kHangul']}
expected = ['kCantonese', 'kDefinition', 'kHangul']
results = process.get_fields(single_dataset)
assert expected == results
datasets = {
'Unihan_NumericValues.txt': [
'kAccountingNumeric',
'kOtherNumeric',
'kPrimaryNumeric',
],
'Unihan_OtherMappings.txt': ['kBigFive', 'kCCCII', 'kCNS1986'],
}
expected = [
'kAccountingNumeric',
'kOtherNumeric',
'kPrimaryNumeric',
'kBigFive',
'kCCCII',
'kCNS1986',
]
results = process.get_fields(datasets)
assert set(expected) == set(results)
def test_pick_files(mock_zip_file):
"""Pick a white list of files to build from."""
files = ['Unihan_Readings.txt', 'Unihan_Variants.txt']
options = {'input_files': files, 'zip_path': str(mock_zip_file)}
b = process.Packager(options)
result = b.options['input_files']
expected = files
assert result == expected, 'Returns only the files picked.'
def test_raise_error_unknown_field():
"""Throw error if picking unknown field."""
options = {'fields': ['kHello']}
with pytest.raises(KeyError) as excinfo:
process.Packager(options)
excinfo.match('Field ([a-zA-Z].*) not found in file list.')
def test_raise_error_unknown_file():
"""Throw error if picking unknown file."""
options = {'input_files': ['Sparta.lol']}
with pytest.raises(KeyError) as excinfo:
process.Packager(options)
excinfo.match(r'File ([a-zA-Z_\.\'].*) not found in file list.')
def test_raise_error_unknown_field_filtered_files():
"""Throw error field not in file list, when files specified."""
files = ['Unihan_Variants.txt']
options = {'input_files': files, 'fields': ['kDefinition']}
with pytest.raises(KeyError) as excinfo:
process.Packager(options)
excinfo.match('Field ([a-zA-Z].*) not found in file list.')
def test_set_reduce_files_automatically_when_only_field_specified():
"""Picks file automatically if none specified and fields are."""
fields = (
constants.UNIHAN_MANIFEST['Unihan_Readings.txt']
+ constants.UNIHAN_MANIFEST['Unihan_Variants.txt']
)
options = {'fields': fields}
b = process.Packager(options)
expected = ['Unihan_Readings.txt', 'Unihan_Variants.txt']
results = b.options['input_files']
assert set(expected) == set(results)
def test_set_reduce_fields_automatically_when_only_files_specified():
"""Picks only necessary files when fields specified."""
files = ['Unihan_Readings.txt', 'Unihan_Variants.txt']
options = {'input_files': files}
b = process.Packager(options)
results = process.get_fields(process.filter_manifest(files))
expected = b.options['fields']
assert set(expected) == set(results), 'Returns only the fields for files picked.'
def test_no_args():
"""Works without arguments."""
assert DEFAULT_OPTIONS == Packager.from_cli([]).options
def test_cli_plus_defaults(mock_zip_file):
"""Test CLI args + defaults."""
option_subset = {'zip_path': str(mock_zip_file)}
result = Packager.from_cli(['-z', str(mock_zip_file)]).options
assert_dict_contains_subset(option_subset, result)
option_subset = {'fields': ['kDefinition']}
result = Packager.from_cli(['-f', 'kDefinition']).options
assert_dict_contains_subset(option_subset, result)
option_subset = {'fields': ['kDefinition', 'kXerox']}
result = Packager.from_cli(['-f', 'kDefinition', 'kXerox']).options
assert_dict_contains_subset(
option_subset, result, msg="fields -f allows multiple fields."
)
option_subset = {'fields': ['kDefinition', 'kXerox'], 'destination': 'data/ha.csv'}
result = Packager.from_cli(
['-f', 'kDefinition', 'kXerox', '-d', 'data/ha.csv']
).options
assert_dict_contains_subset(
option_subset, result, msg="fields -f allows additional arguments."
)
result = Packager.from_cli(['--format', 'json']).options
option_subset = {'format': 'json'}
assert_dict_contains_subset(option_subset, result, msg="format argument works")
def test_cli_exit_emessage_to_stderr():
"""Sends exception .message to stderr on exit."""
# SystemExit print's to stdout by default
with pytest.raises(SystemExit) as excinfo:
Packager.from_cli(['-d', 'data/output.csv', '-f', 'sdfa'])
excinfo.match('Field sdfa not found in file list.')
@pytest.mark.parametrize('flag', ['-v', '--version'])
def test_cli_version(capsys, flag):
with pytest.raises(SystemExit):
Packager.from_cli([flag])
captured = capsys.readouterr()
if PY2: # todo: why does python 2.x return -v in error?
assert __version__ in captured.err
else:
assert __version__ in captured.out
| test_normalize_only_output_requested_columns | identifier_name |
test_unihan.py | """Tests for unihan data download and processing."""
import logging
import os
import shutil
import pytest
from unihan_etl import __version__, constants, process
from unihan_etl._compat import PY2
from unihan_etl.process import DEFAULT_OPTIONS, UNIHAN_ZIP_PATH, Packager, zip_has_files
from unihan_etl.test import assert_dict_contains_subset
from unihan_etl.util import merge_dict
log = logging.getLogger(__name__)
def test_zip_has_files(mock_zip):
assert zip_has_files(['Unihan_Readings.txt'], mock_zip)
assert not zip_has_files(['Unihan_Cats.txt'], mock_zip)
def test_has_valid_zip(tmpdir, mock_zip):
if os.path.isfile(UNIHAN_ZIP_PATH):
assert process.has_valid_zip(UNIHAN_ZIP_PATH)
else:
assert not process.has_valid_zip(UNIHAN_ZIP_PATH)
assert process.has_valid_zip(mock_zip.filename)
bad_zip = tmpdir.join('corrupt.zip')
bad_zip.write('moo')
assert not process.has_valid_zip(str(bad_zip))
def test_in_fields():
columns = ['hey', 'kDefinition', 'kWhat']
result = process.in_fields('kDefinition', columns)
assert result
def test_filter_manifest():
expected = {
'Unihan_Variants.txt': [
'kSemanticVariant',
'kSimplifiedVariant',
'kSpecializedSemanticVariant',
'kTraditionalVariant',
'kZVariant',
]
}
result = process.filter_manifest(['Unihan_Variants.txt'])
assert set(result) == set(expected)
def test_get_files():
fields = ['kKorean', 'kRSUnicode']
expected = ['Unihan_Readings.txt', 'Unihan_RadicalStrokeCounts.txt']
result = process.get_files(fields)
assert set(result) == set(expected)
def test_download(tmpdir, mock_zip, mock_zip_file, mock_zip_filename):
dest_filepath = tmpdir.join('data', mock_zip_filename)
process.download(str(mock_zip_file), str(dest_filepath), shutil.copy)
result = os.path.dirname(str(dest_filepath.join('data')))
assert result, "Creates data directory if doesn't exist."
def test_download_mock(tmpdir, mock_zip, mock_zip_file, mock_test_dir, test_options):
data_path = tmpdir.join('data')
dest_path = data_path.join('data', 'hey.zip')
def urlretrieve(url, filename, url_retrieve, reporthook=None): | {
'fields': ['kDefinition'],
'zip_path': str(dest_path),
'work_dir': str(mock_test_dir.join('downloads')),
'destination': str(data_path.join('unihan.csv')),
},
)
)
p.download(urlretrieve_fn=urlretrieve)
assert os.path.exists(str(dest_path))
p.export()
def test_export_format(tmpdir, mock_zip, mock_zip_file, mock_test_dir, test_options):
data_path = tmpdir.join('data')
dest_path = data_path.join('data', 'hey.zip')
def urlretrieve(url, filename, url_retrieve, reporthook=None):
mock_zip_file.copy(dest_path)
p = Packager(
merge_dict(
test_options.copy,
{
'fields': ['kDefinition'],
'zip_path': str(dest_path),
'work_dir': str(mock_test_dir.join('downloads')),
'destination': str(data_path.join('unihan.{ext}')),
'format': 'json',
},
)
)
p.download(urlretrieve_fn=urlretrieve)
assert os.path.exists(str(dest_path))
p.export()
assert str(data_path.join('unihan.json')) == p.options['destination']
assert os.path.exists(p.options['destination'])
def test_extract_zip(mock_zip, mock_zip_file, tmpdir):
zf = process.extract_zip(str(mock_zip_file), str(tmpdir))
assert len(zf.infolist()) == 1
assert zf.infolist()[0].file_size == 218
assert zf.infolist()[0].filename == "Unihan_Readings.txt"
def test_normalize_only_output_requested_columns(normalized_data, columns):
items = normalized_data
in_columns = ['kDefinition', 'kCantonese']
for v in items:
assert set(columns) == set(v.keys())
items = process.listify(items, in_columns)
not_in_columns = []
# columns not selected in normalize must not be in result.
for v in items[0]:
if v not in columns:
not_in_columns.append(v)
else:
in_columns.append(v)
assert [] == not_in_columns, "normalize filters columns not specified."
assert set(in_columns).issubset(
set(columns)
), "normalize returns correct columns specified + ucn and char."
def test_normalize_simple_data_format(fixture_dir):
"""normalize turns data into simple data format (SDF)."""
csv_files = [
os.path.join(fixture_dir, 'Unihan_DictionaryLikeData.txt'),
os.path.join(fixture_dir, 'Unihan_Readings.txt'),
]
columns = (
'kTotalStrokes',
'kPhonetic',
'kCantonese',
'kDefinition',
) + constants.INDEX_FIELDS
data = process.load_data(files=csv_files)
items = process.normalize(data, columns)
items = process.listify(items, columns)
header = items[0]
assert header == columns
rows = items[1:] # NOQA
def test_flatten_fields():
single_dataset = {'Unihan_Readings.txt': ['kCantonese', 'kDefinition', 'kHangul']}
expected = ['kCantonese', 'kDefinition', 'kHangul']
results = process.get_fields(single_dataset)
assert expected == results
datasets = {
'Unihan_NumericValues.txt': [
'kAccountingNumeric',
'kOtherNumeric',
'kPrimaryNumeric',
],
'Unihan_OtherMappings.txt': ['kBigFive', 'kCCCII', 'kCNS1986'],
}
expected = [
'kAccountingNumeric',
'kOtherNumeric',
'kPrimaryNumeric',
'kBigFive',
'kCCCII',
'kCNS1986',
]
results = process.get_fields(datasets)
assert set(expected) == set(results)
def test_pick_files(mock_zip_file):
"""Pick a white list of files to build from."""
files = ['Unihan_Readings.txt', 'Unihan_Variants.txt']
options = {'input_files': files, 'zip_path': str(mock_zip_file)}
b = process.Packager(options)
result = b.options['input_files']
expected = files
assert result == expected, 'Returns only the files picked.'
def test_raise_error_unknown_field():
"""Throw error if picking unknown field."""
options = {'fields': ['kHello']}
with pytest.raises(KeyError) as excinfo:
process.Packager(options)
excinfo.match('Field ([a-zA-Z].*) not found in file list.')
def test_raise_error_unknown_file():
"""Throw error if picking unknown file."""
options = {'input_files': ['Sparta.lol']}
with pytest.raises(KeyError) as excinfo:
process.Packager(options)
excinfo.match(r'File ([a-zA-Z_\.\'].*) not found in file list.')
def test_raise_error_unknown_field_filtered_files():
"""Throw error field not in file list, when files specified."""
files = ['Unihan_Variants.txt']
options = {'input_files': files, 'fields': ['kDefinition']}
with pytest.raises(KeyError) as excinfo:
process.Packager(options)
excinfo.match('Field ([a-zA-Z].*) not found in file list.')
def test_set_reduce_files_automatically_when_only_field_specified():
"""Picks file automatically if none specified and fields are."""
fields = (
constants.UNIHAN_MANIFEST['Unihan_Readings.txt']
+ constants.UNIHAN_MANIFEST['Unihan_Variants.txt']
)
options = {'fields': fields}
b = process.Packager(options)
expected = ['Unihan_Readings.txt', 'Unihan_Variants.txt']
results = b.options['input_files']
assert set(expected) == set(results)
def test_set_reduce_fields_automatically_when_only_files_specified():
"""Picks only necessary files when fields specified."""
files = ['Unihan_Readings.txt', 'Unihan_Variants.txt']
options = {'input_files': files}
b = process.Packager(options)
results = process.get_fields(process.filter_manifest(files))
expected = b.options['fields']
assert set(expected) == set(results), 'Returns only the fields for files picked.'
def test_no_args():
"""Works without arguments."""
assert DEFAULT_OPTIONS == Packager.from_cli([]).options
def test_cli_plus_defaults(mock_zip_file):
"""Test CLI args + defaults."""
option_subset = {'zip_path': str(mock_zip_file)}
result = Packager.from_cli(['-z', str(mock_zip_file)]).options
assert_dict_contains_subset(option_subset, result)
option_subset = {'fields': ['kDefinition']}
result = Packager.from_cli(['-f', 'kDefinition']).options
assert_dict_contains_subset(option_subset, result)
option_subset = {'fields': ['kDefinition', 'kXerox']}
result = Packager.from_cli(['-f', 'kDefinition', 'kXerox']).options
assert_dict_contains_subset(
option_subset, result, msg="fields -f allows multiple fields."
)
option_subset = {'fields': ['kDefinition', 'kXerox'], 'destination': 'data/ha.csv'}
result = Packager.from_cli(
['-f', 'kDefinition', 'kXerox', '-d', 'data/ha.csv']
).options
assert_dict_contains_subset(
option_subset, result, msg="fields -f allows additional arguments."
)
result = Packager.from_cli(['--format', 'json']).options
option_subset = {'format': 'json'}
assert_dict_contains_subset(option_subset, result, msg="format argument works")
def test_cli_exit_emessage_to_stderr():
"""Sends exception .message to stderr on exit."""
# SystemExit print's to stdout by default
with pytest.raises(SystemExit) as excinfo:
Packager.from_cli(['-d', 'data/output.csv', '-f', 'sdfa'])
excinfo.match('Field sdfa not found in file list.')
@pytest.mark.parametrize('flag', ['-v', '--version'])
def test_cli_version(capsys, flag):
with pytest.raises(SystemExit):
Packager.from_cli([flag])
captured = capsys.readouterr()
if PY2: # todo: why does python 2.x return -v in error?
assert __version__ in captured.err
else:
assert __version__ in captured.out | mock_zip_file.copy(dest_path)
p = Packager(
merge_dict(
test_options.copy, | random_line_split |
test_unihan.py | """Tests for unihan data download and processing."""
import logging
import os
import shutil
import pytest
from unihan_etl import __version__, constants, process
from unihan_etl._compat import PY2
from unihan_etl.process import DEFAULT_OPTIONS, UNIHAN_ZIP_PATH, Packager, zip_has_files
from unihan_etl.test import assert_dict_contains_subset
from unihan_etl.util import merge_dict
log = logging.getLogger(__name__)
def test_zip_has_files(mock_zip):
assert zip_has_files(['Unihan_Readings.txt'], mock_zip)
assert not zip_has_files(['Unihan_Cats.txt'], mock_zip)
def test_has_valid_zip(tmpdir, mock_zip):
if os.path.isfile(UNIHAN_ZIP_PATH):
assert process.has_valid_zip(UNIHAN_ZIP_PATH)
else:
assert not process.has_valid_zip(UNIHAN_ZIP_PATH)
assert process.has_valid_zip(mock_zip.filename)
bad_zip = tmpdir.join('corrupt.zip')
bad_zip.write('moo')
assert not process.has_valid_zip(str(bad_zip))
def test_in_fields():
columns = ['hey', 'kDefinition', 'kWhat']
result = process.in_fields('kDefinition', columns)
assert result
def test_filter_manifest():
expected = {
'Unihan_Variants.txt': [
'kSemanticVariant',
'kSimplifiedVariant',
'kSpecializedSemanticVariant',
'kTraditionalVariant',
'kZVariant',
]
}
result = process.filter_manifest(['Unihan_Variants.txt'])
assert set(result) == set(expected)
def test_get_files():
fields = ['kKorean', 'kRSUnicode']
expected = ['Unihan_Readings.txt', 'Unihan_RadicalStrokeCounts.txt']
result = process.get_files(fields)
assert set(result) == set(expected)
def test_download(tmpdir, mock_zip, mock_zip_file, mock_zip_filename):
dest_filepath = tmpdir.join('data', mock_zip_filename)
process.download(str(mock_zip_file), str(dest_filepath), shutil.copy)
result = os.path.dirname(str(dest_filepath.join('data')))
assert result, "Creates data directory if doesn't exist."
def test_download_mock(tmpdir, mock_zip, mock_zip_file, mock_test_dir, test_options):
data_path = tmpdir.join('data')
dest_path = data_path.join('data', 'hey.zip')
def urlretrieve(url, filename, url_retrieve, reporthook=None):
mock_zip_file.copy(dest_path)
p = Packager(
merge_dict(
test_options.copy,
{
'fields': ['kDefinition'],
'zip_path': str(dest_path),
'work_dir': str(mock_test_dir.join('downloads')),
'destination': str(data_path.join('unihan.csv')),
},
)
)
p.download(urlretrieve_fn=urlretrieve)
assert os.path.exists(str(dest_path))
p.export()
def test_export_format(tmpdir, mock_zip, mock_zip_file, mock_test_dir, test_options):
|
def test_extract_zip(mock_zip, mock_zip_file, tmpdir):
zf = process.extract_zip(str(mock_zip_file), str(tmpdir))
assert len(zf.infolist()) == 1
assert zf.infolist()[0].file_size == 218
assert zf.infolist()[0].filename == "Unihan_Readings.txt"
def test_normalize_only_output_requested_columns(normalized_data, columns):
items = normalized_data
in_columns = ['kDefinition', 'kCantonese']
for v in items:
assert set(columns) == set(v.keys())
items = process.listify(items, in_columns)
not_in_columns = []
# columns not selected in normalize must not be in result.
for v in items[0]:
if v not in columns:
not_in_columns.append(v)
else:
in_columns.append(v)
assert [] == not_in_columns, "normalize filters columns not specified."
assert set(in_columns).issubset(
set(columns)
), "normalize returns correct columns specified + ucn and char."
def test_normalize_simple_data_format(fixture_dir):
"""normalize turns data into simple data format (SDF)."""
csv_files = [
os.path.join(fixture_dir, 'Unihan_DictionaryLikeData.txt'),
os.path.join(fixture_dir, 'Unihan_Readings.txt'),
]
columns = (
'kTotalStrokes',
'kPhonetic',
'kCantonese',
'kDefinition',
) + constants.INDEX_FIELDS
data = process.load_data(files=csv_files)
items = process.normalize(data, columns)
items = process.listify(items, columns)
header = items[0]
assert header == columns
rows = items[1:] # NOQA
def test_flatten_fields():
single_dataset = {'Unihan_Readings.txt': ['kCantonese', 'kDefinition', 'kHangul']}
expected = ['kCantonese', 'kDefinition', 'kHangul']
results = process.get_fields(single_dataset)
assert expected == results
datasets = {
'Unihan_NumericValues.txt': [
'kAccountingNumeric',
'kOtherNumeric',
'kPrimaryNumeric',
],
'Unihan_OtherMappings.txt': ['kBigFive', 'kCCCII', 'kCNS1986'],
}
expected = [
'kAccountingNumeric',
'kOtherNumeric',
'kPrimaryNumeric',
'kBigFive',
'kCCCII',
'kCNS1986',
]
results = process.get_fields(datasets)
assert set(expected) == set(results)
def test_pick_files(mock_zip_file):
"""Pick a white list of files to build from."""
files = ['Unihan_Readings.txt', 'Unihan_Variants.txt']
options = {'input_files': files, 'zip_path': str(mock_zip_file)}
b = process.Packager(options)
result = b.options['input_files']
expected = files
assert result == expected, 'Returns only the files picked.'
def test_raise_error_unknown_field():
"""Throw error if picking unknown field."""
options = {'fields': ['kHello']}
with pytest.raises(KeyError) as excinfo:
process.Packager(options)
excinfo.match('Field ([a-zA-Z].*) not found in file list.')
def test_raise_error_unknown_file():
"""Throw error if picking unknown file."""
options = {'input_files': ['Sparta.lol']}
with pytest.raises(KeyError) as excinfo:
process.Packager(options)
excinfo.match(r'File ([a-zA-Z_\.\'].*) not found in file list.')
def test_raise_error_unknown_field_filtered_files():
"""Throw error field not in file list, when files specified."""
files = ['Unihan_Variants.txt']
options = {'input_files': files, 'fields': ['kDefinition']}
with pytest.raises(KeyError) as excinfo:
process.Packager(options)
excinfo.match('Field ([a-zA-Z].*) not found in file list.')
def test_set_reduce_files_automatically_when_only_field_specified():
"""Picks file automatically if none specified and fields are."""
fields = (
constants.UNIHAN_MANIFEST['Unihan_Readings.txt']
+ constants.UNIHAN_MANIFEST['Unihan_Variants.txt']
)
options = {'fields': fields}
b = process.Packager(options)
expected = ['Unihan_Readings.txt', 'Unihan_Variants.txt']
results = b.options['input_files']
assert set(expected) == set(results)
def test_set_reduce_fields_automatically_when_only_files_specified():
"""Picks only necessary files when fields specified."""
files = ['Unihan_Readings.txt', 'Unihan_Variants.txt']
options = {'input_files': files}
b = process.Packager(options)
results = process.get_fields(process.filter_manifest(files))
expected = b.options['fields']
assert set(expected) == set(results), 'Returns only the fields for files picked.'
def test_no_args():
"""Works without arguments."""
assert DEFAULT_OPTIONS == Packager.from_cli([]).options
def test_cli_plus_defaults(mock_zip_file):
"""Test CLI args + defaults."""
option_subset = {'zip_path': str(mock_zip_file)}
result = Packager.from_cli(['-z', str(mock_zip_file)]).options
assert_dict_contains_subset(option_subset, result)
option_subset = {'fields': ['kDefinition']}
result = Packager.from_cli(['-f', 'kDefinition']).options
assert_dict_contains_subset(option_subset, result)
option_subset = {'fields': ['kDefinition', 'kXerox']}
result = Packager.from_cli(['-f', 'kDefinition', 'kXerox']).options
assert_dict_contains_subset(
option_subset, result, msg="fields -f allows multiple fields."
)
option_subset = {'fields': ['kDefinition', 'kXerox'], 'destination': 'data/ha.csv'}
result = Packager.from_cli(
['-f', 'kDefinition', 'kXerox', '-d', 'data/ha.csv']
).options
assert_dict_contains_subset(
option_subset, result, msg="fields -f allows additional arguments."
)
result = Packager.from_cli(['--format', 'json']).options
option_subset = {'format': 'json'}
assert_dict_contains_subset(option_subset, result, msg="format argument works")
def test_cli_exit_emessage_to_stderr():
"""Sends exception .message to stderr on exit."""
# SystemExit print's to stdout by default
with pytest.raises(SystemExit) as excinfo:
Packager.from_cli(['-d', 'data/output.csv', '-f', 'sdfa'])
excinfo.match('Field sdfa not found in file list.')
@pytest.mark.parametrize('flag', ['-v', '--version'])
def test_cli_version(capsys, flag):
with pytest.raises(SystemExit):
Packager.from_cli([flag])
captured = capsys.readouterr()
if PY2: # todo: why does python 2.x return -v in error?
assert __version__ in captured.err
else:
assert __version__ in captured.out
| data_path = tmpdir.join('data')
dest_path = data_path.join('data', 'hey.zip')
def urlretrieve(url, filename, url_retrieve, reporthook=None):
mock_zip_file.copy(dest_path)
p = Packager(
merge_dict(
test_options.copy,
{
'fields': ['kDefinition'],
'zip_path': str(dest_path),
'work_dir': str(mock_test_dir.join('downloads')),
'destination': str(data_path.join('unihan.{ext}')),
'format': 'json',
},
)
)
p.download(urlretrieve_fn=urlretrieve)
assert os.path.exists(str(dest_path))
p.export()
assert str(data_path.join('unihan.json')) == p.options['destination']
assert os.path.exists(p.options['destination']) | identifier_body |
bg.rs | //! Background layer rendering
use super::{Ppu, SnesRgb};
/// BG layer scanline cache.
///
/// This cache stores a prerendered scanline of all background layers. The cache is created lazily
/// (when BG layer pixels are looked up), so we will not waste time caching a disabled BG layer.
#[derive(Default)]
pub struct BgCache {
layers: [BgLayerCache; 4],
}
/// Data that's stored in the BG layer caches for a single pixel
#[derive(Copy, Clone, Default)]
struct CachedPixel {
// These are just copied from `TilemapEntry`.
/// Tile priority bit (0-1)
priority: u8,
/// Precalculated color of the pixel (15-bit RGB). `None` = transparent.
color: Option<SnesRgb>,
}
/// BG cache for a single layer
struct BgLayerCache {
/// Whether this cache contains valid data. If `false`, the cache will be refreshed on next
/// access.
valid: bool,
/// Stores the prerendered scanline
scanline: [CachedPixel; super::SCREEN_WIDTH as usize],
}
impl Default for BgLayerCache {
fn default() -> Self {
BgLayerCache {
valid: false,
scanline: [CachedPixel::default(); super::SCREEN_WIDTH as usize],
}
}
}
impl BgLayerCache {
/// Invalidates the cache of this layer, causing it to be rebuilt on next access.
#[allow(dead_code)] // FIXME Use in the right locations
fn | (&mut self) {
self.valid = false;
}
}
impl BgCache {
/// Invalidates the BG cache of all layers
fn invalidate_all(&mut self) {
self.layers[0].valid = false;
self.layers[1].valid = false;
self.layers[2].valid = false;
self.layers[3].valid = false;
}
}
/// Collected background settings
struct BgSettings {
/// Mosaic pixel size (1-16). 1 = Normal pixels.
/// FIXME: I think there's a difference between disabled and enabled with 1x1 mosaic size in
/// some modes (highres presumably)
#[allow(dead_code)] // FIXME NYI
mosaic: u8,
/// Tilemap word address in VRAM
/// "Starting at the tilemap address, the first $800 bytes are for tilemap A. Then come the
/// $800 bytes for B, then C then D."
tilemap_word_addr: u16,
/// When `true`, this BGs tilemaps are repeated sideways
tilemap_mirror_h: bool,
/// When `true`, this BGs tilemaps are repeated downwards
tilemap_mirror_v: bool,
/// If `true`, BG tiles are 16x16 pixels. If `false`, they are 8x8 pixels.
tile_size_16: bool,
/// Character Data start address in VRAM
chr_addr: u16,
/// Horizontal scroll offset. Moves the BG layer to the left by some number of pixels.
hofs: u16,
/// Vertical scroll offset. Moves the BG layer up by some number of pixels.
vofs: u16,
}
/// Unpacked tilemap entry for internal (rendering) use.
///
/// A tilemap entry is 2 bytes large and contains informations about a single background layer tile.
struct TilemapEntry {
/// Flip this tile vertically (flips top and down of the tile)
vflip: bool,
/// Flip horizontally (flips left and right side)
hflip: bool,
/// Priority bit (0-1)
priority: u8,
/// Tile palette (0-7)
palette: u8,
/// Index into the character/tile data, where the actual tile character data is stored in
/// bitplanes (10 bits)
tile_number: u16,
}
impl Ppu {
/// Determines whether the given BG layer (1-4) is enabled
fn bg_enabled(&self, bg: u8, subscreen: bool) -> bool {
let reg = if subscreen { self.ts } else { self.tm };
reg & (1 << (bg - 1)) != 0
}
/// Reads the tilemap entry at the given VRAM word address.
/// vhopppcc cccccccc (high, low)
/// v/h = Vertical/Horizontal flip this tile.
/// o = Tile priority.
/// ppp = Tile palette base.
/// cccccccccc = Tile number.
fn tilemap_entry(&self, word_address: u16) -> TilemapEntry {
let byte_address = word_address << 1;
let lo = self.vram[byte_address];
let hi = self.vram[byte_address + 1];
TilemapEntry {
vflip: hi & 0x80 != 0,
hflip: hi & 0x40 != 0,
priority: (hi & 0x20) >> 5,
palette: (hi & 0x1c) >> 2,
tile_number: ((hi as u16 & 0x03) << 8) | lo as u16,
}
}
/// Collects properties of a background layer
fn bg_settings(&self, bg: u8) -> BgSettings {
// The BGxSC register for our background layer
let bgsc = match bg {
1 => self.bg1sc,
2 => self.bg2sc,
3 => self.bg3sc,
4 => self.bg4sc,
_ => unreachable!(),
};
// Chr (Tileset, not Tilemap) start (word?) address >> 12
let chr = match bg {
1 => self.bg12nba & 0x0f,
2 => (self.bg12nba & 0xf0) >> 4,
3 => self.bg34nba & 0x0f,
4 => (self.bg34nba & 0xf0) >> 4,
_ => unreachable!(),
};
let (hofs, vofs) = match bg {
1 => (self.bg1hofs, self.bg1vofs),
2 => (self.bg2hofs, self.bg2vofs),
3 => (self.bg3hofs, self.bg3vofs),
4 => (self.bg4hofs, self.bg4vofs),
_ => unreachable!(),
};
BgSettings {
mosaic: if self.mosaic & (1 << (bg-1)) == 0 {
1
} else {
((self.mosaic & 0xf0) >> 4) + 1
},
tilemap_word_addr: ((bgsc as u16 & 0xfc) >> 2) << 10,
tilemap_mirror_h: bgsc & 0b01 == 0, // inverted bit value
tilemap_mirror_v: bgsc & 0b10 == 0, // inverted bit value
tile_size_16: match self.bg_mode() {
// "If the BG character size for BG1/BG2/BG3/BG4 bit is set, then the BG is made of
// 16x16 tiles. Otherwise, 8x8 tiles are used. However, note that Modes 5 and 6
// always use 16-pixel wide tiles, and Mode 7 always uses 8x8 tiles."
5 | 6 => true,
7 => false,
_ => {
// BGMODE: `4321----` (`-` = not relevant here) - Use 16x16 tiles?
self.bgmode & (1 << (bg + 3)) != 0
}
},
chr_addr: (chr as u16) << 12,
hofs: hofs,
vofs: vofs,
}
}
/// Returns the number of color bits in the given BG layer in the current BG mode (2, 4, 7 or
/// 8). To get the number of colors, use `1 << color_bits_for_bg`.
///
/// Table of colors for BG layers (not what this function returns!). `X` denotes a BG for
/// offset-per-tile data.
/// ```text
/// Mode # Colors for BG
/// 1 2 3 4
/// ======---=---=---=---=
/// 0 4 4 4 4
/// 1 16 16 4 -
/// 2 16 16 X -
/// 3 256 16 - -
/// 4 256 4 X -
/// 5 16 4 - -
/// 6 16 - X -
/// 7 256 - - -
/// 7EXTBG 256 128 - -
/// ```
fn color_bits_for_bg(&self, bg: u8) -> u8 {
match (self.bg_mode(), bg) {
(0, _) => 2,
(1, 1) |
(1, 2) => 4,
(1, 3) => 2,
(2, _) => 4,
(3, 1) => 8,
(3, 2) => 4,
(4, 1) => 8,
(4, 2) => 2,
(5, 1) => 4,
(5, 2) => 2,
(6, _) => 4,
(7, _) => panic!("unreachable: color_count_for_bg for mode 7"),
_ => unreachable!(),
}
}
/// Calculates the palette base index for a tile in the given background layer. `palette_num`
/// is the palette number stored in the tilemap entry (the 3 `p` bits).
fn palette_base_for_bg_tile(&self, bg: u8, palette_num: u8) -> u8 {
debug_assert!(bg >= 1 && bg <= 4);
match (self.bg_mode(), bg) {
(0, _) => palette_num * 4 + (bg - 1) * 32,
(1, _) |
(5, _) => palette_num * (1 << self.color_bits_for_bg(bg) as u8),
(2, _) => palette_num * 16,
(3, 1) => 0,
(3, 2) => palette_num * 16,
(4, 1) => 0,
(4, 2) => palette_num * 4,
(6, _) => palette_num * 16, // BG1 has 16 colors
(7, _) => panic!("unreachable: palette_base_for_bg_tile for mode 7"),
_ => unreachable!(),
}
}
fn render_mode7_scanline(&mut self) {
// TODO Figure out how to integrate EXTBG
assert!(self.setini & 0x40 == 0, "NYI: Mode 7 EXTBG");
// FIXME consider changing the type of `Ppu.m7a,...` to `i16`
let vflip = self.m7sel & 0x02 != 0;
let hflip = self.m7sel & 0x01 != 0;
// 0/1: Wrap
// 2: Transparent
// 3: Fill with tile 0
let screen_over = self.m7sel >> 6;
let y = self.scanline;
for x in self.x..super::SCREEN_WIDTH as u16 {
// Code taken from http://problemkaputt.de/fullsnes.htm
// FIXME: The above source also has a much faster way to render whole scanlines!
let screen_x = x ^ if hflip { 0xff } else { 0x00 };
let screen_y = y ^ if vflip { 0xff } else { 0x00 };
let mut org_x = (self.m7hofs as i16 - self.m7x as i16) & !0x1c00;
if org_x < 0 { org_x |= 0x1c00; }
let mut org_y = (self.m7vofs as i16 - self.m7y as i16) & !0x1c00;
if org_y < 0 { org_y |= 0x1c00; }
let mut vram_x: i32 = ((self.m7a as i16 as i32 * org_x as i32) & !0x3f) + ((self.m7b as i16 as i32 * org_y as i32) & !0x3f) + self.m7x as i16 as i32 * 0x100;
let mut vram_y: i32 = ((self.m7c as i16 as i32 * org_x as i32) & !0x3f) + ((self.m7d as i16 as i32 * org_y as i32) & !0x3f) + self.m7y as i16 as i32 * 0x100;
vram_x += ((self.m7b as i16 as i32 * screen_y as i32) & !0x3f) + self.m7a as i16 as i32 * screen_x as i32;
vram_y += ((self.m7d as i16 as i32 * screen_y as i32) & !0x3f) + self.m7c as i16 as i32 * screen_x as i32;
let out_of_bounds = vram_x & (1 << 18) != 0 || vram_y & (1 << 18) != 0;
let palette_index = match screen_over {
2 if out_of_bounds => { // transparent
0
},
_ => {
let (tile_x, tile_y) = if screen_over == 3 && out_of_bounds {
(0, 0) // 3 -> use tile 0
} else {
let tile_x: u16 = ((vram_x as u32 >> 11) & 0x7f) as u16;
let tile_y: u16 = ((vram_y as u32 >> 11) & 0x7f) as u16;
(tile_x, tile_y)
};
let off_x: u16 = (vram_x as u16 >> 8) & 0x07;
let off_y: u16 = (vram_y as u16 >> 8) & 0x07;
// Tilemap address for (7-bit) tile X/Y coordinates (BG1 is 128x128 tiles):
// `0yyyyyyy xxxxxxx0`
let tilemap_addr: u16 = (tile_y << 8) | (tile_x << 1);
// The "tilemap" in mode 7 just consists of "tile numbers" (or pixel addresses)
let tile_number = self.vram[tilemap_addr] as u16;
// The CHR address is calculated like this (where `t` is `tile_number` and `x` and `y`
// are pixel offsets inside the tile):
// `tttttttt tyyyxxx1`
let chr_addr = (tile_number << 7) | (off_y << 4) | (off_x << 1) | 1;
self.vram[chr_addr]
},
};
let rgb = match palette_index {
0 => None,
_ => Some(self.cgram.get_color(palette_index)),
};
self.bg_cache.layers[0].scanline[x as usize] = CachedPixel {
priority: 0, // Ignored anyways
color: rgb,
};
}
}
/// Render the current scanline of the given BG layer into its cache.
///
/// We render starting at `self.x` (the pixel we actually need) until the end of the
/// scanline. Note that this means that the `valid` flag is only relevant for the
/// leftover part of the scanline, not the entire cached scanline.
fn render_bg_scanline(&mut self, bg_num: u8) {
// Apply BG scrolling and get the tile coordinates
// FIXME Apply mosaic filter
// FIXME Fix this: "Note that many games will set their vertical scroll values to -1 rather
// than 0. This is because the SNES loads OBJ data for each scanline during the previous
// scanline. The very first line, though, wouldn’t have any OBJ data loaded! So the SNES
// doesn’t actually output scanline 0, although it does everything to render it. These
// games want the first line of their tilemap to be the first line output, so they set
// their VOFS registers in this manner. Note that an interlace screen needs -2 rather than
// -1 to properly correct for the missing line 0 (and an emulator would need to add 2
// instead of 1 to account for this)."
// -> I guess we should just decrement the physical screen height by 1
if self.bg_mode() == 7 {
self.render_mode7_scanline();
return;
}
let mut x = self.x;
let y = self.scanline;
let bg = self.bg_settings(bg_num);
let tile_size = if bg.tile_size_16 { 16 } else { 8 };
let (hofs, vofs) = (bg.hofs, bg.vofs);
let (sx, sy) = (!bg.tilemap_mirror_h, !bg.tilemap_mirror_v);
let color_bits = self.color_bits_for_bg(bg_num);
if color_bits == 8 {
// can use direct color mode
debug_assert!(self.cgwsel & 0x01 == 0, "NYI: direct color mode");
}
let mut tile_x = x.wrapping_add(hofs) / tile_size as u16;
let tile_y = y.wrapping_add(vofs) / tile_size as u16;
let mut off_x = (x.wrapping_add(hofs) % tile_size as u16) as u8;
let off_y = (y.wrapping_add(vofs) % tile_size as u16) as u8;
while x < super::SCREEN_WIDTH as u16 {
// Render current tile (`tile_x`) starting at `off_x` until the end of the tile,
// then go to next tile and set `off_x = 0`
// Calculate the VRAM word address, where the tilemap entry for our tile is stored
let tilemap_entry_word_address =
bg.tilemap_word_addr |
((tile_y & 0x1f) << 5) |
(tile_x & 0x1f) |
if sy {(tile_y & 0x20) << if sx {6} else {5}} else {0} |
if sx {(tile_x & 0x20) << 5} else {0};
let tilemap_entry = self.tilemap_entry(tilemap_entry_word_address);
let bitplane_start_addr =
(bg.chr_addr << 1) +
(tilemap_entry.tile_number * 8 * color_bits as u16); // 8 bytes per bitplane
let palette_base = self.palette_base_for_bg_tile(bg_num, tilemap_entry.palette);
while off_x < tile_size && x < super::SCREEN_WIDTH as u16 {
let palette_index = self.read_chr_entry(color_bits,
bitplane_start_addr,
tile_size,
(off_x, off_y),
(tilemap_entry.vflip, tilemap_entry.hflip));
let rgb = match palette_index {
0 => None,
_ => Some(self.cgram.get_color(palette_base + palette_index)),
};
self.bg_cache.layers[bg_num as usize - 1].scanline[x as usize] = CachedPixel {
priority: tilemap_entry.priority,
color: rgb,
};
x += 1;
off_x += 1;
}
tile_x += 1;
off_x = 0;
}
}
/// Main entry point into the BG layer renderer.
///
/// Lookup the color of the given background layer (1-4) at the current pixel, using the given
/// priority (0-1) only. This will also scroll backgrounds accordingly.
///
/// This may only be called with BG layer numbers which are actually valid in the current BG
/// mode (the renderer code makes sure that this is the case).
///
/// Returns `None` if the pixel is transparent, `Some(SnesRgb)` otherwise.
pub fn lookup_bg_color(&mut self, bg_num: u8, prio: u8, subscreen: bool) -> Option<SnesRgb> {
debug_assert!(bg_num >= 1 && bg_num <= 4);
debug_assert!(prio == 0 || prio == 1);
if !self.bg_enabled(bg_num, subscreen) {
return None;
}
if self.x == 0 {
// Before we draw the first pixel, make sure that we invalidate the cache so it is
// rebuilt first.
self.bg_cache.invalidate_all();
}
if !self.bg_cache.layers[bg_num as usize - 1].valid {
// Call actual render code to render the scanline into the cache
self.render_bg_scanline(bg_num);
self.bg_cache.layers[bg_num as usize - 1].valid = true;
}
// Cache must be valid now, so we can access the pixel we need:
let pixel = &self.bg_cache.layers[bg_num as usize - 1].scanline[self.x as usize];
if pixel.priority == prio {
pixel.color
} else {
None
}
}
}
| invalidate | identifier_name |
bg.rs | //! Background layer rendering
use super::{Ppu, SnesRgb};
/// BG layer scanline cache.
///
/// This cache stores a prerendered scanline of all background layers. The cache is created lazily
/// (when BG layer pixels are looked up), so we will not waste time caching a disabled BG layer.
#[derive(Default)]
pub struct BgCache {
layers: [BgLayerCache; 4],
}
/// Data that's stored in the BG layer caches for a single pixel
#[derive(Copy, Clone, Default)]
struct CachedPixel {
// These are just copied from `TilemapEntry`.
/// Tile priority bit (0-1)
priority: u8,
/// Precalculated color of the pixel (15-bit RGB). `None` = transparent.
color: Option<SnesRgb>,
}
/// BG cache for a single layer
struct BgLayerCache {
/// Whether this cache contains valid data. If `false`, the cache will be refreshed on next
/// access.
valid: bool,
/// Stores the prerendered scanline
scanline: [CachedPixel; super::SCREEN_WIDTH as usize],
}
impl Default for BgLayerCache {
fn default() -> Self {
BgLayerCache {
valid: false,
scanline: [CachedPixel::default(); super::SCREEN_WIDTH as usize],
}
}
}
impl BgLayerCache {
/// Invalidates the cache of this layer, causing it to be rebuilt on next access.
#[allow(dead_code)] // FIXME Use in the right locations
fn invalidate(&mut self) {
self.valid = false;
}
}
impl BgCache {
/// Invalidates the BG cache of all layers
fn invalidate_all(&mut self) {
self.layers[0].valid = false;
self.layers[1].valid = false;
self.layers[2].valid = false;
self.layers[3].valid = false;
}
}
/// Collected background settings
struct BgSettings {
/// Mosaic pixel size (1-16). 1 = Normal pixels.
/// FIXME: I think there's a difference between disabled and enabled with 1x1 mosaic size in
/// some modes (highres presumably)
#[allow(dead_code)] // FIXME NYI
mosaic: u8,
/// Tilemap word address in VRAM
/// "Starting at the tilemap address, the first $800 bytes are for tilemap A. Then come the
/// $800 bytes for B, then C then D."
tilemap_word_addr: u16,
/// When `true`, this BGs tilemaps are repeated sideways
tilemap_mirror_h: bool,
/// When `true`, this BGs tilemaps are repeated downwards
tilemap_mirror_v: bool,
/// If `true`, BG tiles are 16x16 pixels. If `false`, they are 8x8 pixels.
tile_size_16: bool,
/// Character Data start address in VRAM
chr_addr: u16,
/// Horizontal scroll offset. Moves the BG layer to the left by some number of pixels.
hofs: u16,
/// Vertical scroll offset. Moves the BG layer up by some number of pixels.
vofs: u16,
}
/// Unpacked tilemap entry for internal (rendering) use.
///
/// A tilemap entry is 2 bytes large and contains informations about a single background layer tile.
struct TilemapEntry {
/// Flip this tile vertically (flips top and down of the tile)
vflip: bool,
/// Flip horizontally (flips left and right side)
hflip: bool,
/// Priority bit (0-1)
priority: u8,
/// Tile palette (0-7)
palette: u8,
/// Index into the character/tile data, where the actual tile character data is stored in
/// bitplanes (10 bits)
tile_number: u16,
}
impl Ppu {
/// Determines whether the given BG layer (1-4) is enabled
fn bg_enabled(&self, bg: u8, subscreen: bool) -> bool {
let reg = if subscreen { self.ts } else { self.tm };
reg & (1 << (bg - 1)) != 0
}
/// Reads the tilemap entry at the given VRAM word address.
/// vhopppcc cccccccc (high, low)
/// v/h = Vertical/Horizontal flip this tile.
/// o = Tile priority.
/// ppp = Tile palette base.
/// cccccccccc = Tile number.
fn tilemap_entry(&self, word_address: u16) -> TilemapEntry {
let byte_address = word_address << 1;
let lo = self.vram[byte_address];
let hi = self.vram[byte_address + 1];
TilemapEntry {
vflip: hi & 0x80 != 0,
hflip: hi & 0x40 != 0,
priority: (hi & 0x20) >> 5,
palette: (hi & 0x1c) >> 2,
tile_number: ((hi as u16 & 0x03) << 8) | lo as u16,
}
}
/// Collects properties of a background layer
fn bg_settings(&self, bg: u8) -> BgSettings {
// The BGxSC register for our background layer
let bgsc = match bg {
1 => self.bg1sc,
2 => self.bg2sc,
3 => self.bg3sc,
4 => self.bg4sc,
_ => unreachable!(),
};
// Chr (Tileset, not Tilemap) start (word?) address >> 12
let chr = match bg {
1 => self.bg12nba & 0x0f,
2 => (self.bg12nba & 0xf0) >> 4,
3 => self.bg34nba & 0x0f,
4 => (self.bg34nba & 0xf0) >> 4,
_ => unreachable!(),
};
let (hofs, vofs) = match bg {
1 => (self.bg1hofs, self.bg1vofs),
2 => (self.bg2hofs, self.bg2vofs),
3 => (self.bg3hofs, self.bg3vofs),
4 => (self.bg4hofs, self.bg4vofs),
_ => unreachable!(),
};
BgSettings {
mosaic: if self.mosaic & (1 << (bg-1)) == 0 {
1
} else {
((self.mosaic & 0xf0) >> 4) + 1
},
tilemap_word_addr: ((bgsc as u16 & 0xfc) >> 2) << 10,
tilemap_mirror_h: bgsc & 0b01 == 0, // inverted bit value
tilemap_mirror_v: bgsc & 0b10 == 0, // inverted bit value
tile_size_16: match self.bg_mode() {
// "If the BG character size for BG1/BG2/BG3/BG4 bit is set, then the BG is made of
// 16x16 tiles. Otherwise, 8x8 tiles are used. However, note that Modes 5 and 6
// always use 16-pixel wide tiles, and Mode 7 always uses 8x8 tiles."
5 | 6 => true,
7 => false,
_ => {
// BGMODE: `4321----` (`-` = not relevant here) - Use 16x16 tiles?
self.bgmode & (1 << (bg + 3)) != 0
}
},
chr_addr: (chr as u16) << 12,
hofs: hofs,
vofs: vofs,
}
}
/// Returns the number of color bits in the given BG layer in the current BG mode (2, 4, 7 or
/// 8). To get the number of colors, use `1 << color_bits_for_bg`.
///
/// Table of colors for BG layers (not what this function returns!). `X` denotes a BG for
/// offset-per-tile data.
/// ```text
/// Mode # Colors for BG
/// 1 2 3 4
/// ======---=---=---=---=
/// 0 4 4 4 4
/// 1 16 16 4 -
/// 2 16 16 X -
/// 3 256 16 - -
/// 4 256 4 X -
/// 5 16 4 - -
/// 6 16 - X -
/// 7 256 - - -
/// 7EXTBG 256 128 - -
/// ```
fn color_bits_for_bg(&self, bg: u8) -> u8 {
match (self.bg_mode(), bg) {
(0, _) => 2,
(1, 1) |
(1, 2) => 4,
(1, 3) => 2,
(2, _) => 4,
(3, 1) => 8,
(3, 2) => 4,
(4, 1) => 8,
(4, 2) => 2,
(5, 1) => 4,
(5, 2) => 2,
(6, _) => 4,
(7, _) => panic!("unreachable: color_count_for_bg for mode 7"),
_ => unreachable!(),
}
}
/// Calculates the palette base index for a tile in the given background layer. `palette_num`
/// is the palette number stored in the tilemap entry (the 3 `p` bits).
fn palette_base_for_bg_tile(&self, bg: u8, palette_num: u8) -> u8 {
debug_assert!(bg >= 1 && bg <= 4);
match (self.bg_mode(), bg) {
(0, _) => palette_num * 4 + (bg - 1) * 32,
(1, _) |
(5, _) => palette_num * (1 << self.color_bits_for_bg(bg) as u8),
(2, _) => palette_num * 16,
(3, 1) => 0,
(3, 2) => palette_num * 16,
(4, 1) => 0,
(4, 2) => palette_num * 4,
(6, _) => palette_num * 16, // BG1 has 16 colors
(7, _) => panic!("unreachable: palette_base_for_bg_tile for mode 7"),
_ => unreachable!(),
}
}
fn render_mode7_scanline(&mut self) {
// TODO Figure out how to integrate EXTBG
assert!(self.setini & 0x40 == 0, "NYI: Mode 7 EXTBG");
// FIXME consider changing the type of `Ppu.m7a,...` to `i16`
let vflip = self.m7sel & 0x02 != 0;
let hflip = self.m7sel & 0x01 != 0;
// 0/1: Wrap
// 2: Transparent
// 3: Fill with tile 0
let screen_over = self.m7sel >> 6;
let y = self.scanline;
for x in self.x..super::SCREEN_WIDTH as u16 {
// Code taken from http://problemkaputt.de/fullsnes.htm
// FIXME: The above source also has a much faster way to render whole scanlines!
let screen_x = x ^ if hflip { 0xff } else { 0x00 };
let screen_y = y ^ if vflip { 0xff } else { 0x00 };
let mut org_x = (self.m7hofs as i16 - self.m7x as i16) & !0x1c00;
if org_x < 0 { org_x |= 0x1c00; }
let mut org_y = (self.m7vofs as i16 - self.m7y as i16) & !0x1c00;
if org_y < 0 { org_y |= 0x1c00; }
let mut vram_x: i32 = ((self.m7a as i16 as i32 * org_x as i32) & !0x3f) + ((self.m7b as i16 as i32 * org_y as i32) & !0x3f) + self.m7x as i16 as i32 * 0x100;
let mut vram_y: i32 = ((self.m7c as i16 as i32 * org_x as i32) & !0x3f) + ((self.m7d as i16 as i32 * org_y as i32) & !0x3f) + self.m7y as i16 as i32 * 0x100;
vram_x += ((self.m7b as i16 as i32 * screen_y as i32) & !0x3f) + self.m7a as i16 as i32 * screen_x as i32;
vram_y += ((self.m7d as i16 as i32 * screen_y as i32) & !0x3f) + self.m7c as i16 as i32 * screen_x as i32;
let out_of_bounds = vram_x & (1 << 18) != 0 || vram_y & (1 << 18) != 0;
let palette_index = match screen_over {
2 if out_of_bounds => { // transparent
0
},
_ => {
let (tile_x, tile_y) = if screen_over == 3 && out_of_bounds {
(0, 0) // 3 -> use tile 0
} else {
let tile_x: u16 = ((vram_x as u32 >> 11) & 0x7f) as u16;
let tile_y: u16 = ((vram_y as u32 >> 11) & 0x7f) as u16;
(tile_x, tile_y)
};
let off_x: u16 = (vram_x as u16 >> 8) & 0x07;
let off_y: u16 = (vram_y as u16 >> 8) & 0x07;
// Tilemap address for (7-bit) tile X/Y coordinates (BG1 is 128x128 tiles):
// `0yyyyyyy xxxxxxx0`
let tilemap_addr: u16 = (tile_y << 8) | (tile_x << 1);
// The "tilemap" in mode 7 just consists of "tile numbers" (or pixel addresses)
let tile_number = self.vram[tilemap_addr] as u16;
// The CHR address is calculated like this (where `t` is `tile_number` and `x` and `y`
// are pixel offsets inside the tile):
// `tttttttt tyyyxxx1`
let chr_addr = (tile_number << 7) | (off_y << 4) | (off_x << 1) | 1;
self.vram[chr_addr]
},
};
let rgb = match palette_index {
0 => None,
_ => Some(self.cgram.get_color(palette_index)),
};
self.bg_cache.layers[0].scanline[x as usize] = CachedPixel {
priority: 0, // Ignored anyways
color: rgb,
};
}
}
/// Render the current scanline of the given BG layer into its cache.
///
/// We render starting at `self.x` (the pixel we actually need) until the end of the
/// scanline. Note that this means that the `valid` flag is only relevant for the
/// leftover part of the scanline, not the entire cached scanline.
fn render_bg_scanline(&mut self, bg_num: u8) {
// Apply BG scrolling and get the tile coordinates
// FIXME Apply mosaic filter
// FIXME Fix this: "Note that many games will set their vertical scroll values to -1 rather
// than 0. This is because the SNES loads OBJ data for each scanline during the previous
// scanline. The very first line, though, wouldn’t have any OBJ data loaded! So the SNES
// doesn’t actually output scanline 0, although it does everything to render it. These
// games want the first line of their tilemap to be the first line output, so they set
// their VOFS registers in this manner. Note that an interlace screen needs -2 rather than
// -1 to properly correct for the missing line 0 (and an emulator would need to add 2
// instead of 1 to account for this)."
// -> I guess we should just decrement the physical screen height by 1
if self.bg_mode() == 7 {
self.render_mode7_scanline();
return;
}
let mut x = self.x;
let y = self.scanline;
let bg = self.bg_settings(bg_num);
let tile_size = if bg.tile_size_16 { 16 } else { 8 };
let (hofs, vofs) = (bg.hofs, bg.vofs);
let (sx, sy) = (!bg.tilemap_mirror_h, !bg.tilemap_mirror_v); | let color_bits = self.color_bits_for_bg(bg_num);
if color_bits == 8 {
// can use direct color mode
debug_assert!(self.cgwsel & 0x01 == 0, "NYI: direct color mode");
}
let mut tile_x = x.wrapping_add(hofs) / tile_size as u16;
let tile_y = y.wrapping_add(vofs) / tile_size as u16;
let mut off_x = (x.wrapping_add(hofs) % tile_size as u16) as u8;
let off_y = (y.wrapping_add(vofs) % tile_size as u16) as u8;
while x < super::SCREEN_WIDTH as u16 {
// Render current tile (`tile_x`) starting at `off_x` until the end of the tile,
// then go to next tile and set `off_x = 0`
// Calculate the VRAM word address, where the tilemap entry for our tile is stored
let tilemap_entry_word_address =
bg.tilemap_word_addr |
((tile_y & 0x1f) << 5) |
(tile_x & 0x1f) |
if sy {(tile_y & 0x20) << if sx {6} else {5}} else {0} |
if sx {(tile_x & 0x20) << 5} else {0};
let tilemap_entry = self.tilemap_entry(tilemap_entry_word_address);
let bitplane_start_addr =
(bg.chr_addr << 1) +
(tilemap_entry.tile_number * 8 * color_bits as u16); // 8 bytes per bitplane
let palette_base = self.palette_base_for_bg_tile(bg_num, tilemap_entry.palette);
while off_x < tile_size && x < super::SCREEN_WIDTH as u16 {
let palette_index = self.read_chr_entry(color_bits,
bitplane_start_addr,
tile_size,
(off_x, off_y),
(tilemap_entry.vflip, tilemap_entry.hflip));
let rgb = match palette_index {
0 => None,
_ => Some(self.cgram.get_color(palette_base + palette_index)),
};
self.bg_cache.layers[bg_num as usize - 1].scanline[x as usize] = CachedPixel {
priority: tilemap_entry.priority,
color: rgb,
};
x += 1;
off_x += 1;
}
tile_x += 1;
off_x = 0;
}
}
/// Main entry point into the BG layer renderer.
///
/// Lookup the color of the given background layer (1-4) at the current pixel, using the given
/// priority (0-1) only. This will also scroll backgrounds accordingly.
///
/// This may only be called with BG layer numbers which are actually valid in the current BG
/// mode (the renderer code makes sure that this is the case).
///
/// Returns `None` if the pixel is transparent, `Some(SnesRgb)` otherwise.
pub fn lookup_bg_color(&mut self, bg_num: u8, prio: u8, subscreen: bool) -> Option<SnesRgb> {
debug_assert!(bg_num >= 1 && bg_num <= 4);
debug_assert!(prio == 0 || prio == 1);
if !self.bg_enabled(bg_num, subscreen) {
return None;
}
if self.x == 0 {
// Before we draw the first pixel, make sure that we invalidate the cache so it is
// rebuilt first.
self.bg_cache.invalidate_all();
}
if !self.bg_cache.layers[bg_num as usize - 1].valid {
// Call actual render code to render the scanline into the cache
self.render_bg_scanline(bg_num);
self.bg_cache.layers[bg_num as usize - 1].valid = true;
}
// Cache must be valid now, so we can access the pixel we need:
let pixel = &self.bg_cache.layers[bg_num as usize - 1].scanline[self.x as usize];
if pixel.priority == prio {
pixel.color
} else {
None
}
}
} | random_line_split | |
builtin_func.go | package eval
// Builtin functions.
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"os"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"github.com/elves/elvish/sys"
"github.com/elves/elvish/util"
)
var builtinFns []*BuiltinFn
// BuiltinFn is a builtin function.
type BuiltinFn struct {
Name string
Impl func(*EvalCtx, []Value)
}
func (*BuiltinFn) Kind() string {
return "fn"
}
func (b *BuiltinFn) Repr(int) string {
return "$" + FnPrefix + b.Name
}
// Call calls a builtin function.
func (b *BuiltinFn) Call(ec *EvalCtx, args []Value) {
b.Impl(ec, args)
}
func init() {
// Needed to work around init loop.
builtinFns = []*BuiltinFn{ | &BuiltinFn{"print", wrapFn(print)},
&BuiltinFn{"println", wrapFn(println)},
&BuiltinFn{"pprint", pprint},
&BuiltinFn{"into-lines", wrapFn(intoLines)},
&BuiltinFn{"from-lines", wrapFn(fromLines)},
&BuiltinFn{"rat", wrapFn(ratFn)},
&BuiltinFn{"put", put},
&BuiltinFn{"put-all", wrapFn(putAll)},
&BuiltinFn{"unpack", wrapFn(unpack)},
&BuiltinFn{"from-json", wrapFn(fromJSON)},
&BuiltinFn{"kind-of", kindOf},
&BuiltinFn{"fail", wrapFn(fail)},
&BuiltinFn{"multi-error", wrapFn(multiErrorFn)},
&BuiltinFn{"return", wrapFn(returnFn)},
&BuiltinFn{"break", wrapFn(breakFn)},
&BuiltinFn{"continue", wrapFn(continueFn)},
&BuiltinFn{"each", wrapFn(each)},
&BuiltinFn{"eawk", wrapFn(eawk)},
&BuiltinFn{"cd", cd},
&BuiltinFn{"dirs", wrapFn(dirs)},
&BuiltinFn{"history", wrapFn(history)},
&BuiltinFn{"source", wrapFn(source)},
&BuiltinFn{"+", wrapFn(plus)},
&BuiltinFn{"-", wrapFn(minus)},
&BuiltinFn{"mul", wrapFn(times)},
&BuiltinFn{"div", wrapFn(divide)},
&BuiltinFn{"pow", wrapFn(pow)},
&BuiltinFn{"lt", wrapFn(lt)},
&BuiltinFn{"gt", wrapFn(gt)},
&BuiltinFn{"base", wrapFn(base)},
&BuiltinFn{"==", eq},
&BuiltinFn{"!=", wrapFn(noteq)},
&BuiltinFn{"deepeq", deepeq},
&BuiltinFn{"take", wrapFn(take)},
&BuiltinFn{"drop", wrapFn(drop)},
&BuiltinFn{"len", wrapFn(lenFn)},
&BuiltinFn{"count", wrapFn(count)},
&BuiltinFn{"rest", wrapFn(rest)},
&BuiltinFn{"fg", wrapFn(fg)},
&BuiltinFn{"tilde-abbr", wrapFn(tildeAbbr)},
&BuiltinFn{"-sleep", wrapFn(_sleep)},
&BuiltinFn{"-stack", wrapFn(_stack)},
&BuiltinFn{"-log", wrapFn(_log)},
&BuiltinFn{"-exec", wrapFn(_exec)},
}
for _, b := range builtinFns {
builtinNamespace[FnPrefix+b.Name] = NewRoVariable(b)
}
}
var (
ErrArgs = errors.New("args error")
ErrInput = errors.New("input error")
ErrStoreNotConnected = errors.New("store not connected")
ErrNoMatchingDir = errors.New("no matching directory")
ErrNotInSameGroup = errors.New("not in the same process group")
ErrInterrupted = errors.New("interrupted")
)
var (
evalCtxType = reflect.TypeOf((*EvalCtx)(nil))
valueType = reflect.TypeOf((*Value)(nil)).Elem()
)
// wrapFn wraps an inner function into one suitable as a builtin function. It
// generates argument checking and conversion code according to the signature
// of the inner function. The inner function must accept evalCtx* as the first
// argument and return an exitus.
func wrapFn(inner interface{}) func(*EvalCtx, []Value) {
type_ := reflect.TypeOf(inner)
if type_.In(0) != evalCtxType {
panic("bad func")
}
requiredArgs := type_.NumIn() - 1
isVariadic := type_.IsVariadic()
var variadicType reflect.Type
if isVariadic {
requiredArgs--
variadicType = type_.In(type_.NumIn() - 1).Elem()
if !supportedIn(variadicType) {
panic("bad func argument")
}
}
for i := 0; i < requiredArgs; i++ {
if !supportedIn(type_.In(i + 1)) {
panic("bad func argument")
}
}
return func(ec *EvalCtx, args []Value) {
if len(args) < requiredArgs || (!isVariadic && len(args) > requiredArgs) {
throw(ErrArgs)
}
callArgs := make([]reflect.Value, len(args)+1)
callArgs[0] = reflect.ValueOf(ec)
ok := convertArgs(args[:requiredArgs], callArgs[1:],
func(i int) reflect.Type { return type_.In(i + 1) })
if !ok {
throw(ErrArgs)
}
if isVariadic {
ok := convertArgs(args[requiredArgs:], callArgs[1+requiredArgs:],
func(i int) reflect.Type { return variadicType })
if !ok {
throw(ErrArgs)
}
}
reflect.ValueOf(inner).Call(callArgs)
}
}
func supportedIn(t reflect.Type) bool {
return t.Kind() == reflect.String ||
t.Kind() == reflect.Int || t.Kind() == reflect.Float64 ||
t.Implements(valueType)
}
func convertArgs(args []Value, callArgs []reflect.Value, callType func(int) reflect.Type) bool {
for i, arg := range args {
var callArg interface{}
switch callType(i).Kind() {
case reflect.String:
callArg = ToString(arg)
case reflect.Int:
var err error
callArg, err = toInt(arg)
if err != nil {
return false
}
case reflect.Float64:
var err error
callArg, err = toFloat(arg)
if err != nil {
return false
// return err
}
default:
if reflect.TypeOf(arg).ConvertibleTo(callType(i)) {
callArg = arg
} else {
return false
// return argsError
}
}
callArgs[i] = reflect.ValueOf(callArg)
}
return true
}
func nop(ec *EvalCtx, args []Value) {
}
func put(ec *EvalCtx, args []Value) {
out := ec.ports[1].Chan
for _, a := range args {
out <- a
}
}
func putAll(ec *EvalCtx, lists ...List) {
out := ec.ports[1].Chan
for _, list := range lists {
for _, x := range *list.inner {
out <- x
}
}
}
func kindOf(ec *EvalCtx, args []Value) {
out := ec.ports[1].Chan
for _, a := range args {
out <- String(a.Kind())
}
}
func fail(ec *EvalCtx, arg Value) {
throw(errors.New(ToString(arg)))
}
func multiErrorFn(ec *EvalCtx, args ...Error) {
throw(MultiError{args})
}
func returnFn(ec *EvalCtx) {
throw(Return)
}
func breakFn(ec *EvalCtx) {
throw(Break)
}
func continueFn(ec *EvalCtx) {
throw(Continue)
}
func print(ec *EvalCtx, args ...string) {
out := ec.ports[1].File
for i, arg := range args {
if i > 0 {
out.WriteString(" ")
}
out.WriteString(arg)
}
}
func println(ec *EvalCtx, args ...string) {
print(ec, args...)
ec.ports[1].File.WriteString("\n")
}
func pprint(ec *EvalCtx, args []Value) {
out := ec.ports[1].File
for _, arg := range args {
out.WriteString(arg.Repr(0))
out.WriteString("\n")
}
}
func intoLines(ec *EvalCtx) {
in := ec.ports[0].Chan
out := ec.ports[1].File
for v := range in {
fmt.Fprintln(out, ToString(v))
}
}
func fromLines(ec *EvalCtx) {
in := ec.ports[0].File
out := ec.ports[1].Chan
bufferedIn := bufio.NewReader(in)
for {
line, err := bufferedIn.ReadString('\n')
if err == io.EOF {
return
} else if err != nil {
throw(err)
}
out <- String(line[:len(line)-1])
}
}
func ratFn(ec *EvalCtx, arg Value) {
out := ec.ports[1].Chan
r, err := ToRat(arg)
if err != nil {
throw(err)
}
out <- r
}
// unpack takes Elemser's from the input and unpack them.
func unpack(ec *EvalCtx) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
for v := range in {
elemser, ok := v.(Elemser)
if !ok {
throw(ErrInput)
}
for e := range elemser.Elems() {
out <- e
}
}
}
// fromJSON parses a stream of JSON data into Value's.
func fromJSON(ec *EvalCtx) {
in := ec.ports[0].File
out := ec.ports[1].Chan
dec := json.NewDecoder(in)
var v interface{}
for {
err := dec.Decode(&v)
if err != nil {
if err == io.EOF {
return
}
throw(err)
}
out <- FromJSONInterface(v)
}
}
// each takes a single closure and applies it to all input values.
func each(ec *EvalCtx, f FnValue) {
in := ec.ports[0].Chan
in:
for v := range in {
// NOTE We don't have the position range of the closure in the source.
// Ideally, it should be kept in the Closure itself.
newec := ec.fork("closure of each")
ex := newec.PCall(f, []Value{v})
ClosePorts(newec.ports)
switch ex {
case nil, Continue:
// nop
case Break:
break in
default:
throw(ex)
}
}
}
var eawkWordSep = regexp.MustCompile("[ \t]+")
// eawk takes a function. For each line in the input stream, it calls the
// function with the line and the words in the line. The words are found by
// stripping the line and splitting the line by whitespaces. The function may
// call break and continue. Overall this provides a similar functionality to
// awk, hence the name.
func eawk(ec *EvalCtx, f FnValue) {
in := bufio.NewReader(ec.ports[0].File)
in:
for {
line, err := in.ReadString('\n')
if err == io.EOF {
break
} else if err != nil {
throw(err)
}
line = line[:len(line)-1]
args := []Value{String(line)}
for _, field := range eawkWordSep.Split(strings.Trim(line, " \t"), -1) {
args = append(args, String(field))
}
newec := ec.fork("fn of eawk")
ex := newec.PCall(f, args)
ClosePorts(newec.ports)
switch ex {
case nil, Continue:
// nop
case Break:
break in
default:
throw(ex)
}
}
}
func cd(ec *EvalCtx, args []Value) {
var dir string
if len(args) == 0 {
dir = mustGetHome("")
} else if len(args) == 1 {
dir = ToString(args[0])
} else {
throw(ErrArgs)
}
cdInner(dir, ec)
}
func cdInner(dir string, ec *EvalCtx) {
err := os.Chdir(dir)
if err != nil {
throw(err)
}
if ec.store != nil {
// XXX Error ignored.
pwd, err := os.Getwd()
if err == nil {
store := ec.store
go func() {
store.Waits.Add(1)
// XXX Error ignored.
store.AddDir(pwd, 1)
store.Waits.Done()
Logger.Println("added dir to store:", pwd)
}()
}
}
}
var dirFieldNames = []string{"path", "score"}
func dirs(ec *EvalCtx) {
if ec.store == nil {
throw(ErrStoreNotConnected)
}
dirs, err := ec.store.ListDirs()
if err != nil {
throw(errors.New("store error: " + err.Error()))
}
out := ec.ports[1].Chan
for _, dir := range dirs {
out <- &Struct{dirFieldNames, []Variable{
NewRoVariable(String(dir.Path)),
NewRoVariable(String(fmt.Sprint(dir.Score))),
}}
}
}
func history(ec *EvalCtx) {
if ec.store == nil {
throw(ErrStoreNotConnected)
}
store := ec.store
seq, err := store.NextCmdSeq()
maybeThrow(err)
cmds, err := store.Cmds(0, seq)
maybeThrow(err)
out := ec.ports[1].Chan
for _, cmd := range cmds {
out <- String(cmd)
}
}
func source(ec *EvalCtx, fname string) {
ec.Source(fname)
}
func toFloat(arg Value) (float64, error) {
arg, ok := arg.(String)
if !ok {
return 0, fmt.Errorf("must be string")
}
num, err := strconv.ParseFloat(string(arg.(String)), 64)
if err != nil {
return 0, err
}
return num, nil
}
func toInt(arg Value) (int, error) {
arg, ok := arg.(String)
if !ok {
return 0, fmt.Errorf("must be string")
}
num, err := strconv.Atoi(string(arg.(String)))
if err != nil {
return 0, err
}
return num, nil
}
func plus(ec *EvalCtx, nums ...float64) {
out := ec.ports[1].Chan
sum := 0.0
for _, f := range nums {
sum += f
}
out <- String(fmt.Sprintf("%g", sum))
}
func minus(ec *EvalCtx, sum float64, nums ...float64) {
out := ec.ports[1].Chan
for _, f := range nums {
sum -= f
}
out <- String(fmt.Sprintf("%g", sum))
}
func times(ec *EvalCtx, nums ...float64) {
out := ec.ports[1].Chan
prod := 1.0
for _, f := range nums {
prod *= f
}
out <- String(fmt.Sprintf("%g", prod))
}
func divide(ec *EvalCtx, prod float64, nums ...float64) {
out := ec.ports[1].Chan
for _, f := range nums {
prod /= f
}
out <- String(fmt.Sprintf("%g", prod))
}
func pow(ec *EvalCtx, b, p float64) {
out := ec.ports[1].Chan
out <- String(fmt.Sprintf("%g", math.Pow(b, p)))
}
var ErrFalse = errors.New("false")
func lt(ec *EvalCtx, nums ...float64) {
for i := 0; i < len(nums)-1; i++ {
if !(nums[i] < nums[i+1]) {
throw(ErrFalse)
}
}
}
func gt(ec *EvalCtx, nums ...float64) {
for i := 0; i < len(nums)-1; i++ {
if !(nums[i] > nums[i+1]) {
throw(ErrFalse)
}
}
}
var ErrBadBase = errors.New("bad base")
func base(ec *EvalCtx, b int, nums ...int) {
if b < 2 || b > 36 {
throw(ErrBadBase)
}
out := ec.ports[1].Chan
for _, num := range nums {
out <- String(strconv.FormatInt(int64(num), b))
}
}
var ErrNotEqual = errors.New("not equal")
func eq(ec *EvalCtx, args []Value) {
if len(args) == 0 {
throw(ErrArgs)
}
for i := 0; i+1 < len(args); i++ {
if args[i] != args[i+1] {
throw(ErrNotEqual)
}
}
}
var ErrEqual = errors.New("equal")
func noteq(ec *EvalCtx, lhs, rhs Value) {
if lhs == rhs {
throw(ErrEqual)
}
}
func deepeq(ec *EvalCtx, args []Value) {
out := ec.ports[1].Chan
if len(args) == 0 {
throw(ErrArgs)
}
for i := 0; i+1 < len(args); i++ {
if !DeepEq(args[i], args[i+1]) {
out <- Bool(false)
return
}
}
out <- Bool(true)
}
func take(ec *EvalCtx, n int) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
i := 0
for v := range in {
if i >= n {
break
}
i++
out <- v
}
}
func drop(ec *EvalCtx, n int) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
for i := 0; i < n; i++ {
<-in
}
for v := range in {
out <- v
}
}
func lenFn(ec *EvalCtx, v Value) {
lener, ok := v.(Lener)
if !ok {
throw(fmt.Errorf("cannot get length of a %s", v.Kind()))
}
ec.ports[1].Chan <- String(strconv.Itoa(lener.Len()))
}
func count(ec *EvalCtx) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
n := 0
for range in {
n++
}
out <- String(strconv.Itoa(n))
}
func rest(ec *EvalCtx, li List) {
out := ec.ports[1].Chan
restli := (*li.inner)[1:]
out <- List{&restli}
}
func fg(ec *EvalCtx, pids ...int) {
if len(pids) == 0 {
throw(ErrArgs)
}
var thepgid int
for i, pid := range pids {
pgid, err := syscall.Getpgid(pid)
maybeThrow(err)
if i == 0 {
thepgid = pgid
} else if pgid != thepgid {
throw(ErrNotInSameGroup)
}
}
err := sys.Tcsetpgrp(0, thepgid)
maybeThrow(err)
errors := make([]Error, len(pids))
for i, pid := range pids {
err := syscall.Kill(pid, syscall.SIGCONT)
if err != nil {
errors[i] = Error{err}
}
}
for i, pid := range pids {
if errors[i] != OK {
continue
}
var ws syscall.WaitStatus
_, err = syscall.Wait4(pid, &ws, syscall.WUNTRACED, nil)
if err != nil {
errors[i] = Error{err}
} else {
errors[i] = Error{NewExternalCmdExit(ws, pid)}
}
}
throwCompositeError(errors)
}
func tildeAbbr(ec *EvalCtx, path string) {
out := ec.ports[1].Chan
out <- String(util.TildeAbbr(path))
}
func _sleep(ec *EvalCtx, t float64) {
d := time.Duration(float64(time.Second) * t)
select {
case <-ec.intCh:
throw(ErrInterrupted)
case <-time.After(d):
}
}
func _stack(ec *EvalCtx) {
out := ec.ports[1].File
// XXX dup with main.go
buf := make([]byte, 1024)
for runtime.Stack(buf, true) == cap(buf) {
buf = make([]byte, cap(buf)*2)
}
out.Write(buf)
}
func _log(ec *EvalCtx, fname string) {
maybeThrow(util.SetOutputFile(fname))
}
func _exec(ec *EvalCtx, args ...string) {
if len(args) == 0 {
args = []string{"elvish"}
}
var err error
args[0], err = ec.Search(args[0])
maybeThrow(err)
err = ec.store.Close()
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
if ec.Stub != nil {
ec.Stub.Terminate()
}
err = syscall.Exec(args[0], args, os.Environ())
maybeThrow(err)
} | &BuiltinFn{":", nop},
&BuiltinFn{"true", nop},
| random_line_split |
builtin_func.go | package eval
// Builtin functions.
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"os"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"github.com/elves/elvish/sys"
"github.com/elves/elvish/util"
)
var builtinFns []*BuiltinFn
// BuiltinFn is a builtin function.
type BuiltinFn struct {
Name string
Impl func(*EvalCtx, []Value)
}
func (*BuiltinFn) Kind() string {
return "fn"
}
func (b *BuiltinFn) Repr(int) string {
return "$" + FnPrefix + b.Name
}
// Call calls a builtin function.
func (b *BuiltinFn) Call(ec *EvalCtx, args []Value) {
b.Impl(ec, args)
}
func init() {
// Needed to work around init loop.
builtinFns = []*BuiltinFn{
&BuiltinFn{":", nop},
&BuiltinFn{"true", nop},
&BuiltinFn{"print", wrapFn(print)},
&BuiltinFn{"println", wrapFn(println)},
&BuiltinFn{"pprint", pprint},
&BuiltinFn{"into-lines", wrapFn(intoLines)},
&BuiltinFn{"from-lines", wrapFn(fromLines)},
&BuiltinFn{"rat", wrapFn(ratFn)},
&BuiltinFn{"put", put},
&BuiltinFn{"put-all", wrapFn(putAll)},
&BuiltinFn{"unpack", wrapFn(unpack)},
&BuiltinFn{"from-json", wrapFn(fromJSON)},
&BuiltinFn{"kind-of", kindOf},
&BuiltinFn{"fail", wrapFn(fail)},
&BuiltinFn{"multi-error", wrapFn(multiErrorFn)},
&BuiltinFn{"return", wrapFn(returnFn)},
&BuiltinFn{"break", wrapFn(breakFn)},
&BuiltinFn{"continue", wrapFn(continueFn)},
&BuiltinFn{"each", wrapFn(each)},
&BuiltinFn{"eawk", wrapFn(eawk)},
&BuiltinFn{"cd", cd},
&BuiltinFn{"dirs", wrapFn(dirs)},
&BuiltinFn{"history", wrapFn(history)},
&BuiltinFn{"source", wrapFn(source)},
&BuiltinFn{"+", wrapFn(plus)},
&BuiltinFn{"-", wrapFn(minus)},
&BuiltinFn{"mul", wrapFn(times)},
&BuiltinFn{"div", wrapFn(divide)},
&BuiltinFn{"pow", wrapFn(pow)},
&BuiltinFn{"lt", wrapFn(lt)},
&BuiltinFn{"gt", wrapFn(gt)},
&BuiltinFn{"base", wrapFn(base)},
&BuiltinFn{"==", eq},
&BuiltinFn{"!=", wrapFn(noteq)},
&BuiltinFn{"deepeq", deepeq},
&BuiltinFn{"take", wrapFn(take)},
&BuiltinFn{"drop", wrapFn(drop)},
&BuiltinFn{"len", wrapFn(lenFn)},
&BuiltinFn{"count", wrapFn(count)},
&BuiltinFn{"rest", wrapFn(rest)},
&BuiltinFn{"fg", wrapFn(fg)},
&BuiltinFn{"tilde-abbr", wrapFn(tildeAbbr)},
&BuiltinFn{"-sleep", wrapFn(_sleep)},
&BuiltinFn{"-stack", wrapFn(_stack)},
&BuiltinFn{"-log", wrapFn(_log)},
&BuiltinFn{"-exec", wrapFn(_exec)},
}
for _, b := range builtinFns {
builtinNamespace[FnPrefix+b.Name] = NewRoVariable(b)
}
}
var (
ErrArgs = errors.New("args error")
ErrInput = errors.New("input error")
ErrStoreNotConnected = errors.New("store not connected")
ErrNoMatchingDir = errors.New("no matching directory")
ErrNotInSameGroup = errors.New("not in the same process group")
ErrInterrupted = errors.New("interrupted")
)
var (
evalCtxType = reflect.TypeOf((*EvalCtx)(nil))
valueType = reflect.TypeOf((*Value)(nil)).Elem()
)
// wrapFn wraps an inner function into one suitable as a builtin function. It
// generates argument checking and conversion code according to the signature
// of the inner function. The inner function must accept evalCtx* as the first
// argument and return an exitus.
func wrapFn(inner interface{}) func(*EvalCtx, []Value) {
type_ := reflect.TypeOf(inner)
if type_.In(0) != evalCtxType {
panic("bad func")
}
requiredArgs := type_.NumIn() - 1
isVariadic := type_.IsVariadic()
var variadicType reflect.Type
if isVariadic {
requiredArgs--
variadicType = type_.In(type_.NumIn() - 1).Elem()
if !supportedIn(variadicType) {
panic("bad func argument")
}
}
for i := 0; i < requiredArgs; i++ {
if !supportedIn(type_.In(i + 1)) {
panic("bad func argument")
}
}
return func(ec *EvalCtx, args []Value) {
if len(args) < requiredArgs || (!isVariadic && len(args) > requiredArgs) {
throw(ErrArgs)
}
callArgs := make([]reflect.Value, len(args)+1)
callArgs[0] = reflect.ValueOf(ec)
ok := convertArgs(args[:requiredArgs], callArgs[1:],
func(i int) reflect.Type { return type_.In(i + 1) })
if !ok {
throw(ErrArgs)
}
if isVariadic {
ok := convertArgs(args[requiredArgs:], callArgs[1+requiredArgs:],
func(i int) reflect.Type { return variadicType })
if !ok {
throw(ErrArgs)
}
}
reflect.ValueOf(inner).Call(callArgs)
}
}
func supportedIn(t reflect.Type) bool {
return t.Kind() == reflect.String ||
t.Kind() == reflect.Int || t.Kind() == reflect.Float64 ||
t.Implements(valueType)
}
func convertArgs(args []Value, callArgs []reflect.Value, callType func(int) reflect.Type) bool {
for i, arg := range args {
var callArg interface{}
switch callType(i).Kind() {
case reflect.String:
callArg = ToString(arg)
case reflect.Int:
var err error
callArg, err = toInt(arg)
if err != nil {
return false
}
case reflect.Float64:
var err error
callArg, err = toFloat(arg)
if err != nil {
return false
// return err
}
default:
if reflect.TypeOf(arg).ConvertibleTo(callType(i)) {
callArg = arg
} else {
return false
// return argsError
}
}
callArgs[i] = reflect.ValueOf(callArg)
}
return true
}
func nop(ec *EvalCtx, args []Value) {
}
func put(ec *EvalCtx, args []Value) {
out := ec.ports[1].Chan
for _, a := range args {
out <- a
}
}
func putAll(ec *EvalCtx, lists ...List) {
out := ec.ports[1].Chan
for _, list := range lists {
for _, x := range *list.inner {
out <- x
}
}
}
func kindOf(ec *EvalCtx, args []Value) {
out := ec.ports[1].Chan
for _, a := range args {
out <- String(a.Kind())
}
}
func fail(ec *EvalCtx, arg Value) {
throw(errors.New(ToString(arg)))
}
func multiErrorFn(ec *EvalCtx, args ...Error) {
throw(MultiError{args})
}
func returnFn(ec *EvalCtx) {
throw(Return)
}
func breakFn(ec *EvalCtx) {
throw(Break)
}
func continueFn(ec *EvalCtx) {
throw(Continue)
}
func print(ec *EvalCtx, args ...string) {
out := ec.ports[1].File
for i, arg := range args {
if i > 0 {
out.WriteString(" ")
}
out.WriteString(arg)
}
}
func println(ec *EvalCtx, args ...string) {
print(ec, args...)
ec.ports[1].File.WriteString("\n")
}
func pprint(ec *EvalCtx, args []Value) {
out := ec.ports[1].File
for _, arg := range args {
out.WriteString(arg.Repr(0))
out.WriteString("\n")
}
}
func intoLines(ec *EvalCtx) {
in := ec.ports[0].Chan
out := ec.ports[1].File
for v := range in {
fmt.Fprintln(out, ToString(v))
}
}
func fromLines(ec *EvalCtx) {
in := ec.ports[0].File
out := ec.ports[1].Chan
bufferedIn := bufio.NewReader(in)
for {
line, err := bufferedIn.ReadString('\n')
if err == io.EOF {
return
} else if err != nil {
throw(err)
}
out <- String(line[:len(line)-1])
}
}
func ratFn(ec *EvalCtx, arg Value) {
out := ec.ports[1].Chan
r, err := ToRat(arg)
if err != nil {
throw(err)
}
out <- r
}
// unpack takes Elemser's from the input and unpack them.
func unpack(ec *EvalCtx) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
for v := range in {
elemser, ok := v.(Elemser)
if !ok {
throw(ErrInput)
}
for e := range elemser.Elems() {
out <- e
}
}
}
// fromJSON parses a stream of JSON data into Value's.
func fromJSON(ec *EvalCtx) {
in := ec.ports[0].File
out := ec.ports[1].Chan
dec := json.NewDecoder(in)
var v interface{}
for {
err := dec.Decode(&v)
if err != nil {
if err == io.EOF |
throw(err)
}
out <- FromJSONInterface(v)
}
}
// each takes a single closure and applies it to all input values.
func each(ec *EvalCtx, f FnValue) {
in := ec.ports[0].Chan
in:
for v := range in {
// NOTE We don't have the position range of the closure in the source.
// Ideally, it should be kept in the Closure itself.
newec := ec.fork("closure of each")
ex := newec.PCall(f, []Value{v})
ClosePorts(newec.ports)
switch ex {
case nil, Continue:
// nop
case Break:
break in
default:
throw(ex)
}
}
}
var eawkWordSep = regexp.MustCompile("[ \t]+")
// eawk takes a function. For each line in the input stream, it calls the
// function with the line and the words in the line. The words are found by
// stripping the line and splitting the line by whitespaces. The function may
// call break and continue. Overall this provides a similar functionality to
// awk, hence the name.
func eawk(ec *EvalCtx, f FnValue) {
in := bufio.NewReader(ec.ports[0].File)
in:
for {
line, err := in.ReadString('\n')
if err == io.EOF {
break
} else if err != nil {
throw(err)
}
line = line[:len(line)-1]
args := []Value{String(line)}
for _, field := range eawkWordSep.Split(strings.Trim(line, " \t"), -1) {
args = append(args, String(field))
}
newec := ec.fork("fn of eawk")
ex := newec.PCall(f, args)
ClosePorts(newec.ports)
switch ex {
case nil, Continue:
// nop
case Break:
break in
default:
throw(ex)
}
}
}
func cd(ec *EvalCtx, args []Value) {
var dir string
if len(args) == 0 {
dir = mustGetHome("")
} else if len(args) == 1 {
dir = ToString(args[0])
} else {
throw(ErrArgs)
}
cdInner(dir, ec)
}
func cdInner(dir string, ec *EvalCtx) {
err := os.Chdir(dir)
if err != nil {
throw(err)
}
if ec.store != nil {
// XXX Error ignored.
pwd, err := os.Getwd()
if err == nil {
store := ec.store
go func() {
store.Waits.Add(1)
// XXX Error ignored.
store.AddDir(pwd, 1)
store.Waits.Done()
Logger.Println("added dir to store:", pwd)
}()
}
}
}
var dirFieldNames = []string{"path", "score"}
func dirs(ec *EvalCtx) {
if ec.store == nil {
throw(ErrStoreNotConnected)
}
dirs, err := ec.store.ListDirs()
if err != nil {
throw(errors.New("store error: " + err.Error()))
}
out := ec.ports[1].Chan
for _, dir := range dirs {
out <- &Struct{dirFieldNames, []Variable{
NewRoVariable(String(dir.Path)),
NewRoVariable(String(fmt.Sprint(dir.Score))),
}}
}
}
func history(ec *EvalCtx) {
if ec.store == nil {
throw(ErrStoreNotConnected)
}
store := ec.store
seq, err := store.NextCmdSeq()
maybeThrow(err)
cmds, err := store.Cmds(0, seq)
maybeThrow(err)
out := ec.ports[1].Chan
for _, cmd := range cmds {
out <- String(cmd)
}
}
func source(ec *EvalCtx, fname string) {
ec.Source(fname)
}
func toFloat(arg Value) (float64, error) {
arg, ok := arg.(String)
if !ok {
return 0, fmt.Errorf("must be string")
}
num, err := strconv.ParseFloat(string(arg.(String)), 64)
if err != nil {
return 0, err
}
return num, nil
}
func toInt(arg Value) (int, error) {
arg, ok := arg.(String)
if !ok {
return 0, fmt.Errorf("must be string")
}
num, err := strconv.Atoi(string(arg.(String)))
if err != nil {
return 0, err
}
return num, nil
}
func plus(ec *EvalCtx, nums ...float64) {
out := ec.ports[1].Chan
sum := 0.0
for _, f := range nums {
sum += f
}
out <- String(fmt.Sprintf("%g", sum))
}
func minus(ec *EvalCtx, sum float64, nums ...float64) {
out := ec.ports[1].Chan
for _, f := range nums {
sum -= f
}
out <- String(fmt.Sprintf("%g", sum))
}
func times(ec *EvalCtx, nums ...float64) {
out := ec.ports[1].Chan
prod := 1.0
for _, f := range nums {
prod *= f
}
out <- String(fmt.Sprintf("%g", prod))
}
func divide(ec *EvalCtx, prod float64, nums ...float64) {
out := ec.ports[1].Chan
for _, f := range nums {
prod /= f
}
out <- String(fmt.Sprintf("%g", prod))
}
func pow(ec *EvalCtx, b, p float64) {
out := ec.ports[1].Chan
out <- String(fmt.Sprintf("%g", math.Pow(b, p)))
}
var ErrFalse = errors.New("false")
func lt(ec *EvalCtx, nums ...float64) {
for i := 0; i < len(nums)-1; i++ {
if !(nums[i] < nums[i+1]) {
throw(ErrFalse)
}
}
}
func gt(ec *EvalCtx, nums ...float64) {
for i := 0; i < len(nums)-1; i++ {
if !(nums[i] > nums[i+1]) {
throw(ErrFalse)
}
}
}
var ErrBadBase = errors.New("bad base")
func base(ec *EvalCtx, b int, nums ...int) {
if b < 2 || b > 36 {
throw(ErrBadBase)
}
out := ec.ports[1].Chan
for _, num := range nums {
out <- String(strconv.FormatInt(int64(num), b))
}
}
var ErrNotEqual = errors.New("not equal")
func eq(ec *EvalCtx, args []Value) {
if len(args) == 0 {
throw(ErrArgs)
}
for i := 0; i+1 < len(args); i++ {
if args[i] != args[i+1] {
throw(ErrNotEqual)
}
}
}
var ErrEqual = errors.New("equal")
func noteq(ec *EvalCtx, lhs, rhs Value) {
if lhs == rhs {
throw(ErrEqual)
}
}
func deepeq(ec *EvalCtx, args []Value) {
out := ec.ports[1].Chan
if len(args) == 0 {
throw(ErrArgs)
}
for i := 0; i+1 < len(args); i++ {
if !DeepEq(args[i], args[i+1]) {
out <- Bool(false)
return
}
}
out <- Bool(true)
}
func take(ec *EvalCtx, n int) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
i := 0
for v := range in {
if i >= n {
break
}
i++
out <- v
}
}
func drop(ec *EvalCtx, n int) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
for i := 0; i < n; i++ {
<-in
}
for v := range in {
out <- v
}
}
func lenFn(ec *EvalCtx, v Value) {
lener, ok := v.(Lener)
if !ok {
throw(fmt.Errorf("cannot get length of a %s", v.Kind()))
}
ec.ports[1].Chan <- String(strconv.Itoa(lener.Len()))
}
func count(ec *EvalCtx) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
n := 0
for range in {
n++
}
out <- String(strconv.Itoa(n))
}
func rest(ec *EvalCtx, li List) {
out := ec.ports[1].Chan
restli := (*li.inner)[1:]
out <- List{&restli}
}
func fg(ec *EvalCtx, pids ...int) {
if len(pids) == 0 {
throw(ErrArgs)
}
var thepgid int
for i, pid := range pids {
pgid, err := syscall.Getpgid(pid)
maybeThrow(err)
if i == 0 {
thepgid = pgid
} else if pgid != thepgid {
throw(ErrNotInSameGroup)
}
}
err := sys.Tcsetpgrp(0, thepgid)
maybeThrow(err)
errors := make([]Error, len(pids))
for i, pid := range pids {
err := syscall.Kill(pid, syscall.SIGCONT)
if err != nil {
errors[i] = Error{err}
}
}
for i, pid := range pids {
if errors[i] != OK {
continue
}
var ws syscall.WaitStatus
_, err = syscall.Wait4(pid, &ws, syscall.WUNTRACED, nil)
if err != nil {
errors[i] = Error{err}
} else {
errors[i] = Error{NewExternalCmdExit(ws, pid)}
}
}
throwCompositeError(errors)
}
func tildeAbbr(ec *EvalCtx, path string) {
out := ec.ports[1].Chan
out <- String(util.TildeAbbr(path))
}
func _sleep(ec *EvalCtx, t float64) {
d := time.Duration(float64(time.Second) * t)
select {
case <-ec.intCh:
throw(ErrInterrupted)
case <-time.After(d):
}
}
func _stack(ec *EvalCtx) {
out := ec.ports[1].File
// XXX dup with main.go
buf := make([]byte, 1024)
for runtime.Stack(buf, true) == cap(buf) {
buf = make([]byte, cap(buf)*2)
}
out.Write(buf)
}
func _log(ec *EvalCtx, fname string) {
maybeThrow(util.SetOutputFile(fname))
}
func _exec(ec *EvalCtx, args ...string) {
if len(args) == 0 {
args = []string{"elvish"}
}
var err error
args[0], err = ec.Search(args[0])
maybeThrow(err)
err = ec.store.Close()
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
if ec.Stub != nil {
ec.Stub.Terminate()
}
err = syscall.Exec(args[0], args, os.Environ())
maybeThrow(err)
}
| {
return
} | conditional_block |
builtin_func.go | package eval
// Builtin functions.
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"os"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"github.com/elves/elvish/sys"
"github.com/elves/elvish/util"
)
var builtinFns []*BuiltinFn
// BuiltinFn is a builtin function.
type BuiltinFn struct {
Name string
Impl func(*EvalCtx, []Value)
}
func (*BuiltinFn) Kind() string {
return "fn"
}
func (b *BuiltinFn) Repr(int) string {
return "$" + FnPrefix + b.Name
}
// Call calls a builtin function.
func (b *BuiltinFn) Call(ec *EvalCtx, args []Value) {
b.Impl(ec, args)
}
func init() {
// Needed to work around init loop.
builtinFns = []*BuiltinFn{
&BuiltinFn{":", nop},
&BuiltinFn{"true", nop},
&BuiltinFn{"print", wrapFn(print)},
&BuiltinFn{"println", wrapFn(println)},
&BuiltinFn{"pprint", pprint},
&BuiltinFn{"into-lines", wrapFn(intoLines)},
&BuiltinFn{"from-lines", wrapFn(fromLines)},
&BuiltinFn{"rat", wrapFn(ratFn)},
&BuiltinFn{"put", put},
&BuiltinFn{"put-all", wrapFn(putAll)},
&BuiltinFn{"unpack", wrapFn(unpack)},
&BuiltinFn{"from-json", wrapFn(fromJSON)},
&BuiltinFn{"kind-of", kindOf},
&BuiltinFn{"fail", wrapFn(fail)},
&BuiltinFn{"multi-error", wrapFn(multiErrorFn)},
&BuiltinFn{"return", wrapFn(returnFn)},
&BuiltinFn{"break", wrapFn(breakFn)},
&BuiltinFn{"continue", wrapFn(continueFn)},
&BuiltinFn{"each", wrapFn(each)},
&BuiltinFn{"eawk", wrapFn(eawk)},
&BuiltinFn{"cd", cd},
&BuiltinFn{"dirs", wrapFn(dirs)},
&BuiltinFn{"history", wrapFn(history)},
&BuiltinFn{"source", wrapFn(source)},
&BuiltinFn{"+", wrapFn(plus)},
&BuiltinFn{"-", wrapFn(minus)},
&BuiltinFn{"mul", wrapFn(times)},
&BuiltinFn{"div", wrapFn(divide)},
&BuiltinFn{"pow", wrapFn(pow)},
&BuiltinFn{"lt", wrapFn(lt)},
&BuiltinFn{"gt", wrapFn(gt)},
&BuiltinFn{"base", wrapFn(base)},
&BuiltinFn{"==", eq},
&BuiltinFn{"!=", wrapFn(noteq)},
&BuiltinFn{"deepeq", deepeq},
&BuiltinFn{"take", wrapFn(take)},
&BuiltinFn{"drop", wrapFn(drop)},
&BuiltinFn{"len", wrapFn(lenFn)},
&BuiltinFn{"count", wrapFn(count)},
&BuiltinFn{"rest", wrapFn(rest)},
&BuiltinFn{"fg", wrapFn(fg)},
&BuiltinFn{"tilde-abbr", wrapFn(tildeAbbr)},
&BuiltinFn{"-sleep", wrapFn(_sleep)},
&BuiltinFn{"-stack", wrapFn(_stack)},
&BuiltinFn{"-log", wrapFn(_log)},
&BuiltinFn{"-exec", wrapFn(_exec)},
}
for _, b := range builtinFns {
builtinNamespace[FnPrefix+b.Name] = NewRoVariable(b)
}
}
var (
ErrArgs = errors.New("args error")
ErrInput = errors.New("input error")
ErrStoreNotConnected = errors.New("store not connected")
ErrNoMatchingDir = errors.New("no matching directory")
ErrNotInSameGroup = errors.New("not in the same process group")
ErrInterrupted = errors.New("interrupted")
)
var (
evalCtxType = reflect.TypeOf((*EvalCtx)(nil))
valueType = reflect.TypeOf((*Value)(nil)).Elem()
)
// wrapFn wraps an inner function into one suitable as a builtin function. It
// generates argument checking and conversion code according to the signature
// of the inner function. The inner function must accept evalCtx* as the first
// argument and return an exitus.
func wrapFn(inner interface{}) func(*EvalCtx, []Value) {
type_ := reflect.TypeOf(inner)
if type_.In(0) != evalCtxType {
panic("bad func")
}
requiredArgs := type_.NumIn() - 1
isVariadic := type_.IsVariadic()
var variadicType reflect.Type
if isVariadic {
requiredArgs--
variadicType = type_.In(type_.NumIn() - 1).Elem()
if !supportedIn(variadicType) {
panic("bad func argument")
}
}
for i := 0; i < requiredArgs; i++ {
if !supportedIn(type_.In(i + 1)) {
panic("bad func argument")
}
}
return func(ec *EvalCtx, args []Value) {
if len(args) < requiredArgs || (!isVariadic && len(args) > requiredArgs) {
throw(ErrArgs)
}
callArgs := make([]reflect.Value, len(args)+1)
callArgs[0] = reflect.ValueOf(ec)
ok := convertArgs(args[:requiredArgs], callArgs[1:],
func(i int) reflect.Type { return type_.In(i + 1) })
if !ok {
throw(ErrArgs)
}
if isVariadic {
ok := convertArgs(args[requiredArgs:], callArgs[1+requiredArgs:],
func(i int) reflect.Type { return variadicType })
if !ok {
throw(ErrArgs)
}
}
reflect.ValueOf(inner).Call(callArgs)
}
}
func supportedIn(t reflect.Type) bool {
return t.Kind() == reflect.String ||
t.Kind() == reflect.Int || t.Kind() == reflect.Float64 ||
t.Implements(valueType)
}
func convertArgs(args []Value, callArgs []reflect.Value, callType func(int) reflect.Type) bool {
for i, arg := range args {
var callArg interface{}
switch callType(i).Kind() {
case reflect.String:
callArg = ToString(arg)
case reflect.Int:
var err error
callArg, err = toInt(arg)
if err != nil {
return false
}
case reflect.Float64:
var err error
callArg, err = toFloat(arg)
if err != nil {
return false
// return err
}
default:
if reflect.TypeOf(arg).ConvertibleTo(callType(i)) {
callArg = arg
} else {
return false
// return argsError
}
}
callArgs[i] = reflect.ValueOf(callArg)
}
return true
}
func nop(ec *EvalCtx, args []Value) {
}
func put(ec *EvalCtx, args []Value) {
out := ec.ports[1].Chan
for _, a := range args {
out <- a
}
}
func putAll(ec *EvalCtx, lists ...List) {
out := ec.ports[1].Chan
for _, list := range lists {
for _, x := range *list.inner {
out <- x
}
}
}
func kindOf(ec *EvalCtx, args []Value) {
out := ec.ports[1].Chan
for _, a := range args {
out <- String(a.Kind())
}
}
func fail(ec *EvalCtx, arg Value) {
throw(errors.New(ToString(arg)))
}
func multiErrorFn(ec *EvalCtx, args ...Error) {
throw(MultiError{args})
}
func returnFn(ec *EvalCtx) {
throw(Return)
}
func breakFn(ec *EvalCtx) {
throw(Break)
}
func continueFn(ec *EvalCtx) {
throw(Continue)
}
func print(ec *EvalCtx, args ...string) {
out := ec.ports[1].File
for i, arg := range args {
if i > 0 {
out.WriteString(" ")
}
out.WriteString(arg)
}
}
func println(ec *EvalCtx, args ...string) {
print(ec, args...)
ec.ports[1].File.WriteString("\n")
}
func pprint(ec *EvalCtx, args []Value) {
out := ec.ports[1].File
for _, arg := range args {
out.WriteString(arg.Repr(0))
out.WriteString("\n")
}
}
func intoLines(ec *EvalCtx) {
in := ec.ports[0].Chan
out := ec.ports[1].File
for v := range in {
fmt.Fprintln(out, ToString(v))
}
}
func fromLines(ec *EvalCtx) {
in := ec.ports[0].File
out := ec.ports[1].Chan
bufferedIn := bufio.NewReader(in)
for {
line, err := bufferedIn.ReadString('\n')
if err == io.EOF {
return
} else if err != nil {
throw(err)
}
out <- String(line[:len(line)-1])
}
}
func ratFn(ec *EvalCtx, arg Value) {
out := ec.ports[1].Chan
r, err := ToRat(arg)
if err != nil {
throw(err)
}
out <- r
}
// unpack takes Elemser's from the input and unpack them.
func unpack(ec *EvalCtx) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
for v := range in {
elemser, ok := v.(Elemser)
if !ok {
throw(ErrInput)
}
for e := range elemser.Elems() {
out <- e
}
}
}
// fromJSON parses a stream of JSON data into Value's.
func fromJSON(ec *EvalCtx) {
in := ec.ports[0].File
out := ec.ports[1].Chan
dec := json.NewDecoder(in)
var v interface{}
for {
err := dec.Decode(&v)
if err != nil {
if err == io.EOF {
return
}
throw(err)
}
out <- FromJSONInterface(v)
}
}
// each takes a single closure and applies it to all input values.
func each(ec *EvalCtx, f FnValue) {
in := ec.ports[0].Chan
in:
for v := range in {
// NOTE We don't have the position range of the closure in the source.
// Ideally, it should be kept in the Closure itself.
newec := ec.fork("closure of each")
ex := newec.PCall(f, []Value{v})
ClosePorts(newec.ports)
switch ex {
case nil, Continue:
// nop
case Break:
break in
default:
throw(ex)
}
}
}
var eawkWordSep = regexp.MustCompile("[ \t]+")
// eawk takes a function. For each line in the input stream, it calls the
// function with the line and the words in the line. The words are found by
// stripping the line and splitting the line by whitespaces. The function may
// call break and continue. Overall this provides a similar functionality to
// awk, hence the name.
func eawk(ec *EvalCtx, f FnValue) {
in := bufio.NewReader(ec.ports[0].File)
in:
for {
line, err := in.ReadString('\n')
if err == io.EOF {
break
} else if err != nil {
throw(err)
}
line = line[:len(line)-1]
args := []Value{String(line)}
for _, field := range eawkWordSep.Split(strings.Trim(line, " \t"), -1) {
args = append(args, String(field))
}
newec := ec.fork("fn of eawk")
ex := newec.PCall(f, args)
ClosePorts(newec.ports)
switch ex {
case nil, Continue:
// nop
case Break:
break in
default:
throw(ex)
}
}
}
func cd(ec *EvalCtx, args []Value) {
var dir string
if len(args) == 0 {
dir = mustGetHome("")
} else if len(args) == 1 {
dir = ToString(args[0])
} else {
throw(ErrArgs)
}
cdInner(dir, ec)
}
func cdInner(dir string, ec *EvalCtx) {
err := os.Chdir(dir)
if err != nil {
throw(err)
}
if ec.store != nil {
// XXX Error ignored.
pwd, err := os.Getwd()
if err == nil {
store := ec.store
go func() {
store.Waits.Add(1)
// XXX Error ignored.
store.AddDir(pwd, 1)
store.Waits.Done()
Logger.Println("added dir to store:", pwd)
}()
}
}
}
var dirFieldNames = []string{"path", "score"}
func dirs(ec *EvalCtx) {
if ec.store == nil {
throw(ErrStoreNotConnected)
}
dirs, err := ec.store.ListDirs()
if err != nil {
throw(errors.New("store error: " + err.Error()))
}
out := ec.ports[1].Chan
for _, dir := range dirs {
out <- &Struct{dirFieldNames, []Variable{
NewRoVariable(String(dir.Path)),
NewRoVariable(String(fmt.Sprint(dir.Score))),
}}
}
}
func history(ec *EvalCtx) {
if ec.store == nil {
throw(ErrStoreNotConnected)
}
store := ec.store
seq, err := store.NextCmdSeq()
maybeThrow(err)
cmds, err := store.Cmds(0, seq)
maybeThrow(err)
out := ec.ports[1].Chan
for _, cmd := range cmds {
out <- String(cmd)
}
}
func source(ec *EvalCtx, fname string) {
ec.Source(fname)
}
func toFloat(arg Value) (float64, error) {
arg, ok := arg.(String)
if !ok {
return 0, fmt.Errorf("must be string")
}
num, err := strconv.ParseFloat(string(arg.(String)), 64)
if err != nil {
return 0, err
}
return num, nil
}
func toInt(arg Value) (int, error) {
arg, ok := arg.(String)
if !ok {
return 0, fmt.Errorf("must be string")
}
num, err := strconv.Atoi(string(arg.(String)))
if err != nil {
return 0, err
}
return num, nil
}
func plus(ec *EvalCtx, nums ...float64) {
out := ec.ports[1].Chan
sum := 0.0
for _, f := range nums {
sum += f
}
out <- String(fmt.Sprintf("%g", sum))
}
func minus(ec *EvalCtx, sum float64, nums ...float64) {
out := ec.ports[1].Chan
for _, f := range nums {
sum -= f
}
out <- String(fmt.Sprintf("%g", sum))
}
func times(ec *EvalCtx, nums ...float64) {
out := ec.ports[1].Chan
prod := 1.0
for _, f := range nums {
prod *= f
}
out <- String(fmt.Sprintf("%g", prod))
}
func divide(ec *EvalCtx, prod float64, nums ...float64) {
out := ec.ports[1].Chan
for _, f := range nums {
prod /= f
}
out <- String(fmt.Sprintf("%g", prod))
}
func pow(ec *EvalCtx, b, p float64) {
out := ec.ports[1].Chan
out <- String(fmt.Sprintf("%g", math.Pow(b, p)))
}
var ErrFalse = errors.New("false")
func lt(ec *EvalCtx, nums ...float64) {
for i := 0; i < len(nums)-1; i++ {
if !(nums[i] < nums[i+1]) {
throw(ErrFalse)
}
}
}
func gt(ec *EvalCtx, nums ...float64) {
for i := 0; i < len(nums)-1; i++ {
if !(nums[i] > nums[i+1]) {
throw(ErrFalse)
}
}
}
var ErrBadBase = errors.New("bad base")
func base(ec *EvalCtx, b int, nums ...int) {
if b < 2 || b > 36 {
throw(ErrBadBase)
}
out := ec.ports[1].Chan
for _, num := range nums {
out <- String(strconv.FormatInt(int64(num), b))
}
}
var ErrNotEqual = errors.New("not equal")
func eq(ec *EvalCtx, args []Value) {
if len(args) == 0 {
throw(ErrArgs)
}
for i := 0; i+1 < len(args); i++ {
if args[i] != args[i+1] {
throw(ErrNotEqual)
}
}
}
var ErrEqual = errors.New("equal")
func noteq(ec *EvalCtx, lhs, rhs Value) {
if lhs == rhs {
throw(ErrEqual)
}
}
func deepeq(ec *EvalCtx, args []Value) {
out := ec.ports[1].Chan
if len(args) == 0 {
throw(ErrArgs)
}
for i := 0; i+1 < len(args); i++ {
if !DeepEq(args[i], args[i+1]) {
out <- Bool(false)
return
}
}
out <- Bool(true)
}
func take(ec *EvalCtx, n int) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
i := 0
for v := range in {
if i >= n {
break
}
i++
out <- v
}
}
func drop(ec *EvalCtx, n int) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
for i := 0; i < n; i++ {
<-in
}
for v := range in {
out <- v
}
}
func lenFn(ec *EvalCtx, v Value) {
lener, ok := v.(Lener)
if !ok {
throw(fmt.Errorf("cannot get length of a %s", v.Kind()))
}
ec.ports[1].Chan <- String(strconv.Itoa(lener.Len()))
}
func count(ec *EvalCtx) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
n := 0
for range in {
n++
}
out <- String(strconv.Itoa(n))
}
func rest(ec *EvalCtx, li List) {
out := ec.ports[1].Chan
restli := (*li.inner)[1:]
out <- List{&restli}
}
func fg(ec *EvalCtx, pids ...int) {
if len(pids) == 0 {
throw(ErrArgs)
}
var thepgid int
for i, pid := range pids {
pgid, err := syscall.Getpgid(pid)
maybeThrow(err)
if i == 0 {
thepgid = pgid
} else if pgid != thepgid {
throw(ErrNotInSameGroup)
}
}
err := sys.Tcsetpgrp(0, thepgid)
maybeThrow(err)
errors := make([]Error, len(pids))
for i, pid := range pids {
err := syscall.Kill(pid, syscall.SIGCONT)
if err != nil {
errors[i] = Error{err}
}
}
for i, pid := range pids {
if errors[i] != OK {
continue
}
var ws syscall.WaitStatus
_, err = syscall.Wait4(pid, &ws, syscall.WUNTRACED, nil)
if err != nil {
errors[i] = Error{err}
} else {
errors[i] = Error{NewExternalCmdExit(ws, pid)}
}
}
throwCompositeError(errors)
}
func tildeAbbr(ec *EvalCtx, path string) {
out := ec.ports[1].Chan
out <- String(util.TildeAbbr(path))
}
func _sleep(ec *EvalCtx, t float64) {
d := time.Duration(float64(time.Second) * t)
select {
case <-ec.intCh:
throw(ErrInterrupted)
case <-time.After(d):
}
}
func _stack(ec *EvalCtx) |
func _log(ec *EvalCtx, fname string) {
maybeThrow(util.SetOutputFile(fname))
}
func _exec(ec *EvalCtx, args ...string) {
if len(args) == 0 {
args = []string{"elvish"}
}
var err error
args[0], err = ec.Search(args[0])
maybeThrow(err)
err = ec.store.Close()
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
if ec.Stub != nil {
ec.Stub.Terminate()
}
err = syscall.Exec(args[0], args, os.Environ())
maybeThrow(err)
}
| {
out := ec.ports[1].File
// XXX dup with main.go
buf := make([]byte, 1024)
for runtime.Stack(buf, true) == cap(buf) {
buf = make([]byte, cap(buf)*2)
}
out.Write(buf)
} | identifier_body |
builtin_func.go | package eval
// Builtin functions.
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"os"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"github.com/elves/elvish/sys"
"github.com/elves/elvish/util"
)
var builtinFns []*BuiltinFn
// BuiltinFn is a builtin function.
type BuiltinFn struct {
Name string
Impl func(*EvalCtx, []Value)
}
func (*BuiltinFn) Kind() string {
return "fn"
}
func (b *BuiltinFn) Repr(int) string {
return "$" + FnPrefix + b.Name
}
// Call calls a builtin function.
func (b *BuiltinFn) Call(ec *EvalCtx, args []Value) {
b.Impl(ec, args)
}
func init() {
// Needed to work around init loop.
builtinFns = []*BuiltinFn{
&BuiltinFn{":", nop},
&BuiltinFn{"true", nop},
&BuiltinFn{"print", wrapFn(print)},
&BuiltinFn{"println", wrapFn(println)},
&BuiltinFn{"pprint", pprint},
&BuiltinFn{"into-lines", wrapFn(intoLines)},
&BuiltinFn{"from-lines", wrapFn(fromLines)},
&BuiltinFn{"rat", wrapFn(ratFn)},
&BuiltinFn{"put", put},
&BuiltinFn{"put-all", wrapFn(putAll)},
&BuiltinFn{"unpack", wrapFn(unpack)},
&BuiltinFn{"from-json", wrapFn(fromJSON)},
&BuiltinFn{"kind-of", kindOf},
&BuiltinFn{"fail", wrapFn(fail)},
&BuiltinFn{"multi-error", wrapFn(multiErrorFn)},
&BuiltinFn{"return", wrapFn(returnFn)},
&BuiltinFn{"break", wrapFn(breakFn)},
&BuiltinFn{"continue", wrapFn(continueFn)},
&BuiltinFn{"each", wrapFn(each)},
&BuiltinFn{"eawk", wrapFn(eawk)},
&BuiltinFn{"cd", cd},
&BuiltinFn{"dirs", wrapFn(dirs)},
&BuiltinFn{"history", wrapFn(history)},
&BuiltinFn{"source", wrapFn(source)},
&BuiltinFn{"+", wrapFn(plus)},
&BuiltinFn{"-", wrapFn(minus)},
&BuiltinFn{"mul", wrapFn(times)},
&BuiltinFn{"div", wrapFn(divide)},
&BuiltinFn{"pow", wrapFn(pow)},
&BuiltinFn{"lt", wrapFn(lt)},
&BuiltinFn{"gt", wrapFn(gt)},
&BuiltinFn{"base", wrapFn(base)},
&BuiltinFn{"==", eq},
&BuiltinFn{"!=", wrapFn(noteq)},
&BuiltinFn{"deepeq", deepeq},
&BuiltinFn{"take", wrapFn(take)},
&BuiltinFn{"drop", wrapFn(drop)},
&BuiltinFn{"len", wrapFn(lenFn)},
&BuiltinFn{"count", wrapFn(count)},
&BuiltinFn{"rest", wrapFn(rest)},
&BuiltinFn{"fg", wrapFn(fg)},
&BuiltinFn{"tilde-abbr", wrapFn(tildeAbbr)},
&BuiltinFn{"-sleep", wrapFn(_sleep)},
&BuiltinFn{"-stack", wrapFn(_stack)},
&BuiltinFn{"-log", wrapFn(_log)},
&BuiltinFn{"-exec", wrapFn(_exec)},
}
for _, b := range builtinFns {
builtinNamespace[FnPrefix+b.Name] = NewRoVariable(b)
}
}
var (
ErrArgs = errors.New("args error")
ErrInput = errors.New("input error")
ErrStoreNotConnected = errors.New("store not connected")
ErrNoMatchingDir = errors.New("no matching directory")
ErrNotInSameGroup = errors.New("not in the same process group")
ErrInterrupted = errors.New("interrupted")
)
var (
evalCtxType = reflect.TypeOf((*EvalCtx)(nil))
valueType = reflect.TypeOf((*Value)(nil)).Elem()
)
// wrapFn wraps an inner function into one suitable as a builtin function. It
// generates argument checking and conversion code according to the signature
// of the inner function. The inner function must accept evalCtx* as the first
// argument and return an exitus.
func wrapFn(inner interface{}) func(*EvalCtx, []Value) {
type_ := reflect.TypeOf(inner)
if type_.In(0) != evalCtxType {
panic("bad func")
}
requiredArgs := type_.NumIn() - 1
isVariadic := type_.IsVariadic()
var variadicType reflect.Type
if isVariadic {
requiredArgs--
variadicType = type_.In(type_.NumIn() - 1).Elem()
if !supportedIn(variadicType) {
panic("bad func argument")
}
}
for i := 0; i < requiredArgs; i++ {
if !supportedIn(type_.In(i + 1)) {
panic("bad func argument")
}
}
return func(ec *EvalCtx, args []Value) {
if len(args) < requiredArgs || (!isVariadic && len(args) > requiredArgs) {
throw(ErrArgs)
}
callArgs := make([]reflect.Value, len(args)+1)
callArgs[0] = reflect.ValueOf(ec)
ok := convertArgs(args[:requiredArgs], callArgs[1:],
func(i int) reflect.Type { return type_.In(i + 1) })
if !ok {
throw(ErrArgs)
}
if isVariadic {
ok := convertArgs(args[requiredArgs:], callArgs[1+requiredArgs:],
func(i int) reflect.Type { return variadicType })
if !ok {
throw(ErrArgs)
}
}
reflect.ValueOf(inner).Call(callArgs)
}
}
func supportedIn(t reflect.Type) bool {
return t.Kind() == reflect.String ||
t.Kind() == reflect.Int || t.Kind() == reflect.Float64 ||
t.Implements(valueType)
}
func convertArgs(args []Value, callArgs []reflect.Value, callType func(int) reflect.Type) bool {
for i, arg := range args {
var callArg interface{}
switch callType(i).Kind() {
case reflect.String:
callArg = ToString(arg)
case reflect.Int:
var err error
callArg, err = toInt(arg)
if err != nil {
return false
}
case reflect.Float64:
var err error
callArg, err = toFloat(arg)
if err != nil {
return false
// return err
}
default:
if reflect.TypeOf(arg).ConvertibleTo(callType(i)) {
callArg = arg
} else {
return false
// return argsError
}
}
callArgs[i] = reflect.ValueOf(callArg)
}
return true
}
func nop(ec *EvalCtx, args []Value) {
}
func put(ec *EvalCtx, args []Value) {
out := ec.ports[1].Chan
for _, a := range args {
out <- a
}
}
func putAll(ec *EvalCtx, lists ...List) {
out := ec.ports[1].Chan
for _, list := range lists {
for _, x := range *list.inner {
out <- x
}
}
}
func kindOf(ec *EvalCtx, args []Value) {
out := ec.ports[1].Chan
for _, a := range args {
out <- String(a.Kind())
}
}
func fail(ec *EvalCtx, arg Value) {
throw(errors.New(ToString(arg)))
}
func multiErrorFn(ec *EvalCtx, args ...Error) {
throw(MultiError{args})
}
func returnFn(ec *EvalCtx) {
throw(Return)
}
func breakFn(ec *EvalCtx) {
throw(Break)
}
func continueFn(ec *EvalCtx) {
throw(Continue)
}
func print(ec *EvalCtx, args ...string) {
out := ec.ports[1].File
for i, arg := range args {
if i > 0 {
out.WriteString(" ")
}
out.WriteString(arg)
}
}
func println(ec *EvalCtx, args ...string) {
print(ec, args...)
ec.ports[1].File.WriteString("\n")
}
func pprint(ec *EvalCtx, args []Value) {
out := ec.ports[1].File
for _, arg := range args {
out.WriteString(arg.Repr(0))
out.WriteString("\n")
}
}
func intoLines(ec *EvalCtx) {
in := ec.ports[0].Chan
out := ec.ports[1].File
for v := range in {
fmt.Fprintln(out, ToString(v))
}
}
func fromLines(ec *EvalCtx) {
in := ec.ports[0].File
out := ec.ports[1].Chan
bufferedIn := bufio.NewReader(in)
for {
line, err := bufferedIn.ReadString('\n')
if err == io.EOF {
return
} else if err != nil {
throw(err)
}
out <- String(line[:len(line)-1])
}
}
func ratFn(ec *EvalCtx, arg Value) {
out := ec.ports[1].Chan
r, err := ToRat(arg)
if err != nil {
throw(err)
}
out <- r
}
// unpack takes Elemser's from the input and unpack them.
func unpack(ec *EvalCtx) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
for v := range in {
elemser, ok := v.(Elemser)
if !ok {
throw(ErrInput)
}
for e := range elemser.Elems() {
out <- e
}
}
}
// fromJSON parses a stream of JSON data into Value's.
func fromJSON(ec *EvalCtx) {
in := ec.ports[0].File
out := ec.ports[1].Chan
dec := json.NewDecoder(in)
var v interface{}
for {
err := dec.Decode(&v)
if err != nil {
if err == io.EOF {
return
}
throw(err)
}
out <- FromJSONInterface(v)
}
}
// each takes a single closure and applies it to all input values.
func each(ec *EvalCtx, f FnValue) {
in := ec.ports[0].Chan
in:
for v := range in {
// NOTE We don't have the position range of the closure in the source.
// Ideally, it should be kept in the Closure itself.
newec := ec.fork("closure of each")
ex := newec.PCall(f, []Value{v})
ClosePorts(newec.ports)
switch ex {
case nil, Continue:
// nop
case Break:
break in
default:
throw(ex)
}
}
}
var eawkWordSep = regexp.MustCompile("[ \t]+")
// eawk takes a function. For each line in the input stream, it calls the
// function with the line and the words in the line. The words are found by
// stripping the line and splitting the line by whitespaces. The function may
// call break and continue. Overall this provides a similar functionality to
// awk, hence the name.
func eawk(ec *EvalCtx, f FnValue) {
in := bufio.NewReader(ec.ports[0].File)
in:
for {
line, err := in.ReadString('\n')
if err == io.EOF {
break
} else if err != nil {
throw(err)
}
line = line[:len(line)-1]
args := []Value{String(line)}
for _, field := range eawkWordSep.Split(strings.Trim(line, " \t"), -1) {
args = append(args, String(field))
}
newec := ec.fork("fn of eawk")
ex := newec.PCall(f, args)
ClosePorts(newec.ports)
switch ex {
case nil, Continue:
// nop
case Break:
break in
default:
throw(ex)
}
}
}
func | (ec *EvalCtx, args []Value) {
var dir string
if len(args) == 0 {
dir = mustGetHome("")
} else if len(args) == 1 {
dir = ToString(args[0])
} else {
throw(ErrArgs)
}
cdInner(dir, ec)
}
func cdInner(dir string, ec *EvalCtx) {
err := os.Chdir(dir)
if err != nil {
throw(err)
}
if ec.store != nil {
// XXX Error ignored.
pwd, err := os.Getwd()
if err == nil {
store := ec.store
go func() {
store.Waits.Add(1)
// XXX Error ignored.
store.AddDir(pwd, 1)
store.Waits.Done()
Logger.Println("added dir to store:", pwd)
}()
}
}
}
var dirFieldNames = []string{"path", "score"}
func dirs(ec *EvalCtx) {
if ec.store == nil {
throw(ErrStoreNotConnected)
}
dirs, err := ec.store.ListDirs()
if err != nil {
throw(errors.New("store error: " + err.Error()))
}
out := ec.ports[1].Chan
for _, dir := range dirs {
out <- &Struct{dirFieldNames, []Variable{
NewRoVariable(String(dir.Path)),
NewRoVariable(String(fmt.Sprint(dir.Score))),
}}
}
}
func history(ec *EvalCtx) {
if ec.store == nil {
throw(ErrStoreNotConnected)
}
store := ec.store
seq, err := store.NextCmdSeq()
maybeThrow(err)
cmds, err := store.Cmds(0, seq)
maybeThrow(err)
out := ec.ports[1].Chan
for _, cmd := range cmds {
out <- String(cmd)
}
}
func source(ec *EvalCtx, fname string) {
ec.Source(fname)
}
func toFloat(arg Value) (float64, error) {
arg, ok := arg.(String)
if !ok {
return 0, fmt.Errorf("must be string")
}
num, err := strconv.ParseFloat(string(arg.(String)), 64)
if err != nil {
return 0, err
}
return num, nil
}
func toInt(arg Value) (int, error) {
arg, ok := arg.(String)
if !ok {
return 0, fmt.Errorf("must be string")
}
num, err := strconv.Atoi(string(arg.(String)))
if err != nil {
return 0, err
}
return num, nil
}
func plus(ec *EvalCtx, nums ...float64) {
out := ec.ports[1].Chan
sum := 0.0
for _, f := range nums {
sum += f
}
out <- String(fmt.Sprintf("%g", sum))
}
func minus(ec *EvalCtx, sum float64, nums ...float64) {
out := ec.ports[1].Chan
for _, f := range nums {
sum -= f
}
out <- String(fmt.Sprintf("%g", sum))
}
func times(ec *EvalCtx, nums ...float64) {
out := ec.ports[1].Chan
prod := 1.0
for _, f := range nums {
prod *= f
}
out <- String(fmt.Sprintf("%g", prod))
}
func divide(ec *EvalCtx, prod float64, nums ...float64) {
out := ec.ports[1].Chan
for _, f := range nums {
prod /= f
}
out <- String(fmt.Sprintf("%g", prod))
}
func pow(ec *EvalCtx, b, p float64) {
out := ec.ports[1].Chan
out <- String(fmt.Sprintf("%g", math.Pow(b, p)))
}
var ErrFalse = errors.New("false")
func lt(ec *EvalCtx, nums ...float64) {
for i := 0; i < len(nums)-1; i++ {
if !(nums[i] < nums[i+1]) {
throw(ErrFalse)
}
}
}
func gt(ec *EvalCtx, nums ...float64) {
for i := 0; i < len(nums)-1; i++ {
if !(nums[i] > nums[i+1]) {
throw(ErrFalse)
}
}
}
var ErrBadBase = errors.New("bad base")
func base(ec *EvalCtx, b int, nums ...int) {
if b < 2 || b > 36 {
throw(ErrBadBase)
}
out := ec.ports[1].Chan
for _, num := range nums {
out <- String(strconv.FormatInt(int64(num), b))
}
}
var ErrNotEqual = errors.New("not equal")
func eq(ec *EvalCtx, args []Value) {
if len(args) == 0 {
throw(ErrArgs)
}
for i := 0; i+1 < len(args); i++ {
if args[i] != args[i+1] {
throw(ErrNotEqual)
}
}
}
var ErrEqual = errors.New("equal")
func noteq(ec *EvalCtx, lhs, rhs Value) {
if lhs == rhs {
throw(ErrEqual)
}
}
func deepeq(ec *EvalCtx, args []Value) {
out := ec.ports[1].Chan
if len(args) == 0 {
throw(ErrArgs)
}
for i := 0; i+1 < len(args); i++ {
if !DeepEq(args[i], args[i+1]) {
out <- Bool(false)
return
}
}
out <- Bool(true)
}
func take(ec *EvalCtx, n int) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
i := 0
for v := range in {
if i >= n {
break
}
i++
out <- v
}
}
func drop(ec *EvalCtx, n int) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
for i := 0; i < n; i++ {
<-in
}
for v := range in {
out <- v
}
}
func lenFn(ec *EvalCtx, v Value) {
lener, ok := v.(Lener)
if !ok {
throw(fmt.Errorf("cannot get length of a %s", v.Kind()))
}
ec.ports[1].Chan <- String(strconv.Itoa(lener.Len()))
}
func count(ec *EvalCtx) {
in := ec.ports[0].Chan
out := ec.ports[1].Chan
n := 0
for range in {
n++
}
out <- String(strconv.Itoa(n))
}
func rest(ec *EvalCtx, li List) {
out := ec.ports[1].Chan
restli := (*li.inner)[1:]
out <- List{&restli}
}
func fg(ec *EvalCtx, pids ...int) {
if len(pids) == 0 {
throw(ErrArgs)
}
var thepgid int
for i, pid := range pids {
pgid, err := syscall.Getpgid(pid)
maybeThrow(err)
if i == 0 {
thepgid = pgid
} else if pgid != thepgid {
throw(ErrNotInSameGroup)
}
}
err := sys.Tcsetpgrp(0, thepgid)
maybeThrow(err)
errors := make([]Error, len(pids))
for i, pid := range pids {
err := syscall.Kill(pid, syscall.SIGCONT)
if err != nil {
errors[i] = Error{err}
}
}
for i, pid := range pids {
if errors[i] != OK {
continue
}
var ws syscall.WaitStatus
_, err = syscall.Wait4(pid, &ws, syscall.WUNTRACED, nil)
if err != nil {
errors[i] = Error{err}
} else {
errors[i] = Error{NewExternalCmdExit(ws, pid)}
}
}
throwCompositeError(errors)
}
func tildeAbbr(ec *EvalCtx, path string) {
out := ec.ports[1].Chan
out <- String(util.TildeAbbr(path))
}
func _sleep(ec *EvalCtx, t float64) {
d := time.Duration(float64(time.Second) * t)
select {
case <-ec.intCh:
throw(ErrInterrupted)
case <-time.After(d):
}
}
func _stack(ec *EvalCtx) {
out := ec.ports[1].File
// XXX dup with main.go
buf := make([]byte, 1024)
for runtime.Stack(buf, true) == cap(buf) {
buf = make([]byte, cap(buf)*2)
}
out.Write(buf)
}
func _log(ec *EvalCtx, fname string) {
maybeThrow(util.SetOutputFile(fname))
}
func _exec(ec *EvalCtx, args ...string) {
if len(args) == 0 {
args = []string{"elvish"}
}
var err error
args[0], err = ec.Search(args[0])
maybeThrow(err)
err = ec.store.Close()
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
if ec.Stub != nil {
ec.Stub.Terminate()
}
err = syscall.Exec(args[0], args, os.Environ())
maybeThrow(err)
}
| cd | identifier_name |
settings.py | """
Copyright 2018 ООО «Верме»
Настройки проекта outsourcing
"""
import os
import logging
import tempfile
from .settings_local import DEBUG
from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i@g+(1qc06b@8ee4*3!f0i9g*28ddsx39gv!nvs9w_(p$)p*cy'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '*'] # TODO
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'apps.outsource',
'apps.claims',
'apps.shifts',
'apps.employees',
'apps.remotes',
'apps.notifications',
'apps.lib',
'apps.permission',
'apps.config',
'apps.authutils',
'apps.violations',
'apps.easy_log',
'compressor',
'social_django',
'axes',
'saml',
'applogs',
'xlsexport',
'wfm_admin',
'rangefilter',
]
AUTHENTICATION_BACKENDS = (
#'django.contrib.auth.backends.ModelBackend',
'apps.authutils.backends.EmailLoginBackend',
'apps.authutils.backends.UsernameLoginBackend',
'saml.backends.SAMLAuthExt',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
# WARN: http://breachattack.com/, http://breachattack.com/resources/BREACH%20-%20SSL,%20gone%20in%2030%20seconds.pdf
'django.middleware.gzip.GZipMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'wfm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wfm.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': os.environ.get('WFM_DB_NAME', 'out_db'),
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
'userlogs': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': 'wfm_log',
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
'applogs': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': 'app_logs',
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
}
try:
from .settings_local import DATABASES
except ImportError:
pass
DATABASE_ROUTERS = [
'applogs.db_router.LogsDBRouter',
'apps.easy_log.db_router.EasyLogRouter',
'wfm.default_db_router.DefaultDBRouter',
]
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 6,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static_collected/")
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/upload/'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'compressor-cache': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': os.path.join(tempfile.gettempdir(), 'django_compressor_cache'),
'TIMEOUT': None,
'OPTIONS': {
'MAX_ENTRIES': 1000,
},
},
'axes_cache': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
try:
from .settings_local import CACHES
except ImportError:
pass
COMPRESS_CACHE_BACKEND = 'compressor-cache'
COMPRESS_ENABLED = False
try:
from .settings_local import COMPRESS_ENABLED
except ImportError:
pass
LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = LOGIN_URL
# -------------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------- АВТОРИЗАЦИЯ ---------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# Social Auth (social_core/pipeline/__init__.py)
# Доступные способы авторизации
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'saml.pipelines.associate_by_name_id',
'social_core.pipeline.social_auth.associate_user',
)
# SAML error handler - ошибка авторизации
SOCIAL_AUTH_LOGIN_ERROR_URL = '/saml/error?type=login-error'
# SAML error handler - блокированный пользователь
SOCIAL_AUTH_INACTIVE_USER_URL = '/saml/error?type=inactive-user'
# SAML error handler - обрыв подключения
#SOCIAL_AUTH_DISCONNECT_REDIRECT_URL = LOGOUT_REDIRECT_URL
# Информация о приложении
SOCIAL_AUTH_SAML_ORG_INFO = {
"en-US": {
"name": "Verme Identity Provider",
"displayname": "Verme Identity Provider",
"url": "https://verme.ru",
}
}
# Контакты технического специалиста.
SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {
"givenName": "VERME Info",
"emailAddress": "info@verme.ru"
}
# Контакты поддержки
SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {
"givenName": "VERME Support",
"emailAddress": "support@verme.ru",
}
# Общие параметры SAML-протокола
SOCIAL_AUTH_SAML_SECURITY_CONFIG = {
'wantNameId': True,
'wantAttributeStatement': False,
"logoutRequestSigned": True,
"logoutResponseSigned": True,
"signatureAlgorithm": "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256",
}
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
try:
from .social import *
except ImportError:
pass
# Логи
class F(logging.Filter):
""" Этот "фильтр" не фильтрует, а добавляет в объекты record айпи и имя
юзера, делающего запрос, чтоб форматтер их вставил потом в строку """
def filter(self, record):
# TODO: похоже, это всё больше не работает, потому что вместо request'а тут какой-то socket
request = getattr(record, 'request', None)
if request and hasattr(request, 'user'): # user
record.user = request.user
else:
record.user = '--'
if request and hasattr(request, 'META'): # IP
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
record.ip = x_forwarded_for.split(',')[-1]
else:
record.ip = request.META.get('REMOTE_ADDR')
else:
record.ip = '--'
return True
try:
os.mkdir(os.path.join(BASE_DIR, 'logs'))
except FileExistsError:
pass
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'main': {
'()': F
}
},
'formatters': {
'stamp': {
'format': '%(levelname)s [%(asctime)s] %(ip)s "%(user)s" %(name)s.%(module)s %(message)s'
},
},
'handlers': {
'file_main': {
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'logs', 'main.log'),
'formatter': 'stamp',
'filters': ['main'],
},
'console': {
'class': 'logging.StreamHandler',
'formatter': 'stamp',
'filters': ['main'],
},
'db': {
'class': 'applogs.handlers.DBLogsHandler',
'filters': ['main'],
},
},
'loggers': {
'django': {
'handlers': ['file_main', 'console'],
'level': 'WARNING',
},
'apps': {
'handlers': ['file_main', 'console'],
'level': 'DEBUG',
},
'command': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
'api': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
'remote_service': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
},
}
try:
from .wfm_admin import ADMIN_COLUMNS, ADMIN_SECTIONS
except ImportError:
ADMIN_SECTIONS = {}
ADMIN_COLUMNS = []
DATA_UPLOAD_MAX_MEMORY_SIZE = 100 * 1024 * 1024 # 10 MB
DATA_UPLOAD_MAX_NUMBER_FIELDS = 10000
# AXES config
def username_getter(request, credentials):
from apps.authutils.views import axes_username_getter
return axes_username_getter(request, credentials)
AXES_CACHE = 'axes_cache'
AXES_COOLOFF_TIME = timedelta(minutes=5)
AXES_FAILURE_LIMIT = 10
AXES_LOCKOUT_TEMPLATE = 'login_locked.html'
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = True
AXES_USERNAME_CALLABLE = username_getter
AXES_META_PRECEDENCE_ORDER = ('HTTP_X_R | EAL_IP',)
| identifier_body | |
settings.py | """
Copyright 2018 ООО «Верме»
Настройки проекта outsourcing
"""
import os
import logging
import tempfile
from .settings_local import DEBUG
from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i@g+(1qc06b@8ee4*3!f0i9g*28ddsx39gv!nvs9w_(p$)p*cy'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '*'] # TODO
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'apps.outsource',
'apps.claims',
'apps.shifts',
'apps.employees',
'apps.remotes',
'apps.notifications',
'apps.lib',
'apps.permission',
'apps.config',
'apps.authutils',
'apps.violations',
'apps.easy_log',
'compressor',
'social_django',
'axes',
'saml',
'applogs',
'xlsexport',
'wfm_admin',
'rangefilter',
]
AUTHENTICATION_BACKENDS = (
#'django.contrib.auth.backends.ModelBackend',
'apps.authutils.backends.EmailLoginBackend',
'apps.authutils.backends.UsernameLoginBackend',
'saml.backends.SAMLAuthExt',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
# WARN: http://breachattack.com/, http://breachattack.com/resources/BREACH%20-%20SSL,%20gone%20in%2030%20seconds.pdf
'django.middleware.gzip.GZipMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'wfm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wfm.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': os.environ.get('WFM_DB_NAME', 'out_db'),
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
'userlogs': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': 'wfm_log',
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
'applogs': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': 'app_logs',
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
}
try:
from .settings_local import DATABASES
except ImportError:
pass
DATABASE_ROUTERS = [
'applogs.db_router.LogsDBRouter',
'apps.easy_log.db_router.EasyLogRouter',
'wfm.default_db_router.DefaultDBRouter',
]
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 6,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static_collected/")
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/upload/'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'compressor-cache': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': os.path.join(tempfile.gettempdir(), 'django_compressor_cache'),
'TIMEOUT': None,
'OPTIONS': {
'MAX_ENTRIES': 1000,
},
},
'axes_cache': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
try:
from .settings_local import CACHES
except ImportError:
pass
COMPRESS_CACHE_BACKEND = 'compressor-cache'
COMPRESS_ENABLED = False
try:
from .settings_local import COMPRESS_ENABLED
except ImportError:
pass
LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = LOGIN_URL
# -------------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------- АВТОРИЗАЦИЯ ---------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# Social Auth (social_core/pipeline/__init__.py)
# Доступные способы авторизации
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'saml.pipelines.associate_by_name_id',
'social_core.pipeline.social_auth.associate_user',
)
# SAML error handler - ошибка авторизации
SOCIAL_AUTH_LOGIN_ERROR_URL = '/saml/error?type=login-error'
# SAML error handler - блокированный пользователь
SOCIAL_AUTH_INACTIVE_USER_URL = '/saml/error?type=inactive-user'
# SAML error handler - обрыв подключения
#SOCIAL_AUTH_DISCONNECT_REDIRECT_URL = LOGOUT_REDIRECT_URL
# Информация о приложении
SOCIAL_AUTH_SAML_ORG_INFO = {
"en-US": {
"name": "Verme Identity Provider",
"displayname": "Verme Identity Provider",
"url": "https://verme.ru",
}
}
# Контакты технического специалиста.
SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {
"givenName": "VERME Info",
"emailAddress": "info@verme.ru"
}
# Контакты поддержки
SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {
"givenName": "VERME Support",
"emailAddress": "support@verme.ru",
}
# Общие параметры SAML-протокола
SOCIAL_AUTH_SAML_SECURITY_CONFIG = {
'wantNameId': True,
'wantAttributeStatement': False,
"logoutRequestSigned": True,
"logoutResponseSigned": True,
"signatureAlgorithm": "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256",
}
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
try:
from .social import *
except ImportError:
pass
# Логи
class F(logging.Filter):
""" Этот "фильтр" не фильтрует, а добавляет в объекты record айпи и имя
юзера, делающего запрос, чтоб форматтер их вставил потом в строку """
def filter(self, record):
# TODO: похоже, это всё больше не работает, потому что вместо request'а тут какой-то socket
request = getattr(record, 'request', None)
if request and hasattr(request, 'user'): # user
record.user = request.user
else:
record.user = '--'
if request and hasattr(request, 'META'): # IP
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
record.ip = x_forwarded_for.split(',')[-1]
else:
record.ip = request.META.get('REMOTE_ADDR')
else:
record.ip = '--'
return True
try:
os.mkdir(os.path.join(BASE_DIR, 'logs'))
except FileExistsError:
pass
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'main': {
'()': F
}
},
'formatters': {
'stamp': {
'format': '%(levelname)s [%(asctime)s] %(ip)s "%(user)s" %(name)s.%(module)s %(message)s'
},
},
'handlers': {
'file_main': {
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'logs', 'main.log'),
'formatter': 'stamp',
'filters': ['main'],
},
'console': {
'class': 'logging.StreamHandler',
'formatter': 'stamp',
'filters': ['main'],
},
'db': {
'class': 'applogs.handlers.DBLogsHandler',
'filters': ['main'],
},
},
'loggers': {
'django': {
'handlers': ['file_main', 'console'],
'level': 'WARNING',
},
'apps': {
'handlers': ['file_main', 'console'],
'level': 'DEBUG',
},
'command': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
'api': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
'remote_service': { | 'level': 'DEBUG',
},
},
}
try:
from .wfm_admin import ADMIN_COLUMNS, ADMIN_SECTIONS
except ImportError:
ADMIN_SECTIONS = {}
ADMIN_COLUMNS = []
DATA_UPLOAD_MAX_MEMORY_SIZE = 100 * 1024 * 1024 # 10 MB
DATA_UPLOAD_MAX_NUMBER_FIELDS = 10000
# AXES config
def username_getter(request, credentials):
from apps.authutils.views import axes_username_getter
return axes_username_getter(request, credentials)
AXES_CACHE = 'axes_cache'
AXES_COOLOFF_TIME = timedelta(minutes=5)
AXES_FAILURE_LIMIT = 10
AXES_LOCKOUT_TEMPLATE = 'login_locked.html'
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = True
AXES_USERNAME_CALLABLE = username_getter
AXES_META_PRECEDENCE_ORDER = ('HTTP_X_REAL_IP',) | 'handlers': ['db', 'console'], | random_line_split |
settings.py | """
Copyright 2018 ООО «Верме»
Настройки проекта outsourcing
"""
import os
import logging
import tempfile
from .settings_local import DEBUG
from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i@g+(1qc06b@8ee4*3!f0i9g*28ddsx39gv!nvs9w_(p$)p*cy'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '*'] # TODO
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'apps.outsource',
'apps.claims',
'apps.shifts',
'apps.employees',
'apps.remotes',
'apps.notifications',
'apps.lib',
'apps.permission',
'apps.config',
'apps.authutils',
'apps.violations',
'apps.easy_log',
'compressor',
'social_django',
'axes',
'saml',
'applogs',
'xlsexport',
'wfm_admin',
'rangefilter',
]
AUTHENTICATION_BACKENDS = (
#'django.contrib.auth.backends.ModelBackend',
'apps.authutils.backends.EmailLoginBackend',
'apps.authutils.backends.UsernameLoginBackend',
'saml.backends.SAMLAuthExt',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
# WARN: http://breachattack.com/, http://breachattack.com/resources/BREACH%20-%20SSL,%20gone%20in%2030%20seconds.pdf
'django.middleware.gzip.GZipMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'wfm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wfm.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': os.environ.get('WFM_DB_NAME', 'out_db'),
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
'userlogs': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': 'wfm_log',
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
'applogs': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': 'app_logs',
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
}
try:
from .settings_local import DATABASES
except ImportError:
pass
DATABASE_ROUTERS = [
'applogs.db_router.LogsDBRouter',
'apps.easy_log.db_router.EasyLogRouter',
'wfm.default_db_router.DefaultDBRouter',
]
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 6,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static_collected/")
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/upload/'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'compressor-cache': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': os.path.join(tempfile.gettempdir(), 'django_compressor_cache'),
'TIMEOUT': None,
'OPTIONS': {
'MAX_ENTRIES': 1000,
},
},
'axes_cache': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
try:
from .settings_local import CACHES
except ImportError:
pass
COMPRESS_CACHE_BACKEND = 'compressor-cache'
COMPRESS_ENABLED = False
try:
from .settings_local import COMPRESS_ENABLED
except ImportError:
pass
LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = LOGIN_URL
# -------------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------- АВТОРИЗАЦИЯ ---------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# Social Auth (social_core/pipeline/__init__.py)
# Доступные способы авторизации
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'saml.pipelines.associate_by_name_id',
'social_core.pipeline.social_auth.associate_user',
)
# SAML error handler - ошибка авторизации
SOCIAL_AUTH_LOGIN_ERROR_URL = '/saml/error?type=login-error'
# SAML error handler - блокированный пользователь
SOCIAL_AUTH_INACTIVE_USER_URL = '/saml/error?type=inactive-user'
# SAML error handler - обрыв подключения
#SOCIAL_AUTH_DISCONNECT_REDIRECT_URL = LOGOUT_REDIRECT_URL
# Информация о приложении
SOCIAL_AUTH_SAML_ORG_INFO = {
"en-US": {
"name": "Verme Identity Provider",
"displayname": "Verme Identity Provider",
"url": "https://verme.ru",
}
}
# Контакты технического специалиста.
SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {
"givenName": "VERME Info",
"emailAddress": "info@verme.ru"
}
# Контакты поддержки
SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {
"givenName": "VERME Support",
"emailAddress": "support@verme.ru",
}
# Общие параметры SAML-протокола
SOCIAL_AUTH_SAML_SECURITY_CONFIG = {
'wantNameId': True,
'wantAttributeStatement': False,
"logoutRequestSigned": True,
"logoutResponseSigned": True,
"signatureAlgorithm": "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256",
}
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
try:
from .social import *
except ImportError:
pass
# Логи
class F(logging.Filter):
""" Этот "фильтр" не фильтрует, а добавляет в объекты record айпи и имя
юзера, делающего запрос, чтоб форматтер их вставил потом в строку """
def filter(self, record):
# TODO: | похоже, это всё больше не работает, потому что вместо request'а тут какой-то socket
request = getattr(record, 'request', None)
if request and hasattr(request, 'user'): # user
record.user = request.user
else:
record.user = '--'
if request and hasattr(request, 'META'): # IP
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
record.ip = x_forwarded_for.split(',')[-1]
else:
record.ip = request.META.get('REMOTE_ADDR')
else:
record.ip = '--'
return True
try:
os.mkdir(os.path.join(BASE_DIR, 'logs'))
except FileExistsError:
pass
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'main': {
'()': F
}
},
'formatters': {
'stamp': {
'format': '%(levelname)s [%(asctime)s] %(ip)s "%(user)s" %(name)s.%(module)s %(message)s'
},
},
'handlers': {
'file_main': {
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'logs', 'main.log'),
'formatter': 'stamp',
'filters': ['main'],
},
'console': {
'class': 'logging.StreamHandler',
'formatter': 'stamp',
'filters': ['main'],
},
'db': {
'class': 'applogs.handlers.DBLogsHandler',
'filters': ['main'],
},
},
'loggers': {
'django': {
'handlers': ['file_main', 'console'],
'level': 'WARNING',
},
'apps': {
'handlers': ['file_main', 'console'],
'level': 'DEBUG',
},
'command': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
'api': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
'remote_service': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
},
}
try:
from .wfm_admin import ADMIN_COLUMNS, ADMIN_SECTIONS
except ImportError:
ADMIN_SECTIONS = {}
ADMIN_COLUMNS = []
DATA_UPLOAD_MAX_MEMORY_SIZE = 100 * 1024 * 1024 # 10 MB
DATA_UPLOAD_MAX_NUMBER_FIELDS = 10000
# AXES config
def username_getter(request, credentials):
from apps.authutils.views import axes_username_getter
return axes_username_getter(request, credentials)
AXES_CACHE = 'axes_cache'
AXES_COOLOFF_TIME = timedelta(minutes=5)
AXES_FAILURE_LIMIT = 10
AXES_LOCKOUT_TEMPLATE = 'login_locked.html'
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = True
AXES_USERNAME_CALLABLE = username_getter
AXES_META_PRECEDENCE_ORDER = ('HTTP_X_REAL_IP',)
| identifier_name | |
settings.py | """
Copyright 2018 ООО «Верме»
Настройки проекта outsourcing
"""
import os
import logging
import tempfile
from .settings_local import DEBUG
from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i@g+(1qc06b@8ee4*3!f0i9g*28ddsx39gv!nvs9w_(p$)p*cy'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '*'] # TODO
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'apps.outsource',
'apps.claims',
'apps.shifts',
'apps.employees',
'apps.remotes',
'apps.notifications',
'apps.lib',
'apps.permission',
'apps.config',
'apps.authutils',
'apps.violations',
'apps.easy_log',
'compressor',
'social_django',
'axes',
'saml',
'applogs',
'xlsexport',
'wfm_admin',
'rangefilter',
]
AUTHENTICATION_BACKENDS = (
#'django.contrib.auth.backends.ModelBackend',
'apps.authutils.backends.EmailLoginBackend',
'apps.authutils.backends.UsernameLoginBackend',
'saml.backends.SAMLAuthExt',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
# WARN: http://breachattack.com/, http://breachattack.com/resources/BREACH%20-%20SSL,%20gone%20in%2030%20seconds.pdf
'django.middleware.gzip.GZipMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'wfm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wfm.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': os.environ.get('WFM_DB_NAME', 'out_db'),
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
'userlogs': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': 'wfm_log',
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
'applogs': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('WFM_DB_HOST', '127.0.0.1'),
'NAME': 'app_logs',
'USER': os.environ.get('WFM_DB_USER', 'wfm'),
'PASSWORD': os.environ.get('WFM_DB_PASSWORD', 'wfm'),
},
}
try:
from .settings_local import DATABASES
except ImportError:
pass
DATABASE_ROUTERS = [
'applogs.db_router.LogsDBRouter',
'apps.easy_log.db_router.EasyLogRouter',
'wfm.default_db_router.DefaultDBRouter',
]
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 6,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static_collected/")
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/upload/'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'compressor-cache': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': os.path.join(tempfile.gettempdir(), 'django_compressor_cache'),
'TIMEOUT': None,
'OPTIONS': {
'MAX_ENTRIES': 1000,
},
},
'axes_cache': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
try:
from .settings_local import CACHES
except ImportError:
pass
COMPRESS_CACHE_BACKEND = 'compressor-cache'
COMPRESS_ENABLED = False
try:
from .settings_local import COMPRESS_ENABLED
except ImportError:
pass
LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = LOGIN_URL
# -------------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------- АВТОРИЗАЦИЯ ---------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# Social Auth (social_core/pipeline/__init__.py)
# Доступные способы авторизации
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'saml.pipelines.associate_by_name_id',
'social_core.pipeline.social_auth.associate_user',
)
# SAML error handler - ошибка авторизации
SOCIAL_AUTH_LOGIN_ERROR_URL = '/saml/error?type=login-error'
# SAML error handler - блокированный пользователь
SOCIAL_AUTH_INACTIVE_USER_URL = '/saml/error?type=inactive-user'
# SAML error handler - обрыв подключения
#SOCIAL_AUTH_DISCONNECT_REDIRECT_URL = LOGOUT_REDIRECT_URL
# Информация о приложении
SOCIAL_AUTH_SAML_ORG_INFO = {
"en-US": {
"name": "Verme Identity Provider",
"displayname": "Verme Identity Provider",
"url": "https://verme.ru",
}
}
# Контакты технического специалиста.
SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {
"givenName": "VERME Info",
"emailAddress": "info@verme.ru"
}
# Контакты поддержки
SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {
"givenName": "VERME Support",
"emailAddress": "support@verme.ru",
}
# Общие параметры SAML-протокола
SOCIAL_AUTH_SAML_SECURITY_CONFIG = {
'wantNameId': True,
'wantAttributeStatement': False,
"logoutRequestSigned": True,
"logoutResponseSigned": True,
"signatureAlgorithm": "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256",
}
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
try:
from .social import *
except ImportError:
pass
# Логи
class F(logging.Filter):
""" Этот "фильтр" не фильтрует, а добавляет в объекты record айпи и имя
юзера, делающего запрос, чтоб форматтер их вставил потом в строку """
def filter(self, record):
# TODO: похоже, это всё больше не работает, потому что вместо request'а тут какой-то socket
request = getattr(record, 'request', None)
if request and hasattr(request, 'user'): # user
record.user = request.user
else:
record.user = '--'
if request and hasattr(request, 'META'): # IP
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
record.ip = x_forwarded_for.split(',')[-1]
else:
record.ip = request.META.get('REMOTE_ADDR')
else:
record.ip = '--'
return True
try:
os.mkdir(os.path.join(BASE_DIR, 'logs'))
except FileExistsError:
pass
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'main': {
'()': F
}
},
'formatters': {
'stamp': {
'format': '%(levelname)s [%(asctime)s] %(ip)s "%(user)s" %( | s %(message)s'
},
},
'handlers': {
'file_main': {
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'logs', 'main.log'),
'formatter': 'stamp',
'filters': ['main'],
},
'console': {
'class': 'logging.StreamHandler',
'formatter': 'stamp',
'filters': ['main'],
},
'db': {
'class': 'applogs.handlers.DBLogsHandler',
'filters': ['main'],
},
},
'loggers': {
'django': {
'handlers': ['file_main', 'console'],
'level': 'WARNING',
},
'apps': {
'handlers': ['file_main', 'console'],
'level': 'DEBUG',
},
'command': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
'api': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
'remote_service': {
'handlers': ['db', 'console'],
'level': 'DEBUG',
},
},
}
try:
from .wfm_admin import ADMIN_COLUMNS, ADMIN_SECTIONS
except ImportError:
ADMIN_SECTIONS = {}
ADMIN_COLUMNS = []
DATA_UPLOAD_MAX_MEMORY_SIZE = 100 * 1024 * 1024 # 10 MB
DATA_UPLOAD_MAX_NUMBER_FIELDS = 10000
# AXES config
def username_getter(request, credentials):
from apps.authutils.views import axes_username_getter
return axes_username_getter(request, credentials)
AXES_CACHE = 'axes_cache'
AXES_COOLOFF_TIME = timedelta(minutes=5)
AXES_FAILURE_LIMIT = 10
AXES_LOCKOUT_TEMPLATE = 'login_locked.html'
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = True
AXES_USERNAME_CALLABLE = username_getter
AXES_META_PRECEDENCE_ORDER = ('HTTP_X_REAL_IP',)
| name)s.%(module) | conditional_block |
chainmaker_yaml_types.go | /*
Copyright (C) BABEC. All rights reserved.
Copyright (C) THL A29 Limited, a Tencent company. All rights reserved.
SPDX-License-Identifier: Apache-2.0
*/
package localconf
import (
"chainmaker.org/chainmaker-go/logger"
"gopkg.in/yaml.v2"
"io/ioutil"
"io/fs"
)
type nodeConfig struct {
Type string `yaml:"type"`
CertFile string `yaml:"cert_file"`
PrivKeyFile string `yaml:"priv_key_file"`
PrivKeyPassword string `yaml:"priv_key_password"`
AuthType string `yaml:"auth_type"`
P11Config pkcs11Config `yaml:"pkcs11"`
NodeId string `yaml:"node_id"`
OrgId string `yaml:"org_id"`
SignerCacheSize int `yaml:"signer_cache_size"`
CertCacheSize int `yaml:"cert_cache_size"`
}
type netConfig struct {
Provider string `yaml:"provider"`
ListenAddr string `yaml:"listen_addr"`
PeerStreamPoolSize int `yaml:"peer_stream_pool_size"`
MaxPeerCountAllow int `yaml:"max_peer_count_allow"`
PeerEliminationStrategy int `yaml:"peer_elimination_strategy"`
Seeds []string `yaml:"seeds"`
TLSConfig netTlsConfig `yaml:"tls"`
BlackList blackList `yaml:"blacklist"`
CustomChainTrustRoots []chainTrustRoots `yaml:"custom_chain_trust_roots"`
}
type netTlsConfig struct {
Enabled bool `yaml:"enabled"`
PrivKeyFile string `yaml:"priv_key_file"`
CertFile string `yaml:"cert_file"`
}
type pkcs11Config struct {
Enabled bool `yaml:"enabled"`
Library string `yaml:"library"`
Label string `yaml:"label"`
Password string `yaml:"password"`
SessionCacheSize int `yaml:"session_cache_size"`
Hash string `yaml:"hash"`
}
type blackList struct {
Addresses []string `yaml:"addresses"`
NodeIds []string `yaml:"node_ids"`
}
type chainTrustRoots struct {
ChainId string `yaml:"chain_id"`
TrustRoots []trustRoots `yaml:"trust_roots"`
}
type trustRoots struct {
OrgId string `yaml:"org_id"`
Root string `yaml:"root"`
}
type rpcConfig struct {
Provider string `yaml:"provider"`
Port int `yaml:"port"`
TLSConfig tlsConfig `yaml:"tls"`
RateLimitConfig rateLimitConfig `yaml:"ratelimit"`
SubscriberConfig subscriberConfig `yaml:"subscriber"`
CheckChainConfTrustRootsChangeInterval int `yaml:"check_chain_conf_trust_roots_change_interval"`
}
type tlsConfig struct {
Mode string `yaml:"mode"`
PrivKeyFile string `yaml:"priv_key_file"`
CertFile string `yaml:"cert_file"`
TestClientPrivKeyFile string `yaml:"test_client_priv_key_file"`
TestClientCertFile string `yaml:"test_client_cert_file"`
}
type rateLimitConfig struct {
TokenPerSecond int `yaml:"token_per_second"`
TokenBucketSize int `yaml:"token_bucket_size"`
}
type subscriberConfig struct {
RateLimitConfig rateLimitConfig `yaml:"ratelimit"`
}
type debugConfig struct {
IsCliOpen bool `yaml:"is_cli_open"`
IsHttpOpen bool `yaml:"is_http_open"`
IsProposer bool `yaml:"is_proposer"`
IsNotRWSetCheck bool `yaml:"is_not_rwset_check"`
IsConcurPropose bool `yaml:"is_concur_propose"`
IsConcurVerify bool `yaml:"is_concur_verify"`
IsSolo bool `yaml:"is_solo"`
IsHaltPropose bool `yaml:"is_halt_propose"`
IsSkipAccessControl bool `yaml:"is_skip_access_control"` // true: minimize access control; false: use full access control
IsTraceMemoryUsage bool `yaml:"is_trace_memory_usage"` // true for trace memory usage information periodically
IsProposeDuplicately bool `yaml:"is_propose_duplicately"` // Simulate a node which would propose duplicate after it has proposed Proposal
IsProposeMultiNodeDuplicately bool `yaml:"is_propose_multinode_duplicately"` // Simulate a malicious node which would propose duplicate proposals
IsProposalOldHeight bool `yaml:"is_proposal_old_height"`
IsPrevoteDuplicately bool `yaml:"is_prevote_duplicately"` // Simulate a malicious node which would prevote duplicately
IsPrevoteOldHeight bool `yaml:"is_prevote_old_height"` // Simulate a malicious node which would prevote for oldheight
IsPrevoteLost bool `yaml:"is_prevote_lost"` //prevote vote lost
IsPrecommitDuplicately bool `yaml:"is_precommit_duplicately"` //Simulate a malicious node which would propose duplicate precommits
IsPrecommitOldHeight bool `yaml:"is_precommit_old_height"` // Simulate a malicious node which would Precommit a lower height than current height
IsProposeLost bool `yaml:"is_propose_lost"` //proposal vote lost
IsProposeDelay bool `yaml:"is_propose_delay"` //proposal lost
IsPrevoteDelay bool `yaml:"is_prevote_delay"` //network problem resulting in preovote lost
IsPrecommitLost bool `yaml:"is_precommit_lost"` //precommit vote lost
IsPrecommitDelay bool `yaml:"is_prevcommit_delay"` //network problem resulting in precommit lost
IsCommitWithoutPublish bool `yaml:"is_commit_without_publish"` //if the node committing block without publishing, TRUE;else, FALSE
IsPrevoteInvalid bool `yaml:"is_prevote_invalid"` //simulate a node which sends an invalid prevote(hash=nil)
IsPrecommitInvalid bool `yaml:"is_precommit_invalid"` //simulate a node which sends an invalid precommit(hash=nil)
IsModifyTxPayload bool `yaml:"is_modify_tx_payload"`
IsExtreme bool `yaml:"is_extreme"` //extreme fast mode
UseNetMsgCompression bool `yaml:"use_net_msg_compression"`
IsNetInsecurity bool `yaml:"is_net_insecurity"`
}
type BlockchainConfig struct {
ChainId string
Genesis string
}
type StorageConfig struct {
//默认的Leveldb配置,如果每个DB有不同的设置,可以在自己的DB中进行设置
StorePath string `yaml:"store_path"`
DbPrefix string `yaml:"db_prefix"`
WriteBufferSize int `yaml:"write_buffer_size"`
BloomFilterBits int `yaml:"bloom_filter_bits"`
BlockWriteBufferSize int `yaml:"block_write_buffer_size"`
//数据库模式:light只存区块头,normal存储区块头和交易以及生成的State,full存储了区块头、交易、状态和交易收据(读写集、日志等)
//Mode string `yaml:"mode"`
DisableHistoryDB bool `yaml:"disable_historydb"`
DisableResultDB bool `yaml:"disable_resultdb"`
DisableContractEventDB bool `yaml:"disable_contract_eventdb"`
LogDBWriteAsync bool `yaml:"logdb_write_async"`
BlockDbConfig *DbConfig `yaml:"blockdb_config"`
StateDbConfig *DbConfig `yaml:"statedb_config"`
HistoryDbConfig *DbConfig `yaml:"historydb_config"`
ResultDbConfig *DbConfig `yaml:"resultdb_config"`
ContractEventDbConfig *DbConfig `yaml:"contract_eventdb_config"`
UnArchiveBlockHeight uint64 `yaml:"unarchive_block_height"`
}
func (config *StorageConfig) setDefault() {
if config.DbPrefix != "" {
if config.BlockDbConfig != nil && config.BlockDbConfig.SqlDbConfig != nil && config.BlockDbConfig.SqlDbConfig.DbPrefix == "" {
config.BlockDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.StateDbConfig != nil && config.StateDbConfig.SqlDbConfig != nil && config.StateDbConfig.SqlDbConfig.DbPrefix == "" {
config.StateDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.HistoryDbConfig != nil && config.HistoryDbConfig.SqlDbConfig != nil && config.HistoryDbConfig.SqlDbConfig.DbPrefix == "" {
config.HistoryDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.ResultDbConfig != nil && config.ResultDbConfig.SqlDbConfig != nil && config.ResultDbConfig.SqlDbConfig.DbPrefix == "" {
config.ResultDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.ContractEventDbConfig != nil && config.ContractEventDbConfig.SqlDbConfig != nil && config.ContractEventDbConfig.SqlDbConfig.DbPrefix == "" {
config.ContractEventDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
}
}
func (config *StorageConfig) GetBlockDbConfig() *DbConfig {
if config.BlockDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.BlockDbConfig
}
func (config *StorageConfig) GetStateDbConfig() *DbConfig {
if config.StateDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.StateDbConfig
}
func (config *StorageConfig) GetHistoryDbConfig() *DbConfig {
if config.HistoryDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.HistoryDbConfig
}
func (config *StorageConfig) GetResultDbConfig() *DbConfig {
if config.ResultDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.ResultDbConfig
}
func (config *StorageConfig) GetContractEventDbConfig() *DbConfig {
if config.ContractEventDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.ContractEventDbConfig
}
func (config *StorageConfig) GetDefaultDBConfig() *DbConfig {
lconfig := &LevelDbConfig{
StorePath: config.StorePath,
WriteBufferSize: config.WriteBufferSize,
BloomFilterBits: config.BloomFilterBits,
BlockWriteBufferSize: config.WriteBufferSize,
}
return &DbConfig{
Provider: "leveldb",
LevelDbConfig: lconfig,
}
}
//根据配置的DisableDB的情况,确定当前配置活跃的数据库数量
func (config *StorageConfig) GetActiveDBCount() int {
count := 5
if config.DisableContractEventDB {
count--
}
if config.DisableHistoryDB {
count--
}
if config.DisableResultDB {
count--
}
return count
}
type DbConfig struct {
//leveldb,rocksdb,sql
Provider string `yaml:"provider"`
LevelDbConfig *LevelDbConfig `yaml:"leveldb_config"`
SqlDbConfig *SqlDbConfig `yaml:"sqldb_config"`
}
const DbConfig_Provider_Sql = "sql"
const DbConfig_Provider_LevelDb = "leveldb"
const DbConfig_Provider_RocksDb = "rocksdb"
func (dbc *DbConfig) IsKVDB() bool {
return dbc.Provider == DbConfig_Provider_LevelDb || dbc.Provider == DbConfig_Provider_RocksDb
}
func (dbc *DbConfig) IsSqlDB() bool {
return dbc.Provider == DbConfig_Provider_Sql || dbc.Provider == "mysql" || dbc.Provider == "rdbms" //兼容其他配置情况
}
type LevelDbConfig struct {
StorePath string `yaml:"store_path"`
WriteBufferSize int `yaml:"write_buffer_size"`
BloomFilterBits int `yaml:"bloom_filter_bits"`
BlockWriteBufferSize int `yaml:"block_write_buffer_size"`
}
type SqlDbConfig struct {
//mysql, sqlite, postgres, sqlserver
SqlDbType string `yaml:"sqldb_type"`
Dsn string `yaml:"dsn"`
MaxIdleConns int `yaml:"max_idle_conns"`
MaxOpenConns int `yaml:"max_open_conns"`
ConnMaxLifeTime int `yaml:"conn_max_lifetime"` //second
SqlLogMode string `yaml:"sqllog_mode"` //Silent,Error,Warn,Info
SqlVerifier string `yaml:"sql_verifier"` //simple,safe
DbPrefix string `yaml:"db_prefix"`
}
const SqlDbConfig_SqlDbType_MySQL = "mysql"
const SqlDbConfig_SqlDbType_Sqlite = "sqlite"
type txPoolConfig struct {
PoolType string `yaml:"pool_type"`
MaxTxPoolSize uint32 `yaml:"max_txpool_size"`
MaxConfigTxPoolSize uint32 `yaml:"max_config_txpool_size"`
IsMetrics bool `yaml:"is_metrics"`
Performance bool `yaml:"performance"`
BatchMaxSize int `yaml:"batch_max_size"`
BatchCreateTimeout int64 `yaml:"batch_create_timeout"`
CacheFlushTicker int64 `yaml:"cache_flush_ticker"`
CacheThresholdCount int64 `yaml:"cache_threshold_count"`
CacheFlushTimeOut int64 `yaml:"cache_flush_timeout"`
AddTxChannelSize int64 `yaml:"add_tx_channel_size"`
}
type syncConfig struct {
BroadcastTime uint32 `yaml:"broadcast_time"`
BlockPoolSize uint32 `yaml:"block_pool_size"`
WaitTimeOfBlockRequestMsg uint32 `yaml:"wait_time_requested"`
BatchSizeFromOneNode uint32 `yaml:"batch_Size_from_one_node"`
ProcessBlockTick float64 `yaml:"process_block_tick"`
NodeStatusTick float64 `yaml:"node_status_tick"`
LivenessTick float64 `yaml:"liveness_tick"`
SchedulerTick float64 `yaml:"scheduler_tick"`
ReqTimeThreshold float64 `yaml:"req_time_threshold"`
DataDetectionTick float64 `yaml:"data_detection_tick"`
}
type spvConfig struct {
RefreshReqCacheMills int64 `yaml:"refresh_reqcache_mils"`
MessageCacheSize int64 `yaml:"message_cahche_size"`
ReSyncCheckIntervalMills int64 `yaml:"resync_check_interval_mils"`
SyncTimeoutMills int64 `yaml:"sync_timeout_mils"`
ReqSyncBlockNum int64 `yaml:"reqsync_blocknum"`
MaxReqSyncBlockNum int64 `yaml:"max_reqsync_blocknum"`
PeerActiveTime int64 `yaml:"peer_active_time"`
}
type monitorConfig struct {
Enabled bool `yaml:"enabled"`
Port int `yaml:"port"`
}
type pprofConfig struct {
Enabled bool `yaml:"enabled"`
Port int `yaml:"port"`
} | Url string `yaml:"url"`
Auth string `yaml:"auth"`
DB int `yaml:"db"`
MaxIdle int `yaml:"max_idle"`
MaxActive int `yaml:"max_active"`
IdleTimeout int `yaml:"idle_timeout"`
CacheTimeout int `yaml:"cache_timeout"`
}
type clientConfig struct {
OrgId string `yaml:"org_id"`
UserKeyFilePath string `yaml:"user_key_file_path"`
UserCrtFilePath string `yaml:"user_crt_file_path"`
HashType string `yaml:"hash_type"`
}
type schedulerConfig struct {
RWSetLog bool `yaml:"rwset_log"`
}
type coreConfig struct {
Evidence bool `yaml:"evidence"`
}
// CMConfig - Local config struct
type CMConfig struct {
LogConfig logger.LogConfig `yaml:"log"`
NetConfig netConfig `yaml:"net"`
NodeConfig nodeConfig `yaml:"node"`
RpcConfig rpcConfig `yaml:"rpc"`
BlockChainConfig []BlockchainConfig `yaml:"blockchain"`
StorageConfig StorageConfig `yaml:"storage"`
TxPoolConfig txPoolConfig `yaml:"txpool"`
SyncConfig syncConfig `yaml:"sync"`
SpvConfig spvConfig `yaml:"spv"`
// 开发调试使用
DebugConfig debugConfig `yaml:"debug"`
PProfConfig pprofConfig `yaml:"pprof"`
MonitorConfig monitorConfig `yaml:"monitor"`
CoreConfig coreConfig `yaml:"core"`
SchedulerConfig schedulerConfig `yaml:"scheduler"`
}
// write config into file
func (config *CMConfig) WriteFile(fileName string,fileMode fs.FileMode) error {
data,err := yaml.Marshal(config)
if err != nil{
return err
}
err = ioutil.WriteFile(fileName,data,fileMode)
if err != nil{
return err
}
return nil
}
//read config from configfile
func (config *CMConfig) ReadFile(fileName string) error {
data,err := ioutil.ReadFile(fileName)
if err != nil{
return err
}
err = yaml.Unmarshal(data,config)
if err != nil{
return err
}
return nil
}
// GetBlockChains - get blockchain config list
func (c *CMConfig) GetBlockChains() []BlockchainConfig {
return c.BlockChainConfig
} |
type redisConfig struct { | random_line_split |
chainmaker_yaml_types.go | /*
Copyright (C) BABEC. All rights reserved.
Copyright (C) THL A29 Limited, a Tencent company. All rights reserved.
SPDX-License-Identifier: Apache-2.0
*/
package localconf
import (
"chainmaker.org/chainmaker-go/logger"
"gopkg.in/yaml.v2"
"io/ioutil"
"io/fs"
)
type nodeConfig struct {
Type string `yaml:"type"`
CertFile string `yaml:"cert_file"`
PrivKeyFile string `yaml:"priv_key_file"`
PrivKeyPassword string `yaml:"priv_key_password"`
AuthType string `yaml:"auth_type"`
P11Config pkcs11Config `yaml:"pkcs11"`
NodeId string `yaml:"node_id"`
OrgId string `yaml:"org_id"`
SignerCacheSize int `yaml:"signer_cache_size"`
CertCacheSize int `yaml:"cert_cache_size"`
}
type netConfig struct {
Provider string `yaml:"provider"`
ListenAddr string `yaml:"listen_addr"`
PeerStreamPoolSize int `yaml:"peer_stream_pool_size"`
MaxPeerCountAllow int `yaml:"max_peer_count_allow"`
PeerEliminationStrategy int `yaml:"peer_elimination_strategy"`
Seeds []string `yaml:"seeds"`
TLSConfig netTlsConfig `yaml:"tls"`
BlackList blackList `yaml:"blacklist"`
CustomChainTrustRoots []chainTrustRoots `yaml:"custom_chain_trust_roots"`
}
type netTlsConfig struct {
Enabled bool `yaml:"enabled"`
PrivKeyFile string `yaml:"priv_key_file"`
CertFile string `yaml:"cert_file"`
}
type pkcs11Config struct {
Enabled bool `yaml:"enabled"`
Library string `yaml:"library"`
Label string `yaml:"label"`
Password string `yaml:"password"`
SessionCacheSize int `yaml:"session_cache_size"`
Hash string `yaml:"hash"`
}
type blackList struct {
Addresses []string `yaml:"addresses"`
NodeIds []string `yaml:"node_ids"`
}
type chainTrustRoots struct {
ChainId string `yaml:"chain_id"`
TrustRoots []trustRoots `yaml:"trust_roots"`
}
type trustRoots struct {
OrgId string `yaml:"org_id"`
Root string `yaml:"root"`
}
type rpcConfig struct {
Provider string `yaml:"provider"`
Port int `yaml:"port"`
TLSConfig tlsConfig `yaml:"tls"`
RateLimitConfig rateLimitConfig `yaml:"ratelimit"`
SubscriberConfig subscriberConfig `yaml:"subscriber"`
CheckChainConfTrustRootsChangeInterval int `yaml:"check_chain_conf_trust_roots_change_interval"`
}
type tlsConfig struct {
Mode string `yaml:"mode"`
PrivKeyFile string `yaml:"priv_key_file"`
CertFile string `yaml:"cert_file"`
TestClientPrivKeyFile string `yaml:"test_client_priv_key_file"`
TestClientCertFile string `yaml:"test_client_cert_file"`
}
type rateLimitConfig struct {
TokenPerSecond int `yaml:"token_per_second"`
TokenBucketSize int `yaml:"token_bucket_size"`
}
type subscriberConfig struct {
RateLimitConfig rateLimitConfig `yaml:"ratelimit"`
}
type debugConfig struct {
IsCliOpen bool `yaml:"is_cli_open"`
IsHttpOpen bool `yaml:"is_http_open"`
IsProposer bool `yaml:"is_proposer"`
IsNotRWSetCheck bool `yaml:"is_not_rwset_check"`
IsConcurPropose bool `yaml:"is_concur_propose"`
IsConcurVerify bool `yaml:"is_concur_verify"`
IsSolo bool `yaml:"is_solo"`
IsHaltPropose bool `yaml:"is_halt_propose"`
IsSkipAccessControl bool `yaml:"is_skip_access_control"` // true: minimize access control; false: use full access control
IsTraceMemoryUsage bool `yaml:"is_trace_memory_usage"` // true for trace memory usage information periodically
IsProposeDuplicately bool `yaml:"is_propose_duplicately"` // Simulate a node which would propose duplicate after it has proposed Proposal
IsProposeMultiNodeDuplicately bool `yaml:"is_propose_multinode_duplicately"` // Simulate a malicious node which would propose duplicate proposals
IsProposalOldHeight bool `yaml:"is_proposal_old_height"`
IsPrevoteDuplicately bool `yaml:"is_prevote_duplicately"` // Simulate a malicious node which would prevote duplicately
IsPrevoteOldHeight bool `yaml:"is_prevote_old_height"` // Simulate a malicious node which would prevote for oldheight
IsPrevoteLost bool `yaml:"is_prevote_lost"` //prevote vote lost
IsPrecommitDuplicately bool `yaml:"is_precommit_duplicately"` //Simulate a malicious node which would propose duplicate precommits
IsPrecommitOldHeight bool `yaml:"is_precommit_old_height"` // Simulate a malicious node which would Precommit a lower height than current height
IsProposeLost bool `yaml:"is_propose_lost"` //proposal vote lost
IsProposeDelay bool `yaml:"is_propose_delay"` //proposal lost
IsPrevoteDelay bool `yaml:"is_prevote_delay"` //network problem resulting in preovote lost
IsPrecommitLost bool `yaml:"is_precommit_lost"` //precommit vote lost
IsPrecommitDelay bool `yaml:"is_prevcommit_delay"` //network problem resulting in precommit lost
IsCommitWithoutPublish bool `yaml:"is_commit_without_publish"` //if the node committing block without publishing, TRUE;else, FALSE
IsPrevoteInvalid bool `yaml:"is_prevote_invalid"` //simulate a node which sends an invalid prevote(hash=nil)
IsPrecommitInvalid bool `yaml:"is_precommit_invalid"` //simulate a node which sends an invalid precommit(hash=nil)
IsModifyTxPayload bool `yaml:"is_modify_tx_payload"`
IsExtreme bool `yaml:"is_extreme"` //extreme fast mode
UseNetMsgCompression bool `yaml:"use_net_msg_compression"`
IsNetInsecurity bool `yaml:"is_net_insecurity"`
}
type BlockchainConfig struct {
ChainId string
Genesis string
}
type StorageConfig struct {
//默认的Leveldb配置,如果每个DB有不同的设置,可以在自己的DB中进行设置
StorePath string `yaml:"store_path"`
DbPrefix string `yaml:"db_prefix"`
WriteBufferSize int `yaml:"write_buffer_size"`
BloomFilterBits int `yaml:"bloom_filter_bits"`
BlockWriteBufferSize int `yaml:"block_write_buffer_size"`
//数据库模式:light只存区块头,normal存储区块头和交易以及生成的State,full存储了区块头、交易、状态和交易收据(读写集、日志等)
//Mode string `yaml:"mode"`
DisableHistoryDB bool `yaml:"disable_historydb"`
DisableResultDB bool `yaml:"disable_resultdb"`
DisableContractEventDB bool `yaml:"disable_contract_eventdb"`
LogDBWriteAsync bool `yaml:"logdb_write_async"`
BlockDbConfig *DbConfig `yaml:"blockdb_config"`
StateDbConfig *DbConfig `yaml:"statedb_config"`
HistoryDbConfig *DbConfig `yaml:"historydb_config"`
ResultDbConfig *DbConfig `yaml:"resultdb_config"`
ContractEventDbConfig *DbConfig `yaml:"contract_eventdb_config"`
UnArchiveBlockHeight uint64 `yaml:"unarchive_block_height"`
}
func (config *StorageConfig) setDefault() {
if config.DbPrefix != "" {
if config.BlockDbConfig != nil && config.BlockDbConfig.SqlDbConfig != nil && config.BlockDbConfig.SqlDbConfig.DbPrefix == "" {
config.BlockDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.StateDbConfig != nil && config.StateDbConfig.SqlDbConfig != nil && config.StateDbConfig.SqlDbConfig.DbPrefix == "" {
config.StateDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.HistoryDbConfig != nil && config.HistoryDbConfig.SqlDbConfig != nil && config | fig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.ResultDbConfig != nil && config.ResultDbConfig.SqlDbConfig != nil && config.ResultDbConfig.SqlDbConfig.DbPrefix == "" {
config.ResultDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.ContractEventDbConfig != nil && config.ContractEventDbConfig.SqlDbConfig != nil && config.ContractEventDbConfig.SqlDbConfig.DbPrefix == "" {
config.ContractEventDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
}
}
func (config *StorageConfig) GetBlockDbConfig() *DbConfig {
if config.BlockDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.BlockDbConfig
}
func (config *StorageConfig) GetStateDbConfig() *DbConfig {
if config.StateDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.StateDbConfig
}
func (config *StorageConfig) GetHistoryDbConfig() *DbConfig {
if config.HistoryDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.HistoryDbConfig
}
func (config *StorageConfig) GetResultDbConfig() *DbConfig {
if config.ResultDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.ResultDbConfig
}
func (config *StorageConfig) GetContractEventDbConfig() *DbConfig {
if config.ContractEventDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.ContractEventDbConfig
}
func (config *StorageConfig) GetDefaultDBConfig() *DbConfig {
lconfig := &LevelDbConfig{
StorePath: config.StorePath,
WriteBufferSize: config.WriteBufferSize,
BloomFilterBits: config.BloomFilterBits,
BlockWriteBufferSize: config.WriteBufferSize,
}
return &DbConfig{
Provider: "leveldb",
LevelDbConfig: lconfig,
}
}
//根据配置的DisableDB的情况,确定当前配置活跃的数据库数量
func (config *StorageConfig) GetActiveDBCount() int {
count := 5
if config.DisableContractEventDB {
count--
}
if config.DisableHistoryDB {
count--
}
if config.DisableResultDB {
count--
}
return count
}
type DbConfig struct {
//leveldb,rocksdb,sql
Provider string `yaml:"provider"`
LevelDbConfig *LevelDbConfig `yaml:"leveldb_config"`
SqlDbConfig *SqlDbConfig `yaml:"sqldb_config"`
}
const DbConfig_Provider_Sql = "sql"
const DbConfig_Provider_LevelDb = "leveldb"
const DbConfig_Provider_RocksDb = "rocksdb"
func (dbc *DbConfig) IsKVDB() bool {
return dbc.Provider == DbConfig_Provider_LevelDb || dbc.Provider == DbConfig_Provider_RocksDb
}
func (dbc *DbConfig) IsSqlDB() bool {
return dbc.Provider == DbConfig_Provider_Sql || dbc.Provider == "mysql" || dbc.Provider == "rdbms" //兼容其他配置情况
}
type LevelDbConfig struct {
StorePath string `yaml:"store_path"`
WriteBufferSize int `yaml:"write_buffer_size"`
BloomFilterBits int `yaml:"bloom_filter_bits"`
BlockWriteBufferSize int `yaml:"block_write_buffer_size"`
}
type SqlDbConfig struct {
//mysql, sqlite, postgres, sqlserver
SqlDbType string `yaml:"sqldb_type"`
Dsn string `yaml:"dsn"`
MaxIdleConns int `yaml:"max_idle_conns"`
MaxOpenConns int `yaml:"max_open_conns"`
ConnMaxLifeTime int `yaml:"conn_max_lifetime"` //second
SqlLogMode string `yaml:"sqllog_mode"` //Silent,Error,Warn,Info
SqlVerifier string `yaml:"sql_verifier"` //simple,safe
DbPrefix string `yaml:"db_prefix"`
}
const SqlDbConfig_SqlDbType_MySQL = "mysql"
const SqlDbConfig_SqlDbType_Sqlite = "sqlite"
type txPoolConfig struct {
PoolType string `yaml:"pool_type"`
MaxTxPoolSize uint32 `yaml:"max_txpool_size"`
MaxConfigTxPoolSize uint32 `yaml:"max_config_txpool_size"`
IsMetrics bool `yaml:"is_metrics"`
Performance bool `yaml:"performance"`
BatchMaxSize int `yaml:"batch_max_size"`
BatchCreateTimeout int64 `yaml:"batch_create_timeout"`
CacheFlushTicker int64 `yaml:"cache_flush_ticker"`
CacheThresholdCount int64 `yaml:"cache_threshold_count"`
CacheFlushTimeOut int64 `yaml:"cache_flush_timeout"`
AddTxChannelSize int64 `yaml:"add_tx_channel_size"`
}
type syncConfig struct {
BroadcastTime uint32 `yaml:"broadcast_time"`
BlockPoolSize uint32 `yaml:"block_pool_size"`
WaitTimeOfBlockRequestMsg uint32 `yaml:"wait_time_requested"`
BatchSizeFromOneNode uint32 `yaml:"batch_Size_from_one_node"`
ProcessBlockTick float64 `yaml:"process_block_tick"`
NodeStatusTick float64 `yaml:"node_status_tick"`
LivenessTick float64 `yaml:"liveness_tick"`
SchedulerTick float64 `yaml:"scheduler_tick"`
ReqTimeThreshold float64 `yaml:"req_time_threshold"`
DataDetectionTick float64 `yaml:"data_detection_tick"`
}
type spvConfig struct {
RefreshReqCacheMills int64 `yaml:"refresh_reqcache_mils"`
MessageCacheSize int64 `yaml:"message_cahche_size"`
ReSyncCheckIntervalMills int64 `yaml:"resync_check_interval_mils"`
SyncTimeoutMills int64 `yaml:"sync_timeout_mils"`
ReqSyncBlockNum int64 `yaml:"reqsync_blocknum"`
MaxReqSyncBlockNum int64 `yaml:"max_reqsync_blocknum"`
PeerActiveTime int64 `yaml:"peer_active_time"`
}
type monitorConfig struct {
Enabled bool `yaml:"enabled"`
Port int `yaml:"port"`
}
type pprofConfig struct {
Enabled bool `yaml:"enabled"`
Port int `yaml:"port"`
}
type redisConfig struct {
Url string `yaml:"url"`
Auth string `yaml:"auth"`
DB int `yaml:"db"`
MaxIdle int `yaml:"max_idle"`
MaxActive int `yaml:"max_active"`
IdleTimeout int `yaml:"idle_timeout"`
CacheTimeout int `yaml:"cache_timeout"`
}
type clientConfig struct {
OrgId string `yaml:"org_id"`
UserKeyFilePath string `yaml:"user_key_file_path"`
UserCrtFilePath string `yaml:"user_crt_file_path"`
HashType string `yaml:"hash_type"`
}
type schedulerConfig struct {
RWSetLog bool `yaml:"rwset_log"`
}
type coreConfig struct {
Evidence bool `yaml:"evidence"`
}
// CMConfig - Local config struct
type CMConfig struct {
LogConfig logger.LogConfig `yaml:"log"`
NetConfig netConfig `yaml:"net"`
NodeConfig nodeConfig `yaml:"node"`
RpcConfig rpcConfig `yaml:"rpc"`
BlockChainConfig []BlockchainConfig `yaml:"blockchain"`
StorageConfig StorageConfig `yaml:"storage"`
TxPoolConfig txPoolConfig `yaml:"txpool"`
SyncConfig syncConfig `yaml:"sync"`
SpvConfig spvConfig `yaml:"spv"`
// 开发调试使用
DebugConfig debugConfig `yaml:"debug"`
PProfConfig pprofConfig `yaml:"pprof"`
MonitorConfig monitorConfig `yaml:"monitor"`
CoreConfig coreConfig `yaml:"core"`
SchedulerConfig schedulerConfig `yaml:"scheduler"`
}
// write config into file
func (config *CMConfig) WriteFile(fileName string,fileMode fs.FileMode) error {
data,err := yaml.Marshal(config)
if err != nil{
return err
}
err = ioutil.WriteFile(fileName,data,fileMode)
if err != nil{
return err
}
return nil
}
//read config from configfile
func (config *CMConfig) ReadFile(fileName string) error {
data,err := ioutil.ReadFile(fileName)
if err != nil{
return err
}
err = yaml.Unmarshal(data,config)
if err != nil{
return err
}
return nil
}
// GetBlockChains - get blockchain config list
func (c *CMConfig) GetBlockChains() []BlockchainConfig {
return c.BlockChainConfig
}
| .HistoryDbConfig.SqlDbConfig.DbPrefix == "" {
config.HistoryDbCon | conditional_block |
chainmaker_yaml_types.go | /*
Copyright (C) BABEC. All rights reserved.
Copyright (C) THL A29 Limited, a Tencent company. All rights reserved.
SPDX-License-Identifier: Apache-2.0
*/
package localconf
import (
"chainmaker.org/chainmaker-go/logger"
"gopkg.in/yaml.v2"
"io/ioutil"
"io/fs"
)
type nodeConfig struct {
Type string `yaml:"type"`
CertFile string `yaml:"cert_file"`
PrivKeyFile string `yaml:"priv_key_file"`
PrivKeyPassword string `yaml:"priv_key_password"`
AuthType string `yaml:"auth_type"`
P11Config pkcs11Config `yaml:"pkcs11"`
NodeId string `yaml:"node_id"`
OrgId string `yaml:"org_id"`
SignerCacheSize int `yaml:"signer_cache_size"`
CertCacheSize int `yaml:"cert_cache_size"`
}
type netConfig struct {
Provider string `yaml:"provider"`
ListenAddr string `yaml:"listen_addr"`
PeerStreamPoolSize int `yaml:"peer_stream_pool_size"`
MaxPeerCountAllow int `yaml:"max_peer_count_allow"`
PeerEliminationStrategy int `yaml:"peer_elimination_strategy"`
Seeds []string `yaml:"seeds"`
TLSConfig netTlsConfig `yaml:"tls"`
BlackList blackList `yaml:"blacklist"`
CustomChainTrustRoots []chainTrustRoots `yaml:"custom_chain_trust_roots"`
}
type netTlsConfig struct {
Enabled bool `yaml:"enabled"`
PrivKeyFile string `yaml:"priv_key_file"`
CertFile string `yaml:"cert_file"`
}
type pkcs11Config struct {
Enabled bool `yaml:"enabled"`
Library string `yaml:"library"`
Label string `yaml:"label"`
Password string `yaml:"password"`
SessionCacheSize int `yaml:"session_cache_size"`
Hash string `yaml:"hash"`
}
type blackList struct {
Addresses []string `yaml:"addresses"`
NodeIds []string `yaml:"node_ids"`
}
type chainTrustRoots struct {
ChainId string `yaml:"chain_id"`
TrustRoots []trustRoots `yaml:"trust_roots"`
}
type trustRoots struct {
OrgId string `yaml:"org_id"`
Root string `yaml:"root"`
}
type rpcConfig struct {
Provider string `yaml:"provider"`
Port int `yaml:"port"`
TLSConfig tlsConfig `yaml:"tls"`
RateLimitConfig rateLimitConfig `yaml:"ratelimit"`
SubscriberConfig subscriberConfig `yaml:"subscriber"`
CheckChainConfTrustRootsChangeInterval int `yaml:"check_chain_conf_trust_roots_change_interval"`
}
type tlsConfig struct {
Mode string `yaml:"mode"`
PrivKeyFile string `yaml:"priv_key_file"`
CertFile string `yaml:"cert_file"`
TestClientPrivKeyFile string `yaml:"test_client_priv_key_file"`
TestClientCertFile string `yaml:"test_client_cert_file"`
}
type rateLimitConfig struct {
TokenPerSecond int `yaml:"token_per_second"`
TokenBucketSize int `yaml:"token_bucket_size"`
}
type subscriberConfig struct {
RateLimitConfig rateLimitConfig `yaml:"ratelimit"`
}
type debugConfig struct {
IsCliOpen bool `yaml:"is_cli_open"`
IsHttpOpen bool `yaml:"is_http_open"`
IsProposer bool `yaml:"is_proposer"`
IsNotRWSetCheck bool `yaml:"is_not_rwset_check"`
IsConcurPropose bool `yaml:"is_concur_propose"`
IsConcurVerify bool `yaml:"is_concur_verify"`
IsSolo bool `yaml:"is_solo"`
IsHaltPropose bool `yaml:"is_halt_propose"`
IsSkipAccessControl bool `yaml:"is_skip_access_control"` // true: minimize access control; false: use full access control
IsTraceMemoryUsage bool `yaml:"is_trace_memory_usage"` // true for trace memory usage information periodically
IsProposeDuplicately bool `yaml:"is_propose_duplicately"` // Simulate a node which would propose duplicate after it has proposed Proposal
IsProposeMultiNodeDuplicately bool `yaml:"is_propose_multinode_duplicately"` // Simulate a malicious node which would propose duplicate proposals
IsProposalOldHeight bool `yaml:"is_proposal_old_height"`
IsPrevoteDuplicately bool `yaml:"is_prevote_duplicately"` // Simulate a malicious node which would prevote duplicately
IsPrevoteOldHeight bool `yaml:"is_prevote_old_height"` // Simulate a malicious node which would prevote for oldheight
IsPrevoteLost bool `yaml:"is_prevote_lost"` //prevote vote lost
IsPrecommitDuplicately bool `yaml:"is_precommit_duplicately"` //Simulate a malicious node which would propose duplicate precommits
IsPrecommitOldHeight bool `yaml:"is_precommit_old_height"` // Simulate a malicious node which would Precommit a lower height than current height
IsProposeLost bool `yaml:"is_propose_lost"` //proposal vote lost
IsProposeDelay bool `yaml:"is_propose_delay"` //proposal lost
IsPrevoteDelay bool `yaml:"is_prevote_delay"` //network problem resulting in preovote lost
IsPrecommitLost bool `yaml:"is_precommit_lost"` //precommit vote lost
IsPrecommitDelay bool `yaml:"is_prevcommit_delay"` //network problem resulting in precommit lost
IsCommitWithoutPublish bool `yaml:"is_commit_without_publish"` //if the node committing block without publishing, TRUE;else, FALSE
IsPrevoteInvalid bool `yaml:"is_prevote_invalid"` //simulate a node which sends an invalid prevote(hash=nil)
IsPrecommitInvalid bool `yaml:"is_precommit_invalid"` //simulate a node which sends an invalid precommit(hash=nil)
IsModifyTxPayload bool `yaml:"is_modify_tx_payload"`
IsExtreme bool `yaml:"is_extreme"` //extreme fast mode
UseNetMsgCompression bool `yaml:"use_net_msg_compression"`
IsNetInsecurity bool `yaml:"is_net_insecurity"`
}
type BlockchainConfig struct {
ChainId string
Genesis string
}
type StorageConfig struct {
//默认的Leveldb配置,如果每个DB有不同的设置,可以在自己的DB中进行设置
StorePath string `yaml:"store_path"`
DbPrefix string `yaml:"db_prefix"`
WriteBufferSize int `yaml:"write_buffer_size"`
BloomFilterBits int `yaml:"bloom_filter_bits"`
BlockWriteBufferSize int `yaml:"block_write_buffer_size"`
//数据库模式:light只存区块头,normal存储区块头和交易以及生成的State,full存储了区块头、交易、状态和交易收据(读写集、日志等)
//Mode string `yaml:"mode"`
DisableHistoryDB bool `yaml:"disable_historydb"`
DisableResultDB bool `yaml:"disable_resultdb"`
DisableContractEventDB bool `yaml:"disable_contract_eventdb"`
LogDBWriteAsync bool `yaml:"logdb_write_async"`
BlockDbConfig *DbConfig `yaml:"blockdb_config"`
StateDbConfig *DbConfig `yaml:"statedb_config"`
HistoryDbConfig *DbConfig `yaml:"historydb_config"`
ResultDbConfig *DbConfig `yaml:"resultdb_config"`
ContractEventDbConfig *DbConfig `yaml:"contract_eventdb_config"`
UnArchiveBlockHeight uint64 `yaml:"unarchive_block_height"`
}
func (config *StorageConfig) setDefault() {
if config.DbPrefix != "" {
if config.BlockDbConfig != nil && config.BlockDbConfig.SqlDbConfig != nil && config.BlockDbConfig.SqlDbConfig.DbPrefix == "" {
config.BlockDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.StateDbConfig != nil && config.StateDbConfig.SqlDbConfig != nil && config.StateDbConfig.SqlDbConfig.DbPrefix == "" {
config.StateDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.HistoryDbConfig != nil && config.HistoryDbConfig.SqlDbConfig != nil && config.HistoryDbConfig.SqlDbConfig.DbPrefix == "" {
config.HistoryDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.ResultDbConfig != nil && config.ResultDbConfig.SqlDbConfig != nil && config.ResultDbConfig.SqlDbConfig.DbPrefix == "" {
config.ResultDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.ContractEventDbConfig != nil && config.ContractEventDbConfig.SqlDbConfig != nil && config.ContractEventDbConfig.SqlDbConfig.DbPrefix == "" {
config.ContractEventDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
}
}
func (config *StorageConfig) GetBlockDbConfig() *DbConfig {
if config.BlockDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.BlockDbConfig
}
func (config *StorageConfig) GetStateDbConfig() *DbConfig {
if config.StateDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.StateDbConfig
}
func (config *StorageConfig) GetHistoryDbConfig() *DbConfig {
if config.HistoryDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.HistoryDbConfig
}
func (config *StorageConfig) GetResultDbConfig() *DbConfig {
if config.ResultDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.ResultDbConfig
}
func (config *StorageConfig) GetContractEventDbConfig() *DbConfig {
if config.ContractEventDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.ContractEventDbConfig
}
func (config *StorageConfig) GetDefaultDBConfig() *DbConfig {
lconfig := &LevelDbConfig{
StorePath: config.StorePath,
WriteBufferSize: config.WriteBufferSize,
BloomFilterBits: config.BloomFilterBits,
BlockWriteBufferSize: config.WriteBufferSize,
}
return &DbConfig{
Provider: "leveldb",
LevelDbConfig: lconfig,
}
}
//根据配置的DisableDB的情况,确定当前配置活跃的数据库数量
func (config *StorageConfig) GetActiveDBCount() int {
count := 5
if config.DisableContractEventDB {
count--
}
if config.DisableHistoryDB {
count--
}
if config.DisableResultDB {
count--
}
return count
}
type DbConfig struct {
//leveldb,rocksdb,sql
Provider string `yaml:"provider"`
LevelDbConfig *LevelDbConfig `yaml:"leveldb_config"`
SqlDbConfig *SqlDbConfig `yaml:"sqldb_config"`
}
const DbConfig_Provider_Sql = "sql"
const DbConfig_Provider_LevelDb = "leveldb"
const DbConfig_Provider_RocksDb = "rocksdb"
func (dbc *DbConfig) IsKVDB() bool {
return dbc.Provider == DbConfig_Provider_LevelDb || dbc.Provider == DbConfig_Provider_RocksDb
}
func (dbc *DbConfig) IsSqlDB() bool {
return dbc.Provider == DbConfig_Provider_Sql || dbc.Provider == "mysql" || dbc.Provider == "rdbms" //兼容其他配置情况
}
type LevelDbConfig struct {
StorePath string `yaml:"store_path"`
WriteBufferSize int `yaml:"write_buffer_size"`
BloomFilterBits int `yaml:"bloom_filter_bits"`
BlockWriteBufferSize int `yaml:"block_write_buffer_size"`
}
type SqlDbConfig struct {
//mysql, sqlite, postgres, sqlserver
SqlDbType string `yaml:"sqldb_type"`
Dsn string `yaml:"dsn"`
MaxIdleConns int `yaml:"max_idle_conns"`
MaxOpenConns int `yaml:"max_open_conns"`
ConnMaxLifeTime int `yaml:"conn_max_lifetime"` //second
SqlLogMode string `yaml:"sqllog_mode"` //Silent,Error,Warn,Info
SqlVerifier string `yaml:"sql_verifier"` //simple,safe
DbPrefix string `yaml:"db_prefix"`
}
const SqlDbConfig_SqlDbType_MySQL = "mysql"
const SqlDbConfig_SqlDbType_Sqlite = "sqlite"
type txPoolConfig struct {
PoolType string `yaml:"pool_type"`
MaxTxPoolSize uint32 `yaml:"max_txpool_size"`
MaxConfigTxPoolSize uint32 `yaml:"max_config_txpool_size"`
IsMetrics bool `yaml:"is_metrics"`
Performance bool `yaml:"performance"`
BatchMaxSize int `yaml:"batch_max_size"`
BatchCreateTimeout int64 `yaml:"batch_create_timeout"`
CacheFlushTicker int64 `yaml:"cache_flush_ticker"`
CacheThresholdCount int64 `yaml:"cache_threshold_count"`
CacheFlushTimeOut int64 `yaml:"cache_flush_timeout"`
AddTxChannelSize int64 `yaml:"add_tx_channel_size"`
}
type syncConfig struct {
BroadcastTime uint32 `yaml:"broadcast_time"`
BlockPoolSize uint32 `yaml:"block_pool_size"`
WaitTimeOfBlockRequestMsg uint32 `yaml:"wait_time_requested"`
BatchSizeFromOneNode uint32 `yaml:"batch_Size_from_one_node"`
ProcessBlockTick float64 `yaml:"process_block_tick"`
NodeStatusTick float64 `yaml:"node_status_tick"`
LivenessTick float64 `yaml:"liveness_tick"`
SchedulerTick float64 `yaml:"scheduler_tick"`
ReqTimeThreshold float64 `yaml:"req_time_threshold"`
DataDetectionTick float64 `yaml:"data_detection_tick"`
}
type spvConfig struct {
RefreshReqCacheMills int64 `yaml:"refresh_reqcache_mils"`
MessageCacheSize int64 `yaml:"message_cahche_size"`
ReSyncCheckIntervalMills int64 `yaml:"resync_check_interval_mils"`
SyncTimeoutMills int64 `yaml:"sync_timeout_mils"`
ReqSyncBlockNum int64 `yaml:"reqsync_blocknum"`
MaxReqSyncBlockNum int64 `yaml:"max_reqsync_blocknum"`
PeerActiveTime int64 `yaml:"peer_active_time"`
}
type monitorConfig struct {
Enabled bool `yaml:"enabled"`
Port int `yaml:"port"`
}
type pprofConfig struct {
Enabled bool `yaml:"enabled"`
Port int `yaml:"port"`
}
type redisConfig struct {
Url string `yaml:"url"`
Auth string `yaml:"auth"`
DB int `yaml:"db"`
MaxIdle int `yaml:"max_idle"`
MaxActive int `yaml:"max_active"`
IdleTimeout int `yaml:"idle_timeout"`
CacheTimeout int `yaml:"cache_timeout"`
}
type clientConfig struct {
OrgId string `yaml:"org_id"`
UserKeyFilePath string `yaml:"user_key_file_path"`
UserCrtFilePath string `yaml:"user_crt_file_path"`
HashType string `yaml:"hash_type"`
}
type schedulerConfig struct {
RWSetLog bool `yaml:"rwset_log"`
}
type coreConfig struct {
Evidence bool `yaml:"evidence"`
}
// CMConfig - Local config struct
type CMConfig struct {
LogConfig logger.LogConfig `yaml:"log"`
NetConfig netConfig `yaml:"net"`
NodeConfig nodeConfig `yaml:"node"`
RpcConfig rpcConfig `yaml:"rpc"`
BlockChainConfig []BlockchainConfig `yaml:"blockchain"`
StorageConfig StorageConfig `yaml:"storage"`
TxPoolConfig txPoolConfig `yaml:"txpool"`
SyncConfig syncConfig `yaml:"sync"`
SpvConfig spvConfig `yaml:"spv"`
// 开发调试使用
DebugConfig debugConfig `yaml:"debug"`
PProfConfig pprofConfig `yaml:"pprof"`
MonitorConfig monitorConfig `yaml:"monitor"`
CoreConfig coreConfig `yaml:"core"`
SchedulerConfig schedulerConfig `yaml:"scheduler"`
}
// write config into file
func (config *CMConfig) WriteFile(fileName string,fileMode fs.FileMode) error {
data,err := yaml.Marshal(config)
if err != nil{
return err
}
err = ioutil.WriteFile(fileName,data,fileMode)
if err != nil{
return err
}
return nil
}
//read config from configfile
func (config *CMConfig) ReadFile(fileName string) error {
data,err := ioutil.ReadFile(fileName)
if err != nil{
return err
}
err = yaml.Unmarshal(data,config)
if err != nil{
return err
}
return nil
}
// GetBlockChains - get blockchain config | c (c *CMConfig) GetBlockChains() []BlockchainConfig {
return c.BlockChainConfig
}
| list
fun | identifier_name |
chainmaker_yaml_types.go | /*
Copyright (C) BABEC. All rights reserved.
Copyright (C) THL A29 Limited, a Tencent company. All rights reserved.
SPDX-License-Identifier: Apache-2.0
*/
package localconf
import (
"chainmaker.org/chainmaker-go/logger"
"gopkg.in/yaml.v2"
"io/ioutil"
"io/fs"
)
type nodeConfig struct {
Type string `yaml:"type"`
CertFile string `yaml:"cert_file"`
PrivKeyFile string `yaml:"priv_key_file"`
PrivKeyPassword string `yaml:"priv_key_password"`
AuthType string `yaml:"auth_type"`
P11Config pkcs11Config `yaml:"pkcs11"`
NodeId string `yaml:"node_id"`
OrgId string `yaml:"org_id"`
SignerCacheSize int `yaml:"signer_cache_size"`
CertCacheSize int `yaml:"cert_cache_size"`
}
type netConfig struct {
Provider string `yaml:"provider"`
ListenAddr string `yaml:"listen_addr"`
PeerStreamPoolSize int `yaml:"peer_stream_pool_size"`
MaxPeerCountAllow int `yaml:"max_peer_count_allow"`
PeerEliminationStrategy int `yaml:"peer_elimination_strategy"`
Seeds []string `yaml:"seeds"`
TLSConfig netTlsConfig `yaml:"tls"`
BlackList blackList `yaml:"blacklist"`
CustomChainTrustRoots []chainTrustRoots `yaml:"custom_chain_trust_roots"`
}
type netTlsConfig struct {
Enabled bool `yaml:"enabled"`
PrivKeyFile string `yaml:"priv_key_file"`
CertFile string `yaml:"cert_file"`
}
type pkcs11Config struct {
Enabled bool `yaml:"enabled"`
Library string `yaml:"library"`
Label string `yaml:"label"`
Password string `yaml:"password"`
SessionCacheSize int `yaml:"session_cache_size"`
Hash string `yaml:"hash"`
}
type blackList struct {
Addresses []string `yaml:"addresses"`
NodeIds []string `yaml:"node_ids"`
}
type chainTrustRoots struct {
ChainId string `yaml:"chain_id"`
TrustRoots []trustRoots `yaml:"trust_roots"`
}
type trustRoots struct {
OrgId string `yaml:"org_id"`
Root string `yaml:"root"`
}
type rpcConfig struct {
Provider string `yaml:"provider"`
Port int `yaml:"port"`
TLSConfig tlsConfig `yaml:"tls"`
RateLimitConfig rateLimitConfig `yaml:"ratelimit"`
SubscriberConfig subscriberConfig `yaml:"subscriber"`
CheckChainConfTrustRootsChangeInterval int `yaml:"check_chain_conf_trust_roots_change_interval"`
}
type tlsConfig struct {
Mode string `yaml:"mode"`
PrivKeyFile string `yaml:"priv_key_file"`
CertFile string `yaml:"cert_file"`
TestClientPrivKeyFile string `yaml:"test_client_priv_key_file"`
TestClientCertFile string `yaml:"test_client_cert_file"`
}
type rateLimitConfig struct {
TokenPerSecond int `yaml:"token_per_second"`
TokenBucketSize int `yaml:"token_bucket_size"`
}
type subscriberConfig struct {
RateLimitConfig rateLimitConfig `yaml:"ratelimit"`
}
type debugConfig struct {
IsCliOpen bool `yaml:"is_cli_open"`
IsHttpOpen bool `yaml:"is_http_open"`
IsProposer bool `yaml:"is_proposer"`
IsNotRWSetCheck bool `yaml:"is_not_rwset_check"`
IsConcurPropose bool `yaml:"is_concur_propose"`
IsConcurVerify bool `yaml:"is_concur_verify"`
IsSolo bool `yaml:"is_solo"`
IsHaltPropose bool `yaml:"is_halt_propose"`
IsSkipAccessControl bool `yaml:"is_skip_access_control"` // true: minimize access control; false: use full access control
IsTraceMemoryUsage bool `yaml:"is_trace_memory_usage"` // true for trace memory usage information periodically
IsProposeDuplicately bool `yaml:"is_propose_duplicately"` // Simulate a node which would propose duplicate after it has proposed Proposal
IsProposeMultiNodeDuplicately bool `yaml:"is_propose_multinode_duplicately"` // Simulate a malicious node which would propose duplicate proposals
IsProposalOldHeight bool `yaml:"is_proposal_old_height"`
IsPrevoteDuplicately bool `yaml:"is_prevote_duplicately"` // Simulate a malicious node which would prevote duplicately
IsPrevoteOldHeight bool `yaml:"is_prevote_old_height"` // Simulate a malicious node which would prevote for oldheight
IsPrevoteLost bool `yaml:"is_prevote_lost"` //prevote vote lost
IsPrecommitDuplicately bool `yaml:"is_precommit_duplicately"` //Simulate a malicious node which would propose duplicate precommits
IsPrecommitOldHeight bool `yaml:"is_precommit_old_height"` // Simulate a malicious node which would Precommit a lower height than current height
IsProposeLost bool `yaml:"is_propose_lost"` //proposal vote lost
IsProposeDelay bool `yaml:"is_propose_delay"` //proposal lost
IsPrevoteDelay bool `yaml:"is_prevote_delay"` //network problem resulting in preovote lost
IsPrecommitLost bool `yaml:"is_precommit_lost"` //precommit vote lost
IsPrecommitDelay bool `yaml:"is_prevcommit_delay"` //network problem resulting in precommit lost
IsCommitWithoutPublish bool `yaml:"is_commit_without_publish"` //if the node committing block without publishing, TRUE;else, FALSE
IsPrevoteInvalid bool `yaml:"is_prevote_invalid"` //simulate a node which sends an invalid prevote(hash=nil)
IsPrecommitInvalid bool `yaml:"is_precommit_invalid"` //simulate a node which sends an invalid precommit(hash=nil)
IsModifyTxPayload bool `yaml:"is_modify_tx_payload"`
IsExtreme bool `yaml:"is_extreme"` //extreme fast mode
UseNetMsgCompression bool `yaml:"use_net_msg_compression"`
IsNetInsecurity bool `yaml:"is_net_insecurity"`
}
type BlockchainConfig struct {
ChainId string
Genesis string
}
type StorageConfig struct {
//默认的Leveldb配置,如果每个DB有不同的设置,可以在自己的DB中进行设置
StorePath string `yaml:"store_path"`
DbPrefix string `yaml:"db_prefix"`
WriteBufferSize int `yaml:"write_buffer_size"`
BloomFilterBits int `yaml:"bloom_filter_bits"`
BlockWriteBufferSize int `yaml:"block_write_buffer_size"`
//数据库模式:light只存区块头,normal存储区块头和交易以及生成的State,full存储了区块头、交易、状态和交易收据(读写集、日志等)
//Mode string `yaml:"mode"`
DisableHistoryDB bool `yaml:"disable_historydb"`
DisableResultDB bool `yaml:"disable_resultdb"`
DisableContractEventDB bool `yaml:"disable_contract_eventdb"`
LogDBWriteAsync bool `yaml:"logdb_write_async"`
BlockDbConfig *DbConfig `yaml:"blockdb_config"`
StateDbConfig *DbConfig `yaml:"statedb_config"`
HistoryDbConfig *DbConfig `yaml:"historydb_config"`
ResultDbConfig *DbConfig `yaml:"resultdb_config"`
ContractEventDbConfig *DbConfig `yaml:"contract_eventdb_config"`
UnArchiveBlockHeight uint64 `yaml:"unarchive_block_height"`
}
func (config *StorageConfig) setDefault() {
if config.DbPrefix != "" {
if config.BlockDbConfig != nil && config.BlockDbConfig.SqlDbConfig != nil && config.BlockDbConfig.SqlDbConfig.DbPrefix == "" {
config.BlockDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.StateDbConfig != nil && config.StateDbConfig.SqlDbConfig != nil && config.StateDbConfig.SqlDbConfig.DbPrefix == "" {
config.StateDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.HistoryDbConfig != nil && config.HistoryDbConfig.SqlDbConfig != nil && config.HistoryDbConfig.SqlDbConfig.DbPrefix == "" {
config.HistoryDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.ResultDbConfig != nil && config.ResultDbConfig.SqlDbConfig != nil && config.ResultDbConfig.SqlDbConfig.DbPrefix == "" {
config.ResultDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
if config.ContractEventDbConfig != nil && config.ContractEventDbConfig.SqlDbConfig != nil && config.ContractEventDbConfig.SqlDbConfig.DbPrefix == "" {
config.ContractEventDbConfig.SqlDbConfig.DbPrefix = config.DbPrefix
}
}
}
func (config *StorageConfig) GetBlockDbConfig() *DbConfig {
if config.BlockDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.BlockDbConfig
}
func (config *StorageConfig) GetStateDbConfig() *DbConfig {
if config.StateDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.StateDbConfig
}
func (config *StorageConfig) GetHistoryDbConfig() *DbConfig {
if config.HistoryDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.HistoryDbConfig
}
func (config *StorageConfig) GetResultDbConfig() *DbConfig {
if config.ResultDbConfig == nil {
return config.GetDefaultDBConfig()
}
config.setDefault()
return config.ResultDbConfig
}
func (config *StorageConfig) | .setDefault()
return config.ContractEventDbConfig
}
func (config *StorageConfig) GetDefaultDBConfig() *DbConfig {
lconfig := &LevelDbConfig{
StorePath: config.StorePath,
WriteBufferSize: config.WriteBufferSize,
BloomFilterBits: config.BloomFilterBits,
BlockWriteBufferSize: config.WriteBufferSize,
}
return &DbConfig{
Provider: "leveldb",
LevelDbConfig: lconfig,
}
}
//根据配置的DisableDB的情况,确定当前配置活跃的数据库数量
func (config *StorageConfig) GetActiveDBCount() int {
count := 5
if config.DisableContractEventDB {
count--
}
if config.DisableHistoryDB {
count--
}
if config.DisableResultDB {
count--
}
return count
}
type DbConfig struct {
//leveldb,rocksdb,sql
Provider string `yaml:"provider"`
LevelDbConfig *LevelDbConfig `yaml:"leveldb_config"`
SqlDbConfig *SqlDbConfig `yaml:"sqldb_config"`
}
const DbConfig_Provider_Sql = "sql"
const DbConfig_Provider_LevelDb = "leveldb"
const DbConfig_Provider_RocksDb = "rocksdb"
func (dbc *DbConfig) IsKVDB() bool {
return dbc.Provider == DbConfig_Provider_LevelDb || dbc.Provider == DbConfig_Provider_RocksDb
}
func (dbc *DbConfig) IsSqlDB() bool {
return dbc.Provider == DbConfig_Provider_Sql || dbc.Provider == "mysql" || dbc.Provider == "rdbms" //兼容其他配置情况
}
type LevelDbConfig struct {
StorePath string `yaml:"store_path"`
WriteBufferSize int `yaml:"write_buffer_size"`
BloomFilterBits int `yaml:"bloom_filter_bits"`
BlockWriteBufferSize int `yaml:"block_write_buffer_size"`
}
type SqlDbConfig struct {
//mysql, sqlite, postgres, sqlserver
SqlDbType string `yaml:"sqldb_type"`
Dsn string `yaml:"dsn"`
MaxIdleConns int `yaml:"max_idle_conns"`
MaxOpenConns int `yaml:"max_open_conns"`
ConnMaxLifeTime int `yaml:"conn_max_lifetime"` //second
SqlLogMode string `yaml:"sqllog_mode"` //Silent,Error,Warn,Info
SqlVerifier string `yaml:"sql_verifier"` //simple,safe
DbPrefix string `yaml:"db_prefix"`
}
const SqlDbConfig_SqlDbType_MySQL = "mysql"
const SqlDbConfig_SqlDbType_Sqlite = "sqlite"
type txPoolConfig struct {
PoolType string `yaml:"pool_type"`
MaxTxPoolSize uint32 `yaml:"max_txpool_size"`
MaxConfigTxPoolSize uint32 `yaml:"max_config_txpool_size"`
IsMetrics bool `yaml:"is_metrics"`
Performance bool `yaml:"performance"`
BatchMaxSize int `yaml:"batch_max_size"`
BatchCreateTimeout int64 `yaml:"batch_create_timeout"`
CacheFlushTicker int64 `yaml:"cache_flush_ticker"`
CacheThresholdCount int64 `yaml:"cache_threshold_count"`
CacheFlushTimeOut int64 `yaml:"cache_flush_timeout"`
AddTxChannelSize int64 `yaml:"add_tx_channel_size"`
}
type syncConfig struct {
BroadcastTime uint32 `yaml:"broadcast_time"`
BlockPoolSize uint32 `yaml:"block_pool_size"`
WaitTimeOfBlockRequestMsg uint32 `yaml:"wait_time_requested"`
BatchSizeFromOneNode uint32 `yaml:"batch_Size_from_one_node"`
ProcessBlockTick float64 `yaml:"process_block_tick"`
NodeStatusTick float64 `yaml:"node_status_tick"`
LivenessTick float64 `yaml:"liveness_tick"`
SchedulerTick float64 `yaml:"scheduler_tick"`
ReqTimeThreshold float64 `yaml:"req_time_threshold"`
DataDetectionTick float64 `yaml:"data_detection_tick"`
}
type spvConfig struct {
RefreshReqCacheMills int64 `yaml:"refresh_reqcache_mils"`
MessageCacheSize int64 `yaml:"message_cahche_size"`
ReSyncCheckIntervalMills int64 `yaml:"resync_check_interval_mils"`
SyncTimeoutMills int64 `yaml:"sync_timeout_mils"`
ReqSyncBlockNum int64 `yaml:"reqsync_blocknum"`
MaxReqSyncBlockNum int64 `yaml:"max_reqsync_blocknum"`
PeerActiveTime int64 `yaml:"peer_active_time"`
}
type monitorConfig struct {
Enabled bool `yaml:"enabled"`
Port int `yaml:"port"`
}
type pprofConfig struct {
Enabled bool `yaml:"enabled"`
Port int `yaml:"port"`
}
type redisConfig struct {
Url string `yaml:"url"`
Auth string `yaml:"auth"`
DB int `yaml:"db"`
MaxIdle int `yaml:"max_idle"`
MaxActive int `yaml:"max_active"`
IdleTimeout int `yaml:"idle_timeout"`
CacheTimeout int `yaml:"cache_timeout"`
}
type clientConfig struct {
OrgId string `yaml:"org_id"`
UserKeyFilePath string `yaml:"user_key_file_path"`
UserCrtFilePath string `yaml:"user_crt_file_path"`
HashType string `yaml:"hash_type"`
}
type schedulerConfig struct {
RWSetLog bool `yaml:"rwset_log"`
}
type coreConfig struct {
Evidence bool `yaml:"evidence"`
}
// CMConfig - Local config struct
type CMConfig struct {
LogConfig logger.LogConfig `yaml:"log"`
NetConfig netConfig `yaml:"net"`
NodeConfig nodeConfig `yaml:"node"`
RpcConfig rpcConfig `yaml:"rpc"`
BlockChainConfig []BlockchainConfig `yaml:"blockchain"`
StorageConfig StorageConfig `yaml:"storage"`
TxPoolConfig txPoolConfig `yaml:"txpool"`
SyncConfig syncConfig `yaml:"sync"`
SpvConfig spvConfig `yaml:"spv"`
// 开发调试使用
DebugConfig debugConfig `yaml:"debug"`
PProfConfig pprofConfig `yaml:"pprof"`
MonitorConfig monitorConfig `yaml:"monitor"`
CoreConfig coreConfig `yaml:"core"`
SchedulerConfig schedulerConfig `yaml:"scheduler"`
}
// write config into file
func (config *CMConfig) WriteFile(fileName string,fileMode fs.FileMode) error {
data,err := yaml.Marshal(config)
if err != nil{
return err
}
err = ioutil.WriteFile(fileName,data,fileMode)
if err != nil{
return err
}
return nil
}
//read config from configfile
func (config *CMConfig) ReadFile(fileName string) error {
data,err := ioutil.ReadFile(fileName)
if err != nil{
return err
}
err = yaml.Unmarshal(data,config)
if err != nil{
return err
}
return nil
}
// GetBlockChains - get blockchain config list
func (c *CMConfig) GetBlockChains() []BlockchainConfig {
return c.BlockChainConfig
}
| GetContractEventDbConfig() *DbConfig {
if config.ContractEventDbConfig == nil {
return config.GetDefaultDBConfig()
}
config | identifier_body |
topology.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
use std::path::Path;
use std::io::{BufReader, BufRead, ErrorKind, BufWriter, Write, Read};
use std::fs::File;
use std::collections::HashMap;
use std::cmp::min;
use std::time::Instant;
use std::sync::Arc;
use byteorder::{BigEndian, WriteBytesExt, ByteOrder};
pub struct SegmentList<T> {
shift: usize,
seg_size: usize,
segments: Vec<Vec<T>>,
current: Vec<T>,
len: usize,
}
impl<T> SegmentList<T> {
pub fn new(shift: usize) -> Self {
let seg_size = 1 << shift;
SegmentList {
shift,
seg_size,
segments: Vec::new(),
current: Vec::with_capacity(seg_size),
len: 0,
}
}
pub fn push(&mut self, e: T) {
self.current.push(e);
if self.current.len() == self.seg_size {
self.segments.push(::std::mem::replace(&mut self.current,
Vec::with_capacity(self.seg_size)));
}
self.len += 1;
}
pub fn get(&self, offset: usize) -> Option<&T> {
let seg = offset >> self.shift;
let offset = offset - self.seg_size * seg;
if seg > self.segments.len() {
None
} else if seg == self.segments.len() {
Some(&self.current[offset])
} else {
Some(&self.segments[seg][offset])
}
}
pub fn get_multi(&self, start: usize, len: usize) -> Result<Vec<&T>, String> {
let mut tmp = Vec::with_capacity(len);
let mut seg = start >> self.shift;
let offset = start - self.seg_size * seg;
let mut left = len;
let mut start = offset;
while left > 0 {
let end = min(left, self.seg_size - start);
let read = self.get_in_seg(seg, start, end)?;
for e in read.iter() {
tmp.push(e);
}
seg += 1;
start = 0;
left -= read.len();
}
Ok(tmp)
}
#[inline]
pub fn len(&self) -> usize {
self.len
}
#[inline]
fn get_in_seg(&self, seg: usize, start: usize, len: usize) -> Result<&[T], String> {
let end = start + len;
if seg > self.segments.len() {
Err("Index out of bound".to_owned())
} else if seg == self.segments.len() {
if end > self.current.len() {
Err("Index out of bound".to_owned())
} else {
Ok(&self.current[start..end])
}
} else {
Ok(&self.segments[seg][start..end])
}
}
}
/// 1 -> (2,3,4),
/// 2 -> 3,
/// 4 -> 5,
/// 5 -> (1, 3),
/// 6 -> (7, 8),
/// 7 -> 8
const DEFAULT_GRAPH: [(u64, u64); 10] = [(1, 2), (1, 3), (1, 4), (2, 3), (5, 1), (5, 3), (4, 5), (6, 7), (6, 8), (7, 8)];
#[allow(dead_code)]
pub struct GraphTopology {
partition: u32,
peers: u32,
count: usize,
neighbors: HashMap<u64, Arc<Vec<u64>>>,
}
#[derive(Clone, Serialize, Deserialize, Debug, Abomonation)]
pub struct Vertex {
pub id: u64,
#[cfg(feature = "padding")]
padding_1: [u64; 8],
#[cfg(feature = "padding")]
padding_2: [u64; 7],
}
impl Vertex {
pub fn new(id: u64) -> Self {
Vertex {
id,
#[cfg(feature = "padding")]
padding_1: [0; 8],
#[cfg(feature = "padding")]
padding_2: [0; 7]
}
}
}
pub struct NeighborIter {
cursor: usize,
len: usize,
inner: Arc<Vec<u64>>
}
impl NeighborIter {
pub fn new(neighbors: &Arc<Vec<u64>>) -> Self {
NeighborIter {
cursor: 0,
len: neighbors.len(),
inner: neighbors.clone(),
}
}
pub fn empty() -> Self {
NeighborIter {
cursor: 0,
len: 0,
inner: Arc::new(vec![])
}
}
}
impl Iterator for NeighborIter {
type Item = Vertex;
fn next(&mut self) -> Option<Self::Item> {
if self.cursor == self.len {
None
} else {
self.cursor += 1;
Some(Vertex::new(self.inner[self.cursor - 1]))
}
}
}
impl GraphTopology {
pub fn with_default(partition: u32, peers: u32, directed: bool) -> Self {
let mut neighbors = HashMap::new();
let mut count = 0;
for (s, d) in DEFAULT_GRAPH.iter() {
if peers == 1 || (s % peers as u64) as u32 == partition {
let n = neighbors.entry(*s).or_insert(Vec::new());
n.push(*d);
count += 1;
}
if peers == 1 || (d % peers as u64) as u32 == partition {
let n = neighbors.entry(*d).or_insert(Vec::new());
if !directed {
n.push(*s);
count += 1;
}
}
}
let mut arc_neighbors = HashMap::new();
for (k, v) in neighbors.drain() {
arc_neighbors.insert(k, Arc::new(v));
}
GraphTopology {
partition,
count,
peers,
neighbors: arc_neighbors
}
}
pub fn load<P: AsRef<Path>>(partition: u32, peers: u32, directed: bool, split: char, path: P) -> Self {
let as_bin = path.as_ref().with_extension("bin");
Self::convert_to_bin(path, as_bin.as_path(), split);
info!("Convert raw file format to binary {:?}", as_bin.as_os_str());
Self::load_bin(partition, peers, directed, as_bin.as_path())
}
/// Load graph from binary file.
///
/// The binary file should follow this format: src1 dst1 src2 dst2 src3 dst3 ...
/// Vertex IDs are 32-bit big endian integers.
pub fn load_bin<P: AsRef<Path>>(partition: u32, peers: u32, directed: bool, path: P) -> Self {
let mut reader = BufReader::new(File::open(path).unwrap());
//let mut reader = File::open(path).unwrap();
let mut neighbors = HashMap::new();
let mut count = 0_usize;
let mut start = ::std::time::Instant::now();
let mut buffer = [0u8;1<< 12];
let peers = peers as u64;
loop {
let read = match reader.read(&mut buffer[0..]) {
Ok(n) => n,
Err(e) => {
if let ErrorKind::UnexpectedEof = e.kind() {
break
} else {
panic!(e);
}
}
};
if read > 0 {
assert!(read % 8 == 0, "unexpected: read {} bytes", read);
let valid = &mut buffer[0..read];
let mut extract = 0;
while extract < read {
let src = BigEndian::read_u64(&valid[extract..]);
let dst = BigEndian::read_u64(&valid[extract + 8..]);
if peers == 1 || (src % peers) as u32 == partition {
let n = neighbors.entry(src).or_insert_with(|| Vec::new());
n.push(dst);
}
if !directed && (peers == 1 || (dst % peers) as u32 == partition) {
let n = neighbors.entry(dst).or_insert_with(|| Vec::new());
n.push(src);
}
count += 1;
if log::log_enabled!(log::Level::Debug) {
if count % 5000000 == 0 {
let duration_ms = (Instant::now() - start).as_millis() as f64;
let speed = 5000000.0 / duration_ms * 1000.0;
debug!("Scanned edges: {}, speed: {:.2}/s", count, speed);
start = ::std::time::Instant::now();
}
}
extract += 16;
}
} else {
break
}
}
let mut arc_neighbors = HashMap::new();
for (k, v) in neighbors.drain() {
arc_neighbors.insert(k, Arc::new(v));
}
GraphTopology {
partition,
count,
peers: peers as u32,
neighbors: arc_neighbors,
}
}
/// Convert graph file from raw text format to binary format.
/// The binary file should follow this format: src1 dst1 src2 dst2 src3 dst3 ...
/// Vertex IDs are 32-bit big endian integers.
pub fn convert_to_bin<P1: AsRef<Path>, P2: AsRef<Path>>(input: P1, output: P2, split: char) {
let reader = BufReader::new(File::open(input).unwrap());
let mut writer = BufWriter::new(File::create(output).unwrap());
let mut count = 0_usize;
let mut start = ::std::time::Instant::now();
for edge in reader.lines() {
let edge = edge.unwrap();
let edge = edge.split(split).collect::<Vec<_>>();
let src: u64 = edge[0].parse().unwrap();
let dst: u64 = edge[1].parse().unwrap();
writer.write_u64::<BigEndian>(src).unwrap();
writer.write_u64::<BigEndian>(dst).unwrap();
count += 1;
if count % 5000000 == 0 {
let duration_ms = (Instant::now() - start).as_millis() as f64;
let speed = 5000000.0 / duration_ms * 1000.0;
debug!("Scanned edges: {}, speed: {:.2}/s", count, speed);
start = ::std::time::Instant::now();
}
}
writer.flush().unwrap();
}
pub fn get_neighbors(&self, src: &u64) -> Option<NeighborIter> {
self.neighbors.get(src).map(|n| {
NeighborIter::new(n)
})
}
#[inline]
pub fn count_nodes(&self) -> usize |
#[inline]
pub fn count_edges(&self) -> usize {
self.count
}
}
#[cfg(test)]
mod test {
use super::*;
use std::path::PathBuf;
#[test]
fn test_segment_list() {
let mut list = SegmentList::new(6);
for i in 0..1024 {
list.push(i);
}
for i in 0..1024 {
let e = list.get(i as usize).unwrap();
assert_eq!(i, *e);
}
for i in 0..1014 {
let res = list.get_multi(i as usize, 10).unwrap();
//println!("get res {:?}", res);
for j in 0..10 {
assert_eq!(i + j, *res[j]);
}
}
}
#[test]
fn test_graph_load() {
let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
d.push("data/twitter_rv.net");
{
println!("dir is : {}", d.display());
let graph = GraphTopology::load(1, 1, true, ' ', d.as_path());
println!("finish load");
let n = graph.get_neighbors(&12).unwrap()
.fold(0, |count, _| count + 1);
assert_eq!(n, 4);
}
{
let graph = GraphTopology::load_bin(1, 1, true, d.as_path().with_extension("bin"));
let n = graph.get_neighbors(&12).unwrap()
.map(|v| {
println!("get v : {}", v.id);
v
})
.fold(0, |count, _| count + 1);
assert_eq!(n, 4);
}
}
#[test]
fn test_graph() {
let graph = GraphTopology::with_default(3, 1, true);
{
let mut ns = vec![];
for n in graph.get_neighbors(&1).unwrap() {
ns.push(n);
}
let mut ns = ns.into_iter().map(|v| v.id).collect::<Vec<_>>();
ns.sort();
assert_eq!(ns, vec![2, 3, 4]);
}
{
let mut ns = vec![];
for n in graph.get_neighbors(&6).unwrap() {
ns.push(n);
}
let mut ns = ns.into_iter().map(|v| v.id).collect::<Vec<_>>();
ns.sort();
assert_eq!(ns, vec![7, 8]);
}
}
}
| {
self.neighbors.len()
} | identifier_body |
topology.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
use std::path::Path;
use std::io::{BufReader, BufRead, ErrorKind, BufWriter, Write, Read};
use std::fs::File;
use std::collections::HashMap;
use std::cmp::min;
use std::time::Instant;
use std::sync::Arc;
use byteorder::{BigEndian, WriteBytesExt, ByteOrder};
pub struct SegmentList<T> {
shift: usize,
seg_size: usize,
segments: Vec<Vec<T>>,
current: Vec<T>,
len: usize,
}
impl<T> SegmentList<T> {
pub fn new(shift: usize) -> Self {
let seg_size = 1 << shift;
SegmentList {
shift,
seg_size,
segments: Vec::new(),
current: Vec::with_capacity(seg_size),
len: 0,
}
}
pub fn push(&mut self, e: T) {
self.current.push(e);
if self.current.len() == self.seg_size {
self.segments.push(::std::mem::replace(&mut self.current,
Vec::with_capacity(self.seg_size)));
}
self.len += 1;
}
pub fn get(&self, offset: usize) -> Option<&T> {
let seg = offset >> self.shift;
let offset = offset - self.seg_size * seg;
if seg > self.segments.len() {
None
} else if seg == self.segments.len() {
Some(&self.current[offset])
} else {
Some(&self.segments[seg][offset])
}
}
pub fn get_multi(&self, start: usize, len: usize) -> Result<Vec<&T>, String> {
let mut tmp = Vec::with_capacity(len);
let mut seg = start >> self.shift;
let offset = start - self.seg_size * seg;
let mut left = len;
let mut start = offset;
while left > 0 {
let end = min(left, self.seg_size - start);
let read = self.get_in_seg(seg, start, end)?;
for e in read.iter() {
tmp.push(e);
}
seg += 1;
start = 0;
left -= read.len();
}
Ok(tmp)
}
#[inline]
pub fn len(&self) -> usize {
self.len
}
#[inline]
fn get_in_seg(&self, seg: usize, start: usize, len: usize) -> Result<&[T], String> {
let end = start + len;
if seg > self.segments.len() {
Err("Index out of bound".to_owned())
} else if seg == self.segments.len() {
if end > self.current.len() {
Err("Index out of bound".to_owned())
} else {
Ok(&self.current[start..end])
}
} else {
Ok(&self.segments[seg][start..end])
}
}
}
/// 1 -> (2,3,4),
/// 2 -> 3,
/// 4 -> 5,
/// 5 -> (1, 3),
/// 6 -> (7, 8),
/// 7 -> 8
const DEFAULT_GRAPH: [(u64, u64); 10] = [(1, 2), (1, 3), (1, 4), (2, 3), (5, 1), (5, 3), (4, 5), (6, 7), (6, 8), (7, 8)];
#[allow(dead_code)]
pub struct GraphTopology {
partition: u32,
peers: u32,
count: usize,
neighbors: HashMap<u64, Arc<Vec<u64>>>,
}
#[derive(Clone, Serialize, Deserialize, Debug, Abomonation)]
pub struct Vertex {
pub id: u64,
#[cfg(feature = "padding")]
padding_1: [u64; 8],
#[cfg(feature = "padding")]
padding_2: [u64; 7],
}
impl Vertex {
pub fn | (id: u64) -> Self {
Vertex {
id,
#[cfg(feature = "padding")]
padding_1: [0; 8],
#[cfg(feature = "padding")]
padding_2: [0; 7]
}
}
}
pub struct NeighborIter {
cursor: usize,
len: usize,
inner: Arc<Vec<u64>>
}
impl NeighborIter {
pub fn new(neighbors: &Arc<Vec<u64>>) -> Self {
NeighborIter {
cursor: 0,
len: neighbors.len(),
inner: neighbors.clone(),
}
}
pub fn empty() -> Self {
NeighborIter {
cursor: 0,
len: 0,
inner: Arc::new(vec![])
}
}
}
impl Iterator for NeighborIter {
type Item = Vertex;
fn next(&mut self) -> Option<Self::Item> {
if self.cursor == self.len {
None
} else {
self.cursor += 1;
Some(Vertex::new(self.inner[self.cursor - 1]))
}
}
}
impl GraphTopology {
pub fn with_default(partition: u32, peers: u32, directed: bool) -> Self {
let mut neighbors = HashMap::new();
let mut count = 0;
for (s, d) in DEFAULT_GRAPH.iter() {
if peers == 1 || (s % peers as u64) as u32 == partition {
let n = neighbors.entry(*s).or_insert(Vec::new());
n.push(*d);
count += 1;
}
if peers == 1 || (d % peers as u64) as u32 == partition {
let n = neighbors.entry(*d).or_insert(Vec::new());
if !directed {
n.push(*s);
count += 1;
}
}
}
let mut arc_neighbors = HashMap::new();
for (k, v) in neighbors.drain() {
arc_neighbors.insert(k, Arc::new(v));
}
GraphTopology {
partition,
count,
peers,
neighbors: arc_neighbors
}
}
pub fn load<P: AsRef<Path>>(partition: u32, peers: u32, directed: bool, split: char, path: P) -> Self {
let as_bin = path.as_ref().with_extension("bin");
Self::convert_to_bin(path, as_bin.as_path(), split);
info!("Convert raw file format to binary {:?}", as_bin.as_os_str());
Self::load_bin(partition, peers, directed, as_bin.as_path())
}
/// Load graph from binary file.
///
/// The binary file should follow this format: src1 dst1 src2 dst2 src3 dst3 ...
/// Vertex IDs are 32-bit big endian integers.
pub fn load_bin<P: AsRef<Path>>(partition: u32, peers: u32, directed: bool, path: P) -> Self {
let mut reader = BufReader::new(File::open(path).unwrap());
//let mut reader = File::open(path).unwrap();
let mut neighbors = HashMap::new();
let mut count = 0_usize;
let mut start = ::std::time::Instant::now();
let mut buffer = [0u8;1<< 12];
let peers = peers as u64;
loop {
let read = match reader.read(&mut buffer[0..]) {
Ok(n) => n,
Err(e) => {
if let ErrorKind::UnexpectedEof = e.kind() {
break
} else {
panic!(e);
}
}
};
if read > 0 {
assert!(read % 8 == 0, "unexpected: read {} bytes", read);
let valid = &mut buffer[0..read];
let mut extract = 0;
while extract < read {
let src = BigEndian::read_u64(&valid[extract..]);
let dst = BigEndian::read_u64(&valid[extract + 8..]);
if peers == 1 || (src % peers) as u32 == partition {
let n = neighbors.entry(src).or_insert_with(|| Vec::new());
n.push(dst);
}
if !directed && (peers == 1 || (dst % peers) as u32 == partition) {
let n = neighbors.entry(dst).or_insert_with(|| Vec::new());
n.push(src);
}
count += 1;
if log::log_enabled!(log::Level::Debug) {
if count % 5000000 == 0 {
let duration_ms = (Instant::now() - start).as_millis() as f64;
let speed = 5000000.0 / duration_ms * 1000.0;
debug!("Scanned edges: {}, speed: {:.2}/s", count, speed);
start = ::std::time::Instant::now();
}
}
extract += 16;
}
} else {
break
}
}
let mut arc_neighbors = HashMap::new();
for (k, v) in neighbors.drain() {
arc_neighbors.insert(k, Arc::new(v));
}
GraphTopology {
partition,
count,
peers: peers as u32,
neighbors: arc_neighbors,
}
}
/// Convert graph file from raw text format to binary format.
/// The binary file should follow this format: src1 dst1 src2 dst2 src3 dst3 ...
/// Vertex IDs are 32-bit big endian integers.
pub fn convert_to_bin<P1: AsRef<Path>, P2: AsRef<Path>>(input: P1, output: P2, split: char) {
let reader = BufReader::new(File::open(input).unwrap());
let mut writer = BufWriter::new(File::create(output).unwrap());
let mut count = 0_usize;
let mut start = ::std::time::Instant::now();
for edge in reader.lines() {
let edge = edge.unwrap();
let edge = edge.split(split).collect::<Vec<_>>();
let src: u64 = edge[0].parse().unwrap();
let dst: u64 = edge[1].parse().unwrap();
writer.write_u64::<BigEndian>(src).unwrap();
writer.write_u64::<BigEndian>(dst).unwrap();
count += 1;
if count % 5000000 == 0 {
let duration_ms = (Instant::now() - start).as_millis() as f64;
let speed = 5000000.0 / duration_ms * 1000.0;
debug!("Scanned edges: {}, speed: {:.2}/s", count, speed);
start = ::std::time::Instant::now();
}
}
writer.flush().unwrap();
}
pub fn get_neighbors(&self, src: &u64) -> Option<NeighborIter> {
self.neighbors.get(src).map(|n| {
NeighborIter::new(n)
})
}
#[inline]
pub fn count_nodes(&self) -> usize {
self.neighbors.len()
}
#[inline]
pub fn count_edges(&self) -> usize {
self.count
}
}
#[cfg(test)]
mod test {
use super::*;
use std::path::PathBuf;
#[test]
fn test_segment_list() {
let mut list = SegmentList::new(6);
for i in 0..1024 {
list.push(i);
}
for i in 0..1024 {
let e = list.get(i as usize).unwrap();
assert_eq!(i, *e);
}
for i in 0..1014 {
let res = list.get_multi(i as usize, 10).unwrap();
//println!("get res {:?}", res);
for j in 0..10 {
assert_eq!(i + j, *res[j]);
}
}
}
#[test]
fn test_graph_load() {
let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
d.push("data/twitter_rv.net");
{
println!("dir is : {}", d.display());
let graph = GraphTopology::load(1, 1, true, ' ', d.as_path());
println!("finish load");
let n = graph.get_neighbors(&12).unwrap()
.fold(0, |count, _| count + 1);
assert_eq!(n, 4);
}
{
let graph = GraphTopology::load_bin(1, 1, true, d.as_path().with_extension("bin"));
let n = graph.get_neighbors(&12).unwrap()
.map(|v| {
println!("get v : {}", v.id);
v
})
.fold(0, |count, _| count + 1);
assert_eq!(n, 4);
}
}
#[test]
fn test_graph() {
let graph = GraphTopology::with_default(3, 1, true);
{
let mut ns = vec![];
for n in graph.get_neighbors(&1).unwrap() {
ns.push(n);
}
let mut ns = ns.into_iter().map(|v| v.id).collect::<Vec<_>>();
ns.sort();
assert_eq!(ns, vec![2, 3, 4]);
}
{
let mut ns = vec![];
for n in graph.get_neighbors(&6).unwrap() {
ns.push(n);
}
let mut ns = ns.into_iter().map(|v| v.id).collect::<Vec<_>>();
ns.sort();
assert_eq!(ns, vec![7, 8]);
}
}
}
| new | identifier_name |
topology.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
use std::path::Path;
use std::io::{BufReader, BufRead, ErrorKind, BufWriter, Write, Read};
use std::fs::File;
use std::collections::HashMap;
use std::cmp::min;
use std::time::Instant;
use std::sync::Arc;
use byteorder::{BigEndian, WriteBytesExt, ByteOrder};
pub struct SegmentList<T> {
shift: usize,
seg_size: usize,
segments: Vec<Vec<T>>,
current: Vec<T>,
len: usize,
}
impl<T> SegmentList<T> {
pub fn new(shift: usize) -> Self {
let seg_size = 1 << shift;
SegmentList {
shift,
seg_size,
segments: Vec::new(),
current: Vec::with_capacity(seg_size),
len: 0,
}
}
pub fn push(&mut self, e: T) {
self.current.push(e);
if self.current.len() == self.seg_size {
self.segments.push(::std::mem::replace(&mut self.current,
Vec::with_capacity(self.seg_size)));
}
self.len += 1;
}
pub fn get(&self, offset: usize) -> Option<&T> {
let seg = offset >> self.shift;
let offset = offset - self.seg_size * seg;
if seg > self.segments.len() {
None
} else if seg == self.segments.len() {
Some(&self.current[offset])
} else {
Some(&self.segments[seg][offset])
}
}
pub fn get_multi(&self, start: usize, len: usize) -> Result<Vec<&T>, String> {
let mut tmp = Vec::with_capacity(len);
let mut seg = start >> self.shift;
let offset = start - self.seg_size * seg;
let mut left = len;
let mut start = offset;
while left > 0 {
let end = min(left, self.seg_size - start);
let read = self.get_in_seg(seg, start, end)?;
for e in read.iter() {
tmp.push(e);
}
seg += 1;
start = 0;
left -= read.len();
}
Ok(tmp)
}
#[inline]
pub fn len(&self) -> usize {
self.len
}
#[inline]
fn get_in_seg(&self, seg: usize, start: usize, len: usize) -> Result<&[T], String> {
let end = start + len;
if seg > self.segments.len() {
Err("Index out of bound".to_owned())
} else if seg == self.segments.len() {
if end > self.current.len() {
Err("Index out of bound".to_owned())
} else {
Ok(&self.current[start..end])
}
} else {
Ok(&self.segments[seg][start..end])
}
}
}
/// 1 -> (2,3,4),
/// 2 -> 3,
/// 4 -> 5,
/// 5 -> (1, 3),
/// 6 -> (7, 8),
/// 7 -> 8
const DEFAULT_GRAPH: [(u64, u64); 10] = [(1, 2), (1, 3), (1, 4), (2, 3), (5, 1), (5, 3), (4, 5), (6, 7), (6, 8), (7, 8)];
#[allow(dead_code)]
pub struct GraphTopology {
partition: u32,
peers: u32,
count: usize,
neighbors: HashMap<u64, Arc<Vec<u64>>>,
}
#[derive(Clone, Serialize, Deserialize, Debug, Abomonation)]
pub struct Vertex {
pub id: u64,
#[cfg(feature = "padding")]
padding_1: [u64; 8],
#[cfg(feature = "padding")]
padding_2: [u64; 7],
}
impl Vertex {
pub fn new(id: u64) -> Self {
Vertex {
id,
#[cfg(feature = "padding")]
padding_1: [0; 8],
#[cfg(feature = "padding")]
padding_2: [0; 7]
}
}
}
pub struct NeighborIter {
cursor: usize,
len: usize,
inner: Arc<Vec<u64>>
}
impl NeighborIter {
pub fn new(neighbors: &Arc<Vec<u64>>) -> Self {
NeighborIter {
cursor: 0,
len: neighbors.len(),
inner: neighbors.clone(),
}
}
pub fn empty() -> Self {
NeighborIter {
cursor: 0,
len: 0,
inner: Arc::new(vec![])
}
}
}
impl Iterator for NeighborIter {
type Item = Vertex;
fn next(&mut self) -> Option<Self::Item> {
if self.cursor == self.len {
None
} else {
self.cursor += 1;
Some(Vertex::new(self.inner[self.cursor - 1]))
}
}
}
impl GraphTopology {
pub fn with_default(partition: u32, peers: u32, directed: bool) -> Self {
let mut neighbors = HashMap::new();
let mut count = 0;
for (s, d) in DEFAULT_GRAPH.iter() {
if peers == 1 || (s % peers as u64) as u32 == partition {
let n = neighbors.entry(*s).or_insert(Vec::new());
n.push(*d); | if peers == 1 || (d % peers as u64) as u32 == partition {
let n = neighbors.entry(*d).or_insert(Vec::new());
if !directed {
n.push(*s);
count += 1;
}
}
}
let mut arc_neighbors = HashMap::new();
for (k, v) in neighbors.drain() {
arc_neighbors.insert(k, Arc::new(v));
}
GraphTopology {
partition,
count,
peers,
neighbors: arc_neighbors
}
}
pub fn load<P: AsRef<Path>>(partition: u32, peers: u32, directed: bool, split: char, path: P) -> Self {
let as_bin = path.as_ref().with_extension("bin");
Self::convert_to_bin(path, as_bin.as_path(), split);
info!("Convert raw file format to binary {:?}", as_bin.as_os_str());
Self::load_bin(partition, peers, directed, as_bin.as_path())
}
/// Load graph from binary file.
///
/// The binary file should follow this format: src1 dst1 src2 dst2 src3 dst3 ...
/// Vertex IDs are 32-bit big endian integers.
pub fn load_bin<P: AsRef<Path>>(partition: u32, peers: u32, directed: bool, path: P) -> Self {
let mut reader = BufReader::new(File::open(path).unwrap());
//let mut reader = File::open(path).unwrap();
let mut neighbors = HashMap::new();
let mut count = 0_usize;
let mut start = ::std::time::Instant::now();
let mut buffer = [0u8;1<< 12];
let peers = peers as u64;
loop {
let read = match reader.read(&mut buffer[0..]) {
Ok(n) => n,
Err(e) => {
if let ErrorKind::UnexpectedEof = e.kind() {
break
} else {
panic!(e);
}
}
};
if read > 0 {
assert!(read % 8 == 0, "unexpected: read {} bytes", read);
let valid = &mut buffer[0..read];
let mut extract = 0;
while extract < read {
let src = BigEndian::read_u64(&valid[extract..]);
let dst = BigEndian::read_u64(&valid[extract + 8..]);
if peers == 1 || (src % peers) as u32 == partition {
let n = neighbors.entry(src).or_insert_with(|| Vec::new());
n.push(dst);
}
if !directed && (peers == 1 || (dst % peers) as u32 == partition) {
let n = neighbors.entry(dst).or_insert_with(|| Vec::new());
n.push(src);
}
count += 1;
if log::log_enabled!(log::Level::Debug) {
if count % 5000000 == 0 {
let duration_ms = (Instant::now() - start).as_millis() as f64;
let speed = 5000000.0 / duration_ms * 1000.0;
debug!("Scanned edges: {}, speed: {:.2}/s", count, speed);
start = ::std::time::Instant::now();
}
}
extract += 16;
}
} else {
break
}
}
let mut arc_neighbors = HashMap::new();
for (k, v) in neighbors.drain() {
arc_neighbors.insert(k, Arc::new(v));
}
GraphTopology {
partition,
count,
peers: peers as u32,
neighbors: arc_neighbors,
}
}
/// Convert graph file from raw text format to binary format.
/// The binary file should follow this format: src1 dst1 src2 dst2 src3 dst3 ...
/// Vertex IDs are 32-bit big endian integers.
pub fn convert_to_bin<P1: AsRef<Path>, P2: AsRef<Path>>(input: P1, output: P2, split: char) {
let reader = BufReader::new(File::open(input).unwrap());
let mut writer = BufWriter::new(File::create(output).unwrap());
let mut count = 0_usize;
let mut start = ::std::time::Instant::now();
for edge in reader.lines() {
let edge = edge.unwrap();
let edge = edge.split(split).collect::<Vec<_>>();
let src: u64 = edge[0].parse().unwrap();
let dst: u64 = edge[1].parse().unwrap();
writer.write_u64::<BigEndian>(src).unwrap();
writer.write_u64::<BigEndian>(dst).unwrap();
count += 1;
if count % 5000000 == 0 {
let duration_ms = (Instant::now() - start).as_millis() as f64;
let speed = 5000000.0 / duration_ms * 1000.0;
debug!("Scanned edges: {}, speed: {:.2}/s", count, speed);
start = ::std::time::Instant::now();
}
}
writer.flush().unwrap();
}
pub fn get_neighbors(&self, src: &u64) -> Option<NeighborIter> {
self.neighbors.get(src).map(|n| {
NeighborIter::new(n)
})
}
#[inline]
pub fn count_nodes(&self) -> usize {
self.neighbors.len()
}
#[inline]
pub fn count_edges(&self) -> usize {
self.count
}
}
#[cfg(test)]
mod test {
use super::*;
use std::path::PathBuf;
#[test]
fn test_segment_list() {
let mut list = SegmentList::new(6);
for i in 0..1024 {
list.push(i);
}
for i in 0..1024 {
let e = list.get(i as usize).unwrap();
assert_eq!(i, *e);
}
for i in 0..1014 {
let res = list.get_multi(i as usize, 10).unwrap();
//println!("get res {:?}", res);
for j in 0..10 {
assert_eq!(i + j, *res[j]);
}
}
}
#[test]
fn test_graph_load() {
let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
d.push("data/twitter_rv.net");
{
println!("dir is : {}", d.display());
let graph = GraphTopology::load(1, 1, true, ' ', d.as_path());
println!("finish load");
let n = graph.get_neighbors(&12).unwrap()
.fold(0, |count, _| count + 1);
assert_eq!(n, 4);
}
{
let graph = GraphTopology::load_bin(1, 1, true, d.as_path().with_extension("bin"));
let n = graph.get_neighbors(&12).unwrap()
.map(|v| {
println!("get v : {}", v.id);
v
})
.fold(0, |count, _| count + 1);
assert_eq!(n, 4);
}
}
#[test]
fn test_graph() {
let graph = GraphTopology::with_default(3, 1, true);
{
let mut ns = vec![];
for n in graph.get_neighbors(&1).unwrap() {
ns.push(n);
}
let mut ns = ns.into_iter().map(|v| v.id).collect::<Vec<_>>();
ns.sort();
assert_eq!(ns, vec![2, 3, 4]);
}
{
let mut ns = vec![];
for n in graph.get_neighbors(&6).unwrap() {
ns.push(n);
}
let mut ns = ns.into_iter().map(|v| v.id).collect::<Vec<_>>();
ns.sort();
assert_eq!(ns, vec![7, 8]);
}
}
} | count += 1;
}
| random_line_split |
lib.rs | //! Contract module which acts as a timelocked controller. When set as the
//! owner of an `Ownable` smart contract, it enforces a timelock on all
//! `onlyOwner` maintenance operations. This gives time for users of the
//! controlled contract to exit before a potentially dangerous maintenance
//! operation is applied.
//!
//! By default, this contract is self administered, meaning administration tasks
//! have to go through the timelock process. The proposer (resp executor) role
//! is in charge of proposing (resp executing) operations. A common use case is
//! to position this {TimelockController} as the owner of a smart contract, with
//! a multisig or a DAO as the sole proposer.
#![cfg_attr(not(feature = "std"), no_std)]
pub use access_control::{
Error,
Result,
RoleId,
};
use ink_env::hash::Blake2x256;
use ink_lang::ForwardCallMut;
use ink_prelude::vec::Vec;
use metis_access_control as access_control;
use metis_lang::{
Env,
FromAccountId,
Storage,
};
use metis_timelock_controller_receiver::Receiver;
use scale::Encode;
#[cfg(not(feature = "ink-as-dependency"))]
use ::ink_storage::{
collections::HashMap as StorageHashMap,
lazy::Lazy,
traits::SpreadLayout,
};
pub const TIMELOCK_ADMIN_ROLE: RoleId =
RoleId::new(metis_lang::hash!(TIMELOCK_ADMIN_ROLE));
pub const PROPOSER_ROLE: RoleId = RoleId::new(metis_lang::hash!(PROPOSER_ROLE));
pub const EXECUTOR_ROLE: RoleId = RoleId::new(metis_lang::hash!(EXECUTOR_ROLE));
pub const _DONE_TIMESTAMP: u8 = 1;
/// The Data of ERC20 component
#[cfg_attr(feature = "std", derive(::ink_storage::traits::StorageLayout))]
#[derive(Debug, SpreadLayout)]
pub struct Data<E: Env> {
/// min delay for controller
pub min_delay: Lazy<E::Timestamp>,
pub timestamps: StorageHashMap<[u8; 32], E::Timestamp>,
}
impl<E: Env> Data<E> {
/// Sets the value of the `cap`. This value is immutable, it can only be
/// set once during construction. |
impl<E> Default for Data<E>
where
E: Env,
{
fn default() -> Self {
Self {
min_delay: Lazy::new(E::Timestamp::from(1_u8)),
timestamps: StorageHashMap::default(),
}
}
}
impl<E: Env> Data<E> {}
/// The `EventEmit` impl the event emit api for component.
pub trait EventEmit<E: Env> {
/// Emitted when a call is scheduled as part of operation `id`.
fn emit_event_call_scheduled(
&mut self,
id: [u8; 32],
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
predecessor: Option<[u8; 32]>,
delay: E::Timestamp,
);
/// Emitted when a call is performed as part of operation `id`.
fn emit_event_call_executed(
&mut self,
id: [u8; 32],
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
);
/// Emitted when operation `id` is cancelled.
fn emit_event_cancelled(&mut self, id: [u8; 32]);
/// Emitted when the minimum delay for future operations is modified.
fn emit_event_min_delay_change(
&mut self,
old_duration: E::Timestamp,
new_duration: E::Timestamp,
);
}
pub trait Impl<E>: access_control::Impl<E> + EventEmit<E> + Storage<E, Data<E>>
where
E: Env,
{
/// initial the state of contract
fn init(
&mut self,
min_delay: E::Timestamp,
proposers: Vec<E::AccountId>,
executors: Vec<E::AccountId>,
) {
access_control::Impl::_set_role_admin(
self,
TIMELOCK_ADMIN_ROLE,
TIMELOCK_ADMIN_ROLE,
);
access_control::Impl::_set_role_admin(self, PROPOSER_ROLE, TIMELOCK_ADMIN_ROLE);
access_control::Impl::_set_role_admin(self, EXECUTOR_ROLE, TIMELOCK_ADMIN_ROLE);
// deployer + self administration
access_control::Impl::_setup_role(self, TIMELOCK_ADMIN_ROLE, Self::caller());
// access_control::Impl::_setup_role(self, TIMELOCK_ADMIN_ROLE, address(this));
// register proposers
for proposer in proposers.iter() {
access_control::Impl::_setup_role(self, PROPOSER_ROLE, proposer.clone());
}
// register executors
for executor in executors.iter() {
access_control::Impl::_setup_role(self, EXECUTOR_ROLE, executor.clone());
}
Lazy::set(
&mut Storage::<E, Data<E>>::get_mut(self).min_delay,
min_delay,
);
self.emit_event_min_delay_change(E::Timestamp::from(0_u8), min_delay);
}
/// To make a function callable only by a certain role. In
/// addition to checking the sender's role, `address(0)` 's role is also
/// considered. Granting a role to `address(0)` is equivalent to enabling
/// this role for everyone.
fn ensure_only_role_or_open_role(&self, role: RoleId) {
if !access_control::Impl::has_role(self, role, E::AccountId::default()) {
access_control::Impl::ensure_caller_role(self, role);
}
}
/// Returns whether an id correspond to a registered operation. This
/// includes both Pending, Ready and Done operations.
fn is_operation(&self, id: &[u8; 32]) -> bool {
self.get_timestamp(id) > E::Timestamp::from(0_u8)
}
/// Returns whether an operation is pending or not.
fn is_operation_pending(&self, id: &[u8; 32]) -> bool {
self.get_timestamp(id) > E::Timestamp::from(_DONE_TIMESTAMP)
}
/// Returns whether an operation is ready or not.
fn is_operation_ready(&self, id: &[u8; 32]) -> bool {
let timestamp = self.get_timestamp(id);
timestamp > E::Timestamp::from(_DONE_TIMESTAMP)
&& timestamp <= Self::block_timestamp()
}
/// Returns whether an operation is done or not.
fn is_operation_done(&self, id: &[u8; 32]) -> bool {
self.get_timestamp(id) == E::Timestamp::from(_DONE_TIMESTAMP)
}
/// Returns the timestamp at with an operation becomes ready (0 for
/// unset operations, 1 for done operations).
fn get_timestamp(&self, id: &[u8; 32]) -> E::Timestamp {
*Storage::<E, Data<E>>::get(self)
.timestamps
.get(id)
.unwrap_or(&E::Timestamp::from(0_u8))
}
/// Returns the minimum delay for an operation to become valid.
///
/// This value can be changed by executing an operation that calls `updateDelay`.
fn get_min_delay(&self) -> E::Timestamp {
*Storage::<E, Data<E>>::get(self).min_delay
}
/// Returns the identifier of an operation containing a single
/// transaction.
fn hash_operation(
&self,
target: &E::AccountId,
value: &E::Balance,
data: &Vec<u8>,
predecessor: &Option<[u8; 32]>,
salt: &[u8; 32],
) -> [u8; 32] {
// for target + value + data + predecessor + salt
let mut hash_data: Vec<u8> = Vec::with_capacity(128 + data.len());
hash_data.append(&mut target.encode());
hash_data.append(&mut value.encode());
hash_data.append(&mut data.clone());
hash_data.append(&mut predecessor.encode());
for s in salt.into_iter() {
hash_data.push(s.clone());
}
Self::hash_bytes::<Blake2x256>(&hash_data)
}
/// Schedule an operation containing a single transaction.
///
/// Emits a `CallScheduled` event.
///
/// Requirements:
///
/// - the caller must have the 'proposer' role.
fn schedule(
&mut self,
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
predecessor: Option<[u8; 32]>,
salt: [u8; 32],
delay: E::Timestamp,
) {
access_control::Impl::ensure_caller_role(self, PROPOSER_ROLE);
let id = self.hash_operation(&target, &value, &data, &predecessor, &salt);
self._schedule(id, delay);
self.emit_event_call_scheduled(id, target, value, data, predecessor, delay);
}
/// Schedule an operation that is to becomes valid after a given delay.
fn _schedule(&mut self, id: [u8; 32], delay: E::Timestamp) {
assert!(
!self.is_operation(&id),
"TimelockController: operation already scheduled"
);
assert!(
delay >= self.get_min_delay(),
"TimelockController: insufficient delay"
);
Storage::<E, Data<E>>::get_mut(self)
.timestamps
.insert(id, Self::block_timestamp() + delay);
}
/// Cancel an operation.
///
/// Requirements:
///
/// - the caller must have the 'proposer' role.
fn cancel(&mut self, id: [u8; 32]) {
access_control::Impl::ensure_caller_role(self, PROPOSER_ROLE);
assert!(
self.is_operation_pending(&id),
"TimelockController: operation cannot be cancelled"
);
Storage::<E, Data<E>>::get_mut(self).timestamps.take(&id);
self.emit_event_cancelled(id);
}
/// Execute an (ready) operation containing a single transaction.
///
/// Emits a `CallExecuted` event.
///
/// Requirements:
///
/// - the caller must have the 'executor' role.
fn execute(
&mut self,
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
predecessor: Option<[u8; 32]>,
salt: [u8; 32],
) {
self.ensure_only_role_or_open_role(EXECUTOR_ROLE);
let id = self.hash_operation(&target, &value, &data, &predecessor, &salt);
self._before_call(predecessor);
self._call(id, target, value, data);
self._after_call(id);
}
/// Checks before execution of an operation's calls.
fn _before_call(&self, predecessor: Option<[u8; 32]>) {
match predecessor {
Some(predecessor) => {
assert!(
self.is_operation_done(&predecessor),
"TimelockController: missing dependency"
);
()
}
None => (),
}
}
/// Checks after execution of an operation's calls.
fn _after_call(&mut self, id: [u8; 32]) {
assert!(
self.is_operation_ready(&id),
"TimelockController: operation is not ready"
);
Storage::<E, Data<E>>::get_mut(self)
.timestamps
.insert(id, E::Timestamp::from(_DONE_TIMESTAMP));
}
/// Execute an operation's call.
///
/// Emits a `CallExecuted` event.
fn _call(
&mut self,
id: [u8; 32],
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
) {
let mut receiver =
<Receiver as FromAccountId<E>>::from_account_id(target.clone());
let success = receiver
.call_mut()
.on_call(Self::caller().into(), data.clone())
.transferred_value(value.into())
.fire();
let success = match success {
Ok(success) => success,
Err(_) => false,
};
assert!(
success,
"TimelockController: underlying transaction reverted"
);
self.emit_event_call_executed(id, target, value, data);
}
/// Changes the minimum timelock duration for future operations.
///
/// Emits a `MinDelayChange` event.
///
/// Requirements:
///
/// - the caller must be the timelock itself. This can only be achieved by scheduling and later executing
/// an operation where the timelock is the target and the data is the ABI-encoded call to this fn.
fn _set_update_delay(&mut self, new_delay: E::Timestamp) {
let current_min_delay = self.get_min_delay();
self.emit_event_min_delay_change(current_min_delay, new_delay);
*Storage::<E, Data<E>>::get_mut(self).min_delay = new_delay;
}
} | pub fn new() -> Self {
Self::default()
}
} | random_line_split |
lib.rs | //! Contract module which acts as a timelocked controller. When set as the
//! owner of an `Ownable` smart contract, it enforces a timelock on all
//! `onlyOwner` maintenance operations. This gives time for users of the
//! controlled contract to exit before a potentially dangerous maintenance
//! operation is applied.
//!
//! By default, this contract is self administered, meaning administration tasks
//! have to go through the timelock process. The proposer (resp executor) role
//! is in charge of proposing (resp executing) operations. A common use case is
//! to position this {TimelockController} as the owner of a smart contract, with
//! a multisig or a DAO as the sole proposer.
#![cfg_attr(not(feature = "std"), no_std)]
pub use access_control::{
Error,
Result,
RoleId,
};
use ink_env::hash::Blake2x256;
use ink_lang::ForwardCallMut;
use ink_prelude::vec::Vec;
use metis_access_control as access_control;
use metis_lang::{
Env,
FromAccountId,
Storage,
};
use metis_timelock_controller_receiver::Receiver;
use scale::Encode;
#[cfg(not(feature = "ink-as-dependency"))]
use ::ink_storage::{
collections::HashMap as StorageHashMap,
lazy::Lazy,
traits::SpreadLayout,
};
pub const TIMELOCK_ADMIN_ROLE: RoleId =
RoleId::new(metis_lang::hash!(TIMELOCK_ADMIN_ROLE));
pub const PROPOSER_ROLE: RoleId = RoleId::new(metis_lang::hash!(PROPOSER_ROLE));
pub const EXECUTOR_ROLE: RoleId = RoleId::new(metis_lang::hash!(EXECUTOR_ROLE));
pub const _DONE_TIMESTAMP: u8 = 1;
/// The Data of ERC20 component
#[cfg_attr(feature = "std", derive(::ink_storage::traits::StorageLayout))]
#[derive(Debug, SpreadLayout)]
pub struct Data<E: Env> {
/// min delay for controller
pub min_delay: Lazy<E::Timestamp>,
pub timestamps: StorageHashMap<[u8; 32], E::Timestamp>,
}
impl<E: Env> Data<E> {
/// Sets the value of the `cap`. This value is immutable, it can only be
/// set once during construction.
pub fn new() -> Self {
Self::default()
}
}
impl<E> Default for Data<E>
where
E: Env,
{
fn default() -> Self {
Self {
min_delay: Lazy::new(E::Timestamp::from(1_u8)),
timestamps: StorageHashMap::default(),
}
}
}
impl<E: Env> Data<E> {}
/// The `EventEmit` impl the event emit api for component.
pub trait EventEmit<E: Env> {
/// Emitted when a call is scheduled as part of operation `id`.
fn emit_event_call_scheduled(
&mut self,
id: [u8; 32],
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
predecessor: Option<[u8; 32]>,
delay: E::Timestamp,
);
/// Emitted when a call is performed as part of operation `id`.
fn emit_event_call_executed(
&mut self,
id: [u8; 32],
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
);
/// Emitted when operation `id` is cancelled.
fn emit_event_cancelled(&mut self, id: [u8; 32]);
/// Emitted when the minimum delay for future operations is modified.
fn emit_event_min_delay_change(
&mut self,
old_duration: E::Timestamp,
new_duration: E::Timestamp,
);
}
pub trait Impl<E>: access_control::Impl<E> + EventEmit<E> + Storage<E, Data<E>>
where
E: Env,
{
/// initial the state of contract
fn init(
&mut self,
min_delay: E::Timestamp,
proposers: Vec<E::AccountId>,
executors: Vec<E::AccountId>,
) {
access_control::Impl::_set_role_admin(
self,
TIMELOCK_ADMIN_ROLE,
TIMELOCK_ADMIN_ROLE,
);
access_control::Impl::_set_role_admin(self, PROPOSER_ROLE, TIMELOCK_ADMIN_ROLE);
access_control::Impl::_set_role_admin(self, EXECUTOR_ROLE, TIMELOCK_ADMIN_ROLE);
// deployer + self administration
access_control::Impl::_setup_role(self, TIMELOCK_ADMIN_ROLE, Self::caller());
// access_control::Impl::_setup_role(self, TIMELOCK_ADMIN_ROLE, address(this));
// register proposers
for proposer in proposers.iter() {
access_control::Impl::_setup_role(self, PROPOSER_ROLE, proposer.clone());
}
// register executors
for executor in executors.iter() {
access_control::Impl::_setup_role(self, EXECUTOR_ROLE, executor.clone());
}
Lazy::set(
&mut Storage::<E, Data<E>>::get_mut(self).min_delay,
min_delay,
);
self.emit_event_min_delay_change(E::Timestamp::from(0_u8), min_delay);
}
/// To make a function callable only by a certain role. In
/// addition to checking the sender's role, `address(0)` 's role is also
/// considered. Granting a role to `address(0)` is equivalent to enabling
/// this role for everyone.
fn ensure_only_role_or_open_role(&self, role: RoleId) {
if !access_control::Impl::has_role(self, role, E::AccountId::default()) {
access_control::Impl::ensure_caller_role(self, role);
}
}
/// Returns whether an id correspond to a registered operation. This
/// includes both Pending, Ready and Done operations.
fn is_operation(&self, id: &[u8; 32]) -> bool {
self.get_timestamp(id) > E::Timestamp::from(0_u8)
}
/// Returns whether an operation is pending or not.
fn is_operation_pending(&self, id: &[u8; 32]) -> bool {
self.get_timestamp(id) > E::Timestamp::from(_DONE_TIMESTAMP)
}
/// Returns whether an operation is ready or not.
fn is_operation_ready(&self, id: &[u8; 32]) -> bool {
let timestamp = self.get_timestamp(id);
timestamp > E::Timestamp::from(_DONE_TIMESTAMP)
&& timestamp <= Self::block_timestamp()
}
/// Returns whether an operation is done or not.
fn is_operation_done(&self, id: &[u8; 32]) -> bool {
self.get_timestamp(id) == E::Timestamp::from(_DONE_TIMESTAMP)
}
/// Returns the timestamp at with an operation becomes ready (0 for
/// unset operations, 1 for done operations).
fn get_timestamp(&self, id: &[u8; 32]) -> E::Timestamp {
*Storage::<E, Data<E>>::get(self)
.timestamps
.get(id)
.unwrap_or(&E::Timestamp::from(0_u8))
}
/// Returns the minimum delay for an operation to become valid.
///
/// This value can be changed by executing an operation that calls `updateDelay`.
fn get_min_delay(&self) -> E::Timestamp {
*Storage::<E, Data<E>>::get(self).min_delay
}
/// Returns the identifier of an operation containing a single
/// transaction.
fn hash_operation(
&self,
target: &E::AccountId,
value: &E::Balance,
data: &Vec<u8>,
predecessor: &Option<[u8; 32]>,
salt: &[u8; 32],
) -> [u8; 32] {
// for target + value + data + predecessor + salt
let mut hash_data: Vec<u8> = Vec::with_capacity(128 + data.len());
hash_data.append(&mut target.encode());
hash_data.append(&mut value.encode());
hash_data.append(&mut data.clone());
hash_data.append(&mut predecessor.encode());
for s in salt.into_iter() {
hash_data.push(s.clone());
}
Self::hash_bytes::<Blake2x256>(&hash_data)
}
/// Schedule an operation containing a single transaction.
///
/// Emits a `CallScheduled` event.
///
/// Requirements:
///
/// - the caller must have the 'proposer' role.
fn schedule(
&mut self,
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
predecessor: Option<[u8; 32]>,
salt: [u8; 32],
delay: E::Timestamp,
) {
access_control::Impl::ensure_caller_role(self, PROPOSER_ROLE);
let id = self.hash_operation(&target, &value, &data, &predecessor, &salt);
self._schedule(id, delay);
self.emit_event_call_scheduled(id, target, value, data, predecessor, delay);
}
/// Schedule an operation that is to becomes valid after a given delay.
fn _schedule(&mut self, id: [u8; 32], delay: E::Timestamp) {
assert!(
!self.is_operation(&id),
"TimelockController: operation already scheduled"
);
assert!(
delay >= self.get_min_delay(),
"TimelockController: insufficient delay"
);
Storage::<E, Data<E>>::get_mut(self)
.timestamps
.insert(id, Self::block_timestamp() + delay);
}
/// Cancel an operation.
///
/// Requirements:
///
/// - the caller must have the 'proposer' role.
fn cancel(&mut self, id: [u8; 32]) {
access_control::Impl::ensure_caller_role(self, PROPOSER_ROLE);
assert!(
self.is_operation_pending(&id),
"TimelockController: operation cannot be cancelled"
);
Storage::<E, Data<E>>::get_mut(self).timestamps.take(&id);
self.emit_event_cancelled(id);
}
/// Execute an (ready) operation containing a single transaction.
///
/// Emits a `CallExecuted` event.
///
/// Requirements:
///
/// - the caller must have the 'executor' role.
fn execute(
&mut self,
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
predecessor: Option<[u8; 32]>,
salt: [u8; 32],
) {
self.ensure_only_role_or_open_role(EXECUTOR_ROLE);
let id = self.hash_operation(&target, &value, &data, &predecessor, &salt);
self._before_call(predecessor);
self._call(id, target, value, data);
self._after_call(id);
}
/// Checks before execution of an operation's calls.
fn _before_call(&self, predecessor: Option<[u8; 32]>) {
match predecessor {
Some(predecessor) => {
assert!(
self.is_operation_done(&predecessor),
"TimelockController: missing dependency"
);
()
}
None => (),
}
}
/// Checks after execution of an operation's calls.
fn _after_call(&mut self, id: [u8; 32]) |
/// Execute an operation's call.
///
/// Emits a `CallExecuted` event.
fn _call(
&mut self,
id: [u8; 32],
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
) {
let mut receiver =
<Receiver as FromAccountId<E>>::from_account_id(target.clone());
let success = receiver
.call_mut()
.on_call(Self::caller().into(), data.clone())
.transferred_value(value.into())
.fire();
let success = match success {
Ok(success) => success,
Err(_) => false,
};
assert!(
success,
"TimelockController: underlying transaction reverted"
);
self.emit_event_call_executed(id, target, value, data);
}
/// Changes the minimum timelock duration for future operations.
///
/// Emits a `MinDelayChange` event.
///
/// Requirements:
///
/// - the caller must be the timelock itself. This can only be achieved by scheduling and later executing
/// an operation where the timelock is the target and the data is the ABI-encoded call to this fn.
fn _set_update_delay(&mut self, new_delay: E::Timestamp) {
let current_min_delay = self.get_min_delay();
self.emit_event_min_delay_change(current_min_delay, new_delay);
*Storage::<E, Data<E>>::get_mut(self).min_delay = new_delay;
}
}
| {
assert!(
self.is_operation_ready(&id),
"TimelockController: operation is not ready"
);
Storage::<E, Data<E>>::get_mut(self)
.timestamps
.insert(id, E::Timestamp::from(_DONE_TIMESTAMP));
} | identifier_body |
lib.rs | //! Contract module which acts as a timelocked controller. When set as the
//! owner of an `Ownable` smart contract, it enforces a timelock on all
//! `onlyOwner` maintenance operations. This gives time for users of the
//! controlled contract to exit before a potentially dangerous maintenance
//! operation is applied.
//!
//! By default, this contract is self administered, meaning administration tasks
//! have to go through the timelock process. The proposer (resp executor) role
//! is in charge of proposing (resp executing) operations. A common use case is
//! to position this {TimelockController} as the owner of a smart contract, with
//! a multisig or a DAO as the sole proposer.
#![cfg_attr(not(feature = "std"), no_std)]
pub use access_control::{
Error,
Result,
RoleId,
};
use ink_env::hash::Blake2x256;
use ink_lang::ForwardCallMut;
use ink_prelude::vec::Vec;
use metis_access_control as access_control;
use metis_lang::{
Env,
FromAccountId,
Storage,
};
use metis_timelock_controller_receiver::Receiver;
use scale::Encode;
#[cfg(not(feature = "ink-as-dependency"))]
use ::ink_storage::{
collections::HashMap as StorageHashMap,
lazy::Lazy,
traits::SpreadLayout,
};
pub const TIMELOCK_ADMIN_ROLE: RoleId =
RoleId::new(metis_lang::hash!(TIMELOCK_ADMIN_ROLE));
pub const PROPOSER_ROLE: RoleId = RoleId::new(metis_lang::hash!(PROPOSER_ROLE));
pub const EXECUTOR_ROLE: RoleId = RoleId::new(metis_lang::hash!(EXECUTOR_ROLE));
pub const _DONE_TIMESTAMP: u8 = 1;
/// The Data of ERC20 component
#[cfg_attr(feature = "std", derive(::ink_storage::traits::StorageLayout))]
#[derive(Debug, SpreadLayout)]
pub struct Data<E: Env> {
/// min delay for controller
pub min_delay: Lazy<E::Timestamp>,
pub timestamps: StorageHashMap<[u8; 32], E::Timestamp>,
}
impl<E: Env> Data<E> {
/// Sets the value of the `cap`. This value is immutable, it can only be
/// set once during construction.
pub fn new() -> Self {
Self::default()
}
}
impl<E> Default for Data<E>
where
E: Env,
{
fn default() -> Self {
Self {
min_delay: Lazy::new(E::Timestamp::from(1_u8)),
timestamps: StorageHashMap::default(),
}
}
}
impl<E: Env> Data<E> {}
/// The `EventEmit` impl the event emit api for component.
pub trait EventEmit<E: Env> {
/// Emitted when a call is scheduled as part of operation `id`.
fn emit_event_call_scheduled(
&mut self,
id: [u8; 32],
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
predecessor: Option<[u8; 32]>,
delay: E::Timestamp,
);
/// Emitted when a call is performed as part of operation `id`.
fn emit_event_call_executed(
&mut self,
id: [u8; 32],
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
);
/// Emitted when operation `id` is cancelled.
fn emit_event_cancelled(&mut self, id: [u8; 32]);
/// Emitted when the minimum delay for future operations is modified.
fn emit_event_min_delay_change(
&mut self,
old_duration: E::Timestamp,
new_duration: E::Timestamp,
);
}
pub trait Impl<E>: access_control::Impl<E> + EventEmit<E> + Storage<E, Data<E>>
where
E: Env,
{
/// initial the state of contract
fn init(
&mut self,
min_delay: E::Timestamp,
proposers: Vec<E::AccountId>,
executors: Vec<E::AccountId>,
) {
access_control::Impl::_set_role_admin(
self,
TIMELOCK_ADMIN_ROLE,
TIMELOCK_ADMIN_ROLE,
);
access_control::Impl::_set_role_admin(self, PROPOSER_ROLE, TIMELOCK_ADMIN_ROLE);
access_control::Impl::_set_role_admin(self, EXECUTOR_ROLE, TIMELOCK_ADMIN_ROLE);
// deployer + self administration
access_control::Impl::_setup_role(self, TIMELOCK_ADMIN_ROLE, Self::caller());
// access_control::Impl::_setup_role(self, TIMELOCK_ADMIN_ROLE, address(this));
// register proposers
for proposer in proposers.iter() {
access_control::Impl::_setup_role(self, PROPOSER_ROLE, proposer.clone());
}
// register executors
for executor in executors.iter() {
access_control::Impl::_setup_role(self, EXECUTOR_ROLE, executor.clone());
}
Lazy::set(
&mut Storage::<E, Data<E>>::get_mut(self).min_delay,
min_delay,
);
self.emit_event_min_delay_change(E::Timestamp::from(0_u8), min_delay);
}
/// To make a function callable only by a certain role. In
/// addition to checking the sender's role, `address(0)` 's role is also
/// considered. Granting a role to `address(0)` is equivalent to enabling
/// this role for everyone.
fn ensure_only_role_or_open_role(&self, role: RoleId) {
if !access_control::Impl::has_role(self, role, E::AccountId::default()) {
access_control::Impl::ensure_caller_role(self, role);
}
}
/// Returns whether an id correspond to a registered operation. This
/// includes both Pending, Ready and Done operations.
fn is_operation(&self, id: &[u8; 32]) -> bool {
self.get_timestamp(id) > E::Timestamp::from(0_u8)
}
/// Returns whether an operation is pending or not.
fn | (&self, id: &[u8; 32]) -> bool {
self.get_timestamp(id) > E::Timestamp::from(_DONE_TIMESTAMP)
}
/// Returns whether an operation is ready or not.
fn is_operation_ready(&self, id: &[u8; 32]) -> bool {
let timestamp = self.get_timestamp(id);
timestamp > E::Timestamp::from(_DONE_TIMESTAMP)
&& timestamp <= Self::block_timestamp()
}
/// Returns whether an operation is done or not.
fn is_operation_done(&self, id: &[u8; 32]) -> bool {
self.get_timestamp(id) == E::Timestamp::from(_DONE_TIMESTAMP)
}
/// Returns the timestamp at with an operation becomes ready (0 for
/// unset operations, 1 for done operations).
fn get_timestamp(&self, id: &[u8; 32]) -> E::Timestamp {
*Storage::<E, Data<E>>::get(self)
.timestamps
.get(id)
.unwrap_or(&E::Timestamp::from(0_u8))
}
/// Returns the minimum delay for an operation to become valid.
///
/// This value can be changed by executing an operation that calls `updateDelay`.
fn get_min_delay(&self) -> E::Timestamp {
*Storage::<E, Data<E>>::get(self).min_delay
}
/// Returns the identifier of an operation containing a single
/// transaction.
fn hash_operation(
&self,
target: &E::AccountId,
value: &E::Balance,
data: &Vec<u8>,
predecessor: &Option<[u8; 32]>,
salt: &[u8; 32],
) -> [u8; 32] {
// for target + value + data + predecessor + salt
let mut hash_data: Vec<u8> = Vec::with_capacity(128 + data.len());
hash_data.append(&mut target.encode());
hash_data.append(&mut value.encode());
hash_data.append(&mut data.clone());
hash_data.append(&mut predecessor.encode());
for s in salt.into_iter() {
hash_data.push(s.clone());
}
Self::hash_bytes::<Blake2x256>(&hash_data)
}
/// Schedule an operation containing a single transaction.
///
/// Emits a `CallScheduled` event.
///
/// Requirements:
///
/// - the caller must have the 'proposer' role.
fn schedule(
&mut self,
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
predecessor: Option<[u8; 32]>,
salt: [u8; 32],
delay: E::Timestamp,
) {
access_control::Impl::ensure_caller_role(self, PROPOSER_ROLE);
let id = self.hash_operation(&target, &value, &data, &predecessor, &salt);
self._schedule(id, delay);
self.emit_event_call_scheduled(id, target, value, data, predecessor, delay);
}
/// Schedule an operation that is to becomes valid after a given delay.
fn _schedule(&mut self, id: [u8; 32], delay: E::Timestamp) {
assert!(
!self.is_operation(&id),
"TimelockController: operation already scheduled"
);
assert!(
delay >= self.get_min_delay(),
"TimelockController: insufficient delay"
);
Storage::<E, Data<E>>::get_mut(self)
.timestamps
.insert(id, Self::block_timestamp() + delay);
}
/// Cancel an operation.
///
/// Requirements:
///
/// - the caller must have the 'proposer' role.
fn cancel(&mut self, id: [u8; 32]) {
access_control::Impl::ensure_caller_role(self, PROPOSER_ROLE);
assert!(
self.is_operation_pending(&id),
"TimelockController: operation cannot be cancelled"
);
Storage::<E, Data<E>>::get_mut(self).timestamps.take(&id);
self.emit_event_cancelled(id);
}
/// Execute an (ready) operation containing a single transaction.
///
/// Emits a `CallExecuted` event.
///
/// Requirements:
///
/// - the caller must have the 'executor' role.
fn execute(
&mut self,
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
predecessor: Option<[u8; 32]>,
salt: [u8; 32],
) {
self.ensure_only_role_or_open_role(EXECUTOR_ROLE);
let id = self.hash_operation(&target, &value, &data, &predecessor, &salt);
self._before_call(predecessor);
self._call(id, target, value, data);
self._after_call(id);
}
/// Checks before execution of an operation's calls.
fn _before_call(&self, predecessor: Option<[u8; 32]>) {
match predecessor {
Some(predecessor) => {
assert!(
self.is_operation_done(&predecessor),
"TimelockController: missing dependency"
);
()
}
None => (),
}
}
/// Checks after execution of an operation's calls.
fn _after_call(&mut self, id: [u8; 32]) {
assert!(
self.is_operation_ready(&id),
"TimelockController: operation is not ready"
);
Storage::<E, Data<E>>::get_mut(self)
.timestamps
.insert(id, E::Timestamp::from(_DONE_TIMESTAMP));
}
/// Execute an operation's call.
///
/// Emits a `CallExecuted` event.
fn _call(
&mut self,
id: [u8; 32],
target: E::AccountId,
value: E::Balance,
data: Vec<u8>,
) {
let mut receiver =
<Receiver as FromAccountId<E>>::from_account_id(target.clone());
let success = receiver
.call_mut()
.on_call(Self::caller().into(), data.clone())
.transferred_value(value.into())
.fire();
let success = match success {
Ok(success) => success,
Err(_) => false,
};
assert!(
success,
"TimelockController: underlying transaction reverted"
);
self.emit_event_call_executed(id, target, value, data);
}
/// Changes the minimum timelock duration for future operations.
///
/// Emits a `MinDelayChange` event.
///
/// Requirements:
///
/// - the caller must be the timelock itself. This can only be achieved by scheduling and later executing
/// an operation where the timelock is the target and the data is the ABI-encoded call to this fn.
fn _set_update_delay(&mut self, new_delay: E::Timestamp) {
let current_min_delay = self.get_min_delay();
self.emit_event_min_delay_change(current_min_delay, new_delay);
*Storage::<E, Data<E>>::get_mut(self).min_delay = new_delay;
}
}
| is_operation_pending | identifier_name |
scheduling.rs | use super::*;
use crate::domain::scheduling::*;
use chrono::{DateTime, SecondsFormat};
use futures::{Async, Future, Stream};
use std::sync::{Arc, Mutex, MutexGuard};
use std::time::Duration;
use tokio::timer::{
delay_queue::{Expired, Key as DelayQueueKey},
DelayQueue,
};
enum DelayQueueItem<T> {
TaskSchedule(T),
KeepAlive,
}
// The valid duration is limited by this upper bound that is
// reserved for the keep alive token!
// TODO: The maximum acceptable value of 795 days that does
// not cause an internal panic has been discovered experimentally.
// No references about this limit can be found in the Tokio docs!?
const MAX_DELAY_TIMEOUT: Duration = Duration::from_secs(795 * 24 * 60 * 60);
struct ScheduledTaskQueue<T> {
task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>,
upcoming_tasks: DelayQueue<DelayQueueItem<T>>,
keep_alive_key: DelayQueueKey,
}
fn format_datetime<Z: chrono::TimeZone>(dt: &DateTime<Z>) -> String
where
<Z as chrono::TimeZone>::Offset: std::fmt::Display,
{
dt.to_rfc3339_opts(SecondsFormat::Millis, true)
}
impl<T> ScheduledTaskQueue<T>
where
T: TaskSchedule + std::fmt::Debug,
<T::TimeZone as chrono::TimeZone>::Offset: std::fmt::Display,
{
pub fn new(task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>) -> Self {
let mut upcoming_tasks = DelayQueue::new();
let keep_alive_key = upcoming_tasks.insert(DelayQueueItem::KeepAlive, MAX_DELAY_TIMEOUT);
Self {
task_scheduler,
upcoming_tasks,
keep_alive_key,
}
}
fn keep_alive(&mut self) {
self.upcoming_tasks
.reset(&self.keep_alive_key, MAX_DELAY_TIMEOUT);
}
pub fn handle_expired(&mut self, expired: Expired<DelayQueueItem<T>>) {
match expired.into_inner() {
DelayQueueItem::TaskSchedule(task_schedule) => self.reschedule_expired(task_schedule),
DelayQueueItem::KeepAlive => self.reschedule_all(Default::default()),
}
}
fn reschedule_expired(&mut self, task_schedule: T) {
let now = self.task_scheduler.now();
debug!("{:?} expired at {}", task_schedule, now);
let task_reschedule = self
.task_scheduler
.dispatch_and_reschedule_expired_task(&now, task_schedule);
if let Some(task_schedule) = task_reschedule {
self.schedule_next(&now, task_schedule);
self.keep_alive();
}
}
fn schedule_next(
&mut self,
now: &DateTime<T::TimeZone>,
task_schedule: T,
) -> Option<DateTime<T::TimeZone>> {
if let Some(next_after_now) = task_schedule.schedule_next_after(now) {
debug_assert!(next_after_now > *now);
debug!(
"Rescheduling {:?} at {}",
task_schedule,
format_datetime(&next_after_now)
);
let timeout = (next_after_now.clone() - now.clone()).to_std().unwrap();
if timeout < MAX_DELAY_TIMEOUT {
self.upcoming_tasks
.insert(DelayQueueItem::TaskSchedule(task_schedule), timeout);
Some(next_after_now)
} else {
error!(
"Cannot reschedule {:?} at {}: Maximum timeout duration exceeded: {:?} >= {:?}",
task_schedule,
format_datetime(&next_after_now),
timeout,
MAX_DELAY_TIMEOUT
);
None
}
} else {
debug!("Finished {:?}", task_schedule);
None
}
}
pub fn reschedule_all(&mut self, task_schedules: Vec<T>) {
// Clear the delay queue, i.e. discard all tasks
debug!("Discarding all scheduled tasks");
self.upcoming_tasks.clear();
// Repopulate the delay queue with the given irrigation schedules
debug_assert!(self.upcoming_tasks.is_empty());
self.upcoming_tasks.reserve(task_schedules.len() + 1);
let now = self.task_scheduler.now();
task_schedules.into_iter().for_each(|task_schedule| {
self.schedule_next(&now, task_schedule);
});
self.keep_alive_key = self
.upcoming_tasks
.insert(DelayQueueItem::KeepAlive, MAX_DELAY_TIMEOUT);
}
}
impl<T> Stream for ScheduledTaskQueue<T> {
type Item = <DelayQueue<DelayQueueItem<T>> as Stream>::Item;
type Error = <DelayQueue<DelayQueueItem<T>> as Stream>::Error;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
self.upcoming_tasks.poll()
}
}
// This mutex will only be locked within the same executor,
// though maybe subsequently by different threads. There
// won't be any lock contention, i.e. no thread will block
// when locking this mutex! It is only required to satisfy
// the Send bound for the enclosing context.
struct ScheduledTasks<T>(Arc<Mutex<ScheduledTaskQueue<T>>>);
impl<T> ScheduledTasks<T> {
pub fn new(inner: ScheduledTaskQueue<T>) -> Self {
ScheduledTasks(Arc::new(Mutex::new(inner)))
}
pub fn lock_inner(&mut self) -> MutexGuard<ScheduledTaskQueue<T>> {
// Even a try_lock() should never fail, but we prefer
// the blocking variant to be safe!
let lock_result = self.0.lock();
debug_assert!(lock_result.is_ok());
match lock_result {
Ok(guard) => guard,
Err(err) => {
error!("Failed to lock mutex of scheduled tasks: {}", err);
unreachable!();
}
}
}
}
impl<T> Clone for ScheduledTasks<T> {
fn clone(&self) -> Self {
ScheduledTasks(self.0.clone())
}
}
impl<T> Stream for ScheduledTasks<T> {
type Item = <DelayQueue<DelayQueueItem<T>> as Stream>::Item;
type Error = <DelayQueue<DelayQueueItem<T>> as Stream>::Error;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
self.lock_inner().poll()
}
}
#[derive(Debug, Clone, Copy)]
pub enum TaskSchedulingSignal {}
#[derive(Debug, Clone, PartialEq)]
pub enum TaskSchedulingCommand<T: TaskSchedule> {
RescheduleAll(Vec<T>),
}
#[derive(Debug, Clone, Copy)]
pub enum TaskSchedulingQuery {}
pub type TaskSchedulingAction<T> =
Action<TaskSchedulingSignal, TaskSchedulingCommand<T>, TaskSchedulingQuery>;
pub type TaskSchedulingActionSender<T> = ActionSender<TaskSchedulingAction<T>>;
type TaskSchedulingActionReceiver<T> = ActionReceiver<TaskSchedulingAction<T>>;
#[derive(Debug, Clone, Copy)]
pub enum TaskSchedulingNotification {}
type TaskSchedulingNotificationSender = NotificationSender<TaskSchedulingNotification>;
pub type TaskSchedulingNotificationReceiver = NotificationReceiver<TaskSchedulingNotification>;
pub struct TaskSchedulingActor<T: TaskSchedule> {
// Currently unused
_notification_tx: TaskSchedulingNotificationSender,
scheduled_tasks: ScheduledTasks<T>,
}
impl<T> TaskSchedulingActor<T>
where
T: TaskSchedule + Send + std::fmt::Debug,
{
pub fn create(
task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>,
) -> (
impl Future<Item = (), Error = ()>,
TaskSchedulingActionSender<T>,
TaskSchedulingNotificationReceiver,
) {
let (action_tx, action_rx) = new_action_channel();
let (_notification_tx, notification_rx) = new_notification_channel();
let event_loop = futures::lazy(move || {
info!("Starting scheduler");
// Lazy instantiation is essential to implicitly attach the
// DelayQueue to the Timer of the corresponding Runtime in
// DelayQueue::new()!!!
Ok(ScheduledTasks::new(ScheduledTaskQueue::new(task_scheduler)))
})
.and_then(move |scheduled_tasks| {
// Create a handler for expired tasks
let mut expired_tasks = scheduled_tasks.clone();
let expired_tasks_handler = scheduled_tasks
.clone()
.for_each(move |expired| {
expired_tasks.lock_inner().handle_expired(expired);
Ok(())
})
.map_err(|err| error!("Failed to handle expired tasks: {}", err));
Ok((scheduled_tasks, expired_tasks_handler))
})
.and_then(move |(scheduled_tasks, expired_tasks_handler)| {
// Create a handler for actions...
let action_handler = Self {
_notification_tx,
scheduled_tasks,
}
.handle_actions(action_rx);
// ...and combine the handlers.
// Warning: The order for combining both futures seems to matter!!
// Using select() on action_handler followed by expired_tasks_handler
// as an argument works as expected. When reversing this order any
// previously rescheduled tasks are retained and don't expire until
// the next action is received.
action_handler
.select(expired_tasks_handler)
.map(drop)
.map_err(drop)
});
(event_loop, action_tx, notification_rx)
}
fn handle_actions(
mut self,
action_rx: TaskSchedulingActionReceiver<T>,
) -> impl Future<Item = (), Error = ()> {
action_rx.for_each(move |action| {
self.handle_action(action);
Ok(())
})
}
fn handle_action(&mut self, action: TaskSchedulingAction<T>) {
match action {
Action::Signal(signal) => match signal {},
Action::Command(response_tx, command) => self.handle_command(response_tx, command),
Action::Query(query) => match query {},
}
}
fn | (
&mut self,
response_tx: CommandResponseSender,
command: TaskSchedulingCommand<T>,
) {
let result = match command {
TaskSchedulingCommand::RescheduleAll(task_schedules) => {
self.scheduled_tasks
.lock_inner()
.reschedule_all(task_schedules);
Ok(())
}
};
reply(response_tx, result);
}
}
| handle_command | identifier_name |
scheduling.rs | use super::*;
use crate::domain::scheduling::*;
use chrono::{DateTime, SecondsFormat};
use futures::{Async, Future, Stream};
use std::sync::{Arc, Mutex, MutexGuard};
use std::time::Duration;
use tokio::timer::{
delay_queue::{Expired, Key as DelayQueueKey},
DelayQueue,
};
enum DelayQueueItem<T> {
TaskSchedule(T),
KeepAlive,
}
// The valid duration is limited by this upper bound that is
// reserved for the keep alive token!
// TODO: The maximum acceptable value of 795 days that does
// not cause an internal panic has been discovered experimentally.
// No references about this limit can be found in the Tokio docs!?
const MAX_DELAY_TIMEOUT: Duration = Duration::from_secs(795 * 24 * 60 * 60);
struct ScheduledTaskQueue<T> {
task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>,
upcoming_tasks: DelayQueue<DelayQueueItem<T>>,
keep_alive_key: DelayQueueKey,
}
fn format_datetime<Z: chrono::TimeZone>(dt: &DateTime<Z>) -> String
where
<Z as chrono::TimeZone>::Offset: std::fmt::Display,
{
dt.to_rfc3339_opts(SecondsFormat::Millis, true)
}
impl<T> ScheduledTaskQueue<T>
where
T: TaskSchedule + std::fmt::Debug,
<T::TimeZone as chrono::TimeZone>::Offset: std::fmt::Display,
{
pub fn new(task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>) -> Self {
let mut upcoming_tasks = DelayQueue::new();
let keep_alive_key = upcoming_tasks.insert(DelayQueueItem::KeepAlive, MAX_DELAY_TIMEOUT);
Self {
task_scheduler,
upcoming_tasks,
keep_alive_key,
}
}
fn keep_alive(&mut self) {
self.upcoming_tasks
.reset(&self.keep_alive_key, MAX_DELAY_TIMEOUT);
}
pub fn handle_expired(&mut self, expired: Expired<DelayQueueItem<T>>) {
match expired.into_inner() {
DelayQueueItem::TaskSchedule(task_schedule) => self.reschedule_expired(task_schedule),
DelayQueueItem::KeepAlive => self.reschedule_all(Default::default()),
}
}
fn reschedule_expired(&mut self, task_schedule: T) {
let now = self.task_scheduler.now();
debug!("{:?} expired at {}", task_schedule, now);
let task_reschedule = self
.task_scheduler
.dispatch_and_reschedule_expired_task(&now, task_schedule);
if let Some(task_schedule) = task_reschedule {
self.schedule_next(&now, task_schedule);
self.keep_alive();
}
}
fn schedule_next(
&mut self,
now: &DateTime<T::TimeZone>,
task_schedule: T,
) -> Option<DateTime<T::TimeZone>> {
if let Some(next_after_now) = task_schedule.schedule_next_after(now) {
debug_assert!(next_after_now > *now);
debug!(
"Rescheduling {:?} at {}",
task_schedule,
format_datetime(&next_after_now)
);
let timeout = (next_after_now.clone() - now.clone()).to_std().unwrap();
if timeout < MAX_DELAY_TIMEOUT {
self.upcoming_tasks
.insert(DelayQueueItem::TaskSchedule(task_schedule), timeout);
Some(next_after_now)
} else {
error!(
"Cannot reschedule {:?} at {}: Maximum timeout duration exceeded: {:?} >= {:?}",
task_schedule,
format_datetime(&next_after_now),
timeout,
MAX_DELAY_TIMEOUT
);
None
}
} else {
debug!("Finished {:?}", task_schedule);
None
}
}
pub fn reschedule_all(&mut self, task_schedules: Vec<T>) {
// Clear the delay queue, i.e. discard all tasks
debug!("Discarding all scheduled tasks");
self.upcoming_tasks.clear();
// Repopulate the delay queue with the given irrigation schedules
debug_assert!(self.upcoming_tasks.is_empty());
self.upcoming_tasks.reserve(task_schedules.len() + 1);
let now = self.task_scheduler.now();
task_schedules.into_iter().for_each(|task_schedule| {
self.schedule_next(&now, task_schedule);
});
self.keep_alive_key = self
.upcoming_tasks
.insert(DelayQueueItem::KeepAlive, MAX_DELAY_TIMEOUT);
}
}
impl<T> Stream for ScheduledTaskQueue<T> {
type Item = <DelayQueue<DelayQueueItem<T>> as Stream>::Item;
type Error = <DelayQueue<DelayQueueItem<T>> as Stream>::Error;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
self.upcoming_tasks.poll()
}
}
// This mutex will only be locked within the same executor,
// though maybe subsequently by different threads. There
// won't be any lock contention, i.e. no thread will block
// when locking this mutex! It is only required to satisfy
// the Send bound for the enclosing context.
struct ScheduledTasks<T>(Arc<Mutex<ScheduledTaskQueue<T>>>);
impl<T> ScheduledTasks<T> {
pub fn new(inner: ScheduledTaskQueue<T>) -> Self {
ScheduledTasks(Arc::new(Mutex::new(inner)))
}
pub fn lock_inner(&mut self) -> MutexGuard<ScheduledTaskQueue<T>> {
// Even a try_lock() should never fail, but we prefer
// the blocking variant to be safe!
let lock_result = self.0.lock();
debug_assert!(lock_result.is_ok());
match lock_result {
Ok(guard) => guard,
Err(err) => {
error!("Failed to lock mutex of scheduled tasks: {}", err);
unreachable!();
}
}
}
}
impl<T> Clone for ScheduledTasks<T> {
fn clone(&self) -> Self {
ScheduledTasks(self.0.clone())
}
}
impl<T> Stream for ScheduledTasks<T> {
type Item = <DelayQueue<DelayQueueItem<T>> as Stream>::Item;
type Error = <DelayQueue<DelayQueueItem<T>> as Stream>::Error;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
self.lock_inner().poll()
}
}
#[derive(Debug, Clone, Copy)]
pub enum TaskSchedulingSignal {}
#[derive(Debug, Clone, PartialEq)]
pub enum TaskSchedulingCommand<T: TaskSchedule> {
RescheduleAll(Vec<T>),
}
#[derive(Debug, Clone, Copy)]
pub enum TaskSchedulingQuery {}
pub type TaskSchedulingAction<T> =
Action<TaskSchedulingSignal, TaskSchedulingCommand<T>, TaskSchedulingQuery>;
pub type TaskSchedulingActionSender<T> = ActionSender<TaskSchedulingAction<T>>;
type TaskSchedulingActionReceiver<T> = ActionReceiver<TaskSchedulingAction<T>>;
#[derive(Debug, Clone, Copy)]
pub enum TaskSchedulingNotification {}
type TaskSchedulingNotificationSender = NotificationSender<TaskSchedulingNotification>;
pub type TaskSchedulingNotificationReceiver = NotificationReceiver<TaskSchedulingNotification>;
pub struct TaskSchedulingActor<T: TaskSchedule> {
// Currently unused
_notification_tx: TaskSchedulingNotificationSender,
| impl<T> TaskSchedulingActor<T>
where
T: TaskSchedule + Send + std::fmt::Debug,
{
pub fn create(
task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>,
) -> (
impl Future<Item = (), Error = ()>,
TaskSchedulingActionSender<T>,
TaskSchedulingNotificationReceiver,
) {
let (action_tx, action_rx) = new_action_channel();
let (_notification_tx, notification_rx) = new_notification_channel();
let event_loop = futures::lazy(move || {
info!("Starting scheduler");
// Lazy instantiation is essential to implicitly attach the
// DelayQueue to the Timer of the corresponding Runtime in
// DelayQueue::new()!!!
Ok(ScheduledTasks::new(ScheduledTaskQueue::new(task_scheduler)))
})
.and_then(move |scheduled_tasks| {
// Create a handler for expired tasks
let mut expired_tasks = scheduled_tasks.clone();
let expired_tasks_handler = scheduled_tasks
.clone()
.for_each(move |expired| {
expired_tasks.lock_inner().handle_expired(expired);
Ok(())
})
.map_err(|err| error!("Failed to handle expired tasks: {}", err));
Ok((scheduled_tasks, expired_tasks_handler))
})
.and_then(move |(scheduled_tasks, expired_tasks_handler)| {
// Create a handler for actions...
let action_handler = Self {
_notification_tx,
scheduled_tasks,
}
.handle_actions(action_rx);
// ...and combine the handlers.
// Warning: The order for combining both futures seems to matter!!
// Using select() on action_handler followed by expired_tasks_handler
// as an argument works as expected. When reversing this order any
// previously rescheduled tasks are retained and don't expire until
// the next action is received.
action_handler
.select(expired_tasks_handler)
.map(drop)
.map_err(drop)
});
(event_loop, action_tx, notification_rx)
}
fn handle_actions(
mut self,
action_rx: TaskSchedulingActionReceiver<T>,
) -> impl Future<Item = (), Error = ()> {
action_rx.for_each(move |action| {
self.handle_action(action);
Ok(())
})
}
fn handle_action(&mut self, action: TaskSchedulingAction<T>) {
match action {
Action::Signal(signal) => match signal {},
Action::Command(response_tx, command) => self.handle_command(response_tx, command),
Action::Query(query) => match query {},
}
}
fn handle_command(
&mut self,
response_tx: CommandResponseSender,
command: TaskSchedulingCommand<T>,
) {
let result = match command {
TaskSchedulingCommand::RescheduleAll(task_schedules) => {
self.scheduled_tasks
.lock_inner()
.reschedule_all(task_schedules);
Ok(())
}
};
reply(response_tx, result);
}
} | scheduled_tasks: ScheduledTasks<T>,
}
| random_line_split |
scheduling.rs | use super::*;
use crate::domain::scheduling::*;
use chrono::{DateTime, SecondsFormat};
use futures::{Async, Future, Stream};
use std::sync::{Arc, Mutex, MutexGuard};
use std::time::Duration;
use tokio::timer::{
delay_queue::{Expired, Key as DelayQueueKey},
DelayQueue,
};
enum DelayQueueItem<T> {
TaskSchedule(T),
KeepAlive,
}
// The valid duration is limited by this upper bound that is
// reserved for the keep alive token!
// TODO: The maximum acceptable value of 795 days that does
// not cause an internal panic has been discovered experimentally.
// No references about this limit can be found in the Tokio docs!?
const MAX_DELAY_TIMEOUT: Duration = Duration::from_secs(795 * 24 * 60 * 60);
struct ScheduledTaskQueue<T> {
task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>,
upcoming_tasks: DelayQueue<DelayQueueItem<T>>,
keep_alive_key: DelayQueueKey,
}
fn format_datetime<Z: chrono::TimeZone>(dt: &DateTime<Z>) -> String
where
<Z as chrono::TimeZone>::Offset: std::fmt::Display,
{
dt.to_rfc3339_opts(SecondsFormat::Millis, true)
}
impl<T> ScheduledTaskQueue<T>
where
T: TaskSchedule + std::fmt::Debug,
<T::TimeZone as chrono::TimeZone>::Offset: std::fmt::Display,
{
pub fn new(task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>) -> Self {
let mut upcoming_tasks = DelayQueue::new();
let keep_alive_key = upcoming_tasks.insert(DelayQueueItem::KeepAlive, MAX_DELAY_TIMEOUT);
Self {
task_scheduler,
upcoming_tasks,
keep_alive_key,
}
}
fn keep_alive(&mut self) {
self.upcoming_tasks
.reset(&self.keep_alive_key, MAX_DELAY_TIMEOUT);
}
pub fn handle_expired(&mut self, expired: Expired<DelayQueueItem<T>>) {
match expired.into_inner() {
DelayQueueItem::TaskSchedule(task_schedule) => self.reschedule_expired(task_schedule),
DelayQueueItem::KeepAlive => self.reschedule_all(Default::default()),
}
}
fn reschedule_expired(&mut self, task_schedule: T) {
let now = self.task_scheduler.now();
debug!("{:?} expired at {}", task_schedule, now);
let task_reschedule = self
.task_scheduler
.dispatch_and_reschedule_expired_task(&now, task_schedule);
if let Some(task_schedule) = task_reschedule {
self.schedule_next(&now, task_schedule);
self.keep_alive();
}
}
fn schedule_next(
&mut self,
now: &DateTime<T::TimeZone>,
task_schedule: T,
) -> Option<DateTime<T::TimeZone>> {
if let Some(next_after_now) = task_schedule.schedule_next_after(now) {
debug_assert!(next_after_now > *now);
debug!(
"Rescheduling {:?} at {}",
task_schedule,
format_datetime(&next_after_now)
);
let timeout = (next_after_now.clone() - now.clone()).to_std().unwrap();
if timeout < MAX_DELAY_TIMEOUT {
self.upcoming_tasks
.insert(DelayQueueItem::TaskSchedule(task_schedule), timeout);
Some(next_after_now)
} else {
error!(
"Cannot reschedule {:?} at {}: Maximum timeout duration exceeded: {:?} >= {:?}",
task_schedule,
format_datetime(&next_after_now),
timeout,
MAX_DELAY_TIMEOUT
);
None
}
} else {
debug!("Finished {:?}", task_schedule);
None
}
}
pub fn reschedule_all(&mut self, task_schedules: Vec<T>) {
// Clear the delay queue, i.e. discard all tasks
debug!("Discarding all scheduled tasks");
self.upcoming_tasks.clear();
// Repopulate the delay queue with the given irrigation schedules
debug_assert!(self.upcoming_tasks.is_empty());
self.upcoming_tasks.reserve(task_schedules.len() + 1);
let now = self.task_scheduler.now();
task_schedules.into_iter().for_each(|task_schedule| {
self.schedule_next(&now, task_schedule);
});
self.keep_alive_key = self
.upcoming_tasks
.insert(DelayQueueItem::KeepAlive, MAX_DELAY_TIMEOUT);
}
}
impl<T> Stream for ScheduledTaskQueue<T> {
type Item = <DelayQueue<DelayQueueItem<T>> as Stream>::Item;
type Error = <DelayQueue<DelayQueueItem<T>> as Stream>::Error;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
self.upcoming_tasks.poll()
}
}
// This mutex will only be locked within the same executor,
// though maybe subsequently by different threads. There
// won't be any lock contention, i.e. no thread will block
// when locking this mutex! It is only required to satisfy
// the Send bound for the enclosing context.
struct ScheduledTasks<T>(Arc<Mutex<ScheduledTaskQueue<T>>>);
impl<T> ScheduledTasks<T> {
pub fn new(inner: ScheduledTaskQueue<T>) -> Self {
ScheduledTasks(Arc::new(Mutex::new(inner)))
}
pub fn lock_inner(&mut self) -> MutexGuard<ScheduledTaskQueue<T>> {
// Even a try_lock() should never fail, but we prefer
// the blocking variant to be safe!
let lock_result = self.0.lock();
debug_assert!(lock_result.is_ok());
match lock_result {
Ok(guard) => guard,
Err(err) => {
error!("Failed to lock mutex of scheduled tasks: {}", err);
unreachable!();
}
}
}
}
impl<T> Clone for ScheduledTasks<T> {
fn clone(&self) -> Self |
}
impl<T> Stream for ScheduledTasks<T> {
type Item = <DelayQueue<DelayQueueItem<T>> as Stream>::Item;
type Error = <DelayQueue<DelayQueueItem<T>> as Stream>::Error;
fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> {
self.lock_inner().poll()
}
}
#[derive(Debug, Clone, Copy)]
pub enum TaskSchedulingSignal {}
#[derive(Debug, Clone, PartialEq)]
pub enum TaskSchedulingCommand<T: TaskSchedule> {
RescheduleAll(Vec<T>),
}
#[derive(Debug, Clone, Copy)]
pub enum TaskSchedulingQuery {}
pub type TaskSchedulingAction<T> =
Action<TaskSchedulingSignal, TaskSchedulingCommand<T>, TaskSchedulingQuery>;
pub type TaskSchedulingActionSender<T> = ActionSender<TaskSchedulingAction<T>>;
type TaskSchedulingActionReceiver<T> = ActionReceiver<TaskSchedulingAction<T>>;
#[derive(Debug, Clone, Copy)]
pub enum TaskSchedulingNotification {}
type TaskSchedulingNotificationSender = NotificationSender<TaskSchedulingNotification>;
pub type TaskSchedulingNotificationReceiver = NotificationReceiver<TaskSchedulingNotification>;
pub struct TaskSchedulingActor<T: TaskSchedule> {
// Currently unused
_notification_tx: TaskSchedulingNotificationSender,
scheduled_tasks: ScheduledTasks<T>,
}
impl<T> TaskSchedulingActor<T>
where
T: TaskSchedule + Send + std::fmt::Debug,
{
pub fn create(
task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>,
) -> (
impl Future<Item = (), Error = ()>,
TaskSchedulingActionSender<T>,
TaskSchedulingNotificationReceiver,
) {
let (action_tx, action_rx) = new_action_channel();
let (_notification_tx, notification_rx) = new_notification_channel();
let event_loop = futures::lazy(move || {
info!("Starting scheduler");
// Lazy instantiation is essential to implicitly attach the
// DelayQueue to the Timer of the corresponding Runtime in
// DelayQueue::new()!!!
Ok(ScheduledTasks::new(ScheduledTaskQueue::new(task_scheduler)))
})
.and_then(move |scheduled_tasks| {
// Create a handler for expired tasks
let mut expired_tasks = scheduled_tasks.clone();
let expired_tasks_handler = scheduled_tasks
.clone()
.for_each(move |expired| {
expired_tasks.lock_inner().handle_expired(expired);
Ok(())
})
.map_err(|err| error!("Failed to handle expired tasks: {}", err));
Ok((scheduled_tasks, expired_tasks_handler))
})
.and_then(move |(scheduled_tasks, expired_tasks_handler)| {
// Create a handler for actions...
let action_handler = Self {
_notification_tx,
scheduled_tasks,
}
.handle_actions(action_rx);
// ...and combine the handlers.
// Warning: The order for combining both futures seems to matter!!
// Using select() on action_handler followed by expired_tasks_handler
// as an argument works as expected. When reversing this order any
// previously rescheduled tasks are retained and don't expire until
// the next action is received.
action_handler
.select(expired_tasks_handler)
.map(drop)
.map_err(drop)
});
(event_loop, action_tx, notification_rx)
}
fn handle_actions(
mut self,
action_rx: TaskSchedulingActionReceiver<T>,
) -> impl Future<Item = (), Error = ()> {
action_rx.for_each(move |action| {
self.handle_action(action);
Ok(())
})
}
fn handle_action(&mut self, action: TaskSchedulingAction<T>) {
match action {
Action::Signal(signal) => match signal {},
Action::Command(response_tx, command) => self.handle_command(response_tx, command),
Action::Query(query) => match query {},
}
}
fn handle_command(
&mut self,
response_tx: CommandResponseSender,
command: TaskSchedulingCommand<T>,
) {
let result = match command {
TaskSchedulingCommand::RescheduleAll(task_schedules) => {
self.scheduled_tasks
.lock_inner()
.reschedule_all(task_schedules);
Ok(())
}
};
reply(response_tx, result);
}
}
| {
ScheduledTasks(self.0.clone())
} | identifier_body |
messenger.ts | import * as Mixins from './mixins'
import * as tools from './tools'
import * as _eventsApi from './events-api'
import { EventMap, EventsDefinition } from './events-api'
const { mixins, define, extendable } = Mixins,
{ omit, once, isEmpty, keys } = tools,
{ EventHandler, trigger0, trigger1, trigger2, trigger3 } = _eventsApi;
// Regular expression used to split event strings.
const eventSplitter = /\s+/;
let _idCount = 0;
function uniqueId() : string {
return 'l' + _idCount++;
}
export { EventMap, EventsDefinition }
export interface MessengerDefinition extends Mixins.ClassDefinition {
_localEvents? : EventMap
localEvents? : EventsDefinition
}
/*************************
* Messenger is mixable class with capabilities of sending and receiving synchronous events.
* This class itself can serve as both mixin and base class.
*/
@extendable
export abstract class Messenger implements Mixins.Mixable {
// Define extendable mixin static properties.
static create : ( a : any, b? : any, c? : any ) => Messenger
static mixins : ( ...mixins : ( Mixins.Constructor<any> | {} )[] ) => Mixins.MixableConstructor< Messenger >
static mixinRules : ( mixinRules : Mixins.MixinRules ) => Mixins.MixableConstructor< Messenger >
static mixTo : ( ...args : Mixins.Constructor<any>[] ) => Mixins.MixableConstructor< Messenger >
static extend : (spec? : MessengerDefinition, statics? : {} ) => Mixins.MixableConstructor< Messenger >
static predefine : () => Mixins.MixableConstructor< Messenger >
/** @hidden */
_events : _eventsApi.EventsSubscription = void 0;
/** @hidden */
_listeners : Listeners = void 0
/** @hidden */
_listeningTo : ListeningToMap = void 0
/** Unique client-only id. */
cid : string
// Prototype-only property to manage automatic local events subscription.
/** @hidden */
_localEvents : _eventsApi.EventMap
/** @private */
static define( protoProps? : MessengerDefinition , staticProps? ) : typeof Messenger {
const spec : MessengerDefinition = omit( protoProps || {}, 'localEvents' );
if( protoProps ){
const { localEvents, _localEvents } = protoProps;
if( localEvents || _localEvents ){
const eventsMap = new EventMap( this.prototype._localEvents );
localEvents && eventsMap.addEventsMap( localEvents );
_localEvents && eventsMap.merge( _localEvents );
spec._localEvents = eventsMap;
}
}
return Mixins.Mixable.define.call( this, spec, staticProps );
}
/** @hidden */
constructor(){
this.cid = uniqueId();
this.initialize.apply( this, arguments );
}
/** Method is called at the end of the constructor */
initialize() : void {}
/** Bind an event to a `callback` function. Passing `"all"` will bind
* the callback to all events fired.
*/
on(name, callback, context?) : this {
return <this>internalOn(this, name, callback, context);
}
/** Remove one or many callbacks. If `context` is null, removes all
* callbacks with that function. If `callback` is null, removes all
* callbacks for the event. If `name` is null, removes all bound
* callbacks for all events.
*/
off(name? : string, callback? : Function, context? ) : this {
if (!this._events) return this;
this._events = eventsApi(offApi, this._events, name, callback,
new OffOptions(
context,
this._listeners )
);
return this;
}
/** Tell this object to stop listening to either specific events ... or
* to every object it's currently listening to.
*/
stopListening( obj? : Messenger, name? : string, callback? : Function ) : this {
const listeningTo = this._listeningTo;
if (!listeningTo) return this;
const ids = obj ? [obj.cid] : keys(listeningTo);
for (let i = 0; i < ids.length; i++) {
const listening = listeningTo[ids[i]];
// If listening doesn't exist, this object is not currently
// listening to obj. Break out early.
if (!listening) break;
listening.obj.off(name, callback, this);
}
if (isEmpty(listeningTo)) this._listeningTo = void 0;
return this;
}
/** Inversion-of-control versions of `on`. Tell *this* object to listen to
* an event in another object... keeping track of what it's listening to
* for easier unbinding later.
*/
listenTo(obj : Messenger, name, callback? ) : this {
if( !obj ) return this;
const id = obj.cid || (obj.cid = uniqueId()),
listeningTo = this._listeningTo || (this._listeningTo = {});
let listening = listeningTo[id];
// This object is not listening to any other events on `obj` yet.
// Setup the necessary references to track the listening callbacks.
if (!listening) {
const thisId = this.cid || (this.cid = uniqueId());
listening = listeningTo[id] = new ListeningTo( obj, id, thisId, listeningTo );
}
// Bind callbacks on obj, and keep track of them on listening.
internalOn( obj, name, callback, this, listening );
return this;
}
/** Bind an event to only be triggered a single time. After the first time
* the callback is invoked, its listener will be removed. If multiple events
* are passed in using the space-separated syntax, the handler will fire
* once for each event, not once for a combination of all events.
*/
once(name, callback, context) : this {
// Map the event into a `{event: once}` object.
const events = eventsApi(onceMap, {}, name, callback, this.off.bind( this ));
return this.on(events, void 0, context);
}
/** Inversion-of-control versions of `once`.*/
listenToOnce(obj : Messenger, name, callback) : this {
// Map the event into a `{event: once}` object.
const events = eventsApi(onceMap, {}, name, callback, this.stopListening.bind( this, obj ) );
return this.listenTo(obj, events);
}
/** Trigger one or many events, firing all bound callbacks. Callbacks are
* passed the same arguments as `trigger` is, apart from the event name
* (unless you're listening on `"all"`, which will cause your callback to
* receive the true name of the event as the first argument).
*/
trigger(name : string, a?, b?, c? ) : this {
if( !this._events ) return this;
switch( arguments.length ){
// Forward call to monomorphic fast-path functions.
case 1 : trigger0( this, name ); break;
case 2 : trigger1( this, name, a ); break;
case 3 : trigger2( this, name, a, b ); break;
case 4 : trigger3( this, name, a, b, c ); break;
// Trigger event with more than 3 arguments.
default :
// Passing arguments around killing performance. Convert it to array.
const allArgs = Array( arguments.length );
for( let i = 0; i < allArgs.length; i++ ){
allArgs[ i ] = arguments[ i ];
}
// Send events.
const { _events } = this;
let queue = _events[ name ];
if( queue ) _fireEventAll( queue, allArgs.slice( 1 ) );
if( queue = _events.all ) _fireEventAll( queue, allArgs );
}
return this;
}
/**
* Destructor. Stops messenger from listening to all objects,
* and stop others from listening to the messenger.
*/
dispose() : void {
this.stopListening();
this.off();
}
}
/** @hidden */
const slice = Array.prototype.slice;
/**
* Backbone 1.2 API conformant Events mixin.
*/
export const Events : Messenger = <Messenger> omit( Messenger.prototype, 'constructor', 'initialize' );
// Iterates over the standard `event, callback` (as well as the fancy multiple
// space-separated events `"change blur", callback` and jQuery-style event
// maps `{event: callback}`).
/** @hidden */
function eventsApi(iteratee, events, name, callback, opts) {
let i = 0, names;
if (name && typeof name === 'object') {
// Handle event maps.
if (callback !== void 0 && 'context' in opts && opts.context === void 0) opts.context = callback;
for (names = keys(name); i < names.length ; i++) {
events = eventsApi(iteratee, events, names[i], name[names[i]], opts);
}
} else if (name && eventSplitter.test(name)) {
// Handle space separated event names by delegating them individually.
for (names = name.split(eventSplitter); i < names.length; i++) {
events = iteratee(events, names[i], callback, opts);
}
} else {
// Finally, standard events.
events = iteratee(events, name, callback, opts);
}
return events;
};
/** @hidden */
class ListeningTo {
count : number = 0
constructor( public obj, public objId, public id, public listeningTo ){}
}
/** @hidden */
export interface ListeningToMap {
[ id : string ] : ListeningTo
}
/** @hidden */
export interface Listeners {
[ id : string ] : Messenger
}
// Guard the `listening` argument from the public API.
/** @hidden */
function internalOn(obj : Messenger, name, callback, context, listening? ) : Messenger {
obj._events = eventsApi(onApi, obj._events || {}, name,
callback, new EventHandler( context, obj, listening));
if (listening) {
const listeners = obj._listeners || (obj._listeners = {});
listeners[listening.id] = listening;
}
return obj;
};
// The reducing API that adds a callback to the `events` object.
/** @hidden */
function onApi(events : _eventsApi.EventsSubscription, name : string, callback : Function, options) : _eventsApi.EventsSubscription {
if (callback) {
const handlers = events[name],
toAdd = [ options.clone( callback ) ];
events[name] = handlers ? handlers.concat( toAdd ) : toAdd;
}
return events;
};
/** @hidden */
class OffOptions {
constructor( public context, public listeners : Listeners ){}
}
// The reducing API that removes a callback from the `events` object.
/** @hidden */
function offApi(events : _eventsApi.EventsSubscription, name, callback, options : OffOptions ) {
if (!events) return;
let i = 0, listening;
const context = options.context, listeners = options.listeners;
// Delete all events listeners and "drop" events.
if (!name && !callback && !context) {
const ids = keys(listeners);
for (; i < ids.length; i++) {
listening = listeners[ids[i]];
delete listeners[listening.id];
delete listening.listeningTo[listening.objId];
}
return {};
}
const names = name ? [name] : keys(events);
for (; i < names.length; i++) {
name = names[i];
const handlers = events[name];
// Bail out if there are no events stored.
if (!handlers) break;
// Replace events if there are any remaining. Otherwise, clean up.
const remaining = [];
for (let j = 0; j < handlers.length; j++) |
// Update tail event if the list has any events. Otherwise, clean up.
if (remaining.length) {
events[name] = remaining;
} else {
delete events[name];
}
}
return events;
};
// Reduces the event callbacks into a map of `{event: onceWrapper}`.
// `offer` unbinds the `onceWrapper` after it has been called.
/** @hidden */
function onceMap(map, name, callback, offer) {
if (callback) {
const _once : _eventsApi.Callback = map[name] = once(function() {
offer(name, _once);
callback.apply(this, arguments);
});
_once._callback = callback;
}
return map;
};
/** @hidden */
function _fireEventAll( events : _eventsApi.EventHandler[], a ) : void {
for( let ev of events )
ev.callback.apply( ev.ctx, a );
}
| {
const handler = handlers[j];
if (
callback && callback !== handler.callback &&
callback !== handler.callback._callback ||
context && context !== handler.context
) {
remaining.push(handler);
} else {
listening = handler.listening;
if (listening && --listening.count === 0) {
delete listeners[listening.id];
delete listening.listeningTo[listening.objId];
}
}
} | conditional_block |
messenger.ts | import * as Mixins from './mixins'
import * as tools from './tools'
import * as _eventsApi from './events-api'
import { EventMap, EventsDefinition } from './events-api'
const { mixins, define, extendable } = Mixins,
{ omit, once, isEmpty, keys } = tools,
{ EventHandler, trigger0, trigger1, trigger2, trigger3 } = _eventsApi;
// Regular expression used to split event strings.
const eventSplitter = /\s+/;
let _idCount = 0;
function uniqueId() : string {
return 'l' + _idCount++;
}
export { EventMap, EventsDefinition }
export interface MessengerDefinition extends Mixins.ClassDefinition {
_localEvents? : EventMap
localEvents? : EventsDefinition
}
/*************************
* Messenger is mixable class with capabilities of sending and receiving synchronous events.
* This class itself can serve as both mixin and base class.
*/
@extendable
export abstract class Messenger implements Mixins.Mixable {
// Define extendable mixin static properties.
static create : ( a : any, b? : any, c? : any ) => Messenger
static mixins : ( ...mixins : ( Mixins.Constructor<any> | {} )[] ) => Mixins.MixableConstructor< Messenger >
static mixinRules : ( mixinRules : Mixins.MixinRules ) => Mixins.MixableConstructor< Messenger >
static mixTo : ( ...args : Mixins.Constructor<any>[] ) => Mixins.MixableConstructor< Messenger >
static extend : (spec? : MessengerDefinition, statics? : {} ) => Mixins.MixableConstructor< Messenger >
static predefine : () => Mixins.MixableConstructor< Messenger >
/** @hidden */
_events : _eventsApi.EventsSubscription = void 0;
/** @hidden */
_listeners : Listeners = void 0
/** @hidden */
_listeningTo : ListeningToMap = void 0
/** Unique client-only id. */
cid : string
// Prototype-only property to manage automatic local events subscription.
/** @hidden */
_localEvents : _eventsApi.EventMap
/** @private */
static define( protoProps? : MessengerDefinition , staticProps? ) : typeof Messenger {
const spec : MessengerDefinition = omit( protoProps || {}, 'localEvents' );
if( protoProps ){
const { localEvents, _localEvents } = protoProps;
if( localEvents || _localEvents ){
const eventsMap = new EventMap( this.prototype._localEvents );
localEvents && eventsMap.addEventsMap( localEvents );
_localEvents && eventsMap.merge( _localEvents );
spec._localEvents = eventsMap;
}
}
return Mixins.Mixable.define.call( this, spec, staticProps );
}
/** @hidden */
constructor(){
this.cid = uniqueId();
this.initialize.apply( this, arguments );
}
/** Method is called at the end of the constructor */
initialize() : void {}
/** Bind an event to a `callback` function. Passing `"all"` will bind
* the callback to all events fired.
*/
on(name, callback, context?) : this {
return <this>internalOn(this, name, callback, context);
}
/** Remove one or many callbacks. If `context` is null, removes all
* callbacks with that function. If `callback` is null, removes all
* callbacks for the event. If `name` is null, removes all bound
* callbacks for all events.
*/
off(name? : string, callback? : Function, context? ) : this {
if (!this._events) return this;
this._events = eventsApi(offApi, this._events, name, callback,
new OffOptions(
context,
this._listeners )
);
return this;
}
/** Tell this object to stop listening to either specific events ... or
* to every object it's currently listening to.
*/
stopListening( obj? : Messenger, name? : string, callback? : Function ) : this {
const listeningTo = this._listeningTo;
if (!listeningTo) return this;
const ids = obj ? [obj.cid] : keys(listeningTo);
for (let i = 0; i < ids.length; i++) {
const listening = listeningTo[ids[i]];
// If listening doesn't exist, this object is not currently
// listening to obj. Break out early.
if (!listening) break;
listening.obj.off(name, callback, this);
}
if (isEmpty(listeningTo)) this._listeningTo = void 0;
return this;
}
/** Inversion-of-control versions of `on`. Tell *this* object to listen to
* an event in another object... keeping track of what it's listening to
* for easier unbinding later.
*/
listenTo(obj : Messenger, name, callback? ) : this {
if( !obj ) return this;
const id = obj.cid || (obj.cid = uniqueId()),
listeningTo = this._listeningTo || (this._listeningTo = {});
let listening = listeningTo[id];
// This object is not listening to any other events on `obj` yet.
// Setup the necessary references to track the listening callbacks.
if (!listening) {
const thisId = this.cid || (this.cid = uniqueId());
listening = listeningTo[id] = new ListeningTo( obj, id, thisId, listeningTo );
}
// Bind callbacks on obj, and keep track of them on listening.
internalOn( obj, name, callback, this, listening );
return this;
}
/** Bind an event to only be triggered a single time. After the first time
* the callback is invoked, its listener will be removed. If multiple events
* are passed in using the space-separated syntax, the handler will fire
* once for each event, not once for a combination of all events.
*/
once(name, callback, context) : this {
// Map the event into a `{event: once}` object.
const events = eventsApi(onceMap, {}, name, callback, this.off.bind( this ));
return this.on(events, void 0, context);
}
/** Inversion-of-control versions of `once`.*/
listenToOnce(obj : Messenger, name, callback) : this {
// Map the event into a `{event: once}` object.
const events = eventsApi(onceMap, {}, name, callback, this.stopListening.bind( this, obj ) );
return this.listenTo(obj, events);
}
/** Trigger one or many events, firing all bound callbacks. Callbacks are
* passed the same arguments as `trigger` is, apart from the event name
* (unless you're listening on `"all"`, which will cause your callback to
* receive the true name of the event as the first argument).
*/
trigger(name : string, a?, b?, c? ) : this {
if( !this._events ) return this;
switch( arguments.length ){
// Forward call to monomorphic fast-path functions.
case 1 : trigger0( this, name ); break;
case 2 : trigger1( this, name, a ); break;
case 3 : trigger2( this, name, a, b ); break;
case 4 : trigger3( this, name, a, b, c ); break;
// Trigger event with more than 3 arguments.
default :
// Passing arguments around killing performance. Convert it to array.
const allArgs = Array( arguments.length );
for( let i = 0; i < allArgs.length; i++ ){
allArgs[ i ] = arguments[ i ];
}
// Send events.
const { _events } = this;
let queue = _events[ name ];
if( queue ) _fireEventAll( queue, allArgs.slice( 1 ) );
if( queue = _events.all ) _fireEventAll( queue, allArgs );
}
return this;
}
/**
* Destructor. Stops messenger from listening to all objects,
* and stop others from listening to the messenger.
*/
dispose() : void {
this.stopListening();
this.off();
}
}
/** @hidden */
const slice = Array.prototype.slice;
/**
* Backbone 1.2 API conformant Events mixin.
*/
export const Events : Messenger = <Messenger> omit( Messenger.prototype, 'constructor', 'initialize' );
// Iterates over the standard `event, callback` (as well as the fancy multiple
// space-separated events `"change blur", callback` and jQuery-style event
// maps `{event: callback}`).
/** @hidden */
function eventsApi(iteratee, events, name, callback, opts) {
let i = 0, names;
if (name && typeof name === 'object') {
// Handle event maps.
if (callback !== void 0 && 'context' in opts && opts.context === void 0) opts.context = callback;
for (names = keys(name); i < names.length ; i++) {
events = eventsApi(iteratee, events, names[i], name[names[i]], opts);
}
} else if (name && eventSplitter.test(name)) {
// Handle space separated event names by delegating them individually.
for (names = name.split(eventSplitter); i < names.length; i++) {
events = iteratee(events, names[i], callback, opts);
}
} else {
// Finally, standard events.
events = iteratee(events, name, callback, opts);
}
return events;
};
/** @hidden */
class ListeningTo {
count : number = 0
constructor( public obj, public objId, public id, public listeningTo ){}
}
/** @hidden */
export interface ListeningToMap {
[ id : string ] : ListeningTo
}
/** @hidden */
export interface Listeners {
[ id : string ] : Messenger
}
// Guard the `listening` argument from the public API.
/** @hidden */
function internalOn(obj : Messenger, name, callback, context, listening? ) : Messenger {
obj._events = eventsApi(onApi, obj._events || {}, name,
callback, new EventHandler( context, obj, listening));
if (listening) {
const listeners = obj._listeners || (obj._listeners = {});
listeners[listening.id] = listening;
}
return obj;
};
// The reducing API that adds a callback to the `events` object.
/** @hidden */
function onApi(events : _eventsApi.EventsSubscription, name : string, callback : Function, options) : _eventsApi.EventsSubscription {
if (callback) {
const handlers = events[name],
toAdd = [ options.clone( callback ) ];
events[name] = handlers ? handlers.concat( toAdd ) : toAdd;
}
return events;
};
/** @hidden */
class OffOptions {
constructor( public context, public listeners : Listeners ){}
}
// The reducing API that removes a callback from the `events` object.
/** @hidden */
function offApi(events : _eventsApi.EventsSubscription, name, callback, options : OffOptions ) | ;
// Reduces the event callbacks into a map of `{event: onceWrapper}`.
// `offer` unbinds the `onceWrapper` after it has been called.
/** @hidden */
function onceMap(map, name, callback, offer) {
if (callback) {
const _once : _eventsApi.Callback = map[name] = once(function() {
offer(name, _once);
callback.apply(this, arguments);
});
_once._callback = callback;
}
return map;
};
/** @hidden */
function _fireEventAll( events : _eventsApi.EventHandler[], a ) : void {
for( let ev of events )
ev.callback.apply( ev.ctx, a );
}
| {
if (!events) return;
let i = 0, listening;
const context = options.context, listeners = options.listeners;
// Delete all events listeners and "drop" events.
if (!name && !callback && !context) {
const ids = keys(listeners);
for (; i < ids.length; i++) {
listening = listeners[ids[i]];
delete listeners[listening.id];
delete listening.listeningTo[listening.objId];
}
return {};
}
const names = name ? [name] : keys(events);
for (; i < names.length; i++) {
name = names[i];
const handlers = events[name];
// Bail out if there are no events stored.
if (!handlers) break;
// Replace events if there are any remaining. Otherwise, clean up.
const remaining = [];
for (let j = 0; j < handlers.length; j++) {
const handler = handlers[j];
if (
callback && callback !== handler.callback &&
callback !== handler.callback._callback ||
context && context !== handler.context
) {
remaining.push(handler);
} else {
listening = handler.listening;
if (listening && --listening.count === 0) {
delete listeners[listening.id];
delete listening.listeningTo[listening.objId];
}
}
}
// Update tail event if the list has any events. Otherwise, clean up.
if (remaining.length) {
events[name] = remaining;
} else {
delete events[name];
}
}
return events;
} | identifier_body |
messenger.ts | import * as Mixins from './mixins'
import * as tools from './tools'
import * as _eventsApi from './events-api'
import { EventMap, EventsDefinition } from './events-api'
const { mixins, define, extendable } = Mixins,
{ omit, once, isEmpty, keys } = tools,
{ EventHandler, trigger0, trigger1, trigger2, trigger3 } = _eventsApi;
// Regular expression used to split event strings.
const eventSplitter = /\s+/;
let _idCount = 0;
function uniqueId() : string {
return 'l' + _idCount++;
}
export { EventMap, EventsDefinition }
export interface MessengerDefinition extends Mixins.ClassDefinition {
_localEvents? : EventMap
localEvents? : EventsDefinition
}
/*************************
* Messenger is mixable class with capabilities of sending and receiving synchronous events.
* This class itself can serve as both mixin and base class.
*/
@extendable
export abstract class Messenger implements Mixins.Mixable {
// Define extendable mixin static properties.
static create : ( a : any, b? : any, c? : any ) => Messenger
static mixins : ( ...mixins : ( Mixins.Constructor<any> | {} )[] ) => Mixins.MixableConstructor< Messenger >
static mixinRules : ( mixinRules : Mixins.MixinRules ) => Mixins.MixableConstructor< Messenger >
static mixTo : ( ...args : Mixins.Constructor<any>[] ) => Mixins.MixableConstructor< Messenger >
static extend : (spec? : MessengerDefinition, statics? : {} ) => Mixins.MixableConstructor< Messenger >
static predefine : () => Mixins.MixableConstructor< Messenger >
/** @hidden */
_events : _eventsApi.EventsSubscription = void 0;
/** @hidden */
_listeners : Listeners = void 0
/** @hidden */
_listeningTo : ListeningToMap = void 0
/** Unique client-only id. */
cid : string
// Prototype-only property to manage automatic local events subscription.
/** @hidden */
_localEvents : _eventsApi.EventMap
/** @private */
static define( protoProps? : MessengerDefinition , staticProps? ) : typeof Messenger {
const spec : MessengerDefinition = omit( protoProps || {}, 'localEvents' );
if( protoProps ){
const { localEvents, _localEvents } = protoProps;
if( localEvents || _localEvents ){
const eventsMap = new EventMap( this.prototype._localEvents );
localEvents && eventsMap.addEventsMap( localEvents );
_localEvents && eventsMap.merge( _localEvents );
spec._localEvents = eventsMap;
}
}
return Mixins.Mixable.define.call( this, spec, staticProps );
}
/** @hidden */
constructor(){
this.cid = uniqueId();
this.initialize.apply( this, arguments );
}
/** Method is called at the end of the constructor */
initialize() : void {}
/** Bind an event to a `callback` function. Passing `"all"` will bind
* the callback to all events fired.
*/
on(name, callback, context?) : this {
return <this>internalOn(this, name, callback, context);
}
/** Remove one or many callbacks. If `context` is null, removes all
* callbacks with that function. If `callback` is null, removes all
* callbacks for the event. If `name` is null, removes all bound
* callbacks for all events.
*/
off(name? : string, callback? : Function, context? ) : this {
if (!this._events) return this;
this._events = eventsApi(offApi, this._events, name, callback,
new OffOptions(
context,
this._listeners )
);
return this;
}
/** Tell this object to stop listening to either specific events ... or
* to every object it's currently listening to.
*/
stopListening( obj? : Messenger, name? : string, callback? : Function ) : this {
const listeningTo = this._listeningTo;
if (!listeningTo) return this;
const ids = obj ? [obj.cid] : keys(listeningTo);
for (let i = 0; i < ids.length; i++) {
const listening = listeningTo[ids[i]];
// If listening doesn't exist, this object is not currently
// listening to obj. Break out early.
if (!listening) break;
listening.obj.off(name, callback, this);
}
if (isEmpty(listeningTo)) this._listeningTo = void 0;
return this;
}
/** Inversion-of-control versions of `on`. Tell *this* object to listen to
* an event in another object... keeping track of what it's listening to
* for easier unbinding later.
*/
listenTo(obj : Messenger, name, callback? ) : this {
if( !obj ) return this;
const id = obj.cid || (obj.cid = uniqueId()),
listeningTo = this._listeningTo || (this._listeningTo = {});
let listening = listeningTo[id];
// This object is not listening to any other events on `obj` yet.
// Setup the necessary references to track the listening callbacks.
if (!listening) {
const thisId = this.cid || (this.cid = uniqueId());
listening = listeningTo[id] = new ListeningTo( obj, id, thisId, listeningTo );
}
// Bind callbacks on obj, and keep track of them on listening.
internalOn( obj, name, callback, this, listening );
return this;
}
/** Bind an event to only be triggered a single time. After the first time
* the callback is invoked, its listener will be removed. If multiple events
* are passed in using the space-separated syntax, the handler will fire
* once for each event, not once for a combination of all events.
*/
once(name, callback, context) : this {
// Map the event into a `{event: once}` object.
const events = eventsApi(onceMap, {}, name, callback, this.off.bind( this ));
return this.on(events, void 0, context);
}
/** Inversion-of-control versions of `once`.*/
listenToOnce(obj : Messenger, name, callback) : this {
// Map the event into a `{event: once}` object.
const events = eventsApi(onceMap, {}, name, callback, this.stopListening.bind( this, obj ) );
return this.listenTo(obj, events);
}
/** Trigger one or many events, firing all bound callbacks. Callbacks are
* passed the same arguments as `trigger` is, apart from the event name
* (unless you're listening on `"all"`, which will cause your callback to
* receive the true name of the event as the first argument).
*/
trigger(name : string, a?, b?, c? ) : this {
if( !this._events ) return this;
switch( arguments.length ){
// Forward call to monomorphic fast-path functions.
case 1 : trigger0( this, name ); break;
case 2 : trigger1( this, name, a ); break;
case 3 : trigger2( this, name, a, b ); break;
case 4 : trigger3( this, name, a, b, c ); break;
// Trigger event with more than 3 arguments.
default :
// Passing arguments around killing performance. Convert it to array.
const allArgs = Array( arguments.length );
for( let i = 0; i < allArgs.length; i++ ){
allArgs[ i ] = arguments[ i ];
}
// Send events.
const { _events } = this;
let queue = _events[ name ];
if( queue ) _fireEventAll( queue, allArgs.slice( 1 ) );
if( queue = _events.all ) _fireEventAll( queue, allArgs );
}
return this;
}
/**
* Destructor. Stops messenger from listening to all objects,
* and stop others from listening to the messenger.
*/
dispose() : void {
this.stopListening();
this.off();
}
}
/** @hidden */
const slice = Array.prototype.slice;
/**
* Backbone 1.2 API conformant Events mixin.
*/
export const Events : Messenger = <Messenger> omit( Messenger.prototype, 'constructor', 'initialize' );
// Iterates over the standard `event, callback` (as well as the fancy multiple
// space-separated events `"change blur", callback` and jQuery-style event
// maps `{event: callback}`).
/** @hidden */
function eventsApi(iteratee, events, name, callback, opts) {
let i = 0, names;
if (name && typeof name === 'object') {
// Handle event maps.
if (callback !== void 0 && 'context' in opts && opts.context === void 0) opts.context = callback;
for (names = keys(name); i < names.length ; i++) {
events = eventsApi(iteratee, events, names[i], name[names[i]], opts);
}
} else if (name && eventSplitter.test(name)) {
// Handle space separated event names by delegating them individually.
for (names = name.split(eventSplitter); i < names.length; i++) {
events = iteratee(events, names[i], callback, opts);
}
} else {
// Finally, standard events.
events = iteratee(events, name, callback, opts);
}
return events;
};
/** @hidden */
class ListeningTo {
count : number = 0
constructor( public obj, public objId, public id, public listeningTo ){}
}
/** @hidden */
export interface ListeningToMap {
[ id : string ] : ListeningTo
}
/** @hidden */
export interface Listeners {
[ id : string ] : Messenger
}
// Guard the `listening` argument from the public API.
/** @hidden */
function internalOn(obj : Messenger, name, callback, context, listening? ) : Messenger {
obj._events = eventsApi(onApi, obj._events || {}, name,
callback, new EventHandler( context, obj, listening));
if (listening) {
const listeners = obj._listeners || (obj._listeners = {});
listeners[listening.id] = listening;
}
return obj;
};
// The reducing API that adds a callback to the `events` object.
/** @hidden */
function onApi(events : _eventsApi.EventsSubscription, name : string, callback : Function, options) : _eventsApi.EventsSubscription {
if (callback) {
const handlers = events[name],
toAdd = [ options.clone( callback ) ];
events[name] = handlers ? handlers.concat( toAdd ) : toAdd;
}
return events;
};
/** @hidden */
class | {
constructor( public context, public listeners : Listeners ){}
}
// The reducing API that removes a callback from the `events` object.
/** @hidden */
function offApi(events : _eventsApi.EventsSubscription, name, callback, options : OffOptions ) {
if (!events) return;
let i = 0, listening;
const context = options.context, listeners = options.listeners;
// Delete all events listeners and "drop" events.
if (!name && !callback && !context) {
const ids = keys(listeners);
for (; i < ids.length; i++) {
listening = listeners[ids[i]];
delete listeners[listening.id];
delete listening.listeningTo[listening.objId];
}
return {};
}
const names = name ? [name] : keys(events);
for (; i < names.length; i++) {
name = names[i];
const handlers = events[name];
// Bail out if there are no events stored.
if (!handlers) break;
// Replace events if there are any remaining. Otherwise, clean up.
const remaining = [];
for (let j = 0; j < handlers.length; j++) {
const handler = handlers[j];
if (
callback && callback !== handler.callback &&
callback !== handler.callback._callback ||
context && context !== handler.context
) {
remaining.push(handler);
} else {
listening = handler.listening;
if (listening && --listening.count === 0) {
delete listeners[listening.id];
delete listening.listeningTo[listening.objId];
}
}
}
// Update tail event if the list has any events. Otherwise, clean up.
if (remaining.length) {
events[name] = remaining;
} else {
delete events[name];
}
}
return events;
};
// Reduces the event callbacks into a map of `{event: onceWrapper}`.
// `offer` unbinds the `onceWrapper` after it has been called.
/** @hidden */
function onceMap(map, name, callback, offer) {
if (callback) {
const _once : _eventsApi.Callback = map[name] = once(function() {
offer(name, _once);
callback.apply(this, arguments);
});
_once._callback = callback;
}
return map;
};
/** @hidden */
function _fireEventAll( events : _eventsApi.EventHandler[], a ) : void {
for( let ev of events )
ev.callback.apply( ev.ctx, a );
}
| OffOptions | identifier_name |
messenger.ts | import * as Mixins from './mixins'
import * as tools from './tools'
import * as _eventsApi from './events-api'
import { EventMap, EventsDefinition } from './events-api'
const { mixins, define, extendable } = Mixins,
{ omit, once, isEmpty, keys } = tools,
{ EventHandler, trigger0, trigger1, trigger2, trigger3 } = _eventsApi;
// Regular expression used to split event strings.
const eventSplitter = /\s+/;
let _idCount = 0;
function uniqueId() : string {
return 'l' + _idCount++;
}
export { EventMap, EventsDefinition }
export interface MessengerDefinition extends Mixins.ClassDefinition {
_localEvents? : EventMap
localEvents? : EventsDefinition
}
/*************************
* Messenger is mixable class with capabilities of sending and receiving synchronous events.
* This class itself can serve as both mixin and base class.
*/
@extendable
export abstract class Messenger implements Mixins.Mixable {
// Define extendable mixin static properties.
static create : ( a : any, b? : any, c? : any ) => Messenger
static mixins : ( ...mixins : ( Mixins.Constructor<any> | {} )[] ) => Mixins.MixableConstructor< Messenger >
static mixinRules : ( mixinRules : Mixins.MixinRules ) => Mixins.MixableConstructor< Messenger >
static mixTo : ( ...args : Mixins.Constructor<any>[] ) => Mixins.MixableConstructor< Messenger >
static extend : (spec? : MessengerDefinition, statics? : {} ) => Mixins.MixableConstructor< Messenger >
static predefine : () => Mixins.MixableConstructor< Messenger >
/** @hidden */
_events : _eventsApi.EventsSubscription = void 0;
/** @hidden */
_listeners : Listeners = void 0
/** @hidden */
_listeningTo : ListeningToMap = void 0
/** Unique client-only id. */
cid : string
// Prototype-only property to manage automatic local events subscription.
/** @hidden */
_localEvents : _eventsApi.EventMap
/** @private */
static define( protoProps? : MessengerDefinition , staticProps? ) : typeof Messenger {
const spec : MessengerDefinition = omit( protoProps || {}, 'localEvents' );
if( protoProps ){
const { localEvents, _localEvents } = protoProps;
if( localEvents || _localEvents ){
const eventsMap = new EventMap( this.prototype._localEvents );
localEvents && eventsMap.addEventsMap( localEvents );
_localEvents && eventsMap.merge( _localEvents );
spec._localEvents = eventsMap;
}
}
return Mixins.Mixable.define.call( this, spec, staticProps );
}
/** @hidden */
constructor(){
this.cid = uniqueId();
this.initialize.apply( this, arguments );
}
/** Method is called at the end of the constructor */
initialize() : void {}
/** Bind an event to a `callback` function. Passing `"all"` will bind
* the callback to all events fired.
*/
on(name, callback, context?) : this {
return <this>internalOn(this, name, callback, context);
}
/** Remove one or many callbacks. If `context` is null, removes all
* callbacks with that function. If `callback` is null, removes all
* callbacks for the event. If `name` is null, removes all bound
* callbacks for all events.
*/
off(name? : string, callback? : Function, context? ) : this {
if (!this._events) return this;
this._events = eventsApi(offApi, this._events, name, callback,
new OffOptions(
context,
this._listeners )
);
return this;
}
/** Tell this object to stop listening to either specific events ... or
* to every object it's currently listening to.
*/
stopListening( obj? : Messenger, name? : string, callback? : Function ) : this {
const listeningTo = this._listeningTo;
if (!listeningTo) return this;
const ids = obj ? [obj.cid] : keys(listeningTo);
for (let i = 0; i < ids.length; i++) {
const listening = listeningTo[ids[i]];
// If listening doesn't exist, this object is not currently
// listening to obj. Break out early.
if (!listening) break;
listening.obj.off(name, callback, this);
}
if (isEmpty(listeningTo)) this._listeningTo = void 0;
return this;
}
/** Inversion-of-control versions of `on`. Tell *this* object to listen to
* an event in another object... keeping track of what it's listening to
* for easier unbinding later.
*/
listenTo(obj : Messenger, name, callback? ) : this {
if( !obj ) return this;
const id = obj.cid || (obj.cid = uniqueId()),
listeningTo = this._listeningTo || (this._listeningTo = {});
let listening = listeningTo[id];
// This object is not listening to any other events on `obj` yet.
// Setup the necessary references to track the listening callbacks.
if (!listening) {
const thisId = this.cid || (this.cid = uniqueId());
listening = listeningTo[id] = new ListeningTo( obj, id, thisId, listeningTo );
}
// Bind callbacks on obj, and keep track of them on listening.
internalOn( obj, name, callback, this, listening );
return this;
}
/** Bind an event to only be triggered a single time. After the first time
* the callback is invoked, its listener will be removed. If multiple events
* are passed in using the space-separated syntax, the handler will fire
* once for each event, not once for a combination of all events.
*/
once(name, callback, context) : this {
// Map the event into a `{event: once}` object.
const events = eventsApi(onceMap, {}, name, callback, this.off.bind( this ));
return this.on(events, void 0, context);
}
/** Inversion-of-control versions of `once`.*/
listenToOnce(obj : Messenger, name, callback) : this {
// Map the event into a `{event: once}` object.
const events = eventsApi(onceMap, {}, name, callback, this.stopListening.bind( this, obj ) );
return this.listenTo(obj, events);
}
/** Trigger one or many events, firing all bound callbacks. Callbacks are
* passed the same arguments as `trigger` is, apart from the event name
* (unless you're listening on `"all"`, which will cause your callback to
* receive the true name of the event as the first argument).
*/
trigger(name : string, a?, b?, c? ) : this {
if( !this._events ) return this;
switch( arguments.length ){
// Forward call to monomorphic fast-path functions.
case 1 : trigger0( this, name ); break;
case 2 : trigger1( this, name, a ); break;
case 3 : trigger2( this, name, a, b ); break;
case 4 : trigger3( this, name, a, b, c ); break;
// Trigger event with more than 3 arguments.
default :
// Passing arguments around killing performance. Convert it to array.
const allArgs = Array( arguments.length );
for( let i = 0; i < allArgs.length; i++ ){
allArgs[ i ] = arguments[ i ];
}
// Send events.
const { _events } = this;
let queue = _events[ name ];
if( queue ) _fireEventAll( queue, allArgs.slice( 1 ) );
if( queue = _events.all ) _fireEventAll( queue, allArgs );
}
return this;
}
/**
* Destructor. Stops messenger from listening to all objects,
* and stop others from listening to the messenger.
*/
dispose() : void {
this.stopListening();
this.off();
}
}
/** @hidden */
const slice = Array.prototype.slice;
/**
* Backbone 1.2 API conformant Events mixin.
*/
export const Events : Messenger = <Messenger> omit( Messenger.prototype, 'constructor', 'initialize' );
// Iterates over the standard `event, callback` (as well as the fancy multiple
// space-separated events `"change blur", callback` and jQuery-style event
// maps `{event: callback}`).
/** @hidden */
function eventsApi(iteratee, events, name, callback, opts) {
let i = 0, names;
if (name && typeof name === 'object') {
// Handle event maps.
if (callback !== void 0 && 'context' in opts && opts.context === void 0) opts.context = callback;
for (names = keys(name); i < names.length ; i++) {
events = eventsApi(iteratee, events, names[i], name[names[i]], opts);
}
} else if (name && eventSplitter.test(name)) {
// Handle space separated event names by delegating them individually.
for (names = name.split(eventSplitter); i < names.length; i++) {
events = iteratee(events, names[i], callback, opts);
}
} else {
// Finally, standard events.
events = iteratee(events, name, callback, opts);
}
return events; |
/** @hidden */
class ListeningTo {
count : number = 0
constructor( public obj, public objId, public id, public listeningTo ){}
}
/** @hidden */
export interface ListeningToMap {
[ id : string ] : ListeningTo
}
/** @hidden */
export interface Listeners {
[ id : string ] : Messenger
}
// Guard the `listening` argument from the public API.
/** @hidden */
function internalOn(obj : Messenger, name, callback, context, listening? ) : Messenger {
obj._events = eventsApi(onApi, obj._events || {}, name,
callback, new EventHandler( context, obj, listening));
if (listening) {
const listeners = obj._listeners || (obj._listeners = {});
listeners[listening.id] = listening;
}
return obj;
};
// The reducing API that adds a callback to the `events` object.
/** @hidden */
function onApi(events : _eventsApi.EventsSubscription, name : string, callback : Function, options) : _eventsApi.EventsSubscription {
if (callback) {
const handlers = events[name],
toAdd = [ options.clone( callback ) ];
events[name] = handlers ? handlers.concat( toAdd ) : toAdd;
}
return events;
};
/** @hidden */
class OffOptions {
constructor( public context, public listeners : Listeners ){}
}
// The reducing API that removes a callback from the `events` object.
/** @hidden */
function offApi(events : _eventsApi.EventsSubscription, name, callback, options : OffOptions ) {
if (!events) return;
let i = 0, listening;
const context = options.context, listeners = options.listeners;
// Delete all events listeners and "drop" events.
if (!name && !callback && !context) {
const ids = keys(listeners);
for (; i < ids.length; i++) {
listening = listeners[ids[i]];
delete listeners[listening.id];
delete listening.listeningTo[listening.objId];
}
return {};
}
const names = name ? [name] : keys(events);
for (; i < names.length; i++) {
name = names[i];
const handlers = events[name];
// Bail out if there are no events stored.
if (!handlers) break;
// Replace events if there are any remaining. Otherwise, clean up.
const remaining = [];
for (let j = 0; j < handlers.length; j++) {
const handler = handlers[j];
if (
callback && callback !== handler.callback &&
callback !== handler.callback._callback ||
context && context !== handler.context
) {
remaining.push(handler);
} else {
listening = handler.listening;
if (listening && --listening.count === 0) {
delete listeners[listening.id];
delete listening.listeningTo[listening.objId];
}
}
}
// Update tail event if the list has any events. Otherwise, clean up.
if (remaining.length) {
events[name] = remaining;
} else {
delete events[name];
}
}
return events;
};
// Reduces the event callbacks into a map of `{event: onceWrapper}`.
// `offer` unbinds the `onceWrapper` after it has been called.
/** @hidden */
function onceMap(map, name, callback, offer) {
if (callback) {
const _once : _eventsApi.Callback = map[name] = once(function() {
offer(name, _once);
callback.apply(this, arguments);
});
_once._callback = callback;
}
return map;
};
/** @hidden */
function _fireEventAll( events : _eventsApi.EventHandler[], a ) : void {
for( let ev of events )
ev.callback.apply( ev.ctx, a );
} | }; | random_line_split |
Client_V1.1.py | #Version 1.1
#Draft Time: Wed 16 Sep 2020 11:43
# importing the necessary libraries
from enum import Enum
from gi.repository import GLib
#class enumerates the types of media that can be displayed
class media_type_t(Enum):
MEDIA_TYPE_QRCODE = 0
MEDIA_TYPE_IMAGE = 1
MEDIA_TYPE_GIF = 2
#class enumerates the types of screens
class screen_type_t(Enum):
SCREEN_TYPE_S1 = 0
SCREEN_TYPE_S2 = 1
#class enumerates the types of cards
class card_type_t_100(Enum):
CARD_TYPE_W100_BLUE_SPLASH = 0
CARD_TYPE_W100_GRAY_STRING3 = 1
CARD_TYPE_W100_GRAY_MEDIA = 2
CARD_TYPE_W100_GRAY_MEDIA_STRING3 = 3
CARD_TYPE_W100_RED_MEDIA_STRING3 = 4
CARD_TYPE_W100_USERCOLOR_MEDIA_STRING_3 = 5
class card_type_t_50(Enum):
CARD_TYPE_W50_GRAY_MEDIA_STRING2 = 6
CARD_TYPE_W50_GREEN_MEDIA_STRING2 = 7
CARD_TYPE_W50_BLUE_MEDIA_STRING2 = 8
CARD_TYPE_W50_RED_MEDIA_STRING2 = 9
CARD_TYPE_W50_AMBER_MEDIA_STRING2 = 10
CARD_TYPE_W50_GREEN_TITLE_STRING4 = 11
CARD_TYPE_W50_GREEN_TITLE_STRING3 = 12
CARD_TYPE_W50_USERCOLOR_MEDIA_STRING2 = 13
class screen_ui_t():
"""
Method 1
This reads the type of screen from the user
"""
screen_type = 0
def screen_type(self):
global screen_type
screen_type = int(input('Enter a screen type (0=SCREEN_TYPE_S1 / 1=SCREEN_TYPE_S2): '))
if screen_type == 0:
print('Screen set to: {}\n'.format(screen_type_t(screen_type)))
elif screen_type == 1:
print('Screen set to: {}\n'.format(screen_type_t(screen_type)))
else:
print('Invalid input\n')
"""
Method 2
This reads and sets the Station's Serial Address
"""
def station_info_t(self):
serial = str(input('Enter Serial number of station: '))
#reads Serial number of the station
print('Serial address set as: {}\n'.format(serial))
"""
Method 3
This sets or clears the header visiblity on the screen
"""
def HEADER_VIS(self):
header = int(input('Header visibility (0=HIDE/1=SHOW): '))
#reads the choice to show or hide header (sets Bool values based on user choice)
if header == 0:
header_visibility = False
print('Header set to HIDE\n')
elif header == 1:
header_visibility = True
print('Header set to SHOW\n')
else:
print('Invalid input\n')
exit()
"""
Child Class 1
This reads Card parameters from the user and sets the same to each card
"""
class card_ui_t:
"""
Child-Method 1
This reads and sets the type of card to be displayed on the screen
"""
card_type = 0 #default card type
def Card_Type(self):
#parameter 1
global screen_type
global card_type
if screen_type == 0:
i = 0 #to display count of ENUM listing
for types in card_type_t_100:
print('{}'.format(i), types)
i += 1
card_type = int(input('Enter ENUM index (0-5)\nSelect Card type: '))
#reads card type from user
#assigns through ENUM index
try:
print('Card type set to: ')
print(card_type_t_100(card_type))
except:
print('Input value does not match any defined card types')
exit()
elif screen_type == 1:
i = 6 #to display count of ENUM listing
for types in card_type_t_50:
print('{}'.format(i), types)
i += 1
card_type = int(input('Enter ENUM index (6-13)\nSelect Card type: '))
#reads card type from user
#assigns through ENUM index
try:
print('Card type set to: ')
print(card_type_t_50(card_type))
except:
print('Input value does not match any defined card types')
exit()
"""
Child-Method 2
This sets or clears the header visiblity on the screen
"""
#parameter 2
#this parameter is read only if the user chooses
#5 card_type_t.CARD_TYPE_W100_USERCOLOR_MEDIA_STRING_3 or
#11 card_type_t.CARD_TYPE_W50_USERCOLOR_MEDIA_STRING2
def | (self):
global card_type
if card_type == 5 or card_type == 13:
card_bgcolor = int(input('\nEnter Background Color: '))
#reads background color from user
#assumed to be a integer value
print('\nBackground color set to: {}\n'.format(card_bgcolor))
else:
print('\nBackground color set by default as per card formatting\n')
"""
Child-Method 3
This reads the slot index of the card from the user
"""
slot_index = -1
def SlotIndex(self):
global slot_index
global screen_type
#parameter 3
if screen_type == 1:
slot_index = int(input('Enter the slot index (0=LEFT/1=RIGHT): '))
print('Slot index set as: {}\n'.format(slot_index))
"""
Child-Method 4
This Shows or hides the Flipper
"""
#parameter 4
flipper_visibility = -1
def FlipperVisibility(self):
global screen_type
if screen_type == 1:
choice = int(input('FLIPPER Visibility (0=HIDE/1=SHOW): '))
#reads users choice to either show or hide flipper.
#sets Bool value appropriately
global flipper_visibility
if choice == 0:
flipper_visibility = False
print('Flipper set to HIDE\n')
elif choice == 1:
flipper_visibility = True
print('Flipper set to SHOW\n')
else:
print('Invalid choice\n')
print('Flipper set to default value: HIDE\n')
flipper_visibility = False
else:
flipper_visibility = False
"""
Child-Method 5
This sets the flipper between two parameters
"""
#parameter 5
def FlipIndex(self):
global flipper_visibility
if flipper_visibility == True:
flipper_index = int(input('Enter the flipper index(0/1): '))
#reads index of Flipper from user
if flipper_index != 0 or flipper_index != 1:
print('Invalid index\n')
exit()
print('Flipper index set to: {}\n'.format(flipper_index))
"""
Child-Method 6
This reads the type of media file to display
"""
def MediaType(self):
#parameter 6
global card_type
if card_type != 0 or card_type != 1 or card_type != 11 or card_type != 12:
i = 0 #to display count of ENUM listing
for media in media_type_t:
print('{}'.format(i), media)
i += 1
media_type = int(input('Enter ENUM index (0-2)\nSelect Media type: '))
#reads the media type from user through ENUM index
print('Media type set to: ')
print(media_type_t(media_type))
#parameter 7
#all paths are hardcoded now as recommended
if media_type == 0:
media_path = "directory0/directory1/directory4/QR_file.ext"
print('\nMedia Path set to: {}\n'.format(media_path))
elif media_type == 1:
media_path = "directory0/directory1/directory4/IMAGE_file.ext"
print('\nMedia Path set to: {}\n'.format(media_path))
elif media_type == 2:
media_path = "directory0/directory1/directory4/GIF_file.ext"
print('\nMedia Path set to: {}\n'.format(media_path))
else:
print('Invalid Media Type\n')
exit()
else:
print('Card type does not support any media\n')
"""
Child-Method 7
This reads the title of the card
"""
def Title(self):
#parameter 8
#reads the title for the card selected
global card_type
global slot_index
if card_type == 11 or card_type == 12:
title = str(input('Enter the title of card {}: '.format(slot_index)))
print('Title for Card {} set as: {}'.format(slot_index,title))
"""
Child-Method 8
This relevant number of strings for the card
"""
#parameter 8,9,10,11
#Only strings relevant to the user selected cards are read.
#Other strings are ignored
#cards 1 and 2 expect no string
def String(self):
global card_type
if card_type == 3 or card_type == 4 or card_type == 5 or card_type == 1 or card_type == 12:
#cards 2, 3, 4 5 and 13 expect 3 strings
print('Selected card expects 3 strings\n')
string1 = str(input('Enter string 1: '))
string2 = str(input('Enter string 2: '))
string3 = str(input('Enter string 3: '))
print('String 1: {}\nString 2: {}\nString 3: {}'.format(string1, string2, string3))
elif card_type == 6 or card_type == 7 or card_type == 8 or card_type == 9 or card_type == 10 or card_type == 13:
#cards 6, 7, 8, 9, 10, 11 expect 2 strings
print ('Selected card expects 2 strings\n')
string1 = str(input('Enter string 1: '))
string2 = str(input('Enter string 2: '))
print('String 1: {}\nString 2: {}\n'.format(string1, string2))
elif card_type == 11:
print('Selected card expects 4 strings\n')
string1 = str(input('Enter string 1: '))
string2 = str(input('Enter string 2: '))
string3 = str(input('Enter string 3: '))
string4 = str(input('Enter string 4: '))
print('String 1: {}\nString 2: {}\nString 3: {}\nString 4: {}'.format(string1, string2, string3, string4))
else:
print('Selected card {} doesnot expect any strings\n'.format(card_type_t(card_type)))
#Main Function outside of Class Definitions
Screen = screen_ui_t() #creating an object of the class screen_ui_t
#This class initialises all details relating to the Screen/Display
Screen.screen_type() #method call for screen_type
Screen.station_info_t() #method call to set serial number of station
Screen.HEADER_VIS() #method call to show or hide header
Card = Screen.card_ui_t() #creating an object of child class card_ui_t
#this class defines all paramters relating to the Card details
Card.Card_Type() #method call to set the type of the card
Card.BackGround_Color() #method call to set the background color of the card (essential only for two cards)
Card.SlotIndex() #method call to set the slot_index
Card.FlipperVisibility() #method to show or hide flipper
Card.FlipIndex() #method to set the index of the flipper
Card.MediaType() #method to choose the media type to display on the card
Card.Title() #method to set the title of the card
Card.String() #method to set the strings to be displayed on each card
| BackGround_Color | identifier_name |
Client_V1.1.py | #Version 1.1
#Draft Time: Wed 16 Sep 2020 11:43
# importing the necessary libraries
from enum import Enum
from gi.repository import GLib
#class enumerates the types of media that can be displayed
class media_type_t(Enum):
MEDIA_TYPE_QRCODE = 0
MEDIA_TYPE_IMAGE = 1
MEDIA_TYPE_GIF = 2
#class enumerates the types of screens
class screen_type_t(Enum):
SCREEN_TYPE_S1 = 0
SCREEN_TYPE_S2 = 1
#class enumerates the types of cards
class card_type_t_100(Enum):
CARD_TYPE_W100_BLUE_SPLASH = 0
CARD_TYPE_W100_GRAY_STRING3 = 1
CARD_TYPE_W100_GRAY_MEDIA = 2
CARD_TYPE_W100_GRAY_MEDIA_STRING3 = 3
CARD_TYPE_W100_RED_MEDIA_STRING3 = 4
CARD_TYPE_W100_USERCOLOR_MEDIA_STRING_3 = 5
class card_type_t_50(Enum):
CARD_TYPE_W50_GRAY_MEDIA_STRING2 = 6
CARD_TYPE_W50_GREEN_MEDIA_STRING2 = 7
CARD_TYPE_W50_BLUE_MEDIA_STRING2 = 8
CARD_TYPE_W50_RED_MEDIA_STRING2 = 9
CARD_TYPE_W50_AMBER_MEDIA_STRING2 = 10
CARD_TYPE_W50_GREEN_TITLE_STRING4 = 11
CARD_TYPE_W50_GREEN_TITLE_STRING3 = 12
CARD_TYPE_W50_USERCOLOR_MEDIA_STRING2 = 13
class screen_ui_t():
"""
Method 1
This reads the type of screen from the user
"""
screen_type = 0
def screen_type(self):
global screen_type
screen_type = int(input('Enter a screen type (0=SCREEN_TYPE_S1 / 1=SCREEN_TYPE_S2): '))
if screen_type == 0:
print('Screen set to: {}\n'.format(screen_type_t(screen_type)))
elif screen_type == 1:
print('Screen set to: {}\n'.format(screen_type_t(screen_type)))
else:
print('Invalid input\n')
"""
Method 2
This reads and sets the Station's Serial Address
"""
def station_info_t(self):
serial = str(input('Enter Serial number of station: '))
#reads Serial number of the station
print('Serial address set as: {}\n'.format(serial))
"""
Method 3
This sets or clears the header visiblity on the screen
"""
def HEADER_VIS(self):
header = int(input('Header visibility (0=HIDE/1=SHOW): '))
#reads the choice to show or hide header (sets Bool values based on user choice)
if header == 0:
header_visibility = False
print('Header set to HIDE\n')
elif header == 1:
header_visibility = True
print('Header set to SHOW\n')
else:
print('Invalid input\n')
exit()
"""
Child Class 1
This reads Card parameters from the user and sets the same to each card
"""
class card_ui_t:
"""
Child-Method 1
This reads and sets the type of card to be displayed on the screen
"""
card_type = 0 #default card type
def Card_Type(self):
#parameter 1
global screen_type
global card_type
if screen_type == 0:
i = 0 #to display count of ENUM listing
for types in card_type_t_100:
print('{}'.format(i), types)
i += 1
card_type = int(input('Enter ENUM index (0-5)\nSelect Card type: '))
#reads card type from user
#assigns through ENUM index
try:
print('Card type set to: ')
print(card_type_t_100(card_type)) | print('Input value does not match any defined card types')
exit()
elif screen_type == 1:
i = 6 #to display count of ENUM listing
for types in card_type_t_50:
print('{}'.format(i), types)
i += 1
card_type = int(input('Enter ENUM index (6-13)\nSelect Card type: '))
#reads card type from user
#assigns through ENUM index
try:
print('Card type set to: ')
print(card_type_t_50(card_type))
except:
print('Input value does not match any defined card types')
exit()
"""
Child-Method 2
This sets or clears the header visiblity on the screen
"""
#parameter 2
#this parameter is read only if the user chooses
#5 card_type_t.CARD_TYPE_W100_USERCOLOR_MEDIA_STRING_3 or
#11 card_type_t.CARD_TYPE_W50_USERCOLOR_MEDIA_STRING2
def BackGround_Color(self):
global card_type
if card_type == 5 or card_type == 13:
card_bgcolor = int(input('\nEnter Background Color: '))
#reads background color from user
#assumed to be a integer value
print('\nBackground color set to: {}\n'.format(card_bgcolor))
else:
print('\nBackground color set by default as per card formatting\n')
"""
Child-Method 3
This reads the slot index of the card from the user
"""
slot_index = -1
def SlotIndex(self):
global slot_index
global screen_type
#parameter 3
if screen_type == 1:
slot_index = int(input('Enter the slot index (0=LEFT/1=RIGHT): '))
print('Slot index set as: {}\n'.format(slot_index))
"""
Child-Method 4
This Shows or hides the Flipper
"""
#parameter 4
flipper_visibility = -1
def FlipperVisibility(self):
global screen_type
if screen_type == 1:
choice = int(input('FLIPPER Visibility (0=HIDE/1=SHOW): '))
#reads users choice to either show or hide flipper.
#sets Bool value appropriately
global flipper_visibility
if choice == 0:
flipper_visibility = False
print('Flipper set to HIDE\n')
elif choice == 1:
flipper_visibility = True
print('Flipper set to SHOW\n')
else:
print('Invalid choice\n')
print('Flipper set to default value: HIDE\n')
flipper_visibility = False
else:
flipper_visibility = False
"""
Child-Method 5
This sets the flipper between two parameters
"""
#parameter 5
def FlipIndex(self):
global flipper_visibility
if flipper_visibility == True:
flipper_index = int(input('Enter the flipper index(0/1): '))
#reads index of Flipper from user
if flipper_index != 0 or flipper_index != 1:
print('Invalid index\n')
exit()
print('Flipper index set to: {}\n'.format(flipper_index))
"""
Child-Method 6
This reads the type of media file to display
"""
def MediaType(self):
#parameter 6
global card_type
if card_type != 0 or card_type != 1 or card_type != 11 or card_type != 12:
i = 0 #to display count of ENUM listing
for media in media_type_t:
print('{}'.format(i), media)
i += 1
media_type = int(input('Enter ENUM index (0-2)\nSelect Media type: '))
#reads the media type from user through ENUM index
print('Media type set to: ')
print(media_type_t(media_type))
#parameter 7
#all paths are hardcoded now as recommended
if media_type == 0:
media_path = "directory0/directory1/directory4/QR_file.ext"
print('\nMedia Path set to: {}\n'.format(media_path))
elif media_type == 1:
media_path = "directory0/directory1/directory4/IMAGE_file.ext"
print('\nMedia Path set to: {}\n'.format(media_path))
elif media_type == 2:
media_path = "directory0/directory1/directory4/GIF_file.ext"
print('\nMedia Path set to: {}\n'.format(media_path))
else:
print('Invalid Media Type\n')
exit()
else:
print('Card type does not support any media\n')
"""
Child-Method 7
This reads the title of the card
"""
def Title(self):
#parameter 8
#reads the title for the card selected
global card_type
global slot_index
if card_type == 11 or card_type == 12:
title = str(input('Enter the title of card {}: '.format(slot_index)))
print('Title for Card {} set as: {}'.format(slot_index,title))
"""
Child-Method 8
This relevant number of strings for the card
"""
#parameter 8,9,10,11
#Only strings relevant to the user selected cards are read.
#Other strings are ignored
#cards 1 and 2 expect no string
def String(self):
global card_type
if card_type == 3 or card_type == 4 or card_type == 5 or card_type == 1 or card_type == 12:
#cards 2, 3, 4 5 and 13 expect 3 strings
print('Selected card expects 3 strings\n')
string1 = str(input('Enter string 1: '))
string2 = str(input('Enter string 2: '))
string3 = str(input('Enter string 3: '))
print('String 1: {}\nString 2: {}\nString 3: {}'.format(string1, string2, string3))
elif card_type == 6 or card_type == 7 or card_type == 8 or card_type == 9 or card_type == 10 or card_type == 13:
#cards 6, 7, 8, 9, 10, 11 expect 2 strings
print ('Selected card expects 2 strings\n')
string1 = str(input('Enter string 1: '))
string2 = str(input('Enter string 2: '))
print('String 1: {}\nString 2: {}\n'.format(string1, string2))
elif card_type == 11:
print('Selected card expects 4 strings\n')
string1 = str(input('Enter string 1: '))
string2 = str(input('Enter string 2: '))
string3 = str(input('Enter string 3: '))
string4 = str(input('Enter string 4: '))
print('String 1: {}\nString 2: {}\nString 3: {}\nString 4: {}'.format(string1, string2, string3, string4))
else:
print('Selected card {} doesnot expect any strings\n'.format(card_type_t(card_type)))
#Main Function outside of Class Definitions
Screen = screen_ui_t() #creating an object of the class screen_ui_t
#This class initialises all details relating to the Screen/Display
Screen.screen_type() #method call for screen_type
Screen.station_info_t() #method call to set serial number of station
Screen.HEADER_VIS() #method call to show or hide header
Card = Screen.card_ui_t() #creating an object of child class card_ui_t
#this class defines all paramters relating to the Card details
Card.Card_Type() #method call to set the type of the card
Card.BackGround_Color() #method call to set the background color of the card (essential only for two cards)
Card.SlotIndex() #method call to set the slot_index
Card.FlipperVisibility() #method to show or hide flipper
Card.FlipIndex() #method to set the index of the flipper
Card.MediaType() #method to choose the media type to display on the card
Card.Title() #method to set the title of the card
Card.String() #method to set the strings to be displayed on each card | except: | random_line_split |
Client_V1.1.py | #Version 1.1
#Draft Time: Wed 16 Sep 2020 11:43
# importing the necessary libraries
from enum import Enum
from gi.repository import GLib
#class enumerates the types of media that can be displayed
class media_type_t(Enum):
MEDIA_TYPE_QRCODE = 0
MEDIA_TYPE_IMAGE = 1
MEDIA_TYPE_GIF = 2
#class enumerates the types of screens
class screen_type_t(Enum):
SCREEN_TYPE_S1 = 0
SCREEN_TYPE_S2 = 1
#class enumerates the types of cards
class card_type_t_100(Enum):
CARD_TYPE_W100_BLUE_SPLASH = 0
CARD_TYPE_W100_GRAY_STRING3 = 1
CARD_TYPE_W100_GRAY_MEDIA = 2
CARD_TYPE_W100_GRAY_MEDIA_STRING3 = 3
CARD_TYPE_W100_RED_MEDIA_STRING3 = 4
CARD_TYPE_W100_USERCOLOR_MEDIA_STRING_3 = 5
class card_type_t_50(Enum):
CARD_TYPE_W50_GRAY_MEDIA_STRING2 = 6
CARD_TYPE_W50_GREEN_MEDIA_STRING2 = 7
CARD_TYPE_W50_BLUE_MEDIA_STRING2 = 8
CARD_TYPE_W50_RED_MEDIA_STRING2 = 9
CARD_TYPE_W50_AMBER_MEDIA_STRING2 = 10
CARD_TYPE_W50_GREEN_TITLE_STRING4 = 11
CARD_TYPE_W50_GREEN_TITLE_STRING3 = 12
CARD_TYPE_W50_USERCOLOR_MEDIA_STRING2 = 13
class screen_ui_t():
"""
Method 1
This reads the type of screen from the user
"""
screen_type = 0
def screen_type(self):
global screen_type
screen_type = int(input('Enter a screen type (0=SCREEN_TYPE_S1 / 1=SCREEN_TYPE_S2): '))
if screen_type == 0:
print('Screen set to: {}\n'.format(screen_type_t(screen_type)))
elif screen_type == 1:
print('Screen set to: {}\n'.format(screen_type_t(screen_type)))
else:
print('Invalid input\n')
"""
Method 2
This reads and sets the Station's Serial Address
"""
def station_info_t(self):
serial = str(input('Enter Serial number of station: '))
#reads Serial number of the station
print('Serial address set as: {}\n'.format(serial))
"""
Method 3
This sets or clears the header visiblity on the screen
"""
def HEADER_VIS(self):
header = int(input('Header visibility (0=HIDE/1=SHOW): '))
#reads the choice to show or hide header (sets Bool values based on user choice)
if header == 0:
header_visibility = False
print('Header set to HIDE\n')
elif header == 1:
header_visibility = True
print('Header set to SHOW\n')
else:
print('Invalid input\n')
exit()
"""
Child Class 1
This reads Card parameters from the user and sets the same to each card
"""
class card_ui_t:
"""
Child-Method 1
This reads and sets the type of card to be displayed on the screen
"""
card_type = 0 #default card type
def Card_Type(self):
#parameter 1
global screen_type
global card_type
if screen_type == 0:
i = 0 #to display count of ENUM listing
for types in card_type_t_100:
print('{}'.format(i), types)
i += 1
card_type = int(input('Enter ENUM index (0-5)\nSelect Card type: '))
#reads card type from user
#assigns through ENUM index
try:
print('Card type set to: ')
print(card_type_t_100(card_type))
except:
print('Input value does not match any defined card types')
exit()
elif screen_type == 1:
i = 6 #to display count of ENUM listing
for types in card_type_t_50:
print('{}'.format(i), types)
i += 1
card_type = int(input('Enter ENUM index (6-13)\nSelect Card type: '))
#reads card type from user
#assigns through ENUM index
try:
print('Card type set to: ')
print(card_type_t_50(card_type))
except:
print('Input value does not match any defined card types')
exit()
"""
Child-Method 2
This sets or clears the header visiblity on the screen
"""
#parameter 2
#this parameter is read only if the user chooses
#5 card_type_t.CARD_TYPE_W100_USERCOLOR_MEDIA_STRING_3 or
#11 card_type_t.CARD_TYPE_W50_USERCOLOR_MEDIA_STRING2
def BackGround_Color(self):
global card_type
if card_type == 5 or card_type == 13:
card_bgcolor = int(input('\nEnter Background Color: '))
#reads background color from user
#assumed to be a integer value
print('\nBackground color set to: {}\n'.format(card_bgcolor))
else:
print('\nBackground color set by default as per card formatting\n')
"""
Child-Method 3
This reads the slot index of the card from the user
"""
slot_index = -1
def SlotIndex(self):
|
"""
Child-Method 4
This Shows or hides the Flipper
"""
#parameter 4
flipper_visibility = -1
def FlipperVisibility(self):
global screen_type
if screen_type == 1:
choice = int(input('FLIPPER Visibility (0=HIDE/1=SHOW): '))
#reads users choice to either show or hide flipper.
#sets Bool value appropriately
global flipper_visibility
if choice == 0:
flipper_visibility = False
print('Flipper set to HIDE\n')
elif choice == 1:
flipper_visibility = True
print('Flipper set to SHOW\n')
else:
print('Invalid choice\n')
print('Flipper set to default value: HIDE\n')
flipper_visibility = False
else:
flipper_visibility = False
"""
Child-Method 5
This sets the flipper between two parameters
"""
#parameter 5
def FlipIndex(self):
global flipper_visibility
if flipper_visibility == True:
flipper_index = int(input('Enter the flipper index(0/1): '))
#reads index of Flipper from user
if flipper_index != 0 or flipper_index != 1:
print('Invalid index\n')
exit()
print('Flipper index set to: {}\n'.format(flipper_index))
"""
Child-Method 6
This reads the type of media file to display
"""
def MediaType(self):
#parameter 6
global card_type
if card_type != 0 or card_type != 1 or card_type != 11 or card_type != 12:
i = 0 #to display count of ENUM listing
for media in media_type_t:
print('{}'.format(i), media)
i += 1
media_type = int(input('Enter ENUM index (0-2)\nSelect Media type: '))
#reads the media type from user through ENUM index
print('Media type set to: ')
print(media_type_t(media_type))
#parameter 7
#all paths are hardcoded now as recommended
if media_type == 0:
media_path = "directory0/directory1/directory4/QR_file.ext"
print('\nMedia Path set to: {}\n'.format(media_path))
elif media_type == 1:
media_path = "directory0/directory1/directory4/IMAGE_file.ext"
print('\nMedia Path set to: {}\n'.format(media_path))
elif media_type == 2:
media_path = "directory0/directory1/directory4/GIF_file.ext"
print('\nMedia Path set to: {}\n'.format(media_path))
else:
print('Invalid Media Type\n')
exit()
else:
print('Card type does not support any media\n')
"""
Child-Method 7
This reads the title of the card
"""
def Title(self):
#parameter 8
#reads the title for the card selected
global card_type
global slot_index
if card_type == 11 or card_type == 12:
title = str(input('Enter the title of card {}: '.format(slot_index)))
print('Title for Card {} set as: {}'.format(slot_index,title))
"""
Child-Method 8
This relevant number of strings for the card
"""
#parameter 8,9,10,11
#Only strings relevant to the user selected cards are read.
#Other strings are ignored
#cards 1 and 2 expect no string
def String(self):
global card_type
if card_type == 3 or card_type == 4 or card_type == 5 or card_type == 1 or card_type == 12:
#cards 2, 3, 4 5 and 13 expect 3 strings
print('Selected card expects 3 strings\n')
string1 = str(input('Enter string 1: '))
string2 = str(input('Enter string 2: '))
string3 = str(input('Enter string 3: '))
print('String 1: {}\nString 2: {}\nString 3: {}'.format(string1, string2, string3))
elif card_type == 6 or card_type == 7 or card_type == 8 or card_type == 9 or card_type == 10 or card_type == 13:
#cards 6, 7, 8, 9, 10, 11 expect 2 strings
print ('Selected card expects 2 strings\n')
string1 = str(input('Enter string 1: '))
string2 = str(input('Enter string 2: '))
print('String 1: {}\nString 2: {}\n'.format(string1, string2))
elif card_type == 11:
print('Selected card expects 4 strings\n')
string1 = str(input('Enter string 1: '))
string2 = str(input('Enter string 2: '))
string3 = str(input('Enter string 3: '))
string4 = str(input('Enter string 4: '))
print('String 1: {}\nString 2: {}\nString 3: {}\nString 4: {}'.format(string1, string2, string3, string4))
else:
print('Selected card {} doesnot expect any strings\n'.format(card_type_t(card_type)))
#Main Function outside of Class Definitions
Screen = screen_ui_t() #creating an object of the class screen_ui_t
#This class initialises all details relating to the Screen/Display
Screen.screen_type() #method call for screen_type
Screen.station_info_t() #method call to set serial number of station
Screen.HEADER_VIS() #method call to show or hide header
Card = Screen.card_ui_t() #creating an object of child class card_ui_t
#this class defines all paramters relating to the Card details
Card.Card_Type() #method call to set the type of the card
Card.BackGround_Color() #method call to set the background color of the card (essential only for two cards)
Card.SlotIndex() #method call to set the slot_index
Card.FlipperVisibility() #method to show or hide flipper
Card.FlipIndex() #method to set the index of the flipper
Card.MediaType() #method to choose the media type to display on the card
Card.Title() #method to set the title of the card
Card.String() #method to set the strings to be displayed on each card
| global slot_index
global screen_type
#parameter 3
if screen_type == 1:
slot_index = int(input('Enter the slot index (0=LEFT/1=RIGHT): '))
print('Slot index set as: {}\n'.format(slot_index)) | identifier_body |
Client_V1.1.py | #Version 1.1
#Draft Time: Wed 16 Sep 2020 11:43
# importing the necessary libraries
from enum import Enum
from gi.repository import GLib
#class enumerates the types of media that can be displayed
class media_type_t(Enum):
MEDIA_TYPE_QRCODE = 0
MEDIA_TYPE_IMAGE = 1
MEDIA_TYPE_GIF = 2
#class enumerates the types of screens
class screen_type_t(Enum):
SCREEN_TYPE_S1 = 0
SCREEN_TYPE_S2 = 1
#class enumerates the types of cards
class card_type_t_100(Enum):
CARD_TYPE_W100_BLUE_SPLASH = 0
CARD_TYPE_W100_GRAY_STRING3 = 1
CARD_TYPE_W100_GRAY_MEDIA = 2
CARD_TYPE_W100_GRAY_MEDIA_STRING3 = 3
CARD_TYPE_W100_RED_MEDIA_STRING3 = 4
CARD_TYPE_W100_USERCOLOR_MEDIA_STRING_3 = 5
class card_type_t_50(Enum):
CARD_TYPE_W50_GRAY_MEDIA_STRING2 = 6
CARD_TYPE_W50_GREEN_MEDIA_STRING2 = 7
CARD_TYPE_W50_BLUE_MEDIA_STRING2 = 8
CARD_TYPE_W50_RED_MEDIA_STRING2 = 9
CARD_TYPE_W50_AMBER_MEDIA_STRING2 = 10
CARD_TYPE_W50_GREEN_TITLE_STRING4 = 11
CARD_TYPE_W50_GREEN_TITLE_STRING3 = 12
CARD_TYPE_W50_USERCOLOR_MEDIA_STRING2 = 13
class screen_ui_t():
"""
Method 1
This reads the type of screen from the user
"""
screen_type = 0
def screen_type(self):
global screen_type
screen_type = int(input('Enter a screen type (0=SCREEN_TYPE_S1 / 1=SCREEN_TYPE_S2): '))
if screen_type == 0:
print('Screen set to: {}\n'.format(screen_type_t(screen_type)))
elif screen_type == 1:
print('Screen set to: {}\n'.format(screen_type_t(screen_type)))
else:
print('Invalid input\n')
"""
Method 2
This reads and sets the Station's Serial Address
"""
def station_info_t(self):
serial = str(input('Enter Serial number of station: '))
#reads Serial number of the station
print('Serial address set as: {}\n'.format(serial))
"""
Method 3
This sets or clears the header visiblity on the screen
"""
def HEADER_VIS(self):
header = int(input('Header visibility (0=HIDE/1=SHOW): '))
#reads the choice to show or hide header (sets Bool values based on user choice)
if header == 0:
header_visibility = False
print('Header set to HIDE\n')
elif header == 1:
header_visibility = True
print('Header set to SHOW\n')
else:
print('Invalid input\n')
exit()
"""
Child Class 1
This reads Card parameters from the user and sets the same to each card
"""
class card_ui_t:
"""
Child-Method 1
This reads and sets the type of card to be displayed on the screen
"""
card_type = 0 #default card type
def Card_Type(self):
#parameter 1
global screen_type
global card_type
if screen_type == 0:
i = 0 #to display count of ENUM listing
for types in card_type_t_100:
print('{}'.format(i), types)
i += 1
card_type = int(input('Enter ENUM index (0-5)\nSelect Card type: '))
#reads card type from user
#assigns through ENUM index
try:
print('Card type set to: ')
print(card_type_t_100(card_type))
except:
print('Input value does not match any defined card types')
exit()
elif screen_type == 1:
i = 6 #to display count of ENUM listing
for types in card_type_t_50:
print('{}'.format(i), types)
i += 1
card_type = int(input('Enter ENUM index (6-13)\nSelect Card type: '))
#reads card type from user
#assigns through ENUM index
try:
print('Card type set to: ')
print(card_type_t_50(card_type))
except:
print('Input value does not match any defined card types')
exit()
"""
Child-Method 2
This sets or clears the header visiblity on the screen
"""
#parameter 2
#this parameter is read only if the user chooses
#5 card_type_t.CARD_TYPE_W100_USERCOLOR_MEDIA_STRING_3 or
#11 card_type_t.CARD_TYPE_W50_USERCOLOR_MEDIA_STRING2
def BackGround_Color(self):
global card_type
if card_type == 5 or card_type == 13:
card_bgcolor = int(input('\nEnter Background Color: '))
#reads background color from user
#assumed to be a integer value
print('\nBackground color set to: {}\n'.format(card_bgcolor))
else:
print('\nBackground color set by default as per card formatting\n')
"""
Child-Method 3
This reads the slot index of the card from the user
"""
slot_index = -1
def SlotIndex(self):
global slot_index
global screen_type
#parameter 3
if screen_type == 1:
|
"""
Child-Method 4
This Shows or hides the Flipper
"""
#parameter 4
flipper_visibility = -1
def FlipperVisibility(self):
global screen_type
if screen_type == 1:
choice = int(input('FLIPPER Visibility (0=HIDE/1=SHOW): '))
#reads users choice to either show or hide flipper.
#sets Bool value appropriately
global flipper_visibility
if choice == 0:
flipper_visibility = False
print('Flipper set to HIDE\n')
elif choice == 1:
flipper_visibility = True
print('Flipper set to SHOW\n')
else:
print('Invalid choice\n')
print('Flipper set to default value: HIDE\n')
flipper_visibility = False
else:
flipper_visibility = False
"""
Child-Method 5
This sets the flipper between two parameters
"""
#parameter 5
def FlipIndex(self):
global flipper_visibility
if flipper_visibility == True:
flipper_index = int(input('Enter the flipper index(0/1): '))
#reads index of Flipper from user
if flipper_index != 0 or flipper_index != 1:
print('Invalid index\n')
exit()
print('Flipper index set to: {}\n'.format(flipper_index))
"""
Child-Method 6
This reads the type of media file to display
"""
def MediaType(self):
#parameter 6
global card_type
if card_type != 0 or card_type != 1 or card_type != 11 or card_type != 12:
i = 0 #to display count of ENUM listing
for media in media_type_t:
print('{}'.format(i), media)
i += 1
media_type = int(input('Enter ENUM index (0-2)\nSelect Media type: '))
#reads the media type from user through ENUM index
print('Media type set to: ')
print(media_type_t(media_type))
#parameter 7
#all paths are hardcoded now as recommended
if media_type == 0:
media_path = "directory0/directory1/directory4/QR_file.ext"
print('\nMedia Path set to: {}\n'.format(media_path))
elif media_type == 1:
media_path = "directory0/directory1/directory4/IMAGE_file.ext"
print('\nMedia Path set to: {}\n'.format(media_path))
elif media_type == 2:
media_path = "directory0/directory1/directory4/GIF_file.ext"
print('\nMedia Path set to: {}\n'.format(media_path))
else:
print('Invalid Media Type\n')
exit()
else:
print('Card type does not support any media\n')
"""
Child-Method 7
This reads the title of the card
"""
def Title(self):
#parameter 8
#reads the title for the card selected
global card_type
global slot_index
if card_type == 11 or card_type == 12:
title = str(input('Enter the title of card {}: '.format(slot_index)))
print('Title for Card {} set as: {}'.format(slot_index,title))
"""
Child-Method 8
This relevant number of strings for the card
"""
#parameter 8,9,10,11
#Only strings relevant to the user selected cards are read.
#Other strings are ignored
#cards 1 and 2 expect no string
def String(self):
global card_type
if card_type == 3 or card_type == 4 or card_type == 5 or card_type == 1 or card_type == 12:
#cards 2, 3, 4 5 and 13 expect 3 strings
print('Selected card expects 3 strings\n')
string1 = str(input('Enter string 1: '))
string2 = str(input('Enter string 2: '))
string3 = str(input('Enter string 3: '))
print('String 1: {}\nString 2: {}\nString 3: {}'.format(string1, string2, string3))
elif card_type == 6 or card_type == 7 or card_type == 8 or card_type == 9 or card_type == 10 or card_type == 13:
#cards 6, 7, 8, 9, 10, 11 expect 2 strings
print ('Selected card expects 2 strings\n')
string1 = str(input('Enter string 1: '))
string2 = str(input('Enter string 2: '))
print('String 1: {}\nString 2: {}\n'.format(string1, string2))
elif card_type == 11:
print('Selected card expects 4 strings\n')
string1 = str(input('Enter string 1: '))
string2 = str(input('Enter string 2: '))
string3 = str(input('Enter string 3: '))
string4 = str(input('Enter string 4: '))
print('String 1: {}\nString 2: {}\nString 3: {}\nString 4: {}'.format(string1, string2, string3, string4))
else:
print('Selected card {} doesnot expect any strings\n'.format(card_type_t(card_type)))
#Main Function outside of Class Definitions
Screen = screen_ui_t() #creating an object of the class screen_ui_t
#This class initialises all details relating to the Screen/Display
Screen.screen_type() #method call for screen_type
Screen.station_info_t() #method call to set serial number of station
Screen.HEADER_VIS() #method call to show or hide header
Card = Screen.card_ui_t() #creating an object of child class card_ui_t
#this class defines all paramters relating to the Card details
Card.Card_Type() #method call to set the type of the card
Card.BackGround_Color() #method call to set the background color of the card (essential only for two cards)
Card.SlotIndex() #method call to set the slot_index
Card.FlipperVisibility() #method to show or hide flipper
Card.FlipIndex() #method to set the index of the flipper
Card.MediaType() #method to choose the media type to display on the card
Card.Title() #method to set the title of the card
Card.String() #method to set the strings to be displayed on each card
| slot_index = int(input('Enter the slot index (0=LEFT/1=RIGHT): '))
print('Slot index set as: {}\n'.format(slot_index)) | conditional_block |
VIEW3D_MT_armature_context_menu.py | # 「3Dビュー」エリア > アーマチュアの「編集」モード > 「アーマチュアコンテクストメニュー」 (Wキー)
# "3D View" Area > "Edit" Mode with Armature > "Armature Context Menu" (W Key)
import bpy, mathutils
import re
from bpy.props import *
SUFFIX_TPL = (".R",".L",".r",".l","_R","_L","_r","_l",".right",".left",".Right",".Left","_right","_left","_Right","_Left")
################
# オペレーター #
################
class CreateMirror(bpy.types.Operator):
bl_idname = "armature.create_mirror"
bl_label = "Add Suffix + Symmetrize"
bl_description = "Add left-right suffix to selected bones' names, and make their copies at the symmetric positions"
bl_options = {'REGISTER', 'UNDO'}
is_connect : BoolProperty(name="Copy 'Connected'", default=True)
use_autoname : BoolProperty(name="Add Suffix", default=True)
use_rename : BoolProperty(name="Rename", default=False)
new_name : StringProperty(name="New Name", default="Bone")
name_sep : EnumProperty(name="Numbering Expression",items=[
(".",".00X","",1),("_","_00X","",2),("-","-00X","",3)])
start_from : EnumProperty(name="Numbering Starts from",items=[
("NO","No number","",1),("ZERO","000","",2),("ONE","001","",3)])
@classmethod
def poll(cls, context):
ob = context.active_object
if ob:
if ob.type == 'ARMATURE':
if ob.mode == "EDIT":
return True
return False
def draw(self, context):
for p in ['is_connect', 'use_autoname', 'use_rename']:
row = self.layout.row()
row.use_property_split = True
row.prop(self, p)
box = self.layout.box()
if self.use_rename:
row = box.row(align=True)
row.label(text="New Name")
row.prop(self, 'new_name', text="")
row.prop(self, 'name_sep', text="")
row = box.row(align=True)
row.label(text="Numbering Starts from")
row.prop(self, 'start_from', text="")
def execute(self, context):
obj = context.active_object
if self.use_rename:
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.mode_set(mode='EDIT')# 直前に行った名前変更が obj.data.bones に反映されていない場合への対処
preCursorCo = context.scene.cursor.location[:]
prePivotPoint = context.scene.tool_settings.transform_pivot_point
preUseMirror = context.object.data.use_mirror_x
context.scene.cursor.location = context.object.location
context.scene.tool_settings.transform_pivot_point = 'CURSOR'
context.object.data.use_mirror_x = True
selectedBones = context.selected_bones[:]
if self.use_rename:
name_head = f"{self.new_name}{self.name_sep}"
if self.start_from == 'NO':
new_names = [self.new_name] + [f"{name_head}{num+1:03}" for num in range(len(selectedBones)-1)]
elif self.start_from == 'ZERO':
new_names = [f"{name_head}{num:03}" for num in range(len(selectedBones))]
elif self.start_from == 'ONE':
new_names = [f"{name_head}{num+1:03}" for num in range(len(selectedBones))]
for b, nam in zip(selectedBones, new_names):
try:
existed_bone = obj.data.bones[nam]
existed_bone.name = "temp"
b.name = nam
existed_bone.name = nam
except KeyError:
b.name = nam
if self.use_autoname:
for b in selectedBones:
if b.name.endswith(SUFFIX_TPL):
b.select = False
bpy.ops.armature.autoside_names(type='XAXIS')
for b in selectedBones:
b.select = True
bpy.ops.armature.duplicate(do_flip_names=True)
axis = (True, False, False)
bpy.ops.transform.mirror(constraint_axis=axis)
newBones = set(context.selected_bones) - set(selectedBones)
newBones = sorted(list(newBones), key=lambda x: x.name)
selectedBones = sorted(selectedBones, key=lambda x: x.name)
for orig, copy in zip(selectedBones, newBones):
bpy.ops.armature.select_all(action='DESELECT')
orig.select = orig.select_head = orig.select_tail = True
bpy.ops.transform.transform(mode='BONE_ROLL', value=(0, 0, 0, 0))
bpy.ops.armature.select_all(action='DESELECT')
copy.select = copy.select_head = copy.select_tail = True
if self.is_connect:
copy.use_connect = orig.use_connect
for b in newBones:
b.select = b.select_head = b.select_tail = True
context.scene.cursor.location = preCursorCo[:]
context.scene.tool_settings.transform_pivot_point = prePivotPoint
context.object.data.use_mirror_x = preUseMirror
return {'FINISHED'}
class RenameBoneRegularExpression(bpy.types.Operator):
bl_idname = "armature.rename_bone_regular_expression"
bl_label = "Rename Bones by Regular Express | lace selected bones' names by using regular expression"
bl_options = {'REGISTER', 'UNDO'}
isAll : BoolProperty(name="Apply to All Bones", default=False)
pattern : StringProperty(name="Target text", default="^")
repl : StringProperty(name="New Text", default="")
@classmethod
def poll(cls, context):
if context.active_object and context.active_object.type == 'ARMATURE':
return True
return False
def execute(self, context):
obj = context.active_object
bones = context.selected_bones
if not bones:
bones = [b.bone for b in context.selected_pose_bones]
if (self.isAll):
bones = obj.data.bones
for bone in bones:
try:
new_name = re.sub(self.pattern, self.repl, bone.name)
except:
continue
bone.name = new_name
return {'FINISHED'}
class RenameOppositeBone(bpy.types.Operator):
bl_idname = "armature.rename_opposite_bone"
bl_label = "Manipulate Symmetric Bones' Names"
bl_description = "Change names of selected bones and its opposite-side ones in a specific way"
bl_options = {'REGISTER', 'UNDO'}
threshold : FloatProperty(name="Position of Threshold", default=0.00001, min=0, soft_min=0, step=0.001, precision=5)
use_rename : EnumProperty(name="Manipulate",items=[
("False","Add Suffix","",1),("True","Rename + Add Suffix","",2)])
order : EnumProperty(name="Bones' Order",items=[
("DEFAULT","Default","",1),("NAME","Sort by original name","",2)])
use_root : BoolProperty(name="Use Root Bones' Names as New Names", default=False)
new_name : StringProperty(name="New Name", default="Bone")
name_sep : EnumProperty(name="Numbering Expression",items=[
(".",".00X","",1),("_","_00X","",2),("-","-00X","",3)])
start_from : EnumProperty(name="Numbering Starts from",items=[
("NO","No number","",1),("ZERO","000","",2),("ONE","001","",3)])
@classmethod
def poll(cls, context):
ob = context.active_object
if ob:
if ob.type == 'ARMATURE':
if ob.mode == "EDIT":
return True
return False
def draw(self, context):
row = self.layout.row()
row.use_property_split = True
row.prop(self, 'threshold')
self.layout.prop(self, 'use_rename', expand=True)
box = self.layout.box()
if self.use_rename == 'True':
row = box.row(align=True)
row.label(text="Use Root Bones' Names as New Names")
row.prop(self, 'use_root', text="")
row = box.split(factor=0.4, align=True)
row.label(text="Bones' Order")
row.prop(self, 'order', text="")
row.enabled = not self.use_root
sp = box.split(factor=0.66, align=True)
sp_row = sp.row(align=True)
sp_row.label(text="New Name")
sp_row.prop(self, 'new_name', text="")
sp_row.enabled = not self.use_root
sp.prop(self, 'name_sep', text="")
row = box.row(align=True)
row.label(text="Numbering Starts from")
row.prop(self, 'start_from', text="")
def execute(self, context):
obj = context.active_object
arm = obj.data
selectedBones = context.selected_bones[:]
if self.use_rename:
if self.use_root:
roots = [b for b in selectedBones if (not b.parent) or (b.parent not in selectedBones)]
root_names=[]
for ro in roots:
pre_name = ro.name
bpy.ops.armature.select_all(action='DESELECT')
ro.select = True
bpy.ops.armature.flip_names(do_strip_numbers=True)
for idx,(x,y) in enumerate(zip(pre_name, ro.name)):
if not x == y:
if pre_name[:idx][-1] in ['.','_','-']:
root_names.append(pre_name[:idx-1])
else:
root_names.append(pre_name[:idx])
ro.name = pre_name
break
else:
root_names.append(pre_name)
target_bones = []
for b in roots:
b_chain = [b] + b.children_recursive
target_bones.append([b for b in b_chain if b in selectedBones])
else:
root_names = [self.new_name]
if self.order == 'DEFAULT':
target_bones = [selectedBones]
elif self.order == 'NAME':
target_bones = [sorted(selectedBones, key=lambda x:x.name)]
for root_name, bone_list in zip(root_names, target_bones):
name_head = f"{root_name}{self.name_sep}"
if self.start_from == 'NO':
new_names = [root_name] + [f"{name_head}{num+1:03}" for num in range(len(selectedBones)-1)]
elif self.start_from == 'ZERO':
new_names = [f"{name_head}{num:03}" for num in range(len(selectedBones))]
elif self.start_from == 'ONE':
new_names = [f"{name_head}{num+1:03}" for num in range(len(selectedBones))]
for b, nam in zip(bone_list, new_names):
try:
existed_bone = obj.data.bones[nam]
existed_bone.name = "temp"
b.name = nam
existed_bone.name = nam
except KeyError:
b.name = nam
bpy.ops.armature.select_all(action='DESELECT')
for b in selectedBones:
if not b.name.endswith(SUFFIX_TPL):
b.select = True
bpy.ops.armature.autoside_names(type='XAXIS')
bpy.ops.armature.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
threshold = self.threshold
for bone in selectedBones:
bone = arm.bones[bone.name]
temp = [x for x in bone.head_local]
head_interval = [(x-threshold, x+threshold) for x in [temp[0]*(-1)] + temp[1:]]
temp = [x for x in bone.tail_local]
tail_interval = [(x-threshold, x+threshold) for x in [temp[0]*(-1)] + temp[1:]]
for b in arm.bones:
if b == bone:
continue
for value, limits in zip(b.head_local, head_interval):
if not limits[0] <= value <= limits[1]:
break
else:
for value, limits in zip(b.tail_local, tail_interval):
if not limits[0] <= value <= limits[1]:
break
else:
b.name = bone.name
b.select = True
b.select_head = True
b.select_tail = True
break
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.armature.flip_names(do_strip_numbers=True)
return {'FINISHED'}
class ExtendBone(bpy.types.Operator):
bl_idname = "armature.extend_bone"
bl_label = "Extend Bone"
bl_description = "Stretch new bone in the direction of selected bone"
bl_options = {'REGISTER', 'UNDO'}
length : FloatProperty(name="Length", default=1.0, min=-10, max=10, soft_min=-10, soft_max=10, step=10, precision=3)
is_parent : BoolProperty(name="Set Original as Parent", default=True)
is_connect : BoolProperty(name="Connected", default=True)
@classmethod
def poll(cls, context):
ob = context.active_object
if ob:
if ob.type == 'ARMATURE':
if 'selected_bones' in dir(context):
if context.selected_bones:
if 1 <= len(context.selected_bones):
return True
return False
def execute(self, context):
ob = context.active_object
arm = ob.data
for bone in context.selected_bones[:]:
new_bone = arm.edit_bones.new(bone.name)
new_bone.head = bone.tail[:]
rot = bone.matrix.to_quaternion()
tail = mathutils.Vector((0, 1, 0)) * self.length
tail.rotate(rot)
new_bone.tail = bone.tail + tail
new_bone.roll = bone.roll
if self.is_parent:
new_bone.parent = bone
if self.is_connect:
new_bone.use_connect = True
bone.select = False
bone.select_head = False
if bone.use_connect:
bone.parent.select_tail = False
if self.is_connect:
bone.select_tail = True
new_bone.select = True
new_bone.select_head = True
new_bone.select_tail = True
return {'FINISHED'}
################
# クラスの登録 #
################
classes = [
CreateMirror,
RenameBoneRegularExpression,
RenameOppositeBone,
ExtendBone
]
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
################
# メニュー追加 #
################
# メニューのオン/オフの判定
def IsMenuEnable(self_id):
for id in bpy.context.preferences.addons[__name__.partition('.')[0]].preferences.disabled_menu.split(','):
if (id == self_id):
return False
else:
return True
# メニューを登録する関数
def menu(self, context):
if (IsMenuEnable(__name__.split('.')[-1])):
self.layout.separator()
self.layout.operator(ExtendBone.bl_idname, icon='PLUGIN')
self.layout.separator()
self.layout.prop(context.object.data, 'use_mirror_x', icon='PLUGIN')
self.layout.operator(CreateMirror.bl_idname, icon='PLUGIN')
self.layout.operator(RenameOppositeBone.bl_idname, icon='PLUGIN')
self.layout.separator()
self.layout.operator('object.copy_bone_name', icon='PLUGIN')#BONE_PT_context_bone で定義
self.layout.operator(RenameBoneRegularExpression.bl_idname, icon='PLUGIN')
if (context.preferences.addons[__name__.partition('.')[0]].preferences.use_disabled_menu):
self.layout.separator()
self.layout.operator('wm.toggle_menu_enable', icon='CANCEL').id = __name__.split('.')[-1]
| ion"
bl_description = "Rep | identifier_name |
VIEW3D_MT_armature_context_menu.py | # 「3Dビュー」エリア > アーマチュアの「編集」モード > 「アーマチュアコンテクストメニュー」 (Wキー)
# "3D View" Area > "Edit" Mode with Armature > "Armature Context Menu" (W Key)
import bpy, mathutils
import re
from bpy.props import *
SUFFIX_TPL = (".R",".L",".r",".l","_R","_L","_r","_l",".right",".left",".Right",".Left","_right","_left","_Right","_Left")
################
# オペレーター #
################
class CreateMirror(bpy.types.Operator):
bl_idname = "armature.create_mirror"
bl_label = "Add Suffix + Symmetrize"
bl_description = "Add left-right suffix to selected bones' names, and make their copies at the symmetric positions"
bl_options = {'REGISTER', 'UNDO'}
is_connect : BoolProperty(name="Copy 'Connected'", default=True)
use_autoname : BoolProperty(name="Add Suffix", default=True)
use_rename : BoolProperty(name="Rename", default=False)
new_name : StringProperty(name="New Name", default="Bone")
name_sep : EnumProperty(name="Numbering Expression",items=[
(".",".00X","",1),("_","_00X","",2),("-","-00X","",3)])
start_from : EnumProperty(name="Numbering Starts from",items=[
("NO","No number","",1),("ZERO","000","",2),("ONE","001","",3)])
@classmethod
def poll(cls, context):
ob = context.active_object
if ob:
if ob.type == 'ARMATURE':
if ob.mode == "EDIT":
return True
return False
def draw(self, context):
for p in ['is_connect', 'use_autoname', 'use_rename']:
row = self.layout.row()
row.use_property_split = True
row.prop(self, p)
box = self.layout.box()
if self.use_rename:
row = box.row(align=True)
row.label(text="New Name")
row.prop(self, 'new_name', text="")
row.prop(self, 'name_sep', text="")
row = box.row(align=True)
row.label(text="Numbering Starts from")
row.prop(self, 'start_from', text="")
def execute(self, context):
obj = context.active_object
if self.use_rename:
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.mode_set(mode='EDIT')# 直前に行った名前変更が obj.data.bones に反映されていない場合への対処
preCursorCo = context.scene.cursor.location[:]
prePivotPoint = context.scene.tool_settings.transform_pivot_point
preUseMirror = context.object.data.use_mirror_x
context.scene.cursor.location = context.object.location
context.scene.tool_settings.transform_pivot_point = 'CURSOR'
context.object.data.use_mirror_x = True
selectedBones = context.selected_bones[:]
if self.use_rename:
name_head = f"{self.new_name}{self.name_sep}"
if self.start_from == 'NO':
new_names = [self.new_name] + [f"{name_head}{num+1:03}" for num in range(len(selectedBones)-1)]
elif self.start_from == 'ZERO':
new_names = [f"{name_head}{num:03}" for num in range(len(selectedBones))]
elif self.start_from == 'ONE':
new_names = [f"{name_head}{num+1:03}" for num in range(len(selectedBones))]
for b, nam in zip(selectedBones, new_names):
try:
existed_bone = obj.data.bones[nam]
existed_bone.name = "temp"
b.name = nam
existed_bone.name = nam
except KeyError:
b.name = nam
if self.use_autoname:
for b in selectedBones:
if b.name.endswith(SUFFIX_TPL):
b.select = False
bpy.ops.armature.autoside_names(type='XAXIS')
for b in selectedBones:
b.select = True
bpy.ops.armature.duplicate(do_flip_names=True)
axis = (True, False, False)
bpy.ops.transform.mirror(constraint_axis=axis)
newBones = set(context.selected_bones) - set(selectedBones)
newBones = sorted(list(newBones), key=lambda x: x.name)
selectedBones = sorted(selectedBones, key=lambda x: x.name)
for orig, copy in zip(selectedBones, newBones):
bpy.ops.armature.select_all(action='DESELECT')
orig.select = orig.select_head = orig.select_tail = True
bpy.ops.transform.transform(mode='BONE_ROLL', value=(0, 0, 0, 0))
bpy.ops.armature.select_all(action='DESELECT')
copy.select = copy.select_head = copy.select_tail = True
if self.is_connect:
copy.use_connect = orig.use_connect
for b in newBones:
b.select = b.select_head = b.select_tail = True
context.scene.cursor.location = preCursorCo[:]
context.scene.tool_settings.transform_pivot_point = prePivotPoint
context.object.data.use_mirror_x = preUseMirror
return {'FINISHED'}
class RenameBoneRegularExpression(bpy.types.Operator):
bl_idname = "armature.rename_bone_regular_expression"
bl_label = "Rename Bones by Regular Expression"
bl_description = "Replace selected bones' names by using regular expression"
bl_options = {'REGISTER', 'UNDO'}
isAll : BoolProperty(name="Apply to All Bones", default=False)
pattern : StringProperty(name="Target text", default="^")
repl : StringProperty(name="New Text", default="")
@classmethod
def poll(cls, context):
if context.active_object and context.active_object.type == 'ARMATURE':
return True
return False
def execute(self, context):
obj = context.active_object
bones = context.selected_bones
if not bones:
bones = [b.bone for b in context.selected_pose_bones]
if (self.isAll):
bones = obj.data.bones
for bone in bones:
try:
new_name = re.sub(self.pattern, self.repl, bone.name)
except:
continue
bone.name = new_name
return {'FINISHED'}
class RenameOppositeBone(bpy.types.Operator):
bl_idname = "armature.rename_opposite_bone"
bl_label = "Manipulate Symmetric Bones' Names"
bl_description = "Change names of selected bones and its opposite-side ones in a specific way"
bl_options = {'REGISTER', 'UNDO'}
threshold : FloatProperty(name="Position of Threshold", default=0.00001, min=0, soft_min=0, step=0.001, precision=5)
use_rename : EnumProperty(name="Manipulate",items=[
("False","Add Suffix","",1),("True","Rename + Add Suffix","",2)])
order : EnumProperty(name="Bones' Order",items=[
("DEFAULT","Default","",1),("NAME","Sort by original name","",2)])
use_root : BoolProperty(name="Use Root Bones' Names as New Names", default=False)
new_name : StringProperty(name="New Name", default="Bone")
name_sep : EnumProperty(name="Numbering Expression",items=[
(".",".00X","",1),("_","_00X","",2),("-","-00X","",3)])
start_from : EnumProperty(name="Numbering Starts from",items=[
("NO","No number","",1),("ZERO","000","",2),("ONE","001","",3)])
@classmethod
def poll(cls, context):
ob = context.active_object
if ob:
if ob.type == 'ARMATURE':
if ob.mode == "EDIT":
return True
return False
def draw(self, context): | ame', expand=True)
box = self.layout.box()
if self.use_rename == 'True':
row = box.row(align=True)
row.label(text="Use Root Bones' Names as New Names")
row.prop(self, 'use_root', text="")
row = box.split(factor=0.4, align=True)
row.label(text="Bones' Order")
row.prop(self, 'order', text="")
row.enabled = not self.use_root
sp = box.split(factor=0.66, align=True)
sp_row = sp.row(align=True)
sp_row.label(text="New Name")
sp_row.prop(self, 'new_name', text="")
sp_row.enabled = not self.use_root
sp.prop(self, 'name_sep', text="")
row = box.row(align=True)
row.label(text="Numbering Starts from")
row.prop(self, 'start_from', text="")
def execute(self, context):
obj = context.active_object
arm = obj.data
selectedBones = context.selected_bones[:]
if self.use_rename:
if self.use_root:
roots = [b for b in selectedBones if (not b.parent) or (b.parent not in selectedBones)]
root_names=[]
for ro in roots:
pre_name = ro.name
bpy.ops.armature.select_all(action='DESELECT')
ro.select = True
bpy.ops.armature.flip_names(do_strip_numbers=True)
for idx,(x,y) in enumerate(zip(pre_name, ro.name)):
if not x == y:
if pre_name[:idx][-1] in ['.','_','-']:
root_names.append(pre_name[:idx-1])
else:
root_names.append(pre_name[:idx])
ro.name = pre_name
break
else:
root_names.append(pre_name)
target_bones = []
for b in roots:
b_chain = [b] + b.children_recursive
target_bones.append([b for b in b_chain if b in selectedBones])
else:
root_names = [self.new_name]
if self.order == 'DEFAULT':
target_bones = [selectedBones]
elif self.order == 'NAME':
target_bones = [sorted(selectedBones, key=lambda x:x.name)]
for root_name, bone_list in zip(root_names, target_bones):
name_head = f"{root_name}{self.name_sep}"
if self.start_from == 'NO':
new_names = [root_name] + [f"{name_head}{num+1:03}" for num in range(len(selectedBones)-1)]
elif self.start_from == 'ZERO':
new_names = [f"{name_head}{num:03}" for num in range(len(selectedBones))]
elif self.start_from == 'ONE':
new_names = [f"{name_head}{num+1:03}" for num in range(len(selectedBones))]
for b, nam in zip(bone_list, new_names):
try:
existed_bone = obj.data.bones[nam]
existed_bone.name = "temp"
b.name = nam
existed_bone.name = nam
except KeyError:
b.name = nam
bpy.ops.armature.select_all(action='DESELECT')
for b in selectedBones:
if not b.name.endswith(SUFFIX_TPL):
b.select = True
bpy.ops.armature.autoside_names(type='XAXIS')
bpy.ops.armature.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
threshold = self.threshold
for bone in selectedBones:
bone = arm.bones[bone.name]
temp = [x for x in bone.head_local]
head_interval = [(x-threshold, x+threshold) for x in [temp[0]*(-1)] + temp[1:]]
temp = [x for x in bone.tail_local]
tail_interval = [(x-threshold, x+threshold) for x in [temp[0]*(-1)] + temp[1:]]
for b in arm.bones:
if b == bone:
continue
for value, limits in zip(b.head_local, head_interval):
if not limits[0] <= value <= limits[1]:
break
else:
for value, limits in zip(b.tail_local, tail_interval):
if not limits[0] <= value <= limits[1]:
break
else:
b.name = bone.name
b.select = True
b.select_head = True
b.select_tail = True
break
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.armature.flip_names(do_strip_numbers=True)
return {'FINISHED'}
class ExtendBone(bpy.types.Operator):
bl_idname = "armature.extend_bone"
bl_label = "Extend Bone"
bl_description = "Stretch new bone in the direction of selected bone"
bl_options = {'REGISTER', 'UNDO'}
length : FloatProperty(name="Length", default=1.0, min=-10, max=10, soft_min=-10, soft_max=10, step=10, precision=3)
is_parent : BoolProperty(name="Set Original as Parent", default=True)
is_connect : BoolProperty(name="Connected", default=True)
@classmethod
def poll(cls, context):
ob = context.active_object
if ob:
if ob.type == 'ARMATURE':
if 'selected_bones' in dir(context):
if context.selected_bones:
if 1 <= len(context.selected_bones):
return True
return False
def execute(self, context):
ob = context.active_object
arm = ob.data
for bone in context.selected_bones[:]:
new_bone = arm.edit_bones.new(bone.name)
new_bone.head = bone.tail[:]
rot = bone.matrix.to_quaternion()
tail = mathutils.Vector((0, 1, 0)) * self.length
tail.rotate(rot)
new_bone.tail = bone.tail + tail
new_bone.roll = bone.roll
if self.is_parent:
new_bone.parent = bone
if self.is_connect:
new_bone.use_connect = True
bone.select = False
bone.select_head = False
if bone.use_connect:
bone.parent.select_tail = False
if self.is_connect:
bone.select_tail = True
new_bone.select = True
new_bone.select_head = True
new_bone.select_tail = True
return {'FINISHED'}
################
# クラスの登録 #
################
classes = [
CreateMirror,
RenameBoneRegularExpression,
RenameOppositeBone,
ExtendBone
]
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
################
# メニュー追加 #
################
# メニューのオン/オフの判定
def IsMenuEnable(self_id):
for id in bpy.context.preferences.addons[__name__.partition('.')[0]].preferences.disabled_menu.split(','):
if (id == self_id):
return False
else:
return True
# メニューを登録する関数
def menu(self, context):
if (IsMenuEnable(__name__.split('.')[-1])):
self.layout.separator()
self.layout.operator(ExtendBone.bl_idname, icon='PLUGIN')
self.layout.separator()
self.layout.prop(context.object.data, 'use_mirror_x', icon='PLUGIN')
self.layout.operator(CreateMirror.bl_idname, icon='PLUGIN')
self.layout.operator(RenameOppositeBone.bl_idname, icon='PLUGIN')
self.layout.separator()
self.layout.operator('object.copy_bone_name', icon='PLUGIN')#BONE_PT_context_bone で定義
self.layout.operator(RenameBoneRegularExpression.bl_idname, icon='PLUGIN')
if (context.preferences.addons[__name__.partition('.')[0]].preferences.use_disabled_menu):
self.layout.separator()
self.layout.operator('wm.toggle_menu_enable', icon='CANCEL').id = __name__.split('.')[-1]
|
row = self.layout.row()
row.use_property_split = True
row.prop(self, 'threshold')
self.layout.prop(self, 'use_ren | identifier_body |
VIEW3D_MT_armature_context_menu.py | # 「3Dビュー」エリア > アーマチュアの「編集」モード > 「アーマチュアコンテクストメニュー」 (Wキー)
# "3D View" Area > "Edit" Mode with Armature > "Armature Context Menu" (W Key)
import bpy, mathutils
import re
from bpy.props import *
SUFFIX_TPL = (".R",".L",".r",".l","_R","_L","_r","_l",".right",".left",".Right",".Left","_right","_left","_Right","_Left")
################
# オペレーター #
################
class CreateMirror(bpy.types.Operator):
bl_idname = "armature.create_mirror"
bl_label = "Add Suffix + Symmetrize"
bl_description = "Add left-right suffix to selected bones' names, and make their copies at the symmetric positions"
bl_options = {'REGISTER', 'UNDO'}
is_connect : BoolProperty(name="Copy 'Connected'", default=True)
use_autoname : BoolProperty(name="Add Suffix", default=True)
use_rename : BoolProperty(name="Rename", default=False)
new_name : StringProperty(name="New Name", default="Bone")
name_sep : EnumProperty(name="Numbering Expression",items=[
(".",".00X","",1),("_","_00X","",2),("-","-00X","",3)])
start_from : EnumProperty(name="Numbering Starts from",items=[
("NO","No number","",1),("ZERO","000","",2),("ONE","001","",3)])
@classmethod
def poll(cls, context):
ob = context.active_object
if ob:
if ob.type == 'ARMATURE':
if ob.mode == "EDIT":
return True
return False
def draw(self, context):
for p in ['is_connect', 'use_autoname', 'use_rename']:
row = self.layout.row()
row.use_property_split = True
row.prop(self, p)
box = self.layout.box()
if self.use_rename:
row = box.row(align=True)
row.label(text="New Name")
row.prop(self, 'new_name', text="")
row.prop(self, 'name_sep', text="")
row = box.row(align=True)
row.label(text="Numbering Starts from")
row.prop(self, 'start_from', text="")
def execute(self, context):
obj = context.active_object
if self.use_rename:
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.mode_set(mode='EDIT')# 直前に行った名前変更が obj.data.bones に反映されていない場合への対処
preCursorCo = context.scene.cursor.location[:]
prePivotPoint = context.scene.tool_settings.transform_pivot_point
preUseMirror = context.object.data.use_mirror_x
context.scene.cursor.location = context.object.location
context.scene.tool_settings.transform_pivot_point = 'CURSOR'
context.object.data.use_mirror_x = True
selectedBones = context.selected_bones[:] | new_names = [self.new_name] + [f"{name_head}{num+1:03}" for num in range(len(selectedBones)-1)]
elif self.start_from == 'ZERO':
new_names = [f"{name_head}{num:03}" for num in range(len(selectedBones))]
elif self.start_from == 'ONE':
new_names = [f"{name_head}{num+1:03}" for num in range(len(selectedBones))]
for b, nam in zip(selectedBones, new_names):
try:
existed_bone = obj.data.bones[nam]
existed_bone.name = "temp"
b.name = nam
existed_bone.name = nam
except KeyError:
b.name = nam
if self.use_autoname:
for b in selectedBones:
if b.name.endswith(SUFFIX_TPL):
b.select = False
bpy.ops.armature.autoside_names(type='XAXIS')
for b in selectedBones:
b.select = True
bpy.ops.armature.duplicate(do_flip_names=True)
axis = (True, False, False)
bpy.ops.transform.mirror(constraint_axis=axis)
newBones = set(context.selected_bones) - set(selectedBones)
newBones = sorted(list(newBones), key=lambda x: x.name)
selectedBones = sorted(selectedBones, key=lambda x: x.name)
for orig, copy in zip(selectedBones, newBones):
bpy.ops.armature.select_all(action='DESELECT')
orig.select = orig.select_head = orig.select_tail = True
bpy.ops.transform.transform(mode='BONE_ROLL', value=(0, 0, 0, 0))
bpy.ops.armature.select_all(action='DESELECT')
copy.select = copy.select_head = copy.select_tail = True
if self.is_connect:
copy.use_connect = orig.use_connect
for b in newBones:
b.select = b.select_head = b.select_tail = True
context.scene.cursor.location = preCursorCo[:]
context.scene.tool_settings.transform_pivot_point = prePivotPoint
context.object.data.use_mirror_x = preUseMirror
return {'FINISHED'}
class RenameBoneRegularExpression(bpy.types.Operator):
bl_idname = "armature.rename_bone_regular_expression"
bl_label = "Rename Bones by Regular Expression"
bl_description = "Replace selected bones' names by using regular expression"
bl_options = {'REGISTER', 'UNDO'}
isAll : BoolProperty(name="Apply to All Bones", default=False)
pattern : StringProperty(name="Target text", default="^")
repl : StringProperty(name="New Text", default="")
@classmethod
def poll(cls, context):
if context.active_object and context.active_object.type == 'ARMATURE':
return True
return False
def execute(self, context):
obj = context.active_object
bones = context.selected_bones
if not bones:
bones = [b.bone for b in context.selected_pose_bones]
if (self.isAll):
bones = obj.data.bones
for bone in bones:
try:
new_name = re.sub(self.pattern, self.repl, bone.name)
except:
continue
bone.name = new_name
return {'FINISHED'}
class RenameOppositeBone(bpy.types.Operator):
bl_idname = "armature.rename_opposite_bone"
bl_label = "Manipulate Symmetric Bones' Names"
bl_description = "Change names of selected bones and its opposite-side ones in a specific way"
bl_options = {'REGISTER', 'UNDO'}
threshold : FloatProperty(name="Position of Threshold", default=0.00001, min=0, soft_min=0, step=0.001, precision=5)
use_rename : EnumProperty(name="Manipulate",items=[
("False","Add Suffix","",1),("True","Rename + Add Suffix","",2)])
order : EnumProperty(name="Bones' Order",items=[
("DEFAULT","Default","",1),("NAME","Sort by original name","",2)])
use_root : BoolProperty(name="Use Root Bones' Names as New Names", default=False)
new_name : StringProperty(name="New Name", default="Bone")
name_sep : EnumProperty(name="Numbering Expression",items=[
(".",".00X","",1),("_","_00X","",2),("-","-00X","",3)])
start_from : EnumProperty(name="Numbering Starts from",items=[
("NO","No number","",1),("ZERO","000","",2),("ONE","001","",3)])
@classmethod
def poll(cls, context):
ob = context.active_object
if ob:
if ob.type == 'ARMATURE':
if ob.mode == "EDIT":
return True
return False
def draw(self, context):
row = self.layout.row()
row.use_property_split = True
row.prop(self, 'threshold')
self.layout.prop(self, 'use_rename', expand=True)
box = self.layout.box()
if self.use_rename == 'True':
row = box.row(align=True)
row.label(text="Use Root Bones' Names as New Names")
row.prop(self, 'use_root', text="")
row = box.split(factor=0.4, align=True)
row.label(text="Bones' Order")
row.prop(self, 'order', text="")
row.enabled = not self.use_root
sp = box.split(factor=0.66, align=True)
sp_row = sp.row(align=True)
sp_row.label(text="New Name")
sp_row.prop(self, 'new_name', text="")
sp_row.enabled = not self.use_root
sp.prop(self, 'name_sep', text="")
row = box.row(align=True)
row.label(text="Numbering Starts from")
row.prop(self, 'start_from', text="")
def execute(self, context):
obj = context.active_object
arm = obj.data
selectedBones = context.selected_bones[:]
if self.use_rename:
if self.use_root:
roots = [b for b in selectedBones if (not b.parent) or (b.parent not in selectedBones)]
root_names=[]
for ro in roots:
pre_name = ro.name
bpy.ops.armature.select_all(action='DESELECT')
ro.select = True
bpy.ops.armature.flip_names(do_strip_numbers=True)
for idx,(x,y) in enumerate(zip(pre_name, ro.name)):
if not x == y:
if pre_name[:idx][-1] in ['.','_','-']:
root_names.append(pre_name[:idx-1])
else:
root_names.append(pre_name[:idx])
ro.name = pre_name
break
else:
root_names.append(pre_name)
target_bones = []
for b in roots:
b_chain = [b] + b.children_recursive
target_bones.append([b for b in b_chain if b in selectedBones])
else:
root_names = [self.new_name]
if self.order == 'DEFAULT':
target_bones = [selectedBones]
elif self.order == 'NAME':
target_bones = [sorted(selectedBones, key=lambda x:x.name)]
for root_name, bone_list in zip(root_names, target_bones):
name_head = f"{root_name}{self.name_sep}"
if self.start_from == 'NO':
new_names = [root_name] + [f"{name_head}{num+1:03}" for num in range(len(selectedBones)-1)]
elif self.start_from == 'ZERO':
new_names = [f"{name_head}{num:03}" for num in range(len(selectedBones))]
elif self.start_from == 'ONE':
new_names = [f"{name_head}{num+1:03}" for num in range(len(selectedBones))]
for b, nam in zip(bone_list, new_names):
try:
existed_bone = obj.data.bones[nam]
existed_bone.name = "temp"
b.name = nam
existed_bone.name = nam
except KeyError:
b.name = nam
bpy.ops.armature.select_all(action='DESELECT')
for b in selectedBones:
if not b.name.endswith(SUFFIX_TPL):
b.select = True
bpy.ops.armature.autoside_names(type='XAXIS')
bpy.ops.armature.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
threshold = self.threshold
for bone in selectedBones:
bone = arm.bones[bone.name]
temp = [x for x in bone.head_local]
head_interval = [(x-threshold, x+threshold) for x in [temp[0]*(-1)] + temp[1:]]
temp = [x for x in bone.tail_local]
tail_interval = [(x-threshold, x+threshold) for x in [temp[0]*(-1)] + temp[1:]]
for b in arm.bones:
if b == bone:
continue
for value, limits in zip(b.head_local, head_interval):
if not limits[0] <= value <= limits[1]:
break
else:
for value, limits in zip(b.tail_local, tail_interval):
if not limits[0] <= value <= limits[1]:
break
else:
b.name = bone.name
b.select = True
b.select_head = True
b.select_tail = True
break
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.armature.flip_names(do_strip_numbers=True)
return {'FINISHED'}
class ExtendBone(bpy.types.Operator):
bl_idname = "armature.extend_bone"
bl_label = "Extend Bone"
bl_description = "Stretch new bone in the direction of selected bone"
bl_options = {'REGISTER', 'UNDO'}
length : FloatProperty(name="Length", default=1.0, min=-10, max=10, soft_min=-10, soft_max=10, step=10, precision=3)
is_parent : BoolProperty(name="Set Original as Parent", default=True)
is_connect : BoolProperty(name="Connected", default=True)
@classmethod
def poll(cls, context):
ob = context.active_object
if ob:
if ob.type == 'ARMATURE':
if 'selected_bones' in dir(context):
if context.selected_bones:
if 1 <= len(context.selected_bones):
return True
return False
def execute(self, context):
ob = context.active_object
arm = ob.data
for bone in context.selected_bones[:]:
new_bone = arm.edit_bones.new(bone.name)
new_bone.head = bone.tail[:]
rot = bone.matrix.to_quaternion()
tail = mathutils.Vector((0, 1, 0)) * self.length
tail.rotate(rot)
new_bone.tail = bone.tail + tail
new_bone.roll = bone.roll
if self.is_parent:
new_bone.parent = bone
if self.is_connect:
new_bone.use_connect = True
bone.select = False
bone.select_head = False
if bone.use_connect:
bone.parent.select_tail = False
if self.is_connect:
bone.select_tail = True
new_bone.select = True
new_bone.select_head = True
new_bone.select_tail = True
return {'FINISHED'}
################
# クラスの登録 #
################
classes = [
CreateMirror,
RenameBoneRegularExpression,
RenameOppositeBone,
ExtendBone
]
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
################
# メニュー追加 #
################
# メニューのオン/オフの判定
def IsMenuEnable(self_id):
for id in bpy.context.preferences.addons[__name__.partition('.')[0]].preferences.disabled_menu.split(','):
if (id == self_id):
return False
else:
return True
# メニューを登録する関数
def menu(self, context):
if (IsMenuEnable(__name__.split('.')[-1])):
self.layout.separator()
self.layout.operator(ExtendBone.bl_idname, icon='PLUGIN')
self.layout.separator()
self.layout.prop(context.object.data, 'use_mirror_x', icon='PLUGIN')
self.layout.operator(CreateMirror.bl_idname, icon='PLUGIN')
self.layout.operator(RenameOppositeBone.bl_idname, icon='PLUGIN')
self.layout.separator()
self.layout.operator('object.copy_bone_name', icon='PLUGIN')#BONE_PT_context_bone で定義
self.layout.operator(RenameBoneRegularExpression.bl_idname, icon='PLUGIN')
if (context.preferences.addons[__name__.partition('.')[0]].preferences.use_disabled_menu):
self.layout.separator()
self.layout.operator('wm.toggle_menu_enable', icon='CANCEL').id = __name__.split('.')[-1] | if self.use_rename:
name_head = f"{self.new_name}{self.name_sep}"
if self.start_from == 'NO': | random_line_split |
VIEW3D_MT_armature_context_menu.py | # 「3Dビュー」エリア > アーマチュアの「編集」モード > 「アーマチュアコンテクストメニュー」 (Wキー)
# "3D View" Area > "Edit" Mode with Armature > "Armature Context Menu" (W Key)
import bpy, mathutils
import re
from bpy.props import *
SUFFIX_TPL = (".R",".L",".r",".l","_R","_L","_r","_l",".right",".left",".Right",".Left","_right","_left","_Right","_Left")
################
# オペレーター #
################
class CreateMirror(bpy.types.Operator):
bl_idname = "armature.create_mirror"
bl_label = "Add Suffix + Symmetrize"
bl_description = "Add left-right suffix to selected bones' names, and make their copies at the symmetric positions"
bl_options = {'REGISTER', 'UNDO'}
is_connect : BoolProperty(name="Copy 'Connected'", default=True)
use_autoname : BoolProperty(name="Add Suffix", default=True)
use_rename : BoolProperty(name="Rename", default=False)
new_name : StringProperty(name="New Name", default="Bone")
name_sep : EnumProperty(name="Numbering Expression",items=[
(".",".00X","",1),("_","_00X","",2),("-","-00X","",3)])
start_from : EnumProperty(name="Numbering Starts from",items=[
("NO","No number","",1),("ZERO","000","",2),("ONE","001","",3)])
@classmethod
def poll(cls, context):
ob = context.active_object
if ob:
if ob.type == 'ARMATURE':
if ob.mode == "EDIT":
return True
return False
def draw(self, context):
for p in ['is_connect', 'use_autoname', 'use_rename']:
row = self.layout.row()
row.use_property_split = True
row.prop(self, p)
box = self.layout.box()
if self.use_rename:
row = box.row(align=True)
row.label(text="New Name")
row.prop(self, 'new_name', text="")
row.prop(self, 'name_sep', text="")
row = box.row(align=True)
row.label(text="Numbering Starts from")
row.prop(self, 'start_from', text="")
def execute(self, context):
obj = context.active_object
if self.use_rename:
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.mode_set(mode='EDIT')# 直前に行った名前変更が obj.data.bones に反映されていない場合への対処
preCursorCo = context.scene.cursor.location[:]
prePivotPoint = context.scene.tool_settings.transform_pivot_point
preUseMirror = context.object.data.use_mirror_x
context.scene.cursor.location = context.object.location
context.scene.tool_settings.transform_pivot_point = 'CURSOR'
context.object.data.use_mirror_x = True
selectedBones = context.selected_bones[:]
if self.use_rename:
name_head = f"{self.new_name}{self.name_sep}"
if self.start_from == 'NO':
new_names = [self.new_name] + [f"{name_head}{num+1:03}" for num in range(len(selectedBones)-1)]
elif self.start_from == 'ZERO':
new_names = [f"{name_head}{num:03}" for num in range(len(selectedBones))]
elif self.start_from == 'ONE':
new_names = [f"{name_head}{num+1:03}" for num in range(len(selectedBones))]
for b, nam in zip(selectedBones, new_names):
try:
existed_bone = obj.data.bones[nam]
existed_bone.name = "temp"
b.name = nam
existed_bone.name = nam
except KeyError:
b.name = nam
if self.use_autoname:
for b in selectedBones:
if b.name.endswith(SUFFIX_TPL):
b.select = False
bpy.ops.armature.autoside_names(type='XAXIS')
for b in selectedBones:
b.select = True
bpy.ops.armature.duplicate(do_flip_names=True)
axis = (True, False, False)
bpy.ops.transform.mirror(constraint_axis=axis)
newBones = set(context.selected_bones) - set(selectedBones)
newBones = sorted(list(newBones), key=lambda x: x.name)
selectedBones = sorted(selectedBones, key=lambda x: x.name)
for orig, copy in zip(selectedBones, newBones):
bpy.ops.armature.select_all(action='DESELECT')
orig.select = orig.select_head = orig.select_tail = True
bpy.ops.transform.transform(mode='BONE_ROLL', value=(0, 0, 0, 0))
bpy.ops.armature.select_all(action='DESELECT')
copy.select = copy.select_head = copy.select_tail = True
if self.is_connect:
copy.use_connect = orig.use_connect
for b in newBones:
b.select = b.select_head = b.select_tail = True
context.scene.cursor.location = preCursorCo[:]
context.scene.tool_settings.transform_pivot_point = prePivotPoint
context.object.data.use_mirror_x = preUseMirror
return {'FINISHED'}
class RenameBoneRegularExpression(bpy.types.Operator):
bl_idname = "armature.rename_bone_regular_expression"
bl_label = "Rename Bones by Regular Expression"
bl_description = "Replace selected bones' names by using regular expression"
bl_options = {'REGISTER', 'UNDO'}
isAll : BoolProperty(name="Apply to All Bones", default=False)
pattern : StringProperty(name="Target text", default="^")
repl : StringProperty(name="New Text", default="")
@classmethod
def poll(cls, context):
if context.active_object and context.active_object.type == 'ARMATURE':
return True
return False
def execute(self, context):
obj = context.active_object
bones = context.selected_bones
if not bones:
bones = [b.bone for b in context.selected_pose_bones]
if (self.isAll):
bones = obj.data.bones
for bone in bones:
try:
new_name = re.sub(self.pattern, self.repl, bone.name)
except:
continue
bone.name = new_name
return {'FINISHED'}
class RenameOppositeBone(bpy.types.Operator):
bl_idname = "armature.rename_opposite_bone"
bl_label = "Manipulate Symmetric Bones' Names"
bl_description = "Change names of selected bones and its opposite-side ones in a specific way"
bl_options = {'REGISTER', 'UNDO'}
threshold : FloatProperty(name="Position of Threshold", default=0.00001, min=0, soft_min=0, step=0.001, precision=5)
use_rename : EnumProperty(name="Manipulate",items=[
("False","Add Suffix","",1),("True","Rename + Add Suffix","",2)])
order : EnumProperty(name="Bones' Order",items=[
("DEFAULT","Default","",1),("NAME","Sort by original name","",2)])
use_root : BoolProperty(name="Use Root Bones' Names as New Names", default=False)
new_name : StringProperty(name="New Name", default="Bone")
name_sep : EnumProperty(name="Numbering Expression",items=[
(".",".00X","",1),("_","_00X","",2),("-","-00X","",3)])
start_from : EnumProperty(name="Numbering Starts from",items=[
("NO","No number","",1),("ZERO","000","",2),("ONE","001","",3)])
@classmethod
def poll(cls, context):
ob = context.active_object
if ob:
if ob.type == 'ARMATURE':
if ob.mode == "EDIT":
return True
return False
def draw(self, context):
row = self.layout.row()
row.use_property_split = True
row.prop(self, 'threshold')
self.layout.prop(self, 'use_rename', expand=True)
box = self.layout.box()
if self.use_rename == 'True':
row = box.row(align=True)
row.label(text="Use Root Bones' Names as New Names")
row.prop(self, 'use_root', text="")
row = box.split(factor=0.4, align=True)
row.label(text="Bones' Order")
row.prop(self, 'order', text="")
row.enabled = not self.use_root
sp = box.split(factor=0.66, align=True)
sp_row = sp.row(align=True)
sp_row.label(text="New Name")
sp_row.prop(self, 'new_name', text="")
sp_row.enabled = not self.use_root
sp.prop(self, 'name_sep', text="")
row = box.row(align=True)
row.label(text="Numbering Starts from")
row.prop(self, 'start_from', text="")
def execute(self, context):
obj = context.active_object
arm = obj.data
selectedBones = context.selected_bones[:]
if self.use_rename:
if self.use_root:
roots = [b for b in selectedBones if (not b.parent) or (b.parent not in selectedBones)]
root_names=[]
for ro in roots:
pre_name = ro.name
bpy.ops.armature.select_all(action='DESELECT')
ro.select = True
bpy.ops.armature.flip_names(do_strip_numbers=True)
for idx,(x,y) in enumerate(zip(pre_name, ro.name)):
if not x == y:
if pre_name[:idx][-1] in ['.','_','-']:
root_names.append(pre_name[:idx-1])
else:
root_names.append(pre_name[:idx])
ro.name = pre_name
break
else:
root_names.append(pre_name)
target_bones = []
for b in roots:
b_chain = [b] + b.children_recursive
target_bones.append([b for b in b_chain if b in selectedBones])
else:
root_names = [self.new_name]
if self.order == 'DEFAULT':
target_bones = [selectedBones]
elif self.order == 'NAME':
target_bones = [sorted(selectedBones, key=lambda x:x.name)]
for root_name, bone_list in zip(root_names, target_bones):
name_head = f"{root_name}{self.name_sep}"
if self.start_from == 'NO':
new_names = [root_name] + [f"{name_head}{num+1:03}" for num in range(len(selectedBones)-1)]
elif self.start_from == 'ZERO':
new_names = [f"{name_head}{num:03}" for num in range(len(selectedBones))]
elif self.start_from == 'ONE':
new_names = [f"{name_head}{num+1:03}" for num in range(len(selectedBones))]
for b, nam in zip(bone_list, new_names):
try:
existed_bone = obj.data.bones[nam]
existed_bone.name = "temp"
b.name = nam
existed_bone.name = nam
except KeyError:
b.name = nam
bpy.ops.armature.select_all(action='DESELECT')
for b in selectedBones:
if not b.name.endswith(SUFFIX_TPL):
b.select = True
bpy.ops.armature.autoside_names(type='XAXIS')
bpy.ops.armature.select_all(action='DESELE | ld = self.threshold
for bone in selectedBones:
bone = arm.bones[bone.name]
temp = [x for x in bone.head_local]
head_interval = [(x-threshold, x+threshold) for x in [temp[0]*(-1)] + temp[1:]]
temp = [x for x in bone.tail_local]
tail_interval = [(x-threshold, x+threshold) for x in [temp[0]*(-1)] + temp[1:]]
for b in arm.bones:
if b == bone:
continue
for value, limits in zip(b.head_local, head_interval):
if not limits[0] <= value <= limits[1]:
break
else:
for value, limits in zip(b.tail_local, tail_interval):
if not limits[0] <= value <= limits[1]:
break
else:
b.name = bone.name
b.select = True
b.select_head = True
b.select_tail = True
break
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.armature.flip_names(do_strip_numbers=True)
return {'FINISHED'}
class ExtendBone(bpy.types.Operator):
bl_idname = "armature.extend_bone"
bl_label = "Extend Bone"
bl_description = "Stretch new bone in the direction of selected bone"
bl_options = {'REGISTER', 'UNDO'}
length : FloatProperty(name="Length", default=1.0, min=-10, max=10, soft_min=-10, soft_max=10, step=10, precision=3)
is_parent : BoolProperty(name="Set Original as Parent", default=True)
is_connect : BoolProperty(name="Connected", default=True)
@classmethod
def poll(cls, context):
ob = context.active_object
if ob:
if ob.type == 'ARMATURE':
if 'selected_bones' in dir(context):
if context.selected_bones:
if 1 <= len(context.selected_bones):
return True
return False
def execute(self, context):
ob = context.active_object
arm = ob.data
for bone in context.selected_bones[:]:
new_bone = arm.edit_bones.new(bone.name)
new_bone.head = bone.tail[:]
rot = bone.matrix.to_quaternion()
tail = mathutils.Vector((0, 1, 0)) * self.length
tail.rotate(rot)
new_bone.tail = bone.tail + tail
new_bone.roll = bone.roll
if self.is_parent:
new_bone.parent = bone
if self.is_connect:
new_bone.use_connect = True
bone.select = False
bone.select_head = False
if bone.use_connect:
bone.parent.select_tail = False
if self.is_connect:
bone.select_tail = True
new_bone.select = True
new_bone.select_head = True
new_bone.select_tail = True
return {'FINISHED'}
################
# クラスの登録 #
################
classes = [
CreateMirror,
RenameBoneRegularExpression,
RenameOppositeBone,
ExtendBone
]
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
################
# メニュー追加 #
################
# メニューのオン/オフの判定
def IsMenuEnable(self_id):
for id in bpy.context.preferences.addons[__name__.partition('.')[0]].preferences.disabled_menu.split(','):
if (id == self_id):
return False
else:
return True
# メニューを登録する関数
def menu(self, context):
if (IsMenuEnable(__name__.split('.')[-1])):
self.layout.separator()
self.layout.operator(ExtendBone.bl_idname, icon='PLUGIN')
self.layout.separator()
self.layout.prop(context.object.data, 'use_mirror_x', icon='PLUGIN')
self.layout.operator(CreateMirror.bl_idname, icon='PLUGIN')
self.layout.operator(RenameOppositeBone.bl_idname, icon='PLUGIN')
self.layout.separator()
self.layout.operator('object.copy_bone_name', icon='PLUGIN')#BONE_PT_context_bone で定義
self.layout.operator(RenameBoneRegularExpression.bl_idname, icon='PLUGIN')
if (context.preferences.addons[__name__.partition('.')[0]].preferences.use_disabled_menu):
self.layout.separator()
self.layout.operator('wm.toggle_menu_enable', icon='CANCEL').id = __name__.split('.')[-1]
| CT')
bpy.ops.object.mode_set(mode='OBJECT')
thresho | conditional_block |
handler.rs | // Copyright 2019 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::protocol;
use futures::prelude::*;
use libp2p_core::ProtocolsHandlerEvent;
use libp2p_core::protocols_handler::{
KeepAlive,
SubstreamProtocol,
ProtocolsHandler,
ProtocolsHandlerUpgrErr,
};
use std::{error::Error, io, fmt, num::NonZeroU32, time::Duration};
use std::collections::VecDeque;
use tokio_io::{AsyncRead, AsyncWrite};
use wasm_timer::{Delay, Instant};
use void::Void;
/// The configuration for outbound pings.
#[derive(Clone, Debug)]
pub struct PingConfig {
/// The timeout of an outbound ping.
timeout: Duration,
/// The duration between the last successful outbound or inbound ping
/// and the next outbound ping.
interval: Duration,
/// The maximum number of failed outbound pings before the associated
/// connection is deemed unhealthy, indicating to the `Swarm` that it
/// should be closed.
max_failures: NonZeroU32,
/// Whether the connection should generally be kept alive unless
/// `max_failures` occur.
keep_alive: bool,
}
impl PingConfig {
/// Creates a new `PingConfig` with the following default settings:
///
/// * [`PingConfig::with_interval`] 15s
/// * [`PingConfig::with_timeout`] 20s
/// * [`PingConfig::with_max_failures`] 1
/// * [`PingConfig::with_keep_alive`] false
///
/// These settings have the following effect:
///
/// * A ping is sent every 15 seconds on a healthy connection.
/// * Every ping sent must yield a response within 20 seconds in order to
/// be successful.
/// * A single ping failure is sufficient for the connection to be subject
/// to being closed.
/// * The connection may be closed at any time as far as the ping protocol
/// is concerned, i.e. the ping protocol itself does not keep the
/// connection alive.
pub fn new() -> Self {
Self {
timeout: Duration::from_secs(20),
interval: Duration::from_secs(15),
max_failures: NonZeroU32::new(1).expect("1 != 0"),
keep_alive: false
}
}
/// Sets the ping timeout.
pub fn with_timeout(mut self, d: Duration) -> Self {
self.timeout = d;
self
}
/// Sets the ping interval.
pub fn with_interval(mut self, d: Duration) -> Self {
self.interval = d;
self
}
/// Sets the maximum number of consecutive ping failures upon which the remote
/// peer is considered unreachable and the connection closed.
pub fn with_max_failures(mut self, n: NonZeroU32) -> Self {
self.max_failures = n;
self
}
/// Sets whether the ping protocol itself should keep the connection alive,
/// apart from the maximum allowed failures.
///
/// By default, the ping protocol itself allows the connection to be closed
/// at any time, i.e. in the absence of ping failures the connection lifetime
/// is determined by other protocol handlers.
///
/// If the maximum number of allowed ping failures is reached, the
/// connection is always terminated as a result of [`PingHandler::poll`]
/// returning an error, regardless of the keep-alive setting.
pub fn with_keep_alive(mut self, b: bool) -> Self {
self.keep_alive = b;
self
}
}
/// The result of an inbound or outbound ping.
pub type PingResult = Result<PingSuccess, PingFailure>;
/// The successful result of processing an inbound or outbound ping.
#[derive(Debug)]
pub enum PingSuccess {
/// Received a ping and sent back a pong.
Pong,
/// Sent a ping and received back a pong.
///
/// Includes the round-trip time.
Ping { rtt: Duration },
}
/// An outbound ping failure.
#[derive(Debug)]
pub enum PingFailure {
/// The ping timed out, i.e. no response was received within the
/// configured ping timeout.
Timeout,
/// The ping failed for reasons other than a timeout.
Other { error: Box<dyn std::error::Error + Send + 'static> }
}
impl fmt::Display for PingFailure {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
PingFailure::Timeout => f.write_str("Ping timeout"),
PingFailure::Other { error } => write!(f, "Ping error: {}", error)
}
}
}
impl Error for PingFailure {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
PingFailure::Timeout => None,
PingFailure::Other { error } => Some(&**error)
}
}
}
/// Protocol handler that handles pinging the remote at a regular period
/// and answering ping queries.
///
/// If the remote doesn't respond, produces an error that closes the connection.
pub struct PingHandler<TSubstream> {
/// Configuration options.
config: PingConfig,
/// The timer for when to send the next ping.
next_ping: Delay,
/// The pending results from inbound or outbound pings, ready
/// to be `poll()`ed.
pending_results: VecDeque<PingResult>,
/// The number of consecutive ping failures that occurred.
failures: u32,
_marker: std::marker::PhantomData<TSubstream>
}
impl<TSubstream> PingHandler<TSubstream> {
/// Builds a new `PingHandler` with the given configuration.
pub fn new(config: PingConfig) -> Self {
PingHandler {
config,
next_ping: Delay::new(Instant::now()),
pending_results: VecDeque::with_capacity(2),
failures: 0,
_marker: std::marker::PhantomData
}
}
}
impl<TSubstream> ProtocolsHandler for PingHandler<TSubstream>
where
TSubstream: AsyncRead + AsyncWrite,
{
type InEvent = Void;
type OutEvent = PingResult;
type Error = PingFailure;
type Substream = TSubstream;
type InboundProtocol = protocol::Ping;
type OutboundProtocol = protocol::Ping;
type OutboundOpenInfo = ();
fn listen_protocol(&self) -> SubstreamProtocol<protocol::Ping> {
SubstreamProtocol::new(protocol::Ping)
}
fn inject_fully_negotiated_inbound(&mut self, _: ()) {
// A ping from a remote peer has been answered.
self.pending_results.push_front(Ok(PingSuccess::Pong));
}
fn inject_fully_negotiated_outbound(&mut self, rtt: Duration, _info: ()) {
// A ping initiated by the local peer was answered by the remote.
self.pending_results.push_front(Ok(PingSuccess::Ping { rtt }));
}
fn inject_event(&mut self, _: Void) {}
fn inject_dial_upgrade_error(&mut self, _info: (), error: ProtocolsHandlerUpgrErr<io::Error>) {
self.pending_results.push_front(
Err(match error {
ProtocolsHandlerUpgrErr::Timeout => PingFailure::Timeout,
e => PingFailure::Other { error: Box::new(e) }
}))
}
fn connection_keep_alive(&self) -> KeepAlive {
if self.config.keep_alive { | KeepAlive::Yes
} else {
KeepAlive::No
}
}
fn poll(&mut self) -> Poll<ProtocolsHandlerEvent<protocol::Ping, (), PingResult>, Self::Error> {
if let Some(result) = self.pending_results.pop_back() {
if let Ok(PingSuccess::Ping { .. }) = result {
let next_ping = Instant::now() + self.config.interval;
self.failures = 0;
self.next_ping.reset(next_ping);
}
if let Err(e) = result {
self.failures += 1;
if self.failures >= self.config.max_failures.get() {
return Err(e)
} else {
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(Err(e))))
}
}
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(result)))
}
match self.next_ping.poll() {
Ok(Async::Ready(())) => {
self.next_ping.reset(Instant::now() + self.config.timeout);
let protocol = SubstreamProtocol::new(protocol::Ping)
.with_timeout(self.config.timeout);
Ok(Async::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol,
info: (),
}))
},
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(e) => Err(PingFailure::Other { error: Box::new(e) })
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::future;
use quickcheck::*;
use rand::Rng;
use tokio_tcp::TcpStream;
use tokio::runtime::current_thread::Runtime;
impl Arbitrary for PingConfig {
fn arbitrary<G: Gen>(g: &mut G) -> PingConfig {
PingConfig::new()
.with_timeout(Duration::from_secs(g.gen_range(0, 3600)))
.with_interval(Duration::from_secs(g.gen_range(0, 3600)))
.with_max_failures(NonZeroU32::new(g.gen_range(1, 100)).unwrap())
}
}
fn tick(h: &mut PingHandler<TcpStream>) -> Result<
ProtocolsHandlerEvent<protocol::Ping, (), PingResult>,
PingFailure
> {
Runtime::new().unwrap().block_on(future::poll_fn(|| h.poll() ))
}
#[test]
fn ping_interval() {
fn prop(cfg: PingConfig, ping_rtt: Duration) -> bool {
let mut h = PingHandler::<TcpStream>::new(cfg);
// The first ping is scheduled "immediately".
let start = h.next_ping.deadline();
assert!(start <= Instant::now());
// Send ping
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: _ }) => {
// The handler must use the configured timeout.
assert_eq!(protocol.timeout(), &h.config.timeout);
// The next ping must be scheduled no earlier than the ping timeout.
assert!(h.next_ping.deadline() >= start + h.config.timeout);
}
e => panic!("Unexpected event: {:?}", e)
}
let now = Instant::now();
// Receive pong
h.inject_fully_negotiated_outbound(ping_rtt, ());
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping { rtt }))) => {
// The handler must report the given RTT.
assert_eq!(rtt, ping_rtt);
// The next ping must be scheduled no earlier than the ping interval.
assert!(now + h.config.interval <= h.next_ping.deadline());
}
e => panic!("Unexpected event: {:?}", e)
}
true
}
quickcheck(prop as fn(_,_) -> _);
}
#[test]
fn max_failures() {
let cfg = PingConfig::arbitrary(&mut StdGen::new(rand::thread_rng(), 100));
let mut h = PingHandler::<TcpStream>::new(cfg);
for _ in 0 .. h.config.max_failures.get() - 1 {
h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout);
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::Custom(Err(PingFailure::Timeout))) => {}
e => panic!("Unexpected event: {:?}", e)
}
}
h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout);
match tick(&mut h) {
Err(PingFailure::Timeout) => {
assert_eq!(h.failures, h.config.max_failures.get());
}
e => panic!("Unexpected event: {:?}", e)
}
h.inject_fully_negotiated_outbound(Duration::from_secs(1), ());
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping { .. }))) => {
// A success resets the counter for consecutive failures.
assert_eq!(h.failures, 0);
}
e => panic!("Unexpected event: {:?}", e)
}
}
} | random_line_split | |
handler.rs | // Copyright 2019 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::protocol;
use futures::prelude::*;
use libp2p_core::ProtocolsHandlerEvent;
use libp2p_core::protocols_handler::{
KeepAlive,
SubstreamProtocol,
ProtocolsHandler,
ProtocolsHandlerUpgrErr,
};
use std::{error::Error, io, fmt, num::NonZeroU32, time::Duration};
use std::collections::VecDeque;
use tokio_io::{AsyncRead, AsyncWrite};
use wasm_timer::{Delay, Instant};
use void::Void;
/// The configuration for outbound pings.
#[derive(Clone, Debug)]
pub struct PingConfig {
/// The timeout of an outbound ping.
timeout: Duration,
/// The duration between the last successful outbound or inbound ping
/// and the next outbound ping.
interval: Duration,
/// The maximum number of failed outbound pings before the associated
/// connection is deemed unhealthy, indicating to the `Swarm` that it
/// should be closed.
max_failures: NonZeroU32,
/// Whether the connection should generally be kept alive unless
/// `max_failures` occur.
keep_alive: bool,
}
impl PingConfig {
/// Creates a new `PingConfig` with the following default settings:
///
/// * [`PingConfig::with_interval`] 15s
/// * [`PingConfig::with_timeout`] 20s
/// * [`PingConfig::with_max_failures`] 1
/// * [`PingConfig::with_keep_alive`] false
///
/// These settings have the following effect:
///
/// * A ping is sent every 15 seconds on a healthy connection.
/// * Every ping sent must yield a response within 20 seconds in order to
/// be successful.
/// * A single ping failure is sufficient for the connection to be subject
/// to being closed.
/// * The connection may be closed at any time as far as the ping protocol
/// is concerned, i.e. the ping protocol itself does not keep the
/// connection alive.
pub fn new() -> Self {
Self {
timeout: Duration::from_secs(20),
interval: Duration::from_secs(15),
max_failures: NonZeroU32::new(1).expect("1 != 0"),
keep_alive: false
}
}
/// Sets the ping timeout.
pub fn with_timeout(mut self, d: Duration) -> Self {
self.timeout = d;
self
}
/// Sets the ping interval.
pub fn with_interval(mut self, d: Duration) -> Self {
self.interval = d;
self
}
/// Sets the maximum number of consecutive ping failures upon which the remote
/// peer is considered unreachable and the connection closed.
pub fn with_max_failures(mut self, n: NonZeroU32) -> Self {
self.max_failures = n;
self
}
/// Sets whether the ping protocol itself should keep the connection alive,
/// apart from the maximum allowed failures.
///
/// By default, the ping protocol itself allows the connection to be closed
/// at any time, i.e. in the absence of ping failures the connection lifetime
/// is determined by other protocol handlers.
///
/// If the maximum number of allowed ping failures is reached, the
/// connection is always terminated as a result of [`PingHandler::poll`]
/// returning an error, regardless of the keep-alive setting.
pub fn with_keep_alive(mut self, b: bool) -> Self {
self.keep_alive = b;
self
}
}
/// The result of an inbound or outbound ping.
pub type PingResult = Result<PingSuccess, PingFailure>;
/// The successful result of processing an inbound or outbound ping.
#[derive(Debug)]
pub enum PingSuccess {
/// Received a ping and sent back a pong.
Pong,
/// Sent a ping and received back a pong.
///
/// Includes the round-trip time.
Ping { rtt: Duration },
}
/// An outbound ping failure.
#[derive(Debug)]
pub enum PingFailure {
/// The ping timed out, i.e. no response was received within the
/// configured ping timeout.
Timeout,
/// The ping failed for reasons other than a timeout.
Other { error: Box<dyn std::error::Error + Send + 'static> }
}
impl fmt::Display for PingFailure {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
PingFailure::Timeout => f.write_str("Ping timeout"),
PingFailure::Other { error } => write!(f, "Ping error: {}", error)
}
}
}
impl Error for PingFailure {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
PingFailure::Timeout => None,
PingFailure::Other { error } => Some(&**error)
}
}
}
/// Protocol handler that handles pinging the remote at a regular period
/// and answering ping queries.
///
/// If the remote doesn't respond, produces an error that closes the connection.
pub struct PingHandler<TSubstream> {
/// Configuration options.
config: PingConfig,
/// The timer for when to send the next ping.
next_ping: Delay,
/// The pending results from inbound or outbound pings, ready
/// to be `poll()`ed.
pending_results: VecDeque<PingResult>,
/// The number of consecutive ping failures that occurred.
failures: u32,
_marker: std::marker::PhantomData<TSubstream>
}
impl<TSubstream> PingHandler<TSubstream> {
/// Builds a new `PingHandler` with the given configuration.
pub fn new(config: PingConfig) -> Self {
PingHandler {
config,
next_ping: Delay::new(Instant::now()),
pending_results: VecDeque::with_capacity(2),
failures: 0,
_marker: std::marker::PhantomData
}
}
}
impl<TSubstream> ProtocolsHandler for PingHandler<TSubstream>
where
TSubstream: AsyncRead + AsyncWrite,
{
type InEvent = Void;
type OutEvent = PingResult;
type Error = PingFailure;
type Substream = TSubstream;
type InboundProtocol = protocol::Ping;
type OutboundProtocol = protocol::Ping;
type OutboundOpenInfo = ();
fn listen_protocol(&self) -> SubstreamProtocol<protocol::Ping> {
SubstreamProtocol::new(protocol::Ping)
}
fn inject_fully_negotiated_inbound(&mut self, _: ()) {
// A ping from a remote peer has been answered.
self.pending_results.push_front(Ok(PingSuccess::Pong));
}
fn inject_fully_negotiated_outbound(&mut self, rtt: Duration, _info: ()) {
// A ping initiated by the local peer was answered by the remote.
self.pending_results.push_front(Ok(PingSuccess::Ping { rtt }));
}
fn inject_event(&mut self, _: Void) {}
fn inject_dial_upgrade_error(&mut self, _info: (), error: ProtocolsHandlerUpgrErr<io::Error>) {
self.pending_results.push_front(
Err(match error {
ProtocolsHandlerUpgrErr::Timeout => PingFailure::Timeout,
e => PingFailure::Other { error: Box::new(e) }
}))
}
fn connection_keep_alive(&self) -> KeepAlive {
if self.config.keep_alive {
KeepAlive::Yes
} else {
KeepAlive::No
}
}
fn poll(&mut self) -> Poll<ProtocolsHandlerEvent<protocol::Ping, (), PingResult>, Self::Error> {
if let Some(result) = self.pending_results.pop_back() {
if let Ok(PingSuccess::Ping { .. }) = result {
let next_ping = Instant::now() + self.config.interval;
self.failures = 0;
self.next_ping.reset(next_ping);
}
if let Err(e) = result {
self.failures += 1;
if self.failures >= self.config.max_failures.get() {
return Err(e)
} else {
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(Err(e))))
}
}
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(result)))
}
match self.next_ping.poll() {
Ok(Async::Ready(())) => {
self.next_ping.reset(Instant::now() + self.config.timeout);
let protocol = SubstreamProtocol::new(protocol::Ping)
.with_timeout(self.config.timeout);
Ok(Async::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol,
info: (),
}))
},
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(e) => Err(PingFailure::Other { error: Box::new(e) })
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::future;
use quickcheck::*;
use rand::Rng;
use tokio_tcp::TcpStream;
use tokio::runtime::current_thread::Runtime;
impl Arbitrary for PingConfig {
fn arbitrary<G: Gen>(g: &mut G) -> PingConfig {
PingConfig::new()
.with_timeout(Duration::from_secs(g.gen_range(0, 3600)))
.with_interval(Duration::from_secs(g.gen_range(0, 3600)))
.with_max_failures(NonZeroU32::new(g.gen_range(1, 100)).unwrap())
}
}
fn tick(h: &mut PingHandler<TcpStream>) -> Result<
ProtocolsHandlerEvent<protocol::Ping, (), PingResult>,
PingFailure
> {
Runtime::new().unwrap().block_on(future::poll_fn(|| h.poll() ))
}
#[test]
fn ping_interval() {
fn prop(cfg: PingConfig, ping_rtt: Duration) -> bool {
let mut h = PingHandler::<TcpStream>::new(cfg);
// The first ping is scheduled "immediately".
let start = h.next_ping.deadline();
assert!(start <= Instant::now());
// Send ping
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: _ }) => {
// The handler must use the configured timeout.
assert_eq!(protocol.timeout(), &h.config.timeout);
// The next ping must be scheduled no earlier than the ping timeout.
assert!(h.next_ping.deadline() >= start + h.config.timeout);
}
e => panic!("Unexpected event: {:?}", e)
}
let now = Instant::now();
// Receive pong
h.inject_fully_negotiated_outbound(ping_rtt, ());
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping { rtt }))) => {
// The handler must report the given RTT.
assert_eq!(rtt, ping_rtt);
// The next ping must be scheduled no earlier than the ping interval.
assert!(now + h.config.interval <= h.next_ping.deadline());
}
e => panic!("Unexpected event: {:?}", e)
}
true
}
quickcheck(prop as fn(_,_) -> _);
}
#[test]
fn max_failures() {
let cfg = PingConfig::arbitrary(&mut StdGen::new(rand::thread_rng(), 100));
let mut h = PingHandler::<TcpStream>::new(cfg);
for _ in 0 .. h.config.max_failures.get() - 1 {
h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout);
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::Custom(Err(PingFailure::Timeout))) => |
e => panic!("Unexpected event: {:?}", e)
}
}
h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout);
match tick(&mut h) {
Err(PingFailure::Timeout) => {
assert_eq!(h.failures, h.config.max_failures.get());
}
e => panic!("Unexpected event: {:?}", e)
}
h.inject_fully_negotiated_outbound(Duration::from_secs(1), ());
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping { .. }))) => {
// A success resets the counter for consecutive failures.
assert_eq!(h.failures, 0);
}
e => panic!("Unexpected event: {:?}", e)
}
}
}
| {} | conditional_block |
handler.rs | // Copyright 2019 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::protocol;
use futures::prelude::*;
use libp2p_core::ProtocolsHandlerEvent;
use libp2p_core::protocols_handler::{
KeepAlive,
SubstreamProtocol,
ProtocolsHandler,
ProtocolsHandlerUpgrErr,
};
use std::{error::Error, io, fmt, num::NonZeroU32, time::Duration};
use std::collections::VecDeque;
use tokio_io::{AsyncRead, AsyncWrite};
use wasm_timer::{Delay, Instant};
use void::Void;
/// The configuration for outbound pings.
#[derive(Clone, Debug)]
pub struct PingConfig {
/// The timeout of an outbound ping.
timeout: Duration,
/// The duration between the last successful outbound or inbound ping
/// and the next outbound ping.
interval: Duration,
/// The maximum number of failed outbound pings before the associated
/// connection is deemed unhealthy, indicating to the `Swarm` that it
/// should be closed.
max_failures: NonZeroU32,
/// Whether the connection should generally be kept alive unless
/// `max_failures` occur.
keep_alive: bool,
}
impl PingConfig {
/// Creates a new `PingConfig` with the following default settings:
///
/// * [`PingConfig::with_interval`] 15s
/// * [`PingConfig::with_timeout`] 20s
/// * [`PingConfig::with_max_failures`] 1
/// * [`PingConfig::with_keep_alive`] false
///
/// These settings have the following effect:
///
/// * A ping is sent every 15 seconds on a healthy connection.
/// * Every ping sent must yield a response within 20 seconds in order to
/// be successful.
/// * A single ping failure is sufficient for the connection to be subject
/// to being closed.
/// * The connection may be closed at any time as far as the ping protocol
/// is concerned, i.e. the ping protocol itself does not keep the
/// connection alive.
pub fn new() -> Self {
Self {
timeout: Duration::from_secs(20),
interval: Duration::from_secs(15),
max_failures: NonZeroU32::new(1).expect("1 != 0"),
keep_alive: false
}
}
/// Sets the ping timeout.
pub fn with_timeout(mut self, d: Duration) -> Self {
self.timeout = d;
self
}
/// Sets the ping interval.
pub fn with_interval(mut self, d: Duration) -> Self {
self.interval = d;
self
}
/// Sets the maximum number of consecutive ping failures upon which the remote
/// peer is considered unreachable and the connection closed.
pub fn with_max_failures(mut self, n: NonZeroU32) -> Self {
self.max_failures = n;
self
}
/// Sets whether the ping protocol itself should keep the connection alive,
/// apart from the maximum allowed failures.
///
/// By default, the ping protocol itself allows the connection to be closed
/// at any time, i.e. in the absence of ping failures the connection lifetime
/// is determined by other protocol handlers.
///
/// If the maximum number of allowed ping failures is reached, the
/// connection is always terminated as a result of [`PingHandler::poll`]
/// returning an error, regardless of the keep-alive setting.
pub fn with_keep_alive(mut self, b: bool) -> Self {
self.keep_alive = b;
self
}
}
/// The result of an inbound or outbound ping.
pub type PingResult = Result<PingSuccess, PingFailure>;
/// The successful result of processing an inbound or outbound ping.
#[derive(Debug)]
pub enum PingSuccess {
/// Received a ping and sent back a pong.
Pong,
/// Sent a ping and received back a pong.
///
/// Includes the round-trip time.
Ping { rtt: Duration },
}
/// An outbound ping failure.
#[derive(Debug)]
pub enum PingFailure {
/// The ping timed out, i.e. no response was received within the
/// configured ping timeout.
Timeout,
/// The ping failed for reasons other than a timeout.
Other { error: Box<dyn std::error::Error + Send + 'static> }
}
impl fmt::Display for PingFailure {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
PingFailure::Timeout => f.write_str("Ping timeout"),
PingFailure::Other { error } => write!(f, "Ping error: {}", error)
}
}
}
impl Error for PingFailure {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
PingFailure::Timeout => None,
PingFailure::Other { error } => Some(&**error)
}
}
}
/// Protocol handler that handles pinging the remote at a regular period
/// and answering ping queries.
///
/// If the remote doesn't respond, produces an error that closes the connection.
pub struct PingHandler<TSubstream> {
/// Configuration options.
config: PingConfig,
/// The timer for when to send the next ping.
next_ping: Delay,
/// The pending results from inbound or outbound pings, ready
/// to be `poll()`ed.
pending_results: VecDeque<PingResult>,
/// The number of consecutive ping failures that occurred.
failures: u32,
_marker: std::marker::PhantomData<TSubstream>
}
impl<TSubstream> PingHandler<TSubstream> {
/// Builds a new `PingHandler` with the given configuration.
pub fn | (config: PingConfig) -> Self {
PingHandler {
config,
next_ping: Delay::new(Instant::now()),
pending_results: VecDeque::with_capacity(2),
failures: 0,
_marker: std::marker::PhantomData
}
}
}
impl<TSubstream> ProtocolsHandler for PingHandler<TSubstream>
where
TSubstream: AsyncRead + AsyncWrite,
{
type InEvent = Void;
type OutEvent = PingResult;
type Error = PingFailure;
type Substream = TSubstream;
type InboundProtocol = protocol::Ping;
type OutboundProtocol = protocol::Ping;
type OutboundOpenInfo = ();
fn listen_protocol(&self) -> SubstreamProtocol<protocol::Ping> {
SubstreamProtocol::new(protocol::Ping)
}
fn inject_fully_negotiated_inbound(&mut self, _: ()) {
// A ping from a remote peer has been answered.
self.pending_results.push_front(Ok(PingSuccess::Pong));
}
fn inject_fully_negotiated_outbound(&mut self, rtt: Duration, _info: ()) {
// A ping initiated by the local peer was answered by the remote.
self.pending_results.push_front(Ok(PingSuccess::Ping { rtt }));
}
fn inject_event(&mut self, _: Void) {}
fn inject_dial_upgrade_error(&mut self, _info: (), error: ProtocolsHandlerUpgrErr<io::Error>) {
self.pending_results.push_front(
Err(match error {
ProtocolsHandlerUpgrErr::Timeout => PingFailure::Timeout,
e => PingFailure::Other { error: Box::new(e) }
}))
}
fn connection_keep_alive(&self) -> KeepAlive {
if self.config.keep_alive {
KeepAlive::Yes
} else {
KeepAlive::No
}
}
fn poll(&mut self) -> Poll<ProtocolsHandlerEvent<protocol::Ping, (), PingResult>, Self::Error> {
if let Some(result) = self.pending_results.pop_back() {
if let Ok(PingSuccess::Ping { .. }) = result {
let next_ping = Instant::now() + self.config.interval;
self.failures = 0;
self.next_ping.reset(next_ping);
}
if let Err(e) = result {
self.failures += 1;
if self.failures >= self.config.max_failures.get() {
return Err(e)
} else {
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(Err(e))))
}
}
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(result)))
}
match self.next_ping.poll() {
Ok(Async::Ready(())) => {
self.next_ping.reset(Instant::now() + self.config.timeout);
let protocol = SubstreamProtocol::new(protocol::Ping)
.with_timeout(self.config.timeout);
Ok(Async::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol,
info: (),
}))
},
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(e) => Err(PingFailure::Other { error: Box::new(e) })
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::future;
use quickcheck::*;
use rand::Rng;
use tokio_tcp::TcpStream;
use tokio::runtime::current_thread::Runtime;
impl Arbitrary for PingConfig {
fn arbitrary<G: Gen>(g: &mut G) -> PingConfig {
PingConfig::new()
.with_timeout(Duration::from_secs(g.gen_range(0, 3600)))
.with_interval(Duration::from_secs(g.gen_range(0, 3600)))
.with_max_failures(NonZeroU32::new(g.gen_range(1, 100)).unwrap())
}
}
fn tick(h: &mut PingHandler<TcpStream>) -> Result<
ProtocolsHandlerEvent<protocol::Ping, (), PingResult>,
PingFailure
> {
Runtime::new().unwrap().block_on(future::poll_fn(|| h.poll() ))
}
#[test]
fn ping_interval() {
fn prop(cfg: PingConfig, ping_rtt: Duration) -> bool {
let mut h = PingHandler::<TcpStream>::new(cfg);
// The first ping is scheduled "immediately".
let start = h.next_ping.deadline();
assert!(start <= Instant::now());
// Send ping
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: _ }) => {
// The handler must use the configured timeout.
assert_eq!(protocol.timeout(), &h.config.timeout);
// The next ping must be scheduled no earlier than the ping timeout.
assert!(h.next_ping.deadline() >= start + h.config.timeout);
}
e => panic!("Unexpected event: {:?}", e)
}
let now = Instant::now();
// Receive pong
h.inject_fully_negotiated_outbound(ping_rtt, ());
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping { rtt }))) => {
// The handler must report the given RTT.
assert_eq!(rtt, ping_rtt);
// The next ping must be scheduled no earlier than the ping interval.
assert!(now + h.config.interval <= h.next_ping.deadline());
}
e => panic!("Unexpected event: {:?}", e)
}
true
}
quickcheck(prop as fn(_,_) -> _);
}
#[test]
fn max_failures() {
let cfg = PingConfig::arbitrary(&mut StdGen::new(rand::thread_rng(), 100));
let mut h = PingHandler::<TcpStream>::new(cfg);
for _ in 0 .. h.config.max_failures.get() - 1 {
h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout);
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::Custom(Err(PingFailure::Timeout))) => {}
e => panic!("Unexpected event: {:?}", e)
}
}
h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout);
match tick(&mut h) {
Err(PingFailure::Timeout) => {
assert_eq!(h.failures, h.config.max_failures.get());
}
e => panic!("Unexpected event: {:?}", e)
}
h.inject_fully_negotiated_outbound(Duration::from_secs(1), ());
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping { .. }))) => {
// A success resets the counter for consecutive failures.
assert_eq!(h.failures, 0);
}
e => panic!("Unexpected event: {:?}", e)
}
}
}
| new | identifier_name |
handler.rs | // Copyright 2019 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::protocol;
use futures::prelude::*;
use libp2p_core::ProtocolsHandlerEvent;
use libp2p_core::protocols_handler::{
KeepAlive,
SubstreamProtocol,
ProtocolsHandler,
ProtocolsHandlerUpgrErr,
};
use std::{error::Error, io, fmt, num::NonZeroU32, time::Duration};
use std::collections::VecDeque;
use tokio_io::{AsyncRead, AsyncWrite};
use wasm_timer::{Delay, Instant};
use void::Void;
/// The configuration for outbound pings.
#[derive(Clone, Debug)]
pub struct PingConfig {
/// The timeout of an outbound ping.
timeout: Duration,
/// The duration between the last successful outbound or inbound ping
/// and the next outbound ping.
interval: Duration,
/// The maximum number of failed outbound pings before the associated
/// connection is deemed unhealthy, indicating to the `Swarm` that it
/// should be closed.
max_failures: NonZeroU32,
/// Whether the connection should generally be kept alive unless
/// `max_failures` occur.
keep_alive: bool,
}
impl PingConfig {
/// Creates a new `PingConfig` with the following default settings:
///
/// * [`PingConfig::with_interval`] 15s
/// * [`PingConfig::with_timeout`] 20s
/// * [`PingConfig::with_max_failures`] 1
/// * [`PingConfig::with_keep_alive`] false
///
/// These settings have the following effect:
///
/// * A ping is sent every 15 seconds on a healthy connection.
/// * Every ping sent must yield a response within 20 seconds in order to
/// be successful.
/// * A single ping failure is sufficient for the connection to be subject
/// to being closed.
/// * The connection may be closed at any time as far as the ping protocol
/// is concerned, i.e. the ping protocol itself does not keep the
/// connection alive.
pub fn new() -> Self {
Self {
timeout: Duration::from_secs(20),
interval: Duration::from_secs(15),
max_failures: NonZeroU32::new(1).expect("1 != 0"),
keep_alive: false
}
}
/// Sets the ping timeout.
pub fn with_timeout(mut self, d: Duration) -> Self {
self.timeout = d;
self
}
/// Sets the ping interval.
pub fn with_interval(mut self, d: Duration) -> Self {
self.interval = d;
self
}
/// Sets the maximum number of consecutive ping failures upon which the remote
/// peer is considered unreachable and the connection closed.
pub fn with_max_failures(mut self, n: NonZeroU32) -> Self {
self.max_failures = n;
self
}
/// Sets whether the ping protocol itself should keep the connection alive,
/// apart from the maximum allowed failures.
///
/// By default, the ping protocol itself allows the connection to be closed
/// at any time, i.e. in the absence of ping failures the connection lifetime
/// is determined by other protocol handlers.
///
/// If the maximum number of allowed ping failures is reached, the
/// connection is always terminated as a result of [`PingHandler::poll`]
/// returning an error, regardless of the keep-alive setting.
pub fn with_keep_alive(mut self, b: bool) -> Self {
self.keep_alive = b;
self
}
}
/// The result of an inbound or outbound ping.
pub type PingResult = Result<PingSuccess, PingFailure>;
/// The successful result of processing an inbound or outbound ping.
#[derive(Debug)]
pub enum PingSuccess {
/// Received a ping and sent back a pong.
Pong,
/// Sent a ping and received back a pong.
///
/// Includes the round-trip time.
Ping { rtt: Duration },
}
/// An outbound ping failure.
#[derive(Debug)]
pub enum PingFailure {
/// The ping timed out, i.e. no response was received within the
/// configured ping timeout.
Timeout,
/// The ping failed for reasons other than a timeout.
Other { error: Box<dyn std::error::Error + Send + 'static> }
}
impl fmt::Display for PingFailure {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
PingFailure::Timeout => f.write_str("Ping timeout"),
PingFailure::Other { error } => write!(f, "Ping error: {}", error)
}
}
}
impl Error for PingFailure {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
PingFailure::Timeout => None,
PingFailure::Other { error } => Some(&**error)
}
}
}
/// Protocol handler that handles pinging the remote at a regular period
/// and answering ping queries.
///
/// If the remote doesn't respond, produces an error that closes the connection.
pub struct PingHandler<TSubstream> {
/// Configuration options.
config: PingConfig,
/// The timer for when to send the next ping.
next_ping: Delay,
/// The pending results from inbound or outbound pings, ready
/// to be `poll()`ed.
pending_results: VecDeque<PingResult>,
/// The number of consecutive ping failures that occurred.
failures: u32,
_marker: std::marker::PhantomData<TSubstream>
}
impl<TSubstream> PingHandler<TSubstream> {
/// Builds a new `PingHandler` with the given configuration.
pub fn new(config: PingConfig) -> Self {
PingHandler {
config,
next_ping: Delay::new(Instant::now()),
pending_results: VecDeque::with_capacity(2),
failures: 0,
_marker: std::marker::PhantomData
}
}
}
impl<TSubstream> ProtocolsHandler for PingHandler<TSubstream>
where
TSubstream: AsyncRead + AsyncWrite,
{
type InEvent = Void;
type OutEvent = PingResult;
type Error = PingFailure;
type Substream = TSubstream;
type InboundProtocol = protocol::Ping;
type OutboundProtocol = protocol::Ping;
type OutboundOpenInfo = ();
fn listen_protocol(&self) -> SubstreamProtocol<protocol::Ping> {
SubstreamProtocol::new(protocol::Ping)
}
fn inject_fully_negotiated_inbound(&mut self, _: ()) |
fn inject_fully_negotiated_outbound(&mut self, rtt: Duration, _info: ()) {
// A ping initiated by the local peer was answered by the remote.
self.pending_results.push_front(Ok(PingSuccess::Ping { rtt }));
}
fn inject_event(&mut self, _: Void) {}
fn inject_dial_upgrade_error(&mut self, _info: (), error: ProtocolsHandlerUpgrErr<io::Error>) {
self.pending_results.push_front(
Err(match error {
ProtocolsHandlerUpgrErr::Timeout => PingFailure::Timeout,
e => PingFailure::Other { error: Box::new(e) }
}))
}
fn connection_keep_alive(&self) -> KeepAlive {
if self.config.keep_alive {
KeepAlive::Yes
} else {
KeepAlive::No
}
}
fn poll(&mut self) -> Poll<ProtocolsHandlerEvent<protocol::Ping, (), PingResult>, Self::Error> {
if let Some(result) = self.pending_results.pop_back() {
if let Ok(PingSuccess::Ping { .. }) = result {
let next_ping = Instant::now() + self.config.interval;
self.failures = 0;
self.next_ping.reset(next_ping);
}
if let Err(e) = result {
self.failures += 1;
if self.failures >= self.config.max_failures.get() {
return Err(e)
} else {
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(Err(e))))
}
}
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(result)))
}
match self.next_ping.poll() {
Ok(Async::Ready(())) => {
self.next_ping.reset(Instant::now() + self.config.timeout);
let protocol = SubstreamProtocol::new(protocol::Ping)
.with_timeout(self.config.timeout);
Ok(Async::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol,
info: (),
}))
},
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(e) => Err(PingFailure::Other { error: Box::new(e) })
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::future;
use quickcheck::*;
use rand::Rng;
use tokio_tcp::TcpStream;
use tokio::runtime::current_thread::Runtime;
impl Arbitrary for PingConfig {
fn arbitrary<G: Gen>(g: &mut G) -> PingConfig {
PingConfig::new()
.with_timeout(Duration::from_secs(g.gen_range(0, 3600)))
.with_interval(Duration::from_secs(g.gen_range(0, 3600)))
.with_max_failures(NonZeroU32::new(g.gen_range(1, 100)).unwrap())
}
}
fn tick(h: &mut PingHandler<TcpStream>) -> Result<
ProtocolsHandlerEvent<protocol::Ping, (), PingResult>,
PingFailure
> {
Runtime::new().unwrap().block_on(future::poll_fn(|| h.poll() ))
}
#[test]
fn ping_interval() {
fn prop(cfg: PingConfig, ping_rtt: Duration) -> bool {
let mut h = PingHandler::<TcpStream>::new(cfg);
// The first ping is scheduled "immediately".
let start = h.next_ping.deadline();
assert!(start <= Instant::now());
// Send ping
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: _ }) => {
// The handler must use the configured timeout.
assert_eq!(protocol.timeout(), &h.config.timeout);
// The next ping must be scheduled no earlier than the ping timeout.
assert!(h.next_ping.deadline() >= start + h.config.timeout);
}
e => panic!("Unexpected event: {:?}", e)
}
let now = Instant::now();
// Receive pong
h.inject_fully_negotiated_outbound(ping_rtt, ());
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping { rtt }))) => {
// The handler must report the given RTT.
assert_eq!(rtt, ping_rtt);
// The next ping must be scheduled no earlier than the ping interval.
assert!(now + h.config.interval <= h.next_ping.deadline());
}
e => panic!("Unexpected event: {:?}", e)
}
true
}
quickcheck(prop as fn(_,_) -> _);
}
#[test]
fn max_failures() {
let cfg = PingConfig::arbitrary(&mut StdGen::new(rand::thread_rng(), 100));
let mut h = PingHandler::<TcpStream>::new(cfg);
for _ in 0 .. h.config.max_failures.get() - 1 {
h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout);
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::Custom(Err(PingFailure::Timeout))) => {}
e => panic!("Unexpected event: {:?}", e)
}
}
h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout);
match tick(&mut h) {
Err(PingFailure::Timeout) => {
assert_eq!(h.failures, h.config.max_failures.get());
}
e => panic!("Unexpected event: {:?}", e)
}
h.inject_fully_negotiated_outbound(Duration::from_secs(1), ());
match tick(&mut h) {
Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping { .. }))) => {
// A success resets the counter for consecutive failures.
assert_eq!(h.failures, 0);
}
e => panic!("Unexpected event: {:?}", e)
}
}
}
| {
// A ping from a remote peer has been answered.
self.pending_results.push_front(Ok(PingSuccess::Pong));
} | identifier_body |
dict.rs | use bitflags::bitflags;
use std::{ffi::CStr, fmt, marker::PhantomData};
pub trait ReadableDict {
/// Obtain the pointer to the raw `spa_dict` struct.
fn get_dict_ptr(&self) -> *const spa_sys::spa_dict;
/// An iterator over all raw key-value pairs.
/// The iterator element type is `(&CStr, &CStr)`.
fn iter_cstr(&self) -> CIter {
let first_elem_ptr = unsafe { (*self.get_dict_ptr()).items };
CIter {
next: first_elem_ptr,
end: unsafe { first_elem_ptr.offset((*self.get_dict_ptr()).n_items as isize) },
_phantom: PhantomData,
}
}
/// An iterator over all key-value pairs that are valid utf-8.
/// The iterator element type is `(&str, &str)`.
fn iter(&self) -> Iter {
Iter {
inner: self.iter_cstr(),
}
}
/// An iterator over all keys that are valid utf-8.
/// The iterator element type is &str.
fn keys(&self) -> Keys {
Keys {
inner: self.iter_cstr(),
}
}
/// An iterator over all values that are valid utf-8.
/// The iterator element type is &str.
fn values(&self) -> Values {
Values {
inner: self.iter_cstr(),
}
}
/// Returns the number of key-value-pairs in the dict.
/// This is the number of all pairs, not only pairs that are valid-utf8.
fn len(&self) -> usize {
unsafe { (*self.get_dict_ptr()).n_items as usize }
}
/// Returns `true` if the dict is empty, `false` if it is not.
fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the bitflags that are set for the dict.
fn flags(&self) -> Flags {
Flags::from_bits_truncate(unsafe { (*self.get_dict_ptr()).flags })
}
/// Get the value associated with the provided key.
///
/// If the dict does not contain the key or the value is non-utf8, `None` is returned.
/// Use [`iter_cstr`] if you need a non-utf8 key or value.
///
/// [`iter_cstr`]: #method.iter_cstr
// FIXME: Some items might be integers, booleans, floats, doubles or pointers instead of strings.
// Perhaps we should return an enum that can be any of these values.
// See https://gitlab.freedesktop.org/pipewire/pipewire-rs/-/merge_requests/12#note_695914.
fn get(&self, key: &str) -> Option<&str> {
self.iter().find(|(k, _)| *k == key).map(|(_, v)| v)
}
}
pub trait WritableDict {
/// Insert the key-value pair, overwriting any old value.
fn insert<T: Into<Vec<u8>>>(&mut self, key: T, value: T);
/// Remove the key-value pair if it exists.
fn remove<T: Into<Vec<u8>>>(&mut self, key: T);
/// Clear the object, removing all key-value pairs.
fn clear(&mut self);
}
/// A wrapper for a `*const spa_dict` struct that does not take ownership of the data,
/// useful for dicts shared to us via FFI.
pub struct ForeignDict(*const spa_sys::spa_dict);
impl ForeignDict {
/// Wraps the provided pointer in a read-only `ForeignDict` struct without taking ownership of the struct pointed to.
///
/// # Safety
///
/// - The provided pointer must point to a valid, well-aligned `spa_dict` struct, and must not be `NULL`.
/// - The struct pointed to must be kept valid for the entire lifetime of the created `Dict`.
///
/// Violating any of these rules will result in undefined behaviour.
pub unsafe fn from_ptr(dict: *const spa_sys::spa_dict) -> Self {
debug_assert!(
!dict.is_null(),
"Dict must not be created from a pointer that is NULL"
);
Self(dict)
}
}
impl ReadableDict for ForeignDict {
fn get_dict_ptr(&self) -> *const spa_sys::spa_dict {
self.0
}
}
impl fmt::Debug for ForeignDict {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// FIXME: Find a way to display flags too.
f.debug_map().entries(self.iter_cstr()).finish()
}
}
bitflags! {
pub struct Flags: u32 {
// These flags are redefinitions from
// https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/master/spa/include/spa/utils/dict.h
const SORTED = spa_sys::SPA_DICT_FLAG_SORTED;
}
}
pub struct CIter<'a> {
next: *const spa_sys::spa_dict_item,
/// Points to the first element outside of the allocated area.
end: *const spa_sys::spa_dict_item,
_phantom: PhantomData<&'a str>,
}
impl<'a> Iterator for CIter<'a> {
type Item = (&'a CStr, &'a CStr);
fn next(&mut self) -> Option<Self::Item> {
if !self.next.is_null() && self.next < self.end {
let k = unsafe { CStr::from_ptr((*self.next).key) };
let v = unsafe { CStr::from_ptr((*self.next).value) };
self.next = unsafe { self.next.add(1) };
Some((k, v))
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let bound: usize = unsafe { self.next.offset_from(self.end) as usize };
// We know the exact value, so lower bound and upper bound are the same.
(bound, Some(bound))
}
}
pub struct Iter<'a> {
inner: CIter<'a>,
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a str, &'a str);
fn next(&mut self) -> Option<Self::Item> {
self.inner
.find_map(|(k, v)| k.to_str().ok().zip(v.to_str().ok()))
}
fn size_hint(&self) -> (usize, Option<usize>) {
// Lower bound is 0, as all keys left might not be valid UTF-8.
(0, self.inner.size_hint().1)
}
}
pub struct Keys<'a> {
inner: CIter<'a>,
}
impl<'a> Iterator for Keys<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<Self::Item> {
self.inner.find_map(|(k, _)| k.to_str().ok())
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
pub struct Values<'a> {
inner: CIter<'a>,
}
impl<'a> Iterator for Values<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<Self::Item> {
self.inner.find_map(|(_, v)| v.to_str().ok())
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[cfg(test)]
mod tests {
use super::{Flags, ForeignDict, ReadableDict};
use spa_sys::{spa_dict, spa_dict_item};
use std::{ffi::CString, ptr};
/// Create a raw dict with the specified number of key-value pairs.
///
/// `num_items` must not be zero, or this function will panic.
///
/// Each key value pair is `("K<n>", "V<n>")`, with *\<n\>* being an element of the range `0..num_items`.
///
/// The function returns a tuple consisting of:
/// 1. An allocation (`Vec`) containing the raw Key and Value Strings.
/// 2. An allocation (`Vec`) containing all the items.
/// 3. The created `spa_dict` struct.
///
/// The first two items must be kept alive for the entire lifetime of the returned `spa_dict` struct.
fn make_raw_dict(
num_items: u32,
) -> (
Vec<(CString, CString)>,
Vec<spa_dict_item>,
spa_sys::spa_dict,
) {
assert!(num_items != 0, "num_items must not be zero");
let mut strings: Vec<(CString, CString)> = Vec::with_capacity(num_items as usize);
let mut items: Vec<spa_dict_item> = Vec::with_capacity(num_items as usize);
for i in 0..num_items {
let k = CString::new(format!("K{}", i)).unwrap();
let v = CString::new(format!("V{}", i)).unwrap();
let item = spa_dict_item {
key: k.as_ptr(),
value: v.as_ptr(),
};
strings.push((k, v));
items.push(item);
}
let raw = spa_dict {
flags: Flags::empty().bits,
n_items: num_items,
items: items.as_ptr(),
};
(strings, items, raw)
}
#[test]
fn test_empty_dict() {
let raw = spa_dict {
flags: Flags::empty().bits,
n_items: 0,
items: ptr::null(),
};
let dict = unsafe { ForeignDict::from_ptr(&raw) };
let iter = dict.iter_cstr();
assert_eq!(0, dict.len());
iter.for_each(|_| panic!("Iterated over non-existing item"));
}
#[test]
fn | () {
let (_strings, _items, raw) = make_raw_dict(2);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
let mut iter = dict.iter_cstr();
assert_eq!(
(
CString::new("K0").unwrap().as_c_str(),
CString::new("V0").unwrap().as_c_str()
),
iter.next().unwrap()
);
assert_eq!(
(
CString::new("K1").unwrap().as_c_str(),
CString::new("V1").unwrap().as_c_str()
),
iter.next().unwrap()
);
assert_eq!(None, iter.next());
}
#[test]
fn test_iterators() {
let (_strings, _items, raw) = make_raw_dict(2);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
let mut iter = dict.iter();
assert_eq!(("K0", "V0"), iter.next().unwrap());
assert_eq!(("K1", "V1"), iter.next().unwrap());
assert_eq!(None, iter.next());
let mut key_iter = dict.keys();
assert_eq!("K0", key_iter.next().unwrap());
assert_eq!("K1", key_iter.next().unwrap());
assert_eq!(None, key_iter.next());
let mut val_iter = dict.values();
assert_eq!("V0", val_iter.next().unwrap());
assert_eq!("V1", val_iter.next().unwrap());
assert_eq!(None, val_iter.next());
}
#[test]
fn test_get() {
let (_strings, _items, raw) = make_raw_dict(1);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
assert_eq!(Some("V0"), dict.get("K0"));
}
#[test]
fn test_debug() {
let (_strings, _items, raw) = make_raw_dict(1);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
assert_eq!(r#"{"K0": "V0"}"#, &format!("{:?}", dict))
}
}
| test_iter_cstr | identifier_name |
dict.rs | use bitflags::bitflags;
use std::{ffi::CStr, fmt, marker::PhantomData};
pub trait ReadableDict {
/// Obtain the pointer to the raw `spa_dict` struct.
fn get_dict_ptr(&self) -> *const spa_sys::spa_dict;
/// An iterator over all raw key-value pairs.
/// The iterator element type is `(&CStr, &CStr)`.
fn iter_cstr(&self) -> CIter {
let first_elem_ptr = unsafe { (*self.get_dict_ptr()).items };
CIter {
next: first_elem_ptr,
end: unsafe { first_elem_ptr.offset((*self.get_dict_ptr()).n_items as isize) },
_phantom: PhantomData,
}
}
/// An iterator over all key-value pairs that are valid utf-8.
/// The iterator element type is `(&str, &str)`.
fn iter(&self) -> Iter {
Iter {
inner: self.iter_cstr(),
}
}
/// An iterator over all keys that are valid utf-8.
/// The iterator element type is &str.
fn keys(&self) -> Keys {
Keys {
inner: self.iter_cstr(),
}
}
/// An iterator over all values that are valid utf-8.
/// The iterator element type is &str.
fn values(&self) -> Values {
Values {
inner: self.iter_cstr(),
}
}
/// Returns the number of key-value-pairs in the dict.
/// This is the number of all pairs, not only pairs that are valid-utf8.
fn len(&self) -> usize {
unsafe { (*self.get_dict_ptr()).n_items as usize }
}
/// Returns `true` if the dict is empty, `false` if it is not.
fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the bitflags that are set for the dict.
fn flags(&self) -> Flags {
Flags::from_bits_truncate(unsafe { (*self.get_dict_ptr()).flags })
}
/// Get the value associated with the provided key.
///
/// If the dict does not contain the key or the value is non-utf8, `None` is returned.
/// Use [`iter_cstr`] if you need a non-utf8 key or value.
///
/// [`iter_cstr`]: #method.iter_cstr
// FIXME: Some items might be integers, booleans, floats, doubles or pointers instead of strings.
// Perhaps we should return an enum that can be any of these values.
// See https://gitlab.freedesktop.org/pipewire/pipewire-rs/-/merge_requests/12#note_695914.
fn get(&self, key: &str) -> Option<&str> {
self.iter().find(|(k, _)| *k == key).map(|(_, v)| v)
}
}
pub trait WritableDict {
/// Insert the key-value pair, overwriting any old value.
fn insert<T: Into<Vec<u8>>>(&mut self, key: T, value: T);
/// Remove the key-value pair if it exists.
fn remove<T: Into<Vec<u8>>>(&mut self, key: T);
/// Clear the object, removing all key-value pairs.
fn clear(&mut self);
}
/// A wrapper for a `*const spa_dict` struct that does not take ownership of the data,
/// useful for dicts shared to us via FFI.
pub struct ForeignDict(*const spa_sys::spa_dict);
impl ForeignDict {
/// Wraps the provided pointer in a read-only `ForeignDict` struct without taking ownership of the struct pointed to.
///
/// # Safety
///
/// - The provided pointer must point to a valid, well-aligned `spa_dict` struct, and must not be `NULL`.
/// - The struct pointed to must be kept valid for the entire lifetime of the created `Dict`.
///
/// Violating any of these rules will result in undefined behaviour.
pub unsafe fn from_ptr(dict: *const spa_sys::spa_dict) -> Self {
debug_assert!(
!dict.is_null(),
"Dict must not be created from a pointer that is NULL"
);
Self(dict)
}
}
impl ReadableDict for ForeignDict {
fn get_dict_ptr(&self) -> *const spa_sys::spa_dict {
self.0
}
}
impl fmt::Debug for ForeignDict {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// FIXME: Find a way to display flags too.
f.debug_map().entries(self.iter_cstr()).finish()
}
}
bitflags! {
pub struct Flags: u32 {
// These flags are redefinitions from
// https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/master/spa/include/spa/utils/dict.h
const SORTED = spa_sys::SPA_DICT_FLAG_SORTED;
}
}
pub struct CIter<'a> {
next: *const spa_sys::spa_dict_item,
/// Points to the first element outside of the allocated area.
end: *const spa_sys::spa_dict_item,
_phantom: PhantomData<&'a str>,
}
impl<'a> Iterator for CIter<'a> {
type Item = (&'a CStr, &'a CStr);
fn next(&mut self) -> Option<Self::Item> {
if !self.next.is_null() && self.next < self.end {
let k = unsafe { CStr::from_ptr((*self.next).key) };
let v = unsafe { CStr::from_ptr((*self.next).value) };
self.next = unsafe { self.next.add(1) };
Some((k, v))
} else |
}
fn size_hint(&self) -> (usize, Option<usize>) {
let bound: usize = unsafe { self.next.offset_from(self.end) as usize };
// We know the exact value, so lower bound and upper bound are the same.
(bound, Some(bound))
}
}
pub struct Iter<'a> {
inner: CIter<'a>,
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a str, &'a str);
fn next(&mut self) -> Option<Self::Item> {
self.inner
.find_map(|(k, v)| k.to_str().ok().zip(v.to_str().ok()))
}
fn size_hint(&self) -> (usize, Option<usize>) {
// Lower bound is 0, as all keys left might not be valid UTF-8.
(0, self.inner.size_hint().1)
}
}
pub struct Keys<'a> {
inner: CIter<'a>,
}
impl<'a> Iterator for Keys<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<Self::Item> {
self.inner.find_map(|(k, _)| k.to_str().ok())
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
pub struct Values<'a> {
inner: CIter<'a>,
}
impl<'a> Iterator for Values<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<Self::Item> {
self.inner.find_map(|(_, v)| v.to_str().ok())
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[cfg(test)]
mod tests {
use super::{Flags, ForeignDict, ReadableDict};
use spa_sys::{spa_dict, spa_dict_item};
use std::{ffi::CString, ptr};
/// Create a raw dict with the specified number of key-value pairs.
///
/// `num_items` must not be zero, or this function will panic.
///
/// Each key value pair is `("K<n>", "V<n>")`, with *\<n\>* being an element of the range `0..num_items`.
///
/// The function returns a tuple consisting of:
/// 1. An allocation (`Vec`) containing the raw Key and Value Strings.
/// 2. An allocation (`Vec`) containing all the items.
/// 3. The created `spa_dict` struct.
///
/// The first two items must be kept alive for the entire lifetime of the returned `spa_dict` struct.
fn make_raw_dict(
num_items: u32,
) -> (
Vec<(CString, CString)>,
Vec<spa_dict_item>,
spa_sys::spa_dict,
) {
assert!(num_items != 0, "num_items must not be zero");
let mut strings: Vec<(CString, CString)> = Vec::with_capacity(num_items as usize);
let mut items: Vec<spa_dict_item> = Vec::with_capacity(num_items as usize);
for i in 0..num_items {
let k = CString::new(format!("K{}", i)).unwrap();
let v = CString::new(format!("V{}", i)).unwrap();
let item = spa_dict_item {
key: k.as_ptr(),
value: v.as_ptr(),
};
strings.push((k, v));
items.push(item);
}
let raw = spa_dict {
flags: Flags::empty().bits,
n_items: num_items,
items: items.as_ptr(),
};
(strings, items, raw)
}
#[test]
fn test_empty_dict() {
let raw = spa_dict {
flags: Flags::empty().bits,
n_items: 0,
items: ptr::null(),
};
let dict = unsafe { ForeignDict::from_ptr(&raw) };
let iter = dict.iter_cstr();
assert_eq!(0, dict.len());
iter.for_each(|_| panic!("Iterated over non-existing item"));
}
#[test]
fn test_iter_cstr() {
let (_strings, _items, raw) = make_raw_dict(2);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
let mut iter = dict.iter_cstr();
assert_eq!(
(
CString::new("K0").unwrap().as_c_str(),
CString::new("V0").unwrap().as_c_str()
),
iter.next().unwrap()
);
assert_eq!(
(
CString::new("K1").unwrap().as_c_str(),
CString::new("V1").unwrap().as_c_str()
),
iter.next().unwrap()
);
assert_eq!(None, iter.next());
}
#[test]
fn test_iterators() {
let (_strings, _items, raw) = make_raw_dict(2);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
let mut iter = dict.iter();
assert_eq!(("K0", "V0"), iter.next().unwrap());
assert_eq!(("K1", "V1"), iter.next().unwrap());
assert_eq!(None, iter.next());
let mut key_iter = dict.keys();
assert_eq!("K0", key_iter.next().unwrap());
assert_eq!("K1", key_iter.next().unwrap());
assert_eq!(None, key_iter.next());
let mut val_iter = dict.values();
assert_eq!("V0", val_iter.next().unwrap());
assert_eq!("V1", val_iter.next().unwrap());
assert_eq!(None, val_iter.next());
}
#[test]
fn test_get() {
let (_strings, _items, raw) = make_raw_dict(1);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
assert_eq!(Some("V0"), dict.get("K0"));
}
#[test]
fn test_debug() {
let (_strings, _items, raw) = make_raw_dict(1);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
assert_eq!(r#"{"K0": "V0"}"#, &format!("{:?}", dict))
}
}
| {
None
} | conditional_block |
dict.rs | use bitflags::bitflags;
use std::{ffi::CStr, fmt, marker::PhantomData};
pub trait ReadableDict {
/// Obtain the pointer to the raw `spa_dict` struct.
fn get_dict_ptr(&self) -> *const spa_sys::spa_dict;
/// An iterator over all raw key-value pairs.
/// The iterator element type is `(&CStr, &CStr)`.
fn iter_cstr(&self) -> CIter {
let first_elem_ptr = unsafe { (*self.get_dict_ptr()).items };
CIter {
next: first_elem_ptr,
end: unsafe { first_elem_ptr.offset((*self.get_dict_ptr()).n_items as isize) },
_phantom: PhantomData,
}
}
/// An iterator over all key-value pairs that are valid utf-8.
/// The iterator element type is `(&str, &str)`.
fn iter(&self) -> Iter {
Iter {
inner: self.iter_cstr(),
}
}
/// An iterator over all keys that are valid utf-8.
/// The iterator element type is &str.
fn keys(&self) -> Keys {
Keys {
inner: self.iter_cstr(),
}
}
/// An iterator over all values that are valid utf-8.
/// The iterator element type is &str.
fn values(&self) -> Values {
Values {
inner: self.iter_cstr(),
}
}
/// Returns the number of key-value-pairs in the dict.
/// This is the number of all pairs, not only pairs that are valid-utf8.
fn len(&self) -> usize {
unsafe { (*self.get_dict_ptr()).n_items as usize }
}
/// Returns `true` if the dict is empty, `false` if it is not.
fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the bitflags that are set for the dict.
fn flags(&self) -> Flags {
Flags::from_bits_truncate(unsafe { (*self.get_dict_ptr()).flags })
}
/// Get the value associated with the provided key.
///
/// If the dict does not contain the key or the value is non-utf8, `None` is returned.
/// Use [`iter_cstr`] if you need a non-utf8 key or value.
///
/// [`iter_cstr`]: #method.iter_cstr
// FIXME: Some items might be integers, booleans, floats, doubles or pointers instead of strings.
// Perhaps we should return an enum that can be any of these values.
// See https://gitlab.freedesktop.org/pipewire/pipewire-rs/-/merge_requests/12#note_695914.
fn get(&self, key: &str) -> Option<&str> {
self.iter().find(|(k, _)| *k == key).map(|(_, v)| v)
}
}
pub trait WritableDict {
/// Insert the key-value pair, overwriting any old value.
fn insert<T: Into<Vec<u8>>>(&mut self, key: T, value: T);
/// Remove the key-value pair if it exists.
fn remove<T: Into<Vec<u8>>>(&mut self, key: T);
/// Clear the object, removing all key-value pairs.
fn clear(&mut self);
}
/// A wrapper for a `*const spa_dict` struct that does not take ownership of the data,
/// useful for dicts shared to us via FFI.
pub struct ForeignDict(*const spa_sys::spa_dict);
impl ForeignDict {
/// Wraps the provided pointer in a read-only `ForeignDict` struct without taking ownership of the struct pointed to.
///
/// # Safety
///
/// - The provided pointer must point to a valid, well-aligned `spa_dict` struct, and must not be `NULL`.
/// - The struct pointed to must be kept valid for the entire lifetime of the created `Dict`.
///
/// Violating any of these rules will result in undefined behaviour.
pub unsafe fn from_ptr(dict: *const spa_sys::spa_dict) -> Self {
debug_assert!(
!dict.is_null(),
"Dict must not be created from a pointer that is NULL"
);
Self(dict)
}
}
impl ReadableDict for ForeignDict {
fn get_dict_ptr(&self) -> *const spa_sys::spa_dict {
self.0
}
}
impl fmt::Debug for ForeignDict {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// FIXME: Find a way to display flags too.
f.debug_map().entries(self.iter_cstr()).finish()
}
}
bitflags! {
pub struct Flags: u32 {
// These flags are redefinitions from
// https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/master/spa/include/spa/utils/dict.h
const SORTED = spa_sys::SPA_DICT_FLAG_SORTED;
}
}
pub struct CIter<'a> {
next: *const spa_sys::spa_dict_item,
/// Points to the first element outside of the allocated area.
end: *const spa_sys::spa_dict_item,
_phantom: PhantomData<&'a str>,
}
impl<'a> Iterator for CIter<'a> {
type Item = (&'a CStr, &'a CStr);
fn next(&mut self) -> Option<Self::Item> {
if !self.next.is_null() && self.next < self.end {
let k = unsafe { CStr::from_ptr((*self.next).key) };
let v = unsafe { CStr::from_ptr((*self.next).value) };
self.next = unsafe { self.next.add(1) };
Some((k, v))
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let bound: usize = unsafe { self.next.offset_from(self.end) as usize };
// We know the exact value, so lower bound and upper bound are the same.
(bound, Some(bound))
}
}
pub struct Iter<'a> {
inner: CIter<'a>,
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a str, &'a str);
fn next(&mut self) -> Option<Self::Item> {
self.inner
.find_map(|(k, v)| k.to_str().ok().zip(v.to_str().ok()))
}
fn size_hint(&self) -> (usize, Option<usize>) {
// Lower bound is 0, as all keys left might not be valid UTF-8.
(0, self.inner.size_hint().1)
}
}
pub struct Keys<'a> {
inner: CIter<'a>,
}
impl<'a> Iterator for Keys<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<Self::Item> {
self.inner.find_map(|(k, _)| k.to_str().ok())
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
pub struct Values<'a> {
inner: CIter<'a>,
}
impl<'a> Iterator for Values<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<Self::Item> {
self.inner.find_map(|(_, v)| v.to_str().ok())
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[cfg(test)]
mod tests {
use super::{Flags, ForeignDict, ReadableDict};
use spa_sys::{spa_dict, spa_dict_item};
use std::{ffi::CString, ptr};
/// Create a raw dict with the specified number of key-value pairs.
///
/// `num_items` must not be zero, or this function will panic.
///
/// Each key value pair is `("K<n>", "V<n>")`, with *\<n\>* being an element of the range `0..num_items`.
///
/// The function returns a tuple consisting of:
/// 1. An allocation (`Vec`) containing the raw Key and Value Strings.
/// 2. An allocation (`Vec`) containing all the items.
/// 3. The created `spa_dict` struct.
///
/// The first two items must be kept alive for the entire lifetime of the returned `spa_dict` struct.
fn make_raw_dict(
num_items: u32,
) -> (
Vec<(CString, CString)>,
Vec<spa_dict_item>,
spa_sys::spa_dict,
) {
assert!(num_items != 0, "num_items must not be zero");
let mut strings: Vec<(CString, CString)> = Vec::with_capacity(num_items as usize);
let mut items: Vec<spa_dict_item> = Vec::with_capacity(num_items as usize);
for i in 0..num_items {
let k = CString::new(format!("K{}", i)).unwrap();
let v = CString::new(format!("V{}", i)).unwrap();
let item = spa_dict_item {
key: k.as_ptr(),
value: v.as_ptr(),
};
strings.push((k, v));
items.push(item);
}
let raw = spa_dict {
flags: Flags::empty().bits,
n_items: num_items,
items: items.as_ptr(),
};
(strings, items, raw)
}
#[test]
fn test_empty_dict() {
let raw = spa_dict {
flags: Flags::empty().bits,
n_items: 0,
items: ptr::null(),
};
let dict = unsafe { ForeignDict::from_ptr(&raw) };
let iter = dict.iter_cstr();
assert_eq!(0, dict.len());
iter.for_each(|_| panic!("Iterated over non-existing item"));
}
#[test]
fn test_iter_cstr() {
let (_strings, _items, raw) = make_raw_dict(2);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
let mut iter = dict.iter_cstr();
assert_eq!(
(
CString::new("K0").unwrap().as_c_str(),
CString::new("V0").unwrap().as_c_str()
),
iter.next().unwrap()
);
assert_eq!(
(
CString::new("K1").unwrap().as_c_str(),
CString::new("V1").unwrap().as_c_str()
),
iter.next().unwrap()
);
assert_eq!(None, iter.next());
}
#[test]
fn test_iterators() {
let (_strings, _items, raw) = make_raw_dict(2);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
let mut iter = dict.iter();
assert_eq!(("K0", "V0"), iter.next().unwrap());
assert_eq!(("K1", "V1"), iter.next().unwrap());
assert_eq!(None, iter.next());
let mut key_iter = dict.keys();
assert_eq!("K0", key_iter.next().unwrap());
assert_eq!("K1", key_iter.next().unwrap());
assert_eq!(None, key_iter.next());
let mut val_iter = dict.values();
assert_eq!("V0", val_iter.next().unwrap());
assert_eq!("V1", val_iter.next().unwrap());
assert_eq!(None, val_iter.next());
}
#[test]
fn test_get() {
let (_strings, _items, raw) = make_raw_dict(1);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
assert_eq!(Some("V0"), dict.get("K0"));
}
#[test]
fn test_debug() |
}
| {
let (_strings, _items, raw) = make_raw_dict(1);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
assert_eq!(r#"{"K0": "V0"}"#, &format!("{:?}", dict))
} | identifier_body |
dict.rs | use bitflags::bitflags;
use std::{ffi::CStr, fmt, marker::PhantomData};
pub trait ReadableDict {
/// Obtain the pointer to the raw `spa_dict` struct.
fn get_dict_ptr(&self) -> *const spa_sys::spa_dict;
/// An iterator over all raw key-value pairs.
/// The iterator element type is `(&CStr, &CStr)`.
fn iter_cstr(&self) -> CIter {
let first_elem_ptr = unsafe { (*self.get_dict_ptr()).items };
CIter {
next: first_elem_ptr,
end: unsafe { first_elem_ptr.offset((*self.get_dict_ptr()).n_items as isize) },
_phantom: PhantomData,
}
}
/// An iterator over all key-value pairs that are valid utf-8.
/// The iterator element type is `(&str, &str)`.
fn iter(&self) -> Iter {
Iter {
inner: self.iter_cstr(),
}
}
/// An iterator over all keys that are valid utf-8.
/// The iterator element type is &str.
fn keys(&self) -> Keys {
Keys {
inner: self.iter_cstr(),
}
}
/// An iterator over all values that are valid utf-8.
/// The iterator element type is &str.
fn values(&self) -> Values {
Values {
inner: self.iter_cstr(),
}
}
/// Returns the number of key-value-pairs in the dict.
/// This is the number of all pairs, not only pairs that are valid-utf8.
fn len(&self) -> usize {
unsafe { (*self.get_dict_ptr()).n_items as usize }
}
/// Returns `true` if the dict is empty, `false` if it is not. | fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the bitflags that are set for the dict.
fn flags(&self) -> Flags {
Flags::from_bits_truncate(unsafe { (*self.get_dict_ptr()).flags })
}
/// Get the value associated with the provided key.
///
/// If the dict does not contain the key or the value is non-utf8, `None` is returned.
/// Use [`iter_cstr`] if you need a non-utf8 key or value.
///
/// [`iter_cstr`]: #method.iter_cstr
// FIXME: Some items might be integers, booleans, floats, doubles or pointers instead of strings.
// Perhaps we should return an enum that can be any of these values.
// See https://gitlab.freedesktop.org/pipewire/pipewire-rs/-/merge_requests/12#note_695914.
fn get(&self, key: &str) -> Option<&str> {
self.iter().find(|(k, _)| *k == key).map(|(_, v)| v)
}
}
pub trait WritableDict {
/// Insert the key-value pair, overwriting any old value.
fn insert<T: Into<Vec<u8>>>(&mut self, key: T, value: T);
/// Remove the key-value pair if it exists.
fn remove<T: Into<Vec<u8>>>(&mut self, key: T);
/// Clear the object, removing all key-value pairs.
fn clear(&mut self);
}
/// A wrapper for a `*const spa_dict` struct that does not take ownership of the data,
/// useful for dicts shared to us via FFI.
pub struct ForeignDict(*const spa_sys::spa_dict);
impl ForeignDict {
/// Wraps the provided pointer in a read-only `ForeignDict` struct without taking ownership of the struct pointed to.
///
/// # Safety
///
/// - The provided pointer must point to a valid, well-aligned `spa_dict` struct, and must not be `NULL`.
/// - The struct pointed to must be kept valid for the entire lifetime of the created `Dict`.
///
/// Violating any of these rules will result in undefined behaviour.
pub unsafe fn from_ptr(dict: *const spa_sys::spa_dict) -> Self {
debug_assert!(
!dict.is_null(),
"Dict must not be created from a pointer that is NULL"
);
Self(dict)
}
}
impl ReadableDict for ForeignDict {
fn get_dict_ptr(&self) -> *const spa_sys::spa_dict {
self.0
}
}
impl fmt::Debug for ForeignDict {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// FIXME: Find a way to display flags too.
f.debug_map().entries(self.iter_cstr()).finish()
}
}
bitflags! {
pub struct Flags: u32 {
// These flags are redefinitions from
// https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/master/spa/include/spa/utils/dict.h
const SORTED = spa_sys::SPA_DICT_FLAG_SORTED;
}
}
pub struct CIter<'a> {
next: *const spa_sys::spa_dict_item,
/// Points to the first element outside of the allocated area.
end: *const spa_sys::spa_dict_item,
_phantom: PhantomData<&'a str>,
}
impl<'a> Iterator for CIter<'a> {
type Item = (&'a CStr, &'a CStr);
fn next(&mut self) -> Option<Self::Item> {
if !self.next.is_null() && self.next < self.end {
let k = unsafe { CStr::from_ptr((*self.next).key) };
let v = unsafe { CStr::from_ptr((*self.next).value) };
self.next = unsafe { self.next.add(1) };
Some((k, v))
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let bound: usize = unsafe { self.next.offset_from(self.end) as usize };
// We know the exact value, so lower bound and upper bound are the same.
(bound, Some(bound))
}
}
pub struct Iter<'a> {
inner: CIter<'a>,
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a str, &'a str);
fn next(&mut self) -> Option<Self::Item> {
self.inner
.find_map(|(k, v)| k.to_str().ok().zip(v.to_str().ok()))
}
fn size_hint(&self) -> (usize, Option<usize>) {
// Lower bound is 0, as all keys left might not be valid UTF-8.
(0, self.inner.size_hint().1)
}
}
pub struct Keys<'a> {
inner: CIter<'a>,
}
impl<'a> Iterator for Keys<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<Self::Item> {
self.inner.find_map(|(k, _)| k.to_str().ok())
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
pub struct Values<'a> {
inner: CIter<'a>,
}
impl<'a> Iterator for Values<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<Self::Item> {
self.inner.find_map(|(_, v)| v.to_str().ok())
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[cfg(test)]
mod tests {
use super::{Flags, ForeignDict, ReadableDict};
use spa_sys::{spa_dict, spa_dict_item};
use std::{ffi::CString, ptr};
/// Create a raw dict with the specified number of key-value pairs.
///
/// `num_items` must not be zero, or this function will panic.
///
/// Each key value pair is `("K<n>", "V<n>")`, with *\<n\>* being an element of the range `0..num_items`.
///
/// The function returns a tuple consisting of:
/// 1. An allocation (`Vec`) containing the raw Key and Value Strings.
/// 2. An allocation (`Vec`) containing all the items.
/// 3. The created `spa_dict` struct.
///
/// The first two items must be kept alive for the entire lifetime of the returned `spa_dict` struct.
fn make_raw_dict(
num_items: u32,
) -> (
Vec<(CString, CString)>,
Vec<spa_dict_item>,
spa_sys::spa_dict,
) {
assert!(num_items != 0, "num_items must not be zero");
let mut strings: Vec<(CString, CString)> = Vec::with_capacity(num_items as usize);
let mut items: Vec<spa_dict_item> = Vec::with_capacity(num_items as usize);
for i in 0..num_items {
let k = CString::new(format!("K{}", i)).unwrap();
let v = CString::new(format!("V{}", i)).unwrap();
let item = spa_dict_item {
key: k.as_ptr(),
value: v.as_ptr(),
};
strings.push((k, v));
items.push(item);
}
let raw = spa_dict {
flags: Flags::empty().bits,
n_items: num_items,
items: items.as_ptr(),
};
(strings, items, raw)
}
#[test]
fn test_empty_dict() {
let raw = spa_dict {
flags: Flags::empty().bits,
n_items: 0,
items: ptr::null(),
};
let dict = unsafe { ForeignDict::from_ptr(&raw) };
let iter = dict.iter_cstr();
assert_eq!(0, dict.len());
iter.for_each(|_| panic!("Iterated over non-existing item"));
}
#[test]
fn test_iter_cstr() {
let (_strings, _items, raw) = make_raw_dict(2);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
let mut iter = dict.iter_cstr();
assert_eq!(
(
CString::new("K0").unwrap().as_c_str(),
CString::new("V0").unwrap().as_c_str()
),
iter.next().unwrap()
);
assert_eq!(
(
CString::new("K1").unwrap().as_c_str(),
CString::new("V1").unwrap().as_c_str()
),
iter.next().unwrap()
);
assert_eq!(None, iter.next());
}
#[test]
fn test_iterators() {
let (_strings, _items, raw) = make_raw_dict(2);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
let mut iter = dict.iter();
assert_eq!(("K0", "V0"), iter.next().unwrap());
assert_eq!(("K1", "V1"), iter.next().unwrap());
assert_eq!(None, iter.next());
let mut key_iter = dict.keys();
assert_eq!("K0", key_iter.next().unwrap());
assert_eq!("K1", key_iter.next().unwrap());
assert_eq!(None, key_iter.next());
let mut val_iter = dict.values();
assert_eq!("V0", val_iter.next().unwrap());
assert_eq!("V1", val_iter.next().unwrap());
assert_eq!(None, val_iter.next());
}
#[test]
fn test_get() {
let (_strings, _items, raw) = make_raw_dict(1);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
assert_eq!(Some("V0"), dict.get("K0"));
}
#[test]
fn test_debug() {
let (_strings, _items, raw) = make_raw_dict(1);
let dict = unsafe { ForeignDict::from_ptr(&raw) };
assert_eq!(r#"{"K0": "V0"}"#, &format!("{:?}", dict))
}
} | random_line_split | |
nodekeeper.go | /*
* Copyright 2018 Insolar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nodekeeper
import (
"bytes"
"encoding/binary"
"fmt"
"hash"
"sort"
"sync"
"time"
"github.com/insolar/insolar/core"
"github.com/insolar/insolar/log"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
)
type NodeKeeper interface {
// GetActiveNode get active node by its reference. Returns nil if node is not found.
GetActiveNode(ref core.RecordRef) *core.ActiveNode
// GetActiveNodes get active nodes.
GetActiveNodes() []*core.ActiveNode
// AddActiveNodes set active nodes.
AddActiveNodes([]*core.ActiveNode)
// GetUnsyncHash get hash computed based on the list of unsync nodes, and the size of this list.
GetUnsyncHash() (hash []byte, unsyncCount int, err error)
// GetUnsync gets the local unsync list (excluding other nodes unsync lists).
GetUnsync() []*core.ActiveNode
// SetPulse sets internal PulseNumber to number.
SetPulse(number core.PulseNumber)
// Sync initiate transferring unsync -> sync, sync -> active. If approved is false, unsync is not transferred to sync.
Sync(approved bool)
// AddUnsync add unsync node to the local unsync list.
// Returns error if node's PulseNumber is not equal to the NodeKeeper internal PulseNumber.
AddUnsync(*core.ActiveNode) error
// AddUnsyncGossip merge unsync list from another node to the local unsync list.
// Returns error if:
// 1. One of the nodes' PulseNumber is not equal to the NodeKeeper internal PulseNumber;
// 2. One of the nodes' reference is equal to one of the local unsync nodes' reference.
AddUnsyncGossip([]*core.ActiveNode) error
}
// NewNodeKeeper create new NodeKeeper. unsyncDiscardAfter = timeout after which each unsync node is discarded.
func NewNodeKeeper(unsyncDiscardAfter time.Duration) NodeKeeper {
return &nodekeeper{
state: undefined,
timeout: unsyncDiscardAfter,
active: make(map[core.RecordRef]*core.ActiveNode),
sync: make([]*core.ActiveNode, 0),
unsync: make([]*core.ActiveNode, 0),
unsyncGossip: make(map[core.RecordRef]*core.ActiveNode),
}
}
type nodekeeperState uint8
const (
undefined = nodekeeperState(iota + 1)
awaitUnsync
hashCalculated
synced
)
type nodekeeper struct {
state nodekeeperState
pulse core.PulseNumber
timeout time.Duration
cacheUnsyncCalc []byte
cacheUnsyncSize int
activeLock sync.RWMutex
active map[core.RecordRef]*core.ActiveNode
sync []*core.ActiveNode
unsyncLock sync.Mutex
unsync []*core.ActiveNode
unsyncTimeout []time.Time
unsyncGossip map[core.RecordRef]*core.ActiveNode
}
func (nk *nodekeeper) GetActiveNodes() []*core.ActiveNode {
nk.activeLock.RLock()
defer nk.activeLock.RUnlock()
result := make([]*core.ActiveNode, len(nk.active))
index := 0
for _, node := range nk.active {
result[index] = node
index++
}
return result
}
func (nk *nodekeeper) AddActiveNodes(nodes []*core.ActiveNode) {
nk.activeLock.Lock()
defer nk.activeLock.Unlock()
for _, node := range nodes {
nk.active[node.NodeID] = node
}
}
func (nk *nodekeeper) GetActiveNode(ref core.RecordRef) *core.ActiveNode {
nk.activeLock.RLock()
defer nk.activeLock.RUnlock()
return nk.active[ref]
}
func (nk *nodekeeper) GetUnsyncHash() ([]byte, int, error) {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state != awaitUnsync {
log.Warn("NodeKeeper: GetUnsyncHash called more than once during one pulse")
return nk.cacheUnsyncCalc, nk.cacheUnsyncSize, nil
}
unsync := nk.collectUnsync()
hash, err := calculateHash(unsync)
if err != nil {
return nil, 0, err
}
nk.cacheUnsyncCalc, nk.cacheUnsyncSize = hash, len(unsync)
nk.state = hashCalculated
return nk.cacheUnsyncCalc, nk.cacheUnsyncSize, nil
}
func (nk *nodekeeper) GetUnsync() []*core.ActiveNode {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
result := make([]*core.ActiveNode, len(nk.unsync))
copy(result, nk.unsync)
return result
}
func (nk *nodekeeper) SetPulse(number core.PulseNumber) {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state == undefined {
nk.pulse = number
nk.state = awaitUnsync
return
}
if number <= nk.pulse {
log.Warnf("NodeKeeper: ignored SetPulse call with number=%d while current=%d", uint32(number), uint32(nk.pulse))
return
}
if nk.state == hashCalculated || nk.state == awaitUnsync {
log.Warn("NodeKeeper: SetPulse called not from `undefined` or `synced` state")
nk.activeLock.Lock()
nk.syncUnsafe(false)
nk.activeLock.Unlock()
}
nk.pulse = number
nk.state = awaitUnsync
nk.invalidateCache()
nk.updateUnsyncPulse()
}
func (nk *nodekeeper) Sync(approved bool) {
nk.unsyncLock.Lock()
nk.activeLock.Lock()
defer func() {
nk.activeLock.Unlock()
nk.unsyncLock.Unlock()
}()
if nk.state == synced || nk.state == undefined {
log.Warn("NodeKeeper: ignored Sync call from `synced` or `undefined` state")
return
}
nk.syncUnsafe(approved)
}
func (nk *nodekeeper) AddUnsync(node *core.ActiveNode) error {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state != awaitUnsync {
return errors.New("Cannot add node to unsync list: try again in next pulse slot")
}
checkedList := []*core.ActiveNode{node}
if err := nk.checkPulse(checkedList); err != nil {
return errors.Wrap(err, "Error adding local unsync node")
}
nk.unsync = append(nk.unsync, node)
tm := time.Now().Add(nk.timeout)
nk.unsyncTimeout = append(nk.unsyncTimeout, tm)
return nil
}
func (nk *nodekeeper) AddUnsyncGossip(nodes []*core.ActiveNode) error {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state != awaitUnsync {
return errors.New("Cannot add node to unsync list: try again in next pulse slot")
}
if err := nk.checkPulse(nodes); err != nil {
return errors.Wrap(err, "Error adding unsync gossip nodes")
}
if err := nk.checkReference(nodes); err != nil {
return errors.Wrap(err, "Error adding unsync gossip nodes")
}
for _, node := range nodes {
nk.unsyncGossip[node.NodeID] = node
}
return nil
}
func (nk *nodekeeper) syncUnsafe(approved bool) {
// sync -> active
for _, node := range nk.sync {
nk.active[node.NodeID] = node
}
if approved {
// unsync -> sync
unsync := nk.collectUnsync()
nk.sync = unsync
// clear unsync
nk.unsync = make([]*core.ActiveNode, 0)
} else {
// clear sync
nk.sync = make([]*core.ActiveNode, 0)
nk.discardTimedOutUnsync()
}
// clear unsyncGossip
nk.unsyncGossip = make(map[core.RecordRef]*core.ActiveNode)
nk.state = synced
}
func (nk *nodekeeper) discardTimedOutUnsync() {
index := 0
for _, tm := range nk.unsyncTimeout {
if tm.After(time.Now()) {
break
}
index++
}
if index == 0 {
return
}
// discard all unsync nodes before index
nk.unsyncTimeout = nk.unsyncTimeout[index:]
nk.unsync = nk.unsync[index:]
log.Infof("NodeKeeper: discarded %d unsync nodes due to timeout", index)
}
func (nk *nodekeeper) checkPulse(nodes []*core.ActiveNode) error {
for _, node := range nodes {
if node.PulseNum != nk.pulse {
return errors.Errorf("Node ID:%s pulse:%d is not equal to NodeKeeper current pulse:%d",
node.NodeID.String(), node.PulseNum, nk.pulse)
}
}
return nil
}
func (nk *nodekeeper) checkReference(nodes []*core.ActiveNode) error {
// quadratic, should not be a problem because unsync lists are usually empty or have few elements
for _, localNode := range nk.unsync {
for _, node := range nodes {
if node.NodeID.Equal(localNode.NodeID) {
return errors.Errorf("Node %s cannot be added to gossip unsync list "+
"because it is in local unsync list", node.NodeID.String())
}
}
}
return nil
}
func (nk *nodekeeper) collectUnsync() []*core.ActiveNode {
unsync := make([]*core.ActiveNode, len(nk.unsyncGossip)+len(nk.unsync))
index := 0
for _, node := range nk.unsyncGossip {
unsync[index] = node
index++
}
copy(unsync[index:], nk.unsync)
return unsync
}
func (nk *nodekeeper) invalidateCache() |
func (nk *nodekeeper) updateUnsyncPulse() {
for _, node := range nk.unsync {
node.PulseNum = nk.pulse
}
count := len(nk.unsync)
if count != 0 {
log.Infof("NodeKeeper: updated pulse for %d stored unsync nodes", count)
}
}
func hashWriteChecked(hash hash.Hash, data []byte) {
n, err := hash.Write(data)
if n != len(data) {
panic(fmt.Sprintf("Error writing hash. Bytes expected: %d; bytes actual: %d", len(data), n))
}
if err != nil {
panic(err.Error())
}
}
func calculateNodeHash(node *core.ActiveNode) []byte {
hash := sha3.New224()
hashWriteChecked(hash, node.NodeID[:])
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(node.JetRoles))
hashWriteChecked(hash, b[:])
binary.LittleEndian.PutUint32(b, uint32(node.PulseNum))
hashWriteChecked(hash, b[:4])
b[0] = byte(node.State)
hashWriteChecked(hash, b[:1])
hashWriteChecked(hash, node.PublicKey)
return hash.Sum(nil)
}
func calculateHash(list []*core.ActiveNode) (result []byte, err error) {
sort.Slice(list[:], func(i, j int) bool {
return bytes.Compare(list[i].NodeID[:], list[j].NodeID[:]) < 0
})
// catch possible panic from hashWriteChecked in this function and in all calculateNodeHash funcs
defer func() {
if r := recover(); r != nil {
result, err = nil, fmt.Errorf("error calculating hash: %s", r)
}
}()
hash := sha3.New224()
for _, node := range list {
nodeHash := calculateNodeHash(node)
hashWriteChecked(hash, nodeHash)
}
return hash.Sum(nil), nil
}
| {
nk.cacheUnsyncCalc = nil
nk.cacheUnsyncSize = 0
} | identifier_body |
nodekeeper.go | /*
* Copyright 2018 Insolar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nodekeeper
import (
"bytes"
"encoding/binary"
"fmt"
"hash"
"sort"
"sync"
"time"
"github.com/insolar/insolar/core"
"github.com/insolar/insolar/log"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
)
type NodeKeeper interface {
// GetActiveNode get active node by its reference. Returns nil if node is not found.
GetActiveNode(ref core.RecordRef) *core.ActiveNode
// GetActiveNodes get active nodes.
GetActiveNodes() []*core.ActiveNode
// AddActiveNodes set active nodes.
AddActiveNodes([]*core.ActiveNode)
// GetUnsyncHash get hash computed based on the list of unsync nodes, and the size of this list.
GetUnsyncHash() (hash []byte, unsyncCount int, err error)
// GetUnsync gets the local unsync list (excluding other nodes unsync lists).
GetUnsync() []*core.ActiveNode
// SetPulse sets internal PulseNumber to number.
SetPulse(number core.PulseNumber)
// Sync initiate transferring unsync -> sync, sync -> active. If approved is false, unsync is not transferred to sync.
Sync(approved bool)
// AddUnsync add unsync node to the local unsync list.
// Returns error if node's PulseNumber is not equal to the NodeKeeper internal PulseNumber.
AddUnsync(*core.ActiveNode) error
// AddUnsyncGossip merge unsync list from another node to the local unsync list.
// Returns error if:
// 1. One of the nodes' PulseNumber is not equal to the NodeKeeper internal PulseNumber;
// 2. One of the nodes' reference is equal to one of the local unsync nodes' reference.
AddUnsyncGossip([]*core.ActiveNode) error
}
// NewNodeKeeper create new NodeKeeper. unsyncDiscardAfter = timeout after which each unsync node is discarded.
func NewNodeKeeper(unsyncDiscardAfter time.Duration) NodeKeeper {
return &nodekeeper{
state: undefined,
timeout: unsyncDiscardAfter,
active: make(map[core.RecordRef]*core.ActiveNode),
sync: make([]*core.ActiveNode, 0),
unsync: make([]*core.ActiveNode, 0),
unsyncGossip: make(map[core.RecordRef]*core.ActiveNode),
}
}
type nodekeeperState uint8
const (
undefined = nodekeeperState(iota + 1)
awaitUnsync
hashCalculated
synced
)
type nodekeeper struct {
state nodekeeperState
pulse core.PulseNumber
timeout time.Duration
cacheUnsyncCalc []byte
cacheUnsyncSize int
activeLock sync.RWMutex
active map[core.RecordRef]*core.ActiveNode
sync []*core.ActiveNode
unsyncLock sync.Mutex
unsync []*core.ActiveNode
unsyncTimeout []time.Time
unsyncGossip map[core.RecordRef]*core.ActiveNode
}
func (nk *nodekeeper) GetActiveNodes() []*core.ActiveNode {
nk.activeLock.RLock()
defer nk.activeLock.RUnlock()
result := make([]*core.ActiveNode, len(nk.active))
index := 0
for _, node := range nk.active {
result[index] = node
index++
}
return result
}
func (nk *nodekeeper) AddActiveNodes(nodes []*core.ActiveNode) {
nk.activeLock.Lock()
defer nk.activeLock.Unlock()
for _, node := range nodes {
nk.active[node.NodeID] = node
}
}
func (nk *nodekeeper) GetActiveNode(ref core.RecordRef) *core.ActiveNode {
nk.activeLock.RLock()
defer nk.activeLock.RUnlock()
return nk.active[ref]
}
func (nk *nodekeeper) GetUnsyncHash() ([]byte, int, error) {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state != awaitUnsync {
log.Warn("NodeKeeper: GetUnsyncHash called more than once during one pulse")
return nk.cacheUnsyncCalc, nk.cacheUnsyncSize, nil
}
unsync := nk.collectUnsync()
hash, err := calculateHash(unsync)
if err != nil {
return nil, 0, err
}
nk.cacheUnsyncCalc, nk.cacheUnsyncSize = hash, len(unsync)
nk.state = hashCalculated
return nk.cacheUnsyncCalc, nk.cacheUnsyncSize, nil
}
func (nk *nodekeeper) GetUnsync() []*core.ActiveNode {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
result := make([]*core.ActiveNode, len(nk.unsync))
copy(result, nk.unsync)
return result
}
func (nk *nodekeeper) SetPulse(number core.PulseNumber) {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state == undefined {
nk.pulse = number
nk.state = awaitUnsync
return
}
if number <= nk.pulse {
log.Warnf("NodeKeeper: ignored SetPulse call with number=%d while current=%d", uint32(number), uint32(nk.pulse))
return
}
if nk.state == hashCalculated || nk.state == awaitUnsync {
log.Warn("NodeKeeper: SetPulse called not from `undefined` or `synced` state")
nk.activeLock.Lock()
nk.syncUnsafe(false)
nk.activeLock.Unlock()
}
nk.pulse = number
nk.state = awaitUnsync
nk.invalidateCache()
nk.updateUnsyncPulse()
}
func (nk *nodekeeper) Sync(approved bool) {
nk.unsyncLock.Lock()
nk.activeLock.Lock()
defer func() {
nk.activeLock.Unlock()
nk.unsyncLock.Unlock()
}()
if nk.state == synced || nk.state == undefined {
log.Warn("NodeKeeper: ignored Sync call from `synced` or `undefined` state")
return
}
nk.syncUnsafe(approved)
}
func (nk *nodekeeper) AddUnsync(node *core.ActiveNode) error {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state != awaitUnsync {
return errors.New("Cannot add node to unsync list: try again in next pulse slot")
}
checkedList := []*core.ActiveNode{node}
if err := nk.checkPulse(checkedList); err != nil |
nk.unsync = append(nk.unsync, node)
tm := time.Now().Add(nk.timeout)
nk.unsyncTimeout = append(nk.unsyncTimeout, tm)
return nil
}
func (nk *nodekeeper) AddUnsyncGossip(nodes []*core.ActiveNode) error {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state != awaitUnsync {
return errors.New("Cannot add node to unsync list: try again in next pulse slot")
}
if err := nk.checkPulse(nodes); err != nil {
return errors.Wrap(err, "Error adding unsync gossip nodes")
}
if err := nk.checkReference(nodes); err != nil {
return errors.Wrap(err, "Error adding unsync gossip nodes")
}
for _, node := range nodes {
nk.unsyncGossip[node.NodeID] = node
}
return nil
}
func (nk *nodekeeper) syncUnsafe(approved bool) {
// sync -> active
for _, node := range nk.sync {
nk.active[node.NodeID] = node
}
if approved {
// unsync -> sync
unsync := nk.collectUnsync()
nk.sync = unsync
// clear unsync
nk.unsync = make([]*core.ActiveNode, 0)
} else {
// clear sync
nk.sync = make([]*core.ActiveNode, 0)
nk.discardTimedOutUnsync()
}
// clear unsyncGossip
nk.unsyncGossip = make(map[core.RecordRef]*core.ActiveNode)
nk.state = synced
}
func (nk *nodekeeper) discardTimedOutUnsync() {
index := 0
for _, tm := range nk.unsyncTimeout {
if tm.After(time.Now()) {
break
}
index++
}
if index == 0 {
return
}
// discard all unsync nodes before index
nk.unsyncTimeout = nk.unsyncTimeout[index:]
nk.unsync = nk.unsync[index:]
log.Infof("NodeKeeper: discarded %d unsync nodes due to timeout", index)
}
func (nk *nodekeeper) checkPulse(nodes []*core.ActiveNode) error {
for _, node := range nodes {
if node.PulseNum != nk.pulse {
return errors.Errorf("Node ID:%s pulse:%d is not equal to NodeKeeper current pulse:%d",
node.NodeID.String(), node.PulseNum, nk.pulse)
}
}
return nil
}
func (nk *nodekeeper) checkReference(nodes []*core.ActiveNode) error {
// quadratic, should not be a problem because unsync lists are usually empty or have few elements
for _, localNode := range nk.unsync {
for _, node := range nodes {
if node.NodeID.Equal(localNode.NodeID) {
return errors.Errorf("Node %s cannot be added to gossip unsync list "+
"because it is in local unsync list", node.NodeID.String())
}
}
}
return nil
}
func (nk *nodekeeper) collectUnsync() []*core.ActiveNode {
unsync := make([]*core.ActiveNode, len(nk.unsyncGossip)+len(nk.unsync))
index := 0
for _, node := range nk.unsyncGossip {
unsync[index] = node
index++
}
copy(unsync[index:], nk.unsync)
return unsync
}
func (nk *nodekeeper) invalidateCache() {
nk.cacheUnsyncCalc = nil
nk.cacheUnsyncSize = 0
}
func (nk *nodekeeper) updateUnsyncPulse() {
for _, node := range nk.unsync {
node.PulseNum = nk.pulse
}
count := len(nk.unsync)
if count != 0 {
log.Infof("NodeKeeper: updated pulse for %d stored unsync nodes", count)
}
}
func hashWriteChecked(hash hash.Hash, data []byte) {
n, err := hash.Write(data)
if n != len(data) {
panic(fmt.Sprintf("Error writing hash. Bytes expected: %d; bytes actual: %d", len(data), n))
}
if err != nil {
panic(err.Error())
}
}
func calculateNodeHash(node *core.ActiveNode) []byte {
hash := sha3.New224()
hashWriteChecked(hash, node.NodeID[:])
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(node.JetRoles))
hashWriteChecked(hash, b[:])
binary.LittleEndian.PutUint32(b, uint32(node.PulseNum))
hashWriteChecked(hash, b[:4])
b[0] = byte(node.State)
hashWriteChecked(hash, b[:1])
hashWriteChecked(hash, node.PublicKey)
return hash.Sum(nil)
}
func calculateHash(list []*core.ActiveNode) (result []byte, err error) {
sort.Slice(list[:], func(i, j int) bool {
return bytes.Compare(list[i].NodeID[:], list[j].NodeID[:]) < 0
})
// catch possible panic from hashWriteChecked in this function and in all calculateNodeHash funcs
defer func() {
if r := recover(); r != nil {
result, err = nil, fmt.Errorf("error calculating hash: %s", r)
}
}()
hash := sha3.New224()
for _, node := range list {
nodeHash := calculateNodeHash(node)
hashWriteChecked(hash, nodeHash)
}
return hash.Sum(nil), nil
}
| {
return errors.Wrap(err, "Error adding local unsync node")
} | conditional_block |
nodekeeper.go | /*
* Copyright 2018 Insolar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nodekeeper
import (
"bytes"
"encoding/binary"
"fmt"
"hash"
"sort"
"sync"
"time"
"github.com/insolar/insolar/core"
"github.com/insolar/insolar/log"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
)
type NodeKeeper interface {
// GetActiveNode get active node by its reference. Returns nil if node is not found.
GetActiveNode(ref core.RecordRef) *core.ActiveNode
// GetActiveNodes get active nodes.
GetActiveNodes() []*core.ActiveNode
// AddActiveNodes set active nodes.
AddActiveNodes([]*core.ActiveNode)
// GetUnsyncHash get hash computed based on the list of unsync nodes, and the size of this list.
GetUnsyncHash() (hash []byte, unsyncCount int, err error)
// GetUnsync gets the local unsync list (excluding other nodes unsync lists).
GetUnsync() []*core.ActiveNode
// SetPulse sets internal PulseNumber to number.
SetPulse(number core.PulseNumber)
// Sync initiate transferring unsync -> sync, sync -> active. If approved is false, unsync is not transferred to sync.
Sync(approved bool)
// AddUnsync add unsync node to the local unsync list.
// Returns error if node's PulseNumber is not equal to the NodeKeeper internal PulseNumber.
AddUnsync(*core.ActiveNode) error
// AddUnsyncGossip merge unsync list from another node to the local unsync list.
// Returns error if:
// 1. One of the nodes' PulseNumber is not equal to the NodeKeeper internal PulseNumber;
// 2. One of the nodes' reference is equal to one of the local unsync nodes' reference.
AddUnsyncGossip([]*core.ActiveNode) error
}
// NewNodeKeeper create new NodeKeeper. unsyncDiscardAfter = timeout after which each unsync node is discarded.
func NewNodeKeeper(unsyncDiscardAfter time.Duration) NodeKeeper {
return &nodekeeper{
state: undefined,
timeout: unsyncDiscardAfter,
active: make(map[core.RecordRef]*core.ActiveNode),
sync: make([]*core.ActiveNode, 0),
unsync: make([]*core.ActiveNode, 0),
unsyncGossip: make(map[core.RecordRef]*core.ActiveNode),
}
}
type nodekeeperState uint8
const (
undefined = nodekeeperState(iota + 1)
awaitUnsync
hashCalculated
synced
)
type nodekeeper struct {
state nodekeeperState
pulse core.PulseNumber
timeout time.Duration
cacheUnsyncCalc []byte
cacheUnsyncSize int
activeLock sync.RWMutex
active map[core.RecordRef]*core.ActiveNode
sync []*core.ActiveNode
unsyncLock sync.Mutex
unsync []*core.ActiveNode
unsyncTimeout []time.Time
unsyncGossip map[core.RecordRef]*core.ActiveNode
}
func (nk *nodekeeper) GetActiveNodes() []*core.ActiveNode {
nk.activeLock.RLock()
defer nk.activeLock.RUnlock()
result := make([]*core.ActiveNode, len(nk.active))
index := 0
for _, node := range nk.active {
result[index] = node
index++
}
return result
}
func (nk *nodekeeper) AddActiveNodes(nodes []*core.ActiveNode) {
nk.activeLock.Lock()
defer nk.activeLock.Unlock()
for _, node := range nodes {
nk.active[node.NodeID] = node
}
}
func (nk *nodekeeper) GetActiveNode(ref core.RecordRef) *core.ActiveNode {
nk.activeLock.RLock()
defer nk.activeLock.RUnlock()
return nk.active[ref]
}
func (nk *nodekeeper) GetUnsyncHash() ([]byte, int, error) {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state != awaitUnsync {
log.Warn("NodeKeeper: GetUnsyncHash called more than once during one pulse")
return nk.cacheUnsyncCalc, nk.cacheUnsyncSize, nil
}
unsync := nk.collectUnsync()
hash, err := calculateHash(unsync)
if err != nil {
return nil, 0, err
}
nk.cacheUnsyncCalc, nk.cacheUnsyncSize = hash, len(unsync)
nk.state = hashCalculated
return nk.cacheUnsyncCalc, nk.cacheUnsyncSize, nil
}
func (nk *nodekeeper) GetUnsync() []*core.ActiveNode {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
result := make([]*core.ActiveNode, len(nk.unsync))
copy(result, nk.unsync)
return result
}
func (nk *nodekeeper) SetPulse(number core.PulseNumber) {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state == undefined {
nk.pulse = number
nk.state = awaitUnsync
return
}
if number <= nk.pulse {
log.Warnf("NodeKeeper: ignored SetPulse call with number=%d while current=%d", uint32(number), uint32(nk.pulse))
return
}
if nk.state == hashCalculated || nk.state == awaitUnsync {
log.Warn("NodeKeeper: SetPulse called not from `undefined` or `synced` state")
nk.activeLock.Lock()
nk.syncUnsafe(false)
nk.activeLock.Unlock()
}
nk.pulse = number
nk.state = awaitUnsync
nk.invalidateCache()
nk.updateUnsyncPulse()
}
func (nk *nodekeeper) Sync(approved bool) {
nk.unsyncLock.Lock()
nk.activeLock.Lock()
defer func() {
nk.activeLock.Unlock()
nk.unsyncLock.Unlock()
}()
if nk.state == synced || nk.state == undefined {
log.Warn("NodeKeeper: ignored Sync call from `synced` or `undefined` state")
return
}
nk.syncUnsafe(approved)
}
func (nk *nodekeeper) AddUnsync(node *core.ActiveNode) error {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state != awaitUnsync {
return errors.New("Cannot add node to unsync list: try again in next pulse slot")
}
checkedList := []*core.ActiveNode{node}
if err := nk.checkPulse(checkedList); err != nil {
return errors.Wrap(err, "Error adding local unsync node")
}
nk.unsync = append(nk.unsync, node)
tm := time.Now().Add(nk.timeout)
nk.unsyncTimeout = append(nk.unsyncTimeout, tm)
return nil
}
func (nk *nodekeeper) AddUnsyncGossip(nodes []*core.ActiveNode) error {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state != awaitUnsync {
return errors.New("Cannot add node to unsync list: try again in next pulse slot")
}
if err := nk.checkPulse(nodes); err != nil {
return errors.Wrap(err, "Error adding unsync gossip nodes")
}
if err := nk.checkReference(nodes); err != nil {
return errors.Wrap(err, "Error adding unsync gossip nodes")
}
for _, node := range nodes {
nk.unsyncGossip[node.NodeID] = node
}
return nil
}
func (nk *nodekeeper) syncUnsafe(approved bool) {
// sync -> active
for _, node := range nk.sync {
nk.active[node.NodeID] = node
}
if approved {
// unsync -> sync
unsync := nk.collectUnsync()
nk.sync = unsync
// clear unsync
nk.unsync = make([]*core.ActiveNode, 0)
} else {
// clear sync
nk.sync = make([]*core.ActiveNode, 0)
nk.discardTimedOutUnsync()
}
// clear unsyncGossip
nk.unsyncGossip = make(map[core.RecordRef]*core.ActiveNode)
nk.state = synced
}
func (nk *nodekeeper) discardTimedOutUnsync() {
index := 0
for _, tm := range nk.unsyncTimeout {
if tm.After(time.Now()) {
break
}
index++
}
if index == 0 {
return
}
// discard all unsync nodes before index
nk.unsyncTimeout = nk.unsyncTimeout[index:]
nk.unsync = nk.unsync[index:]
log.Infof("NodeKeeper: discarded %d unsync nodes due to timeout", index)
}
func (nk *nodekeeper) | (nodes []*core.ActiveNode) error {
for _, node := range nodes {
if node.PulseNum != nk.pulse {
return errors.Errorf("Node ID:%s pulse:%d is not equal to NodeKeeper current pulse:%d",
node.NodeID.String(), node.PulseNum, nk.pulse)
}
}
return nil
}
func (nk *nodekeeper) checkReference(nodes []*core.ActiveNode) error {
// quadratic, should not be a problem because unsync lists are usually empty or have few elements
for _, localNode := range nk.unsync {
for _, node := range nodes {
if node.NodeID.Equal(localNode.NodeID) {
return errors.Errorf("Node %s cannot be added to gossip unsync list "+
"because it is in local unsync list", node.NodeID.String())
}
}
}
return nil
}
func (nk *nodekeeper) collectUnsync() []*core.ActiveNode {
unsync := make([]*core.ActiveNode, len(nk.unsyncGossip)+len(nk.unsync))
index := 0
for _, node := range nk.unsyncGossip {
unsync[index] = node
index++
}
copy(unsync[index:], nk.unsync)
return unsync
}
func (nk *nodekeeper) invalidateCache() {
nk.cacheUnsyncCalc = nil
nk.cacheUnsyncSize = 0
}
func (nk *nodekeeper) updateUnsyncPulse() {
for _, node := range nk.unsync {
node.PulseNum = nk.pulse
}
count := len(nk.unsync)
if count != 0 {
log.Infof("NodeKeeper: updated pulse for %d stored unsync nodes", count)
}
}
func hashWriteChecked(hash hash.Hash, data []byte) {
n, err := hash.Write(data)
if n != len(data) {
panic(fmt.Sprintf("Error writing hash. Bytes expected: %d; bytes actual: %d", len(data), n))
}
if err != nil {
panic(err.Error())
}
}
func calculateNodeHash(node *core.ActiveNode) []byte {
hash := sha3.New224()
hashWriteChecked(hash, node.NodeID[:])
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(node.JetRoles))
hashWriteChecked(hash, b[:])
binary.LittleEndian.PutUint32(b, uint32(node.PulseNum))
hashWriteChecked(hash, b[:4])
b[0] = byte(node.State)
hashWriteChecked(hash, b[:1])
hashWriteChecked(hash, node.PublicKey)
return hash.Sum(nil)
}
func calculateHash(list []*core.ActiveNode) (result []byte, err error) {
sort.Slice(list[:], func(i, j int) bool {
return bytes.Compare(list[i].NodeID[:], list[j].NodeID[:]) < 0
})
// catch possible panic from hashWriteChecked in this function and in all calculateNodeHash funcs
defer func() {
if r := recover(); r != nil {
result, err = nil, fmt.Errorf("error calculating hash: %s", r)
}
}()
hash := sha3.New224()
for _, node := range list {
nodeHash := calculateNodeHash(node)
hashWriteChecked(hash, nodeHash)
}
return hash.Sum(nil), nil
}
| checkPulse | identifier_name |
nodekeeper.go | /*
* Copyright 2018 Insolar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nodekeeper
import (
"bytes"
"encoding/binary"
"fmt"
"hash"
"sort"
"sync"
"time"
"github.com/insolar/insolar/core"
"github.com/insolar/insolar/log"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
)
type NodeKeeper interface {
// GetActiveNode get active node by its reference. Returns nil if node is not found.
GetActiveNode(ref core.RecordRef) *core.ActiveNode
// GetActiveNodes get active nodes.
GetActiveNodes() []*core.ActiveNode
// AddActiveNodes set active nodes.
AddActiveNodes([]*core.ActiveNode)
// GetUnsyncHash get hash computed based on the list of unsync nodes, and the size of this list.
GetUnsyncHash() (hash []byte, unsyncCount int, err error)
// GetUnsync gets the local unsync list (excluding other nodes unsync lists).
GetUnsync() []*core.ActiveNode | // Sync initiate transferring unsync -> sync, sync -> active. If approved is false, unsync is not transferred to sync.
Sync(approved bool)
// AddUnsync add unsync node to the local unsync list.
// Returns error if node's PulseNumber is not equal to the NodeKeeper internal PulseNumber.
AddUnsync(*core.ActiveNode) error
// AddUnsyncGossip merge unsync list from another node to the local unsync list.
// Returns error if:
// 1. One of the nodes' PulseNumber is not equal to the NodeKeeper internal PulseNumber;
// 2. One of the nodes' reference is equal to one of the local unsync nodes' reference.
AddUnsyncGossip([]*core.ActiveNode) error
}
// NewNodeKeeper create new NodeKeeper. unsyncDiscardAfter = timeout after which each unsync node is discarded.
func NewNodeKeeper(unsyncDiscardAfter time.Duration) NodeKeeper {
return &nodekeeper{
state: undefined,
timeout: unsyncDiscardAfter,
active: make(map[core.RecordRef]*core.ActiveNode),
sync: make([]*core.ActiveNode, 0),
unsync: make([]*core.ActiveNode, 0),
unsyncGossip: make(map[core.RecordRef]*core.ActiveNode),
}
}
type nodekeeperState uint8
const (
undefined = nodekeeperState(iota + 1)
awaitUnsync
hashCalculated
synced
)
type nodekeeper struct {
state nodekeeperState
pulse core.PulseNumber
timeout time.Duration
cacheUnsyncCalc []byte
cacheUnsyncSize int
activeLock sync.RWMutex
active map[core.RecordRef]*core.ActiveNode
sync []*core.ActiveNode
unsyncLock sync.Mutex
unsync []*core.ActiveNode
unsyncTimeout []time.Time
unsyncGossip map[core.RecordRef]*core.ActiveNode
}
func (nk *nodekeeper) GetActiveNodes() []*core.ActiveNode {
nk.activeLock.RLock()
defer nk.activeLock.RUnlock()
result := make([]*core.ActiveNode, len(nk.active))
index := 0
for _, node := range nk.active {
result[index] = node
index++
}
return result
}
func (nk *nodekeeper) AddActiveNodes(nodes []*core.ActiveNode) {
nk.activeLock.Lock()
defer nk.activeLock.Unlock()
for _, node := range nodes {
nk.active[node.NodeID] = node
}
}
func (nk *nodekeeper) GetActiveNode(ref core.RecordRef) *core.ActiveNode {
nk.activeLock.RLock()
defer nk.activeLock.RUnlock()
return nk.active[ref]
}
func (nk *nodekeeper) GetUnsyncHash() ([]byte, int, error) {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state != awaitUnsync {
log.Warn("NodeKeeper: GetUnsyncHash called more than once during one pulse")
return nk.cacheUnsyncCalc, nk.cacheUnsyncSize, nil
}
unsync := nk.collectUnsync()
hash, err := calculateHash(unsync)
if err != nil {
return nil, 0, err
}
nk.cacheUnsyncCalc, nk.cacheUnsyncSize = hash, len(unsync)
nk.state = hashCalculated
return nk.cacheUnsyncCalc, nk.cacheUnsyncSize, nil
}
func (nk *nodekeeper) GetUnsync() []*core.ActiveNode {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
result := make([]*core.ActiveNode, len(nk.unsync))
copy(result, nk.unsync)
return result
}
func (nk *nodekeeper) SetPulse(number core.PulseNumber) {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state == undefined {
nk.pulse = number
nk.state = awaitUnsync
return
}
if number <= nk.pulse {
log.Warnf("NodeKeeper: ignored SetPulse call with number=%d while current=%d", uint32(number), uint32(nk.pulse))
return
}
if nk.state == hashCalculated || nk.state == awaitUnsync {
log.Warn("NodeKeeper: SetPulse called not from `undefined` or `synced` state")
nk.activeLock.Lock()
nk.syncUnsafe(false)
nk.activeLock.Unlock()
}
nk.pulse = number
nk.state = awaitUnsync
nk.invalidateCache()
nk.updateUnsyncPulse()
}
func (nk *nodekeeper) Sync(approved bool) {
nk.unsyncLock.Lock()
nk.activeLock.Lock()
defer func() {
nk.activeLock.Unlock()
nk.unsyncLock.Unlock()
}()
if nk.state == synced || nk.state == undefined {
log.Warn("NodeKeeper: ignored Sync call from `synced` or `undefined` state")
return
}
nk.syncUnsafe(approved)
}
func (nk *nodekeeper) AddUnsync(node *core.ActiveNode) error {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state != awaitUnsync {
return errors.New("Cannot add node to unsync list: try again in next pulse slot")
}
checkedList := []*core.ActiveNode{node}
if err := nk.checkPulse(checkedList); err != nil {
return errors.Wrap(err, "Error adding local unsync node")
}
nk.unsync = append(nk.unsync, node)
tm := time.Now().Add(nk.timeout)
nk.unsyncTimeout = append(nk.unsyncTimeout, tm)
return nil
}
func (nk *nodekeeper) AddUnsyncGossip(nodes []*core.ActiveNode) error {
nk.unsyncLock.Lock()
defer nk.unsyncLock.Unlock()
if nk.state != awaitUnsync {
return errors.New("Cannot add node to unsync list: try again in next pulse slot")
}
if err := nk.checkPulse(nodes); err != nil {
return errors.Wrap(err, "Error adding unsync gossip nodes")
}
if err := nk.checkReference(nodes); err != nil {
return errors.Wrap(err, "Error adding unsync gossip nodes")
}
for _, node := range nodes {
nk.unsyncGossip[node.NodeID] = node
}
return nil
}
func (nk *nodekeeper) syncUnsafe(approved bool) {
// sync -> active
for _, node := range nk.sync {
nk.active[node.NodeID] = node
}
if approved {
// unsync -> sync
unsync := nk.collectUnsync()
nk.sync = unsync
// clear unsync
nk.unsync = make([]*core.ActiveNode, 0)
} else {
// clear sync
nk.sync = make([]*core.ActiveNode, 0)
nk.discardTimedOutUnsync()
}
// clear unsyncGossip
nk.unsyncGossip = make(map[core.RecordRef]*core.ActiveNode)
nk.state = synced
}
func (nk *nodekeeper) discardTimedOutUnsync() {
index := 0
for _, tm := range nk.unsyncTimeout {
if tm.After(time.Now()) {
break
}
index++
}
if index == 0 {
return
}
// discard all unsync nodes before index
nk.unsyncTimeout = nk.unsyncTimeout[index:]
nk.unsync = nk.unsync[index:]
log.Infof("NodeKeeper: discarded %d unsync nodes due to timeout", index)
}
func (nk *nodekeeper) checkPulse(nodes []*core.ActiveNode) error {
for _, node := range nodes {
if node.PulseNum != nk.pulse {
return errors.Errorf("Node ID:%s pulse:%d is not equal to NodeKeeper current pulse:%d",
node.NodeID.String(), node.PulseNum, nk.pulse)
}
}
return nil
}
func (nk *nodekeeper) checkReference(nodes []*core.ActiveNode) error {
// quadratic, should not be a problem because unsync lists are usually empty or have few elements
for _, localNode := range nk.unsync {
for _, node := range nodes {
if node.NodeID.Equal(localNode.NodeID) {
return errors.Errorf("Node %s cannot be added to gossip unsync list "+
"because it is in local unsync list", node.NodeID.String())
}
}
}
return nil
}
func (nk *nodekeeper) collectUnsync() []*core.ActiveNode {
unsync := make([]*core.ActiveNode, len(nk.unsyncGossip)+len(nk.unsync))
index := 0
for _, node := range nk.unsyncGossip {
unsync[index] = node
index++
}
copy(unsync[index:], nk.unsync)
return unsync
}
func (nk *nodekeeper) invalidateCache() {
nk.cacheUnsyncCalc = nil
nk.cacheUnsyncSize = 0
}
func (nk *nodekeeper) updateUnsyncPulse() {
for _, node := range nk.unsync {
node.PulseNum = nk.pulse
}
count := len(nk.unsync)
if count != 0 {
log.Infof("NodeKeeper: updated pulse for %d stored unsync nodes", count)
}
}
func hashWriteChecked(hash hash.Hash, data []byte) {
n, err := hash.Write(data)
if n != len(data) {
panic(fmt.Sprintf("Error writing hash. Bytes expected: %d; bytes actual: %d", len(data), n))
}
if err != nil {
panic(err.Error())
}
}
func calculateNodeHash(node *core.ActiveNode) []byte {
hash := sha3.New224()
hashWriteChecked(hash, node.NodeID[:])
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(node.JetRoles))
hashWriteChecked(hash, b[:])
binary.LittleEndian.PutUint32(b, uint32(node.PulseNum))
hashWriteChecked(hash, b[:4])
b[0] = byte(node.State)
hashWriteChecked(hash, b[:1])
hashWriteChecked(hash, node.PublicKey)
return hash.Sum(nil)
}
func calculateHash(list []*core.ActiveNode) (result []byte, err error) {
sort.Slice(list[:], func(i, j int) bool {
return bytes.Compare(list[i].NodeID[:], list[j].NodeID[:]) < 0
})
// catch possible panic from hashWriteChecked in this function and in all calculateNodeHash funcs
defer func() {
if r := recover(); r != nil {
result, err = nil, fmt.Errorf("error calculating hash: %s", r)
}
}()
hash := sha3.New224()
for _, node := range list {
nodeHash := calculateNodeHash(node)
hashWriteChecked(hash, nodeHash)
}
return hash.Sum(nil), nil
} | // SetPulse sets internal PulseNumber to number.
SetPulse(number core.PulseNumber) | random_line_split |
curiosity-rl.py | '''
curiosity-rl.py
Curiosity driven RL Framework
'''
from __future__ import print_function
import tensorflow as tf
import numpy as np
from scipy.misc import imsave # Unused ... now
import time
import uw_random
import config
import pyosr
import rlargs
import a2c
import a2c_overfit
import a2c_mp
import dqn
import loco_overfit
import tunnel
import random
from six.moves import queue,input
import qtrainer
import ctrainer
import iftrainer
import curiosity
import rlsampler
import rlutil
import multiprocessing as mp
AlphaPuzzle = curiosity.RigidPuzzle
def create_trainer(args, global_step, batch_normalization):
'''
if len(args.egreedy) != 1 and len(args.egreedy) != args.threads:
assert False,"--egreedy should have only one argument, or match the number of threads"
'''
advcore = curiosity.create_advcore(learning_rate=1e-3, args=args, batch_normalization=batch_normalization)
bnorm = batch_normalization
if 'a2c' in args.train:
if args.threads > 1:
TRAINER = a2c.A2CTrainerDTT
else:
TRAINER = a2c.A2CTrainer
if 'a2c_overfit' in args.train:
TRAINER = a2c_overfit.OverfitTrainer
if args.localcluster_nsampler > 0:
TRAINER = a2c_mp.MPA2CTrainer
if args.train == 'a2c_overfit_from_fv':
TRAINER = a2c_overfit.OverfitTrainerFromFV
train_everything = False if args.viewinitckpt else True
trainer = TRAINER(
advcore=advcore,
tmax=args.batch,
gamma=args.GAMMA,
# gamma=0.5,
learning_rate=5e-5,
ckpt_dir=args.ckptdir,
global_step=global_step,
batch_normalization=bnorm,
total_number_of_replicas=args.localcluster_nsampler,
period=args.period,
LAMBDA=args.LAMBDA,
train_everything=train_everything)
elif args.train in ['dqn', 'dqn_overfit']:
TRAINER = dqn.DQNTrainerMP if args.localcluster_nsampler > 0 else dqn.DQNTrainer
trainer = TRAINER(
advcore=advcore,
args=args,
learning_rate=1e-4,
batch_normalization=bnorm)
elif args.train in ['loco_overfit']:
assert args.localcluster_nsampler <= 0, 'loco_overfit does not support MP training'
TRAINER = loco_overfit.LocoOverfitter
trainer = TRAINER(
advcore=advcore,
args=args,
learning_rate=1e-4,
batch_normalization=bnorm)
elif 'tunnel_finder' in args.train:
if args.train == 'tunnel_finder':
TRAINER = tunnel.TunnelFinderTrainer
elif args.train == 'tunnel_finder_twin1':
TRAINER = tunnel.TunnelFinderTwinTrainer
elif args.train == 'tunnel_finder_foreach1':
TRAINER = tunnel.TunnelFinderForEach1Trainer
else:
assert False, 'Unknown --train {}'.format(args.train)
trainer = TRAINER(
advcore=advcore,
args=args,
learning_rate=5e-5,
batch_normalization=bnorm)
elif args.train in ['QwithGT', 'QandFCFE', 'q_overfit'] or args.qlearning_with_gt:
trainer = qtrainer.QTrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step,
train_fcfe=(args.train == 'QandFCFE'),
train_everything=(args.train == 'q_overfit'))
if args.qlearning_gt_file:
trainer.attach_gt(args.qlearning_gt_file)
if args.samplein:
trainer.attach_gt(args.samplein)
elif args.train == 'curiosity':
trainer = ctrainer.CTrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step)
trainer.set_action_set(args.actionset)
trainer.limit_samples_to_use(args.sampletouse)
if args.samplein != '':
trainer.attach_samplein(args.samplein)
elif args.train == 'InF':
# TODO: allow samples from files
# Note: precomputed samples have one problem:
# Actions cannot be translated to new permutations
trainer = iftrainer.IFTrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step)
elif args.train == 'Ionly':
# SAN Check: Only optimize agains Inverse Model
# Should work pretty well after loading pretrained weights
trainer = iftrainer.ITrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step)
else:
assert False, '--train {} not implemented yet'.format(args.train)
return trainer, advcore
#
# IEngine: wrapper over distributed training and non-distributed evaluation
#
class IEngine(object):
def __init__(self, args):
self.args = args
if 'gpu' in args.device:
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
else:
session_config = None
self.session_config = session_config
def run(self, sess):
pass
class ParamServer(IEngine):
def __init__(self, args, server):
super(ParamServer, self).__init__(args)
self.server = server
def run(self):
self.server.join()
class TEngine(IEngine):
def __init__(self, args):
super(TEngine, self).__init__(args)
self.mts_master = ''
self.mts_is_chief = True
self.tid = 0
def get_hooks(self):
hooks = [tf.train.StopAtStepHook(last_step=self.trainer.total_iter)]
if self.args.viewinitckpt:
class PretrainLoader(tf.train.SessionRunHook):
def __init__(self, advcore, ckpt):
self.advcore = advcore
self.ckpt = ckpt
def after_create_session(self, session, coord):
self.advcore.load_pretrain(session, self.ckpt)
print("PretrainLoader.after_create_session called")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir=self.args.ckptdir)
# Do NOT load the pretrained weights if checkpoint exists.
if not (ckpt and ckpt.model_checkpoint_path):
hooks += [PretrainLoader(self.advcore, self.args.viewinitckpt)]
return hooks
def _create_trainer(self, args):
self.bnorm = tf.placeholder(tf.bool, shape=()) if args.batchnorm else None
self.gs = tf.contrib.framework.get_or_create_global_step()
self.trainer, self.advcore = create_trainer(args, self.gs, batch_normalization=self.bnorm)
def run(self):
hooks = self.get_hooks()
# Create MonitoredTrainingSession to BOTH training and evaluation, since it's RL
#
# Note: we need to disable summaries and write it manually, because the
# summary ops are evaluated in every mon_sess.run(), and there is no way to disable it for evaluation
with tf.train.MonitoredTrainingSession(master=self.mts_master,
is_chief=self.mts_is_chief,
checkpoint_dir=self.args.ckptdir,
config=self.session_config,
save_summaries_steps=0,
save_summaries_secs=0,
save_checkpoint_secs=600,
hooks=hooks) as mon_sess:
while not mon_sess.should_stop():
self.trainer.train(self.envir, mon_sess, self.tid)
class CentralizedTrainer(TEngine):
def __init__(self, args):
super(CentralizedTrainer, self).__init__(args)
self._envirs = [AlphaPuzzle(args, aid, aid) for aid in range(self.args.agents)]
self._create_trainer(args)
self.envir_picker = 0
@property
def envir(self):
ret = self._envirs[self.envir_picker]
self.envir_picker = (self.envir_picker + 1) % self.args.agents
return ret
'''
DistributedTrainer:
Enable the distribution by:
1. Create model under different tf.device
2. set self.mts_* to enable distributed MonitoredTrainingSession in TEngine.run
'''
class DistributedTrainer(TEngine):
def __init__(self, args, cluster, server, mpqueue):
assert args.period >= 0, "--period must be explicitly listed for distributed training"
super(DistributedTrainer, self).__init__(args)
self.tid = args.task_index
self.envir = AlphaPuzzle(args, self.tid, self.tid)
self.cluster = cluster
self.server = server
# Enable distributed training
with tf.device(tf.train.replica_device_setter(
worker_device="/job:worker/task:{}".format(args.task_index),
cluster=cluster)):
self._create_trainer(args)
self.trainer.install_mpqueue_as(mpqueue, args.task_index)
self.mts_master = self.server.target
self.mts_is_chief = (args.task_index == 0)
class Evaluator(IEngine):
def __init__(self, args):
super(Evaluator, self).__init__(args)
self.gs = tf.contrib.framework.get_or_create_global_step()
self.g = tf.get_default_graph()
self.player = rlsampler.create_visualizer(args, self.g, self.gs)
def run(self):
args = self.args
saver = tf.train.Saver()
with tf.Session(config=self.session_config) as sess:
tf.get_default_graph().finalize()
if args.viewinitckpt:
self.player.advcore.load_pretrain(sess, args.viewinitckpt)
print("Load from viewinitckpt {}".format(args.viewinitckpt))
if self.player.mandatory_ckpt:
assert args.ckptdir, "--ckptdir is mandatory when --eval"
if args.ckptdir:
ckpt = tf.train.get_checkpoint_state(checkpoint_dir=args.ckptdir)
print('ckpt {}'.format(ckpt))
if self.player.mandatory_ckpt:
assert ckpt is not None, "Missing actual checkpoints at --ckptdir"
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
accum_epoch = sess.run(self.gs)
print('Restored!, global_step {}'.format(accum_epoch))
self.player.attach(sess)
self.player.play()
def curiosity_create_engine(args, mpqueue):
if args.eval:
return Evaluator(args)
cluster_dict = rlutil.create_cluster_dic(args)
if cluster_dict is None:
return CentralizedTrainer(args)
assert mpqueue is not None, "[curiosity_create_engine] MP training requires a mp.Queue object "
# assert False, "Not testing DistributedTrainer for now"
# Create a cluster from the parameter server and worker hosts.
cluster = tf.train.ClusterSpec(cluster_dict)
# Create and start a server for the local task. | task_index=args.task_index,
config=session_config)
if args.job_name == 'ps':
engine = ParamServer(args, server)
else:
assert args.job_name == 'worker', "--job_name should be either ps or worker"
engine = DistributedTrainer(args, cluster, server, mpqueue)
return engine
'''
Main Function:
1. Create TF graphs by creating Facade class TrainingManager
- This facade class will create corresponding training class on demand
1.a Alternatively, call rlsampler.create_visualizer to evaluate the traing results
2. Initialize TF sessions and TF Saver
3. Restore from checkpoints on demand
3. Call TrainingManager for some iterations on demand
'''
def process_main(args, mpqueue=None):
'''
CAVEAT: WITHOUT ALLOW_GRWTH, WE MUST CREATE RENDERER BEFORE CALLING ANY TF ROUTINE
'''
pyosr.init()
dpy = pyosr.create_display()
glctx = pyosr.create_gl_context(dpy)
# Create Training/Evaluation Engine
engine = curiosity_create_engine(args, mpqueue=mpqueue)
# Engine execution
engine.run()
def curiosity_main(args):
if args.localcluster_nsampler <= 0 and not args.ps_hosts:
process_main(args)
return
# Distributed execution
args_list = rlutil.assemble_distributed_arguments(args)
mgr = mp.Manager()
mpq = mgr.Queue()
procs = [mp.Process(target=process_main, args=(a, mpq)) for a in args_list]
for p in procs:
p.start()
for p in procs:
p.join()
def main():
args = rlargs.parse()
if args.continuetrain:
if args.samplein:
print('--continuetrain is incompatible with --samplein')
exit()
if args.batching:
print('--continuetrain is incompatible with --batching')
exit()
if -1 in args.actionset:
args.actionset = [i for i in range(12)]
assert args.threads == 1, "--threads has no effect in distributed training"
args.total_sample = args.iter * args.threads
args.total_epoch = args.total_sample / args.samplebatching
print("> Arguments {}".format(args))
curiosity_main(args)
if __name__ == '__main__':
main() | session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
server = tf.train.Server(cluster,
job_name=args.job_name, | random_line_split |
curiosity-rl.py | '''
curiosity-rl.py
Curiosity driven RL Framework
'''
from __future__ import print_function
import tensorflow as tf
import numpy as np
from scipy.misc import imsave # Unused ... now
import time
import uw_random
import config
import pyosr
import rlargs
import a2c
import a2c_overfit
import a2c_mp
import dqn
import loco_overfit
import tunnel
import random
from six.moves import queue,input
import qtrainer
import ctrainer
import iftrainer
import curiosity
import rlsampler
import rlutil
import multiprocessing as mp
AlphaPuzzle = curiosity.RigidPuzzle
def | (args, global_step, batch_normalization):
'''
if len(args.egreedy) != 1 and len(args.egreedy) != args.threads:
assert False,"--egreedy should have only one argument, or match the number of threads"
'''
advcore = curiosity.create_advcore(learning_rate=1e-3, args=args, batch_normalization=batch_normalization)
bnorm = batch_normalization
if 'a2c' in args.train:
if args.threads > 1:
TRAINER = a2c.A2CTrainerDTT
else:
TRAINER = a2c.A2CTrainer
if 'a2c_overfit' in args.train:
TRAINER = a2c_overfit.OverfitTrainer
if args.localcluster_nsampler > 0:
TRAINER = a2c_mp.MPA2CTrainer
if args.train == 'a2c_overfit_from_fv':
TRAINER = a2c_overfit.OverfitTrainerFromFV
train_everything = False if args.viewinitckpt else True
trainer = TRAINER(
advcore=advcore,
tmax=args.batch,
gamma=args.GAMMA,
# gamma=0.5,
learning_rate=5e-5,
ckpt_dir=args.ckptdir,
global_step=global_step,
batch_normalization=bnorm,
total_number_of_replicas=args.localcluster_nsampler,
period=args.period,
LAMBDA=args.LAMBDA,
train_everything=train_everything)
elif args.train in ['dqn', 'dqn_overfit']:
TRAINER = dqn.DQNTrainerMP if args.localcluster_nsampler > 0 else dqn.DQNTrainer
trainer = TRAINER(
advcore=advcore,
args=args,
learning_rate=1e-4,
batch_normalization=bnorm)
elif args.train in ['loco_overfit']:
assert args.localcluster_nsampler <= 0, 'loco_overfit does not support MP training'
TRAINER = loco_overfit.LocoOverfitter
trainer = TRAINER(
advcore=advcore,
args=args,
learning_rate=1e-4,
batch_normalization=bnorm)
elif 'tunnel_finder' in args.train:
if args.train == 'tunnel_finder':
TRAINER = tunnel.TunnelFinderTrainer
elif args.train == 'tunnel_finder_twin1':
TRAINER = tunnel.TunnelFinderTwinTrainer
elif args.train == 'tunnel_finder_foreach1':
TRAINER = tunnel.TunnelFinderForEach1Trainer
else:
assert False, 'Unknown --train {}'.format(args.train)
trainer = TRAINER(
advcore=advcore,
args=args,
learning_rate=5e-5,
batch_normalization=bnorm)
elif args.train in ['QwithGT', 'QandFCFE', 'q_overfit'] or args.qlearning_with_gt:
trainer = qtrainer.QTrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step,
train_fcfe=(args.train == 'QandFCFE'),
train_everything=(args.train == 'q_overfit'))
if args.qlearning_gt_file:
trainer.attach_gt(args.qlearning_gt_file)
if args.samplein:
trainer.attach_gt(args.samplein)
elif args.train == 'curiosity':
trainer = ctrainer.CTrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step)
trainer.set_action_set(args.actionset)
trainer.limit_samples_to_use(args.sampletouse)
if args.samplein != '':
trainer.attach_samplein(args.samplein)
elif args.train == 'InF':
# TODO: allow samples from files
# Note: precomputed samples have one problem:
# Actions cannot be translated to new permutations
trainer = iftrainer.IFTrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step)
elif args.train == 'Ionly':
# SAN Check: Only optimize agains Inverse Model
# Should work pretty well after loading pretrained weights
trainer = iftrainer.ITrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step)
else:
assert False, '--train {} not implemented yet'.format(args.train)
return trainer, advcore
#
# IEngine: wrapper over distributed training and non-distributed evaluation
#
class IEngine(object):
def __init__(self, args):
self.args = args
if 'gpu' in args.device:
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
else:
session_config = None
self.session_config = session_config
def run(self, sess):
pass
class ParamServer(IEngine):
def __init__(self, args, server):
super(ParamServer, self).__init__(args)
self.server = server
def run(self):
self.server.join()
class TEngine(IEngine):
def __init__(self, args):
super(TEngine, self).__init__(args)
self.mts_master = ''
self.mts_is_chief = True
self.tid = 0
def get_hooks(self):
hooks = [tf.train.StopAtStepHook(last_step=self.trainer.total_iter)]
if self.args.viewinitckpt:
class PretrainLoader(tf.train.SessionRunHook):
def __init__(self, advcore, ckpt):
self.advcore = advcore
self.ckpt = ckpt
def after_create_session(self, session, coord):
self.advcore.load_pretrain(session, self.ckpt)
print("PretrainLoader.after_create_session called")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir=self.args.ckptdir)
# Do NOT load the pretrained weights if checkpoint exists.
if not (ckpt and ckpt.model_checkpoint_path):
hooks += [PretrainLoader(self.advcore, self.args.viewinitckpt)]
return hooks
def _create_trainer(self, args):
self.bnorm = tf.placeholder(tf.bool, shape=()) if args.batchnorm else None
self.gs = tf.contrib.framework.get_or_create_global_step()
self.trainer, self.advcore = create_trainer(args, self.gs, batch_normalization=self.bnorm)
def run(self):
hooks = self.get_hooks()
# Create MonitoredTrainingSession to BOTH training and evaluation, since it's RL
#
# Note: we need to disable summaries and write it manually, because the
# summary ops are evaluated in every mon_sess.run(), and there is no way to disable it for evaluation
with tf.train.MonitoredTrainingSession(master=self.mts_master,
is_chief=self.mts_is_chief,
checkpoint_dir=self.args.ckptdir,
config=self.session_config,
save_summaries_steps=0,
save_summaries_secs=0,
save_checkpoint_secs=600,
hooks=hooks) as mon_sess:
while not mon_sess.should_stop():
self.trainer.train(self.envir, mon_sess, self.tid)
class CentralizedTrainer(TEngine):
def __init__(self, args):
super(CentralizedTrainer, self).__init__(args)
self._envirs = [AlphaPuzzle(args, aid, aid) for aid in range(self.args.agents)]
self._create_trainer(args)
self.envir_picker = 0
@property
def envir(self):
ret = self._envirs[self.envir_picker]
self.envir_picker = (self.envir_picker + 1) % self.args.agents
return ret
'''
DistributedTrainer:
Enable the distribution by:
1. Create model under different tf.device
2. set self.mts_* to enable distributed MonitoredTrainingSession in TEngine.run
'''
class DistributedTrainer(TEngine):
def __init__(self, args, cluster, server, mpqueue):
assert args.period >= 0, "--period must be explicitly listed for distributed training"
super(DistributedTrainer, self).__init__(args)
self.tid = args.task_index
self.envir = AlphaPuzzle(args, self.tid, self.tid)
self.cluster = cluster
self.server = server
# Enable distributed training
with tf.device(tf.train.replica_device_setter(
worker_device="/job:worker/task:{}".format(args.task_index),
cluster=cluster)):
self._create_trainer(args)
self.trainer.install_mpqueue_as(mpqueue, args.task_index)
self.mts_master = self.server.target
self.mts_is_chief = (args.task_index == 0)
class Evaluator(IEngine):
def __init__(self, args):
super(Evaluator, self).__init__(args)
self.gs = tf.contrib.framework.get_or_create_global_step()
self.g = tf.get_default_graph()
self.player = rlsampler.create_visualizer(args, self.g, self.gs)
def run(self):
args = self.args
saver = tf.train.Saver()
with tf.Session(config=self.session_config) as sess:
tf.get_default_graph().finalize()
if args.viewinitckpt:
self.player.advcore.load_pretrain(sess, args.viewinitckpt)
print("Load from viewinitckpt {}".format(args.viewinitckpt))
if self.player.mandatory_ckpt:
assert args.ckptdir, "--ckptdir is mandatory when --eval"
if args.ckptdir:
ckpt = tf.train.get_checkpoint_state(checkpoint_dir=args.ckptdir)
print('ckpt {}'.format(ckpt))
if self.player.mandatory_ckpt:
assert ckpt is not None, "Missing actual checkpoints at --ckptdir"
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
accum_epoch = sess.run(self.gs)
print('Restored!, global_step {}'.format(accum_epoch))
self.player.attach(sess)
self.player.play()
def curiosity_create_engine(args, mpqueue):
if args.eval:
return Evaluator(args)
cluster_dict = rlutil.create_cluster_dic(args)
if cluster_dict is None:
return CentralizedTrainer(args)
assert mpqueue is not None, "[curiosity_create_engine] MP training requires a mp.Queue object "
# assert False, "Not testing DistributedTrainer for now"
# Create a cluster from the parameter server and worker hosts.
cluster = tf.train.ClusterSpec(cluster_dict)
# Create and start a server for the local task.
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
server = tf.train.Server(cluster,
job_name=args.job_name,
task_index=args.task_index,
config=session_config)
if args.job_name == 'ps':
engine = ParamServer(args, server)
else:
assert args.job_name == 'worker', "--job_name should be either ps or worker"
engine = DistributedTrainer(args, cluster, server, mpqueue)
return engine
'''
Main Function:
1. Create TF graphs by creating Facade class TrainingManager
- This facade class will create corresponding training class on demand
1.a Alternatively, call rlsampler.create_visualizer to evaluate the traing results
2. Initialize TF sessions and TF Saver
3. Restore from checkpoints on demand
3. Call TrainingManager for some iterations on demand
'''
def process_main(args, mpqueue=None):
'''
CAVEAT: WITHOUT ALLOW_GRWTH, WE MUST CREATE RENDERER BEFORE CALLING ANY TF ROUTINE
'''
pyosr.init()
dpy = pyosr.create_display()
glctx = pyosr.create_gl_context(dpy)
# Create Training/Evaluation Engine
engine = curiosity_create_engine(args, mpqueue=mpqueue)
# Engine execution
engine.run()
def curiosity_main(args):
if args.localcluster_nsampler <= 0 and not args.ps_hosts:
process_main(args)
return
# Distributed execution
args_list = rlutil.assemble_distributed_arguments(args)
mgr = mp.Manager()
mpq = mgr.Queue()
procs = [mp.Process(target=process_main, args=(a, mpq)) for a in args_list]
for p in procs:
p.start()
for p in procs:
p.join()
def main():
args = rlargs.parse()
if args.continuetrain:
if args.samplein:
print('--continuetrain is incompatible with --samplein')
exit()
if args.batching:
print('--continuetrain is incompatible with --batching')
exit()
if -1 in args.actionset:
args.actionset = [i for i in range(12)]
assert args.threads == 1, "--threads has no effect in distributed training"
args.total_sample = args.iter * args.threads
args.total_epoch = args.total_sample / args.samplebatching
print("> Arguments {}".format(args))
curiosity_main(args)
if __name__ == '__main__':
main()
| create_trainer | identifier_name |
curiosity-rl.py | '''
curiosity-rl.py
Curiosity driven RL Framework
'''
from __future__ import print_function
import tensorflow as tf
import numpy as np
from scipy.misc import imsave # Unused ... now
import time
import uw_random
import config
import pyosr
import rlargs
import a2c
import a2c_overfit
import a2c_mp
import dqn
import loco_overfit
import tunnel
import random
from six.moves import queue,input
import qtrainer
import ctrainer
import iftrainer
import curiosity
import rlsampler
import rlutil
import multiprocessing as mp
AlphaPuzzle = curiosity.RigidPuzzle
def create_trainer(args, global_step, batch_normalization):
'''
if len(args.egreedy) != 1 and len(args.egreedy) != args.threads:
assert False,"--egreedy should have only one argument, or match the number of threads"
'''
advcore = curiosity.create_advcore(learning_rate=1e-3, args=args, batch_normalization=batch_normalization)
bnorm = batch_normalization
if 'a2c' in args.train:
if args.threads > 1:
TRAINER = a2c.A2CTrainerDTT
else:
TRAINER = a2c.A2CTrainer
if 'a2c_overfit' in args.train:
TRAINER = a2c_overfit.OverfitTrainer
if args.localcluster_nsampler > 0:
TRAINER = a2c_mp.MPA2CTrainer
if args.train == 'a2c_overfit_from_fv':
TRAINER = a2c_overfit.OverfitTrainerFromFV
train_everything = False if args.viewinitckpt else True
trainer = TRAINER(
advcore=advcore,
tmax=args.batch,
gamma=args.GAMMA,
# gamma=0.5,
learning_rate=5e-5,
ckpt_dir=args.ckptdir,
global_step=global_step,
batch_normalization=bnorm,
total_number_of_replicas=args.localcluster_nsampler,
period=args.period,
LAMBDA=args.LAMBDA,
train_everything=train_everything)
elif args.train in ['dqn', 'dqn_overfit']:
TRAINER = dqn.DQNTrainerMP if args.localcluster_nsampler > 0 else dqn.DQNTrainer
trainer = TRAINER(
advcore=advcore,
args=args,
learning_rate=1e-4,
batch_normalization=bnorm)
elif args.train in ['loco_overfit']:
assert args.localcluster_nsampler <= 0, 'loco_overfit does not support MP training'
TRAINER = loco_overfit.LocoOverfitter
trainer = TRAINER(
advcore=advcore,
args=args,
learning_rate=1e-4,
batch_normalization=bnorm)
elif 'tunnel_finder' in args.train:
if args.train == 'tunnel_finder':
TRAINER = tunnel.TunnelFinderTrainer
elif args.train == 'tunnel_finder_twin1':
TRAINER = tunnel.TunnelFinderTwinTrainer
elif args.train == 'tunnel_finder_foreach1':
TRAINER = tunnel.TunnelFinderForEach1Trainer
else:
assert False, 'Unknown --train {}'.format(args.train)
trainer = TRAINER(
advcore=advcore,
args=args,
learning_rate=5e-5,
batch_normalization=bnorm)
elif args.train in ['QwithGT', 'QandFCFE', 'q_overfit'] or args.qlearning_with_gt:
trainer = qtrainer.QTrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step,
train_fcfe=(args.train == 'QandFCFE'),
train_everything=(args.train == 'q_overfit'))
if args.qlearning_gt_file:
trainer.attach_gt(args.qlearning_gt_file)
if args.samplein:
trainer.attach_gt(args.samplein)
elif args.train == 'curiosity':
trainer = ctrainer.CTrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step)
trainer.set_action_set(args.actionset)
trainer.limit_samples_to_use(args.sampletouse)
if args.samplein != '':
trainer.attach_samplein(args.samplein)
elif args.train == 'InF':
# TODO: allow samples from files
# Note: precomputed samples have one problem:
# Actions cannot be translated to new permutations
trainer = iftrainer.IFTrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step)
elif args.train == 'Ionly':
# SAN Check: Only optimize agains Inverse Model
# Should work pretty well after loading pretrained weights
trainer = iftrainer.ITrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step)
else:
assert False, '--train {} not implemented yet'.format(args.train)
return trainer, advcore
#
# IEngine: wrapper over distributed training and non-distributed evaluation
#
class IEngine(object):
def __init__(self, args):
self.args = args
if 'gpu' in args.device:
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
else:
session_config = None
self.session_config = session_config
def run(self, sess):
pass
class ParamServer(IEngine):
def __init__(self, args, server):
super(ParamServer, self).__init__(args)
self.server = server
def run(self):
self.server.join()
class TEngine(IEngine):
def __init__(self, args):
super(TEngine, self).__init__(args)
self.mts_master = ''
self.mts_is_chief = True
self.tid = 0
def get_hooks(self):
hooks = [tf.train.StopAtStepHook(last_step=self.trainer.total_iter)]
if self.args.viewinitckpt:
|
return hooks
def _create_trainer(self, args):
self.bnorm = tf.placeholder(tf.bool, shape=()) if args.batchnorm else None
self.gs = tf.contrib.framework.get_or_create_global_step()
self.trainer, self.advcore = create_trainer(args, self.gs, batch_normalization=self.bnorm)
def run(self):
hooks = self.get_hooks()
# Create MonitoredTrainingSession to BOTH training and evaluation, since it's RL
#
# Note: we need to disable summaries and write it manually, because the
# summary ops are evaluated in every mon_sess.run(), and there is no way to disable it for evaluation
with tf.train.MonitoredTrainingSession(master=self.mts_master,
is_chief=self.mts_is_chief,
checkpoint_dir=self.args.ckptdir,
config=self.session_config,
save_summaries_steps=0,
save_summaries_secs=0,
save_checkpoint_secs=600,
hooks=hooks) as mon_sess:
while not mon_sess.should_stop():
self.trainer.train(self.envir, mon_sess, self.tid)
class CentralizedTrainer(TEngine):
def __init__(self, args):
super(CentralizedTrainer, self).__init__(args)
self._envirs = [AlphaPuzzle(args, aid, aid) for aid in range(self.args.agents)]
self._create_trainer(args)
self.envir_picker = 0
@property
def envir(self):
ret = self._envirs[self.envir_picker]
self.envir_picker = (self.envir_picker + 1) % self.args.agents
return ret
'''
DistributedTrainer:
Enable the distribution by:
1. Create model under different tf.device
2. set self.mts_* to enable distributed MonitoredTrainingSession in TEngine.run
'''
class DistributedTrainer(TEngine):
def __init__(self, args, cluster, server, mpqueue):
assert args.period >= 0, "--period must be explicitly listed for distributed training"
super(DistributedTrainer, self).__init__(args)
self.tid = args.task_index
self.envir = AlphaPuzzle(args, self.tid, self.tid)
self.cluster = cluster
self.server = server
# Enable distributed training
with tf.device(tf.train.replica_device_setter(
worker_device="/job:worker/task:{}".format(args.task_index),
cluster=cluster)):
self._create_trainer(args)
self.trainer.install_mpqueue_as(mpqueue, args.task_index)
self.mts_master = self.server.target
self.mts_is_chief = (args.task_index == 0)
class Evaluator(IEngine):
def __init__(self, args):
super(Evaluator, self).__init__(args)
self.gs = tf.contrib.framework.get_or_create_global_step()
self.g = tf.get_default_graph()
self.player = rlsampler.create_visualizer(args, self.g, self.gs)
def run(self):
args = self.args
saver = tf.train.Saver()
with tf.Session(config=self.session_config) as sess:
tf.get_default_graph().finalize()
if args.viewinitckpt:
self.player.advcore.load_pretrain(sess, args.viewinitckpt)
print("Load from viewinitckpt {}".format(args.viewinitckpt))
if self.player.mandatory_ckpt:
assert args.ckptdir, "--ckptdir is mandatory when --eval"
if args.ckptdir:
ckpt = tf.train.get_checkpoint_state(checkpoint_dir=args.ckptdir)
print('ckpt {}'.format(ckpt))
if self.player.mandatory_ckpt:
assert ckpt is not None, "Missing actual checkpoints at --ckptdir"
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
accum_epoch = sess.run(self.gs)
print('Restored!, global_step {}'.format(accum_epoch))
self.player.attach(sess)
self.player.play()
def curiosity_create_engine(args, mpqueue):
if args.eval:
return Evaluator(args)
cluster_dict = rlutil.create_cluster_dic(args)
if cluster_dict is None:
return CentralizedTrainer(args)
assert mpqueue is not None, "[curiosity_create_engine] MP training requires a mp.Queue object "
# assert False, "Not testing DistributedTrainer for now"
# Create a cluster from the parameter server and worker hosts.
cluster = tf.train.ClusterSpec(cluster_dict)
# Create and start a server for the local task.
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
server = tf.train.Server(cluster,
job_name=args.job_name,
task_index=args.task_index,
config=session_config)
if args.job_name == 'ps':
engine = ParamServer(args, server)
else:
assert args.job_name == 'worker', "--job_name should be either ps or worker"
engine = DistributedTrainer(args, cluster, server, mpqueue)
return engine
'''
Main Function:
1. Create TF graphs by creating Facade class TrainingManager
- This facade class will create corresponding training class on demand
1.a Alternatively, call rlsampler.create_visualizer to evaluate the traing results
2. Initialize TF sessions and TF Saver
3. Restore from checkpoints on demand
3. Call TrainingManager for some iterations on demand
'''
def process_main(args, mpqueue=None):
'''
CAVEAT: WITHOUT ALLOW_GRWTH, WE MUST CREATE RENDERER BEFORE CALLING ANY TF ROUTINE
'''
pyosr.init()
dpy = pyosr.create_display()
glctx = pyosr.create_gl_context(dpy)
# Create Training/Evaluation Engine
engine = curiosity_create_engine(args, mpqueue=mpqueue)
# Engine execution
engine.run()
def curiosity_main(args):
if args.localcluster_nsampler <= 0 and not args.ps_hosts:
process_main(args)
return
# Distributed execution
args_list = rlutil.assemble_distributed_arguments(args)
mgr = mp.Manager()
mpq = mgr.Queue()
procs = [mp.Process(target=process_main, args=(a, mpq)) for a in args_list]
for p in procs:
p.start()
for p in procs:
p.join()
def main():
args = rlargs.parse()
if args.continuetrain:
if args.samplein:
print('--continuetrain is incompatible with --samplein')
exit()
if args.batching:
print('--continuetrain is incompatible with --batching')
exit()
if -1 in args.actionset:
args.actionset = [i for i in range(12)]
assert args.threads == 1, "--threads has no effect in distributed training"
args.total_sample = args.iter * args.threads
args.total_epoch = args.total_sample / args.samplebatching
print("> Arguments {}".format(args))
curiosity_main(args)
if __name__ == '__main__':
main()
| class PretrainLoader(tf.train.SessionRunHook):
def __init__(self, advcore, ckpt):
self.advcore = advcore
self.ckpt = ckpt
def after_create_session(self, session, coord):
self.advcore.load_pretrain(session, self.ckpt)
print("PretrainLoader.after_create_session called")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir=self.args.ckptdir)
# Do NOT load the pretrained weights if checkpoint exists.
if not (ckpt and ckpt.model_checkpoint_path):
hooks += [PretrainLoader(self.advcore, self.args.viewinitckpt)] | conditional_block |
curiosity-rl.py | '''
curiosity-rl.py
Curiosity driven RL Framework
'''
from __future__ import print_function
import tensorflow as tf
import numpy as np
from scipy.misc import imsave # Unused ... now
import time
import uw_random
import config
import pyosr
import rlargs
import a2c
import a2c_overfit
import a2c_mp
import dqn
import loco_overfit
import tunnel
import random
from six.moves import queue,input
import qtrainer
import ctrainer
import iftrainer
import curiosity
import rlsampler
import rlutil
import multiprocessing as mp
AlphaPuzzle = curiosity.RigidPuzzle
def create_trainer(args, global_step, batch_normalization):
'''
if len(args.egreedy) != 1 and len(args.egreedy) != args.threads:
assert False,"--egreedy should have only one argument, or match the number of threads"
'''
advcore = curiosity.create_advcore(learning_rate=1e-3, args=args, batch_normalization=batch_normalization)
bnorm = batch_normalization
if 'a2c' in args.train:
if args.threads > 1:
TRAINER = a2c.A2CTrainerDTT
else:
TRAINER = a2c.A2CTrainer
if 'a2c_overfit' in args.train:
TRAINER = a2c_overfit.OverfitTrainer
if args.localcluster_nsampler > 0:
TRAINER = a2c_mp.MPA2CTrainer
if args.train == 'a2c_overfit_from_fv':
TRAINER = a2c_overfit.OverfitTrainerFromFV
train_everything = False if args.viewinitckpt else True
trainer = TRAINER(
advcore=advcore,
tmax=args.batch,
gamma=args.GAMMA,
# gamma=0.5,
learning_rate=5e-5,
ckpt_dir=args.ckptdir,
global_step=global_step,
batch_normalization=bnorm,
total_number_of_replicas=args.localcluster_nsampler,
period=args.period,
LAMBDA=args.LAMBDA,
train_everything=train_everything)
elif args.train in ['dqn', 'dqn_overfit']:
TRAINER = dqn.DQNTrainerMP if args.localcluster_nsampler > 0 else dqn.DQNTrainer
trainer = TRAINER(
advcore=advcore,
args=args,
learning_rate=1e-4,
batch_normalization=bnorm)
elif args.train in ['loco_overfit']:
assert args.localcluster_nsampler <= 0, 'loco_overfit does not support MP training'
TRAINER = loco_overfit.LocoOverfitter
trainer = TRAINER(
advcore=advcore,
args=args,
learning_rate=1e-4,
batch_normalization=bnorm)
elif 'tunnel_finder' in args.train:
if args.train == 'tunnel_finder':
TRAINER = tunnel.TunnelFinderTrainer
elif args.train == 'tunnel_finder_twin1':
TRAINER = tunnel.TunnelFinderTwinTrainer
elif args.train == 'tunnel_finder_foreach1':
TRAINER = tunnel.TunnelFinderForEach1Trainer
else:
assert False, 'Unknown --train {}'.format(args.train)
trainer = TRAINER(
advcore=advcore,
args=args,
learning_rate=5e-5,
batch_normalization=bnorm)
elif args.train in ['QwithGT', 'QandFCFE', 'q_overfit'] or args.qlearning_with_gt:
trainer = qtrainer.QTrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step,
train_fcfe=(args.train == 'QandFCFE'),
train_everything=(args.train == 'q_overfit'))
if args.qlearning_gt_file:
trainer.attach_gt(args.qlearning_gt_file)
if args.samplein:
trainer.attach_gt(args.samplein)
elif args.train == 'curiosity':
trainer = ctrainer.CTrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step)
trainer.set_action_set(args.actionset)
trainer.limit_samples_to_use(args.sampletouse)
if args.samplein != '':
trainer.attach_samplein(args.samplein)
elif args.train == 'InF':
# TODO: allow samples from files
# Note: precomputed samples have one problem:
# Actions cannot be translated to new permutations
trainer = iftrainer.IFTrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step)
elif args.train == 'Ionly':
# SAN Check: Only optimize agains Inverse Model
# Should work pretty well after loading pretrained weights
trainer = iftrainer.ITrainer(
advcore=advcore,
batch=args.batch,
learning_rate=1e-4,
ckpt_dir=args.ckptdir,
period=args.period,
global_step=global_step)
else:
assert False, '--train {} not implemented yet'.format(args.train)
return trainer, advcore
#
# IEngine: wrapper over distributed training and non-distributed evaluation
#
class IEngine(object):
def __init__(self, args):
self.args = args
if 'gpu' in args.device:
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
else:
session_config = None
self.session_config = session_config
def run(self, sess):
pass
class ParamServer(IEngine):
def __init__(self, args, server):
super(ParamServer, self).__init__(args)
self.server = server
def run(self):
self.server.join()
class TEngine(IEngine):
def __init__(self, args):
|
def get_hooks(self):
hooks = [tf.train.StopAtStepHook(last_step=self.trainer.total_iter)]
if self.args.viewinitckpt:
class PretrainLoader(tf.train.SessionRunHook):
def __init__(self, advcore, ckpt):
self.advcore = advcore
self.ckpt = ckpt
def after_create_session(self, session, coord):
self.advcore.load_pretrain(session, self.ckpt)
print("PretrainLoader.after_create_session called")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir=self.args.ckptdir)
# Do NOT load the pretrained weights if checkpoint exists.
if not (ckpt and ckpt.model_checkpoint_path):
hooks += [PretrainLoader(self.advcore, self.args.viewinitckpt)]
return hooks
def _create_trainer(self, args):
self.bnorm = tf.placeholder(tf.bool, shape=()) if args.batchnorm else None
self.gs = tf.contrib.framework.get_or_create_global_step()
self.trainer, self.advcore = create_trainer(args, self.gs, batch_normalization=self.bnorm)
def run(self):
hooks = self.get_hooks()
# Create MonitoredTrainingSession to BOTH training and evaluation, since it's RL
#
# Note: we need to disable summaries and write it manually, because the
# summary ops are evaluated in every mon_sess.run(), and there is no way to disable it for evaluation
with tf.train.MonitoredTrainingSession(master=self.mts_master,
is_chief=self.mts_is_chief,
checkpoint_dir=self.args.ckptdir,
config=self.session_config,
save_summaries_steps=0,
save_summaries_secs=0,
save_checkpoint_secs=600,
hooks=hooks) as mon_sess:
while not mon_sess.should_stop():
self.trainer.train(self.envir, mon_sess, self.tid)
class CentralizedTrainer(TEngine):
def __init__(self, args):
super(CentralizedTrainer, self).__init__(args)
self._envirs = [AlphaPuzzle(args, aid, aid) for aid in range(self.args.agents)]
self._create_trainer(args)
self.envir_picker = 0
@property
def envir(self):
ret = self._envirs[self.envir_picker]
self.envir_picker = (self.envir_picker + 1) % self.args.agents
return ret
'''
DistributedTrainer:
Enable the distribution by:
1. Create model under different tf.device
2. set self.mts_* to enable distributed MonitoredTrainingSession in TEngine.run
'''
class DistributedTrainer(TEngine):
def __init__(self, args, cluster, server, mpqueue):
assert args.period >= 0, "--period must be explicitly listed for distributed training"
super(DistributedTrainer, self).__init__(args)
self.tid = args.task_index
self.envir = AlphaPuzzle(args, self.tid, self.tid)
self.cluster = cluster
self.server = server
# Enable distributed training
with tf.device(tf.train.replica_device_setter(
worker_device="/job:worker/task:{}".format(args.task_index),
cluster=cluster)):
self._create_trainer(args)
self.trainer.install_mpqueue_as(mpqueue, args.task_index)
self.mts_master = self.server.target
self.mts_is_chief = (args.task_index == 0)
class Evaluator(IEngine):
def __init__(self, args):
super(Evaluator, self).__init__(args)
self.gs = tf.contrib.framework.get_or_create_global_step()
self.g = tf.get_default_graph()
self.player = rlsampler.create_visualizer(args, self.g, self.gs)
def run(self):
args = self.args
saver = tf.train.Saver()
with tf.Session(config=self.session_config) as sess:
tf.get_default_graph().finalize()
if args.viewinitckpt:
self.player.advcore.load_pretrain(sess, args.viewinitckpt)
print("Load from viewinitckpt {}".format(args.viewinitckpt))
if self.player.mandatory_ckpt:
assert args.ckptdir, "--ckptdir is mandatory when --eval"
if args.ckptdir:
ckpt = tf.train.get_checkpoint_state(checkpoint_dir=args.ckptdir)
print('ckpt {}'.format(ckpt))
if self.player.mandatory_ckpt:
assert ckpt is not None, "Missing actual checkpoints at --ckptdir"
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
accum_epoch = sess.run(self.gs)
print('Restored!, global_step {}'.format(accum_epoch))
self.player.attach(sess)
self.player.play()
def curiosity_create_engine(args, mpqueue):
if args.eval:
return Evaluator(args)
cluster_dict = rlutil.create_cluster_dic(args)
if cluster_dict is None:
return CentralizedTrainer(args)
assert mpqueue is not None, "[curiosity_create_engine] MP training requires a mp.Queue object "
# assert False, "Not testing DistributedTrainer for now"
# Create a cluster from the parameter server and worker hosts.
cluster = tf.train.ClusterSpec(cluster_dict)
# Create and start a server for the local task.
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
server = tf.train.Server(cluster,
job_name=args.job_name,
task_index=args.task_index,
config=session_config)
if args.job_name == 'ps':
engine = ParamServer(args, server)
else:
assert args.job_name == 'worker', "--job_name should be either ps or worker"
engine = DistributedTrainer(args, cluster, server, mpqueue)
return engine
'''
Main Function:
1. Create TF graphs by creating Facade class TrainingManager
- This facade class will create corresponding training class on demand
1.a Alternatively, call rlsampler.create_visualizer to evaluate the traing results
2. Initialize TF sessions and TF Saver
3. Restore from checkpoints on demand
3. Call TrainingManager for some iterations on demand
'''
def process_main(args, mpqueue=None):
'''
CAVEAT: WITHOUT ALLOW_GRWTH, WE MUST CREATE RENDERER BEFORE CALLING ANY TF ROUTINE
'''
pyosr.init()
dpy = pyosr.create_display()
glctx = pyosr.create_gl_context(dpy)
# Create Training/Evaluation Engine
engine = curiosity_create_engine(args, mpqueue=mpqueue)
# Engine execution
engine.run()
def curiosity_main(args):
if args.localcluster_nsampler <= 0 and not args.ps_hosts:
process_main(args)
return
# Distributed execution
args_list = rlutil.assemble_distributed_arguments(args)
mgr = mp.Manager()
mpq = mgr.Queue()
procs = [mp.Process(target=process_main, args=(a, mpq)) for a in args_list]
for p in procs:
p.start()
for p in procs:
p.join()
def main():
args = rlargs.parse()
if args.continuetrain:
if args.samplein:
print('--continuetrain is incompatible with --samplein')
exit()
if args.batching:
print('--continuetrain is incompatible with --batching')
exit()
if -1 in args.actionset:
args.actionset = [i for i in range(12)]
assert args.threads == 1, "--threads has no effect in distributed training"
args.total_sample = args.iter * args.threads
args.total_epoch = args.total_sample / args.samplebatching
print("> Arguments {}".format(args))
curiosity_main(args)
if __name__ == '__main__':
main()
| super(TEngine, self).__init__(args)
self.mts_master = ''
self.mts_is_chief = True
self.tid = 0 | identifier_body |
settings.go | package settingscontroller
import (
"encoding/json"
"net/http"
"os"
"reflect"
"rest-api-golang/src/dbContext/companyservice"
"rest-api-golang/src/dbContext/logssystemservice"
"rest-api-golang/src/dbContext/logsuserservice"
"rest-api-golang/src/dbContext/privilegeroluserservice"
"rest-api-golang/src/dbContext/rolesuserservice"
"rest-api-golang/src/dbContext/sessionuserservice"
"rest-api-golang/src/dbContext/usersservice"
"rest-api-golang/src/models"
"rest-api-golang/src/models/authinterfaces"
"rest-api-golang/src/utils"
u "rest-api-golang/src/utils"
"strings"
"time"
"github.com/dgrijalva/jwt-go"
)
var Login = func(w http.ResponseWriter, r *http.Request) {
var tokenString string
var expireDate time.Time
var msg string
user := &authinterfaces.LoginUser{}
err := json.NewDecoder(r.Body).Decode(user)
if err != nil {
u.Respond(w, u.Message(false, "Invalid request"))
return
}
checkUser := usersservice.CheckUserPasswordForEmail(user.User, user.Password)
if checkUser.NickName != "" {
if checkUser.Status == 1 {
//generate session User
newSession := authinterfaces.SessionUser{
Token: "",
Active: true,
DateAdd: time.Now(),
IdCompany: checkUser.IdCompany,
IdUser: checkUser.ID,
Remember: user.Remember,
}
getIdSession := sessionuserservice.Add(newSession)
//generate token
user.Password = ""
tk := &authinterfaces.Token{IdSession: getIdSession}
token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), tk)
generateToken, _ := token.SignedString([]byte(os.Getenv("TOKEN_HASH")))
tokenString = generateToken
//define expire token
now := time.Now()
expireDate = now
if user.Remember | else {
expireDate = now.Add(2 * time.Minute)
}
//update IdSession
sessionUser := sessionuserservice.FindToId(getIdSession)
sessionUser.Token = tokenString
sessionUser.TokenExpire = expireDate
sessionuserservice.UpdateOne(sessionUser)
logUser := "Inicio Session .."
logsuserservice.Add(1, checkUser.ID, checkUser.IdCompany, logUser)
msg = "Usuario Logeado "
} else {
logUser := "Intento Loggin User Desactivado .."
logsuserservice.Add(1, checkUser.ID, checkUser.IdCompany, logUser)
msg = "Usuario Desactivado.."
}
} else {
logSystem := "Intento Login Fallido usuario : " + user.User + " "
logssystemservice.Add(3, logSystem)
msg = "Usuario Invalido"
}
//fmt.Println(checkUser)
resp := u.Message(true, msg)
resp["token"] = tokenString
resp["expire"] = expireDate
resp["msg"] = msg
u.Respond(w, resp)
}
var LoginUser = func(w http.ResponseWriter, r *http.Request) {
// tokenHeader := r.Header.Get("Authorization") //Grab the token from the header
// idSession := sessionuserservice.GetClaimForToken(tokenHeader)
// fmt.Println(idSession.User.NickName)
// list := logsuserservice.GetList()
// emp := list
// e, err := json.Marshal(emp)
// if err != nil {
// fmt.Println(err)
// return
// }
// fmt.Println(string(e))
// err := json.Unmarshal([]byte(list), &models.LogUser)
// if err != nil {
// }
// // jsonInfo, _ := json.Marshal(list)
// fmt.Println(string(list))
// newSession := authinterfaces.SessionUser{
// IdUser: "5e7a3d93a248a6e1c5d6698b",
// Active: true,
// DateAdd: time.Now(),
// IdCompany: "asdasd",
// Token: "asdsadsadsadsda",
// TokenExpire: time.Now(),
// Remember: true,
// }
// sessionuserservice.Add(newSession)
//fmt.Println(result)
// user.Password = ""
// tk := &authinterfaces.Token{UserId: user.Email}
// token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), tk)
// tokenString, _ := token.SignedString([]byte(os.Getenv("TOKEN_HASH")))
resp := u.Message(true, "Successful")
resp["data"] = true
resp["token"] = ""
u.Respond(w, resp)
}
type Bird struct {
Id string
Name string
Id2 string
}
func Filter(arr interface{}, cond func(interface{}) bool) interface{} {
contentType := reflect.TypeOf(arr)
contentValue := reflect.ValueOf(arr)
newContent := reflect.MakeSlice(contentType, 0, 0)
for i := 0; i < contentValue.Len(); i++ {
if content := contentValue.Index(i); cond(content.Interface()) {
newContent = reflect.Append(newContent, content)
}
}
return newContent.Interface()
}
func isNoBarAndLessThanTenChar(a models.GenericList, value1 string) bool {
return !strings.HasPrefix(a.Value1, value1)
}
var GetUser = func(w http.ResponseWriter, r *http.Request) {
// listProvinces := genericlistservice.GetListForIdCompanyAndIdentity("", "provinces")
// list := genericlistservice.GetListForIdCompanyAndIdentity("", "municipies")
// for _, s := range listProvinces {
// fmt.Printf("%s ==> %s %s \n", s.IdKey, s.Name, s.Value1)
// for _, m := range list {
// if m.Value1 == s.IdKey {
// fmt.Println("************ " + m.Name)
// }
// }
// // result := Choose(list, isNoBarAndLessThanTenChar)
// // fmt.Println(result) // [foo_super]
// }
// var bird []Bird
// `[{"id":"1","name":"Name","id2":"4"},
// {"id":"1","Name":"Name","Id2":"4"}]`
// json.Unmarshal([]byte(myJsonString), &bird)
// for _, s := range bird {
// fmt.Printf("%s ==> %s %s \n", s.Id, s.Name, s.Id2)
// newGeneric := models.GenericList{
// IdKey: s.Id,
// Name: s.Name,
// Date: time.Now(),
// Identity: "municipies",
// Status: 1,
// Value1: s.Id2,
// IdCompany: "",
// Note: "Municipie " + s.Name + " " + " IdProvince is Value1 => " + s.Id2,
// }
// genericlistservice.Add(newGeneric)
//
// fmt.Println(result)
// newCompany := authinterfaces.Company{
// Address: "Direccion ",
// DateAdd: time.Now(),
// Image: "logo.png",
// NameLong: "Nombre Largo Empresa",
// NameShort: "Nombre Corto",
// Other: "Otros Datos",
// Phone: "809-561-2512 / 809-245-5444",
// Rnc: "001-0215211-0",
// Slogan: "Slogan Company",
// Status: 1,
// }
// result := companyservice.Add(newCompany)
// fmt.Println(result)
// for i := 0; i <= 5000; i++ {
// newLogSystem := models.LogSystem{
// Log: "Update :" + strconv.Itoa(i),
// Level: 1,
// Status: 1,
// Date: time.Now(),
// }
// logssystemservice.Add(newLogSystem)
// //time.Sleep(5 * time.Second)
// }
//list := logssystemservice.GetList()
// list := usersservice.GetList()
// for i, item := range list {
// fmt.Println(item.Log, strconv.Itoa(i))
// }
// result := usersservice.FindToId("5e795d655d554045401496e6")
// result.NickName = "ADMIN23"
// fmt.Println(usersservice.UpdateOne(result))
// fmt.Println(result.ID)
// newUser := models.IUser{
// DateAdd: time.Now(),
// IdCompany: "55555555",
// IdRol: "144444",
// Image: "imagen",
// LastLogin: time.Now(),
// LastName: "apellido",
// Name: "NOmbre",
// Password: utils.Encript([]byte("231154")),
// Status: 1,
// NickName: "usuario1",
// }
// result2 := usersservice.Add(newUser)
// fmt.Println(result2)
// profileService.Demo()
// usersservice.Demo()
// profile := &models.Profile{Name: "Juan", Age: 50, LastUpdated: time.Now(), Password: "passwrod"}
// dbContext.InsertNew(*profile)
// context := usersservice.GetList()
// // //user := dbContext.GetList()
// //fmt.Println(context)
// for i, item := range context {
// fmt.Println(item.Name, strconv.Itoa(i))
// }
// h := sha256.Sum256([]byte("demo"))
// h.Write([]byte("demo"))
// b := h.Sum(nil)
// fmt.Println(h)
// user1 := &authinterfaces.User{}
// user2 := &authinterfaces.User{}
// user1.Email = "cao.trung.thu@mail.com"
// user2.Email = "cao.trung.thu@hot.com"
// var users [2]*authinterfaces.User
// users[0] = user1
// users[1] = user2
// resp := u.Message(true, "Successful")
// resp["data"] = users
// u.Respond(w, resp)
user := &authinterfaces.LoginUser{}
// err := json.NewDecoder(r.Body).Decode(user)
// if err != nil {
// u.Respond(w, u.Message(false, "Invalid request"))
// return
// }
user.Password = "25554"
user.User = "Castro2354"
//tk := &authinterfaces.Token{UserId: user.Email}
//token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), tk)
//tokenString, _ := token.SignedString([]byte(os.Getenv("TOKEN_HASH")))
resp := u.Message(true, "Successful")
resp["data"] = true
//resp["token"] = tokenString
u.Respond(w, resp)
}
var AddCompany = func(w http.ResponseWriter, r *http.Request) {
//AddCompany
newCompany := authinterfaces.Company{
Address: "Direccion /c larga #21 demo",
DateAdd: time.Now(),
Image: "logodemo.png",
NameLong: "Nombre EMpresa largo",
NameShort: "Nombre Corto",
Other: "Otras Configuraciones",
Phone: "809-521-2144 / 20-52222",
Rnc: "004-251111-2",
Slogan: "Slogan de Empresa..",
Status: 1,
}
idCompany := companyservice.Add(newCompany)
//create rol User
newRolUser := authinterfaces.RolUser{
IdCompany: idCompany,
Date: time.Now(),
Name: "Administradores",
Note: "Todos los Privilegios",
Status: 1,
}
idRolUser := rolesuserservice.Add(newRolUser)
//add PrivilegeRol
newPrivilege := authinterfaces.PrivilegeRolUser{
IdCompany: idCompany,
IdRol: idRolUser,
WebAccess: true,
Config: 1,
TypeUser: 1,
}
privilegeroluserservice.Add(newPrivilege)
//add new User
newUser := authinterfaces.User{
IdCompany: idCompany,
DateAdd: time.Now(),
City: "Santo Domingo",
Gender: "0",
Contact: "809-545-5444",
IdRol: idRolUser,
Image: "user.png",
LastLogin: time.Now(),
LastName: "Apellido del Usuario",
Name: "Nombre del Usuario",
NickName: strings.ToLower("castro2354"),
Password: utils.Encript([]byte("231154")),
ForcePass: true,
Public: 0,
Status: 1,
Email: "castro@gmail.com",
Note: "Alguna Nota Larga Para el Usuario --> Para describir algo",
}
usersservice.Add(newUser)
//add logs Systems
logssystemservice.Add(1, "Agrego Nueva Empresa..: "+idCompany)
resp := u.Message(true, "Successful")
resp["data"] = "{}"
//resp["token"] = tokenString
u.Respond(w, resp)
}
//Send Config General System
var SendListGeneral = func(w http.ResponseWriter, r *http.Request) {
requestData := &models.RequestListGeneral{}
err := json.NewDecoder(r.Body).Decode(requestData)
if err != nil {
u.Respond(w, u.Message(false, "Invalid request"))
return
}
listSend := usersservice.GetListFromIdCompany("asdsad")
resp := u.Message(false, "ok")
resp["data"] = listSend
// resp["provincies"] = listProvinces
// resp["municipies"] = listMunicipies
u.Respond(w, resp)
}
| {
expireDate = now.AddDate(0, 0, 1)
} | conditional_block |
settings.go | package settingscontroller
import (
"encoding/json"
"net/http"
"os"
"reflect"
"rest-api-golang/src/dbContext/companyservice"
"rest-api-golang/src/dbContext/logssystemservice"
"rest-api-golang/src/dbContext/logsuserservice"
"rest-api-golang/src/dbContext/privilegeroluserservice"
"rest-api-golang/src/dbContext/rolesuserservice"
"rest-api-golang/src/dbContext/sessionuserservice"
"rest-api-golang/src/dbContext/usersservice"
"rest-api-golang/src/models"
"rest-api-golang/src/models/authinterfaces"
"rest-api-golang/src/utils"
u "rest-api-golang/src/utils"
"strings"
"time"
"github.com/dgrijalva/jwt-go"
)
var Login = func(w http.ResponseWriter, r *http.Request) {
var tokenString string
var expireDate time.Time
var msg string
user := &authinterfaces.LoginUser{}
err := json.NewDecoder(r.Body).Decode(user)
if err != nil {
u.Respond(w, u.Message(false, "Invalid request"))
return
}
checkUser := usersservice.CheckUserPasswordForEmail(user.User, user.Password)
if checkUser.NickName != "" {
if checkUser.Status == 1 {
//generate session User
newSession := authinterfaces.SessionUser{
Token: "",
Active: true,
DateAdd: time.Now(),
IdCompany: checkUser.IdCompany,
IdUser: checkUser.ID,
Remember: user.Remember,
}
getIdSession := sessionuserservice.Add(newSession)
//generate token
user.Password = ""
tk := &authinterfaces.Token{IdSession: getIdSession}
token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), tk)
generateToken, _ := token.SignedString([]byte(os.Getenv("TOKEN_HASH")))
tokenString = generateToken
//define expire token
now := time.Now()
expireDate = now
if user.Remember {
expireDate = now.AddDate(0, 0, 1)
} else {
expireDate = now.Add(2 * time.Minute)
}
//update IdSession
sessionUser := sessionuserservice.FindToId(getIdSession)
sessionUser.Token = tokenString
sessionUser.TokenExpire = expireDate
sessionuserservice.UpdateOne(sessionUser)
logUser := "Inicio Session .."
logsuserservice.Add(1, checkUser.ID, checkUser.IdCompany, logUser)
msg = "Usuario Logeado "
} else {
logUser := "Intento Loggin User Desactivado .."
logsuserservice.Add(1, checkUser.ID, checkUser.IdCompany, logUser)
msg = "Usuario Desactivado.."
}
} else {
logSystem := "Intento Login Fallido usuario : " + user.User + " "
logssystemservice.Add(3, logSystem)
msg = "Usuario Invalido"
}
//fmt.Println(checkUser)
resp := u.Message(true, msg)
resp["token"] = tokenString
resp["expire"] = expireDate
resp["msg"] = msg
u.Respond(w, resp)
}
var LoginUser = func(w http.ResponseWriter, r *http.Request) {
// tokenHeader := r.Header.Get("Authorization") //Grab the token from the header
// idSession := sessionuserservice.GetClaimForToken(tokenHeader)
// fmt.Println(idSession.User.NickName)
// list := logsuserservice.GetList()
// emp := list
// e, err := json.Marshal(emp)
// if err != nil {
// fmt.Println(err)
// return
// }
// fmt.Println(string(e))
// err := json.Unmarshal([]byte(list), &models.LogUser)
// if err != nil {
// }
// // jsonInfo, _ := json.Marshal(list)
// fmt.Println(string(list))
// newSession := authinterfaces.SessionUser{
// IdUser: "5e7a3d93a248a6e1c5d6698b",
// Active: true,
// DateAdd: time.Now(),
// IdCompany: "asdasd",
// Token: "asdsadsadsadsda",
// TokenExpire: time.Now(),
// Remember: true,
// }
// sessionuserservice.Add(newSession)
//fmt.Println(result)
// user.Password = ""
// tk := &authinterfaces.Token{UserId: user.Email}
// token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), tk)
// tokenString, _ := token.SignedString([]byte(os.Getenv("TOKEN_HASH")))
resp := u.Message(true, "Successful")
resp["data"] = true
resp["token"] = ""
u.Respond(w, resp)
}
type Bird struct {
Id string
Name string
Id2 string
}
func Filter(arr interface{}, cond func(interface{}) bool) interface{} |
func isNoBarAndLessThanTenChar(a models.GenericList, value1 string) bool {
return !strings.HasPrefix(a.Value1, value1)
}
var GetUser = func(w http.ResponseWriter, r *http.Request) {
// listProvinces := genericlistservice.GetListForIdCompanyAndIdentity("", "provinces")
// list := genericlistservice.GetListForIdCompanyAndIdentity("", "municipies")
// for _, s := range listProvinces {
// fmt.Printf("%s ==> %s %s \n", s.IdKey, s.Name, s.Value1)
// for _, m := range list {
// if m.Value1 == s.IdKey {
// fmt.Println("************ " + m.Name)
// }
// }
// // result := Choose(list, isNoBarAndLessThanTenChar)
// // fmt.Println(result) // [foo_super]
// }
// var bird []Bird
// `[{"id":"1","name":"Name","id2":"4"},
// {"id":"1","Name":"Name","Id2":"4"}]`
// json.Unmarshal([]byte(myJsonString), &bird)
// for _, s := range bird {
// fmt.Printf("%s ==> %s %s \n", s.Id, s.Name, s.Id2)
// newGeneric := models.GenericList{
// IdKey: s.Id,
// Name: s.Name,
// Date: time.Now(),
// Identity: "municipies",
// Status: 1,
// Value1: s.Id2,
// IdCompany: "",
// Note: "Municipie " + s.Name + " " + " IdProvince is Value1 => " + s.Id2,
// }
// genericlistservice.Add(newGeneric)
//
// fmt.Println(result)
// newCompany := authinterfaces.Company{
// Address: "Direccion ",
// DateAdd: time.Now(),
// Image: "logo.png",
// NameLong: "Nombre Largo Empresa",
// NameShort: "Nombre Corto",
// Other: "Otros Datos",
// Phone: "809-561-2512 / 809-245-5444",
// Rnc: "001-0215211-0",
// Slogan: "Slogan Company",
// Status: 1,
// }
// result := companyservice.Add(newCompany)
// fmt.Println(result)
// for i := 0; i <= 5000; i++ {
// newLogSystem := models.LogSystem{
// Log: "Update :" + strconv.Itoa(i),
// Level: 1,
// Status: 1,
// Date: time.Now(),
// }
// logssystemservice.Add(newLogSystem)
// //time.Sleep(5 * time.Second)
// }
//list := logssystemservice.GetList()
// list := usersservice.GetList()
// for i, item := range list {
// fmt.Println(item.Log, strconv.Itoa(i))
// }
// result := usersservice.FindToId("5e795d655d554045401496e6")
// result.NickName = "ADMIN23"
// fmt.Println(usersservice.UpdateOne(result))
// fmt.Println(result.ID)
// newUser := models.IUser{
// DateAdd: time.Now(),
// IdCompany: "55555555",
// IdRol: "144444",
// Image: "imagen",
// LastLogin: time.Now(),
// LastName: "apellido",
// Name: "NOmbre",
// Password: utils.Encript([]byte("231154")),
// Status: 1,
// NickName: "usuario1",
// }
// result2 := usersservice.Add(newUser)
// fmt.Println(result2)
// profileService.Demo()
// usersservice.Demo()
// profile := &models.Profile{Name: "Juan", Age: 50, LastUpdated: time.Now(), Password: "passwrod"}
// dbContext.InsertNew(*profile)
// context := usersservice.GetList()
// // //user := dbContext.GetList()
// //fmt.Println(context)
// for i, item := range context {
// fmt.Println(item.Name, strconv.Itoa(i))
// }
// h := sha256.Sum256([]byte("demo"))
// h.Write([]byte("demo"))
// b := h.Sum(nil)
// fmt.Println(h)
// user1 := &authinterfaces.User{}
// user2 := &authinterfaces.User{}
// user1.Email = "cao.trung.thu@mail.com"
// user2.Email = "cao.trung.thu@hot.com"
// var users [2]*authinterfaces.User
// users[0] = user1
// users[1] = user2
// resp := u.Message(true, "Successful")
// resp["data"] = users
// u.Respond(w, resp)
user := &authinterfaces.LoginUser{}
// err := json.NewDecoder(r.Body).Decode(user)
// if err != nil {
// u.Respond(w, u.Message(false, "Invalid request"))
// return
// }
user.Password = "25554"
user.User = "Castro2354"
//tk := &authinterfaces.Token{UserId: user.Email}
//token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), tk)
//tokenString, _ := token.SignedString([]byte(os.Getenv("TOKEN_HASH")))
resp := u.Message(true, "Successful")
resp["data"] = true
//resp["token"] = tokenString
u.Respond(w, resp)
}
var AddCompany = func(w http.ResponseWriter, r *http.Request) {
//AddCompany
newCompany := authinterfaces.Company{
Address: "Direccion /c larga #21 demo",
DateAdd: time.Now(),
Image: "logodemo.png",
NameLong: "Nombre EMpresa largo",
NameShort: "Nombre Corto",
Other: "Otras Configuraciones",
Phone: "809-521-2144 / 20-52222",
Rnc: "004-251111-2",
Slogan: "Slogan de Empresa..",
Status: 1,
}
idCompany := companyservice.Add(newCompany)
//create rol User
newRolUser := authinterfaces.RolUser{
IdCompany: idCompany,
Date: time.Now(),
Name: "Administradores",
Note: "Todos los Privilegios",
Status: 1,
}
idRolUser := rolesuserservice.Add(newRolUser)
//add PrivilegeRol
newPrivilege := authinterfaces.PrivilegeRolUser{
IdCompany: idCompany,
IdRol: idRolUser,
WebAccess: true,
Config: 1,
TypeUser: 1,
}
privilegeroluserservice.Add(newPrivilege)
//add new User
newUser := authinterfaces.User{
IdCompany: idCompany,
DateAdd: time.Now(),
City: "Santo Domingo",
Gender: "0",
Contact: "809-545-5444",
IdRol: idRolUser,
Image: "user.png",
LastLogin: time.Now(),
LastName: "Apellido del Usuario",
Name: "Nombre del Usuario",
NickName: strings.ToLower("castro2354"),
Password: utils.Encript([]byte("231154")),
ForcePass: true,
Public: 0,
Status: 1,
Email: "castro@gmail.com",
Note: "Alguna Nota Larga Para el Usuario --> Para describir algo",
}
usersservice.Add(newUser)
//add logs Systems
logssystemservice.Add(1, "Agrego Nueva Empresa..: "+idCompany)
resp := u.Message(true, "Successful")
resp["data"] = "{}"
//resp["token"] = tokenString
u.Respond(w, resp)
}
//Send Config General System
var SendListGeneral = func(w http.ResponseWriter, r *http.Request) {
requestData := &models.RequestListGeneral{}
err := json.NewDecoder(r.Body).Decode(requestData)
if err != nil {
u.Respond(w, u.Message(false, "Invalid request"))
return
}
listSend := usersservice.GetListFromIdCompany("asdsad")
resp := u.Message(false, "ok")
resp["data"] = listSend
// resp["provincies"] = listProvinces
// resp["municipies"] = listMunicipies
u.Respond(w, resp)
}
| {
contentType := reflect.TypeOf(arr)
contentValue := reflect.ValueOf(arr)
newContent := reflect.MakeSlice(contentType, 0, 0)
for i := 0; i < contentValue.Len(); i++ {
if content := contentValue.Index(i); cond(content.Interface()) {
newContent = reflect.Append(newContent, content)
}
}
return newContent.Interface()
} | identifier_body |
settings.go | package settingscontroller
import (
"encoding/json"
"net/http"
"os"
"reflect"
"rest-api-golang/src/dbContext/companyservice"
"rest-api-golang/src/dbContext/logssystemservice"
"rest-api-golang/src/dbContext/logsuserservice"
"rest-api-golang/src/dbContext/privilegeroluserservice"
"rest-api-golang/src/dbContext/rolesuserservice"
"rest-api-golang/src/dbContext/sessionuserservice"
"rest-api-golang/src/dbContext/usersservice"
"rest-api-golang/src/models"
"rest-api-golang/src/models/authinterfaces"
"rest-api-golang/src/utils"
u "rest-api-golang/src/utils"
"strings"
"time"
"github.com/dgrijalva/jwt-go"
)
// Login authenticates a user from a JSON body (user, password, remember),
// creates a session record, signs a JWT carrying the session id, and responds
// with the token, its expiry and a status message.
//
// Fix: the response status was hard-coded to true, so failed ("Usuario
// Invalido") and deactivated ("Usuario Desactivado..") logins were still
// reported as successful; the flag now reflects the actual outcome.
var Login = func(w http.ResponseWriter, r *http.Request) {
	var tokenString string
	var expireDate time.Time
	var msg string
	success := false
	user := &authinterfaces.LoginUser{}
	err := json.NewDecoder(r.Body).Decode(user)
	if err != nil {
		u.Respond(w, u.Message(false, "Invalid request"))
		return
	}
	checkUser := usersservice.CheckUserPasswordForEmail(user.User, user.Password)
	if checkUser.NickName != "" {
		if checkUser.Status == 1 {
			// Create the session first; its generated id becomes the JWT claim.
			newSession := authinterfaces.SessionUser{
				Token:     "",
				Active:    true,
				DateAdd:   time.Now(),
				IdCompany: checkUser.IdCompany,
				IdUser:    checkUser.ID,
				Remember:  user.Remember,
			}
			getIdSession := sessionuserservice.Add(newSession)
			// Never echo the password back to the client.
			user.Password = ""
			tk := &authinterfaces.Token{IdSession: getIdSession}
			token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), tk)
			generateToken, _ := token.SignedString([]byte(os.Getenv("TOKEN_HASH")))
			tokenString = generateToken
			// "Remember me" keeps the token for one day, otherwise 2 minutes.
			// NOTE(review): 2 minutes looks very short for a session — confirm.
			now := time.Now()
			expireDate = now
			if user.Remember {
				expireDate = now.AddDate(0, 0, 1)
			} else {
				expireDate = now.Add(2 * time.Minute)
			}
			// Persist the signed token and its expiry on the stored session.
			sessionUser := sessionuserservice.FindToId(getIdSession)
			sessionUser.Token = tokenString
			sessionUser.TokenExpire = expireDate
			sessionuserservice.UpdateOne(sessionUser)
			logUser := "Inicio Session .."
			logsuserservice.Add(1, checkUser.ID, checkUser.IdCompany, logUser)
			msg = "Usuario Logeado "
			success = true
		} else {
			// Known user but deactivated account.
			logUser := "Intento Loggin User Desactivado .."
			logsuserservice.Add(1, checkUser.ID, checkUser.IdCompany, logUser)
			msg = "Usuario Desactivado.."
		}
	} else {
		// Unknown user / wrong password: record the attempt in the system log.
		logSystem := "Intento Login Fallido usuario : " + user.User + " "
		logssystemservice.Add(3, logSystem)
		msg = "Usuario Invalido"
	}
	resp := u.Message(success, msg)
	resp["token"] = tokenString
	resp["expire"] = expireDate
	resp["msg"] = msg
	u.Respond(w, resp)
}
// LoginUser is a placeholder endpoint: the request is ignored and a static
// successful payload (data=true, empty token) is returned. A large amount of
// commented-out experimentation was removed; see version control if needed.
var LoginUser = func(w http.ResponseWriter, r *http.Request) {
	resp := u.Message(true, "Successful")
	resp["data"] = true
	resp["token"] = ""
	u.Respond(w, resp)
}
// Bird is a throwaway struct used by the (commented-out) JSON import
// experiments in GetUser to decode rows like {"id":..., "name":..., "id2":...}.
// NOTE(review): no json struct tags — decoding relies on Go's
// case-insensitive field matching; confirm that is intended before reuse.
type Bird struct {
	Id string
	Name string
	Id2 string
}
// Filter returns a new slice of the same dynamic type as arr containing only
// the elements for which cond reports true. arr must be a slice value; the
// result is returned as interface{} and must be type-asserted by the caller.
func Filter(arr interface{}, cond func(interface{}) bool) interface{} {
	srcVal := reflect.ValueOf(arr)
	kept := reflect.MakeSlice(reflect.TypeOf(arr), 0, 0)
	for idx := 0; idx < srcVal.Len(); idx++ {
		elem := srcVal.Index(idx)
		if cond(elem.Interface()) {
			kept = reflect.Append(kept, elem)
		}
	}
	return kept.Interface()
}
func | (a models.GenericList, value1 string) bool {
return !strings.HasPrefix(a.Value1, value1)
}
// GetUser is a scratch/demo endpoint left over from development: the request
// body is ignored, a throwaway login payload is built locally, and a static
// success response is returned. Roughly a hundred lines of commented-out
// experiments were removed; see version control history if needed.
var GetUser = func(w http.ResponseWriter, r *http.Request) {
	user := &authinterfaces.LoginUser{}
	user.Password = "25554"
	user.User = "Castro2354"
	resp := u.Message(true, "Successful")
	resp["data"] = true
	u.Respond(w, resp)
}
// AddCompany bootstraps a new tenant from hard-coded demo data: it creates a
// company, an administrator role, that role's privileges and a first admin
// user, then records an audit entry in the system log.
// NOTE(review): every value is a fixed demo string and the request body is
// ignored — presumably seed/scaffolding code; confirm before exposing it.
var AddCompany = func(w http.ResponseWriter, r *http.Request) {
	// 1) Create the company; Add returns the generated id used by the children.
	newCompany := authinterfaces.Company{
		Address: "Direccion /c larga #21 demo",
		DateAdd: time.Now(),
		Image: "logodemo.png",
		NameLong: "Nombre EMpresa largo",
		NameShort: "Nombre Corto",
		Other: "Otras Configuraciones",
		Phone: "809-521-2144 / 20-52222",
		Rnc: "004-251111-2",
		Slogan: "Slogan de Empresa..",
		Status: 1,
	}
	idCompany := companyservice.Add(newCompany)
	// 2) Create the default administrator role for the new company.
	newRolUser := authinterfaces.RolUser{
		IdCompany: idCompany,
		Date: time.Now(),
		Name: "Administradores",
		Note: "Todos los Privilegios",
		Status: 1,
	}
	idRolUser := rolesuserservice.Add(newRolUser)
	// 3) Grant the role its privileges (web access enabled).
	// NOTE(review): the meaning of Config=1 / TypeUser=1 is not visible here — confirm.
	newPrivilege := authinterfaces.PrivilegeRolUser{
		IdCompany: idCompany,
		IdRol: idRolUser,
		WebAccess: true,
		Config: 1,
		TypeUser: 1,
	}
	privilegeroluserservice.Add(newPrivilege)
	// 4) Create the first (admin) user attached to the company and role.
	// ForcePass=true presumably forces a password change on first login — confirm.
	newUser := authinterfaces.User{
		IdCompany: idCompany,
		DateAdd: time.Now(),
		City: "Santo Domingo",
		Gender: "0",
		Contact: "809-545-5444",
		IdRol: idRolUser,
		Image: "user.png",
		LastLogin: time.Now(),
		LastName: "Apellido del Usuario",
		Name: "Nombre del Usuario",
		NickName: strings.ToLower("castro2354"),
		Password: utils.Encript([]byte("231154")),
		ForcePass: true,
		Public: 0,
		Status: 1,
		Email: "castro@gmail.com",
		Note: "Alguna Nota Larga Para el Usuario --> Para describir algo",
	}
	usersservice.Add(newUser)
	// 5) Audit: record the company creation in the system log.
	logssystemservice.Add(1, "Agrego Nueva Empresa..: "+idCompany)
	resp := u.Message(true, "Successful")
	resp["data"] = "{}"
	u.Respond(w, resp)
}
// SendListGeneral returns the general configuration lists for the requesting
// company as a JSON response.
//
// Fix: the success response was built with u.Message(false, "ok"), which
// reported an error status on the happy path (every other handler in this
// file uses true for success); the flag is now true.
var SendListGeneral = func(w http.ResponseWriter, r *http.Request) {
	requestData := &models.RequestListGeneral{}
	err := json.NewDecoder(r.Body).Decode(requestData)
	if err != nil {
		u.Respond(w, u.Message(false, "Invalid request"))
		return
	}
	// NOTE(review): requestData is decoded but never used and the company id
	// is a hard-coded placeholder — presumably it should come from the request
	// or the session; confirm before release.
	listSend := usersservice.GetListFromIdCompany("asdsad")
	resp := u.Message(true, "ok")
	resp["data"] = listSend
	u.Respond(w, resp)
}
| isNoBarAndLessThanTenChar | identifier_name |
settings.go | package settingscontroller
import (
"encoding/json"
"net/http"
"os"
"reflect"
"rest-api-golang/src/dbContext/companyservice"
"rest-api-golang/src/dbContext/logssystemservice"
"rest-api-golang/src/dbContext/logsuserservice"
"rest-api-golang/src/dbContext/privilegeroluserservice"
"rest-api-golang/src/dbContext/rolesuserservice"
"rest-api-golang/src/dbContext/sessionuserservice"
"rest-api-golang/src/dbContext/usersservice"
"rest-api-golang/src/models"
"rest-api-golang/src/models/authinterfaces"
"rest-api-golang/src/utils"
u "rest-api-golang/src/utils"
"strings"
"time"
"github.com/dgrijalva/jwt-go"
)
// Login authenticates a user from a JSON body (user, password, remember),
// creates a session record, signs a JWT carrying the session id, and responds
// with the token, its expiry and a status message.
//
// Fix: the response status was hard-coded to true, so failed ("Usuario
// Invalido") and deactivated ("Usuario Desactivado..") logins were still
// reported as successful; the flag now reflects the actual outcome.
var Login = func(w http.ResponseWriter, r *http.Request) {
	var tokenString string
	var expireDate time.Time
	var msg string
	success := false
	user := &authinterfaces.LoginUser{}
	err := json.NewDecoder(r.Body).Decode(user)
	if err != nil {
		u.Respond(w, u.Message(false, "Invalid request"))
		return
	}
	checkUser := usersservice.CheckUserPasswordForEmail(user.User, user.Password)
	if checkUser.NickName != "" {
		if checkUser.Status == 1 {
			// Create the session first; its generated id becomes the JWT claim.
			newSession := authinterfaces.SessionUser{
				Token:     "",
				Active:    true,
				DateAdd:   time.Now(),
				IdCompany: checkUser.IdCompany,
				IdUser:    checkUser.ID,
				Remember:  user.Remember,
			}
			getIdSession := sessionuserservice.Add(newSession)
			// Never echo the password back to the client.
			user.Password = ""
			tk := &authinterfaces.Token{IdSession: getIdSession}
			token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), tk)
			generateToken, _ := token.SignedString([]byte(os.Getenv("TOKEN_HASH")))
			tokenString = generateToken
			// "Remember me" keeps the token for one day, otherwise 2 minutes.
			// NOTE(review): 2 minutes looks very short for a session — confirm.
			now := time.Now()
			expireDate = now
			if user.Remember {
				expireDate = now.AddDate(0, 0, 1)
			} else {
				expireDate = now.Add(2 * time.Minute)
			}
			// Persist the signed token and its expiry on the stored session.
			sessionUser := sessionuserservice.FindToId(getIdSession)
			sessionUser.Token = tokenString
			sessionUser.TokenExpire = expireDate
			sessionuserservice.UpdateOne(sessionUser)
			logUser := "Inicio Session .."
			logsuserservice.Add(1, checkUser.ID, checkUser.IdCompany, logUser)
			msg = "Usuario Logeado "
			success = true
		} else {
			// Known user but deactivated account.
			logUser := "Intento Loggin User Desactivado .."
			logsuserservice.Add(1, checkUser.ID, checkUser.IdCompany, logUser)
			msg = "Usuario Desactivado.."
		}
	} else {
		// Unknown user / wrong password: record the attempt in the system log.
		logSystem := "Intento Login Fallido usuario : " + user.User + " "
		logssystemservice.Add(3, logSystem)
		msg = "Usuario Invalido"
	}
	resp := u.Message(success, msg)
	resp["token"] = tokenString
	resp["expire"] = expireDate
	resp["msg"] = msg
	u.Respond(w, resp)
}
// LoginUser is a placeholder endpoint: the request is ignored and a static
// successful payload (data=true, empty token) is returned. A large amount of
// commented-out experimentation was removed; see version control if needed.
var LoginUser = func(w http.ResponseWriter, r *http.Request) {
	resp := u.Message(true, "Successful")
	resp["data"] = true
	resp["token"] = ""
	u.Respond(w, resp)
}
// Bird is a throwaway struct used by the (commented-out) JSON import
// experiments in GetUser to decode rows like {"id":..., "name":..., "id2":...}.
// NOTE(review): no json struct tags — decoding relies on Go's
// case-insensitive field matching; confirm that is intended before reuse.
type Bird struct {
	Id string
	Name string
	Id2 string
}
// Filter returns a new slice of the same dynamic type as arr containing only
// the elements for which cond reports true. arr must be a slice value; the
// result is returned as interface{} and must be type-asserted by the caller.
func Filter(arr interface{}, cond func(interface{}) bool) interface{} {
	srcVal := reflect.ValueOf(arr)
	kept := reflect.MakeSlice(reflect.TypeOf(arr), 0, 0)
	for idx := 0; idx < srcVal.Len(); idx++ {
		elem := srcVal.Index(idx)
		if cond(elem.Interface()) {
			kept = reflect.Append(kept, elem)
		}
	}
	return kept.Interface()
}
// isNoBarAndLessThanTenChar reports whether a.Value1 does NOT start with the
// given prefix value1.
// NOTE(review): the name does not match the behavior — no "bar" or length
// check is performed here. It is only referenced from commented-out call
// sites, so consider renaming in a follow-up change.
func isNoBarAndLessThanTenChar(a models.GenericList, value1 string) bool {
	return !strings.HasPrefix(a.Value1, value1)
}
// GetUser is a scratch/demo endpoint left over from development: the request
// body is ignored, a throwaway login payload is built locally, and a static
// success response is returned. Roughly a hundred lines of commented-out
// experiments were removed; see version control history if needed.
var GetUser = func(w http.ResponseWriter, r *http.Request) {
	user := &authinterfaces.LoginUser{}
	user.Password = "25554"
	user.User = "Castro2354"
	resp := u.Message(true, "Successful")
	resp["data"] = true
	u.Respond(w, resp)
}
// AddCompany bootstraps a new tenant from hard-coded demo data: it creates a
// company, an administrator role, that role's privileges and a first admin
// user, then records an audit entry in the system log.
// NOTE(review): every value is a fixed demo string and the request body is
// ignored — presumably seed/scaffolding code; confirm before exposing it.
var AddCompany = func(w http.ResponseWriter, r *http.Request) {
	// 1) Create the company; Add returns the generated id used by the children.
	newCompany := authinterfaces.Company{
		Address: "Direccion /c larga #21 demo",
		DateAdd: time.Now(),
		Image: "logodemo.png",
		NameLong: "Nombre EMpresa largo",
		NameShort: "Nombre Corto",
		Other: "Otras Configuraciones",
		Phone: "809-521-2144 / 20-52222",
		Rnc: "004-251111-2",
		Slogan: "Slogan de Empresa..",
		Status: 1,
	}
	idCompany := companyservice.Add(newCompany)
	// 2) Create the default administrator role for the new company.
	newRolUser := authinterfaces.RolUser{
		IdCompany: idCompany,
		Date: time.Now(),
		Name: "Administradores",
		Note: "Todos los Privilegios",
		Status: 1,
	}
	idRolUser := rolesuserservice.Add(newRolUser)
	// 3) Grant the role its privileges (web access enabled).
	// NOTE(review): the meaning of Config=1 / TypeUser=1 is not visible here — confirm.
	newPrivilege := authinterfaces.PrivilegeRolUser{
		IdCompany: idCompany,
		IdRol: idRolUser,
		WebAccess: true,
		Config: 1,
		TypeUser: 1,
	}
	privilegeroluserservice.Add(newPrivilege)
	// 4) Create the first (admin) user attached to the company and role.
	// ForcePass=true presumably forces a password change on first login — confirm.
	newUser := authinterfaces.User{
		IdCompany: idCompany,
		DateAdd: time.Now(),
		City: "Santo Domingo",
		Gender: "0",
		Contact: "809-545-5444",
		IdRol: idRolUser,
		Image: "user.png",
		LastLogin: time.Now(),
		LastName: "Apellido del Usuario",
		Name: "Nombre del Usuario",
		NickName: strings.ToLower("castro2354"),
		Password: utils.Encript([]byte("231154")),
		ForcePass: true,
		Public: 0,
		Status: 1,
		Email: "castro@gmail.com",
		Note: "Alguna Nota Larga Para el Usuario --> Para describir algo",
	}
	usersservice.Add(newUser)
	// 5) Audit: record the company creation in the system log.
	logssystemservice.Add(1, "Agrego Nueva Empresa..: "+idCompany)
	resp := u.Message(true, "Successful")
	resp["data"] = "{}"
	u.Respond(w, resp)
}
//Send Config General System
var SendListGeneral = func(w http.ResponseWriter, r *http.Request) {
requestData := &models.RequestListGeneral{}
err := json.NewDecoder(r.Body).Decode(requestData)
if err != nil {
u.Respond(w, u.Message(false, "Invalid request"))
return
}
listSend := usersservice.GetListFromIdCompany("asdsad")
resp := u.Message(false, "ok")
resp["data"] = listSend
// resp["provincies"] = listProvinces
// resp["municipies"] = listMunicipies
u.Respond(w, resp)
} | // result := usersservice.FindToId("5e795d655d554045401496e6")
// result.NickName = "ADMIN23"
// fmt.Println(usersservice.UpdateOne(result))
// fmt.Println(result.ID) | random_line_split |
Code_Projet_ML.py | # -*- coding: utf-8 -*-
"""
Created on Wed May 18 22:56:15 2020
@author: paulg
"""
import sys
sys.version
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
import seaborn as sns
######################## DATA TREATMENT #############################################################
# Recupération des données
# Load the scraped CAC40 dataset. The raw CSV wraps every field in literal
# double quotes, so automatic quoting is disabled and stray quote characters
# are stripped from both the values and the column names afterwards.
filename = "./cac40_v3.csv"
data = pd.read_csv(filename, quotechar='\"', doublequote=False,quoting=csv.QUOTE_NONE).drop(columns=['"'])
data=data.replace('\"','',regex=True)
data.columns = data.columns.str.replace('\"','')
data.head()
# Display basic characteristics of the table
print("Nombre de lignes : {}\nNombre de colonnes : {}\n".format(len(data), len(data.columns)))
data['recommandation'] = pd.to_numeric(data['recommandation']) # cast the 'recommandation' word-occurrence column to numeric
print(data.dtypes)
# Collect the list of distinct tickers
list_tickers=data.TICKER.unique().tolist()
# Reshape the base: build a proper datetime column and add forward-looking
# volume-change features per ticker.
data['annee']= pd.to_datetime(data.annee*10000+data.mois*100+data.jour,format='%Y%m%d')
data.rename(columns={'annee': 'date'}, inplace=True)
data.drop(columns=['mois', 'jour'],axis='columns', inplace=True)
data=data.sort_values(by=['TICKER','date'],ascending=[1,1])
# Future volume change (1 day / 1 week / 1 month ahead) for each ticker;
# shift(-k) looks k rows ahead within a ticker's date-sorted series.
CHG_VO_J=[data[data.TICKER == t].VO.shift(-1)/data[data.TICKER == t].VO-1 for t in list_tickers]
CHG_VO_S=[data[data.TICKER == t].VO.shift(-5)/data[data.TICKER == t].VO-1 for t in list_tickers]
CHG_VO_M=[data[data.TICKER == t].VO.shift(-20)/data[data.TICKER == t].VO-1 for t in list_tickers]
# Per-ticker median and 75% quantile of traded volume; the ".VO*0 +" trick
# broadcasts the scalar statistic onto a Series carrying the ticker's index.
MEDIAN_VO=[data[data.TICKER == t].VO*0+data[data.TICKER == t].VO.median() for t in list_tickers]
quantile_VO=[data[data.TICKER == t].VO*0+data[data.TICKER == t].VO.quantile(0.75) for t in list_tickers]
# Append the computed per-ticker series back onto the main DataFrame
data['FUTUR_VO_J']=pd.concat(CHG_VO_J)
data['FUTUR_VO_S']=pd.concat(CHG_VO_S)
data['FUTUR_VO_M']=pd.concat(CHG_VO_M)
data['MEDIAN_VO']=pd.concat(MEDIAN_VO)
data['75_CENT_VO']=pd.concat(quantile_VO)
# For readability, move the new volume columns before the word-occurrence
# columns (columns 0-21 are market data; the last 5 are the new features).
cols= list(data.columns.values)
data=data[cols[0:22]+cols[-5:]+cols[22:-5]]
#On récupère la liste de tous les mots
all_words=data.columns[27:]
# Quelques statistiques de la base (calculées par Ticker)
stats_by_ticker=pd.DataFrame(index=list_tickers)
#rendement quotidien moyen par ticker
stats_by_ticker["RDM_MOYEN_M"]=[data[data.TICKER == t].RDMT_M.values.mean() for t in list_tickers]
#mot le plus apparus par ticker
stats_by_ticker['MOST_FREQ_WORD']=[data[data.TICKER==t][all_words].sum().argmax() for t in list_tickers]
#nombre d'apparition du mot le plus souvent apparus par ticker
stats_by_ticker['NB_WORD']=[data[data.TICKER==t][all_words].sum().max() for t in list_tickers]
#Stats classified by words (no more classified by ticker)
stats_by_word=pd.DataFrame(index=all_words)
#nombre d'apparitions du mot
stats_by_word['APPARITIONS']=[data[w].sum() for w in all_words]
#rendement moyen lorsque le mot est cité
stats_by_word["RDM_MOYEN_M"]=[(data[data[w]==1].RDMT_M).mean() for w in all_words]
#frequence hausse rendement mensuel (entre historique et futur) lorsque le mot est cité
stats_by_word['HAUSSE_RDMT_M']=[(data[data[w]==1].RDMT_M>data[data[w]==1].HISTO_M).mean() for w in all_words]
#frequence volume traité du jour supérieur à la médiane (et quantile 75%) lorsque le mot est cité
stats_by_word['VO>MEDIAN']=[(data[data[w]==1].VO>data[data[w]==1].MEDIAN_VO).mean() for w in all_words]
stats_by_word['VO>QUANTIL_75']=[(data[data[w]==1].VO>data[data[w]==1]['75_CENT_VO']).mean() for w in all_words]
#frequence hausse historique (et future) du volume traité à 1jour lorsque le mot est cité
stats_by_word['VO_HISTO_J>0']=[(data[data[w]==1].VOL_J>0).mean() for w in all_words]
stats_by_word['VO_FUTUR_J>0']=[(data[data[w]==1].FUTUR_VO_J>0).mean() for w in all_words]
stats_by_word=stats_by_word.sort_values(by=['APPARITIONS','RDM_MOYEN_M'],ascending=[0,0])
#Save dataframe of statistique in an excel
#writer = pd.ExcelWriter(r'Statistiques de la base.xlsx', engine='xlsxwriter')
#stats_by_word.to_excel(writer, sheet_name='by Word')
#stats_by_ticker.to_excel(writer, sheet_name='by Ticker')
## Close the Pandas Excel writer and output the Excel file.
#writer.save()
#Récupération des mots qui apparaissent plus de 400 fois
list_words=[]
for w in all_words:
if data[w].sum()>=400:
list_words.append(w)
# calcul du rendement moyen pour chacun de ses mots,
result=[]
for w in list_words:
apparitions=sum(data[w].values)
rdmt_moy_m=sum(data[w].values*data['RDMT_M'].values)/apparitions
# on cnserve ceux pour lesquels le rendement moyen > 1%
if rdmt_moy_m >=0.01:
result.append([w,apparitions,rdm | ("Mot\tApparition\tRdt mensuel moyen")
print("==================================")
for i in range(len(result)):
print("\n{}\t{}\t{}".format(result[i][0],result[i][1],result[i][2].round(4)))
# DataFrame holding the filtered word list with average returns and counts
df = pd.DataFrame(result,columns=['WORD','APPARITIONS','RETURN'])
# Indicator: True for rows where at least one word from df.WORD appears
indic = data.filter(items=df.WORD).sum(axis=1) > 0
data['indic'] = indic
# filtered_data keeps only the rows where one of the df.WORD words appears
filtered_data = data[data['indic']==True]
words = filtered_data.filter(items=df.WORD)
# Pairwise absolute correlation between word-occurrence columns on those rows
corr_w = abs(words.corr())
# Plot the correlation heatmap
plt.figure(figsize=(12,10))
sns.heatmap(corr_w, annot=False, cmap=plt.cm.Reds)
plt.show()
# Drop variables that are too correlated with an earlier one (|corr| >= 0.75);
# the upper-triangle scan keeps the first column of each correlated pair.
columns = np.full((corr_w.shape[0],), True, dtype=bool)
for i in range(corr_w.shape[0]):
    for j in range(i+1, corr_w.shape[0]):
        if corr_w.iloc[i,j] >= 0.75:
            if columns[j]:
                columns[j] = False
# Keep only the surviving (de-correlated) word columns
selected_columns = words.columns[columns]
words = words[selected_columns]
# Rebuild filtered_data: market-data columns (1:27, excluding TICKER) plus the
# surviving word columns.
filtered_data = filtered_data[filtered_data.columns[1:27]].join(words)
##################################### REGRESSION MODEL ######################################
import sklearn
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
#nos inputs sont composés à partir des lignes filtrées
# - des mots filtrés après retrait des corrélations élevées
# - des données de variation du volume traité par rapport à la veille
# - du niveau de volume traité du jour
list_input=words.columns.tolist()
list_input+=['HISTO_M','VO','VOL_J']
X=filtered_data[list_input]
# 2 variables à expliquer pour lesquelles on obtient un AUC correct:
#- RDMT supérieur à 2%, et (volume supérieure au quantile 75% ou Volume Futur qui augmente d'au moins 75%)
y2=filtered_data.RDMT_M.apply(lambda x : 1 if x>= 0.02 else 0)*\
((filtered_data.FUTUR_VO_J).apply(lambda x : 1 if x>0.75 else 0)+\
(filtered_data.VO-filtered_data['75_CENT_VO']).apply(lambda x : 1 if x> 0 else 0)-\
((filtered_data.FUTUR_VO_J).apply(lambda x : 1 if x>0.75 else 0)*\
(filtered_data.VO-filtered_data['75_CENT_VO']).apply(lambda x : 1 if x> 0 else 0)))
# - RDMT supérieur à celui du mois précédent et rendement futur supérieur à 2%
y3=(filtered_data.RDMT_M-filtered_data.HISTO_M).apply(lambda x : 1 if x>= 0 else 0)*(filtered_data.RDMT_M.apply(lambda x : 1 if x>0.02 else 0))
list_y=[y2,y3]
#PLOTING USING plotly
import plotly.express as px
from plotly.offline import plot
filtered_data['axis']=range(1,len(filtered_data)+1)
filtered_data['indic_y2']=y2
filtered_data['indic_y3']=y3
fig_y2= px.bar(filtered_data, x='axis', y='RDMT_M', color='indic_y2',opacity=1)
plot(fig_y2)
fig_y3 = px.bar(filtered_data, x='axis', y='RDMT_M', color='indic_y3',opacity=1)
plot(fig_y3)
# initialisation du modèle de départ utilisé pour les hyperparamétrages
start_model = xgb.XGBClassifier(silent=False,
learning_rate=0.2,
n_estimators=200,
objective='binary:logistic',
subsample = 1,
colsample_bytree = 1,
nthread=4,
scale_pos_weight=1, random_state=1,
seed=1)
#Grille d'hyperparam pour la varaible y2
parameter_space_y2={'max_depth':[6,9,10],
'min_child_weight':[1,2,3],
'gamma':[0,0.4,1],
'subsample':[0.9,1],
'colsample_bytree':[0.9,1],
'reg_alpha':[0,1],
'learning_rate':[0.02],
'n_estimators':[500]}
#Grille d'hyperparam pour la varaible y3
parameter_space_y3={'max_depth':[6,9,10],
'min_child_weight':[0,1,1.5],
'gamma':[0,1],
'subsample':[0.7,0.95,1],
'colsample_bytree':[0.7,0.95,1],
'reg_alpha':[0,1],
'learning_rate':[0.02],
'n_estimators':[500]}
list_grid=[parameter_space_y2,parameter_space_y3]
# /!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\
do_tuning=False #do_tuning doit être False pour éviter de lancer l'hyperparamétrage, environ (1h10)
# /!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\
#nos 2 modèles issus de l'hyperparamétrage (permet de lancer le code sans refaire l'hyperparamétrage)
tuned_model_y2=xgb.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=0.9, gamma=1,
learning_rate=0.02, max_delta_step=0, max_depth=6,
min_child_weight=2, missing=None, n_estimators=500, n_jobs=1,
nthread=4, objective='binary:logistic', random_state=1,
reg_alpha=1, reg_lambda=1, scale_pos_weight=1, seed=1,
silent=False, subsample=1, verbosity=1)
tuned_model_y3=xgb.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=1, gamma=1,
learning_rate=0.02, max_delta_step=0, max_depth=10,
min_child_weight=0, missing=None, n_estimators=500, n_jobs=1,
nthread=4, objective='binary:logistic', random_state=1,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=1,
silent=False, subsample=1, verbosity=1)
tuned_model=[tuned_model_y2,tuned_model_y3]
dic_result=[]
i=0
#Boucle pour l'hyperparamétrage sur les 2 variables à expliquer
for y in list_y:
print('\t Model for y{} :'.format(i+2))
print('===========================')
#Splitting the dataset into training and validation sets
X_train,X_test,y_train,y_test= train_test_split(X,y,test_size=0.2,random_state=7)
#normalisation des données
from sklearn.preprocessing import StandardScaler
scaler=StandardScaler()
scaler.fit(X_train)
X_train=scaler.transform(X_train)
X_test=scaler.transform(X_test)
eval_set = [(X_train, y_train), (X_test, y_test)]
eval_metric = ["error","auc"]
##################################### MODELE DE DEPART : un premier exemple
#entraînement du modèle
start_model.fit(X_train, y_train, eval_metric=eval_metric,eval_set=eval_set, verbose=False)
#prévision test du modèle
y_hat=start_model.predict(X_test)
print('============= START MODEL for (y=y{}) ============\n'.format(i+2))
print('AUC Score :{}\n'.format(sklearn.metrics.roc_auc_score(y_test,start_model.predict_proba(X_test)[:,1])))
print(classification_report(y_test,y_hat))
########################### HYPERPARAMETRAGE ###########################
#Set features and parameters of hyperparameters tuning
if do_tuning:
scoring=['roc_auc','recall']
refit='roc_auc'
parameter_space=list_grid[i]
#Set tuning
clf = GridSearchCV(start_model, param_grid=parameter_space,n_jobs=-1, cv=4,scoring=scoring,verbose=3,return_train_score=True,refit=refit)
#Launch tuning
clf.fit(X_train,y_train)
model=clf.best_estimator_
else:
model=tuned_model[i] #récupération en dur du modèle issu de l'hyperparamétrage
model.fit(X_train,y_train)
y_pred=model.predict(X_test)
dic_temp={'estimator':model,'name':'y{}'.format(i+2),
'X_train':X_train,'y_train':y_train,'X_test':X_test,'y_test':y_test}
#Display result
print( '=============== TUNED MODEL (y = y{}) ================='.format(i+2))
print('Parameters:\n', model.get_params())
auc=sklearn.metrics.roc_auc_score(y_test,model.predict_proba(X_test)[:,1])
recall=sklearn.metrics.recall_score(y_test,y_pred)
precision=sklearn.metrics.precision_score(y_test,y_pred)
print('\nAUC Score :{}\n'.format(auc))
print(classification_report(y_test,y_pred))
add_result={'AUC':auc,'recall':recall,'precision':precision,'y_pred':y_pred}
dic_temp.update(add_result)
#adding model and prediction result to a list of dictionnary
dic_result.append(dic_temp)
i+=1
#Comparison of the own model of each variable y
print("y\tAUC\tRecall\tPrecision \n=====================================")
for i in range(len(list_y)):
print("y{}\t{}\t{}\t{}\n".format(i+2,dic_result[i]['AUC'].round(2),dic_result[i]['recall'].round(2),dic_result[i]['precision'].round(2)))
##### RESULT
# Select the best variable y and its tuned model, here the best seems y3
k=1 #(0 for y2 and 1 for y3)
y=list_y[k]
best_model=xgb.XGBClassifier(**dic_result[k]['estimator'].get_params())
X_train=dic_result[k]['X_train']
X_test=dic_result[k]['X_test']
y_train=dic_result[k]['y_train']
y_test=dic_result[k]['y_test']
eval_set = [(X_train, y_train), (X_test, y_test)]
eval_metric = ["error","auc"]
best_model.set_params(learning_rate=0.02,n_estimators=5000)
best_model.fit(X_train, y_train, eval_metric=eval_metric,eval_set=eval_set,early_stopping_rounds=500,verbose=False)
y_pred=best_model.predict(X_test)
#Display Result
print( '============== BEST MODEL : choice y=y{} ==================\n'.format(k+2))
conf=confusion_matrix(y_test,y_pred)
print('Matrice de confusion:\n',conf)
print('\nAUC Score :{}\n'.format(sklearn.metrics.roc_auc_score(y_test,best_model.predict_proba(X_test)[:,1])))
print(classification_report(y_test,y_pred))
#Plotting AUC in terms of estimator number
results = best_model.evals_result()
epochs = len(results['validation_0']['error'])
x_axis = range(0, epochs)
fig, ax = plt.subplots()
ax.plot(x_axis, results['validation_0']['auc'], label='Train')
ax.plot(x_axis, results['validation_1']['auc'], label='Test')
ax.legend()
plt.ylabel('AUC')
plt.xlabel('Estimator n-th')
plt.title('XGBoost AUC (y{})'.format(k+2))
plt.show()
# VARIABLE IMPORTANCE
features_names=['f{} = {}'.format(i,X.columns.tolist()[i]) for i in range(len(X.columns.tolist()))]
print( '\nFEATURES IMPORTANCE :')
print( '\nFEATURES NAMES :\n',features_names)
#Plot Top 10 importance input variables (features)
fig = plt.figure(figsize=(12,7))
ax_cover = fig.add_subplot(121)
xgb.plot_importance(best_model,max_num_features=10,importance_type='cover',height=0.5, title='Feature Importance (Cover)',ax=ax_cover,show_values=False)
ax_cover.grid(b=None)
ax_gain=fig.add_subplot(122)
xgb.plot_importance(best_model,max_num_features=10,importance_type='gain',height=0.5, title='Feature Importance (Gain)',ax=ax_gain,show_values=False)
ax_gain.grid(b=None)
fig.show() | t_moy_m])
# Sortie Tableau énoncé
print | conditional_block |
Code_Projet_ML.py | # -*- coding: utf-8 -*-
"""
Created on Wed May 18 22:56:15 2020
@author: paulg
"""
import sys
sys.version
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
import seaborn as sns
######################## DATA TREATMENT #############################################################
# Recupération des données
filename = "./cac40_v3.csv"
data = pd.read_csv(filename, quotechar='\"', doublequote=False,quoting=csv.QUOTE_NONE).drop(columns=['"'])
data=data.replace('\"','',regex=True)
data.columns = data.columns.str.replace('\"','')
| print(data.dtypes)
#On récupère la liste des tickers
list_tickers=data.TICKER.unique().tolist()
#Modification de la base ( date et ajout de données sur les varaitions futures de volume)
data['annee']= pd.to_datetime(data.annee*10000+data.mois*100+data.jour,format='%Y%m%d')
data.rename(columns={'annee': 'date'}, inplace=True)
data.drop(columns=['mois', 'jour'],axis='columns', inplace=True)
data=data.sort_values(by=['TICKER','date'],ascending=[1,1])
#Calcul de la variation future (à 1j, 1semaine, 1mois) du volume futur pour chaque ticker
CHG_VO_J=[data[data.TICKER == t].VO.shift(-1)/data[data.TICKER == t].VO-1 for t in list_tickers]
CHG_VO_S=[data[data.TICKER == t].VO.shift(-5)/data[data.TICKER == t].VO-1 for t in list_tickers]
CHG_VO_M=[data[data.TICKER == t].VO.shift(-20)/data[data.TICKER == t].VO-1 for t in list_tickers]
#Calcul de la médiane du volume écahangé pour chaque ticker, et du quantile à 75%
MEDIAN_VO=[data[data.TICKER == t].VO*0+data[data.TICKER == t].VO.median() for t in list_tickers]
quantile_VO=[data[data.TICKER == t].VO*0+data[data.TICKER == t].VO.quantile(0.75) for t in list_tickers]
#Ajout de ces données calculées à notre base de data
data['FUTUR_VO_J']=pd.concat(CHG_VO_J)
data['FUTUR_VO_S']=pd.concat(CHG_VO_S)
data['FUTUR_VO_M']=pd.concat(CHG_VO_M)
data['MEDIAN_VO']=pd.concat(MEDIAN_VO)
data['75_CENT_VO']=pd.concat(quantile_VO)
#Pour plus de lisibilité on replace les nouvelles données sur le volume avant celles sur les apparitions des mots
cols= list(data.columns.values)
data=data[cols[0:22]+cols[-5:]+cols[22:-5]]
#On récupère la liste de tous les mots
all_words=data.columns[27:]
# Quelques statistiques de la base (calculées par Ticker)
stats_by_ticker=pd.DataFrame(index=list_tickers)
#rendement quotidien moyen par ticker
stats_by_ticker["RDM_MOYEN_M"]=[data[data.TICKER == t].RDMT_M.values.mean() for t in list_tickers]
#mot le plus apparus par ticker
stats_by_ticker['MOST_FREQ_WORD']=[data[data.TICKER==t][all_words].sum().argmax() for t in list_tickers]
#nombre d'apparition du mot le plus souvent apparus par ticker
stats_by_ticker['NB_WORD']=[data[data.TICKER==t][all_words].sum().max() for t in list_tickers]
#Stats classified by words (no more classified by ticker)
stats_by_word=pd.DataFrame(index=all_words)
#nombre d'apparitions du mot
stats_by_word['APPARITIONS']=[data[w].sum() for w in all_words]
#rendement moyen lorsque le mot est cité
stats_by_word["RDM_MOYEN_M"]=[(data[data[w]==1].RDMT_M).mean() for w in all_words]
#frequence hausse rendement mensuel (entre historique et futur) lorsque le mot est cité
stats_by_word['HAUSSE_RDMT_M']=[(data[data[w]==1].RDMT_M>data[data[w]==1].HISTO_M).mean() for w in all_words]
#frequence volume traité du jour supérieur à la médiane (et quantile 75%) lorsque le mot est cité
stats_by_word['VO>MEDIAN']=[(data[data[w]==1].VO>data[data[w]==1].MEDIAN_VO).mean() for w in all_words]
stats_by_word['VO>QUANTIL_75']=[(data[data[w]==1].VO>data[data[w]==1]['75_CENT_VO']).mean() for w in all_words]
#frequence hausse historique (et future) du volume traité à 1jour lorsque le mot est cité
stats_by_word['VO_HISTO_J>0']=[(data[data[w]==1].VOL_J>0).mean() for w in all_words]
stats_by_word['VO_FUTUR_J>0']=[(data[data[w]==1].FUTUR_VO_J>0).mean() for w in all_words]
stats_by_word=stats_by_word.sort_values(by=['APPARITIONS','RDM_MOYEN_M'],ascending=[0,0])
#Save dataframe of statistique in an excel
#writer = pd.ExcelWriter(r'Statistiques de la base.xlsx', engine='xlsxwriter')
#stats_by_word.to_excel(writer, sheet_name='by Word')
#stats_by_ticker.to_excel(writer, sheet_name='by Ticker')
## Close the Pandas Excel writer and output the Excel file.
#writer.save()
#Récupération des mots qui apparaissent plus de 400 fois
list_words=[]
for w in all_words:
if data[w].sum()>=400:
list_words.append(w)
# calcul du rendement moyen pour chacun de ses mots,
result=[]
for w in list_words:
apparitions=sum(data[w].values)
rdmt_moy_m=sum(data[w].values*data['RDMT_M'].values)/apparitions
# on cnserve ceux pour lesquels le rendement moyen > 1%
if rdmt_moy_m >=0.01:
result.append([w,apparitions,rdmt_moy_m])
# Sortie Tableau énoncé
print("Mot\tApparition\tRdt mensuel moyen")
print("==================================")
for i in range(len(result)):
print("\n{}\t{}\t{}".format(result[i][0],result[i][1],result[i][2].round(4)))
#DataFrame contenant la liste des mots filtrés, leurs rendements moyens et leurs nombres d'apparitions
df = pd.DataFrame(result,columns=['WORD','APPARITIONS','RETURN'])
#On crée une variable indicatrice sur la condition d'apparition d'un des mots de la list de df.WORD
indic = data.filter(items=df.WORD).sum(axis=1) > 0
data['indic'] = indic
#filtered_data contient seulement les lignes pour lesquelles il y a l'apparition d'un mot de la liste df.WORD
filtered_data = data[data['indic']==True]
words = filtered_data.filter(items=df.WORD)
#on calcule la corrélation sur ces lignes filtrées entre les apparitions de chaque mot de df.WORD
corr_w = abs(words.corr())
#Affichage graphique de correl
plt.figure(figsize=(12,10))
sns.heatmap(corr_w, annot=False, cmap=plt.cm.Reds)
plt.show()
#Exclusion des variables trop corrélées avec d'autres (si correl > 0.75)
columns = np.full((corr_w.shape[0],), True, dtype=bool)
for i in range(corr_w.shape[0]):
for j in range(i+1, corr_w.shape[0]):
if corr_w.iloc[i,j] >= 0.75:
if columns[j]:
columns[j] = False
#On exclue les mots trop corrélés avec d'autres de notre liste de mots
selected_columns = words.columns[columns]
words = words[selected_columns]
#Ici il nous rest les mots plus toutes les données de marché de la base hormis les ticker
filtered_data = filtered_data[filtered_data.columns[1:27]].join(words)
##################################### REGRESSION MODEL ######################################
import sklearn
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
#nos inputs sont composés à partir des lignes filtrées
# - des mots filtrés après retrait des corrélations élevées
# - des données de variation du volume traité par rapport à la veille
# - du niveau de volume traité du jour
list_input=words.columns.tolist()
list_input+=['HISTO_M','VO','VOL_J']
X=filtered_data[list_input]
# 2 variables à expliquer pour lesquelles on obtient un AUC correct:
#- RDMT supérieur à 2%, et (volume supérieure au quantile 75% ou Volume Futur qui augmente d'au moins 75%)
y2=filtered_data.RDMT_M.apply(lambda x : 1 if x>= 0.02 else 0)*\
((filtered_data.FUTUR_VO_J).apply(lambda x : 1 if x>0.75 else 0)+\
(filtered_data.VO-filtered_data['75_CENT_VO']).apply(lambda x : 1 if x> 0 else 0)-\
((filtered_data.FUTUR_VO_J).apply(lambda x : 1 if x>0.75 else 0)*\
(filtered_data.VO-filtered_data['75_CENT_VO']).apply(lambda x : 1 if x> 0 else 0)))
# - RDMT supérieur à celui du mois précédent et rendement futur supérieur à 2%
y3=(filtered_data.RDMT_M-filtered_data.HISTO_M).apply(lambda x : 1 if x>= 0 else 0)*(filtered_data.RDMT_M.apply(lambda x : 1 if x>0.02 else 0))
list_y=[y2,y3]
#PLOTING USING plotly
import plotly.express as px
from plotly.offline import plot
filtered_data['axis']=range(1,len(filtered_data)+1)
filtered_data['indic_y2']=y2
filtered_data['indic_y3']=y3
fig_y2= px.bar(filtered_data, x='axis', y='RDMT_M', color='indic_y2',opacity=1)
plot(fig_y2)
fig_y3 = px.bar(filtered_data, x='axis', y='RDMT_M', color='indic_y3',opacity=1)
plot(fig_y3)
# initialisation du modèle de départ utilisé pour les hyperparamétrages
start_model = xgb.XGBClassifier(silent=False,
learning_rate=0.2,
n_estimators=200,
objective='binary:logistic',
subsample = 1,
colsample_bytree = 1,
nthread=4,
scale_pos_weight=1, random_state=1,
seed=1)
#Grille d'hyperparam pour la varaible y2
parameter_space_y2={'max_depth':[6,9,10],
'min_child_weight':[1,2,3],
'gamma':[0,0.4,1],
'subsample':[0.9,1],
'colsample_bytree':[0.9,1],
'reg_alpha':[0,1],
'learning_rate':[0.02],
'n_estimators':[500]}
#Grille d'hyperparam pour la varaible y3
parameter_space_y3={'max_depth':[6,9,10],
'min_child_weight':[0,1,1.5],
'gamma':[0,1],
'subsample':[0.7,0.95,1],
'colsample_bytree':[0.7,0.95,1],
'reg_alpha':[0,1],
'learning_rate':[0.02],
'n_estimators':[500]}
list_grid=[parameter_space_y2,parameter_space_y3]
# /!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\
do_tuning=False #do_tuning doit être False pour éviter de lancer l'hyperparamétrage, environ (1h10)
# /!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\/!\
#nos 2 modèles issus de l'hyperparamétrage (permet de lancer le code sans refaire l'hyperparamétrage)
tuned_model_y2=xgb.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=0.9, gamma=1,
learning_rate=0.02, max_delta_step=0, max_depth=6,
min_child_weight=2, missing=None, n_estimators=500, n_jobs=1,
nthread=4, objective='binary:logistic', random_state=1,
reg_alpha=1, reg_lambda=1, scale_pos_weight=1, seed=1,
silent=False, subsample=1, verbosity=1)
tuned_model_y3=xgb.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=1, gamma=1,
learning_rate=0.02, max_delta_step=0, max_depth=10,
min_child_weight=0, missing=None, n_estimators=500, n_jobs=1,
nthread=4, objective='binary:logistic', random_state=1,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=1,
silent=False, subsample=1, verbosity=1)
tuned_model=[tuned_model_y2,tuned_model_y3]
dic_result=[]
i=0
#Boucle pour l'hyperparamétrage sur les 2 variables à expliquer
for y in list_y:
print('\t Model for y{} :'.format(i+2))
print('===========================')
#Splitting the dataset into training and validation sets
X_train,X_test,y_train,y_test= train_test_split(X,y,test_size=0.2,random_state=7)
#normalisation des données
from sklearn.preprocessing import StandardScaler
scaler=StandardScaler()
scaler.fit(X_train)
X_train=scaler.transform(X_train)
X_test=scaler.transform(X_test)
eval_set = [(X_train, y_train), (X_test, y_test)]
eval_metric = ["error","auc"]
##################################### MODELE DE DEPART : un premier exemple
#entraînement du modèle
start_model.fit(X_train, y_train, eval_metric=eval_metric,eval_set=eval_set, verbose=False)
#prévision test du modèle
y_hat=start_model.predict(X_test)
print('============= START MODEL for (y=y{}) ============\n'.format(i+2))
print('AUC Score :{}\n'.format(sklearn.metrics.roc_auc_score(y_test,start_model.predict_proba(X_test)[:,1])))
print(classification_report(y_test,y_hat))
########################### HYPERPARAMETRAGE ###########################
#Set features and parameters of hyperparameters tuning
if do_tuning:
scoring=['roc_auc','recall']
refit='roc_auc'
parameter_space=list_grid[i]
#Set tuning
clf = GridSearchCV(start_model, param_grid=parameter_space,n_jobs=-1, cv=4,scoring=scoring,verbose=3,return_train_score=True,refit=refit)
#Launch tuning
clf.fit(X_train,y_train)
model=clf.best_estimator_
else:
model=tuned_model[i] #récupération en dur du modèle issu de l'hyperparamétrage
model.fit(X_train,y_train)
y_pred=model.predict(X_test)
dic_temp={'estimator':model,'name':'y{}'.format(i+2),
'X_train':X_train,'y_train':y_train,'X_test':X_test,'y_test':y_test}
#Display result
print( '=============== TUNED MODEL (y = y{}) ================='.format(i+2))
print('Parameters:\n', model.get_params())
auc=sklearn.metrics.roc_auc_score(y_test,model.predict_proba(X_test)[:,1])
recall=sklearn.metrics.recall_score(y_test,y_pred)
precision=sklearn.metrics.precision_score(y_test,y_pred)
print('\nAUC Score :{}\n'.format(auc))
print(classification_report(y_test,y_pred))
add_result={'AUC':auc,'recall':recall,'precision':precision,'y_pred':y_pred}
dic_temp.update(add_result)
#adding model and prediction result to a list of dictionnary
dic_result.append(dic_temp)
i+=1
#Comparison of the own model of each variable y
print("y\tAUC\tRecall\tPrecision \n=====================================")
for i in range(len(list_y)):
print("y{}\t{}\t{}\t{}\n".format(i+2,dic_result[i]['AUC'].round(2),dic_result[i]['recall'].round(2),dic_result[i]['precision'].round(2)))
##### RESULT
# Select the best variable y and its tuned model, here the best seems y3
k=1 #(0 for y2 and 1 for y3)
y=list_y[k]
best_model=xgb.XGBClassifier(**dic_result[k]['estimator'].get_params())
X_train=dic_result[k]['X_train']
X_test=dic_result[k]['X_test']
y_train=dic_result[k]['y_train']
y_test=dic_result[k]['y_test']
eval_set = [(X_train, y_train), (X_test, y_test)]
eval_metric = ["error","auc"]
best_model.set_params(learning_rate=0.02,n_estimators=5000)
best_model.fit(X_train, y_train, eval_metric=eval_metric,eval_set=eval_set,early_stopping_rounds=500,verbose=False)
y_pred=best_model.predict(X_test)
#Display Result
print( '============== BEST MODEL : choice y=y{} ==================\n'.format(k+2))
conf=confusion_matrix(y_test,y_pred)
print('Matrice de confusion:\n',conf)
print('\nAUC Score :{}\n'.format(sklearn.metrics.roc_auc_score(y_test,best_model.predict_proba(X_test)[:,1])))
print(classification_report(y_test,y_pred))
#Plotting AUC in terms of estimator number
results = best_model.evals_result()
epochs = len(results['validation_0']['error'])
x_axis = range(0, epochs)
fig, ax = plt.subplots()
ax.plot(x_axis, results['validation_0']['auc'], label='Train')
ax.plot(x_axis, results['validation_1']['auc'], label='Test')
ax.legend()
plt.ylabel('AUC')
plt.xlabel('Estimator n-th')
plt.title('XGBoost AUC (y{})'.format(k+2))
plt.show()
# VARIABLE IMPORTANCE
features_names=['f{} = {}'.format(i,X.columns.tolist()[i]) for i in range(len(X.columns.tolist()))]
print( '\nFEATURES IMPORTANCE :')
print( '\nFEATURES NAMES :\n',features_names)
#Plot Top 10 importance input variables (features)
fig = plt.figure(figsize=(12,7))
ax_cover = fig.add_subplot(121)
xgb.plot_importance(best_model,max_num_features=10,importance_type='cover',height=0.5, title='Feature Importance (Cover)',ax=ax_cover,show_values=False)
ax_cover.grid(b=None)
ax_gain=fig.add_subplot(122)
xgb.plot_importance(best_model,max_num_features=10,importance_type='gain',height=0.5, title='Feature Importance (Gain)',ax=ax_gain,show_values=False)
ax_gain.grid(b=None)
fig.show() | data.head()
#Affichage des caractéristiques
print("Nombre de lignes : {}\nNombre de colonnes : {}\n".format(len(data), len(data.columns)))
data['recommandation'] = pd.to_numeric(data['recommandation']) #on convertit en numeric la donnée de l'apparition du mot 'recommandation'
| random_line_split |
normalizer.py | """
Copyright © 2021-2022 The Johns Hopkins University Applied Physics Laboratory LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from collections import defaultdict
from typing import Set
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
class Normalizer:
"""Utility class for normalizing data."""
valid_methods = ["task", "run"]
def __init__(
self,
perf_measure: str, | ste_data: dict = {},
data_range: defaultdict = None,
method: str = "task",
scale: int = 100,
offset: int = 1,
) -> None:
"""Constructor for Normalizer.
Args:
perf_measure (str): Name of column to use for metrics calculations.
data (pd.DataFrame): Reference data for calculating data range. Assumed
DataFrame with task name as index and one column of performance data.
ste_data (dict, optional): The STE data for computing quantiles. Defaults to {}.
data_range (defaultdict, optional): Dictionary object for data range. Defaults to None.
method (str, optional): Normalization method. Valid values are 'task' and 'run'.
Defaults to 'task'.
scale (int, optional): Normalization scale, interpreted as from 0 to this value.
Defaults to 100.
offset (int, optional): Offset to normalized data. Defaults to 1.
Raises:
ValueError: If data range validation fails.
"""
self.perf_measure = perf_measure
# Get unique task names in data
self.unique_tasks = set(data.index.unique())
if data_range is not None:
# Validate and set data range for normalizer
if self._validate_data_range(
data_range=data_range, task_names=self.unique_tasks
):
self.data_range = data_range
self.run_min = min([val["min"] for val in self.data_range.values()])
self.run_max = max([val["max"] for val in self.data_range.values()])
elif data is not None:
self.calculate_data_range(data, ste_data)
else:
raise ValueError(
f"Must provide data or data range to initialize Normalizer"
)
if self._validate_method(method):
self.method = method
if self._validate_scale(scale):
self.scale = scale
if self._validate_offset(offset):
self.offset = offset
def calculate_data_range(self, data: pd.DataFrame, ste_data: dict = {}) -> None:
"""Calculates data range per task for given data.
A task data range is the minimum and maximum value of the task performance.
Args:
data (pd.DataFrame): Reference data for calculating data range. Assumed
DataFrame with task name as index and one column of performance data.
ste_data (dict, optional): The STE data for computing quantiles. Defaults to {}.
Raises:
ValueError: If data contains more than just performance values and task name.
"""
data_column = data.columns.to_numpy()
if len(data_column) > 1:
raise ValueError(
f"Data must only have one column with performance measures"
)
# Initialize data range as empty object
self.data_range = defaultdict(dict)
# Get data range over scenario and STE data if not provided as input
for task in self.unique_tasks:
# Get feature range for each task
task_min = np.nanmin(data.loc[task])
task_max = np.nanmax(data.loc[task])
if ste_data.get(task):
x_ste = np.concatenate(
[
ste_data_df[ste_data_df["block_type"] == "train"][
self.perf_measure
].to_numpy()
for ste_data_df in ste_data.get(task)
]
)
self.data_range[task]["min"] = min(task_min, np.nanmin(x_ste))
self.data_range[task]["max"] = max(task_max, np.nanmax(x_ste))
else:
self.data_range[task]["min"] = task_min
self.data_range[task]["max"] = task_max
self.run_min = min([val["min"] for val in self.data_range.values()])
self.run_max = max([val["max"] for val in self.data_range.values()])
def _validate_data_range(
self, data_range: defaultdict, task_names: Set[str]
) -> bool:
"""Validates data range object.
Args:
data_range (defaultdict): Dictionary object for data range.
task_names (Set[str]): Set of task names in the data.
Raises:
TypeError: If data range is not a dictionary object.
KeyError: If data range is not defined for all tasks.
KeyError: If the keys min and max are missing.
Returns:
bool: True if validation succeeds.
"""
if not isinstance(data_range, (dict, defaultdict)):
raise TypeError(f"Invalid data range type - Must be a dictionary")
elif not set(data_range.keys()).issuperset(task_names):
raise KeyError(f"Data range not defined for all tasks: {task_names}")
elif False in [key.keys() >= {"min", "max"} for key in data_range.values()]:
raise KeyError(f"Missing required fields: min and max")
else:
return True
def _validate_method(self, method: str) -> bool:
"""Validates normalization method.
Args:
method (str): Normalization method.
Raises:
ValueError: If method is not in list of valid methods.
Returns:
bool: True if validation succeeds.
"""
if method not in self.valid_methods:
raise ValueError(
f"Invalid normalization method: {method}\n"
f"Valid methods are: {self.valid_methods}"
)
else:
return True
def _validate_scale(self, scale: int) -> bool:
"""Validates normalization scale.
Args:
scale (int): Normalization scale.
Raises:
TypeError: If scale is not an integer.
ValueError: If scale is less than or equal to 0.
Returns:
bool: True if validation succeeds.
"""
if not isinstance(scale, int):
raise TypeError(f"Invalid scale type: {type(scale)}")
elif scale <= 0:
raise ValueError(f"Scale value must be greater than 0: {scale}")
else:
return True
def _validate_offset(self, offset: int) -> bool:
"""Validates normalization offset.
Args:
offset (int): Normalization offset.
Raises:
TypeError: If offset is not an integer.
ValueError: If scale is less than or equal to 0.
Returns:
bool: True if validation succeeds.
"""
if not isinstance(offset, int):
raise TypeError(f"Invalid offset type: {type(offset)}")
elif offset <= 0:
raise ValueError(f"Offset value must be greater than 0: {offset}")
else:
return True
def normalize(self, data: pd.DataFrame) -> pd.DataFrame:
"""Normalizes the given data with the current instance method and data range/scale.
Args:
data (pd.DataFrame): Dataframe to be normalized.
Raises:
KeyError: If there's a missing data range for any task.
Returns:
pd.DataFrame: Normalized dataframe.
"""
if self.method == "task":
for task in data["task_name"].unique():
if task in self.data_range.keys():
task_min = self.data_range[task]["min"]
task_max = self.data_range[task]["max"]
task_data = data.loc[
data["task_name"] == task, self.perf_measure
].to_numpy()
if task_min == task_max:
data.loc[data["task_name"] == task, self.perf_measure] = (
task_data * 0
) + self.offset
logger.warning(
f"Performance for task ({task}) is constant - normalizing to 0"
)
else:
data.loc[data["task_name"] == task, self.perf_measure] = (
(task_data - task_min) / (task_max - task_min) * self.scale
) + self.offset
else:
raise KeyError(f"Missing data range for task '{task}'")
return data
elif self.method == "run":
data.loc[:, self.perf_measure] = (
(data[self.perf_measure].to_numpy() - self.run_min)
/ (self.run_max - self.run_min)
* self.scale
) + self.offset
return data | data: pd.DataFrame, | random_line_split |
normalizer.py | """
Copyright © 2021-2022 The Johns Hopkins University Applied Physics Laboratory LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from collections import defaultdict
from typing import Set
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
class Normalizer:
"""Utility class for normalizing data."""
valid_methods = ["task", "run"]
def __init__(
self,
perf_measure: str,
data: pd.DataFrame,
ste_data: dict = {},
data_range: defaultdict = None,
method: str = "task",
scale: int = 100,
offset: int = 1,
) -> None:
"""Constructor for Normalizer.
Args:
perf_measure (str): Name of column to use for metrics calculations.
data (pd.DataFrame): Reference data for calculating data range. Assumed
DataFrame with task name as index and one column of performance data.
ste_data (dict, optional): The STE data for computing quantiles. Defaults to {}.
data_range (defaultdict, optional): Dictionary object for data range. Defaults to None.
method (str, optional): Normalization method. Valid values are 'task' and 'run'.
Defaults to 'task'.
scale (int, optional): Normalization scale, interpreted as from 0 to this value.
Defaults to 100.
offset (int, optional): Offset to normalized data. Defaults to 1.
Raises:
ValueError: If data range validation fails.
"""
self.perf_measure = perf_measure
# Get unique task names in data
self.unique_tasks = set(data.index.unique())
if data_range is not None:
# Validate and set data range for normalizer
if self._validate_data_range(
data_range=data_range, task_names=self.unique_tasks
):
self.data_range = data_range
self.run_min = min([val["min"] for val in self.data_range.values()])
self.run_max = max([val["max"] for val in self.data_range.values()])
elif data is not None:
self.calculate_data_range(data, ste_data)
else:
raise ValueError(
f"Must provide data or data range to initialize Normalizer"
)
if self._validate_method(method):
self.method = method
if self._validate_scale(scale):
self.scale = scale
if self._validate_offset(offset):
self.offset = offset
def calculate_data_range(self, data: pd.DataFrame, ste_data: dict = {}) -> None:
"""Calculates data range per task for given data.
A task data range is the minimum and maximum value of the task performance.
Args:
data (pd.DataFrame): Reference data for calculating data range. Assumed
DataFrame with task name as index and one column of performance data.
ste_data (dict, optional): The STE data for computing quantiles. Defaults to {}.
Raises:
ValueError: If data contains more than just performance values and task name.
"""
data_column = data.columns.to_numpy()
if len(data_column) > 1:
raise ValueError(
f"Data must only have one column with performance measures"
)
# Initialize data range as empty object
self.data_range = defaultdict(dict)
# Get data range over scenario and STE data if not provided as input
for task in self.unique_tasks:
# Get feature range for each task
task_min = np.nanmin(data.loc[task])
task_max = np.nanmax(data.loc[task])
if ste_data.get(task):
x_ste = np.concatenate(
[
ste_data_df[ste_data_df["block_type"] == "train"][
self.perf_measure
].to_numpy()
for ste_data_df in ste_data.get(task)
]
)
self.data_range[task]["min"] = min(task_min, np.nanmin(x_ste))
self.data_range[task]["max"] = max(task_max, np.nanmax(x_ste))
else:
self.data_range[task]["min"] = task_min
self.data_range[task]["max"] = task_max
self.run_min = min([val["min"] for val in self.data_range.values()])
self.run_max = max([val["max"] for val in self.data_range.values()])
def _validate_data_range(
self, data_range: defaultdict, task_names: Set[str]
) -> bool:
"""Validates data range object.
Args:
data_range (defaultdict): Dictionary object for data range.
task_names (Set[str]): Set of task names in the data.
Raises:
TypeError: If data range is not a dictionary object.
KeyError: If data range is not defined for all tasks.
KeyError: If the keys min and max are missing.
Returns:
bool: True if validation succeeds.
"""
if not isinstance(data_range, (dict, defaultdict)):
raise TypeError(f"Invalid data range type - Must be a dictionary")
elif not set(data_range.keys()).issuperset(task_names):
raise KeyError(f"Data range not defined for all tasks: {task_names}")
elif False in [key.keys() >= {"min", "max"} for key in data_range.values()]:
raise KeyError(f"Missing required fields: min and max")
else:
return True
def _validate_method(self, method: str) -> bool:
"""Validates normalization method.
Args:
method (str): Normalization method.
Raises:
ValueError: If method is not in list of valid methods.
Returns:
bool: True if validation succeeds.
"""
if method not in self.valid_methods:
raise ValueError(
f"Invalid normalization method: {method}\n"
f"Valid methods are: {self.valid_methods}"
)
else:
return True
def _validate_scale(self, scale: int) -> bool:
"""Validates normalization scale.
Args:
scale (int): Normalization scale.
Raises:
TypeError: If scale is not an integer.
ValueError: If scale is less than or equal to 0.
Returns:
bool: True if validation succeeds.
"""
if not isinstance(scale, int):
raise TypeError(f"Invalid scale type: {type(scale)}")
elif scale <= 0:
raise ValueError(f"Scale value must be greater than 0: {scale}")
else:
return True
def _validate_offset(self, offset: int) -> bool:
"""Validates normalization offset.
Args:
offset (int): Normalization offset.
Raises:
TypeError: If offset is not an integer.
ValueError: If scale is less than or equal to 0.
Returns:
bool: True if validation succeeds.
"""
if not isinstance(offset, int):
raise TypeError(f"Invalid offset type: {type(offset)}")
elif offset <= 0:
raise ValueError(f"Offset value must be greater than 0: {offset}")
else:
return True
def normalize(self, data: pd.DataFrame) -> pd.DataFrame:
"""Normalizes the given data with the current instance method and data range/scale.
Args:
data (pd.DataFrame): Dataframe to be normalized.
Raises:
KeyError: If there's a missing data range for any task.
Returns:
pd.DataFrame: Normalized dataframe.
"""
if self.method == "task":
for task in data["task_name"].unique():
if task i | return data
elif self.method == "run":
data.loc[:, self.perf_measure] = (
(data[self.perf_measure].to_numpy() - self.run_min)
/ (self.run_max - self.run_min)
* self.scale
) + self.offset
return data
| n self.data_range.keys():
task_min = self.data_range[task]["min"]
task_max = self.data_range[task]["max"]
task_data = data.loc[
data["task_name"] == task, self.perf_measure
].to_numpy()
if task_min == task_max:
data.loc[data["task_name"] == task, self.perf_measure] = (
task_data * 0
) + self.offset
logger.warning(
f"Performance for task ({task}) is constant - normalizing to 0"
)
else:
data.loc[data["task_name"] == task, self.perf_measure] = (
(task_data - task_min) / (task_max - task_min) * self.scale
) + self.offset
else:
raise KeyError(f"Missing data range for task '{task}'")
| conditional_block |
normalizer.py | """
Copyright © 2021-2022 The Johns Hopkins University Applied Physics Laboratory LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from collections import defaultdict
from typing import Set
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
class Normalizer:
"""Utility class for normalizing data."""
valid_methods = ["task", "run"]
def __init__(
self,
perf_measure: str,
data: pd.DataFrame,
ste_data: dict = {},
data_range: defaultdict = None,
method: str = "task",
scale: int = 100,
offset: int = 1,
) -> None:
"""Constructor for Normalizer.
Args:
perf_measure (str): Name of column to use for metrics calculations.
data (pd.DataFrame): Reference data for calculating data range. Assumed
DataFrame with task name as index and one column of performance data.
ste_data (dict, optional): The STE data for computing quantiles. Defaults to {}.
data_range (defaultdict, optional): Dictionary object for data range. Defaults to None.
method (str, optional): Normalization method. Valid values are 'task' and 'run'.
Defaults to 'task'.
scale (int, optional): Normalization scale, interpreted as from 0 to this value.
Defaults to 100.
offset (int, optional): Offset to normalized data. Defaults to 1.
Raises:
ValueError: If data range validation fails.
"""
self.perf_measure = perf_measure
# Get unique task names in data
self.unique_tasks = set(data.index.unique())
if data_range is not None:
# Validate and set data range for normalizer
if self._validate_data_range(
data_range=data_range, task_names=self.unique_tasks
):
self.data_range = data_range
self.run_min = min([val["min"] for val in self.data_range.values()])
self.run_max = max([val["max"] for val in self.data_range.values()])
elif data is not None:
self.calculate_data_range(data, ste_data)
else:
raise ValueError(
f"Must provide data or data range to initialize Normalizer"
)
if self._validate_method(method):
self.method = method
if self._validate_scale(scale):
self.scale = scale
if self._validate_offset(offset):
self.offset = offset
def calculate_data_range(self, data: pd.DataFrame, ste_data: dict = {}) -> None:
"""Calculates data range per task for given data.
A task data range is the minimum and maximum value of the task performance.
Args:
data (pd.DataFrame): Reference data for calculating data range. Assumed
DataFrame with task name as index and one column of performance data.
ste_data (dict, optional): The STE data for computing quantiles. Defaults to {}.
Raises:
ValueError: If data contains more than just performance values and task name.
"""
data_column = data.columns.to_numpy()
if len(data_column) > 1:
raise ValueError(
f"Data must only have one column with performance measures"
)
# Initialize data range as empty object
self.data_range = defaultdict(dict)
# Get data range over scenario and STE data if not provided as input
for task in self.unique_tasks:
# Get feature range for each task
task_min = np.nanmin(data.loc[task])
task_max = np.nanmax(data.loc[task])
if ste_data.get(task):
x_ste = np.concatenate(
[
ste_data_df[ste_data_df["block_type"] == "train"][
self.perf_measure
].to_numpy()
for ste_data_df in ste_data.get(task)
]
)
self.data_range[task]["min"] = min(task_min, np.nanmin(x_ste))
self.data_range[task]["max"] = max(task_max, np.nanmax(x_ste))
else:
self.data_range[task]["min"] = task_min
self.data_range[task]["max"] = task_max
self.run_min = min([val["min"] for val in self.data_range.values()])
self.run_max = max([val["max"] for val in self.data_range.values()])
def _validate_data_range(
self, data_range: defaultdict, task_names: Set[str]
) -> bool:
"""Validates data range object.
Args:
data_range (defaultdict): Dictionary object for data range.
task_names (Set[str]): Set of task names in the data.
Raises:
TypeError: If data range is not a dictionary object.
KeyError: If data range is not defined for all tasks.
KeyError: If the keys min and max are missing.
Returns:
bool: True if validation succeeds.
"""
if not isinstance(data_range, (dict, defaultdict)):
raise TypeError(f"Invalid data range type - Must be a dictionary")
elif not set(data_range.keys()).issuperset(task_names):
raise KeyError(f"Data range not defined for all tasks: {task_names}")
elif False in [key.keys() >= {"min", "max"} for key in data_range.values()]:
raise KeyError(f"Missing required fields: min and max")
else:
return True
def _validate | thod: str) -> bool:
"""Validates normalization method.
Args:
method (str): Normalization method.
Raises:
ValueError: If method is not in list of valid methods.
Returns:
bool: True if validation succeeds.
"""
if method not in self.valid_methods:
raise ValueError(
f"Invalid normalization method: {method}\n"
f"Valid methods are: {self.valid_methods}"
)
else:
return True
def _validate_scale(self, scale: int) -> bool:
"""Validates normalization scale.
Args:
scale (int): Normalization scale.
Raises:
TypeError: If scale is not an integer.
ValueError: If scale is less than or equal to 0.
Returns:
bool: True if validation succeeds.
"""
if not isinstance(scale, int):
raise TypeError(f"Invalid scale type: {type(scale)}")
elif scale <= 0:
raise ValueError(f"Scale value must be greater than 0: {scale}")
else:
return True
def _validate_offset(self, offset: int) -> bool:
"""Validates normalization offset.
Args:
offset (int): Normalization offset.
Raises:
TypeError: If offset is not an integer.
ValueError: If scale is less than or equal to 0.
Returns:
bool: True if validation succeeds.
"""
if not isinstance(offset, int):
raise TypeError(f"Invalid offset type: {type(offset)}")
elif offset <= 0:
raise ValueError(f"Offset value must be greater than 0: {offset}")
else:
return True
def normalize(self, data: pd.DataFrame) -> pd.DataFrame:
"""Normalizes the given data with the current instance method and data range/scale.
Args:
data (pd.DataFrame): Dataframe to be normalized.
Raises:
KeyError: If there's a missing data range for any task.
Returns:
pd.DataFrame: Normalized dataframe.
"""
if self.method == "task":
for task in data["task_name"].unique():
if task in self.data_range.keys():
task_min = self.data_range[task]["min"]
task_max = self.data_range[task]["max"]
task_data = data.loc[
data["task_name"] == task, self.perf_measure
].to_numpy()
if task_min == task_max:
data.loc[data["task_name"] == task, self.perf_measure] = (
task_data * 0
) + self.offset
logger.warning(
f"Performance for task ({task}) is constant - normalizing to 0"
)
else:
data.loc[data["task_name"] == task, self.perf_measure] = (
(task_data - task_min) / (task_max - task_min) * self.scale
) + self.offset
else:
raise KeyError(f"Missing data range for task '{task}'")
return data
elif self.method == "run":
data.loc[:, self.perf_measure] = (
(data[self.perf_measure].to_numpy() - self.run_min)
/ (self.run_max - self.run_min)
* self.scale
) + self.offset
return data
| _method(self, me | identifier_name |
normalizer.py | """
Copyright © 2021-2022 The Johns Hopkins University Applied Physics Laboratory LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from collections import defaultdict
from typing import Set
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
class Normalizer:
"""Utility class for normalizing data."""
valid_methods = ["task", "run"]
def __init__(
self,
perf_measure: str,
data: pd.DataFrame,
ste_data: dict = {},
data_range: defaultdict = None,
method: str = "task",
scale: int = 100,
offset: int = 1,
) -> None:
"""Constructor for Normalizer.
Args:
perf_measure (str): Name of column to use for metrics calculations.
data (pd.DataFrame): Reference data for calculating data range. Assumed
DataFrame with task name as index and one column of performance data.
ste_data (dict, optional): The STE data for computing quantiles. Defaults to {}.
data_range (defaultdict, optional): Dictionary object for data range. Defaults to None.
method (str, optional): Normalization method. Valid values are 'task' and 'run'.
Defaults to 'task'.
scale (int, optional): Normalization scale, interpreted as from 0 to this value.
Defaults to 100.
offset (int, optional): Offset to normalized data. Defaults to 1.
Raises:
ValueError: If data range validation fails.
"""
self.perf_measure = perf_measure
# Get unique task names in data
self.unique_tasks = set(data.index.unique())
if data_range is not None:
# Validate and set data range for normalizer
if self._validate_data_range(
data_range=data_range, task_names=self.unique_tasks
):
self.data_range = data_range
self.run_min = min([val["min"] for val in self.data_range.values()])
self.run_max = max([val["max"] for val in self.data_range.values()])
elif data is not None:
self.calculate_data_range(data, ste_data)
else:
raise ValueError(
f"Must provide data or data range to initialize Normalizer"
)
if self._validate_method(method):
self.method = method
if self._validate_scale(scale):
self.scale = scale
if self._validate_offset(offset):
self.offset = offset
def calculate_data_range(self, data: pd.DataFrame, ste_data: dict = {}) -> None:
"""Calculates data range per task for given data.
A task data range is the minimum and maximum value of the task performance.
Args:
data (pd.DataFrame): Reference data for calculating data range. Assumed
DataFrame with task name as index and one column of performance data.
ste_data (dict, optional): The STE data for computing quantiles. Defaults to {}.
Raises:
ValueError: If data contains more than just performance values and task name.
"""
data_column = data.columns.to_numpy()
if len(data_column) > 1:
raise ValueError(
f"Data must only have one column with performance measures"
)
# Initialize data range as empty object
self.data_range = defaultdict(dict)
# Get data range over scenario and STE data if not provided as input
for task in self.unique_tasks:
# Get feature range for each task
task_min = np.nanmin(data.loc[task])
task_max = np.nanmax(data.loc[task])
if ste_data.get(task):
x_ste = np.concatenate(
[
ste_data_df[ste_data_df["block_type"] == "train"][
self.perf_measure
].to_numpy()
for ste_data_df in ste_data.get(task)
]
)
self.data_range[task]["min"] = min(task_min, np.nanmin(x_ste))
self.data_range[task]["max"] = max(task_max, np.nanmax(x_ste))
else:
self.data_range[task]["min"] = task_min
self.data_range[task]["max"] = task_max
self.run_min = min([val["min"] for val in self.data_range.values()])
self.run_max = max([val["max"] for val in self.data_range.values()])
def _validate_data_range(
self, data_range: defaultdict, task_names: Set[str]
) -> bool:
"""Valida | _validate_method(self, method: str) -> bool:
"""Validates normalization method.
Args:
method (str): Normalization method.
Raises:
ValueError: If method is not in list of valid methods.
Returns:
bool: True if validation succeeds.
"""
if method not in self.valid_methods:
raise ValueError(
f"Invalid normalization method: {method}\n"
f"Valid methods are: {self.valid_methods}"
)
else:
return True
def _validate_scale(self, scale: int) -> bool:
"""Validates normalization scale.
Args:
scale (int): Normalization scale.
Raises:
TypeError: If scale is not an integer.
ValueError: If scale is less than or equal to 0.
Returns:
bool: True if validation succeeds.
"""
if not isinstance(scale, int):
raise TypeError(f"Invalid scale type: {type(scale)}")
elif scale <= 0:
raise ValueError(f"Scale value must be greater than 0: {scale}")
else:
return True
def _validate_offset(self, offset: int) -> bool:
"""Validates normalization offset.
Args:
offset (int): Normalization offset.
Raises:
TypeError: If offset is not an integer.
ValueError: If scale is less than or equal to 0.
Returns:
bool: True if validation succeeds.
"""
if not isinstance(offset, int):
raise TypeError(f"Invalid offset type: {type(offset)}")
elif offset <= 0:
raise ValueError(f"Offset value must be greater than 0: {offset}")
else:
return True
def normalize(self, data: pd.DataFrame) -> pd.DataFrame:
"""Normalizes the given data with the current instance method and data range/scale.
Args:
data (pd.DataFrame): Dataframe to be normalized.
Raises:
KeyError: If there's a missing data range for any task.
Returns:
pd.DataFrame: Normalized dataframe.
"""
if self.method == "task":
for task in data["task_name"].unique():
if task in self.data_range.keys():
task_min = self.data_range[task]["min"]
task_max = self.data_range[task]["max"]
task_data = data.loc[
data["task_name"] == task, self.perf_measure
].to_numpy()
if task_min == task_max:
data.loc[data["task_name"] == task, self.perf_measure] = (
task_data * 0
) + self.offset
logger.warning(
f"Performance for task ({task}) is constant - normalizing to 0"
)
else:
data.loc[data["task_name"] == task, self.perf_measure] = (
(task_data - task_min) / (task_max - task_min) * self.scale
) + self.offset
else:
raise KeyError(f"Missing data range for task '{task}'")
return data
elif self.method == "run":
data.loc[:, self.perf_measure] = (
(data[self.perf_measure].to_numpy() - self.run_min)
/ (self.run_max - self.run_min)
* self.scale
) + self.offset
return data
| tes data range object.
Args:
data_range (defaultdict): Dictionary object for data range.
task_names (Set[str]): Set of task names in the data.
Raises:
TypeError: If data range is not a dictionary object.
KeyError: If data range is not defined for all tasks.
KeyError: If the keys min and max are missing.
Returns:
bool: True if validation succeeds.
"""
if not isinstance(data_range, (dict, defaultdict)):
raise TypeError(f"Invalid data range type - Must be a dictionary")
elif not set(data_range.keys()).issuperset(task_names):
raise KeyError(f"Data range not defined for all tasks: {task_names}")
elif False in [key.keys() >= {"min", "max"} for key in data_range.values()]:
raise KeyError(f"Missing required fields: min and max")
else:
return True
def | identifier_body |
lib.rs | //! # Substrate Enterprise Sample - Product Tracking pallet
#![cfg_attr(not(feature = "std"), no_std)]
use codec::alloc::string::ToString;
use core::convert::TryInto;
use frame_support::{
debug, decl_error, decl_event, decl_module, decl_storage, dispatch, ensure,
sp_runtime::offchain::{
self as rt_offchain,
storage::StorageValueRef,
storage_lock::{StorageLock, Time},
},
sp_std::prelude::*,
traits::EnsureOrigin,
};
use frame_system::{self as system, ensure_signed, offchain::SendTransactionTypes};
use product_registry::ProductId;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
mod types;
use crate::types::*;
mod builders;
use crate::builders::*;
// General constraints to limit data size
// Note: these could also be passed as trait config parameters
pub const IDENTIFIER_MAX_LENGTH: usize = 10;
pub const SHIPMENT_MAX_PRODUCTS: usize = 10;
pub const LISTENER_ENDPOINT: &str = "http://localhost:3005";
pub const LOCK_TIMEOUT_EXPIRATION: u64 = 3000; // in milli-seconds
pub trait Trait: system::Trait + timestamp::Trait + SendTransactionTypes<Call<Self>> {
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
type CreateRoleOrigin: EnsureOrigin<Self::Origin>;
}
decl_storage! {
trait Store for Module<T: Trait> as ProductTracking {
// Shipments
pub Shipments get(fn shipment_by_id): map hasher(blake2_128_concat) ShipmentId => Option<Shipment<T::AccountId, T::Moment>>;
pub ShipmentsOfOrganization get(fn shipments_of_org): map hasher(blake2_128_concat) T::AccountId => Vec<ShipmentId>;
// Shipping events
pub EventCount get(fn event_count): u128 = 0;
pub AllEvents get(fn event_by_idx): map hasher(blake2_128_concat) ShippingEventIndex => Option<ShippingEvent<T::Moment>>;
pub EventsOfShipment get(fn events_of_shipment): map hasher(blake2_128_concat) ShipmentId => Vec<ShippingEventIndex>;
// Off-chain Worker notifications
pub OcwNotifications get (fn ocw_notifications): map hasher(identity) T::BlockNumber => Vec<ShippingEventIndex>;
}
}
decl_event!(
pub enum Event<T>
where
AccountId = <T as system::Trait>::AccountId,
{
ShipmentRegistered(AccountId, ShipmentId, AccountId),
ShipmentStatusUpdated(AccountId, ShipmentId, ShippingEventIndex, ShipmentStatus),
}
);
decl_error! {
pub enum Error for Module<T: Trait> {
InvalidOrMissingIdentifier,
ShipmentAlreadyExists,
ShipmentHasBeenDelivered,
ShipmentIsInTransit,
ShipmentIsUnknown,
ShipmentHasTooManyProducts,
ShippingEventAlreadyExists,
ShippingEventMaxExceeded,
OffchainWorkerAlreadyBusy
}
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
type Error = Error<T>;
fn deposit_event() = default;
#[weight = 10_000]
pub fn register_shipment(origin, id: ShipmentId, owner: T::AccountId, products: Vec<ProductId>) -> dispatch::DispatchResult {
T::CreateRoleOrigin::ensure_origin(origin.clone())?;
let who = ensure_signed(origin)?;
// Validate format of shipment ID
Self::validate_identifier(&id)?;
// Validate shipment products
Self::validate_shipment_products(&products)?;
// Check shipment doesn't exist yet (1 DB read)
Self::validate_new_shipment(&id)?;
// Create a shipment instance
let shipment = Self::new_shipment()
.identified_by(id.clone())
.owned_by(owner.clone())
.registered_at(<timestamp::Module<T>>::now())
.with_products(products)
.build();
let status = shipment.status.clone();
// Create shipping event
let event = Self::new_shipping_event()
.of_type(ShippingEventType::ShipmentRegistration)
.for_shipment(id.clone())
.at_location(None)
.with_readings(vec![])
.at_time(shipment.registered)
.build();
// Storage writes
// --------------
// Add shipment (2 DB write)
<Shipments<T>>::insert(&id, shipment);
<ShipmentsOfOrganization<T>>::append(&owner, &id);
// Store shipping event (1 DB read, 3 DB writes)
let event_idx = Self::store_event(event)?;
// Update offchain notifications (1 DB write)
<OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx);
// Raise events
Self::deposit_event(RawEvent::ShipmentRegistered(who.clone(), id.clone(), owner));
Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status));
Ok(())
}
#[weight = 10_000]
pub fn track_shipment(
origin,
id: ShipmentId,
operation: ShippingOperation,
#[compact] timestamp: T::Moment,
location: Option<ReadPoint>,
readings: Option<Vec<Reading<T::Moment>>>
) -> dispatch::DispatchResult {
T::CreateRoleOrigin::ensure_origin(origin.clone())?;
let who = ensure_signed(origin)?;
// Validate format of shipment ID
Self::validate_identifier(&id)?;
// Check shipment is known (1 DB read) & do transition checks
let mut shipment = match <Shipments<T>>::get(&id) {
Some(shipment) => match shipment.status {
ShipmentStatus::Delivered => Err(<Error<T>>::ShipmentHasBeenDelivered),
ShipmentStatus::InTransit if operation == ShippingOperation::Pickup =>
Err(<Error<T>>::ShipmentIsInTransit),
_ => Ok(shipment)
}
None => Err(<Error<T>>::ShipmentIsUnknown)
}?;
// Update shipment status
shipment = match operation {
ShippingOperation::Pickup => shipment.pickup(),
ShippingOperation::Deliver => shipment.deliver(timestamp),
_ => shipment,
};
let status = shipment.status.clone();
// Create shipping event
let event = Self::new_shipping_event()
.of_type(operation.clone().into())
.for_shipment(id.clone())
.at_location(location)
.with_readings(readings.unwrap_or_default())
.at_time(timestamp)
.build();
// Storage writes
// --------------
// Store shipping event (1 DB read, 3 DB writes)
let event_idx = Self::store_event(event)?;
// Update offchain notifications (1 DB write)
<OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx);
if operation != ShippingOperation::Scan {
// Update shipment (1 DB write)
<Shipments<T>>::insert(&id, shipment);
// Raise events
Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status));
}
Ok(())
}
fn offchain_worker(block_number: T::BlockNumber) {
// Acquiring the lock
let mut lock = StorageLock::<Time>::with_deadline(
b"product_tracking_ocw::lock",
rt_offchain::Duration::from_millis(LOCK_TIMEOUT_EXPIRATION)
);
match lock.try_lock() {
Ok(_guard) => { Self::process_ocw_notifications(block_number); }
Err(_err) => { debug::info!("[product_tracking_ocw] lock is already acquired"); }
};
}
}
}
impl<T: Trait> Module<T> {
// Helper methods
fn new_shipment() -> ShipmentBuilder<T::AccountId, T::Moment> {
ShipmentBuilder::<T::AccountId, T::Moment>::default()
}
fn new_shipping_event() -> ShippingEventBuilder<T::Moment> {
ShippingEventBuilder::<T::Moment>::default()
}
fn store_event(event: ShippingEvent<T::Moment>) -> Result<ShippingEventIndex, Error<T>> {
let event_idx = EventCount::get()
.checked_add(1)
.ok_or(Error::<T>::ShippingEventMaxExceeded)?;
EventCount::put(event_idx);
EventsOfShipment::append(&event.shipment_id, event_idx);
<AllEvents<T>>::insert(event_idx, event);
Ok(event_idx)
}
// (Public) Validation methods
pub fn validate_identifier(id: &[u8]) -> Result<(), Error<T>> {
// Basic identifier validation
ensure!(!id.is_empty(), Error::<T>::InvalidOrMissingIdentifier);
ensure!(
id.len() <= IDENTIFIER_MAX_LENGTH,
Error::<T>::InvalidOrMissingIdentifier
);
Ok(())
}
pub fn validate_new_shipment(id: &[u8]) -> Result<(), Error<T>> {
// Shipment existence check
ensure!(
!<Shipments<T>>::contains_key(id),
Error::<T>::ShipmentAlreadyExists
);
Ok(())
}
pub fn validate_shipment_products(props: &[ProductId]) -> Result<(), Error<T>> |
// --- Offchain worker methods ---
fn process_ocw_notifications(block_number: T::BlockNumber) {
// Check last processed block
let last_processed_block_ref =
StorageValueRef::persistent(b"product_tracking_ocw::last_proccessed_block");
let mut last_processed_block: u32 = match last_processed_block_ref.get::<T::BlockNumber>() {
Some(Some(last_proccessed_block)) if last_proccessed_block >= block_number => {
debug::info!(
"[product_tracking_ocw] Skipping: Block {:?} has already been processed.",
block_number
);
return;
}
Some(Some(last_proccessed_block)) => {
last_proccessed_block.try_into().ok().unwrap() as u32
}
None => 0u32, //TODO: define a OCW_MAX_BACKTRACK_PERIOD param
_ => {
debug::error!("[product_tracking_ocw] Error reading product_tracking_ocw::last_proccessed_block.");
return;
}
};
let start_block = last_processed_block + 1;
let end_block = block_number.try_into().ok().unwrap() as u32;
for current_block in start_block..end_block {
debug::debug!(
"[product_tracking_ocw] Processing notifications for block {}",
current_block
);
let ev_indices = Self::ocw_notifications::<T::BlockNumber>(current_block.into());
let listener_results: Result<Vec<_>, _> = ev_indices
.iter()
.map(|idx| match Self::event_by_idx(idx) {
Some(ev) => Self::notify_listener(&ev),
None => Ok(()),
})
.collect();
if let Err(err) = listener_results {
debug::warn!("[product_tracking_ocw] notify_listener error: {}", err);
break;
}
last_processed_block = current_block;
}
// Save last processed block
if last_processed_block >= start_block {
last_processed_block_ref.set(&last_processed_block);
debug::info!(
"[product_tracking_ocw] Notifications successfully processed up to block {}",
last_processed_block
);
}
}
fn notify_listener(ev: &ShippingEvent<T::Moment>) -> Result<(), &'static str> {
debug::info!("notifying listener: {:?}", ev);
let request =
sp_runtime::offchain::http::Request::post(&LISTENER_ENDPOINT, vec![ev.to_string()]);
let timeout =
sp_io::offchain::timestamp().add(sp_runtime::offchain::Duration::from_millis(3000));
let pending = request
.add_header(&"Content-Type", &"text/plain")
.deadline(timeout) // Setting the timeout time
.send() // Sending the request out by the host
.map_err(|_| "http post request building error")?;
let response = pending
.try_wait(timeout)
.map_err(|_| "http post request sent error")?
.map_err(|_| "http post request sent error")?;
if response.code != 200 {
return Err("http response error");
}
Ok(())
}
}
| {
ensure!(
props.len() <= SHIPMENT_MAX_PRODUCTS,
Error::<T>::ShipmentHasTooManyProducts,
);
Ok(())
} | identifier_body |
lib.rs | //! # Substrate Enterprise Sample - Product Tracking pallet
#![cfg_attr(not(feature = "std"), no_std)]
use codec::alloc::string::ToString;
use core::convert::TryInto;
use frame_support::{
debug, decl_error, decl_event, decl_module, decl_storage, dispatch, ensure,
sp_runtime::offchain::{
self as rt_offchain,
storage::StorageValueRef,
storage_lock::{StorageLock, Time},
},
sp_std::prelude::*,
traits::EnsureOrigin,
};
use frame_system::{self as system, ensure_signed, offchain::SendTransactionTypes};
use product_registry::ProductId;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
mod types;
use crate::types::*;
mod builders;
use crate::builders::*;
// General constraints to limit data size
// Note: these could also be passed as trait config parameters
pub const IDENTIFIER_MAX_LENGTH: usize = 10;
pub const SHIPMENT_MAX_PRODUCTS: usize = 10;
pub const LISTENER_ENDPOINT: &str = "http://localhost:3005";
pub const LOCK_TIMEOUT_EXPIRATION: u64 = 3000; // in milli-seconds
pub trait Trait: system::Trait + timestamp::Trait + SendTransactionTypes<Call<Self>> {
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
type CreateRoleOrigin: EnsureOrigin<Self::Origin>;
}
decl_storage! {
trait Store for Module<T: Trait> as ProductTracking {
// Shipments
pub Shipments get(fn shipment_by_id): map hasher(blake2_128_concat) ShipmentId => Option<Shipment<T::AccountId, T::Moment>>;
pub ShipmentsOfOrganization get(fn shipments_of_org): map hasher(blake2_128_concat) T::AccountId => Vec<ShipmentId>;
// Shipping events
pub EventCount get(fn event_count): u128 = 0;
pub AllEvents get(fn event_by_idx): map hasher(blake2_128_concat) ShippingEventIndex => Option<ShippingEvent<T::Moment>>;
pub EventsOfShipment get(fn events_of_shipment): map hasher(blake2_128_concat) ShipmentId => Vec<ShippingEventIndex>;
// Off-chain Worker notifications
pub OcwNotifications get (fn ocw_notifications): map hasher(identity) T::BlockNumber => Vec<ShippingEventIndex>;
}
}
decl_event!(
pub enum Event<T>
where
AccountId = <T as system::Trait>::AccountId,
{
ShipmentRegistered(AccountId, ShipmentId, AccountId),
ShipmentStatusUpdated(AccountId, ShipmentId, ShippingEventIndex, ShipmentStatus),
}
);
decl_error! {
pub enum Error for Module<T: Trait> {
InvalidOrMissingIdentifier,
ShipmentAlreadyExists,
ShipmentHasBeenDelivered,
ShipmentIsInTransit,
ShipmentIsUnknown,
ShipmentHasTooManyProducts,
ShippingEventAlreadyExists,
ShippingEventMaxExceeded,
OffchainWorkerAlreadyBusy
}
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
type Error = Error<T>;
fn deposit_event() = default;
#[weight = 10_000]
pub fn register_shipment(origin, id: ShipmentId, owner: T::AccountId, products: Vec<ProductId>) -> dispatch::DispatchResult {
T::CreateRoleOrigin::ensure_origin(origin.clone())?;
let who = ensure_signed(origin)?;
// Validate format of shipment ID
Self::validate_identifier(&id)?;
// Validate shipment products
Self::validate_shipment_products(&products)?;
// Check shipment doesn't exist yet (1 DB read)
Self::validate_new_shipment(&id)?;
// Create a shipment instance
let shipment = Self::new_shipment()
.identified_by(id.clone())
.owned_by(owner.clone()) | let status = shipment.status.clone();
// Create shipping event
let event = Self::new_shipping_event()
.of_type(ShippingEventType::ShipmentRegistration)
.for_shipment(id.clone())
.at_location(None)
.with_readings(vec![])
.at_time(shipment.registered)
.build();
// Storage writes
// --------------
// Add shipment (2 DB write)
<Shipments<T>>::insert(&id, shipment);
<ShipmentsOfOrganization<T>>::append(&owner, &id);
// Store shipping event (1 DB read, 3 DB writes)
let event_idx = Self::store_event(event)?;
// Update offchain notifications (1 DB write)
<OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx);
// Raise events
Self::deposit_event(RawEvent::ShipmentRegistered(who.clone(), id.clone(), owner));
Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status));
Ok(())
}
#[weight = 10_000]
pub fn track_shipment(
origin,
id: ShipmentId,
operation: ShippingOperation,
#[compact] timestamp: T::Moment,
location: Option<ReadPoint>,
readings: Option<Vec<Reading<T::Moment>>>
) -> dispatch::DispatchResult {
T::CreateRoleOrigin::ensure_origin(origin.clone())?;
let who = ensure_signed(origin)?;
// Validate format of shipment ID
Self::validate_identifier(&id)?;
// Check shipment is known (1 DB read) & do transition checks
let mut shipment = match <Shipments<T>>::get(&id) {
Some(shipment) => match shipment.status {
ShipmentStatus::Delivered => Err(<Error<T>>::ShipmentHasBeenDelivered),
ShipmentStatus::InTransit if operation == ShippingOperation::Pickup =>
Err(<Error<T>>::ShipmentIsInTransit),
_ => Ok(shipment)
}
None => Err(<Error<T>>::ShipmentIsUnknown)
}?;
// Update shipment status
shipment = match operation {
ShippingOperation::Pickup => shipment.pickup(),
ShippingOperation::Deliver => shipment.deliver(timestamp),
_ => shipment,
};
let status = shipment.status.clone();
// Create shipping event
let event = Self::new_shipping_event()
.of_type(operation.clone().into())
.for_shipment(id.clone())
.at_location(location)
.with_readings(readings.unwrap_or_default())
.at_time(timestamp)
.build();
// Storage writes
// --------------
// Store shipping event (1 DB read, 3 DB writes)
let event_idx = Self::store_event(event)?;
// Update offchain notifications (1 DB write)
<OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx);
if operation != ShippingOperation::Scan {
// Update shipment (1 DB write)
<Shipments<T>>::insert(&id, shipment);
// Raise events
Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status));
}
Ok(())
}
fn offchain_worker(block_number: T::BlockNumber) {
// Acquiring the lock
let mut lock = StorageLock::<Time>::with_deadline(
b"product_tracking_ocw::lock",
rt_offchain::Duration::from_millis(LOCK_TIMEOUT_EXPIRATION)
);
match lock.try_lock() {
Ok(_guard) => { Self::process_ocw_notifications(block_number); }
Err(_err) => { debug::info!("[product_tracking_ocw] lock is already acquired"); }
};
}
}
}
impl<T: Trait> Module<T> {
// Helper methods
fn new_shipment() -> ShipmentBuilder<T::AccountId, T::Moment> {
ShipmentBuilder::<T::AccountId, T::Moment>::default()
}
fn new_shipping_event() -> ShippingEventBuilder<T::Moment> {
ShippingEventBuilder::<T::Moment>::default()
}
fn store_event(event: ShippingEvent<T::Moment>) -> Result<ShippingEventIndex, Error<T>> {
let event_idx = EventCount::get()
.checked_add(1)
.ok_or(Error::<T>::ShippingEventMaxExceeded)?;
EventCount::put(event_idx);
EventsOfShipment::append(&event.shipment_id, event_idx);
<AllEvents<T>>::insert(event_idx, event);
Ok(event_idx)
}
// (Public) Validation methods
pub fn validate_identifier(id: &[u8]) -> Result<(), Error<T>> {
// Basic identifier validation
ensure!(!id.is_empty(), Error::<T>::InvalidOrMissingIdentifier);
ensure!(
id.len() <= IDENTIFIER_MAX_LENGTH,
Error::<T>::InvalidOrMissingIdentifier
);
Ok(())
}
pub fn validate_new_shipment(id: &[u8]) -> Result<(), Error<T>> {
// Shipment existence check
ensure!(
!<Shipments<T>>::contains_key(id),
Error::<T>::ShipmentAlreadyExists
);
Ok(())
}
pub fn validate_shipment_products(props: &[ProductId]) -> Result<(), Error<T>> {
ensure!(
props.len() <= SHIPMENT_MAX_PRODUCTS,
Error::<T>::ShipmentHasTooManyProducts,
);
Ok(())
}
// --- Offchain worker methods ---
fn process_ocw_notifications(block_number: T::BlockNumber) {
// Check last processed block
let last_processed_block_ref =
StorageValueRef::persistent(b"product_tracking_ocw::last_proccessed_block");
let mut last_processed_block: u32 = match last_processed_block_ref.get::<T::BlockNumber>() {
Some(Some(last_proccessed_block)) if last_proccessed_block >= block_number => {
debug::info!(
"[product_tracking_ocw] Skipping: Block {:?} has already been processed.",
block_number
);
return;
}
Some(Some(last_proccessed_block)) => {
last_proccessed_block.try_into().ok().unwrap() as u32
}
None => 0u32, //TODO: define a OCW_MAX_BACKTRACK_PERIOD param
_ => {
debug::error!("[product_tracking_ocw] Error reading product_tracking_ocw::last_proccessed_block.");
return;
}
};
let start_block = last_processed_block + 1;
let end_block = block_number.try_into().ok().unwrap() as u32;
for current_block in start_block..end_block {
debug::debug!(
"[product_tracking_ocw] Processing notifications for block {}",
current_block
);
let ev_indices = Self::ocw_notifications::<T::BlockNumber>(current_block.into());
let listener_results: Result<Vec<_>, _> = ev_indices
.iter()
.map(|idx| match Self::event_by_idx(idx) {
Some(ev) => Self::notify_listener(&ev),
None => Ok(()),
})
.collect();
if let Err(err) = listener_results {
debug::warn!("[product_tracking_ocw] notify_listener error: {}", err);
break;
}
last_processed_block = current_block;
}
// Save last processed block
if last_processed_block >= start_block {
last_processed_block_ref.set(&last_processed_block);
debug::info!(
"[product_tracking_ocw] Notifications successfully processed up to block {}",
last_processed_block
);
}
}
fn notify_listener(ev: &ShippingEvent<T::Moment>) -> Result<(), &'static str> {
debug::info!("notifying listener: {:?}", ev);
let request =
sp_runtime::offchain::http::Request::post(&LISTENER_ENDPOINT, vec![ev.to_string()]);
let timeout =
sp_io::offchain::timestamp().add(sp_runtime::offchain::Duration::from_millis(3000));
let pending = request
.add_header(&"Content-Type", &"text/plain")
.deadline(timeout) // Setting the timeout time
.send() // Sending the request out by the host
.map_err(|_| "http post request building error")?;
let response = pending
.try_wait(timeout)
.map_err(|_| "http post request sent error")?
.map_err(|_| "http post request sent error")?;
if response.code != 200 {
return Err("http response error");
}
Ok(())
}
} | .registered_at(<timestamp::Module<T>>::now())
.with_products(products)
.build(); | random_line_split |
lib.rs | //! # Substrate Enterprise Sample - Product Tracking pallet
#![cfg_attr(not(feature = "std"), no_std)]
use codec::alloc::string::ToString;
use core::convert::TryInto;
use frame_support::{
debug, decl_error, decl_event, decl_module, decl_storage, dispatch, ensure,
sp_runtime::offchain::{
self as rt_offchain,
storage::StorageValueRef,
storage_lock::{StorageLock, Time},
},
sp_std::prelude::*,
traits::EnsureOrigin,
};
use frame_system::{self as system, ensure_signed, offchain::SendTransactionTypes};
use product_registry::ProductId;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
mod types;
use crate::types::*;
mod builders;
use crate::builders::*;
// General constraints to limit data size
// Note: these could also be passed as trait config parameters
pub const IDENTIFIER_MAX_LENGTH: usize = 10;
pub const SHIPMENT_MAX_PRODUCTS: usize = 10;
pub const LISTENER_ENDPOINT: &str = "http://localhost:3005";
pub const LOCK_TIMEOUT_EXPIRATION: u64 = 3000; // in milli-seconds
pub trait Trait: system::Trait + timestamp::Trait + SendTransactionTypes<Call<Self>> {
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
type CreateRoleOrigin: EnsureOrigin<Self::Origin>;
}
decl_storage! {
trait Store for Module<T: Trait> as ProductTracking {
// Shipments
pub Shipments get(fn shipment_by_id): map hasher(blake2_128_concat) ShipmentId => Option<Shipment<T::AccountId, T::Moment>>;
pub ShipmentsOfOrganization get(fn shipments_of_org): map hasher(blake2_128_concat) T::AccountId => Vec<ShipmentId>;
// Shipping events
pub EventCount get(fn event_count): u128 = 0;
pub AllEvents get(fn event_by_idx): map hasher(blake2_128_concat) ShippingEventIndex => Option<ShippingEvent<T::Moment>>;
pub EventsOfShipment get(fn events_of_shipment): map hasher(blake2_128_concat) ShipmentId => Vec<ShippingEventIndex>;
// Off-chain Worker notifications
pub OcwNotifications get (fn ocw_notifications): map hasher(identity) T::BlockNumber => Vec<ShippingEventIndex>;
}
}
decl_event!(
pub enum Event<T>
where
AccountId = <T as system::Trait>::AccountId,
{
ShipmentRegistered(AccountId, ShipmentId, AccountId),
ShipmentStatusUpdated(AccountId, ShipmentId, ShippingEventIndex, ShipmentStatus),
}
);
decl_error! {
pub enum Error for Module<T: Trait> {
InvalidOrMissingIdentifier,
ShipmentAlreadyExists,
ShipmentHasBeenDelivered,
ShipmentIsInTransit,
ShipmentIsUnknown,
ShipmentHasTooManyProducts,
ShippingEventAlreadyExists,
ShippingEventMaxExceeded,
OffchainWorkerAlreadyBusy
}
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
type Error = Error<T>;
fn deposit_event() = default;
#[weight = 10_000]
pub fn register_shipment(origin, id: ShipmentId, owner: T::AccountId, products: Vec<ProductId>) -> dispatch::DispatchResult {
T::CreateRoleOrigin::ensure_origin(origin.clone())?;
let who = ensure_signed(origin)?;
// Validate format of shipment ID
Self::validate_identifier(&id)?;
// Validate shipment products
Self::validate_shipment_products(&products)?;
// Check shipment doesn't exist yet (1 DB read)
Self::validate_new_shipment(&id)?;
// Create a shipment instance
let shipment = Self::new_shipment()
.identified_by(id.clone())
.owned_by(owner.clone())
.registered_at(<timestamp::Module<T>>::now())
.with_products(products)
.build();
let status = shipment.status.clone();
// Create shipping event
let event = Self::new_shipping_event()
.of_type(ShippingEventType::ShipmentRegistration)
.for_shipment(id.clone())
.at_location(None)
.with_readings(vec![])
.at_time(shipment.registered)
.build();
// Storage writes
// --------------
// Add shipment (2 DB write)
<Shipments<T>>::insert(&id, shipment);
<ShipmentsOfOrganization<T>>::append(&owner, &id);
// Store shipping event (1 DB read, 3 DB writes)
let event_idx = Self::store_event(event)?;
// Update offchain notifications (1 DB write)
<OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx);
// Raise events
Self::deposit_event(RawEvent::ShipmentRegistered(who.clone(), id.clone(), owner));
Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status));
Ok(())
}
#[weight = 10_000]
pub fn track_shipment(
origin,
id: ShipmentId,
operation: ShippingOperation,
#[compact] timestamp: T::Moment,
location: Option<ReadPoint>,
readings: Option<Vec<Reading<T::Moment>>>
) -> dispatch::DispatchResult {
T::CreateRoleOrigin::ensure_origin(origin.clone())?;
let who = ensure_signed(origin)?;
// Validate format of shipment ID
Self::validate_identifier(&id)?;
// Check shipment is known (1 DB read) & do transition checks
let mut shipment = match <Shipments<T>>::get(&id) {
Some(shipment) => match shipment.status {
ShipmentStatus::Delivered => Err(<Error<T>>::ShipmentHasBeenDelivered),
ShipmentStatus::InTransit if operation == ShippingOperation::Pickup =>
Err(<Error<T>>::ShipmentIsInTransit),
_ => Ok(shipment)
}
None => Err(<Error<T>>::ShipmentIsUnknown)
}?;
// Update shipment status
shipment = match operation {
ShippingOperation::Pickup => shipment.pickup(),
ShippingOperation::Deliver => shipment.deliver(timestamp),
_ => shipment,
};
let status = shipment.status.clone();
// Create shipping event
let event = Self::new_shipping_event()
.of_type(operation.clone().into())
.for_shipment(id.clone())
.at_location(location)
.with_readings(readings.unwrap_or_default())
.at_time(timestamp)
.build();
// Storage writes
// --------------
// Store shipping event (1 DB read, 3 DB writes)
let event_idx = Self::store_event(event)?;
// Update offchain notifications (1 DB write)
<OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx);
if operation != ShippingOperation::Scan {
// Update shipment (1 DB write)
<Shipments<T>>::insert(&id, shipment);
// Raise events
Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status));
}
Ok(())
}
fn offchain_worker(block_number: T::BlockNumber) {
// Acquiring the lock
let mut lock = StorageLock::<Time>::with_deadline(
b"product_tracking_ocw::lock",
rt_offchain::Duration::from_millis(LOCK_TIMEOUT_EXPIRATION)
);
match lock.try_lock() {
Ok(_guard) => { Self::process_ocw_notifications(block_number); }
Err(_err) => { debug::info!("[product_tracking_ocw] lock is already acquired"); }
};
}
}
}
impl<T: Trait> Module<T> {
// Helper methods
fn new_shipment() -> ShipmentBuilder<T::AccountId, T::Moment> {
ShipmentBuilder::<T::AccountId, T::Moment>::default()
}
fn new_shipping_event() -> ShippingEventBuilder<T::Moment> {
ShippingEventBuilder::<T::Moment>::default()
}
fn store_event(event: ShippingEvent<T::Moment>) -> Result<ShippingEventIndex, Error<T>> {
let event_idx = EventCount::get()
.checked_add(1)
.ok_or(Error::<T>::ShippingEventMaxExceeded)?;
EventCount::put(event_idx);
EventsOfShipment::append(&event.shipment_id, event_idx);
<AllEvents<T>>::insert(event_idx, event);
Ok(event_idx)
}
// (Public) Validation methods
pub fn validate_identifier(id: &[u8]) -> Result<(), Error<T>> {
// Basic identifier validation
ensure!(!id.is_empty(), Error::<T>::InvalidOrMissingIdentifier);
ensure!(
id.len() <= IDENTIFIER_MAX_LENGTH,
Error::<T>::InvalidOrMissingIdentifier
);
Ok(())
}
pub fn validate_new_shipment(id: &[u8]) -> Result<(), Error<T>> {
// Shipment existence check
ensure!(
!<Shipments<T>>::contains_key(id),
Error::<T>::ShipmentAlreadyExists
);
Ok(())
}
pub fn validate_shipment_products(props: &[ProductId]) -> Result<(), Error<T>> {
ensure!(
props.len() <= SHIPMENT_MAX_PRODUCTS,
Error::<T>::ShipmentHasTooManyProducts,
);
Ok(())
}
// --- Offchain worker methods ---
fn process_ocw_notifications(block_number: T::BlockNumber) {
// Check last processed block
let last_processed_block_ref =
StorageValueRef::persistent(b"product_tracking_ocw::last_proccessed_block");
let mut last_processed_block: u32 = match last_processed_block_ref.get::<T::BlockNumber>() {
Some(Some(last_proccessed_block)) if last_proccessed_block >= block_number => {
debug::info!(
"[product_tracking_ocw] Skipping: Block {:?} has already been processed.",
block_number
);
return;
}
Some(Some(last_proccessed_block)) => {
last_proccessed_block.try_into().ok().unwrap() as u32
}
None => 0u32, //TODO: define a OCW_MAX_BACKTRACK_PERIOD param
_ => {
debug::error!("[product_tracking_ocw] Error reading product_tracking_ocw::last_proccessed_block.");
return;
}
};
let start_block = last_processed_block + 1;
let end_block = block_number.try_into().ok().unwrap() as u32;
for current_block in start_block..end_block {
debug::debug!(
"[product_tracking_ocw] Processing notifications for block {}",
current_block
);
let ev_indices = Self::ocw_notifications::<T::BlockNumber>(current_block.into());
let listener_results: Result<Vec<_>, _> = ev_indices
.iter()
.map(|idx| match Self::event_by_idx(idx) {
Some(ev) => Self::notify_listener(&ev),
None => Ok(()),
})
.collect();
if let Err(err) = listener_results {
debug::warn!("[product_tracking_ocw] notify_listener error: {}", err);
break;
}
last_processed_block = current_block;
}
// Save last processed block
if last_processed_block >= start_block |
}
fn notify_listener(ev: &ShippingEvent<T::Moment>) -> Result<(), &'static str> {
debug::info!("notifying listener: {:?}", ev);
let request =
sp_runtime::offchain::http::Request::post(&LISTENER_ENDPOINT, vec![ev.to_string()]);
let timeout =
sp_io::offchain::timestamp().add(sp_runtime::offchain::Duration::from_millis(3000));
let pending = request
.add_header(&"Content-Type", &"text/plain")
.deadline(timeout) // Setting the timeout time
.send() // Sending the request out by the host
.map_err(|_| "http post request building error")?;
let response = pending
.try_wait(timeout)
.map_err(|_| "http post request sent error")?
.map_err(|_| "http post request sent error")?;
if response.code != 200 {
return Err("http response error");
}
Ok(())
}
}
| {
last_processed_block_ref.set(&last_processed_block);
debug::info!(
"[product_tracking_ocw] Notifications successfully processed up to block {}",
last_processed_block
);
} | conditional_block |
lib.rs | //! # Substrate Enterprise Sample - Product Tracking pallet
#![cfg_attr(not(feature = "std"), no_std)]
use codec::alloc::string::ToString;
use core::convert::TryInto;
use frame_support::{
debug, decl_error, decl_event, decl_module, decl_storage, dispatch, ensure,
sp_runtime::offchain::{
self as rt_offchain,
storage::StorageValueRef,
storage_lock::{StorageLock, Time},
},
sp_std::prelude::*,
traits::EnsureOrigin,
};
use frame_system::{self as system, ensure_signed, offchain::SendTransactionTypes};
use product_registry::ProductId;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
mod types;
use crate::types::*;
mod builders;
use crate::builders::*;
// General constraints to limit data size
// Note: these could also be passed as trait config parameters
pub const IDENTIFIER_MAX_LENGTH: usize = 10;
pub const SHIPMENT_MAX_PRODUCTS: usize = 10;
pub const LISTENER_ENDPOINT: &str = "http://localhost:3005";
pub const LOCK_TIMEOUT_EXPIRATION: u64 = 3000; // in milli-seconds
pub trait Trait: system::Trait + timestamp::Trait + SendTransactionTypes<Call<Self>> {
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
type CreateRoleOrigin: EnsureOrigin<Self::Origin>;
}
decl_storage! {
trait Store for Module<T: Trait> as ProductTracking {
// Shipments
pub Shipments get(fn shipment_by_id): map hasher(blake2_128_concat) ShipmentId => Option<Shipment<T::AccountId, T::Moment>>;
pub ShipmentsOfOrganization get(fn shipments_of_org): map hasher(blake2_128_concat) T::AccountId => Vec<ShipmentId>;
// Shipping events
pub EventCount get(fn event_count): u128 = 0;
pub AllEvents get(fn event_by_idx): map hasher(blake2_128_concat) ShippingEventIndex => Option<ShippingEvent<T::Moment>>;
pub EventsOfShipment get(fn events_of_shipment): map hasher(blake2_128_concat) ShipmentId => Vec<ShippingEventIndex>;
// Off-chain Worker notifications
pub OcwNotifications get (fn ocw_notifications): map hasher(identity) T::BlockNumber => Vec<ShippingEventIndex>;
}
}
decl_event!(
pub enum Event<T>
where
AccountId = <T as system::Trait>::AccountId,
{
ShipmentRegistered(AccountId, ShipmentId, AccountId),
ShipmentStatusUpdated(AccountId, ShipmentId, ShippingEventIndex, ShipmentStatus),
}
);
decl_error! {
pub enum Error for Module<T: Trait> {
InvalidOrMissingIdentifier,
ShipmentAlreadyExists,
ShipmentHasBeenDelivered,
ShipmentIsInTransit,
ShipmentIsUnknown,
ShipmentHasTooManyProducts,
ShippingEventAlreadyExists,
ShippingEventMaxExceeded,
OffchainWorkerAlreadyBusy
}
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
type Error = Error<T>;
fn deposit_event() = default;
#[weight = 10_000]
pub fn register_shipment(origin, id: ShipmentId, owner: T::AccountId, products: Vec<ProductId>) -> dispatch::DispatchResult {
T::CreateRoleOrigin::ensure_origin(origin.clone())?;
let who = ensure_signed(origin)?;
// Validate format of shipment ID
Self::validate_identifier(&id)?;
// Validate shipment products
Self::validate_shipment_products(&products)?;
// Check shipment doesn't exist yet (1 DB read)
Self::validate_new_shipment(&id)?;
// Create a shipment instance
let shipment = Self::new_shipment()
.identified_by(id.clone())
.owned_by(owner.clone())
.registered_at(<timestamp::Module<T>>::now())
.with_products(products)
.build();
let status = shipment.status.clone();
// Create shipping event
let event = Self::new_shipping_event()
.of_type(ShippingEventType::ShipmentRegistration)
.for_shipment(id.clone())
.at_location(None)
.with_readings(vec![])
.at_time(shipment.registered)
.build();
// Storage writes
// --------------
// Add shipment (2 DB write)
<Shipments<T>>::insert(&id, shipment);
<ShipmentsOfOrganization<T>>::append(&owner, &id);
// Store shipping event (1 DB read, 3 DB writes)
let event_idx = Self::store_event(event)?;
// Update offchain notifications (1 DB write)
<OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx);
// Raise events
Self::deposit_event(RawEvent::ShipmentRegistered(who.clone(), id.clone(), owner));
Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status));
Ok(())
}
#[weight = 10_000]
pub fn track_shipment(
origin,
id: ShipmentId,
operation: ShippingOperation,
#[compact] timestamp: T::Moment,
location: Option<ReadPoint>,
readings: Option<Vec<Reading<T::Moment>>>
) -> dispatch::DispatchResult {
T::CreateRoleOrigin::ensure_origin(origin.clone())?;
let who = ensure_signed(origin)?;
// Validate format of shipment ID
Self::validate_identifier(&id)?;
// Check shipment is known (1 DB read) & do transition checks
let mut shipment = match <Shipments<T>>::get(&id) {
Some(shipment) => match shipment.status {
ShipmentStatus::Delivered => Err(<Error<T>>::ShipmentHasBeenDelivered),
ShipmentStatus::InTransit if operation == ShippingOperation::Pickup =>
Err(<Error<T>>::ShipmentIsInTransit),
_ => Ok(shipment)
}
None => Err(<Error<T>>::ShipmentIsUnknown)
}?;
// Update shipment status
shipment = match operation {
ShippingOperation::Pickup => shipment.pickup(),
ShippingOperation::Deliver => shipment.deliver(timestamp),
_ => shipment,
};
let status = shipment.status.clone();
// Create shipping event
let event = Self::new_shipping_event()
.of_type(operation.clone().into())
.for_shipment(id.clone())
.at_location(location)
.with_readings(readings.unwrap_or_default())
.at_time(timestamp)
.build();
// Storage writes
// --------------
// Store shipping event (1 DB read, 3 DB writes)
let event_idx = Self::store_event(event)?;
// Update offchain notifications (1 DB write)
<OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx);
if operation != ShippingOperation::Scan {
// Update shipment (1 DB write)
<Shipments<T>>::insert(&id, shipment);
// Raise events
Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status));
}
Ok(())
}
fn offchain_worker(block_number: T::BlockNumber) {
// Acquiring the lock
let mut lock = StorageLock::<Time>::with_deadline(
b"product_tracking_ocw::lock",
rt_offchain::Duration::from_millis(LOCK_TIMEOUT_EXPIRATION)
);
match lock.try_lock() {
Ok(_guard) => { Self::process_ocw_notifications(block_number); }
Err(_err) => { debug::info!("[product_tracking_ocw] lock is already acquired"); }
};
}
}
}
impl<T: Trait> Module<T> {
// Helper methods
fn new_shipment() -> ShipmentBuilder<T::AccountId, T::Moment> {
ShipmentBuilder::<T::AccountId, T::Moment>::default()
}
fn new_shipping_event() -> ShippingEventBuilder<T::Moment> {
ShippingEventBuilder::<T::Moment>::default()
}
fn store_event(event: ShippingEvent<T::Moment>) -> Result<ShippingEventIndex, Error<T>> {
let event_idx = EventCount::get()
.checked_add(1)
.ok_or(Error::<T>::ShippingEventMaxExceeded)?;
EventCount::put(event_idx);
EventsOfShipment::append(&event.shipment_id, event_idx);
<AllEvents<T>>::insert(event_idx, event);
Ok(event_idx)
}
// (Public) Validation methods
pub fn validate_identifier(id: &[u8]) -> Result<(), Error<T>> {
// Basic identifier validation
ensure!(!id.is_empty(), Error::<T>::InvalidOrMissingIdentifier);
ensure!(
id.len() <= IDENTIFIER_MAX_LENGTH,
Error::<T>::InvalidOrMissingIdentifier
);
Ok(())
}
pub fn validate_new_shipment(id: &[u8]) -> Result<(), Error<T>> {
// Shipment existence check
ensure!(
!<Shipments<T>>::contains_key(id),
Error::<T>::ShipmentAlreadyExists
);
Ok(())
}
pub fn validate_shipment_products(props: &[ProductId]) -> Result<(), Error<T>> {
ensure!(
props.len() <= SHIPMENT_MAX_PRODUCTS,
Error::<T>::ShipmentHasTooManyProducts,
);
Ok(())
}
// --- Offchain worker methods ---
fn | (block_number: T::BlockNumber) {
// Check last processed block
let last_processed_block_ref =
StorageValueRef::persistent(b"product_tracking_ocw::last_proccessed_block");
let mut last_processed_block: u32 = match last_processed_block_ref.get::<T::BlockNumber>() {
Some(Some(last_proccessed_block)) if last_proccessed_block >= block_number => {
debug::info!(
"[product_tracking_ocw] Skipping: Block {:?} has already been processed.",
block_number
);
return;
}
Some(Some(last_proccessed_block)) => {
last_proccessed_block.try_into().ok().unwrap() as u32
}
None => 0u32, //TODO: define a OCW_MAX_BACKTRACK_PERIOD param
_ => {
debug::error!("[product_tracking_ocw] Error reading product_tracking_ocw::last_proccessed_block.");
return;
}
};
let start_block = last_processed_block + 1;
let end_block = block_number.try_into().ok().unwrap() as u32;
for current_block in start_block..end_block {
debug::debug!(
"[product_tracking_ocw] Processing notifications for block {}",
current_block
);
let ev_indices = Self::ocw_notifications::<T::BlockNumber>(current_block.into());
let listener_results: Result<Vec<_>, _> = ev_indices
.iter()
.map(|idx| match Self::event_by_idx(idx) {
Some(ev) => Self::notify_listener(&ev),
None => Ok(()),
})
.collect();
if let Err(err) = listener_results {
debug::warn!("[product_tracking_ocw] notify_listener error: {}", err);
break;
}
last_processed_block = current_block;
}
// Save last processed block
if last_processed_block >= start_block {
last_processed_block_ref.set(&last_processed_block);
debug::info!(
"[product_tracking_ocw] Notifications successfully processed up to block {}",
last_processed_block
);
}
}
fn notify_listener(ev: &ShippingEvent<T::Moment>) -> Result<(), &'static str> {
debug::info!("notifying listener: {:?}", ev);
let request =
sp_runtime::offchain::http::Request::post(&LISTENER_ENDPOINT, vec![ev.to_string()]);
let timeout =
sp_io::offchain::timestamp().add(sp_runtime::offchain::Duration::from_millis(3000));
let pending = request
.add_header(&"Content-Type", &"text/plain")
.deadline(timeout) // Setting the timeout time
.send() // Sending the request out by the host
.map_err(|_| "http post request building error")?;
let response = pending
.try_wait(timeout)
.map_err(|_| "http post request sent error")?
.map_err(|_| "http post request sent error")?;
if response.code != 200 {
return Err("http response error");
}
Ok(())
}
}
| process_ocw_notifications | identifier_name |
app.rs | use audio;
use audio::cpal;
use find_folder;
use glium::glutin;
use state;
use std;
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::{mpsc, Arc};
use std::thread;
use std::time::Duration;
use window::{self, Window};
use ui;
/// An **App** represents the entire context of your application.
///
/// The **App** owns and manages:
///
/// - the event loop (used to drive the application forward)
/// - all OpenGL windows (for graphics and user input, can be referenced via IDs).
pub struct App {
pub(crate) events_loop: glutin::EventsLoop,
pub(crate) windows: RefCell<HashMap<window::Id, Window>>,
pub(super) exit_on_escape: Cell<bool>,
pub(crate) ui: ui::Arrangement,
loop_mode: Cell<LoopMode>,
/// The `App`'s audio-related API.
pub audio: Audio,
/// The current state of the `Mouse`.
pub mouse: state::Mouse,
/// State of the window currently in focus.
pub window: state::Window,
/// State of the keyboard keys.
///
/// `mods` provides state of each of the modifier keys: `shift`, `ctrl`, `alt`, `logo`.
///
/// `down` is the set of keys that are currently pressed.
///
/// NOTE: `down` this is tracked by the nannou `App` so issues might occur if e.g. a key is
/// pressed while the app is in focus and then released when out of focus. Eventually we should
/// change this to query the OS somehow, but I don't think `winit` provides a way to do this
/// yet.
pub keys: state::Keys,
}
/// An **App**'s audio API.
pub struct Audio {
event_loop: Arc<cpal::EventLoop>,
process_fn_tx: RefCell<Option<mpsc::Sender<audio::stream::ProcessFnMsg>>>,
}
/// A handle to the **App** that can be shared across threads.
///
/// This can be used to "wake up" the **App**'s inner event loop.
pub struct Proxy {
events_loop_proxy: glutin::EventsLoopProxy,
}
/// The mode in which the **App** is currently running the event loop.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum LoopMode {
/// Specifies that the application is continuously looping at a consistent rate.
///
/// An application running in the **Rate** loop mode will behave as follows:
///
/// 1. Poll for and collect all pending user input.
/// `update` is then called with all application events that have occurred.
///
/// 2. `update` is called with an `Event::Update`.
///
/// 3. `draw` is called.
///
/// 4. Check the time and sleep for the remainder of the `update_intervale`.
Rate {
/// The minimum interval between emitted updates.
update_interval: Duration,
},
Wait {
/// The number of `update`s (and in turn `draw`s) that should occur since the application
/// last received a non-`Update` event.
updates_following_event: usize,
/// The minimum interval between emitted updates.
update_interval: Duration,
},
}
fn update_interval(fps: f64) -> Duration {
assert!(fps > 0.0);
const NANOSEC_PER_SEC: f64 = 1_000_000_000.0;
let interval_nanosecs = NANOSEC_PER_SEC / fps;
let secs = (interval_nanosecs / NANOSEC_PER_SEC) as u64;
let nanosecs = (interval_nanosecs % NANOSEC_PER_SEC) as u32;
Duration::new(secs, nanosecs)
}
impl LoopMode {
pub const DEFAULT_RATE_FPS: f64 = 60.0;
pub const DEFAULT_UPDATES_FOLLOWING_EVENT: usize = 3;
/// Specify the **Rate** mode with the given frames-per-second.
pub fn rate_fps(fps: f64) -> Self {
let update_interval = update_interval(fps);
LoopMode::Rate { update_interval }
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Uses the default update interval.
pub fn wait(updates_following_event: usize) -> Self {
let update_interval = update_interval(Self::DEFAULT_RATE_FPS);
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Waits long enough to ensure loop iteration never occurs faster than the given `max_fps`.
pub fn wait_with_max_fps(updates_following_event: usize, max_fps: f64) -> Self {
let update_interval = update_interval(max_fps);
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Waits long enough to ensure loop iteration never occurs faster than the given `max_fps`.
pub fn wait_with_interval(updates_following_event: usize, update_interval: Duration) -> Self {
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
}
impl Default for LoopMode {
fn default() -> Self {
LoopMode::rate_fps(Self::DEFAULT_RATE_FPS)
}
}
impl App {
pub const ASSETS_DIRECTORY_NAME: &'static str = "assets";
pub const DEFAULT_EXIT_ON_ESCAPE: bool = true;
// Create a new `App`.
pub(super) fn new(events_loop: glutin::EventsLoop) -> Self {
let windows = RefCell::new(HashMap::new());
let exit_on_escape = Cell::new(Self::DEFAULT_EXIT_ON_ESCAPE);
let loop_mode = Cell::new(LoopMode::default());
let cpal_event_loop = Arc::new(cpal::EventLoop::new());
let process_fn_tx = RefCell::new(None);
let audio = Audio { event_loop: cpal_event_loop, process_fn_tx };
let ui = ui::Arrangement::new();
let mouse = state::Mouse::new();
let window = state::Window::new();
let keys = state::Keys::default();
App {
events_loop,
windows,
exit_on_escape,
loop_mode,
audio,
ui,
mouse,
window,
keys,
}
}
/// Find and return the absolute path to the project's `assets` directory.
///
/// This method looks for the assets directory in the following order:
///
/// 1. Checks the same directory as the executable.
/// 2. Recursively checks exe's parent directories (to a max depth of 5).
/// 3. Recursively checks exe's children directories (to a max depth of 3).
pub fn assets_path(&self) -> Result<PathBuf, find_folder::Error> {
let exe_path = std::env::current_exe()?;
find_folder::Search::ParentsThenKids(5, 3)
.of(exe_path.parent().expect("executable has no parent directory to search").into())
.for_folder(Self::ASSETS_DIRECTORY_NAME)
}
/// Begin building a new OpenGL window.
pub fn new_window<'a>(&'a self) -> window::Builder<'a, 'static> {
window::Builder::new(self)
}
/// The number of windows currently in the application.
pub fn window_count(&self) -> usize {
self.windows.borrow().len()
}
/// A reference to the window with the given `Id`.
pub fn window(&self, id: window::Id) -> Option<std::cell::Ref<Window>> {
let windows = self.windows.borrow();
if !windows.contains_key(&id) {
None
} else {
Some(std::cell::Ref::map(windows, |ws| &ws[&id]))
}
}
/// Return whether or not the `App` is currently set to exit when the `Escape` key is pressed.
pub fn exit_on_escape(&self) -> bool {
self.exit_on_escape.get()
}
/// Specify whether or not the app should close when the `Escape` key is pressed.
///
/// By default this is `true`.
pub fn set_exit_on_escape(&self, b: bool) {
self.exit_on_escape.set(b);
}
/// Returns the **App**'s current **LoopMode**.
pub fn loop_mode(&self) -> LoopMode {
self.loop_mode.get()
}
/// Sets the loop mode of the **App**.
///
/// Note: Setting the loop mode will not affect anything until the end of the current loop
/// iteration. The behaviour of a single loop iteration is described under each of the
/// **LoopMode** variants.
pub fn set_loop_mode(&self, mode: LoopMode) {
self.loop_mode.set(mode);
}
/// A handle to the **App** that can be shared across threads.
///
/// This can be used to "wake up" the **App**'s inner event loop.
pub fn create_proxy(&self) -> Proxy {
let events_loop_proxy = self.events_loop.create_proxy();
Proxy { events_loop_proxy }
}
/// Create a new `Ui` for the window with the given `Id`.
///
/// Returns `None` if there is no window for the given `window_id`.
pub fn new_ui(&self, window_id: window::Id) -> ui::Builder {
ui::Builder::new(self, window_id)
}
}
impl Audio {
/// Enumerate the available audio devices on the system.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn devices(&self) -> audio::Devices {
let devices = cpal::devices();
audio::Devices { devices }
}
/// Enumerate the available audio devices on the system that support input streams.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn input_devices(&self) -> audio::stream::input::Devices {
let devices = cpal::input_devices();
audio::stream::input::Devices { devices }
}
/// Enumerate the available audio devices on the system that support output streams.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn output_devices(&self) -> audio::stream::output::Devices {
let devices = cpal::output_devices();
audio::stream::output::Devices { devices }
}
/// The current default audio input device.
pub fn default_input_device(&self) -> Option<audio::Device> {
cpal::default_input_device()
.map(|device| audio::Device { device })
}
/// The current default audio output device.
pub fn default_output_device(&self) -> Option<audio::Device> {
cpal::default_output_device()
.map(|device| audio::Device { device })
}
/// Begin building a new input audio stream.
///
/// If this is the first time a stream has been created, this method will spawn the
/// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
pub fn new_input_stream<M, F, S>(&self, model: M, capture: F)
-> audio::stream::input::Builder<M, F, S>
{
audio::stream::input::Builder {
capture,
builder: self.new_stream(model),
}
}
/// Begin building a new output audio stream.
///
/// If this is the first time a stream has been created, this method will spawn the
/// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
pub fn new_output_stream<M, F, S>(&self, model: M, render: F)
-> audio::stream::output::Builder<M, F, S>
{
audio::stream::output::Builder {
render,
builder: self.new_stream(model),
}
}
// Builder initialisation shared between input and output streams.
//
// If this is the first time a stream has been created, this method will spawn the
// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
fn new_stream<M, S>(&self, model: M) -> audio::stream::Builder<M, S> {
let process_fn_tx = if self.process_fn_tx.borrow().is_none() {
let event_loop = self.event_loop.clone();
let (tx, rx) = mpsc::channel();
let mut loop_context = audio::stream::LoopContext::new(rx);
thread::Builder::new()
.name("cpal::EventLoop::run thread".into())
.spawn(move || event_loop.run(move |id, data| loop_context.process(id, data)))
.expect("failed to spawn cpal::EventLoop::run thread");
*self.process_fn_tx.borrow_mut() = Some(tx.clone());
tx
} else {
self.process_fn_tx.borrow().as_ref().unwrap().clone()
};
audio::stream::Builder {
event_loop: self.event_loop.clone(),
process_fn_tx: process_fn_tx, | model,
sample_rate: None,
channels: None,
frames_per_buffer: None,
device: None,
sample_format: PhantomData,
}
}
}
impl Proxy {
/// Wake up the application!
///
/// This wakes up the **App**'s inner event loop and inserts an **Awakened** event.
pub fn wakeup(&self) -> Result<(), glutin::EventsLoopClosed> {
self.events_loop_proxy.wakeup()
}
} | random_line_split | |
app.rs | use audio;
use audio::cpal;
use find_folder;
use glium::glutin;
use state;
use std;
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::{mpsc, Arc};
use std::thread;
use std::time::Duration;
use window::{self, Window};
use ui;
/// An **App** represents the entire context of your application.
///
/// The **App** owns and manages:
///
/// - the event loop (used to drive the application forward)
/// - all OpenGL windows (for graphics and user input, can be referenced via IDs).
pub struct App {
pub(crate) events_loop: glutin::EventsLoop,
pub(crate) windows: RefCell<HashMap<window::Id, Window>>,
pub(super) exit_on_escape: Cell<bool>,
pub(crate) ui: ui::Arrangement,
loop_mode: Cell<LoopMode>,
/// The `App`'s audio-related API.
pub audio: Audio,
/// The current state of the `Mouse`.
pub mouse: state::Mouse,
/// State of the window currently in focus.
pub window: state::Window,
/// State of the keyboard keys.
///
/// `mods` provides state of each of the modifier keys: `shift`, `ctrl`, `alt`, `logo`.
///
/// `down` is the set of keys that are currently pressed.
///
/// NOTE: `down` this is tracked by the nannou `App` so issues might occur if e.g. a key is
/// pressed while the app is in focus and then released when out of focus. Eventually we should
/// change this to query the OS somehow, but I don't think `winit` provides a way to do this
/// yet.
pub keys: state::Keys,
}
/// An **App**'s audio API.
pub struct Audio {
event_loop: Arc<cpal::EventLoop>,
process_fn_tx: RefCell<Option<mpsc::Sender<audio::stream::ProcessFnMsg>>>,
}
/// A handle to the **App** that can be shared across threads.
///
/// This can be used to "wake up" the **App**'s inner event loop.
pub struct Proxy {
events_loop_proxy: glutin::EventsLoopProxy,
}
/// The mode in which the **App** is currently running the event loop.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum LoopMode {
/// Specifies that the application is continuously looping at a consistent rate.
///
/// An application running in the **Rate** loop mode will behave as follows:
///
/// 1. Poll for and collect all pending user input.
/// `update` is then called with all application events that have occurred.
///
/// 2. `update` is called with an `Event::Update`.
///
/// 3. `draw` is called.
///
/// 4. Check the time and sleep for the remainder of the `update_intervale`.
Rate {
/// The minimum interval between emitted updates.
update_interval: Duration,
},
Wait {
/// The number of `update`s (and in turn `draw`s) that should occur since the application
/// last received a non-`Update` event.
updates_following_event: usize,
/// The minimum interval between emitted updates.
update_interval: Duration,
},
}
fn update_interval(fps: f64) -> Duration {
assert!(fps > 0.0);
const NANOSEC_PER_SEC: f64 = 1_000_000_000.0;
let interval_nanosecs = NANOSEC_PER_SEC / fps;
let secs = (interval_nanosecs / NANOSEC_PER_SEC) as u64;
let nanosecs = (interval_nanosecs % NANOSEC_PER_SEC) as u32;
Duration::new(secs, nanosecs)
}
impl LoopMode {
pub const DEFAULT_RATE_FPS: f64 = 60.0;
pub const DEFAULT_UPDATES_FOLLOWING_EVENT: usize = 3;
/// Specify the **Rate** mode with the given frames-per-second.
pub fn rate_fps(fps: f64) -> Self {
let update_interval = update_interval(fps);
LoopMode::Rate { update_interval }
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Uses the default update interval.
pub fn wait(updates_following_event: usize) -> Self {
let update_interval = update_interval(Self::DEFAULT_RATE_FPS);
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Waits long enough to ensure loop iteration never occurs faster than the given `max_fps`.
pub fn wait_with_max_fps(updates_following_event: usize, max_fps: f64) -> Self {
let update_interval = update_interval(max_fps);
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Waits long enough to ensure loop iteration never occurs faster than the given `max_fps`.
pub fn wait_with_interval(updates_following_event: usize, update_interval: Duration) -> Self {
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
}
impl Default for LoopMode {
fn default() -> Self {
LoopMode::rate_fps(Self::DEFAULT_RATE_FPS)
}
}
impl App {
pub const ASSETS_DIRECTORY_NAME: &'static str = "assets";
pub const DEFAULT_EXIT_ON_ESCAPE: bool = true;
// Create a new `App`.
pub(super) fn new(events_loop: glutin::EventsLoop) -> Self {
let windows = RefCell::new(HashMap::new());
let exit_on_escape = Cell::new(Self::DEFAULT_EXIT_ON_ESCAPE);
let loop_mode = Cell::new(LoopMode::default());
let cpal_event_loop = Arc::new(cpal::EventLoop::new());
let process_fn_tx = RefCell::new(None);
let audio = Audio { event_loop: cpal_event_loop, process_fn_tx };
let ui = ui::Arrangement::new();
let mouse = state::Mouse::new();
let window = state::Window::new();
let keys = state::Keys::default();
App {
events_loop,
windows,
exit_on_escape,
loop_mode,
audio,
ui,
mouse,
window,
keys,
}
}
/// Find and return the absolute path to the project's `assets` directory.
///
/// This method looks for the assets directory in the following order:
///
/// 1. Checks the same directory as the executable.
/// 2. Recursively checks exe's parent directories (to a max depth of 5).
/// 3. Recursively checks exe's children directories (to a max depth of 3).
pub fn assets_path(&self) -> Result<PathBuf, find_folder::Error> {
let exe_path = std::env::current_exe()?;
find_folder::Search::ParentsThenKids(5, 3)
.of(exe_path.parent().expect("executable has no parent directory to search").into())
.for_folder(Self::ASSETS_DIRECTORY_NAME)
}
/// Begin building a new OpenGL window.
pub fn new_window<'a>(&'a self) -> window::Builder<'a, 'static> {
window::Builder::new(self)
}
/// The number of windows currently in the application.
pub fn window_count(&self) -> usize {
self.windows.borrow().len()
}
/// A reference to the window with the given `Id`.
pub fn window(&self, id: window::Id) -> Option<std::cell::Ref<Window>> {
let windows = self.windows.borrow();
if !windows.contains_key(&id) {
None
} else {
Some(std::cell::Ref::map(windows, |ws| &ws[&id]))
}
}
/// Return whether or not the `App` is currently set to exit when the `Escape` key is pressed.
pub fn exit_on_escape(&self) -> bool {
self.exit_on_escape.get()
}
/// Specify whether or not the app should close when the `Escape` key is pressed.
///
/// By default this is `true`.
pub fn set_exit_on_escape(&self, b: bool) {
self.exit_on_escape.set(b);
}
/// Returns the **App**'s current **LoopMode**.
pub fn loop_mode(&self) -> LoopMode {
self.loop_mode.get()
}
/// Sets the loop mode of the **App**.
///
/// Note: Setting the loop mode will not affect anything until the end of the current loop
/// iteration. The behaviour of a single loop iteration is described under each of the
/// **LoopMode** variants.
pub fn set_loop_mode(&self, mode: LoopMode) {
self.loop_mode.set(mode);
}
/// A handle to the **App** that can be shared across threads.
///
/// This can be used to "wake up" the **App**'s inner event loop.
pub fn create_proxy(&self) -> Proxy {
let events_loop_proxy = self.events_loop.create_proxy();
Proxy { events_loop_proxy }
}
/// Create a new `Ui` for the window with the given `Id`.
///
/// Returns `None` if there is no window for the given `window_id`.
pub fn new_ui(&self, window_id: window::Id) -> ui::Builder {
ui::Builder::new(self, window_id)
}
}
impl Audio {
/// Enumerate the available audio devices on the system.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn devices(&self) -> audio::Devices {
let devices = cpal::devices();
audio::Devices { devices }
}
/// Enumerate the available audio devices on the system that support input streams.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn input_devices(&self) -> audio::stream::input::Devices {
let devices = cpal::input_devices();
audio::stream::input::Devices { devices }
}
/// Enumerate the available audio devices on the system that support output streams.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn output_devices(&self) -> audio::stream::output::Devices {
let devices = cpal::output_devices();
audio::stream::output::Devices { devices }
}
/// The current default audio input device.
pub fn default_input_device(&self) -> Option<audio::Device> {
cpal::default_input_device()
.map(|device| audio::Device { device })
}
/// The current default audio output device.
pub fn default_output_device(&self) -> Option<audio::Device> {
cpal::default_output_device()
.map(|device| audio::Device { device })
}
/// Begin building a new input audio stream.
///
/// If this is the first time a stream has been created, this method will spawn the
/// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
pub fn new_input_stream<M, F, S>(&self, model: M, capture: F)
-> audio::stream::input::Builder<M, F, S>
{
audio::stream::input::Builder {
capture,
builder: self.new_stream(model),
}
}
/// Begin building a new output audio stream.
///
/// If this is the first time a stream has been created, this method will spawn the
/// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
pub fn new_output_stream<M, F, S>(&self, model: M, render: F)
-> audio::stream::output::Builder<M, F, S>
{
audio::stream::output::Builder {
render,
builder: self.new_stream(model),
}
}
// Builder initialisation shared between input and output streams.
//
// If this is the first time a stream has been created, this method will spawn the
// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
fn new_stream<M, S>(&self, model: M) -> audio::stream::Builder<M, S> {
let process_fn_tx = if self.process_fn_tx.borrow().is_none() {
let event_loop = self.event_loop.clone();
let (tx, rx) = mpsc::channel();
let mut loop_context = audio::stream::LoopContext::new(rx);
thread::Builder::new()
.name("cpal::EventLoop::run thread".into())
.spawn(move || event_loop.run(move |id, data| loop_context.process(id, data)))
.expect("failed to spawn cpal::EventLoop::run thread");
*self.process_fn_tx.borrow_mut() = Some(tx.clone());
tx
} else {
self.process_fn_tx.borrow().as_ref().unwrap().clone()
};
audio::stream::Builder {
event_loop: self.event_loop.clone(),
process_fn_tx: process_fn_tx,
model,
sample_rate: None,
channels: None,
frames_per_buffer: None,
device: None,
sample_format: PhantomData,
}
}
}
impl Proxy {
/// Wake up the application!
///
/// This wakes up the **App**'s inner event loop and inserts an **Awakened** event.
pub fn wakeup(&self) -> Result<(), glutin::EventsLoopClosed> |
}
| {
self.events_loop_proxy.wakeup()
} | identifier_body |
app.rs | use audio;
use audio::cpal;
use find_folder;
use glium::glutin;
use state;
use std;
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::{mpsc, Arc};
use std::thread;
use std::time::Duration;
use window::{self, Window};
use ui;
/// An **App** represents the entire context of your application.
///
/// The **App** owns and manages:
///
/// - the event loop (used to drive the application forward)
/// - all OpenGL windows (for graphics and user input, can be referenced via IDs).
pub struct App {
pub(crate) events_loop: glutin::EventsLoop,
pub(crate) windows: RefCell<HashMap<window::Id, Window>>,
pub(super) exit_on_escape: Cell<bool>,
pub(crate) ui: ui::Arrangement,
loop_mode: Cell<LoopMode>,
/// The `App`'s audio-related API.
pub audio: Audio,
/// The current state of the `Mouse`.
pub mouse: state::Mouse,
/// State of the window currently in focus.
pub window: state::Window,
/// State of the keyboard keys.
///
/// `mods` provides state of each of the modifier keys: `shift`, `ctrl`, `alt`, `logo`.
///
/// `down` is the set of keys that are currently pressed.
///
/// NOTE: `down` this is tracked by the nannou `App` so issues might occur if e.g. a key is
/// pressed while the app is in focus and then released when out of focus. Eventually we should
/// change this to query the OS somehow, but I don't think `winit` provides a way to do this
/// yet.
pub keys: state::Keys,
}
/// An **App**'s audio API.
pub struct Audio {
event_loop: Arc<cpal::EventLoop>,
process_fn_tx: RefCell<Option<mpsc::Sender<audio::stream::ProcessFnMsg>>>,
}
/// A handle to the **App** that can be shared across threads.
///
/// This can be used to "wake up" the **App**'s inner event loop.
pub struct Proxy {
events_loop_proxy: glutin::EventsLoopProxy,
}
/// The mode in which the **App** is currently running the event loop.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum LoopMode {
/// Specifies that the application is continuously looping at a consistent rate.
///
/// An application running in the **Rate** loop mode will behave as follows:
///
/// 1. Poll for and collect all pending user input.
/// `update` is then called with all application events that have occurred.
///
/// 2. `update` is called with an `Event::Update`.
///
/// 3. `draw` is called.
///
/// 4. Check the time and sleep for the remainder of the `update_intervale`.
Rate {
/// The minimum interval between emitted updates.
update_interval: Duration,
},
Wait {
/// The number of `update`s (and in turn `draw`s) that should occur since the application
/// last received a non-`Update` event.
updates_following_event: usize,
/// The minimum interval between emitted updates.
update_interval: Duration,
},
}
fn update_interval(fps: f64) -> Duration {
assert!(fps > 0.0);
const NANOSEC_PER_SEC: f64 = 1_000_000_000.0;
let interval_nanosecs = NANOSEC_PER_SEC / fps;
let secs = (interval_nanosecs / NANOSEC_PER_SEC) as u64;
let nanosecs = (interval_nanosecs % NANOSEC_PER_SEC) as u32;
Duration::new(secs, nanosecs)
}
impl LoopMode {
pub const DEFAULT_RATE_FPS: f64 = 60.0;
pub const DEFAULT_UPDATES_FOLLOWING_EVENT: usize = 3;
/// Specify the **Rate** mode with the given frames-per-second.
pub fn rate_fps(fps: f64) -> Self {
let update_interval = update_interval(fps);
LoopMode::Rate { update_interval }
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Uses the default update interval.
pub fn wait(updates_following_event: usize) -> Self {
let update_interval = update_interval(Self::DEFAULT_RATE_FPS);
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Waits long enough to ensure loop iteration never occurs faster than the given `max_fps`.
pub fn wait_with_max_fps(updates_following_event: usize, max_fps: f64) -> Self {
let update_interval = update_interval(max_fps);
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Waits long enough to ensure loop iteration never occurs faster than the given `max_fps`.
pub fn wait_with_interval(updates_following_event: usize, update_interval: Duration) -> Self {
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
}
impl Default for LoopMode {
fn default() -> Self {
LoopMode::rate_fps(Self::DEFAULT_RATE_FPS)
}
}
impl App {
pub const ASSETS_DIRECTORY_NAME: &'static str = "assets";
pub const DEFAULT_EXIT_ON_ESCAPE: bool = true;
// Create a new `App`.
pub(super) fn new(events_loop: glutin::EventsLoop) -> Self {
let windows = RefCell::new(HashMap::new());
let exit_on_escape = Cell::new(Self::DEFAULT_EXIT_ON_ESCAPE);
let loop_mode = Cell::new(LoopMode::default());
let cpal_event_loop = Arc::new(cpal::EventLoop::new());
let process_fn_tx = RefCell::new(None);
let audio = Audio { event_loop: cpal_event_loop, process_fn_tx };
let ui = ui::Arrangement::new();
let mouse = state::Mouse::new();
let window = state::Window::new();
let keys = state::Keys::default();
App {
events_loop,
windows,
exit_on_escape,
loop_mode,
audio,
ui,
mouse,
window,
keys,
}
}
/// Find and return the absolute path to the project's `assets` directory.
///
/// This method looks for the assets directory in the following order:
///
/// 1. Checks the same directory as the executable.
/// 2. Recursively checks exe's parent directories (to a max depth of 5).
/// 3. Recursively checks exe's children directories (to a max depth of 3).
pub fn | (&self) -> Result<PathBuf, find_folder::Error> {
let exe_path = std::env::current_exe()?;
find_folder::Search::ParentsThenKids(5, 3)
.of(exe_path.parent().expect("executable has no parent directory to search").into())
.for_folder(Self::ASSETS_DIRECTORY_NAME)
}
/// Begin building a new OpenGL window.
pub fn new_window<'a>(&'a self) -> window::Builder<'a, 'static> {
window::Builder::new(self)
}
/// The number of windows currently in the application.
pub fn window_count(&self) -> usize {
self.windows.borrow().len()
}
/// A reference to the window with the given `Id`.
pub fn window(&self, id: window::Id) -> Option<std::cell::Ref<Window>> {
let windows = self.windows.borrow();
if !windows.contains_key(&id) {
None
} else {
Some(std::cell::Ref::map(windows, |ws| &ws[&id]))
}
}
/// Return whether or not the `App` is currently set to exit when the `Escape` key is pressed.
pub fn exit_on_escape(&self) -> bool {
self.exit_on_escape.get()
}
/// Specify whether or not the app should close when the `Escape` key is pressed.
///
/// By default this is `true`.
pub fn set_exit_on_escape(&self, b: bool) {
self.exit_on_escape.set(b);
}
/// Returns the **App**'s current **LoopMode**.
pub fn loop_mode(&self) -> LoopMode {
self.loop_mode.get()
}
/// Sets the loop mode of the **App**.
///
/// Note: Setting the loop mode will not affect anything until the end of the current loop
/// iteration. The behaviour of a single loop iteration is described under each of the
/// **LoopMode** variants.
pub fn set_loop_mode(&self, mode: LoopMode) {
self.loop_mode.set(mode);
}
/// A handle to the **App** that can be shared across threads.
///
/// This can be used to "wake up" the **App**'s inner event loop.
pub fn create_proxy(&self) -> Proxy {
let events_loop_proxy = self.events_loop.create_proxy();
Proxy { events_loop_proxy }
}
/// Create a new `Ui` for the window with the given `Id`.
///
/// Returns `None` if there is no window for the given `window_id`.
pub fn new_ui(&self, window_id: window::Id) -> ui::Builder {
ui::Builder::new(self, window_id)
}
}
impl Audio {
/// Enumerate the available audio devices on the system.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn devices(&self) -> audio::Devices {
let devices = cpal::devices();
audio::Devices { devices }
}
/// Enumerate the available audio devices on the system that support input streams.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn input_devices(&self) -> audio::stream::input::Devices {
let devices = cpal::input_devices();
audio::stream::input::Devices { devices }
}
/// Enumerate the available audio devices on the system that support output streams.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn output_devices(&self) -> audio::stream::output::Devices {
let devices = cpal::output_devices();
audio::stream::output::Devices { devices }
}
/// The current default audio input device.
pub fn default_input_device(&self) -> Option<audio::Device> {
cpal::default_input_device()
.map(|device| audio::Device { device })
}
/// The current default audio output device.
pub fn default_output_device(&self) -> Option<audio::Device> {
cpal::default_output_device()
.map(|device| audio::Device { device })
}
/// Begin building a new input audio stream.
///
/// If this is the first time a stream has been created, this method will spawn the
/// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
pub fn new_input_stream<M, F, S>(&self, model: M, capture: F)
-> audio::stream::input::Builder<M, F, S>
{
audio::stream::input::Builder {
capture,
builder: self.new_stream(model),
}
}
/// Begin building a new output audio stream.
///
/// If this is the first time a stream has been created, this method will spawn the
/// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
pub fn new_output_stream<M, F, S>(&self, model: M, render: F)
-> audio::stream::output::Builder<M, F, S>
{
audio::stream::output::Builder {
render,
builder: self.new_stream(model),
}
}
// Builder initialisation shared between input and output streams.
//
// If this is the first time a stream has been created, this method will spawn the
// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
fn new_stream<M, S>(&self, model: M) -> audio::stream::Builder<M, S> {
let process_fn_tx = if self.process_fn_tx.borrow().is_none() {
let event_loop = self.event_loop.clone();
let (tx, rx) = mpsc::channel();
let mut loop_context = audio::stream::LoopContext::new(rx);
thread::Builder::new()
.name("cpal::EventLoop::run thread".into())
.spawn(move || event_loop.run(move |id, data| loop_context.process(id, data)))
.expect("failed to spawn cpal::EventLoop::run thread");
*self.process_fn_tx.borrow_mut() = Some(tx.clone());
tx
} else {
self.process_fn_tx.borrow().as_ref().unwrap().clone()
};
audio::stream::Builder {
event_loop: self.event_loop.clone(),
process_fn_tx: process_fn_tx,
model,
sample_rate: None,
channels: None,
frames_per_buffer: None,
device: None,
sample_format: PhantomData,
}
}
}
impl Proxy {
/// Wake up the application!
///
/// This wakes up the **App**'s inner event loop and inserts an **Awakened** event.
pub fn wakeup(&self) -> Result<(), glutin::EventsLoopClosed> {
self.events_loop_proxy.wakeup()
}
}
| assets_path | identifier_name |
app.rs | use audio;
use audio::cpal;
use find_folder;
use glium::glutin;
use state;
use std;
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::{mpsc, Arc};
use std::thread;
use std::time::Duration;
use window::{self, Window};
use ui;
/// An **App** represents the entire context of your application.
///
/// The **App** owns and manages:
///
/// - the event loop (used to drive the application forward)
/// - all OpenGL windows (for graphics and user input, can be referenced via IDs).
pub struct App {
pub(crate) events_loop: glutin::EventsLoop,
pub(crate) windows: RefCell<HashMap<window::Id, Window>>,
pub(super) exit_on_escape: Cell<bool>,
pub(crate) ui: ui::Arrangement,
loop_mode: Cell<LoopMode>,
/// The `App`'s audio-related API.
pub audio: Audio,
/// The current state of the `Mouse`.
pub mouse: state::Mouse,
/// State of the window currently in focus.
pub window: state::Window,
/// State of the keyboard keys.
///
/// `mods` provides state of each of the modifier keys: `shift`, `ctrl`, `alt`, `logo`.
///
/// `down` is the set of keys that are currently pressed.
///
/// NOTE: `down` this is tracked by the nannou `App` so issues might occur if e.g. a key is
/// pressed while the app is in focus and then released when out of focus. Eventually we should
/// change this to query the OS somehow, but I don't think `winit` provides a way to do this
/// yet.
pub keys: state::Keys,
}
/// An **App**'s audio API.
pub struct Audio {
event_loop: Arc<cpal::EventLoop>,
process_fn_tx: RefCell<Option<mpsc::Sender<audio::stream::ProcessFnMsg>>>,
}
/// A handle to the **App** that can be shared across threads.
///
/// This can be used to "wake up" the **App**'s inner event loop.
pub struct Proxy {
events_loop_proxy: glutin::EventsLoopProxy,
}
/// The mode in which the **App** is currently running the event loop.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum LoopMode {
/// Specifies that the application is continuously looping at a consistent rate.
///
/// An application running in the **Rate** loop mode will behave as follows:
///
/// 1. Poll for and collect all pending user input.
/// `update` is then called with all application events that have occurred.
///
/// 2. `update` is called with an `Event::Update`.
///
/// 3. `draw` is called.
///
/// 4. Check the time and sleep for the remainder of the `update_intervale`.
Rate {
/// The minimum interval between emitted updates.
update_interval: Duration,
},
Wait {
/// The number of `update`s (and in turn `draw`s) that should occur since the application
/// last received a non-`Update` event.
updates_following_event: usize,
/// The minimum interval between emitted updates.
update_interval: Duration,
},
}
fn update_interval(fps: f64) -> Duration {
assert!(fps > 0.0);
const NANOSEC_PER_SEC: f64 = 1_000_000_000.0;
let interval_nanosecs = NANOSEC_PER_SEC / fps;
let secs = (interval_nanosecs / NANOSEC_PER_SEC) as u64;
let nanosecs = (interval_nanosecs % NANOSEC_PER_SEC) as u32;
Duration::new(secs, nanosecs)
}
impl LoopMode {
pub const DEFAULT_RATE_FPS: f64 = 60.0;
pub const DEFAULT_UPDATES_FOLLOWING_EVENT: usize = 3;
/// Specify the **Rate** mode with the given frames-per-second.
pub fn rate_fps(fps: f64) -> Self {
let update_interval = update_interval(fps);
LoopMode::Rate { update_interval }
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Uses the default update interval.
pub fn wait(updates_following_event: usize) -> Self {
let update_interval = update_interval(Self::DEFAULT_RATE_FPS);
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Waits long enough to ensure loop iteration never occurs faster than the given `max_fps`.
pub fn wait_with_max_fps(updates_following_event: usize, max_fps: f64) -> Self {
let update_interval = update_interval(max_fps);
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Waits long enough to ensure loop iteration never occurs faster than the given `max_fps`.
pub fn wait_with_interval(updates_following_event: usize, update_interval: Duration) -> Self {
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
}
impl Default for LoopMode {
fn default() -> Self {
LoopMode::rate_fps(Self::DEFAULT_RATE_FPS)
}
}
impl App {
pub const ASSETS_DIRECTORY_NAME: &'static str = "assets";
pub const DEFAULT_EXIT_ON_ESCAPE: bool = true;
// Create a new `App`.
pub(super) fn new(events_loop: glutin::EventsLoop) -> Self {
let windows = RefCell::new(HashMap::new());
let exit_on_escape = Cell::new(Self::DEFAULT_EXIT_ON_ESCAPE);
let loop_mode = Cell::new(LoopMode::default());
let cpal_event_loop = Arc::new(cpal::EventLoop::new());
let process_fn_tx = RefCell::new(None);
let audio = Audio { event_loop: cpal_event_loop, process_fn_tx };
let ui = ui::Arrangement::new();
let mouse = state::Mouse::new();
let window = state::Window::new();
let keys = state::Keys::default();
App {
events_loop,
windows,
exit_on_escape,
loop_mode,
audio,
ui,
mouse,
window,
keys,
}
}
/// Find and return the absolute path to the project's `assets` directory.
///
/// This method looks for the assets directory in the following order:
///
/// 1. Checks the same directory as the executable.
/// 2. Recursively checks exe's parent directories (to a max depth of 5).
/// 3. Recursively checks exe's children directories (to a max depth of 3).
pub fn assets_path(&self) -> Result<PathBuf, find_folder::Error> {
let exe_path = std::env::current_exe()?;
find_folder::Search::ParentsThenKids(5, 3)
.of(exe_path.parent().expect("executable has no parent directory to search").into())
.for_folder(Self::ASSETS_DIRECTORY_NAME)
}
/// Begin building a new OpenGL window.
pub fn new_window<'a>(&'a self) -> window::Builder<'a, 'static> {
window::Builder::new(self)
}
/// The number of windows currently in the application.
pub fn window_count(&self) -> usize {
self.windows.borrow().len()
}
/// A reference to the window with the given `Id`.
pub fn window(&self, id: window::Id) -> Option<std::cell::Ref<Window>> {
let windows = self.windows.borrow();
if !windows.contains_key(&id) {
None
} else {
Some(std::cell::Ref::map(windows, |ws| &ws[&id]))
}
}
/// Return whether or not the `App` is currently set to exit when the `Escape` key is pressed.
pub fn exit_on_escape(&self) -> bool {
self.exit_on_escape.get()
}
/// Specify whether or not the app should close when the `Escape` key is pressed.
///
/// By default this is `true`.
pub fn set_exit_on_escape(&self, b: bool) {
self.exit_on_escape.set(b);
}
/// Returns the **App**'s current **LoopMode**.
pub fn loop_mode(&self) -> LoopMode {
self.loop_mode.get()
}
/// Sets the loop mode of the **App**.
///
/// Note: Setting the loop mode will not affect anything until the end of the current loop
/// iteration. The behaviour of a single loop iteration is described under each of the
/// **LoopMode** variants.
pub fn set_loop_mode(&self, mode: LoopMode) {
self.loop_mode.set(mode);
}
/// A handle to the **App** that can be shared across threads.
///
/// This can be used to "wake up" the **App**'s inner event loop.
pub fn create_proxy(&self) -> Proxy {
let events_loop_proxy = self.events_loop.create_proxy();
Proxy { events_loop_proxy }
}
/// Create a new `Ui` for the window with the given `Id`.
///
/// Returns `None` if there is no window for the given `window_id`.
pub fn new_ui(&self, window_id: window::Id) -> ui::Builder {
ui::Builder::new(self, window_id)
}
}
impl Audio {
/// Enumerate the available audio devices on the system.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn devices(&self) -> audio::Devices {
let devices = cpal::devices();
audio::Devices { devices }
}
/// Enumerate the available audio devices on the system that support input streams.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn input_devices(&self) -> audio::stream::input::Devices {
let devices = cpal::input_devices();
audio::stream::input::Devices { devices }
}
/// Enumerate the available audio devices on the system that support output streams.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn output_devices(&self) -> audio::stream::output::Devices {
let devices = cpal::output_devices();
audio::stream::output::Devices { devices }
}
/// The current default audio input device.
pub fn default_input_device(&self) -> Option<audio::Device> {
cpal::default_input_device()
.map(|device| audio::Device { device })
}
/// The current default audio output device.
pub fn default_output_device(&self) -> Option<audio::Device> {
cpal::default_output_device()
.map(|device| audio::Device { device })
}
/// Begin building a new input audio stream.
///
/// If this is the first time a stream has been created, this method will spawn the
/// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
pub fn new_input_stream<M, F, S>(&self, model: M, capture: F)
-> audio::stream::input::Builder<M, F, S>
{
audio::stream::input::Builder {
capture,
builder: self.new_stream(model),
}
}
/// Begin building a new output audio stream.
///
/// If this is the first time a stream has been created, this method will spawn the
/// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
pub fn new_output_stream<M, F, S>(&self, model: M, render: F)
-> audio::stream::output::Builder<M, F, S>
{
audio::stream::output::Builder {
render,
builder: self.new_stream(model),
}
}
// Builder initialisation shared between input and output streams.
//
// If this is the first time a stream has been created, this method will spawn the
// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
fn new_stream<M, S>(&self, model: M) -> audio::stream::Builder<M, S> {
let process_fn_tx = if self.process_fn_tx.borrow().is_none() | else {
self.process_fn_tx.borrow().as_ref().unwrap().clone()
};
audio::stream::Builder {
event_loop: self.event_loop.clone(),
process_fn_tx: process_fn_tx,
model,
sample_rate: None,
channels: None,
frames_per_buffer: None,
device: None,
sample_format: PhantomData,
}
}
}
impl Proxy {
/// Wake up the application!
///
/// This wakes up the **App**'s inner event loop and inserts an **Awakened** event.
pub fn wakeup(&self) -> Result<(), glutin::EventsLoopClosed> {
self.events_loop_proxy.wakeup()
}
}
| {
let event_loop = self.event_loop.clone();
let (tx, rx) = mpsc::channel();
let mut loop_context = audio::stream::LoopContext::new(rx);
thread::Builder::new()
.name("cpal::EventLoop::run thread".into())
.spawn(move || event_loop.run(move |id, data| loop_context.process(id, data)))
.expect("failed to spawn cpal::EventLoop::run thread");
*self.process_fn_tx.borrow_mut() = Some(tx.clone());
tx
} | conditional_block |
elns.py | import json
from pathlib import Path
import ipywidgets as ipw
import requests_cache
import traitlets as tl
from aiida import orm
from aiidalab_eln import get_eln_connector
from IPython.display import clear_output, display
ELN_CONFIG = Path.home() / ".aiidalab" / "aiidalab-eln-config.json"
ELN_CONFIG.parent.mkdir(
parents=True, exist_ok=True
) # making sure that the folder exists.
def connect_to_eln(eln_instance=None, **kwargs):
# assuming that the connection can only be established to the ELNs
# with the stored configuration.
try:
with open(ELN_CONFIG) as file:
config = json.load(file)
except (FileNotFoundError, json.JSONDecodeError, KeyError):
return (
None,
f"Can't open '{ELN_CONFIG}' (ELN configuration file). Instance: {eln_instance}",
)
# If no ELN instance was specified, trying the default one.
if not eln_instance:
eln_instance = config.pop("default", None)
if eln_instance: # The ELN instance could be identified.
if eln_instance in config:
eln_config = config[eln_instance]
eln_type = eln_config.pop("eln_type", None)
else: # The selected instance is not present in the config.
return None, f"Didn't find configuration for the '{eln_instance}' instance."
# If the ELN type cannot be identified - aborting.
if not eln_type:
return None, f"Can't identify the type of {eln_instance} ELN."
# Everything is alright, can populate the ELN connector
# with the required info.
try:
eln = get_eln_connector(eln_type)(
eln_instance=eln_instance, **eln_config, **kwargs
)
except NotImplementedError as err:
return None, str(err)
eln.connect()
return eln, None
return (
None,
"No ELN instance was provided, the default ELN instance is not configured either. Set a default ELN or select an ELN instance.",
)
class ElnImportWidget(ipw.VBox):
node = tl.Instance(orm.Node, allow_none=True)
def __init__(self, path_to_root="../", **kwargs):
# Used to output additional settings.
self._output = ipw.Output()
# Communicate to the user if something isn't right.
error_message = ipw.HTML()
super().__init__(children=[error_message], **kwargs)
eln, msg = connect_to_eln(**kwargs)
if eln is None:
url = f"{path_to_root}aiidalab-widgets-base/notebooks/eln_configure.ipynb"
error_message.value = f"""Warning! The access to ELN is not configured. Please follow <a href="{url}" target="_blank">the link</a> to configure it.</br> More details: {msg}"""
return
tl.dlink((eln, "node"), (self, "node"))
with requests_cache.disabled():
# Since the cache is enabled in AiiDAlab, we disable it here to get correct results.
eln.import_data()
class ElnExportWidget(ipw.VBox):
node = tl.Instance(orm.Node, allow_none=True)
def __init__(self, path_to_root="../", **kwargs):
self.path_to_root = path_to_root
# Send to ELN button.
send_button = ipw.Button(description="Send to ELN")
send_button.on_click(self.send_to_eln)
# Use non-default destination.
self.modify_settings = ipw.Checkbox(
description="Update destination.", indent=False
)
self.modify_settings.observe(self.handle_output, "value")
# Used to output additional settings.
self._output = ipw.Output()
# Communicate to the user if something isn't right.
self.message = ipw.HTML()
children = [
ipw.HBox([send_button, self.modify_settings]),
self._output,
self.message, | else:
self.modify_settings.disabled = True
send_button.disabled = True
self.message.value = f"""Warning! The access to ELN is not configured. Please follow <a href="{self.path_to_root}/aiidalab-widgets-base/notebooks/eln_configure.ipynb" target="_blank">the link</a> to configure it.</br> </br> More details: {msg}"""
super().__init__(children=children, **kwargs)
@tl.observe("node")
def _observe_node(self, _=None):
if self.node is None or self.eln is None:
return
if "eln" in self.node.extras:
info = self.node.extras["eln"]
else:
try:
q = orm.QueryBuilder().append(
orm.Node,
filters={"extras": {"has_key": "eln"}},
tag="source_node",
project="extras.eln",
)
q.append(
orm.Node,
filters={"uuid": self.node.uuid},
with_ancestors="source_node",
)
info = q.all(flat=True)[0]
except IndexError:
info = {}
self.eln.set_sample_config(**info)
def send_to_eln(self, _=None):
if self.eln and self.eln.is_connected:
self.message.value = f"\u29D7 Sending data to {self.eln.eln_instance}..."
with requests_cache.disabled():
# Since the cache is enabled in AiiDAlab, we disable it here to get correct results.
self.eln.export_data()
self.message.value = (
f"\u2705 The data were successfully sent to {self.eln.eln_instance}."
)
else:
self.message.value = f"""\u274C Something isn't right! We were not able to send the data to the "<strong>{self.eln.eln_instance}</strong>" ELN instance. Please follow <a href="{self.path_to_root}/aiidalab-widgets-base/notebooks/eln_configure.ipynb" target="_blank">the link</a> to update the ELN's configuration."""
def handle_output(self, _=None):
with self._output:
clear_output()
if self.modify_settings.value:
display(
ipw.HTML(
f"""Currently used ELN is: "<strong>{self.eln.eln_instance}</strong>". To change it, please follow <a href="{self.path_to_root}/aiidalab-widgets-base/notebooks/eln_configure.ipynb" target="_blank">the link</a>."""
)
)
display(self.eln.sample_config_editor())
class ElnConfigureWidget(ipw.VBox):
def __init__(self, **kwargs):
self._output = ipw.Output()
self.eln = None
self.eln_instance = ipw.Dropdown(
description="ELN:",
options=("Set up new ELN", {}),
style={"description_width": "initial"},
)
self.update_list_of_elns()
self.eln_instance.observe(self.display_eln_config, names=["value", "options"])
self.eln_types = ipw.Dropdown(
description="ELN type:",
options=["cheminfo", "openbis"],
value="cheminfo",
style={"description_width": "initial"},
)
self.eln_types.observe(self.display_eln_config, names=["value", "options"])
# Buttons.
# Make current ELN the default.
default_button = ipw.Button(description="Set as default", button_style="info")
default_button.on_click(self.set_current_eln_as_default)
# Save current ELN configuration.
save_config = ipw.Button(
description="Save configuration", button_style="success"
)
save_config.on_click(self.save_eln_configuration)
# Erase current ELN from the configuration.
erase_config = ipw.Button(
description="Erase configuration", button_style="danger"
)
erase_config.on_click(self.erase_current_eln_from_configuration)
# Check if connection to the current ELN can be established.
check_connection = ipw.Button(
description="Check connection", button_style="warning"
)
check_connection.on_click(self.check_connection)
self.my_output = ipw.HTML()
self.display_eln_config()
super().__init__(
children=[
self.eln_instance,
self.eln_types,
self._output,
ipw.HBox([default_button, save_config, erase_config, check_connection]),
self.my_output,
],
**kwargs,
)
def write_to_config(self, config):
with open(ELN_CONFIG, "w") as file:
json.dump(config, file, indent=4)
def get_config(self):
try:
with open(ELN_CONFIG) as file:
return json.load(file)
except (FileNotFoundError, json.JSONDecodeError, KeyError):
return {}
def update_list_of_elns(self):
config = self.get_config()
default_eln = config.pop("default", None)
if (
default_eln not in config
): # Erase the default ELN if it is not present in the config
self.write_to_config(config)
default_eln = None
self.eln_instance.options = [("Setup new ELN", {})] + [
(k, v) for k, v in config.items()
]
if default_eln:
self.eln_instance.label = default_eln
def set_current_eln_as_default(self, _=None):
self.update_eln_configuration("default", self.eln_instance.label)
def update_eln_configuration(self, eln_instance, eln_config):
config = self.get_config()
config[eln_instance] = eln_config
self.write_to_config(config)
def erase_current_eln_from_configuration(self, _=None):
config = self.get_config()
config.pop(self.eln_instance.label, None)
self.write_to_config(config)
self.update_list_of_elns()
def check_connection(self, _=None):
if self.eln:
err_message = self.eln.connect()
if self.eln.is_connected:
self.my_output.value = "\u2705 Connected."
return
self.my_output.value = f"\u274C Not connected. {err_message}"
def display_eln_config(self, value=None):
"""Display ELN configuration specific to the selected type of ELN."""
try:
eln_class = get_eln_connector(self.eln_types.value)
except NotImplementedError as err:
with self._output:
clear_output()
display(ipw.HTML("❌" + str(err)))
return
self.eln = eln_class(
eln_instance=self.eln_instance.label if self.eln_instance.value else "",
**self.eln_instance.value,
)
if self.eln_instance.value:
self.eln_types.value = self.eln.eln_type
self.eln_types.disabled = True
else:
self.eln_types.disabled = False
with self._output:
clear_output()
display(self.eln)
def save_eln_configuration(self, _=None):
config = self.eln.get_config()
eln_instance = config.pop("eln_instance")
if eln_instance:
self.update_eln_configuration(eln_instance, config)
self.update_list_of_elns() | ]
self.eln, msg = connect_to_eln()
if self.eln:
tl.dlink((self, "node"), (self.eln, "node")) | random_line_split |
elns.py | import json
from pathlib import Path
import ipywidgets as ipw
import requests_cache
import traitlets as tl
from aiida import orm
from aiidalab_eln import get_eln_connector
from IPython.display import clear_output, display
ELN_CONFIG = Path.home() / ".aiidalab" / "aiidalab-eln-config.json"
ELN_CONFIG.parent.mkdir(
parents=True, exist_ok=True
) # making sure that the folder exists.
def connect_to_eln(eln_instance=None, **kwargs):
# assuming that the connection can only be established to the ELNs
# with the stored configuration.
try:
with open(ELN_CONFIG) as file:
config = json.load(file)
except (FileNotFoundError, json.JSONDecodeError, KeyError):
return (
None,
f"Can't open '{ELN_CONFIG}' (ELN configuration file). Instance: {eln_instance}",
)
# If no ELN instance was specified, trying the default one.
if not eln_instance:
eln_instance = config.pop("default", None)
if eln_instance: # The ELN instance could be identified.
|
return (
None,
"No ELN instance was provided, the default ELN instance is not configured either. Set a default ELN or select an ELN instance.",
)
class ElnImportWidget(ipw.VBox):
node = tl.Instance(orm.Node, allow_none=True)
def __init__(self, path_to_root="../", **kwargs):
# Used to output additional settings.
self._output = ipw.Output()
# Communicate to the user if something isn't right.
error_message = ipw.HTML()
super().__init__(children=[error_message], **kwargs)
eln, msg = connect_to_eln(**kwargs)
if eln is None:
url = f"{path_to_root}aiidalab-widgets-base/notebooks/eln_configure.ipynb"
error_message.value = f"""Warning! The access to ELN is not configured. Please follow <a href="{url}" target="_blank">the link</a> to configure it.</br> More details: {msg}"""
return
tl.dlink((eln, "node"), (self, "node"))
with requests_cache.disabled():
# Since the cache is enabled in AiiDAlab, we disable it here to get correct results.
eln.import_data()
class ElnExportWidget(ipw.VBox):
node = tl.Instance(orm.Node, allow_none=True)
def __init__(self, path_to_root="../", **kwargs):
self.path_to_root = path_to_root
# Send to ELN button.
send_button = ipw.Button(description="Send to ELN")
send_button.on_click(self.send_to_eln)
# Use non-default destination.
self.modify_settings = ipw.Checkbox(
description="Update destination.", indent=False
)
self.modify_settings.observe(self.handle_output, "value")
# Used to output additional settings.
self._output = ipw.Output()
# Communicate to the user if something isn't right.
self.message = ipw.HTML()
children = [
ipw.HBox([send_button, self.modify_settings]),
self._output,
self.message,
]
self.eln, msg = connect_to_eln()
if self.eln:
tl.dlink((self, "node"), (self.eln, "node"))
else:
self.modify_settings.disabled = True
send_button.disabled = True
self.message.value = f"""Warning! The access to ELN is not configured. Please follow <a href="{self.path_to_root}/aiidalab-widgets-base/notebooks/eln_configure.ipynb" target="_blank">the link</a> to configure it.</br> </br> More details: {msg}"""
super().__init__(children=children, **kwargs)
@tl.observe("node")
def _observe_node(self, _=None):
if self.node is None or self.eln is None:
return
if "eln" in self.node.extras:
info = self.node.extras["eln"]
else:
try:
q = orm.QueryBuilder().append(
orm.Node,
filters={"extras": {"has_key": "eln"}},
tag="source_node",
project="extras.eln",
)
q.append(
orm.Node,
filters={"uuid": self.node.uuid},
with_ancestors="source_node",
)
info = q.all(flat=True)[0]
except IndexError:
info = {}
self.eln.set_sample_config(**info)
def send_to_eln(self, _=None):
if self.eln and self.eln.is_connected:
self.message.value = f"\u29D7 Sending data to {self.eln.eln_instance}..."
with requests_cache.disabled():
# Since the cache is enabled in AiiDAlab, we disable it here to get correct results.
self.eln.export_data()
self.message.value = (
f"\u2705 The data were successfully sent to {self.eln.eln_instance}."
)
else:
self.message.value = f"""\u274C Something isn't right! We were not able to send the data to the "<strong>{self.eln.eln_instance}</strong>" ELN instance. Please follow <a href="{self.path_to_root}/aiidalab-widgets-base/notebooks/eln_configure.ipynb" target="_blank">the link</a> to update the ELN's configuration."""
def handle_output(self, _=None):
with self._output:
clear_output()
if self.modify_settings.value:
display(
ipw.HTML(
f"""Currently used ELN is: "<strong>{self.eln.eln_instance}</strong>". To change it, please follow <a href="{self.path_to_root}/aiidalab-widgets-base/notebooks/eln_configure.ipynb" target="_blank">the link</a>."""
)
)
display(self.eln.sample_config_editor())
class ElnConfigureWidget(ipw.VBox):
def __init__(self, **kwargs):
self._output = ipw.Output()
self.eln = None
self.eln_instance = ipw.Dropdown(
description="ELN:",
options=("Set up new ELN", {}),
style={"description_width": "initial"},
)
self.update_list_of_elns()
self.eln_instance.observe(self.display_eln_config, names=["value", "options"])
self.eln_types = ipw.Dropdown(
description="ELN type:",
options=["cheminfo", "openbis"],
value="cheminfo",
style={"description_width": "initial"},
)
self.eln_types.observe(self.display_eln_config, names=["value", "options"])
# Buttons.
# Make current ELN the default.
default_button = ipw.Button(description="Set as default", button_style="info")
default_button.on_click(self.set_current_eln_as_default)
# Save current ELN configuration.
save_config = ipw.Button(
description="Save configuration", button_style="success"
)
save_config.on_click(self.save_eln_configuration)
# Erase current ELN from the configuration.
erase_config = ipw.Button(
description="Erase configuration", button_style="danger"
)
erase_config.on_click(self.erase_current_eln_from_configuration)
# Check if connection to the current ELN can be established.
check_connection = ipw.Button(
description="Check connection", button_style="warning"
)
check_connection.on_click(self.check_connection)
self.my_output = ipw.HTML()
self.display_eln_config()
super().__init__(
children=[
self.eln_instance,
self.eln_types,
self._output,
ipw.HBox([default_button, save_config, erase_config, check_connection]),
self.my_output,
],
**kwargs,
)
def write_to_config(self, config):
with open(ELN_CONFIG, "w") as file:
json.dump(config, file, indent=4)
def get_config(self):
try:
with open(ELN_CONFIG) as file:
return json.load(file)
except (FileNotFoundError, json.JSONDecodeError, KeyError):
return {}
def update_list_of_elns(self):
config = self.get_config()
default_eln = config.pop("default", None)
if (
default_eln not in config
): # Erase the default ELN if it is not present in the config
self.write_to_config(config)
default_eln = None
self.eln_instance.options = [("Setup new ELN", {})] + [
(k, v) for k, v in config.items()
]
if default_eln:
self.eln_instance.label = default_eln
def set_current_eln_as_default(self, _=None):
self.update_eln_configuration("default", self.eln_instance.label)
def update_eln_configuration(self, eln_instance, eln_config):
config = self.get_config()
config[eln_instance] = eln_config
self.write_to_config(config)
def erase_current_eln_from_configuration(self, _=None):
config = self.get_config()
config.pop(self.eln_instance.label, None)
self.write_to_config(config)
self.update_list_of_elns()
def check_connection(self, _=None):
if self.eln:
err_message = self.eln.connect()
if self.eln.is_connected:
self.my_output.value = "\u2705 Connected."
return
self.my_output.value = f"\u274C Not connected. {err_message}"
def display_eln_config(self, value=None):
"""Display ELN configuration specific to the selected type of ELN."""
try:
eln_class = get_eln_connector(self.eln_types.value)
except NotImplementedError as err:
with self._output:
clear_output()
display(ipw.HTML("❌" + str(err)))
return
self.eln = eln_class(
eln_instance=self.eln_instance.label if self.eln_instance.value else "",
**self.eln_instance.value,
)
if self.eln_instance.value:
self.eln_types.value = self.eln.eln_type
self.eln_types.disabled = True
else:
self.eln_types.disabled = False
with self._output:
clear_output()
display(self.eln)
def save_eln_configuration(self, _=None):
config = self.eln.get_config()
eln_instance = config.pop("eln_instance")
if eln_instance:
self.update_eln_configuration(eln_instance, config)
self.update_list_of_elns()
| if eln_instance in config:
eln_config = config[eln_instance]
eln_type = eln_config.pop("eln_type", None)
else: # The selected instance is not present in the config.
return None, f"Didn't find configuration for the '{eln_instance}' instance."
# If the ELN type cannot be identified - aborting.
if not eln_type:
return None, f"Can't identify the type of {eln_instance} ELN."
# Everything is alright, can populate the ELN connector
# with the required info.
try:
eln = get_eln_connector(eln_type)(
eln_instance=eln_instance, **eln_config, **kwargs
)
except NotImplementedError as err:
return None, str(err)
eln.connect()
return eln, None | conditional_block |
elns.py | import json
from pathlib import Path
import ipywidgets as ipw
import requests_cache
import traitlets as tl
from aiida import orm
from aiidalab_eln import get_eln_connector
from IPython.display import clear_output, display
ELN_CONFIG = Path.home() / ".aiidalab" / "aiidalab-eln-config.json"
ELN_CONFIG.parent.mkdir(
parents=True, exist_ok=True
) # making sure that the folder exists.
def connect_to_eln(eln_instance=None, **kwargs):
# assuming that the connection can only be established to the ELNs
# with the stored configuration.
try:
with open(ELN_CONFIG) as file:
config = json.load(file)
except (FileNotFoundError, json.JSONDecodeError, KeyError):
return (
None,
f"Can't open '{ELN_CONFIG}' (ELN configuration file). Instance: {eln_instance}",
)
# If no ELN instance was specified, trying the default one.
if not eln_instance:
eln_instance = config.pop("default", None)
if eln_instance: # The ELN instance could be identified.
if eln_instance in config:
eln_config = config[eln_instance]
eln_type = eln_config.pop("eln_type", None)
else: # The selected instance is not present in the config.
return None, f"Didn't find configuration for the '{eln_instance}' instance."
# If the ELN type cannot be identified - aborting.
if not eln_type:
return None, f"Can't identify the type of {eln_instance} ELN."
# Everything is alright, can populate the ELN connector
# with the required info.
try:
eln = get_eln_connector(eln_type)(
eln_instance=eln_instance, **eln_config, **kwargs
)
except NotImplementedError as err:
return None, str(err)
eln.connect()
return eln, None
return (
None,
"No ELN instance was provided, the default ELN instance is not configured either. Set a default ELN or select an ELN instance.",
)
class ElnImportWidget(ipw.VBox):
node = tl.Instance(orm.Node, allow_none=True)
def __init__(self, path_to_root="../", **kwargs):
# Used to output additional settings.
self._output = ipw.Output()
# Communicate to the user if something isn't right.
error_message = ipw.HTML()
super().__init__(children=[error_message], **kwargs)
eln, msg = connect_to_eln(**kwargs)
if eln is None:
url = f"{path_to_root}aiidalab-widgets-base/notebooks/eln_configure.ipynb"
error_message.value = f"""Warning! The access to ELN is not configured. Please follow <a href="{url}" target="_blank">the link</a> to configure it.</br> More details: {msg}"""
return
tl.dlink((eln, "node"), (self, "node"))
with requests_cache.disabled():
# Since the cache is enabled in AiiDAlab, we disable it here to get correct results.
eln.import_data()
class ElnExportWidget(ipw.VBox):
node = tl.Instance(orm.Node, allow_none=True)
def __init__(self, path_to_root="../", **kwargs):
self.path_to_root = path_to_root
# Send to ELN button.
send_button = ipw.Button(description="Send to ELN")
send_button.on_click(self.send_to_eln)
# Use non-default destination.
self.modify_settings = ipw.Checkbox(
description="Update destination.", indent=False
)
self.modify_settings.observe(self.handle_output, "value")
# Used to output additional settings.
self._output = ipw.Output()
# Communicate to the user if something isn't right.
self.message = ipw.HTML()
children = [
ipw.HBox([send_button, self.modify_settings]),
self._output,
self.message,
]
self.eln, msg = connect_to_eln()
if self.eln:
tl.dlink((self, "node"), (self.eln, "node"))
else:
self.modify_settings.disabled = True
send_button.disabled = True
self.message.value = f"""Warning! The access to ELN is not configured. Please follow <a href="{self.path_to_root}/aiidalab-widgets-base/notebooks/eln_configure.ipynb" target="_blank">the link</a> to configure it.</br> </br> More details: {msg}"""
super().__init__(children=children, **kwargs)
@tl.observe("node")
def _observe_node(self, _=None):
if self.node is None or self.eln is None:
return
if "eln" in self.node.extras:
info = self.node.extras["eln"]
else:
try:
q = orm.QueryBuilder().append(
orm.Node,
filters={"extras": {"has_key": "eln"}},
tag="source_node",
project="extras.eln",
)
q.append(
orm.Node,
filters={"uuid": self.node.uuid},
with_ancestors="source_node",
)
info = q.all(flat=True)[0]
except IndexError:
info = {}
self.eln.set_sample_config(**info)
def send_to_eln(self, _=None):
if self.eln and self.eln.is_connected:
self.message.value = f"\u29D7 Sending data to {self.eln.eln_instance}..."
with requests_cache.disabled():
# Since the cache is enabled in AiiDAlab, we disable it here to get correct results.
self.eln.export_data()
self.message.value = (
f"\u2705 The data were successfully sent to {self.eln.eln_instance}."
)
else:
self.message.value = f"""\u274C Something isn't right! We were not able to send the data to the "<strong>{self.eln.eln_instance}</strong>" ELN instance. Please follow <a href="{self.path_to_root}/aiidalab-widgets-base/notebooks/eln_configure.ipynb" target="_blank">the link</a> to update the ELN's configuration."""
def handle_output(self, _=None):
with self._output:
clear_output()
if self.modify_settings.value:
display(
ipw.HTML(
f"""Currently used ELN is: "<strong>{self.eln.eln_instance}</strong>". To change it, please follow <a href="{self.path_to_root}/aiidalab-widgets-base/notebooks/eln_configure.ipynb" target="_blank">the link</a>."""
)
)
display(self.eln.sample_config_editor())
class ElnConfigureWidget(ipw.VBox):
def __init__(self, **kwargs):
self._output = ipw.Output()
self.eln = None
self.eln_instance = ipw.Dropdown(
description="ELN:",
options=("Set up new ELN", {}),
style={"description_width": "initial"},
)
self.update_list_of_elns()
self.eln_instance.observe(self.display_eln_config, names=["value", "options"])
self.eln_types = ipw.Dropdown(
description="ELN type:",
options=["cheminfo", "openbis"],
value="cheminfo",
style={"description_width": "initial"},
)
self.eln_types.observe(self.display_eln_config, names=["value", "options"])
# Buttons.
# Make current ELN the default.
default_button = ipw.Button(description="Set as default", button_style="info")
default_button.on_click(self.set_current_eln_as_default)
# Save current ELN configuration.
save_config = ipw.Button(
description="Save configuration", button_style="success"
)
save_config.on_click(self.save_eln_configuration)
# Erase current ELN from the configuration.
erase_config = ipw.Button(
description="Erase configuration", button_style="danger"
)
erase_config.on_click(self.erase_current_eln_from_configuration)
# Check if connection to the current ELN can be established.
check_connection = ipw.Button(
description="Check connection", button_style="warning"
)
check_connection.on_click(self.check_connection)
self.my_output = ipw.HTML()
self.display_eln_config()
super().__init__(
children=[
self.eln_instance,
self.eln_types,
self._output,
ipw.HBox([default_button, save_config, erase_config, check_connection]),
self.my_output,
],
**kwargs,
)
def write_to_config(self, config):
with open(ELN_CONFIG, "w") as file:
json.dump(config, file, indent=4)
def get_config(self):
try:
with open(ELN_CONFIG) as file:
return json.load(file)
except (FileNotFoundError, json.JSONDecodeError, KeyError):
return {}
def | (self):
config = self.get_config()
default_eln = config.pop("default", None)
if (
default_eln not in config
): # Erase the default ELN if it is not present in the config
self.write_to_config(config)
default_eln = None
self.eln_instance.options = [("Setup new ELN", {})] + [
(k, v) for k, v in config.items()
]
if default_eln:
self.eln_instance.label = default_eln
def set_current_eln_as_default(self, _=None):
self.update_eln_configuration("default", self.eln_instance.label)
def update_eln_configuration(self, eln_instance, eln_config):
config = self.get_config()
config[eln_instance] = eln_config
self.write_to_config(config)
def erase_current_eln_from_configuration(self, _=None):
config = self.get_config()
config.pop(self.eln_instance.label, None)
self.write_to_config(config)
self.update_list_of_elns()
def check_connection(self, _=None):
if self.eln:
err_message = self.eln.connect()
if self.eln.is_connected:
self.my_output.value = "\u2705 Connected."
return
self.my_output.value = f"\u274C Not connected. {err_message}"
def display_eln_config(self, value=None):
"""Display ELN configuration specific to the selected type of ELN."""
try:
eln_class = get_eln_connector(self.eln_types.value)
except NotImplementedError as err:
with self._output:
clear_output()
display(ipw.HTML("❌" + str(err)))
return
self.eln = eln_class(
eln_instance=self.eln_instance.label if self.eln_instance.value else "",
**self.eln_instance.value,
)
if self.eln_instance.value:
self.eln_types.value = self.eln.eln_type
self.eln_types.disabled = True
else:
self.eln_types.disabled = False
with self._output:
clear_output()
display(self.eln)
def save_eln_configuration(self, _=None):
config = self.eln.get_config()
eln_instance = config.pop("eln_instance")
if eln_instance:
self.update_eln_configuration(eln_instance, config)
self.update_list_of_elns()
| update_list_of_elns | identifier_name |
elns.py | import json
from pathlib import Path
import ipywidgets as ipw
import requests_cache
import traitlets as tl
from aiida import orm
from aiidalab_eln import get_eln_connector
from IPython.display import clear_output, display
ELN_CONFIG = Path.home() / ".aiidalab" / "aiidalab-eln-config.json"
ELN_CONFIG.parent.mkdir(
parents=True, exist_ok=True
) # making sure that the folder exists.
def connect_to_eln(eln_instance=None, **kwargs):
# assuming that the connection can only be established to the ELNs
# with the stored configuration.
try:
with open(ELN_CONFIG) as file:
config = json.load(file)
except (FileNotFoundError, json.JSONDecodeError, KeyError):
return (
None,
f"Can't open '{ELN_CONFIG}' (ELN configuration file). Instance: {eln_instance}",
)
# If no ELN instance was specified, trying the default one.
if not eln_instance:
eln_instance = config.pop("default", None)
if eln_instance: # The ELN instance could be identified.
if eln_instance in config:
eln_config = config[eln_instance]
eln_type = eln_config.pop("eln_type", None)
else: # The selected instance is not present in the config.
return None, f"Didn't find configuration for the '{eln_instance}' instance."
# If the ELN type cannot be identified - aborting.
if not eln_type:
return None, f"Can't identify the type of {eln_instance} ELN."
# Everything is alright, can populate the ELN connector
# with the required info.
try:
eln = get_eln_connector(eln_type)(
eln_instance=eln_instance, **eln_config, **kwargs
)
except NotImplementedError as err:
return None, str(err)
eln.connect()
return eln, None
return (
None,
"No ELN instance was provided, the default ELN instance is not configured either. Set a default ELN or select an ELN instance.",
)
class ElnImportWidget(ipw.VBox):
node = tl.Instance(orm.Node, allow_none=True)
def __init__(self, path_to_root="../", **kwargs):
# Used to output additional settings.
self._output = ipw.Output()
# Communicate to the user if something isn't right.
error_message = ipw.HTML()
super().__init__(children=[error_message], **kwargs)
eln, msg = connect_to_eln(**kwargs)
if eln is None:
url = f"{path_to_root}aiidalab-widgets-base/notebooks/eln_configure.ipynb"
error_message.value = f"""Warning! The access to ELN is not configured. Please follow <a href="{url}" target="_blank">the link</a> to configure it.</br> More details: {msg}"""
return
tl.dlink((eln, "node"), (self, "node"))
with requests_cache.disabled():
# Since the cache is enabled in AiiDAlab, we disable it here to get correct results.
eln.import_data()
class ElnExportWidget(ipw.VBox):
node = tl.Instance(orm.Node, allow_none=True)
def __init__(self, path_to_root="../", **kwargs):
self.path_to_root = path_to_root
# Send to ELN button.
send_button = ipw.Button(description="Send to ELN")
send_button.on_click(self.send_to_eln)
# Use non-default destination.
self.modify_settings = ipw.Checkbox(
description="Update destination.", indent=False
)
self.modify_settings.observe(self.handle_output, "value")
# Used to output additional settings.
self._output = ipw.Output()
# Communicate to the user if something isn't right.
self.message = ipw.HTML()
children = [
ipw.HBox([send_button, self.modify_settings]),
self._output,
self.message,
]
self.eln, msg = connect_to_eln()
if self.eln:
tl.dlink((self, "node"), (self.eln, "node"))
else:
self.modify_settings.disabled = True
send_button.disabled = True
self.message.value = f"""Warning! The access to ELN is not configured. Please follow <a href="{self.path_to_root}/aiidalab-widgets-base/notebooks/eln_configure.ipynb" target="_blank">the link</a> to configure it.</br> </br> More details: {msg}"""
super().__init__(children=children, **kwargs)
@tl.observe("node")
def _observe_node(self, _=None):
if self.node is None or self.eln is None:
return
if "eln" in self.node.extras:
info = self.node.extras["eln"]
else:
try:
q = orm.QueryBuilder().append(
orm.Node,
filters={"extras": {"has_key": "eln"}},
tag="source_node",
project="extras.eln",
)
q.append(
orm.Node,
filters={"uuid": self.node.uuid},
with_ancestors="source_node",
)
info = q.all(flat=True)[0]
except IndexError:
info = {}
self.eln.set_sample_config(**info)
def send_to_eln(self, _=None):
if self.eln and self.eln.is_connected:
self.message.value = f"\u29D7 Sending data to {self.eln.eln_instance}..."
with requests_cache.disabled():
# Since the cache is enabled in AiiDAlab, we disable it here to get correct results.
self.eln.export_data()
self.message.value = (
f"\u2705 The data were successfully sent to {self.eln.eln_instance}."
)
else:
self.message.value = f"""\u274C Something isn't right! We were not able to send the data to the "<strong>{self.eln.eln_instance}</strong>" ELN instance. Please follow <a href="{self.path_to_root}/aiidalab-widgets-base/notebooks/eln_configure.ipynb" target="_blank">the link</a> to update the ELN's configuration."""
def handle_output(self, _=None):
with self._output:
clear_output()
if self.modify_settings.value:
display(
ipw.HTML(
f"""Currently used ELN is: "<strong>{self.eln.eln_instance}</strong>". To change it, please follow <a href="{self.path_to_root}/aiidalab-widgets-base/notebooks/eln_configure.ipynb" target="_blank">the link</a>."""
)
)
display(self.eln.sample_config_editor())
class ElnConfigureWidget(ipw.VBox):
def __init__(self, **kwargs):
self._output = ipw.Output()
self.eln = None
self.eln_instance = ipw.Dropdown(
description="ELN:",
options=("Set up new ELN", {}),
style={"description_width": "initial"},
)
self.update_list_of_elns()
self.eln_instance.observe(self.display_eln_config, names=["value", "options"])
self.eln_types = ipw.Dropdown(
description="ELN type:",
options=["cheminfo", "openbis"],
value="cheminfo",
style={"description_width": "initial"},
)
self.eln_types.observe(self.display_eln_config, names=["value", "options"])
# Buttons.
# Make current ELN the default.
default_button = ipw.Button(description="Set as default", button_style="info")
default_button.on_click(self.set_current_eln_as_default)
# Save current ELN configuration.
save_config = ipw.Button(
description="Save configuration", button_style="success"
)
save_config.on_click(self.save_eln_configuration)
# Erase current ELN from the configuration.
erase_config = ipw.Button(
description="Erase configuration", button_style="danger"
)
erase_config.on_click(self.erase_current_eln_from_configuration)
# Check if connection to the current ELN can be established.
check_connection = ipw.Button(
description="Check connection", button_style="warning"
)
check_connection.on_click(self.check_connection)
self.my_output = ipw.HTML()
self.display_eln_config()
super().__init__(
children=[
self.eln_instance,
self.eln_types,
self._output,
ipw.HBox([default_button, save_config, erase_config, check_connection]),
self.my_output,
],
**kwargs,
)
def write_to_config(self, config):
|
def get_config(self):
try:
with open(ELN_CONFIG) as file:
return json.load(file)
except (FileNotFoundError, json.JSONDecodeError, KeyError):
return {}
def update_list_of_elns(self):
config = self.get_config()
default_eln = config.pop("default", None)
if (
default_eln not in config
): # Erase the default ELN if it is not present in the config
self.write_to_config(config)
default_eln = None
self.eln_instance.options = [("Setup new ELN", {})] + [
(k, v) for k, v in config.items()
]
if default_eln:
self.eln_instance.label = default_eln
def set_current_eln_as_default(self, _=None):
self.update_eln_configuration("default", self.eln_instance.label)
def update_eln_configuration(self, eln_instance, eln_config):
config = self.get_config()
config[eln_instance] = eln_config
self.write_to_config(config)
def erase_current_eln_from_configuration(self, _=None):
config = self.get_config()
config.pop(self.eln_instance.label, None)
self.write_to_config(config)
self.update_list_of_elns()
def check_connection(self, _=None):
if self.eln:
err_message = self.eln.connect()
if self.eln.is_connected:
self.my_output.value = "\u2705 Connected."
return
self.my_output.value = f"\u274C Not connected. {err_message}"
def display_eln_config(self, value=None):
"""Display ELN configuration specific to the selected type of ELN."""
try:
eln_class = get_eln_connector(self.eln_types.value)
except NotImplementedError as err:
with self._output:
clear_output()
display(ipw.HTML("❌" + str(err)))
return
self.eln = eln_class(
eln_instance=self.eln_instance.label if self.eln_instance.value else "",
**self.eln_instance.value,
)
if self.eln_instance.value:
self.eln_types.value = self.eln.eln_type
self.eln_types.disabled = True
else:
self.eln_types.disabled = False
with self._output:
clear_output()
display(self.eln)
def save_eln_configuration(self, _=None):
config = self.eln.get_config()
eln_instance = config.pop("eln_instance")
if eln_instance:
self.update_eln_configuration(eln_instance, config)
self.update_list_of_elns()
| with open(ELN_CONFIG, "w") as file:
json.dump(config, file, indent=4) | identifier_body |
list_buffer.go | package item
import (
"fmt"
"math"
"github.com/phil-mansfield/rogue/error"
)
// BufferIndex is an integer type used to index into ListBuffer.
//
// Since BufferIndex may be changed to different size or to a type of unknown
// signage, all BufferIndex literals must be constructed from 0, 1,
// MaxBufferCount, and NilIndex alone.
type BufferIndex int16
// Note that the usage of "Count" and "Length" in constant names *is* actually
// consistent.
const (
// MaxBufferCount is the largest possible value of ListBuffer.Count.
MaxBufferCount = math.MaxInt16
// NilIndex is a sentinel ListBuffer index value. It is analogous to a a
// nil pointer.
NilIndex = -1
// defaultBufferLength is the length of an empty ListBuffer.
defaultBufferLength = 1 << 8
)
// Node is a wrapper around the type Item which allows it to be an element
// in a linked list.
//
// Next and Prev reference the indices of other Items within the same instance
// of ListBuffer.
type Node struct {
Item Item
Next, Prev BufferIndex
}
// ListBuffer is a data structure which represents numerous lists of items.
type ListBuffer struct {
FreeHead BufferIndex
Buffer []Node
Count BufferIndex
}
// New creates a new ListBuffer instance.
func New() *ListBuffer {
buf := new(ListBuffer)
buf.Init()
return buf
}
// Init initializes a blank ListBuffer instance.
func (buf *ListBuffer) Init() |
// Singleton creates a singleton list containing only the given item.
//
// Singleton returns an error if it is passed
// an uninitialized item, or if the buf is full.
//
// It is correct to call buf.IsFull() prior to all calls to
// buf.Singleton(), since it is not possible to switch upon the type of error
// to identify whether the error has a recoverable cause.
func (buf *ListBuffer) Singleton(item Item) (BufferIndex, *error.Error) {
if buf.IsFull() {
desc := fmt.Sprintf(
"buf has reached maximum capacity of %d Items.",
MaxBufferCount,
)
return NilIndex, error.New(error.Value, desc)
} else if item.Type == Uninitialized {
return NilIndex, error.New(error.Value, "item is uninitialized.")
}
return buf.internalSingleton(item), nil
}
func (buf *ListBuffer) internalSingleton(item Item) BufferIndex {
if buf.Count == BufferIndex(len(buf.Buffer)) {
buf.Buffer = append(buf.Buffer, Node{item, NilIndex, NilIndex})
buf.Count++
return BufferIndex(len(buf.Buffer) - 1)
}
idx := buf.FreeHead
buf.Buffer[idx].Item = item
buf.FreeHead = buf.Buffer[idx].Next
buf.internalUnlink(idx)
buf.Count++
return idx
}
// Link connects the items at indices prev and next so that the item at prev
// comes before the item at next.
//
// Link returns an error if prev or next are not valid indices into buf or if
// the linking would break a pre-existing list or if one of the indices accesses
// a .
func (buf *ListBuffer) Link(prev, next BufferIndex) *error.Error {
// If your functions don't have 50 lines of error handling for two lines
// of state altering-code, you aren't cautious enough.
inRange, initialized := buf.legalIndex(prev)
if !inRange {
desc := fmt.Sprintf(
"prev, %d, is out of range for IndexBuffer of length %d.",
prev, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at prev, %d, has the Type value Uninitialized.", prev,
)
return error.New(error.Value, desc)
}
inRange, initialized = buf.legalIndex(next)
if !inRange {
desc := fmt.Sprintf(
"next, %d, is out of range for IndexBuffer of length %d.",
next, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at next, %d, has the Type value Uninitialized.", next,
)
return error.New(error.Value, desc)
}
if buf.Buffer[prev].Next != NilIndex {
desc := fmt.Sprintf(
"ItemNode at prev, %d, is already linked to Next ItemNode at %d.",
prev, buf.Buffer[prev].Next,
)
return error.New(error.Value, desc)
}
if buf.Buffer[next].Prev != NilIndex {
desc := fmt.Sprintf(
"ItemNode at next, %d, is already linked to Prev ItemNode at %d.",
next, buf.Buffer[next].Prev,
)
return error.New(error.Value, desc)
}
buf.internalLink(prev, next)
return nil
}
func (buf *ListBuffer) internalLink(prev, next BufferIndex) {
buf.Buffer[next].Prev = prev
buf.Buffer[prev].Next = next
}
// Unlink removes the item at the given index from its current list.
//
// An error is returned if idx is not a valid index into the buffer or if
// it represents an uninitialized item.
func (buf *ListBuffer) Unlink(idx BufferIndex) *error.Error {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return error.New(error.Value, desc)
}
buf.internalUnlink(idx)
return nil
}
func (buf *ListBuffer) internalUnlink(idx BufferIndex) {
next := buf.Buffer[idx].Next
prev := buf.Buffer[idx].Prev
if prev != NilIndex {
buf.Buffer[prev].Next = next
}
if next != NilIndex {
buf.Buffer[next].Prev = prev
}
buf.Buffer[idx].Next = NilIndex
buf.Buffer[idx].Prev = NilIndex
}
// Delete frees the buffer resources associated with the item at the given
// index.
//
// An error is returned if idx is not a valid index into buffer or if it
// represents an uninitialized item.
func (buf *ListBuffer) Delete(idx BufferIndex) *error.Error {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return error.New(error.Value, desc)
}
buf.internalDelete(idx)
return nil
}
func (buf *ListBuffer) internalDelete(idx BufferIndex) {
buf.internalUnlink(idx)
if buf.FreeHead != NilIndex {
buf.internalLink(idx, buf.FreeHead)
}
node := &buf.Buffer[idx]
node.Item.Clear()
node.Next = buf.FreeHead
node.Prev = NilIndex
buf.FreeHead = idx
buf.Count--
}
// IsFull returns true if no more items can be added to the buffer.
func (buf *ListBuffer) IsFull() bool {
return buf.Count >= MaxBufferCount
}
// Get returns the item stored at the given index within the buffer.
//
// An error is returned if idx is not a valid index into the buffer or if it
// represents an uninitialized item.
func (buf *ListBuffer) Get(idx BufferIndex) (Item, *error.Error) {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return Item{}, error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return Item{}, error.New(error.Value, desc)
}
return buf.Buffer[idx].Item, nil
}
// Set updates the item stored at the given index within the buffer.
//
// An error is returned if idx is not a valid index into the buffer or if it
// represents an uninitialized item.
func (buf *ListBuffer) Set(idx BufferIndex, item Item) (*error.Error) {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return error.New(error.Value, desc)
}
buf.Buffer[idx].Item = item
return nil
}
// legalIndex determines the legality of accessing the buffer at idx. inRange
// is true if the index is valid and initialized is true if there is an valid
// item at idx.
func (buf *ListBuffer) legalIndex(idx BufferIndex) (inRange, initialized bool) {
inRange = idx >= 0 && idx < BufferIndex(len(buf.Buffer))
if inRange {
initialized = buf.Buffer[idx].Item.Type != Uninitialized
} else {
initialized = true
}
return inRange, initialized
}
// Check performs various consistency checks on the buffer and returns an error
// indicating which the first failed check. If all checks pass, nil is returned.
func (buf *ListBuffer) Check() *error.Error {
// Check that the buffer isn't too long.
if len(buf.Buffer) > MaxBufferCount {
desc := fmt.Sprintf(
"Buffer length of %d is larger than max of %d.",
len(buf.Buffer), MaxBufferCount,
)
return error.New(error.Sanity, desc)
}
// Check that all items are valid.
for i := 0; i < len(buf.Buffer); i++ {
if err := buf.Buffer[i].Item.Check(); err != nil { return err }
}
// Check that the item count is correct.
count := 0
for i := 0; i < len(buf.Buffer); i++ {
if buf.Buffer[i].Item.Type != Uninitialized {
count++
}
}
if BufferIndex(count) != buf.Count {
desc := fmt.Sprintf(
"buf.Count = %d, but there are %d items in buffer.",
buf.Count, count,
)
return error.New(error.Sanity, desc)
}
// Check all Prev indices.
for i := 0; i < len(buf.Buffer); i++ {
prev := buf.Buffer[i].Prev
if prev != NilIndex && buf.Buffer[prev].Next != BufferIndex(i) {
desc := fmt.Sprintf(
"Prev index of item %d is %d, but Next index of item %d is %d.",
i, prev, prev, buf.Buffer[prev].Next,
)
return error.New(error.Sanity, desc)
}
}
// Check all Next indices.
for i := 0; i < len(buf.Buffer); i++ {
next := buf.Buffer[i].Next
if next != NilIndex && buf.Buffer[next].Prev != BufferIndex(i) {
desc := fmt.Sprintf(
"Next index of item %d is %d, but Prev index of item %d is %d.",
i, next, next, buf.Buffer[next].Prev,
)
return error.New(error.Sanity, desc)
}
}
// Check for cycles.
checkBuffer := make([]bool, len(buf.Buffer))
for i := 0; i < len(buf.Buffer); i++ {
if !checkBuffer[i] && buf.hasCycle(BufferIndex(i), checkBuffer) {
desc := fmt.Sprintf("List with head at index %d contains cycle.", i)
return error.New(error.Sanity, desc)
}
}
return nil
}
// hasCycle returns true if there is a cycle after in the list following the
// given index and that cycle has not already been detected.
func (buf *ListBuffer) hasCycle(idx BufferIndex, checkBuffer []bool) bool {
checkBuffer[idx] = true
tortise := idx
hare := idx
for hare != NilIndex {
hare = buf.Incr(buf.Incr(hare))
tortise = buf.Incr(tortise)
if hare != NilIndex { checkBuffer[hare] = true }
if tortise != NilIndex { checkBuffer[tortise] = true }
if hare == tortise && hare != NilIndex {
return true
}
}
return false
}
// Incr returns the index of item which follows idx in the current list.
func (buf *ListBuffer) Incr(idx BufferIndex) BufferIndex {
if idx == NilIndex {
return NilIndex
}
return buf.Buffer[idx].Next
}
func (buf *ListBuffer) Decr(idx BufferIndex) BufferIndex {
if idx == NilIndex {
return NilIndex
}
return buf.Buffer[idx].Prev
}
| {
buf.Buffer = make([]Node, defaultBufferLength)
buf.FreeHead = 0
buf.Count = 0
for i := 0; i < len(buf.Buffer); i++ {
buf.Buffer[i].Item.Clear()
buf.Buffer[i].Prev = BufferIndex(i - 1)
buf.Buffer[i].Next = BufferIndex(i + 1)
}
buf.Buffer[0].Prev = NilIndex
buf.Buffer[len(buf.Buffer)-1].Next = NilIndex
} | identifier_body |
list_buffer.go | package item
import (
"fmt"
"math"
"github.com/phil-mansfield/rogue/error"
)
// BufferIndex is an integer type used to index into ListBuffer.
//
// Since BufferIndex may be changed to different size or to a type of unknown
// signage, all BufferIndex literals must be constructed from 0, 1,
// MaxBufferCount, and NilIndex alone.
type BufferIndex int16
// Note that the usage of "Count" and "Length" in constant names *is* actually
// consistent.
const (
// MaxBufferCount is the largest possible value of ListBuffer.Count.
MaxBufferCount = math.MaxInt16
// NilIndex is a sentinel ListBuffer index value. It is analogous to a a
// nil pointer.
NilIndex = -1
// defaultBufferLength is the length of an empty ListBuffer.
defaultBufferLength = 1 << 8
)
// Node is a wrapper around the type Item which allows it to be an element
// in a linked list.
//
// Next and Prev reference the indices of other Items within the same instance
// of ListBuffer.
type Node struct {
Item Item
Next, Prev BufferIndex
}
// ListBuffer is a data structure which represents numerous lists of items.
type ListBuffer struct {
FreeHead BufferIndex
Buffer []Node
Count BufferIndex
}
// New creates a new ListBuffer instance.
func New() *ListBuffer {
buf := new(ListBuffer)
buf.Init()
return buf
}
// Init initializes a blank ListBuffer instance.
func (buf *ListBuffer) Init() {
buf.Buffer = make([]Node, defaultBufferLength)
buf.FreeHead = 0
buf.Count = 0
for i := 0; i < len(buf.Buffer); i++ {
buf.Buffer[i].Item.Clear()
buf.Buffer[i].Prev = BufferIndex(i - 1)
buf.Buffer[i].Next = BufferIndex(i + 1)
}
buf.Buffer[0].Prev = NilIndex
buf.Buffer[len(buf.Buffer)-1].Next = NilIndex
}
// Singleton creates a singleton list containing only the given item.
//
// Singleton returns an error if it is passed
// an uninitialized item, or if the buf is full.
//
// It is correct to call buf.IsFull() prior to all calls to
// buf.Singleton(), since it is not possible to switch upon the type of error
// to identify whether the error has a recoverable cause.
func (buf *ListBuffer) Singleton(item Item) (BufferIndex, *error.Error) {
if buf.IsFull() {
desc := fmt.Sprintf(
"buf has reached maximum capacity of %d Items.",
MaxBufferCount,
)
return NilIndex, error.New(error.Value, desc)
} else if item.Type == Uninitialized {
return NilIndex, error.New(error.Value, "item is uninitialized.")
}
return buf.internalSingleton(item), nil
}
func (buf *ListBuffer) internalSingleton(item Item) BufferIndex {
if buf.Count == BufferIndex(len(buf.Buffer)) {
buf.Buffer = append(buf.Buffer, Node{item, NilIndex, NilIndex})
buf.Count++
return BufferIndex(len(buf.Buffer) - 1)
}
idx := buf.FreeHead
buf.Buffer[idx].Item = item
buf.FreeHead = buf.Buffer[idx].Next
buf.internalUnlink(idx)
buf.Count++
return idx
}
// Link connects the items at indices prev and next so that the item at prev
// comes before the item at next.
//
// Link returns an error if prev or next are not valid indices into buf or if
// the linking would break a pre-existing list or if one of the indices accesses
// a .
func (buf *ListBuffer) Link(prev, next BufferIndex) *error.Error {
// If your functions don't have 50 lines of error handling for two lines
// of state altering-code, you aren't cautious enough.
inRange, initialized := buf.legalIndex(prev)
if !inRange {
desc := fmt.Sprintf(
"prev, %d, is out of range for IndexBuffer of length %d.",
prev, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at prev, %d, has the Type value Uninitialized.", prev,
)
return error.New(error.Value, desc)
}
inRange, initialized = buf.legalIndex(next)
if !inRange {
desc := fmt.Sprintf(
"next, %d, is out of range for IndexBuffer of length %d.",
next, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at next, %d, has the Type value Uninitialized.", next,
)
return error.New(error.Value, desc)
}
if buf.Buffer[prev].Next != NilIndex {
desc := fmt.Sprintf(
"ItemNode at prev, %d, is already linked to Next ItemNode at %d.",
prev, buf.Buffer[prev].Next,
)
return error.New(error.Value, desc)
}
if buf.Buffer[next].Prev != NilIndex {
desc := fmt.Sprintf(
"ItemNode at next, %d, is already linked to Prev ItemNode at %d.",
next, buf.Buffer[next].Prev,
)
return error.New(error.Value, desc)
}
buf.internalLink(prev, next)
return nil
}
func (buf *ListBuffer) internalLink(prev, next BufferIndex) {
buf.Buffer[next].Prev = prev
buf.Buffer[prev].Next = next
}
// Unlink removes the item at the given index from its current list.
//
// An error is returned if idx is not a valid index into the buffer or if
// it represents an uninitialized item.
func (buf *ListBuffer) Unlink(idx BufferIndex) *error.Error {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return error.New(error.Value, desc)
}
buf.internalUnlink(idx)
return nil
}
func (buf *ListBuffer) internalUnlink(idx BufferIndex) {
next := buf.Buffer[idx].Next
prev := buf.Buffer[idx].Prev
if prev != NilIndex {
buf.Buffer[prev].Next = next
}
if next != NilIndex {
buf.Buffer[next].Prev = prev
}
buf.Buffer[idx].Next = NilIndex
buf.Buffer[idx].Prev = NilIndex
}
// Delete frees the buffer resources associated with the item at the given
// index.
//
// An error is returned if idx is not a valid index into buffer or if it
// represents an uninitialized item.
func (buf *ListBuffer) Delete(idx BufferIndex) *error.Error {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized |
buf.internalDelete(idx)
return nil
}
func (buf *ListBuffer) internalDelete(idx BufferIndex) {
buf.internalUnlink(idx)
if buf.FreeHead != NilIndex {
buf.internalLink(idx, buf.FreeHead)
}
node := &buf.Buffer[idx]
node.Item.Clear()
node.Next = buf.FreeHead
node.Prev = NilIndex
buf.FreeHead = idx
buf.Count--
}
// IsFull returns true if no more items can be added to the buffer.
func (buf *ListBuffer) IsFull() bool {
return buf.Count >= MaxBufferCount
}
// Get returns the item stored at the given index within the buffer.
//
// An error is returned if idx is not a valid index into the buffer or if it
// represents an uninitialized item.
func (buf *ListBuffer) Get(idx BufferIndex) (Item, *error.Error) {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return Item{}, error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return Item{}, error.New(error.Value, desc)
}
return buf.Buffer[idx].Item, nil
}
// Set updates the item stored at the given index within the buffer.
//
// An error is returned if idx is not a valid index into the buffer or if it
// represents an uninitialized item.
func (buf *ListBuffer) Set(idx BufferIndex, item Item) (*error.Error) {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return error.New(error.Value, desc)
}
buf.Buffer[idx].Item = item
return nil
}
// legalIndex determines the legality of accessing the buffer at idx. inRange
// is true if the index is valid and initialized is true if there is an valid
// item at idx.
func (buf *ListBuffer) legalIndex(idx BufferIndex) (inRange, initialized bool) {
inRange = idx >= 0 && idx < BufferIndex(len(buf.Buffer))
if inRange {
initialized = buf.Buffer[idx].Item.Type != Uninitialized
} else {
initialized = true
}
return inRange, initialized
}
// Check performs various consistency checks on the buffer and returns an error
// indicating which the first failed check. If all checks pass, nil is returned.
func (buf *ListBuffer) Check() *error.Error {
// Check that the buffer isn't too long.
if len(buf.Buffer) > MaxBufferCount {
desc := fmt.Sprintf(
"Buffer length of %d is larger than max of %d.",
len(buf.Buffer), MaxBufferCount,
)
return error.New(error.Sanity, desc)
}
// Check that all items are valid.
for i := 0; i < len(buf.Buffer); i++ {
if err := buf.Buffer[i].Item.Check(); err != nil { return err }
}
// Check that the item count is correct.
count := 0
for i := 0; i < len(buf.Buffer); i++ {
if buf.Buffer[i].Item.Type != Uninitialized {
count++
}
}
if BufferIndex(count) != buf.Count {
desc := fmt.Sprintf(
"buf.Count = %d, but there are %d items in buffer.",
buf.Count, count,
)
return error.New(error.Sanity, desc)
}
// Check all Prev indices.
for i := 0; i < len(buf.Buffer); i++ {
prev := buf.Buffer[i].Prev
if prev != NilIndex && buf.Buffer[prev].Next != BufferIndex(i) {
desc := fmt.Sprintf(
"Prev index of item %d is %d, but Next index of item %d is %d.",
i, prev, prev, buf.Buffer[prev].Next,
)
return error.New(error.Sanity, desc)
}
}
// Check all Next indices.
for i := 0; i < len(buf.Buffer); i++ {
next := buf.Buffer[i].Next
if next != NilIndex && buf.Buffer[next].Prev != BufferIndex(i) {
desc := fmt.Sprintf(
"Next index of item %d is %d, but Prev index of item %d is %d.",
i, next, next, buf.Buffer[next].Prev,
)
return error.New(error.Sanity, desc)
}
}
// Check for cycles.
checkBuffer := make([]bool, len(buf.Buffer))
for i := 0; i < len(buf.Buffer); i++ {
if !checkBuffer[i] && buf.hasCycle(BufferIndex(i), checkBuffer) {
desc := fmt.Sprintf("List with head at index %d contains cycle.", i)
return error.New(error.Sanity, desc)
}
}
return nil
}
// hasCycle returns true if there is a cycle after in the list following the
// given index and that cycle has not already been detected.
func (buf *ListBuffer) hasCycle(idx BufferIndex, checkBuffer []bool) bool {
checkBuffer[idx] = true
tortise := idx
hare := idx
for hare != NilIndex {
hare = buf.Incr(buf.Incr(hare))
tortise = buf.Incr(tortise)
if hare != NilIndex { checkBuffer[hare] = true }
if tortise != NilIndex { checkBuffer[tortise] = true }
if hare == tortise && hare != NilIndex {
return true
}
}
return false
}
// Incr returns the index of item which follows idx in the current list.
func (buf *ListBuffer) Incr(idx BufferIndex) BufferIndex {
if idx == NilIndex {
return NilIndex
}
return buf.Buffer[idx].Next
}
func (buf *ListBuffer) Decr(idx BufferIndex) BufferIndex {
if idx == NilIndex {
return NilIndex
}
return buf.Buffer[idx].Prev
}
| {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return error.New(error.Value, desc)
} | conditional_block |
list_buffer.go | package item
import (
"fmt" | "math"
"github.com/phil-mansfield/rogue/error"
)
// BufferIndex is an integer type used to index into ListBuffer.
//
// Since BufferIndex may be changed to different size or to a type of unknown
// signage, all BufferIndex literals must be constructed from 0, 1,
// MaxBufferCount, and NilIndex alone.
type BufferIndex int16
// Note that the usage of "Count" and "Length" in constant names *is* actually
// consistent.
const (
// MaxBufferCount is the largest possible value of ListBuffer.Count.
MaxBufferCount = math.MaxInt16
// NilIndex is a sentinel ListBuffer index value. It is analogous to a a
// nil pointer.
NilIndex = -1
// defaultBufferLength is the length of an empty ListBuffer.
defaultBufferLength = 1 << 8
)
// Node is a wrapper around the type Item which allows it to be an element
// in a linked list.
//
// Next and Prev reference the indices of other Items within the same instance
// of ListBuffer.
type Node struct {
Item Item
Next, Prev BufferIndex
}
// ListBuffer is a data structure which represents numerous lists of items.
type ListBuffer struct {
FreeHead BufferIndex
Buffer []Node
Count BufferIndex
}
// New creates a new ListBuffer instance.
func New() *ListBuffer {
buf := new(ListBuffer)
buf.Init()
return buf
}
// Init initializes a blank ListBuffer instance.
func (buf *ListBuffer) Init() {
buf.Buffer = make([]Node, defaultBufferLength)
buf.FreeHead = 0
buf.Count = 0
for i := 0; i < len(buf.Buffer); i++ {
buf.Buffer[i].Item.Clear()
buf.Buffer[i].Prev = BufferIndex(i - 1)
buf.Buffer[i].Next = BufferIndex(i + 1)
}
buf.Buffer[0].Prev = NilIndex
buf.Buffer[len(buf.Buffer)-1].Next = NilIndex
}
// Singleton creates a singleton list containing only the given item.
//
// Singleton returns an error if it is passed
// an uninitialized item, or if the buf is full.
//
// It is correct to call buf.IsFull() prior to all calls to
// buf.Singleton(), since it is not possible to switch upon the type of error
// to identify whether the error has a recoverable cause.
func (buf *ListBuffer) Singleton(item Item) (BufferIndex, *error.Error) {
if buf.IsFull() {
desc := fmt.Sprintf(
"buf has reached maximum capacity of %d Items.",
MaxBufferCount,
)
return NilIndex, error.New(error.Value, desc)
} else if item.Type == Uninitialized {
return NilIndex, error.New(error.Value, "item is uninitialized.")
}
return buf.internalSingleton(item), nil
}
func (buf *ListBuffer) internalSingleton(item Item) BufferIndex {
if buf.Count == BufferIndex(len(buf.Buffer)) {
buf.Buffer = append(buf.Buffer, Node{item, NilIndex, NilIndex})
buf.Count++
return BufferIndex(len(buf.Buffer) - 1)
}
idx := buf.FreeHead
buf.Buffer[idx].Item = item
buf.FreeHead = buf.Buffer[idx].Next
buf.internalUnlink(idx)
buf.Count++
return idx
}
// Link connects the items at indices prev and next so that the item at prev
// comes before the item at next.
//
// Link returns an error if prev or next are not valid indices into buf or if
// the linking would break a pre-existing list or if one of the indices accesses
// a .
func (buf *ListBuffer) Link(prev, next BufferIndex) *error.Error {
// If your functions don't have 50 lines of error handling for two lines
// of state altering-code, you aren't cautious enough.
inRange, initialized := buf.legalIndex(prev)
if !inRange {
desc := fmt.Sprintf(
"prev, %d, is out of range for IndexBuffer of length %d.",
prev, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at prev, %d, has the Type value Uninitialized.", prev,
)
return error.New(error.Value, desc)
}
inRange, initialized = buf.legalIndex(next)
if !inRange {
desc := fmt.Sprintf(
"next, %d, is out of range for IndexBuffer of length %d.",
next, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at next, %d, has the Type value Uninitialized.", next,
)
return error.New(error.Value, desc)
}
if buf.Buffer[prev].Next != NilIndex {
desc := fmt.Sprintf(
"ItemNode at prev, %d, is already linked to Next ItemNode at %d.",
prev, buf.Buffer[prev].Next,
)
return error.New(error.Value, desc)
}
if buf.Buffer[next].Prev != NilIndex {
desc := fmt.Sprintf(
"ItemNode at next, %d, is already linked to Prev ItemNode at %d.",
next, buf.Buffer[next].Prev,
)
return error.New(error.Value, desc)
}
buf.internalLink(prev, next)
return nil
}
func (buf *ListBuffer) internalLink(prev, next BufferIndex) {
buf.Buffer[next].Prev = prev
buf.Buffer[prev].Next = next
}
// Unlink removes the item at the given index from its current list.
//
// An error is returned if idx is not a valid index into the buffer or if
// it represents an uninitialized item.
func (buf *ListBuffer) Unlink(idx BufferIndex) *error.Error {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return error.New(error.Value, desc)
}
buf.internalUnlink(idx)
return nil
}
func (buf *ListBuffer) internalUnlink(idx BufferIndex) {
next := buf.Buffer[idx].Next
prev := buf.Buffer[idx].Prev
if prev != NilIndex {
buf.Buffer[prev].Next = next
}
if next != NilIndex {
buf.Buffer[next].Prev = prev
}
buf.Buffer[idx].Next = NilIndex
buf.Buffer[idx].Prev = NilIndex
}
// Delete frees the buffer resources associated with the item at the given
// index.
//
// An error is returned if idx is not a valid index into buffer or if it
// represents an uninitialized item.
func (buf *ListBuffer) Delete(idx BufferIndex) *error.Error {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return error.New(error.Value, desc)
}
buf.internalDelete(idx)
return nil
}
func (buf *ListBuffer) internalDelete(idx BufferIndex) {
buf.internalUnlink(idx)
if buf.FreeHead != NilIndex {
buf.internalLink(idx, buf.FreeHead)
}
node := &buf.Buffer[idx]
node.Item.Clear()
node.Next = buf.FreeHead
node.Prev = NilIndex
buf.FreeHead = idx
buf.Count--
}
// IsFull returns true if no more items can be added to the buffer.
func (buf *ListBuffer) IsFull() bool {
return buf.Count >= MaxBufferCount
}
// Get returns the item stored at the given index within the buffer.
//
// An error is returned if idx is not a valid index into the buffer or if it
// represents an uninitialized item.
func (buf *ListBuffer) Get(idx BufferIndex) (Item, *error.Error) {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return Item{}, error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return Item{}, error.New(error.Value, desc)
}
return buf.Buffer[idx].Item, nil
}
// Set updates the item stored at the given index within the buffer.
//
// An error is returned if idx is not a valid index into the buffer or if it
// represents an uninitialized item.
func (buf *ListBuffer) Set(idx BufferIndex, item Item) (*error.Error) {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return error.New(error.Value, desc)
}
buf.Buffer[idx].Item = item
return nil
}
// legalIndex determines the legality of accessing the buffer at idx. inRange
// is true if the index is valid and initialized is true if there is an valid
// item at idx.
func (buf *ListBuffer) legalIndex(idx BufferIndex) (inRange, initialized bool) {
inRange = idx >= 0 && idx < BufferIndex(len(buf.Buffer))
if inRange {
initialized = buf.Buffer[idx].Item.Type != Uninitialized
} else {
initialized = true
}
return inRange, initialized
}
// Check performs various consistency checks on the buffer and returns an error
// indicating which the first failed check. If all checks pass, nil is returned.
func (buf *ListBuffer) Check() *error.Error {
// Check that the buffer isn't too long.
if len(buf.Buffer) > MaxBufferCount {
desc := fmt.Sprintf(
"Buffer length of %d is larger than max of %d.",
len(buf.Buffer), MaxBufferCount,
)
return error.New(error.Sanity, desc)
}
// Check that all items are valid.
for i := 0; i < len(buf.Buffer); i++ {
if err := buf.Buffer[i].Item.Check(); err != nil { return err }
}
// Check that the item count is correct.
count := 0
for i := 0; i < len(buf.Buffer); i++ {
if buf.Buffer[i].Item.Type != Uninitialized {
count++
}
}
if BufferIndex(count) != buf.Count {
desc := fmt.Sprintf(
"buf.Count = %d, but there are %d items in buffer.",
buf.Count, count,
)
return error.New(error.Sanity, desc)
}
// Check all Prev indices.
for i := 0; i < len(buf.Buffer); i++ {
prev := buf.Buffer[i].Prev
if prev != NilIndex && buf.Buffer[prev].Next != BufferIndex(i) {
desc := fmt.Sprintf(
"Prev index of item %d is %d, but Next index of item %d is %d.",
i, prev, prev, buf.Buffer[prev].Next,
)
return error.New(error.Sanity, desc)
}
}
// Check all Next indices.
for i := 0; i < len(buf.Buffer); i++ {
next := buf.Buffer[i].Next
if next != NilIndex && buf.Buffer[next].Prev != BufferIndex(i) {
desc := fmt.Sprintf(
"Next index of item %d is %d, but Prev index of item %d is %d.",
i, next, next, buf.Buffer[next].Prev,
)
return error.New(error.Sanity, desc)
}
}
// Check for cycles.
checkBuffer := make([]bool, len(buf.Buffer))
for i := 0; i < len(buf.Buffer); i++ {
if !checkBuffer[i] && buf.hasCycle(BufferIndex(i), checkBuffer) {
desc := fmt.Sprintf("List with head at index %d contains cycle.", i)
return error.New(error.Sanity, desc)
}
}
return nil
}
// hasCycle returns true if there is a cycle after in the list following the
// given index and that cycle has not already been detected.
func (buf *ListBuffer) hasCycle(idx BufferIndex, checkBuffer []bool) bool {
checkBuffer[idx] = true
tortise := idx
hare := idx
for hare != NilIndex {
hare = buf.Incr(buf.Incr(hare))
tortise = buf.Incr(tortise)
if hare != NilIndex { checkBuffer[hare] = true }
if tortise != NilIndex { checkBuffer[tortise] = true }
if hare == tortise && hare != NilIndex {
return true
}
}
return false
}
// Incr returns the index of item which follows idx in the current list.
func (buf *ListBuffer) Incr(idx BufferIndex) BufferIndex {
if idx == NilIndex {
return NilIndex
}
return buf.Buffer[idx].Next
}
func (buf *ListBuffer) Decr(idx BufferIndex) BufferIndex {
if idx == NilIndex {
return NilIndex
}
return buf.Buffer[idx].Prev
} | random_line_split | |
list_buffer.go | package item
import (
"fmt"
"math"
"github.com/phil-mansfield/rogue/error"
)
// BufferIndex is an integer type used to index into ListBuffer.
//
// Since BufferIndex may be changed to different size or to a type of unknown
// signage, all BufferIndex literals must be constructed from 0, 1,
// MaxBufferCount, and NilIndex alone.
type BufferIndex int16
// Note that the usage of "Count" and "Length" in constant names *is* actually
// consistent.
const (
// MaxBufferCount is the largest possible value of ListBuffer.Count.
MaxBufferCount = math.MaxInt16
// NilIndex is a sentinel ListBuffer index value. It is analogous to a a
// nil pointer.
NilIndex = -1
// defaultBufferLength is the length of an empty ListBuffer.
defaultBufferLength = 1 << 8
)
// Node is a wrapper around the type Item which allows it to be an element
// in a linked list.
//
// Next and Prev reference the indices of other Items within the same instance
// of ListBuffer.
type Node struct {
Item Item
Next, Prev BufferIndex
}
// ListBuffer is a data structure which represents numerous lists of items.
type ListBuffer struct {
FreeHead BufferIndex
Buffer []Node
Count BufferIndex
}
// New creates a new ListBuffer instance.
func New() *ListBuffer {
buf := new(ListBuffer)
buf.Init()
return buf
}
// Init initializes a blank ListBuffer instance.
func (buf *ListBuffer) Init() {
buf.Buffer = make([]Node, defaultBufferLength)
buf.FreeHead = 0
buf.Count = 0
for i := 0; i < len(buf.Buffer); i++ {
buf.Buffer[i].Item.Clear()
buf.Buffer[i].Prev = BufferIndex(i - 1)
buf.Buffer[i].Next = BufferIndex(i + 1)
}
buf.Buffer[0].Prev = NilIndex
buf.Buffer[len(buf.Buffer)-1].Next = NilIndex
}
// Singleton creates a singleton list containing only the given item.
//
// Singleton returns an error if it is passed
// an uninitialized item, or if the buf is full.
//
// It is correct to call buf.IsFull() prior to all calls to
// buf.Singleton(), since it is not possible to switch upon the type of error
// to identify whether the error has a recoverable cause.
func (buf *ListBuffer) | (item Item) (BufferIndex, *error.Error) {
if buf.IsFull() {
desc := fmt.Sprintf(
"buf has reached maximum capacity of %d Items.",
MaxBufferCount,
)
return NilIndex, error.New(error.Value, desc)
} else if item.Type == Uninitialized {
return NilIndex, error.New(error.Value, "item is uninitialized.")
}
return buf.internalSingleton(item), nil
}
func (buf *ListBuffer) internalSingleton(item Item) BufferIndex {
if buf.Count == BufferIndex(len(buf.Buffer)) {
buf.Buffer = append(buf.Buffer, Node{item, NilIndex, NilIndex})
buf.Count++
return BufferIndex(len(buf.Buffer) - 1)
}
idx := buf.FreeHead
buf.Buffer[idx].Item = item
buf.FreeHead = buf.Buffer[idx].Next
buf.internalUnlink(idx)
buf.Count++
return idx
}
// Link connects the items at indices prev and next so that the item at prev
// comes before the item at next.
//
// Link returns an error if prev or next are not valid indices into buf or if
// the linking would break a pre-existing list or if one of the indices accesses
// a .
func (buf *ListBuffer) Link(prev, next BufferIndex) *error.Error {
// If your functions don't have 50 lines of error handling for two lines
// of state altering-code, you aren't cautious enough.
inRange, initialized := buf.legalIndex(prev)
if !inRange {
desc := fmt.Sprintf(
"prev, %d, is out of range for IndexBuffer of length %d.",
prev, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at prev, %d, has the Type value Uninitialized.", prev,
)
return error.New(error.Value, desc)
}
inRange, initialized = buf.legalIndex(next)
if !inRange {
desc := fmt.Sprintf(
"next, %d, is out of range for IndexBuffer of length %d.",
next, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at next, %d, has the Type value Uninitialized.", next,
)
return error.New(error.Value, desc)
}
if buf.Buffer[prev].Next != NilIndex {
desc := fmt.Sprintf(
"ItemNode at prev, %d, is already linked to Next ItemNode at %d.",
prev, buf.Buffer[prev].Next,
)
return error.New(error.Value, desc)
}
if buf.Buffer[next].Prev != NilIndex {
desc := fmt.Sprintf(
"ItemNode at next, %d, is already linked to Prev ItemNode at %d.",
next, buf.Buffer[next].Prev,
)
return error.New(error.Value, desc)
}
buf.internalLink(prev, next)
return nil
}
func (buf *ListBuffer) internalLink(prev, next BufferIndex) {
buf.Buffer[next].Prev = prev
buf.Buffer[prev].Next = next
}
// Unlink removes the item at the given index from its current list.
//
// An error is returned if idx is not a valid index into the buffer or if
// it represents an uninitialized item.
func (buf *ListBuffer) Unlink(idx BufferIndex) *error.Error {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return error.New(error.Value, desc)
}
buf.internalUnlink(idx)
return nil
}
func (buf *ListBuffer) internalUnlink(idx BufferIndex) {
next := buf.Buffer[idx].Next
prev := buf.Buffer[idx].Prev
if prev != NilIndex {
buf.Buffer[prev].Next = next
}
if next != NilIndex {
buf.Buffer[next].Prev = prev
}
buf.Buffer[idx].Next = NilIndex
buf.Buffer[idx].Prev = NilIndex
}
// Delete frees the buffer resources associated with the item at the given
// index.
//
// An error is returned if idx is not a valid index into buffer or if it
// represents an uninitialized item.
func (buf *ListBuffer) Delete(idx BufferIndex) *error.Error {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return error.New(error.Value, desc)
}
buf.internalDelete(idx)
return nil
}
func (buf *ListBuffer) internalDelete(idx BufferIndex) {
buf.internalUnlink(idx)
if buf.FreeHead != NilIndex {
buf.internalLink(idx, buf.FreeHead)
}
node := &buf.Buffer[idx]
node.Item.Clear()
node.Next = buf.FreeHead
node.Prev = NilIndex
buf.FreeHead = idx
buf.Count--
}
// IsFull returns true if no more items can be added to the buffer.
func (buf *ListBuffer) IsFull() bool {
return buf.Count >= MaxBufferCount
}
// Get returns the item stored at the given index within the buffer.
//
// An error is returned if idx is not a valid index into the buffer or if it
// represents an uninitialized item.
func (buf *ListBuffer) Get(idx BufferIndex) (Item, *error.Error) {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return Item{}, error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return Item{}, error.New(error.Value, desc)
}
return buf.Buffer[idx].Item, nil
}
// Set updates the item stored at the given index within the buffer.
//
// An error is returned if idx is not a valid index into the buffer or if it
// represents an uninitialized item.
func (buf *ListBuffer) Set(idx BufferIndex, item Item) (*error.Error) {
inRange, initialized := buf.legalIndex(idx)
if !inRange {
desc := fmt.Sprintf(
"idx, %d, is out of range for IndexBuffer of length %d.",
idx, len(buf.Buffer),
)
return error.New(error.Value, desc)
} else if !initialized {
desc := fmt.Sprintf(
"Item at idx, %d, has the Type value Uninitialized.", idx,
)
return error.New(error.Value, desc)
}
buf.Buffer[idx].Item = item
return nil
}
// legalIndex determines the legality of accessing the buffer at idx. inRange
// is true if the index is valid and initialized is true if there is an valid
// item at idx.
func (buf *ListBuffer) legalIndex(idx BufferIndex) (inRange, initialized bool) {
inRange = idx >= 0 && idx < BufferIndex(len(buf.Buffer))
if inRange {
initialized = buf.Buffer[idx].Item.Type != Uninitialized
} else {
initialized = true
}
return inRange, initialized
}
// Check performs various consistency checks on the buffer and returns an error
// indicating which the first failed check. If all checks pass, nil is returned.
func (buf *ListBuffer) Check() *error.Error {
// Check that the buffer isn't too long.
if len(buf.Buffer) > MaxBufferCount {
desc := fmt.Sprintf(
"Buffer length of %d is larger than max of %d.",
len(buf.Buffer), MaxBufferCount,
)
return error.New(error.Sanity, desc)
}
// Check that all items are valid.
for i := 0; i < len(buf.Buffer); i++ {
if err := buf.Buffer[i].Item.Check(); err != nil { return err }
}
// Check that the item count is correct.
count := 0
for i := 0; i < len(buf.Buffer); i++ {
if buf.Buffer[i].Item.Type != Uninitialized {
count++
}
}
if BufferIndex(count) != buf.Count {
desc := fmt.Sprintf(
"buf.Count = %d, but there are %d items in buffer.",
buf.Count, count,
)
return error.New(error.Sanity, desc)
}
// Check all Prev indices.
for i := 0; i < len(buf.Buffer); i++ {
prev := buf.Buffer[i].Prev
if prev != NilIndex && buf.Buffer[prev].Next != BufferIndex(i) {
desc := fmt.Sprintf(
"Prev index of item %d is %d, but Next index of item %d is %d.",
i, prev, prev, buf.Buffer[prev].Next,
)
return error.New(error.Sanity, desc)
}
}
// Check all Next indices.
for i := 0; i < len(buf.Buffer); i++ {
next := buf.Buffer[i].Next
if next != NilIndex && buf.Buffer[next].Prev != BufferIndex(i) {
desc := fmt.Sprintf(
"Next index of item %d is %d, but Prev index of item %d is %d.",
i, next, next, buf.Buffer[next].Prev,
)
return error.New(error.Sanity, desc)
}
}
// Check for cycles.
checkBuffer := make([]bool, len(buf.Buffer))
for i := 0; i < len(buf.Buffer); i++ {
if !checkBuffer[i] && buf.hasCycle(BufferIndex(i), checkBuffer) {
desc := fmt.Sprintf("List with head at index %d contains cycle.", i)
return error.New(error.Sanity, desc)
}
}
return nil
}
// hasCycle returns true if there is a cycle after in the list following the
// given index and that cycle has not already been detected.
func (buf *ListBuffer) hasCycle(idx BufferIndex, checkBuffer []bool) bool {
checkBuffer[idx] = true
tortise := idx
hare := idx
for hare != NilIndex {
hare = buf.Incr(buf.Incr(hare))
tortise = buf.Incr(tortise)
if hare != NilIndex { checkBuffer[hare] = true }
if tortise != NilIndex { checkBuffer[tortise] = true }
if hare == tortise && hare != NilIndex {
return true
}
}
return false
}
// Incr returns the index of item which follows idx in the current list.
func (buf *ListBuffer) Incr(idx BufferIndex) BufferIndex {
if idx == NilIndex {
return NilIndex
}
return buf.Buffer[idx].Next
}
func (buf *ListBuffer) Decr(idx BufferIndex) BufferIndex {
if idx == NilIndex {
return NilIndex
}
return buf.Buffer[idx].Prev
}
| Singleton | identifier_name |
run_swmm_DDPG.py | """
This script runs Deep Q-Network RL algorithm for control
of stormwater systems using a SWMM model as the environment
Author: Benjamin Bowes
Date: May 10, 2019
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from pyswmm import Simulation, Nodes, Links
from smart_stormwater_rl.RL_DDPG.actor_critic import Actor, Critic
from smart_stormwater_rl.replay_memory import ReplayMemoryAgent, random_indx, create_minibatch
from smart_stormwater_rl.reward_functions import reward_function2 as reward_function # specify reward function to use
from smart_stormwater_rl.pyswmm_utils import OrnsteinUhlenbeckProcess, save_state, save_action, gen_noise
num_episodes = 10000 # number of times to repeat simulation
rewards_episode_tracker = [] # init rewards of each episode
flood_episode_tracker = [] # init flooding of each episode
swmm_inp = "C:/Users/Ben Bowes/PycharmProjects/smart_stormwater_rl/swmm_input_files/simple_2_ctl_smt.inp"
save_model_name = "saved_model_" # init model name
actor_dir = "smart_stormwater_rl/RL_DDPG/saved_models_actor"
critic_dir = "smart_stormwater_rl/RL_DDPG/saved_models_critic"
reward_dir = "smart_stormwater_rl/RL_DDPG/saved_model_rewards"
out_dir = "smart_stormwater_rl/RL_DDPG/saved_swmm_output"
rwd = "rwd2" # name of reward function for labeling plots/data
gamma = 0.99
lr = 0.00005
tau = 0.001
batch_size = 100
# Initialize input states TODO dynamically read from swmm input file
temp_height = np.zeros(2, dtype='int32') # St1.depth, St2.depth
temp_valve = np.zeros(2, dtype='int32') # R1.current_setting, R2.current_setting
# input_states = np.append(temp_height, temp_valve)
input_states = temp_height
# Allocate actions and set range
action_space = 2 # number of structures to control
act_range = np.asarray([0., 1.])
# Initialize Actor, returns two networks: Actor and target
actor = Actor(input_states.shape, action_space, act_range, 0.1 * lr, tau)
# Initialize Critic, returns two networks: Critic and target
critic = Critic(input_states.shape, action_space, lr, tau)
# Replay Memory
replay = ReplayMemoryAgent(len(input_states), action_space, 1000000)
# init lists to store values for plotting
St1_depth = []
St2_depth = []
J3_depth = []
St1_flooding = []
St2_flooding = []
J3_flooding = []
R1_position = []
R2_position = []
episode = 0
t_epsi = 0
while episode < num_episodes: # loop through all episodes
if episode % 100 == 0:
print("episode: ", episode)
print("t_epsi", t_epsi)
# load model weights if not first episode
if episode != 0:
actor.load_weights(os.path.join(actor_dir, save_model_name + "_actor.h5"))
critic.load_weights(os.path.join(critic_dir, save_model_name + "_critic.h5"))
St1_depth_episode = []
St2_depth_episode = []
J3_depth_episode = []
St1_flooding_episode = []
St2_flooding_episode = []
J3_flooding_episode = []
R1_position_episode = []
R2_position_episode = []
episode += 1
# initialize simulation
sim = Simulation(swmm_inp) # read input file
control_time_step = 900 # control time step in seconds
sim.step_advance(control_time_step) # set control time step
node_object = Nodes(sim) # init node object
St1 = node_object["St1"]
St2 = node_object["St2"]
J3 = node_object["J3"]
node_list = [St1, St2, J3]
# Change pond depth if desired
# St1.full_depth = 4
# St2.full_depth = 4
link_object = Links(sim) # init link object
R1 = link_object["R1"]
R2 = link_object["R2"]
valve_list = [R1, R2]
# Simulation Tracker
reward_sim = []
flood_sim = []
# temp_height = np.asarray([St1.initial_depth, St2.initial_depth])
# temp_flood = np.asarray([0, 0, 0])
# Initialize Noise Process
noise = OrnsteinUhlenbeckProcess(size=action_space)
step_count = 0
# for step in sim:
sim.start()
sim_len = sim.end_time - sim.start_time
num_steps = int(sim_len.total_seconds()/control_time_step)
while step_count <= num_steps - 1: # loop through all steps in the simulation
# print("step_count: ", step_count)
# print("current sim time: ", sim.current_time)
t_epsi += 1
step_count += 1
if step_count >= num_steps:
break
else:
# initialize valve settings
if sim.current_time == sim.start_time:
R1.target_setting = 0.5
R2.target_setting = 0.5
# construct current system states as inputs
temp_height = np.asarray([St1.depth, St2.depth])
temp_flood = np.asarray([St1.flooding, St2.flooding, J3.flooding])
temp_valve = np.asarray([R1.current_setting, R2.current_setting])
node_states = np.append(temp_height, temp_flood)
# input_states = np.append(node_states, temp_valve).reshape(1, len(node_states) + len(temp_valve))
input_states = temp_height
# print(input_states)
# print("valve: ", temp_valve)
# record values
St1_depth_episode.append(St1.depth)
St2_depth_episode.append(St2.depth)
J3_depth_episode.append(J3.depth)
St1_flooding_episode.append(St1.flooding)
St2_flooding_episode.append(St2.flooding)
J3_flooding_episode.append(J3.flooding)
R1_position_episode.append(R1.current_setting)
R2_position_episode.append(R2.current_setting)
# Select action according to the current policy (Actor weights) and exploration noise
# (sigmoid in last layer of actor should give range of actions from 0 to 1)
action = actor.predict(input_states) # one action for each controllable structure
# print(action)
# add exploration noise and make sure actions are within range
# action = np.clip(action + gen_noise(episode, action_space), act_range[0], act_range[1])
# noise_t = np.array([noise.generate(t_epsi), noise.generate(t_epsi)], 'float32').transpose()
noise_t = noise.generate(1)
expo = np.exp(-episode*3/num_episodes)
action = np.clip(action + noise_t * expo, act_range[0], act_range[1])
# action = np.clip(action + (np.random.random(1) / episode), act_range[0], act_range[1]) # try random exploration
# if np.random.random(1) < 0.1:
# action = np.array(np.random.random(2), 'float32', ndmin=2)
# action = np.clip(action, act_range[0], act_range[1])
# Implement Action
for i, j in enumerate(valve_list):
j.target_setting = action[0][i]
# R1.target_setting = action[0][0]
# R2.target_setting = action[1][1]
# Execute selected actions
sim.__next__() # advances swmm model by one step
# print("step_count: ", step_count)
# print("current sim time: ", sim.current_time)
| temp_new_flood = np.asarray([St1.flooding, St2.flooding, J3.flooding])
temp_new_valve = np.asarray([R1.current_setting, R2.current_setting])
node_new_states = np.append(temp_new_height, temp_new_flood)
# input_new_states = np.append(node_new_states, temp_new_valve).reshape(1, len(node_new_states) +
# len(temp_new_valve))
input_new_states = temp_new_height
# print("new state", input_new_states)
# print("new valve: ", temp_new_valve)
# Observe reward
reward = reward_function(temp_new_height, temp_new_flood)
reward_sim.append(reward)
# print("reward: ", reward)
# add to replay
replay.replay_memory_update(input_states, input_new_states, reward, action, False)
# Sample minibatch from memory
rnd_indx = random_indx(batch_size, replay.replay_memory['states'].data().shape[0])
minibatch = create_minibatch(rnd_indx, replay, batch_size, action_space)
batch_states = minibatch['states']
batch_new_states = minibatch['states_new']
batch_actions = minibatch['actions']
batch_rewards = minibatch['rewards']
batch_terminal = minibatch['terminal']
# Predict target q-values using target network (critic takes [state, action] as input)
q_values = critic.target_predict([batch_new_states, actor.target_predict(batch_new_states)])
# calculate critic targets using the Bellman equation
critic_target = np.asarray(q_values)
for i in range(q_values.shape[0]):
if batch_terminal[i]:
critic_target[i] = batch_rewards[i]
else:
critic_target[i] = batch_rewards[i] + gamma * q_values[i]
# Train both networks on sampled batch, update target networks
# Train critic
critic.train_on_batch(batch_states, batch_actions, critic_target)
# Q-Value Gradients under Current Policy
actions_for_grads = actor.model.predict(batch_states)
grads = critic.gradients(batch_states, actions_for_grads) # changed from batch_actions to actions_for_grads
# Train actor
actor.train(batch_states, actions_for_grads, np.array(grads).reshape((-1, action_space))) # changed from batch_actions to actions_for_grads
# Transfer weights to target networks at rate Tau
actor.transfer_weights()
critic.transfer_weights()
# close simulation at end of episode
sim.report()
sim.close()
# Store reward values
rewards_episode_tracker.append(np.mean(np.asarray(reward_sim)))
if episode != 1:
if rewards_episode_tracker[-1] >= max(rewards_episode_tracker[:-1]):
best_episode = episode
St1_depth = St1_depth_episode
St2_depth = St2_depth_episode
J3_depth = J3_depth_episode
St1_flooding = St1_flooding_episode
St2_flooding = St2_flooding_episode
J3_flooding = J3_flooding_episode
R1_position = R1_position_episode
R2_position = R2_position_episode
out_states = [St1_depth, St2_depth, J3_depth, St1_flooding, St2_flooding, J3_flooding]
out_actions = [R1_position, R2_position]
# save neural network models
save_model_name = "saved_model_" + str(episode)
actor.save(os.path.join(actor_dir, save_model_name))
critic.save(os.path.join(critic_dir, save_model_name))
np.save(os.path.join(reward_dir, save_model_name + "_rewards"), rewards_episode_tracker) # save all mean rewards
save_state_name = "DDPG_" + str(num_episodes) + "_states_" + rwd
save_action_name = "DDPG_" + str(num_episodes) + "_actions_" + rwd
save_state(out_states, os.path.join(out_dir, save_state_name + "states.csv"))
save_action(out_actions, os.path.join(out_dir, save_action_name + "actions.csv"))
# plot results from last episode
plt.subplot(2, 2, 1)
plt.plot(St1_depth)
plt.ylim(0, 5)
plt.title('St1_depth')
plt.ylabel("ft")
plt.xlabel("time step")
plt.subplot(2, 2, 2)
plt.plot(St2_depth)
plt.ylim(0, 5)
plt.title('St2_depth')
plt.ylabel("ft")
plt.xlabel("time step")
plt.subplot(2, 2, 3)
plt.plot(J3_depth)
plt.ylim(0, 2)
plt.title('J3_depth')
plt.ylabel("ft")
plt.xlabel("time step")
# bar graph for total flooding
plt.subplot(2, 2, 4)
plt.bar([0, 1, 2], [sum(St1_flooding), sum(St2_flooding), sum(J3_flooding)], tick_label=["St1", "St2", "J3"])
plt.ylim(0)
plt.title('total_flooding')
plt.ylabel("10^3 cubic feet")
plt.tight_layout()
# plt.show()
plt.savefig("smart_stormwater_rl/RL_DDPG/plots/ddpg_model_results_" + str(best_episode) + rwd + ".png", dpi=300)
plt.close()
# plot rewards and actions
plt.subplot(2, 1, 1)
plt.plot(rewards_episode_tracker)
plt.ylabel("average reward")
plt.xlabel("episode")
plt.subplot(2, 1, 2)
plt.plot(R1_position)
plt.plot(R2_position, linestyle='--')
plt.ylim(0, 1)
plt.ylabel("orifice position")
plt.xlabel("time step")
plt.tight_layout()
plt.savefig("smart_stormwater_rl/RL_DDPG/plots/ddpg_model_rewards_" + str(num_episodes) + rwd + "epi" +
str(best_episode) + ".png", dpi=300)
plt.close() | # Observe next state
temp_new_height = np.asarray([St1.depth, St2.depth])
| random_line_split |
run_swmm_DDPG.py | """
This script runs Deep Q-Network RL algorithm for control
of stormwater systems using a SWMM model as the environment
Author: Benjamin Bowes
Date: May 10, 2019
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from pyswmm import Simulation, Nodes, Links
from smart_stormwater_rl.RL_DDPG.actor_critic import Actor, Critic
from smart_stormwater_rl.replay_memory import ReplayMemoryAgent, random_indx, create_minibatch
from smart_stormwater_rl.reward_functions import reward_function2 as reward_function # specify reward function to use
from smart_stormwater_rl.pyswmm_utils import OrnsteinUhlenbeckProcess, save_state, save_action, gen_noise
num_episodes = 10000 # number of times to repeat simulation
rewards_episode_tracker = [] # init rewards of each episode
flood_episode_tracker = [] # init flooding of each episode
swmm_inp = "C:/Users/Ben Bowes/PycharmProjects/smart_stormwater_rl/swmm_input_files/simple_2_ctl_smt.inp"
save_model_name = "saved_model_" # init model name
actor_dir = "smart_stormwater_rl/RL_DDPG/saved_models_actor"
critic_dir = "smart_stormwater_rl/RL_DDPG/saved_models_critic"
reward_dir = "smart_stormwater_rl/RL_DDPG/saved_model_rewards"
out_dir = "smart_stormwater_rl/RL_DDPG/saved_swmm_output"
rwd = "rwd2" # name of reward function for labeling plots/data
gamma = 0.99
lr = 0.00005
tau = 0.001
batch_size = 100
# Initialize input states TODO dynamically read from swmm input file
temp_height = np.zeros(2, dtype='int32') # St1.depth, St2.depth
temp_valve = np.zeros(2, dtype='int32') # R1.current_setting, R2.current_setting
# input_states = np.append(temp_height, temp_valve)
input_states = temp_height
# Allocate actions and set range
action_space = 2 # number of structures to control
act_range = np.asarray([0., 1.])
# Initialize Actor, returns two networks: Actor and target
actor = Actor(input_states.shape, action_space, act_range, 0.1 * lr, tau)
# Initialize Critic, returns two networks: Critic and target
critic = Critic(input_states.shape, action_space, lr, tau)
# Replay Memory
replay = ReplayMemoryAgent(len(input_states), action_space, 1000000)
# init lists to store values for plotting
St1_depth = []
St2_depth = []
J3_depth = []
St1_flooding = []
St2_flooding = []
J3_flooding = []
R1_position = []
R2_position = []
episode = 0
t_epsi = 0
while episode < num_episodes: # loop through all episodes
if episode % 100 == 0:
print("episode: ", episode)
print("t_epsi", t_epsi)
# load model weights if not first episode
if episode != 0:
|
St1_depth_episode = []
St2_depth_episode = []
J3_depth_episode = []
St1_flooding_episode = []
St2_flooding_episode = []
J3_flooding_episode = []
R1_position_episode = []
R2_position_episode = []
episode += 1
# initialize simulation
sim = Simulation(swmm_inp) # read input file
control_time_step = 900 # control time step in seconds
sim.step_advance(control_time_step) # set control time step
node_object = Nodes(sim) # init node object
St1 = node_object["St1"]
St2 = node_object["St2"]
J3 = node_object["J3"]
node_list = [St1, St2, J3]
# Change pond depth if desired
# St1.full_depth = 4
# St2.full_depth = 4
link_object = Links(sim) # init link object
R1 = link_object["R1"]
R2 = link_object["R2"]
valve_list = [R1, R2]
# Simulation Tracker
reward_sim = []
flood_sim = []
# temp_height = np.asarray([St1.initial_depth, St2.initial_depth])
# temp_flood = np.asarray([0, 0, 0])
# Initialize Noise Process
noise = OrnsteinUhlenbeckProcess(size=action_space)
step_count = 0
# for step in sim:
sim.start()
sim_len = sim.end_time - sim.start_time
num_steps = int(sim_len.total_seconds()/control_time_step)
while step_count <= num_steps - 1: # loop through all steps in the simulation
# print("step_count: ", step_count)
# print("current sim time: ", sim.current_time)
t_epsi += 1
step_count += 1
if step_count >= num_steps:
break
else:
# initialize valve settings
if sim.current_time == sim.start_time:
R1.target_setting = 0.5
R2.target_setting = 0.5
# construct current system states as inputs
temp_height = np.asarray([St1.depth, St2.depth])
temp_flood = np.asarray([St1.flooding, St2.flooding, J3.flooding])
temp_valve = np.asarray([R1.current_setting, R2.current_setting])
node_states = np.append(temp_height, temp_flood)
# input_states = np.append(node_states, temp_valve).reshape(1, len(node_states) + len(temp_valve))
input_states = temp_height
# print(input_states)
# print("valve: ", temp_valve)
# record values
St1_depth_episode.append(St1.depth)
St2_depth_episode.append(St2.depth)
J3_depth_episode.append(J3.depth)
St1_flooding_episode.append(St1.flooding)
St2_flooding_episode.append(St2.flooding)
J3_flooding_episode.append(J3.flooding)
R1_position_episode.append(R1.current_setting)
R2_position_episode.append(R2.current_setting)
# Select action according to the current policy (Actor weights) and exploration noise
# (sigmoid in last layer of actor should give range of actions from 0 to 1)
action = actor.predict(input_states) # one action for each controllable structure
# print(action)
# add exploration noise and make sure actions are within range
# action = np.clip(action + gen_noise(episode, action_space), act_range[0], act_range[1])
# noise_t = np.array([noise.generate(t_epsi), noise.generate(t_epsi)], 'float32').transpose()
noise_t = noise.generate(1)
expo = np.exp(-episode*3/num_episodes)
action = np.clip(action + noise_t * expo, act_range[0], act_range[1])
# action = np.clip(action + (np.random.random(1) / episode), act_range[0], act_range[1]) # try random exploration
# if np.random.random(1) < 0.1:
# action = np.array(np.random.random(2), 'float32', ndmin=2)
# action = np.clip(action, act_range[0], act_range[1])
# Implement Action
for i, j in enumerate(valve_list):
j.target_setting = action[0][i]
# R1.target_setting = action[0][0]
# R2.target_setting = action[1][1]
# Execute selected actions
sim.__next__() # advances swmm model by one step
# print("step_count: ", step_count)
# print("current sim time: ", sim.current_time)
# Observe next state
temp_new_height = np.asarray([St1.depth, St2.depth])
temp_new_flood = np.asarray([St1.flooding, St2.flooding, J3.flooding])
temp_new_valve = np.asarray([R1.current_setting, R2.current_setting])
node_new_states = np.append(temp_new_height, temp_new_flood)
# input_new_states = np.append(node_new_states, temp_new_valve).reshape(1, len(node_new_states) +
# len(temp_new_valve))
input_new_states = temp_new_height
# print("new state", input_new_states)
# print("new valve: ", temp_new_valve)
# Observe reward
reward = reward_function(temp_new_height, temp_new_flood)
reward_sim.append(reward)
# print("reward: ", reward)
# add to replay
replay.replay_memory_update(input_states, input_new_states, reward, action, False)
# Sample minibatch from memory
rnd_indx = random_indx(batch_size, replay.replay_memory['states'].data().shape[0])
minibatch = create_minibatch(rnd_indx, replay, batch_size, action_space)
batch_states = minibatch['states']
batch_new_states = minibatch['states_new']
batch_actions = minibatch['actions']
batch_rewards = minibatch['rewards']
batch_terminal = minibatch['terminal']
# Predict target q-values using target network (critic takes [state, action] as input)
q_values = critic.target_predict([batch_new_states, actor.target_predict(batch_new_states)])
# calculate critic targets using the Bellman equation
critic_target = np.asarray(q_values)
for i in range(q_values.shape[0]):
if batch_terminal[i]:
critic_target[i] = batch_rewards[i]
else:
critic_target[i] = batch_rewards[i] + gamma * q_values[i]
# Train both networks on sampled batch, update target networks
# Train critic
critic.train_on_batch(batch_states, batch_actions, critic_target)
# Q-Value Gradients under Current Policy
actions_for_grads = actor.model.predict(batch_states)
grads = critic.gradients(batch_states, actions_for_grads) # changed from batch_actions to actions_for_grads
# Train actor
actor.train(batch_states, actions_for_grads, np.array(grads).reshape((-1, action_space))) # changed from batch_actions to actions_for_grads
# Transfer weights to target networks at rate Tau
actor.transfer_weights()
critic.transfer_weights()
# close simulation at end of episode
sim.report()
sim.close()
# Store reward values
rewards_episode_tracker.append(np.mean(np.asarray(reward_sim)))
if episode != 1:
if rewards_episode_tracker[-1] >= max(rewards_episode_tracker[:-1]):
best_episode = episode
St1_depth = St1_depth_episode
St2_depth = St2_depth_episode
J3_depth = J3_depth_episode
St1_flooding = St1_flooding_episode
St2_flooding = St2_flooding_episode
J3_flooding = J3_flooding_episode
R1_position = R1_position_episode
R2_position = R2_position_episode
out_states = [St1_depth, St2_depth, J3_depth, St1_flooding, St2_flooding, J3_flooding]
out_actions = [R1_position, R2_position]
# save neural network models
save_model_name = "saved_model_" + str(episode)
actor.save(os.path.join(actor_dir, save_model_name))
critic.save(os.path.join(critic_dir, save_model_name))
np.save(os.path.join(reward_dir, save_model_name + "_rewards"), rewards_episode_tracker) # save all mean rewards
save_state_name = "DDPG_" + str(num_episodes) + "_states_" + rwd
save_action_name = "DDPG_" + str(num_episodes) + "_actions_" + rwd
save_state(out_states, os.path.join(out_dir, save_state_name + "states.csv"))
save_action(out_actions, os.path.join(out_dir, save_action_name + "actions.csv"))
# plot results from last episode
plt.subplot(2, 2, 1)
plt.plot(St1_depth)
plt.ylim(0, 5)
plt.title('St1_depth')
plt.ylabel("ft")
plt.xlabel("time step")
plt.subplot(2, 2, 2)
plt.plot(St2_depth)
plt.ylim(0, 5)
plt.title('St2_depth')
plt.ylabel("ft")
plt.xlabel("time step")
plt.subplot(2, 2, 3)
plt.plot(J3_depth)
plt.ylim(0, 2)
plt.title('J3_depth')
plt.ylabel("ft")
plt.xlabel("time step")
# bar graph for total flooding
plt.subplot(2, 2, 4)
plt.bar([0, 1, 2], [sum(St1_flooding), sum(St2_flooding), sum(J3_flooding)], tick_label=["St1", "St2", "J3"])
plt.ylim(0)
plt.title('total_flooding')
plt.ylabel("10^3 cubic feet")
plt.tight_layout()
# plt.show()
plt.savefig("smart_stormwater_rl/RL_DDPG/plots/ddpg_model_results_" + str(best_episode) + rwd + ".png", dpi=300)
plt.close()
# plot rewards and actions
plt.subplot(2, 1, 1)
plt.plot(rewards_episode_tracker)
plt.ylabel("average reward")
plt.xlabel("episode")
plt.subplot(2, 1, 2)
plt.plot(R1_position)
plt.plot(R2_position, linestyle='--')
plt.ylim(0, 1)
plt.ylabel("orifice position")
plt.xlabel("time step")
plt.tight_layout()
plt.savefig("smart_stormwater_rl/RL_DDPG/plots/ddpg_model_rewards_" + str(num_episodes) + rwd + "epi" +
str(best_episode) + ".png", dpi=300)
plt.close()
| actor.load_weights(os.path.join(actor_dir, save_model_name + "_actor.h5"))
critic.load_weights(os.path.join(critic_dir, save_model_name + "_critic.h5")) | conditional_block |
export.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package api defines the structures for the infection publishing API.
package api
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/googlepartners/exposure-notifications/internal/database"
"github.com/googlepartners/exposure-notifications/internal/logging"
"github.com/googlepartners/exposure-notifications/internal/model"
"github.com/googlepartners/exposure-notifications/internal/storage"
)
const (
batchIDParam = "batch-id"
)
func NewBatchServer(db *database.DB, bsc BatchServerConfig) *BatchServer {
return &BatchServer{
db: db,
bsc: bsc,
}
}
// BatchServer hosts end points to manage export batches.
type BatchServer struct {
db *database.DB
bsc BatchServerConfig
}
type BatchServerConfig struct {
CreateTimeout time.Duration
TmpBucket string
Bucket string
MaxRecords int
}
// CreateBatchesHandler is a handler to iterate the rows of ExportConfig and
// create entries in ExportBatchJob as appropriate.
func (s *BatchServer) CreateBatchesHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), s.bsc.CreateTimeout)
defer cancel()
logger := logging.FromContext(ctx)
// Obtain lock to make sure there are no other processes working to create batches.
lock := "create_batches"
unlockFn, err := s.db.Lock(ctx, lock, s.bsc.CreateTimeout) // TODO(jasonco): double this?
if err != nil {
if err == database.ErrAlreadyLocked {
msg := fmt.Sprintf("Lock %s already in use. No work will be performed.", lock)
logger.Infof(msg)
w.Write([]byte(msg)) // We return status 200 here so that Cloud Scheduler does not retry.
return
}
logger.Errorf("Could not acquire lock %s: %v", lock, err)
http.Error(w, fmt.Sprintf("Could not acquire lock %s, check logs.", lock), http.StatusInternalServerError)
return
}
defer unlockFn()
now := time.Now().UTC()
it, err := s.db.IterateExportConfigs(ctx, now)
if err != nil {
logger.Errorf("Failed to get export config iterator: %v", err)
http.Error(w, "Failed to get export config iterator, check logs.", http.StatusInternalServerError)
return
}
defer it.Close()
done := false
for !done {
select {
case <-ctx.Done():
if err := ctx.Err(); err != context.DeadlineExceeded && err != context.Canceled { // May be context.Canceled due to test code.
logger.Errorf("Context error: %v", err)
return
}
logger.Infof("Timed out before iterating batches. Will pick up on next invocation.")
return
default:
// Fallthrough to process a record.
}
var ec *model.ExportConfig
var err error
ec, done, err = it.Next()
if err != nil {
logger.Errorf("Failed to iterate export config: %v", err)
http.Error(w, "Failed to iterate export config, check logs.", http.StatusInternalServerError)
return
}
if done {
return
}
if ec == nil {
continue
}
if err := s.maybeCreateBatches(ctx, ec, now); err != nil {
logger.Errorf("Failed to create batches for config %d: %v. Continuing", ec.ConfigID, err)
}
}
}
func (s *BatchServer) maybeCreateBatches(ctx context.Context, ec *model.ExportConfig, now time.Time) error {
logger := logging.FromContext(ctx)
latestEnd, err := s.db.LatestExportBatchEnd(ctx, ec)
if err != nil {
return fmt.Errorf("fetching most recent batch for config %d: %v", ec.ConfigID, err)
}
ranges := makeBatchRanges(ec.Period, latestEnd, now)
if len(ranges) == 0 {
logger.Debugf("Batch creation for config %d is not required. Skipping.", ec.ConfigID)
return nil
}
var batches []*model.ExportBatch
for _, br := range ranges {
batches = append(batches, &model.ExportBatch{
ConfigID: ec.ConfigID,
FilenameRoot: ec.FilenameRoot,
StartTimestamp: br.start,
EndTimestamp: br.end,
IncludeRegions: ec.IncludeRegions,
ExcludeRegions: ec.ExcludeRegions,
Status: model.ExportBatchOpen,
})
}
if err := s.db.AddExportBatches(ctx, batches); err != nil {
return fmt.Errorf("creating export batches for config %d: %v", ec.ConfigID, err)
}
logger.Infof("Created %d batch(es) for config %d.", len(batches), ec.ConfigID)
return nil
}
type batchRange struct {
start, end time.Time
}
var sanityDate = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
func makeBatchRanges(period time.Duration, latestEnd, now time.Time) []batchRange {
// Truncate now to align with period; use this as the end date.
end := now.Truncate(period)
// If the end date < latest end date, we already have a batch that covers this period, so return no batches.
if end.Before(latestEnd) {
return nil
}
// Subtract period to get the start date.
start := end.Add(-period)
// Special case: if there have not been batches before, return only a single one.
// We use sanityDate here because the loop below will happily create batch ranges
// until the beginning of time otherwise.
if latestEnd.Before(sanityDate) {
return []batchRange{{start: start, end: end}}
}
// Build up a list of batches until we reach that latestEnd.
// Allow for overlap so we don't miss keys; this might happen in the event that
// an ExportConfig was edited and the new settings don't quite align.
ranges := []batchRange{}
for end.After(latestEnd) {
ranges = append([]batchRange{{start: start, end: end}}, ranges...)
start = start.Add(-period)
end = end.Add(-period)
}
return ranges
}
// CreateFilesHandler is a handler to iterate the rows of ExportBatch, and creates GCS files
func (s *BatchServer) CreateFilesHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
logger := logging.FromContext(ctx)
// Poll for a batch and obtain a lease for it
ttl := 15 * time.Minute // TODO(jasonco): take from args?
batch, err := s.db.LeaseBatch(ctx, ttl, time.Now().UTC())
if err != nil {
logger.Errorf("Failed to lease batch: %v", err)
http.Error(w, "Failed to lease batch, check logs.", http.StatusInternalServerError)
return
}
if batch == nil {
logger.Debugf("No work to do.")
return
}
ctx, cancel := context.WithDeadline(context.Background(), batch.LeaseExpires)
defer cancel()
// Create file(s)
if err = s.createExportFilesForBatch(ctx, *batch); err != nil {
logger.Errorf("Failed to create files for batch: %v", err)
http.Error(w, "Failed to create files for batch, check logs.", http.StatusInternalServerError)
return
}
fmt.Fprintf(w, "Batch %d marked completed", batch.BatchID)
}
func (s *BatchServer) createExportFilesForBatch(ctx context.Context, eb model.ExportBatch) error {
logger := logging.FromContext(ctx)
logger.Infof("Creating files for export config %v, batchID %v", eb.ConfigID, eb.BatchID)
logger.Infof("MaxRecords %v, since %v, until %v", s.bsc.MaxRecords, eb.StartTimestamp, eb.EndTimestamp)
logger.Infof("Included regions %v, ExcludedRegions %v ", eb.IncludeRegions, eb.ExcludeRegions)
logger.Infof("FilenameRoot %v ", eb.FilenameRoot)
var (
done = false
batchCount = 0
recordCount = 1
exposureKeys []*model.Infection
files []string
criteria = database.IterateInfectionsCriteria{
SinceTimestamp: eb.StartTimestamp,
UntilTimestamp: eb.EndTimestamp,
IncludeRegions: eb.IncludeRegions,
ExcludeRegions: eb.ExcludeRegions,
OnlyLocalProvenance: false, // include federated ids
}
)
it, err := s.db.IterateInfections(ctx, criteria)
if err != nil {
return fmt.Errorf("iterating infections: %v", err)
}
defer it.Close()
exp, done, err := it.Next()
// TODO(lmohanan): Watch for context deadline
for !done && err == nil {
if exp != nil {
exposureKeys = append(exposureKeys, exp)
recordCount++
}
if recordCount == s.bsc.MaxRecords {
objectName := fmt.Sprintf(eb.FilenameRoot+"%s-%d", eb.StartTimestamp.Unix(), batchCount)
if err = s.createFile(ctx, objectName, exposureKeys, eb, batchCount); err != nil |
// Append to files list
files = append(files, objectName)
batchCount++
recordCount = 1
}
exp, done, err = it.Next()
}
if err != nil {
return fmt.Errorf("iterating infections: %v", err)
}
// Create a file for the remaining keys
objectName := fmt.Sprintf(eb.FilenameRoot+"%s-%d", eb.StartTimestamp.Unix(), batchCount)
if err = s.createFile(ctx, objectName, exposureKeys, eb, batchCount); err != nil {
return err
}
// Append to files list
files = append(files, objectName)
batchCount++
// Update ExportFile for the files created: set batchSize and update status .
// TODO(lmohanan): Figure out batchCount ahead of time and do this immediately after writing to GCS
// for better failure protection.
// TODO(lmohanan): Perform UpdateExportFile and CompleteBatch as a transaction.
for _, file := range files {
s.db.UpdateExportFile(ctx, file, model.ExportBatchComplete, batchCount)
}
// Update ExportFile for the batch to mark it complete.
if err := s.db.CompleteBatch(ctx, eb.BatchID); err != nil {
return fmt.Errorf("marking batch %v complete: %v", eb.BatchID, err)
}
return nil
}
func (s *BatchServer) createFile(ctx context.Context, objectName string, exposureKeys []*model.Infection, eb model.ExportBatch, batchCount int) error {
// Add ExportFile entry with Status Pending
ef := model.ExportFile{
Filename: objectName,
BatchID: eb.BatchID,
Region: "", // TODO(lmohanan) figure out where region comes from.
BatchNum: batchCount,
Status: model.ExportBatchPending,
}
// TODO(lmohanan) Handle partial failure: If redoing this batch after a failure,
// these inserts can fail due to duplicate filename.
if err := s.db.AddExportFile(ctx, &ef); err != nil {
return fmt.Errorf("adding export file entry: %v", err)
}
// Format keys
data, err := MarshalExportFile(eb.StartTimestamp, eb.EndTimestamp, exposureKeys, "US")
if err != nil {
return fmt.Errorf("marshalling export file: %v", err)
}
// Write to GCS
err = storage.CreateObject(ctx, s.bsc.Bucket, objectName, data)
if err != nil {
return fmt.Errorf("creating file: %v", err)
}
return nil
}
func NewTestExportHandler(db *database.DB) http.Handler {
return &testExportHandler{db: db}
}
type testExportHandler struct {
db *database.DB
}
func (h *testExportHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
logger := logging.FromContext(ctx)
limit := 30000
limits, ok := r.URL.Query()["limit"]
if ok && len(limits) > 0 {
lim, err := strconv.Atoi(limits[0])
if err == nil {
limit = lim
}
}
logger.Infof("limiting to %v", limit)
since := time.Now().UTC().AddDate(0, 0, -5)
until := time.Now().UTC()
exposureKeys, err := h.queryExposureKeys(ctx, since, until, limit)
if err != nil {
logger.Errorf("error getting infections: %v", err)
http.Error(w, "internal processing error", http.StatusInternalServerError)
}
data, err := MarshalExportFile(since, until, exposureKeys, "US")
if err != nil {
logger.Errorf("error marshalling export file: %v", err)
http.Error(w, "internal processing error", http.StatusInternalServerError)
}
objectName := fmt.Sprintf("testExport-%d-records.pb", limit)
if err := storage.CreateObject(ctx, "apollo-public-bucket", objectName, data); err != nil {
logger.Errorf("error creating cloud storage object: %v", err)
http.Error(w, "internal processing error", http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
func (h *testExportHandler) queryExposureKeys(ctx context.Context, since, until time.Time, limit int) ([]*model.Infection, error) {
criteria := database.IterateInfectionsCriteria{
SinceTimestamp: since,
UntilTimestamp: until,
OnlyLocalProvenance: false, // include federated ids
}
it, err := h.db.IterateInfections(ctx, criteria)
if err != nil {
return nil, err
}
defer it.Close()
var exposureKeys []*model.Infection
num := 1
exp, done, err := it.Next()
for !done && err == nil && num <= limit {
if exp != nil {
exposureKeys = append(exposureKeys, exp)
num++
}
exp, done, err = it.Next()
}
if err != nil {
return nil, err
}
return exposureKeys, nil
}
| {
return err
} | conditional_block |
export.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package api defines the structures for the infection publishing API.
package api
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/googlepartners/exposure-notifications/internal/database"
"github.com/googlepartners/exposure-notifications/internal/logging"
"github.com/googlepartners/exposure-notifications/internal/model"
"github.com/googlepartners/exposure-notifications/internal/storage"
)
const (
batchIDParam = "batch-id"
)
func NewBatchServer(db *database.DB, bsc BatchServerConfig) *BatchServer {
return &BatchServer{
db: db,
bsc: bsc,
}
}
// BatchServer hosts end points to manage export batches.
type BatchServer struct {
db *database.DB
bsc BatchServerConfig
}
type BatchServerConfig struct {
CreateTimeout time.Duration
TmpBucket string
Bucket string
MaxRecords int
}
// CreateBatchesHandler is a handler to iterate the rows of ExportConfig and
// create entries in ExportBatchJob as appropriate.
func (s *BatchServer) CreateBatchesHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), s.bsc.CreateTimeout)
defer cancel()
logger := logging.FromContext(ctx)
// Obtain lock to make sure there are no other processes working to create batches.
lock := "create_batches"
unlockFn, err := s.db.Lock(ctx, lock, s.bsc.CreateTimeout) // TODO(jasonco): double this?
if err != nil {
if err == database.ErrAlreadyLocked {
msg := fmt.Sprintf("Lock %s already in use. No work will be performed.", lock)
logger.Infof(msg)
w.Write([]byte(msg)) // We return status 200 here so that Cloud Scheduler does not retry.
return
}
logger.Errorf("Could not acquire lock %s: %v", lock, err)
http.Error(w, fmt.Sprintf("Could not acquire lock %s, check logs.", lock), http.StatusInternalServerError)
return
}
defer unlockFn()
now := time.Now().UTC()
it, err := s.db.IterateExportConfigs(ctx, now)
if err != nil {
logger.Errorf("Failed to get export config iterator: %v", err)
http.Error(w, "Failed to get export config iterator, check logs.", http.StatusInternalServerError)
return
}
defer it.Close()
done := false
for !done {
select {
case <-ctx.Done():
if err := ctx.Err(); err != context.DeadlineExceeded && err != context.Canceled { // May be context.Canceled due to test code.
logger.Errorf("Context error: %v", err)
return
}
logger.Infof("Timed out before iterating batches. Will pick up on next invocation.")
return
default:
// Fallthrough to process a record.
}
var ec *model.ExportConfig
var err error
ec, done, err = it.Next()
if err != nil {
logger.Errorf("Failed to iterate export config: %v", err)
http.Error(w, "Failed to iterate export config, check logs.", http.StatusInternalServerError)
return
}
if done {
return
}
if ec == nil {
continue
}
if err := s.maybeCreateBatches(ctx, ec, now); err != nil {
logger.Errorf("Failed to create batches for config %d: %v. Continuing", ec.ConfigID, err)
}
}
}
func (s *BatchServer) maybeCreateBatches(ctx context.Context, ec *model.ExportConfig, now time.Time) error {
logger := logging.FromContext(ctx)
latestEnd, err := s.db.LatestExportBatchEnd(ctx, ec)
if err != nil {
return fmt.Errorf("fetching most recent batch for config %d: %v", ec.ConfigID, err)
}
ranges := makeBatchRanges(ec.Period, latestEnd, now)
if len(ranges) == 0 {
logger.Debugf("Batch creation for config %d is not required. Skipping.", ec.ConfigID)
return nil
}
var batches []*model.ExportBatch
for _, br := range ranges {
batches = append(batches, &model.ExportBatch{
ConfigID: ec.ConfigID,
FilenameRoot: ec.FilenameRoot,
StartTimestamp: br.start,
EndTimestamp: br.end,
IncludeRegions: ec.IncludeRegions,
ExcludeRegions: ec.ExcludeRegions,
Status: model.ExportBatchOpen,
})
}
if err := s.db.AddExportBatches(ctx, batches); err != nil {
return fmt.Errorf("creating export batches for config %d: %v", ec.ConfigID, err)
}
logger.Infof("Created %d batch(es) for config %d.", len(batches), ec.ConfigID)
return nil
}
type batchRange struct {
start, end time.Time
}
var sanityDate = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
func makeBatchRanges(period time.Duration, latestEnd, now time.Time) []batchRange {
// Truncate now to align with period; use this as the end date.
end := now.Truncate(period)
// If the end date < latest end date, we already have a batch that covers this period, so return no batches.
if end.Before(latestEnd) {
return nil
}
// Subtract period to get the start date.
start := end.Add(-period)
// Special case: if there have not been batches before, return only a single one.
// We use sanityDate here because the loop below will happily create batch ranges
// until the beginning of time otherwise.
if latestEnd.Before(sanityDate) {
return []batchRange{{start: start, end: end}}
}
// Build up a list of batches until we reach that latestEnd.
// Allow for overlap so we don't miss keys; this might happen in the event that
// an ExportConfig was edited and the new settings don't quite align.
ranges := []batchRange{}
for end.After(latestEnd) {
ranges = append([]batchRange{{start: start, end: end}}, ranges...)
start = start.Add(-period)
end = end.Add(-period)
}
return ranges
}
// CreateFilesHandler is a handler to iterate the rows of ExportBatch, and creates GCS files
func (s *BatchServer) CreateFilesHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
logger := logging.FromContext(ctx)
// Poll for a batch and obtain a lease for it
ttl := 15 * time.Minute // TODO(jasonco): take from args?
batch, err := s.db.LeaseBatch(ctx, ttl, time.Now().UTC())
if err != nil {
logger.Errorf("Failed to lease batch: %v", err)
http.Error(w, "Failed to lease batch, check logs.", http.StatusInternalServerError)
return
}
if batch == nil {
logger.Debugf("No work to do.")
return
}
ctx, cancel := context.WithDeadline(context.Background(), batch.LeaseExpires)
defer cancel()
// Create file(s)
if err = s.createExportFilesForBatch(ctx, *batch); err != nil {
logger.Errorf("Failed to create files for batch: %v", err)
http.Error(w, "Failed to create files for batch, check logs.", http.StatusInternalServerError)
return
}
fmt.Fprintf(w, "Batch %d marked completed", batch.BatchID)
}
func (s *BatchServer) createExportFilesForBatch(ctx context.Context, eb model.ExportBatch) error {
logger := logging.FromContext(ctx)
logger.Infof("Creating files for export config %v, batchID %v", eb.ConfigID, eb.BatchID)
logger.Infof("MaxRecords %v, since %v, until %v", s.bsc.MaxRecords, eb.StartTimestamp, eb.EndTimestamp)
logger.Infof("Included regions %v, ExcludedRegions %v ", eb.IncludeRegions, eb.ExcludeRegions)
logger.Infof("FilenameRoot %v ", eb.FilenameRoot)
var (
done = false
batchCount = 0
recordCount = 1
exposureKeys []*model.Infection
files []string
criteria = database.IterateInfectionsCriteria{
SinceTimestamp: eb.StartTimestamp, | OnlyLocalProvenance: false, // include federated ids
}
)
it, err := s.db.IterateInfections(ctx, criteria)
if err != nil {
return fmt.Errorf("iterating infections: %v", err)
}
defer it.Close()
exp, done, err := it.Next()
// TODO(lmohanan): Watch for context deadline
for !done && err == nil {
if exp != nil {
exposureKeys = append(exposureKeys, exp)
recordCount++
}
if recordCount == s.bsc.MaxRecords {
objectName := fmt.Sprintf(eb.FilenameRoot+"%s-%d", eb.StartTimestamp.Unix(), batchCount)
if err = s.createFile(ctx, objectName, exposureKeys, eb, batchCount); err != nil {
return err
}
// Append to files list
files = append(files, objectName)
batchCount++
recordCount = 1
}
exp, done, err = it.Next()
}
if err != nil {
return fmt.Errorf("iterating infections: %v", err)
}
// Create a file for the remaining keys
objectName := fmt.Sprintf(eb.FilenameRoot+"%s-%d", eb.StartTimestamp.Unix(), batchCount)
if err = s.createFile(ctx, objectName, exposureKeys, eb, batchCount); err != nil {
return err
}
// Append to files list
files = append(files, objectName)
batchCount++
// Update ExportFile for the files created: set batchSize and update status .
// TODO(lmohanan): Figure out batchCount ahead of time and do this immediately after writing to GCS
// for better failure protection.
// TODO(lmohanan): Perform UpdateExportFile and CompleteBatch as a transaction.
for _, file := range files {
s.db.UpdateExportFile(ctx, file, model.ExportBatchComplete, batchCount)
}
// Update ExportFile for the batch to mark it complete.
if err := s.db.CompleteBatch(ctx, eb.BatchID); err != nil {
return fmt.Errorf("marking batch %v complete: %v", eb.BatchID, err)
}
return nil
}
func (s *BatchServer) createFile(ctx context.Context, objectName string, exposureKeys []*model.Infection, eb model.ExportBatch, batchCount int) error {
// Add ExportFile entry with Status Pending
ef := model.ExportFile{
Filename: objectName,
BatchID: eb.BatchID,
Region: "", // TODO(lmohanan) figure out where region comes from.
BatchNum: batchCount,
Status: model.ExportBatchPending,
}
// TODO(lmohanan) Handle partial failure: If redoing this batch after a failure,
// these inserts can fail due to duplicate filename.
if err := s.db.AddExportFile(ctx, &ef); err != nil {
return fmt.Errorf("adding export file entry: %v", err)
}
// Format keys
data, err := MarshalExportFile(eb.StartTimestamp, eb.EndTimestamp, exposureKeys, "US")
if err != nil {
return fmt.Errorf("marshalling export file: %v", err)
}
// Write to GCS
err = storage.CreateObject(ctx, s.bsc.Bucket, objectName, data)
if err != nil {
return fmt.Errorf("creating file: %v", err)
}
return nil
}
func NewTestExportHandler(db *database.DB) http.Handler {
return &testExportHandler{db: db}
}
type testExportHandler struct {
db *database.DB
}
func (h *testExportHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
logger := logging.FromContext(ctx)
limit := 30000
limits, ok := r.URL.Query()["limit"]
if ok && len(limits) > 0 {
lim, err := strconv.Atoi(limits[0])
if err == nil {
limit = lim
}
}
logger.Infof("limiting to %v", limit)
since := time.Now().UTC().AddDate(0, 0, -5)
until := time.Now().UTC()
exposureKeys, err := h.queryExposureKeys(ctx, since, until, limit)
if err != nil {
logger.Errorf("error getting infections: %v", err)
http.Error(w, "internal processing error", http.StatusInternalServerError)
}
data, err := MarshalExportFile(since, until, exposureKeys, "US")
if err != nil {
logger.Errorf("error marshalling export file: %v", err)
http.Error(w, "internal processing error", http.StatusInternalServerError)
}
objectName := fmt.Sprintf("testExport-%d-records.pb", limit)
if err := storage.CreateObject(ctx, "apollo-public-bucket", objectName, data); err != nil {
logger.Errorf("error creating cloud storage object: %v", err)
http.Error(w, "internal processing error", http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
func (h *testExportHandler) queryExposureKeys(ctx context.Context, since, until time.Time, limit int) ([]*model.Infection, error) {
criteria := database.IterateInfectionsCriteria{
SinceTimestamp: since,
UntilTimestamp: until,
OnlyLocalProvenance: false, // include federated ids
}
it, err := h.db.IterateInfections(ctx, criteria)
if err != nil {
return nil, err
}
defer it.Close()
var exposureKeys []*model.Infection
num := 1
exp, done, err := it.Next()
for !done && err == nil && num <= limit {
if exp != nil {
exposureKeys = append(exposureKeys, exp)
num++
}
exp, done, err = it.Next()
}
if err != nil {
return nil, err
}
return exposureKeys, nil
} | UntilTimestamp: eb.EndTimestamp,
IncludeRegions: eb.IncludeRegions,
ExcludeRegions: eb.ExcludeRegions, | random_line_split |
export.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package api defines the structures for the infection publishing API.
package api
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/googlepartners/exposure-notifications/internal/database"
"github.com/googlepartners/exposure-notifications/internal/logging"
"github.com/googlepartners/exposure-notifications/internal/model"
"github.com/googlepartners/exposure-notifications/internal/storage"
)
const (
batchIDParam = "batch-id"
)
func NewBatchServer(db *database.DB, bsc BatchServerConfig) *BatchServer {
return &BatchServer{
db: db,
bsc: bsc,
}
}
// BatchServer hosts end points to manage export batches.
type BatchServer struct {
db *database.DB
bsc BatchServerConfig
}
type BatchServerConfig struct {
CreateTimeout time.Duration
TmpBucket string
Bucket string
MaxRecords int
}
// CreateBatchesHandler is a handler to iterate the rows of ExportConfig and
// create entries in ExportBatchJob as appropriate.
func (s *BatchServer) CreateBatchesHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), s.bsc.CreateTimeout)
defer cancel()
logger := logging.FromContext(ctx)
// Obtain lock to make sure there are no other processes working to create batches.
lock := "create_batches"
unlockFn, err := s.db.Lock(ctx, lock, s.bsc.CreateTimeout) // TODO(jasonco): double this?
if err != nil {
if err == database.ErrAlreadyLocked {
msg := fmt.Sprintf("Lock %s already in use. No work will be performed.", lock)
logger.Infof(msg)
w.Write([]byte(msg)) // We return status 200 here so that Cloud Scheduler does not retry.
return
}
logger.Errorf("Could not acquire lock %s: %v", lock, err)
http.Error(w, fmt.Sprintf("Could not acquire lock %s, check logs.", lock), http.StatusInternalServerError)
return
}
defer unlockFn()
now := time.Now().UTC()
it, err := s.db.IterateExportConfigs(ctx, now)
if err != nil {
logger.Errorf("Failed to get export config iterator: %v", err)
http.Error(w, "Failed to get export config iterator, check logs.", http.StatusInternalServerError)
return
}
defer it.Close()
done := false
for !done {
select {
case <-ctx.Done():
if err := ctx.Err(); err != context.DeadlineExceeded && err != context.Canceled { // May be context.Canceled due to test code.
logger.Errorf("Context error: %v", err)
return
}
logger.Infof("Timed out before iterating batches. Will pick up on next invocation.")
return
default:
// Fallthrough to process a record.
}
var ec *model.ExportConfig
var err error
ec, done, err = it.Next()
if err != nil {
logger.Errorf("Failed to iterate export config: %v", err)
http.Error(w, "Failed to iterate export config, check logs.", http.StatusInternalServerError)
return
}
if done {
return
}
if ec == nil {
continue
}
if err := s.maybeCreateBatches(ctx, ec, now); err != nil {
logger.Errorf("Failed to create batches for config %d: %v. Continuing", ec.ConfigID, err)
}
}
}
func (s *BatchServer) maybeCreateBatches(ctx context.Context, ec *model.ExportConfig, now time.Time) error {
logger := logging.FromContext(ctx)
latestEnd, err := s.db.LatestExportBatchEnd(ctx, ec)
if err != nil {
return fmt.Errorf("fetching most recent batch for config %d: %v", ec.ConfigID, err)
}
ranges := makeBatchRanges(ec.Period, latestEnd, now)
if len(ranges) == 0 {
logger.Debugf("Batch creation for config %d is not required. Skipping.", ec.ConfigID)
return nil
}
var batches []*model.ExportBatch
for _, br := range ranges {
batches = append(batches, &model.ExportBatch{
ConfigID: ec.ConfigID,
FilenameRoot: ec.FilenameRoot,
StartTimestamp: br.start,
EndTimestamp: br.end,
IncludeRegions: ec.IncludeRegions,
ExcludeRegions: ec.ExcludeRegions,
Status: model.ExportBatchOpen,
})
}
if err := s.db.AddExportBatches(ctx, batches); err != nil {
return fmt.Errorf("creating export batches for config %d: %v", ec.ConfigID, err)
}
logger.Infof("Created %d batch(es) for config %d.", len(batches), ec.ConfigID)
return nil
}
type batchRange struct {
start, end time.Time
}
var sanityDate = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
func makeBatchRanges(period time.Duration, latestEnd, now time.Time) []batchRange {
// Truncate now to align with period; use this as the end date.
end := now.Truncate(period)
// If the end date < latest end date, we already have a batch that covers this period, so return no batches.
if end.Before(latestEnd) {
return nil
}
// Subtract period to get the start date.
start := end.Add(-period)
// Special case: if there have not been batches before, return only a single one.
// We use sanityDate here because the loop below will happily create batch ranges
// until the beginning of time otherwise.
if latestEnd.Before(sanityDate) {
return []batchRange{{start: start, end: end}}
}
// Build up a list of batches until we reach that latestEnd.
// Allow for overlap so we don't miss keys; this might happen in the event that
// an ExportConfig was edited and the new settings don't quite align.
ranges := []batchRange{}
for end.After(latestEnd) {
ranges = append([]batchRange{{start: start, end: end}}, ranges...)
start = start.Add(-period)
end = end.Add(-period)
}
return ranges
}
// CreateFilesHandler is a handler to iterate the rows of ExportBatch, and creates GCS files
func (s *BatchServer) CreateFilesHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
logger := logging.FromContext(ctx)
// Poll for a batch and obtain a lease for it
ttl := 15 * time.Minute // TODO(jasonco): take from args?
batch, err := s.db.LeaseBatch(ctx, ttl, time.Now().UTC())
if err != nil {
logger.Errorf("Failed to lease batch: %v", err)
http.Error(w, "Failed to lease batch, check logs.", http.StatusInternalServerError)
return
}
if batch == nil {
logger.Debugf("No work to do.")
return
}
ctx, cancel := context.WithDeadline(context.Background(), batch.LeaseExpires)
defer cancel()
// Create file(s)
if err = s.createExportFilesForBatch(ctx, *batch); err != nil {
logger.Errorf("Failed to create files for batch: %v", err)
http.Error(w, "Failed to create files for batch, check logs.", http.StatusInternalServerError)
return
}
fmt.Fprintf(w, "Batch %d marked completed", batch.BatchID)
}
func (s *BatchServer) createExportFilesForBatch(ctx context.Context, eb model.ExportBatch) error |
func (s *BatchServer) createFile(ctx context.Context, objectName string, exposureKeys []*model.Infection, eb model.ExportBatch, batchCount int) error {
// Add ExportFile entry with Status Pending
ef := model.ExportFile{
Filename: objectName,
BatchID: eb.BatchID,
Region: "", // TODO(lmohanan) figure out where region comes from.
BatchNum: batchCount,
Status: model.ExportBatchPending,
}
// TODO(lmohanan) Handle partial failure: If redoing this batch after a failure,
// these inserts can fail due to duplicate filename.
if err := s.db.AddExportFile(ctx, &ef); err != nil {
return fmt.Errorf("adding export file entry: %v", err)
}
// Format keys
data, err := MarshalExportFile(eb.StartTimestamp, eb.EndTimestamp, exposureKeys, "US")
if err != nil {
return fmt.Errorf("marshalling export file: %v", err)
}
// Write to GCS
err = storage.CreateObject(ctx, s.bsc.Bucket, objectName, data)
if err != nil {
return fmt.Errorf("creating file: %v", err)
}
return nil
}
func NewTestExportHandler(db *database.DB) http.Handler {
return &testExportHandler{db: db}
}
type testExportHandler struct {
db *database.DB
}
func (h *testExportHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
logger := logging.FromContext(ctx)
limit := 30000
limits, ok := r.URL.Query()["limit"]
if ok && len(limits) > 0 {
lim, err := strconv.Atoi(limits[0])
if err == nil {
limit = lim
}
}
logger.Infof("limiting to %v", limit)
since := time.Now().UTC().AddDate(0, 0, -5)
until := time.Now().UTC()
exposureKeys, err := h.queryExposureKeys(ctx, since, until, limit)
if err != nil {
logger.Errorf("error getting infections: %v", err)
http.Error(w, "internal processing error", http.StatusInternalServerError)
}
data, err := MarshalExportFile(since, until, exposureKeys, "US")
if err != nil {
logger.Errorf("error marshalling export file: %v", err)
http.Error(w, "internal processing error", http.StatusInternalServerError)
}
objectName := fmt.Sprintf("testExport-%d-records.pb", limit)
if err := storage.CreateObject(ctx, "apollo-public-bucket", objectName, data); err != nil {
logger.Errorf("error creating cloud storage object: %v", err)
http.Error(w, "internal processing error", http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
func (h *testExportHandler) queryExposureKeys(ctx context.Context, since, until time.Time, limit int) ([]*model.Infection, error) {
criteria := database.IterateInfectionsCriteria{
SinceTimestamp: since,
UntilTimestamp: until,
OnlyLocalProvenance: false, // include federated ids
}
it, err := h.db.IterateInfections(ctx, criteria)
if err != nil {
return nil, err
}
defer it.Close()
var exposureKeys []*model.Infection
num := 1
exp, done, err := it.Next()
for !done && err == nil && num <= limit {
if exp != nil {
exposureKeys = append(exposureKeys, exp)
num++
}
exp, done, err = it.Next()
}
if err != nil {
return nil, err
}
return exposureKeys, nil
}
| {
logger := logging.FromContext(ctx)
logger.Infof("Creating files for export config %v, batchID %v", eb.ConfigID, eb.BatchID)
logger.Infof("MaxRecords %v, since %v, until %v", s.bsc.MaxRecords, eb.StartTimestamp, eb.EndTimestamp)
logger.Infof("Included regions %v, ExcludedRegions %v ", eb.IncludeRegions, eb.ExcludeRegions)
logger.Infof("FilenameRoot %v ", eb.FilenameRoot)
var (
done = false
batchCount = 0
recordCount = 1
exposureKeys []*model.Infection
files []string
criteria = database.IterateInfectionsCriteria{
SinceTimestamp: eb.StartTimestamp,
UntilTimestamp: eb.EndTimestamp,
IncludeRegions: eb.IncludeRegions,
ExcludeRegions: eb.ExcludeRegions,
OnlyLocalProvenance: false, // include federated ids
}
)
it, err := s.db.IterateInfections(ctx, criteria)
if err != nil {
return fmt.Errorf("iterating infections: %v", err)
}
defer it.Close()
exp, done, err := it.Next()
// TODO(lmohanan): Watch for context deadline
for !done && err == nil {
if exp != nil {
exposureKeys = append(exposureKeys, exp)
recordCount++
}
if recordCount == s.bsc.MaxRecords {
objectName := fmt.Sprintf(eb.FilenameRoot+"%s-%d", eb.StartTimestamp.Unix(), batchCount)
if err = s.createFile(ctx, objectName, exposureKeys, eb, batchCount); err != nil {
return err
}
// Append to files list
files = append(files, objectName)
batchCount++
recordCount = 1
}
exp, done, err = it.Next()
}
if err != nil {
return fmt.Errorf("iterating infections: %v", err)
}
// Create a file for the remaining keys
objectName := fmt.Sprintf(eb.FilenameRoot+"%s-%d", eb.StartTimestamp.Unix(), batchCount)
if err = s.createFile(ctx, objectName, exposureKeys, eb, batchCount); err != nil {
return err
}
// Append to files list
files = append(files, objectName)
batchCount++
// Update ExportFile for the files created: set batchSize and update status .
// TODO(lmohanan): Figure out batchCount ahead of time and do this immediately after writing to GCS
// for better failure protection.
// TODO(lmohanan): Perform UpdateExportFile and CompleteBatch as a transaction.
for _, file := range files {
s.db.UpdateExportFile(ctx, file, model.ExportBatchComplete, batchCount)
}
// Update ExportFile for the batch to mark it complete.
if err := s.db.CompleteBatch(ctx, eb.BatchID); err != nil {
return fmt.Errorf("marking batch %v complete: %v", eb.BatchID, err)
}
return nil
} | identifier_body |
export.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package api defines the structures for the infection publishing API.
package api
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/googlepartners/exposure-notifications/internal/database"
"github.com/googlepartners/exposure-notifications/internal/logging"
"github.com/googlepartners/exposure-notifications/internal/model"
"github.com/googlepartners/exposure-notifications/internal/storage"
)
const (
batchIDParam = "batch-id"
)
func NewBatchServer(db *database.DB, bsc BatchServerConfig) *BatchServer {
return &BatchServer{
db: db,
bsc: bsc,
}
}
// BatchServer hosts end points to manage export batches.
type BatchServer struct {
db *database.DB
bsc BatchServerConfig
}
type BatchServerConfig struct {
CreateTimeout time.Duration
TmpBucket string
Bucket string
MaxRecords int
}
// CreateBatchesHandler is a handler to iterate the rows of ExportConfig and
// create entries in ExportBatchJob as appropriate.
func (s *BatchServer) | (w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), s.bsc.CreateTimeout)
defer cancel()
logger := logging.FromContext(ctx)
// Obtain lock to make sure there are no other processes working to create batches.
lock := "create_batches"
unlockFn, err := s.db.Lock(ctx, lock, s.bsc.CreateTimeout) // TODO(jasonco): double this?
if err != nil {
if err == database.ErrAlreadyLocked {
msg := fmt.Sprintf("Lock %s already in use. No work will be performed.", lock)
logger.Infof(msg)
w.Write([]byte(msg)) // We return status 200 here so that Cloud Scheduler does not retry.
return
}
logger.Errorf("Could not acquire lock %s: %v", lock, err)
http.Error(w, fmt.Sprintf("Could not acquire lock %s, check logs.", lock), http.StatusInternalServerError)
return
}
defer unlockFn()
now := time.Now().UTC()
it, err := s.db.IterateExportConfigs(ctx, now)
if err != nil {
logger.Errorf("Failed to get export config iterator: %v", err)
http.Error(w, "Failed to get export config iterator, check logs.", http.StatusInternalServerError)
return
}
defer it.Close()
done := false
for !done {
select {
case <-ctx.Done():
if err := ctx.Err(); err != context.DeadlineExceeded && err != context.Canceled { // May be context.Canceled due to test code.
logger.Errorf("Context error: %v", err)
return
}
logger.Infof("Timed out before iterating batches. Will pick up on next invocation.")
return
default:
// Fallthrough to process a record.
}
var ec *model.ExportConfig
var err error
ec, done, err = it.Next()
if err != nil {
logger.Errorf("Failed to iterate export config: %v", err)
http.Error(w, "Failed to iterate export config, check logs.", http.StatusInternalServerError)
return
}
if done {
return
}
if ec == nil {
continue
}
if err := s.maybeCreateBatches(ctx, ec, now); err != nil {
logger.Errorf("Failed to create batches for config %d: %v. Continuing", ec.ConfigID, err)
}
}
}
func (s *BatchServer) maybeCreateBatches(ctx context.Context, ec *model.ExportConfig, now time.Time) error {
logger := logging.FromContext(ctx)
latestEnd, err := s.db.LatestExportBatchEnd(ctx, ec)
if err != nil {
return fmt.Errorf("fetching most recent batch for config %d: %v", ec.ConfigID, err)
}
ranges := makeBatchRanges(ec.Period, latestEnd, now)
if len(ranges) == 0 {
logger.Debugf("Batch creation for config %d is not required. Skipping.", ec.ConfigID)
return nil
}
var batches []*model.ExportBatch
for _, br := range ranges {
batches = append(batches, &model.ExportBatch{
ConfigID: ec.ConfigID,
FilenameRoot: ec.FilenameRoot,
StartTimestamp: br.start,
EndTimestamp: br.end,
IncludeRegions: ec.IncludeRegions,
ExcludeRegions: ec.ExcludeRegions,
Status: model.ExportBatchOpen,
})
}
if err := s.db.AddExportBatches(ctx, batches); err != nil {
return fmt.Errorf("creating export batches for config %d: %v", ec.ConfigID, err)
}
logger.Infof("Created %d batch(es) for config %d.", len(batches), ec.ConfigID)
return nil
}
type batchRange struct {
start, end time.Time
}
var sanityDate = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
func makeBatchRanges(period time.Duration, latestEnd, now time.Time) []batchRange {
// Truncate now to align with period; use this as the end date.
end := now.Truncate(period)
// If the end date < latest end date, we already have a batch that covers this period, so return no batches.
if end.Before(latestEnd) {
return nil
}
// Subtract period to get the start date.
start := end.Add(-period)
// Special case: if there have not been batches before, return only a single one.
// We use sanityDate here because the loop below will happily create batch ranges
// until the beginning of time otherwise.
if latestEnd.Before(sanityDate) {
return []batchRange{{start: start, end: end}}
}
// Build up a list of batches until we reach that latestEnd.
// Allow for overlap so we don't miss keys; this might happen in the event that
// an ExportConfig was edited and the new settings don't quite align.
ranges := []batchRange{}
for end.After(latestEnd) {
ranges = append([]batchRange{{start: start, end: end}}, ranges...)
start = start.Add(-period)
end = end.Add(-period)
}
return ranges
}
// CreateFilesHandler is a handler to iterate the rows of ExportBatch, and creates GCS files
func (s *BatchServer) CreateFilesHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
logger := logging.FromContext(ctx)
// Poll for a batch and obtain a lease for it
ttl := 15 * time.Minute // TODO(jasonco): take from args?
batch, err := s.db.LeaseBatch(ctx, ttl, time.Now().UTC())
if err != nil {
logger.Errorf("Failed to lease batch: %v", err)
http.Error(w, "Failed to lease batch, check logs.", http.StatusInternalServerError)
return
}
if batch == nil {
logger.Debugf("No work to do.")
return
}
ctx, cancel := context.WithDeadline(context.Background(), batch.LeaseExpires)
defer cancel()
// Create file(s)
if err = s.createExportFilesForBatch(ctx, *batch); err != nil {
logger.Errorf("Failed to create files for batch: %v", err)
http.Error(w, "Failed to create files for batch, check logs.", http.StatusInternalServerError)
return
}
fmt.Fprintf(w, "Batch %d marked completed", batch.BatchID)
}
func (s *BatchServer) createExportFilesForBatch(ctx context.Context, eb model.ExportBatch) error {
logger := logging.FromContext(ctx)
logger.Infof("Creating files for export config %v, batchID %v", eb.ConfigID, eb.BatchID)
logger.Infof("MaxRecords %v, since %v, until %v", s.bsc.MaxRecords, eb.StartTimestamp, eb.EndTimestamp)
logger.Infof("Included regions %v, ExcludedRegions %v ", eb.IncludeRegions, eb.ExcludeRegions)
logger.Infof("FilenameRoot %v ", eb.FilenameRoot)
var (
done = false
batchCount = 0
recordCount = 1
exposureKeys []*model.Infection
files []string
criteria = database.IterateInfectionsCriteria{
SinceTimestamp: eb.StartTimestamp,
UntilTimestamp: eb.EndTimestamp,
IncludeRegions: eb.IncludeRegions,
ExcludeRegions: eb.ExcludeRegions,
OnlyLocalProvenance: false, // include federated ids
}
)
it, err := s.db.IterateInfections(ctx, criteria)
if err != nil {
return fmt.Errorf("iterating infections: %v", err)
}
defer it.Close()
exp, done, err := it.Next()
// TODO(lmohanan): Watch for context deadline
for !done && err == nil {
if exp != nil {
exposureKeys = append(exposureKeys, exp)
recordCount++
}
if recordCount == s.bsc.MaxRecords {
objectName := fmt.Sprintf(eb.FilenameRoot+"%s-%d", eb.StartTimestamp.Unix(), batchCount)
if err = s.createFile(ctx, objectName, exposureKeys, eb, batchCount); err != nil {
return err
}
// Append to files list
files = append(files, objectName)
batchCount++
recordCount = 1
}
exp, done, err = it.Next()
}
if err != nil {
return fmt.Errorf("iterating infections: %v", err)
}
// Create a file for the remaining keys
objectName := fmt.Sprintf(eb.FilenameRoot+"%s-%d", eb.StartTimestamp.Unix(), batchCount)
if err = s.createFile(ctx, objectName, exposureKeys, eb, batchCount); err != nil {
return err
}
// Append to files list
files = append(files, objectName)
batchCount++
// Update ExportFile for the files created: set batchSize and update status .
// TODO(lmohanan): Figure out batchCount ahead of time and do this immediately after writing to GCS
// for better failure protection.
// TODO(lmohanan): Perform UpdateExportFile and CompleteBatch as a transaction.
for _, file := range files {
s.db.UpdateExportFile(ctx, file, model.ExportBatchComplete, batchCount)
}
// Update ExportFile for the batch to mark it complete.
if err := s.db.CompleteBatch(ctx, eb.BatchID); err != nil {
return fmt.Errorf("marking batch %v complete: %v", eb.BatchID, err)
}
return nil
}
func (s *BatchServer) createFile(ctx context.Context, objectName string, exposureKeys []*model.Infection, eb model.ExportBatch, batchCount int) error {
// Add ExportFile entry with Status Pending
ef := model.ExportFile{
Filename: objectName,
BatchID: eb.BatchID,
Region: "", // TODO(lmohanan) figure out where region comes from.
BatchNum: batchCount,
Status: model.ExportBatchPending,
}
// TODO(lmohanan) Handle partial failure: If redoing this batch after a failure,
// these inserts can fail due to duplicate filename.
if err := s.db.AddExportFile(ctx, &ef); err != nil {
return fmt.Errorf("adding export file entry: %v", err)
}
// Format keys
data, err := MarshalExportFile(eb.StartTimestamp, eb.EndTimestamp, exposureKeys, "US")
if err != nil {
return fmt.Errorf("marshalling export file: %v", err)
}
// Write to GCS
err = storage.CreateObject(ctx, s.bsc.Bucket, objectName, data)
if err != nil {
return fmt.Errorf("creating file: %v", err)
}
return nil
}
func NewTestExportHandler(db *database.DB) http.Handler {
return &testExportHandler{db: db}
}
type testExportHandler struct {
db *database.DB
}
func (h *testExportHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
logger := logging.FromContext(ctx)
limit := 30000
limits, ok := r.URL.Query()["limit"]
if ok && len(limits) > 0 {
lim, err := strconv.Atoi(limits[0])
if err == nil {
limit = lim
}
}
logger.Infof("limiting to %v", limit)
since := time.Now().UTC().AddDate(0, 0, -5)
until := time.Now().UTC()
exposureKeys, err := h.queryExposureKeys(ctx, since, until, limit)
if err != nil {
logger.Errorf("error getting infections: %v", err)
http.Error(w, "internal processing error", http.StatusInternalServerError)
}
data, err := MarshalExportFile(since, until, exposureKeys, "US")
if err != nil {
logger.Errorf("error marshalling export file: %v", err)
http.Error(w, "internal processing error", http.StatusInternalServerError)
}
objectName := fmt.Sprintf("testExport-%d-records.pb", limit)
if err := storage.CreateObject(ctx, "apollo-public-bucket", objectName, data); err != nil {
logger.Errorf("error creating cloud storage object: %v", err)
http.Error(w, "internal processing error", http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
func (h *testExportHandler) queryExposureKeys(ctx context.Context, since, until time.Time, limit int) ([]*model.Infection, error) {
criteria := database.IterateInfectionsCriteria{
SinceTimestamp: since,
UntilTimestamp: until,
OnlyLocalProvenance: false, // include federated ids
}
it, err := h.db.IterateInfections(ctx, criteria)
if err != nil {
return nil, err
}
defer it.Close()
var exposureKeys []*model.Infection
num := 1
exp, done, err := it.Next()
for !done && err == nil && num <= limit {
if exp != nil {
exposureKeys = append(exposureKeys, exp)
num++
}
exp, done, err = it.Next()
}
if err != nil {
return nil, err
}
return exposureKeys, nil
}
| CreateBatchesHandler | identifier_name |
unlinked_file.rs | //! Diagnostic emitted for files that aren't part of any crate.
use std::iter;
use hir::{db::DefDatabase, DefMap, InFile, ModuleSource};
use ide_db::{
base_db::{FileId, FileLoader, SourceDatabase, SourceDatabaseExt},
source_change::SourceChange,
RootDatabase,
};
use syntax::{
ast::{self, edit::IndentLevel, HasModuleItem, HasName},
AstNode, TextRange,
};
use text_edit::TextEdit;
use crate::{fix, Assist, Diagnostic, DiagnosticCode, DiagnosticsContext, Severity};
// Diagnostic: unlinked-file
//
// This diagnostic is shown for files that are not included in any crate, or files that are part of
// crates rust-analyzer failed to discover. The file will not have IDE features available.
pub(crate) fn unlinked_file(
ctx: &DiagnosticsContext<'_>,
acc: &mut Vec<Diagnostic>,
file_id: FileId,
) {
// Limit diagnostic to the first few characters in the file. This matches how VS Code
// renders it with the full span, but on other editors, and is less invasive.
let fixes = fixes(ctx, file_id);
// FIXME: This is a hack for the vscode extension to notice whether there is an autofix or not before having to resolve diagnostics.
// This is to prevent project linking popups from appearing when there is an autofix. https://github.com/rust-lang/rust-analyzer/issues/14523
let message = if fixes.is_none() {
"file not included in crate hierarchy"
} else {
"file not included in module tree"
};
let range = ctx.sema.db.parse(file_id).syntax_node().text_range();
let range = FileLoader::file_text(ctx.sema.db, file_id)
.char_indices()
.take(3)
.last()
.map(|(i, _)| i)
.map(|i| TextRange::up_to(i.try_into().unwrap()))
.unwrap_or(range);
acc.push(
Diagnostic::new(DiagnosticCode::Ra("unlinked-file", Severity::WeakWarning), message, range)
.with_fixes(fixes),
);
}
fn fixes(ctx: &DiagnosticsContext<'_>, file_id: FileId) -> Option<Vec<Assist>> {
// If there's an existing module that could add `mod` or `pub mod` items to include the unlinked file,
// suggest that as a fix.
let source_root = ctx.sema.db.source_root(ctx.sema.db.file_source_root(file_id));
let our_path = source_root.path_for_file(&file_id)?;
let parent = our_path.parent()?;
let (module_name, _) = our_path.name_and_extension()?;
let (parent, module_name) = match module_name {
// for mod.rs we need to actually look up one higher
// and take the parent as our to be module name
"mod" => {
let (name, _) = parent.name_and_extension()?;
(parent.parent()?, name.to_owned())
}
_ => (parent, module_name.to_owned()),
};
// check crate roots, i.e. main.rs, lib.rs, ...
'crates: for &krate in &*ctx.sema.db.relevant_crates(file_id) {
let crate_def_map = ctx.sema.db.crate_def_map(krate);
let root_module = &crate_def_map[DefMap::ROOT];
let Some(root_file_id) = root_module.origin.file_id() else { continue };
let Some(crate_root_path) = source_root.path_for_file(&root_file_id) else { continue };
let Some(rel) = parent.strip_prefix(&crate_root_path.parent()?) else { continue };
// try resolving the relative difference of the paths as inline modules
let mut current = root_module;
for ele in rel.as_ref().components() {
let seg = match ele {
std::path::Component::Normal(seg) => seg.to_str()?,
std::path::Component::RootDir => continue,
// shouldn't occur
_ => continue 'crates,
};
match current.children.iter().find(|(name, _)| name.to_smol_str() == seg) {
Some((_, &child)) => current = &crate_def_map[child],
None => continue 'crates,
}
if !current.origin.is_inline() {
continue 'crates;
}
}
let InFile { file_id: parent_file_id, value: source } =
current.definition_source(ctx.sema.db);
let parent_file_id = parent_file_id.file_id()?;
return make_fixes(ctx.sema.db, parent_file_id, source, &module_name, file_id);
}
// if we aren't adding to a crate root, walk backwards such that we support `#[path = ...]` overrides if possible
// build all parent paths of the form `../module_name/mod.rs` and `../module_name.rs`
let paths = iter::successors(Some(parent), |prev| prev.parent()).filter_map(|path| {
let parent = path.parent()?;
let (name, _) = path.name_and_extension()?;
Some(([parent.join(&format!("{name}.rs"))?, path.join("mod.rs")?], name.to_owned()))
});
let mut stack = vec![];
let &parent_id =
paths.inspect(|(_, name)| stack.push(name.clone())).find_map(|(paths, _)| {
paths.into_iter().find_map(|path| source_root.file_for_path(&path))
})?;
stack.pop();
'crates: for &krate in ctx.sema.db.relevant_crates(parent_id).iter() {
let crate_def_map = ctx.sema.db.crate_def_map(krate);
let Some((_, module)) = crate_def_map.modules().find(|(_, module)| {
module.origin.file_id() == Some(parent_id) && !module.origin.is_inline()
}) else {
continue;
};
if stack.is_empty() {
return make_fixes(
ctx.sema.db,
parent_id,
module.definition_source(ctx.sema.db).value,
&module_name,
file_id,
);
} else {
// direct parent file is missing,
// try finding a parent that has an inline tree from here on
let mut current = module;
for s in stack.iter().rev() {
match module.children.iter().find(|(name, _)| name.to_smol_str() == s) {
Some((_, child)) => {
current = &crate_def_map[*child];
}
None => continue 'crates,
}
if !current.origin.is_inline() {
continue 'crates;
}
}
let InFile { file_id: parent_file_id, value: source } =
current.definition_source(ctx.sema.db);
let parent_file_id = parent_file_id.file_id()?;
return make_fixes(ctx.sema.db, parent_file_id, source, &module_name, file_id);
}
}
None
}
fn make_fixes(
db: &RootDatabase,
parent_file_id: FileId,
source: ModuleSource,
new_mod_name: &str,
added_file_id: FileId,
) -> Option<Vec<Assist>> {
fn is_outline_mod(item: &ast::Item) -> bool {
matches!(item, ast::Item::Module(m) if m.item_list().is_none())
}
let mod_decl = format!("mod {new_mod_name};");
let pub_mod_decl = format!("pub mod {new_mod_name};");
let mut mod_decl_builder = TextEdit::builder();
let mut pub_mod_decl_builder = TextEdit::builder();
let mut items = match &source {
ModuleSource::SourceFile(it) => it.items(),
ModuleSource::Module(it) => it.item_list()?.items(),
ModuleSource::BlockExpr(_) => return None,
};
// If there's an existing `mod m;` statement matching the new one, don't emit a fix (it's
// probably `#[cfg]`d out).
for item in items.clone() {
if let ast::Item::Module(m) = item {
if let Some(name) = m.name() {
if m.item_list().is_none() && name.to_string() == new_mod_name {
cov_mark::hit!(unlinked_file_skip_fix_when_mod_already_exists);
return None;
}
}
}
}
// If there are existing `mod m;` items, append after them (after the first group of them, rather).
match items.clone().skip_while(|item| !is_outline_mod(item)).take_while(is_outline_mod).last() {
Some(last) => {
cov_mark::hit!(unlinked_file_append_to_existing_mods);
let offset = last.syntax().text_range().end();
let indent = IndentLevel::from_node(last.syntax());
mod_decl_builder.insert(offset, format!("\n{indent}{mod_decl}"));
pub_mod_decl_builder.insert(offset, format!("\n{indent}{pub_mod_decl}"));
}
None => {
// Prepend before the first item in the file.
match items.next() {
Some(first) => {
cov_mark::hit!(unlinked_file_prepend_before_first_item);
let offset = first.syntax().text_range().start();
let indent = IndentLevel::from_node(first.syntax());
mod_decl_builder.insert(offset, format!("{mod_decl}\n\n{indent}"));
pub_mod_decl_builder.insert(offset, format!("{pub_mod_decl}\n\n{indent}"));
}
None => {
// No items in the file, so just append at the end.
cov_mark::hit!(unlinked_file_empty_file);
let mut indent = IndentLevel::from(0);
let offset = match &source {
ModuleSource::SourceFile(it) => it.syntax().text_range().end(),
ModuleSource::Module(it) => {
indent = IndentLevel::from_node(it.syntax()) + 1;
it.item_list()?.r_curly_token()?.text_range().start()
}
ModuleSource::BlockExpr(it) => {
it.stmt_list()?.r_curly_token()?.text_range().start()
}
};
mod_decl_builder.insert(offset, format!("{indent}{mod_decl}\n"));
pub_mod_decl_builder.insert(offset, format!("{indent}{pub_mod_decl}\n"));
}
}
}
}
let trigger_range = db.parse(added_file_id).tree().syntax().text_range();
Some(vec![
fix(
"add_mod_declaration",
&format!("Insert `{mod_decl}`"),
SourceChange::from_text_edit(parent_file_id, mod_decl_builder.finish()),
trigger_range,
),
fix(
"add_pub_mod_declaration",
&format!("Insert `{pub_mod_decl}`"),
SourceChange::from_text_edit(parent_file_id, pub_mod_decl_builder.finish()),
trigger_range,
),
])
}
#[cfg(test)]
mod tests {
use crate::tests::{check_diagnostics, check_fix, check_fixes, check_no_fix};
#[test]
fn unlinked_file_prepend_first_item() {
cov_mark::check!(unlinked_file_prepend_before_first_item);
// Only tests the first one for `pub mod` since the rest are the same
check_fixes(
r#"
//- /main.rs
fn f() {}
//- /foo.rs
$0
"#,
vec![
r#"
mod foo;
fn f() {}
"#,
r#"
pub mod foo;
fn f() {}
"#,
],
);
}
#[test]
fn unlinked_file_append_mod() {
cov_mark::check!(unlinked_file_append_to_existing_mods);
check_fix(
r#"
//- /main.rs
//! Comment on top
mod preexisting;
mod preexisting2;
struct S;
mod preexisting_bottom;)
//- /foo.rs
$0
"#,
r#"
//! Comment on top
mod preexisting;
mod preexisting2;
mod foo;
struct S;
mod preexisting_bottom;)
"#,
);
}
#[test]
fn unlinked_file_insert_in_empty_file() {
cov_mark::check!(unlinked_file_empty_file);
check_fix(
r#"
//- /main.rs
//- /foo.rs
$0
"#,
r#"
mod foo;
"#,
);
}
#[test]
fn unlinked_file_insert_in_empty_file_mod_file() {
check_fix(
r#"
//- /main.rs
//- /foo/mod.rs
$0
"#,
r#"
mod foo;
"#,
);
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
// bar module
//- /bar/foo/mod.rs
$0
"#,
r#"
// bar module
mod foo;
"#,
);
}
#[test]
fn unlinked_file_old_style_modrs() {
check_fix(
r#"
//- /main.rs
mod submod;
//- /submod/mod.rs
// in mod.rs
//- /submod/foo.rs
$0
"#,
r#"
// in mod.rs
mod foo;
"#,
);
}
#[test]
fn unlinked_file_new_style_mod() {
check_fix(
r#"
//- /main.rs
mod submod;
//- /submod.rs
//- /submod/foo.rs
$0
"#,
r#"
mod foo;
"#,
);
}
#[test]
fn | () {
cov_mark::check!(unlinked_file_skip_fix_when_mod_already_exists);
check_no_fix(
r#"
//- /main.rs
#[cfg(never)]
mod foo;
//- /foo.rs
$0
"#,
);
}
#[test]
fn unlinked_file_with_cfg_on() {
check_diagnostics(
r#"
//- /main.rs
#[cfg(not(never))]
mod foo;
//- /foo.rs
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple() {
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
mod foo {
}
//- /bar/foo/baz.rs
$0
"#,
r#"
mod foo {
mod baz;
}
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple_modrs() {
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
mod baz {
}
//- /bar/baz/foo/mod.rs
$0
"#,
r#"
mod baz {
mod foo;
}
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple_modrs_main() {
check_fix(
r#"
//- /main.rs
mod bar {
}
//- /bar/foo/mod.rs
$0
"#,
r#"
mod bar {
mod foo;
}
"#,
);
}
}
| unlinked_file_with_cfg_off | identifier_name |
unlinked_file.rs | //! Diagnostic emitted for files that aren't part of any crate.
use std::iter;
use hir::{db::DefDatabase, DefMap, InFile, ModuleSource};
use ide_db::{
base_db::{FileId, FileLoader, SourceDatabase, SourceDatabaseExt},
source_change::SourceChange,
RootDatabase,
};
use syntax::{
ast::{self, edit::IndentLevel, HasModuleItem, HasName},
AstNode, TextRange,
};
use text_edit::TextEdit;
use crate::{fix, Assist, Diagnostic, DiagnosticCode, DiagnosticsContext, Severity};
// Diagnostic: unlinked-file
//
// This diagnostic is shown for files that are not included in any crate, or files that are part of
// crates rust-analyzer failed to discover. The file will not have IDE features available.
pub(crate) fn unlinked_file(
ctx: &DiagnosticsContext<'_>,
acc: &mut Vec<Diagnostic>,
file_id: FileId,
) {
// Limit diagnostic to the first few characters in the file. This matches how VS Code
// renders it with the full span, but on other editors, and is less invasive.
let fixes = fixes(ctx, file_id);
// FIXME: This is a hack for the vscode extension to notice whether there is an autofix or not before having to resolve diagnostics.
// This is to prevent project linking popups from appearing when there is an autofix. https://github.com/rust-lang/rust-analyzer/issues/14523
let message = if fixes.is_none() {
"file not included in crate hierarchy"
} else {
"file not included in module tree"
};
let range = ctx.sema.db.parse(file_id).syntax_node().text_range();
let range = FileLoader::file_text(ctx.sema.db, file_id)
.char_indices()
.take(3)
.last()
.map(|(i, _)| i)
.map(|i| TextRange::up_to(i.try_into().unwrap()))
.unwrap_or(range);
acc.push(
Diagnostic::new(DiagnosticCode::Ra("unlinked-file", Severity::WeakWarning), message, range)
.with_fixes(fixes),
);
}
fn fixes(ctx: &DiagnosticsContext<'_>, file_id: FileId) -> Option<Vec<Assist>> {
// If there's an existing module that could add `mod` or `pub mod` items to include the unlinked file,
// suggest that as a fix.
let source_root = ctx.sema.db.source_root(ctx.sema.db.file_source_root(file_id));
let our_path = source_root.path_for_file(&file_id)?;
let parent = our_path.parent()?;
let (module_name, _) = our_path.name_and_extension()?;
let (parent, module_name) = match module_name {
// for mod.rs we need to actually look up one higher
// and take the parent as our to be module name
"mod" => {
let (name, _) = parent.name_and_extension()?;
(parent.parent()?, name.to_owned())
}
_ => (parent, module_name.to_owned()),
};
// check crate roots, i.e. main.rs, lib.rs, ...
'crates: for &krate in &*ctx.sema.db.relevant_crates(file_id) {
let crate_def_map = ctx.sema.db.crate_def_map(krate);
let root_module = &crate_def_map[DefMap::ROOT];
let Some(root_file_id) = root_module.origin.file_id() else { continue };
let Some(crate_root_path) = source_root.path_for_file(&root_file_id) else { continue };
let Some(rel) = parent.strip_prefix(&crate_root_path.parent()?) else { continue };
// try resolving the relative difference of the paths as inline modules
let mut current = root_module;
for ele in rel.as_ref().components() {
let seg = match ele {
std::path::Component::Normal(seg) => seg.to_str()?,
std::path::Component::RootDir => continue,
// shouldn't occur
_ => continue 'crates,
};
match current.children.iter().find(|(name, _)| name.to_smol_str() == seg) {
Some((_, &child)) => current = &crate_def_map[child],
None => continue 'crates,
}
if !current.origin.is_inline() {
continue 'crates;
}
}
let InFile { file_id: parent_file_id, value: source } =
current.definition_source(ctx.sema.db);
let parent_file_id = parent_file_id.file_id()?;
return make_fixes(ctx.sema.db, parent_file_id, source, &module_name, file_id);
}
// if we aren't adding to a crate root, walk backwards such that we support `#[path = ...]` overrides if possible
// build all parent paths of the form `../module_name/mod.rs` and `../module_name.rs`
let paths = iter::successors(Some(parent), |prev| prev.parent()).filter_map(|path| {
let parent = path.parent()?;
let (name, _) = path.name_and_extension()?;
Some(([parent.join(&format!("{name}.rs"))?, path.join("mod.rs")?], name.to_owned()))
});
let mut stack = vec![];
let &parent_id =
paths.inspect(|(_, name)| stack.push(name.clone())).find_map(|(paths, _)| {
paths.into_iter().find_map(|path| source_root.file_for_path(&path))
})?;
stack.pop();
'crates: for &krate in ctx.sema.db.relevant_crates(parent_id).iter() {
let crate_def_map = ctx.sema.db.crate_def_map(krate);
let Some((_, module)) = crate_def_map.modules().find(|(_, module)| {
module.origin.file_id() == Some(parent_id) && !module.origin.is_inline()
}) else {
continue;
};
if stack.is_empty() {
return make_fixes(
ctx.sema.db,
parent_id,
module.definition_source(ctx.sema.db).value,
&module_name,
file_id,
);
} else {
// direct parent file is missing,
// try finding a parent that has an inline tree from here on
let mut current = module;
for s in stack.iter().rev() {
match module.children.iter().find(|(name, _)| name.to_smol_str() == s) {
Some((_, child)) => {
current = &crate_def_map[*child];
}
None => continue 'crates,
}
if !current.origin.is_inline() {
continue 'crates;
}
}
let InFile { file_id: parent_file_id, value: source } =
current.definition_source(ctx.sema.db);
let parent_file_id = parent_file_id.file_id()?;
return make_fixes(ctx.sema.db, parent_file_id, source, &module_name, file_id);
}
}
None
}
fn make_fixes(
db: &RootDatabase,
parent_file_id: FileId,
source: ModuleSource,
new_mod_name: &str,
added_file_id: FileId,
) -> Option<Vec<Assist>> {
fn is_outline_mod(item: &ast::Item) -> bool {
matches!(item, ast::Item::Module(m) if m.item_list().is_none())
}
let mod_decl = format!("mod {new_mod_name};");
let pub_mod_decl = format!("pub mod {new_mod_name};");
let mut mod_decl_builder = TextEdit::builder();
let mut pub_mod_decl_builder = TextEdit::builder();
let mut items = match &source {
ModuleSource::SourceFile(it) => it.items(),
ModuleSource::Module(it) => it.item_list()?.items(),
ModuleSource::BlockExpr(_) => return None,
};
// If there's an existing `mod m;` statement matching the new one, don't emit a fix (it's
// probably `#[cfg]`d out).
for item in items.clone() {
if let ast::Item::Module(m) = item {
if let Some(name) = m.name() {
if m.item_list().is_none() && name.to_string() == new_mod_name {
cov_mark::hit!(unlinked_file_skip_fix_when_mod_already_exists);
return None;
}
}
}
}
// If there are existing `mod m;` items, append after them (after the first group of them, rather).
match items.clone().skip_while(|item| !is_outline_mod(item)).take_while(is_outline_mod).last() {
Some(last) => {
cov_mark::hit!(unlinked_file_append_to_existing_mods);
let offset = last.syntax().text_range().end();
let indent = IndentLevel::from_node(last.syntax());
mod_decl_builder.insert(offset, format!("\n{indent}{mod_decl}"));
pub_mod_decl_builder.insert(offset, format!("\n{indent}{pub_mod_decl}"));
}
None => {
// Prepend before the first item in the file.
match items.next() {
Some(first) => {
cov_mark::hit!(unlinked_file_prepend_before_first_item);
let offset = first.syntax().text_range().start();
let indent = IndentLevel::from_node(first.syntax());
mod_decl_builder.insert(offset, format!("{mod_decl}\n\n{indent}"));
pub_mod_decl_builder.insert(offset, format!("{pub_mod_decl}\n\n{indent}"));
}
None => {
// No items in the file, so just append at the end.
cov_mark::hit!(unlinked_file_empty_file);
let mut indent = IndentLevel::from(0);
let offset = match &source {
ModuleSource::SourceFile(it) => it.syntax().text_range().end(),
ModuleSource::Module(it) => {
indent = IndentLevel::from_node(it.syntax()) + 1;
it.item_list()?.r_curly_token()?.text_range().start()
}
ModuleSource::BlockExpr(it) => {
it.stmt_list()?.r_curly_token()?.text_range().start()
}
};
mod_decl_builder.insert(offset, format!("{indent}{mod_decl}\n"));
pub_mod_decl_builder.insert(offset, format!("{indent}{pub_mod_decl}\n"));
}
}
}
}
let trigger_range = db.parse(added_file_id).tree().syntax().text_range();
Some(vec![
fix(
"add_mod_declaration",
&format!("Insert `{mod_decl}`"),
SourceChange::from_text_edit(parent_file_id, mod_decl_builder.finish()),
trigger_range,
),
fix(
"add_pub_mod_declaration",
&format!("Insert `{pub_mod_decl}`"),
SourceChange::from_text_edit(parent_file_id, pub_mod_decl_builder.finish()),
trigger_range,
),
])
}
#[cfg(test)]
mod tests {
use crate::tests::{check_diagnostics, check_fix, check_fixes, check_no_fix};
#[test]
fn unlinked_file_prepend_first_item() {
cov_mark::check!(unlinked_file_prepend_before_first_item);
// Only tests the first one for `pub mod` since the rest are the same
check_fixes(
r#"
//- /main.rs
fn f() {}
//- /foo.rs
$0
"#,
vec![
r#"
mod foo;
fn f() {}
"#,
r#"
pub mod foo;
fn f() {}
"#,
],
);
}
#[test]
fn unlinked_file_append_mod() {
cov_mark::check!(unlinked_file_append_to_existing_mods);
check_fix(
r#"
//- /main.rs
//! Comment on top
mod preexisting;
mod preexisting2;
struct S;
mod preexisting_bottom;)
//- /foo.rs
$0
"#,
r#"
//! Comment on top
mod preexisting;
mod preexisting2;
mod foo;
struct S;
mod preexisting_bottom;)
"#,
);
}
#[test]
fn unlinked_file_insert_in_empty_file() {
cov_mark::check!(unlinked_file_empty_file);
check_fix(
r#"
//- /main.rs
//- /foo.rs
$0
"#,
r#"
mod foo;
"#,
);
}
#[test]
fn unlinked_file_insert_in_empty_file_mod_file() {
check_fix(
r#"
//- /main.rs
//- /foo/mod.rs
$0
"#,
r#"
mod foo;
"#,
);
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
// bar module
//- /bar/foo/mod.rs
$0
"#,
r#"
// bar module
mod foo;
"#,
);
}
#[test]
fn unlinked_file_old_style_modrs() {
check_fix(
r#"
//- /main.rs
mod submod;
//- /submod/mod.rs
// in mod.rs
//- /submod/foo.rs
$0
"#,
r#"
// in mod.rs
mod foo;
"#,
);
}
#[test]
fn unlinked_file_new_style_mod() {
check_fix(
r#"
//- /main.rs
mod submod;
//- /submod.rs
//- /submod/foo.rs
$0
"#,
r#"
mod foo;
"#,
);
}
#[test]
fn unlinked_file_with_cfg_off() {
cov_mark::check!(unlinked_file_skip_fix_when_mod_already_exists);
check_no_fix(
r#"
//- /main.rs
#[cfg(never)]
mod foo;
//- /foo.rs
$0
"#,
);
}
#[test]
fn unlinked_file_with_cfg_on() {
check_diagnostics(
r#"
//- /main.rs
#[cfg(not(never))]
mod foo;
//- /foo.rs
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple() {
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
mod foo {
}
//- /bar/foo/baz.rs
$0
"#,
r#"
mod foo {
mod baz;
}
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple_modrs() {
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
mod baz {
}
//- /bar/baz/foo/mod.rs
$0
"#,
r#"
mod baz {
mod foo;
}
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple_modrs_main() |
}
| {
check_fix(
r#"
//- /main.rs
mod bar {
}
//- /bar/foo/mod.rs
$0
"#,
r#"
mod bar {
mod foo;
}
"#,
);
} | identifier_body |
unlinked_file.rs | //! Diagnostic emitted for files that aren't part of any crate.
use std::iter;
use hir::{db::DefDatabase, DefMap, InFile, ModuleSource};
use ide_db::{
base_db::{FileId, FileLoader, SourceDatabase, SourceDatabaseExt},
source_change::SourceChange,
RootDatabase,
};
use syntax::{
ast::{self, edit::IndentLevel, HasModuleItem, HasName},
AstNode, TextRange,
};
use text_edit::TextEdit;
use crate::{fix, Assist, Diagnostic, DiagnosticCode, DiagnosticsContext, Severity};
// Diagnostic: unlinked-file
//
// This diagnostic is shown for files that are not included in any crate, or files that are part of
// crates rust-analyzer failed to discover. The file will not have IDE features available.
pub(crate) fn unlinked_file(
ctx: &DiagnosticsContext<'_>,
acc: &mut Vec<Diagnostic>,
file_id: FileId,
) {
// Limit diagnostic to the first few characters in the file. This matches how VS Code
// renders it with the full span, but on other editors, and is less invasive.
let fixes = fixes(ctx, file_id);
// FIXME: This is a hack for the vscode extension to notice whether there is an autofix or not before having to resolve diagnostics.
// This is to prevent project linking popups from appearing when there is an autofix. https://github.com/rust-lang/rust-analyzer/issues/14523
let message = if fixes.is_none() {
"file not included in crate hierarchy"
} else {
"file not included in module tree"
};
let range = ctx.sema.db.parse(file_id).syntax_node().text_range();
let range = FileLoader::file_text(ctx.sema.db, file_id)
.char_indices()
.take(3)
.last()
.map(|(i, _)| i)
.map(|i| TextRange::up_to(i.try_into().unwrap()))
.unwrap_or(range);
acc.push(
Diagnostic::new(DiagnosticCode::Ra("unlinked-file", Severity::WeakWarning), message, range)
.with_fixes(fixes),
);
}
fn fixes(ctx: &DiagnosticsContext<'_>, file_id: FileId) -> Option<Vec<Assist>> {
// If there's an existing module that could add `mod` or `pub mod` items to include the unlinked file,
// suggest that as a fix.
let source_root = ctx.sema.db.source_root(ctx.sema.db.file_source_root(file_id));
let our_path = source_root.path_for_file(&file_id)?;
let parent = our_path.parent()?;
let (module_name, _) = our_path.name_and_extension()?;
let (parent, module_name) = match module_name {
// for mod.rs we need to actually look up one higher
// and take the parent as our to be module name
"mod" => {
let (name, _) = parent.name_and_extension()?;
(parent.parent()?, name.to_owned())
}
_ => (parent, module_name.to_owned()),
};
// check crate roots, i.e. main.rs, lib.rs, ...
'crates: for &krate in &*ctx.sema.db.relevant_crates(file_id) {
let crate_def_map = ctx.sema.db.crate_def_map(krate);
let root_module = &crate_def_map[DefMap::ROOT];
let Some(root_file_id) = root_module.origin.file_id() else { continue };
let Some(crate_root_path) = source_root.path_for_file(&root_file_id) else { continue };
let Some(rel) = parent.strip_prefix(&crate_root_path.parent()?) else { continue };
// try resolving the relative difference of the paths as inline modules
let mut current = root_module;
for ele in rel.as_ref().components() {
let seg = match ele {
std::path::Component::Normal(seg) => seg.to_str()?,
std::path::Component::RootDir => continue,
// shouldn't occur
_ => continue 'crates,
};
match current.children.iter().find(|(name, _)| name.to_smol_str() == seg) {
Some((_, &child)) => current = &crate_def_map[child],
None => continue 'crates,
}
if !current.origin.is_inline() {
continue 'crates;
}
}
let InFile { file_id: parent_file_id, value: source } =
current.definition_source(ctx.sema.db);
let parent_file_id = parent_file_id.file_id()?;
return make_fixes(ctx.sema.db, parent_file_id, source, &module_name, file_id);
}
// if we aren't adding to a crate root, walk backwards such that we support `#[path = ...]` overrides if possible
// build all parent paths of the form `../module_name/mod.rs` and `../module_name.rs`
let paths = iter::successors(Some(parent), |prev| prev.parent()).filter_map(|path| {
let parent = path.parent()?;
let (name, _) = path.name_and_extension()?;
Some(([parent.join(&format!("{name}.rs"))?, path.join("mod.rs")?], name.to_owned()))
});
let mut stack = vec![];
let &parent_id =
paths.inspect(|(_, name)| stack.push(name.clone())).find_map(|(paths, _)| {
paths.into_iter().find_map(|path| source_root.file_for_path(&path))
})?;
stack.pop();
'crates: for &krate in ctx.sema.db.relevant_crates(parent_id).iter() {
let crate_def_map = ctx.sema.db.crate_def_map(krate);
let Some((_, module)) = crate_def_map.modules().find(|(_, module)| {
module.origin.file_id() == Some(parent_id) && !module.origin.is_inline()
}) else {
continue;
};
if stack.is_empty() {
return make_fixes(
ctx.sema.db,
parent_id,
module.definition_source(ctx.sema.db).value,
&module_name,
file_id,
);
} else {
// direct parent file is missing,
// try finding a parent that has an inline tree from here on
let mut current = module;
for s in stack.iter().rev() {
match module.children.iter().find(|(name, _)| name.to_smol_str() == s) {
Some((_, child)) => {
current = &crate_def_map[*child];
}
None => continue 'crates,
}
if !current.origin.is_inline() {
continue 'crates;
}
}
let InFile { file_id: parent_file_id, value: source } =
current.definition_source(ctx.sema.db);
let parent_file_id = parent_file_id.file_id()?;
return make_fixes(ctx.sema.db, parent_file_id, source, &module_name, file_id);
}
}
None
}
fn make_fixes(
db: &RootDatabase,
parent_file_id: FileId,
source: ModuleSource,
new_mod_name: &str,
added_file_id: FileId,
) -> Option<Vec<Assist>> {
fn is_outline_mod(item: &ast::Item) -> bool {
matches!(item, ast::Item::Module(m) if m.item_list().is_none())
}
let mod_decl = format!("mod {new_mod_name};");
let pub_mod_decl = format!("pub mod {new_mod_name};");
let mut mod_decl_builder = TextEdit::builder();
let mut pub_mod_decl_builder = TextEdit::builder();
let mut items = match &source {
ModuleSource::SourceFile(it) => it.items(),
ModuleSource::Module(it) => it.item_list()?.items(),
ModuleSource::BlockExpr(_) => return None,
};
// If there's an existing `mod m;` statement matching the new one, don't emit a fix (it's
// probably `#[cfg]`d out).
for item in items.clone() {
if let ast::Item::Module(m) = item {
if let Some(name) = m.name() {
if m.item_list().is_none() && name.to_string() == new_mod_name {
cov_mark::hit!(unlinked_file_skip_fix_when_mod_already_exists);
return None;
}
}
}
}
// If there are existing `mod m;` items, append after them (after the first group of them, rather).
match items.clone().skip_while(|item| !is_outline_mod(item)).take_while(is_outline_mod).last() {
Some(last) => {
cov_mark::hit!(unlinked_file_append_to_existing_mods);
let offset = last.syntax().text_range().end();
let indent = IndentLevel::from_node(last.syntax());
mod_decl_builder.insert(offset, format!("\n{indent}{mod_decl}"));
pub_mod_decl_builder.insert(offset, format!("\n{indent}{pub_mod_decl}"));
}
None => {
// Prepend before the first item in the file.
match items.next() {
Some(first) => {
cov_mark::hit!(unlinked_file_prepend_before_first_item);
let offset = first.syntax().text_range().start();
let indent = IndentLevel::from_node(first.syntax());
mod_decl_builder.insert(offset, format!("{mod_decl}\n\n{indent}"));
pub_mod_decl_builder.insert(offset, format!("{pub_mod_decl}\n\n{indent}"));
}
None => {
// No items in the file, so just append at the end.
cov_mark::hit!(unlinked_file_empty_file);
let mut indent = IndentLevel::from(0);
let offset = match &source {
ModuleSource::SourceFile(it) => it.syntax().text_range().end(),
ModuleSource::Module(it) => {
indent = IndentLevel::from_node(it.syntax()) + 1;
it.item_list()?.r_curly_token()?.text_range().start()
}
ModuleSource::BlockExpr(it) => {
it.stmt_list()?.r_curly_token()?.text_range().start()
}
};
mod_decl_builder.insert(offset, format!("{indent}{mod_decl}\n"));
pub_mod_decl_builder.insert(offset, format!("{indent}{pub_mod_decl}\n"));
}
}
}
}
let trigger_range = db.parse(added_file_id).tree().syntax().text_range();
Some(vec![
fix(
"add_mod_declaration",
&format!("Insert `{mod_decl}`"),
SourceChange::from_text_edit(parent_file_id, mod_decl_builder.finish()),
trigger_range,
),
fix(
"add_pub_mod_declaration",
&format!("Insert `{pub_mod_decl}`"),
SourceChange::from_text_edit(parent_file_id, pub_mod_decl_builder.finish()),
trigger_range,
),
])
}
#[cfg(test)]
mod tests {
use crate::tests::{check_diagnostics, check_fix, check_fixes, check_no_fix};
#[test]
fn unlinked_file_prepend_first_item() {
cov_mark::check!(unlinked_file_prepend_before_first_item);
// Only tests the first one for `pub mod` since the rest are the same
check_fixes(
r#"
//- /main.rs
fn f() {}
//- /foo.rs
$0
"#,
vec![
r#"
mod foo;
fn f() {}
"#,
r#"
pub mod foo;
fn f() {}
"#,
],
);
}
#[test]
fn unlinked_file_append_mod() {
cov_mark::check!(unlinked_file_append_to_existing_mods);
check_fix(
r#"
//- /main.rs
//! Comment on top
mod preexisting;
mod preexisting2;
struct S;
mod preexisting_bottom;)
//- /foo.rs
$0
"#,
r#"
//! Comment on top
mod preexisting;
mod preexisting2;
mod foo;
struct S;
mod preexisting_bottom;)
"#,
);
}
#[test]
fn unlinked_file_insert_in_empty_file() {
cov_mark::check!(unlinked_file_empty_file);
check_fix(
r#"
//- /main.rs
//- /foo.rs
$0 | mod foo;
"#,
);
}
#[test]
fn unlinked_file_insert_in_empty_file_mod_file() {
check_fix(
r#"
//- /main.rs
//- /foo/mod.rs
$0
"#,
r#"
mod foo;
"#,
);
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
// bar module
//- /bar/foo/mod.rs
$0
"#,
r#"
// bar module
mod foo;
"#,
);
}
#[test]
fn unlinked_file_old_style_modrs() {
check_fix(
r#"
//- /main.rs
mod submod;
//- /submod/mod.rs
// in mod.rs
//- /submod/foo.rs
$0
"#,
r#"
// in mod.rs
mod foo;
"#,
);
}
#[test]
fn unlinked_file_new_style_mod() {
check_fix(
r#"
//- /main.rs
mod submod;
//- /submod.rs
//- /submod/foo.rs
$0
"#,
r#"
mod foo;
"#,
);
}
#[test]
fn unlinked_file_with_cfg_off() {
cov_mark::check!(unlinked_file_skip_fix_when_mod_already_exists);
check_no_fix(
r#"
//- /main.rs
#[cfg(never)]
mod foo;
//- /foo.rs
$0
"#,
);
}
#[test]
fn unlinked_file_with_cfg_on() {
check_diagnostics(
r#"
//- /main.rs
#[cfg(not(never))]
mod foo;
//- /foo.rs
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple() {
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
mod foo {
}
//- /bar/foo/baz.rs
$0
"#,
r#"
mod foo {
mod baz;
}
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple_modrs() {
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
mod baz {
}
//- /bar/baz/foo/mod.rs
$0
"#,
r#"
mod baz {
mod foo;
}
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple_modrs_main() {
check_fix(
r#"
//- /main.rs
mod bar {
}
//- /bar/foo/mod.rs
$0
"#,
r#"
mod bar {
mod foo;
}
"#,
);
}
} | "#,
r#" | random_line_split |
h2ctypes.py | """A generator of ctypes wrappers for C libraries.
"""
from collections import namedtuple
import ctypes
try:
from ctypes import wintypes
except ValueError:
class wintypes:
"""Standard types defined by :file:`Windows.h`.
"""
BYTE = ctypes.c_ubyte
DWORD = ctypes.c_uint32
ULONG = ctypes.c_uint32
WORD = ctypes.c_ushort
from enum import IntEnum
import functools
import inspect
import re
C_TYPES = {"_Bool": ctypes.c_bool,
"char": ctypes.c_char, # also ctypes.c_byte
"wchar_t": ctypes.c_wchar,
"unsigned char": ctypes.c_ubyte,
"short": ctypes.c_short,
"unsigned short": ctypes.c_ushort,
"int": ctypes.c_int,
"unsigned int": ctypes.c_uint,
"long": ctypes.c_long,
"unsigned long": ctypes.c_ulong,
"long long": ctypes.c_longlong,
"unsigned long long": ctypes.c_ulonglong,
"size_t": ctypes.c_size_t,
"ssize_t": ctypes.c_ssize_t,
"float": ctypes.c_float,
"double": ctypes.c_double,
"long double": ctypes.c_longdouble,
"char*": ctypes.c_char_p,
"wchar_t*": ctypes.c_wchar_p,
"void*": ctypes.c_void_p,
"int32_t": ctypes.c_int32,
"uint32_t": ctypes.c_uint32,
"int64_t": ctypes.c_int64,
"uint64_t": ctypes.c_uint64,
"BYTE": wintypes.BYTE,
"DWORD": wintypes.DWORD,
"ULONG": wintypes.ULONG,
"WORD": wintypes.WORD}
class CIntEnum(IntEnum):
def from_param(self):
return ctypes.c_int(int(self))
@staticmethod
def as_ctype():
return ctypes.c_int
class CUIntEnum(IntEnum):
def from_param(self):
return ctypes.c_uint(int(self))
@staticmethod
def as_ctype():
return ctypes.c_uint
def as_ctype(type):
"""Unwraps an IntEnum type into a C type.
"""
return getattr(type, "as_ctype", lambda: type)()
class ParseError(Exception):
"""Raised by unparseable constructs.
"""
class Parse(namedtuple("_Parse", "constants enums structs fundecls")):
"""The result of the parsing of a C header.
"""
def export_for_pydoc(self, module_globals):
"""Export a parse to a module's global dict.
"""
module_all = module_globals.setdefault("__all__", [])
for k, v in sorted(self.constants.items()):
module_globals[k] = v
module_all.append(k)
for k, v in sorted(self.enums.items()):
module_globals[k] = v
module_all.append(k)
for fname, (argtypes, argtuple, restype) in sorted(
self.fundecls.items()):
prototype = "def {}{}: pass".format(
fname, inspect.formatargspec(argtuple._fields))
d = {}
exec(prototype, globals(), d)
func = d[fname]
for arg, argtype in zip(argtuple._fields, argtypes):
func.__annotations__[arg] = argtype
func.__annotations__["return"] = restype
module_globals[fname] = func
module_all.append(fname)
class Parser:
"""A stateful C header parser.
An instance of the parser keeps tracks of the ``#defines``, whether of
constants or of types (no other preprocessor macro is handled).
"""
def __init__(self, *fnames, compiler="gcc"):
self.types = C_TYPES
self.constants = {}
lines = []
for fname in fnames:
with open(fname) as f:
lines.extend(line.split("//")[0] for line in f)
self.header = re.sub(
r"/\*.*?\*/", "", "".join(lines), flags=re.DOTALL)
if compiler not in ("gcc", "msvc"):
raise ValueError("Unknown compiler")
self.compiler = compiler
def parse(self):
"""Parse the header file.
Four mappings are returned in a single namespace object: constants,
enum typedefs, struct typedefs and function declarations.
Constants are mapped onto their value, with ``#define``'s with no value
mapped to None. Structs are mapped onto ctypes structs. Functions
are mapped onto ``((type, ...), namedtuple, restype)`` triplets, where
each namedtuple's fields are the names of the arguments.
Definitions that include unknown types are silently ignored.
"""
return Parse(constants=self.parse_defines(),
enums=self.parse_enums(),
structs=self.parse_structs(),
fundecls=self.parse_functions())
def parse_decl(self, decl):
"""Parse a type name as a :mod:`ctypes` type and identifier pair.
"""
array_match = re.search(r"\[(.+?)\]$", decl)
if array_match:
decl = decl[:array_match.start()]
array_size = eval(array_match.group(1), {}, dict(self.constants))
else:
array_size = None
ident_match = re.search(r"\w+$", decl)
if not ident_match:
raise ParseError
ident = ident_match.group()
type_s = decl[:ident_match.start()]
pointed_to = type_s.rstrip("* ")
n_stars = type_s[len(pointed_to):].count("*")
pointed_to = " ".join(el for el in pointed_to.split() if el != "const")
if pointed_to in ("char", "wchar_t", "void") and n_stars >= 1:
pointed_to += "*"
n_stars -= 1
try:
ctype = self.types[pointed_to]
except KeyError:
raise ParseError
if n_stars:
ctype = as_ctype(ctype)
for _ in range(n_stars):
ctype = ctypes.POINTER(ctype)
if array_size is not None:
ctype = ctype * array_size
return ctype, ident
def parse_defines(self):
"""Parse ``#define``'s of constants and of types.
"""
for line in self.header.splitlines():
if line.lower().startswith("#define"):
_, line = line.strip().split(None, 1) # remove #define
if " " in line:
symbol, value = line.split(None, 1)
if value.isdigit():
value = int(value)
elif value.startswith("0x"):
value = int(value, 16)
elif value in self.types:
self.types[symbol] = self.types[value]
else:
symbol = line
value = ""
self.constants[symbol] = value
return self.constants
def | (self):
"""Parse ``typedef enum``'s.
"""
# Notes on enum types
#
# GCC:
#
# Normally, the type is unsigned int if there are no negative values in
# the enumeration, otherwise int. If -fshort-enums is specified, then
# if there are negative values it is the first of signed char, short
# and int that can represent all the values, otherwise it is the first
# of unsigned char, unsigned short and unsigned int that can represent
# all the values.
#
# On some targets, -fshort-enums is the default; this is determined by
# the ABI.
#
# MSVC:
#
# A variable declared as enum is an int [32-bit].
enums = {}
entry_re = re.compile(r"\s*(\w+)\s*(?:=\s*(\w+)\s*)?")
for entries, enumname in re.findall(
r"typedef\s+enum\s+\w*\s*{([^}]*)}\s*(\w+)\s*;", self.header,
re.DOTALL):
if self.compiler == "msvc":
underlying_type = ctypes.c_int
elif self.compiler == "gcc":
underlying_type = ctypes.c_uint
values = []
for entry in entries.split(","):
name, value = re.match(entry_re, entry).groups()
value = eval(value) if value is not None else (
values[-1][1] + 1 if values else 0)
if value < 0:
underlying_type = ctypes.c_int
values.append((name, value))
enum_type = {ctypes.c_int: CIntEnum,
ctypes.c_uint: CUIntEnum}[underlying_type]
self.types[enumname] = enums[enumname] = enum_type(enumname, values)
return enums
def parse_structs(self):
"""Parse ``typedef struct``'s.
"""
structs = {}
for fields, structname in re.findall(
r"typedef\s+struct\s+\w*\s*{([^}]*)}\s*(\w+)\s*;", self.header,
re.DOTALL):
fieldtypes = []
fieldnames = []
for field in fields.split(";"):
field = field.strip()
if not field:
continue
fieldtype, fieldname = self.parse_decl(field)
fieldtypes.append(fieldtype)
fieldnames.append(fieldname)
struct = type(
str(structname),
(ctypes.Structure,),
{"_fields_": list(zip(fieldnames, map(as_ctype, fieldtypes)))})
struct.__doc__ = "\n".join(
"{0}: {1}".format(field, type.__name__)
for field, type in zip(fieldnames, fieldtypes))
self.types[structname] = structs[structname] = struct
return structs
def parse_functions(self):
"""Parse function declarations
"""
fundecls = {}
for prefix, fname, proto in re.findall(
r"^(.+?\s+)?(\w+)\s*\(([\w\*\s,]+)\);", self.header, re.MULTILINE):
prefix = " ".join(self.constants.get(word, word)
for word in prefix.split()).strip()
if prefix == "void":
restype = None
else:
restype, _ = self.parse_decl(prefix + " _")
assert _ == "_"
argtypes = []
argnames = []
for argspec in proto.split(","):
argspec = argspec.strip()
if argspec == "void":
continue
argtype, argname = self.parse_decl(argspec)
argtypes.append(argtype)
argnames.append(argname)
fundecls[fname] = argtypes, namedtuple("args", argnames), restype
return fundecls
def deref(obj):
"""Cast a ctypes object or byref into a Python object.
"""
try:
return obj._obj.value # byref
except AttributeError:
try:
return obj.value # plain ctypes
except AttributeError:
return obj # plain python
class DLLError(Exception):
"""Raised when a DLL function returns a non-success exit code.
"""
def __init__(self, code):
self.code = code
class DLL:
"""A wrapper for a `ctypes` DLL object.
"""
def __init__(self, dll, parse, success_code):
self._dll = dll
self._fundecls = parse.fundecls
for fname in parse.fundecls:
self._set_success_codes(fname, [success_code])
def _set_success_codes(self, fname, success_codes):
"""Add a method with specific success codes.
"""
func = getattr(self._dll, fname)
argtypes, func.argtuple_t, restype = self._fundecls[fname]
argtypes = [argtype
if not (isinstance(argtype, type(ctypes.POINTER(ctypes.c_int))) and
argtype._type_.__module__ != "ctypes") # remove struct (nested) pointers
else ctypes.c_voidp for argtype in argtypes]
func.argtypes = argtypes
try:
success_code_type, = set(type(code) for code in success_codes)
except ValueError:
raise AssertionError("Success code of different types")
if success_code_type == restype:
func.success_codes = success_codes
func.errcheck = errcheck
else:
func.restype = restype
setattr(self, fname, func)
def _prohibit(self, fname):
"""Hide a DLL function.
"""
@functools.wraps(getattr(cls, fname))
def prohibited(*args, **kwargs):
raise AttributeError(
"{} is not a public function of the DLL".format(fname))
setattr(self, fname, prohibited)
def errcheck(retcode, func, args):
"""Return all (deref'ed) arguments on success, raise exception on failure.
"""
if retcode in func.success_codes:
return func.argtuple_t(*[deref(arg) for arg in args])
else:
raise DLLError(type(func.success_codes[0])(retcode))
| parse_enums | identifier_name |
h2ctypes.py | """A generator of ctypes wrappers for C libraries.
"""
from collections import namedtuple
import ctypes
try:
from ctypes import wintypes
except ValueError:
class wintypes:
"""Standard types defined by :file:`Windows.h`.
"""
BYTE = ctypes.c_ubyte
DWORD = ctypes.c_uint32
ULONG = ctypes.c_uint32
WORD = ctypes.c_ushort
from enum import IntEnum
import functools
import inspect
import re
C_TYPES = {"_Bool": ctypes.c_bool,
"char": ctypes.c_char, # also ctypes.c_byte
"wchar_t": ctypes.c_wchar,
"unsigned char": ctypes.c_ubyte,
"short": ctypes.c_short,
"unsigned short": ctypes.c_ushort,
"int": ctypes.c_int,
"unsigned int": ctypes.c_uint,
"long": ctypes.c_long,
"unsigned long": ctypes.c_ulong,
"long long": ctypes.c_longlong,
"unsigned long long": ctypes.c_ulonglong,
"size_t": ctypes.c_size_t,
"ssize_t": ctypes.c_ssize_t,
"float": ctypes.c_float,
"double": ctypes.c_double,
"long double": ctypes.c_longdouble,
"char*": ctypes.c_char_p,
"wchar_t*": ctypes.c_wchar_p,
"void*": ctypes.c_void_p,
"int32_t": ctypes.c_int32,
"uint32_t": ctypes.c_uint32,
"int64_t": ctypes.c_int64,
"uint64_t": ctypes.c_uint64,
"BYTE": wintypes.BYTE,
"DWORD": wintypes.DWORD,
"ULONG": wintypes.ULONG,
"WORD": wintypes.WORD}
class CIntEnum(IntEnum):
def from_param(self):
return ctypes.c_int(int(self))
@staticmethod
def as_ctype():
return ctypes.c_int
class CUIntEnum(IntEnum):
def from_param(self):
return ctypes.c_uint(int(self))
@staticmethod
def as_ctype():
return ctypes.c_uint
def as_ctype(type):
"""Unwraps an IntEnum type into a C type.
"""
return getattr(type, "as_ctype", lambda: type)()
class ParseError(Exception):
"""Raised by unparseable constructs.
"""
class Parse(namedtuple("_Parse", "constants enums structs fundecls")):
"""The result of the parsing of a C header.
"""
def export_for_pydoc(self, module_globals):
"""Export a parse to a module's global dict.
"""
module_all = module_globals.setdefault("__all__", [])
for k, v in sorted(self.constants.items()):
module_globals[k] = v
module_all.append(k)
for k, v in sorted(self.enums.items()):
module_globals[k] = v
module_all.append(k)
for fname, (argtypes, argtuple, restype) in sorted(
self.fundecls.items()):
prototype = "def {}{}: pass".format(
fname, inspect.formatargspec(argtuple._fields))
d = {}
exec(prototype, globals(), d)
func = d[fname]
for arg, argtype in zip(argtuple._fields, argtypes):
func.__annotations__[arg] = argtype
func.__annotations__["return"] = restype
module_globals[fname] = func
module_all.append(fname)
class Parser:
"""A stateful C header parser.
An instance of the parser keeps tracks of the ``#defines``, whether of
constants or of types (no other preprocessor macro is handled).
"""
def __init__(self, *fnames, compiler="gcc"):
self.types = C_TYPES
self.constants = {}
lines = []
for fname in fnames:
with open(fname) as f:
lines.extend(line.split("//")[0] for line in f)
self.header = re.sub(
r"/\*.*?\*/", "", "".join(lines), flags=re.DOTALL)
if compiler not in ("gcc", "msvc"):
raise ValueError("Unknown compiler")
self.compiler = compiler
def parse(self):
"""Parse the header file.
Four mappings are returned in a single namespace object: constants,
enum typedefs, struct typedefs and function declarations.
Constants are mapped onto their value, with ``#define``'s with no value
mapped to None. Structs are mapped onto ctypes structs. Functions
are mapped onto ``((type, ...), namedtuple, restype)`` triplets, where
each namedtuple's fields are the names of the arguments.
Definitions that include unknown types are silently ignored.
"""
return Parse(constants=self.parse_defines(),
enums=self.parse_enums(),
structs=self.parse_structs(),
fundecls=self.parse_functions())
def parse_decl(self, decl):
"""Parse a type name as a :mod:`ctypes` type and identifier pair.
"""
array_match = re.search(r"\[(.+?)\]$", decl)
if array_match:
decl = decl[:array_match.start()]
array_size = eval(array_match.group(1), {}, dict(self.constants))
else:
array_size = None
ident_match = re.search(r"\w+$", decl)
if not ident_match:
raise ParseError
ident = ident_match.group()
type_s = decl[:ident_match.start()]
pointed_to = type_s.rstrip("* ")
n_stars = type_s[len(pointed_to):].count("*")
pointed_to = " ".join(el for el in pointed_to.split() if el != "const") | n_stars -= 1
try:
ctype = self.types[pointed_to]
except KeyError:
raise ParseError
if n_stars:
ctype = as_ctype(ctype)
for _ in range(n_stars):
ctype = ctypes.POINTER(ctype)
if array_size is not None:
ctype = ctype * array_size
return ctype, ident
def parse_defines(self):
"""Parse ``#define``'s of constants and of types.
"""
for line in self.header.splitlines():
if line.lower().startswith("#define"):
_, line = line.strip().split(None, 1) # remove #define
if " " in line:
symbol, value = line.split(None, 1)
if value.isdigit():
value = int(value)
elif value.startswith("0x"):
value = int(value, 16)
elif value in self.types:
self.types[symbol] = self.types[value]
else:
symbol = line
value = ""
self.constants[symbol] = value
return self.constants
def parse_enums(self):
"""Parse ``typedef enum``'s.
"""
# Notes on enum types
#
# GCC:
#
# Normally, the type is unsigned int if there are no negative values in
# the enumeration, otherwise int. If -fshort-enums is specified, then
# if there are negative values it is the first of signed char, short
# and int that can represent all the values, otherwise it is the first
# of unsigned char, unsigned short and unsigned int that can represent
# all the values.
#
# On some targets, -fshort-enums is the default; this is determined by
# the ABI.
#
# MSVC:
#
# A variable declared as enum is an int [32-bit].
enums = {}
entry_re = re.compile(r"\s*(\w+)\s*(?:=\s*(\w+)\s*)?")
for entries, enumname in re.findall(
r"typedef\s+enum\s+\w*\s*{([^}]*)}\s*(\w+)\s*;", self.header,
re.DOTALL):
if self.compiler == "msvc":
underlying_type = ctypes.c_int
elif self.compiler == "gcc":
underlying_type = ctypes.c_uint
values = []
for entry in entries.split(","):
name, value = re.match(entry_re, entry).groups()
value = eval(value) if value is not None else (
values[-1][1] + 1 if values else 0)
if value < 0:
underlying_type = ctypes.c_int
values.append((name, value))
enum_type = {ctypes.c_int: CIntEnum,
ctypes.c_uint: CUIntEnum}[underlying_type]
self.types[enumname] = enums[enumname] = enum_type(enumname, values)
return enums
def parse_structs(self):
"""Parse ``typedef struct``'s.
"""
structs = {}
for fields, structname in re.findall(
r"typedef\s+struct\s+\w*\s*{([^}]*)}\s*(\w+)\s*;", self.header,
re.DOTALL):
fieldtypes = []
fieldnames = []
for field in fields.split(";"):
field = field.strip()
if not field:
continue
fieldtype, fieldname = self.parse_decl(field)
fieldtypes.append(fieldtype)
fieldnames.append(fieldname)
struct = type(
str(structname),
(ctypes.Structure,),
{"_fields_": list(zip(fieldnames, map(as_ctype, fieldtypes)))})
struct.__doc__ = "\n".join(
"{0}: {1}".format(field, type.__name__)
for field, type in zip(fieldnames, fieldtypes))
self.types[structname] = structs[structname] = struct
return structs
def parse_functions(self):
"""Parse function declarations
"""
fundecls = {}
for prefix, fname, proto in re.findall(
r"^(.+?\s+)?(\w+)\s*\(([\w\*\s,]+)\);", self.header, re.MULTILINE):
prefix = " ".join(self.constants.get(word, word)
for word in prefix.split()).strip()
if prefix == "void":
restype = None
else:
restype, _ = self.parse_decl(prefix + " _")
assert _ == "_"
argtypes = []
argnames = []
for argspec in proto.split(","):
argspec = argspec.strip()
if argspec == "void":
continue
argtype, argname = self.parse_decl(argspec)
argtypes.append(argtype)
argnames.append(argname)
fundecls[fname] = argtypes, namedtuple("args", argnames), restype
return fundecls
def deref(obj):
"""Cast a ctypes object or byref into a Python object.
"""
try:
return obj._obj.value # byref
except AttributeError:
try:
return obj.value # plain ctypes
except AttributeError:
return obj # plain python
class DLLError(Exception):
"""Raised when a DLL function returns a non-success exit code.
"""
def __init__(self, code):
self.code = code
class DLL:
"""A wrapper for a `ctypes` DLL object.
"""
def __init__(self, dll, parse, success_code):
self._dll = dll
self._fundecls = parse.fundecls
for fname in parse.fundecls:
self._set_success_codes(fname, [success_code])
def _set_success_codes(self, fname, success_codes):
"""Add a method with specific success codes.
"""
func = getattr(self._dll, fname)
argtypes, func.argtuple_t, restype = self._fundecls[fname]
argtypes = [argtype
if not (isinstance(argtype, type(ctypes.POINTER(ctypes.c_int))) and
argtype._type_.__module__ != "ctypes") # remove struct (nested) pointers
else ctypes.c_voidp for argtype in argtypes]
func.argtypes = argtypes
try:
success_code_type, = set(type(code) for code in success_codes)
except ValueError:
raise AssertionError("Success code of different types")
if success_code_type == restype:
func.success_codes = success_codes
func.errcheck = errcheck
else:
func.restype = restype
setattr(self, fname, func)
def _prohibit(self, fname):
"""Hide a DLL function.
"""
@functools.wraps(getattr(cls, fname))
def prohibited(*args, **kwargs):
raise AttributeError(
"{} is not a public function of the DLL".format(fname))
setattr(self, fname, prohibited)
def errcheck(retcode, func, args):
"""Return all (deref'ed) arguments on success, raise exception on failure.
"""
if retcode in func.success_codes:
return func.argtuple_t(*[deref(arg) for arg in args])
else:
raise DLLError(type(func.success_codes[0])(retcode)) | if pointed_to in ("char", "wchar_t", "void") and n_stars >= 1:
pointed_to += "*" | random_line_split |
h2ctypes.py | """A generator of ctypes wrappers for C libraries.
"""
from collections import namedtuple
import ctypes
try:
from ctypes import wintypes
except ValueError:
class wintypes:
"""Standard types defined by :file:`Windows.h`.
"""
BYTE = ctypes.c_ubyte
DWORD = ctypes.c_uint32
ULONG = ctypes.c_uint32
WORD = ctypes.c_ushort
from enum import IntEnum
import functools
import inspect
import re
C_TYPES = {"_Bool": ctypes.c_bool,
"char": ctypes.c_char, # also ctypes.c_byte
"wchar_t": ctypes.c_wchar,
"unsigned char": ctypes.c_ubyte,
"short": ctypes.c_short,
"unsigned short": ctypes.c_ushort,
"int": ctypes.c_int,
"unsigned int": ctypes.c_uint,
"long": ctypes.c_long,
"unsigned long": ctypes.c_ulong,
"long long": ctypes.c_longlong,
"unsigned long long": ctypes.c_ulonglong,
"size_t": ctypes.c_size_t,
"ssize_t": ctypes.c_ssize_t,
"float": ctypes.c_float,
"double": ctypes.c_double,
"long double": ctypes.c_longdouble,
"char*": ctypes.c_char_p,
"wchar_t*": ctypes.c_wchar_p,
"void*": ctypes.c_void_p,
"int32_t": ctypes.c_int32,
"uint32_t": ctypes.c_uint32,
"int64_t": ctypes.c_int64,
"uint64_t": ctypes.c_uint64,
"BYTE": wintypes.BYTE,
"DWORD": wintypes.DWORD,
"ULONG": wintypes.ULONG,
"WORD": wintypes.WORD}
class CIntEnum(IntEnum):
def from_param(self):
return ctypes.c_int(int(self))
@staticmethod
def as_ctype():
return ctypes.c_int
class CUIntEnum(IntEnum):
def from_param(self):
return ctypes.c_uint(int(self))
@staticmethod
def as_ctype():
return ctypes.c_uint
def as_ctype(type):
"""Unwraps an IntEnum type into a C type.
"""
return getattr(type, "as_ctype", lambda: type)()
class ParseError(Exception):
"""Raised by unparseable constructs.
"""
class Parse(namedtuple("_Parse", "constants enums structs fundecls")):
"""The result of the parsing of a C header.
"""
def export_for_pydoc(self, module_globals):
"""Export a parse to a module's global dict.
"""
module_all = module_globals.setdefault("__all__", [])
for k, v in sorted(self.constants.items()):
module_globals[k] = v
module_all.append(k)
for k, v in sorted(self.enums.items()):
module_globals[k] = v
module_all.append(k)
for fname, (argtypes, argtuple, restype) in sorted(
self.fundecls.items()):
prototype = "def {}{}: pass".format(
fname, inspect.formatargspec(argtuple._fields))
d = {}
exec(prototype, globals(), d)
func = d[fname]
for arg, argtype in zip(argtuple._fields, argtypes):
func.__annotations__[arg] = argtype
func.__annotations__["return"] = restype
module_globals[fname] = func
module_all.append(fname)
class Parser:
"""A stateful C header parser.
An instance of the parser keeps tracks of the ``#defines``, whether of
constants or of types (no other preprocessor macro is handled).
"""
def __init__(self, *fnames, compiler="gcc"):
self.types = C_TYPES
self.constants = {}
lines = []
for fname in fnames:
with open(fname) as f:
lines.extend(line.split("//")[0] for line in f)
self.header = re.sub(
r"/\*.*?\*/", "", "".join(lines), flags=re.DOTALL)
if compiler not in ("gcc", "msvc"):
raise ValueError("Unknown compiler")
self.compiler = compiler
def parse(self):
"""Parse the header file.
Four mappings are returned in a single namespace object: constants,
enum typedefs, struct typedefs and function declarations.
Constants are mapped onto their value, with ``#define``'s with no value
mapped to None. Structs are mapped onto ctypes structs. Functions
are mapped onto ``((type, ...), namedtuple, restype)`` triplets, where
each namedtuple's fields are the names of the arguments.
Definitions that include unknown types are silently ignored.
"""
return Parse(constants=self.parse_defines(),
enums=self.parse_enums(),
structs=self.parse_structs(),
fundecls=self.parse_functions())
def parse_decl(self, decl):
"""Parse a type name as a :mod:`ctypes` type and identifier pair.
"""
array_match = re.search(r"\[(.+?)\]$", decl)
if array_match:
decl = decl[:array_match.start()]
array_size = eval(array_match.group(1), {}, dict(self.constants))
else:
array_size = None
ident_match = re.search(r"\w+$", decl)
if not ident_match:
raise ParseError
ident = ident_match.group()
type_s = decl[:ident_match.start()]
pointed_to = type_s.rstrip("* ")
n_stars = type_s[len(pointed_to):].count("*")
pointed_to = " ".join(el for el in pointed_to.split() if el != "const")
if pointed_to in ("char", "wchar_t", "void") and n_stars >= 1:
pointed_to += "*"
n_stars -= 1
try:
ctype = self.types[pointed_to]
except KeyError:
raise ParseError
if n_stars:
ctype = as_ctype(ctype)
for _ in range(n_stars):
ctype = ctypes.POINTER(ctype)
if array_size is not None:
ctype = ctype * array_size
return ctype, ident
def parse_defines(self):
"""Parse ``#define``'s of constants and of types.
"""
for line in self.header.splitlines():
if line.lower().startswith("#define"):
_, line = line.strip().split(None, 1) # remove #define
if " " in line:
symbol, value = line.split(None, 1)
if value.isdigit():
value = int(value)
elif value.startswith("0x"):
value = int(value, 16)
elif value in self.types:
self.types[symbol] = self.types[value]
else:
symbol = line
value = ""
self.constants[symbol] = value
return self.constants
def parse_enums(self):
"""Parse ``typedef enum``'s.
"""
# Notes on enum types
#
# GCC:
#
# Normally, the type is unsigned int if there are no negative values in
# the enumeration, otherwise int. If -fshort-enums is specified, then
# if there are negative values it is the first of signed char, short
# and int that can represent all the values, otherwise it is the first
# of unsigned char, unsigned short and unsigned int that can represent
# all the values.
#
# On some targets, -fshort-enums is the default; this is determined by
# the ABI.
#
# MSVC:
#
# A variable declared as enum is an int [32-bit].
enums = {}
entry_re = re.compile(r"\s*(\w+)\s*(?:=\s*(\w+)\s*)?")
for entries, enumname in re.findall(
r"typedef\s+enum\s+\w*\s*{([^}]*)}\s*(\w+)\s*;", self.header,
re.DOTALL):
if self.compiler == "msvc":
underlying_type = ctypes.c_int
elif self.compiler == "gcc":
underlying_type = ctypes.c_uint
values = []
for entry in entries.split(","):
name, value = re.match(entry_re, entry).groups()
value = eval(value) if value is not None else (
values[-1][1] + 1 if values else 0)
if value < 0:
underlying_type = ctypes.c_int
values.append((name, value))
enum_type = {ctypes.c_int: CIntEnum,
ctypes.c_uint: CUIntEnum}[underlying_type]
self.types[enumname] = enums[enumname] = enum_type(enumname, values)
return enums
def parse_structs(self):
"""Parse ``typedef struct``'s.
"""
structs = {}
for fields, structname in re.findall(
r"typedef\s+struct\s+\w*\s*{([^}]*)}\s*(\w+)\s*;", self.header,
re.DOTALL):
fieldtypes = []
fieldnames = []
for field in fields.split(";"):
field = field.strip()
if not field:
continue
fieldtype, fieldname = self.parse_decl(field)
fieldtypes.append(fieldtype)
fieldnames.append(fieldname)
struct = type(
str(structname),
(ctypes.Structure,),
{"_fields_": list(zip(fieldnames, map(as_ctype, fieldtypes)))})
struct.__doc__ = "\n".join(
"{0}: {1}".format(field, type.__name__)
for field, type in zip(fieldnames, fieldtypes))
self.types[structname] = structs[structname] = struct
return structs
def parse_functions(self):
"""Parse function declarations
"""
fundecls = {}
for prefix, fname, proto in re.findall(
r"^(.+?\s+)?(\w+)\s*\(([\w\*\s,]+)\);", self.header, re.MULTILINE):
prefix = " ".join(self.constants.get(word, word)
for word in prefix.split()).strip()
if prefix == "void":
restype = None
else:
restype, _ = self.parse_decl(prefix + " _")
assert _ == "_"
argtypes = []
argnames = []
for argspec in proto.split(","):
argspec = argspec.strip()
if argspec == "void":
continue
argtype, argname = self.parse_decl(argspec)
argtypes.append(argtype)
argnames.append(argname)
fundecls[fname] = argtypes, namedtuple("args", argnames), restype
return fundecls
def deref(obj):
"""Cast a ctypes object or byref into a Python object.
"""
try:
return obj._obj.value # byref
except AttributeError:
try:
return obj.value # plain ctypes
except AttributeError:
return obj # plain python
class DLLError(Exception):
"""Raised when a DLL function returns a non-success exit code.
"""
def __init__(self, code):
self.code = code
class DLL:
"""A wrapper for a `ctypes` DLL object.
"""
def __init__(self, dll, parse, success_code):
self._dll = dll
self._fundecls = parse.fundecls
for fname in parse.fundecls:
self._set_success_codes(fname, [success_code])
def _set_success_codes(self, fname, success_codes):
"""Add a method with specific success codes.
"""
func = getattr(self._dll, fname)
argtypes, func.argtuple_t, restype = self._fundecls[fname]
argtypes = [argtype
if not (isinstance(argtype, type(ctypes.POINTER(ctypes.c_int))) and
argtype._type_.__module__ != "ctypes") # remove struct (nested) pointers
else ctypes.c_voidp for argtype in argtypes]
func.argtypes = argtypes
try:
success_code_type, = set(type(code) for code in success_codes)
except ValueError:
raise AssertionError("Success code of different types")
if success_code_type == restype:
func.success_codes = success_codes
func.errcheck = errcheck
else:
func.restype = restype
setattr(self, fname, func)
def _prohibit(self, fname):
"""Hide a DLL function.
"""
@functools.wraps(getattr(cls, fname))
def prohibited(*args, **kwargs):
|
setattr(self, fname, prohibited)
def errcheck(retcode, func, args):
"""Return all (deref'ed) arguments on success, raise exception on failure.
"""
if retcode in func.success_codes:
return func.argtuple_t(*[deref(arg) for arg in args])
else:
raise DLLError(type(func.success_codes[0])(retcode))
| raise AttributeError(
"{} is not a public function of the DLL".format(fname)) | identifier_body |
h2ctypes.py | """A generator of ctypes wrappers for C libraries.
"""
from collections import namedtuple
import ctypes
try:
from ctypes import wintypes
except ValueError:
class wintypes:
"""Standard types defined by :file:`Windows.h`.
"""
BYTE = ctypes.c_ubyte
DWORD = ctypes.c_uint32
ULONG = ctypes.c_uint32
WORD = ctypes.c_ushort
from enum import IntEnum
import functools
import inspect
import re
C_TYPES = {"_Bool": ctypes.c_bool,
"char": ctypes.c_char, # also ctypes.c_byte
"wchar_t": ctypes.c_wchar,
"unsigned char": ctypes.c_ubyte,
"short": ctypes.c_short,
"unsigned short": ctypes.c_ushort,
"int": ctypes.c_int,
"unsigned int": ctypes.c_uint,
"long": ctypes.c_long,
"unsigned long": ctypes.c_ulong,
"long long": ctypes.c_longlong,
"unsigned long long": ctypes.c_ulonglong,
"size_t": ctypes.c_size_t,
"ssize_t": ctypes.c_ssize_t,
"float": ctypes.c_float,
"double": ctypes.c_double,
"long double": ctypes.c_longdouble,
"char*": ctypes.c_char_p,
"wchar_t*": ctypes.c_wchar_p,
"void*": ctypes.c_void_p,
"int32_t": ctypes.c_int32,
"uint32_t": ctypes.c_uint32,
"int64_t": ctypes.c_int64,
"uint64_t": ctypes.c_uint64,
"BYTE": wintypes.BYTE,
"DWORD": wintypes.DWORD,
"ULONG": wintypes.ULONG,
"WORD": wintypes.WORD}
class CIntEnum(IntEnum):
def from_param(self):
return ctypes.c_int(int(self))
@staticmethod
def as_ctype():
return ctypes.c_int
class CUIntEnum(IntEnum):
def from_param(self):
return ctypes.c_uint(int(self))
@staticmethod
def as_ctype():
return ctypes.c_uint
def as_ctype(type):
"""Unwraps an IntEnum type into a C type.
"""
return getattr(type, "as_ctype", lambda: type)()
class ParseError(Exception):
"""Raised by unparseable constructs.
"""
class Parse(namedtuple("_Parse", "constants enums structs fundecls")):
"""The result of the parsing of a C header.
"""
def export_for_pydoc(self, module_globals):
"""Export a parse to a module's global dict.
"""
module_all = module_globals.setdefault("__all__", [])
for k, v in sorted(self.constants.items()):
module_globals[k] = v
module_all.append(k)
for k, v in sorted(self.enums.items()):
module_globals[k] = v
module_all.append(k)
for fname, (argtypes, argtuple, restype) in sorted(
self.fundecls.items()):
prototype = "def {}{}: pass".format(
fname, inspect.formatargspec(argtuple._fields))
d = {}
exec(prototype, globals(), d)
func = d[fname]
for arg, argtype in zip(argtuple._fields, argtypes):
|
func.__annotations__["return"] = restype
module_globals[fname] = func
module_all.append(fname)
class Parser:
"""A stateful C header parser.
An instance of the parser keeps tracks of the ``#defines``, whether of
constants or of types (no other preprocessor macro is handled).
"""
def __init__(self, *fnames, compiler="gcc"):
self.types = C_TYPES
self.constants = {}
lines = []
for fname in fnames:
with open(fname) as f:
lines.extend(line.split("//")[0] for line in f)
self.header = re.sub(
r"/\*.*?\*/", "", "".join(lines), flags=re.DOTALL)
if compiler not in ("gcc", "msvc"):
raise ValueError("Unknown compiler")
self.compiler = compiler
def parse(self):
"""Parse the header file.
Four mappings are returned in a single namespace object: constants,
enum typedefs, struct typedefs and function declarations.
Constants are mapped onto their value, with ``#define``'s with no value
mapped to None. Structs are mapped onto ctypes structs. Functions
are mapped onto ``((type, ...), namedtuple, restype)`` triplets, where
each namedtuple's fields are the names of the arguments.
Definitions that include unknown types are silently ignored.
"""
return Parse(constants=self.parse_defines(),
enums=self.parse_enums(),
structs=self.parse_structs(),
fundecls=self.parse_functions())
def parse_decl(self, decl):
"""Parse a type name as a :mod:`ctypes` type and identifier pair.
"""
array_match = re.search(r"\[(.+?)\]$", decl)
if array_match:
decl = decl[:array_match.start()]
array_size = eval(array_match.group(1), {}, dict(self.constants))
else:
array_size = None
ident_match = re.search(r"\w+$", decl)
if not ident_match:
raise ParseError
ident = ident_match.group()
type_s = decl[:ident_match.start()]
pointed_to = type_s.rstrip("* ")
n_stars = type_s[len(pointed_to):].count("*")
pointed_to = " ".join(el for el in pointed_to.split() if el != "const")
if pointed_to in ("char", "wchar_t", "void") and n_stars >= 1:
pointed_to += "*"
n_stars -= 1
try:
ctype = self.types[pointed_to]
except KeyError:
raise ParseError
if n_stars:
ctype = as_ctype(ctype)
for _ in range(n_stars):
ctype = ctypes.POINTER(ctype)
if array_size is not None:
ctype = ctype * array_size
return ctype, ident
def parse_defines(self):
"""Parse ``#define``'s of constants and of types.
"""
for line in self.header.splitlines():
if line.lower().startswith("#define"):
_, line = line.strip().split(None, 1) # remove #define
if " " in line:
symbol, value = line.split(None, 1)
if value.isdigit():
value = int(value)
elif value.startswith("0x"):
value = int(value, 16)
elif value in self.types:
self.types[symbol] = self.types[value]
else:
symbol = line
value = ""
self.constants[symbol] = value
return self.constants
def parse_enums(self):
"""Parse ``typedef enum``'s.
"""
# Notes on enum types
#
# GCC:
#
# Normally, the type is unsigned int if there are no negative values in
# the enumeration, otherwise int. If -fshort-enums is specified, then
# if there are negative values it is the first of signed char, short
# and int that can represent all the values, otherwise it is the first
# of unsigned char, unsigned short and unsigned int that can represent
# all the values.
#
# On some targets, -fshort-enums is the default; this is determined by
# the ABI.
#
# MSVC:
#
# A variable declared as enum is an int [32-bit].
enums = {}
entry_re = re.compile(r"\s*(\w+)\s*(?:=\s*(\w+)\s*)?")
for entries, enumname in re.findall(
r"typedef\s+enum\s+\w*\s*{([^}]*)}\s*(\w+)\s*;", self.header,
re.DOTALL):
if self.compiler == "msvc":
underlying_type = ctypes.c_int
elif self.compiler == "gcc":
underlying_type = ctypes.c_uint
values = []
for entry in entries.split(","):
name, value = re.match(entry_re, entry).groups()
value = eval(value) if value is not None else (
values[-1][1] + 1 if values else 0)
if value < 0:
underlying_type = ctypes.c_int
values.append((name, value))
enum_type = {ctypes.c_int: CIntEnum,
ctypes.c_uint: CUIntEnum}[underlying_type]
self.types[enumname] = enums[enumname] = enum_type(enumname, values)
return enums
def parse_structs(self):
"""Parse ``typedef struct``'s.
"""
structs = {}
for fields, structname in re.findall(
r"typedef\s+struct\s+\w*\s*{([^}]*)}\s*(\w+)\s*;", self.header,
re.DOTALL):
fieldtypes = []
fieldnames = []
for field in fields.split(";"):
field = field.strip()
if not field:
continue
fieldtype, fieldname = self.parse_decl(field)
fieldtypes.append(fieldtype)
fieldnames.append(fieldname)
struct = type(
str(structname),
(ctypes.Structure,),
{"_fields_": list(zip(fieldnames, map(as_ctype, fieldtypes)))})
struct.__doc__ = "\n".join(
"{0}: {1}".format(field, type.__name__)
for field, type in zip(fieldnames, fieldtypes))
self.types[structname] = structs[structname] = struct
return structs
def parse_functions(self):
"""Parse function declarations
"""
fundecls = {}
for prefix, fname, proto in re.findall(
r"^(.+?\s+)?(\w+)\s*\(([\w\*\s,]+)\);", self.header, re.MULTILINE):
prefix = " ".join(self.constants.get(word, word)
for word in prefix.split()).strip()
if prefix == "void":
restype = None
else:
restype, _ = self.parse_decl(prefix + " _")
assert _ == "_"
argtypes = []
argnames = []
for argspec in proto.split(","):
argspec = argspec.strip()
if argspec == "void":
continue
argtype, argname = self.parse_decl(argspec)
argtypes.append(argtype)
argnames.append(argname)
fundecls[fname] = argtypes, namedtuple("args", argnames), restype
return fundecls
def deref(obj):
"""Cast a ctypes object or byref into a Python object.
"""
try:
return obj._obj.value # byref
except AttributeError:
try:
return obj.value # plain ctypes
except AttributeError:
return obj # plain python
class DLLError(Exception):
"""Raised when a DLL function returns a non-success exit code.
"""
def __init__(self, code):
self.code = code
class DLL:
"""A wrapper for a `ctypes` DLL object.
"""
def __init__(self, dll, parse, success_code):
self._dll = dll
self._fundecls = parse.fundecls
for fname in parse.fundecls:
self._set_success_codes(fname, [success_code])
def _set_success_codes(self, fname, success_codes):
"""Add a method with specific success codes.
"""
func = getattr(self._dll, fname)
argtypes, func.argtuple_t, restype = self._fundecls[fname]
argtypes = [argtype
if not (isinstance(argtype, type(ctypes.POINTER(ctypes.c_int))) and
argtype._type_.__module__ != "ctypes") # remove struct (nested) pointers
else ctypes.c_voidp for argtype in argtypes]
func.argtypes = argtypes
try:
success_code_type, = set(type(code) for code in success_codes)
except ValueError:
raise AssertionError("Success code of different types")
if success_code_type == restype:
func.success_codes = success_codes
func.errcheck = errcheck
else:
func.restype = restype
setattr(self, fname, func)
def _prohibit(self, fname):
"""Hide a DLL function.
"""
@functools.wraps(getattr(cls, fname))
def prohibited(*args, **kwargs):
raise AttributeError(
"{} is not a public function of the DLL".format(fname))
setattr(self, fname, prohibited)
def errcheck(retcode, func, args):
"""Return all (deref'ed) arguments on success, raise exception on failure.
"""
if retcode in func.success_codes:
return func.argtuple_t(*[deref(arg) for arg in args])
else:
raise DLLError(type(func.success_codes[0])(retcode))
| func.__annotations__[arg] = argtype | conditional_block |
lib.rs | //! ## clickhouse-rs
//! Asynchronous [Yandex ClickHouse](https://clickhouse.yandex/) client library for rust programming language.
//!
//! ### Installation
//! Library hosted on [crates.io](https://crates.io/crates/clickhouse-rs/).
//!
//! ```toml
//! [dependencies]
//! clickhouse-rs = "*"
//! ```
//!
//! ### Supported data types
//!
//! * Date
//! * DateTime
//! * Decimal(P, S)
//! * Float32, Float64
//! * String, FixedString(N)
//! * UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64
//! * Nullable(T)
//! * Array(UInt/Int/String/Date/DateTime)
//! * SimpleAggregateFunction(F, T)
//! * IPv4/IPv6
//! * UUID
//!
//! ### DNS
//!
//! ```url
//! schema://user:password@host[:port]/database?param1=value1&...¶mN=valueN
//! ```
//!
//! parameters:
//!
//! - `compression` - Whether or not use compression (defaults to `none`). Possible choices:
//! * `none`
//! * `lz4`
//!
//! - `readonly` - Restricts permissions for read data, write data and change settings queries. (defaults to `none`). Possible choices:
//! * `0` - All queries are allowed.
//! * `1` - Only read data queries are allowed.
//! * `2` - Read data and change settings queries are allowed.
//!
//! - `connection_timeout` - Timeout for connection (defaults to `500 ms`)
//! - `keepalive` - TCP keep alive timeout in milliseconds.
//! - `nodelay` - Whether to enable `TCP_NODELAY` (defaults to `true`).
//!
//! - `pool_min` - Lower bound of opened connections for `Pool` (defaults to `10`).
//! - `pool_max` - Upper bound of opened connections for `Pool` (defaults to `20`).
//!
//! - `ping_before_query` - Ping server every time before execute any query. (defaults to `true`).
//! - `send_retries` - Count of retry to send request to server. (defaults to `3`).
//! - `retry_timeout` - Amount of time to wait before next retry. (defaults to `5 sec`).
//! - `ping_timeout` - Timeout for ping (defaults to `500 ms`).
//!
//! - `alt_hosts` - Comma separated list of single address host for load-balancing.
//!
//! example:
//! ```url
//! tcp://user:password@host:9000/clicks?compression=lz4&ping_timeout=42ms
//! ```
//!
//! ## Optional features
//!
//! `clickhouse-rs` puts some functionality behind optional features to optimize compile time
//! for the most common use cases. The following features are available.
//!
//! - `tokio_io` *(enabled by default)* — I/O based on [Tokio](https://tokio.rs/).
//! - `async_std` — I/O based on [async-std](https://async.rs/) (doesn't work together with `tokio_io`).
//! - `tls` — TLS support (allowed only with `tokio_io`).
//!
//! ### Example
//!
//! ```rust
//! # use std::env;
//! use clickhouse_rs::{Block, Pool, errors::Error};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Error> {
//! let ddl = r"
//! CREATE TABLE IF NOT EXISTS payment (
//! customer_id UInt32,
//! amount UInt32,
//! account_name Nullable(FixedString(3))
//! ) Engine=Memory";
//!
//! let block = Block::new()
//! .column("customer_id", vec![1_u32, 3, 5, 7, 9])
//! .column("amount", vec![2_u32, 4, 6, 8, 10])
//! .column("account_name", vec![Some("foo"), None, None, None, Some("bar")]);
//!
//! # let database_url = env::var("DATABASE_URL").unwrap_or("tcp://localhost:9000?compression=lz4".into());
//! let pool = Pool::new(database_url);
//!
//! let mut client = pool.get_handle().await?;
//! client.execute(ddl).await?;
//! client.insert("payment", block).await?;
//! let block = client.query("SELECT * FROM payment").fetch_all().await?;
//!
//! for row in block.rows() {
//! let id: u32 = row.get("customer_id")?;
//! let amount: u32 = row.get("amount")?;
//! let name: Option<&str> = row.get("account_name")?;
//! println!("Found payment {}: {} {:?}", id, amount, name);
//! }
//! Ok(())
//! }
//! ```
#![recursion_limit = "1024"]
use std::{fmt, future::Future, time::Duration};
use futures_util::{
future, future::BoxFuture, future::FutureExt, stream, stream::BoxStream, StreamExt,
};
use log::{info, warn};
use crate::{
connecting_stream::ConnectingStream,
errors::{DriverError, Error, Result},
io::ClickhouseTransport,
pool::PoolBinding,
retry_guard::retry_guard,
types::{
query_result::stream_blocks::BlockStream, Cmd, Context, IntoOptions, OptionsSource, Packet,
Query, QueryResult, SqlType,
},
};
pub use crate::{
pool::Pool,
types::{block::Block, Options},
};
mod binary;
mod client_info;
mod connecting_stream;
/// Error types.
pub mod errors;
mod io;
/// Pool types.
pub mod pool;
mod retry_guard;
/// Clickhouse types.
pub mod types;
/// This macro is a convenient way to pass row into a block.
///
/// ```rust
/// # use clickhouse_rs::{Block, row, errors::Error};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// block.push(row!{customer_id: 1, amount: 2, account_name: "foo"})?;
/// block.push(row!{customer_id: 4, amount: 4, account_name: "bar"})?;
/// block.push(row!{customer_id: 5, amount: 5, account_name: "baz"})?;
/// # assert_eq!(block.row_count(), 3);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
///
/// If a column name has special characters, you can use the alternative syntax
/// with `=>` to pass an expression as column name:
///
/// ```rust
/// # use clickhouse_rs::{Block, row, errors::Error};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// block.push(row!{"customer.id" => 1, amount: 2, "account.name" => "foo"})?;
/// block.push(row!{"customer.id" => 4, amount: 4, "account.name" => "bar"})?;
/// block.push(row!{"customer.id" => 5, amount: 5, "account.name" => "baz"})?;
/// # assert_eq!(block.row_count(), 3);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
///
/// You can also use `Vec<(String, Value)>` to construct a row and insert it into a block:
///
/// ```rust
/// # use clickhouse_rs::{Block, errors::Error, types::Value};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// for i in 1..10 {
/// let mut row = Vec::new();
/// for j in 1..10 {
/// row.push((format!("#{}", j), Value::from(i * j)));
/// }
/// block.push(row)?;
/// }
/// assert_eq!(block.row_count(), 9);
/// # println!("{:?}", block);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
#[macro_export]
macro_rules! row {
() => { $crate::types::RNil };
( $i:ident, $($tail:tt)* ) => {
row!( $($tail)* ).put(stringify!($i).into(), $i.into())
};
( $i:ident ) => { row!($i: $i) };
( $k:ident: $v:expr ) => {
$crate::types::RNil.put(stringify!($k).into(), $v.into())
};
( $k:ident: $v:expr, $($tail:tt)* ) => {
row!( $($tail)* ).put(stringify!($k).into(), $v.into())
};
( $k:expr => $v:expr ) => {
$crate::types::RNil.put($k.into(), $v.into())
};
( $k:expr => $v:expr, $($tail:tt)* ) => {
row!( $($tail)* ).put($k.into(), $v.into())
};
}
#[macro_export]
macro_rules! try_opt {
($expr:expr) => {
match $expr {
Ok(val) => val,
Err(err) => return Err(err),
}
};
}
#[doc(hidden)]
pub struct Client {
_private: (),
}
/// Clickhouse client handle.
pub struct ClientHandle {
inner: Option<ClickhouseTransport>,
context: Context,
pool: PoolBinding,
}
impl fmt::Debug for ClientHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ClientHandle")
.field("server_info", &self.context.server_info)
.finish()
}
}
impl Client {
#[deprecated(since = "0.1.4", note = "please use Pool to connect")]
pub async fn connect(options: Options) -> Result<ClientHandle> {
let source = options.into_options_src();
Self::open(source, None).await
}
pub(crate) async fn open(source: OptionsSource, pool: Option<Pool>) -> Result<ClientHandle> {
let options = try_opt!(source.get());
let compress = options.compression;
let timeout = options.connection_timeout;
let context = Context {
options: source.clone(),
..Context::default()
};
with_timeout(
async move {
let addr = match &pool {
None => &options.addr,
Some(p) => p.get_addr(),
};
info!("try to connect to {}", addr);
if addr.port() == Some(8123) {
warn!("You should use port 9000 instead of 8123 because clickhouse-rs work through the binary interface.");
}
let mut stream = ConnectingStream::new(addr, &options).await?;
stream.set_nodelay(options.nodelay)?;
stream.set_keepalive(options.keepalive)?;
let transport = ClickhouseTransport::new(stream, compress, pool.clone());
let mut handle = ClientHandle {
inner: Some(transport),
context,
pool: match pool {
None => PoolBinding::None,
Some(p) => PoolBinding::Detached(p),
},
};
handle.hello().await?;
Ok(handle)
},
timeout,
)
.await
}
}
impl ClientHandle {
pub(crate) async fn hello(&mut self) -> Result<()> {
let context = self.context.clone();
info!("[hello] -> {:?}", &context);
let mut h = None;
let mut info = None;
let mut stream = self.inner.take().unwrap().call(Cmd::Hello(context.clone()));
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Hello(inner, server_info)) => {
info!("[hello] <- {:?}", &server_info);
h = Some(inner);
info = Some(server_info);
}
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
self.context.server_info = info.unwrap();
Ok(())
}
pub async fn ping(&mut self) -> Result<()> {
let timeout = try_opt!(self.context.options.get()).ping_timeout;
with_timeout(
async move {
info!("[ping]");
let mut h = None;
let transport = self.inner.take().unwrap().clear().await?;
let mut stream = transport.call(Cmd::Ping);
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Pong(inner)) => {
info!("[pong]");
h = Some(inner);
}
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
Ok(())
},
timeout,
)
.await
}
/// Executes Clickhouse `query` on Conn.
pub fn query<Q>(&mut self, sql: Q) -> QueryResult
where
Query: From<Q>,
{
let query = Query::from(sql);
QueryResult {
client: self,
query,
}
}
/// Convenience method to prepare and execute a single SQL statement.
pub async fn execute<Q>(&mut self, sql: Q) -> Result<()>
where
Query: From<Q>,
{
let transport = self.execute_(sql).await?;
self.inner = Some(transport);
Ok(())
}
async fn execute_<Q>(&mut self, sql: Q) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.execute_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let context = self.context.clone();
let query = Query::from(sql);
with_timeout(
async {
self.wrap_future(move |c| {
info!("[execute query] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let mut h = None;
let transport = transport.clear().await?;
let mut stream = transport.call(Cmd::SendQuery(query, context.clone()));
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Eof(inner)) => h = Some(inner),
Ok(Packet::Block(_))
| Ok(Packet::ProfileInfo(_))
| Ok(Packet::Progress(_)) => (),
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
Ok(h.unwrap())
}
})
.await
},
timeout,
)
.await
}
/// Convenience method to insert block of data.
pub async fn insert<Q, B>(&mut self, table: Q, block: B) -> Result<()>
where
Query: From<Q>,
B: AsRef<Block>,
{
let transport = self.insert_(table, block.as_ref()).await?;
self.inner = Some(transport);
Ok(())
}
async fn insert_<Q>(&mut self, table: Q, block: &Block) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.insert_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let mut names: Vec<_> = Vec::with_capacity(block.column_count());
for column in block.columns() {
names.push(try_opt!(column_name_to_string(column.name())));
}
let fields = names.join(", ");
let query = Query::from(table)
.map_sql(|table| format!("INSERT INTO {} ({}) VALUES", table, fields));
let context = self.context.clone();
with_timeout(
async {
self.wrap_future(move |c| {
info!("[insert] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let transport = transport.clear().await?;
let stream = transport.call(Cmd::SendQuery(query, context.clone()));
let (transport, b) = stream.read_block().await?;
let dst_block = b.unwrap();
let casted_block = match block.cast_to(&dst_block) {
Ok(value) => value,
Err(err) => return Err(err),
};
let send_cmd = Cmd::Union(
Box::new(Cmd::SendData(casted_block, context.clone())),
Box::new(Cmd::SendData(Block::default(), context.clone())),
);
let (transport, _) = transport.call(send_cmd).read_block().await?;
Ok(transport)
}
})
.await
},
timeout,
)
.await
}
pub(crate) async fn wrap_future<T, R, F>(&mut self, f: F) -> Result<T>
where
F: FnOnce(&mut Self) -> R + Send,
R: Future<Output = Result<T>>,
T: 'static,
{
let ping_before_query = try_opt!(self.context.options.get()).ping_before_query;
if ping_before_query {
self.check_connection().await?;
}
f(self).await
}
pub(crate) fn wrap_stream<'a, F>(&'a mut self, f: F) -> BoxStream<'a, Result<Block>>
where
F: (FnOnce(&'a mut Self) -> BlockStream<'a>) + Send + 'static,
{
let ping_before_query = match self.context.options.get() {
Ok(val) => val.ping_before_query,
Err(err) => return Box::pin(stream::once(future::err(err))),
};
if ping_before_query {
let fut: BoxFuture<'a, BoxStream<'a, Result<Block>>> = Box::pin(async move {
let inner: BoxStream<'a, Result<Block>> = match self.check_connection().await {
Ok(_) => Box::pin(f(self)),
Err(err) => Box::pin(stream::once(future::err(err))),
};
inner
});
Box::pin(fut.flatten_stream())
} else {
Box::pin(f(self))
}
}
/// Check connection and try to reconnect if necessary.
pub async fn check_connection(&mut self) -> Result<()> {
self.pool.detach();
let source = self.context.options.clone();
let pool = self.pool.clone();
let (send_retries, retry_timeout) = {
let options = try_opt!(source.get());
(options.send_retries, options.retry_timeout)
};
retry_guard(self, &source, pool.into(), send_retries, retry_timeout).await?;
if !self.pool.is_attached() && self.pool.is_some() {
self.pool.attach();
}
Ok(())
}
pub(crate) fn set_inside(&self, value: bool) {
if let Some(ref inner) = self.inner {
inner.set_inside(value);
} else {
unreachable!()
}
}
}
fn column_name_to_string(name: &str) -> Result<String> {
if name.chars().all(|ch| ch.is_numeric()) {
return Ok(name.to_string());
}
if name.chars().any(|ch| ch == '`') {
let err = format!("Column name {:?} shouldn't contains backticks.", name);
return Err(Error::Other(err.into()));
}
Ok(format!("`{}`", name))
}
#[cfg(feature = "async_std")]
async fn with_timeout<F, T>(future: F, duration: Duration) -> F::Output
where
F: Future<Output = Result<T>>,
{
use async_std::io;
use futures_util::future::TryFutureExt;
io::timeout(duration, future.map_err(Into::into))
.map_err(Into::into)
.await
}
#[cfg(not(feature = "async_std"))]
async fn with_timeout<F, T>(future: F, timeout: Duration) -> F::Output
where
F: Future<Output = Result<T>>,
{
| g(test)]
pub(crate) mod test_misc {
use crate::*;
use std::env;
use lazy_static::lazy_static;
lazy_static! {
pub static ref DATABASE_URL: String = env::var("DATABASE_URL").unwrap_or_else(|_| {
"tcp://localhost:9000?compression=lz4&ping_timeout=1s&retry_timeout=2s".into()
});
}
#[test]
fn test_column_name_to_string() {
assert_eq!(column_name_to_string("id").unwrap(), "`id`");
assert_eq!(column_name_to_string("234").unwrap(), "234");
assert_eq!(column_name_to_string("ns:attr").unwrap(), "`ns:attr`");
assert!(column_name_to_string("`").is_err());
}
}
| tokio::time::timeout(timeout, future).await?
}
#[cf | identifier_body |
lib.rs | //! ## clickhouse-rs
//! Asynchronous [Yandex ClickHouse](https://clickhouse.yandex/) client library for rust programming language.
//!
//! ### Installation
//! Library hosted on [crates.io](https://crates.io/crates/clickhouse-rs/).
//!
//! ```toml
//! [dependencies]
//! clickhouse-rs = "*"
//! ```
//!
//! ### Supported data types
//!
//! * Date
//! * DateTime
//! * Decimal(P, S)
//! * Float32, Float64
//! * String, FixedString(N)
//! * UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64
//! * Nullable(T)
//! * Array(UInt/Int/String/Date/DateTime)
//! * SimpleAggregateFunction(F, T)
//! * IPv4/IPv6
//! * UUID
//!
//! ### DNS
//!
//! ```url
//! schema://user:password@host[:port]/database?param1=value1&...¶mN=valueN
//! ```
//!
//! parameters:
//!
//! - `compression` - Whether or not use compression (defaults to `none`). Possible choices:
//! * `none`
//! * `lz4`
//!
//! - `readonly` - Restricts permissions for read data, write data and change settings queries. (defaults to `none`). Possible choices:
//! * `0` - All queries are allowed.
//! * `1` - Only read data queries are allowed.
//! * `2` - Read data and change settings queries are allowed.
//!
//! - `connection_timeout` - Timeout for connection (defaults to `500 ms`)
//! - `keepalive` - TCP keep alive timeout in milliseconds.
//! - `nodelay` - Whether to enable `TCP_NODELAY` (defaults to `true`).
//!
//! - `pool_min` - Lower bound of opened connections for `Pool` (defaults to `10`).
//! - `pool_max` - Upper bound of opened connections for `Pool` (defaults to `20`).
//!
//! - `ping_before_query` - Ping server every time before execute any query. (defaults to `true`).
//! - `send_retries` - Count of retry to send request to server. (defaults to `3`).
//! - `retry_timeout` - Amount of time to wait before next retry. (defaults to `5 sec`).
//! - `ping_timeout` - Timeout for ping (defaults to `500 ms`).
//!
//! - `alt_hosts` - Comma separated list of single address host for load-balancing.
//!
//! example:
//! ```url
//! tcp://user:password@host:9000/clicks?compression=lz4&ping_timeout=42ms
//! ```
//!
//! ## Optional features
//!
//! `clickhouse-rs` puts some functionality behind optional features to optimize compile time
//! for the most common use cases. The following features are available.
//!
//! - `tokio_io` *(enabled by default)* — I/O based on [Tokio](https://tokio.rs/).
//! - `async_std` — I/O based on [async-std](https://async.rs/) (doesn't work together with `tokio_io`).
//! - `tls` — TLS support (allowed only with `tokio_io`).
//!
//! ### Example
//!
//! ```rust
//! # use std::env;
//! use clickhouse_rs::{Block, Pool, errors::Error};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Error> {
//! let ddl = r"
//! CREATE TABLE IF NOT EXISTS payment (
//! customer_id UInt32,
//! amount UInt32,
//! account_name Nullable(FixedString(3))
//! ) Engine=Memory";
//!
//! let block = Block::new()
//! .column("customer_id", vec![1_u32, 3, 5, 7, 9])
//! .column("amount", vec![2_u32, 4, 6, 8, 10])
//! .column("account_name", vec![Some("foo"), None, None, None, Some("bar")]);
//!
//! # let database_url = env::var("DATABASE_URL").unwrap_or("tcp://localhost:9000?compression=lz4".into());
//! let pool = Pool::new(database_url);
//!
//! let mut client = pool.get_handle().await?;
//! client.execute(ddl).await?;
//! client.insert("payment", block).await?;
//! let block = client.query("SELECT * FROM payment").fetch_all().await?;
//!
//! for row in block.rows() {
//! let id: u32 = row.get("customer_id")?;
//! let amount: u32 = row.get("amount")?;
//! let name: Option<&str> = row.get("account_name")?;
//! println!("Found payment {}: {} {:?}", id, amount, name);
//! }
//! Ok(())
//! }
//! ```
#![recursion_limit = "1024"]
use std::{fmt, future::Future, time::Duration};
use futures_util::{
future, future::BoxFuture, future::FutureExt, stream, stream::BoxStream, StreamExt,
};
use log::{info, warn};
use crate::{
connecting_stream::ConnectingStream,
errors::{DriverError, Error, Result},
io::ClickhouseTransport,
pool::PoolBinding,
retry_guard::retry_guard,
types::{
query_result::stream_blocks::BlockStream, Cmd, Context, IntoOptions, OptionsSource, Packet,
Query, QueryResult, SqlType,
},
};
pub use crate::{
pool::Pool,
types::{block::Block, Options},
};
mod binary;
mod client_info;
mod connecting_stream;
/// Error types.
pub mod errors;
mod io;
/// Pool types.
pub mod pool;
mod retry_guard;
/// Clickhouse types.
pub mod types;
/// This macro is a convenient way to pass row into a block.
///
/// ```rust
/// # use clickhouse_rs::{Block, row, errors::Error};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// block.push(row!{customer_id: 1, amount: 2, account_name: "foo"})?;
/// block.push(row!{customer_id: 4, amount: 4, account_name: "bar"})?;
/// block.push(row!{customer_id: 5, amount: 5, account_name: "baz"})?;
/// # assert_eq!(block.row_count(), 3);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
///
/// If a column name has special characters, you can use the alternative syntax
/// with `=>` to pass an expression as column name:
///
/// ```rust
/// # use clickhouse_rs::{Block, row, errors::Error};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// block.push(row!{"customer.id" => 1, amount: 2, "account.name" => "foo"})?;
/// block.push(row!{"customer.id" => 4, amount: 4, "account.name" => "bar"})?;
/// block.push(row!{"customer.id" => 5, amount: 5, "account.name" => "baz"})?;
/// # assert_eq!(block.row_count(), 3);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
///
/// You can also use `Vec<(String, Value)>` to construct a row and insert it into a block:
///
/// ```rust
/// # use clickhouse_rs::{Block, errors::Error, types::Value};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// for i in 1..10 {
/// let mut row = Vec::new();
/// for j in 1..10 {
/// row.push((format!("#{}", j), Value::from(i * j)));
/// }
/// block.push(row)?;
/// }
/// assert_eq!(block.row_count(), 9);
/// # println!("{:?}", block);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
#[macro_export]
macro_rules! row {
() => { $crate::types::RNil };
( $i:ident, $($tail:tt)* ) => {
row!( $($tail)* ).put(stringify!($i).into(), $i.into())
};
( $i:ident ) => { row!($i: $i) };
( $k:ident: $v:expr ) => {
$crate::types::RNil.put(stringify!($k).into(), $v.into())
};
( $k:ident: $v:expr, $($tail:tt)* ) => {
row!( $($tail)* ).put(stringify!($k).into(), $v.into())
};
( $k:expr => $v:expr ) => {
$crate::types::RNil.put($k.into(), $v.into())
};
( $k:expr => $v:expr, $($tail:tt)* ) => {
row!( $($tail)* ).put($k.into(), $v.into())
};
}
#[macro_export]
macro_rules! try_opt {
($expr:expr) => {
match $expr {
Ok(val) => val,
Err(err) => return Err(err),
}
};
}
#[doc(hidden)]
pub struct Client {
_private: (),
}
/// Clickhouse client handle.
pub struct ClientHandle {
inner: Option<ClickhouseTransport>,
context: Context,
pool: PoolBinding,
}
impl fmt::Debug for ClientHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ClientHandle")
.field("server_info", &self.context.server_info)
.finish()
}
}
impl Client {
#[deprecated(since = "0.1.4", note = "please use Pool to connect")]
pub async fn connect(options: Options) -> Result<ClientHandle> {
let source = options.into_options_src();
Self::open(source, None).await
}
pub(crate) async fn open(source: OptionsSource, pool: Option<Pool>) -> Result<ClientHandle> {
let options = try_opt!(source.get());
let compress = options.compression;
let timeout = options.connection_timeout;
let context = Context {
options: source.clone(),
..Context::default()
};
with_timeout(
async move {
let addr = match &pool {
None => &options.addr,
Some(p) => p.get_addr(),
};
info!("try to connect to {}", addr);
if addr.port() == Some(8123) {
warn!("You should use port 9000 instead of 8123 because clickhouse-rs work through the binary interface.");
}
let mut stream = ConnectingStream::new(addr, &options).await?;
stream.set_nodelay(options.nodelay)?;
stream.set_keepalive(options.keepalive)?;
let transport = ClickhouseTransport::new(stream, compress, pool.clone());
let mut handle = ClientHandle {
inner: Some(transport),
context,
pool: match pool {
None => PoolBinding::None,
Some(p) => PoolBinding::Detached(p),
},
};
handle.hello().await?;
Ok(handle)
},
timeout,
)
.await
}
}
impl ClientHandle {
pub(crate) async fn hello(&mut self) -> Result<()> {
let context = self.context.clone();
info!("[hello] -> {:?}", &context);
let mut h = None;
let mut info = None;
let mut stream = self.inner.take().unwrap().call(Cmd::Hello(context.clone()));
while let Some(packet) = stream.next().await { | }
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
self.context.server_info = info.unwrap();
Ok(())
}
pub async fn ping(&mut self) -> Result<()> {
let timeout = try_opt!(self.context.options.get()).ping_timeout;
with_timeout(
async move {
info!("[ping]");
let mut h = None;
let transport = self.inner.take().unwrap().clear().await?;
let mut stream = transport.call(Cmd::Ping);
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Pong(inner)) => {
info!("[pong]");
h = Some(inner);
}
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
Ok(())
},
timeout,
)
.await
}
/// Executes Clickhouse `query` on Conn.
pub fn query<Q>(&mut self, sql: Q) -> QueryResult
where
Query: From<Q>,
{
let query = Query::from(sql);
QueryResult {
client: self,
query,
}
}
/// Convenience method to prepare and execute a single SQL statement.
pub async fn execute<Q>(&mut self, sql: Q) -> Result<()>
where
Query: From<Q>,
{
let transport = self.execute_(sql).await?;
self.inner = Some(transport);
Ok(())
}
async fn execute_<Q>(&mut self, sql: Q) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.execute_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let context = self.context.clone();
let query = Query::from(sql);
with_timeout(
async {
self.wrap_future(move |c| {
info!("[execute query] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let mut h = None;
let transport = transport.clear().await?;
let mut stream = transport.call(Cmd::SendQuery(query, context.clone()));
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Eof(inner)) => h = Some(inner),
Ok(Packet::Block(_))
| Ok(Packet::ProfileInfo(_))
| Ok(Packet::Progress(_)) => (),
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
Ok(h.unwrap())
}
})
.await
},
timeout,
)
.await
}
/// Convenience method to insert block of data.
pub async fn insert<Q, B>(&mut self, table: Q, block: B) -> Result<()>
where
Query: From<Q>,
B: AsRef<Block>,
{
let transport = self.insert_(table, block.as_ref()).await?;
self.inner = Some(transport);
Ok(())
}
async fn insert_<Q>(&mut self, table: Q, block: &Block) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.insert_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let mut names: Vec<_> = Vec::with_capacity(block.column_count());
for column in block.columns() {
names.push(try_opt!(column_name_to_string(column.name())));
}
let fields = names.join(", ");
let query = Query::from(table)
.map_sql(|table| format!("INSERT INTO {} ({}) VALUES", table, fields));
let context = self.context.clone();
with_timeout(
async {
self.wrap_future(move |c| {
info!("[insert] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let transport = transport.clear().await?;
let stream = transport.call(Cmd::SendQuery(query, context.clone()));
let (transport, b) = stream.read_block().await?;
let dst_block = b.unwrap();
let casted_block = match block.cast_to(&dst_block) {
Ok(value) => value,
Err(err) => return Err(err),
};
let send_cmd = Cmd::Union(
Box::new(Cmd::SendData(casted_block, context.clone())),
Box::new(Cmd::SendData(Block::default(), context.clone())),
);
let (transport, _) = transport.call(send_cmd).read_block().await?;
Ok(transport)
}
})
.await
},
timeout,
)
.await
}
pub(crate) async fn wrap_future<T, R, F>(&mut self, f: F) -> Result<T>
where
F: FnOnce(&mut Self) -> R + Send,
R: Future<Output = Result<T>>,
T: 'static,
{
let ping_before_query = try_opt!(self.context.options.get()).ping_before_query;
if ping_before_query {
self.check_connection().await?;
}
f(self).await
}
pub(crate) fn wrap_stream<'a, F>(&'a mut self, f: F) -> BoxStream<'a, Result<Block>>
where
F: (FnOnce(&'a mut Self) -> BlockStream<'a>) + Send + 'static,
{
let ping_before_query = match self.context.options.get() {
Ok(val) => val.ping_before_query,
Err(err) => return Box::pin(stream::once(future::err(err))),
};
if ping_before_query {
let fut: BoxFuture<'a, BoxStream<'a, Result<Block>>> = Box::pin(async move {
let inner: BoxStream<'a, Result<Block>> = match self.check_connection().await {
Ok(_) => Box::pin(f(self)),
Err(err) => Box::pin(stream::once(future::err(err))),
};
inner
});
Box::pin(fut.flatten_stream())
} else {
Box::pin(f(self))
}
}
/// Check connection and try to reconnect if necessary.
pub async fn check_connection(&mut self) -> Result<()> {
self.pool.detach();
let source = self.context.options.clone();
let pool = self.pool.clone();
let (send_retries, retry_timeout) = {
let options = try_opt!(source.get());
(options.send_retries, options.retry_timeout)
};
retry_guard(self, &source, pool.into(), send_retries, retry_timeout).await?;
if !self.pool.is_attached() && self.pool.is_some() {
self.pool.attach();
}
Ok(())
}
pub(crate) fn set_inside(&self, value: bool) {
if let Some(ref inner) = self.inner {
inner.set_inside(value);
} else {
unreachable!()
}
}
}
fn column_name_to_string(name: &str) -> Result<String> {
if name.chars().all(|ch| ch.is_numeric()) {
return Ok(name.to_string());
}
if name.chars().any(|ch| ch == '`') {
let err = format!("Column name {:?} shouldn't contains backticks.", name);
return Err(Error::Other(err.into()));
}
Ok(format!("`{}`", name))
}
#[cfg(feature = "async_std")]
async fn with_timeout<F, T>(future: F, duration: Duration) -> F::Output
where
F: Future<Output = Result<T>>,
{
use async_std::io;
use futures_util::future::TryFutureExt;
io::timeout(duration, future.map_err(Into::into))
.map_err(Into::into)
.await
}
#[cfg(not(feature = "async_std"))]
async fn with_timeout<F, T>(future: F, timeout: Duration) -> F::Output
where
F: Future<Output = Result<T>>,
{
tokio::time::timeout(timeout, future).await?
}
#[cfg(test)]
pub(crate) mod test_misc {
use crate::*;
use std::env;
use lazy_static::lazy_static;
lazy_static! {
pub static ref DATABASE_URL: String = env::var("DATABASE_URL").unwrap_or_else(|_| {
"tcp://localhost:9000?compression=lz4&ping_timeout=1s&retry_timeout=2s".into()
});
}
#[test]
fn test_column_name_to_string() {
assert_eq!(column_name_to_string("id").unwrap(), "`id`");
assert_eq!(column_name_to_string("234").unwrap(), "234");
assert_eq!(column_name_to_string("ns:attr").unwrap(), "`ns:attr`");
assert!(column_name_to_string("`").is_err());
}
} | match packet {
Ok(Packet::Hello(inner, server_info)) => {
info!("[hello] <- {:?}", &server_info);
h = Some(inner);
info = Some(server_info); | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.