| content (string, 0–1.05M chars) | origin (string, 2 classes) | type (string, 2 classes) |
|---|---|---|
"""Define the API serializers."""
| nilq/baby-python | python |
__version__ = '1.0.3'
| nilq/baby-python | python |
import os
import featuretools as ft
import pandas as pd
from vbridge.utils.directory_helpers import exist_entityset, load_entityset, save_entityset
from vbridge.utils.entityset_helpers import remove_nan_entries
def create_entityset(dataset_id, entity_configs, relationships, table_dir, load_exist=True,
save=True, verbose=True):
if load_exist and exist_entityset(dataset_id):
es = load_entityset(dataset_id)
else:
es = ft.EntitySet(id=dataset_id)
# Add the entities to the entityset
for table_name, info in entity_configs.items():
table_df = pd.read_csv(os.path.join(table_dir, '{}.csv'.format(table_name)),
date_parser=pd.to_datetime)
if dataset_id == 'mimic-demo':
table_df.columns = [col.upper() for col in table_df.columns]
# Remove entries with missing identifiers
index = info.get('index', table_df.columns[0])
index_columns = info.get('identifiers', []) + [index]
table_df = remove_nan_entries(table_df, index_columns, verbose=verbose)
            # All identifiers are cast to strings
for col in index_columns:
table_df[col] = table_df[col].astype('str')
es.entity_from_dataframe(entity_id=table_name,
dataframe=table_df,
index=index,
time_index=info.get('time_index', None),
secondary_time_index=info.get('secondary_index', None))
# Add the relationships to the entityset
for parent, primary_key, child, foreign_key in relationships:
new_relationship = ft.Relationship(es[parent][primary_key], es[child][foreign_key])
es = es.add_relationship(new_relationship)
# Add interesting values for categorical columns
for table_name, info in entity_configs.items():
if 'interesting_values' in info:
item_index = info['item_index']
interesting_values = info['interesting_values']
if interesting_values == 'ALL':
interesting_values = es[table_name].df[item_index].unique()
elif isinstance(interesting_values, int):
interesting_values = es[table_name].df[item_index] \
.value_counts()[:interesting_values].index
es[table_name][item_index].interesting_values = interesting_values
if save:
save_entityset(es, dataset_id)
return es
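
# Hedged usage sketch (not part of the original module): the config shapes below
# are assumptions inferred from how create_entityset reads them above.
if __name__ == '__main__':
    example_configs = {
        'PATIENTS': {'index': 'SUBJECT_ID'},
        'ADMISSIONS': {'index': 'HADM_ID', 'identifiers': ['SUBJECT_ID'],
                       'time_index': 'ADMITTIME'},
    }
    example_relationships = [
        # (parent_table, primary_key, child_table, foreign_key)
        ('PATIENTS', 'SUBJECT_ID', 'ADMISSIONS', 'SUBJECT_ID'),
    ]
    es = create_entityset('mimic-demo', example_configs, example_relationships,
                          table_dir='./data', load_exist=False, save=False)
    print(es)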
| nilq/baby-python | python |
src = Split('''
rec_libc.c
rec_main.c
''')
component = aos_component('recovery', src)
component.add_global_includes('.')
| nilq/baby-python | python |
import django
import sys,os
rootpath = os.path.dirname(os.path.realpath(__file__)).replace("\\","/")
rootpath = rootpath.split("/apps")[0]
# print(rootpath)
syspath=sys.path
sys.path=[]
sys.path.append(rootpath)  # put the absolute project root on the search path
sys.path.extend([rootpath+i for i in os.listdir(rootpath) if i[0]!="."])  # add first-level directories under the project root to the Python search path
sys.path.extend(syspath)
from apps.common.func.WebFunc import *
from all_models.models import *
import json
import datetime
def getServiceInterfaceCoverage():
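    # srcFolders is assumed to be provided by the wildcard imports above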
serviceNameList = srcFolders
standardDataDict = {}
for serviceName in serviceNameList:
print("serviceName:", serviceName)
execSql = "SELECT interfaceUrl,serviceName FROM tb_standard_interface WHERE state=1 AND apiStatus=1 AND serviceName='%s'" % serviceName
standardData = executeSqlGetDict(execSql)
print("standardData:", standardData)
if not standardData:
print("33333333333333")
standardDataDict[serviceName] = {"dataList": [], "serviceInterfaceCount": 0, "serviceInterfaceIsCoveredCount": 0, "moduleDict": {}}
else:
            # Build the standard-data dict
for tmpInterfaceDict in standardData:
tmpServiceName = tmpInterfaceDict['serviceName']
if tmpServiceName not in standardDataDict.keys():
standardDataDict[tmpServiceName] = {"dataList": [], "serviceInterfaceCount": 0, "serviceInterfaceIsCoveredCount": 0, "moduleDict": {}}
standardDataDict[tmpServiceName]['dataList'].append(tmpInterfaceDict)
standardDataDict[tmpServiceName]['serviceInterfaceCount'] += 1
httpInterface = TbHttpInterface.objects.filter(state=1, url=tmpInterfaceDict["interfaceUrl"])
httpTestcaseStep = TbHttpTestcaseStep.objects.filter(state=1, url=tmpInterfaceDict["interfaceUrl"])
if len(httpInterface) != 0 or len(httpTestcaseStep) != 0:
standardDataDict[tmpServiceName]['serviceInterfaceIsCoveredCount'] += 1
print("standardDataDict:", standardDataDict)
return standardDataDict
if __name__ == "__main__":
now_time = datetime.datetime.now()
yes_time = now_time + datetime.timedelta(-1)
standardDataDict = getServiceInterfaceCoverage()
    for standardData in standardDataDict:
        # Replace any existing coverage record for this service, then save a fresh one
        coveredResult = TbWebPortalServiceInterfaceCovered.objects.filter(serviceName=standardData, state=1)
        if len(coveredResult) != 0:
            coveredResult.delete()
        serviceInterfaceCoverage = TbWebPortalServiceInterfaceCovered()
        serviceInterfaceCoverage.serviceName = standardData
        serviceInterfaceCoverage.standardInterfaceNum = standardDataDict[standardData]["serviceInterfaceCount"]
        serviceInterfaceCoverage.coveredInterfaceNum = standardDataDict[standardData]["serviceInterfaceIsCoveredCount"]
        serviceInterfaceCoverage.serviceTestDetail = json.dumps(standardDataDict[standardData]["dataList"])
        if standardDataDict[standardData]["serviceInterfaceCount"] == 0:
            serviceInterfaceCoverage.coverage = "%.2f" % 0
        else:
            serviceInterfaceCoverage.coverage = "%.2f" % (
                (standardDataDict[standardData]["serviceInterfaceIsCoveredCount"]
                 / standardDataDict[standardData]["serviceInterfaceCount"]) * 100)
        serviceInterfaceCoverage.state = 1
        serviceInterfaceCoverage.statisticalTime = yes_time
        serviceInterfaceCoverage.save()
| nilq/baby-python | python |
""" The model train file trains the model on the download dataset and other parameters specified in the assemblyconfig file
The main function runs the training and populates the created file structure with the trained model, logs and plots
"""
import os
import sys
current_path=os.path.dirname(__file__)
parentdir = os.path.dirname(current_path)
os.environ["CUDA_VISIBLE_DEVICES"]="0" # Nvidia Quadro GV100
#os.environ["CUDA_VISIBLE_DEVICES"]="1" # Nvidia Quadro M2000
#Adding Path to various Modules
sys.path.append("../core")
sys.path.append("../visualization")
sys.path.append("../utilities")
sys.path.append("../datasets")
sys.path.append("../trained_models")
sys.path.append("../config")
#path_var=os.path.join(os.path.dirname(__file__),"../utilities")
#sys.path.append(path_var)
#sys.path.insert(0,parentdir)
#Importing Required Modules
import pathlib
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import backend as K
K.clear_session()
#Importing Config files
import assembly_config as config
import model_config as cftrain
import voxel_config as vc
#Importing required modules from the package
from measurement_system import HexagonWlsScanner
from assembly_system import VRMSimulationModel
from wls400a_system import GetInferenceData
from data_import import GetTrainData
from encode_decode_model import Encode_Decode_Model
from training_viz import TrainViz
from metrics_eval import MetricsEval
from keras_lr_multiplier import LRMultiplier
from point_cloud_construction import GetPointCloud
class Unet_DeployModel:
"""Train Model Class, the initialization parameters are parsed from modelconfig_train.py file
:param batch_size: mini batch size while training the model
:type batch_size: int (required)
:param epochs: no of epochs to conduct training
:type epochs: int (required)
:param split_ratio: train and validation split for the model
:type assembly_system: float (required)
The class contains run_train_model method
"""
def unet_run_model(self,model,X_in_test,model_path,logs_path,plots_path,test_result=0,Y_out_test_list=0,activate_tensorboard=0,run_id=0,tl_type='full_fine_tune'):
"""run_train_model function trains the model on the dataset and saves the trained model,logs and plots within the file structure, the function prints the training evaluation metrics
:param model: 3D CNN model compiled within the Deep Learning Class, refer https://keras.io/models/model/ for more information
:type model: keras.models (required)
:param X_in: Train dataset input (predictor variables), 3D Voxel representation of the cloud of point and node deviation data obtained from the VRM software based on the sampling input
:type X_in: numpy.array [samples*voxel_dim*voxel_dim*voxel_dim*deviation_channels] (required)
:param Y_out: Train dataset output (variables to predict), Process Parameters/KCCs obtained from sampling
:type Y_out: numpy.array [samples*assembly_kccs] (required)
:param model_path: model path at which the trained model is saved
:type model_path: str (required)
:param logs_path: logs path where the training metrics file is saved
:type logs_path: str (required)
:param plots_path: plots path where model training loss convergence plot is saved
:type plots_path: str (required)
:param activate_tensorboard: flag to indicate if tensorboard should be added in model callbacks for better visualization, 0 by default, set to 1 to activate tensorboard
:type activate_tensorboard: int
:param run_id: Run id index used in data study to conduct multiple training runs with different dataset sizes, defaults to 0
:type run_id: int
"""
import tensorflow as tf
from tensorflow.keras.models import load_model
import tensorflow.keras.backend as K
#model_file_path=model_path+'/unet_trained_model_'+str(run_id)+'.h5'
model_file_path=model_path+'/unet_trained_model_'+str(run_id)
#tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir='C:\\Users\\sinha_s\\Desktop\\dlmfg_package\\dlmfg\\trained_models\\inner_rf_assembly\\logs',histogram_freq=1)
#inference_model=load_model(model_file_path,custom_objects={'mse_scaled': mse_scaled} )
model.load_weights(model_file_path)
print("Trained Model Weights loaded successfully")
print("Conducting Inference...")
model_outputs=model.predict(X_in_test)
y_pred=model_outputs[0]
print("Inference Completed !")
if(test_result==1):
            metrics_eval = MetricsEval()
eval_metrics,accuracy_metrics_df=metrics_eval.metrics_eval_base(y_pred,Y_out_test_list[0],logs_path)
#y_cop_pred_flat=y_cop_pred.flatten()
#y_cop_test_flat=y_cop_test.flatten()
#combined_array=np.stack([y_cop_test_flat,y_cop_pred_flat],axis=1)
#filtered_array=combined_array[np.where(combined_array[:,0] >= 0.05)]
#y_cop_test_vector=filtered_array[:,0:1]
#y_cop_pred_vector=filtered_array[:,1:2]
eval_metrics_cop_list=[]
accuracy_metrics_df_cop_list=[]
for i in range(1,len(model_outputs)):
y_cop_pred=model_outputs[i]
y_cop_test=Y_out_test_list[i]
y_cop_pred_vector=np.reshape(y_cop_pred,(y_cop_pred.shape[0],-1))
y_cop_test_vector=np.reshape(y_cop_test,(y_cop_test.shape[0],-1))
y_cop_pred_vector=y_cop_pred_vector.T
y_cop_test_vector=y_cop_test_vector.T
print(y_cop_pred_vector.shape)
#y_cop_test_flat=y_cop_test.flatten()
eval_metrics_cop,accuracy_metrics_df_cop=metrics_eval.metrics_eval_cop(y_cop_pred_vector,y_cop_test_vector,logs_path)
eval_metrics_cop_list.append(eval_metrics_cop)
accuracy_metrics_df_cop_list.append(accuracy_metrics_df_cop)
return y_pred,model_outputs,model,eval_metrics,accuracy_metrics_df,eval_metrics_cop_list,accuracy_metrics_df_cop_list
return y_pred,model_outputs,model
def plot_decode_cop_voxel(base_cop,plot_file_name):
import plotly.graph_objects as go
import plotly as py
import plotly.express as px
X, Y, Z = np.mgrid[0:len(base_cop), 0:len(base_cop), 0:len(base_cop)]
#input_conv_data[0,:,:,:,0]=0.2
values_cop = base_cop.flatten()
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaled_values=scaler.fit_transform(values_cop.reshape(-1, 1))
trace1=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=scaled_values[:,0],
isomin=0,
isomax=1,
opacity=0.1, # needs to be small to see through all surfaces
surface_count=17, # needs to be a large number for good volume rendering
colorscale='Greens'
)
layout = go.Layout(
margin=dict(
l=0,
r=0,
b=0,
t=0
)
)
data=[trace1]
fig = go.Figure(data=data,layout=layout)
py.offline.plot(fig, filename=plot_file_name)
def plot_decode_cop_dev(nominal_cop,dev_vector,plot_file_name):
import plotly.graph_objects as go
import plotly as py
import plotly.express as px
#input_conv_data[0,:,:,:,0]=0.2
values_cop = dev_vector.flatten()
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaled_values=scaler.fit_transform(values_cop.reshape(-1, 1))
trace1=go.Scatter3d(
x=nominal_cop[:,0],
y=nominal_cop[:,1],
z=nominal_cop[:,2],
#surfacecolor=dev_vector,
hoverinfo="text",
hovertext=dev_vector,
mode='markers',
marker=dict(
showscale=True,
size=12,
#color=scaled_values[:,0],
color=dev_vector, # set color to an array/list of desired values
colorscale='Viridis', # choose a colorscale
opacity=0.6
)
)
layout = go.Layout(
margin=dict(
l=0,
r=0,
b=0,
t=0
)
)
data=[trace1]
fig = go.Figure(data=data,layout=layout)
#print(plot_file_name)
py.offline.plot(fig, filename=plot_file_name)
if __name__ == '__main__':
print('Parsing from Assembly Config File....')
data_type=config.assembly_system['data_type']
application=config.assembly_system['application']
part_type=config.assembly_system['part_type']
part_name=config.assembly_system['part_name']
data_format=config.assembly_system['data_format']
assembly_type=config.assembly_system['assembly_type']
assembly_kccs=config.assembly_system['assembly_kccs']
assembly_kpis=config.assembly_system['assembly_kpis']
voxel_dim=config.assembly_system['voxel_dim']
point_dim=config.assembly_system['point_dim']
voxel_channels=config.assembly_system['voxel_channels']
noise_type=config.assembly_system['noise_type']
mapping_index=config.assembly_system['mapping_index']
system_noise=config.assembly_system['system_noise']
aritifical_noise=config.assembly_system['aritifical_noise']
data_folder=config.assembly_system['data_folder']
kcc_folder=config.assembly_system['kcc_folder']
kcc_files=config.assembly_system['kcc_files']
test_kcc_files=config.assembly_system['test_kcc_files']
print('Parsing from Training Config File')
model_type=cftrain.model_parameters['model_type']
output_type=cftrain.model_parameters['output_type']
batch_size=cftrain.model_parameters['batch_size']
epocs=cftrain.model_parameters['epocs']
split_ratio=cftrain.model_parameters['split_ratio']
optimizer=cftrain.model_parameters['optimizer']
loss_func=cftrain.model_parameters['loss_func']
regularizer_coeff=cftrain.model_parameters['regularizer_coeff']
activate_tensorboard=cftrain.model_parameters['activate_tensorboard']
print('Creating file Structure....')
folder_name=part_type
train_path='../trained_models/'+part_type
pathlib.Path(train_path).mkdir(parents=True, exist_ok=True)
train_path=train_path+'/unet_model_multi_output'
pathlib.Path(train_path).mkdir(parents=True, exist_ok=True)
model_path=train_path+'/model'
pathlib.Path(model_path).mkdir(parents=True, exist_ok=True)
logs_path=train_path+'/logs'
pathlib.Path(logs_path).mkdir(parents=True, exist_ok=True)
plots_path=train_path+'/plots'
pathlib.Path(plots_path).mkdir(parents=True, exist_ok=True)
deployment_path=train_path+'/deploy'
pathlib.Path(deployment_path).mkdir(parents=True, exist_ok=True)
#Objects of Measurement System, Assembly System, Get Inference Data
print('Initializing the Assembly System and Measurement System....')
measurement_system=HexagonWlsScanner(data_type,application,system_noise,part_type,data_format)
vrm_system=VRMSimulationModel(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim,aritifical_noise)
get_data=GetTrainData()
kcc_sublist=cftrain.encode_decode_params['kcc_sublist']
output_heads=cftrain.encode_decode_params['output_heads']
encode_decode_multi_output_construct=config.encode_decode_multi_output_construct
if(output_heads==len(encode_decode_multi_output_construct)):
print("Valid Output Stages and heads")
else:
print("Inconsistent model setting")
#Check for KCC sub-listing
if(kcc_sublist!=0):
output_dimension=len(kcc_sublist)
else:
output_dimension=assembly_kccs
#print(input_conv_data.shape,kcc_subset_dump.shape)
print('Building Unet Model')
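    # NOTE: the next line overrides the KCC sub-listing computed above; remove it
    # if the sub-setting is intended to take effect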
output_dimension=assembly_kccs
input_size=(voxel_dim,voxel_dim,voxel_dim,voxel_channels)
model_depth=cftrain.encode_decode_params['model_depth']
inital_filter_dim=cftrain.encode_decode_params['inital_filter_dim']
dl_model_unet=Encode_Decode_Model(output_dimension)
model=dl_model_unet.encode_decode_3d_multi_output_attention(inital_filter_dim,model_depth,input_size,output_heads,voxel_channels)
print(model.summary())
#sys.exit()
test_input_file_names_x=config.encode_decode_construct['input_test_data_files_x']
test_input_file_names_y=config.encode_decode_construct['input_test_data_files_y']
test_input_file_names_z=config.encode_decode_construct['input_test_data_files_z']
if(activate_tensorboard==1):
        tensorboard_str = 'tensorboard --logdir ' + logs_path
print('Visualize at Tensorboard using ', tensorboard_str)
print('Importing and Preprocessing Cloud-of-Point Data')
point_index=get_data.load_mapping_index(mapping_index)
get_point_cloud=GetPointCloud()
cop_file_name=vc.voxel_parameters['nominal_cop_filename']
cop_file_path='../resources/nominal_cop_files/'+cop_file_name
#Read cop from csv file
print('Importing Nominal COP')
nominal_cop=vrm_system.get_nominal_cop(cop_file_path)
test_input_dataset=[]
test_input_dataset.append(get_data.data_import(test_input_file_names_x,data_folder))
test_input_dataset.append(get_data.data_import(test_input_file_names_y,data_folder))
test_input_dataset.append(get_data.data_import(test_input_file_names_z,data_folder))
#kcc_dataset=get_data.data_import(kcc_files,kcc_folder)
test_input_conv_data, test_kcc_subset_dump_dummy,test_kpi_subset_dump=get_data.data_convert_voxel_mc(vrm_system,test_input_dataset,point_index)
#Saving for Voxel plotting
#voxel_plot=get_point_cloud.getcopdev(test_input_conv_data[0,:,:,:,:],point_index,nominal_cop)
#np.savetxt((logs_path+'/voxel_plot_x_64.csv'),voxel_plot[:,0], delimiter=",")
#np.savetxt((logs_path+'/voxel_plot_y_64.csv'),voxel_plot[:,1], delimiter=",")
#np.savetxt((logs_path+'/voxel_plot_z_64.csv'),voxel_plot[:,2], delimiter=",")
#Test output files
deploy_output=1
if(deploy_output==1):
test_kcc_dataset=get_data.data_import(test_kcc_files,kcc_folder)
if(kcc_sublist!=0):
print("Sub-setting Process Parameters: ",kcc_sublist)
test_kcc_dataset=test_kcc_dataset[:,kcc_sublist]
else:
print("Using all Process Parameters")
Y_out_test_list=[None]
#Y_out_test_list.append(test_kcc_subset_dump)
for encode_decode_construct in encode_decode_multi_output_construct:
#importing file names for model output
print("Importing output data for stage: ",encode_decode_construct)
test_output_file_names_x=encode_decode_construct['output_test_data_files_x']
test_output_file_names_y=encode_decode_construct['output_test_data_files_y']
test_output_file_names_z=encode_decode_construct['output_test_data_files_z']
test_output_dataset=[]
test_output_dataset.append(get_data.data_import(test_output_file_names_x,data_folder))
test_output_dataset.append(get_data.data_import(test_output_file_names_y,data_folder))
test_output_dataset.append(get_data.data_import(test_output_file_names_z,data_folder))
test_output_conv_data, test_kcc_subset_dump,test_kpi_subset_dump=get_data.data_convert_voxel_mc(vrm_system,test_output_dataset,point_index,test_kcc_dataset)
Y_out_test_list[0]=test_kcc_subset_dump
Y_out_test_list.append(test_output_conv_data)
#Pre-processing to point cloud data
unet_deploy_model=Unet_DeployModel()
if(deploy_output==1):
y_pred,model_outputs,model,eval_metrics,accuracy_metrics_df,eval_metrics_cop_list,accuracy_metrics_df_cop_list=unet_deploy_model.unet_run_model(model,test_input_conv_data,model_path,logs_path,plots_path,deploy_output,Y_out_test_list)
print("Predicted Process Parameters...")
print(y_pred)
accuracy_metrics_df.to_csv(logs_path+'/metrics_test_KCC.csv')
np.savetxt((logs_path+'/predicted_process_parameter.csv'), y_pred, delimiter=",")
print("Model Deployment Complete")
print("The Model KCC Validation Metrics are ")
print(accuracy_metrics_df)
accuracy_metrics_df.mean().to_csv(logs_path+'/metrics_test_kcc_summary.csv')
print("The Model KCC metrics summary ")
print(accuracy_metrics_df.mean())
index=1
for accuracy_metrics_df_cop in accuracy_metrics_df_cop_list:
accuracy_metrics_df_cop.to_csv(logs_path+'/metrics_test_cop_'+str(index)+'.csv')
print("The Model Segmentation Validation Metrics are ")
print(accuracy_metrics_df_cop.mean())
accuracy_metrics_df_cop.mean().to_csv(logs_path+'/metrics_test_cop_summary_'+str(index)+'.csv')
print("Plotting Cloud-of-Point for comparison")
part_id=0
y_cop_pred=model_outputs[index]
y_cop_actual=Y_out_test_list[index]
#y_cop_pred_plot=y_cop_pred[part_id,:,:,:,:]
#y_cop_actual_plot=test_input_conv_data[part_id,:,:,:,:]
dev_actual=get_point_cloud.getcopdev(y_cop_actual[part_id,:,:,:,:],point_index,nominal_cop)
dev_pred=get_point_cloud.getcopdev(y_cop_pred[part_id,:,:,:,:],point_index,nominal_cop)
dev_pred_matlab_plot_x=np.zeros((len(y_cop_pred),point_dim))
dev_pred_matlab_plot_y=np.zeros((len(y_cop_pred),point_dim))
dev_pred_matlab_plot_z=np.zeros((len(y_cop_pred),point_dim))
dev_actual_matlab_plot_x=np.zeros((len(y_cop_pred),point_dim))
dev_actual_matlab_plot_y=np.zeros((len(y_cop_pred),point_dim))
dev_actual_matlab_plot_z=np.zeros((len(y_cop_pred),point_dim))
# Saving for Matlab plotting
print("Saving Files for VRM Plotting...")
from tqdm import tqdm
for i in tqdm(range(len(y_cop_pred))):
actual_dev=get_point_cloud.getcopdev(y_cop_actual[i,:,:,:,:],point_index,nominal_cop)
pred_dev=get_point_cloud.getcopdev(y_cop_pred[i,:,:,:,:],point_index,nominal_cop)
dev_pred_matlab_plot_x[i,:]=pred_dev[:,0]
dev_pred_matlab_plot_y[i,:]=pred_dev[:,1]
dev_pred_matlab_plot_z[i,:]=pred_dev[:,2]
dev_actual_matlab_plot_x[i,:]=actual_dev[:,0]
dev_actual_matlab_plot_y[i,:]=actual_dev[:,1]
dev_actual_matlab_plot_z[i,:]=actual_dev[:,2]
np.savetxt((logs_path+'/DX_pred_'+str(index)+'.csv'),dev_pred_matlab_plot_x, delimiter=",")
np.savetxt((logs_path+'/DY_pred_'+str(index)+'.csv'),dev_pred_matlab_plot_y, delimiter=",")
np.savetxt((logs_path+'/DZ_pred_'+str(index)+'.csv'),dev_pred_matlab_plot_z, delimiter=",")
np.savetxt((logs_path+'/DX_actual_'+str(index)+'.csv'),dev_actual_matlab_plot_x, delimiter=",")
np.savetxt((logs_path+'/DY_actual_'+str(index)+'.csv'),dev_actual_matlab_plot_y, delimiter=",")
np.savetxt((logs_path+'/DZ_actual_'+str(index)+'.csv'),dev_actual_matlab_plot_z, delimiter=",")
filenamestr_pred=["/pred_plot_x"+str(index)+".html","/pred_plot_y"+str(index)+".html","/pred_plot_z"+str(index)+".html"]
filenamestr_actual=["/actual_plot_x"+str(index)+".html","/actual_plot_y"+str(index)+".html","/actual_plot_z"+str(index)+".html"]
print("Plotting All components for sample id: ",part_id)
for i in range(3):
pass
#pred Plot
#plot_decode_cop_dev(nominal_cop,dev_pred[:,i],plot_file_name=deployment_path+filenamestr_pred[i])
#plot_decode_cop_dev(nominal_cop,dev_actual[:,i],plot_file_name=deployment_path+filenamestr_actual[i])
index=index+1
from tqdm import tqdm
from cam_viz import CamViz
print("Saving Grad CAM File...")
#Parameters for Gradient Based Class Activation Maps
layers_gradient=["Identity0_1","Identity1_1","Identity2_1","Identity3_1"]
process_parameter_id=0
grad_cam_plot_matlab=np.zeros((len(layers_gradient),point_dim))
for i in tqdm(range(len(layers_gradient))):
        #Under default settings the max process parameter deviations are plotted
# Change here for explicit specification of process parameter
#layer_name="Act1_1"
layer_name=layers_gradient[i]
#print(layer_name)
camviz=CamViz(model,layer_name)
#process_parameter_id=np.argmax(abs(y_pred[i,:]))
cop_input=test_input_conv_data[0:1,:,:,:,:]
fmap_eval, grad_wrt_fmap_eval=camviz.grad_cam_3d(cop_input,process_parameter_id)
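        # Grad-CAM: average the gradients over the batch and 3D spatial axes to get
        # per-channel weights (alpha_k), then take the ReLU of the weighted sum of
        # feature maps and upsample it to the input voxel grid before normalizing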
alpha_k_c= grad_wrt_fmap_eval.mean(axis=(0,1,2,3)).reshape((1,1,1,-1))
Lc_Grad_CAM = np.maximum(np.sum(fmap_eval*alpha_k_c,axis=-1),0).squeeze()
scale_factor = np.array(cop_input.shape[1:4])/np.array(Lc_Grad_CAM.shape)
        from scipy.ndimage import zoom
import tensorflow.keras.backend as K
_grad_CAM = zoom(Lc_Grad_CAM,scale_factor)
arr_min, arr_max = np.min(_grad_CAM), np.max(_grad_CAM)
grad_CAM = (_grad_CAM - arr_min) / (arr_max - arr_min + K.epsilon())
#print(grad_CAM.shape)
grad_cam_plot_matlab[i,:]=get_point_cloud.getcopdev_gradcam(grad_CAM,point_index,nominal_cop)
#Saving File
np.savetxt((logs_path+'/grad_cam_pred_'+layer_name+'.csv'),grad_cam_plot_matlab, delimiter=",")
if(deploy_output==0):
y_pred,y_cop_pred_list,model=unet_deploy_model.unet_run_model(model,test_input_conv_data,model_path,logs_path,plots_path,deploy_output)
print('Predicted KCCs')
print(y_pred)
| nilq/baby-python | python |
#FLM: Calculate GCD of selected glyphs
# Description:
# Calculate the Greatest Common Divisor of selected glyphs
# Credits:
# Pablo Impallari
# http://www.impallari.com
# Dependencies
import fractions
from robofab.world import CurrentFont
# Clear Output windows
from FL import *
fl.output=""
# Function
def gcd(L):
return reduce(fractions.gcd, L)
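# e.g. gcd([460, 500, 520]) returns 20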
f = CurrentFont()
widths = []
rounded = []
glyphs = f.selection  # avoid shadowing the built-in name "list"
items = len(glyphs)
for a in glyphs:
currentWidth = int(f[a].width)
widths.append( currentWidth )
if currentWidth % 2 != 0:
currentWidth = currentWidth + 1
rounded.append( currentWidth )
widths.sort()
rounded.sort()
print "Original widths:"
print widths
print gcd( widths )
print ""
print "Rounded Up widths:"
print rounded
print gcd( rounded )
print ""
print "Done!"
| nilq/baby-python | python |
# Discord Packages
import discord
from discord.ext import commands
# Bot Utilities
from cogs.utils.db import DB
from cogs.utils.db_tools import get_user, get_users
from cogs.utils.defaults import easy_embed
from cogs.utils.my_errors import NoDM
from cogs.utils.server import Server
import asyncio
import operator
import os
import random
import string
import threading
import requests
class Github(commands.Cog):
def __init__(self, bot):
self.bot = bot
cacher = self.Cacher(self)
self.bot.loop.create_task(cacher.loop())
database = DB(data_dir=self.bot.data_dir)
database.populate_tables()
def id_generator(self, size=6, chars=string.ascii_uppercase + string.digits):
return "".join(random.choice(chars) for _ in range(size))
@commands.guild_only()
@commands.group(name="github", aliases=["gh"])
async def ghGroup(self, ctx):
"""
        Command group for GitHub commands
"""
if ctx.invoked_subcommand is None:
await ctx.send_help(ctx.command)
@ghGroup.command(name="auth", aliases=["add", "verify", "verifiser", "koble"])
async def auth(self, ctx):
"""
        Command to link your GitHub account to your Discord user
"""
random_string = self.id_generator()
is_user_registered = self.is_user_registered(ctx.author.id, random_string)
if is_user_registered:
return await ctx.send(ctx.author.mention + " du er allerede registrert!")
try:
embed = easy_embed(self, ctx)
discord_id_and_key = f"{ctx.author.id}:{random_string}"
registration_link = "https://github.com/login/oauth/authorize" \
f"?client_id={self.bot.settings.github['client_id']}" \
f"&redirect_uri={self.bot.settings.github['callback_uri']}" \
f"?params={discord_id_and_key}"
embed.title = "Hei! For å verifisere GitHub kontoen din, følg lenken under"
embed.description = f"[Verifiser med GitHub]({registration_link})"
await ctx.author.send(embed=embed)
await ctx.send(ctx.author.mention + " sender ny registreringslenke på DM!")
await asyncio.sleep(120) # Assume the user uses less than two minutes to auth
self._get_users()
except discord.Forbidden:
raise NoDM
except Exception as E:
            self.bot.logger.warning('Error when verifying Github user:\n%s', E)
@ghGroup.command(name="remove", aliases=["fjern"])
async def remove(self, ctx):
"""
        Command to remove the link between your GitHub and Discord user
"""
conn = DB(data_dir=self.bot.data_dir).connection
cursor = conn.cursor()
cursor.execute(f"DELETE FROM github_users WHERE discord_id={ctx.author.id}")
conn.commit()
return await ctx.send(ctx.author.mention + "fjernet Githuben din.")
@ghGroup.command(name="repos", aliases=["stars", "stjerner"])
async def show_repos(self, ctx, user: discord.Member = None):
"""
        Shows the user's most-starred repos (max 5)
"""
is_self = False
if not user:
user = ctx.author
is_self = True
gh_user = get_user(self, user.id)
if gh_user is None:
usr = user.name
if is_self:
usr = "Du"
return await ctx.send(f"{usr} har ikke registrert en bruker enda.")
embed = easy_embed(self, ctx)
(_id, discord_id, auth_token, github_username) = gh_user
gh_repos = self._get_repos(github_username, auth_token)
if len(gh_repos) == 0:
return await ctx.send("Denne brukeren har ingen repos")
stars = {}
new_obj = {}
for gh_repo in gh_repos:
if gh_repo["private"]:
print(gh_repo["name"])
continue
stars[gh_repo["id"]] = gh_repo["stargazers_count"]
new_obj[gh_repo["id"]] = gh_repo
stars = dict(sorted(stars.items(), key=operator.itemgetter(1), reverse=True))
stop = 5 if (len(stars) >= 5) else len(stars)
idrr = list(stars.items())
embed.title = f"{stop} mest stjernede repoer"
for n in range(0, stop):
repo_id, *overflow = idrr[n]
repo = new_obj[repo_id]
title = f"{repo['name']} - ⭐:{repo['stargazers_count']}"
desc = repo["description"]
if not repo["description"]:
desc = "Ingen beskrivelse oppgitt"
desc += f"\n[Link]({repo['html_url']})"
embed.add_field(name=title, value=desc, inline=False)
await ctx.send(embed=embed)
@ ghGroup.command(name="user", aliases=["meg", "bruker"])
async def show_user(self, ctx, user: discord.Member = None):
"""
        Command that shows a summary of the GitHub user
"""
is_self = False
if not user:
user = ctx.author
is_self = True
gh_user = get_user(self, user.id)
if gh_user is None:
usr = user.name
if is_self:
usr = "Du"
return await ctx.send(f"{usr} har ikke registrert en bruker enda.")
(_id, discord_id, auth_token, github_username) = gh_user
gh_user = requests.get("https://api.github.com/user", headers={
"Authorization": "token " + auth_token,
"Accept": "application/json"
}).json()
embed = easy_embed(self, ctx)
embed.title = gh_user["login"]
embed.description = gh_user["html_url"]
embed.set_thumbnail(url=gh_user["avatar_url"])
embed.add_field(name="Følgere / Følger",
value=f"{gh_user['followers']} / {gh_user['following']}", inline=False)
embed.add_field(name="Biografi", value=gh_user["bio"], inline=False)
embed.add_field(name="Offentlige repos", value=gh_user["public_repos"], inline=False)
return await ctx.send(embed=embed)
@ ghGroup.command(name="combined", aliases=["kombinert"])
async def combined_stars(self, ctx):
"""
        Command that shows the 15 users with the most stars in total
"""
embed = easy_embed(self, ctx)
tot_stars = {}
for repo_ in self.all_repos:
repo = self.all_repos[repo_]
try:
tot_stars[str(repo["discord_user"])] = tot_stars[str(repo["discord_user"])] + repo["stargazers_count"]
except KeyError:
tot_stars[str(repo["discord_user"])] = repo["stargazers_count"]
tot_stars = dict(sorted(tot_stars.items(), key=operator.itemgetter(1), reverse=True))
stop = 15 if (len(tot_stars) >= 15) else len(tot_stars)
idrr = list(tot_stars.items())
embed.title = f"{stop} mest stjernede brukere"
for n in range(0, stop):
discord_user, stars = idrr[n]
title = f"⭐:{stars}"
desc = f"{self.bot.get_user(int(discord_user)).mention}"
embed.add_field(name=title, value=desc, inline=False)
return await ctx.send(embed=embed)
@ ghGroup.command(name="users", aliases=["brukere", "total"])
async def show_users(self, ctx):
"""
        Command that shows the top 10 starred repos across all registered users
"""
embed = easy_embed(self, ctx)
stop = 10 if (len(self.all_stars) >= 10) else len(self.all_stars)
idrr = list(self.all_stars.items())
embed.title = f"{stop} mest stjernede repoer"
for n in range(0, stop):
repo_id, *overflow = idrr[n]
repo = self.all_repos[repo_id]
title = f"{repo['name']} - ⭐:{repo['stargazers_count']}"
desc = repo["description"]
if not repo["description"]:
desc = "Ingen beskrivelse oppgitt"
desc += f"\n[Link]({repo['html_url']}) - {self.bot.get_user(repo['discord_user']).mention}"
embed.add_field(name=title, value=desc, inline=False)
return await ctx.send(embed=embed)
def is_user_registered(self, discord_id, random_string):
conn = DB(data_dir=self.bot.data_dir).connection
if conn is None:
return False
cursor = conn.cursor()
cursor.execute(f"SELECT * FROM github_users WHERE discord_id={discord_id}")
rows = cursor.fetchone()
if rows is not None:
conn.close()
return True
cursor.execute(f"SELECT * FROM pending_users WHERE discord_id={discord_id}")
row = cursor.fetchone()
if row is not None:
cursor.execute(f"DELETE FROM pending_users WHERE discord_id={discord_id}")
cursor.execute("INSERT INTO pending_users(discord_id, verification) VALUES(?, ?);", (discord_id, random_string))
conn.commit()
conn.close()
return False
def _get_repos(self, user, token):
headers = {
"Authorization": "token " + token,
"Accept": "application/json"
}
url = f"https://api.github.com/users/{user}/repos"
res = requests.get(url, headers=headers, params={"per_page": 100, "page": 1})
gh_repos = res.json()
while "next" in res.links.keys():
res = requests.get(res.links["next"]["url"], headers=headers)
gh_repos.extend(res.json())
return gh_repos
def _get_users(self):
self.bot.logger.debug("Running GitHub user fetcher")
self.all_stars = {}
self.all_repos = {}
users = get_users(self)
members = []
for guild in self.bot.guilds:
for member in guild.members:
if member.id in members:
pass
else:
members.append(member.id)
stars = {}
for user in users:
(_id, discord_id, auth_token, github_username) = user
if discord_id not in members:
continue
gh_repos = self._get_repos(github_username, auth_token)
if len(gh_repos) == 0:
continue
for gh_repo in gh_repos:
if gh_repo["private"]:
print(gh_repo["name"])
continue
stars[gh_repo["id"]] = gh_repo["stargazers_count"]
self.all_repos[gh_repo["id"]] = {"discord_user": discord_id, **gh_repo}
self.all_stars = dict(sorted(stars.items(), key=operator.itemgetter(1), reverse=True))
async def remover(self, member):
try:
conn = DB(data_dir=self.bot.data_dir).connection
cursor = conn.cursor()
cursor.execute(f"DELETE FROM github_users WHERE discord_id={member.id}")
conn.commit()
self.bot.logger.info("%s left, purged from database", member.name)
        except Exception:
            # Nothing to purge; the member may not have been registered
            pass
class Cacher():
def __init__(self, bot):
self.bot = bot
async def loop(self):
while True:
self.bot._get_users()
await asyncio.sleep(int(60*60*12))
def check_folder(data_dir):
f = f"{data_dir}/db"
if not os.path.exists(f):
os.makedirs(f)
def start_server(bot):
server = threading.Thread(target=Server, kwargs={"data_dir": bot.data_dir, "settings": bot.settings.github})
server.start()
def setup(bot):
check_folder(bot.data_dir)
start_server(bot)
n = Github(bot)
bot.add_listener(n.remover, "on_member_remove")
bot.add_cog(n)
| nilq/baby-python | python |
from pybrain.structure.modules.linearlayer import LinearLayer
from pybrain.structure.moduleslice import ModuleSlice
from pybrain.structure.connections.identity import IdentityConnection
from pybrain.structure.networks.feedforward import FeedForwardNetwork
from pybrain.structure.connections.shared import MotherConnection, SharedFullConnection
from pybrain.structure.modules.biasunit import BiasUnit
from pybrain.utilities import crossproduct
from pybrain.structure.networks.convolutional import SimpleConvolutionalNetwork
__author__ = 'Tom Schaul, tom@idsia.ch'
class ConvolutionalBoardNetwork(SimpleConvolutionalNetwork):
""" A type of convolutional network, designed for handling game boards.
It pads the borders with a uniform bias input to allow one output per board position.
"""
def __init__(self, boardSize, convSize, numFeatureMaps, **args):
inputdim = 2
FeedForwardNetwork.__init__(self, **args)
inlayer = LinearLayer(inputdim*boardSize*boardSize, name = 'in')
self.addInputModule(inlayer)
# we need some treatment of the border too - thus we pad the direct board input.
x = convSize/2
insize = boardSize+2*x
if convSize % 2 == 0:
insize -= 1
paddedlayer = LinearLayer(inputdim*insize*insize, name = 'pad')
self.addModule(paddedlayer)
# we connect a bias to the padded-parts (with shared but trainable weights).
bias = BiasUnit()
self.addModule(bias)
biasConn = MotherConnection(inputdim)
paddable = []
if convSize % 2 == 0:
xs = range(x)+range(insize-x+1, insize)
else:
xs = range(x)+range(insize-x, insize)
paddable.extend(crossproduct([range(insize), xs]))
paddable.extend(crossproduct([xs, range(x, boardSize+x)]))
for (i, j) in paddable:
self.addConnection(SharedFullConnection(biasConn, bias, paddedlayer,
outSliceFrom = (i*insize+j)*inputdim,
outSliceTo = (i*insize+j+1)*inputdim))
for i in range(boardSize):
inmod = ModuleSlice(inlayer, outSliceFrom = i*boardSize*inputdim,
outSliceTo = (i+1)*boardSize*inputdim)
outmod = ModuleSlice(paddedlayer, inSliceFrom = ((i+x)*insize+x)*inputdim,
inSliceTo = ((i+x)*insize+x+boardSize)*inputdim)
self.addConnection(IdentityConnection(inmod, outmod))
self._buildStructure(inputdim, insize, paddedlayer, convSize, numFeatureMaps)
self.sortModules()
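# Hedged usage sketch (not part of the original module; argument values are
# illustrative): builds a small network for a 9x9 board.
# net = ConvolutionalBoardNetwork(boardSize=9, convSize=3, numFeatureMaps=5)
# print(net)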
| nilq/baby-python | python |
from uuid import uuid4
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy_utils import (
UUIDType,
URLType,
)
db = SQLAlchemy()
class Tag(db.Model):
__tablename__ = 'tag'
object_id = db.Column('id', UUIDType(), primary_key=True, default=uuid4)
value = db.Column(db.String(40))
post = db.relationship('Post', backref='tags')
post_id = db.Column(UUIDType(), db.ForeignKey('post.id'))
def __str__(self) -> str:
return f'Tag {self.value} on {self.post}'
class Reference(db.Model):
__tablename__ = 'reference'
object_id = db.Column('id', UUIDType(), primary_key=True, default=uuid4)
url = db.Column(URLType)
description = db.Column(db.String(300))
post = db.relationship('Post', backref='references')
post_id = db.Column(UUIDType(), db.ForeignKey('post.id'))
def __str__(self) -> str:
return f'Reference to {self.url} on {self.post}'
class Author(db.Model):
__tablename__ = 'author'
object_id = db.Column('id', UUIDType(), primary_key=True, default=uuid4)
name = db.Column(db.String(100), nullable=False)
media_url = db.Column(URLType)
organisation = db.Column(db.String(100))
organisation_url = db.Column(URLType)
def __str__(self) -> str:
return f'Author {self.name}'
class Post(db.Model):
__tablename__ = 'post'
object_id = db.Column('id', UUIDType(), primary_key=True, default=uuid4)
title = db.Column(db.String(100), nullable=False)
date_published = db.Column(db.DateTime(timezone=True), nullable=False)
date_written = db.Column(db.DateTime(timezone=True))
summary = db.Column(db.String(200), nullable=False)
body = db.Column(db.Text, nullable=False)
footer = db.Column(db.String(100), nullable=False)
author = db.relationship('Author', backref='posts')
author_id = db.Column(
UUIDType(),
db.ForeignKey('author.id'),
nullable=False
)
def __str__(self) -> str:
return f'Post {self.title} by {self.author}'
| nilq/baby-python | python |
import os
import json
def main():
files = os.listdir("./processed")
if os.path.isfile("concate.jsonl"):
return
pd = [[],[],[]]
for fn in files:
source = os.path.join("./processed", fn)
with open(source, "r") as f:
d = json.load(f)
pd[2].append(d["geo_code"])
pd[0].append(d['polarity'])
pd[1].append(d["subjectivity"])
with open("test.csv", "w") as f:
f.writelines("polarity,subjectivity,geo\n")
for i in range(len(pd[0])):
for j in range(len(pd)):
f.writelines(str(pd[j][i]))
if j < len(pd) -1:
f.writelines(",")
f.writelines("\n")
if __name__ == "__main__":
main()
| nilq/baby-python | python |
"""
Daily K-line example program
"""
import asyncio
try:
from skcom.receiver import AsyncQuoteReceiver as QuoteReceiver
except ImportError as ex:
    print('SKCOMLib.py has not been generated yet; run python -m skcom.tools.setup once first')
    print('Exception message:', ex)
exit(1)
async def on_receive_kline(kline):
"""
    Handle daily K-line data
"""
    # TODO: in Git Bash this only fires after pressing Ctrl+C
    print('Daily K-line data for [%s %s]' % (kline['id'], kline['name']))
for quote in kline['quotes']:
print(
            '>> Date:%s Open:%.2f Close:%.2f High:%.2f Low:%.2f Volume:%d' % (
quote['date'],
quote['open'],
quote['close'],
quote['high'],
quote['low'],
quote['volume']
)
)
async def main():
"""
main()
"""
qrcv = QuoteReceiver()
    # The second argument is the day-count limit
    # * 0 means no limit: fetch all available history (for first-time data collection)
    # * default is 20: fetch roughly the last month of data
qrcv.set_kline_hook(on_receive_kline, 5)
await qrcv.root_task()
if __name__ == '__main__':
asyncio.run(main())
| nilq/baby-python | python |
#!/usr/bin/env python3
# file://mkpy3_util.py
# Kenneth Mighell
# SETI Institute
def mkpy3_util_str2bool(v):
"""Utility function for argparse."""
import argparse
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
# fi
return None
# fed
def mkpy3_util_accept_str_or_int(v):
"""Utility function for argparse."""
import argparse
if isinstance(v, int):
return str(v)
elif isinstance(v, str):
return v
else:
raise argparse.ArgumentTypeError("str or int value expected.")
# fi
# fed
def mkpy3_util_check_file_exists(filename, overwrite):
"""Utility function."""
import os
import sys
assert isinstance(filename, str)
assert isinstance(overwrite, bool)
msg = "Requested output file already exists (overwrite=False):\n"
if not overwrite:
if os.path.isfile(filename):
print("\n***** ERROR *****\n\n%s" % (msg))
print("new_filename='%s'\n" % filename)
sys.exit(1)
# fi
# fi
# fed
if __name__ == "__main__":
pass
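    # Hedged usage sketch (illustrative, not part of the original file): wiring
    # the helpers into argparse.
    # import argparse
    # parser = argparse.ArgumentParser()
    # parser.add_argument("--overwrite", type=mkpy3_util_str2bool, default=False)
    # parser.add_argument("--frame", type=mkpy3_util_accept_str_or_int, default=0)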
# fi
# EOF
| nilq/baby-python | python |
import asyncio
import os
import sys
from os.path import realpath
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler as EventHandler
from watchdog.events import FileSystemEvent as Event
# Event handler class for watchdog
class Handler(EventHandler):
# Private
_future_resolved = False
# Common filetypes to watch
patterns = ["*.py", "*.txt", "*.aiml", "*.json", "*.cfg", "*.xml", "*.html"]
def __init__(self, loop, *args, **kwargs):
self.loop = loop
        # awaitable future to race on (create_future avoids the deprecated loop= argument)
        self.changed = loop.create_future()
# Continue init for EventHandler
return super(Handler, self).__init__(*args, **kwargs)
def on_any_event(self, event):
# Resolve future
if isinstance(event, Event) and not self._future_resolved:
self.loop.call_soon_threadsafe(self.changed.set_result, event)
self._future_resolved = True
def clear_screen():
if os.name == 'nt':
seq = '\x1Bc'
else:
seq = '\x1B[2J\x1B[H'
sys.stdout.write(seq)
def reload():
""" Reload process """
try:
# Reload and replace current process
os.execv(sys.executable, [sys.executable] + sys.argv)
except OSError:
        # Ugh, that failed
        # Try spawning a new process and exit
os.spawnv(
os.P_NOWAIT,
sys.executable,
[sys.executable] + sys.argv,
)
os._exit(os.EX_OK)
async def run_with_reloader(loop, coroutine, cleanup=None, *args, **kwargs):
""" Run coroutine with reloader """
clear_screen()
print("🤖 Running in debug mode with live reloading")
print(" (don't forget to disable it for production)")
# Create watcher
handler = Handler(loop)
watcher = Observer()
# Setup
path = realpath(os.getcwd())
watcher.schedule(handler, path=path, recursive=True)
watcher.start()
print(" (watching {})".format(path))
# Run watcher and coroutine together
    done, pending = await asyncio.wait(
        [asyncio.ensure_future(coroutine), handler.changed],
        return_when=asyncio.FIRST_COMPLETED)
# Cleanup
cleanup and cleanup()
watcher.stop()
for fut in done:
# If change event, then reload
if isinstance(fut.result(), Event):
print("Reloading...")
reload()
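# Hedged usage sketch (not part of the original module): "serve" is an
# illustrative placeholder coroutine.
# async def serve():
#     while True:
#         await asyncio.sleep(1)
#
# loop = asyncio.get_event_loop()
# loop.run_until_complete(run_with_reloader(loop, serve()))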
| nilq/baby-python | python |
# pip3 install https://github.com/s4w3d0ff/python-poloniex/archive/v0.4.6.zip
from poloniex import Poloniex
polo = Poloniex()
# Ticker:
print(polo('returnTicker')['BTC_ETH'])
# or
print(polo.returnTicker()['BTC_ETH'])
# Public trade history:
print(polo.marketTradeHist('BTC_ETH'))
# Basic Private Setup (Api key/secret required):
import poloniex
polo = poloniex.Poloniex('your-Api-Key-Here-xxxx','yourSecretKeyHere123456789')
# or
polo.key = 'your-Api-Key-Here-xxxx'
polo.secret = 'yourSecretKeyHere123456789'
# Get all your balances
balance = polo.returnBalances()
print("I have %s ETH!" % balance['ETH'])
# or
balance = polo('returnBalances')
print("I have %s BTC!" % balance['BTC'])
# Private trade history:
print(polo.returnTradeHistory('BTC_ETH'))
| nilq/baby-python | python |
"""
In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
<GRID MOVED TO MAIN>
The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20×20 grid?
"""
import math
def greatest_product(grid, n):
grid = [int(x) for x in grid.split()]
side = int(math.sqrt(len(grid)))
if side**2 != len(grid):
# Grid is not a square
return None
def get(x, y):
return grid[x + (y * side)]
num = side - n + 1
    def max_hor():
        r = 0
        for row in range(side):
            for i in range(num):
                tmp = 1
                for j in range(n):
                    tmp *= get(i + j, row)
                if tmp > r:
                    r = tmp
        return r
    def max_ver():
        r = 0
        for col in range(side):
            for i in range(num):
                tmp = 1
                for j in range(n):
                    tmp *= get(col, i + j)
                if tmp > r:
                    r = tmp
        return r
    def max_diag_up():
        r = 0
        for y in range(n - 1, side):
            for x in range(side - n + 1):
                tmp = 1
                for j in range(n):
                    tmp *= get(x + j, y - j)
                if tmp > r:
                    r = tmp
        return r
    def max_diag_down():
        r = 0
        for y in range(side - n + 1):
            for x in range(n - 1, side):
                tmp = 1
                for j in range(n):
                    tmp *= get(x - j, y + j)
                if tmp > r:
                    r = tmp
        return r
return max(max_hor(), max_ver(), max_diag_up(), max_diag_down())
if __name__ == "__main__":
grid = """
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
"""
print(greatest_product(grid, 4))
| nilq/baby-python | python |
import unittest #importing unittest module
from credential import Credential # importing class Credential
import pyperclip # importing pyperclip module
class TestCredential(unittest.TestCase):
"""
Test class that defines the test cases for the credential class behaviours
Args:
unittest.TestCase: TestCase class that helps in creating test cases
"""
def setUp(self):
"""
Set up method to run before each test case.
"""
self.new_credential = Credential("Peter","Instagram", "2019")
def tearDown(self):
"""
Tear down method that cleans up after each test case has run
"""
Credential.credentials = []
def test_init(self):
"""
test_init test case to test whether the object is correctly instantiated
"""
self.assertEqual(self.new_credential.username, "Peter")
self.assertEqual(self.new_credential.accountname, "Instagram")
self.assertEqual(self.new_credential.password, "2019")
def test_save_credential(self):
"""
test_save_credential test case to check whether credential is successfully saved
"""
self.new_credential.save_credential()
self.assertEqual(len(Credential.credentials), 1)
def test_save_multiple_credentials(self):
"""
test_save_multiple_credentials test case to check whether a user can save multiple credentials
"""
self.new_credential.save_credential()
        test_credential = Credential("Peter", "Instagram", "2019")
test_credential.save_credential()
self.assertEqual(len(Credential.credentials), 2)
def test_delete_credential(self):
"""
test_delete_credential test case to test if user can delete an
already saved credential
"""
self.new_credential.save_credential()
test_credential = Credential("Peter", "Instagram","2019")
test_credential.save_credential()
test_credential.delete_credential()
self.assertEqual(len(Credential.credentials),1)
def test_find_credential_by_accountname(self):
"""
test_find_credential_by_accountname testcase to test if user is able to search for an a saved credential
by its accountname
"""
self.new_credential.save_credential()
test_credential = Credential("Peter", "Instagram", "2019")
test_credential.save_credential()
found_credential = Credential.find_accountname("Instagram")
self.assertEqual(found_credential.accountname, test_credential.accountname)
def test_credential_exists(self):
"""
test_credential_exists test case to check whether a credential exists within credentials saved
"""
self.new_credential.save_credential()
test_credential = Credential("Peter", "Instagram", "2019")
test_credential.save_credential()
credential_exists = Credential.credential_exists("Instagram")
self.assertTrue(credential_exists)
def test_display_all_credentials(self):
"""
test_display_all_credentials test case to test whether a user is able to view all the credentials they have saved within
password locker
"""
self.new_credential.save_credential()
test_credential = Credential("Peter", "Instagram", "2019")
test_credential.save_credential()
self.assertEqual(Credential.display_credentials(), Credential.credentials)
def test_copy_username(self):
"""
test_copy_username to test if user can copy their username to their machine clipboard
"""
self.new_credential.save_credential()
Credential.copy_accountname("Instagram")
self.assertEqual(self.new_credential.username, pyperclip.paste())
def test_copy_accountname(self):
"""
test_copy_accountname to test if user can copy their accountname to their machine clipboard
"""
self.new_credential.save_credential()
Credential.copy_accountname("Instagram")
self.assertEqual(self.new_credential.accountname,pyperclip.paste())
def test_copy_password(self):
"""
test_copy_password to test if user can copy their password to their machine clipboard
"""
self.new_credential.save_credential()
Credential.copy_password("Pinterest")
self.assertEqual(self.new_credential.password,pyperclip.paste())
if __name__ == '__main__':
unittest.main()
| nilq/baby-python | python |
class KeystoneAuthException(Exception):
""" Generic error class to identify and catch our own errors. """
pass
| nilq/baby-python | python |
import copy
import os
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from torch_geometric.utils import to_networkx
def draw_nx_graph(G, name='Lobster', path='./visualization/train_nxgraph/'):
fig = plt.figure(figsize=(12,12))
ax = plt.subplot(111)
ax.set_title(name, fontsize=10)
nx.draw(G)
if not os.path.exists(path):
os.makedirs(path)
save_name = path + name + '.png'
plt.savefig(save_name, format="PNG")
plt.close()
def draw_pyg_graph(G, name='Lobster', path='./visualization/train_pyggraph/'):
fig = plt.figure(figsize=(12,12))
ax = plt.subplot(111)
ax.set_title(name, fontsize=10)
nx_graph = to_networkx(G)
if not os.path.exists(path):
os.makedirs(path)
save_name = path + name + '.png'
nx.draw(nx_graph)
plt.savefig(save_name, format="PNG")
plt.close()
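# Hedged usage sketch (not part of the original module): draws a tiny PyG graph;
# the triangle below is an illustrative assumption.
# import torch
# from torch_geometric.data import Data
# edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]], dtype=torch.long)
# draw_pyg_graph(Data(edge_index=edge_index, num_nodes=3), name='Triangle')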
def draw_graph_list(G_list, row, col, fname='exp/gen_graph.png',
layout='spring', is_single=False, k=1, node_size=55,
alpha=1, width=1.3):
os.makedirs(os.path.dirname(fname), exist_ok=True)
plt.switch_backend('agg')
for i, G in enumerate(G_list):
plt.subplot(row, col, i + 1)
plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
# plt.axis("off")
# turn off axis label
plt.xticks([])
plt.yticks([])
if layout == 'spring':
pos = nx.spring_layout( G, k=k / np.sqrt(G.number_of_nodes()),
iterations=100)
elif layout == 'spectral':
pos = nx.spectral_layout(G)
if is_single:
# node_size default 60, edge_width default 1.5
nx.draw_networkx_nodes( G, pos, node_size=node_size,
node_color='#336699', alpha=1, linewidths=0,
font_size=0)
nx.draw_networkx_edges(G, pos, alpha=alpha, width=width)
else:
nx.draw_networkx_nodes( G, pos, node_size=1.5,
node_color='#336699', alpha=1,
linewidths=0.2, font_size=1.5)
nx.draw_networkx_edges(G, pos, alpha=0.3, width=0.2)
plt.tight_layout()
plt.savefig(fname, dpi=300)
plt.close()
def draw_graph_list_separate(G_list, fname='exp/gen_graph', layout='spring',
is_single=False, k=1, node_size=55, alpha=1,
width=1.3):
for i, G in enumerate(G_list):
plt.switch_backend('agg')
plt.axis("off")
# turn off axis label
# plt.xticks([])
# plt.yticks([])
if layout == 'spring':
pos = nx.spring_layout( G, k=k / np.sqrt(G.number_of_nodes()),
iterations=100)
elif layout == 'spectral':
pos = nx.spectral_layout(G)
if is_single:
# node_size default 60, edge_width default 1.5
nx.draw_networkx_nodes( G, pos, node_size=node_size,
node_color='#336699', alpha=1, linewidths=0,
font_size=0)
nx.draw_networkx_edges(G, pos, alpha=alpha, width=width)
else:
nx.draw_networkx_nodes( G, pos, node_size=1.5,
node_color='#336699', alpha=1,
linewidths=0.2, font_size=1.5)
nx.draw_networkx_edges(G, pos, alpha=0.3, width=0.2)
plt.draw()
plt.tight_layout()
plt.savefig(fname+'_{:03d}.png'.format(i), dpi=300)
plt.close()
def gran_vis(args):
num_col = args.vis_num_row
num_row = int(np.ceil(args.num_vis / num_col))
test_epoch = args.dataset
test_epoch = test_epoch[test_epoch.rfind('_') + 1:test_epoch.find('.pth')]
    save_gen_base = './visualization/gen_plots/' + args.dataset + '/'
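    # NOTE: flow_name, decoder_name, graphs_gen and train_loader are not defined
    # in this module; they are assumed to come from the calling script's scope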
save_gen_plots = save_gen_base + args.model + str(args.z_dim) + '_' \
+ flow_name + '_' + decoder_name + '/'
save_name = os.path.join(save_gen_plots,
'{}_gen_graphs_epoch_{}_block_{}_stride_{}.png'.format(args.model,
test_epoch,
args.block_size,
args.stride))
# remove isolated nodes for better visulization
graphs_pred_vis = [copy.deepcopy(gg) for gg in graphs_gen[:args.num_vis]]
if args.better_vis:
for gg in graphs_pred_vis:
gg.remove_nodes_from(list(nx.isolates(gg)))
# display the largest connected component for better visualization
vis_graphs = []
for gg in graphs_pred_vis:
CGs = [gg.subgraph(c) for c in nx.connected_components(gg)]
CGs = sorted(CGs, key=lambda x: x.number_of_nodes(), reverse=True)
vis_graphs += [CGs[0]]
if args.is_single_plot:
draw_graph_list(vis_graphs, num_row, num_col, fname=save_name, layout='spring')
else:
draw_graph_list_separate(vis_graphs, fname=save_name[:-4], is_single=True, layout='spring')
save_name = os.path.join(save_gen_plots, 'train_graphs.png')
if args.is_single_plot:
draw_graph_list(train_loader.dataset[:args.num_vis], num_row, num_col,
fname=save_name, layout='spring')
else:
draw_graph_list_separate(train_loader.dataset[:args.num_vis],
fname=save_name[:-4], is_single=True,
layout='spring')
| nilq/baby-python | python |
from .xgb import XgbParser
from .lgb import LightgbmParser
from .pmml import PmmlParser
| nilq/baby-python | python |
from . bitbucket import BitBucket
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
import pickle
from os import path, makedirs
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
import io
import pathlib
from datetime import datetime
import json
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly',
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/documents.readonly',
'https://www.googleapis.com/auth/spreadsheets.readonly']
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
creds = None
if path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in from
# his default browser
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file ('credentials.json',
SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next log in so we don't need to authorize
# every time we execute this code
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('drive', 'v3', credentials=creds)
sheetsService = build('sheets', 'v4', credentials=creds)
docsService = build('docs', 'v1', credentials=creds)
# Checks if config.json exists
# TODO: Check if all necessary keys exists inside json file
if not path.exists('config.json'):
    raise Exception('You need to provide a config.json')
with open('config.json', encoding='utf-8') as fh:
config = json.load(fh)
DATABASE_SHEET = config['DATABASE_SHEET']
DEFAULT_FOLDER = config['DEFAULT_FOLDER']
sheet = sheetsService.spreadsheets()
# Count how many columns
values = sheet.values().get(spreadsheetId=DATABASE_SHEET,
range="A1:Z1",
majorDimension="COLUMNS").execute()['values']
column_quantity = len(values)
# Convert the column count to a letter (1=a, 2=b, 3=c, ...); note this
# simple scheme only covers single letters, i.e. up to 26 columns
column_in_char = chr(column_quantity + 96)
# Get all rows in the database
values = sheet.values().get(spreadsheetId=DATABASE_SHEET,
range=f"A2:{column_in_char}999",
majorDimension="ROWS").execute()['values']
# Every row needs the same number of columns, so pad any short rows
# with empty strings
for row in values:
while len(row) < column_quantity:
row.append("")
# TODO: Put that on the config.json
# Backup of database (folder name)
BACKUP_PATH="backup"
# Backup of generated PDF's (folder name)
BACKUP_PATH_PDF="backup-pdf"
# Create path if doesn't exist yet
# it will create at same path of this code
if not path.exists(BACKUP_PATH):
makedirs(BACKUP_PATH)
# Write a database backup named with month.day_hour_minute_second so it
# doesn't conflict with other backups
curr_time = datetime.now()
file_name = f"{curr_time.month}.{curr_time.day}_{curr_time.hour}_{curr_time.minute}_{curr_time.second}"
with open(path.join(pathlib.Path().absolute(), BACKUP_PATH, f"{file_name}.bkp"), 'w', encoding='utf-8') as file:
file.write(str(values))
# For each row in the database (ignore the first one, based on query)
for index, value in enumerate(values):
    # Normalize the date and time format (otherwise the "/" characters
    # can cause conflicts in file names)
date = str(value[2]).replace("/", "-")
created_at = str(value[0]).replace("/", "-")
area = str(value[1])
    # Create a default title in the format: [DATE]_[CREATED_AT]_[AREA]
documentTitle = f"{date}_{created_at}_{area}".replace(' ','').replace(':','x')
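    # e.g. documentTitle == "12-05-2021_01-05-2021_Finance" (hypothetical values)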
print(f"Using title: {documentTitle}")
    # Check if there is already a document with this title
results = service.files().list(q = f"'{DEFAULT_FOLDER}' in parents and name='{documentTitle}' and trashed = false", pageSize=1, fields="nextPageToken, files(id, name)").execute()
items = results.get('files', [])
print(f"Found: {str(items)}")
    # If one already exists, don't create another
if (len(items) > 0):
continue
# Else, create one using database information
else:
# Relations between area and Document ID for template
# TODO: Change it to list comprehension
areas = []
for templateFile in config['TEMPLATE_FILES_ID']:
            # For each template file, record its name and ID so we can map
            # every template available on Drive
            areas.append((templateFile['name'], templateFile['id']))
# TODO: Change it to list comprehension
textReplacementsToDo = []
for fieldIndex, field in enumerate(config['DATABASE_FIELDS_REPRESENTATION']):
            # Pair each field's placeholder with the corresponding database
            # column value so it can be replaced in the document
            textReplacementsToDo.append([field, values[index][fieldIndex]])
# Create a file using the template based on area
body = {
'name': documentTitle,
'parents': [
DEFAULT_FOLDER
]
}
        # Get the template file ID
        templateFileId = [x[1] for x in areas if x[0] == area]
        if templateFileId and templateFileId[0] != '':
            templateFileId = templateFileId[0]
        else:
            raise Exception(f"There is no template for area: {area}")
currentDocument = service.files().copy(fileId=templateFileId, body=body).execute()
currentDocumentId = currentDocument.get('id')
# Do some replacements on placeholder words to database values
requests = [{
'replaceAllText': {
'containsText': {
'text': replacement[0],
'matchCase': 'true'
},
'replaceText': replacement[1]
}
} for replacement in textReplacementsToDo]
docsService.documents().batchUpdate(documentId = currentDocumentId, body={'requests': requests}).execute()
print("Downloading files...")
# Creates backup folder if doesn't exist yet
if not path.exists(BACKUP_PATH_PDF):
makedirs(BACKUP_PATH_PDF)
responses = service.files().list(q = f"'{DEFAULT_FOLDER}' in parents and trashed = false", fields="nextPageToken, files(id,name)").execute()
for file in responses.get('files', []):
    exists = path.exists(path.join(BACKUP_PATH_PDF, f"{file['name']}.pdf"))
# Check if we already downloaded this file
if exists:
continue
request = service.files().export_media(fileId=file.get('id'),
mimeType='application/pdf')
fh = io.FileIO(path.join(pathlib.Path().absolute(), BACKUP_PATH_PDF, f"{file.get('name')}.pdf"), 'wb')
downloader = MediaIoBaseDownload(fh, request)
    done = False
    while not done:
        # next_chunk() returns a (status, done) tuple; unpack it so the
        # loop actually terminates once the download completes
        status, done = downloader.next_chunk()
# TODO: Merge everything to only one document
# TODO: Make this code a class
# if __name__ == '__main__':
# main()
|
nilq/baby-python
|
python
|
import unittest
from conjur.data_object.user_input_data import UserInputData
class UserInputDataTest(unittest.TestCase):
def test_user_input_data_constructor(self):
mock_action = None
mock_user_id = None
mock_new_password = None
user_input_data = UserInputData(action=mock_action, id=mock_user_id, new_password=mock_new_password)
        self.assertEqual(user_input_data.action, mock_action)
        self.assertEqual(user_input_data.user_id, mock_user_id)
        self.assertEqual(user_input_data.new_password, mock_new_password)
    '''
    Verifies that the proper dictionary is printed when action is rotate-api-key
    '''
def test_user_input_data_rotate_api_key_is_printed_as_dict_properly(self):
EXPECTED_REP_OBJECT={'action': 'rotate-api-key', 'id': 'someuser'}
mock_user_input_data = UserInputData(action='rotate-api-key', id='someuser', new_password=None)
        rep_obj = repr(mock_user_input_data)
        self.assertEqual(str(EXPECTED_REP_OBJECT), rep_obj)
    '''
    Verifies that the proper dictionary is printed when action is change-password
    '''
def test_user_input_data_change_password_is_printed_as_dict_properly(self):
EXPECTED_REP_OBJECT={'action': 'change-password', 'new_password': '****'}
mock_user_input_data = UserInputData(action='change-password', id=None, new_password='somepassword')
        rep_obj = repr(mock_user_input_data)
        self.assertEqual(str(EXPECTED_REP_OBJECT), rep_obj)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
######################################################################
## Author: Carl Schaefer, Smithsonian Institution Archives
######################################################################
import re
import wx
import wx.lib.scrolledpanel as scrolled
import db_access as dba
import dm_common as dmc
import dm_wx
from dm_wx import FRAME_WIDTH, FRAME_HEIGHT
import message_list
####################################################################
## MessageParams
####################################################################
class SearchParams ():
##################################################################
def __init__ (self,
global_id="",
date_from="",
date_to="",
folder="",
from_line="",
to_line="",
cc_line="",
bcc_line="",
replies="",
subject="",
attachment="",
body="",
body_search_type="",
selected_status="",
sort_order=""):
self.global_id = global_id
self.date_from = date_from
self.date_to = date_to
self.from_line = from_line
self.to_line = to_line
self.cc_line = cc_line
self.bcc_line = bcc_line
self.replies = replies
self.subject = subject
self.folder = folder
    self.body = body
    self.attachment = attachment
self.body_search_type = body_search_type
self.selected_status = selected_status
self.sort_order = sort_order
self.params = [
("Selected", selected_status),
("Global ID", global_id),
("Date From", date_from),
("Date To", date_to),
("From", from_line),
("To", to_line),
("Cc", cc_line),
("Bcc", bcc_line),
("Replies", replies),
("Subject", subject),
("Folder", folder),
("Attachment Name", attachment),
("Body Search", body),
("Plain/HTML", body_search_type),
("Sort Order", sort_order) ]
##################################################################
def params_text (self):
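    """Render the non-empty search parameters as one display string,
    e.g. (hypothetical): Date From="2014-01", Subject="budget"."""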
plist = []
for (label, value) in self.params:
if value:
if not self.body and label == "Plain/HTML":
continue
plist.append(label + '="' + value + '"')
return ", ".join(plist)
####################################################################
## MessageSearch
####################################################################
class MessageSearch (scrolled.ScrolledPanel):
variable_names = [
"global_id",
"date_from",
"date_to",
"folder_select",
"subject",
"from_line",
"to_line",
"cc_line",
"attachment",
"body",
"plain_cb",
"html_cb",
"any_rb",
"sel_rb",
"unsel_rb",
"oldest_rb",
"newest_rb"
]
name2default = {
"global_id" : "",
"date_from" : "",
"date_to" : "",
"folder_select" : 0,
"subject" : "",
"from_line" : "",
"to_line" : "",
"cc_line" : "",
"body" : "",
"attachment" : "",
"plain_cb" : True,
"html_cb" : False,
"any_rb" : True,
"sel_rb" : False,
"unsel_rb" : False,
"oldest_rb" : True,
"newest_rb" : False
}
name2component = {}
account = None
account_id = None
cnx = None
browse = None
browse_notebook = None
results = None
results_notebook = None
global_id = None
date_from = None
date_to = None
folder = None
subject = None
from_line = None
to_line = None
cc_line = None
attachment = None
body = None
plain_cb = None
html_cb = None
any_rb = None
sel_rb = None
unsel_rb = None
oldest_rb = None
newest_rb = None
selected_status = None # values: "any", "selected", "unselected"
####################################################################
def __init__ (self, parent):
    scrolled.ScrolledPanel.__init__ (self, parent=parent)
normal_font_size = self.GetFont().GetPointSize() # get the current size
bigger_font_size = normal_font_size + 3
grid = wx.FlexGridSizer(cols=2)
aname = wx.StaticText(self, label="Sort Order")
rb_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.name2component["oldest_rb"] = oldest_rb = \
wx.RadioButton(self, label=" Oldest first", name="oldest_rb",
style=wx.RB_GROUP)
self.name2component["newest_rb"] = newest_rb = \
wx.RadioButton(self, label=" Newest first ", name="newest_rb")
rb_sizer.Add(oldest_rb, 0, wx.RIGHT|wx.LEFT, 10)
rb_sizer.Add(newest_rb, 0, wx.RIGHT|wx.LEFT, 10)
grid.Add(aname, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
grid.Add(rb_sizer, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
aname = wx.StaticText(self, label="Message status")
rb_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.name2component["any_rb"] = any_rb = \
wx.RadioButton(self, label=" Any ", name="any_rb",
style=wx.RB_GROUP)
self.name2component["sel_rb"] = sel_rb = \
wx.RadioButton(self, label=" Selected ", name="sel_rb")
self.name2component["unsel_rb"] = unsel_rb = \
wx.RadioButton(self, label=" Unselected ", name="unsel_rb")
rb_sizer.Add(any_rb, 0, wx.RIGHT|wx.LEFT, 10)
rb_sizer.Add(sel_rb, 0, wx.RIGHT|wx.LEFT, 10)
rb_sizer.Add(unsel_rb, 0, wx.RIGHT|wx.LEFT, 10)
grid.Add(aname, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
grid.Add(rb_sizer, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
aname = wx.StaticText(self, label="Global Id")
self.name2component["global_id"] = aval = \
wx.TextCtrl(self, name="global_id", size=(400, -1))
grid.Add(aname, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
grid.Add(aval, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
aname = wx.StaticText(self, label="Date From")
self.name2component["date_from"] = aval = \
wx.TextCtrl(self, name="date_from", size=(200, -1))
grid.Add(aname, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
grid.Add(aval, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
aname = wx.StaticText(self, label="Date To")
self.name2component["date_to"] = aval = \
wx.TextCtrl(self, name="date_to", size=(200, -1))
grid.Add(aname, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
grid.Add(aval, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
aname = wx.StaticText(self, label="Folder")
self.name2component["folder_select"] = aval = \
wx.ComboBox(self, style=wx.CB_DROPDOWN,
choices=["[ALL FOLDERS"], name="folder_select")
grid.Add(aname, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
grid.Add(aval, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
aname = wx.StaticText(self, label="Subject Line")
self.name2component["subject"] = aval = \
wx.TextCtrl(self, name="subject", size=(400, -1))
grid.Add(aname, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
grid.Add(aval, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
aname = wx.StaticText(self, label="From Line")
self.name2component["from_line"] = aval = \
wx.TextCtrl(self, name="from_line", size=(200, -1))
grid.Add(aname, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
grid.Add(aval, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
aname = wx.StaticText(self, label="To Line")
self.name2component["to_line"] = aval = \
wx.TextCtrl(self, name="to_line", size=(200, -1))
grid.Add(aname, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
grid.Add(aval, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
aname = wx.StaticText(self, label="Cc Line")
self.name2component["cc_line"] = aval = \
wx.TextCtrl(self, name="cc_line", size=(200, -1))
grid.Add(aname, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
grid.Add(aval, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
aname = wx.StaticText(self, label="Attachment Name")
self.name2component["attachment"] = aval = \
wx.TextCtrl(self, name="attachment", size=(200, -1))
grid.Add(aname, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
grid.Add(aval, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
aname = wx.StaticText(self, label="Body Text")
self.name2component["body"] = aval = \
wx.TextCtrl(self, name="body", size=(400, -1))
grid.Add(aname, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
grid.Add(aval, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
cb_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.name2component["plain_cb"] = plain_cb = \
wx.CheckBox(self, name="plain_cb", label="text/plain")
self.name2component["html_cb"] = html_cb = \
wx.CheckBox(self, name="html_cb", label="text/html")
cb_sizer.Add(wx.StaticText(self, label="Search body text:"))
cb_sizer.Add(plain_cb, 0, wx.RIGHT|wx.LEFT, 10)
cb_sizer.Add(html_cb, 0, wx.LEFT, 10)
grid.Add((5,5))
grid.Add(cb_sizer, 0, wx.ALIGN_LEFT|wx.TOP|wx.RIGHT, 5)
box = wx.StaticBoxSizer(wx.StaticBox(self), wx.VERTICAL)
box.Add(grid, 1, wx.EXPAND)
hz = wx.BoxSizer(wx.HORIZONTAL)
hz.Add(dm_wx.ActionButtons(self, "Search for Messages"), 0)
sizer = wx.BoxSizer(orient=wx.VERTICAL)
sizer.Add((FRAME_WIDTH, 10))
sizer.Add(box, 0, wx.ALIGN_CENTER)
sizer.Add((FRAME_WIDTH, 10))
sizer.Add(hz, 0, wx.ALIGN_CENTER)
self.SetSizer(sizer)
self.SetupScrolling()
self.ResetVariables()
self.name2component["reset_button"].Bind(wx.EVT_BUTTON, \
self.ExecuteReset)
self.name2component["go_button"].Bind(wx.EVT_BUTTON, \
self.ValidateVariablesAndGo)
####################################################################
def OnPageSelect (self):
# this is called when accounts.set_account() is called
(account_id, account_name, account_dir) = \
self.acp.get_account()
fs = self.name2component["folder_select"]
fs.Clear()
fs.Append("ALL FOLDERS")
if account_id:
new_choices = \
dba.get_folder_names_for_account(self.cnx, account_id)
for c in sorted(new_choices):
fs.Append(c)
fs.SetSelection(0)
self.Layout()
####################################################################
def ResetVariables (self):
for v in self.variable_names:
if v == "folder_select":
self.name2component[v].SetSelection(self.name2default[v])
else:
self.name2component[v].SetValue(self.name2default[v])
self.Layout()
####################################################################
def ExecuteReset (self, event):
self.ResetVariables()
self.GetParent().SetFocus()
####################################################################
def validate_date (self, date):
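    """Return True for dates like '2014', '2014-03', or '2014-03-15'."""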
    m = re.match(r"^\d{4}(-\d{2}(-\d{2})?)?$", date)
if m:
return True
else:
return False
####################################################################
def validate_date_to (self, date):
if not date:
return ""
elif self.validate_date(date):
if len(date) == 10:
return date
elif len(date) == 7:
return date + "-31"
elif len(date) == 4:
return date + "-12-31"
else:
return None
####################################################################
def validate_date_from (self, date):
if not date:
return ""
elif self.validate_date(date):
if len(date) == 10:
return date
elif len(date) == 7:
return date + "-01"
elif len(date) == 4:
return date + "-01-01"
else:
return None
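  # Partial dates are completed to full days, e.g. '2014' -> '2014-01-01'
  # here, while the date_to variant above pads to the month/year end.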
####################################################################
def ValidateVariablesAndGo (self, event):
ready = True
if not self.acp.account_is_set():
md = wx.MessageDialog(parent=self, message="Before searching for " + \
"addresses or messages, you must load an account",
caption="Default account not set",
style=wx.OK|wx.ICON_EXCLAMATION)
retcode = md.ShowModal()
ready = False
self.browse.switch_to_account_search()
return
self.body_search_type = "both"
self.global_id = self.name2component["global_id"].GetValue().strip()
self.date_from = self.name2component["date_from"].GetValue().strip()
self.date_to = self.name2component["date_to"].GetValue().strip()
self.folder_select = \
self.name2component["folder_select"].GetCurrentSelection()
if self.folder_select > 0:
self.folder = \
self.name2component["folder_select"].GetString(self.folder_select)
else:
self.folder = ""
self.from_line = self.name2component["from_line"].GetValue().strip()
self.to_line = self.name2component["to_line"].GetValue().strip()
self.cc_line = self.name2component["cc_line"].GetValue().strip()
self.subject = self.name2component["subject"].GetValue().strip()
self.body = self.name2component["body"].GetValue().strip()
self.attachment = self.name2component["attachment"].GetValue().strip()
self.any_rb = self.name2component["any_rb"].GetValue()
self.sel_rb = self.name2component["sel_rb"].GetValue()
self.unsel_rb = self.name2component["unsel_rb"].GetValue()
self.oldest = self.name2component["oldest_rb"].GetValue()
self.newest = self.name2component["newest_rb"].GetValue()
self.selected_status = "any"
if self.sel_rb:
self.selected_status = "selected"
elif self.unsel_rb:
self.selected_status = "unselected"
self.plain_cb = self.name2component["plain_cb"].GetValue()
self.html_cb = self.name2component["html_cb"].GetValue()
if self.plain_cb and self.html_cb:
self.body_search_type = "both"
elif self.plain_cb:
self.body_search_type = "plain"
elif self.html_cb:
self.body_search_type = "html"
else:
if self.body:
md = wx.MessageDialog(parent=self,
message="If you specify a body search string, " + \
"they you must check at " + \
"at least one of the search types: text/plain or text/html",
caption="Error",
style=wx.OK|wx.ICON_EXCLAMATION)
retcode = md.ShowModal()
ready = False
self.date_from = self.validate_date_from(self.date_from)
    if self.date_from is None:
md = wx.MessageDialog(parent=self,
message="Date must be like '2014' or '2014-03' or '2014-03-15'",
caption="Error",
style=wx.OK|wx.ICON_EXCLAMATION)
retcode = md.ShowModal()
ready = False
self.date_to = self.validate_date_to(self.date_to)
    if self.date_to is None:
md = wx.MessageDialog(parent=self,
message="Date must be like '2014' or '2014-03' or '2014-03-15'",
caption="Error",
style=wx.OK|wx.ICON_EXCLAMATION)
retcode = md.ShowModal()
ready = False
if ready:
self.sort_order = "newest" if self.newest else "oldest"
self.bcc_line = "" # only from address_info page
self.replies_to = "" # only from Get Replies on message_info page
self.search_params = SearchParams(
self.global_id,
self.date_from,
self.date_to,
self.folder,
self.from_line,
self.to_line,
self.cc_line,
self.bcc_line,
self.replies_to,
self.subject,
self.attachment,
self.body,
self.body_search_type,
self.selected_status,
self.sort_order
)
self.search_message()
######################################################################
def search_message (self):
    (account_id, account_name, account_dir) = \
      self.acp.get_account()
message_info = dba.search_message(self.cnx,
account_id, self.search_params)
if len(message_info) == 0:
md = wx.MessageDialog(parent=self,
message="No messages matching search criteria",
caption="No data",
style=wx.OK|wx.ICON_EXCLAMATION)
retcode = md.ShowModal()
else:
self.results.page_id = self.results.page_id + 1
message_list.MessageList(self.browse, self.acp, self.results_notebook,
self.cnx, message_info, self.search_params)
self.browse.switch_to_results()
|
nilq/baby-python
|
python
|
from django.urls import path
from .views import (
FlightListView,
FlightDetailView,
FlightUpdateView,
HomePageView,
search_results_view,
contact_view,
FlightCreateView,
FlightDeleteView,
AllFlightView,
EachFlightDetail,
)
urlpatterns = [
path('flights/list/', FlightListView.as_view(), name='flights_list'),
path("flight/<int:pk>/detail/", FlightDetailView.as_view(), name="flight_detail"),
path("", HomePageView.as_view(), name="home_page"),
path("search/results/", search_results_view, name="search_results"),
path("contact/", contact_view, name="contact_form"),
]
# Flight CRUD urls
urlpatterns += [
path('flight/create/', FlightCreateView.as_view(), name="flight-create"),
path('flight/<int:pk>/update/', FlightUpdateView.as_view(), name="flight-update"),
path('flight/<int:pk>/delete/', FlightDeleteView.as_view(), name="flight-delete"),
]
urlpatterns += [
path('flyways/flights/list', AllFlightView.as_view(), name="admin-flights"),
path("flyways/flights/<int:pk>/detail/", EachFlightDetail.as_view(), name="admin-flight-details"),
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#encoding: utf-8
#####################################################################
########################## Global Variables #########################
#####################################################################
## Define any global variables here that do not need to be changed ##
#####################################################################
#####################################################################
import os
import re
try:
import ConfigParser
except:
import configparser as ConfigParser
# relo version
VERSION = (0, 6, 'beta')
def get_version():
return '%s.%s' % (VERSION[0], VERSION[1])
def get_long_version():
return '%s.%s %s' % (VERSION[0], VERSION[1], VERSION[2])
# relo installer root path
INSTALLER_ROOT = os.path.dirname(os.path.abspath(__file__))
###### Root #####
# relo root path
ROOT = os.environ.get("RELO_ROOT")
if not ROOT:
ROOT = os.path.join(os.environ["HOME"], ".relo")
# directories
PATH_ETC = os.path.join(ROOT, 'etc')
PATH_BIN = os.path.join(ROOT, 'bin')
PATH_LOG = os.path.join(ROOT, 'log')
PATH_SCRIPTS = os.path.join(ROOT, 'scripts')
# files
PATH_BIN_RELO = os.path.join(PATH_BIN, 'relo')
PATH_ETC_CONFIG = os.path.join(PATH_ETC, 'config.cfg')
##### Home #####
# relo home path
PATH_HOME = os.environ.get("RELO_HOME")
if not PATH_HOME:
PATH_HOME = os.path.join(os.environ["HOME"], ".relo")
# directories
PATH_HOME_ETC = os.path.join(PATH_HOME, 'etc')
# files
##### Config #####
class ReloConfig(object):
def __init__(self):
self.config = ConfigParser.SafeConfigParser()
def loadConfig(self):
self.config.read([PATH_ETC_CONFIG, os.path.join(INSTALLER_ROOT, 'etc', 'config.cfg')])
    def saveConfig(self):
        # ConfigParser.write() expects an open file object, not a path
        with open(PATH_ETC_CONFIG, 'w') as configfile:
            self.config.write(configfile)
def listConfig(self, category):
        def listCore():
            print("[Core]")
            for item in self.config.items('core'):
                print(" - " + str(item))
        def listLocal():
            print("[Local]")
            for item in self.config.items('local'):
                print(" - " + str(item))
        def listNet():
            print("[Net]")
            for item in self.config.items('net'):
                print(" - " + str(item))
        if category in (None, 'core'):
            listCore()
        if category in (None, 'local'):
            listLocal()
        if category in (None, 'net'):
            listNet()
        if category not in (None, 'core', 'local', 'net'):
            print("category not found")
def readConfig(self, key):
section, option = key.split('.')
return self.config.get(section, option)
def writeConfig(self, key, value):
section, option = key.split('.')
self.config.set(section, option, value)
conf = ReloConfig()
conf.loadConfig()
### Relo Downloads ###
RELO_UPDATE_URL_MASTER = conf.readConfig('core.master')
RELO_UPDATE_URL_DEVELOP = conf.readConfig('core.develop')
RELO_UPDATE_URL_PYPI = conf.readConfig('core.pypi')
RELO_UPDATE_URL_CONFIG = conf.readConfig('core.config')
RELO_MASTER_VERSION_URL = conf.readConfig('core.master-version')
RELO_DEVELOP_VERSION_URL = conf.readConfig('core.develop-version')
### Relo Index -> move to config file later
##### Inverted Index Variables #####
# Words which should not be indexed
STOP_WORDS = ("the", "of", "to", "and", "a", "in", "is", "it", "you", "that")
# Do not index any words shorter than this
MIN_WORD_LENGTH = 3
# Consider these characters to be punctuation (they will be replaced with spaces prior to word extraction)
PUNCTUATION_CHARS = ".,;:!?@£$%^&*()-–<>[]{}\\|/`~'\""
# A redis key to store a list of metaphones present in this project
REDIS_KEY_METAPHONES = "id:%(project_id)s:metaphones"
# A redis key to store a list of item IDs which have the given metaphone within the given project
REDIS_KEY_METAPHONE = "id:%(project_id)s:mp:%(metaphone)s"
# A redis key to store a list of documents present in this project
REDIS_KEY_DOCUMENTS = "id:%(project_id)s:docs"
# A redis key to store meta information which are associated with the document within the given project
REDIS_KEY_DOCUMENT = "id%(project_id)s:doc:%(document)s"
# A redis key to store a list of projects stored in the database
REDIS_KEY_PROJECTS = "projects"
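# Example of how these key templates are expected to be filled in
# (hypothetical project/metaphone values):
#   REDIS_KEY_METAPHONE % {"project_id": "42", "metaphone": "TST"}
#   -> "id:42:mp:TST"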
|
nilq/baby-python
|
python
|
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import user_passes_test
from django.urls import reverse
import csv
from .serializers import DaySerializer
from rest_framework.views import APIView
from rest_framework.response import Response
import datetime
import calendar
from django.shortcuts import get_object_or_404
from django.views import generic
from django.utils.safestring import mark_safe
from django.contrib.auth import authenticate, login
from django.shortcuts import redirect
from .models import Day, Teacher, Kindergarten, Parent, Child, TeachersDay
from .utils import Calendar
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from .utils import plan_month
class MonthView(LoginRequiredMixin, UserPassesTestMixin, generic.ListView):
model = Day
def test_func(self):
return is_admin_teacher(self.request.user)
def get(self, request, *args, **kwargs):
teacher = Teacher.objects.get(user=self.request.user)
kindergarten = teacher.kindergarten
response = HttpResponse(content_type="text/csv")
year = self.kwargs["year"]
month = self.kwargs["month"]
dates = []
for w in calendar.monthcalendar(year, month):
for d in w:
if d > 0:
dates.append(d)
response["Content-Disposition"] = "attachment; filename=\"dochazka_{}-{}.csv\"".format(
year, month)
writer = csv.writer(response)
writer.writerow(["Jméno"] + dates)
for child in kindergarten.childern:
present_list = child.present_list(year, month)
writer.writerow([child.name] + [present_list[d] for d in present_list])
return response
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        try:
            teacher = Teacher.objects.get(user=self.request.user)
            context['user'] = teacher
        except Exception as e:
            parent = Parent.objects.get(user=self.request.user)
            context['user'] = parent
        return context
def get_queryset(self):
teacher = Teacher.objects.get(user=self.request.user)
kindergarten = teacher.kindergarten
year = self.kwargs["year"]
month = self.kwargs["month"]
month_range = calendar.monthrange(year, month)
return Day.objects.filter(
kindergarten=kindergarten,
date__gte=datetime.date(year=year, month=month, day=1),
date__lte=datetime.date(year=year, month=month, day=month_range[1]),
)
class ParentView(LoginRequiredMixin, generic.DetailView):
model = Parent
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# Add in a QuerySet of all the books
context['childern'] = Child.objects.filter(parent=self.object)
context["kindergarten"] = self.object.kindergarten
try:
teacher = Teacher.objects.get(user=self.request.user)
context['user'] = teacher
except Exception as e:
parent = Parent.objects.get(user=self.request.user)
context['user'] = parent
return context
def get_object(self, **kwargs):
if not "pk" in self.kwargs:
return get_object_or_404(Parent, user=self.request.user)
else:
return get_object_or_404(Parent, pk=self.kwargs["pk"])
class TeacherView(LoginRequiredMixin, generic.DetailView):
model = Teacher
    login_url = "/login/"
redirect_field_name = "redirect_to"
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# Add in a QuerySet of all the books
context['kindergarten'] = self.object.kindergarten
try:
teacher = Teacher.objects.get(user=self.request.user)
context['user'] = teacher
except Exception as e:
parent = Parent.objects.get(user=self.request.user)
context['user'] = parent
return context
def get_object(self, **kwargs):
if not "pk" in self.kwargs:
return get_object_or_404(Teacher, user=self.request.user)
else:
return get_object_or_404(Teacher, pk=self.kwargs["pk"])
def kgview(request, uri_name):
print(uri_name)
class KindergartenView(generic.DetailView):
model = Kindergarten
slug_field = "uri_name"
def get_object(self):
object = get_object_or_404(Kindergarten,uri_name=self.kwargs['uri_name'])
return object
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# Add in a QuerySet of all the books
if self.request.user and not self.request.user.is_anonymous:
teachers = Teacher.objects.filter(user=self.request.user)
parents = Parent.objects.filter(user=self.request.user)
if teachers.count():
teacher = teachers[0]
context["teacher"] = teachers
context['childern'] = Child.objects.filter(parent__kindergarten=teacher.kindergarten)
context['teachers'] = Teacher.objects.filter(kindergarten=teacher.kindergarten)
elif parents.count():
parent = parents[0]
context["parent"] = parent
context['teachers'] = Teacher.objects.filter(kindergarten=parent.kindergarten)
else:
pass
if not self.request.user.is_anonymous:
teachers = Teacher.objects.filter(user=self.request.user)
parents = Parent.objects.filter(user=self.request.user)
if teachers.count():
context['user'] = teachers[0]
elif parents.count():
                context['user'] = parents[0]
else:
context["user"] = None
return context
def _get_day_index(day_name):
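    """Map an English weekday name (case-insensitive) to calendar's
    Monday-first index, e.g. 'monday' -> 0, 'sunday' -> 6."""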
days = ["monday", "tuesday", "wednesday", "thursday", "friday",
"saturday", "sunday"]
return days.index(day_name.lower())
class DayOfWeekView(LoginRequiredMixin, APIView):
"""
List all snippets, or create a new snippet.
"""
def get(self, request, year, month, day):
day = self.get_object(year, month, day)
serializer = DaySerializer(day, many=False)
return Response(serializer.data)
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
try:
teacher = Teacher.objects.get(user=self.request.user)
context['user'] = teacher
        except Exception as e:
            parent = Parent.objects.get(user=self.request.user)
            context['user'] = parent
        return context
def get_object(self, year, month, day_name):
#day_name = self.kwargs["day"].lower()
#year = self.kwargs["year"]
#month = self.kwargs["month"]
today = datetime.date.today()
cal = calendar.monthcalendar(year, month)
for week in cal:
date_number = week[_get_day_index(day_name)]
if date_number > 0 and date_number >= today.day:
return Day.objects.get(date=datetime.date(year=year, month=month, day=date_number))
class DayView(LoginRequiredMixin, generic.DetailView):
model = Day
def get_object(self, **kwargs):
user = self.request.user
try:
teacher = Teacher.objects.get(user=user)
self.kg = teacher.kindergarten
except ObjectDoesNotExist as exp:
parent = Parent.objects.get(user=user)
self.kg = parent.kindergarten
return get_object_or_404(Day, kindergarten=self.kg,
date=datetime.date(self.kwargs["year"], self.kwargs["month"], self.kwargs["day"]))
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
parents = Parent.objects.filter(user=self.request.user, kindergarten=self.kg)
if len(parents):
context["parent"] = self.get_parent_context(parents[0])
teachers = Teacher.objects.filter(user=self.request.user)
if len(teachers):
context["teacher_view"] = self.get_teacher_context(teachers[0])
context["past"] = False
now = datetime.datetime.now()
latest = datetime.datetime(now.year, now.month, now.day, 20, 00)
day = datetime.datetime(self.object.date.year, self.object.date.month, self.object.date.day)
if latest > day:
context["past"] = True
# Add in a QuerySet of all the books
#context['childern'] = Child.objects.filter()
try:
teacher = Teacher.objects.get(user=self.request.user)
context['user'] = teacher
except Exception as e:
parent = Parent.objects.get(user=self.request.user)
context['user'] = parent
return context
def get_parent_context(self, parent):
context = {}
day = self.object
childern_planned = Child.objects.filter(parent=parent, days__in=[day])
childern_present = Child.objects.filter(parent=parent, present__in=[day])
childern_all = Child.objects.filter(parent=parent)
childern_absent = Child.objects.filter(parent=parent, absent_all__in=[day])
teachers = Teacher.objects.filter(days_planned=day)
context["parent"] = parent
context["teachers_for_the_day"] = teachers
context["childern_planned"] = [ch.pk for ch in childern_planned]
context["childern_present"] = [ch.pk for ch in childern_present]
context["childern_absent"] = [ch.pk for ch in childern_absent]
context["childern_all"] = childern_all
return context
def get_teacher_context(self, teacher):
context = {}
day = self.object
childern_planned = Child.objects.filter(parent__kindergarten=teacher.kindergarten, days__in=[day])
childern_present = Child.objects.filter(parent__kindergarten=teacher.kindergarten, present__in=[day])
childern_absent = Child.objects.filter(parent__kindergarten=teacher.kindergarten, absent_all__in=[day])
childern_all = Child.objects.filter(parent__kindergarten=teacher.kindergarten)
teachers = Teacher.objects.filter(days_planned=day)
        for t in teachers:
            days = TeachersDay.objects.filter(date=day.date, teacher=t)
            if len(days) > 0:
                t.today = days[0]
context["teacher"] = teacher
context["teachers_for_the_day"] = teachers
context["childern_planned"] = [ch.pk for ch in childern_planned]
context["childern_present"] = [ch.pk for ch in childern_present]
context["childern_absent"] = [ch.pk for ch in childern_absent]
context["childern_all"] = childern_all
context["meals"] = day.meals
return context
class ChildView(generic.DetailView):
model = Child
slug_field = "uuid"
slug_url_kwarg = 'uuid'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
context["parent"] = self.object.parent
# Add in a QuerySet of all the books
#context['childern'] = Child.objects.filter()
try:
teacher = Teacher.objects.get(user=self.request.user)
context['user'] = teacher
except Exception as e:
parent = Parent.objects.get(user=self.request.user)
context['user'] = parent
return context
class KindergartensView(generic.ListView):
model = Kindergarten
template_name = 'kindergarden/kindergartens.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
if not self.request.user.is_anonymous:
teachers = Teacher.objects.filter(user=self.request.user)
parents = Parent.objects.filter(user=self.request.user)
if teachers.count():
context['user'] = teachers[0]
elif parents.count():
context['user'] = parents[0]
else:
context["user"] = None
return context
# ==================================================================
@login_required
def get_parent(request):
user = request.user
return get_object_or_404(Parent, user=request.user)
@login_required
def get_teacher(request):
user = request.user
return get_object_or_404(Teacher, user=request.user)
@method_decorator(login_required, name='dispatch')
class CalendarView(generic.ListView):
model = Day
template_name = 'kindergarden/calendar.html'
def get(self, request, *args, **kwargs):
if "/calendar/" == request.path:
today = datetime.date.today()
year = today.year
month = today.month
return HttpResponseRedirect(reverse('month', args=(year,month)))
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.teacher = get_teacher(self.request)
if self.teacher.is_admin:
plan_month(self.teacher.kindergarten, self.kwargs["year"],
self.kwargs["month"])
url = reverse("month", args=[self.kwargs["year"], self.kwargs["month"]])
return HttpResponseRedirect(url)
        else:
            return self.get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
today = datetime.date.today()
if "year" in self.kwargs:
year = self.kwargs["year"]
month = self.kwargs["month"]
else:
year = today.year
month = today.month
user = self.request.user
ch_reserved = []
ch_present = []
month_filter = {
"date__year": year,
"date__month": month
}
context["year"] = year
context["month"] = month
teacher = None
parent = None
try:
teacher = Teacher.objects.get(user=user)
kg = teacher.kindergarten
context["teacher"] = teacher
context["kindergarten"] = teacher.kindergarten
context["user"] = teacher
except ObjectDoesNotExist as exp:
parent = Parent.objects.get(user=user)
kg = parent.kindergarten
context["parent"] = parent
context["user"] = parent
ch_reserved = {ch: [d for d in ch.days.filter(**month_filter)] for ch in parent.child_set.all()}
ch_present = {ch: [d for d in ch.present.filter(**month_filter)] for ch in parent.child_set.all()}
context["kindergarten"] = parent.kindergarten
days = Day.objects.filter(kindergarten=kg, **month_filter)
# use today's date for the calendar
d = get_date(self.request.GET.get('day', None))
# Instantiate our calendar class with today's year and date
cal = Calendar(datetime.date(year=year,
month=month, day=1))
# Call the formatmonth method, which returns our calendar as a table
html_cal = cal.formatmonth(
teacher=teacher,
withyear=True,
days=days,
childern_present=ch_present,
childern_reserved=ch_reserved
)
context['calendar'] = mark_safe(html_cal)
time_delta_forward = datetime.timedelta(days=calendar.monthrange(year, month)[1])
if month == 1:
prev_month = 12
prev_year = year - 1
else:
prev_month = month - 1
prev_year = year
time_delta_backward = datetime.timedelta(days=calendar.monthrange(prev_year, prev_month)[1])
next_month_day = datetime.date(year=year, month=month, day=1) + time_delta_forward
previous_month_day = datetime.date(year=year, month=month, day=1) - time_delta_backward
context['previous_month'] = previous_month_day.month
context['previous_year'] = previous_month_day.year
context['next_month'] = next_month_day.month
context['next_year'] = next_month_day.year
context['this_month'] = today.month
context['this_year'] = today.year
context["kindergarden"] = kg
return context
def is_admin_teacher(user):
    try:
        teacher = Teacher.objects.get(user=user)
        return teacher.is_admin
    except ObjectDoesNotExist as e:
        return False
#@user_passes_test(can_save_day)
@login_required(login_url="login")
def save_day(request, year, month, day):
day = Day.objects.get(date=datetime.date(year, month, day))
form = request.POST
teachers = Teacher.objects.filter(user=request.user)
parents = Parent.objects.filter(user=request.user)
if teachers.count():
kindergarten = teachers[0].kindergarten
elif parents.count():
kindergarten = parents[0].kindergarten
teachers_for_the_day = Teacher.objects.filter(kindergarten=kindergarten, days_planned=day)
for child in kindergarten.childern:
if teachers.count() and teachers[0].is_admin or \
parents.count() and child.parent == parents[0]:
if "child-{}-present".format(child.pk) in form:
if not day in child.present.all():
child.present.add(day)
else:
if day in child.present.all():
child.present.remove(day)
child.absent_all.add(day)
if "child-{}-planned".format(child.pk) in form:
if not day in child.days.all():
if day.capacity > day.child_day_planned.count():
child.days.add(day)
else:
from .utils import CapacityFilled
raise CapacityFilled(day, child)
c_key = "child-{}-compensation".format(child.pk)
if c_key in form and form[c_key] != "":
c_year, c_month, c_day = map(lambda x: int(x), form[c_key].split("-"))
compensate_date = datetime.date(c_year, c_month, c_day)
child.absent_all.remove(Day.objects.get(date=compensate_date, kindergarten=kindergarten))
else:
if day in child.days.all():
child.days.remove(day)
child.absent_all.add(day)
if not len(parents):
for teacher in teachers_for_the_day:
teachers_day = TeachersDay.objects.filter(date=day.date, teacher=teacher)
t_key = "teacher-{}-present".format(teacher.pk)
if form[t_key]:
units = list((int(v) for v in form[t_key].split(":")))
if len(units) > 2:
hours, minutes, seconds = units
elif len(units) == 2:
hours, minutes = units
if len(teachers_day) == 0:
teachers_day = TeachersDay.objects.create(date=day.date,
teacher=teacher, duration=datetime.timedelta(hours=hours,
minutes=minutes))
else:
teachers_day = teachers_day[0]
teachers_day.duration = datetime.timedelta(hours=hours,
minutes=minutes)
teachers_day.save()
if "meals" in form:
day.meals = int(form["meals"])
day.save()
url = reverse("day", args=[day.date.year, day.date.month, day.date.day])
return HttpResponseRedirect(url)
def get_date(req_day):
if req_day:
year, month = (int(x) for x in req_day.split('-'))
return datetime.date(year, month, day=1)
return datetime.date.today()
def prev_month(d):
first = d.replace(day=1)
prev_month = first - datetime.timedelta(days=1)
month = 'month=' + str(prev_month.year) + '-' + str(prev_month.month)
return month
def next_month(d):
days_in_month = calendar.monthrange(d.year, d.month)[1]
last = d.replace(day=days_in_month)
next_month = last + datetime.timedelta(days=1)
month = 'month=' + str(next_month.year) + '-' + str(next_month.month)
return month
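# For example (hypothetical date): for d = 2021-03-15, prev_month(d)
# returns 'month=2021-2' and next_month(d) returns 'month=2021-4'.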
|
nilq/baby-python
|
python
|
import logging
from abc import abstractmethod
from datetime import datetime
import json
from dacite import from_dict
from os.path import join
from airflow.models.dag import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
from airflow.utils.task_group import TaskGroup
from airflow.operators.bash import BashOperator
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.sensors.gcs import GCSObjectExistenceSensor
from airflow.operators.dummy import DummyOperator
from gcp_airflow_foundations.base_class import file_source_config
from gcp_airflow_foundations.source_class.source import DagBuilder
from gcp_airflow_foundations.base_class.file_source_config import FileSourceConfig
from gcp_airflow_foundations.base_class.file_table_config import FileTableConfig
class GenericFileIngestionDagBuilder(DagBuilder):
"""
Builds DAGs to load files from a generic file system to BigQuery.
"""
source_type = "FTP"
def set_schema_method_type(self):
self.schema_source_type = self.config.source.schema_options.schema_source_type
def get_bq_ingestion_task(self, dag, table_config):
taskgroup = TaskGroup(group_id="ftp_taskgroup")
file_source_config = from_dict(data_class=FileSourceConfig, data=self.config.source.extra_options["file_source_config"])
tasks = []
skip_gcs_upload = False
if "skip_gcs_upload" in self.config.source.extra_options["file_source_config"]:
skip_gcs_upload = True
if not skip_gcs_upload:
tasks.append(self.metadata_file_sensor(table_config, taskgroup))
tasks.append(self.flag_file_sensor(table_config, taskgroup))
tasks.append(self.schema_file_sensor(table_config, taskgroup))
tasks.append(self.get_file_list_task(table_config, taskgroup))
tasks.append(self.file_sensor(table_config, taskgroup))
tasks.append(self.file_ingestion_task(table_config, taskgroup))
tasks.append(self.load_to_landing_task(table_config, taskgroup))
if file_source_config.delete_gcs_files:
tasks.append(self.delete_gcs_files(table_config, taskgroup))
        # drop any tasks that were not created (returned None) before
        # chaining them; mutating the list while iterating over it, as the
        # original loop did, skips elements
        not_none_tasks = [task for task in tasks if task is not None]
        for i in range(len(not_none_tasks) - 1):
            not_none_tasks[i] >> not_none_tasks[i + 1]
return taskgroup
def metadata_file_sensor(self, table_config, taskgroup):
"""
Implements a sensor for either the metadata file specified in the table config, which specifies
the parameterized file names to ingest.
"""
file_source_config = from_dict(data_class=FileSourceConfig, data=self.config.source.extra_options["file_source_config"])
if "metadata_file" in table_config.extra_options.get("file_table_config"):
metadata_file_name = table_config.extra_options.get("file_table_config")["metadata_file"]
bucket = self.config.source.extra_options["gcs_bucket"]
timeout = file_source_config.sensor_timeout
return GCSObjectExistenceSensor(
task_id="wait_for_metadata_file",
bucket=bucket,
object=metadata_file_name,
task_group=taskgroup,
timeout=timeout
)
else:
return None
@abstractmethod
    def flag_file_sensor(self, table_config, taskgroup):
"""
Implements an Airflow sensor to wait for optional flag files for ingestion.
e.g. for .PARQUET file ingestion, waiting for a _SUCCESS file is part of a common flow.
"""
pass
def schema_file_sensor(self, table_config, taskgroup):
"""
Implements an Airflow sensor to wait for an (optional) schema file in GCS
"""
file_source_config = from_dict(data_class=FileSourceConfig, data=self.config.source.extra_options["file_source_config"])
bucket = self.config.source.extra_options["gcs_bucket"]
schema_file_name = None
timeout = file_source_config.sensor_timeout
if "schema_file" in table_config.extra_options.get("file_table_config"):
schema_file_name = table_config.extra_options.get("file_table_config")["schema_file"]
return GCSObjectExistenceSensor(
task_id="wait_for_schema_file",
bucket=bucket,
object=schema_file_name,
task_group=taskgroup,
timeout=timeout
)
else:
return None
@abstractmethod
    def file_ingestion_task(self, table_config, taskgroup):
"""
Implements an Airflow task to ingest the files from the FTP source into GCS (e.g. from an SFTP server or an AWS bucket)
"""
pass
@abstractmethod
    def file_sensor(self, table_config, taskgroup):
"""
Returns an Airflow sensor that waits for the list of files specified the metadata file provided
Should be Xcom pulled from get_file_list_task()
"""
pass
@abstractmethod
    def delete_gcs_files(self, table_config, taskgroup):
pass
def get_file_list_task(self, table_config, taskgroup):
return PythonOperator(
task_id="get_file_list",
op_kwargs={"table_config": table_config},
python_callable=self.get_list_of_files,
task_group=taskgroup
)
def get_list_of_files(self, table_config, **kwargs):
# gcs_hook = GCSHook()
file_source_config = from_dict(data_class=FileSourceConfig, data=self.config.source.extra_options["file_source_config"])
airflow_date_template = file_source_config.airflow_date_template
if airflow_date_template == "ds":
ds = kwargs["ds"]
else:
ds = kwargs["prev_ds"]
ds = datetime.strptime(ds, "%Y-%m-%d").strftime(file_source_config.date_format)
logging.info(ds)
# XCom push the list of files
# overwrite if in table_config
dir_prefix = table_config.extra_options.get("file_table_config")["directory_prefix"]
dir_prefix = dir_prefix.replace("{{ ds }}", ds)
gcs_bucket_prefix = file_source_config.gcs_bucket_prefix
if file_source_config.source_format == "PARQUET":
file_list = [dir_prefix]
kwargs['ti'].xcom_push(key='file_list', value=file_list)
return
else:
# bucket = self.config.source.extra_options["gcs_bucket"]
if "metadata_file" in table_config.extra_options.get("file_table_config"):
# metadata_file_name = table_config.extra_options.get("file_table_config")["metadata_file"]
# metadata_file = gcs_hook.download(bucket_name=bucket, object_name=metadata_file_name, filename="metadata.csv")
file_list = []
with open('metadata.csv', newline='') as f:
for line in f:
file_list.append(line.strip())
else:
templated_file_name = file_source_config.file_name_template
templated_file_name = templated_file_name.replace("{{ TABLE_NAME }}", table_config.table_name)
file_list = [templated_file_name]
# support replacing files with current dates
file_list[:] = [file.replace("{{ ds }}", ds) if "{{ ds }}" in file else file for file in file_list]
# add dir prefix to files
file_list[:] = [join(gcs_bucket_prefix, file) for file in file_list]
logging.info(file_list)
kwargs['ti'].xcom_push(key='file_list', value=file_list)
def load_to_landing_task(self, table_config, taskgroup):
return PythonOperator(
task_id="load_gcs_to_landing_zone",
op_kwargs={"table_config": table_config},
python_callable=self.load_to_landing,
task_group=taskgroup
)
# flake8: noqa: C901
def load_to_landing(self, table_config, **kwargs):
gcs_hook = GCSHook()
file_source_config = from_dict(data_class=FileSourceConfig, data=self.config.source.extra_options["file_source_config"])
# Parameters
ds = kwargs['ds']
ti = kwargs['ti']
data_source = self.config.source
bucket = data_source.extra_options["gcs_bucket"]
source_format = file_source_config.source_format
field_delimeter = file_source_config.delimeter
gcp_project = data_source.gcp_project
landing_dataset = data_source.landing_zone_options.landing_zone_dataset
landing_table_name = table_config.landing_zone_table_name_override
table_name = table_config.table_name
destination_table = f"{gcp_project}:{landing_dataset}.{table_config.landing_zone_table_name_override}" + f"_{ds}"
if "skip_gcs_upload" not in data_source.extra_options["file_source_config"]:
files_to_load = ti.xcom_pull(key='file_list', task_ids='ftp_taskgroup.get_file_list')
else:
dir_prefix = table_config.extra_options.get("file_table_config")["directory_prefix"]
dir_prefix = dir_prefix.replace("{{ ds }}", ds)
files_to_load = [dir_prefix]
gcs_bucket_prefix = file_source_config.gcs_bucket_prefix
if gcs_bucket_prefix is None:
gcs_bucket_prefix = ""
if not gcs_bucket_prefix == "":
gcs_bucket_prefix += "/"
destination_path_prefix = gcs_bucket_prefix + table_name + "/" + ds
if "gcs_bucket_path_format_mode" in self.config.source.extra_options["file_source_config"]:
date = datetime.strptime(ds, '%Y-%m-%d').strftime('%Y/%m/%d')
destination_path_prefix = gcs_bucket_prefix + table_name + "/" + date
logging.info(destination_path_prefix)
files_to_load = [destination_path_prefix + "/" + f for f in files_to_load]
logging.info(files_to_load)
if "parquet_upload_option" in table_config.extra_options.get("file_table_config"):
parquet_upload_option = table_config.extra_options.get("file_table_config")["parquet_upload_option"]
else:
parquet_upload_option = "BASH"
source_format = file_source_config.source_format
if source_format == "PARQUET" and parquet_upload_option == "BASH":
date_column = table_config.extra_options.get("sftp_table_config")["date_column"]
gcs_bucket_prefix = file_source_config.gcs_bucket_prefix
# bq load command if parquet
partition_prefix = ti.xcom_pull(key='partition_prefix', task_ids='ftp_taskgroup.load_sftp_to_gcs')
if not partition_prefix:
partition_prefix = self.config.source.extra_options["sftp_source_config"]["partition_prefix"]
partition_prefix = partition_prefix.replace("date", table_config.extra_options.get("sftp_table_config")["date_column"])
partition_prefix = partition_prefix.replace("ds", kwargs['prev_ds'])
if "prefix" in table_config.extra_options.get("file_table_config"):
partition_prefix = partition_prefix + "/" + table_config.extra_options.get("file_table_config")["prefix"]
command = self.get_load_script(gcp_project, landing_dataset, landing_table_name + f"_{ds}", bucket, gcs_bucket_prefix, partition_prefix, table_name, date_column, ds)
logging.info(command)
try:
bash = BashOperator(
task_id="import_files_to_bq_landing",
bash_command=command
)
bash.execute(context=kwargs)
except Exception:
logging.info("Load into BQ landing zone failed.")
else:
# gcs->bq operator else
if file_source_config.file_prefix_filtering:
logging.info(files_to_load)
for i in range(len(files_to_load)):
matching_gcs_files = gcs_hook.list(bucket_name=bucket, prefix=files_to_load[i])
logging.info(matching_gcs_files)
if len(matching_gcs_files) > 1:
raise AirflowException(f"There is more than one matching file with the prefix {files_to_load[i]} in the bucket {bucket}")
files_to_load[i] = matching_gcs_files[0]
schema_file_name = None
if "schema_file" in table_config.extra_options.get("file_table_config"):
schema_file_name = table_config.extra_options.get("file_table_config")["schema_file"]
allow_quoted_newlines = False
if "allow_quoted_newlines" in table_config.extra_options.get("file_table_config"):
allow_quoted_newlines = table_config.extra_options.get("file_table_config")["allow_quoted_newlines"]
if parquet_upload_option == "GCS" and source_format == "PARQUET":
prefix = ""
if "prefix" in table_config.extra_options.get("file_table_config"):
prefix = table_config.extra_options.get("file_table_config")["prefix"]
prefix = destination_path_prefix + "/" + prefix
logging.info(destination_path_prefix)
# logging.info(destination_path_prefix + "/" + partition_prefix)
files_to_load = gcs_hook.list(bucket_name=bucket, prefix=prefix)
logging.info(files_to_load)
# Get files to load from metadata file
if schema_file_name:
schema_file = gcs_hook.download(bucket_name=bucket, object_name=schema_file_name)
# Only supports json schema file format - add additional support if required
schema_fields = json.loads(schema_file)
gcs_to_bq = GCSToBigQueryOperator(
task_id='import_files_to_bq_landing',
bucket=bucket,
source_objects=files_to_load,
source_format=source_format,
schema_fields=schema_fields,
field_delimiter=field_delimeter,
destination_project_dataset_table=destination_table,
allow_quoted_newlines=allow_quoted_newlines,
write_disposition='WRITE_TRUNCATE',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=1,
)
else:
gcs_to_bq = GCSToBigQueryOperator(
task_id='import_files_to_bq_landing',
bucket=bucket,
source_objects=files_to_load,
source_format=source_format,
field_delimiter=field_delimeter,
destination_project_dataset_table=destination_table,
allow_quoted_newlines=allow_quoted_newlines,
write_disposition='WRITE_TRUNCATE',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=1,
)
gcs_to_bq.execute(context=kwargs)
kwargs['ti'].xcom_push(key='loaded_files', value=files_to_load)
def get_load_script(self, gcp_project, landing_dataset, landing_table_name, bucket, gcs_bucket_prefix, partition_prefix, table_name, date_column, ds):
if not partition_prefix == "":
partition_prefix += "/"
full_table_name = f"{landing_dataset}.{landing_table_name}"
source_uri_prefix = f"gs://{bucket}/{gcs_bucket_prefix}{table_name}/{ds}"
uri_wildcards = f"gs://{bucket}/{gcs_bucket_prefix}{table_name}/{ds}/{partition_prefix}*"
command = f"bq load --source_format=PARQUET --autodetect --hive_partitioning_mode=STRINGS --hive_partitioning_source_uri_prefix={source_uri_prefix} {full_table_name} {uri_wildcards}"
logging.info(command)
return command
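    # For example (hypothetical arguments), get_load_script produces a
    # command of the form:
    #   bq load --source_format=PARQUET --autodetect \
    #     --hive_partitioning_mode=STRINGS \
    #     --hive_partitioning_source_uri_prefix=gs://bkt/pre/tbl/2021-01-01 \
    #     lz.tbl_2021-01-01 gs://bkt/pre/tbl/2021-01-01/dt=2021-01-01/*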
def validate_extra_options(self):
# try and parse as FTPSourceConfig
# file_source_config = from_dict(data_class=FileSourceConfig, data=self.config.source.extra_options["file_source_config"])
tables = self.config.tables
for table_config in tables:
# try and parse as FTPTableConfig
# file_table_config = from_dict(data_class=FileTableConfig, data=table_config.extra_options.get("file_table_config"))
pass
|
nilq/baby-python
|
python
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------
from ._log_query_client_async import LogsQueryClient
from ._metrics_query_client_async import MetricsQueryClient
__all__ = [
"LogsQueryClient",
"MetricsQueryClient"
]
|
nilq/baby-python
|
python
|
def shift(string):
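    """Print each character shifted forward by two code points,
    one per line, e.g. 'ab' prints 'c' then 'd'."""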
for c in string:
print(chr(ord(c) + 2))
shift(input("Enter the string: "))
|
nilq/baby-python
|
python
|
# Sphinx extension to insert the last updated date, based on the git revision
# history, into Sphinx documentation. For example, do:
#
# .. |last_updated| last_updated::
#
# *This document last updated:* |last_updated|.
import subprocess
from email.utils import parsedate_tz
from docutils import nodes
from docutils.parsers.rst import Directive  # sphinx.util.compat.Directive was removed in newer Sphinx
import datetime
def setup(app):
app.add_config_value('lastupdated_enabled', True, True)
app.add_directive('last_updated', LastUpdatedDirective)
class LastUpdatedDirective(Directive):
has_content = False
def run(self):
env = self.state.document.settings.env
src, line = self.state_machine.get_source_and_line()
        # check_output returns bytes in Python 3; decode and strip so the
        # empty-string comparison below works as intended
        date = subprocess.check_output(
            ["git", "log", "-1", "--format=%cd", src]).decode("utf-8").strip()
        # If the source file is new (i.e. not in the repo), git returns an empty string:
        if date != '':
            date = "%d-%d-%d" % parsedate_tz(date)[:3]
else:
date = datetime.date.today()
date = "%d-%d-%d" % (date.year, date.month, date.day)
node = nodes.Text(date)
return [node]
|
nilq/baby-python
|
python
|
##############################################################################
# Copyright 2009, Gerhard Weis
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT
##############################################################################
'''
Import all essential functions and constants to re-export them here for easy
access.
This module also contains various pre-defined ISO 8601 format strings.
'''
from __future__ import absolute_import
from .isodates import parse_date, date_isoformat
from .isotime import parse_time, time_isoformat
from .isodatetime import parse_datetime, datetime_isoformat
from .isoduration import parse_duration, duration_isoformat
from .isoerror import ISO8601Error
from .isotzinfo import parse_tzinfo, tz_isoformat
from .tzinfo import UTC, FixedOffset, LOCAL
from .duration import Duration
from .isostrf import strftime
from .isostrf import DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE
from .isostrf import DATE_BAS_WEEK, DATE_BAS_WEEK_COMPLETE
from .isostrf import DATE_CENTURY, DATE_EXT_COMPLETE
from .isostrf import DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK
from .isostrf import DATE_EXT_WEEK_COMPLETE, DATE_YEAR
from .isostrf import DATE_BAS_MONTH, DATE_EXT_MONTH
from .isostrf import TIME_BAS_COMPLETE, TIME_BAS_MINUTE
from .isostrf import TIME_EXT_COMPLETE, TIME_EXT_MINUTE
from .isostrf import TIME_HOUR
from .isostrf import TZ_BAS, TZ_EXT, TZ_HOUR
from .isostrf import DT_BAS_COMPLETE, DT_EXT_COMPLETE
from .isostrf import DT_BAS_ORD_COMPLETE, DT_EXT_ORD_COMPLETE
from .isostrf import DT_BAS_WEEK_COMPLETE, DT_EXT_WEEK_COMPLETE
from .isostrf import D_DEFAULT, D_WEEK, D_ALT_EXT, D_ALT_BAS
from .isostrf import D_ALT_BAS_ORD, D_ALT_EXT_ORD
__all__ = [
'parse_date', 'date_isoformat', 'parse_time', 'time_isoformat', 'parse_datetime', 'datetime_isoformat',
'parse_duration', 'duration_isoformat', 'ISO8601Error', 'parse_tzinfo', 'tz_isoformat', 'UTC', 'FixedOffset',
'LOCAL', 'Duration', 'strftime', 'DATE_BAS_COMPLETE', 'DATE_BAS_ORD_COMPLETE', 'DATE_BAS_WEEK',
'DATE_BAS_WEEK_COMPLETE', 'DATE_CENTURY', 'DATE_EXT_COMPLETE', 'DATE_EXT_ORD_COMPLETE', 'DATE_EXT_WEEK',
'DATE_EXT_WEEK_COMPLETE', 'DATE_YEAR', 'DATE_BAS_MONTH', 'DATE_EXT_MONTH', 'TIME_BAS_COMPLETE', 'TIME_BAS_MINUTE',
'TIME_EXT_COMPLETE', 'TIME_EXT_MINUTE', 'TIME_HOUR', 'TZ_BAS', 'TZ_EXT', 'TZ_HOUR', 'DT_BAS_COMPLETE',
'DT_EXT_COMPLETE', 'DT_BAS_ORD_COMPLETE', 'DT_EXT_ORD_COMPLETE', 'DT_BAS_WEEK_COMPLETE', 'DT_EXT_WEEK_COMPLETE',
'D_DEFAULT', 'D_WEEK', 'D_ALT_EXT', 'D_ALT_BAS', 'D_ALT_BAS_ORD', 'D_ALT_EXT_ORD'
]
|
nilq/baby-python
|
python
|
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
from auth import TwitterAuth
#Very simple (non-production) Twitter stream example
#1. Download / install python and tweepy (pip install tweepy)
#2. Fill in information in auth.py
#3. Run as: python streaming_simple.py
#4. It will keep running until the user presses ctrl+c to exit
#All output stored to output.json (one tweet per line)
#Text of tweets also printed as received (see note below about not doing this in production (final) code)
class StdOutListener(StreamListener):
#This function gets called every time a new tweet is received on the stream
def on_data(self, data):
#Just write data to one line in the file
fhOut.write(data)
#Convert the data to a json object (shouldn't do this in production; might slow down and miss tweets)
j=json.loads(data)
#See Twitter reference for what fields are included -- https://dev.twitter.com/docs/platform-objects/tweets
#text=j["text"] #The text of the tweet
#print(text)
def on_error(self, status):
print("ERROR")
print(status)
if __name__ == '__main__':
try:
#Create a file to store output. "a" means append (add on to previous file)
fhOut = open("output.json","a")
#Create the listener
l = StdOutListener()
auth = OAuthHandler(TwitterAuth.consumer_key, TwitterAuth.consumer_secret)
auth.set_access_token(TwitterAuth.access_token, TwitterAuth.access_token_secret)
#Connect to the Twitter stream
stream = Stream(auth, l)
#Terms to track
stream.filter(track=["#coronavirus","#corona","#cdc"])
#Alternatively, location box for geotagged tweets
#stream.filter(locations=[-0.530, 51.322, 0.231, 51.707])
except KeyboardInterrupt:
#User pressed ctrl+c -- get ready to exit the program
pass
    #Close the output file
fhOut.close()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import os
import sys
import copy
import random
import numpy as np
import torch
from torchvision import transforms
from .datasets import register_dataset
import utils
@register_dataset('VisDA2017')
class VisDADataset:
"""
VisDA Dataset class
"""
def __init__(self, name, img_dir, LDS_type, is_target):
self.name = name
self.img_dir = img_dir
self.LDS_type = LDS_type
self.is_target = is_target
def get_data(self):
normalize_transform = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
self.train_transforms = transforms.Compose([
transforms.Resize((256, 256)),
transforms.RandomCrop((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize_transform
])
self.test_transforms = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize_transform
])
train_path = os.path.join('data/VisDA2017/', '{}.txt'.format(self.name.split('_')[1]))
test_path = os.path.join('data/VisDA2017/', '{}.txt'.format(self.name.split('_')[1]))
train_dataset = utils.ImageList(open(train_path).readlines(), self.img_dir)
val_dataset = utils.ImageList(open(test_path).readlines(), self.img_dir)
test_dataset = utils.ImageList(open(test_path).readlines(), self.img_dir)
self.num_classes = 12
train_dataset.targets, val_dataset.targets, test_dataset.targets = torch.from_numpy(train_dataset.labels), \
torch.from_numpy(val_dataset.labels), \
torch.from_numpy(test_dataset.labels)
return self.num_classes, train_dataset, val_dataset, test_dataset, self.train_transforms, self.test_transforms
|
nilq/baby-python
|
python
|
# Sample PySys testcase
# Copyright (c) 2015-2016 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or its subsidiaries and/or its affiliates and/or their licensors.
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG
from pysys.constants import *
from pysys.basetest import BaseTest
from apama.correlator import CorrelatorHelper
class PySysTest(BaseTest):
def execute(self):
# create the correlator helper, start the correlator and attach an
# engine_receive process listening to a test channel. The helper will
# automatically get an available port that will be used for all
# operations against it
correlator = CorrelatorHelper(self, name='testcorrelator')
correlator.start(logfile='testcorrelator.log', config=PROJECT.TEST_SUBJECT_DIR+'/initialization.yaml')
receiveProcess = correlator.receive(filename='receive.evt', channels=['output'], logChannels=True)
correlator.applicationEventLogging(enable=True)
# send in the events contained in the test.evt file (directory defaults
# to the testcase input)
correlator.send(filenames=['test.evt'])
# wait for all events to be processed
correlator.flush()
# wait until the receiver writes the expected events to disk
self.waitForSignal('receive.evt', expr="Msg", condition="==1")
def validate(self):
# look for log statements in the correlator log file
self.assertGrep('testcorrelator.log', expr=' (ERROR|FATAL) ', contains=False)
# check the received events against the reference
self.assertDiff('receive.evt', 'ref_receive.evt')
|
nilq/baby-python
|
python
|
from selenium import webdriver
from selenium.webdriver import ActionChains
driver = webdriver.Chrome()  # pass executable_path="driver_.exe" if the driver is not on your PATH
driver.get("https://swisnl.github.io/jQuery-contextMenu/demo.html")
driver.maximize_window()  # maximize the window
button = driver.find_element_by_xpath("/html/body/div/section/div/div/div/p/span")
actions = ActionChains(driver)
actions.context_click(button).perform()  # right-click (context click) on the button
|
nilq/baby-python
|
python
|
import pytest
import tfchain
from stubs.ExplorerClientStub import TFChainExplorerGetClientStub
def test():
# create a tfchain client for testnet
c = tfchain.TFChainClient.TFChainClient(network_type="testnet")
# (we replace internal client logic with custom logic as to ensure we can test without requiring an active network)
explorer_client = TFChainExplorerGetClientStub()
# add the blockchain info
explorer_client.chain_info = '{"blockid":"5c86c987668ca47948a149413f4f004651249073eff4f144fd26b50e218705a8","difficulty":"30203","estimatedactivebs":"2365","height":16639,"maturitytimestamp":1549646167,"target":[0,2,43,120,39,20,204,42,102,32,125,110,53,77,39,71,99,124,13,223,197,154,115,42,126,62,185,120,208,177,21,190],"totalcoins":"0","arbitrarydatatotalsize":4328,"minerpayoutcount":16721,"transactioncount":17262,"coininputcount":633,"coinoutputcount":1225,"blockstakeinputcount":16639,"blockstakeoutputcount":16640,"minerfeecount":622,"arbitrarydatacount":572}'
explorer_client.hash_add('5c86c987668ca47948a149413f4f004651249073eff4f144fd26b50e218705a8', '{"hashtype":"blockid","block":{"minerpayoutids":["84b378d60cbdd78430b39c8eddf226119b6f28256388557dd15f0b046bf3c3ed"],"transactions":[{"id":"9aec9f849e35f0bdd14c5ea9daed20c8fbfa09f5a6771bb46ce787eb7e2b00a0","height":16639,"parent":"5c86c987668ca47948a149413f4f004651249073eff4f144fd26b50e218705a8","rawtransaction":{"version":1,"data":{"coininputs":null,"blockstakeinputs":[{"parentid":"144b2b7711fda335cdae5865ab3729d641266087bc4e088d9fba806345045903","fulfillment":{"type":1,"data":{"publickey":"ed25519:d285f92d6d449d9abb27f4c6cf82713cec0696d62b8c123f1627e054dc6d7780","signature":"f09af1c62026aed18d1d8f80e5a7bd4947a6cb5b6b69097c5b10cb983f0d729662c511a4852fa63690884e2b5c600e3935e08b81aaa757d9f0eb740292ec8309"}}}],"blockstakeoutputs":[{"value":"3000","condition":{"type":1,"data":{"unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}}}],"minerfees":null}},"coininputoutputs":null,"coinoutputids":null,"coinoutputunlockhashes":null,"blockstakeinputoutputs":[{"value":"3000","condition":{"type":1,"data":{"unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}},"unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}],"blockstakeoutputids":["83aa29b3e77f703526e28fbc0d2bfcf2b66c06b665e11cb5535b9575fd0e8105"],"blockstakeunlockhashes":["015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"],"unconfirmed":false}],"rawblock":{"parentid":"8485f94209bf3e01ed169244ab2072ebb0d1c5dc589c95b39a3fbab3641b7a7e","timestamp":1549646257,"pobsindexes":{"BlockHeight":16638,"TransactionIndex":0,"OutputIndex":0},"minerpayouts":[{"value":"10000000000","unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}],"transactions":[{"version":1,"data":{"coininputs":null,"blockstakeinputs":[{"parentid":"144b2b7711fda335cdae5865ab3729d641266087bc4e088d9fba806345045903","fulfillment":{"type":1,"data":{"publickey":"ed25519:d285f92d6d449d9abb27f4c6cf82713cec0696d62b8c123f1627e054dc6d7780","signature":"f09af1c62026aed18d1d8f80e5a7bd4947a6cb5b6b69097c5b10cb983f0d729662c511a4852fa63690884e2b5c600e3935e08b81aaa757d9f0eb740292ec8309"}}}],"blockstakeoutputs":[{"value":"3000","condition":{"type":1,"data":{"unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}}}],"minerfees":null}}]},"blockid":"5c86c987668ca47948a149413f4f004651249073eff4f144fd26b50e218705a8","difficulty":"30203","estimatedactivebs":"2365","height":16639,"maturitytimestamp":1549646167,"target":[0,2,43,120,39,20,204,42,102,32,125,110,53,77,39,71,99,124,13,223,197,154,115,42,126,62,185,120,208,177,21,190],"totalcoins":"0","arbitrarydatatotalsize":4328,"minerpayoutcount":16721,"transactioncount":17262,"coininputcount":633,"coinoutputcount":1225,"blockstakeinputcount":16639,"blockstakeoutputcount":16640,"minerfeecount":622,"arbitrarydatacount":572},"blocks":null,"transaction":{"id":"0000000000000000000000000000000000000000000000000000000000000000","height":0,"parent":"0000000000000000000000000000000000000000000000000000000000000000","rawtransaction":{"version":0,"data":{"coininputs":[],"minerfees":null}},"coininputoutputs":null,"coinoutputids":null,"coinoutputunlockhashes":null,"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},"transactions":null,"multisigaddresses":null,"unconfirmed":false}')
# override internal functionality, as to use our stub client
c.explorer_get = explorer_client.explorer_get
c.explorer_post = explorer_client.explorer_post
# a wallet is required to initiate an atomic swap contract
w = tfchain.TFChainWallet.TFChainWallet(client=c, seed='remain solar kangaroo welcome clean object friend later bounce strong ship lift hamster afraid you super dolphin warm emotion curve smooth kiss stem diet')
# one can verify that its transaction is sent as sender,
# not super useful, but it does also contain an optional check to know if it is already refundable
# verification will fail if the contract could not be found
with pytest.raises(tfchain.errors.AtomicSwapContractNotFound):
w.atomicswap.verify('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890')
# add the coin output info of the submitted atomic swap contract
explorer_client.hash_add('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890', '{"hashtype":"coinoutputid","block":{"minerpayoutids":null,"transactions":null,"rawblock":{"parentid":"0000000000000000000000000000000000000000000000000000000000000000","timestamp":0,"pobsindexes":{"BlockHeight":0,"TransactionIndex":0,"OutputIndex":0},"minerpayouts":null,"transactions":null},"blockid":"0000000000000000000000000000000000000000000000000000000000000000","difficulty":"0","estimatedactivebs":"0","height":0,"maturitytimestamp":0,"target":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"totalcoins":"0","arbitrarydatatotalsize":0,"minerpayoutcount":0,"transactioncount":0,"coininputcount":0,"coinoutputcount":0,"blockstakeinputcount":0,"blockstakeoutputcount":0,"minerfeecount":0,"arbitrarydatacount":0},"blocks":null,"transaction":{"id":"0000000000000000000000000000000000000000000000000000000000000000","height":0,"parent":"0000000000000000000000000000000000000000000000000000000000000000","rawtransaction":{"version":0,"data":{"coininputs":[],"minerfees":null}},"coininputoutputs":null,"coinoutputids":null,"coinoutputunlockhashes":null,"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},"transactions":[{"id":"4a7ac7930379675c82d0462a86e6d6f4018bdb2bdabaf49f4c177b8de19b4e7c","height":16930,"parent":"c25f345403080b8372a38f66608aa5a2287bdc61b82efe5ee6503ce85e8bcd35","rawtransaction":{"version":1,"data":{"coininputs":[{"parentid":"753aaeaa0c9e6c9f1f8da1974c83d8ca067ad536f464a2e2fc038bbd0404d084","fulfillment":{"type":1,"data":{"publickey":"ed25519:e4f55bc46b5feb37c03a0faa2d624a9ee1d0deb5059aaa9625d8b4f60f29bcab","signature":"b5081e41797f53233c727c344698400a73f2cdd364e241df915df413d3eeafb425ce9b51de3731bcbf830c399a706f4d24ae7066f947a4a36ae1b25415bcde00"}}}],"coinoutputs":[{"value":"50000000000","condition":{"type":2,"data":{"sender":"01b73c4e869b6167abe6180ebe7a907f56e0357b4a2f65eb53d22baad84650eb62fce66ba036d0","receiver":"01746b199781ea316a44183726f81e0734d93e7cefc18e9a913989821100aafa33e6eb7343fa8c","hashedsecret":"4163d4b31a1708cd3bb95a0a8117417bdde69fd1132909f92a8ec1e3fe2ccdba","timelock":1549736249}}}],"minerfees":["1000000000"]}},"coininputoutputs":[{"value":"51000000000","condition":{"type":1,"data":{"unlockhash":"01b73c4e869b6167abe6180ebe7a907f56e0357b4a2f65eb53d22baad84650eb62fce66ba036d0"}},"unlockhash":"01b73c4e869b6167abe6180ebe7a907f56e0357b4a2f65eb53d22baad84650eb62fce66ba036d0"}],"coinoutputids":["023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890"],"coinoutputunlockhashes":["02fb27c67c373c2f30611e0b98bf92ed6e6eb0a69b471457b282903945180cd5c5b8068731f767"],"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false}],"multisigaddresses":null,"unconfirmed":false}')
# one can verify it all manually
contract = w.atomicswap.verify('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890')
assert contract.outputid == '023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890'
assert contract.amount == '50 TFT'
assert contract.refund_timestamp == 1549736249
assert contract.sender == '01b73c4e869b6167abe6180ebe7a907f56e0357b4a2f65eb53d22baad84650eb62fce66ba036d0'
assert contract.receiver == '01746b199781ea316a44183726f81e0734d93e7cefc18e9a913989821100aafa33e6eb7343fa8c'
assert contract.secret_hash == '4163d4b31a1708cd3bb95a0a8117417bdde69fd1132909f92a8ec1e3fe2ccdba'
# the amount can however be verified automatically
w.atomicswap.verify('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890', amount=50)
# which will fail if the amount is wrong
with pytest.raises(tfchain.errors.AtomicSwapContractInvalid):
w.atomicswap.verify('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890', amount=42)
# the secret hash can be verified as well, not so important as the sender,
# would be more used if one is the receiver, but it is possible none the less.
w.atomicswap.verify('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890', secret_hash='4163d4b31a1708cd3bb95a0a8117417bdde69fd1132909f92a8ec1e3fe2ccdba')
# which will fail if the secret hash is wrong
with pytest.raises(tfchain.errors.AtomicSwapContractInvalid):
w.atomicswap.verify('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890', secret_hash='4163d4b31a1708cd3bb95a0a8117417bdde69fd1132909f92a8ec1e3fe2ccdbb')
# a minimum duration can also be defined, where the duration defines how long it takes until the
# contract becomes refundable, 0 if already assumed to be refundable
w.atomicswap.verify('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890', min_refund_time='+1d')
# which will fail if assumed wrong
with pytest.raises(tfchain.errors.AtomicSwapContractInvalid):
w.atomicswap.verify('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890', min_refund_time=0)
# if one is assumed to be the sender, it can also be verified automatically
w.atomicswap.verify('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890', sender=True)
# if one assumed its position wrong, it will however fail
with pytest.raises(tfchain.errors.AtomicSwapContractInvalid):
w.atomicswap.verify('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890', receiver=True)
# all can be verified at once of course
w.atomicswap.verify('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890',
amount=50, secret_hash='4163d4b31a1708cd3bb95a0a8117417bdde69fd1132909f92a8ec1e3fe2ccdba',
min_refund_time='+1d', sender=True)
# once the refund time has been reached, it does become refundable, and min_refund_time=0 should validate correctly
explorer_client.hash_add('5c86c987668ca47948a149413f4f004651249073eff4f144fd26b50e218705a8', '{"hashtype":"blockid","block":{"minerpayoutids":["84b378d60cbdd78430b39c8eddf226119b6f28256388557dd15f0b046bf3c3ed"],"transactions":[{"id":"9aec9f849e35f0bdd14c5ea9daed20c8fbfa09f5a6771bb46ce787eb7e2b00a0","height":16639,"parent":"5c86c987668ca47948a149413f4f004651249073eff4f144fd26b50e218705a8","rawtransaction":{"version":1,"data":{"coininputs":null,"blockstakeinputs":[{"parentid":"144b2b7711fda335cdae5865ab3729d641266087bc4e088d9fba806345045903","fulfillment":{"type":1,"data":{"publickey":"ed25519:d285f92d6d449d9abb27f4c6cf82713cec0696d62b8c123f1627e054dc6d7780","signature":"f09af1c62026aed18d1d8f80e5a7bd4947a6cb5b6b69097c5b10cb983f0d729662c511a4852fa63690884e2b5c600e3935e08b81aaa757d9f0eb740292ec8309"}}}],"blockstakeoutputs":[{"value":"3000","condition":{"type":1,"data":{"unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}}}],"minerfees":null}},"coininputoutputs":null,"coinoutputids":null,"coinoutputunlockhashes":null,"blockstakeinputoutputs":[{"value":"3000","condition":{"type":1,"data":{"unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}},"unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}],"blockstakeoutputids":["83aa29b3e77f703526e28fbc0d2bfcf2b66c06b665e11cb5535b9575fd0e8105"],"blockstakeunlockhashes":["015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"],"unconfirmed":false}],"rawblock":{"parentid":"8485f94209bf3e01ed169244ab2072ebb0d1c5dc589c95b39a3fbab3641b7a7e","timestamp":1549791703,"pobsindexes":{"BlockHeight":16638,"TransactionIndex":0,"OutputIndex":0},"minerpayouts":[{"value":"10000000000","unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}],"transactions":[{"version":1,"data":{"coininputs":null,"blockstakeinputs":[{"parentid":"144b2b7711fda335cdae5865ab3729d641266087bc4e088d9fba806345045903","fulfillment":{"type":1,"data":{"publickey":"ed25519:d285f92d6d449d9abb27f4c6cf82713cec0696d62b8c123f1627e054dc6d7780","signature":"f09af1c62026aed18d1d8f80e5a7bd4947a6cb5b6b69097c5b10cb983f0d729662c511a4852fa63690884e2b5c600e3935e08b81aaa757d9f0eb740292ec8309"}}}],"blockstakeoutputs":[{"value":"3000","condition":{"type":1,"data":{"unlockhash":"015a080a9259b9d4aaa550e2156f49b1a79a64c7ea463d810d4493e8242e6791584fbdac553e6f"}}}],"minerfees":null}}]},"blockid":"5c86c987668ca47948a149413f4f004651249073eff4f144fd26b50e218705a8","difficulty":"30203","estimatedactivebs":"2365","height":16639,"maturitytimestamp":1549646167,"target":[0,2,43,120,39,20,204,42,102,32,125,110,53,77,39,71,99,124,13,223,197,154,115,42,126,62,185,120,208,177,21,190],"totalcoins":"0","arbitrarydatatotalsize":4328,"minerpayoutcount":16721,"transactioncount":17262,"coininputcount":633,"coinoutputcount":1225,"blockstakeinputcount":16639,"blockstakeoutputcount":16640,"minerfeecount":622,"arbitrarydatacount":572},"blocks":null,"transaction":{"id":"0000000000000000000000000000000000000000000000000000000000000000","height":0,"parent":"0000000000000000000000000000000000000000000000000000000000000000","rawtransaction":{"version":0,"data":{"coininputs":[],"minerfees":null}},"coininputoutputs":null,"coinoutputids":null,"coinoutputunlockhashes":null,"blockstakeinputoutputs":null,"blockstakeoutputids":null,"blockstakeunlockhashes":null,"unconfirmed":false},"transactions":null,"multisigaddresses":null,"unconfirmed":false}', force=True)
# we should be able to refund at this point
w.atomicswap.verify('023b1c17a01945573933e62ca7a1297057681622aaea52c4c4e198077a263890',
amount=50, secret_hash='4163d4b31a1708cd3bb95a0a8117417bdde69fd1132909f92a8ec1e3fe2ccdba',
min_refund_time=0, sender=True)
|
nilq/baby-python
|
python
|
from collections import defaultdict
from datetime import datetime
from schemas import Task, TaskStatus
tasks_db = defaultdict(lambda: defaultdict(dict))
def current_datetime_str():
now = datetime.now()
day_mon_date = now.strftime("%a, %b, %d")
today = now.strftime('%Y%m%d')
    # zero-padded hour/minute ("%H"/"%M") keep string comparisons against the
    # zero-padded slot keys correct; "%-H"/"%-M" are also not portable to Windows
    hr = now.strftime("%H")
    mnt = now.strftime("%M")
apm = now.strftime("%p")
return {
"today": today,
'day_mon_date': day_mon_date,
"hr": hr,
"mnt": mnt,
"apm": apm
}
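# Slots are 8-character 'HHMMHHMM' strings (zero-padded start and end times on
# the hour), so lexicographic comparisons against the zero-padded hour work.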
def update_today_slots():
cds = current_datetime_str()
today_tasks = tasks_db.get(cds['today'], {})
for slot, task_dict in today_tasks.get('booked', {}).items():
# Mark elapsed tasks
if slot[4:6] < cds['hr']:
task_dict['status'] = TaskStatus.MISSED
# Mark inprogress tasks
elif slot[:2] < cds['hr']:
task_dict['status'] = TaskStatus.IN_PROGRESS
free_slots = [slot for slot in today_tasks.get('free', []) if slot[4:6] >= cds['hr']]
if free_slots == []:
# first_time
print(f"Creating slots since I got {today_tasks.get('free')}")
        free_slots = [f'{hr:02d}00{hr + 1:02d}00' for hr in range(int(cds['hr']) + 1, 24)]
tasks_db[cds['today']]['free'] = free_slots
return cds
def get_today_bookings():
timestamp = update_today_slots()
return tasks_db[timestamp['today']]
def book_appointment(task: Task):
timestamp = update_today_slots()
today_calendar = tasks_db[timestamp['today']]
booked_slots = today_calendar['booked']
free_slots = today_calendar['free']
# booked_tasks = [info.get('name') for slot, info in booked_slots.items()]
    # Iterate over a copied slice: removing entries from the live 'free' list
    # while indexing into it with a counter would skip slots
    for slot in free_slots[:task.effort]:
        tasks_db[timestamp['today']]['booked'][slot] = {"name": task.name,
                                                        "status": task.status}
        tasks_db[timestamp['today']]['free'].remove(slot)
return booked_slots
|
nilq/baby-python
|
python
|
from django import forms
from .models import User
class StudentRegistration(forms.ModelForm):
class Meta:
model=User
fields=['name','email','password']
widgets={
'name':forms.TextInput(attrs={'class':'form-control'}),
'email':forms.EmailInput(attrs={'class':'form-control'}),
'password':forms.PasswordInput(attrs={'class':'form-control'}),
}
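# A minimal usage sketch (hypothetical view code, not part of this module):
#
#   form = StudentRegistration(request.POST)
#   if form.is_valid():
#       form.save()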
|
nilq/baby-python
|
python
|
# -*- coding:utf8 -*-
""" SCI - Simple C Interpreter """
from ..lexical_analysis.token_type import ID
from ..lexical_analysis.token_type import XOR_OP, AND_OP, ADD_OP, ADDL_OP, SUB_OP, MUL_OP
from ..lexical_analysis.token_type import NOT_OP, NEG_OP, DEC_OP, INC_OP
from ..lexical_analysis.token_type import LEA_OP
from ..lexical_analysis.token_type import SHL_OP, SHR_OP
from ..lexical_analysis.token_type import CMP_OP, CMPL_OP, CMPB_OP, TEST
from ..lexical_analysis.token_type import JL, JG, JGE, JLE, JE, JNE, JMP, JMPQ
from ..lexical_analysis.token_type import POP, POPQ, PUSH, PUSHQ, MOV, MOVL
from ..lexical_analysis.token_type import CALLQ, HLT, RETQ
from ..lexical_analysis.token_type import NOP, NOPW, NOPL, XCHG, DATA16_OP
from ..lexical_analysis.token_type import REGISTER
from ..lexical_analysis.token_type import COMMA, DOLLAR, LPAREN, RPAREN, NUMBER, ASTERISK
from .tree import *
class ProgrammSyntaxError(Exception):
""" A syntax error in the assembly program. """
def error(message):
""" An error message. """
raise ProgrammSyntaxError(message)
class Parser():
""" The effective Assembly parser, which relies on the lexer. """
def __init__(self, lexer):
self.lexer = lexer
self.current_token_line = []
self.current_token = None
def eat(self, token_type):
""" Compare the current token type with the passed token
type and if they match then "eat" the current token
and assign the next token to the self.current_token,
otherwise raise an exception. """
if self.current_token.type == token_type and self.current_token_line:
self.current_token_line.pop(0)
if self.current_token_line:
self.current_token = self.current_token_line[0]
return True
return False
error(
'Expected token <{}> but found <{}> at line {}.'.format(
token_type, self.current_token.type, self.lexer.line
)
)
def program(self):
"""
program : declarations
"""
root = Program(
sections=self.sections(),
line=self.lexer.line,
prog_counter=0
)
return root
def sections(self):
"""
sections : section+
"""
sections = []
for section in self.lexer.sections:
sections.append(self.section(section))
return sections
def section(self, section):
"""
section : NUM ID operations+
"""
num = section.start_addr
name = section.name
content = self.operations(section.operations)
return Section(
name=name,
prog_counter=int(num.value, 16),
content=content,
line=section.file_line,
)
def operations(self, operations):
"""
operations : operation+
"""
result = []
for operation in operations:
line = operation.line
prog_counter = int(operation.pc.value, 16)
self.current_token_line = operation.tokens[1:]
oper = self.operation(prog_counter=prog_counter, line=line)
if oper:
result.append(oper)
return result
def operation(self, prog_counter, line):
"""
operation : operator addr_expression{,2}
"""
self.current_token = self.current_token_line[0]
if self.current_token.type is CALLQ:
return self.callqop(prog_counter, line)
if self.current_token.type in [SUB_OP, XOR_OP, AND_OP, ADD_OP, ADDL_OP,
SHL_OP, TEST]:
return self.binop(prog_counter, line)
if self.current_token.type is MUL_OP:
return self.ternaryop(prog_counter, line)
if self.current_token.type in [NOT_OP, NEG_OP, DEC_OP, INC_OP]:
return self.unop(prog_counter, line)
if self.current_token.type is LEA_OP:
return self.binop(prog_counter, line)
if self.current_token.type in [JL, JG, JGE, JLE, JE, JNE, JMP, JMPQ]:
return self.jmpop(prog_counter, line)
if self.current_token.type in [CMP_OP, CMPL_OP, CMPB_OP]:
return self.cmpop(prog_counter, line)
if self.current_token.type in [POP, POPQ, PUSH, PUSHQ]:
return self.stackop(prog_counter, line)
if self.current_token.type in [MOV, MOVL]:
return self.movop(prog_counter, line)
if self.current_token.type in [NOP, NOPW, NOPL, DATA16_OP]:
return self.noop(prog_counter, line)
if self.current_token.type is XCHG:
return self.xchgop(prog_counter, line)
if self.current_token.type is HLT:
return self.hltop(prog_counter, line)
if self.current_token.type is RETQ:
return self.retqop(prog_counter, line)
if self.current_token.type is ID:
return None
error("Unkown operation {} at line {}"
.format(self.current_token, line)
)
def callqop(self, prog_counter, line):
"""
callqop : CALLQ ADDR
"""
operation = self.current_token
self.eat(operation.type)
if self.current_token_line:
call_addr = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
error("incompatible operand with callq operator at line {}"
.format(line))
else:
error("incompatible operand with callq operator at line {}"
.format(self.lexer.line))
return CallQOp(
call_addr=call_addr,
            ret_addr=str(prog_counter + 0x8),  # prog_counter is already an int here
prog_counter=prog_counter,
line=line
)
def binop(self, prog_counter, line):
"""
binqop : BINOP ADDR COMMA ADDR
"""
operation = self.current_token
self.eat(operation.type)
left = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
self.eat(COMMA)
else:
error("Incompatible Operand {} with binary operator {} at line{}"
.format(left, operation.value, line)
)
return BinOp(
left=left,
op=operation,
right=self.addr_expression(prog_counter, line),
prog_counter=prog_counter,
line=line
)
def ternaryop(self, prog_counter, line):
"""
ternaryop : BINOP ADDR COMMA ADDR (COMMA ADDR)?
"""
operation = self.current_token
self.eat(operation.type)
left = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
self.eat(COMMA)
else:
error("Incompatible Operand {} with binary operator {} at line{}"
.format(left, operation.value, line)
)
middle = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
self.eat(COMMA)
right = self.addr_expression(prog_counter, line)
return TernOp(
left=left,
op=operation,
middle=middle,
right=right,
prog_counter=prog_counter,
line=line
)
else:
return BinOp(
left=left,
op=operation,
right=middle,
prog_counter=prog_counter,
line=line
)
def unop(self, prog_counter, line):
"""
unop : UNOP ADDR
"""
operation = self.current_token
self.eat(operation.type)
operand = self.addr_expression(prog_counter, line)
return UnOp(
operand=operand,
op=operation,
prog_counter=prog_counter,
line=line
)
def jmpop(self, prog_counter, line):
"""
jmpop : JMPOP ADDR
"""
operation = self.current_token
self.eat(operation.type)
addr = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
error("Incompatible operand with jump operator {} at line{}"
.format(operation.value, line)
)
return JmpStmt(
op=operation,
jmpaddr=addr,
line=line,
prog_counter=prog_counter
)
def cmpop(self, prog_counter, line):
"""
cmpop : CMPOP ADDR COMMA ADDR
"""
operation = self.current_token
self.eat(operation.type)
left = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
self.eat(COMMA)
else:
error("Incompatible operands with binary operator {} at line{}"
.format(operation.value, line)
)
return CmpOp(
op=operation,
left=left,
right=self.addr_expression(prog_counter, line),
line=line,
prog_counter=prog_counter
)
def stackop(self, prog_counter, line):
"""
stackop : STACKOP ADDR
"""
operation = self.current_token
self.eat(operation.type)
addr = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
error("Incompatible operand with stack operator {} at line{}"
.format(operation.value, line)
)
return StackOp(
op=operation,
expr=addr,
line=line,
prog_counter=prog_counter
)
def movop(self, prog_counter, line):
"""
movop : MOVOP ADDR COMMA ADDR
"""
operation = self.current_token
self.eat(operation.type)
left = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
self.eat(COMMA)
else:
error("Incompatible operand with operator {} at line {}:{}"
.format(operation.value, line, self.current_token.value)
)
return MovOp(
left=left,
op=operation,
right=self.addr_expression(prog_counter, line),
prog_counter=prog_counter,
line=line
)
def noop(self, prog_counter, line):
"""
noop : NOP
"""
operation = self.current_token
self.eat(operation.type)
if self.current_token_line:
_ = self.addr_expression(prog_counter, line)
return NullOp(
op=operation,
line=line,
prog_counter=prog_counter
)
def xchgop(self, prog_counter, line):
"""
xchgop : XCHG ADDR COMMA ADDR
"""
operation = self.current_token
self.eat(operation.type)
left = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
self.eat(COMMA)
else:
error("Incompatible Operand {} with binary operator xchg at line{}"
.format(left, line)
)
return XchgOp(
left=left,
op=operation,
right=self.addr_expression(prog_counter, line),
prog_counter=prog_counter,
line=line
)
def hltop(self, prog_counter, line):
"""
hltop : HLT
"""
operation = self.current_token
res = self.eat(operation.type)
if not res:
_ = self.addr_expression(prog_counter, line)
return NullOp(
op=operation,
prog_counter=prog_counter,
line=line,
)
def retqop(self, prog_counter, line):
"""
retqop : RETQ
"""
operation = self.current_token
self.eat(operation.type)
if self.current_token_line:
_ = self.addr_expression(prog_counter, line)
return NullOp(
op=operation,
prog_counter=prog_counter,
line=line,
)
def addr_expression(self, prog_counter, line):
"""
addr_exp : <HARD STUFF>
"""
if self.current_token.type is DOLLAR:
self.eat(DOLLAR)
if self.current_token.type is NUMBER:
token = self.current_token
self.eat(NUMBER)
return AddrExpression(token, prog_counter, line)
error("Invalid offset at line %s" % line)
if self.current_token.type is REGISTER:
token = self.current_token
self.eat(REGISTER)
return Register(token, prog_counter, line)
if self.current_token.type is NUMBER:
token = self.current_token
self.eat(NUMBER)
if self.current_token.type is LPAREN:
self.eat(LPAREN)
register = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
self.eat(COMMA)
second_reg = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
self.eat(COMMA)
number = AddrExpression(self.current_token,
prog_counter=prog_counter,
line=line)
self.eat(NUMBER)
self.eat(RPAREN)
return TernaryAddrExpression(
token=token,
reg_1=register,
reg_2=second_reg,
offset=number,
prog_counter=prog_counter,
line=line
)
error("Wrong compound expression")
self.eat(RPAREN)
return CompoundAddrExpression(
token,
AddrExpression(token, prog_counter, line),
register,
prog_counter,
line
)
return AddrExpression(token, prog_counter, line)
if self.current_token.type is ASTERISK:
token = self.current_token
self.eat(ASTERISK)
compound = self.addr_expression(prog_counter, line)
return CompoundAddrExpression(
token,
AddrExpression(token.value, prog_counter, line),
compound,
prog_counter,
line
)
if self.current_token.type is LPAREN:
self.eat(LPAREN)
register = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
token = self.current_token
self.eat(COMMA)
second_reg = self.addr_expression(prog_counter, line)
if self.current_token.type is COMMA:
self.eat(COMMA)
number = AddrExpression(self.current_token,
prog_counter=prog_counter,
line=line)
self.eat(NUMBER)
self.eat(RPAREN)
return TernaryAddrExpression(
token=token,
reg_1=register,
reg_2=second_reg,
offset=number,
prog_counter=prog_counter,
line=line
)
error("Wrong compound expression")
self.eat(RPAREN)
def parse(self):
"""
program : declarations
declarations : declaration operations+
declaration : NUMBER ID
operations : operation | stmt
operation : unop | binop | nullop | noop | stackop | functioncall
stmt : jmpstmt | retstmt
"""
node = self.program()
return node
|
nilq/baby-python
|
python
|
#=========================================================================
# helpers.py
#=========================================================================
# Author : Christopher Torng
# Date : June 2, 2019
#
import os
import yaml
#-------------------------------------------------------------------------
# Utility functions
#-------------------------------------------------------------------------
# get_top_dir
#
# Returns the path to the top directory containing the flag
#
# - flag : a filename that marks the top of the tree
# - relative : boolean, return relative path to current working directory
#
def get_top_dir( flag='.MFLOWGEN_TOP', relative=True ):
try:
return os.environ[ 'MFLOWGEN_HOME' ]
except KeyError:
tmp = os.getcwd()
while tmp != '/':
tmp = os.path.dirname( tmp )
if flag in os.listdir( tmp ):
break
if not relative:
return tmp
else:
return os.path.relpath( tmp, os.getcwd() )
# get_files_in_dir
#
# Returns a list of all files in the directory tree
#
# - p : path to a directory
#
def get_files_in_dir( p ):
file_list = []
for root, subfolders, files in os.walk( p ):
for f in files:
file_list.append( os.path.join( root, f ) )
return file_list
# stamp
#
# Returns a path with the basename prefixed with '.stamp.'
#
# - p : path to a file or directory
#
def stamp( p, stamp='.stamp.' ):
p_dirname = os.path.dirname( p )
p_basename = os.path.basename( p )
p_stamp = stamp + p_basename
if p_dirname : return p_dirname + '/' + p_stamp
else : return p_stamp
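# e.g. stamp('build/foo.txt') -> 'build/.stamp.foo.txt'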
#-------------------------------------------------------------------------
# YAML helper functions
#-------------------------------------------------------------------------
# read_yaml
#
# Takes a path to a yaml file and returns the data
#
def read_yaml( path ):
with open( path ) as f:
try:
data = yaml.load( f, Loader=yaml.FullLoader )
except AttributeError:
# PyYAML for python2 does not have FullLoader
data = yaml.load( f )
return data
# write_yaml
#
# Takes a path to a file and dumps data
#
def write_yaml( data, path ):
with open( path, 'w' ) as f:
yaml.dump( data, f, default_flow_style=False )
#-------------------------------------------------------------------------
# Colors
#-------------------------------------------------------------------------
RED = '\033[31m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BOLD = '\033[1m'
END = '\033[0m'
def bold( text ):
return BOLD + text + END
def red( text ):
return RED + text + END
def green( text ):
return GREEN + text + END
def yellow( text ):
return YELLOW + text + END
|
nilq/baby-python
|
python
|
from engine import Engine
from engine import get_engine
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#----------------------------------------------------------------------
# Copyright (c) 2008 Board of Trustees, Princeton University
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
import os, sys
import traceback
import logging, logging.handlers
CRITICAL=logging.CRITICAL
ERROR=logging.ERROR
WARNING=logging.WARNING
INFO=logging.INFO
DEBUG=logging.DEBUG
# a logger that can handle tracebacks
class _SfaLogger:
def __init__ (self,logfile=None,loggername=None,level=logging.INFO):
# default is to locate loggername from the logfile if avail.
if not logfile:
#loggername='console'
#handler=logging.StreamHandler()
#handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
logfile = "/var/log/sfa.log"
if not loggername:
loggername=os.path.basename(logfile)
try:
handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=5)
except IOError:
            # This is usually a permissions error because the file is
# owned by root, but httpd is trying to access it.
tmplogfile=os.getenv("TMPDIR", "/tmp") + os.path.sep + os.path.basename(logfile)
# In strange uses, 2 users on same machine might use same code,
# meaning they would clobber each others files
# We could (a) rename the tmplogfile, or (b)
# just log to the console in that case.
# Here we default to the console.
if os.path.exists(tmplogfile) and not os.access(tmplogfile,os.W_OK):
loggername = loggername + "-console"
handler = logging.StreamHandler()
else:
handler=logging.handlers.RotatingFileHandler(tmplogfile,maxBytes=1000000, backupCount=5)
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
self.logger=logging.getLogger(loggername)
self.logger.setLevel(level)
# check if logger already has the handler we're about to add
handler_exists = False
for l_handler in self.logger.handlers:
if l_handler.baseFilename == handler.baseFilename and \
l_handler.level == handler.level:
handler_exists = True
if not handler_exists:
self.logger.addHandler(handler)
self.loggername=loggername
def setLevel(self,level):
self.logger.setLevel(level)
# shorthand to avoid having to import logging all over the place
def setLevelDebug(self):
self.logger.setLevel(logging.DEBUG)
# define a verbose option with s/t like
# parser.add_option("-v", "--verbose", action="count", dest="verbose", default=0)
# and pass the coresponding options.verbose to this method to adjust level
def setLevelFromOptVerbose(self,verbose):
if verbose==0:
self.logger.setLevel(logging.WARNING)
elif verbose==1:
self.logger.setLevel(logging.INFO)
elif verbose>=2:
self.logger.setLevel(logging.DEBUG)
# in case some other code needs a boolean
def getBoolVerboseFromOpt(self,verbose):
return verbose>=1
####################
def info(self, msg):
self.logger.info(msg)
def debug(self, msg):
self.logger.debug(msg)
def warn(self, msg):
self.logger.warn(msg)
# some code is using logger.warn(), some is using logger.warning()
def warning(self, msg):
self.logger.warning(msg)
def error(self, msg):
self.logger.error(msg)
def critical(self, msg):
self.logger.critical(msg)
# logs an exception - use in an except statement
def log_exc(self,message):
self.error("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"))
self.error("%s END TRACEBACK"%message)
def log_exc_critical(self,message):
self.critical("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"))
self.critical("%s END TRACEBACK"%message)
# for investigation purposes, can be placed anywhere
def log_stack(self,message):
to_log="".join(traceback.format_stack())
self.info("%s BEG STACK"%message+"\n"+to_log)
self.info("%s END STACK"%message)
def enable_console(self, stream=sys.stdout):
formatter = logging.Formatter("%(message)s")
handler = logging.StreamHandler(stream)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
info_logger = _SfaLogger(loggername='info', level=logging.INFO)
debug_logger = _SfaLogger(loggername='debug', level=logging.DEBUG)
warn_logger = _SfaLogger(loggername='warning', level=logging.WARNING)
error_logger = _SfaLogger(loggername='error', level=logging.ERROR)
critical_logger = _SfaLogger(loggername='critical', level=logging.CRITICAL)
logger = info_logger
sfi_logger = _SfaLogger(logfile=os.path.expanduser("~/.sfi/")+'sfi.log',loggername='sfilog', level=logging.DEBUG)
########################################
import time
def profile(logger):
"""
Prints the runtime of the specified callable. Use as a decorator, e.g.,
@profile(logger)
def foo(...):
...
"""
def logger_profile(callable):
def wrapper(*args, **kwds):
start = time.time()
result = callable(*args, **kwds)
end = time.time()
args = map(str, args)
args += ["%s = %s" % (name, str(value)) for (name, value) in kwds.iteritems()]
# should probably use debug, but then debug is not always enabled
logger.info("PROFILED %s (%s): %.02f s" % (callable.__name__, ", ".join(args), end - start))
return result
return wrapper
return logger_profile
if __name__ == '__main__':
print 'testing sfalogging into logger.log'
logger1=_SfaLogger('logger.log', loggername='std(info)')
logger2=_SfaLogger('logger.log', loggername='error', level=logging.ERROR)
logger3=_SfaLogger('logger.log', loggername='debug', level=logging.DEBUG)
for (logger,msg) in [ (logger1,"std(info)"),(logger2,"error"),(logger3,"debug")]:
print "====================",msg, logger.logger.handlers
logger.enable_console()
logger.critical("logger.critical")
logger.error("logger.error")
logger.warn("logger.warning")
logger.info("logger.info")
logger.debug("logger.debug")
logger.setLevel(logging.DEBUG)
logger.debug("logger.debug again")
@profile(logger)
def sleep(seconds = 1):
time.sleep(seconds)
logger.info('console.info')
sleep(0.5)
logger.setLevel(logging.DEBUG)
sleep(0.25)
|
nilq/baby-python
|
python
|
import threading
import time
import queue
EXIT_FLAG = 0
class exampleThread(threading.Thread):
def __init__(self, threadID, name, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.q = q
def run(self):
print("Starting ", self.name)
process_data(self.name, self.q)
print("Exiting ", self.name)
def process_data(threadName, q):
while not EXIT_FLAG:
lock.acquire()
        if not q.empty():  # q is the shared wordsQueue passed to each thread
data = q.get()
lock.release()
print("%s processing %s" % (threadName, data))
time.sleep(1)
else:
lock.release()
time.sleep(1)
threadList = ["Thread-1", "Thread-2", "Thread-3"]
nameList = ["One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight"]
lock = threading.Lock()
wordsQueue = queue.Queue(10)
threads = []
threadID = 1
for thread_name in threadList:
thread = exampleThread(threadID, thread_name, wordsQueue)
thread.start()
threads.append(thread)
threadID += 1
lock.acquire()
for word in nameList:
wordsQueue.put(word)
lock.release()
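# Busy-wait until the workers drain the queue (simple but CPU-hungry; a
# task_done()/join() pattern on the Queue would be the idiomatic alternative)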
while not wordsQueue.empty():
pass
EXIT_FLAG = 1
for t in threads:
t.join()
print("Exiting Main thread")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import math
import pytest
import bayeslite
from bayeslite.guess import bayesdb_guess_population
from bayeslite.guess import bayesdb_guess_stattypes
from bayeslite.exception import BQLError
from bayeslite.metamodels.crosscat import CrosscatMetamodel
import crosscat.LocalEngine
def test_guess_stattypes():
n = ['a', 'b']
a_z = range(ord('a'), ord('z') + 1)
rows = [[chr(c), c % 2] for c in a_z]
with pytest.raises(ValueError):
# Duplicate column names.
bayesdb_guess_stattypes(['a', 'a'], rows)
with pytest.raises(ValueError):
# Too many columns in data.
bayesdb_guess_stattypes(['a'], rows)
with pytest.raises(ValueError):
# Too few columns in data.
bayesdb_guess_stattypes(['a', 'b', 'c'], rows)
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows)] == ['key', 'nominal']
rows = [[chr(c), c % 2] for c in a_z] + [['q', ord('q') % 2]]
# Ignore the first column, rather than calling it nominal, because
# it's almost entirely unique, so one category cannot say much about others.
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows)] == ['ignore', 'nominal']
rows = [[c % 2, chr(c)] for c in a_z]
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows)] == ['nominal', 'key']
rows = [[c % 2, chr(c)] for c in a_z] + [[0, 'k']]
# Ignore the second column because it is almost unique, as above.
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows)] == ['nominal', 'ignore']
rows = [[chr(c), i] for i, c in enumerate(a_z)]
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows)] == ['key', 'numerical']
rows = [[chr(c), math.sqrt(i)] for i, c in enumerate(a_z)]
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows)] == ['key', 'numerical']
rows = [[chr(c) + chr(d), isqrt(i)] for i, (c, d)
in enumerate(itertools.product(a_z, a_z))]
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows)] == ['key', 'numerical']
rows = [[chr(c) + chr(d) + chr(e), isqrt(i)] for i, (c, d, e)
in enumerate(itertools.product(a_z, a_z, a_z))]
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows)] == ['key', 'nominal']
rows = [[i, chr(c)] for i, c in enumerate(a_z)]
# second field is unique, and we already have a key.
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows)] == ['key', 'ignore']
rows = [[isqrt(i), chr(c) + chr(d)] for i, (c, d)
in enumerate(itertools.product(a_z, a_z))]
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows)] == ['numerical', 'key']
rows = [[isqrt(i), chr(c) + chr(d) + chr(e)] for i, (c, d, e)
in enumerate(itertools.product(a_z, a_z, a_z))]
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows)] == ['nominal', 'key']
with pytest.raises(ValueError):
# Nonunique key.
bayesdb_guess_stattypes(n, rows, overrides=[('a', 'key')])
with pytest.raises(ValueError):
# Two keys.
bayesdb_guess_stattypes(n, rows,
overrides=[('a', 'key'), ('b', 'key')])
with pytest.raises(ValueError):
# No such column.
bayesdb_guess_stattypes(n, rows, overrides=[('c', 'numerical')])
with pytest.raises(ValueError):
# Column overridden twice.
bayesdb_guess_stattypes(n, rows,
overrides=[('a', 'key'), ('a', 'ignore')])
with pytest.raises(ValueError):
# Column overridden twice, even to the same stattype.
bayesdb_guess_stattypes(n, rows,
overrides=[('a', 'key'), ('a', 'key')])
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows, overrides=[('b', 'key')])] == \
['nominal', 'key']
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows, overrides=[('b', 'ignore')])] == \
['nominal', 'ignore']
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows, overrides=[('a', 'numerical')])] \
== ['numerical', 'key']
rows = [['none' if c < ord('m') else c, chr(c)] for c in a_z]
# Nullify 'none' because it is in the nullify list.
# Categorical because <20 remaining.
assert [st[0] for st in
bayesdb_guess_stattypes(n, rows)] == ['nominal', 'key']
rows = [[3 if c < ord('y') else 5, chr(c)] for c in a_z]
# Nullify 3 because it holds so many of the values.
# Ignore because <2 remaining.
assert [st[0] for st in bayesdb_guess_stattypes(n, rows)] == \
['ignore', 'key']
# Ensure columns of unique floats are only taken to be keys when they are
# integer-valued, not otherwise.
rows = [[math.sqrt(c), c + 0.5] for c in a_z]
assert [st[0] for st in bayesdb_guess_stattypes(n, rows)] == \
['numerical', 'numerical']
rows = [[c + 0.5, float(c)] for c in a_z]
assert [st[0] for st in bayesdb_guess_stattypes(n, rows)] == \
['numerical', 'key']
# A column with a mix of ints and non-integer-valued floats should be
# numerical.
rows = [[c + 0.5, float(c + 0.5) if c % 2 == 0 else int(c)] for c in a_z]
assert [st[0] for st in bayesdb_guess_stattypes(n, rows)] == \
['numerical', 'numerical']
def test_guess_population():
bdb = bayeslite.bayesdb_open(builtin_metamodels=False)
bdb.sql_execute('CREATE TABLE t(x NUMERIC, y NUMERIC, z NUMERIC)')
a_z = range(ord('a'), ord('z') + 1)
aa_zz = ((c, d) for c in a_z for d in a_z)
data = ((chr(c) + chr(d), (c + d) % 2, math.sqrt(c + d)) for c, d in aa_zz)
for row in data:
bdb.sql_execute('INSERT INTO t (x, y, z) VALUES (?, ?, ?)', row)
cc = crosscat.LocalEngine.LocalEngine(seed=0)
metamodel = CrosscatMetamodel(cc)
bayeslite.bayesdb_register_metamodel(bdb, metamodel)
with pytest.raises(ValueError):
# No modelled columns. (x is key.)
bayesdb_guess_population(bdb, 'p', 't',
overrides=[('y', 'ignore'), ('z', 'ignore')])
bayesdb_guess_population(bdb, 'p', 't')
with pytest.raises(ValueError):
# Population already exists.
bayesdb_guess_population(bdb, 'p', 't')
assert bdb.sql_execute('SELECT * FROM bayesdb_variable').fetchall() == [
(1, None, 1, 'y', 'nominal'),
(1, None, 2, 'z', 'numerical'),
]
def test_guess_schema():
bdb = bayeslite.bayesdb_open(builtin_metamodels=False)
bdb.sql_execute('CREATE TABLE t(x NUMERIC, y NUMERIC, z NUMERIC)')
a_z = range(ord('a'), ord('z') + 1)
aa_zz = ((c, d) for c in a_z for d in a_z)
data = ((chr(c) + chr(d), (c + d) % 2, math.sqrt(c + d)) for c, d in aa_zz)
for row in data:
bdb.sql_execute('INSERT INTO t (x, y, z) VALUES (?, ?, ?)', row)
with pytest.raises(BQLError):
bdb.execute('GUESS SCHEMA FOR non_existant_table')
guess = bdb.execute('GUESS SCHEMA FOR t')
assert len(guess.description) == 4
assert guess.description[0][0] == u'column'
assert guess.description[1][0] == u'stattype'
assert guess.description[2][0] == u'num_distinct'
assert guess.description[3][0] == u'reason'
assert len(guess.fetchall()) == 3
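# Integer square root via Newton's method, e.g. isqrt(10) == 3.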
def isqrt(n):
x = n
y = (x + 1)//2
while y < x:
x = y
y = (x + n//x)//2
return x
|
nilq/baby-python
|
python
|
"""Class and container for pedigree information, vcf, and bam file by sample"""
from __future__ import print_function
import pandas as pd
import re
import func
class Ped:
"""Family_ID - '.' or '0' for unknown
Individual_ID - '.' or '0' for unknown
Paternal_ID - '.' or '0' for unknown
Maternal_ID - '.' or '0' for unknown
Sex - '1'=male; '2'=female; ['other', '0', '.']=unknown
Phenotype - '1'=unaffected, '2'=affected, ['-9', '0', '.']= missing"""
def __init__(self, ped_file_name, extra_column_names=[]):
"""read ped file into pandas data frame"""
self.fname = ped_file_name
self.ped = pd.read_table(self.fname, usecols=range(6+len(extra_column_names)))
self.ped.columns = ['fam_id', 'ind_id', 'fa_id', 'mo_id', 'sex', 'pheno'] + extra_column_names
self.ped.replace(['.', '0', 0, -9, '-9'], [None]*5, inplace=True)
self.ped['fam_id'] = self.ped['fam_id'].astype(str)
def addVcf(self, field='fam_id', file_pat='/mnt/ceph/asalomatov/SSC_Eichler/rerun/ssc%s/%s-JHC-vars.vcf.gz'):
        num_subst = len(re.findall(r'\%s', file_pat))
print('%s substitutions found' % num_subst)
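        # e.g., with fam_id '11000' the default pattern expands to
        # '/mnt/ceph/asalomatov/SSC_Eichler/rerun/ssc11000/11000-JHC-vars.vcf.gz'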
if num_subst > 0:
x = self.ped[field].apply(lambda f: func.checkFile(file_pat % ((f,) * num_subst)))
self.ped['vcf'] = pd.Series(x, index=self.ped.index)
else:
self.ped['vcf'] = file_pat
def addBam(self, field='ind_id', file_pat='/mnt/ceph/asalomatov/SSC_Eichler/data_S3/%s*.bam'):
        num_subst = len(re.findall(r'\%s', file_pat))
print('%s substitutions found' % num_subst)
if num_subst > 0:
x = self.ped[field].apply(lambda f: func.listFiles(file_pat % ((f,) * num_subst)))
self.ped['bam'] = pd.Series(x, index=self.ped.index)
else:
self.ped['bam'] = file_pat
def addBai(self, field='ind_id', file_pat='/mnt/ceph/asalomatov/SSC_Eichler/data_S3/%s*bam.bai'):
        num_subst = len(re.findall(r'%s', file_pat))
print('%s substitutions found' % num_subst)
if num_subst > 0:
x = self.ped[field].apply(lambda f: func.listFiles(file_pat % ((f,) * num_subst)))
self.ped['bai'] = pd.Series(x, index=self.ped.index)
else:
self.ped['bai'] = file_pat
def addTestFile(self, field='ind_id', file_pat='/mnt/scratch/asalomatov/data/SSC/wes/feature_sets/fb/all_SNP/%s'):
        num_subst = len(re.findall(r'%s', file_pat))
print('%s substitutions found' % num_subst)
if num_subst > 0:
x = self.ped[field].apply(lambda f: func.listFiles(file_pat % ((f,) * num_subst)))
self.ped['test'] = pd.Series(x, index=self.ped.index)
else:
self.ped['test'] = file_pat
def getAllMembers(self, family_id):
return self.ped['ind_id'][self.ped['fam_id'] == family_id].tolist()
def getProbands(self, family_id):
return self.ped['ind_id'][(self.ped['fam_id'] == family_id) & (self.ped['pheno'] == 2)].tolist()
def getSiblings(self, family_id):
return self.ped['ind_id'][(self.ped['fam_id'] == family_id) & (self.ped['pheno'] == 1) \
& ~self.ped['fa_id'].isnull() & ~self.ped['mo_id'].isnull() ].tolist()
def getParents(self, family_id):
return self.ped['ind_id'][(self.ped['fam_id'] == family_id) & \
self.ped['fa_id'].isnull() & self.ped['mo_id'].isnull() ].tolist()
def getFather(self, family_id):
res = self.ped['ind_id'][(self.ped['fam_id'] == family_id) & (self.ped['sex'] == 1) & \
self.ped['fa_id'].isnull() & self.ped['mo_id'].isnull()]
if len(res.index) == 0: return None
assert len(res) == 1
return res.iloc[0]
def getMother(self, family_id):
res = self.ped['ind_id'][(self.ped['fam_id'] == family_id) & (self.ped['sex'] == 2) & \
self.ped['fa_id'].isnull() & self.ped['mo_id'].isnull() ]
if len(res.index) == 0: return None
assert len(res) == 1
return res.iloc[0]
def getChildsFather(self, individial_id):
res = self.ped['fa_id'][(self.ped['ind_id'] == individial_id)]
if len(res.index) == 0: return None
assert len(res) == 1
return res.iloc[0]
def getChildsMother(self, individial_id):
res = self.ped['mo_id'][(self.ped['ind_id'] == individial_id)]
if len(res.index) == 0: return None
assert len(res) == 1
return res.iloc[0]
def isAffected(self, individial_id):
res = self.ped['pheno'][(self.ped['ind_id'] == individial_id)] == 2
if len(res.index) == 0: return None
assert len(res) == 1
return res.iloc[0]
def getIndivVCF(self, individial_id):
res = self.ped['vcf'][(self.ped['ind_id'] == individial_id)]
if len(res.index) == 0: return None
assert len(res) == 1
return res.iloc[0]
def getIndivBAM(self, individial_id):
res = self.ped['bam'][(self.ped['ind_id'] == individial_id)]
if len(res.index) == 0: return None
assert len(res) == 1
return res.iloc[0]
def getFamily(self, individial_id):
res = self.ped['fam_id'][(self.ped['ind_id'] == individial_id)]
if len(res.index) == 0: return None
assert len(res) == 1
return res.iloc[0]
def getFamilyVCF(self, family_id):
res = self.ped['vcf'][(self.ped['fam_id'] == family_id)]
res = res.unique()
if res.size == 0: return None
return res[0]
def getFamilyBam(self, family_id):
res = self.ped['bam'][(self.ped['fam_id'] == family_id)]
res = res.unique()
        if res.size == 0: return None
assert len(res) == 1
return res[0]
def getAllProbands(self):
res = self.ped['ind_id'][self.ped['pheno'] == 2]
res = res.tolist()
if not res: return None
return res
def getAllTrios(self):
fam = self.ped['fam_id'].unique()
res = [x for x in fam if len(self.getAllMembers(x)) == 3]
return res
def getAllQuads(self):
fam = self.ped['fam_id'].unique()
res = [x for x in fam if len(self.getAllMembers(x)) == 4]
if not res: return None
return res
def isTrio(self, family_id):
res = len(self.ped['fam_id'][(self.ped['fam_id'] == family_id)]) == 3
return res
def isQuad(self, family_id):
res = len(self.ped['fam_id'][(self.ped['fam_id'] == family_id)]) == 4
return res
if __name__ == '__main__':
infile = '/mnt/scratch/asalomatov/data/SSCped/SSC.ped'
    myped = Ped(infile, ['collection'])
    myped.addVcf()
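    # Illustrative follow-up (paths and glob patterns are placeholders, not
    # real files): attach BAMs, then walk the trios.
    # myped.addBam()
    # for fam in myped.getAllTrios():
    #     print(myped.getFather(fam), myped.getMother(fam), myped.getProbands(fam))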
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import unittest
from .. import util
class UtilTest(unittest.TestCase):
def test_is_valid_sha1(self):
def is_valid(sha1: str):
return util.is_valid_sha1(sha1)
self.assertTrue(is_valid("0123456789abcabcabcd0123456789abcabcabcd"))
self.assertTrue(is_valid("0" * 40))
self.assertFalse(is_valid("0123456789abcabcabcd0123456789abcabcabc"))
self.assertFalse(is_valid("z123456789abcabcabcd0123456789abcabcabcd"))
self.assertFalse(is_valid(None))
self.assertFalse(is_valid(""))
self.assertFalse(is_valid("abc"))
self.assertFalse(is_valid("z" * 40))
|
nilq/baby-python
|
python
|
"""Main code for training. Probably needs refactoring."""
import os
from glob import glob
import dgl
import pandas as pd
import pytorch_lightning as pl
import sastvd as svd
import sastvd.codebert as cb
import sastvd.helpers.dclass as svddc
import sastvd.helpers.doc2vec as svdd2v
import sastvd.helpers.glove as svdg
import sastvd.helpers.joern as svdj
import sastvd.helpers.losses as svdloss
import sastvd.helpers.ml as ml
import sastvd.helpers.rank_eval as svdr
import sastvd.helpers.sast as sast
import sastvd.ivdetect.evaluate as ivde
import sastvd.linevd.gnnexplainer as lvdgne
import torch as th
import torch.nn.functional as F
import torchmetrics
from dgl.data.utils import load_graphs, save_graphs
from dgl.dataloading import GraphDataLoader
from dgl.nn.pytorch import GATConv, GraphConv
from sklearn.metrics import PrecisionRecallDisplay, precision_recall_curve
from tqdm import tqdm
def ne_groupnodes(n, e):
"""Group nodes with same line number."""
nl = n[n.lineNumber != ""].copy()
nl.lineNumber = nl.lineNumber.astype(int)
nl = nl.sort_values(by="code", key=lambda x: x.str.len(), ascending=False)
nl = nl.groupby("lineNumber").head(1)
el = e.copy()
el.innode = el.line_in
el.outnode = el.line_out
nl.id = nl.lineNumber
nl = svdj.drop_lone_nodes(nl, el)
el = el.drop_duplicates(subset=["innode", "outnode", "etype"])
el = el[el.innode.apply(lambda x: isinstance(x, float))]
el = el[el.outnode.apply(lambda x: isinstance(x, float))]
el.innode = el.innode.astype(int)
el.outnode = el.outnode.astype(int)
return nl, el
def feature_extraction(_id, graph_type="cfgcdg", return_nodes=False):
"""Extract graph feature (basic).
_id = svddc.BigVulDataset.itempath(177775)
_id = svddc.BigVulDataset.itempath(180189)
_id = svddc.BigVulDataset.itempath(178958)
return_nodes arg is used to get the node information (for empirical evaluation).
"""
# Get CPG
n, e = svdj.get_node_edges(_id)
n, e = ne_groupnodes(n, e)
# Return node metadata
if return_nodes:
return n
# Filter nodes
e = svdj.rdg(e, graph_type.split("+")[0])
n = svdj.drop_lone_nodes(n, e)
# Plot graph
# svdj.plot_graph_node_edge_df(n, e)
# Map line numbers to indexing
n = n.reset_index(drop=True).reset_index()
iddict = pd.Series(n.index.values, index=n.id).to_dict()
e.innode = e.innode.map(iddict)
e.outnode = e.outnode.map(iddict)
# Map edge types
etypes = e.etype.tolist()
d = dict([(y, x) for x, y in enumerate(sorted(set(etypes)))])
etypes = [d[i] for i in etypes]
# Append function name to code
if "+raw" not in graph_type:
try:
func_name = n[n.lineNumber == 1].name.item()
        except Exception:
print(_id)
func_name = ""
n.code = func_name + " " + n.name + " " + "</s>" + " " + n.code
else:
n.code = "</s>" + " " + n.code
# Return plain-text code, line number list, innodes, outnodes
return n.code.tolist(), n.id.tolist(), e.innode.tolist(), e.outnode.tolist(), etypes
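# Illustrative call (the id comes from the docstring above; "pdg" is the
# graph_type default used by the dataset class below):
# code, lineno, ei, eo, et = feature_extraction(
#     svddc.BigVulDataset.itempath(177775), "pdg")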
# %%
class BigVulDatasetLineVD(svddc.BigVulDataset):
"""IVDetect version of BigVul."""
def __init__(self, gtype="pdg", feat="all", **kwargs):
"""Init."""
super(BigVulDatasetLineVD, self).__init__(**kwargs)
lines = ivde.get_dep_add_lines_bigvul()
lines = {k: set(list(v["removed"]) + v["depadd"]) for k, v in lines.items()}
self.lines = lines
self.graph_type = gtype
glove_path = svd.processed_dir() / "bigvul/glove_False/vectors.txt"
self.glove_dict, _ = svdg.glove_dict(glove_path)
self.d2v = svdd2v.D2V(svd.processed_dir() / "bigvul/d2v_False")
self.feat = feat
def item(self, _id, codebert=None):
"""Cache item."""
savedir = svd.get_dir(
svd.cache_dir() / f"bigvul_linevd_codebert_{self.graph_type}"
) / str(_id)
if os.path.exists(savedir):
g = load_graphs(str(savedir))[0][0]
# g.ndata["_FVULN"] = g.ndata["_VULN"].max().repeat((g.number_of_nodes()))
# if "_SASTRATS" in g.ndata:
# g.ndata.pop("_SASTRATS")
# g.ndata.pop("_SASTCPP")
# g.ndata.pop("_SASTFF")
# g.ndata.pop("_GLOVE")
# g.ndata.pop("_DOC2VEC")
if "_CODEBERT" in g.ndata:
if self.feat == "codebert":
for i in ["_GLOVE", "_DOC2VEC", "_RANDFEAT"]:
g.ndata.pop(i, None)
if self.feat == "glove":
for i in ["_CODEBERT", "_DOC2VEC", "_RANDFEAT"]:
g.ndata.pop(i, None)
if self.feat == "doc2vec":
for i in ["_CODEBERT", "_GLOVE", "_RANDFEAT"]:
g.ndata.pop(i, None)
return g
code, lineno, ei, eo, et = feature_extraction(
svddc.BigVulDataset.itempath(_id), self.graph_type
)
if _id in self.lines:
vuln = [1 if i in self.lines[_id] else 0 for i in lineno]
else:
vuln = [0 for _ in lineno]
g = dgl.graph((eo, ei))
gembeds = th.Tensor(svdg.get_embeddings_list(code, self.glove_dict, 200))
g.ndata["_GLOVE"] = gembeds
g.ndata["_DOC2VEC"] = th.Tensor([self.d2v.infer(i) for i in code])
if codebert:
code = [c.replace("\\t", "").replace("\\n", "") for c in code]
chunked_batches = svd.chunks(code, 128)
features = [codebert.encode(c).detach().cpu() for c in chunked_batches]
g.ndata["_CODEBERT"] = th.cat(features)
g.ndata["_RANDFEAT"] = th.rand(size=(g.number_of_nodes(), 100))
g.ndata["_LINE"] = th.Tensor(lineno).int()
g.ndata["_VULN"] = th.Tensor(vuln).float()
# Get SAST labels
s = sast.get_sast_lines(svd.processed_dir() / f"bigvul/before/{_id}.c.sast.pkl")
rats = [1 if i in s["rats"] else 0 for i in g.ndata["_LINE"]]
cppcheck = [1 if i in s["cppcheck"] else 0 for i in g.ndata["_LINE"]]
flawfinder = [1 if i in s["flawfinder"] else 0 for i in g.ndata["_LINE"]]
g.ndata["_SASTRATS"] = th.tensor(rats).long()
g.ndata["_SASTCPP"] = th.tensor(cppcheck).long()
g.ndata["_SASTFF"] = th.tensor(flawfinder).long()
g.ndata["_FVULN"] = g.ndata["_VULN"].max().repeat((g.number_of_nodes()))
g.edata["_ETYPE"] = th.Tensor(et).long()
emb_path = svd.cache_dir() / f"codebert_method_level/{_id}.pt"
g.ndata["_FUNC_EMB"] = th.load(emb_path).repeat((g.number_of_nodes(), 1))
g = dgl.add_self_loop(g)
save_graphs(str(savedir), [g])
return g
def cache_items(self, codebert):
"""Cache all items."""
for i in tqdm(self.df.sample(len(self.df)).id.tolist()):
try:
self.item(i, codebert)
except Exception as E:
print(E)
def cache_codebert_method_level(self, codebert):
"""Cache method-level embeddings using Codebert.
ONLY NEEDS TO BE RUN ONCE.
"""
savedir = svd.get_dir(svd.cache_dir() / "codebert_method_level")
done = [int(i.split("/")[-1].split(".")[0]) for i in glob(str(savedir / "*"))]
done = set(done)
batches = svd.chunks((range(len(self.df))), 128)
for idx_batch in tqdm(batches):
batch_texts = self.df.iloc[idx_batch[0] : idx_batch[-1] + 1].before.tolist()
batch_ids = self.df.iloc[idx_batch[0] : idx_batch[-1] + 1].id.tolist()
if set(batch_ids).issubset(done):
continue
texts = ["</s> " + ct for ct in batch_texts]
embedded = codebert.encode(texts).detach().cpu()
assert len(batch_texts) == len(batch_ids)
for i in range(len(batch_texts)):
th.save(embedded[i], savedir / f"{batch_ids[i]}.pt")
def __getitem__(self, idx):
"""Override getitem."""
return self.item(self.idx2id[idx])
class BigVulDatasetLineVDDataModule(pl.LightningDataModule):
"""Pytorch Lightning Datamodule for Bigvul."""
def __init__(
self,
batch_size: int = 32,
sample: int = -1,
methodlevel: bool = False,
nsampling: bool = False,
nsampling_hops: int = 1,
gtype: str = "cfgcdg",
splits: str = "default",
feat: str = "all",
):
"""Init class from bigvul dataset."""
super().__init__()
dataargs = {"sample": sample, "gtype": gtype, "splits": splits, "feat": feat}
self.train = BigVulDatasetLineVD(partition="train", **dataargs)
self.val = BigVulDatasetLineVD(partition="val", **dataargs)
self.test = BigVulDatasetLineVD(partition="test", **dataargs)
codebert = cb.CodeBert()
self.train.cache_codebert_method_level(codebert)
self.val.cache_codebert_method_level(codebert)
self.test.cache_codebert_method_level(codebert)
self.train.cache_items(codebert)
self.val.cache_items(codebert)
self.test.cache_items(codebert)
self.batch_size = batch_size
self.nsampling = nsampling
self.nsampling_hops = nsampling_hops
def node_dl(self, g, shuffle=False):
"""Return node dataloader."""
sampler = dgl.dataloading.MultiLayerFullNeighborSampler(self.nsampling_hops)
return dgl.dataloading.NodeDataLoader(
g,
g.nodes(),
sampler,
batch_size=self.batch_size,
shuffle=shuffle,
drop_last=False,
num_workers=1,
)
def train_dataloader(self):
"""Return train dataloader."""
if self.nsampling:
g = next(iter(GraphDataLoader(self.train, batch_size=len(self.train))))
return self.node_dl(g, shuffle=True)
return GraphDataLoader(self.train, shuffle=True, batch_size=self.batch_size)
def val_dataloader(self):
"""Return val dataloader."""
if self.nsampling:
g = next(iter(GraphDataLoader(self.val, batch_size=len(self.val))))
return self.node_dl(g)
return GraphDataLoader(self.val, batch_size=self.batch_size)
def val_graph_dataloader(self):
"""Return test dataloader."""
return GraphDataLoader(self.val, batch_size=32)
def test_dataloader(self):
"""Return test dataloader."""
return GraphDataLoader(self.test, batch_size=32)
# %%
class LitGNN(pl.LightningModule):
"""Main Trainer."""
def __init__(
self,
hfeat: int = 512,
embtype: str = "codebert",
embfeat: int = -1, # Keep for legacy purposes
num_heads: int = 4,
lr: float = 1e-3,
hdropout: float = 0.2,
mlpdropout: float = 0.2,
gatdropout: float = 0.2,
methodlevel: bool = False,
nsampling: bool = False,
model: str = "gat2layer",
loss: str = "ce",
multitask: str = "linemethod",
stmtweight: int = 5,
gnntype: str = "gat",
random: bool = False,
scea: float = 0.7,
):
"""Initilisation."""
super().__init__()
self.lr = lr
self.random = random
self.save_hyperparameters()
# Set params based on embedding type
if self.hparams.embtype == "codebert":
self.hparams.embfeat = 768
self.EMBED = "_CODEBERT"
if self.hparams.embtype == "glove":
self.hparams.embfeat = 200
self.EMBED = "_GLOVE"
if self.hparams.embtype == "doc2vec":
self.hparams.embfeat = 300
self.EMBED = "_DOC2VEC"
# Loss
if self.hparams.loss == "sce":
self.loss = svdloss.SCELoss(self.hparams.scea, 1 - self.hparams.scea)
self.loss_f = th.nn.CrossEntropyLoss()
else:
self.loss = th.nn.CrossEntropyLoss(
weight=th.Tensor([1, self.hparams.stmtweight]).cuda()
)
self.loss_f = th.nn.CrossEntropyLoss()
# Metrics
self.accuracy = torchmetrics.Accuracy()
self.auroc = torchmetrics.AUROC(compute_on_step=False)
self.mcc = torchmetrics.MatthewsCorrcoef(2)
# GraphConv Type
hfeat = self.hparams.hfeat
gatdrop = self.hparams.gatdropout
numheads = self.hparams.num_heads
embfeat = self.hparams.embfeat
gnn_args = {"out_feats": hfeat}
if self.hparams.gnntype == "gat":
gnn = GATConv
gat_args = {"num_heads": numheads, "feat_drop": gatdrop}
gnn1_args = {**gnn_args, **gat_args, "in_feats": embfeat}
gnn2_args = {**gnn_args, **gat_args, "in_feats": hfeat * numheads}
elif self.hparams.gnntype == "gcn":
gnn = GraphConv
gnn1_args = {"in_feats": embfeat, **gnn_args}
gnn2_args = {"in_feats": hfeat, **gnn_args}
# model: gat2layer
if "gat" in self.hparams.model:
self.gat = gnn(**gnn1_args)
self.gat2 = gnn(**gnn2_args)
fcin = hfeat * numheads if self.hparams.gnntype == "gat" else hfeat
self.fc = th.nn.Linear(fcin, self.hparams.hfeat)
self.fconly = th.nn.Linear(embfeat, self.hparams.hfeat)
self.mlpdropout = th.nn.Dropout(self.hparams.mlpdropout)
# model: mlp-only
if "mlponly" in self.hparams.model:
self.fconly = th.nn.Linear(embfeat, self.hparams.hfeat)
self.mlpdropout = th.nn.Dropout(self.hparams.mlpdropout)
# model: contains femb
if "+femb" in self.hparams.model:
self.fc_femb = th.nn.Linear(embfeat * 2, self.hparams.hfeat)
# self.resrgat = ResRGAT(hdim=768, rdim=1, numlayers=1, dropout=0)
# self.gcn = GraphConv(embfeat, hfeat)
# self.gcn2 = GraphConv(hfeat, hfeat)
# Transform codebert embedding
self.codebertfc = th.nn.Linear(768, self.hparams.hfeat)
# Hidden Layers
self.fch = []
for _ in range(8):
self.fch.append(th.nn.Linear(self.hparams.hfeat, self.hparams.hfeat))
self.hidden = th.nn.ModuleList(self.fch)
self.hdropout = th.nn.Dropout(self.hparams.hdropout)
self.fc2 = th.nn.Linear(self.hparams.hfeat, 2)
def forward(self, g, test=False, e_weights=[], feat_override=""):
"""Forward pass.
data = BigVulDatasetLineVDDataModule(batch_size=1, sample=2, nsampling=True)
g = next(iter(data.train_dataloader()))
        e_weights and feat_override are just used for GNNExplainer.
"""
if self.hparams.nsampling and not test:
hdst = g[2][-1].dstdata[self.EMBED]
h_func = g[2][-1].dstdata["_FUNC_EMB"]
g2 = g[2][1]
g = g[2][0]
if "gat2layer" in self.hparams.model:
h = g.srcdata[self.EMBED]
elif "gat1layer" in self.hparams.model:
h = g2.srcdata[self.EMBED]
else:
g2 = g
h = g.ndata[self.EMBED]
if len(feat_override) > 0:
h = g.ndata[feat_override]
h_func = g.ndata["_FUNC_EMB"]
hdst = h
if self.random:
return th.rand((h.shape[0], 2)).to(self.device), th.rand(
h_func.shape[0], 2
).to(self.device)
# model: contains femb
if "+femb" in self.hparams.model:
h = th.cat([h, h_func], dim=1)
h = F.elu(self.fc_femb(h))
# Transform h_func if wrong size
if self.hparams.embfeat != 768:
h_func = self.codebertfc(h_func)
# model: gat2layer
if "gat" in self.hparams.model:
if "gat2layer" in self.hparams.model:
h = self.gat(g, h)
if self.hparams.gnntype == "gat":
h = h.view(-1, h.size(1) * h.size(2))
h = self.gat2(g2, h)
if self.hparams.gnntype == "gat":
h = h.view(-1, h.size(1) * h.size(2))
elif "gat1layer" in self.hparams.model:
h = self.gat(g2, h)
if self.hparams.gnntype == "gat":
h = h.view(-1, h.size(1) * h.size(2))
h = self.mlpdropout(F.elu(self.fc(h)))
h_func = self.mlpdropout(F.elu(self.fconly(h_func)))
# Edge masking (for GNNExplainer)
if test and len(e_weights) > 0:
g.ndata["h"] = h
g.edata["ew"] = e_weights
g.update_all(
dgl.function.u_mul_e("h", "ew", "m"), dgl.function.mean("m", "h")
)
h = g.ndata["h"]
# model: mlp-only
if "mlponly" in self.hparams.model:
h = self.mlpdropout(F.elu(self.fconly(hdst)))
h_func = self.mlpdropout(F.elu(self.fconly(h_func)))
# Hidden layers
for idx, hlayer in enumerate(self.hidden):
h = self.hdropout(F.elu(hlayer(h)))
h_func = self.hdropout(F.elu(hlayer(h_func)))
h = self.fc2(h)
h_func = self.fc2(
h_func
) # Share weights between method-level and statement-level tasks
if self.hparams.methodlevel:
g.ndata["h"] = h
return dgl.mean_nodes(g, "h"), None
else:
return h, h_func # Return two values for multitask training
def shared_step(self, batch, test=False):
"""Shared step."""
logits = self(batch, test)
if self.hparams.methodlevel:
if self.hparams.nsampling:
raise ValueError("Cannot train on method level with nsampling.")
labels = dgl.max_nodes(batch, "_VULN").long()
labels_func = None
else:
if self.hparams.nsampling and not test:
labels = batch[2][-1].dstdata["_VULN"].long()
labels_func = batch[2][-1].dstdata["_FVULN"].long()
else:
labels = batch.ndata["_VULN"].long()
labels_func = batch.ndata["_FVULN"].long()
return logits, labels, labels_func
def training_step(self, batch, batch_idx):
"""Training step."""
logits, labels, labels_func = self.shared_step(
batch
) # Labels func should be the method-level label for statements
# print(logits.argmax(1), labels_func)
# Need some way of combining the losses for multitask training
loss = 0
if "line" in self.hparams.multitask:
loss1 = self.loss(logits[0], labels)
loss += loss1
if "method" in self.hparams.multitask and not self.hparams.methodlevel:
loss2 = self.loss(logits[1], labels_func)
loss += loss2
logits = logits[1] if self.hparams.multitask == "method" else logits[0]
pred = F.softmax(logits, dim=1)
acc = self.accuracy(pred.argmax(1), labels)
if not self.hparams.methodlevel:
acc_func = self.accuracy(logits.argmax(1), labels_func)
mcc = self.mcc(pred.argmax(1), labels)
# print(pred.argmax(1), labels)
self.log("train_loss", loss, on_epoch=True, prog_bar=True, logger=True)
self.log("train_acc", acc, prog_bar=True, logger=True)
if not self.hparams.methodlevel:
self.log("train_acc_func", acc_func, prog_bar=True, logger=True)
self.log("train_mcc", mcc, prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
"""Validate step."""
logits, labels, labels_func = self.shared_step(batch)
loss = 0
if "line" in self.hparams.multitask:
loss1 = self.loss(logits[0], labels)
loss += loss1
if "method" in self.hparams.multitask:
loss2 = self.loss_f(logits[1], labels_func)
loss += loss2
logits = logits[1] if self.hparams.multitask == "method" else logits[0]
pred = F.softmax(logits, dim=1)
acc = self.accuracy(pred.argmax(1), labels)
mcc = self.mcc(pred.argmax(1), labels)
self.log("val_loss", loss, on_step=True, prog_bar=True, logger=True)
self.auroc.update(logits[:, 1], labels)
self.log("val_auroc", self.auroc, prog_bar=True, logger=True)
self.log("val_acc", acc, prog_bar=True, logger=True)
self.log("val_mcc", mcc, prog_bar=True, logger=True)
return loss
def test_step(self, batch, batch_idx):
"""Test step."""
logits, labels, _ = self.shared_step(
batch, True
) # TODO: Make work for multitask
if self.hparams.methodlevel:
labels_f = labels
return logits[0], labels_f, dgl.unbatch(batch)
batch.ndata["pred"] = F.softmax(logits[0], dim=1)
batch.ndata["pred_func"] = F.softmax(logits[1], dim=1)
logits_f = []
labels_f = []
preds = []
for i in dgl.unbatch(batch):
preds.append(
[
list(i.ndata["pred"].detach().cpu().numpy()),
list(i.ndata["_VULN"].detach().cpu().numpy()),
i.ndata["pred_func"].argmax(1).detach().cpu(),
list(i.ndata["_LINE"].detach().cpu().numpy()),
]
)
logits_f.append(dgl.mean_nodes(i, "pred_func").detach().cpu())
labels_f.append(dgl.mean_nodes(i, "_FVULN").detach().cpu())
return [logits[0], logits_f], [labels, labels_f], preds
def test_epoch_end(self, outputs):
"""Calculate metrics for whole test set."""
all_pred = th.empty((0, 2)).long().cuda()
all_true = th.empty((0)).long().cuda()
all_pred_f = []
all_true_f = []
all_funcs = []
from importlib import reload
reload(lvdgne)
reload(ml)
if self.hparams.methodlevel:
for out in outputs:
all_pred_f += out[0]
all_true_f += out[1]
for idx, g in enumerate(out[2]):
all_true = th.cat([all_true, g.ndata["_VULN"]])
gnnelogits = th.zeros((g.number_of_nodes(), 2), device="cuda")
gnnelogits[:, 0] = 1
if out[1][idx] == 1:
zeros = th.zeros(g.number_of_nodes(), device="cuda")
importance = th.ones(g.number_of_nodes(), device="cuda")
try:
if out[1][idx] == 1:
importance = lvdgne.get_node_importances(self, g)
importance = importance.unsqueeze(1)
gnnelogits = th.cat([zeros.unsqueeze(1), importance], dim=1)
except Exception as E:
print(E)
pass
all_pred = th.cat([all_pred, gnnelogits])
func_pred = out[0][idx].argmax().repeat(g.number_of_nodes())
all_funcs.append(
[
gnnelogits.detach().cpu().numpy(),
g.ndata["_VULN"].detach().cpu().numpy(),
func_pred.detach().cpu(),
]
)
all_true = all_true.long()
else:
for out in outputs:
all_pred = th.cat([all_pred, out[0][0]])
all_true = th.cat([all_true, out[1][0]])
all_pred_f += out[0][1]
all_true_f += out[1][1]
all_funcs += out[2]
all_pred = F.softmax(all_pred, dim=1)
all_pred_f = F.softmax(th.stack(all_pred_f).squeeze(), dim=1)
all_true_f = th.stack(all_true_f).squeeze().long()
self.all_funcs = all_funcs
self.all_true = all_true
self.all_pred = all_pred
self.all_pred_f = all_pred_f
self.all_true_f = all_true_f
# Custom ranked accuracy (inc negatives)
self.res1 = ivde.eval_statements_list(all_funcs)
# Custom ranked accuracy (only positives)
self.res1vo = ivde.eval_statements_list(all_funcs, vo=True, thresh=0)
# Regular metrics
multitask_pred = []
multitask_true = []
for af in all_funcs:
line_pred = list(zip(af[0], af[2]))
multitask_pred += [list(i[0]) if i[1] == 1 else [1, 0] for i in line_pred]
multitask_true += list(af[1])
self.linevd_pred = multitask_pred
self.linevd_true = multitask_true
multitask_true = th.LongTensor(multitask_true)
multitask_pred = th.Tensor(multitask_pred)
self.f1thresh = ml.best_f1(multitask_true, [i[1] for i in multitask_pred])
self.res2mt = ml.get_metrics_logits(multitask_true, multitask_pred)
self.res2 = ml.get_metrics_logits(all_true, all_pred)
self.res2f = ml.get_metrics_logits(all_true_f, all_pred_f)
# Ranked metrics
rank_metrs = []
rank_metrs_vo = []
for af in all_funcs:
rank_metr_calc = svdr.rank_metr([i[1] for i in af[0]], af[1], 0)
if max(af[1]) > 0:
rank_metrs_vo.append(rank_metr_calc)
rank_metrs.append(rank_metr_calc)
try:
self.res3 = ml.dict_mean(rank_metrs)
except Exception as E:
print(E)
pass
self.res3vo = ml.dict_mean(rank_metrs_vo)
# Method level prediction from statement level
method_level_pred = []
method_level_true = []
for af in all_funcs:
method_level_true.append(1 if sum(af[1]) > 0 else 0)
pred_method = 0
for logit in af[0]:
if logit[1] > 0.5:
pred_method = 1
break
method_level_pred.append(pred_method)
self.res4 = ml.get_metrics(method_level_true, method_level_pred)
return
def plot_pr_curve(self):
"""Plot Precision-Recall Curve for Positive Class (after test)."""
precision, recall, thresholds = precision_recall_curve(
self.linevd_true, [i[1] for i in self.linevd_pred]
)
disp = PrecisionRecallDisplay(precision, recall)
disp.plot()
return
def configure_optimizers(self):
"""Configure optimizer."""
return th.optim.AdamW(self.parameters(), lr=self.lr)
def get_relevant_metrics(trial_result):
"""Get relevant metrics from results."""
ret = {}
ret["trial_id"] = trial_result[0]
ret["checkpoint"] = trial_result[1]
ret["acc@5"] = trial_result[2][5]
ret["stmt_f1"] = trial_result[3]["f1"]
ret["stmt_rec"] = trial_result[3]["rec"]
ret["stmt_prec"] = trial_result[3]["prec"]
ret["stmt_mcc"] = trial_result[3]["mcc"]
ret["stmt_fpr"] = trial_result[3]["fpr"]
ret["stmt_fnr"] = trial_result[3]["fnr"]
ret["stmt_rocauc"] = trial_result[3]["roc_auc"]
ret["stmt_prauc"] = trial_result[3]["pr_auc"]
ret["stmt_prauc_pos"] = trial_result[3]["pr_auc_pos"]
ret["func_f1"] = trial_result[4]["f1"]
ret["func_rec"] = trial_result[4]["rec"]
ret["func_prec"] = trial_result[4]["prec"]
ret["func_mcc"] = trial_result[4]["mcc"]
ret["func_fpr"] = trial_result[4]["fpr"]
ret["func_fnr"] = trial_result[4]["fnr"]
ret["func_rocauc"] = trial_result[4]["roc_auc"]
ret["func_prauc"] = trial_result[4]["pr_auc"]
ret["MAP@5"] = trial_result[5]["MAP@5"]
ret["nDCG@5"] = trial_result[5]["nDCG@5"]
ret["MFR"] = trial_result[5]["MFR"]
ret["MAR"] = trial_result[5]["MAR"]
ret["stmtline_f1"] = trial_result[6]["f1"]
ret["stmtline_rec"] = trial_result[6]["rec"]
ret["stmtline_prec"] = trial_result[6]["prec"]
ret["stmtline_mcc"] = trial_result[6]["mcc"]
ret["stmtline_fpr"] = trial_result[6]["fpr"]
ret["stmtline_fnr"] = trial_result[6]["fnr"]
ret["stmtline_rocauc"] = trial_result[6]["roc_auc"]
ret["stmtline_prauc"] = trial_result[6]["pr_auc"]
ret["stmtline_prauc_pos"] = trial_result[6]["pr_auc_pos"]
ret = {k: round(v, 3) if isinstance(v, float) else v for k, v in ret.items()}
ret["learning_rate"] = trial_result[7]
ret["stmt_loss"] = trial_result[3]["loss"]
ret["func_loss"] = trial_result[4]["loss"]
ret["stmtline_loss"] = trial_result[6]["loss"]
return ret
|
nilq/baby-python
|
python
|
# Generated by Django 4.0 on 2021-12-17 12:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('src', '0012_alter_articlecategory_options_article_slug_and_more'),
('src', '0013_alter_product_description_alter_product_name'),
]
operations = [
]
|
nilq/baby-python
|
python
|
from hallo.events import EventInvite
from hallo.function import Function
import hallo.modules.channel_control.channel_control
from hallo.server import Server
class Invite(Function):
"""
IRC only, invites users to a given channel.
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
# Name for use in help listing
self.help_name = "invite"
# Names which can be used to address the function
self.names = {"invite"}
# Help documentation, if it's just a single line, can be set here
self.help_docs = "Invite someone to a channel"
def run(self, event):
# Get server object
server_obj = event.server
# If server isn't IRC type, we can't invite people
if server_obj.type != Server.TYPE_IRC:
return event.create_response(
"Error, this function is only available for IRC servers."
)
# If 0 arguments, ask for clarification
line_split = event.command_args.split()
if len(line_split) == 0:
return event.create_response(
"Error, please specify a user to invite and/or a channel to invite to."
)
# If 1 argument, see if it's a channel or a user.
if len(line_split) == 1:
# If message was sent in private message, it's referring to a channel
if event.channel is None:
channel = server_obj.get_channel_by_name(event.command_args)
if channel is None:
return event.create_response(
"Error, {} is not known on {}.".format(
event.command_args, server_obj.name
)
)
return event.create_response(self.send_invite(channel, event.user))
# See if it's a channel that hallo is in
test_channel = server_obj.get_channel_by_name(event.command_args)
if test_channel is not None and test_channel.in_channel:
return event.create_response(self.send_invite(test_channel, event.user))
# Argument must be a user?
target_user = server_obj.get_user_by_name(event.command_args)
if target_user is None:
return event.create_response(
"Error, {} is not known on {}.".format(
event.command_args, server_obj.name
)
)
return event.create_response(self.send_invite(event.channel, target_user))
# If 2 arguments, try with first argument as channel
target_channel = server_obj.get_channel_by_name(line_split[0])
if target_channel is not None and target_channel.in_channel:
target_user = server_obj.get_user_by_name(line_split[1])
if target_user is None:
return event.create_response(
"Error, {} is not known on {}.".format(
line_split[1], server_obj.name
)
)
return event.create_response(self.send_invite(target_channel, target_user))
# 2 args, try with second argument as channel
target_user = server_obj.get_user_by_name(line_split[0])
if target_user is None:
return event.create_response(
"Error, {} is not known on {}.".format(line_split[0], server_obj.name)
)
target_channel = server_obj.get_channel_by_name(line_split[1])
if target_channel is None:
return event.create_response(
"Error, {} is not known on {}.".format(line_split[1], server_obj.name)
)
return event.create_response(self.send_invite(target_channel, target_user))
def send_invite(self, channel, user):
"""
Sends an invite to a specified user to join a given channel.
:param channel: Channel to invite target to
:type channel: destination.Channel
:param user: User to invite to channel
:type user: destination.User
:return: Response to send to requester
:rtype: str
"""
# Check if in channel
if not channel.in_channel:
return "Error, I'm not in that channel."
# Check if user is in channel
if user in channel.get_user_list():
return "Error, {} is already in {}".format(user.name, channel.name)
# Check if hallo has op in channel
if not hallo.modules.channel_control.channel_control.hallo_has_op(channel):
return "Error, I don't have power to invite users in {}.".format(
channel.name
)
# Send invite
invite_evt = EventInvite(channel.server, channel, None, user, inbound=False)
channel.server.send(invite_evt)
return "Invite sent."
|
nilq/baby-python
|
python
|
from die import Die
import pygal
die_1 = Die()
die_2 = Die()
results = []
for roll_num in range(1000):
result = die_1.roll() + die_2.roll()
results.append(result)
# Analyze the results
frequencies = []
max_result = die_1.num_sides + die_2.num_sides
for value in range(2,max_result+1):
    # results.count() tallies how many times each value appears
frequency = results.count(value)
frequencies.append(frequency)
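# For two fair six-sided dice the theoretical histogram is triangular: a sum
# of 7 is most likely (6/36 of rolls) while 2 and 12 are rarest (1/36 each).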
# Visualize the results
hist = pygal.Bar()
hist.title = "Results of rolling two D6 dice 1000 times"
hist.x_labels = [2,3,4,5,6,7,8,9,10,11,12]
hist.x_title = "Result"
hist.y_title = "Frequency of Result"
hist.add('D6 + D6',frequencies)
hist.render_to_file('die_visual.svg')
|
nilq/baby-python
|
python
|
from .index import index
from .village import village
from .voice import voice
from .confirm_voice import confirm_voice
from .selectstyle import selectstyle
|
nilq/baby-python
|
python
|
try:
from .secrets import *
except ImportError:
import sys
sys.exit('secrets.py settings file not found. Please run `prepare.sh` to create one.')
from .server import *
#
# Put production server environment specific overrides below.
#
COWRY_RETURN_URL_BASE = 'https://onepercentclub.com'
COWRY_LIVE_PAYMENTS = True
# Send email for real
EMAIL_BACKEND = 'bluebottle.utils.email_backend.DKIMBackend'
SESSION_COOKIE_DOMAIN = '.onepercentclub.com'
ANALYTICS_CODE = 'UA-2761714-4'
PRODUCTION = True
DOCDATA_SETTINGS = {
'profile': 'webmenu',
'days_to_pay': 5,
'testing_mode': False,
}
AFOM_ENABLED = True
|
nilq/baby-python
|
python
|
from django.db import models
import addons.myminio.settings as settings
from addons.base import exceptions
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from addons.myminio import SHORT_NAME, FULL_NAME
from addons.myminio.provider import MyMinIOProvider
from addons.myminio.serializer import MyMinIOSerializer
from addons.myminio.utils import bucket_exists, get_bucket_names
from framework.auth.core import Auth
from osf.models.files import File, Folder, BaseFileNode
class MyMinIOFileNode(BaseFileNode):
_provider = SHORT_NAME
class MyMinIOFolder(MyMinIOFileNode, Folder):
pass
class MyMinIOFile(MyMinIOFileNode, File):
version_identifier = 'version'
class UserSettings(BaseOAuthUserSettings):
oauth_provider = MyMinIOProvider
serializer = MyMinIOSerializer
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = MyMinIOProvider
serializer = MyMinIOSerializer
folder_id = models.TextField(blank=True, null=True)
folder_name = models.TextField(blank=True, null=True)
folder_location = models.TextField(blank=True, null=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
@property
def folder_path(self):
return self.folder_name
@property
def display_name(self):
return u'{0}: {1}'.format(self.config.full_name, self.folder_id)
def set_folder(self, folder_id, auth):
host = settings.HOST
if not bucket_exists(host,
self.external_account.oauth_key,
self.external_account.oauth_secret, folder_id):
error_message = ('We are having trouble connecting to that bucket. '
'Try a different one.')
raise exceptions.InvalidFolderError(error_message)
self.folder_id = str(folder_id)
self.folder_name = folder_id
self.save()
self.nodelogger.log(action='bucket_linked', extra={'bucket': str(folder_id)}, save=True)
def get_folders(self, **kwargs):
# This really gets only buckets, not subfolders,
# as that's all we want to be linkable on a node.
try:
buckets = get_bucket_names(self)
except Exception:
raise exceptions.InvalidAuthError()
return [
{
'addon': SHORT_NAME,
'kind': 'folder',
'id': bucket,
'name': bucket,
'path': bucket,
'urls': {
'folders': ''
}
}
for bucket in buckets
]
@property
def complete(self):
return self.has_auth and self.folder_id is not None
def authorize(self, user_settings, save=False):
self.user_settings = user_settings
self.nodelogger.log(action='node_authorized', save=save)
def clear_settings(self):
self.folder_id = None
self.folder_name = None
self.folder_location = None
def deauthorize(self, auth=None, log=True):
"""Remove user authorization from this node and log the event."""
self.clear_settings()
self.clear_auth() # Also performs a save
if log:
self.nodelogger.log(action='node_deauthorized', save=True)
def delete(self, save=True):
self.deauthorize(log=False)
super(NodeSettings, self).delete(save=save)
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Cannot serialize credentials for {} addon'.format(FULL_NAME))
return {
'host': settings.HOST,
'access_key': self.external_account.oauth_key,
'secret_key': self.external_account.oauth_secret,
}
def serialize_waterbutler_settings(self):
if not self.folder_id:
raise exceptions.AddonError('Cannot serialize settings for {} addon'.format(FULL_NAME))
return {
'bucket': self.folder_id
}
def create_waterbutler_log(self, auth, action, metadata):
url = self.owner.web_url_for('addon_view_or_download_file', path=metadata['path'], provider=SHORT_NAME)
self.owner.add_log(
'{0}_{1}'.format(SHORT_NAME, action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': metadata['materialized'],
'bucket': self.folder_id,
'urls': {
'view': url,
'download': url + '?action=download'
}
},
)
def after_delete(self, user):
self.deauthorize(Auth(user=user), log=True)
|
nilq/baby-python
|
python
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
import yaml
import os
import logging
import plotly
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
if not log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
log.addHandler(ch)
# %%
# Parse experiment yaml file
experiments_path = "../experiments/regression_test.yaml"
# Get experiment information from yaml file.
experiment_params = yaml.load(open(experiments_path))
regression_tests_dir = os.path.expandvars(experiment_params["regression_tests_dir"])
datasets_to_run = experiment_params["datasets_to_run"]
regression_params = experiment_params["regression_parameters"]
# %%
# Retrieve stats, if they are not there, try to collect them:
def collect_stats(
full_stats_path, regression_params, regression_tests_dir, datasets_to_run
):
# TODO(Toni): recollection of results should be automatic by looking for results.yaml files in the
# regression_tests_dir file system.
# Collect all yaml results for a given parameter name:
stats = dict()
for regression_param in regression_params:
        # Redirect to the <param_name>/<param_value> results directory
param_name = regression_param["name"]
stats[param_name] = dict()
for param_value in regression_param["values"]:
results_dir = os.path.join(
regression_tests_dir, param_name, str(param_value)
)
# Redirect to modified params_dir
params_dir = os.path.join(results_dir, "params")
stats[param_name][param_value] = dict()
for dataset in datasets_to_run:
dataset_name = dataset["name"]
pipelines_to_run = dataset["pipelines"]
stats[param_name][param_value][dataset_name] = dict()
for pipeline in pipelines_to_run:
results_file = os.path.join(
results_dir, dataset_name, pipeline, "results.yaml"
)
if os.path.isfile(results_file):
stats[param_name][param_value][dataset_name][
pipeline
] = yaml.load(open(results_file, "r"))
else:
log.warning(
"Could not find results file: {}. Adding cross to boxplot...".format(
results_file
)
)
stats[param_name][param_value][dataset_name][pipeline] = False
# Save all stats in regression tests root directory for future usage.
with open(full_stats_path, "w") as outfile:
outfile.write(yaml.dump(stats))
return stats
full_stats_path = os.path.join(regression_tests_dir, "all_stats.yaml")
stats = dict()
if os.path.isfile(full_stats_path):
log.info("Found existent stats. Opening full stats from:" + full_stats_path)
stats = yaml.load(open(full_stats_path))
else:
log.info("Collecting full stats.")
stats = collect_stats(
full_stats_path, regression_params, regression_tests_dir, datasets_to_run
)
# Push to the cloud?!
# %%
# Store stats in a tidy Pandas DataFrame # TODO(Toni): this should be done in the evaluation_lib.py script...
def listify_regression_stats(stats):
""" Makes a list of lists out of the stats (for easy conversion into pandas dataframe) """
stats_list = []
for param_name in stats:
for param_value in stats[param_name]:
for dataset_name in stats[param_name][param_value]:
for pipeline in stats[param_name][param_value][dataset_name]:
result = stats[param_name][param_value][dataset_name][pipeline]
                    if result is not False:
result = result["absolute_errors"].np_arrays["error_array"]
stats_list.append(
[param_name, param_value, dataset_name, pipeline, result]
)
return stats_list
# Create or load Pandas DataFrame
df = pd.DataFrame()
all_stats_pickle_dir = os.path.join(regression_tests_dir, "all_stats.pkl")
if os.path.isfile(all_stats_pickle_dir):
log.info(
"Found existent pickle file. Opening pickled stats from:" + all_stats_pickle_dir
)
df = pd.read_pickle(all_stats_pickle_dir)
else:
log.info("Creating dataframe stats.")
df = pd.DataFrame.from_records(listify_regression_stats(stats))
df.columns = [
"Param Name",
"Param Value",
"Dataset Name",
"Pipe Type",
"ATE errors",
]
df.set_index(["Param Name", "Dataset Name"], inplace=True)
# Save dataframe as pickle for future use
# df.to_pickle(all_stats_pickle_dir)
# Print df
df
# %%
def regression_boxplot(param_name, dataset_name, tidy):
tidy.set_index(["Param Value", "Pipe Type"], inplace=True)
tidy_2 = (
tidy["ATE errors"]
.apply(lambda x: pd.Series(x))
.stack()
.reset_index(level=2, drop=True)
.to_frame("ATE errors")
)
tidy_2.reset_index(level=["Pipe Type", "Param Value"], drop=False, inplace=True)
fig = px.box(
tidy_2, x="Param Value", y="ATE errors", points="all", color="Pipe Type"
)
fig.update_layout(
title=go.layout.Title(text="Dataset: " + dataset_name),
xaxis=go.layout.XAxis(title=go.layout.xaxis.Title(text=param_name)),
yaxis=go.layout.YAxis(
title=go.layout.yaxis.Title(text="ATE [m]"), rangemode="tozero"
),
template="plotly_white",
)
return fig
# %%
# Generate figures
figures = [
regression_boxplot(x, y, df.loc[x].loc[[y]])
for x in df.index.levels[0]
for y in df.index.levels[1]
]
# %%
# Show figures
for figure in figures:
figure.show()
# %%
import plotly.io as pio
pio.orca.status
plotly.io.orca.config.executable = "venv/bin/orca-server"
# %%
# Save figures
if not os.path.exists("figures"):
os.mkdir("figures")
for fig in figures:
plotly.offline.plot(
fig,
filename="figures/regression_test_"
+ fig.layout.title.text
+ "_"
+ fig.layout.xaxis.title.text
+ ".html",
)
# for figure in figures:
# figure.write_image("figures/"+ figure.layout.title.text + ".svg")
# %%
import chart_studio
import chart_studio.plotly as py
import chart_studio.tools as tls
import plotly.graph_objects as go
from chart_studio.grid_objs import Column, Grid
from datetime import datetime as dt
import numpy as np
from IPython.display import IFrame
upload_plots_online = True
if upload_plots_online:
for fig in figures:
py.iplot(
fig,
filename="regression_test_"
+ fig.layout.title.text
+ "_"
+ fig.layout.xaxis.title.text
+ ".html",
world_readable=True,
auto_open=True,
)
# %%
def url_to_iframe(url, text=True):
html = ""
# style
html += """<head>
<style>
div.textbox {
margin: 30px;
font-weight: bold;
}
</style>
              </head>
"""
# iframe
html += (
"<iframe src="
+ url
+ '.embed#{} width=750 height=400 frameBorder="0"></iframe>'
)
if text:
html += """<body>
<div class="textbox">
<p>Click on the presentation above and use left/right arrow keys to flip through the slides.</p>
</div>
</body>
"""
return html
# %%
|
nilq/baby-python
|
python
|
import os, sys
sys.path.append(os.path.join(os.environ['GGP_PATH'], 'analogy','rule_mapper'))
sys.path.append(os.path.join(os.environ['GGP_PATH'], 'analogy','test_gen'))
import gdlyacc
from GDL import *
from PositionIndex import PositionIndex
import rule_mapper2
import psyco
# constants to ignore, along with numbers
exclude = ['north','south','east','west']
def cross_product(l1, l2):
r = []
for a1 in l1:
r.extend((a1, a2) for a2 in l2)
return r
def get_all_constants(grounds):
consts = set()
for g in grounds:
poses = PositionIndex.get_all_positions(g)
for p in poses:
consts.add(p.fetch(g))
return consts
def build_c2p(int_rep, map = {}):
""" returns a map of constants to the predicates that they appear in """
c2p = {} # const -> [(pos, pred)]
for g in int_rep.get_statics() + int_rep.get_inits():
pred = g.get_predicate()
for p in PositionIndex.get_all_positions(g):
term = p.fetch(g)
if isinstance(term, Constant) and \
isinstance(term.get_name(), str) and \
term.get_name() not in exclude:
c2p.setdefault(term.get_name(), []).append((p, pred))
return c2p
def filter_matches(matches, cmap, pmap):
""" filters out ground matches that violate the commitments already
set by the current (partial) constant mapping
cmap = constant mapping
pmap = position mapping for this predicate """
good_matches = []
# is the same for all grounds, only have to calculate once
all_src_p = pmap.keys()
all_tgt_p = [pmap[p] for p in all_src_p]
pos_pairs = zip(all_src_p, all_tgt_p)
for src_g, tgt_g in matches:
valid = True
for sp, tp in pos_pairs:
sc = sp.fetch(src_g)
if sc in cmap:
tc = tp.fetch(tgt_g)
if cmap[sc] != tc:
# violates commitment
valid = False
break
if valid:
good_matches.append((src_g, tgt_g))
return good_matches
def commit_ground_match(src_g, tgt_g, cmap, pmap):
""" make constant mapping commitments based on the matching of these two grounds
cmap = constant map
pmap = position map """
for src_p in pmap:
tgt_p = pmap[src_p]
src_c = src_p.fetch(src_g)
tgt_c = tgt_p.fetch(tgt_g)
assert src_c not in cmap or cmap[src_c] == tgt_c, "Constant mapping inconsistency"
if src_c not in cmap:
cmap[src_c] = tgt_c
if __name__ == '__main__':
import psycocompile
# get the mapping
gdlyacc.parse_file(sys.argv[1])
src_int_rep = gdlyacc.int_rep.copy()
gdlyacc.parse_file(sys.argv[2])
tgt_int_rep = gdlyacc.int_rep.copy()
psyco.full()
best_map = rule_mapper2.do_mapping(src_int_rep, tgt_int_rep)
pred_map = dict((s.get_name(), t.get_name()) for s, t in best_map.get_pred_matches().items())
#src_c2p = build_c2p(src_int_rep, pred_map)
src_gnds = {} # pred -> [grounds]
for g in src_int_rep.get_statics() + src_int_rep.get_inits():
src_gnds.setdefault(g.get_predicate(), []).append(g)
#tgt_c2p = build_c2p(tgt_int_rep)
tgt_gnds = {} # pred -> [grounds]
for g in tgt_int_rep.get_statics() + tgt_int_rep.get_inits():
tgt_gnds.setdefault(g.get_predicate(), []).append(g)
cmap = {} # the committed mapping
# first map common constants to each other
src_consts = get_all_constants(reduce(lambda x,y: x+y, src_gnds.values()))
tgt_consts = get_all_constants(reduce(lambda x,y: x+y, tgt_gnds.values()))
for sc in src_consts:
if sc in tgt_consts:
cmap[sc] = sc
# this is temporary, in the future, order the predicates by how many other
# predicates it constrains
pred_order = filter(lambda x: x in pred_map, src_gnds.keys())
for src_p in pred_order:
tgt_p = pred_map[src_p]
print src_p, tgt_p
if src_p not in src_gnds or tgt_p not in tgt_gnds:
print >> sys.stderr, "PROBABLY A BAD MATCH BETWEEN %s AND %s" % (src_p, tgt_p)
continue
matches = cross_product(src_gnds[src_p], tgt_gnds[tgt_p])
# get the position mapping this is fake right now, but we should get this
# from a different script in the future right now just assume all the
# constant positions are preserved
tmp_src_g, tmp_tgt_g = matches[0]
        src_positions = PositionIndex.get_all_positions(tmp_src_g)
        tgt_positions = PositionIndex.get_all_positions(tmp_tgt_g)
        pmap = dict([(p, p) for p in src_positions if p in tgt_positions])
# here we're going to match up all the grounds for this predicate
# the order of the matching is random and can affect the quality of the
# match, but I don't have any good idea about how to do it right now
matches = filter_matches(matches, cmap, pmap)
while len(matches) > 0:
src_g, tgt_g = matches.pop()
commit_ground_match(src_g, tgt_g, cmap, pmap)
matches = filter_matches(matches, cmap, pmap)
for sp, tp in pred_map.items():
print 'map predicate %s %s' % (sp, tp)
for src_c, tgt_c in cmap.items():
print 'map constant %s %s' % (src_c, tgt_c)
|
nilq/baby-python
|
python
|
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
import subprocess
import re
import numpy as np
def main():
m = 100
for methodIndex in range(18):
for n in (10, 32, 100, 316, 1000, 3162, 10000):
data = []
for i in range(100):
                stdout = subprocess.run([r'x64\Release\exectime.exe', str(methodIndex), str(m), str(n)],
stdout=subprocess.PIPE).stdout.decode('utf-8')
tokens = re.findall(r'(\[.+\]): ([\.\d]+)', stdout)[0]
data.append(float(tokens[1]))
print(methodIndex, str(n) + 'x' + str(n), tokens[0], np.mean(data), np.std(data))
if __name__ == '__main__':
import sys
sys.exit(main())
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# testPyComments.py
""" Test functioning of Python line counters. """
import unittest
from argparse import Namespace
from pysloc import count_lines_python, MapHolder
class TestPyComments(unittest.TestCase):
""" Test functioning of Python line counters. """
def setUp(self):
pass
def tearDown(self):
pass
def test_name_to_func_map(self):
""" Verify that line counts for known python file are correct. """
test_file = 'tests/commentsForPy'
options = Namespace()
options.already = set()
options.ex_re = None
options.map_holder = MapHolder()
options.verbose = False
lines, sloc = count_lines_python(test_file, options, 'py')
self.assertEqual(lines, 29)
self.assertEqual(sloc, 13)
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
import binascii
import time
from typing import List, Tuple, Union, cast
logging = True
loggingv = False
_hex = "0123456789abcdef"
def now():
return int(time.monotonic() * 1000)
def log(msg: str, *args: object):
if logging:
if len(args):
msg = msg.format(*args)
print(msg)
def logv(msg: str, *args: object):
if loggingv:
if len(args):
msg = msg.format(*args)
print(msg)
def hex_num(n: int, width: int = 8):
    r = "0x"
    for i in range(width):
        r += _hex[(n >> ((width - 1 - i) * 4)) & 0xf]
    return r
def buf2hex(buf: bytes):
return binascii.hexlify(buf).decode()
# r = ""
    # # is this quadratic?
# for b in buf:
# r += _hex[b >> 4] + _hex[b & 0xf]
# return r
def hex2buf(s: str):
return binascii.unhexlify(s)
# r = bytearray(len(s) >> 1)
# for idx in range(0, len(s), 2):
# r[idx >> 1] = (_hex.index(s[idx].lower()) <<
# 4) | _hex.index(s[idx+1].lower())
# return r
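# Round-trip sanity check: buf2hex(b"\x01\xab") == "01ab" and
# hex2buf("01ab") == b"\x01\xab", so hex2buf(buf2hex(x)) == x for any bytes x.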
def u16(buf: bytes, off: int):
return buf[off] | (buf[off+1] << 8)
def set_u16(buf: bytearray, off: int, val: int):
buf[off] = val & 0xff
buf[off + 1] = val >> 8
def u32(buf: bytes, off: int):
return buf[off] | (buf[off+1] << 8) | (buf[off+2] << 16) | (buf[off+3] << 24)
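# Example of the little-endian layout shared by these helpers:
# b = bytearray(2); set_u16(b, 0, 0x1234)  ->  b == bytearray(b"\x34\x12")
# and u16(b, 0) == 0x1234.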
def hash(buf: bytes, bits: int = 30):
# return busio.JACDAC.__dict__["hash"](buf, bits)
if bits < 1:
return 0
h = fnv1(buf)
if bits >= 32:
return h >> 0
else:
return ((h ^ (h >> bits)) & ((1 << bits) - 1))
def fnv1(data: bytes):
h = 0x811c9dc5
for i in range(len(data)):
h = ((h * 0x1000193) & 0xffff_ffff) ^ data[i]
return h
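# By construction fnv1(b"") is the 32-bit FNV-1 offset basis 0x811c9dc5;
# the hash() helper above then xor-folds that down to the requested bit width.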
def short_id(longid: Union[bytes, str]):
if isinstance(longid, str):
longid = hex2buf(longid)
h = hash(longid)
return (
chr(0x41 + h % 26) +
chr(0x41 + (h // 26) % 26) +
chr(0x30 + (h // (26 * 26)) % 10) +
chr(0x30 + (h // (26 * 26 * 10)) % 10)
)
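# The resulting code is always two uppercase letters followed by two digits
# (an "AB12"-style tag): stable for a given long id, but not globally unique.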
def crc16(buf: bytes, start: int = 0, end: int = None):
if end is None:
end = len(buf)
crc = 0xffff
while start < end:
data = buf[start]
start += 1
x = (crc >> 8) ^ data
x ^= x >> 4
crc = ((crc << 8) ^ (x << 12) ^ (x << 5) ^ x) & 0xffff
return crc
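# Sanity check, assuming this is CRC-16/CCITT-FALSE (poly 0x1021, init 0xffff,
# no reflection): crc16(b"123456789") should equal 0x29b1.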
def color_to_rgb(rgb: Union[int, Tuple[int, int, int], List[int]], default = (0,0,0)) -> Tuple[int, int, int]:
"""
Maps various format to a r,g,b tuple
"""
if rgb is None:
return default
elif type(rgb) == int:
irgb = cast(int, rgb)
r = (irgb >> 16) & 0xff
g = (irgb >> 8) & 0xff
b = (irgb >> 0) & 0xff
elif type(rgb) == tuple:
trgb = cast(Tuple[int, int, int], rgb)
r = (trgb[0]) & 0xff
g = (trgb[1]) & 0xff
b = (trgb[2]) & 0xff
else:
lrgb = cast(List[int], rgb)
r = (lrgb[0]) & 0xff
g = (lrgb[1]) & 0xff
b = (lrgb[2]) & 0xff
return (r,g,b)
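# All of these normalize to the same tuple:
# color_to_rgb(0xff8000) == color_to_rgb((255, 128, 0)) == color_to_rgb([255, 128, 0]) == (255, 128, 0)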
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Pih2o utilities.
"""
import logging
LOGGER = logging.getLogger("pih2o")
|
nilq/baby-python
|
python
|
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1beta1PodDisruptionBudgetStatusDict generated type."""
import datetime
from typing import TypedDict, Dict
V1beta1PodDisruptionBudgetStatusDict = TypedDict(
"V1beta1PodDisruptionBudgetStatusDict",
{
"currentHealthy": int,
"desiredHealthy": int,
"disruptedPods": Dict[str, datetime.datetime],
"disruptionsAllowed": int,
"expectedPods": int,
"observedGeneration": int,
},
total=False,
)
|
nilq/baby-python
|
python
|
import sys
import os
from src.model.userManagement import getLeaderBoard
import configparser
from typing import Optional
from discord import Client, Message, Guild, Member
from pymysql import Connection
from src.utils.readConfig import getLanguageConfig
languageConfig = getLanguageConfig()
async def getLeaderBoardTop10(self: Client, message: Message, db: Connection):
"""
Reply for leader board top 10
:param self: Client obj
:param message: Message Obj
:param db: Database obj
:return: None
"""
leaderBoardData: tuple = getLeaderBoard(db)
myGuild: Guild = self.guilds[0]
if leaderBoardData is None:
systemError = str(languageConfig['error']["dbError"])
messageSendBack: str = systemError
else:
title = str(languageConfig["leaderBoard"]["title"])
messageSendBack = title + "\n"
for i in range(0, len(leaderBoardData)):
try:
                userObj: Optional[Member] = await myGuild.fetch_member(leaderBoardData[i][0])
            except Exception:
                userObj = None
if userObj is None:
userDisplayName = str(languageConfig['leaderBoard']["alternativeNameForNotFound"])
else:
userDisplayName: str = userObj.display_name
moneyDisplay: float = leaderBoardData[i][1] / 100
msg = str(languageConfig['leaderBoard']["formatInLine"])\
.replace("?@user", f" {userDisplayName} ")\
.replace("?@amount", f"{moneyDisplay}")
messageSendBack += f"{i + 1}:" + msg + "\n"
await message.channel.send(messageSendBack)
|
nilq/baby-python
|
python
|
# Copyright 2019 Graphcore Ltd.
# coding=utf-8
"""
Derived from
https://www.tensorflow.org/probability/api_docs/python/tfp/mcmc/HamiltonianMonteCarlo
"""
import tensorflow as tf
from tensorflow.contrib.compiler import xla
import tensorflow_probability as tfp
import time
try:
from tensorflow.python import ipu
device = '/device:IPU:0'
scope = ipu.scopes.ipu_scope
    options = ipu.utils.create_ipu_config()
    ipu.utils.configure_ipu_system(options)
except ImportError:
device = '/device:GPU:0'
scope = tf.device
N_REPEATS = 100
N_LEAPFROG = 5
N_STEPS_PER_REPEAT = int(10e3)
TARGET_TIME_TEN_THOUSAND_STEPS = 0.22
# Target distribution is proportional to: `exp(-x (1 + x))`.
def unnormalized_log_prob(x):
return -x - x**2.
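# Completing the square: -x - x**2 = 0.25 - (x + 0.5)**2, so the target is an
# unnormalized Gaussian with mean -0.5 and variance 0.5, a handy reference
# when eyeballing the sampled chain.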
# Initialize the HMC transition kernel.
hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_log_prob,
num_leapfrog_steps=N_LEAPFROG,
step_size=1.)
# Run single HMC step repeatedly
def run_single_steps():
def _step(i, state):
new_state, _ = hmc.one_step(state, hmc.bootstrap_results(state))
return [i + 1, new_state]
_, s = tf.while_loop(cond=lambda i, _: i < N_STEPS_PER_REPEAT,
body=_step,
loop_vars=[tf.constant(0), 1.])
return s
# To test effect of bootstrap_results in run_single_steps(), run bootstrap_results in isolation
def test_bootstrap_results():
def _step(i, state):
new_state = hmc.bootstrap_results(state).proposed_state
return [i + 1, new_state]
_, s = tf.while_loop(cond=lambda i, _: i < N_STEPS_PER_REPEAT,
body=_step,
loop_vars=[tf.constant(0), 1.])
return s
if __name__ == '__main__':
with scope(device):
ss = xla.compile(run_single_steps, ())
# br = xla.compile(test_bootstrap_results, ())
conf = tf.ConfigProto(log_device_placement=True)
sess = tf.Session(config=conf)
sess.run(tf.global_variables_initializer())
# Run once to compile
sess.run(ss)
# sess.run(br)
t_total = 0.
t_total_br = 0.
print('Running HMC.')
for itr in range(N_REPEATS):
# HMC
t_bef = time.time()
state_out = sess.run(ss)
t_total += time.time() - t_bef
# for itr in range(N_REPEATS):
# # Bootstrap results
# t_bef = time.time()
# _ = sess.run(br)
# t_total_br = time.time() - t_bef
print(f'Avg time per step {t_total / float(N_REPEATS * N_STEPS_PER_REPEAT)}')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
# Clear the console.
os.system("clear")
def msg(stat):
print '\033[1;42m'+'\033[1;37m'+stat+'\033[1;m'+'\033[1;m'
def newline():
print ""
def new_hosts(domain):
msg(" What would be the public directory name? \n - Press enter to keep default name (\"public_html\") ")
public_dir = raw_input()
# Check and set name of the public directory.
if public_dir == "":
public_dir = "public_html"
newline()
# Define the webserver parent directory
msg(" What would be the server parent directory? \n - Press enter to keep \"/var/www/\" as default location. ")
server_parent_dir = raw_input()
if server_parent_dir == "":
server_parent_dir = "/var/www/"
else:
if os.path.exists(server_parent_dir) == False:
msg(" Parent directory (\""+server_parent_dir+"\") was not found! \n Please enter server parent directory again: ")
server_parent_dir = raw_input()
else:
msg(" Server parent directory has changed to:(\""+server_parent_dir+"\") ")
newline()
msg(" Creating the Directory Structure ")
os.system("sudo mkdir -p "+server_parent_dir+domain+"/"+public_dir)
newline()
msg(" Change directory permissions? \n It will give current user permission for this vhost and permit read access. \n If you want to change permission then type Y and press enter \n If you are not sure then press enter and skip this step")
uper = raw_input()
if (uper == "Y" or uper == "y"):
msg(" Granting Proper Permissions ")
os.system("sudo chown -R $USER:$USER "+server_parent_dir+domain+"/"+public_dir)
print("Proper Permissions Granted")
newline()
msg(" Making Sure Read Access is Permitted ")
os.system("sudo chmod -R 755 "+server_parent_dir+domain+"/"+public_dir)
print("Read Access is Permitted")
else:
msg( "Permission process skipped" )
newline()
msg(" Adding A Demo Page ")
file_object = open(server_parent_dir+domain+"/"+public_dir+"/index.html", "w")
file_object.write("<!DOCTYPE html><html lang='en'><head><meta charset='UTF-8'><title>Virtual Hosts Created Successfully!</title><style>html{background-color: #508bc9; color: #fff;font-family: sans-serif, arial;}.container{width: 80%;margin: auto auto;}.inl{text-align: center;}.inl img{border-radius: 10px;}a{color: #f2d8ab; }</style></head><body><div class='container'><h1>Virtual Hosts Created Successfully!</h1><p><b>Apache-VHC</b> has successfully created a virtual host on your server.</body></html>")
file_object.close()
print("Demo Page Added")
newline()
msg(" Creating Virtual Host File ")
host_file = open("/tmp/"+domain+".conf", "w")
host_file.write("<VirtualHost *:80>\nServerAdmin localserver@localhost\nServerName "+domain+"\nServerAlias www."+domain+"\nDocumentRoot "+server_parent_dir+domain+"/"+public_dir+"\nErrorLog ${APACHE_LOG_DIR}/error.log\nCustomLog ${APACHE_LOG_DIR}/access.log combined\n</VirtualHost>")
host_file.close()
os.system("sudo mv \"/tmp/"+domain+".conf\" \"/etc/apache2/sites-available/\"")
print("Virtual Host File added")
newline()
msg(" Activating New Virtual Host ")
os.system("sudo a2dissite 000-default.conf")
os.system("sudo a2ensite "+domain+".conf")
newline()
msg(" Restarting Apache Server ")
os.system("sudo service apache2 restart")
os.system("service apache2 reload")
print("Apache Server Restarted")
newline()
msg(" Setting Up Local Host File ")
if host_flag == 0:
os.system("sudo sed -i -e '1i127.0.1.1 "+domain+"\' \"/etc/hosts\"")
else:
print " There already is a Local Host File. "
print "\nSuccess! Please visit http://"+domain+"/ from any web browser\n\n"
host_flag = 0
newline()
print "\n Welcome to Apache-VHC\n - This script will setup and configure Apache Virtual Hosts for you.\n - All you have to do is answer these questions.\n - IMPORTANT: Make sure you have Apache configured.\n"
newline()
msg(" What would be the domain name? ")
domain = raw_input()
if os.path.exists("/var/www/"+domain):
msg(" IMPORTANT: It seems that you have already configured a virtual hosts with the same domain name \n If you continue then all your data of "+domain+" will be overwritten and this cannot be undone \n Do you want to continue? (yes/no) ")
flag = raw_input()
host_flag = 1
if (flag == "no" or flag == ""):
newline()
msg(" New Virtual Host was not created due to a conflict. \n Please choose a different name and try again. ")
newline()
if flag == "yes":
newline()
msg(" Existing host "+domain+" will be overwritten ... ")
new_hosts(domain)
else:
new_hosts(domain)
|
nilq/baby-python
|
python
|
from __future__ import print_function
import os
import unittest
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from autosklearn.data.abstract_data_manager import AbstractDataManager
dataset_train = [[2.5, 3.3, 2, 5, 1, 1],
[1.0, 0.7, 1, 5, 1, 0],
[1.3, 0.8, 1, 4, 1, 1]]
dataset_train = np.array(dataset_train)
dataset_valid = [[1.5, 1.7, 1, 4, 1, 1],
[2.0, 2.1, 1, 5, 1, 0],
[1.9, 1.8, 2, 4, 0, 1]]
dataset_valid = np.array(dataset_valid)
dataset_test = [[0.9, 2.2, 2, 4, 1, 1],
[0.7, 3.1, 1, 5, 1, 1],
[2.4, 2.6, 2, 5, 0, 1]]
dataset_test = np.array(dataset_test)
N = "Numerical"
B = "Binary"
C = "Categorical"
class InitFreeDataManager(AbstractDataManager):
def __init__(self):
pass
class CompetitionDataManagerTest(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.D = InitFreeDataManager()
self.D._data = {}
self.D._data['X_train'] = dataset_train.copy()
self.D._data['X_valid'] = dataset_valid.copy()
self.D._data['X_test'] = dataset_test.copy()
def test_perform1HotEncoding(self):
self.D.feat_type = [N, N, N, N, N, N]
self.D._info = {'is_sparse': 0, 'has_missing': 0}
self.D.perform1HotEncoding()
assert_array_almost_equal(dataset_train, self.D.data['X_train'])
assert_array_almost_equal(dataset_valid, self.D.data['X_valid'])
assert_array_almost_equal(dataset_test, self.D.data['X_test'])
self.assertIsInstance(self.D.data['X_train'], np.ndarray)
self.assertIsInstance(self.D.data['X_valid'], np.ndarray)
self.assertIsInstance(self.D.data['X_test'], np.ndarray)
def test_perform1HotEncoding_binary_data(self):
self.D.feat_type = [N, N, N, N, B, B]
self.D._info = {'is_sparse': 0, 'has_missing': 0}
self.D.perform1HotEncoding()
# Nothing should have happened to the array...
assert_array_almost_equal(dataset_train, self.D.data['X_train'])
assert_array_almost_equal(dataset_valid, self.D.data['X_valid'])
assert_array_almost_equal(dataset_test, self.D.data['X_test'])
self.assertIsInstance(self.D.data['X_train'], np.ndarray)
self.assertIsInstance(self.D.data['X_valid'], np.ndarray)
self.assertIsInstance(self.D.data['X_test'], np.ndarray)
def test_perform1HotEncoding_categorical_data(self):
self.D.feat_type = [N, N, C, C, B, B]
self.D._info = {'is_sparse': 0, 'has_missing': 0}
self.D.perform1HotEncoding()
# Check if converted back to dense array
self.assertIsInstance(self.D.data['X_train'], np.ndarray)
self.assertIsInstance(self.D.data['X_valid'], np.ndarray)
self.assertIsInstance(self.D.data['X_test'], np.ndarray)
# Check if the dimensions are correct
self.assertEqual((3, 8), self.D.data['X_train'].shape)
self.assertEqual((3, 8), self.D.data['X_valid'].shape)
self.assertEqual((3, 8), self.D.data['X_test'].shape)
# Some tests if encoding works
self.assertEqual(self.D.data['X_train'][:, :4].max(), 1)
self.assertEqual(self.D.data['X_valid'][:, :4].min(), 0)
self.assertEqual(self.D.data['X_test'][:, :4].min(), 0)
# Test that other stuff is not encoded
self.assertEqual(self.D.data['X_train'][0, 4], 2.5)
def test_perform1HotEncoding_binary_data_with_missing_values(self):
# self.D.feat_type = [N, N, N, N, B, B]
#self.D.info = {'is_sparse': 0, 'has_missing': 1}
#self.D.perform1HotEncoding()
#self.assertEqual((3, 8), self.D.data['X_train'].shape)
pass
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
__author__ = 'Grzegorz Latuszek, Michal Ernst, Marcin Usielski'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = 'grzegorz.latuszek@nokia.com, michal.ernst@nokia.com, marcin.usielski@nokia.com'
import pytest
def test_device_directly_created_must_be_given_io_connection(buffer_connection):
from moler.device.unixlocal import UnixLocal
dev = UnixLocal(io_connection=buffer_connection)
assert dev.io_connection == buffer_connection
def test_device_add_neighbour_device(buffer_connection):
from moler.device.unixlocal import UnixLocal
dev1 = UnixLocal(io_connection=buffer_connection)
dev2 = UnixLocal(io_connection=buffer_connection)
neighbour_devices = dev1.get_neighbour_devices(device_type=UnixLocal)
assert 0 == len(neighbour_devices)
dev1.add_neighbour_device(neighbour_device=dev2, bidirectional=True)
neighbour_devices = dev1.get_neighbour_devices(device_type=UnixLocal)
assert 1 == len(neighbour_devices)
neighbour_devices = dev2.get_neighbour_devices(device_type=UnixLocal)
assert 1 == len(neighbour_devices)
# device is added only once
dev1.add_neighbour_device(neighbour_device=dev2)
neighbour_devices = dev1.get_neighbour_devices(device_type=UnixLocal)
assert 1 == len(neighbour_devices)
neighbour_devices = dev1.get_neighbour_devices(device_type=None)
assert 1 == len(neighbour_devices)
neighbour_devices = dev1.get_neighbour_devices(device_type=int)
assert 0 == len(neighbour_devices)
def test_device_add_neighbour_device_without_bidirectional(buffer_connection):
from moler.device.unixlocal import UnixLocal
dev1 = UnixLocal(io_connection=buffer_connection)
dev2 = UnixLocal(io_connection=buffer_connection)
dev1.add_neighbour_device(neighbour_device=dev2, bidirectional=False)
neighbour_devices = dev1.get_neighbour_devices(device_type=UnixLocal)
assert 1 == len(neighbour_devices)
neighbour_devices = dev2.get_neighbour_devices(device_type=UnixLocal)
assert 0 == len(neighbour_devices)
def test_device_may_be_created_on_named_connection(configure_net_1_connection):
from moler.device.unixlocal import UnixLocal
dev = UnixLocal.from_named_connection(connection_name='net_1')
assert dev.io_connection is not None
assert dev.io_connection.name == 'net_1'
def test_device_unix_can_return_cd_command(configure_net_1_connection):
from moler.device.unixlocal import UnixLocal
from moler.cmd.unix.cd import Cd
ux = UnixLocal.from_named_connection(connection_name='net_1')
ux.establish_connection()
assert hasattr(ux, 'get_cmd')
assert isinstance(
ux.get_cmd(
cmd_name='cd',
cmd_params={
"path": "/home/user/"
}
),
Cd
)
# --------------------------- resources ---------------------------
@pytest.yield_fixture
def configure_net_1_connection():
import mock
from moler.config import connections as conn_cfg
with mock.patch.object(conn_cfg, "default_variant", {}):
with mock.patch.object(conn_cfg, "named_connections", {}):
conn_cfg.set_default_variant(io_type='memory', variant="threaded")
conn_cfg.define_connection(name='net_1', io_type='memory')
yield
|
nilq/baby-python
|
python
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for process_sites_contamination.py"""
import os
import unittest
import pandas as pd
from pandas.testing import assert_frame_equal
from .process_sites_contamination import process_site_contamination
_EXPECTED_SITE_COUNT = 1
class ProcessTest(unittest.TestCase):
def test_e2e(self):
self.maxDiff = None
base_path = os.path.dirname(__file__)
base_path = os.path.join(base_path, './data/test_data')
processed_count = process_site_contamination(base_path, base_path,
base_path)
self.assertEqual(_EXPECTED_SITE_COUNT, processed_count)
## validate the csvs
test_df = pd.read_csv(
os.path.join(base_path, 'superfund_sites_contamination.csv'))
expected_df = pd.read_csv(
os.path.join(base_path,
'superfund_sites_contamination_expected.csv'))
assert_frame_equal(test_df, expected_df)
## clean up
os.remove(os.path.join(base_path, 'superfund_sites_contamination.csv'))
os.remove(os.path.join(base_path, 'superfund_sites_contamination.tmcf'))
os.remove(os.path.join(base_path, 'superfund_sites_contamination.mcf'))
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
import mpmath
from mpsci.distributions import benktander1
def test_pdf():
with mpmath.workdps(50):
x = mpmath.mpf('1.5')
p = benktander1.pdf(x, 2, 3)
# Expected value computed with Wolfram Alpha:
# PDF[BenktanderGibratDistribution[2, 3], 3/2]
valstr = '1.090598817302604549131682068809802266147250025484891499295'
expected = mpmath.mpf(valstr)
assert mpmath.almosteq(p, expected)
def test_logpdf():
with mpmath.workdps(50):
x = mpmath.mpf('1.5')
p = benktander1.logpdf(x, 2, 3)
# Expected value computed with Wolfram Alpha:
# log(PDF[BenktanderGibratDistribution[2, 3], 3/2])
valstr = '0.086726919062697113736142804022160705324241157062981346304'
expected = mpmath.mpf(valstr)
assert mpmath.almosteq(p, expected)
def test_cdf_invcdf():
with mpmath.workdps(50):
x = mpmath.mpf('1.5')
p = benktander1.cdf(x, 2, 3)
# Expected value computed with Wolfram Alpha:
# CDF[BenktanderGibratDistribution[2, 3], 3/2]
valstr = '0.59896999842391210365289674809988804989249935760023852777'
expected = mpmath.mpf(valstr)
assert mpmath.almosteq(p, expected)
x1 = benktander1.invcdf(expected, 2, 3)
assert mpmath.almosteq(x1, x)
def test_sf_invsf():
with mpmath.workdps(50):
x = mpmath.mpf('1.5')
p = benktander1.sf(x, 2, 3)
# Expected value computed with Wolfram Alpha:
# SurvivalFunction[BenktanderGibratDistribution[2, 3], 3/2]
valstr = '0.40103000157608789634710325190011195010750064239976147223'
expected = mpmath.mpf(valstr)
assert mpmath.almosteq(p, expected)
x1 = benktander1.invsf(expected, 2, 3)
assert mpmath.almosteq(x1, x)
def test_mean():
with mpmath.workdps(50):
a = 2
b = 3
m = benktander1.mean(a, b)
assert mpmath.almosteq(m, mpmath.mpf('1.5'))
def test_var():
with mpmath.workdps(50):
a = 2
b = 3
m = benktander1.var(a, b)
# Expected value computed with Wolfram Alpha:
# Var[BenktanderGibratDistribution[2, 3]]
valstr = '0.129886916731278610514259475545032373691162070980680465530'
expected = mpmath.mpf(valstr)
assert mpmath.almosteq(m, expected)
|
nilq/baby-python
|
python
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import Comment, Webpage, Template, User
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ['title', 'content']
class WebpageForm(forms.ModelForm):
class Meta:
model = Webpage
fields = [
'name', 'template_used', 'user_title',
'user_text_1', 'user_text_2', 'user_text_3',
'user_image_1', 'user_image_2', 'user_image_3'
]
class TemplateForm(forms.ModelForm):
class Meta:
model = Template
fields = ['name', 'style_sheet']
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
|
nilq/baby-python
|
python
|
import unittest
import os
from examples.example_utils import delete_experiments_folder
from smallab.runner.runner import ExperimentRunner
from smallab.runner_implementations.fixed_resource.simple import SimpleFixedResourceAllocatorRunner
from smallab.specification_generator import SpecificationGenerator
from smallab.utilities.experiment_loading.experiment_loader import experiment_iterator
from tests.test_overlapping_checkpointed_experiment import SimpleExperiment, SimpleFailExperiment
class TestResourceAllocator(unittest.TestCase):
def tearDown(self) -> None:
try:
os.remove("tmp.pkl")
except FileNotFoundError:
pass
try:
delete_experiments_folder("test")
except FileNotFoundError:
pass
def testmain(self):
# Same specification as before
generation_specification = {"seed": [1, 2, 3, 4, 5, 6, 7, 8], "num_calls": [[10, 20, 30]]}
specifications = SpecificationGenerator().generate(generation_specification)
output_generation_specification = {"seed": [1, 2, 3, 4, 5, 6, 7, 8], "num_calls": [10, 20, 30]}
output_specifications = SpecificationGenerator().generate(output_generation_specification)
name = "test"
# This time we will run them all in parallel
runner = ExperimentRunner()
expr = SimpleExperiment()
runner.run(name, specifications, expr, specification_runner=SimpleFixedResourceAllocatorRunner([1,2,3]),
use_dashboard=True, propagate_exceptions=True,context_type="spawn")
log_base = os.path.join("experiment_runs",name,"logs")
for root, dirs, files in os.walk(log_base):
for file in files:
with open(os.path.join(root,file),"r") as f:
lines = f.readlines()
self.assertNotEqual([],lines)
for result in experiment_iterator(name):
if result["result"] != []:
output_specifications.remove(result["specification"])
self.assertEqual([],output_specifications)
def test_save_correctly_final_output(self):
# Same specification as before
generation_specification = {"seed": [1, 2, 3, 4, 5, 6, 7, 8], "num_calls": [[10, 20, 30]]}
specifications = SpecificationGenerator().generate(generation_specification)
output_generation_specification = {"seed": [1, 2, 3, 4, 5, 6, 7, 8], "num_calls": [10, 20, 30]}
output_specifications = SpecificationGenerator().generate(output_generation_specification)
name = "test"
# This time we will run them all in parallel
runner = ExperimentRunner()
runner.run(name, specifications, SimpleExperiment(), specification_runner=SimpleFixedResourceAllocatorRunner([1,2,3]),
use_dashboard=False, propagate_exceptions=True)
for result in experiment_iterator(name):
if result["result"] != []:
output_specifications.remove(result["specification"])
self.assertEqual([], output_specifications)
runner.run(name,specifications,SimpleFailExperiment())
|
nilq/baby-python
|
python
|
import os
import torch
import torch.nn as nn
import torch.optim as optim
import argparse
import numpy as np
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from model import * # NOTE : Import all the models here
from utils import progress_bar
# NOTE : All parser related stuff here
parser = argparse.ArgumentParser(description='PyTorch Audio Style Transfer')
parser.add_argument('--lr', default=0.01, type=float, help='learning rate')
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc, start_epoch = 0, 0 # best test accuracy, start from epoch 0 or last checkpoint epoch
# NOTE : All data related stuff here
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='../dataset', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='../dataset', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# NOTE : Build model here & check if to be resumed
print('==> Building network..')
t_net = TransformationNetwork()
t_net = t_net.to(device)
if device == 'cuda':
t_net = torch.nn.DataParallel(t_net)
cudnn.benchmark = True
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('../save/checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('../save/checkpoint/ckpt.t7')
    t_net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
# NOTE : Define losses here
criterion = nn.CrossEntropyLoss()
def train(epoch, curr_class, old_classes):
print('\nEpoch: %d' % epoch)
    t_net.train()
train_loss, correct, total = 0, 0, 0
    params = t_net.parameters()
optimizer = optim.SGD(params, lr=args.lr, momentum=0.9, weight_decay=5e-4)
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
# NOTE : Main optimizing here
optimizer.zero_grad()
        outputs = t_net(inputs)
        loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
# NOTE : Logging here
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
with open("../save/logs/train_loss.log", "a+") as lfile:
lfile.write("{}\n".format(train_loss / total))
with open("../save/logs/train_acc", "a+") as afile:
afile.write("{}\n".format(correct / total))
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test(epoch, curr_class):
global best_acc
    t_net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = t_net(inputs)  # match the forward call used in train()
            loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
with open("./logs/test_loss_{}.log".format(curr_class), "a+") as lfile:
lfile.write(str(test_loss / total))
lfile.write("\n")
with open("./logs/test_acc_{}.log".format(curr_class), "a+") as afile:
afile.write(str(correct / total))
afile.write("\n")
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
acc = 100.*correct/total
if acc > best_acc:
print('Saving..')
        state = {'net': t_net.state_dict(), 'acc': acc, 'epoch': epoch}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/ckpt.t7')
best_acc = acc
# NOTE : Final running here
curr_class, old_classes_arr = 0, []  # placeholders: the class-incremental loop was never wired up here
for epoch in range(start_epoch, start_epoch + 200):
    train(epoch, curr_class, old_classes_arr)
    test(epoch, curr_class)
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2005-2006
# The President and Fellows of Harvard College.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Geoffrey Mainland <mainland@eecs.harvard.edu>
# Tinyos-2: Stephen Dawson-Haggerty
import os
import re
import struct
import sys
import traceback
from tinyos.packet.SerialH import Serial
from tinyos.message.SerialPacket import SerialPacket
import tinyos.packet.PacketDispatcher
import tinyos.packet.PacketSource
import tinyos.packet.SFSource
try:
import tinyos.packet.SerialSource
except:
tinyos.packet.SerialSource = None
DEBUG = False
class MoteIFException(Exception):
def __init__(self, *args):
self.args = args
class MoteIF:
def __init__(self):
self.listeners = {}
def addListener(self, listener, msgClass):
if listener not in self.listeners:
self.listeners[listener] = {}
amTypes = self.listeners[listener]
amTypes[msgClass.get_amType()] = msgClass
def removeListener(self, listener):
del self.listeners[listener]
def dispatchPacket(self, source, packet):
#try:
#print "Packet length: ", len(packet)
# print "Dispatching from MoteIF"
# for i in packet:
# print ord(i)," ",
# print
try:
# Message.py ignores base_offset, so we'll just chop off
# the first byte (the SERIAL_AMTYPE) here.
serial_pkt = SerialPacket(packet[1:],
data_length=len(packet)-1)
        except:
            traceback.print_exc()
            return
try:
data_start = serial_pkt.offset_data(0) + 1
data_end = data_start + serial_pkt.get_header_length()
data = packet[data_start:data_end]
amType = serial_pkt.get_header_type()
except Exception, x:
print >>sys.stderr, x
print >>sys.stderr, traceback.print_tb(sys.exc_info()[2])
for l, amTypes in self.listeners.items():
if amType in amTypes:
try:
msgClass = amTypes[amType]
msg = msgClass(data=data,
data_length = len(data),
addr=serial_pkt.get_header_src(),
gid=serial_pkt.get_header_group())
l.receive(source, msg)
except Exception, x:
print >>sys.stderr, x
print >>sys.stderr, traceback.print_tb(sys.exc_info()[2])
def sendMsg(self, dest, addr, amType, group, msg):
try:
payload = msg.dataGet()
msg = SerialPacket(None)
msg.set_header_dest(int(addr))
msg.set_header_group(int(group))
msg.set_header_type(int(amType))
msg.set_header_length(len(payload))
# from tinyos.packet.Serial
data = chr(Serial.TOS_SERIAL_ACTIVE_MESSAGE_ID)
data += msg.dataGet()[0:msg.offset_data(0)]
data += payload
dest.writePacket(data)
except Exception, x:
print >>sys.stderr, x
print >>sys.stderr, traceback.print_tb(sys.exc_info()[2])
def addSource(self, name=None):
if name == None:
name = os.environ.get("MOTECOM", "sf@localhost:9002")
m = re.match(r'([^@]*)@(.*)', name)
if m == None:
raise MoteIFException("base source '%s'" % (name))
(sourceType, args) = m.groups()
if sourceType == "sf":
source = tinyos.packet.SFSource.SFSource(self, args)
elif sourceType == "serial" and tinyos.packet.SerialSource != None:
source = tinyos.packet.SerialSource.SerialSource(self, args)
else:
raise MoteIFException("bad source")
source.start()
#block until the source has started up.
source.semaphore.acquire()
source.semaphore.release()
return source
def finishAll(self):
tinyos.packet.PacketSource.finishAll()
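# Hypothetical usage sketch (MyMsg stands in for an application message class
# generated by mig; the names below are illustrative, not part of this module):
#   mif = MoteIF()
#   source = mif.addSource("sf@localhost:9002")
#   mif.addListener(my_listener, MyMsg)   # my_listener must define receive(source, msg)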
|
nilq/baby-python
|
python
|
num = int(input('Enter an integer: '))
if (num % 2) == 0:
    print('The chosen number is EVEN.')
else:
    print('The chosen number is ODD.')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import subprocess
from deoplete.source.base import Base
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
# deoplete related variables
self.rank = 1000
self.name = "cmake"
self.mark = "[cmake]"
self.input_pattern = r"[^\w\s]$"
self.min_pattern_length = 1
self.filetypes = ["cmake"]
self.vars = {}
def gather_candidates(self, context):
completion_candidates = []
completion_candidates += self.vim.call("cmake#gather_candidates", "command")
completion_candidates += self.vim.call("cmake#gather_candidates", "variable")
completion_candidates += self.vim.call("cmake#gather_candidates", "property")
return completion_candidates
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
from library.cloudflare import CloudFlare
from library.dnspod import Dnspod
from helpers.logger import log_error
support = ['dnspod', 'cloudflare']
allowed_types = ['A', 'CNAME', 'AAAA', 'NS']
class dns:
def help(self, req, resp):
h = '''
        DNS management
        Public DNS: supports dnspod and cloudflare
        Options:
        -t : provider type, one of dnspod, cloudflare
        -d : domain name
        -rt : DNS record type, one of A, CNAME, AAAA, NS
        -n : record name
        -c : record content
        -h : target host
        ops dns list_domains -t dnspod    list public DNS domains
        ops dns add_record -d domain --rt record_type -n name -c content -t dnspod    add a public DNS record
        ops dns edit_record -d domain --ri record_id --rt record_type -n name -c content -t dnspod    edit a public DNS record
        ops dns del_record -d domain --ri record_id -t dnspod    delete a public DNS record
'''
return h
def list_domains(self, req, resp):
t = req.get_param(name='t')
if t is None or t not in support:
            return '%s type is not supported' % t
if t == 'cloudflare':
try:
cloudflare = CloudFlare()
return cloudflare.get_domains_list()
except Exception as e:
log_error(e)
raise Exception(e)
elif t == 'dnspod':
try:
dp = Dnspod()
return dp.get_domains_list()
except Exception as e:
log_error(e)
raise Exception(e)
def add_record(self, req, resp):
record_type = req.get_param(name='rt')
name = req.get_param(name='n')
content = req.get_param(name='c')
domain = req.get_param(name='d')
t = req.get_param(name='t')
if t is None or t not in support:
            return '%s type is not supported' % t
        if record_type is None or record_type not in allowed_types:
            return '%s record type is not supported' % record_type
if name is None or name == '':
return '-n is empty'
if content is None or content == '':
return '-c is empty'
if domain is None or domain == '':
return '-d is empty'
if t == 'cloudflare':
try:
cloudflare = CloudFlare()
return cloudflare.add_record(
domain=domain,
record_type=record_type,
name=name,
content=content)
except Exception as e:
log_error(e)
raise Exception(e)
elif t == 'dnspod':
try:
dp = Dnspod()
return dp.add_record(
domain=domain,
record_type=record_type,
name=name,
content=content)
except Exception as e:
log_error(e)
raise Exception(e)
def del_record(self, req, resp):
record_id = req.get_param(name='ri')
domain = req.get_param(name='d')
t = req.get_param(name='t')
if t is None or t not in support:
            return '%s type is not supported' % t
        if record_id is None or record_id == '':
            return '-ri is empty'
if domain is None or domain == '':
return '-d is empty'
if t == 'cloudflare':
try:
cloudflare = CloudFlare()
return cloudflare.delete_record(
domain=domain, record_id=record_id)
except Exception as e:
log_error(e)
raise Exception(e)
elif t == 'dnspod':
try:
dp = Dnspod()
return dp.delete_record(domain=domain, record_id=record_id)
except Exception as e:
log_error(e)
raise Exception(e)
def edit_record(self, req, resp):
record_type = req.get_param(name='rt')
record_id = req.get_param(name='ri')
name = req.get_param(name='n')
content = req.get_param(name='c')
domain = req.get_param(name='d')
t = req.get_param(name='t')
if t is None or t not in support:
            return '%s type is not supported' % t
        if record_type is None or record_type not in allowed_types:
            return '%s record type is not supported' % record_type
        if record_id is None or record_id == '':
            return '-ri is empty'
if name is None or name == '':
return '-n is empty'
if content is None or content == '':
return '-c is empty'
if domain is None or domain == '':
return '-d is empty'
        # NOTE: edit_record currently delegates to add_record on both backends,
        # so the validated record_id is never passed through.
        if t == 'cloudflare':
try:
cloudflare = CloudFlare()
return cloudflare.add_record(
domain=domain,
record_type=record_type,
name=name,
content=content)
except Exception as e:
log_error(e)
raise Exception(e)
elif t == 'dnspod':
try:
dp = Dnspod()
return dp.add_record(
domain=domain,
record_type=record_type,
name=name,
content=content)
except Exception as e:
log_error(e)
raise Exception(e)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
# Copyright (c) 2016-2017, Daimler AG. All rights reserved.
import argparse
# Find the best implementation available
import logging
import os
from generic_tf_tools.tf_records import TFCreator
from generic_tf_tools.data2example import SwedenImagesv2
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(name='TfRecordsBuild')
def parsArgs():
parser = argparse.ArgumentParser(description='Build TF Records')
parser.add_argument('--source_dir', '-r', help='Enter the raw data source folder', default='')
parser.add_argument('--dest_dir', '-d', type=str, help='definde destination directory')
parser.add_argument('--dataset-id', '-id', type=str, help='defined dataset id')
parser.add_argument('--file_list', '-f', help='Enter path to split files', default='DepthData')
parser.add_argument('--dataset_type', '-t', help='Enter Dataset Type', default='FullSeeingThroughFogDataset')
parser.add_argument('--batch_size', '-bs', type=int, help='Enter Batch Size per Record File', default=4)
parser.add_argument('--num_threads', '-nt', type=int, help='Enter Number of Threads for parallel execution', default=1)
parser.add_argument('--force_same_shape', '-fs', type=bool, help='Enforce same shape for all examples. Safety Feature not implemented', default=False)
parser.add_argument('--stage', '-s', help='Stage (train, val, test)', default='train')
args = parser.parse_args()
return args
def create_generic_db(args):
"""
Create a generic DB
"""
# load dataset job
dataset_dir = os.path.join(args.dest_dir, args.dataset_id)
if not os.path.isdir(dataset_dir):
os.makedirs(dataset_dir)
#raise IOError("Dataset dir %s does not exist" % dataset_dir)
batch_size = args.batch_size
num_threads = args.num_threads
force_same_shape = args.force_same_shape
with open(args.file_list, 'r') as f:
entry_ids = f.readlines()
entry_ids = [i.replace(',','_').split('\n')[0] for i in entry_ids]
# create main DB creator object and execute main method
records_dir = os.path.join(dataset_dir, args.stage)
if not os.path.exists(records_dir):
os.makedirs(records_dir)
conversionClass = None
if args.dataset_type == 'FullSeeingThroughFogDataset':
conversionClass = SwedenImagesv2(source_dir=args.source_dir)
else:
logger.error('Wrong TF conversion Class specified')
raise ValueError
tf_creator = TFCreator(entry_ids,
args.stage,
args.source_dir,
records_dir,
batch_size,
num_threads,
conversionClass,
args.force_same_shape)
tf_creator()
logger.info('Generic TF-DB creation Done')
logger.info('Created %s db for stage %s in %s' % ('features', args.stage, args.source_dir))
if __name__ == '__main__':
args = parsArgs()
try:
create_generic_db(
args
)
except Exception as e:
logger.error('Failed DatasetBuild')
raise
|
nilq/baby-python
|
python
|
"""
Properties of Dictionary Keys
Dictionary values have no restrictions. They can be any arbitrary Python object, either standard
objects or user-defined objects. However, the same is not true for the keys.
There are two important points to remember about dictionary keys −
(a) More than one entry per key is not allowed, which means no duplicate keys are allowed. When
duplicate keys are encountered during assignment, the last assignment wins. For example −
"""
dict = {'Name': 'Zara', 'Age': 7, 'Name': 'Manni'}
print ("dict['Name']: ", dict['Name'])
"""
When the above code is executed, it produces the following result −
dict['Name']: Manni
"""
"""
(b) Keys must be immutable, which means you can use strings, numbers, or tuples as dictionary
keys, but something like ['key'] is not allowed. Following is a simple example −
"""
dict = {['Name']: 'Zara', 'Age': 7}
print ("dict['Name']: ", dict['Name'])
"""
When the above code is executed, it produces the following result −
Traceback (most recent call last):
File "test.py", line 3, in <module>
dict = {['Name']: 'Zara', 'Age': 7};
TypeError: list objects are unhashable"""
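"""
Added illustration (not part of the original text): tuples are immutable,
so, unlike lists, they are perfectly valid dictionary keys.
"""
point_names = {(0, 0): 'origin', (1, 0): 'unit-x'}
print ("point_names[(0, 0)]: ", point_names[(0, 0)])
"""
When the above code is executed, it produces the following result −
point_names[(0, 0)]:  origin
"""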
|
nilq/baby-python
|
python
|
def readFile(file):
f = open(file)
data = f.read()
f.close()
return data
def readFileLines(file):
data = readFile(file)
return data.strip().split("\n")
def readFileNumberList(file):
lines = readFileLines(file)
return list(map(int, lines))
def differencesBetweenNumbers(numbers):
# only allowed to have four levels of difference
differences = dict()
previous = 0
for current in numbers:
delta = current - previous
if not delta in differences:
differences[delta] = 0
differences[delta] += 1
previous = current
return differences
numbers = readFileNumberList("10.input.txt")
# add start and end
real_begin = 0
real_end = max(numbers) + 3
numbers.append(real_begin) # starts at 0 anyway
numbers.append(real_end)
numbers.sort()
print(numbers)
print("Part 1")
deltas = differencesBetweenNumbers(numbers)
ones = deltas[1]
threes = deltas[3]
print(ones * threes)
print("Part 2")
#print(ones)
#print(threes)
def generateComboOne(numbers):
combos = []
for i in range(len(numbers)):
v = [numbers[i]]
combos.append(v)
return combos
def generateComboTwo(numbers):
combos = []
sequence = []
for a in range(len(numbers)):
sequence.append(numbers[a])
for b in range(a +1,len(numbers)):
sequence.append(numbers[b])
combos.append(sequence)
sequence = []
return combos
def generateComboThree(numbers):
combos = []
sequence = []
for a in range(len(numbers)):
sequence.append(numbers[a])
for b in range(a +1,len(numbers)):
sequence.append(numbers[b])
for c in range(b +1,len(numbers)):
sequence.append(numbers[c])
combos.append(sequence)
sequence = []
return combos
# def generateComboFour(numbers):
# combos = []
# sequence = []
# for a in range(len(numbers)):
# sequence.append(numbers[a])
# for b in range(a + 1,len(numbers)):
# sequence.append(numbers[b])
# for c in range(b + 1,len(numbers)):
# sequence.append([numbers[c]])
# for d in range(c + 1,len(numbers)):
# sequence.append([numbers[d]])
# combos.append(sequence)
# sequence = []
# return combos
def validCombo(begin, end, combo):
# can it hook up to begin?
#print("\t{}".format(combo))
if combo[0] -3 > begin:
return False
# can it hook up to end?
if combo[-1] +3 < end:
return False
# check that each number only differs bu at most 3
for i in range(len(combo) -1):
if combo[i] +3 < combo[i+1]:
return False
return True
def validComboCount(begin, end, combos):
count = 0
for c in combos:
if validCombo(begin, end, c):
count += 1
return count
def combinationsBetween(begin, between, end):
count = 1 # all always works
# does none work?
if begin +3 >= end:
count += 1
if len(between) ==0:
return 0
if len(between) == 1:
# with or without the number
return count
if len(between) == 2:
a = between[0]
b = between[1]
# a can work by itself
if a + 3 >= end:
count +=1
# b can work by itself
if b - 3 <= begin:
count +=1
return count
if len(between) == 3:
# generate all sequences and count each one that works
combos = generateComboOne(between)
combos.extend(generateComboTwo(between))
#print(combos)
count += validComboCount(begin, end, combos)
return count
if len(between) == 4:
combos = generateComboOne(between)
combos.extend(generateComboTwo(between))
combos.extend(generateComboThree(between))
#print(combos)
count += validComboCount(begin, end, combos)
return count
# need to calculate
return -1
# numbers with a difference of three between them can't move
# only numbers between combinations can move
# a single number between blocks can't move
print("\n\n\n")
sequence = []
previous_pair = (0,0)
print("({})".format(real_begin))
combo_counts = []
i = 1
while i < len(numbers)-1:
a = numbers[i]
b = numbers[i+1]
delta = b - a
if delta == 3:
i+=1
# A and B are a fixed pair in the sequence
#print(sequence)
#print("_{}_ _{}_".format(a, b))
begin = previous_pair[1]
between = sequence
end = a
previous_pair = (a,b)
# how many combinations between the end points?
# simply try them all and see if they work
combos = "?"
print("_{}_ {} _{}_ ".format(begin, between, end), end="")
combos = combinationsBetween(begin, between, end)
print("combos:{}".format(combos))
if combos > 0:
combo_counts.append(combos)
sequence =[]
else:
sequence.append(a)
i +=1
print("({})".format(real_end))
print(combo_counts)
import math
## multiply together
total = 1
for c in combo_counts:
total *= c # math.factorial(c)
print(total)
# n =
# r =
# math.factorial(sum(combo_counts)) / (math.factorial(len(combo_counts)) *
print("expect")
print(19208)
# tiny 8
# small 19208
# normal ?
# hmm must be missing something
# brute force tree that generates all the combinations via recursion might be faster
# could add all valid next numbers and then recurse for each
# function returns 1 or zero at the leaf when it reaches the end
# DFS over BFS to reduce memory consumption
# only 100 numbers so will only recurse
def recursive(index, numbers, memo):
#print(index)
length = len(numbers)
if index == (length -1):
return 1
if index in memo:
return memo[index]
total = 0
current = numbers[index]
# find possible new index
i = index + 1
while i < length and (current + 3) >= (numbers[i]):
total += recursive(i, numbers, memo)
i += 1
memo[index] = total
return total
print("test")
memo = dict()
count = recursive(0, numbers, memo)
print("count")
print(count)
|
nilq/baby-python
|
python
|
import re
from src.vcd import VCD
from src.module import Module
from src.interval_list import IntervalList
from src.wire import Wire
class VCDFactory():
"""
Factory class
"""
seperator = "$enddefinitions $end"
@staticmethod
def read_raw(filename):
with open(filename, 'r') as f:
raw_data = f.read()
return raw_data
@staticmethod
def parseMeta(meta, vcd):
meta = re.sub('\n+','',re.sub(' +',' ',meta)).replace(" $end "," $end")
meta = meta.split(" $end")[:-1]
pointer = Module()
for elem in meta:
data = elem.split(" ")
if (data[0] == "$var"):
vcd.nameToId.setdefault(data[4], data[3])
values = vcd.idToValues.setdefault(data[3], IntervalList())
pointer.addWire(Wire(data[2], data[3], data[4], values))
elif (data[0] == "$scope"):
if (vcd.topModule is None):
pointer.setName(data[2])
vcd.topModule = pointer
else:
module = Module(data[2], parent=pointer)
pointer.addModule(module)
pointer = module
elif (data[0] == "$upscope"):
pointer = pointer.parent
@staticmethod
def convert(string):
if (string[0] in ('b', 'h')):
string = '0'+string
return eval(string)
@staticmethod
def parseData(data, vcd):
data = data.strip().split("\n")
counter = 0
while (True):
try:
lower_bound_index = data.index("#"+str(counter))+1
upper_bound_index = data.index("#"+str(counter+1))
updates = data[lower_bound_index : upper_bound_index]
for update in updates:
id = update[-1:]
value = update[:-1].strip()
vcd.idToValues[id].insert(counter, VCDFactory.convert(value))
counter += 1
except ValueError as e:
break
@staticmethod
def parse(raw_data):
# Pre-process the raw data
index = raw_data.find(VCDFactory.seperator)
meta = raw_data[:index]
data = raw_data[index+len(VCDFactory.seperator):]
# Create the VCD object
vcd = VCD()
# Parse raw data and populate the VCD object accordingly
VCDFactory.parseMeta(meta, vcd)
VCDFactory.parseData(data, vcd)
return vcd
@staticmethod
def read(filename):
return VCDFactory.parse(VCDFactory.read_raw(filename))
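# Hypothetical usage sketch (the file name is illustrative):
#   vcd = VCDFactory.read("waveform.vcd")
#   wire_id = vcd.nameToId["clk"]          # assumes a wire named "clk" exists
#   values = vcd.idToValues[wire_id]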
|
nilq/baby-python
|
python
|
import pytest
from sovtokenfees.constants import FEES
from plenum.common.exceptions import InvalidClientRequest
def test_set_fees_handler_static_validation(set_fees_handler, set_fees_request):
set_fees_handler.static_validation(set_fees_request)
def test_set_fees_handler_static_validation_no_fees(set_fees_handler, set_fees_request):
del set_fees_request.operation[FEES]
with pytest.raises(InvalidClientRequest, match="missed fields - fees"):
set_fees_handler.static_validation(set_fees_request)
def test_set_fees_handler_static_validation_negative_fees(set_fees_handler, set_fees_request):
set_fees_request.operation[FEES]["nym_alias"] = -1
with pytest.raises(InvalidClientRequest, match="set_fees -- negative value"):
set_fees_handler.static_validation(set_fees_request)
def test_set_fees_handler_static_validation_empty_alias(set_fees_handler, set_fees_request):
set_fees_request.operation[FEES][""] = 1
with pytest.raises(InvalidClientRequest, match="set_fees -- empty string"):
set_fees_handler.static_validation(set_fees_request)
|
nilq/baby-python
|
python
|
from app import controller #yeah...kinda stupid
import json
import numpy as np
from flask import Flask, session, make_response
class controller():
def __init__(s,gen_new,nam=None,SECRET_KEY=b'12'):
s.q={}
s.gen_new=gen_new
s.max_id=0
if nam is None:nam=__name__
s.app=Flask(nam)
s.app.config["SECRET_KEY"]=SECRET_KEY
s.addroute()
def addroute(s):
s.app.add_url_rule("/","main",s.main)
def run(s):
s.app.run()
def _create_new(s,index):
# print("creating new index",index)
s.q[index]=s.gen_new()
def _findid(s):
if "id" in session.keys():
if session["id"] in s.q.keys():
return int(session["id"])
s._create_new(s.max_id)
session["id"]=s.max_id
s.max_id+=1
return s.max_id-1
def _getobj(s):
return s.q[s._findid()]
def callfunc(s,func,*p,**kw):
obj=s._getobj()
return getattr(obj,func)(*p,**kw)
def main(s):
return s.callfunc("main")
ret="Hello World "+str(s.id)
if not "key" in session.keys():
session["key"]=str(np.random.randint(1000,10000))
ret+=" "+str(session["key"])
#ret=str(session)
# return ret
resp=make_response(ret)
resp.set_cookie("test1","I am the cookie")
return resp
# return str(session["uid"])+"\n"+s.findwho().main()
class handler(controller):
"""a controller made to work with webstates"""
def __init__(s,gen_new,nam=None,SECRET_KEY=b'12'):
controller.__init__(s,gen_new,nam=nam,SECRET_KEY=SECRET_KEY)
def addroute(s):
s.app.add_url_rule("/<function>","main",s.main)
s.app.add_url_rule("/","main",s.main)
def main(s,function=""):
print("calling function",function)
if "." in function:return None
ret=None
if not (function=="" or function[0]=="_"):ret=s.callfunc(function)#can only call functions that are not of type _something
if type(ret) in [str,bool,float,int]:
return str(ret)
elif type(ret) in [list,dict]:
return json.dumps(ret,indent=2)
else:
return s.callfunc("statefunc","vis")
|
nilq/baby-python
|
python
|
from flask import Flask,request
from PIL import Image
from tempfile import TemporaryFile
import json,base64
import captcha as capt
import model
app = Flask(__name__)
@app.route('/')
def hello():
return "hello,world"
@app.route('/captcha',methods=['GET','POST'])
def captcha():
if request.method == 'GET':
return makeErrJson(1)
else:
#global skl_model
img_base64 = request.form['data']
img = base64.b64decode(img_base64)
imgs = []
with TemporaryFile() as f:
            f.write(img)
            f.seek(0)  # rewind so the captcha reader sees the freshly written bytes
            imgs = capt.Captcha(f).getImgs(4,(20,25))
code = skl_model.predict_imgs(imgs,20*25)
print(code)
return makeSuccessJson(code)
def makeErrJson(err):
msg = {
1:"payload error"
}
return json.dumps({
'err':err,
'msg':msg[err],
'data':None
})
def makeSuccessJson(data):
return json.dumps({
'err':0,
'msg':'success',
'data':data
})
if __name__ == '__main__':
skl_model = model.Model()
skl_model.loadModel("test1.model")
app.run(threaded=False)
|
nilq/baby-python
|
python
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import event_entry
import alarm_entry
class rmon(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-rmon - based on the path /rmon. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__event_entry','__alarm_entry',)
_yang_name = 'rmon'
_rest_name = 'rmon'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__alarm_entry = YANGDynClass(base=YANGListType("alarm_index",alarm_entry.alarm_entry, yang_name="alarm-entry", rest_name="alarm", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='alarm-index', extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}), is_container='list', yang_name="alarm-entry", rest_name="alarm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True)
self.__event_entry = YANGDynClass(base=YANGListType("event_index",event_entry.event_entry, yang_name="event-entry", rest_name="event", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='event-index', extensions={u'tailf-common': {u'info': u'RMON event', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'event', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'rmon_event'}}), is_container='list', yang_name="event-entry", rest_name="event", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON event', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'event', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'rmon_event'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'rmon']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'rmon']
def _get_event_entry(self):
"""
Getter method for event_entry, mapped from YANG variable /rmon/event_entry (list)
"""
return self.__event_entry
def _set_event_entry(self, v, load=False):
"""
Setter method for event_entry, mapped from YANG variable /rmon/event_entry (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_event_entry is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_event_entry() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("event_index",event_entry.event_entry, yang_name="event-entry", rest_name="event", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='event-index', extensions={u'tailf-common': {u'info': u'RMON event', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'event', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'rmon_event'}}), is_container='list', yang_name="event-entry", rest_name="event", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON event', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'event', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'rmon_event'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """event_entry must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("event_index",event_entry.event_entry, yang_name="event-entry", rest_name="event", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='event-index', extensions={u'tailf-common': {u'info': u'RMON event', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'event', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'rmon_event'}}), is_container='list', yang_name="event-entry", rest_name="event", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON event', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'event', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'rmon_event'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True)""",
})
self.__event_entry = t
if hasattr(self, '_set'):
self._set()
def _unset_event_entry(self):
self.__event_entry = YANGDynClass(base=YANGListType("event_index",event_entry.event_entry, yang_name="event-entry", rest_name="event", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='event-index', extensions={u'tailf-common': {u'info': u'RMON event', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'event', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'rmon_event'}}), is_container='list', yang_name="event-entry", rest_name="event", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON event', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'event', u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'rmon_event'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True)
def _get_alarm_entry(self):
"""
Getter method for alarm_entry, mapped from YANG variable /rmon/alarm_entry (list)
"""
return self.__alarm_entry
def _set_alarm_entry(self, v, load=False):
"""
Setter method for alarm_entry, mapped from YANG variable /rmon/alarm_entry (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_alarm_entry is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_alarm_entry() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("alarm_index",alarm_entry.alarm_entry, yang_name="alarm-entry", rest_name="alarm", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='alarm-index', extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}), is_container='list', yang_name="alarm-entry", rest_name="alarm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """alarm_entry must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("alarm_index",alarm_entry.alarm_entry, yang_name="alarm-entry", rest_name="alarm", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='alarm-index', extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}), is_container='list', yang_name="alarm-entry", rest_name="alarm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True)""",
})
self.__alarm_entry = t
if hasattr(self, '_set'):
self._set()
def _unset_alarm_entry(self):
self.__alarm_entry = YANGDynClass(base=YANGListType("alarm_index",alarm_entry.alarm_entry, yang_name="alarm-entry", rest_name="alarm", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='alarm-index', extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}), is_container='list', yang_name="alarm-entry", rest_name="alarm", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True)
event_entry = __builtin__.property(_get_event_entry, _set_event_entry)
alarm_entry = __builtin__.property(_get_alarm_entry, _set_alarm_entry)
_pyangbind_elements = {'event_entry': event_entry, 'alarm_entry': alarm_entry, }
|
nilq/baby-python
|
python
|
# dir_utils.py is derived from [3DMPPE_POSENET_RELEASE](https://github.com/mks0601/3DMPPE_POSENET_RELEASE.git)
# distributed under MIT License (c) 2019 Gyeongsik Moon.
import os
import sys
def make_folder(folder_name):
if not os.path.exists(folder_name):
os.makedirs(folder_name)
def add_pypath(path):
if path not in sys.path:
sys.path.insert(0, path)
def link_file(src, target):
if os.path.isdir(target) or os.path.isfile(target):
os.remove(target)
os.system('ln -s {} {}'.format(src, target))
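# Minimal usage sketch (my addition; the paths are hypothetical):
# `make_folder` is idempotent, `add_pypath` avoids duplicate sys.path
# entries, and `link_file` replaces an existing target with a symlink.
if __name__ == '__main__':
    make_folder('./output/checkpoints')
    add_pypath('./common')
    link_file('./data/real_dataset', './data/dataset')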
|
nilq/baby-python
|
python
|
import numpy as np
import theano as th
import theano.tensor as tt
import src.kinematics as kn
def test_unzero6dof():
# Make sure that our unzeroing actually doesn't change anything.
q = tt.dmatrix('q')
q_ = np.random.rand(50, 6)
th.config.compute_test_value = 'warn'
q.tag.test_value = q_
u = tt.constant(2.*(np.random.rand(100, 3) - .5))
f_6dof = th.function(inputs=[q], outputs=kn.th_6dof_rigid(q, u))
res1 = f_6dof(q_)
res2 = f_6dof(kn.unzero_6dof(q_))
assert np.allclose(res1, res2)
|
nilq/baby-python
|
python
|
from conans import ConanFile
class OSSCoreTestsConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake_find_package"
def requirements(self):
self.requires("catch2/2.13.3")
self.requires("nlohmann_json/3.9.1")
|
nilq/baby-python
|
python
|
# Import packages to extend Python (just like we extend Sublime, Atom, or VSCode)
from random import randint
# re-import our game variables
from gameComponents import gameVars, winLose
# [] => this is an array
# name = [value1, value2, value3]
# an array is a special type of container that can hold multiple items.
# arrays are indexed (their contents are assigned a number)
# the index always starts at 0
# player_choice starts out as False, so the loop below runs until the game ends
while gameVars.player_choice is False:
print("***1==============*/ EMRE'S RPS GAME */==============****1")
print("Computer Lives:", gameVars.computer_lives, "/", gameVars.total_lives)
print("Player Lives:", gameVars.player_lives, "/", gameVars.total_lives)
print("===========================================")
# Version 1, to explain array indexing
# player_choice = choices [1]
# print("index 1 in the choice array is" + player_choice + ",which is paper")
print("Choose your deadly weapon! Or type quit to exit\n")
gameVars.player_choice = input("Choose rock, paper, or scissors: \n")
    # player_choice is now truthy -> it has a value
if gameVars.player_choice == "quit":
print("You chose to quit")
exit()
gameVars.computer_choice = gameVars.choices[randint(0, 2)]
print("user chose: " + gameVars.player_choice)
# this will be the AI choice -> a random pick from the choices array
print("computer chose:" + gameVars.computer_choice)
if gameVars.computer_choice == gameVars.player_choice:
print("tie")
elif gameVars.computer_choice == "rock":
if gameVars.player_choice == "scissors":
#verbose way
#player_lives = player_lives - 1
#simplified way
gameVars.player_lives -= 1
print("you lose! player lives:", gameVars.player_lives)
else:
print("you win!")
gameVars.computer_lives -= 1
elif gameVars.computer_choice == "paper":
if gameVars.player_choice == "rock":
gameVars.computer_lives -= 1
print("you lose! player lives:", gameVars.player_lives)
else:
print("you win!")
gameVars.player_lives -= 1
elif gameVars.computer_choice == "scissors":
if gameVars.player_choice == "paper":
gameVars.player_lives -= 1
print("you lose! player lives:", gameVars.player_lives)
else:
print("you win!")
gameVars.computer_lives -= 1
if gameVars.player_lives == 0:
winLose.winorlose("lost")
if gameVars.computer_lives == 0:
winLose.winorlose("won")
else:
gameVars.player_choice = False
print("Player lives:", gameVars.player_lives)
print("Computer lives:", gameVars.computer_lives)
        # keep the loop running by setting player_choice back to False
# unset, so that our loop condition will evaluate to True
gameVars.player_choice = False
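# Hedged aside (my addition, not part of the original game): the if/elif
# chain above can be collapsed into a lookup table mapping each choice to
# the choice it beats. A minimal sketch, assuming the same choice strings:
#   BEATS = {"rock": "scissors", "paper": "rock", "scissors": "paper"}
#   if player == computer:
#       print("tie")
#   elif BEATS[player] == computer:
#       print("you win!")
#   else:
#       print("you lose!")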
|
nilq/baby-python
|
python
|
class File(object):
    def __init__(self, name, current_type):
        self.name = name
        self.block = 0
        self.critical = 0
        self.major = 0
        # current modification type like 'modify', 'add', 'delete'
        self.current_type = current_type
        self.authors = list()
    @staticmethod
    def to_dict(files_dict, file_obj):
        files_dict[file_obj.name] = file_obj
    def add_author(self, author):
        self.authors.append(author)
    def get_authors(self):
        return self.authors
    def add_block(self, block):
        self.block += block
    def get_block(self):
        return self.block
    def add_critical(self, critical):
        self.critical += critical
    def get_critical(self):
        return self.critical
    def add_major(self, major):
        self.major += major
    def get_major(self):
        return self.major
    def set_current_type(self, type):
        self.current_type = type
    def get_current_type(self):
        return self.current_type
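# Minimal usage sketch (my addition; names are illustrative only):
if __name__ == '__main__':
    files = {}
    f = File('src/main.py', 'modify')
    f.add_author('alice')
    f.add_critical(2)
    File.to_dict(files, f)
    print(files['src/main.py'].get_critical())  # -> 2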
|
nilq/baby-python
|
python
|
# Software Name: its-client
# SPDX-FileCopyrightText: Copyright (c) 2016-2022 Orange
# SPDX-License-Identifier: MIT License
#
# This software is distributed under the MIT license, see LICENSE.txt file for more details.
#
# Author: Frédéric GARDES <frederic.gardes@orange.com> et al.
# Software description: This Intelligent Transportation Systems (ITS)
# [MQTT](https://mqtt.org/) client based on the [JSon](https://www.json.org)
# [ETSI](https://www.etsi.org/committee/its) specification transcription provides a ready to connect project
# for the mobility (connected and autonomous vehicles, road side units, vulnerable road users,...).
from pygeotile.tile import Tile
def lat_lng_to_quad_key(latitude, longitude, level_of_detail, slash=False):
tile = Tile.for_latitude_longitude(latitude, longitude, level_of_detail)
if slash:
quad_tree = f"/{'/'.join(tile.quad_tree)}"
else:
quad_tree = tile.quad_tree
return quad_tree
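# Usage sketch (my addition): pygeotile's quad_tree is a string with one
# digit (0-3) per zoom level; the coordinates below are illustrative.
#   lat_lng_to_quad_key(48.8582, 2.2945, 4)              -> '1202'
#   lat_lng_to_quad_key(48.8582, 2.2945, 4, slash=True)  -> '/1/2/0/2'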
def is_edgy(direction, q):
return (
int(q)
in {"up": [0, 1], "right": [1, 3], "down": [2, 3], "left": [0, 2]}[direction]
)
def get_up_or_down(q):
return str((int(q) + 2) % 4)
def get_right_or_left(q):
q_as_int = int(q)
if q_as_int % 2 == 0:
return str((q_as_int + 1) % 4)
else:
return str((q_as_int - 1) % 4)
def get_neighbour(quadtree, direction):
edge_crossed = False
result = ""
for index, q in enumerate(quadtree[::-1]):
if index == 0 or edge_crossed:
edge_crossed = is_edgy(direction, q)
result += {
"up": get_up_or_down,
"down": get_up_or_down,
"right": get_right_or_left,
"left": get_right_or_left,
}[direction](q)
else:
result += q
return result[::-1]
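# Worked example (my addition), tracing the loop above: quadrant 0 is not
# on the right edge (is_edgy('right', '0') is False), so only the last
# digit flips and the carry stops:
#   get_neighbour('120', 'right')  -> '121'
# When the digit *is* on the requested edge, the carry propagates to the
# next level up:
#   get_neighbour('11', 'right')   -> '00'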
# This is the translation of the Java code given by Mathieu on 2019/11/15.
# It works just fine, but as long as pygeotile does not give us any errors it's probably better to use this lib.
#
#
#
#
# class PixelXY:
# def __init__(self, pixelX, pixelY):
# self.pixelX = pixelX
# self.pixelY = pixelY
# class TileXY:
# def __init__(self, tileX, tileY):
# self.tileX = tileX
# self.tileY = tileY
# def clip(n, minValue, maxValue):
# return min(max(n, minValue), maxValue)
# def latLngToQuadKey(latitude, longitude, levelOfDetail):
# return tileXYToQuadKey(pixelXYToTileXY(latLongToPixelXY(latitude, longitude, levelOfDetail)), levelOfDetail)
# def latLongToPixelXY(latitude, longitude, levelOfDetail):
# latitude = clip(latitude, MIN_LATITUDE, MAX_LATITUDE)
# longitude = clip(longitude, MIN_LONGITUDE, MAX_LONGITUDE)
# x = (longitude + 180) /360
# sinLatitude = math.sin(latitude * math.pi / 180)
# y = 0.5 - math.log((1 + sinLatitude) / (1 - sinLatitude)) / (4 * math.pi);
# mapSize = mapSizeFun(levelOfDetail)
# pixelX = int (clip(x * mapSize + 0.5, 0, mapSize - 1))
# pixelY = int (clip(y * mapSize + 0.5, 0, mapSize - 1))
# return PixelXY(pixelX, pixelY)
# def mapSizeFun(levelOfDetail):
# return 256 << levelOfDetail
# def pixelXYToTileXY(pixelXY):
# tileX = int(pixelXY.pixelX / 256)
# tileY = int(pixelXY.pixelY / 256)
# return TileXY(tileX, tileY)
# def tileXYToQuadKey(tileXY, levelOfDetail):
# tileX = tileXY.tileX
# tileY = tileXY.tileY
# quadKey = ""
# for i in range(levelOfDetail, 0, -1):
# digit = 0
# mask = 1 << (i - 1)
# if((tileX & mask) != 0):
# digit = digit +1
# if ((tileY & mask) != 0):
# digit = digit+2
# quadKey += str(digit)
# return quadKey
|
nilq/baby-python
|
python
|
from pathlib import Path as _Path
from sys import platform as _platform
__all__ = [
"hmmfetch",
"hmmpress",
"hmmscan",
"hmmsearch",
"hmmemit",
"phmmer",
"binary_version",
]
binary_version = "3.3.2"
if _platform not in ["linux", "darwin"]:
raise RuntimeError(f"Unsupported platform: {_platform}.")
_suffix = "manylinux2010_x86_64"
if _platform == "darwin":
_suffix = "macosx_10_9_x86_64"
_bin = _Path(__file__).parent.absolute() / f"v{binary_version}"
hmmemit = _bin / f"hmmemit_{_suffix}"
hmmfetch = _bin / f"hmmfetch_{_suffix}"
hmmpress = _bin / f"hmmpress_{_suffix}"
hmmscan = _bin / f"hmmscan_{_suffix}"
hmmsearch = _bin / f"hmmsearch_{_suffix}"
phmmer = _bin / f"phmmer_{_suffix}"
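# Usage sketch (my addition): the exported names are pathlib.Path objects
# pointing at the bundled binaries, so they can be handed to subprocess
# directly. The file names below are hypothetical.
#
#   import subprocess
#   subprocess.run([str(hmmsearch), "--tblout", "hits.tbl",
#                   "profile.hmm", "seqs.fasta"], check=True)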
|
nilq/baby-python
|
python
|
import time
import matplotlib.pyplot as plt
import numpy as np
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
if self.name:
print('[%s]' % self.name, end=' ')
print('Elapsed: %s' % (time.time() - self.tstart))
def plot_object_color(object_list, color_mapping):
N = len(object_list)
object_id = 1
for object_name in object_list:
color = color_mapping[object_name]
plt.subplot(1, N, object_id)
plot_color(color, object_name)
object_id += 1
def generate_objectcatetory_json(scene_objects):
# Use http://www.jsoneditoronline.org/ to clean the json
# http://jsonformat.com/#jsondataurllabel
""" Get object category from object name, with some manual editing """
print('{')
for obj in scene_objects:
objtype = obj.replace('SM_', '').split('_')[0].replace('BookLP', 'Book').replace('Wire1', 'Wire')
print(' ', repr(obj), ':', repr(objtype), ',')
print('}')
def check_coverage(dic_instance_mask):
""" Check the portion of labeled image """
marked_region = None
for object_name in list(dic_instance_mask.keys()):
instance_mask = dic_instance_mask[object_name]
if marked_region is None:
marked_region = np.zeros(instance_mask.shape[0:2])
marked_region += instance_mask
    # Overlapping instance masks would push values above 1; warn instead of
    # asserting so the coverage figure below is still reported.
    if marked_region.max() > 1:
        print('There are invalid regions in the labeling')
coverage = float(marked_region.sum()) / (marked_region.shape[0] * marked_region.shape[1])
print('Coverage %.2f' % coverage)
return marked_region
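# Minimal usage sketch (my addition): dic_instance_mask maps object names
# to binary HxW instance masks; overlapping masks trigger the warning above.
if __name__ == '__main__':
    masks = {'chair': np.eye(4, dtype=int), 'table': 1 - np.eye(4, dtype=int)}
    check_coverage(masks)  # the two masks tile the image, so coverage is 1.00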
|
nilq/baby-python
|
python
|
from datetime import datetime
import logging
from telegram import (
InlineKeyboardButton
)
from iot.devices.base import BaseDevice, BaseBroadlinkDevice
from iot.rooms import d_factory, bl_d_factory
from iot.utils.keyboard.base import (
CLOSE_INLINE_KEYBOARD_COMMAND,
InlineKeyboardMixin,
KeyboardCallBackQueryHandler
)
logger = logging.getLogger(__name__)
JUMP_ROOMS_TEXT = "Jump to Rooms"
BACK_TEXT = "<- Back"
CLOSE_TEXT = "Closed! /keyboard to reactivate keyboard"
class CommandKeyboardCBHandler(KeyboardCallBackQueryHandler, InlineKeyboardMixin):
def func_name_to_text(self, name):
return name.replace("_", " ")
def jump_rooms_button(self):
return InlineKeyboardButton(
JUMP_ROOMS_TEXT, callback_data=self.return_cb_data("rooms")
)
def footer_buttons(self, target, target_type):
button_list = [
self.back_button(target, target_type),
self.close_button()
]
# Add Jump rooms button if target_type is device
if target_type == "device":
button_list.insert(0, [self.jump_rooms_button()])
return button_list
def back_button(self, back_target, target_type):
cb_data = None
# Rooms top level keyboard
if target_type == "rooms":
text = "Top Menu"
cb_data = "rooms"
# Room second level keyboard (listing devices), Back to Rooms kb
elif target_type == "room":
text = BACK_TEXT
cb_data = back_target
# Devices first level (listing device features), Back to Room kb
elif target_type == "device":
text = BACK_TEXT
cb_data = back_target
return InlineKeyboardButton(
text, callback_data=self.return_cb_data(cb_data)
)
def construct_keyboard_markup(
self, options, back_target, target_type, cols=0
):
button_list = [
InlineKeyboardButton(
name, callback_data=self.return_cb_data(command)) \
for name, command in options.items()
]
footer_buttons = self.footer_buttons(back_target, target_type)
keyboard = self.build_keyboard(button_list, cols=cols,
footer_buttons=footer_buttons
)
markup = self.build_inline_keyboard_markup(keyboard)
return markup
def build_rooms_keyboard(self):
rooms_data = dict((r, r) for r in self.server.rooms.keys())
markup = self.construct_keyboard_markup(rooms_data, None, "rooms")
return markup
def build_room_devices_keyboard(self, room):
room = self.server.rooms[room]
rooms_devices_data = dict((d, d) for d in room.DEVICES.keys())
rooms_broadlink_devices_data = dict(
(d, d) for d in room.BL_DEVICES.keys()
)
rooms_devices_data.update(rooms_broadlink_devices_data)
markup = self.construct_keyboard_markup(
rooms_devices_data, "rooms", "room"
)
return markup
def build_device_keyboard(self, device):
device = self.server.devices[device]
        if isinstance(device, BaseDevice):
factory_kls = d_factory
elif isinstance(device, BaseBroadlinkDevice):
factory_kls = bl_d_factory
device_interface = \
factory_kls.get_device_type_interface(device.device_type)
command = "{} {}"
interface_data = dict(
(self.func_name_to_text(i), command.format(device.id, i)) \
for i in device_interface
)
markup = self.construct_keyboard_markup(
interface_data, device.room.name, "device"
)
return markup
def process_query(self, update, context, internal_callback_data):
query, query_data = super(CommandKeyboardCBHandler, self).process_query(
update, context, internal_callback_data)
query_data_length = len(query_data)
# Single length callback_data eg. room, tv
if query_data_length == 1:
query_data = query_data[0]
if query_data in self.server.rooms.keys():
self.handle_room(query_data, query, update, context)
elif query_data in self.server.devices.keys():
self.handle_device(query_data, query, update, context)
elif query_data == "rooms":
self.top_menu(query, update, context)
elif query_data == CLOSE_INLINE_KEYBOARD_COMMAND:
self.handle_close(CLOSE_TEXT, query, update, context)
# Actual device feature command callback_data eg. aircon powerful
elif query_data_length == 2:
device_id = query_data[0]
feature = query_data[1]
device = self.server.devices[device_id]
# Call server call_device
self.server.call_device(
update, context, device, feature,
handler_name=self.handler_name
)
# Update server last command handled
self.server.last_command_handled = (
self.__class__.__name__, device_id, feature,
str(datetime.now()).split(".")[0]
)
def handle_room(self, room_name, query, update, context):
reply_markup = self.build_room_devices_keyboard(room_name)
context.bot.edit_message_text(text="Select {} device".format(room_name),
chat_id=query.message.chat_id,
message_id=query.message.message_id,
reply_markup=reply_markup)
self.answer_query(query, context)
def handle_device(self, device_id, query, update, context):
reply_markup = self.build_device_keyboard(device_id)
context.bot.edit_message_text(text="Select {} feature".format(device_id),
chat_id=query.message.chat_id,
message_id=query.message.message_id,
reply_markup=reply_markup)
self.answer_query(query, context)
def top_menu(self, query, update, context):
# To prevent "Message is not modified" from raising
# as we should not be editing the message if it's in top menu
if query.message.text == "Select room":
self.answer_query(query, context, text="Already at top menu!")
return
reply_markup = self.build_rooms_keyboard()
context.bot.edit_message_text(text="Select room",
chat_id=query.message.chat_id,
message_id=query.message.message_id,
reply_markup=reply_markup)
self.answer_query(query, context)
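# Callback-data cheat sheet (my addition, inferred from process_query):
#   "rooms"                  -> redraw the top-level rooms keyboard
#   "<room name>"            -> list that room's devices
#   "<device id>"            -> list that device's features
#   "<device id> <feature>"  -> two tokens: invoke the device feature
# e.g. a button built with command.format(device.id, i) above produces
# callback data such as "aircon power_on" (identifiers are illustrative).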
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
__author__ = """Larissa Triess"""
__email__ = "larissa@triess.eu"
from .compute import (
get_points_over_angles_and_label_statistics as get_angle_label_stats,
)
from .compute import (
get_points_over_distance_and_label_statistics as get_distance_label_stats,
)
__all__ = [
"get_distance_label_stats",
"get_angle_label_stats",
]
|
nilq/baby-python
|
python
|
#Given an array of integers nums.
#A pair (i,j) is called good if nums[i] == nums[j] and i < j.
#Return the number of good pairs.
from typing import List

class Solution:
    def numIdenticalPairs(self, nums: List[int]) -> int:
        count = 0
        for i in range(0, len(nums)):
            # only scan forward so that i < j holds by construction
            for j in range(i + 1, len(nums)):
                if nums[i] == nums[j]:
                    count += 1
        return count
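# Hedged alternative (my addition, not part of the original solution):
# counting occurrences gives O(n) time, since k occurrences of a value
# contribute k * (k - 1) // 2 good pairs.
from collections import Counter

class SolutionCounting:
    def numIdenticalPairs(self, nums: List[int]) -> int:
        return sum(k * (k - 1) // 2 for k in Counter(nums).values())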
|
nilq/baby-python
|
python
|
from django.http import HttpResponse
from django.utils import simplejson
from django.template.defaultfilters import slugify
from django.utils.encoding import force_unicode
from django.core.exceptions import ValidationError
import models
from scipy_central.submission.models import TagCreation
import datetime
from collections import defaultdict
def get_tag_uses(start_date=None, end_date=None):
"""
Returns a list of tuples of the form: [(n_uses, Tag.pk), ....]
This allows one to use the builtin ``list.sort()`` function where Python
orders the list based on the first entry in the tuple.
The list will be returned in the order of the ``Tag.pk``, but the
first tuple entry is the number of uses of that tag, allowing for easy
sorting using Python's ``sort`` method.
"""
if start_date is None:
start_date = datetime.date.min
if end_date is None:
end_date = datetime.date.max
tags_created = TagCreation.objects.all().\
filter(date_created__gte=start_date).\
filter(date_created__lte=end_date)
# Let all the revisions from each submission be grouped, so that duplicate
# tags across revisions only have a single influence
uses_by_sub_pk = defaultdict(set)
for use in tags_created:
uses_by_sub_pk[use.revision.entry_id].add(use.tag)
    # Then for each set of tags in each submission, iterate and create a dict
# where the keys are the tag's primary key and the values are the number
# of uses of that tag
uses_by_pk = defaultdict(int)
for tag_set in uses_by_sub_pk.itervalues():
for tag in tag_set:
uses_by_pk[tag.pk] += 1
# Finally, create a list of hit counts, which can be used for sorting
hit_counts = []
for key, val in uses_by_pk.iteritems():
hit_counts.append((val, key))
return hit_counts
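# Usage sketch (my addition): ``sort()`` orders the tuples by their first
# entry, so the most-used tags end up at the tail of the list.
#
#   hit_counts = get_tag_uses()
#   hit_counts.sort()
#   top_ten = hit_counts[-10:]   # [(n_uses, Tag.pk), ...], most used last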
def parse_tags(tagstring):
"""
Parses tag input, with multiple word input being activated and
delineated by commas and double quotes. Quotes take precedence, so
they may contain commas.
Returns a sorted list of unique tag names.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
SPC: took this code from:
https://github.com/alex/django-taggit/blob/master/taggit/utils.py
"""
if not tagstring:
return []
tagstring = force_unicode(tagstring)
# SPC: removing this: we require commas to separate multiword tags
# Special case - if there are no commas or double quotes in the
# input, we don't *do* a recall... I mean, we know we only need to
# split on spaces.
#if u',' not in tagstring and u'"' not in tagstring:
#words = list(set(split_strip(tagstring, u' ')))
#words.sort()
#return words
if u',' not in tagstring and u'"' not in tagstring:
tagstring += ','
words = []
buffer_list = []
# Defer splitting of non-quoted sections until we know if there are
# any unquoted commas.
to_be_split = []
saw_loose_comma = False
open_quote = False
i = iter(tagstring)
try:
while True:
c = i.next()
if c == u'"':
if buffer_list:
to_be_split.append(u''.join(buffer_list))
buffer_list = []
# Find the matching quote
open_quote = True
c = i.next()
while c != u'"':
buffer_list.append(c)
c = i.next()
if buffer_list:
word = u''.join(buffer_list).strip()
if word:
words.append(word)
buffer_list = []
open_quote = False
else:
if not saw_loose_comma and c == u',':
saw_loose_comma = True
buffer_list.append(c)
except StopIteration:
# If we were parsing an open quote which was never closed treat
# the buffer_list as unquoted.
if buffer_list:
if open_quote and u',' in buffer_list:
saw_loose_comma = True
to_be_split.append(u''.join(buffer_list))
if to_be_split:
if saw_loose_comma:
delimiter = u','
else:
delimiter = u' '
for chunk in to_be_split:
words.extend(split_strip(chunk, delimiter))
words = list(set(words))
words.sort()
return words
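# Behaviour sketch (my addition), following the parsing rules above:
#   parse_tags('apple fruit')          -> ['apple fruit']   (no comma: one multiword tag)
#   parse_tags('apple, fruit')         -> ['apple', 'fruit']
#   parse_tags('"apple, red", fruit')  -> ['apple, red', 'fruit']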
def split_strip(string, delimiter=u','):
"""
Splits ``string`` on ``delimiter``, stripping each resulting string
and returning a list of non-empty strings.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
SPC: took this code from:
https://github.com/alex/django-taggit/blob/master/taggit/utils.py
"""
if not string:
return []
words = [w.strip() for w in string.split(delimiter)]
return [w for w in words if w]
def get_and_create_tags(tagstring):
tag_list = []
for tag in parse_tags(tagstring):
try:
tag_obj = models.Tag.objects.get_or_create(name=tag)[0]
except ValidationError:
pass
else:
# Does the tag really exist or was it found because of the lack of
            # case sensitivity (e.g. "2D" vs "2d")
if tag_obj.id is None:
tag_obj = models.Tag.objects.get(slug=slugify(tag))
tag_list.append(tag_obj)
return tag_list
def tag_autocomplete(request):
"""
Filters through all available tags to find those starting with, or
containing the string ``contains_str``.
Parts from http://djangosnippets.org/snippets/233/
"""
# TODO(KGD): cache this lookup for 30 minutes
    # Also, randomize the tag order to prevent only those with lower
# primary keys from being shown more frequently
# TODO(KGD): put the typed text in bold, e.g. typed="bi" then return
# proba<b>bi</b>lity
all_tags = [tag.name for tag in models.Tag.objects.all()]
contains_str = request.REQUEST.get('term', '').lower()
starts = []
includes = []
for item in all_tags:
index = item.lower().find(contains_str)
if index == 0:
starts.append(item)
elif index > 0:
includes.append(item)
# Return tags starting with ``contains_str`` at the top of the list,
# followed by tags that only include ``contains_str``
starts.extend(includes)
return HttpResponse(simplejson.dumps(starts), mimetype='text/text')
|
nilq/baby-python
|
python
|
from qupulse.hardware.setup import HardwareSetup, PlaybackChannel, MarkerChannel
from qupulse.pulses import PointPT, RepetitionPT, TablePT
#%%
""" Connect and setup to your AWG. Change awg_address to the address of your awg and awg_name to the name of
your AWGs manufacturer (Zürich Instruments: ZI, TaborElectronics: Tabor).
"""
awg_name = 'TABOR'
awg_address = '127.0.0.1'
hardware_setup = HardwareSetup()
if awg_name == 'ZI':
from qupulse.hardware.awgs.zihdawg import HDAWGRepresentation
awg = HDAWGRepresentation(awg_address, 'USB')
channel_pairs = []
for pair_name in ('AB', 'CD', 'EF', 'GH'):
channel_pair = getattr(awg, 'channel_pair_%s' % pair_name)
for ch_i, ch_name in enumerate(pair_name):
playback_name = '{name}_{ch_name}'.format(name=awg_name, ch_name=ch_name)
hardware_setup.set_channel(playback_name,
PlaybackChannel(channel_pair, ch_i))
hardware_setup.set_channel(playback_name + '_MARKER_FRONT', MarkerChannel(channel_pair, 2 * ch_i))
hardware_setup.set_channel(playback_name + '_MARKER_BACK', MarkerChannel(channel_pair, 2 * ch_i + 1))
awg_channel = awg.channel_pair_AB
elif awg_name == 'TABOR':
from qupulse.hardware.awgs.tabor import TaborAWGRepresentation
awg = TaborAWGRepresentation(awg_address, reset=True)
channel_pairs = []
for pair_name in ('AB', 'CD'):
channel_pair = getattr(awg, 'channel_pair_%s' % pair_name)
channel_pairs.append(channel_pair)
for ch_i, ch_name in enumerate(pair_name):
playback_name = '{name}_{ch_name}'.format(name=awg_name, ch_name=ch_name)
hardware_setup.set_channel(playback_name, PlaybackChannel(channel_pair, ch_i))
hardware_setup.set_channel(playback_name + '_MARKER', MarkerChannel(channel_pair, ch_i))
awg_channel = channel_pairs[0]
else:
    raise ValueError('Unknown AWG')
#%%
""" Create three simple pulses and put them together to a PulseTemplate called dnp """
plus = [(0, 0), ('ta', 'va', 'hold'), ('tb', 'vb', 'linear'), ('tend', 0, 'jump')]
minus = [(0, 0), ('ta', '-va', 'hold'), ('tb', '-vb', 'linear'), ('tend', 0, 'jump')]
zero_pulse = PointPT([(0, 0), ('tend', 0)], ('X', 'Y'))
plus_pulse = TablePT(entries={'X': plus, 'Y': plus})
minus_pulse = TablePT(entries={'X': minus, 'Y': minus})
dnp = RepetitionPT(minus_pulse, 'n_minus') @ RepetitionPT(zero_pulse, 'n_zero') @ RepetitionPT(plus_pulse, 'n_plus')
#%%
""" Create a program dnp with the number of pulse repetitions as volatile parameters """
sample_rate = awg_channel.sample_rate / 10**9
n_quant = 192
t_quant = n_quant / sample_rate
dnp_prog = dnp.create_program(parameters=dict(tend=float(t_quant), ta=float(t_quant/3), tb=float(2*t_quant/3),
va=0.12, vb=0.25, n_minus=3, n_zero=3, n_plus=3),
channel_mapping={'X': '{}_A'.format(awg_name), 'Y': '{}_B'.format(awg_name)},
volatile={'n_minus', 'n_zero', 'n_plus'})
dnp_prog.cleanup()
#%%
""" Upload this program to the AWG """
hardware_setup.register_program('dnp', dnp_prog)
hardware_setup.arm_program('dnp')
#%%
""" Run initial program """
awg_channel.run_current_program()
#%%
""" Change volatile parameters to new values and run the modified program """
hardware_setup.update_parameters('dnp', dict(n_zero=1, n_plus=5))
awg_channel.run_current_program()
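# Note (my addition): declaring n_minus/n_zero/n_plus as volatile in
# create_program is what allows update_parameters above to change the
# repetition counts without rebuilding and re-uploading the waveforms.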
|
nilq/baby-python
|
python
|
from unittest import TestCase
from mandrill import InvalidKeyError
from mock import patch
from welcome_mailer import settings
from welcome_mailer.backends import email
from welcome_mailer.testing_utils import create_user, fake_user_ping
class TestBaseBackend(TestCase):
""" Test cases for the base email backend """
def test_send_email(self):
""" Test sending an email with the base backend.
Sending an email with this backend should raise a
NotImplementedError.
"""
backend = email.BaseBackend()
user = create_user()
with self.assertRaises(NotImplementedError):
backend.send_email(user)
@patch('welcome_mailer.backends.email.mandrill_backend.mandrill.Users.ping',
autospec=True, side_effect=fake_user_ping)
class TestMandrillBackend(TestCase):
""" Test cases for the mandrill email backend """
def test_create(self, mock_ping):
""" Test creating a mandrill backend.
The mandrill backend should accept an API key in its
constructor.
"""
backend = email.MandrillBackend('apikey')
self.assertFalse(backend.authenticated)
# ping shouldn't be called until we actually try to send an
# email.
self.assertEqual(0, mock_ping.call_count)
def test_authenticate(self, mock_ping):
""" Test authenticating the backend.
This method should send a ping through mandrill to determine if
the API key is valid.
"""
backend = email.MandrillBackend('apikey')
backend.authenticate()
self.assertTrue(backend.authenticated)
self.assertEqual(1, mock_ping.call_count)
def test_authenticate_already_authenticated(self, mock_ping):
""" Test authenticating when already authenticated.
If the backend is already authenticated, then the API should not
be hit again.
"""
backend = email.MandrillBackend('apikey')
backend.authenticated = True
backend.authenticate()
self.assertTrue(backend.authenticated)
self.assertEqual(0, mock_ping.call_count)
def test_authenticate_invalid_key(self, mock_ping):
""" Test authenticating with an invalid key.
Attempting to authenticate an invalid key should raise an
InvalidKeyError.
"""
backend = email.MandrillBackend('invalid')
with self.assertRaises(InvalidKeyError):
backend.authenticate()
self.assertFalse(backend.authenticated)
self.assertEqual(1, mock_ping.call_count)
def test_get_message(self, mock_ping):
""" Test getting the message content for a user.
This method should generate the message content for a welcome
email to a specific user. It should pull in global variables
from settings, and generate personal variables for the current
user.
"""
backend = email.MandrillBackend('apikey')
user = create_user()
expected = settings.MESSAGE_CONFIG
expected.update({
'merge_vars': [
{
'rcpt': user.email,
'vars': [
{
'name': 'FNAME',
'content': user.first_name,
},
{
'name': 'LNAME',
'content': user.last_name,
},
],
},
],
'to': [
{
'email': user.email,
'name': str(user),
},
],
})
self.assertEqual(expected, backend.get_message(user))
@patch('welcome_mailer.backends.email.mandrill_backend.mandrill.Messages.send_template', # noqa
return_value={})
def test_send_email(self, mock_send_template, mock_ping):
""" Test sending an email to a user.
The function should attempt to send a templated email using
mandrill.
"""
backend = email.MandrillBackend('apikey')
user = create_user(email='test@example.com')
template_name = settings.TEMPLATE_NAME
template_content = []
message = backend.get_message(user)
backend.send_email(user)
self.assertEqual(1, mock_ping.call_count)
mock_send_template.assert_called_with(
template_name=template_name,
template_content=template_content,
message=message)
|
nilq/baby-python
|
python
|
from __future__ import print_function
from __future__ import division
import os
import sys
sys.path.append(os.getcwd())
import argparse
import json
import random
import warnings
import time
from collections import defaultdict, OrderedDict
from types import SimpleNamespace
import glog as log
import os.path as osp
from QEBATangentAttack.adversarial import Adversarial
from QEBATangentAttack.rv_generator import load_pgen
from QEBATangentAttack.utils import Misclassification, MSE, TargetClass
import math
import torch
from torch.nn import functional as F
import numpy as np
from dataset.dataset_loader_maker import DataLoaderMaker
from dataset.target_class_dataset import ImageNetDataset, CIFAR10Dataset, CIFAR100Dataset
from models.standard_model import StandardModel
from models.defensive_model import DefensiveModel
from config import IN_CHANNELS, CLASS_NUM, IMAGE_DATA_ROOT
from QEBATangentAttack.tangent_point_analytical_solution import TangentFinder
class QEBATangentAttack(object):
"""A powerful adversarial attack that requires neither gradients
nor probabilities.
Notes
-----
Features:
* ability to switch between two types of distances: MSE and Linf.
* ability to continue previous attacks by passing an instance of the
Adversarial class
* ability to pass an explicit starting point; especially to initialize
a targeted attack
* ability to pass an alternative attack used for initialization
* ability to specify the batch size
"""
def __init__(self, model, dataset, clip_min, clip_max, height, width, channels, norm, epsilon,
iterations=64,
initial_num_evals=100,
max_num_evals=10000,
stepsize_search='geometric_progression',
gamma=0.01,
batch_size=256,
internal_dtype=torch.float64,
log_every_n_steps=1,
verbose=False,
rv_generator=None, atk_level=None,
mask=None,
save_calls=None,
discretize=False,
suffix='',
plot_adv=True,
threshold=None,
distance=MSE,
maximum_queries=10000
):
"""Applies QEBA
Parameters
----------
input_or_adv : `numpy.ndarray` or :class:`Adversarial`
The original, correctly classified input. If it is a
numpy array, label must be passed as well. If it is
an :class:`Adversarial` instance, label must not be passed.
label : int
The reference label of the original input. Must be passed
if input is a numpy array, must not be passed if input is
an :class:`Adversarial` instance.
unpack : bool
If true, returns the adversarial input, otherwise returns
the Adversarial object.
iterations : int
Number of iterations to run.
initial_num_evals: int
Initial number of evaluations for gradient estimation.
Larger initial_num_evals increases time efficiency, but
may decrease query efficiency.
max_num_evals: int
Maximum number of evaluations for gradient estimation.
stepsize_search: str
How to search for stepsize; choices are 'geometric_progression',
'grid_search'. 'geometric progression' initializes the stepsize
by ||x_t - x||_p / sqrt(iteration), and keep decreasing by half
until reaching the target side of the boundary. 'grid_search'
chooses the optimal epsilon over a grid, in the scale of
||x_t - x||_p.
gamma: float
The binary search threshold theta is gamma / sqrt(d) for
l2 attack and gamma / d for linf attack.
batch_size : int
Batch size for model prediction. It is not the data_loader's batch size!
Higher precision might be slower but is numerically more stable.
log_every_n_steps : int
            Determines verbosity of the logging.
verbose : bool
Controls verbosity of the attack.
"""
self.model = model
self.clip_min = clip_min
self.clip_max = clip_max
self.norm = norm
self.epsilon = epsilon
self.ord = np.inf if self.norm == "linf" else 2
self.initial_num_evals = initial_num_evals
self.max_num_evals = max_num_evals
self.stepsize_search = stepsize_search
self.gamma = gamma
self.batch_size = batch_size
self.verbose = verbose
self.internal_dtype = internal_dtype
self.log_every_n_steps = log_every_n_steps
self.rv_generator = rv_generator
self.discretize = discretize
self.suffix = suffix
self.plot_adv = plot_adv
self._default_threshold = threshold
self._default_distance = distance
self.iterations = iterations
self.atk_level = atk_level # int type
self.shape = [channels, height, width]
if mask is not None:
self.use_mask = True
self.pert_mask = mask
self.loss_mask = 1 - mask
else:
self.use_mask = False
self.pert_mask = torch.ones(self.shape).float()
self.loss_mask = torch.ones(self.shape).float()
self.__mask_succeed = 0
# Set binary search threshold.
self.fourier_basis_aux = None
self.dim = np.prod(self.shape)
if self.norm == 'l2':
self.theta = self.gamma / np.sqrt(self.dim)
else:
self.theta = self.gamma / self.dim
self.printv('QEBA optimized for {} distance'.format(self.norm))
self.save_calls = save_calls
if save_calls is not None:
if not os.path.isdir(save_calls):
os.mkdir(save_calls)
self.save_cnt = 0
self.save_outs = []
self.save_hashes = []
self.maximum_queries = maximum_queries
self.dataset_name = dataset
self.dataset_loader = DataLoaderMaker.get_test_attacked_data(dataset, 1)
self.total_images = len(self.dataset_loader.dataset)
self.query_all = torch.zeros(self.total_images)
self.distortion_all = defaultdict(OrderedDict) # key is image index, value is {query: distortion}
self.correct_all = torch.zeros_like(self.query_all) # number of images
self.not_done_all = torch.zeros_like(self.query_all) # always set to 0 if the original image is misclassified
self.success_all = torch.zeros_like(self.query_all)
self.success_query_all = torch.zeros_like(self.query_all)
self.distortion_with_max_queries_all = torch.zeros_like(self.query_all)
def gen_random_basis(self, N):
basis = torch.from_numpy(np.random.randn(N, *self.shape)).type(self.internal_dtype)
return basis
def gen_custom_basis(self, N, sample, atk_level=None):
if self.rv_generator is not None:
basis = torch.from_numpy(self.rv_generator.generate_ps(sample, N)).type(self.internal_dtype)
else:
basis = self.gen_random_basis(N)
return basis
def count_stop_query_and_distortion(self, images, perturbed, adversarial, success_stop_queries, batch_image_positions):
dist = torch.norm((perturbed - images).view(1, -1), self.ord, 1)
working_ind = torch.nonzero(dist > self.epsilon).view(-1)
success_stop_queries[working_ind] = adversarial._total_prediction_calls
for inside_batch_index, index_over_all_images in enumerate(batch_image_positions):
self.distortion_all[index_over_all_images][adversarial._total_prediction_calls] = dist[
inside_batch_index].item()
def attack(self, image_index, a):
"""
a: Adversarial class
"""
# query = torch.zeros(1).float()
success_stop_queries = torch.zeros(1).float() # stop query count once the distortion < epsilon
batch_size = a.unperturbed.size(0)
batch_image_positions = np.arange(image_index * batch_size,
min((image_index + 1) * batch_size, self.total_images)).tolist()
self.external_dtype = a.unperturbed.dtype
assert self.internal_dtype in [torch.float32, torch.float64]
assert self.external_dtype in [torch.float32, torch.float64]
assert not (self.external_dtype == torch.float64 and
self.internal_dtype == torch.float32)
a.set_distance_dtype(self.internal_dtype)
# ===========================================================
# Increase floating point precision
# Construct batch decision function with binary output.
# ===========================================================
def decision_function(x):
outs = []
num_batchs = int(math.ceil(x.size(0) * 1.0 / self.batch_size))
for j in range(num_batchs):
current_batch = x[self.batch_size * j:
self.batch_size * (j + 1)]
current_batch = current_batch.type(self.external_dtype)
                out = a.forward(current_batch, strict=False)[1]  # forward returns (predictions, is_adversarial); here is_adversarial actually means prediction == true label
outs.append(out)
outs = torch.cat(outs, dim=0)
return outs
# ===========================================================
        # initialize time measurements
# ===========================================================
self.time_gradient_estimation = 0
self.time_search = 0
self.time_initialization = 0
# ===========================================================
# Initialize variables, constants, hyperparameters, etc.
# ===========================================================
warnings.simplefilter('always', UserWarning) # make sure repeated warnings are shown
# ===========================================================
# get bounds
bounds = a.bounds()
self.clip_min, self.clip_max = bounds
# ===========================================================
# Find starting point
# ===========================================================
_, num_evals = self.initialize_starting_point(a)
# query += num_evals
if a.perturbed is None:
warnings.warn(
'Initialization failed. It might be necessary to pass an explicit starting point.')
return
# get original and starting point in the right format
assert a.perturbed.dtype == self.external_dtype
original = a.unperturbed.type(self.internal_dtype) # target class image
perturbed = a.perturbed.type(self.internal_dtype)
original = original.squeeze()
if perturbed.dim() > 3:
perturbed = perturbed.squeeze(0)
self.count_stop_query_and_distortion(original, perturbed, a, success_stop_queries, batch_image_positions)
# ===========================================================
# Iteratively refine adversarial
# ===========================================================
# Project the initialization to the boundary.
perturbed, dist_post_update, mask_succeed, num_evals = self.binary_search_batch(original, torch.unsqueeze(perturbed,dim=0),
decision_function)
# query += num_evals
dist = torch.norm((perturbed - original).view(batch_size, -1), self.ord, 1)
self.count_stop_query_and_distortion(original, perturbed, a, success_stop_queries, batch_image_positions)
# log starting point
# distance = a.distance.value
# self.log_step(0, distance, a=a, perturbed=perturbed)
if mask_succeed > 0:
self.__mask_succeed = 1
return
step = 0
old_perturbed = perturbed
while a._total_prediction_calls < self.maximum_queries:
step += 1
# ===========================================================
# Gradient direction estimation.
# ===========================================================
# Choose delta.
delta = self.select_delta(dist_post_update, step)
c0 = a._total_prediction_calls
# Choose number of evaluations.
num_evals = int(min([int(self.initial_num_evals * np.sqrt(step)), self.max_num_evals]))
# approximate gradient.
gradf, avg_val = self.approximate_gradient(decision_function, perturbed,
num_evals, delta, atk_level=self.atk_level)
# query += num_evals
# Calculate auxiliary information for the exp
# grad_gt = a._model.gradient_one(perturbed, label=a._criterion.target_class()) * self.pert_mask
# dist_dir = original - perturbed
# if self.rv_generator is not None:
# rho = self.rho_ref
# else:
# rho = 1.0
if self.norm == 'linf':
update = torch.sign(gradf)
else:
update = gradf
c1 = a._total_prediction_calls
# ===========================================================
# Update, and binary search back to the boundary.
# ===========================================================
if self.stepsize_search == 'geometric_progression':
# find tangent point
perturbed = self.geometric_progression_for_tangent_point(decision_function, original, perturbed, update,
dist, step)
c2 = a._total_prediction_calls
# Binary search to return to the boundary.
perturbed, dist_post_update, mask_succeed, num_evals = self.binary_search_batch(original, perturbed[None], decision_function)
# query += num_evals
c3 = a._total_prediction_calls
self.count_stop_query_and_distortion(original, perturbed, a, success_stop_queries, batch_image_positions)
elif self.stepsize_search == 'grid_search':
# Grid search for stepsize.
epsilons = torch.logspace(-4, 0, steps=20) * dist
epsilons_shape = [20] + len(self.shape) * [1]
perturbeds = perturbed + epsilons.view(epsilons_shape) * update
perturbeds = torch.clamp(perturbeds, min=self.clip_min, max=self.clip_max)
idx_perturbed = decision_function(perturbeds)
self.count_stop_query_and_distortion(original, perturbed, a, success_stop_queries,
batch_image_positions)
if idx_perturbed.sum().item() > 0:
# Select the perturbation that yields the minimum distance after binary search.
perturbed, dist_post_update, mask_succeed, num_evals = self.binary_search_batch(original, perturbeds[idx_perturbed], decision_function)
# query += num_evals
self.count_stop_query_and_distortion(original, perturbed, a, success_stop_queries,
batch_image_positions)
# compute new distance.
dist = torch.norm((perturbed - original).view(batch_size, -1), self.ord, 1)
log.info(
'{}-th image, iteration: {}, {}: distortion {:.4f}, query: {}'.format(image_index + 1, step, self.norm,
dist.item(),
a._total_prediction_calls))
# ===========================================================
# Log the step
# ===========================================================
# if self.norm == 'l2':
# distance = dist ** 2 / self.dim / (self.clip_max - self.clip_min) ** 2
# elif self.norm == 'linf':
# distance = dist / (self.clip_max - self.clip_min)
# self.log_step(step, distance, a=a, perturbed=perturbed, update=update * epsilon,
# aux_info=(gradf, grad_gt, dist_dir, rho))
if self.stepsize_search == 'geometric_progression':
self.printv("Call in grad approx / geo progress / binary search: {}/{}/{}".format(c1 - c0, c2 - c1, c3 - c2))
a.__best_adversarial = perturbed
if mask_succeed > 0:
self.__mask_succeed = 1
break
if a._total_prediction_calls >= self.maximum_queries:
break
old_perturbed = perturbed
# Save the labels
if self.save_calls is not None:
log.info("Total saved calls: {}".format(len(self.save_outs)))
return old_perturbed, torch.tensor([a._total_prediction_calls]).float(), success_stop_queries, dist, (dist <= self.epsilon)
def initialize_starting_point(self, a):
starting_point = self._starting_point
num_evals = 0
        a.__best_adversarial = starting_point.clone()  # FIXME: added by me
if a.perturbed is not None:
log.info('Attack is applied to a previously found adversarial.'
' Continuing search for better adversarials.')
if starting_point is not None: # pragma: no cover
warnings.warn(
'Ignoring starting_point parameter because the attack'
' is applied to a previously found adversarial.')
return a.perturbed, num_evals
if starting_point is not None:
a.forward_one(starting_point)
assert a.perturbed is not None, ('Invalid starting point provided. Please provide a starting point that is adversarial.')
return a.perturbed, num_evals + 1
"""
Apply BlendedUniformNoiseAttack if without initialization.
Efficient Implementation of BlendedUniformNoiseAttack in Foolbox.
"""
while True:
random_noise = torch.from_numpy(np.random.uniform(self.clip_min, self.clip_max, size=self.shape)).type(self.external_dtype)
_, success = a.forward_one(random_noise)
num_evals += 1
if success:
break
if num_evals > 1e4: # FIXME replaced with HSJA that uses a target image?
return
# Binary search to minimize l2 distance to the original input.
low = 0.0
high = 1.0
while high - low > 0.001:
mid = (high + low) / 2.0
            # FIXME: a.unperturbed here is actually the target class image
blended = self.loss_mask * ((1 - mid) * a.unperturbed + mid * random_noise) + \
(torch.ones_like(self.loss_mask) - self.loss_mask) * a.perturbed
_, success = a.forward_one(blended.type(self.external_dtype))
num_evals += 1
if success:
high = mid
else:
low = mid
return blended, num_evals
def compute_distance(self, x_ori, x_pert, norm='l2'):
# Compute the distance between two images.
if norm == 'l2':
return torch.norm((x_ori - x_pert)*self.loss_mask, p=2).item()
elif norm == 'linf':
return torch.max(torch.abs(x_ori - x_pert)).item()
def clip_image(self, image, clip_min, clip_max):
# Clip an image, or an image batch, with upper and lower threshold.
return torch.min(torch.max(image, clip_min), clip_max)
def project(self, unperturbed, perturbed_inputs, alphas):
""" Projection onto given l2 / linf balls in a batch. """
alphas_shape = [alphas.size(0)] + [1] * len(self.shape)
alphas = alphas.view(*alphas_shape)
if self.norm == 'l2':
projected = self.loss_mask * ((1 - alphas) * unperturbed + alphas * perturbed_inputs) + (
torch.ones_like(self.loss_mask) - self.loss_mask) * perturbed_inputs
elif self.norm == 'linf':
projected = self.clip_image(perturbed_inputs, unperturbed - alphas, unperturbed + alphas)
return projected
def binary_search_batch(self, unperturbed, perturbed_inputs,
decision_function):
""" Binary search to approach the boundary. """
num_evals = 0
# Compute distance between each of perturbed and unperturbed input.
dists_post_update = torch.tensor(
[self.compute_distance(unperturbed, perturbed_x, self.norm) for perturbed_x in perturbed_inputs])
# Choose upper thresholds in binary searchs based on constraint.
if self.norm == 'linf':
highs = dists_post_update
# Stopping criteria.
thresholds = torch.clamp_max(dists_post_update * self.theta, max=self.theta)
else:
highs = torch.ones(perturbed_inputs.size(0))
thresholds = self.theta
lows = torch.zeros(perturbed_inputs.size(0))
lows = lows.type(self.internal_dtype)
highs = highs.type(self.internal_dtype)
if self.use_mask:
_mask = torch.tensor([self.pert_mask] * perturbed_inputs.size(0))
masked = perturbed_inputs * _mask + unperturbed * (torch.ones_like(_mask) - _mask)
masked_decisions = decision_function(masked)
masked_decisions = masked_decisions.int()
num_evals += masked.size(0)
highs[masked_decisions == 1] = 0
succeed = torch.sum(masked_decisions).item() > 0
else:
succeed = False
# Call recursive function.
success = bool(decision_function(perturbed_inputs)[0].item())
assert success
while torch.max((highs - lows) / thresholds).item() > 1:
# projection to mids.
mids = (highs + lows) / 2.0
mid_inputs = self.project(unperturbed, perturbed_inputs, mids)
# Update highs and lows based on model decisions.
decisions = decision_function(mid_inputs)
num_evals += mid_inputs.size(0)
decisions = decisions.int()
lows = torch.where(decisions == 0, mids, lows)
highs = torch.where(decisions == 1, mids, highs)
out_inputs = self.project(unperturbed, perturbed_inputs, highs)
assert out_inputs.size(0) == 1
success = bool(decision_function(out_inputs)[0].item())
assert success
# Compute distance of the output to select the best choice.
# (only used when stepsize_search is grid_search.)
dists = torch.tensor([self.compute_distance(unperturbed, out, self.norm) for out in out_inputs])
idx = torch.argmin(dists)
dist = dists_post_update[idx]
out = out_inputs[idx]
return out, dist, succeed, num_evals
def select_delta(self, dist_post_update, current_iteration):
"""
Choose the delta at the scale of distance
between x and perturbed sample.
"""
if current_iteration == 1:
delta = 0.1 * (self.clip_max - self.clip_min)
else:
if self.norm == 'l2':
delta = np.sqrt(self.dim) * self.theta * dist_post_update
elif self.norm == 'linf':
delta = self.dim * self.theta * dist_post_update
return delta
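    # Note (my addition): with the defaults above the l2 branch simplifies,
    # since theta = gamma / sqrt(dim):
    #   delta = sqrt(dim) * theta * dist_post_update = gamma * dist_post_update
    # i.e. the sampling radius is a fixed fraction of the current distance.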
def approximate_gradient(self, decision_function, sample,
num_evals, delta, atk_level=None):
""" Gradient direction estimation """
# import time
# t0 = time.time()
dims = tuple(range(1, 1 + len(self.shape)))
rv_raw = self.gen_custom_basis(num_evals, sample=sample.detach().cpu().numpy(), atk_level=atk_level)
_mask = torch.stack([self.pert_mask] * num_evals)
rv = rv_raw * _mask
rv = rv / torch.sqrt(torch.sum(torch.mul(rv,rv),dim=dims,keepdim=True))
perturbed = sample + delta * rv
perturbed = torch.clamp(perturbed, min=self.clip_min, max=self.clip_max)
if self.discretize:
perturbed = (perturbed * 255.0).round() / 255.0
rv = (perturbed - sample) / delta
# query the model.
decisions = decision_function(perturbed)
# t4 = time.time()
decision_shape = [decisions.size(0)] + [1] * len(self.shape)
fval = 2 * decisions.type(self.internal_dtype).view(decision_shape) - 1.0
# Baseline subtraction (when fval differs)
vals = fval if torch.abs(torch.mean(fval)).item() == 1.0 else fval - torch.mean(fval).item()
# vals = fval
gradf = torch.mean(vals * rv, dim=0)
# Get the gradient direction.
gradf = gradf / torch.linalg.norm(gradf)
return gradf, torch.mean(fval)
def geometric_progression_for_stepsize(self, x, update, dist,
decision_function,
current_iteration):
""" Geometric progression to search for stepsize.
Keep decreasing stepsize by half until reaching
the desired side of the boundary.
"""
if hasattr(dist,"item"):
dist = dist.item()
num_evals = 0
if self.use_mask:
size_ratio = np.sqrt(self.pert_mask.sum().item() / torch.numel(self.pert_mask).item())
epsilon = dist * size_ratio / np.sqrt(current_iteration) + 0.1
else:
epsilon = dist / np.sqrt(current_iteration)
while True:
updated = torch.clamp(x + epsilon * update, min=self.clip_min, max=self.clip_max)
success = bool(decision_function(updated[None])[0].item())
num_evals += 1
if success:
break
else:
epsilon = epsilon / 2.0 # pragma: no cover
return epsilon, num_evals
def geometric_progression_for_tangent_point(self, decision_function, x_original, x_boundary, normal_vector,
dist, cur_iter):
"""
Geometric progression to search for stepsize.
Keep decreasing stepsize by half until reaching
the desired side of the boundary,
"""
radius = dist.item() / np.sqrt(cur_iter)
num_evals = 0
success = bool(decision_function(x_boundary[None])[0].item())
assert success
while True:
# x_projection = calculate_projection_of_x_original(x_original.view(-1),x_boundary.view(-1),normal_vector.view(-1))
# if torch.norm(x_projection.view(-1) - x_original.view(-1),p=self.ord).item() <= radius:
# log.info("projection point lies inside ball! reduce radius from {:.3f} to {:.3f}".format(radius, radius/2.0))
# radius /= 2.0
# continue
# else:
tangent_finder = TangentFinder(x_original.view(-1), x_boundary.view(-1), radius, normal_vector.view(-1),
norm="l2")
tangent_point = tangent_finder.compute_tangent_point()
tangent_point = tangent_point.view_as(x_original).type(x_original.dtype)
tangent_point = torch.clamp(tangent_point, self.clip_min, self.clip_max)
success = bool(decision_function(tangent_point[None])[0].item())
num_evals += 1
if success:
break
radius /= 2.0
return tangent_point
def log_step(self, step, distance, message='', always=False, a=None, perturbed=None, update=None, aux_info=None):
def cos_sim(x1, x2):
cos = (x1 * x2).sum() / torch.sqrt((x1 ** 2).sum() * (x2 ** 2).sum())
return cos
assert len(self.logger) == step
if aux_info is not None:
gradf, grad_gt, dist_dir, rho = aux_info
cos_est = cos_sim(-gradf, grad_gt)
cos_distpred = cos_sim(dist_dir, -gradf)
cos_distgt = cos_sim(dist_dir, grad_gt)
self.logger.append(
(a._total_prediction_calls, distance, cos_est.item(), rho, cos_distpred.item(), cos_distgt.item()))
else:
self.logger.append((a._total_prediction_calls, distance, 0, 0, 0, 0))
if not always and step % self.log_every_n_steps != 0:
return
self.printv('Step {}: {:.5e} {}'.format(
step,
distance,
message))
if aux_info is not None:
self.printv("\tEstimated vs. GT: {}".format(cos_est))
self.printv("\tRho: {}".format(rho))
self.printv("\tEstimated vs. Distance: {}".format(cos_distpred))
self.printv("\tGT vs. Distance: {}".format(cos_distgt))
if not self.plot_adv:
return # Dont plot
if a is not None:
import matplotlib.pyplot as plt
fig = plt.figure()
# plt.imshow(perturbed[:,:,::-1]/255) #keras
plt.imshow(perturbed.transpose(1, 2, 0)) # pytorch
np.savez('QEBA/perturbed%s%d.npz' % (self.suffix, step), pert=perturbed.transpose(1, 2, 0),
info=np.array([a._total_prediction_calls, distance]))
plt.axis('off')
plt.title('Call %d Distance %f' % (a._total_prediction_calls, distance))
fig.savefig('QEBA/%sstep%d.png' % (self.suffix, step))
plt.close(fig)
if update is not None:
fig = plt.figure()
abs_update = (update - update.min()) / (update.max() - update.min())
plt.imshow(abs_update.transpose(1, 2, 0)) # pytorch
plt.axis('off')
plt.title('Call %d Distance %f' % (a._total_prediction_calls, distance))
fig.savefig('QEBA/update%d.png' % step)
plt.close(fig)
#
self.printv("Call:", a._total_prediction_calls, "Saved to",
'QEBA/%sstep%d.png' % (self.suffix, step))
def printv(self, *args, **kwargs):
if self.verbose:
log.info(*args, **kwargs)
def get_image_of_target_class(self,dataset_name, target_labels, target_model):
images = []
for label in target_labels: # length of target_labels is 1
if dataset_name == "ImageNet":
dataset = ImageNetDataset(IMAGE_DATA_ROOT[dataset_name],label.item(), "validation")
elif dataset_name == "CIFAR-10":
dataset = CIFAR10Dataset(IMAGE_DATA_ROOT[dataset_name], label.item(), "validation")
elif dataset_name=="CIFAR-100":
dataset = CIFAR100Dataset(IMAGE_DATA_ROOT[dataset_name], label.item(), "validation")
index = np.random.randint(0, len(dataset))
image, true_label = dataset[index]
image = image.unsqueeze(0)
if dataset_name == "ImageNet" and target_model.input_size[-1] != 299:
image = F.interpolate(image,
size=(target_model.input_size[-2], target_model.input_size[-1]), mode='bilinear',
align_corners=False)
with torch.no_grad():
logits = target_model(image.cuda())
while logits.max(1)[1].item() != label.item():
index = np.random.randint(0, len(dataset))
image, true_label = dataset[index]
image = image.unsqueeze(0)
if dataset_name == "ImageNet" and target_model.input_size[-1] != 299:
image = F.interpolate(image,
size=(target_model.input_size[-2], target_model.input_size[-1]), mode='bilinear',
align_corners=False)
with torch.no_grad():
logits = target_model(image.cuda())
assert true_label == label.item()
images.append(torch.squeeze(image))
return torch.stack(images) # B,C,H,W
def initialize(self, sample, decision_function, target_images, true_labels, target_labels):
"""
sample: the shape of sample is [C,H,W] without batch-size
Efficient Implementation of BlendedUniformNoiseAttack in Foolbox.
"""
num_eval = 0
if target_images is None:
while True:
random_noise = torch.from_numpy(np.random.uniform(self.clip_min, self.clip_max, size=self.shape)).float()
# random_noise = torch.FloatTensor(*self.shape).uniform_(self.clip_min, self.clip_max)
success = decision_function(random_noise[None])[0].item()
num_eval += 1
if success:
break
if num_eval > 1000:
log.info("Initialization failed! Use a misclassified image as `target_image")
if target_labels is None:
target_labels = torch.randint(low=0, high=CLASS_NUM[self.dataset_name],
size=true_labels.size()).long()
invalid_target_index = target_labels.eq(true_labels)
while invalid_target_index.sum().item() > 0:
target_labels[invalid_target_index] = torch.randint(low=0, high=CLASS_NUM[self.dataset_name],
size=target_labels[invalid_target_index].size()).long()
invalid_target_index = target_labels.eq(true_labels)
initialization = self.get_image_of_target_class(self.dataset_name,target_labels, self.model).squeeze()
return initialization, 1
# assert num_eval < 1e4, "Initialization failed! Use a misclassified image as `target_image`"
# Binary search to minimize l2 distance to original image.
low = 0.0
high = 1.0
while high - low > 0.001:
mid = (high + low) / 2.0
blended = (1 - mid) * sample + mid * random_noise
success = decision_function(blended[None])[0].item()
num_eval += 1
if success:
high = mid
else:
low = mid
        # Sometimes the found `high` is so tiny that the difference between initialization and sample is very small; this would cause an infinite loop.
initialization = (1 - high) * sample + high * random_noise
else:
initialization = target_images
return initialization, num_eval
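    # Per-image driver: skips originals the model already misclassifies, draws
    # target labels according to args.target_type, runs the attack on each image
    # and aggregates query/success/distortion statistics.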
def attack_all_images(self, args, arch_name, target_model, result_dump_path):
if args.targeted and args.target_type == "load_random":
loaded_target_labels = np.load("./target_class_labels/{}/label.npy".format(args.dataset))
loaded_target_labels = torch.from_numpy(loaded_target_labels).long()
for batch_index, (images, true_labels) in enumerate(self.dataset_loader):
if args.dataset == "ImageNet" and target_model.input_size[-1] != 299:
images = F.interpolate(images,
size=(target_model.input_size[-2], target_model.input_size[-1]), mode='bilinear',
align_corners=False)
logit = target_model(images.cuda())
pred = logit.argmax(dim=1)
correct = pred.eq(true_labels.cuda()).float() # shape = (batch_size,)
            if correct.int().item() == 0:  # skip originals the model already misclassifies; attacking them would loop forever later
                log.info("{}-th original image is classified incorrectly, skip!".format(batch_index+1))
continue
selected = torch.arange(batch_index * args.batch_size, min((batch_index + 1) * args.batch_size, self.total_images))
if args.targeted:
if args.target_type == 'random':
target_labels = torch.randint(low=0, high=CLASS_NUM[args.dataset],
size=true_labels.size()).long()
invalid_target_index = target_labels.eq(true_labels)
while invalid_target_index.sum().item() > 0:
target_labels[invalid_target_index] = torch.randint(low=0, high=logit.shape[1],
size=target_labels[invalid_target_index].shape).long()
invalid_target_index = target_labels.eq(true_labels)
elif args.target_type == "load_random":
target_labels = loaded_target_labels[selected]
assert target_labels[0].item()!=true_labels[0].item()
elif args.target_type == 'least_likely':
target_labels = logit.argmin(dim=1).detach().cpu()
elif args.target_type == "increment":
target_labels = torch.fmod(true_labels + 1, CLASS_NUM[args.dataset])
else:
raise NotImplementedError('Unknown target_type: {}'.format(args.target_type))
target_images = self.get_image_of_target_class(self.dataset_name,target_labels, target_model)
self._default_criterion = TargetClass(target_labels[0].item()) # FIXME bug??
a = Adversarial(model, self._default_criterion, images, true_labels[0].item(),
distance=self._default_distance, threshold=self._default_threshold,
targeted_attack=args.targeted)
else:
target_labels = None
self._default_criterion = Misclassification() # FIXME bug??
a = Adversarial(model, self._default_criterion, images, true_labels[0].item(),
distance=self._default_distance, threshold=self._default_threshold,
targeted_attack=args.targeted)
self.external_dtype = a.unperturbed.dtype
def decision_function(x):
                out = a.forward(x, strict=False)[1]  # forward returns (predictions, is_adversarial); keep only the flag
return out
            target_images = self.initialize(images.squeeze(0), decision_function, None, true_labels, target_labels)  # returns (initialization, num_eval); passing None forces the noise-blending initialization
if model is None or self._default_criterion is None:
raise ValueError('The attack needs to be initialized'
' with a model and a criterion or it'
' needs to be called with an Adversarial'
' instance.')
# p_gen = self.rv_generator
# if p_gen is None:
# rho = 1.0
# else:
# loss_ = F.cross_entropy(logit, true_labels.cuda())
# loss_.backward()
# grad_gt = images.grad.detach()
#
# rho = p_gen.calc_rho(grad_gt, images).item()
# self.rho_ref = rho
            self._starting_point = target_images[0]  # initialization tensor from the (initialization, num_eval) tuple; used as the starting point, required for targeted attacks
adv_images, query, success_query, distortion_with_max_queries, success_epsilon = self.attack(batch_index,a)
distortion_with_max_queries = distortion_with_max_queries.detach().cpu()
with torch.no_grad():
adv_logit = target_model(adv_images.cuda())
adv_pred = adv_logit.argmax(dim=1)
            ## Determine which images remain non-adversarial after the attack
            not_done = correct.clone()
            if args.targeted:
                not_done = not_done * (1 - adv_pred.eq(target_labels.cuda()).float()).float()  # not_done is initialized to correct, shape = (batch_size,)
            else:
                not_done = not_done * adv_pred.eq(true_labels.cuda()).float()
            success = (1 - not_done.detach().cpu()) * correct.detach().cpu() * success_epsilon.float() * (success_query <= self.maximum_queries).float()
for key in ['query', 'correct', 'not_done',
'success', 'success_query', "distortion_with_max_queries"]:
value_all = getattr(self, key + "_all")
value = eval(key)
value_all[selected] = value.detach().float().cpu()
            # Optionally dump intermediate results after every attacked image:
# meta_info_dict = {"avg_correct": self.correct_all.mean().item(),
# "avg_not_done": self.not_done_all[self.correct_all.bool()].mean().item(),
# # "mean_query": self.success_query_all[self.success_all.bool()].mean().item(),
# # "median_query": self.success_query_all[self.success_all.bool()].median().item(),
# # "max_query": self.success_query_all[self.success_all.bool()].max().item(),
# "correct_all": self.correct_all.detach().cpu().numpy().astype(np.int32).tolist(),
# "not_done_all": self.not_done_all.detach().cpu().numpy().astype(np.int32).tolist(),
# "success_all": self.success_all.detach().cpu().numpy().astype(np.int32).tolist(),
# "query_all": self.query_all.detach().cpu().numpy().astype(np.int32).tolist(),
# "success_query_all": self.success_query_all.detach().cpu().numpy().astype(
# np.int32).tolist(),
# "distortion": self.distortion_all,
# "avg_distortion_with_max_queries": self.distortion_with_max_queries_all.mean().item(),
# "args": vars(args)}
# with open(result_dump_path, "w") as result_file_obj:
# json.dump(meta_info_dict, result_file_obj, sort_keys=True)
log.info('{} is attacked finished ({} images)'.format(arch_name, self.total_images))
log.info('Saving results to {}'.format(result_dump_path))
meta_info_dict = {"avg_correct": self.correct_all.mean().item(),
"avg_not_done": self.not_done_all[self.correct_all.bool()].mean().item(),
"mean_query": self.success_query_all[self.success_all.bool()].mean().item(),
"median_query": self.success_query_all[self.success_all.bool()].median().item(),
"max_query": self.success_query_all[self.success_all.bool()].max().item(),
"correct_all": self.correct_all.detach().cpu().numpy().astype(np.int32).tolist(),
"not_done_all": self.not_done_all.detach().cpu().numpy().astype(np.int32).tolist(),
"success_all":self.success_all.detach().cpu().numpy().astype(np.int32).tolist(),
"query_all": self.query_all.detach().cpu().numpy().astype(np.int32).tolist(),
"success_query_all": self.success_query_all.detach().cpu().numpy().astype(np.int32).tolist(),
"distortion": self.distortion_all,
"avg_distortion_with_max_queries": self.distortion_with_max_queries_all.mean().item(),
"args": vars(args)}
with open(result_dump_path, "w") as result_file_obj:
json.dump(meta_info_dict, result_file_obj, sort_keys=True)
log.info("done, write stats info to {}".format(result_dump_path))
def get_exp_dir_name(dataset, norm, targeted, target_type, args):
if target_type == "load_random":
target_type = "random"
target_str = "untargeted" if not targeted else "targeted_{}".format(target_type)
if args.attack_defense:
dirname = 'QEBATangentAttack_on_defensive_model-{}-{}-{}'.format(dataset, norm, target_str)
else:
dirname = 'QEBATangentAttack-{}-{}-{}'.format(dataset, norm, target_str)
return dirname
def print_args(args):
keys = sorted(vars(args).keys())
max_len = max([len(key) for key in keys])
for key in keys:
prefix = ' ' * (max_len + 1 - len(key)) + key
log.info('{:s}: {}'.format(prefix, args.__getattribute__(key)))
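# Mirror stdout and stderr into a log file by piping both through `tee`,
# so everything printed to the console is also persisted on disk.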
def set_log_file(fname):
import subprocess
tee = subprocess.Popen(['tee', fname], stdin=subprocess.PIPE)
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--gpu",type=int, required=True)
parser.add_argument('--json-config', type=str, default='./configures/QEBA.json',
help='a configures file to be passed in instead of arguments')
parser.add_argument('--epsilon', type=float, help='the lp perturbation bound')
parser.add_argument("--norm",type=str, choices=["l2","linf"],required=True)
parser.add_argument('--batch-size', type=int, default=1, help='batch size must set to 1')
parser.add_argument('--dataset', type=str, required=True,
choices=['CIFAR-10', 'CIFAR-100', 'ImageNet', "FashionMNIST", "MNIST", "TinyImageNet"], help='which dataset to use')
parser.add_argument('--arch', default=None, type=str, help='network architecture')
parser.add_argument('--all_archs', action="store_true")
parser.add_argument('--targeted', action="store_true")
parser.add_argument('--target_type',type=str, default='increment', choices=['random', 'load_random', 'least_likely',"increment"])
parser.add_argument('--exp-dir', default='logs', type=str, help='directory to save results and logs')
parser.add_argument('--seed', default=0, type=int, help='random seed')
parser.add_argument('--attack_discretize', action="store_true")
parser.add_argument('--atk_level', type=int, default=999)
parser.add_argument('--attack_defense',action="store_true")
parser.add_argument("--num_iterations",type=int,default=64)
parser.add_argument('--stepsize_search', type=str, choices=['geometric_progression', 'grid_search'],default='geometric_progression')
parser.add_argument('--defense_model',type=str, default=None)
parser.add_argument('--max_queries',type=int, default=10000)
parser.add_argument('--gamma',type=float)
parser.add_argument('--max_num_evals', type=int,default=100)
parser.add_argument('--pgen',type=str,choices=['naive',"resize","DCT9408","DCT192"],required=True)
args = parser.parse_args()
    assert args.batch_size == 1, "The QEBA Tangent Attack only supports mini-batch size of 1!"
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
os.environ["TORCH_HOME"] = "/home1/machen/.cache/torch/pretrainedmodels"
args_dict = None
if not args.json_config:
# If there is no json file, all of the args must be given
args_dict = vars(args)
else:
# If a json file is given, use the JSON file as the base, and then update it with args
defaults = json.load(open(args.json_config))[args.dataset][args.norm]
arg_vars = vars(args)
arg_vars = {k: arg_vars[k] for k in arg_vars if arg_vars[k] is not None}
defaults.update(arg_vars)
args = SimpleNamespace(**defaults)
args_dict = defaults
# if args.targeted:
# if args.dataset == "ImageNet":
# args.max_queries = 20000
    args.exp_dir = osp.join(args.exp_dir,
                            get_exp_dir_name(args.dataset, args.norm, args.targeted, args.target_type, args))  # create a per-experiment directory
os.makedirs(args.exp_dir, exist_ok=True)
if args.all_archs:
if args.attack_defense:
log_file_path = osp.join(args.exp_dir, 'run_pgen_{}_defense_{}.log'.format(args.pgen,args.defense_model))
else:
log_file_path = osp.join(args.exp_dir, 'run_pgen_{}.log'.format(args.pgen))
elif args.arch is not None:
if args.attack_defense:
log_file_path = osp.join(args.exp_dir, 'run_pgen_{}_defense_{}_{}.log'.format(args.pgen,args.arch, args.defense_model))
else:
log_file_path = osp.join(args.exp_dir, 'run_pgen_{}_{}.log'.format(args.pgen,args.arch))
set_log_file(log_file_path)
if args.attack_defense:
assert args.defense_model is not None
torch.backends.cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
    if args.all_archs:
        archs = args.all_archs  # NOTE: all_archs is a store_true flag, so this line only works if the actual architecture list is supplied elsewhere
else:
assert args.arch is not None
archs = [args.arch]
args.arch = ", ".join(archs)
log.info('Command line is: {}'.format(' '.join(sys.argv)))
log.info("Log file is written in {}".format(log_file_path))
log.info('Called with args:')
print_args(args)
PGEN = args.pgen
p_gen = load_pgen(args.dataset, PGEN, args)
    if args.dataset.startswith("CIFAR"):
        if PGEN == 'naive' or PGEN.startswith(('DCT', 'resize', 'PCA')):
            ITER = 150
            maxN = 30
            initN = 30
        else:
            raise NotImplementedError()
    elif args.dataset == 'ImageNet' or args.dataset == 'CelebA':
        if PGEN == 'naive' or PGEN.startswith(('PCA', 'DCT', 'resize')):
            ITER = 100
            maxN = 100
            initN = 100
        elif PGEN == 'NNGen':
            ITER = 500
            maxN = 30
            initN = 30
    maxN = 10000  # FIXME: the original gradient-estimation budget was too small, which made the comparison with my HSJA unfair!
    initN = 100
for arch in archs:
if args.attack_defense:
save_result_path = args.exp_dir + "/{}_{}_pgen_{}_result.json".format(arch, args.defense_model,args.pgen)
else:
save_result_path = args.exp_dir + "/{}_pgen_{}_result.json".format(arch,args.pgen)
# if os.path.exists(save_result_path):
# continue
log.info("Begin attack {} on {}, result will be saved to {}".format(arch, args.dataset, save_result_path))
if args.attack_defense:
model = DefensiveModel(args.dataset, arch, no_grad=True, defense_model=args.defense_model)
else:
model = StandardModel(args.dataset, arch, no_grad=True)
model.cuda()
model.eval()
attacker = QEBATangentAttack(model, args.dataset, 0, 1.0, model.input_size[-2], model.input_size[-1], IN_CHANNELS[args.dataset],
args.norm, args.epsilon, iterations=ITER, initial_num_evals=initN, max_num_evals=maxN,
internal_dtype=torch.float32, rv_generator=p_gen, atk_level=args.atk_level, mask=None,
gamma=args.gamma, batch_size=256, stepsize_search = args.stepsize_search,
log_every_n_steps=1, suffix=PGEN, verbose=False, maximum_queries=args.max_queries)
attacker.attack_all_images(args, arch, model, save_result_path)
model.cpu()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Copyright (c) 2015, Göran Gustafsson. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# Version: 1.0 #
# Web: https://github.com/ggustafsson/VideoConversionSim.py #
# Git: https://github.com/ggustafsson/VideoConversionSim.py.git #
# Email: gustafsson.g@gmail.com #
###############################################################################
import datetime
import random
import simpy
import statistics
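# Simulation parameters: servers * jobs_per_server gives the number of
# concurrent conversion slots; uploads arrive with exponentially distributed
# inter-arrival times, and each conversion takes a fixed fraction of the
# (uniformly random) video length.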
servers = 2
jobs_per_server = 4
uploads = (24 * 60)
uploads_interval = (1 * 60)
max_waiting_time = (5 * 60)
min_video_length = 30
max_video_length = (30 * 60)
conversion_time = 0.5
color_normal = "\033[0m"
color_uploaded = "\033[1;31m"
color_started = "\033[1;33m"
color_finished = "\033[1;32m"
def time_f(seconds):
"""Takes seconds as input and returns it in one of the following formats:
30 sec
657 sec (0:10:57)
"""
if seconds >= 60:
time = datetime.timedelta(seconds=seconds)
time -= datetime.timedelta(microseconds=time.microseconds)
output = "%d sec (%s)" % (seconds, time)
else:
output = "%d sec" % seconds
return output
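# Example outputs: time_f(30) -> "30 sec", time_f(657) -> "657 sec (0:10:57)".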
def upload(env, uploads, interval, resources):
"""Generates video uploads at random times."""
for i in range(uploads):
number = i + 1
conversion = convert(env, "Video %04d" % number, resources)
env.process(conversion)
wait = random.expovariate(1.0 / interval)
yield env.timeout(wait)
def convert(env, name, resources):
"""Simulates arrival, queuing, conversion and release of resources."""
global above_max_waiting
global longest_wait
global video_lengths
global waiting_times
arrived = env.now
length = random.randint(min_video_length, max_video_length)
duration = length * conversion_time
video_lengths.append(length)
print("%6d -" % env.now +
color_uploaded + " %s uploaded " % name + color_normal +
": Length is %s" % time_f(length))
with resources.request() as wait_for_slot:
yield wait_for_slot
waited = env.now - arrived
waiting_times.append(waited)
if waited > max_waiting_time:
above_max_waiting += 1
if waited > longest_wait:
longest_wait = waited
print("%6d -" % env.now +
color_started + " %s started " % name + color_normal +
": Waited for %s" % time_f(waited))
yield env.timeout(duration)
print("%6d -" % env.now +
color_finished + " %s finished " % name + color_normal +
": Duration was %s" % time_f(duration))
above_max_waiting = 0
longest_wait = 0
server_slots = servers * jobs_per_server
video_lengths = []
waiting_times = []
print("%d server(s), %d job(s) each = %d conversion(s) at a time" % \
(servers, jobs_per_server, server_slots))
print("%d video files total, 1 new every ~%s\n" % (uploads, \
time_f(uploads_interval)))
print(" Video length = %s - %s" % (time_f(min_video_length), \
time_f(max_video_length)))
print(" Conversion time = %d%% of video length" % (conversion_time * 100))
print("Max waiting time = %s\n" % time_f(max_waiting_time))
env = simpy.Environment()
resources = simpy.Resource(env, capacity=(server_slots))
uploading = upload(env, uploads, uploads_interval, resources)
env.process(uploading)
env.run()
video_length_mean = statistics.mean(video_lengths)
video_conversion_mean = video_length_mean * conversion_time
print("\n Mean video length: %s" % time_f(video_length_mean))
print("Mean conversion time: %s\n" % time_f(video_conversion_mean))
video_length_median = statistics.median(video_lengths)
video_conversion_median = video_length_median * conversion_time
print(" Median video length: %s" % time_f(video_length_median))
print("Median conversion time: %s\n" % time_f(video_conversion_median))
print(" Mean waiting time: %s" % time_f(statistics.mean(waiting_times)))
print(" Median waiting time: %s" % time_f(statistics.median(waiting_times)))
print("Longest waiting time: %s\n" % time_f(longest_wait))
print("Above max waiting time: %d out of %d" % (above_max_waiting, \
uploads))
|
nilq/baby-python
|
python
|
from DeepJetCore.DataCollection import DataCollection
from pprint import pprint
dc = DataCollection()
dc.readFromFile('dc/dataCollection.dc')  # /storage/9/dseith/DeepJet/deepCSV/results/../../Ntuples/Thu_135917_batch/dataCollections/deepCSV/train/dataCollection.dc')
#dc.readFromFile('/storage/9/dseith/DeepJet/deepCSV/results/../../Ntuples/Thu_135917_batch/dataCollections/deepFlavour_FT_reg/train/dataCollection.dc')
#pprint (dc.means[0])
#print '-'*100
#pprint (dc.means[1])
#print '-'*100
#pprint (dc.means.dtype.names)
#pprint (dc.means[0][0].dtype)
#pprint (dc.useweights)
#pprint (dc.weighter)
#pprint (dc.samples)
#pprint (dc.sampleentries)
#pprint (dc.originRoots)
#pprint (dc.nsamples)
#pprint (dc.useweights)
##pprint (dc.__batchsize)
pprint (dc.dataclass)
#pprint (dc.weighter)
#pprint (dc.means)
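# Per-track features are stored once per jet but fed to the network as six
# (or, for trackEtaRel, four) individually named copies, e.g.
# TagVarCSVTrk_trackPtRel_0 ... _5; the loop below performs that expansion.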
six_times = [
'TagVarCSVTrk_trackJetDistVal',
'TagVarCSVTrk_trackPtRel',
'TagVarCSVTrk_trackDeltaR',
'TagVarCSVTrk_trackPtRatio',
'TagVarCSVTrk_trackSip3dSig',
'TagVarCSVTrk_trackSip2dSig',
'TagVarCSVTrk_trackDecayLenVal'
]
four_times = ['TagVarCSV_trackEtaRel']
variable_list = ['jet_pt', 'jet_eta',
'TagVarCSV_jetNSecondaryVertices',
'TagVarCSV_trackSumJetEtRatio', 'TagVarCSV_trackSumJetDeltaR',
'TagVarCSV_vertexCategory', 'TagVarCSV_trackSip2dValAboveCharm',
'TagVarCSV_trackSip2dSigAboveCharm', 'TagVarCSV_trackSip3dValAboveCharm',
'TagVarCSV_trackSip3dSigAboveCharm', 'TagVarCSV_jetNSelectedTracks',
'TagVarCSV_jetNTracksEtaRel',
'TagVarCSVTrk_trackJetDistVal',
'TagVarCSVTrk_trackPtRel',
'TagVarCSVTrk_trackDeltaR',
'TagVarCSVTrk_trackPtRatio',
'TagVarCSVTrk_trackSip3dSig',
'TagVarCSVTrk_trackSip2dSig',
'TagVarCSVTrk_trackDecayLenVal',
'TagVarCSV_trackEtaRel',
'TagVarCSV_vertexMass',
'TagVarCSV_vertexNTracks',
'TagVarCSV_vertexEnergyRatio',
'TagVarCSV_vertexJetDeltaR',
'TagVarCSV_flightDistance2dVal',
'TagVarCSV_flightDistance2dSig',
'TagVarCSV_flightDistance3dVal',
'TagVarCSV_flightDistance3dSig']
means = dc.means[0]
stddevs = dc.means[1]
varnames = dc.means.dtype.names
variables = []
for mean, stddev, name in zip(means, stddevs, varnames):
if name in variable_list:
if name in six_times:
for i in range(0, 6):
var = name+'_'+str(i)
variables.append( { 'name' : var, 'scale' : stddev, 'offset' : mean , 'defaults' : 0.0 } )
elif name in four_times:
for i in range(0, 4):
var = name+'_'+str(i)
variables.append( { 'name' : var, 'scale' : stddev, 'offset' : mean , 'defaults' : 0.0 } )
else:
var = name
variables.append( { 'name' : var, 'scale' : stddev, 'offset' : mean , 'defaults' : 0.0} )
#pprint (variables)
#variables = [ { 'name' : 'node_0', 'variables' : variables } ]
print(len(variables))
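# The four DeepCSV output nodes: b, bb, c and light/gluon (udsg) jets.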
outputs = [
"probb",
"probbb",
"probc",
"probudsg"
]
var_dic = {}
var_dic['class_labels'] = outputs  # [{ 'labels' : outputs, 'name' : 'dense_6_0' }]
var_dic['inputs'] = variables
#var_dic["input_sequences"] = []
#var_dic['inputs'] = variables
#var_dic['class_labels'] = outputs
#var_dic['keras_version'] = '2.0.0'
pprint (var_dic)
import json
with open('DeepCSV_var.json', 'w') as json_file:
json.dump(var_dic, json_file)
|
nilq/baby-python
|
python
|