input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>ttumkaya/WALiSuite_V2.0<filename>WALiSuite_GenerateSummaryPlots.py
# coding: utf-8
# In[1]:
import os
import seaborn as sns
import pandas as pd
from scipy import stats
import scipy as sp
import numpy as np
import datetime as dt
get_ipython().magic('matplotlib inline')
import matplotlib
# matplotlib.use('Agg')
# %matplotlib notebook
import matplotlib.pyplot as plt
from matplotlib import gridspec
from itertools import groupby
from operator import itemgetter
import bootstrap_contrast as bs
from nptdms import *
import math
from collections import Counter
import shutil
import progressbar
from svgutils.compose import *
from matplotlib.lines import Line2D
import random
import dabest
plt.rcParams['pdf.fonttype'] = 42
# ### Get all the metric ES and CIs to a DF
# In[10]:
def collectEffectSizes(rootDirectory):
    """Collect per-row effect-size statistics from each ORN's result csvs.

    Walks ``rootDirectory`` expecting one sub-folder per ORN, each containing
    ``Starved/weighted_TSALE/P10`` with one csv per experimental condition
    (file names end with ``..._<sex>_<satiety>_<air>.csv``).

    Parameters
    ----------
    rootDirectory : str
        Root folder of the screen's results.

    Returns
    -------
    pandas.DataFrame
        One row per (ORN, condition, light intensity): effect size ('ES'),
        [low, high] confidence interval ('CIs'), p-value, and group
        sizes/SDs for the parent and offspring genotypes.
    """
    temp = {'ORNs': [], 'Sex-Satiety-Air': [], 'ES': [], 'CIs': [], 'pVal': [],
            'LightIntensity': [], 'parent_n': [], 'parent_std': [],
            'offspring_n': [], 'offspring_std': []}

    ornList = os.listdir(rootDirectory)
    bar = progressbar.ProgressBar()

    ## go thru the ORNs
    for ORN in bar(ornList):
        rootDir = os.path.join(rootDirectory, ORN, "Starved", "weighted_TSALE", "P10")
        files_in_metric_dir = os.listdir(rootDir)

        ## go thru each csv file for this ORN
        for afile in files_in_metric_dir:
            if afile.endswith('.csv'):
                ## the last three underscore-separated fields of the file
                ## name encode sex, satiety and airflow
                conditions = afile[:-4].strip().split('_')
                sex_satiety_air = '_'.join(conditions[-3:])

                ## read via the full path instead of os.chdir() so the
                ## process-wide working directory is not mutated
                tempDF = pd.read_csv(os.path.join(rootDir, afile))

                ## go thru each row in the csv file
                for row in range(len(tempDF)):
                    lightInt = tempDF['reference_group'][row].split('_')[-3]
                    ES = tempDF['stat_summary'][row]
                    CI_low = tempDF['bca_ci_low'][row]
                    CI_up = tempDF['bca_ci_high'][row]
                    parent_N = tempDF['Parent_N'][row]
                    parent_STD = tempDF['Parent_SD'][row]
                    offspring_N = tempDF['Offspring_N'][row]
                    offspring_STD = tempDF['Offspring_SD'][row]

                    ## older csvs lack the t-test column.
                    ## BUGFIX: catch only KeyError instead of a bare except,
                    ## which also swallowed KeyboardInterrupt/SystemExit.
                    try:
                        p_val = tempDF['pvalue_2samp_ind_ttest'][row]
                    except KeyError:
                        p_val = np.nan

                    temp['ORNs'].append(ORN)
                    temp['Sex-Satiety-Air'].append(sex_satiety_air)
                    temp['LightIntensity'].append(lightInt)
                    temp['ES'].append(ES)
                    temp['CIs'].append([CI_low, CI_up])
                    temp['pVal'].append(p_val)
                    temp['parent_n'].append(parent_N)
                    temp['parent_std'].append(parent_STD)
                    temp['offspring_n'].append(offspring_N)
                    temp['offspring_std'].append(offspring_STD)

    All_ORN_EffectSizes_df = pd.DataFrame(temp, columns=['ORNs', 'LightIntensity', 'Sex-Satiety-Air', 'ES', 'CIs',
                                                         'pVal', 'parent_n', 'parent_std', 'offspring_n',
                                                         'offspring_std'])
    return All_ORN_EffectSizes_df
# In[11]:
# Harvest the effect sizes from every ORN's P10 weighted-TSALE csv files into one table.
All_ORN_EffectSizes_df = collectEffectSizes(rootDirectory="C:/Users/tumkayat/Desktop/ORScreening/TransferToSOD/PulseVConstantLED_ORexperiments")
# In[12]:
# Notebook cell output: display the assembled table.
All_ORN_EffectSizes_df
# In[147]:
# Build a unique tag per (ORN, condition) row, then split the table into the
# four sex/satiety/airflow conditions to compare their coverage.
All_ORN_EffectSizes_df['Tag'] = All_ORN_EffectSizes_df['ORNs'] + '_'+ All_ORN_EffectSizes_df['Sex-Satiety-Air']
a = All_ORN_EffectSizes_df[All_ORN_EffectSizes_df['Sex-Satiety-Air'] == 'male_fed_NoAir']
b = All_ORN_EffectSizes_df[All_ORN_EffectSizes_df['Sex-Satiety-Air'] == 'male_fed_Air']
c = All_ORN_EffectSizes_df[All_ORN_EffectSizes_df['Sex-Satiety-Air'] == 'male_starved_NoAir']
d = All_ORN_EffectSizes_df[All_ORN_EffectSizes_df['Sex-Satiety-Air'] == 'male_starved_Air']
# NOTE: Python 2 print statements — this notebook predates Python 3.
print a.shape
print b.shape
print c.shape
print d.shape
# In[150]:
# Dump the full table for downstream use.
All_ORN_EffectSizes_df.to_csv("C:/Users/tumkayat/Desktop/allData.csv")
# In[115]:
## To find what is in one list but not the other
set(a['Tag']).symmetric_difference(set(b['Tag']))
# In[187]:
# NOTE(review): the column 'ORN-Intensity-Sex-Satiety-Air' is never created in
# the cells above — this likely raises KeyError; confirm which cell builds it.
All_ORN_EffectSizes_df['ORN-Intensity-Sex-Satiety-Air'].unique()
# ### delta-delta plotting function for any given two groups
# In[243]:
from math import *
def ddplot(df, colname, idx, fsize=(10,5), color='k', s=60):
    """Plot delta-delta effect sizes with 95% CIs for pairs of groups.

    For each ``[reference, test]`` pair in ``idx``, the difference of the
    two groups' effect sizes is plotted along with a margin of error built
    from the groups' back-calculated, pooled SDs.

    Parameters
    ----------
    df : pandas.DataFrame
        Table with columns ``colname``, 'ES', 'CIs' (two-element
        [low, high] lists), 'parent_n' and 'offspring_n'.
    colname : str
        Name of the column holding the group labels used in ``idx``.
    idx : list of [str, str]
        Pairs of [reference group, test group] labels.
    fsize : tuple
        Figure size in inches.
    color : matplotlib color for markers and error bars.
    s : marker size.

    Returns
    -------
    (matplotlib.figure.Figure, pandas.DataFrame)
        The figure and a table of the plotted comparisons.
    """

    def calculateSTD(mean, CI_ub, n1, n2):
        ## back-calculate an SD from the distance between the mean and the
        ## upper 95% CI bound (normal approximation, z = 1.96)
        moe = abs(CI_ub - mean)
        std_and_n = moe / 1.96
        std = std_and_n / sqrt((1./n1) + (1./n2))
        return std

    def calculatePooledMOE(std1, std2, n1=52, n2=52):
        ## pool the two SDs, then convert back to a 95% margin of error
        pooledSD = sqrt(((n1-1)*(std1**2) + (n2-1)*(std2**2)) / (n1+n2-2))
        moe = 1.96 * pooledSD * sqrt((1./n1) + (1./n2))
        return moe, pooledSD

    temp = {'reference group': [], 'test group': [], 'ES': [], 'MOE': []}

    for pair in idx:
        ## get the group names to compare
        group1 = pair[0]
        group2 = pair[1]

        ## mean and back-calculated SD of the reference group.
        ## BUGFIX: the original read an undefined name `col_name` (NameError);
        ## it now uses the `colname` parameter.
        group1_ES = df[df[colname] == group1]['ES'].values[0]
        group1_CI_ub = df[df[colname] == group1]['CIs'].values[0][1]
        group1_parent_n = df[df[colname] == group1]['parent_n'].values
        group1_offspring_n = df[df[colname] == group1]['offspring_n'].values
        group1_STD = calculateSTD(group1_ES, group1_CI_ub, group1_parent_n, group1_offspring_n)
        group1_N = (group1_parent_n + group1_offspring_n) / 2.

        ## mean and back-calculated SD of the test group
        group2_ES = df[df[colname] == group2]['ES'].values[0]
        group2_CI_ub = df[df[colname] == group2]['CIs'].values[0][1]
        group2_parent_n = df[df[colname] == group2]['parent_n'].values
        group2_offspring_n = df[df[colname] == group2]['offspring_n'].values
        group2_STD = calculateSTD(group2_ES, group2_CI_ub, group2_parent_n, group2_offspring_n)
        group2_N = (group2_parent_n + group2_offspring_n) / 2.

        ## delta-delta effect size and its margin of error
        deltadelta_ES = group2_ES - group1_ES
        deltadelta_MOE, deltadelta_STD = calculatePooledMOE(group1_STD, group2_STD, group1_N, group2_N)

        temp['reference group'].append(group1)
        temp['test group'].append(group2)
        temp['ES'].append(deltadelta_ES)
        temp['MOE'].append(deltadelta_MOE)

    fig = plt.figure(figsize=fsize)
    ax1 = fig.add_subplot(111)
    sns.set(style='ticks', context='notebook', font='Arial', font_scale=1.5)

    x = 2
    for i in range(len(temp['ES'])):
        y = temp['ES'][i]
        ax1.scatter(x, y, color=color, s=s)
        ## BUGFIX: draw each comparison's own error bar; the original always
        ## used the first comparison's MOE (index 0).
        ax1.plot([x, x], [y - temp['MOE'][i], y + temp['MOE'][i]], color=color, lw=1)
        x = x + 2

    sns.despine(ax=ax1)
    ax1.tick_params(left=True, top=False, bottom=False, right=False)
    ax1.set_ylim(-.5, .5)
    ## BUGFIX: points are drawn at x = 2, 4, 6, ... — put the ticks there too
    ## (the original placed them at 1, 3, 5, ..., misaligning the labels).
    ax1.set_xticks(range(2, 2 * len(temp['ES']) + 1, 2))
    ## BUGFIX: pair each reference with its own test group via zip(); the
    ## original nested comprehension produced the full cross product of labels.
    ax1.set_xticklabels([(ref + ' V ' + tst) for ref, tst in zip(temp['reference group'], temp['test group'])],
                        rotation=30, fontsize=8)
    plt.axhline(0, color='k', lw='.5', ls='-')
    ax1.set_ylabel('delta-delta ES')

    stats = pd.DataFrame(temp)
    return fig, stats
# In[249]:
# Delta-delta comparison of pulsed vs constant LED stimulation for one ORN.
# NOTE(review): `df` must be an effect-size table containing an
# 'ORN-Intensity-Sex-Satiety-Air' column — confirm which cell defines it.
ORN = 'Or92a'
f, b = ddplot(df, colname = 'ORN-Intensity-Sex-Satiety-Air',
idx = [[ORN + '_7mV_fileName_NoAir_Constant', ORN + '_14mV_fileName_NoAir_Pulse'],
[ORN + '_14mV_fileName_NoAir_Constant', ORN + '_21mV_fileName_NoAir_Pulse'],
[ORN + '_21mV_fileName_NoAir_Constant', ORN + '_28mV_fileName_NoAir_Pulse']])
# Save the figure and the comparison table next to the ORN's raw data.
f.savefig('/Users/tumkayat/Desktop/ORScreening/TransferToSOD/PulseVConstantLED_ORexperiments/' + ORN + '/Pulsed_v_Constant.pdf',dpi=1000,bbox_inches='tight')
b.to_csv('/Users/tumkayat/Desktop/ORScreening/TransferToSOD/PulseVConstantLED_ORexperiments/' + ORN + '/Pulsed_v_Constant.csv')
# ### End of delta-delta function
# ### Average Mean across 3-intensities and Pooling the SDs
#
# In[76]:
from math import *
def calculatePooledMOE(data):
    """Pool three confidence intervals into one 95% margin of error.

    ``data`` is an iterable of exactly three (CI_low, CI_high) pairs; each
    is converted to an SD with ``calculateSD`` and the three SDs are then
    combined by ``poolSDs``.
    """
    sd_list = [calculateSD(pair[0], pair[1]) for pair in data]
    return poolSDs(sd_list[0], sd_list[1], sd_list[2])
def calculateSD(CI_lb, CI_ub, n1=104, n2=52):
    """Back-calculate a standard deviation from a 95% confidence interval.

    The CI width is divided by the normal 97.5% quantile (1.96) and rescaled
    by the two group sizes (normal approximation).

    Parameters
    ----------
    CI_lb, CI_ub : float
        Lower and upper CI bounds.
    n1, n2 : int
        Group sizes. Defaults keep the screen's standard sizes (104 combined
        parental controls, 52 offspring), which were hard-coded originally.

    NOTE(review): this uses the full CI width as the margin of error, whereas
    ddplot's calculateSTD uses the half-width (|upper - mean|) — confirm the
    factor-of-two difference is intentional.
    """
    moe = abs(CI_ub - CI_lb)
    std_and_n = moe / 1.96
    std = std_and_n / sqrt((1./n1) + (1./n2))
    return std
def poolSDs(std1, std2, std3, n1=52, n2=52, n3=52):
    """Pool three group SDs and return the 95% margin of error of their mean.

    Uses the standard pooled-variance formula (variances weighted by their
    degrees of freedom), then scales by z = 1.96 and the group sizes.
    """
    dof = n1 + n2 + n3 - 3
    weighted_var = (n1 - 1) * std1 ** 2 + (n2 - 1) * std2 ** 2 + (n3 - 1) * std3 ** 2
    pooledSD = sqrt(weighted_var / dof)
    return 1.96 * pooledSD * sqrt(1./n1 + 1./n2 + 1./n3)
# In[81]:
## Average the effect size across the three light intensities and pool the
## CIs for every (ORN, sex/satiety/air) combination.
## (Loop nesting reconstructed — the flattened dump had lost the indentation.)
temp = {'ORNs': [], 'Sex-Satiety-Air': [], 'ES_Mean': [], 'MOE_Pooled': []}

for group_key, group_df in All_ORN_EffectSizes_df.groupby(['ORNs', 'Sex-Satiety-Air']):
    ORN = group_key[0]
    ES_Mean = group_df['ES'].mean()
    pooled_moe = calculatePooledMOE(group_df['CIs'])
    ## all rows in the group share the same condition label
    conditions = group_df['Sex-Satiety-Air'].iloc[0]

    temp['ORNs'].append(ORN)
    temp['ES_Mean'].append(ES_Mean)
    temp['MOE_Pooled'].append(pooled_moe)
    temp['Sex-Satiety-Air'].append(conditions)

allIntensities_PooledAverage_df = pd.DataFrame(temp)

# In[82]:
# Notebook cell output: display the pooled table.
allIntensities_PooledAverage_df
# ### Average Mean across 3-intensities and Pooling the SDs: END
# ### Implementing the Empirical Bayes
# In[207]:
# Load the per-fly weighted-TSALE values for a single ORN (Gr21a) from a pickled DataFrame.
df = pd.read_pickle("C:/Users/tumkayat/Desktop/ORScreening/All_merged_intensity_wTSALE/Gr21a/weighted_TSALE/weighted_TSALE_values.pkl")
# In[208]:
# Fuse the three experimental factors into one label column, e.g. 'male_fed_Air'.
df = df.assign(Sex_Satiety_Wind = pd.Series(df['Sex'] + '_' + df['Satiety'] + '_' + df['Wind status'], index = df.index))
# In[6]:
## Build per-(ORN, condition, intensity) Z scores for the Empirical Bayes
## analysis: Di is the Offspring-minus-Parent mean difference, Si the
## average within-genotype SD.
## (Loop nesting reconstructed — the flattened dump had lost the indentation.)
Z_scores = {'Di': [], 'Si': [], 'ORNs': [], 'Sex_Satiety_Wind': [], 'LightInt': []}

rootDirectory = "C:/Users/tumkayat/Desktop/ORScreening/All_merged_intensity_wTSALE/"
ornList = os.listdir(rootDirectory)
bar = progressbar.ProgressBar()

## go thru the ORNs
for ORN in bar(ornList):
    rootDir = os.path.join(rootDirectory, ORN, "weighted_TSALE", "weighted_TSALE_values.pkl")
    df = pd.read_pickle(rootDir)
    df = df.assign(Sex_Satiety_Wind = pd.Series(df['Sex'] + '_' + df['Satiety'] + '_' + df['Wind status'], index = df.index))

    for condition in df['Sex_Satiety_Wind'].unique():
        for intensity in df['Light Intensity(uW/mm2)'].unique():
            dfOI = df[(df['Sex_Satiety_Wind'] == condition) & (df['Light Intensity(uW/mm2)'] == intensity)]

            ## calculate the mean difference as Offspring - Parent, since having
            ## 2 or 3 independent groups does not affect the mean
            ctrl_wTSALE = dfOI[dfOI['Status'] == 'Parent']['weighted_TSALE_P10']
            exp_wTSALE = dfOI[dfOI['Status'] == 'Offspring']['weighted_TSALE_P10']
            Di = exp_wTSALE.mean() - ctrl_wTSALE.mean()

            ## calculate Si for three genotypes and then get the average -
            ## different than combining the controls
            genotypes = df['Genotype'].unique()
            g0_data = dfOI[dfOI['Genotype'] == genotypes[0]]['weighted_TSALE_P10']
            g1_data = dfOI[dfOI['Genotype'] == genotypes[1]]['weighted_TSALE_P10']
            g2_data = dfOI[dfOI['Genotype'] == genotypes[2]]['weighted_TSALE_P10']
            Si = (g0_data.std() + g1_data.std() + g2_data.std()) / 3.

            Z_scores['ORNs'].append(ORN)
            Z_scores['Sex_Satiety_Wind'].append(condition)
            Z_scores['LightInt'].append(intensity)
            Z_scores['Di'].append(Di)
            Z_scores['Si'].append(Si)

Z_scores_df = pd.DataFrame(Z_scores)
Z_scores_df_dropna = Z_scores_df.dropna()

## regularizer a0 = 10th percentile of all Si; Zi = Di / (a0 + Si)
a0 = np.percentile(Z_scores_df_dropna['Si'], 10)
Z_scores_df_dropna['Zi'] = Z_scores_df_dropna['Di'] / (a0 + Z_scores_df_dropna['Si'])
# In[7]:
# Tag each Z-score row with ORN + condition for cross-referencing.
Z_scores_df_dropna['Tag'] = Z_scores_df_dropna['ORNs'] + '_' + Z_scores_df_dropna['Sex_Satiety_Wind']
# In[240]:
male_fed_noair = Z_scores_df_dropna[Z_scores_df_dropna['Sex_Satiety_Wind'] == 'male_fed_NoAir']
# In[257]:
# Recompute the regularized Z scores within the male/fed/no-airflow subset,
# here using the 90th percentile of Si as the regularizer a0.
# NOTE(review): assigning into a filtered slice triggers pandas'
# SettingWithCopyWarning — consider .copy() on the subset first; verify intent.
a0 = np.percentile(male_fed_noair['Si'], 90)
male_fed_noair['Zi'] = male_fed_noair['Di'] / (a0 + male_fed_noair['Si'])
# In[17]:
##Drop Gr66a, EBprot blows up otherwise
a = Z_scores_df_dropna[Z_scores_df_dropna['ORNs'] != 'Gr66a']
a
# In[19]:
a.to_csv('C:/Users/tumkayat/Desktop/allData_Z_Gr66aREMOVED_CombosADDED.csv')
# In[215]:
##!!!! Calculate Z scores for stand-alone dataframes
def calculate_Z(Di, Si=None, a0=0.0):
    """Compute a regularized Z score: Zi = Di / (a0 + Si).

    Implements the formula used in the Empirical Bayes cells above
    (Zi = Di / (a0 + Si)); the original was an unfinished stub that
    returned None.

    Parameters
    ----------
    Di : float
        Offspring-minus-Parent mean difference.
    Si : float, optional
        Average within-genotype SD. When omitted, None is returned —
        preserving the stub's behavior for any existing one-argument calls.
    a0 : float
        Regularization constant (e.g. a percentile of all Si values).

    Returns
    -------
    float or None
    """
    if Si is None:
        return None
    return Di / (a0 + Si)
# ### Implementing the Empirical Bayes: END
# ### Bell & Wilson Method
# ### Bell & Wilson Method: END
# ### PlotMe
# In[25]:
def generateSummaryPlot(df, condition1, condition2, metric, sort_by=None, sort_by_order=None, fsize= (5,25), gap_thru_yaxis = 5, offset = 1, s =30):
df_condition = df[((df['Sex-Satiety-Air'] == condition1) | (df['Sex-Satiety-Air'] == condition2)) & (df['Metric'] == metric)]
ORs_with_both_conditions = []
for OR in df_condition['ORNs'].unique():
list_of_conditions_per_ORN = df_condition[df_condition['ORNs'] == OR]['Sex-Satiety-Air']
## if an ORN has both conditions drop one
if (condition1 in list_of_conditions_per_ORN.values) & (condition2 in list_of_conditions_per_ORN.values):
ORs_with_both_conditions.append(OR)
# print 'Before dropping', df_condition.shape
for i in ORs_with_both_conditions:
drop_df = df_condition[(df_condition['ORNs'] == i) & (df_condition['Sex-Satiety-Air'] == condition2)]
ind = list(drop_df.index.values)
# print ind
df_condition = df_condition.drop(ind)
df_condition = df_condition.reset_index(drop=True)
# print 'After dropping',df_condition.shape
## If none, will use the first df's order
fig = plt.figure(figsize=fsize)
ax1 = fig.add_subplot(111)
plt.title(condition1,fontsize = 15)
style='ticks'
context='notebook'
font='Arial'
colorPalette = 'muted'
sns.set(style=style,context =context,font=font)
s = s
gap_thru_yaxis = gap_thru_yaxis
offset = offset
i = 0
if sort_by is None:
ORNList_sorted = sort_by_order
keep_the_ORN_Order = sort_by_order
else:
df_condition_sorted = df_condition[df_condition['LightIntensity'] == '70uW'].sort_values(by=['ES'],ascending=False)
ORNList_sorted = df_condition_sorted['ORNs'].unique()
keep_the_ORN_Order = ORNList_sorted
y_val = np.arange(1, len(ORNList_sorted)*gap_thru_yaxis, gap_thru_yaxis)
for OR in ORNList_sorted:
OR_df = df_condition[df_condition['ORNs'] == OR]
y = y_val[i] + offset
i = i+1
# print OR, len(OR_df)
for row in range(len(OR_df)):
ES = OR_df.iloc[row]['ES']
CIs= OR_df.iloc[row]['CIs']
intensity = OR_df.iloc[row]['LightIntensity']
if intensity == '14uW':
color = sns.dark_palette("red",n_colors=3)[2]
elif intensity == '42uW':
color = sns.dark_palette("red",n_colors=3)[1]
elif intensity == '70uW':
color = sns.dark_palette("red",n_colors=3)[0]
ax1.scatter(ES, y, color = color, s = s)
ax1.plot([CIs[0], CIs[1]], [y,y], color = color, lw=.5)
y = y - offset
sns.despine(ax=ax1)
ax1.tick_params(left=True,top=False, bottom=False, right=False)
ax1.set_xlim(-1.0,1.0)
ax1.set_ylim(-1, y_val[-1]+gap_thru_yaxis)
ax1.set_yticks(y_val) ## sets the y ticks
ax1.set_yticklabels(ORNList_sorted, fontsize=8) ## sets the x ticks' labels
plt.axvline(0,color='k',lw='0.4',ls='-')
# ax1.set_ylabel('ORNs')
ax1.set_xlabel('weighted TSALE',fontsize=10)
plt.xticks(fontsize=8)
from matplotlib.lines import Line2D
custom_lines = [Line2D([0], [0], color = sns.dark_palette("red",n_colors=3)[2], marker = 'o', lw=1),
Line2D([0], [0], color = sns.dark_palette("red",n_colors=3)[1], marker = 'o', lw=1),
Line2D([0], [0], color = | |
of metrics means aggregation
of metrics across all results, here aggregation
could be sum, average, rate, etc.
"""
@property
def raw_page(self):
    """Return this message itself as its own "raw page" of results (the interface google.api_core pagers expect)."""
    return self
results = proto.RepeatedField(
proto.MESSAGE, number=1, message="GoogleAdsRow",
)
next_page_token = proto.Field(proto.STRING, number=2,)
total_results_count = proto.Field(proto.INT64, number=3,)
field_mask = proto.Field(
proto.MESSAGE, number=5, message=gp_field_mask.FieldMask,
)
summary_row = proto.Field(proto.MESSAGE, number=6, message="GoogleAdsRow",)
class SearchGoogleAdsStreamRequest(proto.Message):
    # Generated proto-plus message; class-body indentation restored (it was
    # lost in the dump) — field numbers and types are unchanged.
    r"""Request message for
    [GoogleAdsService.SearchStream][google.ads.googleads.v7.services.GoogleAdsService.SearchStream].

    Attributes:
        customer_id (str):
            Required. The ID of the customer being
            queried.
        query (str):
            Required. The query string.
        summary_row_setting (google.ads.googleads.v7.enums.types.SummaryRowSettingEnum.SummaryRowSetting):
            Determines whether a summary row will be
            returned. By default, summary row is not
            returned. If requested, the summary row will be
            sent in a response by itself after all other
            query results are returned.
    """

    customer_id = proto.Field(proto.STRING, number=1,)
    query = proto.Field(proto.STRING, number=2,)
    summary_row_setting = proto.Field(
        proto.ENUM,
        number=3,
        enum=gage_summary_row_setting.SummaryRowSettingEnum.SummaryRowSetting,
    )
class SearchGoogleAdsStreamResponse(proto.Message):
    # Generated proto-plus message; class-body indentation restored (it was
    # lost in the dump) — field numbers and types are unchanged.
    r"""Response message for
    [GoogleAdsService.SearchStream][google.ads.googleads.v7.services.GoogleAdsService.SearchStream].

    Attributes:
        results (Sequence[google.ads.googleads.v7.services.types.GoogleAdsRow]):
            The list of rows that matched the query.
        field_mask (google.protobuf.field_mask_pb2.FieldMask):
            FieldMask that represents what fields were
            requested by the user.
        summary_row (google.ads.googleads.v7.services.types.GoogleAdsRow):
            Summary row that contains summary of metrics
            in results. Summary of metrics means aggregation
            of metrics across all results, here aggregation
            could be sum, average, rate, etc.
        request_id (str):
            The unique id of the request that is used for
            debugging purposes.
    """

    results = proto.RepeatedField(
        proto.MESSAGE, number=1, message="GoogleAdsRow",
    )
    field_mask = proto.Field(
        proto.MESSAGE, number=2, message=gp_field_mask.FieldMask,
    )
    summary_row = proto.Field(proto.MESSAGE, number=3, message="GoogleAdsRow",)
    request_id = proto.Field(proto.STRING, number=4,)
class GoogleAdsRow(proto.Message):
r"""A returned row from the query.
Attributes:
account_budget (google.ads.googleads.v7.resources.types.AccountBudget):
The account budget in the query.
account_budget_proposal (google.ads.googleads.v7.resources.types.AccountBudgetProposal):
The account budget proposal referenced in the
query.
account_link (google.ads.googleads.v7.resources.types.AccountLink):
The AccountLink referenced in the query.
ad_group (google.ads.googleads.v7.resources.types.AdGroup):
The ad group referenced in the query.
ad_group_ad (google.ads.googleads.v7.resources.types.AdGroupAd):
The ad referenced in the query.
ad_group_ad_asset_view (google.ads.googleads.v7.resources.types.AdGroupAdAssetView):
The ad group ad asset view in the query.
ad_group_ad_label (google.ads.googleads.v7.resources.types.AdGroupAdLabel):
The ad group ad label referenced in the
query.
ad_group_asset (google.ads.googleads.v7.resources.types.AdGroupAsset):
The ad group asset referenced in the query.
ad_group_audience_view (google.ads.googleads.v7.resources.types.AdGroupAudienceView):
The ad group audience view referenced in the
query.
ad_group_bid_modifier (google.ads.googleads.v7.resources.types.AdGroupBidModifier):
The bid modifier referenced in the query.
ad_group_criterion (google.ads.googleads.v7.resources.types.AdGroupCriterion):
The criterion referenced in the query.
ad_group_criterion_label (google.ads.googleads.v7.resources.types.AdGroupCriterionLabel):
The ad group criterion label referenced in
the query.
ad_group_criterion_simulation (google.ads.googleads.v7.resources.types.AdGroupCriterionSimulation):
The ad group criterion simulation referenced
in the query.
ad_group_extension_setting (google.ads.googleads.v7.resources.types.AdGroupExtensionSetting):
The ad group extension setting referenced in
the query.
ad_group_feed (google.ads.googleads.v7.resources.types.AdGroupFeed):
The ad group feed referenced in the query.
ad_group_label (google.ads.googleads.v7.resources.types.AdGroupLabel):
The ad group label referenced in the query.
ad_group_simulation (google.ads.googleads.v7.resources.types.AdGroupSimulation):
The ad group simulation referenced in the
query.
ad_parameter (google.ads.googleads.v7.resources.types.AdParameter):
The ad parameter referenced in the query.
age_range_view (google.ads.googleads.v7.resources.types.AgeRangeView):
The age range view referenced in the query.
ad_schedule_view (google.ads.googleads.v7.resources.types.AdScheduleView):
The ad schedule view referenced in the query.
domain_category (google.ads.googleads.v7.resources.types.DomainCategory):
The domain category referenced in the query.
asset (google.ads.googleads.v7.resources.types.Asset):
The asset referenced in the query.
batch_job (google.ads.googleads.v7.resources.types.BatchJob):
The batch job referenced in the query.
bidding_strategy (google.ads.googleads.v7.resources.types.BiddingStrategy):
The bidding strategy referenced in the query.
bidding_strategy_simulation (google.ads.googleads.v7.resources.types.BiddingStrategySimulation):
The bidding strategy simulation referenced in
the query.
billing_setup (google.ads.googleads.v7.resources.types.BillingSetup):
The billing setup referenced in the query.
call_view (google.ads.googleads.v7.resources.types.CallView):
The call view referenced in the query.
campaign_budget (google.ads.googleads.v7.resources.types.CampaignBudget):
The campaign budget referenced in the query.
campaign (google.ads.googleads.v7.resources.types.Campaign):
The campaign referenced in the query.
campaign_asset (google.ads.googleads.v7.resources.types.CampaignAsset):
The campaign asset referenced in the query.
campaign_audience_view (google.ads.googleads.v7.resources.types.CampaignAudienceView):
The campaign audience view referenced in the
query.
campaign_bid_modifier (google.ads.googleads.v7.resources.types.CampaignBidModifier):
The campaign bid modifier referenced in the
query.
campaign_criterion (google.ads.googleads.v7.resources.types.CampaignCriterion):
The campaign criterion referenced in the
query.
campaign_criterion_simulation (google.ads.googleads.v7.resources.types.CampaignCriterionSimulation):
The campaign criterion simulation referenced
in the query.
campaign_draft (google.ads.googleads.v7.resources.types.CampaignDraft):
The campaign draft referenced in the query.
campaign_experiment (google.ads.googleads.v7.resources.types.CampaignExperiment):
The campaign experiment referenced in the
query.
campaign_extension_setting (google.ads.googleads.v7.resources.types.CampaignExtensionSetting):
The campaign extension setting referenced in
the query.
campaign_feed (google.ads.googleads.v7.resources.types.CampaignFeed):
The campaign feed referenced in the query.
campaign_label (google.ads.googleads.v7.resources.types.CampaignLabel):
The campaign label referenced in the query.
campaign_shared_set (google.ads.googleads.v7.resources.types.CampaignSharedSet):
Campaign Shared Set referenced in AWQL query.
campaign_simulation (google.ads.googleads.v7.resources.types.CampaignSimulation):
The campaign simulation referenced in the
query.
carrier_constant (google.ads.googleads.v7.resources.types.CarrierConstant):
The carrier constant referenced in the query.
change_event (google.ads.googleads.v7.resources.types.ChangeEvent):
The ChangeEvent referenced in the query.
change_status (google.ads.googleads.v7.resources.types.ChangeStatus):
The ChangeStatus referenced in the query.
combined_audience (google.ads.googleads.v7.resources.types.CombinedAudience):
The CombinedAudience referenced in the query.
conversion_action (google.ads.googleads.v7.resources.types.ConversionAction):
The conversion action referenced in the
query.
conversion_custom_variable (google.ads.googleads.v7.resources.types.ConversionCustomVariable):
The conversion custom variable referenced in
the query.
click_view (google.ads.googleads.v7.resources.types.ClickView):
The ClickView referenced in the query.
currency_constant (google.ads.googleads.v7.resources.types.CurrencyConstant):
The currency constant referenced in the
query.
custom_audience (google.ads.googleads.v7.resources.types.CustomAudience):
The CustomAudience referenced in the query.
custom_interest (google.ads.googleads.v7.resources.types.CustomInterest):
The CustomInterest referenced in the query.
customer (google.ads.googleads.v7.resources.types.Customer):
The customer referenced in the query.
customer_asset (google.ads.googleads.v7.resources.types.CustomerAsset):
The customer asset referenced in the query.
customer_manager_link (google.ads.googleads.v7.resources.types.CustomerManagerLink):
The CustomerManagerLink referenced in the
query.
customer_client_link (google.ads.googleads.v7.resources.types.CustomerClientLink):
The CustomerClientLink referenced in the
query.
customer_client (google.ads.googleads.v7.resources.types.CustomerClient):
The CustomerClient referenced in the query.
customer_extension_setting (google.ads.googleads.v7.resources.types.CustomerExtensionSetting):
The customer extension setting referenced in
the query.
customer_feed (google.ads.googleads.v7.resources.types.CustomerFeed):
The customer feed referenced in the query.
customer_label (google.ads.googleads.v7.resources.types.CustomerLabel):
The customer label referenced in the query.
customer_negative_criterion (google.ads.googleads.v7.resources.types.CustomerNegativeCriterion):
The customer negative criterion referenced in
the query.
customer_user_access (google.ads.googleads.v7.resources.types.CustomerUserAccess):
The CustomerUserAccess referenced in the
query.
customer_user_access_invitation (google.ads.googleads.v7.resources.types.CustomerUserAccessInvitation):
The CustomerUserAccessInvitation referenced
in the query.
detail_placement_view (google.ads.googleads.v7.resources.types.DetailPlacementView):
The detail placement view referenced in the
query.
display_keyword_view (google.ads.googleads.v7.resources.types.DisplayKeywordView):
The display keyword view referenced in the
query.
distance_view (google.ads.googleads.v7.resources.types.DistanceView):
The distance view referenced in the query.
dynamic_search_ads_search_term_view (google.ads.googleads.v7.resources.types.DynamicSearchAdsSearchTermView):
The dynamic search ads search term view
referenced in the query.
expanded_landing_page_view (google.ads.googleads.v7.resources.types.ExpandedLandingPageView):
The expanded landing page view referenced in
the query.
extension_feed_item (google.ads.googleads.v7.resources.types.ExtensionFeedItem):
The extension feed item referenced in the
query.
feed (google.ads.googleads.v7.resources.types.Feed):
The feed referenced in the query.
feed_item (google.ads.googleads.v7.resources.types.FeedItem):
The feed item referenced in the query.
feed_item_set (google.ads.googleads.v7.resources.types.FeedItemSet):
The feed item set referenced in the query.
feed_item_set_link (google.ads.googleads.v7.resources.types.FeedItemSetLink):
The feed item set link referenced in the
query.
feed_item_target (google.ads.googleads.v7.resources.types.FeedItemTarget):
The feed item target referenced in the query.
feed_mapping (google.ads.googleads.v7.resources.types.FeedMapping):
The feed mapping referenced in the query.
feed_placeholder_view (google.ads.googleads.v7.resources.types.FeedPlaceholderView):
The feed placeholder view referenced in the
query.
gender_view (google.ads.googleads.v7.resources.types.GenderView):
The gender view referenced in the query.
geo_target_constant (google.ads.googleads.v7.resources.types.GeoTargetConstant):
The geo target constant referenced in the
query.
geographic_view (google.ads.googleads.v7.resources.types.GeographicView):
The geographic view referenced in the query.
group_placement_view (google.ads.googleads.v7.resources.types.GroupPlacementView):
The group placement view referenced in the
query.
hotel_group_view (google.ads.googleads.v7.resources.types.HotelGroupView):
The hotel group view referenced in the query.
hotel_performance_view (google.ads.googleads.v7.resources.types.HotelPerformanceView):
The hotel performance view referenced in the
query.
income_range_view (google.ads.googleads.v7.resources.types.IncomeRangeView):
The income range view referenced in the
query.
keyword_view (google.ads.googleads.v7.resources.types.KeywordView):
The keyword view referenced in the query.
keyword_plan (google.ads.googleads.v7.resources.types.KeywordPlan):
The keyword plan referenced in the query.
keyword_plan_campaign (google.ads.googleads.v7.resources.types.KeywordPlanCampaign):
The keyword plan campaign referenced in the
query.
keyword_plan_campaign_keyword (google.ads.googleads.v7.resources.types.KeywordPlanCampaignKeyword):
The keyword plan campaign keyword referenced
in the query.
keyword_plan_ad_group (google.ads.googleads.v7.resources.types.KeywordPlanAdGroup):
The keyword plan ad group referenced in the
query.
keyword_plan_ad_group_keyword (google.ads.googleads.v7.resources.types.KeywordPlanAdGroupKeyword):
The keyword plan ad group referenced in the
query.
label (google.ads.googleads.v7.resources.types.Label):
The label referenced in the query.
landing_page_view (google.ads.googleads.v7.resources.types.LandingPageView):
The landing page view referenced in the
query.
language_constant (google.ads.googleads.v7.resources.types.LanguageConstant):
The language constant referenced in the
query.
location_view (google.ads.googleads.v7.resources.types.LocationView):
The location view referenced in the query.
managed_placement_view (google.ads.googleads.v7.resources.types.ManagedPlacementView):
The managed placement view referenced in the
query.
media_file (google.ads.googleads.v7.resources.types.MediaFile):
The media file referenced in the query.
mobile_app_category_constant (google.ads.googleads.v7.resources.types.MobileAppCategoryConstant):
The mobile app category constant referenced
in the query.
mobile_device_constant (google.ads.googleads.v7.resources.types.MobileDeviceConstant):
The mobile device constant referenced in the
query.
offline_user_data_job (google.ads.googleads.v7.resources.types.OfflineUserDataJob):
The offline user data job referenced in the
query.
operating_system_version_constant (google.ads.googleads.v7.resources.types.OperatingSystemVersionConstant):
The operating system version constant
referenced in the query.
paid_organic_search_term_view (google.ads.googleads.v7.resources.types.PaidOrganicSearchTermView):
The paid organic search term view referenced
in the query.
parental_status_view (google.ads.googleads.v7.resources.types.ParentalStatusView):
The parental status view referenced in the
query.
product_bidding_category_constant (google.ads.googleads.v7.resources.types.ProductBiddingCategoryConstant):
The Product Bidding Category referenced in
the query.
product_group_view (google.ads.googleads.v7.resources.types.ProductGroupView):
The product group view referenced in the
query.
recommendation (google.ads.googleads.v7.resources.types.Recommendation):
The recommendation referenced in the query.
search_term_view (google.ads.googleads.v7.resources.types.SearchTermView):
The search term view referenced in the query.
shared_criterion (google.ads.googleads.v7.resources.types.SharedCriterion):
The shared set referenced in the query.
shared_set (google.ads.googleads.v7.resources.types.SharedSet):
The shared set referenced in the query.
shopping_performance_view (google.ads.googleads.v7.resources.types.ShoppingPerformanceView):
The shopping performance view referenced in
the query.
third_party_app_analytics_link (google.ads.googleads.v7.resources.types.ThirdPartyAppAnalyticsLink):
The AccountLink referenced in the query.
topic_view (google.ads.googleads.v7.resources.types.TopicView):
The topic view referenced in the query.
user_interest (google.ads.googleads.v7.resources.types.UserInterest):
The user interest referenced in the query.
life_event (google.ads.googleads.v7.resources.types.LifeEvent):
The life event referenced in the query.
user_list (google.ads.googleads.v7.resources.types.UserList):
The user list | |
<filename>berryimu.py
#!/usr/bin/python
#
# This program includes a number of calculations to improve the
# values returned from a BerryIMU. If this is new to you, it
# may be worthwhile first to look at berryIMU-simple.py, which
# has a much more simplified version of code which is easier
# to read.
#
#
# The BerryIMUv1, BerryIMUv2 and BerryIMUv3 are supported
#
# This script is python 2.7 and 3 compatible
#
# Feel free to do whatever you like with this code.
# Distributed as-is; no warranty is given.
#
# https://ozzmaker.com/berryimu/
import time
import math
import IMU
import datetime
import os
import sys
RAD_TO_DEG = 57.29578  # degrees per radian
M_PI = 3.14159265358979323846
G_GAIN = 0.070          # [deg/s/LSB]  If you change the dps for gyro, you need to update this value accordingly
AA = 0.40               # Complementary filter constant
MAG_LPF_FACTOR = 0.4    # Low pass filter constant magnetometer
ACC_LPF_FACTOR = 0.4    # Low pass filter constant for accelerometer
ACC_MEDIANTABLESIZE = 2    # Median filter table size for accelerometer. Higher = smoother but a longer delay
MAG_MEDIANTABLESIZE = 9    # Median filter table size for magnetometer. Higher = smoother but a longer delay

################# Compass Calibration values ############
# Use calibrateBerryIMU.py to get calibration values
# Calibrating the compass isn't mandatory, however a calibrated
# compass will result in a more accurate heading value.

magXmin = 0
magYmin = 0
magZmin = 0
magXmax = 0
magYmax = 0
magZmax = 0


'''
Here is an example:
magXmin = -1748
magYmin = -1025
magZmin = -1876
magXmax = 959
magYmax = 1651
magZmax = 708
Dont use the above values, these are just an example.
'''
############### END Calibration offsets #################


# Kalman filter variables.
# These are module-level state shared (via `global`) by the
# kalmanFilterX/Y/Z functions below.
Q_angle = 0.02    # process-noise term added to the angle covariance each step
Q_gyro = 0.0015   # process-noise term added to the bias covariance each step
R_angle = 0.005   # measurement-noise term for the accelerometer-derived angle
y_bias = 0.0      # estimated gyro bias, one per axis
x_bias = 0.0
z_bias = 0.0
XP_00 = 0.0       # 2x2 error-covariance matrix entries for the X-axis filter
XP_01 = 0.0
XP_10 = 0.0
XP_11 = 0.0
YP_00 = 0.0       # 2x2 error-covariance matrix entries for the Y-axis filter
YP_01 = 0.0
YP_10 = 0.0
YP_11 = 0.0
ZP_00 = 0.0       # 2x2 error-covariance matrix entries for the Z-axis filter
ZP_01 = 0.0
ZP_10 = 0.0
ZP_11 = 0.0
KFangleX = 0.0    # current filtered angle estimate, one per axis
KFangleY = 0.0
KFangleZ = 0.0
def kalmanFilterY(accAngle, gyroRate, DT):
    """One Kalman update step for the Y-axis angle estimate.

    Fuses the gyro rate with the accelerometer-derived angle over
    timestep DT, mutating the module-level filter state (KFangleY,
    y_bias and the YP_** covariance entries) in place.

    Returns the new filtered Y angle.
    """
    global KFangleY
    global Q_angle
    global Q_gyro
    global y_bias
    global YP_00
    global YP_01
    global YP_10
    global YP_11

    # Predict: advance the angle with the bias-corrected gyro rate.
    KFangleY += DT * (gyroRate - y_bias)

    # Predict: propagate the 2x2 error covariance.
    YP_00 += -DT * (YP_10 + YP_01) + Q_angle * DT
    YP_01 += -DT * YP_11
    YP_10 += -DT * YP_11
    YP_11 += +Q_gyro * DT

    # Update: innovation between measurement and prediction, and its variance.
    innovation = accAngle - KFangleY
    innovation_var = YP_00 + R_angle

    # Kalman gains for the angle and the bias.
    gain_angle = YP_00 / innovation_var
    gain_bias = YP_10 / innovation_var

    # Correct the state estimate and the gyro bias.
    KFangleY += gain_angle * innovation
    y_bias += gain_bias * innovation

    # Correct the covariance (note: rows 10/11 use the already-updated 00/01).
    YP_00 -= gain_angle * YP_00
    YP_01 -= gain_angle * YP_01
    YP_10 -= gain_bias * YP_00
    YP_11 -= gain_bias * YP_01

    return KFangleY
def kalmanFilterX(accAngle, gyroRate, DT):
    """One Kalman update step for the X-axis angle estimate.

    Fuses the gyro rate with the accelerometer-derived angle over
    timestep DT, mutating the module-level filter state (KFangleX,
    x_bias and the XP_** covariance entries) in place.

    Returns the new filtered X angle.
    """
    global KFangleX
    global Q_angle
    global Q_gyro
    global x_bias
    global XP_00
    global XP_01
    global XP_10
    global XP_11

    # Predict step: state and covariance.
    KFangleX += DT * (gyroRate - x_bias)
    XP_00 += -DT * (XP_10 + XP_01) + Q_angle * DT
    XP_01 += -DT * XP_11
    XP_10 += -DT * XP_11
    XP_11 += +Q_gyro * DT

    # Measurement update.
    residual = accAngle - KFangleX
    residual_var = XP_00 + R_angle
    k0 = XP_00 / residual_var
    k1 = XP_10 / residual_var

    KFangleX += k0 * residual
    x_bias += k1 * residual

    # Covariance correction (rows 10/11 deliberately use updated 00/01).
    XP_00 -= k0 * XP_00
    XP_01 -= k0 * XP_01
    XP_10 -= k1 * XP_00
    XP_11 -= k1 * XP_01

    return KFangleX
def kalmanFilterZ(accAngle, gyroRate, DT):
    """One Kalman update step for the Z-axis angle estimate.

    Fuses the gyro rate with the (accelerometer/magnetometer-derived)
    angle over timestep DT, mutating the module-level filter state
    (KFangleZ, z_bias and the ZP_** covariance entries) in place.

    Returns the new filtered Z angle.
    """
    global KFangleZ
    global Q_angle
    global Q_gyro
    global z_bias
    global ZP_00
    global ZP_01
    global ZP_10
    global ZP_11

    # Predict step: state and covariance.
    KFangleZ += DT * (gyroRate - z_bias)
    ZP_00 += -DT * (ZP_10 + ZP_01) + Q_angle * DT
    ZP_01 += -DT * ZP_11
    ZP_10 += -DT * ZP_11
    ZP_11 += +Q_gyro * DT

    # Measurement update.
    residual = accAngle - KFangleZ
    residual_var = ZP_00 + R_angle
    k0 = ZP_00 / residual_var
    k1 = ZP_10 / residual_var

    KFangleZ += k0 * residual
    z_bias += k1 * residual

    # Covariance correction (rows 10/11 deliberately use updated 00/01).
    ZP_00 -= k0 * ZP_00
    ZP_01 -= k0 * ZP_01
    ZP_10 -= k1 * ZP_00
    ZP_11 -= k1 * ZP_01

    return KFangleZ
def init():
    """Detect and initialise the BerryIMU and create shared filter state.

    Must be called once before getValues(): getValues() reads the median
    filter tables and the loop timestamp ``a`` as module-level globals.
    """
    # BUG FIX: these names are read via `global` in getValues(); without
    # declaring them global here they were assigned as function locals and
    # discarded, so getValues() crashed with a NameError.
    global acc_medianTable1X, acc_medianTable1Y, acc_medianTable1Z
    global acc_medianTable2X, acc_medianTable2Y, acc_medianTable2Z
    global mag_medianTable1X, mag_medianTable1Y, mag_medianTable1Z
    global mag_medianTable2X, mag_medianTable2Y, mag_medianTable2Z
    global a

    # Try a few times to detect the IMU. The original behaviour of
    # proceeding anyway after 5 failed attempts is preserved; callers that
    # need a hard failure should check the IMU module themselves.
    for _ in range(5):
        if IMU.detectIMU():  # Detect if BerryIMU is connected.
            break
        print(" No BerryIMU found!")

    IMU.initIMU()  # Initialise the accelerometer, gyroscope and compass

    # Set up the tables for the median filter. Fill them all with '1' so
    # we don't get a divide-by-zero error.
    acc_medianTable1X = [1] * ACC_MEDIANTABLESIZE
    acc_medianTable1Y = [1] * ACC_MEDIANTABLESIZE
    acc_medianTable1Z = [1] * ACC_MEDIANTABLESIZE
    acc_medianTable2X = [1] * ACC_MEDIANTABLESIZE
    acc_medianTable2Y = [1] * ACC_MEDIANTABLESIZE
    acc_medianTable2Z = [1] * ACC_MEDIANTABLESIZE
    mag_medianTable1X = [1] * MAG_MEDIANTABLESIZE
    mag_medianTable1Y = [1] * MAG_MEDIANTABLESIZE
    mag_medianTable1Z = [1] * MAG_MEDIANTABLESIZE
    mag_medianTable2X = [1] * MAG_MEDIANTABLESIZE
    mag_medianTable2Y = [1] * MAG_MEDIANTABLESIZE
    mag_medianTable2Z = [1] * MAG_MEDIANTABLESIZE

    # Timestamp used by getValues() to measure the loop period.
    a = datetime.datetime.now()
def getValues(motionAlgorithm = 'accurate'):
"""
motionAlgorithm determines whether to use complimentary filter or kalman filter to determine angles
'fast' = CF filter
'accurate' = Kalman filter
"""
global acc_medianTable1X
global acc_medianTable1Y
global acc_medianTable1Z
global acc_medianTable2X
global acc_medianTable2Y
global acc_medianTable2Z
global mag_medianTable1X
global mag_medianTable1Y
global mag_medianTable1Z
global mag_medianTable2X
global mag_medianTable2Y
global mag_medianTable2Z
global a
gyroXangle = 0.0
gyroYangle = 0.0
gyroZangle = 0.0
angleX = 0.0
angleY = 0.0
angleZ = 0.0
kalmanX = 0.0
kalmanY = 0.0
oldXMagRawValue = 0
oldYMagRawValue = 0
oldZMagRawValue = 0
oldXAccRawValue = 0
oldYAccRawValue = 0
oldZAccRawValue = 0
#Read the accelerometer,gyroscope and magnetometer values
ACCx = IMU.readACCx()
ACCy = IMU.readACCy()
ACCz = IMU.readACCz()
GYRx = IMU.readGYRx()
GYRy = IMU.readGYRy()
GYRz = IMU.readGYRz()
MAGx = IMU.readMAGx()
MAGy = IMU.readMAGy()
MAGz = IMU.readMAGz()
#Apply compass calibration
MAGx -= (magXmin + magXmax) /2
MAGy -= (magYmin + magYmax) /2
MAGz -= (magZmin + magZmax) /2
##Calculate loop Period(LP). How long between Gyro Reads
b = datetime.datetime.now() - a
a = datetime.datetime.now()
LP = b.microseconds/(1000000*1.0)
# outputString = "Loop Time %5.2f " % ( LP )
###############################################
#### Apply low pass filter ####
###############################################
MAGx = MAGx * MAG_LPF_FACTOR + oldXMagRawValue*(1 - MAG_LPF_FACTOR);
MAGy = MAGy * MAG_LPF_FACTOR + oldYMagRawValue*(1 - MAG_LPF_FACTOR);
MAGz = MAGz * MAG_LPF_FACTOR + oldZMagRawValue*(1 - MAG_LPF_FACTOR);
ACCx = ACCx * ACC_LPF_FACTOR + oldXAccRawValue*(1 - ACC_LPF_FACTOR);
ACCy = ACCy * ACC_LPF_FACTOR + oldYAccRawValue*(1 - ACC_LPF_FACTOR);
ACCz = ACCz * ACC_LPF_FACTOR + oldZAccRawValue*(1 - ACC_LPF_FACTOR);
oldXMagRawValue = MAGx
oldYMagRawValue = MAGy
oldZMagRawValue = MAGz
oldXAccRawValue = ACCx
oldYAccRawValue = ACCy
oldZAccRawValue = ACCz
#########################################
#### Median filter for accelerometer ####
#########################################
# cycle the table
for x in range (ACC_MEDIANTABLESIZE-1,0,-1 ):
acc_medianTable1X[x] = acc_medianTable1X[x-1]
acc_medianTable1Y[x] = acc_medianTable1Y[x-1]
acc_medianTable1Z[x] = acc_medianTable1Z[x-1]
# Insert the latest values
acc_medianTable1X[0] = ACCx
acc_medianTable1Y[0] = ACCy
acc_medianTable1Z[0] = ACCz
# Copy the tables
acc_medianTable2X = acc_medianTable1X[:]
acc_medianTable2Y = acc_medianTable1Y[:]
acc_medianTable2Z = acc_medianTable1Z[:]
# Sort table 2
acc_medianTable2X.sort()
acc_medianTable2Y.sort()
acc_medianTable2Z.sort()
# The middle value is the value we are interested in
ACCx = acc_medianTable2X[int(ACC_MEDIANTABLESIZE/2)];
ACCy = acc_medianTable2Y[int(ACC_MEDIANTABLESIZE/2)];
ACCz = acc_medianTable2Z[int(ACC_MEDIANTABLESIZE/2)];
#########################################
#### Median filter for magnetometer ####
#########################################
# cycle the table
for x in range (MAG_MEDIANTABLESIZE-1,0,-1 ):
mag_medianTable1X[x] = mag_medianTable1X[x-1]
mag_medianTable1Y[x] = mag_medianTable1Y[x-1]
mag_medianTable1Z[x] = mag_medianTable1Z[x-1]
# Insert the latest values
mag_medianTable1X[0] = MAGx
mag_medianTable1Y[0] = MAGy
mag_medianTable1Z[0] = MAGz
# Copy the tables
mag_medianTable2X = mag_medianTable1X[:]
mag_medianTable2Y = mag_medianTable1Y[:]
mag_medianTable2Z = mag_medianTable1Z[:]
# Sort table 2
mag_medianTable2X.sort()
mag_medianTable2Y.sort()
mag_medianTable2Z.sort()
# The middle value is the value we are interested in
MAGx = mag_medianTable2X[int(MAG_MEDIANTABLESIZE/2)];
MAGy = mag_medianTable2Y[int(MAG_MEDIANTABLESIZE/2)];
MAGz = mag_medianTable2Z[int(MAG_MEDIANTABLESIZE/2)];
#Convert Gyro raw to degrees per second
rate_gyr_x = GYRx * G_GAIN
rate_gyr_y | |
float(nav_line[3 + i*19:3 + (i + 1)*19])
# Reading Cuc/e Eccentricity/Cus/Sqrt(A)
elif cnt1 == 2:
for i in xrange(0, 4):
cnt3 += 1
temp[cnt2][cnt3] = float(nav_line[3 + i*19:3 + (i + 1)*19])
# Reading Toe/Cic/OMEGA/Cis
elif cnt1 == 3:
for i in xrange(0, 4):
cnt3 += 1
temp[cnt2][cnt3] = float(nav_line[3 + i*19:3 + (i + 1)*19])
# Reading Inclination/Crc/omega/OMEGA DOT
elif cnt1 == 4:
for i in xrange(0, 4):
cnt3 += 1
temp[cnt2][cnt3] = float(nav_line[3 + i*19:3 + (i + 1)*19])
# Reading IDOT/L2 Codes/GPS Week# (to go with Toe)/L2 P data flag
elif cnt1 == 5:
for i in xrange(0, 4):
cnt3 += 1
temp[cnt2][cnt3] = float(nav_line[3 + i*19:3 + (i + 1)*19])
# Reading SV accuracy/SV health/TGD/IODC
elif cnt1 == 6:
for i in xrange(0, 4):
cnt3 += 1
temp[cnt2][cnt3] = float(nav_line[3 + i*19:3 + (i + 1)*19])
# Reading Transmission time of message (sec of GPS week)
elif cnt1 == 7:
temp[cnt2][34] = float(nav_line[3:3 + 19])
# Update the raw_ephem array
raw_ephem = temp
# Update the row count in raw_ephem array
cnt2 += 1
# Extend the "temp" array by one row
temp = np.nan*np.ones((cnt2 + 1, 35))
# Cast the "raw_ephem" array into the extended "temp[0:n-1][:]"
temp[0:cnt2][:] = raw_ephem
# Just read another time entry
# Update the nav_line count
cnt0 += 1
# End of "for" loop over the navigation message
# Close the navigation file
nav_message.close()
# //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
# EXTRACTING THE EPHEMERIS OUT OF THE NAVIGATION MESSAGE
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# Post processing the ephemeris:
print 'Extracting the Ephemeris...'
# Rounding up the time stamp base on the minute
rndup_index = []
rnddown_index = []
# Loop over the entire "raw_ephem" array
for i in xrange(0, len(raw_ephem)):
# If the minute is not exactly zero, then round up hour, minute, and second
if raw_ephem[i, 5] != 0:
# If the minute is less than 30,
if raw_ephem[i, 5] < 30:
raw_ephem[i, 4] -= 1 # round the hour down by 1 unit
raw_ephem[i, 5] = 0 # set the minute to zero
raw_ephem[i, 6] = 0 # set the second to zero
rnddown_index.append(i)
# If the minute is greater than or equal 30,
elif raw_ephem[i, 5] >= 30:
raw_ephem[i, 4] += 1 # round the hour up by 1 unit
raw_ephem[i, 5] = 0 # set the minute to zero
raw_ephem[i, 6] = 0 # set the second to zero
rndup_index.append(i)
# Create the flags for different types of rounding
if rnddown_index:
rnddown_flag = True
elif rndup_index:
rndup_flag = True
elif not (rndup_index and rnddown_index):
rnddown_flag = False
rndup_flag = False
# Collect the initial and final TOWs from the flight data
TOWo = TOW[0] # Initial time of week (seconds)
TOWf = TOW[-1] # Final time of week (seconds)
# Days of week based on the initial time of week
DyOW = (TOWo - np.mod(TOWo, dd2sec))/dd2sec # days
# The remaining seconds of the day after extracting the days
rem_sec = np.mod(TOWo, dd2sec) # seconds
# Hours of the day
HrOD = (rem_sec - np.mod(rem_sec, hr2sec))/hr2sec # hours
# The remaining seconds of the hour after extracting days and hours
rem_sec = np.mod(rem_sec, hr2sec) # seconds
# Minutes of the hour
MnOH = (rem_sec - np.mod(rem_sec, mn2sec))/mn2sec # minutes
# Flight duration in GPS time (seconds)
deltaTOW = TOWf - TOWo
if (rem_sec + deltaTOW)/mn2sec <= 60:
LkUpHr = HrOD
elif (rem_sec + deltaTOW)/mn2sec > 60:
LkUpHr = HrOD + 1
print "Look-up hour: %d" % LkUpHr
# Look up for the nearest time stamp, then collect the PRNs in this time stamp.
ephem = np.nan*np.ones((32, 17))
sv_clock = np.nan * np.ones((32, 5))
search_again = False
prev_PRN = 0
PRN = 1
cnt4 = 0
# Loop over the entire "raw_ephem" array
for i in xrange(0, len(raw_ephem)):
# Look up the day of the month
if raw_ephem[i, 3] == DyOM:
# If found the day, look up the hour
if raw_ephem[i, 4] == LkUpHr:
# If the hour matches, check for new PRN
if raw_ephem[i, 0] == PRN and raw_ephem[i, 0] != prev_PRN:
# If new PRN is found, collect the ephemeris parameters
for j in xrange(0, 17):
if j == 0:
ephem[cnt4, j] = raw_ephem[i, 0]
# Col. 0: PRN
else:
ephem[cnt4, j] = raw_ephem[i, 10 + j]
# Col. 1: Crs (meter)
# Col. 2: Delta n (rad/sec)
# Col. 3: Mo (rad)
# Col. 4: Cuc (rad)
# Col. 5: Eccentricity, e
# Col. 6: Cus (rad)
# Col. 7: sqrt(A) (sqrt(meter))
# Col. 8: Toe, time of ephemeris (sec of GPS Week)
# Col. 9: Cic (rad)
# Col. 10: OMEGA (rad)
# Col. 11: Cis (rad)
# Col. 12: i_o, reference inclination (rad)
# Col. 13: Crc (meter)
# Col. 14: omega (rad)
# Col. 15: OMEGA DOT (rad/sec)
# Col. 16: IDOT, inclination rate (rad/sec)
# Collect the SV Clock information
for m in xrange(0, 5):
# Collect af0, af1, and af2
if m < 3:
sv_clock[cnt4, m] = raw_ephem[i, m + 7]
# Collect toc, time of clock (sec of GPS Week)
elif m == 3:
sv_clock[cnt4, m] = DyOW*dd2sec + LkUpHr*hr2sec + raw_ephem[i, 5]*mn2sec + raw_ephem[i, 6]
# Collect TGD, group delay (sec)
else:
sv_clock[cnt4, m] = raw_ephem[i, 32]
# Remember the current PRN
prev_PRN = PRN
# Update the PRN and the "ephemeris" row index
PRN += 1
cnt4 += 1
elif raw_ephem[i, 0] == (PRN - 1) and raw_ephem[i, 0] == prev_PRN:
# Else if the PRN is repeated, check for rounding condition
if rndup_flag:
# If the previous time stamp was rounded up,
print "Used the original time stamp instead of the rounded up on PRN %s" % prev_PRN
# use the current (original) time stamp instead of the rounding one.
for j in xrange(0, 17):
if j == 0:
ephem[cnt4 - 1, j] = raw_ephem[i, 0]
else:
ephem[cnt4 - 1, j] = raw_ephem[i, 10 + j]
# Collect the SV Clock information
for m in xrange(0, 5):
# Collect af0, af1, and af2
if m < 3:
sv_clock[cnt4 - 1, m] = raw_ephem[i, m + 7]
# Collect toc, time of clock (sec of GPS Week)
elif m == 3:
sv_clock[cnt4 - 1, m] = DyOW * dd2sec + LkUpHr * hr2sec + raw_ephem[i, 5] * mn2sec + \
raw_ephem[i, 6]
# Collect TGD, group delay (sec)
else:
sv_clock[cnt4 - 1, m] = raw_ephem[i, 32]
elif rnddown_flag:
# If the current time stamp is rounded down, skipped it and used the previous (original) one.
print 'Skipped repeated rounded down time stamp on PRN %s' % prev_PRN
# End of "raw_ephem" and did not find all 32 satellites in the time stamp,
# or did not find any time stamp that matches the look up hour.
elif i == (len(raw_ephem) - 1) and cnt4 < 32:
# If the hour does not match, adjust the look up hour based on the minute of the hour.
print "Did not find any time stamp that matches the initial TOW."
if MnOH <= 30:
# If the minute is less than or equal to 30,
HrOD -= 1 # decrease the look up hour by 1 unit
if (rem_sec + deltaTOW) / mn2sec <= 60:
LkUpHr = HrOD
elif (rem_sec + deltaTOW) / mn2sec > 60:
LkUpHr = HrOD + 1
print "Change look up hour to %d." % LkUpHr
elif MnOH > 30:
# If the minute is greater than 30,
HrOD += 1 # increase the look up hour by 1 unit
if (rem_sec + deltaTOW) / mn2sec <= 60:
LkUpHr = HrOD
elif (rem_sec + deltaTOW) / mn2sec > 60:
LkUpHr = HrOD + 1
print "Change look up hour to %d." % LkUpHr
search_again = True
if search_again:
prev_PRN = 0
| |
import base64
import json
import warnings
from collections import defaultdict, namedtuple
from enum import Enum, EnumMeta
from types import FunctionType
from typing import Any, Generator, NamedTuple, Tuple, Union
import lpipe.exceptions
import lpipe.logging
from lpipe import normalize, signature, utils
from lpipe.action import Action
from lpipe.contrib import kinesis, mindictive, sqs
from lpipe.payload import Payload
from lpipe.queue import Queue, QueueType
# Argument names lpipe injects into every action call; user payloads must
# not use them (see execute_action).
RESERVED_KEYWORDS = {"logger", "state", "payload"}
class EventSourceType(Enum):
    """Supported AWS Lambda trigger types handled by process_event()."""

    RAW = 1  # This may be a Cloudwatch or manually triggered event
    KINESIS = 2  # records delivered from a Kinesis stream
    SQS = 3  # records delivered from an SQS queue
class State(NamedTuple):
    """Represents the state of the call to process_event.

    Args:
        event (Any): https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html
        context (Any): https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html
        paths (dict): Keys are path names / enums and values are a list of Action objects
        path_enum (EnumMeta): An Enum class which define the possible paths available in this lambda.
        logger: Logger-like object used throughout record processing.
        exception_handler (FunctionType): A function which will be used to capture exceptions (e.g. contrib.sentry.capture)
        debug (bool): Enables verbose logging when True.
    """

    event: Any
    context: Any
    paths: dict
    path_enum: EnumMeta
    logger: Any
    debug: bool = False
    # NOTE(review): annotated FunctionType but defaults to None; callers
    # should treat this as Optional[FunctionType].
    exception_handler: FunctionType = None
def build_event_response(n_records, n_ok, logger) -> dict:
    """Summarise a process_event() run as a response dict.

    Adds a serialised "logs" entry when the logger collected any events.
    """
    summary = {
        "event": "Finished.",
        "stats": {"received": n_records, "successes": n_ok},
    }
    events = getattr(logger, "events", None)
    if events:
        summary["logs"] = json.dumps(events, cls=utils.AutoEncoder)
    return summary
def log_exception(state: State, e: BaseException):
    """Log an exception on the state's logger and forward it to the
    configured exception handler, if one was provided."""
    state.logger.error(utils.exception_to_str(e))
    handler = state.exception_handler
    if handler:
        handler(e)
def parse_event(
    event: Any, event_source_type: EventSourceType
) -> Generator[Tuple[Any, dict, str], None, None]:
    """Yield (raw_record, decoded_payload, event_source) triples from a
    Lambda event.

    Raises:
        lpipe.exceptions.InvalidPayloadError: if records cannot be
            extracted from the event, or a record does not match the
            event source type.
    """
    try:
        records = get_records_from_event(event_source_type, event)
        assert isinstance(records, list)
    except AssertionError as e:
        raise lpipe.exceptions.InvalidPayloadError(
            f"Failed to extract records from event: {event}"
        ) from e

    for record in records:
        try:
            decoded = get_payload_from_record(event_source_type, record)
            source = get_event_source(event_source_type, record)
        except TypeError as e:
            raise lpipe.exceptions.InvalidPayloadError(
                f"Bad record provided for event source type {event_source_type}. {record} {utils.exception_to_str(e)}"
            ) from e
        yield (record, decoded, source)
def parse_record(
    state: State, record: Any, event_source: str, default_path: Union[str, Enum] = None
) -> Payload:
    """Build and validate a Payload from one decoded record.

    When default_path is given, the whole record becomes the payload
    kwargs; otherwise the record itself must carry 'path' and 'kwargs'.

    Raises:
        lpipe.exceptions.InvalidPayloadError: if required fields are missing
            or validation against the path enum fails.
    """
    try:
        if default_path:
            payload_args = {"path": default_path, "kwargs": record}
        else:
            assert "path" in record
            assert "kwargs" in record
            payload_args = {"path": record["path"], "kwargs": record["kwargs"]}
        payload_args["event_source"] = event_source
        return Payload(**payload_args).validate(state.path_enum)
    except AssertionError as e:
        raise lpipe.exceptions.InvalidPayloadError(
            "'path' or 'kwargs' missing from payload."
        ) from e
def process_event(
    event: Any,
    context: Any,
    event_source_type: EventSourceType,
    paths: dict = None,
    path_enum: EnumMeta = None,
    default_path: Union[str, Enum] = None,
    call: FunctionType = None,
    logger: Any = None,
    debug: bool = False,
    exception_handler: FunctionType = None,
) -> dict:
    """Process an AWS Lambda event.

    Args:
        event: https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html
        context: https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html
        event_source_type (EventSourceType): The event source type.
        paths (dict): Keys are path names / enums and values are a list of Action objects
        path_enum (EnumMeta): An Enum class which define the possible paths available in this lambda.
        default_path (Union[str, Enum]): The path to be run for every message received.
        call (FunctionType): A callable which, if set and `paths` is not, will disable directed-graph workflow features and default to calling this
        logger: Optional logger-like object; one is created by lpipe.logging.setup if omitted.
        debug (bool): Enables verbose logging when True.
        exception_handler (FunctionType): A function which will be used to capture exceptions (e.g. contrib.sentry.capture)

    Returns:
        dict: Summary with "event" and "stats" keys, plus "logs" (when the
        logger collected events) and "output" (when any record produced a
        truthy return value).

    Raises:
        lpipe.exceptions.InvalidConfigurationError: If event_source_type is
            invalid, or both `call` and `paths` were supplied.
        lpipe.exceptions.FailCatastrophically: If any record raised one.
    """
    logger = lpipe.logging.setup(logger=logger, context=context, debug=debug)
    logger.debug(
        f"Event received. event_source_type: {event_source_type}, event: {event}"
    )
    try:
        assert isinstance(event_source_type, EventSourceType)
    except AssertionError as e:
        raise lpipe.exceptions.InvalidConfigurationError(
            f"Invalid event source type '{event_source_type}'"
        ) from e

    # A bare callable disables the directed-graph interface: wrap it in a
    # single auto-generated path so the rest of the pipeline stays uniform.
    if isinstance(call, FunctionType):
        if not paths:
            default_path = "AUTO_PATH"
            paths = {default_path: [call]}
        else:
            raise lpipe.exceptions.InvalidConfigurationError(
                "If you initialize lpipe with a function/callable, you may not define paths, as you have disabled the directed-graph interface."
            )

    paths, path_enum = normalize.normalize_path_enum(path_enum=path_enum, paths=paths)

    state = State(
        event=event,
        context=context,
        logger=logger,
        debug=debug,
        paths=paths,
        path_enum=path_enum,
        exception_handler=exception_handler,
    )

    n_records = 0
    successful_records = []
    _output = []
    _exceptions = []
    try:
        for encoded_record, record, event_source in parse_event(
            event, event_source_type
        ):
            n_records += 1
            ret = None
            try:
                payload = parse_record(
                    state=state,
                    record=record,
                    event_source=event_source,
                    default_path=default_path,
                )
                with logger.context(bind={"payload": payload.to_dict()}):
                    logger.log("Record received.")
                    # Run your path/action/functions against the payload found in this record.
                    ret = execute_payload(payload=payload, state=state)
                # Will handle cleanup for successful records later, if necessary.
                successful_records.append(encoded_record)
            except lpipe.exceptions.FailButContinue as e:
                """Drop poisoned records on the floor
                Captures:
                    InvalidPayloadError
                    InvalidPathError
                """
                log_exception(state, e)
                continue
            except lpipe.exceptions.FailCatastrophically as e:
                """Preserve poisoned records, trigger redrive
                Captures:
                    InvalidConfigurationError
                """
                log_exception(state, e)
                _exceptions.append({"exception": e, "record": record})
            # Reached for successful records AND FailCatastrophically ones
            # (ret stays None for the latter); FailButContinue skips it.
            _output.append(ret)
    except AssertionError as e:
        # NOTE(review): parse_event converts its own asserts into
        # InvalidPayloadError, so this guard appears defensive — confirm
        # what can still raise a bare AssertionError here.
        logger.error(f"'records' is not a list {utils.exception_to_str(e)}")
        return build_event_response(0, 0, logger)

    response = build_event_response(
        n_records=n_records, n_ok=len(successful_records), logger=logger
    )

    # Handle cleanup for successful records, if necessary, before creating an error state.
    if _exceptions:
        advanced_cleanup(event_source_type, successful_records, logger)
        raise lpipe.exceptions.FailCatastrophically(
            f"Encountered catastrophic exceptions while handling one or more records: {response}"
        )

    if any(_output):
        response["output"] = _output

    return response
def execute_payload(payload: Payload, state: State) -> Any:
    """Given a Payload, execute Actions in a Path and fire off messages to
    the payload's Queues.

    Returns the value produced by the last executed action, if any.
    """
    ret = None

    # Coerce a string path into a member of the configured path enum.
    raw_path = payload.path
    if raw_path is not None and not isinstance(raw_path, state.path_enum):
        payload.path = normalize.normalize_path(state.path_enum, raw_path)

    if isinstance(payload.path, Enum):  # PATH
        actions = normalize.normalize_actions(state.paths[payload.path])
        state.paths[payload.path] = actions
        for action in actions:
            ret = execute_action(payload=payload, action=action, state=state)
        return ret

    if isinstance(payload.queue, Queue):  # QUEUE (aka SHORTCUT)
        queue = payload.queue
        assert isinstance(queue.type, QueueType)
        if queue.path:
            record = {"path": queue.path, "kwargs": payload.kwargs}
        else:
            record = payload.kwargs
        log_bind = {
            "path": queue.path,
            "queue_type": queue.type,
            "queue_name": queue.name,
            "record": record,
        }
        with state.logger.context(bind=log_bind):
            state.logger.log("Pushing record.")
            put_record(queue=queue, record=record)
        return ret

    state.logger.info(
        f"Path should be a string (path name), Path (path Enum), or Queue: {payload.path})"
    )
    return ret
def execute_action(payload: Payload, action: Action, state: State) -> Any:
    """Execute functions, paths, and queues (shortcuts) in an Action.

    Args:
        payload (Payload): Payload whose kwargs are bound to each function call.
        action: (Action): The action whose functions/paths/queues are run.
        state (State): Shared processing state (paths, logger, etc.).

    Returns:
        The return value of the last function or downstream payload executed.

    Raises:
        lpipe.exceptions.InvalidPayloadError: If the payload kwargs do not
            satisfy the action's function signatures.
    """
    assert isinstance(action, Action)
    ret = None

    # Build action kwargs and validate type hints
    try:
        if RESERVED_KEYWORDS & set(payload.kwargs):
            state.logger.warning(
                f"Payload contains a reserved argument name. Please update your function use a different argument name. Reserved keywords: {RESERVED_KEYWORDS}"
            )
        # Seed reserved names with None so signature validation passes,
        # then strip them again; the real values are injected below via
        # default_kwargs.
        action_kwargs = build_action_kwargs(
            action, {**{k: None for k in RESERVED_KEYWORDS}, **payload.kwargs}
        )
        for k in RESERVED_KEYWORDS:
            action_kwargs.pop(k, None)
    except (TypeError, AssertionError) as e:
        raise lpipe.exceptions.InvalidPayloadError(
            f"Failed to run {payload.path.name} {action} due to {utils.exception_to_str(e)}"
        ) from e

    default_kwargs = {"logger": state.logger, "state": state, "payload": payload}

    # Run action functions
    for f in action.functions:
        assert isinstance(f, FunctionType)
        try:
            # TODO: if ret, evaluate viability of passing to next in sequence
            _log_context = {"path": payload.path.name, "function": f.__name__}
            with state.logger.context(bind={**_log_context, "kwargs": action_kwargs}):
                state.logger.log("Executing function.")
            with state.logger.context(bind=_log_context):
                ret = f(**{**action_kwargs, **default_kwargs})
            # A function may return Payload(s) to trigger further work.
            ret = return_handler(ret=ret, state=state)
        except lpipe.exceptions.LPBaseException:
            # CAPTURES:
            #    lpipe.exceptions.FailButContinue
            #    lpipe.exceptions.FailCatastrophically
            raise
        except Exception as e:
            state.logger.error(
                f"Skipped {payload.path.name} {f.__name__} due to unhandled Exception {e.__class__.__name__}. This is very serious; please update your function to handle this."
            )
            log_exception(state, e)

    # Fan out to downstream paths and queues declared on the action,
    # reusing this action's kwargs.
    payloads = []
    for _path in action.paths:
        payloads.append(
            Payload(
                path=normalize.normalize_path(state.path_enum, _path),
                kwargs=action_kwargs,
                event_source=payload.event_source,
            ).validate(state.path_enum)
        )
    for _queue in action.queues:
        payloads.append(
            Payload(
                queue=_queue, kwargs=action_kwargs, event_source=payload.event_source
            ).validate()
        )
    for p in payloads:
        ret = execute_payload(payload=p, state=state)

    return ret
def return_handler(ret: Any, state: State) -> Any:
if not ret:
return ret
_payloads = []
try:
if isinstance(ret, Payload):
_payloads.append(ret.validate(state.path_enum))
elif isinstance(ret, list):
for r in ret:
if isinstance(r, Payload):
_payloads.append(r.validate(state.path_enum))
except Exception as e:
state.logger.debug(utils.exception_to_str(e))
raise lpipe.exceptions.FailButContinue(
f"Something went wrong while extracting Payloads from a function return value: {ret}"
) from e
if _payloads:
state.logger.debug(f"{len(_payloads)} dynamic payloads received")
for p in _payloads:
state.logger.debug(f"executing dynamic payload: {p}")
try:
ret = execute_payload(payload=p, state=state)
except Exception:
state.logger.error(f"Failed to execute returned {p}")
raise
return ret
def advanced_cleanup(
    event_source_type: EventSourceType, records: list, logger, **kwargs
):
    """If exceptions were raised, clean up all successful records before raising.

    Args:
        event_source_type (EventSourceType): The source the records came from.
        records (list): Records which we successfully executed.
        logger: Logger-like object for warnings.
    """
    if event_source_type != EventSourceType.SQS:
        # If the queue type was not handled, no cleanup was necessary by lpipe.
        return
    cleanup_sqs_records(records, logger)
def cleanup_sqs_records(records: list, logger):
    """Batch-delete successfully processed messages from their SQS queues.

    Failures are logged as warnings only: AWS still deletes the batch when
    the lambda succeeds, though mixed success/failure batches may send
    already-processed messages to the DLQ.

    Args:
        records (list): Raw SQS event records that were successfully executed.
        logger: Logger-like object with a .warning() method.
    """
    base_err_msg = (
        "Unable to delete successful records messages from SQS queue. AWS should "
        "still handle this automatically when the lambda finishes executing, but "
        "this may result in successful messages being sent to the DLQ if any "
        "other messages fail."
    )
    try:
        Message = namedtuple("Message", ["message_id", "receipt_handle"])
        # Group messages by their source queue ARN.
        messages = defaultdict(list)
        for record in records:
            m = Message(
                message_id=mindictive.get_nested(record, ["messageId"]),
                receipt_handle=mindictive.get_nested(record, ["receiptHandle"]),
            )
            messages[mindictive.get_nested(record, ["eventSourceARN"])].append(m)
        for arn, batch in messages.items():
            queue_url = sqs.get_queue_url(arn)
            sqs.batch_delete_messages(
                queue_url,
                [
                    # BUG FIX: iterate this queue's batch, not the dict of
                    # ARNs — `for m in messages` yielded ARN strings and
                    # raised AttributeError on m.message_id (silently
                    # swallowed by the broad except below).
                    {"Id": m.message_id, "ReceiptHandle": m.receipt_handle}
                    for m in batch
                ],
            )
    except KeyError as e:
        logger.warning(
            f"{base_err_msg} If you're testing, this is not an issue. {utils.exception_to_str(e)}"
        )
    except Exception as e:
        logger.warning(f"{base_err_msg} {utils.exception_to_str(e)}")
def build_action_kwargs(action: | |
of the
solution for D.
The default is False
niter : int, optional
If N-R refinement is to be done, niter is how many iterations
to compute.
The default is 3.
grid : boolean, optional
whether or not to show the axes grids.
The default is False.
FVaverage : boolean, optional
Whether or not to average the abundance profiles over a
convective turnover timescale. See also tauconv.
The default is False.
tauconv : float, optional
If averaging the abundance profiles over a convective turnover
timescale, give the convective turnover timescale (seconds).
The default value is None.
returnY : boolean, optional
If True, return abundance vectors as well as radius and diffusion
coefficient vectors
The default is False.
Returns
--------
x : array
radial co-ordinates (Mm) for which we have a diffusion coefficient
D : array
Diffusion coefficient (cm^2/s)
'''
xlong = self.get('Y',fname=fname1,resolution='l') # for plotting
if debug: print(xlong)
x = xlong
# x = x * 1.e8
def mf(fluid,fname):
'''
Get mass fraction profile of fluid 'fluid' at fname.
'''
y = self.get(fluid,fname=fname,resolution='l')
if fluid == 'FV H+He':
rhofluid = self.get('Rho H+He',fname=fname,resolution='l')
else:
rhofluid = self.get('RHOconv',fname=fname,resolution='l')
rho = self.get('Rho',fname=fname,resolution='l')
y = rhofluid * y / rho
return y
if FVaverage is False:
y1 = mf(fluid,fname2)
y1long = y1 # for plotting
y0 = mf(fluid,fname1)
y0long = y0 # for plotting
else:
if tauconv is None:
raise IOError("Please define tauconv")
# Find the dumps accross which one should average:
# first profile:
myt0 = self.get('t',fname1)[-1]
myt01 = myt0 - tauconv / 2.
myt02 = myt0 + tauconv / 2.
myidx01 = np.abs(self.get('t') - myt01).argmin()
myidx02 = np.abs(self.get('t') - myt02).argmin()
mycyc01 = self.cycles[myidx01]
mycyc02 = self.cycles[myidx02]
# second profile:
myt1 = self.get('t',fname2)[-1]
myt11 = myt1 - tauconv / 2.
myt12 = myt1 + tauconv / 2.
myidx11 = np.abs(self.get('t') - myt11).argmin()
myidx12 = np.abs(self.get('t') - myt12).argmin()
mycyc11 = self.cycles[myidx11]
mycyc12 = self.cycles[myidx12]
# do the average for the first profile:
ytmp = np.zeros(len(x))
count=0
for cyc in range(mycyc01,mycyc02):
ytmp += mf(fluid,cyc)
count+=1
y0 = ytmp / float(count)
# do the average for the second profile:
ytmp = np.zeros(len(x))
count=0
for cyc in range(mycyc11,mycyc12):
ytmp += mf(fluid,cyc)
count+=1
y1 = ytmp / float(count)
y0long = y0
y1long = y1
if fluid == 'FV H+He':
y1 = y1[::-1]
x = x[::-1]
y0 = y0[::-1]
if debug: print(len(xlong), len(y0long))
idx0 = np.abs(np.array(self.cycles) - fname1).argmin()
idx1 = np.abs(np.array(self.cycles) - fname2).argmin()
t0 = self.get('t')[idx0]
t1 = self.get('t')[idx1]
deltat = t1 - t0
# now we want to exclude any zones where the abundances
# of neighboring cells are the same. This is hopefully
# rare inside the computational domain and limited to only
# a very small number of zones
indexarray = np.where(np.diff(y0) == 0)[0]
print('removing zones:', indexarray)
y1 = np.delete(y1,indexarray)
y0 = np.delete(y0,indexarray)
x = np.delete(x,indexarray)
dt = float(deltat)
# Calculate D starting from outer boundary:
D = np.zeros(len(x))
m = len(x) - 1
# now do the solution:
for i in range(m,1,-1):
xl = np.float64(x[i] - x[i-1])
r = np.float64(y0[i] - y1[i])
p = np.float64(dt * (y0[i] - y0[i-1]) / (xl * xl))
if i == m:
D[i] = np.float64(r / p)
else:
xr = np.float64(x[i+1] - x[i])
xm = np.float64(xl + xr) / 2.
q = np.float64(dt * (y0[i] - y0[i+1]) / (xr * xm))
D[i] = np.float64((r - q * D[i+1]) / p)
D = D * 1.e16 # Mm^2/s ==> cm^2/s
x = x * 1e8 # Mm ==> cm
pl.figure()
pl.plot(xlong,np.log10(y0long),utils.linestyle(1)[0],\
markevery=utils.linestyle(1)[1],\
label='fluid above'+' '+str(fname1))
pl.plot(xlong,np.log10(y1long),utils.linestyle(2)[0],\
markevery=utils.linestyle(2)[1],\
label='fluid above'+' '+str(fname2))
pl.ylabel('$\log\,X$ '+fluid.replace('FV',''))
pl.xlabel('r / Mm')
pl.ylim(-8,0.1)
pl.legend(loc='lower left').draw_frame(False)
if grid:
pl.grid()
pl.twinx()
pl.plot(x/1.e8,np.log10(D),'k-',\
label='$D$') #'$D > 0$')
pl.plot(x/1.e8,np.log10(-D),'k--',\
label='$D < 0$')
pl.ylabel('$\log D\,/\,{\\rm cm}^2\,{\\rm s}^{-1}$')
pl.legend(loc='upper right').draw_frame(False)
if returnY:
return x/1.e8, D, y0, y1
else:
return x/1.e8,D
def plot_entrainment_rates(self,dumps,r1,r2,fit=False,fit_bounds=None,save=False,lims=None,ifig=4,
Q = 1.944*1.60218e-6/1e43,RR = 8.3144598,amu = 1.66054e-24/1e27,
airmu = 1.39165,cldmu = 0.725,fkair = 0.203606102635,
fkcld = 0.885906040268,AtomicNoair = 6.65742024965,
AtomicNocld = 1.34228187919):
'''
Plots entrainment rates for burnt and unburnt material
Parameters
----------
data_path : str
data path
r1 : float
This function will only search for the convective
boundary in the range between r1/r2
r2 : float
fit : boolean, optional
show the fits used in finding the upper boundary
fit_bounds : array
The time to start and stop the fit for average entrainment
rate units in minutes
save : bool, optional
save the plot or not
lims : list, optional
axes lims [xl,xu,yl,yu]
Examples
---------
.. ipython::
In [136]: data_dir = '/data/ppm_rpod2/YProfiles/'
.....: project = 'AGBTP_M2.0Z1.e-5'
.....: ppm.set_YProf_path(data_dir+project)
@savefig plot_entrainment_rates.png width=6in
In [136]: F4 = ppm.yprofile('F4')
.....: dumps = np.array(range(0,1400,100))
.....: F4.plot_entrainment_rates(dumps,27.7,28.5)
'''
atomicnocldinv = 1./AtomicNocld
atomicnoairinv = 1./AtomicNoair
patience0 = 5
patience = 10
nd = len(dumps)
t = np.zeros(nd)
L_H = np.zeros(nd)
t00 = time.time()
t0 = t00
k = 0
for i in range(nd):
t[i] = self.get('t', fname = dumps[i], resolution = 'l')[-1]
if dumps[i] >= 620:
L_H[i] = self.get('L_C12pg', fname = dumps[i], resolution = 'l', airmu = airmu, \
cldmu = cldmu, fkair = fkair, fkcld = fkcld, AtomicNoair = AtomicNoair, \
AtomicNocld = AtomicNocld, corr_fact = 1.0)
else:
L_H[i] = 0.
t_now = time.time()
if (t_now - t0 >= patience) or \
((t_now - t00 < patience) and (t_now - t00 >= patience0) and (k == 0)):
time_per_dump = (t_now - t00)/float(i + 1)
time_remaining = (nd - i - 1)*time_per_dump
print('Processing will be done in {:.0f} s.'.format(time_remaining))
t0 = t_now
k += 1
ndot = L_H/Q
X_H = fkcld*1./AtomicNocld
mdot_L = 1.*amu*ndot/X_H
dt = cdiff(t)
m_HHe_burnt = (1e27/nuconst.m_sun)*np.cumsum(mdot_L*dt)
m_HHe_present = self.entrainment_rate(dumps,r1,r2, var='vxz', criterion='min_grad', offset=-1., \
integrate_both_fluids=False, show_output=False, return_time_series=True)
m_HHe_total = m_HHe_present + m_HHe_burnt
if fit_bounds is not None:
idx2 = list(range(np.argmin(t/60. < fit_bounds[0]), np.argmin(t/60. < fit_bounds[1])))
print(idx2)
m_HHe_total_fc2 = np.polyfit(t[idx2], m_HHe_total[idx2], 1)
m_HHe_total_fit2 = m_HHe_total_fc2[0]*t[idx2] + m_HHe_total_fc2[1]
mdot2 = m_HHe_total_fc2[0]
mdot2_str = '{:e}'.format(mdot2)
parts = mdot2_str.split('e')
mantissa = float(parts[0])
exponent = int(parts[1])
lbl2 = r'$\dot{{\mathrm{{M}}}}_\mathrm{{e}} = {:.2f} \times 10^{{{:d}}}$ M$_\odot$ s$^{{-1}}$'.\
format(mantissa, exponent)
pl.close(ifig); pl.figure(ifig)
if fit:
pl.plot(t[idx2]/60., m_HHe_total_fit2, '-', color = 'k', lw = 0.5, \
zorder = 102, label = lbl2)
pl.plot(t/60., m_HHe_present, ':', color = cb(3), label = 'present')
pl.plot(t/60., m_HHe_burnt, '--', color = cb(6), label = 'burnt')
pl.plot(t/60., m_HHe_total, '-', color = cb(5), label = 'total')
pl.xlabel('t / min')
pl.ylabel(r'M$_\mathrm{HHe}$ [M_Sun]')
if lims is not None:
pl.axis(lims)
pl.gca().ticklabel_format(style='sci', scilimits=(0,0), axis='y')
pl.legend(loc = 0)
pl.tight_layout()
if save:
pl.savefig('entrainment_rate.pdf')
'''
def plot_entrainment_rates(self,rp_set,dumps,r1,r2,burning_on_from=0,fit=False,fit_bounds=None,
save=False,lims=None,T9_func=None,return_burnt=False,Q = 1.944,
airmu = 1.39165,cldmu = 0.725,fkcld = 0.885906040268,
AtomicNocld = 1.34228187919):
Plots entrainment rates for burnt and unburnt material
Parameters
----------
rp_set : rp_set instance
r1/r2 : float
This function will only search for the convective
boundary in the range between r1/r2
fit : boolean, optional
show the fits used in finding the upper boundary
fit_bounds : [int,int]
The time to start and stop the fit for average entrainment
rate units in minutes
save : bool, optional
save the plot or not
lims : list, optional
axes lims [xl,xu,yl,yu]
amu = 1.66054e-24/1e27
atomicnocldinv = 1./AtomicNocld
patience0 = 5
patience = 10
nd = len(dumps)
t = np.zeros(nd)
L_C12C12 = np.zeros(nd)
r = self.get('Y', fname=dumps[0], resolution='l')
idx_top = np.argmin(np.abs(r - r1))
idx = range(idx_top, len(r))
dV = -4.*np.pi*r**2*cdiff(r)
t00 = time.time()
t0 = t00
k = 0
for i in range(nd):
t[i] = self.get('t', fname = dumps[i], resolution = 'l')[-1]
if dumps[i] >= burning_on_from:
enuc_C12C12 = self.get('enuc_C12C12', dumps[i], airmu=airmu, \
cldmu=cldmu, fkcld=fkcld, AtomicNocld=AtomicNocld, \
Q=Q, T9_func=T9_func)
rp = rp_set.get_dump(dumps[i])
avg_fv = rp.get_table('fv')[0, ::-1, 0]
sigma_fv = rp.get_table('fv')[3, ::-1, 0]
avg_fv[avg_fv < 1e-9] = 1e-9
eta = 1. + (sigma_fv/avg_fv)**2
# limit eta where avg_fv --> 0
eta[avg_fv < 1e-6] = 1.
L_C12C12[i] = np.sum(eta[idx]*enuc_C12C12[idx]*dV[idx])
t_now = time.time()
if (t_now - t0 >= patience) or \
((t_now - t00 < patience) and \
(t_now - t00 >= patience0) and \
(k ==0)):
time_per_dump = (t_now - t00)/float(i + 1)
time_remaining | |
<reponame>rcasteran/jarvis4se<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Module containing all object's class, i.e. all objects that will be manipulated by
Systems engineers"""
# Libraries
from enum import Enum
# Modules
from . import util
class BaseType(Enum):
    """Base types of the objects manipulated by systems engineers.

    Provides two-way conversion between members and their human-readable
    labels: ``str(member)`` returns the label and :meth:`get_enum` maps a
    label back to a member.  The two directions share one label table so
    they cannot drift apart.
    """
    DATA = 0
    FUNCTION = 1
    FUNCTIONAL_ELEMENT = 2
    FUNCTIONAL_INTERFACE = 3
    PHYSICAL_ELEMENT = 4
    PHYSICAL_INTERFACE = 5

    def __str__(self):
        """Return the human-readable label for this member ('' if unmapped)."""
        # Single source of truth for labels; replaces the former if/elif chain.
        labels = {
            BaseType.DATA: 'Data',
            BaseType.FUNCTION: 'Function',
            BaseType.FUNCTIONAL_ELEMENT: 'Functional element',
            BaseType.FUNCTIONAL_INTERFACE: 'Functional interface',
            BaseType.PHYSICAL_ELEMENT: 'Physical element',
            BaseType.PHYSICAL_INTERFACE: 'Physical interface',
        }
        return labels.get(self, '')

    @classmethod
    def get_enum(cls, obj_type):
        """Return the member whose label equals obj_type, or None if unknown."""
        for member in cls:
            if str(member) == obj_type:
                return member
        return None
class FunctionType(Enum):
    """Function type: Compatible with ArKItect and XMI.

    ``str(member)`` yields the display label; :meth:`get_name` maps a label
    back to a member (UNKNOWN for unrecognized labels).
    """
    UNKNOWN = 0
    ENABLING = 1
    HIGH_LEVEL_FUNCTION = 2
    FUNCTION = 3
    HIGH_LEVEL_SAFETY = 4
    SAFETY = 5
    ADD = 6
    SUBTRACT = 7
    MULTIPLY = 8
    DIVIDE = 9

    def __str__(self):
        """Return the display label ('unknown' for UNKNOWN or unmapped)."""
        # Single label table replaces the former duplicated if/elif chains.
        labels = {
            FunctionType.ENABLING: 'Enabling function',
            FunctionType.HIGH_LEVEL_FUNCTION: 'High level function',
            FunctionType.FUNCTION: 'Function',
            FunctionType.HIGH_LEVEL_SAFETY: 'High level safety function',
            FunctionType.SAFETY: 'Safety function',
            FunctionType.ADD: 'Add',
            FunctionType.SUBTRACT: 'Subtract',
            FunctionType.MULTIPLY: 'Multiply',
            FunctionType.DIVIDE: 'Divide',
        }
        return labels.get(self, 'unknown')

    @classmethod
    def get_name(cls, function_type):
        """Return the member matching the given label, UNKNOWN if none does."""
        for member in cls:
            if member is not cls.UNKNOWN and str(member) == function_type:
                return member
        return cls.UNKNOWN

    @classmethod
    def get_parent_function_type_list(cls):
        """Return labels of the types allowed to be parents, i.e. composed."""
        parent_types = (cls.FUNCTION, cls.HIGH_LEVEL_FUNCTION, cls.SAFETY,
                        cls.HIGH_LEVEL_SAFETY, cls.UNKNOWN)
        return [str(i) for i in parent_types]
class Function:
    """Function model object, compatible with ArKItect and XMI tooling.

    Holds identification data, the containment hierarchy (parent/children),
    and tool-specific handles (ArKItect object, XMI ports, operand role).
    """
    def __init__(self, p_id='', p_name='', p_alias='', p_type=BaseType.FUNCTION, p_parent=None,
                 p_ark_obj=None, p_role=None, p_operand=None, p_derived=''):
        """Initialize identification, typing, hierarchy and tool data."""
        # Identification
        self.id = p_id
        self.name = p_name
        self.alias = p_alias
        # Typing and hierarchy
        self.type = p_type
        self.parent = p_parent
        self.child_list = set()
        # Tool-specific attributes
        self.ark_obj = p_ark_obj      # ArKItect handle
        self.port_list = set()        # XMI ports
        self.input_role = p_role
        self.operand = p_operand
        self.derived = p_derived

    def set_id(self, p_id):
        """Assign the identifier."""
        self.id = p_id

    def set_name(self, p_name):
        """Assign the name."""
        self.name = p_name

    def set_alias(self, p_alias):
        """Assign the alias."""
        self.alias = p_alias

    def set_type(self, p_type):
        """Assign the type, then refresh the operand derived from it."""
        self.type = p_type
        self.set_operand()

    def set_parent(self, p_parent):
        """Assign the parent object."""
        self.parent = p_parent

    def set_ark_obj(self, p_ark_obj):
        """Specific for ArKItect: store the native object handle."""
        self.ark_obj = p_ark_obj

    def add_child(self, p_child):
        """Register a child in child_list."""
        self.child_list.add(p_child)

    def add_port(self, p_port):
        """Specific for XMI: register a port."""
        self.port_list.add(p_port)

    def set_input_role(self, p_role):
        """Assign the input role."""
        self.input_role = p_role

    def set_operand(self):
        """Set the operand label for arithmetic function types.

        Only DIVIDE and SUBTRACT currently map to an operand; any other
        type leaves the current operand untouched.
        """
        operand_by_type = {
            FunctionType.DIVIDE: "denominator",
            FunctionType.SUBTRACT: "subtractor",
        }
        if self.type in operand_by_type:
            self.operand = operand_by_type[self.type]

    def set_derived(self, p_derived):
        """Assign the derived reference."""
        self.derived = p_derived
class SystemElementType(Enum):
    """System element type: Compatible with LT SPICE.

    ``str(member)`` yields the label (LTSPICE component names for the
    electrical types); :meth:`get_name` maps a label back to a member,
    returning UNKNOWN for unrecognized labels.
    """
    UNKNOWN = 0
    HIGH_LEVEL = 1
    SYSTEM = 2
    RESISTOR = 3
    CAPACITOR = 4
    ZENER = 5
    NPN = 6
    PNP = 7
    DIODE = 8
    VOLTAGE = 9
    LED = 10

    def __str__(self):
        """Return the display label ('unknown' for UNKNOWN or unmapped)."""
        # One label table for both conversion directions; the lowercase
        # component names are the LTSPICE-compatible identifiers.
        labels = {
            SystemElementType.HIGH_LEVEL: 'High level system element',
            SystemElementType.SYSTEM: 'System element',
            SystemElementType.RESISTOR: 'res',
            SystemElementType.CAPACITOR: 'cap',
            SystemElementType.ZENER: 'zener',
            SystemElementType.NPN: 'npn',
            SystemElementType.PNP: 'pnp',
            SystemElementType.DIODE: 'diode',
            SystemElementType.VOLTAGE: 'voltage',
            SystemElementType.LED: 'LED',
        }
        return labels.get(self, 'unknown')

    @classmethod
    def get_name(cls, sys_elem_type):
        """Return the member matching the given label, UNKNOWN if none does."""
        for member in cls:
            if member is not cls.UNKNOWN and str(member) == sys_elem_type:
                return member
        return cls.UNKNOWN
class Element:
"""Element class : Compatible with LTSPICE and Depends on EndPoint class"""
def __init__(self, p_id='', p_name='', p_alias='', p_type=SystemElementType.UNKNOWN,
p_parent=None, p_ark_obj=None, p_role=None, p_spice_rotation='',
p_spice_prefix='', p_spice_model=''):
"""Init Object"""
self.id = p_id
self.name = p_name
self.alias = p_alias
self.type = p_type
self.parent = p_parent
self.ark_obj = p_ark_obj
self.child_list = set()
self.port_list = set()
self.input_role = p_role
self.point_list = []
self.spice_window_list = set()
self.spice_rotation = p_spice_rotation
self.constraint_list = set()
self.spice_prefix = p_spice_prefix
self.spice_model = p_spice_model
    def set_id(self, p_id):
        """Assign the identifier."""
        self.id = p_id

    def set_name(self, p_name):
        """Assign the name."""
        self.name = p_name

    def set_alias(self, p_alias):
        """Assign the alias."""
        self.alias = p_alias

    def set_type(self, p_type):
        """Assign the SystemElementType."""
        self.type = p_type

    def set_parent(self, p_parent):
        """Assign the parent element."""
        self.parent = p_parent

    def set_ark_obj(self, p_ark_obj):
        """Specific for ArKItect: store the native object handle."""
        self.ark_obj = p_ark_obj

    def add_child(self, p_child):
        """Register a child element in child_list."""
        self.child_list.add(p_child)

    def add_port(self, p_port):
        """Specific for XMI: register a port."""
        self.port_list.add(p_port)

    def set_input_role(self, p_role):
        """Assign the input role."""
        self.input_role = p_role

    def add_spice_window(self, p_window):
        """Specific for LTSPICE: register a display window."""
        self.spice_window_list.add(p_window)

    def set_spice_rotation(self, p_ref):
        """Specific for LTSPICE: store the rotation reference (e.g. 'R0', 'R90')."""
        self.spice_rotation = p_ref

    def add_point(self, p_point):
        """Specific for LTSPICE: append a connection point (order matters)."""
        self.point_list.append(p_point)

    def add_constraint(self, p_constraint):
        """Specific for LTSPICE: register a layout constraint."""
        self.constraint_list.add(p_constraint)

    def set_spice_prefix(self, p_prefix):
        """Specific for LTSPICE: store the component prefix."""
        self.spice_prefix = p_prefix

    def set_spice_model(self, p_model):
        """Specific for LTSPICE: store the component model name."""
        self.spice_model = p_model
def determine_relative_point(self, p_point):
"""Specific for LTSPICE"""
if str(self.type).find("voltage") > -1:
if self.spice_rotation == "R0":
point = util.Point()
point.set_x(p_point.x)
point.set_y(p_point.y + 16)
self.add_point(point)
point = util.Point()
point.set_x(p_point.x)
point.set_y(p_point.y + 96)
self.add_point(point)
elif self.spice_rotation == "R90":
point = util.Point()
point.set_x(p_point.x - 16)
point.set_y(p_point.y)
self.add_point(point)
point = util.Point()
point.set_x(p_point.x - 96)
point.set_y(p_point.y)
self.add_point(point)
elif self.spice_rotation == "R180":
point = util.Point()
point.set_x(p_point.x)
point.set_y(p_point.y - 16)
self.add_point(point)
point = util.Point()
point.set_x(p_point.x)
point.set_y(p_point.y - 96)
self.add_point(point)
elif self.spice_rotation == "R270":
point = util.Point()
point.set_x(p_point.x + 16)
point.set_y(p_point.y)
self.add_point(point)
point = util.Point()
point.set_x(p_point.x + 96)
point.set_y(p_point.y)
self.add_point(point)
else:
print("Unsupported rotation value: " + self.spice_rotation)
elif str(self.type).find("cap") > -1 or str(self.type).find("zener") > -1 or \
str(self.type).find("diode") > -1 or str(self.type).find("LED") > -1:
if self.spice_rotation == "R0":
point = util.Point()
point.set_x(p_point.x + 16)
point.set_y(p_point.y)
self.add_point(point)
point = util.Point()
point.set_x(p_point.x + 16)
point.set_y(p_point.y + 64)
self.add_point(point)
elif self.spice_rotation == "R90":
point = util.Point()
point.set_x(p_point.x)
point.set_y(p_point.y + 16)
self.add_point(point)
point = util.Point()
point.set_x(p_point.x - 64)
point.set_y(p_point.y + 16)
self.add_point(point)
elif self.spice_rotation == "R180":
point = util.Point()
point.set_x(p_point.x - 16)
point.set_y(p_point.y)
self.add_point(point)
point = util.Point()
point.set_x(p_point.x - 16)
point.set_y(p_point.y - 64)
self.add_point(point)
elif self.spice_rotation == "R270":
point = util.Point()
point.set_x(p_point.x)
point.set_y(p_point.y - 16)
self.add_point(point)
point = util.Point()
point.set_x(p_point.x + 64)
point.set_y(p_point.y - 16)
self.add_point(point)
else:
print("Unsupported rotation value: " + self.spice_rotation)
elif str(self.type).find("res") > -1:
if self.spice_rotation == "R0":
point = util.Point()
point.set_x(p_point.x + 16)
point.set_y(p_point.y + 16)
self.add_point(point)
point = util.Point()
point.set_x(p_point.x + 16)
point.set_y(p_point.y + 96)
self.add_point(point)
elif self.spice_rotation == "R90":
point = util.Point()
point.set_x(p_point.x - 16)
point.set_y(p_point.y + 16)
self.add_point(point)
point = util.Point()
point.set_x(p_point.x - 96)
point.set_y(p_point.y + 16)
self.add_point(point)
elif self.spice_rotation == "R180":
point = util.Point()
point.set_x(p_point.x - 16)
point.set_y(p_point.y - 16)
self.add_point(point)
| |
= os.path.abspath(os.path.dirname(__file__))
trex_path = os.path.join(cur_path, 'automation', 'trex_control_plane', 'interactive')
if trex_path not in sys.path:
sys.path.insert(1, trex_path)
from trex.astf.trex_astf_profile import ASTFProfile
from trex.astf.sim import decode_tunables
tunables = {}
if pa().tunable:
tunables = decode_tunables(pa().tunable)
try:
profile = ASTFProfile.load(input_file, **tunables)
json_content = profile.to_json_str()
except Exception as e:
raise DpdkSetup('ERROR: Could not convert astf profile to JSON:\n%s' % e)
with open(dst_json_file, 'w') as f:
f.write(json_content)
os.chmod(dst_json_file, 0o777)
def verify_stf_file(self):
""" check the input file of STF """
if not pa() or not pa().file or pa().astf:
return
extension = os.path.splitext(pa().file)[1]
if extension == '.py':
raise DpdkSetup('ERROR: Python files can not be used with STF mode, did you forget "--astf" flag?')
elif extension != '.yaml':
pass # should we fail here?
def is_hugepage_file_exits(self,socket_id):
t = ['2048','1048576']
for obj in t:
filename = '/sys/devices/system/node/node{}/hugepages/hugepages-{}kB/nr_hugepages'.format(socket_id,obj)
if os.path.isfile(filename):
return (True,filename,int(obj))
return (False,None,None)
    def config_hugepages(self, wanted_count = None):
        """Mount hugetlbfs if needed and reserve hugepages on both NUMA nodes.

        wanted_count : int, optional
            Hugepages to reserve per socket.  When None it is derived from
            the config: low-end mode uses a small count based on the port
            count (socket 0) or 1 (other sockets, just enough for DPDK to
            see the device); otherwise 2048.  The count is scaled down for
            1 GB pages.

        Side effects: may create and mount /mnt/huge and write to sysfs.
        """
        mount_output = subprocess.check_output('mount', stderr = subprocess.STDOUT).decode(errors='replace')
        if 'hugetlbfs' not in mount_output:
            huge_mnt_dir = '/mnt/huge'
            if not os.path.isdir(huge_mnt_dir):
                print("Creating huge node")
                os.makedirs(huge_mnt_dir)
            os.system('mount -t hugetlbfs nodev %s' % huge_mnt_dir)
        for socket_id in range(2):
            r = self.is_hugepage_file_exits(socket_id)
            if not r[0]:
                # only warn for socket 0; a missing second socket is normal
                if socket_id == 0:
                    print('WARNING: hugepages config file does not exist!')
                continue
            if wanted_count is None:
                if self.m_cfg_dict[0].get('low_end', False):
                    if socket_id == 0:
                        if pa() and pa().limit_ports:
                            if_count = pa().limit_ports
                        else:
                            if_count = self.m_cfg_dict[0].get('port_limit', len(self.m_cfg_dict[0]['interfaces']))
                        wanted_count = 20 + 40 * if_count
                    else:
                        wanted_count = 1 # otherwise, DPDK will not be able to see the device
                else:
                    wanted_count = 2048
            if r[2] > 2048:
                # 1 GB pages: convert the 2 MB-sized count into 1 GB units
                wanted_count = wanted_count / 1024
                if wanted_count < 1 :
                    wanted_count = 1
            filename = r[1]
            with open(filename) as f:
                configured_hugepages = int(f.read())
            if configured_hugepages < wanted_count:
                os.system('echo %d > %s' % (wanted_count, filename))
                time.sleep(0.1)
                with open(filename) as f: # verify
                    configured_hugepages = int(f.read())
                if configured_hugepages < wanted_count:
                    print('WARNING: tried to configure %d hugepages for socket %d, but result is: %d' % (wanted_count, socket_id, configured_hugepages))
    def run_servers(self):
        '''Start the auxiliary daemons (Scapy, PyBird, Emu) requested via CLI args.

        Each daemon is (re)started through general_daemon_server; any failure
        to start a requested daemon exits the process with -1.
        '''
        if not pa():
            return
        # Pin daemons to the configured master core; fall back to core 0
        # when the platform section is absent.
        try:
            master_core = self.m_cfg_dict[0]['platform']['master_thread_id']
        except:
            master_core = 0
        if should_scapy_server_run():
            ret = os.system('{sys_exe} general_daemon_server restart -c {cores} -n {name} --py -e "{exe}" -r -d -i'.format(sys_exe=sys.executable,
                        cores=master_core,
                        name='Scapy',
                        exe='-m trex.scapy_server.scapy_zmq_server'))
            if ret:
                print("Could not start scapy daemon server, which is needed by GUI to create packets.\nIf you don't need it, use --no-scapy-server flag.")
                sys.exit(-1)
        if pa().bird_server:
            ret = os.system('{sys_exe} general_daemon_server restart -n {name} --py -e "{exe}" -i'.format(sys_exe=sys.executable,
                        name='PyBird',
                        exe='-m trex.pybird_server.pybird_zmq_server'))
            if ret:
                print("Could not start bird server\nIf you don't need it, don't use --bird-server flag.")
                sys.exit(-1)
        if pa().emu:
            # Emu optionally exposes its ZMQ endpoint over TCP instead of IPC
            emu_zmq_tcp_flag = '--emu-zmq-tcp' if pa().emu_zmq_tcp else ''
            exe = './trex-emu {emu_zmq_tcp}'.format(emu_zmq_tcp = emu_zmq_tcp_flag)
            ret = os.system('{sys_exe} general_daemon_server restart -n {name} --sudo -e "{exe}"'.format(sys_exe=sys.executable,
                        name='Emu',
                        exe=exe))
            if ret:
                print("Could not start emu service\nIf you don't need it, don't use -emu flag.")
                sys.exit(-1)
# check vdev Linux interfaces status
# return True if interfaces are vdev
def check_vdev(self, if_list):
if not if_list:
return
af_names = []
ifname_re = re.compile('iface\s*=\s*([^\s,]+)')
found_vdev = False
found_pdev = False
for iface in if_list:
if iface == 'dummy':
continue
elif '--vdev' in iface:
found_vdev = True
if 'net_af_packet' in iface:
res = ifname_re.search(iface)
if res:
af_names.append(res.group(1))
elif ':' not in iface: # no PCI => assume af_packet
found_vdev = True
af_names.append(iface)
else:
found_pdev = True
if found_vdev:
if found_pdev:
raise DpdkSetup('You have mix of vdev and pdev interfaces in config file!')
for name in af_names:
if not os.path.exists('/sys/class/net/%s' % name):
raise DpdkSetup('ERROR: Could not find Linux interface %s.' % name)
oper_state = '/sys/class/net/%s/operstate' % name
if os.path.exists(oper_state):
with open(oper_state) as f:
f_cont = f.read().strip()
if f_cont in ('down', 'DOWN'):
raise DpdkSetup('ERROR: Requested Linux interface %s is DOWN.' % name)
return found_vdev
def check_trex_running(self, if_list):
if if_list and map_driver.args.parent and self.m_cfg_dict[0].get('enable_zmq_pub', True):
publisher_port = self.m_cfg_dict[0].get('zmq_pub_port', 4500)
pid = dpdk_nic_bind.get_tcp_port_usage(publisher_port)
if pid:
cmdline = dpdk_nic_bind.read_pid_cmdline(pid)
print('ZMQ port is used by following process:\npid: %s, cmd: %s' % (pid, cmdline))
sys.exit(-1)
    # verify that all interfaces of i40e NIC are in use by current instance of TRex
    def check_i40e_binds(self, if_list):
        """Ensure no sibling port of a used i40e NIC is left under Linux.

        i40e ports sharing a PCI device with a TRex-used port interfere with
        it (see trex-528).  Sibling ports under the kernel i40e driver either
        get unbound automatically (--unbind-unused-ports) or abort setup with
        instructions; siblings under a DPDK driver only produce a warning.
        """
        # i40e device IDs taken from dpdk/drivers/net/i40e/base/i40e_devids.h
        i40e_device_ids = [0x1572, 0x1574, 0x1580, 0x1581, 0x1583, 0x1584, 0x1585, 0x1586, 0x1587, 0x1588, 0x1589, 0x158A, 0x158B]
        iface_without_slash = set()
        for iface in if_list:
            iface_without_slash.add(self.split_pci_key(iface))
        show_warning_devices = set()
        unbind_devices = set()
        for iface in iface_without_slash:
            if iface == 'dummy':
                continue
            iface = self.split_pci_key(iface)
            if self.m_devices[iface]['Device'] not in i40e_device_ids: # not i40e
                return
            # PCI address without the function suffix identifies the physical NIC
            iface_pci = iface.split('.')[0]
            for device in self.m_devices.values():
                if device['Slot'] in iface_without_slash: # we use it
                    continue
                if iface_pci == device['Slot'].split('.')[0]:
                    if device.get('Driver_str') == 'i40e':
                        if pa() and pa().unbind_unused_ports:
                            # if --unbind-unused-ports is set we unbind ports that are not
                            # used by TRex
                            unbind_devices.add(device['Slot'])
                        else:
                            print('ERROR: i40e interface %s is under Linux and will interfere with TRex interface %s' % (device['Slot'], iface))
                            print('See following link for more information: https://trex-tgn.cisco.com/youtrack/issue/trex-528')
                            print('Unbind the interface from Linux with following command:')
                            print('    sudo ./dpdk_nic_bind.py -u %s' % device['Slot'])
                            print('')
                            sys.exit(-1)
                    if device.get('Driver_str') in dpdk_nic_bind.dpdk_drivers:
                        show_warning_devices.add(device['Slot'])
        for dev in show_warning_devices:
            print('WARNING: i40e interface %s is under DPDK driver and might interfere with current TRex interfaces.' % dev)
        if unbind_devices:
            print('Unbinding unused i40e interfaces: %s' % unbind_devices)
            dpdk_nic_bind.unbind_all(unbind_devices, force=True)
def do_run (self, only_check_all_mlx=False):
""" returns code that specifies if interfaces are Mellanox/Napatech etc. """
self.load_config_file()
self.preprocess_astf_file_if_needed()
self.verify_stf_file()
if not pa() or pa().dump_interfaces is None or (pa().dump_interfaces == [] and pa().cfg):
if_list = if_list_remove_sub_if(self.m_cfg_dict[0]['interfaces'])
else:
if_list = pa().dump_interfaces
if not if_list:
self.run_dpdk_lspci()
for dev in self.m_devices.values():
if dev.get('Driver_str') in dpdk_nic_bind.dpdk_drivers + dpdk_nic_bind.dpdk_and_kernel:
if_list.append(dev['Slot'])
if self.check_vdev(if_list):
self.check_trex_running(if_list)
self.run_servers()
# no need to config hugepages
return
self.run_dpdk_lspci()
if_list = list(map(self.pci_name_to_full_name, if_list))
Broadcom_cnt=0;
# check how many mellanox cards we have
Mellanox_cnt=0;
dummy_cnt=0
for key in if_list:
if key == 'dummy':
dummy_cnt += 1
continue
key = self.split_pci_key(key)
if key not in self.m_devices:
err=" %s does not exist " %key;
raise DpdkSetup(err)
if 'Vendor_str' not in self.m_devices[key]:
err=" %s does not have Vendor_str " %key;
raise DpdkSetup(err)
if 'Mellanox' in self.m_devices[key]['Vendor_str']:
Mellanox_cnt += 1
if 'Broadcom' in self.m_devices[key]['Vendor_str']:
Broadcom_cnt += 1
if not (pa() and pa().dump_interfaces):
if (Mellanox_cnt > 0) and ((Mellanox_cnt + dummy_cnt) != len(if_list)):
err = "All driver should be from one vendor. You have at least one driver from Mellanox but not all."
raise DpdkSetup(err)
if Mellanox_cnt > 0:
self.set_only_mellanox_nics()
if self.get_only_mellanox_nics():
if not pa().no_ofed_check:
self.verify_ofed_os()
self.check_ofed_version()
for key in if_list:
if key == 'dummy':
continue
if pa().no_ofed_check: # in case of no-ofed don't optimized for Azure
continue
key = self.split_pci_key(key)
if 'Virtual' not in self.m_devices[key]['Device_str']:
pci_id = self.m_devices[key]['Slot_str']
self.tune_mlx_device(pci_id)
if 'Interface' in self.m_devices[key]:
dev_ids = self.m_devices[key]['Interface'].split(",")
for dev_id in dev_ids:
self.disable_flow_control_mlx_device (dev_id)
self.set_max_mtu_mlx_device(dev_id)
if only_check_all_mlx:
if Mellanox_cnt > 0:
sys.exit(MLX_EXIT_CODE);
else:
sys.exit(0);
self.check_i40e_binds(if_list)
self.check_trex_running(if_list)
self.config_hugepages() # should be after check of running TRex
self.run_servers()
Napatech_cnt=0;
to_bind_list = []
for key in if_list:
if key == 'dummy':
continue
key = self.split_pci_key(key)
if key not in self.m_devices:
err=" %s does not exist " %key;
raise DpdkSetup(err)
if (is_napatech(self.m_devices[key])):
# These adapters doesn't need binding
Napatech_cnt += 1
continue
if self.m_devices[key].get('Driver_str') not in (dpdk_nic_bind.dpdk_drivers + dpdk_nic_bind.dpdk_and_kernel):
to_bind_list.append(key)
if Napatech_cnt:
# This is currently a hack needed until the DPDK NTACC PMD can do proper
# cleanup.
os.system("ipcs | grep 2117a > /dev/null && ipcrm shm `ipcs | grep 2117a | cut -d' ' -f2` > /dev/null")
if to_bind_list:
if Mellanox_cnt:
ret = self.do_bind_all('mlx5_core', to_bind_list)
if ret:
ret = self.do_bind_all('mlx4_core', to_bind_list)
if ret:
raise DpdkSetup('Unable to bind interfaces to driver mlx5_core/mlx4_core.')
return MLX_EXIT_CODE
else:
if march == 'ppc64le':
print('Trying to bind to vfio-pci ...')
self.try_bind_to_vfio_pci(to_bind_list)
return
else:
# if igb_uio is ready, use it as safer choice, afterwards try vfio-pci
if load_igb_uio():
print('Trying to bind to igb_uio ...')
ret = self.do_bind_all('igb_uio', to_bind_list)
if ret:
raise DpdkSetup('Unable to bind interfaces to driver igb_uio.') # module present, loaded, but unable to bind
return
try:
print('Trying to bind to vfio-pci ...')
self.try_bind_to_vfio_pci(to_bind_list)
return
except VFIOBindErr as e:
pass
#print(e)
print('Trying to compile and bind to igb_uio | |
# A set of helper functions for the NSBL codebase
from py_db import db
db = db('NSBL')
def random_sql_helpers():
    """Placeholder for ad-hoc SQL snippets.

    Currently builds an empty mapping and returns None; kept so callers
    and future snippets have a stable hook.
    """
    sql_dict = {}
def get_team_abb(team_name, year):
    """Return the team_abb for (year, team_name) from the teams table.

    Prints an error and returns None when no row matches.
    """
    # NOTE(security): SQL is built via %-interpolation; team_name is not
    # escaped, so this is injection-prone if it ever carries untrusted input.
    qry = db.query("SELECT team_abb FROM teams WHERE year = %s AND team_name = '%s';" % (year, team_name))

    if qry != ():
        team_abb = qry[0][0]
    else:
        print "\n\n!!!!ERROR!!!! - no team_abb for %s, %s" % (year, team_name)
        team_abb = None

    return team_abb
def get_division(team_name, year):
    """Return the team's division plus opponent lists for the given year.

    Returns (division, divisional_teams, conference_teams,
    non_conference_teams).  The first two characters of the division string
    encode the conference; divisional and conference lists exclude the team
    itself.
    """
    division_dict = {
    }

    qry = """SELECT team_name
    , division
    FROM teams
    WHERE 1
        AND year = %s
    ;""" % (year)

    res = db.query(qry)
    for row in res:
        tm, div = row
        division_dict[tm] = div

    division = division_dict.get(team_name)
    # NOTE(review): if team_name is absent, division is None and the slicing
    # below raises TypeError — callers appear to always pass valid names.
    divisional_teams = []
    conference_teams = []
    non_conference_teams = []
    for k,v in division_dict.items():
        if v == division and k != team_name:
            divisional_teams.append(k)
        if v[:2] == division[:2] and k != team_name:
            conference_teams.append(k)
        if v[:2] != division[:2]:
            non_conference_teams.append(k)

    return division, divisional_teams, conference_teams, non_conference_teams
def get_team_name(city_name, year):
    """Return the team_name matching (year, city_name), or None with an error print."""
    # NOTE(security): %-interpolated SQL — injection-prone for untrusted input.
    qry = db.query("SELECT team_name FROM teams WHERE year = %s AND city_name = '%s';" % (year, city_name))

    if qry != ():
        team_name = qry[0][0]
    else:
        print "\n\n!!!!ERROR!!!! - no team_name for %s, %s" % (year, city_name)
        team_name = None

    return team_name

def get_team(mascot_name, year):
    """Return the team_name matching (year, mascot_name), or None with an error print."""
    # NOTE(security): %-interpolated SQL — injection-prone for untrusted input.
    qry = db.query("SELECT team_name FROM teams WHERE year = %s AND mascot_name = '%s';" % (year, mascot_name))

    if qry != ():
        team_name = qry[0][0]
    else:
        print "\n\n!!!!ERROR!!!! - no team_name for %s, %s" % (year, mascot_name)
        team_name = None

    return team_name

def get_mascot_names(team_abb, year):
    """Return the mascot_name for (year, team_abb or spreadsheet_abb), or None with an error print."""
    # NOTE(security): %-interpolated SQL — injection-prone for untrusted input.
    qry = db.query("SELECT mascot_name FROM teams WHERE year = %s AND (team_abb = '%s' OR spreadsheet_abb = '%s');" % (year, team_abb, team_abb))

    if qry != ():
        mascot_name = qry[0][0]
    else:
        print "\n\n!!!!ERROR!!!! - no mascot_name for %s, %s" % (year, team_abb)
        mascot_name = None

    return mascot_name
def get_park_factors(team_abb, year):
    """Return the team's park factor for the year (neutral fallback 100.0).

    Scaled park factors (by a factor of 1/3) from fangraphs.  The team is
    matched against any of its current-franchise abbreviations or the
    season's team_abb.
    """
    # NOTE(security): %-interpolated SQL — injection-prone for untrusted input.
    qry = db.query("""SELECT park_factor
    FROM teams_current_franchise tcf
    JOIN teams t ON (tcf.team_name = t.team_name)
    WHERE 1
        AND (tcf.primary_abb = '%s' OR tcf.secondary_abb = '%s' OR tcf.tertiary_abb = '%s' OR t.team_abb = '%s')
        AND year = %s;""" % (team_abb, team_abb, team_abb, team_abb, year))

    if qry != ():
        park_factor = qry[0][0]
    else:
        print "\n\n!!!!ERROR!!!! - no park_factor for %s, %s" % (year, team_abb)
        park_factor = 100.0

    return park_factor

def get_pos_adj(position):
    """Return the positional WAR adjustment for a position (0.0 fallback)."""
    qry = db.query("SELECT adjustment FROM helper_positional_adjustment WHERE position = '%s';" % (position))

    if qry != ():
        pos_adj = qry[0][0]
    else:
        print "\n\n!!!!ERROR!!!! - no position adjustment for %s" % (position)
        pos_adj = 0.0

    return pos_adj

def get_pos_formula(position):
    """
    Returns coefficients for error/range/arm/passed ball values according to http://www.ontonova.com/floodstudy/4647-5.html.
    This should possibly be scaled down?

    Returns [range, error, arm, passed_ball] as floats; all zeros when the
    position is unknown.
    """
    # [range, error, arm, passed ball]
    qry = db.query("SELECT rng, err, arm, passed_ball FROM helper_zips_positions WHERE position = '%s';" % (position))

    if qry != ():
        pos_formula = [float(qry[0][0]), float(qry[0][1]), float(qry[0][2]), float(qry[0][3])]
    else:
        print "\n\n!!!!ERROR!!!! - no position formula for %s" % (position)
        pos_formula = [0,0,0,0]

    # for i, v in enumerate(pos_formula):
    #     # research from http://dmbo.net/smf/index.php?topic=4883.0 and ad_hoc/defensive_value_analysis.xlsx shows that the original defensive values should be regressed back ~72.5%
    #     pos_formula[i] = 0.725 * v

    return pos_formula
def get_league_average_hitters(year, category):
    """Return a league-average hitting value for the year.

    category is one of 'lg_pa', 'lg_r', 'lg_obp', 'lg_slg', 'lg_woba';
    returns None for any other category.
    """
    q = """SELECT
    pa,
    r,
    (h+bb+hbp)/pa as obp,
    (1b + 2*2b + 3*3b + 4*hr)/ab as slg,
    woba
    FROM processed_league_averages_hitting
    WHERE year = %s
    """
    qry = q % year
    query = db.query(qry)[0]
    lg_pa, lg_r, lg_obp, lg_slg, lg_woba = query
    avgs = {"lg_pa":lg_pa, "lg_r":lg_r, "lg_obp":lg_obp, "lg_slg":lg_slg, "lg_woba":lg_woba}
    return avgs.get(category)

def get_zips_average_hitters(year, category):
    """Return a ZiPS-projection league-average hitting value for the year.

    Same categories and fallback behavior as get_league_average_hitters,
    but sourced from zips_averages_hitting.
    """
    q = """SELECT
    pa,
    r,
    (h+bb+hbp)/pa as obp,
    (1b + 2*2b + 3*3b + 4*hr)/ab as slg,
    woba
    FROM zips_averages_hitting
    WHERE year = %s
    """
    qry = q % year
    query = db.query(qry)[0]
    lg_pa, lg_r, lg_obp, lg_slg, lg_woba = query
    avgs = {"lg_pa":lg_pa, "lg_r":lg_r, "lg_obp":lg_obp, "lg_slg":lg_slg, "lg_woba":lg_woba}
    return avgs.get(category)
def get_offensive_metrics(year, pf, pa, ab, bb, hbp, _1b, _2b, _3b, hr, sb, cs):
    """
    Converts a players offensive boxscore stats in a given year to more advanced stats (ops, wOBA, park_adjusted wOBA, OPS+, wRC, wRC/27, wRC+, RAA, and offensive WAR)

    year: season used for the league-average lookups
    pf: park factor divided into the rate stats (1.0-scale neutral)
    Returns a 9-tuple (ops, wOBA, park_wOBA, OPS_plus, wrc, wrc27, wRC_plus, raa, oWAR).
    """
    # Fix: with zero plate appearances every rate stat below is undefined.
    # The original guarded obp/slg but still divided by pa for wOBA and wRC+,
    # raising ZeroDivisionError; return zeros instead.
    if pa == 0:
        return 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    # hard-coded linear weights for wOBA; steals add, caught stealing subtracts
    wOBA = ((0.691*bb + 0.722*hbp + 0.884*_1b + 1.257*_2b + 1.593*_3b + 2.058*hr + 0.2*sb - 0.398*cs)/(pa))
    park_wOBA = wOBA/pf
    h = _1b + _2b + _3b + hr
    obp = (h + bb + hbp)/float(pa)
    # a player can have pa > 0 with ab == 0 (all walks/HBP), so slg still needs a guard
    if ab != 0:
        slg = (_1b + 2*_2b + 3*_3b + 4*hr)/float(ab)
    else:
        slg = 0.0
    ops = obp + slg
    lg_obp = float(get_league_average_hitters(year, 'lg_obp'))
    lg_slg = float(get_league_average_hitters(year, 'lg_slg'))
    # 100 = league average; park-adjusted OBP and SLG each compared to league
    OPS_plus = 100*(((obp/pf)/lg_obp)+((slg/pf)/lg_slg)-1)
    lg_woba = float(get_league_average_hitters(year, 'lg_woba'))
    lg_r = float(get_league_average_hitters(year, 'lg_r'))
    lg_pa = float(get_league_average_hitters(year, 'lg_pa'))
    wrc = (((park_wOBA-lg_woba)/1.15)+(lg_r/lg_pa))*pa
    # wRC per 27 outs; ab - h approximates outs and can be zero
    if (ab-h) != 0:
        wrc27 = wrc*27/(ab-h)
    else:
        wrc27 = 0.0
    wRC_plus = ((wrc/pa/(lg_r/lg_pa)*100))
    raa = pa*((park_wOBA-lg_woba)/1.25)
    # ~10 runs above average per win
    oWAR = raa/10
    return ops, wOBA, park_wOBA, OPS_plus, wrc, wrc27, wRC_plus, raa, oWAR
def get_zips_offensive_metrics(year, pf, pa, ab, bb, hbp, _1b, _2b, _3b, hr, sb, cs):
    """
    Converts a players offensive zips boxscore stats in a given year to more advanced stats (ops, wOBA, park_adjusted wOBA, OPS+, wRC, wRC/27, wRC+, RAA, and offensive WAR)

    Mirrors get_offensive_metrics but compares against ZiPS league averages.
    Returns a 9-tuple (ops, wOBA, park_wOBA, OPS_plus, wrc, wrc27, wRC_plus, raa, oWAR).
    """
    # Fix (consistent with get_offensive_metrics): zero plate appearances
    # previously raised ZeroDivisionError at the wOBA/wRC+ divisions.
    if pa == 0:
        return 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    # hard-coded linear weights for wOBA; steals add, caught stealing subtracts
    wOBA = ((0.691*bb + 0.722*hbp + 0.884*_1b + 1.257*_2b + 1.593*_3b + 2.058*hr + 0.2*sb - 0.398*cs)/(pa))
    park_wOBA = wOBA/pf
    h = _1b + _2b + _3b + hr
    obp = (h + bb + hbp)/float(pa)
    # pa > 0 with ab == 0 is possible (all walks/HBP), so slg still needs a guard
    if ab != 0:
        slg = (_1b + 2*_2b + 3*_3b + 4*hr)/float(ab)
    else:
        slg = 0.0
    ops = obp + slg
    lg_obp = float(get_zips_average_hitters(year, 'lg_obp'))
    lg_slg = float(get_zips_average_hitters(year, 'lg_slg'))
    OPS_plus = 100*(((obp/pf)/lg_obp)+((slg/pf)/lg_slg)-1)
    lg_woba = float(get_zips_average_hitters(year, 'lg_woba'))
    lg_r = float(get_zips_average_hitters(year, 'lg_r'))
    lg_pa = float(get_zips_average_hitters(year, 'lg_pa'))
    wrc = (((park_wOBA-lg_woba)/1.15)+(lg_r/lg_pa))*pa
    # wRC per 27 outs; ab - h approximates outs and can be zero
    if (ab-h) != 0:
        wrc27 = wrc*27/(ab-h)
    else:
        wrc27 = 0.0
    wRC_plus = ((wrc/pa/(lg_r/lg_pa)*100))
    raa = pa*((park_wOBA-lg_woba)/1.25)
    # ~10 runs above average per win
    oWAR = raa/10
    return ops, wOBA, park_wOBA, OPS_plus, wrc, wrc27, wRC_plus, raa, oWAR
def get_league_average_pitchers(year, category):
    """Fetch one league-average pitching value for *year* from the
    processed_league_averages_pitching table.

    category: 'lg_r', 'lg_gs', 'lg_era', 'lg_fip' or 'fip_const'; any other
    key returns None.
    """
    # NOTE: the query aliases era as fip, so 'lg_fip' currently returns the
    # league ERA -- presumably a placeholder until a real FIP column exists.
    q = """SELECT
        r,
        gs,
        era,
        era as fip,
        fip_const
        FROM processed_league_averages_pitching
        WHERE year = %s
        """
    qry = q % year
    query = db.query(qry)[0]
    lg_r, lg_gs, lg_era, lg_fip, fip_const = query
    avgs = {"lg_r":lg_r, "lg_gs":lg_gs, "lg_era":lg_era, "lg_fip":lg_fip, "fip_const":fip_const}
    return avgs.get(category)
def get_zips_average_pitchers(year, category):
    """Fetch one ZiPS-projection league-average pitching value for *year*
    from the zips_averages_pitching table.

    category: 'lg_r', 'lg_gs', 'lg_era', 'lg_fip' or 'fip_const'; any other
    key returns None. The query aliases era as fip, so 'lg_fip' mirrors the
    league ERA.
    """
    sql = """SELECT
        r,
        gs,
        era,
        era as fip,
        fip_const
        FROM zips_averages_pitching
        WHERE year = %s
        """ % year
    row = db.query(sql)[0]
    # Pair the row values with their lookup labels, then pick the requested one.
    labels = ("lg_r", "lg_gs", "lg_era", "lg_fip", "fip_const")
    return dict(zip(labels, row)).get(category)
def get_pitching_metrics(metric_9, ip, year, pf, g, gs, _type):
    """
    Converts a players pitching boxscore stats in a given year to either parkadjusted FIP/ERA, FIP-/ERA-, fWAR/rWAR.

    metric_9: runs-per-9 metric (ERA or FIP); _type ('era' or 'fip') picks
    the matching league average to compare against.
    Returns (park_metric, metric_min, METRIC_WAR).
    """
    park_metric = metric_9/pf
    search_metric = 'lg_' + _type
    lg_metric = float(get_league_average_pitchers(year, search_metric))
    # "minus" stat: 100 = league average, lower is better
    metric_min = 100*(park_metric/lg_metric)
    # convert to a runs-allowed scale
    RApxMETRIC = float(park_metric)/0.92
    lg_r = float(get_league_average_pitchers(year, 'lg_r'))
    lg_gs = float(get_league_average_pitchers(year, 'lg_gs'))
    # dynamic run environment blending league scoring with this pitcher's runs
    # allowed, weighted by his innings per appearance (raises ZeroDivisionError
    # if g == 0 -- callers are expected to pass g >= 1)
    metric_RE = ((((18-(float(ip)/float(g)))*(float(lg_r)/float(lg_gs))+(float(ip)/float(g))*RApxMETRIC)/18)+2)*1.5
    # pitchers who started more than half their games get the starter offsets
    if (float(gs)/float(g)) > 0.5:
        METRIC_x_win = ((lg_metric-RApxMETRIC)/(metric_RE))+0.5
        METRIC_x_win_9 = METRIC_x_win - 0.38
    else:
        METRIC_x_win = ((lg_metric-RApxMETRIC)/(metric_RE))+0.52
        METRIC_x_win_9 = METRIC_x_win - 0.46
    METRIC_WAR = METRIC_x_win_9*float(ip)/9.0
    return park_metric, metric_min, METRIC_WAR
def get_zips_pitching_metrics(metric_9, ip, year, pf, g, gs, _type):
    """Park-adjust a ZiPS-projected runs-per-9 metric (ERA or FIP) and derive
    its "minus" stat and a WAR estimate.

    Mirrors get_pitching_metrics but uses ZiPS league averages and a looser
    starter test suited to projections.
    Returns (park_adjusted, minus_stat, war).
    """
    park_adjusted = metric_9 / pf
    league_avg = float(get_zips_average_pitchers(year, 'lg_' + _type))
    # "minus" stat: 100 = league average, lower is better
    minus_stat = 100 * (park_adjusted / league_avg)
    runs_allowed = float(park_adjusted) / 0.92
    league_runs = float(get_zips_average_pitchers(year, 'lg_r'))
    league_starts = float(get_zips_average_pitchers(year, 'lg_gs'))
    innings_per_game = float(ip) / float(g)
    run_environment = ((((18 - innings_per_game) * (league_runs / league_starts)
                         + innings_per_game * runs_allowed) / 18) + 2) * 1.5
    wins_above_avg = (league_avg - runs_allowed) / (run_environment)
    # projected starters (3+ starts, or >40% of games started) use the
    # starter replacement-level offset; relievers use the other pair
    if gs >= 3 or float(gs) / float(g) > 0.4:
        wins_per_9 = wins_above_avg + 0.5 - 0.38
    else:
        wins_per_9 = wins_above_avg + 0.52 - 0.46
    war = wins_per_9 * float(ip) / 9.0
    return park_adjusted, minus_stat, war
def get_hand(player_name):
    """Infer a hitter's handedness from the name-suffix convention used in
    the source data: '*' marks a left-handed hitter, '#' a switch hitter,
    anything else (including an empty name) defaults to right-handed.

    player_name: str, possibly ending in '*' or '#'
    Returns 'l', 's' or 'r'.
    """
    # idiomatic endswith replaces the old player_name[len(player_name)-1:] slicing
    if player_name.endswith("*"):
        return 'l'
    if player_name.endswith("#"):
        return 's'
    return 'r'
def get_def_values(search_name, position, year):
"""
Gets the baseline defensive ratings for a player given the desired position and year.
"""
p = position.lower()
pos = position.upper()
rn = '%s_range' % p
er = '%s_error' % p
arm, pb = 0,2
if p == 'c':
arm = 'c_arm'
pb = 'c_pb'
elif p in ('lf','rf','cf'):
arm = 'of_arm'
try:
if p not in ('p','dh', 'ph', 'if', 'sp', 'rp', 'of'):
rtg_q = """SELECT
%s,
%s,
%s,
%s
FROM zips_defense
WHERE year = %s
AND player_name = '%s'"""
rtg_qry = rtg_q % (rn, er, arm, pb, year, search_name)
rtgs = db.query(rtg_qry)[0]
else:
rtgs = (0,0,0,0)
except IndexError:
rtgs = (0,0,0,0)
_r, error, _a, passed_ball = rtgs
if error | |
"""Module used to store riven-related data and means to calculate it.
Logic and data should be separated but I'm too mongolic to do it yet"""
import settings
import utils
import rating_profile
class Riven:
"""class used to store all riven-related data and means to calculate it."""
def __init__(
    self,
    weapon="",
    name="",
    initial_price=-1,
    buyout_price=-1,
    seller="",
    polarity="",
    rerolls=-1,
    mastery_rank=-1,
    riven_rank=-1,
    stats=None,
):
    """Build a riven from auction data and immediately derive its type,
    dispositions, grades, rating, output paths and display message.

    stats: list of stat entries, indexed as (positive_flag, stat_name, value)
    -- inferred from usage in calculate_grade/checkstats; defaults to a new
    empty list. (Fix: the previous default was a shared mutable list, the
    classic Python mutable-default-argument bug.)
    """
    self.weapon_name = weapon  # name of the weapon.
    self.name = name.capitalize()  # name of the riven.
    self.initial_price = initial_price  # starting price of the auction.
    self.seller = seller  # name of the seller.
    # Fix: give each instance its own list instead of sharing the default.
    self.stats = stats if stats is not None else []  # stats of the riven.
    self.polarity = polarity.capitalize()  # polarity of the riven.
    self.rerolls = rerolls  # number of rerolls.
    self.mastery_rank = mastery_rank  # mastery rank needed to use the riven.
    self.riven_rank = riven_rank  # rank of the riven
    # A falsy buyout (0/None) means "no buyout": treat as effectively infinite.
    # (Fix: removed the redundant first assignment that was immediately
    # overwritten by this one.)
    self.buyout_price = buyout_price if buyout_price else 99999
    self.riven_type = utils.get_weapon_type(self.weapon_name)
    self.weapon = self.get_strongest_version()
    self.outdated = False  # indicates if the stats are outdated or wrong.
    self.disposition = [
        self.get_disposition(x) for x in self.get_ocurrences()
    ]  # possible riven dispositions
    self.wantedweapon = (
        self.check_wanted()
    )  # indicates if the riven is in the wanted weapons list.
    self.grades = self.get_grades()  # checks stats grades.
    self.riven_rate = self.checkstats()  # riven rating based on checkstats algorithm.
    self.paths = self.prepare_paths()
    self.real_value = self.riven_rate / self.buyout_price * 100
    self.message = self.riv_to_text()
def get_disposition(self, weapon):
    """Gets the disposition of a weapon given the name.

    Looks the weapon up inside this riven's weapon-type category of
    settings.weapon_list.
    """
    return settings.weapon_list[self.riven_type][weapon]["disposition"]
def check_wanted(self):
    """Return True when this weapon appears in any wish list: the wished
    unrolleds, the weapon-specific wished rolls, or the wanted-weapons list
    for this weapon's type."""
    name = self.weapon_name
    return (name in settings.wished_unrolleds
            or name in settings.wished_rivens
            or name in settings.wished_weapons[self.riven_type])
def get_ocurrences(self):
    """Find every variant of this weapon present in the weapon list.

    Only categories that list the base weapon itself are searched. A
    candidate matches when it is exactly the base weapon, or when it is a
    known variant whose name contains the base weapon's name.
    """
    base = self.weapon_name
    # Gather all weapons whose name contains ours, category by category.
    candidates = []
    for category in settings.weapon_list:
        if base not in category:
            continue
        candidates.extend(w for w in category.keys() if base in w)
    # Keep exact matches and recognised variants.
    matches = []
    for candidate in candidates:
        if candidate == base or (utils.check_variant(candidate) and base in candidate):
            matches.append(candidate)
    return matches
def get_strongest_version(self):
    """Return the weapon record of the strongest variant of this weapon
    (highest critical chance), used to weight stats when rating rivens."""
    best = utils.get_weapon(self.weapon_name)
    for variant in self.get_ocurrences():
        candidate = utils.get_weapon(variant)
        if candidate["critical_chance"] > best["critical_chance"]:
            best = candidate
    return best
def ratestats(self):
    """Rates the stats of the riven based on the profiles of the weapon.

    Side effect: caches the full rating breakdown on self.punctuations;
    returns only the top entry's score (punctuations[0][1]).
    """
    self.punctuations = rating_profile.rate_riven(self)
    return self.punctuations[0][1]
def checkstats(self):
    """Punctuation system. it checks how good the riven stats are against
    settings.wished or decent combinations of they type.

    Starts from the profile-based score from ratestats(), then overrides it
    with a fixed 80 (plus a bonus for the last stat's best weight when that
    stat is a negative) if the stats match one of the wished rolls for this
    weapon.
    """
    punctuation = self.ratestats()
    if self.weapon_name in settings.wished_rivens:
        for wished_roll in settings.wished_rivens[self.weapon_name]:
            stats = [stat[1] for stat in self.stats]
            if utils.compare_stats(list(wished_roll["stats"].values()), stats):
                punctuation = 80
                # stat[0] is the positive/negative flag; False on the last
                # stat means this riven carries a negative.
                if self.stats[-1][0] is False:
                    punctuation += max(
                        settings.weights_list[self.riven_type][key][self.stats[-1][1]][1]
                        for key in settings.weights_list[self.riven_type].keys()
                    )
                break
    return punctuation
def calculate_grade(self, stat, dispo, fakerank):
    """Gets the proper grade of the stat,
    trying every disposition and also checks for fake ranks

    stat: one entry of self.stats, indexed as (positive_flag, name, value)
    dispo: disposition multiplier to try
    fakerank: when True, skip the per-rank downscaling (rank assumed maxed)
    Returns the grade on a 0-1 scale; values outside [0, 1] mean the stat is
    out of the expected range for this disposition.
    """
    # gets the base value of the stat based on the weapon type.
    res = abs(settings.stat_list[stat[1]]["value" + str(self.riven_type)]) * dispo
    # scale the expected value down for partially ranked rivens (9 steps total)
    res = (res / 9) * (self.riven_rank + 1) if not fakerank else res
    # if there is negative.
    if self.stats[-1][0] is False:
        # if the stat is negative.
        if stat[0] is False:
            # if 2 positives.
            if len(self.stats) == 3:
                res *= 0.5
            # if 3 positives.
            else:
                res *= 0.7575
        # if 2 positives.
        elif len(self.stats) == 3:
            res *= 1.25
        # if 3 positives.
        else:
            res *= 0.947
    # if there is no negative.
    else:
        # if 2 positives.
        if len(self.stats) == 3:
            res *= 0.7575
    # puts res in a 0-1 scale. formula is (value - minimum) / (maximum - minimum).
    res = (abs(stat[2]) - res * 0.9) / (res * 1.1 - res * 0.9) if res != 0 else 0
    # above calculations should be changed based on the new scale
    # however it's easier to work with this one.
    # range stats snap slightly-out-of-bounds grades back into [0, 1].
    if stat[1] == "range" and (1.1 > res > 1):
        res = 1
    elif stat[1] == "range" and (0 > res > -0.1):
        res = 0
    return res
def calculate_grades(self, fakerank):
    """Calculates the grade of the stat of a riven.
    if the grades aren't compatible with a given disposition it tries with other ones,
    if the weapon has any variant.
    if there is no disposition to which grades are good it returns the closest one
    based on a distance system.
    the formula is: base stat value based on weapon type * disposition * stat system.
    the system calculates res based on these rules:
    if the weapon has 3 positives and a negative the positives are
    weighted *0.947 and the negative *0.7575.
    if the weapon has 2 positives and a negative the positives are
    weighted *1.25 and the negative *0.5.
    if the weapon has 3 positives and no negative the positives are weighted *0.7575.
    if the weapon has 2 positives and no negative the positives stay the same.

    Side effects: collapses self.disposition to the matching (or first)
    disposition and may set self.outdated when grades stay out of range.
    """
    grades = [0, 0, 0, 0]
    best_distance = 9999
    for dispo in self.disposition:
        distance, buen_grade = 0, 0
        grades_aux = []
        for stat in self.stats:
            res = self.calculate_grade(stat, dispo, fakerank)
            grades_aux.append(res)
            # if res is within the 0-1 scale then the grade is good.
            # otherwise we get the distance to said range.
            if 0 < res < 1:
                buen_grade += 1
            else:
                if res < 0:
                    distance += abs(res)
                else:
                    distance += res - 1
        # if all the stats have grades within 0-1 scale it returns them.
        # else if the distance is less than the current best
        # distance it updates the grades and the distance.
        if buen_grade == len(self.stats):
            self.disposition = dispo
            return grades_aux
        if distance < best_distance:
            grades = grades_aux
            best_distance = distance
    # a huge distance suggests the advertised rank may be wrong -- retry once
    # assuming a fake rank before giving up.
    if best_distance > 30 and fakerank is False:
        return self.calculate_grades(True)
    if any(0 > grade or grade > 1.1 for grade in grades):
        self.outdated = True
    self.disposition = self.disposition[0]
    return grades
def get_grades(self):
    """Rescale the raw 0-1 stat grades to a -10..10 scale and derive one
    letter grade per stat (S, A/B/C with +/- steps, F for the worst),
    following simodeus bot's grading. Stores the letters on
    self.grade_letters and returns the numeric grades."""
    grades = [utils.scale_range(g, 0, 1, -10, 10) for g in self.calculate_grades(False)]
    # Ordered (lower bound, letter) cutoffs; the first bound the grade
    # exceeds wins, otherwise the grade falls through to "F".
    cutoffs = (
        (9.5, "S"), (7.5, "A+"), (5.5, "A"), (3.5, "A-"), (1.5, "B+"),
        (-1.5, "B"), (-3.5, "B-"), (-5.5, "C+"), (-7.5, "C"), (-9.5, "C-"),
    )
    self.grade_letters = []
    for grade in grades:
        for lower_bound, letter in cutoffs:
            if grade > lower_bound:
                self.grade_letters.append(letter)
                break
        else:
            self.grade_letters.append("F")
    return grades
def prepare_paths(self):
    """Build the list of text files this riven's information is written to:
    the catch-all list, the unrolled lists when applicable, and the rating
    tier (godroll/usable/decent/normal/bad/trash) lists."""
    label = self.weapon_name.capitalize().replace("_", " ")
    base = settings.wanted_path if self.wantedweapon is True else settings.unwanted_path
    paths = [base + "\\All rivens.txt"]
    if self.rerolls == 0:
        paths.append(base + "\\Unrolleds\\Unrolled list.txt")
        paths.append(base + "\\Unrolleds\\Unrolled %s.txt" % label)
    # Rating tiers, best first; each tier writes to a shared file and a
    # per-weapon file.
    if self.riven_rate >= 95:
        paths.append(base + "\\godrolls\\Godrolls.txt")
        paths.append(base + "\\Godrolls\\%s Godrolls.txt" % label)
    elif self.riven_rate >= 85:
        paths.append(base + "\\Personal use\\Usables.txt")
        paths.append(base + "\\Personal use\\Usable %s.txt" % label)
    elif self.riven_rate >= 70:
        paths.append(base + "\\Decents\\Decents.txt")
        paths.append(base + "\\Decents\\Decent %s.txt" % label)
    elif self.riven_rate >= 60:
        paths.append(base + "\\Normal ones\\Normal ones.txt")
        paths.append(base + "\\Normal ones\\Normal %s.txt" % label)
    elif self.riven_rate >= 40:
        paths.append(base + "\\Bad ones\\Bad ones.txt")
        paths.append(base + "\\Bad ones\\Bad %s.txt" % label)
    elif self.riven_rate < 40:
        paths.append(base + "\\Trashcan\\Trash.txt")
        paths.append(base + "\\Trashcan\\%s Trash.txt" % label)
    return paths
def | |
import slicer
from RVXLiverSegmentationLib import removeNodeFromMRMLScene
from .RVXLiverSegmentationLogic import RVXLiverSegmentationLogic
from .RVXLiverSegmentationUtils import getMarkupIdPositionDictionary, createLabelMapVolumeNodeBasedOnModel
class VesselSeedPoints(object):
    """Helper class containing the different seed points to use for vessel VMTK extraction.

    All points except the last act as VMTK seeds; the last point acts as the
    stopper. The collection is valid once it holds at least two points.
    """

    def __init__(self, idPositionDict, pointIdList=None):
        """
        Parameters
        ----------
        idPositionDict: Dict[str, List[Float]]
            Dictionary with nodeId as key and node position as value
        pointIdList: List[str] or None
            List of points to add to the vessel seed points
        """
        self._idPositionDict = idPositionDict
        self._pointList = []
        self._pointIdList = []
        for pointId in (pointIdList or []):
            self.appendPoint(pointId)

    def appendPoint(self, pointId):
        """Adds input point id to the current seed list.

        Parameters
        ----------
        pointId: str - Id of the point to add to list
        """
        self._pointIdList.append(pointId)
        self._pointList.append(self._idPositionDict[pointId])

    def isValid(self):
        """A vessel needs at least two points (one seed plus one stopper)."""
        return len(self._pointList) > 1

    def getSeedPositions(self):
        """
        Returns
        -------
        List[List[float]] - List containing all the nodes before last in the seed list if valid else empty list.
        """
        if not self.isValid():
            return []
        return self._pointList[:-1]

    def getStopperPositions(self):
        """
        Returns
        -------
        List[List[float]] - List containing last node position in the seed list if valid else empty list.
        """
        if not self.isValid():
            return []
        return [self._pointList[-1]]

    def copy(self):
        """
        Returns
        -------
        VesselSeedPoints - Deep copy of current object
        """
        duplicate = VesselSeedPoints(self._idPositionDict.copy())
        duplicate._pointIdList = list(self._pointIdList)
        duplicate._pointList = list(self._pointList)
        return duplicate

    @staticmethod
    def combine(first, second):
        """Combine two VesselSeedPoints into one VesselSeedPoints. Second vessel seed points will be added to first list.
        To be correctly combined, first last id must correspond to second first id. Else method will raise a ValueError.

        Parameters
        ----------
        first: VesselSeedPoints
            First list of vessel points
        second: VesselSeedPoints
            Second list of vessel points. List will be added after first list.

        Returns
        -------
        VesselSeedPoints combining both lists.

        Raises
        ------
        ValueError if either first or second is Invalid or if first last point doesn't correspond to second first point.
        """
        if not (isinstance(first, VesselSeedPoints) and isinstance(second, VesselSeedPoints)):
            raise ValueError("Combine expects %s types. Got %s and %s types" % (
                VesselSeedPoints.__name__, type(first).__name__, type(second).__name__))
        isChainable = (first.isValid() and second.isValid()
                       and first.lastPointId() == second.firstPointId())
        if not isChainable:
            raise ValueError("Cannot combine vessel seed points %s and %s" % (first, second))
        # Drop second's first point since it duplicates first's last point.
        merged = first.copy()
        merged._pointList.extend(second._pointList[1:])
        merged._pointIdList.extend(second._pointIdList[1:])
        return merged

    def firstPointId(self):
        """
        Returns
        -------
        str or None
            First point id in the vessel seeds
        """
        if not self.isValid():
            return None
        return self._pointIdList[0]

    def lastPointId(self):
        """
        Returns
        -------
        str or None
            Last point id in the vessel seeds
        """
        if not self.isValid():
            return None
        return self._pointIdList[-1]

    def _orderedPositions(self):
        """Seed positions followed by stopper positions, used for ordering."""
        return self.getSeedPositions() + self.getStopperPositions()

    def __repr__(self):
        return str(self._pointIdList)

    def __eq__(self, other):
        if not isinstance(other, VesselSeedPoints):
            return False
        return (self._pointIdList, self._pointList) == (other._pointIdList, other._pointList)

    def __ne__(self, other):
        return not self == other

    def __le__(self, other):
        return self._orderedPositions() <= other._orderedPositions()

    def __lt__(self, other):
        return self._orderedPositions() < other._orderedPositions()

    def __ge__(self, other):
        return not self.__lt__(other)

    def __gt__(self, other):
        return not self.__le__(other)
class IExtractVesselStrategy(object):
    """Interface object for vessel volume extraction from source vessel branch tree and associated markup.

    Concrete strategies override extractVesselVolumeFromVesselBranchTree; the
    base implementation is a no-op returning None.
    """

    def extractVesselVolumeFromVesselBranchTree(self, vesselBranchTree, vesselBranchMarkup, logic):
        """Extract vessel volume and model from input data.
        The data are expected to be unchanged when the algorithm has run.

        Parameters
        ----------
        vesselBranchTree: VesselBranchTree
            Tree containing the hierarchy of the markups
        vesselBranchMarkup: vtkMRMLMarkupsFiducialNode
            Markup containing all the vessel branches
        logic: RVXLiverSegmentationLogic

        Returns
        -------
        Tuple[vtkMRMLScalarVolume, vtkMRMLModel]
            Tuple containing extracted volume information and associated poly data model
        """
        pass
def mergeVolumes(volumes, volName):
    """Merges volumes nodes into a single volume node with volName label. Also returns extracted volume surface mesh.

    Parameters
    ----------
    volumes: List[vtkMRMLVolumeNode]
    volName: str

    Returns
    -------
    Tuple[vtkMRMLVolumeNode, vtkMRMLModelNode]
    """
    # Pull every volume into an int array so they can be OR-combined.
    arrays = [slicer.util.arrayFromVolume(volume).astype(int) for volume in volumes]
    merged = arrays[0]
    for extra in arrays[1:]:
        merged |= extra
    # Write the union back into a fresh labelmap based on the first volume's geometry.
    outVol = createLabelMapVolumeNodeBasedOnModel(volumes[0], volName)
    slicer.util.updateVolumeFromArray(outVol, merged)
    surface = RVXLiverSegmentationLogic.createVolumeBoundaryModel(outVol, volName + "Model", threshold=1)
    return outVol, surface
class ExtractAllVesselsInOneGoStrategy(IExtractVesselStrategy):
    """Strategy that runs a single VMTK extraction using every markup point at once.

    Interior tree nodes become VMTK seeds; leaves become stoppers.
    """

    def extractVesselVolumeFromVesselBranchTree(self, vesselBranchTree, vesselBranchMarkup, logic):
        """Extract vessel volume and model from input data.
        The data are expected to be unchanged when the algorithm has run.

        Parameters
        ----------
        vesselBranchTree: VesselBranchTree
            Tree containing the hierarchy of the markups
        vesselBranchMarkup: vtkMRMLMarkupsFiducialNode
            Markup containing all the vessel branches
        logic: RVXLiverSegmentationLogic

        Returns
        -------
        Tuple[vtkMRMLScalarVolume, vtkMRMLModel]
            Tuple containing extracted volume information and associated poly data model
        """
        # Split the tree nodes: leaves stop the front, every other node seeds it.
        seedIds, endIds = [], []
        for nodeId in vesselBranchTree.getNodeList():
            (endIds if vesselBranchTree.isLeaf(nodeId) else seedIds).append(nodeId)
        # Map node ids to 3D positions for VMTK.
        idPositionDict = getMarkupIdPositionDictionary(vesselBranchMarkup)
        seedsPositions = [idPositionDict[nodeId] for nodeId in seedIds]
        endPositions = [idPositionDict[nodeId] for nodeId in endIds]
        # Run the level-set segmentation, then drop the temporary point nodes.
        seedsNodes, stoppersNodes, outVolume, outModel = logic.extractVesselVolumeFromPosition(seedsPositions,
                                                                                              endPositions)
        removeNodeFromMRMLScene(seedsNodes)
        removeNodeFromMRMLScene(stoppersNodes)
        return outVolume, outModel
class ExtractVesselFromVesselSeedPointsStrategy(IExtractVesselStrategy):
    """Base class for strategies using VMTK on multiple start + end points and aggregating results as one volume.
    deriving classes must implement a function returning a list of node pairs constructed from vessel tree and node id
    position dictionary
    """

    def constructVesselSeedList(self, vesselBranchTree, idPositionDict):
        """Abstract hook: subclasses return the seed groups to extract.

        Parameters
        ----------
        vesselBranchTree: VesselBranchTree
            Tree containing the hierarchy of the markups
        idPositionDict: Dict[str,List[float]]
            Dictionary with nodeId as key and node position as value

        Returns
        -------
        List[VesselSeedPoints] - List of VesselSeedPoints to extract using VMTK
        """
        pass

    def extractVesselVolumeFromVesselBranchTree(self, vesselBranchTree, vesselBranchMarkup, logic):
        """Extract vessel volume and model from input data.
        The data are expected to be unchanged when the algorithm has run.

        Runs one VMTK extraction per seed group, merges the per-group volumes
        into a single labelmap/model, then removes every intermediate node
        from the MRML scene.

        Parameters
        ----------
        vesselBranchTree: VesselBranchTree
            Tree containing the hierarchy of the markups
        vesselBranchMarkup: vtkMRMLMarkupsFiducialNode
            Markup containing all the vessel branches
        logic: RVXLiverSegmentationLogic

        Returns
        -------
        Tuple[vtkMRMLScalarVolume, vtkMRMLModel]
            Tuple containing extracted volume information and associated poly data model
        """
        # Convert seed id list and end id list to position lists
        idPositionDict = getMarkupIdPositionDictionary(vesselBranchMarkup)
        # Extract all the branches in the tree.
        # Loop over all ids
        vesselSeedList = self.constructVesselSeedList(vesselBranchTree, idPositionDict)
        volumes = []
        elementsToRemoveFromScene = []
        for vesselSeeds in vesselSeedList:
            # One VMTK run per seed group; keep every intermediate node so the
            # scene can be cleaned up after the merge below.
            seedsNodes, stoppersNodes, outVolume, outModel = logic.extractVesselVolumeFromPosition(
                vesselSeeds.getSeedPositions(), vesselSeeds.getStopperPositions())
            elementsToRemoveFromScene.append(seedsNodes)
            elementsToRemoveFromScene.append(stoppersNodes)
            elementsToRemoveFromScene.append(outModel)
            elementsToRemoveFromScene.append(outVolume)
            volumes.append(outVolume)
        # Merge the per-branch volumes into one labelmap plus surface model.
        outVolume, outModel = mergeVolumes(volumes, "levelSetSegmentation")
        for volume in elementsToRemoveFromScene:
            removeNodeFromMRMLScene(volume)
        return outVolume, outModel
class ExtractOneVesselPerParentChildNode(ExtractVesselFromVesselSeedPointsStrategy):
    """Strategy uses VMTK on parent + child pair and merges the results as output.

    Example :
    node 0
        |_ node 1-0
        |_ node 1-1
            |_node 2-0
            |_node 2-1
                |_node 3-1

    Expected VMTK run :
    Branch [0 & 1-0]
    Branch [0 & 1-1]
    Branch [1-1 & 2-0]
    Branch [1-1 & 2-1]
    Branch [2-1 & 3-1]
    """

    def constructVesselSeedList(self, vesselBranchTree, idPositionDict):
        """Build one two-point seed group per (parent, child) edge of the tree.

        Parameters
        ----------
        vesselBranchTree: VesselBranchTree
            Tree containing the hierarchy of the markups
        idPositionDict: Dict[str,List[float]]
            Dictionary with nodeId as key and node position as value

        Returns
        -------
        List[VesselSeedPoints] - List of VesselSeedPoints to extract using VMTK
        """
        return [
            VesselSeedPoints(idPositionDict, [parentId, childId])
            for parentId in vesselBranchTree.getNodeList()
            for childId in vesselBranchTree.getChildrenNodeId(parentId)
        ]
class ExtractOneVesselPerParentAndSubChildNode(ExtractVesselFromVesselSeedPointsStrategy):
"""Strategy uses VMTK on parent + sub child pair and merges the results as output.
Example :
node 0
|_ node 1-0
|_ node 1-1
|_node 2-0
|_node 2-1
|_node 3-1
Expected VMTK run :
Branch [0 & 1-0]
Branch [0 & 2-0]
Branch [0 & 2-1]
Branch [1-1 & 3-1]
"""
def constructVesselSeedList(self, vesselBranchTree, idPositionDict):
"""
Parameters
----------
vesselBranchTree: VesselBranchTree
Tree containing the hierarchy of the markups
idPositionDict: Dict[str,List[float]]
Dictionary with nodeId as key and node position as value
Returns
-------
List[VesselSeedPoints] - List of VesselSeedPoints to extract using VMTK
"""
return self.parentSubChildBranchPairs(vesselBranchTree, idPositionDict)
def parentSubChildBranchPairs(self, vesselBranchTree, idPositionDict, startNode=None):
# Initialize vessel seed list
vesselSeedList = []
# Initialize start node as tree root if startNode not provided
isStartNodeRoot = False
if startNode is None:
startNode = vesselBranchTree.getRootNodeId()
isStartNodeRoot = True
for child in vesselBranchTree.getChildrenNodeId(startNode):
# Construct startNode + subChildren pairs
subChildren = vesselBranchTree.getChildrenNodeId(child)
for subChild in subChildren:
vesselSeedList.append(VesselSeedPoints(idPositionDict, [startNode, subChild]))
# Special case if starting | |
<gh_stars>0
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import StringMessage
from models import BooleanMessage
from models import TeeShirtSize
from models import (Profile, ProfileMiniForm, ProfileForm)
from models import (Conference, ConferenceForm, ConferenceForms,
ConferenceQueryForm, ConferenceQueryForms)
from models import (Session, SessionForm, SessionForms)
from models import (Speaker, SpeakerForm, SpeakerForms)
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
# OAuth scope / client id shortcuts for the endpoints API declaration.
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# Memcache key and message template for the "nearly sold out" announcement.
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
                    'are nearly sold out: %s')
# Memcache key and message template for the featured-speaker banner.
MEMCACHE_FSPEAKER_KEY = "RECENT_FEATURED_SPEAKER"
FSPEAKER_TPL = ('{0} featured in the following sessions: {1}')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Defaults applied when a client omits fields at creation time.
DEFAULTS = {
    "city": "Default City",
    "maxAttendees": 0,
    "seatsAvailable": 0,
    "topics": ["Default", "Topic"],
}
SESS_DEFAULTS = {
    "highlights": ['Default', 'Highlights'],
    "typeOfSession": "lecture",
}
# Query-form operator names -> Datastore filter operators.
OPERATORS = {
    'EQ': '=',
    'GT': '>',
    'GTEQ': '>=',
    'LT': '<',
    'LTEQ': '<=',
    'NE': '!='
}
# Query-form field names -> Conference model property names.
FIELDS = {
    'CITY': 'city',
    'TOPIC': 'topics',
    'MONTH': 'month',
    'MAX_ATTENDEES': 'maxAttendees',
}
# ResourceContainers pair an (optional) request body message with
# URL-path/query-string parameters for each endpoint method.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
    ConferenceForm,
    websafeConferenceKey=messages.StringField(1),
)
WISHLIST_POST = endpoints.ResourceContainer(
    sessionKey=messages.StringField(1),
)
USER_SESSIONS_POST = endpoints.ResourceContainer(
    date=messages.StringField(1, required=True),
    dateTo=messages.StringField(2),
)
SESS_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
    typeOfSession=messages.StringField(2),
)
SESS_POST_REQUEST = endpoints.ResourceContainer(
    SessionForm,
    websafeConferenceKey=messages.StringField(1),
)
SESS_PUT_REQUEST = endpoints.ResourceContainer(
    SessionForm,
    websafeSessionKey=messages.StringField(1),
)
SPEAKER_GET_REQUEST = endpoints.ResourceContainer(
    speaker=messages.StringField(1),
)
SP_GET_REQUEST = endpoints.ResourceContainer(
    websafeSpeakerKey=messages.StringField(1),
)
SP_POST_REQUEST = endpoints.ResourceContainer(
    SpeakerForm,
)
SP_PUT_REQUEST = endpoints.ResourceContainer(
    SpeakerForm,
    websafeSpeakerKey=messages.StringField(1),
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID,
API_EXPLORER_CLIENT_ID,
ANDROID_CLIENT_ID,
IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
    """Copy relevant fields from Conference to ConferenceForm.

    Date fields are serialized to strings; websafeKey is filled from the
    entity key; organizerDisplayName is set when displayName is provided.
    """
    form = ConferenceForm()
    for field in form.all_fields():
        if hasattr(conf, field.name):
            value = getattr(conf, field.name)
            # Dates travel as ISO-formatted strings in the form.
            setattr(form, field.name, str(value) if field.name.endswith('Date') else value)
        elif field.name == "websafeKey":
            setattr(form, field.name, conf.key.urlsafe())
    if displayName:
        setattr(form, 'organizerDisplayName', displayName)
    form.check_initialized()
    return form
def _createConferenceObject(self, request):
    """Create or update Conference object.
    User logged in is required
    If user's profile doesn't exist for some reason create it.
    return:
        ConferenceForm/request.

    Side effects: stores a Conference entity as a child of the caller's
    Profile and enqueues a confirmation-email task.
    """
    # preload necessary data items
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    # get/create profile
    prof = self._getProfileFromUser()
    user_id = getUserId(user)
    if not request.name:
        raise endpoints.BadRequestException("Conference 'name' field required")
    # copy ConferenceForm/ProtoRPC Message into dict
    data = {field.name: getattr(request, field.name) for field in request.all_fields()}
    # websafeKey/organizerDisplayName are output-only; drop before storing
    del data['websafeKey']
    del data['organizerDisplayName']
    # add default values for those missing (both data model & outbound Message)
    for df in DEFAULTS:
        if data[df] in (None, []):
            data[df] = DEFAULTS[df]
            setattr(request, df, DEFAULTS[df])
    # convert dates from strings to Date objects; set month based on start_date
    if data['startDate']:
        data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
        data['month'] = data['startDate'].month
    else:
        data['month'] = 0
    if data['endDate']:
        data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
    # set seatsAvailable to be same as maxAttendees on creation
    if data["maxAttendees"] > 0:
        data["seatsAvailable"] = data["maxAttendees"]
    # generate Profile Key based on user ID and Conference
    # ID based on Profile key get Conference key from ID
    p_key = prof.key
    c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
    c_key = ndb.Key(Conference, c_id, parent=p_key)
    data['key'] = c_key
    data['organizerUserId'] = request.organizerUserId = user_id
    # create Conference, send email to organizer confirming
    # creation of Conference & return (modified) ConferenceForm
    Conference(**data).put()
    taskqueue.add(params={'email': user.email(),
                          'conferenceInfo': repr(request)},
                  url='/tasks/send_confirmation_email')
    return request
@ndb.transactional()
def _updateConferenceObject(self, request):
    """Update an existing Conference with the fields supplied in request.

    Only the conference owner may update it.  Runs inside an NDB
    transaction so the read-modify-write of the entity is atomic.

    Args:
        request: message carrying websafeConferenceKey plus the
            ConferenceForm fields to update.

    Returns:
        ConferenceForm reflecting the updated entity.

    Raises:
        endpoints.UnauthorizedException: no user is signed in.
        endpoints.NotFoundException: no conference matches the key.
        endpoints.ForbiddenException: the caller is not the owner.
    """
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    user_id = getUserId(user)
    # update existing conference
    conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
    # check that conference exists
    if not conf:
        raise endpoints.NotFoundException(
            'No conference found with key: %s' % request.websafeConferenceKey)
    # check that user is owner
    if user_id != conf.organizerUserId:
        raise endpoints.ForbiddenException(
            'Only the owner can update the conference.')
    # Not getting all the fields, so don't create a new object; just
    # copy relevant fields from ConferenceForm to Conference object.
    # (BUGFIX: removed a dead dict comprehension that built a full
    # field->value dict only to be shadowed immediately by the loop
    # variable `data` below.)
    for field in request.all_fields():
        data = getattr(request, field.name)
        # only copy fields where we get data
        if data not in (None, []):
            # special handling for dates (convert string to Date)
            if field.name in ('startDate', 'endDate'):
                data = datetime.strptime(data, "%Y-%m-%d").date()
                if field.name == 'startDate':
                    conf.month = data.month
            # write to Conference object
            setattr(conf, field.name, data)
    conf.put()
    prof = ndb.Key(Profile, user_id).get()
    return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
                  http_method='POST', name='createConference')
def createConference(self, request):
    """Endpoint: create a new conference owned by the signed-in user."""
    return self._createConferenceObject(request)
@endpoints.method(CONF_POST_REQUEST, ConferenceForm,
                  path='conference/{websafeConferenceKey}',
                  http_method='PUT', name='updateConference')
def updateConference(self, request):
    """Endpoint: update a conference with the provided fields (owner only)."""
    return self._updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
                  path='conference/{websafeConferenceKey}',
                  http_method='GET', name='getConference')
def getConference(self, request):
    """Look up a single conference by its websafe datastore key."""
    wsck = request.websafeConferenceKey
    conference = ndb.Key(urlsafe=wsck).get()
    if not conference:
        raise endpoints.NotFoundException('No conference found with key: %s' % wsck)
    # the Conference's parent entity is the organizer's Profile
    organizer = conference.key.parent().get()
    return self._copyConferenceToForm(conference, getattr(organizer, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
                  path='getConferencesCreated',
                  http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
    """List every conference whose organizer is the signed-in user."""
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    user_id = getUserId(user)
    profile_key = ndb.Key(Profile, user_id)
    # ancestor query: conferences are stored as children of the Profile
    owned = Conference.query(ancestor=profile_key)
    profile = profile_key.get()
    display_name = getattr(profile, 'displayName')
    forms = [self._copyConferenceToForm(c, display_name) for c in owned]
    return ConferenceForms(items=forms)
def _getQuery(self, request):
    """Build an NDB Conference query from the user-supplied filters."""
    inequality_filter, filters = self._formatFilters(request.filters)
    query = Conference.query()
    # Datastore requires the first sort order to match the inequality
    # property (when there is one); name is always the tie-breaker.
    if inequality_filter:
        query = query.order(ndb.GenericProperty(inequality_filter))
    query = query.order(Conference.name)
    for spec in filters:
        value = spec["value"]
        # numeric properties arrive as strings; coerce before filtering
        if spec["field"] in ("month", "maxAttendees"):
            value = int(value)
        node = ndb.query.FilterNode(spec["field"], spec["operator"], value)
        query = query.filter(node)
    return query
def _formatFilters(self, filters):
    """Validate and normalize user-supplied query filters.

    Returns a ``(inequality_field, formatted_filters)`` tuple, where
    ``inequality_field`` is the single field (or None) carrying a
    non-equality operator -- Datastore allows at most one such field.
    """
    formatted = []
    inequality_field = None
    for raw in filters:
        spec = {field.name: getattr(raw, field.name) for field in raw.all_fields()}
        try:
            spec["field"] = FIELDS[spec["field"]]
            spec["operator"] = OPERATORS[spec["operator"]]
        except KeyError:
            raise endpoints.BadRequestException("Filter contains invalid field or operator.")
        if spec["operator"] != "=":
            # Remember the first inequality field and reject any later
            # inequality on a different field.
            if inequality_field and inequality_field != spec["field"]:
                raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
            inequality_field = spec["field"]
        formatted.append(spec)
    return (inequality_field, formatted)
@staticmethod
def _notifyFollowers():
    """Notify followers of conferences that have open seats again.

    Queries conferences flagged as having followers and with seats
    available, queues one notification email per follower, then clears
    the follower list.  (Executed by the SetNotificationHandler cron job.)
    """
    confs = Conference.query(ndb.AND(
        Conference.seatsAvailable > 0,
        Conference.hasFollowers == True
    )
    ).fetch()
    for conf in confs:
        for follower in conf.followedBy:
            taskqueue.add(params={'email': follower, 'conference': conf.name},
                          url='/tasks/send_email_2_follower')
        conf.followedBy = []
        # BUGFIX: also clear the flag; otherwise this cron job keeps
        # re-fetching and re-writing conferences whose follower list is
        # already empty (hasFollowers stayed True forever).
        conf.hasFollowers = False
        conf.put()
@endpoints.method(ConferenceQueryForms, ConferenceForms,
                  path='queryConferences',
                  http_method='POST',
                  name='queryConferences')
def queryConferences(self, request):
    """Query for conferences matching the submitted filters."""
    conferences = self._getQuery(request)
    # need to fetch organiser displayName from profiles;
    # get all keys and use get_multi for speed
    organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
    profiles = ndb.get_multi(organisers)
    # put display names in a dict for easier fetching
    names = {}
    for profile in profiles:
        # ROBUSTNESS: get_multi returns None for keys with no entity;
        # skip those so a missing/stale Profile cannot crash the query.
        if profile is not None:
            names[profile.key.id()] = profile.displayName
    # return individual ConferenceForm object per Conference; fall back
    # to an empty display name when the organizer's Profile is missing
    return ConferenceForms(
        items=[self._copyConferenceToForm(conf, names.get(conf.organizerUserId, ''))
               for conf in conferences]
    )
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/follow/{websafeConferenceKey}',
http_method='GET', name='followConference')
def followConference(self, request):
"""Add user to the followers list of the conf,
This list is used to notify users when a conf becomes available again
Returns:
True: when succees
False: if no conf or conf is not full
"""
retVal = True
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
email = user.email()
wsck = request.websafeConferenceKey
c_key = ndb.Key(urlsafe=wsck)
if | |
1),
(6, 3, -5, -5): (0, 1),
(6, 3, -5, -4): (0, 1),
(6, 3, -5, -3): (0, 0),
(6, 3, -5, -2): (-1, -1),
(6, 3, -5, -1): (0, 1),
(6, 3, -5, 0): (0, 1),
(6, 3, -5, 1): (0, 1),
(6, 3, -5, 2): (0, 1),
(6, 3, -5, 3): (0, 1),
(6, 3, -5, 4): (0, 1),
(6, 3, -5, 5): (0, 1),
(6, 3, -4, -5): (-1, 1),
(6, 3, -4, -4): (-1, 1),
(6, 3, -4, -3): (-1, 0),
(6, 3, -4, -2): (-1, -1),
(6, 3, -4, -1): (0, 1),
(6, 3, -4, 0): (0, 1),
(6, 3, -4, 1): (0, 1),
(6, 3, -4, 2): (0, 1),
(6, 3, -4, 3): (0, 1),
(6, 3, -4, 4): (0, 1),
(6, 3, -4, 5): (0, 1),
(6, 3, -3, -5): (-1, 1),
(6, 3, -3, -4): (-1, 1),
(6, 3, -3, -3): (-1, 0),
(6, 3, -3, -2): (-1, -1),
(6, 3, -3, -1): (0, 1),
(6, 3, -3, 0): (0, 1),
(6, 3, -3, 1): (0, 1),
(6, 3, -3, 2): (0, 1),
(6, 3, -3, 3): (0, 1),
(6, 3, -3, 4): (0, 1),
(6, 3, -3, 5): (0, 1),
(6, 3, -2, -5): (-1, 1),
(6, 3, -2, -4): (-1, 1),
(6, 3, -2, -3): (-1, 0),
(6, 3, -2, -2): (-1, -1),
(6, 3, -2, -1): (0, 1),
(6, 3, -2, 0): (0, 1),
(6, 3, -2, 1): (0, 1),
(6, 3, -2, 2): (0, 1),
(6, 3, -2, 3): (0, 1),
(6, 3, -2, 4): (0, 1),
(6, 3, -2, 5): (0, 1),
(6, 3, -1, -5): (-1, 1),
(6, 3, -1, -4): (-1, 1),
(6, 3, -1, -3): (-1, 0),
(6, 3, -1, -2): (0, 1),
(6, 3, -1, -1): (0, 1),
(6, 3, -1, 0): (1, 1),
(6, 3, -1, 1): (1, 1),
(6, 3, -1, 2): (1, 1),
(6, 3, -1, 3): (1, 1),
(6, 3, -1, 4): (1, 1),
(6, 3, -1, 5): (1, 0),
(6, 3, 0, -5): (0, 1),
(6, 3, 0, -4): (0, 1),
(6, 3, 0, -3): (-1, 1),
(6, 3, 0, -2): (-1, 1),
(6, 3, 0, -1): (1, 1),
(6, 3, 0, 0): (1, 1),
(6, 3, 0, 1): (1, 1),
(6, 3, 0, 2): (1, 1),
(6, 3, 0, 3): (1, 1),
(6, 3, 0, 4): (1, 1),
(6, 3, 0, 5): (1, 0),
(6, 3, 1, -5): (1, 0),
(6, 3, 1, -4): (1, 0),
(6, 3, 1, -3): (1, 0),
(6, 3, 1, -2): (1, -1),
(6, 3, 1, -1): (1, 1),
(6, 3, 1, 0): (0, 1),
(6, 3, 1, 1): (0, 1),
(6, 3, 1, 2): (0, 1),
(6, 3, 1, 3): (0, 1),
(6, 3, 1, 4): (0, 1),
(6, 3, 1, 5): (0, 1),
(6, 3, 2, -5): (1, 0),
(6, 3, 2, -4): (1, 0),
(6, 3, 2, -3): (1, 0),
(6, 3, 2, -2): (1, -1),
(6, 3, 2, -1): (0, 1),
(6, 3, 2, 0): (-1, 1),
(6, 3, 2, 1): (-1, 1),
(6, 3, 2, 2): (-1, 1),
(6, 3, 2, 3): (-1, 1),
(6, 3, 2, 4): (-1, 1),
(6, 3, 2, 5): (-1, 1),
(6, 3, 3, -5): (0, 1),
(6, 3, 3, -4): (0, 1),
(6, 3, 3, -3): (0, 0),
(6, 3, 3, -2): (1, 1),
(6, 3, 3, -1): (1, 1),
(6, 3, 3, 0): (-1, 1),
(6, 3, 3, 1): (-1, 1),
(6, 3, 3, 2): (-1, 1),
(6, 3, 3, 3): (-1, 1),
(6, 3, 3, 4): (-1, 1),
(6, 3, 3, 5): (-1, 1),
(6, 3, 4, -5): (0, 1),
(6, 3, 4, -4): (0, 1),
(6, 3, 4, -3): (0, 1),
(6, 3, 4, -2): (0, 1),
(6, 3, 4, -1): (0, 1),
(6, 3, 4, 0): (0, 1),
(6, 3, 4, 1): (0, 1),
(6, 3, 4, 2): (0, 1),
(6, 3, 4, 3): (-1, 1),
(6, 3, 4, 4): (-1, 1),
(6, 3, 4, 5): (-1, 1),
(6, 3, 5, -5): (0, 1),
(6, 3, 5, -4): (0, 1),
(6, 3, 5, -3): (0, 1),
(6, 3, 5, -2): (0, 1),
(6, 3, 5, -1): (0, 1),
(6, 3, 5, 0): (0, 1),
(6, 3, 5, 1): (0, 1),
(6, 3, 5, 2): (0, 1),
(6, 3, 5, 3): (0, 1),
(6, 3, 5, 4): (0, 1),
(6, 3, 5, 5): (0, 1),
(6, 4, -5, -5): (0, 1),
(6, 4, -5, -4): (0, 0),
(6, 4, -5, -3): (-1, -1),
(6, 4, -5, -2): (0, 1),
(6, 4, -5, -1): (0, 1),
(6, 4, -5, 0): (0, 1),
(6, 4, -5, 1): (0, 1),
(6, 4, -5, 2): (0, 1),
(6, 4, -5, 3): (0, 1),
(6, 4, -5, 4): (0, 1),
(6, 4, -5, 5): (0, 1),
(6, 4, -4, -5): (-1, 1),
(6, 4, -4, -4): (-1, 0),
(6, 4, -4, -3): (-1, -1),
(6, 4, -4, -2): (0, 1),
(6, 4, -4, -1): (0, 1),
(6, 4, -4, 0): (0, 1),
(6, 4, -4, 1): (0, 1),
(6, 4, -4, 2): (0, 1),
(6, 4, -4, 3): (0, 1),
(6, 4, -4, 4): (0, 1),
(6, 4, -4, 5): (0, 1),
(6, 4, -3, -5): (-1, 1),
(6, 4, -3, -4): (-1, 0),
(6, 4, -3, -3): (-1, -1),
(6, 4, -3, -2): (0, 1),
(6, 4, -3, -1): (0, 1),
(6, 4, -3, 0): (0, 1),
(6, 4, -3, 1): (0, 1),
(6, 4, -3, 2): (0, 1),
(6, 4, -3, 3): (0, 1),
(6, 4, -3, 4): (0, 1),
(6, 4, -3, 5): (0, 1),
(6, 4, -2, -5): (-1, 1),
(6, 4, -2, -4): (-1, 0),
(6, 4, -2, -3): (-1, -1),
(6, 4, -2, -2): (0, 1),
(6, 4, -2, -1): (0, 1),
(6, 4, -2, 0): (0, 1),
(6, 4, -2, 1): (0, 1),
(6, 4, -2, 2): (0, 1),
(6, 4, -2, 3): (0, 1),
(6, 4, -2, 4): (0, 1),
(6, 4, -2, 5): (0, 1),
(6, 4, -1, -5): (-1, 1),
(6, 4, -1, -4): (-1, 0),
(6, 4, -1, -3): (0, 1),
(6, 4, -1, -2): (0, 1),
(6, 4, -1, -1): (0, 1),
(6, 4, -1, 0): (1, 1),
(6, 4, -1, 1): (1, 1),
(6, 4, -1, 2): (1, 1),
(6, 4, -1, 3): (1, 1),
(6, 4, -1, 4): (1, 1),
(6, 4, -1, 5): (1, 0),
(6, 4, 0, -5): (0, 1),
(6, 4, 0, -4): (-1, 1),
(6, 4, 0, -3): (-1, 1),
(6, 4, 0, -2): (-1, 1),
(6, 4, 0, -1): (1, 1),
(6, 4, 0, 0): (1, 1),
(6, 4, 0, 1): (1, 1),
(6, 4, 0, 2): (1, 1),
(6, 4, 0, 3): (1, 1),
(6, 4, 0, 4): (1, 1),
(6, 4, 0, 5): (1, 0),
(6, 4, 1, -5): (1, 0),
(6, 4, 1, -4): (1, 0),
(6, 4, 1, -3): (1, -1),
(6, 4, 1, -2): (1, 1),
(6, 4, 1, -1): (0, 1),
(6, 4, 1, 0): (0, 1),
(6, 4, 1, 1): (0, 1),
(6, 4, 1, 2): (0, 1),
(6, 4, 1, 3): (0, 1),
(6, 4, 1, 4): (0, 1),
(6, 4, 1, 5): (0, 1),
(6, 4, 2, -5): (1, 0),
(6, 4, 2, -4): (1, 0),
(6, 4, 2, -3): (1, -1),
(6, 4, 2, -2): (0, 1),
(6, 4, 2, -1): (-1, 1),
(6, 4, 2, 0): (-1, 1),
(6, 4, 2, 1): (-1, 1),
(6, 4, 2, 2): (-1, 1),
(6, 4, 2, 3): (-1, 1),
(6, 4, 2, 4): (-1, 1),
(6, 4, 2, 5): (-1, 1),
(6, 4, 3, -5): (0, 1),
(6, 4, 3, -4): (0, 0),
(6, 4, 3, -3): (1, 1),
(6, 4, 3, -2): (1, 1),
(6, | |
import time
import pymysql # for pulling UCSC data
import pandas as pd
from pathlib import Path
import logging
# app
from .progress_bar import * # tqdm, context-friendly
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
logging.getLogger('numexpr').setLevel(logging.WARNING)
# Connection settings for UCSC's public Genome Browser MySQL mirror;
# these public read-only credentials should not change.
HOST = 'genome-mysql.soe.ucsc.edu'
USER = 'genome'
DB = 'hg38'
# cpg related table schema: http://genome.ucsc.edu/cgi-bin/hgTables?db=hg38&hgta_group=regulation&hgta_track=cpgIslandExt&hgta_table=cpgIslandExt&hgta_doSchema=describe+table+schema
# Candidate UCSC reference tables for gene lookups (see the `ref` kwarg
# of fetch_genes); the first entry is the default.
possible_tables = [
    'refGene', # cruzdb used this in examples -- 88,819 genes
    'knownGene', # 232,184 -- genes and pseudo genes too (use TranscriptType == 'coding_protein')
    'ncbiRefSeq', # 173,733 genes -- won't have matching descriptions; no kgXref shared key.
    # 'wgEncodeGencodeBasicV38', # 177k genes -- doesn't work
]
# Maps UCSC column names to the DMR-file column they are compared against.
# NOTE(review): 'txEnd' also maps to 'chromStart' -- presumably distances
# are always measured from the region start; confirm against fetch_genes.
table_mapper = {
    'txStart': 'chromStart', # knownGene transcription start, refGene start, ncbiRefSeq start
    'txEnd': 'chromStart',
}
# Module-level cached DB connection, lazily opened and reused across calls.
conn = None
def fetch_genes(dmr_regions_file=None, tol=250, ref=None, tissue=None, sql=None,
save=True, verbose=False, use_cached=True, no_sync=False, genome_build=None,
host=HOST, user=USER, password='', db=DB):
"""find genes that are adjacent to significantly different CpG regions provided.
Summary:
fetch_genes() annotates the DMR region output file, using the UCSC Genome Browser database as a reference
as to what genes are nearby. This is an exploratory tool, as there are many versions of the human genome
that map genes to slightly different locations.
fetch_genes() is an EXPLORATORY tool and makes a number of simplicifications:
* the DMR regions file saves one CpG probe name and location, even though clusters of probes may map to
that nearby area.
* it measures the distance from the start position of the one representative probe per region to any nearby
genes, using the `tol`erance parameter as the cutoff. Tolerance is the max number of base pairs of separation
between the probe sequence start and the gene sequence start for it to be considered as a match.
* The default `tol`erance is 250, but that is arbitrary. Increase it to expand the search area, or decrease it
to be more conservative. Remember that Illumina CpG probe sequences are 50 base pairs long, so 100 is nearly
overlapping. 300 or 500 would also be reasonable.
* "Adjacent" in the linear sequence may not necessarily mean that the CpG island is FUNCTIONALLY coupled to the
regulatory or coding region of the nearby protein. DNA superstructure can position regulatory elements near to
a coding region that are far upstream or downstream from the mapped position, and there is no easy way to identify
"adjacent" in this sense.
* Changing the `tol`erance, or the reference database will result major differences in the output, and thus
one's interpretation of the same data.
* Before interpreting these "associations" you should also consider filtering candidate genes by
specific cell types where they are expressed. You should know the tissue from which your samples originated.
And filter candidate genes to exclude those that are only expressed in your tissue during development,
if your samples are from adults, and vice versa.
Arguments:
dmr_regions_file:
pass in the output file DataFrame or FILEPATH from DMR function.
Omit if you specify the `sql` kwarg instead.
ref: default is `refGene`
use one of possible_tables for lookup:
- 'refGene' -- 88,819 genes -- default table used in comb-b and cruzdb packages.
- 'knownGene' -- 232,184 genes -- pseudo genes too (the "WHere TranscriptType == 'coding_protein'" clause would work, but these fields are missing from the data returned.)
- 'ncbiRefSeq' -- 173,733 genes -- this table won't have gene descriptions, because it cannot be joined with the 'kgXref' (no shared key).
Additionally, 'gtexGeneV8' is used for tissue-expression levels. Pseudogenes are ommited using the "WHERE score > 0" clause in the SQL.
tol: default 250
+/- this many base pairs consistutes a gene "related" to a CpG region provided.
tissue: str
if specified, adds additional columns to output with the expression levels for identified genes
in any/all tissue(s) that match the keyword. (e.g. if your methylation samples are whole blood,
specify `tissue=blood`) For all 54 tissues, use `tissue=all`
genome_build: (None, NEW, OLD)
Only the default human genome build, hg38, is currently supported. Even though many other builds are available
in the UCSC database, most tables do not join together in the same way.
use_cached:
If True, the first time it downloads a dataset from UCSC Genome Browser, it will save to disk
and use that local copy thereafter. To force it to use the online copy, set to False.
no_sync:
methylize ships with a copy of the relevant UCSC gene browser tables, and will auto-update these
every month. If you want to run this function without accessing this database, you can avoid updating
using the `no_sync=True` kwarg.
host, user, password, db:
Internal database connections for UCSC server. You would only need to mess with these of the server domain changes
from the current hardcoded value {HOST}. Necessary for tables to be updated and for `tissue` annotation.
sql:
a DEBUG mode that bypasses the function and directly queries the database for any information the user wants.
Be sure to specify the complete SQL statement, including the ref-table (e.g. refGene or ncbiRefSeq).
.. note::
This method flushes cache periodically. After 30 days, it deletes cached reference gene tables and re-downloads.
"""
if verbose:
logging.basicConfig(level=logging.INFO)
if isinstance(dmr_regions_file, pd.DataFrame):
regions = dmr_regions_file
reqd_regions = set(['name', 'chromStart'])
if set(regions.columns) & reqd_regions != reqd_regions:
raise KeyError(f"Your file of CpG regions must have these columns, at a minimum: {reqd_regions}")
LOGGER.info(f"Loaded {regions.shape[0]} CpG regions.")
elif not sql and dmr_regions_file is None:
raise Exception("Either provide a path to the DMR stats file or a sql query.")
elif not sql:
regions = pd.read_csv(dmr_regions_file) #.sort_values('z_p')
reqd_regions = set(['name', 'chromStart'])
if set(regions.columns) & reqd_regions != reqd_regions:
raise KeyError(f"Your file of CpG regions must have these columns, at a minimum: {reqd_regions}")
LOGGER.info(f"Loaded {regions.shape[0]} CpG regions from {dmr_regions_file}.")
if not ref:
ref = possible_tables[0] # refGene
global conn # allows function to reuse the same connection
if conn is None and no_sync is False:
conn = pymysql.connect(host=host, user=user, password=password, db=db, cursorclass=pymysql.cursors.DictCursor)
if sql:
with conn.cursor() as cur:
cur.execute(sql)
return list(cur.fetchall())
# these will be packed into the output CSV saved, but a nested dataframe is returned.
matches = {i:[] for i in regions.name} # cpg name --> [gene names]
distances = {i:[] for i in regions.name}
descriptions = {i:[] for i in regions.name}
# fetch WHOLE table needed, unless using cache
package_path = Path(__file__).parent
cache_file = Path(package_path, 'data', f"{ref}.pkl")
cache_available = cache_file.exists()
# don't use cache if over 1 month old:
if use_cached and cache_available and no_sync is False:
last_download = cache_file.stat().st_ctime
if time.time() - last_download > 2629746:
LOGGER.info(f"Cached genome table is over 1 month old; re-downloading from UCSC.")
cache_file.unlink()
cache_available = False
if use_cached and cache_available:
genes = pd.read_pickle(cache_file)
LOGGER.info(f"""Using cached `{ref}`: {Path(package_path, 'data', f"{ref}.pkl")} with ({len(genes)}) genes""")
elif no_sync is False: # download it
LOGGER.info(f"Downloading {ref}")
# chrom, txStart, txEnd; all 3 tables have name, but knownGene lacks a name2.
if ref == 'knownGene':
sql = f"""SELECT name as name2, txStart, txEnd, description FROM {ref} LEFT JOIN kgXref ON kgXref.kgID = {ref}.name;"""
else:
sql = f"""SELECT name, name2, txStart, txEnd, description FROM {ref} LEFT JOIN kgXref ON kgXref.refseq = {ref}.name;"""
with conn.cursor() as cur:
cur.execute(sql)
genes = list(cur.fetchall())
if use_cached:
import pickle
with open(Path(package_path, 'data', f"{ref}.pkl"),'wb') as f:
pickle.dump(genes, f)
LOGGER.info(f"Cached {Path(package_path, 'data', f'{ref}.pkl')} on first use, with {len(genes)} genes")
else:
LOGGER.info(f"Using {ref} with {len(genes)} genes")
# compare two dataframes and calc diff.
# need to loop here: but prob some matrix way of doing this faster
done = 0
for gene in tqdm(genes, total=len(genes), desc="Mapping genes"):
closeby = regions[ abs(regions.chromStart - gene['txStart']) < tol ]
if len(closeby) > 0:
for idx,item in closeby.iterrows():
matches[item['name']].append(gene['name2'])
dist = item['chromStart'] - gene['txStart']
distances[item['name']].append(dist)
desc = gene['description'].decode('utf8') if gene['description'] != None else ''
descriptions[item['name']].append(desc)
done += 1
#if done % 1000 == 0:
# LOGGER.info(f"[{done} matches]")
# also, remove duplicate gene matches for the same region (it happens a lot)
matches = {k: ','.join(set(v)) for k,v in matches.items()}
distances = {k: ','.join(set([str(j) for j in v])) | |
import datetime
import os
from typing import TYPE_CHECKING, Iterable, Optional, Union
from uuid import uuid4
from django.conf import settings
from django.contrib.postgres.aggregates import StringAgg
from django.db import models
from django.db.models import JSONField # type: ignore
from django.db.models import Case, Count, F, FilteredRelation, Q, Sum, Value, When
from django.db.models.functions import Coalesce
from django.urls import reverse
from django.utils.encoding import smart_text
from django_measurement.models import MeasurementField
from django_prices.models import MoneyField
from draftjs_sanitizer import clean_draft_js
from measurement.measures import Weight
from mptt.managers import TreeManager
from mptt.models import MPTTModel
from versatileimagefield.fields import PPOIField, VersatileImageField
from ..core.db.fields import SanitizedJSONField
from ..core.models import (
ModelWithMetadata,
PublishableModel,
PublishedQuerySet,
SortableModel,
)
from ..core.permissions import ProductPermissions
from ..core.utils import build_absolute_uri
from ..core.utils.draftjs import json_content_to_raw_text
from ..core.utils.translations import TranslationProxy
from ..core.weight import WeightUnits, zero_weight
from ..discount import DiscountInfo
from ..discount.utils import calculate_discounted_price
from ..seo.models import SeoModel, SeoModelTranslation
from . import AttributeInputType
from ..unurshop.ushop.models import Shop
if TYPE_CHECKING:
# flake8: noqa
from prices import Money
from ..account.models import User
from django.db.models import OrderBy
class Category(MPTTModel, ModelWithMetadata, SeoModel):
    """Product category arranged in a tree via django-mptt.

    ``parent`` links to the enclosing category (None for roots); SEO
    fields and metadata come from the mixin base classes.
    """

    name = models.CharField(max_length=250)
    slug = models.SlugField(max_length=255, unique=True, allow_unicode=True)
    description = models.TextField(blank=True)
    # rich-text description stored as raw draft.js JSON
    description_json = JSONField(blank=True, default=dict)
    parent = models.ForeignKey(
        "self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
    )
    background_image = VersatileImageField(
        upload_to="category-backgrounds", blank=True, null=True
    )
    background_image_alt = models.CharField(max_length=128, blank=True)
    objects = models.Manager()
    # MPTT-aware manager for tree operations
    tree = TreeManager()
    translated = TranslationProxy()

    def __str__(self) -> str:
        return self.name
class CategoryTranslation(SeoModelTranslation):
    """Per-language translation of a Category's name and description."""

    language_code = models.CharField(max_length=10)
    category = models.ForeignKey(
        Category, related_name="translations", on_delete=models.CASCADE
    )
    name = models.CharField(max_length=128)
    description = models.TextField(blank=True)
    description_json = JSONField(blank=True, default=dict)

    class Meta:
        # at most one translation per language per category
        unique_together = (("language_code", "category"),)

    def __str__(self) -> str:
        return self.name

    def __repr__(self) -> str:
        class_ = type(self)
        return "%s(pk=%r, name=%r, category_pk=%r)" % (
            class_.__name__,
            self.pk,
            self.name,
            self.category_id,
        )
class ProductType(ModelWithMetadata):
    """Template shared by a family of products.

    Controls whether products of this type have variants, require
    shipping, or are digital, and supplies a default weight.
    """

    name = models.CharField(max_length=250)
    slug = models.SlugField(max_length=255, unique=True, allow_unicode=True)
    has_variants = models.BooleanField(default=True)
    is_shipping_required = models.BooleanField(default=True)
    is_digital = models.BooleanField(default=False)
    weight = MeasurementField(
        measurement=Weight, unit_choices=WeightUnits.CHOICES, default=zero_weight
    )

    class Meta:
        ordering = ("slug",)
        app_label = "product"

    def __str__(self) -> str:
        return self.name

    def __repr__(self) -> str:
        class_ = type(self)
        return "<%s.%s(pk=%r, name=%r)>" % (
            class_.__module__,
            class_.__name__,
            self.pk,
            self.name,
        )
class ProductsQueryset(PublishedQuerySet):
    """Product queryset adding visibility rules and attribute-based sorting."""

    def collection_sorted(self, user: "User"):
        """Products visible to *user*, ordered by their manual position
        within a collection (items without a sort_order come last)."""
        qs = self.visible_to_user(user)
        qs = qs.order_by(
            F("collectionproduct__sort_order").asc(nulls_last=True),
            F("collectionproduct__id"),
        )
        return qs

    def published_with_variants(self):
        """Published products that have at least one variant."""
        published = self.published()
        return published.filter(variants__isnull=False).distinct()

    def visible_to_user(self, user):
        """Everything for privileged users; otherwise only published
        products that actually have variants."""
        if self.user_has_access_to_all(user):
            return self.all()
        return self.published_with_variants()

    def sort_by_attribute(
        self, attribute_pk: Union[int, str], descending: bool = False
    ):
        """Sort a query set by the values of the given product attribute.

        Annotates each product with ``concatenated_values`` (the attribute's
        values joined by commas) and ``concatenated_values_order`` (a bucket:
        0 = has values, 1 = empty value, 2 = attribute absent), then orders
        by those annotations and name.

        :param attribute_pk: The database ID (must be a numeric) of the
            attribute to sort by.
        :param descending: The sorting direction.
        """
        qs: models.QuerySet = self
        # If the passed attribute ID is valid, execute the sorting
        if not (isinstance(attribute_pk, int) or attribute_pk.isnumeric()):
            return qs.annotate(
                concatenated_values_order=Value(
                    None, output_field=models.IntegerField()
                ),
                concatenated_values=Value(None, output_field=models.CharField()),
            )
        # Retrieve all the products' attribute data IDs (assignments) and
        # product types that have the given attribute associated to them
        associated_values = tuple(
            AttributeProduct.objects.filter(attribute_id=attribute_pk).values_list(
                "pk", "product_type_id"
            )
        )
        if not associated_values:
            # attribute exists but is associated with nothing: annotate
            # null placeholders so callers can still order by these fields
            qs = qs.annotate(
                concatenated_values_order=Value(
                    None, output_field=models.IntegerField()
                ),
                concatenated_values=Value(None, output_field=models.CharField()),
            )
        else:
            attribute_associations, product_types_associated_to_attribute = zip(
                *associated_values
            )
            qs = qs.annotate(
                # Contains to retrieve the attribute data (singular) of each product
                # Refer to `AttributeProduct`.
                filtered_attribute=FilteredRelation(
                    relation_name="attributes",
                    condition=Q(attributes__assignment_id__in=attribute_associations),
                ),
                # Implicit `GROUP BY` required for the `StringAgg` aggregation
                grouped_ids=Count("id"),
                # String aggregation of the attribute's values to efficiently sort them
                concatenated_values=Case(
                    # If the product has no association data but has
                    # the given attribute associated to its product type,
                    # then consider the concatenated values as empty (non-null).
                    When(
                        Q(product_type_id__in=product_types_associated_to_attribute)
                        & Q(filtered_attribute=None),
                        then=models.Value(""),
                    ),
                    default=StringAgg(
                        F("filtered_attribute__values__name"),
                        delimiter=",",
                        ordering=(
                            [
                                f"filtered_attribute__values__{field_name}"
                                for field_name in AttributeValue._meta.ordering or []
                            ]
                        ),
                    ),
                    output_field=models.CharField(),
                ),
                concatenated_values_order=Case(
                    # Make the products having no such attribute be last in the sorting
                    When(concatenated_values=None, then=2),
                    # Put the products having an empty attribute value at the bottom of
                    # the other products.
                    When(concatenated_values="", then=1),
                    # Put the products having an attribute value to be always at the top
                    default=0,
                    output_field=models.IntegerField(),
                ),
            )
        # Sort by concatenated_values_order then
        # Sort each group of products (0, 1, 2, ...) per attribute values
        # Sort each group of products by name,
        # if they have the same values or not values
        ordering = "-" if descending else ""
        return qs.order_by(
            f"{ordering}concatenated_values_order",
            f"{ordering}concatenated_values",
            f"{ordering}name",
        )
class Product(SeoModel, ModelWithMetadata, PublishableModel):
    """Core catalogue item; pricing and stock live on its ProductVariants."""

    product_type = models.ForeignKey(
        ProductType, related_name="products", on_delete=models.CASCADE
    )
    name = models.CharField(max_length=250)
    slug = models.SlugField(max_length=255, unique=True, allow_unicode=True)
    # ushop: shop this product belongs to (project-specific addition)
    ushop = models.ForeignKey(Shop, on_delete=models.CASCADE, blank=True, null=True)
    # NOTE(review): was_price/usale appear to be "previous price" and a
    # sale amount for the ushop integration -- confirm their semantics.
    was_price = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        blank=True,
        null=True,
    )
    usale = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        blank=True,
        null=True,
    )
    description = models.TextField(blank=True)
    # draft.js JSON, sanitized on save
    description_json = SanitizedJSONField(
        blank=True, default=dict, sanitizer=clean_draft_js
    )
    category = models.ForeignKey(
        Category,
        related_name="products",
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
    )
    currency = models.CharField(
        max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
        default=settings.DEFAULT_CURRENCY,
    )
    # cheapest variant price, denormalized for listing/sorting
    minimal_variant_price_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        blank=True,
        null=True,
    )
    minimal_variant_price = MoneyField(
        amount_field="minimal_variant_price_amount", currency_field="currency"
    )
    updated_at = models.DateTimeField(auto_now=True, null=True)
    charge_taxes = models.BooleanField(default=True)
    weight = MeasurementField(
        measurement=Weight, unit_choices=WeightUnits.CHOICES, blank=True, null=True
    )
    available_for_purchase = models.DateField(blank=True, null=True, auto_now_add=True)
    visible_in_listings = models.BooleanField(default=False)
    default_variant = models.OneToOneField(
        "ProductVariant",
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name="+",
    )
    # soft-delete timestamp (project-specific; None means not deleted)
    deleted_at = models.DateTimeField(null=True, blank=True)
    objects = ProductsQueryset.as_manager()
    translated = TranslationProxy()

    class Meta:
        app_label = "product"
        ordering = ("slug",)
        permissions = (
            (ProductPermissions.MANAGE_PRODUCTS.codename, "Manage products."),
        )

    def __iter__(self):
        # iterate over the variants, caching the queryset on first use so
        # repeated iteration does not re-query the database
        if not hasattr(self, "__variants"):
            setattr(self, "__variants", self.variants.all())
        return iter(getattr(self, "__variants"))

    def __repr__(self) -> str:
        class_ = type(self)
        return "<%s.%s(pk=%r, name=%r)>" % (
            class_.__module__,
            class_.__name__,
            self.pk,
            self.name,
        )

    def __str__(self) -> str:
        return self.name

    @property
    def plain_text_description(self) -> str:
        """Plain-text rendering of the draft.js description JSON."""
        return json_content_to_raw_text(self.description_json)

    def get_first_image(self):
        """Return the first related image, or None if there are none."""
        images = list(self.images.all())
        return images[0] if images else None

    @staticmethod
    def sort_by_attribute_fields() -> list:
        # annotation fields produced by ProductsQueryset.sort_by_attribute
        return ["concatenated_values_order", "concatenated_values", "name"]

    def is_available_for_purchase(self):
        """True when an availability date is set and has been reached."""
        return (
            self.available_for_purchase is not None
            and datetime.date.today() >= self.available_for_purchase
        )
class ProductTranslation(SeoModelTranslation):
    """Per-language translation of a Product's name and description."""

    language_code = models.CharField(max_length=10)
    product = models.ForeignKey(
        Product, related_name="translations", on_delete=models.CASCADE
    )
    name = models.CharField(max_length=250)
    description = models.TextField(blank=True)
    description_json = SanitizedJSONField(
        blank=True, default=dict, sanitizer=clean_draft_js
    )

    class Meta:
        # at most one translation per language per product
        unique_together = (("language_code", "product"),)

    def __str__(self) -> str:
        return self.name

    def __repr__(self) -> str:
        class_ = type(self)
        return "%s(pk=%r, name=%r, product_pk=%r)" % (
            class_.__name__,
            self.pk,
            self.name,
            self.product_id,
        )
class ProductVariantQueryset(models.QuerySet):
    """Variant queryset that keeps products' minimal prices in sync."""

    def annotate_quantities(self):
        """Annotate each variant with its total stock quantity and the
        amount already allocated (0 when there are no stock rows)."""
        return self.annotate(
            quantity=Coalesce(Sum("stocks__quantity"), 0),
            quantity_allocated=Coalesce(
                Sum("stocks__allocations__quantity_allocated"), 0
            ),
        )

    def create(self, **kwargs):
        """Create a product's variant.
        After the creation update the "minimal_variant_price" of the product
        (asynchronously, via a task).
        """
        variant = super().create(**kwargs)
        # imported here to avoid a circular import at module load time
        from .tasks import update_product_minimal_variant_price_task
        update_product_minimal_variant_price_task.delay(variant.product_id)
        return variant

    def bulk_create(self, objs, batch_size=None, ignore_conflicts=False):
        """Insert each of the product's variant instances into the database.
        After the creation update the "minimal_variant_price" of all the
        affected products (asynchronously, via a task).
        """
        variants = super().bulk_create(
            objs, batch_size=batch_size, ignore_conflicts=ignore_conflicts
        )
        # deduplicate product ids before dispatching the price update
        product_ids = set()
        for obj in objs:
            product_ids.add(obj.product_id)
        product_ids = list(product_ids)
        # imported here to avoid a circular import at module load time
        from .tasks import update_products_minimal_variant_prices_of_catalogues_task
        update_products_minimal_variant_prices_of_catalogues_task.delay(
            product_ids=product_ids
        )
        return variants
class ProductVariant(SortableModel, ModelWithMetadata):
    """A purchasable variant (single SKU) of a :class:`Product`.

    Variants are soft-deleted: :meth:`delete` stamps ``deleted_at`` instead of
    removing the row, and :meth:`get_ordering_queryset` excludes soft-deleted
    rows from sort-order bookkeeping.
    """

    sku = models.CharField(max_length=255, unique=True)
    name = models.CharField(max_length=255, blank=True)
    currency = models.CharField(
        max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
        default=settings.DEFAULT_CURRENCY,
        blank=True,
        null=True,
    )
    price_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
    )
    price = MoneyField(amount_field="price_amount", currency_field="currency")
    product = models.ForeignKey(
        Product, related_name="variants", on_delete=models.CASCADE, null=True
    )
    images = models.ManyToManyField("ProductImage", through="VariantImage")
    track_inventory = models.BooleanField(default=True)

    cost_price_amount = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        blank=True,
        null=True,
    )
    cost_price = MoneyField(amount_field="cost_price_amount", currency_field="currency")
    weight = MeasurementField(
        measurement=Weight, unit_choices=WeightUnits.CHOICES, blank=True, null=True
    )
    # Soft-delete marker: non-null means the variant is logically deleted.
    deleted_at = models.DateTimeField(null=True, blank=True)
    objects = ProductVariantQueryset.as_manager()
    translated = TranslationProxy()

    class Meta:
        ordering = ("sort_order", "sku")
        app_label = "product"

    def __str__(self) -> str:
        return self.name or self.sku

    @property
    def is_visible(self) -> bool:
        return self.product.is_visible

    def get_price(self, discounts: Optional[Iterable[DiscountInfo]] = None) -> "Money":
        """Return the variant price after applying any collection/product discounts."""
        return calculate_discounted_price(
            product=self.product,
            price=self.price,
            collections=self.product.collections.all(),
            discounts=discounts,
        )

    def get_weight(self):
        # Fallback chain: variant weight -> product weight -> product-type weight.
        return self.weight or self.product.weight or self.product.product_type.weight

    def is_shipping_required(self) -> bool:
        return self.product.product_type.is_shipping_required

    def is_digital(self) -> bool:
        is_digital = self.product.product_type.is_digital
        return not self.is_shipping_required() and is_digital

    def display_product(self, translated: bool = False) -> str:
        """Return a "product (variant)" display string, optionally translated."""
        if translated:
            product = self.product.translated
            variant_display = str(self.translated)
        else:
            variant_display = str(self)
            product = self.product
        product_display = (
            f"{product} ({variant_display})" if variant_display else str(product)
        )
        return smart_text(product_display)

    def get_first_image(self) -> "ProductImage":
        """Return the variant's first image, falling back to the product's."""
        images = list(self.images.all())
        return images[0] if images else self.product.get_first_image()

    def get_ordering_queryset(self):
        # FIX: PEP8 keyword-argument spacing and redundant trailing .all()
        # (filter() already returns a queryset).
        return self.product.variants.filter(deleted_at__isnull=True)

    def delete(self, *args, **kwargs):
        """Soft-delete the variant.

        Closes the gap in sibling sort_order values, then stamps ``deleted_at``
        and saves. Intentionally does NOT call ``super().delete()``; the row
        stays in the database.
        """
        if self.sort_order is not None:
            qs = self.get_ordering_queryset()
            qs.filter(sort_order__gt=self.sort_order).update(
                sort_order=F("sort_order") - 1
            )
        from datetime import datetime

        # NOTE(review): naive datetime.now(); if the project runs with
        # USE_TZ=True this should be django.utils.timezone.now() — confirm.
        self.deleted_at = datetime.now()
        self.save()
        # return super().delete(*args, **kwargs)
class ProductVariantTranslation(models.Model):
    """Per-language translation of a ProductVariant's name."""

    language_code = models.CharField(max_length=10)
    product_variant = models.ForeignKey(
        ProductVariant, related_name="translations", on_delete=models.CASCADE
    )
    name = models.CharField(max_length=255, blank=True)

    translated = TranslationProxy()

    class Meta:
        # At most one translation per (language, variant) pair.
        unique_together = (("language_code", "product_variant"),)

    def __str__(self):
        # Fall back to the variant's own display when no translated name is set.
        return self.name or str(self.product_variant)

    def __repr__(self):
        cls = type(self)
        return "%s(pk=%r, name=%r, variant_pk=%r)" % (
            cls.__name__, self.pk, self.name, self.product_variant_id
        )
class DigitalContent(ModelWithMetadata):
FILE = "file"
TYPE_CHOICES = ((FILE, "digital_product"),)
use_default_settings = models.BooleanField(default=True)
automatic_fulfillment = models.BooleanField(default=False)
content_type | |
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: <NAME> @<EMAIL>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from .torch_layers import TripletLoss, NTXentLoss1, NTXentLoss2, FCSeries, GreedyHashLoss
from utils import HParams, Timer, get_latest_file, Notifier, resize_maxdim
from copy import deepcopy
import cv2
def default_hparams():
    """Return the default hyper-parameter set for the MSResnet50 trainer."""
    cfg = dict(
        name='MSResnet50',  # must match the model class name in this module
        # -- model --
        d_model=256,  # embedding layer size
        dropout=0.1,  # dropout rate
        freeze_bn=False,  # freeze batch norm
        do_triplet=False,
        triplet_margin=1.0,
        triplet_metric='l2',  # one of ['l2', 'cosine']
        do_buffer=True,  # buffer layers between bottleneck & triplet/simclr loss
        buffer_dim=[256],  # dimensions of the buffer layers
        buffer_relu_last=False,
        do_simclr=True,  # use NTXent in SimCLR mode
        simclr_version=1,  # simclr version, 1 or 2
        simclr_temperature=0.8,
        do_greedy=False,  # greedy binarisation hashing with integrated sign() fn
        greedy_weight=0.01,
        do_quantise=False,  # binary quantisation loss
        quantise_weight=1.0,
        do_balance=False,  # bit-balance loss
        balance_weight=0.01,
        # -- training --
        dataset='PSBattles',  # MSCOCO or PSBattles
        train_all_layers=True,  # if False train only the last layer
        nepochs=20,
        batch_size=16,
        npos=4,  # number of positives per batch
        optimizer='SGD',
        lr=0.001,
        lr_steps=[0.6],  # step decay for lr; value > 1 means lr is fixed
        lr_sf=0.1,  # lr scale factor at each step
        neg_random_rate=0.,  # psbattles dataloader: rate of negatives sampled from originals
        to_square_size=0,  # if non-zero, pad image to square and resize
        resume=True,
        save_every=5,  # checkpoint every x epochs
        report_every=100,  # tensorboard report every x iterations
        val_every=3,  # validate every x epochs
        checkpoint_path='./',  # where checkpoints are saved/restored
        ms_weight='/vol/research/tubui1/projects/content_prov/MicrosoftVision.ResNet50.tar',
        slack_token='/user/HS<PASSWORD>/slack_token.txt'  # token for slack messenger
    )
    return HParams(**cfg)
def resize_fn(im_array, target_size=224):
h, w = im_array.shape[:2]
if h==w==target_size:
return im_array
else:
return cv2.resize(im_array, (target_size, target_size))
class MSResnet50(nn.Module):
    def __init__(self, hps):
        """Build a ResNet50 backbone with optional embedding head and losses.

        :param hps: HParams object (see default_hparams) controlling the
            embedding size, pretrained weights and loss configuration.
        """
        self.hps = hps
        super(MSResnet50, self).__init__()
        self.model = torchvision.models.resnet50(pretrained=False, progress=False)
        if not hps.train_all_layers:
            # freeze the backbone; only layers added below will train
            for param in self.model.parameters():
                param.requires_grad = False
        if hps.ms_weight:
            self.load_ms_weight(hps.ms_weight)
        # Embedding head: either replace fc with a d_model projection, or
        # drop fc entirely and keep the pooled backbone features.
        if hps.d_model:
            self.model.fc = nn.Linear(self.model.fc.in_features, hps.d_model)
            self.init_weights()
            d_preembed = hps.d_model
        else:
            layers = list(self.model.children())[:-1] + [nn.Flatten()]
            d_preembed = self.model.fc.in_features
            self.model = nn.Sequential(*layers)
        # train attributes (populated lazily in do_train/preprocess)
        self.device = None
        self.optimizer = None
        self.writer = None
        # loss modules
        if hps.do_greedy:
            self.binariser = GreedyHashLoss()
        buffer_dim = hps.buffer_dim if hps.do_buffer else []
        self.buffer_layer = FCSeries(d_preembed, buffer_dim, relu_last=hps.buffer_relu_last)
        if hps.do_triplet:
            self.regressor = TripletLoss(hps.triplet_margin, hps.triplet_metric)
        elif hps.simclr_version == 1:
            self.regressor = NTXentLoss1(hps.batch_size, hps.npos, hps.simclr_temperature)
        else:
            self.regressor = NTXentLoss2(hps.batch_size, hps.npos, hps.simclr_temperature)
        # eval attributes: ImageNet normalisation constants
        self.normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                               std=[0.229, 0.224, 0.225])
def init_weights(self):
# initialize weights for final layer only
if self.hps.d_model:
initrange = 0.1
self.model.fc.bias.data.zero_()
self.model.fc.weight.data.uniform_(-initrange, initrange)
def load_ms_weight(self, weight):
print(f'Loading MS weight from {weight}')
pretrained_state = torch.load(weight)['state_dict']
self.model.load_state_dict(pretrained_state, strict=False)
    def forward(self, x):
        """Compute the embedding and the tensor fed to the loss.

        :param x: image batch tensor accepted by the backbone.
        :return: dict with 'embedding' (the fingerprint), 'regress' (buffer
            layer output used by the loss), and 'greedy_loss' when do_greedy.
        """
        output = {}
        embed_float = self.model(x)  # raw float fingerprint
        if self.hps.do_greedy:
            # sign()-binarised embedding plus its hashing penalty
            output['embedding'], output['greedy_loss'] = self.binariser(embed_float)
        elif self.hps.do_quantise or self.hps.do_balance:
            # squash to (-1, 1) so the quantise/balance losses apply
            output['embedding'] = embed_float.tanh()
        else:
            output['embedding'] = embed_float
        output['regress'] = self.buffer_layer(output['embedding'])  # for loss
        return output
def predict_from_cv2_images(self, img_lst):
device = next(self.parameters()).device
# preprocess
num_images = len(img_lst)
if self.hps.to_square_size:
img_lst = [resize_maxdim(im, 224) for im in img_lst]
else:
img_lst = [resize_fn(im, 224) for im in img_lst]
pre_x = [im.astype(np.float32).transpose(2, 0, 1)/255 for im in img_lst]
pre_x = [self.normalizer(torch.tensor(x_, dtype=torch.float)) for x_ in pre_x]
out = []
with torch.no_grad():
for id_ in range(0, num_images, self.hps.batch_size):
start_, end_ = id_, min(id_ + self.hps.batch_size, num_images)
batch = pre_x[start_:end_]
batch = torch.stack(batch).to(device)
pred = self.__call__(batch)['embedding'].cpu().numpy()
out.append(pred)
out = np.concatenate(out)
return out
    @staticmethod
    def freeze_bn(module):
        """Freeze a batch-norm module: stop running-stat updates and affine grads.

        Intended for use with Module.apply(); non-batchnorm modules are untouched.
        """
        if isinstance(module, nn.modules.batchnorm._BatchNorm):
            module.eval()  # not updating running mean/var
            module.weight.requires_grad = False  # not updating weight/bias, or alpha/beta in the paper
            module.bias.requires_grad = False
    def train(self, mode=True):
        """Override nn.Module.train to optionally keep batch-norm layers frozen.

        :param mode: True for training mode, False for eval (standard semantics).
        """
        super().train(mode)
        if self.hps.freeze_bn:
            # re-freeze bn after train() flipped everything back to training mode
            self.model.apply(self.freeze_bn)  # freeze running mean/var in bn layers in cnn_model only
def compute_loss(self, pred):
loss = self.regressor(pred['regress'])
if self.hps.do_greedy:
loss += self.hps.greedy_weight * pred['greedy_loss']
if self.hps.do_quantise:
loss += self.hps.quantise_weight * (pred['embedding'].abs()-1).pow(2).mean()
if self.hps.do_balance:
loss += self.hps.balance_weight * pred['embedding'].sum(dim=1).abs().mean()
return loss
def preprocess(self, x, y):
"""
preprocess data, model dependent
"""
if self.device is None: # check if device is set
self.device = next(self.parameters()).device # current device
x = [resize_fn(x_,224).astype(np.float32).transpose(2, 0, 1)/255. for x_ in x]
x = [self.normalizer(torch.tensor(x_, dtype=torch.float32)) for x_ in x]
x = torch.stack(x).to(self.device)
y = torch.tensor(y, dtype=torch.long).to(self.device)
return x, y
def train_epoch(self, data_loader, ep):
"""
perform train procedure for a single epoch
:param data_loader: iterable dataloader
:param ep: epoch number
:return: ave total loss
"""
timer = Timer()
self.train()
train_summ = 0
niters = len(data_loader)
loader = data_loader.load()
for bid in range(niters):
# get a batch
data, labels = next(loader)
data, labels = self.preprocess(data, labels)
# train step
self.optimizer.zero_grad()
pred = self.__call__(data)
loss = self.compute_loss(pred)
loss.backward()
self.optimizer.step()
# report
train_summ += loss.item()
if bid % int(niters / 5) == 0: # print
msg = ' Train epoch: %d [%d/%d (%.2f)] \tLoss: %.4f; time: %s'
val = (ep, bid, niters, bid / niters, loss.item(), timer.time(True))
print(msg % val, flush=True)
# logging
if bid % self.hps.report_every == 0:
self.writer.add_scalar('Loss/train', loss.item(), ep*niters + bid)
train_summ /= niters
print('====> Epoch: %d Lr: %f, Ave. loss: %.4f Elapse time: %s' % (ep,
self.lr_scheduler.get_last_lr()[0], train_summ, timer.time(False)))
self.lr_scheduler.step() # update learning rate
return train_summ
def val_epoch(self, data_loader, ep):
"""
perform validation for a single epoch
:param data_loader: iterable dataloader
:param ep: epoch number
:return: loss
"""
timer = Timer()
self.eval()
val_summ = 0
niters = len(data_loader)
loader = data_loader.load()
with torch.no_grad():
for bid in range(niters):
data, labels = next(loader)
data, labels = self.preprocess(data, labels)
pred = self.__call__(data)
loss = self.compute_loss(pred)
val_summ += loss.item()
self.writer.add_scalar('Loss/val', loss.item(), ep * niters + bid)
val_summ /= niters
print('====> Validation Epoch: %d Ave. loss: %.4f Elapse time: %s' % (
ep, val_summ, timer.time(False)))
return val_summ
def get_optimizer(self):
if self.hps.optimizer == 'Adam':
optim = torch.optim.Adam(self.parameters(), lr=self.hps.lr,
betas=(0.9, 0.98), eps=1e-9, weight_decay=5e-4)
elif self.hps.optimizer == 'SGD':
optim = torch.optim.SGD(self.parameters(), lr=self.hps.lr, momentum=0.9,
weight_decay=5e-4)
return optim
def load_pretrained_weight(self, pretrain_path):
device = next(self.parameters()).device # load to current device
print('Loading pretrained model %s.' % pretrain_path)
pretrained_state = torch.load(pretrain_path, map_location=device)
if 'model_state_dict' in pretrained_state:
print('This pretrained model is a checkpoint, loading model_state_dict only.')
pretrained_state = pretrained_state['model_state_dict']
model_state = self.state_dict()
matched_keys, not_matched_keys = [], []
for k,v in pretrained_state.items():
if k in model_state and v.size() == model_state[k].size():
matched_keys.append(k)
else:
not_matched_keys.append(k)
if len(not_matched_keys):
print('[%s] The following keys are not loaded: %s' % (self.hps.name, not_matched_keys))
pretrained_state = {k: pretrained_state[k] for k in matched_keys}
# pretrained_state = { k:v for k,v in pretrained_state.items() if k in \
# model_state and v.size() == model_state[k].size() }
model_state.update(pretrained_state)
self.load_state_dict(model_state)
def load_checkpoint(self, checkpoint_path):
device = next(self.parameters()).device # load to current device
print('Resuming from %s.' % checkpoint_path)
checkpoint = torch.load(checkpoint_path, map_location=device)
self.load_state_dict(checkpoint['model_state_dict'])
if 'optimizer_state_dict' in checkpoint:
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if 'lr_scheduler_state_dict' in checkpoint:
self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
# return the rest
excl_keys = ['model_state_dict', 'optimizer_state_dict']
out = {key: checkpoint[key] for key in checkpoint if key not in excl_keys}
return out
def save_checkpoint(self, checkpoint_path, save_optimizer=True, **kwargs):
print('Saving checkpoint at %s' % checkpoint_path)
checkpoint = {'model_state_dict': self.state_dict()}
if save_optimizer:
checkpoint.update(optimizer_state_dict=self.optimizer.state_dict())
checkpoint.update(lr_scheduler_state_dict=self.lr_scheduler.state_dict())
checkpoint.update(**kwargs)
torch.save(checkpoint, checkpoint_path)
def do_train(self, train_loader, val_loader=None, pretrain=''):
"""
train and val procedure
:param train_loader:
:param val_loader:
:param pretrain:
:return: None
"""
# train settings
timer = Timer()
self.device = next(self.parameters()).device # current device
self.optimizer = self.get_optimizer()
milestones = [int(self.hps.nepochs * i) for i in self.hps.lr_steps]
self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones, self.hps.lr_sf)
self.writer = SummaryWriter(log_dir=os.path.join(self.hps.checkpoint_path, 'logs'))
self.notifier = Notifier(self.hps.slack_token)
self.notifier.send_init_text()
# self.writer.add_graph(self)
# load pretrained weight if avai
if pretrain:
self.load_pretrained_weight(pretrain)
# load last checkpoint if avai
epoch0 = 1
if self.hps.resume:
checkpoint_path = get_latest_file(self.hps.checkpoint_path, 'ckpt_*.tar')
if checkpoint_path:
epoch0 = self.load_checkpoint(checkpoint_path)['epoch']
# import pdb; pdb.set_trace()
# train
val_loss = None
val_loss_records = []
best_model = deepcopy(self.state_dict())
best_val = np.inf
for epoch in range(epoch0, self.hps.nepochs + 1):
train_loss = self.train_epoch(train_loader, epoch)
if epoch % self.hps.save_every == 0: # checkpoint
checkpoint_path = os.path.join(self.hps.checkpoint_path, 'ckpt_%02d.tar' % epoch)
self.save_checkpoint(checkpoint_path, save_optimizer=True, epoch=epoch, loss=train_loss)
if val_loader is not None and epoch % self.hps.val_every == 0: # validation
val_loss = self.val_epoch(val_loader, epoch)
val_loss_records.append([val_loss, epoch])
if val_loss < best_val:
best_val = val_loss
best_model = deepcopy(self.state_dict())
print('Best val loss recorded at epoch | |
"""builder.py
Modified June 26 2015
<NAME>
<EMAIL>
This module contains everything that is needed to construct a system dynamics model
in python, using syntax that is compatible with the pysd model simulation functionality.
These functions could be used to construct a system dynamics model from scratch, in a
pinch. Due to the highly visual structure of system dynamics models, I still recommend
using an external model construction tool such as vensim or stella/iThink to build and
debug models.
"""
# Todo: Add a __doc__ function that summarizes the docstrings of the whole model
# Todo: Give the __doc__ function a 'short' and 'long' option
# Todo: Modify static functions to reference their own function attribute
# If we have a function that defines a constant value (or a big, constructed
# numpy array) it may be better to have the array constructed once (outside of the
# function, perhaps as an attribute) than to construct and return it every time
# the function is called. (Of course, it may be that python detects this and does
# it for us - we should find out)
# Alternately, there may be a clever way to cache this so that we don't have to
# change the model file.
#
# Todo: Template separation is getting a bit out of control. Perhaps bring it back in?
# Todo: create a function that gets the subscript family name, given one of its elements
# this should be robust to the possibility that different families contain the same
# child, as would be the case with subranges. Probably means that we'll have to collect
# all of the subelements and pass them together to get the family name.
import re
import keyword
from templates import templates
import numpy as np
class Builder(object):
def __init__(self, outfile_name, dictofsubs={}):
""" The builder class
Parameters
----------
outfilename: <string> valid python filename
including '.py'
dictofsubs: dictionary
# Todo: rewrite this once we settle on a schema
"""
# Todo: check that the no-subscript value of dictofsubs is an empty dictionary.
self.filename = outfile_name
self.stocklist = []
self.preamble = []
self.body = []
self.dictofsubs = dictofsubs
self.preamble.append(templates['new file'].substitute())
if dictofsubs:
self.preamble.append(templates['subscript_dict'].substitute(dictofsubs=dictofsubs.__repr__()))
def write(self):
""" Writes out the model file """
with open(self.filename, 'w') as outfile:
[outfile.write(element) for element in self.preamble]
[outfile.write(element) for element in self.body]
    def add_stock(self, identifier, sub, expression, initial_condition):
        """Adds a stock to the python model file based upon the interpreted expressions
        for the initial condition.

        Parameters
        ----------
        identifier: <string> valid python identifier
            Our translators are responsible for translating the model identifiers into
            something that python can use as a function name.

        expression: <string>
            This contains reference to all of the flows that feed the stock.

        initial_condition: <string>
            An expression that defines the value that the stock should take on when the model
            is initialized. This may be a constant, or a call to other model elements.

        sub : basestring
            unlike in flaux where it's a list of strings. Because for now, a stock
            is only declared once.
        """
        # todo: consider the case where different flows work over different subscripts
        # todo: properly handle subscripts here
        # todo: build a test case to test the above
        # todo: force the sub parameter to be a list
        # todo: handle docstrings
        initial_condition = initial_condition.replace('\n','').replace('\t','') # Todo:pull out
        if sub:
            if isinstance(sub, basestring): sub = [sub] # Todo: rework
            directory, size = get_array_info(sub, self.dictofsubs)
            # Vensim encodes 2-d constant arrays with ';' row separators;
            # convert to a numpy array literal for the generated file.
            if re.search(';',initial_condition): # format arrays for numpy
                initial_condition = 'np.'+ np.array(np.mat(initial_condition.strip(';'))).__repr__()
            # todo: I don't like the fact that the array is applied even when it isnt needed
            initial_condition += '*np.ones((%s))'%(','.join(map(str,size)))
        funcstr = templates['stock'].substitute(identifier=identifier,
                                                expression=expression,
                                                initial_condition=initial_condition)
        if sub: # this is super bad coding practice, should change it.
            funcstr += '%s.dimension_dir = '%identifier+directory.__repr__()+'\n'
        self.body.append(funcstr)
        self.stocklist.append(identifier)
    def add_flaux(self, identifier, sub, expression, doc=''):
        """Adds a flow or auxiliary element to the model.

        Parameters
        ----------
        identifier: <string> valid python identifier
            Our translators are responsible for translating the model identifiers into
            something that python can use as a function name.

        expression: list of strings
            Each element in the array is the equation that will be evaluated to fill
            the return array at the coordinates listed at corresponding locations
            in the `sub` dictionary

        sub: list of strings
            List of strings of subscript indices that correspond to the
            list of expressions, and collectively define the shape of the output
            ['a1,pears', 'a2,pears']

        doc: <string>
            The documentation string of the model

        Returns
        -------
        identifier: <string>
            The name of the constructed function

        Example
        -------
        assume we have some subscripts
            apples = [a1, a2, a3]
            pears = [p1, p2, p3]
        now sub list a list:
            sub = ['a1,pears', 'a2,pears']
        """
        # todo: why does the no-sub condition give [''] as the argument?
        # todo: evaluate if we should instead use syntax [['a1','pears'],['a2','pears']]
        # todo: build a test case to test
        # todo: clean up this function
        # todo: add docstring handling
        docstring = ''
        if sub[0]!='': #todo: consider factoring this out if it is useful for the multiple flows
            directory, size = get_array_info(sub, self.dictofsubs)
            funcset = 'loc_dimension_dir = %s.dimension_dir \n'%identifier
            # lines which encode the expressions for partially defined subscript pieces
            funcset += ' output = np.ndarray((%s))\n'%','.join(map(str,size))
            for expr, subi in zip(expression, sub):
                expr = expr.replace('\n','').replace('\t','').strip() # todo: pull out
                indices = ','.join(map(str,getelempos(subi, self.dictofsubs)))
                if re.search(';',expr): # if 2d array, format for numpy
                    expr = 'np.'+np.array(np.mat(expr.strip(';'))).__repr__()
                funcset += ' output[%s] = %s\n'%(indices, expr)
        else:
            # scalar element: no subscript bookkeeping required
            funcset = 'loc_dimension_dir = 0 \n'
            funcset += ' output = %s\n'%expression[0]
        funcstr = templates['flaux'].substitute(identifier=identifier,
                                                expression=funcset,
                                                docstring=docstring)
        if sub[0] != '': # todo: make less brittle
            funcstr += '%s.dimension_dir = '%identifier+directory.__repr__()+'\n' # todo: do we like 'dimension_dictionary' as a name?
        self.body.append(funcstr)
        return identifier
def add_lookup(self, identifier, valid_range, copair_list):
"""Constructs a function that implements a lookup.
The function encodes the coordinate pairs as numeric values in the python file.
Parameters
----------
identifier: <string> valid python identifier
Our translators are responsible for translating the model identifiers into
something that python can use as a function name.
range: <tuple>
Minimum and maximum bounds on the lookup. Currently, we don't do anything
with this, but in the future, may use it to implement some error checking.
copair_list: a list of tuples, eg. [(0, 1), (1, 5), (2, 15)]
The coordinates of the lookup formatted in coordinate pairs.
"""
# todo: Add a docstring capability
# in the future, we may want to check in bounds for the range. for now, lazy...
xs, ys = zip(*copair_list)
xs_str = str(list(xs))
ys_str = str(list(ys))
self.body.append(templates['lookup'].substitute(identifier=identifier,
xs_str=xs_str,
ys_str=ys_str))
    def add_initial(self, component):
        """ Implement vensim's `INITIAL` command as a build-time function.

        component cannot be a full expression, must be a reference to
        a single external element.

        NOTE(review): every purely-numeric component maps to the same generated
        function name `initial_number`, so multiple numeric INITIALs would
        collide in the output file — confirm whether this is intended.
        """
        # components with no letters are treated as plain numbers
        if not re.search('[a-zA-Z]',component):
            naked_component="number"
            funcstr = ('\ndef initial_%s(inval): \n'%naked_component +
                       ' return inval \n\n'
                       )
        else:
            # strip the '()' call suffix to get the referenced element's name
            naked_component = component.split("()")[0]
            funcstr = ('\ndef initial_%s(inval): \n'%naked_component +
                       ' if not hasattr(initial_%s, "value"): \n'%naked_component +
                       ' initial_%s.value = inval \n'%naked_component +
                       ' return initial_%s.value \n\n'%naked_component
                       )
        self.body.append(funcstr)
        return 'initial_%s(%s)'%(naked_component, component)
def add_n_delay(self, delay_input, delay_time, initial_value, order, sub):
"""Constructs stock and flow chains that implement the calculation of
a delay.
delay_input: <string>
Reference to the model component that is the input to the delay
delay_time: <string>
Can be a number (in string format) or a reference to another model element
which will calculate the delay. This is calculated throughout the simulation
at runtime.
initial_value: <string>
This is used to initialize the stocks that are present in the delay. We
initialize the stocks with equal values so that the outflow in the first
timestep is equal to this value.
order: int
The number of stocks in the delay pipeline. As we construct the delays at
build time, this must be an integer and cannot be calculated from other
model components. Anything else will yield a ValueError.
Returns
-------
outflow: basestring
Reference to the flow which contains the output of the delay process
"""
try:
order = int(order)
except ValueError:
print "Order of delay must be an int. (Can't even be a reference | |
a_drop_cols2: columns to drop in the second dataframe after the join
:param a_where: where condition to apply after the join
:param a_cache: if True, the resulting dataframe will be cached
:param a_unpersist1: if True, the first dataframe will be unpersisted
:param a_unpersist2: if True, the second dataframe will be unpersisted
:param a_row_count: if True, the records will be counted. If also a_cache=True, it will immediately be in the cache.
:param a_old_count: count of records of previous a_df1, if known.
:param a_return_count: if True, the new record count will be returned with the dataframe
:param a_verbose_level: print verbosity level
:return: a new dataframe or pair (dataframe, new_count) if a_return_count==True
"""
if a_join_cols is None:
raise ValueError("a_join_cols must be either string or a list of strings")
alllowed_joins = ["left", "right", "inner", "full outer"]
a_how = a_how.lower()
if a_how not in alllowed_joins:
raise ValueError(f"The join type {a_how} is not in allowed joins: {alllowed_joins}")
if isinstance(a_join_cols, str):
a_join_cols = [a_join_cols]
if isinstance(a_drop_cols1, str):
a_drop_cols1 = [a_drop_cols1]
if isinstance(a_drop_cols2, str):
a_drop_cols2 = [a_drop_cols2]
table_name1 = temp_table(a_df1, a_prefix='df1')
table_name2 = temp_table(a_df2, a_prefix='df2')
# we could do this using sets, but this will lose column order
# therefore below you may see this dummy way using lists
cols1 = a_df1.columns if not a_drop_cols1 else [c for c in a_df1.columns if c not in a_drop_cols1]
cols2 = a_df2.columns if not a_drop_cols2 else [c for c in a_df2.columns if c not in a_drop_cols2]
join_cols = a_join_cols if a_join_cols else list(set(cols1).intersection(cols2))
s_except1 = f"- all except {a_drop_cols1}" if a_drop_cols1 else ""
s_except2 = f"- all except df1 and except {a_drop_cols2}" if a_drop_cols2 else ""
# depending on the join type, we need to take the correct intersection of columns
if a_how == "inner" or a_how == "left":
join_cols_list = [f"df1.{c}" for c in join_cols]
elif a_how == "right":
join_cols_list = [f"df2.{c}" for c in join_cols]
elif a_how == "full outer":
join_cols_list = [f"coalesce(df1.{c}, df2.{c}) as {c}" for c in join_cols]
else:
raise ValueError(f"The join type {a_how} is not in allowed joins: {alllowed_joins}")
cols2_minus_cols1 = [c for c in cols2 if c not in cols1]
first_cols = ["df1.{}".format(safe_col_name(c)) for c in cols1 if c not in join_cols]
second_cols = ["df2.{}".format(safe_col_name(c)) for c in cols2_minus_cols1 if c not in join_cols]
query = """
select
-- join columns:
{}{}
-- df1 columns{}:
{}{}
-- df2 columns{}:
{}
from
{} df1
{} join {} df2
on {}
""".format(", ".join(join_cols_list),
"," if len(first_cols) > 0 or len(second_cols) > 0 else "",
s_except1,
", ".join(first_cols),
"," if len(second_cols) > 0 else "",
s_except2,
", ".join(second_cols),
table_name1,
a_how,
table_name2,
join_clause(join_cols, 'df1', 'df2'))
if a_where:
query = "select * from (\n" + query + "\n) as x\n where \n" + a_where
print_verbose(3, a_verbose_level, query)
result = _spark.sql(query)
new_cnt = -1
if a_cache:
print_verbose(1, a_verbose_level, "caching joined dataset")
cache(result)
if a_row_count or a_return_count:
print_verbose(1, a_verbose_level, "counting records of the joined dataset...")
new_cnt = result.count()
if a_old_count:
dropped = a_old_count - new_cnt
dropped_percent = "({:.2%})".format(dropped / a_old_count)
print_verbose(1, a_verbose_level, f"old record count: {a_old_count:,}; new record count: {new_cnt:,}; dropped {dropped:,} {dropped_percent} records.")
else:
print_verbose(1, a_verbose_level, f"record count: {new_cnt:,}")
if a_unpersist1:
print_verbose(1, a_verbose_level, "unpersisting the 1st dataframe")
unpersist(a_df1)
if a_unpersist2:
print_verbose(1, a_verbose_level, "unpersisting the 2nd dataframe")
unpersist(a_df2)
print_verbose(1, a_verbose_level, "done join.")
if a_temp_table:
result.createOrReplaceTempView(a_temp_table)
if a_return_count:
return result, new_cnt
return result
def union(a_df_list: list, a_columns: list=None, a_verbose_level=3, a_type=""):
    """Union the given dataframes after checking that their column datatypes agree.

    :param a_df_list: dataframes to union (first one defines the reference schema)
    :param a_columns: optional subset of columns to select and compare
    :param a_verbose_level: print verbosity level passed through to sql()
    :param a_type: union modifier, e.g. "all" for UNION ALL
    :raises ValueError: when any dataframe's field types differ from the first's
    """
    def _field_types(df):
        return [str(f.dataType) for f in df.schema.fields if a_columns is None or f.name in a_columns]

    schema_types = None
    for i, df in enumerate(a_df_list):
        if schema_types is None:
            schema_types = _field_types(df)
            continue
        schema_types2 = _field_types(df)
        if schema_types2 != schema_types:
            raise ValueError(f"Schema of the {i+1}th dataframe is different. Expected: {schema_types}, actual: {schema_types2}")

    cols = "*" if not a_columns else ", ".join(a_columns)
    selects = [f"select {cols} from {temp_table(df)}" for df in a_df_list]
    query = f"\nunion {a_type}\n".join(selects)
    return sql(query, a_verbose_level=a_verbose_level)
def union_all(a_df_list: list, a_columns: list=None, a_verbose_level=3):
    """Return the UNION ALL (duplicates kept) of the given dataframes."""
    return union(a_df_list, a_columns, a_verbose_level, a_type="all")
def moving_average(a_df, a_group_col, a_value_col, a_sort_col, a_moving_avg_col, a_window, a_min_periods=1, a_return_all_columns=True):
    """Add a per-group rolling-mean column computed via a pandas GROUPED_MAP UDF.

    :param a_df: input Spark dataframe
    :param a_group_col: column to group by (one group per UDF invocation)
    :param a_value_col: column whose rolling mean is computed
    :param a_sort_col: column each group is sorted by before rolling
    :param a_moving_avg_col: name of the new moving-average column
    :param a_window: rolling window size (rows)
    :param a_min_periods: minimum observations in a window required for a value
    :param a_return_all_columns: if False, keep only group/sort/value columns
    :return: dataframe with the moving-average column appended
    """
    # DO NOT REMOVE select(*a_df.columns): this is related to inability to add column dynamically to it
    schema = a_df.select(*a_df.columns).schema if a_return_all_columns else a_df.select(a_group_col, a_sort_col, a_value_col).schema
    schema = (schema.add(StructField(a_moving_avg_col, DoubleType())))
    @pandas_udf(schema, PandasUDFType.GROUPED_MAP)
    def ma(a_pdf):
        # runs once per group as a plain pandas dataframe
        a_pdf[a_moving_avg_col] = a_pdf.sort_values(a_sort_col)[a_value_col].rolling(window=a_window, min_periods=a_min_periods).mean()
        return a_pdf
    return a_df.groupby(a_group_col).apply(ma)
def exp_moving_average(a_df, a_group_col, a_value_col, a_sort_col, a_moving_avg_col, a_span, a_min_periods=1, a_return_all_columns=True):
    """Add a per-group exponentially-weighted mean column via a GROUPED_MAP UDF.

    Mirrors moving_average() but uses pandas ewm() with the given span.

    :param a_span: decay span passed to pandas ewm()
    :param a_min_periods: minimum observations required for a value
    """
    # DO NOT REMOVE select(*a_df.columns): this is related to inability to add column dynamically to it
    schema = a_df.select(*a_df.columns).schema if a_return_all_columns else a_df.select(a_group_col, a_sort_col, a_value_col).schema
    schema = (schema.add(StructField(a_moving_avg_col, DoubleType())))
    @pandas_udf(schema, PandasUDFType.GROUPED_MAP)
    def ema(a_pdf):
        # runs once per group as a plain pandas dataframe
        a_pdf[a_moving_avg_col] = a_pdf.sort_values(a_sort_col)[a_value_col].ewm(span=a_span, min_periods=a_min_periods).mean()
        return a_pdf
    return a_df.groupby(a_group_col).apply(ema)
def median(a_df, a_part_columns, a_column, a_new_column_name, a_row_count=False, a_cache=False):
    """Compute the exact per-group median of `a_column`.

    Returns a dataframe with one row per group (a_part_columns) and the
    median in `a_new_column_name`. Rows are ranked inside each group and the
    middle row (odd group size) or the two middle rows (even group size) are
    averaged. a_row_count / a_cache optionally invoke the module's
    count()/cache() helpers on the result.
    """
    if isinstance(a_part_columns, str):
        a_part_columns = [a_part_columns]
    # Rank rows within each partition by the value column.
    w = Window.partitionBy(*a_part_columns).orderBy(a_column)
    df_result = a_df.withColumn(
        "rank", sf.row_number().over(w)
    ).withColumn(
        # group size n, repeated on every row of the partition
        "count_row_part",
        sf.count(a_column).over(Window.partitionBy(a_part_columns))
    ).withColumn(
        # 1 when the group has an even number of rows, else 0
        "even_flag",
        sf.when(
            sf.col("count_row_part") % 2 == 0,
            sf.lit(1)
        ).otherwise(
            sf.lit(0)
        )
    ).withColumn(
        # mid_value = floor(n / 2)
        "mid_value",
        sf.floor(sf.col("count_row_part") / 2)
    ).withColumn(
        # Flag the row(s) to average. `&` binds tighter than `|`, so this reads
        # (even AND rank == mid) OR (rank == mid + 1): for even n that is the
        # two middle ranks n/2 and n/2+1, for odd n the single rank (n+1)/2.
        # The inner otherwise/when can never fire (its condition is the second
        # clause above); unflagged rows get NULL and are filtered out below.
        "avg_flag",
        sf.when(
            (sf.col("even_flag") == 1) & (sf.col("rank") == sf.col("mid_value"))
            |
            ((sf.col("rank") - 1) == sf.col("mid_value")),
            sf.lit(1)
        ).otherwise(
            sf.when(
                sf.col("rank") == sf.col("mid_value") + 1,
                sf.lit(1)
            )
        )
    ).filter(
        sf.col("avg_flag") == 1
    ).drop(
        "avg_flag"
    ).groupby(
        a_part_columns
    ).agg(
        # mean of the one or two flagged middle rows == the median
        sf.avg(sf.col(a_column)).alias(a_new_column_name)
    )
    if a_cache and a_row_count:
        count(df_result, a_cache=True)
    elif a_cache:
        cache(df_result)
    elif a_row_count:
        count(df_result, a_cache=False)
    return df_result
def check_if_statistics_are_correct(a_stats):
    """Validate that every entry of a_stats is a supported statistic name.

    Raises ValueError listing the unsupported names (in input order).
    Returns None when all names are recognized.
    """
    known = frozenset((
        "mean", "std", "min", "max", "skew", "kurtosis",
        "mean_minus_1std", "mean_minus_2std", "mean_minus_3std",
        "mean_plus_1std", "mean_plus_2std", "mean_plus_3std",
        "q25_minus_15iqr", "q75_plus_15iqr",
        "sum", "count",
    ))
    unsupported = [name for name in a_stats if name not in known]
    if unsupported:
        raise ValueError(f"Unknown a_stats: {unsupported}")
def calc_stat_local(a_pdf,
stats, quantiles,
a_column_prefix_map, a_group_columns,
a_return_uppercase=False, a_reset_index=True, a_round_to_decimal_places=-1
):
# region indicators of which statistics to calculate; they will go as closures to the stat_func()
if isinstance(a_group_columns, str):
a_group_columns = [a_group_columns]
calc_count = "count" in stats
calc_ms = "mean_minus_1std" in stats or "mean_minus_2std" in stats or "mean_minus_3std" in stats \
or "mean_plus_1std" in stats or "mean_plus_2std" in stats or "mean_plus_3std" in stats
calc_mean = "mean" in stats or calc_ms
calc_std = "std" in stats or calc_ms
calc_sum = "sum" in stats
calc_min = "min" in stats
calc_max = "max" in stats
calc_skew = "skew" in stats
calc_kurtosis = "kurtosis" in stats
calc_mean_minus_1std = "mean_minus_1std" in stats
calc_mean_minus_2std = "mean_minus_2std" in stats
calc_mean_minus_3std = "mean_minus_3std" in stats
calc_mean_plus_1std = "mean_plus_1std" in stats
calc_mean_plus_2std = "mean_plus_2std" in stats
calc_mean_plus_3std = "mean_plus_3std" in stats
calc_iqr = "q25_minus_15iqr" in stats or "q75_plus_15iqr" in stats
    # if IQR is requested, we need to append 0.25 and 0.75 to the list of quantiles
calc_quantiles = quantiles if not calc_iqr else sorted(list(set(quantiles).union({0.25, 0.75})))
# endregion
# region prepare list of pointers on statistics function
def count_func():
def f(x):
return len(x)
f.__name__ = "count"
return f
def min_func():
def f(x):
return np.min(x)
f.__name__ = "min"
return f
def max_func():
def f(x):
return np.max(x)
f.__name__ = "max"
return f
def quantile_func(a_q):
def f(x):
return np.quantile(x, a_q)
f.__name__ = "q" + f"{a_q:.2f}"[2:]
return f
def std(a):
return np.std(a)
def skew(a):
return scipy_skew(a)
def kurtosis(a):
return scipy_kurtosis(a)
agg_funcs = []
if calc_mean:
agg_funcs.append(np.mean)
if calc_std:
agg_funcs.append(std)
if calc_sum:
agg_funcs.append(np.sum)
if calc_count:
agg_funcs.append(count_func())
if calc_min:
agg_funcs.append(min_func())
if calc_max:
agg_funcs.append(max_func())
if calc_skew:
agg_funcs.append(skew)
if calc_kurtosis:
agg_funcs.append(kurtosis)
if calc_quantiles:
for q in calc_quantiles:
agg_funcs.append(quantile_func(q))
# endregion
# the final list of columns to return
get_cols = stats + ["q" + f"{q:.2f}"[2:] for q in quantiles]
pdf_res_all = None
for c, p in a_column_prefix_map.items():
pdf_res = a_pdf[a_group_columns + [c]]
pdf_res: pd.DataFrame = pdf_res.groupby(a_group_columns).agg(agg_funcs)
pdf_res.columns = pdf_res.columns.droplevel(0)
# these columns can be pre-computed based on other statistics
if calc_mean_minus_1std:
pdf_res["mean_minus_1std"] = pdf_res["mean"] - pdf_res["std"]
if calc_mean_plus_1std:
pdf_res["mean_plus_1std"] = pdf_res["mean"] + pdf_res["std"]
if calc_mean_minus_2std:
pdf_res["mean_minus_2std"] = pdf_res["mean"] - 2 * pdf_res["std"],
if calc_mean_plus_2std:
pdf_res["mean_plus_2std"] = pdf_res["mean"] + 2 * pdf_res["std"]
if calc_mean_minus_3std:
pdf_res["mean_minus_3std"] = pdf_res["mean"] - 3 * pdf_res["std"],
if calc_mean_plus_3std:
pdf_res["mean_plus_3std"] = pdf_res["mean"] + 3 * pdf_res["std"]
if calc_iqr:
pdf_res["q25_minus_15iqr"] = pdf_res["q25"] - 1.5 * (pdf_res["q75"] - pdf_res["q25"])
pdf_res["q75_plus_15iqr"] = pdf_res["q75"] + 1.5 * | |
range(nspin):
for iat in range(strc.nat):
print >> fout, "----- Calc radfunc for atom ",iat
print >> fout, "-- (L)APW states --"
print >> fout, (' '*10+'POTENTIAL PARAMETERS FOR JATOM=%-3d name=%-10s') % (iat,strc.aname[iat])
print >> fout, ' '*11+'L'+(' '*7)+'U(R)'+(' '*11)+"U'(R)",(' '*9)+'DU/DE'+(' '*10)+"DU'/DE"+(' '*9)+"NORM-U'"
# val/cond states: calculate the radial functions u, udot and ulo
npt = strc.nrpt[iat]
dh = log(strc.rmt[iat]/strc.r0[iat])/(npt - 1) # logarithmic step for the radial mesh
for l in range(in1.nt):
eh = 0.5*Elapw[isp,iat,l] # converting to Hartrees
# Calculate the function at el (u_l(r,E_l)):
a,b,nodes,uv,duv = rd.outwin(strc.rel, Vr[isp,iat,:], strc.r0[iat], dh, npt, eh, float(l), strc.Znuc[iat])
# Calculate |u_l(r,E_l)|^2:
ovlp = rd.rint13g(strc.rel, a, b, a, b, dh, npt, strc.r0[iat])
# Calculate the normalization factor:
trx = 1/sqrt(ovlp)
# Store the normalized values at the boundaries in p and dp:
uv *= trx
duv *= trx
# Normalize the function u_l:
a *= trx
b *= trx
#print >> fout, 'uv[is='+str(isp)+',iat='+str(iat)+',l='+str(l)+']=', uv, 'duv=', duv
dele = 2e-3 # the up and downward energy-shift in Hartrees
#delei = 0.25/dele
# Calculate u_l(r,E_1=E_l-\Delta E)
ac,bc,nodel,uvc,duvc = rd.outwin(strc.rel, Vr[isp,iat,:], strc.r0[iat], dh, npt, eh-dele, float(l), strc.Znuc[iat])
ovlp = rd.rint13g(strc.rel, ac, bc, ac, bc, dh, npt, strc.r0[iat])
trx = 1/sqrt(ovlp)
uvc *= trx
duvc *= trx
ac *= trx
bc *= trx
ae,be,nodeu,uve,duve = rd.outwin(strc.rel, Vr[isp,iat,:], strc.r0[iat], dh, npt, eh+dele, float(l), strc.Znuc[iat])
ovlp = rd.rint13g(strc.rel, ae, be, ae, be, dh, npt, strc.r0[iat])
trx = 1/sqrt(ovlp)
uve *= trx
duve *= trx
ae *= trx
be *= trx
uve = (uve - uvc)*(0.25/dele)
duve= (duve-duvc)*(0.25/dele)
ae = (ae-ac)*(0.25/dele)
be = (be-bc)*(0.25/dele)
            # Ensure orthogonalization of udot against u
# Calculate <u_l|udot_l>
cross = rd.rint13g(strc.rel, a, b, ae, be, dh, npt, strc.r0[iat])
if( cross > 0.05): print >> fout, 'For l='+str(l)+' correction='+str(-cross)+' overlap='+str(ovlp)
# Set orthogonalized udot_l
ae -= cross * a
be -= cross * b
uve -= cross * uv # pe
duve -= cross * duv # dpe
# Store the orthogonalized values at the boundaries in pe and dpe:
self.umt[isp,iat,l,0] = uv # p
self.umt[isp,iat,l,2] = duv # dp
self.umt[isp,iat,l,1] = uve # pe
self.umt[isp,iat,l,3] = duve # dpe
# Calculate |udot_l(r,E_l)|^2 and store it in pei(j,iat)
self.umt[isp,iat,l,4] = rd.rint13g(strc.rel, ae, be, ae, be, dh, npt, strc.r0[iat])
self.ul [isp,iat,l,:npt] = a[:]
self.us [isp,iat,l,:npt] = b[:]
self.udot [isp,iat,l,:npt] = ae[:]
self.usdot[isp,iat,l,:npt] = be[:]
print >> fout, (' '*10+'%2d '+'%14.6f '*5+' '*5+'%2d '*3) % (l,self.umt[isp,iat,l,0],self.umt[isp,iat,l,2],self.umt[isp,iat,l,1],self.umt[isp,iat,l,3],self.umt[isp,iat,l,4],nodel,nodes,nodeu)
store_nodel[iat,l] = nodel
store_nodeu[iat,l] = nodeu
# Calculate the radial function for local orbitals.
nsp,nat,lomaxp1,nloat = shape(Elo)
self.umtlo = zeros((nspin,strc.nat,lomaxp1,nloat,4))
self.ulo = zeros((nspin,strc.nat,lomaxp1,nloat,nrad))
self.uslo = zeros((nspin,strc.nat,lomaxp1,nloat,nrad))
#self.nLO_at = zeros((strc.nat,self.lomax+1), dtype=intc)
for isp in range(nspin):
for iat in range(strc.nat):
print >> fout, (' '*10+'LOCAL ORBITAL POTENTIAL PARAMETERS FOR JATOM=%-3d name=%-10s') % (iat,strc.aname[iat])
print >> fout, (' '*11)+'L'+(' '*7)+'U(R)'+(' '*10)+"U'(R)"+(' '*9)+'<u|u_LO>'+(' '*6)+'<u_dot|u_LO>'+' num. nodes'+ ' Elo'
npt = strc.nrpt[iat]
dh = log(strc.rmt[iat]/strc.r0[iat])/(npt - 1) # logarithmic step for the radial mesh
for l in range(lomaxp1):
for ilo in in1.nLO_at_ind[iat][l]:
el = 0.5*Elo[isp,iat,l,ilo] # in Hartrees
#print >> fout, 'Calculating function at l=', l, 'ilo=', ilo, 'Elo=', el*2.0
# Calculate the function at el (u_l(r,Elo_l)):
a,b,nodes,uv,duv = rd.outwin(strc.rel, Vr[isp,iat,:], strc.r0[iat], dh, npt, el, float(l), strc.Znuc[iat])
# Calculate |u_l(r,E_l)|^2:
ovlp = rd.rint13g(strc.rel, a, b, a, b, dh, npt, strc.r0[iat])
# Calculate the normalization factor:
trx = 1/sqrt(ovlp)
# Normalize the function u_l:
uv *= trx # plo
duv *= trx # dplo
a *= trx
b *= trx
# Calculate pi12lo(l,iat)=<u_l(r,E_l)|u_l(r,E_{lo})>:
pi12lo = rd.rint13g(strc.rel, self.ul[isp,iat,l,:npt], self.us[isp,iat,l,:npt], a, b, dh, npt, strc.r0[iat])
# Calculate pe12lo(l,iat)=<\dot{u}_l(r,E_l)|u_l(r,E_{lo})>:
pe12lo = rd.rint13g(strc.rel, self.udot[isp,iat,l,:npt], self.usdot[isp,iat,l,:npt], a, b, dh, npt, strc.r0[iat])
self.umtlo[isp,iat,l,ilo,0] = uv # plo
self.umtlo[isp,iat,l,ilo,1] = duv # dplo
self.umtlo[isp,iat,l,ilo,2] = pi12lo # pi12lo
self.umtlo[isp,iat,l,ilo,3] = pe12lo # pe12lo
# Store the radial wave function :
self.ulo [isp,iat,l,ilo,:npt] = a[:]
self.uslo[isp,iat,l,ilo,:npt] = b[:]
print >> fout, ((' '*10)+'%2d '+('%14.6f'*4)+(' '*5)+' %2d '+' %10.6f') % (l,uv,duv,pi12lo,pe12lo,nodes,el*2)
for l in range(lomaxp1):
if len(in1.nLO_at_ind[iat][l])>0:
print >> fout, ' '*10+'LOCAL ORBITALS OVERLAPS:'
print >> fout, ' '*10+'jlo klo <u|u> E[jlo] E[klo]'
for jlo in in1.nLO_at_ind[iat][l]:
for klo in in1.nLO_at_ind[iat][l]:
pilolo = rd.rint13g(strc.rel, self.ulo[isp,iat,l,jlo,:], self.uslo[isp,iat,l,jlo,:], self.ulo[isp,iat,l,klo,:], self.uslo[isp,iat,l,klo,:], dh, npt, strc.r0[iat])
print >> fout, ((' '*7)+('%5d '*2)+('%15.6f '*3)) % (jlo,klo,pilolo,Elo[isp,iat,l,jlo],Elo[isp,iat,l,klo])
    def get_ABC(self, in1, strc, fout):
        """Calculate the coefficients A,B,C for (L)APW and the local orbitals.

        Fills self.abcelo[isp, iat, l, ilo, 0:3] with (alo, blo, clo) using the
        muffin-tin boundary values previously stored in self.umt / self.umtlo.
        Diagnostics are written to fout (Python 2 `print >>` syntax).
        """
        cutoff = 200  # cap on clo to avoid blow-up when the normalization denominator is tiny
        #nspin, nat, nt, x = shape(self.umt)
        nspin, nat, lomaxp1, nloat, x = shape(self.umtlo)
        self.abcelo = zeros((nspin,nat,lomaxp1,nloat,3))
        for isp in range(nspin):
            for iat in range(nat):
                Rmt = strc.rmt[iat]  # muffin-tin radius of this atom
                for l in range(lomaxp1):
                    # P(l) = ul_Rmt(1,l,jatom)
                    # PE(l) = ul_Rmt(2,l,jatom)
                    # DP(l) = dul_Rmt(1,l,jatom)
                    # DPE(l) = dul_Rmt(2,l,jatom)
                    # PLO(l) = ul_Rmt(2+jlo,l,jatom)
                    # DPLO(l)= dul_Rmt(2+jlo,l,jatom)
                    #
                    p = self.umt[isp,iat,l,0] # p   : u(Rmt)
                    pe = self.umt[isp,iat,l,1] # pe  : udot(Rmt)
                    dp = self.umt[isp,iat,l,2] # dp  : du/dr(Rmt)
                    dpe = self.umt[isp,iat,l,3] # dpe : dudot/dr(Rmt)
                    pei = self.umt[isp,iat,l,4] # pei : <udot|udot> (stored earlier)
                    if in1.lapw[l,iat]==0: # APW+lo, hence only u and dotu are used
                        alonorm=sqrt(1 + (p/pe)**2 * pei)
                        alo = 1/alonorm
                        blo = -p/(pe*alonorm)
                        clo = 0
                        #
                        ilo=0
                        self.abcelo[isp,iat,l,ilo,0] = alo
                        self.abcelo[isp,iat,l,ilo,1] = blo
                        self.abcelo[isp,iat,l,ilo,2] = clo
                        print >> fout, 'lo coefficient: iat=%-2d l=%-2d ilo=%-1d lapw=%1d a=%-12.7f b=%-12.7f c=%-12.7f' % (iat,l,ilo,in1.lapw[l,iat],self.abcelo[isp,iat,l,ilo,0],self.abcelo[isp,iat,l,ilo,1],self.abcelo[isp,iat,l,ilo,2])
                    for ilo in in1.nLO_at_ind[iat][l]:
                        plo = self.umtlo[isp,iat,l,ilo,0] # plo    : u_LO(Rmt)
                        dplo = self.umtlo[isp,iat,l,ilo,1] # dplo   : du_LO/dr(Rmt)
                        pi12lo = self.umtlo[isp,iat,l,ilo,2] # pi12lo : <u|u_LO>
                        pe12lo = self.umtlo[isp,iat,l,ilo,3] # pe12lo : <udot|u_LO>
                        if in1.lapw[l,iat]: # This is LAPW+LO
                            # We construct the LO orbtial as u_new = ALO*u + BLO*dotu + CLO*u_LO
                            # and require u_new(R)     = 0
                            #             du_new/dr(R) = 0
                            # and         <u_new|u_new> = 1
                            # which leads to:
                            # xac = (u_LO*ddotu-du_LO*dotu)*R^2
                            # xbc = -(u_LO*du - du_LO*u)*R^2
                            # clo = 1/sqrt( 1 + xac*(xac + 2*<u_LO|u>)+xbc*(xbc*<dotu|dotu>+2*<dotu|u_LO> )
                            # alo = xac/sqrt( 1 + xac*(xac + 2*<u_LO|u>)+xbc*(xbc*<dotu|dotu>+2*<dotu|u_LO> )
                            # blo = xbc/sqrt( 1 + xac*(xac + 2*<u_LO|u>)+xbc*(xbc*<dotu|dotu>+2*<dotu|u_LO> )
                            xac = (plo*dpe - dplo*pe)*Rmt**2
                            xbc = -(plo*dp - dplo*p)*Rmt**2
                            clo = 1/sqrt(1 + xac*(xac + 2*pi12lo) + xbc*(xbc*pei + 2*pe12lo))
                            clo = min(clo,cutoff)  # guard against near-singular normalization
                            alo = clo*xac
                            blo = clo*xbc
                            #print >> fout, '%s%12.7f '*9 % ('debug_lo p=', p, 'dp=', dp, 'plo=', plo, 'dplo=', dplo, 'pe=', pe, 'dpe=', dpe, 'pi12lo=', pi12lo, 'pe12lo=', pe12lo, 'rm=', Rmt)
                        else: # must be APW+lo+LO
                            # We construct the LO orbital as u_new = ALO*u + CLO*u_LO
                            # and require u_new(R) = 0
                            # and <u_new|u_new> = 1
                            # which leads to:
                            # xac = sqrt(1 + (u/u_LO)**2 - 2*(u/u_LO)*<u|u_LO>)
                            # ALO = 1/sqrt(1 + (u/u_LO)**2 - 2*(u/u_LO)*<u|u_LO>)
                            # CLO = -u/u_LO /sqrt(1 + (u/u_LO)**2 - 2*(u/u_LO)*<u|u_LO>)
                            xbc=-p/plo
                            xac=sqrt(1+xbc**2+2*xbc*pi12lo)
                            alo = 1/xac
                            blo = 0
                            clo = xbc/xac
                        self.abcelo[isp,iat,l,ilo,0] = alo
                        self.abcelo[isp,iat,l,ilo,1] = blo
                        self.abcelo[isp,iat,l,ilo,2] = clo
                        print >> fout, 'lo coefficient: iat=%-2d l=%-2d ilo=%-1d lapw=%1d a=%-12.7f b=%-12.7f c=%-12.7f' % (iat,l,ilo,in1.lapw[l,iat],self.abcelo[isp,iat,l,ilo,0],self.abcelo[isp,iat,l,ilo,1],self.abcelo[isp,iat,l,ilo,2])
class CoreStates:
def __init__(self, case, strc, nspin, fout):
self.l2kappa={'S ': -1, 'P ': -2, 'PP': 1, 'D ': -3, 'DD': 2, 'F ': -4, 'FF': 3}
fin = open(case+'.core','r')
print >> fout, 'read core states occupuation information!'
self.occ_inc = [[] for iat in range(strc.nat)]
ncore = zeros(strc.nat,dtype=int)
for iat in range(strc.nat):
dat = fin.next().split()
norb, shift, iprint = int(dat[0]), float(dat[1]), int(dat[2])
ncore[iat] = norb
for iorb in range(norb):
dat = fin.next().split(',')
qn_n, qn_kappa, occ = int(dat[0]), int(dat[1]), int(dat[2].split()[0])
self.occ_inc[iat].append( occ )
if (occ < 1e-2):
ncore[iat] -= 1
#print 'n='+str(qn_n)+' kappa=%2d' % (qn_kappa,)+' occ='+str(occ)
print >> fout, 'Number of core states at '+str(iat)+'-th atom:', ncore[iat]
line = fin.next()
ncoremax = max(ncore)
self.eig_core = [[[] for i in range(strc.nat)] for j in range(nspin)]
self.l_core = [[] for j in range(strc.nat)]
self.corind = []
n_sym_kap_ocm=[[] for iat in range(strc.nat)]
########
#import radials as rd # radial wave functions
if ncoremax != 0:
# core states present
isp=0 # spin up
for iat in range(strc.nat):
line = fin.next()
line = fin.next()
t_atomname, t_norb = line[20:30], int(line[41:43])
if (t_norb != len(self.occ_inc[iat])):
print | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Author - <NAME>
Class - CPS 470
Assignment - Homework 2: Simulation of Memory Allocation Strategies
Creation Date - Tuesday June 23 10:11:47 2020
Due Date - Thursday June 25 13:59:59 2020
Purpose: Study the performances of the memory allocation strategies first-fit, next-fit, best-fit, worst-fit
through a series of request and release operations
"""
import random
import copy
import matplotlib.pyplot as plt
"""
printMemory prints the contents of the physical memory
"""
def printMemory():
    """
    printMemory prints the contents of the physical memory

    The memory map stores one signed size per region header: positive means
    an allocated block of that many cells, negative means a hole. Each cell
    of an allocated region prints the block's ordinal; each hole cell prints
    a dash.
    """
    ctr = 0    # index of the current region header
    block = 1  # ordinal of the current allocated block
    # print(memory) # Debugging
    while(ctr < len(memory)):
        if(memory[ctr] > 0):
            for i in range(memory[ctr]):
                print(block)
            # NOTE(review): block advances once per allocated region here;
            # original indentation was ambiguous (could have been per cell) —
            # confirm against upstream sources.
            block += 1
        else:
            for i in range(abs(memory[ctr])):
                print("-")
        ctr = ctr + abs(memory[ctr])  # jump to the next region header
    return
"""
firstFit runs the First-Fit memory allocation algorithm
"""
def firstFit(requestSize):
    """
    firstFit runs the First-Fit memory allocation algorithm

    Scans region headers from the start of memory and places the request in
    the first hole large enough. The number of headers examined is added to
    the global ffSearches tally (indexed by requestSize - minR).
    Returns True when memory was full (no suitable hole), False on success.
    """
    examined = 0
    placed = False
    pos = 0
    while pos < len(memory) and not placed:
        examined += 1
        if memory[pos] < 0 and abs(memory[pos]) >= requestSize:
            placed = insSpecificLoc(pos, requestSize)
        else:
            pos += abs(memory[pos])  # jump to the next region header
    ffSearches[requestSize - minR] += examined
    return not placed
"""
bestFit runs the Best-Fit memory allocation algorithm
"""
def bestFit(requestSize):
    """
    bestFit runs the Best-Fit memory allocation algorithm

    Examines every region header and places the request in the smallest hole
    that still fits. The number of headers examined is added to the global
    bfSearches tally (indexed by requestSize - minR).
    Returns True when memory was full (no suitable hole), False on success.
    """
    examined = 0
    smallest = len(memory)  # sentinel: bigger than any real hole
    target = -1             # header index of the best hole found so far
    pos = 0
    while pos < len(memory):
        examined += 1
        region = abs(memory[pos])
        if memory[pos] < 0 and region >= requestSize and region < smallest:
            smallest = region
            target = pos
        pos += region  # jump to the next region header
    placed = insSpecificLoc(target, requestSize) if target > -1 else False
    bfSearches[requestSize - minR] += examined
    return not placed
"""
worstFit runs the Worst-Fit memory allocation algorithm
"""
def worstFit(requestSize):
    """
    worstFit runs the Worst-Fit memory allocation algorithm

    Examines every region header and places the request in the largest hole
    that fits. The number of headers examined is added to the global
    wfSearches tally (indexed by requestSize - minR).
    Returns True when memory was full (no suitable hole), False on success.
    """
    examined = 0
    biggest = 0   # size of the largest suitable hole found so far
    target = -1   # header index of that hole
    pos = 0
    while pos < len(memory):
        examined += 1
        region = abs(memory[pos])
        if memory[pos] < 0 and region >= requestSize and region > biggest:
            biggest = region
            target = pos
        pos += region  # jump to the next region header
    placed = insSpecificLoc(target, requestSize) if target > -1 else False
    wfSearches[requestSize - minR] += examined
    return not placed
"""
checkAllocation returns the number of allocated spaces of memory
"""
def checkAllocation():
    """
    checkAllocation returns the number of allocated spaces of memory

    Sums every positive cell value in the global memory map (allocated
    region headers hold their positive size).
    """
    return sum(cell for cell in memory if cell > 0)
"""
checkBlocks returns the number of blocks allocated
"""
def checkBlocks():
    """
    checkBlocks returns the number of blocks allocated

    Counts the cells of the global memory map holding a positive value
    (allocated region headers).
    """
    return sum(1 for cell in memory if cell > 0)
"""
checkBlockInd takes a block number and finds its index in memory to be used in freeBlock()
"""
def checkBlockInd(blockNum, mem=None):
    """
    checkBlockInd takes a block number and finds its index in memory to be used in freeBlock()

    Walks the memory map header by header (each header cell holds the signed
    size of its region: positive = allocated block, negative = hole), counting
    allocated blocks until the blockNum-th one is reached. Returns its header
    index, or 0 when no such block exists (0 is also the legitimate answer
    when block 1 starts at index 0, matching the old default).

    mem defaults to the global simulation memory; passing an explicit list
    makes the function usable without the global (backward-compatible).

    Bug fixed: the previous version visited every cell (including interior
    cells of regions, whose values are meaningless) and incremented the block
    counter on every non-matching cell, so it returned wrong indices.
    """
    if mem is None:
        mem = memory
    blockCtr = 0
    i = 0
    while i < len(mem) and mem[i] != 0:  # mem[i] == 0 would stall the walk
        if mem[i] > 0:
            blockCtr += 1
            if blockCtr == blockNum:
                return i
        i += abs(mem[i])  # jump to the next region header
    return 0
"""
insertRequest inserts initial requests into the block of memory
"""
def insertRequest(loc, size):
    """
    insertRequest inserts initial requests into the block of memory

    Scans region headers left to right for a hole that fully covers
    [loc, loc + size) and, if found, splits it into (left hole)(block)(right
    hole) by rewriting the three headers. Returns True on success, False
    when no covering hole exists.
    """
    i = 0
    while i < len(memory):
        region = abs(memory[i])
        if memory[i] < 0 and i <= loc and i + region >= loc + size:
            hole_end = i + region
            memory[i] = i - loc                           # left remainder (free, may be 0)
            memory[loc] = size                            # the allocated block header
            memory[loc + size] = (loc + size) - hole_end  # right remainder (free, may be 0)
            return True
        i += region  # jump to the next region header
    return False
"""
insSpecificLoc inserts requests from each memory allocation algorithm
"""
def insSpecificLoc(loc, size):
    """
    insSpecificLoc inserts requests from each memory allocation algorithm

    Allocates `size` cells at hole header `loc`. A larger hole is split so
    its tail stays free; an exact-size hole is consumed whole.
    Returns True when allocation happened, False otherwise.
    """
    hole = abs(memory[loc])
    if memory[loc] >= 0 or hole < size:
        return False  # not a hole, or the hole is too small
    memory[loc] = size  # claim the first `size` cells
    if hole > size:
        memory[loc + size] = -(hole - size)  # remainder stays a free hole
    return True
"""
freeBlock frees a block of allocated memory
"""
def freeBlock(blockIndex):
    """
    freeBlock frees a block of allocated memory

    blockIndex is the 1-based ordinal of the allocated block to free. The
    header of the freed block is made negative (free) and merged with a free
    neighbour where one is detected, then the resulting map is printed.

    NOTE(review): `memory[memory[cur]]` treats a region SIZE as an absolute
    index; that only equals the next header's index when cur == 0 (the usual
    expression would be memory[cur + memory[cur]]). Verify the coalescing
    paths before relying on them.
    """
    freed = False
    prev = -1
    cur = 0
    index = 0
    # Find the block
    while(not freed and cur < len(memory)):
        if(blockIndex == 1): # Special case
            if(memory[cur] > 0): # No free memory before this allocated block
                if(memory[memory[cur]] > 0): # No free memory after this block
                    memory[cur] = -memory[cur] # Set it to be free
                else: # Free memory after block and coalesce
                    memory[cur] = -memory[cur] + memory[memory[cur]]
            else: # Free memory prior to allocated block
                prev = 0
                cur = abs(memory[cur])
                if((memory[cur] + cur < len(memory)) and memory[memory[cur]] > 0): # No free memory after this block
                    memory[prev] = memory[prev] - memory[cur] # Set it to be free
                elif(memory[cur] + cur < len(memory)): # Free memory after block and coalesce
                    memory[prev] = memory[prev] - memory[cur] + memory[memory[cur]]
                else:
                    memory[prev] = memory[prev] - memory[cur] # set it to be free
            freed = True
        else: # blockIndex != 1
            while(index < blockIndex): # Search for the right set of blocks
                prev = cur
                if(memory[cur] > 0):
                    index += 1
                else:
                    # Hole -- do nothing
                    # NOTE(review): `cur` only advances over holes in this
                    # reconstruction (the original indentation was ambiguous),
                    # so consecutive allocated headers may be re-counted —
                    # confirm the intended nesting of the advance below.
                    if(index < blockIndex):
                        cur = cur + abs(memory[cur])
            if(memory[prev] > 0): # No free memory before this allocated block
                if((memory[cur] + cur < len(memory)) and memory[memory[cur]] > 0): # No free memory after this block
                    memory[cur] = -memory[cur] # Set it to be free
                elif(memory[cur] + cur < len(memory)): # Free memory after block and coalesce
                    memory[cur] = -memory[cur] + memory[memory[cur]]
                else:
                    memory[prev] = memory[prev] - memory[cur] # Set it to be free
            else: # Free memory prior to allocated block
                if((memory[cur] + cur < len(memory)) and memory[memory[cur]] > 0): # No free memory after this block
                    memory[prev] = memory[prev] - memory[cur] # Set it to be free
                elif(memory[cur] + cur < len(memory)): # Free memory after block and coalesce
                    memory[prev] = memory[prev] - memory[cur] + memory[memory[cur]]
                else:
                    memory[prev] = memory[prev] - memory[cur] # Set it to be free
            freed = True
    print("Block %s freed." % (blockIndex))
    printMemory()
    return freed
"""
plotPoints graphs the outcomes of 3 memory allocation algorithms: First-Fit, Best-Fit, and Worst-Fit
"""
def plotPoints(rSize):
    """
    plotPoints graphs the outcomes of 3 memory allocation algorithms: First-Fit, Best-Fit, and Worst-Fit

    Produces (and saves) one figure for average memory utilization and one
    for average search times, plotted against the request-size parameter d.
    """
    def _draw(ylabel, title, outfile, ff_series, bf_series, wf_series):
        # One figure: three lines (one per algorithm), legend, save, show.
        fig, ax = plt.subplots()
        ax.set(xlabel="d", ylabel=ylabel, title=title)
        ax.plot(rSize, ff_series, label="First-Fit")
        ax.plot(rSize, bf_series, label="Best-Fit")
        ax.plot(rSize, wf_series, label="Worst-Fit")
        ax.legend()
        fig.savefig(outfile)
        plt.show()

    # Plot results for Memory Utilization
    _draw("Memory Utilization",
          "Average Memory Utilization using Same Memory & Same Requests",
          "UtilizationGraph.png", ffUtil, bfUtil, wfUtil)
    # Plot results for Search Times
    _draw("Search Times",
          "Average Search Times using Same Memory & Same Requests",
          "TimeGraph.png", ffSearches, bfSearches, wfSearches)
    return
"""
Main is the primary method that does everything include starting the simulation
"""
def main():
global memory
# Keeps track of the memory utilization per request size of each memory allocation algorithm
global ffUtil
global bfUtil
global wfUtil
    # Instantiate vars
ffUtil = []
bfUtil = []
wfUtil = []
# Keeps track of the number of holes examined by each memory allocation algorithm
global ffSearches
global bfSearches
global wfSearches
    # Instantiate vars
ffSearches = []
bfSearches = []
wfSearches = []
# global firstFitBlocks = [] # Number of blocks allocated in first fit
# global bestFitBlocks = [] # Number of blocks allocated in best fit
# global worstFitBlocks = [] # Number of blocks allocated in worst fit
global minR
minR = 2 # Minimum d value, | |
"""
# Integrating Authorize.Net
### 1. Validate Currency Support
Example:
from frappe.integration_broker.doctype.integration_service.integration_service import get_integration_controller
controller = get_integration_controller("AuthorizeNet")
controller().validate_transaction_currency(currency)
### 2. Redirect for payment
Example:
payment_details = {
"amount": 600,
"title": "Payment for bill : 111",
"description": "payment via cart",
"reference_doctype": "Payment Request",
"reference_docname": "PR0001",
"payer_email": "<EMAIL>",
"payer_name": "<NAME>",
"order_id": "111",
"currency": "USD"
}
# redirect the user to this url
url = controller().get_payment_url(**payment_details)
### 3. On Completion of Payment
Write a method for `on_payment_authorized` in the reference doctype
Example:
def on_payment_authorized(payment_status):
# your code to handle callback
##### Note:
payment_status - payment gateway will put payment status on callback.
For authorize.net status parameter is one from: [Completed, Failed]
More Details:
<div class="small">For details on how to get your API credentials, follow this link: <a href="https://support.authorize.net/authkb/index?page=content&id=A405" target="_blank">https://support.authorize.net/authkb/index?page=content&id=A405</a></div>
"""
from __future__ import unicode_literals
import frappe
from frappe import _, _dict
from frappe.utils import get_url, call_hook_method, flt
from frappe.model.document import Document
from frappe.integrations.utils import create_request_log, create_payment_gateway
import json
from datetime import datetime
import urllib
import authorize
from authorize import AuthorizeResponseError, AuthorizeInvalidError
from authorizenet.utils import get_authorizenet_user, get_card_accronym, authnet_address, get_contact
def log(*args, **kwargs):
    """Lightweight debug logger: print each positional argument on its own line.

    Keyword arguments are accepted for call-site compatibility but ignored.
    Bug fixed: str.join() raised TypeError for non-string arguments; values
    are now coerced with str() first.
    """
    print("\n".join(str(a) for a in args))
class AuthorizeNetSettings(Document):
service_name = "AuthorizeNet"
supported_currencies = ["USD"]
is_embedable = True
    def validate(self):
        """Frappe `validate` hook for the settings document.

        Ensures the "AuthorizeNet" Payment Gateway record exists, announces
        that the gateway is enabled via the `payment_gateway_enabled` hook,
        and — unless mandatory checks are suppressed on the doc flags —
        validates the stored API credentials.
        """
        create_payment_gateway("AuthorizeNet")
        call_hook_method("payment_gateway_enabled", gateway=self.service_name)
        if not self.flags.ignore_mandatory:
            self.validate_authorizenet_credentails()
    def on_update(self):
        # Frappe `on_update` hook — intentionally a no-op for these settings.
        pass
def get_embed_context(self, context):
# list countries for billing address form
context["authorizenet_countries"] = frappe.get_list("Country", fields=["country_name", "name"], ignore_permissions=1)
default_country = frappe.get_value("System Settings", "System Settings", "country")
default_country_doc = next((x for x in context["authorizenet_countries"] if x.name == default_country), None)
country_idx = context["authorizenet_countries"].index(default_country_doc)
context["authorizenet_countries"].pop(country_idx)
context["authorizenet_countries"] = [default_country_doc] + context["authorizenet_countries"]
context["year"] = datetime.today().year
# get the authorizenet user record
authnet_user = get_authorizenet_user()
if authnet_user:
context["stored_payments"] = authnet_user.get("stored_payments", [])
def get_embed_form(self, context={}):
context.update({
"source": "templates/includes/integrations/authorizenet/embed.html"
})
context = _dict(context)
self.get_embed_context(context)
return {
"form": frappe.render_template(context.source, context),
"style_url": "/assets/css/authorizenet_embed.css",
"script_url": "/assets/js/authorizenet_embed.js"
}
    def validate_authorizenet_credentails(self):
        # Placeholder: credential validation against the AuthorizeNet API is
        # not implemented yet. The method name keeps the historical
        # "credentails" typo because validate() calls it under this name.
        pass
def validate_transaction_currency(self, currency):
if currency not in self.supported_currencies:
frappe.throw(_("Please select another payment method. {0} does not support transactions in currency \"{1}\"").format(self.service_name, currency))
    def build_authorizenet_request(self, **kwargs):
        """Creates an AuthorizeNet Request record to keep params off the url.

        kwargs carry the payment details (amount, currency, order_id, payer
        info, reference_doctype/reference_docname, ...). Returns the inserted
        document.
        """
        data = {
            "doctype": "AuthorizeNet Request",
            "status": "Issued",
        }
        data.update(kwargs)
        # reference_docname is set after the record exists (see below), so it
        # must not be part of the initial insert payload.
        del data["reference_docname"] # have to set it after insert
        request = frappe.get_doc(data)
        request.flags.ignore_permissions = 1
        request.insert()
        # TODO: Why must we save doctype first before setting docname?
        request.reference_docname = kwargs["reference_docname"]
        request.save()
        # Commit right away: the checkout page loads this record in a
        # separate request immediately after the redirect.
        frappe.db.commit()
        return request
def get_payment_url(self, **kwargs):
request = self.build_authorizenet_request(**kwargs)
url = "./integrations/authorizenet_checkout/{0}"
result = get_url(url.format(request.get("name" )))
return result
def get_settings(self):
settings = frappe._dict({
"api_login_id": self.api_login_id,
"api_transaction_key": self.get_password(fieldname="api_transaction_key", raise_exception=False)
})
return settings
def process_payment(self):
    """Run an AuthorizeNet sale transaction for ``self.process_data``.

    Two payment paths are supported:
      - raw card data supplied in ``self.card_info`` (optionally stored
        with AuthorizeNet afterwards when ``store_payment`` is set), or
      - a previously stored payment profile referenced by
        ``process_data["authorizenet_profile"]``.

    Returns
    -------
    tuple
        ``(request, redirect_to, redirect_message, authorizenet_data)``:
        the AuthorizeNet Request doc (status "Captured" on success,
        "Error" on failure), redirect info for the client, and the
        customer/payment profile ids involved (empty dict for plain
        card payments).
    """
    # used for feedback about which payment was used
    authorizenet_data = {}

    # the current logged in contact
    contact = get_contact()

    # get authorizenet user if available
    authnet_user = get_authorizenet_user()

    # the cc data available
    data = self.process_data

    # get auth keys
    settings = self.get_settings()

    # fetch redirect info
    redirect_to = data.get("notes", {}).get("redirect_to") or None
    redirect_message = data.get("notes", {}).get("redirect_message") or None

    # uses dummy request doc for unittests as we are only testing processing
    if not data.get("unittest"):
        if data.get("name"):
            request = frappe.get_doc("AuthorizeNet Request", data.get("name"))
        else:
            # Create request from scratch when embeding form on the fly.
            # This allows payment processing without having to pre-create
            # a request first. This path expects ALL the payment request
            # information to be available.
            request = self.build_authorizenet_request(**{
                key: data[key] for key in
                ('amount', 'currency', 'order_id', 'title',
                 'description', 'payer_email', 'payer_name',
                 'reference_docname', 'reference_doctype')})
            data["name"] = request.get("name")
    else:
        request = frappe.get_doc({"doctype": "AuthorizeNet Request"})
        request.flags.ignore_permissions = 1

    # set the max log level as per settings
    request.max_log_level(self.log_level)

    try:
        if self.card_info:
            # ensure card fields exist
            required_card_fields = ['name_on_card', 'card_number', 'exp_month', 'exp_year', 'card_code']
            for f in required_card_fields:
                if not self.card_info.get(f):
                    request.status = "Error"
                    return request, None, "Missing field: %s" % f, {}

        # prepare authorize api
        authorize.Configuration.configure(
            authorize.Environment.TEST if self.use_sandbox else authorize.Environment.PRODUCTION,
            settings.api_login_id,
            settings.api_transaction_key
        )

        # cache billing fields as per authorize api requirements
        billing = authnet_address(self.billing_info)
        if self.shipping_info:
            shipping = authnet_address(self.shipping_info)
        else:
            shipping = None

        # attempt to find valid email address
        email = self.process_data.get("payer_email")
        if email:
            email = email.split(',')[0]
            if "@" not in email and contact:
                email = contact.get("email_id")

        # BUGFIX: guard against email being None; "@" not in None raised a
        # TypeError that was swallowed by the generic handler below
        if not email or "@" not in email:
            if contact and contact.user:
                email = frappe.get_value("User", contact.user, "email_id")

        if not email or "@" not in email:
            log("AUTHNET FAILURE! Bad email: {0}".format(email))
            raise ValueError("There are no valid emails associated with this customer")

        # build transaction data
        transaction_data = {
            "order": {
                "invoice_number": data["order_id"]
            },
            "amount": flt(self.process_data.get("amount")),
            # BUGFIX: card_info is None when paying with a stored profile;
            # guard the lookup so that path doesn't crash
            "description": (self.card_info or {}).get("name_on_card"),
            "customer_type": "individual"
        }

        # track ip for tranasction records
        if frappe.local.request_ip:
            transaction_data.update({
                "extra_options": {
                    "customer_ip": frappe.local.request_ip
                }
            })

        # get authorizenet profile information for stored payments
        authorizenet_profile = self.process_data.get("authorizenet_profile")

        # use card
        # see: https://vcatalano.github.io/py-authorize/transaction.html
        if self.card_info is not None:
            # exp formating for sale/auth api
            expiration_date = "{0}/{1}".format(
                self.card_info.get("exp_month"),
                self.card_info.get("exp_year"))

            transaction_data.update({
                "credit_card": {
                    "card_number": self.card_info.get("card_number"),
                    "expiration_date": expiration_date,
                    "card_code": self.card_info.get("card_code")
                }
            })
        elif authorizenet_profile:
            # if the customer_id isn't provided, then fetch from authnetuser
            if not authorizenet_profile.get("customer_id"):
                authorizenet_profile["customer_id"] = authnet_user.get("authorizenet_id")

            # or stored payment
            transaction_data.update({
                "customer_id": authorizenet_profile.get("customer_id"),
                "payment_id": authorizenet_profile.get("payment_id")
            })

            # track transaction payment profile ids to return later
            authorizenet_data.update({
                "customer_id": authorizenet_profile.get("customer_id"),
                "payment_id": authorizenet_profile.get("payment_id")
            })
        else:
            # BUGFIX: raising a plain string is a TypeError; raise a real
            # exception so the error message actually reaches the logs
            raise ValueError("Missing Credit Card Information")

        # BUGFIX: fall back to payer_name so stored-profile payments
        # (card_info is None) no longer crash on the name lookup
        full_name = (self.card_info or {}).get("name_on_card") or \
            self.process_data.get("payer_name") or ""
        name_parts = full_name.split(' ')
        first_name = name_parts[0]
        last_name = " ".join(name_parts[1:])

        # add billing information if available
        if len(billing.keys()):
            transaction_data["billing"] = billing
            transaction_data["billing"]["first_name"] = first_name
            transaction_data["billing"]["last_name"] = last_name

        if shipping and len(shipping.keys()):
            # BUGFIX: this previously assigned ``billing`` (copy/paste
            # error), so the shipping address never reached AuthorizeNet
            transaction_data["shipping"] = shipping
            transaction_data["shipping"]["first_name"] = first_name
            transaction_data["shipping"]["last_name"] = last_name

        # include line items if available
        if self.process_data.get("line_items"):
            transaction_data["line_items"] = self.process_data.get("line_items")

        request.log_action("Requesting Transaction: %s" %
                           json.dumps(transaction_data), "Debug")

        # perform transaction finally
        result = authorize.Transaction.sale(transaction_data)
        request.log_action(json.dumps(result), "Debug")

        # if all went well, record transaction id
        request.transaction_id = result.transaction_response.trans_id
        request.status = "Captured"
        request.flags.ignore_permissions = 1

    except AuthorizeInvalidError as iex:
        # log validation errors
        request.log_action(frappe.get_traceback(), "Error")
        request.status = "Error"

        error_msg = ""
        errors = []
        if iex.children and len(iex.children) > 0:
            for field_error in iex.children:
                print(field_error.asdict())
                # .items() works on both py2 and py3 (was py2-only .iteritems())
                for field_name, error in field_error.asdict().items():
                    errors.append(error)

        error_msg = "\n".join(errors)
        request.error_msg = error_msg

    except AuthorizeResponseError as ex:
        # log authorizenet server response errors
        result = ex.full_response
        request.log_action(json.dumps(result), "Debug")
        request.log_action(str(ex), "Error")
        request.status = "Error"
        request.error_msg = ex.text
        redirect_message = str(ex)

        if result and hasattr(result, 'transaction_response'):
            # if there is extra transaction data, log it
            errors = result.transaction_response.errors
            request.log_action("\n".join([err.error_text for err in errors]), "Error")
            request.log_action(frappe.get_traceback(), "Error")
            request.transaction_id = result.transaction_response.trans_id
            # NOTE(review): setting "Success" while status stays "Error"
            # looks suspicious -- confirm this is the intended behavior
            redirect_message = "Success"

    except Exception as ex:
        log(frappe.get_traceback())
        # any other errors
        request.log_action(frappe.get_traceback(), "Error")
        request.status = "Error"
        request.error_msg = "[UNEXPECTED ERROR]: {0}".format(ex)

    # now check if we should store payment information on success
    if request.status in ("Captured", "Authorized") and \
       self.card_info and \
       self.card_info.get("store_payment") and \
       contact:
        try:
            # create customer if authnet_user doesn't exist
            if not authnet_user:
                request.log_action("Creating AUTHNET customer", "Info")
                customer_result = authorize.Customer.from_transaction(request.transaction_id)
                request.log_action("Success", "Debug")

                authnet_user = frappe.get_doc({
                    "doctype": "AuthorizeNet Users",
                    "authorizenet_id": customer_result.customer_id,
                    "contact": contact.name
                })

            card_store_info = {
                "card_number": self.card_info.get("card_number"),
                "expiration_month": self.card_info.get("exp_month"),
                "expiration_year": self.card_info.get("exp_year"),
                "card_code": self.card_info.get("card_code"),
                "billing": self.billing_info
            }

            request.log_action("Storing Payment Information With AUTHNET", "Info")
            request.log_action(json.dumps(card_store_info), "Debug")

            try:
                card_result = authorize.CreditCard.create(
                    authnet_user.get("authorizenet_id"), card_store_info)
            except AuthorizeResponseError as ex:
                card_result = ex.full_response
                request.log_action(json.dumps(card_result), "Debug")
                request.log_action(str(ex), "Error")

                try:
                    # duplicate payment profile (E00039) is not an error;
                    # anything else (including a malformed response while
                    # probing for the code) re-raises the original exception
                    if card_result["messages"][0]["message"]["code"] == "E00039":
                        request.log_action("Duplicate payment profile, ignore", "Error")
                    else:
                        raise ex
                except:
                    raise ex

            # NOTE(review): on the duplicate-profile path card_result is the
            # raw full_response and may lack .payment_id -- confirm upstream
            request.log_action("Success: %s" % card_result.payment_id, "Debug")

            address_short = "{0}, {1} {2}".format(
                billing.get("city"),
                billing.get("state"),
                billing.get("pincode"))

            card_label = "{0}{1}".format(
                get_card_accronym(self.card_info.get("card_number")), self.card_info.get("card_number")[-4:])

            authnet_user.flags.ignore_permissions = 1
            authnet_user.append("stored_payments", {
                "doctype": "AuthorizeNet Stored Payment",
                "short_text": "%s %s" % (card_label,
                                         address_short),
                "long_text": "{0}\n{1}\n{2}, {3} {4}\n{5}".format(
                    card_label,
                    billing.get("address", ""),
                    billing.get("city", ""),
                    billing.get("state", ""),
                    billing.get("pincode", ""),
                    frappe.get_value("Country", filters={"name": self.billing_info.get("country")}, fieldname="country_name")
                ),
                "address_1": self.billing_info.get("address_1"),
                "address_2": self.billing_info.get("address_2"),
                "expires": "{0}-{1}-01".format(
                    self.card_info.get("exp_year"),
                    self.card_info.get("exp_month")),
                "city": self.billing_info.get("city"),
                "state": self.billing_info.get("state"),
                "postal_code": self.billing_info.get("pincode"),
                "country": frappe.get_value("Country", self.billing_info.get("country"), fieldname="code"),
                "payment_type": "Card",
                "authorizenet_payment_id": card_result.payment_id
            })

            authorizenet_data.update({
                "customer_id": authnet_user.get("authorizenet_id"),
                "payment_id": card_result.payment_id
            })

            if not data.get("unittest"):
                authnet_user.save()
                request.log_action("Stored in DB", "Debug")
        except Exception as exx:
            # any other errors
            request.log_action(frappe.get_traceback(), "Error")
            raise exx

    return request, redirect_to, redirect_message, authorizenet_data
def create_request(self, data):
    """Process a payment request end-to-end and build redirect info.

    Runs the transaction via ``process_payment()``, sanitizes sensitive
    card data, records an Integration Request log, fires the reference
    doctype's ``on_payment_authorized`` hook and assembles the redirect
    url/message returned to the client.

    Parameters
    ----------
    data : dict
        Payment request payload (amount, card/billing/shipping info,
        reference doc, flags such as ``unittest``).

    Returns
    -------
    dict with keys ``redirect_to``, ``error``, ``status`` and
    ``authorizenet_data``.
    """
    self.process_data = frappe._dict(data)

    # try:
    # remove sensitive info from being entered into db
    self.card_info = self.process_data.get("card_info")
    self.billing_info = self.process_data.get("billing_info")
    self.shipping_info = self.process_data.get("shipping_info")
    redirect_url = ""

    # run the actual AuthorizeNet transaction
    request, redirect_to, redirect_message, authorizenet_data = self.process_payment()

    # strip doc metadata that must not be copied into the request log
    if self.process_data.get('creation'):
        del self.process_data['creation']
    if self.process_data.get('modified'):
        del self.process_data['modified']
    if self.process_data.get('log'):
        del self.process_data['log']

    # sanitize card info: mask all but the last four digits of the card
    # number and the whole card code before anything is persisted
    if self.process_data.get("card_info"):
        self.process_data.card_info["card_number"] = "%s%s" % ("X" * \
            (len(self.process_data.card_info["card_number"]) - 4),
            self.process_data["card_info"]["card_number"][-4:])
        self.process_data.card_info["card_code"] = "X" * \
            len(self.process_data.card_info["card_code"])

    if not self.process_data.get("unittest"):
        self.integration_request = create_request_log(self.process_data, "Host", self.service_name)

    # map the request status onto the integration-request status
    if request.get('status') == "Captured":
        status = "Completed"
    elif request.get('status') == "Authorized":
        status = "Authorized"
    else:
        status = "Failed"

    request.log_action(status, "Info")

    # prevents unit test from inserting data on db
    if not self.process_data.get("unittest"):
        self.integration_request.status = status
        self.integration_request.save()
        request.save()

    custom_redirect_to = None
    if status != "Failed":
        try:
            # let the reference document react to the payment and
            # optionally override the redirect target
            if not self.process_data.get("unittest"):
                custom_redirect_to = frappe.get_doc(
                    self.process_data.reference_doctype,
                    self.process_data.reference_docname).run_method("on_payment_authorized",
                                                                    status)
                request.log_action("Custom Redirect To: %s" % custom_redirect_to, "Info")
        except Exception as ex:
            log(frappe.get_traceback())
            request.log_action(frappe.get_traceback(), "Error")
            raise ex

    if custom_redirect_to:
        redirect_to = custom_redirect_to

    # build the success/failure redirect for the client
    if request.status == "Captured" or request.status == "Authorized":
        redirect_url = "/integrations/payment-success"
        redirect_message = "Continue Shopping"
        success = True
    else:
        redirect_url = "/integrations/payment-failed"
        if request.error_msg:
            redirect_message = "Declined due to:\n" + request.error_msg
        else:
            redirect_message = "Declined"
        success = False

    params = []
    if redirect_to:
        # Fixes issue where system passes a relative url for orders
        if redirect_to == "orders":
            redirect_to = "/orders"
        # NOTE(review): urllib.urlencode is Python-2-only; on py3 this is
        # urllib.parse.urlencode -- confirm the project's target version
        params.append(urllib.urlencode({"redirect_to": redirect_to}))
    if redirect_message:
        params.append(urllib.urlencode({"redirect_message": redirect_message}))

    if len(params) > 0:
        redirect_url += "?" + "&".join(params)

    if not self.process_data.get("unittest"):
        request.log_action("Redirect To: %s" % redirect_url, "Info")
        request.save()
    else:
        # unittest mode: dump the collected log to stdout instead of saving
        for l in request.log:
            print(l.get("level") + "----------------")
            print(l.get("log"))
            print("")

    # drop references to sensitive data once processing is done
    self.process_data = {}
    self.card_info = {}
    self.billing_info = {}
    self.shipping_info = {}

    return {
        "redirect_to": redirect_url,
        "error": redirect_message if status == "Failed" else None,
        "status": status,
        "authorizenet_data": authorizenet_data
    }
    # except Exception:
    #     frappe.log_error(frappe.get_traceback())
    #     return{
    #         "redirect_to": frappe.redirect_to_message(_("Server Error"), _("There was an internal error processing your payment. Please try again later.")),
    #         "status": 401
    #     }
@frappe.whitelist(allow_guest=True)
def process(options, request_name=None):
    """Whitelisted endpoint: merge *options* with an optional stored
    AuthorizeNet Request and hand the result to
    ``AuthorizeNet Settings.create_request``."""
    # handles string json as well as dict argument
    if isinstance(options, basestring):
        options = json.loads(options)

    # fixes bug where js null value is casted as a string
    if request_name == 'null':
        request_name = None

    stored_request = {}
    if request_name and not options.get("unittest"):
        stored_request = frappe.get_doc("AuthorizeNet Request", request_name).as_dict()

    payload = {}
    payload.update(options)
    payload.update(stored_request)

    result = frappe.get_doc("AuthorizeNet Settings").create_request(payload)
    frappe.db.commit()
    return result
@frappe.whitelist()
def get_service_details():
return """
<div>
<p> To obtain the API Login ID and Transaction Key:
<a href="https://support.authorize.net/authkb/index?page=content&id=A405" target="_blank">
https://support.authorize.net/authkb/index?page=content&id=A405
</a>
</p>
<p> Steps to configure Service:</p>
<ol>
<li>
Log into the Merchant Interface at https://account.authorize.net.
</li>
<br>
<li>
Click <strong>Account</strong> from the main toolbar.
</li>
<br>
<li>
Click <strong>Settings</strong> in the main left-side menu.
</li>
<br>
<li>
Click <strong>API Credentials & Keys.</strong>
</li>
<br>
<li>
Enter your <strong>Secret Answer.</strong>
</li>
<br>
<li>
Select <strong>New Transaction Key.</strong>
</li>
<br>
<li>
Input API Credentials in <a href="/desk#Form/AuthorizeNet%20Settings">AuthorizeNet Settings</a>
</li>
<br>
</ol>
<p>
<strong>Note:</strong> When obtaining a new Transaction Key, you may choose to disable the old Transaction Key by clicking the box titled, <strong>Disable Old Transaction Key Immediately</strong>. You may want to do this if you suspect your previous Transaction Key is being used fraudulently.
Click Submit to continue. Your new Transaction Key is displayed.
If the <strong>Disable Old Transaction Key Immediately</strong> box is not checked, the old Transaction Key will automatically expire in 24 hours. When the box is checked, the Transaction Key expires immediately.
</p>
<p>
Be sure to store the Transaction | |
<reponame>TheSin-/terracoin-masternode-tool
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2017-03
import argparse
import datetime
import json
import os
import re
import copy
from configparser import ConfigParser
from os.path import expanduser
from random import randint
from shutil import copyfile
import logging
import bitcoin
from logging.handlers import RotatingFileHandler
from PyQt5.QtCore import QLocale
from app_utils import encrypt, decrypt
import app_cache as cache
import default_config
import app_utils
from db_intf import DBCache
from wnd_utils import WndUtils
APP_NAME_SHORT = 'TerracoinMasternodeTool'
APP_NAME_LONG = 'Terracoin Masternode Tool'
PROJECT_URL = 'https://github.com/TheSin-/terracoin-masternode-tool'
FEE_SAT_PER_BYTE = 11
MIN_TX_FEE = 2000
APP_CFG_CUR_VERSION = 2 # current version of configuration file format
SCREENSHOT_MODE = False
class HWType:
    """Identifiers for the supported hardware wallet types."""
    trezor = 'TREZOR'
    keepkey = 'KEEPKEY'
    ledger_nano_s = 'LEDGERNANOS'

    @staticmethod
    def get_desc(hw_type):
        """Return a human-readable label for *hw_type*, or '???' when the
        type is unknown."""
        labels = {
            HWType.trezor: 'Trezor',
            HWType.keepkey: 'KeepKey',
            HWType.ledger_nano_s: 'Ledger Nano S',
        }
        return labels.get(hw_type, '???')
class AppConfig(object):
def __init__(self):
    """Create an AppConfig populated with default values.

    Real initialization (paths, logging, SQLite cache, reading the
    config file) happens later in :meth:`init`.
    """
    self.initialized = False
    self.app_path = ''  # will be passed in the init method
    self.log_level_str = 'WARNING'
    self.app_version = ''
    QLocale.setDefault(self.get_default_locale())
    # locale-dependent formats used by to_string()
    self.date_format = self.get_default_locale().dateFormat(QLocale.ShortFormat)
    self.date_time_format = self.get_default_locale().dateTimeFormat(QLocale.ShortFormat)

    # List of Terracoin network configurations. Multiple conn configs advantage is to give the possibility to use
    # another config if particular one is not functioning (when using "public" RPC service, it could be node's
    # maintanance)
    self.terracoin_net_configs = []

    # to distribute the load evenly over "public" RPC services, we choose radom connection (from enabled ones)
    # if it is set to False, connections will be used accoording to its order in terracoin_net_configs list
    self.random_terracoin_net_config = True

    # list of all enabled terracoind configurations (TerracoinNetworkConnectionCfg) - they will be used accourding to
    # the order in list
    self.active_terracoin_net_configs = []

    # list of misbehaving terracoin network configurations - they will have the lowest priority during next
    # connections
    self.defective_net_configs = []

    self.hw_type = HWType.trezor  # TREZOR, KEEPKEY, LEDGERNANOS
    self.hw_keepkey_psw_encoding = 'NFC'  # Keepkey passphrase UTF8 chars encoding:
                                          #  NFC: compatible with official Keepkey client app
                                          #  NFKD: compatible with Trezor

    # block explorer / external service URL templates (placeholders are
    # substituted at call sites)
    self.block_explorer_tx = 'https://insight.terracoin.io/tx/%TXID%'
    self.block_explorer_addr = 'https://insight.terracoin.io/address/%ADDRESS%'
    self.terracoin_services_proposal_api = 'https://services.terracoin.io/api/v1/proposal?hash=%HASH%'

    # user-editable feature toggles
    self.check_for_updates = True
    self.backup_config_file = True
    self.read_proposals_external_attributes = True  # if True, some additional attributes will be downloaded from
                                                    # external sources
    self.dont_use_file_dialogs = False
    self.confirm_when_voting = True
    self.add_random_offset_to_vote_time = True  # To avoid identifying one user's masternodes by vote time
    self.csv_delimiter = ';'
    self.masternodes = []
    self.last_bip32_base_path = ''
    self.bip32_recursive_search = True
    self.modified = False

    # paths/filenames; filled in by init()
    self.cache_dir = ''
    self.app_config_file_name = ''
    self.log_dir = ''
    self.log_file = ''
    self.log_level_str = ''
    self.db_cache_file_name = ''
    self.cfg_backup_dir = ''
    self.app_last_version = ''
def init(self, app_path):
    """Initialize configuration after opening the application.

    Resolves the data directory (``--data-dir`` / home dir), sets up the
    cache, rotating log file and SQLite cache DB, then reads the user's
    configuration file.

    Parameters
    ----------
    app_path : str
        Installation directory of the application (used to locate
        ``version.txt``).
    """
    self.app_path = app_path

    # best-effort read of the app version; version.txt may be absent
    try:
        with open(os.path.join(app_path, 'version.txt')) as fptr:
            lines = fptr.read().splitlines()
            self.app_version = app_utils.extract_app_version(lines)
    except:
        pass

    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Path to a configuration file", dest='config')
    parser.add_argument('--data-dir', help="Root directory for configuration file, cache and log dubdirs",
                        dest='data_dir')
    args = parser.parse_args()
    app_user_dir = ''
    if args.data_dir:
        if os.path.exists(args.data_dir):
            if os.path.isdir(args.data_dir):
                app_user_dir = args.data_dir
            else:
                WndUtils.errorMsg('--data-dir parameter doesn\'t point to a directory. Using the default '
                                  'data directory.')
        else:
            WndUtils.errorMsg('--data-dir parameter doesn\'t point to an existing directory. Using the default '
                              'data directory.')
    if not app_user_dir:
        # fall back to ~/<APP_NAME_SHORT> when no valid --data-dir given
        home_dir = expanduser('~')
        app_user_dir = os.path.join(home_dir, APP_NAME_SHORT)

    if not os.path.exists(app_user_dir):
        os.makedirs(app_user_dir)

    self.cache_dir = os.path.join(app_user_dir, 'cache')
    if not os.path.exists(self.cache_dir):
        os.makedirs(self.cache_dir)
    cache.init(self.cache_dir, self.app_version)
    # version of the app that wrote the cache last time (used for
    # migrations below)
    self.app_last_version = cache.get_value('app_version', '', str)

    self.app_config_file_name = ''
    if args.config is not None:
        # an explicitly passed config file must exist
        self.app_config_file_name = args.config
        if not os.path.exists(self.app_config_file_name):
            msg = 'Config file "%s" does not exist.' % self.app_config_file_name
            print(msg)
            raise Exception(msg)

    if not self.app_config_file_name:
        self.app_config_file_name = os.path.join(app_user_dir, 'config.ini')

    # setup logging
    self.log_dir = os.path.join(app_user_dir, 'logs')
    self.log_file = os.path.join(self.log_dir, 'tmt.log')
    if not os.path.exists(self.log_dir):
        os.makedirs(self.log_dir)
    self.log_level_str = 'INFO'
    log_exists = os.path.exists(self.log_file)
    handler = RotatingFileHandler(filename=self.log_file, mode='a', backupCount=30)
    logger = logging.getLogger()
    formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s |%(threadName)s |%(filename)s |%(funcName)s '
                                      '|%(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(self.log_level_str)
    if log_exists:
        # start each session with a fresh log file
        handler.doRollover()
    logging.info('App started')

    # database (SQLITE) cache for caching bigger datasets:
    self.db_cache_file_name = os.path.join(self.cache_dir, 'tmt_cache.db')
    try:
        self.db_intf = DBCache(self.db_cache_file_name)
    except Exception as e:
        logging.exception('SQLite initialization error')

    # directory for configuration backups:
    self.cfg_backup_dir = os.path.join(app_user_dir, 'backup')
    if not os.path.exists(self.cfg_backup_dir):
        os.makedirs(self.cfg_backup_dir)

    try:
        # read configuration from a file
        self.read_from_file()
    except:
        pass

    # persist the cache immediately after an app upgrade
    if not self.app_last_version or \
        app_utils.version_str_to_number(self.app_last_version) < app_utils.version_str_to_number(self.app_version):
        cache.save_data()
    self.initialized = True
def start_cache(self):
    """Start the cache's background save thread; call after the GUI
    has initialized."""
    cache.start()
def close(self):
    """Shut down: flush/stop the cache thread and close the SQLite
    cache database."""
    cache.finish()
    self.db_intf.close()
def copy_from(self, src_config):
    """Copy user-editable settings from another AppConfig instance."""
    # the connection list needs a deep copy so later edits don't alias
    # objects between the two configs
    self.terracoin_net_configs = copy.deepcopy(src_config.terracoin_net_configs)

    # plain-value settings can be copied attribute by attribute
    for attr_name in (
            'random_terracoin_net_config',
            'hw_type',
            'hw_keepkey_psw_encoding',
            'block_explorer_tx',
            'block_explorer_addr',
            'terracoin_services_proposal_api',
            'check_for_updates',
            'backup_config_file',
            'read_proposals_external_attributes',
            'dont_use_file_dialogs',
            'confirm_when_voting',
            'add_random_offset_to_vote_time',
            'csv_delimiter'):
        setattr(self, attr_name, getattr(src_config, attr_name))

    if self.initialized:
        # self.set_log_level reconfigures the logger configuration so call this function
        # if this object is the main AppConfig object (it's initialized)
        self.set_log_level(src_config.log_level_str)
    else:
        # ... otherwise just copy attribute without reconfiguring logger
        self.log_level_str = src_config.log_level_str
def get_default_locale(self):
    """Return the locale used for formatting: forced English when
    SCREENSHOT_MODE is active, the system locale otherwise."""
    return QLocale(QLocale.English) if SCREENSHOT_MODE else QLocale.system()
def to_string(self, data):
    """Convert date/datetime/float/int/str *data* to a string using the
    current locale.

    Raises
    ------
    Exception
        If *data* is of an unsupported type.
    """
    # NOTE: datetime must be tested before date (datetime is a date subclass)
    if isinstance(data, datetime.datetime):
        return self.get_default_locale().toString(data, self.date_time_format)
    elif isinstance(data, datetime.date):
        return self.get_default_locale().toString(data, self.date_format)
    elif isinstance(data, float):
        # don't use QT float to number conversion due to weird behavior
        dp = self.get_default_locale().decimalPoint()
        ret_str = str(data)
        if dp != '.':
            # BUGFIX: str.replace returns a new string; the old code
            # discarded the result, so the locale decimal point was
            # never applied
            ret_str = ret_str.replace('.', dp)
        return ret_str
    elif isinstance(data, str):
        return data
    elif isinstance(data, int):
        return str(data)
    else:
        raise Exception('Argument type is not supported for string conversion: %s' % type(data))
def read_from_file(self):
ini_version = None
was_default_ssh_in_ini_v1 = False
was_default_direct_localhost_in_ini_v1 = False
ini_v1_localhost_rpc_cfg = None
# from v0.9.15 some public nodes changed its names and port numbers to the official HTTPS port number: 443
# correct the configuration
if not self.app_last_version or \
(app_utils.version_str_to_number(self.app_last_version) < app_utils.version_str_to_number('0.9.16')):
correct_public_nodes = True
else:
correct_public_nodes = False
configuration_corrected = False
if os.path.exists(self.app_config_file_name):
config = ConfigParser()
try:
section = 'CONFIG'
config.read(self.app_config_file_name)
ini_version = config.get(section, 'CFG_VERSION', fallback=1) # if CFG_VERSION not set it's old config
log_level_str = config.get(section, 'log_level', fallback='WARNING')
if log_level_str not in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'):
log_level_str = 'WARNING'
if self.log_level_str != log_level_str:
self.set_log_level(log_level_str)
if ini_version == 1:
# read network config from old file format
terracoind_connect_method = config.get(section, 'terracoind_connect_method', fallback='rpc')
rpc_user = config.get(section, 'rpc_user', fallback='')
rpc_password = config.get(section, 'rpc_password', fallback='')
rpc_ip = config.get(section, 'rpc_ip', fallback='')
rpc_port = config.get(section, 'rpc_port', fallback='8889')
ros_ssh_host = config.get(section, 'ros_ssh_host', fallback='')
ros_ssh_port = config.get(section, 'ros_ssh_port', fallback='22')
ros_ssh_username = config.get(section, 'ros_ssh_username', fallback='')
ros_rpc_bind_ip = config.get(section, 'ros_rpc_bind_ip', fallback='127.0.0.1')
ros_rpc_bind_port = config.get(section, 'ros_rpc_bind_port', fallback='13332')
ros_rpc_username = config.get(section, 'ros_rpc_username', fallback='')
ros_rpc_password = config.get(section, 'ros_rpc_password', fallback='')
# convert terracoin network config from config version 1
if ros_ssh_host and ros_ssh_port and ros_ssh_username and ros_rpc_bind_ip and \
ros_rpc_bind_port and ros_rpc_username and ros_rpc_password:
# import RPC over SSH configuration
cfg = TerracoinNetworkConnectionCfg('rpc')
cfg.enabled = True if terracoind_connect_method == 'rpc_ssh' else False
cfg.host = ros_rpc_bind_ip
cfg.port = ros_rpc_bind_port
cfg.use_ssl = False
cfg.username = ros_rpc_username
cfg.password = <PASSWORD>
cfg.use_ssh_tunnel = True
cfg.ssh_conn_cfg.host = ros_ssh_host
cfg.ssh_conn_cfg.port = ros_ssh_port
cfg.ssh_conn_cfg.username = ros_ssh_username
self.terracoin_net_configs.append(cfg)
was_default_ssh_in_ini_v1 = cfg.enabled
if rpc_user and rpc_password and rpc_ip and rpc_port:
cfg = TerracoinNetworkConnectionCfg('rpc')
cfg.enabled = True if terracoind_connect_method == 'rpc' else False
cfg.host = rpc_ip
cfg.port = rpc_port
cfg.use_ssl = False
cfg.username = rpc_user
cfg.password = <PASSWORD>
cfg.use_ssh_tunnel = False
self.terracoin_net_configs.append(cfg)
was_default_direct_localhost_in_ini_v1 = cfg.enabled and cfg.host == '127.0.0.1'
ini_v1_localhost_rpc_cfg = cfg
if correct_public_nodes:
if cfg.host.lower() == 'alice.dash-dmt.eu':
cfg.host = 'alice.dash-masternode-tool.org'
cfg.port = '443'
configuration_corrected = True
elif cfg.host.lower() == 'luna.dash-dmt.eu':
cfg.host = 'luna.dash-masternode-tool.org'
cfg.port = '443'
configuration_corrected = True
self.last_bip32_base_path = config.get(section, 'bip32_base_path', fallback="44'/83'/0'/0/0")
if not self.last_bip32_base_path:
self.last_bip32_base_path = "44'/83'/0'/0/0"
self.bip32_recursive_search = config.getboolean(section, 'bip32_recursive', fallback=True)
self.hw_type = config.get(section, 'hw_type', fallback=HWType.trezor)
if self.hw_type not in (HWType.trezor, HWType.keepkey, HWType.ledger_nano_s):
logging.warning('Invalid hardware wallet type: ' + self.hw_type)
self.hw_type = HWType.trezor
self.hw_keepkey_psw_encoding = config.get(section, 'hw_keepkey_psw_encoding', fallback='NFC')
if self.hw_keepkey_psw_encoding not in ('NFC', 'NFKD'):
logging.warning('Invalid value of the hw_keepkey_psw_encoding config option: ' +
self.hw_keepkey_psw_encoding)
self.hw_keepkey_psw_encoding = 'NFC'
self.random_terracoin_net_config = self.value_to_bool(config.get(section, 'random_terracoin_net_config',
fallback='1'))
self.check_for_updates = self.value_to_bool(config.get(section, 'check_for_updates', fallback='1'))
self.backup_config_file = self.value_to_bool(config.get(section, 'backup_config_file', fallback='1'))
self.read_proposals_external_attributes = \
self.value_to_bool(config.get(section, 'read_external_proposal_attributes', fallback='1'))
self.dont_use_file_dialogs = self.value_to_bool(config.get(section, 'dont_use_file_dialogs',
fallback='0'))
self.confirm_when_voting = self.value_to_bool(config.get(section, 'confirm_when_voting',
fallback='1'))
self.add_random_offset_to_vote_time = \
self.value_to_bool(config.get(section, 'add_random_offset_to_vote_time', fallback='1'))
for section in config.sections():
if re.match('MN\d', section):
mn = MasterNodeConfig()
mn.name = config.get(section, 'name', fallback='')
mn.ip = config.get(section, 'ip', fallback='')
mn.port = config.get(section, 'port', fallback='')
mn.privateKey = config.get(section, 'private_key', fallback='')
mn.collateralBip32Path = config.get(section, 'collateral_bip32_path', fallback='')
| |
are bearings without a z location
for b in cycle(self.bearing_elements):
if bearings_no_zloc:
if b in bearings_no_zloc:
# first check if b.n is on list, if not, check for n_link
node_l = df.loc[(df.n_l == b.n) & (df.tag != b.tag), "nodes_pos_l"]
node_r = df.loc[(df.n_r == b.n) & (df.tag != b.tag), "nodes_pos_r"]
if len(node_l) == 0 and len(node_r) == 0:
node_l = df.loc[
(df.n_link == b.n) & (df.tag != b.tag), "nodes_pos_l"
]
node_r = node_l
if len(node_l):
df.loc[df.tag == b.tag, "nodes_pos_l"] = node_l.values[0]
df.loc[df.tag == b.tag, "nodes_pos_r"] = node_l.values[0]
bearings_no_zloc.discard(b)
elif len(node_r):
df.loc[df.tag == b.tag, "nodes_pos_l"] = node_r.values[0]
df.loc[df.tag == b.tag, "nodes_pos_r"] = node_r.values[0]
bearings_no_zloc.discard(b)
else:
break
dfb = df[
(df.type == "BearingElement")
| (df.type == "BearingElement6DoF")
| (df.type == "SealElement")
]
z_positions = [pos for pos in dfb["nodes_pos_l"]]
z_positions = list(dict.fromkeys(z_positions))
for z_pos in z_positions:
dfb_z_pos = dfb[dfb.nodes_pos_l == z_pos]
dfb_z_pos = dfb_z_pos.sort_values(by="n_l")
if z_pos == df_shaft["nodes_pos_l"].iloc[0]:
y_pos = (
max(
df_shaft["odl"][
df_shaft.n_l == int(dfb_z_pos.iloc[0]["n_l"])
].values
)
/ 2
)
elif z_pos == df_shaft["nodes_pos_r"].iloc[-1]:
y_pos = (
max(
df_shaft["odr"][
df_shaft.n_r == int(dfb_z_pos.iloc[0]["n_r"])
].values
)
/ 2
)
else:
y_pos = (
max(
[
max(
df_shaft["odl"][
df_shaft._n == int(dfb_z_pos.iloc[0]["n_l"])
].values
),
max(
df_shaft["odr"][
df_shaft._n == int(dfb_z_pos.iloc[0]["n_l"]) - 1
].values
),
]
)
/ 2
)
mean_od = np.mean(nodes_o_d)
scale_size = dfb["scale_factor"] * mean_od
y_pos_sup = y_pos + 2 * scale_size
for t in dfb_z_pos.tag:
df.loc[df.tag == t, "y_pos"] = y_pos
df.loc[df.tag == t, "y_pos_sup"] = y_pos_sup
y_pos += 2 * mean_od * df["scale_factor"][df.tag == t].values[0]
y_pos_sup += 2 * mean_od * df["scale_factor"][df.tag == t].values[0]
# define position for point mass elements
dfb = df[
(df.type == "BearingElement")
| (df.type == "BearingElement6DoF")
| (df.type == "SealElement")
]
for p in point_mass_elements:
z_pos = dfb[dfb.n_l == p.n]["nodes_pos_l"].values[0]
y_pos = dfb[dfb.n_l == p.n]["y_pos"].values[0]
df.loc[df.tag == p.tag, "nodes_pos_l"] = z_pos
df.loc[df.tag == p.tag, "nodes_pos_r"] = z_pos
df.loc[df.tag == p.tag, "y_pos"] = y_pos
self.df = df
def _check_number_dof(self):
    """Verify the consistency of degrees of freedom.

    This method loops for all the elements, checking if the number of degrees of
    freedom is consistent.
    E.g.: inputting 2 shaft elements, one with 4 dof and one with 6, will raise
    an error.

    Raises
    ------
    Exception
        Error pointing out difference between the number of DoF's from each element
        type.

    Returns
    -------
    number_dof : int
        Number of degrees of freedom from the adopted shaft element.
    """
    # reference: half the DoF count of the first shaft element's mapping
    number_dof = len(self.shaft_elements[0].dof_mapping()) / 2

    if any(len(sh.dof_mapping()) != number_dof * 2 for sh in self.shaft_elements):
        # BUGFIX: corrected "degrees o freedom" typo in the messages below
        raise Exception(
            "The number of degrees of freedom of all elements must be the same! There are SHAFT elements with discrepant DoFs."
        )

    if any(len(disk.dof_mapping()) != number_dof for disk in self.disk_elements):
        raise Exception(
            "The number of degrees of freedom of all elements must be the same! There are DISK elements with discrepant DoFs."
        )

    if any(
        len(brg.dof_mapping()) != number_dof / 2 for brg in self.bearing_elements
    ):
        raise Exception(
            "The number of degrees of freedom of all elements must be the same! There are BEARING elements with discrepant DoFs."
        )

    return int(number_dof)
def __eq__(self, other):
    """Compare two rotors by their elements and assembly parameters.

    Parameters
    ----------
    other : obj
        Object compared against this rotor.

    Returns
    -------
    bool
        True when both ``elements`` and ``parameters`` match, False otherwise.
    """
    # Short-circuits on the element comparison, as the original did.
    return self.elements == other.elements and self.parameters == other.parameters
def run_modal(self, speed, num_modes=12, sparse=True):
    """Run modal analysis.

    Calculates eigenvalues and eigenvectors for the rotor system at a given
    rotational speed. The natural frequencies and damping ratios depend on
    the speed (gyroscopic effect), so each speed input yields a different
    set of eigenvalues/eigenvectors.

    Available plotting methods:
        .plot_mode_2d()
        .plot_mode_3d()

    Parameters
    ----------
    speed : float
        Speed at which the eigenvalues and eigenvectors will be calculated.
    num_modes : int, optional
        The number of eigenvalues and eigenvectors to be calculated using ARPACK.
        If sparse=True, it determines the number of eigenvalues and eigenvectors
        to be calculated. It must be smaller than Rotor.ndof - 1. It is not
        possible to compute all eigenvectors of a matrix with ARPACK.
        If sparse=False, num_modes does not have any effect over the method.
        Default is 12.
    sparse : bool, optional
        If True, ARPACK is used to calculate a desired number (according to
        num_modes) of eigenvalues and eigenvectors.
        If False, scipy.linalg.eig() is used to calculate all the eigenvalues
        and eigenvectors.
        Default is True.

    Returns
    -------
    modal_results : ModalResults
        Holds the eigenvalues and eigenvectors together with the derived
        undamped (wn) and damped (wd) natural frequencies, damping ratios
        and logarithmic decrements.

    Example
    -------
    >>> import ross as rs
    >>> rotor = rs.rotor_example()
    >>> modal = rotor.run_modal(speed=0, sparse=False)
    >>> modal.wn[:2]
    array([91.79655318, 96.28899977])
    >>> modal.wd[:2]
    array([91.79655318, 96.28899977])

    Plotting 3D mode shape

    >>> mode1 = 0 # First mode
    >>> fig = modal.plot_mode_3d(mode1)

    Plotting 2D mode shape

    >>> mode2 = 1 # Second mode
    >>> fig = modal.plot_mode_2d(mode2)
    """
    eigenvalues, eigenvectors = self._eigen(speed, num_modes=num_modes, sparse=sparse)
    n_kept = num_modes // 2
    magnitudes = np.absolute(eigenvalues)
    wn = magnitudes[:n_kept]
    wd = np.imag(eigenvalues)[:n_kept]
    damping_ratio = (-np.real(eigenvalues) / magnitudes)[:n_kept]
    # Suppress the sqrt RuntimeWarning — presumably for damping ratios with
    # magnitude >= 1, where 1 - damping_ratio**2 goes negative (TODO confirm).
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        log_dec = 2 * np.pi * damping_ratio / np.sqrt(1 - damping_ratio ** 2)
    return ModalResults(
        speed,
        eigenvalues,
        eigenvectors,
        wn,
        wd,
        damping_ratio,
        log_dec,
        self.ndof,
        self.nodes,
        self.nodes_pos,
        self.shaft_elements_length,
    )
def run_critical_speed(self, speed_range=None, num_modes=12, rtol=0.005):
"""Calculate the critical speeds and damping ratios for the rotor model.
This function runs an iterative method over "run_modal()" to minimize
(using scipy.optimize.newton) the error between the rotor speed and the rotor
critical speeds (rotor speed - critical speed).
Differently from run_modal(), this function doesn't take a speed input because
it iterates over the natural frequencies calculated in the last iteration.
The initial value is considered to be the undamped natural frequencies for
speed = 0 (no gyroscopic effect).
Once the error is within an acceptable range defined by "rtol", it returns the
approximated critical speed.
With the critical speeds calculated, the function uses the results to
calculate the log dec and damping ratios for each critical speed.
Parameters
----------
speed_range : tuple
Tuple (start, end) with the desired range of frequencies (rad/s).
The function returns all eigenvalues within this range.
num_modes : int, optional
The number of eigenvalues and eigenvectors to be calculated using ARPACK.
If sparse=True, it determines the number of eigenvalues and eigenvectors
to be calculated. It must be smaller than Rotor.ndof - 1. It is not
possible to compute all eigenvectors of a matrix with ARPACK.
If speed_range is not None, num_modes is overrided.
Default is 12.
rtol : float, optional
Tolerance (relative) for termination. Applied to scipy.optimize.newton.
Default is 0.005 (0.5%).
Returns
-------
CriticalSpeedResults : An instance of CriticalSpeedResults class, which is
used to post-process results. Attributes stored:
CriticalSpeedResults.wn() : undamped critical speeds.
CriticalSpeedResults.wd(): damped critical speeds.
CriticalSpeedResults.log_dec : log_dec for each critical speed.
CriticalSpeedResults.damping_ratio : damping ratio for each critical speed.
CriticalSpeedResults.whirl_direction : whirl dir. for each critical speed.
Examples
--------
>>> import ross as rs
>>> rotor = rs.rotor_example()
Finding the first Nth critical speeds
>>> results = rotor.run_critical_speed(num_modes=8)
>>> np.round(results.wd())
array([ 92., 96., 271., 300.])
>>> np.round(results.wn())
array([ 92., 96., 271., 300.])
Finding the first critical speeds within a speed range
>>> results = rotor.run_critical_speed(speed_range=(100, 1000))
>>> np.round(results.wd())
array([271., 300., 636., 867.])
Changing output units
>>> np.round(results.wd("rpm"))
array([2590., 2868., 6074., 8278.])
Retrieving whirl directions
>>> results.whirl_direction # doctest: +ELLIPSIS
array([...
"""
num_modes = (self.ndof - 4) * 2 if speed_range is not None else num_modes
modal = self.run_modal(0, num_modes)
_wn = modal.wn
_wd = modal.wd
wn = np.zeros_like(_wn)
wd = np.zeros_like(_wd)
for i in range(len(wn)):
wn_func = lambda s: (s - self.run_modal(s, num_modes).wn[i])
wn[i] = newton(func=wn_func, x0=_wn[i], rtol=rtol)
for i in range(len(wd)):
wd_func = lambda s: (s - self.run_modal(s, num_modes).wd[i])
wd[i] = newton(func=wd_func, x0=_wd[i], rtol=rtol)
log_dec = np.zeros_like(wn)
damping_ratio = np.zeros_like(wn)
whirl_direction = list(np.zeros_like(wn))
for i, s in enumerate(wd):
modal = self.run_modal(s, num_modes)
log_dec[i] = modal.log_dec[i]
damping_ratio[i] = | |
import tkinter as tk
from tkinter import ttk
from matplotlib.pyplot import close
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,
NavigationToolbar2Tk)
from matplotlib.mathtext import math_to_image
from io import BytesIO
from PIL import ImageTk, Image
from sympy import latex
from math import pi, cos, sin
from sgraph import *
from braid import *
from col_perm import *
from pres_mat import *
from visualization import *
from casson_gordon import *
from typing import List, Tuple, Callable, Dict
from math import log10, floor
font_style = "Calibri"
font_size = 25
# Function for rounding eigenvalues
def round_to_2(x: float):
    """Round *x* to two significant figures; 0 is returned unchanged."""
    if x == 0:
        return 0
    # Number of decimal places that keeps exactly two significant digits.
    digits = 1 - int(floor(log10(abs(x))))
    return round(x, digits)
# Class for main window
class Clasper(tk.Frame):
    """Main application frame.

    Reads a braid word, renders the braid and its C-Complex, and computes
    link invariants (Conway potential function, multivariable Alexander
    polynomial, Cimasoni-Florens signature, Seifert matrices).

    Fixes applied:
    - ``get_latex`` referenced an undefined name ``cpf`` (should be
      ``cpf_text``) — a guaranteed NameError not caught by the surrounding
      ``except ValueError``.
    - ``compute_with_defaults`` had a parameter named ``int`` (shadowing the
      builtin); it is only ever filled positionally with the Tk event object.
    - ``link_info`` read a single character for the strand count, breaking
      braids on 10 or more strands.
    - "Mutivariable" typo in the LaTeX window.
    """

    def __init__(self, parent):
        tk.Frame.__init__(self, parent)
        self.parent = parent
        # Configure the grid
        self.grid_columnconfigure(0, weight=1)
        self.grid_columnconfigure(1, weight=1)
        self.grid_columnconfigure(2, weight=1)
        self.grid_columnconfigure(3, weight=1)
        # Control variables: remember which braid the cached invariants belong to.
        self.braid_inv_control = ""
        self.braid_seif_control = ""
        self.computed_invariants = False
        self.computed_seif = False
        # Input variables
        self.braid_str = tk.StringVar()
        self.complete_graph = tk.IntVar(value=0)
        # Cached invariant values (filled lazily by get_invariants/get_latex)
        self.cpf = 0
        self.alexander = 0
        self.signature_value = 0
        self.seif = ""
        self.pm = 0
        # Frames for checking the braid
        self.braid_check = tk.Frame(self)
        self.cycle_decomp_frame = tk.Frame(self)
        self.euler_char_frame = tk.Frame(self)
        self.euler_char_frame.grid(column=2, row=3, pady=10, sticky='W')
        self.euler_char_frame.grid_columnconfigure(0, weight=3)
        # NOTE(review): this second call overrides the weight=3 above for the
        # same column 0; it was probably meant for column 1 — confirm intent.
        self.euler_char_frame.grid_columnconfigure(0, weight=1)
        self.euler_char_frame.euler_char_val = tk.Frame(self.euler_char_frame)
        # Frames for the main input widgets
        self.strands = Strands(self)
        self.strands.grid(
            column=0, row=4, pady=10, rowspan=6, sticky='N')
        self.color = Color(self)
        self.color.grid(
            column=1, row=4, pady=10, rowspan=6, sticky='N')
        self.signature = Signature(self)
        self.signature.grid(
            column=2, row=4, pady=10, rowspan=6, sticky='N')
        self.braid_visual = tk.Frame(self)
        self.braid_visual.grid(
            column=0, row=14, pady=10, columnspan=4, sticky='N')
        self.ccomplex_visual = tk.Frame(self)
        self.ccomplex_visual.grid(
            column=0, row=15, pady=10, columnspan=4, sticky='N')
        self.invariant_frame = tk.Frame(self)
        self.invariant_frame.grid(column=0, row=11,
                                  columnspan=4, rowspan=3)
        # ----- Implementing the GUI -----
        # (was a no-op bare string literal; turned into a real comment)
        # (0, 0) Instructions for entering braids
        ttk.Label(
            self, text='''Braids - LinkInfo format or comma/space '''+
            '''separated. Colors and signature inputs - space separated.\n'''+
            '''Press enter to compute invariants with defaults.'''
            ''' See paper for details about the C-Complex.\n'''+
            '''Written by <NAME>.''',
            font=(font_style, font_size), background='cyan').grid(
            column=0, row=0, columnspan=4)
        # (0, 0->1) Setting up the entry for the braid
        ttk.Label(
            self, text='Braid:', font=(font_style, font_size)).grid(
            column=0, row=1, pady=10)
        ttk.Entry(self, textvariable=self.braid_str,
                  font=(font_style, font_size), width=40).grid(
            column=1, row=1, padx=0, pady=10, sticky='W', columnspan=2)
        # (1, 2) Examples for braid entries
        ttk.Label(
            self, text="""Example: '-2 -3 2 -3 -1 -2 -3'"""+
            """ or '-2, -3, 2, -3, -1, -2, -3' or """+
            """'{4, {-2, -3, 2, -3, -1, -2, -3}}'""",
            font=(font_style, font_size), background='cyan').grid(
            column=1, row=2, pady=10, sticky='W', columnspan=3)
        # Shared ttk styles for buttons / radiobuttons / checkbuttons
        style = ttk.Style()
        style.configure('C.TButton', font=('calibri', font_size),
                        background='blue')
        style.configure('C.TRadiobutton', font=('calibri', font_size))
        style.configure('C.TCheckbutton', font=('calibri', font_size))
        ttk.Checkbutton(self, text="All Seifert surfaces intersecting",
                        style='C.TCheckbutton',
                        variable=self.complete_graph).grid(
            column=2, row=1, padx=30, pady=10, sticky='W')
        # Setup for printing the cycle decomposition
        ttk.Button(self, text="Cycle Decomposition", command=self.compute_cyc,
                   style='C.TButton').grid(column=0, row=3, pady=10)
        # Setup for printing the Euler Characteristic of the C-Complex
        ttk.Button(self.euler_char_frame,
                   text="Euler Characteristic of C-Complex",
                   command=self.get_sgraph_euler_char,
                   style='C.TButton').grid(column=0, row=0, pady=10, sticky='W')
        # Buttons to compute / export invariants
        ttk.Button(self, text="Compute link invariants",
                   command=self.get_invariants, style='C.TButton').grid(
            column=0, row=10, pady=10)
        ttk.Button(self, text="Invariants in LaTeX",
                   command=self.get_latex, style='C.TButton').grid(
            column=1, row=10, pady=10)
        ttk.Button(self, text="Export Seifert matrices",
                   command=self.get_seifert_matrices, style='C.TButton').grid(
            column=2, row=10, pady=10)

    def compute_with_defaults(self, event=None):
        """Compute invariants with default choices (bound to key events).

        The parameter was previously named ``int`` (shadowing the builtin);
        it only ever receives the Tk event object positionally, so the
        rename plus a ``None`` default is backward-compatible.
        """
        self.strands.strand_choice.set(1)
        self.color.color_choice.set(2)
        self.signature.signature_choice.set(1)
        self.get_invariants()

    def link_info(self, braid: str) -> Braid:
        """Parse a LinkInfo style input, e.g. '{4, {-2, -3, 2, -3}}'.

        Fix: the strand count is now read up to the first comma instead of a
        single character, so braids on 10+ strands parse correctly.
        """
        start = braid.index('{')+1
        strands = int(braid[start:braid.index(',')].strip())
        new_braid = braid[start:]
        braid1 = new_braid[
            new_braid.index('{')+1: new_braid.index('}')].split(',')
        braid1 = list(filter(lambda x: x.strip()!="", braid1))
        braid1 = list(map(lambda x: int(x), braid1))
        return Braid(braid1, strands)

    def csv_input(self, braid: str) -> List[int]:
        """Parse a comma separated braid word into a list of generators."""
        braid1 = braid.strip().split(",")
        braid1 = list(filter(lambda x: x.strip()!="", braid1))
        braid1 = [int(x) for x in braid1]
        return braid1

    def space_input(self, braid: str) -> List[int]:
        """Parse a space separated braid word into a list of generators."""
        braid1 = braid.strip().split(" ")
        braid1 = list(filter(lambda x: x.strip()!="", braid1))
        braid1 = [int(x) for x in braid1]
        return braid1

    def compute_cyc(self) -> Braid:
        """Compute the cycle decomposition and display it next to the button."""
        self.cycle_decomp_frame.destroy()
        self.cycle_decomp_frame = tk.Frame(self)
        self.cycle_decomp_frame.grid(
            column=1, row=3, pady=10, sticky='W')
        p_braid = self.strands.make_braid()
        ttk.Label(self.cycle_decomp_frame, text=str(p_braid.cycle_decomp),
                  font=(font_style, font_size)).pack()

    def get_sgraph_euler_char(self) -> Braid:
        """Compute and display the Euler characteristic of the C-Complex."""
        self.euler_char_frame.euler_char_val.destroy()
        self.euler_char_frame.euler_char_val = tk.Frame(self.euler_char_frame)
        self.euler_char_frame.euler_char_val.grid(
            column=1, row=0, padx=20, pady=10, sticky='E')
        try:
            graph = self.color.get_graph()
            ttk.Label(self.euler_char_frame.euler_char_val,
                      text="= "+str(graph.sgraph_euler_char()),
                      font=(font_style, font_size)).pack()
        except Exception:
            # Deliberate best-effort: invalid input simply shows no value.
            pass

    def get_latex(self):
        """Open a window with the CPF and Alexander polynomial as LaTeX source."""
        new_window = tk.Toplevel(self)
        try:
            if((self.braid_inv_control.strip() == self.braid_str.get().strip())
               and self.computed_invariants):
                pass  # cached invariants are still valid for this braid
            else:
                graph = self.color.get_graph()
                # Print the Euler characteristic of the SGraph
                self.get_sgraph_euler_char()
                if(self.braid_seif_control.strip() !=
                   self.braid_str.get().strip()):
                    (self.seif, self.pm) = presentation_matrix(graph)
                self.cpf = self.pm.conway_potential_function(graph)
                self.alexander = self.pm.multivar_alexander_poly(graph)
                self.computed_invariants = True
                self.computed_seif = True
                self.braid_inv_control = self.braid_str.get()
                self.braid_seif_control = self.braid_str.get()
            cpf_text = tk.Text(new_window, font=(font_style, font_size))
            cpf_text.insert(1.0, "Conway Potential Function:\n"+
                            latex(self.cpf))
            cpf_text.pack()
            cpf_text.configure(state="disabled")
            multi_var_alexander = tk.Text(
                new_window, font=(font_style, font_size))
            multi_var_alexander.insert(1.0,
                                       "Multivariable Alexander Polynomial:\n"+
                                       latex(self.alexander))
            multi_var_alexander.pack()
            multi_var_alexander.configure(state="disabled")
            # if tkinter is 8.5 or above you'll want the selection background
            # to appear like it does when the widget is activated
            # comment this out for older versions of Tkinter
            # Fix: these lines referenced the undefined name `cpf`.
            cpf_text.configure(inactiveselectbackground=cpf_text.cget(
                "selectbackground"))
            multi_var_alexander.configure(
                inactiveselectbackground=cpf_text.cget("selectbackground"))
        except ValueError:
            pass

    def get_seifert_matrices(self):
        """Compute the Seifert matrices (if stale) and export them to a file."""
        if((self.braid_seif_control.strip() == self.braid_str.get().strip())
           and self.computed_invariants):
            pass  # cached Seifert data is still valid
        else:
            graph = self.color.get_graph()
            # Print the Euler characteristic of the SGraph
            self.get_sgraph_euler_char()
            (self.seif, self.pm) = presentation_matrix(graph)
        file_name = tk.filedialog.asksaveasfilename()
        self.invariant_frame.destroy()
        self.invariant_frame = Inv(self)
        self.invariant_frame.grid(column=0, row=11,
                                  columnspan=4, rowspan=3)
        p = self.strands.make_braid()
        graph = self.invariant_frame.graph
        if(file_name):
            if("." not in file_name):
                file_name += ".txt"
            # Context manager guarantees the file is closed on error.
            with open(file_name, 'w+') as f:
                f.write("Braid: "+str(p.braid_wrong))
                f.write("\nStrands: "+str(p.strands)+"\n\n")
                f.write(self.seif)

    def get_invariants(self):
        """Recompute visualizations and rebuild the invariant display frame."""
        self.invariant_frame.destroy()
        self.view_braid()
        self.view_c_complex()
        self.invariant_frame = Inv(self)
        self.invariant_frame.grid(column=0, row=11,
                                  columnspan=4, rowspan=3)

    def view_braid(self):
        """Render the colored braid into a matplotlib canvas."""
        try:
            close(self.braid_fig)
        except Exception:
            pass  # no previous figure to close
        self.braid_visual.destroy()
        self.braid_visual = tk.Frame(self)
        self.braid_visual.grid(
            column=0, row=14, pady=10, columnspan=4)
        self.braid_fig = visualize_braid(self.color.get_col_braid())
        # creating the Tkinter canvas containing the Matplotlib figure
        canvas = FigureCanvasTkAgg(self.braid_fig, master=self.braid_visual)
        canvas.draw()
        # placing the canvas on the Tkinter window
        canvas.get_tk_widget().pack()

    def view_c_complex(self):
        """Render the clasp complex into a matplotlib canvas."""
        try:
            close(self.ccomplex_fig)
        except Exception:
            pass  # no previous figure to close
        self.ccomplex_visual.destroy()
        self.ccomplex_visual = tk.Frame(self)
        self.ccomplex_visual.grid(
            column=0, row=15, pady=10, columnspan=4)
        self.ccomplex_fig = visualize_clasp_complex(self.color.get_graph())
        # creating the Tkinter canvas containing the Matplotlib figure
        canvas = FigureCanvasTkAgg(self.ccomplex_fig,
                                   master=self.ccomplex_visual)
        canvas.draw()
        # placing the canvas on the Tkinter window
        canvas.get_tk_widget().pack()
# Class for invariants
class Inv(tk.Frame):
    """Frame displaying the computed invariants.

    Shows the Conway potential function, the multivariable Alexander
    polynomial (both rendered from LaTeX) and the Cimasoni-Florens
    signature with its eigenvalues. Results are cached on the parent and
    only recomputed when the braid string changed.

    Fix applied: ``make_latex_label`` declared ``size = Tuple[int, int]``,
    i.e. its *default value* was the typing object rather than a type
    annotation; it is now a proper annotation with a sensible default.
    """

    def __init__(self, parent):
        tk.Frame.__init__(self, parent)
        self.parent = parent
        # Configure the grid
        self.grid_columnconfigure(0, weight=1)
        self.grid_columnconfigure(1, weight=1)
        self.grid_columnconfigure(2, weight=1)
        self.grid_columnconfigure(3, weight=1)
        try:
            graph = parent.color.get_graph()
            self.graph = graph
        except ValueError:
            # Invalid color input; cached values (if any) may still be shown.
            pass
        omega = parent.signature.get_omega()
        # Print the Euler characteristic of the SGraph
        self.parent.get_sgraph_euler_char()
        if((self.parent.braid_inv_control.strip() ==
                self.parent.braid_str.get().strip())
           and self.parent.computed_invariants):
            pass  # cached invariants are valid for the current braid
        else:
            graph = self.parent.color.get_graph()
            # Print the Euler characteristic of the SGraph
            self.parent.get_sgraph_euler_char()
            if(self.parent.braid_seif_control.strip() !=
               self.parent.braid_str.get().strip()):
                (self.parent.seif, self.parent.pm) = presentation_matrix(graph)
            self.parent.cpf = self.parent.pm.conway_potential_function(graph)
            self.parent.alexander = \
                self.parent.pm.multivar_alexander_poly(graph)
            self.parent.computed_invariants = True
            self.parent.computed_seif = True
            self.parent.braid_inv_control = self.parent.braid_str.get()
            self.parent.braid_seif_control = self.parent.braid_str.get()
        ttk.Label(self, text='Conway Potential Function:',
                  font=(font_style, font_size)).grid(
            column=0, row=0, pady=10)
        self.make_latex_label(latex(self.parent.cpf),
                              column=1, row=0, y_pad=10, sticky='W',
                              columnspan=3, rowspan=1, size=(2000, 100))
        ttk.Label(self, text='Multivariable Alexander Polynomial:',
                  font=(font_style, font_size)).grid(
            column=0, row=1, pady=10)
        self.make_latex_label(latex(self.parent.alexander),
                              column=1, row=1, y_pad=10, sticky='W',
                              columnspan=3, rowspan=1, size=(2000, 50))
        ttk.Label(self, text='Cimasoni-Florens Signature:',
                  font=(font_style, font_size)).grid(
            column=0, row=2, pady=15)
        signat = self.parent.pm.signature(omega)
        ttk.Label(self, text=str(signat[0]), font=(font_style, 30)).grid(
            column=1, row=2, pady=15, sticky='W')
        eig_val_str = str([round_to_2(x) for x in signat[1]])[1:-1]
        eig_val = "(Eigenvalues: "+eig_val_str+")"
        ttk.Label(self, text=str(eig_val), font=(font_style, 25)).grid(
            column=2, row=2, columnspan=2, padx=10, pady=15, sticky='W')

    def make_latex_label(self, latex_string: str, column: int,
                         row: int, y_pad: int, sticky: str, columnspan: int,
                         rowspan: int, size: Tuple[int, int] = (2000, 100)):
        """Render ``latex_string`` to an image and place it on the grid.

        Parameters mirror the grid options; ``size`` bounds the rendered
        image (thumbnail). All call sites pass ``size`` explicitly, so the
        new default is backward-compatible.
        """
        # Buffer holds the rendered PNG entirely in memory.
        buffer = BytesIO()
        math_to_image("$" + latex_string + "$",
                      buffer, dpi=1000, format='png')
        # Rewind so PIL can read what was just written.
        buffer.seek(0)
        pimage = Image.open(buffer)
        pimage.thumbnail(size)
        image = ImageTk.PhotoImage(pimage)
        label = ttk.Label(self, image=image)
        # Keep a reference so the image isn't garbage collected —
        # Tkinter doesn't store references by itself.
        label.img = image
        label.grid(column=column, row=row, pady=y_pad, sticky=sticky,
                   columnspan=columnspan, rowspan=rowspan)
        buffer.flush()
# Class for strand inputs
class Strands(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
| |
"""
<h2>Sobolev Alignment</h2>
@author: <NAME>
Example
-------
::
from anndata import AnnData
import numpy as np
import pandas as pd
from sobolev_alignment import SobolevAlignment
# Generate data
n_source = 100
n_target = 200
n_features = 500
X_source = np.random.normal(size=(n_source, n_features))
X_source = np.exp(X_source + np.random.randint(3,10,n_features)).astype(int)
X_source = AnnData(
X_source,
obs=pd.DataFrame(np.random.choice(['A','B'], n_source).astype(str), columns=['pool'])
)
X_target = np.random.normal(size=(n_target, n_features))
X_target = np.exp(X_target + np.random.randint(3,10,n_features)).astype(int)
X_target = AnnData(
X_target,
obs=pd.DataFrame(np.random.choice(['A','B'], n_target).astype(str), columns=['pool'])
)
    # Create a Sobolev Alignment instance
sobolev_alignment_clf = SobolevAlignment(
source_scvi_params={'train': {'early_stopping': True}, 'model': {}, 'plan': {}},
target_scvi_params={'train': {'early_stopping': True}, 'model': {}, 'plan': {}},
n_jobs=2
)
# Compute consensus features
sobolev_alignment_clf.fit(
X_source, X_target,
source_batch_name='pool', target_batch_name='pool'
)
::
Notes
-------
-
References
-------
[1] Mourragui et al 2022
[2] Lopez et al, Deep generative modeling for single-cell transcriptomics, Nature Methods, 2018.
[3] Meanti et al, Kernel methods through the roof: handling billions of points efficiently,
NeurIPS, 2020.
"""
import os, sys, gc, scipy, torch, scvi, logging
import numpy as np
import pandas as pd
import seaborn as sns
from pickle import load, dump
from copy import deepcopy
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from anndata import AnnData
from .generate_artificial_sample import parallel_generate_samples
from .krr_approx import KRRApprox
from .kernel_operations import mat_inv_sqrt
from .feature_analysis import higher_order_contribution, _compute_offset
from .multi_krr_approx import MultiKRRApprox
from .interpolated_features import compute_optimal_tau, project_on_interpolate_PV
# Default library size used when re-scaling artificial data
DEFAULT_LIB_SIZE = 10**3
class SobolevAlignment:
    """Sobolev alignment of a source and a target single-cell dataset.

    Per the module docstring and imports: trains one scVI model per dataset,
    approximates each encoder with kernel ridge regression (KRR) on
    artificially sampled points, and compares/aligns the resulting latent
    factors (Mourragui et al. 2022).
    """

    # Default scVI parameters: one empty dict per scvi-tools configuration
    # group (model construction / training plan / train call).
    default_scvi_params = {
        'model': {},
        'plan': {},
        'train': {}
    }
def __init__(
    self,
    source_scvi_params: dict = None,
    target_scvi_params: dict = None,
    source_krr_params: dict = None,
    target_krr_params: dict = None,
    n_jobs=1
):
    """Store scVI and KRR parameter dictionaries for source and target.

    Parameters
    ----------
    source_scvi_params
        Dictionary with scvi params for the source dataset. Must have three keys,
        each assigned to a dictionary of params: model, plan and train.
    target_scvi_params
        Dictionary with scvi params for the target dataset. Must have three keys,
        each assigned to a dictionary of params: model, plan and train.
    source_krr_params, target_krr_params
        KRR settings per dataset; default to ``{'method': 'falkon'}``.
    n_jobs
        Degree of parallelism used downstream.
    """
    def _or_default(params, fallback):
        # None means "use the default settings" for that side.
        return fallback if params is None else params

    # scVI params
    self.scvi_params = {
        'source': _or_default(source_scvi_params, self.default_scvi_params),
        'target': _or_default(target_scvi_params, self.default_scvi_params),
    }
    # KRR params
    self.krr_params = {
        'source': _or_default(source_krr_params, {'method': 'falkon'}),
        'target': _or_default(target_krr_params, {'method': 'falkon'}),
    }
    # Source and target must share the same kernel for later alignment.
    self._check_same_kernel()
    self.scaler_ = {}
    self.n_jobs = n_jobs
    # Initialize some values
    self._frob_norm_param = None
def fit(
self,
X_source: AnnData,
X_target: AnnData,
source_batch_name: str = None,
target_batch_name: str = None,
continuous_covariate_names: list = None,
n_artificial_samples: int = int(10e5),
fit_vae: bool = True,
krr_approx: bool=True,
sample_artificial: bool=True,
n_samples_per_sample_batch: int=10**6,
frac_save_artificial: float = 0.1,
save_mmap: str = None,
log_input: bool = True,
n_krr_clfs: int = 1,
no_posterior_collapse = True,
mean_center: bool = False,
unit_std: bool = False,
frob_norm_source: bool = False,
lib_size_norm: bool = False
):
"""
Runs the complete Sobolev Alignment workflow between a source (e.g. cell line) and a target (e.g. tumor) dataset.
<br/>
Source and target data should be passed as AnnData and potential batch names (source_batch_name, target_batch_name)
should be part of the "obs" element of X_source and X_target.
<br/>
We allow the user to set all possible parameters from this API. However, the following choices were made in the manuscript
presenting Sobolev Alignment (ref [1]):
<ul>
<li> source_batch_name and target_batch_name set to the experimental batch of cell lines and tumors respectively, allowing
to use native scVI batch-effect correction within cell lines and tumors.
<li> continuous_covariate_names set to None.
<li> n_artificial_samples set to 10e7. In absence of large computing resource, a lower number (e.g. 10e6) could be used.
<li> fit_vae, krr_approx and sample_artificial are set to True (running the complete pipeline) but playing with these
three parameters allow to test different combination (e.g. kernel, penalization, number of artificial samples, ...)
<li> n_samples_per_sample_batch set to 10e6. When not used on GPU, we recommend using 5*10e5.
<li> save_mmap is set to /tmp/. This allows to save the model points in memory.
<li> log_input and no_posterior_collapse are set to True.
<li> All other parameters are False and n_krr_clfs is 1.
</ul>
Parameters
----------
X_source: AnnData
Source data.
X_target: AnnData
Target data.
source_batch_name: str, default to None
Name of the batch to use in scVI for batch-effect correction. If None, no batch-effect correction
performed at the source-level.
target_batch_name: str, default to None
Name of the batch to use in scVI for batch-effect correction. If None, no batch-effect correction
performed at the target-level.
continuous_covariate_names: str, default to None
Name of continuous covariate to use in scVI training. Will be used for both source and target.
n_artificial_samples: int, default to 10e5
Number of points to sample in both source and target scVI models in approximation. This corresponds to
the number of "model points" used in the Kernel Ridge Regression step of source and target.
fit_vae: bool, default to True
Whether a scVI model (VAE) should be trained. If pre-trained VAEs are available, setting the "scvi_models_"
to these models and using fit_vae=False would allow to directly use these models.
krr_approx: bool, default to True
Whether the KRR approximation should be performed for source and target scVI models.
sample_artificial: bool, default to True
Whether model points should be sampled. In the case when artificial samples have already been sampled and
saved, setting sample_artificial=False allows to use these points without need for re-sampling.
n_samples_per_sample_batch: int, default to 10e6
Number of samples per batch for sampling model points. This parameter does not affect the end-result, but
can be used to alleviate memory issues in case of large n_artificial_samples.
frac_save_artificial: float, default to 0.1
Proportion of model points (artificial samples) to keep in memory. In case when several KRR models are trained
this must be set to 1.
<br/> Setting frac_save_artificial to 0.1 allows to compute the KRR approximation training error after the
complete alignment.
save_mmap: str, default to None
Folder on disk to use for saving the model points (artificial data). This allows to limit the memory usage and
therefore use larger KRR training data. If None, then artificial samples are kept in memory.
<br/> This parameter does not affect the final prediction, simply the memory footprint.
log_input: bool, default to True
Whether model points (artificial samples) are log-transformed before being given as input to KRR. Log-transform
usually increases approximation performance.
n_krr_clfs: int, default to 1
(Prototype) Number of KRR models to use. If larger than 1, the models prediction will be averaged. Experiments show
no improvements when using more than one classifier.
no_posterior_collapse: bool, default to True
Whether posterior collapse should be avoided. If True, then scVI model is re-trained until no hidden neuron is
collapsed. Every five iteration, one hidden neuron gets removed.
mean_center: bool, default to False
Whether model points (artificial samples) should be mean-centered before KRR.
unit_std: bool, default to False
Whether model points (artificial samples) should be standardized before KRR.
frob_norm_source: bool, default to False
In case when source and target data have a vastly different scale, frob_norm_source=True would correct the
target model points (artificial samples) to have a median sample-wise Frobenius norm equal to the median
sample-wise Frobenius norm of the source model points.
lib_size_norm: bool, default to False
Whether model points should be used with equal library size.
Returns
-------
self: fitted Sobolev Alignment instance.
"""
# Save data
self.training_data = {'source': X_source, 'target': X_target}
self.batch_name = {'source': source_batch_name, 'target': target_batch_name}
self.continuous_covariate_names = {'source': continuous_covariate_names, 'target': continuous_covariate_names}
# Save fitting parameters
self._fit_params = {
'sample_artificial': sample_artificial,
'n_samples_per_sample_batch': n_samples_per_sample_batch,
'frac_save_artificial': frac_save_artificial,
'save_mmap': save_mmap,
'log_input': log_input,
'n_krr_clfs': n_krr_clfs,
'no_posterior_collapse': no_posterior_collapse,
'mean_center': mean_center,
'unit_std': unit_std,
'frob_norm_source': frob_norm_source,
'lib_size_norm': lib_size_norm
}
# Train VAE
if fit_vae:
self._train_scvi_modules(no_posterior_collapse=no_posterior_collapse)
# Approximate scVI models by KRR models
if krr_approx:
self.lib_size = self._compute_batch_library_size()
self.approximate_krr_regressions_ = {}
if sample_artificial:
self.mean_center = mean_center
self.unit_std = unit_std
self.artificial_samples_ = {}
self.artificial_embeddings_ = {}
for data_source in ['source', 'target']:
self._train_krr(
data_source=data_source,
n_artificial_samples=n_artificial_samples,
sample_artificial=sample_artificial,
save_mmap=save_mmap,
log_input=log_input,
n_samples_per_sample_batch=n_samples_per_sample_batch,
frac_save_artificial=frac_save_artificial,
n_krr_clfs=n_krr_clfs,
mean_center=self.mean_center,
unit_std=self.unit_std,
frob_norm_source=frob_norm_source,
lib_size_norm=lib_size_norm
)
# Comparison and alignment
self._compare_approximated_encoders()
| |
flattened:
if video_id in phrase_intrvllists:
phrase_intrvllists[video_id].append((t1, t2, 0))
else:
phrase_intrvllists[video_id] = [(t1, t2, 0)]
for video_id, intrvllist in phrase_intrvllists.items():
phrase_intrvllists[video_id] = IntervalList(intrvllist)
phrase_intrvlcol = VideoIntervalCollection(phrase_intrvllists)
print('Get {} intervals for phrase \"{}\"'.format(count_intervals(phrase_intrvlcol), phrase))
return phrase_intrvlcol
def get_relevant_shots(intrvlcol):
    """Collect the set of shot payloads referenced by every interval in the collection."""
    relevant_shots = set()
    for intrvllist in intrvlcol.get_allintervals().values():
        relevant_shots.update(
            interval.get_payload() for interval in intrvllist.get_intervals())
    print("Get %d relevant shots" % len(relevant_shots))
    return relevant_shots
def get_oneface_intrvlcol(relevant_shots):
    """Build a VideoIntervalCollection of frames containing exactly one face.

    Faces are loaded with bounding-box payloads, coalesced so faces in the
    same frame share an interval, then filtered to single-face intervals.
    """
    face_qs = (
        Face.objects.filter(shot__in=list(relevant_shots))
        .annotate(video_id=F('shot__video_id'))
        .annotate(min_frame=F('shot__min_frame'))
        .annotate(max_frame=F('shot__max_frame'))
    )
    # NOTE: this materializes every face for the relevant shots, which is
    # slow for large shot sets.
    oneface_intrvlcol = (
        VideoIntervalCollection.from_django_qs(
            face_qs,
            with_payload=in_array(
                bbox_payload_parser(VideoIntervalCollection.django_accessor)),
        )
        .coalesce(payload_merge_op=payload_plus)
        .filter(payload_satisfies(length_exactly(1)))
    )
    num_intrvl = sum(
        ilist.size() for ilist in oneface_intrvlcol.get_allintervals().values())
    print("Get %d relevant one face intervals" % num_intrvl)
    return oneface_intrvlcol
def get_person_alone_phrase_intervals(person_intrvlcol, phrase, filter_still=True):
    """Find intervals where `phrase` is said while the person is alone on screen.

    Intersects caption intervals for the phrase with the person's on-screen
    intervals, restricts to single-face shots, and (optionally) drops
    still-image clips via optical flow. Returns a list of intervals.
    """
    phrase_intrvlcol = get_caption_intrvlcol(phrase, person_intrvlcol.get_allintervals().keys())

    # Overlap with the person's intervals, then keep only those overlap
    # results that are identical to an original caption interval.
    overlap_raw = person_intrvlcol.overlaps(phrase_intrvlcol)
    person_phrase_intrvlcol = overlap_raw.filter_against(
        phrase_intrvlcol,
        predicate = equal() )

    # Restrict to shots that contain exactly one face (the person alone).
    shots = get_relevant_shots(person_phrase_intrvlcol)
    oneface_intrvlcol = get_oneface_intrvlcol(shots)
    alone_intrvlcol = person_phrase_intrvlcol.overlaps(oneface_intrvlcol)

    intervals = intrvlcol2list(alone_intrvlcol)
    if not filter_still:
        return intervals

    # run optical flow to filter out still images; fall back to the
    # unfiltered list when everything was rejected.
    moving_intervals = filter_still_image_parallel(intervals)
    intervals_final = moving_intervals if len(moving_intervals) > 0 else intervals
    # Todo: always give at least one output
    print('Get {} person alone intervals for phrase \"{}\"'.format(len(intervals_final), phrase))
    return intervals_final
def filter_still_image_t(interval):
    """Heuristic motion test for one interval.

    Compares the middle frame with the frame right after it and returns True
    when the mean per-pixel difference exceeds a threshold, i.e. the clip is
    NOT a still image.
    """
    video_id, sfid, efid = interval[:3]
    video = Video.objects.filter(id=video_id)[0]
    # Sample at the temporal midpoint of the interval.
    fid = (sfid + efid) // 2
    frame_first = load_frame(video, fid, [])
    frame_second = load_frame(video, fid + 1, [])
    # NOTE(review): if load_frame returns uint8 arrays, this subtraction wraps
    # around instead of producing a signed difference — confirm the dtype.
    diff = 1. * np.sum(frame_first - frame_second) / frame_first.size
    # print(video.id, fid, diff)
    return diff > 15
def filter_still_image_parallel(intervals, limit=100):
    """Keep intervals judged to contain motion, testing at most `limit` of them.

    When more than `limit` intervals are given, only the `limit` longest ones
    (by the trailing duration field) are tested; the rest are discarded.
    """
    if limit < len(intervals):
        # intervals = random.sample(intervals, limit)
        durations = [interval[-1] for interval in intervals]
        intervals = [intervals[i] for i in np.argsort(durations)[-limit : ]]
    keep_flags = par_for(filter_still_image_t, intervals)
    return [interval for interval, keep in zip(intervals, keep_flags) if keep]
# ============== Applications ==============
class SinglePersonSing:
    """Build a "supercut" of one person singing a song.

    For every lyric line in an .srt file, search the caption/face indexes for
    intervals where the person says each phrase while alone on screen, select
    one candidate per phrase (automatically or via a human widget), and
    stitch the selections into a single output video.
    """

    def __init__(self, person_name, lyric_path, person_intrvlcol=None):
        """Prepare person intervals, parsed lyrics, and the on-disk cache.

        person_name: display name of the person (spaces become underscores).
        lyric_path: path to an .srt lyric file.
        person_intrvlcol: optional precomputed interval collection; looked up
            from the face index when omitted.
        """
        if person_intrvlcol is None:
            person_intrvlcol = get_person_intrvlcol(person_name, large_face=True, labeler='old')
        self.person_name = person_name.replace(' ', '_')
        self.person_intrvlcol = person_intrvlcol

        # load lyrics: each subtitle entry becomes
        # (upper-cased words, start seconds, end seconds).
        self.song_name = os.path.basename(lyric_path).replace('.srt', '')
        lyrics = []
        subs = pysrt.open(lyric_path)
        for sub in subs:
            # Strip bracketed/parenthesized annotations, then every character
            # that is not alphanumeric or an apostrophe.
            text = re.sub('\[.*\]', ' ', sub.text)
            text = re.sub('\(.*\)', ' ', text)
            text = re.sub('[^\'0-9a-zA-Z]+', ' ', text)
            words = []
            for word in text.split(' '):
                if word != '':
                    # Skip words the caption lexicon does not know about.
                    if not is_word_in_lexicon(word.upper()):
                        print('Word \"{}\" not exist in Lexcion!'.format(word))
                    else:
                        words.append(word.upper())
            # print("Extracted words from lyrics", words)
            lyrics.append((words, time2second(tuple(sub.start)[:4]), time2second(tuple(sub.end)[:4])))
        self.lyrics = lyrics

        # load cache of previous candidate searches / human selections.
        cache_path = '/app/result/supercut/cache/{}.pkl'.format(self.person_name)
        cache = pickle.load(open(cache_path, 'rb')) if os.path.exists(cache_path) else {'candidates':{}, 'selection':{}}
        self.phrase2candidates, self.phrase2selection = cache['candidates'], cache['selection']
        self.cache_path = cache_path

    def search_phrases(self, phrase_list):
        """Search candidate intervals for each phrase in *phrase_list*.

        Results accumulate in self.phrase2candidates_tmp. The attribute is
        created on demand: it was previously only initialized by the
        commented-out parallel path, so direct calls raised AttributeError.
        """
        if not hasattr(self, 'phrase2candidates_tmp'):
            self.phrase2candidates_tmp = {}
        for phrase in phrase_list:
            print("searching for \"{}\" ...".format(phrase))
            self.phrase2candidates_tmp[phrase] = get_person_alone_phrase_intervals(self.person_intrvlcol,
                                                                                   phrase, filter_still=False)

    # def search_candidates_parallel(self, workers=16):
    #     # collect all words
    #     words_all = set()
    #     for idx, (sentence, start, end) in enumerate(self.lyrics):
    #         words = [word.upper() for word in sentence.replace(',', '').replace('.', '').replace('!', '').split(' ')]
    #         for w in words:
    #             # todo: add more phrase
    #             words_all.add(w)
    #     words_all = sorted(words_all)
    #     print(words_all)
    #     # search all word intervals
    #     self.phrase2intrvlcol = {}
    #     for word in words_all:
    #         self.phrase2intrvlcol[word] = get_caption_intrvlcol(word, self.person_intrvlcol.get_allintervals().keys())
    #     self.phrase2candidates_tmp = {}
    #     par_for_process(self.search_phrases, words_all, num_workers=16)
    #     # filter still images
    #     for word, intervals in enumerate(zip(words_all, pre_candidates)):
    #         intervals_nostill = filter_still_image_parallel(intervals)
    #         intervals_final = intervals_nostill if len(intervals_nostill) > 0 else intervals
    #         print('Get {} person alone intervals for phrase \"{}\"'.format(len(intervals_final), word))
    #         self.phrase2candidates[word] = intervals_final

    def search_candidates(self):
        """Search candidates for every lyric line (serial path).

        Fills self.segments_list / self.candidates_list (one entry per lyric
        line) and persists the phrase cache afterwards.
        """
        segments_list = []
        candidates_list = []
        for idx, (words, start, end) in enumerate(self.lyrics):
            segments, candidates = single_person_one_sentence(self.person_intrvlcol, words, self.phrase2candidates)
            segments_list.append(segments)
            candidates_list.append(candidates)
        self.dump_cache()
        self.segments_list = segments_list
        self.candidates_list = candidates_list

    def select_candidates(self, human_selection=False, duplicate_selection=True, redo_selection=False):
        """Pick one interval per phrase, automatically or interactively."""
        self.selections_list = []
        if not human_selection:
            # Automatic: random pick among candidates of near-median duration.
            for idx, (segments, candidates) in enumerate(zip(self.segments_list, self.candidates_list)):
                selections = [auto_select_candidates(c, range=(0.99, 1.5), filter='random') for c in candidates]
                self.selections_list.append(selections)
        else:
            self.human_select_candidates(duplicate_selection, redo_selection)

    def make_supercuts(self, out_path):
        """Stitch the selected intervals into a single video at *out_path*.

        Each lyric line becomes a temporary clip (a silent clip when the line
        has no words), padded up to the start of the next line, then all
        clips are concatenated.
        """
        cutting_paths = []
        for idx, (words, start, end) in enumerate(self.lyrics):
            selections = self.selections_list[idx]
            tmp_path = '/app/result/supercut/tmp/{}-{}-{}.mp4'.format(self.person_name, self.song_name, idx)
            # Pad each clip until the next lyric line starts (1s for the last).
            dilation = self.lyrics[idx+1][1] - end if idx != len(self.lyrics) - 1 else 1
            if len(words) > 0:
                stitch_video_temporal(selections, tmp_path, out_duration=None, dilation=dilation)
            else:
                create_silent_clip(tmp_path, out_duration=end - start + dilation)
            cutting_paths.append(tmp_path)
            # Fixed: the original referenced an undefined name `sentence` here,
            # raising NameError on every call.
            print('Concat videos for sentence \"{}\"'.format(' '.join(words)))
            # if idx == 1:
            #     break
        concat_videos(cutting_paths, out_path)

    def dump_cache(self):
        """Persist the candidate/selection caches for this person."""
        pickle.dump({'candidates':self.phrase2candidates, 'selection':self.phrase2selection}, open(self.cache_path, 'wb'))

    def human_select_candidates(self, duplicate_selection, redo_selection):
        """Interactively select one interval per phrase via Jupyter widgets.

        duplicate_selection: reuse a previously saved selection for a phrase
            instead of asking again.
        redo_selection: discard all saved selections up front.
        """
        if redo_selection:
            for segments in self.segments_list:
                for phrase in segments:
                    if phrase in self.phrase2selection:
                        del self.phrase2selection[phrase]

        def launch_widget(idx_sentence, idx_phrase):
            # Advance to the next sentence when this one is exhausted.
            if idx_phrase >= len(self.segments_list[idx_sentence]):
                idx_phrase = 0
                idx_sentence += 1
                self.selections_list.append([])
            if idx_sentence >= len(self.segments_list):
                clear_output()
                print("Finished all selections")
                return
            phrase, candidates = self.segments_list[idx_sentence][idx_phrase], self.candidates_list[idx_sentence][idx_phrase]
            if phrase in self.phrase2selection and duplicate_selection:
                # Reuse the saved selection and move on without asking.
                self.selections_list[idx_sentence].append(self.phrase2selection[phrase])
                launch_widget(idx_sentence, idx_phrase + 1)
                return
            else:
                # Show candidates sorted longest-first for human review.
                durations = [i[-1] for i in candidates]
                candidates_sort = [candidates[idx] for idx in np.argsort(durations)[::-1]]
                result = interval2result(candidates_sort)
                print('Select phrase \"{}\" for sentence \"{}\"'.format(phrase, ' '.join(self.segments_list[idx_sentence])))
                selection_widget = esper_widget(
                    result,
                    disable_playback=False, jupyter_keybindings=True,
                    crop_bboxes=True)
                submit_button = widgets.Button(
                    layout=widgets.Layout(width='auto'),
                    description='Save to database',
                    disabled=False,
                    button_style='danger'
                )

                def on_submit(b):
                    # Record the human choice, cache it, and recurse to the
                    # next phrase.
                    selection = candidates_sort[selection_widget.selected[0]]
                    # print(selection_widget.selected, selection)
                    self.selections_list[idx_sentence].append(selection)
                    if not phrase in self.phrase2selection:
                        self.phrase2selection[phrase] = selection
                    self.dump_cache()
                    clear_output()
                    launch_widget(idx_sentence, idx_phrase + 1)

                submit_button.on_click(on_submit)
                display(widgets.HBox([submit_button]))
                display(selection_widget)

        idx_sentence, idx_phrase = 0, 0
        self.selections_list.append([])
        launch_widget(idx_sentence, idx_phrase)
def auto_select_candidates(intervals, num_sample=1, range=(0.5, 1.5), filter='random'):
durations = [i[-1] for i in intervals]
median = np.median(durations)
intervals_regular = [i for i in intervals if i[-1] > range[0] * median and i[-1] < range[1] * median]
if len(intervals_regular) == 0:
intervals_regular = [i for i in intervals if i[-1] > 0.5 * median and i[-1] < 1.5 * median]
# Todo: if regular intervals are not enough
if filter == 'random':
if num_sample == 1:
return random.choice(intervals_regular)
else:
return random.sample(intervals_regular, num_sample)
elif filter == 'longest':
durations_regular = [i[-1] for i in intervals_regular]
return intervals_regular[np.argmax(durations_regular)]
def single_person_one_sentence(person_intrvlcol, words, phrase2interval=None, concat_word=4):
# Magic numbers
SHORT_WORD = 4
LEAST_HIT = 3
CONCAT_WORD = concat_word
supercut_candidates = []
if phrase2interval is None:
phrase2interval = {}
segments = []
num_concat = 0
for idx, word in tqdm(enumerate(words)):
if num_concat > 0:
num_concat -= 1
continue
phrase = word
candidates = None
while idx + num_concat < len(words):
if num_concat == CONCAT_WORD:
num_concat -= 1
break
if num_concat > 0:
phrase += ' ' + words[idx + num_concat]
# skip short word for long phrase
if len(phrase) < SHORT_WORD:
num_concat += 1
continue
if candidates is None:
LEAST_HIT = 0
else:
LEAST_HIT = 3
print('{} Searching for phrase \"{}\" {}'.format('=' * 10, phrase, '=' * 10))
if phrase in phrase2interval:
print("Found in cache")
# if phrase in phrase2interval and not phrase2interval[phrase] is None:
if phrase2interval[phrase] is None:
num_concat = num_concat - 1 if num_concat != 0 else 0
break
candidates = phrase2interval[phrase]
segment = phrase
num_concat += 1
else:
person_alone_phrase_intervals = get_person_alone_phrase_intervals(person_intrvlcol, phrase)
num_intervals = len(person_alone_phrase_intervals)
if num_intervals > LEAST_HIT:
candidates = person_alone_phrase_intervals
phrase2interval[phrase] = candidates
segment = phrase
num_concat += 1
else:
phrase2interval[phrase] = None
num_concat = num_concat - 1 if num_concat != 0 else 0
break
# make up for short word
if candidates is None and len(word) < SHORT_WORD:
print('{} Searching for phrase \"{}\" {}'.format('=' * 10, phrase, '=' * 10))
if word in phrase2interval:
print("Found in cache")
candidates = phrase2interval[word]
segment = word
else:
person_alone_phrase_intervals = get_person_alone_phrase_intervals(person_intrvlcol, word)
num_intervals = len(person_alone_phrase_intervals)
if num_intervals > 0:
candidates = person_alone_phrase_intervals
phrase2interval[word] = candidates
segment = word
# if really cannot find the word, use clips from other person instead
if candidates is None:
phrase_intrvlcol = get_caption_intrvlcol(word)
candidates = intrvlcol2list(phrase_intrvlcol)
segment = word
if not candidates is None:
supercut_candidates.append(candidates)
segments.append(segment)
print("-------- Searched words: ", words)
print("-------- Final segments: ", segments)
return segments, supercut_candidates
def multi_person_one_phrase(phrase, filters={}):
'''
Get all intervals which the phrase is being said
@phrase: input phrase to be searched
@filters:
'with_face': must contain exactly one face
'gender': filter by gender
'limit': number of output intervals
| |
<filename>tools/bismark/bismark_wrapper.py
#!/usr/bin/env python
import argparse
import os
import shutil
import subprocess
import sys
import shlex
import tempfile
import fileinput
import fileinput
from glob import glob
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def __main__():
print 'tempfile_location',tempfile.gettempdir()
#Parse Command Line
parser = argparse.ArgumentParser(description='Wrapper for the bismark bisulfite mapper.')
parser.add_argument( '-p', '--num-threads', dest='num_threads',
type=int, default=4, help='Use this many threads to align reads. The default is 4.' )
parser.add_argument( '--bismark_path', dest='bismark_path', help='Path to the bismark perl scripts' )
parser.add_argument( '--bowtie2', action='store_true', default=False, help='Running bismark with bowtie2 and not with bowtie.' )
# input options
parser.add_argument( '--own-file', dest='own_file', help='' )
parser.add_argument( '-D', '--indexes-path', dest='index_path', help='Indexes directory; location of .ebwt and .fa files.' )
parser.add_argument( '-O', '--output', dest='output' )
parser.add_argument( '--output-report-file', dest='output_report_file' )
parser.add_argument( '--suppress-header', dest='suppress_header', action="store_true" )
parser.add_argument( '--mate-paired', dest='mate_paired', action='store_true', help='Reads are mate-paired', default=False)
parser.add_argument( '-1', '--mate1', dest='mate1',
help='The forward reads file in Sanger FASTQ or FASTA format.' )
parser.add_argument( '-2', '--mate2', dest='mate2',
help='The reverse reads file in Sanger FASTQ or FASTA format.' )
parser.add_argument( '--sort-bam', dest='sort_bam', action="store_true" )
parser.add_argument( '--output-unmapped-reads', dest='output_unmapped_reads',
help='Additional output file with unmapped reads (single-end).' )
parser.add_argument( '--output-unmapped-reads-l', dest='output_unmapped_reads_l',
help='File name for unmapped reads (left, paired-end).' )
parser.add_argument( '--output-unmapped-reads-r', dest='output_unmapped_reads_r',
help='File name for unmapped reads (right, paired-end).' )
parser.add_argument( '--output-suppressed-reads', dest='output_suppressed_reads',
help='Additional output file with suppressed reads (single-end).' )
parser.add_argument( '--output-suppressed-reads-l', dest='output_suppressed_reads_l',
help='File name for suppressed reads (left, paired-end).' )
parser.add_argument( '--output-suppressed-reads-r', dest='output_suppressed_reads_r',
help='File name for suppressed reads (right, paired-end).' )
parser.add_argument( '--stdout', dest='output_stdout',
help='File name for the standard output of bismark.' )
parser.add_argument( '--single-paired', dest='single_paired',
help='The single-end reads file in Sanger FASTQ or FASTA format.' )
parser.add_argument( '--fastq', action='store_true', help='Query filetype is in FASTQ format')
parser.add_argument( '--fasta', action='store_true', help='Query filetype is in FASTA format')
parser.add_argument( '--phred64-quals', dest='phred64', action="store_true" )
parser.add_argument( '--skip-reads', dest='skip_reads', type=int )
parser.add_argument( '--qupto', type=int)
# paired end options
parser.add_argument( '-I', '--minins', dest='min_insert' )
parser.add_argument( '-X', '--maxins', dest='max_insert' )
parser.add_argument( '--no-mixed', dest='no_mixed', action="store_true" )
parser.add_argument( '--no-discordant', dest='no_discordant', action="store_true" )
#parse general options
# default 20
parser.add_argument( '--seed-len', dest='seed_len', type=int)
# default 15
parser.add_argument( '--seed-extention-attempts', dest='seed_extention_attempts', type=int )
# default 0
parser.add_argument( '--seed-mismatches', dest='seed_mismatches', type=int )
# default 2
parser.add_argument( '--max-reseed', dest='max_reseed', type=int )
"""
# default 70
parser.add_argument( '--maqerr', dest='maqerr', type=int )
"""
"""
The number of megabytes of memory a given thread is given to store path
descriptors in --best mode. Best-first search must keep track of many paths
at once to ensure it is always extending the path with the lowest cumulative
cost. Bowtie tries to minimize the memory impact of the descriptors, but
they can still grow very large in some cases. If you receive an error message
saying that chunk memory has been exhausted in --best mode, try adjusting
this parameter up to dedicate more memory to the descriptors. Default: 512.
"""
parser.add_argument( '--chunkmbs', type=int, default=512 )
args = parser.parse_args()
# Create bismark index if necessary.
index_dir = ""
if args.own_file:
"""
Create a temporary index with the offered files from the user.
Utilizing the script: bismark_genome_preparation
bismark_genome_preparation --bowtie2 hg19/
"""
tmp_index_dir = tempfile.mkdtemp()
index_path = os.path.join( tmp_index_dir, '.'.join( os.path.split( args.own_file )[1].split( '.' )[:-1] ) )
try:
"""
Create a hard link pointing to args.own_file named 'index_path'.fa.
"""
os.symlink( args.own_file, index_path + '.fa' )
except Exception, e:
if os.path.exists( tmp_index_dir ):
shutil.rmtree( tmp_index_dir )
stop_err( 'Error in linking the reference database.\n' + str( e ) )
# bismark_genome_preparation needs the complete path to the folder in which the database is stored
if args.bowtie2:
cmd_index = 'bismark_genome_preparation --bowtie2 %s ' % ( tmp_index_dir )
else:
cmd_index = 'bismark_genome_preparation %s ' % ( tmp_index_dir )
if args.bismark_path:
if os.path.exists(args.bismark_path):
# add the path to the bismark perl scripts, that is needed for galaxy
cmd_index = os.path.join(args.bismark_path, cmd_index)
else:
# assume the same directory as that script
cmd_index = 'perl %s' % os.path.join(os.path.realpath(os.path.dirname(__file__)), cmd_index)
try:
tmp = tempfile.NamedTemporaryFile( dir=tmp_index_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=cmd_index, shell=True, cwd=tmp_index_dir, stdout=open(os.devnull, 'wb'), stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
buffsize = 1048576
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception, stderr
except Exception, e:
if os.path.exists( tmp_index_dir ):
shutil.rmtree( tmp_index_dir )
stop_err( 'Error indexing reference sequence\n' + str( e ) )
index_dir = tmp_index_dir
else:
# bowtie path is the path to the index directory and the first path of the index file name
index_dir = os.path.dirname( args.index_path )
# Build bismark command
"""
Bismark requires a large amount of temporary disc space. If that is not available, for example on a cluster you can hardcode the
TMP to some larger space. It's not recommended but it works.
"""
#tmp_bismark_dir = tempfile.mkdtemp( dir='/data/0/galaxy_db/tmp/' )
tmp_bismark_dir = tempfile.mkdtemp()
output_dir = os.path.join( tmp_bismark_dir, 'results')
cmd = 'bismark %(args)s --bam --temp_dir %(tmp_bismark_dir)s --gzip -o %(output_dir)s --quiet %(genome_folder)s %(reads)s'
if args.fasta:
# the query input files (specified as mate1,mate2 or singles) are FastA
cmd = '%s %s' % (cmd, '--fasta')
elif args.fastq:
cmd = '%s %s' % (cmd, '--fastq')
if args.bismark_path:
# add the path to the bismark perl scripts, that is needed for galaxy
if os.path.exists(args.bismark_path):
cmd = os.path.join(args.bismark_path, cmd)
else:
# assume the same directory as that script
cmd = 'perl %s' % os.path.join(os.path.realpath(os.path.dirname(__file__)), cmd)
arguments = {
'genome_folder': index_dir,
'args': '',
'tmp_bismark_dir': tmp_bismark_dir,
'output_dir': output_dir,
}
additional_opts = ''
# Set up the reads
if args.mate_paired:
# paired-end reads library
reads = '-1 %s ' % ( args.mate1 )
reads += ' -2 %s ' % ( args.mate2 )
additional_opts += ' -I %s -X %s ' % (args.min_insert, args.max_insert)
else:
# single paired reads library
reads = ' %s ' % ( args.single_paired )
if not args.bowtie2:
# use bowtie specific options
#additional_opts += ' --best ' # bug in bismark, --best is not available as option. Only --non-best, best-mode is activated by default
if args.seed_mismatches:
# --seedmms
additional_opts += ' -n %s ' % args.seed_mismatches
if args.seed_len:
# --seedlen
additional_opts += ' -l %s ' % args.seed_len
# alignment options
if args.bowtie2:
additional_opts += ' -p %s --bowtie2 ' % (int(args.num_threads/2)) #divides by 2 here since bismark will spawn 2 (original top and original bottom) jobs with -p threads each
if args.seed_mismatches:
additional_opts += ' -N %s ' % args.seed_mismatches
if args.seed_len:
additional_opts += ' -L %s ' % args.seed_len
if args.seed_extention_attempts:
additional_opts += ' -D %s ' % args.seed_extention_attempts
if args.max_reseed:
additional_opts += ' -R %s ' % args.max_reseed
if args.no_discordant:
additional_opts += ' --no-discordant '
if args.no_mixed:
additional_opts += ' --no-mixed '
"""
if args.maqerr:
additional_opts += ' --maqerr %s ' % args.maqerr
"""
if args.skip_reads:
additional_opts += ' --skip %s ' % args.skip_reads
if args.qupto:
additional_opts += ' --qupto %s ' % args.qupto
if args.phred64:
additional_opts += ' --phred64-quals '
if args.suppress_header:
additional_opts += ' --sam-no-hd '
if args.output_unmapped_reads or ( args.output_unmapped_reads_l and args.output_unmapped_reads_r):
additional_opts += ' --un '
if args.output_suppressed_reads or ( args.output_suppressed_reads_l and args.output_suppressed_reads_r):
additional_opts += ' --ambiguous '
arguments.update( {'args': additional_opts, 'reads': reads} )
# Final bismark command:
cmd = cmd % arguments
print 'bismark_cmd:', cmd
#sys.stderr.write( cmd )
#sys.exit(1)
# Run
try:
tmp_out = tempfile.NamedTemporaryFile().name
tmp_stdout = open( tmp_out, 'wb' )
tmp_err = tempfile.NamedTemporaryFile().name
tmp_stderr = open( tmp_err, 'wb' )
proc = subprocess.Popen( args=cmd, shell=True, cwd=".", stdout=tmp_stdout, stderr=tmp_stderr )
returncode = proc.wait()
if returncode != 0:
tmp_stdout.close()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp_err, 'rb' )
stderr = ''
buffsize = 1048576
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
raise Exception, stderr
tmp_stdout.close()
tmp_stderr.close()
# TODO: look for errors in program output.
except Exception, e:
stop_err( 'Error in bismark:\n' | |
<filename>src/classes/server.py
import json
from classes.gamemodes import getGamemode, isValidGamemode
from classes.items import Barrels, Gear, Grips, Lists, Magazines, Muzzles, Receivers, Scopes, Stocks, Tactical
from classes.loadouts import Player, PlayerLoadouts
from classes.playlists import getPlaylist, isValidPlaylist
from classes.maps import getMapFileName, getMapName, isValidMap
from classes.commands import CommandType, getMessageType
from classes.server_structs import ServerOptions, ServerInfo
from discord.message import Message
from utils.process_runner import restartServer, startServer, get_hwnds_for_pid
from utils.process_names import getServerInfo
from utils.cheatengine_communication import scan_players, update_loadouts
from subprocess import Popen
requiredPlayerKeys = [
'PlayerName'
]
requiredLoadouts = [
'Loadout1',
'Loadout2',
'Loadout3'
]
requiredLoadoutKeys = [
'Primary',
'Secondary'
]
requiredWeaponKeys = [
'Receiver'
]
class Server:
def __init__(self, config: str = None):
self.Options: ServerOptions = ServerOptions()
self.Info: ServerInfo = ServerInfo()
self.Process: Popen = None
self.Hwnd: int = 0
self.Starting: bool = True
self.PlayerLoadouts: PlayerLoadouts = PlayerLoadouts.Load('loadouts.json')
if(config != None):
self.DefaultConfig = config
else:
self.DefaultConfig = './configs/server_config.json'
if (self.LoadConfig(self.DefaultConfig)):
print('Loaded configuration file {}'.format(self.DefaultConfig))
def Start(self):
self.Process = startServer(self.Options.LaunchOptions)
self.Hwnd = get_hwnds_for_pid(self.Process.pid)[0]
print("PID: " + str(self.Process.pid))
print("HWND: " + str(self.Hwnd))
self.Starting = False
def GetServerInfo(self):
if(self.Starting == True):
return 'RESTARTING'
self.Info = getServerInfo(self.Hwnd)
if(self.Info.PlayerCount == -1):
return '??/' + self.Options.LaunchOptions.MaxPlayers + ' | ' + self.Info.Map
if(self.Info.Map == ''):
return 'NOT ONLINE'
return str(self.Info.PlayerCount) + '/' + str(self.Options.LaunchOptions.MaxPlayers) + ' | ' + self.Info.Map
def Restart(self):
self.Starting = True
self.Process = restartServer(self.Options.LaunchOptions, self.Process)
self.Hwnd = get_hwnds_for_pid(self.Process.pid)[0]
self.Starting = False
def SetMap(self, map):
if(isValidMap(map)):
self.Options.LaunchOptions.Map = map
return True
return False
def SetBots(self, bots):
try:
newBots = int(bots)
if(newBots < 0):
newBots = 0
if(newBots > 16):
newBots = 16
self.Options.LaunchOptions.BotCount = newBots
return newBots
except:
return -1
def SetPlaylist(self, playlist):
if(isValidPlaylist(playlist)):
self.Options.LaunchOptions.Playlist = playlist
return True
return False
def SetGamemode(self, gamemode):
if(isValidGamemode(gamemode)):
self.Options.LaunchOptions.Gamemode = gamemode
return True
return False
def SetSCP(self, scp):
try:
newSCP = int(scp)
if(newSCP < 0):
newSCP = 0
if(newSCP > 999999):
newSCP = 999999
self.Options.LaunchOptions.SCP = newSCP
return newSCP
except:
return -1
def SetTimeLimit(self, timelimit):
try:
newTimeLimit = int(timelimit)
if(newTimeLimit <= 0):
newTimeLimit = None
if(newTimeLimit > 999999):
newTimeLimit = 999999
self.Options.LaunchOptions.TimeLimit = newTimeLimit
return newTimeLimit
except:
return -1
def SetAutoRestart(self, autorestart: bool):
try:
self.Options.AutoRestartInLobby = autorestart
return True
except:
return False
def LoadConfig(self, configFile: str):
options = ServerOptions.LoadFromFile(configFile)
if(options != None):
self.Options = options
return True
return False
def ResetOptions(self):
return self.LoadConfig(self.DefaultConfig)
def ScanPlayers(self):
if(self.Starting != True and self.Info.PlayerCount > 0):
scan_players(self.Info.PlayerCount + self.Options.LaunchOptions.BotCount)
#print('players')
return
def UpdateLoadouts(self):
if(self.Starting != True and self.Info.PlayerCount > 0 and self.Info.Map != 'Lobby'):
update_loadouts()
#print('loadout')
return
def SetPrimary(self, playerName, receiverName):
self.PlayerLoadouts[playerName] = receiverName
# def RegisterPlayer(self, discordId: int, playerName: str, receiverP1: str = 'Heavy Assault Rifle', receiverS1: str = 'Revolver', receiverP2: str = 'LMG-Recon', receiverS2: str = 'Machine Pistol', receiverP3: str = 'Combat Rifle', receiverS3: str = 'Heavy Pistol'):
# self.PlayerLoadouts.RegisterPlayerTemp(discordId, playerName, receiverP1, receiverS1, receiverP2, receiverS2, receiverP3, receiverS3)
# self.PlayerLoadouts.SaveLoadouts('loadouts.json')
def RegisterLoadout(self, discordId: int, jsonLoadout: str):
try:
data = json.loads(jsonLoadout)
for playerKey in requiredPlayerKeys:
if(playerKey not in data):
return playerKey + ' not found!'
for loadout in requiredLoadouts:
if(loadout not in data):
return loadout + ' not found!'
for loadoutKey in requiredLoadoutKeys:
if(loadoutKey not in data[loadout]):
return loadoutKey + ' not found in ' + loadout + '!'
for weaponKey in requiredWeaponKeys:
if(weaponKey not in data[loadout][loadoutKey]):
return weaponKey + ' not found in ' + loadout + ' ' + loadoutKey + '!'
player = Player.LoadFromJson(data)
result = self.PlayerLoadouts.RegisterPlayer(discordId, player)
if(result == ''):
self.PlayerLoadouts.SaveLoadouts('loadouts.json')
return 'Loadout set successfully!'
else:
return result
except:
return 'Unknown error! Something went wrong with setting your loadout.'
async def Command(self, message: Message):
content: str = message.content
discordId: int = message.author.id
command: CommandType = getMessageType(content)
listHelp = 'Lists items for customization. Usage: `list` or `list <list name>` Available lists:\n'
for list in Lists:
listHelp += '`' + list + '`\n'
registerHelp = """Used to set player loadouts. Use the `list` command to get available weapon parts. Example usage:
```register
{
"PlayerName": "YourPlayerNameHere",
"Loadout1": {
"Primary": {
"Receiver": "Bullpup Full Auto",
"Muzzle": 1,
"Stock": "Silverwood z1200 BPFA",
"Barrel": "Hullbreach 047BAR",
"Magazine": 152,
"Scope": "Aim Point Ammo Counter",
"Grip": ""
},
"Secondary": {
"Receiver": "Snub 260",
"Muzzle": 0,
"Stock": "No Stock",
"Barrel": "No Barrel Mod",
"Magazine": 177,
"Scope": "No Optic Mod",
"Grip": ""
},
"Gear1": 15,
"Gear2": 25,
"Gear3": 5,
"Gear4": 6,
"Tactical": 1,
"Camo": 78,
"UpperBody": 6,
"LowerBody": 4,
"Helmet": 17,
"IsFemale": true
},
"Loadout2": {
"Primary": {
"Receiver": "Combat Rifle",
"Muzzle": 3,
"Stock": "Krane Extender Stock",
"Barrel": "Silverwood Light Accuracy Barrel",
"Magazine": 24,
"Scope": "4X Ammo Counter Scope",
"Grip": ""
},
"Secondary": {
"Receiver": "Shotgun",
"Muzzle": 0,
"Stock": "Redsand Compensator Stock",
"Barrel": "Krane SG Bar-20",
"Magazine": 29,
"Scope": "EMI Infrared Scope",
"Grip": "Briar BrSGP1"
},
"Gear1": 7,
"Gear2": 8,
"Gear3": 9,
"Gear4": 10,
"Tactical": 6,
"Camo": 13,
"UpperBody": 5,
"LowerBody": 7,
"Helmet": 15,
"IsFemale": true
},
"Loadout3": {
"Primary": {
"Receiver": "Assault Rifle",
"Muzzle": 2,
"Stock": "Taurex Stabilizing Stock",
"Barrel": "Briar Accuracy Barrel",
"Magazine": 14,
"Scope": "EMI Tech Scope",
"Grip": ""
},
"Secondary": {
"Receiver": "Heavy Pistol",
"Muzzle": 15,
"Stock": "Silverwood Compensator Stock",
"Barrel": "V2 Z900 Mod",
"Magazine": 48,
"Scope": "EMI Infrared Scope Mk. 2",
"Grip": ""
},
"Gear1": 11,
"Gear2": 12,
"Gear3": 13,
"Gear4": 14,
"Tactical": 4,
"Camo": 92,
"UpperBody": 2,
"LowerBody": 2,
"Helmet": 50,
"IsFemale": false
}
}```"""
if(command not in self.Options.AllowedCommands):
await message.channel.send("Command not enabled. Type help for available commands")
return
if(command == CommandType.Status):
await message.channel.send(self.currentServerInfo)
return
if(command == CommandType.Help):
parts = content.split(' ', 1)
if(len(parts) == 1):
response = 'Available commands:\n'
for allowedCommand in self.Options.AllowedCommands:
response += '`' + allowedCommand.value + '`\n'
response += 'For more detailed info on a command type: `help <command>`'
await message.channel.send(response)
return
if(parts[1].lower() == 'register'):
await message.channel.send(registerHelp)
return
if(parts[1].lower() == 'help'):
await message.channel.send('Lists available commands and how to use them. Usage: `help` or `help <command>`')
return
if(parts[1].lower() == 'list'):
await message.channel.send(listHelp)
return
await message.channel.send('Not a valid command')
return
if(command == CommandType.Restart):
if(self.Info.PlayerCount != 0):
await message.channel.send('Server is not empty, cannot restart.')
return
await message.channel.send('Restarting...')
self.Restart()
await message.channel.send('Restart complete!')
return
if(command == CommandType.Map):
newMap = content.split(' ', 1)[1]
mapFileName = getMapFileName(newMap)
if(self.SetMap(mapFileName)):
await message.channel.send('Map changed to ' + getMapName(mapFileName) + '. Restart for changes to take effect.')
return
if(command == CommandType.Bots):
numberOfBots = int(content.split(' ', 1)[1])
result = self.SetBots(numberOfBots)
if(result != -1):
await message.channel.send('Number of bots changed to ' + str(result) + '. Restart for changes to take effect.')
return
await message.channel.send('Number of bots should be a number between 0-16.')
return
if(command == CommandType.Playlist):
playlistStr = content.split(' ', 1)[1]
newPlaylist = getPlaylist(playlistStr)
if(self.SetPlaylist(newPlaylist)):
await message.channel.send('Playlist changed to ' + newPlaylist + '. Restart for changes to take effect.')
return
if(command == CommandType.Gamemode):
gamemodeStr = content.split(' ', 1)[1]
newGamemode = getGamemode(gamemodeStr)
if(self.SetGamemode(newGamemode)):
await message.channel.send('Gamemode changed to ' + newGamemode + '. Restart for changes to take effect.')
return
if(command == CommandType.SCP):
scp = int(content.split(' ', 1)[1])
result = self.SetSCP(scp)
if(result != -1):
await message.channel.send('Starting CP changed to ' + str(result) + '. Restart for changes to take effect.')
return
await message.channel.send('Starting CP should be a number between 0-999999.')
return
if(command == CommandType.TimeLimit):
timeLimit = int(content.split(' ', 1)[1])
result = self.SetTimeLimit(timeLimit)
if(result != -1):
await message.channel.send('TimeLimit changed to ' + str(result) + ' minutes. Restart for changes to take effect.')
return
await message.channel.send('TimeLimit should be a number between 0-999999.')
return
if(command == CommandType.AutoRestart):
autorestart = bool(content.split(' ', 1)[1].lower == 'true')
if(self.SetAutoRestart(autorestart)):
await message.channel.send('Auto restart in lobby changed to ' + str(autorestart) + '.')
return
await message.channel.send('Auto restart should be true or false.')
return
if(command == CommandType.Reset):
return self.ResetOptions()
if(command == CommandType.Register):
parts = content.split('\n', 1)
if(len(parts) == 1):
await message.channel.send(registerHelp)
return
json = parts[1]
result = self.RegisterLoadout(discordId, json)
await message.channel.send(result)
return
if(command == CommandType.List):
parts = content.split(' ', 1)
if(len(parts) == 1):
await message.channel.send(listHelp)
return
if(parts[1].lower() == 'receivers'):
response = ''
for item in Receivers:
response += '`' + item + '`\n'
await message.channel.send(response)
return
if(parts[1].lower() == 'stocks'):
response = ''
for item in Stocks:
response += '`' + item + '`\n'
await message.channel.send(response)
return
if(parts[1].lower() == 'barrels'):
response = ''
for item in Barrels:
response += '`' + item + '`\n'
await message.channel.send(response)
return
| |
# -*- coding: utf-8 -*-
"""
Contains all possible non-ASCII unicode numbers.
"""
from __future__ import (
print_function,
division,
unicode_literals,
absolute_import
)
# Std. lib imports.
import unicodedata
# Local imports.
from natsort.compat.py23 import py23_unichr
# Rather than determine this on the fly, which would incur a startup
# runtime penalty, the hex values of the Unicode numeric characters
# are hard-coded below.
numeric_hex = (
0X30, 0X31, 0X32, 0X33, 0X34, 0X35, 0X36, 0X37, 0X38,
0X39, 0XB2, 0XB3, 0XB9, 0XBC, 0XBD, 0XBE, 0X660, 0X661,
0X662, 0X663, 0X664, 0X665, 0X666, 0X667, 0X668, 0X669,
0X6F0, 0X6F1, 0X6F2, 0X6F3, 0X6F4, 0X6F5, 0X6F6, 0X6F7,
0X6F8, 0X6F9, 0X7C0, 0X7C1, 0X7C2, 0X7C3, 0X7C4, 0X7C5,
0X7C6, 0X7C7, 0X7C8, 0X7C9, 0X966, 0X967, 0X968, 0X969,
0X96A, 0X96B, 0X96C, 0X96D, 0X96E, 0X96F, 0X9E6, 0X9E7,
0X9E8, 0X9E9, 0X9EA, 0X9EB, 0X9EC, 0X9ED, 0X9EE, 0X9EF,
0X9F4, 0X9F5, 0X9F6, 0X9F7, 0X9F8, 0X9F9, 0XA66, 0XA67,
0XA68, 0XA69, 0XA6A, 0XA6B, 0XA6C, 0XA6D, 0XA6E, 0XA6F,
0XAE6, 0XAE7, 0XAE8, 0XAE9, 0XAEA, 0XAEB, 0XAEC, 0XAED,
0XAEE, 0XAEF, 0XB66, 0XB67, 0XB68, 0XB69, 0XB6A, 0XB6B,
0XB6C, 0XB6D, 0XB6E, 0XB6F, 0XB72, 0XB73, 0XB74, 0XB75,
0XB76, 0XB77, 0XBE6, 0XBE7, 0XBE8, 0XBE9, 0XBEA, 0XBEB,
0XBEC, 0XBED, 0XBEE, 0XBEF, 0XBF0, 0XBF1, 0XBF2, 0XC66,
0XC67, 0XC68, 0XC69, 0XC6A, 0XC6B, 0XC6C, 0XC6D, 0XC6E,
0XC6F, 0XC78, 0XC79, 0XC7A, 0XC7B, 0XC7C, 0XC7D, 0XC7E,
0XCE6, 0XCE7, 0XCE8, 0XCE9, 0XCEA, 0XCEB, 0XCEC, 0XCED,
0XCEE, 0XCEF, 0XD66, 0XD67, 0XD68, 0XD69, 0XD6A, 0XD6B,
0XD6C, 0XD6D, 0XD6E, 0XD6F, 0XD70, 0XD71, 0XD72, 0XD73,
0XD74, 0XD75, 0XDE6, 0XDE7, 0XDE8, 0XDE9, 0XDEA, 0XDEB,
0XDEC, 0XDED, 0XDEE, 0XDEF, 0XE50, 0XE51, 0XE52, 0XE53,
0XE54, 0XE55, 0XE56, 0XE57, 0XE58, 0XE59, 0XED0, 0XED1,
0XED2, 0XED3, 0XED4, 0XED5, 0XED6, 0XED7, 0XED8, 0XED9,
0XF20, 0XF21, 0XF22, 0XF23, 0XF24, 0XF25, 0XF26, 0XF27,
0XF28, 0XF29, 0XF2A, 0XF2B, 0XF2C, 0XF2D, 0XF2E, 0XF2F,
0XF30, 0XF31, 0XF32, 0XF33, 0X1040, 0X1041, 0X1042, 0X1043,
0X1044, 0X1045, 0X1046, 0X1047, 0X1048, 0X1049, 0X1090,
0X1091, 0X1092, 0X1093, 0X1094, 0X1095, 0X1096, 0X1097,
0X1098, 0X1099, 0X1369, 0X136A, 0X136B, 0X136C, 0X136D,
0X136E, 0X136F, 0X1370, 0X1371, 0X1372, 0X1373, 0X1374,
0X1375, 0X1376, 0X1377, 0X1378, 0X1379, 0X137A, 0X137B,
0X137C, 0X16EE, 0X16EF, 0X16F0, 0X17E0, 0X17E1, 0X17E2,
0X17E3, 0X17E4, 0X17E5, 0X17E6, 0X17E7, 0X17E8, 0X17E9,
0X17F0, 0X17F1, 0X17F2, 0X17F3, 0X17F4, 0X17F5, 0X17F6,
0X17F7, 0X17F8, 0X17F9, 0X1810, 0X1811, 0X1812, 0X1813,
0X1814, 0X1815, 0X1816, 0X1817, 0X1818, 0X1819, 0X1946,
0X1947, 0X1948, 0X1949, 0X194A, 0X194B, 0X194C, 0X194D,
0X194E, 0X194F, 0X19D0, 0X19D1, 0X19D2, 0X19D3, 0X19D4,
0X19D5, 0X19D6, 0X19D7, 0X19D8, 0X19D9, 0X19DA, 0X1A80,
0X1A81, 0X1A82, 0X1A83, 0X1A84, 0X1A85, 0X1A86, 0X1A87,
0X1A88, 0X1A89, 0X1A90, 0X1A91, 0X1A92, 0X1A93, 0X1A94,
0X1A95, 0X1A96, 0X1A97, 0X1A98, 0X1A99, 0X1B50, 0X1B51,
0X1B52, 0X1B53, 0X1B54, 0X1B55, 0X1B56, 0X1B57, 0X1B58,
0X1B59, 0X1BB0, 0X1BB1, 0X1BB2, 0X1BB3, 0X1BB4, 0X1BB5,
0X1BB6, 0X1BB7, 0X1BB8, 0X1BB9, 0X1C40, 0X1C41, 0X1C42,
0X1C43, 0X1C44, 0X1C45, 0X1C46, 0X1C47, 0X1C48, 0X1C49,
0X1C50, 0X1C51, 0X1C52, 0X1C53, 0X1C54, 0X1C55, 0X1C56,
0X1C57, 0X1C58, 0X1C59, 0X2070, 0X2074, 0X2075, 0X2076,
0X2077, 0X2078, 0X2079, 0X2080, 0X2081, 0X2082, 0X2083,
0X2084, 0X2085, 0X2086, 0X2087, 0X2088, 0X2089, 0X2150,
0X2151, 0X2152, 0X2153, 0X2154, 0X2155, 0X2156, 0X2157,
0X2158, 0X2159, 0X215A, 0X215B, 0X215C, 0X215D, 0X215E,
0X215F, 0X2160, 0X2161, 0X2162, 0X2163, 0X2164, 0X2165,
0X2166, 0X2167, 0X2168, 0X2169, 0X216A, 0X216B, 0X216C,
0X216D, 0X216E, 0X216F, 0X2170, 0X2171, 0X2172, 0X2173,
0X2174, 0X2175, 0X2176, 0X2177, 0X2178, 0X2179, 0X217A,
0X217B, 0X217C, 0X217D, 0X217E, 0X217F, 0X2180, 0X2181,
0X2182, 0X2185, 0X2186, 0X2187, 0X2188, 0X2189, 0X2460,
0X2461, 0X2462, 0X2463, 0X2464, 0X2465, 0X2466, 0X2467,
0X2468, 0X2469, 0X246A, 0X246B, 0X246C, 0X246D, 0X246E,
0X246F, 0X2470, 0X2471, 0X2472, 0X2473, 0X2474, 0X2475,
0X2476, 0X2477, 0X2478, 0X2479, 0X247A, 0X247B, 0X247C,
0X247D, 0X247E, 0X247F, 0X2480, 0X2481, 0X2482, 0X2483,
0X2484, 0X2485, 0X2486, 0X2487, 0X2488, 0X2489, 0X248A,
0X248B, 0X248C, 0X248D, 0X248E, 0X248F, 0X2490, 0X2491,
0X2492, 0X2493, 0X2494, 0X2495, 0X2496, 0X2497, 0X2498,
0X2499, 0X249A, 0X249B, 0X24EA, 0X24EB, 0X24EC, 0X24ED,
0X24EE, 0X24EF, 0X24F0, 0X24F1, 0X24F2, 0X24F3, 0X24F4,
0X24F5, 0X24F6, 0X24F7, 0X24F8, 0X24F9, 0X24FA, 0X24FB,
0X24FC, 0X24FD, 0X24FE, 0X24FF, 0X2776, 0X2777, 0X2778,
0X2779, 0X277A, 0X277B, 0X277C, 0X277D, 0X277E, 0X277F,
0X2780, 0X2781, 0X2782, 0X2783, 0X2784, 0X2785, 0X2786,
0X2787, 0X2788, 0X2789, 0X278A, 0X278B, 0X278C, 0X278D,
0X278E, 0X278F, 0X2790, 0X2791, 0X2792, 0X2793, 0X2CFD,
0X3007, 0X3021, 0X3022, 0X3023, 0X3024, 0X3025, 0X3026,
0X3027, 0X3028, 0X3029, 0X3038, 0X3039, 0X303A, 0X3192,
0X3193, 0X3194, 0X3195, 0X3220, 0X3221, 0X3222, 0X3223,
0X3224, 0X3225, 0X3226, 0X3227, 0X3228, 0X3229, 0X3248,
0X3249, 0X324A, 0X324B, 0X324C, 0X324D, 0X324E, 0X324F,
0X3251, 0X3252, 0X3253, 0X3254, 0X3255, 0X3256, 0X3257,
0X3258, 0X3259, 0X325A, 0X325B, 0X325C, 0X325D, 0X325E,
0X325F, 0X3280, 0X3281, 0X3282, 0X3283, 0X3284, 0X3285,
0X3286, 0X3287, 0X3288, 0X3289, 0X32B1, 0X32B2, 0X32B3,
0X32B4, 0X32B5, 0X32B6, 0X32B7, 0X32B8, 0X32B9, 0X32BA,
0X32BB, 0X32BC, 0X32BD, 0X32BE, 0X32BF, 0X3405, 0X3483,
0X382A, 0X3B4D, 0X4E00, 0X4E03, 0X4E07, 0X4E09, 0X4E5D,
0X4E8C, 0X4E94, 0X4E96, 0X4EBF, 0X4EC0, 0X4EDF, 0X4EE8,
0X4F0D, 0X4F70, 0X5104, 0X5146, 0X5169, 0X516B, 0X516D,
0X5341, 0X5343, 0X5344, 0X5345, 0X534C, 0X53C1, 0X53C2,
0X53C3, 0X53C4, 0X56DB, 0X58F1, 0X58F9, 0X5E7A, 0X5EFE,
0X5EFF, 0X5F0C, 0X5F0D, 0X5F0E, 0X5F10, 0X62FE, 0X634C,
0X67D2, 0X6F06, 0X7396, 0X767E, 0X8086, 0X842C, 0X8CAE,
0X8CB3, 0X8D30, 0X9621, 0X9646, 0X964C, 0X9678, 0X96F6,
0XA620, 0XA621, 0XA622, 0XA623, 0XA624, 0XA625, 0XA626,
0XA627, 0XA628, 0XA629, 0XA6E6, 0XA6E7, 0XA6E8, 0XA6E9,
0XA6EA, 0XA6EB, 0XA6EC, 0XA6ED, 0XA6EE, 0XA6EF, 0XA830,
0XA831, 0XA832, 0XA833, 0XA834, 0XA835, 0XA8D0, 0XA8D1,
0XA8D2, 0XA8D3, 0XA8D4, 0XA8D5, 0XA8D6, 0XA8D7, 0XA8D8,
0XA8D9, 0XA900, 0XA901, 0XA902, 0XA903, 0XA904, 0XA905,
0XA906, 0XA907, 0XA908, 0XA909, 0XA9D0, 0XA9D1, 0XA9D2,
0XA9D3, 0XA9D4, 0XA9D5, 0XA9D6, 0XA9D7, 0XA9D8, 0XA9D9,
0XA9F0, 0XA9F1, 0XA9F2, 0XA9F3, 0XA9F4, 0XA9F5, 0XA9F6,
0XA9F7, 0XA9F8, 0XA9F9, 0XAA50, 0XAA51, 0XAA52, 0XAA53,
0XAA54, 0XAA55, 0XAA56, 0XAA57, 0XAA58, 0XAA59, 0XABF0,
0XABF1, 0XABF2, 0XABF3, 0XABF4, 0XABF5, 0XABF6, 0XABF7,
0XABF8, 0XABF9, 0XF96B, 0XF973, 0XF978, 0XF9B2, 0XF9D1,
0XF9D3, 0XF9FD, 0XFF10, 0XFF11, 0XFF12, 0XFF13, 0XFF14,
0XFF15, 0XFF16, 0XFF17, 0XFF18, 0XFF19, 0X10107, 0X10108,
0X10109, 0X1010A, 0X1010B, 0X1010C, 0X1010D, 0X1010E, 0X1010F,
0X10110, 0X10111, 0X10112, 0X10113, 0X10114, 0X10115, 0X10116,
0X10117, 0X10118, 0X10119, 0X1011A, 0X1011B, 0X1011C, 0X1011D,
0X1011E, 0X1011F, 0X10120, 0X10121, 0X10122, 0X10123, 0X10124,
0X10125, 0X10126, 0X10127, 0X10128, 0X10129, 0X1012A, 0X1012B,
0X1012C, 0X1012D, 0X1012E, 0X1012F, 0X10130, 0X10131, 0X10132,
0X10133, 0X10140, 0X10141, 0X10142, 0X10143, 0X10144, 0X10145,
0X10146, 0X10147, 0X10148, 0X10149, 0X1014A, 0X1014B, 0X1014C,
0X1014D, 0X1014E, 0X1014F, 0X10150, 0X10151, 0X10152, 0X10153,
0X10154, 0X10155, 0X10156, 0X10157, 0X10158, 0X10159, 0X1015A,
0X1015B, 0X1015C, 0X1015D, 0X1015E, 0X1015F, 0X10160, 0X10161,
0X10162, 0X10163, 0X10164, 0X10165, 0X10166, 0X10167, 0X10168,
0X10169, 0X1016A, 0X1016B, 0X1016C, 0X1016D, 0X1016E, 0X1016F,
0X10170, 0X10171, 0X10172, 0X10173, 0X10174, 0X10175, 0X10176,
0X10177, 0X10178, 0X1018A, 0X1018B, 0X102E1, 0X102E2, 0X102E3,
0X102E4, 0X102E5, 0X102E6, 0X102E7, 0X102E8, 0X102E9, 0X102EA,
0X102EB, 0X102EC, 0X102ED, 0X102EE, 0X102EF, 0X102F0, 0X102F1,
0X102F2, 0X102F3, 0X102F4, 0X102F5, 0X102F6, 0X102F7, 0X102F8,
0X102F9, 0X102FA, 0X102FB, 0X10320, 0X10321, 0X10322, 0X10323,
0X10341, 0X1034A, 0X103D1, 0X103D2, 0X103D3, 0X103D4, 0X103D5,
0X104A0, 0X104A1, 0X104A2, 0X104A3, 0X104A4, 0X104A5, 0X104A6,
0X104A7, 0X104A8, 0X104A9, 0X10858, 0X10859, 0X1085A, 0X1085B,
0X1085C, 0X1085D, 0X1085E, 0X1085F, 0X10879, 0X1087A, 0X1087B,
0X1087C, 0X1087D, 0X1087E, 0X1087F, 0X108A7, 0X108A8, 0X108A9,
0X108AA, 0X108AB, 0X108AC, 0X108AD, 0X108AE, 0X108AF, 0X108FB,
0X108FC, 0X108FD, 0X108FE, 0X108FF, 0X10916, 0X10917, 0X10918,
0X10919, 0X1091A, 0X1091B, 0X109BC, 0X109BD, 0X109C0, 0X109C1,
0X109C2, 0X109C3, 0X109C4, 0X109C5, 0X109C6, 0X109C7, 0X109C8,
0X109C9, 0X109CA, 0X109CB, 0X109CC, 0X109CD, 0X109CE, 0X109CF,
0X109D2, 0X109D3, 0X109D4, 0X109D5, 0X109D6, 0X109D7, 0X109D8,
0X109D9, 0X109DA, 0X109DB, 0X109DC, 0X109DD, 0X109DE, 0X109DF,
0X109E0, 0X109E1, 0X109E2, 0X109E3, 0X109E4, 0X109E5, 0X109E6,
0X109E7, 0X109E8, 0X109E9, 0X109EA, 0X109EB, 0X109EC, 0X109ED,
0X109EE, 0X109EF, 0X109F0, 0X109F1, 0X109F2, 0X109F3, 0X109F4,
0X109F5, 0X109F6, 0X109F7, 0X109F8, 0X109F9, 0X109FA, 0X109FB,
0X109FC, 0X109FD, 0X109FE, 0X109FF, 0X10A40, 0X10A41, 0X10A42,
0X10A43, 0X10A44, 0X10A45, 0X10A46, 0X10A47, 0X10A7D, 0X10A7E,
0X10A9D, 0X10A9E, 0X10A9F, 0X10AEB, 0X10AEC, 0X10AED, 0X10AEE,
0X10AEF, 0X10B58, 0X10B59, 0X10B5A, 0X10B5B, 0X10B5C, 0X10B5D,
0X10B5E, 0X10B5F, 0X10B78, 0X10B79, 0X10B7A, 0X10B7B, 0X10B7C,
0X10B7D, 0X10B7E, 0X10B7F, 0X10BA9, 0X10BAA, 0X10BAB, 0X10BAC,
0X10BAD, 0X10BAE, 0X10BAF, 0X10CFA, 0X10CFB, 0X10CFC, 0X10CFD,
0X10CFE, 0X10CFF, 0X10E60, 0X10E61, 0X10E62, 0X10E63, 0X10E64,
0X10E65, 0X10E66, 0X10E67, 0X10E68, 0X10E69, 0X10E6A, 0X10E6B,
0X10E6C, 0X10E6D, 0X10E6E, 0X10E6F, 0X10E70, 0X10E71, 0X10E72,
0X10E73, 0X10E74, 0X10E75, 0X10E76, 0X10E77, 0X10E78, 0X10E79,
0X10E7A, 0X10E7B, 0X10E7C, 0X10E7D, 0X10E7E, 0X11052, 0X11053,
0X11054, 0X11055, 0X11056, 0X11057, 0X11058, 0X11059, 0X1105A,
0X1105B, 0X1105C, 0X1105D, 0X1105E, 0X1105F, 0X11060, 0X11061,
0X11062, 0X11063, 0X11064, 0X11065, 0X11066, 0X11067, 0X11068,
0X11069, 0X1106A, 0X1106B, 0X1106C, 0X1106D, 0X1106E, 0X1106F,
0X110F0, 0X110F1, 0X110F2, 0X110F3, 0X110F4, 0X110F5, 0X110F6,
0X110F7, 0X110F8, 0X110F9, 0X11136, 0X11137, 0X11138, 0X11139,
0X1113A, 0X1113B, 0X1113C, 0X1113D, 0X1113E, 0X1113F, 0X111D0,
0X111D1, 0X111D2, 0X111D3, 0X111D4, 0X111D5, 0X111D6, 0X111D7,
0X111D8, 0X111D9, 0X111E1, 0X111E2, 0X111E3, 0X111E4, 0X111E5,
0X111E6, 0X111E7, 0X111E8, 0X111E9, 0X111EA, 0X111EB, 0X111EC,
0X111ED, 0X111EE, 0X111EF, 0X111F0, 0X111F1, 0X111F2, 0X111F3,
0X111F4, 0X112F0, 0X112F1, 0X112F2, 0X112F3, 0X112F4, 0X112F5,
0X112F6, 0X112F7, 0X112F8, 0X112F9, 0X114D0, 0X114D1, 0X114D2,
0X114D3, 0X114D4, 0X114D5, 0X114D6, 0X114D7, 0X114D8, 0X114D9,
0X11650, 0X11651, 0X11652, 0X11653, 0X11654, 0X11655, 0X11656,
0X11657, | |
+ m.x254 + m.x257 + m.x268 + m.x271
+ m.x338 + m.x341 <= 12)
# NOTE(review): machine-generated Pyomo constraint definitions (do not edit
# by hand). Each constraint bounds a weighted combination of binary
# variables m.bNN (coefficient 12) and continuous variables m.xNNN by 12 —
# presumably big-M/clique-style linearization cuts emitted by a modeling
# tool; confirm against the model generator before changing any of them.
m.c4036 = Constraint(expr= 12*m.b58 + 12*m.b62 - m.x170 - m.x174 + m.x240 + m.x244 + m.x254 + m.x258 + m.x268 + m.x272
                           + m.x338 + m.x342 <= 12)
m.c4037 = Constraint(expr= 12*m.b59 + 12*m.b63 - m.x171 - m.x175 + m.x241 + m.x245 + m.x255 + m.x259 + m.x269 + m.x273
                           + m.x339 + m.x343 <= 12)
m.c4038 = Constraint(expr= 12*m.b59 + 12*m.b64 - m.x171 - m.x176 + m.x241 + m.x246 + m.x255 + m.x260 + m.x269 + m.x274
                           + m.x339 + m.x344 <= 12)
m.c4039 = Constraint(expr= 12*m.b59 + 12*m.b65 - m.x171 - m.x177 + m.x241 + m.x247 + m.x255 + m.x261 + m.x269 + m.x275
                           + m.x339 + m.x345 <= 12)
m.c4040 = Constraint(expr= 12*m.b60 + 12*m.b66 - m.x172 - m.x178 + m.x242 + m.x248 + m.x256 + m.x262 + m.x270 + m.x276
                           + m.x340 + m.x346 <= 12)
m.c4041 = Constraint(expr= 12*m.b60 + 12*m.b67 - m.x172 - m.x179 + m.x242 + m.x249 + m.x256 + m.x263 + m.x270 + m.x277
                           + m.x340 + m.x347 <= 12)
m.c4042 = Constraint(expr= 12*m.b61 + 12*m.b68 - m.x173 - m.x180 + m.x243 + m.x250 + m.x257 + m.x264 + m.x271 + m.x278
                           + m.x341 + m.x348 <= 12)
m.c4043 = Constraint(expr= 12*m.b63 + 12*m.b68 - m.x175 - m.x180 + m.x245 + m.x250 + m.x259 + m.x264 + m.x273 + m.x278
                           + m.x343 + m.x348 <= 12)
m.c4044 = Constraint(expr= 12*m.b65 + 12*m.b71 - m.x177 - m.x183 + m.x247 + m.x253 + m.x261 + m.x267 + m.x275 + m.x281
                           + m.x345 + m.x351 <= 12)
m.c4045 = Constraint(expr= 12*m.b67 + 12*m.b71 - m.x179 - m.x183 + m.x249 + m.x253 + m.x263 + m.x267 + m.x277 + m.x281
                           + m.x347 + m.x351 <= 12)
m.c4046 = Constraint(expr= 12*m.b68 + 12*m.b69 - m.x180 - m.x181 + m.x250 + m.x251 + m.x264 + m.x265 + m.x278 + m.x279
                           + m.x348 + m.x349 <= 12)
m.c4047 = Constraint(expr= 12*m.b70 + 12*m.b71 - m.x182 - m.x183 + m.x252 + m.x253 + m.x266 + m.x267 + m.x280 + m.x281
                           + m.x350 + m.x351 <= 12)
m.c4048 = Constraint(expr= 12*m.b58 + 12*m.b59 + 12*m.b60 - m.x170 - m.x171 - m.x172 + m.x240 + m.x241 + m.x242
                           + m.x254 + m.x255 + m.x256 + m.x268 + m.x269 + m.x270 + m.x338 + m.x339 + m.x340 <= 12)
m.c4049 = Constraint(expr= 12*m.b62 + 12*m.b69 + 12*m.b70 - m.x174 - m.x181 - m.x182 + m.x244 + m.x251 + m.x252
                           + m.x258 + m.x265 + m.x266 + m.x272 + m.x279 + m.x280 + m.x342 + m.x349 + m.x350 <= 12)
m.c4050 = Constraint(expr= 12*m.b64 + 12*m.b69 + 12*m.b70 - m.x176 - m.x181 - m.x182 + m.x246 + m.x251 + m.x252
                           + m.x260 + m.x265 + m.x266 + m.x274 + m.x279 + m.x280 + m.x344 + m.x349 + m.x350 <= 12)
m.c4051 = Constraint(expr= 12*m.b66 + 12*m.b69 + 12*m.b70 - m.x178 - m.x181 - m.x182 + m.x248 + m.x251 + m.x252
                           + m.x262 + m.x265 + m.x266 + m.x276 + m.x279 + m.x280 + m.x346 + m.x349 + m.x350 <= 12)
m.c4052 = Constraint(expr= 12*m.b72 + 12*m.b75 - m.x184 - m.x187 + m.x240 + m.x243 + m.x254 + m.x257 + m.x268 + m.x271
                           + m.x282 + m.x285 + m.x338 + m.x341 <= 12)
m.c4053 = Constraint(expr= 12*m.b72 + 12*m.b76 - m.x184 - m.x188 + m.x240 + m.x244 + m.x254 + m.x258 + m.x268 + m.x272
                           + m.x282 + m.x286 + m.x338 + m.x342 <= 12)
m.c4054 = Constraint(expr= 12*m.b73 + 12*m.b77 - m.x185 - m.x189 + m.x241 + m.x245 + m.x255 + m.x259 + m.x269 + m.x273
                           + m.x283 + m.x287 + m.x339 + m.x343 <= 12)
m.c4055 = Constraint(expr= 12*m.b73 + 12*m.b78 - m.x185 - m.x190 + m.x241 + m.x246 + m.x255 + m.x260 + m.x269 + m.x274
                           + m.x283 + m.x288 + m.x339 + m.x344 <= 12)
m.c4056 = Constraint(expr= 12*m.b73 + 12*m.b79 - m.x185 - m.x191 + m.x241 + m.x247 + m.x255 + m.x261 + m.x269 + m.x275
                           + m.x283 + m.x289 + m.x339 + m.x345 <= 12)
m.c4057 = Constraint(expr= 12*m.b74 + 12*m.b80 - m.x186 - m.x192 + m.x242 + m.x248 + m.x256 + m.x262 + m.x270 + m.x276
                           + m.x284 + m.x290 + m.x340 + m.x346 <= 12)
m.c4058 = Constraint(expr= 12*m.b74 + 12*m.b81 - m.x186 - m.x193 + m.x242 + m.x249 + m.x256 + m.x263 + m.x270 + m.x277
                           + m.x284 + m.x291 + m.x340 + m.x347 <= 12)
m.c4059 = Constraint(expr= 12*m.b75 + 12*m.b82 - m.x187 - m.x194 + m.x243 + m.x250 + m.x257 + m.x264 + m.x271 + m.x278
                           + m.x285 + m.x292 + m.x341 + m.x348 <= 12)
m.c4060 = Constraint(expr= 12*m.b77 + 12*m.b82 - m.x189 - m.x194 + m.x245 + m.x250 + m.x259 + m.x264 + m.x273 + m.x278
                           + m.x287 + m.x292 + m.x343 + m.x348 <= 12)
m.c4061 = Constraint(expr= 12*m.b79 + 12*m.b85 - m.x191 - m.x197 + m.x247 + m.x253 + m.x261 + m.x267 + m.x275 + m.x281
                           + m.x289 + m.x295 + m.x345 + m.x351 <= 12)
m.c4062 = Constraint(expr= 12*m.b81 + 12*m.b85 - m.x193 - m.x197 + m.x249 + m.x253 + m.x263 + m.x267 + m.x277 + m.x281
                           + m.x291 + m.x295 + m.x347 + m.x351 <= 12)
m.c4063 = Constraint(expr= 12*m.b82 + 12*m.b83 - m.x194 - m.x195 + m.x250 + m.x251 + m.x264 + m.x265 + m.x278 + m.x279
                           + m.x292 + m.x293 + m.x348 + m.x349 <= 12)
m.c4064 = Constraint(expr= 12*m.b84 + 12*m.b85 - m.x196 - m.x197 + m.x252 + m.x253 + m.x266 + m.x267 + m.x280 + m.x281
                           + m.x294 + m.x295 + m.x350 + m.x351 <= 12)
m.c4065 = Constraint(expr= 12*m.b72 + 12*m.b73 + 12*m.b74 - m.x184 - m.x185 - m.x186 + m.x240 + m.x241 + m.x242
                           + m.x254 + m.x255 + m.x256 + m.x268 + m.x269 + m.x270 + m.x282 + m.x283 + m.x284 + m.x338
                           + m.x339 + m.x340 <= 12)
m.c4066 = Constraint(expr= 12*m.b76 + 12*m.b83 + 12*m.b84 - m.x188 - m.x195 - m.x196 + m.x244 + m.x251 + m.x252
                           + m.x258 + m.x265 + m.x266 + m.x272 + m.x279 + m.x280 + m.x286 + m.x293 + m.x294 + m.x342
                           + m.x349 + m.x350 <= 12)
m.c4067 = Constraint(expr= 12*m.b78 + 12*m.b83 + 12*m.b84 - m.x190 - m.x195 - m.x196 + m.x246 + m.x251 + m.x252
                           + m.x260 + m.x265 + m.x266 + m.x274 + m.x279 + m.x280 + m.x288 + m.x293 + m.x294 + m.x344
                           + m.x349 + m.x350 <= 12)
m.c4068 = Constraint(expr= 12*m.b80 + 12*m.b83 + 12*m.b84 - m.x192 - m.x195 - m.x196 + m.x248 + m.x251 + m.x252
                           + m.x262 + m.x265 + m.x266 + m.x276 + m.x279 + m.x280 + m.x290 + m.x293 + m.x294 + m.x346
                           + m.x349 + m.x350 <= 12)
m.c4069 = Constraint(expr= 12*m.b86 + 12*m.b89 - m.x198 - m.x201 + m.x240 + m.x243 + m.x254 + m.x257 + m.x268 + m.x271
                           + m.x282 + m.x285 + m.x296 + m.x299 + m.x338 + m.x341 <= 12)
m.c4070 = Constraint(expr= 12*m.b86 + 12*m.b90 - m.x198 - m.x202 + m.x240 + m.x244 + m.x254 + m.x258 + m.x268 + m.x272
                           + m.x282 + m.x286 + m.x296 + m.x300 + m.x338 + m.x342 <= 12)
m.c4071 = Constraint(expr= 12*m.b87 + 12*m.b91 - m.x199 - m.x203 + m.x241 + m.x245 + m.x255 + m.x259 + m.x269 + m.x273
                           + m.x283 + m.x287 + m.x297 + m.x301 + m.x339 + m.x343 <= 12)
m.c4072 = Constraint(expr= 12*m.b87 + 12*m.b92 - m.x199 - m.x204 + m.x241 + m.x246 + m.x255 + m.x260 + m.x269 + m.x274
                           + m.x283 + m.x288 + m.x297 + m.x302 + m.x339 + m.x344 <= 12)
m.c4073 = Constraint(expr= 12*m.b87 + 12*m.b93 - m.x199 - m.x205 + m.x241 + m.x247 + m.x255 + m.x261 + m.x269 + m.x275
                           + m.x283 + m.x289 + m.x297 + m.x303 + m.x339 + m.x345 <= 12)
m.c4074 = Constraint(expr= 12*m.b88 + 12*m.b94 - m.x200 - m.x206 + m.x242 + m.x248 + m.x256 + m.x262 + m.x270 + m.x276
+ m.x284 + m.x290 + m.x298 + | |
"""
fftmap library:
This library can be used to create 2D maps. Map here means a large
"virtual" array of floating point numbers, which is virtual in the sense
that the array is filled procedurally, block by block, as needed. The map
can be defined to contain different spatial frequencies in different
proportions.
The block by block generation of the map is achieved by allocating arrays as
needed, and then filling those arrays with random data. The random data is
first generated as randomized frequency spectrum, which is then filtered
according to user defined spectral weighting. This weighted spectrum is then
transformed into "position space" data via 2D FFT. This position space
data is in turn filtered with a window function to generate a noise block
with smoothly decaying edges. Multiple noise blocks obtained in this way
are then interlaced together to obtain a smooth noise spectrum everywhere in
the map.
For any set of input parameters, including seed, the generated map is always
the same, regardless of the order in which the map is created or explored.
It is possible to declare very large maps without using much memory, since
the arrays are only allocated when needed. Especially if the FFTMap object
is made to use sparse arrays for some data structures, the maps can be
made practically unlimited in size.
See the documentation on the FFTMap class for details of how to use.
--------------------------------------------------------------------------------
LICENCE - MIT Licence
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
"""
import numpy as np
import scipy.sparse
def spline(x):
    """Piecewise-quadratic window profile on [-1, 1].

    Rises from 0 at x = -1 to 1 at x = 0 and falls back to 0 at x = 1,
    with the quadratic pieces switching at x = +/-0.5.
    """
    if x < -0.5:
        return 4 * (0.5 * x ** 2 + x + 0.5)
    elif x < 0.5:
        return 4 * (-0.5 * x ** 2 + 0.25)
    return 4 * (0.5 * x ** 2 - x + 0.5)
def create_filter_window(block_size):
    """Sample ``spline`` at ``block_size`` evenly spaced points in (-1, 1).

    Returns a 1-D float array usable as a separable blending window.
    """
    positions = ((2 * i - (block_size - 1)) / block_size for i in range(block_size))
    return np.array([spline(t) for t in positions])
class FFTMap_:
"""
This is the "core" class, which is not intended to be used directly.
Use FFTMap instead.
"""
def __init__(self, n_blocks, block_size, spectral_filter, seed=None, array_type="ndarray"):
self.n_blocks_x = n_blocks[0]
self.n_blocks_y = n_blocks[1]
self.block_size = block_size
self.fft_block_size = 2 * block_size
# Completed blocks are added here
self.blocks_list = []
if array_type == "ndarray":
# Indices to self.blocks_list
self.blocks_indices = np.full([self.n_blocks_x, self.n_blocks_y], -1)
# Status of blocks
self.blocks_initialized = np.full([self.n_blocks_x, self.n_blocks_y], False)
self.blocks_finished = np.full([self.n_blocks_x, self.n_blocks_y], False)
self.fft_blocks_applied = np.full([self.n_blocks_x, self.n_blocks_y], False)
elif array_type == "dok_matrix":
# Indices to self.blocks_list
self.blocks_indices = scipy.sparse.dok_matrix((self.n_blocks_x, self.n_blocks_y), dtype=int)
# Status of blocks
self.blocks_initialized = scipy.sparse.dok_matrix((self.n_blocks_x, self.n_blocks_y), dtype=bool)
self.blocks_finished = scipy.sparse.dok_matrix((self.n_blocks_x, self.n_blocks_y), dtype=bool)
self.fft_blocks_applied = scipy.sparse.dok_matrix((self.n_blocks_x, self.n_blocks_y), dtype=bool)
else:
raise ValueError("Array type not recognized.")
# Create masks
self.calculate_spectral_mask(spectral_filter)
self.calculate_spatial_mask()
# Normalize spectral mask
self.normalize_spectral_mask()
# Seed
if type(seed) == type(None):
self.seed = np.random.PCG64().random_raw()
elif type(seed) == int:
self.seed = seed
else:
raise ValueError("Invalid seed. Must be an integer.")
# Get a single value
def get_value(self, x, y):
    """Return the map value at absolute coordinates (x, y).

    The containing block is generated lazily on first access.  Raises
    ValueError when the coordinates fall inside the outermost ring of
    blocks, whose indices must lie in [1, n_blocks - 2].
    """
    # Calculate block index and extra offset
    block_index_x, extra_x = divmod(x, self.block_size)
    block_index_y, extra_y = divmod(y, self.block_size)
    # Check input
    if (block_index_x < 1) or (block_index_x > self.n_blocks_x - 2):
        raise ValueError("block_index_x=%d is out of bounds" % block_index_x)
    if (block_index_y < 1) or (block_index_y > self.n_blocks_y - 2):
        # BUGFIX: this message previously interpolated block_index_x,
        # reporting the wrong coordinate in the error text.
        raise ValueError("block_index_y=%d is out of bounds" % block_index_y)
    # Finish creating the target block
    if not self.blocks_finished[block_index_x, block_index_y]:
        self.finish_block(block_index_x, block_index_y)
    # Retrieve the value
    block = self.blocks_list[self.blocks_indices[block_index_x, block_index_y]]
    return block[extra_x, extra_y]
# Get a range of values. This has a lower overhead compared to doing the
# same thing with get_value: the needed blocks are finished once, then the
# rectangle is copied out slice-by-slice.
def get_values(self, x1, x2, y1, y2):
    """Return the map values in the rectangle [x1, x2) x [y1, y2) as a
    float ndarray of shape (x2 - x1, y2 - y1)."""
    # Split each absolute coordinate into (block index, in-block offset).
    block_index_x1, extra_x1 = divmod(x1, self.block_size)
    block_index_x2, extra_x2 = divmod(x2, self.block_size)
    block_index_y1, extra_y1 = divmod(y1, self.block_size)
    block_index_y2, extra_y2 = divmod(y2, self.block_size)
    # Check input: block indices must stay within [1, n_blocks - 2]; the
    # outermost ring of blocks is never served (same rule as get_value).
    if (block_index_x1 < 1):
        raise ValueError("block_index_x1=%d is out of bounds" % block_index_x1)
    if (block_index_x2 > self.n_blocks_x - 2):
        raise ValueError("block_index_x2=%d is out of bounds" % block_index_x2)
    if (block_index_y1 < 1):
        raise ValueError("block_index_y1=%d is out of bounds" % block_index_y1)
    if (block_index_y2 > self.n_blocks_y - 2):
        raise ValueError("block_index_y2=%d is out of bounds" % block_index_y2)
    if (x1 >= x2) or (y1 >= y2):
        raise ValueError("Invalid range specified for get_values.")
    # Finish creating the required blocks (lazy generation).
    for block_index_x in range(block_index_x1, block_index_x2 + 1):
        for block_index_y in range(block_index_y1, block_index_y2 + 1):
            if not self.blocks_finished[block_index_x, block_index_y]:
                self.finish_block(block_index_x, block_index_y)
    # Retrieve the values: walk the covered blocks along x then y, clipping
    # the first and last block of each axis to the requested offsets.
    copied_data = np.empty([x2 - x1, y2 - y1], dtype=float)
    x_data_size_total = x2 - x1
    y_data_size_total = y2 - y1
    copied_data_x_index = 0
    block_index_x = block_index_x1
    while copied_data_x_index < x_data_size_total:
        # Source x-range inside the current block.
        if block_index_x == block_index_x1:
            blx1 = extra_x1
        else:
            blx1 = 0
        if block_index_x == block_index_x2:
            blx2 = extra_x2
        else:
            blx2 = self.block_size
        x_data_size_step = blx2 - blx1
        # Destination x-range in the output array.
        retx1 = copied_data_x_index
        retx2 = copied_data_x_index + x_data_size_step
        # Copy the column of blocks at this x index.
        copied_data_y_index = 0
        block_index_y = block_index_y1
        while copied_data_y_index < y_data_size_total:
            # Source y-range inside the current block.
            if block_index_y == block_index_y1:
                bly1 = extra_y1
            else:
                bly1 = 0
            if block_index_y == block_index_y2:
                bly2 = extra_y2
            else:
                bly2 = self.block_size
            y_data_size_step = bly2 - bly1
            # Destination y-range in the output array.
            rety1 = copied_data_y_index
            rety2 = copied_data_y_index + y_data_size_step
            # Copy one block slice.
            block = self.blocks_list[self.blocks_indices[block_index_x, block_index_y]]
            copied_data[retx1:retx2, rety1:rety2] = block[blx1: blx2, bly1: bly2]
            # Increment
            copied_data_y_index += y_data_size_step
            block_index_y += 1
        # Increment
        copied_data_x_index += x_data_size_step
        block_index_x += 1
    return copied_data
def finish_block(self, block_index_x, block_index_y):
    """Make the block at (block_index_x, block_index_y) ready for reads.

    A block is complete once the four FFT tiles overlapping it (at offsets
    -1/0 along each axis) have all been applied.
    """
    for dx in (-1, 0):
        for dy in (-1, 0):
            fx = block_index_x + dx
            fy = block_index_y + dy
            if not self.fft_blocks_applied[fx, fy]:
                self.apply_fft_block(fx, fy)
    # Mark this block as completed
    self.blocks_finished[block_index_x, block_index_y] = True
def apply_fft_block(self, fft_index_x, fft_index_y):
    """Generate one FFT noise tile and accumulate it into the four map
    blocks it overlaps, allocating those blocks on first touch."""
    # Make sure all four destination blocks exist.
    for dx in (0, 1):
        for dy in (0, 1):
            bx = fft_index_x + dx
            by = fft_index_y + dy
            if not self.blocks_initialized[bx, by]:
                self.blocks_indices[bx, by] = len(self.blocks_list)
                self.blocks_list.append(np.zeros([self.block_size, self.block_size]))
                self.blocks_initialized[bx, by] = True
    # Build the tile: deterministic noise -> spectral weighting -> 2D FFT
    # (real part) -> spatial window.
    tile = self.get_rng_mask(fft_index_x, fft_index_y)
    tile *= self.spectral_mask
    tile = np.fft.fft2(tile).real
    tile *= self.spatial_mask
    # Scatter the four quadrants of the tile onto their map blocks.
    size = self.block_size
    for dx in (0, 1):
        for dy in (0, 1):
            dest = self.blocks_list[self.blocks_indices[fft_index_x + dx, fft_index_y + dy]]
            dest += tile[dx * size: (dx + 1) * size, dy * size: (dy + 1) * size]
    # Mark this fft block as applied
    self.fft_blocks_applied[fft_index_x, fft_index_y] = True
# Creates spectral filtering mask
def calculate_spectral_mask(self, spectral_filter):
spectral_mask = np.zeros([self.fft_block_size, self.fft_block_size])
for i in range(self.fft_block_size):
for j in range(self.fft_block_size):
i_ = self.fft_block_size - i
j_ = self.fft_block_size - j
spectral_mask[i, j] = (
spectral_filter(np.sqrt(i ** 2 + j ** 2) / self.fft_block_size) +
spectral_filter(np.sqrt(i_ ** 2 + j ** 2) / self.fft_block_size) +
spectral_filter(np.sqrt(i ** 2 + | |
grasp)[what]
# \}
def event(self, gripper, handle, what, default):
    """Return event `what` of the end-effector task built for this
    gripper/handle pair, or `default` when the task defines no events."""
    # No handle means the gripper opens; a handle means it closes on it.
    action = "open" if handle is None else "close"
    ee = self._buildGripper(action, gripper, handle)
    if hasattr(ee, "events"):
        return ee.events.get(what, default)
    return default
## Create a set of controllers for a set of tasks.
#
# A controller is created for each transition of the graph of constraints.
#
# See the following example for usage.
# \code{.py}
# from agimus_sot import Supervisor
# from agimus_sot.factory import Factory, Affordance
# from agimus_sot.task import Task
# from agimus_sot.srdf_parser import parse_srdf
# from hpp.corbaserver.manipulation import Rule
#
# # Constraint graph definition. Should be the same as the one used for planning
# # in HPP.
# grippers = [ "talos/left_gripper", ]
# objects = [ "box" ]
# handlesPerObjects = [ [ "box/handle1", "box/handle2" ], ]
# contactPerObjects = [ [ "box/bottom_surface", ] ]
# rules = [
# Rule([ "talos/left_gripper", ], [ "box/handle2", ], False),
# # Rule([ "talos/left_gripper", ], [ Object.handles[0], ], True),
# Rule([ "talos/left_gripper", ], [ ".*", ], True),
# # Rule([ "talos/right_gripper", ], [ Object.handles[1], ], True),
# ]
#
# # Parse SRDF files to extract gripper and handle information.
# srdf = {}
# srdfTalos = parse_srdf ("srdf/talos.srdf", packageName = "talos_data", prefix="talos")
# srdfBox = parse_srdf ("srdf/cobblestone.srdf", packageName = "gerard_bauzil", prefix="box")
# srdfTable = parse_srdf ("srdf/pedestal_table.srdf", packageName = "gerard_bauzil", prefix="table")
# for w in [ "grippers", "handles" ]:
# srdf[w] = dict()
# for d in [ srdfTalos, srdfBox, srdfTable ]:
# srdf[w].update (d[w])
#
#
# supervisor = Supervisor (robot, hpTasks = hpTasks(robot))
# factory = Factory(supervisor)
#
# # Define parameters
# factory.parameters["period"] = robot.getTimeStep() # This must be made available for your robot
# factory.parameters["simulateTorqueFeedback"] = simulateTorqueFeedbackForEndEffector
# factory.parameters["addTracerToAdmittanceController"] = True
#
# factory.setGrippers (grippers)
# factory.setObjects (objects, handlesPerObjects, contactPerObjects)
# factory.environmentContacts (["table/support",])
# factory.setRules (rules)
# factory.setupFrames (srdf["grippers"], srdf["handles"], robot, disabledGrippers=["table/pose",])
# # At the moment, one must manually enable visual feedback
# factory.gripperFrames["talos/left_gripper"].hasVisualTag = True
# factory.handleFrames["box/handle1"].hasVisualTag = True
# factory.addAffordance (
# Affordance ("talos/left_gripper", "box/handle1",
# openControlType="torque", closeControlType="torque",
# refs = { "angle_open": (0,), "angle_close": (-0.5,), "torque": (-0.05,) },
# controlParams = { "torque_num": ( 5000., 1000.),
# "torque_denom": (0.01,) },
# simuParams = { "refPos": (-0.2,) }))
# factory.addAffordance (
# Affordance ("talos/left_gripper", None,
# openControlType="position", closeControlType="position",
# refs = { "angle_open": (0,), "angle_close": (-0.5,), "torque": (-0.05,) },
# simuParams = { "refPos": (-0.2,) }))
# factory.generate ()
#
# supervisor.makeInitialSot ()
# \endcode
#
# \sa manipulation.constraint_graph_factory.GraphFactoryAbstract, TaskFactory,
# Affordance
class Factory(GraphFactoryAbstract):
class State:
    """One state of the constraint graph: the manifold of tasks that must
    hold there (gripper open/close tasks plus grasp tasks)."""
    def __init__ (self, tasks, grasps, factory):
        # `grasps` is indexed by gripper: grasps[ig] is the handle index
        # held by gripper ig, or None when that gripper is free.
        self.name = factory._stateName (grasps)
        self.grasps = grasps
        self.manifold = Task()
        # object name -> (gripper frame, handle frame) of the grasp that
        # already holds that object in this state.
        self.objectsAlreadyGrasped = {}
        for ig, ih in enumerate(grasps):
            gName = factory.grippers[ig]
            if ih is not None:
                gFrame = factory.gripperFrames[gName]
                hName = factory.handles[ih]
                hFrame = factory.handleFrames[hName]
                io = factory.objectFromHandle[ih]
                oName = factory.objects[io]
                # Add task gripper_close
                self.manifold += tasks.g (gName, hName, 'gripper_close')
                # Check if this grasp interferes with another grasp.
                # NOTE(review): for a non-controllable gripper whose object
                # is not yet grasped, the "other" grasp is looked up by the
                # gripper's robot name instead of the object name —
                # presumably the carrying robot plays the object's role
                # here; confirm against the graph factory semantics.
                if not gFrame.controllable and oName not in self.objectsAlreadyGrasped:
                    otherGrasp = self.objectsAlreadyGrasped.get(gFrame.robotName)
                else:
                    otherGrasp = self.objectsAlreadyGrasped.get(oName)
                self.manifold += tasks.g (gName, hName, 'grasp', otherGrasp)
                self.objectsAlreadyGrasped[oName] = (gFrame, hFrame)
            else:
                # Add task gripper_open
                self.manifold += tasks.g (gName, None, 'gripper_open')
def __init__(self, supervisor):
    """Initialize the factory's bookkeeping; controllers, SoTs and actions
    are filled in later by generate()."""
    super(Factory, self).__init__()
    self.tasks = TaskFactory(self)
    self.hpTasks = supervisor.hpTasks
    self.lpTasks = supervisor.lpTasks
    self.affordances = dict()
    self.objectAffordances = dict()
    self.sots = dict()
    ## Post-actions, keyed by the transition after which they run:
    ## transition name -> { reached state name: post-action SoT }
    ## (at most two entries, depending on whether the dst state was reached).
    self.postActions = dict()
    ## Pre-actions, keyed by the transition before which they run:
    ## transition name -> pre-action SoT.
    self.preActions = dict()
    self.tracers = {}
    self.controllers = {}
    self.supervisor = supervisor
    ## Accepted parameters:
    ## - period: [double, no default]
    ##           Time interval between two iterations of the graph.
    ## - simulateTorqueFeedback: [boolean, False]
    ##           do not use torque feedback from the robot
    ##           but simulate it instead.
    ## All tracing/timing switches default to off.
    self.parameters = {
        "addTracerToAdmittanceController": False,
        "addTimerToSotControl": False,
        "addTracerToSotControl": False,
        "addTracerToVisualServoing": False,
        "simulateTorqueFeedback": False,
    }
def _newSoT (self, name):
# Create a solver
sot = Solver (name,
self.sotrobot.dynamic.getDimension(),
damping = 0.001,
timer = self.parameters["addTimerToSotControl"],
)
# Make default event signals
# sot. doneSignal = self.supervisor.done_events.controlNormSignal
from .events import logical_and_entity
sot. doneSignal = logical_and_entity ("ade_"+sot.name,
[ self.supervisor.done_events.controlNormSignal,
self.supervisor.done_events.timeEllapsedSignal])
sot.errorSignal = False
if self.parameters["addTimerToSotControl"]:
id = len(self.SoTtracer.signals()) - 1
self.SoTtracer.add (sot.timer.name + ".timer", "solver_"+str(id) + ".timer")
if self.parameters["addTracerToSotControl"]:
id = len(self.SoTtracer.signals()) - 1
self.SoTtracer.add (sot.controlname, "solver_"+str(id) + ".control")
return sot
## Add an Affordance or ObjectAffordance
def addAffordance (self, aff):
if isinstance(aff, Affordance):
self.affordances [(aff.gripper, aff.handle)] = aff
elif isinstance(aff, ObjectAffordance):
self.objectAffordances [aff.object] = aff
else:
raise TypeError ("Argument should be of type Affordance or ObjectAffordance")
def generate (self):
from dynamic_graph.tracer_real_time import TracerRealTime
def addTracer(name, prefix, dir="/tmp", suffix=".txt", size=10 * 1048576):
# default size: 10Mo
tracer = TracerRealTime (name)
self.tracers[tracer.name] = tracer
tracer.setBufferSize (size)
tracer.open (dir, prefix, suffix)
self.sotrobot.device.after.addSignal(name + ".triger")
return tracer
# init tracers
if self.parameters["addTimerToSotControl"] or self.parameters["addTracerToSotControl"]:
self.SoTtracer = self.supervisor.SoTtracer = addTrace(
"tracer_of_solvers", "sot-control-trace")
if self.parameters["addTracerToVisualServoing"]:
self.ViStracer = self.supervisor.ViStracer = addTracer (
"visual_servoing_tracer", "visual-servoing-trace")
super(Factory, self).generate ()
self.supervisor.sots = {}
self.supervisor.grasps = { (gh, w): t for gh, ts in self.tasks._grasp.items() for w, t in ts.items() }
self.supervisor.placements = { (ogh, w): t for ogh, ts in self.tasks._placements.items() for w, t in ts.items() }
self.supervisor.hpTasks = self.hpTasks
self.supervisor.lpTasks = self.lpTasks
self.supervisor.postActions = {}
self.supervisor.preActions = {}
self.supervisor.tracers = self.tracers
self.supervisor.controllers = self.controllers
from dynamic_graph import plug
self.supervisor.sots_indexes = dict()
for tn,sot in self.sots.iteritems():
# Pre action
if self.preActions.has_key(tn):
self.supervisor.addPreAction (tn, self.preActions[tn])
# Action
self.supervisor.addSolver (tn, sot)
# Post action
if self.postActions.has_key(tn):
self.supervisor.addPostActions (tn, self.postActions[tn])
def setupFrames (self, srdfGrippers, srdfHandles, sotrobot, disabledGrippers = ()):
self.sotrobot = sotrobot
self.grippersIdx = { g: i for i,g in enumerate(self.grippers) }
self.handlesIdx = { h: i for i,h in enumerate(self.handles) }
self.gripperFrames = { g: OpFrame(srdfGrippers[g], sotrobot.name, sotrobot.dynamic.model, g not in disabledGrippers) for g in self.grippers }
self.handleFrames = { h: OpFrame(srdfHandles [h], sotrobot.name ) for h in self.handles }
def setupContactFrames (self, srdfContacts):
def addPose(c):
if 'position' not in c:
c.update ({'position': (0,0,0, 0,0,0,1)})
return c
self.contactFrames = { name: OpFrame(addPose(contact), self.sotrobot.name, enabled = True) for name, contact in srdfContacts.items() }
    def makeState (self, grasps, priority):
        # Nothing to do here besides instantiating the state; `priority` is
        # accepted for interface compatibility but not used by this factory.
        return Factory.State(self.tasks, grasps, self)
def makeLoopTransition (self, state):
n = self._loopTransitionName(state.grasps)
sot = self._newSoT ('sot_'+n)
from .events import logical_and_entity
sot. doneSignal = logical_and_entity("ade_sot_"+n,
[ self.supervisor.done_events.timeEllapsedSignal,
self.supervisor.done_events.controlNormSignal])
self.hpTasks.pushTo(sot)
state.manifold.pushTo(sot)
self.lpTasks.pushTo(sot)
self.sots[n] = sot
def makeTransition (self, stateFrom, stateTo, ig):
sf = stateFrom
st = stateTo
names = self._transitionNames(sf, st, ig)
iobj = self.objectFromHandle [st.grasps[ig]]
obj = self.objects[iobj]
noPlace = self._isObjectGrasped (sf.grasps, iobj)
#TODO compute other grasp on iobj
# it must be a grasp or pregrasp task
grasp = ( self.gripperFrames[self.grippers[ig]], self.handleFrames[self.handles[st.grasps[ig]]] )
if not grasp[0].controllable and obj not in sf.objectsAlreadyGrasped:
otherGrasp = sf.objectsAlreadyGrasped.get(grasp[0].robotName)
else:
otherGrasp = sf.objectsAlreadyGrasped.get(obj)
# The different cases:
pregrasp = True
intersec = not noPlace
preplace = not noPlace
# Start here
nWaypoints = pregrasp + intersec + preplace
nTransitions = 1 + nWaypoints
# Link waypoints
transitions = names[:]
assert nWaypoints > 0
M = 1 + pregrasp
sots = [ ]
for i in range(nTransitions):
ns = ("{0}_{1}{2}".format(names[0], i, i+1),
"{0}_{2}{1}".format(names[1], i, i+1))
for n in ns:
s = self._newSoT('sot_'+n)
from .events import logical_and_entity
s. doneSignal = logical_and_entity("ade_sot_"+n,
[ self.supervisor.done_events.timeEllapsedSignal,
self.supervisor.done_events.controlNormSignal ])
self.hpTasks.pushTo(s)
if pregrasp and i == 1:
# Add pregrasp task
pregraspT = self.tasks.g (self.grippers[ig], self.handles[st.grasps[ig]], 'pregrasp', otherGrasp = otherGrasp)
pregraspT.pushTo (s)
if preplace and i == nTransitions - 2:
# Add preplace task
preplaceT = self.tasks.p (obj, grasp, "preplace")
preplaceT.pushTo (s)
if i < M: sf.manifold.pushTo(s)
else: st.manifold.pushTo(s)
self.lpTasks.pushTo(s)
self.sots[n] = s
sots.append (n)
## Post-actions for transitions from
# 1. pregrasp to intersec, intersec (st) reached:
# x "pregrasp" is not kept because it would make the system slightly diverge when
# the object is not perfectly grasped (which is obviously always the case.
# - keep gripper pose
# - "gripper_close"
key = sots[2*(M-1)]
sot = self._newSoT ("postAction_" + key)
self.hpTasks.pushTo (sot)
# "gripper_close" is in st.manifold
# self.tasks.g (self.grippers[ig], self.handles[st.grasps[ig]], 'gripper_close').pushTo (sot)
self.tasks.g | |
#!/usr/bin/env python
#
# @author: <NAME>
# <NAME>
"""
nimsdata.medimg.nimspfile
=========================
This module provides functions, classes and errors for fully minimally parsing
and reconstructing pfiles. Additional modules are required to enable
full parsing of pfiles, spiral reconstruction, and mux_epi reconstruction.
"""
import os
import bson
import glob
import gzip
import json
import time
import shlex
import struct
import logging
import tarfile
import datetime
import subprocess
import bson.json_util
import numpy as np
import medimg
import dcm.mr.ge
import dcm.mr.generic_mr
from .. import tempdir as tempfile
log = logging.getLogger(__name__)
def unpack_uid(uid):
    """
    Convert packed PFile UID to standard DICOM UID.

    Each input byte packs two 4-bit values: 0 is padding (skipped),
    1-10 encode the digits '0'-'9', and values above 10 encode '.'.

    Parameters
    ----------
    uid : str
        packed PFile UID as a string

    Returns
    -------
    uid : str
        unpacked PFile UID as string
    """
    chars = []
    for byte in uid:
        for nibble in (ord(byte) >> 4, ord(byte) & 15):
            if nibble <= 0:
                continue  # zero nibbles are padding
            chars.append(str(nibble - 1) if nibble < 11 else '.')
    return ''.join(chars)
def is_gzip(filepath):
    """
    Check whether the file at filepath is gzip-compressed.

    Reads the first two bytes and compares them to the gzip magic number
    ``\\x1f\\x8b``.  (The previous docstring was an accidental copy of
    ``unpack_uid``'s.)

    Parameters
    ----------
    filepath : str
        path of the file to check

    Returns
    -------
    compressed : bool
        True if the file starts with the gzip magic number
    """
    # Compare against a bytes literal: identical under Python 2, and also
    # correct under Python 3 where binary reads return bytes, not str.
    with open(filepath, 'rb') as fp:
        compressed = (fp.read(2) == b'\x1f\x8b')
    return compressed
def get_version(filepath):
    """
    Determine the pfile version of the file at filepath.

    An NIMSPFileError exception will be raised if the file is not a valid PFile.

    Parameters
    ----------
    filepath : str
        filepath of file to check

    Returns
    -------
    version : int
        PFile version number of file at filepath

    Raises
    ------
    NIMSPFileError : Exception
        error if the file is not a valid PFile
    """
    # 4-byte version tag -> PFile version number
    version_map = {
        b'\x00\x00\xc0A': 24,
        b'V\x0e\xa0A': 23,
        b'J\x0c\xa0A': 22,
        b'\x00\x000A': 12,
    }
    fileobj = gzip.open(filepath, 'rb') if is_gzip(filepath) else open(filepath, 'rb')
    try:
        # try/finally fixes a descriptor leak: the original left the file
        # open when raising on an invalid PFile.
        version_bytes = fileobj.read(4)
        fileobj.seek(34)
        logo = (struct.unpack("10s", fileobj.read(struct.calcsize("10s")))[0]).split(b'\0', 1)[0]
        version = version_map.get(version_bytes)
        if version is None:
            raise NIMSPFileError(fileobj.name + ' is not a valid PFile or of an unsupported version')
        if logo not in (b'GE_MED_NMR', b'INVALIDNMR'):
            raise NIMSPFileError(fileobj.name + ' is not a valid PFile')
    finally:
        fileobj.close()
    return version
class NIMSPFileError(medimg.MedImgError):
    """Exception raised when a file is not a valid or supported PFile."""
    pass
class NIMSPFile(medimg.MedImgReader):
    """
    Parse and load data from a pfile.

    This class reads the data and/or header from a pfile, runs k-space reconstruction.

    NIMSPFile object can handle several different input types
    - .tgz of directory containing Pfile, and supporting files such as ref.dat, vrgf.dat and tensor.dat.
    - a single pfile, either gz or uncompressed.

    tgz cannot be "full parsed". setting full_parse=True, with an input tgz, will raise an exception.

    nims2 input tgz format
    Pfile.7, Pfile.7ref.dat, Pfile.7vrgf.dat Pfile.7ref

    .. code:: python

        import nimsdata
        ds = nimsdata.parse('pfile.tgz', filetype='pfile', load_data=True)
        if not ds.failure_reason:
            nimsdata.write(ds, ds.data, outbase='output_name', filetype='nifti')

    Some pfiles require calibration files from another scan. This 'aux_file' can be provided
    during `__init__()`, or `load_data()`.

    .. code:: python

        import nimsdata
        ds = nimsdata.parse('muxarcepi_nocal.tgz', filetype='pfile', load_data=True, aux_file='muxarcepi_cal.tgz')
        if not ds.failure_reason:
            nimsdata.write(ds, ds.data, outbase='output_name', filetype='nifti')

    .. code:: python

        import nimsdata
        ds = nimsdata.parse('muxarcepi_nocal.tgz', filetype='pfile', load_data=False)
        ds.load_data(aux_file='muxarcepi_cal.tgz')
        if no ds.failure_reason:
            nimsdata.write(ds, ds.data, outbase='output_name', filetype='nifti')
    """
    domain = u'mr'            # imaging domain handled by this reader
    filetype = u'pfile'       # nimsdata filetype identifier
    parse_priority = 5        # NOTE(review): presumably parser selection priority -- confirm against nimsdata dispatch
    state = ['orig']          # NOTE(review): presumably processing state tag for the dataset -- confirm
    def __init__(self, filepath, load_data=False, full_parse=False, tempdir=None, aux_file=None, num_jobs=4, num_virtual_coils=16, notch_thresh=0, recon_type=None):
        """
        Read basic sorting information.

        There are a lot of parameters; most of the parameters only apply to mux_epi scans. The muxepi only
        parameters are num_jobs, num_virtual_coils, notch_thresh, recon_type and aux_file.

        Parameters
        ----------
        filepath : str
            path to pfile.7 or pfile.tgz
        load_data : bool [default False]
            load all data and run reconstruction
        full_parse : bool [default False]
            full parse the input file, only applies to pfile.7 inputs
        tempdir : str
            path prefix to use for temp directory
        num_jobs : int
            muxepi only, number of simultaneous jobs
        num_virtual_coils : int
            muxepi only, number of virtual coils
        notch_thresh : int
            muxepi only, notch filter threshold
        recon_type : NoneType or str
            muxepi only, if recon_type is 'sense', then run sense recon
        aux_file : None or str
            path to pfile.tgz that contains valid vrgf.dat and ref.dat files
        """
        super(NIMSPFile, self).__init__(filepath)  # sets self.filepath
        self.full_parsed = False  # indicates if fully parsed
        self.dirpath = os.path.dirname(self.filepath)  # directory that contains the input file
        self.basename = os.path.basename(self.filepath)
        # TODO setting the file name and extension should be different for .7 and .7.tgz
        # if pfile_arc.tgz, file_name = pfile_arc, file_ext = .tgz
        # if P?????.7, file_name = P?????, file_ext = .7
        self.file_name, self.file_ext = os.path.splitext(self.filepath)
        self.num_jobs = num_jobs
        self.num_vcoils = num_virtual_coils
        self.notch_thresh = notch_thresh
        self.recon_type = recon_type
        self.aux_file = aux_file
        self.tempdir = tempdir
        self.data = None  # filled in later by load_data()
        log.debug('parsing %s' % filepath)
        if tarfile.is_tarfile(self.filepath):  # tgz; find json with a ['header'] section
            log.debug('tgz')
            with tarfile.open(self.filepath) as archive:
                # try each regular member until one parses as json with a
                # 'header' section; sorting metadata is taken from there
                for ti in archive:
                    if not ti.isreg():
                        continue
                    try:
                        _hdr = json.load(archive.extractfile(ti), object_hook=bson.json_util.object_hook)['header']
                    except ValueError as e:  # member is not a json file
                        log.debug('%s; not a json file' % e)
                    except KeyError as e:  # header section does not exist
                        log.debug('%s; header section does not exist' % e)
                    else:
                        log.debug('_min_parse_tgz')
                        self.exam_uid = _hdr.get('session')
                        self.acquisition_id = _hdr.get('acquisition')
                        self.timestamp = _hdr.get('timestamp')
                        self.group_name = _hdr.get('group')
                        self.project_name = _hdr.get('project')
                        self.metadata_status = 'pending'
                        break
                else:
                    # for/else: no archive member provided a usable header
                    raise NIMSPFileError('no json file with header section found. bailing', log_level=logging.WARNING)
        else:  # .7 or .7.gz, doing it old world style
            try:
                self.version = get_version(self.filepath)
                self._full_parse(self.filepath) if full_parse else self._min_parse(self.filepath)  # full_parse arg indicates run full_parse
            except Exception as e:
                raise NIMSPFileError('not a PFile? %s' % str(e))
        if load_data:
            self.load_data()
def infer_psd_type(self):
"""
Infer the psd type based on self.psd_type.
Also makes any corrections to the psd_type to account for mis-named psds.
Returns
-------
None : NoneType
sets self.psd_type
"""
dcm.mr.ge.infer_psd_type(self)
if self.psd_type == 'epi' and int(self._hdr.rec.user6) > 0: # XXX HACK check for misnamed mux scans
self.psd_type = 'muxepi'
log.debug('psd_name: %s, psd_type: %s' % (self.psd_name, self.psd_type))
    def infer_scan_type(self):
        """
        Infer the scan type based on the dataset attributes.

        Delegates to the generic MR heuristics in dcm.mr.generic_mr, which
        read attributes set during parsing and set self.scan_type.

        Returns
        -------
        None : NoneType
            sets self.scan_type
        """
        dcm.mr.generic_mr.infer_scan_type(self)
        log.debug('scan_type: %s' % self.scan_type)
def _min_parse(self, filepath=None):
"""
Parse the minimum sorting information from a pfile.7.
Does not work if input file is a tgz. If NIMSPfile was init'd with a tgz input, the tgz can be
unpacked into a temporary directory, and then this function can parse the unpacked pfile.
Parameters
----------
filepath : str
path to a pfile.7. Does not accept pfile.tgz.
"""
filepath = filepath or self.filepath # use filepath if provided, else fall back to self.filepath
if tarfile.is_tarfile(filepath):
raise NIMSPFileError('_min_parse() expects a .7 or .7.gz')
log.debug('_min_parse of %s' % filepath)
fileobj = gzip.open(filepath, 'rb') if is_gzip(self.filepath) else open(filepath, 'rb')
fileobj.seek(16); self.scan_date = str(struct.unpack("10s", fileobj.read(struct.calcsize("10s")))[0])
fileobj.seek(26); self.scan_time = str(struct.unpack("8s", fileobj.read(struct.calcsize("8s")))[0])
fileobj.seek(64); self.num_timepoints = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
fileobj.seek(70); self.num_echos = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
fileobj.seek(216); self.rec_user0 = struct.unpack("f", fileobj.read(struct.calcsize("f")))[0]
fileobj.seek(240); self.rec_user6 = struct.unpack("f", fileobj.read(struct.calcsize("f")))[0]
fileobj.seek(244); self.rec_user7 = struct.unpack("f", fileobj.read(struct.calcsize("f")))[0]
fileobj.seek(914); self.ileaves = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
if self.version in [24, 23, 22]:
fileobj.seek(143516); self.exam_no = str(struct.unpack("H", fileobj.read(struct.calcsize("H")))[0])
fileobj.seek(145622); self.series_no = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
fileobj.seek(145762); self.series_desc = (struct.unpack("65s", fileobj.read(struct.calcsize("65s")))[0]).split('\0', 1)[0]
fileobj.seek(145875); self.series_uid = unpack_uid(struct.unpack("32s", fileobj.read(struct.calcsize("32s")))[0])
fileobj.seek(148388); self.im_datetime = struct.unpack("i", fileobj.read(struct.calcsize("i")))[0]
fileobj.seek(148396); self.tr = struct.unpack("i", fileobj.read(struct.calcsize("i")))[0] / 1e6
fileobj.seek(148834); self.acq_no = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
fileobj.seek(148972); self.psd_name = os.path.basename(struct.unpack("33s", fileobj.read(struct.calcsize("33s")))[0]).split('\0', 1)[0].lower()
if self.version in [24, 23]:
fileobj.seek(144248); self.exam_uid = unpack_uid(struct.unpack("32s", fileobj.read(struct.calcsize("32s")))[0])
fileobj.seek(144409); self.patient_id = (struct.unpack("65s", fileobj.read(struct.calcsize("65s")))[0]).split('\0', 1)[0]
if self.version == 22:
fileobj.seek(144240); self.exam_uid = unpack_uid(struct.unpack("32s", fileobj.read(struct.calcsize("32s")))[0])
fileobj.seek(144401); self.patient_id = (struct.unpack("65s", fileobj.read(struct.calcsize("65s")))[0]).split('\0', 1)[0]
if self.version == 12:
fileobj.seek(61576); self.exam_no = str(struct.unpack("H", fileobj.read(struct.calcsize("H")))[0])
fileobj.seek(61966); self.exam_uid = unpack_uid(struct.unpack("32s", fileobj.read(struct.calcsize("32s")))[0])
fileobj.seek(62127); self.patient_id = (struct.unpack("65s", fileobj.read(struct.calcsize("65s")))[0]).split('\0', 1)[0]
fileobj.seek(62710); self.series_no = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
fileobj.seek(62786); self.series_desc = (struct.unpack("65s", fileobj.read(struct.calcsize("65s")))[0]).split('\0', 1)[0]
fileobj.seek(62899); self.series_uid = unpack_uid(struct.unpack("32s", fileobj.read(struct.calcsize("32s")))[0])
fileobj.seek(65016); self.im_datetime = struct.unpack("i", fileobj.read(struct.calcsize("i")))[0]
fileobj.seek(65024); self.tr = struct.unpack("i", fileobj.read(struct.calcsize("i")))[0] / 1e6
fileobj.seek(65328); self.acq_no = struct.unpack("h", fileobj.read(struct.calcsize("h")))[0]
fileobj.seek(65374); self.psd_name = os.path.basename(struct.unpack("33s", flleobj.read(struct.calcsize("33s")))[0]).split('\0', 1)[0].lower()
if self.im_datetime > 0:
self.timestamp = datetime.datetime.utcfromtimestamp(self.im_datetime)
else:
month, day, year = map(int, self.scan_date.split('\0', 1)[0].split('/'))
hour, minute = map(int, self.scan_time.split('\0', 1)[0].split(':'))
self.timestamp = datetime.datetime(year + 1900, month, day, hour, minute) # GE's epoch begins in 1900
self.infer_psd_type()
if self.psd_type == 'spiral':
self.num_timepoints = int(self.rec_user0)
elif self.psd_type == 'basic':
self.num_timepoints = (self.num_timepoints * self.num_echos - 6) / 2
elif self.psd_type == 'muxepi':
self.num_timepoints = self.num_timepoints + int(self.rec_user6) * self.ileaves * (int(self.rec_user7) - 1)
self.prescribed_duration = self.num_timepoints * self.tr
self.subj_code, self.group_name, self.project_name = medimg.parse_patient_id(self.patient_id, 'ex' + self.exam_no)
self.metadata_status | |
works properly with float32 data."""
p = np.asarray([940.85083008, 923.78851318, 911.42022705, 896.07220459,
876.89404297, 781.63330078], np.float32) * units('hPa')
hgt = np.asarray([563.671875, 700.93817139, 806.88098145, 938.51745605,
1105.25854492, 2075.04443359], dtype=np.float32) * units.meter
true_p_layer = np.asarray([940.85083008, 923.78851318, 911.42022705, 896.07220459,
876.89404297, 831.86472819], np.float32) * units('hPa')
true_hgt_layer = np.asarray([563.671875, 700.93817139, 806.88098145, 938.51745605,
1105.25854492, 1549.8079], dtype=np.float32) * units.meter
if flip_order:
p = p[::-1]
hgt = hgt[::-1]
p_layer, hgt_layer = get_layer(p, hgt, height=hgt, depth=1000. * units.meter)
assert_array_almost_equal(p_layer, true_p_layer, 4)
assert_array_almost_equal(hgt_layer, true_hgt_layer, 4)
def test_get_layer_ragged_data():
    """Test that an error is raised for unequal length pressure and data arrays."""
    pressure = np.arange(10) * units.hPa
    data = np.arange(9) * units.degC
    with pytest.raises(ValueError):
        get_layer(pressure, data)
def test_get_layer_invalid_depth_units():
    """Test that an error is raised when depth has invalid units."""
    pressure = np.arange(10) * units.hPa
    data = np.arange(9) * units.degC
    with pytest.raises(ValueError):
        get_layer(pressure, data, depth=400 * units.degC)
def layer_test_data():
    """Provide test data for testing of layer bounds."""
    press = np.arange(1000, 10, -100) * units.hPa
    temp = np.linspace(25, -50, len(press)) * units.degC
    return press, temp
# Cases: (1) 150 hPa depth with interpolation, (2) same depth without
# interpolation (no boundary point added), (3) a height-bounded layer.
@pytest.mark.parametrize('pressure, variable, heights, bottom, depth, interp, expected', [
    (layer_test_data()[0], layer_test_data()[1], None, None, 150 * units.hPa, True,
     (np.array([1000, 900, 850]) * units.hPa,
      np.array([25.0, 16.666666, 12.62262]) * units.degC)),
    (layer_test_data()[0], layer_test_data()[1], None, None, 150 * units.hPa, False,
     (np.array([1000, 900]) * units.hPa, np.array([25.0, 16.666666]) * units.degC)),
    (layer_test_data()[0], layer_test_data()[1], None, 2 * units.km, 3 * units.km, True,
     (np.array([794.85264282, 700., 600., 540.01696548]) * units.hPa,
      np.array([7.93049516, 0., -8.33333333, -13.14758845]) * units.degC))
])
def test_get_layer(pressure, variable, heights, bottom, depth, interp, expected):
    """Test get_layer functionality."""
    p_layer, y_layer = get_layer(pressure, variable, height=heights, bottom=bottom,
                                 depth=depth, interpolate=interp)
    assert_array_almost_equal(p_layer, expected[0], 4)
    assert_array_almost_equal(y_layer, expected[1], 4)
def test_greater_or_close():
    """Test floating point greater or close to."""
    values = np.array([0.0, 1.0, 1.49999, 1.5, 1.5000, 1.7])
    expected = np.array([False, False, True, True, True, True])
    assert_array_equal(_greater_or_close(values, 1.5), expected)
def test_greater_or_close_mixed_types():
    """Test _greater_or_close with mixed Quantity and array errors."""
    quantity = 1000. * units.mbar
    plain = 1000.
    with pytest.raises(ValueError):
        _greater_or_close(plain, quantity)
    with pytest.raises(ValueError):
        _greater_or_close(quantity, plain)
def test_less_or_close():
    """Test floating point less or close to."""
    values = np.array([0.0, 1.0, 1.49999, 1.5, 1.5000, 1.7])
    expected = np.array([True, True, True, True, True, False])
    assert_array_equal(_less_or_close(values, 1.5), expected)
def test_less_or_close_mixed_types():
    """Test _less_or_close with mixed Quantity and array errors."""
    quantity = 1000. * units.mbar
    plain = 1000.
    with pytest.raises(ValueError):
        _less_or_close(plain, quantity)
    with pytest.raises(ValueError):
        _less_or_close(quantity, plain)
def test_get_layer_heights_interpolation():
    """Test get_layer_heights with interpolation."""
    hgts_in = np.arange(10) * units.km
    data_in = hgts_in.m * 2 * units.degC
    hgts, data = get_layer_heights(hgts_in, 5000 * units.m, data_in, bottom=1500 * units.m)
    expected_hgts = np.array([1.5, 2, 3, 4, 5, 6, 6.5]) * units.km
    expected_data = expected_hgts.m * 2 * units.degC
    assert_array_almost_equal(expected_hgts, hgts, 6)
    assert_array_almost_equal(expected_data, data, 6)
def test_get_layer_heights_no_interpolation():
    """Test get_layer_heights without interpolation."""
    hgts_in = np.arange(10) * units.km
    data_in = hgts_in.m * 2 * units.degC
    hgts, data = get_layer_heights(hgts_in, 5000 * units.m, data_in,
                                   bottom=1500 * units.m, interpolate=False)
    expected_hgts = np.array([2, 3, 4, 5, 6]) * units.km
    expected_data = expected_hgts.m * 2 * units.degC
    assert_array_almost_equal(expected_hgts, hgts, 6)
    assert_array_almost_equal(expected_data, data, 6)
def test_get_layer_heights_agl():
    """Test get_layer_heights when heights are converted to above ground level."""
    hgts_in = np.arange(300, 1200, 100) * units.m
    data_in = hgts_in.m * 0.1 * units.degC
    hgts, data = get_layer_heights(hgts_in, 500 * units.m, data_in, with_agl=True)
    expected_hgts = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5]) * units.km
    expected_data = np.array([30, 40, 50, 60, 70, 80]) * units.degC
    assert_array_almost_equal(expected_hgts, hgts, 6)
    assert_array_almost_equal(expected_data, data, 6)
def test_get_layer_heights_agl_bottom_no_interp():
    """Test get_layer_heights with no interpolation and a bottom."""
    hgts_in = np.arange(300, 1200, 100) * units.m
    data_in = hgts_in.m * 0.1 * units.degC
    hgts, data = get_layer_heights(hgts_in, 500 * units.m, data_in, with_agl=True,
                                   interpolate=False, bottom=200 * units.m)
    # Regression test for #789: the input heights must not be mutated.
    assert_array_equal(hgts_in[0], 300 * units.m)
    expected_hgts = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7]) * units.km
    expected_data = np.array([50, 60, 70, 80, 90, 100]) * units.degC
    assert_array_almost_equal(expected_hgts, hgts, 6)
    assert_array_almost_equal(expected_data, data, 6)
def test_lat_lon_grid_deltas_1d():
    """Test for lat_lon_grid_deltas for variable grid."""
    lat = np.arange(40, 50, 2.5)
    lon = np.arange(-100, -90, 2.5)
    dx, dy = lat_lon_grid_deltas(lon, lat)
    # Zonal spacing (dx) shrinks toward higher latitude; meridional spacing
    # (dy) is constant for this equally spaced latitude grid.
    dx_truth = np.array([[212943.5585, 212943.5585, 212943.5585],
                         [204946.2305, 204946.2305, 204946.2305],
                         [196558.8269, 196558.8269, 196558.8269],
                         [187797.3216, 187797.3216, 187797.3216]]) * units.meter
    dy_truth = np.array([[277987.1857, 277987.1857, 277987.1857, 277987.1857],
                         [277987.1857, 277987.1857, 277987.1857, 277987.1857],
                         [277987.1857, 277987.1857, 277987.1857, 277987.1857]]) * units.meter
    assert_almost_equal(dx, dx_truth, 4)
    assert_almost_equal(dy, dy_truth, 4)
# BUG FIX: the parameter list was `[(False, True)]`, a single tuple value.
# A tuple is truthy, so flip_order was always "true" and the unflipped 2-D
# case never ran.  `[False, True]` runs both cases.
@pytest.mark.parametrize('flip_order', [False, True])
def test_lat_lon_grid_deltas_2d(flip_order):
    """Test for lat_lon_grid_deltas for variable grid with negative delta distances."""
    lat = np.arange(40, 50, 2.5)
    lon = np.arange(-100, -90, 2.5)
    dx_truth = np.array([[212943.5585, 212943.5585, 212943.5585],
                         [204946.2305, 204946.2305, 204946.2305],
                         [196558.8269, 196558.8269, 196558.8269],
                         [187797.3216, 187797.3216, 187797.3216]]) * units.meter
    dy_truth = np.array([[277987.1857, 277987.1857, 277987.1857, 277987.1857],
                         [277987.1857, 277987.1857, 277987.1857, 277987.1857],
                         [277987.1857, 277987.1857, 277987.1857, 277987.1857]]) * units.meter
    if flip_order:
        # Reversed coordinates should give negated, reversed deltas.
        lon = lon[::-1]
        lat = lat[::-1]
        dx_truth = -1 * dx_truth[::-1]
        dy_truth = -1 * dy_truth[::-1]
    lon, lat = np.meshgrid(lon, lat)
    dx, dy = lat_lon_grid_deltas(lon, lat)
    assert_almost_equal(dx, dx_truth, 4)
    assert_almost_equal(dy, dy_truth, 4)
def test_lat_lon_grid_deltas_extra_dimensions():
    """Test for lat_lon_grid_deltas with extra leading dimensions."""
    lon, lat = np.meshgrid(np.arange(-100, -90, 2.5), np.arange(40, 50, 2.5))
    # Prepend two singleton dimensions; the returned deltas are expected to
    # carry the same leading (1, 1, ...) shape.
    lat = lat[None, None]
    lon = lon[None, None]
    dx_truth = np.array([[[[212943.5585, 212943.5585, 212943.5585],
                           [204946.2305, 204946.2305, 204946.2305],
                           [196558.8269, 196558.8269, 196558.8269],
                           [187797.3216, 187797.3216, 187797.3216]]]]) * units.meter
    dy_truth = (np.array([[[[277987.1857, 277987.1857, 277987.1857, 277987.1857],
                            [277987.1857, 277987.1857, 277987.1857, 277987.1857],
                            [277987.1857, 277987.1857, 277987.1857, 277987.1857]]]])
                * units.meter)
    dx, dy = lat_lon_grid_deltas(lon, lat)
    assert_almost_equal(dx, dx_truth, 4)
    assert_almost_equal(dy, dy_truth, 4)
def test_lat_lon_grid_deltas_mismatched_shape():
    """Test that mismatched 1-D lat and 2-D lon shapes raise an error."""
    lat = np.arange(40, 50, 2.5)
    lon_row = np.array([-100., -97.5, -95., -92.5])
    lon = np.tile(lon_row, (4, 1))
    with pytest.raises(ValueError):
        lat_lon_grid_deltas(lon, lat)
def test_lat_lon_grid_deltas_geod_kwargs():
    """Test that geod kwargs are overridden by users #774."""
    lat = np.arange(40, 50, 2.5)
    lon = np.arange(-100, -90, 2.5)
    # A user-supplied Geod (semi-major axis 4370997 m) must be honored, so
    # the expected deltas differ from the default values in the other tests.
    dx, dy = lat_lon_grid_deltas(lon, lat, geod=Geod(a=4370997))
    dx_truth = np.array([[146095.76101984, 146095.76101984, 146095.76101984],
                         [140608.9751528, 140608.9751528, 140608.9751528],
                         [134854.56713287, 134854.56713287, 134854.56713287],
                         [128843.49645823, 128843.49645823, 128843.49645823]]) * units.meter
    dy_truth = np.array([[190720.72311199, 190720.72311199, 190720.72311199, 190720.72311199],
                         [190720.72311199, 190720.72311199, 190720.72311199, 190720.72311199],
                         [190720.72311199, 190720.72311199, 190720.72311199,
                          190720.72311199]]) * units.meter
    assert_almost_equal(dx, dx_truth, 4)
    assert_almost_equal(dy, dy_truth, 4)
@pytest.fixture()
def deriv_1d_data():
    """Return 1-dimensional data for testing derivative functions."""
    D1D = namedtuple('D_1D_Test_Data', 'x values')
    return D1D(x=np.array([0, 1.25, 3.75]) * units.cm,
               values=np.array([13.5, 12, 10]) * units.degC)
@pytest.fixture()
def deriv_2d_data():
    """Return 2-dimensional data for analytic function for testing derivative functions."""
    # f is first created as the placeholder 0, then replaced below once the
    # other fields are available to compute it.
    ret = namedtuple('D_2D_Test_Data', 'x y x0 y0 a b f')(
        np.array([0., 2., 7.]), np.array([1., 5., 11., 13.]), 3, 1.5, 0.5, 0.25, 0)
    # Makes a value array with y changing along rows (axis 0) and x along columns (axis 1):
    # f(x, y) = a*(x - x0)**2 + b*(y - y0)**2, broadcast via y[:, None].
    return ret._replace(f=ret.a * (ret.x - ret.x0)**2 + ret.b * (ret.y[:, None] - ret.y0)**2)
@pytest.fixture()
def deriv_4d_data():
    """Return simple 4-dimensional data for testing axis handling of derivative functions."""
    # 3 * 3 * 4 * 4 consecutive integers reshaped to 4-D
    return np.arange(3 * 3 * 4 * 4).reshape(3, 3, 4, 4)
def test_first_derivative(deriv_1d_data):
    """Test first_derivative with a simple 1D array."""
    result = first_derivative(deriv_1d_data.values, x=deriv_1d_data.x)
    # Worked by hand and taken from Chapra and Canale 23.2
    expected = np.array([-1.333333, -1.06666667, -0.5333333]) * units('delta_degC / cm')
    assert_array_almost_equal(result, expected, 5)
def test_first_derivative_2d(deriv_2d_data):
    """Test first_derivative with a full 2D array."""
    d = deriv_2d_data
    df_dx = first_derivative(d.f, x=d.x, axis=1)
    analytic_dx = np.tile(2 * d.a * (d.x - d.x0), (d.f.shape[0], 1))
    assert_array_almost_equal(df_dx, analytic_dx, 5)
    df_dy = first_derivative(d.f, x=d.y, axis=0)
    # Repeat each row, then transpose to get variation along rows
    analytic_dy = np.tile(2 * d.b * (d.y - d.y0), (d.f.shape[1], 1)).T
    assert_array_almost_equal(df_dy, analytic_dy, 5)
def test_first_derivative_too_small(deriv_1d_data):
    """Test first_derivative with too small an array."""
    column = deriv_1d_data.values[None, :].T
    with pytest.raises(ValueError):
        first_derivative(column, x=deriv_1d_data.x, axis=1)
def test_first_derivative_scalar_delta():
    """Test first_derivative with a scalar passed for a delta."""
    result = first_derivative(np.arange(3), delta=1)
    assert_array_almost_equal(result, np.array([1., 1., 1.]), 6)
def test_first_derivative_masked():
    """Test that first_derivative properly propagates masks."""
    data = np.ma.arange(7)
    data[3] = np.ma.masked
    result = first_derivative(data, delta=1)
    # The mask spreads to the neighbors of the masked element.
    expected = np.ma.array([1.] * 7,
                           mask=[False, False, True, True, True, False, False])
    assert_array_almost_equal(result, expected)
    assert_array_equal(result.mask, expected.mask)
def test_first_derivative_masked_units():
    """Test that first_derivative properly propagates masks with units."""
    x = units('m') * np.ma.arange(7)
    data = units('K') * np.ma.arange(7)
    data[3] = np.ma.masked
    result = first_derivative(data, x=x)
    expected = units('K / m') * np.ma.array(
        [1.] * 7, mask=[False, False, True, True, True, False, False])
    assert_array_almost_equal(result, expected)
    assert_array_equal(result.mask, expected.mask)
def test_second_derivative(deriv_1d_data):
    """Test second_derivative with a simple 1D array."""
    result = second_derivative(deriv_1d_data.values, x=deriv_1d_data.x)
    # Constant curvature, worked by hand
    expected = np.ones_like(deriv_1d_data.values) * 0.2133333 * units('delta_degC/cm**2')
    assert_array_almost_equal(result, expected, 5)
def test_second_derivative_2d(deriv_2d_data):
    """Test second_derivative with a full 2D array."""
    d = deriv_2d_data
    # d2f/dx2 == 2a and d2f/dy2 == 2b everywhere for the quadratic surface
    expected_xx = np.ones_like(d.f) * (2 * d.a)
    assert_array_almost_equal(second_derivative(d.f, x=d.x, axis=1), expected_xx, 5)
    expected_yy = np.ones_like(d.f) * (2 * d.b)
    assert_array_almost_equal(second_derivative(d.f, x=d.y, axis=0), expected_yy, 5)
def test_second_derivative_too_small(deriv_1d_data):
"""Test second_derivative with too small an | |
<filename>advntr/plot.py
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('Agg')
def plot1():
    """Plot each two-column stats file as a scatter plot saved to <name>.png.

    Each input file holds whitespace-separated (x, y) pairs, one per line;
    blank lines are skipped.
    """
    import matplotlib.pyplot as plt  # hoisted: was re-imported on every loop iteration
    stat_files = ['0_size_related_reads.txt', '1_size_sensitivity.txt', '2_size_blast_selected.txt',
                  '3_sim_read_coverage__gc_content.txt']
    x_label = {0: 'Pattern Size', 1: 'Pattern Size', 2: 'Pattern Size', 3: 'Simulated Read Coverage'}
    y_label = {0: 'Reads from the pattern', 1: 'Sensitivity', 2: 'Number of Selected Reads by Blast', 3: 'GC Content'}
    # enumerate replaces the hand-maintained `i` counter
    for i, file_name in enumerate(stat_files):
        X = []
        Y = []
        with open(file_name) as input_file:
            for line in input_file:
                line = line.strip()
                if not line:
                    continue
                nums = [float(n) for n in line.split()]
                X.append(nums[0])
                Y.append(nums[1])
        plt.plot(X, Y, 'o')
        plt.xlabel(x_label[i])
        plt.ylabel(y_label[i])
        plt.savefig('%s.png' % file_name)  # save the figure to file
        plt.close()
def plot2():
    """Overlay results from two scoring settings, one figure per stat file.

    Bug fix: ``X`` and ``Y`` were initialized once *outside* the file loop,
    so the second figure silently accumulated the first file's points (and
    X/Y[0] lengths diverged).  They are now reset for every stat file.
    """
    import matplotlib.pyplot as plt
    dirs = ['original_case_r1_p1/', 'penalty_3_reward_1/']
    stat_files = ['1_size_sensitivity.txt', '2_size_blast_selected.txt']
    x_label = {1: 'Pattern Size', 2: 'Pattern Size'}
    y_label = {1: 'Sensitivity', 2: 'Number of Selected Reads by Blast'}
    diagram_index = 1
    for file_name in stat_files:
        X = []
        Y = [[], []]
        for i in range(len(dirs)):
            with open(dirs[i] + file_name) as input_file:
                for line in input_file:
                    line = line.strip()
                    if not line:
                        continue
                    nums = [float(n) for n in line.split()]
                    if i == 0:
                        # x values assumed identical across dirs; collect once
                        X.append(nums[0])
                    Y[i].append(nums[1])
        plt.plot(X, Y[0], 'o', label='same penalty and reward')
        plt.plot(X, Y[1], 'o', label='penalty = 3 * reward')
        plt.xlabel(x_label[diagram_index])
        plt.ylabel(y_label[diagram_index])
        plt.legend(loc=0)
        plt.savefig('compare_%s.png' % file_name)  # save the figure to file
        plt.close()
        diagram_index += 1
def plot_coverage_comparison():
    """Plot copy-count ratio per sequencing coverage plus the per-coverage mean.

    Reads one ratio file per coverage level (second column is
    ``copy count / true copy count``) and writes ``compare_coverages.png``.
    """
    import matplotlib.pyplot as plt
    stat_files = ['10X_ratio.txt', '20X_ratio.txt', '30X_ratio.txt']
    coverages = [10, 20, 30]
    X = [[], [], []]
    Y = [[], [], []]
    for i in range(len(stat_files)):
        with open(stat_files[i]) as input_file:
            for line in input_file:
                line = line.strip()
                if not line:
                    continue
                nums = [float(n) for n in line.split()]
                X[i].append(coverages[i])
                Y[i].append(nums[1])
    plt.plot(X[0], Y[0], 'o', label='10X')
    plt.plot(X[1], Y[1], 'o', label='20X')
    plt.plot(X[2], Y[2], 'o', label='30X')
    # builtin sum() replaces the hand-rolled accumulator, which shadowed `sum`
    averages = [float(sum(ys)) / len(ys) for ys in Y]
    plt.plot(coverages, averages, label='avg')
    plt.xlabel('Coverage')
    plt.ylabel('Copy Count / True Copy Count')
    plt.legend(loc=0)
    plt.savefig('compare_%s.png' % 'coverages')  # save the figure to file
    plt.close()
def get_x_and_y_from_file(file_name, exclude_x=None):
    """Read a two-column stat file and return its points sorted by x.

    Blank lines are skipped.  When *exclude_x* is given and non-empty, rows
    whose x value is contained in it are dropped.  Returns (X, Y) lists.
    """
    pairs = []
    with open(file_name) as handle:
        for raw in handle:
            stripped = raw.strip()
            if not stripped:
                continue
            values = [float(token) for token in stripped.split()]
            pairs.append((values[0], values[1]))
    xs, ys = [], []
    for px, py in sorted(pairs):
        if exclude_x and px in exclude_x:
            continue
        xs.append(px)
        ys.append(py)
    return xs, ys
def get_numbers_from_file(file_name):
    """Return the numbers in *file_name*, one float per non-blank line.

    Bug fix: a blank line (including a bare trailing newline at EOF) used to
    raise ``ValueError`` via ``float('')``.  Blank lines are now skipped,
    matching how every other reader in this module treats empty lines.
    """
    with open(file_name) as input_file:
        return [float(line) for line in input_file if line.strip()]
def get_pattern_result_map(file_name):
    """Parse FP/sensitivity rows grouped by pattern id.

    Each non-blank line holds: ``FP sensitivity evalue pattern_id
    pattern_len TP``.  Returns ``(res, min_len, max_len)`` where *res* maps
    the pattern-id token to a list of
    ``(FP, sensitivity, evalue, pattern_len, TP)`` tuples and min/max_len
    track the observed pattern lengths.

    Fix: the locals ``len`` and ``input`` shadowed the builtins of the same
    name; they are renamed.
    """
    res = {}
    min_len = 100  # sentinel from the original; observed lengths are below it
    max_len = 0
    with open(file_name) as stats_file:
        for line in stats_file:
            line = line.strip()
            if not line:
                continue
            fp, sensitivity, evalue, p_num, p_len, tp = line.split()
            length = float(p_len)
            min_len = min(min_len, length)
            max_len = max(max_len, length)
            # setdefault replaces the `if p_num not in res.keys()` dance
            res.setdefault(p_num, []).append(
                (int(fp), float(sensitivity), float(evalue), int(p_len), int(tp)))
    return res, min_len, max_len
def plot_sensitivity_over_fallout():
    """Plot sensitivity vs. false positives for a sample of patterns.

    Only every fifth pattern (id % 5 == 4) is drawn, and only when its curve
    spans a meaningful sensitivity range (max >= 0.8, min <= 0.3).
    Output: '<stat_file>.png'.
    """
    stat_file = 'FP_and_sensitivity_evalue_min_len50.0.txt'
    # stat_file2 = 'fallout_and_sensitivity_min_len50.0_seq_68.txt'
    # NOTE(review): these X, Y are immediately shadowed inside the loop below;
    # this call only appears to be vestigial.
    X, Y = get_x_and_y_from_file(stat_file)
    import matplotlib.pyplot as plt
    cmap = plt.get_cmap('seismic')
    pattern_results, min_len, max_len = get_pattern_result_map(stat_file)
    for p_num in pattern_results.keys():
        if int(p_num) % 5 != 4:
            continue
        # if p_num != '19':
        #     continue
        X = []
        Y = []
        points = []
        color = None
        for FP, sens, evalue, length, TP in pattern_results[p_num]:
            points.append((FP, sens))
            # NOTE(review): `color` is computed but never passed to plt.plot;
            # also raises ZeroDivisionError when max_len == min_len — confirm.
            color = cmap((float(length) - min_len) / (max_len - min_len))
        points.sort()
        # Keep only curves that reach high sensitivity and start low
        if points[len(points)-1][1] < 0.8:
            continue
        if points[0][1] > 0.3:
            continue
        for x, y in points:
            # if x > 1000:
            #     continue
            X.append(x)
            Y.append(y)
        # `length` here is the loop variable's last value, i.e. this
        # pattern's length (constant across its rows)
        plt.plot(X, Y, label='Pid:%s |Pattern|: %s' % (p_num, length))
    # plt.xscale('log')
    plt.xlabel('False Positives')
    plt.ylabel('Sensitivity')
    # plt.legend(loc=4, prop={'size':9})
    # plt.colorbar()
    plt.savefig('%s.png' % stat_file)  # save the figure to file
    plt.close()
def plot_tandem_copy_number_and_genome_copy_number():
    """Plot per pattern size the ratio of VNTR copies found genome-wide
    (100Kbp region) to copies found in the ~1Kbp reference region.

    X (pattern sizes) comes from the sensitivity file and Y (ratios) from
    the copy-number analysis file; the two files are assumed row-aligned.
    Cleanup: removed the unused ``Y2`` local and the index-based line loop.
    """
    import matplotlib.pyplot as plt
    stat_file = 'copy_number_analysis.txt'
    sens_file = 'original_case_r1_p1/1_size_sensitivity.txt'
    X = []
    Y = []
    with open(stat_file) as input_file:
        for line in input_file:
            line = line.strip()
            if not line:
                continue
            nums = [float(n) for n in line.split()]
            Y.append(nums[1] / nums[0])
    with open(sens_file) as input_file:
        for line in input_file:
            line = line.strip()
            if not line:
                continue
            nums = [float(n) for n in line.split()]
            X.append(nums[0])
    plt.plot(X, Y, 'o', color='blue', label='CN in 100Kbp region / CN in ~1Kbp region')
    plt.xlabel('Pattern size')
    plt.ylabel('Copies 100k / Copies in 1K')
    plt.legend(loc=0)
    plt.savefig('CN_in_genome_compare.png')  # save the figure to file
    plt.close()
def plot_reference_repeats():
    """Compare TRF (vntrseek) reference repeat counts with this method's
    counts, per pattern id.  Writes 'reference_repeats.png'.

    Fix: the file handles no longer shadow the builtin ``input``, and the
    figure is closed after saving (the original leaked it into later plots).
    """
    import matplotlib.pyplot as plt
    with open('vntrseek_repeats.txt') as repeats_file:
        vntrseek_repeats = [int(float(num.strip())) for num in repeats_file]
    with open('pattern_repeat_counts.txt') as counts_file:
        our_repeats = [int(num.strip()) for num in counts_file]
    X = list(range(1, len(our_repeats) + 1))  # 1-based pattern ids
    plt.xlabel('Pattern ID')
    plt.ylabel('Number of Tandem Repeats in Reference Genome')
    plt.plot(X, vntrseek_repeats, '-o', color='blue', label='TRF Repeats')
    plt.plot(X, our_repeats, '--o', color='red', label="Our Method's Repeats")
    plt.legend(loc=0)
    plt.savefig('reference_repeats.png')
    plt.close()
def plot_copy_count_comparison(eliminated_nodes):
    """Plot log2(estimated copy count / true copy count) per pattern id.

    Parameters
    ----------
    eliminated_nodes : collection
        Pattern ids to exclude from both input files.
    """
    import matplotlib.pyplot as plt
    from math import log
    # NOTE(review): vntr_coverage_ratio feeds only the commented-out plot
    # below; the file is still read for its X (pattern id) values.
    X, vntr_coverage_ratio = get_x_and_y_from_file('vntr_coverage_ratio.txt', exclude_x=eliminated_nodes)
    _, hmm_y = get_x_and_y_from_file('hmm_repeat_count.txt', exclude_x=eliminated_nodes)
    hmm_y = [log(y, 2) for y in hmm_y]
    plt.xlabel('Pattern ID')
    plt.ylabel('Log of Computed Copy Number divided by True Copy Number')
    plt.plot(X, hmm_y, '--o', color='red', label="Log of Estimared Copy Count Divided by True Copy Count")
    # plt.ylim([0.6, 2])
    # plt.plot(X, vntr_coverage_ratio, color='green', label="VNTR Coverage Ratio in NGS Reads")
    plt.legend(loc=0, fontsize = 'x-small')
    plt.savefig('copy_count_comparison_log.png')
    plt.close()
def plot_FP_for_specific_sensitivity(eliminated_nodes, sensitivity=0.95):
    """For each pattern, find the minimum false-positive count at which the
    given sensitivity is reached, for both the BLAST and HMM scoring files.

    NOTE(review): the plotting and savefig calls are commented out, so this
    currently only *prints* the HMM results; blast_fps is computed but never
    used.  The y-label still says 0.9 although the default is 0.95.
    """
    hmm_fps = {}
    blast_fps = {}
    with open('FP_and_sensitivity_evalue_min_len50.0.txt') as input:
        lines = [line.strip() for line in input.readlines() if line.strip() != '']
        for line in lines:
            FP, sens, _, pattern_id, pattern_len, _ = line.split()
            sens = float(sens)
            FP = int(FP)
            pattern_id = int(pattern_id)
            # eliminated_nodes is 0-based; pattern ids are 1-based
            if pattern_id - 1 in eliminated_nodes:
                continue
            if sens >= sensitivity:
                if pattern_id not in blast_fps.keys():
                    blast_fps[pattern_id] = FP
                blast_fps[pattern_id] = min(blast_fps[pattern_id], FP)
    with open('FP_and_sensitivity_HMM_read_scoring_method.txt') as input:
        lines = [line.strip() for line in input.readlines() if line.strip() != '']
        for line in lines:
            FP, sens, _, pattern_id, pattern_len, _ = line.split()
            sens = float(sens)
            FP = int(FP)
            pattern_id = int(pattern_id)
            if pattern_id - 1 in eliminated_nodes:
                continue
            if sens >= sensitivity:
                if pattern_id not in hmm_fps.keys():
                    hmm_fps[pattern_id] = FP
                hmm_fps[pattern_id] = min(hmm_fps[pattern_id], FP)
    # X = sorted(list(set(hmm_fps.keys()) & set(blast_fps.keys())))
    X = sorted(list(hmm_fps.keys()))
    blast_fps_y = []
    hmm_fps_y = []
    for x in X:
        # blast_fps_y.append(blast_fps[x])
        hmm_fps_y.append(hmm_fps[x])
    import matplotlib.pyplot as plt
    plt.xlabel('Pattern ID')
    plt.ylabel('False Positives for Sensitivity of 0.9')
    # plt.plot(X, blast_fps_y, '-o',color='blue', label='BLAST False Positives')
    print(X)
    print(hmm_fps_y)
    # plt.plot(X, hmm_fps_y, '-o',color='red', label='False Positive Selected Reads')
    plt.legend(loc=0)
    # plt.savefig('false_positives_for_sensitivity_of_09.png')
    plt.close()
def plot_coverage_ratio_histogram():
    """Histogram of log2(coverage ratio) values from 'vntr_coverage_ratio.txt'.

    NOTE(review): the histogram is built from m.keys(), i.e. *distinct*
    ratio values only — duplicate ratios across VNTRs collapse to one count.
    Presumably each VNTR should count once (weight by len(m[ratio]));
    confirm before trusting the '# VNTR' axis.  The figure is also never
    closed.
    """
    m = {}
    from math import log
    with open('vntr_coverage_ratio.txt') as input:
        lines = input.readlines()
        for line in lines:
            index, ratio = line.strip().split()
            ratio = log(float(ratio), 2)
            # group VNTR indices by their log-ratio value
            if ratio not in m.keys():
                m[ratio] = []
            m[ratio].append(index)
    l = list(m.keys())
    xbins = [-0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
    import matplotlib.pyplot as plt
    plt.hist(l, xbins, color='blue')
    plt.xlabel('log(coverage ratio)')
    plt.ylabel('# VNTR')
    plt.savefig('coverage_ratio.png')
def plot_gc_content_violin_plot():
    """Violin plot of read coverage bucketed by GC content.

    Reads alignments from 'original_reads/paired_dat.sam' via the project's
    CoverageBiasDetector and writes 'gc_coverage_violinplot_simulated.png'.
    Buckets are presumably GC-content deciles (keys 0..9, positions 0..90%)
    — TODO confirm against get_gc_content_coverage_map().
    """
    from coverage_bias import CoverageBiasDetector
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    bias_detector = CoverageBiasDetector('original_reads/paired_dat.sam')
    gc_coverage_map = bias_detector.get_gc_content_coverage_map()
    data = []
    pos = []
    for gc_content in range(0, 10):
        pos.append(gc_content * 10)
        # NaN placeholder keeps violinplot from failing on empty buckets
        data.append([float('nan'), float('nan')])
        if gc_content in gc_coverage_map:
            data[gc_content] = gc_coverage_map[gc_content]
    plt.violinplot(data, pos, widths=7, showmeans=True)
    plt.xlabel('GC Content Percentage')
    plt.ylabel('Coverage')
    plt.savefig('gc_coverage_violinplot_simulated.png')
def plot_frequency_of_repeats_in_population(MAOA=False):
from matplotlib import rc, rcParams
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.rcParams['axes.facecolor'] = '#FFFFFF'
rc('text', usetex=True)
rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath']
plt.title('Read Recruitment Comparison')
plt.gca().spines['bottom'].set_color('black')
plt.gca().spines['left'].set_color('black')
ax = list([])
x_label_font = 12
y_label_font = 13
plt.ylabel(r'\emph{RU Count Frequency}', fontsize=y_label_font, labelpad=8)
# Turn off axis lines and ticks of the big subplot
for i in range(0):
ax[i].spines['top'].set_color('none')
ax[i].spines['bottom'].set_color('none')
ax[i].spines['left'].set_color('none')
ax[i].spines['right'].set_color('none')
ax[i].tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
plt.ylim(top=100)
legend_size = 12
if not MAOA:
R5_array = (45, 51, 56)
R4_array = (33, 10, 20)
R2_array = (22, 18, 12)
R3_array = (0, 21, 12)
R5 = np.array(R5_array)
R4 = np.array(R4_array)
R2 = np.array(R2_array)
R3 = np.array(R3_array)
ind = (0, 1, 2)
width = 0.35
p5 = plt.bar(ind, R5, width)
p4 = plt.bar(ind, R4, | |
<filename>sparclur/_spotlight.py
from __future__ import annotations
import copy
import os
import shutil
import tempfile
from collections import defaultdict
from typing import List, Union, Dict, Any, Tuple
import numpy as np
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor as Executor
from inspect import signature
import pandas as pd
from sparclur._parser import SparclurHash, TEXT, RENDER, META, REJECTED, REJECTED_AMBIG, VALID_WARNINGS, VALID, FONT, \
TRACER
from sparclur.parsers import present_parsers
from sparclur._parser import Parser
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
def _merge_dict(d1, d2):
if isinstance(d1, dict) and isinstance(d2, dict):
new_dict = dict()
keys = set().union(d1.keys()).union(d2.keys())
for key in keys:
new_dict[key] = _merge_dict(d1.get(key, dict()), d2.get(key, dict()))
return new_dict
else:
if isinstance(d1, dict):
return d2
else:
return d1
def _mapper(entry):
    """Instantiate one parser on one document version and wrap the result.

    *entry* is a dict with 'base_path', 'parser' (a Parser class),
    'version', and optional constructor 'args'.  The document is expected
    at <base_path>/<parser name>/<version>.pdf.  Returns
    (parser_name, SpotlightResult) so results can be grouped by parser.
    """
    base_path = entry['base_path']
    parser = entry['parser']
    version = entry['version']
    args = entry.get('args',
                     dict())
    # point the parser at the version-specific PDF
    args['doc'] = os.path.join(base_path, parser.get_name(), version + '.pdf')
    p = parser(**args)
    # accessing these properties presumably triggers the actual parse — confirm
    validity = p.validity
    sparclur_hash = p.sparclur_hash
    spotlight_result = SpotlightResult(p.get_name(), version, validity, sparclur_hash)
    return p.get_name(), spotlight_result
def _reducer(entry):
(parser_name, spotlight_results) = entry
spotlight_result = spotlight_results[0]
for result in spotlight_results[1:]:
spotlight_result.update(result)
return spotlight_result
def _combiner_mapper(entry):
    """Compute (and cache) one pairwise hash comparison for one parser.

    Mutates the given SpotlightResult in place via _compare_hashes and
    returns (parser_name, spotlight_result) so results can be regrouped.
    """
    parser = entry['parser']
    left_version = entry['left']
    right_version = entry['right']
    spotlight: SpotlightResult = entry['spotlight']
    spotlight._compare_hashes(parser, left_version, right_version)
    return parser, spotlight
class SpotlightResult:
"""
Results from running Spotlight over a document. Provides the following methods:
validity_report: Provide a table of validity results for the parsers over the original document and the reforges
overall_validity: The overall validity classification for the document given the parsers
recoverable: Returns whether or not the document is unambiguously recoverable
sim_heatmap: A heatmap of the similarity scores for each parser over given pairs of documents
sim_sunburst: A sunburst of the similarity scores. Provides an interactive way to engage with the heatmap.
"""
    def __init__(self, parser, version, validity, sparclur_hash):
        """Seed the result map for a single parser/version observation."""
        # Nested mapping: parser -> version -> {'validity': ..., 'hash': ...};
        # every other method reads or extends this structure.
        self._spotlight_result = {parser: {version: {'validity': validity, 'hash': sparclur_hash}}}
    def keys(self):
        """Return the parser names present in this result (a dict view)."""
        return self._spotlight_result.keys()
def __getitiem__(self, key):
return self._spotlight_result[key]
    def __repr__(self):
        """Summarize overall validity; non-valid documents also report
        whether they look recoverable."""
        overall_validity = self.overall_validity()
        rep = 'PDF Validity: {validity}'.format(validity=overall_validity)
        if overall_validity != VALID:
            # recoverable() is defined outside this view — presumably returns
            # a printable recovery summary; confirm.
            rep = rep + '\n%s' % self.recoverable()
        return rep
def get(self, key, default):
if key in self._spotlight_result:
return self._spotlight_result[key]
else:
return default
def update(self, that: SpotlightResult):
new_spotlight = _merge_dict(self._spotlight_result, that._spotlight_result)
self._spotlight_result = new_spotlight
# parsers = set().union(self.keys()).union(that.keys())
# for parser in parsers:
# if parser in self._spotlight_result:
# self._spotlight_result[parser].update(that._spotlight_result.get(parser, dict()))
# else:
# self._spotlight_result[parser] = that._spotlight_result[parser]
@property
def _parsers(self):
return list(self._spotlight_result.keys())
@property
def _versions(self):
v = set()
for results in self._spotlight_result.values():
v.update(results.keys())
return list(v)
    def _compare_hashes(self, parser, left, right):
        """Cache the hash comparison between two versions, symmetrically.

        Ensures both ``[left]['comparisons'][right]`` and
        ``[right]['comparisons'][left]`` hold the same comparison dict,
        computing SparclurHash.compare() only when neither direction is
        already cached.
        """
        # Ensure the 'comparisons' sub-dict exists on each side and note
        # whether the other side is already present there.
        if 'comparisons' in self._spotlight_result[parser][left]:
            right_in_left = right in self._spotlight_result[parser][left]['comparisons']
        else:
            self._spotlight_result[parser][left]['comparisons'] = dict()
            right_in_left = False
        if 'comparisons' in self._spotlight_result[parser][right]:
            left_in_right = left in self._spotlight_result[parser][right]['comparisons']
        else:
            self._spotlight_result[parser][right]['comparisons'] = dict()
            left_in_right = False
        # Mirror an existing one-sided result instead of recomputing.
        if left_in_right and not right_in_left:
            comparison = self._spotlight_result[parser][right]['comparisons'][left]
            self._spotlight_result[parser][left]['comparisons'][right] = comparison
        if not left_in_right and right_in_left:
            comparison = self._spotlight_result[parser][left]['comparisons'][right]
            self._spotlight_result[parser][right]['comparisons'][left] = comparison
        # Neither side cached: do the (expensive) comparison once, store twice.
        if not left_in_right and not right_in_left:
            left_hash: SparclurHash = self._spotlight_result[parser][left]['hash']
            right_hash: SparclurHash = self._spotlight_result[parser][right]['hash']
            comparison = left_hash.compare(right_hash)
            self._spotlight_result[parser][left]['comparisons'][right] = comparison
            self._spotlight_result[parser][right]['comparisons'][left] = comparison
    def validity_report(self, report='overall', excluded_parsers=None):
        """
        Return a table of the validity classifications for each parser over each document.
        Parameters
        ----------
        report : {`overall`, `Renderer`, `Text Extraction`, `Font Extraction`, `Metadata Extraction`, `Tracer`}
            Overall takes into account all of the tools of the given parser, or a specific tool can be specified
        excluded_parsers : str or List[str]
            Parsers to exclude from the report
        Returns
        -------
        DataFrame
            A DataFrame of the resulting labels
        """
        # Normalize excluded_parsers to a list (possibly empty).
        if excluded_parsers is not None:
            if isinstance(excluded_parsers, str):
                excluded_parsers = [excluded_parsers]
        else:
            excluded_parsers = []
        parsers = [parser for parser in self._parsers if parser not in excluded_parsers]
        # Column order: 'original' first, then reforged versions sorted by name.
        cols = self._versions
        cols.remove('original')
        cols.sort()
        cols.insert(0, 'original')
        d = []
        for parser in parsers:
            row = dict()
            for col in cols:
                # Chained .get()s tolerate parsers that never saw this version
                # (cell stays None and may be dropped by dropna below).
                row[col] = self._spotlight_result[parser].get(col, dict())\
                    .get('validity', dict())\
                    .get(report, dict())\
                    .get('status', None)
            d.append(row)
        # Final row: combined judgement across all included parsers.
        row = dict()
        for col in cols:
            row[col] = self.overall_validity(col, excluded_parsers=excluded_parsers)
        d.append(row)
        parsers.append("Overall")
        df = pd.DataFrame(d, index=parsers)
        # Drop rows where no version produced any status at all.
        return df.dropna(how='all')
def overall_validity(self, version='original', excluded_parsers=None):
"""
Return a classification for the overall validity of the specified document
Parameters
----------
version : {`original`, `MuPDF`, `Poppler`, `Ghostscript`}
The document version to classify
excluded_parsers : str or List[str]
Any parsers to exclude in the determination of the overall validity
Returns
-------
str
The validity label
"""
if excluded_parsers is not None:
if isinstance(excluded_parsers, str):
excluded_parsers = [excluded_parsers]
else:
excluded_parsers = []
assert max([version in v.keys() for v in self._spotlight_result.values()]), 'Version not found'
validities = {result[version]['validity']['overall']['status']
for parser, result in self._spotlight_result.items() if parser not in excluded_parsers}
if REJECTED in validities:
return REJECTED
elif REJECTED_AMBIG in validities:
return REJECTED_AMBIG
elif VALID_WARNINGS in validities:
return VALID_WARNINGS
else:
return VALID
    def _sunburst_data(self, compare_orig, full):
        """Flatten cached pairwise similarity scores into a DataFrame with
        columns Parser/inner/outer/sim for px.sunburst.

        When *full* is set every ordered version pair appears (both rings);
        otherwise each unordered pair appears once.  Similarities are
        floored at 0.001 so zero-sim slices stay visible in the sunburst.
        """
        data = []
        for parser in self._spotlight_result.keys():
            # Version order: 'original' (optionally) first, reforges sorted.
            versions = list(self._spotlight_result[parser].keys())
            versions.remove('original')
            versions.sort()
            if compare_orig:
                versions.insert(0, 'original')
            if full:
                for version in versions:
                    vd = self._spotlight_result[parser][version]['comparisons']
                    for v in [outer for outer in versions if outer != version]:
                        inner = version if version == 'original' else version + ' reforged'
                        outer = v if v == 'original' else v + ' reforged'
                        row = {'Parser': parser, 'inner': inner, 'outer': outer, 'sim': max(vd[v]['sim'], 0.001)}
                        data.append(row)
            else:
                # Upper-triangle iteration: each unordered pair exactly once.
                for i in range(len(versions)):
                    for j in range(i + 1, len(versions)):
                        version = versions[i]
                        v = versions[j]
                        vd = self._spotlight_result[parser][version]['comparisons']
                        inner = version if version == 'original' else version + ' reforged'
                        outer = v if v == 'original' else v + ' reforged'
                        row = {'Parser': parser, 'inner': inner, 'outer': outer, 'sim': max(vd[v]['sim'], 0.001)}
                        data.append(row)
        df = pd.DataFrame(data)
        return df
def sim_sunburst(self, compare_orig: bool = True,
full: bool = False,
color: str = 'RdBu',
color_range: List[float] = [.6, 1]):
"""
Create an interactive sunburst for exploring the similarities between the documents for the Spotlight parsers
Parameters
----------
compare_orig : bool
Whether reforge<->original comparison scores should be displayed in the heatmap. These comparisons don't
impact the recoverablity of the file and would only provide insight into a differential between the original
and the reforge
full : bool
Create the full sunburst that has all possible combinations. Turning this off removes duplicate comparisons
to reduce the overall number of slices.
color : str
The color range to use. See https://plotly.com/python/builtin-colorscales/
color_range : List[float]
The range to base the color on. Format is [min, max]
Returns
-------
Plotly Sunburst
"""
df = self._sunburst_data(compare_orig, full)
fig = px.sunburst(df,
path=['Parser', 'inner', 'outer'],
values='sim',
color='sim',
color_continuous_scale=color,
range_color=color_range)
return fig
    def _create_heatmap(self, parsers, report, detailed, compare_orig):
        """Assemble the matrix backing sim_heatmap.

        Returns (d, columns, comparisons): a float matrix of similarity
        scores, the column labels (parser/score pairs, flattened to strings
        when not detailed), and the row labels (version pairs).  Invalid
        *parsers*/*report* arguments fall back to all parsers / the overall
        'sim' score.
        """
        # Sanitize the parser selection: default to everything on bad input.
        if parsers is None:
            parsers = list(self._spotlight_result.keys())
        elif isinstance(parsers, str):
            if parsers in self._spotlight_result:
                parsers = [parsers]
            else:
                parsers = list(self._spotlight_result.keys())
        elif isinstance(parsers, list):
            parsers = [p for p in parsers if p in self._spotlight_result]
            if len(parsers) == 0:
                parsers = list(self._spotlight_result.keys())
        # Sanitize the report: only the known per-tool sims are accepted.
        if report is None:
            report = 'sim'
        else:
            if report not in [RENDER+' sim', TRACER+' sim', TEXT+' sim']:
                report = 'sim'
        all_versions = set()
        columns = []
        for parser, versions in [(k, v) for k, v in self._spotlight_result.items() if k in parsers]:
            all_versions.update(versions.keys())
            if not detailed:
                columns.append((parser, report))
            else:
                # Detailed mode: one column per per-tool similarity score,
                # plus the overall 'sim' when more than one tool reported.
                all_compares = set()
                for v, results in versions.items():
                    for compares in results['comparisons'].values():
                        score_names = [sn for sn in compares.keys() if 'sim' in sn and sn != 'sim']
                        all_compares.update(score_names)
                all_compares = list(all_compares)
                if len(all_compares) > 1:
                    all_compares.sort()
                    all_compares.append('sim')
                for score_name in all_compares:
                    columns.append((parser, score_name))
        # Row labels: 'original' (optionally) first, reforges sorted; rows
        # are the upper-triangle version pairs.
        all_versions.remove('original')
        all_versions = list(all_versions)
        all_versions.sort()
        if compare_orig:
            all_versions.insert(0, 'original')
        comparisons = []
        for i in range(len(all_versions)):
            for j in range(i+1, len(all_versions)):
                comparisons.append((all_versions[i], all_versions[j]))
        d = np.zeros((len(comparisons), len(columns)), dtype=float)
        for row_idx, comparison in enumerate(comparisons):
            for col_idx, col in enumerate(columns):
                # NOTE(review): these chained .get(..., None) calls raise
                # AttributeError if an intermediate lookup actually misses
                # (None has no .get) — presumably every comparison has been
                # populated by this point; confirm before relying on the
                # None defaults.
                d[row_idx, col_idx] = self._spotlight_result.get(col[0], None)\
                    .get(comparison[0], None)\
                    .get('comparisons', None)\
                    .get(comparison[1], None)\
                    .get(col[1], None)
        if not detailed:
            columns = ['%s %s' % (p, s) for p, s in columns]
        return d, columns, comparisons
def sim_heatmap(self, parsers: str or List[str] = None,
report: str = 'sim',
annotated: bool = True,
detailed: bool = False,
compare_orig: bool = True,
height: int = 10,
width: int = 10,
save_display=None):
"""
A heatmap of the similarity scores for each parser over given pairs of documents
Parameters
----------
parsers : str or List[str]
The parsers to display the similarity scores for
report : {'sim', 'Renderer sim', 'Text Extractor sim', 'Tracer sim'}
The specific similarity score to run. | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
import os, sys, time, datetime
import xbmc, xbmcgui
import re, urllib, urlparse, random, json
import openscrapers
from resources.lib.modules import client, cleantitle, control, workers
from resources.lib.modules import trakt, tvmaze, source_utils, log_utils
from resources.lib.modules import debrid, cache
try:
from sqlite3 import dbapi2 as database
except:
from pysqlite2 import dbapi2 as database
try:
import resolveurl
except:
pass
class Sources:
    def __init__(self):
        # getConstants() is defined outside this view — presumably populates
        # the provider table and window-property names used by the methods
        # below (self.sourceDict, self.itemProperty, self.metaProperty,
        # self.hostcapDict, ...); confirm.
        self.getConstants()
        # Scraper results accumulated for the current lookup.
        self.sources = []
    def play(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, meta, select):
        """Resolve sources for an item and start playback.

        Scraped sources are cached for 24 hours.  *select* picks the UI
        mode: '1' inside a plugin container renders a source directory via
        addItem(); '0'/'1' otherwise open a selection dialog; any other
        value autoplays the best direct source.
        """
        try:
            url = None
            # Scraping is expensive: results cached 24h, keyed on item identity.
            items = cache.get(self.getSources, 24, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered)
            # items = self.getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered)
            if select is None:
                # For episodes with 'up next' enabled, autoplay ('2') is
                # forced regardless of the configured hosts.mode.
                if not episode is None and control.setting('enable.upnext') == 'true':
                    select = '2'
                else:
                    select = control.setting('hosts.mode')
            else:
                select = select
            title = tvshowtitle if not tvshowtitle is None else title
            # if control.window.getProperty('PseudoTVRunning') == 'True':
            #     return control.resolve(int(sys.argv[1]), True, control.item(path=str(self.sourcesDirect(items))))
            if len(items) > 0:
                if select == '1' and 'plugin' in control.infoLabel('Container.PluginName'):
                    # Hand the source list to a directory view through window
                    # properties; addItem() reads them back.
                    control.window.clearProperty(self.itemProperty)
                    control.window.setProperty(self.itemProperty, json.dumps(items))
                    control.window.clearProperty(self.metaProperty)
                    control.window.setProperty(self.metaProperty, meta)
                    control.sleep(200)
                    return control.execute('Container.Update(%s?action=addItem&title=%s)' % (sys.argv[0], urllib.quote_plus(title)))
                elif select == '0' or select == '1':
                    url = self.sourcesDialog(items)
                else:
                    url = self.sourcesDirect(items)
            if url is None:
                # No items or resolution failed/cancelled.
                return self.errorForSources()
            try:
                meta = json.loads(meta)
            except:
                pass
            from resources.lib.modules import player
            if control.playlist.size() != 0:
                # playlist = control.playlist.getPlayListId()
                # control.player2().play(control.playlist)
                # player.Player().play_playlist(title, year, season, episode, imdb, tvdb, url, meta)
                player.Player().play_source(title, year, season, episode, imdb, tvdb, url, meta)
            else:
                player.Player().play_source(title, year, season, episode, imdb, tvdb, url, meta)
        except:
            import traceback
            traceback.print_exc()
            pass
    def addItem(self, title):
        """Render the cached source list (set by play()) as a Kodi directory.

        Reads the sources and item metadata from the window properties,
        resolves artwork with poster3/poster2/poster style fallbacks ('0'
        marks "unset"), and adds one playable directory entry per source
        pointing back at action=playItem.
        """
        control.playlist.clear()
        items = control.window.getProperty(self.itemProperty)
        items = json.loads(items)
        if items is None or len(items) == 0:
            control.idle()
            sys.exit()
        meta = control.window.getProperty(self.metaProperty)
        meta = json.loads(meta)
        # (Kodi bug?) [name,role] is incredibly slow on this directory, [name] is barely tolerable, so just nuke it for speed!
        if 'cast' in meta:
            del(meta['cast'])
        sysaddon = sys.argv[0]
        syshandle = int(sys.argv[1])
        # Downloads only offered when enabled and both download paths are set.
        downloads = True if control.setting('downloads') == 'true' and not (control.setting(
            'movie.download.path') == '' or control.setting('tv.download.path') == '') else False
        systitle = sysname = urllib.quote_plus(title)
        # Display name gains SxxEyy for episodes or (year) for movies.
        if 'tvshowtitle' in meta and 'season' in meta and 'episode' in meta:
            sysname += urllib.quote_plus(' S%02dE%02d' % (int(meta['season']), int(meta['episode'])))
        elif 'year' in meta:
            sysname += urllib.quote_plus(' (%s)' % meta['year'])
        # Artwork fallback chains; '0' is the "not set" sentinel throughout.
        poster = meta['poster3'] if 'poster3' in meta else '0'
        if poster == '0':
            poster = meta['poster2'] if 'poster2' in meta else '0'
        if poster == '0':
            poster = meta['poster'] if 'poster' in meta else '0'
        fanart = meta['fanart3'] if 'fanart3' in meta else '0'
        if fanart == '0':
            fanart = meta['fanart2'] if 'fanart2' in meta else '0'
        if fanart == '0':
            fanart = meta['fanart'] if 'fanart' in meta else '0'
        thumb = meta['thumb'] if 'thumb' in meta else '0'
        if thumb == '0':
            thumb = poster
        if thumb == '0':
            thumb = fanart
        banner = meta['banner'] if 'banner' in meta else '0'
        if banner == '0':
            banner = poster
        clearart = meta['clearart'] if 'clearart' in meta else '0'
        clearlogo = meta['clearlogo'] if 'clearlogo' in meta else '0'
        discart = meta['discart'] if 'discart' in meta else '0'
        # Fall back to addon-bundled art for anything still unset.
        if poster == '0':
            poster = control.addonPoster()
        if banner == '0':
            banner = control.addonBanner()
        if not control.setting('fanart') == 'true':
            fanart = '0'
        if fanart == '0':
            fanart = control.addonFanart()
        if thumb == '0':
            thumb = control.addonFanart()
        sysimage = urllib.quote_plus(poster.encode('utf-8'))
        downloadMenu = control.lang(32403).encode('utf-8')
        for i in range(len(items)):
            try:
                if control.setting('sourcelist.multiline') == 'true':
                    label = str(items[i]['multiline_label'])
                else:
                    label = str(items[i]['label'])
                # Each entry carries its own single-source JSON payload.
                syssource = urllib.quote_plus(json.dumps([items[i]]))
                sysurl = '%s?action=playItem&title=%s&source=%s' % (sysaddon, systitle, syssource)
                cm = []
                if downloads is True:
                    cm.append((downloadMenu, 'RunPlugin(%s?action=download&name=%s&image=%s&source=%s)' %
                               (sysaddon, sysname, sysimage, syssource)))
                item = control.item(label=label)
                item.setArt({'icon': thumb, 'thumb': thumb, 'poster': poster, 'banner': banner, 'clearart': clearart, 'clearlogo': clearlogo, 'discart': discart})
                if not fanart == '0' and not fanart is None:
                    item.setProperty('Fanart_Image', fanart)
                video_streaminfo = {'codec': 'h264'}
                item.addStreamInfo('video', video_streaminfo)
                item.addContextMenuItems(cm)
                item.setInfo(type='video', infoLabels=control.metadataClean(meta))
                control.addItem(handle=syshandle, url=sysurl, listitem=item, isFolder=False)
            except:
                import traceback
                traceback.print_exc()
                pass
        control.content(syshandle, 'files')
        control.directory(syshandle, cacheToDisc=True)
    def playItem(self, title, source):
        """Resolve and play one source picked from the addItem() directory.

        Also gathers the neighboring directory entries (via ListItem
        FolderPath info-labels) as fallbacks, trying up to 40 sources in
        order until one resolves.  self.url is presumably set by
        sourcesResolve() running in the worker thread — confirm.
        """
        try:
            meta = control.window.getProperty(self.metaProperty)
            meta = json.loads(meta)
            year = meta['year'] if 'year' in meta else None
            season = meta['season'] if 'season' in meta else None
            episode = meta['episode'] if 'episode' in meta else None
            imdb = meta['imdb'] if 'imdb' in meta else None
            tvdb = meta['tvdb'] if 'tvdb' in meta else None
            next = []
            prev = []
            total = []
            # Collect the sources of the following directory entries
            # (positive ListItem offsets) until a duplicate/blank stops us.
            for i in range(1,1000):
                try:
                    u = control.infoLabel('ListItem(%s).FolderPath' % str(i))
                    if u in total:
                        raise Exception()
                    total.append(u)
                    u = dict(urlparse.parse_qsl(u.replace('?','')))
                    u = json.loads(u['source'])[0]
                    next.append(u)
                except:
                    break
            # Same for the preceding entries (negative offsets, nearest first).
            for i in range(-1000,0)[::-1]:
                try:
                    u = control.infoLabel('ListItem(%s).FolderPath' % str(i))
                    if u in total:
                        raise Exception()
                    total.append(u)
                    u = dict(urlparse.parse_qsl(u.replace('?','')))
                    u = json.loads(u['source'])[0]
                    prev.append(u)
                except:
                    break
            items = json.loads(source)
            # Selected source first, then neighbors as fallbacks, capped at 40.
            items = [i for i in items+next+prev][:40]
            header = control.addonInfo('name')
            header2 = header.upper()
            progressDialog = control.progressDialog if control.setting('progress.dialog') == '0' else control.progressDialogBG
            progressDialog.create(header, '')
            progressDialog.update(0)
            block = None
            for i in range(len(items)):
                try:
                    try:
                        if progressDialog.iscanceled():
                            break
                        progressDialog.update(int((100 / float(len(items))) * i), str(items[i]['label']), str(' '))
                    except:
                        # Background dialog variant takes a different signature.
                        progressDialog.update(int((100 / float(len(items))) * i), str(header2), str(items[i]['label']))
                    # Skip hosts that already timed out once this session.
                    if items[i]['source'] == block:
                        raise Exception()
                    # Resolve in a worker thread so the dialog stays responsive.
                    w = workers.Thread(self.sourcesResolve, items[i])
                    w.start()
                    # Capped hosts get a longer (extra 120 * 0.5s) timeout.
                    offset = 60 * 2 if items[i].get('source') in self.hostcapDict else 0
                    m = ''
                    # Poll the resolver for up to 30 (+offset) half-second
                    # ticks; 'm' records that an input dialog appeared.
                    for x in range(3600):
                        try:
                            if xbmc.abortRequested is True:
                                return sys.exit()
                            if progressDialog.iscanceled():
                                return progressDialog.close()
                        except:
                            pass
                        k = control.condVisibility('Window.IsActive(virtualkeyboard)')
                        if k:
                            m += '1'
                            m = m[-1]
                        if (w.is_alive() is False or x > 30 + offset) and not k:
                            break
                        k = control.condVisibility('Window.IsActive(yesnoDialog)')
                        if k:
                            m += '1'
                            m = m[-1]
                        if (w.is_alive() is False or x > 30 + offset) and not k:
                            break
                        time.sleep(0.5)
                    # If a dialog was shown, allow a 15s grace period for the
                    # resolver to finish after user interaction.
                    for x in range(30):
                        try:
                            if xbmc.abortRequested is True:
                                return sys.exit()
                            if progressDialog.iscanceled():
                                return progressDialog.close()
                        except:
                            pass
                        if m == '':
                            break
                        if w.is_alive() is False:
                            break
                        time.sleep(0.5)
                    # Resolver still running: blacklist this host for the rest
                    # of the loop.
                    if w.is_alive() is True:
                        block = items[i]['source']
                    if self.url is None:
                        raise Exception()
                    try:
                        progressDialog.close()
                    except:
                        pass
                    control.sleep(200)
                    control.execute('Dialog.Close(virtualkeyboard)')
                    control.execute('Dialog.Close(yesnoDialog)')
                    from resources.lib.modules import player
                    player.Player().play_source(title, year, season, episode, imdb, tvdb, self.url, meta)
                    return self.url
                except:
                    # This source failed; fall through to the next one.
                    pass
            try:
                progressDialog.close()
            except:
                pass
            self.errorForSources()
        except:
            import traceback
            traceback.print_exc()
            pass
def getSources(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, quality='HD', timeout=30):
progressDialog = control.progressDialog if control.setting('progress.dialog') == '0' else control.progressDialogBG
progressDialog.create(control.addonInfo('name'), '')
progressDialog.update(0)
self.prepareSources()
sourceDict = self.sourceDict
progressDialog.update(0, control.lang(32600).encode('utf-8'))
content = 'movie' if tvshowtitle is None else 'episode'
if content == 'movie':
sourceDict = [(i[0], i[1], getattr(i[1], 'movie', None)) for i in sourceDict]
genres = trakt.getGenre('movie', 'imdb', imdb)
else:
sourceDict = [(i[0], i[1], getattr(i[1], 'tvshow', None)) for i in sourceDict]
genres = trakt.getGenre('show', 'tvdb', tvdb)
sourceDict = [(i[0], i[1], i[2]) for i in sourceDict if not hasattr(i[1], 'genre_filter') or not i[1].genre_filter or any(x in i[1].genre_filter for x in genres)]
sourceDict = [(i[0], i[1]) for i in sourceDict if not i[2] is None]
language = self.getLanguage()
sourceDict = [(i[0], i[1], i[1].language) for i in sourceDict]
sourceDict = [(i[0], i[1]) for i in sourceDict if any(x in i[2] for x in language)]
try:
sourceDict = [(i[0], i[1], control.setting('provider.' + i[0])) for i in sourceDict]
except:
sourceDict = [(i[0], i[1], 'true') for i in sourceDict]
sourceDict = [(i[0], i[1]) for i in sourceDict if not i[2] == 'false']
sourceDict = [(i[0], i[1], i[1].priority) for i in sourceDict]
random.shuffle(sourceDict)
sourceDict = sorted(sourceDict, key=lambda i: i[2])
threads = []
if content == 'movie':
title = self.getTitle(title)
localtitle = self.getLocalTitle(title, imdb, tvdb, content)
aliases = self.getAliasTitles(imdb, localtitle, content)
for i in sourceDict:
threads.append(workers.Thread(self.getMovieSource, title, localtitle, aliases, year, imdb, i[0], i[1]))
else:
tvshowtitle = self.getTitle(tvshowtitle)
localtvshowtitle = self.getLocalTitle(tvshowtitle, imdb, tvdb, content)
aliases = self.getAliasTitles(imdb, localtvshowtitle, content)
for i in sourceDict:
threads.append(workers.Thread(self.getEpisodeSource, | |
<reponame>induane/stomp.py3<gh_stars>0
import math
import random
import re
import socket
import sys
import threading
import time
import types
import xml.dom.minidom
import errno
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
# Candidate SSL/TLS protocol constants, in preference order.  Older Python
# builds expose only a subset of these, so we probe for the first one this
# interpreter's ssl module actually provides.
# Fix: the original iterated a *frozenset*, whose iteration order is
# arbitrary, so the protocol actually selected was interpreter-dependent.
# A tuple makes the probe deterministic and honours the listed order.
protocols = (
    'PROTOCOL_SSLv3',
    'PROTOCOL_TLSv1_2',
    'PROTOCOL_TLSv1_1',
    'PROTOCOL_TLSv1',
    'PROTOCOL_SSLv23',
    'PROTOCOL_SSLv2',
)

# Best SSL protocol constant found at import time (None if ssl is missing
# or none of the candidates exist in this build).
DEFAULT_SSL_VERSION = None
SSL_AVAILABLE = True
try:
    import ssl
    from ssl import SSLError
except ImportError:
    SSL_AVAILABLE = False

if SSL_AVAILABLE:
    for protocol in protocols:
        try:
            DEFAULT_SSL_VERSION = getattr(ssl, protocol)
        except AttributeError:
            # This build of the ssl module does not ship this constant;
            # try the next candidate.  (The original also caught SSLError
            # here, but getattr can never raise it -- dead code removed.)
            continue
        break
# Feature-detect the Linux-specific TCP keepalive socket constants; on other
# platforms the import fails and fine-grained keepalive tuning is disabled.
try:
    from socket import SOL_SOCKET, SO_KEEPALIVE
    from socket import SOL_TCP, TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT
    LINUX_KEEPALIVE_AVAIL = True
except ImportError:
    LINUX_KEEPALIVE_AVAIL = False
import exception
import listener
import utils
from backward import decode, encode, hasbyte, pack, socksend, NULL
try:
import uuid
except ImportError:
from backward import uuid
try:
from fractions import gcd
except ImportError:
from backward import gcd
import logging
log = logging.getLogger('stomp.py')
class Connection(object):
"""
Represents a STOMP client connection.
"""
# ========= PRIVATE MEMBERS =========
# List of all host names (unqualified, fully-qualified, and IP
# addresses) that refer to the local host (both loopback interface
# and external interfaces). This is used for determining
# preferred targets.
__localhost_names = [ "localhost", "127.0.0.1" ]
try:
__localhost_names.append(socket.gethostbyname(socket.gethostname()))
except:
pass
try:
__localhost_names.append(socket.gethostname())
except:
pass
try:
__localhost_names.append(socket.getfqdn(socket.gethostname()))
except:
pass
#
# Used to parse the STOMP "content-length" header lines,
#
__content_length_re = re.compile('^content-length[:]\\s*(?P<value>[0-9]+)', re.MULTILINE)
def __init__(self,
host_and_ports = [ ('localhost', 61613) ],
user = None,
passcode = None,
prefer_localhost = True,
try_loopback_connect = True,
reconnect_sleep_initial = 0.1,
reconnect_sleep_increase = 0.5,
reconnect_sleep_jitter = 0.1,
reconnect_sleep_max = 60.0,
reconnect_attempts_max = 3,
use_ssl = False,
ssl_key_file = None,
ssl_cert_file = None,
ssl_ca_certs = None,
ssl_cert_validator = None,
wait_on_receipt = False,
ssl_version = DEFAULT_SSL_VERSION,
timeout = None,
version = 1.0,
strict = True,
heartbeats = (0, 0),
keepalive = None,
vhost = None
):
"""
Initialize and start this connection.
\param host_and_ports
a list of (host, port) tuples.
\param prefer_localhost
if True and the local host is mentioned in the (host,
port) tuples, try to connect to this first
\param try_loopback_connect
if True and the local host is found in the host
tuples, try connecting to it using loopback interface
(127.0.0.1)
\param reconnect_sleep_initial
initial delay in seconds to wait before reattempting
to establish a connection if connection to any of the
hosts fails.
\param reconnect_sleep_increase
factor by which the sleep delay is increased after
each connection attempt. For example, 0.5 means
to wait 50% longer than before the previous attempt,
1.0 means wait twice as long, and 0.0 means keep
the delay constant.
\param reconnect_sleep_max
maximum delay between connection attempts, regardless
of the reconnect_sleep_increase.
\param reconnect_sleep_jitter
random additional time to wait (as a percentage of
the time determined using the previous parameters)
between connection attempts in order to avoid
stampeding. For example, a value of 0.1 means to wait
an extra 0%-10% (randomly determined) of the delay
calculated using the previous three parameters.
\param reconnect_attempts_max
maximum attempts to reconnect
\param use_ssl
connect using SSL to the socket. This wraps the
socket in a SSL connection. The constructor will
raise an exception if you ask for SSL, but it can't
find the SSL module.
\param ssl_cert_file
the path to a X509 certificate
\param ssl_key_file
the path to a X509 key file
\param ssl_ca_certs
the path to the a file containing CA certificates
to validate the server against. If this is not set,
server side certificate validation is not done.
\param ssl_cert_validator
function which performs extra validation on the client
certificate, for example checking the returned
certificate has a commonName attribute equal to the
hostname (to avoid man in the middle attacks).
The signature is:
(OK, err_msg) = validation_function(cert, hostname)
where OK is a boolean, and cert is a certificate structure
as returned by ssl.SSLSocket.getpeercert()
\param wait_on_receipt
if a receipt is specified, then the send method should wait
(block) for the server to respond with that receipt-id
before continuing
\param ssl_version
SSL protocol to use for the connection. This should be
one of the PROTOCOL_x constants provided by the ssl module.
The default is ssl.PROTOCOL_SSLv3
\param timeout
the timeout value to use when connecting the stomp socket
\param version
STOMP protocol version (1.0 or 1.1)
\param strict
if true, use the strict version of the protocol. For STOMP 1.1, this means
it will use the STOMP connect header, rather than CONNECT.
\param heartbeats
a tuple containing the heartbeat send and receive time in millis. (0,0)
if no heartbeats
\param keepalive
some operating systems support sending the occasional heart
beat packets to detect when a connection fails. This
parameter can either be set set to a boolean to turn on the
default keepalive options for your OS, or as a tuple of
values, which also enables keepalive packets, but specifies
options specific to your OS implementation
\param vhost
specify a virtual hostname to provide in the 'host' header of the connection
"""
sorted_host_and_ports = []
sorted_host_and_ports.extend(host_and_ports)
#
# If localhost is preferred, make sure all (host, port) tuples that refer to the local host come first in the list
#
if prefer_localhost:
sorted_host_and_ports.sort(key = self.is_localhost)
#
# If the user wishes to attempt connecting to local ports using the loopback interface, for each (host, port) tuple
# referring to a local host, add an entry with the host name replaced by 127.0.0.1 if it doesn't exist already
#
loopback_host_and_ports = []
if try_loopback_connect:
for host_and_port in sorted_host_and_ports:
if self.is_localhost(host_and_port) == 1:
port = host_and_port[1]
if (not ("127.0.0.1", port) in sorted_host_and_ports
and not ("localhost", port) in sorted_host_and_ports):
loopback_host_and_ports.append(("127.0.0.1", port))
#
# Assemble the final, possibly sorted list of (host, port) tuples
#
self.__host_and_ports = []
self.__host_and_ports.extend(loopback_host_and_ports)
self.__host_and_ports.extend(sorted_host_and_ports)
self.__recvbuf = ''
self.__listeners = {}
self.__reconnect_sleep_initial = reconnect_sleep_initial
self.__reconnect_sleep_increase = reconnect_sleep_increase
self.__reconnect_sleep_jitter = reconnect_sleep_jitter
self.__reconnect_sleep_max = reconnect_sleep_max
self.__reconnect_attempts_max = reconnect_attempts_max
self.__timeout = timeout
self.__connect_headers = {}
if user is not None and passcode is not None:
self.__connect_headers['login'] = user
self.__connect_headers['passcode'] = passcode
self.__socket = None
self.__socket_semaphore = threading.BoundedSemaphore(1)
self.__current_host_and_port = None
self.__receiver_thread_exit_condition = threading.Condition()
self.__receiver_thread_exited = False
self.__send_wait_condition = threading.Condition()
self.__connect_wait_condition = threading.Condition()
self.blocking = None
self.connected = False
# setup SSL
if use_ssl and not ssl:
raise Exception("SSL connection requested, but SSL library not found.")
self.__ssl = use_ssl
self.__ssl_cert_file = ssl_cert_file
self.__ssl_key_file = ssl_key_file
self.__ssl_ca_certs = ssl_ca_certs
self.__ssl_cert_validator = ssl_cert_validator
self.__ssl_version = ssl_version
self.__receipts = {}
self.__wait_on_receipt = wait_on_receipt
# protocol version
self.version = version
self.__strict = strict
# setup heartbeating
if version < 1.1 and heartbeats != (0, 0):
raise exception.ProtocolException('Heartbeats can only be set on a 1.1+ connection')
self.heartbeats = heartbeats
# used for 1.1 heartbeat messages (set to true every time a heartbeat message arrives)
self.__received_heartbeat = time.time()
# flag used when we receive the disconnect receipt
self.__disconnect_receipt = None
# function for creating threads used by the connection
self.create_thread_fc = default_create_thread
self.__keepalive = keepalive
self.vhost = vhost
def is_localhost(self, host_and_port):
"""
Return true if the specified host+port is a member of the 'localhost' list of hosts
"""
(host, port) = host_and_port
if host in Connection.__localhost_names:
return 1
else:
return 2
def override_threading(self, create_thread_fc):
"""
Override for thread creation. Use an alternate threading library by
setting this to a function with a single argument (which is the receiver loop callback).
The thread which is returned should be started (ready to run)
"""
self.create_thread_fc = create_thread_fc
#
# Manage the connection
#
def start(self):
"""
Start the connection. This should be called after all
listeners have been registered. If this method is not called,
no frames will be received by the connection.
"""
self.__running = True
self.__attempt_connection()
thread = self.create_thread_fc(self.__receiver_loop)
self.__notify('connecting')
def stop(self):
"""
Stop the connection. This is equivalent to calling
disconnect() but will do a clean shutdown by waiting for the
receiver thread to exit.
"""
self.disconnect()
self.__receiver_thread_exit_condition.acquire()
while not self.__receiver_thread_exited:
self.__receiver_thread_exit_condition.wait()
self.__receiver_thread_exit_condition.release()
def get_host_and_port(self):
"""
Return a (host, port) tuple indicating which STOMP host and
port is currently connected, or None if there is currently no
connection.
"""
return self.__current_host_and_port
def is_connected(self):
"""
Return true if the socket managed by this connection is connected
"""
| |
"""
pass
    def Initialize(self, *args): #cannot find CLR method
        """
        Initialize(self: UnmanagedMemoryStream, buffer: SafeBuffer, offset: Int64, length: Int64, access: FileAccess)
        Initializes a new instance of the System.IO.UnmanagedMemoryStream class in a
        safe buffer with a specified offset, length, and file access.

        buffer: The buffer to contain the unmanaged memory stream.
        offset: The byte position in the buffer at which to start the unmanaged memory stream.
        length: The length of the unmanaged memory stream.
        access: The mode of file access to the unmanaged memory stream.

        Initialize(self: UnmanagedMemoryStream, pointer: Byte*, length: Int64, capacity: Int64, access: FileAccess)
        Initializes a new instance of the System.IO.UnmanagedMemoryStream class by
        using a pointer to an unmanaged memory location.

        pointer: A pointer to an unmanaged memory location.
        length: The length of the memory to use.
        capacity: The total amount of memory assigned to the stream.
        access: One of the System.IO.FileAccess values.
        """
        # CLR-backed stub: the actual implementation is supplied by the
        # .NET runtime (see "cannot find CLR method" on the def line).
        pass
    def MemberwiseClone(self, *args): #cannot find CLR method
        """
        MemberwiseClone(self: MarshalByRefObject, cloneIdentity: bool) -> MarshalByRefObject
        Creates a shallow copy of the current System.MarshalByRefObject object.

        cloneIdentity: false to delete the current System.MarshalByRefObject object's identity, which
            will cause the object to be assigned a new identity when it is marshaled across
            a remoting boundary. A value of false is usually appropriate. true to copy the
            current System.MarshalByRefObject object's identity to its clone, which will
            cause remoting client calls to be routed to the remote server object.

        Returns: A shallow copy of the current System.MarshalByRefObject object.

        MemberwiseClone(self: object) -> object
        Creates a shallow copy of the current System.Object.

        Returns: A shallow copy of the current System.Object.
        """
        # CLR-backed stub: the actual implementation is supplied by the
        # .NET runtime.
        pass
    def ObjectInvariant(self, *args): #cannot find CLR method
        """
        ObjectInvariant(self: Stream)
        Provides support for a System.Diagnostics.Contracts.Contract.
        """
        # CLR-backed stub: the actual implementation is supplied by the
        # .NET runtime.
        pass
    def Read(self, buffer, offset, count):
        """
        Read(self: UnmanagedMemoryStream, offset: int, count: int) -> (int, Array[Byte])
        Reads the specified number of bytes into the specified array.

        offset: The zero-based byte offset in buffer at which to begin storing the data read
            from the current stream.
        count: The maximum number of bytes to read from the current stream.

        Returns: The total number of bytes read into the buffer. This can be less than the
            number of bytes requested if that many bytes are not currently available, or
            zero (0) if the end of the stream has been reached.
        """
        # CLR-backed stub: the actual implementation is supplied by the
        # .NET runtime.
        pass
    def ReadAsync(self, buffer, offset, count, cancellationToken=None):
        """ ReadAsync(self: UnmanagedMemoryStream, buffer: Array[Byte], offset: int, count: int, cancellationToken: CancellationToken) -> Task[int] """
        # CLR-backed stub: the actual implementation is supplied by the
        # .NET runtime.
        pass
    def ReadByte(self):
        """
        ReadByte(self: UnmanagedMemoryStream) -> int
        Reads a byte from a stream and advances the position within the stream by one
        byte, or returns -1 if at the end of the stream.

        Returns: The unsigned byte cast to an System.Int32 object, or -1 if at the end of the
            stream.
        """
        # CLR-backed stub: the actual implementation is supplied by the
        # .NET runtime.
        pass
    def Seek(self, offset, loc):
        """
        Seek(self: UnmanagedMemoryStream, offset: Int64, loc: SeekOrigin) -> Int64
        Sets the current position of the current stream to the given value.

        offset: The point relative to origin to begin seeking from.
        loc: Specifies the beginning, the end, or the current position as a reference point
            for origin, using a value of type System.IO.SeekOrigin.

        Returns: The new position in the stream.
        """
        # CLR-backed stub: the actual implementation is supplied by the
        # .NET runtime.
        pass
    def SetLength(self, value):
        """
        SetLength(self: UnmanagedMemoryStream, value: Int64)
        Sets the length of a stream to a specified value.

        value: The length of the stream.
        """
        # CLR-backed stub: the actual implementation is supplied by the
        # .NET runtime.
        pass
    def Write(self, buffer, offset, count):
        """
        Write(self: UnmanagedMemoryStream, buffer: Array[Byte], offset: int, count: int)
        Writes a block of bytes to the current stream using data from a buffer.

        buffer: The byte array from which to copy bytes to the current stream.
        offset: The offset in the buffer at which to begin copying bytes to the current stream.
        count: The number of bytes to write to the current stream.
        """
        # CLR-backed stub: the actual implementation is supplied by the
        # .NET runtime.
        pass
    def WriteAsync(self, buffer, offset, count, cancellationToken=None):
        """ WriteAsync(self: UnmanagedMemoryStream, buffer: Array[Byte], offset: int, count: int, cancellationToken: CancellationToken) -> Task """
        # CLR-backed stub: the actual implementation is supplied by the
        # .NET runtime.
        pass
    def WriteByte(self, value):
        """
        WriteByte(self: UnmanagedMemoryStream, value: Byte)
        Writes a byte to the current position in the file stream.

        value: A byte value written to the stream.
        """
        # CLR-backed stub: the actual implementation is supplied by the
        # .NET runtime.
        pass
    def __enter__(self, *args): #cannot find CLR method
        """ __enter__(self: IDisposable) -> object """
        # CLR-backed stub (IDisposable support): implementation supplied
        # by the .NET runtime.
        pass
    def __exit__(self, *args): #cannot find CLR method
        """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
        # CLR-backed stub (IDisposable support): implementation supplied
        # by the .NET runtime.
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        # CLR-backed stub: construction is handled by the .NET runtime;
        # see __new__ below for the overload signatures.
        pass
    @staticmethod # known case of __new__
    def __new__(self, *__args):
        """
        __new__(cls: type)
        __new__(cls: type, buffer: SafeBuffer, offset: Int64, length: Int64)
        __new__(cls: type, buffer: SafeBuffer, offset: Int64, length: Int64, access: FileAccess)
        __new__(cls: type, pointer: Byte*, length: Int64)
        __new__(cls: type, pointer: Byte*, length: Int64, capacity: Int64, access: FileAccess)
        """
        # CLR-backed stub: the overloads above mirror the .NET
        # UnmanagedMemoryStream constructors; allocation happens in the CLR.
        pass
CanRead = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether a stream supports reading.
Get: CanRead(self: UnmanagedMemoryStream) -> bool
"""
CanSeek = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether a stream supports seeking.
Get: CanSeek(self: UnmanagedMemoryStream) -> bool
"""
CanWrite = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets a value indicating whether a stream supports writing.
Get: CanWrite(self: UnmanagedMemoryStream) -> bool
"""
Capacity = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the stream length (size) or the total amount of memory assigned to a stream (capacity).
Get: Capacity(self: UnmanagedMemoryStream) -> Int64
"""
Length = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the length of the data in a stream.
Get: Length(self: UnmanagedMemoryStream) -> Int64
"""
Position = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets the current position in a stream.
Get: Position(self: UnmanagedMemoryStream) -> Int64
Set: Position(self: UnmanagedMemoryStream) = value
"""
PositionPointer = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets or sets a byte pointer to a stream based on the current position in the stream.
Get: PositionPointer(self: UnmanagedMemoryStream) -> Byte*
Set: PositionPointer(self: UnmanagedMemoryStream) = value
"""
class WaitForChangedResult(object):
    """ Contains information on the change that occurred. """
    # Generated stub mirroring System.IO.WaitForChangedResult.  Each
    # property() below carries a dummy (getter, setter, deleter) triple;
    # the real accessors are presumably supplied by the CLR at runtime.
    ChangeType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the type of change that occurred.

    Get: ChangeType(self: WaitForChangedResult) -> WatcherChangeTypes
    Set: ChangeType(self: WaitForChangedResult) = value
    """

    Name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the name of the file or directory that changed.

    Get: Name(self: WaitForChangedResult) -> str
    Set: Name(self: WaitForChangedResult) = value
    """

    OldName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets the original name of the file or directory that was renamed.

    Get: OldName(self: WaitForChangedResult) -> str
    Set: OldName(self: WaitForChangedResult) = value
    """

    TimedOut = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Gets or sets a value indicating whether the wait operation timed out.

    Get: TimedOut(self: WaitForChangedResult) -> bool
    Set: TimedOut(self: WaitForChangedResult) = value
    """
class WatcherChangeTypes(Enum, IComparable, IFormattable, IConvertible):
"""
Changes that might occur to a file or directory.
enum (flags) WatcherChangeTypes, values: All (15), Changed (4), Created (1), Deleted (2), Renamed (8)
"""
    def __eq__(self, *args): #cannot find CLR method
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        # CLR-backed enum stub: equality is implemented by the runtime.
        pass
    def __format__(self, *args): #cannot find CLR method
        """ __format__(formattable: IFormattable, format: str) -> str """
        # CLR-backed enum stub: formatting is implemented by the runtime.
        pass
    def __ge__(self, *args): #cannot find CLR method
        # CLR-backed enum stub: comparison is implemented by the runtime.
        pass
    def __gt__(self, *args): #cannot find CLR method
        # CLR-backed enum stub: comparison is implemented by the runtime.
        pass
    def __init__(self, *args): #cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        # CLR-backed enum stub: construction is handled by the runtime.
        pass
    def __le__(self, *args): #cannot find CLR method
        # CLR-backed enum stub: comparison is implemented by the runtime.
        pass
    def __lt__(self, *args): #cannot find CLR method
        # CLR-backed enum stub: comparison is implemented by the runtime.
        pass
| |
<reponame>Vinicius-Tanigawa/Undergraduate-Research-Project<gh_stars>0
## @ingroup Methods-Aerodynamics-AVL
#create_avl_datastructure.py
#
# Created: Oct 2014, <NAME>
# Modified: Jan 2016, <NAME>
# Apr 2017, <NAME>
# Jul 2017, <NAME>
# Aug 2019, <NAME>
# Mar 2020, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import scipy
import numpy as np
from copy import deepcopy
# SUAVE Imports
from SUAVE.Core import Data , Units
# SUAVE-AVL Imports
from .Data.Inputs import Inputs
from .Data.Wing import Wing, Section, Control_Surface
from .Data.Body import Body
from .Data.Aircraft import Aircraft
from .Data.Cases import Run_Case
from .Data.Configuration import Configuration
from SUAVE.Components.Wings.Control_Surfaces import Aileron , Elevator , Slat , Flap , Rudder
from SUAVE.Methods.Aerodynamics.AVL.write_avl_airfoil_file import write_avl_airfoil_file
from SUAVE.Methods.Geometry.Two_Dimensional.Planform.wing_planform import wing_planform
## @ingroup Methods-Aerodynamics-AVL
def translate_avl_wing(suave_wing):
    """ Translates wing geometry from the vehicle setup to AVL format

    Assumptions:
        None

    Source:
        None

    Inputs:
        suave_wing.tag       [-]
        suave_wing.symmetric [boolean]
        suave_wing.vertical  [boolean]
        suave_wing           - passed into the populate_wing_sections function [data stucture]

    Outputs:
        aircraft wing in AVL format [data stucture]

    Properties Used:
        N/A
    """
    avl_wing           = Wing()
    avl_wing.tag       = suave_wing.tag
    avl_wing.symmetric = suave_wing.symmetric
    avl_wing.vertical  = suave_wing.vertical
    return populate_wing_sections(avl_wing, suave_wing)
def translate_avl_body(suave_body):
    """ Translates body geometry from the vehicle setup to AVL format

    Assumptions:
        None

    Source:
        None

    Inputs:
        suave_body.tag             [-]
        suave_body.lengths.total   [meters]
        suave_body.lengths.nose    [meters]
        suave_body.lengths.tail    [meters]
        suave_body.width           [meters]
        suave_body.heights.maximum [meters]
        suave_body                 - passed into the populate_body_sections function [data stucture]

    Outputs:
        aircraft body in AVL format [data stucture]

    Properties Used:
        N/A
    """
    avl_body                 = Body()
    avl_body.tag             = suave_body.tag
    avl_body.symmetric       = True
    avl_body.lengths.total   = suave_body.lengths.total
    avl_body.lengths.nose    = suave_body.lengths.nose
    avl_body.lengths.tail    = suave_body.lengths.tail
    avl_body.widths.maximum  = suave_body.width
    avl_body.heights.maximum = suave_body.heights.maximum
    return populate_body_sections(avl_body, suave_body)
def populate_wing_sections(avl_wing,suave_wing):
""" Creates sections of wing geometry and populates the AVL wing data structure
Assumptions:
None
Source:
None
Inputs:
avl_wing.symmetric [boolean]
suave_wing.spans.projected [meters]
suave_wing.origin [meters]
suave_wing.dihedral [radians]
suave_wing.Segments.sweeps.leading_edge [radians]
suave_wing.Segments.root_chord_percent [-]
suave_wing.Segments.percent_span_location [-]
suave_wing.Segments.sweeps.quarter_chord [radians]
suave_wing.Segment.twist [radians]
Outputs:
avl_wing - aircraft wing in AVL format [data stucture]
Properties Used:
N/A
"""
if len(suave_wing.Segments.keys())>0:
# obtain the geometry for each segment in a loop
symm = avl_wing.symmetric
semispan = suave_wing.spans.projected*0.5 * (2 - symm)
avl_wing.semispan = semispan
root_chord = suave_wing.chords.root
segment_percent_span = 0
segments = suave_wing.Segments
n_segments = len(segments.keys())
segment_sweeps = []
origin = []
origin.append(suave_wing.origin)
for i_segs in range(n_segments):
if (i_segs == n_segments-1):
segment_sweeps.append(0)
else: # this converts all sweeps defined by the quarter chord to leading edge sweep since AVL needs the start of each wing section
#from the leading edge coordinate and not the quarter chord coordinate
if segments[i_segs].sweeps.leading_edge is not None:
# if leading edge sweep is defined
segment_sweep = segments[i_segs].sweeps.leading_edge
else:
# if quarter chord sweep is defined, convert it to leading edge sweep
sweep_quarter_chord = segments[i_segs].sweeps.quarter_chord
chord_fraction = 0.25
segment_root_chord = root_chord*segments[i_segs].root_chord_percent
segment_tip_chord = root_chord*segments[i_segs+1].root_chord_percent
segment_span = semispan*(segments[i_segs+1].percent_span_location - segments[i_segs].percent_span_location )
segment_sweep = np.arctan(((segment_root_chord*chord_fraction) + (np.tan(sweep_quarter_chord )*segment_span - chord_fraction*segment_tip_chord)) /segment_span)
segment_sweeps.append(segment_sweep)
dihedral = segments[i_segs].dihedral_outboard
ctrl_surf_at_seg = False
# condition for the presence of control surfaces in segment
if getattr(segments[i_segs],'control_surfaces',False):
dihedral_ob = segments[i_segs-1].dihedral_outboard
section_spans = []
for cs in segments[i_segs].control_surfaces:
# create a vector if all the section breaks in a segment. sections include beginning and end of control surfaces and end of segment
control_surface_start = semispan*cs.span_fraction_start
control_surface_end = semispan*cs.span_fraction_end
section_spans.append(control_surface_start)
section_spans.append(control_surface_end)
ordered_section_spans = sorted(list(set(section_spans))) # sort the section_spans in order to create sections in spanwise order
num_sections = len(ordered_section_spans) # count the number of sections breaks that the segment will contain \
for section_count in range(num_sections):
# create and append sections onto avl wing structure
if ordered_section_spans[section_count] == semispan*segments[i_segs-1].percent_span_location:
# if control surface begins at beginning of segment, redundant section is removed
section_tags = list(avl_wing.sections.keys())
del avl_wing.sections[section_tags[-1]]
# create section for each break in the wing
section = Section()
section.tag = segments[i_segs].tag + '_section_'+ str(ordered_section_spans[section_count]) + 'm'
root_section_chord = root_chord*segments[i_segs-1].root_chord_percent
tip_section_chord = root_chord*segments[i_segs].root_chord_percent
semispan_section_fraction = (ordered_section_spans[section_count] - semispan*segments[i_segs-1].percent_span_location)/(semispan*(segments[i_segs].percent_span_location - segments[i_segs-1].percent_span_location ))
section.chord = np.interp(semispan_section_fraction,[0.,1.],[root_section_chord,tip_section_chord])
root_section_twist = segments[i_segs-1].twist/Units.degrees
tip_section_twist = root_chord*segments[i_segs].twist/Units.degrees
section.twist = np.interp(semispan_section_fraction,[0.,1.],[root_section_twist,tip_section_twist])
# if wing is a vertical wing, the y and z coordinates are swapped
if avl_wing.vertical:
dz = ordered_section_spans[section_count] - semispan*segments[i_segs-1].percent_span_location
dy = dz*np.tan(dihedral_ob)
l = dz/np.cos(dihedral_ob)
dx = l*np.tan(segment_sweeps[i_segs-1])
else:
dy = ordered_section_spans[section_count] - semispan*segments[i_segs-1].percent_span_location
dz = dy*np.tan(dihedral_ob)
l = dy/np.cos(dihedral_ob)
dx = l*np.tan(segment_sweeps[i_segs-1])
section.origin = [[origin[i_segs-1][0][0] + dx , origin[i_segs-1][0][1] + dy, origin[i_segs-1][0][2] + dz]]
# this loop appends all the control surfaces within a particular wing section
for index , ctrl_surf in enumerate(segments[i_segs].control_surfaces):
if (semispan*ctrl_surf.span_fraction_start == ordered_section_spans[section_count]) or \
(ordered_section_spans[section_count] == semispan*ctrl_surf.span_fraction_end):
c = Control_Surface()
c.tag = ctrl_surf.tag # name of control surface
c.sign_duplicate = '+1' # this float indicates control surface deflection symmetry
c.x_hinge = 1 - ctrl_surf.chord_fraction # this float is the % location of the control surface hinge on the wing
c.deflection = ctrl_surf.deflection / Units.degrees
c.order = index
# if control surface is an aileron, the deflection is asymmetric. This is standard convention from AVL
if (type(ctrl_surf) == Aileron):
c.sign_duplicate = '-1'
c.function = 'aileron'
c.gain = -1.0
# if control surface is a slat, the hinge is taken from the leading edge
elif (type(ctrl_surf) == Slat):
c.x_hinge = -ctrl_surf.chord_fraction
c.function = 'slat'
c.gain = -1.0
elif (type(ctrl_surf) == Flap):
c.function = 'flap'
c.gain = 1.0
elif (type(ctrl_surf) == Elevator):
c.function = 'elevator'
c.gain = 1.0
elif (type(ctrl_surf) == Rudder):
c.function = 'rudder'
c.gain = 1.0
else:
raise AttributeError("Define control surface function as 'slat', 'flap', 'elevator' , 'aileron' or 'rudder'")
section.append_control_surface(c)
elif (semispan*ctrl_surf.span_fraction_start < ordered_section_spans[section_count]) and \
(ordered_section_spans[section_count] < semispan*ctrl_surf.span_fraction_end):
c = Control_Surface()
c.tag = ctrl_surf.tag # name of control surface
c.sign_duplicate = '+1' # this float indicates control surface deflection symmetry
c.x_hinge = 1 - ctrl_surf.chord_fraction # this float is the % location of the control surface hinge on the wing
c.deflection = ctrl_surf.deflection / Units.degrees
c.order = index
# if control surface is an aileron, the deflection is asymmetric. This is standard convention from AVL
if (type(ctrl_surf) == Aileron):
c.sign_duplicate = '-1'
c.function = 'aileron'
c.gain = -1.0
# if control surface is a slat, the hinge is taken from the leading edge
elif (type(ctrl_surf) == Slat):
c.x_hinge = -ctrl_surf.chord_fraction
c.function = 'slat'
c.gain = -1.0
elif (type(ctrl_surf) == Flap):
c.function = 'flap'
c.gain = 1.0
elif (type(ctrl_surf) == Elevator):
c.function = 'elevator'
c.gain = 1.0
elif (type(ctrl_surf) == Rudder):
c.function = 'rudder'
c.gain = 1.0
else:
raise AttributeError("Define control surface function as 'slat', 'flap', 'elevator' , 'aileron' or 'rudder'")
section.append_control_surface(c)
if segments[i_segs].Airfoil:
if segments[i_segs].Airfoil.airfoil.coordinate_file is not None:
section.airfoil_coord_file = write_avl_airfoil_file(segments[i_segs].Airfoil.airfoil.coordinate_file)
elif segments[i_segs].Airfoil.airfoil.naca_airfoil is not None:
section.naca_airfoil = segments[i_segs].Airfoil.airfoil.naca_airfoil
avl_wing.append_section(section)
# check if control surface ends at end of segment
if ordered_section_spans[section_count] == semispan*segments[i_segs].percent_span_location:
ctrl_surf_at_seg = True
if ctrl_surf_at_seg: # if a control surface ends at the end of the segment, there is not need to append another segment
pass
else: # if there is no control surface break at the end of the segment, this block appends a segment
section = Section()
section.tag = segments[i_segs].tag
section.chord = root_chord*segments[i_segs].root_chord_percent
section.twist = segments[i_segs].twist/Units.degrees
section.origin = origin[i_segs]
if segments[i_segs].Airfoil:
if segments[i_segs].Airfoil.airfoil.coordinate_file is not None:
section.airfoil_coord_file = write_avl_airfoil_file(segments[i_segs].Airfoil.airfoil.coordinate_file)
elif segments[i_segs].Airfoil.airfoil.naca_airfoil is not None:
section.naca_airfoil = segments[i_segs].Airfoil.airfoil.naca_airfoil
# append section to wing
avl_wing.append_section(section)
# update origin for next segment
if (i_segs == n_segments-1):
return avl_wing
segment_percent_span = segments[i_segs+1].percent_span_location - segments[i_segs].percent_span_location
if avl_wing.vertical:
dz = semispan*segment_percent_span
dy = dz*np.tan(dihedral)
l = dz/np.cos(dihedral)
dx = l*np.tan(segment_sweep)
else:
dy = semispan*segment_percent_span
dz = dy*np.tan(dihedral)
l = dy/np.cos(dihedral)
dx = l*np.tan(segment_sweep)
origin.append( [[origin[i_segs][0][0] + dx , origin[i_segs][0][1] + dy, origin[i_segs][0][2] + dz]])
else:
symm = avl_wing.symmetric
dihedral = suave_wing.dihedral
span = suave_wing.spans.projected
semispan = suave_wing.spans.projected * 0.5 * (2 - symm)
if suave_wing.sweeps.leading_edge is not None:
sweep = suave_wing.sweeps.leading_edge
else:
suave_wing = wing_planform(suave_wing)
sweep = suave_wing.sweeps.leading_edge
avl_wing.semispan = semispan
origin = suave_wing.origin[0] | |
<reponame>clauswilke/epistasis_evolution<gh_stars>0
#!/usr/bin/python
'''
The script creates a class population.
'''
import numpy as np
import sys
import os.path
class population:
def __init__(self, L, N, s, q, mu, k_start):
self.L = L
self.N = N
self.s = s
self.q = q
self.mu = mu
self.k_start = k_start
self.initialize(k_start)
# replication occurs according to a Moran process
def replicate(self):
prob_repl = self.f*self.n_k/np.sum(self.f*self.n_k) #probability of being replicated based on the number of individuals within a mutation class and the fitness of the mutation class
self.n_k = np.random.multinomial(self.N, prob_repl) #draws offspring based on the probability of replication for each mutation class
def move_class(self, max_move, redistr):
if max_move == 1:
#move n_k into different mutation classes
for k in self.k_class:
down = redistr[k,0] #number of ind in class k to move down to class k-1
up = redistr[k,2] #number of ind in class k to move up to class k+1
#redistribute n_k for different mutational classes
if k==0:
self.n_k[k] = self.n_k[k]-up
self.n_k[k+1] = self.n_k[k+1]+up
if k==self.L:
self.n_k[k-1] = self.n_k[k-1]+down
self.n_k[k] = self.n_k[k]-down
if k>0 and k<self.L:
self.n_k[k-1] = self.n_k[k-1]+down
self.n_k[k] = self.n_k[k]-(up+down)
self.n_k[k+1] = self.n_k[k+1]+up
elif max_move == 3:
#move n_k into different mutation classes
for k in self.k_class:
down3 = redistr[k,0] #number of ind in class k to move down to class k-3
down2 = redistr[k,1] #number of ind in class k to move down to class k-2
down1 = redistr[k,2] #number of ind in class k to move down to class k-1
up1 = redistr[k,4] #number of ind in class k to move up to class k+1
up2 = redistr[k,5] #number of ind in class k to move up to class k+2
up3 = redistr[k,6] #number of ind in class k to move up to class k+3
#redistribute n_k for different mutational classes
if k==0:
self.n_k[k] = self.n_k[k]-(up3+up2+up1)
self.n_k[k+1] = self.n_k[k+1]+up1
self.n_k[k+2] = self.n_k[k+2]+up2
self.n_k[k+3] = self.n_k[k+3]+up3
elif k==1:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k] = self.n_k[k]-(down1+up3+up2+up1)
self.n_k[k+1] = self.n_k[k+1]+up1
self.n_k[k+2] = self.n_k[k+2]+up2
self.n_k[k+3] = self.n_k[k+3]+up3
elif k==2:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k-2] = self.n_k[k-2]+down2
self.n_k[k] = self.n_k[k]-(down1+down2+up3+up2+up1)
self.n_k[k+1] = self.n_k[k+1]+up1
self.n_k[k+2] = self.n_k[k+2]+up2
self.n_k[k+3] = self.n_k[k+3]+up3
elif k==self.L:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k-2] = self.n_k[k-2]+down2
self.n_k[k-3] = self.n_k[k-3]+down3
self.n_k[k] = self.n_k[k]-(down1+down2+down3)
elif k==self.L-1:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k-2] = self.n_k[k-2]+down2
self.n_k[k-3] = self.n_k[k-3]+down3
self.n_k[k] = self.n_k[k]-(down1+down2+down3+up1)
self.n_k[k+1] = self.n_k[k+1]+up1
elif k==self.L-2:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k-2] = self.n_k[k-2]+down2
self.n_k[k-3] = self.n_k[k-3]+down3
self.n_k[k] = self.n_k[k]-(down1+down2+down3+up1+up2)
self.n_k[k+1] = self.n_k[k+1]+up1
self.n_k[k+2] = self.n_k[k+2]+up2
else:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k-2] = self.n_k[k-2]+down2
self.n_k[k-3] = self.n_k[k-3]+down3
self.n_k[k] = self.n_k[k]-(down1+down2+down3+up3+up2+up1)
self.n_k[k+1] = self.n_k[k+1]+up1
self.n_k[k+2] = self.n_k[k+2]+up2
self.n_k[k+3] = self.n_k[k+3]+up3
elif max_move == 4:
#move n_k into different mutation classes
for k in self.k_class:
down4 = redistr[k,0] #number of ind in class k to move down to class k-4
down3 = redistr[k,1] #number of ind in class k to move down to class k-3
down2 = redistr[k,2] #number of ind in class k to move down to class k-2
down1 = redistr[k,3] #number of ind in class k to move down to class k-1
up1 = redistr[k,5] #number of ind in class k to move up to class k+1
up2 = redistr[k,6] #number of ind in class k to move up to class k+2
up3 = redistr[k,7] #number of ind in class k to move up to class k+3
up4 = redistr[k,8] #number of ind in class k to move up to class k+4
#redistribute n_k for different mutational classes
if k==0:
self.n_k[k] = self.n_k[k]-(up4+up3+up2+up1)
self.n_k[k+1] = self.n_k[k+1]+up1
self.n_k[k+2] = self.n_k[k+2]+up2
self.n_k[k+3] = self.n_k[k+3]+up3
self.n_k[k+4] = self.n_k[k+4]+up4
elif k==1:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k] = self.n_k[k]-(down1+up4+up3+up2+up1)
self.n_k[k+1] = self.n_k[k+1]+up1
self.n_k[k+2] = self.n_k[k+2]+up2
self.n_k[k+3] = self.n_k[k+3]+up3
self.n_k[k+4] = self.n_k[k+4]+up4
elif k==2:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k-2] = self.n_k[k-2]+down2
self.n_k[k] = self.n_k[k]-(down1+down2+up4+up3+up2+up1)
self.n_k[k+1] = self.n_k[k+1]+up1
self.n_k[k+2] = self.n_k[k+2]+up2
self.n_k[k+3] = self.n_k[k+3]+up3
self.n_k[k+4] = self.n_k[k+4]+up4
elif k==3:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k-2] = self.n_k[k-2]+down2
self.n_k[k-3] = self.n_k[k-3]+down3
self.n_k[k] = self.n_k[k]-(down1+down2+down3+up4+up3+up2+up1)
self.n_k[k+1] = self.n_k[k+1]+up1
self.n_k[k+2] = self.n_k[k+2]+up2
self.n_k[k+3] = self.n_k[k+3]+up3
self.n_k[k+4] = self.n_k[k+4]+up4
elif k==self.L:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k-2] = self.n_k[k-2]+down2
self.n_k[k-3] = self.n_k[k-3]+down3
self.n_k[k-4] = self.n_k[k-4]+down4
self.n_k[k] = self.n_k[k]-(down1+down2+down3+down4)
elif k==self.L-1:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k-2] = self.n_k[k-2]+down2
self.n_k[k-3] = self.n_k[k-3]+down3
self.n_k[k-4] = self.n_k[k-4]+down4
self.n_k[k] = self.n_k[k]-(down1+down2+down3+down4+up1)
self.n_k[k+1] = self.n_k[k+1]+up1
elif k==self.L-2:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k-2] = self.n_k[k-2]+down2
self.n_k[k-3] = self.n_k[k-3]+down3
self.n_k[k-4] = self.n_k[k-4]+down4
self.n_k[k] = self.n_k[k]-(down1+down2+down3+down4+up1+up2)
self.n_k[k+1] = self.n_k[k+1]+up1
self.n_k[k+2] = self.n_k[k+2]+up2
elif k==self.L-3:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k-2] = self.n_k[k-2]+down2
self.n_k[k-3] = self.n_k[k-3]+down3
self.n_k[k-4] = self.n_k[k-4]+down4
self.n_k[k] = self.n_k[k]-(down1+down2+down3+down4+up1+up2+up3)
self.n_k[k+1] = self.n_k[k+1]+up1
self.n_k[k+2] = self.n_k[k+2]+up2
self.n_k[k+3] = self.n_k[k+3]+up3
else:
self.n_k[k-1] = self.n_k[k-1]+down1
self.n_k[k-2] = self.n_k[k-2]+down2
self.n_k[k-3] = self.n_k[k-3]+down3
self.n_k[k-4] = self.n_k[k-4]+down4
self.n_k[k] = self.n_k[k]-(down1+down2+down3+down4+up4+up3+up2+up1)
self.n_k[k+1] = self.n_k[k+1]+up1
self.n_k[k+2] = self.n_k[k+2]+up2
self.n_k[k+3] = self.n_k[k+3]+up3
self.n_k[k+4] = self.n_k[k+4]+up4
def draw_indiv_to_move(self, max_move, mut_matrix_file = None):
    """Draw how many individuals of each class mutate this generation.

    Args:
        max_move: largest number of classes an individual may jump in a
            single generation; only 1, 3 and 4 are supported (anything
            else prints an error and exits the process).
        mut_matrix_file: path to a saved numpy (.npy) matrix of mutation
            probabilities; required when max_move is 3 or 4, unused when
            max_move is 1.

    Returns:
        An (L+1) x (2*max_move+1) float array: row k counts the
        individuals of class k moving to classes k-max_move ...
        k+max_move; the middle column holds the individuals staying in k.
    """
    # set up an empty array to keep track of individuals to move:
    # first index is the class k, the second indexes the destination
    # offsets -max_move ... +max_move. Every row is overwritten below.
    redistr = np.empty([self.L+1, 2*max_move+1])
    #move n_k into different mutation classes
    if max_move == 1:
        # single-step moves: probabilities follow directly from k/L,
        # no external matrix needed
        for k in self.k_class:
            #an array of probabilities of moving to k-1, staying at k, and moving to k+1
            prob_mut = np.array([self.mu*k/self.L , 1-self.mu, self.mu*(self.L-k)/self.L])
            #draw numbers of individuals to move
            m = np.random.multinomial(self.n_k[k], prob_mut)
            #store number of individuals to move to k-1, to keep in k, and to move to k+1 for each k class
            redistr[k]=m
    elif max_move == 3 or max_move == 4:
        if os.path.isfile(mut_matrix_file): #load the file if it exists in the directory provided
            mut_matrix=np.load(mut_matrix_file)
        else:
            print('The mutation matrix file does not exist')
            sys.exit()
        #calculate the number of individuals to move for each mutation class
        for k in self.k_class:
            #index an array of probabilities of moving to k-max_move ... k+max_move,
            #including staying at k
            r = [i for i in range(k-max_move, k+max_move+1) if i >= 0 and i<=self.L] #find the proper range of values (or columns) to index. If k=0, then range is 0,1,2,3 etc.
            prob_mut=mut_matrix[k,r] #extract a correct row and columns from the probability matrix
            #sanity check: probabilities must add up to 1 (within tolerance)
            if 1-np.sum(prob_mut)>0.0001:
                print(1-np.sum(prob_mut))
                print('class k:',k)
                print('Probabilities of moving to different mutational classes does not add up to 1')
                sys.exit()
            #draw numbers of individuals to move
            m = np.random.multinomial(self.n_k[k], prob_mut)
            #pad with zeros so the row always spans 2*max_move+1 columns:
            #moves to k < 0 or k > L get a count of 0
            if k-max_move<0:
                z = np.zeros(2*max_move+1-len(m))
                m = np.append(z,m)
            elif k+max_move>self.L:
                z = np.zeros(2*max_move+1-len(m))
                m = np.append(m,z)
            else:
                pass
            #add the number of individuals to move
            redistr[k]=m
    else:
        print('Class population does not support maximum class moves '+str(max_move))
        sys.exit()
    return redistr
def mutate(self, max_move, mut_matrix_file = None):
# draw number of individuals to move classes
redistr = self.draw_indiv_to_move(max_move, mut_matrix_file)
# move individuals into different classes
self.move_class(max_move, redistr)
# check that the number of individuals didn't change
if np.sum(self.n_k) != self.N:
print(self.n_k)
print('The total number of individuals does not add up to Ne')
sys.exit()
def mean_fitness(self):
mean_fitness = np.average(self.f, weights = self.n_k)
return mean_fitness
def initialize(self, k_start):
#intialize an array of mutation classes, k=1,2,3,...,L
self.k_class = np.array(range(self.L+1))
#calculate an array that contains fitness values for each mutation class k
self.f = np.exp(-self.s*(self.k_class**(self.q)))
#initialize a zero array to hold individuals (n_k) for each mutation class (k)
self.n_k = np.zeros(self.L + 1)
if self.f[k_start]==0: # check if the fitness at t=0 equals to zero
nonzero_f = np.nonzero(np.around(self.f,decimals=2)) #find fitness values > 0.0001
k_start = np.argmin(self.f[nonzero_f]) #find the minimum fitness value | |
or offsetting the brush (like the 'b' and 'm' default hotkeys). The
string argument is one of: radius, lowradius, opacity, value, depth, displacement, uvvector, or none. C: Default is none.
- dynclonemode : dcm (bool) []
- exists : ex (bool) [create]
Returns true or false depending upon whether the specified object exists. Other flags are ignored.
- expandfilename : eef (bool) [create,edit]
If true, it will expand the name of the export file and concatenate it with the surface name. Otherwise it will take the
name as it is. C: Default is true.
- exportaspectratio : ear (float) []
- exportfilemode : efm (unicode) [create,query,edit]
Specifies the export channel.The valid entries here are: alpha, luminance, rgb, rgba. C: Default is luminance/rgb. Q:
When queried, it returns a string.
- exportfilesave : esf (unicode) [edit]
Exports the attribute map and saves to a specified file.
- exportfilesizex : fsx (int) [create,query,edit]
Specifies the width of the attribute map to export. C: Default width is 256. Q: When queried, it returns an integer.
- exportfilesizey : fsy (int) [create,query,edit]
Specifies the width of the attribute map to export. C: Default width is 256. Q: When queried, it returns an integer.
- exportfiletype : eft (unicode) [create,query,edit]
Specifies the image file format. It can be one of the following: iff, tiff, jpeg, alias, rgb, fitpostScriptEPS,
softimage, wavefrontRLA, wavefrontEXP. C: default is tiff. Q: When queried, it returns a string.
- history : ch (bool) [create]
If this is a tool command, turn the construction history on for the tool in question.
- image1 : i1 (unicode) [create,query,edit]
First of three possible icons representing the tool associated with the context.
- image2 : i2 (unicode) [create,query,edit]
Second of three possible icons representing the tool associated with the context.
- image3 : i3 (unicode) [create,query,edit]
Third of three possible icons representing the tool associated with the context.
- importfileload : ifl (unicode) [edit]
Load the attribute map a specified file.
- importfilemode : ifm (unicode) [create,query,edit]
Specifies the channel to import. The valid entries here are: alpha, luminance, red, green, blue, and rgb. C: Default is
alpha. Q: When queried, it returns a string.
- importreassign : irm (bool) [create,query,edit]
Specifies if the multiply atrribute maps are to be reassigned while importing. Only maps previously exported from within
Artisan can be reassigned. C: Default is FALSE. Q: When queried, it returns a boolean.
- lastRecorderCmd : lrc (unicode) []
- lastStampName : lsn (unicode) []
- lowerradius : lr (float) [create,query,edit]
Sets the lower size of the brush (only apply on tablet).
- makeStroke : mst (int) []
- mappressure : mp (unicode) [create,query,edit]
Sets the tablet pressure mapping when the table is used. There are four options: none- the pressure has no effect,
opacity- the pressure is mapped to the opacity, radius- the is mapped to modify the radius of the brush, both- the
pressure modifies both the opacity and the radius. C: Default is none. Q: When queried, it returns a string.
- name : n (unicode) [create]
If this is a tool command, name the tool appropriately.
- objectsetnames : osn (unicode) []
- opacity : op (float) [create,query,edit]
Sets the brush opacity. C: Default is 1.0. Q: When queried, it returns a float.
- outline : o (bool) [create,query,edit]
Specifies if the brush should be drawn. C: Default is TRUE. Q: When queried, it returns a boolean.
- outwhilepaint : owp (bool) [create,query,edit]
Specifies if the brush outline should be drawn while painting. C: Default is FALSE. Q: When queried, it returns a
boolean.
- paintmode : pm (unicode) [create,query,edit]
Specifies the paint mode. There are two possibilities: screenand tangent. C: Default is screen. Q: When queried, it
returns a string.
- paintoperationtype : pot (unicode) []
- pickColor : pcm (bool) []
- pickValue : pv (bool) []
- playbackCursor : plc (float, float) []
- playbackPressure : plp (float) []
- preserveclonesource : pcs (bool) []
- profileShapeFile : psf (unicode) [query,edit]
Passes a name of the image file for the stamp shape profile.
- projective : prm (bool) [create,query,edit]
Specifies the projective paint mode. C: Default is 'false'. Q: When queried, it returns a boolean.
- radius : r (float) [create,query,edit]
Sets the size of the brush. C: Default is 1.0 cm. Q: When queried, it returns a float.
- record : rec (bool) []
- reflection : rn (bool) [create,query,edit]
Specifies the reflection mode. C: Default is 'false'. Q: When queried, it returns a boolean.
- reflectionaboutorigin : rno (bool) []
- reflectionaxis : ra (unicode) [create,query,edit]
Specifies the reflection axis. There are three possibilities: x, yand z. C: Default is x. Q: When queried, it returns a
string.
- screenRadius : scR (float) []
- selectclonesource : scs (bool) []
- setcolorfeedback : scf (bool) [create,query,edit]
Specifies if the color feedback is on or off. C: Default is ON. Q: When queried, it returns a boolean.
- setdisplaycvs : dcv (bool) [create,query,edit]
Specifies if the active cvs are displayed. C: Default is ON. Q: When queried, it returns a boolean.
- setopertype : sot (unicode) [create,query,edit]
Specifies the setEdit operation (add, transfer, remove). C: Default is add. Q: When queried, it returns a string.
- settomodify : stm (unicode) [create,query,edit]
Specifies the name of the set to modify. Q: When queried, it returns a string.
- showactive : sa (bool) [create,query,edit]
Sets on/off the display of the surface isoparms. C: Default is TRUE. Q: When queried, it returns a boolean.
- stampDepth : stD (float) []
- stampProfile : stP (unicode) [create,query,edit]
Sets the brush profile of the current stamp. Currently, the following profiles are supported: gaussian, soft, solidand
square. C: Default is gaussian. Q: When queried, it returns a string.
- stampSpacing : stS (float) []
- strokesmooth : ssm (unicode) []
- surfaceConformedBrushVertices : scv (bool) [create,query,edit]
Enables/disables the display of the effective brush area as affected vertices.
- tablet : tab (bool) [query]
Returns true if the tablet device is present, false if it is absent
- tangentOutline : to (bool) [create,query,edit]
Enables/disables the display of the brush circle tangent to the surface.
- usepressure : up (bool) [create,query,edit]
Sets the tablet pressure on/off. C: Default is false. Q: When queried, it returns a boolean. Flag can
have multiple arguments, passed either as a tuple or a list.
- worldRadius : wlR (float) []
Derived from mel command `maya.cmds.artSetPaintCtx`
"""
pass
def curveEditorCtx(*args, **kwargs):
"""
The curveEditorCtx command creates a new NURBS editor context, which is used to edit a NURBS curve or surface.
Flags:
- direction : dir (int) [query]
Query the current direction of the tangent control. Always zero for the curve case. In the surface case, its 0 for the
normal direction, 1 for U direction and 2 for V direction.
- exists : ex (bool) [create]
Returns true or false depending upon whether the specified object exists. Other flags are ignored.
- history : ch (bool) [create]
If this is a tool command, turn the construction history on for the tool in question.
- image1 : i1 (unicode) [create,query,edit]
First of three possible icons representing the tool associated with the context.
- image2 : i2 (unicode) [create,query,edit]
Second of three possible icons representing the tool associated with the context.
- image3 : i3 (unicode) [create,query,edit]
Third | |
(data) = self.view_addressv3_with_http_info(orcid, put_code, **kwargs) # noqa: E501
return data
def view_addressv3_with_http_info(self, orcid, put_code, **kwargs):  # noqa: E501
    """Fetch an address  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_addressv3_with_http_info(orcid, put_code, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :param str put_code: (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Options accepted through **kwargs; anything else is a caller error.
    allowed = {'orcid', 'put_code', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method view_addressv3" % key
            )
    # Both path parameters are mandatory.
    if orcid is None:
        raise ValueError("Missing the required parameter `orcid` when calling `view_addressv3`")  # noqa: E501
    if put_code is None:
        raise ValueError("Missing the required parameter `put_code` when calling `view_addressv3`")  # noqa: E501

    path_params = {'orcid': orcid, 'putCode': put_code}

    return self.api_client.call_api(
        '/v3.0/{orcid}/address/{putCode}', 'GET',
        path_params,
        [],   # query params
        {},   # header params
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['orcid_auth'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def view_biographyv3(self, orcid, **kwargs):  # noqa: E501
    """Get biography details  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_biographyv3(orcid, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :return: BiographyV30
             If the method is called asynchronously,
             returns the request thread.
    """
    # The helper already returns the payload for sync calls and the
    # request thread for async ones, so a single delegation covers both.
    kwargs['_return_http_data_only'] = True
    return self.view_biographyv3_with_http_info(orcid, **kwargs)  # noqa: E501
def view_biographyv3_with_http_info(self, orcid, **kwargs):  # noqa: E501
    """Get biography details  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_biographyv3_with_http_info(orcid, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :return: BiographyV30
             If the method is called asynchronously,
             returns the request thread.
    """
    # Options accepted through **kwargs; anything else is a caller error.
    allowed = {'orcid', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method view_biographyv3" % key
            )
    if orcid is None:
        raise ValueError("Missing the required parameter `orcid` when calling `view_biographyv3`")  # noqa: E501

    # Content negotiation: prefer the ORCID vendor media types.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/vnd.orcid+xml; qs=5', 'application/orcid+xml; qs=3', 'application/xml', 'application/vnd.orcid+json; qs=4', 'application/orcid+json; qs=2', 'application/json'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/v3.0/{orcid}/biography', 'GET',
        {'orcid': orcid},
        [],   # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='BiographyV30',  # noqa: E501
        auth_settings=['orcid_auth'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def view_clientv3(self, client_id, **kwargs):  # noqa: E501
    """Fetch client details  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_clientv3(client_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str client_id: (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # The helper already returns the payload for sync calls and the
    # request thread for async ones, so a single delegation covers both.
    kwargs['_return_http_data_only'] = True
    return self.view_clientv3_with_http_info(client_id, **kwargs)  # noqa: E501
def view_clientv3_with_http_info(self, client_id, **kwargs):  # noqa: E501
    """Fetch client details  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_clientv3_with_http_info(client_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str client_id: (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Options accepted through **kwargs; anything else is a caller error.
    allowed = {'client_id', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method view_clientv3" % key
            )
    if client_id is None:
        raise ValueError("Missing the required parameter `client_id` when calling `view_clientv3`")  # noqa: E501

    return self.api_client.call_api(
        '/v3.0/client/{client_id}', 'GET',
        {'client_id': client_id},
        [],   # query params
        {},   # header params
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['orcid_two_legs'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def view_distinction_summaryv3(self, orcid, put_code, **kwargs):  # noqa: E501
    """Fetch an Distinction summary  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_distinction_summaryv3(orcid, put_code, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :param str put_code: (required)
    :return: DistinctionSummaryV30
             If the method is called asynchronously,
             returns the request thread.
    """
    # The helper already returns the payload for sync calls and the
    # request thread for async ones, so a single delegation covers both.
    kwargs['_return_http_data_only'] = True
    return self.view_distinction_summaryv3_with_http_info(orcid, put_code, **kwargs)  # noqa: E501
def view_distinction_summaryv3_with_http_info(self, orcid, put_code, **kwargs):  # noqa: E501
    """Fetch an Distinction summary  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_distinction_summaryv3_with_http_info(orcid, put_code, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :param str put_code: (required)
    :return: DistinctionSummaryV30
             If the method is called asynchronously,
             returns the request thread.
    """
    # Options accepted through **kwargs; anything else is a caller error.
    allowed = {'orcid', 'put_code', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method view_distinction_summaryv3" % key
            )
    # Both path parameters are mandatory.
    if orcid is None:
        raise ValueError("Missing the required parameter `orcid` when calling `view_distinction_summaryv3`")  # noqa: E501
    if put_code is None:
        raise ValueError("Missing the required parameter `put_code` when calling `view_distinction_summaryv3`")  # noqa: E501

    path_params = {'orcid': orcid, 'putCode': put_code}

    # Content negotiation: prefer the ORCID vendor media types.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/vnd.orcid+xml; qs=5', 'application/orcid+xml; qs=3', 'application/xml', 'application/vnd.orcid+json; qs=4', 'application/orcid+json; qs=2', 'application/json'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/v3.0/{orcid}/distinction/summary/{putCode}', 'GET',
        path_params,
        [],   # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='DistinctionSummaryV30',  # noqa: E501
        auth_settings=['orcid_auth'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def view_distinctionsv3(self, orcid, **kwargs):  # noqa: E501
    """Fetch all distinctions  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.view_distinctionsv3(orcid, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str orcid: (required)
    :return: DistinctionsSummaryV30
             If the method is called asynchronously,
             returns the request thread.
    """
    # The helper already returns the payload for sync calls and the
    # request thread for async ones, so a single delegation covers both.
    kwargs['_return_http_data_only'] = True
    return self.view_distinctionsv3_with_http_info(orcid, **kwargs)  # noqa: E501
def view_distinctionsv3_with_http_info(self, orcid, **kwargs): # noqa: E501
"""Fetch all distinctions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> | |
def _get_diag_blocks(self, module, diag_blocks):
"""Helper method for determining number of diag_blocks to use
Overrides `diag_blocks` if the `module` does not support
`diag_blocks>1`. I.e. for a Linear layer, we do not want to
use a `diag_blocks>1`.
Args:
module: module
diag_blocks (int): default number of diag blocks to use
"""
return diag_blocks if module.__class__.__name__ == 'Conv2d' else 1
def _get_grad(self, module):
"""Get formated gradient of module
Args:
module: module/layer to get gradient of
Returns:
Formatted gradient with shape [output_dim, input_dim] for module
"""
if module.__class__.__name__ == 'Conv2d':
# n_filters * (in_c * kw * kh)
grad = module.weight.grad.data.view(module.weight.grad.data.size(0), -1)
else:
grad = module.weight.grad.data
if module.bias is not None:
grad = torch.cat([grad, module.bias.grad.data.view(-1, 1)], 1)
return grad
def _get_preconditioned_grad(self, module, grad):
"""Precondition gradient of module
Args:
module: module to compute preconditioned gradient for
grad: formatted gradient from `_get_grad()`
Returns:
preconditioned gradient with same shape as `grad`
"""
#v = self.m_QG[module].t() @ grad @ self.m_QA[module]
v = self.m_QG[module] @ grad @ self.m_QA[module]
if module.bias is not None:
v = [v[:, :-1], v[:, -1:]]
v[0] = v[0].view(module.weight.grad.data.size()) # weight
v[1] = v[1].view(module.bias.grad.data.size()) # bias
else:
v = [v.view(module.weight.grad.data.size())]
return v
def _update_scale_grad(self, updates):
"""Update the gradients in place and scale
Updates the gradients in-place for all modules using the preconditioned
gradients and scales the gradients.
Args:
updates (dict): dict of {module: precon_grad}
"""
vg_sum = 0
for module in self.modules:
v = updates[module]
vg_sum += (v[0] * module.weight.grad.data * self.lr ** 2).sum().item()
if module.bias is not None:
vg_sum += (v[1] * module.bias.grad.data * self.lr ** 2).sum().item()
if self.exclude_communicate_inverse:
nu = 1
else:
nu = min(1.0, math.sqrt(self.kl_clip / abs(vg_sum)))
for module in self.modules:
v = updates[module]
module.weight.grad.data.copy_(v[0])
module.weight.grad.data.mul_(nu)
if module.bias is not None:
module.bias.grad.data.copy_(v[1])
module.bias.grad.data.mul_(nu)
def step(self, closure=None, epoch=None):
    """Perform one K-FAC step
    Note:
        - this function should always be called before `optimizer.step()`
        - gradients must be averaged across ranks before calling `step()`
    Args:
        closure: for compatibility with the base optimizer class.
            `closure` is ignored by KFAC
        epoch (int, optional): epoch to use for determining when to end
            the `diag_warmup` period. `epoch` is not necessary if not using
            `diag_warmup`
    """
    # Update params, used for compatibilty with `KFACParamScheduler`
    group = self.param_groups[0]
    self.lr = group['lr']
    self.damping = group['damping']
    self.fac_update_freq = group['fac_update_freq']
    self.kfac_update_freq = group['kfac_update_freq']
    updates = {}
    handles = []
    # Decide how many diagonal blocks to approximate with; during the
    # warmup period (or when no epoch is supplied) fall back to defaults.
    if epoch is None:
        if self.diag_warmup > 0:
            print("WARNING: diag_warmup > 0 but epoch was not passed to "
                  "KFAC.step(). Defaulting to no diag_warmup")
        diag_blocks = self.diag_blocks
    else:
        diag_blocks = self.diag_blocks if epoch >= self.diag_warmup else 1
    # Phase 1: refresh the Kronecker factors A and G and average (or
    # sparsely allgather) them across workers.
    if self.steps % self.fac_update_freq == 0:
        if not self.exclude_compute_factor:
            self._update_A()
            self._update_G()
        if not self.exclude_communicate_factor:
            if hvd.size() > 1:
                if self.sparse:
                    self._allgather_factors()
                else:
                    self._allreduce_factors()
    # if we are switching from no diag approx to approx, we need to clear
    # off-block-diagonal elements
    if not self.have_cleared_Q and \
            epoch == self.diag_warmup and \
            self.steps % self.kfac_update_freq == 0:
        self._clear_eigen()
        self.have_cleared_Q = True
    # Phase 2: recompute eigendecompositions on their assigned ranks and
    # broadcast the results to everyone.
    if self.steps % self.kfac_update_freq == 0:
        # reset rank iter so device get the same layers
        # to compute to take advantage of caching
        self.rank_iter.reset()
        handles = []
        #eigen_ranks = self._generate_eigen_ranks(epoch)
        #eigen_ranks = self._generate_eigen_ranks_uniform(epoch)
        eigen_ranks = self._generate_eigen_ranks_naive(epoch)
        for module in self.modules:
            ranks_a, ranks_g = eigen_ranks[module]
            self.m_dA_ranks[module] = ranks_a[0]
            self.m_dG_ranks[module] = ranks_g[0]
            rank_a = ranks_a[0]
            rank_g = ranks_g[0]
            if not self.exclude_compute_inverse:
                self._update_eigen_A(module, ranks_a)
                self._update_eigen_G(module, ranks_g)
        if not self.exclude_communicate_inverse:
            if hvd.size() > 1:
                self._broadcast_eigendecomp()
        elif not self.exclude_compute_inverse:
            # should have a barriar
            if hvd.size() > 1:
                barrier()
    # Phase 3: precondition every module's gradient and write it back,
    # applying KL clipping inside _update_scale_grad.
    for i, module in enumerate(self.modules):
        grad = self._get_grad(module)
        precon_grad = self._get_preconditioned_grad(module, grad)
        updates[module] = precon_grad
    self._update_scale_grad(updates)
    self.steps += 1
def _generate_eigen_ranks_naive(self, epoch):
    """Assign eigendecomposition work to ranks round-robin.

    Walks the modules in registration order, handing each factor to the
    next rank(s) from ``self.rank_iter``. The result is cached in
    ``self.module_ranks`` and reused on later calls.

    Args:
        epoch (int): used to decide whether ``diag_blocks`` warmup is over

    Returns:
        dict: {module: (ranks_a, ranks_g)} tuples of rank assignments
    """
    if self.module_ranks is not None:
        return self.module_ranks
    module_ranks = {}
    diag_blocks = self.diag_blocks if epoch >= self.diag_warmup else 1
    # `buckets` tracks how much factor dimension each rank was given,
    # only for logging below.
    buckets = [0] * hvd.size()
    ranks = []
    for module in self.modules:
        # Get ranks to compute this layer on
        n = self._get_diag_blocks(module, diag_blocks)
        ranks_a = self.rank_iter.next(n)
        ranks_g = self.rank_iter.next(n) if self.distribute_layer_factors \
                else ranks_a
        module_ranks[module] = (ranks_a, ranks_g)
        ranks.append(ranks_a[0])
        buckets[ranks_a[0]] += self.m_A[module].shape[1]
        buckets[ranks_g[0]] += self.m_G[module].shape[1]
    self.module_ranks = module_ranks
    if hvd.rank() == 0:
        logger.info('buckets: %s', buckets)
        logger.info('module_ranks: %s', module_ranks.values())
        logger.info('ranks: %s', ranks)
    return module_ranks
def _generate_eigen_ranks_uniform(self, epoch):
    """Assign eigendecomposition work via greedy load balancing.

    Sorts factors by dimension (largest first) and repeatedly gives the
    next factor to the currently least-loaded rank. Only the A factors
    are scheduled; each module's G factor is co-located with its A
    factor. Result is cached in ``self.module_ranks``.

    Args:
        epoch (int): used to decide whether ``diag_blocks`` warmup is over

    Returns:
        dict: {module: (ranks_a, ranks_g)} tuples of rank assignments
    """
    if self.module_ranks is not None:
        return self.module_ranks
    module_ranks = {}
    diag_blocks = self.diag_blocks if epoch >= self.diag_warmup else 1
    buckets = [0] * hvd.size()
    dimensions = []
    module_factors = []
    for i, m in enumerate(self.modules):
        name = self.module_names[i]
        a_dimension = self.m_A[m].shape[1]
        #g_dimension = self.m_G[m].shape[1]
        dimensions.append(a_dimension)
        module_factors.append(name+'-A')
        #dimensions.append(g_dimension)
        #module_factors.append(name+'-G')
    descending_sorted_idx = np.argsort(dimensions)[::-1]
    A_ranks = {}
    G_ranks = {}
    for i in descending_sorted_idx:
        factor = module_factors[i]
        dimension = dimensions[i]
        m_i = self.module_names.index(factor[0:-2])
        m = self.modules[m_i]
        bi = np.argmin(buckets)
        buckets[bi] += dimension
        if factor[-1] == 'A':
            # G is placed on the same rank as A.
            A_ranks[m] = (bi,)
            G_ranks[m] = (bi,)
        else:
            # NOTE(review): unreachable while only '-A' factors are
            # appended above; kept for the commented-out '-G' variant.
            G_ranks[m] = (bi,)
    ranks = []
    for m in self.modules:
        module_ranks[m] = (A_ranks[m], G_ranks[m])
        ranks.append(A_ranks[m][0])
    self.module_ranks = module_ranks
    if hvd.rank() == 0:
        logger.info('buckets: %s', buckets)
        logger.info('module_ranks: %s', module_ranks.values())
        logger.info('ranks: %s', ranks)
    return module_ranks
def _generate_eigen_ranks(self, epoch):
    """Assign eigendecomposition work greedily in module order.

    For each module, gives the A factor to the least-loaded rank, then
    (after accounting for A's load) gives the G factor to the then
    least-loaded rank. Result is cached in ``self.module_ranks``.

    Args:
        epoch (int): used to decide whether ``diag_blocks`` warmup is over

    Returns:
        dict: {module: (ranks_a, ranks_g)} tuples of rank assignments
    """
    if self.module_ranks is not None:
        return self.module_ranks
    module_ranks = {}
    diag_blocks = self.diag_blocks if epoch >= self.diag_warmup else 1
    buckets = [0] * hvd.size()
    for module in self.modules:
        i = np.argmin(buckets)
        if hvd.rank() == 0:
            logger.info('A Name: %s, shape: %s', module, self.m_A[module].shape)
            logger.info('G Name: %s, shape: %s', module, self.m_G[module].shape)
        a_dimension = self.m_A[module].shape[1]
        g_dimension = self.m_G[module].shape[1]
        #buckets[i] += (a_dimension) + g_dimension)
        buckets[i] += a_dimension
        ranks_a = (i,)
        i = np.argmin(buckets)
        ranks_g = (i,)
        buckets[i] += g_dimension
        module_ranks[module] = (ranks_a, ranks_g)
    self.module_ranks = module_ranks
    if hvd.rank() == 0:
        logger.info('buckets: %s', buckets)
        logger.info('module_ranks: %s', module_ranks.values())
    return module_ranks
def _allreduce_factors(self):
    """Average the A and G Kronecker factors across all workers.

    Issues asynchronous allreduces for every module's factors on the
    forward (A) and backward (G) merged communicators, then blocks until
    both communicators have drained.
    """
    # Fix: dropped the unused `handles = []` leftover from an earlier
    # handle-based implementation; the merged communicators track their
    # own outstanding operations.
    for m in self.modules:
        name = self.module_name_map[m]
        self.fw_merged_comm.allreduce_async_(name, self.m_A[m].data)
        self.bw_merged_comm.allreduce_async_(name, self.m_G[m].data)
    # Wait for all outstanding factor reductions to complete.
    self.fw_merged_comm.synchronize()
    self.bw_merged_comm.synchronize()
def _allgather_factors(self):
    """Allgather the factors for all layers

    Sparse exchange: each worker sends only the nonzero entries of its
    A and G factors (values + flat indexes). After synchronizing, every
    factor is rebuilt by scatter-adding the gathered values from all
    workers and dividing by world size (i.e. a sparse average).
    """
    handles = []
    def _get_value_and_idx(sparse_tensor):
        # Flatten the factor and extract its nonzero values and their
        # flat positions; indexes are sent as int32 to save bandwidth.
        tensor = sparse_tensor.data.view(-1)
        one_indexes = tensor != 0
        indexes = one_indexes.nonzero().data.squeeze().view(-1)
        values = tensor.data[indexes]
        return values, indexes.int()
    # Launch all allgathers asynchronously, four per module.
    for i, m in enumerate(self.modules):
        module_name = self.module_names[i]
        A_values, A_indexes = _get_value_and_idx(self.m_A[m].data)
        A_value_name = module_name + '_A_value'
        A_idx_name = module_name + '_A_idx'
        h_value = allgather_async(A_values, A_value_name)
        h_idx = allgather_async(A_indexes, A_idx_name)
        G_values, G_indexes = _get_value_and_idx(self.m_G[m].data)
        G_value_name = module_name + '_G_value'
        G_idx_name = module_name + '_G_idx'
        h_value_G = allgather_async(G_values, G_value_name)
        h_idx_G = allgather_async(G_indexes, G_idx_name)
        handles.append((h_value, h_idx, h_value_G, h_idx_G))
    # Drain the handles in the same module order and rebuild each factor
    # in place from the gathered sparse contributions.
    for i, handle in enumerate(handles):
        module_name = self.module_names[i]
        module = self.modules[i]
        m_A = self.m_A[module].view(-1)
        m_A.fill_(0.0)
        m_G = self.m_G[module].view(-1)
        m_G.fill_(0.0)
        h_value_A, h_idx_A, h_value_G, h_idx_G = handle
        A_values = hvd.synchronize(h_value_A)
        A_indexes = hvd.synchronize(h_idx_A).long()
        m_A.scatter_add_(0, A_indexes, A_values)
        m_A.div_(hvd.size())
        G_values = hvd.synchronize(h_value_G)
        G_indexes = hvd.synchronize(h_idx_G).long()
        m_G.scatter_add_(0, G_indexes, G_values)
        m_G.div_(hvd.size())
def _allreduce_eigendecomp(self):
    """Sum the eigendecomposition tensors across all workers.

    Each factor's eigendecomposition is computed on exactly one rank
    while every other rank holds zeros, so an ``hvd.Sum`` allreduce
    behaves like an allgather of the computed results.
    """
    handles = []
    for module in self.modules:
        for tensor in (self.m_QA[module], self.m_QG[module],
                       self.m_dA[module], self.m_dG[module]):
            handles.append(hvd.allreduce_async_(tensor.data, op=hvd.Sum))
    for handle in handles:
        hvd.synchronize(handle)
def _broadcast_eigendecomp(self):
    """Broadcast each eigendecomposition from the rank that computed it.

    The compute rank for every factor was recorded in ``m_dA_ranks`` /
    ``m_dG_ranks``; broadcast QA and QG from those ranks to all workers
    and wait for completion.

    Fixes: removed the unused ``rank = hvd.rank()`` local, and replaced
    the docstring that was copy-pasted from the Sum-allreduce variant
    (this method broadcasts; it does not simulate an allgather).
    """
    for i, m in enumerate(self.modules):
        rank_a = self.m_dA_ranks[m]
        rank_g = self.m_dG_ranks[m]
        name = self.module_names[i]
        self.multi_comm.bcast_async_([name+'mQA'], [self.m_QA[m]], rank_a)
        self.multi_comm.bcast_async_([name+'mQG'], [self.m_QG[m]], rank_g)
    self.multi_comm.synchronize()
class KFACParamScheduler():
"""Updates KFAC parameters according to the epoch
Similar to `torch.optim.lr_scheduler.StepLR()`
Usage:
Call KFACParamScheduler.step() each epoch to compute new parameter
values.
Args:
kfac (KFAC): wrapped KFAC preconditioner
damping_alpha (float, optional): multiplicative factor of the damping
(default: 1)
damping_schedule (list, optional): list of epochs to update the damping
by `damping_alpha` (default: None)
update_freq_alpha (float, optional): multiplicative factor of the KFAC
update freq (default: 1)
update_freq_schedule (list, optional): list of epochs to update the KFAC
update freq by `update_freq_alpha` (default: None)
start_epoch (int, optional): starting epoch, for use if resuming training
from checkpoint (default: 0)
"""
def __init__(self,
kfac,
damping_alpha=1,
damping_schedule=None,
update_freq_alpha=1,
update_freq_schedule=None,
start_epoch=0):
self.kfac = kfac
params = self.kfac.param_groups[0]
self.damping_base = params['damping']
self.damping_alpha = damping_alpha
self.damping_schedule = | |
respectively). It returns ``True`` if
the first interval ends at the same time as the second interval (+/-
``epsilon``), and the first interval starts after the second interval.
Args:
epsilon: The maximum difference between the end time of the first
interval and the end time of the second interval.
Returns:
An output function that takes two temporal intervals and returns
``True`` if the first interval ends at the same time as the second
interval, and starts after the second interval starts.
"""
return lambda intrvl1, intrvl2: (abs(intrvl1['t2'] - intrvl2['t2']) <= epsilon
and intrvl1['t1'] > intrvl2['t1'])
def finishes_inv(epsilon=0):
    """Return a predicate testing whether the second interval "finishes"
    the first (the inverse of ``finishes``).

    The returned function takes two temporal intervals (dicts with keys
    't1' and 't2' for start and end times) and returns ``True`` when the
    second interval ends at the same time as the first (within
    ``epsilon``) and starts strictly after the first starts.

    Args:
        epsilon: Maximum allowed difference between the two end times.

    Returns:
        A function of two intervals implementing the test above.
    """
    def predicate(intrvl1, intrvl2):
        ends_align = abs(intrvl1['t2'] - intrvl2['t2']) <= epsilon
        return ends_align and intrvl2['t1'] > intrvl1['t1']
    return predicate
def during():
    """Return a predicate testing strict temporal containment.

    The returned function takes two temporal intervals (dicts with keys
    't1' and 't2') and returns ``True`` when the first interval starts
    strictly after the second starts and ends strictly before the
    second ends.

    Returns:
        A function of two intervals implementing the containment test.
    """
    def predicate(intrvl1, intrvl2):
        starts_after = intrvl1['t1'] > intrvl2['t1']
        ends_before = intrvl1['t2'] < intrvl2['t2']
        return starts_after and ends_before
    return predicate
def during_inv():
    """Return a predicate testing strict temporal containment, inverted.

    This is the inverse of ``during``: the returned function takes two
    temporal intervals (dicts with keys 't1' and 't2') and returns
    ``True`` when the *second* interval starts strictly after the first
    starts and ends strictly before the first ends.

    Returns:
        A function of two intervals implementing the inverted test.
    """
    def predicate(intrvl1, intrvl2):
        starts_after = intrvl2['t1'] > intrvl1['t1']
        ends_before = intrvl2['t2'] < intrvl1['t2']
        return starts_after and ends_before
    return predicate
def meets_before(epsilon=0):
    """Return a predicate testing whether one interval "meets" the next.

    The returned function takes two temporal intervals (dicts with keys
    't1' and 't2') and returns ``True`` when the first interval's end
    time is within ``epsilon`` of the second interval's start time.

    Args:
        epsilon: Maximum allowed gap between end of the first interval
            and start of the second.

    Returns:
        A function of two intervals implementing the test above.
    """
    def predicate(intrvl1, intrvl2):
        gap = intrvl1['t2'] - intrvl2['t1']
        return abs(gap) <= epsilon
    return predicate
def meets_after(epsilon=0):
    """Return a predicate testing whether one interval starts as the
    other ends (the inverse of ``meets_before``).

    The returned function takes two temporal intervals (dicts with keys
    't1' and 't2') and returns ``True`` when the first interval's start
    time is within ``epsilon`` of the second interval's end time.

    Args:
        epsilon: Maximum allowed gap between start of the first interval
            and end of the second.

    Returns:
        A function of two intervals implementing the test above.
    """
    def predicate(intrvl1, intrvl2):
        gap = intrvl2['t2'] - intrvl1['t1']
        return abs(gap) <= epsilon
    return predicate
def equal():
    """Return a predicate testing strict interval equality.

    The returned function takes two temporal intervals (dicts with keys
    't1' and 't2') and returns ``True`` when both the start times and
    the end times are equal.

    Returns:
        A function of two intervals implementing the equality test.
    """
    def predicate(intrvl1, intrvl2):
        same_start = intrvl1['t1'] == intrvl2['t1']
        same_end = intrvl1['t2'] == intrvl2['t2']
        return same_start and same_end
    return predicate
# Unary bounding box predicates.
def _area(bbox):
    """Return the area of a 2D bounding box.

    Args:
        bbox: Dict with keys 'x1', 'x2', 'y1', 'y2' giving spatial
            co-ordinates.

    Returns:
        Width times height of the box.
    """
    width = bbox['x2'] - bbox['x1']
    height = bbox['y2'] - bbox['y1']
    return width * height
def _width(bbox):
    """Return the extent of a 2D bounding box along the X axis.

    Args:
        bbox: Dict with keys 'x1', 'x2', 'y1', 'y2' giving spatial
            co-ordinates.

    Returns:
        The difference ``x2 - x1``.
    """
    x1, x2 = bbox['x1'], bbox['x2']
    return x2 - x1
def _height(bbox):
    """Return the extent of a 2D bounding box along the Y axis.

    Args:
        bbox: Dict with keys 'x1', 'x2', 'y1', 'y2' giving spatial
            co-ordinates.

    Returns:
        The difference ``y2 - y1``.
    """
    y1, y2 = bbox['y1'], bbox['y2']
    return y2 - y1
def position(x1, y1, x2, y2, epsilon=0.1):
    """Return a predicate testing whether a 2D bounding box sits at the
    given co-ordinates (within epsilon, strictly).

    The returned function takes a bounding box (dict with keys 'x1',
    'x2', 'y1', 'y2') and returns ``True`` when every co-ordinate is
    strictly within ``epsilon`` of the corresponding target value.

    Args:
        x1: Target value for the box's 'x1' field.
        y1: Target value for the box's 'y1' field.
        x2: Target value for the box's 'x2' field.
        y2: Target value for the box's 'y2' field.
        epsilon: Maximum (exclusive) per-co-ordinate difference.

    Returns:
        A function of one bounding box implementing the test above.
    """
    targets = {'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2}
    def predicate(bbox):
        return all(abs(bbox[key] - value) < epsilon
                   for key, value in targets.items())
    return predicate
def has_value(key, target, epsilon=0.1):
    """Return a predicate testing whether a dict entry is near a target.

    The returned function takes a dict ``d`` and returns ``True`` when
    ``abs(d[key] - target)`` is strictly less than ``epsilon``.

    Args:
        key: Lookup key for the value to compare.
        target: The value to compare against.
        epsilon: Maximum (exclusive) allowed difference.

    Returns:
        A function of one dict implementing the test above.
    """
    def predicate(payload):
        return abs(payload[key] - target) < epsilon
    return predicate
def area_exactly(area, epsilon=0.1):
"""Returns a function that computes whether a 2D bounding box has a certain
area (+/- epsilon).
The output function takes in a 2D bounding box (dict with keys 'x1', 'x2',
'y1', 'y2') and returns ``True`` if the absolute difference between the
bounding box's area and the specified area is less than ``epsilon``.
Args:
area: Target area value.
epsilon: Maximum difference between the bounding | |
import sys
from thread import get_ident
from peak.util.decorators import rewrap, cache_source, classy, decorate
__all__ = [
'Service', 'replaces', 'setting', 'InputConflict', 'DynamicRuleError',
'State', 'Action', 'resource', 'registry', 'new', 'empty',
'lookup', 'manager', 'reraise', 'with_', 'call_with', 'ScopeError',
'resource_registry',
]
_in_place = """__iadd__ __isub__ __imul__ __idiv__ __itruediv__ __ifloordiv__
__imod__ __ipow__ __ilshift__ __irshift__ __iand__ __ixor__ __ior__""".split()
_ignore = dict.fromkeys("""
__name__ __module__ __return__ __slots__ get __init__ __metaclass__ __doc__
__call__ __new__""".split() + _in_place
).__contains__
def _no_in_place(self, *args):
    """Reject use of an in-place operator on a service class.

    Installed (via ``_std_attrs``) for every augmented-assignment special
    method except ``__ilshift__``; unconditionally raises ``TypeError``.
    """
    raise TypeError(
        "In-place operators (other than <<=) cannot be performed on a"
        " service class"
    )
def _ilshift(cls, factory):
    """Implement ``ServiceClass <<= factory``.

    Registers `factory` as the rule for `cls` in the current State, then
    returns the class unchanged so the augmented assignment rebinds the
    same name.
    """
    State[cls] = factory
    return cls
def _mod(cls, expr):
    """Implement ``ServiceClass % "expr"``: wrap an expression string as
    lazily-evaluated rule source (``'lambda: <expr>'``)."""
    return ''.join(['lambda: ', expr])
# Baseline attributes for generated service metaclasses: every in-place
# operator raises, except `<<=` (rule registration) and `%` (lazy-rule
# string building).
_std_attrs = dict(
    [(k,_no_in_place) for k in _in_place], __ilshift__=_ilshift, __mod__=_mod
)
def redirect_attribute(cls, name, payload):
    """Forward class-level access of `name` to the active instance.

    Installs a property on the service's metaclass so that reads,
    writes, and deletes of `name` on the class are delegated to the
    object returned by ``cls.get()``.

    NOTE(review): `payload` (the original class-body value) is accepted
    but unused here.
    """
    setattr(type(cls), name, property(
        lambda s: getattr(s.get(), name),
        lambda s,v: setattr(s.get(), name, v),
        lambda s: delattr(s.get(), name),
    ))
class _ClassDelegate(classy):
    """Type whose attributes/methods are delegated to ``cls.get()``"""
    __slots__ = ()
    get = None  # dummy; real implementation installed in __class_init__
    decorate(classmethod)
    def __class_init__(cls, name, bases, cdict, supr):
        """Give each subclass its own metaclass and redirect attributes.

        Runs at class-creation time (via `classy`): clones the metaclass
        per service class, installs a default ``get()`` if none was
        defined, and turns every plain class-body attribute into a
        property that delegates to the active instance.
        """
        meta = type(cls)
        if getattr(meta, '__for_class__', None) is not cls:
            # Create a one-off metaclass for this class so per-class
            # operator overrides (from _std_attrs) don't leak to siblings.
            cls.__class__ = meta = type(meta)(
                cls.__name__+'Class', (meta,),
                dict(_std_attrs, __module__=cls.__module__, __for_class__=cls)
            )
            # XXX activate_attrs(meta)?
        supr()(cls, name, bases, cdict, supr)
        if 'get' not in cdict:
            # Default `get`: look up this class in the current State.
            cls.get = staticmethod(classmethod(lookup).__get__(None, cls))
        for k, v in cdict.items():
            if not isinstance(k, basestring):
                continue
            # Skip descriptors and reserved names; everything else is
            # delegated to the active instance.
            if not isinstance(v, (classmethod,staticmethod))and not _ignore(k):
                redirect_attribute(cls, k, v)
class State(_ClassDelegate):
    """A thread's current configuration and state

    Instances created without keyword attributes are replaced by a fresh
    empty state; instances created by ``new_state()`` get their working
    methods injected via keyword attributes. The method bodies below are
    therefore placeholders that only apply to the root state.
    """
    def __new__(cls, *rules, **attrs):
        # With attrs: a real (configured) instance; without: delegate to
        # the module-level empty-state factory.
        return attrs and object.__new__(cls) or empty()
    def __init__(self, **attrs):
        """Create an empty state with `rules` in effect"""
        self.__dict__.update(attrs)
    def __getitem__(self, key):
        """Get the rule for `key`"""
        return self.getRule(key)
    def __setitem__(self, key, rule):
        """Set the rule for `key`"""
        return self.setRule(key, rule)
    def swap(self):
        """Make this state current and return the old one"""
        raise NotImplementedError("Can't switch to the root state")
    def child(self, *rules):
        """Return a new child state of this one, with `rules` in effect"""
        raise NotImplementedError  # this method is replaced on each instance
    def __enter__(self):
        """Make this state a single-use nested state"""
        raise NotImplementedError("Can't enter the root state")
    def __exit__(self, typ, val, tb):
        """Close this state and invoke exit callbacks"""
        raise NotImplementedError("Can't exit the root state")
    def on_exit(self, callback):
        """Add a `callback(typ,val,tb)` to be invoked at ``__exit__`` time"""
        raise NotImplementedError  # this method is replaced on each instance
    decorate(staticmethod)
    def get(key=None):
        """Return the current state (no args) or a current rule (w/key)"""
        # this method is replaced later below
        raise NotImplementedError
    parent = None  # parent state; set on instances created by new_state()
# Module exception hierarchy: all three are raised by the state machinery
# below and are part of the public API (see __all__).
class InputConflict(Exception):
    """Attempt to set a rule that causes a visible conflict in the state"""
class DynamicRuleError(Exception):
    """A fallback or wildcard rule attempted to access dynamic state"""
class ScopeError(Exception):
    """A problem with scoping occurred"""
# Per-thread storage of swapped-out exception info: {thread_id: (typ, val, tb)}
_exc_info = {}
# Convenience "no exception" triple.
nones = None, None, None
def _swap_exc_info(data):
    """Store `data` as this thread's exception info; return the old triple."""
    this_thread = get_ident()
    old = _exc_info.get(this_thread, nones)
    _exc_info[this_thread] = data
    return old
def new():
    """Return a new child of the current state"""
    # Delegates through the State class proxy to the active state's
    # injected child() implementation.
    return State.child()
def _let_there_be_state():
    """Create a world of states, manipulable only by exported functions

    Everything lives in closures so the state tables cannot be reached
    except through the functions returned/installed here. `states` maps
    thread id -> (state, getRule, lookup, active_child) for the state
    that is current on that thread.
    """
    states = {}
    def _swap(what):
        # Make `what` this thread's current (state, getRule, lookup,
        # active_child) tuple and return the previous one.
        this_thread = get_ident()
        old = states.setdefault(this_thread, what)
        states[this_thread] = what
        return old
    def lookup(key):
        """Return the value of `key` in the current state"""
        try:
            state, getRule, lookup, child = states[get_ident()]
        except KeyError:
            # First use on this thread: install a fresh empty state.
            empty().swap()
            state, getRule, lookup, child = states[get_ident()]
        return lookup(key)
    def get(key=None):
        # Return the current State (no key) or the current rule for `key`.
        try:
            state, getRule, lookup, child = states[get_ident()]
        except KeyError:
            empty().swap()
            state, getRule, lookup, child = states[get_ident()]
        if key is None:
            return state
        return getRule(key)
    def disallow(key):
        # Used as the `lookup` slot while fallback/exit code runs, so any
        # dynamic read from such code fails loudly.
        raise DynamicRuleError(
            "default rule or exit function tried to read dynamic state", key
        )
    def empty():
        """Return a new, empty State instance"""
        state = new_state(root_getrule)
        state.parent = root
        return state
    def new_state(inherit=None, inheritedDistances=None, propagate=None):
        # Build one State instance, with all of its behavior captured in
        # the closures below. `inherit`/`inheritedDistances`/`propagate`
        # link a child state to its parent.
        buffer = {}      # tentative rules (pre-snapshot)
        rules = {}       # snapshotted (immutable once read) rules
        values = {}      # computed dynamic values
        distances = {}   # propagation distance per key
        computing = {}   # {thread_id: stack of min-distances} during compute
        get_stack, set_stack = computing.get, computing.setdefault
        def getRule(key):
            """Get my rule for `key`"""
            try:
                rule = rules[key]
            except KeyError:
                try:
                    rule = buffer[key]
                except KeyError:
                    rule = buffer.setdefault(key, __fallback(key))
                # snapshot the currently-set value - this is thread-safe
                # because setdefault is atomic, so rules[key] will only *ever*
                # have one value, even if it's not the one now in the buffer
                #
                rule = rules.setdefault(key, rule)
                if key not in distances:
                    # Rules identical to the parent's may propagate one
                    # level further up than locally-set ones.
                    if inheritedDistances is not None and inherit(key)==rule:
                        distances.setdefault(key, inheritedDistances[key]+1)
                    else:
                        distances.setdefault(key, 0)
            if computing:
                # Ensure that any value being computed in this thread has a
                # maximum propagation distance no greater than this rule's
                # distance.
                stack = get_stack(get_ident())
                if stack:
                    stack[-1] = min(stack[-1], distances[key])
            return rule
        def setRule(key, rule):
            """Set my rule for `key`, or raise an error if inconsistent"""
            buffer[key] = rule
            # as long as a snapshot hasn't been taken yet, `old` will be `rule`
            old = rules.get(key, rule)
            if old is not rule and old != rule:
                raise InputConflict(key, old, rule)
        def getValue(key):
            """Get the dynamic value of `key`"""
            try:
                value = values[key]
            except KeyError:
                this_thread = get_ident()
                rule = getRule(key)  # this ensures distances[key] is known
                stack = set_stack(this_thread, [])
                stack.append(distances[key])
                try:
                    value = key.__apply__(key, rule)
                finally:
                    distance = stack.pop()
                    if not stack:
                        del computing[this_thread]
                    else:
                        stack[-1] = min(stack[-1], distance)
                value = publish(distance, key, value)
            else:
                if computing:
                    # Cached value still constrains any enclosing compute.
                    stack = get_stack(get_ident())
                    if stack:
                        stack[-1] = min(stack[-1], distances[key])
            return value
        def publish(distance, key, value):
            """Accept value from this state or child, and maybe propagate it"""
            # It's safe to update distances here because no thread can depend
            # on the changed distance for propagation unless *some* thread has
            # already finished the relevant computation -- i.e., done this very
            # update. Otherwise, there would be no values[key], therefore the
            # thread would have to follow the same code path, ending up by
            # being the thread doing this update! Ergo, this is a safe update.
            #
            distances[key] = distance
            if distance and propagate:
                # pass it up to the parent, but use the value the parent has.
                # therefore, the parent at "distance" height from us will be
                # the arbiter of the value for all its children, ensuring that
                # exactly one value is used for the entire subtree!
                #
                value = propagate(distance-1, key, value)
            # Return whatever value wins at this level to our children
            return values.setdefault(key, value)
        def child():
            """Return a new child state"""
            s = new_state(getRule, distances, publish)
            s.parent = this
            return s
        def __fallback(key):
            """Compute the fallback for key"""
            # Fallback rules run with dynamic reads disabled (`disabled`).
            old = _swap(disabled)
            try:
                return key.__fallback__(inherit, key)
            finally:
                _swap(old)
        def swap():
            # Make this state current on the calling thread; returns the
            # previously-current State.
            if exited:
                raise ScopeError("Can't switch to an exited state")
            state, get, lookup, old_child = old = _swap(enabled)
            if lookup is disallow:
                # We were invoked from fallback/exit code: undo and fail.
                _swap(old)
                raise DynamicRuleError(
                    "default rule or exit function tried to change states"  # XXX
                )
            return state
        def __enter__():
            # Single-use nesting: record the parent tuple and mark this
            # state as the parent's active child.
            if my_parent or exited:
                raise ScopeError("Can't re-enter a previously-entered state")
            elif active_child:
                raise ScopeError("State already has an active child")
            elif get() is this:
                raise ScopeError("State is already current")
            parent, xx, xx, parents_child = old = states[get_ident()]
            if parents_child:
                raise ScopeError("Current state already has an active child")
            swap()
            my_parent.append(old); parents_child.append(this)
            return this
        def __exit__(typ, val, tb):
            if exited:
                raise ScopeError("State already exited")
            elif not my_parent:
                raise ScopeError("State hasn't been entered yet")
            elif active_child:
                raise ScopeError("Nested state(s) haven't exited yet")
            elif get() is not this:
                raise ScopeError("Can't exit a non-current state")
            parents_child = my_parent[0][-1]
            _swap(my_parent.pop())  # reactivate parent state
            parents_child.pop()
            exited.append(1)
            values.clear()
            return call_exitfuncs(typ, val, tb)
        # Per-state bookkeeping lists (used as cheap mutable flags).
        active_child = []
        my_parent = []
        exited = []
        exit_functions = []
        def call_exitfuncs(typ, val, tb):
            # Run exit callbacks with dynamic reads disabled; a callback
            # raising replaces the exception info passed to later ones.
            old = _swap(disabled)
            try:
                for func in exit_functions:
                    try:
                        func(typ, val, tb)
                    except:
                        typ, val, tb = sys.exc_info()
            finally:
                _swap(old)
                del typ, val, tb
        def on_exit(callback):
            if exited:
                raise ScopeError("State already exited")
            elif not my_parent:
                raise ScopeError("State hasn't been entered yet")
            if callback not in exit_functions:
                exit_functions.append(callback)
        # Assemble the State instance from the closures above.
        this = State(
            getRule=getRule, setRule=setRule, swap=swap, child=child,
            __enter__=__enter__, __exit__=__exit__, on_exit = on_exit
        )
        enabled = this, getRule, getValue, active_child
        disabled = None, getRule, disallow, active_child
        return this
    # Install the working `get` and the root state, then strip from root
    # the operations that make no sense there.
    State.get = staticmethod(get)
    State.root = root = new_state(); root.child = empty
    root_getrule = root.getRule
    del root.swap, root.__enter__, root.__exit__
    return lookup, empty
# Build the closed-over state world exactly once, export its two public
# entry points, and drop the factory so it cannot be re-run.
lookup, empty = _let_there_be_state(); del _let_there_be_state
class _GeneratorContextManager(object):
"""Helper for @context.manager decorator."""
def __init__(self, gen):
self.gen = gen
def __enter__(self):
for value in self.gen:
return value
else:
raise RuntimeError("generator | |
test_errors_for_non_accesspoint_arn(self):
params = {
'Bucket': 'arn:aws:s3:us-west-2:123456789012:unsupported:resource'
}
context = {}
with self.assertRaises(UnsupportedS3ArnError):
self.arn_handler.handle_arn(params, self.model, context)
def test_ignores_bucket_names(self):
    # A plain bucket name (not an ARN) must pass through untouched and
    # leave the request context alone.
    params = {'Bucket': 'mybucket'}
    context = {}
    self.arn_handler.handle_arn(params, self.model, context)
    self.assertEqual(context, {})
    self.assertEqual(params, {'Bucket': 'mybucket'})
def test_ignores_create_bucket(self):
    # CreateBucket is exempt from accesspoint ARN handling even when the
    # Bucket parameter looks like an accesspoint ARN.
    accesspoint_arn = 'arn:aws:s3:us-west-2:123456789012:accesspoint/endpoint'
    self.model.name = 'CreateBucket'
    params = {'Bucket': accesspoint_arn}
    context = {}
    self.arn_handler.handle_arn(params, self.model, context)
    self.assertEqual(params, {'Bucket': accesspoint_arn})
    self.assertEqual(context, {})
class TestS3EndpointSetter(unittest.TestCase):
def setUp(self):
    # Canonical fixture values shared by every test in this class.
    self.operation_name = 'GetObject'
    self.signature_version = 's3v4'
    self.region_name = 'us-west-2'
    self.account = '123456789012'
    self.bucket = 'mybucket'
    self.key = 'key.txt'
    self.accesspoint_name = 'myaccesspoint'
    self.partition = 'aws'
    # Endpoint resolver stub that always reports the standard AWS DNS
    # suffix; individual tests override construct_endpoint as needed.
    self.endpoint_resolver = mock.Mock()
    self.dns_suffix = 'amazonaws.com'
    self.endpoint_resolver.construct_endpoint.return_value = {
        'dnsSuffix': self.dns_suffix
    }
    self.endpoint_setter = self.get_endpoint_setter()
def get_endpoint_setter(self, **kwargs):
    # Build an S3EndpointSetter with the test defaults, letting callers
    # override any constructor argument.
    init_kwargs = dict(
        endpoint_resolver=self.endpoint_resolver,
        region=self.region_name,
    )
    init_kwargs.update(kwargs)
    return S3EndpointSetter(**init_kwargs)
def get_s3_request(self, bucket=None, key=None, scheme='https://',
                   querystring=None):
    # Assemble a bare GET request against the us-west-2 S3 endpoint,
    # appending bucket, key, and query string when supplied.
    parts = [scheme, 's3.us-west-2.amazonaws.com/']
    if bucket:
        parts.append(bucket)
    if key:
        parts.append('/%s' % key)
    if querystring:
        parts.append('?%s' % querystring)
    return AWSRequest(method='GET', headers={}, url=''.join(parts))
def get_s3_accesspoint_request(self, accesspoint_name=None,
                               accesspoint_context=None,
                               **s3_request_kwargs):
    # Build an S3 request whose context is tagged as an accesspoint;
    # name and context fall back to the class fixtures.
    name = accesspoint_name or self.accesspoint_name
    request = self.get_s3_request(name, **s3_request_kwargs)
    if accesspoint_context is None:
        accesspoint_context = self.get_s3_accesspoint_context(name=name)
    request.context['s3_accesspoint'] = accesspoint_context
    return request
def get_s3_accesspoint_context(self, **overrides):
    # Default accesspoint context built from the fixtures, with any
    # per-test overrides applied on top.
    context = dict(
        name=self.accesspoint_name,
        account=self.account,
        region=self.region_name,
        partition=self.partition,
    )
    context.update(overrides)
    return context
def call_set_endpoint(self, endpoint_setter, request, **kwargs):
    # Invoke set_endpoint with test defaults; kwargs override any of them.
    invoke_kwargs = dict(
        request=request,
        operation_name=self.operation_name,
        signature_version=self.signature_version,
        region_name=self.region_name,
    )
    invoke_kwargs.update(kwargs)
    endpoint_setter.set_endpoint(**invoke_kwargs)
def test_register(self):
    # Registration must hook set_endpoint onto the before-sign.s3 event.
    emitter = mock.Mock()
    self.endpoint_setter.register(emitter)
    emitter.register.assert_called_with(
        'before-sign.s3', self.endpoint_setter.set_endpoint)
def test_accesspoint_endpoint(self):
    # A default accesspoint request resolves to the s3-accesspoint host.
    request = self.get_s3_accesspoint_request()
    self.call_set_endpoint(self.endpoint_setter, request=request)
    want = 'https://{0}-{1}.s3-accesspoint.{2}.amazonaws.com/'.format(
        self.accesspoint_name, self.account, self.region_name)
    self.assertEqual(request.url, want)
def test_accesspoint_preserves_key_in_path(self):
    # The object key must survive the endpoint rewrite.
    request = self.get_s3_accesspoint_request(key=self.key)
    self.call_set_endpoint(self.endpoint_setter, request=request)
    want = 'https://{0}-{1}.s3-accesspoint.{2}.amazonaws.com/{3}'.format(
        self.accesspoint_name, self.account, self.region_name, self.key)
    self.assertEqual(request.url, want)
def test_accesspoint_preserves_scheme(self):
    # A plain-HTTP request must stay plain HTTP after the rewrite.
    request = self.get_s3_accesspoint_request(scheme='http://')
    self.call_set_endpoint(self.endpoint_setter, request=request)
    want = 'http://{0}-{1}.s3-accesspoint.{2}.amazonaws.com/'.format(
        self.accesspoint_name, self.account, self.region_name)
    self.assertEqual(request.url, want)
def test_accesspoint_preserves_query_string(self):
    # Sub-resource query strings (e.g. ?acl) must survive the rewrite.
    request = self.get_s3_accesspoint_request(querystring='acl')
    self.call_set_endpoint(self.endpoint_setter, request=request)
    want = 'https://{0}-{1}.s3-accesspoint.{2}.amazonaws.com/?acl'.format(
        self.accesspoint_name, self.account, self.region_name)
    self.assertEqual(request.url, want)
def test_uses_resolved_dns_suffix(self):
self.endpoint_resolver.construct_endpoint.return_value = {
'dnsSuffix': 'mysuffix.com'
}
request = self.get_s3_accesspoint_request()
self.call_set_endpoint(self.endpoint_setter, request=request)
expected_url = 'https://%s-%s.s3-accesspoint.%s.mysuffix.com/' % (
self.accesspoint_name, self.account, self.region_name,
)
self.assertEqual(request.url, expected_url)
def test_uses_region_of_client_if_use_arn_disabled(self):
client_region = 'client-region'
self.endpoint_setter = self.get_endpoint_setter(
region=client_region, s3_config={'use_arn_region': False})
request = self.get_s3_accesspoint_request()
self.call_set_endpoint(self.endpoint_setter, request=request)
expected_url = 'https://%s-%s.s3-accesspoint.%s.amazonaws.com/' % (
self.accesspoint_name, self.account, client_region,
)
self.assertEqual(request.url, expected_url)
def test_accesspoint_errors_for_custom_endpoint(self):
endpoint_setter = self.get_endpoint_setter(
endpoint_url='https://custom.com')
request = self.get_s3_accesspoint_request()
with self.assertRaises(UnsupportedS3AccesspointConfigurationError):
self.call_set_endpoint(endpoint_setter, request=request)
def test_errors_for_mismatching_partition(self):
endpoint_setter = self.get_endpoint_setter(partition='aws-cn')
accesspoint_context = self.get_s3_accesspoint_context(partition='aws')
request = self.get_s3_accesspoint_request(
accesspoint_context=accesspoint_context)
with self.assertRaises(UnsupportedS3AccesspointConfigurationError):
self.call_set_endpoint(endpoint_setter, request=request)
def test_errors_for_mismatching_partition_when_using_client_region(self):
endpoint_setter = self.get_endpoint_setter(
s3_config={'use_arn_region': False}, partition='aws-cn'
)
accesspoint_context = self.get_s3_accesspoint_context(partition='aws')
request = self.get_s3_accesspoint_request(
accesspoint_context=accesspoint_context)
with self.assertRaises(UnsupportedS3AccesspointConfigurationError):
self.call_set_endpoint(endpoint_setter, request=request)
def test_set_endpoint_for_auto(self):
endpoint_setter = self.get_endpoint_setter(
s3_config={'addressing_style': 'auto'})
request = self.get_s3_request(self.bucket, self.key)
self.call_set_endpoint(endpoint_setter, request)
expected_url = 'https://%s.s3.us-west-2.amazonaws.com/%s' % (
self.bucket, self.key
)
self.assertEqual(request.url, expected_url)
def test_set_endpoint_for_virtual(self):
endpoint_setter = self.get_endpoint_setter(
s3_config={'addressing_style': 'virtual'})
request = self.get_s3_request(self.bucket, self.key)
self.call_set_endpoint(endpoint_setter, request)
expected_url = 'https://%s.s3.us-west-2.amazonaws.com/%s' % (
self.bucket, self.key
)
self.assertEqual(request.url, expected_url)
def test_set_endpoint_for_path(self):
endpoint_setter = self.get_endpoint_setter(
s3_config={'addressing_style': 'path'})
request = self.get_s3_request(self.bucket, self.key)
self.call_set_endpoint(endpoint_setter, request)
expected_url = 'https://s3.us-west-2.amazonaws.com/%s/%s' % (
self.bucket, self.key
)
self.assertEqual(request.url, expected_url)
def test_set_endpoint_for_accelerate(self):
endpoint_setter = self.get_endpoint_setter(
s3_config={'use_accelerate_endpoint': True})
request = self.get_s3_request(self.bucket, self.key)
self.call_set_endpoint(endpoint_setter, request)
expected_url = 'https://%s.s3-accelerate.amazonaws.com/%s' % (
self.bucket, self.key
)
self.assertEqual(request.url, expected_url)
class TestContainerMetadataFetcher(unittest.TestCase):
    """Tests for ContainerMetadataFetcher (container credential metadata).

    The HTTP session and sleep() are mocked; each test scripts a sequence
    of responses via set_http_responses_to().
    """
    def setUp(self):
        self.responses = []
        self.http = mock.Mock()
        self.sleep = mock.Mock()
    def create_fetcher(self):
        """Build a fetcher wired to the mocked HTTP session and sleep()."""
        return ContainerMetadataFetcher(self.http, sleep=self.sleep)
    def fake_response(self, status_code, body):
        """Create a minimal HTTP-response stand-in with the given code/body."""
        response = mock.Mock()
        response.status_code = status_code
        response.content = body
        return response
    def set_http_responses_to(self, *responses):
        """Script the mocked session.

        Each item may be an Exception (raised by send()), a prebuilt fake
        response, or a JSON-serializable body (wrapped in a 200 response).
        """
        http_responses = []
        for response in responses:
            if isinstance(response, Exception):
                # Simulating an error condition.
                http_response = response
            elif hasattr(response, 'status_code'):
                # It's a precreated fake_response.
                http_response = response
            else:
                http_response = self.fake_response(
                    status_code=200, body=json.dumps(response).encode('utf-8'))
            http_responses.append(http_response)
        self.http.send.side_effect = http_responses
    def assert_request(self, method, url, headers):
        """Assert the most recent request matches method/url/headers."""
        request = self.http.send.call_args[0][0]
        self.assertEqual(request.method, method)
        self.assertEqual(request.url, url)
        self.assertEqual(request.headers, headers)
    def assert_can_retrieve_metadata_from(self, full_uri):
        """Assert `full_uri` is an allowed host and its JSON body is returned."""
        response_body = {'foo': 'bar'}
        self.set_http_responses_to(response_body)
        fetcher = self.create_fetcher()
        response = fetcher.retrieve_full_uri(full_uri)
        self.assertEqual(response, response_body)
        self.assert_request('GET', full_uri, {'Accept': 'application/json'})
    def assert_host_is_not_allowed(self, full_uri):
        """Assert `full_uri` is rejected before any HTTP request is sent."""
        response_body = {'foo': 'bar'}
        self.set_http_responses_to(response_body)
        fetcher = self.create_fetcher()
        with self.assertRaisesRegexp(ValueError, 'Unsupported host'):
            fetcher.retrieve_full_uri(full_uri)
        self.assertFalse(self.http.send.called)
    def test_can_specify_extra_headers_are_merged(self):
        headers = {
            # The 'Accept' header will override the
            # default Accept header of application/json.
            'Accept': 'application/not-json',
            'X-Other-Header': 'foo',
        }
        self.set_http_responses_to({'foo': 'bar'})
        fetcher = self.create_fetcher()
        response = fetcher.retrieve_full_uri(
            'http://localhost', headers)
        self.assert_request('GET', 'http://localhost', headers)
    def test_can_retrieve_uri(self):
        json_body = {
            "AccessKeyId" : "a",
            "SecretAccessKey" : "b",
            "Token" : "c",
            "Expiration" : "d"
        }
        self.set_http_responses_to(json_body)
        fetcher = self.create_fetcher()
        response = fetcher.retrieve_uri('/foo?id=1')
        self.assertEqual(response, json_body)
        # Ensure we made calls to the right endpoint.
        headers = {'Accept': 'application/json'}
        self.assert_request('GET', 'http://169.254.170.2/foo?id=1', headers)
    def test_can_retry_requests(self):
        success_response = {
            "AccessKeyId" : "a",
            "SecretAccessKey" : "b",
            "Token" : "c",
            "Expiration" : "d"
        }
        self.set_http_responses_to(
            # First response is a connection error, should
            # be retried.
            ConnectionClosedError(endpoint_url=''),
            # Second response is the successful JSON response
            # with credentials.
            success_response,
        )
        fetcher = self.create_fetcher()
        response = fetcher.retrieve_uri('/foo?id=1')
        self.assertEqual(response, success_response)
    def test_propagates_credential_error_on_http_errors(self):
        self.set_http_responses_to(
            # In this scenario, we never get a successful response.
            ConnectionClosedError(endpoint_url=''),
            ConnectionClosedError(endpoint_url=''),
            ConnectionClosedError(endpoint_url=''),
            ConnectionClosedError(endpoint_url=''),
            ConnectionClosedError(endpoint_url=''),
        )
        # As a result, we expect an appropriate error to be raised.
        fetcher = self.create_fetcher()
        with self.assertRaises(MetadataRetrievalError):
            fetcher.retrieve_uri('/foo?id=1')
        self.assertEqual(self.http.send.call_count, fetcher.RETRY_ATTEMPTS)
    def test_error_raised_on_non_200_response(self):
        self.set_http_responses_to(
            self.fake_response(status_code=404, body=b'Error not found'),
            self.fake_response(status_code=404, body=b'Error not found'),
            self.fake_response(status_code=404, body=b'Error not found'),
        )
        fetcher = self.create_fetcher()
        with self.assertRaises(MetadataRetrievalError):
            fetcher.retrieve_uri('/foo?id=1')
        # Should have tried up to RETRY_ATTEMPTS.
        self.assertEqual(self.http.send.call_count, fetcher.RETRY_ATTEMPTS)
    def test_error_raised_on_no_json_response(self):
        # If the service returns a success response but with a body that
        # does not contain JSON, we should still retry up to RETRY_ATTEMPTS,
        # but after exhausting retries we propagate the exception.
        self.set_http_responses_to(
            self.fake_response(status_code=200, body=b'Not JSON'),
            self.fake_response(status_code=200, body=b'Not JSON'),
            self.fake_response(status_code=200, body=b'Not JSON'),
        )
        fetcher = self.create_fetcher()
        with self.assertRaises(MetadataRetrievalError) as e:
            fetcher.retrieve_uri('/foo?id=1')
        self.assertNotIn('Not JSON', str(e.exception))
        # Should have tried up to RETRY_ATTEMPTS.
        self.assertEqual(self.http.send.call_count, fetcher.RETRY_ATTEMPTS)
    def test_can_retrieve_full_uri_with_fixed_ip(self):
        self.assert_can_retrieve_metadata_from(
            'http://%s/foo?id=1' % ContainerMetadataFetcher.IP_ADDRESS)
    def test_localhost_http_is_allowed(self):
        self.assert_can_retrieve_metadata_from('http://localhost/foo')
    def test_localhost_with_port_http_is_allowed(self):
        self.assert_can_retrieve_metadata_from('http://localhost:8000/foo')
    def test_localhost_https_is_allowed(self):
        self.assert_can_retrieve_metadata_from('https://localhost/foo')
    def test_can_use_127_ip_addr(self):
        self.assert_can_retrieve_metadata_from('https://127.0.0.1/foo')
    def test_can_use_127_ip_addr_with_port(self):
        self.assert_can_retrieve_metadata_from('https://127.0.0.1:8080/foo')
    def test_link_local_http_is_not_allowed(self):
        self.assert_host_is_not_allowed('http://169.254.0.1/foo')
    def test_link_local_https_is_not_allowed(self):
        self.assert_host_is_not_allowed('https://169.254.0.1/foo')
    def test_non_link_local_nonallowed_url(self):
        self.assert_host_is_not_allowed('http://172.16.58.3/foo')
    def test_error_raised_on_nonallowed_url(self):
        self.assert_host_is_not_allowed('http://somewhere.com/foo')
    def test_external_host_not_allowed_if_https(self):
        self.assert_host_is_not_allowed('https://somewhere.com/foo')
class TestUnsigned(unittest.TestCase):
    """botocore.UNSIGNED is a sentinel: copying must preserve identity."""
    def test_copy_returns_same_object(self):
        duplicate = copy.copy(botocore.UNSIGNED)
        self.assertIs(botocore.UNSIGNED, duplicate)
    def test_deepcopy_returns_same_object(self):
        duplicate = copy.deepcopy(botocore.UNSIGNED)
        self.assertIs(botocore.UNSIGNED, duplicate)
class TestInstanceMetadataFetcher(unittest.TestCase):
def setUp(self):
urllib3_session_send = 'botocore.httpsession.URLLib3Session.send'
self._urllib3_patch = mock.patch(urllib3_session_send)
self._send = self._urllib3_patch.start()
self._imds_responses = []
self._send.side_effect = self.get_imds_response
self._role_name = 'role-name'
self._creds = {
'AccessKeyId': 'spam',
'SecretAccessKey': 'eggs',
'Token': '<PASSWORD>',
'Expiration': 'something',
}
self._expected_creds = {
'access_key': self._creds['AccessKeyId'],
'secret_key': self._creds['SecretAccessKey'],
'token': self._creds['Token'],
'expiry_time': self._creds['Expiration'],
'role_name': self._role_name
}
    def tearDown(self):
        # Undo the URLLib3Session.send patch installed in setUp().
        self._urllib3_patch.stop()
    def add_imds_response(self, body, status_code=200):
        """Queue a canned IMDS HTTP response for the patched send()."""
        response = botocore.awsrequest.AWSResponse(
            url='http://169.254.169.254/',
            status_code=status_code,
            headers={},
            raw=RawResponse(body)
        )
        self._imds_responses.append(response)
def add_get_role_name_imds_response(self, role_name=None):
if role_name is None:
role_name = self._role_name
self.add_imds_response(body=role_name.encode('utf-8'))
def add_get_credentials_imds_response(self, creds=None):
if creds is None:
creds = self._creds
self.add_imds_response(body=json.dumps(creds).encode('utf-8'))
def add_get_token_imds_response(self, token, status_code=200):
self.add_imds_response(body=token.encode('utf-8'),
status_code=status_code)
    def add_metadata_token_not_supported_response(self):
        # A 404 from the token endpoint signals an IMDSv1-only instance.
        self.add_imds_response(b'', status_code=404)
    def add_imds_connection_error(self, exception):
        # Exception instances in the queue are raised by get_imds_response().
        self._imds_responses.append(exception)
def get_imds_response(self, request):
response = self._imds_responses.pop(0)
if isinstance(response, Exception):
raise response
return response
def test_disabled_by_environment(self):
env = {'AWS_EC2_METADATA_DISABLED': 'true'}
fetcher = InstanceMetadataFetcher(env=env)
result = fetcher.retrieve_iam_role_credentials()
self.assertEqual(result, {})
self._send.assert_not_called()
def test_disabled_by_environment_mixed_case(self):
env = {'AWS_EC2_METADATA_DISABLED': 'tRuE'}
fetcher = InstanceMetadataFetcher(env=env)
result = fetcher.retrieve_iam_role_credentials()
self.assertEqual(result, {})
self._send.assert_not_called()
def test_disabling_env_var_not_true(self):
url = 'https://example.com/'
env = {'AWS_EC2_METADATA_DISABLED': 'false'}
self.add_get_token_imds_response(token='token')
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
fetcher = InstanceMetadataFetcher(base_url=url, env=env)
result = fetcher.retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
def test_includes_user_agent_header(self):
user_agent = 'my-user-agent'
self.add_get_token_imds_response(token='token')
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
InstanceMetadataFetcher(
user_agent=user_agent).retrieve_iam_role_credentials()
self.assertEqual(self._send.call_count, 3)
for call in self._send.calls:
self.assertTrue(call[0][0].headers['User-Agent'], user_agent)
def test_non_200_response_for_role_name_is_retried(self):
# Response for role name that have a non 200 status code should
# be retried.
self.add_get_token_imds_response(token='token')
self.add_imds_response(
status_code=429, body=b'{"message": "Slow down"}')
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
def test_http_connection_error_for_role_name_is_retried(self):
# Connection related errors should be retried
self.add_get_token_imds_response(token='token')
self.add_imds_connection_error(ConnectionClosedError(endpoint_url=''))
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
    def test_empty_response_for_role_name_is_retried(self):
        # A 200 role-name response with an empty body should be retried.
        # (The old comment here was copy-pasted from the non-200 test.)
        self.add_get_token_imds_response(token='token')
        self.add_imds_response(body=b'')
        self.add_get_role_name_imds_response()
        self.add_get_credentials_imds_response()
        result = InstanceMetadataFetcher(
            num_attempts=2).retrieve_iam_role_credentials()
        self.assertEqual(result, self._expected_creds)
    def test_non_200_response_is_retried(self):
        self.add_get_token_imds_response(token='token')
        self.add_get_role_name_imds_response()
        # A non-200 (throttling) response for the credentials should be
        # retried.  (The old comment incorrectly described the empty-body
        # case.)
        self.add_imds_response(
            status_code=429, body=b'{"message": "Slow down"}')
        self.add_get_credentials_imds_response()
        result = InstanceMetadataFetcher(
            num_attempts=2).retrieve_iam_role_credentials()
        self.assertEqual(result, self._expected_creds)
def test_http_connection_errors_is_retried(self):
self.add_get_token_imds_response(token='token')
self.add_get_role_name_imds_response()
# Connection related errors should be retried
self.add_imds_connection_error(ConnectionClosedError(endpoint_url=''))
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
def test_empty_response_is_retried(self):
self.add_get_token_imds_response(token='token')
self.add_get_role_name_imds_response()
# Response for creds that has a 200 status code but is empty.
# This should be retried.
self.add_imds_response(body=b'')
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
def test_invalid_json_is_retried(self):
self.add_get_token_imds_response(token='token')
self.add_get_role_name_imds_response()
# Response for creds that has a 200 status code | |
# <gh_stars>10-100  -- dataset-scrape artifact, not valid Python; preserved as a comment
import datetime
import hashlib
import re
from mopidy import backend
from mopidy.models import Album, Artist, Image, Ref, SearchResult, Track
from urllib.parse import quote
from mopidy_bandcamp import logger
class BandcampLibraryProvider(backend.LibraryProvider):
root_directory = Ref.directory(uri="bandcamp:browse", name="bandcamp")
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.tracks = {}  # "<band>-<album>-<track>" id -> Track, cached by lookup()
        self.images = {}  # bandcamp id -> art id, consumed by get_images()
        self.scrape_urls = {}  # collection URI -> item page URL to scrape
        self.tags = self.backend.config["bandcamp"]["discover_tags"]
        self.genres = self.backend.config["bandcamp"]["discover_genres"]
        self.pages = self.backend.config["bandcamp"]["discover_pages"]
def browse(self, uri):
if not uri:
return []
logger.debug('Bandcamp browse : "%s"', uri)
if uri == "bandcamp:browse":
dirs = []
if self.pages:
dirs += [
Ref.directory(uri="bandcamp:genres", name="Discover by Genre"),
Ref.directory(uri="bandcamp:tags", name="Discover by Tag"),
]
if self.backend.config["bandcamp"]["identity"]:
dirs.append(Ref.directory(uri="bandcamp:collection", name="Collection"))
dirs.append(Ref.directory(uri="bandcamp:wishlist", name="Wishlist"))
return dirs
for colltype in ["collection", "wishlist"]:
if uri.startswith("bandcamp:" + colltype):
token = None
if uri != "bandcamp:" + colltype:
token = uri.split(":", 2)[2]
out = []
try:
data = self.backend.bandcamp.get_collection(
token=token, ctype=colltype
)
for i in data["items"]:
if "item_art" in i and "art_id" in i["item_art"]:
art = (
f"a{i['item_art']['art_id']:010d}"
if i["item_art"]["art_id"]
else None
)
if i["tralbum_type"] == "a":
aId = f"{i['band_id']}-{i['album_id']}"
name = f"{i['band_name']} - {i['album_title']} (Album)"
if art:
self.images[aId] = art
if colltype == "collection":
out.append(
Ref.album(uri=f"bandcamp:myalbum:{aId}", name=name)
)
self.scrape_urls[f"bandcamp:myalbum:{aId}"] = i[
"item_url"
]
else:
out.append(
Ref.album(uri=f"bandcamp:album:{aId}", name=name)
)
elif i["tralbum_type"] == "t":
aId = 0
if i["album_id"] is not None:
aId = i["album_id"]
tId = f"{i['band_id']}-{aId}-{i['item_id']}"
name = f"{i['item_title']} (Track)"
if art:
self.images[tId] = art
if colltype == "collection":
out.append(
Ref.album(uri=f"bandcamp:mytrack:{tId}", name=name)
)
self.scrape_urls[f"bandcamp:mytrack:{tId}"] = i[
"item_url"
]
else:
out.append(
Ref.album(uri=f"bandcamp:track:{tId}", name=name)
)
if data["more_available"]:
out.append(
Ref.directory(
uri="bandcamp:" + colltype + ":" + data["last_token"],
name="More...",
)
)
except Exception:
logger.exception("Failed to get collection")
return out
if uri == "bandcamp:genres" or uri == "bandcamp:tags":
stype = uri.split(":")[1]
return [
Ref.directory(
uri="bandcamp:"
+ ("tag:" if stype == "tags" else "genre:")
+ re.sub(r",", "%2C", re.sub(r"[^a-z0-9,]", "-", d.lower())),
name=d,
)
for d in (self.tags if stype == "tags" else self.genres)
]
if re.match(r"^bandcamp:(genre|tag):", uri):
component = uri.split(":")
stype, sid = component[1:3]
total = 0
pagenum = int(component[3]) if (len(component) > 3) else 0
out = []
for page in range(self.pages):
try:
if stype == "genre":
resp = self.backend.bandcamp.discover(
genre=sid, page=page + pagenum
)
else:
resp = self.backend.bandcamp.discover(
tag=sid, page=page + pagenum
)
except Exception:
logger.exception('Bandcamp failed to discover genre "%s"', uri)
total = resp["total_count"] if ("total_count" in resp) else 0
for i in resp["items"] if ("items" in resp) else []:
art = f"a{i['art_id']:010d}" if ("art_id" in i) else None
if i["type"] == "a":
aId = f"{i['band_id']}-{i['id']}"
name = f"{i['secondary_text']} - {i['primary_text']} (Album)"
if art:
self.images[aId] = art
out.append(Ref.album(uri="bandcamp:album:" + aId, name=name))
else:
# Only seen discover return album types.
logger.info("Found unknown type: '%s'", i["type"])
logger.info(i)
if (pagenum + self.pages) * self.backend.bandcamp.PAGE_ITEMS < total:
pagenum += self.pages
out.append(
Ref.directory(
uri=f"bandcamp:{stype}:{sid}:{pagenum}", name="More..."
)
)
return out
elif re.match(r"^bandcamp:(my)?(track|album):", uri):
tracks = self.lookup(uri)
return [Ref.track(uri=t.uri, name=t.name) for t in tracks]
    def get_images(self, uris):
        """Return {uri: [Image, ...]} for each requested URI.

        URIs with cached art resolve to f4.bcbits.com artwork in the
        configured sizes; everything else gets a deterministic placeholder
        (dummyimage.com, colors derived from an md5 of the name).

        NOTE(review): the cache key here is `uri.split(":")[2]`
        ("band-album[-track]"), but search() stores some art under the
        *full* URI string -- those entries appear unreachable from here;
        verify intended key scheme.
        """
        ret = {}
        for uri in uris:
            ret[uri] = []
            component = uri.split(":")
            if re.match(r"^(my)?(track|album|artist)", component[1], re.I):
                bcId = uri.split(":")[2]
                if bcId in self.images:
                    i = self.images[bcId]
                    img = f"https://f4.bcbits.com/img/{i}"
                    for s in self.backend.image_sizes:
                        if s in self.backend.bandcamp.IMAGE_SIZE:
                            # Known size code: emit explicit dimensions.
                            d = self.backend.bandcamp.IMAGE_SIZE[s]
                            ret[uri].append(
                                Image(uri=img + f"_{s}.jpg", width=d[0], height=d[1])
                            )
                        else:
                            ret[uri].append(Image(uri=img + f"_{s}.jpg"))
                else:
                    # No cached art: build a stable placeholder from the name.
                    name = ""
                    if len(component) == 3:
                        name = component[2]
                    elif len(component) == 2:
                        name = component[1] if component[1] != "browse" else "bandcamp"
                    m = hashlib.md5(name.encode("utf-8")).hexdigest()
                    for s in self.backend.image_sizes:
                        if s in self.backend.bandcamp.IMAGE_SIZE:
                            d = self.backend.bandcamp.IMAGE_SIZE[s]
                            ret[uri].append(
                                Image(
                                    uri=f"https://dummyimage.com/{d[0]}x{d[1]}/{m[0:3]}/{m[3:6]}&text={quote(name)}",
                                    width=d[0],
                                    height=d[1],
                                )
                            )
        return ret
    def lookup(self, uri):
        """Resolve a bandcamp URI to a list of mopidy Tracks.

        Supported forms: bandcamp:(my)album:<band>-<album>,
        bandcamp:(my)track:<band>-<album>-<track>, bandcamp:artist:<band>
        (expands the discography recursively), and bandcamp:http(s)... (a
        raw page URL to scrape).  Returns [] on any fetch failure.

        NOTE(review): in the album/track branch, `comment` is only bound on
        the scrape paths or when "bandcamp_url" is present in the response;
        if neither holds, the Track(...) call below would raise
        UnboundLocalError -- confirm the API always supplies bandcamp_url.
        """
        logger.debug('Bandcamp lookup "%s"', uri)
        if len(uri.split(":")) != 3:
            return []
        _, func, bcId = uri.split(":")
        ret = []
        if func == "album" or func == "track" or func.startswith("my"):
            if func == "track" or func == "mytrack":
                artist, album, song = bcId.split("-")
                # Serve cached tracks without hitting the network again.
                if bcId in self.tracks:
                    return [self.tracks[bcId]]
            else:
                artist, album = bcId.split("-")
            try:
                if func.startswith("my") and (uri in self.scrape_urls):
                    resp = self.backend.bandcamp.scrape(self.scrape_urls[uri])
                    comment = "URL: " + self.scrape_urls[uri]
                elif func == "track" or func == "mytrack":
                    resp = self.backend.bandcamp.get_track(artist, song)
                else:
                    resp = self.backend.bandcamp.get_album(artist, album)
            except Exception:
                logger.exception('Bandcamp failed to load info for "%s"', uri)
                return []
            my = ""
            if func.startswith("my"):
                my = "my"
            # If we haven't already scraped it, we'll need to:
            if "bandcamp_url" in resp:
                url = resp["bandcamp_url"]
                resp = self.backend.bandcamp.scrape(url)
                comment = "URL: " + url
            dt = datetime.date
            year = "0000"
            if "release_date" in resp and resp["release_date"] is not None:
                year = dt.fromtimestamp(resp["release_date"]).strftime("%Y")
            elif (
                "album_release_date" in resp and resp["album_release_date"] is not None
            ):
                year = resp["album_release_date"].split(" ")[2]
            if "bandcamp_url" in resp:
                comment = "URL: " + resp["bandcamp_url"]
            if "art_id" in resp:
                self.images[bcId] = f"a{resp['art_id']:010d}"
            # Genre string: prefer structured tags, fall back to keywords.
            genre = ""
            if "tags" in resp:
                genre = "; ".join([t["name"] for t in resp["tags"]])
            elif "keywords" in resp:
                if type(resp["keywords"]) is list:
                    genre = "; ".join(resp["keywords"])
                else:
                    genre = "; ".join(resp["keywords"].split(", "))
            artref = Artist(
                uri=f"bandcamp:artist:{artist}",
                name=resp["tralbum_artist"],
                sortname=resp["tralbum_artist"],
                musicbrainz_id="",
            )
            albref = None
            if "album_title" in resp:
                albref = Album(
                    uri=f"bandcamp:{my}album:{artist}-{album}",
                    name=resp["album_title"],
                    artists=[artref],
                    num_tracks=resp["num_downloadable_tracks"],
                    num_discs=None,
                    date=year,
                    musicbrainz_id="",
                )
            for track in resp["tracks"]:
                if "is_streamable" not in track or track["is_streamable"]:
                    trref = Track(
                        uri=f"bandcamp:{my}track:{artist}-{album}-{track['track_id']}",
                        name=track["title"],
                        artists=[artref],
                        album=albref,
                        composers=[],
                        performers=[],
                        genre=genre,
                        track_no=track["track_num"],
                        disc_no=None,
                        date=year,
                        length=int(track["duration"] * 1000)
                        if track["duration"]
                        else None,
                        bitrate=320 if my == "my" else 128,
                        comment=comment,
                        musicbrainz_id="",
                        last_modified=None,
                    )
                    ret.append(trref)
                    # Cache per-track refs and art for later lookups.
                    self.tracks[f"{bcId}-{track['track_id']}"] = trref
                    if "art_id" in resp:
                        self.images[
                            f"{artist}-{album}-{track['track_id']}"
                        ] = f"a{resp['art_id']:010d}"
            logger.debug("Bandcamp returned %d tracks in lookup", len(ret))
        elif func == "artist":
            try:
                resp = self.backend.bandcamp.get_artist(bcId)
            except Exception:
                logger.exception('Bandcamp failed to load artist info for "%s"', uri)
                return []
            # Expand the discography by recursing into album/track lookups.
            if "discography" in resp:
                for disc in resp["discography"]:
                    if disc["item_type"] == "album":
                        ret.extend(
                            self.lookup(
                                f"bandcamp:album:{disc['band_id']}-{disc['item_id']}"
                            )
                        )
                    elif disc["item_type"] == "track":
                        ret.extend(
                            self.lookup(
                                f"bandcamp:track:{disc['band_id']}-0-{disc['item_id']}"
                            )
                        )
        elif re.match(r"^https?", func, re.I):
            # Raw page URL after the "bandcamp:" prefix (9 characters).
            url = uri[9:]
            try:
                resp = self.backend.bandcamp.scrape(url)
                if "tracks" not in resp:
                    return self.lookup(f"bandcamp:artist:{resp['id']}")
                else:
                    my = ""
                    if "is_purchased" in resp and resp["is_purchased"]:
                        my = "my"
                    artist = resp["band_id"]
                    album = 0
                    if resp["item_type"] == "album":
                        album = resp["id"]
                    elif "current" in resp and "album_id" in resp["current"]:
                        album = resp["current"]["album_id"]
                    year = "0000"
                    if (
                        "album_release_date" in resp
                        and resp["album_release_date"] is not None
                    ):
                        year = resp["album_release_date"].split(" ")[2]
                    elif (
                        "current" in resp
                        and "release_date" in resp["current"]
                        and resp["current"]["release_date"] is not None
                    ):
                        year = resp["current"]["release_date"].split(" ")[2]
                    comment = "URL: " + url
                    if "art_id" in resp:
                        self.images[f"{artist}-{album}"] = f"a{resp['art_id']:010d}"
                    genre = ""
                    if "keywords" in resp:
                        if type(resp["keywords"]) is list:
                            genre = "; ".join(resp["keywords"])
                        else:
                            genre = "; ".join(resp["keywords"].split(", "))
                    artref = Artist(
                        uri=f"bandcamp:artist:{artist}",
                        name=resp["tralbum_artist"],
                        sortname=resp["tralbum_artist"],
                        musicbrainz_id="",
                    )
                    albref = None
                    if "album_title" in resp:
                        albref = Album(
                            uri=f"bandcamp:{my}album:{artist}-{album}",
                            name=resp["album_title"],
                            artists=[artref],
                            num_tracks=resp["num_downloadable_tracks"],
                            num_discs=None,
                            date=year,
                            musicbrainz_id="",
                        )
                    for track in resp["tracks"]:
                        if "file" in track:
                            trref = Track(
                                uri=f"bandcamp:{my}track:{artist}-{album}-{track['track_id']}",
                                name=track["title"],
                                artists=[artref],
                                album=albref,
                                composers=[],
                                performers=[],
                                genre=genre,
                                track_no=track["track_num"],
                                disc_no=None,
                                date=year,
                                length=int(track["duration"] * 1000)
                                if track["duration"]
                                else None,
                                bitrate=320 if "mp3-v0" in track["file"] else 128,
                                comment=comment,
                                musicbrainz_id="",
                                last_modified=None,
                            )
                            ret.append(trref)
                            self.tracks[f"{bcId}-{track['track_id']}"] = trref
                            if "art_id" in resp:
                                self.images[
                                    f"{artist}-{album}-{track['track_id']}"
                                ] = f"a{resp['art_id']:010d}"
                    logger.debug("Bandcamp returned %d tracks in lookup", len(ret))
            except Exception:
                logger.exception('Bandcamp failed to scrape "%s"', url)
        return ret
def search(self, query=None, uris=None, exact=False):
tracks = set()
albums = set()
artists = set()
q = query
if type(query) is dict:
r = []
for v in query.values():
if type(v) is list:
r.extend(v)
else:
r.append(v)
q = "+".join(map(quote, r))
elif type(query) is list:
q = "+".join(map(quote, query))
try:
resp = self.backend.bandcamp.search(q)
except Exception:
logger.exception("Bandcamp failed to search.")
if "results" in resp:
for r in resp["results"]:
if r["type"] == "t":
artref = Artist(
uri=f"bandcamp:artist:{r['band_id']}",
name=r["band_name"],
sortname=r["band_name"],
musicbrainz_id="",
)
albref = Album(
uri=f"bandcamp:album:{r['band_id']}-{r['album_id']}",
name=r["album_name"],
artists=[artref],
num_tracks=None,
num_discs=None,
date="0000",
musicbrainz_id="",
)
comment = ""
if "url" in r:
comment = "URL: " + r["url"]
if "art_id" in r:
self.images[
f"bandcamp:track:{r['band_id']}-{r['album_id']}-{r['id']}"
] = f"a{r['art_id']:010d}"
trref = Track(
uri=f"bandcamp:track:{r['band_id']}-{r['album_id']}-{r['id']}",
name=r["name"],
artists=[artref],
album=albref,
composers=[],
performers=[],
genre="",
track_no=None,
disc_no=None,
date="0000",
length=None,
bitrate=128,
comment=comment,
musicbrainz_id="",
last_modified=None,
)
tracks.add(trref)
elif r["type"] == "a":
artref = Artist(
uri=f"bandcamp:artist:{r['band_id']}",
name=r["band_name"],
sortname=r["band_name"],
musicbrainz_id="",
)
albref = Album(
uri=f"bandcamp:album:{r['band_id']}-{r['id']}",
name=r["name"],
artists=[artref],
num_tracks=None,
num_discs=None,
date="0000",
musicbrainz_id="",
)
albums.add(albref)
elif r["type"] == "b":
artref | |
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: <NAME> <<EMAIL>>
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import print_function
from scalyr_agent import scalyr_logging
__author__ = "<EMAIL>"
import os
import re
import sys
import tempfile
from io import open
import platform
import json
import mock
import pytest
from scalyr_agent.configuration import Configuration, BadConfiguration
from scalyr_agent.config_util import (
parse_array_of_strings,
convert_config_param,
get_config_from_env,
)
from scalyr_agent.json_lib import JsonObject, JsonArray
from scalyr_agent.json_lib.objects import (
ArrayOfStrings,
SpaceAndCommaSeparatedArrayOfStrings,
)
from scalyr_agent.platform_controller import DefaultPaths
from scalyr_agent.test_base import ScalyrTestCase
from scalyr_agent.test_base import skipIf
from scalyr_agent.builtin_monitors.journald_utils import (
LogConfigManager,
JournaldLogFormatter,
)
from scalyr_agent.util import JsonReadFileException
import scalyr_agent.util as scalyr_util
from scalyr_agent.compat import os_environ_unicode
import scalyr_agent.configuration
import six
from six.moves import range
from mock import patch, Mock
class TestConfigurationBase(ScalyrTestCase):
    def setUp(self):
        super(TestConfigurationBase, self).setUp()
        # Snapshot the environment so tearDown() can restore it exactly.
        self.original_os_env = dict(
            [(k, v) for k, v in six.iteritems(os_environ_unicode)]
        )
        self._config_dir = tempfile.mkdtemp()
        self._config_file = os.path.join(self._config_dir, "agent.json")
        self._config_fragments_dir = os.path.join(self._config_dir, "agent.d")
        os.makedirs(self._config_fragments_dir)
        self._extra_config_fragments_dir = tempfile.mkdtemp() + "extra"
        os.makedirs(self._extra_config_fragments_dir)
        # Remove any scalyr-related variables so ambient configuration
        # cannot leak into the tests.
        for key in os_environ_unicode.keys():
            if "scalyr" in key.lower():
                del os.environ[key]
        self._original_win32_file = scalyr_agent.configuration.win32file
        # Patch it so tests pass on Windows
        scalyr_agent.configuration.win32file = None
    def tearDown(self):
        """Restore the pre-test os environment"""
        os.environ.clear()
        os.environ.update(self.original_os_env)
        # Undo the win32file patch applied in setUp().
        scalyr_agent.configuration.win32file = self._original_win32_file
def __convert_separators(self, contents):
"""Recursively converts all path values for fields in a JsonObject that end in 'path'.
If this is a JsonArray, iterators over the elements.
@param contents: The contents to convert in a valid Json atom (JsonObject, JsonArray, or primitive)
@return: The passed in object.
"""
contents_type = type(contents)
if contents_type is dict or contents_type is JsonObject:
for key in contents:
value = contents[key]
value_type = type(value)
if key.endswith("path") and (value_type is six.text_type):
contents[key] = self.convert_path(contents[key])
elif value_type in (dict, JsonObject, list, JsonArray):
self.__convert_separators(value)
elif contents_type is list or contents_type is JsonArray:
for i in range(len(contents)):
self.__convert_separators(contents[i])
return contents
def _write_file_with_separator_conversion(self, contents):
contents = scalyr_util.json_encode(
self.__convert_separators(
scalyr_util.json_scalyr_config_decode(contents)
).to_dict()
)
fp = open(self._config_file, "w")
fp.write(contents)
fp.close()
    def _write_config_fragment_file_with_separator_conversion(
        self, file_path, contents, config_dir=None
    ):
        """Write a config fragment, converting path separators first.

        @param file_path: File name relative to `config_dir`.
        @param contents: Scalyr-flavored JSON text to convert and write.
        @param config_dir: Target directory; defaults to the agent.d dir.
        """
        if config_dir is None:
            config_dir = self._config_fragments_dir
        contents = scalyr_util.json_encode(
            self.__convert_separators(
                scalyr_util.json_scalyr_config_decode(contents)
            ).to_dict()
        )
        full_path = os.path.join(config_dir, file_path)
        with open(full_path, "w") as fp:
            fp.write(contents)
def _write_raw_config_fragment_file(self, file_path, contents, config_dir=None):
if config_dir is None:
config_dir = self._config_fragments_dir
full_path = os.path.join(config_dir, file_path)
with open(full_path, "w") as fp:
fp.write(contents)
    class LogObject(object):
        # Minimal stand-in for a log entry, used by configuration tests.
        def __init__(self, config):
            self.config = config
            # Convenience alias for the configured log file path.
            self.log_path = config["path"]
    class MonitorObject(object):
        # Minimal stand-in for a monitor; derives its log name from the module.
        def __init__(self, config):
            self.module_name = config["module"]
            self.config = config
            # Log file name is the last dotted component of the module name,
            # e.g. "a.b.foo" -> "foo.log".
            self.log_config = {"path": self.module_name.split(".")[-1] + ".log"}
def _create_test_configuration_instance(self, logger=None, extra_config_dir=None):
"""Creates an instance of a Configuration file for testing.
@return: The test instance
@rtype: Configuration
"""
logger = logger or mock.Mock()
default_paths = DefaultPaths(
self.convert_path("/var/log/scalyr-agent-2"),
self.convert_path("/etc/scalyr-agent-2/agent.json"),
self.convert_path("/var/lib/scalyr-agent-2"),
)
if os.path.isfile(self._config_file):
os.chmod(self._config_file, int("640", 8))
return Configuration(
self._config_file, default_paths, logger, extra_config_dir=extra_config_dir
)
# noinspection PyPep8Naming
def assertPathEquals(self, actual_path, expected_path):
"""Similar to `assertEquals` but the expected path is converted to the underlying system's dir separators
before comparison.
@param actual_path: The actual path. This should already be using the correct separator characters.
@param expected_path: The expected path. This should use `/`'s as the separator character. It will be
converted to this system's actual separators.
@type actual_path: six.text_type
@type expected_path: six.text_type
"""
self.assertEquals(actual_path, self.convert_path(expected_path))
def make_path(self, parent_directory, path):
"""Returns the full path created by joining path to parent_directory.
This method is a convenience function because it allows path to use forward slashes
to separate path components rather than the platform's separator character.
@param parent_directory: The parent directory. This argument must use the system's separator character. This may
be None if path is relative to the current working directory.
@param path: The path to add to parent_directory. This should use forward slashes as the separator character,
regardless of the platform's character.
@return: The path created by joining the two with using the system's separator character.
"""
if parent_directory is None and os.path.sep == "/":
return path
if parent_directory is None:
result = ""
elif path.startswith("/"):
result = ""
else:
result = parent_directory
for path_part in path.split("/"):
if len(path_part) > 0:
result = os.path.join(result, path_part)
if os.path.sep == "\\" and not result.startswith("C:\\"):
result = "C:\\%s" % result
return result
    def convert_path(self, path):
        """Converts the forward slashes in path to the platform's separator and returns the value.

        @param path: The path to convert. This should use forward slashes as the separator character, regardless of the
            platform's character.
        @return: The path created by converting the forward slashes to the platform's separator.
        """
        # Delegates to make_path with no parent directory.
        return self.make_path(None, path)
class TestConfiguration(TestConfigurationBase):
    def test_basic_case(self):
        """Parse a minimal config and verify every other option gets its default."""
        self._write_file_with_separator_conversion(
            """ {
            api_key: "hi there",
            logs: [ { path:"/var/log/tomcat6/access.log"} ]
          }
        """
        )
        config = self._create_test_configuration_instance()
        config.parse()
        self.assertEquals(config.api_key, "hi there")
        # Default agent paths and top-level flags.
        self.assertPathEquals(config.agent_log_path, "/var/log/scalyr-agent-2")
        self.assertPathEquals(config.agent_data_path, "/var/lib/scalyr-agent-2")
        self.assertEquals(config.additional_monitor_module_paths, "")
        self.assertEquals(config.config_directory, self._config_fragments_dir)
        self.assertEquals(config.implicit_metric_monitor, True)
        self.assertEquals(config.implicit_agent_log_collection, True)
        self.assertFalse(config.use_unsafe_debugging)
        self.assertEquals(config.scalyr_server, "https://agent.scalyr.com")
        self.assertEquals(len(config.server_attributes), 1)
        self.assertTrue("serverHost" in config.server_attributes)
        # Default rate-limiting / request-shaping knobs.
        self.assertEquals(config.global_monitor_sample_interval, 30.0)
        self.assertEquals(config.max_send_rate_enforcement, "unlimited")
        self.assertIsNone(config.parsed_max_send_rate_enforcement)
        self.assertEquals(config.disable_max_send_rate_enforcement_overrides, False)
        self.assertEquals(config.max_allowed_request_size, 5900000)
        self.assertEquals(config.min_allowed_request_size, 100 * 1024)
        self.assertEquals(config.min_request_spacing_interval, 0.0)
        self.assertEquals(config.max_request_spacing_interval, 5.0)
        self.assertEquals(config.high_water_bytes_sent, 100 * 1024)
        self.assertEquals(config.high_water_request_spacing_adjustment, 0.6)
        self.assertEquals(config.low_water_bytes_sent, 20 * 1024)
        self.assertEquals(config.low_water_request_spacing_adjustment, 1.5)
        self.assertEquals(config.failure_request_spacing_adjustment, 1.5)
        self.assertEquals(config.request_too_large_adjustment, 0.5)
        # Default debugging / logging knobs.
        self.assertEquals(config.debug_level, 0)
        self.assertEquals(config.stdout_severity, "NOTSET")
        self.assertEquals(config.request_deadline, 60.0)
        self.assertEquals(config.enable_gc_stats, False)
        # Default log-copying limits.
        self.assertEquals(config.max_line_size, 49900)
        self.assertEquals(config.max_log_offset_size, 200000000)
        self.assertEquals(config.max_existing_log_offset_size, 200000000)
        self.assertEquals(config.max_sequence_number, 1024 ** 4)
        self.assertEquals(config.line_completion_wait_time, 5)
        self.assertEquals(config.read_page_size, 64 * 1024)
        self.assertEquals(config.internal_parse_max_line_size, config.read_page_size)
        self.assertEquals(config.copy_staleness_threshold, 15 * 60)
        self.assertEquals(config.log_deletion_delay, 10 * 60)
        self.assertEquals(config.max_new_log_detection_time, 1 * 60)
        self.assertEquals(config.copying_thread_profile_interval, 0)
        self.assertEquals(
            config.copying_thread_profile_output_path, "/tmp/copying_thread_profiles_"
        )
        self.assertTrue(config.ca_cert_path.endswith("ca_certs.crt"))
        self.assertTrue(config.verify_server_certificate)
        self.assertFalse(config.debug_init)
        self.assertFalse(config.pidfile_advanced_reuse_guard)
        self.assertFalse(config.strip_domain_from_default_server_host)
        self.assertEquals(config.pipeline_threshold, 0)
        # Default Kubernetes service-account / kubelet paths.
        self.assertEquals(
            config.k8s_service_account_cert,
            "/run/secrets/kubernetes.io/serviceaccount/ca.crt",
        )
        self.assertEquals(
            config.k8s_service_account_token,
            "/var/run/secrets/kubernetes.io/serviceaccount/token",
        )
        self.assertEquals(
            config.k8s_service_account_namespace,
            "/var/run/secrets/kubernetes.io/serviceaccount/namespace",
        )
        self.assertEquals(
            config.k8s_kubelet_ca_cert,
            "/run/secrets/kubernetes.io/serviceaccount/ca.crt",
        )
        self.assertEquals(
            config.k8s_verify_kubelet_queries,
            True,
        )
        # The explicit log plus the two implicit agent logs.
        self.assertEquals(len(config.log_configs), 3)
        self.assertPathEquals(
            config.log_configs[0].get_string("path"), "/var/log/tomcat6/access.log"
        )
        self.assertEquals(
            config.log_configs[0].get_json_object("attributes"), JsonObject()
        )
        self.assertEquals(
            config.log_configs[0].get_json_array("sampling_rules"), JsonArray()
        )
        self.assertEquals(
            config.log_configs[0].get_json_array("redaction_rules"), JsonArray()
        )
        self.assertPathEquals(
            config.log_configs[1].get_string("path"),
            "/var/log/scalyr-agent-2/agent.log",
        )
        self.assertPathEquals(
            config.log_configs[2].get_string("path"),
            "/var/log/scalyr-agent-2/agent-worker-session-*.log",
        )
        self.assertFalse(config.log_configs[0].get_bool("ignore_stale_files"))
        self.assertEquals(
            config.log_configs[0].get_float("staleness_threshold_secs"), 300
        )
        self.assertEquals(len(config.monitor_configs), 0)
        self.assertIsNone(config.network_proxies)
        self.assertEqual(config.healthy_max_time_since_last_copy_attempt, 60.0)
def test_empty_config(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there"
}
"""
)
config = self._create_test_configuration_instance()
config.parse()
self.assertEquals(config.api_key, "hi there")
self.assertEquals(len(config.log_configs), 2)
self.assertPathEquals(
config.log_configs[0].get_string("path"),
"/var/log/scalyr-agent-2/agent.log",
)
self.assertPathEquals(
config.log_configs[1].get_string("path"),
"/var/log/scalyr-agent-2/agent-worker-session-*.log",
)
    def test_overriding_basic_settings(self):
        """Set every overridable option and verify each parsed value."""
        self._write_file_with_separator_conversion(
            """ {
            api_key: "hi there",
            agent_log_path: "/var/silly1",
            agent_data_path: "/var/silly2",
            additional_monitor_module_paths: "silly3",
            config_directory: "silly4",
            implicit_metric_monitor: false,
            implicit_agent_log_collection: false,
            use_unsafe_debugging: true,
            allow_http: true,
            scalyr_server: "noland.scalyr.com",
            global_monitor_sample_interval: 60.0,
            max_send_rate_enforcement: "2 MB/s",
            disable_max_send_rate_enforcement_overrides: true,
            max_allowed_request_size: 2000000,
            min_allowed_request_size: 7000,
            min_request_spacing_interval: 2.0,
            max_request_spacing_interval: 10.0,
            high_water_bytes_sent: 50000,
            low_water_bytes_sent: 5000,
            high_water_request_spacing_adjustment: 2.0,
            low_water_request_spacing_adjustment: -1.0,
            failure_request_spacing_adjustment: 2.0,
            request_too_large_adjustment: 0.75,
            debug_level: 1,
            stdout_severity: "WARN",
            request_deadline: 30.0,
            server_attributes: { region: "us-east" },
            ca_cert_path: "/var/lib/foo.pem",
            verify_server_certificate: false,
            pipeline_threshold: 0.5,
            strip_domain_from_default_server_host: true,
            max_line_size: 1024,
            max_log_offset_size: 1048576,
            max_existing_log_offset_size: 2097152,
            max_sequence_number: 1024,
            line_completion_wait_time: 120,
            read_page_size: 3072,
            internal_parse_max_line_size: 4013,
            copy_staleness_threshold: 240,
            log_deletion_delay: 300,
            debug_init: true,
            pidfile_advanced_reuse_guard: true,
            enable_gc_stats: true,
            max_new_log_detection_time: 120,
            copying_thread_profile_interval: 2,
            copying_thread_profile_output_path: "/tmp/some_profiles",
            http_proxy: "http://foo.com",
            https_proxy: "https://bar.com",
            k8s_service_account_cert: "foo_cert",
            k8s_service_account_token: "foo_token",
            k8s_service_account_namespace: "foo_namespace",
            k8s_kubelet_ca_cert: "kubelet_cert",
            k8s_verify_kubelet_queries: false,
            logs: [ { path: "/var/log/tomcat6/access.log", ignore_stale_files: true} ],
            journald_logs: [ { journald_unit: ".*", parser: "journald_catchall" } ],
            healthy_max_time_since_last_copy_attempt: 30.0
          }
        """
        )
        config = self._create_test_configuration_instance()
        config.parse()
        self.assertEquals(config.api_key, "hi there")
        # Paths and directories.
        self.assertPathEquals(config.agent_log_path, os.path.join("/var/", "silly1"))
        self.assertPathEquals(config.agent_data_path, os.path.join("/var/", "silly2"))
        self.assertEquals(config.additional_monitor_module_paths, "silly3")
        self.assertEquals(
            config.config_directory, os.path.join(self._config_dir, "silly4")
        )
        self.assertEquals(config.implicit_metric_monitor, False)
        self.assertEquals(config.implicit_agent_log_collection, False)
        self.assertTrue(config.use_unsafe_debugging)
        self.assertEquals(config.scalyr_server, "noland.scalyr.com")
        self.assertEquals(len(config.server_attributes), 2)
        self.assertEquals(config.server_attributes["region"], "us-east")
        # Rate-limiting / request-shaping overrides.
        self.assertEquals(config.global_monitor_sample_interval, 60.0)
        self.assertEquals(config.max_send_rate_enforcement, "2 MB/s")
        self.assertEquals(config.parsed_max_send_rate_enforcement, 2000000)
        self.assertEquals(config.disable_max_send_rate_enforcement_overrides, True)
        self.assertEquals(config.max_allowed_request_size, 2000000)
        self.assertEquals(config.min_allowed_request_size, 7000)
        self.assertEquals(config.min_request_spacing_interval, 2.0)
        self.assertEquals(config.max_request_spacing_interval, 10.0)
        self.assertEquals(config.high_water_bytes_sent, 50000)
        self.assertEquals(config.high_water_request_spacing_adjustment, 2.0)
        self.assertEquals(config.low_water_bytes_sent, 5000)
        self.assertEquals(config.low_water_request_spacing_adjustment, -1.0)
        # Log-copying limit overrides (sizes/times normalized on parse).
        self.assertEquals(config.max_line_size, 1 * 1024)
        self.assertEquals(config.max_log_offset_size, 1 * 1024 * 1024)
        self.assertEquals(config.max_existing_log_offset_size, 2 * 1024 * 1024)
        self.assertEquals(config.max_sequence_number, 1 * 1024)
        self.assertEquals(config.line_completion_wait_time, 2 * 60)
        self.assertEquals(config.read_page_size, 3 * 1024)
        self.assertEquals(config.internal_parse_max_line_size, 4013)
        self.assertEquals(config.copy_staleness_threshold, 4 * 60)
        self.assertEquals(config.log_deletion_delay, 5 * 60)
        self.assertEquals(config.enable_gc_stats, True)
        self.assertEquals(config.copying_thread_profile_interval, 2)
        self.assertEquals(
            config.copying_thread_profile_output_path,
            self.convert_path("/tmp/some_profiles"),
        )
        self.assertEquals(config.max_new_log_detection_time, 2 * 60)
        self.assertTrue(config.strip_domain_from_default_server_host)
        self.assertEquals(config.pipeline_threshold, 0.5)
        self.assertEquals(config.failure_request_spacing_adjustment, 2.0)
        self.assertEquals(config.request_too_large_adjustment, 0.75)
        self.assertEquals(config.debug_level, 1)
        self.assertEquals(config.stdout_severity, "WARN")
        self.assertEquals(config.request_deadline, 30.0)
        self.assertPathEquals(config.ca_cert_path, "/var/lib/foo.pem")
        self.assertFalse(config.verify_server_certificate)
        self.assertTrue(config.debug_init)
        self.assertTrue(config.pidfile_advanced_reuse_guard)
        # Kubernetes overrides.
        self.assertEquals(config.k8s_service_account_cert, "foo_cert")
        self.assertEquals(config.k8s_service_account_token, "foo_token")
        self.assertEquals(config.k8s_service_account_namespace, "foo_namespace")
        self.assertEquals(config.k8s_kubelet_ca_cert, "kubelet_cert")
        self.assertEquals(config.k8s_verify_kubelet_queries, False)
        self.assertTrue(config.log_configs[0].get_bool("ignore_stale_files"))
        self.assertEqual(
            config.network_proxies,
            {"http": "http://foo.com", "https": "https://bar.com"},
        )
        self.assertEqual(
            config.journald_log_configs[0].get_string("parser"), "journald_catchall"
        )
        self.assertEqual(config.healthy_max_time_since_last_copy_attempt, 30.0)
def test_missing_api_key(self):
self._write_file_with_separator_conversion(
""" {
logs: [ { path:"/var/log/tomcat6/access.log"} ]
}
"""
)
config = self._create_test_configuration_instance()
self.assertRaises(BadConfiguration, config.parse)
def test_force_https_no_scheme(self):
self._write_file_with_separator_conversion(
""" {
api_key: "hi there",
scalyr_server: "agent.scalyr.com",
}
"""
)
config | |
import datetime
import email
import email.parser
import glob
import mailbox
import os
import re
import subprocess
import time
import urllib
import warnings
from email.header import Header
from email.message import Message
from email.mime.text import MIMEText
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import requests
import yaml
from bs4 import BeautifulSoup
class ListservMessageWarning(BaseException):
    """Exception type for ListservMessage-specific problems.

    NOTE(review): subclassing BaseException (rather than Exception) means a
    plain `except Exception` handler will NOT catch this — confirm intended.
    """

    pass
class ListservListWarning(BaseException):
    """Exception type for ListservList-specific problems.

    NOTE(review): subclassing BaseException (rather than Exception) means a
    plain `except Exception` handler will NOT catch this — confirm intended.
    """

    pass
class ListservArchiveWarning(BaseException):
    """Exception type for archive-level problems.

    NOTE(review): subclassing BaseException (rather than Exception) means a
    plain `except Exception` handler will NOT catch this — confirm intended.
    """

    pass
class ListservMessage:
    """
    A single email message scraped from a LISTSERV 16.5 web archive or
    parsed out of a LISTSERV text archive file.

    Parameters
    ----------
    body
    subject
    fromname
    fromaddr
    toname
    toaddr
    date
    contenttype
    messageid

    Methods
    -------
    from_url
    get_header_from_html
    get_body_from_html
    get_header_from_listserv_file
    get_body_from_listserv_file
    get_name
    get_addr
    get_date
    remove_unwanted_header_content
    to_dict
    to_mbox

    Example
    -------
    msg = ListservMessage.from_url(
        list_name="3GPP_TSG_CT_WG6",
        url=url_message,
        fields="total",
    )
    """

    # Header template used when only the body (not the header) is requested.
    empty_header = {
        "subject": None,
        "fromname": None,
        "fromaddr": None,
        "toname": None,
        "toaddr": None,
        "date": None,
        "contenttype": None,
    }

    def __init__(
        self,
        body: str,
        subject: str,
        fromname: str,
        fromaddr: str,
        toname: str,
        toaddr: str,
        date: str,
        contenttype: str,
        messageid: Optional[str] = None,
    ):
        # NOTE(review): `messageid` is accepted but never stored on the
        # instance; `to_mbox` recomputes an id instead — confirm intended.
        self.body = body
        self.subject = subject
        self.fromname = fromname
        self.fromaddr = fromaddr
        self.toname = toname
        self.toaddr = toaddr
        self.date = date
        self.contenttype = contenttype

    @classmethod
    def from_url(
        cls,
        list_name: str,
        url: str,
        fields: str = "total",
        url_login: str = "https://list.etsi.org/scripts/wa.exe?LOGON",
        login: Optional[Dict[str, str]] = {"username": None, "password": None},
        session: Optional[str] = None,
    ) -> "ListservMessage":
        """
        Scrape one message from its archive URL.

        Args:
            list_name: Name of the mailing list the message belongs to.
            url: URL of the message page.
            fields: Which parts to fetch: "header", "body", or "total".
            url_login: Login page used when a session must be created.
                NOTE(review): `login` is a mutable default argument — never
                mutate it, or replace the default with None.
            session: Existing authenticated session; created when None.
        """
        # TODO implement field selection, e.g. return only header, body, etc.
        if session is None:
            session = get_auth_session(url_login, **login)
        soup = get_website_content(url, session=session)
        if fields in ["header", "total"]:
            header = ListservMessage.get_header_from_html(soup)
        else:
            header = cls.empty_header
        if fields in ["body", "total"]:
            body = ListservMessage.get_body_from_html(list_name, url, soup)
        else:
            body = None
        return cls(body, **header)

    @classmethod
    def from_listserv_file(
        cls,
        list_name: str,
        file_path: str,
        header_start_line_nr: int,
        fields: str = "total",
    ) -> "ListservMessage":
        """
        Parse one message out of a LISTSERV text archive file.

        Args:
            list_name: Name of the mailing list (not used by this parser;
                kept for signature symmetry with `from_url`).
            file_path: Path to the LISTSERV archive file.
            header_start_line_nr: Line index where this message's header starts.
            fields: Which parts to parse: "header", "body", or "total".
        """
        file = open(file_path, "r")
        fcontent = file.readlines()
        file.close()
        header_end_line_nr = cls.get_header_end_line_nr(
            fcontent, header_start_line_nr
        )
        if fields in ["header", "total"]:
            header = cls.get_header_from_listserv_file(
                fcontent, header_start_line_nr, header_end_line_nr
            )
        else:
            header = cls.empty_header
        if fields in ["body", "total"]:
            body = cls.get_body_from_listserv_file(
                fcontent, header_end_line_nr
            )
        else:
            body = None
        return cls(body, **header)

    @classmethod
    def get_header_end_line_nr(
        cls,
        content: List[str],
        header_start_line_nr: int,
    ) -> int:
        """
        The header ends with the first empty line encountered.

        Args:
            content: The content of one LISTSERV-file.
            header_start_line_nr: Line index where the header starts.

        NOTE(review): if no (near-)empty line follows the start index this
        raises UnboundLocalError — assumes well-formed LISTSERV files.
        """
        for lnr, lcont in enumerate(content[header_start_line_nr:]):
            # A line of length <= 1 is just the newline, i.e. blank.
            if len(lcont) <= 1:
                header_end_line_nr = header_start_line_nr + lnr
                break
        return header_end_line_nr

    @classmethod
    def get_header_from_listserv_file(
        cls,
        content: List[str],
        header_start_line_nr: int,
        header_end_line_nr: int,
    ) -> Dict[str, str]:
        """
        Parse the "Key: value" header block of a LISTSERV file into a dict
        keyed by lower-cased field names.

        Args:
            content: Lines of one LISTSERV file.
        """
        content = content[header_start_line_nr:header_end_line_nr]
        # collect important info from LISTSERV header
        header = {}
        for lnr in range(len(content)):
            line = content[lnr]
            # get header keyword and value
            if re.match(r"\S+:\s+\S+", line):
                key = line.split(":")[0]
                value = line.replace(key + ":", "").strip().rstrip("\n")
                # if not at the end of header
                if lnr < len(content) - 1:
                    # if header-keyword value is split over two lines
                    if not re.match(r"\S+:\s+\S+", content[lnr + 1]):
                        value += " " + content[lnr + 1].strip().rstrip("\n")
                header[key.lower()] = value
        header = cls.format_header_content(header)
        header = cls.remove_unwanted_header_content(header)
        return header

    @classmethod
    def get_body_from_listserv_file(
        cls,
        content: List[str],
        header_end_line_nr: int,
    ) -> str:
        """Extract the message body that follows the header in a LISTSERV file.

        The body runs until a 73-character '=' divider line (or, failing
        that, to the second-to-last line via the -1 slice end).
        """
        found = False
        # find body 'position' in file
        for line_nr, line in enumerate(content[header_end_line_nr:]):
            if "=" * 73 in line:
                body_end_line_nr = line_nr + header_end_line_nr
                found = True
                break
        if not found:
            body_end_line_nr = -1
        # get body content
        body = content[header_end_line_nr:body_end_line_nr]
        # remove empty lines and join into one string
        body = ("").join([line for line in body if len(line) > 1])
        return body

    @classmethod
    def get_header_from_html(cls, soup: BeautifulSoup) -> Dict[str, str]:
        """Extract and normalise the message header from an archive HTML page."""
        # Locate the bold "Subject" label and climb to the enclosing table text.
        text = soup.find(
            "b",
            text=re.compile(r"^\bSubject\b"),
        ).parent.parent.parent.parent.text
        # collect important info from LISTSERV header
        header = {}
        for field in text.split("Parts/Attachments:")[0].splitlines():
            if len(field) == 0:
                continue
            field_name = field.split(":")[0].strip()
            field_body = field.replace(field_name + ":", "").strip()
            header[field_name.lower()] = field_body
        header = cls.format_header_content(header)
        header = cls.remove_unwanted_header_content(header)
        return header

    @staticmethod
    def get_body_from_html(
        list_name: str, url: str, soup: BeautifulSoup
    ) -> str:
        """Fetch the plain-text body linked from a message's archive page."""
        url_root = ("/").join(url.split("/")[:-2])
        # Links containing "A3=" and the list name point at message renderings;
        # the one with "Fplain" is the plain-text variant.
        a_tags = soup.select(f'a[href*="A3="][href*="{list_name}"]')
        href_plain_text = [
            tag.get("href") for tag in a_tags if "Fplain" in tag.get("href")
        ][0]
        body_soup = get_website_content(
            urllib.parse.urljoin(url_root, href_plain_text)
        )
        return body_soup.find("pre").text

    @classmethod
    def format_header_content(cls, header: Dict[str, str]) -> Dict[str, str]:
        """Map raw header fields onto this class's attribute names."""
        header["fromname"] = cls.get_name(header["from"]).strip()
        header["fromaddr"] = cls.get_addr(header["from"])
        # The "reply-to" field is treated as the message's destination.
        header["toname"] = cls.get_name(header["reply-to"]).strip()
        header["toaddr"] = cls.get_addr(header["reply-to"])
        header["date"] = cls.get_date(header["date"])
        header["contenttype"] = header["content-type"]
        return header

    @classmethod
    def remove_unwanted_header_content(
        cls, header: Dict[str, str]
    ) -> Dict[str, str]:
        """Drop any header keys not present in `empty_header` (in place)."""
        for key in list(header.keys()):
            if key not in list(cls.empty_header.keys()):
                del header[key]
        return header

    @staticmethod
    def get_name(line: str) -> str:
        """Return the display name from a 'Name <addr>' header value."""
        # get string in between < and >
        email_of_sender = re.findall(r"\<(.*)\>", line)
        if email_of_sender:
            # remove email_of_sender from line
            name = line.replace("<" + email_of_sender[0] + ">", "")
            # remove special characters
            name = re.sub(r"[^a-zA-Z0-9]+", " ", name)
        else:
            name = line
        return name

    @staticmethod
    def get_addr(line: str) -> Optional[str]:
        """Return the address from a 'Name <addr>' header value, or None."""
        # get string in between < and >
        email_of_sender = re.findall(r"\<(.*)\>", line)
        if email_of_sender:
            email_of_sender = email_of_sender[0]
        else:
            email_of_sender = None
        return email_of_sender

    @staticmethod
    def get_date(line: str) -> str:
        """Parse an RFC-822-style date string into the locale format (%c)."""
        # Drop the last whitespace-separated token (the timezone offset),
        # since the strptime format below has no timezone field.
        line = (" ").join(line.split(" ")[:-1]).lstrip()
        # convert format to local version of date and time
        date_time_obj = datetime.datetime.strptime(
            line, "%a, %d %b %Y %H:%M:%S"
        )
        return date_time_obj.strftime("%c")

    @staticmethod
    def create_message_id(
        date: str,
        from_address: str,
    ) -> str:
        """Build a deterministic message id from the date and sender address."""
        message_id = (".").join([date, from_address])
        # remove special characters
        message_id = re.sub(r"[^a-zA-Z0-9]+", "", message_id)
        return message_id

    def to_dict(self) -> Dict[str, str]:
        """Return the message fields as a plain dict (e.g. for DataFrames)."""
        dic = {
            "Body": self.body,
            "Subject": self.subject,
            "FromName": self.fromname,
            "FromAddr": self.fromaddr,
            "ToName": self.toname,
            "ToAddr": self.toaddr,
            "Date": self.date,
            "ContentType": self.contenttype,
        }
        return dic

    def to_mbox(self, filepath: str, mode: str = "w"):
        """
        Save this message to an .mbox file.

        Args:
            filepath: Destination file path.
            mode: File-open mode; "a" appends to an existing mbox.
        """
        message_id = ListservMessage.create_message_id(
            self.date,
            self.fromaddr,
        )
        f = open(filepath, mode, encoding="utf-8")
        f.write("\n")
        # check that header was selected
        if self.subject is not None:
            f.write(f"From b'{self.fromaddr}' {self.date}\n")
            f.write(f"Content-Type: {self.contenttype}\n")
            f.write(f"MIME-Version: 1.0\n")
            f.write(f"In-Reply-To: {self.toname} <b'{self.toaddr}'>\n")
            f.write(f"From: {self.fromname} <b'{self.fromaddr}'>\n")
            # NOTE(review): the unbalanced "b'" prefixes and trailing quotes
            # on the next three lines look like str(bytes) artifacts — confirm
            # whether downstream mbox consumers rely on them before changing.
            f.write(f"Subject: b'{self.subject}\n")
            f.write(f"Message-ID: <{message_id}>'\n")
            f.write(f"Date: {self.date}'\n")
            f.write("\n")
        # check that body was selected
        if self.body is not None:
            f.write(self.body)
            f.write("\n")
        f.close()
class ListservList:
"""
This class handles a single mailing list of a public archive in the
LISTSERV 16.5 format.
Parameters
----------
    name
        The name of the mailing list (e.g. 3GPP_COMMON_IMS_XFER, IEEESCO-DIFUSION, ...)
source
        Location of the mailing list.
        It can be either a URL where the list is hosted or a path to the file(s).
msgs
List of ListservMessage objects
Methods
-------
from_url
from_messages
from_listserv_files
from_listserv_directories
get_messages_from_url
get_period_urls
get_line_numbers_of_header_starts
get_index_of_elements_in_selection
to_dict
to_pandas_dataframe
to_mbox
Example
-------
mlist = ListservList.from_url(
"3GPP_TSG_CT_WG6",
url="https://list.etsi.org/scripts/wa.exe?A0=3GPP_TSG_CT_WG6",
select={
"years": (2020, 2021),
"months": "January",
"weeks": [1,5],
"fields": "header",
},
)
"""
    def __init__(
        self,
        name: str,
        source: Union[List[str], str],
        msgs: List[ListservMessage],
    ):
        # Identifier of the mailing list (e.g. "3GPP_TSG_CT_WG6").
        self.name = name
        # URL or file path(s) the list was loaded from.
        self.source = source
        # Parsed ListservMessage objects belonging to this list.
        self.messages = msgs
    def __len__(self) -> int:
        """Number of messages in the list."""
        return len(self.messages)
    def __iter__(self):
        """Iterate over the contained ListservMessage objects."""
        return iter(self.messages)
    def __getitem__(self, index) -> ListservMessage:
        """Return the message at `index` (supports slices via list indexing)."""
        return self.messages[index]
@classmethod
def from_url(
cls,
name: str,
url: str,
select: dict,
url_login: str = "https://list.etsi.org/scripts/wa.exe?LOGON",
login: Optional[Dict[str, str]] = {"username": None, "password": None},
session: Optional[str] = None,
) -> "ListservList":
"""
Args:
name: Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'
url: URL to the LISTSERV list.
select: Selection criteria that can filter messages by:
- content, i.e. header and/or body
- period, i.e. written in a certain year, month, week-of-month
"""
if session is None:
session = get_auth_session(url_login, **login)
if "fields" not in list(select.keys()):
select["fields"] = "total"
msgs = cls.get_messages_from_url(name, url, select, session)
return cls.from_messages(name, url, msgs)
@classmethod
def from_messages(
cls,
name: str,
url: str,
messages: List[Union[str, ListservMessage]],
fields: str = "total",
url_login: str = "https://list.etsi.org/scripts/wa.exe?LOGON",
login: Optional[Dict[str, str]] = {"username": None, "password": None},
session: Optional[str] = None,
) -> "ListservList":
"""
Args:
messages: Can either be a list of URLs to specific LISTSERV messages
or a list of `ListservMessage` objects.
"""
if not messages:
# create empty ListservList for ListservArchive
msgs = messages
elif isinstance(messages[0], str):
# create ListservList from message URLs
if session is None:
session = get_auth_session(url_login, **login)
msgs = []
for idx, url in enumerate(messages):
msgs.append(
ListservMessage.from_url(
list_name=name,
url=url,
fields=fields,
session=session,
)
)
else:
# create ListservList from list of ListservMessages
msgs = messages
return cls(name, url, msgs)
@classmethod
def from_listserv_directories(
cls,
name: str,
directorypaths: List[str],
filedsc: | |
15
if self.grating == "580V":
print(" For 580V we use bright skyline at 5578 AA ...")
sky_line = 5578
sky_line_2 = 0
if self.grating == "1000R":
# print " For 1000R we use skylines at 6300.5 and 6949.0 AA ..." ### TWO LINES GIVE WORSE RESULTS THAN USING ONLY 1...
print(" For 1000R we use skyline at 6949.0 AA ...")
sky_line = 6949.0 # 6300.5
lowlow = 22 # for getting a good continuuem in 6949.0
lowhigh = 12
highlow = 36
highhigh = 52
sky_line_2 = 0 # 6949.0 #7276.5 fails
lowlow_2 = 22 # for getting a good continuuem in 6949.0
lowhigh_2 = 12
highlow_2 = 36
highhigh_2 = 52
if sky_line_2 != 0:
print(" ... first checking {} ...".format(sky_line))
for fibre_sky in range(self.n_spectra):
skyline_spec = fluxes(
self.wavelength,
self.intensity_corrected[fibre_sky],
sky_line,
plot=False,
verbose=False,
lowlow=lowlow,
lowhigh=lowhigh,
highlow=highlow,
highhigh=highhigh,
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[fibre_sky] = skyline_spec[11]
skyline_sky = fluxes(
self.wavelength,
self.sky_emission[fibre_sky],
sky_line,
plot=False,
verbose=False,
lowlow=lowlow,
lowhigh=lowhigh,
highlow=highlow,
highhigh=highhigh,
) # fmin=-5.0E-17, fmax=2.0E-16,
scale_per_fibre[fibre_sky] = old_div(skyline_spec[3], skyline_sky[3]) # TODO: get data for 2D and test if can remove
self.sky_emission[fibre_sky] = skyline_sky[11]
if sky_line_2 != 0:
print(" ... now checking {} ...".format(sky_line_2))
for fibre_sky in range(self.n_spectra):
skyline_spec = fluxes(
self.wavelength,
self.intensity_corrected[fibre_sky],
sky_line_2,
plot=False,
verbose=False,
lowlow=lowlow_2,
lowhigh=lowhigh_2,
highlow=highlow_2,
highhigh=highhigh_2,
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[fibre_sky] = skyline_spec[11]
skyline_sky = fluxes(
self.wavelength,
self.sky_emission[fibre_sky],
sky_line_2,
plot=False,
verbose=False,
lowlow=lowlow_2,
lowhigh=lowhigh_2,
highlow=highlow_2,
highhigh=highhigh_2,
) # fmin=-5.0E-17, fmax=2.0E-16,
scale_per_fibre_2[fibre_sky] = (
old_div(skyline_spec[3], skyline_sky[3]) # TODO: get data for 2D and test if can remove
)
self.sky_emission[fibre_sky] = skyline_sky[11]
# Median value of scale_per_fibre, and apply that value to all fibres
if sky_line_2 == 0:
scale_sky_rss = np.nanmedian(scale_per_fibre)
self.sky_emission = self.sky_emission * scale_sky_rss
else:
scale_sky_rss = np.nanmedian(
old_div((scale_per_fibre + scale_per_fibre_2), 2) # TODO: get data for 2D and test if can remove
)
# Make linear fit
scale_sky_rss_1 = np.nanmedian(scale_per_fibre)
scale_sky_rss_2 = np.nanmedian(scale_per_fibre_2)
print(
" Median scale for line 1 : {} range [ {}, {} ]]".format(
scale_sky_rss_1, np.nanmin(scale_per_fibre), np.nanmax(scale_per_fibre)
)
)
print(
" Median scale for line 2 : {} range [ {}, {} ]]".format(
scale_sky_rss_2, np.nanmin(scale_per_fibre_2), np.nanmax(scale_per_fibre_2)
)
)
b = old_div((scale_sky_rss_1 - scale_sky_rss_2), (
sky_line - sky_line_2 # TODO: get data for 2D and test if can remove
))
a = scale_sky_rss_1 - b * sky_line
# ,a+b*sky_line,a+b*sky_line_2
print(" Appling linear fit with a = {} b = {} to all fibres in sky image...".format(a, b))
for i in range(self.n_wave):
self.sky_emission[:, i] = self.sky_emission[:, i] * (
a + b * self.wavelength[i]
)
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
label1 = "$\lambda$" + np.str(sky_line)
plt.plot(scale_per_fibre, alpha=0.5, label=label1)
plt.minorticks_on()
plt.ylim(np.nanmin(scale_per_fibre), np.nanmax(scale_per_fibre))
plt.axhline(y=scale_sky_rss, color="k", linestyle="--")
if sky_line_2 == 0:
text = (
"Scale OBJECT / SKY using sky line $\lambda$ {}".format(sky_line))
print(" Scale per fibre in the range [{} , {} ], median value is {}".format(np.nanmin(scale_per_fibre), np.nanmax(scale_per_fibre), scale_sky_rss))
print(" Using median value to scale sky emission provided...")
if sky_line_2 != 0:
text = (
"Scale OBJECT / SKY using sky lines $\lambda$ {} and $\lambda$".format(sky_line, sky_line_2))
label2 = "$\lambda$ {}".format(sky_line_2)
plt.plot(scale_per_fibre_2, alpha=0.5, label=label2)
plt.axhline(y=scale_sky_rss_1, color="k", linestyle=":")
plt.axhline(y=scale_sky_rss_2, color="k", linestyle=":")
plt.legend(frameon=False, loc=1, ncol=2)
plt.title(text)
plt.xlabel("Fibre")
# plt.show()
# plt.close()
self.intensity_corrected = (
self.intensity_corrected - self.sky_emission
)
# (3) No sky spectrum or image is provided, obtain the sky using the n_sky lowest fibres
if sky_method == "self":
print("\n Using {} lowest intensity fibres to create a sky...".format(n_sky))
self.find_sky_emission(
n_sky=n_sky,
plot=plot,
sky_fibres=sky_fibres,
sky_wave_min=sky_wave_min,
sky_wave_max=sky_wave_max,
)
# print "\n AFTER SKY SUBSTRACTION:"
# self.compute_integrated_fibre(plot=False, warnings=warnings) #title =" - Throughput corrected", text="after throughput correction..."
# count_negative = 0
# for i in range(self.n_spectra):
# if self.integrated_fibre[i] < 0.11 :
# #print " Fibre ",i," has an integrated flux of ", self.integrated_fibre[i]
# count_negative=count_negative+1
# print self.integrated_fibre
# print " Number of fibres with NEGATIVE integrated value AFTER SKY SUBSTRACTION = ", count_negative
# If this RSS is an offset sky, perform a median filter to increase S/N
if is_sky:
print("\n> This RSS file is defined as SKY... applying median filter with window {} ...".format(win_sky))
medfilt_sky = median_filter(
self.intensity_corrected, self.n_spectra, self.n_wave, win_sky=win_sky
)
self.intensity_corrected = copy.deepcopy(medfilt_sky)
print(" Median filter applied, results stored in self.intensity_corrected !")
# Get airmass and correct for extinction AFTER SKY SUBTRACTION
ZD = (self.ZDSTART + self.ZDEND)/2
self.airmass = 1/np.cos(np.radians(ZD))
self.extinction_correction = np.ones(self.n_wave)
if do_extinction:
self.do_extinction_curve(pth.join(DATA_PATH, "ssoextinct.dat"), plot=plot)
# Check if telluric correction is needed & apply
if telluric_correction[0] != 0:
plot_integrated_fibre_again = plot_integrated_fibre_again + 1
print("\n> Applying telluric correction...")
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(self.wavelength, telluric_correction)
plt.minorticks_on()
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.ylim(0.9, 2)
plt.title("Telluric correction")
plt.xlabel("Wavelength [$\AA$]")
# plt.show()
# plt.close()
if plot:
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = [
integrated_intensity_sorted[-1],
integrated_intensity_sorted[0],
]
print(" Example of telluric correction using fibres {} and {} :".format(region[0], region[1]))
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(
self.wavelength,
self.intensity_corrected[region[0]],
color="r",
alpha=0.3,
)
plt.plot(
self.wavelength,
self.intensity_corrected[region[1]],
color="r",
alpha=0.3,
)
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] * telluric_correction
)
if plot:
plt.plot(
self.wavelength,
self.intensity_corrected[region[0]],
color="b",
alpha=0.5,
)
plt.plot(
self.wavelength,
self.intensity_corrected[region[1]],
color="g",
alpha=0.5,
)
plt.minorticks_on()
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.ylim(
np.nanmin(self.intensity_corrected[region[1]]),
np.nanmax(self.intensity_corrected[region[0]]),
) # CHECK THIS AUTOMATICALLY
plt.xlabel("Wavelength [$\AA$]")
# plt.show()
# plt.close()
# Check if identify emission lines is requested & do
if id_el:
if brightest_line_wavelength == 0:
self.el = self.identify_el(
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
verbose=True,
plot=plot_id_el,
fibre=0,
broad=broad,
)
print("\n Emission lines identified saved in self.el !!")
else:
brightest_line_rest_wave = 6562.82
print("\n As given, line {} at rest wavelength = {} is at {}".format(brightest_line, brightest_line_rest_wave, brightest_line_wavelength))
self.el = [
[brightest_line],
[brightest_line_rest_wave],
[brightest_line_wavelength],
[7.2],
]
# PUTAAA sel.el=[peaks_name,peaks_rest, p_peaks_l, p_peaks_fwhm]
else:
self.el = [[0], [0], [0], [0]]
# Check if id_list provided
if id_list[0] != 0:
if id_el:
print("\n> Checking if identified emission lines agree with list provided")
# Read list with all emission lines to get the name of emission lines
emission_line_file = "data/lineas_c89_python.dat"
el_center, el_name = read_table(emission_line_file, ["f", "s"])
# Find brightest line to get redshift
for i in range(len(self.el[0])):
if self.el[0][i] == brightest_line:
obs_wave = self.el[2][i]
redshift = (self.el[2][i] - self.el[1][i])/self.el[1][i]
print(" Brightest emission line {} foud at {} , redshift = {}".format(brightest_line, obs_wave, redshift))
el_identified = [[], [], [], []]
n_identified = 0
for line in id_list:
id_check = 0
for i in range(len(self.el[1])):
if line == self.el[1][i]:
if verbose:
print(" Emission line {} {} has been identified".format(self.el[0][i], self.el[1][i]))
n_identified = n_identified + 1
id_check = 1
el_identified[0].append(self.el[0][i]) # Name
el_identified[1].append(self.el[1][i]) # Central wavelength
el_identified[2].append(
self.el[2][i]
) # Observed wavelength
el_identified[3].append(self.el[3][i]) # "FWHM"
if id_check == 0:
for i in range(len(el_center)):
if line == el_center[i]:
el_identified[0].append(el_name[i])
print(" Emission line {} {} has NOT been identified, adding...".format(el_name[i], line))
el_identified[1].append(line)
el_identified[2].append(line * (redshift + 1))
el_identified[3].append(4 * broad)
self.el = el_identified
print(" Number of emission lines identified = {} of a total of {} provided. self.el updated accordingly".format(n_identified, len(id_list)))
else:
print("\n> List of emission lines provided but no identification was requested")
# Clean sky residuals if requested
if clean_sky_residuals:
plot_integrated_fibre_again = plot_integrated_fibre_again + 1
self.clean_sky_residuals(
extra_w=extra_w,
step=step_csr,
dclip=dclip,
verbose=verbose,
fibre=fibre,
wave_min=valid_wave_min,
wave_max=valid_wave_max,
)
# set_data was till here... -------------------------------------------------------------------
if fibre != 0:
plot_integrated_fibre_again = 0
# Plot corrected values
if plot == True and rss_clean == False: # plot_integrated_fibre_again > 0 :
self.compute_integrated_fibre(
plot=plot,
title=" - Intensities Corrected",
warnings=warnings,
text="after all corrections have been applied...",
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
correct_negative_sky=correct_negative_sky,
)
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = []
for fibre_ in range(high_fibres):
region.append(integrated_intensity_sorted[-1 - fibre_])
print("\n> Checking results using {} fibres with the highest integrated intensity".format(high_fibres))
print(" which are : {}".format(region))
plt.figure(figsize=(fig_size, fig_size / 2.5))
I = np.nansum(self.intensity[region], axis=0)
plt.plot(self.wavelength, I, "r-", label="Uncorrected", alpha=0.3)
Ic = np.nansum(self.intensity_corrected[region], axis=0)
plt.axhline(y=0, color="k", linestyle=":")
plt.plot(self.wavelength, Ic, "g-", label="Corrected", alpha=0.4)
plt.ylabel("Flux")
plt.xlabel("Wavelength [$\AA$]")
| |
import enum
from os import name
from typing import List, Dict, Optional, Tuple
import torch
from torch import nn, einsum
from einops import rearrange, repeat
from comvex.vit import ViTBase
from comvex.utils import FeedForward, PathDropout, ProjectionHead, PatchEmbeddingXd
from comvex.utils.helpers import name_with_msg, config_pop_argument
from .config import CoaTLiteConfig
class CoaTBase(ViTBase):
    r"""Configuration base for CoaT(-Lite) backbones.

    Stores the per-stage hyper-parameters (depths, widths, FFN expansion scales)
    and resolves the total number of attention heads from `kernel_size_on_heads`,
    a mapping of depth-wise-conv kernel size -> number of heads using that kernel.
    """

    def __init__(
        self,
        image_size: int,
        image_channel: int,
        patch_size: int,
        num_layers_in_stages: List[int],
        num_channels: List[int],
        expand_scales: List[int],
        kernel_size_on_heads: Dict[int, int],
        heads: Optional[int] = None,
    ) -> None:
        super().__init__(image_size, image_channel, patch_size, use_patch_and_flat=False)

        self.image_channel = image_channel
        self.num_stages = len(num_layers_in_stages)
        self.num_layers_in_stages = num_layers_in_stages
        self.num_channels = num_channels
        self.expand_scales = expand_scales
        # First stage uses the configured patch size; every later stage downsamples by 2.
        self.patch_sizes = [self.patch_size] + [2] * (self.num_stages - 1)

        # If `heads` is given explicitly it must agree with the per-kernel head counts.
        if heads is not None:
            assert (
                heads == sum(kernel_size_on_heads.values())
            ), name_with_msg(f"Number of heads should be equal for `heads` ({heads}) and the sum of values of `kernel_size_on_heads` ({sum(kernel_size_on_heads.values())})")

        self.heads = heads or sum(kernel_size_on_heads.values())
        self.kernel_size_on_heads = kernel_size_on_heads
class ConvolutionalPositionEncoding(nn.Module):
    r"""Convolutional position encoding (CPE).

    A single depth-wise 2-D convolution whose padding preserves the spatial size.
    The optional `cls` token is detached before the convolution and re-attached
    afterwards, since it has no spatial position.
    """

    def __init__(
        self,
        dim: int,
        kernel_size: int = 3,
        use_cls: bool = True,
    ) -> None:
        super().__init__()

        # groups=dim makes the convolution depth-wise (one filter per channel).
        self.depth_wise_conv = nn.Conv2d(
            dim,
            dim,
            kernel_size=kernel_size,
            stride=1,
            padding=kernel_size // 2,
            groups=dim,
        )

        self.use_cls = use_cls

    def forward(self, x: torch.Tensor, H: Optional[int] = None, W: Optional[int] = None) -> torch.Tensor:
        r"""
        If `H` and `W` are not given, assume `x` hasn't been flatten and its shape should be (b, c, h, w)
        """
        cls_token = None
        if self.use_cls:
            # Split off the leading cls token; it bypasses the convolution entirely.
            cls_token, x = x[:, :1, :], x[:, 1:, :]

        flattened = bool(H and W)
        if flattened:
            x = rearrange(x, "b (h w) c -> b c h w", h=H, w=W)

        x = self.depth_wise_conv(x)

        if flattened:
            x = rearrange(x, "b c h w -> b (h w) c")

        if self.use_cls:
            x = torch.cat([cls_token, x], dim=1)

        return x
class ConvolutionalRelativePositionEncoding(nn.Module):
    r"""Convolutional relative position encoding (CRPE).

    Splits the attention heads into groups, runs a depth-wise 2-D convolution with a
    group-specific kernel size over the value maps, and gates the result with the
    queries (the returned tensor is ``q * conv(v)``), following the official CoaT code.
    """

    def __init__(
        self,
        dim: int,
        heads: Optional[int],
        head_dim: Optional[int],
        # NOTE: mutable default dict — safe here because it is only read, never mutated.
        kernel_size_on_heads: Dict[int, int] = { 3: 2, 5: 3, 7: 3 },  # From: https://github.com/mlpc-ucsd/CoaT/blob/main/src/models/coat.py#L358
        use_cls: bool = True,
    ) -> None:
        super().__init__()

        head_list = list(kernel_size_on_heads.values())
        if heads is None and head_dim is None:
            # Heads must then be fully determined by the per-kernel head counts.
            if any([True if h is None or h <= 0 else False for h in head_list]):
                raise ValueError(
                    "Please specify exact number (integers that are greater than 0) of heads for each kernel size when `heads` and `head_dim` are None."
                )

            self.heads = sum(head_list)
        else:
            self.heads = heads or dim // head_dim
        self.head_dim = head_dim or dim // self.heads

        assert (
            dim // self.heads == self.head_dim
        ), name_with_msg(f"`dim` ({dim}) can't be divided by `heads` ({self.heads}). Please check `heads`, `head_dim`, or `kernel_size_on_heads`.")

        # One depth-wise convolution per kernel-size group; each operates on the
        # concatenated channels of that group's heads (head_dim * num_heads channels).
        self.depth_wise_conv_list = nn.ModuleList([
            nn.Conv2d(
                self.head_dim*num_heads,
                self.head_dim*num_heads,
                kernel_size=kernel_size,
                stride=1,
                padding=kernel_size // 2,
                groups=self.head_dim*num_heads,
            ) for kernel_size, num_heads in kernel_size_on_heads.items()
        ])
        # Channel split sizes used to route `v` to the matching convolution.
        self.split_list = [num_heads*self.head_dim for num_heads in kernel_size_on_heads.values()]
        self.use_cls = use_cls

    def forward(self, q: torch.Tensor, v: torch.Tensor, H: Optional[int] = None, W: Optional[int] = None) -> torch.Tensor:
        r"""
        If `H` and `W` are not given, assume `x` hasn't been flatten and its shape should be (b, c, h, w), so does the outputs
        """
        # NOTE(review): `b`, `p`, and `d` are only bound when both `H` and `W` are truthy.
        # If `use_cls` is True while `H`/`W` are not given, the cls-padding branch below
        # would raise NameError — confirm callers always pass `H`/`W` when using cls tokens.
        if H and W:
            b, p, n, d = v.shape  # p for heads

        if self.use_cls:
            # Drop the cls token; it has no spatial position and is re-padded with zeros below.
            v = v[:, :, 1:, :]

        if H and W:
            v = rearrange(v, "b p (h w) d -> b (p d) h w", h=H, w=W)

        # Route each head group to its own depth-wise convolution, then re-merge.
        v_list = torch.split(v, self.split_list, dim=1)
        v_list = [conv(v) for v, conv in zip(v_list, self.depth_wise_conv_list)]
        v = torch.cat(v_list, dim=1)

        if H and W:
            v = rearrange(v, "b (p d) h w -> b p (h w) d", p=p, d=d)

        if self.use_cls:
            # Zero relative-position contribution for the cls token.
            cls_relative_position = torch.zeros(
                (b, p, 1, d),
                dtype=v.dtype,
                device=v.device,
                layout=v.layout
            )
            v = torch.cat([cls_relative_position, v], dim=-2)

        # Query-gated output, as in the reference implementation.
        return q*v
class FactorizedAttention(nn.Module):
    r"""Factorized (linear) attention used by CoaT.

    Computes ``Q (softmax(K)^T V)`` instead of the quadratic ``softmax(QK^T) V``,
    then adds a query-gated convolutional relative position term.
    """

    def __init__(
        self,
        dim: int,
        kernel_size_on_heads: Dict[int, int],
        heads: Optional[int] = None,
        head_dim: Optional[int] = None,
        use_cls: bool = True,
        use_bias: bool = True,
        conv_relative_postion_encoder: Optional[nn.Module] = None,
        attention_dropout: float = 0.,
        ff_dropout: float = 0.,
    ) -> None:
        super().__init__()

        assert (
            heads is not None or head_dim is not None
        ), name_with_msg(self, f"Either `heads` ({heads}) or `head_dim` ({head_dim}) must be specified")

        # Resolve whichever of `heads` / `head_dim` is missing from the other.
        if heads is not None:
            self.heads = heads
        else:
            self.heads = dim // head_dim
        if head_dim is None:
            head_dim = dim // heads

        assert (
            head_dim * self.heads == dim
        ), name_with_msg(self, f"Head dimension ({head_dim}) times the number of heads ({self.heads}) must be equal to embedding dimension ({dim})")

        # Use the caller-supplied CRPE when given; otherwise build a default one.
        if conv_relative_postion_encoder is not None:
            self.relative_position_encoder = conv_relative_postion_encoder
        else:
            self.relative_position_encoder = ConvolutionalRelativePositionEncoding(
                dim,
                heads,
                head_dim,
                kernel_size_on_heads=kernel_size_on_heads,
                use_cls=use_cls,
            )

        self.QKV = nn.Linear(dim, 3*dim, bias=use_bias)
        self.out_linear = nn.Linear(dim, dim)
        self.attention_dropout = nn.Dropout(attention_dropout)
        self.out_dropout = nn.Dropout(ff_dropout)

        self.scale = head_dim**(-0.5)
        self.use_cls = use_cls

    def forward(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor:
        num_heads = self.heads

        # Project to Q/K/V and split heads; keys are softmax-normalized along the
        # token axis, which is what makes the attention factorizable.
        q, k, v = self.QKV(x).chunk(chunks=3, dim=-1)
        q = rearrange(q, "b n (h d) -> b h n d", h=num_heads)
        k = rearrange(k, "b n (h d) -> b h d n", h=num_heads).softmax(dim=-1)
        v = rearrange(v, "b n (h d) -> b h n d", h=num_heads)

        # Per-head context matrix of size (d x d) — no (n x n) attention map.
        context = einsum("b h p n, b h n q -> b h p q", k, v)
        context = self.attention_dropout(context)

        # Query-gated convolutional relative position term.
        relative_position = self.relative_position_encoder(q, v, H, W)

        out = einsum("b h n p, b h p q -> b h n q", q*self.scale, context) + relative_position
        out = rearrange(out, "b h n d -> b n (h d)")
        out = self.out_linear(out)
        out = self.out_dropout(out)

        return out
class ConvAttentionalModule(nn.Module):
    r"""Conv-attentional module: optional convolutional position encoding followed by
    factorized attention.

    The position encoding is optional because the official implementation applies it in
    `SerialBlock` rather than here, which differs from `Figure 2` in the paper:
    https://github.com/mlpc-ucsd/CoaT/blob/main/src/models/coat.py#L211-L214
    """

    def __init__(
        self,
        dim: int,
        use_cls: bool = True,
        use_conv_position_encoder: bool = False,
        conv_position_encoder: Optional[nn.Module] = None,
        **kwargs,
    ) -> None:
        super().__init__()

        # An explicitly provided encoder always implies the flag.
        use_conv_position_encoder = use_conv_position_encoder or (conv_position_encoder is not None)
        if use_conv_position_encoder:
            if conv_position_encoder is None:
                conv_position_encoder = ConvolutionalPositionEncoding(dim, use_cls=use_cls)
            self.conv_position_encoder = conv_position_encoder

        self.factorized_attn = FactorizedAttention(dim=dim, use_cls=use_cls, **kwargs)

        self.use_conv_position_encoder = use_conv_position_encoder
        self.use_cls = use_cls

    def forward(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor:
        if self.use_conv_position_encoder:
            x = self.conv_position_encoder(x, H, W)

        return self.factorized_attn(x, H, W)
class CoaTSerialBlock(nn.Module):
    r"""Serial block of CoaT(-Lite).

    Pipeline: convolutional position encoding -> pre-norm factorized attention ->
    pre-norm feed-forward, the last two with residual connections and path dropout
    (stochastic depth).

    Args:
        dim: embedding dimension.
        ff_expand_scale: hidden-layer expansion factor of the feed-forward block.
        path_dropout: stochastic-depth drop probability.
        conv_position_encoder: optional shared CPE module; a default one is built if None.
        use_cls: whether inputs carry a leading cls token.
        **kwargs: forwarded to `FactorizedAttention` (e.g. `kernel_size_on_heads`,
            `heads`, `attention_dropout`, `ff_dropout`).
    """

    def __init__(
        self,
        dim: int,
        ff_expand_scale: int = 4,
        path_dropout: float = 0.,
        conv_position_encoder: Optional[nn.Module] = None,
        use_cls: bool = True,
        **kwargs,
    ) -> None:
        super().__init__()

        self.conv_position_encoder = ConvolutionalPositionEncoding(
            dim,
            use_cls=use_cls
        ) if conv_position_encoder is None else conv_position_encoder
        self.norm_0 = nn.LayerNorm(dim)
        self.conv_attn_module = ConvAttentionalModule(
            dim,
            use_cls=use_cls,
            use_conv_position_encoder=False,  # CPE is applied by this block itself (see forward)
            conv_position_encoder=None,
            **kwargs,
        )
        self.path_dropout_0 = PathDropout(path_dropout)

        self.norm_1 = nn.LayerNorm(dim)
        # ROBUSTNESS FIX: default `ff_dropout` to 0. (matching `FactorizedAttention`'s
        # default) instead of raising KeyError when the caller does not pass it.
        self.ff_block = FeedForward(
            dim,
            ff_expand_scale=ff_expand_scale,
            ff_dropout=kwargs.get("ff_dropout", 0.),
        )
        self.path_dropout_1 = PathDropout(path_dropout)

    def forward(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor:
        # Add convolutional position encoding before the `ConvAttentionalModule`,
        # which differs from `Figure 2` in the paper but aligns with the official
        # implementation:
        # https://github.com/mlpc-ucsd/CoaT/blob/main/src/models/coat.py#L211-L214
        x = self.conv_position_encoder(x, H, W)
        x = x + self.path_dropout_0(self.conv_attn_module(self.norm_0(x), H, W))
        x = x + self.path_dropout_1(self.ff_block(self.norm_1(x)))

        return x
class CoaTParallelBlock(nn.Module):
    r"""Parallel block of CoaT: runs one serial-style pipeline per feature-map scale and
    aggregates the attention outputs across scales.

    BUG FIXES versus the previous version:
    - `path_dropout_0`, `norm_1`, and `path_dropout_1` were ModuleLists with a single
      element instead of `num_feature_maps` elements, so the `zip`s in `forward`
      silently truncated everything to one scale.
    - `forward` referenced a nonexistent `self.serial_block_list` (AttributeError).
    - `zip(args, sizes, ...)` yields 3-tuples, but they were unpacked into four names
      (`x, H, W, module`), raising ValueError; `(H, W)` must be unpacked as a pair.
    - The residual connections added each tensor to a dropout of *itself*
      (`x + drop(x)`) after `args` had been overwritten by the normed tensors; the
      standard pre-norm residual `x + drop(sublayer(norm(x)))` is restored.
    - The cross-scale aggregation mutated `args` in place while still reading from it,
      mixing already-aggregated tensors into later scales; it now reads from an
      immutable snapshot. `forward` also returned nothing; it now returns the list.
    """

    def __init__(
        self,
        num_feature_maps: int,
        dim: int,
        ff_expand_scale: int = 4,
        path_dropout: float = 0.,
        conv_position_encoder: Optional[nn.Module] = None,
        use_cls: bool = True,
        **kwargs,
    ) -> None:
        super().__init__()

        self.conv_position_encoder = nn.ModuleList([
            ConvolutionalPositionEncoding(
                dim,
                use_cls=use_cls
            ) if conv_position_encoder is None else conv_position_encoder
            for _ in range(num_feature_maps)
        ])
        self.norm_0 = nn.ModuleList([
            nn.LayerNorm(dim) for _ in range(num_feature_maps)
        ])
        self.conv_attn_module = nn.ModuleList([
            ConvAttentionalModule(
                dim,
                use_cls=use_cls,
                use_conv_position_encoder=False,
                conv_position_encoder=None,
                **kwargs,
            ) for _ in range(num_feature_maps)
        ])
        self.path_dropout_0 = nn.ModuleList([
            PathDropout(path_dropout) for _ in range(num_feature_maps)
        ])

        self.norm_1 = nn.ModuleList([
            nn.LayerNorm(dim) for _ in range(num_feature_maps)
        ])
        # ROBUSTNESS FIX: default `ff_dropout` to 0. instead of KeyError when omitted.
        self.ff_block = nn.ModuleList([
            FeedForward(
                dim,
                ff_expand_scale=ff_expand_scale,
                ff_dropout=kwargs.get("ff_dropout", 0.),
            ) for _ in range(num_feature_maps)
        ])
        self.path_dropout_1 = nn.ModuleList([
            PathDropout(path_dropout) for _ in range(num_feature_maps)
        ])

    def forward(self, *args: List[torch.Tensor], sizes: Tuple[Tuple[int, int]]) -> List[torch.Tensor]:
        """One tensor per scale in `args`; `sizes` holds the matching (H, W) pairs."""
        num_inputs = len(args)
        assert (
            num_inputs == len(self.conv_attn_module)
        ), name_with_msg(self, f"The number of inputs ({num_inputs}) should be aligned with the number of feature maps ({len(self.conv_attn_module)})")

        # Convolutional position encoding, per scale.
        args = [
            cpe(x, H, W)
            for x, (H, W), cpe in zip(args, sizes, self.conv_position_encoder)
        ]

        # Attention sub-block (pre-norm) with cross-scale aggregation and residual.
        attn_out = [norm(x) for x, norm in zip(args, self.norm_0)]
        attn_out = [
            attn(x, H, W)
            for x, (H, W), attn in zip(attn_out, sizes, self.conv_attn_module)
        ]
        # NOTE(review): `self.interpolate` is not defined in this class — presumably
        # provided by a subclass/mixin that resamples a tensor to `size`; confirm.
        attn_out = [
            torch.stack(
                [self.interpolate(x, size=sizes[idx]) for x in attn_out], dim=0
            ).sum(dim=0)
            for idx in range(num_inputs)
        ]
        args = [x + drop(y) for x, y, drop in zip(args, attn_out, self.path_dropout_0)]

        # Feed-forward sub-block (pre-norm) with residual.
        ff_out = [norm(x) for x, norm in zip(args, self.norm_1)]
        ff_out = [ff(x) for x, ff in zip(ff_out, self.ff_block)]
        args = [x + drop(y) for x, y, drop in zip(args, ff_out, self.path_dropout_1)]

        return args
| |
various architectures
tensor_h = 320
tensor_w = 320
x = tf.placeholder(tf.float32, [None, tensor_h * tensor_w, 1], name="x")
y = tf.placeholder(tf.float32, [None, 6], name="y")
# keep_prob = tf.placeholder(tf.float32, name="keep_prob") # dropout (keep probability)
keep_prob = tf.placeholder(tf.float32, len(dropout), name="keep_prob")
TrainingProp = 0.7
n_classes = 6
training_iters = 30000 # 1500 12500,
if CONTINUE_TRAIN:
training_iters = current_step + training_iters
################################################ Graph 3conv begin
if Graph_3conv == 1:
# tf Graph input
filter_size_1 = 11
filter_size_2 = 5
filter_size_3 = 3
SEED = 8 # hy: 8, 16, 64, number of filters, feature map size: input(42) - filter_size_1 + 1 = 38
conv2_out = 16 # hy: 16, 32, 64 outputs of final conv layer, feature map size: input(21) - filter_size_2 + 1 = 19
conv3_out = 32 # hy: 16, 32, 64 outputs of final conv layer, feature map size: input(21) - filter_size_2 + 1 = 19
def conv_net(_X, _weights, _biases, _dropout):
    """Build the 3-conv-layer CNN graph: (conv -> max-pool -> dropout) x 3 -> FC -> logits.

    Relies on names from the enclosing script scope: `tf`, `conv2d`, `max_pool`,
    `tensor_h`, `tensor_w`, `n_hidden`, `dropout`, and the `filter_size_*` constants.

    _X       -- flattened input batch; reshaped below to [-1, tensor_h, tensor_w, 1].
    _weights -- dict of tf.Variables with keys 'wc1'..'wc3', 'wd1', 'out'.
    _biases  -- dict of tf.Variables with keys 'bc1'..'bc3', 'bd1', 'out'.
    _dropout -- sequence of 4 keep probabilities, one per dropout layer.

    Returns the pre-softmax logits tensor of shape [batch, n_classes].
    """
    # - INPUT Layer
    # Reshape input picture
    _X = tf.reshape(_X,
                    shape=[-1, tensor_h, tensor_w, 1])  # hy: use updated proper values for shape
    print '\nArchitecture\ninput tensor', _X.get_shape()
    # _X = tf.reshape(_X, shape=[-1, 32, 32, 3])  # TODO num channnels change
    # a = np.array(_X[0])
    # print(a.shape)
    # Image._show(Image.fromarray(a, 'RGB'))
    ################################
    # - Convolution Layer 1
    # `k` is the stride for the convolution, then reused as the pooling window size.
    k = 4
    conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'], k)  # 4
    print 'conv1 ( f=', filter_size_1, 'k=', k, ')', conv1.get_shape()
    # Max Pooling (down-sampling)
    k = 2
    conv1 = max_pool(conv1, k)  # TODO return it to K=2
    print 'conv1 max pooling ( k=', k, ')', conv1.get_shape()
    # Apply Dropout
    conv1 = tf.nn.dropout(conv1, _dropout[0])  # TODO comment it later
    print '- dropout ( keep rate', dropout[0], ')', conv1.get_shape()
    ################################
    # - Convolution Layer 2
    k = 1
    conv2 = conv2d(conv1, _weights['wc2'], _biases['bc2'], k)
    print '\nconv2 ( f=', filter_size_2, 'k=', k, ')', conv2.get_shape()
    # # Max Pooling (down-sampling)
    k = 2
    conv2 = max_pool(conv2, k)
    print 'conv2 - max pooling (k=', k, ')', conv2.get_shape()
    # # Apply Dropout
    conv2 = tf.nn.dropout(conv2, _dropout[1])  # TODO comment it later!
    print '- dropout ( keep rate', dropout[1], ')', conv2.get_shape()
    ################################
    # - Convolution Layer 3
    k = 1
    conv3 = conv2d(conv2, _weights['wc3'], _biases['bc3'], k)
    print '\nconv3 ( f=', filter_size_3, 'k=', k, ')', conv3.get_shape()
    k = 2
    conv3 = max_pool(conv3, k)
    print 'conv3 - max pooling ( k=', k, ')', conv3.get_shape()
    conv3 = tf.nn.dropout(conv3, _dropout[2])
    print '- dropout ( keep rate', dropout[2], ')', conv3.get_shape()
    # Fully connected layer
    # Flatten the last conv output to match the first dense weight matrix.
    dense1 = tf.reshape(conv3,
                        [-1,
                         _weights['wd1'].get_shape().as_list()[0]])  # Reshape conv2 output to fit dense layer input
    print '\ndensel reshape:', dense1.get_shape(), 'n_hidden', n_hidden
    dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1']))  # Relu activation
    print 'densel - relu:', dense1.get_shape()
    dense1 = tf.nn.dropout(dense1, _dropout[3])  # Apply Dropout
    print '- dropout ( keep rate', dropout[3], ')', dense1.get_shape()
    # Output, class prediction
    # No softmax here: the loss (softmax_cross_entropy_with_logits) expects raw logits.
    out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out'])
    print 'out:', out.get_shape()
    return out
# Store layers weight & bias #Graph_3conv
weights = {
'wc1': tf.Variable(tf.random_normal([filter_size_1, filter_size_1, 1, SEED], stddev=0.1, seed=SEED), name="wc1"),
# 5x5 conv, 1 input, 8 outputs
'wc2': tf.Variable(tf.random_normal([filter_size_2, filter_size_2, SEED, conv2_out], stddev=0.1, seed=SEED),
name="wc2"), # 5x5 conv, 8 inputs, 16 outputs
'wc3': tf.Variable(tf.random_normal([filter_size_3, filter_size_3, conv2_out, conv3_out], stddev=0.1, seed=SEED),
name="wc3"), # 5x5 conv, 8 inputs, 16 outputs
# 'wc4': tf.Variable(tf.random_normal([filter_size_4, filter_size_4, conv3_out, conv4_out], stddev=0.1, seed=SEED), name="wc4"), # 5x5 conv, 8 inputs, 16 outputs
# 'wd1': tf.Variable(tf.random_normal([16 * 24 / 2 * 42 / 2, n_hidden], stddev=0.1, seed=SEED)), # fully connected, 8*8*64 inputs, 1024 outputs
# 'wd1': tf.Variable(tf.random_normal([8 * 8 * 64, 1024], stddev=0.1)), # fully connected, 8*8*64 inputs, 1024 outputs
'wd1': tf.Variable(tf.random_normal([6 * 6 * conv3_out, n_hidden], stddev=0.1, seed=SEED), name="wd1"),
# hy: fully connected, 8*8*64 inputs, 1024 outputs
'out': tf.Variable(tf.random_normal([n_hidden, n_classes], stddev=0.1, seed=SEED), name="w_out")
# 1024 inputs, 10 outputs (class prediction)
}
biases = {
'bc1': tf.Variable(tf.random_normal([SEED]), name="bc1"),
'bc2': tf.Variable(tf.random_normal([conv2_out]), name="bc2"), # hy: use variable, instead fixed number
'bc3': tf.Variable(tf.random_normal([conv3_out]), name="bc3"), # hy: use variable, instead fixed number
'bd1': tf.Variable(tf.random_normal([n_hidden]), name="bd1"),
'out': tf.Variable(tf.random_normal([n_classes]), name="b_out") # hy:
}
# hy: try with zero mean
# tf.image.per_image_whitening(x)
# this operation computes (x-mean)/adjusted_stddev
pred = conv_net(x, weights, biases, dropout)
# val2_pred = conv_net(x, weights, biases, dropout_1s)
# pred = conv_net(x, weights, biases, keep_prob)
pred = tf.add(pred, 0, name="pred")
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y), name="cost")
# val2_cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
################ 3conv optimizer
if optimizer_type == 'GD':
# learning_rate = tf.train.exponential_decay(learning_rate, step,100000, 0.96, staircase=True)
# hy: GradientDescentOptimizer
print '\noptimizer', optimizer_type, '\tlearning_rate', learning_rate
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
print '\noptimizer', optimizer_type, '\tlearning_rate', learning_rate, 'lr_decay', lr_decay, 'decay_step', decay_step
amaxpred = tf.argmax(pred, 1) # Just to check the bug
amaxy = tf.argmax(y, 1) # Just to check for the debug
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name="accuracy")
# val2_accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Build the summary operation based on the TF collection of Summaries.
# Adding variables to be visualized
# hy:add diagrams
summary = tf.scalar_summary('Accuracy', accuracy)
tf.scalar_summary('Loss', cost)
##################################################################################################
# Tensor VIEW
# _X = np.array(_X[0])
# tensor_im = cv2.imread('../Data/data_1/hinten/hinten_ww1_rz235_1_ex1_35.jpg')
# tensor_im = cv2.cvtColor(tensor_im, cv2.COLOR_BGR2GRAY)
# tensor_im = imutils.resize(tensor_im, width=settings.w_resize, height=settings.h_resize) # w=146, h=121
# tensor_im = np.asarray(tensor_im, np.float32)
# print(a.shape)
# Image._show(Image.fromarray(a, 'RGB'))
# tf.image_summary('Images Original',tf.reshape(x, shape=[-1, 24, 42, 1]),max_images=4)
tf.image_summary('Original', tf.reshape(x, shape=[-1, tensor_h, tensor_w, 1]),
max_images=1) # hy:images_view
# images after conv1 before max pool
# _X = tf.reshape(x, shape=[-1, 24, 42, 1])
_X = tf.reshape(x, shape=[-1, tensor_h, tensor_w, 1]) # hy for display
# hy: conv1 view
# conv1 = tf.placeholder(tf.float32, name="conv1") #hy added
conv1 = conv2d(_X, weights['wc1'], biases['bc1'], 4)
conv1 = tf.add(conv1, 0, name="conv1")
print 'for conv1 view', conv1.get_shape()
conv_view_size = 46
tf.image_summary('1.Conv', tf.reshape(conv1, shape=[-1, conv_view_size, conv_view_size, 1]), max_images=SEED) # hy
# hy: conv2 view
conv2 = conv2d(conv1, weights['wc2'], biases['bc2'], 1)
conv2 = tf.add(conv2, 0, name="conv2")
print 'for conv2 view', conv2.get_shape()
# tf.image_summary('Output of Second Convolution',tf.reshape(conv2, shape=[-1, 24, 42, 1]), max_images=16)
tf.image_summary('2.Conv', tf.reshape(conv2, shape=[-1, conv_view_size, conv_view_size, 1]),
max_images=conv2_out) # hy
# hy: conv3 view
conv3 = conv2d(conv2, weights['wc3'], biases['bc3'], 1)
conv3 = tf.add(conv3, 0, name="conv3")
print 'for conv3 view', conv3.get_shape()
tf.image_summary('3.Conv', tf.reshape(conv3, shape=[-1, conv_view_size, conv_view_size, 1]),
max_images=conv3_out) # hy
tf.histogram_summary('Histogram 1.Conv', weights['wc1'])
# tf.histogram_summary('Histogram 2.Conv', weights['wc2']) #hy: added
tf.histogram_summary('Histogram pred', pred) # hy: added
summary_op = tf.merge_all_summaries()
################################################ Graph 3conv end
########################### CLASSIFIER end ###################################################
################################################ Graph 3conv end
################################### TRAIN begin #####################################################
if RETRAIN or CONTINUE_TRAIN:
try:
#total_images, digits, carimages, cartargets, f, val2_digits, val2_images, val2_targets, val2_f = tools.import_data(
# add_online=False)
#train_size = int(total_images * TrainingProp)
train_size = 1
print 'train size', train_size
batch_size = 1
# batch_size = int(train_size / n_classes * 2)# *2
print 'batch size', batch_size
val1_batch_xs, val1_batch_ys = digits.images[train_size + 1:1 - 1], \
digits.target[train_size + 1:1 - 1]
'''
val2_batch_xs, val2_batch_ys = val2_digits.images[0:len(val2_images) - 1], \
val2_digits.target[0:len(val2_images) - 1] # hy: use calc size
'''
except Exception as e:
print 'Check if file is created correctedly. Setting an array element with a sequence.'
print str(e)
with tf.Session() as sess:
saver = tf.train.Saver()
if RETRAIN:
# Initializing the variables
init = tf.initialize_all_variables() #hy: try
sess.run(init)
# Creating a saver for the model
if CONTINUE_TRAIN: #set model path
ckpt = tf.train.get_checkpoint_state(checkpoint_dir="")
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print "Continue to train with ", ckpt.model_checkpoint_path
else:
print 'not found model'
elapsed_time = time.time() - start_time
print 'Total elapsed time3:', "{:.2f}".format(elapsed_time), 's'
#hy: added to display all results in one graph
train_writer = tf.train.SummaryWriter(tensorboard_path + '/train', sess.graph)
validation_writer = tf.train.SummaryWriter(tensorboard_path + '/vali', sess.graph)
test_writer = tf.train.SummaryWriter(tensorboard_path + '/test', sess.graph)
#from datetime import datetime
#tensorboard_path = '../Tensorboard_data/sum107/'+str(datetime.now())+'/'
#summary_writer = tf.train.SummaryWriter(tensorboard_path, graph_def=sess.graph_def)
if RETRAIN:
step = 1
if CONTINUE_TRAIN:
step = current_step
# hy register finished class learning
acc_pre = 0
# Keep training until | |
#########################################################################################################
#----------This class represents the nnUNet Multiple Head Trainer. Implementation-----------------------#
#----------inspired by original implementation (--> nnUNetTrainerV2), copied code is marked as such.----#
#########################################################################################################
import os
import torch
import numpy as np
from itertools import tee
from collections import OrderedDict
from nnunet_ext.paths import default_plans_identifier
from nnunet.utilities.nd_softmax import softmax_helper
from nnunet.utilities.tensor_utilities import sum_tensor
from nnunet_ext.training.model_restore import restore_model
from nnunet.network_architecture.generic_UNet import Generic_UNet
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet_ext.run.default_configuration import get_default_configuration
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet_ext.network_architecture.MultiHead_Module import MultiHead_Module
from nnunet.training.dataloading.dataset_loading import load_dataset, unpack_dataset
from nnunet.training.data_augmentation.data_augmentation_moreDA import get_moreDA_augmentation
from nnunet_ext.utilities.helpful_functions import join_texts_with_char, nestedDictToFlatTable, dumpDataFrameToCsv
class nnUNetTrainerMultiHead(nnUNetTrainerV2): # Inherit default trainer class for 2D, 3D low resolution and 3D full resolution U-Net
def __init__(self, split, task, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
             unpack_data=True, deterministic=True, fp16=False, save_interval=5, already_trained_on=None, use_progress=True,
             identifier=default_plans_identifier, extension='multihead', tasks_list_with_char=None, mixed_precision=True,
             save_csv=True):
    r"""Constructor of Multi Head Trainer for 2D, 3D low resolution and 3D full resolution nnU-Nets.

    Beyond the parent constructor's arguments this stores the head split, the active
    task (head name), the per-fold training history (`already_trained_on`) used for
    restoring, and bookkeeping for periodic validation metrics.
    """
    # -- Initialize using parent class -- #
    super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16)

    # -- Set the provided split -- #
    self.split = split

    # -- Set the name of the head which is referred to as a task name -- #
    self.task = task

    # -- Set identifier to use for building the .json file that is used for restoring states -- #
    self.identifier = identifier

    # -- Store the fold for tracking and saving in the self.already_trained_on file -- #
    self.fold = fold

    # -- Store the flag if it is desired to save the validation metrics at every nth epoch as a csv as well -- #
    self.csv = save_csv

    # -- Set trainer_class_name -- #
    self.trainer_class_name = self.__class__.__name__

    # -- Template for the tracked state of a fresh fold (new lists are built on every call,
    # -- so it is safe to use the same literal for both branches below) -- #
    empty_fold_state = {'finished_training_on': list(), 'start_training_on': None, 'finished_validation_on': list(),
                        'used_identifier': self.identifier, 'prev_trainer': [self.trainer_class_name], 'val_metrics_should_exist': False,
                        'checkpoint_should_exist': False, 'tasks_at_time_of_checkpoint': list(),
                        'active_task_at_time_of_checkpoint': None}

    # -- Initialize or set self.already_trained_on dictionary to keep track of the trained tasks so far for restoring -- #
    if already_trained_on is not None:
        self.already_trained_on = already_trained_on  # Use provided already_trained on
        # -- If the current fold does not exists initialize it -- #
        if self.already_trained_on.get(str(self.fold), None) is None:
            self.already_trained_on[str(self.fold)] = empty_fold_state  # Add current fold as new entry
        else:  # It exists, then check if everything is in it
            # -- Check that the provided fold entry contains every expected key -- #
            assert all(key in self.already_trained_on[str(self.fold)] for key in empty_fold_state),\
                "The provided already_trained_on dictionary does not contain all necessary elements"
    else:
        self.already_trained_on = {str(self.fold): empty_fold_state}

    # -- Set the path were the trained_on file will be stored: grand parent directory from output_folder, ie. were all tasks are stored -- #
    self.trained_on_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(self.output_folder))))

    # -- Set save_every, so the super trainer class creates checkpoint individually and the validation metrics will be filtered accordingly -- #
    self.save_every = save_interval

    # -- Initialize subject_names list that is used to store the subject names for every nth evaluation -- #
    self.subject_names_raw = list()  # Store the names as is, ie. not cleaned (removed duplicates etc.) --> For evaluation necessary

    # -- Extract network_name that might come in handy at a later stage -- #
    # -- For more details on how self.output_folder is built look at get_default_configuration -- #
    help_path = os.path.normpath(self.output_folder)  # Normalize path in order to avoid errors
    help_path = help_path.split(os.sep)  # Split the path using the OS path seperator
    self.network_name = help_path[-5]  # 5th element from back is the name of the used network

    # -- Set the extension for output file -- #
    self.extension = extension

    # -- Set if the model should be compressed as floating point 16 -- #
    self.mixed_precision = mixed_precision

    # -- Ensure that it is a tuple and that the first element is a list and second element a string -- #
    assert isinstance(tasks_list_with_char, tuple) and isinstance(tasks_list_with_char[0], list) and isinstance(tasks_list_with_char[1], str),\
        "tasks_list_with_char should be a tuple consisting of a list of tasks as the first and a string "+\
        "representing the character that is used to join the tasks as the second element.."

    # -- Store the tuple consisting of a list with tasks and the character that should be used to join the tasks -- #
    self.tasks_list_with_char = tasks_list_with_char

    # -- Set tasks_joined_name for validation dataset building -- #
    self.tasks_joined_name = join_texts_with_char(self.tasks_list_with_char[0], self.tasks_list_with_char[1])

    # -- Define a dictionary for the metrics for validation after every nth epoch -- #
    self.validation_results = dict()

    # -- If -c is used, the self.validation_results need to be restored as well -- #
    # -- Check if the val_metrics should exist -- #
    if self.already_trained_on[str(self.fold)]['val_metrics_should_exist']:
        try:
            # -- Try to load the file -- #
            self.validation_results = load_json(join(self.output_folder, 'val_metrics.json'))
        except Exception as err:
            # -- BUG FIX: the previous bare `except:` + `assert False` was stripped under
            # -- `python -O`, silently swallowing a missing/corrupt metrics file, and also
            # -- caught KeyboardInterrupt. Raise explicitly and chain the cause instead. -- #
            raise AssertionError("The val_metrics.json file could not be loaded although it is expected to exist given the current state of the model.") from err

    # -- Set use_prograss_bar if desired so a progress will be shown in the terminal -- #
    self.use_progress_bar = use_progress

    # -- Define the empty Multi Head Network which might be used before intialization, so there is no error thrown (rehearsal) -- #
    self.mh_network = None

    # -- Define an empty trainer_model -- #
    self.trainer_model = None

    # -- Define flag for evaluation (per batch or per subject) -- #
    self.eval_batch = True

    # -- Update self.init_tasks so the storing works properly -- #
    self.init_args = (split, task, plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                      deterministic, fp16, save_interval, self.already_trained_on, use_progress, identifier, extension,
                      tasks_list_with_char, mixed_precision, save_csv)
def initialize(self, training=True, force_load_plans=False, num_epochs=500, prev_trainer_path=None):
    r"""Overwrite parent initialize: remember the path of the previous (base)
    trainer used as foundation for the Multi Head Trainer, let the caller set
    the epoch budget, and flag whether this model has been trained on more
    than one task so far before delegating to the parent class.
    """
    # Path of the trainer the Multi Head model builds on; None means the
    # model is initialized and trained from scratch.
    self.trainer_path = prev_trainer_path
    # Epoch budget requested by the caller.
    self.max_num_epochs = num_epochs
    # Look up which tasks have already been finished for the current fold.
    fold_state = self.already_trained_on.get(str(self.fold), list())
    if isinstance(fold_state, dict):
        finished_tasks = fold_state.get('finished_training_on', list())
    else:
        finished_tasks = list()
    # A "new" multi head trainer has seen at most one task so far.
    self.new_trainer = len(finished_tasks) <= 1
    # Parent call updates the remaining variables automatically.
    super().initialize(training, force_load_plans)
def initialize_network(self):
r"""Extend Initialization of Network --> Load pre-trained model (specified to setup the network).
Optimizer and lr initialization is still the same, since only the network is different.
"""
if self.trainer_path is None:
# -- Initialize from beginning and start training, since no model is provided -- #
super().initialize_network() # --> This updates the corresponding variables automatically since we inherit this class
# -- Create a Multi Head Generic_UNet from the current network using the provided split and first task name -- #
# -- Do not rely on self.task for initialization, since the user might provide the wrong task (unintended), -- #
# -- however for self.plans, the user needs to extract the correct plans_file path by himself using always the -- #
# -- first | |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import Perceptron
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import ComplementNB
class ModelAlteration():
def strat_kfold_evaluation(
        self,
        df,
        model,
        target:int,
        folds:int,
        shuffle:bool=True,
        random_state:int=None) -> [float, ([],[])]:
    '''
    Evaluates a classifier with stratified k-fold cross-validation and
    remembers the train/test index split of the best-scoring fold.

    Parameters
    ------------
    df          : Your dataframe
    model       : A scikit-learn model used to classify labels
    target      : The label (index) of your target column
    folds       : How often your dataframe should be split
    shuffle     : Specifies if the samples should be shuffled
    random_state: If shuffle=True, random_state specifies the used seed.
                  If None, shuffle will always be random.
    Returns
    ------------
    accuracy  : A list which contains the accuracy (in %) of the model for
                each fold
    best_fold : The (train_indices, test_indices) pair of the fold with the
                highest accuracy (ties keep the later fold)
    '''
    # Split features and labels; do not reuse the name `target` for the
    # label array so the parameter stays readable.
    data, labels = df.loc[:, df.columns != target].values, df[target].values
    skf = StratifiedKFold(n_splits=folds, shuffle=shuffle, random_state=random_state)
    accuracy = [0.0 for _ in range(folds)]
    best_fold = []
    for i, (train_idx, test_idx) in enumerate(skf.split(data, labels)):
        model.fit(data[train_idx], labels[train_idx])
        accuracy[i] = model.score(data[test_idx], labels[test_idx]) * 100
        # Keep the indices of the best fold so far. The original compared
        # against max(accuracy[:-1]), which included the current value and
        # the not-yet-filled zeros; comparing against the previously seen
        # maximum expresses the intent directly (>= keeps later ties).
        if accuracy[i] >= max(accuracy[:i], default=0):
            best_fold = (train_idx, test_idx)
    return (accuracy, best_fold)
def plot_accuracy(self, acc:[[float]], xlab:str, legend:[str], xaxis:[]=[]):
    '''
    Plots the accuracy curves of all parameter permutations.

    Parameters
    ------------
    acc    :[[float]]
        Contains the accuracy of all folds, one inner list per curve. A
        flat list of floats is accepted and treated as a single curve.
    xlab   :String
        Contains the name for the x-axis.
    legend :[String]
        Contains the values for the plot legend.
    xaxis  :[int] or [float]
        Contains values for the x-axis; if empty, positional indices are used.
    '''
    plt.xlabel(xlab)
    plt.ylabel('Accuracy [%]')
    # BUGFIX: wrap a flat list of numbers into a single-curve list-of-lists.
    # The old check (`len(acc) > 0`) only "wrapped" the empty list, so a
    # flat accuracy list was iterated value-by-value and plotting failed.
    if acc and not isinstance(acc[0], (list, tuple, np.ndarray)):
        acc = [acc]
    if not xaxis:
        for i, accuracy in enumerate(acc):
            plt.plot(range(len(accuracy)), accuracy, label=legend[i])
    else:
        for i, accuracy in enumerate(acc):
            plt.plot(xaxis, accuracy, label=legend[i])
    plt.legend(loc="upper left")
    plt.show()
def optimize_knn(self,
                 df,
                 target:int,
                 neighbours:[int] = list(range(1,11)),
                 metric:[int]=[1,2,3],
                 folds:int = 10,
                 plot:bool=True):
    '''
    Grid-searches the k-nearest-neighbours (kNN) classifier over every
    (metric, neighbours) permutation. Each permutation is scored with
    strat_kfold_evaluation(); the parameters and fold indices of the
    overall best fold are returned.

    Parameters
    ------------
    df         : dataframe
        Your datatable
    target     : int
        The index of your target column
    neighbours : [int]
        Numbers of neighbours to try for kNN.
    metric     : [int]
        Minkowski power parameters to try (1 = Manhattan, 2 = Euclidean,
        >=3 = Minkowski).
    folds      : int
        How often your dataframe should be split in strat_kfold_evaluation
    plot       : bool
        Plots the accuracies over each permutation
    Returns
    ------------
    best_fold: (np.array(int), {model_parameters})
        The index split of the overall best fold together with a dict of
        the model parameters that produced it.
    '''
    best_acc = 0
    best_model = 0
    fold_acc = [[None] * len(neighbours) for _ in metric]
    epoch = 1
    end = len(neighbours) * len(metric)
    for row, p_norm in enumerate(metric):
        for col, n_neigh in enumerate(neighbours):
            knn = KNeighborsClassifier(n_neighbors=n_neigh, p=p_norm)
            # Score this permutation; keep the best fold's accuracy.
            scores, fold_idx = self.strat_kfold_evaluation(df, knn, target, folds)
            fold_acc[row][col] = max(scores)
            if fold_acc[row][col] > best_acc:
                best_acc = fold_acc[row][col]
                best_model = (fold_idx, {"n_neighbors" : n_neigh, "p" : p_norm})
            print("Epoch %s/%s | neighbours=%s, metric=%s, Accuracy=%s" % (epoch, end, n_neigh, p_norm, fold_acc[row][col]))
            epoch += 1
    if plot:
        self.plot_accuracy(fold_acc, "Number of neighbours", ["Metric " + str(m) for m in metric], neighbours)
    return(best_model)
def optimize_perceptron(self,
                        df,
                        target:int,
                        learning_rate:[float] = np.linspace(1, 20, num=20),
                        penalty:[int]=[0,1,2,3],
                        folds:int = 10,
                        plot:bool=True):
    '''
    Grid-searches the perceptron classifier over every (penalty,
    learning_rate) permutation. Each permutation is scored with
    strat_kfold_evaluation(); the parameters and fold indices of the
    overall best fold are returned.

    Parameters
    ------------
    df            : dataframe
        Your datatable
    target        : int
        The index of your target column
    learning_rate : [float]
        Learning rates (eta0) the algorithm should try out.
    penalty       : [int]
        Penalties to try: 0 - None, 1 - l1, 2 - l2, 3 - elasticnet.
    folds         : int
        How often your dataframe should be split in strat_kfold_evaluation
    plot          : bool
        Plots the accuracies over each permutation
    Returns
    ------------
    best_fold: (np.array(int), {model_parameters})
        The index split of the overall best fold together with a dict of
        the model parameters that produced it.
    '''
    best_acc = 0
    best_model = 0
    fold_acc = [[None] * len(learning_rate) for _ in penalty]
    epoch = 1
    end = len(learning_rate) * len(penalty)
    # Translate numeric codes into the penalty names sklearn expects.
    code_to_name = {0: None, 1: "l1", 2: "l2", 3: "elasticnet"}
    penalty = [code_to_name[code] for code in penalty]
    for row, pen in enumerate(penalty):
        for col, eta in enumerate(learning_rate):
            clf = Perceptron(eta0=eta, penalty=pen)
            scores, fold_idx = self.strat_kfold_evaluation(df, clf, target, folds)
            fold_acc[row][col] = max(scores)
            if fold_acc[row][col] > best_acc:
                best_acc = fold_acc[row][col]
                best_model = (fold_idx, { "eta0" : eta, "penalty" : pen})
            print("Epoch %s/%s | learning_rate=%s, penalty=%s, Accuracy=%s" % (epoch, end, eta, pen, fold_acc[row][col]))
            epoch += 1
    if plot:
        self.plot_accuracy(fold_acc, "Used learning_rate", ["penalty: " + str(p) for p in penalty], list(learning_rate))
    return(best_model)
def optimize_SVM(self,
                 df,
                 target:int,
                 regularization:[float] = np.linspace(1, 10, num=10),
                 kernel:[int]=[1,2,3],
                 folds:int = 10,
                 plot:bool=True):
    '''
    Grid-searches the SVM classifier over every (kernel, regularization)
    permutation. Each permutation is scored with strat_kfold_evaluation();
    the parameters and fold indices of the overall best fold are returned.

    Parameters
    ------------
    df            : dataframe
        Your datatable
    target        : int
        The index of your target column
    regularization: [float]
        Penalties (C) to try on each kernel function.
    kernel        : [int]
        Kernel functions to use (refers to sklearn.svm.SVC):
        0 - linear (takes a long time without dimension reduction)
        1 - poly
        2 - rbf
        3 - sigmoid
        4 - precomputed (look at sklearn's documentation first; requires a
            precomputed kernel matrix as input)
    folds         : int
        How often your dataframe should be split in strat_kfold_evaluation
    plot          : bool
        Plots the accuracies over each fold if True
    Returns
    ------------
    best_fold: (np.array(int), {model_parameters})
        The index split of the overall best fold together with a dict of
        the model parameters that produced it.
    '''
    best_acc, best_model = 0, 0
    fold_acc = [[None] * len(regularization) for _ in kernel]
    epoch, end = 1, len(regularization) * len(kernel)
    # BUGFIX: option 4 ("precomputed") was documented but missing from the
    # code->name mapping, so requesting it raised a KeyError.
    code_to_kernel = {0: "linear", 1: "poly", 2: "rbf", 3: "sigmoid", 4: "precomputed"}
    kernel = [code_to_kernel[code] for code in kernel]
    for i, kern in enumerate(kernel):
        for j, reg in enumerate(regularization):
            model = SVC(C=reg, kernel=kern)
            scores, tmp_fold = self.strat_kfold_evaluation(df, model, target, folds)
            fold_acc[i][j] = max(scores)
            if fold_acc[i][j] > best_acc:
                best_acc = fold_acc[i][j]
                best_model = (tmp_fold, {"C" : reg, "kernel" : kern})
            print("Epoch %s/%s | regularization = %s, kernel = %s, Accuracy = %s" % (epoch, end, reg, kern, fold_acc[i][j]))
            epoch += 1
    if plot:
        self.plot_accuracy(fold_acc, "Used regularization", ["kernel: " + str(k) for k in kernel], list(regularization))
    return(best_model)
def optimize_decision_tree(self,
df,
target:int,
criterion = ["gini", "entropy"],
max_depth:[int]= np.linspace(1, 10, num=10),
splitter = ["best", "random"],
folds:int = 10,
plot:bool=True):
'''
Attempts to find the most optimal model parameters for the decision tree
classifier by finding the best fold for each permutation of the
parameters. The best fold is determined by strat_kfold_evaluation().
The accuracy of all best folds is then compared and the parameters of
the best fold are returned (in addition to the fold itself)
Parameters
------------
df : dataframe
Your datatable
target : int
The index of your target column
criterion : [String]
A list containing "gini" and "entropy"
max_depth : [int]
A list containing the number of max_depth the algorithm should
try out
splitter : [String]
A list containing "best" and "random"
folds : int
How often your dataframe should be split in | |
"""
Provides class Hiarrchy for the analysis of multiple segementations orgainized
in a hierarchy (each segmentation is a subset of the next one).
# Author: <NAME> (Max Planck Institute for Biochemistry)
# $Id$
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import str
from builtins import range
#from past.utils import old_div
from past.builtins import basestring
__version__ = "$Revision$"
from copy import deepcopy
import logging
import numpy
import scipy
import scipy.ndimage as ndimage
from .labels import Labels
from .connected import Connected
from .statistics import Statistics
from .grey import Grey
import pyto.util.nested as nested
class Hierarchy(Labels):
"""
A hierarchy contains segments that are organized in a strictly hierarchical
manner, that is they satisfy the following requirements:
1) Each segment is either superset (overlaps), subset or does not
intersect any other segment.
2) Segments are organized in levels, all segments of a level taken
together overlap all segments at one level below the level.
Adding and removing hierarchy levels:
- addLevel: adds given segments to a specified level
- extractLevelsGen: generator that yields levels
- popLevel: removes top or bottom level and returns it
- extractLevel: returns specified level
- removeLowerLevels: removes ids corresponding to all levels below a
given level
- removeHigherLevels: removes ids corresponding to all levels above a
given level
- remove: removes segments corresponding to the given ids
Lower level id related methods:
- getIdLevels: returns levels of specified ids
- findHigherIds: finds ids directly above given ids
- findLowerIds: finds ids directly below given ids
Analysis:
-
Conversion:
- toSegment(): converts this instance to Segment provided this instance
is flat (no segment is above or below any other segment)
Data structure attributes (should not be accessed directly):
- self.ids: (numpy.ndarray) all ids
eg: [2,4,5,6,12,16,19,22, ...]
- self.levelIds: each element of this list is alist that contains all
ids present at that level
eg: [[2,4,5,6], [12,16,19], [22], ...]
- self._higherIds: dictionary of one level higher ids for each id
eg: {2:12, 4:16, 5:16, 6:16, 12:22, 16:22, 22:None, ...}
- self._lowerIds: dictionary of (lists of) one level lower ids for each id
(inverse of self._higherIds)
eg: {2:[], 3:[], 5:[], 12:[2], 16:[4,5,6], 19:[], 22:[12,16], ...}
- self.data: ndarray of all segments
There might be a number of level-defined properties, whose names are given
in self.properties.
"""
###############################################################
#
# Lower level manipulations (methods may access id structures directly)
#
##############################################################
def __init__(self, data=None, levelIds=[], higherIds={}):
    """
    Sets up the id-related data structures and the data array.

    Passing levelIds and higherIds is enough to obtain functional
    id-related attributes: ids, levelIds, _higherIds and _lowerIds are
    all set here. Both arguments are deepcopied, so the caller's objects
    are never modified through this instance (which also makes the
    mutable defaults safe).

    Arguments:
      - data: (ndarray) data array
      - levelIds: nested list of ids organized in levels
      - higherIds: dictionary of id : higher_id pairs
    """
    # Parent init first (so super.setDefaults and friends are usable).
    super(Hierarchy, self).__init__(data)
    # Id structures: per-level lists, flat int array, and both link
    # directions (_lowerIds is derived from _higherIds).
    self.levelIds = deepcopy(levelIds)
    self.ids = numpy.asarray(nested.flatten(self.levelIds), dtype='int')
    self._higherIds = deepcopy(higherIds)
    self.makeLowerIds()
    # Data array.
    self.data = data
    # Names of level-defined properties.
    self.properties = []
def makeLowerIds(self):
    """
    Builds self._lowerIds, the inverse of self._higherIds: each id that
    has ids directly below it is mapped to the list of those ids.
    """
    self._lowerIds = {}
    for l_id in self._higherIds:
        h_id = self.getHigherId(l_id)
        # BUGFIX: per the class docstring example, _higherIds values may be
        # None (e.g. {22: None}); `None > 0` raises TypeError on Python 3,
        # so guard against None explicitly before comparing.
        if h_id is not None and h_id > 0:
            try:
                # Append to an already existing list of lower ids ...
                l_ids = self.getLowerIds(h_id)
                l_ids.append(l_id)
            except KeyError:
                # ... or start a new one for this higher id.
                l_ids = [l_id]
            self._lowerIds[h_id] = l_ids
def orderLevelIds(self):
    """
    Orders ids in the self.levelIds so that on each level ids that have
    higher ids precede those that do not. Also orders self.ids in the
    same way.

    Works top-down: for each level, the ids one level below that are
    linked upwards are placed first.
    """
    # order the top level?
    # order levels below top
    for level in range(self.topLevel, 0, -1):
        # find ids at level-1 that have higher ids
        lower = self.findLowerIds(ids=self.getIds(level))
        lower = nested.flatten(lower)
        # find ids at level-1 that do not have higher ids
        other = set(self.getIds(level-1)).difference(lower)
        # put those that have higher first
        # NOTE(review): iterating a set gives an arbitrary order, so the
        # ids without higher ids end up in unspecified order — confirm
        # that callers do not rely on their relative order.
        self.levelIds[level-1] = lower + list(other)
    # order ids in the same way
    self.ids = nested.flatten(self.levelIds)
def getIds(self, level):
    """
    Returns ids that exist at the (argument) level in a flat list.

    Argument:
      - level: an int level index, or a slice over levels
    """
    # no ids at all
    if len(self.ids) == 0:
        return []
    if isinstance(level, slice):
        # several levels: flatten the selected sublists
        return nested.flatten(self.levelIds[level])
    # single level; cast in case a numpy integer was passed
    return self.levelIds[int(level)]
def addIds(self, ids, level):
    """
    Adds ids at the level. If ids is None or an empty list (ndarray),
    [] is added.

    Arguments:
      - ids: single or a list (ndarray) of ids to be added
      - level: id level
    """
    # normalize ids to an ndarray
    if ids is None:
        ids = numpy.array([])
    elif isinstance(ids, (list, numpy.ndarray)):
        ids = numpy.asarray(ids)
    else:
        ids = numpy.array([ids])
    # extend the flat id array
    if self.ids is None:
        self.ids = numpy.array([], dtype='int')
    self.ids = numpy.append(self.ids, ids)
    # insert a new level holding these ids
    if self.levelIds is None:
        self.levelIds = []
    self.levelIds.insert(level, ids.tolist())
def getIdLevels(self, ids):
    """
    Returns the level of the given id, or a list of levels when a list
    (or ndarray) of ids is given. None stands for an id that is not
    found at any level.

    Argument:
      - ids: one or a list of ids
    """
    if isinstance(ids, (list, numpy.ndarray)):
        # recurse element-wise
        return [self.getIdLevels(one_id) for one_id in ids]
    for level, ids_at_level in enumerate(self.levelIds):
        if ids in ids_at_level:
            return level
    # id not present at any level
    return None
def checkIds(self):
    """
    Returns True if no id appears at more than one level, False otherwise.
    """
    all_ids = nested.flatten(self.levelIds)
    # duplicates collapse in the set, making the lengths differ
    return len(all_ids) == len(set(all_ids))
def getHigherId(self, id_, check=False):
    """
    Returns the id directly above the given id_, or 0 if there is no
    higher level id.

    Raises KeyError if id_ is not in self.ids, and ValueError if check
    is True and the id structures are inconsistent.

    Arguments:
      - id_: segment id
      - check: validate id structures first (prevents an infinite loop
        in findHigherIds)
    """
    if check and not self.checkIds():
        raise ValueError("Id-structures are not consistent")
    try:
        return self._higherIds[id_]
    except KeyError:
        if id_ in self.ids:
            # known id without an entry above it
            # (not sure what's better: 0 or None)
            return 0
        raise
def getHighestBranchId(self, id_):
    """
    Finds the highest id on the branch to which argument id_ belongs.
    A branch is a part of the id hierarchy (tree) that contains no
    branching points, that is all ids on a branch have strict
    higher-lower relation.

    Implemented iteratively (walks upwards until the top or a branching
    point is reached).

    Argument:
      - id_: segment id
    """
    current = id_
    while True:
        above = self.getHigherId(id_=current)
        if (above is None) or (above == 0):
            # no higher id exists: top of the hierarchy
            return current
        if len(self.getLowerIds(id_=above)) > 1:
            # the id above is a branching point, so the branch ends here
            return current
        # higher id ok, keep climbing
        current = above
def findHigherIds(self, ids, level=None, mode='single'):
"""
Returns ids that are directly above (argument) ids.
If mode is single, for each element of ids an id is found that is
directly above the given id, and it belongs either to the specified
level, or to the first higher level (if level is None). A single id is
returned if argument ids is a sigle id, or a list of (higher) ids that
correspond to argument ids is returned if argument ids is a list.
If mode is all, for each element of argument ids all ids directly above
up to (including) the argument level are returned. If arg level is None,
all ids above the specified arguments are returned. If argument ids
is a single number a list of ids is returned, while if it is a list, a
nested list corresponding to the specified ids is returned.
None is specified in the return for each id that has no higher ids (at
the required level) in the single mode, and empty list in the all mode.
Note that this differs slightly from the return of getHigherId where
0 is used insted of None.
Argument:
- ids: snigle id, or a list, of ids
- level: None for one level up, or (int) level
- mode: 'single' or 'all'
Return: higher | |
# fixture components ---------------------------------------------
@pytest.fixture
def ColorFormat_from_colorchoice_parent_(self, request):
    # Patch `ColorFormat.from_colorchoice_parent` so tests can assert on
    # how it was called.
    return method_mock(request, ColorFormat, "from_colorchoice_parent")
@pytest.fixture
def color_(self, request):
    # Stand-in ColorFormat instance.
    return instance_mock(request, ColorFormat)
class DescribeTextBulletSize(object):
    """ Unit-test suite for `pptx.text.bullets.TextBulletSize` object. """

    def it_can_set_the_size_to_follow_text(self, follow_text_fixture):
        # follow_text() should replace any size element with a:buSzTx and
        # construct the follow-text helper from the parent's size element.
        text_bullet_size, _TextBulletSizeFollowText_, expected_xml, follow_text_ = follow_text_fixture
        text_bullet_size.follow_text()
        assert text_bullet_size._parent.xml == expected_xml
        _TextBulletSizeFollowText_.assert_called_once_with(text_bullet_size._parent.eg_textBulletSize)

    def it_can_set_the_size_to_points(self, points_fixture):
        # set_points() should replace any size element with a:buSzPts.
        text_bullet_size, _TextBulletSizePoints_, expected_xml, size_points_ = points_fixture
        text_bullet_size.set_points()
        assert text_bullet_size._parent.xml == expected_xml
        _TextBulletSizePoints_.assert_called_once_with(text_bullet_size._parent.eg_textBulletSize)

    def it_can_set_the_size_to_percentage(self, percentage_fixture):
        # set_percentage() should replace any size element with a:buSzPct.
        text_bullet_size, _TextBulletSizePercent_, expected_xml, size_points_ = percentage_fixture
        text_bullet_size.set_percentage()
        assert text_bullet_size._parent.xml == expected_xml
        _TextBulletSizePercent_.assert_called_once_with(text_bullet_size._parent.eg_textBulletSize)

    def it_knows_its_points(self, points_size_, type_prop_):
        # `.points` delegates to the wrapped size object when `.type`
        # reports TextBulletSizePoints.
        points_size_.points = Pt(12)
        type_prop_.return_value = "TextBulletSizePoints"
        text_bullet_size = TextBulletSize(None, points_size_)
        points = text_bullet_size.points
        assert points == Pt(12)

    def it_can_change_its_points(self, points_size_, type_prop_):
        # Assigning `.points` forwards the value to the wrapped size object.
        type_prop_.return_value = "TextBulletSizePoints"
        text_bullet_size = TextBulletSize(None, points_size_)
        text_bullet_size.points = Pt(42)
        assert points_size_.points == Pt(42)

    def it_knows_its_percent(self, percentage_size_, type_prop_):
        # `.percentage` delegates analogously for TextBulletSizePercent.
        percentage_size_.percentage = 150
        type_prop_.return_value = "TextBulletSizePercent"
        text_bullet_size = TextBulletSize(None, percentage_size_)
        percent = text_bullet_size.percentage
        assert percent == 150

    def it_can_change_its_percentage(self, percentage_size_, type_prop_):
        type_prop_.return_value = "TextBulletSizePercent"
        text_bullet_size = TextBulletSize(None, percentage_size_)
        text_bullet_size.percentage = 42
        assert percentage_size_.percentage == 42

    def it_knows_its_type(self, type_fixture):
        bullet_size, expected_value = type_fixture
        bullet_size_type = bullet_size.type
        assert bullet_size_type == expected_value

    # fixtures -------------------------------------------------------

    @pytest.fixture(
        params = [
            # (starting pPr cxml, expected cxml after the call)
            ("a:pPr", "a:pPr/a:buSzTx"),
            ("a:pPr/a:buSzPct", "a:pPr/a:buSzTx")
        ]
    )
    def follow_text_fixture(self, request, follow_text_):
        cxml, expected_cxml = request.param
        text_bullet_size = TextBulletSize.from_parent(element(cxml))
        _TextBulletSizeFollowText_ = class_mock(request, "pptx.text.bullets._TextBulletSizeFollowText", return_value=follow_text_, autospec=True)
        expected_xml = xml(expected_cxml)
        return text_bullet_size, _TextBulletSizeFollowText_, expected_xml, follow_text_

    @pytest.fixture(
        params = [
            ("a:pPr", "a:pPr/a:buSzPts"),
            ("a:pPr/a:buSzPct", "a:pPr/a:buSzPts")
        ]
    )
    def points_fixture(self, request, points_size_):
        cxml, expected_cxml = request.param
        text_bullet_size = TextBulletSize.from_parent(element(cxml))
        _TextBulletSizePoints_ = class_mock(request, "pptx.text.bullets._TextBulletSizePoints", return_value=points_size_, autospec=True)
        expected_xml = xml(expected_cxml)
        return text_bullet_size, _TextBulletSizePoints_, expected_xml, points_size_

    @pytest.fixture(
        params = [
            ("a:pPr", "a:pPr/a:buSzPct"),
            ("a:pPr/a:buSzPts", "a:pPr/a:buSzPct")
        ]
    )
    def percentage_fixture(self, request, percentage_size_):
        cxml, expected_cxml = request.param
        text_bullet_size = TextBulletSize.from_parent(element(cxml))
        _TextBulletSizePercent_ = class_mock(request, "pptx.text.bullets._TextBulletSizePercent", return_value=percentage_size_, autospec=True)
        expected_xml = xml(expected_cxml)
        return text_bullet_size, _TextBulletSizePercent_, expected_xml, percentage_size_

    @pytest.fixture
    def type_fixture(self, text_bullet_size_):
        # Chained assignment: both the mock's `.type` and the expected
        # value are 42.
        expected_value = text_bullet_size_.type = 42
        text_bullet_size = TextBulletSize(None, text_bullet_size_)
        return text_bullet_size, expected_value

    # fixture components ---------------------------------------------

    @pytest.fixture
    def follow_text_(self, request):
        return instance_mock(request, _TextBulletSizeFollowText)

    @pytest.fixture
    def points_size_(self, request):
        return instance_mock(request, _TextBulletSizePoints)

    @pytest.fixture
    def percentage_size_(self, request):
        return instance_mock(request, _TextBulletSizePercent)

    @pytest.fixture
    def text_bullet_size_(self, request):
        return instance_mock(request, TextBulletSize)

    @pytest.fixture
    def type_prop_(self, request):
        # Patches the `.type` property so tests can pick the delegate branch.
        return property_mock(request, TextBulletSize, "type")
class Describe_TextBulletSize(object):
    """ Unit-test suite for `pptx.text.bullets._TextBulletSize` object. """

    def it_raises_on_points_access(self, points_raise_fixture):
        # The abstract base rejects `.points` access.
        bullet_size, exception_type = points_raise_fixture
        with pytest.raises(exception_type):
            bullet_size.points

    def it_raises_on_percentage_access(self, percentage_raise_fixture):
        # The abstract base rejects `.percentage` access.
        bullet_size, exception_type = percentage_raise_fixture
        with pytest.raises(exception_type):
            bullet_size.percentage

    # fixtures -------------------------------------------------------

    @pytest.fixture
    def points_raise_fixture(self):
        bullet_size = _TextBulletSize("foobar")
        exception_type = TypeError
        return bullet_size, exception_type

    @pytest.fixture
    def percentage_raise_fixture(self):
        bullet_size = _TextBulletSize("foobar")
        exception_type = TypeError
        return bullet_size, exception_type
class DescribeTextBulletSizePercent(object):
    """ Unit-test suite for `pptx.text.bullets._TextBulletSizePercent` object. """

    def it_knows_its_bullet_size_type(self, size_type_fixture):
        percent_size, expected_value = size_type_fixture
        size_type = percent_size.type
        assert size_type == expected_value

    def it_knows_its_percentage(self, get_percentage_fixture):
        bullet_size_percent, expected_value = get_percentage_fixture
        percentage = bullet_size_percent.percentage
        assert percentage == expected_value

    def it_can_change_its_percentage(self, set_percentage_fixture):
        # Assigning `.percentage` writes the `val` attribute on the element.
        bullet_size_percent, percentage, percentageSize, expected_xml = set_percentage_fixture
        bullet_size_percent.percentage = percentage
        assert percentageSize.xml == expected_xml

    # fixtures -------------------------------------------------------

    @pytest.fixture
    def size_type_fixture(self):
        xBulletSize = element("a:buSzPct")
        percent_size = _TextBulletSizePercent(xBulletSize)
        expected_value = "TextBulletSizePercent"
        return percent_size, expected_value

    @pytest.fixture(
        params=[
            # (buSzPct cxml, expected percentage)
            # NOTE(review): val=150000 maps to 1.5, so `val` appears to be
            # stored in thousandths of a percent — confirm against schema.
            ("a:buSzPct", None),
            ("a:buSzPct{val=150000}", 1.5),
        ]
    )
    def get_percentage_fixture(self, request):
        bulletSizePercent_cxml, expected_value = request.param
        bulletSizePercent = element(bulletSizePercent_cxml)
        bullset_size_percent = _TextBulletSizePercent(bulletSizePercent)
        return bullset_size_percent, expected_value

    @pytest.fixture(
        params=[
            ("a:buSzPct", 1.5, "a:buSzPct{val=150000}"),
            ("a:buSzPct{val=4242}", .42, "a:buSzPct{val=42000}"),
        ]
    )
    def set_percentage_fixture(self, request):
        bulletSizePercent_cxml, percentage, expected_cxml = request.param
        bulletSizePercent = element(bulletSizePercent_cxml)
        expected_xml = xml(expected_cxml)
        bullset_size_percent = _TextBulletSizePercent(bulletSizePercent)
        return bullset_size_percent, percentage, bulletSizePercent, expected_xml
class DescribeTextBulletSizePoints(object):
    """ Unit-test suite for `pptx.text.bullets._TextBulletSizePoints` object. """

    def it_knows_its_bullet_size_type(self, size_type_fixture):
        points_size, expected_value = size_type_fixture
        size_type = points_size.type
        assert size_type == expected_value

    def it_knows_its_points(self, get_points_fixture):
        bullet_size_points, expected_value = get_points_fixture
        points = bullet_size_points.points
        assert points == expected_value

    def it_can_change_its_points(self, set_points_fixture):
        # Assigning `.points` writes the `val` attribute on the element.
        bullet_size_points, points, pointsSize, expected_xml = set_points_fixture
        bullet_size_points.points = points
        assert pointsSize.xml == expected_xml

    # fixtures -------------------------------------------------------

    @pytest.fixture
    def size_type_fixture(self):
        xBulletSize = element("a:buSzPts")
        # BUGFIX: this fixture previously built _TextBulletSizeFollowText,
        # whose `.type` is "TextBulletSizeFollowText" — the assertion on
        # "TextBulletSizePoints" could never pass. Build the points class.
        points_size = _TextBulletSizePoints(xBulletSize)
        expected_value = "TextBulletSizePoints"
        return points_size, expected_value

    @pytest.fixture(
        params=[
            # (buSzPts cxml, expected points)
            # NOTE(review): val=16 maps to 2032, consistent with `val` in
            # hundredths of a point converted to EMU — confirm.
            ("a:buSzPts", None),
            ("a:buSzPts{val=16}", 2032),
        ]
    )
    def get_points_fixture(self, request):
        bulletSizePoints_cxml, expected_value = request.param
        bulletSizePoints = element(bulletSizePoints_cxml)
        bullset_size_points = _TextBulletSizePoints(bulletSizePoints)
        return bullset_size_points, expected_value

    @pytest.fixture(
        params=[
            ("a:buSzPts", Pt(16), "a:buSzPts{val=1600}"),
            ("a:buSzPts{val=4242}", Pt(12), "a:buSzPts{val=1200}"),
        ]
    )
    def set_points_fixture(self, request):
        bulletSizePoints_cxml, points, expected_cxml = request.param
        bulletSizePoints = element(bulletSizePoints_cxml)
        expected_xml = xml(expected_cxml)
        bullset_size_points = _TextBulletSizePoints(bulletSizePoints)
        return bullset_size_points, points, bulletSizePoints, expected_xml
class DescribeTextBulletSizeFollowText(object):
    """ Unit-test suite for `pptx.text.bullets._TextBulletSizeFollowText` object. """

    def it_knows_its_bullet_size_type(self, size_type_fixture):
        follow_text_size, expected_value = size_type_fixture
        size_type = follow_text_size.type
        assert size_type == expected_value

    # fixtures -------------------------------------------------------

    @pytest.fixture
    def size_type_fixture(self):
        # An a:buSzTx element marks "bullet size follows text".
        xBulletSize = element("a:buSzTx")
        follow_text_size = _TextBulletSizeFollowText(xBulletSize)
        expected_value = "TextBulletSizeFollowText"
        return follow_text_size, expected_value
class DescribeTextBulletTypeface(object):
""" Unit-test suite for `pptx.text.bullets.TextBulletTypeface` object. """
def it_can_set_the_size_to_follow_text(self, follow_text_fixture):
    # NOTE(review): the name says "size" but this suite exercises the
    # bullet *typeface*; `follow_text()` should swap in an a:buFontTx
    # element and store the follow-text helper on the wrapper.
    text_bullet_typeface, _BulletTypefaceFollowText_, expected_xml, follow_text_ = follow_text_fixture
    text_bullet_typeface.follow_text()
    assert text_bullet_typeface._parent.xml == expected_xml
    _BulletTypefaceFollowText_.assert_called_once_with(text_bullet_typeface._parent.eg_textBulletTypeface)
    assert text_bullet_typeface._bullet_typeface is follow_text_
def it_can_set_to_typeface(self, specific_typeface_fixture):
    # `set_typeface()` should swap in an a:buFont element and store the
    # specific-typeface helper on the wrapper.
    text_bullet_typeface, _BulletTypefaceSpecific_, expected_xml, specific_typeface_ = specific_typeface_fixture
    text_bullet_typeface.set_typeface()
    assert text_bullet_typeface._parent.xml == expected_xml
    _BulletTypefaceSpecific_.assert_called_once_with(text_bullet_typeface._parent.eg_textBulletTypeface)
    assert text_bullet_typeface._bullet_typeface is specific_typeface_
def it_knows_its_type(self, type_fixture):
    # `type_fixture` (presumably defined further down in this class)
    # yields a (typeface_object, expected_type) pair — TODO confirm.
    bullet_typeface, expected_value = type_fixture
    bullet_type = bullet_typeface.type
    assert bullet_type == expected_value
def it_knows_its_typeface(self, text_bullet_typeface_, type_prop_):
    # Arrange: the inner object carries the typeface; `.type` selects it.
    type_prop_.return_value = "BulletTypefaceSpecific"
    text_bullet_typeface_.typeface = "Foobar"
    typeface_fmt = TextBulletTypeface(None, text_bullet_typeface_)
    # The wrapper delegates `typeface` reads to the inner object.
    assert typeface_fmt.typeface == "Foobar"
def it_can_change_its_typeface(self, text_bullet_typeface_, type_prop_):
    # The wrapper delegates `typeface` writes to the inner object.
    type_prop_.return_value = "BulletTypefaceSpecific"
    typeface_fmt = TextBulletTypeface(None, text_bullet_typeface_)
    typeface_fmt.typeface = "Foobar"
    assert text_bullet_typeface_.typeface == "Foobar"
def it_knows_its_pitch_family(self, text_bullet_typeface_, type_prop_):
    # Arrange: the inner object carries the pitch family.
    type_prop_.return_value = "BulletTypefaceSpecific"
    text_bullet_typeface_.pitch_family = "Foobar"
    typeface_fmt = TextBulletTypeface(None, text_bullet_typeface_)
    # The wrapper delegates `pitch_family` reads to the inner object.
    assert typeface_fmt.pitch_family == "Foobar"
def it_can_change_its_pitch_family(self, text_bullet_typeface_, type_prop_):
    # The wrapper delegates `pitch_family` writes to the inner object.
    type_prop_.return_value = "BulletTypefaceSpecific"
    typeface_fmt = TextBulletTypeface(None, text_bullet_typeface_)
    typeface_fmt.pitch_family = "Foobar"
    assert text_bullet_typeface_.pitch_family == "Foobar"
def it_knows_its_panose(self, text_bullet_typeface_, type_prop_):
text_bullet_typeface_.panose = "Foobar"
type_prop_.return_value = "BulletTypefaceSpecific"
bullet_typeface = TextBulletTypeface(None, text_bullet_typeface_)
panose = bullet_typeface.panose
assert panose == "Foobar"
def it_can_change_its_panose(self, text_bullet_typeface_, type_prop_):
type_prop_.return_value = "BulletTypefaceSpecific"
bullet_typeface = TextBulletTypeface(None, text_bullet_typeface_)
bullet_typeface.panose = "Foobar"
assert text_bullet_typeface_.panose == "Foobar"
def it_knows_its_charset(self, text_bullet_typeface_, type_prop_):
text_bullet_typeface_.charset = "Foobar"
type_prop_.return_value = "BulletTypefaceSpecific"
bullet_typeface = TextBulletTypeface(None, text_bullet_typeface_)
charset = bullet_typeface.charset
assert charset == "Foobar"
def it_can_change_its_charset(self, text_bullet_typeface_, type_prop_):
type_prop_.return_value = "BulletTypefaceSpecific"
bullet_typeface = TextBulletTypeface(None, text_bullet_typeface_)
bullet_typeface.charset = "Foobar"
assert text_bullet_typeface_.charset == "Foobar"
# fixtures -------------------------------------------------------
@pytest.fixture(
    params = [
        # (starting paragraph-properties cxml, expected cxml after follow_text())
        ("a:pPr", "a:pPr/a:buFontTx"),
        ("a:pPr/a:buFont", "a:pPr/a:buFontTx")
    ]
)
def follow_text_fixture(self, request, follow_text_):
    """Parametrized setup for follow_text(): with and without a pre-existing a:buFont."""
    cxml, expected_cxml = request.param
    text_bullet_typeface = TextBulletTypeface.from_parent(element(cxml))
    _BulletTypefaceFollowText_ = class_mock(request, "pptx.text.bullets._BulletTypefaceFollowText", return_value=follow_text_, autospec=True)
    expected_xml = xml(expected_cxml)
    return text_bullet_typeface, _BulletTypefaceFollowText_, expected_xml, follow_text_

@pytest.fixture(
    params = [
        # (starting paragraph-properties cxml, expected cxml after set_typeface())
        ("a:pPr", "a:pPr/a:buFont"),
        ("a:pPr/a:buFontTx", "a:pPr/a:buFont")
    ]
)
def specific_typeface_fixture(self, request, specific_typeface_):
    """Parametrized setup for set_typeface(): with and without a pre-existing a:buFontTx."""
    cxml, expected_cxml = request.param
    text_bullet_typeface = TextBulletTypeface.from_parent(element(cxml))
    _BulletTypefaceSpecific_ = class_mock(request, "pptx.text.bullets._BulletTypefaceSpecific", return_value=specific_typeface_, autospec=True)
    expected_xml = xml(expected_cxml)
    return text_bullet_typeface, _BulletTypefaceSpecific_, expected_xml, specific_typeface_

@pytest.fixture
def type_fixture(self, text_bullet_typeface_):
    """Wraps a mock typeface whose .type is a sentinel value (42)."""
    expected_value = text_bullet_typeface_.type = 42
    text_bullet_typeface = TextBulletTypeface(None, text_bullet_typeface_)
    return text_bullet_typeface, expected_value
# fixture components ---------------------------------------------
@pytest.fixture
def follow_text_(self, request):
    """Instance mock standing in for a _BulletTypefaceFollowText proxy."""
    return instance_mock(request, _BulletTypefaceFollowText)

@pytest.fixture
def specific_typeface_(self, request):
    """Instance mock standing in for a _BulletTypefaceSpecific proxy."""
    return instance_mock(request, _BulletTypefaceSpecific)

@pytest.fixture
def text_bullet_typeface_(self, request):
    """Instance mock standing in for a TextBulletTypeface."""
    return instance_mock(request, TextBulletTypeface)

@pytest.fixture
def type_prop_(self, request):
    """Property mock patched over TextBulletTypeface.type."""
    return property_mock(request, TextBulletTypeface, "type")
class Describe_BulletTypeface(object):
    """Unit-test suite for `pptx.text.bullets._BulletTypeface` object.

    `_BulletTypeface` is the abstract base proxy; each typeface-related
    property must raise on access, which each test below verifies.
    """

    def it_raises_on_typeface_access(self, typeface_raise_fixture):
        bullet_typeface, exception_type = typeface_raise_fixture
        with pytest.raises(exception_type):
            bullet_typeface.typeface

    def it_raises_on_pitch_family_access(self, pitch_family_raise_fixture):
        bullet_typeface, exception_type = pitch_family_raise_fixture
        with pytest.raises(exception_type):
            bullet_typeface.pitch_family

    def it_raises_on_panose_access(self, panose_raise_fixture):
        bullet_typeface, exception_type = panose_raise_fixture
        with pytest.raises(exception_type):
            bullet_typeface.panose

    def it_raises_on_charset_access(self, charset_raise_fixture):
        bullet_typeface, exception_type = charset_raise_fixture
        with pytest.raises(exception_type):
            bullet_typeface.charset

    # fixtures -------------------------------------------------------

    def _raise_fixture(self):
        # The four property fixtures were verbatim copies of each other;
        # the shared setup lives here: a base-class instance whose property
        # access must raise TypeError.
        return _BulletTypeface("foobar"), TypeError

    @pytest.fixture
    def typeface_raise_fixture(self):
        return self._raise_fixture()

    @pytest.fixture
    def pitch_family_raise_fixture(self):
        return self._raise_fixture()

    @pytest.fixture
    def panose_raise_fixture(self):
        return self._raise_fixture()

    @pytest.fixture
    def charset_raise_fixture(self):
        return self._raise_fixture()
class DescribeBulletTypefaceFollowText(object):
    """Unit-test suite for `pptx.text.bullets._BulletTypefaceFollowText` object."""

    def it_knows_its_bullet_typeface_type(self, typeface_type_fixture):
        """.type identifies the proxy as the follow-text variant."""
        follow_text_typeface, expected_value = typeface_type_fixture
        assert follow_text_typeface.type == expected_value

    # fixtures -------------------------------------------------------

    @pytest.fixture
    def typeface_type_fixture(self):
        """Proxy wrapping a bare a:buFontTx element, with its expected type tag."""
        follow_text_typeface = _BulletTypefaceFollowText(element("a:buFontTx"))
        return follow_text_typeface, "BulletTypefaceFollowText"
class DescribeBulletTypefaceSpecific(object):
""" Unit-test suite for `pptx.text.bullets._BulletTypefaceSpecific` object. """
def it_knows_its_bullet_typeface_type(self, typeface_type_fixture):
    """.type identifies the proxy as the specific-typeface variant."""
    specific_typeface, expected_value = typeface_type_fixture
    typeface_type = specific_typeface.type
    assert typeface_type == expected_value

def it_knows_its_typeface(self, get_typeface_fixture):
    """.typeface getter reads the typeface attribute from the XML element."""
    bullet_typeface, expected_value = get_typeface_fixture
    typeface = bullet_typeface.typeface
    assert typeface == expected_value

def it_can_change_its_typeface(self, set_typeface_fixture):
    """.typeface setter mutates the underlying XML element."""
    bullet_typeface, typeface, typefaceSpecific, expected_xml = set_typeface_fixture
    bullet_typeface.typeface = typeface
    assert typefaceSpecific.xml == expected_xml

def it_knows_its_panose(self, get_panose_fixture):
    """.panose getter reads the panose attribute from the XML element."""
    bullet_typeface, expected_value = get_panose_fixture
    panose = bullet_typeface.panose
    assert panose == expected_value

def it_can_change_its_panose(self, set_panose_fixture):
    """.panose setter mutates the underlying XML element."""
    bullet_typeface, panose, typefaceSpecific, expected_xml = set_panose_fixture
    bullet_typeface.panose = panose
    assert typefaceSpecific.xml == expected_xml

def it_knows_its_charset(self, get_charset_fixture):
    """.charset getter reads the charset attribute from the XML element."""
    bullet_typeface, expected_value = get_charset_fixture
    charset = bullet_typeface.charset
    assert charset == expected_value

def it_can_change_its_charset(self, set_charset_fixture):
    """.charset setter mutates the underlying XML element."""
    bullet_typeface, charset, typefaceSpecific, expected_xml = set_charset_fixture
    bullet_typeface.charset = charset
    assert typefaceSpecific.xml == expected_xml
def | |
x10 * x48 + x102 - x25 * x53 - x27 * x53 - x29 * x53 - x46 * x6 - x47 * x8
x104 = self.p.r1 * w_1_dot_z * x12
x105 = r_xx * self.p.r2 - r_zx * x14
x106 = w_2_dot_x * x105
x107 = r_xy * self.p.r2 - r_zy * x14
x108 = w_2_dot_y * x107
x109 = r_xz * self.p.r2 - r_zz * x14
x110 = w_2_dot_z * x109
x111 = self.p.m1 * self.p.r2 * x12
x112 = -self.p.r2 * x3 - x43
x113 = psi_x_dot * x34 * x35
x114 = psi_y_dot * (-self.p.r1 * x113 - self.p.r2 * x113 -
w_1z * x32 - x33 * x36 - x33 * x38 - x33 * x39)
x115 = psi_x_dot**2 * x0 * x54
x116 = self.p.m2 * self.p.r2 * x12
x117 = psi_x_ddot * (x112 - x50)
x118 = self.p.m3 * self.p.r2 * x12
x119 = r_yx * x67
x120 = r_yz * x69
x121 = x119 - x120
x122 = x121 * x66
x123 = r_yx * x69
x124 = r_yz * x67
x125 = self.p.l * (r_yy * x65 + x123 * x75 + x124 * x75)
x126 = self.p.l * x121 * x75
x127 = -self.p.m3 * x104 + self.p.m3 * x114 + self.p.m3 * x115 + self.p.m3 * x117 + x10 * x90 - x118 * x25 - x118 * x27 - x118 * x29 - x122 * x64 + x125 * x74 + x6 * x92 + x61 * x91 + x62 * x79 + x63 * x85 + x8 * x83 + x84 * \
(x107 - x122) + x93 * (x105 + x125 * x67 - x126 * x69) + x95 * (x109 - x125 * x69 - x126 * x67) + x96 * (-x121 * x98 + x97 * (-r_yy * x75 + x123 * x65 + x124 * x65)) + x99 * (-x100 * x121 + x101 * x125 - x89 * (-x123 - x124) + x97 * (x119 * x75 - x120 * x75))
x128 = -self.p.m2 * x104 + self.p.m2 * x106 + self.p.m2 * x108 + self.p.m2 * x110 + self.p.m2 * x114 + self.p.m2 * \
x115 + self.p.m2 * x117 - x116 * x25 - x116 * x27 - x116 * x29 + x127 + x46 * x91 + x47 * x79 + x48 * x85
x129 = psi_x_ddot * x0 * x2 * x54
x130 = psi_y_ddot * x0 * x34 * x49
x131 = psi_x_dot * (-psi_x_dot * x51 + psi_y_dot * x55)
x132 = psi_y_dot * (psi_x_dot * x55 - psi_y_dot * x51)
x133 = r_zx * x67
x134 = r_zz * x69
x135 = x133 - x134
x136 = self.p.l * x135 * x65
x137 = r_zx * x69
x138 = r_zz * x67
x139 = self.p.l * (r_zy * x65 + x137 * x75 + x138 * x75)
x140 = self.p.l * x135 * x75
x141 = self.p.g * self.p.m3 - self.p.m3 * x129 - self.p.m3 * x130 + self.p.m3 * x131 + self.p.m3 * x132 - x136 * x64 - x136 * x84 + x139 * x74 + x24 * x92 + x26 * x83 + x28 * x90 + x93 * \
(x139 * x67 - x140 * x69) + x95 * (-x139 * x69 - x140 * x67) + x96 * (-x135 * x98 + x97 * (-r_zy * x75 + x137 * x65 + x138 * x65)) + x99 * (-x100 * x135 + x101 * x139 - x89 * (-x137 - x138) + x97 * (x133 * x75 - x134 * x75))
x142 = self.p.g * self.p.m2 - self.p.m2 * x129 - self.p.m2 * \
x130 + self.p.m2 * x131 + self.p.m2 * x132 + x141
F1[0] = psi_x_dot * self.p.m1 * x45 + psi_y_ddot * self.p.m1 * x0 + psi_y_dot * self.p.m1 * x40 + self.p.m1 * x18 + self.p.m1 * \
x20 + self.p.m1 * x22 - self.p.m1 * x4 - x10 * x9 + x103 + x11 * x15 - x23 * x25 - x23 * x27 - x23 * x29 - x5 * x6 - x7 * x8
F1[1] = -self.p.m1 * x104 + self.p.m1 * x106 + self.p.m1 * x108 + self.p.m1 * x110 + self.p.m1 * \
x114 + x11 * x112 - x111 * x25 - x111 * x27 - x111 * x29 + x128 + x5 * x91 + x7 * x79 + x85 * x9
F1[2] = self.p.g * self.p.m1 + x142
F12[0] = x103
F12[1] = x128
F12[2] = x142
F23[0] = x102
F23[1] = x127
F23[2] = x141
return [F1, F12, F23]
def _x_dot(self, x, t, omega_cmd):
    """Time derivative of the full state vector.

    The signature is compatible with scipy.integrate.odeint's first
    callable argument.

    args:
        x (numpy.ndarray): state at which the derivative is evaluated
        t: time [s]; unused because the system is time invariant
        omega_cmd (np.ndarray): motor speed commands [rad/s]

    returns:
        flat array with the time derivatives of all states
    """
    current = ModelState(x, skip_checks=True)

    # An irrecoverable state freezes the system: every derivative is zero.
    if self.is_irrecoverable(state=current, ignore_force_check=True):
        return np.zeros(np.shape(current.x))

    lower_ball_omega = self._get_lower_ball_omega(current)

    derivative = ModelState()
    derivative.omega = self._compute_omega_dot(current, omega_cmd)
    derivative.q1 = Quaternion(current.q1).q_dot(lower_ball_omega, frame='inertial')
    derivative.q2 = Quaternion(current.q2).q_dot(current.omega_2, frame='body')
    derivative.phi = current.phi_dot
    derivative.psi = current.psi_dot
    derivative.pos = self._get_lower_ball_vel(lower_ball_omega)
    return derivative.x
def _get_lower_ball_vel(self, omega_1):
"""computes the linear velocity (x/y) of the lower ball
args:
omega_1 (numpy.ndarray): angular velocity [rad/s] of lower ball
returns:
array with x and y velocity of the lower ball [m/s]
"""
return self.p.r1 * np.array([omega_1[1], -omega_1[0]])
def _get_lower_ball_omega(self, state):
    """computes the angular velocity (x/y/z) of the lower ball

    The x0..x13 intermediates are machine-generated common subexpressions
    (symbolically derived kinematic constraints); they are left untouched.

    args:
        state (ModelState): current state

    returns:
        array containing angular velocity of lower ball [rad/s]
    """
    # Unpack the upper-ball rotation matrix, lever angles and rates.
    [r_xx, r_xy, r_xz, r_yx, r_yy, r_yz, r_zx, r_zy, r_zz] = state.R_IB2.reshape(9)
    [psi_x, psi_y] = state.psi
    [psi_x_dot, psi_y_dot] = state.psi_dot
    w_1z = state.omega_1_z
    [w_2x, w_2y, w_2z] = state.omega_2
    omega_1 = np.zeros(3)
    # NOTE(review): tan/cos appear unqualified, presumably from a star
    # import (math or sympy) at the top of the file — confirm there.
    x0 = 1 / self.p.r1
    x1 = tan(psi_y)
    x2 = self.p.r1 * x1
    x3 = self.p.r2 * w_2x
    x4 = self.p.r2 * w_2y
    x5 = self.p.r2 * w_2z
    x6 = r_zx * self.p.r2 * w_2x
    x7 = r_zy * self.p.r2 * w_2y
    x8 = r_zz * self.p.r2 * w_2z
    x9 = 1 / cos(psi_y)
    x10 = psi_x_dot * x9
    x11 = tan(psi_x)
    x12 = psi_x_dot * x11
    x13 = x11 * x9
    # x/y components follow from the rolling constraint between the balls;
    # z spin is a free state carried through unchanged.
    omega_1[0] = x0 * (-r_xx * x3 - r_xy * x4 - r_xz * x5 + self.p.r1 * x10 +
                       self.p.r2 * x10 + w_1z * x2 + x1 * x6 + x1 * x7 + x1 * x8)
    omega_1[1] = x0 * (psi_y_dot * self.p.r1 + psi_y_dot * self.p.r2 - r_yx * x3 - r_yy * x4 - r_yz *
                       x5 - self.p.r1 * w_1z * x13 - self.p.r2 * x1 * x12 - x12 * x2 - x13 * x6 - x13 * x7 - x13 * x8)
    omega_1[2] = w_1z
    return omega_1
def _compute_omega_dot(self, state, omega_cmd):
"""computes angular acceleration matrix of rotational part of system dynamics (equal to jacobian matrix since dynamics are linear in angular accelerations)
The non-linear rotational dynamics are of the form
A * [omega_1_z_dot, psi_x_ddot, psi_y_ddot, omega_2_x_dot, omega_2_y_dot, omega_2_z_dot, phi_x_ddot, phi_y_ddot] = b
where A = A(phi_x, phi_y, phi_x_dot, phi_y_dot, psi_x, psi_y) and b(state, inputs).
args:
state (ModelState): current state
omega_cmd (np.ndarray): motor speed commands [rad/s]
Returns: array containing the time derivative of the angular velocity state [rad/s^2]
"""
[r_xx, r_xy, r_xz, r_yx, r_yy, r_yz, r_zx, r_zy, r_zz] = state.R_IB2.reshape(9)
[phi_x, phi_y] = state.phi
[phi_x_dot, phi_y_dot] = state.phi_dot
[psi_x, psi_y] = state.psi
[psi_x_dot, psi_y_dot] = state.psi_dot
w_1z = state.omega_1_z
[w_2x, w_2y, w_2z] = state.omega_2
[omega_x_cmd, omega_y_cmd] = omega_cmd
A | |
from rdflib.namespace import RDF, SKOS, DCTERMS, RDFS, OWL, DC
from rdflib import URIRef, Namespace, Literal, Graph
import markdown
from flask import url_for
import requests
from config import Config
from skos.concept_scheme import ConceptScheme, ConceptSchemeRenderer
from skos.concept import Concept, ConceptRenderer
from skos.collection import CollectionRenderer, Collection
from skos.register import Register
import helper
from datetime import date
from urllib import parse
# Controlled values: integer tags returned by get_uri_skos_type() to
# classify a vocabulary resource.
CONCEPT = 0
CONCEPTSCHEME = 1
COLLECTION = 2
METHOD = 3
# schema.org namespace, used for organisation / person metadata lookups.
SCHEMAORG = Namespace('http://schema.org/')
def list_concepts():
    """Return (uri, label, metadata) rows for every skos:Concept, sorted by label.

    metadata is a list of (predicate, value) pairs: created date, modified
    date, definition and in-scheme memberships.
    """
    rows = []
    for concept in Config.g.subjects(RDF.type, SKOS.Concept):
        metadata = [
            (URIRef('http://purl.org/dc/terms/created'), get_created_date(concept)),
            (URIRef('http://purl.org/dc/terms/modified'), get_modified_date(concept)),
            (URIRef('http://www.w3.org/2004/02/skos/core#definition'), get_definition(concept)),
            (URIRef('http://www.w3.org/2004/02/skos/core#inScheme'), get_in_scheme(concept)),
        ]
        rows.append((concept, get_label(concept), metadata))
    rows.sort(key=lambda row: row[1])
    return rows
def list_concept_schemes():
    """Return (uri, label, metadata) rows for every skos:ConceptScheme, label-sorted.

    metadata holds created/modified (predicate, value) pairs followed by the
    description tuple from get_description() (which may be None).
    """
    rows = []
    for scheme in Config.g.subjects(RDF.type, SKOS.ConceptScheme):
        metadata = [
            (URIRef('http://purl.org/dc/terms/created'), get_created_date(scheme)),
            (URIRef('http://purl.org/dc/terms/modified'), get_modified_date(scheme)),
            get_description(scheme),
        ]
        rows.append((scheme, get_label(scheme), metadata))
    rows.sort(key=lambda row: row[1])
    return rows
def list_concept_schemes_and_collections():
    """Return (uri, label, metadata) rows for all non-deprecated
    skos:ConceptSchemes and skos:Collections, sorted by label.

    metadata holds created/modified (predicate, value) pairs followed by the
    description tuple from get_description() (which may be None).
    """
    items = []
    # Both class types receive identical treatment, so iterate them in one
    # loop; the previous version repeated the loop body verbatim.
    for rdf_class in (SKOS.ConceptScheme, SKOS.Collection):
        for cc in Config.g.subjects(RDF.type, rdf_class):
            if is_deprecated(cc):
                continue
            items.append((cc, get_label(cc), [
                (URIRef('http://purl.org/dc/terms/created'), get_created_date(cc)),
                (URIRef('http://purl.org/dc/terms/modified'), get_modified_date(cc)),
                get_description(cc),
            ]))
    return sorted(items, key=lambda i: i[1])
def _split_camel_case_label(label):
new_label = ''
last = 0
for i, letter in enumerate(label):
if letter.isupper():
new_label += ' {}'.format(label[last:i])
last = i
new_label += ' {}'.format(label[last:])
new_label = new_label.strip()
return new_label
def get_label(uri, create=True):
    """Best-effort human-readable label for *uri*.

    Lookup order: skos:prefLabel, dcterms:title, rdfs:label in the local
    graph; then (if *create*) dereference the URI over HTTP and look for a
    label there; finally fall back to a label derived from the URI's local
    name. With create=False the HTTP step is skipped entirely.
    """
    # TODO: title() capitalises all words, we need a post-process function to lower case words that are of types
    # such as preposition and conjunction.
    for label in Config.g.objects(URIRef(uri), SKOS.prefLabel):
        return label
    for label in Config.g.objects(URIRef(uri), DCTERMS.title):
        return label
    for label in Config.g.objects(URIRef(uri), RDFS.label):
        return label
    # Fetch label by dereferencing URI.
    if create:
        headers = {'accept': 'text/turtle'}
        response_g = Graph()
        try:
            r = requests.get(uri, headers=headers)
            # A non-2xx status raises AssertionError and drops us into the
            # fallback branch below.
            assert 200 <= r.status_code < 300
            response_g.parse(data=r.content.decode('utf-8'), format='turtle')
            # NOTE(review): this triple match expects *uri* to already be a
            # URIRef; a plain str subject would match nothing — confirm callers.
            for _, _, label in response_g.triples((uri, SKOS.prefLabel, None)):
                return label
            for _, _, label in response_g.triples((uri, RDFS.label, None)):
                return label
            # NOTE(review): if the dereferenced graph holds no label the
            # function falls through and returns None — confirm callers
            # tolerate a None label here.
        except Exception as e:
            # Any failure (network, parse, bad status): create a label out
            # of the local segment of the URI instead.
            label = helper.uri_label(uri)
            label = _split_camel_case_label(label)
            return Literal(label)
    else:
        return Literal(str(uri).split('#')[-1].split('/')[-1])
def get_description(uri):
    """Return (predicate, literal) for the first description found, or None.

    Checks dcterms:description, then dc:description, then rdfs:comment.
    """
    for predicate in (DCTERMS.description, DC.description, RDFS.comment):
        for value in Config.g.objects(URIRef(uri), predicate):
            return (predicate, value)

def get_definition(uri):
    """Return the first skos:definition literal for *uri*, or None."""
    for value in Config.g.objects(URIRef(uri), SKOS.definition):
        return value
def get_class_types(uri):
    """Return the rdf:type URIs of *uri*, excluding blank nodes and the core
    SKOS classes (which the UI renders separately)."""
    skos_core = (
        'http://www.w3.org/2004/02/skos/core#ConceptScheme',
        'http://www.w3.org/2004/02/skos/core#Concept',
        'http://www.w3.org/2004/02/skos/core#Collection',
    )
    types = []
    # Renamed the loop variable: the original shadowed the builtin `type`.
    for class_uri in Config.g.objects(URIRef(uri), RDF.type):
        # Only add URIs (and not blank nodes!) outside the SKOS core.
        if str(class_uri).startswith('http') and str(class_uri) not in skos_core:
            types.append(class_uri)
    return types
def is_deprecated(uri):
    """True if *uri* carries owl:deprecated with a truthy boolean value.

    The literal's lexical form is inspected explicitly: rdflib Literals are
    str subclasses, so ``bool(Literal('false'))`` is True, which made the
    previous ``bool(value)`` report ``owl:deprecated "false"`` as deprecated.
    """
    for value in Config.g.objects(URIRef(uri), OWL.deprecated):
        return str(value).strip().lower() in ('true', '1')
    return False
def _sorted_labelled_objects(uri, predicate, skip_deprecated):
    """(object, label) pairs of *predicate* values of *uri*, sorted by label.

    Shared implementation for the narrower/broader/member getters, which
    previously repeated this loop verbatim.
    """
    pairs = []
    for obj in Config.g.objects(URIRef(uri), predicate):
        if skip_deprecated and is_deprecated(obj):
            continue
        pairs.append((obj, get_label(obj)))
    return sorted(pairs, key=lambda pair: pair[1])

def get_narrowers(uri):
    """Non-deprecated skos:narrower concepts as label-sorted (uri, label) pairs."""
    return _sorted_labelled_objects(uri, SKOS.narrower, skip_deprecated=True)

def get_broaders(uri):
    """Non-deprecated skos:broader concepts as label-sorted (uri, label) pairs."""
    return _sorted_labelled_objects(uri, SKOS.broader, skip_deprecated=True)

def get_members(uri):
    """skos:member values as label-sorted (uri, label) pairs (deprecation not filtered,
    matching the original behaviour)."""
    return _sorted_labelled_objects(uri, SKOS.member, skip_deprecated=False)
def get_top_concept_of(uri):
    """Schemes this concept is skos:topConceptOf, as label-sorted (uri, label) pairs."""
    pairs = [(scheme, get_label(scheme))
             for scheme in Config.g.objects(URIRef(uri), SKOS.topConceptOf)]
    return sorted(pairs, key=lambda pair: pair[1])

def get_top_concepts(uri):
    """skos:hasTopConcept values of a scheme, as label-sorted (uri, label) pairs."""
    pairs = [(concept, get_label(concept))
             for concept in Config.g.objects(URIRef(uri), SKOS.hasTopConcept)]
    return sorted(pairs, key=lambda pair: pair[1])
def get_change_note(uri):
    """First skos:changeNote literal for *uri*, or None."""
    for note in Config.g.objects(URIRef(uri), SKOS.changeNote):
        return note

def get_alt_labels(uri):
    """All skos:altLabel literals for *uri*, sorted."""
    return sorted(Config.g.objects(URIRef(uri), SKOS.altLabel))
def get_created_date(uri):
    """dcterms:created parsed into a datetime.date, or None if absent.

    Parsing is deliberately lenient: split on '-' and keep only the first
    two digits of the day field, so both plain dates and datetime strings work.
    """
    for literal in Config.g.objects(URIRef(uri), DCTERMS.created):
        year, month, day = literal.split('-')[:3]
        return date(int(year), int(month), int(day[:2]))

def get_modified_date(uri):
    """dcterms:modified parsed into a datetime.date, or None if absent."""
    for literal in Config.g.objects(URIRef(uri), DCTERMS.modified):
        year, month, day = literal.split('-')[:3]
        return date(int(year), int(month), int(day[:2]))
def get_uri_skos_type(uri):
    """Classify *uri* as METHOD, CONCEPTSCHEME, CONCEPT or COLLECTION, else None.

    The order of checks matters: tern:Method wins over the SKOS classes.
    """
    uri_ref = URIRef(parse.unquote_plus(uri))
    checks = (
        (URIRef('https://w3id.org/tern/ontologies/tern/Method'), METHOD),
        (SKOS.ConceptScheme, CONCEPTSCHEME),
        (SKOS.Concept, CONCEPT),
        (SKOS.Collection, COLLECTION),
    )
    for rdf_class, skos_type in checks:
        if (uri_ref, RDF.type, rdf_class) in Config.g:
            return skos_type
    return None
def get_properties(uri):
    """Displayable property rows for *uri*.

    Predicates that the UI renders through dedicated getters (labels,
    descriptions, dates, SKOS relations, schema.org organisation data, ...)
    are skipped.

    returns:
        sorted list of ((predicate, predicate_label), value, value_label);
        value_label is None unless the value is a URIRef.
    """
    # Set (was a list) for O(1) membership tests over every triple of the subject.
    ignore = {
        # Common
        RDF.type, SKOS.prefLabel, DCTERMS.title, RDFS.label, DCTERMS.description, SKOS.definition, SKOS.changeNote,
        DCTERMS.created, DCTERMS.modified, OWL.sameAs, RDFS.comment, SKOS.altLabel, DCTERMS.bibliographicCitation,
        RDFS.isDefinedBy, DC.description, DCTERMS.creator, DCTERMS.contributor, SCHEMAORG.parentOrganization,
        SCHEMAORG.contactPoint, SCHEMAORG.member, SCHEMAORG.subOrganization, SCHEMAORG.familyName,
        URIRef('http://schema.semantic-web.at/ppt/propagateType'), SCHEMAORG.givenName, SCHEMAORG.honorificPrefix,
        SCHEMAORG.jobTitle, SCHEMAORG.memberOf, URIRef('http://schema.semantic-web.at/ppt/appliedType'), SKOS.member,
        # Concept
        SKOS.narrower, SKOS.broader, SKOS.topConceptOf, SKOS.inScheme, SKOS.closeMatch, SKOS.exactMatch,
        # Concept Scheme
        SKOS.hasTopConcept,
    }
    properties = []
    for _, predicate, value in Config.g.triples((URIRef(uri), None, None)):
        if predicate in ignore:
            continue
        # isinstance (not `type(...) ==`) so URIRef subclasses are labelled too.
        value_label = get_label(value, create=False) if isinstance(value, URIRef) else None
        properties.append(((predicate, get_label(predicate, create=False)), value, value_label))
    properties.sort(key=lambda prop: prop[0])
    return properties
def get_in_scheme(uri):
    """Concept schemes the concept belongs to, as (scheme, label) pairs.

    A concept may be a member of more than one concept scheme; the result
    is unsorted.
    """
    return [(scheme, get_label(scheme))
            for scheme in Config.g.objects(URIRef(uri), SKOS.inScheme)]
def _add_narrower(uri, hierarchy, indent):
    """Recursively append markdown list lines for the skos:narrower and
    skos:member children of *uri*, one tab level per depth."""
    children = []
    for predicate in (SKOS.narrower, SKOS.member):
        for child in Config.g.objects(URIRef(uri), predicate):
            children.append((child, get_label(child)))
    children.sort(key=lambda pair: pair[1])
    for child, label in children:
        hierarchy += '{}- [{}]({})\n'.format(indent * '\t', label, url_for('routes.ob', uri=child))
        hierarchy = _add_narrower(child, hierarchy, indent + 1)
    return hierarchy
def _render_hierarchy(uri, predicate):
    """Markdown/HTML hierarchy of the non-deprecated *predicate* children of
    *uri*; shared by the collection and scheme renderers, which previously
    duplicated this body."""
    roots = []
    for child in Config.g.objects(URIRef(uri), predicate):
        if not is_deprecated(child):
            roots.append((child, get_label(child)))
    roots.sort(key=lambda pair: pair[1])
    md = ''
    for child, label in roots:
        md += '- [{}]({})\n'.format(label, url_for('routes.ob', uri=child))
        md = _add_narrower(child, md, 1)
    return '<div id="concept-hierarchy">' + markdown.markdown(md) + '</div>'

def get_concept_hierarchy_collection(uri):
    """Concept hierarchy HTML for a skos:Collection (rooted at its skos:member values)."""
    return _render_hierarchy(uri, SKOS.member)

def get_concept_hierarchy(uri):
    """Concept hierarchy HTML for a scheme (rooted at its skos:hasTopConcept values)."""
    return _render_hierarchy(uri, SKOS.hasTopConcept)
def get_is_defined_by(uri):
    """First rdfs:isDefinedBy value for *uri*, or None."""
    for value in Config.g.objects(URIRef(uri), RDFS.isDefinedBy):
        return value

def get_close_match(uri):
    """All skos:closeMatch values (unsorted list)."""
    return list(Config.g.objects(URIRef(uri), SKOS.closeMatch))

def get_exact_match(uri):
    """All skos:exactMatch values (unsorted list)."""
    return list(Config.g.objects(URIRef(uri), SKOS.exactMatch))

def get_bibliographic_citation(uri):
    """First dcterms:bibliographicCitation for *uri*, or None."""
    for value in Config.g.objects(URIRef(uri), DCTERMS.bibliographicCitation):
        return value

def get_dcterms_source(uri):
    """First dcterms:source for *uri*, or None."""
    for value in Config.g.objects(URIRef(uri), DCTERMS.source):
        return value
def get_schema_org_parent_org(uri):
    """(org, label) of the first schema:parentOrganization, or None."""
    for org in Config.g.objects(URIRef(uri), SCHEMAORG.parentOrganization):
        return (org, get_label(org))

def get_schema_org_contact_point(uri):
    """(contact, label) of the first schema:contactPoint, or None."""
    for contact in Config.g.objects(URIRef(uri), SCHEMAORG.contactPoint):
        return (contact, get_label(contact))

def get_schema_org_members(uri):
    """(member, label) pairs for every schema:member (unsorted)."""
    return [(member, get_label(member))
            for member in Config.g.objects(URIRef(uri), SCHEMAORG.member)]

def get_schema_org_sub_orgs(uri):
    """(org, label) pairs for every schema:subOrganization (unsorted)."""
    return [(org, get_label(org))
            for org in Config.g.objects(URIRef(uri), SCHEMAORG.subOrganization)]

def get_schema_org_family_name(uri):
    """First schema:familyName, or None."""
    for name in Config.g.objects(URIRef(uri), SCHEMAORG.familyName):
        return name

def get_schema_org_given_name(uri):
    """First schema:givenName, or None."""
    for name in Config.g.objects(URIRef(uri), SCHEMAORG.givenName):
        return name

def get_schema_org_honorific_prefix(uri):
    """First schema:honorificPrefix, or None."""
    for prefix in Config.g.objects(URIRef(uri), SCHEMAORG.honorificPrefix):
        return prefix

def get_schema_org_job_title(uri):
    """First schema:jobTitle, or None."""
    for title in Config.g.objects(URIRef(uri), SCHEMAORG.jobTitle):
        return title

def get_schema_org_member_of(uri):
    """(org, label) of the first schema:memberOf, or None."""
    for org in Config.g.objects(URIRef(uri), SCHEMAORG.memberOf):
        return (org, get_label(org))
def member_of(uri):
    """Collections that list *uri* via skos:member.

    The inverse of skos:member - used for better UI navigation.
    """
    return [(collection, get_label(collection))
            for collection in Config.g.subjects(SKOS.member, URIRef(uri))]

def get_creator(uri):
    """First dcterms:creator, or None."""
    for creator in Config.g.objects(URIRef(uri), DCTERMS.creator):
        return creator

def get_rdf_predicate(uri):
    """First rdf:predicate of a reified statement, or None."""
    for predicate in Config.g.objects(URIRef(uri), RDF.predicate):
        return predicate

def get_rdf_object(uri):
    """First rdf:object of a reified statement, or None."""
    for obj in Config.g.objects(URIRef(uri), RDF.object):
        return obj
def get_mapping_statement(uri):
    """Find the reified rdf:Statement whose rdf:subject is *uri*.

    returns:
        [statement, predicate, object, created_date, creator, description]
        or None if no matching statement exists. description may be None:
        the previous version indexed get_description()'s result
        unconditionally and raised TypeError for statements without one.
    """
    uri = URIRef(uri)
    for statement in Config.g.subjects(RDF.type, RDF.Statement):
        # Direct containment test instead of scanning every triple of the statement.
        if (statement, RDF.subject, uri) in Config.g:
            description = get_description(statement)
            return [
                statement,
                get_rdf_predicate(statement),
                get_rdf_object(statement),
                get_created_date(statement),
                get_creator(statement),
                description[1] if description else None,
            ]
def get_method_purpose(uri):
    """First tern:purpose value, or None."""
    for purpose in Config.g.objects(URIRef(uri), URIRef('https://w3id.org/tern/ontologies/tern/purpose')):
        return purpose

def get_method_scope(uri):
    """First tern:scope value, or None."""
    for scope in Config.g.objects(URIRef(uri), URIRef('https://w3id.org/tern/ontologies/tern/scope')):
        return scope

def get_method_equipment(uri):
    """tern:equipment values as (uri, label) pairs.

    Quirk preserved from the original: if any equipment value is not a
    URIRef (e.g. a plain literal) it is returned directly, aborting the list.
    """
    equipments = []
    for equipment in Config.g.objects(URIRef(uri), URIRef('https://w3id.org/tern/ontologies/tern/equipment')):
        if not isinstance(equipment, URIRef):
            return equipment
        equipments.append((equipment, get_label(equipment)))
    return equipments

def get_method_instructions(uri):
    """First tern:instructions value, or None."""
    for instructions in Config.g.objects(URIRef(uri), URIRef('https://w3id.org/tern/ontologies/tern/instructions')):
        return instructions

def get_parameter_relations(uri):
    """(parameter, label) pairs for every tern:hasParameter (unsorted)."""
    return [(parameter, get_label(parameter))
            for parameter in Config.g.objects(URIRef(uri), URIRef('https://w3id.org/tern/ontologies/tern/hasParameter'))]

def get_categorical_variables_relations(uri):
    """(collection, label) pairs for every tern:hasCategoricalVariableCollection (unsorted)."""
    return [(cv, get_label(cv))
            for cv in Config.g.objects(URIRef(uri), URIRef('https://w3id.org/tern/ontologies/tern/hasCategoricalVariableCollection'))]

def get_method_time_required(uri):
    """First schema:timeRequired value, or None."""
    for time_required in Config.g.objects(URIRef(uri), URIRef('http://schema.org/timeRequired')):
        return time_required
def get_method_additional_note(uri):
| |
<filename>POVME/packages/clustering/cluster.py
#!python
# Implementation of Clustering Algorithms in POVME
# By <NAME>
# Advised by <NAME>
# Amaro Lab, UCSD
import scipy.cluster.vq, scipy.cluster.hierarchy
import argparse
import numpy
import sys
import os
import csv
import copy
import itertools
import collections
#import fnmatch
import pylab
import POVME.packages.binana.peel as peel
#import matplotlib.pyplot
class InputReader():
    """Parses POVME clustering inputs: the pairwise overlap matrix, the
    matrix-index-to-frame mapping, and the prefix-to-trajectory mapping.

    Attributes:
        overlapMatrix: numpy array of pairwise frame overlap values
        prefixToTrajectory: dict mapping a filename prefix to its trajectory file
        indexToNpyFile: dict mapping matrix index -> per-frame .npy file
        indexToFrame: dict mapping matrix index -> trajectory frame number
        indexToPrefix: dict mapping matrix index -> filename prefix
    """

    def __init__(self):
        self.overlapMatrix = []
        self.prefixToTrajectory = {}
        self.indexToNpyFile = {}
        self.indexToFrame = {}
        self.indexToPrefix = {}

    def read_indexFile(self, indexToFrameFile):
        """Read a CSV of (index, frameFile) rows into the index mappings.

        Frame numbers and prefixes are recovered from filenames of the form
        ``<prefix>frame_<N>.npy``. No-op when *indexToFrameFile* is None.
        """
        if indexToFrameFile is None:
            return
        with open(indexToFrameFile) as csvfile:
            reader = csv.DictReader(csvfile, fieldnames=['index', 'frameFile'])
            for row in reader:
                index = int(row['index'])
                frame_file = row['frameFile']
                self.indexToNpyFile[index] = frame_file
                if self.indexToFrame is not None:
                    try:
                        frame_number = frame_file.split('frame_')[-1].replace('.npy', '')
                        self.indexToFrame[index] = int(frame_number)
                        base_name = frame_file.split('/')[-1]
                        self.indexToPrefix[index] = base_name.replace('frame_%s.npy' % (frame_number), '')
                    except (ValueError, IndexError) as err:
                        # Narrowed from a bare `except:`; only parsing
                        # failures are reported this way.
                        raise Exception("Unable to strip frame number or prefix from input filename %s. Disabling frame number output." % (frame_file)) from err

    def read_overlap(self, overlap_file):
        """Load the pairwise overlap matrix from a .npy or .csv file into
        ``self.overlapMatrix`` (numpy array)."""
        suffix = overlap_file[-4:]
        if suffix == '.npy':
            # numpy.load opens and closes the file itself; the previous
            # numpy.load(open(...)) leaked a handle opened in text mode.
            self.overlapMatrix = numpy.load(overlap_file)
        elif suffix == '.csv':
            with open(overlap_file, 'r') as overlap:
                rows = [[float(value) for value in line]
                        for line in csv.reader(overlap, delimiter=',')]
            self.overlapMatrix = numpy.array(rows)
        else:
            raise Exception('Unrecognized overlap matrix input file type:', suffix)

    def parse_traj_inputs(self, argst, argsT):
        """Populate prefixToTrajectory from -t "prefix:file" arguments and
        from -T files containing whitespace-separated "prefix:file" entries,
        then verify that every referenced trajectory file exists.
        """
        if argsT is not None:
            for argT in argsT:
                with open(argT) as handle:
                    for line in handle.read().strip().split():
                        linesp = line.split(':')
                        self.prefixToTrajectory[linesp[0].strip()] = linesp[1].strip()
        for argt in argst:
            argtsp = argt.split(':')
            self.prefixToTrajectory[argtsp[0].strip()] = argtsp[1].strip()
        # Check to ensure these files exist
        for prefix, trajFileName in self.prefixToTrajectory.items():
            if not os.path.exists(trajFileName):
                raise Exception('ERROR - trajectory file %s doesn\'t exist!' % (trajFileName))
class Cluster():
#def __init__(self,coordinates,frames,overlap_values,frameToFileName):
def __init__(self,input_reader):
    """Initialise clustering state from a populated InputReader and run sanity checks.

    args:
        input_reader (InputReader): holds the overlap matrix and the
            index/prefix/trajectory mappings parsed from the command line.
    """
    self.overlap_values = input_reader.overlapMatrix
    # Distance form of the matrix: clustering operates on 1 - overlap.
    self.one_minus_overlap_values = 1. - self.overlap_values
    self.frames = len(self.overlap_values)
    self.indexToFrame = input_reader.indexToFrame
    self.indexToPrefix = input_reader.indexToPrefix
    self.indexToNpyFile = input_reader.indexToNpyFile
    self.prefixToTrajectory = input_reader.prefixToTrajectory
    # Populated lazily by the clustering methods.
    self.avgLinkage = None
    self.whited_overlap_values = None
    self.do_sanity_checks()
def do_sanity_checks(self):
    """Validate the command-line input combination at construction time."""
    self.check_commandline_inputs()
    self.ensure_file_prefixes_map_to_trajectories()
    #self.ensure_trajectories_exist() #Check performed during -T argument parsing instead
def check_commandline_inputs(self):
    """Validate the -i / -t/-T argument combination and report which level
    of output (indices, frame numbers, representatives) this run supports."""
    have_index = self.indexToNpyFile != {}
    have_traj = self.prefixToTrajectory != {}
    if not have_index and have_traj:
        raise Exception("ERROR! Given pdb trajectory (-t/T) but not given index file (-i). Output will return matrix indices instead of frame numbers or cluster representative structures.")
    if not have_index:
        print("Not given index file (-i). Clustering will return matrix indices, but not trajectory frame numbers or members/representatives.")
    elif not have_traj:
        print("Given index file (-i) but not prefix-to-trajectory mapping (-t or -T). Clustering will return prefix and frame numbers of cluster members, but will not extract representatives.")
    else:
        print("Given index file (-i) and prefix-to-trajectory mapping (-t or -T). Clustering will return prefix and frame numbers of cluster members, and will extract representatives.")
def ensure_file_prefixes_map_to_trajectories(self):
    """Check that every file prefix appearing in the index has a trajectory
    mapping; skipped when either -i or -t/-T was not given."""
    if (self.prefixToTrajectory == {}) or (self.indexToNpyFile == {}):
        print("No -i and/or -t/T arguments given - Skipping file-prefix-to-trajectory mapping completeness test")
        return
    known_prefixes = list(self.prefixToTrajectory.keys())
    for prefix in set(self.indexToPrefix.values()):
        if prefix not in known_prefixes:
            raise Exception('File prefix %s not found in -t arguments (which are %r)' % (prefix, known_prefixes))
    return
def ensure_trajectories_exist(self):
    """Verify each mapped trajectory file is present on disk."""
    if self.prefixToTrajectory == {}:
        print("No -t/T arguments given. Skipping trajectory-file-existence check")
        return
    for traj_path in self.prefixToTrajectory.values():
        if not os.path.exists(traj_path):
            raise Exception("Trajectory file %s not found" % (traj_path))
def kmeans_cluster(self, number_clusters):
    """K-means cluster the frames on the whitened overlap matrix.

    number_clusters: k handed to scipy's kmeans.
    Returns a list of clusters (lists of matrix indices), largest first.
    """
    # BUG FIX: once cached, whited_overlap_values is a numpy array, and
    # `array == None` is an elementwise comparison whose truth value raises
    # ValueError on the second call. `is None` is the correct sentinel test.
    if self.whited_overlap_values is None:
        self.whited_overlap_values = scipy.cluster.vq.whiten(self.overlap_values)
    centroids, distortion = scipy.cluster.vq.kmeans(self.whited_overlap_values, number_clusters)
    #frames,result = scipy.cluster.vq.kmeans2(self.whited_overlap_values, number_clusters)
    # Assign every frame to its nearest centroid.
    code, dist = scipy.cluster.vq.vq(self.whited_overlap_values, centroids)
    print("The clusters are {0}".format(code))
    print(code.shape)
    return self.separate_clusters(code)
def hierarchical_cluster(self, number_clusters):
    """Average-linkage hierarchical clustering on the 1 - overlap distances.

    The linkage matrix is cached on disk under a cheap content hash so
    repeated runs over the same matrix skip the expensive linkage step.
    Returns a list of clusters (lists of matrix indices), largest first.
    """
    # BUG FIX: `== None` on a cached numpy array is an elementwise
    # comparison that raises ValueError; use the identity test.
    if self.avgLinkage is None:
        try:
            # Subsample ~100 entries for the hash. BUG FIX: use integer
            # division — on Python 3 `size/100` is a float and a float
            # slice step raises TypeError, which silently sent every run
            # through the full-sum fallback below.
            step = self.one_minus_overlap_values.size // 100
            overlapHash = str(numpy.sum(self.one_minus_overlap_values.flatten()[::step]))[-7:]
        except Exception:
            # Fallback, e.g. matrices with fewer than 100 entries (step 0).
            overlapHash = str(numpy.sum(self.one_minus_overlap_values.flatten()))[-7:]
        linkageFile = 'avg_linkage_hash_%s.npy' % (overlapHash)
        if os.path.exists(linkageFile):
            self.avgLinkage = numpy.load(linkageFile)
        else:
            self.avgLinkage = scipy.cluster.hierarchy.average(self.one_minus_overlap_values)
            numpy.save(linkageFile, self.avgLinkage)
    result = scipy.cluster.hierarchy.fcluster(self.avgLinkage,
                                              number_clusters,
                                              criterion='maxclust')
    # fcluster numbers clusters starting from 1; shift to 0-based labels so
    # they can index Python lists directly.
    result = result - 1
    return self.separate_clusters(result)
''' separate_cluster_traj will separate the original trajectory into the
number of clusters specified. Each new cluster trajectory will only contain
the frames that belong in that cluster.
cluster_result = list that is number-of-frames long and contains the cluster
each frame is grouped with.
number_clusters = the number of clusters specified.
file_name = the file name that was passed into main() as a command line arg.
traj_file = the original trajectory file containing all frames
'''
def separate_cluster_traj(self, cluster_result, number_clusters, file_name, traj_file, output_file):
    """Split a multi-frame pdb trajectory into one pdb file per cluster.

    cluster_result: per-frame cluster assignment (one entry per frame).
    number_clusters: how many cluster_<i>.pdb files to create.
    traj_file: path of the original pdb trajectory containing all frames.
    file_name, output_file: unused; kept for interface compatibility.

    Each line of the current frame is appended to that frame's cluster
    file; a record starting with 'E' (END/ENDMDL) closes the frame and
    advances to the next cluster assignment.
    """
    cluster_files = [open('cluster_' + str(i) + '.pdb', 'wb')
                     for i in range(number_clusters)]
    current_frame = 0
    current_cluster = cluster_result[current_frame]
    try:
        with open(traj_file, 'rb') as source:
            for line in source:
                # BUG FIX: the trajectory is opened in binary mode, so each
                # line is bytes; on Python 3 `line[0]` is an int and the old
                # comparison to the str 'E' never matched, so frames never
                # advanced. Compare against a bytes prefix instead.
                if line.startswith(b'E'):
                    cluster_files[current_cluster].write(line)
                    if current_frame < len(cluster_result) - 1:
                        current_frame += 1
                        current_cluster = cluster_result[current_frame]
                else:
                    cluster_files[current_cluster].write(line)
    finally:
        for handle in cluster_files:
            handle.close()
''' Separates the frames into the set clusters
'''
def separate_clusters(self, cluster_results):
    """Group frame indices by their 0-based cluster label.

    cluster_results: sequence of cluster labels, one per frame.
    Returns a list of index lists, sorted with the largest cluster first.
    """
    n_clusters = len(set(cluster_results))
    grouped = [[] for _ in range(n_clusters)]
    for frame_index, label in enumerate(cluster_results):
        grouped[label].append(frame_index)
    grouped.sort(key=len, reverse=True)
    return grouped
''' csv file containing differences in binding site is first argument already read and stored into memory by previous command '''
#def find_centroids(self,binding_volume_matrix,cluster_results,number_clusters,number_frames, indexToFrame):
def find_centroids(self, list_of_clusters, outputPrefix):
    """Find each cluster's centroid and write the reps/members csv files.

    The centroid of a cluster is the member with the smallest total
    (1 - overlap) distance to the other members. Depending on which of
    the -i / -t/-T inputs were supplied, the csv files contain npy file
    names, raw matrix indices, or prefix_frame identifiers.

    Returns a dict mapping cluster index -> extracted representative pdb
    file name (empty when frame extraction is not possible).
    """
    centroid_list = []
    for cluster in list_of_clusters:
        sum_distances = []
        if len(cluster) == 1:
            sum_distances.append(0)
        else:
            cluster = numpy.array(cluster)
            for entry in cluster:
                allButEntry = cluster[cluster != entry]
                totalDist = numpy.sum(self.one_minus_overlap_values[entry, allButEntry])
                sum_distances.append(totalDist)
        # The member with the smallest summed distance is the centroid.
        centroid_cluster_index = numpy.argsort(sum_distances)[0]
        centroid_list.append(cluster[centroid_cluster_index])
    # BUG FIX throughout: these csv files receive str data, so they must be
    # opened in text mode ('w'); the original 'wb' raises TypeError on
    # Python 3 the moment a string is written.
    if (self.indexToFrame == {}) and (self.indexToNpyFile != {}):
        repsFileName = '%scluster_reps.csv' % (outputPrefix)
        membersFileName = '%scluster_members.csv' % (outputPrefix)
        print("Unable to extract frame numbers from file names. Writing out npy file names to %s and %s" % (repsFileName, membersFileName))
        with open(repsFileName, 'w') as of:
            cluster_rep_file_names = [str(self.indexToNpyFile[i]) for i in centroid_list]
            of.write('\n'.join(cluster_rep_file_names))
        with open(membersFileName, 'w') as of:
            for cluster in list_of_clusters:
                cluster_member_file_names = [str(self.indexToNpyFile[i]) for i in cluster]
                of.write(' '.join(cluster_member_file_names))
                of.write('\n')
    elif (self.indexToFrame == {}) and (self.indexToNpyFile == {}):
        print("No matrix-index-to-trajectory-frame mapping given. Writing out matrix indices")
        with open('%scluster_reps.csv' % (outputPrefix), 'w') as of:
            of.write('\n'.join(str(i) for i in centroid_list))
        with open('%scluster_members.csv' % (outputPrefix), 'w') as of:
            for cluster in list_of_clusters:
                of.write(' '.join(str(i) for i in cluster))
                of.write('\n')
    else:  # self.indexToFrame != {} — frame numbers are available
        repsFileName = '%scluster_reps.csv' % (outputPrefix)
        membersFileName = '%scluster_members.csv' % (outputPrefix)
        print("Matrix-index-to-trajectory-frame mapping given. Writing out trajectory frames to %s and %s." % (repsFileName, membersFileName))
        with open(repsFileName, 'w') as of:
            rep_ids = ['%s_%s' % (self.indexToPrefix[i], self.indexToFrame[i])
                       for i in centroid_list]
            of.write('\n'.join(rep_ids))
        with open(membersFileName, 'w') as of:
            for cluster in list_of_clusters:
                member_ids = ['%s_%s' % (self.indexToPrefix[i], self.indexToFrame[i])
                              for i in cluster]
                of.write(' '.join(member_ids))
                of.write('\n')
    if (self.indexToFrame != {}) and (self.prefixToTrajectory != {}):
        print("Extracting trajectory frames")
        # Map each centroid's matrix index back to its cluster number.
        matrixIndex2Cluster = {centroid: index
                               for index, centroid in enumerate(centroid_list)}
        clusterInd2CentFileName = self.extractFrames(matrixIndex2Cluster, outputPrefix, reps=True)
    else:
        clusterInd2CentFileName = {}
    return clusterInd2CentFileName
def outputAllFrames(self, list_of_clusters, outputPrefix):
## check to make sure we'll be able to map all matrix indices to files
for clusterInd, cluster in enumerate(list_of_clusters):
#print cluster
#print indexToFrame.keys()
for matrixInd in cluster:
if not(matrixInd in list(self.indexToFrame.keys())):
raise Exception('User requested all frame pdbs to be output to cluster directories, but the program is unable to map all overlap matrix indices to trajectory/frame combinations. Make sure that -t/-T and | |
#!/usr/bin/env python3
"""pdoc's CLI interface and helper functions."""
import argparse
import ast
import importlib
import inspect
import os
import os.path as path
import json
import re
import sys
import warnings
from contextlib import contextmanager
from functools import lru_cache
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Dict, List, Sequence
from warnings import warn
import pdoc
# Command-line interface definition. `aa` is shorthand for adding an
# argument; `mode_aa` adds to a mutually exclusive group so that only one
# output mode (--html or --pdf) can be selected per invocation.
parser = argparse.ArgumentParser(
    description="Automatically generate API docs for Python modules.",
    epilog="Further documentation is available at <https://pdoc3.github.io/pdoc/doc>.",
)
aa = parser.add_argument
mode_aa = parser.add_mutually_exclusive_group().add_argument
aa(
    '--version', action='version', version='%(prog)s ' + pdoc.__version__)
aa(
    "modules",
    type=str,
    metavar='MODULE',
    nargs="+",
    help="The Python module name. This may be an import path resolvable in "
         "the current environment, or a file path to a Python module or "
         "package.",
)
aa(
    "-c", "--config",
    type=str,
    metavar='OPTION=VALUE',
    action='append',
    default=[],
    help="Override template options. This is an alternative to using "
         "a custom config.mako file in --template-dir. This option "
         "can be specified multiple times.",
)
aa(
    "--filter",
    type=str,
    metavar='STRING',
    default=None,
    help="Comma-separated list of filters. When specified, "
         "only identifiers containing the specified string "
         "will be shown in the output. Search is case sensitive. "
         "Has no effect when --http is set.",
)
aa(
    "-f", "--force",
    action="store_true",
    help="Overwrite any existing generated (--output-dir) files.",
)
mode_aa(
    "--html",
    action="store_true",
    help="When set, the output will be HTML formatted.",
)
mode_aa(
    "--pdf",
    action="store_true",
    help="When set, the specified modules will be printed to standard output, "
         "formatted in Markdown-Extra, compatible with most "
         "Markdown-(to-HTML-)to-PDF converters.",
)
# Deprecated alias for --output-dir; hidden from --help (SUPPRESS) but kept
# for backward compatibility (see _warn_deprecated in main()).
aa(
    "--html-dir",
    type=str,
    help=argparse.SUPPRESS,
)
aa(
    "-o", "--output-dir",
    type=str,
    metavar='DIR',
    help="The directory to output generated HTML/markdown files to "
         "(default: ./html for --html).",
)
# The next three options are deprecated/no-op and hidden from --help.
aa(
    "--html-no-source",
    action="store_true",
    help=argparse.SUPPRESS,
)
aa(
    "--overwrite",
    action="store_true",
    help=argparse.SUPPRESS,
)
aa(
    "--external-links",
    action="store_true",
    help=argparse.SUPPRESS,
)
aa(
    "--template-dir",
    type=str,
    metavar='DIR',
    default=None,
    help="Specify a directory containing Mako templates "
         "(html.mako, text.mako, config.mako and/or any templates they include). "
         "Alternatively, put your templates in $XDG_CONFIG_HOME/pdoc and "
         "pdoc will automatically find them.",
)
# Deprecated, hidden option.
aa(
    "--link-prefix",
    type=str,
    help=argparse.SUPPRESS,
)
aa(
    "--close-stdin",
    action="store_true",
    help="When set, stdin will be closed before importing, to account for "
         "ill-behaved modules that block on stdin."
)

# Defaults used by the --http development server.
DEFAULT_HOST, DEFAULT_PORT = 'localhost', 8080
def _check_host_port(s):
if s and ':' not in s:
raise argparse.ArgumentTypeError(
"'{}' doesn't match '[HOST]:[PORT]'. "
"Specify `--http :` to use default hostname and port.".format(s))
return s
aa(
    "--http",
    default='',
    type=_check_host_port,
    metavar='HOST:PORT',
    help="When set, pdoc will run as an HTTP server providing documentation "
         "for specified modules. If you just want to use the default hostname "
         "and port ({}:{}), set the parameter to :.".format(DEFAULT_HOST, DEFAULT_PORT),
)
aa(
    "--skip-errors",
    action="store_true",
    help="Upon unimportable modules, warn instead of raising."
)

# Parsed options live at module level (populated in main()) so the request
# handler and the path helpers below can read them.
args = argparse.Namespace()
class _WebDoc(BaseHTTPRequestHandler):
    """HTTP request handler for `pdoc --http`: renders documentation for
    the requested module on the fly, with mtime-based staleness checks."""
    args = None  # Set before server instantiated
    template_config = None

    def do_HEAD(self):
        # HEAD mirrors do_GET's status logic so clients can cheaply poll
        # whether a module page is stale.
        status = 200
        if self.path != "/":
            status = self.check_modified()

        self.send_response(status)
        self.send_header("Content-type", "text/html; charset=utf-8")
        self.end_headers()

    def check_modified(self):
        """Return 304 when the requested module's source file is unchanged
        (its mtime is used as an ETag), 205 when stale, 404 when the module
        cannot be imported."""
        try:
            module = pdoc.import_module(self.import_path_from_req_url)
            new_etag = str(os.stat(module.__file__).st_mtime)
        except ImportError:
            return 404

        old_etag = self.headers.get('If-None-Match', new_etag)
        if old_etag == new_etag:
            # Don't log repeating checks
            self.log_request = lambda *args, **kwargs: None
            return 304

        return 205

    def do_GET(self):
        """Serve the module index, one module's page, or resolve an
        external-link (.ext) lookup; redirects normalize trailing slashes."""
        # Deny favicon shortcut early.
        if self.path == "/favicon.ico":
            return None

        # Pick up modules/files added since the server started.
        importlib.invalidate_caches()
        code = 200
        if self.path == "/":
            # Index page: one (name, docstring) entry per requested module.
            modules = [pdoc.import_module(module, reload=True)
                       for module in self.args.modules]
            modules = sorted((module.__name__, inspect.getdoc(module))
                             for module in modules)
            out = pdoc._render_template('/html.mako',
                                        modules=modules,
                                        **self.template_config)
        elif self.path.endswith(".ext"):
            # External links are a bit weird. You should view them as a giant
            # hack. Basically, the idea is to "guess" where something lives
            # when documenting another module and hope that guess can actually
            # track something down in a more global context.
            #
            # The idea here is to start specific by looking for HTML that
            # exists that matches the full external path given. Then trim off
            # one component at the end and try again.
            #
            # If no HTML is found, then we ask `pdoc` to do its thang on the
            # parent module in the external path. If all goes well, that
            # module will then be able to find the external identifier.
            import_path = self.path[:-4].lstrip("/")
            resolved = self.resolve_ext(import_path)
            if resolved is None:  # Try to generate the HTML...
                print("Generating HTML for %s on the fly..." % import_path, file=sys.stderr)
                try:
                    out = pdoc.html(import_path.split(".")[0], **self.template_config)
                except Exception as e:
                    print('Error generating docs: {}'.format(e), file=sys.stderr)
                    # All hope is lost.
                    code = 404
                    out = "External identifier <code>%s</code> not found." % import_path
            else:
                return self.redirect(resolved)
        # Redirect '/pdoc' to '/pdoc/' so that relative links work
        # (results in '/pdoc/cli.html' instead of 'cli.html')
        elif not self.path.endswith(('/', '.html')):
            return self.redirect(self.path + '/')
        # Redirect '/pdoc/index.html' to '/pdoc/' so it's more pretty
        elif self.path.endswith(pdoc._URL_PACKAGE_SUFFIX):
            return self.redirect(self.path[:-len(pdoc._URL_PACKAGE_SUFFIX)] + '/')
        else:
            try:
                out = self.html()
            except Exception:
                import traceback
                from html import escape
                code = 404
                out = "Error importing module <code>{}</code>:\n\n<pre>{}</pre>".format(
                    self.import_path_from_req_url, escape(traceback.format_exc()))
                out = out.replace('\n', '<br>')

        self.send_response(code)
        self.send_header("Content-type", "text/html; charset=utf-8")
        self.end_headers()
        self.echo(out)

    def redirect(self, location):
        """Send a 302 redirect to *location*."""
        self.send_response(302)
        self.send_header("Location", location)
        self.end_headers()

    def echo(self, s):
        """Write *s* to the response body, UTF-8 encoded."""
        self.wfile.write(s.encode("utf-8"))

    def html(self):
        """
        Retrieves and sends the HTML belonging to the path given in
        URL. This method is smart and will look for HTML files already
        generated and account for whether they are stale compared to
        the source code.
        """
        return pdoc.html(self.import_path_from_req_url,
                         reload=True, http_server=True, external_links=True,
                         skip_errors=args.skip_errors,
                         **self.template_config)

    def resolve_ext(self, import_path):
        """Map an external import path onto an already-generated HTML file,
        trying progressively shorter dotted prefixes; None when none exist."""
        def exists(p):
            # Prefer a package index file, then a plain module file.
            p = path.join(args.output_dir, p)
            pkg = path.join(p, pdoc._URL_PACKAGE_SUFFIX.lstrip('/'))
            mod = p + pdoc._URL_MODULE_SUFFIX

            if path.isfile(pkg):
                return pkg[len(args.output_dir):]
            elif path.isfile(mod):
                return mod[len(args.output_dir):]
            return None

        parts = import_path.split(".")
        for i in range(len(parts), 0, -1):
            p = path.join(*parts[0:i])
            realp = exists(p)
            if realp is not None:
                # Anchor to the full identifier within the found page.
                return "/%s#%s" % (realp.lstrip("/"), import_path)
        return None

    @property
    def import_path_from_req_url(self):
        """The dotted module path encoded in the request URL (suffixes and
        fragment stripped)."""
        pth = self.path.split('#')[0].lstrip('/')
        for suffix in ('/',
                       pdoc._URL_PACKAGE_SUFFIX,
                       pdoc._URL_INDEX_MODULE_SUFFIX,
                       pdoc._URL_MODULE_SUFFIX):
            if pth.endswith(suffix):
                pth = pth[:-len(suffix)]
                break
        return pth.replace('/', '.')
def module_path(m: pdoc.Module, ext: str):
    """Absolute output path for module *m*, with '.html' swapped for *ext*."""
    url_parts = re.sub(r'\.html$', ext, m.url()).split('/')
    return path.join(args.output_dir, *url_parts)
def _quit_if_exists(m: pdoc.Module, ext: str):
    """Exit(1) if m's output file (or, for packages, its directory) already
    exists; a no-op when --force was given."""
    if args.force:
        return
    candidates = [module_path(m, ext)]
    if m.is_package:  # If package, make sure the dir doesn't exist either
        candidates.append(path.dirname(candidates[0]))
    for candidate in candidates:
        if path.lexists(candidate):
            print("File '%s' already exists. Delete it, or run with --force" % candidate,
                  file=sys.stderr)
            sys.exit(1)
@contextmanager
def _open_write_file(filename):
try:
with open(filename, 'w', encoding='utf-8') as f:
yield f
print(filename) # print created file path to stdout
except Exception:
try:
os.unlink(filename)
except Exception:
pass
raise
def recursive_write_files(m: pdoc.Module, ext: str, **kwargs):
    """Render module *m* and, recursively, all its submodules to files.

    ext: '.html' or '.md' — selects the renderer.
    kwargs: forwarded unchanged to pdoc's html()/text() renderers.
    """
    assert ext in ('.html', '.md')
    filepath = module_path(m, ext=ext)
    dirpath = path.dirname(filepath)
    # BUG FIX: the old `os.access(dirpath, os.R_OK)` probe tested
    # readability, not existence, and raced with concurrent writers;
    # makedirs(exist_ok=True) creates the directory exactly when missing.
    os.makedirs(dirpath, exist_ok=True)

    with _open_write_file(filepath) as f:
        if ext == '.html':
            f.write(m.html(**kwargs))
        else:  # '.md' — guaranteed by the assert above
            f.write(m.text(**kwargs))

    for submodule in m.submodules():
        recursive_write_files(submodule, ext=ext, **kwargs)
def _flatten_submodules(modules: Sequence[pdoc.Module]):
    """Depth-first pre-order walk: yield each module, then its submodules."""
    for module in modules:
        yield module
        yield from _flatten_submodules(module.submodules())
def _print_pdf(modules, **kwargs):
    """Render all modules (including submodules) through pdf.mako and print
    the resulting Markdown to stdout."""
    flat_modules = list(_flatten_submodules(modules))
    print(pdoc._render_template('/pdf.mako', modules=flat_modules, **kwargs))
def _warn_deprecated(option, alternative='', use_config_mako=False):
msg = 'Program option `{}` is deprecated.'.format(option)
if alternative:
msg += ' Use `' + alternative + '`'
if use_config_mako:
msg += ' or override config.mako template'
msg += '.'
warn(msg, DeprecationWarning, stacklevel=2)
def _generate_lunr_search(modules: List[pdoc.Module],
                          index_docstrings: bool,
                          template_config: dict):
    """Generate index.js for search"""
    # Strip markdown/HTML noise from docstrings so the client-side search
    # index stays compact.
    def trim_docstring(docstring):
        return re.sub(r'''
            \s+|                   # whitespace sequences
            \s+[-=~]{3,}\s+|       # title underlines
            ^[ \t]*[`~]{3,}\w*$|   # code blocks
            \s*[`#*]+\s*|          # common markdown chars
            \s*([^\w\d_>])\1\s*|   # sequences of punct of the same kind
            \s*</?\w*[^>]*>\s*     # simple HTML tags
        ''', ' ', docstring, flags=re.VERBOSE | re.MULTILINE)

    def recursive_add_to_index(dobj):
        # One entry per documented object; 'url' is a small integer index
        # into the URLS table to keep the emitted JSON compact.
        info = {
            'ref': dobj.refname,
            'url': to_url_id(dobj.module),
        }
        if index_docstrings:
            info['doc'] = trim_docstring(dobj.docstring)
        if isinstance(dobj, pdoc.Function):
            info['func'] = 1
        index.append(info)
        for member_dobj in getattr(dobj, 'doc', {}).values():
            recursive_add_to_index(member_dobj)

    @lru_cache()
    def to_url_id(module):
        # Intern each module URL, assigning it a stable small integer id.
        url = module.url()
        if url not in url_cache:
            url_cache[url] = len(url_cache)
        return url_cache[url]

    index = []  # type: List[Dict]
    url_cache = {}  # type: Dict[str, int]
    for top_module in modules:
        recursive_add_to_index(top_module)
    # URLS must be ordered by their assigned ids so lookups line up.
    urls = sorted(url_cache.keys(), key=url_cache.__getitem__)

    main_path = args.output_dir
    # index.js carries both tables as plain JS assignments so it can be
    # loaded with a simple <script> tag (no fetch/CORS needed).
    with _open_write_file(path.join(main_path, 'index.js')) as f:
        f.write("URLS=")
        json.dump(urls, f, indent=0, separators=(',', ':'))
        f.write(";\nINDEX=")
        json.dump(index, f, indent=0, separators=(',', ':'))

    # Generate search.html
    with _open_write_file(path.join(main_path, 'doc-search.html')) as f:
        rendered_template = pdoc._render_template('/search.mako', **template_config)
        f.write(rendered_template)
def main(_args=None):
""" Command-line entry point """
global args
args = _args or parser.parse_args()
warnings.simplefilter("once", DeprecationWarning)
if args.close_stdin:
sys.stdin.close()
if (args.html or args.http) and not args.output_dir:
args.output_dir = 'html'
if args.html_dir:
_warn_deprecated('--html-dir', '--output-dir')
args.output_dir = args.html_dir
if args.overwrite:
_warn_deprecated('--overwrite', '--force')
args.force = args.overwrite
template_config = {}
for config_str in args.config:
try:
key, value = config_str.split('=', 1)
value = ast.literal_eval(value)
template_config[key] = value
except Exception:
raise ValueError(
'Error evaluating --config statement "{}". '
'Make sure string values are quoted?'
.format(config_str)
)
if args.html_no_source:
_warn_deprecated('--html-no-source', | |
<gh_stars>1-10
# Created on 11/9/21 at 11:02 AM
# Author: <NAME>
import os
import scipy.stats
# import scipy.io.wavfile
# import soundfile as sf
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.utils
import torchvision
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
# python speaker_id.py --cfg=cfg/SincNet_TIMIT.cfg
from torch.utils.data import Dataset
from torchvision import datasets, transforms
import sys
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import interactive
# interactive(True)
from dnn_models_pdm import *
from data_io import read_conf_inp
# from pymatreader import read_mat
import numpy as np
import os
# from hdf5storage import savemat
from hdf5storage import loadmat
from sklearn.model_selection import train_test_split
from pytorchtools import EarlyStopping, RMSLELoss
from torch.utils.tensorboard import SummaryWriter
from sklearn.linear_model import LinearRegression
import readchans
import random
from sklearn.metrics import r2_score
import pickle
import matplotlib
from matplotlib.gridspec import GridSpec
from scipy.io import savemat
# Global RNG seed used for torch/numpy/random reproducibility.
seednum = 2021

############################ define model parameters ######################
# Analysis window boundaries, in downsampled samples (data are decimated
# by 2 later on).
timestart = 625
timeend = 625+500
# Trial duration expressed in original-rate samples.
trialdur = timeend * 2 - timestart * 2
correctModel = False
notrainMode = False
# sr = 1000
# timeend = 800 # when 300ms after stim

# Hyper-parameters
num_epochs = 300
batch_size = 64
learning_rate = 0.001
num_chan = 98  # number of retained EEG channels; matches chansets_new()
dropout_rate = 0.5
compute_likelihood = True
cross_val_dir = 'crossval_metric_30_625_1625/'

############################# define random seeds ###########################
torch.manual_seed(seednum)
np.random.seed(seednum)
random.seed(seednum)
def seed_worker(worker_id):
    """DataLoader worker_init_fn: derive the numpy/random seeds from torch's
    initial seed so data loading is reproducible across workers."""
    derived_seed = torch.initial_seed() % 2 ** 32
    np.random.seed(derived_seed)
    random.seed(derived_seed)
# Seeded generator handed to DataLoader for reproducible shuffling.
g = torch.Generator()
g.manual_seed(seednum)

######################## tensorboard initialization ###########################
tb = SummaryWriter('runs/regression_new')

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

######################## creating directories and file names ########################
# postname tags every output directory for this experiment variant.
# postname = '_prestim500_1000_0123_ddm_2param'
postname = '_1000_ddm_2param_correct'
# postname = '_1000_0123_ddm_2param_final'
modelpath = 'trained_model' + postname
resultpath = 'results' + postname
figurepath = 'figures' + postname

isExist = os.path.exists(modelpath)
# NOTE(review): the line below duplicates the one above — harmless, but
# probably unintended.
isExist = os.path.exists(modelpath)
if not isExist:
    os.makedirs(modelpath)
    print(modelpath + ' created')
isExist = os.path.exists(figurepath)
if not isExist:
    os.makedirs(figurepath)
    print(figurepath + ' created')
isExist = os.path.exists(resultpath)
if not isExist:
    os.makedirs(resultpath)
    print(resultpath + ' created')
####################### some functions for getting the EEG data ##############
def remove_ticks(fig):
    """Hide x/y tick labels on every axes of figure *fig*.

    BUG FIX: the original iterated `enumerate(fig, axes)`, which both
    misuses enumerate and raises NameError (`axes` is undefined); the
    figure's subplots live in `fig.axes`.
    """
    for ax in fig.axes:
        ax.tick_params(labelbottom=False, labelleft=False)
def viz_histograms(model, epoch):
    """Log every named parameter of *model* (and its gradient) as tensorboard
    histograms for *epoch*; parameters tensorboard can't bin are skipped."""
    for name, param in model.named_parameters():
        try:
            tb.add_histogram(name, param, epoch)
            tb.add_histogram(f'{name}.grad', param.grad, epoch)
        except NotImplementedError:
            continue
def getIDs():
    """Collect subject file prefixes for the task3 dataset.

    Returns (sublist, finalsub): prefixes of data files whose subject id
    appears in the behavioral file, and the sorted prefixes of all data
    files excluding subjects 236 and 193.
    """
    path = '/home/jenny/pdmattention/'
    subj = loadmat('behavior2_task3')['uniquepart'][0]
    allDataFiles = os.listdir(path + 'task3/final_interp')
    sublist = []
    for sub in subj:
        sublist += [x[0:10] for x in allDataFiles if int(x[1:4]) == sub]
    # BUG FIX (perf/clarity): the original rebuilt this identical list once
    # per element of `sublist`; the comprehension never used the loop
    # variable. Compute it once; the `if sublist:` guard preserves the
    # original's behavior of returning an empty finalsub when sublist is
    # empty.
    finalsub = []
    if sublist:
        finalsub = [x[0:10] for x in allDataFiles
                    if x[1:4] != '236' and x[1:4] != '193']
        finalsub.sort()
    return sublist, finalsub
def getddmparams(subj):
    """Load the HDDM parameter estimates for one subject.

    subj: subject prefix like 's181_ses1_'; subj[1:4] is the numeric id.
    Returns (alpha, ndt, delta) — boundary, non-decision time, and drift —
    averaged over the matched rows. Exits the program when the subject is
    missing from the estimates file.
    """
    estimates_dir = '/home/jenny/pdmattention/alphaDC/estimates/'
    paramdic = loadmat(estimates_dir + 'behavior2_task3_HDDM_AlphaJan_20_21_14_04_estimates.mat')
    uniquepart = loadmat('behavior2_task3')['uniquepart'][0]
    ind = np.where(uniquepart == int(subj[1:4]))[0]
    print('ind:', ind)
    if len(ind) == 0:
        print('!!!Warning: No DDM parameters extracted')
        sys.exit()
    print('subject DDM Parameters Deteted')
    # Index [2] selects the stored point estimate (per the original note,
    # the median) for the matched subject rows.
    boundary = paramdic['alpha'][0, 0][2][ind, :]
    nondecision = paramdic['ndt'][0, 0][2][ind, :]
    drift = paramdic['delta'][0, 0][2][ind, :]
    return (np.mean(boundary), np.mean(nondecision), np.mean(drift))
def chansets_new():
chans = np.arange(0, 128)
chans_del = np.array(
[56, 63, 68, 73, 81, 88, 94, 100, 108, 114, 49, 43, 48, 38, 32, 44, 128, 127, 119, 125, 120, 121, 126,
113, 117, 1, 8, 14, 21, 25]) - 1
chans = np.delete(chans, chans_del)
return chans
def plot_grad_flow(named_parameters):
    """Summarize gradient magnitudes per layer after a backward pass.

    Bias parameters and parameters without requires_grad are skipped.
    Returns (max_grads, ave_grads, grads, layers): per-layer max and mean
    of |grad|, the detached gradient tensors, and the parameter names.
    (The original in-line plotting code was dead/commented out.)
    """
    layers = []
    grad_tensors = []
    ave_grads = []
    max_grads = []
    for name, param in named_parameters:
        if not param.requires_grad or "bias" in name:
            continue
        layers.append(name)
        grad_tensors.append(param.grad.detach())
        abs_grad = param.grad.abs()
        ave_grads.append(abs_grad.mean())
        max_grads.append(abs_grad.max())
    return max_grads, ave_grads, grad_tensors, layers
def loadsubjdict(subID):
    """Load one subject's interpolated task3 EEG .mat file as a dict."""
    data_dir = '/home/jenny/pdmattention/task3/final_interp/'
    return loadmat(data_dir + subID + 'final_interp.mat')
def loadinfo(subID):
    """Build a response-hand matrix for one subject's good trials.

    Decodes the correct stimulus class from spatial frequency (spfs > 2.5
    -> high frequency -> right hand) and combines it with per-trial
    accuracy to recover which hand actually responded.
    Returns a 0/1 array over the subject's good trials (1 = right hand).
    """
    path = '/home/jenny/pdmattention/task3/expinfo/'
    infodict = loadmat(path + subID + 'task3_expinfo.mat')
    spfs = np.squeeze(infodict['spfs'])
    correctchoice = np.zeros(infodict['rt'].shape[1])
    easycond = np.squeeze((infodict['condition'] == 1) | (infodict['condition'] == 4))
    medcond = np.squeeze((infodict['condition'] == 2) | (infodict['condition'] == 5))
    hardcond = np.squeeze((infodict['condition'] == 3) | (infodict['condition'] == 6))
    # NOTE(review): all three condition masks apply the identical spfs > 2.5
    # rule, so together they reduce to `spfs > 2.5`; if difficulty levels
    # were meant to use different cutoffs this needs revisiting upstream.
    correctchoice[((easycond == True) & (spfs > 2.5))] = 1
    correctchoice[((medcond == True) & (spfs > 2.5))] = 1
    correctchoice[((hardcond == True) & (spfs > 2.5))] = 1
    # 1 would be high freq 0 would be low, 1 would be right hand, 0 would be left hand
    datadict = loadsubjdict(subID)
    correctchoice = np.squeeze(correctchoice[datadict['trials']])
    acc = np.squeeze(datadict['correct'])
    responsemat = np.zeros(acc.shape)
    # Correct response on a "right" stimulus means the right hand moved, etc.
    responsemat[(acc == 1) & (correctchoice == 1)] = 1
    responsemat[(acc == 0) & (correctchoice == 1)] = 0
    responsemat[(acc == 1) & (correctchoice == 0)] = 0
    responsemat[(acc == 0) & (correctchoice == 0)] = 1
    return responsemat
    # BUG FIX: removed the unreachable `return datadict` that followed the
    # return above (dead code).
def goodchans():
    """Good-channel indices taken from reference subject s182_ses1_."""
    return loadsubjdict('s182_ses1_')['goodchans'][0]
def getdata(datadict, Tstart=250, Tend=1250):
    """Extract correct-trial EEG from a subject dict.

    Decimates time by 2, keeps only good trials answered correctly and
    only the good channels, then crops time to [Tstart, Tend).
    Returns (data[time, chan, trial], condition per kept trial).
    """
    eeg = np.array(datadict['data'])[::2, :, :]  # decimate time by 2
    sr = np.array(datadict['sr']) / 2  # kept for parity; unused below
    condition = np.array(datadict['condition'])[0]
    goodtrials = np.array(datadict['trials'])[0]
    correct = np.array(datadict['correct'])[0]
    goodchan = goodchans()
    eeg = eeg[:, :, goodtrials]
    eeg = eeg[:, :, correct == 1]
    condition = condition[correct == 1]
    eeg = eeg[:, goodchan, :]
    return eeg[Tstart:Tend, :, :], condition
def getrtdata(datadict, Tstart=250, Tend=1250):
    """Extract EEG plus reaction-time labels from a subject dict.

    Keeps all good trials (correct and incorrect), restricts channels via
    chansets_new(), and bins RTs positionally into terciles labelled
    1 (first third) through 3 (last third).
    Returns (data[time, chan, trial], condition, rt_label, rt, correct).
    """
    eeg = np.array(datadict['data'])[::2, :, :]  # decimate time by 2
    sr = np.array(datadict['sr']) / 2  # kept for parity; unused below
    condition = np.array(datadict['condition'])[0]
    goodtrials = np.array(datadict['trials'])[0]
    correct = np.array(datadict['correct'])[0]
    rt = np.array(datadict['rt'])[0]
    # Tercile labels 1/2/3; the remainder trials all land in the last bin.
    n_third = len(rt) // 3
    rt_label = np.hstack((np.zeros(n_third),
                          np.ones(n_third),
                          np.full(len(rt) - 2 * n_third, 2.0)))
    rt_label += 1
    goodchan = chansets_new()
    eeg = eeg[:, :, goodtrials]
    eeg = eeg[:, goodchan, :]
    return eeg[Tstart:Tend, :, :], condition, rt_label, rt, correct
def reshapedata(data):
    """Reorder (time, chan, trial) EEG into a new (trial, chan, time)
    float64 array (matching the original's np.zeros-based fill)."""
    return data.transpose(2, 1, 0).astype(np.float64)
#
#
# def my_loss(t, v, t0, a):
# # t is target RT
# # v is output
# # t0 is non decision time
#
#
# w = torch.tensor(0.5).cuda() # convert to relative start point
# kk = torch.arange(-4,6) # we set K to be 10
# try:
# k = torch.tile(kk,(t.shape[0],1)).cuda()
# except IndexError:
# k = kk.cuda()
#
# err = torch.tensor(0.01).cuda()
# tt = torch.max(torch.tensor(t.cuda() - torch.tensor(t0).cuda()),err) / torch.max(err,a.cuda()) ** 2 # normalized time
# tt_vec = torch.tile(tt, (1, 10))
# pp = torch.cumsum((w+2*k)*torch.exp(-(((w+2*k)**2)/2)/tt_vec),axis=1)
# pp = pp[:,-1]/torch.sqrt(2*torch.tensor(np.pi)*torch.squeeze(tt)**3)
# pp = pp[:, None]
#
# p = torch.log(pp * torch.exp(-v*torch.max(err, a)*w - (v**2)*torch.tensor(t).cuda()/2) /(torch.max(err,a)**2))
# return -(p.sum())
def my_loss(t, v, t0, a):
    """Summed negative log-likelihood of RTs under a first-passage-time
    density (drift-diffusion style; the series form resembles the
    small-time Wiener FPT expansion -- verify against the source paper).

    t  : target RTs, one per trial (tensor; sign appears choice-coded,
         since |t| is taken below)
    v  : predicted drift rates (model output)
    t0 : non-decision time
    a  : boundary-separation tensor
    Returns a scalar tensor: -(sum of log-probabilities).

    NOTE(review): every operand is moved to CUDA explicitly, so this
    function requires a GPU; ``torch.tensor(existing_tensor)`` copies and
    warns on recent PyTorch -- confirm this is intended.
    """
    # t is target RT
    # v is output
    # t0 is non decision time
    w = torch.tensor(0.5).cuda() # convert to relative start point (0.5 = unbiased)
    kk = torch.arange(-4,6) # we set K to be 10 (series truncated to 10 terms)
    try:
        # one row of series indices per trial
        k = torch.tile(kk,(t.shape[0],1)).cuda()
    except IndexError:
        # 0-d t: fall back to a single row of indices
        k = kk.cuda()
    err = torch.tensor(0.02).cuda()  # numerical floor for time and boundary
    # (|t| - t0) floored at err, divided by max(err, a)^2
    tt = torch.max(torch.tensor(torch.abs(t.cuda()) - torch.tensor(t0).cuda()),err) / torch.max(err,a.cuda()) ** 2 # normalized time
    tt_vec = torch.tile(tt, (1, 10))
    # truncated series for the density; only the full cumulative sum
    # (last column) is kept
    pp = torch.cumsum((w+2*k)*torch.exp(-(((w+2*k)**2)/2)/tt_vec),axis=1)
    pp = pp[:,-1]/torch.sqrt(2*torch.tensor(np.pi)*torch.squeeze(tt)**3)
    pp = pp[:, None]
    # v = torch.where(torch.tensor(t).cuda()>0, v, -v) # if time is negative, flip the sign of v
    v = torch.clamp(v, -6,6)  # keep drift in a numerically safe range
    # use |t| so that sign-coded RTs cannot make the exponent blow up
    t = torch.where(torch.tensor(t).cuda() > 0, torch.tensor(t).cuda(), torch.tensor(-t).cuda())
    p = pp * (torch.exp(-v*torch.max(err, a)*w - (v**2)*torch.tensor(t).cuda()/2) /(torch.max(err,a)**2))
    # p = torch.where(torch.tensor(v).cuda()>0, 1*p, 6*p)
    p = torch.log(p)
    # p = torch.where(torch.tensor(v).cuda()>0, p, -p)
    # print(t,a,v)
    # print('probability is ', p)
    return -(p.sum())
# def my_loss(t, v, t0, a,z,err=1e-29):
# # t is target RT
# # v is output
# # t0 is non decision time
#
# tt = torch.tensor(t.cuda()-torch.tensor(t0).cuda())/(torch.tensor(a).cuda()**2) # normalized time
# tt[tt<0] = 0.01
# w = torch.tensor(z).cuda()/torch.tensor(a).cuda() # convert to relative start point
# ks = 2 + torch.sqrt(-2 * tt * torch.log(2 * torch.sqrt(2 * torch.tensor(np.pi) * tt) * err)) #bound
# ks = torch.max(ks,torch.square(tt)+1) # ensure bouhndary conditions are met
# kk = torch.arange(-4,6) # we set K to be 10
# try:
# k = torch.tile(kk,(t.shape[0],1)).cuda()
# except IndexError:
# k = kk.cuda()
# tt_vec = torch.tile(tt, (1,10))
# pp = torch.cumsum((w+2*k)*torch.exp(-(((w+2*k)**2)/2)/tt_vec),axis=1)
# pp = pp[:,-1]/torch.sqrt(2*torch.tensor(np.pi)*torch.squeeze(tt)**3)
# pp = pp[:, None]
#
# p = torch.log(pp * torch.exp(-v*a*w - (v**2)*torch.tensor(t).cuda()/2) /(a**2))
# return -(p.sum())
#
# def my_loss:
# p = (t-t0)/a**2
# p = 1/(2*np.pi*(tt**3))
#
#
# def my_loss(t, v, t0, a, err=1e-29):
# # t is target RT
# # v is output
# # t0 is non decision time
#
# tt = torch.tensor(t.cuda() - torch.tensor(t0).cuda()) / (torch.tensor(a).cuda() ** 2) # normalized time
# tt[tt < 0] = 0.01
# w = 0.5
# ks = 2 | |
<filename>a10_octavia/controller/worker/flows/a10_member_flows.py
# Copyright 2019, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from taskflow.patterns import linear_flow
from octavia.common import constants
from octavia.controller.worker.v1.tasks import database_tasks
from octavia.controller.worker.v1.tasks import lifecycle_tasks
from octavia.controller.worker.v1.tasks import model_tasks
from a10_octavia.common import a10constants
from a10_octavia.controller.worker.tasks import a10_database_tasks
from a10_octavia.controller.worker.tasks import a10_network_tasks
from a10_octavia.controller.worker.tasks import server_tasks
from a10_octavia.controller.worker.tasks import vthunder_tasks
CONF = cfg.CONF
class MemberFlows(object):
    def get_create_member_flow(self, topology):
        """Create a flow to create a member

        :param topology: load balancer topology; ACTIVE_STANDBY inserts
            extra VCS-sync and master-election steps around the plug
        :returns: The flow for creating a member
        """
        create_member_flow = linear_flow.Flow(constants.CREATE_MEMBER_FLOW)
        # On any task failure, revert marks member/listeners/LB/pool ERROR.
        create_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask(
            requires=[constants.MEMBER,
                      constants.LISTENERS,
                      constants.LOADBALANCER,
                      constants.POOL]))
        create_member_flow.add(vthunder_tasks.VthunderInstanceBusy(
            requires=a10constants.COMPUTE_BUSY))
        create_member_flow.add(database_tasks.MarkMemberPendingCreateInDB(
            requires=constants.MEMBER))
        if CONF.a10_global.validate_subnet:
            create_member_flow.add(a10_network_tasks.ValidateSubnet(
                name='validate-subnet',
                requires=constants.MEMBER))
        create_member_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(
            requires=constants.LOADBALANCER,
            provides=a10constants.VTHUNDER))
        create_member_flow.add(a10_database_tasks.GetLoadBalancerListByProjectID(
            requires=a10constants.VTHUNDER,
            provides=a10constants.LOADBALANCERS_LIST))
        create_member_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
            requires=constants.LOADBALANCER_ID,
            provides=constants.AMPHORA))
        create_member_flow.add(a10_database_tasks.GetMemberListByProjectID(
            requires=a10constants.VTHUNDER,
            provides=a10constants.MEMBER_LIST))
        # Work out which networks/ports must be plugged for this member.
        create_member_flow.add(a10_network_tasks.CalculateDelta(
            requires=(constants.LOADBALANCER, a10constants.LOADBALANCERS_LIST,
                      a10constants.MEMBER_LIST),
            provides=constants.DELTAS))
        create_member_flow.add(a10_network_tasks.HandleNetworkDeltas(
            requires=constants.DELTAS, provides=constants.ADDED_PORTS))
        if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            # Wait for the VCS cluster to settle, then target the master.
            create_member_flow.add(vthunder_tasks.VCSSyncWait(
                name="vcs_sync_wait_before_probe_device",
                requires=a10constants.VTHUNDER))
            create_member_flow.add(vthunder_tasks.GetMasterVThunder(
                name=a10constants.GET_VTHUNDER_MASTER,
                requires=a10constants.VTHUNDER,
                provides=a10constants.VTHUNDER))
        # managing interface additions here
        create_member_flow.add(
            vthunder_tasks.AmphoraePostMemberNetworkPlug(
                requires=(
                    constants.LOADBALANCER,
                    constants.ADDED_PORTS,
                    a10constants.VTHUNDER)))
        create_member_flow.add(vthunder_tasks.VThunderComputeConnectivityWait(
            name=a10constants.VTHUNDER_CONNECTIVITY_WAIT,
            requires=(a10constants.VTHUNDER, constants.AMPHORA)))
        if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            # Also wait on the backup device before re-electing the master.
            create_member_flow.add(
                a10_database_tasks.GetBackupVThunderByLoadBalancer(
                    name="get_backup_vThunder",
                    requires=(constants.LOADBALANCER, a10constants.VTHUNDER),
                    provides=a10constants.BACKUP_VTHUNDER))
            create_member_flow.add(vthunder_tasks.VThunderComputeConnectivityWait(
                name="backup_compute_conn_wait_before_probe_device",
                requires=constants.AMPHORA,
                rebind={a10constants.VTHUNDER: a10constants.BACKUP_VTHUNDER}))
            create_member_flow.add(vthunder_tasks.VCSSyncWait(
                name="backup-plug-wait-vcs-ready",
                requires=a10constants.VTHUNDER))
            create_member_flow.add(vthunder_tasks.GetMasterVThunder(
                name=a10constants.GET_MASTER_VTHUNDER,
                requires=a10constants.VTHUNDER,
                provides=a10constants.VTHUNDER))
        create_member_flow.add(
            vthunder_tasks.EnableInterfaceForMembers(
                requires=[
                    constants.ADDED_PORTS,
                    constants.LOADBALANCER,
                    a10constants.VTHUNDER]))
        if CONF.a10_global.handle_vrid:
            create_member_flow.add(self.handle_vrid_for_member_subflow())
        create_member_flow.add(a10_database_tasks.CountMembersWithIP(
            requires=constants.MEMBER, provides=a10constants.MEMBER_COUNT_IP
        ))
        create_member_flow.add(vthunder_tasks.AllowLoadbalancerForwardWithAnySource(
            name=a10constants.ALLOW_NO_SNAT,
            requires=(constants.MEMBER, constants.AMPHORA)))
        create_member_flow.add(a10_database_tasks.GetFlavorData(
            rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},
            provides=constants.FLAVOR))
        create_member_flow.add(self.get_create_member_snat_pool_subflow())
        # Program the member on the Thunder device itself.
        create_member_flow.add(server_tasks.MemberCreate(
            requires=(constants.MEMBER, a10constants.VTHUNDER, constants.POOL,
                      a10constants.MEMBER_COUNT_IP, constants.FLAVOR)))
        create_member_flow.add(database_tasks.MarkMemberActiveInDB(
            requires=constants.MEMBER))
        create_member_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        create_member_flow.add(database_tasks.
                               MarkLBAndListenersActiveInDB(
                                   requires=(constants.LOADBALANCER,
                                             constants.LISTENERS)))
        # Persist config and record the device's last-updated timestamp.
        create_member_flow.add(vthunder_tasks.WriteMemory(
            requires=a10constants.VTHUNDER))
        create_member_flow.add(a10_database_tasks.SetThunderUpdatedAt(
            requires=a10constants.VTHUNDER))
        return create_member_flow
    def get_delete_member_flow(self, topology):
        """Flow to delete a member on VThunder

        :param topology: load balancer topology; ACTIVE_STANDBY inserts
            VCS-sync and master-election steps around the unplug
        :returns: The flow for deleting a member
        """
        delete_member_flow = linear_flow.Flow(constants.DELETE_MEMBER_FLOW)
        delete_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask(
            requires=[constants.MEMBER,
                      constants.LISTENERS,
                      constants.LOADBALANCER,
                      constants.POOL]))
        delete_member_flow.add(vthunder_tasks.VthunderInstanceBusy(
            requires=a10constants.COMPUTE_BUSY))
        delete_member_flow.add(database_tasks.MarkMemberPendingDeleteInDB(
            requires=constants.MEMBER))
        delete_member_flow.add(model_tasks.
                               DeleteModelObject(rebind={constants.OBJECT:
                                                         constants.MEMBER}))
        delete_member_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
            requires=constants.LOADBALANCER_ID,
            provides=constants.AMPHORA))
        delete_member_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(
            requires=constants.LOADBALANCER,
            provides=a10constants.VTHUNDER))
        # Reference counts decide whether shared server objects can be freed.
        delete_member_flow.add(a10_database_tasks.CountMembersWithIP(
            requires=constants.MEMBER, provides=a10constants.MEMBER_COUNT_IP))
        delete_member_flow.add(
            a10_database_tasks.CountMembersWithIPPortProtocol(
                requires=(
                    constants.MEMBER,
                    constants.POOL),
                provides=a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL))
        delete_member_flow.add(a10_database_tasks.GetFlavorData(
            rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},
            provides=constants.FLAVOR))
        delete_member_flow.add(a10_database_tasks.GetLoadBalancerListByProjectID(
            requires=a10constants.VTHUNDER,
            provides=a10constants.LOADBALANCERS_LIST))
        delete_member_flow.add(a10_database_tasks.GetMemberListByProjectID(
            requires=a10constants.VTHUNDER,
            provides=a10constants.MEMBER_LIST))
        # Compute which networks/ports become unused after this delete.
        delete_member_flow.add(a10_network_tasks.CalculateDelta(
            requires=(constants.LOADBALANCER, a10constants.LOADBALANCERS_LIST,
                      a10constants.MEMBER_LIST),
            provides=constants.DELTAS))
        delete_member_flow.add(a10_network_tasks.HandleNetworkDeltas(
            requires=constants.DELTAS, provides=constants.ADDED_PORTS))
        if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            delete_member_flow.add(vthunder_tasks.VCSSyncWait(
                name=a10constants.VCS_SYNC_WAIT,
                requires=a10constants.VTHUNDER))
            delete_member_flow.add(vthunder_tasks.GetMasterVThunder(
                name=a10constants.GET_MASTER_VTHUNDER,
                requires=a10constants.VTHUNDER,
                provides=a10constants.VTHUNDER))
        delete_member_flow.add(
            vthunder_tasks.AmphoraePostNetworkUnplug(
                requires=(
                    constants.LOADBALANCER,
                    constants.ADDED_PORTS,
                    a10constants.VTHUNDER)))
        delete_member_flow.add(vthunder_tasks.VThunderComputeConnectivityWait(
            name=a10constants.VTHUNDER_CONNECTIVITY_WAIT,
            requires=(a10constants.VTHUNDER, constants.AMPHORA)))
        if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            # Wait on the backup device too before re-electing the master.
            delete_member_flow.add(
                a10_database_tasks.GetBackupVThunderByLoadBalancer(
                    name=a10constants.GET_BACKUP_VTHUNDER_BY_LB,
                    requires=(constants.LOADBALANCER, a10constants.VTHUNDER),
                    provides=a10constants.BACKUP_VTHUNDER))
            delete_member_flow.add(
                vthunder_tasks.VThunderComputeConnectivityWait(
                    name=a10constants.BACKUP_CONNECTIVITY_WAIT + "-before-unplug",
                    requires=constants.AMPHORA,
                    rebind={a10constants.VTHUNDER: a10constants.BACKUP_VTHUNDER}))
            delete_member_flow.add(vthunder_tasks.VCSSyncWait(
                name='member-unplug-' + a10constants.VCS_SYNC_WAIT,
                requires=a10constants.VTHUNDER))
            delete_member_flow.add(vthunder_tasks.GetMasterVThunder(
                name=a10constants.GET_VTHUNDER_MASTER,
                requires=a10constants.VTHUNDER,
                provides=a10constants.VTHUNDER))
        # Release any SNAT NAT-pool address reserved for this member.
        delete_member_flow.add(server_tasks.MemberFindNatPool(
            requires=[a10constants.VTHUNDER, constants.POOL,
                      constants.FLAVOR], provides=a10constants.NAT_FLAVOR))
        delete_member_flow.add(a10_database_tasks.GetNatPoolEntry(
            requires=[constants.MEMBER, a10constants.NAT_FLAVOR],
            provides=a10constants.NAT_POOL))
        delete_member_flow.add(a10_network_tasks.ReleaseSubnetAddressForMember(
            requires=[constants.MEMBER, a10constants.NAT_FLAVOR, a10constants.NAT_POOL]))
        delete_member_flow.add(a10_database_tasks.DeleteNatPoolEntry(
            requires=a10constants.NAT_POOL))
        delete_member_flow.add(
            server_tasks.MemberDelete(
                requires=(
                    constants.MEMBER,
                    a10constants.VTHUNDER,
                    constants.POOL,
                    a10constants.MEMBER_COUNT_IP,
                    a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL)))
        # NOTE(review): the VRID subflow below consumes USE_DEVICE_FLAVOR and
        # MEMBER_COUNT_THUNDER, which this flow does not appear to provide --
        # verify they are injected/stored elsewhere when handle_vrid is on.
        if CONF.a10_global.handle_vrid:
            delete_member_flow.add(self.get_delete_member_vrid_subflow())
        delete_member_flow.add(database_tasks.DeleteMemberInDB(
            requires=constants.MEMBER))
        delete_member_flow.add(database_tasks.DecrementMemberQuota(
            requires=constants.MEMBER))
        delete_member_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        delete_member_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        # Persist config and record the device's last-updated timestamp.
        delete_member_flow.add(vthunder_tasks.WriteMemory(
            requires=a10constants.VTHUNDER))
        delete_member_flow.add(a10_database_tasks.SetThunderUpdatedAt(
            requires=a10constants.VTHUNDER))
        return delete_member_flow
    def get_rack_vthunder_delete_member_flow(self, vthunder_conf, device_dict):
        """Flow to delete a member in Thunder devices

        :param vthunder_conf: parsed rack vThunder configuration, injected
            into GetVthunderConfByFlavor
        :param device_dict: per-device config mapping, injected likewise
        :returns: The flow for deleting a member
        """
        delete_member_flow = linear_flow.Flow(constants.DELETE_MEMBER_FLOW)
        delete_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask(
            requires=[constants.MEMBER,
                      constants.LISTENERS,
                      constants.LOADBALANCER,
                      constants.POOL]))
        delete_member_flow.add(database_tasks.MarkMemberPendingDeleteInDB(
            requires=constants.MEMBER))
        delete_member_flow.add(model_tasks.
                               DeleteModelObject(rebind={constants.OBJECT:
                                                         constants.MEMBER}))
        delete_member_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(
            requires=constants.LOADBALANCER,
            provides=a10constants.VTHUNDER))
        delete_member_flow.add(vthunder_tasks.SetupDeviceNetworkMap(
            requires=a10constants.VTHUNDER,
            provides=a10constants.VTHUNDER))
        # Reference counts decide whether shared server objects can be freed.
        delete_member_flow.add(a10_database_tasks.CountMembersWithIP(
            requires=constants.MEMBER, provides=a10constants.MEMBER_COUNT_IP))
        delete_member_flow.add(
            a10_database_tasks.CountMembersWithIPPortProtocol(
                requires=(
                    constants.MEMBER,
                    constants.POOL),
                provides=a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL))
        delete_member_flow.add(a10_database_tasks.GetFlavorData(
            rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},
            provides=constants.FLAVOR))
        delete_member_flow.add(vthunder_tasks.GetVthunderConfByFlavor(
            inject={a10constants.VTHUNDER_CONFIG: vthunder_conf,
                    a10constants.DEVICE_CONFIG_DICT: device_dict},
            requires=(constants.LOADBALANCER, a10constants.VTHUNDER_CONFIG,
                      a10constants.DEVICE_CONFIG_DICT),
            rebind={constants.FLAVOR_DATA: constants.FLAVOR},
            provides=(a10constants.VTHUNDER_CONFIG, a10constants.USE_DEVICE_FLAVOR)))
        delete_member_flow.add(a10_network_tasks.GetLBResourceSubnet(
            name=a10constants.GET_LB_RESOURCE_SUBNET,
            rebind={a10constants.LB_RESOURCE: constants.MEMBER},
            provides=constants.SUBNET))
        delete_member_flow.add(
            a10_network_tasks.GetMembersOnThunder(
                requires=[a10constants.VTHUNDER, a10constants.USE_DEVICE_FLAVOR],
                provides=a10constants.MEMBERS))
        delete_member_flow.add(
            a10_database_tasks.CountMembersOnThunderBySubnet(
                requires=[
                    constants.SUBNET,
                    a10constants.USE_DEVICE_FLAVOR,
                    a10constants.MEMBERS],
                provides=a10constants.MEMBER_COUNT_THUNDER))
        # Release any SNAT NAT-pool address reserved for this member.
        delete_member_flow.add(server_tasks.MemberFindNatPool(
            requires=[a10constants.VTHUNDER, constants.POOL,
                      constants.FLAVOR], provides=a10constants.NAT_FLAVOR))
        delete_member_flow.add(a10_database_tasks.GetNatPoolEntry(
            requires=[constants.MEMBER, a10constants.NAT_FLAVOR],
            provides=a10constants.NAT_POOL))
        delete_member_flow.add(a10_network_tasks.ReleaseSubnetAddressForMember(
            requires=[constants.MEMBER, a10constants.NAT_FLAVOR, a10constants.NAT_POOL]))
        delete_member_flow.add(a10_database_tasks.DeleteNatPoolEntry(
            requires=a10constants.NAT_POOL))
        delete_member_flow.add(
            server_tasks.MemberDelete(
                requires=(
                    constants.MEMBER,
                    a10constants.VTHUNDER,
                    constants.POOL,
                    a10constants.MEMBER_COUNT_IP,
                    a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL)))
        if CONF.a10_global.network_type == 'vlan':
            delete_member_flow.add(
                vthunder_tasks.DeleteInterfaceTagIfNotInUseForMember(
                    requires=[
                        constants.MEMBER,
                        a10constants.VTHUNDER]))
        # Handle VRID setting
        if CONF.a10_global.handle_vrid:
            delete_member_flow.add(self.get_delete_member_vrid_subflow())
        delete_member_flow.add(database_tasks.DeleteMemberInDB(
            requires=constants.MEMBER))
        delete_member_flow.add(database_tasks.DecrementMemberQuota(
            requires=constants.MEMBER))
        delete_member_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        delete_member_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER,
                      constants.LISTENERS]))
        # Persist config and record the device's last-updated timestamp.
        delete_member_flow.add(vthunder_tasks.WriteMemory(
            requires=a10constants.VTHUNDER))
        delete_member_flow.add(a10_database_tasks.SetThunderUpdatedAt(
            requires=a10constants.VTHUNDER))
        return delete_member_flow
def get_delete_member_vthunder_internal_subflow(self, member_id, pool):
delete_member_thunder_subflow = linear_flow.Flow(
a10constants.DELETE_MEMBER_VTHUNDER_INTERNAL_SUBFLOW)
delete_member_thunder_subflow.add(
a10_database_tasks.CountMembersWithIPPortProtocol(
name='count_members_ip_port_' + member_id,
requires=(
constants.MEMBER,
constants.POOL),
provides=a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL,
rebind={
constants.MEMBER: member_id,
constants.POOL: pool}))
delete_member_thunder_subflow.add(a10_database_tasks.PoolCountforIP(
name='pool_count_for_ip_' + member_id,
requires=[constants.MEMBER, a10constants.USE_DEVICE_FLAVOR, a10constants.POOLS],
provides=a10constants.POOL_COUNT_IP,
rebind={constants.MEMBER: member_id}))
# NAT pools database and pools clean up for flavor
delete_member_thunder_subflow.add(a10_database_tasks.GetFlavorData(
name='get_flavor_data_' + member_id,
rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},
provides=constants.FLAVOR))
delete_member_thunder_subflow.add(server_tasks.MemberFindNatPool(
name='member_find_nat_pool_' + member_id,
requires=[a10constants.VTHUNDER, constants.POOL,
constants.FLAVOR], provides=a10constants.NAT_FLAVOR,
rebind={constants.POOL: pool}))
delete_member_thunder_subflow.add(a10_database_tasks.GetNatPoolEntry(
name='get_nat_pool_db_entry_' + member_id,
requires=[constants.MEMBER, a10constants.NAT_FLAVOR],
provides=a10constants.NAT_POOL, rebind={constants.MEMBER: member_id}))
delete_member_thunder_subflow.add(a10_network_tasks.ReleaseSubnetAddressForMember(
name='release_subnet_address_for_member_' + member_id,
requires=[constants.MEMBER, a10constants.NAT_FLAVOR, a10constants.NAT_POOL],
rebind={constants.MEMBER: member_id}))
delete_member_thunder_subflow.add(a10_database_tasks.DeleteNatPoolEntry(
name='delete_nat_pool_entry_' + member_id,
requires=a10constants.NAT_POOL))
delete_member_thunder_subflow.add(
server_tasks.MemberDeletePool(
name='delete_thunder_member_pool_' +
member_id,
requires=(
constants.MEMBER,
a10constants.VTHUNDER,
constants.POOL,
a10constants.POOL_COUNT_IP,
a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL),
rebind={
constants.MEMBER: member_id, constants.POOL: pool}))
if CONF.a10_global.network_type == 'vlan':
delete_member_thunder_subflow.add(
vthunder_tasks.DeleteInterfaceTagIfNotInUseForMember(
name='delete_unused_interface_tag_in_member_' +
member_id,
requires=[
constants.MEMBER,
a10constants.VTHUNDER],
rebind={
constants.MEMBER: member_id}))
return delete_member_thunder_subflow
def get_delete_member_vrid_subflow(self):
delete_member_vrid_subflow = linear_flow.Flow(
a10constants.DELETE_MEMBER_VRID_SUBFLOW)
delete_member_vrid_subflow.add(a10_network_tasks.GetLBResourceSubnet(
rebind={a10constants.LB_RESOURCE: constants.MEMBER},
provides=constants.SUBNET))
delete_member_vrid_subflow.add(
a10_database_tasks.GetChildProjectsOfParentPartition(
rebind={a10constants.LB_RESOURCE: constants.MEMBER},
provides=a10constants.PARTITION_PROJECT_LIST
))
delete_member_vrid_subflow.add(
a10_database_tasks.CountLoadbalancersInProjectBySubnet(
requires=[
constants.SUBNET,
a10constants.PARTITION_PROJECT_LIST,
a10constants.USE_DEVICE_FLAVOR],
provides=a10constants.LB_COUNT_SUBNET))
delete_member_vrid_subflow.add(
a10_database_tasks.CountLoadbalancersOnThunderBySubnet(
requires=[a10constants.VTHUNDER, constants.SUBNET, a10constants.USE_DEVICE_FLAVOR],
provides=a10constants.LB_COUNT_THUNDER))
delete_member_vrid_subflow.add(
a10_database_tasks.CountMembersInProjectBySubnet(
requires=[constants.SUBNET, a10constants.PARTITION_PROJECT_LIST],
provides=a10constants.MEMBER_COUNT))
delete_member_vrid_subflow.add(
a10_database_tasks.GetVRIDForLoadbalancerResource(
requires=[
a10constants.PARTITION_PROJECT_LIST,
a10constants.VTHUNDER,
constants.LOADBALANCER],
provides=a10constants.VRID_LIST))
delete_member_vrid_subflow.add(
a10_network_tasks.DeleteVRIDPort(
requires=[
a10constants.VTHUNDER,
a10constants.VRID_LIST,
constants.SUBNET,
a10constants.USE_DEVICE_FLAVOR,
a10constants.LB_COUNT_SUBNET,
a10constants.MEMBER_COUNT,
a10constants.LB_COUNT_THUNDER,
a10constants.MEMBER_COUNT_THUNDER],
rebind={a10constants.LB_RESOURCE: constants.MEMBER},
provides=(
a10constants.VRID,
a10constants.DELETE_VRID)))
delete_member_vrid_subflow.add(a10_database_tasks.DeleteVRIDEntry(
requires=[a10constants.VRID, a10constants.DELETE_VRID]))
return delete_member_vrid_subflow
def get_delete_member_vrid_internal_subflow(self, pool, pool_members):
delete_member_vrid_subflow = linear_flow.Flow(
a10constants.DELETE_MEMBER_VRID_INTERNAL_SUBFLOW)
delete_member_vrid_subflow.add(
a10_database_tasks.GetChildProjectsOfParentPartition(
name='get_child_project_of_parent_partition' + pool,
rebind={a10constants.LB_RESOURCE: pool},
provides=a10constants.PARTITION_PROJECT_LIST
))
delete_member_vrid_subflow.add(
a10_database_tasks.GetSubnetForDeletionInPool(
name='get_subnet_for_deletion_in_pool' + pool,
inject={a10constants.MEMBER_LIST: pool_members},
requires=[
a10constants.PARTITION_PROJECT_LIST,
a10constants.USE_DEVICE_FLAVOR,
a10constants.POOLS],
provides=a10constants.SUBNET_LIST))
delete_member_vrid_subflow.add(
a10_database_tasks.GetVRIDForLoadbalancerResource(
name='get_vrid_for_loadbalancer_resource' + pool,
requires=[
a10constants.PARTITION_PROJECT_LIST,
a10constants.VTHUNDER,
constants.LOADBALANCER],
provides=a10constants.VRID_LIST))
delete_member_vrid_subflow.add(
a10_network_tasks.DeleteMultipleVRIDPort(
name='delete_multiple_vrid_port' + pool,
requires=[
a10constants.VTHUNDER,
a10constants.VRID_LIST,
a10constants.SUBNET_LIST],
rebind={a10constants.LB_RESOURCE: pool},
provides=a10constants.VRID_LIST))
delete_member_vrid_subflow.add(a10_database_tasks.DeleteMultiVRIDEntry(
name='delete_multi_vrid_entry' + pool,
requires=a10constants.VRID_LIST))
return delete_member_vrid_subflow
def handle_vrid_for_member_subflow(self):
handle_vrid_for_member_subflow = linear_flow.Flow(
a10constants.HANDLE_VRID_MEMBER_SUBFLOW)
handle_vrid_for_member_subflow.add(
a10_network_tasks.GetLBResourceSubnet(
rebind={
a10constants.LB_RESOURCE: constants.MEMBER},
provides=constants.SUBNET))
handle_vrid_for_member_subflow.add(
a10_database_tasks.GetChildProjectsOfParentPartition(
rebind={a10constants.LB_RESOURCE: constants.MEMBER},
provides=a10constants.PARTITION_PROJECT_LIST
))
handle_vrid_for_member_subflow.add(
a10_database_tasks.GetVRIDForLoadbalancerResource(
requires=[
a10constants.PARTITION_PROJECT_LIST,
a10constants.VTHUNDER,
constants.LOADBALANCER],
provides=a10constants.VRID_LIST))
handle_vrid_for_member_subflow.add(
a10_network_tasks.HandleVRIDFloatingIP(
requires=[
a10constants.VTHUNDER,
a10constants.VRID_LIST,
constants.SUBNET,
a10constants.VTHUNDER_CONFIG,
a10constants.USE_DEVICE_FLAVOR],
rebind={
a10constants.LB_RESOURCE: constants.MEMBER},
provides=a10constants.VRID_LIST))
handle_vrid_for_member_subflow.add(
a10_database_tasks.UpdateVRIDForLoadbalancerResource(
requires=[
a10constants.VRID_LIST,
a10constants.VTHUNDER],
rebind={
a10constants.LB_RESOURCE: constants.MEMBER}))
return handle_vrid_for_member_subflow
    def get_update_member_flow(self, topology):
        """Flow to update a member

        :param topology: load balancer topology; ACTIVE_STANDBY targets
            the elected master vThunder
        :returns: The flow for updating a member
        """
        update_member_flow = linear_flow.Flow(constants.UPDATE_MEMBER_FLOW)
        update_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask(
            requires=[constants.MEMBER,
                      constants.LISTENERS,
                      constants.LOADBALANCER,
                      constants.POOL]))
        update_member_flow.add(vthunder_tasks.VthunderInstanceBusy(
            requires=a10constants.COMPUTE_BUSY))
        update_member_flow.add(database_tasks.MarkMemberPendingUpdateInDB(
            requires=constants.MEMBER))
        if CONF.a10_global.validate_subnet:
            update_member_flow.add(a10_network_tasks.ValidateSubnet(
                name='validate-subnet',
                requires=constants.MEMBER))
        update_member_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(
            requires=constants.LOADBALANCER,
            provides=a10constants.VTHUNDER))
        if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            update_member_flow.add(vthunder_tasks.GetMasterVThunder(
                name=a10constants.GET_MASTER_VTHUNDER,
                requires=a10constants.VTHUNDER,
                provides=a10constants.VTHUNDER))
        if CONF.a10_global.handle_vrid:
            update_member_flow.add(self.handle_vrid_for_member_subflow())
        update_member_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
            requires=constants.LOADBALANCER_ID,
            provides=constants.AMPHORA))
        # NOTE(review): SUBNET and PARTITION_PROJECT_LIST consumed below are
        # provided only by handle_vrid_for_member_subflow(), which is added
        # conditionally above -- verify this flow with handle_vrid disabled.
        update_member_flow.add(a10_database_tasks.GetLoadbalancersInProjectBySubnet(
            requires=[constants.SUBNET, a10constants.PARTITION_PROJECT_LIST],
            provides=a10constants.LOADBALANCERS_LIST))
        update_member_flow.add(a10_database_tasks.CheckForL2DSRFlavor(
            rebind={a10constants.LB_RESOURCE: a10constants.LOADBALANCERS_LIST},
            provides=a10constants.L2DSR_FLAVOR))
        update_member_flow.add(a10_database_tasks.CountLoadbalancersInProjectBySubnet(
            requires=[constants.SUBNET, a10constants.PARTITION_PROJECT_LIST],
            provides=a10constants.LB_COUNT_SUBNET))
        update_member_flow.add(vthunder_tasks.UpdateLoadbalancerForwardWithAnySource(
            requires=(constants.SUBNET, constants.AMPHORA,
                      a10constants.LB_COUNT_SUBNET, a10constants.L2DSR_FLAVOR)))
        update_member_flow.add(a10_database_tasks.GetFlavorData(
            rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},
            provides=constants.FLAVOR))
        # Apply the update on the Thunder device, then mirror it in the DB.
        update_member_flow.add(server_tasks.MemberUpdate(
            requires=(constants.MEMBER, a10constants.VTHUNDER,
                      constants.POOL, constants.FLAVOR,
                      constants.UPDATE_DICT)))
        update_member_flow.add(database_tasks.UpdateMemberInDB(
            requires=[constants.MEMBER, constants.UPDATE_DICT]))
        update_member_flow.add(database_tasks.MarkMemberActiveInDB(
            requires=constants.MEMBER))
        update_member_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        update_member_flow.add(database_tasks.
                               MarkLBAndListenersActiveInDB(
                                   requires=[constants.LOADBALANCER,
                                             constants.LISTENERS]))
        # Persist config and record the device's last-updated timestamp.
        update_member_flow.add(vthunder_tasks.WriteMemory(
            requires=a10constants.VTHUNDER))
        update_member_flow.add(a10_database_tasks.SetThunderUpdatedAt(
            requires=a10constants.VTHUNDER))
        return update_member_flow
    def get_rack_vthunder_update_member_flow(self, vthunder_conf, device_dict):
        """Flow to update a member in Thunder devices

        :param vthunder_conf: parsed rack vThunder configuration, injected
            into GetVthunderConfByFlavor
        :param device_dict: per-device config mapping, injected likewise
        :returns: The flow for updating a member
        """
        update_member_flow = linear_flow.Flow(constants.UPDATE_MEMBER_FLOW)
        update_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask(
            requires=[constants.MEMBER,
                      constants.LISTENERS,
                      constants.LOADBALANCER,
                      constants.POOL]))
        update_member_flow.add(database_tasks.MarkMemberPendingUpdateInDB(
            requires=constants.MEMBER))
        if CONF.a10_global.validate_subnet:
            update_member_flow.add(a10_network_tasks.ValidateSubnet(
                name='validate-subnet',
                requires=constants.MEMBER))
        update_member_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(
            requires=constants.LOADBALANCER,
            provides=a10constants.VTHUNDER))
        update_member_flow.add(vthunder_tasks.SetupDeviceNetworkMap(
            requires=a10constants.VTHUNDER,
            provides=a10constants.VTHUNDER))
        # For device flavor
        update_member_flow.add(a10_database_tasks.GetFlavorData(
            rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},
            provides=constants.FLAVOR))
        update_member_flow.add(vthunder_tasks.GetVthunderConfByFlavor(
            inject={a10constants.VTHUNDER_CONFIG: vthunder_conf,
                    a10constants.DEVICE_CONFIG_DICT: device_dict},
            requires=(constants.LOADBALANCER, a10constants.VTHUNDER_CONFIG,
                      a10constants.DEVICE_CONFIG_DICT),
            rebind={constants.FLAVOR_DATA: constants.FLAVOR},
            provides=(a10constants.VTHUNDER_CONFIG, a10constants.USE_DEVICE_FLAVOR)))
        # Handle VRID settings
        if CONF.a10_global.handle_vrid:
            update_member_flow.add(self.handle_vrid_for_member_subflow())
        # Apply the update on the Thunder device, then mirror it in the DB.
        update_member_flow.add(server_tasks.MemberUpdate(
            requires=(constants.MEMBER, a10constants.VTHUNDER,
                      constants.POOL, constants.FLAVOR,
                      constants.UPDATE_DICT)))
        update_member_flow.add(database_tasks.UpdateMemberInDB(
            requires=[constants.MEMBER, constants.UPDATE_DICT]))
        if CONF.a10_global.network_type == 'vlan':
            update_member_flow.add(vthunder_tasks.TagInterfaceForMember(
                requires=[constants.MEMBER, a10constants.VTHUNDER]))
        update_member_flow.add(database_tasks.MarkMemberActiveInDB(
            requires=constants.MEMBER))
        update_member_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        update_member_flow.add(database_tasks.
                               MarkLBAndListenersActiveInDB(
                                   requires=[constants.LOADBALANCER,
                                             constants.LISTENERS]))
        # Persist config and record the device's last-updated timestamp.
        update_member_flow.add(vthunder_tasks.WriteMemory(
            requires=a10constants.VTHUNDER))
        update_member_flow.add(a10_database_tasks.SetThunderUpdatedAt(
            requires=a10constants.VTHUNDER))
        return update_member_flow
    def get_rack_vthunder_create_member_flow(self, vthunder_conf, device_dict):
        """Create a flow to create a rack vthunder member

        :param vthunder_conf: parsed rack vThunder configuration, injected
            into GetVthunderConfByFlavor
        :param device_dict: per-device config mapping, injected likewise
        :returns: The flow for creating a rack vthunder member
        """
        create_member_flow = linear_flow.Flow(constants.CREATE_MEMBER_FLOW)
        create_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask(
            requires=[constants.MEMBER,
                      constants.LISTENERS,
                      constants.LOADBALANCER,
                      constants.POOL]))
        create_member_flow.add(database_tasks.MarkMemberPendingCreateInDB(
            requires=constants.MEMBER))
        if CONF.a10_global.validate_subnet:
            create_member_flow.add(a10_network_tasks.ValidateSubnet(
                name='validate-subnet',
                requires=constants.MEMBER))
        create_member_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(
            requires=constants.LOADBALANCER,
            provides=a10constants.VTHUNDER))
        create_member_flow.add(vthunder_tasks.SetupDeviceNetworkMap(
            requires=a10constants.VTHUNDER,
            provides=a10constants.VTHUNDER))
        create_member_flow.add(a10_database_tasks.GetFlavorData(
            rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},
            provides=constants.FLAVOR))
        create_member_flow.add(vthunder_tasks.GetVthunderConfByFlavor(
            inject={a10constants.VTHUNDER_CONFIG: vthunder_conf,
                    a10constants.DEVICE_CONFIG_DICT: device_dict},
            requires=(constants.LOADBALANCER, a10constants.VTHUNDER_CONFIG,
                      a10constants.DEVICE_CONFIG_DICT),
            rebind={constants.FLAVOR_DATA: constants.FLAVOR},
            provides=(a10constants.VTHUNDER_CONFIG, a10constants.USE_DEVICE_FLAVOR)))
        if CONF.a10_global.handle_vrid:
            create_member_flow.add(self.handle_vrid_for_member_subflow())
        if CONF.a10_global.network_type == 'vlan':
            create_member_flow.add(vthunder_tasks.TagInterfaceForMember(
                requires=[constants.MEMBER,
                          a10constants.VTHUNDER]))
        create_member_flow.add(a10_database_tasks.CountMembersWithIP(
            requires=constants.MEMBER, provides=a10constants.MEMBER_COUNT_IP))
        create_member_flow.add(self.get_create_member_snat_pool_subflow())
        # Program the member on the Thunder device itself.
        create_member_flow.add(server_tasks.MemberCreate(
            requires=(constants.MEMBER, a10constants.VTHUNDER, constants.POOL,
                      a10constants.MEMBER_COUNT_IP, constants.FLAVOR)))
        create_member_flow.add(database_tasks.MarkMemberActiveInDB(
            requires=constants.MEMBER))
        create_member_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        create_member_flow.add(database_tasks.
                               MarkLBAndListenersActiveInDB(
                                   requires=(constants.LOADBALANCER,
                                             constants.LISTENERS)))
        # Persist config and record the device's last-updated timestamp.
        create_member_flow.add(vthunder_tasks.WriteMemory(
            requires=a10constants.VTHUNDER))
        create_member_flow.add(a10_database_tasks.SetThunderUpdatedAt(
            requires=a10constants.VTHUNDER))
        return create_member_flow
def get_create_member_snat_pool_subflow(self):
    """Build the subflow that allocates a SNAT pool address for a member.

    The subflow finds the NAT pool configured via the pool's flavor, loads
    the matching NAT pool DB entry, reserves a subnet address for the
    member and persists the updated NAT pool record.

    :returns: ``linear_flow.Flow`` performing the SNAT pool bookkeeping.
    """
    subflow = linear_flow.Flow(
        a10constants.CREATE_MEMBER_SNAT_POOL_SUBFLOW)
    # Task order matters: the NAT flavor feeds the DB lookup, which feeds
    # the address reservation, which feeds the final DB update.
    snat_tasks = (
        server_tasks.MemberFindNatPool(
            requires=[a10constants.VTHUNDER, constants.POOL,
                      constants.FLAVOR], provides=a10constants.NAT_FLAVOR),
        a10_database_tasks.GetNatPoolEntry(
            requires=[constants.MEMBER, a10constants.NAT_FLAVOR],
            provides=a10constants.NAT_POOL),
        a10_network_tasks.ReserveSubnetAddressForMember(
            requires=[constants.MEMBER, a10constants.NAT_FLAVOR, a10constants.NAT_POOL],
            provides=a10constants.SUBNET_PORT),
        a10_database_tasks.UpdateNatPoolDB(
            requires=[constants.MEMBER, a10constants.NAT_FLAVOR,
                      a10constants.NAT_POOL, a10constants.SUBNET_PORT]),
    )
    for task in snat_tasks:
        subflow.add(task)
    return subflow
def get_rack_vthunder_batch_update_members_flow(self, old_members, new_members,
updated_members, vthunder_conf, device_dict):
"""Create a flow to batch update members
:returns: The flow for batch updating members
"""
batch_update_members_flow = linear_flow.Flow(
constants.BATCH_UPDATE_MEMBERS_FLOW)
batch_update_members_flow.add(a10_database_tasks.GetVThunderByLoadBalancer(
requires=constants.LOADBALANCER,
provides=a10constants.VTHUNDER))
batch_update_members_flow.add(vthunder_tasks.SetupDeviceNetworkMap(
requires=a10constants.VTHUNDER,
provides=a10constants.VTHUNDER))
batch_update_members_flow.add(a10_database_tasks.GetFlavorData(
rebind={a10constants.LB_RESOURCE: constants.LOADBALANCER},
provides=constants.FLAVOR))
batch_update_members_flow.add(vthunder_tasks.GetVthunderConfByFlavor(
inject={a10constants.VTHUNDER_CONFIG: vthunder_conf,
a10constants.DEVICE_CONFIG_DICT: device_dict},
requires=(constants.LOADBALANCER, a10constants.VTHUNDER_CONFIG,
a10constants.DEVICE_CONFIG_DICT),
rebind={constants.FLAVOR_DATA: constants.FLAVOR},
provides=(a10constants.VTHUNDER_CONFIG, a10constants.USE_DEVICE_FLAVOR)))
batch_update_members_flow.add(a10_network_tasks.GetPoolsOnThunder(
requires=[a10constants.VTHUNDER, a10constants.USE_DEVICE_FLAVOR],
provides=a10constants.POOLS))
batch_update_members_flow.add(server_tasks.MemberFindNatPool(
requires=[a10constants.VTHUNDER, constants.POOL, constants.FLAVOR],
provides=a10constants.NAT_FLAVOR))
# Delete old members
batch_update_members_flow.add(
lifecycle_tasks.MembersToErrorOnRevertTask(
inject={constants.MEMBERS: old_members},
name='{flow}-deleted'.format(
flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW)))
for m in old_members:
batch_update_members_flow.add(database_tasks.MarkMemberPendingDeleteInDB(
inject={constants.MEMBER: m},
name='mark-member-pending-delete-in-db-' + m.id))
batch_update_members_flow.add(model_tasks.DeleteModelObject(
inject={constants.OBJECT: m},
name='{flow}-{id}'.format(
id=m.id, flow=constants.DELETE_MODEL_OBJECT_FLOW)))
batch_update_members_flow.add(a10_database_tasks.CountMembersWithIP(
name='count-member-with-ip-' + m.id,
inject={constants.MEMBER: m},
provides=a10constants.MEMBER_COUNT_IP))
batch_update_members_flow.add(a10_database_tasks.CountMembersWithIPPortProtocol(
name='count-member-with-IP-port-protocol-' + m.id,
inject={constants.MEMBER: m},
requires=constants.POOL,
provides=a10constants.MEMBER_COUNT_IP_PORT_PROTOCOL))
batch_update_members_flow.add(a10_network_tasks.GetLBResourceSubnet(
name='{flow}-{id}'.format(
id=m.id, flow=a10constants.GET_LB_RESOURCE_SUBNET),
inject={a10constants.LB_RESOURCE: m},
provides=constants.SUBNET))
batch_update_members_flow.add(a10_database_tasks.GetNatPoolEntry(
name='get-nat-pool-entry-' + m.id,
inject={constants.MEMBER: m},
requires=[a10constants.NAT_FLAVOR],
provides=a10constants.NAT_POOL))
batch_update_members_flow.add(a10_network_tasks.ReleaseSubnetAddressForMember(
name='release-subnet-address-for-member-' + m.id,
inject={constants.MEMBER: m},
requires=[a10constants.NAT_FLAVOR, a10constants.NAT_POOL]))
batch_update_members_flow.add(a10_database_tasks.DeleteNatPoolEntry(
name='delete-nat-pool-entry-' + m.id,
requires=a10constants.NAT_POOL))
batch_update_members_flow.add(server_tasks.MemberDelete(
name='member-delete-' + m.id,
inject={constants.MEMBER: | |
to which the messages have to be retrieved, defaults to the current time on the gateway.
:param limit: Using this optional parameter you can limit the number of messages retrieved in total.
:return: Returns three values. 1: the status of the operation, 2: the number of messages, 3: the list of retrieved messages.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ MESSAGES message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_messages_frame(from_, to, limit))
# Wait for MESSAGES READ message, decode it and return data.
return super(SIGatewayClient, self).decode_messages_read_frame(self.__receive_frame_until_commands(['MESSAGES READ', 'ERROR']))
def disconnect(self) -> None:
    """
    Disconnects the client from the gateway.

    :raises SIProtocolError: If the client is not currently connected.
    """
    # Disconnecting is only legal while connected.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Flip the state first so callbacks observe the client as disconnected.
    self.__state = SIConnectionState.DISCONNECTED
    # Tear down the underlying WebSocket connection.
    self.__ws.close()
def __ensure_in_state(self, state: SIConnectionState) -> None:
    """Raise unless the client is currently in the given connection state.

    :param state: State the client is required to be in.
    :raises SIProtocolError: If the actual state differs from *state*.
    """
    if self.__state == state:
        return
    raise SIProtocolError("invalid client state")
def __receive_frame_until_commands(self, commands: list) -> str:
    """Block until a frame whose command is listed in *commands* arrives.

    Frames carrying any other command are silently discarded.

    :param commands: Accepted frame commands.
    :return: The first received frame with an accepted command.
    """
    while True:
        frame = self.__ws.recv()
        command = super(SIGatewayClient, self).peek_frame_command(frame)
        if command in commands:
            return frame
class SIAsyncGatewayClientCallbacks:
    """
    Base class containing all callback methods that can be called by the SIAsyncGatewayClient. You can use this as your base class and register it using
    SIAsyncGatewayClient.set_callbacks().

    All methods default to no-ops; override only the callbacks you need.
    """

    def on_connected(self, access_level: SIAccessLevel, gateway_version: str) -> None:
        """
        This method is called once the connection to the gateway could be established and the user has been successfully authorized.

        :param access_level: Access level that was granted to the user during authorization.
        :param gateway_version: Version of the OpenStuder software running on the gateway.
        """
        pass

    def on_disconnected(self) -> None:
        """
        Called when the connection to the OpenStuder gateway has been gracefully closed by either side or the connection was lost by any other reason.
        """
        pass

    def on_error(self, reason: Exception) -> None:
        """
        Called on severe errors.

        :param reason: Exception that caused the erroneous behavior.
        """
        pass

    def on_enumerated(self, status: SIStatus, device_count: int) -> None:
        """
        Called when the enumeration operation started using enumerate() has completed on the gateway.

        :param status: Operation status.
        :param device_count: Number of devices present.
        """
        pass

    def on_description(self, status: SIStatus, id_: Optional[str], description: object) -> None:
        """
        Called when the gateway returned the description requested using the describe() method.

        :param status: Status of the operation.
        :param id_: Subject's ID.
        :param description: Description object.
        """
        pass

    def on_properties_found(self, status: SIStatus, id_: str, count: int, properties: List[str]) -> None:
        """
        Called when the gateway returned the list of found properties requested using the find_properties() method.

        :param status: Status of the find operation.
        :param id_: The searched ID (including wildcard character).
        :param count: The number of properties found.
        :param properties: List of the property IDs.
        """
        pass

    def on_property_read(self, status: SIStatus, property_id: str, value: Optional[any]) -> None:
        """
        Called when the property read operation started using read_property() has completed on the gateway.

        :param status: Status of the read operation.
        :param property_id: ID of the property read.
        :param value: The value read.
        """
        pass

    def on_properties_read(self, results: List[SIPropertyReadResult]) -> None:
        """
        Called when the multiple properties read operation started using read_properties() has completed on the gateway.

        :param results: List of all results of the operation.
        """
        pass

    def on_property_written(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the property write operation started using write_property() has completed on the gateway.

        :param status: Status of the write operation.
        :param property_id: ID of the property written.
        """
        pass

    def on_property_subscribed(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the gateway returned the status of the property subscription requested using the subscribe_to_property() method.

        :param status: The status of the subscription.
        :param property_id: ID of the property.
        """
        pass

    def on_properties_subscribed(self, statuses: List[SIPropertySubscriptionResult]) -> None:
        """
        Called when the gateway returned the status of the properties subscription requested using the subscribe_to_properties() method.

        :param statuses: The statuses of the individual subscriptions.
        """
        pass

    def on_property_unsubscribed(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the gateway returned the status of the property unsubscription requested using the unsubscribe_from_property() method.

        :param status: The status of the unsubscription.
        :param property_id: ID of the property.
        """
        pass

    def on_properties_unsubscribed(self, statuses: List[SIPropertySubscriptionResult]) -> None:
        """
        Called when the gateway returned the status of the properties unsubscription requested using the unsubscribe_from_properties() method.

        :param statuses: The statuses of the individual unsubscriptions.
        """
        pass

    def on_property_updated(self, property_id: str, value: any) -> None:
        """
        This callback is called whenever the gateway send a property update.

        :param property_id: ID of the updated property.
        :param value: The current value of the property.
        """
        pass

    def on_datalog_properties_read(self, status: SIStatus, properties: List[str]) -> None:
        """
        Called when the datalog property list operation started using read_datalog_properties() has completed on the gateway.

        :param status: Status of the operation.
        :param properties: List of the IDs of the properties for whom data is available in the data log.
        """
        pass

    def on_datalog_read_csv(self, status: SIStatus, property_id: str, count: int, values: str) -> None:
        """
        Called when the datalog read operation started using read_datalog() has completed on the gateway. This version of the method returns the data in CSV format suitable to
        be written to a file.

        :param status: Status of the operation.
        :param property_id: ID of the property.
        :param count: Number of entries.
        :param values: Properties data in CSV format whereas the first column is the date and time in ISO 8601 extended format and the second column contains the actual values.
        """
        pass

    def on_device_message(self, message: SIDeviceMessage) -> None:
        """
        This callback is called whenever the gateway send a device message indication.

        :param message: The device message received.
        """
        pass

    def on_messages_read(self, status: SIStatus, count: int, messages: List[SIDeviceMessage]) -> None:
        """
        Called when the gateway returned the status of the read messages operation using the read_messages() method.

        :param status: The status of the operation.
        :param count: Number of messages retrieved.
        :param messages: List of retrieved messages.
        """
        pass
class SIAsyncGatewayClient(_SIAbstractGatewayClient):
"""
Complete, asynchronous (non-blocking) OpenStuder gateway client.
This client uses an asynchronous model which has the disadvantage to be a bit harder to use than the synchronous version. The advantages are that long operations do not block
the main thread as all results are reported using callbacks, device message indications are supported and subscriptions to property changes are possible.
"""
def __init__(self):
super(SIAsyncGatewayClient, self).__init__()
self.__state: SIConnectionState = SIConnectionState.DISCONNECTED
self.__ws: Optional[websocket.WebSocketApp] = None
self.__thread: Optional[Thread] = None
self.__access_level: SIAccessLevel = SIAccessLevel.NONE
self.__gateway_version: str = ''
self.__user: Optional[str] = None
self.__password: Optional[str] = None
self.on_connected: Optional[Callable[[SIAccessLevel, str], None]] = None
"""
This callback is called once the connection to the gateway could be established and the user has been successfully authorized.
The callback takes two arguments. 1: the access level that was granted to the user during authorization, 2: the version of the OpenStuder software running on the gateway.
"""
self.on_disconnected: Optional[Callable[[], None]] = None
"""
Called when the connection to the OpenStuder gateway has been gracefully closed by either side or the connection was lost by any other reason.
This callback has no parameters.
"""
self.on_error: Optional[Callable[[Exception], None]] = None
"""
Called on severe errors.
The single parameter passed to the callback is the exception that caused the erroneous behavior.
"""
self.on_enumerated: Optional[Callable[[str, int], None]] = None
"""
Called when the enumeration operation started using enumerate() has completed on the gateway.
The callback takes two arguments. 1: operation status, 2: the number of devices present.
"""
self.on_description: Optional[Callable[[str, Optional[str], object], None]] = None
"""
Called when the gateway returned the description requested using the describe() method.
The callback takes three parameters: 1: Status of the operation, 2: the subject's ID, 3: the description object.
"""
self.on_properties_found: Optional[Callable[[SIStatus, str, int, List[str]], None]] = None
"""
Called when the gateway returned the list of found properties requested using the find_properties() method.
The callback takes | |
tuple:
if value in range(i[0], i[1]):
return True
else:
if value == i:
return True
return False
class BooleanLeaf(Leaf):
    """
    Leaf holding a boolean value, serialized as the XML strings ``true``/``false``.
    """

    def __init__(self, tag, parent=None, value=None, units="", mandatory=False):
        """
        :param tag: string, XML tag of the leaf
        :param parent: Yang, optional containing node
        :param value: optional initial value; bool or the strings "true"/"false"
        :param units: string, unit of measure
        :param mandatory: boolean, whether the leaf is mandatory
        """
        super(BooleanLeaf, self).__init__(tag, parent=parent)
        self.data = None  # bool payload; may hold an Element subtree after parse() of complex content
        if value is not None:
            self.set_value(value)
        self.set_units(units)
        self.set_mandatory(mandatory)

    def parse(self, root):
        """
        Populate this leaf from the matching child element of *root*.

        The matched element is removed from *root* after parsing.

        :param root: ElementTree element to search
        :return: -
        """
        e_data = root.find(self.get_tag())
        if e_data is not None:
            # len(element) counts child elements via the public sequence API.
            # The previous code read the private CPython-2-only attribute
            # Element._children, which does not exist on Python 3.
            if len(e_data) > 0:
                for i in e_data.iter():
                    i.tail = None
                e_data.text = None
                self.data = e_data  # complex content: keep the raw element subtree
            else:
                self.set_value(e_data.text)
            root.remove(e_data)
            self.initialized = True

    def get_as_text(self):
        """
        Returns the data value as text ("true"/"false").

        :param: -
        :return: string
        """
        # NOTE(review): comparing type() against ET looks suspicious if ET is
        # the ElementTree *module* rather than an Element class -- confirm the
        # import at the top of this file.
        if type(self.data) == ET:
            return ET.tostring(self.data, encoding="us-ascii", method="text")
        return str(self.data).lower()

    def get_value(self):
        """
        Returns the data value.

        :param: -
        :return: boolean
        """
        return self.data

    def set_value(self, value):
        """
        Sets the data value.

        Accepts real booleans in addition to the historical "true"/"false"
        strings (previously ``BooleanLeaf(value=True)`` raised TypeError
        because only the string forms were recognized).

        :param value: bool or the strings "true"/"false"
        :return: -
        :raises TypeError: for any other value
        """
        if value is True or value == "true":
            self.data = True
        elif value is False or value == "false":
            self.data = False
        else:
            raise TypeError("Not a boolean!")
class Leafref(StringLeaf):
    """
    Leaf extension of StringLeaf whose data references another Yang instance.

    The reference is held in two interchangeable forms: a path string in
    ``data`` and/or a direct object pointer in ``target``.  bind()/unbind()
    convert between the forms and maintain the back-reference ("referred")
    list on the target object.
    """
    def __init__(self, tag, parent=None, value=None, units="", mandatory=False):
        # NOTE(review): ``units`` is accepted for signature parity with the
        # other leaf classes but is not forwarded to StringLeaf here -- confirm
        # this is intentional.
        self.target = None  # must be initialized before the super call, as set_value() is overridden
        """:type: Yang"""
        # The super().__init__ call invokes the overridden set_value()
        super(Leafref, self).__init__(tag, parent=parent, value=value, mandatory=mandatory)

    def set_value(self, value):
        """
        Sets the reference either as a path string or as a Yang object.

        None clears both forms; a string replaces the path (dropping any bound
        target); a Yang instance becomes the target and registers this leaf in
        the target's referred list.

        :param value: None, path string, or Yang object
        :return: -
        :raises ValueError: if value is of any other type
        """
        if value is None:
            self.unbind()
            self.data = None
            self.target = None
            return
        if type(value) is str:
            if self.data != value:
                self.unbind()
                self.target = None
                self.data = value
                # self.bind()
        elif issubclass(type(value), Yang):
            if self.target != value:
                self.unbind()
                self.data = None
                self.target = value
                self.target.set_referred(self)
                # self.bind()
        else:
            raise ValueError("Leafref value is of unknown type.")

    def is_initialized(self):
        """
        Overrides the Leaf method: a Leafref counts as initialized if either
        the path string or the target object is set.

        :param: -
        :return: boolean
        """
        if (self.data is not None) or (self.target is not None):
            return True
        else:
            return False

    def get_as_text(self):
        """
        Returns the path string; if only the target object is set, bind() is
        called first to derive the path from it.

        :param: -
        :return: string
        :raises ReferenceError: if neither data nor target is set
        """
        if self.data is not None:
            return self.data
        if self.target is not None:
            self.bind()
            return self.data
        else:
            raise ReferenceError("Leafref get_as_text() is called but neither data nor target exists.")

    def get_target(self):
        """
        Returns the target object, resolving it from the stored path via
        bind() if necessary.

        :param: -
        :return: Yang instance (or None if unresolvable)
        """
        if self.target is None:
            self.bind()  # resolves the path in self.data and sets the target
        return self.target
        # if self.data is not None:
        #     return self.walk_path(self.data)

    def bind(self, relative=False):
        """
        Binds the target and adds this leaf to the referred list of the target.
        The stored path is refreshed as relative or absolute based on the parameter.

        :param relative: boolean, store a relative path instead of an absolute one
        :return: -
        """
        if self.target is not None:
            if relative:
                self.data = self.get_rel_path(self.target)
            else:
                self.data = self.target.get_path()
        elif self.data is not None:
            # Resolving a path requires a parent to walk from.
            if self._parent is not None:
                self.target = self.walk_path(self.data)
                self.target.set_referred(self)

    def unbind(self):
        """Remove this leaf from the target's referred list, if a target is bound."""
        if self.target is not None:
            self.target.unset_referred(self)
class ListedYang(Yang):
    """
    Base class for Virtualizer classes that are modelled as list entries,
    i.e. elements addressed by one or more key attributes.
    """

    def __init__(self, tag, keys, parent=None):
        super(ListedYang, self).__init__(tag, parent)
        self._key_attributes = keys

    def get_parent(self):
        """
        Return the grandparent node: list entries live inside a ListYang
        container, so the semantic parent is the container's parent.

        :return: Yang instance
        """
        return self._parent.get_parent()

    def keys(self):
        """
        Return the key value(s) identifying this entry: a single value for
        a simple key, a tuple for a composite key.
        """
        values = [self.__dict__[attr].get_value() for attr in self._key_attributes]
        if len(values) > 1:
            return tuple(values)
        return values[0]

    def get_key_tags(self):
        """
        Return the tag name(s) of the key attribute(s): a single tag for a
        simple key, a tuple for a composite key.
        """
        tags = [self.__dict__[attr].get_tag() for attr in self._key_attributes]
        if len(tags) > 1:
            return tuple(tags)
        return tags[0]

    def get_path(self):
        """
        Return the path of this entry: the parent path extended with
        ``tag[key=value, ...]``.

        :return: string
        :raises KeyError: if the entry has no key value set
        """
        values = self.keys()
        if values is None:
            raise KeyError("List entry without key value: " + self.get_as_text())
        tags = self.get_key_tags()
        if isinstance(tags, tuple):
            selector = ', '.join('%s=%s' % pair for pair in zip(tags, values))
        else:
            selector = tags + "=" + values
        fragment = self.get_tag() + "[" + selector + "]"
        parent = self.get_parent()
        if parent is not None:
            return parent.get_path() + "/" + fragment
        return fragment

    def empty_copy(self):
        """
        Return a fresh instance of the same class carrying only the key
        attributes (shared with this instance, not copied); every other
        field stays at its default.

        :return: ListedYang instance
        """
        clone = self.__class__()
        for attr in self._key_attributes:
            setattr(clone, attr, getattr(self, attr))
        return clone

    def reduce(self, reference):
        """
        Delete child nodes which equivalently exist in the reference tree,
        keeping the key attributes.  The call is recursive: a node is
        removed if and only if all of its children are removed.

        :param reference: Yang instance to compare against
        :return: result of Yang.reduce()
        """
        return super(ListedYang, self).reduce(reference, self.get_key_tags())
class ListYang(Yang): # FIXME: to inherit from OrderedDict()
"""
Class to express list as dictionary
"""
def __init__(self, tag, parent=None, type=None):
    """
    :param tag: string, XML tag of the list
    :param parent: Yang, optional containing node
    :param type: Yang subclass stored as list elements (deliberately shadows
        the builtin -- the keyword name is part of the public interface)
    """
    super(ListYang, self).__init__(tag, parent)
    self._data = OrderedDict()  # key -> ListedYang entry, insertion ordered
    self._type = type
def get_type(self):
    """
    Returns the Yang subclass used for the elements of the _data OrderedDict

    :param: -
    :return: Yang subclass (or None if never set)
    """
    return self._type
def set_type(self, type):
    """
    Sets the Yang subclass used for the elements of the _data OrderedDict

    :param type: Yang subclass
    :return: -
    """
    self._type = type
def keys(self):
    """
    Returns the keys of the underlying OrderedDict (a list on Python 2)

    :param: -
    :return: list
    """
    return self._data.keys()
def values(self):
    """
    Returns the entries of the underlying OrderedDict (a list on Python 2)

    :param: -
    :return: list
    """
    return self._data.values()
def iterkeys(self):
    """
    Returns an iterator over the keys of the underlying OrderedDict
    (Python 2 only -- dicts have no iterkeys() on Python 3)

    :param: -
    :return: iterator
    """
    return self._data.iterkeys()
def itervalues(self):
    """
    Returns an iterator over the entries of the underlying OrderedDict
    (Python 2 only -- dicts have no itervalues() on Python 3)

    :param: -
    :return: iterator
    """
    return self._data.itervalues()
def items(self):
    """
    Returns the (key, entry) pairs of the underlying OrderedDict

    :param: -
    :return: list
    """
    return self._data.items()
def iteritems(self):
    """
    Returns an iterator over the (key, entry) pairs of the underlying
    OrderedDict (Python 2 only -- dicts have no iteritems() on Python 3)

    :param: -
    :return: iterator
    """
    return self._data.iteritems()
def has_key(self, key):  # name kept for Python-2-style callers; prefer `key in ...`
    """
    Report whether *key* addresses an entry of this list.

    :param key: entry key (single value, or tuple for composite keys)
    :return: boolean
    """
    return key in self._data
def has_value(self, value):
    """
    Returns whether *value* is among the stored entries

    :param value: ListedYang instance (or anything comparable to one)
    :return: boolean
    """
    return value in self._data.values()
def length(self):
    """
    Returns the number of entries in the list

    :param: -
    :return: int
    """
    return len(self._data)
def is_initialized(self):
    """
    Report whether the list holds at least one entry.

    :return: boolean
    """
    return len(self._data) > 0
def add(self, item):
    """
    Insert a single ListedYang entry, or every entry of a list/tuple of them.

    Each entry is keyed by its keys() and gets its parent pointer updated.

    :param item: ListedYang instance, or list/tuple of ListedYang instances
    :return: the item passed in
    :raises TypeError: for anything that is not (a collection of) ListedYang
    """
    if type(item) in (list, tuple):
        for element in item:
            if not isinstance(element, ListedYang):
                raise TypeError("Item must be ListedYang or a list of ListedYang!")
            self.add(element)
    elif isinstance(item, ListedYang):
        item.set_parent(self)
        self[item.keys()] = item
    else:
        raise TypeError("Item must be ListedYang or a list of ListedYang!")
    return item
def remove(self, item):
    """
    Remove one entry, addressed either by key or by the entry itself.

    :param item: key (single or composite) or a ListedYang instance
    :return: the removed entry
    :raises KeyError: if no entry matches
    """
    key = item.keys() if isinstance(item, ListedYang) else item
    return self._data.pop(key)
def _et(self, node, inherited=False, ordered=True):
    """
    Serialize every list entry as a SubElement of *node*, sorted by key
    when *ordered* is set.

    Bug fix: entries used to be invoked as ``_et(node, ordered)``, which
    passed the *ordered* flag into the *inherited* parameter slot and left
    the child's *ordered* flag at its default.  Both flags are now
    forwarded explicitly by keyword.

    :param node: ElementTree element to append to
    :param inherited: boolean, forwarded to each entry's _et()
    :param ordered: boolean, emit entries in sorted key order
    :return: the *node* passed in
    """
    if ordered:
        entries = [self._data[key] for key in sorted(self.keys())]
    else:
        entries = self.values()
    for entry in entries:
        entry._et(node, inherited=inherited, ordered=ordered)
    return node
def __iter__(self):  # iterates keys, mirroring plain dict semantics
    """
    Returns an iterator over the keys of the underlying OrderedDict

    :param: -
    :return: iterator
    """
    return self._data.__iter__()
def next(self):
    """
    Advance to the next element of the underlying dictionary

    NOTE(review): OrderedDict has no ``next()`` method in either Python 2
    or Python 3, so this raises AttributeError if ever called, and the
    value would not be returned anyway -- confirm whether this is dead code.

    :param: -
    :return: -
    """
    self._data.next()
def __getitem__(self, key):
"""
Returns ListYang value if key in | |
import numpy as np
import matplotlib.pyplot as plt
import warnings
class DistributionSampler:
def __init__(
    self, size=None, dist=None, mean=None, sd=None, lam=None, trials=None,
    prob=None
):
    '''
    Overview
    --------
    Selects a random sample from a given distribution and stores it as a
    numpy array.  Parameters may be supplied here or later through the
    set_parameters() method; when a complete parameter set for one of the
    supported distributions is given at construction time, the sample is
    drawn immediately, otherwise call draw() once the parameters are set.

    Attributes
    ----------
    size : integer
        The number of samples to be selected from the distribution.
    dist : string
        The type of distribution to be created. Applicable values are
        'Normal', 'Poisson' or 'Binomial'.
    mean : float / int , optional
        Normal distributions only: centre of the distribution.
    sd : float / int , optional
        Normal distributions only: standard deviation (spread/width).
    lam : float / int , optional
        Poisson distributions only: lambda, the mean and variance of the
        sample.
    trials : float / int , optional
        Binomial distributions only: number of trials to run.
    prob : float / int , optional
        Binomial distributions only: probability of a trial succeeding.

    Notes
    -----
    The samples are generated using the numpy library, v1.15. For more
    details, check the API Reference material here:
    https://docs.scipy.org/doc/numpy-1.15.1/reference/

    Examples
    --------
    Instance = DistributionSampler(1000, 'Normal', mean=0, sd=5)
    s = Instance.draw()

    Instance = DistributionSampler()
    Instance.set_parameters(size=1000, dist='Poisson', lam=5)
    s = Instance.draw()

    Instance = DistributionSampler()
    Instance.size = 1000
    Instance.dist = 'Binomial'
    Instance.trials = 5
    Instance.prob = 0.5
    s = Instance.draw()
    '''
    self.size = size
    self.dist = dist
    self.mean = mean
    self.sd = sd
    self.lam = lam
    self.trials = trials
    self.prob = prob
    self.sample = None
    self.sample_parameters = {}

    # Draw immediately when a complete parameter set for any of the three
    # supported distributions was provided at construction time.
    if size is not None and dist is not None:
        normal_ready = mean is not None and sd is not None
        poisson_ready = lam is not None
        binomial_ready = trials is not None and prob is not None
        if normal_ready or poisson_ready or binomial_ready:
            self.draw()
def print_parameters(self):
'''
Overview
--------
Prints the current parameter values to the console. You can update the
parameters using the set_parameters() method.
Parameters
----------
None
Returns
-------
No values are returned, instead the parameters are printed to the
console
Notes
-----
The samples are generated using the numpy library, v1.15. For more
details, check the API Reference material here:
https://docs.scipy.org/doc/numpy-1.15.1/reference/
Examples
--------
Instance.print_parameters()
'''
print('Parameters')
print('-----------')
print('size: {}'.format(self.size))
print('dist: {}'.format(self.dist))
print('mean: {}'.format(self.mean))
print('sd: {}'.format(self.sd))
print('lam: {}'.format(self.lam))
print('trials: {}'.format(self.trials))
print('prob: {}'.format(self.prob))
def print_sample(self):
'''
Overview
--------
Prints the current sample to the console. In the event that there is no
sample to print, instructions on how to set the parameters and create
the sample are given.
Parameters
----------
None
Returns
-------
No values are returned, instead the parameters are printed to the
console
Notes
-----
The samples are generated using the numpy library, v1.15. For more
details, check the API Reference material here:
https://docs.scipy.org/doc/numpy-1.15.1/reference/
Examples
--------
Instance.print_sample()
'''
if self.sample is None:
print(
'No Sample to print. Use the .set_parameters() method to set '
'appropriate parameters and then run the .draw() method to '
'create a sample.'
)
else:
print(self.sample)
def set_parameters(
self, size='', dist='', mean='', sd='', lam='', trials='', prob=''
):
'''
Overview
--------
Sets the parameters for the selection of the distribution. Multiple
parameters can be passed in a single call to the function.
Parameters
----------
size : integer
The number of samples to be selected from the distribution.
distribution : string
The type of distribution to be created. Applicable values are 'Normal',
'Poisson' or 'Binomial'.
mean : float / int , optional
Applicable to Normal distributions only. The mean value will dictate t
he centre of the distribution.
sd: float / int , optional
Applicable to Normal distributions only. The sd (Standard Deviation)
will dictate the spread or width of the distribution.
lam : float / int , optional
Applicable to Poisson distributions only. The lam (lambda) controls
the mean and variance of the sample.
trials: float / int , optional
Applicable to Binomial distributions only. The trials parameter is
used to dictate the number of trials to run in generating the sample
prob: float / int , optional
Applicable to Binomial distributions only. The prob parameter is used
to dicitate the probability of a trial being successful.
Returns
-------
None
Notes
-----
The samples are generated using the numpy library, v1.15. For more
details,check the API Reference material here:
https://docs.scipy.org/doc/numpy-1.15.1/reference/
Examples
--------
s = distribution_sampler(1000, 'Normal', mean=0, sd = 5)
s = distribution_sampler(1000, 'Poisson', lam=5)
s = distribution_sampler(1000, 'Binomial', trials=5, prob=0.5)
'''
local_data = locals()
params = dict(local_data)
del params['self']
for key, value in params.items():
if value == '':
pass
else:
# This is a hack to get around exec not resolving quotes.
if key == 'dist':
self.dist = value
else:
exec('self.{} = {}'.format(key, value))
def _validate_parameters(self):
'''
Private function to validate the input parameters, called during the
draw() method. If the parameters are not valid, an error message will
display with instructions on how to input valid parameters
'''
# Mandatory parameter error handling
if self.dist not in ['Normal', 'Poisson', 'Binomial']:
raise ValueError(
"The dist parameter is mandatory and must equal 'Normal', "
"'Poisson', or 'Binomial'"
)
if not isinstance(self.size, int):
raise ValueError(
'The size parameter is mandatory and must be an integer.'
)
# Distribution Specific Error Handling
if self.size is None:
raise ValueError(
'You need to set a sample size prior to '
'. calling the draw method. This must be an integer. E.g. '
'10000. Parameters can be input either using the '
'set_parameters() method, by manually updating the instance.'
' E.g. Instance.size = 10000 or by passing parameters to the '
'draw() method. E.g. Instance.draw(size=10000)'
)
if self.dist is None:
raise ValueError(
'You need to set a dist parameter to specify the type of '
'distribution prior to calling the draw method. Available '
'parameters are as follows:\n\n'
"'Normal'\n'Poisson'\n'Binomial'"
'Parameters can be input either using the set_parameters()'
' method, by manually updating the instance. E.g. '
'Instance.dist = "Normal" or by passing parameters to the '
'draw() method. E.g. Instance.draw(dist="Normal")'
)
if self.dist == 'Normal':
# Raise an error if the mean or sd parameters aren't set
if (self.mean is None) or (self.sd is None):
raise NameError(
"The mean and sd parameters must be set where the dist is"
"set to 'Normal'. Parameters can be input either using "
"the set_parameters() method, by manually updating the "
"instance (e.g. Instance.mean=1) or by passing parameters "
"to the draw() method. E.g. Instance.draw(mean=q)"
)
# Raise a warning if irrelevent parameters are provided
elif (
(self.lam is not None) or (self.trials is not None) or
(self.prob is not None)
):
warnings.warn(
'The lam, trials and prob parameters are not used in the'
' selection of a normal distribution. These parameters '
'will be ignored.\n'
)
if self.dist == 'Poisson':
# Raise an error if the lam parameter isn't set
if self.lam is None:
raise NameError(
"The lam parameter must be set where dist is set to "
"'Poisson'. 'Parameters can be input either using the "
"set_parameters() method, by manually updating the "
"instance. (e.g. Instance.lam=5), or by passing parameters"
" to the draw() method. E.g. Instance.draw(lam=5)"
)
# Raise a warning if irrelevent parameters are provided
elif (
(self.mean is not None) or (self.sd is not None) | |
for case 2
name1: str, Default 'N1'
Phrase to append to keys of the resulting dataframe for case 1
name2: str, Default 'N2'
Phrase to append to keys of the resulting dataframe for case 2
saveName: str, Default 'saveName'
Name of file to save result dataframe to
Returns:
None
Usage:
an = Analysis()
an.compareTwoCases(saveDir1, saveDir2, name1, name2, saveName)
'''
majorMetric = self.majorMetric
try:
df1 = pd.read_hdf(os.path.join(saveDir1, 'per-gene-measures-%s.h5' % majorMetric), key='df')
df2 = pd.read_hdf(os.path.join(saveDir2, 'per-gene-measures-%s.h5' % majorMetric), key='df')
except Exception as exception:
print('ERROR reading h5 in compareTwoCases:', exception, '\nTrying reading XLSX format')
try:
df1 = pd.read_excel(os.path.join(saveDir1, 'per-gene-measures-%s.xlsx' % majorMetric), header=0, index_col=[0,1]).fillna('')
df2 = pd.read_excel(os.path.join(saveDir2, 'per-gene-measures-%s.xlsx' % majorMetric), header=0, index_col=[0,1]).fillna('')
except Exception as exception:
print('ERROR reading XLSX:', exception)
return
n23_1 = len(np.intersect1d(np.unique(df1.index.get_level_values('gene').values), gEC22))
n23_2 = len(np.intersect1d(np.unique(df2.index.get_level_values('gene').values), gEC22))
commonIndex = df1.index.intersection(df2.index)
df1 = df1.loc[commonIndex]
df2 = df2.loc[commonIndex]
df_T50 = pd.concat([df1['T50'].str.replace(' ','').str.split(','), df2['T50'].str.replace(' ','').str.split(',')], keys=[name1, name2], axis=1, sort=False)
df_EC23T50 = pd.concat([df1['EC23T50'].str.replace(' ','').str.split(','), df2['EC23T50'].str.replace(' ','').str.split(',')], keys=[name1, name2], axis=1, sort=False)
df_AUC = pd.concat([df1['AUC'].astype(float), df2['AUC'].astype(float)], keys=[name1, name2], axis=1, sort=False)
df_EC23T50_count = df_EC23T50.applymap(len)
df_EC23T50_common = df_EC23T50.apply(lambda s: np.intersect1d(s[0], s[1]), axis=1)
df_EC23T50_common_count = df_EC23T50.apply(lambda s: len(np.intersect1d(s[0], s[1])), axis=1)
df_T50_common = df_T50.apply(lambda s: np.intersect1d(s[0], s[1]), axis=1)
df_T50_common_count = df_T50.apply(lambda s: len(np.intersect1d(s[0], s[1])), axis=1)
df_AUC_avg = df_AUC.apply(np.mean, axis=1)
df_res = pd.concat([df_EC23T50_common.apply(cleanListString),
df_EC23T50_common_count,
df_T50_common.apply(cleanListString),
df_T50_common_count,
df1['AUC'].astype(float),
df2['AUC'].astype(float),
df1['EC23T50'].str.split(',').apply(len),
df2['EC23T50'].str.split(',').apply(len),
df1['EC23T50'],
df2['EC23T50']],
keys=[('Inter-measures', 'EC23T50_common'),
('Inter-measures', 'EC23T50_common_count'),
('Inter-measures', 'T50_common'),
('Inter-measures', 'T50_common_count'),
('Intra-measures', 'AUC ' + name1),
('Intra-measures', 'AUC ' + name2),
('Intra-measures', 'EC23 count ' + name1 + ' %s' % n23_1),
('Intra-measures', 'EC23 count ' + name2 + ' %s' % n23_2),
('Intra-measures', 'EC23 ' + name1 + ' %s' % n23_1),
('Intra-measures', 'EC23 ' + name2 + ' %s' % n23_2)],
axis=1, sort=False)
df_res.columns = pd.MultiIndex.from_tuples(df_res.columns)
df_res = df_res.sort_index()
df_res.to_excel('%s.xlsx' % saveName, merge_cells=False)
if False:
df_res_f = df_res.copy()
df_res_f = df_res_f.loc[(df_res_f[('Intra-measures', 'EC23 count ' + name1 + ' %s' % n23_1)] >= 5) &
(df_res_f[('Intra-measures', 'EC23 count ' + name2 + ' %s' % n23_2)] >= 5) &
(df_res_f[('Intra-measures', 'AUC ' + name1)] >= 0.5) &
(df_res_f[('Intra-measures', 'AUC ' + name2)] >= 0.5)]
df_res_f.to_excel('%s_filtered.xlsx' % saveName, merge_cells=False)
return
def runPairOfExperiments(self, args):
    '''Analyze the case, compare it with comparison case, find the conserved genes between the cases, analyze case again

    Parameters:
        saveDir: str
            Directory with all bootstrap experiments
        saveSubDir: str
            Subdirectory for a bootstrap experiment
        otherCaseDir: str
            Directory holding comparison data

    Returns:
        None

    Usage:
        For internal use only
    '''

    save_dir, sub_dir, other_case_dir = args
    print(save_dir, sub_dir, flush=True)

    try:
        case_dir = os.path.join(save_dir, sub_dir)
        comparison_name = os.path.join(save_dir, sub_dir, 'comparison')

        # First pass: compute measures and export figure data, no plotting.
        self.analyzeCase(None, toggleAdjustText=False, noPlot=True,
                         suffix=sub_dir, saveDir=case_dir, printStages=False,
                         toggleCalculateMajorMetric=False, toggleExportFigureData=True,
                         toggleCalculateMeasures=True)

        # Cross-compare this case against the other case's data.
        self.compareTwoCases(os.path.join(save_dir, sub_dir, ''), other_case_dir,
                             name1='name1', name2='name2', saveName=comparison_name)

        # Feed the conserved-genes counts back into the panel data.
        additional_data = externalPanelsData.copy()
        additional_data['conservedGenes'] = pd.read_excel(
            comparison_name + '.xlsx', index_col=1,
            header=0)['Inter-measures.T50_common_count']

        # Second pass: re-analyze with plots, now with conserved genes attached.
        self.analyzeCase(None, toggleAdjustText=False, dpi=300,
                         suffix=sub_dir, saveDir=case_dir, printStages=False,
                         toggleCalculateMajorMetric=False, toggleExportFigureData=True,
                         toggleCalculateMeasures=True, externalPanelsData=additional_data)
    except Exception as exception:
        print(exception)

    return
def analyzeBootstrapExperiments(self):
    '''Analyze all bootstrap experiments

    Runs `runPairOfExperiments` for the full-data case ('All') and every
    numbered bootstrap resample in parallel, then combines each experiment's
    dendrogram data into one multi-indexed frame saved to `self.dendroDataName`.

    Parameters:
        None

    Returns:
        None

    Usage:
        an = Analysis()
        an.analyzeBootstrapExperiments()
    '''
    # 'All' is the non-resampled case; the rest are numbered bootstrap runs.
    saveSubDirs = ['All'] + ['Experiment %s' % (id + 1) for id in self.bootstrapExperiments]
    pool = multiprocessing.Pool(processes=self.nCPUs)
    # When perEachOtherCase is set, each experiment is compared against the
    # matching bootstrap subdirectory of the other case; otherwise all are
    # compared against the same single directory.
    pool.map(self.runPairOfExperiments, [(self.bootstrapDir, saveSubDir, os.path.join(self.otherCaseDir, 'bootstrap/', saveSubDir, '') if self.perEachOtherCase else self.otherCaseDir) for saveSubDir in saveSubDirs])
    pool.close()
    pool.join()

    dfs = []
    for id in self.bootstrapExperiments:
        saveSubDir = 'Experiment %s' % (id + 1)
        filePath = os.path.join(self.bootstrapDir, saveSubDir, 'dendrogram-heatmap-%s-data.h5' % self.majorMetric)
        try:
            df_temp = pd.read_hdf(filePath, key='df_C')
            df_temp.index.name = 'gene'
            df_temp = df_temp.reset_index()
            # Wrap twice so the final index is (species, experiment, order).
            df_temp = pd.concat([df_temp], keys=[saveSubDir], axis=0, sort=False)
            df_temp = pd.concat([df_temp], keys=['species'], axis=0, sort=False)
            df_temp.index.names = ['species', 'experiment', 'order']
            dfs.append(df_temp)
        except Exception as exception:
            # A missing/corrupt experiment file is logged and skipped.
            print(exception)
            pass

    try:
        dfs = pd.concat(dfs, axis=0, sort=False)
        print(dfs)
        # Persist combined dendrogram data for analyzeCombinationVariant().
        dfs.to_hdf(self.dendroDataName, key='df', mode='a', complevel=4, complib='zlib')
    except Exception as exception:
        print(exception)
        pass

    return
def analyzeCombinationVariant(self, variant):
    ''' Analyze a combination of measures (same as in panels)

    Parameters:
        variant: str
            Name of combination variant (e.g. 'Avg combo4avgs', 'Avg combo3avgs')

    Returns:
        pandas.DataFrame
            Analysis result (per-gene bootstrap in-peak frequency plus an
            'In all' flag; empty frame if the analysis failed)

    Usage:
        an = Analysis()
        an.analyzeCombinationVariant(variant)
    '''

    def getPeaksLists(df_temp):
        '''Collect the genes in the dendrogram peak for each experiment.

        Returns:
            list: all peak genes pooled over experiments (with repeats)
            list: per-experiment lists of peak genes
            list: experiment identifiers
        '''
        peaksListsMerged = []
        peaksLists = []
        experiments = []
        for experiment in np.unique(df_temp.index.get_level_values('experiment')):
            se = df_temp.xs(experiment, level='experiment', axis=0).xs('species', level='species', axis=0)
            genesInHalfPeak = getGenesOfPeak(se)
            peaksListsMerged.extend(genesInHalfPeak)
            peaksLists.append(genesInHalfPeak)
            experiments.append(experiment)
        return peaksListsMerged, peaksLists, experiments

    print('Variant:', variant)

    df = pd.read_hdf(self.dendroDataName, key='df').fillna(0).set_index('gene', append=True).droplevel('order')[variant]

    variabilitySavePath = self.workingDir + '/variability_' + variant.replace(' ', '-') + '.xlsx'
    get_mean_std_cov_ofDataframe(df.unstack('gene').fillna(0.).T).to_excel(variabilitySavePath)

    # NOTE(review): assumes self.workingDir ends with a path separator -- confirm.
    writer = pd.ExcelWriter(self.workingDir + '%s bootstrap_in-peak_genes_SD.xlsx' % variant)

    # BUG FIX: df_res used to be assigned only inside the try block, so any
    # failure there made the trailing df_res['Bootstrap'].to_excel(...) raise
    # NameError. Provide an empty fallback frame up front.
    df_res = pd.DataFrame(columns=['Bootstrap', 'In all'])

    try:
        listsMerged, lists, experiments = getPeaksLists(df.copy())

        # Fraction of experiments in which each gene falls inside the peak.
        umL = np.unique(listsMerged, return_counts=True)
        se = pd.Series(index=umL[0], data=umL[1]/len(experiments)).sort_values(ascending=False)

        filePath = os.path.join(self.bootstrapDir + '/All/', 'dendrogram-heatmap-%s-data.xlsx' % self.majorMetric)
        peakGenesAll = getGenesOfPeak(pd.read_excel(filePath, index_col=0, header=0, sheet_name='Cluster index')[variant])

        df_res = pd.concat([se, se], axis=1, sort=False)
        # Mark genes that are also in the peak of the full ('All') data.
        df_res.iloc[:, 1] = np.where(np.isin(df_res.index.values, peakGenesAll), 1, np.nan)
        df_res.columns = ['Bootstrap', 'In all']

        try:
            dfv = pd.read_excel(variabilitySavePath, header=0, index_col=0).reindex(df_res.index)
            df_res = pd.concat([df_res, dfv], axis=1, sort=False)
        except Exception as exception:
            print('ERROR reading variability file:', exception)

        df_res.to_excel(writer, 'Sheet1')

        # One column per experiment, each filled with that experiment's peak genes.
        df_exp = pd.DataFrame(index=range(1000), columns=experiments)
        for i, col in enumerate(df_exp.columns):
            df_exp.iloc[:len(lists[i]), i] = lists[i]
        df_exp.to_excel(writer, 'Bootstrap lists', index=False)

    except Exception as exception:
        print('ERROR:', exception)

    writer.save()

    df_res['Bootstrap'].to_excel(self.workingDir + variant + '_variant.xlsx')

    return df_res
def _forScramble(self, args):
    '''Function used internally in multiprocessing

    Parameters:
        workingDir: str
            Working directory to retrieve and save file and results to
        measures: list
            Measures (e.g: [Markers', 'Binomial -log(pvalue)', 'Top50 overlap'])
        df: pandas.DataFrame
            Ordered genes data
        N: int
            Size of a chunk
        maxDistance: int
            Maximum distance away considered to be in peak
        halfWindowSize: int
            Moving average half-window size
        j: int
            Identifier of a chunk
        getMax: bool
            If True, return the highest per-gene in-peak frequency; otherwise
            pickle the raw per-shuffle peak lists to workingDir/<j>

    Returns:
        None

    Usage:
        For internal use only
    '''
    workingDir, measures, df, N, maxDistance, halfWindowSize, j, getMax = args
    # Re-seed per worker process so each chunk gets independent shuffles.
    np.random.seed()
    allGenes = df.index.values.copy()
    listsNonmerged = []
    for i in range(N):
        # Randomize gene order, then score each position by the smoothed sum
        # of the normalized measures and extract the genes in the peak.
        np.random.shuffle(allGenes)
        data = np.zeros(len(allGenes))
        for measure in measures:
            data += movingAverageCentered(normSum1(df[measure].loc[allGenes]), halfWindowSize, looped=False)
        data = movingAverageCentered(data, halfWindowSize, looped=False)
        listsNonmerged.append(getGenesOfPeak(pd.Series(index=allGenes, data=data), maxDistance=maxDistance))
    se = pd.Series(listsNonmerged)
    if getMax:
        # Align the variable-length peak lists into a boolean gene-presence
        # matrix; 'RemoveNaN' is a sentinel column for the padding values.
        dfs = se.apply(pd.Series).reset_index(drop=True).replace(np.nan, 'RemoveNaN')
        df = pd.DataFrame(index=range(len(dfs)), columns=np.unique(dfs.values.flatten()), data=False, dtype=np.bool_)
        try:
            df = df.drop('RemoveNaN', axis=1)
        except:
            pass
        df[:] = (df.columns.values==dfs.values[..., None]).any(axis=1)
        # Highest fraction of shuffles in which any single gene was in-peak.
        return df.mean(axis=0).max()
    else:
        se.to_pickle(workingDir + '%s' % j)
    return
def scramble(self, measures, subDir = '', case='All', N = 10**4, M = 20, getMax = False, maxSuff = ''):
'''Run control analysis for the dendrogram order
Parameters:
measures: list
Measures (e.g: [Markers', 'Binomial -log(pvalue)', 'Top50 overlap'])
subDir: str, Default ''
Subdirectory to save dataframe to
N: int
Chunk size
M: int
Number of chunks
Returns:
None
Usage:
an = Analysis()
an.scramble (measures)
'''
df = pd.read_excel(os.path.join(self.bootstrapDir + '%s/dendrogram-heatmap-%s-data.xlsx' % (case, self.majorMetric)), index_col=0, header=0, sheet_name='Cluster index')
workingDir = self.workingDir + 'random/'
if subDir != '':
workingDir = os.path.join(workingDir, subDir)
if not os.path.exists(workingDir):
os.makedirs(workingDir)
# Run the randomization and prepare results dataframe
if True:
print('\nCalculating chunks', flush=True)
pool = multiprocessing.Pool(processes=self.nCPUs)
result = pool.map(self._forScramble, [(workingDir, measures, df.copy(), N, 25, 10, j, getMax) for j in range(M)])
pool.close()
pool.join()
if getMax:
pd.Series(result).to_hdf(workingDir + 'max%s%s.h5' % (N, maxSuff), key='df', mode='a', complevel=4, complib='zlib')
return
print('\nCombining chunks', flush=True)
dfs = []
for j in range(M):
dfs.append(pd.read_pickle(workingDir + '%s' % j).apply(pd.Series))
print(j, end=' ', flush=True)
dfs = pd.concat(dfs, axis=0, sort=False).reset_index(drop=True).replace(np.nan, 'RemoveNaN')
print('\nAligning genes', flush=True)
df = pd.DataFrame(index=range(len(dfs)), columns=np.unique(dfs.values.flatten()), data=False, dtype=np.bool_)
try:
df = df.drop('RemoveNaN', axis=1)
except:
pass
df[:] = (df.columns.values==dfs.values[..., None]).any(axis=1)
print(df)
print('\nRecording', flush=True)
df.to_hdf(workingDir + 'combined_%s_aligned.h5' % M, key='df', mode='a', complevel=4, complib='zlib')
for j in range(M):
os.remove(workingDir + '%s' % j)
# Save and plot counts distribution
if True:
df = pd.read_hdf(workingDir + 'combined_%s_aligned.h5' % M, key='df')
se = df.sum(axis=0).sort_values(ascending=False)/df.shape[0]
se.to_excel(workingDir + 'se_distribution.xlsx')
se.hist(bins=250)
plt.savefig(workingDir + 'se_distribution.png', dpi=300)
plt.clf()
# Check for variation of i-th quantile
if False:
df = pd.read_hdf(workingDir + 'combined_%s_aligned.h5' % M, key='df')
q = 99.999
res = dict()
for i in range(1, 100):
print(i, end=' ', flush=True)
size = i*2*10**3
res.update({size: np.percentile((df.sample(n=size, axis=0, replace=True).sum(axis=0).sort_values(ascending=False)/size).values, q)})
| |
is_null ( ) ) : return ( lisp_ipc_map_cache_entry ( mc , jdata ) )
if 95 - 95: I1Ii111 * o0oOOo0O0Ooo + OoO0O00 % OoOoOO00 - ooOoO0o / OoOoOO00
if ( mc . source_cache == None ) : return ( [ True , jdata ] )
if 45 - 45: OoooooooOO / oO0o / o0oOOo0O0Ooo + Ii1I + O0 . iII111i
if 34 - 34: iIii1I11I1II1 . o0oOOo0O0Ooo + ooOoO0o
if 96 - 96: O0 / ooOoO0o
if 82 - 82: OoO0O00 * OOooOOo * I11i * I1Ii111 % iIii1I11I1II1
if 50 - 50: Ii1I * Ii1I % I11i / iIii1I11I1II1 / ooOoO0o / iII111i
jdata = mc . source_cache . walk_cache ( lisp_ipc_map_cache_entry , jdata )
return ( [ True , jdata ] )
if 91 - 91: Ii1I - O0 . I11i - OoooooooOO * IiII . II111iiii
if 38 - 38: I1IiiI + OoO0O00
if 11 - 11: iIii1I11I1II1 + i1IIi * IiII - Oo0Ooo
if 66 - 66: I1Ii111 . Ii1I / I1ii11iIi11i / iIii1I11I1II1 + O0 / i1IIi
if 72 - 72: ooOoO0o . II111iiii
if 32 - 32: I1Ii111 - oO0o + OoooooooOO . OoOoOO00 + i11iIiiIii / i1IIi
if 26 - 26: I1IiiI + OoooooooOO % OoOoOO00 . IiII - II111iiii . OoOoOO00
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
    lisp_ipc_listen_socket):

    #
    # Refresh the liveness timestamp if this dynamic-EID is already known.
    #
    eid_str = eid.print_address()
    if (db.dynamic_eids.has_key(eid_str)):
        db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
        return

    #
    # First sighting -- build and store a new dynamic-EID entry.
    #
    dyn_eid = lisp_dynamic_eid()
    dyn_eid.dynamic_eid.copy_address(eid)
    dyn_eid.interface = routed_interface
    dyn_eid.last_packet = lisp_get_timestamp()
    dyn_eid.get_timeout(routed_interface)
    db.dynamic_eids[eid_str] = dyn_eid

    routed_str = ""
    if (input_interface != routed_interface):
        routed_str = ", routed-interface " + routed_interface

    eid_string = green(eid_str, False) + bold(" discovered", False)
    lprint("Dynamic-EID {} on interface {}{}, timeout {}".format(eid_string,
        input_interface, routed_str, dyn_eid.timeout))

    #
    # Tell the lisp-etr process about the learned EID so it can register it.
    #
    ipc = "learn%{}%{}".format(eid_str, routed_interface)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
    return
if 45 - 45: OoO0O00
if 15 - 15: iII111i * o0oOOo0O0Ooo * Ii1I % IiII
if 31 - 31: ooOoO0o . IiII + I1ii11iIi11i * II111iiii * iII111i + Oo0Ooo
if 35 - 35: oO0o + I1ii11iIi11i / o0oOOo0O0Ooo
if 78 - 78: i11iIiiIii
if 21 - 21: iII111i / ooOoO0o - i11iIiiIii % iII111i
if 94 - 94: OoooooooOO / iII111i * ooOoO0o / i1IIi * i11iIiiIii * II111iiii
if 98 - 98: Ii1I * Ii1I / IiII
if 1 - 1: OOooOOo
if 47 - 47: i11iIiiIii - I11i
if 38 - 38: Oo0Ooo % OoooooooOO + iII111i
if 31 - 31: OoO0O00 + I1Ii111 / iIii1I11I1II1
if 11 - 11: ooOoO0o - OoOoOO00
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
    if (lisp_search_decap_keys == False): return

    #
    # Only retry for IPv4-style keys (no ':' in the address string).
    #
    if (addr_str.find(":") != -1): return

    current_entry = lisp_crypto_keys_by_rloc_decap[addr_str]

    for key_name in lisp_crypto_keys_by_rloc_decap:

        #
        # Consider only other entries for the same RLOC address (the key name
        # must contain the address but not be exactly it).
        #
        if (key_name.find(addr_str) == -1): continue
        if (key_name == addr_str): continue

        candidate = lisp_crypto_keys_by_rloc_decap[key_name]
        if (candidate == current_entry): continue

        #
        # Validate the packet's ICV against the candidate key before switching.
        #
        crypto_key = candidate[1]
        if (packet_icv != crypto_key.do_icv(packet, iv)):
            lprint("Test ICV with key {} failed".format(red(key_name, False)))
            continue

        lprint("Changing decap crypto key to {}".format(red(key_name, False)))
        lisp_crypto_keys_by_rloc_decap[addr_str] = candidate

    return
if 68 - 68: i11iIiiIii . iII111i + OoooooooOO + II111iiii + iIii1I11I1II1 % I11i
if 7 - 7: i1IIi - o0oOOo0O0Ooo - I1IiiI
if 62 - 62: OoOoOO00 * oO0o - I1IiiI / Ii1I
if 48 - 48: o0oOOo0O0Ooo % o0oOOo0O0Ooo - OoOoOO00
if 13 - 13: OoO0O00 - Ii1I . ooOoO0o / O0 * OoOoOO00
if 57 - 57: O0 + OoooooooOO % o0oOOo0O0Ooo / I1Ii111 / OOooOOo - OoOoOO00
if 48 - 48: o0oOOo0O0Ooo - II111iiii + OoOoOO00
if 54 - 54: II111iiii - OoO0O00 - o0oOOo0O0Ooo - O0 % I1Ii111
def lisp_decent_pull_xtr_configured():
    #
    # Pull-based decentralized mapping is on only when both the modulus and
    # the DNS suffix have been configured.
    #
    configured = (lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
    return (configured)
if 9 - 9: i1IIi % iII111i / Ii1I
if 83 - 83: oO0o
if 1 - 1: oO0o * iIii1I11I1II1 % iIii1I11I1II1 % iIii1I11I1II1 / oO0o + IiII
if 29 - 29: OoooooooOO
if 55 - 55: O0 - o0oOOo0O0Ooo % I1ii11iIi11i * I11i * oO0o
if 83 - 83: iIii1I11I1II1
if 92 - 92: OoO0O00 - iII111i
if 97 - 97: ooOoO0o / I11i . IiII + I1Ii111 . iIii1I11I1II1
def lisp_is_decent_dns_suffix(dns_name):
    #
    # Strip the first label and compare the remainder to the configured
    # LISP-Decent DNS suffix.
    #
    if (lisp_decent_dns_suffix == None): return (False)
    labels = dns_name.split(".")
    suffix = ".".join(labels[1::])
    return (suffix == lisp_decent_dns_suffix)
if 24 - 24: ooOoO0o - oO0o % OoOoOO00 * Oo0Ooo
if 54 - 54: Ii1I - OoooooooOO % I1IiiI + | |
<filename>xen/xen-4.2.2/tools/ocaml/libs/xl/genwrap.py
#!/usr/bin/python
import sys,os
import idl
# typename -> ( ocaml_type, c_from_ocaml, ocaml_from_c )
# Maps a libxl builtin C typename to a triple:
#   (OCaml type name, C-from-OCaml conversion template, OCaml-from-C template)
# Templates are %-format strings taking %(c)s (C lvalue) and %(o)s (OCaml value);
# None means that conversion direction is not implemented for the type.
# NOTE: the name 'builtins' shadows Python's builtin-module name; kept as-is.
builtins = {
    "bool": ("bool", "%(c)s = Bool_val(%(o)s)", "Val_bool(%(c)s)" ),
    "int": ("int", "%(c)s = Int_val(%(o)s)", "Val_int(%(c)s)" ),
    "char *": ("string", "%(c)s = dup_String_val(gc, %(o)s)", "caml_copy_string(%(c)s)"),
    "libxl_domid": ("domid", "%(c)s = Int_val(%(o)s)", "Val_int(%(c)s)" ),
    "libxl_devid": ("devid", "%(c)s = Int_val(%(o)s)", "Val_int(%(c)s)" ),
    "libxl_defbool": ("bool option", "%(c)s = Defbool_val(%(o)s)", "Val_defbool(%(c)s)" ),
    "libxl_uuid": ("int array", "Uuid_val(gc, lg, &%(c)s, %(o)s)", "Val_uuid(&%(c)s)"),
    "libxl_key_value_list": ("(string * string) list", None, None),
    "libxl_mac": ("int array", "Mac_val(gc, lg, &%(c)s, %(o)s)", "Val_mac(&%(c)s)"),
    "libxl_hwcap": ("int32 array", None, "Val_hwcap(&%(c)s)"),
    }

# Stub signatures shared by all device types: each takes (t, domid) and
# returns unit.
DEVICE_FUNCTIONS = [ ("add", ["t", "domid", "unit"]),
                     ("remove", ["t", "domid", "unit"]),
                     ("destroy", ["t", "domid", "unit"]),
                   ]

# Per-IDL-type stub functions to expose as OCaml externals:
# rawname -> [ ( name , [type1,type2,....] ) ]
functions = { # ( name , [type1,type2,....] )
    "device_vfb": DEVICE_FUNCTIONS,
    "device_vkb": DEVICE_FUNCTIONS,
    "device_disk": DEVICE_FUNCTIONS,
    "device_nic": DEVICE_FUNCTIONS,
    "device_pci": DEVICE_FUNCTIONS,
    "physinfo": [ ("get", ["unit", "t"]),
                ],
    "cputopology": [ ("get", ["unit", "t array"]),
                   ],
    "domain_sched_params":
                  [ ("get", ["domid", "t"]),
                    ("set", ["domid", "t", "unit"]),
                  ],
}
def stub_fn_name(ty, name):
    """Return the C stub symbol name for method *name* of IDL type *ty*."""
    return "stub_xl_{0}_{1}".format(ty.rawname, name)
def ocaml_type_of(ty):
    """Return the OCaml type name corresponding to IDL type *ty*.

    Raises NotImplementedError for unsupported integer widths and for
    builtins with no OCaml mapping.
    """
    if ty.rawname in ["domid","devid"]:
        return ty.rawname
    elif isinstance(ty,idl.UInt):
        if ty.width in [8, 16]:
            # handle as ints
            width = None
        elif ty.width in [32, 64]:
            width = ty.width
        else:
            raise NotImplementedError("Cannot handle %d-bit int" % ty.width)
        if width:
            return "int%d" % ty.width
        else:
            return "int"
    elif isinstance(ty,idl.Array):
        return "%s array" % ocaml_type_of(ty.elem_type)
    elif isinstance(ty,idl.Builtin):
        # BUG FIX: dict.has_key() was removed in Python 3; `in` works on both.
        if ty.typename not in builtins:
            raise NotImplementedError("Unknown Builtin %s (%s)" % (ty.typename, type(ty)))
        typename,_,_ = builtins[ty.typename]
        if not typename:
            raise NotImplementedError("No typename for Builtin %s (%s)" % (ty.typename, type(ty)))
        return typename
    elif isinstance(ty,idl.Aggregate):
        # Aggregates become a generated module, e.g. Physinfo.t
        return ty.rawname.capitalize() + ".t"
    else:
        return ty.rawname
def ocaml_instance_of(type, name):
    """Return an OCaml field declaration "name : type" for the given IDL type."""
    # NOTE: the parameter name 'type' shadows the builtin; kept for
    # interface compatibility with existing callers.
    return "{0} : {1}".format(name, ocaml_type_of(type))
def gen_ocaml_ml(ty, interface, indent=""):
    """Generate OCaml source for IDL type *ty*.

    interface=True produces .mli (sig) text, False produces .ml (struct) text.
    Only Enumeration and Aggregate types are supported.
    """
    # NOTE(review): this header string is overwritten by both branches below,
    # so it never reaches the output -- confirm whether it was meant to be
    # prepended (s += ...) instead.
    if interface:
        s = ("""(* %s interface *)\n""" % ty.typename)
    else:
        s = ("""(* %s implementation *)\n""" % ty.typename)

    if isinstance(ty, idl.Enumeration):
        s = "type %s = \n" % ty.rawname
        for v in ty.values:
            s += "\t | %s\n" % v.rawname
    elif isinstance(ty, idl.Aggregate):
        s = ""
        if ty.typename is None:
            raise NotImplementedError("%s has no typename" % type(ty))
        else:
            module_name = ty.rawname[0].upper() + ty.rawname[1:]
            if interface:
                s += "module %s : sig\n" % module_name
            else:
                s += "module %s = struct\n" % module_name
            s += "\ttype t =\n"
            s += "\t{\n"
            for f in ty.fields:
                if f.type.private:
                    continue
                x = ocaml_instance_of(f.type, f.name)
                x = x.replace("\n", "\n\t\t")
                s += "\t\t" + x + ";\n"
            s += "\t}\n"
            # Emit externals for any hand-registered stub functions.
            # BUG FIX: dict.has_key() was removed in Python 3; `in` works on both.
            if ty.rawname in functions:
                for name,args in functions[ty.rawname]:
                    s += "\texternal %s : " % name
                    s += " -> ".join(args)
                    s += " = \"%s\"\n" % stub_fn_name(ty,name)
            s += "end\n"
    else:
        raise NotImplementedError("%s" % type(ty))
    return s.replace("\n", "\n%s" % indent)
def c_val(ty, c, o, indent="", parent = None):
    """Emit C statements converting OCaml value *o* into C lvalue *c*.

    *parent* carries the enclosing aggregate's access prefix when recursing
    into fields; top-level enums/aggregates (parent is None) are expanded
    inline, nested ones delegate to their generated <ty>_val() helper.
    """
    s = indent
    if isinstance(ty,idl.UInt):
        if ty.width in [8, 16]:
            # handle as ints
            width = None
        elif ty.width in [32, 64]:
            width = ty.width
        else:
            raise NotImplementedError("Cannot handle %d-bit int" % ty.width)
        if width:
            s += "%s = Int%d_val(%s);" % (c, width, o)
        else:
            s += "%s = Int_val(%s);" % (c, o)
    elif isinstance(ty,idl.Builtin):
        # BUG FIX: dict.has_key() was removed in Python 3; `in` works on both.
        if ty.typename not in builtins:
            raise NotImplementedError("Unknown Builtin %s (%s)" % (ty.typename, type(ty)))
        _,fn,_ = builtins[ty.typename]
        if not fn:
            raise NotImplementedError("No c_val fn for Builtin %s (%s)" % (ty.typename, type(ty)))
        s += "%s;" % (fn % { "o": o, "c": c })
    elif isinstance (ty,idl.Array):
        # BUG FIX: the original `raise("...")` raised a plain string, which is
        # itself a TypeError on Python 2.6+; raise a real exception type.
        raise NotImplementedError("Cannot handle Array type\n")
    elif isinstance(ty,idl.Enumeration) and (parent is None):
        n = 0
        s += "switch(Int_val(%s)) {\n" % o
        for e in ty.values:
            s += " case %d: *%s = %s; break;\n" % (n, c, e.name)
            n += 1
        s += " default: failwith_xl(\"cannot convert value to %s\", lg); break;\n" % ty.typename
        s += "}"
    elif isinstance(ty, idl.Aggregate) and (parent is None):
        n = 0
        for f in ty.fields:
            if f.type.private:
                continue
            (nparent,fexpr) = ty.member(c, f, parent is None)
            s += "%s\n" % c_val(f.type, fexpr, "Field(%s, %d)" % (o,n), parent=nparent)
            n = n + 1
    else:
        s += "%s_val(gc, lg, %s, %s);" % (ty.rawname, ty.pass_arg(c, parent is None, passby=idl.PASS_BY_REFERENCE), o)

    return s.replace("\n", "\n%s" % indent)
def gen_c_val(ty, indent=""):
    """Generate the C helper function <ty>_val converting an OCaml value
    into the corresponding C structure."""
    arg = ty.make_arg("c_val", passby=idl.PASS_BY_REFERENCE)
    parts = [
        "/* Convert caml value to %s */\n" % ty.rawname,
        "static int %s_val (caml_gc *gc, struct caml_logger *lg, %s, value v)\n" % (ty.rawname, arg),
        "{\n",
        "\tCAMLparam1(v);\n",
        "\n",
        c_val(ty, "c_val", "v", indent="\t") + "\n",
        "\tCAMLreturn(0);\n",
        "}\n",
    ]
    return "".join(parts).replace("\n", "\n%s" % indent)
def ocaml_Val(ty, o, c, indent="", parent = None):
    """Emit C statements converting C expression *c* into OCaml value lvalue *o*.

    Mirror image of c_val(); *parent* carries the enclosing aggregate's
    access prefix when recursing into fields.
    """
    s = indent
    if isinstance(ty,idl.UInt):
        if ty.width in [8, 16]:
            # handle as ints
            width = None
        elif ty.width in [32, 64]:
            width = ty.width
        else:
            raise NotImplementedError("Cannot handle %d-bit int" % ty.width)
        if width:
            s += "%s = caml_copy_int%d(%s);" % (o, width, c)
        else:
            s += "%s = Val_int(%s);" % (o, c)
    elif isinstance(ty,idl.Builtin):
        # BUG FIX: dict.has_key() was removed in Python 3; `in` works on both.
        if ty.typename not in builtins:
            raise NotImplementedError("Unknown Builtin %s (%s)" % (ty.typename, type(ty)))
        _,_,fn = builtins[ty.typename]
        if not fn:
            raise NotImplementedError("No ocaml Val fn for Builtin %s (%s)" % (ty.typename, type(ty)))
        s += "%s = %s;" % (o, fn % { "c": c })
    elif isinstance(ty, idl.Array):
        s += "{\n"
        s += "\t int i;\n"
        s += "\t value array_elem;\n"
        s += "\t %s = caml_alloc(%s,0);\n" % (o, parent + ty.lenvar.name)
        s += "\t for(i=0; i<%s; i++) {\n" % (parent + ty.lenvar.name)
        s += "\t %s\n" % ocaml_Val(ty.elem_type, "array_elem", c + "[i]", "")
        s += "\t Store_field(%s, i, array_elem);\n" % o
        s += "\t }\n"
        s += "\t}"
    elif isinstance(ty,idl.Enumeration) and (parent is None):
        n = 0
        s += "switch(%s) {\n" % c
        for e in ty.values:
            # BUG FIX: the original emitted Int_val(%d), which is the
            # OCaml-to-C unboxing macro; building an OCaml value from a C
            # integer requires Val_int.
            s += " case %s: %s = Val_int(%d); break;\n" % (e.name, o, n)
            n += 1
        s += " default: failwith_xl(\"cannot convert value from %s\", lg); break;\n" % ty.typename
        s += "}"
    elif isinstance(ty,idl.Aggregate) and (parent is None):
        s += "{\n"
        s += "\tvalue %s_field;\n" % ty.rawname
        s += "\n"
        s += "\t%s = caml_alloc_tuple(%d);\n" % (o, len(ty.fields))
        n = 0
        for f in ty.fields:
            if f.type.private:
                continue
            (nparent,fexpr) = ty.member(c, f, parent is None)
            s += "\n"
            s += "\t%s\n" % ocaml_Val(f.type, "%s_field" % ty.rawname, ty.pass_arg(fexpr, c), parent=nparent)
            s += "\tStore_field(%s, %d, %s);\n" % (o, n, "%s_field" % ty.rawname)
            n = n + 1
        s += "}"
    else:
        s += "%s = Val_%s(gc, lg, %s);" % (o, ty.rawname, ty.pass_arg(c, parent is None))

    # NOTE(review): rstrip(indent) strips any trailing run of the indent
    # *characters* (char-set semantics), not the suffix -- confirm intended.
    return s.replace("\n", "\n%s" % indent).rstrip(indent)
def gen_Val_ocaml(ty, indent=""):
    """Generate the C helper function Val_<ty> converting a C structure
    into the corresponding OCaml value."""
    parts = [
        "/* Convert %s to a caml value */\n" % ty.rawname,
        "static value Val_%s (caml_gc *gc, struct caml_logger *lg, %s)\n" % (ty.rawname, ty.make_arg(ty.rawname+"_c")),
        "{\n",
        "\tCAMLparam0();\n",
        "\tCAMLlocal1(%s_ocaml);\n" % ty.rawname,
        ocaml_Val(ty, "%s_ocaml" % ty.rawname, "%s_c" % ty.rawname, indent="\t") + "\n",
        "\tCAMLreturn(%s_ocaml);\n" % ty.rawname,
        "}\n",
    ]
    return "".join(parts).replace("\n", "\n%s" % indent)
def gen_c_stub_prototype(ty, fns):
    """Generate C prototypes for the stub functions of IDL type *ty*."""
    parts = ["/* Stubs for %s */\n" % ty.rawname]
    for name, args in fns:
        # For N args we return one value and take N-1 values as parameters
        params = ", ".join(["value v%d" % v for v in range(1, len(args))])
        parts.append("value %s(%s);\n" % (stub_fn_name(ty, name), params))
    return "".join(parts)
def autogen_header(open_comment, close_comment):
    """Return the do-not-edit banner comment for generated files.

    open_comment/close_comment are the language's comment delimiters
    (e.g. "(*", "*)" for OCaml, "/*", "*/" for C).
    """
    s = open_comment + " AUTO-GENERATED FILE DO NOT EDIT " + close_comment + "\n"
    s += open_comment + " autogenerated by \n"
    # Indent the command line under the comment opener. The original built
    # this run of spaces with a Python-2-only reduce() fold; plain string
    # repetition is equivalent and works on Python 3 as well.
    s += " " * len(open_comment + " ")
    s += "%s" % " ".join(sys.argv)
    s += "\n " + close_comment + "\n\n"
    return s
if __name__ == '__main__':
if len(sys.argv) < 4:
print >>sys.stderr, "Usage: genwrap.py <idl> <mli> <ml> <c-inc>"
sys.exit(1)
(_,types) = idl.parse(sys.argv[1])
# Do not generate these yet.
blacklist = [
"cpupoolinfo",
"domain_create_info",
"domain_build_info",
"vcpuinfo",
"event",
]
for t in blacklist:
if t not in [ty.rawname for ty in types]:
print "unknown type %s in blacklist" % t
types = [ty for ty in types if not ty.rawname in blacklist]
_ml = sys.argv[3]
ml = open(_ml, 'w')
ml.write(autogen_header("(*", "*)"))
_mli = sys.argv[2]
mli = open(_mli, 'w')
mli.write(autogen_header("(*", "*)"))
_cinc = sys.argv[4]
cinc = open(_cinc, 'w')
cinc.write(autogen_header("/*", "*/"))
for ty | |
product.variants.get()
url = reverse(
'dashboard:variant-delete',
kwargs={'product_pk': product.pk, 'variant_pk': variant.pk})
response = admin_client.get(url)
assert response.status_code == 200
assert ProductVariant.objects.filter(pk=variant.pk).exists()
def test_view_variant_images(admin_client, product_with_image):
    """POSTing image pks to the variant-images view assigns them to the
    variant and redirects to the variant details page."""
    variant = product_with_image.variants.get()
    product_image = product_with_image.images.get()
    url = reverse(
        'dashboard:variant-images',
        kwargs={'product_pk': product_with_image.pk, 'variant_pk': variant.pk})
    data = {'images': [product_image.pk]}
    response = admin_client.post(url, data)
    # Successful form submission redirects to the details view.
    assert response.status_code == 302
    assert get_redirect_location(response) == reverse(
        'dashboard:variant-details',
        kwargs={'product_pk': product_with_image.pk, 'variant_pk': variant.pk})
    assert variant.variant_images.filter(image=product_image).exists()
def test_view_ajax_available_variants_list(
        admin_client, product, category, settings):
    """The AJAX available-variants endpoint lists only variants of
    published products."""
    # Create an unpublished product; its variant must not appear in results.
    unavailable_product = Product.objects.create(
        name='Test product', price=Money(10, settings.DEFAULT_CURRENCY),
        product_type=product.product_type,
        category=category, is_published=False)
    unavailable_product.variants.create()
    url = reverse('dashboard:ajax-available-variants')
    response = admin_client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    assert response.status_code == 200
    resp_decoded = json.loads(response.content.decode('utf-8'))
    variant = product.variants.get()
    assert resp_decoded.get('results') == [
        {'id': variant.id, 'text': variant.get_ajax_label()}]
def test_view_product_images(admin_client, product_with_image):
    """The product image list view shows the product's existing images."""
    product_image = product_with_image.images.get()
    url = reverse(
        'dashboard:product-image-list',
        kwargs={'product_pk': product_with_image.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    assert response.context['product'] == product_with_image
    assert not response.context['is_empty']
    images = response.context['images']
    assert len(images) == 1
    assert product_image in images
def test_view_product_image_create(
        monkeypatch, admin_client, product_with_image):
    """Uploading a new image through the add view stores it with its alt
    text and schedules thumbnail generation."""
    # Stub out the celery task so no real thumbnails are generated.
    mock_create_thumbnails = Mock(return_value=None)
    monkeypatch.setattr(
        'saleor.dashboard.product.forms.create_product_thumbnails.delay',
        mock_create_thumbnails)

    url = reverse(
        'dashboard:product-image-add',
        kwargs={'product_pk': product_with_image.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    image, image_name = create_image()
    data = {'image_0': image, 'alt': ['description']}
    response = admin_client.post(url, data, follow=True)
    assert response.status_code == 200
    # The fixture already has one image; the upload adds a second.
    assert ProductImage.objects.count() == 2
    product_with_image.refresh_from_db()
    images = product_with_image.images.all()
    assert len(images) == 2
    assert image_name in images[1].image.name
    assert images[1].alt == 'description'
    mock_create_thumbnails.assert_called_once_with(images[1].pk)
def test_view_product_image_edit_same_image_add_description(
        monkeypatch, admin_client, product_with_image):
    """Editing an image without replacing the file updates the alt text in
    place and regenerates thumbnails."""
    # Stub out the celery task so no real thumbnails are generated.
    mock_create_thumbnails = Mock(return_value=None)
    monkeypatch.setattr(
        'saleor.dashboard.product.forms.create_product_thumbnails.delay',
        mock_create_thumbnails)

    product_image = product_with_image.images.all()[0]
    url = reverse(
        'dashboard:product-image-update',
        kwargs={
            'img_pk': product_image.pk,
            'product_pk': product_with_image.pk})
    # 'image_1' looks like the cropping/focal-point widget value -- TODO confirm.
    data = {'image_1': ['0.49x0.59'], 'alt': ['description']}
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(url, data, follow=True)
    assert response.status_code == 200
    # No new image row is created; the existing one is updated.
    assert product_with_image.images.count() == 1
    product_image.refresh_from_db()
    assert product_image.alt == 'description'
    mock_create_thumbnails.assert_called_once_with(product_image.pk)
def test_view_product_image_edit_new_image(
        monkeypatch, admin_client, product_with_image):
    """Editing an image with a replacement file swaps the stored file while
    keeping the same image row."""
    # Stub out the celery task so no real thumbnails are generated.
    mock_create_thumbnails = Mock(return_value=None)
    monkeypatch.setattr(
        'saleor.dashboard.product.forms.create_product_thumbnails.delay',
        mock_create_thumbnails)

    product_image = product_with_image.images.all()[0]
    url = reverse(
        'dashboard:product-image-update',
        kwargs={
            'img_pk': product_image.pk,
            'product_pk': product_with_image.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    image, image_name = create_image()
    data = {'image_0': image, 'alt': ['description']}
    response = admin_client.post(url, data, follow=True)
    assert response.status_code == 200
    # Still a single image row; its file was replaced.
    assert product_with_image.images.count() == 1
    product_image.refresh_from_db()
    assert image_name in product_image.image.name
    assert product_image.alt == 'description'
    mock_create_thumbnails.assert_called_once_with(product_image.pk)
def test_view_product_image_delete(admin_client, product_with_image):
    """POSTing to the delete view removes the image and redirects."""
    product_image = product_with_image.images.all()[0]
    url = reverse(
        'dashboard:product-image-delete',
        kwargs={
            'img_pk': product_image.pk,
            'product_pk': product_with_image.pk})
    response = admin_client.post(url)
    assert response.status_code == 302
    assert not ProductImage.objects.filter(pk=product_image.pk)
def test_view_product_image_not_deleted_before_confirmation(
        admin_client, product_with_image):
    """GETting the delete view only shows the confirmation page; the image
    must survive until the delete is POSTed."""
    product_image = product_with_image.images.all()[0]
    url = reverse(
        'dashboard:product-image-delete',
        kwargs={
            'img_pk': product_image.pk,
            'product_pk': product_with_image.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    assert ProductImage.objects.filter(pk=product_image.pk).count()
def test_view_ajax_reorder_product_images(admin_client, product_with_images):
    """POSTing a permutation of image pks via AJAX persists the new order."""
    order_before = [img.pk for img in product_with_images.images.all()]
    ordered_images = list(reversed(order_before))
    url = reverse(
        'dashboard:product-images-reorder',
        kwargs={'product_pk': product_with_images.pk})
    data = {'ordered_images': ordered_images}
    response = admin_client.post(
        url, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    assert response.status_code == 200
    order_after = [img.pk for img in product_with_images.images.all()]
    assert order_after == ordered_images
def test_view_ajax_reorder_product_images_invalid(
        admin_client, product_with_images):
    """Reordering with a pk that does not belong to the product must fail
    with a 400 and an error on 'ordered_images'."""
    order_before = [img.pk for img in product_with_images.images.all()]
    # BUG FIX: the original used list(reversed(...)).append(3); list.append
    # mutates in place and returns None, so the view was posted
    # 'ordered_images': None instead of an invalid ordering list. Build the
    # reversed order plus one pk guaranteed not to belong to the product.
    ordered_images = list(reversed(order_before)) + [max(order_before) + 1]
    url = reverse(
        'dashboard:product-images-reorder',
        kwargs={'product_pk': product_with_images.pk})
    data = {'ordered_images': ordered_images}
    response = admin_client.post(
        url, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    assert response.status_code == 400
    resp_decoded = json.loads(response.content.decode('utf-8'))
    assert 'error' in resp_decoded
    assert 'ordered_images' in resp_decoded['error']
def test_view_ajax_upload_image(monkeypatch, admin_client, product_with_image):
    """Uploading an image through the AJAX endpoint stores it and schedules
    thumbnail generation."""
    # Stub out the celery task so no real thumbnails are generated.
    mock_create_thumbnails = Mock(return_value=None)
    monkeypatch.setattr(
        'saleor.dashboard.product.forms.create_product_thumbnails.delay',
        mock_create_thumbnails)

    product = product_with_image
    url = reverse(
        'dashboard:product-images-upload', kwargs={'product_pk': product.pk})
    image, image_name = create_image()
    data = {'image_0': image, 'alt': ['description']}
    response = admin_client.post(
        url, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    assert response.status_code == 200
    # The fixture already has one image; the upload adds a second.
    assert ProductImage.objects.count() == 2
    product_with_image.refresh_from_db()
    images = product_with_image.images.all()
    assert len(images) == 2
    assert image_name in images[1].image.name
    mock_create_thumbnails.assert_called_once_with(images[1].pk)
def test_view_attribute_list_no_results(admin_client):
    """The attribute list view renders with an empty object list."""
    url = reverse('dashboard:attributes')
    response = admin_client.get(url)
    assert response.status_code == 200
    assert response.context['attributes'].object_list == []
def test_view_attribute_list(db, admin_client, color_attribute):
    """The attribute list shows the fixture attribute with pk, name, values."""
    url = reverse('dashboard:attributes')
    response = admin_client.get(url)
    assert response.status_code == 200
    result = response.context['attributes'].object_list
    assert len(result) == 1
    # object_list rows are tuples: (pk, name, ..., values)
    assert result[0][0] == color_attribute.pk
    assert result[0][1] == color_attribute.name
    # color_attribute fixture ships with two values
    assert len(result[0][3]) == 2
    assert not response.context['is_empty']
def test_view_attribute_details(admin_client, color_attribute):
    """The details view exposes the requested attribute in context."""
    url = reverse(
        'dashboard:attribute-details',
        kwargs={'pk': color_attribute.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    assert response.context['attribute'] == color_attribute
def test_view_attribute_details_no_choices(admin_client):
    """The details view also renders for an attribute without values."""
    attribute = Attribute.objects.create(slug='size', name='Size')
    url = reverse(
        'dashboard:attribute-details', kwargs={'pk': attribute.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    assert response.context['attribute'] == attribute
def test_view_attribute_create(admin_client, color_attribute):
    """A valid POST to attribute-add creates a second attribute."""
    url = reverse('dashboard:attribute-add')
    data = {'name': 'test', 'slug': 'test'}
    response = admin_client.post(url, data, follow=True)
    assert response.status_code == 200
    assert Attribute.objects.count() == 2
def test_view_attribute_create_not_valid(admin_client, color_attribute):
    """An empty POST to attribute-add creates nothing."""
    url = reverse('dashboard:attribute-add')
    data = {}
    response = admin_client.post(url, data, follow=True)
    assert response.status_code == 200
    # only the fixture attribute remains
    assert Attribute.objects.count() == 1
def test_view_attribute_edit(color_attribute, admin_client):
    """A valid POST to attribute-update renames the attribute in place."""
    url = reverse(
        'dashboard:attribute-update',
        kwargs={'pk': color_attribute.pk})
    data = {'name': 'new_name', 'slug': 'new_slug'}
    response = admin_client.post(url, data, follow=True)
    assert response.status_code == 200
    assert Attribute.objects.count() == 1
    color_attribute.refresh_from_db()
    assert color_attribute.name == 'new_name'
    assert color_attribute.slug == 'new_slug'
def test_view_attribute_delete(admin_client, color_attribute):
    """POST to attribute-delete removes the attribute and redirects."""
    url = reverse(
        'dashboard:attribute-delete',
        kwargs={'pk': color_attribute.pk})
    response = admin_client.post(url)
    assert response.status_code == 302
    assert not Attribute.objects.filter(pk=color_attribute.pk).exists()
def test_view_attribute_not_deleted_before_confirmation(
        admin_client, color_attribute):
    """GET on the delete view only shows confirmation; attribute survives."""
    url = reverse(
        'dashboard:attribute-delete',
        kwargs={'pk': color_attribute.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    # use exists() (as the POST-delete test above does) instead of relying
    # on the truthiness of an evaluated queryset
    assert Attribute.objects.filter(pk=color_attribute.pk).exists()
def test_view_attribute_value_create(color_attribute, admin_client):
    """A valid POST to attribute-value-add appends a third value."""
    values = AttributeValue.objects.filter(attribute=color_attribute.pk)
    assert values.count() == 2
    url = reverse(
        'dashboard:attribute-value-add',
        kwargs={'attribute_pk': color_attribute.pk})
    data = {'name': 'Pink', 'attribute': color_attribute.pk}
    response = admin_client.post(url, data, follow=True)
    assert response.status_code == 200
    values = AttributeValue.objects.filter(attribute=color_attribute.pk)
    assert values.count() == 3
def test_view_attribute_value_create_invalid(
        color_attribute, admin_client):
    """An empty POST to attribute-value-add creates no value."""
    values = AttributeValue.objects.filter(attribute=color_attribute.pk)
    assert values.count() == 2
    url = reverse(
        'dashboard:attribute-value-add',
        kwargs={'attribute_pk': color_attribute.pk})
    data = {}
    response = admin_client.post(url, data, follow=True)
    assert response.status_code == 200
    values = AttributeValue.objects.filter(attribute=color_attribute.pk)
    assert values.count() == 2
def test_view_attribute_value_edit(color_attribute, admin_client):
    """A valid POST to attribute-value-update renames the first value."""
    values = AttributeValue.objects.filter(attribute=color_attribute.pk)
    assert values.count() == 2
    url = reverse(
        'dashboard:attribute-value-update',
        kwargs={'attribute_pk': color_attribute.pk, 'value_pk': values[0].pk})
    data = {'name': 'Pink', 'attribute': color_attribute.pk}
    response = admin_client.post(url, data, follow=True)
    assert response.status_code == 200
    values = AttributeValue.objects.filter(
        attribute=color_attribute.pk, name='Pink')
    assert len(values) == 1
    assert values[0].name == 'Pink'
def test_view_attribute_value_delete(color_attribute, admin_client):
    """POST to attribute-value-delete removes exactly the targeted value."""
    values = AttributeValue.objects.filter(attribute=color_attribute.pk)
    assert values.count() == 2
    deleted_value = values[0]
    url = reverse(
        'dashboard:attribute-value-delete',
        kwargs={
            'attribute_pk': color_attribute.pk, 'value_pk': deleted_value.pk})
    response = admin_client.post(url, follow=True)
    assert response.status_code == 200
    values = AttributeValue.objects.filter(attribute=color_attribute.pk)
    assert len(values) == 1
    assert deleted_value not in values
def test_view_ajax_reorder_attribute_values(
        admin_client, color_attribute):
    """Posting a reversed value ordering via AJAX persists the new order."""
    order_before = [val.pk for val in color_attribute.values.all()]
    ordered_values = list(reversed(order_before))
    url = reverse(
        'dashboard:attribute-values-reorder',
        kwargs={'attribute_pk': color_attribute.pk})
    data = {'ordered_values': ordered_values}
    response = admin_client.post(
        url, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    order_after = [val.pk for val in color_attribute.values.all()]
    assert response.status_code == 200
    assert order_after == ordered_values
def test_view_ajax_reorder_attribute_values_invalid(
        admin_client, color_attribute):
    """Posting a value ordering containing an unknown pk returns 400.

    BUG FIX: the original used ``list(reversed(order_before)).append(3)``,
    which evaluates to ``None`` (list.append returns None), so the view was
    rejecting a missing field rather than an invalid ordering.
    """
    order_before = [val.pk for val in color_attribute.values.all()]
    ordered_values = list(reversed(order_before))
    # add an extra pk that (presumably) is not one of this attribute's
    # values, making the submitted ordering invalid
    ordered_values.append(3)
    url = reverse(
        'dashboard:attribute-values-reorder',
        kwargs={'attribute_pk': color_attribute.pk})
    data = {'ordered_values': ordered_values}
    response = admin_client.post(
        url, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    assert response.status_code == 400
    resp_decoded = json.loads(response.content.decode('utf-8'))
    assert 'error' in resp_decoded
    assert 'ordered_values' in resp_decoded['error']
def test_get_formfield_name_with_unicode_characters(db):
    """get_formfield_name() handles non-ASCII slugs without mangling them."""
    text_attribute = Attribute.objects.create(
        slug='ąęαβδηθλμπ', name='ąęαβδηθλμπ')
    assert text_attribute.get_formfield_name() == 'attribute-ąęαβδηθλμπ-{}'.format(
        text_attribute.pk)
def test_product_variant_form(product, size_attribute):
    """Saving a nameless variant derives its name from the attribute value."""
    variant = product.variants.first()
    variant.name = ''
    variant.save()
    example_size = 'Small Size'
    # attribute form fields are keyed 'attribute-<slug>-<pk>'
    data = {
        'attribute-{}-{}'.format(
            size_attribute.slug, size_attribute.pk): example_size,
        'sku': '1111', 'quantity': 2}
    form = ProductVariantForm(data, instance=variant)
    assert form.is_valid()
    form.save()
    variant.refresh_from_db()
    assert variant.name == example_size
def test_hide_field_in_variant_choice_field_form():
    """With a single variant the choice field collapses to a hidden input."""
    form = VariantChoiceField(Mock())
    variants, cart = MagicMock(), MagicMock()
    # simulate a queryset with exactly one variant whose pk is 'test'
    variants.count.return_value = variants.all().count.return_value = 1
    variants.all()[0].pk = 'test'
    form.update_field_data(variants, discounts=None, taxes=None)
    assert isinstance(form.widget, HiddenInput)
    assert form.widget.attrs.get('value') == 'test'
def test_product_form_change_attributes(db, product, color_attribute):
    """ProductForm stores selected values and auto-creates free-text values."""
    product_type = product.product_type
    text_attribute = Attribute.objects.create(
        slug='author', name='Author')
    product_type.product_attributes.add(text_attribute)
    color_value = color_attribute.values.first()
    new_author = 'Main Tester'
    data = {
        'name': product.name,
        'price': product.price.amount,
        'category': product.category.pk,
        'description': 'description',
        'attribute-{}-{}'.format(
            text_attribute.slug, text_attribute.pk): new_author,
        'attribute-{}-{}'.format(
            color_attribute.slug, color_attribute.pk): color_value.pk}
    form = ProductForm(data, instance=product)
    assert form.is_valid()
    product = form.save()
    # attributes are stored as a {attribute_pk: value_pk} string mapping
    assert product.attributes[str(color_attribute.pk)] == str(color_value.pk)
    # Check that new attribute was created for author
    author_value = AttributeValue.objects.get(name=new_author)
    assert product.attributes[str(text_attribute.pk)] == str(author_value.pk)
def test_product_form_assign_collection_to_product(product):
    """ProductForm links the product and collection in both directions."""
    collection = Collection.objects.create(name='test_collections')
    data = {
        'name': product.name,
        'price': product.price.amount,
        'category': product.category.pk,
        'description': 'description',
        'collections': [collection.pk]}
    form = ProductForm(data, instance=product)
    assert form.is_valid()
    form.save()
    assert product.collections.first().name == 'test_collections'
    assert collection.products.first().name == product.name
def test_product_form_sanitize_product_description(
        product_type, category, settings):
    """seo_description is derived from description with markup stripped.

    NOTE(review): the posted ``<script>`` tag survives unchanged in
    ``description`` here -- sanitisation appears to apply only to the
    derived ``seo_description``; confirm against ProductForm.clean.
    """
    product = Product.objects.create(
        name='Test Product', price=Money(10, settings.DEFAULT_CURRENCY),
        description='', pk=10, product_type=product_type, category=category)
    data = model_to_dict(product)
    data['description'] = (
        '<b>bold</b><p><i>italic</i></p><h2>Header</h2><h3>subheader</h3>'
        '<blockquote>quote</blockquote>'
        '<p><a href="www.mirumee.com">link</a></p>'
        '<p>an <script>evil()</script>example</p>')
    data['price'] = 20
    form = ProductForm(data, instance=product)
    assert form.is_valid()
    form.save()
    assert product.description == (
        '<b>bold</b><p><i>italic</i></p><h2>Header</h2><h3>subheader</h3>'
        '<blockquote>quote</blockquote>'
        '<p><a href="www.mirumee.com">link</a></p>'
        '<p>an <script>evil()</script>example</p>')
    assert product.seo_description == (
        'bolditalicHeadersubheaderquotelinkan evil()example')
def test_product_form_seo_description(unavailable_product):
    """An explicitly supplied seo_description is stored verbatim."""
    seo_description = (
        'This is a dummy product. '
        'HTML <b>shouldn\'t be removed</b> since it\'s a simple text field.')
    data = model_to_dict(unavailable_product)
    data['price'] = 20
    data['description'] = 'a description'
    data['seo_description'] = seo_description
    form = ProductForm(data, instance=unavailable_product)
    assert form.is_valid()
    form.save()
    assert unavailable_product.seo_description == seo_description
def test_product_form_seo_description_too_long(unavailable_product):
description = (
'Saying it fourth made saw light bring beginning kind over herb '
'won\'t creepeth multiply dry rule divided fish herb cattle greater '
'fly divided midst, gathering can\'t moveth seed greater subdue. '
'Lesser meat living fowl called. Dry don\'t wherein. Doesn\'t above '
'form sixth. Image moving earth | |
# bin/smrtsv.py
#!/bin/env python
import argparse
import logging
import subprocess
import sys
import os
import re
# Set logging
logging.basicConfig(filename="smrtsv.log", level=logging.DEBUG)
# Grid Engine (DRMAA) submission parameters; {cluster.params} is filled in
# by snakemake from the cluster-config file.
CLUSTER_SETTINGS = ' -V -cwd -e ./log -o ./log {cluster.params} -w n -S /bin/bash'
CLUSTER_FLAG = ("--drmaa", CLUSTER_SETTINGS, "-w", "60")
# Setup environment for executing commands: child processes get a copy of
# the current environment with the bundled tool locations prepended.
PROCESS_ENV = os.environ.copy()
# Prepend to PROCESS_ENV["PATH"]
INSTALL_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
INSTALL_PATH = [ # List of paths relative to INSTALL_DIR to be added to the environment $PATH
    "bin",
    "dist/miniconda/envs/python2/bin",
    "dist/miniconda/envs/python3/bin",
    "dist/miniconda/bin",
    "dist/celera/wgs-8.3rc2/Linux-amd64/bin/",
    "dist/amos-3.1.0/bin",
    "canu/Linux-amd64/bin"
]
PROCESS_ENV_PATH = ":".join([os.path.join(INSTALL_DIR, THIS_PATH) for THIS_PATH in INSTALL_PATH])
if "PATH" in PROCESS_ENV:
    PROCESS_ENV["PATH"] = PROCESS_ENV_PATH + ":" + PROCESS_ENV["PATH"]
else:
    PROCESS_ENV["PATH"] = PROCESS_ENV_PATH
# Prepend to PROCESS_ENV["LD_LIBRARY_PATH"] so bundled shared libraries
# (e.g. HDF5) are found first.
INSTALL_LD_PATH = [
    "dist/hdf5/lib"
]
PROCESS_ENV_LD_PATH = ":".join([os.path.join(INSTALL_DIR, THIS_PATH) for THIS_PATH in INSTALL_LD_PATH])
if "LD_LIBRARY_PATH" in PROCESS_ENV:
    PROCESS_ENV["LD_LIBRARY_PATH"] = PROCESS_ENV_LD_PATH + ":" + PROCESS_ENV["LD_LIBRARY_PATH"]
else:
    PROCESS_ENV["LD_LIBRARY_PATH"] = PROCESS_ENV_LD_PATH
# Also export in this process so children spawned without PROCESS_ENV
# inherit the library path.
os.environ["LD_LIBRARY_PATH"] = PROCESS_ENV["LD_LIBRARY_PATH"]
# Function definitions
def _get_dist_dir():
dirname, filename = os.path.split(os.path.abspath(__file__))
return dirname
# def _build_prefix(args):
# prefix = ["snakemake", "-T", "--rerun-incomplete", "--snakefile", os.path.join(os.path.dirname(_get_dist_dir()), "Snakefile"), "-j", str(args.jobs)]
# if args.dryrun:
# prefix.append("-n")
#
# if args.distribute:
# prefix.extend(CLUSTER_FLAG)
#
# return tuple(prefix)
def _run_cmd(args):
    """
    Run a command with the module-level PROCESS_ENV environment.

    :param args: A tuple of arguments starting with the command name.

    :return: Return code, or -1 if the process did not complete.
    """
    # flush stdout so our own output is ordered before the child's
    sys.stdout.flush()
    proc = subprocess.Popen(args, env=PROCESS_ENV)
    proc.wait()
    return -1 if proc.returncode is None else proc.returncode
def _run_snake_target(args, *cmd):
    """
    Run a snakemake target.

    :param args: Arguments processed from the command line (expects
        cluster_config, jobs, dryrun, distribute and verbose attributes).
    :param cmd: The command to run as a tuple starting with the name of the
        snakemake target, typically followed by '--config' settings.

    :return: Return code from snakemake.
    """
    # Use the user-defined cluster config path if one is given. Otherwise, use
    # an empty config that comes with the SMRT-SV distribution.
    if args.cluster_config is not None:
        cluster_config_path = args.cluster_config
    else:
        cluster_config_path = os.path.join(os.path.dirname(_get_dist_dir()), "cluster.template.json")
    # Setup snakemake command
    prefix = [
        "snakemake",
        "-T",
        "--rerun-incomplete",
        "--cluster-config", cluster_config_path,
        "--snakefile", os.path.join(os.path.dirname(_get_dist_dir()), "Snakefile"),
        "-j", str(args.jobs)
    ]
    if args.dryrun:
        prefix.append("-n")
    if args.distribute:
        # add DRMAA submission flags for cluster execution
        prefix.extend(CLUSTER_FLAG)
    # Append command
    prefix.extend(cmd)
    # Append path and ld_path so cluster jobs reconstruct this environment
    prefix.extend([
        "ld_path=%s" % PROCESS_ENV["LD_LIBRARY_PATH"],
        "path=%s" % PROCESS_ENV["PATH"]
    ])
    # Report (verbose)
    if args.verbose:
        print("Running snakemake command: %s" % " ".join(prefix))
    # Run snakemake command
    return _run_cmd(prefix)
def index(args):
    """Build BLASR indices for args.reference via 'prepare_reference'."""
    config_setting = "reference=%s" % args.reference
    return _run_snake_target(
        args, "prepare_reference", "--config", config_setting)
def align(args):
    """Align PacBio reads to the indexed reference ('align_reads' target)."""
    settings = [
        "reference=%s" % args.reference,
        "reads=%s" % args.reads,
        "alignments=%s" % args.alignments,
        "alignments_dir=%s" % args.alignments_dir,
        "batches=%s" % args.batches,
        "threads=%s" % args.threads,
        "tmp_dir=%s" % args.tmpdir,
        "alignment_parameters=%s" % args.alignment_parameters,
    ]
    return _run_snake_target(args, "align_reads", "--config", *settings)
def detect(args):
    """
    Detect SVs from signatures in read alignments ('get_regions' target).
    """
    # Find candidate regions in alignments.
    sys.stdout.write("Searching for candidate regions\n")
    settings = [
        "reference=%s" % args.reference,
        "alignments=%s" % args.alignments,
        "assembly_window_size=%s" % args.assembly_window_size,
        "assembly_window_slide=%s" % args.assembly_window_slide,
        "min_length=%s" % args.min_length,
        "min_support=%s" % args.min_support,
        "max_support=%s" % args.max_support,
        "min_coverage=%s" % args.min_coverage,
        "max_coverage=%s" % args.max_coverage,
        "min_hardstop_support=%s" % args.min_hardstop_support,
        "max_candidate_length=%s" % args.max_candidate_length,
    ]
    # optional settings
    if args.exclude:
        settings.append("regions_to_exclude=%s" % args.exclude)
    if args.candidates:
        settings.append("candidates=%s" % args.candidates)
    return _run_snake_target(args, "get_regions", "--config", *settings)
def assemble(args):
    """
    Assemble candidate regions from raw reads aligned to regions.

    When args.candidates is given, the candidate BED is split per contig,
    one snakemake run is launched per contig, and the per-contig BAMs are
    merged into args.assembly_alignments.  Otherwise a single snakemake run
    produces the assembly alignments directly.

    :return: last return code from snakemake/samtools (0 on success).
    """
    # Generate local assemblies across the genome.
    sys.stdout.write("Starting local assemblies\n")
    # Snakemake target plus --config settings shared by both branches.
    base_command = (
        "collect_assembly_alignments",
        "--config",
        "reference=%s" % args.reference,
        "alignments=%s" % args.alignments,
        "reads=%s" % args.reads,
        "tmp_dir=%s" % args.tmpdir,
        "alignment_parameters=\"%s\"" % args.alignment_parameters,
        "mapping_quality=\"%s\"" % args.mapping_quality,
        "minutes_to_delay_jobs=\"%s\"" % args.minutes_to_delay_jobs,
        "assembly_log=\"%s\"" % args.assembly_log
    )
    if args.candidates:
        # For each contig/chromosome in the candidates file, submit a separate
        # Snakemake command. To do so, first split regions to assemble into one
        # file per contig in a temporary directory.
        tmpdir = os.path.join(os.getcwd(), "regions_by_contig")
        rebuild_regions_by_contig = False
        if not args.dryrun and (not os.path.exists(tmpdir) or args.rebuild_regions):
            rebuild_regions_by_contig = True
        if rebuild_regions_by_contig:
            try:
                os.mkdir(tmpdir)
            except OSError:
                # directory already exists
                pass
        previous_contig = None
        with open(args.candidates, "r") as fh:
            contigs = set()
            for line in fh:
                contig = line.strip().split()[0]
                # candidates are assumed to be grouped by contig; open a new
                # per-contig file whenever the contig changes
                if previous_contig != contig:
                    if previous_contig is not None and rebuild_regions_by_contig:
                        contig_file.close()
                    previous_contig = contig
                    contigs.add(contig)
                    if rebuild_regions_by_contig:
                        contig_file = open(os.path.join(tmpdir, "%s.bed" % contig), "w")
                if rebuild_regions_by_contig:
                    contig_file.write(line)
        if rebuild_regions_by_contig:
            contig_file.close()
        # Assemble regions per contig creating a single merged BAM for each contig.
        local_assembly_basename = os.path.basename(args.assembly_alignments)
        local_assemblies = set()
        return_code = 0
        for contig in contigs:
            contig_local_assemblies = os.path.join("local_assemblies", local_assembly_basename.replace(".bam", ".%s.bam" % contig))
            local_assemblies.add(contig_local_assemblies)
            # allow resuming: skip contigs that already have assemblies
            if os.path.exists(contig_local_assemblies):
                sys.stdout.write("Local assemblies already exist for %s\n" % contig)
                continue
            command = base_command + ("regions_to_assemble=%s" % os.path.join(tmpdir, "%s.bed" % contig),)
            command = command + ("assembly_alignments=%s" % contig_local_assemblies,)
            sys.stdout.write("Starting local assemblies for %s\n" % contig)
            logging.debug("Assembly command: %s", " ".join(command))
            return_code = _run_snake_target(args, *command)
            if return_code != 0:
                break
        # If the last command executed successfully, try to merge all local
        # assemblies per contig into a single file.
        if not args.dryrun and return_code == 0:
            if len(local_assemblies) > 1:
                return_code = _run_cmd(["samtools", "merge", args.assembly_alignments] + list(local_assemblies))
            else:
                # merge refuses a single input; just rewrite the one BAM
                return_code = _run_cmd(["samtools", "view", "-b", "-o", args.assembly_alignments] + list(local_assemblies))
            if return_code == 0:
                return_code = _run_cmd(["samtools", "index", args.assembly_alignments])
        # Return the last return code.
        return return_code
    else:
        if args.assembly_alignments:
            command = base_command + ("assembly_alignments=%s" % args.assembly_alignments,)
            logging.debug("Assembly command: %s", " ".join(command))
            # BUG FIX: 'command' is a snakemake target plus --config settings,
            # not an executable, so it must be dispatched through snakemake
            # (as the per-contig branch does) rather than exec'd directly.
            return _run_snake_target(args, *command)
def call(args):
    """Call SVs, indels, and inversions ('call_variants' target)."""
    sys.stdout.write("Calling variants\n")
    settings = [
        "reference=%s" % args.reference,
        "alignments=%s" % args.alignments,
        "local_assembly_alignments=%s" % args.assembly_alignments,
        "variants=%s" % args.variants,
        "species=\"%s\"" % args.species,
        "sample=\"%s\"" % args.sample,
    ]
    return_code = _run_snake_target(
        args, "call_variants", "--config", *settings)
    if return_code != 0:
        sys.stderr.write("Failed to call variants\n")
    return return_code
def run(args):
    """Run the whole pipeline: index, align, detect, assemble, call.

    args.runjobs is a delimited string ("a,b;c:d") giving per-step job
    counts for align/detect/assemble/call; missing or empty entries fall
    back to args.jobs.  Returns 0 on success, otherwise the failing
    step's return code.
    """
    # Get default jobs
    if "jobs" in args:
        default_jobs = args.jobs
    else:
        default_jobs = 1
    # Get the number of jobs for each step
    job_step = re.split("\\s*[,;:]\\s*", args.runjobs.strip())  # Split into array
    job_step = [job_step[i] if len(job_step) > i else '' for i in range(4)]  # Extend to length 4
    # Convert each number of jobs to integers
    for i in range(4):
        if job_step[i] != '':
            try:
                job_step[i] = int(job_step[i])
            except ValueError:
                sys.stderr.write("Invalid number of jobs for step %d: Must be an integer: \"%s\"\n" % ((i + 1), job_step[i]))
                return 1
        else:
            # empty entry: fall back to the global --jobs value
            job_step[i] = default_jobs
    # Report the number of jobs for each task
    if args.verbose and args.distribute:
        print("Jobs per task:")
        print("\t* Align: %s" % job_step[0])
        print("\t* Detect: %s" % job_step[1])
        print("\t* Assemble: %s" % job_step[2])
        print("\t* Call: %s" % job_step[3])
    # Build reference indices
    return_code = index(args)
    if return_code != 0:
        sys.stderr.write("Failed to index reference\n")
        return return_code
    # Align (args.jobs is rebound per step before each call)
    args.jobs = job_step[0]
    return_code = align(args)
    if return_code != 0:
        sys.stderr.write("Failed to align reads\n")
        return return_code
    # Detect SVs.
    args.jobs = job_step[1]
    return_code = detect(args)
    if return_code != 0:
        sys.stderr.write("Failed to identify candidate regions\n")
        return return_code
    # Run local assemblies.
    args.jobs = job_step[2]
    return_code = assemble(args)
    if return_code != 0:
        sys.stderr.write("Failed to generate local assemblies\n")
        return return_code
    # Call SVs, indels, and inversions.
    args.jobs = job_step[3]
    return_code = call(args)
    if return_code != 0:
        sys.stderr.write("Failed to call variants\n")
        return return_code
    return 0
def genotype(args):
    """Genotype SVs ('convert_genotypes_to_vcf' target)."""
    sys.stdout.write("Genotyping SVs\n")
    settings = [
        "genotyper_config=%s" % args.genotyper_config,
        "genotyped_variants=%s" % args.genotyped_variants,
        "threads=%s" % args.threads,
    ]
    return_code = _run_snake_target(
        args, "convert_genotypes_to_vcf", "--config", *settings)
    if return_code != 0:
        sys.stderr.write("Failed to genotype SVs\n")
    return return_code
# Main
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dryrun", "-n", action="store_true", help="Print commands that will run without running them")
parser.add_argument("--distribute", action="store_true", help="Distribute analysis to Grid Engine-style cluster")
parser.add_argument("--jobs", help="number of jobs to run simultaneously", type=int, default=1)
parser.add_argument("--tmpdir", help="temporary directory to use for distributed jobs", default="/var/tmp")
parser.add_argument("--verbose", "-v", help="print extra runtime information", action="store_true")
parser.add_argument("--cluster_config", help="JSON/YAML file specifying cluster configuration parameters to pass to Snakemake's --cluster-config option")
parser.add_argument("--drmaalib", help="For jobs that are distributed, this is the location to the DRMAA library (libdrmaa.so) installed with Grid Engine. Use this to set DRMAA_LIBRARY_PATH in the environment for pipelined commands. If DRMAA_LIBRARY_PATH is already set in the environment when calling this program, this option is not required.")
subparsers = parser.add_subparsers()
# Index a reference for use by BLASR.
parser_index = subparsers.add_parser("index", help="index a reference sequence for use by BLASR")
parser_index.add_argument("reference", help="FASTA file of reference to index")
parser_index.set_defaults(func=index)
# Align PacBio reads to an indexed reference with BLASR.
parser_align = subparsers.add_parser("align", help="align PacBio reads to an indexed reference with BLASR")
parser_align.add_argument("reference", help="FASTA file of indexed reference with .ctab and .sa in the same directory")
parser_align.add_argument("reads", help="text file with one absolute path to a PacBio reads file (.bax.h5) per line")
parser_align.add_argument("--alignments", help="text file with one absolute | |
from __future__ import print_function, division
import os
from datetime import datetime
import copy
import warnings
warnings.filterwarnings("ignore",category=UserWarning)
from .pyemu_warnings import PyemuWarning
import math
import numpy as np
import pandas as pd
from pyemu.mat.mat_handler import get_common_elements,Matrix,Cov
from pyemu.pst.pst_utils import write_parfile,read_parfile
from pyemu.plot.plot_utils import ensemble_helper
#warnings.filterwarnings("ignore",message="Pandas doesn't allow columns to be "+\
# "created via a new attribute name - see"+\
# "https://pandas.pydata.org/pandas-docs/"+\
# "stable/indexing.html#attribute-access")
# Fixed global seed so ensemble draws are reproducible between runs;
# Ensemble.reseed() restores it.
SEED = 358183147 #from random.org on 5 Dec 2016
#print("setting random seed")
np.random.seed(SEED)
class Ensemble(pd.DataFrame):
    """ The base class type for handling parameter and observation ensembles.
    It is directly derived from pandas.DataFrame. This class should not be
    instantiated directly - use a derived type.

    Parameters
    ----------
    *args : list
        positional args to pass to pandas.DataFrame()
    **kwargs : dict
        keyword args to pass to pandas.DataFrame(). Must contain
        'columns' and 'mean_values'

    Returns
    -------
    Ensemble : Ensemble
    """
    def __init__(self,*args,**kwargs):
        """constructor for base Ensemble type. 'columns' and 'mean_values'
        must be in the kwargs
        """
        assert "columns" in kwargs.keys(),"ensemble requires 'columns' kwarg"
        mean_values = kwargs.pop("mean_values",None)
        super(Ensemble,self).__init__(*args,**kwargs)
        if mean_values is None:
            raise Exception("Ensemble requires 'mean_values' kwarg")
        # stored on the instance; derived types typically expose a
        # mean_values property instead
        self._mean_values = mean_values

    def as_pyemu_matrix(self,typ=Matrix):
        """
        Create a pyemu.Matrix (or derived type) from the Ensemble.

        Parameters
        ----------
        typ : pyemu.Matrix or derived type
            the type of matrix to return

        Returns
        -------
        pyemu.Matrix : pyemu.Matrix
        """
        # np.float was only a deprecated alias for the builtin float
        # (removed in numpy>=1.24); use float directly - same behavior.
        x = self.values.copy().astype(float)
        return typ(x=x,row_names=list(self.index),
                   col_names=list(self.columns))

    def drop(self,arg):
        """ overload of pandas.DataFrame.drop() that preserves the
        Ensemble subtype.

        Parameters
        ----------
        arg : iterable
            argument to pass to pandas.DataFrame.drop()

        Returns
        -------
        Ensemble : Ensemble

        Note
        ----
        relies on self.pst, which is set by derived types, not by this
        base class.
        """
        df = super(Ensemble,self).drop(arg)
        return type(self)(data=df,pst=self.pst)

    def dropna(self,*args,**kwargs):
        """ overload of pandas.DataFrame.dropna() that preserves the
        Ensemble subtype (see drop() note regarding self.pst).

        Parameters
        ----------
        *args : list
            positional args to pass to pandas.DataFrame.dropna()
        **kwargs : dict
            keyword args to pass to pandas.DataFrame.dropna()

        Returns
        -------
        Ensemble : Ensemble
        """
        df = super(Ensemble,self).dropna(*args,**kwargs)
        return type(self)(data=df,pst=self.pst)

    def draw(self,cov,num_reals=1,names=None):
        """ draw random realizations from a multivariate Gaussian
        distribution and store them in this ensemble (in place).

        Parameters
        ----------
        cov: pyemu.Cov
            covariance structure to draw from
        num_reals: int
            number of realizations to generate
        names : list
            list of columns names to draw for. If None, all names
            are drawn
        """
        real_names = np.arange(num_reals,dtype=np.int64)
        # make sure everything is cool WRT ordering
        if names is not None:
            vals = self.mean_values.loc[names]
            cov = cov.get(names)
        elif self.names != cov.row_names:
            names = get_common_elements(self.names,
                                        cov.row_names)
            vals = self.mean_values.loc[names]
            cov = cov.get(names)
        else:
            vals = self.mean_values
            names = self.names
        # generate random numbers
        if cov.isdiagonal: #much faster - draw each column independently
            val_array = np.array([np.random.normal(mu,std,size=num_reals) for\
                                  mu,std in zip(vals,np.sqrt(cov.x))]).transpose()
        else:
            val_array = np.random.multivariate_normal(vals, cov.as_2d,num_reals)
        # wipe any existing realizations before filling by enlargement
        # (np.NaN alias removed in numpy 2.0; np.nan is identical)
        self.loc[:,:] = np.nan
        self.dropna(inplace=True)
        # this sucks - can only set by enlargement one row at a time
        for rname,vals in zip(real_names,val_array):
            self.loc[rname, names] = vals
            # set NaNs (columns not drawn) to mean_values
            idx = pd.isnull(self.loc[rname,:])
            self.loc[rname,idx] = self.mean_values[idx]

    def plot(self,bins=10,facecolor='0.5',plot_cols=None,
             filename="ensemble.pdf",func_dict = None,
             **kwargs):
        """plot ensemble histograms to multipage pdf

        Parameters
        ----------
        bins : int
            number of bins
        facecolor : str
            color
        plot_cols : list of str
            subset of ensemble columns to plot. If None, all are plotted.
            Default is None
        filename : str
            pdf filename. Default is "ensemble.pdf"
        func_dict : dict
            a dict of functions to apply to specific columns (e.g., np.log10)
        **kwargs : dict
            keyword args to pass to plot_utils.ensemble_helper()

        Returns
        -------
        None
        """
        # BUG FIX: func_dict and **kwargs were accepted but silently
        # dropped; forward them to the helper.
        ensemble_helper(self,bins=bins,facecolor=facecolor,plot_cols=plot_cols,
                        filename=filename,func_dict=func_dict,**kwargs)

    def __sub__(self,other):
        """overload of pandas.DataFrame.__sub__() operator to difference two
        Ensembles

        Parameters
        ----------
        other : pyemu.Ensemble or pandas.DataFrame
            the instance to difference against

        Returns
        -------
        Ensemble : Ensemble
        """
        diff = super(Ensemble,self).__sub__(other)
        return Ensemble.from_dataframe(df=diff)

    @classmethod
    def from_dataframe(cls,**kwargs):
        """class method constructor to create an Ensemble from
        a pandas.DataFrame

        Parameters
        ----------
        **kwargs : dict
            optional args to pass to the
            Ensemble Constructor. Expects 'df' in kwargs.keys()
            that must be a pandas.DataFrame instance

        Returns
        -------
        Ensemble : Ensemble
        """
        df = kwargs.pop("df")
        assert isinstance(df,pd.DataFrame)
        # pyemu uses lower-case names throughout
        df.columns = [c.lower() for c in df.columns]
        mean_values = kwargs.pop("mean_values",df.mean(axis=0))
        e = cls(data=df,index=df.index,columns=df.columns,
                mean_values=mean_values,**kwargs)
        return e

    @staticmethod
    def reseed():
        """method to reset the numpy.random seed using the pyemu.en
        SEED global variable
        """
        np.random.seed(SEED)

    def copy(self):
        """make a deep copy of self

        Returns
        -------
        Ensemble : Ensemble
        """
        df = super(Ensemble,self).copy()
        return type(self).from_dataframe(df=df)

    def covariance_matrix(self,localizer=None):
        """calculate the approximate covariance matrix implied by the ensemble
        using the mean-differencing operation at the core of EnKF

        Parameters
        ----------
        localizer : pyemu.Matrix
            covariance localizer to apply

        Returns
        -------
        cov : pyemu.Cov
            covariance matrix
        """
        mean = np.array(self.mean(axis=0))
        delta = self.as_pyemu_matrix(typ=Cov)
        # center each realization on the ensemble mean
        for i in range(self.shape[0]):
            delta.x[i, :] -= mean
        delta *= (1.0 / np.sqrt(float(self.shape[0] - 1.0)))
        if localizer is not None:
            delta = delta.T * delta
            return delta.hadamard_product(localizer)
        return delta.T * delta

    def get_deviations(self):
        """get the deviations of the ensemble values from the mean vector

        Returns
        -------
        en : pyemu.Ensemble
            Ensemble of deviations from the mean
        """
        mean_vec = self.mean()
        df = self.loc[:,:].copy()
        for col in df.columns:
            df.loc[:,col] -= mean_vec[col]
        return type(self).from_dataframe(pst=self.pst,df=df)
class ObservationEnsemble(Ensemble):
""" Ensemble derived type for observations. This class is primarily used to
generate realizations of observation noise. These are typically generated from
the weights listed in the control file. However, a general covariance matrix can
be explicitly used.
Note:
Does not generate noise realizations for observations with zero weight
"""
    def __init__(self,pst,**kwargs):
        """ObservationEnsemble constructor.

        Parameters
        ----------
        pst : pyemu.Pst
            control file instance.  The required Ensemble kwargs
            'columns' and 'mean_values' are generated from
            pst.observation_data.obsnme and pst.observation_data.obsval
            respectively.
        **kwargs : dict
            keyword args to pass to Ensemble constructor

        Returns
        -------
        ObservationEnsemble : ObservationEnsemble
        """
        kwargs["columns"] = pst.observation_data.obsnme
        kwargs["mean_values"] = pst.observation_data.obsval
        super(ObservationEnsemble,self).__init__(**kwargs)
        self.pst = pst
        # index observation_data by name so label-based lookups work below
        self.pst.observation_data.index = self.pst.observation_data.obsnme
    def copy(self):
        """overload of Ensemble.copy() that also carries a copy of the
        control file instance

        Returns
        -------
        ObservationEnsemble : ObservationEnsemble
        """
        # super(Ensemble,self) deliberately skips Ensemble.copy and calls
        # pandas.DataFrame.copy directly
        df = super(Ensemble,self).copy()
        return type(self).from_dataframe(df=df,pst=self.pst.get())
    @property
    def names(self):
        """property decorated method to get current non-zero weighted
        column names. Uses ObservationEnsemble.pst.nnz_obs_names

        Returns
        -------
        list : list
            non-zero weight observation names
        """
        return self.pst.nnz_obs_names
    @property
    def mean_values(self):
        """ property decorated method to get mean values of observation noise.
        Zero for non-zero weighted observations (noise is zero-mean);
        zero-weight observations keep their obsval.

        Returns
        -------
        mean_values : pandas Series
        """
        vals = self.pst.observation_data.obsval.copy()
        # noise mean is zero for every observation that carries weight
        vals.loc[self.names] = 0.0
        return vals
    def draw(self,cov,num_reals):
        """ draw realizations of observation noise and add to mean_values

        Note: only draws noise realizations for non-zero weighted observations;
        zero-weighted observations are set to mean value for all realizations

        Parameters
        ----------
        cov : pyemu.Cov
            covariance matrix that describes the support volume around the
            mean values.
        num_reals : int
            number of realizations to draw
        """
        super(ObservationEnsemble,self).draw(cov,num_reals,
                                             names=self.pst.nnz_obs_names)
        # shift the zero-mean noise onto the observed values
        self.loc[:,self.names] += self.pst.observation_data.obsval
    @property
    def nonzero(self):
        """ property decorated method to get a new ObservationEnsemble
        of only non-zero weighted observations

        Returns
        -------
        ObservationEnsemble : ObservationEnsemble
        """
        df = self.loc[:,self.pst.nnz_obs_names]
        # also subset the attached control file to the same observations
        return ObservationEnsemble.from_dataframe(df=df,
                pst=self.pst.get(obs_names=self.pst.nnz_obs_names))
    @classmethod
    def from_id_gaussian_draw(cls,pst,num_reals):
        """ this is an experimental method to help speed up independent
        (diagonal-covariance) draws for really large (>1E6) ensemble sizes.

        Parameters
        ----------
        pst : pyemu.Pst
            a control file instance
        num_reals : int
            number of realizations to draw

        Returns
        -------
        ObservationEnsemble : ObservationEnsemble
        """
        # set up some column names
        real_names = np.arange(num_reals,dtype=np.int64)
        #arr = np.empty((num_reals,len(pst.obs_names)))
        obs = pst.observation_data
        # noise standard deviation is the inverse of the weight
        stds = {name:1.0/obs.loc[name,"weight"] for name in pst.nnz_obs_names}
        nz_names = set(pst.nnz_obs_names)
        arr = np.random.randn(num_reals,pst.nobs)
        for i,oname in enumerate(pst.obs_names):
            if oname in nz_names:
                # scale the standard-normal draws by each obs' noise std
                arr[:,i] *= stds[oname]
            else:
                # zero-weight observations get no noise
                arr[:,i] = 0.0
        df = pd.DataFrame(arr,index=real_names,columns=pst.obs_names)
        # shift the zero-mean noise onto the observed values
        df.loc[:,pst.obs_names] += pst.observation_data.obsval
        new_oe = cls.from_dataframe(pst=pst,df=df)
        return new_oe
def to_binary(self, filename):
    """write the observation ensemble to a jco-style binary file.

    Parameters
    ----------
    filename : str
        the filename to write

    Returns
    -------
    None

    Note
    ----
    The original docstring states the ensemble is transposed in the
    binary file so that the 20-char obs names are carried.
    NOTE(review): any transposition happens inside as_pyemu_matrix()/
    to_coo(), which are defined elsewhere -- confirm the on-disk layout
    before relying on it.
    """
    self.as_pyemu_matrix().to_coo(filename)
@classmethod
def from_binary(cls,pst,filename):
    """instantiate an observation ensemble from a jco-type binary file

    Parameters
    ----------
    pst : pyemu.Pst
        a Pst instance
    filename : str
        the binary file name

    Returns
    -------
    oe : ObservationEnsemble
    """
    # rows of the binary matrix become realization names, columns the
    # observation names
    m = Matrix.from_binary(filename)
    return ObservationEnsemble(data=m.x,pst=pst, index=m.row_names)
@property
def phi_vector(self):
"""property decorated method to get a vector of L2 norm (phi)
for the realizations. The ObservationEnsemble.pst.weights can be
updated prior to calling this method to evaluate new weighting strategies
Return
------
pandas.DataFrame : pandas.DataFrame
"""
weights = self.pst.observation_data.loc[self.names,"weight"]
obsval = self.pst.observation_data.loc[self.names,"obsval"]
phi_vec = []
for idx in self.index.values:
simval = self.loc[idx,self.names]
phi = (((simval - obsval) * | |
# repo: CloudReactor/task_manager
from typing import Any, FrozenSet, Optional, TYPE_CHECKING

import logging
import random
import string

from django.utils import timezone

from rest_framework.exceptions import APIException

from botocore.exceptions import ClientError

from ..common.aws import *

if TYPE_CHECKING:
    from ..models import (
        Task,
        TaskExecution
    )

from .execution_method import ExecutionMethod

logger = logging.getLogger(__name__)
class AwsEcsExecutionMethod(ExecutionMethod):
    """ExecutionMethod implementation that runs Tasks on AWS ECS."""

    NAME = 'AWS ECS'

    # ECS launch types
    LAUNCH_TYPE_EC2 = 'EC2'
    LAUNCH_TYPE_FARGATE = 'FARGATE'
    ALL_LAUNCH_TYPES = [LAUNCH_TYPE_FARGATE, LAUNCH_TYPE_EC2]
    DEFAULT_LAUNCH_TYPE = LAUNCH_TYPE_FARGATE

    # Fallback task sizing used when neither the Task Execution nor the
    # Task specifies CPU/memory (see manually_start())
    DEFAULT_CPU_UNITS = 256
    DEFAULT_MEMORY_MB = 512

    # Values accepted by ECS for the service propagateTags setting
    SERVICE_PROPAGATE_TAGS_TASK_DEFINITION = 'TASK_DEFINITION'
    SERVICE_PROPAGATE_TAGS_SERVICE ='SERVICE'
    SERVICE_PROPAGATE_TAGS_CHOICES = [
        SERVICE_PROPAGATE_TAGS_TASK_DEFINITION,
        SERVICE_PROPAGATE_TAGS_SERVICE,
    ]

    # Capability set returned when the Run Environment has no
    # aws_events_role_arn (see capabilities()).
    # NOTE(review): despite the name, this set omits EventBridge
    # scheduling -- confirm the intended naming.
    CAPABILITIES_WITH_SCHEDULING = frozenset([
        ExecutionMethod.ExecutionCapability.MANUAL_START,
        ExecutionMethod.ExecutionCapability.SETUP_SERVICE
    ])

    # AWS limit on the number of tags per resource
    MAX_TAG_COUNT = 50
def __init__(self, task: 'Task'):
    """Bind this execution method (named 'AWS ECS') to the given Task."""
    super().__init__(self.NAME, task)
def capabilities(self) -> FrozenSet[ExecutionMethod.ExecutionCapability]:
    """Report which execution capabilities this Task currently supports.

    Returns an empty set unless the Run Environment can control ECS and
    an execution role, cluster ARN, subnets and security groups are all
    resolvable (Task setting falling back to the Run Environment's).
    Scheduling requires the Run Environment to also have an events role.
    """
    task = self.task
    run_env = task.run_environment

    if not run_env.can_control_aws_ecs():
        return frozenset()

    # Each setting resolves from the Task, falling back to the Run
    # Environment; all must be present for any capability.
    required_settings = (
        task.aws_ecs_default_execution_role or run_env.aws_ecs_default_execution_role,
        task.aws_ecs_default_cluster_arn or run_env.aws_ecs_default_cluster_arn,
        task.aws_default_subnets or run_env.aws_default_subnets,
        task.aws_ecs_default_security_groups or run_env.aws_ecs_default_security_groups,
    )
    if not all(required_settings):
        return frozenset()

    if run_env.aws_events_role_arn:
        return ExecutionMethod.ALL_CAPABILITIES
    return self.CAPABILITIES_WITH_SCHEDULING
def setup_scheduled_execution(self) -> None:
    """Create/refresh the EventBridge (CloudWatch Events) rule and ECS
    target that run this Task on its schedule.

    Mutates the aws_* scheduling fields on self.task; the caller is
    responsible for saving the Task.

    Raises
    ------
    APIException
        if the Task's schedule is not a cron()/rate() expression.
    """
    task = self.task

    # EventBridge only supports cron(...) and rate(...) expressions
    if not task.schedule.startswith('cron') and not task.schedule.startswith('rate'):
        raise APIException(detail=f"Schedule '{task.schedule}' is invalid")

    aws_scheduled_execution_rule_name = f"CR_{task.uuid}"
    client = self.make_events_client()
    state = 'ENABLED' if task.enabled else 'DISABLED'
    run_env = task.run_environment
    execution_role_arn = task.aws_ecs_default_execution_role or run_env.aws_ecs_default_execution_role
    logger.info(f"Using execution role arn = '{execution_role_arn}'")
    # Need this permission: https://github.com/Miserlou/Zappa/issues/381
    response = client.put_rule(
        Name=aws_scheduled_execution_rule_name,
        ScheduleExpression=task.schedule,
        #EventPattern='true',
        State=state,
        Description=f"Scheduled execution of Task '{task.name}' ({task.uuid})",
        RoleArn=execution_role_arn
    )
    # TODO: use add_creation_args()
    # Tags=[
    #     {
    #         'Key': 'string',
    #         'Value': 'string'
    #     },
    # ],
    # EventBusName='string'
    task.aws_scheduled_execution_rule_name = aws_scheduled_execution_rule_name
    task.aws_scheduled_event_rule_arn = response['RuleArn']
    logger.info(f"got rule ARN = {task.aws_scheduled_event_rule_arn}")

    # put_rule() above already sets State; presumably this explicit
    # enable/disable is defensive -- TODO confirm it is still needed
    if task.enabled:
        client.enable_rule(Name=aws_scheduled_execution_rule_name)
    else:
        client.disable_rule(Name=aws_scheduled_execution_rule_name)

    # Attach the ECS cluster as the rule's target; settings resolve from
    # the Task, falling back to the Run Environment defaults
    aws_event_target_rule_name = f"CR_{task.uuid}"
    aws_event_target_id = f"CR_{task.uuid}"
    cluster_arn = task.aws_ecs_default_cluster_arn or run_env.aws_ecs_default_cluster_arn
    launch_type = task.aws_ecs_default_launch_type or run_env.aws_ecs_default_launch_type
    platform_version = task.aws_ecs_default_platform_version or \
        run_env.aws_ecs_default_platform_version or \
        AWS_ECS_PLATFORM_VERSION_LATEST
    subnets = task.aws_default_subnets or run_env.aws_default_subnets
    security_groups = task.aws_ecs_default_security_groups or run_env.aws_ecs_default_security_groups
    assign_public_ip = self.assign_public_ip_str()
    response = client.put_targets(
        Rule=aws_event_target_rule_name,
        Targets=[
            {
                'Id': aws_event_target_id,
                'Arn': cluster_arn,
                'RoleArn': run_env.aws_events_role_arn,
                'EcsParameters': {
                    'TaskDefinitionArn': task.aws_ecs_task_definition_arn,
                    'TaskCount': task.scheduled_instance_count or 1,
                    'LaunchType': launch_type,
                    # Only for tasks that use awsvpc networking
                    'NetworkConfiguration': {
                        'awsvpcConfiguration': {
                            'Subnets': subnets,
                            'SecurityGroups': security_groups,
                            'AssignPublicIp': assign_public_ip
                        }
                    },
                    'PlatformVersion': platform_version,
                    #'Group': 'string'
                },
            },
        ]
    )
    handle_aws_multiple_failure_response(response)
    task.aws_event_target_rule_name = aws_event_target_rule_name
    task.aws_event_target_id = aws_event_target_id
def teardown_scheduled_execution(self) -> None:
    """Remove the EventBridge target and rule created by
    setup_scheduled_execution().

    Resources already removed manually (ResourceNotFoundException) are
    logged and tolerated; any other AWS error is re-raised.  Mutates the
    aws_* scheduling fields on self.task; the caller is responsible for
    saving the Task.
    """
    client = None
    task = self.task

    if task.aws_event_target_rule_name and task.aws_event_target_id:
        client = self.make_events_client()
        try:
            response = client.remove_targets(
                Rule=task.aws_event_target_rule_name,
                #EventBusName='string',
                Ids=[
                    task.aws_event_target_id
                ],
                Force=False
            )
            handle_aws_multiple_failure_response(response)
            task.aws_event_target_rule_name = ''
            task.aws_event_target_id = ''
        except ClientError as client_error:
            error_code = client_error.response['Error']['Code']
            # Happens if the schedule rule is removed manually
            if error_code == 'ResourceNotFoundException':
                logger.warning(f"teardown_scheduled_execution(): Can't remove target {task.aws_event_target_rule_name} because resource not found, exception = {client_error}")
            else:
                logger.exception(f"teardown_scheduled_execution(): Can't remove target {task.aws_event_target_rule_name} due to unhandled error {error_code}")
                raise client_error

    if task.aws_scheduled_execution_rule_name:
        client = client or self.make_events_client()
        try:
            client.delete_rule(
                Name=task.aws_scheduled_execution_rule_name,
                #EventBusName='string'
                Force=True
            )
        except ClientError as client_error:
            error_code = client_error.response['Error']['Code']
            # Happens if the schedule rule is removed manually
            if error_code == 'ResourceNotFoundException':
                # Fixed log messages: previously said "Can't disable
                # rule{...}" (missing space, wrong verb) although the
                # failing call is delete_rule().
                logger.warning(
                    f"teardown_scheduled_execution(): Can't delete rule {task.aws_scheduled_execution_rule_name} because resource not found, exception = {client_error}")
            else:
                logger.exception(
                    f"teardown_scheduled_execution(): Can't delete rule {task.aws_scheduled_execution_rule_name} due to unhandled error {error_code}")
                raise client_error
        # NOTE(review): aws_scheduled_execution_rule_name is left set even
        # though the rule is gone -- confirm whether it should be cleared
        # like the target fields above.
        task.aws_scheduled_event_rule_arn = ''
def setup_service(self, force_creation=False):
    """Create or update the ECS service backing this Task.

    If a service exists and is ACTIVE (and force_creation is False) it
    is updated in place.  Otherwise any existing service is deleted; if
    the deleted service is still DRAINING/ACTIVE, a new service name
    with an incremented index is generated so creation does not collide
    with the draining service.

    Parameters
    ----------
    force_creation : bool
        delete and re-create the service even if it is ACTIVE.

    Returns the boto3 response of create_service()/update_service().
    Mutates aws_ecs_service_* fields on self.task; caller must save.
    """
    from ..models.task import Task

    task = self.task
    run_env = task.run_environment
    ecs_client = run_env.make_boto3_client('ecs')
    existing_service = self.find_aws_ecs_service(ecs_client=ecs_client)

    if existing_service:
        service_name = existing_service['serviceName']
    else:
        service_name = self.make_aws_ecs_service_name()

    # When creating a service that specifies multiple target groups, the Amazon ECS service-linked role must be created. The role is created by omitting the role parameter in API requests, or the Role property in AWS CloudFormation. For more information, see Service-Linked Role for Amazon ECS.
    #role = task.aws_ecs_default_task_role or task.aws_ecs_default_execution_role or \
    #        run_env.aws_ecs_default_task_role or run_env.aws_ecs_default_execution_role

    new_service_name = service_name

    if existing_service:
        current_status = existing_service['status'].upper()
        if force_creation or (current_status != 'ACTIVE'):
            logger.info(f"Deleting service '{service_name}' before updating ...")
            cluster = task.aws_ecs_default_cluster_arn or run_env.aws_ecs_default_cluster_arn
            deletion_response = ecs_client.delete_service(
                cluster=cluster,
                service=service_name,
                force=True)
            deleted_service = deletion_response['service']
            current_status = deleted_service['status'].upper()
            if current_status in ('DRAINING', 'ACTIVE'):
                # The old service lingers; pick a fresh indexed name so
                # create_service() below won't collide with it.
                service_name = deleted_service['serviceName']
                logger.info(f"Current status of service '{service_name}' is {current_status}, will change service name")
                m = Task.SERVICE_NAME_REGEX.match(service_name)
                if m:
                    index_str = m.group(3)
                    if index_str:
                        new_service_name = self.make_aws_ecs_service_name(int(index_str) + 1)
                    else:
                        new_service_name = self.make_aws_ecs_service_name()
                    logger.info(f"Parsed service name '{service_name}', will use '{new_service_name}' as new service name")
                else:
                    new_service_name = self.make_aws_ecs_service_name()
                    logger.warning(f"Can't match service name '{service_name}', will use '{new_service_name}' as new service name")
            existing_service = None
            task.aws_ecs_service_arn = ''
            task.aws_ecs_service_updated_at = timezone.now()

    if existing_service:
        # Update the existing ACTIVE service in place
        args = self.make_common_service_args(include_launch_type=False)
        args['service'] = service_name
        args['forceNewDeployment'] = \
            task.aws_ecs_service_force_new_deployment or False
        if task.aws_ecs_service_load_balancer_details_set.count() > 0:
            args['healthCheckGracePeriodSeconds'] = \
                task.aws_ecs_service_load_balancer_health_check_grace_period_seconds or \
                Task.DEFAULT_ECS_SERVICE_LOAD_BALANCER_HEALTH_CHECK_GRACE_PERIOD_SECONDS
        response = ecs_client.update_service(**args)
    else:
        args = self.add_creation_args(self.make_common_service_args(
            include_launch_type=True))
        # BUGFIX: previously assigned the stale 'service_name' here, so
        # the incremented 'new_service_name' computed above was never
        # used and creation could collide with a draining service.
        args['serviceName'] = new_service_name
        # Idempotency token for create_service (not security-sensitive)
        client_token = ''.join(random.choice(string.ascii_letters) for i in range(30))
        args['clientToken'] = client_token
        args['schedulingStrategy'] = 'REPLICA'
        args['deploymentController'] = {
            'type': 'ECS'
        }
        load_balancers = []
        for load_balancer in task.aws_ecs_service_load_balancer_details_set.all():
            load_balancer_dict = {
                'targetGroupArn': load_balancer.target_group_arn,
                'containerName': load_balancer.container_name or task.aws_ecs_main_container_name,
                'containerPort': load_balancer.container_port
            }
            load_balancers.append(load_balancer_dict)
        args['loadBalancers'] = load_balancers
        if task.aws_ecs_service_load_balancer_details_set.count() > 0:
            # Consistency fix: access the class constant via Task (as in
            # the update branch above), not via the instance.
            args['healthCheckGracePeriodSeconds'] = \
                task.aws_ecs_service_load_balancer_health_check_grace_period_seconds or \
                Task.DEFAULT_ECS_SERVICE_LOAD_BALANCER_HEALTH_CHECK_GRACE_PERIOD_SECONDS
        if task.aws_ecs_service_enable_ecs_managed_tags is not None:
            args['enableECSManagedTags'] = task.aws_ecs_service_enable_ecs_managed_tags
        if task.aws_ecs_service_propagate_tags:
            args['propagateTags'] = task.aws_ecs_service_propagate_tags
        response = ecs_client.create_service(**args)

    task.aws_ecs_service_arn = response['service']['serviceArn']
    task.aws_ecs_service_updated_at = timezone.now()
    return response
def teardown_service(self) -> None:
    """Force-delete the ECS service backing this Task, if one exists.

    If ECS reports the deleted service INACTIVE, the stored service ARN
    is cleared; otherwise (e.g. DRAINING) the ARN is kept so that
    setup_service() can increment the service name next time.  Mutates
    self.task; the caller is responsible for saving it.
    """
    task = self.task
    run_env = task.run_environment
    ecs_client = run_env.make_boto3_client('ecs')
    existing_service = self.find_aws_ecs_service(ecs_client=ecs_client)
    cluster = task.aws_ecs_default_cluster_arn or run_env.aws_ecs_default_cluster_arn
    if existing_service:
        service_name = existing_service['serviceName']
        deletion_response = ecs_client.delete_service(
            cluster=cluster,
            service=service_name,
            force=True)
        deleted_service = deletion_response['service']
        current_status = deleted_service['status'].upper()
        if current_status == 'INACTIVE':
            logger.info(f'Service {service_name} was inactive, clearing service ARN')
            task.aws_ecs_service_arn = ''
        else:
            logger.info(f'Service {service_name} had status {current_status}, saving service ARN')
            # The service ARN is not modified so that the name can be
            # incremented next time the service is enabled.
            task.aws_ecs_service_arn = deleted_service['serviceArn']
        task.aws_ecs_service_updated_at = timezone.now()
    # TODO: Mark Task Executions as STOPPED so they are aborted the next
    # time they heartbeat
def manually_start(self, task_execution: 'TaskExecution'):
    """Start a single Task Execution as an ECS task via run_task().

    Fills in unset per-execution settings from the Task, records the
    resolved ECS settings on the Task Execution, saves both rows, then
    calls ECS run_task().  On any failure the execution is marked
    FAILED / FAILED_TO_START and saved -- no exception is re-raised.
    """
    task = task_execution.task

    # Inherit unset per-execution settings from the Task
    if task_execution.is_service is None:
        task_execution.is_service = task.is_service
    task_execution.heartbeat_interval_seconds = task_execution.heartbeat_interval_seconds or task.heartbeat_interval_seconds
    task_execution.task_max_concurrency = task_execution.task_max_concurrency or task.max_concurrency
    task_execution.max_conflicting_age_seconds = task_execution.max_conflicting_age_seconds or task.max_age_seconds
    if task_execution.process_max_retries is None:
        task_execution.process_max_retries = task.default_max_retries

    run_env = task.run_environment
    args = self.add_creation_args(self.make_common_args(
        include_launch_type=True, task_execution=task_execution),
        task_execution=task_execution)

    # Resolve resources/roles: execution override -> Task -> Run Environment
    cpu_units = task_execution.allocated_cpu_units \
        or task.allocated_cpu_units or self.DEFAULT_CPU_UNITS
    memory_mb = task_execution.allocated_memory_mb \
        or task.allocated_memory_mb or self.DEFAULT_MEMORY_MB
    execution_role_arn = task_execution.aws_ecs_execution_role \
        or task.aws_ecs_default_execution_role \
        or run_env.aws_ecs_default_execution_role
    task_role_arn = task_execution.aws_ecs_task_role \
        or task.aws_ecs_default_task_role \
        or run_env.aws_ecs_default_task_role

    # ECS expects the environment as a list of {'name', 'value'} dicts
    environment = task_execution.make_environment()
    flattened_environment = []
    for name, value in environment.items():
        flattened_environment.append({
            'name': name,
            'value': value
        })

    logger.info(f"manually_start() with args = {args}, " +
                f"{cpu_units=}, {memory_mb=}, " +
                f"{execution_role_arn=}, {task_role_arn=}")

    # Record the resolved settings on the execution row before starting
    task_execution.aws_ecs_cluster_arn = args['cluster']
    task_execution.aws_ecs_task_definition_arn = args['taskDefinition']
    task_execution.aws_ecs_platform_version = args['platformVersion']
    task_execution.allocated_cpu_units = cpu_units
    task_execution.allocated_memory_mb = memory_mb
    task_execution.aws_ecs_launch_type = args['launchType']
    task_execution.aws_ecs_execution_role = execution_role_arn
    task_execution.aws_ecs_task_role = task_role_arn
    nc = args['networkConfiguration']['awsvpcConfiguration']
    task_execution.aws_subnets = nc['subnets']
    task_execution.aws_ecs_security_groups = nc['securityGroups']
    task_execution.aws_ecs_assign_public_ip = \
        (nc['assignPublicIp'] == 'ENABLED')
    task_execution.save()
    task.latest_task_execution = task_execution
    task.save()
    try:
        ecs_client = run_env.make_boto3_client('ecs')
        overrides = {
            'containerOverrides': [
                {
                    'name': task.aws_ecs_main_container_name,
                    # 'command': [
                    #     'string',
                    # ],
                    'environment': flattened_environment,
                    'cpu': cpu_units,
                    'memory': memory_mb,
                    # 'memoryReservation': task_execution.allocated_memory_mb or task.allocated_memory_mb,
                    # 'resourceRequirements': [
                    #     {
                    #         'value': 'string',
                    #         'type': 'GPU'
                    #     },
                    # ]
                },
            ],
            'executionRoleArn': execution_role_arn,
        }
        if task_role_arn:
            overrides['taskRoleArn'] = task_role_arn
        args.update({
            'overrides': overrides,
            'count': 1,
            'startedBy': 'CloudReactor',
            # group='string',
            # placementConstraints=[
            #     {
            #         'type': 'distinctInstance' | 'memberOf',
            #         'expression': 'string'
            #     },
            # ],
            # placementStrategy=[
            #     {
            #         'type': 'random' | 'spread' | 'binpack',
            #         'field': 'string'
            #     },
            # ],
        })
        rv = ecs_client.run_task(**args)
        logger.info(f"Got run_task() return value {rv}")
        # TODO: handle failures in rv['failures'][]
        task_execution.aws_ecs_task_arn = rv['tasks'][0]['taskArn']
    except Exception:
        from ..models import TaskExecution
        logger.warning(f'Failed to start Task {task.uuid}', exc_info=True)
        task_execution.status = TaskExecution.Status.FAILED
        task_execution.stop_reason = TaskExecution.StopReason.FAILED_TO_START
        # TODO: add info from exception
        task_execution.finished_at = timezone.now()
        task_execution.save()
def make_aws_ecs_service_name(self, index: int = 0) -> str:
    """Return the canonical ECS service name for this Task:
    'CR_<task uuid>_<index>'."""
    return f"CR_{self.task.uuid}_{index}"
def find_aws_ecs_service(self, ecs_client=None) -> Optional[Any]:
    """Look up the ECS service for this Task, by stored ARN or by the
    canonical service name.

    Best-effort: returns the first matching service description dict
    from describe_services(), or None if nothing matched or any error
    occurred (errors are logged, never raised).

    NOTE(review): 'Optional' does not appear in the visible
    'from typing import ...' line of this module -- presumably it
    arrives via 'from ..common.aws import *'; verify or import it
    explicitly.
    """
    task = self.task
    run_env = task.run_environment
    if ecs_client is None:
        ecs_client = run_env.make_boto3_client('ecs')
    service_arn_or_name = task.aws_ecs_service_arn or \
        self.make_aws_ecs_service_name()
    cluster = task.aws_ecs_default_cluster_arn or \
        run_env.aws_ecs_default_cluster_arn
    try:
        response_dict = ecs_client.describe_services(
            cluster=cluster,
            services=[service_arn_or_name])
        services = response_dict['services']
        if len(services) == 0:
            logger.info(f"No service named '{service_arn_or_name}' found for cluster '{cluster}'")
            return None
        return services[0]
    except Exception:
        # deliberately broad: lookup is best-effort
        logger.warning("Can't describe services", exc_info=True)
        return None
def make_events_client(self):
    """Create a boto3 EventBridge ('events') client via the Task's
    Run Environment."""
    return self.task.run_environment.make_boto3_client('events')
def assign_public_ip_str(self, task_execution: Optional['TaskExecution'] = None) -> | |
S3OptionsFilter("case_details.registered",
cols = 2,
hidden = True,
options = opt_yes_no,
),
S3OptionsFilter("case_details.enrolled_in_school",
cols = 2,
hidden = True,
options = opt_yes_no,
),
S3DateFilter("date_of_birth",
#label = T("Date of Birth"),
hidden = True,
),
S3DateFilter("dvr_case.date",
#label = T("Registration Date"),
hidden = True,
),
S3DateFilter("dvr_case_activity.activity_id$start_date",
label = T("Activity Start Date"),
hidden = True,
),
S3DateFilter("dvr_case_activity.activity_id$end_date",
label = T("Activity End Date"),
hidden = True,
),
S3DateFilter("dvr_case_activity.start_date",
label = T("Protection Response Opened on"),
hidden = True,
),
]
if status_filter:
filter_widgets.insert(2, status_filter)
# Update configuration
resource.configure(crud_form = crud_form,
filter_widgets = filter_widgets,
)
# Custom list fields (must be outside of r.interactive)
list_fields = [(T("Ref.No."), "pe_label"),
"first_name",
#"middle_name",
"last_name",
(T("Phone"), "phone.value"),
"date_of_birth",
"gender",
"person_details.nationality",
(T("ID"), "individual_id.value"),
(T("Family ID"), "family_id.value"),
"dvr_case.date",
"dvr_case.status_id",
]
resource.configure(list_fields = list_fields,
listadd = False,
addbtn = True,
)
if r.method == "report":
# Representation of record IDs in ID aggregations
resource.table.id.represent = s3db.pr_PersonRepresent()
report_fields = ("gender",
"person_details.nationality",
"dvr_case.status_id",
"dvr_case_activity.service_id",
(T("Protection Response Sector"),"dvr_case_activity.dvr_case_activity_need.need_id"),
(T("Protection Assessment"), "dvr_case_activity.dvr_vulnerability_type_case_activity.vulnerability_type_id"),
"age_group",
"address.location_id$L2",
"dvr_case.date",
)
report_facts = [(T("Number of Beneficiaries"), "count(id)"),
]
report_options = {"rows": report_fields,
"cols": report_fields,
"fact": report_facts,
#"defaults": {
# "rows": "gender",
# "cols": "person_details.nationality",
# "fact": report_facts[0],
# }
}
# Drop DoB extra field unless age_group is used as report axis,
# NB does not detect age_group as default axis, so this would
# require a manual workaround
if "~.age_group" not in (get_vars.get("rows"),
get_vars.get("cols")):
extra_fields = resource.get_config("extra_fields")
if extra_fields:
extra_fields = [s for s in extra_fields
if s != "date_of_birth"]
resource.configure(extra_fields = extra_fields or None)
resource.configure(report_options = report_options,
)
elif r.component_name == "evaluation":
s3.stylesheets.append("../themes/STL/evaluation.css")
elif controller == "hrm":
if not r.component:
table = s3db.pr_person_details
field = table.marital_status
field.readable = field.writable = False
field = table.religion
field.readable = field.writable = False
elif r.method == "record" or \
r.component_name == "human_resource":
table = s3db.hrm_human_resource
field = table.site_id
field.readable = field.writable = False
return result
s3.prep = custom_prep
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
# Remove subtitle on case tab
if r.component_name == "dvr_case" and \
isinstance(output, dict) and "subtitle" in output:
output["subtitle"] = None
return output
s3.postp = custom_postp
# Custom rheader tabs
if current.request.controller == "dvr":
attr = dict(attr)
attr["rheader"] = stl_dvr_rheader
return attr
# Register the person controller customisation hook
settings.customise_pr_person_controller = customise_pr_person_controller

# =========================================================================
# Staff Module
#
# Staff codes are used; all other optional HR features are disabled
# for this deployment
settings.hrm.use_code = True
settings.hrm.use_skills = False
settings.hrm.use_address = False
settings.hrm.use_certificates = False
settings.hrm.use_credentials = False
settings.hrm.use_description = False
settings.hrm.use_id = False
settings.hrm.use_trainings = False
settings.hrm.staff_departments = False
settings.hrm.teams = False
settings.hrm.staff_experience = False
def customise_hrm_human_resource_controller(**attr):
    """Customise the HRM human resource controller: hide the unused
    Facility field and its filters, and reduce the list fields."""
    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep first, chaining onto the default behavior
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True
        if r.controller == "hrm":
            resource = r.resource
            # Hide "Facility" from form (unused)
            table = resource.table
            field = table.site_id
            field.readable = field.writable = False
            # Don't need Location/Facility filters either
            std_filters = resource.get_config("filter_widgets")
            filter_widgets = []
            for filter_widget in std_filters:
                if filter_widget.field in ("location_id", "site_id"):
                    continue
                filter_widgets.append(filter_widget)
            # Custom list fields
            list_fields = ["person_id",
                           "code",
                           "job_title_id",
                           "organisation_id",
                           (T("Email"), "email.value"),
                           (settings.get_ui_label_mobile_phone(), "phone.value"),
                           ]
            # Update resource config
            resource.configure(list_fields = list_fields,
                               filter_widgets = filter_widgets,
                               )
            # Sort filterOptionsS3 results alphabetically
            if r.representation == "json":
                resource.configure(orderby = ["pr_person.first_name",
                                              "pr_person.middle_name",
                                              "pr_person.last_name",
                                              ],
                                   )
        return result
    s3.prep = custom_prep
    return attr
# Register the HR controller customisation hook
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller

# =========================================================================
# Organisation Registry
#
# Organisation branches and hierarchical service categories are enabled
settings.org.branches = True
settings.org.services_hierarchical = True
# Uncomment this to make tree view the default for "Branches" tab
#settings.org.branches_tree_view = True
def customise_org_organisation_controller(**attr):
    """Customise the organisation controller: build the rheader tabs
    (inserting a Branches tab when branches are enabled) and attach
    the standard org rheader with those tabs."""
    rheader_tabs = [(T("Basic Details"), None),
                    (T("Staff & Volunteers"), "human_resource"),
                    ]
    if settings.get_org_branches():
        # Tree view and flat branch list use different component names
        branches_component = "hierarchy" \
                             if settings.get_org_branches_tree_view() \
                             else "branch"
        rheader_tabs.insert(1, (T("Branches"), branches_component))

    def org_rheader(r, tabs=rheader_tabs):
        return current.s3db.org_rheader(r, tabs=tabs)

    custom_attr = dict(attr)
    custom_attr["rheader"] = org_rheader
    return custom_attr
# Register the organisation controller customisation hook
settings.customise_org_organisation_controller = customise_org_organisation_controller

# -------------------------------------------------------------------------
def customise_org_facility_controller(**attr):
    """Customise the facility controller: declare the rooms component
    and attach the custom STL org rheader."""
    # Facilities have rooms as a component (linked on site_id)
    current.s3db.add_components("org_facility",
                                org_room = "site_id",
                                )

    # Custom rheader
    updated_attr = dict(attr)
    updated_attr["rheader"] = stl_org_rheader
    return updated_attr
# Register the facility controller customisation hook
settings.customise_org_facility_controller = customise_org_facility_controller

# -------------------------------------------------------------------------
def customise_org_service_controller(**attr):
    """Customise the service controller: lock down name/parent of the
    root service categories that other functionality depends on."""
    # NOTE(review): s3db is unused in this function
    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep first, chaining onto the default behavior
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True
        # Prevent name/parent change for service categories
        # with dependent functionality:
        record = r.record
        if record and \
           record.name in (INDIVIDUAL_SUPPORT, MENTAL_HEALTH) and \
           not record.parent:
            field = r.table.name
            field.writable = False
            field = r.table.parent
            field.readable = field.writable = False
            # @todo: delete should be prevented too?
            #r.resource.configure(deletable = False)
        return result
    s3.prep = custom_prep
    return attr
# Register the service controller customisation hook
settings.customise_org_service_controller = customise_org_service_controller

# =========================================================================
# Project Module
#
# 3W reporting with project codes; sectors and the staff assignment
# tab are not used in this deployment
settings.project.mode_3w = True
settings.project.codes = True
settings.project.sectors = False
settings.project.assign_staff_tab = False

# -------------------------------------------------------------------------
def customise_project_project_resource(r, tablename):
    """Customise the project resource: simplified CRUD form, reduced
    list fields and a single free-text search filter."""
    from s3 import S3SQLCustomForm, S3TextFilter

    # Simplified project form
    form = S3SQLCustomForm("organisation_id",
                           "code",
                           "name",
                           "description",
                           "comments",
                           )

    # Single free-text search across the main project fields
    search_widget = S3TextFilter(["name",
                                  "code",
                                  "description",
                                  "organisation_id$name",
                                  "comments",
                                  ],
                                 label = current.T("Search"),
                                 )

    current.s3db.configure("project_project",
                           crud_form = form,
                           filter_widgets = [search_widget],
                           list_fields = ["code",
                                          "name",
                                          "organisation_id",
                                          ],
                           )
# Register the project resource customisation hook
settings.customise_project_project_resource = customise_project_project_resource

# -------------------------------------------------------------------------
def customise_project_project_controller(**attr):
    """Customise the project controller: relabel the "code" field and
    attach the custom STL project rheader."""
    T = current.T
    # BUGFIX: this was 's3db = current.db' -- binding the raw DAL to a
    # variable named s3db.  current.db.project_project only works if the
    # project model happens to be loaded already; current.s3db loads the
    # model on demand and matches every other customise_* function here.
    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep first, chaining onto the default behavior
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True
        # Customise fields
        table = s3db.project_project
        field = table.code
        field.label = T("Code")
        return result
    s3.prep = custom_prep

    # Custom rheader
    attr = dict(attr)
    attr["rheader"] = stl_project_rheader
    return attr
# =========================================================================
# Modules
# Comment/uncomment modules here to disable/enable them
# Modules menu is defined in modules/eden/menu.py
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
#("sync", Storage(
# name_nice = T("Synchronization"),
# #description = "Synchronization",
# restricted = True,
# access = "|1|", # Only Administrators can see this module in the default menu & access the controller
# module_type = None # This item is handled separately for the menu
#)),
#("tour", Storage(
# name_nice = T("Guided Tour Functionality"),
# module_type = None,
#)),
#("translate", Storage(
# name_nice = T("Translation Functionality"),
# #description = "Selective translation of strings based on module.",
# module_type = None,
#)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 1
)),
("hrm", Storage(
name_nice = T("Staff"),
#description = "Human Resources Management",
restricted = True,
module_type = 2,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
restricted = True,
module_type = 2,
)),
#("cms", Storage(
# name_nice = T("Content Management"),
# #description = "Content Management System",
# restricted = True,
# module_type = 10,
#)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and | |
task=HELLO_WORLD,
health_check_config=HealthCheckConfig(
health_checker=HealthCheckerConfig(shell=shell_config),
interval_secs=interval_secs,
initial_interval_secs=initial_interval_secs,
max_consecutive_failures=max_consecutive_failures,
min_consecutive_successes=min_consecutive_successes,
timeout_secs=timeout_secs
)
).json_dumps()
)
)
assigned_task = AssignedTask(task=task_config, instanceId=1, assignedPorts={'foo': 9001})
execconfig_data = json.loads(assigned_task.task.executorConfig.data)
assert execconfig_data[
'health_check_config']['health_checker']['shell']['shell_command'] == 'failed command'
mock_sandbox = mock.Mock(spec_set=SandboxInterface)
type(mock_sandbox).root = mock.PropertyMock(return_value='/some/path')
type(mock_sandbox).is_filesystem_image = mock.PropertyMock(return_value=False)
health_checker = HealthCheckerProvider().from_assigned_task(assigned_task, mock_sandbox)
hc = health_checker.threaded_health_checker
assert hc.interval == interval_secs
assert hc.grace_period_secs == initial_interval_secs
assert hc.max_consecutive_failures == max_consecutive_failures
assert hc.min_consecutive_successes == min_consecutive_successes
mock_getpwnam.assert_called_once_with(task_config.job.role)
@mock.patch('pwd.getpwnam')
def test_from_assigned_task_shell_no_demotion(self, mock_getpwnam):
    """With nosetuid_health_checks=True the shell health checker honors
    the configured intervals/thresholds and never looks up the role's
    passwd entry (i.e. no privilege demotion)."""
    interval_secs = 17
    initial_interval_secs = 3
    max_consecutive_failures = 2
    min_consecutive_successes = 2
    timeout_secs = 5
    shell_config = ShellHealthChecker(shell_command='failed command')
    task_config = TaskConfig(
        job=JobKey(role='role', environment='env', name='name'),
        executorConfig=ExecutorConfig(
            name='thermos-generic',
            data=MESOS_JOB(
                task=HELLO_WORLD,
                health_check_config=HealthCheckConfig(
                    health_checker=HealthCheckerConfig(shell=shell_config),
                    interval_secs=interval_secs,
                    initial_interval_secs=initial_interval_secs,
                    max_consecutive_failures=max_consecutive_failures,
                    min_consecutive_successes=min_consecutive_successes,
                    timeout_secs=timeout_secs
                )
            ).json_dumps()
        )
    )
    assigned_task = AssignedTask(task=task_config, instanceId=1, assignedPorts={'foo': 9001})
    execconfig_data = json.loads(assigned_task.task.executorConfig.data)
    # sanity-check that the shell command survived serialization
    assert execconfig_data[
        'health_check_config']['health_checker']['shell']['shell_command'] == 'failed command'
    mock_sandbox = mock.Mock(spec_set=SandboxInterface)
    type(mock_sandbox).root = mock.PropertyMock(return_value='/some/path')
    type(mock_sandbox).is_filesystem_image = mock.PropertyMock(return_value=False)
    health_checker = HealthCheckerProvider(nosetuid_health_checks=True).from_assigned_task(
        assigned_task,
        mock_sandbox)
    hc = health_checker.threaded_health_checker
    # Configured values must be propagated to the threaded checker
    assert hc.interval == interval_secs
    assert hc.grace_period_secs == initial_interval_secs
    assert hc.max_consecutive_failures == max_consecutive_failures
    assert hc.min_consecutive_successes == min_consecutive_successes
    # Should not be trying to access role's user info.
    assert not mock_getpwnam.called
@mock.patch.dict(os.environ, {'MESOS_DIRECTORY': '/some/path'})
@mock.patch('pwd.getpwnam')
def test_from_assigned_task_shell_filesystem_image(self, mock_getpwnam):
    """Shell health check inside a filesystem-image sandbox: the raw command
    must be wrapped (wrapped_cmd is non-None) so it runs inside the task's
    filesystem via mesos-containerizer.
    """
    interval_secs = 17
    initial_interval_secs = 3
    max_consecutive_failures = 2
    min_consecutive_successes = 2
    timeout_secs = 5
    shell_config = ShellHealthChecker(shell_command='failed command')
    task_config = TaskConfig(
        job=JobKey(role='role', environment='env', name='name'),
        executorConfig=ExecutorConfig(
            name='thermos-generic',
            data=MESOS_JOB(
                task=HELLO_WORLD,
                health_check_config=HealthCheckConfig(
                    health_checker=HealthCheckerConfig(shell=shell_config),
                    interval_secs=interval_secs,
                    initial_interval_secs=initial_interval_secs,
                    max_consecutive_failures=max_consecutive_failures,
                    min_consecutive_successes=min_consecutive_successes,
                    timeout_secs=timeout_secs
                )
            ).json_dumps()
        )
    )
    assigned_task = AssignedTask(task=task_config, instanceId=1, assignedPorts={'foo': 9001})
    execconfig_data = json.loads(assigned_task.task.executorConfig.data)
    assert execconfig_data[
        'health_check_config']['health_checker']['shell']['shell_command'] == 'failed command'
    mock_sandbox = mock.Mock(spec_set=SandboxInterface)
    type(mock_sandbox).root = mock.PropertyMock(return_value='/some/path')
    # Filesystem image => the provider must take the containerized path.
    type(mock_sandbox).is_filesystem_image = mock.PropertyMock(return_value=True)
    with mock.patch('apache.aurora.executor.common.health_checker.ShellHealthCheck') as mock_shell:
        HealthCheckerProvider(
            nosetuid_health_checks=False,
            mesos_containerizer_path='/some/path/mesos-containerizer').from_assigned_task(
                assigned_task,
                mock_sandbox)
        # Matcher asserting only that a wrapped command was supplied at all.
        class NotNone(object):
            def __eq__(self, other):
                return other is not None
        assert mock_shell.mock_calls == [
            mock.call(
                raw_cmd='failed command',
                wrapped_cmd=NotNone(),
                preexec_fn=None,
                timeout_secs=5.0
            )
        ]
def test_interpolate_cmd(self):
    """Making sure thermos.ports[foo] gets correctly substituted with assignedPorts info."""
    interval_secs = 17
    initial_interval_secs = 3
    max_consecutive_failures = 2
    min_consecutive_successes = 2
    timeout_secs = 5
    # Mustache-style port reference that interpolate_cmd must resolve.
    shell_cmd = 'FOO_PORT={{thermos.ports[foo]}} failed command'
    shell_config = ShellHealthChecker(shell_command=shell_cmd)
    task_config = TaskConfig(
        executorConfig=ExecutorConfig(
            name='thermos-generic',
            data=MESOS_JOB(
                task=HELLO_WORLD,
                health_check_config=HealthCheckConfig(
                    health_checker=HealthCheckerConfig(shell=shell_config),
                    interval_secs=interval_secs,
                    initial_interval_secs=initial_interval_secs,
                    max_consecutive_failures=max_consecutive_failures,
                    min_consecutive_successes=min_consecutive_successes,
                    timeout_secs=timeout_secs
                )
            ).json_dumps()
        )
    )
    assigned_task = AssignedTask(task=task_config, instanceId=1, assignedPorts={'foo': 9001})
    interpolated_cmd = HealthCheckerProvider.interpolate_cmd(
        assigned_task,
        cmd=shell_cmd
    )
    assert interpolated_cmd == 'FOO_PORT=9001 failed command'
def test_from_assigned_task_no_health_port(self):
    """Without a 'health' port and without a shell command there is nothing
    to check, so the provider must fall back to a NoopHealthChecker.
    """
    interval_secs = 17
    initial_interval_secs = 3
    max_consecutive_failures = 2
    min_consecutive_successes = 2
    timeout_secs = 5
    task_config = TaskConfig(
        executorConfig=ExecutorConfig(
            name='thermos-generic',
            data=MESOS_JOB(
                task=HELLO_WORLD,
                health_check_config=HealthCheckConfig(
                    interval_secs=interval_secs,
                    initial_interval_secs=initial_interval_secs,
                    max_consecutive_failures=max_consecutive_failures,
                    min_consecutive_successes=min_consecutive_successes,
                    timeout_secs=timeout_secs
                )
            ).json_dumps()
        )
    )
    # No health port and we don't have a shell_command.
    assigned_task = AssignedTask(task=task_config, instanceId=1, assignedPorts={'http': 9001})
    health_checker = HealthCheckerProvider().from_assigned_task(assigned_task, None)
    assert isinstance(health_checker, NoopHealthChecker)
class TestThreadedHealthChecker(unittest.TestCase):
    """Unit tests for HealthChecker's internal threaded checker: snooze-file
    handling, success/failure counting around the grace period, fail-fast
    behaviour, and the run/start/stop lifecycle.  The health callable and the
    clock are both mocks, so no real sleeping or checking happens.
    """

    def setUp(self):
        """Build two checkers around a mocked health callable and clock: one
        without a sandbox, and one whose sandbox exists (for snooze tests).
        """
        self.health = mock.Mock()
        self.health.return_value = (True, 'Fake')
        self.sandbox = mock.Mock(spec_set=SandboxInterface)
        self.sandbox.exists.return_value = True
        self.sandbox.root = '/root'
        self.initial_interval_secs = 15
        self.interval_secs = 10
        self.max_consecutive_failures = 1
        self.min_consecutive_successes = 2
        self.clock = mock.Mock(spec=time)
        self.clock.time.return_value = 0
        self.health_checker = HealthChecker(
            self.health,
            None,
            self.interval_secs,
            self.initial_interval_secs,
            self.max_consecutive_failures,
            self.min_consecutive_successes,
            self.clock)
        self.health_checker_sandbox_exists = HealthChecker(
            self.health,
            self.sandbox,
            self.interval_secs,
            self.initial_interval_secs,
            self.max_consecutive_failures,
            self.min_consecutive_successes,
            self.clock)

    def test_perform_check_if_not_disabled_snooze_file_is_none(self):
        # With no snooze file configured the real check must run.
        # NOTE(review): this invokes the sandbox-less checker but samples the
        # sandbox checker's metrics — confirm that mix is intentional.
        self.health_checker_sandbox_exists.threaded_health_checker.snooze_file = None
        assert self.health.call_count == 0
        assert self.health_checker_sandbox_exists.metrics.sample()['snoozed'] == 0
        self.health_checker.threaded_health_checker._perform_check_if_not_disabled()
        assert self.health.call_count == 1
        assert self.health_checker_sandbox_exists.metrics.sample()['snoozed'] == 0

    @mock.patch('os.path', spec_set=os.path)
    def test_perform_check_if_not_disabled_no_snooze_file(self, mock_os_path):
        # Snooze file configured but absent on disk: check still runs.
        mock_os_path.isfile.return_value = False
        assert self.health.call_count == 0
        assert self.health_checker_sandbox_exists.metrics.sample()['snoozed'] == 0
        self.health_checker_sandbox_exists.threaded_health_checker._perform_check_if_not_disabled()
        assert self.health.call_count == 1
        assert self.health_checker_sandbox_exists.metrics.sample()['snoozed'] == 0

    @mock.patch('os.path', spec_set=os.path)
    def test_perform_check_if_not_disabled_snooze_file_exists(self, mock_os_path):
        # Snooze file present: the check is skipped and reported healthy.
        mock_os_path.isfile.return_value = True
        assert self.health.call_count == 0
        assert self.health_checker_sandbox_exists.metrics.sample()['snoozed'] == 0
        result = (
            self.health_checker_sandbox_exists.threaded_health_checker._perform_check_if_not_disabled())
        assert self.health.call_count == 0
        assert self.health_checker_sandbox_exists.metrics.sample()['snoozed'] == 1
        assert result == (True, None)

    def test_maybe_update_health_check_count_reset_count(self):
        # A success resets the failure streak and vice versa (post-grace).
        hc = self.health_checker.threaded_health_checker
        hc.attempts = hc.forgiving_attempts
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 0
        hc.attempts += 1
        hc._maybe_update_health_check_count(True, 'reason-1')
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 1
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-2')
        assert hc.current_consecutive_failures == 1
        assert hc.current_consecutive_successes == 0
        hc.attempts += 1
        hc._maybe_update_health_check_count(True, 'reason-3')
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 1

    def test_maybe_update_health_check_count_ignore_failures_within_grace_period(self):
        # Failures during the forgiving (grace) attempts are not counted;
        # counting starts once attempts exceed forgiving_attempts.
        hc = self.health_checker.threaded_health_checker
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 0
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-1')
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 0
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-2')
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 0
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-3')
        assert hc.current_consecutive_failures == 1
        assert hc.current_consecutive_successes == 0
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-4')
        assert hc.current_consecutive_failures == 2
        assert hc.current_consecutive_successes == 0

    def test_maybe_update_health_check_count_dont_ignore_failures_after_grace_period(self):
        # Past the grace period every failure increments the streak.
        hc = self.health_checker.threaded_health_checker
        hc.attempts = hc.forgiving_attempts
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 0
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-1')
        assert hc.current_consecutive_failures == 1
        assert hc.current_consecutive_successes == 0
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-2')
        assert hc.current_consecutive_failures == 2
        assert hc.current_consecutive_successes == 0

    def test_maybe_update_health_check_count_fail_fast(self):
        # Never became healthy: once failures exceed the maximum the checker
        # flips unhealthy while still not running, recording the last reason.
        hc = self.health_checker.threaded_health_checker
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 0
        assert hc.healthy is True
        assert hc.running is False
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-1')
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 0
        assert hc.running is False
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-2')
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 0
        assert hc.running is False
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-3')
        assert hc.current_consecutive_failures == 1
        assert hc.current_consecutive_successes == 0
        assert hc.running is False
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-4')
        assert hc.current_consecutive_failures == 2
        assert hc.current_consecutive_successes == 0
        assert hc.running is False
        assert hc.healthy is False
        assert hc.reason == 'reason-4'

    def test_maybe_update_health_check_count_max_failures_1(self):
        # One prior success, then failures past max_consecutive_failures (1)
        # flip the checker unhealthy on the second failure.
        hc = self.health_checker.threaded_health_checker
        hc.current_consecutive_successes = 1
        hc.attempts = hc.forgiving_attempts
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 1
        assert hc.healthy is True
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-1')
        assert hc.current_consecutive_failures == 1
        assert hc.current_consecutive_successes == 0
        assert hc.healthy is True
        hc.attempts += 1
        hc._maybe_update_health_check_count(False, 'reason-2')
        assert hc.current_consecutive_failures == 2
        assert hc.current_consecutive_successes == 0
        assert hc.healthy is False
        assert hc.reason == 'reason-2'

    def test_maybe_update_health_check_count_success(self):
        # running flips True only after min_consecutive_successes (2) is met.
        hc = self.health_checker.threaded_health_checker
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 0
        assert hc.running is False
        assert hc.healthy is True
        hc.attempts += 1
        hc._maybe_update_health_check_count(True, 'reason')
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 1
        assert hc.running is False
        assert hc.healthy is True
        hc.attempts += 1
        hc._maybe_update_health_check_count(True, 'reason')
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 2
        assert hc.running is True
        assert hc.healthy is True
        hc.attempts += 1
        hc._maybe_update_health_check_count(True, 'reason')
        assert hc.current_consecutive_failures == 0
        assert hc.current_consecutive_successes == 3
        assert hc.running is True
        assert hc.healthy is True

    def test_run_success(self):
        # Three healthy iterations (dead.is_set goes False x3 then True).
        self.health.return_value = (True, 'success')
        mock_is_set = mock.Mock(spec=threading._Event.is_set)
        liveness = [False, False, False, True]
        mock_is_set.side_effect = lambda: liveness.pop(0)
        self.health_checker.threaded_health_checker.dead.is_set = mock_is_set
        self.health_checker.threaded_health_checker.run()
        assert self.clock.sleep.call_count == 3
        assert self.health_checker.threaded_health_checker.current_consecutive_failures == 0
        assert self.health_checker.threaded_health_checker.current_consecutive_successes == 3
        assert self.health_checker.threaded_health_checker.running is True
        assert self.health_checker.threaded_health_checker.healthy is True
        assert self.health_checker.threaded_health_checker.reason is None

    def test_run_failure(self):
        # Four failing iterations: never runs, ends unhealthy with the reason.
        self.health.return_value = (False, 'failure')
        mock_is_set = mock.Mock(spec=threading._Event.is_set)
        liveness = [False, False, False, False, True]
        mock_is_set.side_effect = lambda: liveness.pop(0)
        self.health_checker.threaded_health_checker.dead.is_set = mock_is_set
        self.health_checker.threaded_health_checker.run()
        assert self.clock.sleep.call_count == 4
        assert self.health_checker.threaded_health_checker.current_consecutive_failures == 2
        assert self.health_checker.threaded_health_checker.current_consecutive_successes == 0
        assert self.health_checker.threaded_health_checker.running is False
        assert self.health_checker.threaded_health_checker.healthy is False
        assert self.health_checker.threaded_health_checker.reason == 'failure'

    def test_run_failure_unhealthy_when_failfast(self):
        # A single success is not enough to start running, so the trailing
        # failures fail fast (running stays False).
        health_status = [(False, 'failure-1'), (True, None), (False, 'failure-3'), (False, 'failure-4')]
        self.health.side_effect = lambda: health_status.pop(0)
        mock_is_set = mock.Mock(spec=threading._Event.is_set)
        liveness = [False, False, False, False, True]
        mock_is_set.side_effect = lambda: liveness.pop(0)
        self.health_checker.threaded_health_checker.dead.is_set = mock_is_set
        self.health_checker.threaded_health_checker.run()
        assert self.clock.sleep.call_count == 4
        assert self.health_checker.threaded_health_checker.current_consecutive_failures == 2
        assert self.health_checker.threaded_health_checker.current_consecutive_successes == 0
        assert self.health_checker.threaded_health_checker.running is False
        assert self.health_checker.threaded_health_checker.healthy is False
        assert self.health_checker.threaded_health_checker.reason == 'failure-5'

    def test_run_unhealthy_after_callback(self):
        # Two successes satisfy min_consecutive_successes, so running stays
        # True even after the checker later turns unhealthy.
        health_status = [(True, None), (True, None), (False, 'failure-4'), (False, 'failure-5')]
        self.health.side_effect = lambda: health_status.pop(0)
        mock_is_set = mock.Mock(spec=threading._Event.is_set)
        liveness = [False, False, False, False, True]
        mock_is_set.side_effect = lambda: liveness.pop(0)
        self.health_checker.threaded_health_checker.dead.is_set = mock_is_set
        self.health_checker.threaded_health_checker.run()
        assert self.clock.sleep.call_count == 4
        assert self.health_checker.threaded_health_checker.current_consecutive_failures == 2
        assert self.health_checker.threaded_health_checker.current_consecutive_successes == 0
        assert self.health_checker.threaded_health_checker.running is True
        assert self.health_checker.threaded_health_checker.healthy is False
        assert self.health_checker.threaded_health_checker.reason == 'failure-5'

    @mock.patch('apache.aurora.executor.common.health_checker.ExceptionalThread.start',
                spec=ExceptionalThread.start)
    def test_start(self, mock_start):
        # start() must delegate to ExceptionalThread.start exactly once.
        assert mock_start.call_count == 0
        self.health_checker.threaded_health_checker.start()
        mock_start.assert_called_once_with(self.health_checker.threaded_health_checker)

    def test_stop(self):
        # stop() signals the dead event that run() polls.
        assert not self.health_checker.threaded_health_checker.dead.is_set()
        self.health_checker.threaded_health_checker.stop()
        assert self.health_checker.threaded_health_checker.dead.is_set()
class TestThreadedHealthCheckerWithDefaults(unittest.TestCase):
'''
Similar tests as above but with the default health check configuration. This
will ensure that the defaults are always valid.
'''
def setUp(self):
self.health = mock.Mock()
self.health.return_value = (True, 'Fake')
self.sandbox = mock.Mock(spec_set=SandboxInterface)
self.sandbox.exists.return_value = True
self.sandbox.root = '/root'
self.health_checker = HealthCheckerProvider().from_assigned_task(
AssignedTask(
task=TaskConfig(
executorConfig=ExecutorConfig(
name='thermos',
data=MESOS_JOB(task=HELLO_WORLD).json_dumps())),
instanceId=1,
assignedPorts={'health': 9001}),
self.sandbox)
self.health_checker.threaded_health_checker.checker = self.health
def test_perform_check_if_not_disabled_snooze_file_is_none(self):
self.health_checker.threaded_health_checker.snooze_file = None
assert self.health.call_count == 0
assert self.health_checker.metrics.sample()['snoozed'] == 0
self.health_checker.threaded_health_checker._perform_check_if_not_disabled()
assert self.health.call_count == 1
assert self.health_checker.metrics.sample()['snoozed'] == 0
@mock.patch('os.path', spec_set=os.path)
def test_perform_check_if_not_disabled_no_snooze_file(self, mock_os_path):
mock_os_path.isfile.return_value = False
assert self.health.call_count == 0
assert self.health_checker.metrics.sample()['snoozed'] == 0
self.health_checker.threaded_health_checker._perform_check_if_not_disabled()
assert self.health.call_count == 1
assert self.health_checker.metrics.sample()['snoozed'] == 0
@mock.patch('os.path', spec_set=os.path)
def test_perform_check_if_not_disabled_snooze_file_exists(self, mock_os_path):
mock_os_path.isfile.return_value = True
assert self.health.call_count == 0
| |
<reponame>bshafi/StockMomentum
from datetime import date, datetime, timedelta, timezone, tzinfo, time
from types import CellType
from typing import Any, Tuple, List
from sm_util import historical_database
from enum import Enum
import psycopg2
class StockAction(Enum):
    """Trade direction emitted by the strategy functions in this module."""
    BUY = 0
    SELL = 1
def sentiment_trader_antivix(con, buy_point, observation_period, symbol, start_date: datetime, end_date: datetime):
    """Daily anti-VIX strategy.

    Buys `symbol` at the first daily close whose timestamp is on/after a VIX
    close >= `buy_point`, then sells once `observation_period` days have
    elapsed.  Returns chronological (timestamp, StockAction, price) tuples.
    """
    cursor = con.cursor()
    cursor.execute("""
SELECT timestamp, close
FROM vix_daily
WHERE timestamp BETWEEN %s AND %s
ORDER BY timestamp ASC
""", (start_date, end_date))
    vix_rows = cursor.fetchall()
    cursor.execute("""
SELECT timestamp, close
FROM candlestick_daily
WHERE timestamp BETWEEN %s AND %s AND symbol = %s
ORDER BY timestamp ASC
""", (start_date, end_date, symbol))
    candles = cursor.fetchall()
    trades = []
    candle_idx = 0
    held_since = None  # candle timestamp of the open position, if any
    for vix_ts, vix_close in vix_rows:
        # Align to the first candle at or after this VIX reading.
        while candle_idx < len(candles) and candles[candle_idx][0] < vix_ts:
            candle_idx += 1
        if candle_idx >= len(candles):
            break
        candle_ts, price = candles[candle_idx]
        if held_since is None and vix_close >= buy_point:
            trades.append((candle_ts, StockAction.BUY, price))
            held_since = candle_ts
        if held_since is not None and (vix_ts - held_since) >= timedelta(days=observation_period):
            trades.append((candle_ts, StockAction.SELL, price))
            held_since = None
    return trades
def sentiment_trader_antivix_5min(con, buy_point, observation_period, symbol, start_date: datetime, end_date: datetime):
    """5-minute variant of sentiment_trader_antivix.

    Identical buy/sell rule, but only acts on bars whose VIX timestamp falls
    in the 09:00-17:59 window.  The stored data ends 2021-10-01, hence the
    guard on `end_date`.
    """
    assert(end_date <= datetime(2021, 10, 1, tzinfo=timezone.utc))
    with con.cursor() as cursor:
        cursor.execute("""
SELECT timestamp, close
FROM vix_5min
WHERE timestamp BETWEEN %s AND %s
ORDER BY timestamp ASC
""", (start_date, end_date))
        vix_rows = cursor.fetchall()
        cursor.execute("""
SELECT timestamp, close
FROM candlestick_5min
WHERE timestamp BETWEEN %s AND %s AND symbol = %s
ORDER BY timestamp ASC
""", (start_date, end_date, symbol))
        candles = cursor.fetchall()
    trades = []
    candle_idx = 0
    held_since = None  # candle timestamp of the open position, if any
    for vix_ts, vix_close in vix_rows:
        while candle_idx < len(candles) and candles[candle_idx][0] < vix_ts:
            candle_idx += 1
        if candle_idx >= len(candles):
            break
        if 9 <= vix_ts.hour <= 17:  # trade during market-ish hours only
            candle_ts, price = candles[candle_idx]
            if held_since is None and vix_close >= buy_point:
                trades.append((candle_ts, StockAction.BUY, price))
                held_since = candle_ts
            if held_since is not None and (vix_ts - held_since) >= timedelta(days=observation_period):
                trades.append((candle_ts, StockAction.SELL, price))
                held_since = None
    return trades
def buying_enclosed_vix_daily(con, buy_point, sell_point, start_date: datetime, end_date: datetime) -> List[Tuple[datetime, StockAction]]:
    """Band strategy on daily VIX closes: buy when VIX <= buy_point, sell when
    VIX >= sell_point, never holding more than one position at a time.

    Returns chronological (timestamp, StockAction) pairs.

    BUG FIX: the original appended the `date` *class* (imported from
    datetime) instead of the row's `timestamp`, so callers received the type
    object rather than the trade time, as the declared return type requires.
    """
    assert(0 <= buy_point <= 100)
    assert(0 <= sell_point <= 100)
    assert(sell_point > buy_point)
    cursor = con.cursor()
    actions = []
    has_bought = False
    cursor.execute("""SELECT timestamp, close as vix
FROM vix_daily
WHERE timestamp BETWEEN %s AND %s
ORDER BY timestamp ASC
""", (start_date, end_date))
    vix_data = cursor.fetchall()
    for (timestamp, vix) in vix_data:
        if has_bought:
            if vix >= sell_point:
                actions.append((timestamp, StockAction.SELL))  # was: `date` class
                has_bought = False
        else:
            if vix <= buy_point:
                actions.append((timestamp, StockAction.BUY))  # was: `date` class
                has_bought = True
    return actions
def get_mins_and_maxs(data, key) -> Tuple[List[Any], List[Any]]:
    """Return (local_minima, local_maxima) of *data* under *key*.

    An element is an extremum only when strictly below/above both immediate
    neighbours; endpoints and plateau members are never extrema.
    """
    minima: List[Any] = []
    maxima: List[Any] = []
    for prev_item, item, next_item in zip(data, data[1:], data[2:]):
        before, here, after = key(prev_item), key(item), key(next_item)
        if before < here > after:
            maxima.append(item)
        elif before > here < after:
            minima.append(item)
    return (minima, maxima)
def moving_avg_vix_daily(con, days_forward=1, days_prior=10) -> List[Tuple[datetime, float, float]]:
    """Predict a VIX band `days_forward` days ahead of each daily close.

    For every day (after a `days_prior` warm-up) the band is the average of
    local minima and the average of local maxima over the trailing
    `days_prior + 1` closes; an empty extrema set averages to 0.

    Returns (prediction_date, avg_local_min, avg_local_max) tuples.

    Cleanup vs original: the manual accumulation loops shadowed the builtins
    `min`/`max` and the outer `timestamp`; behaviour is unchanged.
    """
    cursor = con.cursor()
    cursor.execute("""
SELECT timestamp, close FROM vix_daily
""")
    vix_daily_close = cursor.fetchall()
    vix_perdictions = []
    for i in range(days_prior, len(vix_daily_close) - 1):
        day = vix_daily_close[i][0]
        mins, maxs = get_mins_and_maxs(vix_daily_close[i - days_prior:i + 1], lambda row: row[1])
        avg_mins = sum(value for _, value in mins) / len(mins) if mins else 0
        avg_maxs = sum(value for _, value in maxs) / len(maxs) if maxs else 0
        vix_perdictions.append((day + timedelta(days=days_forward), avg_mins, avg_maxs))
    return vix_perdictions
def buy_moving_avg_vix_daily(con, start_date: datetime, end_date: datetime, weight=1, days_prior=10) -> List[Tuple[datetime, StockAction]]:
    """Adaptive band strategy: each day derives buy/sell thresholds from the
    trailing `days_prior + 1` VIX closes (centre of average local extrema,
    widened by `weight`, clamped to [10, 90]) and trades when the close
    crosses them.  Returns chronological (timestamp, StockAction) pairs.

    BUG FIX: the original appended the `date` *class* instead of the row's
    timestamp, and additionally the row's `timestamp` variable was clobbered
    by the inner averaging loops.  The row timestamp is now kept in its own
    name and recorded in the actions.

    NOTE(review): a position is opened when vix <= the *lower* band and
    closed at the *upper* band, so the local names buy_point/sell_point read
    swapped relative to their use — semantics preserved; confirm intent.
    """
    if weight < 0:
        raise Exception("Invalid weight")
    # Pull extra history so the first in-range day has a full window.
    real_min_time = start_date - timedelta(days=days_prior)
    cursor = con.cursor()
    cursor.execute("""
SELECT timestamp, close FROM vix_daily
WHERE timestamp BETWEEN %s AND %s
""", (real_min_time, end_date))
    vix_daily_close = cursor.fetchall()
    actions = []
    has_bought = False
    for i in range(days_prior, len(vix_daily_close) - 1):
        row_timestamp, vix = vix_daily_close[i]
        minimums, maximums = get_mins_and_maxs(vix_daily_close[i - days_prior:i + 1], lambda row: row[1])
        avg_mins = sum(value for _, value in minimums) / len(minimums) if minimums else 0
        avg_maxs = sum(value for _, value in maximums) / len(maximums) if maximums else 0
        center = (avg_mins + avg_maxs) / 2
        offset = abs(avg_maxs - center)
        buy_point = min(center + weight * offset, 90)
        sell_point = max(center - weight * offset, 10)
        if has_bought:
            if vix >= buy_point:
                actions.append((row_timestamp, StockAction.SELL))  # was: `date` class
                has_bought = False
        else:
            if vix <= sell_point:
                actions.append((row_timestamp, StockAction.BUY))  # was: `date` class
                has_bought = True
    return actions
def percent_gains(actions: List[Tuple[datetime, StockAction, float]]):
    """Sum the fractional return of each BUY/SELL round trip in *actions*.

    Actions must strictly alternate BUY, SELL, BUY, ...; a trailing open BUY
    contributes nothing.  Result is a fraction (0.05 == +5%).
    """
    total = 0
    open_price = None  # price of the currently open position, if any
    for _timestamp, action, price in actions:
        assert(action in [StockAction.BUY, StockAction.SELL])
        if action == StockAction.BUY:
            assert(open_price is None)
            open_price = price
        else:  # SELL (only remaining possibility after the assert)
            assert(open_price is not None)
            total += (price - open_price) / open_price
            open_price = None
    return total
def percent_return_daily(con, h_actions: List[Tuple[datetime, StockAction]], symbol) -> float:
    """Replay (timestamp, StockAction) signals against *symbol*'s daily closes
    and return the summed round-trip return as a percentage.

    Signals outside the symbol's available candle range are dropped, and a
    leading SELL (no prior BUY) is discarded.  Each trade executes at the
    close of the first candle whose date is on/after the signal date.
    NOTE(review): at most one pending action is consumed per candle row, so
    several same-day signals would be executed on successive days — confirm.
    """
    if len(h_actions) == 0:
        return 0
    actions = sorted(h_actions, key=lambda x: x[0])
    min_date = actions[0][0]
    max_date = actions[-1][0]
    cursor = con.cursor()
    cursor.execute(f"""
SELECT timestamp, close FROM candlestick_daily
WHERE symbol = %s AND timestamp BETWEEN %s AND %s
ORDER BY timestamp ASC
""", (symbol, min_date, max_date))
    data = cursor.fetchall()
    if len(data) == 0:
        return 0
    min_symbol_date = data[0][0]
    max_symbol_date = data[-1][0]
    # Keep only signals the candle data can actually price.
    actions = list(filter(lambda x: min_symbol_date <= x[0] <= max_symbol_date, actions))
    if len(actions) != 0 and actions[0][1] == StockAction.SELL:
        actions = actions[1:]
    if len(actions) == 0:
        return 0
    gains = 0
    last_buying_price = None  # close at which the open position was bought
    i = 0  # index of the next unexecuted action
    for (timestamp, close) in data:
        date = timestamp
        if actions[i][0] <= date:
            action_date, action = actions[i]
            i = i + 1
            if action == StockAction.BUY:
                assert(last_buying_price == None)
                last_buying_price = close
            elif action == StockAction.SELL:
                assert(last_buying_price != None)
                gains = gains + (close - last_buying_price) / last_buying_price
                last_buying_price = None
        if i >= len(actions):
            break
    return gains * 100
def avg_min_loss(SYMBOL, start_date, end_date, actions_list = None, buy_point = 19, observation_period = 3, wins_only = False):
    """For each BUY/SELL round trip produced by the 5-minute anti-VIX
    strategy, find the worst drawdown (lowest close between buy and sell) and
    report trades whose drawdown exceeded 1% below the buy price.

    Returns (buy_timestamp, sell_timestamp, drawdown_percent) tuples, where
    drawdown_percent is negative.  With wins_only=True only profitable trades
    are reported.
    NOTE(review): the actions_list parameter is accepted but never used —
    the strategy is always recomputed; confirm whether it should override.
    """
    con = historical_database()
    data = None
    with con.cursor() as cursor:
        #TODO: not getting data from earlier than oct 8
        cursor.execute("SELECT timestamp, close FROM candlestick_5min WHERE symbol = %s AND timestamp BETWEEN %s AND %s ORDER BY timestamp ASC", (SYMBOL, start_date, end_date))
        data = cursor.fetchall()
    actions = sentiment_trader_antivix_5min(con, buy_point, observation_period, SYMBOL, start_date, end_date)
    def chunk2(l):
        # Yield consecutive non-overlapping pairs; drops a trailing odd item
        # (i.e. an unclosed BUY).
        for i in range(0, len(l), 2):
            data = l[i:i+2]
            if len(data) == 2:
                yield data
    last_i = 0  # candle search resumes where the previous trade ended
    values = []
    for (buy, sell) in chunk2(actions):
        buy_timestamp, buy_action, buy_price = buy
        assert(buy_action == StockAction.BUY)
        sell_timestamp, sell_action, sell_price = sell
        assert(sell_action == StockAction.SELL)
        # Locate candle indices bracketing this round trip.
        buy_i = last_i
        while data[buy_i][0] < buy_timestamp:
            buy_i = buy_i + 1
        sell_i = last_i
        while data[sell_i][0] < sell_timestamp:
            sell_i = sell_i + 1
        # Lowest close while the position was held.
        min_value = None
        for i in range(buy_i, sell_i):
            if min_value == None or data[i][1] < min_value:
                min_value = data[i][1]
        assert(min_value != None)
        # Only report trades that dipped more than 1% below the entry price.
        if min_value != None and float(min_value) < 0.99 * float(buy_price) and (not wins_only or sell_price > buy_price):
            values.append((buy_timestamp, sell_timestamp, float((min_value - buy_price)/ buy_price) * 100))
        last_i = sell_i
    return values
def count_retests(con, symbol, start_date: datetime, end_date: datetime, look_forward: timedelta = timedelta(days=1)):
data = None
with con.cursor() as cursor:
cursor.execute("""
SELECT timestamp, open, high, low, close, volume FROM candlestick_5min
WHERE symbol = %s AND timestamp BETWEEN %s AND %s
ORDER BY TIMESTAMP ASC
""", (symbol, start_date, end_date))
data = cursor.fetchall()
last_datetime = data[-1] - abs(look_forward)
last_candlestick_index = None
for i in range(len(data)-1, 0-1, -1):
timestamp, open, high, low, close, volume = data[i]
if last_datetime >= timestamp:
last_candlestick_index = i
break
retest_counts = []
for i in range(0, last_candlestick_index):
start_timestamp, start_open, start_high, start_low, start_close, start_volume = data[i]
prev_timestamp, prev_open, prev_high, prev_low, prev_close, prev_volume = data[i]
retest_count = 0
j = i + 1
while j < last_candlestick_index and (data[j][0] - start_timestamp) <= look_forward:
assert((data[j][0] - start_timestamp) > timedelta(days=0))
cur_timestamp, cur_open, cur_high, cur_low, cur_close, cur_volume = data[j]
if prev_high <= start_close <= cur_high or prev_close <= start_close <= cur_close:
retest_count = retest_count + 1
prev_timestamp, prev_open, prev_high, prev_low, prev_close, prev_volume = data[j]
j = j + 1
retest_counts.append((start_timestamp, | |
<filename>ready_patterns.py
# -*- coding: utf-8 -*-
from ast_helper import *
import idaapi
import ida_name
import ida_bytes
import ida_struct
import ida_typeinf
import idc
# Hex-Rays ctree pattern matching an inlined strlen: a 4-byte-stride pointer
# walk using the classic (~x & (x - 0x1010101)) & 0x80808080 null-byte test.
# Consumed together with replacer_strlen_global below, which collapses the
# whole chain into a single strlen_inlined() helper call.
strlen_global = """Patterns.ChainPattern([
Patterns.ExprInst(Patterns.AsgnExpr(Patterns.VarBind("t1"), Patterns.ObjBind("strlenarg"))),
Patterns.DoInst(Patterns.LnotExpr(Patterns.VarBind("t2")),Patterns.BlockInst([
Patterns.ExprInst(Patterns.AsgnExpr(Patterns.VarBind("t3"), Patterns.PtrExpr(Patterns.AnyPattern()))),
Patterns.ExprInst(Patterns.AsgAddExpr(Patterns.VarBind("t1"),Patterns.NumberExpr(Patterns.NumberConcrete(4)))),
Patterns.ExprInst(Patterns.AsgnExpr(Patterns.VarBind("t2"),
Patterns.BAndExpr(
Patterns.BAndExpr(
Patterns.BnotExpr(Patterns.VarBind("t3")),
Patterns.SubExpr(Patterns.VarBind("t3"), Patterns.NumberExpr(Patterns.NumberConcrete(0x1010101)))
),
Patterns.NumberExpr(Patterns.NumberConcrete(0x80808080))
)
)
),
], False)
),
Patterns.ExprInst(Patterns.AsgnExpr(Patterns.AnyPattern(), Patterns.AnyPattern())),
Patterns.IfInst(Patterns.AnyPattern(), Patterns.AnyPattern()),
Patterns.IfInst(Patterns.AnyPattern(), Patterns.AnyPattern()),
Patterns.ExprInst(Patterns.AsgnExpr(Patterns.VarBind("res"), Patterns.AnyPattern()))
])"""
def replacer_strlen_global(idx, ctx):
    # Handler for strlen_global: rewrites the matched instruction (idx) into
    # `<res> = strlen_inlined(<strlenarg>)` using bindings captured in ctx.
    var = ctx.get_var("res")
    varname = ctx.get_var_name(var.idx)  # NOTE(review): unused — kept as-is
    obj = ctx.get_obj("strlenarg")
    varexp = make_var_expr(var.idx, var.typ, var.mba)
    arg1 = make_obj_expr(obj.addr, obj.type, arg=True)
    arglist = ida_hexrays.carglist_t()
    arglist.push_back(arg1)
    # Synthesize the helper call and the assignment statement around it.
    val = ida_hexrays.call_helper(ida_hexrays.dummy_ptrtype(4, False), arglist, "strlen_inlined")
    insn = make_cexpr_insn(idx.ea, make_asgn_expr(varexp, val))
    idx.cleanup()
    idaapi.qswap(idx, insn)
    # del original inst because we swapped them on previous line
    del insn
    # True: the ctree was modified in place.
    return True
#=======================================
# This pattern is works with following case
# dword_XXXX = (anytype)GetProcAddr(<anyArg>, 'funcName1')
# dword_XXXY = (anytype)GetProcAddr(<anyArg>, 'funcName2')
# ....
# After running this code if we decompile function where such pattern exist we will
# automatically get:
# funcName1 = (anytype)GetProcAddr(<anyArg>, 'funcName1')
# funcName2 = (anytype)GetProcAddr(<anyArg>, 'funcName2')
#
#=======================================
# Pattern: `<globalPtr> = (cast)GetProcAddress(<any>, <nameString>)`, binding
# the destination global ("fcnPtr") and the name literal ("fcnName") for the
# getProc_addr handler below.
get_proc_addr = """Patterns.ExprInst(
Patterns.AsgnExpr(
Patterns.ObjBind("fcnPtr"),
Patterns.CastExpr(
Patterns.CallExpr(
Patterns.ObjConcrete(0x{:x}),
[Patterns.AnyPattern(), Patterns.ObjBind("fcnName")]
)
)
)
)
""".format(0x3) # 0x3 - replace by addr of getProcAddr
def getProc_addr(idx, ctx):
    # Handler for get_proc_addr: renames the global receiving the
    # GetProcAddress result after the function-name string literal.
    import ida_bytes
    obj = ctx.get_obj("fcnPtr")
    print "%x" % obj.addr
    name = ctx.get_obj("fcnName")
    name_str = ida_bytes.get_strlit_contents(name.addr, -1, -1)
    ida_name.set_name(obj.addr, name_str)
    # False: only a database rename side effect, no ctree modification.
    return False
#========================================
# This pattern will replace code like that
# struct_XXX.field_X = (anytype)sub_XXXX
# struct_XXX.field_Y = (anytype)sub_YYYY
# by
# struct_XXX.sub_XXXX = (anytype)sub_XXXX
# struct_XXX.sub_YYYY = (anytype)sub_YYYY
# where struct_XXX - global variable
# So, it's just renames structure fields
#========================================
# Pattern: `<globalStruct>.<field> = (cast)<function>` — binds the struct
# member reference ("stroff") and the function object ("fcn") so the handler
# below can rename the field after the function.
global_struct_fields_sub = """
Patterns.ExprInst(
Patterns.AsgnExpr(
Patterns.MemRefGlobalBind('stroff'),
Patterns.CastExpr(
Patterns.ObjBind('fcn'),
)
)
)"""
def rename_struct_field_as_func_name(idx, ctx):
    # Handler for global_struct_fields_sub: looks up the structure type of
    # the written global and renames the member at the matched offset after
    # the assigned function.
    import idc
    import ida_bytes
    obj = ctx.get_memref('stroff')
    print "%x" % obj.ea
    ti = idaapi.opinfo_t()
    f = idc.GetFlags(obj.ea)
    if idaapi.get_opinfo(obj.ea, 0, f, ti):
        # ti.tid now identifies the structure type of the destination.
        print("tid=%08x - %s" % (ti.tid, idaapi.get_struc_name(ti.tid)))
    print "Offset: {}".format(obj.offset)
    import ida_struct
    obj2 = ctx.get_obj('fcn')
    print "%x" % obj2.addr
    name_str = ida_name.get_name(obj2.addr)
    print "Name {}".format(name_str)
    ida_struct.set_member_name(ida_struct.get_struc(ti.tid), obj.offset, name_str)
    # False: renames only, the decompiled tree itself is untouched.
    return False
#==============================
# Test case for BindExpr
# So it just saves all condition expressions from
# all if without else
#==============================
# Pattern: any if-statement; BindExpr records each matched condition under
# the key 'if_cond' for the test_bind handler below.
test_bind_expr = """Patterns.IfInst(Patterns.BindExpr('if_cond', Patterns.AnyPattern()), Patterns.AnyPattern())"""
def test_bind(idx, ctx):
    # Demo handler for test_bind_expr: dump every bound if-condition.
    exprs = ctx.get_expr('if_cond')
    for i in exprs:
        print i
    # False: inspection only, nothing rewritten.
    return False
#==============================================================
# Dummy example for switching vptr union based on variable type
# Here we have union like that
# union a{
# vptr1_1 *class1;
# struc_5 *class2;
# }
#==============================================================
# Pattern for a 12-argument virtual call through a union member, with an
# explicit cast present; binds the union access ('union_type') and the
# object variable ('v1') for test_xx below.
test_deep = """
Patterns.ExprInst(
Patterns.CallExpr(
Patterns.CastExpr(
Patterns.MemptrExpr(
Patterns.BindExpr(
'union_type',
Patterns.MemrefExpr(
Patterns.DeepExprPattern(
Patterns.MemptrExpr(Patterns.VarBind('v1'), 0, 8)
)
)
)
)
),
[Patterns.AnyPattern() for i in range(12)]
)
)
"""
# Same virtual-call-through-union pattern as test_deep, but for call sites
# where the decompiler emitted no cast around the member pointer.
test_deep_without_cast = """Patterns.ExprInst(
Patterns.CallExpr(
Patterns.MemptrExpr(
Patterns.BindExpr(
'union_type',
Patterns.MemrefExpr(
Patterns.DeepExprPattern(
Patterns.MemptrExpr(Patterns.VarBind('v1'), 0, 8)
)
)
)
),
[Patterns.AnyPattern() for i in range(12)]
)
)
"""
def test_xx(idx, ctx):
    # Handler for test_deep / test_deep_without_cast: switches the union
    # member selection based on the declared type of the object variable,
    # so the correct vtable pointer field is displayed.
    import ida_typeinf
    uni = ctx.get_expr('union_type')
    var = ctx.get_var('v1')
    # First token of the printed type, e.g. "class1" from "class1 *".
    tname = var.typ.dstr().split(' ')[0]
    tinfo = idaapi.tinfo_t()
    if tname == 'class1':
        idaapi.parse_decl2(idaapi.cvar.idati, 'vptr1_1 *;', tinfo, idaapi.PT_TYP)
        uni[0].type = tinfo
        uni[0].m = 0  # union member index for class1's vptr
    elif tname == "class2":
        idaapi.parse_decl2(idaapi.cvar.idati, 'struc_5 *;', tinfo, idaapi.PT_TYP)
        uni[0].type = tinfo
        uni[0].m = 1  # union member index for class2's vptr
    else:
        return False
    # True: the ctree was modified (union selection changed).
    return True
str_asgn = """Patterns.ExprInst(
Patterns.AsgnExpr(Patterns.VarBind('r'),
Patterns.BindExpr('n',Patterns.NumberExpr())
)
)"""
GLOBAL = {}
MAX = 0
LAST_FCN_EA = None
def xx(inst, ctx):
    """Reconstruct a stack string built byte-by-byte from matched
    ``var = <number>`` assignments and print the accumulated string.

    Module-level state (GLOBAL / MAX / LAST_FCN_EA) is reset whenever the
    matcher moves on to a new function.
    """
    global MAX
    global GLOBAL
    global LAST_FCN_EA
    if LAST_FCN_EA is None:
        LAST_FCN_EA = ctx.fcn.entry_ea
    if LAST_FCN_EA != ctx.fcn.entry_ea:
        # New function: forget the bytes collected for the previous one.
        GLOBAL = {}
        MAX = 0
        LAST_FCN_EA = ctx.fcn.entry_ea
    # Python-2 print statements converted to print() for IDAPython 3.
    print("{:x}".format(inst.ea))
    v = ctx.get_var('r')
    n = ctx.get_expr('n')[0]
    val = n.n._value & 0xff
    v_o = get_var_offset(ctx.fcn, v.idx)
    print("Var offset from stack:", v_o)
    print(val)
    if v_o > MAX:
        MAX = v_o
    # NOTE: val < 256 is always true after the & 0xff mask above.
    if val < 256:
        if val == 0:
            GLOBAL[v_o] = "\\x00"
        else:
            GLOBAL[v_o] = chr(val)
    ret = ''
    for i in range(MAX + 1):
        if i not in GLOBAL:
            ret += '_'
        else:
            ret += GLOBAL[i]
    print(ret)
#Example for inplace simplifications cpp operators (see pics on readme)
#Not really tested yet - I did it for concrete binary, so it may not work from the box for you
operator_replacing = """Patterns.ExprInst(
Patterns.AsgnExpr(Patterns.VarBind('res'),
Patterns.CallExprExactArgs(
Patterns.ObjBind("function"),
[Patterns.BindExpr("arg1", Patterns.AnyPattern()), Patterns.BindExpr("arg2", Patterns.AnyPattern())]
)
)
)"""
def get_string_repr(obj, ctx):
    """Best-effort human-readable representation of an expression used as
    an operand of an overloaded C++ operator.

    Handles casts (looked through), string literals, named globals
    (demangled), references and local variables; returns '' for anything
    it cannot render.
    """
    if obj.opname == "cast":
        # Look through the cast at the underlying expression.
        obj = obj.x
    if obj.opname == "obj":
        if obj.type.dstr() == "char *":
            return repr(ida_bytes.get_strlit_contents(obj.obj_ea, 256, -1))
        name = ida_name.get_name(obj.obj_ea).split("@@")[0]
        print(name)
        if name[0] == ".":
            name = name[1:]
        if "endl" in name:
            return "std::endl"
        return ida_name.demangle_name(name, 0)
    elif obj.opname == "ref":
        return "&" + get_string_repr(obj.x, ctx)
    elif obj.opname == "var":
        return ctx.get_var_name(obj.v.idx)
    else:
        # Unknown operand kind — log it and fall back to an empty string.
        print(obj.opname)
        return ""
def react_operator(idx, ctx):
    """Replace a matched ``res = operator<<(a, b)`` call with a helper
    pseudo-call rendered as ``res = "a << "(b)`` in the decompilation.

    The instruction is rewritten in place via ``qswap``.
    """
    print('%x' % (idx.ea))
    fcn_object = ctx.get_obj("function")
    # NOTE: stripping the first character ([1:]) was needed on ELF; on
    # MACH-O use ida_name.get_name(fcn_object.addr) unmodified.
    demangled = ida_name.demangle_name(ida_name.get_name(fcn_object.addr)[1:], 0)
    print(demangled)
    if "operator<<" in demangled:
        arg2 = ctx.get_expr('arg2')[0]
        arg1 = ctx.get_expr('arg1')[0]
        arg1_repr = get_string_repr(arg1, ctx)
        # Kept for its logging side effect inside get_string_repr.
        arg2_repr = get_string_repr(arg2, ctx)
        var = ctx.get_var("res")
        varexp = make_var_expr(var.idx, var.typ, var.mba)
        arglist = ida_hexrays.carglist_t()
        arglist.push_back(arg2)
        helper = ida_hexrays.call_helper(ida_hexrays.dummy_ptrtype(4, False), arglist, "{} << ".format(arg1_repr))
        insn = make_cexpr_insn(idx.ea, make_asgn_expr(varexp, helper))
        idx.cleanup()
        idaapi.qswap(idx, insn)
        # After qswap, ``insn`` holds the original instruction; drop it.
        del insn
operator_replacing2 = """Patterns.ExprInst(
Patterns.CallExpr(
Patterns.ObjBind("function"),
[Patterns.BindExpr("arg1", Patterns.AnyPattern()), Patterns.BindExpr("arg2", Patterns.AnyPattern())]
)
)"""
def react_operator2(idx, ctx):
    """Replace a matched bare ``operator<<(a, b)`` call (result unused)
    with a helper pseudo-call rendered as ``"a << "(b)``.

    The instruction is rewritten in place via ``qswap``.
    """
    print('%x' % (idx.ea))
    fcn_object = ctx.get_obj("function")
    # NOTE: stripping the first character ([1:]) was needed on ELF; on
    # MACH-O use ida_name.get_name(fcn_object.addr) unmodified.
    demangled = ida_name.demangle_name(ida_name.get_name(fcn_object.addr)[1:], 0)
    print(demangled)
    if "operator<<" in demangled:
        arg1 = ctx.get_expr('arg1')[0]
        arg1_repr = get_string_repr(arg1, ctx)
        arg2 = ctx.get_expr('arg2')[0]
        arglist = ida_hexrays.carglist_t()
        arglist.push_back(arg2)
        val = ida_hexrays.call_helper(ida_hexrays.dummy_ptrtype(4, False), arglist, "{} << ".format(arg1_repr))
        insn = make_cexpr_insn(idx.ea, val)
        idx.cleanup()
        idaapi.qswap(idx, insn)
        # After qswap, ``insn`` holds the original instruction; drop it.
        del insn
string_deleter = """Patterns.IfInst(
Patterns.UgeExpr(
Patterns.VarBind('len'),
Patterns.NumberExpr(Patterns.NumberConcrete(0x10))
),
Patterns.BlockInst([
Patterns.ExprInst(
Patterns.AsgnExpr(
Patterns.AnyPattern(),
Patterns.VarBind('ptr')
)
),
Patterns.IfInst(
Patterns.UgeExpr(
Patterns.AddExpr(
Patterns.VarBind('len'),
Patterns.NumberExpr(Patterns.NumberConcrete(1))
),
Patterns.NumberExpr(Patterns.NumberConcrete(0x1000))
),
Patterns.AnyPattern()
),
Patterns.ExprInst(
Patterns.CallExpr(
Patterns.ObjConcrete(0x{:x}),
[Patterns.AnyPattern()]
)
)
], False)
)""".format(ida_name.get_name_ea(0, "free_0"))
def handle_string_destr(idx, ctx):
    """Collapse a matched inline std::string destructor sequence into a
    single ``std::string::destructor(ptr)`` helper call.

    The candidate is accepted only when the matched length and pointer
    variables sit exactly 20 bytes apart on the stack (this binary's
    std::string layout).
    """
    print('%x' % (idx.ea))
    var = ctx.get_var('len')
    var2 = ctx.get_var('ptr')
    print(var)
    off1 = get_var_offset(ctx.fcn, var.idx)
    off2 = get_var_offset(ctx.fcn, var2.idx)
    print(off1 - off2)
    if off1 - off2 == 20:
        print("[+] Found string destructor")
        varexp = make_var_expr(var2.idx, var2.typ, var2.mba, arg=True)
        arglist = ida_hexrays.carglist_t()
        arglist.push_back(varexp)
        val = ida_hexrays.call_helper(ida_hexrays.dummy_ptrtype(4, False), arglist, "std::string::destructor")
        insn = make_cexpr_insn(idx.ea, val)
        idx.cleanup()
        idaapi.qswap(idx, insn)
        # After qswap, ``insn`` holds the original instruction; drop it.
        del insn
DWORD_STRUCT = """
Patterns.ExprInst(
Patterns.AsgnExpr(
Patterns.PtrExpr(
Patterns.CastExpr(
Patterns.RefExpr(
Patterns.BindExpr(
'struct_part',
Patterns.MemrefExpr(
Patterns.VarBind(
'struct_var'
)
)
)
)
),
4 #PTRSIZE
),
Patterns.BindExpr(
'values',
Patterns.NumberExpr(
Patterns.AnyPattern()
)
)
)
)
"""
def replace_dword_in_struct(idx, ctx):
    """Split a 4-byte constant store into per-member stores when the
    target structure has 1- or 2-byte members covering those 4 bytes.

    Returns True when the instruction was rewritten, None otherwise.
    """
    print('%x' % idx.ea)
    struct_expr = ctx.get_expr('struct_part')[0]
    var = ctx.get_var("struct_var")
    values = ctx.get_expr('values')[0]
    offset = struct_expr.m
    N = extract_number(values)
    typename = struct_expr.x.type.dstr()
    s_id = ida_struct.get_struc_id(typename)
    if s_id == idc.BADADDR:
        return
    sptr = ida_struct.get_struc(s_id)
    is_suits = True
    fields = []
    inner_offset = 0
    # Walk the members covering the 4 stored bytes, peeling N off
    # little-endian, one member at a time.
    while inner_offset < 4:
        memb = ida_struct.get_member(sptr, offset + inner_offset)
        if memb is None:
            print("Not enough members!")
            is_suits = False
            break
        size = ida_struct.get_member_size(memb)
        if inner_offset + size > 4:
            print("Size fail! (%d bytes left but member size is %d)" % (4 - inner_offset, size))
            is_suits = False
            break
        if size == 1:
            val = N & 0xff
            N = N >> 8
        elif size == 2:
            val = N & 0xffff
            N = N >> 16
        else:
            print("Unknown size")
            is_suits = False
            break
        fields.append((inner_offset, val))
        inner_offset += size
    if is_suits is False:
        print("Not suitable!")
        return
    inslist = []
    for i in fields:
        ins = make_asgn_refvar_number(idx.ea, var, offset + i[0], i[1])
        inslist.append(ins)
    # Non-foldable block replacement. (Wrapping the block in a foldable
    # helper "if" crashed IDA at exit, so that variant was abandoned.)
    blk = make_cblk(inslist)
    cblk = make_cblock_insn(idx.ea, blk)
    idx.cleanup()
    idaapi.qswap(idx, cblk)
    del cblk
    return True
# Third arg - is chain
#PATTERNS = [(strlen_global, replacer_strlen_global, True)]
# PATTERNS = [(get_proc_addr, getProc_addr, False)]
#PATTERNS = [(test_deep, test_xx, False), (test_deep_without_cast, test_xx, False)]
#PATTERNS = [(global_struct_fields_sub, rename_struct_field_as_func_name, False)]
#PATTERNS = [(test_bind_expr, test_bind, False)]
#PATTERNS = [(str_asgn, xx, False)]
#PATTERNS = [(operator_replacing, react_operator, False), (operator_replacing2, react_operator2, False)]
#PATTERNS = [(string_deleter, handle_string_destr, False)]
# PATTERNS = [(DWORD_STRUCT, replace_dword_in_struct, False)]
call_pattern = '''
Patterns.ExprInst(
Patterns.BindExpr(
"call_expr",
Patterns.CallExpr(
Patterns.CastExpr(
Patterns.BindExpr(
"ref",
Patterns.MemptrExpr(
Patterns.AnyPattern()
)
),
may_missing=True
)
)
)
)
'''
def call_found(obj, ctx):
import ida_typeinf
import idc_bc695
import ida_xref
call = ctx.get_expr('call_expr')[0]
expr = ctx.get_expr('ref')[0]
print type(expr)
print '[*] Offset: %#x' % expr.m
print '[*] First op type: %s' % expr.x.type.dstr().split(' ')[0]
print '[*] Address %#x' % call.ea
# vtab.sub_XXXX(...)
if expr.x.op == idaapi.cot_memref:
vtable_tinfo = expr.x.type.get_pointed_object()
# vtab->sub_XXXX(...)
else:
vtable_tinfo = expr.x.type
if vtable_tinfo.is_ptr():
vtable_tinfo = vtable_tinfo.get_pointed_object()
udt_member | |
<gh_stars>0
"""Solve cube using Thistlethwaite's algorithm."""
from .ida_star import ida_star
from cube.functions import orient
def g1(self) -> tuple:
    """
    Solve edge orientation.

    Returns
    -------
    tuple of (list of str, dict of {'G1': int})
        Moves to solve edge orientation, statistics (move count in ETM).

    Notes
    -----
    Brute-force method using `ida_star` to solve edge orientation.

    This stage is complete when all 12 edges are correctly oriented. An
    edge is correctly oriented when it can be moved into position in the
    solved orientation without F, F', B, or B' moves.

    This stage takes a maximum of 7 moves and reduces the cube into a
    group requiring only `<U, D, L, R, F2, B2>` moves to solve.
    """
    solve = orient(self.cube)
    self.move(solve)
    # (face, row, column) coordinates of the two stickers of each edge.
    edges = (
        ((0, 0, 1), (4, 0, 1)),
        ((0, 1, 0), (1, 0, 1)),
        ((0, 1, -1), (3, 0, 1)),
        ((0, -1, 1), (2, 0, 1)),
        ((2, 1, 0), (1, 1, -1)),
        ((2, 1, -1), (3, 1, 0)),
        ((4, 1, 0), (3, 1, -1)),
        ((4, 1, -1), (1, 1, 0)),
        ((5, 0, 1), (2, -1, 1)),
        ((5, 1, 0), (1, -1, 1)),
        ((5, 1, -1), (3, -1, 1)),
        ((5, -1, 1), (4, -1, 1))
    )

    def get_bad_edges(cube):
        # An edge is mis-oriented when its first sticker shows L/R, or it
        # shows F/B while its partner sticker shows U/D.
        bad_edges = []
        for edge in edges:
            sticker = cube[edge[0][0]][edge[0][1]][edge[0][2]]
            if sticker in {'L', 'R'}:
                bad_edges.append((edge[0][0], edge[1][0]))
            elif sticker in {'F', 'B'}:
                if cube[edge[1][0]][edge[1][1]][edge[1][2]] in {'U', 'D'}:
                    bad_edges.append((edge[0][0], edge[1][0]))
        return bad_edges

    def estimate(cube):
        # Admissible lower bound on the remaining moves, keyed on how
        # many bad edges touch the F (2) and B (4) faces.
        bad_edges = get_bad_edges(cube)
        if not bad_edges:
            return 0
        total_edges = len(bad_edges)
        # Renamed from ``edges`` to avoid shadowing the coordinate table.
        fb_counts = [sum(s in edge for edge in bad_edges) for s in (2, 4)]
        minimum = min(fb_counts)
        maximum = max(fb_counts)
        if total_edges == 2:
            return 3 + (maximum != 1)
        if total_edges == 4:
            return 5 - maximum
        if total_edges == 6:
            # Chained comparison: true when neither count equals 3.
            return 3 + (fb_counts[0] != 3 != fb_counts[1])
        if total_edges == 8:
            if maximum == 4 and minimum < 2:
                return 7 - minimum
            return 6 - minimum
        if total_edges == 10:
            return 6
        return 7

    def next_faces(cube, moves):
        faces = ['F', 'B', 'U', 'R', 'L', 'D']
        if moves:
            face = moves[-1][0]
            faces.remove(face)
            if face in {'L', 'B', 'D'}:
                faces.remove({'L': 'R', 'B': 'F', 'D': 'U'}[face])
        bad_edges = get_bad_edges(cube)
        count = [set() for _ in range(6)]
        for edge in bad_edges:
            count[edge[0]].add(edge[1])
            count[edge[1]].add(edge[0])
        point_symmetry = []
        # BUG FIX: iterate over a snapshot — removing from ``faces`` while
        # iterating it directly silently skipped the element that followed
        # each removed face.
        for face in list(faces):
            index = 'ULFRBD'.index(face)
            if not count[index]:
                faces.remove(face)
            elif len(count[index]) == 4:
                if face not in {'F', 'B'}:
                    faces.remove(face)
                else:
                    point_symmetry.append(face)
            elif count[index] in ({0, 5}, {1, 3}, {2, 4}):
                point_symmetry.append(face)
        return ((face,) if face in point_symmetry else
                (face, f'{face}2', f"{face}'") for face in faces)

    solve.extend(ida_star(self, estimate, next_faces, 7))
    return solve, {'G1': len(solve)}
def g2(self) -> tuple:
    """
    Solve domino reduction.

    Returns
    -------
    tuple of (list of str, dict of {'G2': int})
        Moves to solve domino reduction, statistics (move count in ETM).

    Notes
    -----
    Brute-force method using `ida_star` to solve domino reduction.

    This stage is complete when both the top and bottom faces only have
    white and yellow (U and D) stickers.

    This stage takes a maximum of 10 moves and reduces the cube into a
    group requiring only `<U, D, L2, R2, F2, B2>` moves to solve.
    """
    def estimate(cube):
        # Lower bound from counting the E-slice edge and corner stickers
        # that already show U/D colours.
        edges = [sum(cube[s][1][x] in {'U', 'D'} for s, x in pieces)
                 for pieces in (((2, 0), (4, -1)), ((2, -1), (4, 0)))]
        total_edges = sum(edges)
        corners = [sum(side[y][x] in {'U', 'D'} for y in (0, -1) for x in (0, -1))
                   for side in cube[1:5]]
        # Canonicalise opposite-face counts so the smaller comes first.
        for i in 0, 1:
            if corners[i + 2] < corners[i]:
                corners[i], corners[i + 2] = corners[i + 2], corners[i]
        total_corners = sum(corners)
        if not total_edges:
            if not total_corners:
                return 0
            return 7
        if total_edges == 1:
            if corners in ([0, 1, 0, 3], [1, 0, 1, 1]):
                return 3
            if corners in ([0, 0, 2, 1], [0, 1, 0, 1], [0, 1, 0, 2], [1, 0, 2, 1]):
                return 5
            return 6
        if total_edges == 2:
            if total_corners == 4:
                if corners == [0, 2, 0, 2]:
                    return 1 + (edges[0] == 1)
                if corners == [1, 1, 1, 1]:
                    return 2 + (edges[0] == 1)
                if corners == [2, 0, 2, 0]:
                    return 3 + (edges[0] == 1)
                return 6
            if not total_corners:
                return 4 + (edges[0] == 1)
            if corners in ([2, 1, 2, 1], [0, 4, 0, 4], [1, 2, 1, 3]):
                return 4 + (edges[0] == 1)
            if corners in ([0, 0, 0, 4], [2, 0, 2, 0]):
                return 4 + (edges[0] != 1)
            return 5
        if total_edges == 3:
            if corners == [0, 1, 0, 3]:
                return 3
            if corners in ([1, 0, 1, 1], [1, 1, 1, 3], [1, 2, 1, 2], [2, 1, 2, 1]):
                return 4
            return 5
        if corners == [0, 4, 0, 4]:
            return 2
        if corners in ([1, 2, 1, 3], [2, 2, 2, 2]):
            return 3
        if corners in ([2, 1, 2, 1], [4, 0, 4, 0]):
            return 4
        return 5

    def next_faces(cube, moves):
        faces = ['R', 'L', 'U', 'D', 'F', 'B']
        if moves:
            face = moves[-1][0]
            faces.remove(face)
            if face in {'L', 'B', 'D'}:
                faces.remove({'L': 'R', 'B': 'F', 'D': 'U'}[face])
        ud = {'U', 'D'}
        point_symmetry = []
        corners = [
            0 if cube[s][y][x] in ud else
            2 if cube[i][s][s] in ud else
            1 for s, y, x, i in (
                (0, 0, 0, 1), (0, -1, 0, 2), (0, -1, -1, 3), (0, 0, -1, 4),
                (-1, -1, 0, 4), (-1, 0, 0, 1), (-1, 0, -1, 2), (-1, -1, -1, 3)
            )
        ]
        # BUG FIX: iterate over a snapshot — removing from ``faces`` while
        # iterating it directly skipped the element after each removal.
        for face in list(faces):
            if face == 'U':
                face_corners = corners[:4]
            elif face == 'L':
                face_corners = corners[:2] + corners[5:3:-1]
            elif face == 'F':
                face_corners = corners[1:3] + corners[6:4:-1]
            elif face == 'R':
                face_corners = corners[2:4] + corners[:5:-1]
            elif face == 'B':
                face_corners = [corners[i] for i in (0, 3, 7, 4)]
            else:
                face_corners = corners[4:]
            if face_corners[:2] != face_corners[2:]:
                continue
            s = 'ULFRBD'.index(face)
            if face in {'F', 'B'}:
                y = {2: -1, 4: 0}[s]
                # BUG FIX: the original relied on chained comparisons
                # (``a in ud != b in ud`` parses as ``(a in ud) and
                # (ud != b) and (b in ud)``, never the intended XOR);
                # the memberships are parenthesised throughout.
                if (cube[s][1][0] in ud) != (cube[s][1][-1] in ud):
                    continue
                if (cube[0][y][1] in ud) != (cube[5][-y - 1][1] in ud):
                    continue
                faces.remove(face)
            elif face in {'L', 'R'}:
                y = {1: 0, 3: -1}[s]
                if (cube[0][1][y] in ud) != (cube[5][1][y] in ud):
                    continue
                if (cube[2][1][y] in ud) != (cube[4][1][-y - 1] in ud):
                    continue
                if ((cube[0][1][y] in ud) == (cube[2][1][y] in ud) and
                        corners[0] == (corners[1] + 1) % 3):
                    faces.remove(face)
                else:
                    point_symmetry.append(face)
            else:
                if (cube[s][0][1] in ud) != (cube[s][-1][1] in ud):
                    continue
                if (cube[s][1][0] in ud) != (cube[s][1][-1] in ud):
                    continue
                if ((cube[s][0][1] in ud) == (cube[s][1][0] in ud) and
                        corners[0] == corners[1]):
                    # BUG FIX: the original removed/appended the literal
                    # 'U' here, which crashes (ValueError) or mis-prunes
                    # when this branch handles 'D'.
                    faces.remove(face)
                else:
                    point_symmetry.append(face)
        return ((face,) if face in point_symmetry else
                (f'{face}2',) if face in {'F', 'B'} else
                (face, f'{face}2', f"{face}'") for face in faces)

    solve = ida_star(self, estimate, next_faces, 10)
    return solve, {'G2': len(solve)}
def g3(self) -> tuple:
"""
Solve half turn reduction.
Returns
-------
tuple of (list of str, dict of {'G3': int})
Moves to solve half turn reduction, statistics (move count in
ETM).
Notes
-----
Brute-force method using `ida_star` to solve half turn reduction.
This stage is complete when every face only contains two coloured
stickers and every face has an even number of corners of each
colour.
This stage takes a maximum of 13 moves and reduces the cube into a
group requiring only `<U2, D2, L2, R2, F2, B2>` moves to solve.
"""
def estimate(cube):
faces = 'FB', 'LR', 'FB', 'LR'
edges = [sum(cube[s][y][1] in face for s, face in enumerate(faces, 1))
for y in (0, -1)]
total_edges = sum(edges)
corners = [sum(cube[s][y][x] in {'F', 'B'} for s in (1, 3)
for x in (0, -1)) for y in (0, -1)]
total_corners = sum(corners)
if not total_corners:
if any(sum(side[y][x] == side[1][1] for y in (0, -1)
for x in (0, -1)) % 2 for side in cube[:3]):
return 10
if any(sum(cube[s][y][0] == cube[s][y][-1] for s in opposite) % 2
for y in (0, -1) for opposite in ((0,5), (1,3), (2,4))):
return 9
if not total_edges:
return 0
if total_edges == 2:
return 5
if total_edges == 4:
return 4
if total_edges == 6:
return 7
return 6
if total_corners == 2:
if total_edges == 2:
return 5
return 6
if total_corners == 4:
if max(corners) == 4:
if total_edges == 4:
if corners == edges:
return 1
if max(edges) == 4:
return 5
if edges[0] == 2:
return 3
return 4
return 7
if max(corners) == 3:
return 4
if total_edges == 4:
return 2
return 3
if total_corners == 6:
if corners[0] == 3:
return 5
return 6
if total_edges == 8:
return 2
if | |
trueSolarTime > 1440:
trueSolarTime = trueSolarTime - 1440
hourangle = trueSolarTime / 4.0 - 180.0
# Thanks to <NAME> for the next line:
if hourangle < -180:
hourangle = hourangle + 360.0
harad = radians(hourangle)
csz = sin(radians(latitude)) * sin(radians(solarDec)) + cos(
radians(latitude)
) * cos(radians(solarDec)) * cos(harad)
if csz > 1.0:
csz = 1.0
elif csz < -1.0:
csz = -1.0
zenith = degrees(acos(csz))
azDenom = cos(radians(latitude)) * sin(radians(zenith))
if abs(azDenom) > 0.001:
azRad = (
(sin(radians(latitude)) * cos(radians(zenith))) - sin(radians(solarDec))
) / azDenom
if abs(azRad) > 1.0:
if azRad < 0:
azRad = -1.0
else:
azRad = 1.0
azimuth = 180.0 - degrees(acos(azRad))
if hourangle > 0.0:
azimuth = -azimuth
else:
if latitude > 0.0:
azimuth = 180.0
else:
azimuth = 0.0
if azimuth < 0.0:
azimuth = azimuth + 360.0
exoatmElevation = 90.0 - zenith
if exoatmElevation > 85.0:
refractionCorrection = 0.0
else:
te = tan(radians(exoatmElevation))
if exoatmElevation > 5.0:
refractionCorrection = (
58.1 / te
- 0.07 / (te * te * te)
+ 0.000086 / (te * te * te * te * te)
)
elif exoatmElevation > -0.575:
step1 = -12.79 + exoatmElevation * 0.711
step2 = 103.4 + exoatmElevation * (step1)
step3 = -518.2 + exoatmElevation * (step2)
refractionCorrection = 1735.0 + exoatmElevation * (step3)
else:
refractionCorrection = -20.774 / te
refractionCorrection = refractionCorrection / 3600.0
solarzen = zenith - refractionCorrection
solarelevation = 90.0 - solarzen
return solarelevation
def solar_zenith(self, dateandtime, latitude, longitude):
    """Calculate the solar zenith angle.

    :param dateandtime: The date and time for which to calculate
        the angle. Naive datetimes are assumed to be UTC.
    :type dateandtime: :class:`~datetime.datetime`
    :param latitude: Latitude - Northern latitudes should be positive
    :type latitude: float
    :param longitude: Longitude - Eastern longitudes should be positive
    :type longitude: float

    :return: The zenith angle in degrees from vertical.
    :rtype: float
    """
    # The zenith is simply the complement of the elevation.
    elevation = self.solar_elevation(dateandtime, latitude, longitude)
    return 90.0 - elevation
def moon_phase(self, date, rtype=int):
    """Calculates the phase of the moon on the specified date.

    :param date: The date to calculate the phase for.
    :type date: :class:`datetime.date`
    :param rtype: The type to return either int (default) or float.

    :return:
        A number designating the phase.

        | 0 = New moon
        | 7 = First quarter
        | 14 = Full moon
        | 21 = Last quarter
    """
    # Anything other than int/float falls back to int.
    if rtype not in (int, float):
        rtype = int
    phase = self._moon_phase_asfloat(date)
    # Wrap the 28-day cycle back to [0, 28).
    if phase >= 28.0:
        phase -= 28.0
    return rtype(phase)
def rahukaalam_utc(self, date, latitude, longitude):
    """Calculate ruhakaalam times in the UTC timezone.

    :param date: Date to calculate for. Defaults to today when None.
    :type date: :class:`datetime.date`
    :param latitude: Latitude - Northern latitudes should be positive
    :type latitude: float
    :param longitude: Longitude - Eastern longitudes should be positive
    :type longitude: float

    :return: Tuple containing the start and end times for Rahukaalam.
    :rtype: tuple
    """
    if date is None:
        date = datetime.date.today()

    sunrise = self.sunrise_utc(date, latitude, longitude)
    sunset = self.sunset_utc(date, latitude, longitude)

    # Daylight is split into eight octants; Rahukaalam occupies one of
    # them, chosen by weekday (Mo,Sa,Fr,We,Th,Tu,Su ordering).
    octant_duration = datetime.timedelta(seconds=(sunset - sunrise).seconds / 8)
    octant = (1, 6, 4, 5, 3, 2, 7)[date.weekday()]

    start = sunrise + octant_duration * octant
    return start, start + octant_duration
def _proper_angle(self, value):
if value > 0.0:
value /= 360.0
return (value - floor(value)) * 360.0
else:
tmp = ceil(abs(value / 360.0))
return value + tmp * 360.0
def _julianday(self, utcdatetime, timezone=None):
    """Return the Julian Day number for *utcdatetime*.

    Accepts either a datetime (time-of-day used) or a date (midnight).
    When *timezone* is given it may be an integer hour offset or a pytz
    timezone, and the result is shifted to UTC accordingly.
    """
    if isinstance(utcdatetime, datetime.datetime):
        end_date = utcdatetime.date()
        hour = utcdatetime.hour
        minute = utcdatetime.minute
        second = utcdatetime.second
    else:
        end_date = utcdatetime
        hour = minute = second = 0

    hour_offset = 0
    if timezone:
        if isinstance(timezone, int):
            hour_offset = timezone
        else:
            offset = timezone.localize(utcdatetime).utcoffset()
            hour_offset = offset.total_seconds() / 3600.0

    # Fraction of a day covered by the time-of-day component.
    time_fraction = (hour * 3600.0 + minute * 60.0 + second) / (24.0 * 3600.0)
    # Days since the Excel epoch, converted to a Julian Day.
    date_diff = excel_datediff(datetime.date(1900, 1, 1), end_date)
    return date_diff + 2415018.5 + time_fraction - (hour_offset / 24)
def _jday_to_jcentury(self, julianday):
return (julianday - 2451545.0) / 36525.0
def _jcentury_to_jday(self, juliancentury):
return (juliancentury * 36525.0) + 2451545.0
def _geom_mean_long_sun(self, juliancentury):
l0 = 280.46646 + juliancentury * (36000.76983 + 0.0003032 * juliancentury)
return l0 % 360.0
def _geom_mean_anomaly_sun(self, juliancentury):
return 357.52911 + juliancentury * (35999.05029 - 0.0001537 * juliancentury)
def _eccentrilocation_earth_orbit(self, juliancentury):
return 0.016708634 - juliancentury * (
0.000042037 + 0.0000001267 * juliancentury
)
def _sun_eq_of_center(self, juliancentury):
    """Equation of center for the sun, in degrees."""
    m_rad = radians(self._geom_mean_anomaly_sun(juliancentury))
    sin1 = sin(m_rad)
    sin2 = sin(m_rad + m_rad)
    sin3 = sin(m_rad + m_rad + m_rad)
    return (
        sin1 * (1.914602 - juliancentury * (0.004817 + 0.000014 * juliancentury))
        + sin2 * (0.019993 - 0.000101 * juliancentury)
        + sin3 * 0.000289
    )
def _sun_true_long(self, juliancentury):
    """True longitude of the sun: mean longitude plus equation of center."""
    return (self._geom_mean_long_sun(juliancentury)
            + self._sun_eq_of_center(juliancentury))
def _sun_true_anomoly(self, juliancentury):
    """True anomaly of the sun: mean anomaly plus equation of center.

    (Name keeps the historical 'anomoly' spelling used by callers.)
    """
    return (self._geom_mean_anomaly_sun(juliancentury)
            + self._sun_eq_of_center(juliancentury))
def _sun_rad_vector(self, juliancentury):
    """Sun-Earth distance (radius vector) in astronomical units."""
    anomaly = self._sun_true_anomoly(juliancentury)
    ecc = self._eccentrilocation_earth_orbit(juliancentury)
    return (1.000001018 * (1 - ecc * ecc)) / (1 + ecc * cos(radians(anomaly)))
def _sun_apparent_long(self, juliancentury):
    """Apparent longitude of the sun, corrected for nutation/aberration."""
    omega = 125.04 - 1934.136 * juliancentury
    return (self._sun_true_long(juliancentury)
            - 0.00569 - 0.00478 * sin(radians(omega)))
def _mean_obliquity_of_ecliptic(self, juliancentury):
seconds = 21.448 - juliancentury * (
46.815 + juliancentury * (0.00059 - juliancentury * (0.001813))
)
return 23.0 + (26.0 + (seconds / 60.0)) / 60.0
def _obliquity_correction(self, juliancentury):
    """Obliquity of the ecliptic corrected for nutation, in degrees."""
    omega = 125.04 - 1934.136 * juliancentury
    return (self._mean_obliquity_of_ecliptic(juliancentury)
            + 0.00256 * cos(radians(omega)))
def _sun_rt_ascension(self, juliancentury):
    """Right ascension of the sun, in degrees."""
    oc_rad = radians(self._obliquity_correction(juliancentury))
    al_rad = radians(self._sun_apparent_long(juliancentury))
    return degrees(atan2(cos(oc_rad) * sin(al_rad), cos(al_rad)))
def _sun_declination(self, juliancentury):
    """Declination of the sun, in degrees."""
    e_rad = radians(self._obliquity_correction(juliancentury))
    lambd_rad = radians(self._sun_apparent_long(juliancentury))
    return degrees(asin(sin(e_rad) * sin(lambd_rad)))
def _var_y(self, juliancentury):
    """Auxiliary quantity tan^2(epsilon/2) used by the equation of time."""
    half_eps = radians(self._obliquity_correction(juliancentury)) / 2.0
    t = tan(half_eps)
    return t * t
def _eq_of_time(self, juliancentury):
    """Equation of time in minutes (apparent minus mean solar time)."""
    mean_long = self._geom_mean_long_sun(juliancentury)
    ecc = self._eccentrilocation_earth_orbit(juliancentury)
    mean_anom = self._geom_mean_anomaly_sun(juliancentury)
    y = self._var_y(juliancentury)

    sin2l0 = sin(2.0 * radians(mean_long))
    cos2l0 = cos(2.0 * radians(mean_long))
    sin4l0 = sin(4.0 * radians(mean_long))
    sinm = sin(radians(mean_anom))
    sin2m = sin(2.0 * radians(mean_anom))

    etime = (
        y * sin2l0
        - 2.0 * ecc * sinm
        + 4.0 * ecc * y * sinm * cos2l0
        - 0.5 * y * y * sin4l0
        - 1.25 * ecc * ecc * sin2m
    )
    # Radians -> degrees, then degrees -> minutes (4 min per degree).
    return degrees(etime) * 4.0
def _hour_angle(self, latitude, declination, depression):
latitude_rad = radians(latitude)
declination_rad = radians(declination)
depression_rad = radians(depression)
n = cos(depression_rad)
d = cos(latitude_rad) * cos(declination_rad)
t = tan(latitude_rad) * tan(declination_rad)
h = (n / d) - t
HA = acos(h)
return HA
def _calc_time(self, depression, direction, date, latitude, longitude):
    """Return the UTC datetime on *date* at which the sun crosses
    *depression* degrees, rising or setting according to *direction*
    (SUN_SETTING negates the hour angle).

    Raises TypeError when latitude/longitude are not numbers; the
    hour-angle computation may raise ValueError when the event never
    occurs at this latitude/date.
    """
    if not isinstance(latitude, Number) or not isinstance(longitude, Number):
        raise TypeError("Latitude and longitude must be a numbers")

    julianday = self._julianday(date)

    # Clamp latitudes near the poles to keep the hour-angle acos in range.
    if latitude > 89.8:
        latitude = 89.8

    if latitude < -89.8:
        latitude = -89.8

    t = self._jday_to_jcentury(julianday)
    eqtime = self._eq_of_time(t)
    solarDec = self._sun_declination(t)

    hourangle = self._hour_angle(latitude, solarDec, depression)
    if direction == SUN_SETTING:
        hourangle = -hourangle

    # Minutes from solar noon at this longitude, corrected by the
    # equation of time (4 minutes per degree).
    delta = -longitude - degrees(hourangle)
    timeDiff = 4.0 * delta
    timeUTC = 720.0 + timeDiff - eqtime

    timeUTC = timeUTC / 60.0
    hour = int(timeUTC)
    minute = int((timeUTC - hour) * 60)
    second = int((((timeUTC - hour) * 60) - minute) * 60)

    # Normalise seconds, then minutes, then hours — order matters, since
    # each carry can push the next unit out of range; a final hour carry
    # rolls the date forward or backward.
    if second > 59:
        second -= 60
        minute += 1
    elif second < 0:
        second += 60
        minute -= 1

    if minute > 59:
        minute -= 60
        hour += 1
    elif minute < 0:
        minute += 60
        hour -= 1

    if hour > 23:
        hour -= 24
        date += datetime.timedelta(days=1)
    elif hour < 0:
        hour += 24
        date -= datetime.timedelta(days=1)

    dt = datetime.datetime(date.year, date.month, date.day, hour, minute, second)
    dt = pytz.UTC.localize(dt)  # pylint: disable=E1120

    return dt
def _moon_phase_asfloat(self, date):
jd = self._julianday(date)
DT = pow((jd - 2382148), 2) / (41048480 * 86400)
T = (jd + DT - 2451545.0) / 36525
T2 = pow(T, 2)
T3 = pow(T, 3)
D = 297.85 + (445267.1115 * T) - (0.0016300 * T2) + (T3 / 545868)
D = radians(self._proper_angle(D))
M = | |
<filename>core/ErrorCodes.py
'''
Created on Jun 29, 2009
@author: <NAME>
'''
import logging
class ErrorCodes:
errorFields = ('pilot','exe','sup','ddm','brokerage','jobdispatcher','taskbuffer')
errorCodes = {}
errorStages = {}
def __init__(self):
for f in self.errorFields:
self.errorCodes['%serrorcode'%f] = {}
self.errorStages['%serrorcode'%f] = {}
## Panda errors can be found at https://twiki.cern.ch/twiki/bin/view/Atlas/PandaErrorCodes
self.errorCodes['ddmerrorcode'][100] = 'DQ2 server error'
self.errorStages['ddmerrorcode'][100] = 'ddm-start'
self.errorCodes['ddmerrorcode'][200] = 'Could not add output files to dataset'
self.errorStages['ddmerrorcode'][200] = 'ddm-end'
self.errorCodes['ddmerrorcode'][201] = 'Panda server failed to register subscription in DQ2'
self.errorStages['ddmerrorcode'][201] = 'ddm-end'
self.errorCodes['jobdispatchererrorcode'][100] = 'Lost heartbeat'
self.errorStages['jobdispatchererrorcode'][100] = 'time-during'
self.errorCodes['jobdispatchererrorcode'][101] = 'Job recovery failed for three days'
self.errorStages['jobdispatchererrorcode'][101] = 'time-during'
self.errorCodes['jobdispatchererrorcode'][102] = 'No reply to sent job'
self.errorStages['jobdispatchererrorcode'][102] = 'time-during'
self.errorCodes['taskbuffererrorcode'][100] = 'Job expired and killed three days after submission (or killed by user)'
self.errorStages['taskbuffererrorcode'][100] = 'user-during'
self.errorCodes['taskbuffererrorcode'][101] = 'transfer timeout (2weeks)'
self.errorStages['taskbuffererrorcode'][101] = 'time-end'
self.errorCodes['taskbuffererrorcode'][102] = 'Expired three days after submission'
self.errorStages['taskbuffererrorcode'][102] = 'time-end'
self.errorCodes['taskbuffererrorcode'][103] = 'Aborted by executor interface'
self.errorStages['taskbuffererrorcode'][103] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][104] = 'Waiting job timed out'
self.errorStages['taskbuffererrorcode'][104] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][105] = 'Reassigned by rebrokeage'
self.errorStages['taskbuffererrorcode'][105] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][106] = 'Reassigned by server-side retry'
self.errorStages['taskbuffererrorcode'][106] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][107] = 'Retried by pilot'
self.errorStages['taskbuffererrorcode'][107] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][110] = 'Input file lost in SE'
self.errorStages['taskbuffererrorcode'][110] = 'panda-during'
self.errorCodes['piloterrorcode'][1008] = 'General pilot error, consult batch log'
self.errorStages['piloterrorcode'][1008] = 'ddm-start'
self.errorCodes['piloterrorcode'][1097] = 'Get function can not be called for staging input file'
self.errorStages['piloterrorcode'][1097] = 'ddm-start'
self.errorCodes['piloterrorcode'][1098] = 'No space left on local disk'
self.errorStages['piloterrorcode'][1098] = 'athena-during'
self.errorCodes['piloterrorcode'][1099] = 'Get error: Staging input file failed'
self.errorStages['piloterrorcode'][1099] = 'ddm-start'
self.errorCodes['piloterrorcode'][1100] = 'Get error: Replica not found'
self.errorStages['piloterrorcode'][1100] = 'ddm-start'
self.errorCodes['piloterrorcode'][1101] = 'LRC registration error: Connection refused'
self.errorStages['piloterrorcode'][1101] = 'ddm-end'
self.errorCodes['piloterrorcode'][1102] = 'Expected output file does not exist'
self.errorStages['piloterrorcode'][1102] = 'athena-end'
self.errorCodes['piloterrorcode'][1103] = 'No such file or directory'
self.errorStages['piloterrorcode'][1103] = 'ddm-start'
self.errorCodes['piloterrorcode'][1104] = 'User work directory too large'
self.errorStages['piloterrorcode'][1104] = 'user-during'
self.errorCodes['piloterrorcode'][1105] = 'Put error: Failed to add file size and checksum to LFC'
self.errorStages['piloterrorcode'][1105] = 'ddm-end'
self.errorCodes['piloterrorcode'][1106] = 'Payload stdout file too big'
self.errorStages['piloterrorcode'][1106] = 'user-during'
self.errorCodes['piloterrorcode'][1107] = 'Get error: Missing DBRelease file'
self.errorStages['piloterrorcode'][1107] = 'ddm-start'
self.errorCodes['piloterrorcode'][1108] = 'Put error: LCG registration failed'
self.errorStages['piloterrorcode'][1108] = 'ddm-end'
self.errorCodes['piloterrorcode'][1109] = 'Required CMTCONFIG incompatible with WN'
self.errorStages['piloterrorcode'][1109] = 'ddm-start'
self.errorCodes['piloterrorcode'][1110] = 'Failed during setup'
self.errorStages['piloterrorcode'][1110] = 'ddm-start'
self.errorCodes['piloterrorcode'][1111] = 'Exception caught by runJob'
self.errorStages['piloterrorcode'][1111] = 'ddm-start'
self.errorCodes['piloterrorcode'][1112] = 'Exception caught by pilot'
self.errorStages['piloterrorcode'][1112] = 'ddm-start'
self.errorCodes['piloterrorcode'][1113] = 'Get error: Failed to import LFC python module'
self.errorStages['piloterrorcode'][1113] = 'ddm-start'
self.errorCodes['piloterrorcode'][1114] = 'Put error: Failed to import LFC python module'
self.errorStages['piloterrorcode'][1114] = 'ddm-end'
self.errorCodes['piloterrorcode'][1115] = 'NFS SQLite locking problems'
self.errorStages['piloterrorcode'][1115] = 'athena-end'
self.errorCodes['piloterrorcode'][1116] = 'Pilot could not download queuedata'
self.errorStages['piloterrorcode'][1116] = 'ddm-start'
self.errorCodes['piloterrorcode'][1117] = 'Pilot found non-valid queuedata'
self.errorStages['piloterrorcode'][1117] = 'ddm-start'
self.errorCodes['piloterrorcode'][1118] = 'Pilot could not curl space report'
self.errorStages['piloterrorcode'][1118] = 'ddm-start'
self.errorCodes['piloterrorcode'][1119] = 'Pilot aborted due to DDM space shortage'
self.errorStages['piloterrorcode'][1119] = 'ddm-start'
self.errorCodes['piloterrorcode'][1120] = 'Space token descriptor does not match destination path'
self.errorStages['piloterrorcode'][1120] = 'ddm-end'
self.errorCodes['piloterrorcode'][1121] = 'Can not read the xml file for registering output files to dispatcher'
self.errorStages['piloterrorcode'][1121] = 'athena-end'
self.errorCodes['piloterrorcode'][1122] = 'Bad replica entry returned by lfc_getreplicas(): SFN not set in LFC for this guid'
self.errorStages['piloterrorcode'][1122] = 'ddm-start'
self.errorCodes['piloterrorcode'][1123] = 'Missing guid in output file list'
self.errorStages['piloterrorcode'][1123] = 'ddm-end'
self.errorCodes['piloterrorcode'][1124] = 'Output file too large'
self.errorStages['piloterrorcode'][1124] = 'athena-during'
self.errorCodes['piloterrorcode'][1130] = 'Get error: Failed to get POOL file catalog'
self.errorStages['piloterrorcode'][1130] = 'ddm-start'
self.errorCodes['piloterrorcode'][1131] = 'Put function can not be called for staging out'
self.errorStages['piloterrorcode'][1131] = 'ddm-end'
self.errorCodes['piloterrorcode'][1132] = 'LRC registration error (consult log file)'
self.errorStages['piloterrorcode'][1132] = 'ddm-end'
self.errorCodes['piloterrorcode'][1133] = 'Put error: Fetching default storage URL failed'
self.errorStages['piloterrorcode'][1133] = 'ddm-end'
self.errorCodes['piloterrorcode'][1134] = 'Put error: Error in mkdir on localSE, not allowed or no available space'
self.errorStages['piloterrorcode'][1134] = 'ddm-end'
self.errorCodes['piloterrorcode'][1135] = 'Could not get file size in job workdir'
self.errorStages['piloterrorcode'][1135] = 'ddm-end'
self.errorCodes['piloterrorcode'][1136] = 'Put error: Error running md5sum to the file in job workdir'
self.errorStages['piloterrorcode'][1136] = 'ddm-end'
self.errorCodes['piloterrorcode'][1137] = 'Put error: Error in copying the file from job workdir to localSE'
self.errorStages['piloterrorcode'][1137] = 'ddm-end'
self.errorCodes['piloterrorcode'][1138] = 'Put error: could not get the file size on localSE'
self.errorStages['piloterrorcode'][1138] = 'ddm-end'
self.errorCodes['piloterrorcode'][1139] = 'Put error: Problem with copying from job workdir to local SE: size mismatch'
self.errorStages['piloterrorcode'][1139] = 'ddm-end'
self.errorCodes['piloterrorcode'][1140] = 'Put error: Error running md5sum to the file on localSE'
self.errorStages['piloterrorcode'][1140] = 'ddm-end'
self.errorCodes['piloterrorcode'][1141] = 'Put error: Problem with copying from job workdir to local SE: md5sum mismatch'
self.errorStages['piloterrorcode'][1141] = 'ddm-end'
self.errorCodes['piloterrorcode'][1142] = 'Put error: failed to register the file on local SE'
self.errorStages['piloterrorcode'][1142] = 'ddm-end'
self.errorCodes['piloterrorcode'][1143] = 'Failed to chmod trf'
self.errorStages['piloterrorcode'][1143] = 'ddm-start'
self.errorCodes['piloterrorcode'][1144] = 'Job killed by panda server'
self.errorStages['piloterrorcode'][1144] = 'user-during'
self.errorCodes['piloterrorcode'][1145] = 'Get error: md5sum mismatch on input file'
self.errorStages['piloterrorcode'][1145] = 'ddm-start'
self.errorCodes['piloterrorcode'][1146] = 'Trf installation dir does not exist and could not be installed'
self.errorStages['piloterrorcode'][1146] = 'ddm-start'
self.errorCodes['piloterrorcode'][1147] = 'Put error: dccp returned readOnly'
self.errorStages['piloterrorcode'][1147] = 'ddm-end'
self.errorCodes['piloterrorcode'][1148] = 'Put error: Failed to remove readOnly file in dCache'
self.errorStages['piloterrorcode'][1148] = 'ddm-end'
self.errorCodes['piloterrorcode'][1149] = 'wget command failed to download trf'
self.errorStages['piloterrorcode'][1149] = 'ddm-start'
self.errorCodes['piloterrorcode'][1150] = 'Looping job killed by pilot'
self.errorStages['piloterrorcode'][1150] = 'athena-end'
self.errorCodes['piloterrorcode'][1151] = 'Get error: Input file staging timed out'
self.errorStages['piloterrorcode'][1151] = 'ddm-start'
self.errorCodes['piloterrorcode'][1152] = 'Put error: File copy timed out'
self.errorStages['piloterrorcode'][1152] = 'ddm-end'
self.errorCodes['piloterrorcode'][1153] = 'Lost job was not finished'
self.errorStages['piloterrorcode'][1153] = 'athena-end'
self.errorCodes['piloterrorcode'][1154] = 'Failed to register log file'
self.errorStages['piloterrorcode'][1154] = 'athena-end'
self.errorCodes['piloterrorcode'][1155] = 'Failed to move output files for lost job'
self.errorStages['piloterrorcode'][1155] = 'athena-end'
self.errorCodes['piloterrorcode'][1156] = 'Pilot could not recover job'
self.errorStages['piloterrorcode'][1156] = 'athena-end'
self.errorCodes['piloterrorcode'][1157] = 'Could not create log file'
self.errorStages['piloterrorcode'][1157] = 'athena-end'
self.errorCodes['piloterrorcode'][1158] = 'Reached maximum number of recovery attempts'
self.errorStages['piloterrorcode'][1158] = 'athena-end'
self.errorCodes['piloterrorcode'][1159] = 'Job recovery could not read PoolFileCatalog.xml file (guids lost)'
self.errorStages['piloterrorcode'][1159] = 'athena-end'
self.errorCodes['piloterrorcode'][1160] = 'LRC registration error: file name string limit exceeded 250'
self.errorStages['piloterrorcode'][1160] = 'ddm-end'
self.errorCodes['piloterrorcode'][1161] = 'Job recovery could not generate xml for remaining output files'
self.errorStages['piloterrorcode'][1161] = 'athena-end'
self.errorCodes['piloterrorcode'][1162] = 'LRC registration error: Non-unique LFN'
self.errorStages['piloterrorcode'][1162] = 'ddm-end'
self.errorCodes['piloterrorcode'][1163] = 'Grid proxy not valid'
self.errorStages['piloterrorcode'][1163] = 'ddm-start'
self.errorCodes['piloterrorcode'][1164] = 'Get error: Local input file missing'
self.errorStages['piloterrorcode'][1164] = 'ddm-start'
self.errorCodes['piloterrorcode'][1165] = 'Put error: Local output file missing'
self.errorStages['piloterrorcode'][1165] = 'ddm-end'
self.errorCodes['piloterrorcode'][1166] = 'Put error: File copy broken by SIGPIPE'
self.errorStages['piloterrorcode'][1166] = 'ddm-end'
self.errorCodes['piloterrorcode'][1167] = 'Get error: Input file missing in PoolFileCatalog.xml'
self.errorStages['piloterrorcode'][1167] = 'ddm-start'
self.errorCodes['piloterrorcode'][1168] = 'Get error: Total file size too large'
self.errorStages['piloterrorcode'][1168] = 'user-start'
self.errorCodes['piloterrorcode'][1169] = 'Put error: LFC registration failed'
self.errorStages['piloterrorcode'][1169] = 'ddm-end'
self.errorCodes['piloterrorcode'][1170] = 'Error running adler32 on the file in job workdir'
self.errorStages['piloterrorcode'][1170] = 'ddm-start'
self.errorCodes['piloterrorcode'][1171] = 'Get error: adler32 mismatch on input file'
self.errorStages['piloterrorcode'][1171] = 'ddm-start'
self.errorCodes['piloterrorcode'][1172] = 'Put error: Problem with copying from job workdir to local SE: adler32 mismatch'
self.errorStages['piloterrorcode'][1172] = 'ddm-end'
self.errorCodes['piloterrorcode'][1173] = 'PandaMover staging error: File is not cached'
self.errorStages['piloterrorcode'][1173] = 'athena-end'
self.errorCodes['piloterrorcode'][1174] = 'PandaMover transfer failure'
self.errorStages['piloterrorcode'][1174] = 'athena-end'
self.errorCodes['piloterrorcode'][1175] = 'Get error: Problem with copying from local SE to job workdir: size mismatch'
self.errorStages['piloterrorcode'][1175] = 'ddm-start'
self.errorCodes['piloterrorcode'][1176] = 'Pilot has no child processes (job wrapper has either crashed or did not send final status)'
self.errorStages['piloterrorcode'][1176] = 'panda-end'
self.errorCodes['piloterrorcode'][1177] = 'Voms proxy not valid'
self.errorStages['piloterrorcode'][1177] = 'ddm-start'
self.errorCodes['piloterrorcode'][1178] = 'Get error: No input files are staged'
self.errorStages['piloterrorcode'][1178] = 'ddm-start'
self.errorCodes['piloterrorcode'][1179] = 'Get error: Failed to get LFC replicas'
self.errorStages['piloterrorcode'][1179] = 'ddm-start'
self.errorCodes['piloterrorcode'][1180] = 'Get error: Globus system error'
self.errorStages['piloterrorcode'][1180] = 'ddm-start'
self.errorCodes['piloterrorcode'][1181] = 'Put error: Globus system error'
self.errorStages['piloterrorcode'][1181] = 'ddm-end'
self.errorCodes['piloterrorcode'][1182] = 'Get error: Failed to get LFC replica'
self.errorStages['piloterrorcode'][1182] = 'ddm-start'
self.errorCodes['piloterrorcode'][1183] = 'LRC registration error: Guid-metadata entry already exists'
self.errorStages['piloterrorcode'][1183] = 'ddm-end'
self.errorCodes['piloterrorcode'][1184] = 'Put error: PoolFileCatalog could not be found in workdir'
self.errorStages['piloterrorcode'][1184] = 'ddm-end'
self.errorCodes['piloterrorcode'][1185] = 'Put error: Error running adler32 on the file in job workdir'
self.errorStages['piloterrorcode'][1185] = 'ddm-end'
self.errorCodes['piloterrorcode'][1186] = 'Software directory does not exist'
self.errorStages['piloterrorcode'][1186] = 'panda-start'
self.errorCodes['piloterrorcode'][1187] = 'Athena metadata is not available'
self.errorStages['piloterrorcode'][1187] = 'athena-end'
self.errorCodes['piloterrorcode'][1188] = 'lcg-getturls failed'
self.errorStages['piloterrorcode'][1188] = 'panda-during'
self.errorCodes['piloterrorcode'][1189] = 'lcg-getturls was timed-out'
self.errorStages['piloterrorcode'][1189] = 'panda-during'
self.errorCodes['piloterrorcode'][1190] = 'LFN too long (exceeding limit of 150 characters)'
self.errorStages['piloterrorcode'][1190] = 'panda-during'
self.errorCodes['piloterrorcode'][1191] = | |
for n2 in nodes2:
if n2 not in nodes:
maps = False
if maps:
if not e in train2all:
train2all[e] = [e2]
else:
train2all[e].append(e2)
test_edges = all_pt_edges.intersection(set(itertools.chain.from_iterable(train2all.values())))
return train2all, test_edges
def join_edges(self, x_r, train2all, likelihood_params, parsimony_params):
    """Aggregate edges in the full reconstruction matrix to fit the joint
    edges in *train2all*.

    :param x_r: feature reconstruction matrix (edges x features)
    :param train2all: mapping from a joint training edge to the list of
        full-reconstruction edges it covers
    :param likelihood_params: kept for interface compatibility; the mode is
        actually read from self.likelihood_params
    :param parsimony_params: unused; parsimony aggregation not implemented
    :return: DataFrame of aggregated rows (joint edges x features)
    :raises Exception: if self.likelihood_params is None (parsimony case)
    """
    df = pd.DataFrame()
    for e in train2all:
        # aggregate the covered rows into a single row for the joint edge
        if self.likelihood_params is not None:
            rows = x_r.loc[train2all[e], :]
            if 'gt_probs' not in self.likelihood_params:
                # discretized features: any positive branch value makes the
                # joint edge positive (binarized sum)
                cs = rows.sum(axis=0)
                cs[cs > 0] = 1
                df = pd.concat([df, cs], axis=1)
            elif self.is_phenotype_and_continuous_features:
                # continuous features: sum the per-branch deltas (each branch
                # holds the difference of the reconstructed feature value
                # between its start and end node).
                # BUG FIX: the original used loop index 'i' here without any
                # loop, raising NameError; sum over all covered rows instead.
                cs = np.zeros(shape=x_r.shape[1])
                for i in range(rows.shape[0]):
                    cs = cs + rows.iloc[i, :]
                df = pd.concat([df, pd.Series(cs)], axis=1)
            else:
                # probabilities: combine as P(a or b) = a + (1 - a) * b
                cs = np.zeros(shape=x_r.shape[1])
                for i in range(rows.shape[0]):
                    cs = cs + (1 - cs) * rows.iloc[i, :]
                df = pd.concat([df, pd.Series(cs)], axis=1)
        else:
            raise Exception("parsimony case not yet implemented")
    # get df back into samples x features shape
    df.columns = train2all.keys()
    return df.T
def get_rec_samples(self, x_r, y_r, yp_train, pt_out, ofold, ifold):
    """Compute the training samples by running the parsimony/likelihood
    reconstruction of the phenotype (pt) for the training set without the
    held-out samples.

    :param x_r: feature reconstruction matrix for the full tree (edges x features)
    :param y_r: phenotype reconstruction for the full tree
    :param yp_train: phenotype labels of the training samples only
    :param pt_out: phenotype output target passed through to the reconstruction
    :param ofold: outer CV fold index (may be None)
    :param ifold: inner CV fold index (may be None)
    :return: tuple (x_r_train, y_r_train, x_r_test, y_r_test)
    """
    # retrieve a matrix of per-edge phenotype labels like:
    # N1_N4 0
    # N2_44 1
    m = crh.reconstruct_pt_likelihood(yp_train, self.model_out, self.config, self.likelihood_params, pt_out, ofold, ifold, self.consider_in_recon)
    all_pt_edges = set(x_r.index)
    train_pt_edges = set(m.index)
    #print len(all_pt_edges), "number of all reconstruction samples"
    #print len(train_pt_edges), "number of all reconstruction samples without test samples"
    #print yp_train.index.values, "training samples"
    # all edges only present in the full reconstruction
    df1 = all_pt_edges.difference(train_pt_edges)
    #print "held out edges including not yet joined edges", df1
    # all edges that are only in the training reconstruction and thus must have been joined
    df2 = train_pt_edges.difference(all_pt_edges)
    #print "edges that are only found in the training reconstruction", df2
    #print "training edges", train_pt_edges
    # map training phenotype edges which don't have a 1:1 equivalent in the all_pt_edges
    train2all, test_edges = self.get_training_and_testing_mapping(df2, df1)
    #print "mapping from edges only in the training reconstruction to edges in the full reconstruction", train2all
    #print "held out edges from get training and testing mapping", test_edges
    # get the corresponding samples in the full reconstruction matrix and aggregate them
    joint_x_r = self.join_edges(x_r, train2all, self.likelihood_params, self.parsimony_params)
    # add the y labels to those rows
    # NOTE(review): .loc is indexed with the dict itself, which pandas treats
    # as its keys — presumably train2all.keys() was meant; confirm before changing.
    joint_y_r = m.loc[train2all, :].iloc[:, 0]
    # add all the other training samples (edges shared by both reconstructions)
    training_edges = all_pt_edges.intersection(train_pt_edges)
    x_r_train = pd.concat([x_r.loc[training_edges,:], joint_x_r], axis = 0)
    y_r_train = pd.concat([m.loc[training_edges, :].iloc[:,0], joint_y_r], axis = 0)
    # collapse label 2 into the positive class 1 — assumes 2 encodes a
    # positive reconstruction state; TODO confirm the label encoding
    y_r_train[y_r_train == 2] = 1
    x_r_test = x_r.loc[test_edges,]
    y_r_test = y_r.loc[test_edges,]
    return x_r_train, y_r_train, x_r_test, y_r_test
def setup_folds(self, x, y, cv, x_p, y_p, pt_out, ifold = None, ofold = None, do_block_cross_validation = True):
    """Prepare all combinations of training and test folds, either for the
    standard phyletic-pattern case, the likelihood-reconstruction case, or
    the combined case.

    Yields 7-tuples (x_train, y_train, x_test, y_test, xp_train, yp_train,
    xp_test); in the phyletic-pattern case y_test is None and the
    reconstruction slots are filled with the phyletic-pattern folds.

    :param x: reconstruction-based feature matrix
    :param y: reconstruction-based labels
    :param cv: number of folds
    :param x_p: phyletic-pattern feature matrix (samples x features)
    :param y_p: phyletic-pattern labels
    :param pt_out: phenotype output target for the reconstruction
    :param ifold: NOTE(review): this parameter is shadowed by the local
        counter `ifold = 0` below, so the passed value is never used —
        confirm whether that is intended
    :param ofold: outer fold index (None when not nested)
    :param do_block_cross_validation: set False to disable grouping (inner folds)
    """
    # divide the samples into cv folds and yield training and test fold
    folds = []  # NOTE(review): unused local, kept as-is
    # block cross validation: keep samples of the same group in one fold
    if self.block_cross_validation is not None and do_block_cross_validation:
        kf = GroupKFold(n_splits = cv)
        kf_split = kf.split(x_p, groups = self.block_cross_validation.loc[x.index, "group_id"].tolist())
    # normal k fold cross-validation
    else:
        kf = KFold(n_splits = cv)
        kf_split = kf.split(x_p)
    ifold = 0
    for train, test in kf_split:
        xp_train = x_p.iloc[train, :].copy()
        yp_train = y_p.iloc[train].copy()
        xp_test = x_p.iloc[test, :].copy()
        if not self.is_rec_based:
            # standard phyletic pattern cv (no reconstruction test labels)
            yield ((xp_train, yp_train, xp_test, None, xp_train, yp_train, xp_test))
        # otherwise do max likelihood reconstruction
        else:
            if ofold is None:
                # not nested: the running fold counter acts as the outer fold id
                x_train, y_train, x_test, y_test = self.get_rec_samples(x, y, yp_train, pt_out, ofold = ifold, ifold = None)
            else:
                x_train, y_train, x_test, y_test = self.get_rec_samples(x, y, yp_train, pt_out, ofold = ofold, ifold = ifold)
            ifold += 1
            yield (x_train, y_train, x_test, y_test, xp_train, yp_train, xp_test)
def outer_cv(self, x, y, x_p, y_p, pt_out, cv_inner = None, do_calibration = True):
    """Do a cross validation with cv_outer folds.

    If only cv_outer is given, do a cross validation for each value of the
    parameter C given in self.config['c_params'].
    If cv_inner is given, do a nested cross validation optimizing the
    parameter C in the inner loop.

    :param x: reconstruction-based feature matrix
    :param y: reconstruction-based labels
    :param x_p: phyletic-pattern feature matrix
    :param y_p: phyletic-pattern labels
    :param pt_out: phenotype output target for the reconstruction
    :param cv_inner: number of inner folds, or None for non-nested CV
    :param do_calibration: forwarded to self.cv for the final classifier
    :return: nested case: (predictions, scores) Series indexed like y_p;
        non-nested case: (predictions, scores) DataFrames with one column
        per C parameter
    """
    # do n fold cross validation
    if not cv_inner is None:
        #DEBUG print "outer folds set up"
        # per-sample predictions/scores of the outer folds
        ocv_preds = pd.Series(np.zeros(shape=[len(y_p)]))
        ocv_preds.index = x_p.index
        ocv_scores = ocv_preds.copy()
        i = 0
        for x_train, y_train, x_test, y_test, xp_train, yp_train, xp_test in self.setup_folds(x, y, self.cv_outer, x_p, y_p, pt_out):
            #DEBUG print "inner folds set up"
            # set up prediction data frame: one column per candidate C value
            all_preds = pd.DataFrame(np.zeros(shape=[len(yp_train), len(self.config['c_params'])]))
            all_preds.columns = self.config['c_params']
            all_preds.index = yp_train.index
            # do inner cross validation over the training part of this outer fold
            for x_train_train, y_train_train, x_train_test, y_train_test, xp_train_train, yp_train_train, xp_train_test in self.setup_folds(x_train, y_train, cv_inner, xp_train, yp_train, pt_out, i, do_block_cross_validation = False, ofold = i):
                for c_param in self.config['c_params']:
                    preds, _ = self.cv(x_train_train, y_train_train, xp_train_train, yp_train_train, xp_train_test, c_param)
                    all_preds.loc[xp_train_test.index, c_param] = list(preds)
            # determine C param with the best accuracy
            # copy yp train and change the negative labels from 0 to -1
            yp_t_t = yp_train.copy()
            yp_t_t[yp_t_t == 0] = -1
            perf_m = self.perf_evaluation(yp_t_t, all_preds)
            # sort C columns by the chosen optimization measure, best first
            perf_m = perf_m.sort_values(by = self.opt_measure, axis = 1, ascending = False)
            c_opt = perf_m.columns[0]
            #DEBUG print c_opt, "optimal value of the C param is this fold"
            # use that C param to train a classifier to predict the left out samples in the outer cv
            p, score = self.cv(x_train, y_train, xp_train, yp_train, xp_test, c_opt, do_calibration = do_calibration)
            ocv_preds.loc[xp_test.index] = list(p)
            ocv_scores.loc[xp_test.index] = list(score.iloc[:, 0])
            i += 1
        return ocv_preds, ocv_scores
    # non-nested case: store predictions for each fold in all_preds
    all_preds = pd.DataFrame(np.zeros(shape=[len(y_p), len(self.config['c_params'])]))
    all_preds.index = x_p.index
    all_preds.columns = self.config['c_params']
    all_scores = all_preds.copy()
    # do a cross validation for each value of the C param
    for x_train, y_train, x_test, y_test, xp_train, yp_train, xp_test in self.setup_folds(x, y, self.cv_outer, x_p, y_p, pt_out):
        for c_param in self.config['c_params']:
            preds, scores = self.cv(x_train, y_train, xp_train, yp_train, xp_test, c_param, do_calibration = do_calibration)
            all_preds.loc[xp_test.index, c_param] = list(preds)
            all_scores.loc[xp_test.index, c_param] = list(scores.iloc[:, 0])
    return all_preds, all_scores
def perf_evaluation(self, gold_standard, predictions):
    """Evaluate several performance measures for every C parameter.

    :param gold_standard: Series of true labels (+1 / -1)
    :param predictions: DataFrame of predicted labels, one column per C param
    :return: DataFrame (measure x C param) of performance values
    """
    # 'npv' added to the index: it was previously created implicitly via
    # .loc enlargement inside the loop
    colnames = ['bacc', "pos-rec", "neg-rec", "precision", "npv", "F1-score", "neg-F1-score"]
    # np.zeros directly: the pd.np alias was deprecated and removed in modern pandas
    perf_m = pd.DataFrame(np.zeros((len(colnames), len(self.config['c_params']))),
                          index = colnames, columns = self.config['c_params'])
    for j in range(len(self.config['c_params'])):
        c_param = self.config['c_params'][j]
        preds_j = predictions.iloc[:, j]
        # recall of pt positive class
        perf_m.loc['pos-rec', c_param] = self.recall_pos(gold_standard, preds_j)
        # recall of pt negative class
        perf_m.loc['neg-rec', c_param] = self.recall_neg(gold_standard, preds_j)
        # balanced accuracy
        perf_m.loc['bacc', c_param] = self.bacc(perf_m.loc['pos-rec', c_param], perf_m.loc['neg-rec', c_param])
        # precision (positive predictive value)
        perf_m.loc['precision', c_param] = self.precision(gold_standard, preds_j)
        # negative predictive value
        perf_m.loc['npv', c_param] = self.npv(gold_standard, preds_j)
        # F1 scores for the positive and the negative class
        perf_m.loc['F1-score', c_param] = self.f1_score(perf_m.loc['pos-rec', c_param], perf_m.loc['precision', c_param])
        perf_m.loc['neg-F1-score', c_param] = self.f1_score(perf_m.loc['neg-rec', c_param], perf_m.loc['npv', c_param])
    return perf_m
def majority_feat_sel(self, x, y, x_p, y_p, all_preds, all_scores, k, pt_out, no_classifier = 1):
"""determine the features occuring in the majority of the k best models"""
#sanity check if number of classifiers selected for majority feature selection exceeds the number of c params
if k > len(self.config['c_params']):
sys.exit("number of selected classifiers for feature selection (%s) exceeds the number of c params (%s)" %(k, len(self.config['c_params'])))
#retrieve weights from likelihood_params
if not self.likelihood_params is None and "continuous_target" in self.likelihood_params:
#transform x and y
x, y, w = self.transf_from_probs(x, y)
else:
w = pd.Series(np.ones(shape = len(y)) )
#discard non binary phenotype labels in the reconstruction matrix and target matrix
if not self.likelihood_params is None:
t = float(self.likelihood_params["threshold"])
condition = ~((y != -1) & (y < t))
y = y.loc[condition]
x = x.loc[condition, :]
#determine the k best classifiers
#make sure the indices match
all_preds.index = y_p.index
x.columns = x_p.columns
y.index = x.index
#w.index = x.index
y_p_t = y_p.copy()
#make sure | |
<filename>get_work.py
#!/usr/bin/python3
import sys
import re
import time
import urllib3
import datetime
import math
http = urllib3.PoolManager(headers={"User-Agent":"keisentraut/prime95-optimal-worktodo"})
def FATAL(msg):
    """Report an unrecoverable error and abort the script with exit code 1."""
    print("FATAL: " + str(msg))
    sys.exit(1)
# Global switch for diagnostic output (1 = enabled, 0 = silent).
PRINT_DEBUG = 1

def DEBUG(msg):
    """Emit *msg* as a '#'-prefixed diagnostic line when PRINT_DEBUG is set."""
    if not PRINT_DEBUG:
        return
    print(f"# {msg}")
def usage():
    """Print a short help text describing how to invoke the script."""
    for line in (
        "This is a script which queries the PrimeNet server in order ",
        "to get the status of exponents. Please don't run this with ",
        "large ranges, it might create high load on the server.",
        "",
        "see https://mersenneforum.org/showthread.php?t=26750",
        "",
        "usage:",
        " python.exe get_work.py <from> <to> <print_debug>",
        "example:",
        " python.exe get_work.py 123000 124000 True",
        " generates P-1/P+1 worktodo.txt file for Mersenne numbers with exponents between",
        " 123000 and 124000.If all Mersenne numbers in this range have appropriate P-1/P+1,",
        " then no output is generated. The last argument \"True\" enables debug output.",
        " Set the debug output to False, if you want to pass the output directly to Prime95.",
    ):
        DEBUG(line)
# Bounds defined by gut feeling.
# see also posting by ATH at https://mersenneforum.org/showthread.php?t=26750 where he suggests:
#
# No known factors With known factors
#Exponent P-1 B1 P+1 B1 P-1 B1 P+1 B1
#50K-250K 100M 50M 30M 15M
#250K-500K 30M 15M 15M 8M
#500K-1M 15M 8M 10M 5M
#
def PM1_B1_should(n, known_factors=False):
    """Return the desired P-1 stage-1 bound (B1) for exponent *n*.

    Exponents with known factors get smaller targets than unfactored ones;
    above 10M a flat 2M bound applies in both cases.
    """
    if known_factors == False:
        tiers = ((100000, 250000000), (250000, 100000000), (500000, 30000000),
                 (1000000, 15000000), (4000000, 5000000), (10000000, 2500000))
    else:
        tiers = ((100000, 100000000), (250000, 30000000), (500000, 15000000),
                 (1000000, 10000000), (4000000, 5000000), (10000000, 2500000))
    for upper_limit, b1 in tiers:
        if n < upper_limit:
            return b1
    return 2000000
def PP1_B1_should(n, known_factors=False):
    """Return the desired P+1 B1 bound: half of the matching P-1 bound."""
    pm1_bound = PM1_B1_should(n, known_factors)
    return pm1_bound // 2
# actually, this should be called "is pseudoprime", but its safe enough
def isprime(n):
sp = set([2,3,5,7,11,13,17,19])
if n < 20: return (n in sp)
for b in sp:
if pow(b,n-1,n) != 1:
return False
return True
# takes a string like "2020-02-12" as input, returns True if it was less than 3 months ago.
def is_recent(datestr):
age_days = (datetime.datetime.now() - datetime.datetime.strptime(datestr, "%Y-%m-%d")).days
return age_days <= 90
# Each tier: (minimal B1 bound, curves required for a full t-level,
#             factor digits credited when that many curves are done)
ECMBOUNDS = [
    (11000, 100, 20),
    (50000, 280, 25),
    (250000, 640, 30),
    (1000000, 1580, 35),
    (3000000, 4700, 40),
    (11000000, 9700, 45),
    (44000000, 17100, 50),
    (110000000, 46500, 55),
    (260000000, 112000, 60),
    (800000000, 360000, 65),
]

def get_ecm_level(ecm):
    """Estimate the completed ECM level (in factor digits) from curve counts.

    *ecm* maps (B1, B2) pairs to the number of curves run with those bounds.
    Every curve whose B1 meets a tier's minimum counts toward that tier; the
    result is the best digit estimate over all tiers.
    """
    level = 0  # number of digits
    for min_b1, curves_needed, digits in ECMBOUNDS:
        done = sum(count for (b1, _b2), count in ecm.items() if b1 >= min_b1)
        ratio = done / curves_needed
        if ratio >= 2.:
            # twice as many curves as required:
            # chance to miss a factor is exp(-2) ~ 0.135 -> credit one extra digit
            level = max(level, digits + 1)
        elif ratio >= 1.:
            # exactly as many curves as required:
            # chance to miss a factor is exp(-1) ~ 0.368
            level = max(level, digits)
        elif ratio >= 0.5:
            # half of the required curves:
            # chance to miss a factor is exp(-0.5) ~ 0.607 -> credit 3 digits less
            level = max(level, digits - 3)
    return level

def ecm_level_to_B1(level):
    """Return the B1 bound at which ECM work should continue for *level* digits."""
    B1 = 1000000000000  # larger than any reasonable B1 bound
    for min_b1, _curves, digits in ECMBOUNDS:
        if level < digits:
            B1 = min(B1, min_b1)
    return B1
# sometimes, composite factors are reported by the server
# we need to factor them, but we obviously can only do this for "sane" sized numbers
# TODO: implement something better than pollard rho
def factorize(n):
    """Return the prime factorization of *n* as a sorted list of primes.

    Trial division by small primes followed by Pollard's rho; only intended
    for moderately sized inputs (< 10**60).

    :raises SystemExit: via FATAL for oversized inputs
    """
    if n >= 10**60:
        FATAL(f"Cannot factor {n}, it is too large.")
        assert(False)
    # 0 and 1 have no prime factors (previously this case looped forever in rho)
    if n <= 1:
        return []
    # stop recursion if prime is found
    if isprime(n):
        return [n]
    # I don't want to implement a fancy algorithm either.
    # So we just use some TF and then pollard rho...
    # (composite 39 removed from the original list: 3 is tried first, so it was unreachable)
    for p in [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]:
        if n % p == 0:
            return [p] + factorize(n // p)
    # pollard rho with Floyd cycle detection; restart from another seed if
    # the walk degenerates (gcd == n), instead of recursing into factorize(1)
    start = 2
    while True:
        x, y = start, start
        while True:
            x = pow(x, 2, n) + 1
            y = pow(y, 2, n) + 1
            y = pow(y, 2, n) + 1
            g = math.gcd(x - y, n)
            if g == n:
                start += 1  # degenerate cycle: retry with a different start value
                break
            if g != 1:
                return sorted(factorize(n // g) + factorize(g))
def worktodo_PM1(n, B1, B2=None, how_far_factored=67, factors=()):
    """Build a Prime95 worktodo.txt 'Pminus1' line for M(n).

    :param n: Mersenne exponent
    :param B1: stage 1 bound (must be >= 11000)
    :param B2: stage 2 bound, or None/0 to let Prime95 choose
    :param how_far_factored: trial-factoring depth in bits
    :param factors: known factors, appended quoted and comma-separated;
        default is an immutable () instead of the mutable-default pitfall
        (the parameter is only read, so behavior is unchanged)
    """
    assert(B1 >= 11000)
    if factors:
        factors_part = ",\"" + ",".join([str(f) for f in factors]) + "\""
    else:
        factors_part = ""
    if B2:
        assert(B1 <= B2 and B2 <= 100000 * B1)
    else:
        B2 = 0
    return f"Pminus1=N/A,1,2,{n},-1,{B1},{B2},{how_far_factored}" + factors_part
def worktodo_PP1(n, B1, B2=None, nth_run=1, how_far_factored=67, factors=()):
    """Build a Prime95 worktodo.txt 'Pplus1' line for M(n).

    :param n: Mersenne exponent
    :param B1: stage 1 bound (must be >= 11000)
    :param B2: stage 2 bound, or None/0 to let Prime95 choose
    :param nth_run: which P+1 attempt this is (selects the start value)
    :param how_far_factored: trial-factoring depth in bits
    :param factors: known factors, appended quoted and comma-separated;
        default is an immutable () instead of the mutable-default pitfall
        (the parameter is only read, so behavior is unchanged)
    """
    assert(B1 >= 11000)
    if factors:
        factors_part = ",\"" + ",".join([str(f) for f in factors]) + "\""
    else:
        factors_part = ""
    if B2:
        assert(B1 <= B2 and B2 <= 100000 * B1)
    else:
        B2 = 0
    return f"Pplus1=N/A,1,2,{n},-1,{B1},{B2},{nth_run},{how_far_factored}" + factors_part
#############################################################################################3
if len(sys.argv) < 3 or len(sys.argv) > 4:
usage()
sys.exit(1)
else:
start = int(sys.argv[1])
stop = int(sys.argv[2])
if len(sys.argv) == 4:
PRINT_DEBUG = int(sys.argv[3])
sleep_time = 1.
for n in range(start,stop):
if isprime(n):
DEBUG("-"*80)
if n < 50000:
DEBUG(f"You should use GMP-ECM for this. Ignoring M{n}.")
continue
response = http.request('GET', f"https://www.mersenne.org/report_exponent/?exp_lo={n}&exp_hi=&text=1&full=1&ecmhist=1")
html = response.data.decode('utf-8')
lines = [l.strip() for l in html.split("\n") if l.strip().startswith(f"{n}\t")]
factors = set()
ecm = {} # (B1, B2) : count
pm1 = set() # (B1, B2, E)
pp1 = set() # (B1, B2, start1, start2)
is_recently_assigned = False
is_fully_factored = False
how_far_factored = [True] * 64 + [False] * (100-64) # how_far_factored[i] indicates if [2^(i-1); 2^(i)] was done
for l in lines:
if l.startswith(f"{n}\tFactored\t"):
#41681 Factored 1052945423;16647332713153;2853686272534246492102086015457
factors |= set([int(f) for f in l.split("\t")[2].split(";")])
elif l.startswith(f"{n}\tPRPCofactor\t"):
#41681 PRPCofactor Verified (Factored);2017-11-09;kkmrkkblmbrbk;PRP_PRP_PRP_PRP_;3;37261;1;3
pass
elif l.startswith(f"{n}\tUnfactored\t"):
#100000007 Unfactored 2^79
result = l.split("\t")[2]
assert(result.startswith("2^"))
high = int(result[2:])
for i in range(high):
how_far_factored[i] = True
elif l.startswith(f"{n}\tLL\t"):
# 100000007 LL Verified;2018-02-26;G0rfi3ld;F9042256B193FAA0;3178317
pass
elif l.startswith(f"{n}\tPRP\t"):
# 20825573 PRP Verified;2020-10-12;gLauss;738AD2BB0D72E3AA;1276614;1;3
pass
elif l.startswith(f"{n}\tPM1\t"):
#100000007 PM1 B1=5000000,B2=150000000
result = l.split("\t")[2]
if m:= re.match("^B1=([0-9]*),B2=([0-9]*),E=([0-9]*)$", result):
B1, B2, E = int(m.group(1)), int(m.group(2)), int(m.group(3))
elif m:= re.match("^B1=([0-9]*),B2=([0-9]*)$", result):
B1, B2 = int(m.group(1)), int(m.group(2))
E = 0
elif m:= re.match("^B1=([0-9]*)$", result):
B1 = int(m.group(1))
B2, E = B1, 0
else:
FATAL(f"could not parse PM1 result \"{result}\" in line \"{l}\"")
assert(B1 <= B2)
assert(E in [0,6,12,30,48])
pm1.add( (B1,B2,E))
elif l.startswith(f"{n}\tAssigned\t"):
h = l.split("\t")[2].split(";")
# 41081 Assigned 2017-10-09;<NAME>;PRP test;;0.0;updated on 2017-10-09;expired on 2017-10-13
is_recently_assigned |= is_recent(h[0])
elif l.startswith(f"{n}\tHistory\t"):
h = l.split("\t")[2].split(";")
is_recently_assigned |= is_recent(h[0])
worktype, result = h[2], h[3]
if worktype == "F-ECM" or worktype == "F":
# 41681 History 2015-04-26;<NAME>;F-ECM;Factor: 2853686272534246492102086015457
# 41681 History 2008-08-26;-Anonymous-;F;Factor: 16647332713153
factors.add(int(result.split(" ")[1]))
elif worktype == "CERT" or worktype == "C-PRP" or worktype == "C-LL":
# we don't care for factorization purposes
pass
elif worktype == "NF":
# 100000007 History 2007-07-04;ComputerraRU;NF;no factor to 2^50
if m:= re.match("^no factor from 2\^([0-9]*)[ ]*to 2\^([0-9]*)[ ]*$", result):
low, high = int(m.group(1)), int(m.group(2))
for i in range(low, high):
how_far_factored[i] = True
elif m:= re.match("^no factor to 2\^([0-9]*)$", result):
high = int(m.group(1))
for i in range(high):
how_far_factored[i] = True
else:
FATAL(f"could not parse NF result \"{result}\" in line \"{l}\"")
assert(False)
elif worktype == "NF-ECM":
# 41681 History 2011-01-23;<NAME>;NF-ECM;3 curves, B1=250000, B2=25000000
if m:=re.match("^([0-9]*) curve[s]?, B1=([0-9]*), B2=([0-9]*)$", result):
c = int(m.group(1))
B1 = int(m.group(2))
B2 = int(m.group(3))
elif m:=re.match("^([0-9]*) curve[s]?, B1=([0-9]*)", result):
c = int(m.group(1))
B1 = int(m.group(2))
B2 = B1
else:
FATAL(f"could not parse NF-ECM result \"{result}\" in line \"{l}\"")
assert(False)
assert(B1 <= B2)
assert(c >= 0) # actually, there are entries where count == 0
if (B1,B2) not in ecm: ecm[(B1,B2)] = 0
ecm[(B1,B2)] += c
elif worktype == "NF-PM1":
# 3999971 History 2018-12-21;<NAME>;NF-PM1;B1=3999971, B2=399997100, E=12
if m:= re.match("^B1=([0-9]*), B2=([0-9]*), E=([0-9]*)$", result):
B1, B2, E = int(m.group(1)), int(m.group(2)), int(m.group(3))
elif m:= re.match("^B1=([0-9]*), B2=([0-9]*)$", result):
B1, B2 = int(m.group(1)), int(m.group(2))
E = 0
elif m:= re.match("^B1=([0-9]*)$", result):
B1 = int(m.group(1))
B2, E = B1, 0
else:
FATAL(f"could not parse NF-PM1 result \"{result}\" in line \"{l}\"")
assert(False)
assert(B1 <= B2)
assert(E in [0,6,12,30,48])
pm1.add( (B1,B2,E))
elif worktype == "F-PM1":
# 123031 History 2013-08-29;BloodIce;F-PM1;Factor: 3158950722867400921
# 2000177 History 2019-01-15;<NAME>;F-PM1;Factor: 131059942116526306804441369 / (P-1, B1=1000000)
if m:= re.match("^Factor: ([0-9]*)$", result):
f = int(m.group(1))
factors.add(f)
elif m:= re.match("^Factor: ([0-9]*) / \(P-1, B1=([0-9]*)\)$", result):
f, B1 = int(m.group(1)), int(m.group(2))
B2, E = B1, 0
pm1.add( (B1,B2,E) )
factors.add(f)
elif m:= re.match("^Factor: ([0-9]*) / \(P-1, B1=([0-9]*), B2=([0-9]*)\)$", result):
f, B1, B2 = int(m.group(1)), int(m.group(2)), int(m.group(3))
E = 0
pm1.add( (B1,B2,E) )
factors.add(f)
elif m:= re.match("^Factor: ([0-9]*) / \(P-1, B1=([0-9]*), B2=([0-9]*), E=([0-9]*)\)$", result):
f, B1, B2, E = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
pm1.add( (B1,B2,E) )
factors.add(f)
else:
FATAL(f"Could not parse | |
of the channels as a percentage of range;
The value of the offset (range * percentage) is ALWAYS substracted from the signal
No offset can be used for 1000 mV and 10000 mV range in Buffered mode
Input: digitizer_offset('CH0', '1', 'CH1', '50')
Default: '0'; '0'
Output: 'CH0: 10'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if self.input_mode == 0:
if self.amplitude_0 == 1000 or self.amplitude_0 == 10000:
general.message("No offset can be used for 1000 mV and 10000 mV range in Buffered mode")
sys.exit()
elif self.amplitude_1 == 1000 or self.amplitude_1 == 10000:
general.message("No offset can be used for 1000 mV and 10000 mV range in Buffered mode")
sys.exit()
if len(offset) == 2:
ch = str(offset[0])
ofst = int(offset[1])
if ch == 'CH0':
self.offset_0 = ofst
elif ch == 'CH1':
self.offset_1 = ofst
elif len(offset) == 4:
ch1 = str(offset[0])
ofst1 = int(offset[1])
ch2 = str(offset[2])
ofst2 = int(offset[3])
if ch1 == 'CH0':
self.offset_0 = ofst1
elif ch1 == 'CH1':
self.offset_1 = ofst1
if ch2 == 'CH0':
self.offset_0 = ofst2
elif ch2 == 'CH1':
self.offset_1 = ofst2
elif len(offset) == 1:
ch = str(offset[0])
if ch == 'CH0':
return 'CH0: ' + str(self.offset_0)
elif ch == 'CH1':
return 'CH1: ' + str(self.offset_1)
# to update on-the-fly
if self.state == 0:
pass
elif self.state == 1:
if ( self.amplitude_0 != 1000 or self.amplitude_0 != 10000 ) and self.input_mode == 0:
spcm_dwSetParam_i32 (self.hCard, SPC_OFFS0, -self.offset_0 )
spcm_dwSetParam_i32 (self.hCard, SPC_OFFS1, -self.offset_1 )
elif self.input_mode == 1:
spcm_dwSetParam_i32 (self.hCard, SPC_OFFS0, -self.offset_0 )
spcm_dwSetParam_i32 (self.hCard, SPC_OFFS1, -self.offset_1 )
spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_WRITESETUP)
elif self.test_flag == 'test':
self.setting_change_count = 1
if self.input_mode == 0:
assert(self.amplitude_0 != 1000 or self.amplitude_0 != 10000 ), "No offset can be used for 1000 mV and 10000 mV range in Buffered mode"
assert(self.amplitude_1 != 1000 or self.amplitude_1 != 10000 ), "No offset can be used for 1000 mV and 10000 mV range in Buffered mode"
if len(offset) == 2:
ch = str(offset[0])
ofst = int(offset[1])
assert(ch == 'CH0' or ch == 'CH1'), "Incorrect channel; Should be CH0 or CH1"
assert( ofst >= 0 and ofst <= 100 ), "Incorrect offset percentage; Should be 0 <= offset <= 100"
if ch == 'CH0':
self.offset_0 = ofst
elif ch == 'CH1':
self.offset_1 = ofst
elif len(offset) == 4:
ch1 = str(offset[0])
ofst1 = int(offset[1])
ch2 = str(offset[2])
ofst2 = int(offset[3])
assert(ch1 == 'CH0' or ch1 == 'CH1'), "Incorrect channel 1; Should be CH0 or CH1"
assert( ofst1 >= 0 and ofst1 <= 100 ), "Incorrect offset percentage 1; Should be 0 <= offset <= 100"
assert(ch2 == 'CH0' or ch2 == 'CH1'), "Incorrect channel 2; Should be CH0 or CH1"
assert( ofst2 >= 0 and ofst2 <= 100 ), "Incorrect offset percentage 2; Should be 0 <= offset <= 100"
if ch1 == 'CH0':
self.offset_0 = ofst1
elif ch1 == 'CH1':
self.offset_1 = ofst1
if ch2 == 'CH0':
self.offset_0 = ofst2
elif ch2 == 'CH1':
self.offset_1 = ofst2
elif len(offset) == 1:
ch1 = str(offset[0])
assert(ch1 == 'CH0' or ch1 == 'CH1'), "Incorrect channel; Should be CH0 or CH1"
return self.test_offset
else:
assert( 1 == 2 ), 'Incorrect arguments'
def digitizer_coupling(self, *coupling):
"""
Set or query coupling of the channels; Two options are available: [AC, DC]
Input: digitizer_coupling('CH0', 'AC', 'CH1', 'DC')
Default: 'DC'; 'DC'
Output: 'CH0: AC'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(coupling) == 2:
ch = str(coupling[0])
cplng = str(coupling[1])
flag = self.coupling_dict[cplng]
if ch == 'CH0':
self.coupling_0 = flag
elif ch == 'CH1':
self.coupling_1 = flag
elif len(coupling) == 4:
ch1 = str(coupling[0])
cplng1 = str(coupling[1])
flag1 = self.coupling_dict[cplng1]
ch2 = str(coupling[2])
cplng2 = str(coupling[3])
flag2 = self.coupling_dict[cplng2]
if ch1 == 'CH0':
self.coupling_0 = flag1
elif ch1 == 'CH1':
self.coupling_1 = flag1
if ch2 == 'CH0':
self.coupling_0 = flag2
elif ch2 == 'CH1':
self.coupling_1 = flag2
elif len(coupling) == 1:
ch = str(coupling[0])
if ch == 'CH0':
return 'CH0: ' + str(self.coupling_0)
elif ch == 'CH1':
return 'CH1: ' + str(self.coupling_1)
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(coupling) == 2:
ch = str(coupling[0])
cplng = str(coupling[1])
assert(ch == 'CH0' or ch == 'CH1'), "Incorrect channel; Should be CH0 or CH1"
assert( cplng in self.coupling_dict ), "Incorrect coupling; Only DC and AC are available"
flag = self.coupling_dict[cplng]
if ch == 'CH0':
self.coupling_0 = flag
elif ch == 'CH1':
self.coupling_1 = flag
elif len(coupling) == 4:
ch1 = str(coupling[0])
cplng1 = str(coupling[1])
ch2 = str(coupling[2])
cplng2 = str(coupling[3])
assert(ch1 == 'CH0' or ch1 == 'CH1'), "Incorrect channel 1; Should be CH0 or CH1"
assert( cplng1 in self.coupling_dict ), "Incorrect coupling 1; Only DC and AC are available"
flag1 = self.coupling_dict[cplng1]
assert(ch2 == 'CH0' or ch2 == 'CH1'), "Incorrect channel 2; Should be CH0 or CH1"
assert( cplng2 in self.coupling_dict ), "Incorrect coupling 2; Only DC and AC are available"
flag2 = self.coupling_dict[cplng2]
if ch1 == 'CH0':
self.coupling_0 = flag1
elif ch1 == 'CH1':
self.coupling_1 = flag1
if ch2 == 'CH0':
self.coupling_0 = flag2
elif ch2 == 'CH1':
self.coupling_1 = flag2
elif len(coupling) == 1:
ch1 = str(coupling[0])
assert(ch1 == 'CH0' or ch1 == 'CH1'), "Incorrect channel; Should be CH0 or CH1"
return self.test_coupling
else:
assert( 1 == 2 ), 'Incorrect arguments'
def digitizer_impedance(self, *impedance):
"""
Set or query impedance of the channels in buffered mode; Two options are available: [1 M, 50]
In the HF mode impedance is fixed at 50 ohm
Input: digitizer_coupling('CH0', '50', 'CH1', '50')
Default: '50'; '50'
Output: 'CH0: 50'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if self.input_mode == 1:
general.message("Impedance is fixed at 50 Ohm in HF mode")
sys.exit()
if len(impedance) == 2:
ch = str(impedance[0])
imp = str(impedance[1])
flag = self.impedance_dict[imp]
if ch == 'CH0':
self.impedance_0 = flag
elif ch == 'CH1':
self.impedance_1 = flag
elif len(impedance) == 4:
ch1 = str(impedance[0])
imp1 = str(impedance[1])
flag1 = self.impedance_dict[imp1]
ch2 = str(impedance[2])
imp2 = str(impedance[3])
flag2 = self.impedance_dict[imp2]
if ch1 == 'CH0':
self.impedance_0 = flag1
elif ch1 == 'CH1':
self.impedance_1 = flag1
if ch2 == 'CH0':
self.impedance_0 = flag2
elif ch2 == 'CH1':
self.impedance_1 = flag2
elif len(impedance) == 1:
ch = str(impedance[0])
if ch == 'CH0':
return 'CH0: ' + str(self.impedance_0)
elif ch == 'CH1':
return 'CH1: ' + str(self.impedance_1)
elif self.test_flag == 'test':
self.setting_change_count = 1
if self.input_mode == 1:
assert( 1 == 2 ), "Impedance is fixed at 50 Ohm in HF mode"
if len(impedance) == 2:
ch = str(impedance[0])
imp = str(impedance[1])
assert(ch == 'CH0' or ch == 'CH1'), "Incorrect channel; Should be CH0 or CH1"
assert( imp in self.impedance_dict ), "Incorrect impedance; Only 1 M and 50 are available"
flag = self.impedance_dict[imp]
if ch == 'CH0':
self.impedance_0 = flag
elif ch == 'CH1':
self.impedance_1 = flag
elif len(impedance) == 4:
ch1 = str(impedance[0])
imp1 = str(impedance[1])
ch2 = str(impedance[2])
imp2 = str(impedance[3])
assert(ch1 == 'CH0' or ch1 == 'CH1'), "Incorrect channel 1; Should be CH0 or CH1"
assert( imp1 in self.impedance_dict ), "Incorrect impedance 1; Only 1 M and 50 are available"
flag1 = self.impedance_dict[imp1]
assert(ch2 == 'CH0' or ch2 == 'CH1'), "Incorrect channel 2; Should be CH0 or CH1"
assert( imp2 in self.impedance_dict ), "Incorrect impedance 2; Only 1 M and 50 are available"
flag2 = self.impedance_dict[imp2]
if ch1 == 'CH0':
self.impedance_0 = flag1
elif ch1 == 'CH1':
self.impedance_1 = flag1
if ch2 == 'CH0':
self.impedance_0 = flag2
elif ch2 == 'CH1':
self.impedance_1 = flag2
elif len(impedance) == 1:
ch1 = str(impedance[0])
assert(ch1 == 'CH0' or ch1 == 'CH1'), "Incorrect channel; Should be CH0 or CH1"
return self.test_impedance
else:
assert( 1 == 2 ), 'Incorrect arguments'
# UNDOCUMENTED
def digitizer_window(self):
"""
Special function for reading integration window
"""
return ( self.win_right - self.win_left ) * 1000 / self.sample_rate
def digitizer_read_settings(self):
"""
Special function for reading settings of | |
Map"])
#
# plt.subplot(2, 1, 2)
# p22, = plt.plot(number_of_robots, path_planning_time2)
# plt.title("Computation Time \n for Various Robot Populations")
# plt.xlabel("Robot Population Size")
# plt.ylabel("Computation \n Time (Seconds)")
# plt.grid()
#
# plt.legend([p12, p22], ["Large Map", "Medium Map"])
#
# left = 0.125 # the left side of the subplots of the figure
# right = 0.9 # the right side of the subplots of the figure
# bottom = 0.1 # the bottom of the subplots of the figure
# top = 1 # the top of the subplots of the figure
# wspace = 0.2 # the amount of width reserved for space between subplots,
# # expressed as a fraction of the average axis width
# hspace = 0.8 # the amount of height reserved for space between subplots,
# # expressed as a fraction of the average axis height
# plt.subplots_adjust(left, bottom, right, top, wspace, hspace)
#
#
print("Computation Times:")
print("Large Map:", path_planning_time1, "\nMedium Map:", path_planning_time2)
#
print("Mission Completion Times:")
print("Large Map:", total_completion_time1, "\nMedium Map:", total_completion_time2)
# plt.savefig("Plot/2x1_medium_large_map_comparison.pdf", format="pdf", dpi=300, bbox_inches='tight')
plt.savefig("Plot/medium_large_map_comparison.pdf", format="pdf", dpi=300, bbox_inches='tight')
plt.show()
if mode == "show_phases":
    # Reload the pickled intermediate artifacts of one partitioning run and
    # render the algorithm phases as a 2x2 figure.
    with open("SmallLafayetteFlood/rough_partitioning.txt", "rb") as fp: # Unpickling
        rough_partitioning = pickle.load(fp)
    # [0] holds per-robot x coordinate lists, [1] the matching y coordinates.
    rough_partitioning_x = rough_partitioning[0]
    rough_partitioning_y = rough_partitioning[1]
    with open("SmallLafayetteFlood/number_of_partitions.txt", "rb") as fp: # Unpickling
        number_of_partitions = pickle.load(fp)
    with open("SmallLafayetteFlood/cluster_centers.txt", "rb") as fp: # Unpickling
        cluster_centers = pickle.load(fp)
    with open("SmallLafayetteFlood/partition_colors.txt", "rb") as fp: # Unpickling
        partition_colors = pickle.load(fp)
    with open("SmallLafayetteFlood/dominated_cells.txt", "rb") as fp: # Unpickling
        dominated_cells = pickle.load(fp)
    dominated_cells_x = dominated_cells[0]
    dominated_cells_y = dominated_cells[1]
    with open("SmallLafayetteFlood/conflict_cells.txt", "rb") as fp: # Unpickling
        conflict_cells = pickle.load(fp)
    conflict_cells_x = conflict_cells[0]
    conflict_cells_y = conflict_cells[1]
    with open("SmallLafayetteFlood/final_partitioning.txt", "rb") as fp: # Unpickling
        final_partitioning = pickle.load(fp)
    final_partitioning_x = final_partitioning[0]
    final_partitioning_y = final_partitioning[1]
    with open("SmallLafayetteFlood/robot_initial_positions_in_cartesian.txt", "rb") as fp: # Unpickling
        robot_initial_positions_in_cartesian = pickle.load(fp)
    with open("SmallLafayetteFlood/optimal_paths_clone.txt", "rb") as fp: # Unpickling
        optimal_paths_clone = pickle.load(fp)
    # Marker sizes for the different scatter layers.
    plot_cell_boundary_size = 5
    plot_robot_size = 30
    plot_cell_size = 200
    plot_cell_conflict_boundary_size = 25
    # Panel 1: rough partitioning plus the cluster centers (black dots).
    plt.subplot(2, 2, 1)
    for robot_id in range(number_of_partitions):
        plt.scatter(rough_partitioning_x[robot_id], rough_partitioning_y[robot_id], marker="s",
                    s=plot_cell_boundary_size,
                    c=np.ones((len(rough_partitioning_x[robot_id]), 3)) * partition_colors[robot_id])
    plt.scatter(cluster_centers[0], cluster_centers[1], s=plot_robot_size, c='black')
    plt.axis("equal")
    # Panel 2: dominated cells over the rough partitioning, conflicts in black.
    plt.subplot(2, 2, 2)
    for robot_id in range(number_of_partitions):
        plt.scatter(rough_partitioning_x[robot_id], rough_partitioning_y[robot_id], marker="s",
                    s=plot_cell_boundary_size,
                    c=np.ones((len(rough_partitioning_x[robot_id]), 3)) * partition_colors[robot_id])
        plt.scatter(dominated_cells_x[robot_id], dominated_cells_y[robot_id], marker="s",
                    s=plot_cell_size,
                    c=np.ones((len(dominated_cells_x[robot_id]), 3)) * partition_colors[robot_id])
    plt.scatter(conflict_cells_x, conflict_cells_y, marker="s", s=plot_cell_conflict_boundary_size,
                c="black")
    plt.axis("equal")
    # NOTE(review): count simply tracks robot_id; each branch draws one
    # hand-picked segment of that robot's path (different waypoint indices per
    # robot) — looks tuned for this specific map; confirm before reusing.
    count = 0
    for robot_id in range(number_of_partitions):
        if count == 0:
            plt.plot([optimal_paths_clone[robot_id][0, 0], optimal_paths_clone[robot_id][2, 0]], [optimal_paths_clone[robot_id][0, 1], optimal_paths_clone[robot_id][2, 1]],
                     c=partition_colors[robot_id], linewidth=8)
        elif count == 1:
            plt.plot([optimal_paths_clone[robot_id][0, 0], optimal_paths_clone[robot_id][3, 0]], [optimal_paths_clone[robot_id][0, 1], optimal_paths_clone[robot_id][3, 1]],
                     c=partition_colors[robot_id], linewidth=8)
        elif count == 2:
            plt.plot([optimal_paths_clone[robot_id][0, 0], optimal_paths_clone[robot_id][1, 0]], [optimal_paths_clone[robot_id][0, 1], optimal_paths_clone[robot_id][1, 1]],
                     c=partition_colors[robot_id], linewidth=8)
        elif count == 3:
            plt.plot([optimal_paths_clone[robot_id][0, 0], optimal_paths_clone[robot_id][2, 0]], [optimal_paths_clone[robot_id][0, 1], optimal_paths_clone[robot_id][2, 1]],
                     c=partition_colors[robot_id], linewidth=8)
        elif count == 4:
            plt.plot([optimal_paths_clone[robot_id][0, 0], optimal_paths_clone[robot_id][3, 0]], [optimal_paths_clone[robot_id][0, 1], optimal_paths_clone[robot_id][3, 1]],
                     c=partition_colors[robot_id], linewidth=8)
        count += 1
    # Panel 3: final partitioning with conflict cells overlaid in black.
    plt.subplot(2, 2, 3)
    for robot_id in range(number_of_partitions):
        plt.scatter(final_partitioning_x[robot_id], final_partitioning_y[robot_id], marker="s",
                    s=plot_cell_size,
                    c=np.ones((len(final_partitioning_x[robot_id]), 3)) * partition_colors[robot_id])
    plt.scatter(conflict_cells_x, conflict_cells_y, marker="s", s=plot_cell_conflict_boundary_size,
                c="black")
    plt.axis("equal")
    # Panel 4: final partitioning, robot start positions, and planned paths.
    ax4 = plt.subplot(2, 2, 4)
    ax4.scatter(np.transpose(robot_initial_positions_in_cartesian)[0],
                np.transpose(robot_initial_positions_in_cartesian)[1],
                s=plot_robot_size, c="black")
    for robot_id in range(number_of_partitions):
        ax4.scatter(final_partitioning_x[robot_id], final_partitioning_y[robot_id], marker="s",
                    s=plot_cell_size,
                    c=np.ones((len(final_partitioning_x[robot_id]), 3)) * partition_colors[robot_id])
    plt.axis("equal")
    for robot_id in range(number_of_partitions):
        ax4.plot(optimal_paths_clone[robot_id][:, 0], optimal_paths_clone[robot_id][:, 1],
                 c=partition_colors[robot_id])
    plt.show()
if mode == "phase":
    # Area-coverage traces (percentage surveyed, sampled at sampling_rate) and
    # total computation times for each algorithm variant.
    T0 = np.load("SmallLafayetteFlood/no_discontinuities/conflict_resolution/area_coverage.npy")
    T1 = np.load("SmallLafayetteFlood/no_discontinuities/no_conflict_resolution/area_coverage.npy")
    T2 = np.load("SmallLafayetteFlood/no_discontinuities/path_planning/area_coverage.npy")
    T3 = np.load("SmallLafayetteFlood/no_discontinuities/no_path_planning/area_coverage.npy")
    # NOTE(review): T4-T7 and CT4-CT7 (the "discontinuities" runs) are loaded
    # but never plotted or printed below — dead data or work in progress.
    T4 = np.load("SmallLafayetteFlood/discontinuities/conflict_resolution/area_coverage.npy")
    T5 = np.load("SmallLafayetteFlood/discontinuities/no_conflict_resolution/area_coverage.npy")
    T6 = np.load("SmallLafayetteFlood/discontinuities/path_planning/area_coverage.npy")
    T7 = np.load("SmallLafayetteFlood/discontinuities/no_path_planning/area_coverage.npy")
    CT0 = np.load("SmallLafayetteFlood/no_discontinuities/conflict_resolution/total_computation_time.npy")
    CT1 = np.load("SmallLafayetteFlood/no_discontinuities/no_conflict_resolution/total_computation_time.npy")
    CT2 = np.load("SmallLafayetteFlood/no_discontinuities/path_planning/total_computation_time.npy")
    CT3 = np.load("SmallLafayetteFlood/no_discontinuities/no_path_planning/total_computation_time.npy")
    CT4 = np.load("SmallLafayetteFlood/discontinuities/conflict_resolution/total_computation_time.npy")
    CT5 = np.load("SmallLafayetteFlood/discontinuities/no_conflict_resolution/total_computation_time.npy")
    CT6 = np.load("SmallLafayetteFlood/discontinuities/path_planning/total_computation_time.npy")
    CT7 = np.load("SmallLafayetteFlood/discontinuities/no_path_planning/total_computation_time.npy")
    # Time axes: one sample every sampling_rate seconds.
    t0 = np.linspace(0, (len(T0) - 1) * sampling_rate, int(len(T0)))
    t1 = np.linspace(0, (len(T1) - 1) * sampling_rate, int(len(T1)))
    t2 = np.linspace(0, (len(T2) - 1) * sampling_rate, int(len(T2)))
    t3 = np.linspace(0, (len(T3) - 1) * sampling_rate, int(len(T3)))
    t4 = np.linspace(0, (len(T4) - 1) * sampling_rate, int(len(T4)))
    t5 = np.linspace(0, (len(T5) - 1) * sampling_rate, int(len(T5)))
    t6 = np.linspace(0, (len(T6) - 1) * sampling_rate, int(len(T6)))
    t7 = np.linspace(0, (len(T7) - 1) * sampling_rate, int(len(T7)))
    plt.rc('axes', titlesize=25) # fontsize of the title
    plt.rc('axes', labelsize=20) # fontsize of the x and y labels
    plt.rc('xtick', labelsize=15) # fontsize of the tick labels
    plt.rc('ytick', labelsize=15) # fontsize of the tick labels
    plt.rc('legend', fontsize=15) # fontsize of the legend
    font = {'family': 'Times New Roman',
            'weight': 'normal',
            'size': 12}
    matplotlib.rc("font", **font)
    plt.grid()
    # Plot only the four "no_discontinuities" variants.
    p0, = plt.plot(t0, T0)
    p1, = plt.plot(t1, T1)
    p2, = plt.plot(t2, T2)
    p3, = plt.plot(t3, T3)
    print("Computation Times:")
    print(CT0, CT1, CT2, CT3)
    print("Mission Completion Times:")
    print(max(t0), max(t1), max(t2), max(t3))
    plt.title("Area Surveyed Over Time")
    plt.xlabel("Time (s)")
    plt.ylabel("Area Surveyed (%)")
    plt.legend([p0, p1, p2, p3], ["State-biased \n Conflict Resolution", "Random Uniform \n Conflict Resolution", "Nearest Neighbor \n Path Planning", "Random Walk \n Path Planning"])
    plt.savefig("Plot/lafayette_small_phase_test_AOT.pdf", format="pdf", dpi=300, bbox_inches='tight')
    plt.show()
if mode == "general_NYC_QLB":
    # Robot population sizes matching the saved QLB benchmark runs.
    number_of_robots = [5, 10, 20, 30,40]
    total_computation_time = np.load("Brooklyn_Init_Test/QLB_runs/total_computation_time.npy")
    total_completion_time = np.load("Brooklyn_Init_Test/QLB_runs/total_mission_completion_time.npy")
    tasks_data = pd.read_pickle("Brooklyn_Init_Test/QLB_runs/tasks_data.pkl")
    time_per_data = pd.read_pickle("Brooklyn_Init_Test/QLB_runs/time_per_data.pkl")
    plt.figure(figsize=(8, 6))
    titlesize = 18 # fontsize of the title
    axeslabelsize = 15 # fontsize of the x and y labels
    xticklabelsize = 13 # fontsize of the tick labels
    yticklabelsize = 13 # fontsize of the tick labels
    legendsize = 15 # fontsize of the legend
    font = {'family': 'Times New Roman',
            'weight': 'normal',
            'size': 12}
    matplotlib.rc("font", **font)
    plt.rc('axes', titlesize=titlesize) # fontsize of the title
    plt.rc('axes', labelsize=axeslabelsize) # fontsize of the x and y labels
    plt.rc('xtick', labelsize=xticklabelsize) # fontsize of the tick labels
    plt.rc('ytick', labelsize=yticklabelsize) # fontsize of the tick labels
    plt.rc('legend', fontsize=legendsize) # fontsize of the legend
    # Panel 1: per-robot completion time vs population size.
    plt.subplot(2, 2, 1)
    ax_time_per_data = sns.lineplot(x="Number of Robots", y="Completion Time Per Robot", data=time_per_data)
    plt.title("Mission Completion \n Time Per Robot \n for Various Robot Populations")
    plt.xlabel("Robot Population Size")
    plt.ylabel("Completion \n Time Per Robot \n (Seconds)")
    plt.grid()
    # Panel 2: tasks assigned per robot vs population size.
    plt.subplot(2, 2, 2)
    ax_tasks_data = sns.lineplot(x="Number of Robots", y="Tasks Per Robot", data=tasks_data)
    plt.title("Number of Tasks Assigned to \n Each Robot for \n Various Robot Populations")
    plt.xlabel("Robot Population Size")
    plt.ylabel("Number of \n Tasks Per Robot")
    plt.grid()
    # Panel 3: total mission completion time.
    plt.subplot(2, 2, 3)
    plt.plot(number_of_robots, total_completion_time)
    plt.title("Total Mission Completion Time \n for Various Robot Populations")
    plt.xlabel("Robot Population Size")
    plt.ylabel("Total Mission \n Completion Time \n (Seconds)")
    plt.grid()
    # Panel 4: planner computation time.
    plt.subplot(2, 2, 4)
    plt.plot(number_of_robots, total_computation_time)
    plt.title("Computation Time \n for Various Robot Populations")
    plt.xlabel("Robot Population Size")
    plt.ylabel("Computation \n Time (Seconds)")
    plt.grid()
    left = 0.125 # the left side of the subplots of the figure
    right = 0.9 # the right side of the subplots of the figure
    bottom = 0.1 # the bottom of the subplots of the figure
    top = 0.9 # the top of the subplots of the figure
    wspace = 0.5 # the amount of width reserved for space between subplots,
    # expressed as a fraction of the average axis width
    hspace = 0.7 # the amount of height reserved for space between subplots,
    # expressed as a fraction of the average axis height
    plt.subplots_adjust(left, bottom, right, top, wspace, hspace)
    plt.savefig("Plot/NYC_general_test_QLB.pdf", format="pdf", dpi=300, bbox_inches='tight')
    plt.show()
if mode == "general_NYC_baseline":
number_of_robots = [5, 10, 20, 30,40]
total_computation_time = np.load("Brooklyn_Init_Test/baseline_runs/total_computation_time.npy")
total_completion_time = np.load("Brooklyn_Init_Test/baseline_runs/total_mission_completion_time.npy")
tasks_data = pd.read_pickle("Brooklyn_Init_Test/baseline_runs/tasks_data.pkl")
time_per_data = pd.read_pickle("Brooklyn_Init_Test/baseline_runs/time_per_data.pkl")
titlesize = 18 # fontsize of the title
axeslabelsize = 15 # fontsize of the x and y labels
xticklabelsize = 13 # fontsize of the tick labels
yticklabelsize = 13 # fontsize of the tick labels
legendsize = 15 # fontsize of the legend
# font = {'family': 'serif',
# 'weight': 'normal',
# 'size': 12}
font = {'family': 'Times New Roman',
'weight': 'normal',
'size': 12}
matplotlib.rc("font", **font)
plt.figure(figsize=(8, 6))
plt.rc('axes', titlesize=titlesize) # fontsize of the title
plt.rc('axes', labelsize=axeslabelsize) # fontsize of the x and y labels
plt.rc('xtick', labelsize=xticklabelsize) # fontsize of the tick labels
plt.rc('ytick', labelsize=yticklabelsize) # fontsize of the tick labels
plt.rc('legend', fontsize=legendsize) # fontsize of the legend
plt.subplot(2, 2, 1)
ax_time_per_data = sns.lineplot(x="Number of Robots", y="Completion Time Per Robot", data=time_per_data)
plt.title("Mission Completion \n Time Per Robot \n for Various Robot Populations")
plt.xlabel("Robot Population Size")
plt.ylabel("Completion \n Time Per Robot \n (Seconds)")
plt.grid()
plt.subplot(2, 2, 2)
ax_tasks_data = sns.lineplot(x="Number of Robots", y="Tasks Per Robot", data=tasks_data)
plt.title("Number of Tasks Assigned to \n Each Robot for \n Various Robot Populations")
plt.xlabel("Robot Population Size")
plt.ylabel("Number of \n Tasks Per Robot")
plt.grid()
plt.subplot(2, 2, 3)
plt.plot(number_of_robots, | |
<reponame>div-B-equals-0/dust-wave-case-studies
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext_format_version: '1.2'
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.6.3
# ---
import numpy as np
from astropy.table import Table
import pandas as pd
from matplotlib import pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set_color_codes()
# ### Read in bow shock data from Kobulnicky:2017a
tab01 = Table.read("data/Kobulnicky2017/J_AJ_154_201_table1.dat.fits")
# Remove coordinate columns that we do not want. Also the color temperatures. *And we have to remove the Name field*. This is because of inconsistencies between the names in the different tables, which cause problems when we merge.
tab01.remove_columns(
[
'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs',
'T70/160', 'Name',
]
)
tab01[310:320]
tab01[20:30]
# Indicate lower limit fluxes by negative values, as in the published table:
for band in 'F3.6', 'F4.5', 'F5.8', 'F8.0', 'F70', 'F160':
lband = 'l_' + band
m = tab01[lband] == '<'
tab01[band][m] = -tab01[band][m]
tab01.remove_column(lband)
tab01
tab02 = Table.read("data/Kobulnicky2017/J_AJ_154_201_table2.dat.fits")
m = (tab02['RAh'] == 10)# & (tab01['RAm'] == 5)
tab02[m]
tab02.remove_columns(
[
'RAh', 'RAm', 'RAs', 'DE-', 'DEd', 'DEm', 'DEs',
'T70/160', 'Name',
]
)
for band in 'F3.4', 'F4.6', 'F12', 'F70', 'F160':
lband = 'l_' + band
m = tab02[lband] == '<'
tab02[band][m] = -tab02[band][m]
tab02.remove_column(lband)
tab02[70:90]
tab01[310:330]
# There is something wrong with Table 5. There is an integer column that contains `'---'` strings, which need dealing with or the reader will crash. This doesn't seem to be possible with the FITS table reader, so we resort to ascii, where we can fix it with the `fill_values` parameter.
tab05 = Table.read("data/Kobulnicky2017/table5.dat",
fill_values=[('---', -1)],
format='ascii.cds',
readme="data/Kobulnicky2017/ReadMe")
tab05.remove_columns(
[
'TSS', 'T22/T70', 'T70/160',
]
)
import astropy.units as u
import astropy.constants as const
tab05['LIR'] = tab05['FIR'].to(u.erg/u.cm**2/u.s)*4*np.pi*(tab05['Dist']*1e3*u.parsec).to(u.cm)**2 / const.L_sun.to(u.erg/u.s)
tab05
from astropy.table import join
tab05_01 = join(tab05, tab01, keys=('ID'), join_type='left')
tab05_01.remove_columns(['F3.6', 'F4.5', 'F5.8',])
tab05_01
# + {"scrolled": true}
tab05_01_02 = join(tab05_01, tab02, keys=('ID'), join_type='left')
tab05_01_02.remove_columns(['F3.4', 'F4.6',])
tab05_01_02
# -
# Now merge the WISE and Spitzer photometry, taking (8, 12) and (22, 24) as equivalent.
# Make a mask that is true for rows with Spitzer photometry
m_sst = ~tab05_01_02['Rad_1'].mask
# ...and the analogous mask for rows with WISE photometry (the "_2" columns).
m_wise = ~tab05_01_02['Rad_2'].mask
m_sst, m_wise
# +
# Each triple is [Spitzer column, WISE column, merged output column].
groups = [
    ['Rad_1', 'Rad_2', 'Rad'],
    ['Height_1', 'Height_2', 'Height'],
    ['F8.0', 'F12', 'F8 or 12'],
    ['F24', 'F22', 'F24 or 22'],
    ['F70_1', 'F70_2', 'F70'],
    ['F160_1', 'F160_2', 'F160'],
    ['I70_1', 'I70_2', 'I70'],
    ['T24/70', 'T22/70', 'T2x/70'],
]
for sst, wise, merge in groups:
    # Prefer the Spitzer value, fall back to WISE, else NaN.
    tab05_01_02[merge] = np.where(m_sst, tab05_01_02[sst], np.where(m_wise, tab05_01_02[wise], np.nan))
    # Mask rows that have neither measurement, and drop the source columns.
    tab05_01_02[merge].mask = ~(m_sst | m_wise)
    tab05_01_02.remove_columns([sst, wise])
# Record which observatory supplied each row's photometry.
tab05_01_02['Observatory'] = np.where(m_sst, 'SST', np.where(m_wise, 'WISE', None))
tab05_01_02
# -
# Now work out my own IR flux by weighted sum of the 8 to 160 bands. Originally, here we removed 329 because it lacks the requisite data, being absent from Tables 1 and 2. However, it is in Table 5 and also in K18, so we have a flux.
t = tab05_01_02
#t.remove_row(2)
t['FIR_will'] = 1e-10*(2.4*np.abs(t['F8 or 12']) + 1.6*t['F24 or 22'] + 0.51*t['F70'])
t[2]['FIR_will'] = t[2]['FIR']
t['ID', 'FIR', 'FIR_will']
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
sns.set_context("poster")
# Compare the K17/K18 tabulated IR flux against our recomputed flux,
# color-coded by distance, with each point labeled by source ID.
fig, ax = plt.subplots(figsize=(10, 8))
c = ax.scatter(t['FIR'], t['FIR_will'],
               c=t['Dist'], cmap='viridis_r', vmin=0.0, vmax=2.5,
               edgecolors='k', alpha=1.0)
fig.colorbar(c, ax=ax).set_label('Distance, kpc')
for id_, x, y in zip(t['ID'], t['FIR'], t['FIR_will']):
    ax.annotate(
        str(id_), (x, y), fontsize='xx-small',
        xytext=(4,4), textcoords='offset points',
    )
fmin, fmax = 2e-10, 2e-7
# Dashed line of equality.
ax.plot([fmin, fmax], [fmin, fmax], ls='--')
ax.set(
    xscale='log', yscale='log',
    xlim=[fmin, fmax], ylim=[fmin, fmax],
    xlabel=r'Kobulnicky+ (2018): $F_\mathrm{IR}$, mW/m$^2$',
    ylabel=r'This paper: $F_\mathrm{IR}$, mW/m$^2$',
)
ax.set_aspect('equal')
fig.savefig('K18-flux-comparison.pdf')
# Suppress notebook display of the last expression.
None
# So everything looks OK, except:
#
# * Source 67 is over 10 times too bright in the Kobulnicky table
#
# So, I will use my fluxes instead.
# Now, compare the F70 and I70 to get an equivalent size: compare that with bow shock size
# Now add in the table that I transcribed from the 2018 paper:
tab18 = Table.read('kob18.fits')
tt = join(t, tab18, keys='ID')
# Recomputed IR luminosity (solar units) from our flux and the K18 distances.
tt['LIR_will'] = tt['FIR_will']*(u.erg/u.cm**2/u.s)*4*np.pi*(tt['Dist']*1e3*u.parsec).to(u.cm)**2 / const.L_sun.to(u.erg/u.s)
# Luminosity ratio; presumably L4 is the stellar luminosity in units of
# 1e4 L_sun (consistent with the 1e4 factor used below) — TODO confirm.
tt['LIR/L* will'] = tt['LIR_will']/(1e4*tt['L4'])
tt
fig, ax = plt.subplots(figsize=(8, 10))
# tau = 2 L_IR / L*: x axis from the K17 table, y axis from our recomputation.
xx, yy = 2.0/tt['L*/LIR_1'], 2*tt['LIR/L* will']
c = ax.scatter(xx, yy,
               c=4.0 + np.log10(tt['L4']), cmap='magma', vmin=3.5, vmax=6.0,
               edgecolors='k', alpha=1.0)
fig.colorbar(c, ax=ax, orientation="horizontal").set_label(r'$\log_{10}\ \left[L_* / L_\odot \right]$')
for id_, x, y in zip(tt['ID'], xx, yy):
    ax.annotate(
        str(id_), (x, y), fontsize='xx-small',
        xytext=(4,4), textcoords='offset points',
    )
fmin, fmax = 2e-4, 5e-1
# Shaded band marks factor-of-two agreement around the equality line.
ax.fill_between([fmin, fmax], [fmin/2, fmax/2], [fmin*2, fmax*2], color='k', alpha=0.2, zorder=-1)
ax.plot([fmin, fmax], [fmin, fmax], ls='--', zorder=-1)
ax.set(
    xscale='log', yscale='log',
    xlim=[fmin, fmax], ylim=[fmin, fmax],
    xlabel=r'Kobulnicky+ (2017): $\tau = 2 L_\mathrm{IR}/L_*$',
    ylabel=r'This paper: $\tau = 2 L_\mathrm{IR}/L_*$',
)
ax.set_aspect('equal')
fig.tight_layout()
fig.savefig('K17-tau-comparison.pdf')
None
# In this graph we compare the original luminosity ratio taken directly from the Kobulnicky (2017) table (x axis) with the ratio calculated using my new total IR fluxes, combined with the new luminosities in the Kobulnicky (2018) table (y axis). Most of the points show reasonable agreement between the two methods, with a few exceptions:
#
# * 67: this had the $F_\text{IR}$ overestimated in K17. Using a more reasonable value gives a lower $\tau$
# * 341: The spectral class has changed from B2 (K17) to O9 (K18), increasing the assumed $L_*$, which lowers $\tau$
# * 411: The luminosity class has changed from Ib (K17) to V (K18), so $L_*$ has been greatly reduced, which increases the estimated $\tau$
#
# And there doesn't seem to be any significant correlation with stellar luminosity.
tt['FIR_FIR'] = tt['FIR_will']/tt['FIR']
fdf = tt['FIR_will', 'FIR', 'FIR_FIR', 'T2x/70'].to_pandas()
# NOTE(review): this bare .drop(1) is not assigned — in a notebook it only
# displays the result; the effective drop is on the next line.
fdf.drop(1)
# Drop row 1 and work in log space for the correlation statistics.
fdf = fdf.drop(1).applymap(np.log10)
# The following three expressions are notebook display cells (no assignment).
fdf.describe()
fdf.corr()
fdf.cov()
# _Now corrected for bad source 67 that had wrong flux in K17_
#
# So the two fluxes are very well correlated: \(r = 0.9996\) in log space. S.d. of 0.7 for each log F, whereas the log ratio has s.d. of 0.07.
fdf.sort_values('T2x/70')
# Next job is to estimate the shell pressure from the $\tau$:
#
# 1. Assume UV dust opacity per gas mass gives column density: $\Sigma = \tau/\kappa$.
# 2. Using measured thickness, find $\rho = \Sigma / H$
# 3. Assume sound speed, so $P_\mathrm{shell} = \rho a^2$
#
# Putting it all together gives $P_\mathrm{shell} = \tau a^2 / \kappa H$.
# Then we can compare that with the radiation pressure at the shell:
# $$
# P_\mathrm{rad} = \frac{L_*}{4\pi R^2 c}
# $$
# and define an observational momentum trapping efficiency: $\eta_\mathrm{obs} = P_\mathrm{shell} / P_\mathrm{rad}$, so that:
# $$
# \eta_\mathrm{obs} = \frac{\tau a^2}{\kappa H} \frac{4\pi R^2 c}{L_*}
# $$
# If we expand out the $\tau$, we see that $\eta_\mathrm{obs} \propto L_*^{-2}$, which is quite a steep sensitivity (especially since $L_*$ may be just a guess).
# Make a new table that just has the columns that we want. We take the $R_0$ from K18 Table 1. The thickness $H$ could be taken from K17: "Height" in Tables 1 and 2. *But* I don't trust those values. For instance, zeta Oph clearly has $H < 60''$ from its image, but the table gives $404''$, which is ridiculous given that $R_0 = 299''$. In fact, when I use these columns and calculate $H/R$, then I get a range from 0.5 to 3, which does not make any sense.
#
# _But maybe "height" is not what I think it is._ What is really wanted is the FWHM of the brightness profile, but H is measured at a low contour (small fraction of the peak brightness), so it is too large.
# It would be better to simply assume $H/R = 3 / (4 M^2)$, which is $\approx 0.1$ if $V = 30$ km/s, but more likely the velocities are lower. Let's assume 0.25 for now. Assume $\kappa = 600$ and $a = 11.4$ km/s
# +
colnames = tt.colnames[0:5] + ['L4', 'LIR_will', 'R0', 'D_kpc', 'U', 'Md_-8', 'V3']
ttt = tt[colnames]
# tau = 2 L_IR / L* (UV optical depth of the shell).
ttt['tau'] = np.round(2*tt['LIR/L* will'], decimals=5)
ttt['H/R'] = np.round(tt['Height'] / tt['R0_as'], decimals=2)
# Assumed sound speed, dust opacity per gas mass, and relative shell
# thickness (see discussion above): a = 11.4 km/s, kappa = 600, H/R = 0.25.
cs = (11.4*u.km/u.s).cgs
kappa = 600.0*u.cm**2/u.g
H_R = 0.25
# Shell pressure P/k = tau a^2 / (k kappa H), with H = (H/R) R0.
ttt['P_k_shell'] = np.array(ttt['tau'])*(cs**2 / (const.k_B*kappa * H_R * ttt['R0']*u.parsec)).cgs
# Implied shell density assuming T = 1e4 K and 2 particles per H (P = 2 n k T).
ttt['n_shell'] = np.round(ttt['P_k_shell']/u.Kelvin/(2 * 1.0e4), decimals=1)
# Radiation pressure at the shell radius: P_rad/k = L*/(4 pi R^2 c k).
ttt['P_k_rad'] = (1e4*const.L_sun*ttt['L4'] / (4*np.pi * (ttt['R0']*u.pc)**2 * const.c * const.k_B)).cgs
# Observational momentum trapping efficiency eta = P_shell / P_rad.
ttt['eta_obs'] = np.round(ttt['P_k_shell'].data/ttt['P_k_rad'].data, decimals=5)
ttt
# +
# Scatter of trapping efficiency eta_obs against optical depth tau,
# color-coded by stellar luminosity and labeled by source ID.
fig, ax = plt.subplots(figsize=(10, 8))
xx, yy = ttt['tau'], ttt['eta_obs']
c = ax.scatter(xx, yy,
               c=4.0 + np.log10(tt['L4']),
               # c=np.log10(tt['Teff']),
               cmap='magma', vmin=4.0, vmax=6.0,
               edgecolors='k', alpha=1.0, s=300)
fig.colorbar(c, ax=ax).set_label(
    r'$\log_{10}\ \left[L_* / L_\odot \right]$'
    # r'$\log_{10}\ \left[T_\mathrm{eff} \right]$'
)
for id_, x, y in zip(tt['ID'], xx, yy):
    ax.annotate(
        str(id_), (x, y), fontsize=10, color='k',
        fontweight='black', fontstretch='condensed',
        xytext=(0,0), textcoords='offset points', ha='center', va='center',
    )
# NOTE(review): this second annotate sits outside the loop, so it only
# re-draws the LAST point's label in white (using the leaked id_, x, y).
# It looks like it was meant to be inside the loop to outline every label —
# confirm intent.
ax.annotate(
    str(id_), (x, y), fontsize=10, color='w',
    xytext=(0,0), textcoords='offset points', ha='center', va='center',
)
fmin, fmax = 8e-5, 8.0
ax.plot([fmin, fmax], [fmin, fmax], ls='--')
ax.fill_between([fmin, fmax], [25*fmin, 25*fmax], [1.5*fmin, 1.5*fmax], color='k', alpha=0.05)
ax.set(
xscale='log', yscale='log',
xlim=[fmin, fmax], ylim=[fmin, fmax],
xlabel=r'UV optical depth of shell: $\tau$',
ylabel=r'Shell | |
/ (0.0001 + 0.9999 * m.b24) +
9.92855620143344)**2 + (-m.x355 / (0.0001 + 0.9999 * m.b24) +
5.74951319729978)**2 + (-m.x356 / (0.0001 + 0.9999 * m.b24) +
3.53637928232447)**2 - 1) * (0.0001 + 0.9999 * m.b24) + 0.0144073937673116
* m.b24 <= 0.0144073937673116)
# Auto-generated constraints (do not hand-edit). Each constraint e(104+k)
# relates a block of four continuous variables to a binary selector b_k:
# when b_k is active, the scaled point must lie within a unit ball around
# the tabulated center; when b_k = 0 the constraint is loose. The 0.0001 +
# 0.9999*b terms avoid division by zero. NOTE(review): appears to encode a
# one-of-many sphere-selection (see e121) — verify against the generator.
m.e105 = Constraint(expr= ((-m.x357 / (0.0001 + 0.9999 * m.b25) +
    0.45538917132773)**2 + (-m.x358 / (0.0001 + 0.9999 * m.b25) +
    1.48603663213761)**2 + (-m.x359 / (0.0001 + 0.9999 * m.b25) +
    9.82188246465487)**2 + (-m.x360 / (0.0001 + 0.9999 * m.b25) +
    7.4461736036308)**2 - 1) * (0.0001 + 0.9999 * m.b25) + 0.015333056065432 *
    m.b25 <= 0.015333056065432)
m.e106 = Constraint(expr= ((-m.x361 / (0.0001 + 0.9999 * m.b26) +
    6.75415604579508)**2 + (-m.x362 / (0.0001 + 0.9999 * m.b26) +
    4.56383909220998)**2 + (-m.x363 / (0.0001 + 0.9999 * m.b26) +
    5.80815448731129)**2 + (-m.x364 / (0.0001 + 0.9999 * m.b26) +
    1.39230685391611)**2 - 1) * (0.0001 + 0.9999 * m.b26) + 0.010112042807447 *
    m.b26 <= 0.010112042807447)
m.e107 = Constraint(expr= ((-m.x365 / (0.0001 + 0.9999 * m.b27) +
    8.85890029315383)**2 + (-m.x366 / (0.0001 + 0.9999 * m.b27) +
    6.16141864425327)**2 + (-m.x367 / (0.0001 + 0.9999 * m.b27) +
    2.25373679776979)**2 + (-m.x368 / (0.0001 + 0.9999 * m.b27) +
    2.60508233041854)**2 - 1) * (0.0001 + 0.9999 * m.b27) + 0.0127308977615673
    * m.b27 <= 0.0127308977615673)
m.e108 = Constraint(expr= ((-m.x369 / (0.0001 + 0.9999 * m.b28) +
    5.3549498366276)**2 + (-m.x370 / (0.0001 + 0.9999 * m.b28) +
    6.5028620799581)**2 + (-m.x371 / (0.0001 + 0.9999 * m.b28) +
    3.77860881416883)**2 + (-m.x372 / (0.0001 + 0.9999 * m.b28) +
    3.88773754222155)**2 - 1) * (0.0001 + 0.9999 * m.b28) + 0.00993550907514682
    * m.b28 <= 0.00993550907514682)
m.e109 = Constraint(expr= ((-m.x373 / (0.0001 + 0.9999 * m.b29) +
    0.113259012567859)**2 + (-m.x374 / (0.0001 + 0.9999 * m.b29) +
    4.95866301539607)**2 + (-m.x375 / (0.0001 + 0.9999 * m.b29) +
    8.39278608004883)**2 + (-m.x376 / (0.0001 + 0.9999 * m.b29) +
    2.33998997152253)**2 - 1) * (0.0001 + 0.9999 * m.b29) + 0.00995155777564721
    * m.b29 <= 0.00995155777564721)
m.e110 = Constraint(expr= ((-m.x377 / (0.0001 + 0.9999 * m.b30) +
    4.01186017931937)**2 + (-m.x378 / (0.0001 + 0.9999 * m.b30) +
    1.58505280271348)**2 + (-m.x379 / (0.0001 + 0.9999 * m.b30) +
    6.66701704740187)**2 + (-m.x380 / (0.0001 + 0.9999 * m.b30) +
    6.15579863983494)**2 - 1) * (0.0001 + 0.9999 * m.b30) + 0.00999503876903392
    * m.b30 <= 0.00999503876903392)
m.e111 = Constraint(expr= ((-m.x381 / (0.0001 + 0.9999 * m.b31) +
    6.96869478428464)**2 + (-m.x382 / (0.0001 + 0.9999 * m.b31) +
    4.10398909952988)**2 + (-m.x383 / (0.0001 + 0.9999 * m.b31) +
    6.0102827867986)**2 + (-m.x384 / (0.0001 + 0.9999 * m.b31) +
    2.74411959285283)**2 - 1) * (0.0001 + 0.9999 * m.b31) + 0.0108059125042742
    * m.b31 <= 0.0108059125042742)
m.e112 = Constraint(expr= ((-m.x385 / (0.0001 + 0.9999 * m.b32) +
    4.87285195085022)**2 + (-m.x386 / (0.0001 + 0.9999 * m.b32) +
    9.86295765077007)**2 + (-m.x387 / (0.0001 + 0.9999 * m.b32) +
    0.143489359365916)**2 + (-m.x388 / (0.0001 + 0.9999 * m.b32) +
    8.80632332989694)**2 - 1) * (0.0001 + 0.9999 * m.b32) + 0.0197594539542727
    * m.b32 <= 0.0197594539542727)
m.e113 = Constraint(expr= ((-m.x389 / (0.0001 + 0.9999 * m.b33) +
    7.83131209913544)**2 + (-m.x390 / (0.0001 + 0.9999 * m.b33) +
    1.43609337392774)**2 + (-m.x391 / (0.0001 + 0.9999 * m.b33) +
    3.30054263355244)**2 + (-m.x392 / (0.0001 + 0.9999 * m.b33) +
    0.00125718557078214)**2 - 1) * (0.0001 + 0.9999 * m.b33) +
    0.00732853966291172 * m.b33 <= 0.00732853966291172)
m.e114 = Constraint(expr= ((-m.x393 / (0.0001 + 0.9999 * m.b34) +
    5.66454764000128)**2 + (-m.x394 / (0.0001 + 0.9999 * m.b34) +
    5.56331545204103)**2 + (-m.x395 / (0.0001 + 0.9999 * m.b34) +
    1.14993711831163)**2 + (-m.x396 / (0.0001 + 0.9999 * m.b34) +
    4.5715891663462)**2 - 1) * (0.0001 + 0.9999 * m.b34) + 0.00842593616666874
    * m.b34 <= 0.00842593616666874)
m.e115 = Constraint(expr= ((-m.x397 / (0.0001 + 0.9999 * m.b35) +
    3.33190103022031)**2 + (-m.x398 / (0.0001 + 0.9999 * m.b35) +
    4.9445883792678)**2 + (-m.x399 / (0.0001 + 0.9999 * m.b35) +
    7.30728727625694)**2 + (-m.x400 / (0.0001 + 0.9999 * m.b35) +
    8.01246235442081)**2 - 1) * (0.0001 + 0.9999 * m.b35) + 0.0152146519034331
    * m.b35 <= 0.0152146519034331)
m.e116 = Constraint(expr= ((-m.x401 / (0.0001 + 0.9999 * m.b36) +
    9.33298244503801)**2 + (-m.x402 / (0.0001 + 0.9999 * m.b36) +
    0.723851580512842)**2 + (-m.x403 / (0.0001 + 0.9999 * m.b36) +
    8.42864317892565)**2 + (-m.x404 / (0.0001 + 0.9999 * m.b36) +
    4.32119007374061)**2 - 1) * (0.0001 + 0.9999 * m.b36) + 0.0176343231921043
    * m.b36 <= 0.0176343231921043)
m.e117 = Constraint(expr= ((-m.x405 / (0.0001 + 0.9999 * m.b37) +
    5.08063287228698)**2 + (-m.x406 / (0.0001 + 0.9999 * m.b37) +
    8.38761519865226)**2 + (-m.x407 / (0.0001 + 0.9999 * m.b37) +
    1.4027356086197)**2 + (-m.x408 / (0.0001 + 0.9999 * m.b37) +
    6.86412480592018)**2 - 1) * (0.0001 + 0.9999 * m.b37) + 0.0144248795642564
    * m.b37 <= 0.0144248795642564)
m.e118 = Constraint(expr= ((-m.x409 / (0.0001 + 0.9999 * m.b38) +
    2.37234068047016)**2 + (-m.x410 / (0.0001 + 0.9999 * m.b38) +
    7.05084260559812)**2 + (-m.x411 / (0.0001 + 0.9999 * m.b38) +
    9.48571415448197)**2 + (-m.x412 / (0.0001 + 0.9999 * m.b38) +
    5.77659906719162)**2 - 1) * (0.0001 + 0.9999 * m.b38) + 0.017769025155675 *
    m.b38 <= 0.017769025155675)
m.e119 = Constraint(expr= ((-m.x413 / (0.0001 + 0.9999 * m.b39) +
    4.16198173364841)**2 + (-m.x414 / (0.0001 + 0.9999 * m.b39) +
    5.45114144772148)**2 + (-m.x415 / (0.0001 + 0.9999 * m.b39) +
    9.00182905163397)**2 + (-m.x416 / (0.0001 + 0.9999 * m.b39) +
    3.4826499770368)**2 - 1) * (0.0001 + 0.9999 * m.b39) + 0.0139198812171686 *
    m.b39 <= 0.0139198812171686)
m.e120 = Constraint(expr= ((-m.x417 / (0.0001 + 0.9999 * m.b40) +
    4.45933786757702)**2 + (-m.x418 / (0.0001 + 0.9999 * m.b40) +
    4.47805189258463)**2 + (-m.x419 / (0.0001 + 0.9999 * m.b40) +
    6.61692822015399)**2 + (-m.x420 / (0.0001 + 0.9999 * m.b40) +
    5.6343120215581)**2 - 1) * (0.0001 + 0.9999 * m.b40) + 0.0114467853996832 *
    m.b40 <= 0.0114467853996832)
# Exactly one of the binaries b1..b40 may be active.
m.e121 = Constraint(expr= m.b1 + m.b2 + m.b3 + m.b4 + m.b5 + m.b6 + m.b7 + m.b8
    + m.b9 + m.b10 + m.b11 + m.b12 + m.b13 + m.b14 + m.b15 + m.b16 + m.b17 +
    m.b18 + m.b19 + m.b20 + m.b21 + m.b22 + m.b23 + m.b24 + m.b25 + m.b26 +
    m.b27 + m.b28 + m.b29 + m.b30 + m.b31 + m.b32 + m.b33 + m.b34 + m.b35 +
    m.b36 + m.b37 + m.b38 + m.b39 + m.b40 == 1)
m.e122 = Constraint(expr= ((-m.x421 / (0.0001 + 0.9999 * m.b41) +
    4.04180710023322)**2 + (-m.x422 / (0.0001 + 0.9999 * m.b41) +
    0.0638120906615358)**2 + (-m.x423 / (0.0001 + 0.9999 * m.b41) +
    9.31163964055327)**2 + (-m.x424 / (0.0001 + 0.9999 * m.b41) +
    9.59399362610548)**2 - 1) * (0.0001 + 0.9999 * m.b41) + 0.0194091623111686
    * m.b41 <= 0.0194091623111686)
m.e123 = Constraint(expr= ((-m.x425 / (0.0001 + 0.9999 * m.b42) +
    7.58630473662528)**2 + (-m.x426 / (0.0001 + 0.9999 * m.b42) +
    9.81696808234314)**2 + (-m.x427 / (0.0001 + 0.9999 * m.b42) +
    6.80594062551012)**2 + (-m.x428 / (0.0001 + 0.9999 * m.b42) +
    5.73941560922778)**2 - 1) * (0.0001 + 0.9999 * m.b42) + 0.0232186601220104
    * m.b42 <= 0.0232186601220104)
m.e124 = Constraint(expr= ((-m.x429 / (0.0001 + 0.9999 * m.b43) +
    4.73576208695481)**2 + (-m.x430 / (0.0001 + 0.9999 * m.b43) +
    2.81737915136856)**2 + (-m.x431 / (0.0001 + 0.9999 * m.b43) +
    0.919919756378161)**2 + (-m.x432 / (0.0001 + 0.9999 * m.b43) +
    0.427396562561213)**2 - 1) * (0.0001 + 0.9999 * m.b43) +
    0.00303939880066688 * m.b43 <= 0.00303939880066688)
m.e125 = Constraint(expr= ((-m.x433 / (0.0001 + 0.9999 * m.b44) +
    0.428853030190813)**2 + (-m.x434 / (0.0001 + 0.9999 * m.b44) +
    5.71294529671424)**2 + (-m.x435 / (0.0001 + 0.9999 * m.b44) +
    2.06079707847737)**2 + (-m.x436 / (0.0001 + 0.9999 * m.b44) +
    3.87755584734058)**2 - 1) * (0.0001 + 0.9999 * m.b44) + 0.00511039828326592
    * m.b44 <= 0.00511039828326592)
m.e126 = Constraint(expr= ((-m.x437 / (0.0001 + 0.9999 * m.b45) +
    1.0774677742481)**2 + (-m.x438 / (0.0001 + 0.9999 * m.b45) +
    0.802343324640142)**2 + (-m.x439 / (0.0001 + 0.9999 * m.b45) +
    5.05560926630768)**2 + (-m.x440 / (0.0001 + 0.9999 * m.b45) +
    6.38583388950109)**2 - 1) * (0.0001 + 0.9999 * m.b45) + 0.00671427511330145
    * m.b45 <= 0.00671427511330145)
m.e127 | |
<reponame>Federico-PizarroBejarano/safe-control-gym
import os
import matplotlib.pyplot as plt
import numpy as np
from safe_control_gym.utils.utils import mkdirs
from safe_control_gym.controllers.mpc.mpc_utils import compute_state_rmse
def get_cost(test_runs):
    """Mean quadratic observation cost per epoch.

    Args:
        test_runs: Nested [epoch][episode] container of run dicts, each
            holding an 'obs' array.

    Returns:
        np.ndarray: One value per epoch — the per-episode sum of squared
        observations, averaged over episodes.
    """
    n_epochs = len(test_runs)
    n_episodes = len(test_runs[0])
    per_episode = np.array([
        [np.sum(test_runs[ep][run]['obs'] ** 2) for run in range(n_episodes)]
        for ep in range(n_epochs)
    ])
    return per_episode.mean(axis=1)
def get_average_rmse_error(runs):
    """Mean precomputed RMSE state error per epoch.

    Args:
        runs: Nested [epoch][episode] run dicts; each run holds a
            'total_rmse_state_error' entry of the form (mse, rmse).

    Returns:
        np.ndarray: Per-epoch RMSE averaged over episodes.
    """
    n_epochs = len(runs)
    n_episodes = len(runs[0])
    rmse_table = np.empty((n_epochs, n_episodes))
    for ep in range(n_epochs):
        for run in range(n_episodes):
            # Entry is (mse, rmse); only the RMSE is used.
            rmse_table[ep, run] = runs[ep][run]['total_rmse_state_error'][1]
    return rmse_table.mean(axis=1)
def plot_runs(all_runs, num_epochs, episode=0, ind=0, ylabel='x position', dir=None):
    """Plot one state dimension of a chosen episode across training epochs.

    Epoch 0 is labeled as the Linear MPC baseline; later epochs as GP-MPC.
    Saves to '<dir>/ep<episode>_ind<ind>_state.png' when `dir` is given,
    otherwise shows the figure interactively.
    """
    labels = ['Linear MPC'] + ['GP-MPC %s' % e for e in range(1, num_epochs)]
    for epoch, label in enumerate(labels):
        plt.plot(all_runs[epoch][episode]['state'][:, ind], label=label)
    plt.title(ylabel)
    plt.xlabel('Step')
    plt.ylabel(ylabel)
    plt.legend()
    save_str = 'ep%s_ind%s_state.png' % (episode, ind)
    if dir is None:
        plt.show()
    else:
        plt.savefig(os.path.join(dir, save_str))
    # Reset the current axes/figure for the next plot.
    plt.cla()
    plt.clf()
def plot_learning_curve(avg_rewards, num_points_per_epoch, stem, dir):
    """Plot a per-epoch metric against training samples; save PNG and CSV.

    Args:
        avg_rewards: Per-epoch metric values (y axis).
        num_points_per_epoch: Cumulative training samples per epoch (x axis).
        stem: Base name for the title, y label, and output file names.
        dir: Output directory for '<stem>.png' and '<stem>.csv'.
    """
    xs = np.array(num_points_per_epoch)
    ys = np.array(avg_rewards)
    plt.plot(xs, ys)
    plt.title('Avg Episode' + stem)
    plt.xlabel('Training Steps')
    plt.ylabel(stem)
    plt.savefig(os.path.join(dir, stem + '.png'))
    plt.cla()
    plt.clf()
    np.savetxt(os.path.join(dir, stem + '.csv'),
               np.vstack((xs, ys)).T,
               delimiter=',',
               header='Train steps,Cost')
def filter_sequences(x_seq, actions, x_next_seq, threshold):
    """Filter out transitions with an implausibly large one-step state jump.

    Args:
        x_seq (np.ndarray): States at time t, shape (N, n_states).
        actions (np.ndarray): Actions at time t, shape (N, n_actions).
        x_next_seq (np.ndarray): States at time t+1, shape (N, n_states).
        threshold (float): Maximum allowed absolute per-dimension change.

    Returns:
        tuple: (x_seq, actions, x_next_seq) restricted to rows where every
        state dimension changed by less than `threshold`.
    """
    # Keep rows whose per-dimension jump stays below the filter threshold.
    # BUGFIX: the `threshold` argument was previously ignored (1 hard-coded).
    x_diff_abs = np.abs(x_next_seq - x_seq)
    rows_to_keep = np.all(x_diff_abs < threshold, axis=1)
    x_seq_filt = x_seq[rows_to_keep, :]
    x_next_seq_filt = x_next_seq[rows_to_keep, :]
    actions_filt = actions[rows_to_keep, :]
    return x_seq_filt, actions_filt, x_next_seq_filt
def get_quad_cost(test_runs, ref):
    """Mean x/z tracking cost per epoch for quadrotor runs.

    Args:
        test_runs: Nested [epoch][episode] run dicts with an 'obs' array.
        ref: Reference trajectory; columns 0 and 2 (x and z) are compared
            against the observations from step 1 onward.

    Returns:
        np.ndarray: Per-episode sum of squared x/z tracking errors,
        averaged over episodes, one value per epoch.
    """
    n_epochs = len(test_runs)
    n_episodes = len(test_runs[0])
    ref_xz = ref[:, [0, 2]]
    totals = np.array([
        [np.sum((test_runs[ep][run]['obs'][1:, [0, 2]] - ref_xz) ** 2)
         for run in range(n_episodes)]
        for ep in range(n_epochs)
    ])
    return totals.mean(axis=1)
def get_quad_average_rmse_error(runs, ref):
    """Mean precomputed RMSE per epoch (quadrotor variant).

    Note:
        `ref` is accepted for signature parity with the other quadrotor
        metrics but is not used; the RMSE is read directly from each run's
        'total_rmse_state_error' entry of the form (mse, rmse).

    Returns:
        np.ndarray: Per-epoch RMSE averaged over episodes.
    """
    n_epochs = len(runs)
    n_episodes = len(runs[0])
    table = np.empty((n_epochs, n_episodes))
    for ep in range(n_epochs):
        for run in range(n_episodes):
            table[ep, run] = runs[ep][run]['total_rmse_state_error'][1]
    return table.mean(axis=1)
def get_quad_average_rmse_error_xz_only(runs, ref):
    """Mean RMSE per epoch recomputed on the x/z dimensions only.

    Args:
        runs: Nested [epoch][episode] run dicts with a 'state' array.
        ref: Reference trajectory; columns 0 and 2 are compared against the
            states from step 1 onward.

    Returns:
        np.ndarray: One mean x/z RMSE per epoch.
    """
    n_epochs = len(runs)
    n_episodes = len(runs[0])
    ref_xz = ref[:, [0, 2]]
    table = np.empty((n_epochs, n_episodes))
    for ep in range(n_epochs):
        for run in range(n_episodes):
            _, run_rmse = compute_state_rmse(runs[ep][run]['state'][1:, [0, 2]] - ref_xz)
            table[ep, run] = run_rmse
    return table.mean(axis=1)
def make_plots(test_runs, train_runs, num_inds, dir):
    """Generate all cartpole evaluation figures under '<dir>/figs'.

    Produces per-state trajectory plots, constraint-violation plots, and
    learning curves (RMSE and common cost) against cumulative training
    samples.

    Args:
        test_runs: Nested [epoch][episode] evaluation run results.
        train_runs: Nested [epoch][episode] training run results.
        num_inds: Number of state dimensions to plot.
        dir: Output directory; figures go into its 'figs' subfolder.
    """
    num_epochs = len(test_runs)
    num_episodes = len(test_runs[0])
    fig_dir = os.path.join(dir, 'figs')
    mkdirs(fig_dir)
    # Make plot of all trajectories.
    num_points_per_epoch = []
    for episode_i in range(num_episodes):
        for ind in range(num_inds):
            ylabel = 'x%s' % ind
            plot_runs(test_runs, num_epochs, episode=episode_i, ind=ind, ylabel=ylabel, dir=fig_dir)
    # Compute the number of training points (x-axis for most figures).
    # Epoch 0 (the prior/linear controller) contributes zero samples.
    num_points = 0
    num_points_per_epoch.append(num_points)
    for epoch in range(1, num_epochs):
        num_train_episodes = len(train_runs[epoch])
        for episode in range(num_train_episodes):
            num_points += train_runs[epoch][episode]['obs'].shape[0]
        num_points_per_epoch.append(num_points)
    # Plot violation data
    nsamp, viol, mean_viol, maximums = get_constraint_violations(test_runs,
                                                                 train_runs)
    plot_constraint_violation(num_points_per_epoch, viol, fig_dir)
    avg_maxs = get_avg_of_max_viol_theta_dot(maximums)
    plot_average_peak_theta_dot_viol(num_points_per_epoch, avg_maxs, fig_dir)
    # Plot Learning Curves
    avg_rmse_error = get_average_rmse_error(test_runs)
    plot_learning_curve(avg_rmse_error, num_points_per_epoch, 'avg_rmse_cost_learning_curve', fig_dir)
    common_costs = get_cost(test_runs)
    plot_learning_curve(common_costs, num_points_per_epoch, 'common_cost_learning_curve', fig_dir)
def make_quad_plots(test_runs, train_runs, trajectory, num_inds, dir):
    """Generate quadrotor evaluation figures under '<dir>/figs'.

    Produces per-state trajectory plots and learning curves of the x/z
    tracking cost and RMSE errors against cumulative training samples.

    Args:
        test_runs: Nested [epoch][episode] evaluation run results.
        train_runs: Nested [epoch][episode] training run results.
        trajectory: Reference trajectory used by the cost/RMSE metrics.
        num_inds: Number of state dimensions to plot.
        dir: Output directory; figures go into its 'figs' subfolder.
    """
    num_epochs = len(test_runs)
    num_episodes = len(test_runs[0])
    fig_dir = os.path.join(dir, 'figs')
    mkdirs(fig_dir)
    num_points_per_epoch = []
    for episode_i in range(num_episodes):
        for ind in range(num_inds):
            ylabel = 'x%s' % ind
            plot_runs(test_runs, num_epochs, episode=episode_i, ind=ind, ylabel=ylabel, dir=fig_dir)
    # Cumulative training samples per epoch; epoch 0 contributes none.
    num_points = 0
    num_points_per_epoch.append(num_points)
    for epoch in range(1, num_epochs):
        num_train_episodes = len(train_runs[epoch])
        for episode in range(num_train_episodes):
            num_points += train_runs[epoch][episode]['obs'].shape[0]
        num_points_per_epoch.append(num_points)
    common_costs = get_quad_cost(test_runs, trajectory)
    plot_learning_curve(common_costs, num_points_per_epoch, 'common_xz_cost_learning_curve', fig_dir)
    rmse_error = get_quad_average_rmse_error(test_runs, trajectory)
    plot_learning_curve(rmse_error, num_points_per_epoch, 'rmse_error_learning_curve', fig_dir)
    rmse_error_xz = get_quad_average_rmse_error_xz_only(test_runs, trajectory)
    plot_learning_curve(rmse_error_xz, num_points_per_epoch, 'rmse_xz_error_learning_curve', fig_dir)
def gather_training_samples(all_runs, epoch_i, num_samples, rand_generator=None):
    """Collect (state, action, next_state) training triples from one epoch.

    Args:
        all_runs: Nested [epoch][episode] run results. NOTE(review): each
            result is accessed both as a mapping (['action']) and via
            attributes (.obs, .action) — presumably a munch-style results
            object; confirm.
        epoch_i: Index of the epoch to sample from.
        num_samples: Total number of samples, split evenly across episodes.
        rand_generator: Optional np.random.Generator. When given, indices
            are drawn without replacement; otherwise the first samples of
            each episode are taken deterministically.

    Returns:
        tuple: (x_seq, actions, x_next_seq) arrays stacked across episodes.
    """
    n_episodes = len(all_runs[epoch_i].keys())
    num_samples_per_episode = int(num_samples/n_episodes)
    x_seq_int = []
    x_next_seq_int = []
    actions_int = []
    for episode_i in range(n_episodes):
        run_results_int = all_runs[epoch_i][episode_i]
        n = run_results_int['action'].shape[0]
        if num_samples_per_episode < n:
            if rand_generator is not None:
                # Draw from [0, n-1) so that index + 1 is always valid.
                rand_inds_int = rand_generator.choice(n-1, num_samples_per_episode, replace=False)
            else:
                rand_inds_int = np.arange(num_samples_per_episode)
        else:
            # Not enough steps: use every transition in the episode.
            rand_inds_int = np.arange(n-1)
        next_inds_int = rand_inds_int + 1
        x_seq_int.append(run_results_int.obs[rand_inds_int, :])
        actions_int.append(run_results_int.action[rand_inds_int, :])
        x_next_seq_int.append(run_results_int.obs[next_inds_int, :])
    x_seq_int = np.vstack(x_seq_int)
    actions_int = np.vstack(actions_int)
    x_next_seq_int = np.vstack(x_next_seq_int)
    return x_seq_int, actions_int, x_next_seq_int
def gather_training_samples_from_all_data(all_runs, num_samples):
    """Collect (state, action, next_state) triples across ALL epochs.

    Like `gather_training_samples` but pools every epoch, splitting
    `num_samples` evenly over all (epoch, episode) pairs. Sampling is
    deterministic (the random branch is commented out): the first
    transitions of each episode are taken.

    Args:
        all_runs: Nested [epoch][episode] run results (see NOTE in
            `gather_training_samples` about mixed dict/attribute access).
        num_samples: Total number of samples to gather.

    Returns:
        tuple: (x_seq, actions, x_next_seq) arrays stacked across runs.
    """
    n_epochs = len(all_runs.keys())
    n_episodes_per_epoch = len(all_runs[0].keys())
    num_samples_per_episode = int(num_samples/(n_episodes_per_epoch*n_epochs))
    x_seq_int = []
    x_next_seq_int = []
    actions_int = []
    for epoch_i in range(n_epochs):
        for episode_i in range(n_episodes_per_epoch):
            run_results_int = all_runs[epoch_i][episode_i]
            n = run_results_int['action'].shape[0]
            if num_samples_per_episode < n:
                #rand_inds_int = np.random.choice(n-1, num_samples_per_episode)
                rand_inds_int = np.arange(num_samples_per_episode)
            else:
                # Not enough steps: use every transition in the episode.
                rand_inds_int = np.arange(n-1)
            next_inds_int = rand_inds_int + 1
            x_seq_int.append(run_results_int.obs[rand_inds_int, :])
            actions_int.append(run_results_int.action[rand_inds_int, :])
            x_next_seq_int.append(run_results_int.obs[next_inds_int, :])
    x_seq_int = np.vstack(x_seq_int)
    actions_int = np.vstack(actions_int)
    x_next_seq_int = np.vstack(x_next_seq_int)
    return x_seq_int, actions_int, x_next_seq_int
def make_traking_plot(runs, traj, dir):
    """Plot x-z tracking of each epoch's first test episode against the
    reference trajectory and the state-constraint box; dump each trajectory
    to CSV as well.

    (The typo in the function name is preserved for API compatibility.)

    Args:
        runs: Nested [epoch][episode] run dicts with an 'obs' array.
        traj: Reference trajectory; columns 0 and 2 are the x/z reference.
        dir: Output directory for the CSVs and 'quad_traj.png'.
    """
    n_epochs = len(runs.keys())
    plt.figure()
    # Epoch 0 is the linear-MPC baseline.
    x0, z0 = runs[0][0]['obs'][:, 0], runs[0][0]['obs'][:, 2]
    plt.plot(x0, z0, label='Linear MPC')
    np.savetxt(os.path.join(dir, 'traj_lin_mpc.csv'),
               np.vstack((x0, z0)).T, delimiter=',')
    for epoch in range(1, n_epochs):
        xe, ze = runs[epoch][0]['obs'][:, 0], runs[epoch][0]['obs'][:, 2]
        np.savetxt(os.path.join(dir, 'traj_%s.csv' % epoch),
                   np.vstack((xe, ze)).T, delimiter=',')
        plt.plot(xe, ze, label='GP-MPC %s' % epoch)
    plt.plot(traj[:,0], traj[:,2], 'k', label='Reference')
    # Rectangular state-constraint box.
    plt.plot([-0.4,-0.4], [0.0, 0.9], 'r', label='Limit')
    plt.plot([0.4,0.4], [0.0, 0.9], 'r')
    plt.plot([-0.4,0.4], [0.9, 0.9], 'r')
    plt.plot([-0.4,0.4], [0.0, 0.0], 'r')
    plt.legend()
    plt.title("Quadrotor Impossible Tracking")
    plt.xlabel('X position (m)')
    plt.ylabel('Z position (m)')
    plt.savefig(os.path.join(dir, 'quad_traj.png'))
def get_constraint_violations(test_runs,
                              train_runs):
    """Tally constraint violations of the test runs per epoch.

    Args:
        test_runs: Nested [epoch][episode] evaluation runs; each step's
            'info' dict holds a 'constraint_values' array.
        train_runs: Nested [epoch][episode] training runs, used only to
            count cumulative training samples.

    Returns:
        tuple:
            - num_train_samples_by_epoch (list): cumulative training steps
              at each epoch.
            - violations_per_epoch (list of list): per-episode violation
              counts for each epoch.
            - mean_violations_per_epoch (list): mean count per epoch.
            - max_violations_per_epoch (list of np.ndarray): per-epoch
              stack of each episode's elementwise-max constraint values.
    """
    num_train_samples_by_epoch = []
    violations_per_epoch = []
    max_violations_per_epoch = []
    mean_violations_per_epoch = []
    # NOTE(review): `num_samples` is accumulated below but never returned.
    num_samples = 0
    num_epochs = len(train_runs.keys())
    n_train_samples_per_epoch = 0
    for epoch in range(num_epochs):
        violations_per_episode = []
        max_violations_per_episode = []
        num_train_samples_per_episode = len(train_runs[epoch].keys())
        for train_episode in range(num_train_samples_per_episode):
            # Cumulative (not per-epoch) training-step count.
            n_train_samples_per_epoch += len(train_runs[epoch][train_episode]['info'])
        num_test_episodes_per_epoch = len(test_runs[epoch].keys())
        for test_episode in range(num_test_episodes_per_epoch):
            violations = 0
            max_violations = np.zeros(test_runs[epoch][test_episode]['info'][0]['constraint_values'].shape)
            n = len(test_runs[epoch][test_episode]['info'])
            for i in range(n):
                #violations += test_runs[epoch][test_episode]['info'][i]['constraint_violation'] # Due to bug.
                # A step violates if ANY constraint value is positive.
                violations += int(np.any(test_runs[epoch][test_episode]['info'][i]['constraint_values'] > 0))
                max_violations = np.maximum(max_violations,
                                            test_runs[epoch][test_episode]['info'][i]['constraint_values'])
            violations_per_episode.append(violations)
            max_violations_per_episode.append(max_violations)
        num_train_samples_by_epoch.append(n_train_samples_per_epoch)
        violations_per_epoch.append(violations_per_episode)
        max_violations_per_epoch.append(np.vstack(max_violations_per_episode))
        mean_violations_per_epoch.append(np.mean(violations_per_episode))
        # NOTE(review): `n` leaks from the episode loop — this adds only the
        # LAST test episode's length each epoch; dead code in practice since
        # num_samples is unused.
        num_samples += n
    return num_train_samples_by_epoch, violations_per_epoch, mean_violations_per_epoch, max_violations_per_epoch
def plot_constraint_violation(viol_samp, viols, dir):
    """Plot mean/max/min violation counts vs training steps; save PNG + CSV.

    Args:
        viol_samp: Cumulative training-sample counts per epoch (x axis).
        viols: Per-epoch lists of per-episode violation counts.
        dir: Output directory for 'number_viol.png' and 'number_viol.csv'.
    """
    violations = np.array(viols)
    train_time = np.array(viol_samp)
    mean_viol = np.mean(violations, axis=1)
    # Renamed from `max`/`min`, which shadowed the builtins.
    max_viol = np.max(violations, axis=1)
    min_viol = np.min(violations, axis=1)
    plt.plot(train_time, mean_viol, label='mean')
    plt.plot(train_time, max_viol, label='max')
    plt.plot(train_time, min_viol, label='min')
    plt.legend()
    plt.xlabel('Train Steps')
    plt.ylabel('Number of violations')
    stem = 'number_viol'
    fname = os.path.join(dir, stem + '.png')
    plt.savefig(fname)
    plt.cla()
    plt.clf()
    data = np.vstack((train_time, mean_viol, max_viol, min_viol)).T
    fname = os.path.join(dir, stem + '.csv')
    np.savetxt( fname,
                data,
                delimiter=',',
                header='Train Steps (s),Mean,Max,Min')
def get_avg_of_max_viol_theta_dot(maximums):
    """Average (over episodes) of the peak theta_dot violation per epoch.

    Args:
        maximums: Per-epoch arrays of per-episode max constraint values;
            columns 3 and 7 correspond to the theta_dot bounds.

    Returns:
        list: One averaged peak violation per epoch.
    """
    return [np.mean(np.max(epoch_max[:, [3, 7]], axis=1))
            for epoch_max in maximums]
def plot_average_peak_theta_dot_viol(train_sample, avg_max, dir):
    """Plot average peak theta_dot violation vs training time; save PNG + CSV.

    Args:
        train_sample: Training-time values (x axis).
        avg_max: Averaged peak violations per epoch (y axis).
        dir: Output directory for 'peak_viol.png' and 'peak_viol.csv'.
    """
    stem = 'peak_viol'
    plt.plot(train_sample, avg_max)
    plt.xlabel('Training Time (s)')
    plt.ylabel('Avg Peak Violation (rad/s)')
    plt.title('Theta_dot Average Peak Violation')
    plt.savefig(os.path.join(dir, stem + '.png'))
    plt.cla()
    plt.clf()
    np.savetxt(os.path.join(dir, stem + '.csv'),
               np.vstack((train_sample, avg_max)).T,
               delimiter=',',
               header='Train Time (s),Avg peak violation (rad/s)')
def plot_robustness(runs, pole_lengths, label, dir):
    """Plot average common cost against a swept parameter; save PNG and CSV.

    Args:
        runs: Nested [parameter][episode] run dicts (consumed by get_cost).
        pole_lengths: Swept parameter values (x axis).
        label: Parameter name used in the title and x label.
        dir: Output directory.
    """
    # Removed dead local `num_coeff` (was assigned but never used).
    avg_costs = get_cost(runs)
    plt.plot(pole_lengths, avg_costs)
    plt.title('GP-MPC' + label + ' robustness')
    plt.xlabel(label + ' Bounds')
    plt.ylabel('Normalized Common Cost')
    plt.savefig(os.path.join(dir, 'common_cost_robust_plot.png'))
    plt.cla()
    plt.clf()
    data = np.vstack((pole_lengths, avg_costs)).T
    fname = os.path.join(dir, 'common_cost_robust_plot.csv')
    header = 'Coeff,Avg Cost'
    np.savetxt(fname,
               data,
               delimiter=',',
               header=header)
def plot_robustness_runs(all_runs, num_epochs, parameters, episode=0, ind=0, ylabel='x position', dir=None):
    """Overlay one state dimension of a chosen episode for every swept
    parameter value.

    Saves to '<dir>/ep<episode>_ind<ind>_state.png' when `dir` is given,
    otherwise shows the figure interactively.
    """
    for epoch in range(num_epochs):
        plt.plot(all_runs[epoch][episode]['state'][:, ind],
                 label='%s' % parameters[epoch])
    plt.title(ylabel)
    plt.xlabel('Step')
    plt.ylabel(ylabel)
    plt.legend()
    fname = 'ep%s_ind%s_state.png' % (episode, ind)
    if dir is None:
        plt.show()
    else:
        plt.savefig(os.path.join(dir, fname))
    # Reset the current axes/figure for the next plot.
    plt.cla()
    plt.clf()
def table_csv(runs, dir):
    """Write a (num_episodes x num_epochs) table of per-run RMSE errors to
    '<dir>/total_rmse_state_error_table.csv'.

    Args:
        runs: Nested [epoch][episode] run dicts with a
            'total_rmse_state_error' (mse, rmse) entry.
        dir: Output directory.
    """
    n_epochs = len(runs)
    n_episodes = len(runs[0])
    rmse_errors = np.empty((n_episodes, n_epochs))
    for ep in range(n_epochs):
        for run in range(n_episodes):
            rmse_errors[run, ep] = runs[ep][run]['total_rmse_state_error'][1]
    np.savetxt(os.path.join(dir, 'total_rmse_state_error_table.csv'),
               rmse_errors, delimiter=',')
def plot_robustness_rmse(runs, pole_lengths, label, dir):
    """Plot average RMSE against a swept parameter; save PNG and CSV.

    Args:
        runs: Nested [parameter][episode] run dicts (consumed by
            get_average_rmse_error).
        pole_lengths: Swept parameter values (x axis).
        label: Parameter name used in the title and x label.
        dir: Output directory.
    """
    # Removed dead local `num_coeff` (was assigned but never used).
    avg_costs = get_average_rmse_error(runs)
    plt.plot(pole_lengths, avg_costs)
    plt.title('GP-MPC ' + label + ' Robustness')
    plt.xlabel(label + ' Bounds')
    plt.ylabel('Average RMSE')
    plt.savefig(os.path.join(dir, 'rmse_robust_plot.png'))
    plt.cla()
    plt.clf()
    data = np.vstack((pole_lengths, avg_costs)).T
    fname = os.path.join(dir, 'rmse_robust_plot.csv')
    header = 'Coeff,Avg Cost'
    np.savetxt(fname,
               data,
               delimiter=',',
               header=header)
def plot_all_robustness_runs(runs, parameters, dir):
    """Plot every state dimension of every episode across the parameter
    sweep, saving figures into '<dir>/figs'.

    Args:
        runs: Nested [parameter][episode] run dicts with a 'state' array.
        parameters: Swept parameter values, used as plot labels.
        dir: Output directory; figures go into its 'figs' subfolder.
    """
    num_epochs = len(runs)
    num_episodes = len(runs[0])
    fig_dir = os.path.join(dir, 'figs')
    mkdirs(fig_dir)
    num_inds = runs[0][0]['state'].shape[1]
    # Removed dead local `num_points_per_epoch` (was assigned, never used).
    for episode_i in range(num_episodes):
        for ind in range(num_inds):
            ylabel = 'x%s' % ind
            plot_robustness_runs(runs, num_epochs, parameters, episode=episode_i, ind=ind, ylabel=ylabel, dir=fig_dir)
def plot_constraint_from_csv(fname,
                             plot_name):
    """Load a violations CSV (train steps, mean violations) and display it.

    Args:
        fname: Path to a comma-delimited CSV with x in column 0, y in 1.
        plot_name: Plot title.
    """
    table = np.genfromtxt(fname, delimiter=',')
    steps, mean_viols = table[:, 0], table[:, 1]
    plt.plot(steps, mean_viols)
    plt.title(plot_name)
    plt.xlabel('Train Steps (s)')
    plt.ylabel('Avg number of violations')
    plt.show()
def plot_data_eff_from_csv(fname,
                           plot_name):
    """Load a data-efficiency CSV (train steps, cost) and display it.

    Args:
        fname: Path to a comma-delimited CSV with x in column 0, y in 1.
        plot_name: Plot title.
    """
    table = np.genfromtxt(fname, delimiter=',')
    steps, costs = table[:, 0], table[:, 1]
    plt.plot(steps, costs)
    plt.title(plot_name)
    plt.xlabel('Train Steps (s)')
    plt.ylabel('Eval. Cost')
    plt.show()
def plot_robustness_from_csv(fname,
                             plot_name,
                             x_label):
    """Load a robustness CSV (parameter, cost) and display it.

    Args:
        fname: Path to a comma-delimited CSV with x in column 0, y in 1.
        plot_name: Plot title.
        x_label: Label for the swept-parameter axis.
    """
    table = np.genfromtxt(fname, delimiter=',')
    params, costs = table[:, 0], table[:, 1]
    plt.plot(params, costs)
    plt.title(plot_name)
    plt.xlabel(x_label)
    plt.ylabel('Eval. Cost')
    plt.show()
def plot_impossible_traj_from_csv(fnames):
plt.figure()
n = len(fnames)
lin_mpc_data = np.genfromtxt(fnames[0], delimiter=',')
plt.plot(lin_mpc_data[:,0], lin_mpc_data[:,1], label='Linear MPC')
for i in range(1, n):
traj = np.genfromtxt(fnames[i], delimiter=',')
plt.plot(traj[:,0], traj[:,1], label='GP-MPC %s' | |
# -*- coding: utf-8 -*-
"""
This module sets up three objects from class definitions.
- input_data() establishes where data is loaded from.
- configuration() establishes various configuration variables used in the rest of the code.
- output_data() establishes where data is written to.
These are intended to be changed by the configurationplusfiles_runner.py module.
"""
##### import statements
import pandas as pd
import numpy as np
import itertools
import matplotlib.pyplot as plt
#%matplotlib inline
import welly
from welly import Well
import lasio
import glob
import pickle
import math
import os
##### import from other modules
##### Classes
class input_data:
    """
    Holds paths and other information about input data: well log files,
    top (pick) files, well information files, and well location (GIS) files.

    Parameters
    ----------
    picks_file_path : str
        Path to the file that contains all the pick names and depths.
    picks_delimiter_str : str
        Delimiter used in the picks file.
    path_to_logs_str : str
        Glob-style path to the directory with all the well logs.
    """

    def __init__(self, picks_file_path, picks_delimiter_str, path_to_logs_str):
        #### Default initiation = ('../../../SPE_006_originalData/OilSandsDB/PICKS.TXT','\t','../../../SPE_006_originalData/OilSandsDB/Logs/*.LAS')
        # Base directory of the demo dataset; the non-mandatory default
        # paths below are derived from it.
        self.data_directory = "../data/Mannville_input_data/v0.0.3-alpha/mannville_demo_data/"
        #### Mandatory constructor arguments.
        self.picks_file_path = picks_file_path  #### example = '../../../SPE_006_originalData/OilSandsDB/PICKS.TXT'
        self.picks_delimiter_str = picks_delimiter_str  #### example = '\t'
        # Picks are loaded eagerly at construction time.
        self.picks_df = pd.read_csv(picks_file_path, delimiter=picks_delimiter_str)
        self.picks_dic = self.data_directory + "OilSandsDB/PICKS.TXT"
        self.picks_dic_file_path_delimiter = "\t"
        self.logs_path_to_folder = path_to_logs_str  #### example = '../../../SPE_006_originalData/OilSandsDB/Logs/*.LAS'
        #### Non-mandatory attributes; defaults work for the example dataset
        #### and can be changed with the set_* methods below.
        self.wells_file_path = self.data_directory + "OilSandsDB/WELLS.TXT"
        self.wells_file_path_delimiter = "\t"
        self.gis_file_path = self.data_directory + "well_lat_lng.csv"
        self.gis_file_path_delimiter = ","
        self.gis_lat_col = "lat"
        self.gis_long_col = "lng"
        #### For logs.
        self.las_folder_path = self.data_directory + "OilSandsDB/Logs/"
        self.well_format = ".LAS"
        #### Populated lazily by load_wells_file / load_gis_file. The GIS
        #### file is needed if you want nearby-well or location information.
        self.wells_df = None
        self.gis_df = None

    def load_wells_file(self):
        """Read the wells file into a dataframe, cache it, and return it."""
        self.wells_df = pd.read_csv(self.wells_file_path,
                                    delimiter=self.wells_file_path_delimiter)
        return self.wells_df

    def load_gis_file(self):
        """Read the GIS (well location) file into a dataframe, cache it, and return it."""
        self.gis_df = pd.read_csv(self.gis_file_path,
                                  delimiter=self.gis_file_path_delimiter)
        return self.gis_df

    def set_wells_file_path(self, wells_file_path_str, wells_file_delimiter):
        """Point at a new wells file (txt/tsv/csv) and return its dataframe."""
        self.wells_file_path = wells_file_path_str
        self.wells_file_path_delimiter = wells_file_delimiter
        return self.load_wells_file()

    def set_gis_file_path(self, gis_file_path_str, gis_file_path_delimiter):
        """Point at a new GIS file (txt/tsv/csv) and return its dataframe."""
        self.gis_file_path = gis_file_path_str
        self.gis_file_path_delimiter = gis_file_path_delimiter
        return self.load_gis_file()
class configuration:
    """
    A class to keep configuration variables you might change between runs; that
    is why it has a large number of attributes, listed below.

    Types of information stored here include mandatory curves, mandatory tops,
    column names, the name of the top you're trying to predict, etc.

    The object created by this class is used throughout Predictatops, so many
    modules reimport it. If you change something in one module and start work
    later in another module, do not expect your changes to persist unless you
    saved them or wrote them into the configurationplusfiles_runner.py file.

    Parameters
    ----------
    none : none
        None.

    Attributes
    ----------
    csv_of_well_names_wTopsCuves__name : str
        Name of the csv of well names with given tops and curves.
    csv_of_well_names_wTopCurves__path : str
        Path to the csv of well names with given tops and curves.
    must_have_curves_list : list
        A list of strings that are curve names like ['ILD', 'NPHI', 'GR', 'DPHI', 'DEPT']
    curve_windows_for_rolling_features : list
        List of integers like [5,7,11,21]
    must_have_tops__list : list
        A list of tops that can be integers or strings like [13000,14000]
    target_top : str
        A string or integer like 13000
    top_under_target : str
        A string or integer that is the name of a top under the top you want to predict, such as 14000
    top_name_col_in_picks_df : str
        The top name as it appears in the picks dataframe
    siteID_col_in_picks_df : str
        The string for the siteID column in the picks dataframe like 'SitID'
    UWI : str
        The string for the UWI column like "UWI"
    DEPTH_col_in_featureCreation : str
        The string for the depth column like "DEPT"
    HorID_name_col_in_picks_df : str
        The string for the horizon ID column like "HorID"
    quality_col_name_in_picks_df : str
        The string for the quality of the pick column like "Quality"
    picks_depth_col_in_picks_df : str
        The string for the pick column name like 'Pick'
    col_topTarget_Depth_predBy_NN1thick : str
        The string for the top target depth predicted by nearest neighbor thickness like 'topTarget_Depth_predBy_NN1thick'
    quality_items_to_skip__list : str
        The list of integers for the quality of wells to optionally skip as not good quality picks. An example is [-1,0]
    test : str
        Legacy attribute; defaults to "test0". NOTE(review): purpose unclear — confirm usage before removing.
    pick_class_str : str
        String for the top target pick prediction column like 'TopTarget_Pick_pred'
    threshold_returnCurvesThatArePresentInThisManyWells : int
        The integer for the number of wells a curve has to be present in to be kept, for example 2000
    max_numb_wells_to_load : int
        Max number of wells to load out of all the wells in the directory with wells. This is used for when you're testing. Example is 1000000
    split_traintest_percent : float
        The fraction (0 to 1) kept for train vs. test. Example is 0.8
    kdtree_leaf : int
        Leaf size of the kdtree. Default is 2
    kdtree_k : int
        Number of neighbors (K) in the k-nearest-neighbor code for finding nearby wells for each well. Default is 8
    rebalanceClassZeroMultiplier : int
        When rebalancing, the number of instances of class zero is duplicated by this many times. Default is 100
    rebalanceClass95Multiplier : int
        When rebalancing, the number of instances of class 95 is duplicated by this many times. Default is 40
    NN1_topTarget_DEPTH : str
        Column name holding the depth of the top in the first nearest-neighbor training well. For example 'NN1_topTarget_DEPTH'
    NN1_TopHelper_DEPTH : str
        Helper depth for calculations for NN1_topTarget_DEPTH. Example is "NN1_TopHelper_DEPTH"
    trainOrTest : str
        Column that holds either "train" or "test". Example is 'trainOrTest'
    colsToNotTurnToFloats : list
        List of columns to not turn to floats during feature creation. Example is ['UWI', 'SitID', 'trainOrTest','Neighbors_Obj']
    zonesAroundTops : dict
        A dict of class labels and depth ranges around the top in which to create those classes. Example is {"100":[0],"95":[-0.5,0.5],"60":[-5,0.5],"70":[0.5,5],"0":[]} #### NOTE: The code in createFeat_withinZoneOfKnownPick(df,config) function in features.py currently ASSUMES only 5 zone labels
    columns_to_not_trainOn_andNotCurves : list
        List of strings for names of columns to not train on that are not curves. Example is ['FromBotWell','FromTopWel','rowsToEdge','lat','lng', 'SitID', ...]
    columns_to_not_trainOn_andAreCurves : list
        List of strings for columns to not train on that are curves. Example is ['RHOB','SP','CALI','COND','DELT','DENS','DPHI:1','DPHI:2','DT','GR:1','GR:2','IL','ILD:1','ILD:2','ILM','LITH','LLD','LLS','PHID','PHIN','RESD','RT','SFL','SFLU','SN','SNP','Sp']
    columns_to_use_as_labels : list
        List of strings for columns to use as labels. Example is ['class_DistFrPick_TopTarget','UWI','trainOrTest','TopTarget_DEPTH']
    """

    def __init__(self):
        #### intermediate files and paths
        self.csv_of_well_names_wTopsCuves__name = ""
        self.csv_of_well_names_wTopCurves__path = "."
        #### Choices
        self.must_have_curves_list = [""]  # ['ILD', 'NPHI', 'GR', 'DPHI', 'DEPT']
        self.curve_windows_for_rolling_features = [5, 7, 11, 21]
        self.must_have_tops__list = [13000, 14000]
        self.target_top = 13000
        self.top_under_target = 14000
        #### Column string names
        self.top_name_col_in_picks_df = ""
        self.siteID_col_in_picks_df = "SitID"
        self.UWI = "UWI"
        self.DEPTH_col_in_featureCreation = "DEPT"
        self.HorID_name_col_in_picks_df = "HorID"
        self.quality_col_name_in_picks_df = "Quality"
        self.picks_depth_col_in_picks_df = "Pick"
        self.col_topTarget_Depth_predBy_NN1thick = "topTarget_Depth_predBy_NN1thick"
        self.quality_items_to_skip__list = [-1, 0]
        self.test = "test0"
        self.pick_class_str = "TopTarget_Pick_pred"
        self.threshold_returnCurvesThatArePresentInThisManyWells = 2000
        self.max_numb_wells_to_load = 1000000
        self.split_traintest_percent = 0.8
        self.kdtree_leaf = 2
        self.kdtree_k = 8
        self.rebalanceClassZeroMultiplier = 100
        self.rebalanceClass95Multiplier = 40
        self.NN1_topTarget_DEPTH = "NN1_topTarget_DEPTH"
        self.NN1_TopHelper_DEPTH = "NN1_TopHelper_DEPTH"
        self.trainOrTest = "trainOrTest"
        self.colsToNotTurnToFloats = ["UWI", "SitID", "trainOrTest", "Neighbors_Obj"]
        self.zonesAroundTops = {
            "100": [0],
            "95": [-0.5, 0.5],
            "60": [-5, 0.5],
            "70": [0.5, 5],
            "0": [],
        }  #### NOTE: The code in createFeat_withinZoneOfKnownPick(df,config) function in features.py current ASSUMES only 5 zone labels
        self.columns_to_not_trainOn_andNotCurves = [
            "FromBotWell",
            # BUGFIX: a missing comma previously merged the next two entries
            # into the single string "FromTopWelrowsToEdge", so neither column
            # was actually excluded from training.
            # NOTE(review): "FromTopWel" may itself be a typo for
            # "FromTopWell" — confirm against the actual feature column names.
            "FromTopWel",
            "rowsToEdge",
            "lat",
            "lng",
            "SitID",
            "TopHelper_HorID",
            "TopTarget_HorID",
            "TopHelper_DEPTH",
            "diff_Top_Depth_Real_v_predBy_NN1thick",
            "diff_TopTarget_DEPTH_v_rowDEPT",
            "diff_TopHelper_DEPTH_v_rowDEPT",
            "class_DistFrPick_TopHelper",
            "NewWell",
            "LastBitWell",
            "TopWellDept",
            "BotWellDept",
            "WellThickness",
            "rowsToEdge",  # NOTE(review): duplicate entry kept from original; harmless in an exclusion list
            "closTopBotDist",
            "closerToBotOrTop",
            "Neighbors_Obj",
        ]
        self.columns_to_not_trainOn_andAreCurves = [
            "RHOB",
            "SP",
            "CALI",
            "COND",
            "DELT",
            "DENS",
            "DPHI:1",
            "DPHI:2",
            "DT",
            "GR:1",
            "GR:2",
            "IL",
            "ILD:1",
            "ILD:2",
            "ILM",
            "LITH",
            "LLD",
            "LLS",
            "PHID",
            "PHIN",
            "RESD",
            "RT",
            "SFL",
            "SFLU",
            "SN",
            "SNP",
            "Sp",
        ]
| |
With QtWebEngine, this setting also controls
# other features with tracking capabilities similar to those of cookies;
# including IndexedDB, DOM storage, filesystem API, service workers, and
# AppCache. Note that with QtWebKit, only `all` and `never` are
# supported as per-domain values. Setting `no-3rdparty` or `no-
# unknown-3rdparty` per-domain on QtWebKit will have the same effect as
# `all`. If this setting is used with URL patterns, the pattern gets
# applied to the origin/first party URL of the page making the request,
# not the request URL. With QtWebEngine 5.15.0+, paths will be stripped
# from URLs, so URL patterns using paths will not match. With
# QtWebEngine 5.15.2+, subdomains are additionally stripped as well, so
# you will typically need to set this setting for `example.com` when the
# cookie is set on `somesubdomain.example.com` for it to work properly.
# To debug issues with this setting, start qutebrowser with `--debug
# --logfilter network --debug-flag log-cookies` which will show all
# cookies being set.
# Type: String
# Valid values:
# - all: Accept all cookies.
# - no-3rdparty: Accept cookies from the same origin only. This is known to break some sites, such as GMail.
# - no-unknown-3rdparty: Accept cookies from the same origin only, unless a cookie is already set for the domain. On QtWebEngine, this is the same as no-3rdparty.
# - never: Don't accept cookies at all.
# Per-domain override: accept all cookies on Chrome devtools pages.
config.set('content.cookies.accept', 'all', 'chrome-devtools://*')
# Which cookies to accept. With QtWebEngine, this setting also controls
# other features with tracking capabilities similar to those of cookies;
# including IndexedDB, DOM storage, filesystem API, service workers, and
# AppCache. Note that with QtWebKit, only `all` and `never` are
# supported as per-domain values. Setting `no-3rdparty` or `no-
# unknown-3rdparty` per-domain on QtWebKit will have the same effect as
# `all`. If this setting is used with URL patterns, the pattern gets
# applied to the origin/first party URL of the page making the request,
# not the request URL. With QtWebEngine 5.15.0+, paths will be stripped
# from URLs, so URL patterns using paths will not match. With
# QtWebEngine 5.15.2+, subdomains are additionally stripped as well, so
# you will typically need to set this setting for `example.com` when the
# cookie is set on `somesubdomain.example.com` for it to work properly.
# To debug issues with this setting, start qutebrowser with `--debug
# --logfilter network --debug-flag log-cookies` which will show all
# cookies being set.
# Type: String
# Valid values:
# - all: Accept all cookies.
# - no-3rdparty: Accept cookies from the same origin only. This is known to break some sites, such as GMail.
# - no-unknown-3rdparty: Accept cookies from the same origin only, unless a cookie is already set for the domain. On QtWebEngine, this is the same as no-3rdparty.
# - never: Don't accept cookies at all.
# Per-domain override: accept all cookies on the devtools:// scheme.
config.set('content.cookies.accept', 'all', 'devtools://*')
# Which cookies to accept. With QtWebEngine, this setting also controls
# other features with tracking capabilities similar to those of cookies;
# including IndexedDB, DOM storage, filesystem API, service workers, and
# AppCache. Note that with QtWebKit, only `all` and `never` are
# supported as per-domain values. Setting `no-3rdparty` or `no-
# unknown-3rdparty` per-domain on QtWebKit will have the same effect as
# `all`. If this setting is used with URL patterns, the pattern gets
# applied to the origin/first party URL of the page making the request,
# not the request URL. With QtWebEngine 5.15.0+, paths will be stripped
# from URLs, so URL patterns using paths will not match. With
# QtWebEngine 5.15.2+, subdomains are additionally stripped as well, so
# you will typically need to set this setting for `example.com` when the
# cookie is set on `somesubdomain.example.com` for it to work properly.
# To debug issues with this setting, start qutebrowser with `--debug
# --logfilter network --debug-flag log-cookies` which will show all
# cookies being set.
# Type: String
# Valid values:
# - all: Accept all cookies.
# - no-3rdparty: Accept cookies from the same origin only. This is known to break some sites, such as GMail.
# - no-unknown-3rdparty: Accept cookies from the same origin only, unless a cookie is already set for the domain. On QtWebEngine, this is the same as no-3rdparty.
# - never: Don't accept cookies at all.
# Global cookie policy; the per-URL-pattern config.set overrides above
# take precedence for their matching origins.
c.content.cookies.accept = 'all'
# Store cookies.
# Type: Bool
c.content.cookies.store = True
# Default encoding to use for websites. The encoding must be a string
# describing an encoding such as _utf-8_, _iso-8859-1_, etc.
# Type: String
c.content.default_encoding = 'iso-8859-1'
# Limit fullscreen to the browser window (does not expand to fill the
# screen).
# Type: Bool
c.content.fullscreen.window = False
# Set fullscreen notification overlay timeout in milliseconds. If set to
# 0, no overlay will be displayed.
# Type: Int
c.content.fullscreen.overlay_timeout = 3000
# Allow websites to share screen content.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
c.content.desktop_capture = 'ask'
# Allow websites to share screen content.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
# Per-domain override: automatically allow screen sharing on Google Meet.
config.set('content.desktop_capture', True, 'https://meet.google.com')
# Try to pre-fetch DNS entries to speed up browsing.
# Type: Bool
c.content.dns_prefetch = True
# Allow websites to request geolocations.
# Type: BoolAsk
# Valid values:
# - true
# - false
# - ask
c.content.geolocation = 'ask'
# Value to send in the `Accept-Language` header. Note that the value
# read from JavaScript is always the global value.
# Type: String
# Per-domain override: send an empty Accept-Language to this host.
config.set('content.headers.accept_language', '', 'https://matchmaker.krunker.io/*')
# Value to send in the `Accept-Language` header. Note that the value
# read from JavaScript is always the global value.
# Type: String
c.content.headers.accept_language = 'en-US,en;q=0.9'
# Custom headers for qutebrowser HTTP requests.
# Type: Dict
c.content.headers.custom = {}
# Value to send in the `DNT` header. When this is set to true,
# qutebrowser asks websites to not track your identity. If set to null,
# the DNT header is not sent at all.
# Type: Bool
c.content.headers.do_not_track = True
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
# Per-domain user agent override for WhatsApp Web.
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/{webkit_version} (KHTML, like Gecko) {upstream_browser_key}/{upstream_browser_version} Safari/{webkit_version}', 'https://web.whatsapp.com/')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
# Per-domain user agent override for Google accounts; appends an Edge
# ("Edg/") token to the UA string.
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/{webkit_version} (KHTML, like Gecko) {upstream_browser_key}/{upstream_browser_version} Safari/{webkit_version} Edg/{upstream_browser_version}', 'https://accounts.google.com/*')
# User agent to send. The following placeholders are defined: *
# `{os_info}`: Something like "X11; Linux x86_64". * `{webkit_version}`:
# The underlying WebKit version (set to a fixed value with
# QtWebEngine). * `{qt_key}`: "Qt" for QtWebKit, "QtWebEngine" for
# QtWebEngine. * `{qt_version}`: The underlying Qt version. *
# `{upstream_browser_key}`: "Version" for QtWebKit, "Chrome" for
# QtWebEngine. * `{upstream_browser_version}`: The corresponding
# Safari/Chrome version. * `{qutebrowser_version}`: The currently
# running qutebrowser version. The default value is equal to the
# unchanged user agent of QtWebKit/QtWebEngine. Note that the value
# read from JavaScript is always the global value. With QtWebEngine
# between 5.12 and 5.14 (inclusive), changing the value exposed to
# JavaScript requires a restart.
# Type: FormatString
# Per-domain user agent override for Slack; pins a fixed Chrome/99 UA string.
config.set('content.headers.user_agent', 'Mozilla/5.0 ({os_info}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99 Safari/537.36', 'https://*.slack.com/*')
# Enable the ad/host blocker
# Type: Bool
c.content.blocking.enabled = True
# List of URLs to host blocklists for the host blocker. Only used when
# the simple host-blocker is used (see `content.blocking.method`). The
# file can be in one of the following formats: - An `/etc/hosts`-like
# file - One host per line - A zip-file of any of the above, with either
# only | |
<filename>cage/core.py<gh_stars>0
# Encoding: utf-8
import json
import math
import numpy as np
import pymatgen as pmg
import cage.utils as utils
import pymatgen.symmetry.analyzer as syman
from itertools import combinations
from monty.io import zopen
from monty.json import MSONable
from pymatgen.core.structure import Molecule
from pymatgen.core.structure import SiteCollection
from pymatgen.core.operations import SymmOp
"""
Core tools of the cage package. Defines the Cage, OccupiedCage and Facet class.
"""
__author__ = "<NAME>"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "alpha"
__date__ = "14 JUN 2017"
SYMMETRY_TOLERANCE = 1e-2
# This is a tolerance value to determine the symmetry operations of the Cage.
# It is also used to determine which facets are equivalent. The standard value
# of 1E-2 is usually pretty good. In case the right non-equivalent facets are
# not found, it might be worth trying to tweaking this value.
ANGLE_TOLERANCE = math.pi / 20
# This value is important when determining whether or not a new site is a part
# of the facet. In principle, the site should be in the plane of the facet,
# i.e. the angle between the line connecting the center of the facet and the
# site and the normal of the Facet should be pi/2. This parameters allows a
# deviation of the angle up to ANGLE_TOLERANCE
class Cage(Molecule):
"""
A Cage is a pymatgen.Molecule-based object for molecules shaped similar
to fullerenes.
"""
def __init__(self, species, coords, charge=0, spin_multiplicity=None,
validate_proximity=False, site_properties=None):
"""
Create a Cage instance. The Cage molecule's geometric center is
automatically centered on the origin.
Args:
species (List of pymatgen.Specie): List of atomic species. Possible
kinds of input include a list of dict of elements/species and
occupancies, a List of elements/specie specified as actual
Element/Specie, Strings ("Fe", "Fe2+") or atomic numbers
(1,56).
coords (List of (3,) numpy.ndarray): List of cartesian coordinates
of each species.
charge (float): Charge for the molecule. Defaults to 0.
validate_proximity (bool): Whether to check if there are sites
that are less than 1 Ang apart. Defaults to False.
site_properties (dict): Properties associated with the sites as
a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
sequences have to be the same length as the atomic species
and fractional_coords. Defaults to None for no properties.
Returns:
(*cage.Cage*)
"""
super(Cage, self).__init__(species, coords, charge, spin_multiplicity,
validate_proximity, site_properties)
self.center()
self._facets = None
self._pointgroup = None
self._symmops = None
self._facet_dict = None
@property
def facets(self):
"""
Surface Facets of the Cage. Note that in case the surface facets have
not been set up using *find.surface.facets()*, the property is equal to
*None*.
Returns:
(List of Facets): The surface facets of the Cage, as set up using
find_surface_facets()
"""
return self._facets
@property
def pointgroup(self):
"""
The Schoenflies PointGroup of the Cage molecule.
Returns:
(*pymatgen.symmetry.analyzer.PointGroup*)
"""
if not self._pointgroup:
self._pointgroup = syman.PointGroupAnalyzer(self).get_pointgroup()
return self._pointgroup
@property
def symmops(self):
"""
The symmetry operations of the Cage.
Returns:
(*List of pymatgen.Symmop*)
"""
if not self._symmops:
# Set up the point group analyzer
pgan = syman.PointGroupAnalyzer(self)
# Find the full set of symmetry operations
self._symmops = syman.generate_full_symmops(pgan.symmops,
SYMMETRY_TOLERANCE)
return self._symmops
@property
def anion_center(self):
anion_coords = [site.coords for site in self.sites
if site.specie not in OccupiedCage.CATIONS]
return sum(anion_coords) / len(anion_coords)
@classmethod
def from_poscar(cls, filename):
"""
Imports a Cage from a VASP POSCAR file.
Args:
filename (string): Filename of the POSCAR file.
Returns:
(*cage.Cage*)
"""
# Import the structure from the POSCAR file
structure = pmg.Structure.from_file(filename)
# Generate the molecule object from the structure sites
cage = cls(structure.species, structure.cart_coords)
return cage
@classmethod
def from_molecule(cls, mol):
"""
Initializes a Cage from a Molecule.
Args:
mol (pymatgen.Molecule): The molecule from which to initialize the
cage.
Returns:
(*cage.Cage*)
"""
assert type(mol) is Molecule or type(mol) is Cage
return cls(species=mol.species, coords=mol.cart_coords,
charge=mol.charge, spin_multiplicity=mol.spin_multiplicity,
site_properties=mol.site_properties)
    def to_poscar(self, filename='POSCAR'):
        """
        Writes the Cage to a POSCAR file.

        NOTE(review): not implemented yet — this is currently a silent
        no-op: callers get neither a file nor an error.

        Args:
            filename (str): Target POSCAR filename. Defaults to 'POSCAR'.
        """
        pass # TODO
def copy(self):
"""
Convenience method to get a copy of the Cage. Overwritten from the
Molecule class to conserve the charge of the molecule.
Returns:
cage.core.Cage
"""
copy = super(Cage, self).copy()
copy.set_charge_and_spin(charge=self.charge)
return copy
def center(self, point=None):
"""
Center the Cage around a point by updating the sites, i.e. find the
coordinates for the sites so that the geometric center is on the point
provided. In case no point is provided, the molecule is centered around
the origin.
Note: Running this method will reset the facets and symmetry
information to None.
Args:
point ((3,) numpy.ndarray): Point around which to center the
molecule.
"""
center = sum([site.coords for site in self.sites]) / len(self.sites)
if point is not None:
center -= point
# Find the new coordinates
new_coords = np.array(self.cart_coords) - center
# Update the sites
sites = []
for i in range(len(self.species)):
prop = None
if self.site_properties:
prop = {k: v[i] for k, v in self.site_properties.items()}
sites.append(pmg.Site(self.species[i], new_coords[i],
properties=prop))
self._sites = sites
self._facets = None
self._symmops = None
self._pointgroup = None
self._facet_dict = None
def redefine_origin(self, origin):
"""
Change the coordinates of the Cage, in order to redefine the origin.
Note: Running this method will reset the facets and symmetry
information to None.
Args:
origin ((3,) numpy.ndarray): Origin coordinates.
"""
# Find the new coordinates
new_coords = np.array(self.cart_coords) - origin
# Update the sites
sites = []
for i in range(len(self.species)):
prop = None
if self.site_properties:
prop = {k: v[i] for k, v in self.site_properties.items()}
sites.append(pmg.Site(self.species[i], new_coords[i],
properties=prop))
self._sites = sites
self._facets = None
self._symmops = None
self._pointgroup = None
self._facet_dict = None
def insert(self, index, species, coords, validate_proximity=True,
properties=None):
"""
Overwrite the insert method of the Molecule class, in order to
reset the facets, symmetry operations and point group after the site
has been inserted.
Note: Running this method will reset the facets and symmetry
information to None.
Args:
index (int): Index to insert site.
species (pymatgen.Specie): Species of inserted site.
coords ((3,) numpy.ndarray): Coordinates of inserted site.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to True.
properties (dict): A dictionary of properties for the Site.
"""
super(Cage, self).insert(index, species, coords, validate_proximity,
properties)
self._facets = None
self._symmops = None
self._pointgroup = None
self._facet_dict = None
def append(self, species, coords, validate_proximity=True,
properties=None):
"""
Overwrite the append method of the Molecule class, in order to
reset the facets, symmetry operations and point group after the site
has been appended.
Note: Running this method will reset the facets and symmetry
information to None.
Args:
species (pymatgen.Specie): Species of inserted site.
coords ((3,) numpy.ndarray): Coordinates of inserted site.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to True.
properties (dict): A dictionary of properties for the Site.
"""
super(Cage, self).append(species, coords, validate_proximity,
properties)
self._facets = None
self._symmops = None
self._pointgroup = None
self._facet_dict = None
def find_surface_facets(self, ignore=()):
"""
Find all the surface facets of the Cage object. A surface facet is
defined as a facet for which all atoms of non-ignored species are on
one side of the surface defined by the facet.
Args:
ignore (List/Tuple of str/Element): The elements to ignore for the
surface facet determination. Can be either a tuple or list of
Elements or strings describing those elements
"""
# Check if the content of ignore contains strings
if any([type(item) is str for item in ignore]):
# If so, turn them into elements
ignore = tuple([pmg.Element(item) for item in ignore])
# Find all the sites which should not be ignored
valid_sites = [site for site in self.sites if site.specie not in ignore]
# Find all of the Facets from combinations of three valid Sites
all_facets = [Facet(list(combination)) for combination
in combinations(valid_sites, 3)]
# Flip the normal of the facets in case it points to the center of mass
# of the Cage
for facet in all_facets:
if facet.angle_to_normal(self.center_of_mass) < math.pi / 2:
facet.flip_normal()
# Find all the facets that are "on the surface"
facets_surf = []
for facet in all_facets:
# If all the angles are larger than pi/2, it's a surface site
all_angles_smaller = True
# Check the other sites in the molecule
| |
<filename>code/proteomics_preprocessing.py
'''Preparing datasets for language modelling, classification and sequence annotation
'''
import fire
# This needs to be called in order to load local implemented submodules
import os
import sys
#module_path = os.path.abspath(os.path.join('../'))
#if module_path not in sys.path:
# sys.path.append(module_path)
from tqdm import tqdm as tqdm
from utils.proteomics_utils import *
from utils.dataset_utils import *
#for log header
import datetime
import subprocess
import itertools
import ast
from pandas import concat
#for kinase dicts
from collections import defaultdict
######################################################################################################
#DATA PATHS
######################################################################################################
# NOTE(review): Path is brought into scope by the star imports above
# (utils.proteomics_utils / utils.dataset_utils) — presumably pathlib.Path;
# confirm in those modules.
#path to the data directory
data_path = Path('../data')
#SEQUENCE DATA
#ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.xml.gz (this is swissprot not the whole uniprot)
path_sprot=data_path/'uniprot_sprot_2017_03.xml'
#CLUSTER DATA (default path)
#ftp://ftp.ebi.ac.uk/pub/databases/uniprot/uniref/uniref50/uniref50.xml.gz unzipped 100GB file (whole uniprot not just swissprot)
path_uniref = data_path/"uniref50_2017_03.xml"
#path to the data directory also uploaded to git
git_data_path = Path('../git_data')
#path to the temporary directory for dataframes
tmp_data_path = Path("../tmp_data")#should be moved to ./data_tmp or similar
# Side effect at import time: make sure the temp directory exists.
tmp_data_path.mkdir(exist_ok=True)
######################################################################################################
#AUX FUNCTIONS
######################################################################################################
def load_uniprot(source=path_sprot, parse_features=None):
    '''Parse and load the uniprot xml, caching the resulting dataframe as a pickle.

    Args:
        source: path to the uniprot xml file.
        parse_features: list of features to be parsed (modified residue for
            PTM etc.); defaults to no extra features.

    Returns:
        pandas.DataFrame with the parsed uniprot entries.
    '''
    # BUGFIX(best practice): avoid a mutable default argument; None means
    # "no extra features" and behaves exactly like the old [] default.
    if parse_features is None:
        parse_features = []
    # The requested feature names become part of the pickle filename
    # (spaces -> underscores), so differently-parsed versions cache separately.
    pf="_"+"_".join([p.replace(" ","_") for p in parse_features])
    path_pkl = tmp_data_path/(source.stem+(pf if len(pf)>1 else "")+".pkl")
    if path_pkl.exists():
        print("Loading uniprot pkl from",path_pkl)
        return pd.read_pickle(path_pkl)
    else:
        print(path_pkl, "not found. Parsing uniprot xml...")
        df=parse_uniprot_xml(source,parse_features=parse_features)
        # Cache for subsequent runs.
        df.to_pickle(path_pkl)
        return df
def load_uniref(source=path_uniref,source_uniprot=path_sprot):
    '''Parse and load the uniref xml, caching the result as a pickle.

    When source_uniprot is given, only uniref entries matching that uniprot
    selection are kept; otherwise the full uniref file is parsed.
    '''
    # The pickle name encodes which uniprot selection (if any) was applied.
    selection_tag = "_" + source_uniprot.stem if source_uniprot is not None else "full"
    path_pkl = tmp_data_path / (source.stem + selection_tag + ".pkl")
    if path_pkl.exists():
        print("Loading uniref pkl from",path_pkl)
        return pd.read_pickle(path_pkl)
    print(path_pkl,"not found. Parsing uniref...")
    df_selection = None if source_uniprot is None else load_uniprot(source_uniprot)
    parsed = parse_uniref(source,max_entries=0,parse_sequence=False,df_selection=df_selection)
    parsed.to_pickle(path_pkl)
    return parsed
def load_cdhit(df, cluster_type, dataset):
    '''Load or create the cluster dataframe for a sequence dataframe (pickled for later runs).

    df: sequence dataframe with a "sequence" column
    cluster_type: cdhit05 (uniref-like staged cdhit down to threshold 0.5), cdhit04
        (direct cdhit down to 0.4), or recalculate_cdhit05/recalculate_cdhit04
        (same, but ignore any cached pickle)
    dataset: name fragment (e.g. sprot) used for the pickle filename
    '''
    path_pkl = tmp_data_path/(cluster_type + "_" + dataset + ".pkl")
    recalculating = cluster_type in ("recalculate_cdhit04", "recalculate_cdhit05")
    if path_pkl.exists() and not recalculating:
        print("Loading cdhit pkl from", path_pkl)
        return pd.read_pickle(path_pkl)
    print(path_pkl, "not found. Running cdhit...")
    if cluster_type in ("cdhit05", "recalculate_cdhit05"):
        # uniref-like clustering in three stages: 1.0 -> 0.9 -> 0.5
        thresholds = [1.0, 0.9, 0.5]
        coverages = [0.0, 0.9, 0.8]
    else:
        # direct clustering down to threshold 0.4
        thresholds = [0.4]
        coverages = [0.0]
    clusters = clusters_df_from_sequence_df(df[["sequence"]], threshold=thresholds, alignment_coverage=coverages)
    clusters.to_pickle(path_pkl)
    return clusters
def write_log_header(path, kwargs, filename="logfile.log", append=True):
    '''Create the output directory layout and log the run configuration.

    Creates path and path/models, prints the command line, timestamp, current
    git commit and all run arguments, and writes the same header to the log file.

    path: pathlib Path of the model/output directory (created if missing)
    kwargs: dict of run arguments (typically locals()); any "self" entry is skipped
    filename: log file name inside path
    append: append to an existing log file instead of overwriting it
    '''
    path.mkdir(exist_ok=True)
    (path/"models").mkdir(exist_ok=True)
    # Work on a filtered copy instead of `del kwargs["self"]` so the caller's
    # dict is never mutated.
    kwargs = {k: v for k, v in kwargs.items() if k != "self"}
    print("======================================\nCommand:"," ".join(sys.argv))
    time = datetime.datetime.now()
    print("started at ",time)
    # Short git hash of the current checkout, recorded for reproducibility
    # (raises if git is unavailable or this is not a repository).
    commit = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).strip()
    print("Commit:",commit)
    print("\nArguments:")
    for k in sorted(kwargs.keys()):
        print(k,":",kwargs[k])
    print("")
    filepath = path/filename
    with filepath.open("w" if append is False else "a", encoding="utf-8") as f:
        f.write("\n\nCommand "+" ".join(sys.argv)+"\n")
        f.write("started at "+str(time)+"\n")
        f.write("Commit "+str(commit)+"\n")
        f.write("\nArguments:\n")
        for k in sorted(kwargs.keys()):
            f.write(k+": "+str(kwargs[k])+"\n")
        f.write("\n")
def write_log(path, text, filename="logfile.log", append=True):
    '''Write one line of text to the log file path/filename.

    append=True appends to an existing file; append=False overwrites it.
    '''
    mode = "w" if append is False else "a"
    target = path / filename
    with target.open(mode, encoding="utf-8") as handle:
        handle.write(str(text) + "\n")
######################################################################################################
class Preprocess(object):
"""Preprocessing class (implemented as class to allow different function calls from CLI)."""
#############################
# GENERAL
#############################
    def _preprocess_default(self,path,df,df_cluster,pad_idx=0,sequence_len_min_aas=0,sequence_len_max_aas=0,sequence_len_max_tokens=0,drop_fragments=False,minproteinexistence=0,exclude_aas=[],nfolds=None,sampling_ratio=[0.8,0.1,0.1],ignore_pretrained_clusters=False,save_prev_ids=True,regression=False,sequence_output=False,bpe=False,bpe_vocab_size=100, sampling_method_train=1, sampling_method_valtest=3,subsampling_ratio_train=1.0,randomize=False,random_seed=42,mask_idx=1,tok_itos_in=[],label_itos_in=[],pretrained_path=None, df_redundancy=None, df_cluster_redundancy=None):
        '''internal routine for lm preprocessing: filters df, tokenizes, then splits train/val/test (or CV folds)
        path: Pathlib model path
        df_cluster: clusters dataframe
        pad_idx: index of the padding token
        sequence_len_min_aas: only consider sequences of at least sequence_len_min_aas aas length 0 for no restriction
            (note: the filters below use strict inequalities, so sequences of exactly this length are dropped)
        sequence_len_max_aas: only consider sequences up to sequence_len_max_aas aas length 0 for no restriction
        sequence_len_max_tokens: only consider sequences up to sequence_len_max_tokens tokens length (after tokenization) 0 for no restriction
        drop_fragments: drop proteins with fragment marker (requires fragment column in df)
        minproteinexistence: drop proteins with proteinexistence < value (requires proteinexistence column in df)
        exclude_aas: drop sequences that contain aas in the exclude list e.g. exclude_aas=["B","J","X","Z"] for sequences with only canonical AAs ( B = D or N, J = I or L, X = unknown, Z = E or Q)
        nfolds: if None perform a single split according to sampling_ratio else nfold CV split
        sampling_ratio: sampling ratios for train/val/test
        regression: regression task
        sequence_output: output/label is a sequence
        ignore_pretrained_clusters: do not take into account existing clusters from the previous LM step during train-test-split
        bpe: apply BPE
        bpe_vocab_size: vocabulary size (including original tokens) for BPE
        sampling_method: sampling method for train test split as defined in dataset_utils
        subsampling_ratio_train: artificially reduce train (clusters) to specified ratio (1.0 for full dataset)
        mask_idx: index of the mask token (for BERT masked LM training) None for none
        tok_itos_in: allows to pass tok_itos_in (used for trembl processing) will use pretrained tok_itos if an empty list is passed
        df_redundancy: optional df with same structure as df with additional redundant sequences
        df_cluster_redundancy: optional cluster df including redundant and original sequences
        '''
        #filter by fragments (every filter below is mirrored on df_redundancy when present)
        if(drop_fragments):
            if("fragment" in df.columns):
                df = df[df.fragment == 0]
                if(df_redundancy is not None):
                    df_redundancy = df_redundancy[df_redundancy.fragment == 0]
            else:
                print("Warning: could not drop fragments as fragment column is not available")
        #filter by proteinexistence (keeps rows with proteinexistence >= threshold)
        if(minproteinexistence>0):
            if("proteinexistence" in df.columns):
                df = df[df.proteinexistence >= minproteinexistence]
                if(df_redundancy is not None):
                    df_redundancy = df_redundancy[df_redundancy.proteinexistence >= minproteinexistence]
            else:
                print("Warning: could not filter by protein existence as proteinexistence column is not available")
        #filter by non-canonical AAs
        if(len(exclude_aas)>0):
            df = filter_aas(df, exclude_aas)
            if(df_redundancy is not None):
                df_redundancy = filter_aas(df_redundancy, exclude_aas)
        #filter by aa length (strict inequalities on both bounds)
        if(sequence_len_min_aas>0):
            df = df[df.sequence.apply(len)>sequence_len_min_aas]
            if(df_redundancy is not None):
                df_redundancy = df_redundancy[df_redundancy.sequence.apply(len)>sequence_len_min_aas]
        if(sequence_len_max_aas>0):
            df = df[df.sequence.apply(len)<sequence_len_max_aas]
            if(df_redundancy is not None):
                df_redundancy = df_redundancy[df_redundancy.sequence.apply(len)<sequence_len_max_aas]
        if(bpe):
            # trains (or loads) a sentencepiece model; spt is only referenced later when bpe is True
            spt=sentencepiece_tokenizer(path if pretrained_path is None else pretrained_path,df,vocab_size=bpe_vocab_size)
        if(len(tok_itos_in)==0 and pretrained_path is not None):
            # reuse the pretrained vocabulary so token ids stay compatible
            tok_itos_in = np.load(pretrained_path/"tok_itos.npy")
        if(pretrained_path is not None and ignore_pretrained_clusters is False):
            # carry over the previous split so pretrained train/val/test membership is respected
            if(nfolds is None):
                train_ids_prev = np.load(pretrained_path/"train_IDs_prev.npy")
                val_ids_prev = np.load(pretrained_path/"val_IDs_prev.npy")
                test_ids_prev = np.load(pretrained_path/"test_IDs_prev.npy")
            else:
                cluster_ids_prev = np.load(pretrained_path/"cluster_IDs_CV_prev.npy")
        else:
            if(nfolds is None):
                train_ids_prev = []
                val_ids_prev = []
                test_ids_prev =[]
            else:
                cluster_ids_prev = []
        # tokenize and serialize the dataset; writes ID.npy (ids surviving the token-length filter)
        prepare_dataset(path,df,tokenizer=(spt.tokenize if bpe is True else list_tokenizer),pad_idx=pad_idx,sequence_len_max_tokens=sequence_len_max_tokens,mask_idx=mask_idx,tok_itos_in=tok_itos_in,label_itos_in=label_itos_in,df_seq_redundant=df_redundancy,regression=regression,sequence_output=sequence_output)
        ids_current_all = np.load(path/"ID.npy") #filtered by sequence length
        ids_current = np.intersect1d(list(df.index),ids_current_all) #ids_current_all might contain more sequences
        ids_current_redundancy = [] if df_cluster_redundancy is None else np.intersect1d(list(df_redundancy.index),ids_current_all)
        if(nfolds is None):
            train_test_split(path,ids_current,ids_current_all,df_cluster,train_ids_prev=train_ids_prev,val_ids_prev=val_ids_prev,test_ids_prev=test_ids_prev,sampling_ratio=sampling_ratio,sampling_method_train=sampling_method_train,sampling_method_valtest=sampling_method_valtest,subsampling_ratio_train=subsampling_ratio_train,randomize=randomize,random_seed=random_seed,save_prev_ids=save_prev_ids,ids_current_redundancy=ids_current_redundancy,df_cluster_redundancy=df_cluster_redundancy)
        else:
            cv_split(path,ids_current,ids_current_all,df_cluster,clusters_prev=cluster_ids_prev,nfolds=nfolds, sampling_method_train=sampling_method_train, sampling_method_valtest=sampling_method_valtest, randomize=randomize, random_seed=random_seed, save_prev_ids=save_prev_ids)
#############################
# LM
#############################
    def lm_sprot(self, source_sprot=path_sprot,source_uniref=path_uniref,only_human_proteome=False,drop_fragments=False,minproteinexistence=0,exclude_aas=[],working_folder="./lm_sprot",pretrained_folder="",pad_idx=0,sequence_len_min_aas=0,sequence_len_max_aas=0,sequence_len_max_tokens=0,nfolds=None,sampling_ratio=[0.9,0.05,0.05],cluster_type="uniref",ignore_clusters=False,bpe=False,bpe_vocab_size=100,sampling_method_train=1, sampling_method_valtest=3,subsampling_ratio_train=1.0,randomize=False,random_seed=42,mask_idx=1):
        '''prepare sprot LM data: load sequences and clusters, then delegate to _preprocess_default
        only_human_proteome: filter only human proteome
        drop_fragments: drop proteins marked as fragments
        minproteinexistence: drop proteins with protein existence smaller than minproteinexistence
        working_folder: path of the data folder to be created (as string)
        pretrained_folder: path to pretrained folder (as string; empty string for none)
        pad_idx: index of the padding token
        sequence_len_max_tokens: only consider sequences up to sequence_len_max_tokens tokens length (after tokenization) 0 for no restriction
        nfolds: number of CV splits; None for single split
        sampling_ratio: sampling ratios for train/val/test
        ignore_clusters: do not use cluster information for train/test split
        cluster_type: source of clustering information (uniref, cdhit05: cdhit threshold 0.5 similar to uniref procedure, cdhit04: cdhit with threshold 0.4)
        bpe: apply BPE
        bpe_vocab_size: vocabulary size (including original tokens) for BPE
        sampling_method: sampling method for train test split as defined in dataset_utils
        subsampling_ratio_train: portion of train clusters used
        pick_representative_for_val_test: just select a single representative per cluster for validation and test set
            (NOTE(review): documented here but not an actual parameter of this method — verify against dataset_utils)
        mask_idx: index of the mask token (for BERT masked LM training) None for none
        '''
        print("Preparing sprot LM")
        LM_PATH=Path(working_folder)
        # records command line, git commit and all arguments to working_folder/logfile.log
        write_log_header(LM_PATH,locals())
        source_sprot = Path(source_sprot)
        df = load_uniprot(source=source_sprot)
        if(only_human_proteome):
            df = filter_human_proteome(df)
            print("Extracted {} human proteines".format(df.shape[0]))
        if(ignore_clusters is False):
            # pick the clustering source by prefix: "uniref*" uses the uniref xml, anything else runs cdhit
            if(cluster_type[:6]=="uniref"):
                df_cluster = load_uniref(source_uniref,source_sprot)
            else:
                df_cluster = load_cdhit(df,cluster_type,source_sprot.stem)
        else:
            df_cluster = None
        self._preprocess_default(path=LM_PATH,df=df,df_cluster=df_cluster,pad_idx=pad_idx,sequence_len_min_aas=sequence_len_min_aas,sequence_len_max_aas=sequence_len_max_aas,sequence_len_max_tokens=sequence_len_max_tokens,drop_fragments=drop_fragments,minproteinexistence=minproteinexistence,exclude_aas=exclude_aas,nfolds=nfolds,sampling_ratio=sampling_ratio,bpe=bpe,bpe_vocab_size=bpe_vocab_size,sampling_method_train=sampling_method_train,sampling_method_valtest=sampling_method_valtest,subsampling_ratio_train=subsampling_ratio_train,randomize=randomize,random_seed=random_seed,mask_idx=mask_idx,pretrained_path=Path(pretrained_folder) if pretrained_folder !="" else None)
def lm_netchop(self,working_folder="./lm_netchop",pretrained_folder="",
existing_netchop_peptides=None, netchop_path="netchop", protein_fasta_file="./proteins.fasta",
pad_idx=0,sequence_len_min_aas=0,sequence_len_max_aas=0,sequence_len_max_tokens=0,exclude_aas=[],nfolds=None,sampling_ratio=[0.9,0.05,0.05],
netchop_min_length=8, netchop_max_length=20, netchop_repeats=30,
cluster_type="cdhit05",ignore_clusters=True,bpe=False,bpe_vocab_size=100,
sampling_method_train=1, sampling_method_valtest=3,randomize=False,random_seed=42,mask_idx=1):
'''
Prepare netchop digested sprot LM data. Starts netchop as a suprocess and slices proteins from fasta file. Tokanizes sequences and performs train-val-test split.
working_folder: path of the data folder to be created (as string)
pretrained_folder: path to pretrained folder (as string; empty string for none)
pad_idx: index of the padding token
sequence_len_max_tokens: only consider sequences up to sequence_len_max_tokens tokens length (after tokenization) 0 for no restriction
netchop_path: e.g. ./netchop to call netchop in present directory (the default value requires to place symlink to netchop tcsh in netchop3.1 into searchpath e.g. ~/bin) BE CAREFUL NETCHOP TMP PATH MAY NOT BE TOO LONG
protein_fasta_file: fasta file with proteins
existing_netchop_peptides: None if not available, filename if netchop digested peptides are available as fasta file
netchop_min_length: minimum length of netchop slices sequence, shorter sequences are discarded
netchop_max_length: maximum length of netchop slices sequence, longer sequences are discarded
netchop_repeats: numbers of iterations stochastic netchop slicing is applied to the whole set of proteins
nfolds: number of CV splits; None for single | |
<filename>otter/test/rest/test_application.py
# encoding: utf-8
"""
Tests for :mod:`otter.rest.application`
"""
import json
import mock
from twisted.internet.defer import succeed
from twisted.trial.unittest import TestCase
from otter.rest.application import Otter
from otter.rest.otterapp import OtterApp
from otter.rest.decorators import with_transaction_id, log_arguments
from otter.test.rest.request import RequestTestMixin, RestAPITestMixin
from otter.test.utils import patch
from otter.util.http import (get_autoscale_links, transaction_id, get_collection_links,
get_groups_links, get_policies_links, get_webhooks_links,
next_marker_by_offset)
from otter.util.config import set_config_data
class LinkGenerationTestCase(TestCase):
    """
    Tests for generating autoscale links
    """

    def setUp(self):
        """
        Set a blank root URL
        """
        self.base_url_patcher = mock.patch(
            "otter.util.http.get_url_root", return_value="")
        self.base_url_patcher.start()

    def tearDown(self):
        """
        Undo blanking the root URL
        """
        self.base_url_patcher.stop()

    def _expected_json(self, url):
        # Helper: the JSON link blob expected for a plain 'self' link to url.
        return [
            {
                'rel': 'self',
                'href': url
            }
        ]

    def test_get_only_groups_link(self):
        """
        If only the tenant ID is passed, and the rest of the arguments are
        blank, then the returned base link is /v<api>/<tenant>/groups/
        """
        self.assertEqual(
            get_autoscale_links('11111', api_version='3', format=None),
            '/v3/11111/groups/')

        expected_url = '/v1.0/11111/groups/'
        # test default API
        self.assertEqual(get_autoscale_links('11111', format=None),
                         expected_url)
        # test JSON formatting
        self.assertEqual(get_autoscale_links('11111'),
                         self._expected_json(expected_url))

    def test_get_only_groups_link_for_varying_other_args(self):
        """
        So long as the group ID is not a valid number, we still get the groups
        link /v<api>/<tenant>/groups/
        """
        equivalents = [
            get_autoscale_links('11111', group_id='', format=None),
            get_autoscale_links('11111', policy_id='5', format=None),
            get_autoscale_links('11111', policy_id='', format=None)
        ]
        for equivalent in equivalents:
            self.assertEqual(equivalent, '/v1.0/11111/groups/')

    def test_get_tenant_id_and_group_id(self):
        """
        If only the tenant ID and group ID are passed, and the rest of the
        arguments are blank, then the returned base link is
        /v<api>/<tenant>/groups/<group>
        """
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', api_version='3',
                                format=None),
            '/v3/11111/groups/1/')

        expected_url = '/v1.0/11111/groups/1/'
        # test default API
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', format=None),
            expected_url)
        # test JSON formatting
        self.assertEqual(get_autoscale_links('11111', group_id='1'),
                         self._expected_json(expected_url))

    def test_get_groups_and_group_id_link_for_varying_other_args(self):
        """
        So long as the policy ID is None, we still get the groups
        link /v<api>/<tenant>/groups/<group_id>
        """
        equivalents = [
            get_autoscale_links('11111', group_id='1', webhook_id='1',
                                format=None),
            get_autoscale_links('11111', group_id='1', webhook_id='',
                                format=None),
        ]
        for equivalent in equivalents:
            self.assertEqual(equivalent, '/v1.0/11111/groups/1/')

    def test_get_tenant_id_and_group_id_and_blank_policy_id(self):
        """
        If the tenant ID, the group ID, and an empty policy ID (not None) are
        passed, the returned based link is
        /v<api>/<tenant>/groups/<group>/policies
        """
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', policy_id="",
                                api_version='3', format=None),
            '/v3/11111/groups/1/policies/')

        expected_url = '/v1.0/11111/groups/1/policies/'
        # test default API
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', policy_id="",
                                format=None),
            expected_url)
        # test JSON formatting
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', policy_id=""),
            self._expected_json(expected_url))

    def test_get_tenant_id_and_group_id_and_policy_id(self):
        """
        If the tenant ID, the group ID, and a policy ID (not blank) are
        passed, the returned based link is
        /v<api>/<tenant>/groups/<group>/policies/<policy>
        """
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', policy_id="5",
                                api_version='3', format=None),
            '/v3/11111/groups/1/policies/5/')

        expected_url = '/v1.0/11111/groups/1/policies/5/'
        # test default API
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', policy_id="5",
                                format=None),
            expected_url)
        # test JSON formatting
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', policy_id="5"),
            self._expected_json(expected_url))

    def test_get_tenant_group_policy_ids_and_blank_webhook_id(self):
        """
        If the tenant ID, the group ID, the policy ID, and an empty webhook ID
        (not None) are passed, the returned based link is
        /v<api>/<tenant>/groups/<group>/policies/<policy>/webhooks
        """
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', policy_id="2",
                                webhook_id="", api_version='3', format=None),
            '/v3/11111/groups/1/policies/2/webhooks/')

        expected_url = '/v1.0/11111/groups/1/policies/2/webhooks/'
        # test default API
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', policy_id="2",
                                webhook_id="", format=None),
            expected_url)
        # test JSON formatting
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', policy_id="2",
                                webhook_id=""),
            self._expected_json(expected_url))

    def test_get_tenant_group_policy_and_webhook_id(self):
        """
        If the tenant ID, the group ID, the policy ID, and a webhook ID
        (not blank) are passed, the returned based link is
        /v<api>/<tenant>/groups/<group>/policies/<policy>/webhooks/<webhook>
        """
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', policy_id="2",
                                webhook_id="3", api_version='3', format=None),
            '/v3/11111/groups/1/policies/2/webhooks/3/')

        expected_url = '/v1.0/11111/groups/1/policies/2/webhooks/3/'
        # test default API
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', policy_id="2",
                                webhook_id="3", format=None),
            expected_url)
        # test JSON formatting
        self.assertEqual(
            get_autoscale_links('11111', group_id='1', policy_id="2",
                                webhook_id="3"),
            self._expected_json(expected_url))

    def test_capability_url_included_with_capability_hash(self):
        """
        If a capability_hash parameter is passed in, an extra link is added to
        the JSON blob containing a capability URL. But in the non-formatted
        URL, nothing changes.
        """
        pairs = [("group_id", "1"), ("policy_id", "2"), ("webhook_id", "3")]
        expected = [
            '/v1.0/11111/groups/1/',
            '/v1.0/11111/groups/1/policies/2/',
            '/v1.0/11111/groups/1/policies/2/webhooks/3/'
        ]
        # Build up the kwargs one pair at a time: group only, then group+policy,
        # then group+policy+webhook.
        for i in range(3):
            self.assertEqual(
                get_autoscale_links(
                    '11111', format=None, capability_hash='xxx',
                    **dict(pairs[:(i + 1)])),
                expected[i])

            json_blob = get_autoscale_links(
                '11111', capability_hash='xxx', **dict(pairs[:(i + 1)]))
            self.assertEqual(len(json_blob), 2)
            self.assertIn({'rel': 'capability', 'href': '/v1.0/execute/1/xxx/'},
                          json_blob)

    def test_capability_version(self):
        """
        There is a default capability version of 1, but whatever capability
        version is passed is the one used
        """
        # default version
        json_blob = get_autoscale_links(
            '11111', group_id='1', policy_id='2', webhook_id='3',
            capability_hash='xxx')
        self.assertIn({'rel': 'capability', 'href': '/v1.0/execute/1/xxx/'},
                      json_blob)

        json_blob = get_autoscale_links(
            '11111', group_id='1', policy_id='2', webhook_id='3',
            capability_hash='xxx', capability_version="8")
        self.assertIn({'rel': 'capability', 'href': '/v1.0/execute/8/xxx/'},
                      json_blob)

    def test_capability_urls_unicode_escaped(self):
        """
        Even if unicode path bits are provided, only bytes urls are returned
        """
        # NOTE(review): 'str' here is the Python 2 byte string type.
        self.assertTrue(isinstance(
            get_autoscale_links(u'11111', group_id=u'1', policy_id=u'2',
                                format=None),
            str))
        snowman = get_autoscale_links('☃', group_id='☃', format=None)
        self.assertEqual(snowman, '/v1.0/%E2%98%83/groups/%E2%98%83/')
        self.assertTrue(isinstance(snowman, str))
class CollectionLinksTests(TestCase):
    """
    Tests for `get_collection_links`
    """

    def setUp(self):
        """
        Setup sample collection
        """
        self.coll = [{'id': '23'}, {'id': '567'}, {'id': '3444'}]
        set_config_data({'url_root': 'http://localhost'})
        self.addCleanup(set_config_data, {})

    def test_small_collection(self):
        """
        Collection len < limit gives self link only. No next link.
        """
        links = get_collection_links(self.coll, 'url', 'self', limit=20)
        self.assertEqual(links, [{'href': 'http://localhost/url?limit=20', 'rel': 'self'}])

    def test_limit_collection(self):
        """
        Collection len == limit gives next link also - defaults to calculating
        the next marker by id.
        """
        links = get_collection_links(self.coll, 'url', 'self', limit=3)
        self.assertEqual(links, [{'href': 'http://localhost/url?limit=3', 'rel': 'self'},
                                 {'href': 'http://localhost/url?limit=3&marker=3444', 'rel': 'next'}])

    def test_big_collection(self):
        """
        Collection len > limit gives next link with marker based on limit -
        defaults to calculating the next marker by id.
        """
        links = get_collection_links(self.coll, 'url', 'self', limit=2)
        self.assertEqual(links, [{'href': 'http://localhost/url?limit=2', 'rel': 'self'},
                                 {'href': 'http://localhost/url?limit=2&marker=567', 'rel': 'next'}])

    def test_no_limit(self):
        """
        Defaults to config limit if not given, leaving it off the self URL, and
        calculates the next marker by id by default
        """
        set_config_data({'limits': {'pagination': 3}, 'url_root': 'http://localhost'})
        links = get_collection_links(self.coll, 'url', 'self')
        self.assertEqual(links, [{'href': 'http://localhost/url', 'rel': 'self'},
                                 {'href': 'http://localhost/url?limit=3&marker=3444', 'rel': 'next'}])

    def test_rel_None(self):
        """
        Does not include self link if rel is None, even if there is a next
        link, and calculates the next marker by id by default
        """
        links = get_collection_links(self.coll, 'url', None, limit=3)
        self.assertEqual(links,
                         [{'href': 'http://localhost/url?limit=3&marker=3444', 'rel': 'next'}])

    def test_ignore_url_marker_query_params(self):
        """
        If the marker parameter is provided, it will override the marker params
        in the url
        """
        links = get_collection_links(self.coll, 'url?marker=0', 'self',
                                     marker='1')
        self.assertEqual(
            links, [{'href': 'http://localhost/url?limit=100&marker=1', 'rel': 'self'}])

    def test_ignore_url_limit_query_params(self):
        """
        If the limit parameter is provided, it will override the marker params
        in the url
        """
        links = get_collection_links(self.coll, 'url?limit=100&marker=1', 'self',
                                     limit=20)
        self.assertEqual(
            links, [{'href': 'http://localhost/url?limit=20&marker=1', 'rel': 'self'}])

    def test_passes_additional_query_params(self):
        """
        If there are more query params besides marker and limit,
        ``get_collection_links`` will not destroy them
        """
        links = get_collection_links(self.coll, 'url?hat=black&scarf=hipster', 'self',
                                     limit=1)
        self.assertEqual(
            links,
            [{'href': 'http://localhost/url?hat=black&limit=1&scarf=hipster', 'rel': 'self'},
             {'href': 'http://localhost/url?hat=black&limit=1&marker=23&scarf=hipster',
              'rel': 'next'}])

    def test_current_marker_in_self_link(self):
        """
        If a current marker is provided it is included in the self link
        """
        links = get_collection_links(self.coll, 'url', 'self', marker='1',
                                     limit=20)
        self.assertEqual(links,
                         [{'href': 'http://localhost/url?limit=20&marker=1', 'rel': 'self'}])

    def test_marker_by_offset_no_current_marker(self):
        """
        When passed ``next_marker_by_offset``, next_marker is the next item offset
        by the limit
        """
        links = get_collection_links(self.coll, 'url', 'self', limit=1,
                                     next_marker=next_marker_by_offset)
        self.assertEqual(links, [{'href': 'http://localhost/url?limit=1', 'rel': 'self'},
                                 {'href': 'http://localhost/url?limit=1&marker=1', 'rel': 'next'}])

    def test_marker_by_offset_from_current_marker(self):
        """
        When passed ``next_marker_by_offset``, next_marker is the next item offset
        by the limit, but takes into account the current marker too
        """
        links = get_collection_links(self.coll, 'url', 'self', limit=1,
                                     marker=1,
                                     next_marker=next_marker_by_offset)
        self.assertEqual(links, [{'href': 'http://localhost/url?limit=1&marker=1', 'rel': 'self'},
                                 {'href': 'http://localhost/url?limit=1&marker=2', 'rel': 'next'}])

    def test_use_provided_absolute_url_if_provided(self):
        """
        If an absolute url is passed, use that instead of trying to get the url
        root.
        """
        links = get_collection_links(self.coll, 'http://otherroot/url', 'self', limit=20)
        self.assertEqual(links, [{'href': 'http://otherroot/url?limit=20', 'rel': 'self'}])
class GetSpecificCollectionsLinks(TestCase):
    """
    Tests for `get_groups_links`, `get_policies_links` and `get_webhooks_links`
    """

    def setUp(self):
        """
        Mock get_autoscale_links and get_collection_links
        """
        self.gal = patch(self, 'otter.util.http.get_autoscale_links', return_value='url')
        self.gcl = patch(self, 'otter.util.http.get_collection_links', return_value='col links')

    def test_get_groups_links(self):
        """
        `get_groups_links` gets link from `get_autoscale_links` and delegates to
        get_collection_links
        """
        links = get_groups_links('groups', 'tid', rel='rel', limit=2, marker='3')
        self.assertEqual(links, 'col links')
        self.gal.assert_called_once_with('tid', format=None)
        self.gcl.assert_called_once_with('groups', 'url', 'rel', 2, '3')

    def test_get_policies_links(self):
        """
        `get_policies_links` gets link from `get_autoscale_links` and delegates to
        get_collection_links
        """
        links = get_policies_links('policies', 'tid', 'gid', rel='rel', limit=2, marker='3')
        self.assertEqual(links, 'col links')
        self.gal.assert_called_once_with('tid', 'gid', '', format=None)
        self.gcl.assert_called_once_with('policies', 'url', 'rel', 2, '3')

    def test_get_webhooks_links(self):
        """
        `get_webhooks_links` gets link from `get_autoscale_links` and delegates to
        get_collection_links
        """
        links = get_webhooks_links('webhooks', 'tid', 'gid', 'pid', rel='rel',
                                   limit=2, marker='3')
        self.assertEqual(links, 'col links')
        self.gal.assert_called_once_with('tid', 'gid', 'pid', '', format=None)
        self.gcl.assert_called_once_with('webhooks', 'url', 'rel', 2, '3')
class RouteTests(RequestTestMixin, TestCase):
"""
Test app.route.
"""
def test_non_strict_slashes(self):
"""
app.route should use strict_slahes=False which means that for a given
route ending in a '/' the non-'/' version will result in a the handler
being invoked directly instead of redirected.
"""
requests = [0]
class FakeApp(object):
app = OtterApp()
log = mock.Mock()
@app.route('/v1.0/foo/')
@with_transaction_id()
def foo(self, request):
requests[0] += 1
return 'ok'
self.assert_status_code(200, | |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for concurrency libraries."""
import glob
import os
import random
import re
import sys
import threading
import time
from flaky import flaky
import pytest
import coverage
from coverage import env
from coverage.data import line_counts
from coverage.exceptions import ConfigError
from coverage.files import abs_file
from coverage.misc import import_local_file
from tests.coveragetest import CoverageTest
# These libraries aren't always available, we'll skip tests if they aren't.
try:
import multiprocessing
except ImportError: # pragma: only jython
multiprocessing = None
try:
import eventlet
except ImportError:
eventlet = None
try:
import gevent
except ImportError:
gevent = None
try:
import greenlet
except ImportError: # pragma: only jython
greenlet = None
def measurable_line(l):
    """Is this a line of code coverage will measure?

    Not blank, not a comment, and not "else"
    """
    stripped = l.strip()
    if not stripped or stripped.startswith(('#', 'else:')):
        return False
    jython_unmeasured = ('try:', 'except:', 'except ', 'break', 'with ')
    if env.JYTHON and stripped.startswith(jython_unmeasured):
        # Jython doesn't measure these statements.
        return False                    # pragma: only jython
    return True
def line_count(s):
    """How many measurable lines are in `s`?"""
    return sum(1 for line in s.splitlines() if measurable_line(line))
def print_simple_annotation(code, linenos):
    """Print the lines in `code` with X for each line number in `linenos`."""
    for lineno, line in enumerate(code.splitlines(), start=1):
        marker = "X" if lineno in linenos else " "
        print(f" {marker} {line}")
class LineCountTest(CoverageTest):
    """Test the helpers here."""

    run_in_temp_dir = False

    def test_line_count(self):
        # Five measurable lines: the assignment, the `if`, both prints in the
        # branches (the `else:` itself doesn't count), and the final print.
        # Indentation inside CODE is irrelevant: measurable_line strips each line.
        CODE = """
            # Hey there!
            x = 1
            if x:
                print("hello")
            else:
                print("bye")
            print("done")
            """
        assert line_count(CODE) == 5
# The code common to all the concurrency models.
SUM_RANGE_Q = """
# Above this will be imports defining queue and threading.
class Producer(threading.Thread):
def __init__(self, limit, q):
threading.Thread.__init__(self)
self.limit = limit
self.q = q
def run(self):
for i in range(self.limit):
self.q.put(i)
self.q.put(None)
class Consumer(threading.Thread):
def __init__(self, q, qresult):
threading.Thread.__init__(self)
self.q = q
self.qresult = qresult
def run(self):
sum = 0
while "no peephole".upper():
i = self.q.get()
if i is None:
break
sum += i
self.qresult.put(sum)
def sum_range(limit):
q = queue.Queue()
qresult = queue.Queue()
c = Consumer(q, qresult)
p = Producer(limit, q)
c.start()
p.start()
p.join()
c.join()
return qresult.get()
# Below this will be something using sum_range.
"""
PRINT_SUM_RANGE = """
print(sum_range({QLIMIT}))
"""
# Import the things to use threads.
THREAD = """
import threading
import queue
"""
# Import the things to use eventlet.
EVENTLET = """
import eventlet.green.threading as threading
import eventlet.queue as queue
"""
# Import the things to use gevent.
GEVENT = """
from gevent import monkey
monkey.patch_thread()
import threading
import gevent.queue as queue
"""
# Uncomplicated code that doesn't use any of the concurrency stuff, to test
# the simple case under each of the regimes.
SIMPLE = """
total = 0
for i in range({QLIMIT}):
total += i
print(total)
"""
def cant_trace_msg(concurrency, the_module):
    """What might coverage.py say about a concurrency setting and imported module?"""
    # In the concurrency choices, "multiprocessing" doesn't count, so remove it.
    if "multiprocessing" in concurrency:
        choices = concurrency.split(",")
        choices.remove("multiprocessing")   # drops the first occurrence only
        concurrency = ",".join(choices)

    if the_module is None:
        # We don't even have the underlying module installed, we expect
        # coverage to alert us to this fact.
        return f"Couldn't trace with concurrency={concurrency}, the module isn't installed.\n"
    if env.C_TRACER or concurrency in ("thread", ""):
        return None
    return f"Can't support concurrency={concurrency} with PyTracer, only threads are supported.\n"
class ConcurrencyTest(CoverageTest):
    """Tests of the concurrency support in coverage.py."""

    # Upper bound handed to sum_range()/SIMPLE in the generated programs.
    QLIMIT = 1000

    def try_some_code(self, code, concurrency, the_module, expected_out=None):
        """Run some concurrency testing code and see that it was all covered.

        `code` is the Python code to execute. `concurrency` is the name of
        the concurrency regime to test it under. `the_module` is the imported
        module that must be available for this to work at all. `expected_out`
        is the text we expect the code to produce.
        """
        self.make_file("try_it.py", code)
        cmd = f"coverage run --concurrency={concurrency} try_it.py"
        out = self.run_command(cmd)
        expected_cant_trace = cant_trace_msg(concurrency, the_module)
        if expected_cant_trace is not None:
            # This combination can't be traced; verify the message and bail.
            assert out == expected_cant_trace
            pytest.skip(f"Can't test: {expected_cant_trace}")
        else:
            # We can fully measure the code if we are using the C tracer, which
            # can support all the concurrency, or if we are using threads.
            if expected_out is None:
                expected_out = "%d\n" % (sum(range(self.QLIMIT)))
            # Printed so the failing code is visible if the assert below trips.
            print(code)
            assert out == expected_out
            # Read the coverage file and see that try_it.py has all its lines
            # executed.
            data = coverage.CoverageData(".coverage")
            data.read()
            # If the test fails, it's helpful to see this info:
            fname = abs_file("try_it.py")
            linenos = data.lines(fname)
            print(f"{len(linenos)}: {linenos}")
            print_simple_annotation(code, linenos)
            lines = line_count(code)
            assert line_counts(data)['try_it.py'] == lines

    def test_threads(self):
        """The queue-based summing program is fully covered under threads."""
        code = (THREAD + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "thread", threading)

    def test_threads_simple_code(self):
        """Plain sequential code is fully covered under concurrency=thread."""
        code = SIMPLE.format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "thread", threading)

    def test_eventlet(self):
        """The queue-based summing program is fully covered under eventlet."""
        code = (EVENTLET + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "eventlet", eventlet)

    def test_eventlet_simple_code(self):
        """Plain sequential code is fully covered under concurrency=eventlet."""
        code = SIMPLE.format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "eventlet", eventlet)

    # https://github.com/nedbat/coveragepy/issues/663
    @pytest.mark.skipif(env.WINDOWS, reason="gevent has problems on Windows: #663")
    def test_gevent(self):
        """The queue-based summing program is fully covered under gevent."""
        code = (GEVENT + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "gevent", gevent)

    def test_gevent_simple_code(self):
        """Plain sequential code is fully covered under concurrency=gevent."""
        code = SIMPLE.format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "gevent", gevent)

    def test_greenlet(self):
        """Explicit greenlet switching is fully covered under concurrency=greenlet."""
        GREENLET = """\
from greenlet import greenlet
def test1(x, y):
    z = gr2.switch(x+y)
    print(z)
def test2(u):
    print(u)
    gr1.switch(42)
gr1 = greenlet(test1)
gr2 = greenlet(test2)
gr1.switch("hello", " world")
"""
        self.try_some_code(GREENLET, "greenlet", greenlet, "hello world\n42\n")

    def test_greenlet_simple_code(self):
        """Plain sequential code is fully covered under concurrency=greenlet."""
        code = SIMPLE.format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "greenlet", greenlet)

    def test_bug_330(self):
        """Regression test: eventlet + WeakKeyDictionary of greenthreads (#330)."""
        BUG_330 = """\
from weakref import WeakKeyDictionary
import eventlet
def do():
    eventlet.sleep(.01)
gts = WeakKeyDictionary()
for _ in range(100):
    gts[eventlet.spawn(do)] = True
    eventlet.sleep(.005)
eventlet.sleep(.1)
print(len(gts))
"""
        self.try_some_code(BUG_330, "eventlet", eventlet, "0\n")

    def test_threads_with_gevent(self):
        """Mixing a real thread with a gevent greenlet under concurrency=thread,gevent."""
        self.make_file("both.py", """\
import queue
import threading
import gevent
def work1(q):
    q.put(1)
def gwork(q):
    gevent.spawn(work1, q).join()
    q.put(None)
    print("done")
q = queue.Queue()
t = threading.Thread(target=gwork, args=(q,))
t.start()
t.join()
answer = q.get()
assert answer == 1
""")
        out = self.run_command("coverage run --concurrency=thread,gevent both.py")
        if gevent is None:
            # gevent isn't installed: coverage must say so, then we skip.
            assert out == (
                "Couldn't trace with concurrency=gevent, the module isn't installed.\n"
            )
            pytest.skip("Can't run test without gevent installed.")
        if not env.C_TRACER:
            assert out == (
                "Can't support concurrency=gevent with PyTracer, only threads are supported.\n"
            )
            pytest.skip("Can't run gevent with PyTracer")
        assert out == "done\n"
        out = self.run_command("coverage report -m")
        last_line = self.squeezed_lines(out)[-1]
        assert re.search(r"TOTAL \d+ 0 100%", last_line)

    def test_bad_concurrency(self):
        """An unknown --concurrency value is a configuration error."""
        with pytest.raises(ConfigError, match="Unknown concurrency choices: nothing"):
            self.command_line("run --concurrency=nothing prog.py")

    def test_bad_concurrency_in_config(self):
        """An unknown concurrency value in .coveragerc is a configuration error."""
        self.make_file(".coveragerc", "[run]\nconcurrency = nothing\n")
        with pytest.raises(ConfigError, match="Unknown concurrency choices: nothing"):
            self.command_line("run prog.py")

    def test_no_multiple_light_concurrency(self):
        """Two greenlet-style regimes at once conflict on the command line."""
        with pytest.raises(ConfigError, match="Conflicting concurrency settings: eventlet, gevent"):
            self.command_line("run --concurrency=gevent,eventlet prog.py")

    def test_no_multiple_light_concurrency_in_config(self):
        """Two greenlet-style regimes at once conflict in the config file."""
        self.make_file(".coveragerc", "[run]\nconcurrency = gevent, eventlet\n")
        with pytest.raises(ConfigError, match="Conflicting concurrency settings: eventlet, gevent"):
            self.command_line("run prog.py")

    def test_multiprocessing_needs_config_file(self):
        """concurrency=multiprocessing on the command line alone is rejected."""
        with pytest.raises(ConfigError, match="multiprocessing requires a configuration file"):
            self.command_line("run --concurrency=multiprocessing prog.py")
class WithoutConcurrencyModuleTest(CoverageTest):
    """Tests of what happens if the requested concurrency isn't installed."""

    @pytest.mark.parametrize("module", ["eventlet", "gevent", "greenlet"])
    def test_missing_module(self, module):
        """Asking for a concurrency lib that can't be imported raises ConfigError."""
        self.make_file("prog.py", "a = 1")
        # Simulate the library being uninstalled: a None entry in sys.modules
        # makes `import <module>` fail.
        sys.modules[module] = None
        msg = f"Couldn't trace with concurrency={module}, the module isn't installed."
        with pytest.raises(ConfigError, match=msg):
            self.command_line(f"run --concurrency={module} prog.py")
SQUARE_OR_CUBE_WORK = """
def work(x):
# Use different lines in different subprocesses.
if x % 2:
y = x*x
else:
y = x*x*x
return y
"""
SUM_RANGE_WORK = """
def work(x):
return sum_range((x+1)*100)
"""
MULTI_CODE = """
# Above this will be a definition of work().
import multiprocessing
import os
import time
import sys
def process_worker_main(args):
# Need to pause, or the tasks go too quickly, and some processes
# in the pool don't get any work, and then don't record data.
ret = work(*args)
time.sleep(0.1)
return os.getpid(), ret
if __name__ == "__main__": # pragma: no branch
# This if is on a single line so we can get 100% coverage
# even if we have no arguments.
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1])
pool = multiprocessing.Pool({NPROCS})
inputs = [(x,) for x in range({UPTO})]
outputs = pool.imap_unordered(process_worker_main, inputs)
pids = set()
total = 0
for pid, sq in outputs:
pids.add(pid)
total += sq
print("%d pids, total = %d" % (len(pids), total))
pool.close()
pool.join()
"""
@pytest.fixture(params=["fork", "spawn"], name="start_method")
def start_method_fixture(request):
    """Parameterized fixture to choose the start_method for multiprocessing."""
    method = request.param
    if method in multiprocessing.get_all_start_methods():
        return method
    # Windows doesn't support "fork".
    pytest.skip(f"start_method={method} not supported here")
@pytest.mark.skipif(not multiprocessing, reason="No multiprocessing in this Python")
@flaky(max_runs=30) # Sometimes a test fails due to inherent randomness. Try more times.
class MultiprocessingTest(CoverageTest):
"""Test support of the multiprocessing module."""
def try_multiprocessing_code(
self,
code,
expected_out,
the_module,
nprocs,
start_method,
concurrency="multiprocessing",
args="",
| |
<filename>GNN/train.py
import sys
sys.path.append("..")
import os
import time
import json
import torch
import argparse
import numpy as np
import torch.nn as nn
from tqdm import tqdm
from GNN.dataset import DataSet
from itertools import product
from gnn import HierarchicalGNN, MLP, Linear, CustomizedGNN
from torch.nn import functional as F
from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix
# NOTE(review): module-level switches. `material` appears to select the
# material-prediction (CustomizedClassifier) path rather than the
# hierarchical-function path -- confirm against the script entry point.
material = True
# presumably the number of repeated runs / cross-validation iterations -- TODO confirm
ITERATION = 10
class HierarchicalClassifier(object):
    """Train/evaluate a three-tier hierarchical classifier.

    Wraps one of three model families (hierarchical GNN by default, or an
    MLP / linear baseline) and provides `train` with early stopping plus
    `predict` for evaluation. `args` is an argparse-style namespace.
    """

    def __init__(self, args):
        self.verbose = args.verbose
        self.device = torch.device(args.device)
        self.batch_size = args.batch_size
        self.num_epochs = args.num_epochs
        self.num_layers = args.num_layers
        self.num_class_l1 = args.num_class_l1
        self.num_class_l2 = args.num_class_l2
        self.num_class_l3 = args.num_class_l3
        self.patience = args.patience
        self.node_dim = args.node_dim
        self.edge_dim = args.edge_dim
        self.hid_dim = args.hid_dim
        self.lr = args.lr
        # FIX: remember the network choice so train()/predict() no longer read
        # the module-level `args`, which only exists when run as a script.
        self.network = args.network
        if self.network == 'mlp':
            self.model = MLP(self.node_dim, self.hid_dim, self.num_class_l1,
                             self.num_class_l2, self.num_class_l3).to(self.device)
        elif self.network == 'linear':
            self.model = Linear(self.node_dim, self.hid_dim, self.num_class_l1,
                                self.num_class_l2, self.num_class_l3).to(self.device)
        else:  # Default is here
            self.model = HierarchicalGNN(self.node_dim, self.edge_dim, self.hid_dim,
                                         self.num_class_l1, self.num_class_l2, self.num_class_l3,
                                         self.num_layers, self.network).to(self.device)

    def load(self):
        """Restore model weights from 'checkpoint.pkl' in the working directory."""
        if os.path.exists('checkpoint.pkl'):
            self.model.load_state_dict(torch.load('checkpoint.pkl'))
        else:
            raise Exception('Checkpoint not found ...')

    def train(self, train_loader, val_loader, weights):
        """Train with Adam + cosine annealing and early stopping.

        `weights` maps tier index (1..3) to per-class CrossEntropy weights.
        The best (lowest validation loss) state is restored and saved to
        'checkpoint.pkl'.
        """
        best_loss, best_state, patience_count = 1e9, self.model.state_dict(), 0
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)  # Adam Optimizer
        # scheduler: adjusts the learning rate of each parameter group with a
        # cosine annealing schedule over num_epochs.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=self.num_epochs)
        weights_l1 = weights[1].to(self.device)  # tier 1 function weights
        weights_l2 = weights[2].to(self.device)  # tier 2 function weights
        weights_l3 = weights[3].to(self.device)  # tier 3 function weights
        for epoch in range(self.num_epochs):
            self.model.train()  # set to train mode
            epoch_loss = 0.
            start = time.time()
            for batch in train_loader:
                batch = batch.to(self.device)
                optimizer.zero_grad()
                # forward() inputs: batch.x = node attributes,
                # batch.edge_index = endpoints of each edge, batch.e = edge
                # attributes, one-hot y1/y2 = tier 1/2 ground truth.
                if self.network in ('mlp', 'linear'):
                    logits_l1, logits_l2, logits_l3 = self.model(
                        batch.x.float(), F.one_hot(batch.y1, self.num_class_l1), F.one_hot(batch.y2, self.num_class_l2))
                else:  # by default
                    logits_l1, logits_l2, logits_l3 = self.model(
                        batch.x.float(), batch.edge_index, batch.e.float(),
                        F.one_hot(batch.y1, self.num_class_l1),
                        F.one_hot(batch.y2, self.num_class_l2))
                # Joint loss across all tiers; label 0 means "unlabeled" and
                # is masked out of each tier's loss.
                is_labeled = batch.y1 > 0  # It actually has a ground truth
                loss1 = nn.CrossEntropyLoss(weight=weights_l1)(logits_l1[is_labeled], batch.y1[is_labeled])
                is_labeled = batch.y2 > 0
                loss2 = nn.CrossEntropyLoss(weight=weights_l2)(logits_l2[is_labeled], batch.y2[is_labeled])
                is_labeled = batch.y3 > 0
                loss3 = nn.CrossEntropyLoss(weight=weights_l3)(logits_l3[is_labeled], batch.y3[is_labeled])
                loss = loss1 + loss2 + loss3
                epoch_loss += loss.item()
                loss.backward()    # back propagation (compute gradients)
                optimizer.step()   # optimizer takes one step
            scheduler.step()       # scheduler takes one step per epoch
            end = time.time()
            val_loss, _, _, _, _, _, _ = self.predict(val_loader)  # validation loss
            if self.verbose:
                print(f'Epoch: {epoch + 1:03d}/{self.num_epochs}, Time: {end - start:.2f}s, '
                      f'Train Loss: {epoch_loss / len(train_loader):.4f}, Val Loss: {val_loss: .4f}')
            if best_loss > val_loss:  # if this state better than previous best, store it
                best_loss = val_loss
                best_state = self.model.state_dict()
                patience_count = 0
            else:
                patience_count += 1
                if patience_count == self.patience:
                    if self.verbose:
                        print('Early stopping ...')
                    break
        self.model.load_state_dict(best_state)
        print("Saving the model...")
        torch.save(best_state, 'checkpoint.pkl')

    @torch.no_grad()
    def predict(self, data_loader):
        """Evaluate on `data_loader`.

        Returns (mean loss, tier-1/2/3 argmax predictions, tier-1/2/3 ground
        truths), predictions/truths concatenated over all batches.
        """
        self.model.eval()  # set to evaluation
        loss = 0.
        yp_l1, yp_l2, yp_l3 = [], [], []
        yt_l1, yt_l2, yt_l3 = [], [], []
        for batch in data_loader:
            batch = batch.to(self.device)
            if self.network in ('mlp', 'linear'):
                logits_l1, logits_l2, logits_l3 = self.model.predict(batch.x.float())
            else:
                logits_l1, logits_l2, logits_l3 = self.model.predict(batch.x.float(), batch.edge_index, batch.e.float())
            # Unweighted loss on labeled entries only (label 0 = unlabeled).
            is_labeled = batch.y1 > 0
            loss1 = nn.CrossEntropyLoss()(logits_l1[is_labeled], batch.y1[is_labeled])
            is_labeled = batch.y2 > 0
            loss2 = nn.CrossEntropyLoss()(logits_l2[is_labeled], batch.y2[is_labeled])
            is_labeled = batch.y3 > 0
            loss3 = nn.CrossEntropyLoss()(logits_l3[is_labeled], batch.y3[is_labeled])
            loss += (loss1 + loss2 + loss3).item()
            yp_l1.append(torch.argmax(logits_l1, dim=-1))
            yp_l2.append(torch.argmax(logits_l2, dim=-1))
            yp_l3.append(torch.argmax(logits_l3, dim=-1))
            yt_l1.append(batch.y1)
            yt_l2.append(batch.y2)
            yt_l3.append(batch.y3)
        loss /= len(data_loader)
        yp_l1 = torch.cat(yp_l1, -1)
        yp_l2 = torch.cat(yp_l2, -1)
        yp_l3 = torch.cat(yp_l3, -1)
        yt_l1 = torch.cat(yt_l1, -1)
        yt_l2 = torch.cat(yt_l2, -1)
        yt_l3 = torch.cat(yt_l3, -1)
        return loss, yp_l1, yp_l2, yp_l3, yt_l1, yt_l2, yt_l3
class CustomizedClassifier(object):
    """Train/evaluate a single-head material classifier.

    Variant of HierarchicalClassifier that predicts only the material label
    via CustomizedGNN (the mlp/linear branches are kept for interface parity
    but are not wired for material prediction).
    """

    def __init__(self, args):
        self.verbose = args.verbose
        self.device = torch.device(args.device)
        self.batch_size = args.batch_size
        self.num_epochs = args.num_epochs
        self.num_layers = args.num_layers
        self.num_class_l1 = args.num_class_l1
        self.num_class_l2 = args.num_class_l2
        self.num_class_l3 = args.num_class_l3
        self.num_materials = args.num_materials
        self.patience = args.patience
        self.node_dim = args.node_dim
        self.edge_dim = args.edge_dim
        self.hid_dim = args.hid_dim
        self.lr = args.lr
        # FIX: remember the network choice so train()/predict() no longer read
        # the module-level `args`, which only exists when run as a script.
        self.network = args.network
        if self.network == 'mlp':
            self.model = MLP(self.node_dim, self.hid_dim, self.num_class_l1,
                             self.num_class_l2, self.num_class_l3).to(self.device)
        elif self.network == 'linear':
            self.model = Linear(self.node_dim, self.hid_dim, self.num_class_l1,
                                self.num_class_l2, self.num_class_l3).to(self.device)
        else:  # Default is here
            self.model = CustomizedGNN(self.node_dim, self.edge_dim, self.hid_dim,
                                       self.num_materials,
                                       self.num_layers, self.network).to(self.device)

    def load(self):
        """Restore model weights from 'checkpoint.pkl' in the working directory."""
        if os.path.exists('checkpoint.pkl'):
            self.model.load_state_dict(torch.load('checkpoint.pkl'))
        else:
            raise Exception('Checkpoint not found ...')

    def train(self, train_loader, val_loader, weights):
        """Train with Adam + cosine annealing and early stopping.

        `weights[4]` holds the per-class material weights. The best (lowest
        validation loss) state is restored and saved to 'checkpoint.pkl'.
        """
        best_loss, best_state, patience_count = 1e9, self.model.state_dict(), 0
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)  # Adam Optimizer
        # Cosine annealing of the learning rate over num_epochs.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=self.num_epochs)
        weights_material = weights[4].to(self.device)
        for epoch in range(self.num_epochs):
            self.model.train()  # set to train mode
            epoch_loss = 0.
            start = time.time()
            for batch in train_loader:
                batch = batch.to(self.device)
                optimizer.zero_grad()
                if self.network in ('mlp', 'linear'):
                    # NOTE(review): this branch still returns three tier logits
                    # and does not define material_predictions -- the material
                    # loss below would fail here. Confirm mlp/linear is unused
                    # with CustomizedClassifier.
                    logits_l1, logits_l2, logits_l3 = self.model(
                        batch.x.float(), F.one_hot(batch.y1, self.num_class_l1), F.one_hot(batch.y2, self.num_class_l2))
                else:  # by default
                    material_predictions = self.model(
                        batch.x.float(), batch.edge_index, batch.e.float())
                # Weighted loss on labeled entries only (label 0 = unlabeled).
                is_labeled = batch.material > 0  # It actually has a ground truth
                loss = nn.CrossEntropyLoss(weight=weights_material)(material_predictions[is_labeled],
                                                                    batch.material[is_labeled])
                epoch_loss += loss.item()
                loss.backward()    # back propagation (compute gradients)
                optimizer.step()   # optimizer takes one step
            scheduler.step()       # scheduler takes one step per epoch
            end = time.time()
            val_loss, _, _ = self.predict(val_loader, weights_material)  # validation loss
            if self.verbose:
                print(f'Epoch: {epoch + 1:03d}/{self.num_epochs}, Time: {end - start:.2f}s, '
                      f'Train Loss: {epoch_loss / len(train_loader):.4f}, Val Loss: {val_loss: .4f}')
            if best_loss > val_loss:  # if this state better than previous best, store it
                best_loss = val_loss
                best_state = self.model.state_dict()
                patience_count = 0
            else:
                patience_count += 1
                if patience_count == self.patience:
                    if self.verbose:
                        print('Early stopping ...')
                    break
        self.model.load_state_dict(best_state)
        print("Saving the model...")
        torch.save(best_state, 'checkpoint.pkl')

    @torch.no_grad()
    def predict(self, data_loader, weights_material):
        """Evaluate on `data_loader`.

        Returns (mean loss, material argmax predictions, material ground
        truths). `weights_material` is accepted for interface parity; the
        validation loss is computed unweighted, matching
        HierarchicalClassifier.predict.
        """
        self.model.eval()  # set to evaluation
        loss = 0.
        material_p, material_t = [], []
        for batch in data_loader:
            batch = batch.to(self.device)
            if self.network in ('mlp', 'linear'):  # ignore this
                logits_l1, logits_l2, logits_l3 = self.model.predict(batch.x.float())
            else:
                material_predictions = self.model.predict(batch.x.float(), batch.edge_index, batch.e.float())
            is_labeled = batch.material > 0
            # BUG FIX: accumulate the per-batch loss. Previously `loss = ...`
            # overwrote the running total each iteration, so the "mean" below
            # was just the last batch's loss divided by the batch count.
            # `.item()` also keeps `loss` a plain float, as in
            # HierarchicalClassifier.predict.
            loss += nn.CrossEntropyLoss()(material_predictions[is_labeled],
                                          batch.material[is_labeled]).item()
            material_p.append(torch.argmax(material_predictions, dim=-1))
            material_t.append(batch.material)
        loss /= len(data_loader)
        material_p = torch.cat(material_p, -1)
        material_t = torch.cat(material_t, -1)
        return loss, material_p, material_t
def cross_validate(args):
dataset = DataSet(args.batch_size, args.node_feature, args.edge_feature)
args.node_dim = dataset.node_dim
args.edge_dim = dataset.edge_dim
args.num_class_l1 = dataset.num_class_l1
args.num_class_l2 = dataset.num_class_l2
args.num_class_l3 = dataset.num_class_l3
# # TODO: add num_materials
# args.num_materials = dataset.num_materials
# print("node_dim: ", dataset.node_dim) # 316
# print("edge_dim: ", dataset.edge_dim) # 75
# print("num_class_l1: ", dataset.num_class_l1) # 9
# print("num_class_l2: ", dataset.num_class_l2) # 22
# print("num_class_l3: ", dataset.num_class_l3) # 23
result = { # Store the results to be put into the test log
'tier1': {
'f1': {
'macro': {'mean': .0, 'std': .0, 'data': []},
'micro': {'mean': .0, 'std': .0, 'data': []},
'weighted': {'mean': .0, 'std': .0, 'data': []},
},
'precision': {
'macro': {'mean': .0, 'std': .0, 'data': []},
'micro': {'mean': .0, 'std': .0, 'data': []},
'weighted': {'mean': .0, 'std': .0, 'data': []},
},
'recall': {
'macro': {'mean': .0, | |
<filename>lib/helpers/pvrartwork.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
script.module.metadatautils
pvrartwork.py
Get metadata for Kodi PVR programs
"""
import os, sys
if sys.version_info.major == 3:
from .utils import get_clean_image, DialogSelect, log_msg, extend_dict, ADDON_ID, download_artwork, normalize_string
from urllib.parse import quote_plus
else:
from utils import get_clean_image, DialogSelect, log_msg, extend_dict, ADDON_ID, download_artwork, normalize_string
from urllib import quote_plus
import xbmc
import xbmcgui
import xbmcvfs
from difflib import SequenceMatcher as SM
from operator import itemgetter
import re
from datetime import timedelta
class PvrArtwork(object):
"""get artwork for kodi pvr"""
    def __init__(self, metadatautils):
        """Initialize - optionally provide our base MetadataUtils class."""
        self._mutils = metadatautils
        # shortcut to the cache helper owned by the MetadataUtils instance
        self.cache = self._mutils.cache
    def get_pvr_artwork(self, title, channel, genre="", manual_select=False, ignore_cache=False):
        """
        collect full metadata and artwork for pvr entries
        parameters: title (required)
        channel: channel name (required)
        genre: (optional)
        manual_select: prompt the user during lookup (optional)
        ignore_cache: force a fresh lookup even if cached (optional)
        the more optional parameters are supplied, the better the search results
        """
        details = {"art": {}}
        # try cache first
        # use searchtitle when searching cache
        cache_title = title.lower()
        cache_channel = channel.lower()
        searchtitle = self.get_searchtitle(cache_title, cache_channel)
        # original cache_str assignment cache_str = "pvr_artwork.%s.%s" % (title.lower(), channel.lower())
        cache_str = "pvr_artwork.%s.%s" % (searchtitle, channel.lower())
        cache = self._mutils.cache.get(cache_str)
        if cache and not manual_select and not ignore_cache:
            log_msg("get_pvr_artwork - return data from cache - %s" % cache_str)
            details = cache
        else:
            # no cache - start our lookup adventure
            log_msg("get_pvr_artwork - no data in cache - start lookup - %s" % cache_str)
            # workaround for recordings: pull channel/genre from the local
            # recording entry when they weren't both supplied
            recordingdetails = self.lookup_local_recording(title, channel)
            if recordingdetails and not (channel and genre):
                genre = recordingdetails["genre"]
                channel = recordingdetails["channel"]
            details["pvrtitle"] = title
            details["pvrchannel"] = channel
            details["pvrgenre"] = genre
            details["cachestr"] = cache_str
            details["media_type"] = ""
            details["art"] = {}
            # filter genre unknown/other (Kodi localized string 19499)
            if not genre or genre.split(" / ")[0] in xbmc.getLocalizedString(19499).split(" / "):
                details["genre"] = []
                genre = ""
                log_msg("genre is unknown so ignore....")
            else:
                details["genre"] = genre.split(" / ")
                details["media_type"] = self.get_mediatype_from_genre(genre)
            searchtitle = self.get_searchtitle(title, channel)
            # only continue if we pass our basic checks
            filterstr = self.pvr_proceed_lookup(title, channel, genre, recordingdetails)
            proceed_lookup = False if filterstr else True
            if not proceed_lookup and manual_select:
                # warn user about active skip filter
                proceed_lookup = xbmcgui.Dialog().yesno(
                    message=self._mutils.addon.getLocalizedString(32027), line2=filterstr,
                    heading=xbmc.getLocalizedString(750))
            if proceed_lookup:
                # if manual lookup get the title from the user
                if manual_select:
                    if sys.version_info.major == 3:
                        searchtitle = xbmcgui.Dialog().input(xbmc.getLocalizedString(16017), searchtitle,
                                                             type=xbmcgui.INPUT_ALPHANUM)
                    else:
                        searchtitle = xbmcgui.Dialog().input(xbmc.getLocalizedString(16017), searchtitle,
                                                             type=xbmcgui.INPUT_ALPHANUM).decode("utf-8")
                    if not searchtitle:
                        return
                # if manual lookup and no mediatype, ask the user
                if manual_select and not details["media_type"]:
                    yesbtn = self._mutils.addon.getLocalizedString(32042)
                    nobtn = self._mutils.addon.getLocalizedString(32043)
                    header = self._mutils.addon.getLocalizedString(32041)
                    if xbmcgui.Dialog().yesno(header, header, yeslabel=yesbtn, nolabel=nobtn):
                        details["media_type"] = "movie"
                    else:
                        details["media_type"] = "tvshow"
                # append thumb from recordingdetails
                if recordingdetails and recordingdetails.get("thumbnail"):
                    details["art"]["thumb"] = recordingdetails["thumbnail"]
                # lookup custom path
                details = extend_dict(details, self.lookup_custom_path(searchtitle, title))
                # lookup movie/tv library
                details = extend_dict(details, self.lookup_local_library(searchtitle, details["media_type"]))
                # do internet scraping if enabled
                if self._mutils.addon.getSetting("pvr_art_scraper") == "true":
                    log_msg(
                        "pvrart start scraping metadata for title: %s - media_type: %s" %
                        (searchtitle, details["media_type"]))
                    # prefer tmdb scraper
                    tmdb_result = self._mutils.get_tmdb_details(
                        "", "", searchtitle, "", "", details["media_type"],
                        manual_select=manual_select, ignore_cache=manual_select)
                    log_msg("pvrart lookup for title: %s - TMDB result: %s" % (searchtitle, tmdb_result))
                    if tmdb_result:
                        details["media_type"] = tmdb_result["media_type"]
                        details = extend_dict(details, tmdb_result)
                    # fallback to tvdb scraper
                    # following 3 lines added as part of "auto refresh" fix. ensure manual_select=true for TVDB lookup. No idea why this works
                    # NOTE(review): assigns the *string* "true" (always truthy),
                    # not the boolean True -- confirm lookup_tvdb only treats
                    # manual_select as a truthiness flag.
                    tempmanualselect = manual_select
                    manual_select="true"
                    log_msg("DEBUG INFO: TVDB lookup: searchtitle: %s channel: %s manual_select: %s" %(searchtitle, channel, manual_select))
                    if (not tmdb_result or (tmdb_result and not tmdb_result.get("art")) or
                            details["media_type"] == "tvshow"):
                        # original code: tvdb_match = self.lookup_tvdb(searchtitle, channel, manual_select=manual_select). part of "auto refresh" fix.
                        tvdb_match = self.lookup_tvdb(searchtitle, channel, manual_select=manual_select, tempmanualselect=tempmanualselect)
                        log_msg("pvrart lookup for title: %s - TVDB result: %s" % (searchtitle, tvdb_match))
                        if tvdb_match:
                            # get full tvdb results and extend with tmdb
                            if not details["media_type"]:
                                details["media_type"] = "tvshow"
                            details = extend_dict(details, self._mutils.thetvdb.get_series(tvdb_match))
                            details = extend_dict(details, self._mutils.tmdb.get_videodetails_by_externalid(
                                tvdb_match, "tvdb_id"), ["poster", "fanart"])
                    # part of "auto refresh" fix - revert manual_select to original value
                    manual_select = tempmanualselect
                    # fanart.tv scraping - append result to existing art
                    if details.get("imdbnumber") and details["media_type"] == "movie":
                        details["art"] = extend_dict(
                            details["art"], self._mutils.fanarttv.movie(
                                details["imdbnumber"]), [
                                "poster", "fanart", "landscape"])
                    elif details.get("tvdb_id") and details["media_type"] == "tvshow":
                        details["art"] = extend_dict(
                            details["art"], self._mutils.fanarttv.tvshow(
                                details["tvdb_id"]), [
                                "poster", "fanart", "landscape"])
                    # append omdb details
                    if details.get("imdbnumber"):
                        details = extend_dict(
                            details, self._mutils.omdb.get_details_by_imdbid(
                                details["imdbnumber"]), [
                                "rating", "votes"])
                    # set thumbnail - prefer scrapers
                    thumb = ""
                    if details.get("thumbnail"):
                        thumb = details["thumbnail"]
                    elif details["art"].get("landscape"):
                        thumb = details["art"]["landscape"]
                    elif details["art"].get("fanart"):
                        thumb = details["art"]["fanart"]
                    elif details["art"].get("poster"):
                        thumb = details["art"]["poster"]
                    # use google images as last-resort fallback for thumbs - if enabled
                    elif self._mutils.addon.getSetting("pvr_art_google") == "true":
                        if manual_select:
                            google_title = searchtitle
                        else:
                            google_title = '%s %s' % (searchtitle, "imdb")
                        thumb = self._mutils.google.search_image(google_title, manual_select)
                    if thumb:
                        details["thumbnail"] = thumb
                        details["art"]["thumb"] = thumb
                    # extrafanart: expose extra fanart images as numbered art keys
                    if details["art"].get("fanarts"):
                        for count, item in enumerate(details["art"]["fanarts"]):
                            details["art"]["fanart.%s" % count] = item
                        if not details["art"].get("extrafanart") and len(details["art"]["fanarts"]) > 1:
                            details["art"]["extrafanart"] = "plugin://script.skin.helper.service/"\
                                "?action=extrafanart&fanarts=%s" % quote_plus(repr(details["art"]["fanarts"]))
                    # download artwork to custom folder
                    if self._mutils.addon.getSetting("pvr_art_download") == "true":
                        details["art"] = download_artwork(self.get_custom_path(searchtitle, title), details["art"])
                log_msg("pvrart lookup for title: %s - final result: %s" % (searchtitle, details))
            # always store result in cache
            # manual lookups should not expire too often
            # NOTE(review): both branches use the same 365-day expiration;
            # presumably the else-branch was meant to expire sooner -- confirm.
            if manual_select:
                self._mutils.cache.set(cache_str, details, expiration=timedelta(days=365))
            else:
                self._mutils.cache.set(cache_str, details, expiration=timedelta(days=365))
        return details
    def manual_set_pvr_artwork(self, title, channel, genre):
        """manual override artwork options: let the user pick each art type."""
        details = self.get_pvr_artwork(title, channel, genre)
        cache_str = details["cachestr"]
        # show dialogselect with all artwork option
        # NOTE(review): py3-style relative import, while the module header
        # falls back to absolute imports under py2 -- confirm this code path
        # is py3-only.
        from .utils import manual_set_artwork
        changemade, artwork = manual_set_artwork(details["art"], "pvr")
        if changemade:
            details["art"] = artwork
            # save results in cache
            self._mutils.cache.set(cache_str, details, expiration=timedelta(days=365))
    def pvr_artwork_options(self, title, channel, genre):
        """show options for pvr artwork: a select dialog of maintenance actions."""
        # NOTE(review): `if not channel and genre:` only triggers when genre is
        # set but channel isn't -- likely intended `not channel and not genre`;
        # confirm against callers before changing.
        if not channel and genre:
            channel, genre = self.get_pvr_channel_and_genre(title)
        ignorechannels = self._mutils.addon.getSetting("pvr_art_ignore_channels").split("|")
        ignoretitles = self._mutils.addon.getSetting("pvr_art_ignore_titles").split("|")
        options = []
        options.append(self._mutils.addon.getLocalizedString(32028))  # Refresh item (auto lookup)
        options.append(self._mutils.addon.getLocalizedString(32029))  # Refresh item (manual lookup)
        options.append(self._mutils.addon.getLocalizedString(32036))  # Choose art
        if channel in ignorechannels:
            options.append(self._mutils.addon.getLocalizedString(32030))  # Remove channel from ignore list
        else:
            options.append(self._mutils.addon.getLocalizedString(32031))  # Add channel to ignore list
        if title in ignoretitles:
            options.append(self._mutils.addon.getLocalizedString(32032))  # Remove title from ignore list
        else:
            options.append(self._mutils.addon.getLocalizedString(32033))  # Add title to ignore list
        options.append(self._mutils.addon.getLocalizedString(32034))  # Open addon settings
        header = self._mutils.addon.getLocalizedString(32035)
        dialog = xbmcgui.Dialog()
        ret = dialog.select(header, options)
        del dialog
        # `ret` is the selected index; the list above always has 6 entries,
        # so the indices below are stable.
        if ret == 0:
            # Refresh item (auto lookup)
            self.get_pvr_artwork(title=title, channel=channel, genre=genre, ignore_cache=True, manual_select=False)
        elif ret == 1:
            # Refresh item (manual lookup)
            self.get_pvr_artwork(title=title, channel=channel, genre=genre, ignore_cache=True, manual_select=True)
        elif ret == 2:
            # Choose art
            self.manual_set_pvr_artwork(title, channel, genre)
        elif ret == 3:
            # Add/remove channel to ignore list, then refresh
            if channel in ignorechannels:
                ignorechannels.remove(channel)
            else:
                ignorechannels.append(channel)
            ignorechannels_str = "|".join(ignorechannels)
            self._mutils.addon.setSetting("pvr_art_ignore_channels", ignorechannels_str)
            self.get_pvr_artwork(title=title, channel=channel, genre=genre, ignore_cache=True, manual_select=False)
        elif ret == 4:
            # Add/remove title to ignore list, then refresh
            if title in ignoretitles:
                ignoretitles.remove(title)
            else:
                ignoretitles.append(title)
            ignoretitles_str = "|".join(ignoretitles)
            self._mutils.addon.setSetting("pvr_art_ignore_titles", ignoretitles_str)
            self.get_pvr_artwork(title=title, channel=channel, genre=genre, ignore_cache=True, manual_select=False)
        elif ret == 5:
            # Open addon settings
            xbmc.executebuiltin("Addon.OpenSettings(%s)" % ADDON_ID)
def pvr_proceed_lookup(self, title, channel, genre, recordingdetails):
"""perform some checks if we can proceed with the lookup"""
filters = []
if not title:
filters.append("Title is empty")
for item in self._mutils.addon.getSetting("pvr_art_ignore_titles").split("|"):
if item and item.lower() == title.lower():
filters.append("Title is in list of titles to ignore")
for item in self._mutils.addon.getSetting("pvr_art_ignore_channels").split("|"):
if item and item.lower() == channel.lower():
filters.append("Channel is in list of channels to ignore")
for item in self._mutils.addon.getSetting("pvr_art_ignore_genres").split("|"):
if genre and item and item.lower() in genre.lower():
filters.append("Genre is in list of genres to ignore")
if self._mutils.addon.getSetting("pvr_art_ignore_commongenre") == "true":
# skip common genres like sports, weather, news etc.
genre = genre.lower()
kodi_strings = [19516, 19517, 19518, 19520, 19548, 19549, 19551,
19552, 19553, 19554, 19555, 19556, 19557, 19558, 19559]
for kodi_string in kodi_strings:
kodi_string = xbmc.getLocalizedString(kodi_string).lower()
if (genre and (genre in kodi_string or kodi_string in genre)) or kodi_string in title:
filters.append("Common genres like weather/sports are set to be ignored")
if self._mutils.addon.getSetting("pvr_art_recordings_only") == "true" and not recordingdetails:
filters.append("PVR Artwork is enabled for recordings only")
if filters:
filterstr = " - ".join(filters)
log_msg("PVR artwork - filter active for title: %s - channel %s --> %s" % (title, channel, filterstr))
return filterstr
else:
return ""
@staticmethod
def get_mediatype_from_genre(genre):
"""guess media type from genre for better matching"""
media_type = ""
if "movie" | |
= self.get_common_message_string(ticket)
Trace.log(level, "FINISHED %s returned %s" % (common_message, status))
# log the new work list
self.log_work_list(ticket)
# report back to original client - probably a mover
#
# Some functions need to handle the reply directly (list_volumes).
# They, should set 'no_reply' to python true in the ticket.
if not ticket.get('no_reply', None):
self.reply_to_caller(ticket)
self.robotNotAtHome = 1
self.lastWorkTime = time.time()
# if work queue is closed and work_list is empty, do insert
#
# Shouldn't there be a better way of scheduling this? Waiting until
# a previous request complets doesn't seem correct. This also leads
# into how sts could/should be processed.
sts = self.doWaitingInserts()
# simple elapsed timer
def delta_t(self, begin):
    """Return (elapsed, now), where ``now`` is os.times() elapsed real time.

    ``begin`` is a value previously returned as ``now``; the first element
    is the wall-clock seconds since then.
    """
    times = os.times()
    now = times[4]  # fifth field: elapsed real time since a fixed point
    return (now - begin, now)
#Kill the process with pid, use cmd as the command string that is
# getting killed for logging purposes.
def kill_it(self, pid, cmd):
    # Ask the process to terminate (SIGTERM), then escalate to SIGKILL if
    # it is still alive one second later.  ``cmd`` is used only for logging.
    message = "killing %d => %s" % (pid, cmd)
    print timeofday.tod(), message
    Trace.trace(e_errors.INFO, message)
    os.kill(pid,signal.SIGTERM)
    time.sleep(1)
    # WNOHANG: waitpid returns pid 0 immediately if the child has not exited.
    p, r = os.waitpid(pid,os.WNOHANG)
    if p == 0:
        # Still running after the grace period -- force it.
        message = "kill -9ing %d => %s" % (pid, cmd)
        print timeofday.tod(), message
        Trace.trace(e_errors.INFO, message)
        os.kill(pid, signal.SIGKILL)
        time.sleep(2)
        # Reap the (now surely dead) child so it does not linger as a zombie.
        p, r = os.waitpid(pid,os.WNOHANG)
#########################################################################
#
# AML2 robot loader server
#
#########################################################################
class AML2_MediaLoader(MediaLoaderMethods):
def __init__(self, medch, max_work=7, csc=None):
    """Set up the AML/2 media loader.

    Loads the vendor ACI wrapper (``aml2``) and pulls the robot arm,
    I/O box media assignments, drive cleaning times, and idle-home
    timeout out of the media changer configuration.  Missing mandatory
    keys are logged and then re-read on purpose so the resulting
    KeyError aborts construction.
    """
    global aml2
    MediaLoaderMethods.__init__(self, medch, max_work, csc)
    try:
        import aml2
    except ImportError:
        # Without the vendor library there is nothing useful we can do.
        message = "Unable to load ACI library. Exiting."
        Trace.log(e_errors.ERROR, message)
        sys.stderr.write("%s\n" % message)
        sys.exit(1)
    # robot choices are 'R1', 'R2' or 'Both'
    if self.mc_config.has_key('RobotArm'): # error if robot not in config
        self.robotArm = string.strip(self.mc_config['RobotArm'])
    else:
        Trace.log(e_errors.ERROR, "ERROR:mc:aml2 no robot arm key in configuration")
        self.robotArm = string.strip(self.mc_config['RobotArm']) # force the exception
        return
    if self.mc_config.has_key('IOBoxMedia'): # error if IO box media assignments not in config
        self.mediaIOassign = self.mc_config['IOBoxMedia']
    else:
        Trace.log(e_errors.ERROR, "ERROR:mc:aml2 no IO box media assignments in configuration")
        self.mediaIOassign = self.mc_config['IOBoxMedia'] # force the exception
        return
    if self.mc_config.has_key('DriveCleanTime'): # error if DriveCleanTime assignments not in config
        self.driveCleanTime = self.mc_config['DriveCleanTime']
    else:
        Trace.log(e_errors.ERROR, "ERROR:mc:aml2 no DriveCleanTime assignments in configuration")
        self.driveCleanTime = self.mc_config['DriveCleanTime'] # force the exception
        return
    # IdleTimeHome is optional: only accept an int above 20 seconds,
    # otherwise keep the inherited default idle limit.
    if self.mc_config.has_key('IdleTimeHome'):
        if (type(self.mc_config['IdleTimeHome']) == types.IntType and
            self.mc_config['IdleTimeHome'] > 20):
            self.idleTimeLimit = self.mc_config['IdleTimeHome']
        else:
            Trace.log(e_errors.INFO, "mc:aml2 IdleHomeTime is not defined or too small, default used")
    # "prepare" for this robot means dismounting whatever is in the drive.
    self.prepare=self.unload
# retry function call
def retry_function(self,function,*args):
    """Call ``function(*args)`` with retries for transient robot errors.

    Returns the function's (status, code, message) tuple.  Retries up to
    getNretry() times on "busy"-style aci error codes, with code-specific
    sleeps; gives up immediately on any other nonzero code.  Any raised
    exception is converted into an ad-hoc ("ERROR", 37, ...) tuple.
    """
    count = self.getNretry()
    rpcErrors = 0
    # Seed with a non-OK status so the loop body runs at least once.
    sts=("",0,"")
    while count > 0 and sts[0] != e_errors.OK:
        try:
            sts=apply(function,args)
            if sts[1] != 0:
                if self.logdetail:
                    Trace.log(e_errors.ERROR, 'retry_function: function %s %s error %s'%(repr(function),args,sts[2]))
                # RPC failures get a short sleep and do NOT consume a retry,
                # but are capped at two in a row via rpcErrors.
                if sts[1] == 1 and rpcErrors < 2: # RPC failure
                    time.sleep(10)
                    rpcErrors = rpcErrors + 1
                elif (sts[1] == 5 or # requested drive in use
                      sts[1] == 8 or # DAS was unable to communicate with AMU
                      sts[1] == 10 or # AMU was unable to communicate with robot
                      #sts[1] == 34 or # The aci request timed out
                      sts[1] == 24): # requested volume in use
                    count = count - 1
                    time.sleep(20)
                elif (sts[1] == e_errors.MC_VOLNOTHOME): # tape not in home position
                    count = count - 1
                    time.sleep(120)
                else:
                    # Unrecognized error code: not retryable, return it as-is.
                    break
        except:
            exc,val,tb = Trace.handle_error()
            return "ERROR", 37, str(val) #XXX very ad-hoc!
            ## this is "command error" in aml2.py
    return sts
"""
def checkMyself(self):
# do regularily scheduled internal checks
if self.robotNotAtHome and (time.time()-self.lastWorkTime) > self.idleTimeLimit:
self.robotNotAtHome = 0
ticket = { 'function' : 'homeAndRestart', 'robotArm' : self.robotArm }
sts = self.robotHomeAndRestart(ticket)
self.lastWorkTime = time.time()
"""
#########################################################################
# These functions are overridden from the generic class.
#########################################################################
# load volume into the drive;
def load(self, ticket):
    """Mount the volume named in the ticket into the requested drive."""
    vol_ticket = ticket['vol_ticket']
    return self.retry_function(aml2.mount,
                               vol_ticket['external_label'],
                               ticket['drive_id'],
                               vol_ticket['media_type'])
# unload volume from the drive
def unload(self, ticket):
    """Dismount the volume named in the ticket from the requested drive."""
    vol_ticket = ticket['vol_ticket']
    return self.retry_function(aml2.dismount,
                               vol_ticket['external_label'],
                               ticket['drive_id'],
                               vol_ticket['media_type'])
def insert(self, ticket):
    """Insert volumes from the I/O box, stamping the ticket with the
    command time and the media/IO-box assignments first."""
    self.insertRA = None
    ticket['timeOfCmd'] = time.time()
    ticket['medIOassign'] = self.mediaIOassign
    return self.retry_function(aml2.insert, ticket, {'mcSelf': self})
def eject(self, ticket):
    """Eject volumes out through the I/O box for their media type."""
    ticket['medIOassign'] = self.mediaIOassign
    return self.retry_function(aml2.eject, ticket, {'mcSelf': self})
def robotHomeAndRestart(self, ticket):
    """Send the configured robot arm home and restart it."""
    ticket['robotArm'] = self.robotArm
    return self.retry_function(aml2.robotHomeAndRestart,
                               ticket, {'mcSelf': self})
def getVolState(self, ticket):
    """Look up a volume in the robot and report its attribute state.

    On success, also overwrites ticket['media_type'] with the robot's
    name for the medium and returns (OK, 0, "", attrib).  When the
    volume is unknown the trailing status element is 'U' (unknown).
    """
    external_label = ticket['external_label']
    media_type = ticket['media_type']
    stat,volstate = aml2.view(external_label,media_type)
    state='U' # unknown
    if stat != 0:
        # aci_view itself failed; translate the aci return code.
        return aml2.convert_status(stat)
    # PEP 8: identity test against the None singleton (was "== None").
    if volstate is None:
        return 'BAD', stat, 'volume %s not found'%(external_label,),state
    #Return the correct media type.
    ticket['media_type'] = aml2.media_names.get(volstate.media_type,
                                                "unknown")
    return (e_errors.OK, 0, "", volstate.attrib)
def cleanCycle(self, inTicket):
    """Run a drive cleaning cycle: mount the cleaning tape, wait, dismount.

    Picks a cleaning volume via the volume clerk, repeats the
    mount/sleep/dismount sequence for the configured number of cycles,
    then decrements the cleaning tape's remaining-bytes counter (used
    here as a remaining-uses counter).
    """
    __pychecker__ = "unusednames=i"
    #do drive cleaning cycle
    Trace.log(e_errors.INFO, 'mc:aml2 ticket='+repr(inTicket))
    #classTicket = { 'mcSelf' : self }
    try:
        drive = inTicket['moverConfig']['mc_device']
    except KeyError:
        Trace.log(e_errors.ERROR, 'mc:aml2 no device field found in ticket.')
        status = 37
        return e_errors.DOESNOTEXIST, status, "no device field found in ticket"
    driveType = drive[:2] # ... need device type, not actual device
    # Cleaning time/cycle count come from config keyed by drive type;
    # fall back to 60 seconds / 1 cycle if absent.
    try:
        if self.driveCleanTime:
            cleanTime = self.driveCleanTime[driveType][0] # clean time in seconds
            driveCleanCycles = self.driveCleanTime[driveType][1] # number of cleaning cycles
        else:
            cleanTime = 60
            driveCleanCycles = 1
    except KeyError:
        cleanTime = 60
        driveCleanCycles = 1
    vcc = volume_clerk_client.VolumeClerkClient(self.csc)
    min_remaining_bytes = 1
    vol_veto_list = []
    first_found = 0
    # The ticket may carry one library manager name or a list of them;
    # only the first is used to locate the cleaning-tape volume family.
    libraryManagers = inTicket['moverConfig']['library']
    if type(libraryManagers) == types.StringType:
        lm = libraryManagers
        library = string.split(libraryManagers,".")[0]
    elif type(libraryManagers) == types.ListType:
        lm = libraryManagers[0]
        library = string.split(libraryManagers[0],".")[0]
    else:
        Trace.log(e_errors.ERROR, 'mc:aml2 library_manager field not found in ticket.')
        status = 37
        return e_errors.DOESNOTEXIST, status, "no library_manager field found in ticket"
    lm_info = self.csc.get(lm)
    if not lm_info.has_key('CleanTapeVolumeFamily'):
        Trace.log(e_errors.ERROR, 'mc: no CleanTapeVolumeFamily field found in ticket.')
        status = 37
        return e_errors.DOESNOTEXIST, status, "no CleanTapeVolumeFamily field found in ticket"
    cleanTapeVolumeFamily = lm_info['CleanTapeVolumeFamily']
    v = vcc.next_write_volume(library,
                              min_remaining_bytes, cleanTapeVolumeFamily,
                              vol_veto_list, first_found, exact_match=1) # get which volume to use
    if v["status"][0] != e_errors.OK:
        Trace.log(e_errors.ERROR,"error getting cleaning volume:%s %s"%
                  (v["status"][0],v["status"][1]))
        status = 37
        return v["status"][0], 0, v["status"][1]
    for i in range(driveCleanCycles):
        Trace.log(e_errors.INFO, "AML2 clean drive %s, vol. %s"%(drive,v['external_label']))
        #rt = self.load(v['external_label'], drive, v['media_type'])
        rt = self.retry_function(aml2.mount, v['external_label'],
                                 drive, v['media_type'])
        status = rt[1]
        if status != 0: # mount returned error
            s1,s2,s3 = self.retry_function(aml2.convert_status,status)
            return s1, s2, s3
        time.sleep(cleanTime) # wait cleanTime seconds
        #rt = self.unload(v['external_label'], drive, v['media_type'])
        rt = self.retry_function(aml2.dismount, v['external_label'],
                                 drive, v['media_type'])
        status = rt[1]
        if status != 0: # dismount returned error
            s1,s2,s3 = self.retry_function(aml2.convert_status,status)
            return s1, s2, s3
        Trace.log(e_errors.INFO,"AML2 Clean returned %s"%(rt,))
    # Burn one "use" off the cleaning tape's remaining-bytes counter.
    retTicket = vcc.get_remaining_bytes(v['external_label'])
    remaining_bytes = retTicket['remaining_bytes']-1
    vcc.set_remaining_bytes(v['external_label'],remaining_bytes,'\0', None)
    return (e_errors.OK, 0, None)
def doWaitingInserts(self):
    """Run a deferred insert once the closed work queue has drained."""
    if self.workQueueClosed and not self.work_list:
        self.workQueueClosed = 0
        deferred = {'function': 'insert',
                    'timeOfCmd': self.timeInsert,
                    'r_a': self.insertRA}
        self.DoWork(self.insert, deferred)
    return (e_errors.OK, 0, None)
def query_robot(self, ticket):
    """Query the robot arm's status and report it in the standard form."""
    __pychecker__ = "no-argsused"
    #Name of the aci library function.
    command = "aci_robstat"
    started = time.time()
    status, status_code, response = self.robotStatus(self.robotArm)
    delta = time.time() - started
    # got response, parse it and put it into the standard form
    if e_errors.is_ok(status[0]):
        msg = "%s => %i,%s, %f" % (command, status_code, response, delta)
        Trace.log(e_errors.INFO, msg)
        return (e_errors.OK, 0, msg, "", "")
    E=19 #19 = ???
    msg = "robot status %i: %s => %i,%s, %f" % \
        (E, command, status_code, response, delta)
    Trace.log(e_errors.ERROR, msg)
    return (status, E, response, "", msg)
def getDriveState(self, ticket):
    """Report the state of one drive and attach details to the ticket.

    On success returns (OK, 0, volser, "<state> <code>") where state is
    'U' (up), 'D' (down) or 'N' (unknown), and stores a drive_info dict
    (state/type/status/volume) into the ticket.
    """
    drive = ticket['drive']
    stat, drivestate = aml2.drive_state(drive)
    state='N' # unknown
    if stat != 0:
        #return 'BAD', stat, "aci_drivestatus2 return code", state
        return aml2.convert_status(stat)
    # PEP 8: identity test against the None singleton (was "== None").
    if drivestate is None:
        return 'BAD', stat, "drive %s not found" % (drive,), state
    if drivestate.drive_state == aml2.ACI_DRIVE_UP:
        state = "U" #UP
    elif drivestate.drive_state == aml2.ACI_DRIVE_DOWN:
        state = "D" #DOWN
    #Update the ticket with additional information.
    drive_info = {}
    drive_info['state'] = aml2.drive_state_names.get(
        drivestate.drive_state, "unknown")
    # NOTE(review): keying drive_names by str(drive_state) looks odd --
    # listDrives keys the same table by the drive's type.  Preserved
    # as-is; confirm against aml2.drive_names before changing.
    drive_info['type'] = aml2.drive_names.get(str(drivestate.drive_state),
                                              "unknown")
    drive_info['status'] = 0
    drive_info['volume'] = drivestate.volser
    ticket['drive_info'] = drive_info
    return (e_errors.OK, 0, drivestate.volser, "%s %d" %
            (state, drivestate.drive_state))
def listDrives(self, ticket):
stat, drives = aml2.drives_states()
if stat != 0:
#ticket['status'] = 'BAD', stat, "aci_drivestatus2 return code"
ticket['status'] = aml2.convert_status(stat)
drives = [] #To avoid TypeErrors.
else:
ticket['status'] = (e_errors.OK, 0, "")
drive_list = []
for drive in drives:
use_state = aml2.drive_state_names.get(drive.drive_state,
drive.drive_state)
use_type = aml2.drive_names.get(drive.type, drive.type)
##################################################
# The aml2 is not very good and knowning the difference between
# an LTO1 and LTO2 drive. Ask the mover for the correct
# drive type.
movers = self.csc.get_movers2(3, 2)
for mover in movers:
if mover['mc_device'] == drive.drive_name:
import mover_client
flags = enstore_constants.NO_LOG | enstore_constants.NO_ALARM
mc = mover_client.MoverClient(self.csc,
mover['name'],
flags = flags,
rcv_timeout = 3,
rcv_tries = 2)
status = mc.status(3, 2) #Get the status.
del | |
BRICK_COLOR3
brick.color = BRICK_COLOR3
elif i <= 7:
brick.fill_color = BRICK_COLOR4
brick.color = BRICK_COLOR4
elif i <= 9:
brick.fill_color = BRICK_COLOR5
brick.color = BRICK_COLOR5
else:
brick.fill_color = 'black'
self.window.add(brick, x=j * (BRICK_WIDTH + BRICK_SPACING),
y=BRICK_OFFSET + i * (BRICK_HEIGHT + BRICK_SPACING))
self.__brick_left = BRICK_COLS * BRICK_ROWS
self.__life = NUM_LIVES
self.lives.text = 'Balls left:' + str(self.__life)
self.drop_switch = False
def drop_trigger(self):
    """Apply the effect of the power-up drop the paddle just caught."""
    anchor_x = self.paddle.x

    def swap_paddle(new_width):
        # Replace the paddle with one of the given width at the same x.
        fresh = GRect(new_width, PADDLE_HEIGHT)
        fresh.filled = True
        self.window.remove(self.paddle)
        self.paddle = fresh
        self.window.add(fresh, x=anchor_x,
                        y=self.window.height - fresh.height - PADDLE_OFFSET)

    color = self.dropcolor
    if color == 'red':
        # shrink the paddle, but never below a playable width
        if self.paddle.width > 30:
            swap_paddle(self.paddle.width - 25)
    elif color == 'orange':
        # bonus points
        self.scorepoints += 10
        self.score.text = 'Score:' + str(self.scorepoints)
    elif color == 'yellow':
        # ball ignores brick collisions until it reaches the top
        self.superball_switch = True
    elif color == 'green':
        # spawn the second ball at the center of the screen
        self.ball_available = False
        self.window.add(self.ball2,
                        self.window.width / 2 - self.ball2.width / 2,
                        self.window.height / 2 - self.ball2.height / 2)
    elif color == 'blue':
        # widen the paddle
        swap_paddle(self.paddle.width + 25)
    elif color == 'purple':
        # extra life
        self.__life += 1
        self.lives.text = 'Balls left:' + str(self.__life)
def super_collision(self):
    """Superball mode: destroy any brick the ball touches without
    bouncing, until the ball climbs above the window's midline
    (bricks only live in the upper half).

    Bug fix: the original excluded ``(self.drop and self.ball2)``,
    which evaluates to a single object (ball2 when a drop exists),
    so the other object could be removed and counted as a brick.
    Both are now excluded explicitly, matching better2_collision.
    """
    if self.ball.y >= self.window.height / 2:
        return
    # Probe just left and just right of the ball at its vertical middle.
    left_hit = self.window.get_object_at(self.ball.x - BALL_CORECT,
                                         self.ball.y + self.ball.height / 2)
    right_hit = self.window.get_object_at(self.ball.x + self.ball.width + BALL_CORECT,
                                          self.ball.y + self.ball.height / 2)
    # Never remove the HUD labels, the falling power-up, or the second ball.
    protected = (self.score, self.lives, self.drop, self.ball2)
    for hit in (right_hit, left_hit):  # right probe has priority, as before
        if hit is not None and hit not in protected:
            self.window.remove(hit)
            self.__brick_left -= 1
            self.random_drop(hit)
            self.scorepoints += 1
            break  # at most one brick removed per frame
def how_to_play_setup(self):
"""
tells the player how this game works and what
does the powerup do
"""
self.window.clear()
self.howto_switch=True
introduction = GLabel('Move the paddle and break bricks')
introduction.font='Arial-20-bold'
self.window.add(introduction,self.window.width/2-introduction.width/2, self.window.height*2/14)
self.window.add(self.back_button, 0, self.window.height)
red_drop = GRoundRect(15, 15)
red_drop.filled = True
red_drop.fill_color = 'red'
self.window.add(red_drop, self.window.width*2/14, self.window.height*4/14 )
redlabel=GLabel('Decreases your paddle size')
redlabel.font='Arial-15-bold'
self.window.add(redlabel, self.window.width * 3 / 14, self.window.height * 4.4 / 14)
blue_drop = GRoundRect(15, 15)
blue_drop.filled = True
blue_drop.fill_color = 'blue'
self.window.add(blue_drop, self.window.width * 2 / 14, self.window.height * 5 / 14)
bluelabel = GLabel('Increases your paddle size')
bluelabel.font = 'Arial-15-bold'
self.window.add(bluelabel, self.window.width * 3 / 14, self.window.height * 5.4 / 14)
green_drop = GRoundRect(15, 15)
green_drop.filled = True
green_drop.fill_color = 'green'
self.window.add(green_drop, self.window.width * 2 / 14, self.window.height * 6 / 14)
greenlabel = GLabel('Extra ball on screen')
greenlabel.font = 'Arial-15-bold'
self.window.add(greenlabel, self.window.width * 3 / 14, self.window.height * 6.4 / 14)
purple_drop = GRoundRect(15, 15)
purple_drop.filled = True
purple_drop.fill_color = 'purple'
self.window.add(purple_drop, self.window.width * 2 / 14, self.window.height * 7 / 14)
purplelabel = GLabel('Extra life')
purplelabel.font = 'Arial-15-bold'
self.window.add(purplelabel, self.window.width * 3 / 14, self.window.height * 7.4 / 14)
yellow_drop = GRoundRect(15, 15)
yellow_drop.filled = True
yellow_drop.fill_color = 'yellow'
self.window.add(yellow_drop, self.window.width * 2 / 14, self.window.height * 8 / 14)
yellowlabel = GLabel('Your ball ignores all brick collisions')
yellowlabel.font = 'Arial-15-bold'
self.window.add(yellowlabel, self.window.width * 3 / 14, self.window.height * 8.4 / 14)
yellowlabel2 = GLabel('until it reaches the top')
yellowlabel2.font = 'Arial-15-bold'
self.window.add(yellowlabel2, self.window.width * 3 / 14, self.window.height * 9.4 / 14)
orange_drop = GRoundRect(15, 15)
orange_drop.filled = True
orange_drop.fill_color = 'orange'
self.window.add(orange_drop, self.window.width * 2 / 14, self.window.height * 10 / 14)
orangelabel = GLabel('+15 points')
orangelabel.font = 'Arial-15-bold'
self.window.add(orangelabel, self.window.width * 3 / 14, self.window.height * 10.4 / 14)
onmouseclicked(self.back)
dy=6
while self.howto_switch:
red_drop.move(0,dy)
green_drop.move(0,dy)
blue_drop.move(0,dy)
yellow_drop.move(0,dy)
orange_drop.move(0,dy)
purple_drop.move(0,dy)
dy=-dy
pause(500)
def ball2_collision(self):
    # the collision model for the second ball
    # Order matters: resolve wall and paddle bounces before brick
    # collisions, then advance the ball by its per-frame velocity.
    self.collision2_window()
    self.collision2_paddle()
    self.better2_collision()
    self.ball2.move(self.__dx2, self.__dy2)
def collision2_paddle(self):
    """Bounce the second ball off the paddle (only while moving down)."""
    within_x = (self.paddle.x - self.ball2.width < self.ball2.x
                < self.paddle.x + self.paddle.width)
    gap = self.paddle.y - self.ball2.y - self.ball2.height + 7
    if within_x and 0 < gap < 8 and self.__dy2 > 0:
        self.__dy2 = -self.__dy2
def collision2_window(self):
    """Reflect the second ball off the side walls and the ceiling."""
    off_left = self.ball2.x < 0
    off_right = self.ball2.x > self.window.width - self.ball2.width
    if off_left or off_right:
        self.__dx2 = -self.__dx2
    if self.ball2.y < 0:
        self.__dy2 = -self.__dy2
def better2_collision(self):
if self.ball2.y < self.window.height / 2:
h = self.window.get_object_at(self.ball2.x - BALL_CORECT, self.ball2.y + self.ball2.height / 2)
f = self.window.get_object_at(self.ball2.x + self.ball2.width + BALL_CORECT,
self.ball2.y + self.ball2.height / 2)
if f != None and f != self.score and f != self.lives and f != self.drop and f!=self.ball:
self.window.remove(f)
self.__dx2 = -self.__dx2
self.__brick_left -= 1
self.random_drop(f)
self.scorepoints += 1
elif h != None and h != self.score and h != self.lives and h != self.drop and h!=self.ball:
self.window.remove(h)
self.__dx2 = -self.__dx2
self.__brick_left -= 1
self.random_drop(h)
self.scorepoints += 1
elif self.__dy2 < 0:
a = self.window.get_object_at(
self.ball2.x + self.ball2.width / 2 + self.ball_radius / 1.414 + BALL_CORECT,
self.ball2.y + self.ball2.height / 2 - self.ball_radius / 1.414 - BALL_CORECT + CORRECT)
d = self.window.get_object_at(
self.ball2.x + self.ball2.width / 2 - self.ball_radius / 1.414 - BALL_CORECT,
self.ball2.y + self.ball2.height / 2 - self.ball_radius / 1.414 - BALL_CORECT + CORRECT)
e = self.window.get_object_at(self.ball2.x + self.ball2.width / 2, self.ball2.y - BALL_CORECT - CORRECT)
if (a != None and a != self.score) and (e and f) == None and a != self.lives and a != self.drop and a!=self.ball:
self.window.remove(a)
self.__dy2 = -self.__dy2
self.__dx2 = -self.__dx2
self.__brick_left -= 1
self.random_drop(a)
self.scorepoints += 1
elif (d != None and d != self.score) and (e and h) == None and d != self.lives and d != self.drop and d!=self.ball:
self.window.remove(d)
self.__dy2 = -self.__dy2
self.__dx2 = -self.__dx2
self.__brick_left -= 1
self.random_drop(d)
self.scorepoints += 1
elif e != None and (d and a) == None and e != self.score and e != self.lives and e != self.drop and e!=self.ball:
self.window.remove(e)
self.__dy2 = -self.__dy2
self.__brick_left -= 1
self.random_drop(e)
self.scorepoints += 1
else:
| |
<filename>tests/test_archiver.py
import logging
import os
import shutil
import tempfile
from functools import wraps
import json
import mock
from urllib import quote_plus
from pylons import config
from nose.tools import assert_raises, assert_equal
from ckan import model
from ckan import plugins
from ckan.logic import get_action
try:
from ckan.tests.helpers import reset_db
from ckan.tests import factories as ckan_factories
from ckan.tests.legacy import BaseCase
except ImportError:
from ckan.new_tests.helpers import reset_db
from ckan.new_tests import factories as ckan_factories
from ckan.tests import BaseCase
from ckanext.archiver import model as archiver_model
from ckanext.archiver.model import Archival
from ckanext.archiver.tasks import (link_checker,
update_resource,
update_package,
download,
api_request,
DownloadError,
ChooseNotToDownload,
LinkCheckerError,
LinkInvalidError,
CkanError,
response_is_an_api_error
)
from mock_remote_server import MockEchoTestServer, MockWmsServer, MockWfsServer
# enable celery logging for when you run nosetests -s
log = logging.getLogger('ckanext.archiver.tasks')

def get_logger():
    # The celery tasks fetch their logger through this hook; overriding it
    # on the task objects below routes task output to the logger above.
    return log

update_resource.get_logger = get_logger
update_package.get_logger = get_logger
def with_mock_url(url=''):
    """Decorator factory: start a MockEchoTestServer and call the wrapped
    function with the server's address prepended to ``url`` as an extra
    positional argument.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            with MockEchoTestServer().serve() as server_addr:
                full_url = '%s/%s' % (server_addr, url)
                return func(*(args + (full_url,)), **kwargs)
        return wrapper
    return decorator
class TestLinkChecker(BaseCase):
    """
    Tests for link checker task

    setup_class/teardown_class swap the active CKAN plugin set so only
    the 'archiver' plugin is loaded while these tests run, restoring
    the previous configuration afterwards.
    """
    @classmethod
    def setup_class(cls):
        reset_db()
        plugins.unload_all()
        # Remember the configured plugins so teardown can restore them.
        cls._saved_plugins_config = config.get('ckan.plugins', '')
        config['ckan.plugins'] = 'archiver'
        plugins.load_all(config)

    @classmethod
    def teardown_class(cls):
        plugins.unload_all()
        config['ckan.plugins'] = cls._saved_plugins_config
        plugins.load_all(config)

    def test_file_url(self):
        url = u'file:///home/root/test.txt' # schema not allowed
        context = json.dumps({})
        data = json.dumps({'url': url})
        assert_raises(LinkInvalidError, link_checker, context, data)

    def test_bad_url(self):
        # Missing '//' after the scheme.
        url = u'http:www.buckshealthcare.nhs.uk/freedom-of-information.htm'
        context = json.dumps({})
        data = json.dumps({'url': url})
        assert_raises(LinkInvalidError, link_checker, context, data)

    @with_mock_url('+/http://www.homeoffice.gov.uk/publications/science-research-statistics/research-statistics/drugs-alcohol-research/hosb1310/hosb1310-ann2tabs?view=Binary')
    def test_non_escaped_url(self, url):
        context = json.dumps({})
        data = json.dumps({'url': url})
        res = link_checker(context, data)
        assert res

    def test_empty_url(self):
        url = u''
        context = json.dumps({})
        data = json.dumps({'url': url})
        assert_raises(LinkCheckerError, link_checker, context, data)

    @with_mock_url('?status=503')
    def test_url_with_503(self, url):
        context = json.dumps({})
        data = json.dumps({'url': url})
        assert_raises(LinkCheckerError, link_checker, context, data)

    @with_mock_url('?status=404')
    def test_url_with_404(self, url):
        context = json.dumps({})
        data = json.dumps({'url': url})
        assert_raises(LinkCheckerError, link_checker, context, data)

    @with_mock_url('?status=405')
    def test_url_with_405(self, url): # 405: method (HEAD) not allowed
        context = json.dumps({})
        data = json.dumps({'url': url})
        assert_raises(LinkCheckerError, link_checker, context, data)

    @with_mock_url('')
    def test_url_with_30x_follows_redirect(self, url):
        # Point the mock server at itself via a 301 redirect.
        redirect_url = url + u'?status=200&content=test&content-type=text/csv'
        url += u'?status=301&location=%s' % quote_plus(redirect_url)
        context = json.dumps({})
        data = json.dumps({'url': url})
        result = json.loads(link_checker(context, data))
        assert result

    # e.g. "http://www.dasa.mod.uk/applications/newWeb/www/index.php?page=48&thiscontent=180&date=2011-05-26&pubType=1&PublishTime=09:30:00&from=home&tabOption=1"
    @with_mock_url('?time=09:30&status=200')
    def test_colon_in_query_string(self, url):
        # accept, because browsers accept this
        # see discussion: http://trac.ckan.org/ticket/318
        context = json.dumps({})
        data = json.dumps({'url': url})
        result = json.loads(link_checker(context, data))
        assert result

    @with_mock_url('?status=200 ')
    def test_trailing_whitespace(self, url):
        # accept, because browsers accept this
        context = json.dumps({})
        data = json.dumps({'url': url})
        result = json.loads(link_checker(context, data))
        assert result

    @with_mock_url('?status=200')
    def test_good_url(self, url):
        context = json.dumps({})
        data = json.dumps({'url': url})
        result = json.loads(link_checker(context, data))
        assert result
class TestArchiver(BaseCase):
"""
Tests for Archiver 'update_resource'/'update_package' tasks
"""
@classmethod
def setup_class(cls):
reset_db()
archiver_model.init_tables(model.meta.engine)
cls.temp_dir = tempfile.mkdtemp()
cls.config = config.__file__
@classmethod
def teardown_class(cls):
os.removedirs(cls.temp_dir)
def teardown(self):
pkg = model.Package.get(u'testpkg')
if pkg:
model.repo.new_revision()
pkg.purge()
model.repo.commit_and_remove()
def _test_package(self, url, format=None):
pkg = {'resources': [
{'url': url, 'format': format or 'TXT', 'description': 'Test'}
]}
pkg = ckan_factories.Dataset(**pkg)
return pkg
def _test_resource(self, url, format=None):
pkg = self._test_package(url, format)
return pkg['resources'][0]
def assert_archival_error(self, error_message_fragment, resource_id):
archival = Archival.get_for_resource(resource_id)
if error_message_fragment not in archival.reason:
print 'ERROR: %s (%s)' % (archival.reason, archival.status)
raise AssertionError(archival.reason)
def test_file_url(self):
res_id = self._test_resource('file:///home/root/test.txt')['id'] # scheme not allowed
result = update_resource(self.config, res_id)
assert not result, result
self.assert_archival_error('Invalid url scheme', res_id)
def test_bad_url(self):
res_id = self._test_resource('http:host.com')['id'] # no slashes
result = update_resource(self.config, res_id)
assert not result, result
self.assert_archival_error('Failed to parse', res_id)
@with_mock_url('?status=200&content=test&content-type=csv')
def test_resource_hash_and_content_length(self, url):
res_id = self._test_resource(url)['id']
result = json.loads(update_resource(self.config, res_id))
assert result['size'] == len('test')
from hashlib import sha1
assert result['hash'] == sha1('test').hexdigest(), result
_remove_archived_file(result.get('cache_filepath'))
@with_mock_url('?status=200&content=test&content-type=csv')
def test_archived_file(self, url):
res_id = self._test_resource(url)['id']
result = json.loads(update_resource(self.config, res_id))
assert result['cache_filepath']
assert os.path.exists(result['cache_filepath'])
with open(result['cache_filepath']) as f:
content = f.readlines()
assert len(content) == 1
assert content[0] == "test"
_remove_archived_file(result.get('cache_filepath'))
@with_mock_url('?content-type=application/foo&content=test')
def test_update_url_with_unknown_content_type(self, url):
res_id = self._test_resource(url, format='foo')['id'] # format has no effect
result = json.loads(update_resource(self.config, res_id))
assert result, result
assert result['mimetype'] == 'application/foo' # stored from the header
def test_wms_1_3(self):
with MockWmsServer(wms_version='1.3').serve() as url:
res_id = self._test_resource(url)['id']
result = json.loads(update_resource(self.config, res_id))
assert result, result
assert result['request_type'] == 'WMS 1.3'
with open(result['cache_filepath']) as f:
content = f.read()
assert '<WMT_MS_Capabilities' in content, content[:1000]
_remove_archived_file(result.get('cache_filepath'))
@with_mock_url('?status=200&content-type=csv')
def test_update_with_zero_length(self, url):
# i.e. no content
res_id = self._test_resource(url)['id']
result = update_resource(self.config, res_id)
assert not result, result
self.assert_archival_error('Content-length after streaming was 0', res_id)
@with_mock_url('?status=404&content=test&content-type=csv')
def test_file_not_found(self, url):
res_id = self._test_resource(url)['id']
result = update_resource(self.config, res_id)
assert not result, result
self.assert_archival_error('Server reported status error: 404 Not Found', res_id)
@with_mock_url('?status=500&content=test&content-type=csv')
def test_server_error(self, url):
res_id = self._test_resource(url)['id']
result = update_resource(self.config, res_id)
assert not result, result
self.assert_archival_error('Server reported status error: 500 Internal Server Error', res_id)
@with_mock_url('?status=200&content=short&length=1000001&content-type=csv')
def test_file_too_large_1(self, url):
# will stop after receiving the header
res_id = self._test_resource(url)['id']
result = update_resource(self.config, res_id)
assert not result, result
self.assert_archival_error('Content-length 1000001 exceeds maximum allowed value 1000000', res_id)
@with_mock_url('?status=200&content_long=test_contents_greater_than_the_max_length&no-content-length&content-type=csv')
def test_file_too_large_2(self, url):
# no size info in headers - it stops only after downloading the content
res_id = self._test_resource(url)['id']
result = update_resource(self.config, res_id)
assert not result, result
self.assert_archival_error('Content-length 1000001 exceeds maximum allowed value 1000000', res_id)
@with_mock_url('?status=200&content=content&length=abc&content-type=csv')
def test_content_length_not_integer(self, url):
res_id = self._test_resource(url)['id']
result = json.loads(update_resource(self.config, res_id))
assert result, result
@with_mock_url('?status=200&content=content&repeat-length&content-type=csv')
def test_content_length_repeated(self, url):
# listing the Content-Length header twice causes requests to
# store the value as a comma-separated list
res_id = self._test_resource(url)['id']
result = json.loads(update_resource(self.config, res_id))
assert result, result
@with_mock_url('')
def test_url_with_30x_follows_and_records_redirect(self, url):
redirect_url = url + u'?status=200&content=test&content-type=text/csv'
url += u'?status=301&location=%s' % quote_plus(redirect_url)
res_id = self._test_resource(url)['id']
result = json.loads(update_resource(self.config, res_id))
assert result
assert_equal(result['url_redirected_to'], redirect_url)
@with_mock_url('?status=200&content=test&content-type=csv')
def test_ipipe_notified(self, url):
testipipe = plugins.get_plugin('testipipe')
testipipe.reset()
res_id = self._test_resource(url)['id']
# celery.send_task doesn't respect CELERY_ALWAYS_EAGER
res = update_resource.apply_async(args=[self.config, res_id, 'queue1'])
res.get()
assert len(testipipe.calls) == 1
operation, queue, params = testipipe.calls[0]
assert operation == 'archived'
assert queue == 'queue1'
assert params.get('package_id') == None
assert params.get('resource_id') == res_id
@with_mock_url('?status=200&content=test&content-type=csv')
@mock.patch('ckan.lib.celery_app.celery.send_task')
def test_package_achived_when_resource_modified(self, url, send_task):
    """Updating a resource's URL queues an 'archiver.update_package' celery task."""
    # NOTE(review): "achived" in the method name looks like a typo for
    # "archived"; left as-is since test names are discovered by the runner.
    data_dict = self._test_resource(url)
    data_dict['url'] = 'http://example.com/foo'
    context = {'model': model,
               'user': 'test',
               'ignore_auth': True,
               'session': model.Session}
    result = get_action('resource_update')(context, data_dict)
    assert_equal(send_task.called, True)
    # Only the positional args matter: the task name being dispatched.
    args, kwargs = send_task.call_args
    assert args == ('archiver.update_package',)
@with_mock_url('?status=200&content=test&content-type=csv')
def test_ipipe_notified_dataset(self, url):
    """Archiving a package emits 'archived' for its resource, then 'package-archived'."""
    testipipe = plugins.get_plugin('testipipe')
    testipipe.reset()
    pkg = self._test_package(url)
    # celery.send_task doesn't respect CELERY_ALWAYS_EAGER, so run the
    # task through apply_async and wait for it explicitly.
    res = update_package.apply_async(args=[self.config, pkg['id'], 'queue1'])
    res.get()
    assert len(testipipe.calls) == 2, len(testipipe.calls)
    # First notification: the package's single resource was archived.
    operation, queue, params = testipipe.calls[0]
    assert operation == 'archived'
    assert queue == 'queue1'
    # PEP 8: compare to None with identity, not equality.
    assert params.get('package_id') is None
    assert params.get('resource_id') == pkg['resources'][0]['id']
    # Second notification: the package-level event.
    operation, queue, params = testipipe.calls[1]
    assert operation == 'package-archived'
    assert queue == 'queue1'
    assert params.get('package_id') == pkg['id']
    assert params.get('resource_id') is None
class TestDownload(BaseCase):
'''Tests of the download method (and things it calls).
Doesn't need a fake CKAN to get/set the status of.
'''
@classmethod
def setup_class(cls):
    """Reset the DB and build the minimal context dict that download() consumes."""
    reset_db()
    # Dead code removed: a bare `config` expression statement sat here,
    # which evaluates the name and discards it (a no-op).
    cls.fake_context = {
        'site_url': config.get('ckan.site_url_internally') or config['ckan.site_url'],
        'cache_url_root': config.get('ckanext-archiver.cache_url_root'),
    }
def teardown(self):
    """Purge the 'testpkg' package (if a test created one) after each test."""
    pkg = model.Package.get(u'testpkg')
    if pkg:
        # Purging needs an open revision; commit_and_remove closes it.
        model.repo.new_revision()
        pkg.purge()
        model.repo.commit_and_remove()
def _test_resource(self, url, format=None):
    """Create a one-resource package named 'testpkg' and return its resource dict."""
    context = {'model': model, 'ignore_auth': True, 'session': model.Session, 'user': 'test'}
    resource = {'url': url, 'format': format or 'TXT', 'description': 'Test'}
    created = get_action('package_create')(context, {'name': 'testpkg', 'resources': [resource]})
    return created['resources'][0]
@with_mock_url('?status=200&method=get&content=test&content-type=csv')
def test_head_unsupported(self, url):
    """download() still succeeds against a server that only answers GET."""
    # This test was more relevant when we did HEAD requests. Now servers
    # which respond badly to HEAD requests are not an issue.
    resource = self._test_resource(url)
    # HEAD request will return a 405 error, but it will persevere
    # and do a GET request which will work.
    result = download(self.fake_context, resource)
    assert result['saved_file']
@with_mock_url('?status=200&content=test&content-type=csv')
def test_download_file(self, url):
    """download() saves the file to disk and reports the downloaded size."""
    resource = self._test_resource(url)
    result = download(self.fake_context, resource)
    assert result['saved_file']
    assert os.path.exists(result['saved_file'])
    _remove_archived_file(result.get('saved_file'))
    # Modify the resource and check that the resource size gets updated
    resource['url'] = url.replace('content=test', 'content=test2')
    result = download(self.fake_context, resource)
    assert_equal(result['size'], len('test2'))
    _remove_archived_file(result.get('saved_file'))
def test_wms_1_3(self):
    """api_request() recognises a WMS 1.3 endpoint and archives its capabilities."""
    with MockWmsServer(wms_version='1.3').serve() as url:
        resource = self._test_resource(url)
        result = api_request(self.fake_context, resource)
        assert result
        # Bug fix: the failure message previously read result['length'], a key
        # this result dict is not shown to carry (the value asserted on is
        # 'size'); a failing assert would have raised KeyError instead.
        assert int(result['size']) > 7800, result['size']
        assert_equal(result['request_type'], 'WMS 1.3')
        _remove_archived_file(result.get('saved_file'))
def test_wms_1_1_1(self):
    """api_request() recognises a WMS 1.1.1 endpoint and archives its capabilities."""
    with MockWmsServer(wms_version='1.1.1').serve() as url:
        resource = self._test_resource(url)
        result = api_request(self.fake_context, resource)
        assert result
        # Bug fix: failure message used result['length'] (KeyError on failure);
        # use the 'size' key that is actually asserted on.
        assert int(result['size']) > 7800, result['size']
        assert_equal(result['request_type'], 'WMS 1.1.1')
        _remove_archived_file(result.get('saved_file'))
def test_wfs(self):
    """api_request() recognises a WFS 2.0 endpoint and archives its capabilities."""
    with MockWfsServer().serve() as url:
        resource = self._test_resource(url)
        result = api_request(self.fake_context, resource)
        assert result
        # Bug fix: failure message used result['length'] (KeyError on failure);
        # use the 'size' key that is actually asserted on.
        assert int(result['size']) > 7800, result['size']
        assert_equal(result['request_type'], 'WFS 2.0')
        _remove_archived_file(result.get('saved_file'))
def test_wms_error(self):
    """response_is_an_api_error() detects both WMS/OWS exception-report flavours."""
    # A WMS 1.3.0 ServiceExceptionReport document.
    wms_error_1 = '''<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<ServiceExceptionReport version="1.3.0"
xmlns="http://www.opengis.net/ogc"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.opengis.net/ogc http://schemas.opengis.net/wms/1.3.0/exceptions_1_3_0.xsd">
<ServiceException code="InvalidFormat">
Unknown service requested.
</ServiceException>
</ServiceExceptionReport>'''
    assert_equal(response_is_an_api_error(wms_error_1), True)
    # An OWS 1.1.0 ExceptionReport on a single line.
    wms_error_2 = '''<ows:ExceptionReport version='1.1.0' language='en' xmlns:ows='http://www.opengis.net/ows'><ows:Exception exceptionCode='NoApplicableCode'><ows:ExceptionText>Unknown operation name.</ows:ExceptionText></ows:Exception></ows:ExceptionReport>'''
    assert_equal(response_is_an_api_error(wms_error_2), True)
def | |
<gh_stars>0
"""
Set of objects that implement different kinds of random noise generators.
"""
from __future__ import absolute_import
"""
Copyright 2009-2015 <NAME>
This file is part of pyo, a python module to help digital signal
processing script creation.
pyo is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
pyo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with pyo. If not, see <http://www.gnu.org/licenses/>.
"""
from ._core import *
from ._maps import *
class Randi(PyoObject):
    """
    Periodic pseudo-random generator with interpolation.

    Randi draws a pseudo-random number between `min` and `max` at the
    rate given by `freq`, ramping linearly from the current value to the
    next one.

    :Parent: :py:class:`PyoObject`

    :Args:

        min: float or PyoObject, optional
            Minimum value for the random generation. Defaults to 0.
        max: float or PyoObject, optional
            Maximum value for the random generation. Defaults to 1.
        freq: float or PyoObject, optional
            Polling frequency. Defaults to 1.

    >>> s = Server().boot()
    >>> s.start()
    >>> freq = Randi(500, 3000, 4)
    >>> noze = Noise().mix(2)
    >>> a = Biquad(noze, freq=freq, q=5, type=2, mul=.5).out()

    """

    def __init__(self, min=0.0, max=1.0, freq=1.0, mul=1, add=0):
        pyoArgsAssert(self, "OOOOO", min, max, freq, mul, add)
        PyoObject.__init__(self, mul, add)
        self._min = min
        self._max = max
        self._freq = freq
        # Expand every argument to list form and fan out one C-level
        # generator per audio stream.
        min, max, freq, mul, add, lmax = convertArgsToLists(min, max, freq, mul, add)
        objs = []
        for i in range(lmax):
            objs.append(Randi_base(wrap(min, i), wrap(max, i), wrap(freq, i), wrap(mul, i), wrap(add, i)))
        self._base_objs = objs
        self._init_play()

    def setMin(self, x):
        """
        Replace the `min` attribute.

        :Args:

            x: float or PyoObject
                new `min` attribute.

        """
        pyoArgsAssert(self, "O", x)
        self._min = x
        x, lmax = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setMin(wrap(x, i))

    def setMax(self, x):
        """
        Replace the `max` attribute.

        :Args:

            x: float or PyoObject
                new `max` attribute.

        """
        pyoArgsAssert(self, "O", x)
        self._max = x
        x, lmax = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setMax(wrap(x, i))

    def setFreq(self, x):
        """
        Replace the `freq` attribute.

        :Args:

            x: float or PyoObject
                new `freq` attribute.

        """
        pyoArgsAssert(self, "O", x)
        self._freq = x
        x, lmax = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setFreq(wrap(x, i))

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [
            SLMap(0.0, 1.0, "lin", "min", self._min),
            SLMap(1.0, 2.0, "lin", "max", self._max),
            SLMap(0.1, 20.0, "lin", "freq", self._freq),
            SLMapMul(self._mul),
        ]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def min(self):
        """float or PyoObject. Lower bound of the generator."""
        return self._min

    @min.setter
    def min(self, x):
        self.setMin(x)

    @property
    def max(self):
        """float or PyoObject. Upper bound of the generator."""
        return self._max

    @max.setter
    def max(self, x):
        self.setMax(x)

    @property
    def freq(self):
        """float or PyoObject. Polling frequency."""
        return self._freq

    @freq.setter
    def freq(self, x):
        self.setFreq(x)
class Randh(PyoObject):
    """
    Periodic pseudo-random generator (sample-and-hold).

    Randh draws a pseudo-random number between `min` and `max` at the
    rate given by `freq` and holds each value until the next draw.

    :Parent: :py:class:`PyoObject`

    :Args:

        min: float or PyoObject, optional
            Minimum value for the random generation. Defaults to 0.
        max: float or PyoObject, optional
            Maximum value for the random generation. Defaults to 1.
        freq: float or PyoObject, optional
            Polling frequency. Defaults to 1.

    >>> s = Server().boot()
    >>> s.start()
    >>> freq = Randh(500, 3000, 4)
    >>> noze = Noise().mix(2)
    >>> a = Biquad(noze, freq=freq, q=5, type=2, mul=.5).out()

    """

    def __init__(self, min=0.0, max=1.0, freq=1.0, mul=1, add=0):
        pyoArgsAssert(self, "OOOOO", min, max, freq, mul, add)
        PyoObject.__init__(self, mul, add)
        self._min = min
        self._max = max
        self._freq = freq
        # Expand every argument to list form and fan out one C-level
        # generator per audio stream.
        min, max, freq, mul, add, lmax = convertArgsToLists(min, max, freq, mul, add)
        objs = []
        for i in range(lmax):
            objs.append(Randh_base(wrap(min, i), wrap(max, i), wrap(freq, i), wrap(mul, i), wrap(add, i)))
        self._base_objs = objs
        self._init_play()

    def setMin(self, x):
        """
        Replace the `min` attribute.

        :Args:

            x: float or PyoObject
                new `min` attribute.

        """
        pyoArgsAssert(self, "O", x)
        self._min = x
        x, lmax = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setMin(wrap(x, i))

    def setMax(self, x):
        """
        Replace the `max` attribute.

        :Args:

            x: float or PyoObject
                new `max` attribute.

        """
        pyoArgsAssert(self, "O", x)
        self._max = x
        x, lmax = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setMax(wrap(x, i))

    def setFreq(self, x):
        """
        Replace the `freq` attribute.

        :Args:

            x: float or PyoObject
                new `freq` attribute.

        """
        pyoArgsAssert(self, "O", x)
        self._freq = x
        x, lmax = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setFreq(wrap(x, i))

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [
            SLMap(0.0, 1.0, "lin", "min", self._min),
            SLMap(1.0, 2.0, "lin", "max", self._max),
            SLMap(0.1, 20.0, "lin", "freq", self._freq),
            SLMapMul(self._mul),
        ]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def min(self):
        """float or PyoObject. Lower bound of the generator."""
        return self._min

    @min.setter
    def min(self, x):
        self.setMin(x)

    @property
    def max(self):
        """float or PyoObject. Upper bound of the generator."""
        return self._max

    @max.setter
    def max(self, x):
        self.setMax(x)

    @property
    def freq(self):
        """float or PyoObject. Polling frequency."""
        return self._freq

    @freq.setter
    def freq(self, x):
        self.setFreq(x)
class Choice(PyoObject):
    """
    Periodically choose a new value from a user list.

    Choice picks a new value from the predefined list of floats `choice`
    at the rate given by `freq` and holds the chosen value until the
    next pick.

    :Parent: :py:class:`PyoObject`

    :Args:

        choice: list of floats or list of lists of floats
            Possible values for the random generation.
        freq: float or PyoObject, optional
            Polling frequency. Defaults to 1.

    >>> s = Server().boot()
    >>> s.start()
    >>> freqs = midiToHz([60,62,64,65,67,69,71,72])
    >>> rnd = Choice(choice=freqs, freq=[3,4])
    >>> a = SineLoop(rnd, feedback=0.05, mul=.2).out()

    """

    def __init__(self, choice, freq=1.0, mul=1, add=0):
        pyoArgsAssert(self, "lOOO", choice, freq, mul, add)
        PyoObject.__init__(self, mul, add)
        self._choice = choice
        self._freq = freq
        freq, mul, add, lmax = convertArgsToLists(freq, mul, add)
        # A list of lists means one pool per stream; a flat list is
        # shared by every stream. (type() check kept on purpose: list
        # subclasses keep the original behaviour.)
        if type(choice[0]) == list:
            lmax = max(len(choice), lmax)
            objs = [Choice_base(wrap(choice, i), wrap(freq, i), wrap(mul, i), wrap(add, i)) for i in range(lmax)]
        else:
            objs = [Choice_base(choice, wrap(freq, i), wrap(mul, i), wrap(add, i)) for i in range(lmax)]
        self._base_objs = objs
        self._init_play()

    def setChoice(self, x):
        """
        Replace the `choice` attribute.

        :Args:

            x: list of floats or list of lists of floats
                new `choice` attribute.

        """
        pyoArgsAssert(self, "l", x)
        self._choice = x
        if type(x[0]) == list:
            # One sub-list per underlying stream.
            for i, obj in enumerate(self._base_objs):
                obj.setChoice(wrap(self._choice, i))
        else:
            # Flat list: every stream shares the same pool.
            for obj in self._base_objs:
                obj.setChoice(self._choice)

    def setFreq(self, x):
        """
        Replace the `freq` attribute.

        :Args:

            x: float or PyoObject
                new `freq` attribute.

        """
        pyoArgsAssert(self, "O", x)
        self._freq = x
        x, lmax = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setFreq(wrap(x, i))

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMap(0.1, 20.0, "lin", "freq", self._freq), SLMapMul(self._mul)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def choice(self):
        """list of floats or list of lists of floats. Possible choices."""
        return self._choice

    @choice.setter
    def choice(self, x):
        self.setChoice(x)

    @property
    def freq(self):
        """float or PyoObject. Polling frequency."""
        return self._freq

    @freq.setter
    def freq(self, x):
        self.setFreq(x)
class RandInt(PyoObject):
"""
Periodic pseudo-random integer generator.
RandInt generates a pseudo-random integer number between 0 and `max`
values at a frequency specified by `freq` parameter. RandInt will
hold generated value until the next generation.
:Parent: :py:class:`PyoObject`
:Args:
max: float or PyoObject, optional
Maximum value for the random generation. Defaults to 100.
freq: float or PyoObject, optional
Polling frequency. Defaults to 1.
>>> s = Server().boot()
>>> s.start()
>>> freq = RandInt(max=10, freq=5, mul=100, add=500)
>>> jit = Randi(min=0.99, max=1.01, freq=[2.33,3.41])
>>> a = SineLoop(freq*jit, feedback=0.03, mul=.2).out()
"""
def __init__(self, max=100, freq=1.0, mul=1, add=0):
pyoArgsAssert(self, "OOOO", max, freq, mul, add)
PyoObject.__init__(self, mul, add)
self._max = max
self._freq = freq
max, freq, mul, add, lmax = convertArgsToLists(max, freq, mul, add)
self._base_objs = [RandInt_base(wrap(max, i), wrap(freq, i), wrap(mul, i), wrap(add, i)) for i in range(lmax)]
self._init_play()
def setMax(self, x):
"""
Replace the `max` attribute.
:Args:
x: float or PyoObject
new `max` attribute.
"""
pyoArgsAssert(self, "O", x)
self._max = x
x, lmax = convertArgsToLists(x)
[obj.setMax(wrap(x, i)) for i, obj in enumerate(self._base_objs)]
def setFreq(self, x):
"""
Replace the `freq` attribute.
:Args:
x: float or PyoObject
new `freq` attribute.
"""
pyoArgsAssert(self, "O", x)
self._freq = x
x, lmax = convertArgsToLists(x)
[obj.setFreq(wrap(x, i)) for i, obj | |
<filename>Chatbot.py
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
from chatterbot.trainers import ChatterBotCorpusTrainer
import discord
import base64
from Crypto import Random
from Crypto.Hash import SHA256
from Crypto.Cipher import AES
def cipherAES(password, iv):
    """Return an AES cipher in CFB mode keyed by the SHA-256 digest of *password*."""
    digest = SHA256.new(password).digest()
    return AES.new(digest, AES.MODE_CFB, iv)
def encrypt2(plaintext, password):
    """Encrypt *plaintext* with AES-CFB; returns base64(iv + ciphertext)."""
    # A fresh random IV is prepended so decrypt2 can recover it.
    iv = Random.new().read(AES.block_size)
    ciphertext = cipherAES(password, iv).encrypt(plaintext)
    return base64.b64encode(iv + ciphertext)
def decrypt2(ciphertext, password):
    """Reverse encrypt2(): strip the leading IV and decrypt the remainder."""
    raw = base64.b64decode(ciphertext)
    iv = raw[:AES.block_size]
    body = raw[AES.block_size:]
    return cipherAES(password, iv).decrypt(body)
# NOTE(review): '<KEY>' is a scrubbed placeholder (typical of exported
# repos); decrypt2 cannot yield a usable secret until the real
# base64-encoded value is restored here — confirm before running.
llaveEncripted = '<KEY>'
# Decrypt the stored secret with the hard-coded password.
llave = decrypt2(llaveEncripted, b'mypass').decode("utf-8")
# The conversational bot instance, named 'Boty'.
chat = ChatBot('Boty')
"""
talk = ['Hola', 'Que tal',
'Tengo una pregunta', 'Si, dime',
'Cuantos cursos puedo llevar en la univerisidad?', 'Lo normal son 5 cursos, para mas información acerca de los cursos y requisitos puedes visitar: https://apps2.umg.edu.gt/pensum',
'Cuales cursos puedo llevar en el semestre?', 'Dime de que semestre quieres conocer los cursos que corresponden',
'Primer ciclo', 'Al primer semestre correponden los siguientes cursos:\n 1590-001. DESARROLLO HUMANO Y PROFESIONAL,\n 1590-002. METODOLOGIA DE LA INVESTIGACION,\n 1590-003. CONTABILIDAD I,\n 1590-004. INTRODUCCION A LOS SISTEMAS DE COMPUTO,\n 1590-005. LOGICA DE SISTEMAS,\n para mas información acerca de los cursos y sus requisitos puedes visitar: https://apps2.umg.edu.gt/pensum',
'Segundo ciclo', 'Al segundo semestre correponden los siguientes cursos:\n 1590-006. PRECALCULO,\n 1590-007. ALGEBRA LINEAL,\n 1590-008. ALGORITMOS,\n 1590-009. CONTABILIDAD II,\n 1590-010. MATEMATICA DISCRETA,\n para mas información acerca de los cursos y sus requisitos puedes visitar: https://apps2.umg.edu.gt/pensum',
'Tercer ciclo', 'Al tercer semestre correponden los siguientes cursos:\n 1590-011. FISICA I,\n 1590-012. PROGRAMACION I,\n 1590-013. CALCULO I,\n 1590-014. PROCESO ADMINISTRATIVO,\n 1590-015. DERECHO INFORMATICO,\n para mas información acerca de los cursos y sus requisitos puedes visitar: https://apps2.umg.edu.gt/pensum',
'Cuarto ciclo', 'Al cuarto semestre correponden los siguientes cursos:\n 1590-016. MICROECONOMIA,\n 1590-017. PROGRAMACION II,\n 1590-018. CALCULO II,\n 1590-019. ESTADISTICA I,\n 1590-020. FISICA II,\n para mas información acerca de los cursos y sus requisitos puedes visitar: https://apps2.umg.edu.gt/pensum',
'Quinto ciclo', 'Al quinto semestre correponden los siguientes cursos:\n 1590-021. METODOS NUMERICOS,\n 1590-022. PROGRAMACION III,\n 1590-023. EMPRENDEDORES DE NEGOCIOS,\n 1590-024. ELECTRONICA ANALOGICA,\n 1590-025. ESTADISTICA II,\n para mas información acerca de los cursos y sus requisitos puedes visitar: https://apps2.umg.edu.gt/pensum',
'Sexto ciclo','Al sexto semestre correponden los siguientes cursos: 1590026 INVESTIGACION DE OPERACIONES. 1590027 BASES DE DATOS. 1590028 AUTOMATAS Y LENGUAJES FORMALES. 1590029 SISTEMAS OPERATIVOS I. 1590030 ELECTRONICA DIGITAL.',
'Septimo ciclo', 'Al septimo semestre correponden los siguientes cursos: 1590031 BASES DE DATOS II. 1590032 ANALISIS DE SISTEMAS I. 1590033 SISTEMAS OPERATIVOS II. 1590034 ARQUITECTURA DE COMPUTADORAS I. 1590035 COMPILADORES.',
'Octavo ciclo', 'Al octavo semestre correponden los siguientes cursos: 1590036 DESARROLLO WEB. 1590037 ANALISIS DE SISTEMAS II. 1590038 REDES DE COMPUTADORAS I. 1590039 ETICA PROFESIONAL. 1590040 ARQUITECTURA DE COMPUTADORAS II.',
'Noveno ciclo', 'Al noveno semestre correponden los siguientes cursos: 1590041 ADMINISTRACION DE TECNOLOGIAS DE INFORMACION. 1590042 INGENIERIA DE SOFTWARE. 1590043 PROYECTOD DE GRADUACION I. 1590044 REDES DE COMPUTADORAS II. 1590045 INTELIGENCIA ARTIFICIA.',
'Decimo ciclo','Al decimo semestre correponden los siguientes cursos: 1590046 TELECOMUNICACIONES. 1590047 SEMINARIOS DE TECNOLOGIAS DE INFORMACION. 1590048 ASEGURAMIENTO DE LA CALIDAD DEL SOFTWARE. 1590049 PROYECTO DE GRADUACION II. 1590050 SEGURIDAD Y AUDITORIA DE SISTEMAS.',
'Como puedo inscribirme en linea?', 'Los pasos para inscribirse son: 1. Entregar documentación en Registro y Control Académico\n 2. Pago de Matrícula\n 3. Asignación de Cursos. Los pagos puedes hacerlos por medio de la pagina de la Universidad: https://apps2.umg.edu.gt/pagos/,\r\n o\r por medio de tu banca en linea. \n Para obtener mas información sobre requisitos de ingreso visitar: https://umg.edu.gt/info/aspirantes \n Y para llenar formulario de nuevo estudiante visitar: https://apps2.umg.edu.gt/nuevosestudiantes/ Y para información sobre inscripciones visitar https://umg.edu.gt/info/inscripciones \n Y para asignar cursos en linea puedes visitar: https://umg.edu.gt/info/estudiantes/asignaciones',
'Cuales son los pasos para graduarme', 'Son los siguientes: a) 1000 horas de práctica laboral profesional: a partir del 7mo. ciclo b) Evaluación general en áreas de conocimiento (escalonado en 3 etapas): 1. Area de ciencias de la ingeniería: aprobado del 1 al 6o ciclo e inglés intermedios 4. 2. Área de análisis, diseño y Desarrollo: Aprobados del 1o. al 8o. ciclos e inglés avanzados II 3. Área de administración de tecnologías de información: Aprobados del 1o. al 10o. ciclo e inglés avanzados IV c) Trabajo de graduación ó 50% de créditos de una de las maestrías autorizadas.',
'Como hago un examen extraordinario', 'Debes de pagar 100 quetzales en tu banco y luego presentarsela a tu catedratico para tener el derecho',
'Por que no veo mi nota en el sistema', 'Puede ser por dos razones:\n 1.No estas solvente\n 2.No te examinaste',
'Cual es el proceso de cierre?, 'El proceso de cierre es el siguiente, debes tener ganados los 50 cursos que tiene la carrera para que puedas cerrar tu pensum.',
'Como apruebo mi proyecto de graduación?', '.Llevar un control de las asesorías y entrega de borradores de su Trabajo, al docente asesor y al revisor, por medio escrito haciendo constar fechas de entrega y devolución de los mismos, con las correcciones o enmiendas pertinentes. En otras palabras, el estudiante deberá llevar una bitácora del desarrollo del Trabajo de Graduación. Para mas info visita:http://cdn.umg.edu.gt/pdf/pregrados/arquitectura/normativos/normativo_trabajo_graduacion.pdf'
'Muchas gracias', 'Es un placer, Saludos']
"""
# First pass: train on the stock Spanish corpus shipped with chatterbot.
trainer = ChatterBotCorpusTrainer(chat)
trainer.train("chatterbot.corpus.spanish")
# Second pass: the same name is rebound to a ListTrainer for custom pairs.
trainer = ListTrainer(chat)
#trainer.train(talk)
trainer.train(['Hola', 'Hola, como te va?',
'Tengo una pregunta', 'Si, dime...'
])
# Custom Q/A pairs (Spanish) about enrolment, graduation requirements,
# extraordinary exams and missing grades. The list alternates
# question, answer, question, answer, ...
trainer.train(['como puedo inscribirme en linea', 'Los pasos para inscribirse son: 1. Entregar documentación en Registro y Control Académico\n 2. Pago de Matrícula\n 3. Asignación de Cursos. Los pagos puedes hacerlos por medio de la pagina de la Universidad: https://apps2.umg.edu.gt/pagos/,\r\n o\r por medio de tu banca en linea. \n Para obtener mas información sobre requisitos de ingreso visitar: https://umg.edu.gt/info/aspirantes \n Y para llenar formulario de nuevo estudiante visitar: https://apps2.umg.edu.gt/nuevosestudiantes/ Y para información sobre inscripciones visitar https://umg.edu.gt/info/inscripciones \n Y para asignar cursos en linea puedes visitar: https://umg.edu.gt/info/estudiantes/asignaciones',
'Cuales son los pasos para graduarme', 'Son los siguientes: a) 1000 horas de práctica laboral profesional: a partir del 7mo. ciclo b) Evaluación general en áreas de conocimiento (escalonado en 3 etapas): 1. Area de ciencias de la ingeniería: aprobado del 1 al 6o ciclo e inglés intermedios 4. 2. Área de análisis, diseño y Desarrollo: Aprobados del 1o. al 8o. ciclos e inglés avanzados II 3. Área de administración de tecnologías de información: Aprobados del 1o. al 10o. ciclo e inglés avanzados IV c) Trabajo de graduación ó 50% de créditos de una de las maestrías autorizadas.',
'como hago un examen extraordinario', 'Debes de pagar 100 quetzales en tu banco y luego presentarsela a tu catedratico para tener el derecho',
'por que no veo mi nota en el sistema', 'Puede ser por dos razones:\n 1.No estas solvente\n 2.No te examinaste',
'muchas gracias', 'Es un placer, Saludos'
])
trainer.train(['cuales y que cursos puedo llevar en el semestre', 'Dime de que semestre quieres conocer los cursos que corresponden',
'cuantos cursos llevaria en la U Universidad', 'Lo normal son 5 cursos, para mas información acerca de los cursos y requisitos puedes visitar: https://apps2.umg.edu.gt/pensum',
'primer 1er 1 ciclo', 'Al primer semestre correponden los siguientes cursos:\n 1590-001. DESARROLLO HUMANO Y PROFESIONAL,\n 1590-002. METODOLOGIA DE LA INVESTIGACION,\n 1590-003. CONTABILIDAD I,\n 1590-004. INTRODUCCION A LOS SISTEMAS DE COMPUTO,\n 1590-005. LOGICA DE SISTEMAS,\n para mas información acerca de los cursos y sus requisitos puedes visitar: https://apps2.umg.edu.gt/pensum',
'segundo 2do 2 ciclo', 'Al segundo semestre correponden los siguientes cursos:\n 1590-006. PRECALCULO,\n 1590-007. ALGEBRA LINEAL,\n 1590-008. ALGORITMOS,\n 1590-009. CONTABILIDAD II,\n 1590-010. MATEMATICA DISCRETA,\n para mas información acerca de los cursos y sus requisitos puedes visitar: https://apps2.umg.edu.gt/pensum',
'tercer 3ro 3 ciclo', 'Al tercer semestre correponden los siguientes cursos:\n 1590-011. FISICA I,\n 1590-012. PROGRAMACION I,\n 1590-013. CALCULO I,\n 1590-014. PROCESO ADMINISTRATIVO,\n 1590-015. DERECHO INFORMATICO,\n para mas información acerca de los cursos y sus requisitos puedes visitar: https://apps2.umg.edu.gt/pensum',
'cuarto 4to 4 ciclo', 'Al cuarto semestre correponden los siguientes cursos:\n 1590-016. MICROECONOMIA,\n 1590-017. PROGRAMACION II,\n 1590-018. CALCULO II,\n 1590-019. ESTADISTICA I,\n 1590-020. FISICA II,\n para mas información acerca de los cursos y sus requisitos puedes visitar: https://apps2.umg.edu.gt/pensum',
'quinto ciclo', 'Al quinto semestre correponden los siguientes cursos:\n 1590-021. METODOS NUMERICOS,\n 1590-022. PROGRAMACION III,\n 1590-023. EMPRENDEDORES DE NEGOCIOS,\n 1590-024. ELECTRONICA ANALOGICA,\n 1590-025. ESTADISTICA II,\n para mas información acerca de los cursos y sus requisitos puedes visitar: https://apps2.umg.edu.gt/pensum',
'Sexto ciclo','Al sexto semestre correponden |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.